Schema (113 columns):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |

Each row below is shown as a field/value table for the metadata columns, followed by the row's `content` field verbatim, and then the remaining column values in schema order.
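For orientation, a minimal loading sketch; the parquet file name and the use of pandas are assumptions (the table could equally be read with the `datasets` library or any columnar reader):

```python
import pandas as pd

# Assumed local export of this table; adjust the path/format to your copy.
df = pd.read_parquet("data.parquet")

row = df.iloc[0]
# `content` holds the source file itself; the other columns are repo
# provenance plus precomputed quality signals.
print(row["max_stars_repo_name"], row["max_stars_repo_path"])
print(row["content"][:200])
```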
Row 1

| field | value |
|---|---|
| hexsha | 240a3bdc1d5c63b91d07d527be155ad771e6c02e |
| size | 7,097 |
| ext | py |
| lang | Python |
| max_stars_repo_path | backend/fms_core/services/container.py |
| max_stars_repo_name | c3g/freezeman |
| max_stars_repo_head_hexsha | bc4b6c8a2876e888ce41b7d14127cc22bc2b2143 |
| max_stars_repo_licenses | ["W3C"] |
| max_stars_count | 2 |
| max_stars_repo_stars_event_min_datetime | 2021-07-31T13:20:08.000Z |
| max_stars_repo_stars_event_max_datetime | 2021-09-28T13:18:55.000Z |
| max_issues_repo_path | backend/fms_core/services/container.py |
| max_issues_repo_name | c3g/freezeman |
| max_issues_repo_head_hexsha | bc4b6c8a2876e888ce41b7d14127cc22bc2b2143 |
| max_issues_repo_licenses | ["W3C"] |
| max_issues_count | 71 |
| max_issues_repo_issues_event_min_datetime | 2021-03-12T22:08:19.000Z |
| max_issues_repo_issues_event_max_datetime | 2022-03-25T15:24:40.000Z |
| max_forks_repo_path | backend/fms_core/services/container.py |
| max_forks_repo_name | c3g/freezeman |
| max_forks_repo_head_hexsha | bc4b6c8a2876e888ce41b7d14127cc22bc2b2143 |
| max_forks_repo_licenses | ["W3C"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:
from datetime import datetime
from django.core.exceptions import ValidationError
from fms_core.models import Container
from ..containers import CONTAINER_KIND_SPECS
def get_container(barcode):
container = None
errors = []
warnings = []
if barcode:
try:
container = Container.objects.get(barcode=barcode)
except Container.DoesNotExist:
errors.append(f"Could not find Container with barcode {barcode}")
else:
errors.append(f"Barcode is required to get a container.")
return (container, errors, warnings)
def get_or_create_container(barcode,
kind=None, name=None, coordinates=None,
container_parent=None, creation_comment=None):
container = None
created_entity = False
errors = []
warnings = []
if barcode:
container_data = dict(
**(dict(location=container_parent) if container_parent else dict()),
**(dict(barcode=barcode) if barcode is not None else dict()),
            **(dict(name=name) if name is not None else dict(name=barcode)),  # By default, a container's name is its barcode
**(dict(coordinates=coordinates) if coordinates is not None else dict()),
**(dict(kind=kind) if kind is not None else dict()),
)
#TODO: check sample or container creation templates where only barcode OR name is required
comment = creation_comment or (f"Automatically generated on {datetime.utcnow().isoformat()}Z")
try:
container = Container.objects.get(barcode=barcode)
# Validate that the retrieved container is the right one
if kind and kind != container.kind:
errors.append(f"Provided container kind {kind} does not match the container kind {container.kind} of the container retrieved using the barcode {barcode}.")
if name and name != container.name:
errors.append(f"Provided container name {name} does not match the container name {container.name} of the container retrieved using the barcode {barcode}.")
if container_parent and container_parent.id != container.location.id:
errors.append(f"Provided parent container {container_parent.barcode} does not match the parent container {container.location.barcode} of the container retrieved using the barcode {barcode}.")
if coordinates and coordinates != container.coordinates:
errors.append(f"Provided container coordinates {coordinates} do not match the container coordinates {container.coordinates} of the container retrieved using the barcode {barcode}.")
except Container.DoesNotExist:
if container_parent and CONTAINER_KIND_SPECS[container_parent.kind].requires_coordinates and not coordinates:
errors.append(f"Parent container kind {container_parent.kind} requires that you provide coordinates.")
else:
try:
container = Container.objects.create(**container_data, comment=comment)
created_entity = True
                    # Pile up all validation errors raised during the creation of the container
except ValidationError as e:
errors.append(';'.join(e.messages))
else:
errors.append(f"Barcode is required to get or create a container.")
return (container, created_entity, errors, warnings)
def create_container(barcode, kind,
name=None, coordinates=None, container_parent=None, creation_comment=None):
container = None
errors = []
warnings = []
if barcode:
if Container.objects.filter(barcode=barcode).exists():
errors.append(f"Container with barcode {barcode} already exists.")
else:
container_data = dict(
**(dict(location=container_parent) if container_parent else dict()),
**(dict(barcode=barcode) if barcode is not None else dict()),
                **(dict(name=name) if name is not None else dict(name=barcode)),  # By default, a container's name is its barcode
**(dict(coordinates=coordinates) if coordinates is not None else dict()),
**(dict(kind=kind) if kind is not None else dict()),
)
comment = creation_comment or (f"Automatically generated on {datetime.utcnow().isoformat()}Z")
if container_parent and CONTAINER_KIND_SPECS[container_parent.kind].requires_coordinates and not coordinates:
errors.append(f"Parent container kind {container_parent.kind} requires that you provide coordinates.")
else:
try:
                    container = Container.objects.create(**container_data, comment=comment)
                    # Pile up all validation errors raised during the creation of the container
except ValidationError as e:
errors.append(';'.join(e.messages))
else:
errors.append(f"Barcode is required to create a container.")
return (container, errors, warnings)
def rename_container(container_to_update, barcode=None, name=None, update_comment=None):
errors = []
warnings = []
if not any([barcode, name]):
        errors.append('Either a new barcode or a new name is required.')
return (container_to_update, errors, warnings)
if barcode:
container_to_update.barcode = barcode
if name:
container_to_update.name = name
if update_comment:
container_to_update.update_comment = update_comment
try:
container_to_update.save()
except Exception as e:
errors.append(str(e))
return (container_to_update, errors, warnings)
def move_container(container_to_move, destination_barcode,
destination_coordinates=None, update_comment=None):
destination_container = None
errors = []
warnings = []
if not destination_barcode:
        errors.append('Destination location barcode is required.')
return (container_to_move, errors, warnings)
    try:
        # Look up the container barcode first to provide a better error message.
        destination_container = Container.objects.get(barcode=destination_barcode)
    except Container.DoesNotExist:
        errors.append(f"Destination container barcode {destination_barcode} does not exist.")
        # Return early: destination_container is undefined past this point.
        return (container_to_move, errors, warnings)
    if container_to_move.location == destination_container and container_to_move.coordinates == destination_coordinates:
        errors.append(f"Container {container_to_move.name} is already in container {destination_barcode} "
                      f"at coordinates {destination_coordinates}.")
        return (container_to_move, errors, warnings)
container_to_move.location = destination_container
container_to_move.coordinates = destination_coordinates if destination_coordinates else ""
container_to_move.update_comment = update_comment
try:
container_to_move.save()
except Exception as e:
errors.append(str(e))
return (container_to_move, errors, warnings)
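A hedged usage sketch for the services above; the barcode and kind values are made up (valid kinds come from CONTAINER_KIND_SPECS, which is not shown in this row):

```python
# Illustrative caller only; not part of the original module.
container, created, errors, warnings = get_or_create_container(
    barcode="CONT-0001",   # made-up barcode
    kind="tube",           # assumed to be a key of CONTAINER_KIND_SPECS
    creation_comment="Imported from a template",
)
if errors:
    # The services accumulate error strings instead of raising, so the
    # caller decides how to surface them.
    raise ValidationError(errors)
```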
Remaining columns (avg_line_length through hits), in schema order:

45.203822 | 207 | 0.672397 | 827 | 7,097 | 5.650544 | 0.140266 | 0.048791 | 0.041729 | 0.022256 | 0.644768 | 0.561952 | 0.479349 | 0.424995 | 0.415365 | 0.376204 | 0 | 0 | 0.247569 | 7,097 | 157 | 208 | 45.203822 | 0.875094 | 0.063266 | 0 | 0.570248 | 0 | 0.033058 | 0.21036 | 0.041861 | 0 | 0 | 0 | 0.006369 | 0 | 1 | 0.041322 | false | 0 | 0.033058 | 0 | 0.140496 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
Row 2

| field | value |
|---|---|
| hexsha | 240a907bd2e16d0aa3aa271652ece1375536949a |
| size | 3,298 |
| ext | py |
| lang | Python |
| max_stars_repo_path | task_09/task.py |
| max_stars_repo_name | prashnts/advent-of-code--2021 |
| max_stars_repo_head_hexsha | 315fcf470c8c1260057aeafa6d2c42f4c0f74f3f |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | task_09/task.py |
| max_issues_repo_name | prashnts/advent-of-code--2021 |
| max_issues_repo_head_hexsha | 315fcf470c8c1260057aeafa6d2c42f4c0f74f3f |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | task_09/task.py |
| max_forks_repo_name | prashnts/advent-of-code--2021 |
| max_forks_repo_head_hexsha | 315fcf470c8c1260057aeafa6d2c42f4c0f74f3f |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:
import os
from functools import reduce
__here__ = os.path.dirname(__file__)
TEST_DATA = '''\
2199943210
3987894921
9856789892
8767896789
9899965678\
'''
def gen_neighbors(array, x, y):
    '''Generates the points to the north, south, east, and west of (x, y).
    On the edges, only valid (in-bounds) points are generated.
'''
dirs = [
(x - 1, y),
(x + 1, y),
(x, y - 1),
(x, y + 1),
]
for x, y in dirs:
if x >= 0 and y >= 0:
try:
yield array[x][y]
except IndexError:
continue
def decode_input(data):
lines = data.split('\n')
for line in lines:
yield [int(x) for x in line]
def lowest_points(array, shape):
'''Generates the points that are lower than all neighbors.'''
x_max, y_max = shape
for x in range(0, x_max):
for y in range(0, y_max):
current = array[x][y]
neighborhood = gen_neighbors(array, x, y)
if current < min(neighborhood):
yield current, (x, y)
def calculate_1(data):
array = list(decode_input(data))
x_max, y_max = len(array), len(array[0])
lows = [x for x, _ in lowest_points(array, (x_max, y_max))]
risk_levels = sum([r + 1 for r in lows])
return risk_levels
def flood_fill(data, origin, shape):
'''Returns an array filled with `10` starting from origin and bounded by `9`.
    Mostly implemented as the stack-based recursive flood-fill implementation given on
Wikipedia.
See: https://en.wikipedia.org/wiki/Flood_fill#Stack-based_recursive_implementation_(four-way)
Things to note:
- Since we know that boundaries of fill are `9`, we fill the points with `10`
so that we can distinguish those filled points from unfilled or not-to-be filled
ones.
- We use a copy of the data as array which is modified recursively by filler.
- We move in N, S, E, W directions from origin and fill as many points as we can.
'''
x_max, y_max = shape
array = [d[:] for d in data]
def filler(x, y):
if x < 0 or y < 0 or x >= x_max or y >= y_max:
# Bounds check.
return
if array[x][y] >= 9:
# Boundary check.
return
array[x][y] = 10 # use this to distinguish filled points.
filler(x, y + 1) # North
filler(x, y - 1) # South
filler(x + 1, y) # East
filler(x - 1, y) # West
filler(*origin)
return array
def calculate_2(data):
array = list(decode_input(data))
x_max, y_max = len(array), len(array[0])
low_coords = [coord for _, coord in lowest_points(array, (x_max, y_max))]
basins = []
for coord in low_coords:
filled = flood_fill(array, coord, (x_max, y_max))
basin_size = sum([row.count(10) for row in filled])
basins.append(basin_size)
top_3_basins = sorted(basins)[-3:]
return reduce(lambda x, y: x * y, top_3_basins, 1)
if __name__ == '__main__':
assert calculate_1(TEST_DATA) == 15
assert calculate_2(TEST_DATA) == 1134
with open(os.path.join(__here__, 'input.txt'), 'r') as fp:
data = fp.read()
answer_1 = calculate_1(data)
answer_2 = calculate_2(data)
print(f'{answer_1=}')
print(f'{answer_2=}')
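One caveat worth noting: the recursive filler above can exceed Python's default recursion limit (about 1000 frames) on a sufficiently large basin. A minimal iterative sketch with an explicit stack, offered as an alternative rather than as the author's code:

```python
def flood_fill_iterative(data, origin, shape):
    '''Same contract as flood_fill, but uses an explicit stack so deep
    basins cannot overflow the call stack.'''
    x_max, y_max = shape
    array = [d[:] for d in data]
    stack = [origin]
    while stack:
        x, y = stack.pop()
        if x < 0 or y < 0 or x >= x_max or y >= y_max:
            continue  # out of bounds
        if array[x][y] >= 9:
            continue  # boundary (9) or already filled (10)
        array[x][y] = 10
        stack.extend([(x, y + 1), (x, y - 1), (x + 1, y), (x - 1, y)])
    return array
```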
Remaining columns (avg_line_length through hits), in schema order:

25.175573 | 97 | 0.593087 | 494 | 3,298 | 3.803644 | 0.334008 | 0.015966 | 0.018627 | 0.029803 | 0.119212 | 0.085152 | 0.085152 | 0.085152 | 0.056413 | 0.056413 | 0 | 0.042006 | 0.292602 | 3,298 | 130 | 98 | 25.369231 | 0.763395 | 0.259551 | 0 | 0.105263 | 0 | 0 | 0.042194 | 0 | 0 | 0 | 0 | 0 | 0.026316 | 1 | 0.092105 | false | 0 | 0.026316 | 0 | 0.184211 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
Row 3

| field | value |
|---|---|
| hexsha | 240b42fee6717a7b90d0b1207b8b05b2c8fb6f5b |
| size | 2,046 |
| ext | py |
| lang | Python |
| max_stars_repo_path | setup.py |
| max_stars_repo_name | dotpy/step |
| max_stars_repo_head_hexsha | 03a5fa3e2ef35675b6729a00a4752b0c703ee243 |
| max_stars_repo_licenses | ["BSD-3-Clause"] |
| max_stars_count | 13 |
| max_stars_repo_stars_event_min_datetime | 2016-06-29T21:19:45.000Z |
| max_stars_repo_stars_event_max_datetime | 2021-12-26T20:36:05.000Z |
| max_issues_repo_path | setup.py |
| max_issues_repo_name | dotpy/step |
| max_issues_repo_head_hexsha | 03a5fa3e2ef35675b6729a00a4752b0c703ee243 |
| max_issues_repo_licenses | ["BSD-3-Clause"] |
| max_issues_count | 3 |
| max_issues_repo_issues_event_min_datetime | 2015-03-19T22:21:27.000Z |
| max_issues_repo_issues_event_max_datetime | 2019-10-10T23:03:45.000Z |
| max_forks_repo_path | setup.py |
| max_forks_repo_name | dotpy/step |
| max_forks_repo_head_hexsha | 03a5fa3e2ef35675b6729a00a4752b0c703ee243 |
| max_forks_repo_licenses | ["BSD-3-Clause"] |
| max_forks_count | 3 |
| max_forks_repo_forks_event_min_datetime | 2018-03-27T14:27:31.000Z |
| max_forks_repo_forks_event_max_datetime | 2020-08-07T08:23:08.000Z |

content:
#!/usr/bin/env python
"""
This is the installation script of the step module, a light and fast template engine. You can run it by typing:
python setup.py install
You can also run the test suite by running:
python setup.py test
"""
import sys
from distutils.core import setup
from step.tests import TestCommand
__author__ = "Daniele Mazzocchio <danix@kernel-panic.it>"
__version__ = "0.0.3"
__date__ = "Jul 25, 2019"
# Python versions prior to 2.2.3 don't support 'classifiers' and 'download_url'
if sys.version < "2.2.3":
from distutils.dist import DistributionMetadata
DistributionMetadata.classifiers = None
DistributionMetadata.download_url = None
setup(name = "step-template",
version = __version__,
author = "Daniele Mazzocchio",
author_email = "danix@kernel-panic.it",
packages = ["step", "step.tests"],
cmdclass = {"test": TestCommand},
description = "Simple Template Engine for Python",
download_url = "https://github.com/dotpy/step/archive/step-0.0.3.tar.gz",
classifiers = ["Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Text Processing"],
url = "https://github.com/dotpy/step",
license = "OSI-Approved :: BSD License",
keywords = "templates templating template-engines",
long_description = "step is a pure-Python module providing a very "
"simple template engine with minimum syntax. It "
"supports variable expansion, flow control and "
"embedding of Python code.")
Remaining columns (avg_line_length through hits), in schema order:

37.888889 | 111 | 0.583578 | 215 | 2,046 | 5.455814 | 0.562791 | 0.035806 | 0.022165 | 0.030691 | 0.092072 | 0.044331 | 0 | 0 | 0 | 0 | 0 | 0.013669 | 0.320626 | 2,046 | 53 | 112 | 38.603774 | 0.830216 | 0.148094 | 0 | 0 | 0 | 0.030303 | 0.419839 | 0.025375 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.121212 | 0 | 0.121212 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
Row 4

| field | value |
|---|---|
| hexsha | 2410cb663e809f3a2cfff7eb2a2ab513d0a3a843 |
| size | 3,006 |
| ext | py |
| lang | Python |
| max_stars_repo_path | oops_fhir/r4/code_system/effect_estimate_type.py |
| max_stars_repo_name | Mikuana/oops_fhir |
| max_stars_repo_head_hexsha | 77963315d123756b7d21ae881f433778096a1d25 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | oops_fhir/r4/code_system/effect_estimate_type.py |
| max_issues_repo_name | Mikuana/oops_fhir |
| max_issues_repo_head_hexsha | 77963315d123756b7d21ae881f433778096a1d25 |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | oops_fhir/r4/code_system/effect_estimate_type.py |
| max_forks_repo_name | Mikuana/oops_fhir |
| max_forks_repo_head_hexsha | 77963315d123756b7d21ae881f433778096a1d25 |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:
from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["EffectEstimateType"]
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class EffectEstimateType:
"""
EffectEstimateType
Whether the effect estimate is an absolute effect estimate (absolute
difference) or a relative effect estimate (relative difference), and the
    specific type of effect estimate (e.g. relative risk or median
difference).
Status: draft - Version: 4.0.1
Copyright None
http://terminology.hl7.org/CodeSystem/effect-estimate-type
"""
relative_rr = CodeSystemConcept(
{
"code": "relative-RR",
"definition": "relative risk (a type of relative effect estimate).",
"display": "relative risk",
}
)
"""
relative risk
relative risk (a type of relative effect estimate).
"""
relative_or = CodeSystemConcept(
{
"code": "relative-OR",
"definition": "odds ratio (a type of relative effect estimate).",
"display": "odds ratio",
}
)
"""
odds ratio
odds ratio (a type of relative effect estimate).
"""
relative_hr = CodeSystemConcept(
{
"code": "relative-HR",
"definition": "hazard ratio (a type of relative effect estimate).",
"display": "hazard ratio",
}
)
"""
hazard ratio
hazard ratio (a type of relative effect estimate).
"""
absolute_ard = CodeSystemConcept(
{
"code": "absolute-ARD",
"definition": "absolute risk difference (a type of absolute effect estimate).",
"display": "absolute risk difference",
}
)
"""
absolute risk difference
absolute risk difference (a type of absolute effect estimate).
"""
absolute_mean_diff = CodeSystemConcept(
{
"code": "absolute-MeanDiff",
"definition": "mean difference (a type of absolute effect estimate).",
"display": "mean difference",
}
)
"""
mean difference
mean difference (a type of absolute effect estimate).
"""
absolute_smd = CodeSystemConcept(
{
"code": "absolute-SMD",
"definition": "standardized mean difference (a type of absolute effect estimate).",
"display": "standardized mean difference",
}
)
"""
standardized mean difference
standardized mean difference (a type of absolute effect estimate).
"""
absolute_median_diff = CodeSystemConcept(
{
"code": "absolute-MedianDiff",
"definition": "median difference (a type of absolute effect estimate).",
"display": "median difference",
}
)
"""
median difference
median difference (a type of absolute effect estimate).
"""
class Meta:
resource = _resource
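A small lookup sketch built only on what the file defines. It is a hypothetical helper, not part of oops_fhir, and it deliberately avoids assuming anything about CodeSystemConcept's attribute API:

```python
def concepts(cls=EffectEstimateType):
    '''Map class attribute names to the CodeSystemConcept instances above.'''
    return {name: value for name, value in vars(cls).items()
            if isinstance(value, CodeSystemConcept)}

print(sorted(concepts()))  # ['absolute_ard', 'absolute_mean_diff', ...]
```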
Remaining columns (avg_line_length through hits), in schema order:

24.639344 | 95 | 0.600798 | 287 | 3,006 | 6.216028 | 0.226481 | 0.149103 | 0.054933 | 0.076233 | 0.454596 | 0.454596 | 0.400785 | 0.387892 | 0.183857 | 0 | 0 | 0.001897 | 0.298403 | 3,006 | 121 | 96 | 24.842975 | 0.844002 | 0.119428 | 0 | 0 | 0 | 0 | 0.388748 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.052632 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
Row 5

| field | value |
|---|---|
| hexsha | 24149d63fd46dabd5d16d77a5fa8be1e85e8bdf1 |
| size | 1,361 |
| ext | py |
| lang | Python |
| max_stars_repo_path | about/tests/test_views.py |
| max_stars_repo_name | IMegaMaan/Django-project |
| max_stars_repo_head_hexsha | 7ebe62aacf972410299f92183c6c9e23cd837fe7 |
| max_stars_repo_licenses | ["BSD-3-Clause"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | about/tests/test_views.py |
| max_issues_repo_name | IMegaMaan/Django-project |
| max_issues_repo_head_hexsha | 7ebe62aacf972410299f92183c6c9e23cd837fe7 |
| max_issues_repo_licenses | ["BSD-3-Clause"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | about/tests/test_views.py |
| max_forks_repo_name | IMegaMaan/Django-project |
| max_forks_repo_head_hexsha | 7ebe62aacf972410299f92183c6c9e23cd837fe7 |
| max_forks_repo_licenses | ["BSD-3-Clause"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:
from django.test import TestCase, Client
from django.urls import reverse
class TaskAboutViewsTests(TestCase):
def setUp(self):
self.guest_client = Client()
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.about_views = {
'about:author': 'author.html',
'about:tech': 'tech.html',
}
    def test_about_pages_available_to_guest(self):
        """The about app's pages are available to a guest user."""
for view in TaskAboutViewsTests.about_views.keys():
with self.subTest():
response = self.guest_client.get(reverse(view))
status = response.status_code
self.assertEqual(
status, 200,
                    f'The view "{view}" of the about app is not available '
                    'to a guest user'
)
def test_about_views_according_templates(self):
"""Проверка вызова корректных шаблонов во view приложения about"""
for view, template in TaskAboutViewsTests.about_views.items():
with self.subTest():
response = self.guest_client.get(reverse(view))
self.assertTemplateUsed(
response, template,
                    f'View "{view}" uses an incorrect template'
)
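For context, a URLconf that would satisfy these tests might look like the sketch below; the view names are assumptions inferred from the reversed 'about:author' and 'about:tech' names, since the project's actual urls.py is not shown in this row:

```python
# about/urls.py (assumed shape, not the project's actual file)
from django.urls import path
from . import views

app_name = 'about'

urlpatterns = [
    path('author/', views.AboutAuthorView.as_view(), name='author'),
    path('tech/', views.AboutTechView.as_view(), name='tech'),
]
```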
Remaining columns (avg_line_length through hits), in schema order:

34.897436 | 75 | 0.588538 | 130 | 1,361 | 6.038462 | 0.453846 | 0.050955 | 0.057325 | 0.078981 | 0.132484 | 0.132484 | 0.132484 | 0.132484 | 0.132484 | 0.132484 | 0 | 0.003261 | 0.324026 | 1,361 | 38 | 76 | 35.815789 | 0.85 | 0.086701 | 0 | 0.133333 | 0 | 0 | 0.131494 | 0 | 0 | 0 | 0 | 0 | 0.066667 | 1 | 0.133333 | false | 0 | 0.066667 | 0 | 0.233333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
Row 6

| field | value |
|---|---|
| hexsha | 2415ff143543c2ce9970a39f3d80f9c3542308d9 |
| size | 1,937 |
| ext | py |
| lang | Python |
| max_stars_repo_path | library/verification/token.py |
| max_stars_repo_name | LongmaoTeamTf/audio_aligner_app |
| max_stars_repo_head_hexsha | 899c27a6ce5b74ec728d70acaa2a9861f8fd7b92 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 5 |
| max_stars_repo_stars_event_min_datetime | 2020-01-19T07:27:31.000Z |
| max_stars_repo_stars_event_max_datetime | 2021-03-31T05:56:07.000Z |
| max_issues_repo_path | library/verification/token.py |
| max_issues_repo_name | LongmaoTeamTf/audio_aligner_app |
| max_issues_repo_head_hexsha | 899c27a6ce5b74ec728d70acaa2a9861f8fd7b92 |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 3 |
| max_issues_repo_issues_event_min_datetime | 2021-06-02T00:55:11.000Z |
| max_issues_repo_issues_event_max_datetime | 2022-03-12T12:11:08.000Z |
| max_forks_repo_path | library/verification/token.py |
| max_forks_repo_name | LongmaoTeamTf/audio_aligner_app |
| max_forks_repo_head_hexsha | 899c27a6ce5b74ec728d70acaa2a9861f8fd7b92 |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | 2 |
| max_forks_repo_forks_event_min_datetime | 2020-03-17T07:10:48.000Z |
| max_forks_repo_forks_event_max_datetime | 2022-01-12T10:13:11.000Z |

content:
"""
user token
@version: v1.0.1
@Company: Thefair
@Author: Wang Yao
@Date: 2019-11-17 15:21:11
@LastEditors: Wang Yao
@LastEditTime: 2019-11-17 21:17:19
"""
from functools import wraps
from flask import request
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer, BadData
from library.response.tfexception import TfException
from library.request.tfrequest import TfRequest
class TfToken(object):
"""
    Token class
"""
def __init__(self, secret_key: str, expires_in: int):
self._secret_key = secret_key
self._expires_in = expires_in
self.serializer = Serializer(secret_key, expires_in=expires_in)
def get_token(self, user_id: str, email: str) -> str:
"""
        Generate a token.
        @param user_id: user id
        @param email: email address
        @return: token
"""
data = {'user_id': user_id, 'email': email}
token = self.serializer.dumps(data).decode()
return token
def decode_token(self, token: str) -> dict:
"""
        Decode a token.
@param token
@return: data
"""
try:
data = self.serializer.loads(token)
except BadData:
code, msg = -3, "token decoded error."
raise TfException(code, msg)
return data
def check_token(self, token):
"""
        Validate a token.
@param token
@return: token_data
"""
if token == 'null':
code, msg = -3, "please login first."
raise TfException(code, msg)
token_data = self.decode_token(token)
return token_data
def login_check(func):
"""
    Login-check decorator.
    @param func: API function
@return: func
"""
@wraps(func)
def wrapper(*args, **kw):
TfRequest().get_params()
if not request.params.get('token'):
raise TfException(-3, "please login first.")
return func(*args, **kw)
return wrapper
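A round-trip sketch with made-up secret and expiry values. Note that TimedJSONWebSignatureSerializer only exists in itsdangerous < 2.1, so this assumes the older library the module was written against:

```python
tf = TfToken(secret_key="change-me", expires_in=3600)  # made-up values
token = tf.get_token(user_id="42", email="user@example.com")
assert tf.check_token(token) == {"user_id": "42", "email": "user@example.com"}
```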
Remaining columns (avg_line_length through hits), in schema order:

24.518987 | 79 | 0.593702 | 226 | 1,937 | 4.964602 | 0.376106 | 0.040107 | 0.01426 | 0.032086 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024945 | 0.296335 | 1,937 | 78 | 80 | 24.833333 | 0.798239 | 0.177594 | 0 | 0.057143 | 0 | 0 | 0.055556 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.171429 | false | 0 | 0.142857 | 0 | 0.485714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
Row 7

| field | value |
|---|---|
| hexsha | 2418e136a99b83121805fd21e003e44836c184cf |
| size | 27,910 |
| ext | py |
| lang | Python |
| max_stars_repo_path | service/artifacts_unittest.py |
| max_stars_repo_name | khromiumos/chromiumos-chromite |
| max_stars_repo_head_hexsha | a42a85481cdd9d635dc40a04585e427f89f3bb3f |
| max_stars_repo_licenses | ["BSD-3-Clause"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | service/artifacts_unittest.py |
| max_issues_repo_name | khromiumos/chromiumos-chromite |
| max_issues_repo_head_hexsha | a42a85481cdd9d635dc40a04585e427f89f3bb3f |
| max_issues_repo_licenses | ["BSD-3-Clause"] |
| max_issues_count | 2 |
| max_issues_repo_issues_event_min_datetime | 2021-03-26T00:29:32.000Z |
| max_issues_repo_issues_event_max_datetime | 2021-04-30T21:29:33.000Z |
| max_forks_repo_path | service/artifacts_unittest.py |
| max_forks_repo_name | khromiumos/chromiumos-chromite |
| max_forks_repo_head_hexsha | a42a85481cdd9d635dc40a04585e427f89f3bb3f |
| max_forks_repo_licenses | ["BSD-3-Clause"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Artifacts service tests."""
from __future__ import print_function
import json
import os
import shutil
import mock
from chromite.lib import autotest_util
from chromite.lib import build_target_lib
from chromite.lib import chroot_lib
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import osutils
from chromite.lib import partial_mock
from chromite.lib import portage_util
from chromite.lib import sysroot_lib
from chromite.lib import toolchain_util
from chromite.lib.paygen import partition_lib
from chromite.lib.paygen import paygen_payload_lib
from chromite.lib.paygen import paygen_stateful_payload_lib
from chromite.service import artifacts
class BundleAutotestFilesTest(cros_test_lib.MockTempDirTestCase):
"""Test the Bundle Autotest Files function."""
def setUp(self):
self.output_dir = os.path.join(self.tempdir, 'output_dir')
self.archive_dir = os.path.join(self.tempdir, 'archive_base_dir')
sysroot_path = os.path.join(self.tempdir, 'sysroot')
self.chroot = chroot_lib.Chroot(self.tempdir)
self.sysroot = sysroot_lib.Sysroot('sysroot')
self.sysroot_dne = sysroot_lib.Sysroot('sysroot_DNE')
# Make sure we have the valid paths.
osutils.SafeMakedirs(self.output_dir)
osutils.SafeMakedirs(sysroot_path)
def testInvalidOutputDirectory(self):
"""Test invalid output directory."""
with self.assertRaises(AssertionError):
artifacts.BundleAutotestFiles(self.chroot, self.sysroot, None)
def testInvalidSysroot(self):
"""Test sysroot that does not exist."""
with self.assertRaises(AssertionError):
artifacts.BundleAutotestFiles(self.chroot, self.sysroot_dne,
self.output_dir)
def testArchiveDirectoryDoesNotExist(self):
"""Test archive directory that does not exist causes error."""
self.assertEqual(
artifacts.BundleAutotestFiles(self.chroot, self.sysroot,
self.output_dir), {})
def testSuccess(self):
"""Test a successful call handling."""
ab_path = os.path.join(self.tempdir, self.sysroot.path,
constants.AUTOTEST_BUILD_PATH)
osutils.SafeMakedirs(ab_path)
# Makes all of the individual calls to build out each of the tarballs work
# nicely with a single patch.
self.PatchObject(autotest_util.AutotestTarballBuilder, '_BuildTarball',
side_effect=lambda _, path, **kwargs: osutils.Touch(path))
result = artifacts.BundleAutotestFiles(self.chroot, self.sysroot,
self.output_dir)
for archive in result.values():
self.assertStartsWith(archive, self.output_dir)
self.assertExists(archive)
class ArchiveChromeEbuildEnvTest(cros_test_lib.MockTempDirTestCase):
"""ArchiveChromeEbuildEnv tests."""
def setUp(self):
# Create the chroot and sysroot instances.
self.chroot_path = os.path.join(self.tempdir, 'chroot_dir')
self.chroot = chroot_lib.Chroot(path=self.chroot_path)
self.sysroot_path = os.path.join(self.chroot_path, 'sysroot_dir')
self.sysroot = sysroot_lib.Sysroot(self.sysroot_path)
# Create the output directory.
self.output_dir = os.path.join(self.tempdir, 'output_dir')
osutils.SafeMakedirs(self.output_dir)
# The sysroot's /var/db/pkg prefix for the chrome package directories.
var_db_pkg = os.path.join(self.sysroot_path, portage_util.VDB_PATH)
# Create the var/db/pkg dir so we have that much for no-chrome tests.
osutils.SafeMakedirs(var_db_pkg)
# Two versions of chrome to test the multiple version checks/handling.
chrome_v1 = '%s-1.0.0-r1' % constants.CHROME_PN
chrome_v2 = '%s-2.0.0-r1' % constants.CHROME_PN
# Build the two chrome version paths.
chrome_cat_dir = os.path.join(var_db_pkg, constants.CHROME_CN)
self.chrome_v1_dir = os.path.join(chrome_cat_dir, chrome_v1)
self.chrome_v2_dir = os.path.join(chrome_cat_dir, chrome_v2)
# Directory tuple for verifying the result archive contents.
self.expected_archive_contents = cros_test_lib.Directory('./',
'environment')
    # Create an environment.bz2 file to put into folders.
env_file = os.path.join(self.tempdir, 'environment')
osutils.Touch(env_file)
cros_build_lib.run(['bzip2', env_file])
self.env_bz2 = '%s.bz2' % env_file
def _CreateChromeDir(self, path, populate=True):
"""Setup a chrome package directory.
Args:
path (str): The full chrome package path.
populate (bool): Whether to include the environment bz2.
"""
osutils.SafeMakedirs(path)
if populate:
shutil.copy(self.env_bz2, path)
def testSingleChromeVersion(self):
"""Test a successful single-version run."""
self._CreateChromeDir(self.chrome_v1_dir)
created = artifacts.ArchiveChromeEbuildEnv(self.sysroot, self.output_dir)
self.assertStartsWith(created, self.output_dir)
cros_test_lib.VerifyTarball(created, self.expected_archive_contents)
def testMultipleChromeVersions(self):
"""Test a successful multiple version run."""
# Create both directories, but don't populate the v1 dir so it'll hit an
# error if the wrong one is used.
self._CreateChromeDir(self.chrome_v1_dir, populate=False)
self._CreateChromeDir(self.chrome_v2_dir)
created = artifacts.ArchiveChromeEbuildEnv(self.sysroot, self.output_dir)
self.assertStartsWith(created, self.output_dir)
cros_test_lib.VerifyTarball(created, self.expected_archive_contents)
def testNoChrome(self):
"""Test no version of chrome present."""
with self.assertRaises(artifacts.NoFilesError):
artifacts.ArchiveChromeEbuildEnv(self.sysroot, self.output_dir)
class ArchiveImagesTest(cros_test_lib.TempDirTestCase):
"""ArchiveImages tests."""
def setUp(self):
self.image_dir = os.path.join(self.tempdir, 'images')
osutils.SafeMakedirs(self.image_dir)
self.output_dir = os.path.join(self.tempdir, 'output')
osutils.SafeMakedirs(self.output_dir)
self.images = []
for img in artifacts.IMAGE_TARS.keys():
full_path = os.path.join(self.image_dir, img)
self.images.append(full_path)
osutils.Touch(full_path)
osutils.Touch(os.path.join(self.image_dir, 'irrelevant_image.bin'))
osutils.Touch(os.path.join(self.image_dir, 'foo.txt'))
osutils.Touch(os.path.join(self.image_dir, 'bar'))
def testNoImages(self):
"""Test an empty directory handling."""
artifacts.ArchiveImages(self.tempdir, self.output_dir)
self.assertFalse(os.listdir(self.output_dir))
def testAllImages(self):
"""Test each image gets picked up."""
created = artifacts.ArchiveImages(self.image_dir, self.output_dir)
self.assertCountEqual(list(artifacts.IMAGE_TARS.values()), created)
class CreateChromeRootTest(cros_test_lib.RunCommandTempDirTestCase):
"""CreateChromeRoot tests."""
def setUp(self):
# Create the build target.
self.build_target = build_target_lib.BuildTarget('board')
# Create the chroot.
self.chroot_dir = os.path.join(self.tempdir, 'chroot')
self.chroot_tmp = os.path.join(self.chroot_dir, 'tmp')
osutils.SafeMakedirs(self.chroot_tmp)
self.chroot = chroot_lib.Chroot(path=self.chroot_dir)
# Create the output directory.
self.output_dir = os.path.join(self.tempdir, 'output_dir')
osutils.SafeMakedirs(self.output_dir)
def testRunCommandError(self):
"""Test handling when the run command call is not successful."""
self.rc.SetDefaultCmdResult(
side_effect=cros_build_lib.RunCommandError('Error'))
with self.assertRaises(artifacts.CrosGenerateSysrootError):
artifacts.CreateChromeRoot(self.chroot, self.build_target,
self.output_dir)
def testSuccess(self):
"""Test success case."""
# Separate tempdir for the method itself.
call_tempdir = os.path.join(self.chroot_tmp, 'cgs_call_tempdir')
osutils.SafeMakedirs(call_tempdir)
self.PatchObject(osutils.TempDir, '__enter__', return_value=call_tempdir)
# Set up files in the tempdir since the command isn't being called to
# generate anything for it to handle.
files = ['file1', 'file2', 'file3']
expected_files = [os.path.join(self.output_dir, f) for f in files]
for f in files:
osutils.Touch(os.path.join(call_tempdir, f))
created = artifacts.CreateChromeRoot(self.chroot, self.build_target,
self.output_dir)
# Just test the command itself and the parameter-based args.
self.assertCommandContains(['cros_generate_sysroot',
'--board', self.build_target.name])
    # Make sure we get all the expected files.
self.assertCountEqual(expected_files, created)
for f in created:
self.assertExists(f)
class BundleEBuildLogsTarballTest(cros_test_lib.TempDirTestCase):
"""BundleEBuildLogsTarball tests."""
def testBundleEBuildLogsTarball(self):
"""Verifies that the correct EBuild tar files are bundled."""
board = 'samus'
# Create chroot object and sysroot object
chroot_path = os.path.join(self.tempdir, 'chroot')
chroot = chroot_lib.Chroot(path=chroot_path)
sysroot_path = os.path.join('build', board)
sysroot = sysroot_lib.Sysroot(sysroot_path)
# Create parent dir for logs
log_parent_dir = os.path.join(chroot.path, 'build')
# Names of log files typically found in a build directory.
log_files = (
'',
'x11-libs:libdrm-2.4.81-r24:20170816-175008.log',
'x11-libs:libpciaccess-0.12.902-r2:20170816-174849.log',
'x11-libs:libva-1.7.1-r2:20170816-175019.log',
'x11-libs:libva-intel-driver-1.7.1-r4:20170816-175029.log',
'x11-libs:libxkbcommon-0.4.3-r2:20170816-174908.log',
'x11-libs:pango-1.32.5-r1:20170816-174954.log',
'x11-libs:pixman-0.32.4:20170816-174832.log',
'x11-misc:xkeyboard-config-2.15-r3:20170816-174908.log',
'x11-proto:kbproto-1.0.5:20170816-174849.log',
'x11-proto:xproto-7.0.31:20170816-174849.log',
)
tarred_files = [os.path.join('logs', x) for x in log_files]
log_files_root = os.path.join(log_parent_dir,
'%s/tmp/portage/logs' % board)
# Generate a representative set of log files produced by a typical build.
cros_test_lib.CreateOnDiskHierarchy(log_files_root, log_files)
archive_dir = self.tempdir
tarball = artifacts.BundleEBuildLogsTarball(chroot, sysroot, archive_dir)
self.assertEqual('ebuild_logs.tar.xz', tarball)
# Verify the tarball contents.
tarball_fullpath = os.path.join(self.tempdir, tarball)
cros_test_lib.VerifyTarball(tarball_fullpath, tarred_files)
class BundleChromeOSConfigTest(cros_test_lib.TempDirTestCase):
"""BundleChromeOSConfig tests."""
def setUp(self):
self.board = 'samus'
# Create chroot object and sysroot object
chroot_path = os.path.join(self.tempdir, 'chroot')
self.chroot = chroot_lib.Chroot(path=chroot_path)
sysroot_path = os.path.join('build', self.board)
self.sysroot = sysroot_lib.Sysroot(sysroot_path)
self.archive_dir = self.tempdir
def testBundleChromeOSConfig(self):
"""Verifies that the correct ChromeOS config file is bundled."""
# Create parent dir for ChromeOS Config output.
config_parent_dir = os.path.join(self.chroot.path, 'build')
# Names of ChromeOS Config files typically found in a build directory.
config_files = ('config.json',
cros_test_lib.Directory('yaml', [
'config.c', 'config.yaml', 'ec_config.c', 'ec_config.h',
'model.yaml', 'private-model.yaml'
]))
config_files_root = os.path.join(
config_parent_dir, '%s/usr/share/chromeos-config' % self.board)
# Generate a representative set of config files produced by a typical build.
cros_test_lib.CreateOnDiskHierarchy(config_files_root, config_files)
# Write a payload to the config.yaml file.
test_config_payload = {
'chromeos': {
'configs': [{
'identity': {
'platform-name': 'Samus'
}
}]
}
}
with open(os.path.join(config_files_root, 'yaml', 'config.yaml'), 'w') as f:
json.dump(test_config_payload, f)
config_filename = artifacts.BundleChromeOSConfig(self.chroot, self.sysroot,
self.archive_dir)
self.assertEqual('config.yaml', config_filename)
with open(os.path.join(self.archive_dir, config_filename), 'r') as f:
self.assertEqual(test_config_payload, json.load(f))
def testNoChromeOSConfigFound(self):
"""Verifies that None is returned when no ChromeOS config file is found."""
self.assertIsNone(
artifacts.BundleChromeOSConfig(self.chroot, self.sysroot,
self.archive_dir))
class BundleVmFilesTest(cros_test_lib.TempDirTestCase):
"""BundleVmFiles tests."""
def testBundleVmFiles(self):
"""Verifies that the correct files are bundled"""
# Create the chroot instance.
chroot_path = os.path.join(self.tempdir, 'chroot')
chroot = chroot_lib.Chroot(path=chroot_path)
# Create the test_results_dir
test_results_dir = 'test/results'
# Create a set of files where some should get bundled up as VM files.
# Add a suffix (123) to one of the files matching the VM pattern prefix.
vm_files = ('file1.txt',
'file2.txt',
'chromiumos_qemu_disk.bin' + '123',
'chromiumos_qemu_mem.bin'
)
target_test_dir = os.path.join(chroot_path, test_results_dir)
cros_test_lib.CreateOnDiskHierarchy(target_test_dir, vm_files)
# Create the output directory.
output_dir = os.path.join(self.tempdir, 'output_dir')
osutils.SafeMakedirs(output_dir)
archives = artifacts.BundleVmFiles(
chroot, test_results_dir, output_dir)
expected_archive_files = [
output_dir + '/chromiumos_qemu_disk.bin' + '123.tar',
output_dir + '/chromiumos_qemu_mem.bin.tar']
self.assertCountEqual(archives, expected_archive_files)
class BuildFirmwareArchiveTest(cros_test_lib.TempDirTestCase):
"""BuildFirmwareArchive tests."""
def testBuildFirmwareArchive(self):
"""Verifies that firmware archiver includes proper files"""
# Assorted set of file names, some of which are supposed to be included in
# the archive.
fw_files = (
'dts/emeraldlake2.dts',
'image-link.rw.bin',
'nv_image-link.bin',
'pci8086,0166.rom',
'seabios.cbfs',
'u-boot.elf',
'u-boot_netboot.bin',
'updater-link.rw.sh',
'x86-memtest',
)
board = 'link'
# fw_test_root = os.path.join(self.tempdir, os.path.basename(__file__))
fw_test_root = self.tempdir
fw_files_root = os.path.join(fw_test_root,
'chroot/build/%s/firmware' % board)
# Generate a representative set of files produced by a typical build.
cros_test_lib.CreateOnDiskHierarchy(fw_files_root, fw_files)
# Create the chroot and sysroot instances.
chroot_path = os.path.join(self.tempdir, 'chroot')
chroot = chroot_lib.Chroot(path=chroot_path)
sysroot = sysroot_lib.Sysroot('/build/link')
# Create an archive from the simulated firmware directory
tarball = os.path.join(
fw_test_root,
artifacts.BuildFirmwareArchive(chroot, sysroot, fw_test_root))
# Verify the tarball contents.
cros_test_lib.VerifyTarball(tarball, fw_files)
class BundleFpmcuUnittestsTest(cros_test_lib.TempDirTestCase):
"""BundleFpmcuUnittests tests."""
def testBundleFpmcuUnittests(self):
"""Verifies that the resulting tarball includes proper files"""
unittest_files = (
'bloonchipper/test_rsa.bin',
'dartmonkey/test_utils.bin',
)
board = 'hatch'
unittest_files_root = os.path.join(
self.tempdir,
'chroot/build/%s/firmware/chromeos-fpmcu-unittests' % board)
cros_test_lib.CreateOnDiskHierarchy(unittest_files_root, unittest_files)
chroot_path = os.path.join(self.tempdir, 'chroot')
chroot = chroot_lib.Chroot(path=chroot_path)
sysroot = sysroot_lib.Sysroot('/build/%s' % board)
tarball = os.path.join(
self.tempdir,
artifacts.BundleFpmcuUnittests(chroot, sysroot, self.tempdir))
cros_test_lib.VerifyTarball(
tarball,
unittest_files + ('bloonchipper/', 'dartmonkey/'))
class BundleAFDOGenerationArtifacts(cros_test_lib.MockTempDirTestCase):
"""BundleAFDOGenerationArtifacts tests."""
def setUp(self):
# Create the build target.
self.build_target = build_target_lib.BuildTarget('board')
# Create the chroot.
self.chroot_dir = os.path.join(self.tempdir, 'chroot')
self.chroot_tmp = os.path.join(self.chroot_dir, 'tmp')
osutils.SafeMakedirs(self.chroot_tmp)
self.chroot = chroot_lib.Chroot(path=self.chroot_dir)
# Create the output directory.
self.output_dir = os.path.join(self.tempdir, 'output_dir')
osutils.SafeMakedirs(self.output_dir)
self.chrome_root = os.path.join(self.tempdir, 'chrome_root')
def testRunSuccess(self):
"""Generic function for testing success cases for different types."""
# Separate tempdir for the method itself.
call_tempdir = os.path.join(self.chroot_tmp, 'call_tempdir')
osutils.SafeMakedirs(call_tempdir)
self.PatchObject(osutils.TempDir, '__enter__', return_value=call_tempdir)
mock_orderfile_generate = self.PatchObject(
toolchain_util, 'GenerateChromeOrderfile',
autospec=True)
mock_afdo_generate = self.PatchObject(
toolchain_util, 'GenerateBenchmarkAFDOProfile',
autospec=True)
# Test both orderfile and AFDO.
for is_orderfile in [False, True]:
# Set up files in the tempdir since the command isn't being called to
# generate anything for it to handle.
files = ['artifact1', 'artifact2']
expected_files = [os.path.join(self.output_dir, f) for f in files]
for f in files:
osutils.Touch(os.path.join(call_tempdir, f))
created = artifacts.BundleAFDOGenerationArtifacts(
is_orderfile, self.chroot, self.chrome_root,
self.build_target, self.output_dir)
# Test right class is called with right arguments
if is_orderfile:
mock_orderfile_generate.assert_called_once_with(
board=self.build_target.name,
chrome_root=self.chrome_root,
output_dir=call_tempdir,
chroot_path=self.chroot.path,
chroot_args=self.chroot.get_enter_args()
)
else:
mock_afdo_generate.assert_called_once_with(
board=self.build_target.name,
output_dir=call_tempdir,
chroot_path=self.chroot.path,
chroot_args=self.chroot.get_enter_args(),
)
# Make sure we get all the expected files
self.assertCountEqual(expected_files, created)
for f in created:
self.assertExists(f)
os.remove(f)
class FetchPinnedGuestImagesTest(cros_test_lib.TempDirTestCase):
"""FetchPinnedGuestImages tests."""
def setUp(self):
self.chroot = chroot_lib.Chroot(self.tempdir)
self.sysroot = sysroot_lib.Sysroot('/sysroot')
sysroot_path = os.path.join(self.tempdir, 'sysroot')
osutils.SafeMakedirs(sysroot_path)
self.pin_dir = os.path.join(sysroot_path, constants.GUEST_IMAGES_PINS_PATH)
osutils.SafeMakedirs(self.pin_dir)
def testSuccess(self):
"""Tests that generating a guest images tarball."""
for filename in ('file1', 'file2'):
pin_file = os.path.join(self.pin_dir, filename + '.json')
with open(pin_file, 'w') as f:
pin = {
'filename': filename + '.tar.gz',
'gsuri': 'gs://%s' % filename,
}
json.dump(pin, f)
expected = [
artifacts.PinnedGuestImage(filename='file1.tar.gz', uri='gs://file1'),
artifacts.PinnedGuestImage(filename='file2.tar.gz', uri='gs://file2'),
]
pins = artifacts.FetchPinnedGuestImages(self.chroot, self.sysroot)
self.assertCountEqual(expected, pins)
def testBadPin(self):
"""Tests that generating a guest images tarball with a bad pin file."""
pin_file = os.path.join(self.pin_dir, 'file1.json')
with open(pin_file, 'w') as f:
pin = {
'gsuri': 'gs://%s' % 'file1',
}
json.dump(pin, f)
pins = artifacts.FetchPinnedGuestImages(self.chroot, self.sysroot)
self.assertFalse(pins)
def testNoPins(self):
"""Tests that generating a guest images tarball with no pins."""
pins = artifacts.FetchPinnedGuestImages(self.chroot, self.sysroot)
self.assertFalse(pins)
class GeneratePayloadsTest(cros_test_lib.MockTempDirTestCase):
"""Test cases for the payload generation functions."""
def setUp(self):
self.target_image = os.path.join(
self.tempdir,
'link/R37-5952.0.2014_06_12_2302-a1/chromiumos_test_image.bin')
osutils.Touch(self.target_image, makedirs=True)
self.dummy_dlc_image = os.path.join(
self.tempdir,
'link/R37-5952.0.2014_06_12_2302-a1/dlc/dummy-dlc/package/dlc.img')
osutils.Touch(self.dummy_dlc_image, makedirs=True)
def testGenerateFullTestPayloads(self):
"""Verifies correctly generating full payloads."""
paygen_mock = self.PatchObject(paygen_payload_lib, 'GenerateUpdatePayload')
artifacts.GenerateTestPayloads(self.target_image, self.tempdir, full=True)
payload_path = os.path.join(
self.tempdir,
'chromeos_R37-5952.0.2014_06_12_2302-a1_link_full_dev.bin')
    paygen_mock.assert_called_once_with(self.target_image, payload_path)
def testGenerateDeltaTestPayloads(self):
"""Verifies correctly generating delta payloads."""
paygen_mock = self.PatchObject(paygen_payload_lib, 'GenerateUpdatePayload')
artifacts.GenerateTestPayloads(self.target_image, self.tempdir, delta=True)
payload_path = os.path.join(
self.tempdir,
'chromeos_R37-5952.0.2014_06_12_2302-a1_R37-'
'5952.0.2014_06_12_2302-a1_link_delta_dev.bin')
    paygen_mock.assert_called_once_with(self.target_image, payload_path,
                                        src_image=self.target_image)
def testGenerateFullDummyDlcTestPayloads(self):
"""Verifies correctly generating full payloads for dummy-dlc."""
paygen_mock = self.PatchObject(paygen_payload_lib, 'GenerateUpdatePayload')
self.PatchObject(portage_util, 'GetBoardUseFlags',
return_value=['dlc_test'])
artifacts.GenerateTestPayloads(self.target_image, self.tempdir, full=True,
dlc=True)
rootfs_payload = 'chromeos_R37-5952.0.2014_06_12_2302-a1_link_full_dev.bin'
dlc_payload = ('dlc_dummy-dlc_package_R37-5952.0.2014_06_12_2302-a1_link_'
'full_dev.bin')
paygen_mock.assert_has_calls([
mock.call(self.target_image,
os.path.join(self.tempdir, rootfs_payload)),
mock.call(self.dummy_dlc_image,
os.path.join(self.tempdir, dlc_payload)),
])
def testGenerateDeltaDummyDlcTestPayloads(self):
"""Verifies correctly generating delta payloads for dummy-dlc."""
paygen_mock = self.PatchObject(paygen_payload_lib, 'GenerateUpdatePayload')
self.PatchObject(portage_util, 'GetBoardUseFlags',
return_value=['dlc_test'])
artifacts.GenerateTestPayloads(self.target_image, self.tempdir, delta=True,
dlc=True)
rootfs_payload = ('chromeos_R37-5952.0.2014_06_12_2302-a1_R37-'
'5952.0.2014_06_12_2302-a1_link_delta_dev.bin')
dlc_payload = ('dlc_dummy-dlc_package_R37-5952.0.2014_06_12_2302-a1_R37-'
'5952.0.2014_06_12_2302-a1_link_delta_dev.bin')
paygen_mock.assert_has_calls([
mock.call(self.target_image,
os.path.join(self.tempdir, rootfs_payload),
src_image=self.target_image),
mock.call(self.dummy_dlc_image,
os.path.join(self.tempdir, dlc_payload),
src_image=self.dummy_dlc_image),
])
def testGenerateStatefulTestPayloads(self):
"""Verifies correctly generating stateful payloads."""
paygen_mock = self.PatchObject(paygen_stateful_payload_lib,
'GenerateStatefulPayload')
artifacts.GenerateTestPayloads(self.target_image, self.tempdir,
stateful=True)
    paygen_mock.assert_called_once_with(self.target_image, self.tempdir)
def testGenerateQuickProvisionPayloads(self):
"""Verifies correct files are created for quick_provision script."""
extract_kernel_mock = self.PatchObject(partition_lib, 'ExtractKernel')
extract_root_mock = self.PatchObject(partition_lib, 'ExtractRoot')
compress_file_mock = self.PatchObject(cros_build_lib, 'CompressFile')
artifacts.GenerateQuickProvisionPayloads(self.target_image, self.tempdir)
extract_kernel_mock.assert_called_once_with(
self.target_image, partial_mock.HasString('kernel.bin'))
extract_root_mock.assert_called_once_with(
self.target_image, partial_mock.HasString('rootfs.bin'),
truncate=False)
calls = [mock.call(partial_mock.HasString('kernel.bin'),
partial_mock.HasString(
constants.QUICK_PROVISION_PAYLOAD_KERNEL)),
mock.call(partial_mock.HasString('rootfs.bin'),
partial_mock.HasString(
constants.QUICK_PROVISION_PAYLOAD_ROOTFS))]
compress_file_mock.assert_has_calls(calls)
class GenerateCpeExportTest(cros_test_lib.RunCommandTempDirTestCase):
"""GenerateCpeExport tests."""
def setUp(self):
self.sysroot = sysroot_lib.Sysroot('/build/board')
self.chroot = chroot_lib.Chroot(self.tempdir)
self.chroot_tempdir = osutils.TempDir(base_dir=self.tempdir)
self.PatchObject(self.chroot, 'tempdir', return_value=self.chroot_tempdir)
self.output_dir = os.path.join(self.tempdir, 'output_dir')
osutils.SafeMakedirs(self.output_dir)
result_file = artifacts.CPE_RESULT_FILE_TEMPLATE % 'board'
self.result_file = os.path.join(self.output_dir, result_file)
warnings_file = artifacts.CPE_WARNINGS_FILE_TEMPLATE % 'board'
self.warnings_file = os.path.join(self.output_dir, warnings_file)
def testSuccess(self):
"""Test success handling."""
# Set up warning output and the file the command would be making.
report = 'Report.'
warnings = 'Warnings.'
self.rc.SetDefaultCmdResult(returncode=0, output=report, error=warnings)
result = artifacts.GenerateCpeReport(self.chroot, self.sysroot,
self.output_dir)
expected_cmd = ['cros_extract_deps', '--sysroot', '/build/board',
'--format', 'cpe', 'virtual/target-os', '--output-path',
self.result_file]
self.assertCommandCalled(expected_cmd, capture_output=True,
chroot_args=['--chroot', mock.ANY],
enter_chroot=True)
self.assertEqual(self.result_file, result.report)
self.assertEqual(self.warnings_file, result.warnings)
# We cannot assert that self.result_file exists and check contents since we
# are mocking cros_extract_deps, but we verified the args to
# cros_extract_deps.
self.assertFileContents(self.warnings_file, warnings)
Remaining columns (avg_line_length through hits), in schema order:

39.199438 | 80 | 0.691903 | 3,457 | 27,910 | 5.394851 | 0.149841 | 0.022198 | 0.036461 | 0.039035 | 0.500375 | 0.427668 | 0.37555 | 0.354853 | 0.320858 | 0.293566 | 0 | 0.022031 | 0.203117 | 27,910 | 711 | 81 | 39.254571 | 0.81651 | 0.166535 | 0 | 0.28172 | 0 | 0.004301 | 0.118328 | 0.063997 | 0 | 0 | 0 | 0 | 0.08172 | 1 | 0.08172 | false | 0 | 0.043011 | 0 | 0.152688 | 0.002151 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
Row 8

| field | value |
|---|---|
| hexsha | 2418e436d32c1b1dc432e02916a5cc98bd2b5e5f |
| size | 8,044 |
| ext | py |
| lang | Python |
| max_stars_repo_path | src/lfw.py |
| max_stars_repo_name | LiuNull/dynamic_face_recognition |
| max_stars_repo_head_hexsha | 85b057e64a088fb6def28a3650218e8d6dc069cb |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | src/lfw.py |
| max_issues_repo_name | LiuNull/dynamic_face_recognition |
| max_issues_repo_head_hexsha | 85b057e64a088fb6def28a3650218e8d6dc069cb |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | src/lfw.py |
| max_forks_repo_name | LiuNull/dynamic_face_recognition |
| max_forks_repo_head_hexsha | 85b057e64a088fb6def28a3650218e8d6dc069cb |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:
"""Helper for evaluation on the Labeled Faces in the Wild dataset
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import facenet
import random
def evaluate(embeddings, actual_issame, nrof_folds=10):
# Calculate evaluation metrics
thresholds = np.arange(0, 4, 0.01)
# start from 0, step=2
embeddings1 = embeddings[0::2]
# start from 1, step=2
embeddings2 = embeddings[1::2]
    # embeddings1[i] and embeddings2[i] form the i-th comparison pair
tpr, fpr, accuracy = facenet.calculate_roc(thresholds, embeddings1, embeddings2,
np.asarray(actual_issame), nrof_folds=nrof_folds)
thresholds = np.arange(0, 4, 0.001)
val, val_std, far = facenet.calculate_val(thresholds, embeddings1, embeddings2,
np.asarray(actual_issame), 1e-3, nrof_folds=nrof_folds)
return tpr, fpr, accuracy, val, val_std, far
def get_paths(lfw_dir, pairs, file_ext,timestep_size):
nrof_skipped_pairs = 0
path_list = []
issame_list = []
for pair in pairs:
        if len(pair) == 5:  # same person
            peoplepath = os.path.join(lfw_dir, pair[0])
            videos = os.listdir(peoplepath)
            video1path = os.path.join(peoplepath, videos[int(pair[1])-1])  # take the pair[1]-th video
            video2path = os.path.join(peoplepath, videos[int(pair[3])-1])
            # sample timestep_size images from video1
images_path = os.listdir(video1path)
images_path.sort(key=lambda x: int(x[2:-4]))
nrof_images = len(images_path)
for i in range(timestep_size):
length = int(nrof_images/timestep_size)
start_index = i * length
end_index = min(nrof_images-1, (i+1) * length)
# path = os.path.join(video1path, pair[0] + '_' + pair[1] + '_%04d' % int(random.randint(1, int(pair[2]))) + '.' + file_ext)
path = os.path.join(video1path,images_path[random.randint(start_index, end_index)])
path_list.append(path)
# sample timestep_size images in video2
images_path = os.listdir(video2path)
nrof_images = len(images_path)
# path = os.path.join(video2path, pair[0] + '_' + pair[3] + '_%04d' % int(random.randint(1, int(pair[4]))) + '.' + file_ext)
# path = os.path.join(video2path, pair[3] + '_label' + '.' + file_ext)
path = os.path.join(video2path,images_path[random.randint(0, nrof_images-1)])
path_list.append(path)
issame = True
elif len(pair) == 6:
people1path = os.path.join(lfw_dir, pair[0])
people2path = os.path.join(lfw_dir, pair[3])
videos1 = os.listdir(people1path)
videos2 = os.listdir(people2path)
video1path = os.path.join(people1path, videos1[int(pair[1])-1])
video2path = os.path.join(people2path, videos2[int(pair[4])-1])
images_path = os.listdir(video1path)
images_path.sort(key=lambda x: int(x[2:-4]))
nrof_images = len(images_path)
for i in range(timestep_size):
length = int(nrof_images / timestep_size)
start_index = i * length
end_index = min(nrof_images - 1, (i + 1) * length)
# path = os.path.join(video1path, pair[0] + '_' + pair[1] + '_%04d' % int(random.randint(1, int(pair[2]))) + '.' + file_ext)
path = os.path.join(video1path, images_path[random.randint(start_index, end_index)])
path_list.append(path)
# sample timestep_size images in video2
images_path = os.listdir(video2path)
nrof_images = len(images_path)
path = os.path.join(video2path, images_path[random.randint(0, nrof_images - 1)])
path_list.append(path)
issame = False
issame_list.append(issame)
'''
if os.path.exists(path0) and os.path.exists(path1): # Only add the pair if both paths exist
path_list += (path0,path1,path2,path3,path4,path5)
issame_list.append(issame)
else:
nrof_skipped_pairs += 1
print('pairs path:'+"\n"+path0+"\n"+path1+"\n"+path2+"\n"+path3+"\n"+path4+"\n"+path5)
if nrof_skipped_pairs>0:
print('Skipped %d image pairs' % nrof_skipped_pairs)
'''
return path_list, issame_list
def get_video_paths(lfw_dir, pairs, file_ext, timestep_size):
nrof_skipped_pairs = 0
path_list = []
issame_list = []
for pair in pairs:
        if len(pair) == 5:  # same person
peoplepath = os.path.join(lfw_dir, pair[0])
video1path = os.path.join(peoplepath, pair[1])
video2path = os.path.join(peoplepath, pair[3])
for i in range(timestep_size):
path = os.path.join(video1path, pair[1] + '_%04d' % int(random.randint(1, int(pair[2]))) + '.' + file_ext)
path_list.append(path)
for i in range(timestep_size):
path = os.path.join(video2path, pair[3] + '_%04d' % int(random.randint(1, int(pair[4]))) + '.' + file_ext)
path_list.append(path)
issame = True
elif len(pair) == 6:
people1path = os.path.join(lfw_dir, pair[0])
people2path = os.path.join(lfw_dir, pair[3])
video1path = os.path.join(people1path, pair[1])
video2path = os.path.join(people2path, pair[4])
for i in range(timestep_size):
path = os.path.join(video1path, pair[1] + '_%04d' % int(random.randint(1, int(pair[2]))) + '.' + file_ext)
path_list.append(path)
for i in range(timestep_size):
path = os.path.join(video2path, pair[4] + '_%04d' % int(random.randint(1, int(pair[5]))) + '.' + file_ext)
path_list.append(path)
issame = False
issame_list.append(issame)
'''
if os.path.exists(path0) and os.path.exists(path1): # Only add the pair if both paths exist
path_list += (path0, path1, path2, path3, path4, path5)
issame_list.append(issame)
else:
nrof_skipped_pairs += 1
print(
'pairs path:' + "\n" + path0 + "\n" + path1 + "\n" + path2 + "\n" + path3 + "\n" + path4 + "\n" + path5)
if nrof_skipped_pairs > 0:
print('Skipped %d image pairs' % nrof_skipped_pairs)
'''
return path_list, issame_list
def read_pairs(pairs_filename):
pairs = []
with open(pairs_filename, 'r') as f:
for line in f.readlines()[1:]:
pair = line.strip().split()
pairs.append(pair)
return np.array(pairs)
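A minimal wiring sketch for the helpers above. The pairs file, directory, and file extension are placeholders; a real run would feed the sampled paths through an embedding model before calling evaluate.

# Hypothetical usage sketch; all paths below are placeholders.
if __name__ == '__main__':
    pairs = read_pairs('/path/to/pairs.txt')
    paths, issame = get_paths('/path/to/lfw_dir', pairs, 'png', 4)
    print('sampled %d image paths for %d pairs' % (len(paths), len(issame)))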
| 47.597633
| 141
| 0.603555
| 1,049
| 8,044
| 4.490944
| 0.218303
| 0.038208
| 0.05519
| 0.035661
| 0.643388
| 0.616642
| 0.587136
| 0.536616
| 0.536616
| 0.536616
| 0
| 0.031963
| 0.280457
| 8,044
| 169
| 142
| 47.597633
| 0.781963
| 0.229861
| 0
| 0.613861
| 0
| 0
| 0.004977
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039604
| false
| 0
| 0.069307
| 0
| 0.148515
| 0.009901
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
241af3ee55d444a940c6be41db409a26214a6a55
| 3,587
|
py
|
Python
|
nzarttrainer.py
|
richwalm/nzarttrainer
|
82c020172106be871771c78675a58f71b0169b17
|
[
"0BSD"
] | 2
|
2019-04-19T02:26:57.000Z
|
2021-06-22T13:19:57.000Z
|
nzarttrainer.py
|
richwalm/nzaarttrainer
|
82c020172106be871771c78675a58f71b0169b17
|
[
"0BSD"
] | 2
|
2019-04-10T22:44:59.000Z
|
2020-04-06T23:30:33.000Z
|
nzarttrainer.py
|
richwalm/nzaarttrainer
|
82c020172106be871771c78675a58f71b0169b17
|
[
"0BSD"
] | 3
|
2019-04-24T23:26:59.000Z
|
2020-04-10T11:38:01.000Z
|
#!/usr/bin/env python3
# NZART Exam Trainer
# Written by Richard Walmsley <richwalm+nzarttrainer@gmail.com> (ZL1RSW)
from flask import Flask, request, render_template, redirect, url_for, Response, abort
import random
import string
import json
import sys
app = Flask(__name__, static_folder = 's')
# Constants.
Needed = 40
MaxSeedSize = 8
# Load the database and ensure it's valid.
# Also create a cache for the answers.
with app.open_resource('questions.json') as InputFile:
Data = json.load(InputFile)
Answers = []
Required = Total = 0
for Block in Data:
if Block['RequiredAnswers'] > len(Block['Questions']):
sys.exit(1)
for Q in Block['Questions']:
if Q['Answer'] > len(Q['Choices']):
sys.exit(1)
Answers.append(Q['Answer'])
Required += Block['RequiredAnswers']
Total += len(Block['Questions'])
if Required > Total:
sys.exit(1)
# Common
def GenerateExam(Seed):
""" Returns a list of questions for each block. """
random.seed(Seed)
Blocks = []
for Block in Data:
Indexes = []
for I in range(Block['RequiredAnswers']):
while True:
R = random.randrange(len(Block['Questions']))
if R not in Indexes:
break
Indexes.append(R)
Indexes.sort()
Blocks.append(Indexes)
return Blocks
def GenerateFullExam():
""" Returns the entire exam. """
Blocks = []
for Block in Data:
Indexes = list(range(len(Block['Questions'])))
Blocks.append(Indexes)
return Blocks
# Views
@app.errorhandler(404)
def PageNotFound(e):
return render_template('404.html'), 404
@app.route('/')
def Index():
return render_template('index.html', MaxSeedSize = MaxSeedSize, Total = Total)
@app.route('/exam', methods = [ 'GET', 'POST' ])
def Exam():
AllQuestions = request.form.get('aq') or request.args.get('aq')
if not AllQuestions:
Seed = request.form.get('s') or request.args.get('s')
if not Seed:
if request.method == 'POST':
abort(403)
Seed = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(MaxSeedSize))
else:
Seed = Seed[:MaxSeedSize]
Blocks = GenerateExam(Seed)
T = Required
else:
Blocks = GenerateFullExam()
Seed = None
T = Total
if request.method == 'POST':
# Grading.
BlockAnswers = []
Offset = Correct = BlockIndex = 0
for B in Blocks:
BCorrect = 0
for I in B:
A = request.form.get(str(Offset + I))
if A:
try:
A = int(A)
except Exception:
pass
if A == Answers[Offset + I]:
Correct += 1
BCorrect += 1
BlockAnswers.append(BCorrect)
Offset += len(Data[BlockIndex]['Questions'])
BlockIndex += 1
return render_template('results.html', Seed = Seed, Blocks = Blocks, Data = Data, Needed = Needed, Correct = Correct, Answers = Answers, BlockAnswers = BlockAnswers, Total = T)
return render_template('exam.html', Seed = Seed, Blocks = Blocks, Data = Data, Needed = Needed, Total = T)
"""
@app.route('/answer/<int:ID>')
def Answer(ID):
if ID >= len(Answers):
abort(404)
return Response(str(Answers[ID]), mimetype='text/plain')
"""
if __name__ == '__main__':
app.run(debug = False, host='0.0.0.0')
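A quick smoke test, sketched with Flask's built-in test client. It assumes questions.json and the templates are present so the module imports cleanly, and it relies on GenerateExam seeding the RNG (plus deterministic templates), so a fixed seed renders the same exam twice.

# Hedged smoke-test sketch (would live in a separate file, e.g. test_app.py).
from nzarttrainer import app

def test_seeded_exam_is_deterministic():
    client = app.test_client()
    assert client.get('/').status_code == 200
    first = client.get('/exam?s=abc123').data
    second = client.get('/exam?s=abc123').data
    assert first == second

if __name__ == '__main__':
    test_seeded_exam_is_deterministic()
    print('ok')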
| 24.073826
| 184
| 0.57346
| 415
| 3,587
| 4.903614
| 0.349398
| 0.034398
| 0.033415
| 0.020639
| 0.100246
| 0.069779
| 0.043243
| 0.043243
| 0.043243
| 0
| 0
| 0.013142
| 0.299972
| 3,587
| 148
| 185
| 24.236486
| 0.797292
| 0.081684
| 0
| 0.181818
| 0
| 0
| 0.068788
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056818
| false
| 0.011364
| 0.056818
| 0.022727
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
241ca7dfdf71434c66d3f603fa5f2372d432d554
| 2,498
|
py
|
Python
|
setup.py
|
michelp/cxxheaderparser
|
83bb2903790cf448bf838cdb8a93ca96e758bd1a
|
[
"BSD-3-Clause"
] | 12
|
2020-12-28T09:40:53.000Z
|
2022-03-13T15:36:21.000Z
|
setup.py
|
michelp/cxxheaderparser
|
83bb2903790cf448bf838cdb8a93ca96e758bd1a
|
[
"BSD-3-Clause"
] | 28
|
2021-01-04T14:58:59.000Z
|
2022-01-03T03:00:16.000Z
|
setup.py
|
michelp/cxxheaderparser
|
83bb2903790cf448bf838cdb8a93ca96e758bd1a
|
[
"BSD-3-Clause"
] | 1
|
2021-11-06T03:44:53.000Z
|
2021-11-06T03:44:53.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from os.path import dirname, exists, join
import sys, subprocess
from setuptools import find_packages, setup
setup_dir = dirname(__file__)
git_dir = join(setup_dir, ".git")
version_file = join(setup_dir, "cxxheaderparser", "version.py")
# Automatically generate a version.py based on the git version
if exists(git_dir):
p = subprocess.Popen(
["git", "describe", "--tags", "--long", "--dirty=-dirty"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = p.communicate()
# Make sure the git version has at least one tag
if err:
print("Error: You need to create a tag for this repo to use the builder")
sys.exit(1)
# Convert git version to PEP440 compliant version
# - Older versions of pip choke on local identifiers, so we can't include the git commit
v, commits, local = out.decode("utf-8").rstrip().split("-", 2)
if commits != "0" or "-dirty" in local:
v = "%s.post0.dev%s" % (v, commits)
# Create the version.py file
with open(version_file, "w") as fp:
fp.write("# Autogenerated by setup.py\n__version__ = '{0}'".format(v))
with open(version_file, "r") as fp:
exec(fp.read(), globals())
DESCRIPTION = (
"Parse C++ header files and generate a data structure representing the class"
)
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: C++",
"License :: OSI Approved :: BSD License",
"Intended Audience :: Developers",
"Topic :: Software Development",
"Topic :: Software Development :: Code Generators",
"Topic :: Software Development :: Compilers",
]
setup(
name="cxxheaderparser",
version=__version__,
author="Dustin Spicuzza",
author_email="dustin@virtualroadside.com",
maintainer="RobotPy Development Team",
maintainer_email="robotpy@googlegroups.com",
url="https://github.com/robotpy/cxxheaderparser",
description=DESCRIPTION,
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
install_requires=["dataclasses; python_version < '3.7'"],
license="BSD",
platforms="Platform Independent",
packages=find_packages(),
keywords="c++ header parser ply",
python_requires=">= 3.6",
classifiers=CLASSIFIERS,
)
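The git-describe-to-PEP-440 conversion above is easy to sanity-check in isolation; here is a pure-function restatement of the same logic (no git needed).

# Standalone sketch of the version mangling in setup.py.
def pep440_from_describe(describe):
    v, commits, local = describe.rstrip().split("-", 2)
    if commits != "0" or "-dirty" in local:
        v = "%s.post0.dev%s" % (v, commits)
    return v

assert pep440_from_describe("1.2.0-0-gabc1234") == "1.2.0"
assert pep440_from_describe("1.2.0-5-gabc1234") == "1.2.0.post0.dev5"
assert pep440_from_describe("1.2.0-0-gabc1234-dirty") == "1.2.0.post0.dev0"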
| 32.025641
| 92
| 0.667734
| 310
| 2,498
| 5.264516
| 0.554839
| 0.02451
| 0.044118
| 0.023284
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007972
| 0.196557
| 2,498
| 77
| 93
| 32.441558
| 0.805182
| 0.1249
| 0
| 0
| 0
| 0
| 0.403396
| 0.032584
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.068966
| 0
| 0.068966
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2421fe0c328d6bbf457b874ac44ade8749c82265
| 371
|
py
|
Python
|
Message Bomber/Message Bomber with Random Words.py
|
SaiAshish-Konchada/Python-Projects-for-Beginners
|
bce0a705b636a1090b56f59205c6acb94ab2e54a
|
[
"MIT"
] | 5
|
2021-01-19T18:32:13.000Z
|
2021-05-03T05:19:11.000Z
|
Message Bomber/Message Bomber with Random Words.py
|
SaiAshish-Konchada/Python-Projects-for-Beginners
|
bce0a705b636a1090b56f59205c6acb94ab2e54a
|
[
"MIT"
] | null | null | null |
Message Bomber/Message Bomber with Random Words.py
|
SaiAshish-Konchada/Python-Projects-for-Beginners
|
bce0a705b636a1090b56f59205c6acb94ab2e54a
|
[
"MIT"
] | 2
|
2021-05-22T13:35:51.000Z
|
2021-08-31T07:05:32.000Z
|
# importing the required libraries
import pyautogui, time
# delay to switch windows
time.sleep(5)
#setting count to 5
count = 5
# loop to spam
while count >= 1:
    # type the message (see the file-reading variant sketched below)
pyautogui.write('Random Annoying Spam Words')
# press enter to send the message
pyautogui.press('enter')
    # decrement the counter
    count -= 1
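The loop above types a constant string; a hedged variant that samples lines from a hypothetical words.txt instead, with pyautogui's corner-of-screen failsafe left enabled.

# Variant sketch: type random lines from a (hypothetical) words.txt.
import random
import time

import pyautogui

pyautogui.FAILSAFE = True  # slam the mouse into a screen corner to abort
time.sleep(5)
with open('words.txt') as f:
    words = [line.strip() for line in f if line.strip()]
for _ in range(5):
    pyautogui.write(random.choice(words))
    pyautogui.press('enter')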
| 23.1875
| 49
| 0.703504
| 54
| 371
| 4.833333
| 0.648148
| 0.045977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017361
| 0.22372
| 371
| 15
| 50
| 24.733333
| 0.888889
| 0.477089
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
24236339afd01ee7c1bc2adc58b3562319b17c37
| 3,485
|
py
|
Python
|
wrappers/tensorflow/tools/convert_to_bag.py
|
NobuoTsukamoto/librealsense
|
bc0910f8ba3c33307ff247a29dd2b9e9ef1b269d
|
[
"Apache-2.0"
] | 6,457
|
2016-01-21T03:56:07.000Z
|
2022-03-31T11:57:15.000Z
|
wrappers/tensorflow/tools/convert_to_bag.py
|
NobuoTsukamoto/librealsense
|
bc0910f8ba3c33307ff247a29dd2b9e9ef1b269d
|
[
"Apache-2.0"
] | 8,393
|
2016-01-21T09:47:28.000Z
|
2022-03-31T22:21:42.000Z
|
wrappers/tensorflow/tools/convert_to_bag.py
|
NobuoTsukamoto/librealsense
|
bc0910f8ba3c33307ff247a29dd2b9e9ef1b269d
|
[
"Apache-2.0"
] | 4,874
|
2016-01-21T09:20:08.000Z
|
2022-03-31T15:18:00.000Z
|
import numpy as np
import cv2
import pyrealsense2 as rs
import time, sys, glob
focal = 0.0021
baseline = 0.08
sd = rs.software_device()
depth_sensor = sd.add_sensor("Depth")
intr = rs.intrinsics()
intr.width = 848
intr.height = 480
intr.ppx = 637.951293945312
intr.ppy = 360.783233642578
intr.fx = 638.864135742188
intr.fy = 638.864135742188
vs = rs.video_stream()
vs.type = rs.stream.infrared
vs.fmt = rs.format.y8
vs.index = 1
vs.uid = 1
vs.width = intr.width
vs.height = intr.height
vs.fps = 30
vs.bpp = 1
vs.intrinsics = intr
depth_sensor.add_video_stream(vs)
vs.type = rs.stream.depth
vs.fmt = rs.format.z16
vs.index = 1
vs.uid = 3
vs.bpp = 2
depth_sensor.add_video_stream(vs)
vs.type = rs.stream.depth
vs.fmt = rs.format.z16
vs.index = 2
vs.uid = 4
vs.bpp = 2
depth_sensor.add_video_stream(vs)
vs.type = rs.stream.depth
vs.fmt = rs.format.z16
vs.index = 3
vs.uid = 5
vs.bpp = 2
depth_sensor.add_video_stream(vs)
depth_sensor.add_read_only_option(rs.option.depth_units, 0.001)
name = "virtual camera"
sd.register_info(rs.camera_info.name, name)
ctx = rs.context()
sd.add_to(ctx)
dev = ctx.query_devices()[0]
for d in ctx.query_devices():
if d.get_info(rs.camera_info.name) == name:
dev = d
images_path = "."
if len(sys.argv) > 1:
images_path = str(sys.argv[1])
rec = rs.recorder(images_path + "/1.bag", dev)
sensor = rec.query_sensors()[0]
q = rs.frame_queue()
sensor.open(sensor.get_stream_profiles())
sensor.start(q)
files = glob.glob1(images_path, "gt*")
index = []
for f in files:
idx = (f.split('-')[1]).split('.')[0]
index.append(int(idx))
for i in index:
left_name = images_path + "/left-" + str(i) + ".png"
depth_name = images_path + "/gt-" + str(i) + ".png"
result_name = images_path + "/res-" + str(i) + ".png"
denoised_name = images_path + "/res_denoised-" + str(i) + ".png"
img = cv2.imread(left_name)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
f = rs.software_video_frame()
f.stride = intr.width
f.bpp = 1
f.pixels = np.asarray(img, dtype="byte")
f.timestamp = i * 0.01
f.frame_number = i
f.profile = sensor.get_stream_profiles()[0].as_video_stream_profile()
depth_sensor.on_video_frame(f)
time.sleep(0.01)
f3 = rs.software_video_frame()
img = cv2.imread(result_name, cv2.IMREAD_ANYDEPTH)
f3.stride = 2 * intr.width
f3.bpp = 2
px = np.asarray(img, dtype="ushort")
f3.pixels = px
f3.timestamp = i * 0.01
f3.frame_number = i
f3.profile = sensor.get_stream_profiles()[1].as_video_stream_profile()
depth_sensor.on_video_frame(f3)
time.sleep(0.01)
f4 = rs.software_video_frame()
img = cv2.imread(depth_name, cv2.IMREAD_ANYDEPTH)
f4.stride = 2 * intr.width
f4.bpp = 2
px = np.asarray(img, dtype="ushort")
f4.pixels = px
f4.timestamp = i * 0.01
f4.frame_number = i
f4.profile = sensor.get_stream_profiles()[2].as_video_stream_profile()
depth_sensor.on_video_frame(f4)
time.sleep(0.01)
f5 = rs.software_video_frame()
img = cv2.imread(denoised_name, cv2.IMREAD_ANYDEPTH)
f5.stride = 2 * intr.width
f5.bpp = 2
px = np.asarray(img, dtype="ushort")
f5.pixels = px
f5.timestamp = i * 0.01
f5.frame_number = i
f5.profile = sensor.get_stream_profiles()[3].as_video_stream_profile()
depth_sensor.on_video_frame(f5)
time.sleep(0.01)
time.sleep(1)
print("a")
f = q.wait_for_frame()
print("b")
time.sleep(1)
sensor.stop()
sensor.close()
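The four near-identical depth stream registrations above could be collapsed into a loop. This sketch uses only the pyrealsense2 calls already exercised by the script; the (index, uid) pairs mirror the hard-coded ones.

# Refactoring sketch for the repeated z16 stream setup above.
import pyrealsense2 as rs

def add_depth_streams(sensor, intr, specs):
    """Register one z16 depth stream per (index, uid) pair at 30 fps."""
    for index, uid in specs:
        vs = rs.video_stream()
        vs.type = rs.stream.depth
        vs.fmt = rs.format.z16
        vs.index = index
        vs.uid = uid
        vs.width = intr.width
        vs.height = intr.height
        vs.fps = 30
        vs.bpp = 2
        vs.intrinsics = intr
        sensor.add_video_stream(vs)

# e.g. add_depth_streams(depth_sensor, intr, [(1, 3), (2, 4), (3, 5)])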
| 23.389262
| 74
| 0.672597
| 582
| 3,485
| 3.857388
| 0.230241
| 0.048998
| 0.028953
| 0.051225
| 0.353675
| 0.291759
| 0.270379
| 0.227617
| 0.188864
| 0.09755
| 0
| 0.062915
| 0.179053
| 3,485
| 148
| 75
| 23.547297
| 0.721776
| 0
| 0
| 0.196721
| 0
| 0
| 0.028694
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.032787
| 0
| 0.032787
| 0.016393
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2424352a464760abcf3b0b917902ab3292e94368
| 1,043
|
py
|
Python
|
src/experiments/eval_minimal.py
|
peldszus/evidencegraph
|
1720b74d801e738d08996f22e8be676114426408
|
[
"MIT"
] | 3
|
2019-07-31T14:48:59.000Z
|
2021-09-01T07:26:15.000Z
|
src/experiments/eval_minimal.py
|
peldszus/evidencegraph
|
1720b74d801e738d08996f22e8be676114426408
|
[
"MIT"
] | 7
|
2019-07-30T23:22:15.000Z
|
2021-05-22T14:11:02.000Z
|
src/experiments/eval_minimal.py
|
peldszus/evidencegraph
|
1720b74d801e738d08996f22e8be676114426408
|
[
"MIT"
] | 1
|
2019-09-16T07:23:04.000Z
|
2019-09-16T07:23:04.000Z
|
from argparse import ArgumentParser
from evidencegraph.argtree import RELATION_SETS_BY_NAME
from evidencegraph.corpus import CORPORA
from evidencegraph.evaluation import evaluate_setting
if __name__ == "__main__":
parser = ArgumentParser(
description="""Evaluate argumentation parsing predictions"""
)
parser.add_argument(
"--corpus",
"-c",
choices=CORPORA,
default="m112en",
help="the corpus to evaluate the predictions of",
)
args = parser.parse_args()
corpus_name = args.corpus
language = CORPORA[corpus_name]["language"]
settings = {
("adu", "SIMPLE_RELATION_SET"): [
"{}-test-adu-simple-noop|equal".format(corpus_name)
]
}
for (segmentation, relationset), conditions in settings.items():
relationset = RELATION_SETS_BY_NAME.get(relationset)
evaluate_setting(
language,
segmentation,
relationset,
conditions,
corpus_id=corpus_name,
)
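Additional conditions are evaluated by extending the settings mapping. A hedged example follows; the FULL_RELATION_SET key and the second condition string are hypothetical and would have to match names actually defined in evidencegraph.

# Hypothetical second entry for the settings dict above.
settings = {
    ("adu", "SIMPLE_RELATION_SET"): [
        "{}-test-adu-simple-noop|equal".format(corpus_name)
    ],
    ("adu", "FULL_RELATION_SET"): [
        "{}-test-adu-full-noop|equal".format(corpus_name)
    ],
}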
| 27.447368
| 68
| 0.637584
| 100
| 1,043
| 6.4
| 0.51
| 0.0625
| 0.04375
| 0.05625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003932
| 0.268456
| 1,043
| 37
| 69
| 28.189189
| 0.834862
| 0
| 0
| 0
| 0
| 0
| 0.159156
| 0.027804
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2425800f04602f6f58d2c94bce88633d9eebc37c
| 4,221
|
py
|
Python
|
classifier/hist_classifier.py
|
adalrsjr1/smart-tuning
|
d8cb9f4ba41e7c068eda75b0fb581dcc8f329064
|
[
"MIT"
] | 1
|
2021-10-04T18:02:55.000Z
|
2021-10-04T18:02:55.000Z
|
classifier/hist_classifier.py
|
adalrsjr1/smart-tuning
|
d8cb9f4ba41e7c068eda75b0fb581dcc8f329064
|
[
"MIT"
] | null | null | null |
classifier/hist_classifier.py
|
adalrsjr1/smart-tuning
|
d8cb9f4ba41e7c068eda75b0fb581dcc8f329064
|
[
"MIT"
] | null | null | null |
from common.dataaccess import MongoAccessLayer
from common.timeutil import now
import numpy as np
import os
import sys
from classifier import workload_comparision as wc
# data = []
# data.append({'metric': metric, 'mean': query_mean[0], 'std': query_std[0]})
# data = {
# 'metrics': {'n_samples': QUERY_STEP // SCRAP_INTERVAL, 'values': data},
# 'histogram': histogram,
# 'start': end - QUERY_STEP,
# 'end': end,
# 'step': QUERY_STEP
# }
class HistClassifier:
def __init__(self, application, mongo_url, mongo_port, mongo_db, histogram_collection, tuning_collection):
self.application = application
self.mongo = MongoAccessLayer(mongo_url, mongo_port, mongo_db)
self.histogram_collection = self.mongo.collection(histogram_collection)
self.tuning_collection = self.mongo.collection(tuning_collection)
def close(self):
self.mongo.close()
def tunings(self, start, end):
return self.mongo.find({'start': {'$gte': start}, 'end': {'$lte': end}},
self.tuning_collection)
def histograms(self, start, end):
return self.mongo.find({'application': self.application, 'start': {'$gte': start}, 'end': {'$lte': end}},
self.histogram_collection)
def join_tuning_histogram(self, start, end):
_tunings = self.tunings(start, end)
_histograms = self.histograms(start, end)
processed_tunings = []
for tuning in _tunings:
start = tuning['start']
end = tuning['end']
filtered_histograms = []
for histogram in _histograms:
if histogram['start'] >= start and histogram['end'] <= end:
filtered_histograms.append(histogram)
tuning.update({'histograms': filtered_histograms})
processed_tunings.append(tuning)
return processed_tunings
def fetch(self, start, end):
result_set = self.mongo.find({'application': self.application, 'start': {'$gte': start}, 'end': {'$lte': end}},
self.histogram_collection)
simple_histogram = {}
for result in result_set:
            hist_id = str(result['_id'])
            simple_histogram[hist_id] = np.array(list(result['histogram'].values()))
return simple_histogram
    def compare(self, histograms, threshold=0):
        from collections import defaultdict
        workflows_group = defaultdict(set)
        memory = set()
        for i, hist1 in histograms.items():
            for j, hist2 in histograms.items():
                distance = wc.hellinger(hist1, hist2)
                if distance <= threshold:
                    self._group(i, j, workflows_group, memory)
        return workflows_group
# TODO: optimize this in the future
def _group(self, a, b, table, memory):
if a not in memory and b not in memory:
table[a].add(b)
elif a in memory and b not in memory:
if a in table:
table[a].add(b)
else:
return self._group(b, a, table, memory)
elif a not in memory and b in memory:
for key, value in table.items():
if b in value:
value.add(a)
break
memory.add(a)
memory.add(b)
def main():
from common.timeutil import minute, day
start = now(past=day(2))
end = now()
classifier = HistClassifier('acmeair', 'localhost', 27017, 'acmeair_db_experiments', 'acmeair_collection_histogram',
'acmeair_collection_tuning')
histograms = classifier.fetch(start, end)
print(classifier.compare(histograms, 0))
#for hist in classifier.join_tuning_histogram(start, end):
# print(hist)
# results = classifier.fetch(start, end)
# print(len(results))
# print(results)
    # print(classifier.compare(results, threshold=0))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Interrupted')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
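The grouping threshold of 0 only clusters histograms whose Hellinger distance is exactly zero. wc.hellinger is the project's own comparator; for intuition only, a textbook Hellinger distance over normalized histograms looks like this (not necessarily the project's exact definition).

# Textbook Hellinger distance between two histograms, for intuition.
import numpy as np

def hellinger(p, q):
    p = np.asarray(p, dtype=float) / np.sum(p)
    q = np.asarray(q, dtype=float) / np.sum(q)
    return np.sqrt(0.5 * np.sum((np.sqrt(p) - np.sqrt(q)) ** 2))

assert hellinger([1, 2, 3], [1, 2, 3]) == 0.0
assert 0.0 < hellinger([1, 0], [0, 1]) <= 1.0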
| 33.768
| 120
| 0.587302
| 463
| 4,221
| 5.207343
| 0.2527
| 0.046454
| 0.019909
| 0.019909
| 0.171298
| 0.148071
| 0.119867
| 0.069681
| 0.069681
| 0.069681
| 0
| 0.005757
| 0.300403
| 4,221
| 124
| 121
| 34.040323
| 0.810701
| 0.12959
| 0
| 0.071429
| 0
| 0
| 0.059612
| 0.020509
| 0
| 0
| 0
| 0.008065
| 0
| 1
| 0.107143
| false
| 0
| 0.095238
| 0.02381
| 0.285714
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2426476d2dec7acfd35014f0a631024c226cd418
| 10,199
|
py
|
Python
|
tests/test_cc_oop.py
|
J08nY/sec-certs
|
d25a4a7c830c587a45eb8e37d99f8794dec1a5eb
|
[
"MIT"
] | null | null | null |
tests/test_cc_oop.py
|
J08nY/sec-certs
|
d25a4a7c830c587a45eb8e37d99f8794dec1a5eb
|
[
"MIT"
] | null | null | null |
tests/test_cc_oop.py
|
J08nY/sec-certs
|
d25a4a7c830c587a45eb8e37d99f8794dec1a5eb
|
[
"MIT"
] | null | null | null |
import filecmp
import os
import shutil
import tempfile
from datetime import date, datetime
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from unittest import TestCase
import sec_certs.constants as constants
import sec_certs.helpers as helpers
from sec_certs.dataset.common_criteria import CCDataset
from sec_certs.sample.common_criteria import CommonCriteriaCert
from sec_certs.sample.protection_profile import ProtectionProfile
class TestCommonCriteriaOOP(TestCase):
def setUp(self):
self.test_data_dir = Path(__file__).parent / "data" / "test_cc_oop"
self.crt_one = CommonCriteriaCert(
"active",
"Access Control Devices and Systems",
"NetIQ Identity Manager 4.7",
"NetIQ Corporation",
"SE",
{"ALC_FLR.2", "EAL3+"},
date(2020, 6, 15),
date(2025, 6, 15),
"https://www.commoncriteriaportal.org/files/epfiles/Certification%20Report%20-%20NetIQ®%20Identity%20Manager%204.7.pdf",
"https://www.commoncriteriaportal.org/files/epfiles/ST%20-%20NetIQ%20Identity%20Manager%204.7.pdf",
"https://www.commoncriteriaportal.org/files/epfiles/Certifikat%20CCRA%20-%20NetIQ%20Identity%20Manager%204.7_signed.pdf",
"https://www.netiq.com/",
set(),
set(),
None,
None,
None,
)
self.crt_two = CommonCriteriaCert(
"active",
"Access Control Devices and Systems",
"Magic SSO V4.0",
"Dreamsecurity Co., Ltd.",
"KR",
set(),
date(2019, 11, 15),
date(2024, 11, 15),
"https://www.commoncriteriaportal.org/files/epfiles/KECS-CR-19-70%20Magic%20SSO%20V4.0(eng)%20V1.0.pdf",
"https://www.commoncriteriaportal.org/files/epfiles/Magic_SSO_V4.0-ST-v1.4_EN.pdf",
None,
"https://www.dreamsecurity.com/",
{
ProtectionProfile(
"Korean National Protection Profile for Single Sign On V1.0",
"https://www.commoncriteriaportal.org/files/ppfiles/KECS-PP-0822-2017%20Korean%20National%20PP%20for%20Single%20Sign%20On%20V1.0(eng).pdf",
)
},
set(),
None,
None,
None,
)
pp = ProtectionProfile("sample_pp", "https://sample.pp")
update = CommonCriteriaCert.MaintenanceReport(
date(1900, 1, 1), "Sample maintenance", "https://maintenance.up", "https://maintenance.up"
)
self.fictional_cert = CommonCriteriaCert(
"archived",
"Sample category",
"Sample certificate name",
"Sample manufacturer",
"Sample scheme",
{"Sample security level"},
date(1900, 1, 2),
date(1900, 1, 3),
"https://path.to/report/link",
"https://path.to/st/link",
"https://path.to/cert/link",
"https://path.to/manufacturer/web",
{pp},
{update},
None,
None,
None,
)
self.template_dataset = CCDataset(
{self.crt_one.dgst: self.crt_one, self.crt_two.dgst: self.crt_two},
Path("/fictional/path/to/dataset"),
"toy dataset",
"toy dataset description",
)
self.template_dataset.timestamp = datetime(2020, 11, 16, hour=17, minute=4, second=14, microsecond=770153)
self.template_dataset.state.meta_sources_parsed = True
self.template_report_pdf_hashes = {
"309ac2fd7f2dcf17": "774c41fbba980191ca40ae610b2f61484c5997417b3325b6fd68b345173bde52",
"8cf86948f02f047d": "533a5995ef8b736cc48cfda30e8aafec77d285511471e0e5a9e8007c8750203a",
}
self.template_target_pdf_hashes = {
"309ac2fd7f2dcf17": "b9a45995d9e40b2515506bbf5945e806ef021861820426c6d0a6a074090b47a9",
"8cf86948f02f047d": "3c8614338899d956e9e56f1aa88d90e37df86f3310b875d9d14ec0f71e4759be",
}
self.template_report_txt_path = self.test_data_dir / "report_869415cc4b91282e.txt"
self.template_target_txt_path = self.test_data_dir / "target_869415cc4b91282e.txt"
def test_certificate_input_sanity(self):
self.assertEqual(
self.crt_one.report_link,
"https://www.commoncriteriaportal.org/files/epfiles/Certification%20Report%20-%20NetIQ®%20Identity%20Manager%204.7.pdf",
"Report link contains some improperly escaped characters.",
)
def test_download_and_convert_pdfs(self):
dset = CCDataset.from_json(self.test_data_dir / "toy_dataset.json")
with TemporaryDirectory() as td:
dset.root_dir = Path(td)
dset.download_all_pdfs()
dset.convert_all_pdfs()
actual_report_pdf_hashes = {
key: helpers.get_sha256_filepath(val.state.report_pdf_path) for key, val in dset.certs.items()
}
actual_target_pdf_hashes = {
key: helpers.get_sha256_filepath(val.state.st_pdf_path) for key, val in dset.certs.items()
}
self.assertEqual(
actual_report_pdf_hashes,
self.template_report_pdf_hashes,
"Hashes of downloaded pdfs (sample report) do not the template",
)
self.assertEqual(
actual_target_pdf_hashes,
self.template_target_pdf_hashes,
"Hashes of downloaded pdfs (security target) do not match the template",
)
self.assertTrue(dset["309ac2fd7f2dcf17"].state.report_txt_path.exists())
self.assertTrue(dset["309ac2fd7f2dcf17"].state.st_txt_path.exists())
self.assertAlmostEqual(
dset["309ac2fd7f2dcf17"].state.st_txt_path.stat().st_size,
self.template_target_txt_path.stat().st_size,
delta=1000,
)
self.assertAlmostEqual(
dset["309ac2fd7f2dcf17"].state.report_txt_path.stat().st_size,
self.template_report_txt_path.stat().st_size,
delta=1000,
)
def test_cert_to_json(self):
with NamedTemporaryFile("w") as tmp:
self.fictional_cert.to_json(tmp.name)
self.assertTrue(
filecmp.cmp(self.test_data_dir / "fictional_cert.json", tmp.name),
"The sample serialized to json differs from a template.",
)
def test_dataset_to_json(self):
with NamedTemporaryFile("w") as tmp:
self.template_dataset.to_json(tmp.name)
self.assertTrue(
filecmp.cmp(self.test_data_dir / "toy_dataset.json", tmp.name),
"The dataset serialized to json differs from a template.",
)
def test_cert_from_json(self):
self.assertEqual(
self.fictional_cert,
CommonCriteriaCert.from_json(self.test_data_dir / "fictional_cert.json"),
"The sample serialized from json differs from a template.",
)
def test_dataset_from_json(self):
self.assertEqual(
self.template_dataset,
CCDataset.from_json(self.test_data_dir / "toy_dataset.json"),
"The dataset serialized from json differs from a template.",
)
def test_build_empty_dataset(self):
with TemporaryDirectory() as tmp_dir:
dset = CCDataset({}, Path(tmp_dir), "sample_dataset", "sample dataset description")
dset.get_certs_from_web(to_download=False, get_archived=False, get_active=False)
self.assertEqual(len(dset), 0, "The dataset should contain 0 files.")
def test_build_dataset(self):
with TemporaryDirectory() as tmp_dir:
dataset_path = Path(tmp_dir)
os.mkdir(dataset_path / "web")
shutil.copyfile(
self.test_data_dir / "cc_products_active.csv", dataset_path / "web" / "cc_products_active.csv"
)
shutil.copyfile(
self.test_data_dir / "cc_products_active.html", dataset_path / "web" / "cc_products_active.html"
)
dset = CCDataset({}, dataset_path, "sample_dataset", "sample dataset description")
dset.get_certs_from_web(
keep_metadata=False, to_download=False, get_archived=False, get_active=True, update_json=False
)
self.assertEqual(
len(os.listdir(dataset_path)),
0,
"Meta files (csv, html) were not deleted properly albeit this was explicitly required.",
)
self.assertEqual(len(dset), 2, "The dataset should contain 2 files.")
self.assertTrue(self.crt_one in dset, "The dataset does not contain the template sample.")
self.assertEqual(dset, self.template_dataset, "The loaded dataset does not match the template dataset.")
def test_download_csv_html_files(self):
with TemporaryDirectory() as tmp_dir:
dataset_path = Path(tmp_dir)
dset = CCDataset({}, dataset_path, "sample_dataset", "sample dataset description")
dset.download_csv_html_resources(get_active=True, get_archived=False)
for x in dset.active_html_tuples:
self.assertTrue(x[1].exists())
self.assertGreaterEqual(x[1].stat().st_size, constants.MIN_CC_HTML_SIZE)
for x in dset.active_csv_tuples:
self.assertTrue(x[1].exists())
self.assertGreaterEqual(x[1].stat().st_size, constants.MIN_CC_CSV_SIZE)
def test_download_pp_dataset(self):
with tempfile.TemporaryDirectory() as tmp_dir:
self.template_dataset.root_dir = tmp_dir
self.template_dataset.process_protection_profiles()
self.assertTrue(self.template_dataset.pp_dataset_path.exists())
self.assertGreaterEqual(
self.template_dataset.pp_dataset_path.stat().st_size, constants.MIN_CC_PP_DATASET_SIZE
)
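Several of these tests hit the network (the pdf downloads). A hedged pattern for keeping offline runs green is to gate such cases behind an environment variable; the variable name below is made up.

# Sketch: opt-in network tests via an environment variable.
import os
import unittest

RUN_NETWORK = bool(os.environ.get("SEC_CERTS_RUN_NETWORK_TESTS"))

@unittest.skipUnless(RUN_NETWORK, "set SEC_CERTS_RUN_NETWORK_TESTS=1 to enable")
class NetworkTestCase(unittest.TestCase):
    def test_download_smoke(self):
        self.assertTrue(RUN_NETWORK)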
| 42.319502
| 159
| 0.618002
| 1,114
| 10,199
| 5.44614
| 0.223519
| 0.035602
| 0.019779
| 0.024724
| 0.463161
| 0.410417
| 0.327839
| 0.258283
| 0.242789
| 0.160046
| 0
| 0.06378
| 0.282086
| 10,199
| 240
| 160
| 42.495833
| 0.764545
| 0
| 0
| 0.212264
| 0
| 0.033019
| 0.27787
| 0.041769
| 0
| 0
| 0
| 0
| 0.103774
| 1
| 0.051887
| false
| 0
| 0.061321
| 0
| 0.117925
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
24287f31ba2a1965f69b8b89ba6e4fe27f5a9ecc
| 1,234
|
py
|
Python
|
mountdisplay/clean.py
|
Smytten/Tangible_NFT_Thesis
|
50e6b43c85ec2836b3628015eac1f1389de4a261
|
[
"MIT",
"Unlicense"
] | 1
|
2022-03-25T20:39:31.000Z
|
2022-03-25T20:39:31.000Z
|
mountdisplay/clean.py
|
Smytten/Tangible_NFT_Thesis
|
50e6b43c85ec2836b3628015eac1f1389de4a261
|
[
"MIT",
"Unlicense"
] | null | null | null |
mountdisplay/clean.py
|
Smytten/Tangible_NFT_Thesis
|
50e6b43c85ec2836b3628015eac1f1389de4a261
|
[
"MIT",
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import inkyphat
import time
import sys
print("""Inky pHAT: Clean
Displays solid blocks of red, black, and white to clean the Inky pHAT
display of any screen burn.
""".format(sys.argv[0]))
if len(sys.argv) < 2:
print("""Usage: {} <colour> <number of cycles>
Valid colours: red, yellow, black
""".format(sys.argv[0]))
sys.exit(0)
colour = sys.argv[1].lower()
try:
inkyphat.set_colour(colour)
except ValueError:
print('Invalid colour "{}" for V{}\n'.format(colour, inkyphat.get_version()))
if inkyphat.get_version() == 2:
sys.exit(1)
print('Defaulting to "red"')
if len(sys.argv) > 2:
cycles = int(sys.argv[2])
else:
cycles = 3
colours = (inkyphat.RED, inkyphat.BLACK, inkyphat.WHITE)
colour_names = (colour, "black", "white")
for i in range(cycles):
print("Cleaning cycle %i\n" % (i + 1))
for j, c in enumerate(colours):
print("- updating with %s" % colour_names[j])
inkyphat.set_border(c)
for x in range(inkyphat.WIDTH):
for y in range(inkyphat.HEIGHT):
inkyphat.putpixel((x, y), c)
inkyphat.show()
time.sleep(1)
print("\n")
print("Cleaning complete!")
| 24.196078
| 81
| 0.620746
| 178
| 1,234
| 4.269663
| 0.449438
| 0.055263
| 0.031579
| 0.036842
| 0.034211
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013472
| 0.21799
| 1,234
| 51
| 82
| 24.196078
| 0.774093
| 0.034036
| 0
| 0.052632
| 0
| 0
| 0.261125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.078947
| 0
| 0.078947
| 0.210526
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
242ab6a7517cb79f3a010784bb05fa1bc62077cb
| 1,507
|
py
|
Python
|
project1/scoper-beginner.py
|
swtornio/pytools
|
40fa88fa754419b47f71d8f1043f266c9a308f74
|
[
"BSD-3-Clause"
] | null | null | null |
project1/scoper-beginner.py
|
swtornio/pytools
|
40fa88fa754419b47f71d8f1043f266c9a308f74
|
[
"BSD-3-Clause"
] | null | null | null |
project1/scoper-beginner.py
|
swtornio/pytools
|
40fa88fa754419b47f71d8f1043f266c9a308f74
|
[
"BSD-3-Clause"
] | null | null | null |
# Project 1 - The Scope!
# Scenario: Congrats, your Penetration testing company Red Planet has
# landed an external assessment for Microsoft! Your point of contact has
# given you a few IP addresses to test. Like with any test, you
# should always verify the scope given to you to make sure there wasn't
# a mistake.
## Beginner Task: Write a script that will have the user input an IP
## address. The script should output the ownership and geolocation of the
## IP. The output should be presented in a way that is clean and organized
## in order to be added to your report.
# Resources:
# https://ipgeolocation.io/
# Get geolocation for an IPv4 IP Address = 8.8.8.8
# $ curl 'https://api.ipgeolocation.io/ipgeo?apiKey=API_KEY&ip=8.8.8.8'
# https://ipgeolocation.io/documentation/ip-geolocation-api.html
import requests
import configparser
# store the API key in an external file and make sure to add the file
# to .gitignore
cfg = configparser.ConfigParser()
cfg.read('ipgeo.cfg')
IPGEO_KEY = cfg.get('KEYS', 'api_key', raw=False)
IPGEO_URL = "https://api.ipgeolocation.io/ipgeo"
def locate(ip):
'''Query IP Geo database for given IP and print Owner'''
resp = requests.get(f'{IPGEO_URL}?apiKey={IPGEO_KEY}&ip={ip}')
location_info = resp.json()
print(f'{ip} is owned by {location_info["isp"]}, located in '
f'{location_info["city"]}, {location_info["state_prov"]}.')
if __name__ == '__main__':
target_ip = input("Enter an IP to look up: ")
locate(target_ip)
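The script reads its API key from ipgeo.cfg. A small one-off bootstrap for creating that file is sketched below; the key value is a placeholder, and the file should stay in .gitignore as the comments above say.

# One-off bootstrap for the ipgeo.cfg expected by the script above.
import configparser

cfg = configparser.ConfigParser()
cfg['KEYS'] = {'api_key': 'YOUR_API_KEY_HERE'}  # placeholder, not a real key
with open('ipgeo.cfg', 'w') as f:
    cfg.write(f)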
| 33.488889
| 74
| 0.717319
| 243
| 1,507
| 4.36214
| 0.489712
| 0.011321
| 0.011321
| 0.007547
| 0.05283
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007981
| 0.168547
| 1,507
| 44
| 75
| 34.25
| 0.837989
| 0.603185
| 0
| 0
| 0
| 0
| 0.403846
| 0.201049
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.142857
| 0
| 0.214286
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
242e68ec4abda02a1ff36b6149be9f598d440417
| 4,408
|
py
|
Python
|
full_feature_version/bot/orchestrator.py
|
inevitablepc/RedditKarmaBot
|
1716e092f662e379995b28d26881ea33ea40000e
|
[
"MIT"
] | 7
|
2020-06-24T11:36:31.000Z
|
2021-11-02T05:44:50.000Z
|
full_feature_version/bot/orchestrator.py
|
inevitablepc/RedditKarmaBot
|
1716e092f662e379995b28d26881ea33ea40000e
|
[
"MIT"
] | null | null | null |
full_feature_version/bot/orchestrator.py
|
inevitablepc/RedditKarmaBot
|
1716e092f662e379995b28d26881ea33ea40000e
|
[
"MIT"
] | 7
|
2020-04-05T22:49:11.000Z
|
2021-12-25T09:22:24.000Z
|
import itertools
import logging
import random
from collections import defaultdict
from concurrent.futures import wait
from concurrent.futures.thread import ThreadPoolExecutor
from bot import RedditBot
from utils import rand_wait_min, rand_wait_sec
class BotOrchestrator:
def __init__(self, all_credentials: dict, executor=None):
all_usernames = {cred['username'] for cred in all_credentials}
self.bots = [RedditBot(creds, all_bot_names=all_usernames) for creds in all_credentials]
self.bots = [bot for bot in self.bots if not bot.is_broken] # filter out suspended bots
self.executor = executor if executor else ThreadPoolExecutor(max_workers=len(self.bots),
thread_name_prefix="RedditBot")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.executor.shutdown(wait=True)
    # This will cause bots to process the same submissions.
    # You will get suspended for it pretty quickly once detected.
def parse_subreddit(self, subreddit: str, **kwargs):
self._submit_to_executor_for_all(lambda bot: bot.work_on_subreddit(subreddit, **kwargs), to_wait=True)
def upvote_other_bot_comments(self, iterations=1, comment_sample_size=3):
session_upvotes = defaultdict(set)
def do_fetch_comment_ids(bot):
return [(c_id, bot.username) for c_id in bot.fetch_new_comments(limit=comment_sample_size)]
def do_upvote_comment(bot, comment_id):
bot.upvote_comment(comment_id)
session_upvotes[bot.username].add(comment_id)
for i_n in range(iterations):
comment_with_owner = {}
futures = []
for bot in self.bots:
futures.append(self.executor.submit(do_fetch_comment_ids, bot))
wait(futures)
for future in futures:
result = future.result()
for entry in result:
c_id, author = entry[0], entry[1]
comment_with_owner[c_id] = author
comment_ids = list(comment_with_owner.keys())
random.shuffle(comment_ids)
comment_id_iter = itertools.cycle(comment_ids)
futures = []
for bot in self.bots:
bot_name = bot.username
loop_passed = 0
while True:
comment_id = next(comment_id_iter)
owner = comment_with_owner[comment_id]
# skip your own comments and comments already upvoted
if owner != bot_name and comment_id not in session_upvotes[bot_name]:
break
loop_passed += 1
                    # guard against an infinite loop if every comment has already been upvoted
if loop_passed > len(self.bots) + 1:
logging.warning(f"All comments have been already upvoted by {bot_name}")
break
futures.append(self.executor.submit(do_upvote_comment, bot, comment_id))
wait(futures)
if i_n != iterations - 1:
logging.info("Waiting between iterations")
rand_wait_sec(25, 35)
def parse_different_submissions(self, subreddit, **kwargs):
submissions = list(random.choice(self.bots).fetch_submission_ids(subreddit, **kwargs))
submission_ids = iter(submissions)
futures = []
bots_iter = itertools.cycle(self.bots)
for bot in bots_iter:
submission_id = next(submission_ids, None)
if not submission_id:
break
futures.append(self.executor.submit(bot.parse_submission, submission_id))
wait(futures)
def execute_custom_func(self, fn):
self._submit_to_executor_for_all(fn)
def log_karma(self):
self._submit_to_executor_for_all(lambda bot: bot.log_comment_karma())
def upvote_comment_sequentially_with_wait(self, comment_id=None, url=None):
for bot in self.bots:
bot.upvote_comment(comment_id, url)
rand_wait_min(1, 2)
def _submit_to_executor_for_all(self, func, to_wait=False):
futures = []
for bot in self.bots:
futures.append(self.executor.submit(func, bot))
if to_wait:
wait(futures)
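A minimal usage sketch. The credential dictionaries are placeholders (only the username key is visible in the code above; the remaining fields depend on what RedditBot expects), and every call performs real network I/O.

# Hypothetical entry point for the orchestrator above.
if __name__ == '__main__':
    all_credentials = [
        {'username': 'bot_a'},  # plus whatever auth fields RedditBot requires
        {'username': 'bot_b'},
    ]
    with BotOrchestrator(all_credentials) as orchestrator:
        orchestrator.log_karma()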
| 38.330435
| 110
| 0.622958
| 540
| 4,408
| 4.822222
| 0.283333
| 0.033794
| 0.018433
| 0.023041
| 0.21275
| 0.147081
| 0.070661
| 0.070661
| 0.070661
| 0.041475
| 0
| 0.00455
| 0.301951
| 4,408
| 114
| 111
| 38.666667
| 0.841729
| 0.05422
| 0
| 0.176471
| 0
| 0
| 0.02282
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.141176
| false
| 0.035294
| 0.094118
| 0.023529
| 0.270588
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
242e831fbeee71641da2d329c6f5ad06e8482bc8
| 10,476
|
py
|
Python
|
urdf2casadi/numpy_geom.py
|
ultrainren/urdf2casadi
|
42318720c2922977e7d57fa638ebf5ad1c092dd6
|
[
"MIT"
] | 1
|
2020-03-30T10:26:31.000Z
|
2020-03-30T10:26:31.000Z
|
urdf2casadi/numpy_geom.py
|
ultrainren/urdf2casadi
|
42318720c2922977e7d57fa638ebf5ad1c092dd6
|
[
"MIT"
] | null | null | null |
urdf2casadi/numpy_geom.py
|
ultrainren/urdf2casadi
|
42318720c2922977e7d57fa638ebf5ad1c092dd6
|
[
"MIT"
] | null | null | null |
import numpy as np
def normalize(v):
nv = np.linalg.norm(v)
if nv > 0.0:
v[0] = v[0]/nv
v[1] = v[1]/nv
v[2] = v[2]/nv
return v
def skew_symmetric(v):
"""Returns a skew symmetric matrix from vector. p q r"""
return np.array([[0, -v[2], v[1]],
[v[2], 0, -v[0]],
[-v[1], v[0], 0]])
def rotation_rpy(roll, pitch, yaw):
"""Returns a rotation matrix from roll pitch yaw. ZYX convention."""
cr = np.cos(roll)
sr = np.sin(roll)
cp = np.cos(pitch)
sp = np.sin(pitch)
cy = np.cos(yaw)
sy = np.sin(yaw)
return np.array([[cy*cp, cy*sp*sr - sy*cr, cy*sp*cr + sy*sr],
[sy*cp, sy*sp*sr + cy*cr, sy*sp*cr - cy*sr],
[ -sp, cp*sr, cp*cr]])
def quaternion_rpy(roll, pitch, yaw):
"""Returns a quaternion ([x,y,z,w], w scalar) from roll pitch yaw ZYX
convention."""
cr = np.cos(roll/2.0)
sr = np.sin(roll/2.0)
cp = np.cos(pitch/2.0)
sp = np.sin(pitch/2.0)
cy = np.cos(yaw/2.0)
sy = np.sin(yaw/2.0)
x = sr * cp * cy - cr * sp * sy
y = cr * sp * cy + sr * cp * sy
z = cr * cp * sy - sr * sp * cy
w = cr * cp * cy + sr * sp * sy
# Remember to normalize:
nq = np.sqrt(x*x + y*y + z*z + w*w)
return np.array([x/nq,
y/nq,
z/nq,
w/nq])
def T_rpy(displacement, roll, pitch, yaw):
"""Homogeneous transformation matrix with roll pitch yaw."""
T = np.zeros([4, 4])
T[:3, :3] = rotation_rpy(roll, pitch, yaw)
T[:3, 3] = displacement
T[3, 3] = 1.0
return T
def quaternion_product(quat0, quat1):
"""Returns the quaternion product of q0 and q1."""
quat = np.zeros(4)
x0, y0, z0, w0 = quat0[0], quat0[1], quat0[2], quat0[3]
x1, y1, z1, w1 = quat1[0], quat1[1], quat1[2], quat1[3]
quat[0] = w0*x1 + x0*w1 + y0*z1 - z0*y1
quat[1] = w0*y1 - x0*z1 + y0*w1 + z0*x1
quat[2] = w0*z1 + x0*y1 - y0*x1 + z0*w1
quat[3] = w0*w1 - x0*x1 - y0*y1 - z0*z1
return quat
def dual_quaternion_product(Q, P):
"""Returns the dual quaternion product of two 8 element vectors
representing a dual quaternions. First four elements are the real
part, last four elements are the dual part.
"""
res = np.zeros(8)
# Real and dual components
xr0, yr0, zr0, wr0 = Q[0], Q[1], Q[2], Q[3]
xd0, yd0, zd0, wd0 = Q[4], Q[5], Q[6], Q[7]
xr1, yr1, zr1, wr1 = P[0], P[1], P[2], P[3]
xd1, yd1, zd1, wd1 = P[4], P[5], P[6], P[7]
# Real part
xr = wr0*xr1 + xr0*wr1 + yr0*zr1 - zr0*yr1
yr = wr0*yr1 - xr0*zr1 + yr0*wr1 + zr0*xr1
zr = wr0*zr1 + xr0*yr1 - yr0*xr1 + zr0*wr1
wr = wr0*wr1 - xr0*xr1 - yr0*yr1 - zr0*zr1
# Dual part
xd = xr0*wd1 + wr0*xd1 + yr0*zd1 - zr0*yd1
xd += xd0*wr1 + wd0*xr1 + yd0*zr1 - zd0*yr1
yd = wr0*yd1 - xr0*zd1 + yr0*wd1 + zr0*xd1
yd += wd0*yr1 - xd0*zr1 + yd0*wr1 + zd0*xr1
zd = wr0*zd1 + xr0*yd1 - yr0*xd1 + zr0*wd1
zd += wd0*zr1 + xd0*yr1 - yd0*xr1 + zd0*wr1
wd = wr1*wd0 - xr1*xd0 - yr1*yd0 - zr1*zd0
wd += wd1*wr0 - xd1*xr0 - yd1*yr0 - zd1*zr0
res[0] = xr
res[1] = yr
res[2] = zr
res[3] = wr
res[4] = xd
res[5] = yd
res[6] = zd
res[7] = wd
return res
def dual_quaternion_conj(Q):
"""Returns the conjugate of a dual quaternion.
"""
res = np.zeros(8)
res[0] = -Q[0]
res[1] = -Q[1]
res[2] = -Q[2]
res[3] = Q[3]
res[4] = -Q[4]
res[5] = -Q[5]
res[6] = -Q[6]
res[7] = Q[7]
return res
def dual_quaternion_norm2(Q):
"""Returns the dual norm of a dual quaternion.
Based on:
https://github.com/bobbens/libdq/blob/master/dq.c
"""
real = Q[0]*Q[0] + Q[1]*Q[1] + Q[2]*Q[2] + Q[3]*Q[3]
dual = 2.*(Q[3]*Q[7] + Q[0]*Q[4] + Q[1]*Q[5] + Q[2]*Q[6])
return real, dual
def dual_quaternion_inv(Q):
"""Returns the inverse of a dual quaternion.
Based on:
https://github.com/bobbens/libdq/blob/master/dq.c
"""
res = np.zeros(8)
real, dual = dual_quaternion_norm2(Q)
res[0] = -Q[0] * real
res[1] = -Q[1] * real
res[2] = -Q[2] * real
res[3] = Q[3] * real
res[4] = Q[4] * (dual-real)
res[5] = Q[5] * (dual-real)
res[6] = Q[6] * (dual-real)
res[7] = Q[7] * (real-dual)
return res
def dual_quaternion_to_transformation_matrix(Q):
"""Transforms a dual quaternion to a 4x4 transformation matrix.
"""
res = np.zeros((4, 4))
# Rotation part:
xr, yr, zr, wr = Q[0], Q[1], Q[2], Q[3]
xd, yd, zd, wd = Q[4], Q[5], Q[6], Q[7]
res[0, 0] = wr*wr + xr*xr - yr*yr - zr*zr
res[1, 1] = wr*wr - xr*xr + yr*yr - zr*zr
res[2, 2] = wr*wr - xr*xr - yr*yr + zr*zr
res[0, 1] = 2.*(xr*yr - wr*zr)
res[1, 0] = 2.*(xr*yr + wr*zr)
res[0, 2] = 2.*(xr*zr + wr*yr)
res[2, 0] = 2.*(xr*zr - wr*yr)
res[1, 2] = 2.*(yr*zr - wr*xr)
res[2, 1] = 2.*(yr*zr + wr*xr)
# Displacement part:
res[0, 3] = 2.*(-wd*xr + xd*wr - yd*zr + zd*yr)
res[1, 3] = 2.*(-wd*yr + xd*zr + yd*wr - zd*xr)
res[2, 3] = 2.*(-wd*zr - xd*yr + yd*xr + zd*wr)
res[3, 3] = 1.0
return res
def dual_quaternion_rpy(rpy):
"""Returns the dual quaternion for a pure roll-pitch-yaw rotation.
"""
roll, pitch, yaw = rpy
# Origin rotation from RPY ZYX convention
cr = np.cos(roll/2.0)
sr = np.sin(roll/2.0)
cp = np.cos(pitch/2.0)
sp = np.sin(pitch/2.0)
cy = np.cos(yaw/2.0)
sy = np.sin(yaw/2.0)
# The quaternion associated with the origin rotation
# Note: quat = w + ix + jy + kz
x_or = cy*sr*cp - sy*cr*sp
y_or = cy*cr*sp + sy*sr*cp
z_or = sy*cr*cp - cy*sr*sp
w_or = cy*cr*cp + sy*sr*sp
res = np.zeros(8)
# Note, our dual quaternions use a different representation
# dual_quat = [xyz, w, xyz', w']
# where w + xyz represents the "real" quaternion
# and w'+xyz' represents the "dual" quaternion
res[0] = x_or
res[1] = y_or
res[2] = z_or
res[3] = w_or
return res
def dual_quaternion_translation(xyz):
"""Returns the dual quaternion for a pure translation.
"""
res = np.zeros(8)
res[3] = 1.0
res[4] = xyz[0]/2.0
res[5] = xyz[1]/2.0
res[6] = xyz[2]/2.0
return res
def dual_quaternion_axis_translation(axis, qi):
"""Returns the dual quaternion for a translation along an axis.
"""
res = np.zeros(8)
res[3] = 1.0
res[4] = qi*axis[0]/2.0
res[5] = qi*axis[1]/2.0
res[6] = qi*axis[2]/2.0
return res
def dual_quaternion_axis_rotation(axis, qi):
"""Returns the dual quaternion for a rotation along an axis.
AXIS MUST BE NORMALIZED!
"""
res = np.zeros(8)
cqi = np.cos(qi/2.0)
sqi = np.sin(qi/2.0)
res[0] = axis[0]*sqi
res[1] = axis[1]*sqi
res[2] = axis[2]*sqi
res[3] = cqi
return res
def dual_quaternion_prismatic(xyz, rpy, axis, qi):
"""Returns the dual quaternion for a prismatic joint.
"""
# Joint origin rotation from RPY ZYX convention
roll, pitch, yaw = rpy
# Origin rotation from RPY ZYX convention
cr = np.cos(roll/2.0)
sr = np.sin(roll/2.0)
cp = np.cos(pitch/2.0)
sp = np.sin(pitch/2.0)
cy = np.cos(yaw/2.0)
sy = np.sin(yaw/2.0)
# The quaternion associated with the origin rotation
# Note: quat = w + ix + jy + kz
x_or = cy*sr*cp - sy*cr*sp
y_or = cy*cr*sp + sy*sr*cp
z_or = sy*cr*cp - cy*sr*sp
w_or = cy*cr*cp + sy*sr*sp
# Joint origin translation as a dual quaternion
x_ot = 0.5*xyz[0]*w_or + 0.5*xyz[1]*z_or - 0.5*xyz[2]*y_or
y_ot = - 0.5*xyz[0]*z_or + 0.5*xyz[1]*w_or + 0.5*xyz[2]*x_or
z_ot = 0.5*xyz[0]*y_or - 0.5*xyz[1]*x_or + 0.5*xyz[2]*w_or
w_ot = - 0.5*xyz[0]*x_or - 0.5*xyz[1]*y_or - 0.5*xyz[2]*z_or
Q_o = [x_or, y_or, z_or, w_or, x_ot, y_ot, z_ot, w_ot]
# Joint displacement orientation is just identity
x_jr = 0.0
y_jr = 0.0
z_jr = 0.0
w_jr = 1.0
# Joint displacement translation along axis
x_jt = qi*axis[0]/2.0
y_jt = qi*axis[1]/2.0
z_jt = qi*axis[2]/2.0
w_jt = 0.0
Q_j = [x_jr, y_jr, z_jr, w_jr, x_jt, y_jt, z_jt, w_jt]
# Get resulting dual quaternion
return dual_quaternion_product(Q_o, Q_j)
def dual_quaternion_revolute(xyz, rpy, axis, qi):
"""Returns the dual quaternion for a revolute joint.
AXIS MUST BE NORMALIZED!
"""
# Joint origin rotation from RPY ZYX convention
roll, pitch, yaw = rpy
# Origin rotation from RPY ZYX convention
cr = np.cos(roll/2.0)
sr = np.sin(roll/2.0)
cp = np.cos(pitch/2.0)
sp = np.sin(pitch/2.0)
cy = np.cos(yaw/2.0)
sy = np.sin(yaw/2.0)
# The quaternion associated with the origin rotation
# Note: quat = w + ix + jy + kz
x_or = cy*sr*cp - sy*cr*sp
y_or = cy*cr*sp + sy*sr*cp
z_or = sy*cr*cp - cy*sr*sp
w_or = cy*cr*cp + sy*sr*sp
# Joint origin translation as a dual quaternion
x_ot = 0.5*xyz[0]*w_or + 0.5*xyz[1]*z_or - 0.5*xyz[2]*y_or
y_ot = - 0.5*xyz[0]*z_or + 0.5*xyz[1]*w_or + 0.5*xyz[2]*x_or
z_ot = 0.5*xyz[0]*y_or - 0.5*xyz[1]*x_or + 0.5*xyz[2]*w_or
w_ot = - 0.5*xyz[0]*x_or - 0.5*xyz[1]*y_or - 0.5*xyz[2]*z_or
Q_o = [x_or, y_or, z_or, w_or, x_ot, y_ot, z_ot, w_ot]
# Joint displacement rotation is from axis angle
cqi = np.cos(qi/2.0)
sqi = np.sin(qi/2.0)
x_jr = axis[0]*sqi
y_jr = axis[1]*sqi
z_jr = axis[2]*sqi
w_jr = cqi
# Joint displacement translation is nothing
x_jt = 0.0
y_jt = 0.0
z_jt = 0.0
w_jt = 0.0
Q_j = [x_jr, y_jr, z_jr, w_jr, x_jt, y_jt, z_jt, w_jt]
return dual_quaternion_product(Q_o, Q_j)
def quaternion_ravani_roth_dist(q1, q2):
"""Quaternion distance designed by ravani and roth.
See comparisons at: https://link.springer.com/content/pdf/10.1007%2Fs10851-009-0161-2.pdf"""
return min(np.linalg.norm(q1 - q2), np.linalg.norm(q1 + q2))
def quaternion_inner_product_dist(q1, q2):
"""Quaternion distance based on innerproduct.
See comparisons at: https://link.springer.com/content/pdf/10.1007%2Fs10851-009-0161-2.pdf"""
return 1.0 - abs(q1[0]*q2[0] + q1[1]*q2[1] + q1[2]*q2[2] + q1[3]*q2[3])
def rotation_distance_from_identity(R1, R2):
"""Rotation matrix distance based on distance from identity matrix.
See comparisons at: https://link.springer.com/content/pdf/10.1007%2Fs10851-009-0161-2.pdf"""
    return np.linalg.norm(np.eye(3) - np.dot(R1, R2.T))
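A quick self-consistency check of the functions above: a pure-rotation dual quaternion, converted back to a homogeneous matrix, should match T_rpy with zero displacement.

# Sanity-check sketch tying the dual quaternion and matrix paths together.
if __name__ == '__main__':
    rpy = (0.3, -0.2, 0.9)
    Q = dual_quaternion_rpy(rpy)
    T_from_dq = dual_quaternion_to_transformation_matrix(Q)
    T_direct = T_rpy([0.0, 0.0, 0.0], *rpy)
    assert np.allclose(T_from_dq, T_direct)
    print("dual quaternion rotation matches T_rpy")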
| 30.631579
| 96
| 0.55861
| 2,005
| 10,476
| 2.842893
| 0.102743
| 0.013684
| 0.021053
| 0.019649
| 0.500877
| 0.427018
| 0.405789
| 0.394561
| 0.377018
| 0.343509
| 0
| 0.08151
| 0.266896
| 10,476
| 341
| 97
| 30.721408
| 0.660677
| 0.253627
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087719
| false
| 0
| 0.004386
| 0
| 0.179825
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
242e9c8ba99915ee2b252e978052c11b8a27fd13
| 3,363
|
py
|
Python
|
2. Using Python to Interact with the Operating System/Week-7.py
|
indahpuspitaa17/IT-Automation-with-Python
|
f872324b25741769506cc8ef28b5176fb9fa8997
|
[
"MIT"
] | null | null | null |
2. Using Python to Interact with the Operating System/Week-7.py
|
indahpuspitaa17/IT-Automation-with-Python
|
f872324b25741769506cc8ef28b5176fb9fa8997
|
[
"MIT"
] | null | null | null |
2. Using Python to Interact with the Operating System/Week-7.py
|
indahpuspitaa17/IT-Automation-with-Python
|
f872324b25741769506cc8ef28b5176fb9fa8997
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import re
import sys
import operator
import csv
error_counter = {}
error_user = {}
info_user = {}
#This function will read each line of the syslog.log file and check if it is an error or an info message.
def search_file():
with open('syslog.log', "r") as myfile:
for line in myfile:
if " ERROR " in line:
find_error(line)
add_user_list(line, 1)
elif " INFO " in line:
add_user_list(line, 2)
return
#If it is an error it will read the error message from the line and increment its count in the dictionary
def find_error(str):
match = re.search(r"(ERROR [\w \[]*) ", str)
if match is not None:
aux = match.group(0).replace("ERROR ", "").strip()
if aux == "Ticket":
aux = "Ticket doesn't exist"
if not aux in error_counter:
error_counter[aux] = 1
else:
error_counter[aux] += 1
return
#This will read the user from the string and add it to the error or info counter depending on the op number
def add_user_list(str, op):
match = re.search(r'\(.*?\)', str)
user = match.group(0)
userA = user.strip("()")
if op == 1:
if not userA in error_user:
error_user[userA] = 1
else:
error_user[userA] += 1
elif op == 2:
if not userA in info_user:
info_user[userA] = 1
else:
info_user[userA] += 1
return
#This function will read the list, arrange it and return a tuple with the dictionary items
def sort_list(op, list):
if op == 1:
s = sorted(list.items(), key=operator.itemgetter(1), reverse=True)
elif op == 2:
s = sorted(list.items(), key=operator.itemgetter(0))
return s
#This is an extra function which will read the value of a user in the error dictionary and return its value if key exists
def getErrValue(keyV):
for key, value in error_user:
        if key == keyV:
return value
return 0
#This function writes both csv files
def write_csv(op):
if op == 1:
with open('user_statistics.csv', 'w', newline='') as output:
fieldnames = ['Username', 'INFO', 'ERROR']
csvw = csv.DictWriter(output, fieldnames=fieldnames)
csvw.writeheader()
for key, value in info_user:
valError = getErrValue(key)
csvw.writerow({'Username': key, 'INFO': value, 'ERROR': valError})
if op == 2:
with open('error_message.csv', 'w', newline='') as output:
fieldnames = ['Error', 'Count']
csvw = csv.DictWriter(output, fieldnames=fieldnames)
csvw.writeheader()
for key, value in error_counter:
csvw.writerow({'Error': key, 'Count': value})
return
#This function adds zero to the other dictionary in case that user is not a key, it will add a key with the user and value 0
def add_zeros():
for user in error_user.keys():
if user not in info_user:
info_user[user] = 0
for user in info_user.keys():
if user not in error_user:
error_user[user] = 0
return
#This will execute the functions
search_file()
add_zeros()
error_counter = sort_list(1, error_counter)
error_user = sort_list(2, error_user)
info_user = sort_list(2, info_user)
write_csv(1)
write_csv(2)
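The two regular expressions are the heart of the script; here is an illustrative check against a fabricated syslog line (the format mimics the course's ticky logs).

# Illustrative regex check with a made-up log line.
import re

line = "Jan 31 00:09:39 ubuntu.local ticky: ERROR Timeout while retrieving information (oren)"
error = re.search(r"(ERROR [\w \[]*) ", line).group(0).replace("ERROR ", "").strip()
user = re.search(r'\(.*?\)', line).group(0).strip("()")
print(error)  # Timeout while retrieving information
print(user)   # oren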
| 31.429907
| 124
| 0.612548
| 494
| 3,363
| 4.072874
| 0.238866
| 0.044732
| 0.023857
| 0.019384
| 0.234095
| 0.15507
| 0.107356
| 0.070577
| 0.070577
| 0.070577
| 0
| 0.011642
| 0.284865
| 3,363
| 107
| 125
| 31.429907
| 0.824948
| 0.213797
| 0
| 0.197674
| 0
| 0
| 0.066009
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081395
| false
| 0
| 0.05814
| 0
| 0.232558
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
242f6533a1559e482c850161d329f84507d8bdcb
| 2,487
|
py
|
Python
|
twitter/envs/corpus.py
|
Ra-Ni/Twitter-Language-Identifier
|
95e28cc8b0cc7b2acd96f240134649a9e601bca7
|
[
"MIT"
] | null | null | null |
twitter/envs/corpus.py
|
Ra-Ni/Twitter-Language-Identifier
|
95e28cc8b0cc7b2acd96f240134649a9e601bca7
|
[
"MIT"
] | null | null | null |
twitter/envs/corpus.py
|
Ra-Ni/Twitter-Language-Identifier
|
95e28cc8b0cc7b2acd96f240134649a9e601bca7
|
[
"MIT"
] | null | null | null |
from math import log10
from itertools import tee
class Corpus:
__global_corpus_frequency = 0.0
def __init__(self, size, depth, smoothing_value, label):
self.depth = depth
self.size = size
self.smoothing_value = smoothing_value
self.label = label
self.frequencies = {}
self.total_frequencies = float(size * smoothing_value)
self.local_corpus_frequency = 0.0
def items(self):
return self.frequencies.items()
def keys(self):
return self.frequencies.keys()
def values(self):
return self.frequencies.values()
def update(self, iterator):
for item in iterator:
target = self.frequencies
for character in item:
new_target = target.setdefault(character, [0, {}])
new_target[0] += 1
target = new_target[1]
self.local_corpus_frequency += 1
Corpus.__global_corpus_frequency += 1
def score(self, iterator):
results = log10(self.local_corpus_frequency / Corpus.__global_corpus_frequency)
for item in iterator:
previous_target, target = None, self.frequencies
for character in item:
new_target = target.get(character, [0, {}])
previous_target, target = target, new_target[1]
numerator = previous_target.get(item[-1], [0, {}])[0] + self.smoothing_value
denominator = sum([value for value, __ in previous_target.values()]) + self.smoothing_value * self.size
try:
results += log10(numerator / denominator)
except (ZeroDivisionError, ValueError):
results += log10(1e-64)
return results
def __hash__(self):
return hash(self.label)
class CorpusController:
def __init__(self, size, depth, smoothing_value, *labels):
self.languages = labels
self.smoothing_value = smoothing_value
self.size = size
self.depth = depth
self.corpora = {}
for label in labels:
self.corpora[label] = Corpus(self.size, self.depth, self.smoothing_value, label)
def train(self, iterator, label):
self.corpora[label].update(iterator)
def classify(self, iterator):
copies = iter(tee(iterator, len(self.corpora)))
probabilities = [(corpus.score(next(copies)), label) for label, corpus in self.corpora.items()]
return max(probabilities)
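A hedged usage sketch: train the controller on character trigrams of two tiny samples and classify a new string. The ngrams helper, the alphabet size of 26, and the smoothing value are illustrative choices, not values taken from the project.

# Illustrative wiring of CorpusController with character trigrams.
def ngrams(text, n=3):
    return (text[i:i + n] for i in range(len(text) - n + 1))

controller = CorpusController(26, 3, 0.5, 'en', 'fr')
controller.train(ngrams('the quick brown fox jumps over the lazy dog'), 'en')
controller.train(ngrams('le renard brun saute par-dessus le chien paresseux'), 'fr')
score, label = controller.classify(ngrams('the dog sleeps'))
print(label)  # expected to be 'en' for this toy data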
| 30.329268
| 115
| 0.617209
| 278
| 2,487
| 5.334532
| 0.230216
| 0.094403
| 0.060688
| 0.054619
| 0.184086
| 0.159137
| 0.110587
| 0.064734
| 0.064734
| 0
| 0
| 0.014648
| 0.286289
| 2,487
| 81
| 116
| 30.703704
| 0.820845
| 0
| 0
| 0.172414
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.172414
| false
| 0
| 0.034483
| 0.068966
| 0.362069
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 242f6cf35d21cd6a48c92a34883a88441a76c341
| 16,117
| py
| Python
| eex/translators/amber/amber_write.py
| dgasmith/EEX
| 7608c9ef25931040524c75d227f0bee18de9ddc1
| ["BSD-3-Clause"]
| null
| null
| null
| eex/translators/amber/amber_write.py
| dgasmith/EEX
| 7608c9ef25931040524c75d227f0bee18de9ddc1
| ["BSD-3-Clause"]
| null
| null
| null
| eex/translators/amber/amber_write.py
| dgasmith/EEX
| 7608c9ef25931040524c75d227f0bee18de9ddc1
| ["BSD-3-Clause"]
| null
| null
| null
"""
Writer for amber
"""
import time
import pandas as pd
import math
import re
import numpy as np
from collections import Counter
# Python 2/3 compat
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import eex
import logging
# AMBER local imports
from . import amber_metadata as amd
logger = logging.getLogger(__name__)
def _write_1d(file_handle, data, ncols, fmt):
data = data.ravel()
remainder_size = data.size % ncols
if data.size == 0:
file_handle.write("\n".encode())
elif remainder_size == 0:
np.savetxt(file_handle, data.reshape(-1, ncols), fmt=fmt, delimiter="")
else:
rem_data = data[-remainder_size:].reshape(1, -1)
data = data[:-remainder_size].reshape(-1, ncols)
np.savetxt(file_handle, data, fmt=fmt, delimiter="")
np.savetxt(file_handle, rem_data, fmt=fmt, delimiter="")
# print(data.shape, rem_data.shape)
# Write data to file
file_handle.flush()
def _write_amber_data(file_handle, data, category):
fmt_string = amd.data_labels[category][1]
fmt_data = amd.parse_format(fmt_string)
file_handle.write(("%%FLAG %s\n" % category).encode())
file_handle.write((fmt_string + "\n").encode())
ncols = fmt_data[0]
fmt = amd.build_format(fmt_data)
_write_1d(file_handle, np.array(data), ncols, fmt)
def _check_dl_compatibility(dl):
"""
This function examines a datalayer to determine if it is compatible with Amber.
Conversions between functional forms and pairwise interaction mixing are performed (if possible).
"""
# Loop over force field information - check functional form compatibility
for k, v in amd.forcefield_parameters.items():
if k != "nonbond":
terms = dl.list_term_parameters(v["order"])
for j in terms.values():
if j[0] != v["form"]:
# Will need to insert check to see if these can be easily converted (ex OPLS dihedral <-> charmmfsw)
raise TypeError("Functional form %s stored in datalayer is not compatible with Amber.\n" %(j[0]) )
else:
# handle non bonds here
pass
stored_properties = dl.list_atom_properties()
required_properties = list(amd.atom_property_names.values())
diff = np.setdiff1d(required_properties, stored_properties)
natoms = dl.get_atom_count()
index = np.arange(1, natoms + 1)
# Build and curate the data
df = pd.DataFrame({'atom_index': index})
df.dropna(axis=0, how="any", inplace=True)
df.set_index('atom_index', inplace=True)
add_properties = []
# Fill in default or raise error
for req in diff:
if req == 'atom_name':
atom_names = ['A'] * natoms
df[req] = atom_names
add_properties.append(req)
elif req == 'atomic_number':
# Just say it's carbon...doesn't seem like this matters too much for amber
atomic_numbers = [6] * natoms
df[req] = atomic_numbers
add_properties.append(req)
elif req == "mass":
try:
dl.get_atoms(properties=["mass"])
            except Exception:
raise KeyError("No masses stored in datalayer")
else:
raise KeyError("Atom property %s is missing from datalayer" %(req))
# Check for residue_index
if "residue_index" not in stored_properties:
# If molecule_index is set, set residue index to this.
# Otherwise, set all to 1.0
if "molecule_index" in stored_properties:
df["residue_index"] = dl.get_atoms(properties=["molecule_index"])
df["residue_name"] = ["BLA"] * natoms
add_properties.append("residue_index")
if len(add_properties) > 0:
dl.add_atoms(df, by_value=True)
def write_amber_file(dl, filename, inpcrd=None):
"""
Parameters
------------
dl : eex.DataLayer
The datalayer containing information about the system to write
filename : str
The name of the file to write
inpcrd : str, optional
        Currently unused; coordinates are always written to filename with "prmtop" replaced by "inpcrd" (or with ".inpcrd" appended).
"""
### First get information into Amber pointers. All keys are initially filled with zero.
# Ones that are currently 0, but should be implemented eventually are marked with
_check_dl_compatibility(dl)
### Figure out what is hydrogen for the header
num_H_list = []
inc_hydrogen = {}
without_hydrogen = {}
hidx = (dl.get_atoms("atomic_number") == 1).values.ravel()
for term_type, term_name in zip([2, 3, 4], ["bonds", "angles", "dihedrals"]):
term = dl.get_terms(term_type)
if term.shape[0] == 0:
num_H_list.append(0)
continue
# Build up an index of what is in hydrogen or not
inc_hydrogen_mask = term["atom1"].isin(hidx)
for n in range(term_type - 1):
name = "atom" + str(n + 2)
inc_hydrogen_mask |= term[name].isin(hidx)
num_H_list.append(len(term.loc[inc_hydrogen_mask].values))
inc_hydrogen[term_name] = term.loc[inc_hydrogen_mask].values
without_hydrogen[term_name] = term.loc[~inc_hydrogen_mask].values
output_sizes = {k: 0 for k in amd.size_keys}
output_sizes['NATOM'] = dl.get_atom_count() # Number of atoms
output_sizes["NBONH"] = num_H_list[0] # Number of bonds containing hydrogen
output_sizes["MBONA"] = dl.get_term_count(2, "total") - output_sizes["NBONH"] # Number of bonds not containing hydrogen
output_sizes['NBONA'] = output_sizes["MBONA"] # MBONA + number of constraint bonds (MBONA = NBONA always)
output_sizes["NTHETH"] = num_H_list[1] # Number of angles containing hydrogen
output_sizes["MTHETA"] = dl.get_term_count(3, "total") - output_sizes["NTHETH"] # Number of angles not containing hydrogen
output_sizes['NTHETA'] = output_sizes["MTHETA"] # MTHETA + number of constraint angles (NTHETA = MTHETA always)
output_sizes["NPHIH"] = num_H_list[2] # Number of torsions containing hydrogen
output_sizes["MPHIA"] = dl.get_term_count(4, "total") - output_sizes["NPHIH"] # Number of torsions not containing hydrogen
output_sizes["NPHIA"] = output_sizes["MPHIA"]
output_sizes["NUMBND"] = len(dl.list_term_uids(2)) # Number of unique bond types
output_sizes["NUMANG"] = len(dl.list_term_uids(3)) # Number of unique angle types
output_sizes["NPTRA"] = len(dl.list_term_uids(4)) # Number of unique torsion types
output_sizes["NRES"] = len(dl.list_atom_uids("residue_name")) # Number of residues (not stable)
output_sizes["NTYPES"] = len(np.unique(dl.get_atoms("atom_type"))) # Number of distinct LJ atom types
output_sizes["NPARM"] = 0 # Used to determine if this is a LES-compatible prmtop (??)
output_sizes["NNB"] = dl.get_atom_count(
) # Number of excluded atoms - Set to num atoms for our test cases. Amber will not run with 0
# 0 - no box, 1 - orthorhombic box, 2 - truncated octahedron
output_sizes["NMXRS"] = 0 # Number of atoms in the largest residue
output_sizes["IFCAP"] = 0 # Set to 1 if a solvent CAP is being used
output_sizes["NUMEXTRA"] = 0 # Number of extra points in the topology file
    ## Needs check for orthorhombic box (1) or truncated octahedron (2). Currently just 0 or 1
output_sizes["IFBOX"] = [0 if dl.get_box_size() == {} else 1][0] # Flag indicating whether a periodic box is present
written_categories = []
# Figure out size each section should be based on metadata
label_sizes = {}
for k, v in amd.data_labels.items():
if isinstance(v[0], int):
label_sizes[k] = v[0]
elif v[0] in list(output_sizes):
label_sizes[k] = output_sizes[v[0]]
else:
# print("%30s %40s %d" % (k, v[0], int(eval(v[0], sizes_dict))))
label_sizes[k] = int(eval(v[0], output_sizes))
### Write title and version information
f = open(filename, "w")
f.write('%%VERSION VERSION_STAMP = V0001.000 DATE = %s %s\n' % (time.strftime("%x"), time.strftime("%H:%M:%S")))
f.write("%FLAG TITLE\n%FORMAT(20a4)\n")
f.write("prmtop generated by MolSSI EEX\n")
## Write pointers section
f.write("%%FLAG POINTERS\n%s\n" % (amd.data_labels["POINTERS"][1]))
ncols, dtype, width = amd.parse_format(amd.data_labels["POINTERS"][1])
format_string = "%%%sd" % width
count = 0
for k in amd.size_keys:
f.write(format_string % output_sizes[k])
count += 1
if count % ncols == 0:
f.write("\n")
f.write("\n")
f.close()
written_categories.append("POINTERS")
### Write atom properties sections
file_handle = open(filename, "ab")
for k in amd.atom_property_names:
# Get data
data = dl.get_atoms(amd.atom_property_names[k], by_value=True, utype=amd.atom_data_units).values.ravel()
_write_amber_data(file_handle, data, k)
written_categories.append(k)
### Handle residues
    # We assume these are sorted with respect to atom index at the moment... not great
res_data = dl.get_atoms(["residue_index", "residue_name"], by_value=True)
uvals, uidx, ucnts = np.unique(res_data["residue_index"], return_index=True, return_counts=True)
labels = res_data["residue_name"].iloc[uidx].values
_write_amber_data(file_handle, labels, "RESIDUE_LABEL")
written_categories.append("RESIDUE_LABEL")
starts = np.concatenate(([1], np.cumsum(ucnts) + 1))[:-1]
_write_amber_data(file_handle, starts, "RESIDUE_POINTER")
written_categories.append("RESIDUE_POINTER")
### Write out term parameters
for term_type in ["bond", "angle", "dihedral"]:
uids = sorted(dl.list_term_uids(term_type))
if len(uids) == 0: continue
term_md = amd.forcefield_parameters[term_type]
tmps = {k: [] for k in term_md["column_names"].keys()}
utype = term_md["units"]
order = term_md["order"]
inv_lookup = {v: k for k, v in term_md["column_names"].items()}
# Build lists of data since AMBER holds this as 1D
for uid in uids:
params = dl.get_term_parameter(order, uid, utype=utype)
for k, v in params[1].items():
tmps[inv_lookup[k]].append(v)
# Write out FLAGS
for k, v in tmps.items():
_write_amber_data(file_handle, v, k)
written_categories.append(k)
for term_type, term_name in zip([2, 3, 4], ["bonds", "angles", "dihedrals"]):
term = dl.get_terms(term_type)
if term.shape[0] == 0: continue
# Build up an index of what is in hydrogen or not
inc_hydrogen_mask = term["atom1"].isin(hidx)
# Scale by weird AMBER factors
inc_hydrogen[term_name][:, :-1] = (inc_hydrogen[term_name][:, :-1] - 1) * 3
without_hydrogen[term_name][:, :-1] = (without_hydrogen[term_name][:, :-1] - 1) * 3
inc_h_name = term_name.upper() + "_INC_HYDROGEN"
without_h_name = term_name.upper() + "_WITHOUT_HYDROGEN"
_write_amber_data(file_handle, inc_hydrogen[term_name], inc_h_name)
written_categories.append(inc_h_name)
_write_amber_data(file_handle, without_hydrogen[term_name], without_h_name)
written_categories.append(without_h_name)
# Handle SOLVENT_POINTERS, ATOMS_PER_MOLECULE and BOX_DIMENSIONS. Only present if IFBOX>0.
if output_sizes["IFBOX"] > 0:
#Solvent pointers section
# There are three numbers here - IPTRES, NSPM, NSPSOL
# where
# IPTRES = final residue part of solute, NSPM = total number of molecules, NSPSOL = first solvent molecule
# Just say everything is solute for now.
iptres = dl.get_atoms(["residue_index"]).values[-1]
nspm = len(np.unique(dl.get_atoms(["molecule_index"]).values))
solvent_pointers = [iptres, nspm, nspm]
_write_amber_data(file_handle, solvent_pointers, "SOLVENT_POINTERS")
# Handle atoms per molecule
molecule_list = dl.get_atoms(["molecule_index"]).values.ravel()
count_atoms_per_molecule = Counter(molecule_list)
atoms_per_molecule = []
for x in range(1, nspm+1):
atoms_per_molecule.append(count_atoms_per_molecule[x])
_write_amber_data(file_handle, atoms_per_molecule, "ATOMS_PER_MOLECULE")
# Write box dimensions section
box_dimensions = dl.get_box_size(utype={"a": amd.box_units["length"], "b": amd.box_units["length"],
"c" : amd.box_units["length"], "alpha": amd.box_units["angle"],
"beta": amd.box_units["angle"], "gamma": amd.box_units["angle"]})
write_box = [box_dimensions["beta"], box_dimensions["a"], box_dimensions["b"], box_dimensions["c"]]
_write_amber_data(file_handle, write_box, "BOX_DIMENSIONS")
written_categories.append("BOX_DIMENSIONS")
written_categories.append("SOLVENT_POINTERS")
written_categories.append("ATOMS_PER_MOLECULE")
    # Quick fix for RADIUS_SET: in files prepared by xleap this is a one-line string description
_write_amber_data(file_handle, ["Place holder - EEX"], "RADIUS_SET")
written_categories.append("RADIUS_SET")
# Handle NB data
# Relevant headers = NONBOND_PARM_INDEX, LENNARD_JONES_ACOEF, LENNARD_JONES_BCOEF
stored_atom_types = dl.get_unique_atom_types()
ntypes = len(stored_atom_types)
nb_forms = dl.list_stored_nb_types()
# This can be removed if compatibility check inserted at beginning
if set(nb_forms) != set(["LJ"]):
# Write better message here
raise KeyError("Nonbond forms stored in datalayer are not compatible with Amber - %s" % nb_forms)
# Get parameters from datalayer using correct amber units
stored_nb_parameters = dl.list_nb_parameters(
nb_name="LJ", nb_model="AB", utype=amd.forcefield_parameters["nonbond"]["units"], itype="pair")
nonbonded_parm_index = np.zeros(ntypes * ntypes)
lj_a_coeff = []
lj_b_coeff = []
# Build a_coeff, b_coeff, and nb_parm lists
for key, value in stored_nb_parameters.items():
lj_a_coeff.append(value['A'])
lj_b_coeff.append(value['B'])
index_to_nb = ntypes * (key[0] - 1) + key[1]
index_to_nb2 = ntypes * (key[1] - 1) + key[0]
nonbonded_parm_index[index_to_nb - 1] = len(lj_a_coeff)
nonbonded_parm_index[index_to_nb2 - 1] = len(lj_a_coeff)
_write_amber_data(file_handle, nonbonded_parm_index, "NONBONDED_PARM_INDEX")
_write_amber_data(file_handle, lj_a_coeff, "LENNARD_JONES_ACOEF")
_write_amber_data(file_handle, lj_b_coeff, "LENNARD_JONES_BCOEF")
for category in amd.forcefield_parameters["nonbond"]["column_names"]:
written_categories.append(category)
### Write headers for other sections (file will not work in AMBER without these)
for k in amd.data_labels:
if k not in written_categories:
if label_sizes[k] > 0:
data = np.zeros(label_sizes[k])
_write_amber_data(file_handle, data, k)
else:
file_handle.write(("%%FLAG %s\n%s\n\n" % (k, amd.data_labels[k][1])).encode())
written_categories.append(k)
file_handle.close()
# Now we need to write out the INPCRD
if '.prmtop' in filename:
inpcrd_file = filename.replace('.prmtop', '.inpcrd')
else:
inpcrd_file = filename + '.inpcrd'
file_handle = open(inpcrd_file, "wb")
xyz = dl.get_atoms("XYZ", utype={"XYZ": "angstrom"})
file_handle.write("default_name\n".encode())
file_handle.write(("%6d\n" % xyz.shape[0]).encode())
_write_1d(file_handle, xyz.values.ravel(), 6, "%12.6f")
if output_sizes["IFBOX"] > 0:
box = pd.DataFrame(box_dimensions, index=[0])
box = box[['a', 'b', 'c', 'alpha', 'beta', 'gamma']]
_write_1d(file_handle, box.values.ravel(), 6, "%12.6f")
file_handle.close()
return 0
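A small, hedged check of the _write_1d helper defined above; it assumes only numpy and a binary buffer, mirroring how the writer itself hands binary file handles to np.savetxt. Seven values at five per row should produce one full row plus a two-value remainder row.
import io
import numpy as np

buf = io.BytesIO()
_write_1d(buf, np.arange(7.0), 5, "%12.6f")
print(buf.getvalue().decode())  # a row of 0..4, then a remainder row of 5..6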
| 37.481395
| 128
| 0.647453
| 2,236
| 16,117
| 4.449911
| 0.195886
| 0.037588
| 0.021106
| 0.027136
| 0.200101
| 0.094372
| 0.050352
| 0.040905
| 0.040905
| 0.03206
| 0
| 0.011082
| 0.232984
| 16,117
| 429
| 129
| 37.568765
| 0.793804
| 0.22225
| 0
| 0.108434
| 0
| 0
| 0.116397
| 0.001777
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016064
| false
| 0.004016
| 0.048193
| 0
| 0.068273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 24324bad9563b1de125f14982150d0bec0525c6c
| 7,085
| py
| Python
| src/main.py
| Mirris/ShiftManager1.1
| 6b3c1c76c8f66295053b1cf9f1187e57429cdd0b
| ["MIT"]
| null
| null
| null
| src/main.py
| Mirris/ShiftManager1.1
| 6b3c1c76c8f66295053b1cf9f1187e57429cdd0b
| ["MIT"]
| null
| null
| null
| src/main.py
| Mirris/ShiftManager1.1
| 6b3c1c76c8f66295053b1cf9f1187e57429cdd0b
| ["MIT"]
| null
| null
| null
from helpers.Logger import Logger
from calendar import Calendar
from employee import Employee
import json
import sys
import prettytable
import os
import time
import re
# Logger setup
logging = Logger()
log = logging.realm('Shift Manager')
def main():
# 1] Load Configuration file
with open('../data/konfigurace.json') as json_file1:
config_data = json.load(json_file1)
    # 2] Load the absences (vacations/days off) file
with open('../data/dovolene_volna.json') as json_file2:
absences_data = json.load(json_file2)
# 3] Count public holidays in current month
vychozi_mesic = config_data['datumova_konfigurace']['mesic']
vychozi_rok = config_data['datumova_konfigurace']['rok']
vychozi_svatky = []
# 3.1] Holiday counter
for unit in config_data['kalendar']['svatky'][vychozi_rok]:
search_re = re.split("\.", unit)
if search_re[1] == vychozi_mesic:
vychozi_svatky.append(unit)
# 3.2] Holiday binder
public_holidays_list = ", ".join(vychozi_svatky)
# 3.3] Vacation counter
day_list = config_data["zamestnanci"]["denni"]
night_list = config_data["zamestnanci"]["nocni"]
day_vacation = []
day_absence = []
night_vacation = []
night_absence = []
# 3.4.1] Daily list vacations and absences
for employee in day_list:
for vacation_d in absences_data["dovolene"][employee]:
day_vacation.append(vacation_d)
for absence_d in absences_data["volna"][employee]:
day_absence.append(absence_d)
# 3.4.2] Nightly list vacations and absences
for employee in night_list:
for vacation_n in absences_data["dovolene"][employee]:
night_vacation.append(vacation_n)
for absence_n in absences_data["volna"][employee]:
night_absence.append(absence_n)
# 4] Config & Absences counter
log.info('*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*')
log.info('Konfiguracni report')
log.info(' Vychozi mesic a rok: ' + vychozi_mesic + '/' + vychozi_rok)
log.info(' Denni smeny - interval: ' + config_data['intervaly_smen']['denni_smena'])
log.info(' Nocni smeny - interval: ' + config_data['intervaly_smen']['nocni_smena'])
log.info(' Pocet svatku v akt. mesici: ' + repr(len(vychozi_svatky)))
log.info(' Data svatku v akt. mesici: ' + public_holidays_list)
log.info('Report nahlasenych absenci')
log.info(' Nahlasene dovolene (D): ' + repr(len(day_vacation)))
log.info(' Nahlasene dovolene (N): ' + repr(len(night_vacation)))
log.info(' Nahlasene volno (D): ' + repr(len(day_absence)))
log.info(' Nahlasene volno (N): ' + repr(len(night_absence)))
log.info('Report smen')
log.info(' Zahajujici smena (D): ' + config_data['zahajeni_smen']['denni'])
log.info(' Pocet navazujicich smen (D): ' + repr(config_data['zahajeni_smen']['navazujici_denni']))
log.info(' Zahajujici smena (N): ' + config_data['zahajeni_smen']['nocni'])
log.info(' Pocet navazujicich smen (N): ' + repr(config_data['zahajeni_smen']['navazujici_nocni']))
log.info('*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*')
# Check modules availability
modules = ['json', 'sys', 'logging', 'os', 'time', 'prettytable']
missing_module = False
for module in modules:
if module not in sys.modules:
log.critical('Pozadovany modul <' + module + '> nebyl nalezen.')
missing_module = True
if missing_module:
sys.exit(1)
log.debug('Pozadovane moduly jsou pritomny [json, sys, logging, os, time, prettytable].')
# 5] TODO: Calendar Builder
# Create calendar based on conf. data
calendar = Calendar(vychozi_mesic, vychozi_rok)
last_day = int(calendar.determine_last_day())
calendar.initialize_calendar_month(last_day)
# print(calendar.calendar_list)
log.info('Delka kalendare (denni + nocni): {}'.format(len(calendar.calendar_list)))
log.info('Delka kalendaroveho elementu (ocekavano 4): {}'.format(len(calendar.calendar_list[0])))
#calendar.allocate_vacations(absences_data)
# 6] TODO: Initialize Employees
employee_list = []
for shift_type in config_data["zamestnanci"].values():
for employee_name in shift_type:
# Employee card for every employee
employee_card = Employee(employee_name)
employee_card.initialize_schedule(last_day)
employee_card.determine_shift_type(config_data["zamestnanci"])
employee_card.process_absences(absences_data)
employee_card.process_vacations(absences_data)
# Generate a list object of employee cards
employee_list.append(employee_card)
# Checkpoint - Show employees' cards
# for employee in employee_list:
# employee.show()
# 7] TODO: Calendar - Allocate Public Holiday
calendar.allocate_public_holiday(employee_list, public_holidays_list, config_data)
# Checkpoint - Show employees' cards
# for employee in employee_list:
# employee.show()
# 8] TODO: Calendar - Shifts starter
# Finish night-shift from the last night
calendar.allocate_starting_night_shift(employee_list, config_data)
# Loop shift starters
calendar.allocate_shift_starters(employee_list, config_data)
# Checkpoint - Show employees' cards
# for employee in employee_list:
# employee.show()
#
# calendar.show()
# 8] TODO: Calendar Looper
# Loop through whole calendar and provide equal hours across employees (first shot with expected errors)
calendar.loop_calendar(employee_list, config_data, last_day)
# Fill in empties
# calendar.fill_empty_calendar_sequence(employee_list)
# Clean Overview files
open('../vystup/Prehled_zamestnancu.txt', 'w').close()
open('../vystup/Mesicni_rozpis.txt', 'w').close()
open('../vystup/Smeny_zamestnancu.txt', 'w').close()
employee_overview_table = calendar.note_employees_overview(employees=employee_list)
employees_schedule_table = calendar.note_employee_schedule(employees=employee_list, calc_month=vychozi_mesic, calc_year=vychozi_rok)
monthly_calendar_table = calendar.note_calendar_allocation(calc_month=vychozi_mesic, calc_year=vychozi_rok,
publ_holiday=public_holidays_list)
# 8] TODO: Results Review
# BIIIIG TODOOOOOS HERE :-(((((((
# 9] TODO: Results Export (Calendar, Employees)
with open('../vystup/Prehled_zamestnancu.txt', 'w') as employees_overview:
employees_overview.write(str(employee_overview_table))
with open('../vystup/Smeny_zamestnancu.txt', 'w') as employees_schedule_overview:
employees_schedule_overview.write(str(employees_schedule_table))
with open('../vystup/Mesicni_rozpis.txt', 'w') as shifts_plan:
shifts_plan.write(str(monthly_calendar_table))
if __name__ == "__main__":
main()
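A hedged illustration of the holiday filter in step 3.1 above: date strings such as "28.10." are split on the dot and matched against the configured month. The sample dates are made up.
import re

svatky = ["1.1.", "28.10.", "24.12."]   # hypothetical holiday list
vychozi_mesic = "10"
matches = [u for u in svatky if re.split(r"\.", u)[1] == vychozi_mesic]
print(matches)  # -> ['28.10.']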
| 39.361111
| 136
| 0.660833
| 837
| 7,085
| 5.365591
| 0.243728
| 0.031173
| 0.018704
| 0.019595
| 0.251392
| 0.197951
| 0.0835
| 0.067023
| 0.049655
| 0.049655
| 0
| 0.005539
| 0.210021
| 7,085
| 179
| 137
| 39.581006
| 0.796855
| 0.180805
| 0
| 0.02
| 0
| 0
| 0.241236
| 0.063346
| 0
| 0
| 0
| 0.005587
| 0
| 1
| 0.01
| false
| 0
| 0.09
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 243302a8c8d74b6cc33c63504e8000d21f2d83c2
| 13,792
| py
| Python
| polyphemus/utils.py
| Xarthisius/polyphemus
| 3ae6cb9ff312d90478d8a294681bd898b7f45b1c
| ["BSD-2-Clause"]
| null
| null
| null
| polyphemus/utils.py
| Xarthisius/polyphemus
| 3ae6cb9ff312d90478d8a294681bd898b7f45b1c
| ["BSD-2-Clause"]
| null
| null
| null
| polyphemus/utils.py
| Xarthisius/polyphemus
| 3ae6cb9ff312d90478d8a294681bd898b7f45b1c
| ["BSD-2-Clause"]
| null
| null
| null
"""Helpers for polyphemus.
Utilities API
=============
"""
from __future__ import print_function
import os
import io
import re
import sys
import glob
import tempfile
import functools
import subprocess
from copy import deepcopy
from pprint import pformat
try:  # the ABCs moved to collections.abc in Python 3.3 and were removed from collections in 3.10
    from collections.abc import Mapping, Iterable, Hashable, Sequence, MutableMapping
except ImportError:
    from collections import Mapping, Iterable, Hashable, Sequence, MutableMapping
from collections import namedtuple
from hashlib import md5
from warnings import warn
try:
import cPickle as pickle
except ImportError:
import pickle
if sys.version_info[0] >= 3:
basestring = str
DEFAULT_RC_FILE = "polyphemusrc.py"
"""Default run control file name."""
DEFAULT_PLUGINS = ('polyphemus.base', 'polyphemus.githubhook', 'polyphemus.batlabrun',
'polyphemus.batlabstat', 'polyphemus.githubstat',
'polyphemus.dashboard')
"""Default list of plugin module names."""
FORBIDDEN_NAMES = frozenset(['del', 'global'])
def warn_forbidden_name(forname, inname=None, rename=None):
"""Warns the user that a forbidden name has been found."""
msg = "found forbidden name {0!r}".format(forname)
if inname is not None:
msg += " in {0!r}".format(inname)
if rename is not None:
msg += ", renaming to {0!r}".format(rename)
warn(msg, RuntimeWarning)
def indent(s, n=4, join=True):
"""Indents all lines in the string or list s by n spaces."""
spaces = " " * n
lines = s.splitlines() if isinstance(s, basestring) else s
lines = lines or ()
if join:
return '\n'.join([spaces + l for l in lines if l is not None])
else:
return [spaces + l for l in lines if l is not None]
class indentstr(str):
"""A special string subclass that can be used to indent the whol string
inside of format strings by accessing an ``indentN`` attr. For example,
``s.indent8`` will return a copy of the string s where every line starts
with 8 spaces."""
def __getattr__(self, key):
if key.startswith('indent'):
return indent(self, n=int(key[6:]))
return getattr(super(indentstr, self), key)
def expand_default_args(methods):
"""This function takes a collection of method tuples and expands all of
the default arguments, returning a set of all methods possible."""
methitems = set()
for mkey, mrtn in methods:
mname, margs = mkey[0], mkey[1:]
havedefaults = [3 == len(arg) for arg in margs]
if any(havedefaults):
# expand default arguments
n = havedefaults.index(True)
items = [((mname,)+tuple(margs[:n]), mrtn)] + \
[((mname,)+tuple(margs[:i]), mrtn) for i in range(n+1,len(margs)+1)]
methitems.update(items)
else:
# no default args
methitems.add((mkey, mrtn))
return methitems
def newoverwrite(s, filename, verbose=False):
"""Useful for not forcing re-compiles and thus playing nicely with the
    build system. This is accomplished by not writing the file if the existing
    contents are exactly the same as what would be written out.
    Parameters
    ----------
    s : str
        String contents of the file to possibly write.
    filename : str
        Path to file.
    verbose : bool, optional
        Prints an extra message.
"""
if os.path.isfile(filename):
with io.open(filename, 'rb') as f:
old = f.read()
if s == old:
return
else:
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
with io.open(filename, 'wb') as f:
f.write(s.encode())
if verbose:
print(" wrote " + filename)
def newcopyover(f1, f2, verbose=False):
"""Useful for not forcing re-compiles and thus playing nicely with the
    build system. This is accomplished by not writing the file if the existing
    contents are exactly the same as what would be written out.
    Parameters
    ----------
    f1 : str
        Path to the file to copy from.
    f2 : str
        Path to the file to copy over.
    verbose : bool, optional
        Prints an extra message.
"""
if os.path.isfile(f1):
with io.open(f1, 'r') as f:
s = f.read()
return newoverwrite(s, f2, verbose)
def writenewonly(s, filename, verbose=False):
"""Only writes the contents of the string to a file if the file does not exist.
    Useful for not touching files.
    Parameters
    ----------
    s : str
        String contents of the file to possibly write.
    filename : str
        Path to file.
    verbose : bool, optional
        Prints an extra message.
"""
if os.path.isfile(filename):
return
with open(filename, 'w') as f:
f.write(str(s))
if verbose:
print(" wrote " + filename)
def ensuredirs(f):
"""For a file path, ensure that its directory path exists."""
d = os.path.split(f)[0]
if not os.path.isdir(d):
os.makedirs(d)
def touch(filename):
"""Opens a file and updates the mtime, like the posix command of the same name."""
with io.open(filename, 'a') as f:
os.utime(filename, None)
def exec_file(filename, glb=None, loc=None):
"""A function equivalent to the Python 2.x execfile statement."""
with io.open(filename, 'r') as f:
src = f.read()
exec(compile(src, filename, "exec"), glb, loc)
#
# Run Control
#
class NotSpecified(object):
"""A helper class singleton for run control meaning that a 'real' value
has not been given."""
def __repr__(self):
return "NotSpecified"
NotSpecified = NotSpecified()
"""A helper class singleton for run control meaning that a 'real' value
has not been given."""
class RunControl(object):
"""A composable configuration class. Unlike argparse.Namespace,
this keeps the object dictionary (__dict__) separate from the run
control attributes dictionary (_dict)."""
def __init__(self, **kwargs):
"""Parameters
-------------
kwargs : optional
Items to place into run control.
"""
self._dict = {}
for k, v in kwargs.items():
setattr(self, k, v)
self._updaters = {}
def __getattr__(self, key):
if key in self._dict:
return self._dict[key]
elif key in self.__dict__:
return self.__dict__[key]
elif key in self.__class__.__dict__:
return self.__class__.__dict__[key]
else:
msg = "RunControl object has no attribute {0!r}.".format(key)
raise AttributeError(msg)
def __setattr__(self, key, value):
if key.startswith('_'):
self.__dict__[key] = value
else:
if value is NotSpecified and key in self:
return
self._dict[key] = value
def __delattr__(self, key):
if key in self._dict:
del self._dict[key]
elif key in self.__dict__:
del self.__dict__[key]
elif key in self.__class__.__dict__:
del self.__class__.__dict__[key]
else:
msg = "RunControl object has no attribute {0!r}.".format(key)
raise AttributeError(msg)
def __iter__(self):
return iter(self._dict)
def __repr__(self):
keys = sorted(self._dict.keys())
s = ", ".join(["{0!s}={1!r}".format(k, self._dict[k]) for k in keys])
return "{0}({1})".format(self.__class__.__name__, s)
def _pformat(self):
keys = sorted(self._dict.keys())
f = lambda k: "{0!s}={1}".format(k, pformat(self._dict[k], indent=2))
s = ",\n ".join(map(f, keys))
return "{0}({1})".format(self.__class__.__name__, s)
def __contains__(self, key):
return key in self._dict or key in self.__dict__ or \
key in self.__class__.__dict__
def __eq__(self, other):
if hasattr(other, '_dict'):
return self._dict == other._dict
elif isinstance(other, Mapping):
return self._dict == other
else:
return NotImplemented
def __ne__(self, other):
if hasattr(other, '_dict'):
return self._dict != other._dict
elif isinstance(other, Mapping):
return self._dict != other
else:
return NotImplemented
def _update(self, other):
"""Updates the rc with values from another mapping. If this rc has
if a key is in self, other, and self._updaters, then the updaters
value is called to perform the update. This function should return
a copy to be safe and not update in-place.
"""
if hasattr(other, '_dict'):
other = other._dict
elif not hasattr(other, 'items'):
other = dict(other)
for k, v in other.items():
if v is NotSpecified:
pass
elif k in self._updaters and k in self:
v = self._updaters[k](getattr(self, k), v)
setattr(self, k, v)
def infer_format(filename, format):
"""Tries to figure out a file format."""
if isinstance(format, basestring):
pass
elif filename.endswith('.pkl.gz'):
format = 'pkl.gz'
elif filename.endswith('.pkl'):
format = 'pkl'
else:
raise ValueError("file format could not be determined.")
return format
def sortedbytype(iterable):
"""Sorts an iterable by types first, then value."""
items = {}
for x in iterable:
t = type(x).__name__
if t not in items:
items[t] = []
items[t].append(x)
rtn = []
for t in sorted(items.keys()):
rtn.extend(sorted(items[t]))
return rtn
nyansep = r'~\_/' * 17 + '~=[,,_,,]:3'
"""WAT?!"""
def flatten(iterable):
"""Generator which returns flattened version of nested sequences."""
for el in iterable:
if isinstance(el, basestring):
yield el
elif isinstance(el, Iterable):
for subel in flatten(el):
yield subel
else:
yield el
#
# Memoization
#
def ishashable(x):
"""Tests if a value is hashable."""
if isinstance(x, Hashable):
if isinstance(x, basestring):
return True
elif isinstance(x, Iterable):
return all(map(ishashable, x))
else:
return True
else:
return False
def memoize(obj):
"""Generic memoziation decorator based off of code from
http://wiki.python.org/moin/PythonDecoratorLibrary .
This is not suitabe for method caching.
"""
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = (args, tuple(sorted(kwargs.items())))
hashable = ishashable(key)
if hashable:
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
else:
return obj(*args, **kwargs)
return memoizer
class memoize_method(object):
"""Decorator suitable for memoizing methods, rather than functions
and classes. This is based off of code that may be found at
http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
This code was originally released under the MIT license.
"""
def __init__(self, meth):
self.meth = meth
def __get__(self, obj, objtype=None):
if obj is None:
return self.meth
p = functools.partial(self, obj)
p.__doc__ = self.meth.__doc__
p.__name__ = self.meth.__name__
return p
def __call__(self, *args, **kwargs):
obj = args[0]
cache = obj._cache = getattr(obj, '_cache', {})
key = (self.meth, args[1:], tuple(sorted(kwargs.items())))
hashable = ishashable(key)
if hashable:
if key not in cache:
cache[key] = self.meth(*args, **kwargs)
return cache[key]
else:
return self.meth(*args, **kwargs)
def check_cmd(args):
"""Runs a command in a subprocess and verifies that it executed properly.
"""
#if not isinstance(args, basestring):
# args = " ".join(args)
f = tempfile.NamedTemporaryFile()
#rtn = subprocess.call(args, shell=True, stdout=f, stderr=f)
rtn = subprocess.call(args, stdout=f, stderr=f)
f.seek(0)
out = f.read()
f.close()
return rtn, out
#
# Persisted Cache
#
class PersistentCache(MutableMapping):
"""A quick persistent cache."""
def __init__(self, cachefile='cache.pkl'):
"""Parameters
-------------
cachefile : str, optional
Path to description cachefile.
"""
self.cachefile = cachefile
if os.path.isfile(cachefile):
with io.open(cachefile, 'rb') as f:
self.cache = pickle.load(f)
else:
self.cache = {}
def __len__(self):
return len(self.cache)
def __contains__(self, key):
return key in self.cache
def __getitem__(self, key):
return self.cache[key] # return the results of the finder only
def __setitem__(self, key, value):
self.cache[key] = value
self.dump()
def __delitem__(self, key):
del self.cache[key]
self.dump()
def __iter__(self):
for key in self.cache.keys():
yield key
def dump(self):
"""Writes the cache out to the filesystem."""
if not os.path.exists(self.cachefile):
pardir = os.path.split(os.path.abspath(self.cachefile))[0]
if not os.path.exists(pardir):
os.makedirs(pardir)
with io.open(self.cachefile, 'wb') as f:
pickle.dump(self.cache, f, pickle.HIGHEST_PROTOCOL)
def __str__(self):
return pformat(self.cache)
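A minimal sketch of how RunControl composes, using only the behavior defined above; the keys are illustrative.
rc = RunControl(verbose=True, plugins=DEFAULT_PLUGINS)
rc._update({'verbose': False})
print(rc.verbose)             # -> False
other = RunControl()
other.dummy_key = 1
other.verbose = NotSpecified  # stored, since 'verbose' is not yet in `other`
rc._update(other)
print(rc.verbose)             # -> still False; NotSpecified never overwrites a real value
print(rc.dummy_key)           # -> 1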
| 30.113537
| 88
| 0.59491
| 1,767
| 13,792
| 4.50481
| 0.234861
| 0.022111
| 0.013568
| 0.009799
| 0.267839
| 0.255151
| 0.232286
| 0.221482
| 0.209799
| 0.207538
| 0
| 0.004912
| 0.291473
| 13,792
| 457
| 89
| 30.179431
| 0.80966
| 0.240792
| 0
| 0.248252
| 0
| 0
| 0.048711
| 0.006393
| 0
| 0
| 0
| 0
| 0
| 1
| 0.143357
| false
| 0.006993
| 0.059441
| 0.024476
| 0.363636
| 0.013986
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 24334f8a7788ca759e3c468b57d76d5b40d43699
| 6,698
| py
| Python
| taxscrape.py
| gehilley/KMESTaxScrape
| 1cf85049b6d2a5fbd532107fbe44f61197ac3263
| ["MIT"]
| null
| null
| null
| taxscrape.py
| gehilley/KMESTaxScrape
| 1cf85049b6d2a5fbd532107fbe44f61197ac3263
| ["MIT"]
| null
| null
| null
| taxscrape.py
| gehilley/KMESTaxScrape
| 1cf85049b6d2a5fbd532107fbe44f61197ac3263
| ["MIT"]
| null
| null
| null
output_filename = 'kings_mountain_taxes.csv'
output_pfilename = 'kmes_taxes.p'
base_url = "https://gis.smcgov.org/maps/rest/services/WEBAPPS/COUNTY_SAN_MATEO_TKNS/MapServer/identify"
token = "fytmg9tR2rSx-1Yp0SWJ_qkAExGi-ftZoK7h4wk91UY."
polygon = [(-13622312.48,4506393.674),
(-13622866.64,4504129.241),
(-13622054.51,4501702.363),
(-13622081.51,4500703.546),
(-13622336.7,4500699.901),
(-13622209.69,4500208.989),
(-13620628.37,4498576.899),
(-13620855.91,4496456.415),
(-13621178.77,4496056.135),
(-13620850.69,4493901.594),
(-13619861.84,4493897.488),
(-13619569,4490187.675),
(-13619905.81,4489530.952),
(-13619314.07,4488339.813),
(-13618317.52,4488701.441),
(-13618258.04,4488474.013),
(-13615983.9,4488310.236),
(-13615652.87,4488885.978),
(-13615005.01,4489100.013),
(-13613917.21,4488836.71),
(-13613914.39,4488379.079),
(-13612911.75,4488366.21),
(-13612888.41,4494314.066),
(-13615097.8,4495888.695),
(-13615271.32,4496853.081),
(-13616508.99,4497514.873),
(-13616383.3,4498273.144),
(-13615602.16,4498927.021),
(-13616669.94,4499925.725),
(-13617650.42,4501543.218),
(-13618538.41,4501849.55),
(-13619271.78,4503718.206),
(-13620684.15,4505168.724),
(-13620959.32,4506823.444),
(-13622312.48,4506393.674)]
sr = 102100
tax_base_url = 'https://sanmateo-ca.county-taxes.com/public/search'
tax_bill_url = 'https://sanmateo-ca.county-taxes.com'
tax_link_contents = '2019 Secured Annual Bill'
tax_key_bond = 'Cabrillo Usd Bond'
tax_key_B = 'CAB USD MEAS B 2015-20'
tax_key_countywide = 'Countywide Tax (Secured)'
def get_apns_and_tras(extent, plot_boundaries = True):
(xmin, ymin, xmax, ymax) = extent
from shapely.geometry import Polygon
p1 = Polygon(polygon)
p2 = Polygon([(xmin, ymax), (xmax, ymax), (xmax, ymin), (xmin, ymin), (xmin, ymax)])
p3 = p1.intersection(p2)
try:
coords = list(p3.exterior.coords)
    except Exception:  # an empty or multi-part intersection has no exterior ring
coords = []
if plot_boundaries:
from matplotlib import pylab as plt
plt.ion()
plt.figure(1)
for i in range(len(coords)-1):
plt.plot([coords[i][0], coords[i+1][0]], [coords[i][1], coords[i+1][1]], 'b-')
plt.axis('equal')
geometry = '{"rings": [[' + ','.join(['[' + str(c[0]) + ',' + str(c[1]) + ']' for c in coords]) + ']]}'
import requests
payload = {"token": token,
"f": "json",
"tolerance": 0,
"returnGeometry": "false",
"geometry": geometry,
"geometryType": "esriGeometryPolygon",
"sr": sr,
"mapExtent": "{xmin}, {ymin}, {xmax}, {ymax}".format(xmin = xmin, ymin = ymin, xmax = xmax, ymax = ymax),
"layers": "visible:0",
"imageDisplay": "572%2C774%2C96"}
r = requests.get(base_url,params=payload)
records = r.json()
if records.get('exceededTransferLimit', None) is not None:
print('WARNING: Transfer limit exceeded. Reduce square size')
return [[s['attributes']['NOGEOMAPN'], s['attributes']['TRA']] for s in records['results']]
def collect_all_apns_and_tras(square_size = 5000, plot_boundaries = True):
x, y = zip(*polygon)
(minx, maxx, miny, maxy) = (min(x), max(x), min(y), max(y))
import math
apns_and_tras = list()
for i in range(math.ceil((maxy-miny)/square_size)):
tile_y_min = square_size * float(i) + miny
tile_y_max = square_size * float(i+1) + miny if square_size * float(i+1) + miny < maxy else maxy
for j in range(math.ceil((maxx-minx)/square_size)):
tile_x_min = square_size * float(j) + minx
            tile_x_max = square_size * float(j+1) + minx if square_size * float(j+1) + minx < maxx else maxx
extent = (tile_x_min, tile_y_min, tile_x_max, tile_y_max)
this_apns_and_tras = get_apns_and_tras(extent, plot_boundaries=plot_boundaries)
apns_and_tras += this_apns_and_tras
return apns_and_tras
def get_tax_record(apn):
payload = {"search_query":apn,
"category":all}
import requests
r = requests.get(tax_base_url, params=payload)
from bs4 import BeautifulSoup
soup = BeautifulSoup(r.content, features="html.parser")
a_tags = soup.find_all('a')
clickthrough = None
for tag in a_tags:
if tax_link_contents in tag.contents[0]:
clickthrough = tag['href']
bond_tax = 0
b_tax = 0
countywide_tax = 0
if clickthrough is not None:
r = requests.get(tax_bill_url + clickthrough)
soup = BeautifulSoup(r.content, features="html.parser")
td_countywide = soup.find("td", text=tax_key_countywide)
if td_countywide is not None:
            countywide_tax = float(
                td_countywide.find_next_sibling("td").find_next_sibling("td")
                .find_next_sibling("td").find_next_sibling("td")
                .find_next_sibling("td").text.replace('$', '').replace(',', ''))
td_tax_bond = soup.find("td", text=tax_key_bond)
if td_tax_bond is not None:
            bond_tax = float(
                td_tax_bond.find_next_sibling("td").find_next_sibling("td")
                .find_next_sibling("td").find_next_sibling("td")
                .find_next_sibling("td").text.replace('$', '').replace(',', ''))
td_B_tax = soup.find("td", text=tax_key_B)
if td_B_tax is not None:
b_tax = float(td_B_tax.find_next_sibling("td").text.replace('$','').replace(',',''))
return countywide_tax, bond_tax, b_tax
data = list()
APNs_and_TRAs = list(collect_all_apns_and_tras())
APN_dictionary = {a[0]:a[1] for a in APNs_and_TRAs}
APNs = list(APN_dictionary.keys())
APNs.sort()
total = len(APNs)
counter = 1
for APN in APNs:
print('{counter} / {total}'.format(counter = counter, total = total))
countywide_tax, bond_tax, b_tax = get_tax_record(APN)
this_APN = APN[0:3] + '-' + APN[3:6] + '-' + APN[6:]
data.append([this_APN, APN_dictionary[APN], countywide_tax, bond_tax, b_tax])
counter += 1
import csv
with open(output_filename, 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row in data:
writer.writerow(row)
import pickle as p
with open(output_pfilename,'wb') as pfile:
p.dump(data, pfile)
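A hedged, self-contained illustration of the tiling arithmetic used in collect_all_apns_and_tras above (with the x-clamp fixed); the extent and tile size here are made up.
import math

minx, maxx, square_size = 0.0, 12000.0, 5000
for j in range(math.ceil((maxx - minx) / square_size)):
    tile_x_min = square_size * float(j) + minx
    tile_x_max = min(square_size * float(j + 1) + minx, maxx)
    print(tile_x_min, tile_x_max)
# tiles: (0, 5000), (5000, 10000), (10000, 12000)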
| 42.392405
| 127
| 0.597044
| 875
| 6,698
| 4.384
| 0.380571
| 0.020073
| 0.031543
| 0.048749
| 0.187956
| 0.177007
| 0.120699
| 0.054745
| 0.054745
| 0.054745
| 0
| 0.15239
| 0.250523
| 6,698
| 157
| 128
| 42.66242
| 0.611753
| 0
| 0
| 0.027027
| 0
| 0.006757
| 0.111708
| 0.013292
| 0.006757
| 0
| 0
| 0
| 0
| 1
| 0.02027
| false
| 0
| 0.054054
| 0
| 0.094595
| 0.013514
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 24377195af086d2d9b533b86147cc02d046d8a1c
| 2,263
| py
| Python
| tests/test_arraycoords_core.py
| sakshamsingh1/micarraylib
| e3a87a1ee55dce50ce33d0dfa23e266b733de535
| ["CC-BY-4.0"]
| 11
| 2021-11-14T19:33:33.000Z
| 2022-03-17T20:38:27.000Z
| tests/test_arraycoords_core.py
| sakshamsingh1/micarraylib
| e3a87a1ee55dce50ce33d0dfa23e266b733de535
| ["CC-BY-4.0"]
| 7
| 2022-01-17T17:50:49.000Z
| 2022-03-31T14:42:34.000Z
| tests/test_arraycoords_core.py
| sakshamsingh1/micarraylib
| e3a87a1ee55dce50ce33d0dfa23e266b733de535
| ["CC-BY-4.0"]
| 4
| 2021-11-16T14:05:11.000Z
| 2022-03-23T00:35:00.000Z
from micarraylib.arraycoords.core import micarray
from micarraylib.arraycoords import array_shapes_raw
from micarraylib.arraycoords.array_shapes_utils import _polar2cart
import pytest
import numpy as np
def test_micarray_init():
arr = micarray(array_shapes_raw.cube2l_raw, "cartesian", None, "foo")
assert arr.name == "foo"
assert arr.capsule_names == list(array_shapes_raw.cube2l_raw.keys())
assert arr.coords_dict == array_shapes_raw.cube2l_raw
assert arr.coords_form == "cartesian"
assert arr.angle_units == None
# no coordinates form
with pytest.raises(ValueError):
micarray(array_shapes_raw.ambeovr_raw)
# cartesian with angle units
with pytest.raises(ValueError):
micarray(array_shapes_raw.cube2l_raw, "cartesian", "degree")
def test_micarray_center_coords():
arr = micarray(array_shapes_raw.cube2l_raw, "cartesian")
arr.center_coords()
assert np.allclose(
np.mean(np.array([c for c in arr.coords_dict.values()]), axis=0), [0, 0, 0]
)
arr = micarray(array_shapes_raw.ambeovr_raw, "polar", "degrees")
arr.center_coords()
assert np.allclose(
np.mean(np.array([c for c in arr.coords_dict.values()]), axis=0), [0, 0, 0]
)
assert arr.coords_form == "cartesian"
assert arr.angle_units == None
def test_micarray_standard_coords():
arr = micarray(array_shapes_raw.eigenmike_raw, "polar", "degrees")
arr.standard_coords("cartesian")
assert np.allclose(
np.mean(np.array([c for c in arr.coords_dict.values()]), axis=0), [0, 0, 0]
)
arr.standard_coords("polar")
assert arr.coords_form == "polar"
assert arr.angle_units == "radians"
# sanity check on range of angles in polar coordinates
assert all([c[0] > 0 and c[0] < 180 for c in arr.coords_dict.values()])
assert all([c[1] <= 180 and c[1] >= -180 for c in arr.coords_dict.values()])
# returning to cartesian should result in coordinates centered around zero
coords_cart = _polar2cart(arr.coords_dict, "radians")
assert np.allclose(
np.mean(np.array([v for v in coords_cart.values()]), axis=0),
[0, 0, 0],
)
# value when form not specified
with pytest.raises(ValueError):
arr.standard_coords()
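For reference, a hedged sketch of the polar-to-cartesian conversion these tests exercise, assuming (colatitude, azimuth, radius) with angles in radians; the real _polar2cart in micarraylib may use a different convention or dict layout.
import numpy as np

def polar2cart_sketch(colat, azim, r=1.0):
    # standard spherical-to-cartesian, colatitude measured from +z
    x = r * np.sin(colat) * np.cos(azim)
    y = r * np.sin(colat) * np.sin(azim)
    z = r * np.cos(colat)
    return np.array([x, y, z])

print(polar2cart_sketch(np.pi / 2, 0.0))  # -> [1., 0., 0.]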
| 33.776119
| 83
| 0.691118
| 323
| 2,263
| 4.659443
| 0.241486
| 0.017276
| 0.083721
| 0.087708
| 0.499003
| 0.468439
| 0.406645
| 0.375415
| 0.217276
| 0.217276
| 0
| 0.020174
| 0.189571
| 2,263
| 66
| 84
| 34.287879
| 0.800436
| 0.089262
| 0
| 0.340426
| 0
| 0
| 0.055474
| 0
| 0
| 0
| 0
| 0
| 0.319149
| 1
| 0.06383
| false
| 0
| 0.106383
| 0
| 0.170213
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 24395c699d46f97162aa14777bf15b0856cae6e4
| 491
| py
| Python
| Math/Leetcode5839.py
| Rylie-W/LeetRecord
| 623c4efe88b3af54b8a65f6ec23db850b8c6f46f
| ["Apache-2.0"]
| null
| null
| null
| Math/Leetcode5839.py
| Rylie-W/LeetRecord
| 623c4efe88b3af54b8a65f6ec23db850b8c6f46f
| ["Apache-2.0"]
| null
| null
| null
| Math/Leetcode5839.py
| Rylie-W/LeetRecord
| 623c4efe88b3af54b8a65f6ec23db850b8c6f46f
| ["Apache-2.0"]
| null
| null
| null
import math
import heapq
class Solution:
    def minStoneSum(self, piles, k: int) -> int:
        # heapq is a min-heap, so store negated values to always pop the
        # largest pile; the original q[-1] / q.pop() looked at an arbitrary
        # element and broke the heap invariant.
        q = [-p for p in piles]
        heapq.heapify(q)
        while k:
            c = -heapq.heappop(q)
            c = c - math.floor(c / 2)
            heapq.heappush(q, -c)
            k -= 1
        return -sum(q)
if __name__ == '__main__':
sol=Solution()
piles = [4, 3, 6, 7]
k = 3
print(sol.minStoneSum(piles,k))
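A quick hedged check of the max-heap fix above against the published examples for minStoneSum: halving the largest pile k times should leave a total of 12 in both cases.
assert Solution().minStoneSum([5, 4, 9], 2) == 12
assert Solution().minStoneSum([4, 3, 6, 7], 3) == 12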
| 20.458333
| 48
| 0.462322
| 69
| 491
| 3.173913
| 0.507246
| 0.054795
| 0.054795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031142
| 0.411405
| 491
| 24
| 49
| 20.458333
| 0.726644
| 0
| 0
| 0
| 0
| 0
| 0.01626
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.090909
| 0
| 0.227273
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 243e4ebb71651a1947d9bfa01a4d3102ca1fdc2f
| 5,115
| py
| Python
| kcwidrp/primitives/SubtractSky.py
| scizen9/KCWI_DRP
| 1d82ee4f82be491628d6baa555401c18aa0472a2
| ["BSD-3-Clause"]
| 5
| 2020-04-09T20:05:52.000Z
| 2021-08-04T18:04:28.000Z
| kcwidrp/primitives/SubtractSky.py
| scizen9/KCWI_DRP
| 1d82ee4f82be491628d6baa555401c18aa0472a2
| ["BSD-3-Clause"]
| 80
| 2020-03-19T00:35:27.000Z
| 2022-03-07T20:08:23.000Z
| kcwidrp/primitives/SubtractSky.py
| scizen9/KCWI_DRP
| 1d82ee4f82be491628d6baa555401c18aa0472a2
| ["BSD-3-Clause"]
| 9
| 2021-01-22T02:00:32.000Z
| 2022-02-08T19:43:16.000Z
from keckdrpframework.primitives.base_primitive import BasePrimitive
from kcwidrp.primitives.kcwi_file_primitives import kcwi_fits_reader, \
kcwi_fits_writer, get_master_name, strip_fname
import os
class SubtractSky(BasePrimitive):
def __init__(self, action, context):
BasePrimitive.__init__(self, action, context)
self.logger = context.pipeline_logger
def _pre_condition(self):
"""
Checks if a master sky exists to subtract
:return:
"""
self.logger.info("Checking precondition for SubtractSky")
skyfile = None
skymask = None
# check if kcwi.sky exists
if os.path.exists('kcwi.sky'):
f = open('kcwi.sky')
skyproc = f.readlines()
f.close()
# is our file in the list?
ofn = self.action.args.name
for row in skyproc:
if ofn in row.split()[0]:
skyfile = row.split()[1]
if len(row.split()) > 2:
skymask = row.split()[2]
if skyfile:
if not os.path.exists(skyfile):
skyfile = None
if skymask:
if not os.path.exists(skymask):
skymask = None
self.action.args.skyfile = skyfile
self.action.args.skymask = skymask
if skyfile:
self.logger.info("pre condition got 1 master sky, expected 1")
return True
else:
target_type = 'SKY'
tab = self.context.proctab.search_proctab(frame=self.action.args.ccddata,
target_type=target_type,
nearest=True)
self.logger.info("pre condition got %d master sky, expected 1"
% len(tab))
if len(tab) <= 0:
return False
else:
return True
def _perform(self):
self.logger.info("Subtracting sky background")
# Header keyword to update
key = 'SKYCOR'
keycom = 'sky corrected?'
target_type = 'SKY'
skyfile = self.action.args.skyfile
skymask = self.action.args.skymask
if not self.action.args.skyfile:
tab = self.context.proctab.search_proctab(frame=self.action.args.ccddata,
target_type=target_type,
nearest=True)
self.logger.info("%d master sky frames found" % len(tab))
if len(tab) > 0:
skyfile = tab['filename'][0]
msname = strip_fname(skyfile) + '_' + target_type.lower() + ".fits"
if os.path.exists(os.path.join(self.config.instrument.cwd,
'redux', msname)):
self.logger.info("Reading image: %s" % msname)
msky = kcwi_fits_reader(
os.path.join(self.config.instrument.cwd, 'redux',
msname))[0]
# scale the sky?
obtime = self.action.args.ccddata.header['XPOSURE']
sktime = msky.header['XPOSURE']
if obtime <= 0. or sktime <= 0.:
self.logger.warning("Bad exposure times (obj, sky): %.1f, %1f"
% (obtime, sktime))
skscl = 1.
else:
skscl = obtime / sktime
self.logger.info("Sky scale factor = %.3f" % skscl)
# do the subtraction
self.action.args.ccddata.data -= msky.data * skscl
# update header keywords
self.action.args.ccddata.header[key] = (True, keycom)
self.action.args.ccddata.header['SKYMAST'] = (msname,
"Master sky filename")
self.action.args.ccddata.header['SKYSCL'] = (skscl,
'sky scale factor')
if skymask:
self.action.args.ccddata.header['SKYMSKF'] = (skymask,
'sky mask file')
else:
# update header keywords
self.action.args.ccddata.header[key] = (False, keycom)
log_string = SubtractSky.__module__
self.action.args.ccddata.header['HISTORY'] = log_string
# write out int image
kcwi_fits_writer(self.action.args.ccddata,
table=self.action.args.table,
output_file=self.action.args.name,
output_dir=self.config.instrument.output_directory,
suffix="intk")
self.context.proctab.update_proctab(frame=self.action.args.ccddata,
suffix="intk",
filename=self.action.args.name)
self.context.proctab.write_proctab()
self.logger.info(log_string)
return self.action.args
# END: class SubtractSky()
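From the parsing in _pre_condition above, each row of kcwi.sky is whitespace-separated: the object filename, the master-sky filename, and an optional sky mask. A hypothetical example (the filenames are made up):
# kcwi.sky: <object frame> <master sky frame> [<sky mask>]
kb190101_00042.fits kb190101_00050_sky.fits
kb190101_00043.fits kb190101_00050_sky.fits kb190101_mask.fits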
| 38.75
| 85
| 0.504594
| 514
| 5,115
| 4.92607
| 0.272374
| 0.094787
| 0.121643
| 0.099526
| 0.266193
| 0.199447
| 0.157188
| 0.157188
| 0.157188
| 0.082938
| 0
| 0.005543
| 0.400391
| 5,115
| 131
| 86
| 39.045802
| 0.82002
| 0.049071
| 0
| 0.22449
| 0
| 0
| 0.086443
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030612
| false
| 0
| 0.030612
| 0
| 0.112245
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 243f3be35cd23daccbea4684b6c7521e62d8c778
| 2,517
| py
| Python
| ask-sdk-model-runtime/ask_sdk_model_runtime/api_configuration.py
| Signal-Kinetics/alexa-apis-for-python
| abb8d3dce18a5510c48b215406ed36c024f01495
| ["Apache-2.0"]
| 90
| 2018-09-19T21:56:42.000Z
| 2022-03-30T11:25:21.000Z
| ask-sdk-model-runtime/ask_sdk_model_runtime/api_configuration.py
| Signal-Kinetics/alexa-apis-for-python
| abb8d3dce18a5510c48b215406ed36c024f01495
| ["Apache-2.0"]
| 11
| 2018-09-23T12:16:48.000Z
| 2021-06-10T19:49:45.000Z
| ask-sdk-model-runtime/ask_sdk_model_runtime/api_configuration.py
| Signal-Kinetics/alexa-apis-for-python
| abb8d3dce18a5510c48b215406ed36c024f01495
| ["Apache-2.0"]
| 28
| 2018-09-19T22:30:38.000Z
| 2022-02-22T22:57:07.000Z
# -*- coding: utf-8 -*-
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
import typing
if typing.TYPE_CHECKING:
from .serializer import Serializer
from .api_client import ApiClient
class ApiConfiguration(object):
"""Represents a class that provides API configuration options needed by
service clients.
:param serializer: serializer implementation for encoding/decoding JSON
from/to Object models.
:type serializer: (optional) ask_sdk_model_runtime.serializer.Serializer
:param api_client: API Client implementation
:type api_client: (optional) ask_sdk_model_runtime.api_client.ApiClient
:param authorization_value: Authorization value to be used on any calls of
the service client instance
:type authorization_value: (optional) str
:param api_endpoint: Endpoint to hit by the service client instance
:type api_endpoint: (optional) str
"""
def __init__(self, serializer=None, api_client=None,
authorization_value=None, api_endpoint=None):
# type: (Serializer, ApiClient, str, str) -> None
"""Represents a class that provides API configuration options needed by
service clients.
:param serializer: serializer implementation for encoding/decoding JSON
from/to Object models.
:type serializer: (optional) ask_sdk_model_runtime.serializer.Serializer
:param api_client: API Client implementation
:type api_client: (optional) ask_sdk_model_runtime.api_client.ApiClient
:param authorization_value: Authorization value to be used on any calls
of the service client instance
:type authorization_value: (optional) str
:param api_endpoint: Endpoint to hit by the service client instance
:type api_endpoint: (optional) str
"""
self.serializer = serializer
self.api_client = api_client
self.authorization_value = authorization_value
self.api_endpoint = api_endpoint
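A minimal, hedged usage sketch. The serializer and api_client are left as None because concrete implementations are not shown in this file, and the token and endpoint values are illustrative placeholders.
config = ApiConfiguration(
    serializer=None,                       # plug in a Serializer implementation
    api_client=None,                       # plug in an ApiClient implementation
    authorization_value="Bearer <token>",  # placeholder token
    api_endpoint="https://api.amazonalexa.com")
print(config.api_endpoint)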
| 41.95
| 80
| 0.72507
| 322
| 2,517
| 5.540373
| 0.341615
| 0.060538
| 0.03139
| 0.042601
| 0.567265
| 0.567265
| 0.567265
| 0.567265
| 0.567265
| 0.567265
| 0
| 0.004525
| 0.209774
| 2,517
| 59
| 81
| 42.661017
| 0.892408
| 0.740961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.272727
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 2442557711a70e9fcc1b17d9d62fad88b7a458da
| 2,149
| py
| Python
| src/stats/__main__.py
| bmcculley/pycalcstats
| 74501b3fb2c5c061e5629eed127d8554345c0bd3
| ["MIT"]
| null
| null
| null
| src/stats/__main__.py
| bmcculley/pycalcstats
| 74501b3fb2c5c061e5629eed127d8554345c0bd3
| ["MIT"]
| null
| null
| null
| src/stats/__main__.py
| bmcculley/pycalcstats
| 74501b3fb2c5c061e5629eed127d8554345c0bd3
| ["MIT"]
| null
| null
| null
#!/usr/bin/env python3
## Copyright (c) 2011 Steven D'Aprano.
## See the file __init__.py for the licence terms for this software.
"""
Run the stats package as if it were an executable module.
Usage:
$ python3 -m stats [options]
Options:
-h --help Print this help text.
-V --version Print the version number.
-v --verbose Run tests verbosely.
-q --quiet Don't print anything on success.
With no options, perform a self-test of the stats package by running all
doctests in the package. By default, failed tests will be printed. If all
tests pass, a count of how many tests were performed is printed.
To print details of all tests regardless of whether they succeed or fail,
pass the verbose flag after the package name:
$ python3 -m stats -v
To suppress output if all tests pass, pass the quiet flag:
$ python3 -m stats -q
"""
import sys
def process_options():
argv = sys.argv[1:]
if '-h' in argv or '--help' in argv:
print(__doc__)
sys.exit(0)
verbose = '-v' in argv or '--verbose' in argv
quiet = '-q' in argv or '--quiet' in argv
if verbose and quiet:
print('cannot be both quiet and verbose', file=sys.stderr)
sys.exit(1)
if '-V' in argv or '--version' in argv:
import stats
print(stats.__version__)
sys.exit(0)
return verbose, quiet
def self_test(verbose, quiet):
assert not (verbose and quiet)
import doctest
import stats, stats.co, stats.multivar, stats.order, \
stats.univar, stats.utils, stats.vectorize
modules = (stats, stats.co, stats.multivar, stats.order,
stats.univar, stats.utils, stats.vectorize,
)
failed = tried = 0
for module in modules:
a, b = doctest.testmod(module, verbose=verbose)
failed += a
tried += b
if failed == 0 and not quiet:
print("Successfully run %d doctests from %d files."
% (tried, len(modules)))
return failed
if __name__ == '__main__' and __package__ is not None:
verbose, quiet = process_options()
sys.exit(self_test(verbose, quiet))
| 29.040541
| 73
| 0.642624
| 312
| 2,149
| 4.336538
| 0.38141
| 0.035477
| 0.023651
| 0.020695
| 0.103474
| 0.103474
| 0.103474
| 0.103474
| 0.103474
| 0.103474
| 0
| 0.008827
| 0.261982
| 2,149
| 73
| 74
| 29.438356
| 0.844262
| 0.402047
| 0
| 0.055556
| 0
| 0
| 0.095987
| 0
| 0
| 0
| 0
| 0
| 0.027778
| 1
| 0.055556
| false
| 0
| 0.111111
| 0
| 0.222222
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 2442c8f27888189c71758520f5da703f6def7184
| 4,277
| py
| Python
| tests/field_test.py
| intelligenceunion/mongo-driver
| 02bbc6839c8264d4b06b053e8cc83d42147ede17
| ["MIT"]
| null
| null
| null
| tests/field_test.py
| intelligenceunion/mongo-driver
| 02bbc6839c8264d4b06b053e8cc83d42147ede17
| ["MIT"]
| null
| null
| null
| tests/field_test.py
| intelligenceunion/mongo-driver
| 02bbc6839c8264d4b06b053e8cc83d42147ede17
| ["MIT"]
| 1
| 2019-06-21T17:49:08.000Z
| 2019-06-21T17:49:08.000Z
import unittest
import pymongo
import datetime
from bson import ObjectId
from iu_mongo import Document, connect
from iu_mongo.fields import *
from iu_mongo.errors import ValidationError
import iu_mongo
class Person(Document):
meta = {
'db_name': 'test'
}
name = StringField()
age = IntField(default=30, required=False)
userid = StringField(default=lambda: 'test', required=True)
created = DateTimeField(default=datetime.datetime.utcnow)
day = DateField(default=datetime.date.today)
class FieldTests(unittest.TestCase):
def setUp(self):
connect(db_names=['test'])
def tearDown(self):
Person.remove({})
def test_default_not_set(self):
person = Person(name="Ross")
data_to_be_saved = sorted(person.to_mongo().keys())
self.assertEqual(data_to_be_saved,
['age', 'created', 'day', 'name', 'userid']
)
self.assertTrue(person.validate() is None)
self.assertEqual(person.name, person.name)
self.assertEqual(person.age, person.age)
self.assertEqual(person.userid, person.userid)
self.assertEqual(person.created, person.created)
self.assertEqual(person.day, person.day)
self.assertEqual(person._data['name'], person.name)
self.assertEqual(person._data['age'], person.age)
self.assertEqual(person._data['userid'], person.userid)
self.assertEqual(person._data['created'], person.created)
self.assertEqual(person._data['day'], person.day)
data_to_be_saved = sorted(person.to_mongo().keys())
self.assertEqual(
data_to_be_saved, ['age', 'created', 'day', 'name', 'userid'])
def test_default_set_none(self):
person = Person(name=None, age=None, userid=None,
created=None, day=None)
data_to_be_saved = sorted(person.to_mongo().keys())
self.assertEqual(data_to_be_saved, ['age', 'created', 'day', 'userid'])
def test_int_field(self):
# max integer value mongodb can handle, i.e. 64-bit signed integer
max_int_val = (1 << 63)-1
class Doc(Document):
meta = {
'db_name': 'test'
}
test_int = IntField(min_value=-123, max_value=max_int_val)
Doc.remove({})
doc1 = Doc(test_int=max_int_val)
doc2 = Doc(test_int=None)
doc3 = Doc(test_int=max_int_val+1)
doc4 = Doc(test_int=-200)
doc1.save()
doc2.save()
self.assertEqual(Doc.count({'test_int': None}), 1)
self.assertEqual(Doc.count({'test_int': {'$ne': None}}), 1)
doc1 = Doc.find_one({'test_int': None})
doc2 = Doc.find_one({'test_int': {'$ne': None}})
self.assertEqual(doc1.test_int, None)
self.assertEqual(doc2.test_int, max_int_val)
self.assertRaises(iu_mongo.errors.ValidationError, doc3.save)
self.assertRaises(iu_mongo.errors.ValidationError, doc4.save)
doc5 = Doc(test_int='-123')
doc5.save()
self.assertEqual(Doc.count({'test_int': '-123'}), 0)
doc5 = Doc.find_one({'test_int': -123})
self.assertEqual(doc5.test_int, -123)
# 32-bit signed type
self.assertEqual(Doc.count({'test_int': {'$type': 'int'}}), 1)
# 64-bit signed type
self.assertEqual(Doc.count({'test_int': {'$type': 'long'}}), 1)
Doc.remove({})
def test_string_field(self):
class Doc(Document):
meta = {
'db_name': 'test'
}
test_str = StringField()
Doc.remove({})
doc1 = Doc(test_str=None)
doc2 = Doc(test_str='')
doc3 = Doc(test_str='abcdefghij')
doc4 = Doc(test_str='我')
doc1.save()
doc2.save()
doc3.save()
doc4.save()
self.assertEqual(Doc.count({'test_str': None}), 1)
self.assertEqual(Doc.count({'test_str': {'$ne': None}}), 3)
self.assertEqual(Doc.count({'test_str': ''}), 1)
doc4.reload()
doc3.reload()
self.assertIsInstance(doc3.test_str, str)
self.assertIsInstance(doc4.test_str, str)
self.assertEqual(Doc.count({'test_str': {'$type': 'string'}}), 3)
Doc.remove({})
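# Hypothetical sketch (assumes a reachable MongoDB and the iu_mongo API exactly
# as exercised above): defaults are applied at construction time, so a document
# validates without every field being set explicitly.
def _default_fields_sketch():
    connect(db_names=['test'])
    person = Person(name="Ross")
    assert person.validate() is None
    assert person.userid == 'test'  # callable default evaluated per instance
    return sorted(person.to_mongo().keys())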
| 35.347107
| 79
| 0.599018
| 523
| 4,277
| 4.728489
| 0.193117
| 0.151638
| 0.084917
| 0.083704
| 0.480793
| 0.43146
| 0.22038
| 0.167004
| 0.139507
| 0.139507
| 0
| 0.021297
| 0.253449
| 4,277
| 120
| 80
| 35.641667
| 0.75321
| 0.023848
| 0
| 0.188119
| 0
| 0
| 0.068329
| 0
| 0
| 0
| 0
| 0
| 0.29703
| 1
| 0.059406
| false
| 0
| 0.079208
| 0
| 0.237624
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2444309351fb1815ca19b79bdbd6b418c960ad90
| 3,874
|
py
|
Python
|
bitsv/network/services/bchsvexplorer.py
|
yoursmengle/bitsv
|
387ff649e3916521fc3528469fdb0eef9c97e624
|
[
"MIT"
] | 1
|
2019-06-28T05:20:07.000Z
|
2019-06-28T05:20:07.000Z
|
bitsv/network/services/bchsvexplorer.py
|
joshua-s/bitsv
|
8ca960c39cef7a7d655011fba690510684190f1e
|
[
"MIT"
] | 1
|
2020-01-10T13:16:36.000Z
|
2020-01-10T13:16:36.000Z
|
bitsv/network/services/bchsvexplorer.py
|
yoursmengle/bitsv
|
387ff649e3916521fc3528469fdb0eef9c97e624
|
[
"MIT"
] | null | null | null |
import requests
import json
from decimal import Decimal
from bitsv.network import currency_to_satoshi
from bitsv.network.meta import Unspent
# left here as a reminder to normalize get_transaction()
from bitsv.network.transaction import Transaction, TxInput, TxOutput
from bitsv.constants import BSV
DEFAULT_TIMEOUT = 30
BSV_TO_SAT_MULTIPLIER = BSV
class BCHSVExplorerAPI:
"""
Simple Bitcoin SV REST API --> uses base58 address format (addresses start with "1")
- get_address_info
- get_balance
- get_transactions
- get_transaction
- raw_get_transaction
- get_unspents
- send_transaction
"""
MAIN_ENDPOINT = 'https://bchsvexplorer.com/'
MAIN_ADDRESS_API = MAIN_ENDPOINT + 'api/addr/{}'
MAIN_BALANCE_API = MAIN_ADDRESS_API + '/balance'
MAIN_UNSPENT_API = MAIN_ADDRESS_API + '/utxo'
MAIN_TX_PUSH_API = MAIN_ENDPOINT + 'api/tx/send/'
MAIN_TX_API = MAIN_ENDPOINT + 'api/tx/{}'
MAIN_TX_AMOUNT_API = MAIN_TX_API
TX_PUSH_PARAM = 'create_rawtx'
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
}
@classmethod
def get_address_info(cls, address):
r = requests.get(cls.MAIN_ADDRESS_API.format(address), timeout=DEFAULT_TIMEOUT)
r.raise_for_status() # pragma: no cover
return r.json()
@classmethod
def get_balance(cls, address):
r = requests.get(cls.MAIN_BALANCE_API.format(address), timeout=DEFAULT_TIMEOUT)
r.raise_for_status() # pragma: no cover
return r.json()
@classmethod
def get_transactions(cls, address):
r = requests.get(cls.MAIN_ADDRESS_API.format(address), timeout=DEFAULT_TIMEOUT)
r.raise_for_status() # pragma: no cover
return r.json()['transactions']
@classmethod
def get_transaction(cls, txid):
r = requests.get(cls.MAIN_TX_API.format(txid), timeout=DEFAULT_TIMEOUT)
r.raise_for_status() # pragma: no cover
response = r.json(parse_float=Decimal)
tx = Transaction(response['txid'],
(Decimal(response['valueIn']) * BSV_TO_SAT_MULTIPLIER).normalize(),
(Decimal(response['valueOut']) * BSV_TO_SAT_MULTIPLIER).normalize())
for txin in response['vin']:
part = TxInput(txin['addr'], txin['valueSat'])
tx.add_input(part)
for txout in response['vout']:
addr = None
if 'addresses' in txout['scriptPubKey'] and \
txout['scriptPubKey']['addresses'] is not None:
addr = txout['scriptPubKey']['addresses'][0]
part = TxOutput(addr,
(Decimal(txout['value']) * BSV_TO_SAT_MULTIPLIER).normalize(),
txout['scriptPubKey']['asm'])
tx.add_output(part)
return tx
@classmethod
def raw_get_transaction(cls, txid):
"""un-altered return value from API - useful for debugging"""
r = requests.get(cls.MAIN_TX_API.format(txid), timeout=DEFAULT_TIMEOUT)
r.raise_for_status() # pragma: no cover
return r.json()
@classmethod
def get_unspents(cls, address):
r = requests.get(cls.MAIN_UNSPENT_API.format(address), timeout=DEFAULT_TIMEOUT)
r.raise_for_status() # pragma: no cover
return [
Unspent(currency_to_satoshi(tx['amount'], 'bsv'),
tx['confirmations'],
tx['scriptPubKey'],
tx['txid'],
tx['vout'])
for tx in r.json()
]
@classmethod
def send_transaction(cls, rawtx): # pragma: no cover
r = requests.post(
'https://bchsvexplorer.com/api/tx/send',
data=json.dumps({'rawtx': rawtx}),
headers=cls.headers,
)
r.raise_for_status()
return r.json()['txid']
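# Hypothetical usage sketch (performs live HTTP calls against bchsvexplorer.com;
# the address below is a placeholder): the classmethods above need no instance.
def _balance_sketch(address='1BitcoinEaterAddressDontSendf59kuE'):
    balance = BCHSVExplorerAPI.get_balance(address)    # integer satoshis, per the API
    unspents = BCHSVExplorerAPI.get_unspents(address)  # list of Unspent tuples
    return balance, unspents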
| 33.982456
| 93
| 0.617708
| 455
| 3,874
| 5.048352
| 0.248352
| 0.042664
| 0.027427
| 0.045712
| 0.338703
| 0.286025
| 0.286025
| 0.260775
| 0.260775
| 0.260775
| 0
| 0.002119
| 0.268973
| 3,874
| 113
| 94
| 34.283186
| 0.808969
| 0.106866
| 0
| 0.25
| 0
| 0
| 0.103873
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.369048
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2444ac3c50dcef8edaf375b5ced228049606008a
| 336
|
py
|
Python
|
utils/common.py
|
Dineshs91/django-revert-test
|
e1954f287427e74255f17fb56886fbf90580ab77
|
[
"MIT"
] | null | null | null |
utils/common.py
|
Dineshs91/django-revert-test
|
e1954f287427e74255f17fb56886fbf90580ab77
|
[
"MIT"
] | null | null | null |
utils/common.py
|
Dineshs91/django-revert-test
|
e1954f287427e74255f17fb56886fbf90580ab77
|
[
"MIT"
] | null | null | null |
from rest_framework.response import Response
def create_response(data=None, error=None, status=None):
if status is not None and 200 <= status < 400:
success = True
else:
success = False
response = {
"data": data,
"error": error,
"success": success
}
return Response(data=response, status=status)
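# Hypothetical view sketch (assumes Django REST framework is installed):
# create_response() wraps any payload in the uniform {data, error, success}
# envelope, deriving success from the status code.
from rest_framework.views import APIView

class PingView(APIView):
    def get(self, request):
        return create_response(data={"ping": "pong"}, status=200)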
| 21
| 56
| 0.607143
| 37
| 336
| 5.459459
| 0.513514
| 0.178218
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025105
| 0.28869
| 336
| 15
| 57
| 22.4
| 0.820084
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
24468a96f003b4054d7dff4b59d198dddc557f17
| 1,699
|
py
|
Python
|
email-updates-stonks-AI-ML/venv/Lib/site-packages/pandas_datareader/econdb.py
|
iVibudh/stock-prediction
|
3900619224bb86e382c782d39138914294199733
|
[
"MIT"
] | 1
|
2021-02-06T21:00:00.000Z
|
2021-02-06T21:00:00.000Z
|
venv/lib/python3.8/site-packages/pandas_datareader/econdb.py
|
jsherretts/stock-trading-bot
|
234bd7b1e67e6e2d9c728dce1851e020aab0662e
|
[
"MIT"
] | 2
|
2021-03-31T19:54:17.000Z
|
2021-06-02T02:33:56.000Z
|
venv/lib/python3.8/site-packages/pandas_datareader/econdb.py
|
jsherretts/stock-trading-bot
|
234bd7b1e67e6e2d9c728dce1851e020aab0662e
|
[
"MIT"
] | 1
|
2021-07-28T20:35:14.000Z
|
2021-07-28T20:35:14.000Z
|
import pandas as pd
import requests
from pandas_datareader.base import _BaseReader
class EcondbReader(_BaseReader):
"""Get data for the given name from Econdb."""
_URL = "https://www.econdb.com/api/series/"
_format = None
_show = "labels"
@property
def url(self):
"""API URL"""
if not isinstance(self.symbols, str):
raise ValueError("data name must be string")
return "{0}?{1}&format=json&page_size=500&expand=both".format(
self._URL, self.symbols
)
def read(self):
""" read one data from specified URL """
results = requests.get(self.url).json()["results"]
df = pd.DataFrame({"dates": []}).set_index("dates")
if self._show == "labels":
def show_func(x):
return x.split(":")[1]
elif self._show == "codes":
def show_func(x):
return x.split(":")[0]
for entry in results:
series = pd.DataFrame(entry["data"])[["dates", "values"]].set_index("dates")
head = entry["additional_metadata"]
if head != "": # this additional metadata is not blank
series.columns = pd.MultiIndex.from_tuples(
[[show_func(x) for x in head.values()]],
names=[show_func(x) for x in head.keys()],
)
if not df.empty:
df = df.join(series, how="outer")
else:
df = series
if df.shape[0] > 0:
df.index = pd.to_datetime(df.index, errors="ignore")
df.index.name = "TIME_PERIOD"
df = df.truncate(self.start, self.end)
return df
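# Hypothetical usage sketch (performs a live HTTP request; 'ticker=RGDPUS' is a
# sample Econdb query string): _BaseReader accepts the query as `symbols`, so
# the reader above can be driven directly.
def _econdb_sketch():
    reader = EcondbReader(symbols='ticker=RGDPUS')
    return reader.read().head()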
| 29.293103
| 88
| 0.535021
| 205
| 1,699
| 4.336585
| 0.453659
| 0.035996
| 0.040495
| 0.026997
| 0.096738
| 0.096738
| 0.096738
| 0
| 0
| 0
| 0
| 0.007881
| 0.32784
| 1,699
| 57
| 89
| 29.807018
| 0.770578
| 0.071218
| 0
| 0.05
| 0
| 0
| 0.128123
| 0.028828
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.075
| 0.05
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2446ff9bc5f75c50ae2d83b9186ef463217c4dff
| 2,500
|
py
|
Python
|
adet/modeling/SparseMaskInst/SparseMaskEncoding/utils.py
|
shuaiqi361/AdelaiDet
|
35d944033a8d2f7aa623ad607b57bd8a1fe88b43
|
[
"BSD-2-Clause"
] | null | null | null |
adet/modeling/SparseMaskInst/SparseMaskEncoding/utils.py
|
shuaiqi361/AdelaiDet
|
35d944033a8d2f7aa623ad607b57bd8a1fe88b43
|
[
"BSD-2-Clause"
] | null | null | null |
adet/modeling/SparseMaskInst/SparseMaskEncoding/utils.py
|
shuaiqi361/AdelaiDet
|
35d944033a8d2f7aa623ad607b57bd8a1fe88b43
|
[
"BSD-2-Clause"
] | null | null | null |
# coding:utf-8
import numpy as np
import torch
import math
class IOUMetric(object):
"""
Class to calculate mean-iou using fast_hist method
"""
def __init__(self, num_classes):
self.num_classes = num_classes
self.hist = np.zeros((num_classes, num_classes))
def _fast_hist(self, label_pred, label_true):
mask = (label_true >= 0) & (label_true < self.num_classes)
hist = np.bincount(
self.num_classes * label_true[mask].astype(int) +
label_pred[mask], minlength=self.num_classes ** 2).reshape(self.num_classes, self.num_classes)
return hist
def add_batch(self, predictions, gts):
for lp, lt in zip(predictions, gts):
self.hist += self._fast_hist(lp.flatten(), lt.flatten())
def evaluate(self):
acc = np.diag(self.hist).sum() / self.hist.sum()
acc_cls = np.diag(self.hist) / self.hist.sum(axis=1)
acc_cls = np.nanmean(acc_cls)
iu = np.diag(self.hist) / (self.hist.sum(axis=1) + self.hist.sum(axis=0) - np.diag(self.hist))
mean_iu = np.nanmean(iu)
freq = self.hist.sum(axis=1) / self.hist.sum()
fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
return acc, acc_cls, iu, mean_iu, fwavacc
def soft_thresholding(x, lm):
ze_ = torch.zeros(size=x.size(), device=x.device)
return torch.sign(x) * torch.maximum(torch.abs(x) - lm, ze_)
@torch.no_grad()
def fast_ista(b, A, lmbda, max_iter):
"""
This is the fast Iterative Shrinkage-Thresholding Algorithm to solve the following objective:
min: {L2_norm(Ax - b) + L1_norm(x)}
:param b: input data with shape: [n_samples, n_features]
:param A: a pre-learned Dictionary, with shape: [n_coeffs, n_features]
:param lmbda: sparsity term to control the importance of the L1 term
:param max_iter:
:return: sparse codes with shape: [n_samples, n_coeffs]
"""
n_coeffs, n_feats = A.size()
n_samples = b.size()[0]
x = torch.zeros(size=(n_samples, n_coeffs), device=b.device)
t = 1.
z = torch.zeros(size=(n_samples, n_coeffs), device=b.device)
L = torch.linalg.norm(A, ord=2) ** 2 # Lipschitz constant, 2-norm (largest sing. value)
for k in range(max_iter):
x_old = x.clone()
z = z + torch.matmul(b - torch.matmul(z, A), A.T) / L
x = soft_thresholding(z, lmbda / L)
t0 = t
t = (1. + math.sqrt(1. + 4. * t ** 2)) / 2.
z = x + ((t0 - 1.) / t) * (x - x_old)
return x
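# Hypothetical usage sketch (random data, illustrative shapes only): fast_ista()
# returns sparse codes x such that x @ A approximates b; the soft-thresholding
# step drives most coefficients toward zero.
def _fista_sketch():
    torch.manual_seed(0)
    A = torch.randn(16, 64)  # [n_coeffs, n_features] dictionary
    b = torch.randn(4, 64)   # [n_samples, n_features] signals
    x = fast_ista(b, A, lmbda=0.1, max_iter=50)
    return x.shape           # torch.Size([4, 16])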
| 35.211268
| 106
| 0.6164
| 388
| 2,500
| 3.822165
| 0.332474
| 0.064734
| 0.066082
| 0.037761
| 0.184761
| 0.160486
| 0.122724
| 0.122724
| 0.0971
| 0.056642
| 0
| 0.013151
| 0.2396
| 2,500
| 70
| 107
| 35.714286
| 0.766965
| 0.2052
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.068182
| 0
| 0.318182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
24483a88227f9c9ae585353b8fb8fe058a952b42
| 11,524
|
py
|
Python
|
nautobot_chatops/api/views/mattermost.py
|
smk4664/nautobot-plugin-chatops
|
223d15a8a3364b44740f2912b44a2f11837946b3
|
[
"Apache-2.0"
] | null | null | null |
nautobot_chatops/api/views/mattermost.py
|
smk4664/nautobot-plugin-chatops
|
223d15a8a3364b44740f2912b44a2f11837946b3
|
[
"Apache-2.0"
] | null | null | null |
nautobot_chatops/api/views/mattermost.py
|
smk4664/nautobot-plugin-chatops
|
223d15a8a3364b44740f2912b44a2f11837946b3
|
[
"Apache-2.0"
] | null | null | null |
"""Views to receive inbound notifications from Mattermost, parse them, and enqueue worker actions."""
import json
import logging
import shlex
from django.conf import settings
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from nautobot_chatops.workers import get_commands_registry, commands_help, parse_command_string
from nautobot_chatops.dispatchers.mattermost import MattermostDispatcher, Driver
from nautobot_chatops.utils import check_and_enqueue_command
from nautobot_chatops.metrics import signature_error_cntr
from nautobot_chatops.models import CommandToken
from nautobot_chatops.choices import CommandTokenPlatformChoices
# pylint: disable=logging-fstring-interpolation
logger = logging.getLogger(__name__)
def verify_signature(request):
"""Verify that a given request was legitimately signed by Mattermost.
https://developers.mattermost.com/integrate/slash-commands/
Returns:
tuple: (valid, reason)
"""
if request.headers.get("Authorization"):
expected_signature = request.headers.get("Authorization")
else:
# For some reason Integration Messages from Mattermost do not show up in POST.items()
# in these cases, we have to load the request.body
try:
data = json.loads(request.body)
except ValueError as err:
logger.info("No request body to decode, setting data to empty dict. Error: %s", err)
data = {}
if request.POST.items():
data.update(request.POST)
# For Interactive Messages, the token will be passed in the context.
if data.get("context"):
action = data.get("context")
expected_signature = action.get("token")
# For Interactive Dialogs, the token will be passed in the state.
elif data.get("state"):
expected_signature = data.get("state")
else:
signature_error_cntr.labels("mattermost", "missing_signature").inc()
return False, "Missing Command Token in Body or Header"
if not expected_signature:
signature_error_cntr.labels("mattermost", "missing_signature").inc()
return False, "Missing Command Token"
command_tokens = CommandToken.objects.filter(platform=CommandTokenPlatformChoices.MATTERMOST)
if not command_tokens.filter(token=expected_signature.split("Token ")[1]):
signature_error_cntr.labels("mattermost", "incorrect_signature").inc()
return False, "Incorrect signature"
return True, "Signature is valid"
@method_decorator(csrf_exempt, name="dispatch")
class MattermostSlashCommandView(View):
"""Handle notifications from a Mattermost /command."""
http_method_names = ["post"]
def post(self, request, *args, **kwargs):
"""Handle an inbound HTTP POST request representing a user-issued /command."""
valid, reason = verify_signature(request)
if not valid:
return HttpResponse(status=401, reason=reason)
command = request.POST.get("command")
if not command:
return HttpResponse("No command specified")
command = command.replace("/", "")
params = request.POST.get("text", "")
context = {
"request_scheme": request.scheme,
"request_host": request.get_host(),
"org_id": request.POST.get("team_id"),
"org_name": request.POST.get("team_domain"),
"channel_id": request.POST.get("channel_id"),
"channel_name": request.POST.get("channel_name"),
"user_id": request.POST.get("user_id"),
"user_name": request.POST.get("user_name"),
"response_url": request.POST.get("response_url"),
"trigger_id": request.POST.get("trigger_id"),
"integration_url": request.build_absolute_uri("/api/plugins/chatops/mattermost/interaction/"),
"token": request.headers.get("Authorization"),
}
try:
command, subcommand, params = parse_command_string(f"{command} {params}")
except ValueError as err:
logger.error("%s", err)
return HttpResponse(status=400, reason=f"Error: {err} encountered on '{command} {params}'")
registry = get_commands_registry()
if command not in registry:
MattermostDispatcher(context).send_markdown(commands_help(prefix="/"))
return HttpResponse()
MattermostDispatcher(context).send_busy_indicator()
return check_and_enqueue_command(registry, command, subcommand, params, context, MattermostDispatcher)
@method_decorator(csrf_exempt, name="dispatch")
class MattermostInteractionView(View):
"""Handle notifications resulting from a Mattermost interactive block."""
http_method_names = ["post"]
# pylint: disable=too-many-locals,too-many-return-statements,too-many-branches,too-many-statements
def post(self, request, *args, **kwargs):
"""Handle an inbound HTTP POST request representing a user interaction with a UI element."""
valid, reason = verify_signature(request)
if not valid:
return HttpResponse(status=401, reason=reason)
# For some reason Integration Messages from Mattermost do not show up in POST.items()
# in these cases, we have to load the request.body
try:
data = json.loads(request.body)
except ValueError as err:
logger.info("No request body to decode, setting data to empty dict. Error: %s", err)
data = {}
if request.POST.dict():
data.update(request.POST)
context = {
"org_id": data.get("team_id"),
"org_name": data.get("team_domain"),
"channel_id": data.get("channel_id"),
"channel_name": data.get("channel_name"),
"user_id": data.get("user_id"),
"user_name": data.get("user_name"),
"response_url": data.get("response_url"),
"trigger_id": data.get("trigger_id"),
"post_id": data.get("post_id"),
"request_scheme": request.get_host(),
"request_host": request.get_host(),
"integration_url": request.build_absolute_uri("/api/plugins/chatops/mattermost/interaction/"),
}
# Check for channel_name if channel_id is present
mm_url = settings.PLUGINS_CONFIG["nautobot_chatops"]["mattermost_url"]
token = settings.PLUGINS_CONFIG["nautobot_chatops"]["mattermost_api_token"]
if context["channel_name"] is None and context["channel_id"] is not None:
# Build a Mattermost Client Object
mm_client = Driver(
{
"url": mm_url,
"token": token,
}
)
# Get the channel information from Mattermost API
channel_info = mm_client.get(f'/channels/{context["channel_id"]}')
# Assign the Channel name out of the conversations info end point
context["channel_name"] = channel_info["name"]
if context["user_name"] is None and context["user_id"] is not None:
# Build a Mattermost Client Object
mm_client = Driver(
{
"url": mm_url,
"token": token,
}
)
# Get the channel information from Mattermost API
user_info = mm_client.get(f'/users/{context["user_id"]}')
# Assign the Channel name out of the conversations info end point
context["user_name"] = user_info["username"]
# Block action triggered by a non-modal interactive component
if data.get("context"):
action = data.get("context")
action_id = action.get("action_id", "")
context["token"] = action.get("token", "")
if action["type"] == "static_select":
value = action.get("selected_option", "")
elif action["type"] == "button":
value = action.get("value")
else:
logger.error(f"Unhandled action type {action['type']} in Mattermost Dispatcher")
return HttpResponse(status=500)
selected_value = f"'{value}'"
elif data.get("submission"):
# View submission triggered from a modal dialog
logger.info("Submission triggered from a modal dialog")
values = data.get("submission")
context["token"] = data.get("state")
callback_id = data.get("callback_id")
logger.debug(json.dumps(data, indent=2))
# Handling for multiple fields. This will be used when the multi_input_dialog() method of the Mattermost
# Dispatcher class is utilized.
if len(values) > 1:
selected_value = ""
# sometimes in the case of back-to-back dialogs there will be
# parameters included in the callback_id. Below parses those
# out and adds them to selected_value.
try:
cmds = shlex.split(callback_id)
except ValueError as err:
logger.error("Mattermost: %s", err)
return HttpResponse(status=400, reason=f"Error: {err} encountered when processing {callback_id}")
for i, cmd in enumerate(cmds):
if i == 2:
selected_value += f"'{cmd}'"
elif i > 2:
selected_value += f" '{cmd}'"
action_id = f"{cmds[0]} {cmds[1]}"
sorted_params = sorted(values.keys())
for blk_id in sorted_params:
selected_value += f" '{values[blk_id]}'"
# Original un-modified single-field handling below
else:
action_id = sorted(values.keys())[0]
selected_value = values[action_id]
else:
return HttpResponse(status=500, reason="I didn't understand that notification.")
if settings.PLUGINS_CONFIG["nautobot_chatops"].get("delete_input_on_submission"):
# Delete the interactive element since it's served its purpose
# Does not work for Ephemeral Posts.
if context["post_id"] is not None:
MattermostDispatcher(context).delete_message(context["post_id"])
if action_id == "action" and selected_value == "cancel":
# Nothing more to do
return HttpResponse()
logger.info(f"action_id: {action_id}, selected_value: {selected_value}")
try:
command, subcommand, params = parse_command_string(f"{action_id} {selected_value}")
except ValueError as err:
logger.error("%s", err)
return HttpResponse(
status=400, reason=f"Error: {err} encountered on command '{action_id} {selected_value}'"
)
logger.info(f"command: {command}, subcommand: {subcommand}, params: {params}")
registry = get_commands_registry()
if command not in registry:
MattermostDispatcher(context).send_markdown(commands_help())
return HttpResponse()
MattermostDispatcher(context).send_busy_indicator()
return check_and_enqueue_command(registry, command, subcommand, params, context, MattermostDispatcher)
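# Hypothetical sketch (placeholder token; requires a configured Django test
# database): verify_signature() only needs the Authorization header, so it can
# be exercised in isolation with Django's RequestFactory.
def _signature_sketch():
    from django.test import RequestFactory
    request = RequestFactory().post(
        "/api/plugins/chatops/mattermost/interaction/",
        HTTP_AUTHORIZATION="Token abc123",  # placeholder; must match a CommandToken
    )
    return verify_signature(request)  # (False, "Incorrect signature") unless stored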
| 42.681481
| 117
| 0.622527
| 1,294
| 11,524
| 5.397991
| 0.220247
| 0.019041
| 0.020043
| 0.015032
| 0.459699
| 0.391267
| 0.358053
| 0.338869
| 0.315676
| 0.315676
| 0
| 0.003464
| 0.273516
| 11,524
| 269
| 118
| 42.840149
| 0.830865
| 0.171295
| 0
| 0.365591
| 0
| 0
| 0.199219
| 0.01836
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016129
| false
| 0
| 0.075269
| 0
| 0.204301
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
244c391e531a230265dcfe8e38f79f69557d41fd
| 2,401
|
py
|
Python
|
discordspy/utils.py
|
NextChai/discordspy
|
a6f4f2f1c9ad9facfdb92e653d0d9655606878fa
|
[
"MIT"
] | null | null | null |
discordspy/utils.py
|
NextChai/discordspy
|
a6f4f2f1c9ad9facfdb92e653d0d9655606878fa
|
[
"MIT"
] | null | null | null |
discordspy/utils.py
|
NextChai/discordspy
|
a6f4f2f1c9ad9facfdb92e653d0d9655606878fa
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright (c) 2021 NextChai
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import annotations
from .errors import IncorrectType
__all__ = (
'Post',
)
class Post:
"""A custom class to organise different posting types."""
__slots__ = ('_is_auto', '_intervals', 'interval')
def __init__(self, post_type: str, interval: int = 1800):
self._is_auto = False
self._intervals = False
if post_type not in {'auto', 'intervals'}:
raise IncorrectType(f'type {post_type} can not be used with class Post.')
if post_type == "auto":
self.is_auto = True
elif post_type == "intervals":
self.interval = interval
@property
def is_auto(self) -> bool:
"""":class:`bool`: Denotes if the Post is automatic."""
return self._is_auto
@is_auto.setter
def is_auto(self, new: bool) -> None:
self._is_auto = new
@property
def uses_intervals(self) -> bool:
"""":class:`bool`: Denotes if the Post uses intervals."""
return not self.is_auto
@classmethod
def auto(cls):
return cls('auto')
@classmethod
def intervals(cls, seconds: int = 0, minutes: int = 0, hours: int = 0):
interval = seconds + minutes*60 + hours*3600
return cls("intervals", interval=interval)
| 33.816901
| 85
| 0.68055
| 327
| 2,401
| 4.883792
| 0.440367
| 0.033813
| 0.031309
| 0.016281
| 0.041327
| 0.041327
| 0.041327
| 0.041327
| 0
| 0
| 0
| 0.009315
| 0.2399
| 2,401
| 70
| 86
| 34.3
| 0.865753
| 0.507289
| 0
| 0.125
| 0
| 0
| 0.101724
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1875
| false
| 0
| 0.0625
| 0.03125
| 0.4375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
244f5d19d999147c55383cd2348a0a544bf75692
| 5,007
|
py
|
Python
|
dump/gui_v1.0.py
|
axcelerateai/Urdu-Handwriting-Recognition-using-Deep-Learning
|
38c6e4676b393feac380781cf37ce1abf8051132
|
[
"MIT"
] | 3
|
2020-10-09T13:30:47.000Z
|
2021-11-03T17:55:47.000Z
|
dump/gui_v1.0.py
|
axcelerateai/Urdu-Handwriting-Recognition-using-Deep-Learning
|
38c6e4676b393feac380781cf37ce1abf8051132
|
[
"MIT"
] | 2
|
2020-06-12T20:03:56.000Z
|
2020-06-16T03:53:17.000Z
|
dump/gui_v1.0.py
|
axcelerateai/Urdu-Handwriting-Recognition-using-Deep-Learning
|
38c6e4676b393feac380781cf37ce1abf8051132
|
[
"MIT"
] | 3
|
2021-02-25T03:30:32.000Z
|
2022-02-07T20:04:19.000Z
|
import os
import webbrowser
import numpy as np
import csv
import traceback
import arabic_reshaper
from tkinter import *
from tkinter import messagebox
from tkinter.filedialog import askopenfilename
from PIL import ImageTk, Image
from run_model import create_and_run_model
def make_menu(w):
global the_menu
the_menu = Menu(w, tearoff=0)
the_menu.add_command(label="Cut")
the_menu.add_command(label="Copy")
the_menu.add_command(label="Paste")
def show_menu(e):
w = e.widget
the_menu.entryconfigure("Cut", command=lambda: w.event_generate("<<Cut>>"))
the_menu.entryconfigure("Copy", command=lambda: w.event_generate("<<Copy>>"))
the_menu.entryconfigure("Paste", command=lambda: w.event_generate("<<Paste>>"))
the_menu.tk.call("tk_popup", the_menu, e.x_root, e.y_root)
def main_window():
window = Tk()
make_menu(window)
window.title("Urdu Handwriting Recognition System")
window.geometry('1000x1000')
title = Label(window, text="Urdu Handwriting Recognition System", font=("Arial Bold", 30))
title.grid(column=1, row=0, columnspan=10)
window.grid_rowconfigure(0, minsize=100)
window.grid_rowconfigure(1, minsize=70)
window.grid_columnconfigure(0, weight=1)
window.grid_columnconfigure(11, weight=1)
window.grid_columnconfigure(11, weight=1)
col_path = 3
row_path = 2
display_path = Label(window, text="Enter Image Path: ")
display_path.grid(column=col_path, row=row_path)
window.grid_rowconfigure(row_path+1, minsize=50)
window.grid_rowconfigure(row_path+2, minsize=100)
display_image = Label(window, image='')
display_image.grid(column=col_path-2, row=row_path+2, columnspan=10)
display_raw_output = Label(window, text='', font=("Arial Bold", 15))
display_raw_output.grid(column= col_path-2, row=row_path+3, columnspan=10)
window.grid_rowconfigure(row_path+3, minsize=60)
#display_output = Label(window, text='', font=("Arial Bold", 15))
display_output = Entry(window, width=40, justify='right')
display_output.bind_class("Entry", "<Button-3><ButtonRelease-3>", show_menu)
display_output.grid(column= col_path-2, row=row_path+4, columnspan=10)
get_image_path = Entry(window,width=40)
get_image_path.bind_class("Entry", "<Button-3><ButtonRelease-3>", show_menu)
get_image_path.grid(column=col_path+1, row=row_path)
get_image_path.focus()
def select():
image_path = askopenfilename()
get_image_path.delete(0, END)
get_image_path.insert(0, image_path)
img = ImageTk.PhotoImage(Image.open(image_path))
display_image.configure(image = img)
display_image.image = img
display_raw_output.configure(text = '')
#display_output.configure(text = '')
display_output.delete(0, END)
def clicked():
image_path = get_image_path.get()
if image_path == '':
messagebox.showinfo("Error", "Select an image")
elif not os.path.isfile(image_path):
messagebox.showinfo("Error", "File does not exist")
else:
img = ImageTk.PhotoImage(Image.open(image_path))
display_image.configure(image = img)
display_image.image = img
output = create_and_run_model('CONV_BLSTM_CTC', None, image_path)
raw_output, join_char = get_urdu_output(output)
with open("output.txt", "w") as text_file:
text_file.write("%s" % join_char)
webbrowser.open("output.txt")
#with open("output.txt", "r") as text_file:
# join_char = text_file.read().replace('\n', '')
display_raw_output.configure(text = raw_output)
#display_output.configure(text = join_char)
display_output.delete(0, END)
display_output.insert(0, join_char)
#with open("output.csv", mode='w') as f:
# f_w = csv.writer(f, delimiter=',')
# f_w.writerow(join_char)
browse = Button(window, text="Browse", command=select)
browse.grid(column=col_path+2, row=row_path)
recognize = Button(window, text="Recognize", command=clicked)
recognize.grid(column=col_path+3, row=row_path)
window.mainloop()
def get_urdu_output(output):
lt_file = 'data/segmented_cc/labels/lt_char.csv'
lt = {}
with open(lt_file, 'r', encoding='utf8') as file:
text = csv.reader(file)
for row in text:
lt[int(row[1])] = row[0]
urdu_output = [lt[output[i]] for i in range(len(output)-1, -1, -1)]
join_char = ''
for i in range(len(urdu_output)-1, -1, -1):
#for i in range(0, len(urdu_output)):
join_char += urdu_output[i][0]
if urdu_output[i][2:] == 'final' or urdu_output[i][2:] == 'isolated':
join_char += ' '
#join_char = arabic_reshaper.reshape(join_char)
return urdu_output, join_char
if __name__ == "__main__":
main_window()
| 35.764286
| 94
| 0.656481
| 688
| 5,007
| 4.550872
| 0.236919
| 0.045992
| 0.029064
| 0.038007
| 0.327371
| 0.174066
| 0.174066
| 0.174066
| 0.132226
| 0.055573
| 0
| 0.02207
| 0.212702
| 5,007
| 139
| 95
| 36.021583
| 0.772197
| 0.083683
| 0
| 0.09901
| 0
| 0
| 0.087593
| 0.019659
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059406
| false
| 0
| 0.108911
| 0
| 0.178218
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
245279e99eccc5a89222148405717013b6bf5d45
| 4,042
|
py
|
Python
|
blaspy/level_2/trmv.py
|
nicholas-moreles/blaspy
|
c4af6258e17dd996c4b6d90bbaae15b31b8702b4
|
[
"BSD-3-Clause"
] | 4
|
2015-01-25T12:44:44.000Z
|
2022-03-19T08:36:19.000Z
|
blaspy/level_2/trmv.py
|
nicholas-moreles/blaspy
|
c4af6258e17dd996c4b6d90bbaae15b31b8702b4
|
[
"BSD-3-Clause"
] | 7
|
2015-01-20T13:35:39.000Z
|
2015-05-31T17:11:50.000Z
|
blaspy/level_2/trmv.py
|
nicholas-moreles/blaspy
|
c4af6258e17dd996c4b6d90bbaae15b31b8702b4
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Copyright (c) 2014-2015, The University of Texas at Austin.
All rights reserved.
This file is part of BLASpy and is available under the 3-Clause
BSD License, which can be found in the LICENSE file at the top-level
directory or at http://opensource.org/licenses/BSD-3-Clause
"""
from ..helpers import (get_vector_dimensions, get_square_matrix_dimension, get_cblas_info,
check_equal_sizes, convert_uplo, convert_trans, convert_diag, ROW_MAJOR)
from ctypes import c_int, POINTER
def trmv(A, x, uplo='u', trans_a='n', diag='n', lda=None, inc_x=1):
"""
Perform a triangular matrix-vector multiplication operation.
x := A * x
where A is a triangular matrix and x is a general column vector.
The 'uplo' argument indicates whether the lower or upper triangle of A is to be referenced and
updated by the operation. The 'trans_a' argument allows the computation to proceed as if A is
transposed. The 'diag' argument indicates whether the diagonal of A is unit or non-unit.
Vector x can be passed in as either row or column vector. If necessary, an implicit
transposition occurs.
Args:
A: 2D NumPy matrix or ndarray representing matrix A
x: 2D NumPy matrix or ndarray representing vector x
--optional arguments--
uplo: 'u' if the upper triangle of A is to be used
'l' if the lower triangle A is to be used
< default is 'u' >
trans_a: 'n' if the operation is to proceed normally
't' if the operation is to proceed as if A is transposed
< default is 'n' >
diag: 'n' if the diagonal of A is non-unit
'u' if the diagonal of A is unit
< default is 'n' >
lda: leading dimension of A (must be >= # of cols in A)
< default is the number of columns in A >
inc_x: stride of x (increment for the elements of x)
< default is 1 >
Returns:
Vector x (which is also overwritten)
Raises:
ValueError: if any of the following conditions occur:
- A or x is not a 2D NumPy ndarray or NumPy matrix
- A and x do not have the same dtype or that dtype is not supported
- A is not a square matrix
- x is not a vector
- the effective length of x does not equal the dimension of A
- uplo is not equal to one of the following: 'u', 'U', 'l', 'L'
- trans_a is not equal to one of the following: 'n', 'N', 't', 'T'
- diag is not equal to one of the following: 'n', 'N', 'u', 'U'
"""
# get the dimensions of the parameters
m_x, n_x, x_length = get_vector_dimensions('x', x, inc_x)
dim_A = get_square_matrix_dimension('A', A)
# assign a default value to lda if necessary (assumes row-major order)
if lda is None:
lda = dim_A
# ensure the parameters are appropriate for the operation
check_equal_sizes('A', dim_A, 'x', x_length)
# convert to appropriate CBLAS values
cblas_uplo = convert_uplo(uplo)
cblas_trans_a = convert_trans(trans_a)
cblas_diag = convert_diag(diag)
# determine which CBLAS subroutine to call and which ctypes data type to use
cblas_func, data_type = get_cblas_info('trmv', (A.dtype, x.dtype))
# create a ctypes POINTER for each vector and matrix
ctype_x = POINTER(data_type * n_x * m_x)
ctype_A = POINTER(data_type * dim_A * dim_A)
# call CBLAS using ctypes
cblas_func.argtypes = [c_int, c_int, c_int, c_int, c_int, ctype_A, c_int, ctype_x, c_int]
cblas_func.restype = None
cblas_func(ROW_MAJOR, cblas_uplo, cblas_trans_a, cblas_diag, dim_A,
A.ctypes.data_as(ctype_A), lda, x.ctypes.data_as(ctype_x), inc_x)
return x # x is also overwritten
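# Hypothetical usage sketch (assumes BLASpy and its CBLAS backend are
# installed): with the default uplo='u', only the upper triangle of A is read.
def _trmv_sketch():
    import numpy as np
    A = np.array([[1., 2.],
                  [0., 3.]])  # upper-triangular 2x2 matrix
    x = np.array([[1.],
                  [1.]])      # column vector
    return trmv(A, x)         # x is overwritten with A @ x -> [[3.], [3.]]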
| 42.104167
| 98
| 0.621475
| 622
| 4,042
| 3.913183
| 0.278135
| 0.013558
| 0.010271
| 0.013147
| 0.153246
| 0.14092
| 0.071487
| 0.032046
| 0
| 0
| 0
| 0.006803
| 0.309005
| 4,042
| 96
| 99
| 42.104167
| 0.864662
| 0.676645
| 0
| 0
| 0
| 0
| 0.009892
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.1
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2453cf34dc912ae247fbb288fdddd145a7ca95cb
| 13,822
|
py
|
Python
|
Neural Style Transfer/model.py
|
KingJamesSong/FastDifferentiableMatSqrt
|
ab5278195e25df0192096581e0c0c288e0c66bd2
|
[
"MIT"
] | 15
|
2022-01-21T11:57:01.000Z
|
2022-03-27T07:22:16.000Z
|
Neural Style Transfer/model.py
|
KingJamesSong/FastDifferentiableMatSqrt
|
ab5278195e25df0192096581e0c0c288e0c66bd2
|
[
"MIT"
] | null | null | null |
Neural Style Transfer/model.py
|
KingJamesSong/FastDifferentiableMatSqrt
|
ab5278195e25df0192096581e0c0c288e0c66bd2
|
[
"MIT"
] | 1
|
2022-01-24T11:29:25.000Z
|
2022-01-24T11:29:25.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable  # used by the Discriminator loss terms below
from utils.torch_utils import *
class Content_Encoder(nn.Module):
def __init__(self, conv_dim=64, repeat_num=4, norm='in', activation='relu'):
super(Content_Encoder, self).__init__()
layers = []
layers += [ConvBlock(3, conv_dim, 7, 1, 3, norm=norm, activation=activation)] # H,W,3 => H,W,64
# Down-sampling layers
curr_dim = conv_dim
for i in range(2):
layers += [ConvBlock(curr_dim, curr_dim*2, 4, 2, 1, norm=norm, activation=activation)] # H,W,64 => H/2,W/2,128 => H/4,W/4,256
curr_dim = curr_dim * 2
# Bottleneck layers
for i in range(repeat_num):
layers += [ResidualBlock(dim=curr_dim, norm=norm, activation=activation)]
self.main = nn.Sequential(*layers)
self.curr_dim = curr_dim
def forward(self, x):
return self.main(x)
class Style_Encoder(nn.Module):
def __init__(self, conv_dim=64, n_group=32, norm='ln', activation='relu'):
super(Style_Encoder, self).__init__()
curr_dim = conv_dim
layers = []
layers += [ConvBlock(3, conv_dim, 7, 1, 3, norm='none', n_group=n_group, activation=activation)] # H,W,3 => H,W,64
# Down-sampling layers (dim*2)
curr_dim = conv_dim
for i in range(2):
layers += [ConvBlock(curr_dim, curr_dim*2, 4, 2, 1, norm=norm, n_group=n_group, activation=activation)] # H,W,64 => H/2,W/2,128 => H/4,W/4,256
curr_dim = curr_dim * 2
# Down-sampling layers (keep dim)
for i in range(2): # original: 2
layers += [ConvBlock(curr_dim, curr_dim, 4, 2, 1, norm=norm, n_group=n_group, activation=activation)] # H/4,W/4,256, H/8,W/8,256, H/16,W/16,256
layers += [nn.AdaptiveAvgPool2d(1)] # H/16,W/16,256 => 1,1,256
self.main = nn.Sequential(*layers)
self.curr_dim = curr_dim
def forward(self, x):
return self.main(x)
class MLP(nn.Module):
def __init__(self, input_dim, output_dim, dim, num_block=1, norm='none', n_group=32, activation='relu'):
super(MLP, self).__init__()
layers = []
curr_dim = dim
layers += [LinearBlock(input_dim, curr_dim, norm=norm, n_group=n_group, activation=activation)]
for _ in range(num_block):
layers += [LinearBlock(curr_dim, curr_dim, norm=norm, n_group=n_group, activation=activation)]
layers += [LinearBlock(curr_dim, output_dim, norm='none', activation='none')] # no output activations
self.main = nn.Sequential(*layers)
def forward(self, x):
return self.main(x.view(x.size(0), -1))
class WCT(nn.Module):
def __init__(self, n_group, device, input_dim, mlp_dim, bias_dim, mask, w_alpha=0.4):
super(WCT, self).__init__()
self.G = n_group
self.device = device
self.alpha = nn.Parameter(torch.ones(1)-w_alpha)
self.mask = mask
self.sqrt_root = MPA_Lya.apply
self.sqrt_root_inv = MPA_Lya_Inv.apply
def forward(self, c_A, s_B):
return self.wct(c_A, s_B)
def wct(self, c_A, s_B):
B,C,H,W = c_A.size()
n_mem = C // self.G # 32 if G==8
eps = 1e-5
#Whitening Transform
c_A_ = c_A.view(self.G*B, n_mem, -1) # B,C,H,W => C,B,H,W => GB,C//G,HW
c_A_mean = torch.mean(c_A_, dim=2, keepdim=True)
c_A_ = c_A_ - c_A_mean # GB,C//G,HW
cov_c = torch.bmm(c_A_, c_A_.transpose(1,2)).div(H * W - 1) + eps*torch.eye(n_mem).unsqueeze(0).to(self.device) # GB,C//G,C//G
cov_c_inv_sqrt=self.sqrt_root_inv(cov_c)
whitened = cov_c_inv_sqrt.bmm(c_A_)
#Coloring Transform
s_B_ = s_B.view(self.G*B, n_mem, -1) # B,C,H,W => C,B,H,W => GB,C//G,HW
s_B_mean = torch.mean(s_B_, dim=2, keepdim=True)
s_B_ = s_B_ - s_B_mean # GB,C//G,HW
cov_b = torch.bmm(s_B_, s_B_.transpose(1,2)).div(H * W - 1) + eps * torch.eye(n_mem).unsqueeze(0).to(self.device) # GB,C//G,C//G
cov_b_sqrt=self.sqrt_root(cov_b)
colored_B = cov_b_sqrt.bmm(whitened).view(B, C, H, W)
return self.alpha * (colored_B + s_B_mean.view(B,C,1,1)) + (1 - self.alpha) * c_A
class Decoder(nn.Module):
def __init__(self, input_dim, mask, n_group, bias_dim, mlp_dim, repeat_num=4,
norm='ln', device=None):
super(Decoder, self).__init__()
curr_dim = input_dim
# Bottleneck layers
self.resblocks = nn.ModuleList([ResidualBlock(dim=curr_dim, norm='none', n_group=n_group) for i in range(repeat_num)])
self.gdwct_modules = nn.ModuleList([WCT(n_group, device, input_dim, mlp_dim, bias_dim, mask) for i in range(repeat_num+1)])
# Up-sampling layers
layers = []
for i in range(2):
layers += [Upsample(scale_factor=2, mode='nearest')]
layers += [ConvBlock(curr_dim, curr_dim//2, 5, 1, 2, norm=norm, n_group=n_group)]
curr_dim = curr_dim // 2
layers += [ConvBlock(curr_dim, 3, 7, 1, 3, norm='none', activation='tanh')]
self.main = nn.Sequential(*layers)
def forward(self, c_A, s_B):
# Multi-hops
for i, resblock in enumerate(self.resblocks):
if i == 0:
c_A = self.gdwct_modules[i](c_A, s_B)
c_A = resblock(c_A)
c_A = self.gdwct_modules[i+1](c_A, s_B)
return self.main(c_A)
class Generator(nn.Module):
"""Generator network."""
def __init__(self, conv_dim=64, repeat_num=8, mask=None, n_group=16,
mlp_dim=256, bias_dim=512, content_dim=256, device=None):
super(Generator, self).__init__()
self.c_encoder = Content_Encoder(conv_dim, repeat_num//2, norm='in',activation='relu')
self.s_encoder = Style_Encoder(conv_dim, n_group, norm= 'gn', activation='relu')
self.decoder = Decoder(content_dim, mask, n_group, bias_dim, mlp_dim, repeat_num//2, norm='ln', device=device)
def forward(self, c_A, s_B_):
return self.decoder(c_A, s_B_)
class ResidualBlock(nn.Module):
"""Residual Block with instance normalization."""
def __init__(self, dim, norm='in', n_group=32, activation='relu', use_affine=True):
super(ResidualBlock, self).__init__()
layers = []
layers += [ConvBlock(dim, dim, 3, 1, 1, norm=norm, n_group=n_group, activation=activation, use_affine=use_affine)]
layers += [ConvBlock(dim, dim, 3, 1, 1, norm=norm, n_group=n_group, activation='none', use_affine=use_affine)]
self.main = nn.Sequential(*layers)
def forward(self, x):
return x + self.main(x)
class ConvBlock(nn.Module):
def __init__(self, input_dim, output_dim, k, s, p, dilation=False, norm='in', n_group=32,
activation='relu', pad_type='mirror', use_affine=True, use_bias=True):
super(ConvBlock, self).__init__()
# Init Normalization
if norm == 'in':
self.norm = nn.InstanceNorm2d(output_dim, affine=use_affine, track_running_stats=True)
elif norm == 'ln':
# LayerNorm(output_dim, affine=use_affine)
self.norm = nn.GroupNorm(1, output_dim)
elif norm == 'bn':
self.norm = nn.BatchNorm2d(output_dim)
elif norm == 'gn':
self.norm = nn.GroupNorm(n_group, output_dim)
elif norm == 'none':
self.norm = None
# Init Activation
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.01, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU(num_parameters=1, init=0.25)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'sigmoid':
self.activation = nn.Sigmoid()
elif activation == 'none':
self.activation = None
# Init pad-type
if pad_type == 'mirror':
self.pad = nn.ReflectionPad2d(p)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(p)
# initialize convolution
if dilation:
self.conv = nn.Conv2d(input_dim, output_dim, k, s, dilation=p, bias=use_bias)
else:
self.conv = nn.Conv2d(input_dim, output_dim, k, s, bias=use_bias)
def forward(self, x):
x = self.conv(self.pad(x))
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
class LinearBlock(nn.Module):
def __init__(self, input_dim, output_dim, norm='ln', n_group=32, activation='relu', use_affine=True):
super(LinearBlock, self).__init__()
use_bias = True
# initialize fully connected layer
self.fc = nn.Linear(input_dim, output_dim, bias=use_bias)
# Init Normalization
if norm == 'ln':
# self.norm = LayerNorm(output_dim, affine=use_affine)
self.norm = nn.GroupNorm(1, output_dim)
elif norm == 'gn':
self.norm = nn.GroupNorm(n_group, output_dim)
elif norm == 'none':
self.norm = None
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.01, inplace=True)
elif activation == 'prelu':
self.activation = nn.PReLU(num_parameters=1, init=0.25)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'sigmoid':
self.activation = nn.Sigmoid()
elif activation == 'none':
self.activation = None
def forward(self, x):
out = self.fc(x)
if self.norm:
out = self.norm(out)
if self.activation:
out = self.activation(out)
return out
class Upsample(nn.Module):
def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=None):
super(Upsample, self).__init__()
self.size = size
self.scale_factor = scale_factor
self.mode = mode
self.align_corners = align_corners
def forward(self, input):
return F.interpolate(input, self.size, self.scale_factor, self.mode, self.align_corners)
def extra_repr(self):
if self.scale_factor is not None:
info = 'scale_factor=' + str(self.scale_factor)
else:
info = 'size=' + str(self.size)
info += ', mode=' + self.mode
return info
class Discriminator(nn.Module):
# Multi-scale discriminator architecture
def __init__(self, input_dim, params):
super(Discriminator, self).__init__()
self.n_layer = params['N_LAYER']
self.gan_type = params['GAN_TYPE']
self.dim = params['FIRST_DIM']
self.norm = params['NORM']
self.activ = params['ACTIVATION']
self.num_scales = params['NUM_SCALES']
self.pad_type = params['PAD_TYPE']
self.input_dim = input_dim
self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
self.cnns = nn.ModuleList()
for _ in range(self.num_scales):
self.cnns.append(self._make_net())
def _make_net(self):
dim = self.dim
cnn_x = []
cnn_x += [ConvBlock(self.input_dim, dim, 4, 2, 1, norm='none', activation=self.activ, pad_type=self.pad_type)]
for i in range(self.n_layer - 1):
cnn_x += [ConvBlock(dim, dim * 2, 4, 2, 1, norm=self.norm, activation=self.activ, pad_type=self.pad_type)]
dim *= 2
cnn_x += [nn.Conv2d(dim, 1, 1, 1, 0)]
cnn_x = nn.Sequential(*cnn_x)
return cnn_x
def forward(self, x):
outputs = []
for model in self.cnns:
outputs.append(model(x))
x = self.downsample(x)
return outputs
def calc_dis_loss(self, input_fake, input_real):
# calculate the loss to train D
outs0 = self.forward(input_fake)
outs1 = self.forward(input_real)
loss = 0
for it, (out0, out1) in enumerate(zip(outs0, outs1)):
if self.gan_type == 'lsgan':
loss += torch.mean((out0 - 0)**2) + torch.mean((out1 - 1)**2)
elif self.gan_type == 'nsgan':
all0 = Variable(torch.zeros_like(out0.data).cuda(), requires_grad=False)
all1 = Variable(torch.ones_like(out1.data).cuda(), requires_grad=False)
loss += torch.mean(F.binary_cross_entropy(F.sigmoid(out0), all0) +
F.binary_cross_entropy(F.sigmoid(out1), all1))
else:
assert 0, "Unsupported GAN type: {}".format(self.gan_type)
return loss
def calc_gen_loss(self, input_fake):
# calculate the loss to train G
outs0 = self.forward(input_fake)
loss = 0
for it, (out0) in enumerate(outs0):
if self.gan_type == 'lsgan':
loss += torch.mean((out0 - 1)**2) # LSGAN
elif self.gan_type == 'nsgan':
all1 = Variable(torch.ones_like(out0.data).cuda(), requires_grad=False)
loss += torch.mean(F.binary_cross_entropy(F.sigmoid(out0), all1))
else:
assert 0, "Unsupported GAN type: {}".format(self.gan_type)
return loss
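# Hypothetical smoke-test sketch (arbitrary hyper-parameters): Discriminator
# depends only on ConvBlock and torch.nn, and returns one patch map per scale.
def _discriminator_sketch():
    params = {'N_LAYER': 2, 'GAN_TYPE': 'lsgan', 'FIRST_DIM': 16, 'NORM': 'in',
              'ACTIVATION': 'lrelu', 'NUM_SCALES': 2, 'PAD_TYPE': 'zero'}
    disc = Discriminator(input_dim=3, params=params)
    outs = disc(torch.randn(1, 3, 64, 64))
    return [o.shape for o in outs]  # one tensor per scale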
| 39.491429
| 156
| 0.576183
| 1,921
| 13,822
| 3.931286
| 0.123373
| 0.025424
| 0.017214
| 0.018538
| 0.519862
| 0.451271
| 0.416976
| 0.395392
| 0.372484
| 0.320313
| 0
| 0.024532
| 0.292215
| 13,822
| 350
| 157
| 39.491429
| 0.747419
| 0.065186
| 0
| 0.329502
| 0
| 0
| 0.027381
| 0
| 0
| 0
| 0
| 0
| 0.007663
| 1
| 0.103448
| false
| 0
| 0.019157
| 0.02682
| 0.226054
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2455cb80e1aaf2ba0d39b50ac976cccb541ad91b
| 13,355
|
py
|
Python
|
main.py
|
hunterowens/frankenstein
|
36305cad0ab6552597c707c813d20eaa3ad38f81
|
[
"Apache-2.0"
] | 2
|
2018-10-17T02:27:59.000Z
|
2019-03-12T00:58:48.000Z
|
main.py
|
hunterowens/frankenstein
|
36305cad0ab6552597c707c813d20eaa3ad38f81
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
hunterowens/frankenstein
|
36305cad0ab6552597c707c813d20eaa3ad38f81
|
[
"Apache-2.0"
] | null | null | null |
import json
import pythonosc
import argparse
import math
import datetime
import pickle  # used for the default API response fallback in get_api_interact_data()
from pythonosc import dispatcher, osc_server, udp_client, osc_message_builder
import requests
from collections import OrderedDict
from statistics import mean
## added variables to change the ip and port easily
## testing if Git works with ST
ip_osc = '192.168.1.255'
##ip_osc = '192.168.0.255'
ip_osc_server='0.0.0.0'
ip_osc_editor='196.168.1.255'
## ip_osc = '10.253.0.255'
port_server = 7007
port_client = 7007
port_client_editor = 7007
api_url = "http://frankenstein.hunterowens.net/"
## Some comments
current_state = OrderedDict()
current_state["/state"] = "calm"
current_state["/action"] = "start"
current_state["/sentiment"] = 0.0
current_state["/energy"] = 0.0
current_state["/focus"] = 0.0
def change_state(current_state, new_state):
"""
Change the current state dict to
reflect state param: new_state
return current_state
"""
current_state['/state'] = new_state
print("New State Set to {0}".format(current_state))
return current_state
def send_surface_state_to_ai(sentiment, energy, focus):
"""
sents the state / new talking as JSON to the AI
focus, energy, and sentiment are floats; unit is a string; words and parts are arrays of strings where the indexes correspond, so words[0] goes with parts[0]
"""
print("AI State is: {0} focus, {1} energy, and {2} sentiment".format(current_focus, current_energy, current_sentiment))
data = {
'focus': focus,
'sentiment': sentiment,
'energy': energy
}
r = requests.post(api_url + 'interact-surface', data = data)
return r
def send_answer_to_ai(answer):
"""
Sends an answer to the AI.
"""
print("Answer sending ", answer)
headers = {
"content-type": "application/json"
}
r = requests.post(api_url + 'interact',
json={'string': answer},
headers=headers)
return r
def get_api_interact_data():
"""
Gets state from AI, transforms into sentiment.
Returns the decoded JSON payload as a dict.
"""
print("Getting Data from AI")
r = requests.get(api_url + 'interact')
if r.status_code == 200:
data = r.json()
else:
data = pickle.load(open('./default-api-response.p','rb'))
print("Using Default Data: {}".format(data))
current_state['/state'] = data['state']
current_state['/sentiment'] = data['sentiment']
current_state['/focus'] = data['focus']
current_state['/energy'] = data['energy']
print('state updated')
return data
def setup():
"""
sets AI in waiting state
"""
r = requests.get(api_url + "reset")
print("AI Init State Waiting")
current_state = get_api_interact_data()
##pull text from AI
return None
def osc_dispatch(addr, msg, ip=ip_osc, port=port_client):
"""
Dispatches a message in state change over OSC to all listeners
"""
client = udp_client.UDPClient(ip, port,1)
## SimpleOSCClientRedux(client)
## client._sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
print("Sent {0} with {1} to {2} at {3}".format(addr, msg, ip, port))
builder = osc_message_builder.OscMessageBuilder(address=addr)
builder.add_arg(msg)
client.send(builder.build())
## print(client(addr, msg))
return None
def broadcast_state(state=current_state, ip=ip_osc, port=port_client):
"""
Broadcasts state
"""
print("Called Broadcast State Function")
#client = udp_client.UDPClient(ip, port,1)
#builder = osc_message_builder.OscMessageBuilder(address='/status')
#for k,v in state.items():
# builder.add_arg(v)
#client.send(builder.build())
#print("sent {0} to {1}:{2}".format(builder.args, ip, port))
return None
def broadcast_text(AItext):
"""
send a fixed piece of text from the AI
add delay into this OSC as second args
get text somehow
"""
osc_dispatch('/textnoquest', AItext, port=port_client_editor)
print("Updating State")
broadcast_state()
return None
def send_questions_to_line_editor():
"""
Sends data for display to Line Editor
"""
data = get_api_interact_data()['questions']
print("Called send question to the line editor")
#client = udp_client.UDPClient(ip_osc_editor, port_client_editor,1)
#builder = osc_message_builder.OscMessageBuilder(address='/textques')
#for k,v in data.items():
# builder.add_arg(v)
#builder.add_arg(.75)
#print('builder ', builder.address)
#client.send(builder.build())
#osc_dispatch('/textquest', .75, ip=ip_osc_server, port=port_client_editor)
# print("sent {0} to {1}:{2}".format(builder.args, ip_osc_editor, port_client_editor))
ip=ip_osc
port=port_client_editor
client = udp_client.UDPClient(ip, port,1)
print("Send Data to Line Editor {}:{}", ip, port)
builder = osc_message_builder.OscMessageBuilder(address='/textques')
for k,v in data.items():
print(k,v)
builder.add_arg(v)
client.send(builder.build())
print("sent {0} to {1}:{2}".format(builder.args, ip, port))
broadcast_state()
return None
surface_data = []
def surface_handler(unused_addr, args):
"""
Handles the surface messages, alts sentiment
Surface argument to be OSC String Formatted as followed
"sentiment: value; focus: value; energy: value"
"""
print("Got Surface Message")
try:
vals = json.loads(args)
## surfaces need to be directed to pi, look in js/machineConfiguration.json
except ValueError:
print("Unable to decode JSON from Surface")
exit()
current_sentiment = vals['sentiment']
current_focus = vals['focus']
current_energy = vals['energy']
current_unit = vals['unit']
print("From Surface Unit {0}".format(current_unit))
current_words = vals['words']
current_parts = vals['parts']
surface_data.append(vals)
return None
def reset_handler(unused_addr, args):
"""
Handles the reset from Editor
"""
## TODO: Implement
print("reset handler")
setup()
surface_data = []
current_state.update({'/action': 'start'})
broadcast_state()
current_state.update({'/action': 'expectant'})
return None
def answer_handler(unused_addr, args):
"""
Starts answering
"""
print("send answer to ai")
send_answer_to_ai(args)
current_state.update({'/action': 'thinking'})
broadcast_state()
## Call line editor
send_questions_to_line_editor()
return None
def refresh_handler(unused_addr, args):
"""
Refresh text
"""
print("Refreshing text")
send_questions_to_line_editor()
return None
def talking_handler(unused_addr, args):
"""
Starts talking
"""
print("talking handler")
current_state.update({'/action': 'talking'})
broadcast_state()
send_questions_to_line_editor()
return None
def question_handler(unused_addr, args):
"""
    puts the machine into the question state
"""
print('question handler')
current_state.update({'/action': 'question'})
broadcast_state()
return None
def thinking_handler(unused_addr, args):
    """
    puts the machine into the thinking state
    """
print('thinking handler')
current_state.update({'/action': 'thinking'})
broadcast_state()
return None
def silent_handler(unused_addr, args):
"""
silences the system after TTS
"""
print("silence handles")
current_state.update({'/action': 'expectant'})
broadcast_state()
return None
def surfacestart_handler(unused_addr, args):
"""
blasts start to the surfaces
"""
print("Blasting Start to the Surfaces")
osc_dispatch('/start-surface', 1)
def surfacereset_handler(unused_addr, args):
"""
blasts reset to surface
"""
print("Blasting Reset to the Surface")
osc_dispatch('/reset-surface', 1)
def surfaceclose_handler(unused_addr, args):
"""
blasts close to surface
"""
print("Blasting Close to the Surface")
osc_dispatch('/close-surface', 1)
    if surface_data:  # mean() raises on an empty sequence
        sentiment = mean([d['sentiment'] for d in surface_data])
        energy = mean([d['energy'] for d in surface_data])
        focus = mean([d['focus'] for d in surface_data])
        send_surface_state_to_ai(sentiment, energy, focus)
def end_handler(unused_addr, args):
"""
ends the show
"""
print("end of show")
current_state.update({'/action': 'end'})
broadcast_state()
return
print("some stupid stuff")
def osc_server(ip=ip_osc_server, port=port_server):
"""
sets up and runs the OSC server.
"""
dispatch = dispatcher.Dispatcher()
dispatch.map("/surface-sentiments", surface_handler)
dispatch.map("/reset", reset_handler)
dispatch.map("/silent", silent_handler)
dispatch.map("/answer", answer_handler)
dispatch.map("/refresh", refresh_handler)
dispatch.map("/talking", talking_handler)
dispatch.map("/end", end_handler)
dispatch.map("/question", question_handler)
dispatch.map("/thinking", thinking_handler)
dispatch.map("/startsurface", surfacestart_handler)
dispatch.map("/closesurface", surfaceclose_handler)
dispatch.map("/resetsurface", surfacereset_handler)
    ## TODO: Talk State -> trigger from AI to get new words/questions etc. from the AI on the server and then broadcast
server = pythonosc.osc_server.ThreadingOSCUDPServer(
(ip, port), dispatch)
print("Serving on {}".format(server.server_address))
server.serve_forever()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--ip", default=ip_osc,
help="The ip of the OSC server")
parser.add_argument("--port", type=int, default=port_server,
help="The port the OSC server is listening on")
parser.add_argument('--server', action='store_true', default=False,
help="Run in server mode")
parser.add_argument('--text', action='store_true', default=False,
help="broadcast the text questions")
parser.add_argument('--silent', action='store_true', default=False, help="end talking cue")
parser.add_argument('--talking', action='store_true', default=False, help="get talking cue")
parser.add_argument('--answer', action='store_true', default=False, help="get answer")
parser.add_argument('--reset', action='store_true', default=False, help="start over")
parser.add_argument('--refresh', action='store_true', default=False, help="refresh questions")
parser.add_argument('--end', action='store_true', default=False, help="end experience")
parser.add_argument('--question', action='store_true', default=False, help='test question handler')
parser.add_argument('--thinking', action='store_true', default=False, help='test thinking handler')
parser.add_argument('--surface', action='store_true', default=False, help="send dummy surface data")
parser.add_argument( "--set-state", dest="new-state", default='guarded',
help="set teh new state", metavar="STATE")
parser.add_argument('--startsurface', action='store_true', default=False, help="test surface start")
parser.add_argument('--resetsurface', action='store_true', default=False, help="test surface reset")
parser.add_argument('--closesurface', action='store_true', default=False, help="test surface stop")
args = parser.parse_args()
print("Got argument: {}".format(args))
if args.server:
print("Sending Server")
osc_server()
elif args.text:
print("Sending Text")
broadcast_questions()
elif args.silent:
print("Sending OSC Test Message")
osc_dispatch('/silent', 1)
elif args.talking:
print("Sending Talking")
osc_dispatch('/talking', "answer")
elif args.answer:
print("Sending Answer") ## verified with web app
osc_dispatch('/answer', "answer")
elif args.reset:
print("Reseting") ## verified with web app
osc_dispatch('/reset', 1)
elif args.refresh:
print("Refreshing questions")
osc_dispatch('/refresh', 1)
elif args.end:
print("End experience")
osc_dispatch('/end', 1)
elif args.question:
print("Sending a question")
osc_dispatch('/question', 1)
elif args.thinking:
print("Setting thinking")
osc_dispatch('/thinking', 1)
elif args.startsurface:
print("Telling surfaces to turn on")
osc_dispatch('/startsurface', 1)
elif args.closesurface:
print("Telling surfaces to close")
osc_dispatch('/closesurface', 1)
elif args.resetsurface:
print("Telling surfaces to start over")
osc_dispatch('/resetsurface', 1)
elif args.surface:
print("Sending Surface Message")
## foo = json.loads('{"number": 1.0, "other": 4.3}')
osc_dispatch('/surface-sentiments', '{"sentiment": 0.15, "focus": 0.65, "energy": -0.3, "unit": "test", "words": ["inspired", "anxious", "understanding"], "parts": ["hand", "eye", "head"]}')
elif vars(args)['new-state']:
print('changing state')
change_state(current_state, vars(args)['new-state'])
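A minimal companion sketch, not part of the original file: a listener built with the same python-osc calls this script uses, subscribing to the addresses it dispatches. The bind address and port are illustrative assumptions.

from pythonosc import dispatcher, osc_server

def log_handler(addr, *osc_args):
    # print every state/text message the controller above broadcasts
    print(addr, osc_args)

disp = dispatcher.Dispatcher()
disp.map("/textques", log_handler)
disp.map("/textnoquest", log_handler)
disp.map("/status", log_handler)
listener = osc_server.ThreadingOSCUDPServer(("0.0.0.0", 9001), disp)  # port is hypothetical
listener.serve_forever()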
| 31.646919
| 198
| 0.653463
| 1,678
| 13,355
| 5.043504
| 0.1764
| 0.038284
| 0.034149
| 0.036394
| 0.284533
| 0.203828
| 0.142148
| 0.083067
| 0.038166
| 0.038166
| 0
| 0.011121
| 0.212205
| 13,355
| 421
| 199
| 31.72209
| 0.793271
| 0.172969
| 0
| 0.143411
| 0
| 0.003876
| 0.236649
| 0.002256
| 0
| 0
| 0
| 0.004751
| 0
| 1
| 0.085271
| false
| 0
| 0.034884
| 0
| 0.189922
| 0.178295
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2456194d25cdc4ce95af04bc56ef7e00c8643248
| 635
|
py
|
Python
|
src/lit_tracking/utils/run_converter.py
|
Actis92/lit-tracking
|
9e7b243ba77c80ca260bff479e54db271d10c195
|
[
"MIT"
] | null | null | null |
src/lit_tracking/utils/run_converter.py
|
Actis92/lit-tracking
|
9e7b243ba77c80ca260bff479e54db271d10c195
|
[
"MIT"
] | 14
|
2021-11-01T08:48:23.000Z
|
2022-01-08T14:20:17.000Z
|
src/lit_tracking/utils/run_converter.py
|
Actis92/lit-tracking
|
9e7b243ba77c80ca260bff479e54db271d10c195
|
[
"MIT"
] | null | null | null |
import argparse
import importlib
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input_path', help='Path that contains data in MOT format', required=True)
parser.add_argument('--output_path', help='Path that will contains the output', required=True)
parser.add_argument('--converter_name', help='Name of the converter to use', required=True)
args = parser.parse_args()
module = importlib.import_module("lit_tracking.converter.mot_to_coco")
mot2coco = getattr(module, args.converter_name)(input_path=args.input_path, output_path=args.output_path)
mot2coco.convert()
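For reference, a hypothetical converter satisfying the contract run_converter.py assumes: a class in lit_tracking.converter.mot_to_coco that accepts input_path/output_path keyword arguments and exposes convert(). The class below is illustrative, not part of the library.

class EchoMot2Coco:
    # hypothetical stand-in for a real MOT-to-COCO converter
    def __init__(self, input_path, output_path):
        self.input_path = input_path
        self.output_path = output_path

    def convert(self):
        print("would convert {} -> {}".format(self.input_path, self.output_path))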
| 48.846154
| 109
| 0.754331
| 85
| 635
| 5.352941
| 0.435294
| 0.059341
| 0.112088
| 0.07033
| 0.127473
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00361
| 0.127559
| 635
| 12
| 110
| 52.916667
| 0.81769
| 0
| 0
| 0
| 0
| 0
| 0.286614
| 0.053543
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.272727
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2456670193398aaa3aace6b23615d552f25b4839
| 5,825
|
py
|
Python
|
test/test_npu/test_network_ops/test_neg.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1
|
2021-12-02T03:07:35.000Z
|
2021-12-02T03:07:35.000Z
|
test/test_npu/test_network_ops/test_neg.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1
|
2021-11-12T07:23:03.000Z
|
2021-11-12T08:28:13.000Z
|
test/test_npu/test_network_ops/test_neg.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestNeg(TestCase):
def cpu_op_exec(self, input1):
output = torch.neg(input1)
output = output.numpy()
return output
def npu_op_exec(self, input1):
output = torch.neg(input1)
output = output.to("cpu")
output = output.numpy()
return output
def npu_op_exec_out(self, input1, input2):
torch.neg(input1, out=input2)
output = input2.to("cpu")
output = output.numpy()
return output
def cpu_inp_op_exec(self, input1):
torch.neg_(input1)
output = input1.numpy()
return output
def npu_inp_op_exec(self, input1):
torch.neg_(input1)
output = input1.to("cpu")
output = output.numpy()
return output
def neg_result(self, shape_format):
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], -100, 100)
if cpu_input1.dtype == torch.float16:
cpu_input1 = cpu_input1.to(torch.float32)
cpu_output = self.cpu_op_exec(cpu_input1)
npu_output = self.npu_op_exec(npu_input1)
cpu_output = cpu_output.astype(npu_output.dtype)
self.assertRtolEqual(cpu_output, npu_output)
cpu_input_inp, npu_input_inp = create_common_tensor(item[0], -100, 100)
if cpu_input_inp.dtype == torch.float16:
cpu_input_inp = cpu_input_inp.to(torch.float32)
cpu_output_inp = self.cpu_inp_op_exec(cpu_input_inp)
npu_output_inp = self.npu_inp_op_exec(npu_input_inp)
cpu_output_inp = cpu_output_inp.astype(npu_output_inp.dtype)
self.assertRtolEqual(cpu_output_inp, npu_output_inp)
def neg_out_result(self, shape_format):
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], -100, 100)
cpu_input2, npu_input2 = create_common_tensor(item[0], -100, 100)
cpu_input3, npu_input3 = create_common_tensor(item[1], -100, 100)
if cpu_input1.dtype == torch.float16:
cpu_input1 = cpu_input1.to(torch.float32)
cpu_output = self.cpu_op_exec(cpu_input1)
npu_output_out1 = self.npu_op_exec_out(npu_input1, npu_input2)
npu_output_out2 = self.npu_op_exec_out(npu_input1, npu_input3)
cpu_output = cpu_output.astype(npu_output_out1.dtype)
self.assertRtolEqual(cpu_output, npu_output_out1)
self.assertRtolEqual(cpu_output, npu_output_out2)
def test_neg_out_result(self, device):
shape_format = [
[[np.float16, 0, [128, 116, 14, 14]], [np.float16, 0, [256, 116, 1, 1]]],
[[np.float16, 0, [128, 58, 28, 28]], [np.float16, 0, [58, 58, 1, 1]]],
[[np.float16, 0, [128, 3, 224, 224]], [np.float16, 0, [3, 3, 3, 3]]],
[[np.float16, 0, [128, 116, 14, 14]], [np.float16, 0, [116, 116, 1, 1]]],
[[np.float32, 0, [256, 128, 7, 7]], [np.float32, 0, [128, 128, 3, 3]]],
[[np.float32, 0, [256, 3, 224, 224]], [np.float32, 0, [3, 3, 7, 7]]],
[[np.float32, 0, [2, 3, 3, 3]], [np.float32, 0, [3, 1, 3, 3]]],
[[np.float32, 0, [128, 232, 7, 7]], [np.float32, 0, [232, 232, 1, 1]]],
]
self.neg_out_result(shape_format)
def test_neg_shape_format_fp16_1d(self, device):
format_list = [0, 3]
shape_format = [[[np.float16, i, [96]]] for i in format_list]
self.neg_result(shape_format)
def test_neg_shape_format_fp32_1d(self, device):
format_list = [0, 3]
shape_format = [[[np.float32, i, [96]]] for i in format_list]
self.neg_result(shape_format)
def test_neg_shape_format_fp16_2d(self, device):
format_list = [0, 3, 29]
shape_format = [[[np.float16, i, [448, 1]]] for i in format_list]
self.neg_result(shape_format)
def test_neg_shape_format_fp32_2d(self, device):
format_list = [0, 3, 29]
shape_format = [[[np.float32, i, [448, 1]]] for i in format_list]
self.neg_result(shape_format)
def test_neg_shape_format_fp16_3d(self, device):
format_list = [0, 3, 29]
shape_format = [[[np.float16, i, [64, 24, 38]]] for i in format_list]
self.neg_result(shape_format)
def test_neg_shape_format_fp32_3d(self, device):
format_list = [0, 3, 29]
shape_format = [[[np.float32, i, [64, 24, 38]]] for i in format_list]
self.neg_result(shape_format)
def test_neg_shape_format_fp16_4d(self, device):
format_list = [0, 3, 29]
shape_format = [[[np.float16, i, [32, 3, 3, 3]]] for i in format_list]
self.neg_result(shape_format)
def test_neg_shape_format_fp32_4d(self, device):
format_list = [0, 3, 29]
shape_format = [[[np.float32, i, [32, 3, 3, 3]]] for i in format_list]
self.neg_result(shape_format)
instantiate_device_type_tests(TestNeg, globals(), except_for="cpu")
if __name__ == "__main__":
run_tests()
| 41.607143
| 85
| 0.635365
| 854
| 5,825
| 4.063232
| 0.168618
| 0.095101
| 0.025937
| 0.04611
| 0.636023
| 0.588473
| 0.569164
| 0.525937
| 0.476945
| 0.436599
| 0
| 0.082688
| 0.246352
| 5,825
| 139
| 86
| 41.906475
| 0.707745
| 0.098541
| 0
| 0.371429
| 0
| 0
| 0.003819
| 0
| 0
| 0
| 0
| 0
| 0.038095
| 1
| 0.152381
| false
| 0
| 0.047619
| 0
| 0.257143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2456f6933037766ba90c0161dd21869ac9959ef3
| 370
|
py
|
Python
|
Web_decoder_2.py
|
cegador/python_exercises
|
cdae01f845288475c00ed4c7c45db17e7dfb751e
|
[
"MIT"
] | null | null | null |
Web_decoder_2.py
|
cegador/python_exercises
|
cdae01f845288475c00ed4c7c45db17e7dfb751e
|
[
"MIT"
] | null | null | null |
Web_decoder_2.py
|
cegador/python_exercises
|
cdae01f845288475c00ed4c7c45db17e7dfb751e
|
[
"MIT"
] | null | null | null |
import requests
from bs4 import BeautifulSoup
url = 'https://www.vanityfair.com/style/society/2014/06/monica-lewinsky-humiliation-culture'
vf = requests.get(url)
print(vf.status_code)  # confirm the request succeeded (200); the bare expression had no effect
s = BeautifulSoup(vf.text, 'lxml')
print(s.prettify())
text_article = s.find('div', attrs={'class': 'content-background'}).find_all('p')
for p in text_article:
    print(p.get_text())
| 26.428571
| 92
| 0.737838
| 56
| 370
| 4.785714
| 0.696429
| 0.08209
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020896
| 0.094595
| 370
| 14
| 93
| 26.428571
| 0.779104
| 0
| 0
| 0
| 0
| 0.111111
| 0.309973
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0.222222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
245a13172d7f4a65ed0b24d40fe80fd7eeecd806
| 3,849
|
py
|
Python
|
eva-accession-release-automation/include_mapping_weight_from_dbsnp/export_all_multimap_snps_from_dbsnp_dumps.py
|
sundarvenkata-EBI/eva-accession
|
b26f0b5e5acaafe63d0755bad81837b9a5976237
|
[
"Apache-2.0"
] | 3
|
2018-02-28T17:14:53.000Z
|
2020-03-17T17:19:45.000Z
|
eva-accession-release-automation/include_mapping_weight_from_dbsnp/export_all_multimap_snps_from_dbsnp_dumps.py
|
sundarvenkata-EBI/eva-accession
|
b26f0b5e5acaafe63d0755bad81837b9a5976237
|
[
"Apache-2.0"
] | 52
|
2018-03-29T15:44:23.000Z
|
2022-02-16T00:54:28.000Z
|
eva-accession-release-automation/include_mapping_weight_from_dbsnp/export_all_multimap_snps_from_dbsnp_dumps.py
|
sundarvenkata-EBI/eva-accession
|
b26f0b5e5acaafe63d0755bad81837b9a5976237
|
[
"Apache-2.0"
] | 15
|
2018-03-02T13:34:19.000Z
|
2021-06-22T15:54:59.000Z
|
# Copyright 2020 EMBL - European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The purpose of this script is to validate the mapping weight attribute addition that was performed by
# the script incorporate_mapping_weight_into_accessioning.py
import click
import logging
import psycopg2
from collections import defaultdict
from ebi_eva_common_pyutils.command_utils import run_command_with_output
from ebi_eva_common_pyutils.config_utils import get_pg_metadata_uri_for_eva_profile
from ebi_eva_common_pyutils.metadata_utils import get_species_info, get_db_conn_for_species
from ebi_eva_common_pyutils.pg_utils import get_all_results_for_query, get_result_cursor
logger = logging.getLogger(__name__)
def get_assemblies_with_multimap_snps_for_species(metadata_connection_handle):
assembly_GCA_accession_map = defaultdict(dict)
query = "select distinct database_name, assembly, assembly_accession " \
"from dbsnp_ensembl_species.EVA2015_snpmapinfo_asm_lookup " \
"where assembly_accession is not null"
for result in get_all_results_for_query(metadata_connection_handle, query):
species_name, assembly, GCA_accession = result
assembly_GCA_accession_map[species_name][assembly] = GCA_accession
return assembly_GCA_accession_map
def export_all_multimap_snps_from_dbsnp_dumps(private_config_xml_file):
result_file = "all_multimap_snp_ids_from_dbsnp_dumps.txt"
with psycopg2.connect(get_pg_metadata_uri_for_eva_profile("development", private_config_xml_file), user="evadev") \
as metadata_connection_handle:
assembly_GCA_accession_map = get_assemblies_with_multimap_snps_for_species(metadata_connection_handle)
for species_info in get_species_info(metadata_connection_handle):
species_name = species_info["database_name"]
logger.info("Processing species {0}...".format(species_name))
if species_name in assembly_GCA_accession_map:
with get_db_conn_for_species(species_info) as species_connection_handle:
export_query = "select snp_id, assembly from dbsnp_{0}.multimap_snps " \
"where assembly in ({1})"\
.format(species_name,",".join(["'{0}'".format(assembly) for assembly in
assembly_GCA_accession_map[species_name].keys()]))
logger.info("Running export query: " + export_query)
with open(result_file, 'a') as result_file_handle:
for snp_id, assembly in get_result_cursor(species_connection_handle, export_query):
result_file_handle.write("{0},{1}\n"
.format(snp_id,
assembly_GCA_accession_map[species_name][assembly]))
run_command_with_output("Sorting multimap SNP IDs from dbSNP source dumps...",
"sort -u {0} -o {0}".format(result_file))
@click.option("--private-config-xml-file", help="ex: /path/to/eva-maven-settings.xml", required=True)
@click.command()
def main(private_config_xml_file):
export_all_multimap_snps_from_dbsnp_dumps(private_config_xml_file)
if __name__ == "__main__":
main()
| 50.644737
| 119
| 0.71629
| 499
| 3,849
| 5.148297
| 0.340681
| 0.038536
| 0.070066
| 0.06267
| 0.326197
| 0.187622
| 0.174387
| 0.091865
| 0.091865
| 0.091865
| 0
| 0.007275
| 0.214341
| 3,849
| 75
| 120
| 51.32
| 0.842262
| 0.191998
| 0
| 0
| 0
| 0
| 0.161603
| 0.055268
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065217
| false
| 0
| 0.173913
| 0
| 0.26087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
245c288390cedd8347f4657b97e7c373c4bcbfa5
| 7,357
|
py
|
Python
|
hilbert_curve.py
|
qypea/disk-usage-visualizer
|
c26c5866a6d885347a92f1212d132286a6ab9ddc
|
[
"MIT"
] | null | null | null |
hilbert_curve.py
|
qypea/disk-usage-visualizer
|
c26c5866a6d885347a92f1212d132286a6ab9ddc
|
[
"MIT"
] | null | null | null |
hilbert_curve.py
|
qypea/disk-usage-visualizer
|
c26c5866a6d885347a92f1212d132286a6ab9ddc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# From https://people.sc.fsu.edu/~jburkardt/py_src/hilbert_curve/hilbert_curve.py
#
def d2xy ( m, d ):
#*****************************************************************************80
#
## D2XY converts a 1D Hilbert coordinate to a 2D Cartesian coordinate.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 03 January 2016
#
# Parameters:
#
# Input, integer M, the index of the Hilbert curve.
# The number of cells is N=2^M.
# 0 < M.
#
# Input, integer D, the Hilbert coordinate of the cell.
# 0 <= D < N * N.
#
# Output, integer X, Y, the Cartesian coordinates of the cell.
# 0 <= X, Y < N.
#
n = 2 ** m
x = 0
y = 0
t = d
s = 1
while ( s < n ):
rx = ( ( t // 2 ) % 2 )
if ( rx == 0 ):
ry = ( t % 2 )
else:
ry = ( ( t ^ rx ) % 2 )
x, y = rot ( s, x, y, rx, ry )
x = x + s * rx
y = y + s * ry
t = ( t // 4 )
s = s * 2
return x, y
def d2xy_test ( ):
#*****************************************************************************80
#
## D2XY_TEST tests D2XY.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 03 January 2016
#
# Author:
#
# John Burkardt
#
import platform
print ( '' )
print ( 'D2XY_TEST:' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' D2XY converts a Hilbert linear D coordinate to an (X,Y) 2D coordinate.' )
m = 3
n = 2 ** m
print ( '' )
print ( ' D X Y' )
print ( '' )
for d in range ( 0, n * n ):
x, y = d2xy ( m, d )
print ( ' %3d %3d %3d' % ( d, x, y ) )
#
# Terminate.
#
print ( '' )
print ( 'D2XY_TEST:' )
print ( ' Normal end of execution.' )
return
def rot ( n, x, y, rx, ry ):
#*****************************************************************************80
#
## ROT rotates and flips a quadrant appropriately.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 03 January 2016
#
# Parameters:
#
# Input, integer N, the length of a side of the square.
# N must be a power of 2.
#
# Input/output, integer X, Y, the coordinates of a point.
#
# Input, integer RX, RY, ???
#
if ( ry == 0 ):
#
# Reflect.
#
if ( rx == 1 ):
x = n - 1 - x
y = n - 1 - y
#
# Flip.
#
t = x
x = y
y = t
return x, y
def rot_test ( ):
#*****************************************************************************80
#
## ROT_TEST tests ROT.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 03 January 2016
#
# Author:
#
# John Burkardt
#
import platform
print ( '' )
print ( 'ROT_TEST:' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' ROT rotates and flips a quadrant appropriately.' )
print ( '' )
print ( ' X Y X0 Y0 X1 Y1' )
print ( '' )
m = 3
n = 2 ** m
ry = 0
for y in range ( 0, n ):
for x in range ( 0, n ):
rx = 0
x0 = x
y0 = y
x0, y0 = rot ( n, x0, y0, rx, ry )
rx = 1
x1 = x
y1 = y
x1, y1 = rot ( n, x1, y1, rx, ry )
print ( ' %2d %2d %2d %2d %2d %2d' % ( x, y, x0, y0, x1, y1 ) )
#
# Terminate.
#
print ( '' )
print ( 'ROT_TEST:' )
print ( ' Normal end of execution.' )
return
def timestamp ( ):
#*****************************************************************************80
#
## TIMESTAMP prints the date as a timestamp.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 06 April 2013
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# None
#
import time
t = time.time ( )
print ( time.ctime ( t ) )
return None
def timestamp_test ( ):
#*****************************************************************************80
#
## TIMESTAMP_TEST tests TIMESTAMP.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 03 December 2014
#
# Author:
#
# John Burkardt
#
# Parameters:
#
# None
#
import platform
print ( '' )
print ( 'TIMESTAMP_TEST:' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' TIMESTAMP prints a timestamp of the current date and time.' )
print ( '' )
timestamp ( )
#
# Terminate.
#
print ( '' )
print ( 'TIMESTAMP_TEST:' )
print ( ' Normal end of execution.' )
return
def xy2d ( m, x, y ):
#*****************************************************************************80
#
## XY2D converts a 2D Cartesian coordinate to a 1D Hilbert coordinate.
#
# Discussion:
#
# It is assumed that a square has been divided into an NxN array of cells,
# where N is a power of 2.
#
# Cell (0,0) is in the lower left corner, and (N-1,N-1) in the upper
# right corner.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 03 January 2016
#
# Parameters:
#
# Input, integer M, the index of the Hilbert curve.
# The number of cells is N=2^M.
# 0 < M.
#
# Input, integer X, Y, the Cartesian coordinates of a cell.
# 0 <= X, Y < N.
#
# Output, integer D, the Hilbert coordinate of the cell.
# 0 <= D < N * N.
#
xcopy = x
ycopy = y
d = 0
n = 2 ** m
s = ( n // 2 )
while ( 0 < s ):
if ( 0 < ( abs ( xcopy ) & s ) ):
rx = 1
else:
rx = 0
if ( 0 < ( abs ( ycopy ) & s ) ):
ry = 1
else:
ry = 0
d = d + s * s * ( ( 3 * rx ) ^ ry )
xcopy, ycopy = rot ( s, xcopy, ycopy, rx, ry )
s = ( s // 2 )
return d
def xy2d_test ( ):
#*****************************************************************************80
#
## XY2D_TEST tests XY2D.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 03 January 2016
#
# Author:
#
# John Burkardt
#
import platform
print ( '' )
print ( 'XY2D_TEST:' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' XY2D converts an (X,Y) 2D coordinate to a Hilbert linear D coordinate.' )
m = 3
n = 2 ** m
print ( '' )
print ( ' ', end = '' )
for x in range ( 0, n ):
print ( '%3d' % ( x ), end = '' )
print ( '' )
print ( '' )
for y in range ( n - 1, -1, -1 ):
print ( ' %3d: ' % ( y ), end = '' )
for x in range ( 0, n ):
d = xy2d ( m, x, y )
print ( '%3d' % ( d ), end = '' )
print ( '' )
#
# Terminate.
#
print ( '' )
print ( 'XY2D_TEST:' )
print ( ' Normal end of execution.' )
return
def hilbert_curve_test ( ):
#*****************************************************************************80
#
## HILBERT_CURVE_TEST tests the HILBERT_CURVE library.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 03 January 2016
#
# Author:
#
# John Burkardt
#
import platform
print ( '' )
print ( 'HILBERT_CURVE_TEST' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' Test the HILBERT_CURVE library.' )
d2xy_test ( )
rot_test ( )
xy2d_test ( )
#
# Terminate.
#
print ( '' )
print ( 'HILBERT_CURVE_TEST:' )
print ( ' Normal end of execution.' )
return
if ( __name__ == '__main__' ):
timestamp ( )
hilbert_curve_test ( )
timestamp ( )
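A small consistency check one can run against the two mappings above (an addition, not part of Burkardt's file): d2xy and xy2d should be inverses on a 2^m by 2^m grid.

def round_trip_test ( m = 2 ):
    # every Hilbert index d should survive the d -> (x, y) -> d round trip
    n = 2 ** m
    for d in range ( 0, n * n ):
        x, y = d2xy ( m, d )
        assert xy2d ( m, x, y ) == d
    print ( 'round trip OK for m = %d' % ( m ) )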
| 18.12069
| 86
| 0.482126
| 921
| 7,357
| 3.800217
| 0.15418
| 0.012
| 0.043714
| 0.048857
| 0.596857
| 0.537143
| 0.496286
| 0.436857
| 0.379143
| 0.309143
| 0
| 0.036318
| 0.292646
| 7,357
| 405
| 87
| 18.165432
| 0.636241
| 0.44529
| 0
| 0.483221
| 0
| 0
| 0.195292
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060403
| false
| 0
| 0.040268
| 0
| 0.161074
| 0.355705
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
245ea6b8625b098807a43420900faed747e3a21d
| 3,314
|
py
|
Python
|
src/search.py
|
yrwq/ytw
|
787d5c2d5d49d1cdd99a2d2448210af11727a278
|
[
"MIT"
] | 5
|
2020-11-26T08:38:45.000Z
|
2021-03-14T11:38:38.000Z
|
src/search.py
|
yrwq/ytw
|
787d5c2d5d49d1cdd99a2d2448210af11727a278
|
[
"MIT"
] | null | null | null |
src/search.py
|
yrwq/ytw
|
787d5c2d5d49d1cdd99a2d2448210af11727a278
|
[
"MIT"
] | 2
|
2020-11-26T07:21:49.000Z
|
2021-01-01T13:09:34.000Z
|
#!/usr/bin/env python
import sys
import getopt
import requests
import urllib.parse
import json
class YoutubeSearch:
def __init__(self, search_terms: str, max_results=None):
self.search_terms = search_terms
self.max_results = max_results
self.videos = self.search()
def search(self):
encoded_search = urllib.parse.quote(self.search_terms)
BASE_URL = "https://youtube.com"
url = f"{BASE_URL}/results?search_query={encoded_search}"
response = requests.get(url).text
while "ytInitialData" not in response:
response = requests.get(url).text
results = self.parse_html(response)
if self.max_results is not None and len(results) > self.max_results:
return results[: self.max_results]
return results
def parse_html(self, response):
results = []
start = (
response.index("ytInitialData")
+ len("ytInitialData")
+ 3
)
end = response.index("};", start) + 1
json_str = response[start:end]
data = json.loads(json_str)
videos = data["contents"]["twoColumnSearchResultsRenderer"]["primaryContents"][
"sectionListRenderer"
]["contents"][0]["itemSectionRenderer"]["contents"]
for video in videos:
res = {}
if "videoRenderer" in video.keys():
video_data = video.get("videoRenderer", {})
res["id"] = video_data.get("videoId", None)
res["thumbnails"] = [thumb.get("url", None) for thumb in video_data.get("thumbnail", {}).get("thumbnails", [{}])]
res["title"] = video_data.get("title", {}).get("runs", [[{}]])[0].get("text", None)
res["channel"] = video_data.get("longBylineText", {}).get("runs", [[{}]])[0].get("text", None)
res["duration"] = video_data.get("lengthText", {}).get("simpleText", 0)
res["views"] = video_data.get("viewCountText", {}).get("simpleText", 0)
res["url_suffix"] = video_data.get("navigationEndpoint", {}).get("commandMetadata", {}).get("webCommandMetadata", {}).get("url", None)
results.append(res)
return results
def to_dict(self):
return self.videos
def to_json(self):
return json.dumps({"videos": self.videos}, indent=4)
argumentList = sys.argv[1:]
# Options
options = "ht:"
# Long options
long_options = ["help", "title"]
def help():
print("\nYoutube Search \n")
print("Usage:")
print(" -t or --title search with title")
print(" -h or --help show this useful help message ...")
print("")
print("Example:")
print(' -t "interesting title"')
print("")
try:
# Parsing argument
arguments, values = getopt.getopt(argumentList, options, long_options)
# checking each argument
for currentArgument, currentValue in arguments:
if currentArgument in ("-h", "--help"):
help()
elif currentArgument in ("-t", "--title"):
            results = YoutubeSearch(currentValue, max_results=15).to_json()  # use the parsed option value, not a positional argv index
print(results)
else:
help()
except getopt.error as err:
# output error, and return with an error code
print(str(err))
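A brief illustration of using the class directly, outside the getopt wrapper; the search term is made up and the field values depend on what YouTube returns.

videos = YoutubeSearch("lofi hip hop", max_results=3).to_dict()
for v in videos:
    print(v["title"], "https://youtube.com" + (v["url_suffix"] or ""))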
| 32.811881
| 150
| 0.583585
| 365
| 3,314
| 5.194521
| 0.347945
| 0.037975
| 0.044304
| 0.023207
| 0.082806
| 0.05538
| 0.023207
| 0
| 0
| 0
| 0
| 0.004936
| 0.266445
| 3,314
| 100
| 151
| 33.14
| 0.77499
| 0.037719
| 0
| 0.106667
| 0
| 0
| 0.202639
| 0.024505
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.066667
| 0.026667
| 0.226667
| 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
245ebdd8bc1926f462e2a124d6f07b5c8b565ff3
| 3,716
|
py
|
Python
|
mapmeta/script/script_init.py
|
bukun/mapserver_meta
|
0f1dd278b07fa747dafaf801b96eac19ffdbf327
|
[
"MIT"
] | null | null | null |
mapmeta/script/script_init.py
|
bukun/mapserver_meta
|
0f1dd278b07fa747dafaf801b96eac19ffdbf327
|
[
"MIT"
] | null | null | null |
mapmeta/script/script_init.py
|
bukun/mapserver_meta
|
0f1dd278b07fa747dafaf801b96eac19ffdbf327
|
[
"MIT"
] | 1
|
2019-06-20T00:29:25.000Z
|
2019-06-20T00:29:25.000Z
|
# -*- coding: utf-8 -*-
'''
script for initialization.
'''
import os
import requests
from .script_init_tabels import run_init_tables
from mapmeta.model.mapmeta_model import MMapMeta
from lxml import etree
def do_for_maplet(mapserver_ip):
'''
    Code taken from `maplet_arch//030_gen_mapproxy.py`; originally used to locate the mapfile and generate the YAML.
'''
rst_ws = '/opt/mapws/maplet/00_China_png'
for wroot, wdirs, wfiles in os.walk(rst_ws):
for png in wfiles:
(lyr_name, lyr_ext) = os.path.splitext(png)
if png.endswith('.png') :
pass
else:
continue
maplet_uid = lyr_name
# http://121.42.29.253/cgi-bin/mapserv?map=/opt/mapws/maplet/vect3857/China/China3857_v/mapfile.map&layer=landuse2000_v000&SERVICE=WMS&version=1.3.0&REQUEST=GetCapabilities
mapurl = 'http://{mapserver_ip}/cgi-bin/mapserv?map=/opt/mapws/maplet/maplet_00.map' \
'&layer={layer}&SERVICE=WMS&version=1.3.0' \
'&REQUEST=GetCapabilities'.format(
mapserver_ip=mapserver_ip,
layer='maplet_' + maplet_uid,
)
print(mapurl)
# xml = requests.get(mapurl)
lyr_meta = get_meta(mapurl, maplet_uid)
mapinfo = {
'uid': maplet_uid,
'url': mapurl,
'meta': lyr_meta
}
MMapMeta.add_or_update(mapinfo)
def get_meta(url, sig):
uu = requests.get(url)
    uu.encoding = 'utf-8'
xml_text = uu.text
xml_text2 = xml_text.encode('utf-8')
    root = etree.XML(xml_text2)  # xml_text2 is the raw XML text, encoded to UTF-8 bytes
root.tag
namespace = "{http://www.opengis.net/wms}"
uu = root.findall('.//{0}Layer'.format(namespace))
bb = ''
for x in uu:
# print(x.tag)
# print(x.attrib)
tt = x.find('.//{0}Name'.format(namespace))
# tt = x.getroottree()
sig_arr = tt.text.split('_')
if sig_arr[-1] == sig:
bb= etree.tostring(x, pretty_print=True).decode()
return bb
def do_for_vector(mapserver_ip):
'''
    Code taken from `maplet_arch//030_gen_mapproxy.py`; originally used to locate the mapfile and generate the YAML.
'''
rst_ws = '/opt/mapws/maplet/vect3857'
for wroot, wdirs, wfiles in os.walk(rst_ws):
for png in wfiles:
(lyr_name, lyr_ext) = os.path.splitext(png)
lyr_name_arr = lyr_name.split('_')
if png.startswith('lyr_') and len(lyr_name_arr[-1]) == 4 and lyr_name_arr[-1][0] == 'v':
pass
else:
continue
maplet_uid = lyr_name_arr[-1]
# http://121.42.29.253/cgi-bin/mapserv?map=/opt/mapws/maplet/vect3857/China/China3857_v/mapfile.map&layer=landuse2000_v000&SERVICE=WMS&version=1.3.0&REQUEST=GetCapabilities
mapurl = 'http://{mapserver_ip}/cgi-bin/mapserv?map={mapfile}' \
'&layer={layer}&SERVICE=WMS&version=1.3.0' \
'&REQUEST=GetCapabilities'.format(
mapserver_ip=mapserver_ip,
mapfile=os.path.join(wroot, 'mapfile.map'),
layer='maplet_' + maplet_uid,
)
print(mapurl)
# print(the_html)
#for uu in the_html.iter():
# print(uu.tag)
lyr_meta = get_meta(mapurl, maplet_uid)
mapinfo = {
'uid': maplet_uid,
'url': mapurl,
'meta': lyr_meta
}
MMapMeta.add_or_update(mapinfo)
def run_init(*args):
'''
running init.
:return:
'''
run_init_tables()
do_for_vector('121.42.29.253')
do_for_maplet('121.42.29.253')
| 30.459016
| 184
| 0.558127
| 465
| 3,716
| 4.268817
| 0.277419
| 0.044332
| 0.035264
| 0.020151
| 0.58136
| 0.58136
| 0.550126
| 0.510831
| 0.510831
| 0.510831
| 0
| 0.042396
| 0.308127
| 3,716
| 121
| 185
| 30.710744
| 0.729677
| 0.191873
| 0
| 0.447368
| 0
| 0.013158
| 0.152981
| 0.062692
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0.026316
| 0.065789
| 0
| 0.131579
| 0.039474
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
245fd6a78e559e016c8aa53d6a9cbeb14ced94d9
| 3,713
|
py
|
Python
|
dissect/utils/data_processing.py
|
dufkan/DiSSECT
|
834755f106fefff2475bd247c414aa6d13ec0851
|
[
"MIT"
] | null | null | null |
dissect/utils/data_processing.py
|
dufkan/DiSSECT
|
834755f106fefff2475bd247c414aa6d13ec0851
|
[
"MIT"
] | null | null | null |
dissect/utils/data_processing.py
|
dufkan/DiSSECT
|
834755f106fefff2475bd247c414aa6d13ec0851
|
[
"MIT"
] | null | null | null |
from typing import Dict, Any
import pandas as pd
from tqdm.contrib import tmap
from sage.all import RR, ZZ
import dissect.utils.database_handler as database
from dissect.definitions import STD_CURVE_DICT, ALL_CURVE_COUNT
class Modifier:
"""a class of lambda functions for easier modifications if visualised values"""
def __init__(self):
pass
@staticmethod
def identity():
return lambda x: x
@staticmethod
def ratio(ratio_precision=3):
return lambda x: RR(x).numerical_approx(digits=ratio_precision)
@staticmethod
def bits():
return lambda x: ZZ(x).nbits()
@staticmethod
def factorization_bits(factor_index=-1):
return lambda x: ZZ(x[factor_index]).nbits()
@staticmethod
def length():
return lambda x: len(x)
def load_trait(
trait: str, params: Dict[str, Any] = None, curve: str = None, db = None
) -> pd.DataFrame:
if not db:
db = database.connect()
trait_results = database.get_trait_results(db, trait)
return pd.DataFrame(trait_results).convert_dtypes()
def load_curves(filters: Any = {}, db=None) -> pd.DataFrame:
if not db:
db = database.connect()
def project(record: Dict[str, Any]):
projection = {}
projection["curve"] = record["name"]
projection["simulated"] = record["simulated"]
projection["bitlength"] = int(record["field"]["bits"])
projection["field"] = record["field"]["type"]
projection["cofactor"] = (
int(record["cofactor"], base=16)
if isinstance(record["cofactor"], str)
else int(record["cofactor"])
)
return projection
curve_records = database.get_curves(db, filters, raw=True)
df = pd.DataFrame(
tmap(project, curve_records, desc="Loading curves", total=ALL_CURVE_COUNT)
).convert_dtypes()
return df
def get_trait_df(curves, trait_name, db=None):
# load all results for the given trait
df_trait = load_trait(trait_name, db=db)
# join curve metadata to trait results
df_trait = curves.merge(df_trait, "right", "curve")
return df_trait
def filter_choices(choices, ignored):
filtered = {}
for key in choices:
if key not in ignored:
filtered[key] = choices[key]
return filtered
def get_params(choices):
return filter_choices(
choices, ["source", "bitlength","field", "cofactor", "Feature:", "Modifier:"]
)
def filter_df(df, choices):
# TODO this way of checking is expensive - add curve categories to DB
allowed_curves = []
for source in choices["source"]:
allowed_curves += STD_CURVE_DICT.get(source, [])
if "sim" not in choices["source"]:
df = df[df.simulated == False]
if "std" not in choices["source"]:
df = df[df.curve.isin(allowed_curves) | (df.simulated == True)]
df = df[df.field.isin(choices["field"])]
filtered = filter_choices(choices, ["source", "field","Feature:", "Modifier:"])
for key, value in filtered.items():
options = list(map(int, value))
df = df[df[key].isin(options)]
return df
def get_all(df, choices):
modifier = getattr(Modifier, choices["Modifier:"])()
feature = choices["Feature:"]
params = get_params(choices)
if len(params)==0:
return [(filter_df(df, choices),params,feature,modifier,choices["Modifier:"])]
param, values = params.popitem()
choices.pop(param)
results = []
for v in values:
param_choice = choices.copy()
param_choice[param] = [v]
results.append((filter_df(df, param_choice), param_choice, feature, modifier, choices["Modifier:"]))
return results
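The Modifier helpers above are factories that return lambdas; a short sketch of how one might be applied to a column (this assumes a Sage environment for ZZ/RR, as in the imports above; the DataFrame is illustrative).

import pandas as pd  # already imported above; repeated for self-containment
df_example = pd.DataFrame({"order": [251, 65537]})
bits = Modifier.bits()
print(df_example["order"].map(bits))  # bit lengths of each value: 8 and 17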
| 28.782946
| 108
| 0.64153
| 467
| 3,713
| 4.982869
| 0.286938
| 0.018908
| 0.027933
| 0.012892
| 0.069618
| 0.055866
| 0.055866
| 0.035239
| 0.035239
| 0.035239
| 0
| 0.00175
| 0.230541
| 3,713
| 128
| 109
| 29.007813
| 0.812741
| 0.058174
| 0
| 0.11828
| 0
| 0
| 0.072227
| 0
| 0
| 0
| 0
| 0.007813
| 0
| 1
| 0.150538
| false
| 0.010753
| 0.064516
| 0.064516
| 0.376344
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2461451b3428279cbbc4968cdac79244d3d6001a
| 3,251
|
py
|
Python
|
djcookieauth/middleware.py
|
benoitc/dj-cookieauth
|
7397db7adf11e480f9954bf7518a3d522f5b28e5
|
[
"MIT"
] | 3
|
2015-05-18T13:49:41.000Z
|
2020-01-21T11:12:08.000Z
|
djcookieauth/middleware.py
|
benoitc/dj-cookieauth
|
7397db7adf11e480f9954bf7518a3d522f5b28e5
|
[
"MIT"
] | null | null | null |
djcookieauth/middleware.py
|
benoitc/dj-cookieauth
|
7397db7adf11e480f9954bf7518a3d522f5b28e5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# This file is part of dj-cookieauth released under the Apache 2 license.
# See the NOTICE for more information.
import base64
import hmac
import hashlib
import time
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.contrib.auth.models import User, AnonymousUser
class CookieAuthMiddleware(object):
def __init__(self):
self.cookie_name = getattr(settings, 'COOKIE_AUTH_NAME',
'AuthSession')
def process_request(self, request):
try:
cookie = request.COOKIES[self.cookie_name]
except KeyError:
return
try:
            auth_session = base64.b64decode(cookie).decode()  # the base64 module has no string-level decode(); b64decode is the correct call
user, timestr, cur_hash = auth_session.split(":")
except:
raise ValueError("Malformed AuthSession cookie. Please clear your cookies.")
        try:
            secret = settings.SECRET_KEY
        except AttributeError:
            # a missing settings attribute raises AttributeError, not KeyError
            raise ImproperlyConfigured("secret key isn't set")
try:
user_obj = User.objects.get(username=user)
except User.DoesNotExist:
return
now = time.time()
salt = self.get_user_salt(user_obj)
full_secret = "%s%s" % (secret, salt)
expected_hash = hmac.new(full_secret, msg="%s:%s" % (user,
timestr), digestmod=hashlib.sha256).digest()
timeout = getattr(settings, 'COOKIE_AUTH_TIMEOUT', 600)
try:
timestamp = int(timestr, 16)
except:
return
if now < timestamp + timeout:
if expected_hash == cur_hash:
timeleft = timestamp + timeout - now
request.user = user_obj
request.user.timeleft = timeleft
return
request.user = AnonymousUser
def process_response(self, request, response):
if not request.user.is_authenticated():
# delete cookie
if self.cookie_name in request.COOKIES:
response.delete_cookie(
self.cookie_name,
path=settings.SESSION_COOKIE_PATH,
domain=settings.SESSION_COOKIE_DOMAIN
)
return response
        salt = self.get_user_salt(request.user)  # the method lives on the middleware, not on the request
        try:
            secret = settings.SECRET_KEY
        except AttributeError:
            raise ImproperlyConfigured("secret key isn't set")
now = time.time()
full_secret = "%s%s" % (secret, salt)
new_hash = hmac.new(full_secret, msg="%s:%s" % (request.user,
now), digestmod=hashlib.sha256).digest()
key = "%s:%s:%s" % (request.user, now, new_hash)
response.set_cookie(
self.cookie_name,
            base64.b64encode(key),
max_age=None,
expires=None,
domain=settings.SESSION_COOKIE_DOMAIN,
path=settings.SESSION_COOKIE_PATH,
secure=True,
httponly=True
)
return response
def get_user_salt(self, user):
if '$' not in user.password:
return ''
algo, salt, hsh = user.password.split('$')
return salt
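A hypothetical settings fragment showing how this middleware would be enabled in a Django project of that era; the entry path mirrors the package layout above and the surrounding entries are illustrative.

# settings.py (illustrative)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'djcookieauth.middleware.CookieAuthMiddleware',
)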
| 28.769912
| 88
| 0.569978
| 342
| 3,251
| 5.27193
| 0.327485
| 0.042707
| 0.038824
| 0.027732
| 0.228508
| 0.143095
| 0.118691
| 0.118691
| 0.08985
| 0.08985
| 0
| 0.008891
| 0.342664
| 3,251
| 112
| 89
| 29.026786
| 0.834815
| 0.044602
| 0
| 0.358025
| 0
| 0
| 0.055144
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049383
| false
| 0.024691
| 0.08642
| 0
| 0.246914
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2461bf3779f8f53bfec6956001ecc2d3efe42214
| 1,225
|
py
|
Python
|
src/20.py
|
sroccaserra/aoc2021
|
a9024ac59fdbe519271d6fab937b9123095955e1
|
[
"BSD-3-Clause"
] | 1
|
2021-12-16T13:25:38.000Z
|
2021-12-16T13:25:38.000Z
|
src/20.py
|
sroccaserra/aoc2021
|
a9024ac59fdbe519271d6fab937b9123095955e1
|
[
"BSD-3-Clause"
] | null | null | null |
src/20.py
|
sroccaserra/aoc2021
|
a9024ac59fdbe519271d6fab937b9123095955e1
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import fileinput
from collections import defaultdict
from itertools import chain
def solve(algo, grid, times):
defaults = ['.', algo[0]]
image = grid
for i in range(times):
image = enhance(algo, defaults[i%2], image)
return sum(map(lambda c: c == '#', chain.from_iterable(image)))
def enhance(algo, default, grid):
w = len(grid[0])
h = len(grid)
infinite = defaultdict(lambda:default)
for y in range(h):
for x in range(w):
infinite[(x, y)] = grid[y][x]
result = []
d = 1
for y in range(-d, h+d):
row = ''
for x in range(-d, w+d):
i = index(infinite, x, y)
row += algo[i]
result.append(row)
return result
def index(infinite, x, y):
s = ''
for dx, dy in [(-1, -1), (0, -1), (1, -1),(-1, 0), (0, 0), (1, 0),(-1, 1), (0, 1), (1, 1)]:
p = (x+dx, y+dy)
c = infinite[p]
if c == '#':
s += '1'
else:
s += '0'
return int(s, 2)
if __name__ == '__main__' and not sys.flags.interactive:
lines = [line.strip() for line in fileinput.input()]
print(solve(lines[0], lines[2:], 2))
print(solve(lines[0], lines[2:], 50))
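A tiny worked example of index() above (an illustrative addition): a window in which every neighbour is lit reads as nine 1-bits.

window = defaultdict(lambda: '#')  # an "infinite" grid that is all lit
print(index(window, 0, 0))  # 0b111111111 == 511, the last entry of the 512-char algorithm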
| 25
| 95
| 0.513469
| 184
| 1,225
| 3.369565
| 0.326087
| 0.022581
| 0.048387
| 0.019355
| 0.090323
| 0.090323
| 0
| 0
| 0
| 0
| 0
| 0.037603
| 0.305306
| 1,225
| 48
| 96
| 25.520833
| 0.690952
| 0
| 0
| 0
| 0
| 0
| 0.010612
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075
| false
| 0
| 0.1
| 0
| 0.25
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
24629d61c69e7cc1eb4d14dec1215374f034c8fe
| 981
|
py
|
Python
|
leetcode/_429.py
|
simonzhang0428/LeetCode
|
7daee7572d8235a34071aa831452ed5d0e93d947
|
[
"Apache-2.0"
] | 2
|
2021-07-09T23:22:25.000Z
|
2021-07-27T23:15:52.000Z
|
leetcode/_429.py
|
simonzhang0428/LeetCode
|
7daee7572d8235a34071aa831452ed5d0e93d947
|
[
"Apache-2.0"
] | null | null | null |
leetcode/_429.py
|
simonzhang0428/LeetCode
|
7daee7572d8235a34071aa831452ed5d0e93d947
|
[
"Apache-2.0"
] | null | null | null |
# Definition for a Node.
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
"""
11:23 - 11:36 7/30
high level: BFS, level order traversal
mid level: queue store cur level res, for the popped node, for its all children, add them to queue
test:
size = 2
cur_res = [5, 6]
q = []
cur = 6
res = [[1], [3, 2, 4], [5, 6]]
time: O(# of nodes) -> O(n)
space:O(max(width of nodes)) ->O(n)
"""
from collections import deque
from typing import List  # required for the List[List[int]] annotation below
class Solution:
def levelOrder(self, root: 'Node') -> List[List[int]]:
res = []
if not root:
return res
queue = deque([root])
while queue:
size = len(queue)
cur_res = []
for _ in range(size):
cur = queue.popleft()
cur_res.append(cur.val)
for child in cur.children:
queue.append(child)
res.append(cur_res)
return res
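A quick sanity check, building the tree traced in the docstring above (node values and shape taken from that trace):

root = Node(1, [Node(3, [Node(5, []), Node(6, [])]), Node(2, []), Node(4, [])])
print(Solution().levelOrder(root))  # expected: [[1], [3, 2, 4], [5, 6]]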
| 24.525
| 98
| 0.538226
| 137
| 981
| 3.788321
| 0.50365
| 0.046243
| 0.030829
| 0.034682
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032407
| 0.33945
| 981
| 40
| 99
| 24.525
| 0.768519
| 0.022426
| 0
| 0.095238
| 0
| 0
| 0.006182
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.047619
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
246405a3df29d2d69c702ebd40ef421c5c95784b
| 12,036
|
py
|
Python
|
python/special.py
|
rodluger/starrynight
|
d3f015e466621189cb271d4d18b538430b14a557
|
[
"MIT"
] | 5
|
2020-05-20T09:30:30.000Z
|
2021-06-27T14:17:33.000Z
|
python/special.py
|
rodluger/starrynight
|
d3f015e466621189cb271d4d18b538430b14a557
|
[
"MIT"
] | 5
|
2020-05-16T18:49:42.000Z
|
2021-02-11T21:46:32.000Z
|
python/special.py
|
rodluger/starrynight
|
d3f015e466621189cb271d4d18b538430b14a557
|
[
"MIT"
] | 1
|
2020-05-19T17:11:57.000Z
|
2020-05-19T17:11:57.000Z
|
from utils import *
from mpmath import ellipe, ellipk, ellippi
from scipy.integrate import quad
import numpy as np
C1 = 3.0 / 14.0
C2 = 1.0 / 3.0
C3 = 3.0 / 22.0
C4 = 3.0 / 26.0
def J(N, k2, kappa, gradient=False):
# We'll need to solve this with gaussian quadrature
func = (
lambda x: np.sin(x) ** (2 * N) * (np.maximum(0, 1 - np.sin(x) ** 2 / k2)) ** 1.5
)
res = 0.0
for i in range(0, len(kappa), 2):
res += quad(
func, 0.5 * kappa[i], 0.5 * kappa[i + 1], epsabs=1e-12, epsrel=1e-12,
)[0]
if gradient:
# Deriv w/ respect to kappa is analytic
dJdkappa = (
0.5
* (
np.sin(0.5 * kappa) ** (2 * N)
* (np.maximum(0, 1 - np.sin(0.5 * kappa) ** 2 / k2)) ** 1.5
)
* np.repeat([-1, 1], len(kappa) // 2).reshape(1, -1)
)
# Deriv w/ respect to k2 is tricky, need to integrate
func = (
lambda x: (1.5 / k2 ** 2)
* np.sin(x) ** (2 * N + 2)
* (np.maximum(0, 1 - np.sin(x) ** 2 / k2)) ** 0.5
)
dJdk2 = 0.0
for i in range(0, len(kappa), 2):
dJdk2 += quad(
func, 0.5 * kappa[i], 0.5 * kappa[i + 1], epsabs=1e-12, epsrel=1e-12,
)[0]
return res, (dJdk2, dJdkappa)
else:
return res
def pal(bo, ro, kappa, gradient=False):
# TODO
if len(kappa) != 2:
raise NotImplementedError("TODO!")
def func(phi):
c = np.cos(phi)
z = np.minimum(
1 - 1e-12, np.maximum(1e-12, 1 - ro ** 2 - bo ** 2 - 2 * bo * ro * c)
)
return (1.0 - z ** 1.5) / (1.0 - z) * (ro + bo * c) * ro / 3.0
res, _ = quad(func, kappa[0] - np.pi, kappa[1] - np.pi, epsabs=1e-12, epsrel=1e-12,)
if gradient:
# Deriv w/ respect to kappa is analytic
dpaldkappa = func(kappa - np.pi) * np.repeat([-1, 1], len(kappa) // 2).reshape(
1, -1
)
# Derivs w/ respect to b and r are tricky, need to integrate
def func_bo(phi):
c = np.cos(phi)
z = np.maximum(1e-12, 1 - ro ** 2 - bo ** 2 - 2 * bo * ro * c)
P = (1.0 - z ** 1.5) / (1.0 - z) * (ro + bo * c) * ro / 3.0
q = 3.0 * z ** 0.5 / (1.0 - z ** 1.5) - 2.0 / (1.0 - z)
return P * ((bo + ro * c) * q + 1.0 / (bo + ro / c))
dpaldbo, _ = quad(
func_bo, kappa[0] - np.pi, kappa[1] - np.pi, epsabs=1e-12, epsrel=1e-12,
)
def func_ro(phi):
c = np.cos(phi)
z = np.maximum(1e-12, 1 - ro ** 2 - bo ** 2 - 2 * bo * ro * c)
P = (1.0 - z ** 1.5) / (1.0 - z) * (ro + bo * c) * ro / 3.0
q = 3.0 * z ** 0.5 / (1.0 - z ** 1.5) - 2.0 / (1.0 - z)
return P * ((ro + bo * c) * q + 1.0 / ro + 1.0 / (ro + bo * c))
dpaldro, _ = quad(
func_ro, kappa[0] - np.pi, kappa[1] - np.pi, epsabs=1e-12, epsrel=1e-12,
)
return res, (dpaldbo, dpaldro, dpaldkappa)
else:
return res
def hyp2f1(a, b, c, z, gradient=False):
term = a * b * z / c
value = 1.0 + term
n = 1
while (np.abs(term) > STARRY_2F1_TOL) and (n < STARRY_2F1_MAXITER):
a += 1
b += 1
c += 1
n += 1
term *= a * b * z / c / n
value += term
if n == STARRY_2F1_MAXITER:
raise ValueError("Series for 2F1 did not converge.")
if gradient:
dFdz = a * b / c * hyp2f1(a + 1, b + 1, c + 1, z)
return value, dFdz
else:
return value
def el2(x, kc, a, b):
"""
Vectorized implementation of the `el2` function from
Bulirsch (1965). In this case, `x` is a *vector* of integration
limits. The halting condition does not depend on the value of `x`,
so it's much faster to evaluate all values of `x` at once!
"""
if kc == 0:
raise ValueError("Elliptic integral diverged because k = 1.")
c = x * x
d = 1 + c
p = np.sqrt((1 + kc * kc * c) / d)
d = x / d
c = d / (2 * p)
z = a - b
i = a
a = (b + a) / 2
y = np.abs(1 / x)
f = 0
l = np.zeros_like(x)
m = 1
kc = np.abs(kc)
for n in range(STARRY_EL2_MAX_ITER):
b = i * kc + b
e = m * kc
g = e / p
d = f * g + d
f = c
i = a
p = g + p
c = (d / p + c) / 2
g = m
m = kc + m
a = (b / m + a) / 2
y = -e / y + y
y[y == 0] = np.sqrt(e) * c[y == 0] * b
if np.abs(g - kc) > STARRY_EL2_CA * g:
kc = np.sqrt(e) * 2
l = l * 2
l[y < 0] = 1 + l[y < 0]
else:
break
if n == STARRY_EL2_MAX_ITER - 1:
raise ValueError(
"Elliptic integral EL2 failed to converge after {} iterations.".format(
STARRY_EL2_MAX_ITER
)
)
l[y < 0] = 1 + l[y < 0]
e = (np.arctan(m / y) + np.pi * l) * a / m
e[x < 0] = -e[x < 0]
return e + c * z
def EllipF(tanphi, k2, gradient=False):
kc2 = 1 - k2
F = el2(tanphi, np.sqrt(kc2), 1, 1)
if gradient:
E = EllipE(tanphi, k2)
p2 = (1 + tanphi ** 2) ** -1
q2 = p2 * tanphi ** 2
dFdtanphi = p2 * (1 - k2 * q2) ** -0.5
dFdk2 = 0.5 * (E / (k2 * kc2) - F / k2 - tanphi * dFdtanphi / kc2)
return F, (dFdtanphi, dFdk2)
else:
return F
def EllipE(tanphi, k2, gradient=False):
kc2 = 1 - k2
E = el2(tanphi, np.sqrt(kc2), 1, kc2)
if gradient:
F = EllipF(tanphi, k2)
p2 = (1 + tanphi ** 2) ** -1
q2 = p2 * tanphi ** 2
dEdtanphi = p2 * (1 - k2 * q2) ** 0.5
dEdk2 = 0.5 * (E - F) / k2
return E, (dEdtanphi, dEdk2)
else:
return E
def rj(x, y, z, p):
"""
Carlson elliptic integral RJ.
Bille Carlson,
Computing Elliptic Integrals by Duplication,
Numerische Mathematik,
Volume 33, 1979, pages 1-16.
Bille Carlson, Elaine Notis,
Algorithm 577, Algorithms for Incomplete Elliptic Integrals,
ACM Transactions on Mathematical Software,
Volume 7, Number 3, pages 398-403, September 1981
https://people.sc.fsu.edu/~jburkardt/f77_src/toms577/toms577.f
"""
# Limit checks
if x < STARRY_CRJ_LO_LIM:
x = STARRY_CRJ_LO_LIM
elif x > STARRY_CRJ_HI_LIM:
x = STARRY_CRJ_HI_LIM
if y < STARRY_CRJ_LO_LIM:
y = STARRY_CRJ_LO_LIM
elif y > STARRY_CRJ_HI_LIM:
y = STARRY_CRJ_HI_LIM
if z < STARRY_CRJ_LO_LIM:
z = STARRY_CRJ_LO_LIM
elif z > STARRY_CRJ_HI_LIM:
z = STARRY_CRJ_HI_LIM
if p < STARRY_CRJ_LO_LIM:
p = STARRY_CRJ_LO_LIM
elif p > STARRY_CRJ_HI_LIM:
p = STARRY_CRJ_HI_LIM
xn = x
yn = y
zn = z
pn = p
sigma = 0.0
power4 = 1.0
for k in range(STARRY_CRJ_MAX_ITER):
mu = 0.2 * (xn + yn + zn + pn + pn)
invmu = 1.0 / mu
xndev = (mu - xn) * invmu
yndev = (mu - yn) * invmu
zndev = (mu - zn) * invmu
pndev = (mu - pn) * invmu
eps = np.max([np.abs(xndev), np.abs(yndev), np.abs(zndev), np.abs(pndev)])
if eps < STARRY_CRJ_TOL:
ea = xndev * (yndev + zndev) + yndev * zndev
eb = xndev * yndev * zndev
ec = pndev * pndev
e2 = ea - 3.0 * ec
e3 = eb + 2.0 * pndev * (ea - ec)
s1 = 1.0 + e2 * (-C1 + 0.75 * C3 * e2 - 1.5 * C4 * e3)
s2 = eb * (0.5 * C2 + pndev * (-C3 - C3 + pndev * C4))
s3 = pndev * ea * (C2 - pndev * C3) - C2 * pndev * ec
value = 3.0 * sigma + power4 * (s1 + s2 + s3) / (mu * np.sqrt(mu))
return value
xnroot = np.sqrt(xn)
ynroot = np.sqrt(yn)
znroot = np.sqrt(zn)
lam = xnroot * (ynroot + znroot) + ynroot * znroot
alpha = pn * (xnroot + ynroot + znroot) + xnroot * ynroot * znroot
alpha = alpha * alpha
beta = pn * (pn + lam) * (pn + lam)
if alpha < beta:
sigma += power4 * np.arccos(np.sqrt(alpha / beta)) / np.sqrt(beta - alpha)
elif alpha > beta:
sigma += power4 * np.arccosh(np.sqrt(alpha / beta)) / np.sqrt(alpha - beta)
else:
sigma = sigma + power4 / np.sqrt(beta)
power4 *= 0.25
xn = 0.25 * (xn + lam)
yn = 0.25 * (yn + lam)
zn = 0.25 * (zn + lam)
pn = 0.25 * (pn + lam)
if k == STARRY_CRJ_MAX_ITER - 1:
raise ValueError(
"Elliptic integral RJ failed to converge after {} iterations.".format(
STARRY_CRJ_MAX_ITER
)
)
def EllipJ(kappa, k2, p):
phi = (kappa - np.pi) % (2 * np.pi)
cx = np.cos(phi / 2)
sx = np.sin(phi / 2)
w = 1 - cx ** 2 / k2
J = np.zeros_like(phi)
for i in range(len(w)):
J[i] = (np.cos(phi[i]) + 1) * cx[i] * rj(w[i], sx[i] * sx[i], 1.0, p[i])
return J
def ellip(bo, ro, kappa):
# Helper variables
k2 = (1 - ro ** 2 - bo ** 2 + 2 * bo * ro) / (4 * bo * ro)
if np.abs(1 - k2) < STARRY_K2_ONE_TOL:
if k2 == 1.0:
k2 = 1 + STARRY_K2_ONE_TOL
elif k2 < 1.0:
k2 = 1.0 - STARRY_K2_ONE_TOL
else:
k2 = 1.0 + STARRY_K2_ONE_TOL
k = np.sqrt(k2)
k2inv = 1 / k2
kinv = np.sqrt(k2inv)
kc2 = 1 - k2
# Complete elliptic integrals (we'll need them to compute offsets below)
if k2 < 1:
K0 = float(ellipk(k2))
E0 = float(ellipe(k2))
E0 = np.sqrt(k2inv) * (E0 - (1 - k2) * K0)
K0 *= np.sqrt(k2)
RJ0 = 0.0
else:
K0 = float(ellipk(k2inv))
E0 = float(ellipe(k2inv))
if (bo != 0) and (bo != ro):
p0 = (ro * ro + bo * bo + 2 * ro * bo) / (ro * ro + bo * bo - 2 * ro * bo)
PI0 = float(ellippi(1 - p0, k2inv))
RJ0 = -12.0 / (1 - p0) * (PI0 - K0)
else:
RJ0 = 0.0
if k2 < 1:
# Analytic continuation from (17.4.15-16) in Abramowitz & Stegun
# A better format is here: https://dlmf.nist.gov/19.7#ii
# Helper variables
arg = kinv * np.sin(kappa / 2)
tanphi = arg / np.sqrt(1 - arg ** 2)
tanphi[arg >= 1] = STARRY_HUGE_TAN
tanphi[arg <= -1] = -STARRY_HUGE_TAN
# Compute the elliptic integrals
F = EllipF(tanphi, k2) * k
E = kinv * (EllipE(tanphi, k2) - kc2 * kinv * F)
# Add offsets to account for the limited domain of `el2`
for i in range(len(kappa)):
if kappa[i] > 3 * np.pi:
F[i] += 4 * K0
E[i] += 4 * E0
elif kappa[i] > np.pi:
F[i] = 2 * K0 - F[i]
E[i] = 2 * E0 - E[i]
else:
# Helper variables
tanphi = np.tan(kappa / 2)
# Compute the elliptic integrals
F = EllipF(tanphi, k2inv) # el2(tanphi, kcinv, 1, 1)
E = EllipE(tanphi, k2inv) # el2(tanphi, kcinv, 1, kc2inv)
# Add offsets to account for the limited domain of `el2`
for i in range(len(kappa)):
if kappa[i] > 3 * np.pi:
F[i] += 4 * K0
E[i] += 4 * E0
elif kappa[i] > np.pi:
F[i] += 2 * K0
E[i] += 2 * E0
# Must compute RJ separately
if np.abs(bo - ro) > STARRY_PAL_BO_EQUALS_RO_TOL:
p = (ro * ro + bo * bo - 2 * ro * bo * np.cos(kappa)) / (
ro * ro + bo * bo - 2 * ro * bo
)
RJ = EllipJ(kappa, k2, p)
# Add offsets to account for the limited domain of `rj`
if RJ0 != 0.0:
for i in range(len(kappa)):
if kappa[i] > 3 * np.pi:
RJ[i] += 2 * RJ0
elif kappa[i] > np.pi:
RJ[i] += RJ0
else:
RJ = np.zeros_like(kappa)
# Compute the *definite* elliptic integrals
F = pairdiff(F)
E = pairdiff(E)
PIprime = pairdiff(RJ)
return F, E, PIprime
| 28.121495
| 88
| 0.465022
| 1,843
| 12,036
| 2.978296
| 0.15898
| 0.00838
| 0.005465
| 0.020404
| 0.368555
| 0.300237
| 0.259428
| 0.183822
| 0.177992
| 0.148843
| 0
| 0.076378
| 0.389747
| 12,036
| 427
| 89
| 28.187354
| 0.670933
| 0.122798
| 0
| 0.221498
| 0
| 0
| 0.019049
| 0
| 0
| 0
| 0
| 0.002342
| 0
| 1
| 0.039088
| false
| 0
| 0.013029
| 0
| 0.107492
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
24646d858bd45611b38521c81ad12d15324e8859
| 821
|
py
|
Python
|
capitalist/contrib/django/django_capitalist/validators.py
|
wit4er/python-capitalist
|
08f74ab80155b9a4a72c3a03bd1b13153fbdb891
|
[
"MIT"
] | null | null | null |
capitalist/contrib/django/django_capitalist/validators.py
|
wit4er/python-capitalist
|
08f74ab80155b9a4a72c3a03bd1b13153fbdb891
|
[
"MIT"
] | null | null | null |
capitalist/contrib/django/django_capitalist/validators.py
|
wit4er/python-capitalist
|
08f74ab80155b9a4a72c3a03bd1b13153fbdb891
|
[
"MIT"
] | 3
|
2020-03-02T12:40:00.000Z
|
2021-12-24T12:04:36.000Z
|
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from django.utils.translation import gettext_lazy as _
@deconstructible
class CapitalistAccountValidator:
message = _('Invalid Capitalist account number.')
code = 'invalid_capitalist_account'
account_types = (
'R', # rub
'U', # usd
'E', # eur
'T', # usd tether
'B', # btc
)
    def __init__(self, account_types=None):
        # Only override the class-level default when account types are given;
        # the original unconditionally set self.account_types = None.
        if account_types is not None:
            self.account_types = account_types
def __call__(self, value):
if len(value) < 2 or value[0] not in self.account_types:
raise ValidationError(self.message, code=self.code)
try:
int(value[1:])
except ValueError:
raise ValidationError(self.message, code=self.code)
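# --- Usage sketch (not part of the original module) ---
# Inside a configured Django project the validator can be attached to a form
# or model field; shown standalone here for illustration.
if __name__ == '__main__':
    validator = CapitalistAccountValidator()
    validator('R12345')        # valid account: passes silently
    try:
        validator('X1')        # unknown account type prefix
    except ValidationError:
        print('invalid account rejected')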
| 29.321429
| 64
| 0.644336
| 92
| 821
| 5.554348
| 0.554348
| 0.117417
| 0.093933
| 0.121331
| 0.168297
| 0.168297
| 0.168297
| 0
| 0
| 0
| 0
| 0.004951
| 0.261876
| 821
| 27
| 65
| 30.407407
| 0.838284
| 0.031669
| 0
| 0.086957
| 0
| 0
| 0.082383
| 0.032953
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.130435
| 0
| 0.391304
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
246a10b85099e22d96ea411de65b12bcd5947ba4
| 2,644
|
py
|
Python
|
src/query_spec.py
|
jdiaz/snorkel
|
d553480f3193f105d6f5befa04afb3656cd94d49
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
src/query_spec.py
|
jdiaz/snorkel
|
d553480f3193f105d6f5befa04afb3656cd94d49
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
src/query_spec.py
|
jdiaz/snorkel
|
d553480f3193f105d6f5befa04afb3656cd94d49
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
import werkzeug
try:
    import dotmap
except ImportError:
    dotmap = None
try:
    import addict
except ImportError:
    addict = None
class QuerySpec(object):
def __init__(self, query):
# TODO: list all attributes of a query spec up front so others know what to expect
md = werkzeug.MultiDict()
for q in query:
            if isinstance(q, dict):
                md.add(q['name'], q['value'].strip())
            elif isinstance(q, (list, tuple)):
                md.add(q[0], q[1].strip())
else:
md.add(q, query[q])
self.ismultidict = False
self.isdotmap = False
if isinstance(query, werkzeug.MultiDict):
self.ismultidict = True
elif addict and isinstance(query, addict.Dict):
self.isdotmap = True
elif dotmap and isinstance(query, dotmap.DotMap):
self.isdotmap = True
elif isinstance(query, list):
self.ismultidict = True
else:
raise Exception("Unknown entry for query spec")
self.md = md
# we will need to put together an exported interface
self.fields = self.get_fields()
self.groupby = self.get_groupby()
def __makedict__(self):
        ret = {}
for f in self.md:
if f.endswith("[]"):
if self.ismultidict:
ret[f] = self.md.getlist(f)
else:
ret[f] = self.md.get(f)
else:
ret[f] = self.md.get(f)
return ret
def __json__(self):
return self.__makedict__()
def setlist(self, k, v):
self.md.setlist(k, v)
    def set(self, k, v):
        if k in self.md:
            self.md.pop(k)
        self.md.add(k, v)
def add(self, k, v):
self.md.add(k, v)
    def getlist(self, k, d=None):
        # Avoid a mutable default argument; fall back to d (or []) when unset.
        if self.ismultidict:
            return self.md.getlist(k)
        return self.md.get(k) or (d if d is not None else [])
def get(self, k, d=None):
return self.md.get(k, d)
def get_metric(self):
op = self.md.get('metric')
if not op:
op = self.md.get('agg', '')
op = op.lstrip("$")
return op
def get_groupby(self):
g = self.getlist('groupby[]')
if not g:
g = self.getlist('group_by')
return g
def get_fields(self):
g = self.getlist('fields[]')
if not g:
g = self.getlist('fieldset')
return g
def get_custom_fields(self):
g = self.getlist('custom_fields[]')
if not g:
g = self.getlist('custom_fields')
return g
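# --- Usage sketch (not part of the original module) ---
# QuerySpec normalizes several query representations into a werkzeug
# MultiDict; a list-of-dicts input exercises the first branch of __init__.
if __name__ == '__main__':
    spec = QuerySpec([
        {'name': 'fields[]', 'value': 'host '},
        {'name': 'fields[]', 'value': 'status'},
        {'name': 'metric', 'value': '$avg'},
    ])
    print(spec.get_fields())   # ['host', 'status']
    print(spec.get_metric())   # 'avg'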
| 22.991304
| 90
| 0.507943
| 337
| 2,644
| 3.908012
| 0.27003
| 0.068337
| 0.041002
| 0.022779
| 0.18451
| 0.099468
| 0.064541
| 0.028094
| 0
| 0
| 0
| 0.001208
| 0.374054
| 2,644
| 114
| 91
| 23.192982
| 0.794562
| 0.049546
| 0
| 0.289157
| 0
| 0
| 0.043842
| 0
| 0
| 0
| 0
| 0.008772
| 0
| 1
| 0.144578
| false
| 0
| 0.036145
| 0.024096
| 0.301205
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
79e0e2d611865157c4511898cc3f7aa9623c7290
| 1,268
|
py
|
Python
|
mysite/note/management/commands/create_note.py
|
t2y/wagtail-app-sample
|
9c0592d80ccec3dd0d6f385f46372dccbcbd2a01
|
[
"Apache-2.0"
] | null | null | null |
mysite/note/management/commands/create_note.py
|
t2y/wagtail-app-sample
|
9c0592d80ccec3dd0d6f385f46372dccbcbd2a01
|
[
"Apache-2.0"
] | null | null | null |
mysite/note/management/commands/create_note.py
|
t2y/wagtail-app-sample
|
9c0592d80ccec3dd0d6f385f46372dccbcbd2a01
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, no_translations
from note.models import NoteIndexPage
from note.models import NotePage
class Command(BaseCommand):
help = 'Create note page'
def add_arguments(self, parser):
parser.add_argument(
'--index-id', action='store', required=True,
help='set index page id')
parser.add_argument(
'--title', action='store', required=True,
help='set title')
parser.add_argument(
'--intro', action='store', required=True,
help='set intro')
parser.add_argument(
'--owner', action='store', required=True,
help='set owner')
@no_translations
def handle(self, *args, **options):
index = NoteIndexPage.objects.get(id=options['index_id'])
User = get_user_model()
owner = User.objects.get(username=options['owner'])
note = NotePage(
title=options['title'],
intro=options['intro'],
date=datetime.now(),
owner=owner)
index.add_child(instance=note)
self.stdout.write(self.style.SUCCESS(f'created: {repr(note)}'))
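# --- Usage sketch (not part of the original module) ---
# The command can also be invoked programmatically; the id/username values
# below are placeholders for objects that must already exist in the database.
#
#   from django.core.management import call_command
#   call_command('create_note', index_id=3, title='Hello',
#                intro='First note', owner='admin')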
| 30.926829
| 71
| 0.615142
| 144
| 1,268
| 5.326389
| 0.388889
| 0.046936
| 0.088657
| 0.119948
| 0.156454
| 0.156454
| 0
| 0
| 0
| 0
| 0
| 0
| 0.259464
| 1,268
| 40
| 72
| 31.7
| 0.816826
| 0
| 0
| 0.125
| 0
| 0
| 0.12224
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.15625
| 0
| 0.28125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
79ea9a1464ec72ae217a22521335fffd9916b5cc
| 6,241
|
py
|
Python
|
danceschool/discounts/admin.py
|
django-danceschool/django-danceschool
|
65ae09ffdcb0821e82df0e1f634fe13c0384a525
|
[
"BSD-3-Clause"
] | 32
|
2017-09-12T04:25:25.000Z
|
2022-03-21T10:48:07.000Z
|
danceschool/discounts/admin.py
|
django-danceschool/django-danceschool
|
65ae09ffdcb0821e82df0e1f634fe13c0384a525
|
[
"BSD-3-Clause"
] | 97
|
2017-09-01T02:43:08.000Z
|
2022-01-03T18:20:34.000Z
|
danceschool/discounts/admin.py
|
django-danceschool/django-danceschool
|
65ae09ffdcb0821e82df0e1f634fe13c0384a525
|
[
"BSD-3-Clause"
] | 19
|
2017-09-26T13:34:46.000Z
|
2022-03-21T10:48:10.000Z
|
from django.forms import ModelForm, ModelChoiceField
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from dal import autocomplete
from .models import (
DiscountCategory, DiscountCombo, DiscountComboComponent,
PointGroup, PricingTierGroup, RegistrationDiscount,
CustomerGroupDiscount, CustomerDiscount
)
from danceschool.core.models import (
    Registration, PricingTier, Customer
)
class DiscountCategoryAdmin(admin.ModelAdmin):
list_display = ('name', 'order', 'cannotCombine')
list_editable = ('order', )
list_filter = ('cannotCombine', )
search_fields = ('name', )
class DiscountComboComponentInline(admin.StackedInline):
model = DiscountComboComponent
extra = 1
fields = (('pointGroup', 'quantity', ), 'allWithinPointGroup', ('level', 'weekday'), )
class CustomerDiscountInlineForm(ModelForm):
customer = ModelChoiceField(
queryset=Customer.objects.all(),
widget=autocomplete.ModelSelect2(
url='autocompleteCustomer',
attrs={
# This will set the input placeholder attribute:
'data-placeholder': _('Enter a customer name'),
# This will set the yourlabs.Autocomplete.minimumCharacters
# options, the naming conversion is handled by jQuery
'data-minimum-input-length': 2,
'data-max-results': 4,
'class': 'modern-style',
}
)
)
class Meta:
model = CustomerDiscount
exclude = []
class Media:
js = (
'admin/js/vendor/jquery/jquery.min.js',
'admin/js/jquery.init.js',
)
class CustomerGroupDiscountInline(admin.StackedInline):
model = CustomerGroupDiscount
extra = 1
classes = ['collapse', ]
class CustomerDiscountInline(admin.StackedInline):
model = CustomerDiscount
form = CustomerDiscountInlineForm
extra = 1
classes = ['collapse', ]
class DiscountComboAdminForm(ModelForm):
class Meta:
model = DiscountCombo
exclude = []
class Media:
js = (
'admin/js/vendor/jquery/jquery.min.js',
'js/discountcombo_collapsetypes.js',
)
class DiscountComboAdmin(admin.ModelAdmin):
inlines = [
DiscountComboComponentInline, CustomerGroupDiscountInline,
CustomerDiscountInline
]
form = DiscountComboAdminForm
list_display = (
'name', 'category', 'discountType', 'active', 'expirationDate',
'restrictions'
)
list_filter = (
'category', 'discountType', 'active', 'newCustomersOnly', 'expirationDate'
)
ordering = ('name', )
actions = ['enableDiscount', 'disableDiscount']
fieldsets = (
(None, {
'fields': (
'name', 'category',
('active', 'expirationDate'),
'newCustomersOnly', 'studentsOnly', 'daysInAdvanceRequired',
'firstXRegistered', 'customerMatchRequired', 'discountType',
)
}),
(_('Flat-Price Discount (in default currency)'), {
'classes': ('type_flatPrice', ),
'fields': ('onlinePrice', 'doorPrice'),
}),
(_('Dollar Discount (in default currency)'), {
'classes': ('type_dollarDiscount', ),
'fields': ('dollarDiscount', ),
}),
(_('Percentage Discount'), {
'classes': ('type_percentageDiscount', ),
'fields': ('percentDiscount', 'percentUniversallyApplied'),
}),
)
def restrictions(self, obj):
text = []
if obj.studentsOnly:
text.append(_('Students only'))
if obj.newCustomersOnly:
text.append(_('First-time customer'))
        if obj.daysInAdvanceRequired:
            # Interpolate after translation so the lazy gettext lookup works
            text.append(_('%s day advance registration') % obj.daysInAdvanceRequired)
        if obj.firstXRegistered:
            text.append(_('First %s to register') % obj.firstXRegistered)
if obj.customerMatchRequired:
text.append(_('Primary customer registrations only'))
return ', '.join([str(x) for x in text])
restrictions.short_description = _('Restrictions')
def disableDiscount(self, request, queryset):
rows_updated = queryset.update(active=False)
if rows_updated == 1:
message_bit = "1 discount was"
else:
message_bit = "%s discounts were" % rows_updated
self.message_user(request, "%s successfully disabled." % message_bit)
disableDiscount.short_description = _('Disable selected Discounts')
def enableDiscount(self, request, queryset):
rows_updated = queryset.update(active=True)
if rows_updated == 1:
message_bit = "1 discount was"
else:
message_bit = "%s discounts were" % rows_updated
self.message_user(request, "%s successfully enabled." % message_bit)
enableDiscount.short_description = _('Enable selected Discounts')
class RegistrationDiscountInline(admin.TabularInline):
model = RegistrationDiscount
readonly_fields = ('discount', 'discountAmount')
exclude = ('applied',)
extra = 0
# Prevents adding new discounts without going through
# the standard registration process
def has_add_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class PricingTierGroupInline(admin.TabularInline):
model = PricingTierGroup
extra = 0
verbose_name = _('pricing tier discount group')
verbose_name_plural = _('pricing tier discount groups')
class PointGroupAdmin(admin.ModelAdmin):
inlines = (PricingTierGroupInline, )
list_display = ('name', )
ordering = ('name', )
# This adds the inlines to Registration and PricingTier without subclassing
admin.site._registry[Registration].inlines.insert(0, RegistrationDiscountInline)
admin.site._registry[PricingTier].inlines.insert(0, PricingTierGroupInline)
admin.site.register(DiscountCategory, DiscountCategoryAdmin)
admin.site.register(DiscountCombo, DiscountComboAdmin)
admin.site.register(PointGroup, PointGroupAdmin)
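# --- Illustrative variant (not part of the original module) ---
# The registry patch above assumes Registration and PricingTier are already
# registered with the default admin site. A guarded helper, sketched here,
# would avoid a KeyError if the import order ever changes:
#
#   def _prepend_inline(model, inline):
#       model_admin = admin.site._registry.get(model)
#       if model_admin is not None and inline not in model_admin.inlines:
#           model_admin.inlines.insert(0, inline)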
| 32.170103
| 90
| 0.645089
| 534
| 6,241
| 7.438202
| 0.391386
| 0.016616
| 0.011329
| 0.007049
| 0.156093
| 0.143001
| 0.124874
| 0.105237
| 0.08006
| 0.08006
| 0
| 0.00298
| 0.247236
| 6,241
| 193
| 91
| 32.336788
| 0.842486
| 0.050633
| 0
| 0.233333
| 0
| 0
| 0.216458
| 0.041061
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.04
| 0.013333
| 0.406667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
79eaca1eab171f82b2698cf469eb4057caa27f84
| 9,666
|
py
|
Python
|
tools/create_cluster.py
|
cosh/compute-cassandra-python
|
2ae6454bbb86d00252afa415042a5e8b823c763d
|
[
"Apache-2.0"
] | 1
|
2016-12-21T09:59:16.000Z
|
2016-12-21T09:59:16.000Z
|
tools/create_cluster.py
|
cosh/compute-cassandra-python
|
2ae6454bbb86d00252afa415042a5e8b823c763d
|
[
"Apache-2.0"
] | null | null | null |
tools/create_cluster.py
|
cosh/compute-cassandra-python
|
2ae6454bbb86d00252afa415042a5e8b823c763d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to start up a demo Cassandra cluster on Google Compute Engine."""
import os
import time
import sys
# Read in global config variables
mydir = os.path.dirname(os.path.realpath(__file__))
common = mydir + os.path.sep + "common.py"
execfile(common, globals())
# Find a US region with at least two UP zones.
def find_zones():
"""Find a US region with at least two UP zones."""
print("=> Finding suitable region, selecting zones:"),
regions = subprocess.check_output(["gcutil", "--service_version",
API_VERSION, "--format=names", "listregions", "--filter",
"name eq 'us.*'"], stderr=NULL).split('\n')[0:-1]
for region in regions:
zones = subprocess.check_output(["gcutil", "--service_version",
API_VERSION, "--format=names", "listzones", "--filter",
"status eq UP", "--filter", "name eq '%s.*'" % region],
stderr=NULL).split('\n')[0:-1]
if len(zones) > 1:
print(zones)
return zones
raise BE("Error: No suitable US regions found with 2+ zones")
# Create all nodes synchronously
def create_nodes(zones):
"""Create all nodes synchronously."""
print("=> Creating %d '%s' '%s' nodes" % (NODES_PER_ZONE*len(zones),
IMAGE, MACHINE_TYPE))
for zone in zones:
for i in range(NODES_PER_ZONE):
nodename = "%s-%s-%d" % (NODE_PREFIX, zone[-1:], i)
r = subprocess.call(["gcutil",
"--service_version=%s" % API_VERSION,
"addinstance", nodename, "--zone=%s" % zone,
"--machine_type=%s" % MACHINE_TYPE, "--image=%s" % IMAGE,
"--service_account_scopes=%s" % SCOPES,
"--wait_until_running"], stdout=NULL, stderr=NULL)
if r != 0:
raise BE("Error: could not create node %s" % nodename)
print("--> Node %s created" % nodename)
# Customize node_config_tmpl script
def customize_config_script(cluster):
"""Customize the node_config_tmpl script"""
variable_substitutes = {
'@GCE_USERNAME@': GCE_USERNAME,
'@GCS_BUCKET@': GCS_BUCKET,
'@JRE7_INSTALL@': JRE7_INSTALL,
'@JRE7_VERSION@': JRE7_VERSION
}
seed_data, seed_ips = _identify_seeds(cluster)
variable_substitutes['@SEED_IPS@'] = ",".join(seed_ips)
variable_substitutes['@SNITCH_TEXT@'] = _generate_snitch_text(cluster)
script_path = _update_node_script(variable_substitutes)
return seed_data, script_path
# Configure each cluster node
def configure_nodes(cluster, script_path):
"""Configure each cluster node."""
print("=> Uploading and running configure script on nodes:"),
for zone in cluster.keys():
for node in cluster[zone]:
_ = subprocess.call(["gcutil",
"--service_version=%s" % API_VERSION, "push",
"--zone=%s" % zone, node['name'], script_path,
"/tmp/c.sh"], stdout=NULL, stderr=NULL)
done = subprocess.call(["gcutil",
"--service_version=%s" % API_VERSION, "ssh",
"--zone=%s" % zone, node['name'],
"sudo chmod +x /tmp/c.sh && sudo /tmp/c.sh"],
stdout=NULL, stderr=NULL)
if done != 0:
err = "Error: problem uploading/running config script "
err += "on %s" % node['name']
raise BE(err)
print("."),
sys.stdout.flush()
print("done.")
# Perform variable substituions on the node_config_tmpl script
def _update_node_script(variable_substitutes):
"""Update the node_config_tmpl script"""
template = "%s%s%s" % (os.path.dirname(os.path.realpath(__file__)),
os.path.sep,"node_config_tmpl")
script_path = template + ".sh"
template_fh = open(template, "r")
script_fh = open(script_path, "w")
for line in template_fh:
for k, v in variable_substitutes.iteritems():
if line.find(k) > -1:
line = line.replace(k,v)
script_fh.write(line)
template_fh.close()
script_fh.close()
return script_path
# Update the SEED list on each node.
def _identify_seeds(cluster):
"""Update the SEED list on each node."""
# Select first node from each zone as a SEED node.
seed_ips = []
seed_data = []
for z in cluster.keys():
seed_node = cluster[z][0]
seed_ips.append(seed_node['ip'])
seed_data.append(seed_node)
return seed_data, seed_ips
# Generate the text for the PropertyFileSnitch file
def _generate_snitch_text(cluster):
"""Generate the text for the PropertyFileSnitch file"""
    i = 1
contents = [
"# Auto-generated topology snitch during cluster turn-up", "#",
"# Cassandra node private IP=Datacenter:Rack", "#", ""
]
for z in cluster.keys():
contents.append("# Zone \"%s\" => ZONE%d" % (z, i))
for node in cluster[z]:
contents.append("%s=ZONE%d:RAC1" % (node['ip'], i))
        i += 1
contents.append("")
contents.append("# default for unknown hosts")
contents.append("default=ZONE1:RAC1")
contents.append("")
return "\n".join(contents)
# Cleanly start up Cassandra on specified node
def node_start_cassandra(zone, nodename):
"""Cleanly start up Cassandra on specified node"""
status = "notok"
tries = 0
print("--> Attempting to start cassandra on node %s" % nodename),
while status != "ok" and tries < 5:
_ = subprocess.call(["gcutil", "--service_version=%s" % API_VERSION,
"ssh", "--zone=%s" % zone, nodename,
"sudo service cassandra stop"], stdout=NULL, stderr=NULL)
_ = subprocess.call(["gcutil", "--service_version=%s" % API_VERSION,
"ssh", "--zone=%s" % zone, nodename,
"sudo rm -f /var/run/cassandra/cassandra.pid"],
stdout=NULL, stderr=NULL)
_ = subprocess.call(["gcutil", "--service_version=%s" % API_VERSION,
"ssh", "--zone=%s" % zone, nodename,
"sudo rm -rf /var/lib/cassandra/*"], stdout=NULL, stderr=NULL)
_ = subprocess.call(["gcutil", "--service_version=%s" % API_VERSION,
"ssh", "--zone=%s" % zone,nodename,
"sudo service cassandra start"], stdout=NULL, stderr=NULL)
r = subprocess.call(["gcutil", "--service_version=%s" % API_VERSION,
"ssh", "--zone=%s" % zone,nodename,
"sudo ls /var/run/cassandra/cassandra.pid"],
stdout=NULL, stderr=NULL)
if r == 0:
status = "ok"
print("UP")
break
tries += 1
print("."),
if status == "notok":
print("FAILED")
raise BE("Error: cassandra failing to start on node %s" % nodename)
# Bring up cassandra on cluster nodes, SEEDs first
def start_cluster(seed_data, cluster):
"""Bring up cassandra on cluster nodes, SEEDs first"""
# Start SEED nodes first.
print("=> Starting cassandra cluster SEED nodes")
started_nodes = []
for node in seed_data:
node_start_cassandra(node['zone'], node['name'])
started_nodes.append(node['name'])
# Start remaining non-seed nodes.
print("=> Starting cassandra cluster non-SEED nodes")
for z in cluster.keys():
for node in cluster[z]:
if node['name'] not in started_nodes:
node_start_cassandra(z, node['name'])
# Display cluster status by running 'nodetool status' on a node
def verify_cluster(cluster):
"""Display cluster status by running 'nodetool status' on a node"""
keys = cluster.keys()
zone = keys[0]
nodename = cluster[zone][0]['name']
status = subprocess.check_output(["gcutil",
"--service_version=%s" % API_VERSION, "ssh",
"--zone=%s" % zone, nodename, "nodetool status"], stderr=NULL)
print("=> Output from node %s and 'nodetool status'" % nodename)
print(status)
def main():
# Find a suitable US region with more than a single UP zone.
zones = find_zones()
# Make sure we don't exceed MAX_NODES.
if NODES_PER_ZONE * len(zones) > MAX_NODES:
error_string = "Error: MAX_NODES exceeded. Adjust tools/common.py "
error_string += "NODES_PER_ZONE or MAX_NODES."
raise BE(error_string)
# Create the nodes, upload/install JRE, customize/execute config script
create_nodes(zones)
cluster = get_cluster()
seed_data, script_path = customize_config_script(cluster)
configure_nodes(cluster, script_path)
# Bring up the cluster and give it a minute for nodes to join.
start_cluster(seed_data, cluster)
print("=> Cassandra cluster is up and running on all nodes")
print("=> Sleeping 30 seconds to give nodes time to join cluster")
time.sleep(30)
# Run nodetool status on a node and display output.
verify_cluster(cluster)
if __name__ == '__main__':
main()
sys.exit(0)
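# --- Porting note (not part of the original script) ---
# This script targets Python 2 (execfile, dict.iteritems, print with a
# trailing comma). A minimal sketch of the Python 3 equivalents, should a
# port ever be needed:
#
#   exec(compile(open(common, 'rb').read(), common, 'exec'), globals())
#   for k, v in variable_substitutes.items():
#       ...
#   print("...", end='')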
| 38.357143
| 78
| 0.61049
| 1,219
| 9,666
| 4.703856
| 0.235439
| 0.024939
| 0.038368
| 0.032961
| 0.367457
| 0.28392
| 0.262121
| 0.189571
| 0.167771
| 0.146669
| 0
| 0.005436
| 0.257707
| 9,666
| 251
| 79
| 38.50996
| 0.793728
| 0.200807
| 0
| 0.132184
| 0
| 0
| 0.245908
| 0.011916
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063218
| false
| 0
| 0.017241
| 0
| 0.109195
| 0.097701
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
79eb237c4bd2d1099cc8ababd7408be4112e4eb8
| 770
|
py
|
Python
|
lakey_finicity/models/connect/answered_mfa_question.py
|
jeremydeanlakey/lakey-finicity-python
|
f0b5ae6febb9337f0e28731f631b726fca940d2c
|
[
"MIT"
] | 1
|
2021-02-09T14:44:55.000Z
|
2021-02-09T14:44:55.000Z
|
lakey_finicity/models/connect/answered_mfa_question.py
|
jeremydeanlakey/lakey-finicity-python
|
f0b5ae6febb9337f0e28731f631b726fca940d2c
|
[
"MIT"
] | null | null | null |
lakey_finicity/models/connect/answered_mfa_question.py
|
jeremydeanlakey/lakey-finicity-python
|
f0b5ae6febb9337f0e28731f631b726fca940d2c
|
[
"MIT"
] | 1
|
2022-01-26T18:09:33.000Z
|
2022-01-26T18:09:33.000Z
|
from dataclasses import dataclass
# https://community.finicity.com/s/article/207505363-Multi-Factor-Authentication-MFA
@dataclass
class AnsweredMfaQuestion(object):
text: str
answer: str # Added by the partner for calls to the "MFA Answers" services
_unused_fields: dict # this is for forward compatibility and should be empty
def to_dict(self) -> dict:
return {
'text': self.text,
'answer': self.answer,
}
@staticmethod
def from_dict(data: dict):
data = dict(data) # don't mutate the original
text = data.pop('text')
answer = data.pop('answer')
return AnsweredMfaQuestion(
text=text,
answer=answer,
_unused_fields=data,
)
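# --- Usage sketch (not part of the original module) ---
# Round-trip through from_dict/to_dict; unknown keys survive in
# _unused_fields for forward compatibility.
if __name__ == '__main__':
    q = AnsweredMfaQuestion.from_dict({
        'text': "Mother's maiden name?",
        'answer': 'Smith',
        'futureField': 42,
    })
    assert q.to_dict() == {'text': "Mother's maiden name?", 'answer': 'Smith'}
    assert q._unused_fields == {'futureField': 42}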
| 28.518519
| 84
| 0.623377
| 89
| 770
| 5.325843
| 0.573034
| 0.063291
| 0.050633
| 0.067511
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016334
| 0.284416
| 770
| 26
| 85
| 29.615385
| 0.84392
| 0.28961
| 0
| 0
| 0
| 0
| 0.0369
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.047619
| 0.047619
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
79ebb38627cea4de632bbd11155e51f5abc082e1
| 3,829
|
py
|
Python
|
test/python/k8s/test_client_cert_inject.py
|
cyberark/conjur-openapi-spec
|
40f2f1a6b14fb3536facc628e63321a17667148c
|
[
"Apache-2.0"
] | 6
|
2020-12-03T19:48:30.000Z
|
2021-07-19T08:36:43.000Z
|
test/python/k8s/test_client_cert_inject.py
|
cyberark/conjur-openapi-spec
|
40f2f1a6b14fb3536facc628e63321a17667148c
|
[
"Apache-2.0"
] | 116
|
2020-11-24T21:56:49.000Z
|
2021-12-10T19:27:39.000Z
|
test/python/k8s/test_client_cert_inject.py
|
cyberark/conjur-openapi-spec
|
40f2f1a6b14fb3536facc628e63321a17667148c
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
import base64
import os
import pathlib
import unittest
import conjur
from OpenSSL import crypto
CERT_DIR = pathlib.Path('config/https')
SSL_CERT_FILE = 'ca.crt'
CONJUR_CERT_FILE = 'conjur.crt'
CONJUR_KEY_FILE = 'conjur.key'
def generateKey(key_type, bits):
    """Generate a private key using OpenSSL (parameter renamed to avoid shadowing the built-in `type`)."""
    key = crypto.PKey()
    key.generate_key(key_type, bits)
    return key
def generateCSR(host_id, key):
"""Generate a Certificate Signing Request"""
pod_name = os.environ['MY_POD_NAME']
namespace = os.environ['TEST_APP_NAMESPACE']
SANURI = f'spiffe://cluster.local/namespace/{namespace}/podname/{pod_name}'
req = crypto.X509Req()
req.get_subject().CN = host_id
req.set_pubkey(key)
formatted_SAN = f'URI:{SANURI}'
req.add_extensions([
crypto.X509Extension(
'subjectAltName'.encode('ascii'), False, formatted_SAN.encode('ascii')
)
])
req.sign(key, "sha1")
return crypto.dump_certificate_request(crypto.FILETYPE_PEM, req)
class TestClientCertInject(unittest.TestCase):
def setUp(self):
with open(os.environ['CONJUR_AUTHN_TOKEN_FILE'], 'r') as content:
encoded_token = base64.b64encode(content.read().replace('\r', '').encode()).decode('utf-8')
config = conjur.Configuration(
host='https://conjur-oss:9443'
)
with open(CERT_DIR.joinpath(SSL_CERT_FILE), 'w') as content:
content.write(os.environ['CONJUR_SSL_CERTIFICATE'])
config.ssl_ca_cert = CERT_DIR.joinpath(SSL_CERT_FILE)
config.username = 'admin'
config.api_key = {'Authorization': 'Token token="{}"'.format(encoded_token)}
self.client = conjur.ApiClient(config)
self.api = conjur.api.AuthenticationApi(self.client)
key = generateKey(crypto.TYPE_RSA, 2048)
self.csr = generateCSR('app-test/*/*', key)
def tearDown(self):
self.client.close()
def test_inject_202(self):
"""Test 202 status response when successfully requesting a client certificate injection
202 - successful request and injection
"""
# optional prefix
# prefix = 'host/conjur/authn-k8s/my-authenticator-id/apps'
response, status, _ = self.api.k8s_inject_client_cert_with_http_info(
'my-authenticator-id',
body=self.csr
)
self.assertEqual(status, 202)
self.assertEqual(None, response)
def test_inject_400(self):
"""Test 400 status response when successfully requesting a cert injection
400 - Bad Request caught by NGINX
"""
with self.assertRaises(conjur.ApiException) as context:
self.api.k8s_inject_client_cert(
'\00',
body=self.csr
)
self.assertEqual(context.exception.status, 400)
def test_inject_401(self):
"""Test 401 status response when requesting a cert injection
401 - unauthorized request. This happens from invalid Conjur auth token,
incorrect service ID, malformed CSR and others
"""
with self.assertRaises(conjur.ApiException) as context:
self.api.k8s_inject_client_cert(
'wrong-service-id',
body=self.csr
)
self.assertEqual(context.exception.status, 401)
def test_inject_404(self):
"""Test 404 status response when requesting a cert injection
404 - Resource not found, malformed service ID
"""
with self.assertRaises(conjur.ApiException) as context:
self.api.k8s_inject_client_cert(
'00.00',
body=self.csr
)
self.assertEqual(context.exception.status, 404)
if __name__ == '__main__':
unittest.main()
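# --- Usage sketch (not part of the original module) ---
# generateCSR reads pod metadata from the environment; the values below are
# stand-ins so the helper can be exercised outside a cluster.
#
#   import os
#   os.environ.setdefault('MY_POD_NAME', 'demo-pod')
#   os.environ.setdefault('TEST_APP_NAMESPACE', 'demo-ns')
#   key = generateKey(crypto.TYPE_RSA, 2048)
#   print(generateCSR('app-test/*/*', key).decode()[:64])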
| 30.632
| 103
| 0.641682
| 452
| 3,829
| 5.263274
| 0.347345
| 0.014712
| 0.021858
| 0.026902
| 0.270282
| 0.270282
| 0.190416
| 0.155107
| 0.134931
| 0.093737
| 0
| 0.028253
| 0.251241
| 3,829
| 124
| 104
| 30.879032
| 0.801535
| 0.171063
| 0
| 0.128205
| 0
| 0
| 0.112198
| 0.035225
| 0
| 0
| 0
| 0
| 0.102564
| 1
| 0.102564
| false
| 0
| 0.089744
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
79ecb3c7117dbfd7119dcd2c527e73deda680a91
| 2,909
|
py
|
Python
|
khl/websocket/net_client_websocket.py
|
hang333/khl.py
|
1d235541528b070a1206eaf31ccaded9eac52da1
|
[
"MIT"
] | null | null | null |
khl/websocket/net_client_websocket.py
|
hang333/khl.py
|
1d235541528b070a1206eaf31ccaded9eac52da1
|
[
"MIT"
] | null | null | null |
khl/websocket/net_client_websocket.py
|
hang333/khl.py
|
1d235541528b070a1206eaf31ccaded9eac52da1
|
[
"MIT"
] | null | null | null |
import asyncio
from asyncio.events import AbstractEventLoop
import json
import logging
import zlib
from aiohttp import ClientSession, ClientWebSocketResponse
from ..cert import Cert
from ..hardcoded import API_URL
from ..net_client import BaseClient
class WebsocketClient(BaseClient):
"""
implements BaseClient with websocket protocol
"""
__slots__ = 'cert', 'compress', 'event_queue', 'NEWEST_SN', 'RAW_GATEWAY'
logger = logging.getLogger('khl.WebsocketClient')
__loop = asyncio.get_event_loop()
def __init__(self, cert: Cert, compress: bool = True):
super().__init__()
self.cert = cert
self.compress = compress
self.event_queue = asyncio.Queue()
self.NEWEST_SN = 0
self.RAW_GATEWAY = ''
async def heartbeater(self, ws_conn: ClientWebSocketResponse):
while True:
await asyncio.sleep(26)
await ws_conn.send_json({'s': 2, 'sn': self.NEWEST_SN})
    def setup_event_loop(self, loop: AbstractEventLoop):
        self.__loop = loop
        # asyncio.Queue stopped accepting a `loop` argument in Python 3.10;
        # the queue picks up the running loop when first awaited.
        self.event_queue = asyncio.Queue()
        return
def __raw_2_req(self, data: bytes) -> dict:
"""
convert raw data to human-readable request data
decompress and decrypt data(if configured with compress or encrypt)
:param data: raw data
:return human-readable request data
"""
        if self.compress:
            data = zlib.decompress(data)
data = json.loads(str(data, encoding='utf-8'))
return data
async def _main(self):
async with ClientSession() as cs:
headers = {
'Authorization': f"Bot {self.cert.token}",
'Content-type': 'application/json'
}
            params = {'compress': int(self.compress)}
async with cs.get(f"{API_URL}/gateway/index",
headers=headers,
params=params) as res:
res_json = await res.json()
if res_json['code'] != 0:
self.logger.error(f'error getting gateway: {res_json}')
return
self.RAW_GATEWAY = res_json['data']['url']
async with cs.ws_connect(self.RAW_GATEWAY) as ws_conn:
asyncio.ensure_future(self.heartbeater(ws_conn),
loop=self.__loop)
async for msg in ws_conn:
try:
req_json = self.__raw_2_req(msg.data)
except Exception as e:
logging.error(e)
return
if req_json['s'] == 0:
self.NEWEST_SN = req_json['sn']
event = req_json['d']
await self.event_queue.put(event)
async def run(self):
await self._main()
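# --- Usage sketch (not part of the original module) ---
# Driving the client requires a valid bot token; Cert's constructor signature
# is assumed here, so treat this as illustrative only.
#
#   async def consume():
#       client = WebsocketClient(Cert(token='your-bot-token'))
#       asyncio.ensure_future(client.run())
#       while True:
#           event = await client.event_queue.get()
#           print(event)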
| 33.056818
| 77
| 0.562049
| 325
| 2,909
| 4.836923
| 0.338462
| 0.019084
| 0.026718
| 0.020356
| 0.033079
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005768
| 0.344448
| 2,909
| 87
| 78
| 33.436782
| 0.818563
| 0.075627
| 0
| 0.048387
| 0
| 0
| 0.08035
| 0.008759
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048387
| false
| 0
| 0.145161
| 0
| 0.322581
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
79f1612b542eb9d1c94aaf73dc2f5954230da328
| 5,305
|
py
|
Python
|
taattack/utils.py
|
linerxliner/ValCAT
|
e62985c6c64f6415bb2bb4716bd02d9686badd47
|
[
"MIT"
] | null | null | null |
taattack/utils.py
|
linerxliner/ValCAT
|
e62985c6c64f6415bb2bb4716bd02d9686badd47
|
[
"MIT"
] | null | null | null |
taattack/utils.py
|
linerxliner/ValCAT
|
e62985c6c64f6415bb2bb4716bd02d9686badd47
|
[
"MIT"
] | null | null | null |
import flair
import numpy as np
import spacy
import tensorflow_hub as hub
import torch
from flair.data import Sentence
from flair.models import SequenceTagger
from nltk.tokenize.treebank import TreebankWordDetokenizer
from sklearn.metrics.pairwise import cosine_similarity
from string import punctuation
from transformers import AutoTokenizer, GPT2LMHeadModel, MT5ForConditionalGeneration, T5ForConditionalGeneration
from .config import DEVICES
class ModelPool:
ENCODER_DECODER2MODEL_TOKENIZER = {
't5-base': 't5_base',
't5-large': 't5_large',
't5-v1_1-base': 't5_v1_1_base',
'mt5-base': 'mt5_base',
}
def encoder_decoder2model_token(self, encoder_decoder):
return getattr(self, self.ENCODER_DECODER2MODEL_TOKENIZER[encoder_decoder])
@property
def flair_pos_tagger(self):
if not hasattr(self, '_flair_pos_tagger'):
flair.device = torch.device(DEVICES[1])
self._flair_pos_tagger = SequenceTagger.load('upos-fast')
return self._flair_pos_tagger
@property
def gpt2(self):
if not hasattr(self, '_gpt2_model'):
self._gpt2_model = GPT2LMHeadModel.from_pretrained('gpt2')
if not hasattr(self, '_gpt2_tokenizer'):
self._gpt2_tokenizer = AutoTokenizer.from_pretrained('gpt2', use_fast=True)
return self._gpt2_model, self._gpt2_tokenizer
@property
def mt5_base(self):
if not hasattr(self, '_mt5_base_model'):
self._mt5_base_model = MT5ForConditionalGeneration.from_pretrained('google/mt5-base')
if not hasattr(self, '_mt5_base_tokenizer'):
self._mt5_base_tokenizer = AutoTokenizer.from_pretrained('google/mt5-base', use_fast=True)
return self._mt5_base_model, self._mt5_base_tokenizer
@property
def spacy_model(self):
if not hasattr(self, '_spacy_model'):
self._spacy_model = spacy.load('en_core_web_sm')
return self._spacy_model
@property
def t5_base(self):
if not hasattr(self, '_t5_base_model'):
self._t5_base_model = T5ForConditionalGeneration.from_pretrained('t5-base')
if not hasattr(self, '_t5_base_tokenizer'):
self._t5_base_tokenizer = AutoTokenizer.from_pretrained('t5-base', use_fast=True)
return self._t5_base_model, self._t5_base_tokenizer
@property
def t5_large(self):
if not hasattr(self, '_t5_large_model'):
self._t5_large_model = T5ForConditionalGeneration.from_pretrained('t5-large')
if not hasattr(self, '_t5_large_tokenizer'):
self._t5_large_tokenizer = AutoTokenizer.from_pretrained('t5-large', use_fast=True)
return self._t5_large_model, self._t5_large_tokenizer
@property
def t5_v1_1_base(self):
if not hasattr(self, '_t5_v1_1_base_model'):
self._t5_v1_1_base_model = T5ForConditionalGeneration.from_pretrained('google/t5-v1_1-base')
if not hasattr(self, '_t5_v1_1_base_tokenizer'):
self._t5_v1_1_base_tokenizer = AutoTokenizer.from_pretrained('google/t5-v1_1-base', use_fast=True)
return self._t5_v1_1_base_model, self._t5_v1_1_base_tokenizer
@property
def treebank_word_detokenizer(self):
if not hasattr(self, '_treebank_word_detokenizer'):
self._treebank_word_detokenizer = TreebankWordDetokenizer()
return self._treebank_word_detokenizer
@property
def use(self):
if not hasattr(self, '_use'):
self._use = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4')
return self._use
model_pool = ModelPool()
def tokenize(text):
doc = model_pool.spacy_model(text)
tokens = [token.text for token in doc]
return tokens
def detokenize(tokens):
return model_pool.treebank_word_detokenizer.detokenize(tokens)
def is_continuous(sequence):
if len(sequence) == 0:
return False
for i in range(len(sequence) - 1):
if sequence[i] + 1 != sequence[i + 1]:
return False
return True
def is_punctuation(c):
return len(c) == 1 and c in punctuation
def is_one_word(text):
return len(tokenize(text)) == 1
def get_use_sim(text1, text2):
orig_embd, adv_embd = model_pool.use([text1, text2]).numpy()
sim = cosine_similarity(orig_embd[np.newaxis, ...], adv_embd[np.newaxis, ...])[0, 0]
return sim.item()
def get_lcs_len(words1, words2):
num_words1, num_words2 = len(words1), len(words2)
dp = np.zeros((num_words1 + 1, num_words2 + 1), dtype=int)
for i in range(1, num_words1 + 1):
for j in range(1, num_words2 + 1):
if words1[i - 1] == words2[j - 1]:
dp[i, j] = dp[i - 1, j - 1] + 1
else:
dp[i, j] = max(dp[i - 1, j], dp[i, j - 1])
return dp[num_words1, num_words2].item()
def get_num_word_pert(words1, words2):
words1, words2 = list(map(lambda w: w.lower(), words1)), list(map(lambda w: w.lower(), words2))
return max(len(words1), len(words2)) - get_lcs_len(words1, words2)
def get_pos_list(words):
sentence = Sentence(detokenize(words), use_tokenizer=lambda text: words)
model_pool.flair_pos_tagger.predict(sentence)
return [token.annotation_layers['pos'][0]._value for token in sentence.tokens]
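# --- Usage sketch (not part of the original module) ---
# The pure-Python helpers can be exercised without loading any of the lazy
# models, assuming the module's imports resolve:
if __name__ == '__main__':
    a = ['the', 'cat', 'sat', 'here']
    b = ['the', 'dog', 'sat', 'here']
    print(get_lcs_len(a, b))         # 3
    print(get_num_word_pert(a, b))   # 1
    print(is_continuous([2, 3, 4]))  # True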
| 32.746914
| 112
| 0.684826
| 713
| 5,305
| 4.798036
| 0.175316
| 0.03157
| 0.049108
| 0.065478
| 0.326805
| 0.196141
| 0.111371
| 0.028939
| 0.018123
| 0.018123
| 0
| 0.032896
| 0.209237
| 5,305
| 161
| 113
| 32.950311
| 0.782598
| 0
| 0
| 0.094828
| 0
| 0
| 0.090858
| 0.009237
| 0
| 0
| 0
| 0
| 0
| 1
| 0.163793
| false
| 0
| 0.103448
| 0.034483
| 0.465517
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
79f1aa731dd46a3731e4a9c3f27085c9eb89008f
| 765
|
py
|
Python
|
solutions/57.py
|
pacokwon/leetcode
|
37c943d371c106d1e6f24e065700e5edd1c3f9f9
|
[
"MIT"
] | 2
|
2022-01-18T08:57:13.000Z
|
2022-01-18T15:49:06.000Z
|
solutions/57.py
|
pacokwon/leetcode
|
37c943d371c106d1e6f24e065700e5edd1c3f9f9
|
[
"MIT"
] | null | null | null |
solutions/57.py
|
pacokwon/leetcode
|
37c943d371c106d1e6f24e065700e5edd1c3f9f9
|
[
"MIT"
] | null | null | null |
# Insert Interval
class Solution:
def insert(self, intervals, newInterval):
ans = []
[nst, nen] = newInterval
for index, [st, en] in enumerate(intervals):
if en < nst:
ans.append(intervals[index])
elif nen < st:
# can return now
ans.append([nst, nen])
return ans + intervals[index:]
else:
nst = min(nst, st)
nen = max(nen, en)
ans.append([nst, nen])
return ans
if __name__ == "__main__":
sol = Solution()
intervals = [[1,3],[6,9]]
newInterval = [2,5]
intervals = [[1,2],[3,5],[6,7],[8,10],[12,16]]
newInterval = [4,18]
print(sol.insert(intervals, newInterval))
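# Note: the single-pass merge runs in O(n) time with O(n) extra space; for
# the inputs above the printed result is [[1, 2], [3, 18]].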
| 26.37931
| 52
| 0.491503
| 88
| 765
| 4.181818
| 0.488636
| 0.048913
| 0.065217
| 0.081522
| 0.130435
| 0.130435
| 0
| 0
| 0
| 0
| 0
| 0.045267
| 0.364706
| 765
| 28
| 53
| 27.321429
| 0.711934
| 0.039216
| 0
| 0.090909
| 0
| 0
| 0.010929
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0
| 0
| 0.181818
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
79f1af18baf4d6560c556e9a2520a03a8a86dced
| 25,595
|
py
|
Python
|
cyvn/trader/app/ctaStrategy/strategy/strategyBollingerBot01.py
|
mumuwoyou/pytrader
|
6b94e0c8ecbc3ef238cf31715acf8474b9d26b4a
|
[
"MIT"
] | 4
|
2019-03-14T05:30:59.000Z
|
2021-11-21T20:05:22.000Z
|
cyvn/trader/app/ctaStrategy/strategy/strategyBollingerBot01.py
|
mumuwoyou/pytrader
|
6b94e0c8ecbc3ef238cf31715acf8474b9d26b4a
|
[
"MIT"
] | null | null | null |
cyvn/trader/app/ctaStrategy/strategy/strategyBollingerBot01.py
|
mumuwoyou/pytrader
|
6b94e0c8ecbc3ef238cf31715acf8474b9d26b4a
|
[
"MIT"
] | 4
|
2019-02-14T14:30:46.000Z
|
2021-01-05T09:46:19.000Z
|
# encoding: UTF-8
""""
基于布林带的交易策略
观察周期:1min
策略周期:5min
策略逻辑:
1. 信号:突破上轨、下轨
2. 过滤:均线多头、空头排列
3. 出场:分级止盈;固定止损
"""
import talib
import numpy as np
from cyvn.trader.vtObject import VtBarData
from cyvn.trader.vtConstant import EMPTY_STRING
from cyvn.trader.app.ctaStrategy.ctaTemplate import CtaTemplate, BarGenerator, ArrayManager
from cyvn.trader.vtConstant import *
########################################################################
class BollingerBotStrategy01(CtaTemplate):
"""基于布林通道的交易策略"""
className = 'BollingerBotStrategy01'
author = 'Y.Raul'
    # Strategy parameters
    bollWindow = 28             # Bollinger channel window length
    entryDevUp = 4              # entry deviation (upper band)
    entryDevDown = 3.2
    # exitDev = 1.2             # exit deviation
    # trailingPrcnt = 0.4
    # trailing-stop percentage
    maWindow = 10               # moving-average window used as a filter
    initDays = 10               # days of data used for initialization
    fixedSize = 1               # size of each trade
    # Strategy variables
    bollMid = 0                 # Bollinger middle band
    bollStd = 0                 # Bollinger band width (standard deviation)
    entryUp = 0                 # entry upper band
    # exitUp = 0                # exit upper band
    entryDown = 0               # entry lower band
    # exitDown = 0              # exit lower band
    dispacedLen = 0             # moving-average displacement length
    maFilter = 0                # moving-average filter
    maFilter1 = 0               # previous-period moving average
    # Tiered exit settings
    trailingStart1 = 20
    trailingStart2 = 30
    exitOnTrailingStop1 = 5     # tier-1 trailing-stop distance
    exitOnTrailingStop2 = 10    # tier-2 trailing-stop distance
    exitOnLossStop = 20         # fixed stop-loss distance
    # Price-related variables
    intraTradeHigh = 0          # highest price while in a position
    intraTradeLow = 0           # lowest price while in a position
    avgEntryPrice = 0
    minDiff = 1
    trailingExit = 0
    stopExit = 0                # short-side stop-loss
    # longEntry = 0             # long entry
    # shortEntry = 0
    # Signal-related variables
    buySig = False
    shortSig = False
    sellSig = False
    coverSig = False
    # entrusted = False         # whether an order is already pending
    orderList = []              # list of order IDs
    # List of parameter names
paramList = ['name',
'className',
'author',
'vtSymbol',
'bollWindow',
'entryDevUp',
'entryDevDown',
'trailingStart1',
'trailingStart2',
'exitOnTrailingStop1',
'exitOnTrailingStop2',
'maWindow',
'initDays',
'fixedSize']
    # List of variable names
varList = ['inited',
'trading',
'pos',
'buySig',
'shortSig',
'sellSig',
'coverSig',
'entryUp',
'entryDown',
'trailingExit',
'stopExit',
'intraTradeHigh',
'intraTradeLow',
'avgEntryPrice']
    # Sync list
syncList = ['pos',
'intraTradeHigh',
'intraTradeLow']
#----------------------------------------------------------------------
def __init__(self, ctaEngine, setting):
"""Constructor"""
super(BollingerBotStrategy01, self).__init__(ctaEngine, setting)
self.bm = BarGenerator(self.onBar, 5, self.onFiveBar)
self.am = ArrayManager(30)
self.orderList = []
self.entryPriceList = []
#----------------------------------------------------------------------
def onInit(self):
"""初始化策略(必须由用户继承实现)"""
self.writeCtaLog('%s策略初始化' %self.name)
# 载入历史数据,并采用回放计算的方式初始化策略数值
initData = self.loadBar(self.initDays)
for bar in initData:
self.onBar(bar)
self.putEvent()
#----------------------------------------------------------------------
def onStart(self):
"""启动策略(必须由用户继承实现)"""
self.writeCtaLog('%s策略启动' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onStop(self):
"""停止策略(必须由用户继承实现)"""
self.writeCtaLog('%s策略停止' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onTick(self, tick):
"""收到行情TICK推送(必须由用户继承实现)"""
self.bm.updateTick(tick)
#----------------------------------------------------------------------
def onBar(self, bar):
"""收到Bar推送(必须由用户继承实现)"""
# 观察周期1 Min,根据信号进行交易
# 回测数据传送的bar.datetime,为bar的开始时间
self.bm.updateBar(bar)
# if not self.trading:
# return
self.date = bar.date
self.time = bar.time
        # Check trading signals
if self.buySig:
res = self.buy(bar.close, self.fixedSize, True)
self.orderList.extend([x.split('.')[1] for x in res])
# self.orderList.extend(res.split('.')[1])
# self.entryPriceList.append(self.longEntry)
# self.avgEntryPrice = sum(self.entryPriceList) / len(self.entryPriceList)
# self.LossStopPrice = round(self.avgEntryPrice * (100.0 + self.exitOnLossStop) / 100)
# self.intraTradeHigh = max(bar.high, self.avgEntryPrice)
# self.intraTradeLow = min(bar.low, self.avgEntryPrice)
# log = "-----" * 10 + "\n@onBar\n" + \
# "bar.datetime: {0}; pos: {1} \n".format(bar.datetime, self.pos) + \
# "buySig: {0}; shortSig: {1}\n".format(self.buySig, self.shortSig) + \
# "sellSig: {0}; coverSig: {1}\n".format(self.sellSig, self.coverSig) + \
# "intraTradeHigh: {0}\n".format(self.intraTradeHigh) + \
# "intraTradeLow: {0}\n".format(self.intraTradeLow)
# self.writeCtaLog(log)
            # Log
# log = "\n Trading: {0}\n".format(self.trading) + \
# "{0} Buy : longEntry: {1};\n".format(bar.datetime, bar.close) + \
# " entryUp:{0}; maFilter:{1}; maFilter1:{2}; \n".format(self.entryUp, self.maFilter, self.maFilter1)
# self.writeCtaLog(log)
self.buySig = False
self.saveSyncData()
# return
if self.shortSig:
self.res = self.short(bar.close, self.fixedSize, True)
self.orderList.extend([x.split('.')[1] for x in self.res])
# self.orderList.extend(res.split('.')[1])
# self.LossStopPrice = round(self.shortEntry * (100.0 + self.exitOnLossStop) / 100)
# self.entryPriceList.append(self.shortEntry)
# self.avgEntryPrice = sum(self.entryPriceList) / len(self.entryPriceList)
# self.LossStopPrice = round(self.avgEntryPrice * (100.0 + self.exitOnLossStop) / 100)
#
# self.intraTradeHigh = max(bar.high, self.avgEntryPrice)
# self.intraTradeLow = min(bar.low, self.avgEntryPrice)
# log = "-----" * 10 + "\n@onBar\n" + \
# "bar.datetime: {0}; pos: {1} \n".format(bar.datetime, self.pos) + \
# "buySig: {0}; shortSig: {1}\n".format(self.buySig, self.shortSig) + \
# "sellSig: {0}; coverSig: {1}\n".format(self.sellSig, self.coverSig) + \
# "intraTradeHigh: {0}\n".format(self.intraTradeHigh) + \
# "intraTradeLow: {0}\n".format(self.intraTradeLow)
# self.writeCtaLog(log)
            # # Log
# log = "\n Trading: {0}\n".format(self.trading) + \
# "{0} Short : shortEntry: {1};\n".format(bar.datetime, bar.close) + \
# " entryDown:{0}; maFilter:{1}; maFilter1:{2}; \n".format(self.entryDown, self.maFilter, self.maFilter1)
# self.writeCtaLog(log)
self.shortSig = False
self.saveSyncData()
# return
if self.sellSig:
if bar.close > self.stopExit:
price = self.trailingExit
else:
price = bar.close
res = self.sell(price, abs(self.pos), True)
# self.orderList.extend(res)
# log = "-----" * 10 + "\n@onBar\n" + \
# "bar.datetime: {0}; pos: {1} \n".format(bar.datetime, self.pos) + \
# "buySig: {0}; shortSig: {1}\n".format(self.buySig, self.shortSig) + \
# "sellSig: {0}; coverSig: {1}\n".format(self.sellSig, self.coverSig) + \
# "intraTradeHigh: {0}\n".format(self.intraTradeHigh) + \
# "intraTradeLow: {0}\n".format(self.intraTradeLow)
# self.writeCtaLog(log)
            # # Log
# log = "\n Trading: {0}\n".format(self.trading) + \
# "{0} Sell : {1};\n".format(bar.datetime, bar.close) + \
# " price:{0}; stopExit: {1}\n".format(price,self.stopExit)
# self.writeCtaLog(log)
# self.entryPriceList = []
# self.avgEntryPrice = 0
# self.stopExit = 0
self.sellSig = False
self.saveSyncData()
# return
if self.coverSig:
if bar.close < self.stopExit:
price = self.trailingExit
else:
price = bar.close
res = self.cover(price, abs(self.pos), True)
# self.orderList.extend(res)
# log = "-----" * 10 + "\n@onBar\n" + \
# "bar.datetime: {0}; pos: {1} \n".format(bar.datetime, self.pos) + \
# "buySig: {0}; shortSig: {1}\n".format(self.buySig, self.shortSig) + \
# "sellSig: {0}; coverSig: {1}\n".format(self.sellSig, self.coverSig) + \
# "intraTradeHigh: {0}\n".format(self.intraTradeHigh) + \
# "intraTradeLow: {0}\n".format(self.intraTradeLow)
# self.writeCtaLog(log)
            # # Log
# log = "\n Trading: {0}\n".format(self.trading) + \
# "{0} Cover : {1};\n".format(bar.datetime, bar.close) + \
# " price:{0}; stopExit: {1}\n".format(price,self.stopExit)
# self.writeCtaLog(log)
# self.entryPriceList = []
# self.avgEntryPrice = 0
# self.stopExit = 0
self.coverSig = False
self.saveSyncData()
# return
self.putEvent()
#----------------------------------------------------------------------
def onFiveBar(self, bar):
"""收到5分钟K线"""
# 策略周期5Min,生成交易信号
# 保存K线数据
self.am.updateBar(bar)
if not self.am.inited:
return
        # Cancel all previously issued, unfilled orders (both limit and stop orders)
self.cancelAll()
        # Compute indicator values
        self.bollMid = self.am.sma(self.bollWindow, True)[-1 * (self.dispacedLen + 1)]
self.bollStd = self.am.std(self.bollWindow)
self.entryUp = round(self.bollMid + self.bollStd * self.entryDevUp)
self.entryDown = round(self.bollMid - self.bollStd * self.entryDevDown)
maArray = self.am.sma(self.maWindow, True)
self.maFilter = round(maArray[-1])
self.maFilter1 = round(maArray[-2])
        # Decide whether to trade
        # Currently flat (no position)
if self.pos == 0:
self.intraTradeHigh = bar.high
self.intraTradeLow = bar.low
self.entryPriceList = []
self.orderList =[]
self.avgEntryPrice = 0
            if bar.close > self.maFilter and self.maFilter > self.maFilter1:
                # Bullish moving-average filter
                if bar.close >= self.entryUp:
                    # Upper-band breakout
                    self.buySig = True
            if bar.close < self.maFilter and self.maFilter < self.maFilter1:
                # Bearish moving-average filter
                if bar.close <= self.entryDown:
                    # Lower-band breakout
                    self.shortSig = True
# log = "-----" * 10 + "\n@onFiveBar\n" + \
# "bar.datetime: {0}; pos: {1} ; close: {2}\n".format(bar.datetime, self.pos,bar.close) + \
# "buySig: {0}; shortSig: {1}\n".format(self.buySig, self.shortSig) + \
# "intraTradeHigh: {0}\n".format(self.intraTradeHigh) + \
# "intraTradeLow: {0}\n".format(self.intraTradeLow)
# self.writeCtaLog(log)
        # Currently holding a position
else:
self.intraTradeHigh = max(self.intraTradeHigh, bar.high)
self.intraTradeLow = min(self.intraTradeLow, bar.low)
if self.pos > 0:
                # self.stopExit = self.avgEntryPrice - self.exitOnLossStop * self.minDiff  # fixed stop-loss price
if self.intraTradeHigh >= self.avgEntryPrice + self.trailingStart2 * self.minDiff:
                    # Tier-2 take-profit check (profit of 80 ticks)
if (bar.close <= self.intraTradeHigh - self.exitOnTrailingStop2 * self.minDiff):
                        # 20-tick pullback
self.trailingExit = self.intraTradeHigh - self.exitOnTrailingStop2 * self.minDiff
self.sellSig = True
# if bar.close < self.longExit:
# self.longExit = bar.close
                        # Log
# log = "\n{0} Sell(Trailing Stop2)\n".format(bar.datetime) + \
# 'bar.close: {0}; bar.low: {1}; longExit: {2}'.format(bar.close,bar.low, self.longExit)+ \
# 'intraTradeHigh: {0}; avgEntryPrice: {1}; bar.open: {2}'.format(self.intraTradeHigh,self.avgEntryPrice, bar.open)
# self.writeCtaLog(log)
elif self.intraTradeHigh >= self.avgEntryPrice + self.trailingStart1 * self.minDiff:
                    # Tier-1 take-profit check (profit of 50 ticks)
if (bar.close <= self.intraTradeHigh - self.exitOnTrailingStop1 * self.minDiff):
                        # 20-tick pullback
self.trailingExit = self.intraTradeHigh - self.exitOnTrailingStop1 * self.minDiff
self.sellSig = True
# if bar.close < self.longExit:
# self.longExit = bar.close
                        # Log
# log = "\n{0} Sell(Trailing Stop1)\n".format(bar.datetime) + \
# 'bar.close: {0}; bar.low: {1}; longExit: {2}'.format(bar.close, bar.low,
# self.longExit)+ \
# 'intraTradeHigh: {0}; avgEntryPrice: {1}; bar.open: {2}'.format(self.intraTradeHigh,self.avgEntryPrice, bar.open)
# self.writeCtaLog(log)
elif self.stopExit != 0:
if (bar.close <= self.stopExit):
                        # Fixed stop-loss: 20-tick pullback
self.sellSig = True
# log = "-----" * 10 + "\n@onFiveBar\n" + \
# "bar.datetime: {0}; pos: {1} ; close:{2}\n".format(bar.datetime, self.pos, bar.close) + \
# "sellSig: {0}; coverSig: {1}\n".format(self.sellSig, self.coverSig) + \
# "intraTradeHigh: {0}\n".format(self.intraTradeHigh) + \
# "intraTradeLow: {0}\n".format(self.intraTradeLow) + \
# "trailingStart1: {0}\n".format(self.avgEntryPrice + self.trailingStart1 * self.minDiff) + \
# "trailingStart2: {0}\n".format(self.avgEntryPrice + self.trailingStart2 * self.minDiff) + \
# "avgEntryPrice: {0}\n".format(self.avgEntryPrice) + \
# "trailingStop: {0}\n".format(self.trailingExit) + \
# "stopExit: {0}\n".format(self.stopExit)
#
# self.writeCtaLog(log)
# if bar.close < self.longExit:
# self.longExit = bar.close
                    # Log
# log = "\n{0} Sell(Loss Stop)\n".format(bar.datetime) + \
# 'bar.close: {0}; bar.low: {1}; longExit: {2}'.format(bar.close, bar.low,
# self.longExit)+ \
# 'intraTradeHigh: {0}; avgEntryPrice: {1}; bar.open: {2}'.format(self.intraTradeHigh,
# self.avgEntryPrice,
# bar.open)
# self.writeCtaLog(log)
elif self.pos < 0:
                # self.stopExit = self.avgEntryPrice + self.exitOnLossStop * self.minDiff  # fixed stop-loss price
if self.intraTradeLow <= self.avgEntryPrice - self.trailingStart2 * self.minDiff:
                    # Tier-2 take-profit check (profit of 80 ticks)
if (bar.close >= self.intraTradeLow + self.exitOnTrailingStop2 * self.minDiff):
                        # 20-tick pullback
self.trailingExit = self.intraTradeLow + self.exitOnTrailingStop2 * self.minDiff
self.coverSig = True
# if bar.close > self.shortExit:
# self.shortExit = bar.close
                        # Log
# log = "\n{0} Cover(Trailing Stop1)\n".format(bar.datetime) + \
# 'bar.close: {0}; bar.low: {1}; shortExit: {2}'.format(bar.close, bar.low,
# self.shortExit)+ \
# 'intraTradeLow: {0}; avgEntryPrice: {1}; bar.open: {2}'.format(self.intraTradeLow,
# self.avgEntryPrice,
# bar.open)
# self.writeCtaLog(log)
elif self.intraTradeLow <= self.avgEntryPrice - self.trailingStart1 * self.minDiff:
                    # Tier-1 take-profit check (profit of 50 ticks)
if (bar.close >= self.intraTradeLow + self.exitOnTrailingStop1 * self.minDiff):
                        # 20-tick pullback
self.trailingExit = self.intraTradeLow + self.exitOnTrailingStop1 * self.minDiff
self.coverSig = True
# if bar.close > self.shortExit:
# self.shortExit = bar.close
                        # Log
# log = "\n{0} Cover(Trailing Stop2)\n".format(bar.datetime) + \
# 'bar.close: {0}; bar.low: {1}; shortExit: {2}'.format(bar.close, bar.low,
# self.shortExit)+ \
# 'intraTradeLow: {0}; avgEntryPrice: {1}; bar.open: {2}'.format(self.intraTradeLow,
# self.avgEntryPrice,
# bar.open)
# self.writeCtaLog(log)
elif self.stopExit != 0:
if (bar.close >= self.stopExit):
                        # Fixed stop-loss: 20-tick pullback
# self.shortExit = self.avgEntryPrice + self.exitOnLossStop * self.minDiff
self.coverSig = True
# if bar.close > self.shortExit:
# self.shortExit = bar.close
                        # Log
# log = "\n{0} Cover(Loss Stop)\n".format(bar.datetime) + \
# 'bar.close: {0}; bar.low: {1}; shortExit: {2}'.format(bar.close, bar.low,
# self.shortExit)+ \
# 'intraTradeLow: {0}; avgEntryPrice: {1}; bar.open: {2}'.format(self.intraTradeLow,
# self.avgEntryPrice,
# bar.open)
# self.writeCtaLog(log)
# log = "-----" * 10 + "\n@onFiveBar\n" + \
# "bar.datetime: {0}; pos: {1} ; close:{2}\n".format(bar.datetime, self.pos, bar.close) + \
# "sellSig: {0}; coverSig: {1}\n".format(self.sellSig, self.coverSig) + \
# "intraTradeHigh: {0}\n".format(self.intraTradeHigh) + \
# "intraTradeLow: {0}\n".format(self.intraTradeLow) + \
# "trailingStart1: {0}\n".format(self.avgEntryPrice - self.trailingStart1 * self.minDiff)+\
# "trailingStart2: {0}\n".format(self.avgEntryPrice - self.trailingStart2 * self.minDiff)+\
# "avgEntryPrice: {0}\n".format(self.avgEntryPrice)+\
# "trailingStop: {0}\n".format(self.trailingExit)+\
# "stopExit: {0}\n".format(self.stopExit)
#
# self.writeCtaLog(log)
        # Emit a status-update event
self.saveSyncData()
self.putEvent()
#----------------------------------------------------------------------
def onOrder(self, order):
"""收到委托变化推送(必须由用户继承实现)"""
# CTA引擎中涉及到的交易方向类型
# CTAORDER_BUY = u'买开'
# CTAORDER_SELL = u'卖平'
# CTAORDER_SHORT = u'卖开'
# CTAORDER_COVER = u'买平'
# log = "-----" * 10 + "\n@onOrder\n" + \
# "orderTime: {0}; pos: {1} \n".format(order.orderTime, order.totalVolume) + \
# "status {0}; vtOrderID: {1}\n".format(order.status, order.vtOrderID)
# self.writeCtaLog(log)
        # For opening trades, record the relevant prices
# if order.vtOrderID in self.orderList:
if order.direction == DIRECTION_LONG and order.offset == OFFSET_OPEN:
if order.totalVolume == order.tradedVolume:
                # Update the entry-price list and the average entry price
self.entryPriceList.append(order.price)
self.avgEntryPrice = sum(self.entryPriceList) / len(self.entryPriceList)
                self.stopExit = self.avgEntryPrice - self.exitOnLossStop * self.minDiff  # fixed stop-loss price
# self.orderList.remove(order.vtOrderID)
        elif order.direction == DIRECTION_SHORT and order.offset == OFFSET_OPEN:
            if order.totalVolume == order.tradedVolume:
                # Update the entry-price list and the average entry price
self.entryPriceList.append(order.price)
self.avgEntryPrice = sum(self.entryPriceList) / len(self.entryPriceList)
                self.stopExit = self.avgEntryPrice + self.exitOnLossStop * self.minDiff  # fixed stop-loss price
# self.orderList.remove(order.vtOrderID)
self.putEvent()
#----------------------------------------------------------------------
def onTrade(self, trade):
        # Emit a status-update event
data = trade.__dict__
self.putEvent()
#----------------------------------------------------------------------
def onStopOrder(self, so):
"""停止单推送"""
data = so.__dict__
self.putEvent()
if __name__ == "__main__":
from cyvn.trader.app.ctaStrategy.ctaBacktesting import BacktestingEngine, OptimizationSetting, MINUTE_DB_NAME
dbName = MINUTE_DB_NAME
symbol = 'rb88'
    # Create the backtesting engine
engine = BacktestingEngine()
    # Configure the data used for backtesting
    engine.setBacktestingMode(engine.BAR_MODE)    # bar-mode backtesting
    engine.setDatabase(dbName, symbol)            # historical database to use
    engine.setStartDate('20130101', 10)           # start date of the backtest data
    engine.setEndDate('20171231')
    # Configure the backtesting engine parameters
    engine.setSlippage(0)                         # slippage (in index ticks)
    engine.setRate(1.1 / 10000)                   # commission rate of 1.1 per 10,000
    engine.setSize(10)                            # index contract multiplier
    engine.setPriceTick(1)                        # minimum price tick of the index
    engine.setCapital(10000)                      # starting capital for the backtest
    # Load the strategy class code from the current directory
    # (note: this relative import only works when run as part of the package)
    from .strategyBollingerBot01 import BollingerBotStrategy01
    # Leave the settings dict empty to use the strategy's default parameters
d = {}
    # Initialize the strategy
engine.initStrategy(BollingerBotStrategy01, d)
    # Run the backtest
    engine.runBacktesting()
# engine.showBacktestingResult()
# engine.showDailyResult()
d = engine.calculateBacktestingResult()
    # Record the log
import logging
logger = logging.getLogger("backtest")
fh = logging.FileHandler('./{0}_backtest.log'.format(engine.strategy.className))
logger.setLevel(logging.INFO)
logger.addHandler(fh)
for log in engine.logList:
logger.info(log)
# logger2 = logging.getLogger("result")
# fh2 = logging.FileHandler('./{0}_result.log'.format(engine.strategy.className))
# logger2.setLevel(logging.INFO)
# logger2.addHandler(fh2)
result = d['resultList']
entryDate = []
entryPrice = []
exitDate = []
exitPrice = []
volume = []
pnl = []
for trade in result:
dic = trade.__dict__
entryDate.append(dic['entryDt'])
entryPrice.append(dic['entryPrice'])
exitDate.append(dic['exitDt'])
exitPrice.append(dic['exitPrice'])
volume.append(dic['volume'])
pnl.append(dic['pnl'])
# logger2.info("entryDate: {0}; entryPrice: {1}".format(dic['entryDt'], dic['entryPrice']))
# logger2.info("exitDate: {0}; exitPrice: {1}".format(dic['exitDt'], dic['exitPrice']))
# logger2.info("volume:{0}".format(dic['volume']))
# logger2.info("pnl:{0}".format(dic['pnl']))
import pandas as pd
    data = {'entryDate': entryDate, 'entryPrice': entryPrice, 'exitDate': exitDate, 'exitPrice': exitPrice, 'volume': volume, 'pnl': pnl}
df = pd.DataFrame(data)
df.to_csv('./{0}_result.csv'.format(engine.strategy.className), index=False)
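
    # A follow-up sketch (not part of the original script): reloading the CSV
    # written above and computing cumulative PnL with pandas, assuming the
    # strategy's className matches the class name used above.
    #
    #     df = pd.read_csv('./BollingerBotStrategy01_result.csv')
    #     df['cumPnl'] = df['pnl'].cumsum()
    #     print(df[['exitDate', 'pnl', 'cumPnl']].tail())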
79f212ed18232d9a20e96553e02496b414e7481c | 4,577 | py | Python | cabu/drivers.py | thylong/cabu | b883293f35d22443de1ba8129b2efd1c346c7e61 | ["BSD-3-Clause"] | 16 | 2016-02-05T22:49:16.000Z | 2020-03-20T13:28:05.000Z | cabu/drivers.py | thylong/cabu | b883293f35d22443de1ba8129b2efd1c346c7e61 | ["BSD-3-Clause"] | null | null | null | cabu/drivers.py | thylong/cabu | b883293f35d22443de1ba8129b2efd1c346c7e61 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
import os
import re

from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from xvfbwrapper import Xvfb

from cabu.exceptions import DriverException
from cabu.utils.headers import Headers
try:
from urllib.parse import urlsplit
except ImportError: # pragma: no cover
from urlparse import urlsplit # flake8: noqa
def load_vdisplay(config):
"""Initialize a vdisplay (Xvfb subprocess instance).
Args:
config (dict): The configuration loaded previously in Cabu.
Returns:
vdisplay: An instance of Xvfb wrapper.
"""
vdisplay = None
if config['HEADLESS']:
vdisplay = Xvfb(
width=config['DRIVER_WINDOWS_WIDTH'],
height=config['DRIVER_WINDOWS_HEIGHT']
)
vdisplay.start()
return vdisplay
def unload_vdisplay(vdisplay):
"""Shutdown given Xvfb instance.
Args:
vdisplay (XvfbWrapper): The running virtual X server.
"""
vdisplay.stop()
def load_driver(config, vdisplay=None):
"""Initialize a weddriver selected in config with given config.
Args:
config (dict): The configuration loaded previously in Cabu.
Returns:
webdriver (selenium.webdriver): An instance of selenium webdriver or None.
"""
if config['DRIVER_NAME'] == 'Firefox':
driver = load_firefox(config)
elif config['DRIVER_NAME'] == 'Chrome':
driver = load_chrome(config)
elif config['DRIVER_NAME'] == 'PhantomJS':
driver = load_phantomjs(config)
elif not config.get('DRIVER_NAME'):
return None
else:
raise DriverException(vdisplay, 'Driver unrecognized.')
driver.set_page_load_timeout(config['DRIVER_PAGE_TIMEOUT'])
driver.set_window_size(config['DRIVER_WINDOWS_WIDTH'], config['DRIVER_WINDOWS_HEIGHT'])
return driver
def unload_driver(driver):
"""Shutdown given webdriver instance.
Args:
driver (selenium.webdriver): The running webdriver.
"""
driver.quit()
def load_firefox(config):
"""Start Firefox webdriver with the given configuration.
Args:
config (dict): The configuration loaded previously in Cabu.
Returns:
webdriver (selenium.webdriver): An instance of Firefox webdriver.
"""
binary = None
profile = webdriver.FirefoxProfile()
if os.environ.get('HTTPS_PROXY') or os.environ.get('HTTP_PROXY'):
proxy_address = os.environ.get('HTTPS_PROXY', os.environ.get('HTTP_PROXY'))
        proxy_port = int(re.search(r':([0-9]+)$', proxy_address).group(1))  # Firefox expects an int port
profile.set_preference('network.proxy.type', 1)
profile.set_preference(
'network.proxy.http',
proxy_address
)
profile.set_preference('network.proxy.http_port', proxy_port)
profile.update_preferences()
if 'HEADERS' in config and config['HEADERS']:
profile = Headers(config).set_headers(profile)
if config['DRIVER_BINARY_PATH']:
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
binary = FirefoxBinary(config['DRIVER_BINARY_PATH'])
return webdriver.Firefox(firefox_binary=binary, firefox_profile=profile)
def load_chrome(config):
"""Start Chrome webdriver with the given configuration.
Args:
config (dict): The configuration loaded previously in Cabu.
Returns:
webdriver (selenium.webdriver): An instance of Chrome webdriver.
"""
return webdriver.Chrome()
def load_phantomjs(config):
"""Start PhantomJS webdriver with the given configuration.
Args:
config (dict): The configuration loaded previously in Cabu.
Returns:
        webdriver (selenium.webdriver): An instance of PhantomJS webdriver.
"""
dcap = dict(DesiredCapabilities.PHANTOMJS)
service_args = [
'--ignore-ssl-errors=true',
'--ssl-protocol=any',
'--web-security=false'
]
if os.environ.get('HTTPS_PROXY') or os.environ.get('HTTP_PROXY'):
proxy_address = os.environ.get('HTTPS_PROXY', os.environ.get('HTTP_PROXY'))
        proxy_ip = re.search(r'http://(.*)$', proxy_address).group(1)
service_args.append('--proxy=%s' % proxy_ip)
service_args.append('--proxy-type=http')
if 'HEADERS' in config and config['HEADERS']:
dcap = Headers(config).set_headers(dcap)
return webdriver.PhantomJS(
desired_capabilities=dcap,
service_args=service_args,
        service_log_path=os.devnull
)
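
# Hedged usage sketch: the config keys below are the ones read by the
# functions above; the values are illustrative assumptions, not defaults
# confirmed by the source.
#
#     config = {
#         'HEADLESS': True,
#         'DRIVER_NAME': 'Firefox',
#         'DRIVER_BINARY_PATH': None,
#         'DRIVER_PAGE_TIMEOUT': 30,
#         'DRIVER_WINDOWS_WIDTH': 1280,
#         'DRIVER_WINDOWS_HEIGHT': 800,
#         'HEADERS': None,
#     }
#     vdisplay = load_vdisplay(config)
#     driver = load_driver(config, vdisplay)
#     try:
#         driver.get('https://example.com')
#     finally:
#         unload_driver(driver)
#         if vdisplay:
#             unload_vdisplay(vdisplay)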
79fcc9de775f5e6d9c7b155ff6baf8f84042ddd0 | 10,468 | py | Python | easistrain/EDD/fitEDD.py | EASI-STRESS/easistrain | 86192d1c4135875daec8e4e4abcb67e372f86efb | ["MIT"] | null | null | null | easistrain/EDD/fitEDD.py | EASI-STRESS/easistrain | 86192d1c4135875daec8e4e4abcb67e372f86efb | ["MIT"] | 11 | 2021-11-10T08:36:22.000Z | 2022-03-21T08:31:17.000Z | easistrain/EDD/fitEDD.py | EASI-STRESS/easistrain | 86192d1c4135875daec8e4e4abcb67e372f86efb | ["MIT"] | null | null | null |
from typing import Sequence
import numpy as np
import h5py
from easistrain.EDD.io import (
create_info_group,
peak_dataset_data,
save_fit_data,
)
from easistrain.EDD.utils import fit_detector_data, run_from_cli
def fitEDD(
fileRead: str,
fileSave: str,
sample: str,
dataset: str,
scanNumber: int,
nameHorizontalDetector: str,
nameVerticalDetector: str,
positioners: Sequence[str],
numberOfBoxes: int,
nbPeaksInBoxes: Sequence[int],
rangeFitHD: Sequence[int],
rangeFitVD: Sequence[int],
):
print(f"Fitting scan n.{scanNumber}")
with h5py.File(fileRead, "r") as h5Read: ## Read the h5 file of raw data
scan_meas = h5Read.get(
f"{sample}_{dataset}_{scanNumber}.1/measurement",
default=None,
)
if (
not isinstance(scan_meas, h5py.Group)
or nameHorizontalDetector not in scan_meas
or nameVerticalDetector not in scan_meas
):
print("No pattern was saved in this scan")
return
h5Save = h5py.File(fileSave, "a") ## create/append h5 file to save in
scanGroup = h5Save.create_group(
f"{sample}_{dataset}_{scanNumber}.1"
        ) ## create the group of the scan which will contain all the results of a scan
positionersGroup = scanGroup.create_group(
"positioners"
) ## positioners subgroup in scan group
patternHorizontalDetector = h5Read[
f"{sample}_{dataset}_{scanNumber}.1/measurement/{nameHorizontalDetector}"
][
()
] ## pattern of horizontal detector
patternVerticalDetector = h5Read[
f"{sample}_{dataset}_{scanNumber}.1/measurement/{nameVerticalDetector}"
][
()
] ## pattern of vertical detector
twoD_detector_data = (
np.ndim(patternHorizontalDetector) == 2
or np.ndim(patternVerticalDetector) == 2
)
nDetectorPoints = len(patternHorizontalDetector) if twoD_detector_data else 1
positionAngles = np.zeros((nDetectorPoints, 6), "float64")
for i, positioner in enumerate(positioners):
pos_data = h5Read[
f"{sample}_{dataset}_{scanNumber}.1/instrument/positioners/{positioner}"
][()]
positionersGroup.create_dataset(
positioner,
dtype="float64",
data=pos_data,
) ## saving all the requested positioners
if i < 6:
positionAngles[:, i] = pos_data
else:
print("Too many positioners given ! Only 6 are handled for now.")
rawDataLevel1_1 = scanGroup.create_group(
"rawData" + "_" + str(dataset) + "_" + str(scanNumber)
) ## rawData subgroup in scan group
fitGroup = scanGroup.create_group("fit") ## fit subgroup in scan group
tthPositionsGroup = scanGroup.create_group(
"tthPositionsGroup"
) ## two theta positions subgroup in scan group
rawDataLevel1_1.create_dataset(
"horizontalDetector", dtype="float64", data=patternHorizontalDetector
) ## save raw data of the horizontal detector
rawDataLevel1_1.create_dataset(
"verticalDetector", dtype="float64", data=patternVerticalDetector
) ## save raw data of the vertical detector
for k in range(nDetectorPoints):
fitParams = {"horizontal": np.array(()), "vertical": np.array(())}
uncertaintyFitParams = {
"horizontal": np.array(()),
"vertical": np.array(()),
}
pointInScan = fitGroup.create_group(
f"{str(k).zfill(4)}"
) ## create a group of each pattern (point of the scan)
fitParamsGroup = pointInScan.create_group(
"fitParams"
) ## fit results group for the two detector
for i, nb_peaks in enumerate(nbPeaksInBoxes):
fitLine = pointInScan.create_group(
f"fitLine_{str(i).zfill(4)}"
) ## create group for each range of peak(s)
for detector in ["horizontal", "vertical"]:
fit_min, fit_max = (
(rangeFitHD[2 * i], rangeFitHD[2 * i + 1])
if detector == "horizontal"
else (rangeFitVD[2 * i], rangeFitVD[2 * i + 1])
) # To be improved
pattern = (
patternHorizontalDetector
if detector == "horizontal"
else patternVerticalDetector
) # To be improved
channels = np.arange(fit_min, fit_max)
raw_data = pattern[k, fit_min:fit_max]
assert isinstance(raw_data, np.ndarray)
# print(np.shape(pattern),pattern)
(
background,
fitted_data,
boxFitParams,
uncertaintyBoxFitParams,
) = fit_detector_data(
channels=channels,
raw_data=raw_data,
nb_peaks=nb_peaks,
boxCounter=i,
scanNumber=scanNumber,
detectorName=detector,
)
save_fit_data(
fitLine, detector, channels, raw_data, background, fitted_data
)
# Accumulate fit parameters of this box
fitParams[detector] = np.append(fitParams[detector], boxFitParams)
uncertaintyFitParams[detector] = np.append(
uncertaintyFitParams[detector], uncertaintyBoxFitParams
)
# End of fitting procedure
savedFitParamsHD = np.reshape(
fitParams["horizontal"], (int(np.size(fitParams["horizontal"]) / 6), 6)
)
fitParamsGroup.create_dataset(
"fitParamsHD",
dtype="float64",
data=savedFitParamsHD,
) ## save parameters of the fit of HD
savedUncertaintyFitParamsHD = np.reshape(
uncertaintyFitParams["horizontal"],
(int(np.size(uncertaintyFitParams["horizontal"]) / 5), 5),
)
fitParamsGroup.create_dataset(
"uncertaintyFitParamsHD",
dtype="float64",
data=savedUncertaintyFitParamsHD,
) ## save uncertainty on the parameters of the fit of HD
savedFitParamsVD = np.reshape(
fitParams["vertical"], (int(np.size(fitParams["vertical"]) / 6), 6)
)
fitParamsGroup.create_dataset(
"fitParamsVD",
dtype="float64",
data=savedFitParamsVD,
) ## save parameters of the fit of VD
savedUncertaintyFitParamsVD = np.reshape(
uncertaintyFitParams["vertical"],
(int(np.size(uncertaintyFitParams["vertical"]) / 5), 5),
)
fitParamsGroup.create_dataset(
"uncertaintyFitParamsVD",
dtype="float64",
data=savedUncertaintyFitParamsVD,
) ## save uncertainty on the parameters of the fit of VD
for peakNumber in range(np.sum(nbPeaksInBoxes)):
if f"peak_{str(peakNumber).zfill(4)}" not in tthPositionsGroup.keys():
peakDataset = tthPositionsGroup.create_dataset(
f"peak_{str(peakNumber).zfill(4)}",
dtype="float64",
data=np.zeros((2 * nDetectorPoints, 13), "float64"),
) ## create a dataset for each peak in tthPositionGroup
uncertaintyPeakDataset = tthPositionsGroup.create_dataset(
f"uncertaintyPeak_{str(peakNumber).zfill(4)}",
dtype="float64",
data=np.zeros((2 * nDetectorPoints, 13), "float64"),
) ## create a dataset for uncertainty for each peak in tthPositionGroup
else:
peakDataset = tthPositionsGroup[f"peak_{str(peakNumber).zfill(4)}"]
assert isinstance(peakDataset, h5py.Dataset)
uncertaintyPeakDataset = tthPositionsGroup[
f"uncertaintyPeak_{str(peakNumber).zfill(4)}"
]
assert isinstance(uncertaintyPeakDataset, h5py.Dataset)
peakDataset[2 * k] = peak_dataset_data(
positionAngles, savedFitParamsHD[peakNumber], -90, k
)
peakDataset[2 * k + 1] = peak_dataset_data(
positionAngles, savedFitParamsVD[peakNumber], 0, k
)
uncertaintyPeakDataset[2 * k] = peak_dataset_data(
positionAngles, savedUncertaintyFitParamsHD[peakNumber], -90, k
)
uncertaintyPeakDataset[2 * k + 1] = peak_dataset_data(
positionAngles, savedUncertaintyFitParamsVD[peakNumber], 0, k
)
if "infoPeak" not in tthPositionsGroup.keys():
tthPositionsGroup.create_dataset(
"infoPeak",
dtype=h5py.string_dtype(encoding="utf-8"),
data=f"{positioners}, delta, theta, position in channel, Intenstity, FWHM, shape factor, goodness factor",
) ## create info about dataset saved for each peak in tthPositionGroup
create_info_group(
scanGroup,
fileRead,
fileSave,
sample,
dataset,
scanNumber,
nameHorizontalDetector,
nameVerticalDetector,
numberOfBoxes,
nbPeaksInBoxes,
rangeFitHD,
rangeFitVD,
positioners,
)
h5Save.close()
return
def fitEDD_with_scan_number_parse(**config):
"""Wrapper function to allow scanNumber to be a list or a slice."""
n_scan_arg = config.pop("scanNumber")
if isinstance(n_scan_arg, int):
fitEDD(**config, scanNumber=n_scan_arg)
elif isinstance(n_scan_arg, list):
for i in n_scan_arg:
fitEDD_with_scan_number_parse(**config, scanNumber=i)
elif isinstance(n_scan_arg, str):
if ":" in n_scan_arg:
min_scan, max_scan = n_scan_arg.split(":")
for i in range(int(min_scan), int(max_scan)):
fitEDD(**config, scanNumber=i)
else:
fitEDD(**config, scanNumber=int(n_scan_arg))
else:
raise ValueError(f"Unrecognized value for scanNumber: {n_scan_arg}")
if __name__ == "__main__":
run_from_cli(fitEDD_with_scan_number_parse)
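
# Hedged usage sketch: fitEDD_with_scan_number_parse accepts scanNumber as an
# int, a list of ints, or a "min:max" string (upper bound exclusive). All file
# and detector names below are placeholders, not real data.
#
#     config = dict(
#         fileRead="raw.h5", fileSave="fit.h5",
#         sample="sampleA", dataset="0001", scanNumber="1:4",  # runs scans 1, 2 and 3
#         nameHorizontalDetector="hdet", nameVerticalDetector="vdet",
#         positioners=["x", "y"], numberOfBoxes=1, nbPeaksInBoxes=[1],
#         rangeFitHD=[100, 200], rangeFitVD=[100, 200],
#     )
#     fitEDD_with_scan_number_parse(**config)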
03004212ee99b61faeb0548d5a85b4b203430ddc | 2,784 | py | Python | support_functions.py | yellingviv/dungeons_on_demand | ced5d6b1b0c12ad8e22f7fac1cfaeecc82a821bb | ["OML"] | null | null | null | support_functions.py | yellingviv/dungeons_on_demand | ced5d6b1b0c12ad8e22f7fac1cfaeecc82a821bb | ["OML"] | null | null | null | support_functions.py | yellingviv/dungeons_on_demand | ced5d6b1b0c12ad8e22f7fac1cfaeecc82a821bb | ["OML"] | 1 | 2020-04-18T16:47:57.000Z | 2020-04-18T16:47:57.000Z |
from dungeon_model import Monsters, Players
import re
import math
def initiative_sort(init_order):
"""sorts all the characters for a given combat by initiative"""
print("passed into sort function: ", init_order)
for i in range(len(init_order)):
check = init_order[i]
print("the check is: ", check, " and i is: ", i)
index = i
while index > 0 and init_order[index - 1][0] < check[0]:
init_order[index] = init_order[index - 1]
index = index - 1
init_order[index] = check
print("we will return init order as: ", init_order)
return init_order
def instantiate_player(player_info, game_id):
"""receives info about player and adds to the DB"""
    character = player_info
name = character['name']
char_name = name.title()
char_init = character['init']
new_character = Players(name=char_name,
game_id=game_id,
initiative_mod=char_init,
type='pla')
print("we just created: ", new_character)
return new_character
def instantiate_monster(monst_info):
"""receives dictionary of monster info and adds to DB"""
# room_id = 10
species = monst_info['type']
size = monst_info['size']
ac = monst_info['ac']
total_hp = monst_info['hp']
hit_dice_num = monst_info['dice_num']
hit_dice_type = monst_info['dice_type']
bonus = monst_info['bonus']
speed = monst_info['speed']
burrow = monst_info['burrow']
swim = monst_info['swim']
fly = monst_info['fly']
hover = monst_info['hover']
    strength = monst_info['str']        # local renamed to avoid shadowing the built-in str
    dex = monst_info['dex']
    con = monst_info['con']
    wis = monst_info['wis']
    cha = monst_info['cha']
    intelligence = monst_info['int']    # local renamed to avoid shadowing the built-in int
initiative = (monst_info['dex'] - 10) / 2
initiative_mod = math.trunc(initiative)
# game_id = monst_info['game_id']
monster = Monsters(# room_id=room_id,
species=species,
size=size,
total_hp=total_hp,
ac=ac,
hit_dice_num=hit_dice_num,
hit_dice_type=hit_dice_type,
bonus=bonus,
initiative_mod=initiative_mod,
speed=speed,
burrow=burrow,
swim=swim,
fly=fly,
hover=hover,
                       str=strength,
dex=dex,
con=con,
wis=wis,
cha=cha,
                       int=intelligence,
type='mon')
# game_id=game_id)
return monster
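
# A small usage sketch: initiative_sort expects indexable items with the
# initiative roll at index 0 and sorts them in descending order. The
# characters below are made up for illustration.
if __name__ == "__main__":
    order = [(12, "goblin"), (18, "ranger"), (15, "cleric")]
    print(initiative_sort(order))  # -> [(18, 'ranger'), (15, 'cleric'), (12, 'goblin')]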
03007634c85b96a08566af89f356d770fc155ed9 | 2,306 | py | Python | experiments/architectures/PAE_network.py | Butters-cloud/denoising-normalizing-flow | 12d56a0d069e10a744acabf5e78fdbfba8df54ee | ["MIT"] | 12 | 2021-11-18T15:01:17.000Z | 2022-02-22T16:17:42.000Z | experiments/architectures/PAE_network.py | Butters-cloud/denoising-normalizing-flow | 12d56a0d069e10a744acabf5e78fdbfba8df54ee | ["MIT"] | 2 | 2022-01-22T00:41:13.000Z | 2022-02-01T15:41:42.000Z | experiments/architectures/PAE_network.py | Butters-cloud/denoising-normalizing-flow | 12d56a0d069e10a744acabf5e78fdbfba8df54ee | ["MIT"] | 1 | 2022-01-26T22:44:07.000Z | 2022-01-26T22:44:07.000Z |
# NOTE: the original snippet uses `tf` and GAN ops (lrelu, conv2d, deconv2d,
# linear, batch_norm) without importing them. TensorFlow 1.x graph mode is
# assumed below; the ops are assumed to come from a companion module of the
# repository (an assumption, not confirmed by this snippet).
import tensorflow as tf
# from ops import lrelu, conv2d, deconv2d, linear, batch_norm  # assumed helpers

def infoGAN_encoder(params, is_training):
is_training = tf.constant(is_training, dtype=tf.bool)
def encoder(x):
with tf.variable_scope('model/encoder',['x'], reuse=tf.AUTO_REUSE):
net = lrelu(conv2d(x, 64, 4, 4, 2, 2, name='conv1', use_sn=True))
net = conv2d(net, 128, 4, 4, 2, 2, name='conv2', use_sn=True)
net = batch_norm(net, is_training=is_training, scope='b_norm1')
net = tf.layers.dropout(net,rate=params['dropout_rate'],training=is_training)
net = lrelu(net)
net = tf.reshape(net, [params['batch_size'], -1])
net = linear(net, 1024, scope="ln1", use_sn=True)
net = batch_norm(net, is_training=is_training, scope='b_norm2')
net = tf.layers.dropout(net,rate=params['dropout_rate'],training=is_training)
net = lrelu(net)
net = linear(net, 2 * params['latent_size'], scope="ln_output", use_sn=True)
return net
return encoder
def infoGAN_decoder(params,is_training):
is_training = tf.constant(is_training, dtype=tf.bool)
def decoder(z):
with tf.variable_scope('model/decoder',['z'], reuse=tf.AUTO_REUSE):
net = tf.nn.relu(batch_norm(linear(z, 1024, 'ln2'), is_training=is_training, scope='b_norm3'))
net = tf.nn.relu(batch_norm(linear(net, 128 * (params['width'] // 4) * (params['height'] // 4), scope='ln3'), is_training=is_training, scope='b_norm4'))
net = tf.layers.dropout(net,rate=params['dropout_rate'],training=is_training)
net = tf.reshape(net, [params['batch_size'], params['width'] // 4, params['height'] // 4, 128])
net = tf.nn.relu(batch_norm(deconv2d(net, [params['batch_size'], params['width'] // 2, params['height'] // 2, 64], 4, 4, 2, 2, name='conv3'), is_training=is_training, scope='b_norm5'))
net = tf.layers.dropout(net,rate=params['dropout_rate'],training=is_training)
net = tf.nn.sigmoid(deconv2d(net, [params['batch_size'], params['width'], params['height'], params['n_channels']], 4, 4, 2, 2, name='conv4'))
net = net-0.5
return net
    return decoder

# -*- coding: utf-8 -*-
"""
Created on Mon Jun 7 12:57:10 2021
@author: horvat
"""
0301c782c51b1c6595901ee0b2e38930f8a7ecd2 | 2,344 | py | Python | agents/a2c.py | TomMakkink/transformers-for-rl | 9d025f92611e957004030af9ef05a07e320856a7 | ["MIT"] | 1 | 2022-03-09T20:44:27.000Z | 2022-03-09T20:44:27.000Z | agents/a2c.py | TomMakkink/transformers-for-rl | 9d025f92611e957004030af9ef05a07e320856a7 | ["MIT"] | null | null | null | agents/a2c.py | TomMakkink/transformers-for-rl | 9d025f92611e957004030af9ef05a07e320856a7 | ["MIT"] | null | null | null |
from agents.agent import Agent
from models.actor_critic_mlp import ActorCriticMLP
import numpy as np
import torch
import torch.optim as optim
from utils import plot_grad_flow
class A2C(Agent):
def __init__(
self,
state_size,
action_size,
hidden_size,
memory,
lr,
gamma,
device,
use_norm,
**kwargs
):
super(A2C, self).__init__(state_size, action_size, hidden_size, memory)
self.device = device
self.net = ActorCriticMLP(state_size, action_size, hidden_size, memory).to(
self.device
)
self.optimiser = optim.Adam(self.net.parameters(), lr=lr)
self.gamma = gamma
self.log_probs = []
self.values = []
self.rewards = []
self.use_norm = use_norm
def _compute_returns(self):
R = 0
returns = []
for step in reversed(range(len(self.rewards))):
R = self.rewards[step] + self.gamma * R
returns.insert(0, R)
returns = np.array(returns)
if self.use_norm:
returns -= returns.mean()
if returns.std() > 0.0:
returns /= returns.std()
return returns
def optimize_network(self):
returns = self._compute_returns()
returns = torch.from_numpy(returns).float().to(self.device)
values = torch.cat(self.values).squeeze(1)
log_probs = torch.cat(self.log_probs)
delta = returns - values
policy_loss = -torch.sum(log_probs * delta.detach())
value_function_loss = 0.5 * torch.sum(delta ** 2)
loss = policy_loss + value_function_loss
self.optimiser.zero_grad()
loss.backward()
# plot_grad_flow(self.net.named_parameters())
self.optimiser.step()
return loss.detach().item()
def reset(self):
self.values = []
self.log_probs = []
self.rewards = []
self.net.reset()
def act(self, state):
dist, value = self.net(state)
action = dist.sample()
log_prob = dist.log_prob(action)
self.log_probs.append(log_prob)
self.values.append(value)
return action.detach().item()
def collect_experience(self, state, action, reward, next_state, done):
self.rewards.append(reward)
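
# Hedged usage sketch: a gym-style environment `env` is assumed (not part of
# this module), and `memory=None` is an assumption about the ActorCriticMLP API.
#
#     agent = A2C(state_size=4, action_size=2, hidden_size=64, memory=None,
#                 lr=1e-3, gamma=0.99, device="cpu", use_norm=True)
#     for episode in range(100):
#         state, done = env.reset(), False
#         while not done:
#             action = agent.act(torch.as_tensor(state, dtype=torch.float32))
#             next_state, reward, done, _ = env.step(action)
#             agent.collect_experience(state, action, reward, next_state, done)
#             state = next_state
#         loss = agent.optimize_network()
#         agent.reset()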
03026b2e47c7dd4c6d92ff2379d10e3b1de12161 | 329 | py | Python | gan_train_parameters.py | Amitdedhia6/DrugDiscovery | c70dec96cee4d0d643a8b9de30530b6871fdf05e | ["Apache-2.0"] | null | null | null | gan_train_parameters.py | Amitdedhia6/DrugDiscovery | c70dec96cee4d0d643a8b9de30530b6871fdf05e | ["Apache-2.0"] | null | null | null | gan_train_parameters.py | Amitdedhia6/DrugDiscovery | c70dec96cee4d0d643a8b9de30530b6871fdf05e | ["Apache-2.0"] | null | null | null |
from common import google_cloud
class GANTrainParameters():
def __init__(self):
self.num_epochs = 2000
self.batch_size = 10000
self.num_steps = 1
self.lr_d = 0.01
self.lr_g = 0.001
if not google_cloud:
self.batch_size = 1
training_param = GANTrainParameters()
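
# Usage sketch: the module-level instance can be imported and tweaked before
# training, e.g.
#
#     from gan_train_parameters import training_param
#     training_param.num_epochs = 500   # override a default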
0306c30fc6b959f99f270569a581136d38981cb5 | 8,672 | py | Python | tools/medico_classify.py | 1512159/tf-faster-rcnn-medico | 94c5cff76ef7bd271de050a8de53bd0145c6c8ec | ["MIT"] | null | null | null | tools/medico_classify.py | 1512159/tf-faster-rcnn-medico | 94c5cff76ef7bd271de050a8de53bd0145c6c8ec | ["MIT"] | null | null | null | tools/medico_classify.py | 1512159/tf-faster-rcnn-medico | 94c5cff76ef7bd271de050a8de53bd0145c6c8ec | ["MIT"] | null | null | null |
#!/usr/bin/env python
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen, based on code from Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
from model.config import cfg
from model.test import im_detect
from model.nms_wrapper import nms
from utils.timer import Timer
import tensorflow as tf
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
import os, cv2
import argparse
from nets.vgg16 import vgg16
from nets.resnet_v1 import resnetv1
import glob
import datetime
import pickle
import itertools
CLASSES = ('__background__', # always index 0
'normal',
'polyp','dyed-lifted-polyp','dyed-resection-margin')
NETS = {'vgg16': ('vgg16_faster_rcnn_iter_70000.ckpt',),'res101': ('res101_faster_rcnn_iter_5000.ckpt',)}
DATASETS= {'pascal_voc': ('voc_2007_trainval',),'pascal_voc_0712': ('voc_2007_trainval+voc_2012_trainval',),'medico_2018':('medico_2018_trainval',)}
def plot_confusion_matrix(cm, classes,
normalize=True,
title='Confusion matrix',
cmap=plt.cm.Reds):
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else '.0f'  # raw counts need no decimals
thresh = cm.max() / 2.
n_classes = len(classes)
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label\n')
def class_max_conf(dets, thresh=0.5):
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return 0.0
tmp = np.argmax(dets[:,-1])
return dets[tmp,-1]
def demo(log_out,sess, net, image_name, gt, cfs_mat, INP_DIR, CONF_THRESH):
"""Detect object classes in an image using pre-computed object proposals."""
# Load the input image
im = cv2.imread(image_name)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes = im_detect(sess, net, im)
timer.toc()
# Visualize detections for each class
NMS_THRESH = 0.3
res_cls = CLASSES[1]
res_conf = 0.0
for cls_ind, cls in enumerate(CLASSES[2:]):
        cls_ind += 2  # offset past '__background__' and 'normal' (assumes score columns follow CLASSES order)
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
tmp = class_max_conf(dets,CONF_THRESH)
if (tmp>res_conf):
res_conf = tmp
res_cls = cls
cfs_mat[gt][res_cls] += 1
correct = (gt == res_cls)
img_id = image_name.replace(INP_DIR,'')
log_out.write(img_id+','+str(correct)+','+gt+','+res_cls+','+'{:3f},{:3f}'.format(res_conf,timer.total_time)+'\n')
return correct
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Tensorflow Faster R-CNN demo')
parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16 res101]',
choices=NETS.keys(), default='res101')
parser.add_argument('--dataset', dest='dataset', help='Trained dataset [pascal_voc pascal_voc_0712]',
choices=DATASETS.keys(), default='medico_2018')
parser.add_argument('--inpdir', dest='inpdir')
parser.add_argument('--testlist', dest='testlist')
parser.add_argument('--conf', dest='conf', default='0.9')
parser.add_argument('--outdir', dest='outdir', default = 'result')
args = parser.parse_args()
return args
if __name__ == '__main__':
cfg.TEST.HAS_RPN = True # Use RPN for proposals
#CREATE TIME STAMP ID
time_stamp = str(datetime.datetime.now())
#INPUT AND OUTPUT DIRECTORY
args = parse_args()
INPUT_DIR = args.inpdir
OUTPUT_DIR = os.path.join('cls_result',args.outdir+'_'+time_stamp+'/')
OUTPUT_LOG = OUTPUT_DIR + 'log_'+time_stamp+'.csv'
TEST_LIST = args.testlist
#SAVE LOG FILE
print('Save log to = '+ OUTPUT_LOG)
if not os.path.exists(os.path.dirname(OUTPUT_LOG)):
os.makedirs(os.path.dirname(OUTPUT_LOG))
flog = open(OUTPUT_LOG,"w")
flog.write('id,correct,gt_cls,predict_cls,conf,time\n')
#CONFIDENT THRESH
CONF_THRESH = float(args.conf)
demonet = args.demo_net
dataset = args.dataset
tfmodel = os.path.join('output', demonet, DATASETS[dataset][0], 'default',
NETS[demonet][0])
if not os.path.isfile(tfmodel + '.meta'):
raise IOError(('{:s} not found.\nDid you download the proper networks from '
'our server and place them properly?').format(tfmodel + '.meta'))
# set config
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth=True
# init session
sess = tf.Session(config=tfconfig)
# load network
if demonet == 'vgg16':
net = vgg16()
elif demonet == 'res101':
net = resnetv1(num_layers=101)
else:
raise NotImplementedError
net.create_architecture("TEST", 5,
tag='default', anchor_scales=[4, 8, 16, 32])
saver = tf.train.Saver()
saver.restore(sess, tfmodel)
print('Loaded network {:s}'.format(tfmodel))
fi = open(TEST_LIST)
lines = fi.readlines()
print('Total input imgs = '+str(len(lines)))
num_of_test = len(lines)
cfs_mat = {}
for i_class in CLASSES[1:]:
cfs_mat[i_class] = {}
for j_class in CLASSES[1:]:
cfs_mat[i_class][j_class] = 0
for i,line in enumerate(lines):
# print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
# print('Demo for data/demo/{}'.format(im_name))
im_name, gt = line.strip().split(' ')
im_name = os.path.join(INPUT_DIR,im_name)
if (i%10 == 0):
print(str(i) + '/' + str(num_of_test))
        if (i % 100 == 0 and i > 0) or (i == len(lines) - 1):
c = '{:25s}'.format('')
for i_class in CLASSES[1:]:
c+= '{:25s}'.format(i_class)
print(c)
for i_class in CLASSES[1:]:
c = '{:25s}'.format(i_class)
for j_class in CLASSES[1:]:
c+= '{:25s}'.format(str(cfs_mat[i_class][j_class]))
print(c+'\n')
print('-------------------')
crr = demo(flog, sess, net, im_name, gt, cfs_mat, INPUT_DIR, CONF_THRESH)
flog.close()
#SAVE cvs_mat
fo = open(OUTPUT_DIR+'confusion_matrix.pickle',"wb")
pickle.dump((CLASSES,cfs_mat),fo)
fo.close()
#PRINT result
fo = open(OUTPUT_DIR+'confusion_matrix.txt',"w")
print('--------FINAL RESULT-----------')
print('Total = ' + str(num_of_test))
print('Confusion matrix: ')
c = '{:25s}'.format('')
for i_class in CLASSES[1:]:
c+= '{:25s}'.format(i_class)
print(c)
fo.write(c + '\n')
for i_class in CLASSES[1:]:
c = '{:25s}'.format(i_class)
for j_class in CLASSES[1:]:
c+= '{:25s}'.format(str(cfs_mat[i_class][j_class]))
print(c+'\n')
fo.write(c + '\n')
fo.close()
#SAVE RES IMG
n_cls = len(CLASSES[1:])
cm = np.zeros((n_cls,n_cls))
for i,i_class in enumerate(CLASSES[1:]):
for j,j_class in enumerate(CLASSES[1:]):
cm[i][j] = int(cfs_mat[i_class][j_class])
plt.figure()
plot_confusion_matrix(cm,CLASSES[1:], title = 'Confusion matrix normalized')
plt.tight_layout()
plt.savefig(OUTPUT_DIR+'confusion_matrix_normalized.png', dpi = 600)
print('Confusion matrix normalize saved!')
plt.figure()
plot_confusion_matrix(cm,CLASSES[1:],normalize=False)
plt.tight_layout()
plt.savefig(OUTPUT_DIR+'confusion_matrix.png', dpi = 600)
print('Confusion matrix saved!')
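
# Example invocation sketch (paths are placeholders, not real data):
#
#     python tools/medico_classify.py --net res101 --dataset medico_2018 \
#         --inpdir data/medico/images/ --testlist data/medico/test_list.txt \
#         --conf 0.9 --outdir result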
030752d970037ce742be150222524d388e311557 | 6,705 | py | Python | speech.py | jsarchibald/room-designer | d90f39f6b7a98d66f2f4c09529aaa46aea68611b | ["MIT"] | null | null | null | speech.py | jsarchibald/room-designer | d90f39f6b7a98d66f2f4c09529aaa46aea68611b | ["MIT"] | null | null | null | speech.py | jsarchibald/room-designer | d90f39f6b7a98d66f2f4c09529aaa46aea68611b | ["MIT"] | null | null | null |
import pygame
import speech_recognition as sr
from time import sleep
import events
import objects as obj_types
from settings import SPEECH_CRED_FILE
from speech_helpers import correct_text, either_side, get_after, get_position, get_positions, get_size, is_in_objects, process_relative, select_obj_type
# The set of currently-supported command keywords
COMMANDS = {"create", "save", "add", "insert", "delete", "remove", "goodbye", "exit", "quit", "new", "open", "move", "relocate", "here", "there", "rename", "export", "right", "left", "up", "down", "resize"}
# Some functions to abstract out the event creation process.
def create(text):
"""Create an object in the room."""
# Parameters
location = get_position(text)
size = get_size(text)
if "called" in text:
called = " ".join(get_after("called", text))
else:
called = None
# Object types
obj = is_in_objects(text)
if obj is not None:
obj_type = obj_types.obj_types[obj]
pygame.event.post(
pygame.event.Event(events.design_type,
method="create",
shape=obj_type["shape"],
location=location,
color=obj_type["color"],
size=size,
outline=obj_type["outline"],
obj_type=obj,
text=called,
text_color=obj_type["text_color"]))
def delete(text):
"""Delete an object in the room."""
location = get_position(text)
obj_type = select_obj_type(text)
# Post event
evt = pygame.event.Event(events.design_type, method="delete", location=location, obj_type=obj_type)
pygame.event.post(evt)
def move(text):
"""Move an object in the room."""
# Parameters
locations = get_positions(text, 2)
location = locations[0]
# Check for relative positioning, then move on to explicit positioning
to_location = process_relative(text)
if to_location is None:
to_location = locations[1]
obj_type = select_obj_type(text)
# Post event
evt = pygame.event.Event(events.design_type,
method="move",
location=location,
to_location=to_location,
obj_type=obj_type)
pygame.event.post(evt)
def rename(text):
"""Rename an object in the scene."""
# Parameters
location = get_position(text)
if "to" in text:
called = " ".join(get_after("to", text))
elif "as" in text:
called = " ".join(get_after("as", text))
elif "2" in text:
called = " ".join(get_after("2", text))
else:
called = None
obj_type = select_obj_type(text)
# Post event
evt = pygame.event.Event(events.design_type, method="rename", location=location, obj_type=obj_type, text=called)
pygame.event.post(evt)
def resize(text):
"""Resize an object in the scene."""
# Parameters
location = get_position(text)
size = get_size(text)
obj_type = select_obj_type(text)
# Post event
evt = pygame.event.Event(events.design_type, method="resize", location=location, obj_type=obj_type, size=size)
pygame.event.post(evt)
# Process individual voice commands.
def process_command(text, roomGrid):
"""Process voice commands. Returns False if program should quit."""
text = correct_text(text)
# Program controls
if "quit" in text or "exit" in text or "close" in text or "goodbye" in text:
pygame.event.post(pygame.event.Event(pygame.QUIT))
return False
elif "open" in text:
pygame.event.post(events.file_open)
elif "new" in text and ("design" in text or "room" in text or "file" in text or "project" in text):
pygame.event.post(events.file_new)
elif "save" in text:
pygame.event.post(pygame.event.Event(events.file_type, method="save", change_name=("as" in text)))
elif "export" in text:
pygame.event.post(events.file_export)
# If finishing up a previous command
elif ("here" in text or "there" in text or "cheer" in text) and len(roomGrid.waitFunction) > 0:
location = get_position(text)
pygame.event.post(pygame.event.Event(events.ui_type, method="finish_waiting", location=location))
# Creating things
elif "add" in text or "create" in text:
create(text)
# Moving things
# fruit is a keyword because Google thinks "fruit" and "cocktail" go together real nice...
elif "move" in text or "relocate" in text or "fruit" in text:
move(text)
# Renaming things
elif "rename" in text:
rename(text)
# Resizing things
elif "resize" in text:
resize(text)
# Deleting things
elif "remove" in text or "delete" in text:
delete(text)
pygame.event.post(events.done_listening_event)
return True
# Listen for voice commands.
def listen(roomGrid):
with open(SPEECH_CRED_FILE) as f:
GOOGLE_CLOUD_SPEECH_CREDENTIALS = f.read()
context_list = list(COMMANDS.union(obj_types.possible))
r = sr.Recognizer()
try:
with sr.Microphone() as source:
r.adjust_for_ambient_noise(source, duration=2)
while True:
if roomGrid.dead:
break
audio = r.listen(source, phrase_time_limit=6)
try:
pygame.event.post(events.capture_space_event)
text = r.recognize_google_cloud(audio,
language="en-us",
credentials_json=GOOGLE_CLOUD_SPEECH_CREDENTIALS,
preferred_phrases=context_list)
                    try:
                        res = process_command(text, roomGrid)
                    except:
                        print("There was an error processing and executing the command.")
                        pygame.event.post(events.error_listening_event)
                        res = True  # keep listening even if the command failed
                    if not res:
                        break
except sr.UnknownValueError:
pygame.event.post(events.error_listening_event)
except:
print("Could not request results from Google Cloud Speech service.")
pygame.event.post(pygame.event.Event(events.error_type, error = "Speech recognition error."))
except OSError:
pygame.event.post(pygame.event.Event(events.error_type, error = "Could not connect to a microphone."))
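
# Usage sketch: process_command can be exercised directly; roomGrid is assumed
# to be the application's grid object (it only needs a `waitFunction`
# attribute here, per the checks above).
#
#     process_command("create a table here", roomGrid)
#     process_command("goodbye", roomGrid)   # posts pygame.QUIT and returns False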
0309577959891e0612c1c6a69dda2ed2d8030359 | 600 | py | Python | colosseum/mdps/river_swim/episodic/mdp.py | MichelangeloConserva/Colosseum | b0711fd9ce75520deb74cda75c148984a8e4152f | ["MIT"] | null | null | null | colosseum/mdps/river_swim/episodic/mdp.py | MichelangeloConserva/Colosseum | b0711fd9ce75520deb74cda75c148984a8e4152f | ["MIT"] | null | null | null | colosseum/mdps/river_swim/episodic/mdp.py | MichelangeloConserva/Colosseum | b0711fd9ce75520deb74cda75c148984a8e4152f | ["MIT"] | null | null | null |
import gin
from colosseum.loops import human_loop
from colosseum.mdps import EpisodicMDP
from colosseum.mdps.river_swim.river_swim import RiverSwimMDP
@gin.configurable
class RiverSwimEpisodic(EpisodicMDP, RiverSwimMDP):
@property
def _graph_layout(self):
return {node: tuple(node) for node in self.G}
if __name__ == "__main__":
mdp = RiverSwimEpisodic(
seed=42,
randomize_actions=False,
size=15,
lazy=0.01,
random_action_p=0.1,
make_reward_stochastic=True,
)
# random_loop(mdp, 50, verbose=True)
human_loop(mdp)
030af46d63eb07552bbcb49eb543b45022dab354 | 16,780 | py | Python | okokyst_station_mapping.py | trondkr/okokyst_toolbox | 3d5484458e4f346d593beb5b268378c70d391abd | ["MIT"] | null | null | null | okokyst_station_mapping.py | trondkr/okokyst_toolbox | 3d5484458e4f346d593beb5b268378c70d391abd | ["MIT"] | null | null | null | okokyst_station_mapping.py | trondkr/okokyst_toolbox | 3d5484458e4f346d593beb5b268378c70d391abd | ["MIT"] | null | null | null |
import numpy as np
import pandas as pd
import datetime
from okokyst_metadata import surveys_lookup_table
import os
import re
import glob
import gsw
from okokyst_tools import pressure_to_depth
encoding = "ISO-8859-1"
__author__ = 'Elizaveta Protsenko'
__email__ = 'Elizaveta.Protsenko@niva.no'
__created__ = datetime.datetime(2020, 9, 23)
__version__ = "1.0"
__status__ = "Development"
def to_rename_columns(df,old_name, new_name):
if old_name in df.columns:
df = df.rename(columns={old_name : new_name})
return df
def modify_df(df,onedrive,filename):
#print ("modify_df")
'''
Convert columns name to the format used further in the processing steps
'''
# df = to_rename_columns(df, 'Press', "Depth")
# (df.columns)
df = to_rename_columns(df, 'Depth(u)', "Depth")
df = to_rename_columns(df, 'Sal.', 'Salinity')
df = to_rename_columns(df, 'T(FTU)', 'FTU')
df = to_rename_columns(df, 'T (FTU)', 'FTU')
df = to_rename_columns(df, 'OpOx %', 'OptOx')
df = to_rename_columns(df, 'Ox %', 'OptOx')
df = to_rename_columns(df, 'mg/l', 'OxMgL')
df = to_rename_columns(df, 'Opt', 'OptOx')
df = to_rename_columns(df, 'Opmg/l', 'OxMgL')
df = to_rename_columns(df, 'Opml/l', 'OxMlL')
# recalculate Oxygen into Ml/l
convert_dict = {
'Press': float
}
df = df.astype(convert_dict)
#print ("press to float")
if 'OxMgL' in df.columns:
print ('recalculate to ml/l')
df = df.astype({'OxMgL': float})
df['OxMgL'] = df.OxMgL.values / 1.42905
df = to_rename_columns(df, 'OxMgL', 'OxMlL')
try:
df['Date'] = pd.to_datetime(df['Date'], format='%d.%m.%Y').dt.strftime('%d.%m.%Y')
except Exception as e:
print ('date',e)
try:
df['Time'] = pd.to_datetime(df['Time'], format='%H:%M:%S').dt.strftime('%H.%M.%S')
except Exception as e:
print ('time', e)
try:
df = df.astype({'OxMlL': float})
except Exception as e:
print ('float', e)
try:
df = df.astype({'OxMgL': float})
except:
print ('Probably Oxygen is missing')
df = df.dropna(how='all', axis=1)
df = df.round(4)
    if 'OptOx' not in df.columns or len(set(df['OptOx'].values)) < 5:
        with open(f"{onedrive}\\NoOxygenData.txt", "w+") as er:
            er.write(filename)
return df
class processStation(object):
def __init__(self, inputpath,onedrive,survey = None):
self.input_path = inputpath
self.base_path = os.path.split(self.input_path)[0]
name = os.path.split(self.input_path)[1]
self.onedrive = onedrive
if survey != None:
self.survey = survey
else:
self.survey = self.get_region_from_path()
#try:
# y = re.findall("[0-9]", str(name))
# x = ''.join(y)
# print (name,x)
# self.correct_survey_date = pd.to_
# datetime(x, format='%Y%m%d').strftime('%d.%m.%Y')
# print ('correct_survey_date', self.correct_survey_date)#.values
#except:
# y = re.findall("[0-9]{8}", str(name))
# x = ''.join(y)
# print(name, x)
# self.correct_survey_date = pd.to_datetime(x, format='%Y%m%d').strftime('%d.%m.%Y')
# print('correct_survey_date', self.correct_survey_date) # .values
self.non_assigned = []
self.assigned = []
self.stations_list = list(surveys_lookup_table[self.survey].keys())
self.stations_depths = np.array([surveys_lookup_table[self.survey][st]['depth'] for st in self.stations_list])
self.df_all = self.read_convert_df()
try:
self.calc_depth()
except Exception as e:
print('Error in reading the dataframe', e)
try:
self.df_all = modify_df(self.df_all, self.onedrive,name)
grouped = self.df_all.groupby('Ser')
for name, group_df in grouped:
self.match_stations_by_depth(group_df)
except Exception as e:
print('Error in reading the dataframe',e)
def calc_depth(self):
first_st = list(surveys_lookup_table[self.survey].keys())[0]
#print ('calc depth')
latitude = surveys_lookup_table[self.survey][first_st]["station.latitude"]
depths = []
for p in self.df_all['Press'].values:
d = pressure_to_depth(float(p), latitude)
depths.append(d)
self.df_all['Depth'] = depths
def get_region_from_path(self):
regions = {'Leon': 'Sognefjorden', 'Kvitsoy': 'Hardangerfjorden',
'Hardangerfjorden': 'Hardangerfjorden', 'Sognefjorden': 'Sognefjorden', 'RMS': 'RMS',
'Aquakompetens': 'Aqua kompetanse'}
for r in regions:
name_to_check = re.compile(r, re.IGNORECASE)
find_match = name_to_check.search(self.input_path)
if find_match:
return regions[r]
def read_convert_df(self):
print ('\n******************************')
print ('Reading', self.input_path)
# read the document and skip undefined number of unneeded rows
for n in range(1, 16):
#print('Attempt N', n)
try:
df_all = pd.read_csv(self.input_path, skiprows=n, header=n-1,
sep=';', decimal=',', encoding=encoding)
#print (df_all.head())
if len(df_all.columns) < 10:
#print('short', df_all.columns)
try:
df_all = pd.read_csv(self.input_path, skiprows=n, header=n,
sep=';', decimal=',', encoding=encoding)
#print(df_all.columns)
break
except Exception as e:
#print('Exception 2')
pass
else:
break
except Exception as e:
#print('Exception 1')
df_all = None
try:
df_all = pd.read_csv(self.input_path, skiprows=n, header=n-1,
sep=';', decimal='.')
if len(df_all.columns) < 10:
#print('short', df_all.columns)
try:
df_all = pd.read_csv(self.input_path, skiprows=n, header=n,
sep=';', decimal=',')
#print(df_all.columns)
df_all.head()
break
except Exception as e:
#print('Exception 4')
pass
except Exception as e:
#print('Exception 3')
df_all = None
try:
pass
#print ('Successfully read file')
#print (df_all.columns)
except Exception as e:
#print (e)
pass
return df_all
def match_stations_by_depth(self, group):
# Get number of the cast
Ser = group['Ser'].values[0]
print('Processing Cast', Ser)
self.survey_date = group.Date.values[0]
max_depth = np.max(group['Depth'].max())
# find the closest depth in the arr with all stations for this region
difs = self.stations_depths - max_depth
print('difs', difs)
difs_pos = list(filter(lambda x : x > -1, difs))
#print (difs_pos,'filtered difs')
#sqr_difs = np.sqrt(difs**2)
min_dif = np.min(difs_pos)
print('max depth', max_depth,'min difference', min_dif, 'Time', group.Time.values[0])
self.make_new_base_path()
if 'Salinity' not in group.columns:
group = self.calc_salinity(group)
#if self.survey == 'Hardangerfjorden':
# dif_threshold = 50
#else:
dif_threshold = 50
group=group.drop(columns=['Press'])
columns = group.columns
if 'OxMgL' in columns:
columnOrder=['Ser','Meas','Salinity','Conductivity', 'Temp', 'FTU',
'OptOx', 'OxMgL', 'Density', 'Depth', 'Date', 'Time']
#print('max OxMlL') #, group['OxMgL'].max(), group.columns)
else:
columnOrder=['Ser','Meas','Salinity','Conductivity', 'Temp', 'FTU',
'OptOx', 'OxMlL', 'Density', 'Depth', 'Date', 'Time']
#print('max OxMlL') #, group['OxMlL'].max(), group.columns)
group=group.reindex(columns=columnOrder)
if min_dif < dif_threshold:
# double check the sign of the difference (if cast went deeper than the station, do no assign)
nearest_depth_id = np.where(difs == min_dif)[0][0]
#print ('stations list', self.stations_list)
self.station_name = self.stations_list[nearest_depth_id]
self.station_metadata = surveys_lookup_table[self.survey][self.station_name]
if self.station_name in self.assigned:
print(self.station_name, 'already assigned stations:', self.assigned)
print ("duplicate")
self.station_name = self.station_name + "_duplicate"
# Save df matched by station
#self.filename = os.path.join(self.base_path, self.station_name + '.txt')
self.filename = os.path.join(self.new_base_path, self.station_name + '_temp.txt')
self.figname = os.path.join(self.new_base_path, self.station_name + '.png')
print('Assigned station_name', self.station_name)
##print('save data to file with ', self.filename, Ser)
import matplotlib.pyplot as plt
plt.figure()
plt.style.use('ggplot')
plt.title(self.station_name)
plt.plot(group['OxMlL'],group.Depth)
plt.ylim(group.Depth.max(),group.Depth.min())
plt.savefig(self.figname)
group.to_csv(self.filename, sep=';')
#Add header and save update file in the new location
self.assigned.append(self.station_name)
self.add_metadata_header()
else:
print('Was not able to find a matching station name')
if max_depth < 10:
print("Probably it is a cleaning station ")
new_filename = os.path.join(self.new_base_path, 'Cleaning_station' + str(Ser) + '.txt')
else:
#print('available station depths', self.stations_depths)
#filename = self.base_path + r'\\Unknown_station' + str(Ser) + '.txt'
print('Cast Unknown_station', Ser)
new_filename = self.new_base_path + r'\\Unknown_station' + str(Ser) + '.txt'
self.non_assigned.append(new_filename)
#group.to_csv(filename, index=False, sep=';')
#print (group['OxMlL'].values.max())
group.to_csv(new_filename, index=False, sep=';')
#else:
# print ('Date of measurement does not match date in a filename')
# print(self.survey_date, self.correct_survey_date, self.survey_date == self.correct_survey_date)
return
def calc_salinity(self,group):
''' If salinity is not in the list
calculate if from TSP
'''
print( 'calculating_salinity')
salinity = []
for n in range(len(group['Cond.'])):
s = gsw.SP_from_C(group['Cond.'].values[n], group['Temp'].values[n], group['Press'].values[n])
salinity.append(s)
group['Salinity'] = salinity
return group
def make_new_base_path(self):
# datetime.datetime.strptime(
date_folder = pd.to_datetime(str(self.survey_date), format='%d.%m.%Y').strftime('%Y-%m-%d')
##self.new_base_path = os.path.join(onedrive, self.survey, date_folder, date_folder + " CTD data")
self.new_base_path = os.path.join(self.onedrive, date_folder + " CTD data")
if not os.path.exists(self.new_base_path):
os.makedirs(self.new_base_path)
def add_metadata_header(self):
header = self.station_metadata['station.header']
#print ('adding metadata header to ', self.station_name,'.txt')
new_filename = os.path.join(self.new_base_path, self.station_name + '.txt')
print ('save data to', new_filename)
# Open initial file, update header, save the new file in One_Drive
with open(self.filename, 'r') as read_obj, open(new_filename, 'w') as write_obj:
write_obj.write(header)
for line in read_obj:
write_obj.write(line)
try:
os.remove(self.filename)
except Exception as e:
print(e)
def manual_add_metadata_header(filepath, station_name):
t = surveys_lookup_table
base_path = os.path.split(filepath)[0]
surveys = t.keys()
for key in surveys:
if station_name in t[key]:
header = t[key][station_name]['station.header']
break
new_filename = os.path.join(base_path, station_name + '.txt')
# Open initial file, update header, save the new file in One_Drive
with open(filepath, 'r') as read_obj, open(new_filename, 'w') as write_obj:
write_obj.write(header)
for line in read_obj:
write_obj.write(line)
try:
os.remove(filepath)
except Exception as e:
print (e)
#os.rename(filepath, base_path +f'to_{station_name}.txt')
if __name__ == "__main__":
#k_work_dir = r'K:/Avdeling/214-Oseanografi/DATABASER/OKOKYST_2017/'
#task = "sognefjorden"
#leon = r"K:\Avdeling\214-Oseanografi\DATABASER\OKOKYST_2017\OKOKYST_NS_Nord_Leon\\"
def call_process(main_path, foldername):
path = os.path.join(main_path, foldername)
onedrive = path
        files = glob.glob(os.path.join(path, '*.txt'))
for f in files:
if 'OBS' not in f:
processStation(f,onedrive)
user = 'ELP'
main_path_RMS = fr"C:\Users\{user}\OneDrive - NIVA\Okokyst_CTD\Norskehavet_Sor\RMS"
main_path_aqua = fr"C:\Users\{user}\OneDrive - NIVA\Okokyst_CTD\Norskehavet_Sor\Aquakompetens"
#foldernames = [f for f in os.listdir(main_path) if re.match(r'2021', f)]
#RMS
#call_process(main_path_RMS,'06_2021')
#call_process('04-2021')
#call_process('06-2021')
#call_process('07-2021')
#call_process('08-2021')
#Aqua kompetanse
call_process(main_path_aqua,'2021-08')
# Sognefjorden 2021
main_path_sognefjorden = fr"C:\Users\{user}\OneDrive - NIVA\Okokyst_CTD\Nordsjoen_Nord\Sognefjorden"
#foldername = "2021-01-25"
# Here the automatic assignment did not work, due to bad weather the CTD did not reach the bottom
#call_process(main_path_sognefjorden, "2021-02-17")
#manual_add_metadata_header(r"C:\Users\ELP\OneDrive - NIVA\Okokyst_CTD\Nordsjoen_Nord\Sognefjorden\2021-02-17\2021-02-17 CTD data\Unknown_station2.txt", 'VT16')
#call_process(main_path_sognefjorden, '2021-03-14')
#call_process(main_path_sognefjorden, '2021-04-18')
#call_process(main_path_sognefjorden, '2021-05-19')
#call_process(main_path_sognefjorden, '2021-06-17')
#call_process(main_path_sognefjorden, '2021-07-14')
#call_process(main_path_sognefjorden, '2021-08-18')
main_path_hardangerfjorden = r'C:\Users\ELP\OneDrive - NIVA\Okokyst_CTD\Nordsjoen_Nord\Hardangerfjorden'
#call_process(main_path_hardangerfjorden,'2021-01-18',survey = 'Hardangerfjorden_old')
#manual_add_metadata_header(r'C:\Users\ELP\OneDrive - NIVA\Okokyst_CTD\Nordsjoen_Nord\Hardangerfjorden\2021-01-18\2021-01-18 CTD data\Unknown_station3.txt',
# "VT70")
#call_process(main_path_hardangerfjorden,'2021-02-23',survey = 'Hardangerfjorden_old')
#call_process(main_path_hardangerfjorden,'2021-03-22-23')#,survey = 'Hardangerfjorden_old'
#manual_add_metadata_header(r"C:\Users\ELP\OneDrive - NIVA\Okokyst_CTD\Nordsjoen_Nord\Hardangerfjorden\2021-03-22-23\2021-03-22 CTD data\Unknown_station4.txt",
# 'VR49')
call_process(main_path_hardangerfjorden, "2021-04-20-21")
#call_process(main_path_hardangerfjorden, '2021-05-18-20')
#call_process(main_path_hardangerfjorden, '2021-06')
#call_process(main_path_hardangerfjorden, "2021-07")
#call_process(main_path_hardangerfjorden, '2021-08')
#Has to be checked, no oxygen! did not work
###call_process(main_path_hardangerfjorden, "2021-05-18-20")
#call_process(main_path_hardangerfjorden, "2021-07")
print ('\n\n')
##for f in foldernames:
## call_process(f)
| 35.475687
| 164
| 0.589035
| 2,126
| 16,780
| 4.454374
| 0.167921
| 0.021964
| 0.031679
| 0.040127
| 0.40982
| 0.360612
| 0.291658
| 0.232629
| 0.194403
| 0.190285
| 0
| 0.02644
| 0.280989
| 16,780
| 472
| 165
| 35.550847
| 0.758475
| 0.273123
| 0
| 0.259843
| 0
| 0
| 0.124896
| 0.029397
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047244
| false
| 0.015748
| 0.03937
| 0
| 0.114173
| 0.090551
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
030c9f62ddfe8e4538cc0711a16b8bb6b36078a9 | 715 | py | Python | Python/350_Intersection_of_Two_arrays_II.py | simonecorbo99/hacktoberfest2019-leetcode | 1e7150dafe337455616b2aea7a2cf2ffaf02cfd5 | ["MIT"] | 5 | 2019-10-01T17:07:36.000Z | 2020-10-30T21:01:35.000Z | Python/350_Intersection_of_Two_arrays_II.py | simonecorbo99/hacktoberfest2019-leetcode | 1e7150dafe337455616b2aea7a2cf2ffaf02cfd5 | ["MIT"] | 7 | 2019-10-05T17:52:33.000Z | 2020-10-29T04:52:29.000Z | Python/350_Intersection_of_Two_arrays_II.py | simonecorbo99/hacktoberfest2019-leetcode | 1e7150dafe337455616b2aea7a2cf2ffaf02cfd5 | ["MIT"] | 50 | 2019-10-01T21:07:07.000Z | 2021-11-05T07:15:36.000Z |
'''Given two arrays, write a function to compute their intersection.
'''
class Solution(object):
def intersect(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
        m, n = len(nums1), len(nums2)
        l = []
        # Scan the longer array and remove matches from the shorter one so
        # duplicate elements are counted correctly.
        if m >= n:
            for i in range(m):
                if nums1[i] in nums2:
                    l.append(nums1[i])
                    nums2.remove(nums1[i])
        else:
            for i in range(n):
                if nums2[i] in nums1:
                    l.append(nums2[i])
                    nums1.remove(nums2[i])
        return l
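
# An alternative sketch: the same multiset intersection in O(m + n) with
# collections.Counter instead of repeated list scans.
if __name__ == "__main__":
    from collections import Counter

    def intersect_counter(nums1, nums2):
        counts = Counter(nums1)
        out = []
        for x in nums2:
            if counts[x] > 0:      # still have unmatched copies of x
                out.append(x)
                counts[x] -= 1
        return out

    print(Solution().intersect([1, 2, 2, 1], [2, 2]))      # [2, 2]
    print(intersect_counter([4, 9, 5], [9, 4, 9, 8, 4]))   # [9, 4]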
030d393657be4aeb44ea123011d64e69a5d1d746
| 5,609
|
py
|
Python
|
dataset/egtea_gaze.py
|
AllenXuuu/DCR
|
2240b78ea7e03c43be8ba0a8649e6ab07db36fbd
|
[
"Apache-2.0"
] | null | null | null |
dataset/egtea_gaze.py
|
AllenXuuu/DCR
|
2240b78ea7e03c43be8ba0a8649e6ab07db36fbd
|
[
"Apache-2.0"
] | null | null | null |
dataset/egtea_gaze.py
|
AllenXuuu/DCR
|
2240b78ea7e03c43be8ba0a8649e6ab07db36fbd
|
[
"Apache-2.0"
] | null | null | null |
from collections import defaultdict
import json
from pandas.core import frame
import torch
import pandas as pd
import os
import pickle as pkl
import numpy as np
import cv2
import h5py
import tqdm
import functools
import lmdb


class EGTEA_GAZE_DATASET(torch.utils.data.Dataset):
    def __init__(self, logger, config, root=None):
        super().__init__()
        # fall back to the default data root when none is given (the original
        # assigned the literal unconditionally, leaving the `root` argument unused)
        self.root = root if root is not None else './data/EG+'
        self.name = config.name
        self.split = config.split
        self.config = config
        self.model_fps = config.fps
        self.tau_a = config.tau_a
        self.feature = config.feature
        self.feature_fps = config.feature_fps
        self.feature_dim = config.feature_dim
        assert config.name == 'EGTEA_GAZE+'

        self.class_info = pd.read_csv(os.path.join(self.root, 'actions.csv'), names=['action_class', 'verb_noun_class', 'text'])
        self.num_action = self.class_info.shape[0]
        self.vn2action = []
        for _, a in self.class_info.iterrows():
            v, n = list(map(int, a.verb_noun_class.split('_')))
            self.vn2action.append([v, n])
        self.num_verb = len(set([a[0] for a in self.vn2action]))
        self.num_noun = len(set([a[1] for a in self.vn2action]))

        annotation_file = {
            'train1': 'training1.csv',
            'train2': 'training2.csv',
            'train3': 'training3.csv',
            'valid1': 'validation1.csv',
            'valid2': 'validation2.csv',
            'valid3': 'validation3.csv',
        }[config.split]
        annotation_file = os.path.join(self.root, annotation_file)
        assert config.past_frame > 0

        self.data = []
        info = pd.read_csv(annotation_file, header=None, names=['video', 'start', 'end', 'verb', 'noun', 'action'])
        for idx, a in info.iterrows():
            video_name = a.video
            start_frame = a.start
            end_frame = a.end
            aid = a.action
            vid = a.verb
            nid = a.noun
            segment = {
                'id': idx,
                'video_id': video_name,
                'next_verb_class': vid,
                'next_noun_class': nid,
                'next_action_class': aid,
            }
            # optionally drop segments that start too close to the clip start
            if config.drop and start_frame <= self.tau_a * self.feature_fps:
                continue
            # indices of the feature frames observed before the anticipated action
            frame_index = np.arange(
                start_frame - self.tau_a * self.feature_fps + config.forward_frame * self.feature_fps / self.model_fps,
                start_frame - self.tau_a * self.feature_fps - config.past_frame * self.feature_fps / self.model_fps,
                - self.feature_fps / self.model_fps
            ).astype(int)[::-1]
            assert len(frame_index) == config.past_frame + config.forward_frame
            frame_index[frame_index < 1] = 1
            segment['frame_index'] = frame_index
            self.data.append(segment)
            # debug
            # break

        self.verb_weight, self.noun_weight, self.action_weight = None, None, None

        ##### feature
        assert config.feat_file
        self.f = lmdb.open(config.feat_file, readonly=True, lock=False)
        logger.info('[%s] # Frame: Past %d. Forward %d.' % (
            config.split, config.past_frame, config.forward_frame))
        logger.info('[%s] # segment %d. verb %d. noun %d. action %d.' % (
            config.split, len(self.data), self.num_verb, self.num_noun, self.num_action))

        self.cache = {}
        if config.cache:
            self.make_cache(logger)

    def make_cache(self, logger):
        logger.info('Cache: Load all feature into memory')
        for segment in self.data:
            for fid in segment['frame_index']:
                key = '%s_frame_%010d.jpg' % (segment['video_id'], fid)
                if key not in self.cache:
                    res = self._read_one_frame_feat(key)
                    self.cache[key] = res
        logger.info('Cache: Finish loading. Cache Size %d' % len(self.cache))

    def _read_one_frame_feat(self, key):
        if key in self.cache:
            return self.cache[key]
        with self.f.begin() as e:
            buf = e.get(key.strip().encode('utf-8'))
            if buf is not None:
                res = np.frombuffer(buf, 'float32')
            else:
                res = None
        return res

    def _load_feat(self, video_id, frame_ids):
        frames = []
        dim = self.feature_dim
        for fid in frame_ids:
            key = '%s_frame_%010d.jpg' % (video_id, fid)
            frame_feat = self._read_one_frame_feat(key)
            if frame_feat is not None:
                frames.append(frame_feat)
            elif len(frames) > 0:
                # missing frame: repeat the previous feature
                frames.append(frames[-1])
                # print('Copy frame: %s' % key)
            else:
                # missing frame with nothing before it: pad with zeros
                frames.append(np.zeros(dim))
                # print('Zero frame: %s' % key)
        return torch.from_numpy(np.stack(frames, 0)).float()

    def __len__(self):
        return len(self.data)

    def __getitem__(self, i):
        segment = self.data[i]
        out = {
            'id': segment['id'],
            'index': i
        }
        out['next_action_class'] = segment['next_action_class']
        out['next_verb_class'] = segment['next_verb_class']
        out['next_noun_class'] = segment['next_noun_class']
        out['past_frame'] = self._load_feat(
            segment['video_id'],
            segment['frame_index'],
        )
        return out
| 33.386905
| 125
| 0.54466
| 683
| 5,609
| 4.267936
| 0.234261
| 0.037736
| 0.033619
| 0.020583
| 0.142024
| 0.10566
| 0.058319
| 0.03705
| 0.026072
| 0
| 0
| 0.00997
| 0.338385
| 5,609
| 167
| 126
| 33.586826
| 0.775532
| 0.015333
| 0
| 0.015504
| 0
| 0
| 0.114275
| 0
| 0
| 0
| 0
| 0
| 0.031008
| 1
| 0.046512
| false
| 0
| 0.100775
| 0.007752
| 0.193798
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
030da47412e4bfcf6e77f631335b3a50a2f71685
| 1,092
|
py
|
Python
|
LinkedList/Merge two sorted linked lists.py
|
Bomma-Pranay/InterviewBit
|
3bc436cffd3afc7a28c67042e1589fbe7547952f
|
[
"MIT"
] | null | null | null |
LinkedList/Merge two sorted linked lists.py
|
Bomma-Pranay/InterviewBit
|
3bc436cffd3afc7a28c67042e1589fbe7547952f
|
[
"MIT"
] | null | null | null |
LinkedList/Merge two sorted linked lists.py
|
Bomma-Pranay/InterviewBit
|
3bc436cffd3afc7a28c67042e1589fbe7547952f
|
[
"MIT"
] | null | null | null |
'''
Merge Two Sorted Lists
Asked in:
Microsoft
Yahoo
Amazon
Merge two sorted linked lists and return it as a new list.
The new list should be made by splicing together the nodes of the first two lists, and should also be sorted.
For example, given following linked lists :
5 -> 8 -> 20
4 -> 11 -> 15
The merged list should be :
4 -> 5 -> 8 -> 11 -> 15 -> 20
'''
class Node:
    def __init__(self, data):
        self.data = data
        # store reference (next item)
        self.next = None
        return


class Solution:
    # @param A : head node of linked list
    # @param B : head node of linked list
    # @return the head node in the linked list
    def mergeTwoLists(self, h1, h2):
        # A dummy head simplifies the splice loop; `td` remembers it so the
        # real head (td.next) can be returned at the end.
        d = Node('a')
        td = d
        while h1 is not None and h2 is not None:
            if h1.data < h2.data:
                d.next = h1
                h1 = h1.next
            else:
                d.next = h2
                h2 = h2.next
            d = d.next
        # Attach whatever remains of the non-exhausted list.
        if h1 is not None:
            d.next = h1
        if h2 is not None:
            d.next = h2
        return td.next
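# Hypothetical driver (my addition): build the two example lists from the
# docstring above and print the merged result.
def _build(values):
    head = Node(values[0])
    node = head
    for v in values[1:]:
        node.next = Node(v)
        node = node.next
    return head


if __name__ == '__main__':
    merged = Solution().mergeTwoLists(_build([5, 8, 20]), _build([4, 11, 15]))
    while merged is not None:
        print(merged.data, end=' ')  # 4 5 8 11 15 20
        merged = merged.next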
| 22.75
| 109
| 0.53022
| 158
| 1,092
| 3.639241
| 0.392405
| 0.043478
| 0.048696
| 0.055652
| 0.069565
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05052
| 0.3837
| 1,092
| 47
| 110
| 23.234043
| 0.803863
| 0.462454
| 0
| 0.181818
| 0
| 0
| 0.001736
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
030e8114c7513a4a89a8a24f6c91ba6ed8c9d8ae
| 299
|
py
|
Python
|
Lista 1 Banin/ex08.py
|
qnomon/Python-Studies
|
dbd592cf2a161bb9ddbec66f020c602bddc6d44b
|
[
"MIT"
] | null | null | null |
Lista 1 Banin/ex08.py
|
qnomon/Python-Studies
|
dbd592cf2a161bb9ddbec66f020c602bddc6d44b
|
[
"MIT"
] | null | null | null |
Lista 1 Banin/ex08.py
|
qnomon/Python-Studies
|
dbd592cf2a161bb9ddbec66f020c602bddc6d44b
|
[
"MIT"
] | null | null | null |
v = int(input('Enter a value: '))  # translated from Portuguese: 'Digite um valor: '
validador = 0  # counts the divisors of v in [1, v-1]
contador = 1
while contador < v:
    if v % contador == 0:
        validador += 1
    contador += 1
# A prime has exactly one divisor below itself (1); values below 2 are not prime,
# which the original code misclassified for v = 1.
if validador > 1 or v < 2:
    print(f'This number is NOT prime: it is divisible by {validador+1} different numbers')
else:
    print('This number is prime')
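# For larger inputs, trial division only needs to run up to the square root
# of v. A minimal sketch of that optimisation (my addition, not part of the
# original exercise):
import math


def is_prime(n: int) -> bool:
    """Trial division up to sqrt(n)."""
    if n < 2:
        return False
    for d in range(2, math.isqrt(n) + 1):
        if n % d == 0:
            return False
    return True


# is_prime(97) -> True; is_prime(1) -> False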
| 27.181818
| 93
| 0.64214
| 45
| 299
| 4.266667
| 0.555556
| 0.15625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030837
| 0.240803
| 299
| 11
| 94
| 27.181818
| 0.814978
| 0
| 0
| 0
| 0
| 0
| 0.383333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.181818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
03121b48c8c35b2b0f007ab147db73d43b9f5419
| 10,819
|
py
|
Python
|
python/cthreepo/core/models.py
|
sdss/cthreepo
|
86804657cae90bf69b77367a355bb49eb220a4b2
|
[
"BSD-3-Clause"
] | 1
|
2019-06-19T09:30:39.000Z
|
2019-06-19T09:30:39.000Z
|
python/cthreepo/core/models.py
|
sdss/cthreepo
|
86804657cae90bf69b77367a355bb49eb220a4b2
|
[
"BSD-3-Clause"
] | null | null | null |
python/cthreepo/core/models.py
|
sdss/cthreepo
|
86804657cae90bf69b77367a355bb49eb220a4b2
|
[
"BSD-3-Clause"
] | null | null | null |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Filename: models.py
# Project: core
# Author: Brian Cherinka
# Created: Saturday, 12th September 2020 12:55:22 pm
# License: BSD 3-clause "New" or "Revised" License
# Copyright (c) 2020 Brian Cherinka
# Last Modified: Saturday, 12th September 2020 12:55:22 pm
# Modified By: Brian Cherinka
from __future__ import print_function, division, absolute_import
import re
from marshmallow.fields import Field
import six
import orjson
from marshmallow import Schema, fields, post_load
from fuzzy_types.fuzzy import FuzzyList
# core classes
class BaseClass(object):
    def __new__(cls, *args, **kwargs):
        # placeholder base: instantiating it returns None
        pass


class BaseSchema(Schema):
    ''' Base class to use for all new Schema objects '''
    _class = None

    class Meta:
        ordered = True
        render_module = orjson

    @post_load
    def make_object(self, data, **kwargs):
        ''' this function deserializes a schema to a class object '''
        return self._class(**data)


class ObjectField(fields.Field):
    ''' custom marshmallow object field

    This is a custom marshmallow Field class used to indicate that an attribute
    should be represented by a custom model object type, rather than a string or integer. It
    contains special methods for custom serialization and deserialization of model datatypes.
    For example, the yaml string representation 'LOG' for a log-linear wavelength will get
    deserialized into an instance Wavelength('LOG'). Custom fields are described at
    https://marshmallow.readthedocs.io/en/3.0/custom_fields.html.
    '''

    def _serialize(self, value, attr, obj, **kwargs):
        if value is None:
            return ''
        return (value.release if hasattr(value, 'release') else value.name if hasattr(value, 'name')
                else value.title if hasattr(value, 'title') else '')

    def _deserialize(self, value, attr, data, **kwargs):
        # `self.models` is expected to be attached to the field elsewhere in
        # the package before deserialization runs.
        name = self.default
        assert isinstance(value, six.string_types), f'{value} must be a string'
        data = self.models.get(name, None)
        return data[value] if data and value in data else value
# main/helper functions
def _get_attr(obj: object, name: str):
    ''' Get an attribute from a class object

    Attempts to retrieve an attribute from a class object

    Parameters
    ----------
    obj : object
        A class object to access
    name : str
        The attribute name to access

    Returns
    -------
    a class attribute
    '''
    if hasattr(obj, name):
        return obj.__getattribute__(name)
    else:
        return None


def create_class(data: dict, mixin: object = None) -> object:
    ''' creates a new datamodel object class

    Constructs a Python class object based on a model "schema" dictionary.
    Converts a model yaml file, 'versions.yaml' into a Python Version class object,
    which is used for instantiating the designated "objects" in the yaml section.

    Parameters
    ----------
    data : dict
        The schema dictionary section of a yaml file
    mixin : object
        A custom model class to mixin with base model

    Returns
    -------
    A new Python class object
    '''
    name = data.get('name', None) or data.get('title', None)

    # define custom repr
    def new_rep(self):
        reprstr = f'<{name}({self._repr_fields})>'
        return reprstr

    # define custom str
    def new_str(self):
        name = (_get_attr(self, 'name') or _get_attr(self, 'title') or
                _get_attr(self, 'release') or '')
        return name

    # get the attributes to add to the repr (defaulted to an empty list so
    # new_init below never references an undefined name when props is empty)
    added_fields = []
    props = data.get('attributes', None) or data.get('properties', None)
    if props:
        added_fields = [a for a, vals in props.items()
                        if vals.get('add_to_repr', None)]

    # define a new init
    def new_init(self, **kwargs):
        repr_fields = ''
        # loop for attributes
        for key, value in list(kwargs.items()):
            self.__setattr__(key, value)
            # create a repr field string
            if key in added_fields:
                repr_fields += f', {key}={value}'
        # create a string of the repr fields
        name = (_get_attr(self, 'name') or _get_attr(self, 'title') or
                _get_attr(self, 'release') or '')
        self._repr_fields = f'{name}' + repr_fields

    # create the new class and add the new methods
    bases = (mixin, object,) if mixin else (object,)
    obj = type(name, bases, {})
    obj.__init__ = new_init
    obj.__repr__ = new_rep
    obj.__str__ = new_str
    return obj


def parse_kind(value: str) -> tuple:
    ''' parse the kind value into a kind and subkind

    Parses the schema "kind" attribute into a kind and subkind when the kind
    contains parentheses, i.e. kind(subkind). For example,
    list(objects) returns kind=list, subkind=objects.

    Parameters
    ----------
    value : str
        The type of field

    Returns
    -------
    A tuple of the field type and any sub-type
    '''
    subkind = re.search(r'\((.+?)\)', value)
    if subkind:
        kind = value.split('(', 1)[0]
        subkind = subkind.group(1)
    else:
        kind = value
        # default list or tuple subfields to string when no explicit subkind
        # is given (the original overrode an explicit subkind as well, which
        # contradicted the docstring above)
        if kind.lower() in ('list', 'tuple'):
            subkind = 'string'
    return kind, subkind
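# Behavioural sketch of parse_kind (my own illustrative calls, assuming the
# default-subkind handling above):
#   parse_kind('string')                -> ('string', None)
#   parse_kind('list')                  -> ('list', 'string')
#   parse_kind('list(objects)')         -> ('list', 'objects')
#   parse_kind('tuple(string,integer)') -> ('tuple', 'string,integer')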
def get_field(value: str, key: str = None) -> Field:
    ''' Get a Marshmallow Fields type

    Using the model schema attribute "kind" parameter, determines the
    appropriate marshmallow field type. If the value is "Objects"
    then it uses a custom ObjectField definition.

    Parameters
    ----------
    value : str
        The kind of field to retrieve, e.g. string
    key : str
        The name of the attribute for the field

    Returns
    -------
    a marshmallow field class
    '''
    if hasattr(fields, value):
        field = fields.__getattribute__(value)
        return field
    elif value == 'Objects':
        return ObjectField(data_key=key)
    else:
        raise ValueError(f'Marshmallow Fields does not have {value}')


def create_field(data: dict, key: str = None, required: bool = None,
                 nodefault: bool = None) -> Field:
    ''' creates a marshmallow.fields object

    Parameters
    ----------
    data : dict
        A values dictionary for a given model attribute
    key : str
        The name of the attribute
    required : bool
        If True, sets the field as a required one. Default is False.
    nodefault : bool
        If True, turns off any defaults specified for fields. Default is False.

    Returns
    -------
    A marshmallow field instance to attach to a schema
    '''
    # parse the kind of input
    kind = data.get('kind', None) or data.get('type', None)
    kind = kind.title() if kind else kind
    kind, subkind = parse_kind(kind)
    # get the marshmallow field
    field = get_field(kind)
    # create a parameters dictionary to pass into the fields object
    params = {}
    params['required'] = data.get('required', False) if required is None else required
    if 'default' in data and not nodefault:
        params['missing'] = data.get('default', None)
        params['default'] = data.get('default', None)
    # set key to use the model indicated if use_model is set
    key = data['use_model'] if 'use_model' in data else key
    # create any arguments for sub-fields
    args = []
    if subkind:
        skinds = subkind.split(',')
        subfields = [get_field(i.title(), key=key) for i in skinds]
        # differentiate args for lists and tuples
        if kind == 'List':
            assert len(subfields) == 1, 'List can only accept one subfield type.'
            args.extend(subfields)
        elif kind == 'Tuple':
            args.append(subfields)
    # instantiate the fields object with the relevant args and parameters
    return field(*args, **params)


def create_schema(data: dict, mixin: object = None) -> Schema:
    ''' creates a new class for schema validation

    Constructs a marshmallow schema class object used to validate
    the creation of new Python objects for this class. Takes a
    model "schema" dictionary and builds new Python classes to represent
    the model Object and an Object Schema for purposes of validation.
    See https://marshmallow.readthedocs.io/en/3.0/quickstart.html for a guide on
    deserializing data using marshmallow schema validation.

    Parameters
    ----------
    data : dict
        The schema dictionary section of a yaml file
    mixin : object
        A custom model class to mixin with base model

    Returns
    -------
    A marshmallow schema class object
    '''
    # create a dictionary of class attributes from the schema
    name = data.get('name') or data.get('title')
    attrs = {}
    props = data.get('attributes', None) or data.get('properties', None)
    if props:
        # create marshmallow schema fields for each attribute
        for attr, values in props.items():
            attrs[attr] = create_field(values, key=attr)
    # create the base object class
    class_obj = create_class(data, mixin=mixin)
    # add the object class to the schema attributes to allow
    # for object deserialization from yaml representation. See BaseSchema for use.
    attrs['_class'] = class_obj
    # create the new schema class object
    objSchema = type(name + 'Schema', (BaseSchema,), attrs)
    # add the schema class instance to the object class for accessibility
    class_obj._schema = objSchema()
    return objSchema


def generate_models(data: dict, make_fuzzy: bool = True, mixin: object = None) -> list:
    ''' Generate a list of datamodel types

    Converts a models yaml file, e.g. manga/versions.yaml, into a list of Python instances.
    A model Schema class is created using the "schema" section of the yaml file. The schema
    class is used to validate and instantiate the list of objects defined in the "objects"
    section.

    Parameters
    ----------
    data : dict
        A yaml loaded data structure
    make_fuzzy : bool
        If True, returns a Fuzzy list of models
    mixin : object
        A custom model class to mixin with base model

    Returns
    -------
    A list of instantiated models
    '''
    # create the schema class object
    schema = create_schema(data['schema'], mixin=mixin)
    # validate and deserialize the model data in Python objects
    models = schema(many=True).load(data['objects'], many=True)
    # optionally make the model list fuzzy
    if make_fuzzy:
        models = FuzzyList(models)
    return models
| 31.914454
| 100
| 0.634994
| 1,422
| 10,819
| 4.7609
| 0.199719
| 0.013442
| 0.009749
| 0.007681
| 0.131019
| 0.110192
| 0.102216
| 0.08449
| 0.074742
| 0.074742
| 0
| 0.004833
| 0.273315
| 10,819
| 338
| 101
| 32.008876
| 0.856271
| 0.481098
| 0
| 0.122951
| 0
| 0
| 0.082321
| 0.005684
| 0
| 0
| 0
| 0
| 0.016393
| 1
| 0.114754
| false
| 0.008197
| 0.057377
| 0
| 0.336066
| 0.008197
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
03124cf711ceb00968f5cacee727fb9c26ed3485
| 1,458
|
py
|
Python
|
project_src/deep_learning/pytorch_openai_transformer/datasets.py
|
tcvrick/COMP551-IMDB-Competition
|
64f83142c86e895db3a0ca037c31efb404910723
|
[
"MIT"
] | 2
|
2019-08-12T13:47:21.000Z
|
2020-05-03T11:53:38.000Z
|
project_src/deep_learning/pytorch_openai_transformer/datasets.py
|
tcvrick/COMP551-IMDB-Competition
|
64f83142c86e895db3a0ca037c31efb404910723
|
[
"MIT"
] | null | null | null |
project_src/deep_learning/pytorch_openai_transformer/datasets.py
|
tcvrick/COMP551-IMDB-Competition
|
64f83142c86e895db3a0ca037c31efb404910723
|
[
"MIT"
] | null | null | null |
import re
import html
import pandas as pd

re1 = re.compile(r' +')


def imdb(fold_id: int, split_size: int):
    df = pd.read_pickle('df_train.pkl')
    df = df.reindex(columns=['sentiment', 'text'])
    df['text'] = df['text'].apply(fixup)

    # Split the data into k-folds.
    df_val = df[split_size * fold_id:split_size * (fold_id + 1)]
    df_train = pd.concat((df[0:split_size * fold_id],
                          df[split_size * (fold_id + 1):]))

    # Sanity check to make sure there are no common elements between the two splits.
    if set(df_train.index).intersection(set(df_val.index)):
        raise ValueError('There are common training examples in the training and validation splits!')

    df_test = pd.read_pickle('df_test.pkl')
    df_test = df_test.reindex(columns=['review_id', 'text'])
    df_test['text'] = df_test['text'].apply(fixup)
    return (df_train.text.values, df_train.sentiment.values), (df_val.text.values, df_val.sentiment.values),\
        (df_test.text.values,)


# https://github.com/prajjwal1/language-modelling/blob/master/ULMfit.py
def fixup(x):
    x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace(
        'nbsp;', ' ').replace('#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace(
        '<br />', "\n").replace('\\"', '"').replace('<unk>', 'u_n').replace(' @.@ ', '.').replace(
        ' @-@ ', '-').replace('\\', ' \\ ')
    return re1.sub(' ', html.unescape(x))
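# Illustrative call (my addition) showing what fixup does to markup residue:
#   fixup('A &gt; B   and   C <br />done')  ->  'A > B and C \ndone'
# ('<br />' becomes a newline, HTML entities are unescaped, and runs of
# spaces collapse to a single space)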
| 40.5
| 109
| 0.603567
| 203
| 1,458
| 4.187192
| 0.433498
| 0.049412
| 0.061176
| 0.070588
| 0.06
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010906
| 0.182442
| 1,458
| 35
| 110
| 41.657143
| 0.702181
| 0.121399
| 0
| 0
| 0
| 0
| 0.170713
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.125
| 0
| 0.291667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
031365d53b1a04b88852c96530151afb17b9aec7
| 1,136
|
py
|
Python
|
coyaml/cli.py
|
tailhook/coyaml
|
2de4f0d99446afb43d9e1a0cb576e048f47c108a
|
[
"MIT"
] | 8
|
2015-01-28T15:08:38.000Z
|
2021-11-17T05:27:21.000Z
|
coyaml/cli.py
|
tailhook/coyaml
|
2de4f0d99446afb43d9e1a0cb576e048f47c108a
|
[
"MIT"
] | 3
|
2016-06-08T09:55:12.000Z
|
2019-09-14T12:40:46.000Z
|
coyaml/cli.py
|
tailhook/coyaml
|
2de4f0d99446afb43d9e1a0cb576e048f47c108a
|
[
"MIT"
] | 1
|
2015-05-28T12:22:16.000Z
|
2015-05-28T12:22:16.000Z
|
from .core import Config


def simple():
    from optparse import OptionParser
    op = OptionParser(usage="\n  %prog\n  %prog -c config.yaml")
    op.add_option('-c', '--config', metavar="FILENAME",
                  help="Configuration file to parse",
                  dest="configfile", default=None, type="string")
    op.add_option('-n', '--name', metavar="NAME",
                  # the original concatenated "have" and "several" without a
                  # space, producing "haveseveral" in the help output
                  help="Name of configuration (default `config`), useful if you have "
                       "several configurations in a single binary",
                  dest="name", default="config", type="string")
    op.add_option('-f', '--filename', metavar="NAME",
                  help="Filename to read",
                  dest="filename", default="config", type="string")
    op.add_option('-p', '--print',
                  help="Print parsed configuration file",
                  dest="print", default=False, action="store_true")
    options, args = op.parse_args()
    if args:
        op.error("No arguments expected")
    cfg = Config(options.name, options.filename)
    if options.configfile:
        inp = open(options.configfile, 'rt', encoding='utf-8')
    else:
        import sys
        inp = sys.stdin
    return cfg, inp, options
| 39.172414
| 76
| 0.618838
| 139
| 1,136
| 5.014388
| 0.467626
| 0.028694
| 0.063128
| 0.064562
| 0.12769
| 0.097561
| 0.097561
| 0
| 0
| 0
| 0
| 0.001139
| 0.227113
| 1,136
| 28
| 77
| 40.571429
| 0.792711
| 0
| 0
| 0
| 0
| 0
| 0.316901
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.111111
| 0
| 0.185185
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
03163282de6da6741659d1e1e50c91fdd514f086
| 869
|
py
|
Python
|
gtfs/stop.py
|
fabiotanniguchi/emdec-gtfs
|
823274d45263409d1d3ee56cf07f60e12d64003a
|
[
"WTFPL"
] | null | null | null |
gtfs/stop.py
|
fabiotanniguchi/emdec-gtfs
|
823274d45263409d1d3ee56cf07f60e12d64003a
|
[
"WTFPL"
] | null | null | null |
gtfs/stop.py
|
fabiotanniguchi/emdec-gtfs
|
823274d45263409d1d3ee56cf07f60e12d64003a
|
[
"WTFPL"
] | null | null | null |
from google.appengine.ext import ndb
from protorpc import messages
from google.appengine.ext.ndb import msgprop

from csvmodel import CsvModel


class Stop(CsvModel):
    class LocationType(messages.Enum):
        STOP = 0
        STATION = 1

    class WheelchairBoarding(messages.Enum):
        UNKNOWN = 0
        POSSIBLE = 1
        IMPOSSIBLE = 2

    _csv_file = 'stops.txt'
    _csv_id = 'stop_id'

    stop_code = ndb.StringProperty()
    stop_name = ndb.StringProperty(required=True)
    stop_desc = ndb.TextProperty()
    stop_latlon = ndb.GeoPtProperty(required=True)
    zone_id = ndb.KeyProperty(kind='Zone')
    stop_url = ndb.StringProperty()
    location_type = msgprop.EnumProperty(LocationType)
    parent_station = ndb.KeyProperty(kind='Stop')
    stop_timezone = ndb.StringProperty()
    wheelchair_boarding = msgprop.EnumProperty(WheelchairBoarding)
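# Minimal construction sketch (my addition; assumes a Google App Engine
# runtime with datastore access, and that CsvModel behaves like a plain
# ndb.Model -- all values are made up):
#
# stop = Stop(
#     stop_name='Central Terminal',            # required
#     stop_latlon=ndb.GeoPt(-22.90, -47.06),   # required
#     location_type=Stop.LocationType.STOP,
# )
# stop.put()  # persists the entity when run inside App Engine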
| 31.035714
| 66
| 0.714614
| 100
| 869
| 6.06
| 0.47
| 0.112211
| 0.062706
| 0.072607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007174
| 0.197929
| 869
| 27
| 67
| 32.185185
| 0.862267
| 0
| 0
| 0
| 0
| 0
| 0.027618
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.791667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
031a0bb9c7e7eb89c3804e46ae64112b02ade8a2
| 1,701
|
py
|
Python
|
appdaemon/settings/apps/presence/home/alarm_panel.py
|
monster1025/home-assistant
|
194723e228a798b99693220ad332a2c55d06e248
|
[
"Apache-2.0"
] | null | null | null |
appdaemon/settings/apps/presence/home/alarm_panel.py
|
monster1025/home-assistant
|
194723e228a798b99693220ad332a2c55d06e248
|
[
"Apache-2.0"
] | null | null | null |
appdaemon/settings/apps/presence/home/alarm_panel.py
|
monster1025/home-assistant
|
194723e228a798b99693220ad332a2c55d06e248
|
[
"Apache-2.0"
] | null | null | null |
import appdaemon.plugins.hass.hassapi as hass
#
# Listen for presence sensor state changes and change the alarm control panel state accordingly.
#
# Args:
# sensor - home presence 'sensor'
# ha_panel - alarm control panel entity (to arm and disarm).
# constraint - (optional, input_boolean); if turned off, the alarm panel will not be armed/disarmed.
#
# Release Notes
#
# Version 1.0:
#   Initial Version


class AlarmPanelBySensor(hass.Hass):
    def initialize(self):
        if "sensor" not in self.args or "ha_panel" not in self.args:
            self.error("Please provide sensor and ha_panel in config!")
            return
        self.listen_state(self.sensor_trigger, self.args['sensor'])
        self.listen_event(self.ha_event, "ha_started")

    def ha_event(self, event_name, data, kwargs):
        self.log('Starting up!')
        state = self.get_state(self.args['sensor'])
        self.log('Updating alarm_control_panel state: {}'.format(state))
        if state == "off":
            self.away_mode()

    def sensor_trigger(self, entity, attribute, old, new, kwargs):
        self.log("{} turned {}".format(entity, new))
        if new == "off" and old == "on":
            self.away_mode()
        if new == "on" and old == "off":
            self.return_home_mode()

    def away_mode(self):
        if 'constraint' in self.args and not self.constrain_input_boolean(self.args['constraint']):
            return
        self.call_service("alarm_control_panel/alarm_arm_away", entity_id=self.args['ha_panel'])

    def return_home_mode(self):
        if 'constraint' in self.args and not self.constrain_input_boolean(self.args['constraint']):
            return
        self.call_service("alarm_control_panel/alarm_disarm", entity_id=self.args['ha_panel'])
| 36.978261
| 97
| 0.680188
| 239
| 1,701
| 4.677824
| 0.317992
| 0.071556
| 0.076029
| 0.039356
| 0.259392
| 0.259392
| 0.218247
| 0.218247
| 0.218247
| 0.218247
| 0
| 0.001461
| 0.195179
| 1,701
| 46
| 98
| 36.978261
| 0.815194
| 0.184597
| 0
| 0.25
| 0
| 0
| 0.209023
| 0.049624
| 0
| 0
| 0
| 0
| 0
| 1
| 0.178571
| false
| 0
| 0.035714
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
031c6ddb51fddc4e6b54071f60cd8121116d2c91
| 398
|
py
|
Python
|
general/scry_message.py
|
kcunning/gamemaster-scripts
|
ec0658e498807d9c7017da313ecf1b9ed3eb6862
|
[
"MIT"
] | 15
|
2019-01-17T20:09:45.000Z
|
2022-01-05T15:56:32.000Z
|
general/scry_message.py
|
palikhov/CnM-GM-RPG-scripts
|
ec0658e498807d9c7017da313ecf1b9ed3eb6862
|
[
"MIT"
] | 5
|
2020-04-27T19:48:54.000Z
|
2022-03-11T23:39:49.000Z
|
general/scry_message.py
|
palikhov/CnM-GM-RPG-scripts
|
ec0658e498807d9c7017da313ecf1b9ed3eb6862
|
[
"MIT"
] | 8
|
2019-02-20T21:18:46.000Z
|
2021-04-30T03:43:20.000Z
|
from random import randint
import datetime

lvl = 10
base_rounds = 10
rounds = lvl * base_rounds

print("You have", rounds, "rounds to try to get through.")
for i in range(rounds):
    r = randint(1, 100)
    print(r)
    if r >= 96:
        break
print("Number of rounds:", i)
# NOTE: a roll of 96+ on the very last round is indistinguishable from never
# breaking out, so that edge case is also reported as "Nothing got through".
if i == rounds - 1:
    print("Nothing got through")
else:
    print("It took", str(datetime.timedelta(seconds=i*6)))
| 19.9
| 58
| 0.645729
| 63
| 398
| 4.047619
| 0.587302
| 0.078431
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038835
| 0.223618
| 398
| 20
| 59
| 19.9
| 0.786408
| 0
| 0
| 0
| 0
| 0
| 0.200501
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0.3125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
031df329773b42e6e3ea2acdd1a1f8a14ec08d3e
| 5,566
|
py
|
Python
|
src/qibo/tests/test_core_hamiltonians_trotter.py
|
mlazzarin/qibo
|
e82bc3e27c5182be7b6f0b23bd20bc1057e31701
|
[
"Apache-2.0"
] | 81
|
2020-09-04T10:54:40.000Z
|
2021-05-17T13:20:38.000Z
|
src/qibo/tests/test_core_hamiltonians_trotter.py
|
mlazzarin/qibo
|
e82bc3e27c5182be7b6f0b23bd20bc1057e31701
|
[
"Apache-2.0"
] | 201
|
2020-08-24T08:41:33.000Z
|
2021-05-18T12:23:19.000Z
|
src/qibo/tests/test_core_hamiltonians_trotter.py
|
mlazzarin/qibo
|
e82bc3e27c5182be7b6f0b23bd20bc1057e31701
|
[
"Apache-2.0"
] | 13
|
2020-09-08T12:34:35.000Z
|
2021-04-29T22:46:21.000Z
|
"""Test Trotter Hamiltonian methods from `qibo/core/hamiltonians.py`."""
import pytest
import numpy as np
import qibo
from qibo import hamiltonians, K
from qibo.tests.utils import random_state, random_complex, random_hermitian
@pytest.mark.parametrize("nqubits", [3, 4])
@pytest.mark.parametrize("model", ["TFIM", "XXZ", "Y", "MaxCut"])
def test_trotter_hamiltonian_to_dense(backend, nqubits, model):
"""Test that Trotter Hamiltonian dense form agrees with normal Hamiltonian."""
local_ham = getattr(hamiltonians, model)(nqubits, dense=False)
target_ham = getattr(hamiltonians, model)(nqubits)
final_ham = local_ham.dense
K.assert_allclose(final_ham.matrix, target_ham.matrix, atol=1e-15)
def test_trotter_hamiltonian_scalar_mul(nqubits=3):
"""Test multiplication of Trotter Hamiltonian with scalar."""
local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False)
target_ham = 2 * hamiltonians.TFIM(nqubits, h=1.0)
local_dense = (2 * local_ham).dense
K.assert_allclose(local_dense.matrix, target_ham.matrix)
local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False)
local_dense = (local_ham * 2).dense
K.assert_allclose(local_dense.matrix, target_ham.matrix)
def test_trotter_hamiltonian_scalar_add(nqubits=4):
"""Test addition of Trotter Hamiltonian with scalar."""
local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False)
target_ham = 2 + hamiltonians.TFIM(nqubits, h=1.0)
local_dense = (2 + local_ham).dense
K.assert_allclose(local_dense.matrix, target_ham.matrix)
local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False)
local_dense = (local_ham + 2).dense
K.assert_allclose(local_dense.matrix, target_ham.matrix)
def test_trotter_hamiltonian_scalar_sub(nqubits=3):
"""Test subtraction of Trotter Hamiltonian with scalar."""
local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False)
target_ham = 2 - hamiltonians.TFIM(nqubits, h=1.0)
local_dense = (2 - local_ham).dense
K.assert_allclose(local_dense.matrix, target_ham.matrix)
target_ham = hamiltonians.TFIM(nqubits, h=1.0) - 2
local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False)
local_dense = (local_ham - 2).dense
K.assert_allclose(local_dense.matrix, target_ham.matrix)
def test_trotter_hamiltonian_operator_add_and_sub(nqubits=3):
"""Test addition and subtraction between Trotter Hamiltonians."""
local_ham1 = hamiltonians.TFIM(nqubits, h=1.0, dense=False)
local_ham2 = hamiltonians.TFIM(nqubits, h=0.5, dense=False)
local_ham = local_ham1 + local_ham2
target_ham = (hamiltonians.TFIM(nqubits, h=1.0) +
hamiltonians.TFIM(nqubits, h=0.5))
dense = local_ham.dense
K.assert_allclose(dense.matrix, target_ham.matrix)
local_ham = local_ham1 - local_ham2
target_ham = (hamiltonians.TFIM(nqubits, h=1.0) -
hamiltonians.TFIM(nqubits, h=0.5))
dense = local_ham.dense
K.assert_allclose(dense.matrix, target_ham.matrix)
@pytest.mark.parametrize("nqubits,normalize", [(3, False), (4, False)])
def test_trotter_hamiltonian_matmul(nqubits, normalize):
"""Test Trotter Hamiltonian expectation value."""
local_ham = hamiltonians.TFIM(nqubits, h=1.0, dense=False)
dense_ham = hamiltonians.TFIM(nqubits, h=1.0)
state = K.cast(random_complex((2 ** nqubits,)))
trotter_ev = local_ham.expectation(state, normalize)
target_ev = dense_ham.expectation(state, normalize)
K.assert_allclose(trotter_ev, target_ev)
state = random_complex((2 ** nqubits,))
trotter_ev = local_ham.expectation(state, normalize)
target_ev = dense_ham.expectation(state, normalize)
K.assert_allclose(trotter_ev, target_ev)
from qibo.core.states import VectorState
state = VectorState.from_tensor(state)
trotter_matmul = local_ham @ state
target_matmul = dense_ham @ state
K.assert_allclose(trotter_matmul, target_matmul)
def test_trotter_hamiltonian_three_qubit_term(backend):
"""Test creating ``TrotterHamiltonian`` with three qubit term."""
from scipy.linalg import expm
from qibo.core.terms import HamiltonianTerm
m1 = random_hermitian(3)
m2 = random_hermitian(2)
m3 = random_hermitian(1)
terms = [HamiltonianTerm(m1, 0, 1, 2), HamiltonianTerm(m2, 2, 3),
HamiltonianTerm(m3, 1)]
ham = hamiltonians.SymbolicHamiltonian()
ham.terms = terms
# Test that the `TrotterHamiltonian` dense matrix is correct
eye = np.eye(2, dtype=m1.dtype)
mm1 = np.kron(m1, eye)
mm2 = np.kron(np.kron(eye, eye), m2)
mm3 = np.kron(np.kron(eye, m3), np.kron(eye, eye))
target_ham = hamiltonians.Hamiltonian(4, mm1 + mm2 + mm3)
K.assert_allclose(ham.matrix, target_ham.matrix)
dt = 1e-2
initial_state = random_state(4)
if K.op is not None:
with pytest.raises(NotImplementedError):
circuit = ham.circuit(dt=dt)
else:
circuit = ham.circuit(dt=dt)
final_state = circuit(np.copy(initial_state))
u = [expm(-0.5j * dt * (mm1 + mm3)), expm(-0.5j * dt * mm2)]
target_state = u[1].dot(u[0].dot(initial_state))
target_state = u[0].dot(u[1].dot(target_state))
K.assert_allclose(final_state, target_state)
def test_old_trotter_hamiltonian_errors():
"""Check errors when creating the deprecated ``TrotterHamiltonian`` object."""
with pytest.raises(NotImplementedError):
h = hamiltonians.TrotterHamiltonian()
with pytest.raises(NotImplementedError):
h = hamiltonians.TrotterHamiltonian.from_symbolic(0, 1)
| 40.333333
| 82
| 0.7129
| 769
| 5,566
| 4.972692
| 0.159948
| 0.046025
| 0.108264
| 0.112971
| 0.547071
| 0.489801
| 0.482479
| 0.43227
| 0.423117
| 0.412395
| 0
| 0.023326
| 0.168164
| 5,566
| 137
| 83
| 40.627737
| 0.802592
| 0.106899
| 0
| 0.3
| 0
| 0
| 0.008731
| 0
| 0
| 0
| 0
| 0
| 0.14
| 1
| 0.08
| false
| 0
| 0.08
| 0
| 0.16
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
031ef69a071c2f61a4e58bcde34b41a5a2f122f6
| 6,186
|
py
|
Python
|
models/utils/layers.py
|
icarus945/Torch_Detection
|
4cb8ca22a7fa2f45c72b60d794ae2a2ed1a35cd8
|
[
"MIT"
] | 3
|
2018-12-23T14:07:39.000Z
|
2019-10-18T03:05:39.000Z
|
models/utils/layers.py
|
icarus945/Torch_Detection
|
4cb8ca22a7fa2f45c72b60d794ae2a2ed1a35cd8
|
[
"MIT"
] | 20
|
2018-11-24T15:59:20.000Z
|
2019-01-30T16:42:25.000Z
|
models/utils/layers.py
|
icarus945/Torch_Detection
|
4cb8ca22a7fa2f45c72b60d794ae2a2ed1a35cd8
|
[
"MIT"
] | 6
|
2018-11-14T13:12:24.000Z
|
2019-01-03T02:40:49.000Z
|
import warnings
import torch.nn as nn


def conv1x1_group(in_planes, out_planes, stride=1, groups=1):
    """
    1x1 convolution with group, without bias
    - Normal 1x1 convolution when groups == 1
    - Grouped 1x1 convolution when groups > 1
    """
    return nn.Conv2d(in_channels=in_planes,
                     out_channels=out_planes,
                     kernel_size=1,
                     stride=stride,
                     groups=groups,
                     bias=False)


def conv3x3_group(in_planes, out_planes, stride=1, dilation=1, groups=1):
    """
    3x3 convolution with padding and group, without bias; in this situation,
    padding is the same as dilation.
    """
    return nn.Conv2d(in_channels=in_planes,
                     out_channels=out_planes,
                     kernel_size=3,
                     stride=stride,
                     padding=dilation,
                     dilation=dilation,
                     groups=groups,
                     bias=False)


def conv7x7_group(in_planes, out_planes, stride=1, groups=1):
    """
    7x7 convolution with padding and group, without bias; as the first conv,
    dilation is set to 1 and padding to 3.
    """
    return nn.Conv2d(in_channels=in_planes,
                     out_channels=out_planes,
                     kernel_size=7,
                     stride=stride,
                     padding=3,
                     dilation=1,
                     groups=groups,
                     bias=False)


def norm_layer(planes, use_gn=False):
    if not use_gn:
        return nn.BatchNorm2d(planes)
    else:
        return nn.GroupNorm(get_group_gn(planes), planes)


class ConvModule(nn.Module):
    """
    This class is not currently used in the backbone, only in necks and heads.
    TODO: combine the conv layer in backbone with this class
    This class supports several types of layers:
        1. only conv layer
        2. conv + bn/gn
        3. conv + bn/gn + relu
        4. conv + relu
        5. bn/gn + relu + conv
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 normalize=None,
                 use_gn=False,
                 activation=None,
                 activate_last=True):
        super(ConvModule, self).__init__()
        self.with_norm = normalize is not None
        self.with_activation = activation is not None
        self.with_bias = bias
        self.activation = activation
        self.activate_last = activate_last

        if self.with_norm and self.with_bias:
            warnings.warn('ConvModule has norm and bias at the same time')

        self.conv = nn.Conv2d(in_channels=in_channels,
                              out_channels=out_channels,
                              kernel_size=kernel_size,
                              stride=stride,
                              padding=padding,
                              dilation=dilation,
                              groups=groups,
                              bias=bias)
        self.in_channels = self.conv.in_channels
        self.out_channels = self.conv.out_channels
        self.kernel_size = self.conv.kernel_size
        self.stride = self.conv.stride
        self.padding = self.conv.padding
        self.dilation = self.conv.dilation
        self.groups = self.conv.groups

        if self.with_norm:
            norm_channels = out_channels if self.activate_last else in_channels
            self.norm = norm_layer(norm_channels, use_gn=use_gn)
        if self.with_activation:
            assert activation in ['relu', 'relu6'], \
                'Only ReLU and ReLU6 are supported'
            if self.activation == 'relu':
                self.activate = nn.ReLU(inplace=True)
            elif self.activation == 'relu6':
                self.activate = nn.ReLU6(inplace=True)

    def forward(self, x):
        if self.activate_last:
            x = self.conv(x)
            if self.with_norm:
                x = self.norm(x)
            if self.with_activation:
                x = self.activate(x)
        else:
            if self.with_norm:
                x = self.norm(x)
            if self.with_activation:
                x = self.activate(x)
            x = self.conv(x)
        return x


def get_group_gn(planes):
    """
    get number of groups used by GroupNorm, based on number of channels
    """
    dim_per_gp = -1
    num_groups = 32
    assert dim_per_gp == -1 or num_groups == -1, \
        'GroupNorm: can only specify G or C/G'
    if dim_per_gp > 0:
        assert planes % dim_per_gp == 0
        groups = planes // dim_per_gp
    else:
        assert planes % num_groups == 0
        groups = num_groups
    return groups


class ShuffleLayer(nn.Module):
    def __init__(self, groups):
        super(ShuffleLayer, self).__init__()
        self.groups = groups

    def forward(self, x):
        """
        Channel shuffle: [N, C, H, W] -> [N, g, C/g, H, W] ->
        [N, C/g, g, H, W] -> [N, C, H, W]
        """
        N, C, H, W = x.size()
        g = self.groups
        # integer division: the original used C / g, which yields a float and
        # makes `view` raise a TypeError in Python 3
        return x.view(N, g, C // g, H, W).permute(
            0, 2, 1, 3, 4).reshape(x.size())


class ChannelSplit(nn.Module):
    def __init__(self):
        super(ChannelSplit, self).__init__()

    def forward(self, x):
        # channels are dim 1 of an NCHW tensor; the original computed the
        # half-count from x.shape[2] (the height) while slicing dim 1
        half_channel = x.shape[1] // 2
        return x[:, :half_channel, ...], x[:, half_channel:, ...]


class SELayer(nn.Module):
    """
    Paper: https://arxiv.org/abs/1709.01507
    """

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel),
            nn.Sigmoid()
        )

    def forward(self, x):
        batch, channel, _, _ = x.size()
        y = self.avg_pool(x).view(batch, channel)
        y = self.fc(y).view(batch, channel, 1, 1)
        return x * y
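# Quick shape check for SELayer (my addition):
if __name__ == '__main__':
    import torch
    se = SELayer(channel=64, reduction=16)
    x = torch.randn(2, 64, 32, 32)
    print(se(x).shape)  # torch.Size([2, 64, 32, 32]) -- same shape, channels reweighted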
| 30.472906
| 79
| 0.534756
| 739
| 6,186
| 4.309878
| 0.196211
| 0.02763
| 0.021978
| 0.022606
| 0.261852
| 0.159184
| 0.152276
| 0.117425
| 0.117425
| 0.094819
| 0
| 0.020502
| 0.369221
| 6,186
| 202
| 80
| 30.623762
| 0.795746
| 0.132881
| 0
| 0.296296
| 0
| 0
| 0.025394
| 0
| 0
| 0
| 0
| 0.004951
| 0.02963
| 1
| 0.096296
| false
| 0
| 0.014815
| 0
| 0.214815
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
031fd2613a36677be2ac3e5ad262f6fafd120692
| 1,073
|
py
|
Python
|
src/app.py
|
kangheeyong/PROJECT-datahub-api-server
|
1593603f0fbb6a2a027a677494b25584ebc91573
|
[
"MIT"
] | null | null | null |
src/app.py
|
kangheeyong/PROJECT-datahub-api-server
|
1593603f0fbb6a2a027a677494b25584ebc91573
|
[
"MIT"
] | null | null | null |
src/app.py
|
kangheeyong/PROJECT-datahub-api-server
|
1593603f0fbb6a2a027a677494b25584ebc91573
|
[
"MIT"
] | null | null | null |
from sanic import Sanic
from sanic.response import json
from sanic_openapi import doc, swagger_blueprint

from util import authorized

app = Sanic(__name__)

app.config["API_TITLE"] = "My-DataHub-OpenAPI"
app.config["API_VERSION"] = "0.1.0"
app.config["API_DESCRIPTION"] = "An example Swagger from Sanic-OpenAPI"
app.config["API_CONTACT_EMAIL"] = "cagojeiger@naver.com"
app.config["API_TERMS_OF_SERVICE"] = "https://github.com/kangheeyong/PROJECT-datahub-api-server.git"
app.config["API_LICENSE_NAME"] = "MIT LICENSE"

app.blueprint(swagger_blueprint)


class Test_status:
    status = doc.String()


@app.route('/test')
@doc.tag('test')
@doc.summary('test token')
@doc.description('This is a test route with detail description.')
@doc.consumes(doc.String(name='token'), location='header', required=True)
@doc.response(200, Test_status, description='한글도 되나?')  # "Does Korean work too?" -- deliberately non-ASCII
@doc.response(403, Test_status, description='123aaa')
@authorized(token='12')
async def test(request):
    return json({'status': 'success'})


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8070)
| 26.825
| 100
| 0.737185
| 155
| 1,073
| 4.922581
| 0.490323
| 0.070773
| 0.094364
| 0.049803
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022822
| 0.101584
| 1,073
| 39
| 101
| 27.512821
| 0.768672
| 0
| 0
| 0
| 0
| 0
| 0.334579
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.269231
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0323bfe0266dc12dfb65c5926feae4f31ef1770b
| 1,323
|
py
|
Python
|
bouser_db/service.py
|
MarsStirner/bouser.db
|
86c0cb8991b96b908af0dec7843e8ffbd0f18ae8
|
[
"0BSD"
] | null | null | null |
bouser_db/service.py
|
MarsStirner/bouser.db
|
86c0cb8991b96b908af0dec7843e8ffbd0f18ae8
|
[
"0BSD"
] | null | null | null |
bouser_db/service.py
|
MarsStirner/bouser.db
|
86c0cb8991b96b908af0dec7843e8ffbd0f18ae8
|
[
"0BSD"
] | null | null | null |
# -*- coding: utf-8 -*-
import contextlib

import sqlalchemy
import sqlalchemy.orm
from twisted.application.service import Service
from zope.interface.declarations import implementer

from bouser.helpers.plugin_helpers import Dependency, BouserPlugin
from .interfaces import IDataBaseService

__author__ = 'mmalkov'


@implementer(IDataBaseService)
class DataBaseService(Service, BouserPlugin):
    signal_name = 'bouser.db'
    root = Dependency('bouser')

    def __init__(self, config):
        self.url = config['url']
        self.db = None
        self.session = None

    def startService(self):
        Service.startService(self)
        self.db = sqlalchemy.create_engine(self.url, pool_recycle=3600)
        self.session = sqlalchemy.orm.sessionmaker(bind=self.db)

    def stopService(self):
        # the original called Service.startService here -- an apparent copy-paste slip
        Service.stopService(self)
        self.db = self.session = None

    def get_session(self):
        # the original returned self.Session(), but the attribute is named `session`
        return self.session()

    @contextlib.contextmanager
    def context_session(self, read_only=False):
        session = self.session()
        try:
            yield session
        except:
            session.rollback()
            raise
        else:
            if read_only:
                session.rollback()
            else:
                session.commit()
        finally:
            session.close()
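# Usage sketch for context_session (my addition; the in-memory SQLite URL is
# illustrative only, and startService is normally driven by twisted):
#
# svc = DataBaseService({'url': 'sqlite://'})
# svc.startService()
# with svc.context_session(read_only=True) as session:
#     # queries here share one transaction; read_only rolls back at the end,
#     # otherwise the block commits on success and rolls back on an exception
#     session.execute(sqlalchemy.text('SELECT 1'))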
| 24.962264
| 71
| 0.642479
| 136
| 1,323
| 6.132353
| 0.463235
| 0.065947
| 0.035971
| 0.043165
| 0.079137
| 0.079137
| 0
| 0
| 0
| 0
| 0
| 0.005165
| 0.26833
| 1,323
| 52
| 72
| 25.442308
| 0.856405
| 0.015873
| 0
| 0.15
| 0
| 0
| 0.019246
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.175
| 0.025
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
03245e36530e8be45e6254760cac36f4e2d93c2b
| 581
|
py
|
Python
|
wavutils/wav_connect.py
|
makobouzu/rnnoise
|
3a3b854722cdc511860744e11bdbba22d63ed2b5
|
[
"BSD-3-Clause"
] | null | null | null |
wavutils/wav_connect.py
|
makobouzu/rnnoise
|
3a3b854722cdc511860744e11bdbba22d63ed2b5
|
[
"BSD-3-Clause"
] | null | null | null |
wavutils/wav_connect.py
|
makobouzu/rnnoise
|
3a3b854722cdc511860744e11bdbba22d63ed2b5
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from pydub import AudioSegment
import sys
import glob

if __name__ == "__main__":
    args = sys.argv
    folder = glob.glob(args[1] + "/*.wav")
    initial = False
    for file in folder:
        soundfile = AudioSegment.from_file(file, "wav")
        if not initial:
            # the first file starts the output
            soundfile.export(args[2], format="wav")
            initial = True
        else:
            # append by re-reading the output and concatenating; simple, but
            # quadratic in the total audio length
            outfile = AudioSegment.from_file(args[2], "wav")
            sound = outfile + soundfile
            sound.export(args[2], format="wav")
        print("connect " + file)
| 26.409091
| 60
| 0.562823
| 66
| 581
| 4.80303
| 0.484848
| 0.047319
| 0.126183
| 0.107256
| 0.126183
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012407
| 0.306368
| 581
| 21
| 61
| 27.666667
| 0.774194
| 0.036145
| 0
| 0
| 0
| 0
| 0.060932
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.176471
| 0
| 0.176471
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0326b6925c35f8d5ffa44459a0582e31350f4aa2
| 6,326
|
py
|
Python
|
attributes_data/nearestNeighborTest.py
|
aeksco/FRMA-Ontology
|
1eab8731bdad49328dddfbf52468d2a107d3f247
|
[
"MIT"
] | null | null | null |
attributes_data/nearestNeighborTest.py
|
aeksco/FRMA-Ontology
|
1eab8731bdad49328dddfbf52468d2a107d3f247
|
[
"MIT"
] | 1
|
2018-11-03T00:10:01.000Z
|
2018-11-03T00:10:01.000Z
|
attributes_data/nearestNeighborTest.py
|
aeksco/FRMA-Ontology
|
1eab8731bdad49328dddfbf52468d2a107d3f247
|
[
"MIT"
] | 2
|
2018-11-03T00:02:29.000Z
|
2018-11-03T00:11:53.000Z
|
"""
This is a test that we're using to gather example data from our two example models.
This is passed a list of image names, image numbers, and the vector representing the face in the photo,
and this script takes that and a split of testing vs training data to determine how accurate the model was by simply
checking which labeled vector (from the train data) the test data is closest to, and returning whether that was right or not.
python nearestNeighborTest.py ../../../../Downloads/facenet_lfw_vector.txt training_split.txt testing_split.txt facenet_results.txt
python nearestNeighborTest.py dlib_output_vectors.txt training_split.txt testing_split.txt dlib_results.txt
"""
import sys
import numpy as np
def load_split_file(filename):
# this loads the split files, reads them, closes them, and returns the data
f = open(filename, "r")
data = f.readlines()
data = [(line.split("\t")[0], int(line.split("\t")[1])) for line in data]
f.close()
return data
def nearest_neighbor(vector, neighbors):
# neighbors is a list of (name, number, vector)s
# requires at least one neighbor
# this could be done much, much more efficiently
closest = neighbors[0]
# print(neighbors[0])
closestDistance = np.linalg.norm(vector - neighbors[0][2])
for neighbor in neighbors:
distance = np.linalg.norm(vector - neighbor[2])
if distance < closestDistance:
closestDistance = distance
closest = neighbor
return closest, closestDistance
def main(args):
results_file = open(args[1], "r") # this contains the vectors describing all of the photos
output_filename = args[4]
# then go load all of the files
all_vector_dict = {}
all_results = []
lines = results_file.readlines()
lines = [line.split(" - ") for line in lines]
for result in lines:
words = result[0].split("_")
words[-1] = words[-1].split(".")[0] # remove the file type from the number
number = int(words[-1])
name = "_".join(words[:-1]) # the rest of the underscore separated things before the number
vector = np.array([float(x) for x in result[1].replace("[", "").replace("]", "").split(", ")])
r = (name, number, vector)
all_results += [r]
if (name, number) not in all_vector_dict:
all_vector_dict[(name, number)] = []
all_vector_dict[(name, number)] += [r] # add it to the list of vectors under that name and number because some photos have multiple faces :P
results_file.close()
vector_length = len(all_results[0][2])
# we assume that at least one of the first two is correct otherwise we'll just fail I guess...
if len(all_results[1][2]) != vector_length:
print("ERROR: unknown vector length " + str(vector_length) + " != " + str(len(all_results[1][2])))
sys.exit(1)
# now we have the vectors. Now lets load the split
training_names = load_split_file(args[2])
testing_names = load_split_file(args[3])
# now find all of the labeled images so we can loop over them all
labeled_data = []
for label in training_names:
# add the vector to our list of labeled data:
if label not in all_vector_dict:
# then we just add a zero vector to it with that name and number
labeled_data += [(label[0], label[1], np.zeros(vector_length))]
print("Created zeros vector for " + str(label))
else:
for face_in_photo in all_vector_dict[label]:
labeled_data += [face_in_photo]
print("amount of labeled data: " + str(len(labeled_data)))
# then go test it!
# the output is a list of (name, number, is_result_less_than_.6, nearest_name, nearest_number, is_same_person_bool)
# which we then output into a text file split by tabs probably.
output_file = open(output_filename, "w")
# write everything here!
# if you uncomment this line then it'll generate the results for ALL images not just the testing data.
# testing_names += training_names
# results = []
# I also save everything to here just in case Matt wants to just edit this code instead of loading the file I guess?
# there are a couple lines inside the for loop which have to be uncommented to use the results array
total = 0
correct = 0
for testing_name in testing_names:
# this is a name and number tuple
# first create a default fake thing if we weren't able to find a face in that photo
testing_vector = [(testing_name[0], testing_name[1], np.zeros(vector_length))]
if testing_name in all_vector_dict:
# print("Found testing vector for " + str(testing_name))
testing_vector = all_vector_dict[testing_name] # a list of all the photos in the picture with all their faces
# [(name, number, vector), (name, number, vector)]
nearest = None
nearest_distance = -1
for face_vector in testing_vector:
# print("HERE", testing_vector, face_vector)
nearest_face, nearest_face_distance = nearest_neighbor(face_vector[2], labeled_data)
if nearest_face_distance < nearest_distance or nearest_distance == -1:
# then it's closer, so choose that one
nearest_distance = nearest_face_distance
nearest = nearest_face
# nearest is (name, number, vector)
r = (testing_name[0], testing_name[1], nearest_distance < .6, nearest[0], nearest[1], testing_name[0] == nearest[0])
total += 1
correct += testing_name[0] == nearest[0]
# results += [r]
string_r = [str(x) for x in r]
o = "\t".join(string_r) + "\n"
output_file.write(o)
output_file.close()
print("Total:", total, "Correct:", correct)
# if you uncomment things you can now do stuff with results, which is a list of (name, number, is_result_less_than_.6, nearest_name, nearest_number, is_same_person_bool)
# for each result. Currently we only test the testing_files, you can also uncomment the line above the for loop which then means
# we generate results for ALL images including training data (which should always be correct since its nearest neighbor is itself)
# but that may be useful for adding more data to the ontology, we'll figure it up later
if __name__ == "__main__":
"""this loads the attributes file that has the data for all the photos. Pass in the filename of the tab separated file downloaded
from http://vis-www.cs.umass.edu/lfw/ with the list of all people names and the number of images associated with them"""
if len(sys.argv) != 5:
print(
"""Usage: nearestNeighborTest.py results_filename training_filename testing_filename output_filename"""
)
sys.exit(0) # exit out
main(sys.argv)
| 42.743243
| 170
| 0.727948
| 1,017
| 6,326
| 4.39823
| 0.26057
| 0.022356
| 0.023251
| 0.013414
| 0.1167
| 0.064163
| 0.049184
| 0.033982
| 0.033982
| 0.033982
| 0
| 0.009176
| 0.173095
| 6,326
| 148
| 171
| 42.743243
| 0.845919
| 0.472969
| 0
| 0
| 0
| 0
| 0.042662
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037975
| false
| 0
| 0.025316
| 0
| 0.088608
| 0.063291
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
032895bcd14a3671ab5030eaa6c072fe010b5493
| 792
|
py
|
Python
|
Miscillanious/random_split_train_val.py
|
b-safwat/multi_action_recognition
|
1a85da64cf236b9fb7c9a58ae75bdd092d05fab8
|
[
"Apache-2.0"
] | 1
|
2019-12-21T17:29:08.000Z
|
2019-12-21T17:29:08.000Z
|
Miscillanious/random_split_train_val.py
|
b-safwat/multi_action_recognition
|
1a85da64cf236b9fb7c9a58ae75bdd092d05fab8
|
[
"Apache-2.0"
] | null | null | null |
Miscillanious/random_split_train_val.py
|
b-safwat/multi_action_recognition
|
1a85da64cf236b9fb7c9a58ae75bdd092d05fab8
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np


def save_list_to_file(z_list, z_file):
    with open(z_file, 'w') as fw:
        fw.writelines(z_list)


def random_split_train_test(train_file, out_train_file, out_test_file, train_percentage=0.8):
    with open(train_file) as fr:
        lines = fr.readlines()
    np.random.shuffle(lines)
    # slice the shuffled lines into train/test portions
    train_data, test_data = lines[0:int(train_percentage*len(lines))], lines[int(train_percentage*len(lines)):]
    save_list_to_file(train_data, out_train_file)
    save_list_to_file(test_data, out_test_file)


random_split_train_test("/home/bassel/data/oa_kinetics/lbls/actions_stack_list.txt",
                        "/home/bassel/data/oa_kinetics/lbls/action_train_stacks_list.txt",
                        "/home/bassel/data/oa_kinetics/lbls/action_test_stacks_list.txt")
| 36
| 111
| 0.72096
| 125
| 792
| 4.192
| 0.32
| 0.068702
| 0.057252
| 0.080153
| 0.30916
| 0.209924
| 0.156489
| 0.156489
| 0.156489
| 0
| 0
| 0.004552
| 0.167929
| 792
| 22
| 112
| 36
| 0.790592
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 0.229508
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.071429
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
032a7743edf88f77da5c9d56359f55cc608dcd5a
| 3,596
|
py
|
Python
|
src/thompson/codered.py
|
thepolicylab/COVID-SMSExperiment
|
2eb41a2fea4858b7e794bb7a6af396f66d41f1a6
|
[
"MIT"
] | null | null | null |
src/thompson/codered.py
|
thepolicylab/COVID-SMSExperiment
|
2eb41a2fea4858b7e794bb7a6af396f66d41f1a6
|
[
"MIT"
] | null | null | null |
src/thompson/codered.py
|
thepolicylab/COVID-SMSExperiment
|
2eb41a2fea4858b7e794bb7a6af396f66d41f1a6
|
[
"MIT"
] | null | null | null |
"""
Functions and classes for interacting with the CodeRED data format
"""
from dataclasses import dataclass
from typing import List, Optional, Union
import pandas as pd
from .types import FilenameType
# The required headers for CodeRED
EXCEL_HEADERS = (
"Command",
"CustomKey",
"ContactId",
"First Name",
"Last Name",
"Groups",
"Tags",
"HomePhone",
"WorkPhone",
"CellPhone",
"OtherPhone",
"TextNumber",
"MobileProvider",
"HomeEmail",
"WorkEmail",
"OtherEmail",
"StreetAddress",
"City",
"State",
"Zip",
"Zip4",
"Preferred Language",
)
# The name of the Worksheet to submit to CodeRED
EXCEL_SHEET_NAME = "5. CodeRed"
@dataclass(frozen=True)
class CoderedContact:
"""A representation of a contact ot be sent to CodeRED"""
contact_id: Union[str, int]
first_name: str
last_name: str
# Represents the text message the person will get
groups: str
# Must be exactly 10 characters
text_number: str
# Maybe necessary?
tags: str = "English"
preferred_language: str = "English"
command: Optional[str] = None
custom_key: Optional[str] = None
home_phone: Optional[str] = None
work_phone: Optional[str] = None
cell_phone: Optional[str] = None
other_phone: Optional[str] = None
mobile_provider: Optional[str] = None
home_email: Optional[str] = None
work_email: Optional[str] = None
other_email: Optional[str] = None
street_address: Optional[str] = None
city: Optional[str] = None
state: Optional[str] = None
zip_code: Optional[str] = None
zip_code_plus_four: Optional[str] = None
def to_excel_row(self) -> List[Optional[Union[int, str]]]:
"""
Convert this contact into a row in the appropriate order for Excel output
"""
return [
self.command,
self.custom_key,
self.contact_id,
self.first_name,
self.last_name,
self.groups,
self.tags,
self.home_phone,
self.work_phone,
self.cell_phone,
self.other_phone,
self.text_number,
self.mobile_provider,
self.home_email,
self.work_email,
self.other_email,
self.street_address,
self.city,
self.state,
self.zip_code,
self.zip_code_plus_four,
self.preferred_language,
]
def make_df_from_data(contacts: List[CoderedContact]) -> pd.DataFrame:
"""
Convert a list of contacts to a data frame for easy conversion to Excel
Args:
contacts: The contacts to transform into a data frame
Returns:
The contacts as a data frame
"""
data = [contact.to_excel_row() for contact in contacts]
return pd.DataFrame.from_records(data, columns=EXCEL_HEADERS)
def make_excel_file(
filename: FilenameType, contacts: List[CoderedContact], drop_message_0: bool = True
):
"""
Turn a list of contacts into an Excel file stored at `filename`.
Args:
filename: The location of the Excel file to create
contacts: The contacts to transform into an Excel file
drop_message_0: If True, remove those people assigned to message_0
(i.e., the control) from the output
"""
df = make_df_from_data(contacts)
if drop_message_0:
df = df[df["Groups"] != "message_0"]
with pd.ExcelWriter(filename) as writer:
df.to_excel(writer, index=False, sheet_name=EXCEL_SHEET_NAME)
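A minimal usage sketch for the API above; the contact values and output filename are hypothetical, chosen only for illustration:

contacts = [
    CoderedContact(
        contact_id=1,
        first_name="Ada",           # hypothetical sample data
        last_name="Lovelace",
        groups="message_1",         # the message arm this person is assigned to
        text_number="4015550123",   # exactly 10 characters, per the field comment
    ),
]
make_excel_file("codered_upload.xlsx", contacts)  # writes worksheet "5. CodeRed"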
| 26.057971
| 87
| 0.629588
| 444
| 3,596
| 4.948198
| 0.326577
| 0.075102
| 0.102412
| 0.036413
| 0.071006
| 0.030951
| 0
| 0
| 0
| 0
| 0
| 0.003476
| 0.280033
| 3,596
| 137
| 88
| 26.248175
| 0.845114
| 0.236096
| 0
| 0
| 0
| 0
| 0.08671
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033708
| false
| 0
| 0.044944
| 0
| 0.359551
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
032c7a1695749c8763dea4e548dc9e5de7308f19
| 2,516
|
py
|
Python
|
ecommercejockey/premier/admin/inlines.py
|
anniethiessen/dieselr-ecommerce
|
9268b72553845a4650cdfe7c88b398db3cf92258
|
[
"MIT"
] | null | null | null |
ecommercejockey/premier/admin/inlines.py
|
anniethiessen/dieselr-ecommerce
|
9268b72553845a4650cdfe7c88b398db3cf92258
|
[
"MIT"
] | 11
|
2020-06-06T00:04:26.000Z
|
2022-03-12T00:57:41.000Z
|
ecommercejockey/premier/admin/inlines.py
|
anniethiessen/ecommerce-jockey
|
9268b72553845a4650cdfe7c88b398db3cf92258
|
[
"MIT"
] | null | null | null |
from imagekit.admin import AdminThumbnail
from django.contrib.admin import TabularInline

from core.admin.forms import LimitedInlineFormSet
from core.admin.utils import (
    get_change_view_link,
    get_changelist_view_link
)

from ..models import PremierProduct


class PremierManufacturerProductsTabularInline(TabularInline):
    model = PremierProduct
    fk_name = 'manufacturer'
    formset = LimitedInlineFormSet
    extra = 0
    verbose_name_plural = 'products (top 10)'
    all_link_query = 'manufacturer__id__exact'
    ordering = (
        'premier_part_number',
    )
    classes = (
        'collapse',
    )
    fields = (
        'all_link',
        'detail_link',
        'premier_part_number',
        'vendor_part_number',
        'description',
        'manufacturer',
        'inventory_ab',
        'cost_cad',
        'primary_image_preview',
        'may_be_relevant_flag',
        'is_relevant',
        'relevancy_warnings',
        'relevancy_errors',
        'relevancy_exception'
    )
    readonly_fields = (
        'relevancy_warnings',
        'relevancy_errors',
        'may_be_relevant_flag',
        'primary_image_preview',
        'all_link',
        'detail_link'
    )

    def get_rel_obj(self, obj):
        return getattr(obj, self.fk_name)

    def detail_link(self, obj):
        if not obj.pk:
            return None
        return get_change_view_link(obj, 'Details')
    detail_link.short_description = ''

    def all_link(self, obj):
        if not obj:
            return None
        query = f'{self.all_link_query}={getattr(self.get_rel_obj(obj), "pk")}'
        return get_changelist_view_link(obj._meta.model, 'See All', query)
    all_link.short_description = ''

    primary_image_preview = AdminThumbnail(
        image_field='primary_image_thumbnail'
    )
    primary_image_preview.short_description = 'primary image'

    def may_be_relevant_flag(self, obj):
        if obj.is_relevant != obj.may_be_relevant:
            return '~'
        else:
            return ''
    may_be_relevant_flag.short_description = ''

    def get_queryset(self, request):
        return super().get_queryset(request).filter(
            is_relevant=True
        ).with_admin_data()

    def get_readonly_fields(self, request, obj=None):
        readonly_fields = super().get_readonly_fields(request, obj)
        if not request.user.is_superuser:
            readonly_fields += (
                'premier_part_number',
            )
        return readonly_fields
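An inline like this only takes effect once it is attached to a parent ModelAdmin; a sketch under the assumption of a PremierManufacturer model and its admin registration (neither shown above, both hypothetical here):

from django.contrib import admin
from ..models import PremierManufacturer  # assumed model; not part of the file above

@admin.register(PremierManufacturer)
class PremierManufacturerModelAdmin(admin.ModelAdmin):
    inlines = (
        PremierManufacturerProductsTabularInline,
    )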
| 26.208333
| 79
| 0.635135
| 272
| 2,516
| 5.518382
| 0.334559
| 0.027981
| 0.043304
| 0.045303
| 0.025316
| 0.025316
| 0
| 0
| 0
| 0
| 0
| 0.001641
| 0.27345
| 2,516
| 95
| 80
| 26.484211
| 0.819475
| 0
| 0
| 0.189873
| 0
| 0
| 0.20151
| 0.056041
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075949
| false
| 0
| 0.063291
| 0.025316
| 0.405063
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
032de55c5e8bb65a3b5acc0d233be5fff6131de6
| 4,191
|
py
|
Python
|
dictionary_service.py
|
radomd92/botjagwar
|
1dc96600c40041057a9f9afde38c31ca34b8db38
|
[
"MIT"
] | 7
|
2015-01-23T17:24:04.000Z
|
2022-01-12T16:54:24.000Z
|
dictionary_service.py
|
radomd92/botjagwar
|
1dc96600c40041057a9f9afde38c31ca34b8db38
|
[
"MIT"
] | 18
|
2017-12-09T01:11:23.000Z
|
2021-09-22T13:26:24.000Z
|
dictionary_service.py
|
radomd92/botjagwar
|
1dc96600c40041057a9f9afde38c31ca34b8db38
|
[
"MIT"
] | 1
|
2015-06-22T02:17:55.000Z
|
2015-06-22T02:17:55.000Z
|
#!/usr/bin/python3
import argparse
import logging as log

from aiohttp import web

from api.databasemanager import DictionaryDatabaseManager
from api.dictionary import \
    entry, \
    definition, \
    translation, \
    configuration
from api.dictionary import \
    get_dictionary, \
    get_dictionary_xml, \
    get_language_list, \
    download_dictionary, \
    get_inferred_multilingual_dictionary
from api.dictionary.middlewares import \
    json_error_handler, \
    auto_committer

parser = argparse.ArgumentParser(description='Dictionary service')
parser.add_argument(
    '-d',
    '--db-file',
    dest='STORAGE',
    required=False,
    default='default')
parser.add_argument('-p', '--port', dest='PORT', type=int, default=8001)
parser.add_argument(
    '-l',
    '--log-file',
    dest='LOG_FILE',
    type=str,
    default='/opt/botjagwar/user_data/dictionary_service.log')
parser.add_argument('--host', dest='HOST', type=str, default='0.0.0.0')
parser.add_argument('--log-level', dest='LOG_LEVEL', type=str, default='debug')
parser.add_argument('--autocommit', dest='autocommit', type=bool, default=True)
parser.add_argument(
    '--commit-every',
    dest='commit_every',
    type=int,
    default=100)

args = parser.parse_args()
WORD_STORAGE = args.STORAGE
HOST = args.HOST
PORT = args.PORT
LOG = args.LOG_FILE
try:
    LOG_LEVEL = log._nameToLevel[args.LOG_LEVEL.upper()]
except KeyError:
    LOG_LEVEL = 10

log.basicConfig(filename=LOG, level=log.DEBUG)
dictionary_db_manager = DictionaryDatabaseManager(
    database_file=WORD_STORAGE, db_header='')
routes = web.RouteTableDef()
app = web.Application(middlewares=[
    json_error_handler,
    auto_committer,
])
app['database'] = dictionary_db_manager
app['session_instance'] = dictionary_db_manager.session
app['autocommit'] = args.autocommit
app['commit_every'] = args.commit_every
app['commit_count'] = 0

app.router.add_route('GET', '/languages/list', get_language_list)
app.router.add_route('GET', '/languages/list/download', download_dictionary)
app.router.add_route(
    'GET',
    '/definition/{definition_id}',
    definition.get_definition)
app.router.add_route(
    'GET',
    '/definition_words/{definition_id}',
    definition.get_definition_with_words)
app.router.add_route(
    'PUT',
    '/definition/{definition_id}/edit',
    definition.edit_definition)
# app.router.add_route('POST', '/definition/{language}/create', definition.create_definition)
app.router.add_route(
    'DELETE',
    '/definition/{definition_id}/delete',
    definition.delete_definition)
app.router.add_route(
    'POST',
    '/definition/search',
    definition.search_definition)
app.router.add_route('GET', '/dictionary/{language}', get_dictionary_xml)
app.router.add_route('GET', '/xml_dictionary/{language}', get_dictionary)
app.router.add_route(
    'GET',
    '/dictionary/{source}/{bridge}/{target}',
    get_inferred_multilingual_dictionary)
app.router.add_route('GET', '/entry/{language}/{word}', entry.get_entry)
app.router.add_route('POST', '/entry/{language}/create', entry.add_entry)
app.router.add_route('POST', '/entry/batch', entry.add_batch)
app.router.add_route('PUT', '/entry/{word_id}/edit', entry.edit_entry)
app.router.add_route('DELETE', '/entry/{word_id}/delete', entry.delete_entry)
app.router.add_route(
    'GET',
    '/translations/{origin}/{target}/{word}',
    translation.get_translation)
app.router.add_route(
    'GET',
    '/translations/{origin}/{word}',
    translation.get_all_translations)
app.router.add_route('GET', '/word/{word_id}', entry.get_word_by_id)
app.router.add_route('GET', '/ping', configuration.pong)
app.router.add_route('POST', '/commit', configuration.do_commit)
app.router.add_route('POST', '/rollback', configuration.do_rollback)
app.router.add_route('PUT', '/configure', configuration.configure_service)

if __name__ == '__main__':
    try:
        app.router.add_routes(routes)
        web.run_app(app, host=HOST, port=PORT, access_log=log)
    except Exception as exc:
        log.exception(exc)
        log.critical("Error occurred while setting up the server")
    finally:
        app['session_instance'].flush()
        app['session_instance'].close()
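A minimal smoke-test client for the service above, assuming it is running locally on its default port; only the /ping route registered above is exercised:

import asyncio
import aiohttp

async def ping(host='localhost', port=8001):
    # Hits the GET /ping route and prints the status and body.
    async with aiohttp.ClientSession() as session:
        async with session.get(f'http://{host}:{port}/ping') as resp:
            print(resp.status, await resp.text())

asyncio.run(ping())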
| 31.044444
| 92
| 0.720353
| 532
| 4,191
| 5.443609
| 0.244361
| 0.074586
| 0.099448
| 0.135014
| 0.28384
| 0.164365
| 0.098757
| 0
| 0
| 0
| 0
| 0.004098
| 0.1267
| 4,191
| 134
| 93
| 31.276119
| 0.787158
| 0.02577
| 0
| 0.169492
| 0
| 0
| 0.22421
| 0.108307
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.059322
| 0
| 0.059322
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
03308bb8046fd0dc61b44e11e40b5b2918967121
| 3,578
|
py
|
Python
|
classification/libs/utils.py
|
96lives/matrixlstm
|
83332111a459dd3fbca944898fffd935faac8820
|
[
"Apache-2.0"
] | 19
|
2020-08-11T09:18:28.000Z
|
2022-03-10T13:53:13.000Z
|
N-ROD/evrepr/thirdparty/matrixlstm/classification/libs/utils.py
|
Chiaraplizz/home
|
18cc93a795ce132e05b886aa34565a102915b1c6
|
[
"MIT"
] | 4
|
2021-01-04T11:55:50.000Z
|
2021-09-18T14:00:50.000Z
|
N-ROD/evrepr/thirdparty/matrixlstm/classification/libs/utils.py
|
Chiaraplizz/home
|
18cc93a795ce132e05b886aa34565a102915b1c6
|
[
"MIT"
] | 4
|
2020-09-03T07:12:55.000Z
|
2021-08-19T11:37:55.000Z
|
import torch
import numpy as np
import re
import itertools
from textwrap import wrap
import matplotlib.pyplot as plt


def padding_mask(lengths, batch_size, time_size=None):
    """
    Computes a [batch_size, time_size] binary mask which selects all and only the
    non padded values in the input tensor

    :param torch.tensor lengths: a [batch_size] tensor containing the actual length
        (before padding) of every sample in the batch
    :param int batch_size: the number of samples in the batch
    :param int time_size: the length of the padded sequences
    :retype: torch.tensors
    """
    max_len = torch.max(lengths) if time_size is None else time_size
    mask = torch.arange(max_len, device=lengths.device, dtype=lengths.dtype)
    mask = mask.expand(batch_size, max_len) < lengths.unsqueeze(1)
    return mask.type(torch.uint8)


def cat_arange(counts, dtype=torch.int32):
    """
    Concatenate results of multiple arange calls
    E.g.: cat_arange([2,1,3]) = [0, 1, 0, 0, 1, 2]
    Credits: https://stackoverflow.com/a/20033438

    :param torch.tensor counts: a 1D tensor
    :return: equivalent to torch.cat([torch.arange(c) for c in counts])
    """
    counts1 = counts[:-1].type(dtype)
    reset_index = torch.cumsum(counts1, dim=0).type(torch.int64)

    incr = torch.ones(counts.sum(), dtype=dtype, device=counts.device)
    incr[0] = 0
    incr[reset_index] = 1 - counts1
    # Reuse the incr array for the final result.
    return torch.cumsum(incr, dim=0)


def repeat_arange(counts, dtype=torch.int32):
    """
    Repeat each element of arange multiple times
    E.g.: repeat_arange([2,1,3]) = [0, 0, 1, 2, 2, 2]

    :param counts: a 1D tensor having the same length of 'tensor'
    :return: equivalent to torch.cat([torch.tensor([v]).expand(n) for v, n in enumerate(counts)])
    """
    incr = torch.zeros(counts.sum(), dtype=dtype, device=counts.device)
    set_index = torch.cumsum(counts[:-1], dim=0).type(torch.int64)
    incr[set_index] = 1
    return torch.cumsum(incr, dim=0)


def select_padded(source, mask):
    lengths = mask.sum(-1)
    max_length = lengths.max()
    batch_size, time_size, feature_size = source.shape
    out_tensor = source.new_zeros([batch_size, max_length, feature_size])
    batch_idx = repeat_arange(lengths, torch.int64)
    time_idx = cat_arange(lengths, torch.int64)
    out_tensor[batch_idx, time_idx] = source[mask]
    return out_tensor


def confusion_matrix_fig(cm, labels, normalize=False):
    if normalize:
        cm = cm.astype('float') * 10 / cm.sum(axis=1)[:, np.newaxis]
        cm = np.nan_to_num(cm, copy=True)
        cm = cm.astype('int')

    fig = plt.figure(figsize=(7, 7), facecolor='w', edgecolor='k')
    ax = fig.add_subplot(1, 1, 1)
    im = ax.imshow(cm, cmap='Oranges')

    classes = ['\n'.join(wrap(l, 40)) for l in labels]
    tick_marks = np.arange(len(classes))

    ax.set_xlabel('Predicted', fontsize=7)
    ax.set_xticks(tick_marks)
    c = ax.set_xticklabels(classes, fontsize=4, rotation=-90, ha='center')
    ax.xaxis.set_label_position('bottom')
    ax.xaxis.tick_bottom()

    ax.set_ylabel('True Label', fontsize=7)
    ax.set_yticks(tick_marks)
    ax.set_yticklabels(classes, fontsize=4, va='center')
    ax.yaxis.set_label_position('left')
    ax.yaxis.tick_left()

    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        ax.text(j, i, format(cm[i, j], 'd') if cm[i, j] != 0 else '.',
                horizontalalignment="center", fontsize=6,
                verticalalignment='center', color="black")
    return fig
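A quick sketch of the batching helpers above on toy inputs; the expected outputs follow directly from the docstring examples:

lengths = torch.tensor([2, 1, 3])
print(padding_mask(lengths, batch_size=3))
# tensor([[1, 1, 0], [1, 0, 0], [1, 1, 1]], dtype=torch.uint8)
print(cat_arange(lengths))     # tensor([0, 1, 0, 0, 1, 2], ...)
print(repeat_arange(lengths))  # tensor([0, 0, 1, 2, 2, 2], ...)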
| 31.946429
| 97
| 0.671325
| 545
| 3,578
| 4.297248
| 0.33945
| 0.0269
| 0.016652
| 0.021776
| 0.152861
| 0.105892
| 0.087105
| 0
| 0
| 0
| 0
| 0.026435
| 0.196478
| 3,578
| 111
| 98
| 32.234234
| 0.788174
| 0.259083
| 0
| 0.035088
| 0
| 0
| 0.030944
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087719
| false
| 0
| 0.105263
| 0
| 0.280702
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
03315e91b0c1b2d1e2f30824173b23c77f6b76f7
| 1,642
|
py
|
Python
|
faq_module/commands/faq_on_message.py
|
alentoghostflame/StupidAlentoBot
|
c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba
|
[
"MIT"
] | 1
|
2021-12-12T02:50:20.000Z
|
2021-12-12T02:50:20.000Z
|
faq_module/commands/faq_on_message.py
|
alentoghostflame/StupidAlentoBot
|
c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba
|
[
"MIT"
] | 17
|
2020-02-07T23:40:36.000Z
|
2020-12-22T16:38:44.000Z
|
faq_module/commands/faq_on_message.py
|
alentoghostflame/StupidAlentoBot
|
c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba
|
[
"MIT"
] | null | null | null |
from faq_module.storage import FAQManager  # , FAQConfig, FAQData
# from faq_module.commands import text
# from discord.ext import commands
# import faq_module.text
# import logging
import discord
# import typing
import re


async def faq_on_message(faq_manager: FAQManager, message: discord.Message):
    embed = discord.Embed(title="Info Requested", color=0x00ffff)
    found_keys = set()
    faq_on_recursive(faq_manager, message.content, embed, found_keys, message.guild.id)
    if embed.fields or embed.image:
        await message.channel.send(embed=embed)


def faq_on_recursive(faq_manager: FAQManager, message_content: str, embed: discord.Embed, found_keys: set, guild_id: int):
    for keyword in get_keywords(message_content):
        if keyword.lower() in faq_manager.get_keywords(guild_id):
            found_keys.add(keyword)
            faq_data = faq_manager.get(guild_id, keyword)
            if not just_keywords(faq_data.phrase):
                # \u200b is a zero width space.
                embed.add_field(name=keyword, value=faq_data.phrase + "\u200b")
            if faq_data.image_url:
                embed.set_image(url=faq_data.image_url)
            faq_on_recursive(faq_manager, faq_data.phrase, embed, found_keys, guild_id)


def get_keywords(input_string: str) -> list:
    comp = re.compile("{(.+?)}")
    return comp.findall(input_string)


def just_keywords(input_string: str) -> bool:
    comp = re.compile("({.+?})")
    keywords = comp.findall(input_string)
    if keywords and len("".join(keywords)) == len("".join(input_string.split())):
        return True
    else:
        return False
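For illustration, the two regex helpers behave as follows on made-up messages (a keyword is anything wrapped in curly braces):

print(get_keywords("see {install} and {faq} for details"))  # ['install', 'faq']
print(just_keywords("{install} {faq}"))  # True: the message is only keywords
print(just_keywords("see {install}"))    # False: there is non-keyword text too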
| 34.208333
| 122
| 0.685749
| 221
| 1,642
| 4.877828
| 0.352941
| 0.055659
| 0.038961
| 0.04731
| 0.06679
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006907
| 0.206456
| 1,642
| 47
| 123
| 34.93617
| 0.820414
| 0.10475
| 0
| 0
| 0
| 0
| 0.023256
| 0
| 0
| 0
| 0.005472
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.103448
| 0
| 0.310345
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
03337a286707757f1d435b3aaef9ca40f548193a
| 2,298
|
py
|
Python
|
tests/devices_test.py
|
jebabi/controllerx
|
bc68cdd69e416880e6394b3ecf92522b3871e959
|
[
"MIT"
] | null | null | null |
tests/devices_test.py
|
jebabi/controllerx
|
bc68cdd69e416880e6394b3ecf92522b3871e959
|
[
"MIT"
] | null | null | null |
tests/devices_test.py
|
jebabi/controllerx
|
bc68cdd69e416880e6394b3ecf92522b3871e959
|
[
"MIT"
] | null | null | null |
# Stdlib imports required by _import_modules below
import importlib
import os
import pkgutil

from tests.utils import hass_mock, get_instances
import devices as devices_module
from core import Controller
from core import type as type_module


def _import_modules(file_dir, package):
    pkg_dir = os.path.dirname(file_dir)
    for (module_loader, name, ispkg) in pkgutil.iter_modules([pkg_dir]):
        if ispkg:
            _import_modules(pkg_dir + "/" + name + "/__init__.py", package + "." + name)
        else:
            importlib.import_module("." + name, package)


def _all_subclasses(cls):
    return list(
        set(cls.__subclasses__()).union(
            [s for c in cls.__subclasses__() for s in _all_subclasses(c)]
        )
    )


def get_devices():
    _import_modules(devices_module.__file__, devices_module.__package__)
    subclasses = _all_subclasses(Controller)
    devices = [cls_() for cls_ in subclasses if len(cls_.__subclasses__()) == 0]
    return devices


def check_mapping(mapping, all_possible_actions, device):
    if mapping is None:
        return
    for k, v in mapping.items():
        if type(v) != str:
            raise ValueError(
                "The value from the mapping should be a string, matching "
                + "one of the actions from the controller. "
                + f"The possible actions are: {all_possible_actions}. "
                + f"Device class: {device.__class__.__name__}"
            )
        if v not in all_possible_actions:
            raise ValueError(
                f"{v} not found in the list of possible action from the controller. "
                + f"The possible actions are: {all_possible_actions}"
            )


def test_devices(hass_mock):
    devices = get_instances(
        devices_module.__file__, devices_module.__package__, Controller
    )
    for device in devices:
        type_actions_mapping = device.get_type_actions_mapping()
        if type_actions_mapping is None:
            continue
        possible_actions = list(type_actions_mapping.keys())

        mappings = device.get_z2m_actions_mapping()
        check_mapping(mappings, possible_actions, device)

        mappings = device.get_deconz_actions_mapping()
        check_mapping(mappings, possible_actions, device)

        mappings = device.get_zha_actions_mapping()
        check_mapping(mappings, possible_actions, device)
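As a toy illustration of how _all_subclasses walks the class tree (the classes here are hypothetical, defined only for the demo):

class A: pass
class B(A): pass
class C(B): pass

print(sorted(cls.__name__ for cls in _all_subclasses(A)))  # ['B', 'C']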
| 35.353846
| 88
| 0.656223
| 278
| 2,298
| 5.032374
| 0.291367
| 0.107219
| 0.051465
| 0.055754
| 0.276626
| 0.276626
| 0.223731
| 0.223731
| 0.184417
| 0.184417
| 0
| 0.001176
| 0.260226
| 2,298
| 64
| 89
| 35.90625
| 0.821765
| 0
| 0
| 0.09434
| 0
| 0
| 0.137511
| 0.031332
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09434
| false
| 0
| 0.150943
| 0.018868
| 0.301887
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0337794490e59afb0d50e7e70f8ff18f29c9d996
| 1,912
|
py
|
Python
|
Data Manipulation with pandas/Transforming-Data.py
|
shreejitverma/Data-Scientist
|
03c06936e957f93182bb18362b01383e5775ffb1
|
[
"MIT"
] | 2
|
2022-03-12T04:53:03.000Z
|
2022-03-27T12:39:21.000Z
|
Data Manipulation with pandas/Transforming-Data.py
|
shivaniverma1/Data-Scientist
|
f82939a411484311171465591455880c8e354750
|
[
"MIT"
] | null | null | null |
Data Manipulation with pandas/Transforming-Data.py
|
shivaniverma1/Data-Scientist
|
f82939a411484311171465591455880c8e354750
|
[
"MIT"
] | 2
|
2022-03-12T04:52:21.000Z
|
2022-03-27T12:45:32.000Z
|
# Import pandas using the alias pd
import pandas as pd
# Print the head of the homelessness data
print(homelessness.head())
# Print the values of homelessness
print(homelessness.values)
# Print the column index of homelessness
print(homelessness.columns)
# Print the row index of homelessness
print(homelessness.index)
# Sort homelessness by individual
homelessness_ind = homelessness.sort_values('individuals')
# Print the top few rows
print(homelessness_ind.head())
# Select the individuals column
individuals = homelessness['individuals']
# Print the head of the result
print(individuals.head())
# Filter for rows where individuals is greater than 10000
ind_gt_10k = homelessness[homelessness['individuals'] > 10000]
# See the result
print(ind_gt_10k)
# Subset for rows in South Atlantic or Mid-Atlantic regions
south_mid_atlantic = homelessness[(homelessness['region'] == 'South Atlantic') | (
homelessness['region'] == 'Mid-Atlantic')]
# See the result
print(south_mid_atlantic)
# Add total col as sum of individuals and family_members
homelessness['total'] = homelessness['individuals']+homelessness['family_members']
# Add p_individuals col as proportion of individuals
homelessness['p_individuals'] = homelessness['individuals']/homelessness['total']
# See the result
print(homelessness)
# Create indiv_per_10k col as homeless individuals per 10k state pop
homelessness["indiv_per_10k"] = 10000 * \
((homelessness['individuals']) / (homelessness['state_pop']))
# Subset rows for indiv_per_10k greater than 20
high_homelessness = homelessness[homelessness['indiv_per_10k'] > 20]
# Sort high_homelessness by descending indiv_per_10k
high_homelessness_srt = high_homelessness.sort_values(
'indiv_per_10k', ascending=False)
# From high_homelessness_srt, select the state and indiv_per_10k cols
result = high_homelessness_srt[['state', 'indiv_per_10k']]
# See the result
print(result)
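The exercises above assume a homelessness DataFrame has already been loaded (it ships with the course materials); a minimal stand-in with illustrative values, defined before the code runs, could look like:

homelessness = pd.DataFrame({
    'region': ['South Atlantic', 'Mid-Atlantic', 'Pacific'],  # illustrative rows
    'state': ['Delaware', 'New York', 'California'],
    'individuals': [708.0, 39827.0, 109008.0],
    'family_members': [374.0, 52070.0, 20964.0],
    'state_pop': [965479, 19530351, 39461588],
})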
| 26.929577
| 82
| 0.780335
| 252
| 1,912
| 5.753968
| 0.269841
| 0.037241
| 0.06069
| 0.064138
| 0.073103
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024625
| 0.129184
| 1,912
| 70
| 83
| 27.314286
| 0.846246
| 0.421548
| 0
| 0
| 0
| 0
| 0.191136
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.043478
| 0
| 0.043478
| 0.391304
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
033779b299ba043f0969f3a671cd13882ea21786
| 1,599
|
py
|
Python
|
models/bittrex.py
|
etherionlab/the_token_fund_asset_parser
|
c0d7346a8df6ca44992d7c852b58a114692865ae
|
[
"MIT"
] | 6
|
2017-06-11T19:24:36.000Z
|
2017-09-21T21:17:15.000Z
|
models/bittrex.py
|
baby636/the-token-fund-asset-parser
|
c0d7346a8df6ca44992d7c852b58a114692865ae
|
[
"MIT"
] | 4
|
2017-07-24T10:57:26.000Z
|
2017-07-30T10:09:42.000Z
|
models/bittrex.py
|
baby636/the-token-fund-asset-parser
|
c0d7346a8df6ca44992d7c852b58a114692865ae
|
[
"MIT"
] | 6
|
2018-08-02T05:57:11.000Z
|
2021-02-09T06:55:22.000Z
|
import aiohttp
from time import time
import json
from hashlib import sha512
import hmac

from .fetcher import Fetcher


class BittrexAPI(Fetcher):
    _URL = 'https://bittrex.com/api/v1.1/'
    _KEY = None
    _SECRET = None

    def __init__(self, key, secret):
        if key is None or secret is None:
            raise EnvironmentError("Bittrex key and secret must be specified in configs")
        self._KEY = key
        self._SECRET = secret

    def _signature(self, query):
        # HMAC-SHA512 of the full request URL, as required by the Bittrex v1.1 API
        message = query
        return hmac.new(
            key=self._SECRET.encode(),
            msg=message.encode(),
            digestmod=sha512
        ).hexdigest().upper()

    async def get_balances(self, loop, symbols, callback=None):
        async with aiohttp.ClientSession(loop=loop) as session:
            nonce = int(time())
            endpoint = self._URL + \
                'account/getbalances?apikey={}&nonce={}'.format(self._KEY, nonce)
            signature = self._signature(endpoint)
            headers = {
                'apisign': signature
            }
            _response = await self._fetch(session=session, url=endpoint, headers=headers)
            balances = json.loads(_response).get('result', [])
            result = []
            for balance in balances:
                if balance['Currency'] in symbols:
                    result.append(
                        (balance['Currency'],
                         float(balance.get('Balance', 0)))
                    )
            if callback is not None:
                callback(result)
            return result
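A hedged driver sketch: the credentials are placeholders, and get_balances expects the running event loop (note that aiohttp's loop argument is deprecated in recent releases):

import asyncio

async def main():
    api = BittrexAPI(key='YOUR_KEY', secret='YOUR_SECRET')  # placeholder credentials
    loop = asyncio.get_running_loop()
    balances = await api.get_balances(loop, symbols={'BTC', 'ETH'})
    print(balances)  # e.g. [('BTC', 0.5), ('ETH', 2.0)]

asyncio.run(main())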
| 31.98
| 89
| 0.553471
| 164
| 1,599
| 5.286585
| 0.463415
| 0.024221
| 0.029988
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008654
| 0.349594
| 1,599
| 49
| 90
| 32.632653
| 0.825
| 0
| 0
| 0
| 0
| 0
| 0.09631
| 0.023765
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0
| 0.139535
| 0
| 0.325581
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
033938e07676961c14e3b120f246c45a2d3f66be
| 1,051
|
py
|
Python
|
facerecognition.py
|
Srijani-Chakroborty/Face-Recognition-System
|
60b1ef10bd724ddcd5d9e35ec5639ae73917047c
|
[
"MIT"
] | 1
|
2022-02-27T18:34:57.000Z
|
2022-02-27T18:34:57.000Z
|
facerecognition.py
|
Srijani-Chakroborty/Face-Recognition-System
|
60b1ef10bd724ddcd5d9e35ec5639ae73917047c
|
[
"MIT"
] | null | null | null |
facerecognition.py
|
Srijani-Chakroborty/Face-Recognition-System
|
60b1ef10bd724ddcd5d9e35ec5639ae73917047c
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np

face_classifier = cv2.CascadeClassifier('HaarCascade/haarcascade_frontalface_default.xml')


def face_extractor(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    # detectMultiScale returns an empty sequence when no face is found;
    # the original `if faces is ():` identity check is unreliable here
    if len(faces) == 0:
        return None
    for (x, y, w, h) in faces:
        cropped_face = img[y:y + h, x:x + w]
    return cropped_face


cap = cv2.VideoCapture(0)
count = 0

while True:
    ret, frame = cap.read()
    if face_extractor(frame) is not None:
        count += 1
        face = cv2.resize(face_extractor(frame), (200, 200))
        face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
        file_name_path = 'faces/user' + str(count) + '.jpg'
        cv2.imwrite(file_name_path, face)
        cv2.putText(face, str(count), (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
        cv2.imshow('Face Cropper', face)
    else:
        print("Face Not Found")
    if cv2.waitKey(1) == 13 or count == 100:
        break

cap.release()
cv2.destroyAllWindows()
print('Collecting samples complete!!!')
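One practical caveat: cv2.imwrite will typically fail without raising when the target directory is missing, so creating the output folder up front avoids silently losing every sample:

import os
os.makedirs('faces', exist_ok=True)  # run before the capture loop above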
| 30.911765
| 89
| 0.656518
| 152
| 1,051
| 4.427632
| 0.506579
| 0.041605
| 0.047548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054282
| 0.211227
| 1,051
| 34
| 90
| 30.911765
| 0.757539
| 0
| 0
| 0
| 0
| 0
| 0.114818
| 0.046124
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0.032258
| 0.064516
| 0
| 0.16129
| 0.064516
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|