hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2d4703008651b2c0eb21a2abb6541a3476ab1bbb | 507 | py | Python | pyscf/nao/test/test_0029_lil_vs_coo.py | KMCzajkowski/pyscf | e8af41d910cc0d3963655120c0b689590ad978e7 | [
"BSD-2-Clause"
] | null | null | null | pyscf/nao/test/test_0029_lil_vs_coo.py | KMCzajkowski/pyscf | e8af41d910cc0d3963655120c0b689590ad978e7 | [
"BSD-2-Clause"
] | null | null | null | pyscf/nao/test/test_0029_lil_vs_coo.py | KMCzajkowski/pyscf | e8af41d910cc0d3963655120c0b689590ad978e7 | [
"BSD-2-Clause"
] | null | null | null | from __future__ import print_function, division
import os,unittest
from pyscf.nao import system_vars_c
sv = system_vars_c().init_siesta_xml(label='water', cd=os.path.dirname(os.path.abspath(__file__)))
class KnowValues(unittest.TestCase):
def test_lil_vs_coo(self):
""" Init system variables on libnao's site """
lil = sv.overlap_lil().tocsr()
coo = sv.overlap_coo().tocsr()
derr = abs(coo-lil).sum()/coo.nnz
self.assertLess(derr, 1e-12)
if __name__ == "__main__": unittest.main()
| 29.823529 | 98 | 0.721893 | 77 | 507 | 4.38961 | 0.649351 | 0.059172 | 0.065089 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006849 | 0.136095 | 507 | 16 | 99 | 31.6875 | 0.76484 | 0.074951 | 0 | 0 | 0 | 0 | 0.0282 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.454545 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d47d930ec9d304acff898d1dd365cf1e72a0250 | 7,588 | py | Python | neurokit2/complexity/fractal_mandelbrot.py | vansjyo/NeuroKit | 238cd3d89467f7922c68a3a4c1f44806a8466922 | [
"MIT"
] | 1 | 2020-05-26T09:46:57.000Z | 2020-05-26T09:46:57.000Z | neurokit2/complexity/fractal_mandelbrot.py | vansjyo/NeuroKit | 238cd3d89467f7922c68a3a4c1f44806a8466922 | [
"MIT"
] | null | null | null | neurokit2/complexity/fractal_mandelbrot.py | vansjyo/NeuroKit | 238cd3d89467f7922c68a3a4c1f44806a8466922 | [
"MIT"
] | 1 | 2020-10-27T06:47:51.000Z | 2020-10-27T06:47:51.000Z | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
def fractal_mandelbrot(size=1000, real_range=(-2, 2), imaginary_range=(-2, 2), threshold=4, iterations=25, buddha=False, show=False):
"""Generate a Mandelbrot (or a Buddhabrot) fractal
Vectorized function to efficiently generate an array containing values corresponding to a Mandelbrot fractal.
Parameters
-----------
size : int
The size in pixels (corresponding to the width of the figure).
real_range, imaginary_range : tuple
        The Mandelbrot set is defined within the [-2, 2] complex space (the real part being the x-axis and the imaginary part the y-axis). Adjusting these ranges can be used to pan, zoom and crop the figure.
iterations : int
Number of iterations.
threshold : int
        The escape threshold; increasing it increases the sharpness (not used for buddhabrots).
buddha : bool
Whether to return a buddhabrot.
show : bool
        Visualize the fractal.
Examples
---------
>>> import neurokit2 as nk
>>>
>>> # Mandelbrot fractal
>>> nk.fractal_mandelbrot(show=True)
>>>
>>> # Zoom at seahorse valley
    >>> nk.fractal_mandelbrot(real_range=(-0.76, -0.74),
    ...                       imaginary_range=(0.09, 0.11),
    ...                       iterations=100, show=True)
>>>
>>> # Draw manually
>>> m = nk.fractal_mandelbrot(real_range=(-2, 0.75), imaginary_range=(-1.25, 1.25))
>>> plt.imshow(m.T, cmap="viridis")
>>> plt.axis("off")
>>> plt.show()
>>>
>>> # Buddhabrot
    >>> b = nk.fractal_mandelbrot(size=1500,
    ...                           real_range=(-2, 0.75), imaginary_range=(-1.25, 1.25),
    ...                           buddha=True, iterations=200)
>>> plt.imshow(b.T, cmap="gray")
>>> plt.axis("off")
>>> plt.show()
>>>
>>> # Mixed
>>> m = nk.fractal_mandelbrot()
>>> b = nk.fractal_mandelbrot(buddha=True, iterations=200)
>>>
>>> mixed = m - b
>>> plt.imshow(mixed.T, cmap="gray")
>>> plt.axis("off")
>>> plt.show()
"""
if buddha is False:
img = _mandelbrot(size=size,
real_range=real_range,
imaginary_range=imaginary_range,
threshold=threshold,
iterations=iterations)
else:
img = _buddhabrot(size=size,
real_range=real_range,
imaginary_range=imaginary_range,
iterations=iterations)
if show is True:
plt.imshow(img, cmap="rainbow")
plt.axis("off")
plt.show()
return img
# =============================================================================
# Internals
# =============================================================================
def _mandelbrot(size=1000, real_range=(-2, 2), imaginary_range=(-2, 2), iterations=25, threshold=4):
img, c = _mandelbrot_initialize(size=size, real_range=real_range, imaginary_range=imaginary_range)
optim = _mandelbrot_optimize(c)
z = np.copy(c)
for i in range(1, iterations+1):
# Continue only where smaller than threshold
mask = (z*z.conjugate()).real < threshold
mask = np.logical_and(mask, optim)
        if not np.any(mask):
break
# Increase
img[mask] += 1
# Iterate based on Mandelbrot equation
z[mask] = z[mask]**2 + c[mask]
    # Fill optimized area
img[~optim] = np.max(img)
return img
def _mandelbrot_initialize(size=1000, real_range=(-2, 2), imaginary_range=(-2, 2)):
# Image space
width = size
height = _mandelbrot_width2height(width, real_range, imaginary_range)
img = np.full((height, width), 0)
# Complex space
real = np.array([np.linspace(*real_range, width), ] * height)
imaginary = np.array([np.linspace(*imaginary_range, height), ] * width).T
c = 1j*imaginary
c += real
return img, c
# =============================================================================
# Buddhabrot
# =============================================================================
def _buddhabrot(size=1000, iterations=100, real_range=(-2, 2), imaginary_range=(-2, 2)):
    # Find original width and height (enforced so that the output has the same size as the mandelbrot)
width = size
height = _mandelbrot_width2height(width, real_range, imaginary_range)
# Inflate size to match -2, 2
x = np.array((np.array(real_range) + 2) / 4 * size, int)
    size = int(size * (size / (x[1] - x[0])))
img = np.zeros([size, size], int)
c = _buddhabrot_initialize(size=img.size, iterations=iterations, real_range=real_range, imaginary_range=imaginary_range)
# use these c-points as the initial 'z' points.
z = np.copy(c)
    while len(z) > 0:
# translate z points into image coordinates
x = np.array((z.real + 2) / 4 * size, int)
y = np.array((z.imag + 2) / 4 * size, int)
# add value to all occupied pixels
img[y, x] += 1
# apply mandelbrot dynamic
z = z ** 2 + c
# shed the points that have escaped
mask = np.abs(z) < 2
c = c[mask]
z = z[mask]
# Crop parts not asked for
xrange = np.array((np.array(real_range) + 2) / 4 * size).astype(int)
yrange = np.array((np.array(imaginary_range) + 2) / 4 * size).astype(int)
img = img[yrange[0]:yrange[0] + height, xrange[0]:xrange[0] + width]
return img
def _buddhabrot_initialize(size=1000, iterations=100, real_range=(-2, 2), imaginary_range=(-2, 2), threshold=4):
# Allocate an array to store our non-mset points as we find them.
sets = np.zeros(size, dtype=np.complex128)
sets_found = 0
# create an array of random complex numbers (our 'c' points)
c = np.random.uniform(*real_range, size) + (np.random.uniform(*imaginary_range, size) * 1j)
c = c[_mandelbrot_optimize(c)]
z = np.copy(c)
for i in range(iterations):
# apply mandelbrot dynamic
z = z ** 2 + c
# collect the c points that have escaped
mask = np.abs(z) < 2
        new_sets = c[~mask]
sets[sets_found:sets_found + len(new_sets)] = new_sets
sets_found += len(new_sets)
# then shed those points from our test set before continuing.
c = c[mask]
z = z[mask]
# return only the points that are not in the mset
return sets[:sets_found]
# =============================================================================
# Utils
# =============================================================================
def _mandelbrot_optimize(c):
    # Optimization: most of the mset points lie within the cardioid or in
    # the period-2 bulb (the two most prominent shapes in the mandelbrot
    # set). We can eliminate these from our search straight away and save
    # a lot of time.
    # see: http://en.wikipedia.org/wiki/Mandelbrot_set#Optimizations
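    # Sketch of the math behind the two masks below (following the article
    # above): writing c = x + iy and p = sqrt((x - 1/4)^2 + y^2), a point
    # lies inside the main cardioid when x < p - 2*p**2 + 1/4, and inside
    # the period-2 bulb when (x + 1)^2 + y^2 < 1/16 (= 0.0625); the masks
    # keep only the points *outside* these two regions.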
# First eliminate points within the cardioid
p = (((c.real - 0.25)**2) + (c.imag**2)) ** .5
mask1 = c.real > p - (2 * p**2) + 0.25
# Next eliminate points within the period-2 bulb
mask2 = ((c.real + 1)**2) + (c.imag**2) > 0.0625
# Combine masks
mask = np.logical_and(mask1, mask2)
return mask
def _mandelbrot_width2height(size=1000, real_range=(-2, 2), imaginary_range=(-2, 2)):
return int(np.rint((imaginary_range[1] - imaginary_range[0]) / (real_range[1] - real_range[0]) * size))
| 32.289362 | 195 | 0.564444 | 965 | 7,588 | 4.340933 | 0.245596 | 0.055861 | 0.020053 | 0.038434 | 0.275483 | 0.247076 | 0.234423 | 0.22201 | 0.198615 | 0.169492 | 0 | 0.031283 | 0.254349 | 7,588 | 234 | 196 | 32.42735 | 0.709084 | 0.442409 | 0 | 0.285714 | 0 | 0 | 0.002509 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.02381 | 0.011905 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d4bc1894657b5d6284de21f926e7e2c93e7bde2 | 905 | py | Python | setup.py | kurisuD/huunifie | 88f08ac7e39bbaa62648af13f0f16a84bc3558e7 | [
"WTFPL"
] | null | null | null | setup.py | kurisuD/huunifie | 88f08ac7e39bbaa62648af13f0f16a84bc3558e7 | [
"WTFPL"
] | null | null | null | setup.py | kurisuD/huunifie | 88f08ac7e39bbaa62648af13f0f16a84bc3558e7 | [
"WTFPL"
] | 1 | 2019-08-13T03:38:27.000Z | 2019-08-13T03:38:27.000Z | # coding=utf-8
import setuptools
with open("README.md") as fh:
long_description = fh.read()
setuptools.setup(
name="huunifie",
version="0.4.3",
author="KurisuD",
author_email="KurisuD@pypi.darnand.net",
description="""A Hue bridge and Unifi controller client.
Enables/disables specified Hue schedules in the presence/absence of specified wifi devices on the Unifi controller.""",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/KurisuD/huunifie",
packages=setuptools.find_packages(),
install_requires=['pathlib', 'requests', 'pap_logger', 'argparse'],
classifiers=[
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.5",
"License :: Public Domain",
"Operating System :: OS Independent",
"Topic :: Home Automation"
],
)
| 33.518519 | 120 | 0.677348 | 104 | 905 | 5.798077 | 0.759615 | 0.099502 | 0.063018 | 0.099502 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009537 | 0.18895 | 905 | 26 | 121 | 34.807692 | 0.811989 | 0.01326 | 0 | 0 | 0 | 0 | 0.50954 | 0.026936 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.043478 | 0 | 0.043478 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d511e09c8b1ba0c199202530cfd7b03e8fdda8a | 3,088 | py | Python | backgrounds/nasaimg.py | martin-helmich/spacicon | 284a389fd75bdc8558ddde9d7fd30bb0f50fc6f3 | [
"MIT"
] | 1 | 2020-10-16T18:49:54.000Z | 2020-10-16T18:49:54.000Z | backgrounds/nasaimg.py | martin-helmich/spacicon | 284a389fd75bdc8558ddde9d7fd30bb0f50fc6f3 | [
"MIT"
] | 3 | 2021-06-08T19:04:04.000Z | 2022-03-11T23:19:09.000Z | backgrounds/nasaimg.py | martin-helmich/spacicon | 284a389fd75bdc8558ddde9d7fd30bb0f50fc6f3 | [
"MIT"
] | null | null | null | import helper.colors
import os
import os.path
import math
from objects import Renderable
from random import Random
from svgwrite import Drawing
from svgwrite.container import Group
from PIL import Image
from typing import Tuple
from io import BytesIO
ASSET_DIR = os.path.realpath(os.path.dirname(__file__) + "/../assets/backgrounds")
class NASAImageBackground(Renderable):
img: str
img_path: str
width: int
height: int
local_paths: bool
prng: Random
def __init__(self,
w: int,
h: int,
img: str,
local_paths: bool = False,
prng: Random = Random()) -> None:
self.width = w
self.height = h
self.prng = prng
self.img = img
self.img_path = os.path.join(ASSET_DIR, img)
self.local_paths = local_paths
def random_configuration(self) -> Tuple[int, int, float, float, float]:
width: int
height: int
with Image.open(self.img_path) as img:
width, height = img.size
max_x_scale = 1.0
max_y_scale = 1.0
min_x_scale = self.width / width
min_y_scale = self.height / height
min_scale = max(min_x_scale, min_y_scale)
max_scale = min(max_x_scale, max_y_scale)
scale = self.prng.uniform(min_scale, max_scale)
min_x_translate = 0
min_y_translate = 0
max_x_translate = (width * scale) - self.width
max_y_translate = (height * scale) - self.height
x_translate = self.prng.uniform(min_x_translate, max_x_translate)
y_translate = self.prng.uniform(min_y_translate, max_y_translate)
return (width, height, scale, x_translate, y_translate)
def render_raster(self):
width, height, scale, x_translate, y_translate = self.random_configuration()
with Image.open(self.img_path) as img:
resized = img.resize((math.ceil(width * scale), math.ceil(height * scale)), Image.HAMMING)
cropped = resized.crop((x_translate, y_translate, x_translate + self.width, y_translate + self.height))
fp = BytesIO()
cropped.save(fp, format="JPEG", quality=80)
return fp.getvalue()
def render(self, dwg: Drawing) -> Group:
width, height, scale, x_translate, y_translate = self.random_configuration()
image_url = "/assets/%s" % self.img
if self.local_paths:
image_url = "file://%s" % os.path.join(ASSET_DIR, self.img)
g = dwg.g()
r = dwg.image(href=self.img)
r["xlink:href"] = image_url
r["width"] = "%dpx" % width
r["height"] = "%dpx" % height
r.scale(scale)
        r.translate(-x_translate, -y_translate)
g.add(r)
return g
def random_background(prng: Random, width: int, height: int, local_paths: bool = False) -> NASAImageBackground:
images = [p for p in os.listdir(ASSET_DIR) if p.endswith(".jpg")]
image = prng.choice(images)
return NASAImageBackground(width, height, image, local_paths, prng) | 31.510204 | 115 | 0.621762 | 410 | 3,088 | 4.485366 | 0.234146 | 0.059815 | 0.035889 | 0.065253 | 0.203915 | 0.148994 | 0.148994 | 0.095704 | 0.064165 | 0.064165 | 0 | 0.003567 | 0.27364 | 3,088 | 98 | 116 | 31.510204 | 0.816317 | 0 | 0 | 0.105263 | 0 | 0 | 0.025251 | 0.007122 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065789 | false | 0 | 0.144737 | 0 | 0.355263 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
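
# A hedged usage sketch for the module above; the seed, canvas size and
# output filename are illustrative assumptions, not part of the original.
def _demo_background() -> None:
    rng = Random(42)
    bg = random_background(rng, width=640, height=480, local_paths=True)
    dwg = Drawing(size=(640, 480))  # svgwrite Drawing, as imported above
    dwg.add(bg.render(dwg))         # render() returns an svgwrite Group
    dwg.saveas("background.svg")    # hypothetical output path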
2d51b04223d5bc159f33d351d258c06dc290160e | 594 | py | Python | src/generator.py | davidkowalk/stonecutter-cycle-generator | cdef07bef51ef0742e7180b8745e0b68ff885051 | [
"MIT"
] | null | null | null | src/generator.py | davidkowalk/stonecutter-cycle-generator | cdef07bef51ef0742e7180b8745e0b68ff885051 | [
"MIT"
] | null | null | null | src/generator.py | davidkowalk/stonecutter-cycle-generator | cdef07bef51ef0742e7180b8745e0b68ff885051 | [
"MIT"
] | null | null | null | def gen_contents(ids: list):
recipes = list()
pairs = list()
json = """
{{
"type": "minecraft:stonecutting",
"ingredient": {{
"item": "{src}"
}},
"result": "{dest}",
"count": 1
}}
"""
for source_item in ids:
for dest_item in ids:
if source_item == dest_item:
continue
recipes.append(json.format(src = source_item, dest = dest_item))
pairs.append((source_item.replace("minecraft:",""), dest_item.replace("minecraft:","")))
return recipes, pairs
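
# A minimal usage sketch (assumption: these item ids exist in the target
# Minecraft version; this block is illustrative, not part of the original).
if __name__ == "__main__":
    demo_ids = ["minecraft:stone", "minecraft:cobblestone", "minecraft:andesite"]
    recipes, pairs = gen_contents(demo_ids)
    # one recipe per ordered pair of distinct items
    assert len(recipes) == len(demo_ids) * (len(demo_ids) - 1)
    print(pairs[0])    # e.g. ('stone', 'cobblestone')
    print(recipes[0])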
| 22 | 100 | 0.506734 | 59 | 594 | 4.949153 | 0.457627 | 0.136986 | 0.061644 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002538 | 0.3367 | 594 | 26 | 101 | 22.846154 | 0.738579 | 0 | 0 | 0 | 0 | 0 | 0.324916 | 0.042088 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d52badd52bf3bbb89902eb631c36cf4b905858b | 38,080 | py | Python | test/unit/messages/bloxroute/test_bloxroute_message_factory.py | thabaptiser/bxcommon | ee8547c9fc68c71b8acf4ce0989a344681ea273c | [
"MIT"
] | null | null | null | test/unit/messages/bloxroute/test_bloxroute_message_factory.py | thabaptiser/bxcommon | ee8547c9fc68c71b8acf4ce0989a344681ea273c | [
"MIT"
] | null | null | null | test/unit/messages/bloxroute/test_bloxroute_message_factory.py | thabaptiser/bxcommon | ee8547c9fc68c71b8acf4ce0989a344681ea273c | [
"MIT"
] | null | null | null | import random
import struct
import time
from typing import TypeVar, Type
from datetime import datetime
from bxcommon import constants
from bxcommon.constants import UL_INT_SIZE_IN_BYTES, NETWORK_NUM_LEN, NODE_ID_SIZE_IN_BYTES, BX_HDR_COMMON_OFF, \
BLOCK_ENCRYPTED_FLAG_LEN, TRANSACTION_FLAG_LEN, BROADCAST_TYPE_LEN
from bxcommon.exceptions import PayloadLenError
from bxcommon.messages.bloxroute.abstract_broadcast_message import AbstractBroadcastMessage
from bxcommon.messages.bloxroute.ack_message import AckMessage
from bxcommon.messages.bloxroute.bdn_performance_stats_message import BdnPerformanceStatsMessage, \
BdnPerformanceStatsData
from bxcommon.messages.bloxroute.block_confirmation_message import BlockConfirmationMessage
from bxcommon.messages.bloxroute.block_holding_message import BlockHoldingMessage
from bxcommon.messages.bloxroute.bloxroute_message_factory import bloxroute_message_factory
from bxcommon.messages.bloxroute.bloxroute_version_manager import bloxroute_version_manager
from bxcommon.messages.bloxroute.broadcast_message import BroadcastMessage
from bxcommon.messages.bloxroute.compressed_block_txs_message import CompressedBlockTxsMessage
from bxcommon.messages.bloxroute.get_compressed_block_txs_message import GetCompressedBlockTxsMessage
from bxcommon.messages.bloxroute.get_tx_contents_message import GetTxContentsMessage
from bxcommon.messages.bloxroute.get_txs_message import GetTxsMessage
from bxcommon.messages.bloxroute.hello_message import HelloMessage
from bxcommon.messages.bloxroute.key_message import KeyMessage
from bxcommon.messages.bloxroute.notification_message import NotificationMessage, NotificationCode
from bxcommon.messages.bloxroute.ping_message import PingMessage
from bxcommon.messages.bloxroute.pong_message import PongMessage
from bxcommon.messages.bloxroute.routing_update_message import RoutingUpdateMessage
from bxcommon.messages.bloxroute.transaction_cleanup_message import TransactionCleanupMessage
from bxcommon.messages.bloxroute.tx_contents_message import TxContentsMessage
from bxcommon.messages.bloxroute.tx_message import TxMessage
from bxcommon.messages.bloxroute.txs_message import TxsMessage
from bxcommon.messages.bloxroute.version_message import VersionMessage
from bxcommon.models.broadcast_message_type import BroadcastMessageType
from bxcommon.models.entity_type_model import EntityType
from bxcommon.models.transaction_flag import TransactionFlag
from bxcommon.models.transaction_info import TransactionInfo
from bxcommon.network.ip_endpoint import IpEndpoint
from bxcommon.test_utils import helpers
from bxcommon.test_utils.helpers import create_input_buffer_with_bytes
from bxcommon.test_utils.message_factory_test_case import MessageFactoryTestCase
from bxcommon.utils import crypto
from bxcommon.utils.crypto import SHA256_HASH_LEN, KEY_SIZE
from bxcommon.utils.object_hash import Sha256Hash, NULL_SHA256_HASH, ConcatHash
T = TypeVar("T")
class BloxrouteMessageFactory(MessageFactoryTestCase):
HASH = Sha256Hash(crypto.double_sha256(b"123"))
NETWORK_NUM = 12345
NETWORK_NUM_BYTEARRAY = bytearray(constants.NETWORK_NUM_LEN)
struct.pack_into("<L", NETWORK_NUM_BYTEARRAY, 0, NETWORK_NUM)
NODE_ID = "c2b04fd2-7c81-432b-99a5-8b68f43d97e8"
BROADCAST_TYPE = BroadcastMessageType.BLOCK
MESSAGE_ID = ConcatHash(
crypto.double_sha256(b"123") + NETWORK_NUM_BYTEARRAY +
bytearray(BROADCAST_TYPE.value.encode(constants.DEFAULT_TEXT_ENCODING)), 0
)
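    # MESSAGE_ID above mirrors the broadcast wire layout: the 32-byte
    # double-SHA256 block hash, then the 4-byte network number, then the
    # encoded broadcast type, wrapped together in a ConcatHash.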
def get_message_factory(self):
return bloxroute_message_factory
def create_message_successfully(
self,
message: T,
message_type: Type[T],
) -> T:
# check control flag
result = super().create_message_successfully(message, message_type)
self.assertEqual(1, result.rawbytes()[-1])
self.assertEqual(len(message.rawbytes()) - message.HEADER_LENGTH, message.payload_len())
return result
def get_hashed_message_preview_successfully(self, message, expected_hash):
is_full_message, block_hash, broadcast_type, msg_id, network_num, node_id, _payload_length = \
bloxroute_message_factory.get_broadcast_message_preview(create_input_buffer_with_bytes(
message.rawbytes()[:message.HEADER_LENGTH + SHA256_HASH_LEN + NETWORK_NUM_LEN + NODE_ID_SIZE_IN_BYTES +
BROADCAST_TYPE_LEN]))
self.assertTrue(is_full_message)
self.assertEqual(expected_hash, block_hash)
self.assertEqual(self.BROADCAST_TYPE, broadcast_type)
self.assertEqual(self.MESSAGE_ID, msg_id)
self.assertEqual(self.NODE_ID, node_id)
self.assertEqual(self.NETWORK_NUM, network_num)
def test_message_preview_success_all_types(self):
self.get_message_preview_successfully(HelloMessage(protocol_version=1, network_num=2),
HelloMessage.MESSAGE_TYPE,
VersionMessage.VERSION_MESSAGE_LENGTH + UL_INT_SIZE_IN_BYTES +
NODE_ID_SIZE_IN_BYTES - UL_INT_SIZE_IN_BYTES)
self.get_message_preview_successfully(AckMessage(), AckMessage.MESSAGE_TYPE, constants.CONTROL_FLAGS_LEN)
self.get_message_preview_successfully(PingMessage(), PingMessage.MESSAGE_TYPE, 9)
self.get_message_preview_successfully(PongMessage(), PongMessage.MESSAGE_TYPE, 17)
blob = bytearray(1 for _ in range(4))
self.get_message_preview_successfully(BroadcastMessage(self.HASH, 1, self.NODE_ID, self.BROADCAST_TYPE, True,
blob),
BroadcastMessage.MESSAGE_TYPE,
SHA256_HASH_LEN + NETWORK_NUM_LEN + constants.BROADCAST_TYPE_LEN +
BLOCK_ENCRYPTED_FLAG_LEN + constants.NODE_ID_SIZE_IN_BYTES + len(blob) +
constants.CONTROL_FLAGS_LEN)
self.get_message_preview_successfully(TxMessage(self.HASH, 1, self.NODE_ID, 12, blob),
TxMessage.MESSAGE_TYPE,
SHA256_HASH_LEN + NETWORK_NUM_LEN + UL_INT_SIZE_IN_BYTES +
TRANSACTION_FLAG_LEN + constants.NODE_ID_SIZE_IN_BYTES + len(blob) +
constants.DOUBLE_SIZE_IN_BYTES +
constants.CONTROL_FLAGS_LEN)
self.get_message_preview_successfully(KeyMessage(self.HASH, 1, self.NODE_ID,
bytearray(1 for _ in range(KEY_SIZE))),
KeyMessage.MESSAGE_TYPE,
SHA256_HASH_LEN + KEY_SIZE + NETWORK_NUM_LEN +
constants.NODE_ID_SIZE_IN_BYTES + constants.CONTROL_FLAGS_LEN)
self.get_message_preview_successfully(BlockHoldingMessage(self.HASH, 1, self.NODE_ID),
BlockHoldingMessage.MESSAGE_TYPE,
SHA256_HASH_LEN + NETWORK_NUM_LEN + constants.NODE_ID_SIZE_IN_BYTES +
constants.CONTROL_FLAGS_LEN)
get_txs = [1, 2, 3]
self.get_message_preview_successfully(GetTxsMessage(get_txs), GetTxsMessage.MESSAGE_TYPE,
UL_INT_SIZE_IN_BYTES + UL_INT_SIZE_IN_BYTES * len(
get_txs) + constants.CONTROL_FLAGS_LEN)
txs = [TransactionInfo(Sha256Hash(crypto.double_sha256(b"123")), bytearray(4), 1),
TransactionInfo(Sha256Hash(crypto.double_sha256(b"234")), bytearray(8), 2)]
expected_length = (UL_INT_SIZE_IN_BYTES +
sum(UL_INT_SIZE_IN_BYTES + SHA256_HASH_LEN + UL_INT_SIZE_IN_BYTES +
len(tx.contents) for tx in txs) + constants.CONTROL_FLAGS_LEN)
self.get_message_preview_successfully(TxsMessage(txs), TxsMessage.MESSAGE_TYPE, expected_length)
expected_length = (2 * constants.DOUBLE_SIZE_IN_BYTES) + (5 * constants.UL_SHORT_SIZE_IN_BYTES) + \
(7 * constants.UL_INT_SIZE_IN_BYTES) + constants.IP_ADDR_SIZE_IN_BYTES + \
constants.CONTROL_FLAGS_LEN
node_stats = {}
helpers.add_stats_to_node_stats(
node_stats,
"127.0.0.1", 8001,
200, 300, 400, 500, 600, 700, 800, 100, 50
)
self.get_message_preview_successfully(
BdnPerformanceStatsMessage(
datetime.utcnow(), datetime.utcnow(), 100, node_stats
),
BdnPerformanceStatsMessage.MESSAGE_TYPE,
expected_length
)
# multi node bdn stats message
expected_length = (constants.CONTROL_FLAGS_LEN +
(2 * constants.DOUBLE_SIZE_IN_BYTES) + # start/end time
constants.UL_SHORT_SIZE_IN_BYTES + # memory
constants.UL_SHORT_SIZE_IN_BYTES + # num blockchain peers
(3 * # num blockchain peers
(constants.IP_ADDR_SIZE_IN_BYTES + # ip
constants.UL_SHORT_SIZE_IN_BYTES + # port
(2 * constants.UL_SHORT_SIZE_IN_BYTES) + # original block stats
(7 * constants.UL_INT_SIZE_IN_BYTES)))) # rest of stats
node_stats = {}
helpers.add_stats_to_node_stats(
node_stats,
"127.0.0.1", 8001,
200, 300, 400, 500, 600, 700, 800, 100, 50
)
helpers.add_stats_to_node_stats(
node_stats,
"127.0.0.2", 8002,
200, 300, 400, 500, 600, 700, 800, 100, 50
)
helpers.add_stats_to_node_stats(
node_stats,
"127.0.0.3", 8003,
200, 300, 400, 500, 600, 700, 800, 100, 50
)
self.get_message_preview_successfully(
BdnPerformanceStatsMessage(
datetime.utcnow(), datetime.utcnow(), 100, node_stats
),
BdnPerformanceStatsMessage.MESSAGE_TYPE,
expected_length
)
tx_info = TransactionInfo(crypto.double_sha256(b"123"), bytearray(4), 1)
expected_length = constants.NETWORK_NUM_LEN + constants.SID_LEN + SHA256_HASH_LEN + \
constants.UL_INT_SIZE_IN_BYTES + constants.CONTROL_FLAGS_LEN + len(tx_info.contents)
self.get_message_preview_successfully(TxContentsMessage(5, tx_info),
TxContentsMessage.MESSAGE_TYPE,
expected_length)
expected_length = constants.NETWORK_NUM_LEN + constants.SID_LEN + constants.CONTROL_FLAGS_LEN
self.get_message_preview_successfully(GetTxContentsMessage(1, 2),
GetTxContentsMessage.MESSAGE_TYPE,
expected_length)
def test_message_preview_incomplete(self):
message = HelloMessage(protocol_version=1, network_num=2)
is_full_message, command, payload_length = bloxroute_message_factory.get_message_header_preview_from_input_buffer(
create_input_buffer_with_bytes(message.rawbytes()[:-1])
)
self.assertFalse(is_full_message)
self.assertEqual(b"hello", command)
self.assertEqual(VersionMessage.VERSION_MESSAGE_LENGTH + UL_INT_SIZE_IN_BYTES + NODE_ID_SIZE_IN_BYTES -
UL_INT_SIZE_IN_BYTES, payload_length)
is_full_message, command, payload_length = bloxroute_message_factory.get_message_header_preview_from_input_buffer(
create_input_buffer_with_bytes(message.rawbytes()[:1])
)
self.assertFalse(is_full_message)
self.assertIsNone(command)
self.assertIsNone(payload_length)
def test_message_hash_preview(self):
blob = bytearray(1 for _ in range(4))
self.get_hashed_message_preview_successfully(BroadcastMessage(self.HASH, self.NETWORK_NUM, self.NODE_ID,
self.BROADCAST_TYPE, True, blob),
self.HASH)
def test_message_hash_preview_incomplete(self):
blob = bytearray(1 for _ in range(4))
broadcast_message = BroadcastMessage(self.HASH, 123, self.NODE_ID, self.BROADCAST_TYPE, True, blob)
is_full_message, block_hash, broadcast_type, msg_id, network_num, node_id, payload_length = \
bloxroute_message_factory.get_broadcast_message_preview(
create_input_buffer_with_bytes(broadcast_message.rawbytes()
[:BX_HDR_COMMON_OFF + SHA256_HASH_LEN + NETWORK_NUM_LEN - 1]))
self.assertFalse(is_full_message)
self.assertIsNone(block_hash)
self.assertIsNone(broadcast_type)
self.assertIsNone(msg_id)
self.assertIsNone(network_num)
self.assertIsNone(node_id)
self.assertIsNone(payload_length)
def test_create_message_success_all_types(self):
test_network_num = 10
test_protocol_version = bloxroute_version_manager.CURRENT_PROTOCOL_VERSION
hello_message = self.create_message_successfully(HelloMessage(protocol_version=test_protocol_version,
network_num=test_network_num,
node_id=self.NODE_ID
),
HelloMessage)
self.assertEqual(test_protocol_version, hello_message.protocol_version())
self.assertEqual(test_network_num, hello_message.network_num())
self.assertEqual(self.NODE_ID, hello_message.node_id())
self.create_message_successfully(AckMessage(), AckMessage)
self.create_message_successfully(PingMessage(), PingMessage)
self.create_message_successfully(PongMessage(), PongMessage)
blob = bytearray(4)
broadcast_message = self.create_message_successfully(BroadcastMessage(self.HASH,
network_num=test_network_num,
is_encrypted=True,
source_id=self.NODE_ID,
blob=blob),
BroadcastMessage)
self.assertEqual(self.HASH, broadcast_message.block_hash())
self.assertEqual(test_network_num, broadcast_message.network_num())
self.assertEqual(self.NODE_ID, broadcast_message.source_id())
self.assertTrue(broadcast_message.is_encrypted())
self.assertEqual(blob, broadcast_message.blob().tobytes())
sid = 12
tx_val = bytes(1 for _ in range(5))
tx_message = self.create_message_successfully(TxMessage(self.HASH,
network_num=test_network_num,
source_id=self.NODE_ID,
short_id=sid,
tx_val=tx_val),
TxMessage)
self.assertEqual(self.HASH, tx_message.tx_hash())
self.assertEqual(self.NODE_ID, tx_message.source_id())
self.assertEqual(sid, tx_message.short_id())
self.assertEqual(test_network_num, tx_message.network_num())
self.assertEqual(tx_val, tx_message.tx_val())
key = bytearray(1 for _ in range(KEY_SIZE))
key_message = self.create_message_successfully(
KeyMessage(self.HASH, test_network_num, self.NODE_ID, bytearray(1 for _ in range(KEY_SIZE))),
KeyMessage
)
self.assertEqual(key, key_message.key())
self.assertEqual(self.NODE_ID, key_message.source_id())
self.assertEqual(test_network_num, key_message.network_num())
self.assertEqual(self.HASH, key_message.block_hash())
block_holding_message = self.create_message_successfully(
BlockHoldingMessage(self.HASH, test_network_num, self.NODE_ID),
BlockHoldingMessage
)
self.assertEqual(self.NODE_ID, block_holding_message.source_id())
self.assertEqual(test_network_num, block_holding_message.network_num())
self.assertEqual(self.HASH, block_holding_message.block_hash())
get_txs = [1, 2, 3]
get_txs_message = self.create_message_successfully(GetTxsMessage(get_txs), GetTxsMessage)
self.assertEqual(get_txs, get_txs_message.get_short_ids())
txs = [TransactionInfo(Sha256Hash(crypto.double_sha256(b"123")), bytearray(4), 1),
TransactionInfo(Sha256Hash(crypto.double_sha256(b"234")), bytearray(8), 2)]
txs_message = self.create_message_successfully(TxsMessage(txs), TxsMessage)
result_txs = txs_message.get_txs()
for i, result_tx in enumerate(result_txs):
self.assertEqual(txs[i].hash, result_tx.hash)
self.assertEqual(txs[i].contents, result_tx.contents)
self.assertEqual(txs[i].short_id, result_tx.short_id)
get_tx_contents_message = self.create_message_successfully(GetTxContentsMessage(test_network_num, sid),
GetTxContentsMessage)
self.assertEqual(sid, get_tx_contents_message.get_short_id())
self.assertEqual(test_network_num, get_tx_contents_message.network_num())
tx_info = TransactionInfo(Sha256Hash(crypto.double_sha256(b"123")), bytearray(4), 1)
tx_contents_message = self.create_message_successfully(TxContentsMessage(test_network_num, tx_info),
TxContentsMessage)
self.assertEqual(test_network_num, tx_contents_message.network_num())
result_tx_info = tx_contents_message.get_tx_info()
self.assertEqual(tx_info.hash, result_tx_info.hash)
self.assertEqual(tx_info.contents, result_tx_info.contents)
self.assertEqual(tx_info.short_id, result_tx_info.short_id)
short_ids = [1, 2, 33, 4444, 1234]
block_hash = Sha256Hash(helpers.generate_bytearray(32))
get_block_txs_message: GetCompressedBlockTxsMessage = self.create_message_successfully(
GetCompressedBlockTxsMessage(self.NETWORK_NUM, block_hash, short_ids),
GetCompressedBlockTxsMessage
)
self.assertEqual(self.NETWORK_NUM, get_block_txs_message.network_num())
self.assertEqual(block_hash, get_block_txs_message.block_hash())
self.assertEqual(len(short_ids), len(get_block_txs_message))
self.assertEqual(short_ids, get_block_txs_message.get_short_ids())
txs_info = [
TransactionInfo(Sha256Hash(helpers.generate_bytearray(32)), helpers.generate_bytearray(200), 111),
TransactionInfo(Sha256Hash(helpers.generate_bytearray(32)), helpers.generate_bytearray(300), 222),
TransactionInfo(Sha256Hash(helpers.generate_bytearray(32)), helpers.generate_bytearray(400), 333)
]
block_txs_message: CompressedBlockTxsMessage = self.create_message_successfully(
CompressedBlockTxsMessage(self.NETWORK_NUM, block_hash, txs_info),
CompressedBlockTxsMessage
)
self.assertEqual(self.NETWORK_NUM, block_txs_message.network_num())
self.assertEqual(block_hash, block_txs_message.block_hash())
self.assertEqual(len(txs_info), len(block_txs_message))
parsed_txs = block_txs_message.get_txs()
for index in range(len(txs_info)):
self.assertEqual(parsed_txs[index].short_id, txs_info[index].short_id)
self.assertEqual(parsed_txs[index].contents, txs_info[index].contents)
self.assertEqual(parsed_txs[index].hash, txs_info[index].hash)
def test_compressed_block_txs_message_to_txs_message(self):
block_hash = Sha256Hash(helpers.generate_bytearray(32))
txs_info = [
TransactionInfo(Sha256Hash(helpers.generate_bytearray(32)), helpers.generate_bytearray(200), 111),
TransactionInfo(Sha256Hash(helpers.generate_bytearray(32)), helpers.generate_bytearray(300), 222),
TransactionInfo(Sha256Hash(helpers.generate_bytearray(32)), helpers.generate_bytearray(400), 333)
]
block_txs_message = CompressedBlockTxsMessage(self.NETWORK_NUM, block_hash, txs_info)
txs_message = block_txs_message.to_txs_message()
self.assertEqual(TxsMessage.MESSAGE_TYPE, txs_message.msg_type())
parsed_txs = txs_message.get_txs()
self.assertEqual(len(parsed_txs), len(parsed_txs))
for index in range(len(txs_info)):
self.assertEqual(parsed_txs[index].short_id, txs_info[index].short_id)
self.assertEqual(parsed_txs[index].contents, txs_info[index].contents)
self.assertEqual(parsed_txs[index].hash, txs_info[index].hash)
raw_bytes = txs_message.rawbytes()
self.assertEqual(raw_bytes[:constants.STARTING_SEQUENCE_BYTES_LEN], constants.STARTING_SEQUENCE_BYTES)
self.assertIsNotNone(txs_message.payload())
self.assertIsNotNone(txs_message.payload_len())
# control flag check
self.assertEqual(raw_bytes[-1], 1)
def test_create_message_failure(self):
message = HelloMessage(protocol_version=1, network_num=2)
with self.assertRaises(PayloadLenError):
bloxroute_message_factory.create_message_from_buffer(message.rawbytes()[:-1])
def test_ping_response_msg(self):
ping = PingMessage(nonce=50)
self.assertEqual(50, ping.nonce())
msg = bloxroute_message_factory.create_message_from_buffer(ping.buf)
self.assertEqual(50, msg.nonce())
def test_pong_response_msg(self):
pong = PongMessage(nonce=50)
self.assertEqual(50, pong.nonce())
        msg = bloxroute_message_factory.create_message_from_buffer(pong.buf)
        self.assertEqual(50, msg.nonce())
def test_block_confirmation_msg(self):
short_ids = [random.randint(0, 1000000) for _ in range(150)]
tx_hashes = [
Sha256Hash(helpers.generate_bytes(crypto.SHA256_HASH_LEN))
for _ in range(10)
]
message = BlockConfirmationMessage(self.HASH, self.NETWORK_NUM, self.NODE_ID, sids=short_ids,
tx_hashes=tx_hashes)
self.get_message_preview_successfully(message,
BlockConfirmationMessage.MESSAGE_TYPE,
SHA256_HASH_LEN * len(tx_hashes) +
constants.UL_INT_SIZE_IN_BYTES * len(short_ids) +
AbstractBroadcastMessage.PAYLOAD_LENGTH +
constants.UL_INT_SIZE_IN_BYTES * 2)
rebuilt_msg = self.create_message_successfully(message, BlockConfirmationMessage)
self.assertEqual(self.HASH, rebuilt_msg.block_hash())
self.assertEqual(short_ids, rebuilt_msg.short_ids())
self.assertEqual(tx_hashes, rebuilt_msg.transaction_hashes())
self.assertEqual(self.NETWORK_NUM, rebuilt_msg.network_num())
self.assertEqual(self.NODE_ID, rebuilt_msg.source_id())
def test_transaction_cleanup_msg(self):
short_ids = [23, 99, 192, 1089, 3000500]
tx_hashes = [
Sha256Hash(helpers.generate_bytes(crypto.SHA256_HASH_LEN)),
Sha256Hash(helpers.generate_bytes(crypto.SHA256_HASH_LEN))
]
message = TransactionCleanupMessage(self.NETWORK_NUM, self.NODE_ID, sids=short_ids, tx_hashes=tx_hashes)
self.get_message_preview_successfully(message,
TransactionCleanupMessage.MESSAGE_TYPE,
SHA256_HASH_LEN * len(tx_hashes) +
constants.UL_INT_SIZE_IN_BYTES * len(short_ids) +
AbstractBroadcastMessage.PAYLOAD_LENGTH +
constants.UL_INT_SIZE_IN_BYTES * 2)
rebuilt_msg = self.create_message_successfully(message, TransactionCleanupMessage)
self.assertEqual(short_ids, rebuilt_msg.short_ids())
self.assertEqual(tx_hashes, rebuilt_msg.transaction_hashes())
self.assertEqual(self.NETWORK_NUM, rebuilt_msg.network_num())
self.assertEqual(self.NODE_ID, rebuilt_msg.source_id())
self.assertNotEqual(NULL_SHA256_HASH, rebuilt_msg.message_hash())
def test_tx_bx_message(self):
sid = 12
tx_val = bytes(1 for _ in range(5))
test_network_num = 4
timestamp = time.time() - 4
expected_tx_message = TxMessage(
self.HASH,
network_num=test_network_num,
source_id=self.NODE_ID,
short_id=sid,
tx_val=tx_val,
transaction_flag=TransactionFlag.PAID_TX,
timestamp=timestamp
)
tx_message = self.create_message_successfully(
expected_tx_message,
TxMessage)
self.assertEqual(self.HASH, tx_message.tx_hash())
self.assertEqual(self.NODE_ID, tx_message.source_id())
self.assertEqual(sid, tx_message.short_id())
self.assertEqual(test_network_num, tx_message.network_num())
self.assertEqual(tx_val, tx_message.tx_val())
self.assertEqual(TransactionFlag.PAID_TX, tx_message.transaction_flag())
self.assertEqual(timestamp, tx_message.timestamp())
new_timestamp = time.time() - 2
expected_tx_message.set_timestamp(new_timestamp)
self.assertEqual(new_timestamp, expected_tx_message.timestamp())
regenerated_tx_message = self.create_message_successfully(
expected_tx_message,
TxMessage
)
self.assertEqual(new_timestamp, regenerated_tx_message.timestamp())
def test_tx_bx_message_setting_attributes(self):
contents = helpers.generate_bytes(250)
timestamp = time.time()
tx_message = TxMessage(
self.HASH,
network_num=1,
source_id=self.NODE_ID,
short_id=2,
tx_val=contents,
transaction_flag=TransactionFlag.PAID_TX,
timestamp=timestamp
)
tx_message.clear_protected_fields()
rebuilt_tx_message = self.create_message_successfully(
tx_message,
TxMessage
)
self.assertEqual(constants.NULL_TX_SID, rebuilt_tx_message.short_id())
self.assertEqual(constants.NULL_TX_TIMESTAMP, rebuilt_tx_message.timestamp())
def test_quota_notification_message(self):
notification_code = NotificationCode.QUOTA_FILL_STATUS
args_list = ["10", str(EntityType.TRANSACTION.value), "100"]
raw_message = ",".join(args_list)
notification_message = self.create_message_successfully(
NotificationMessage(notification_code, raw_message),
NotificationMessage)
self.assertEqual(notification_code, notification_message.notification_code())
self.assertEqual(raw_message, notification_message.raw_message())
self.assertEqual(
notification_message.formatted_message(),
"10% of daily transaction quota with limit of 100 transactions per day is depleted."
)
def test_expiration_notification_message(self):
notification_code = NotificationCode.ACCOUNT_EXPIRED_NOTIFICATION
notification_message = self.create_message_successfully(
NotificationMessage(notification_code),
NotificationMessage
)
self.assertEqual(notification_code, notification_message.notification_code())
self.assertEqual(
notification_message.formatted_message(),
"The account associated with this gateway has expired. "
"Please visit https://portal.bloxroute.com to renew your subscription."
)
def test_bdn_performance_stats_message_one_node(self):
start_time = datetime.utcnow()
memory_utilization_mb = 700
node_1_bdn_stats = BdnPerformanceStatsData()
node_1_bdn_stats.new_blocks_received_from_blockchain_node = 100
node_1_bdn_stats.new_blocks_received_from_bdn = 200
node_1_bdn_stats.new_tx_received_from_blockchain_node = 300
node_1_bdn_stats.new_tx_received_from_bdn = constants.UNSIGNED_SHORT_MAX_VALUE + 1 # unsigned short max (0xffff) + 1
node_1_bdn_stats.new_blocks_seen = 800
node_1_bdn_stats.new_block_messages_from_blockchain_node = 900
node_1_bdn_stats.new_block_announcements_from_blockchain_node = 1000
node_1_bdn_stats.tx_sent_to_node = 1100
node_1_bdn_stats.duplicate_tx_from_node = 600
end_time = datetime.utcnow()
node_stats = {}
node_1_ip = "127.0.0.1"
node_1_port = 8001
node_stats[IpEndpoint(node_1_ip, node_1_port)] = node_1_bdn_stats
bdn_stats_msg = self.create_message_successfully(
BdnPerformanceStatsMessage(
start_time,
end_time,
memory_utilization_mb,
node_stats
),
BdnPerformanceStatsMessage)
ip_endpoint, stats = bdn_stats_msg.node_stats().popitem()
self.assertEqual(start_time, bdn_stats_msg.interval_start_time())
self.assertEqual(end_time, bdn_stats_msg.interval_end_time())
self.assertEqual(
node_1_bdn_stats.new_blocks_received_from_blockchain_node,
stats.new_blocks_received_from_blockchain_node
)
self.assertEqual(node_1_bdn_stats.new_blocks_received_from_bdn, stats.new_blocks_received_from_bdn)
self.assertEqual(
node_1_bdn_stats.new_tx_received_from_blockchain_node,
stats.new_tx_received_from_blockchain_node
)
self.assertEqual(node_1_bdn_stats.new_tx_received_from_bdn, stats.new_tx_received_from_bdn)
self.assertEqual(memory_utilization_mb, bdn_stats_msg.memory_utilization())
self.assertEqual(node_1_bdn_stats.new_blocks_seen, stats.new_blocks_seen)
self.assertEqual(
node_1_bdn_stats.new_block_messages_from_blockchain_node,
stats.new_block_messages_from_blockchain_node
)
self.assertEqual(
node_1_bdn_stats.new_block_announcements_from_blockchain_node,
stats.new_block_announcements_from_blockchain_node
)
self.assertEqual(node_1_bdn_stats.tx_sent_to_node, stats.tx_sent_to_node)
self.assertEqual(node_1_bdn_stats.duplicate_tx_from_node, stats.duplicate_tx_from_node)
self.assertEqual(node_1_ip, ip_endpoint.ip_address)
self.assertEqual(node_1_port, ip_endpoint.port)
def test_bdn_performance_stats_message_multi_node(self):
node_stats = {}
start_time = datetime.utcnow()
memory_utilization_mb = 800
node_1_bdn_stats = BdnPerformanceStatsData()
node_1_bdn_stats.new_blocks_received_from_blockchain_node = 200
node_1_bdn_stats.new_blocks_received_from_bdn = 300
node_1_bdn_stats.new_tx_received_from_blockchain_node = 400
node_1_bdn_stats.new_tx_received_from_bdn = constants.UNSIGNED_SHORT_MAX_VALUE + 1 # unsigned short max (0xffff) + 1
node_1_bdn_stats.new_blocks_seen = 700
node_1_bdn_stats.new_block_messages_from_blockchain_node = 200
node_1_bdn_stats.new_block_announcements_from_blockchain_node = 900
node_1_bdn_stats.tx_sent_to_node = 1100
node_1_bdn_stats.duplicate_tx_from_node = 600
end_time = datetime.utcnow()
node_1_ip = "127.0.0.1"
node_1_port = 8001
node_2_bdn_stats = BdnPerformanceStatsData()
node_2_bdn_stats.new_blocks_received_from_blockchain_node = 100
node_2_bdn_stats.new_blocks_received_from_bdn = 200
node_2_bdn_stats.new_tx_received_from_blockchain_node = 300
node_2_bdn_stats.new_tx_received_from_bdn = constants.UNSIGNED_SHORT_MAX_VALUE + 1 # unsigned short max (0xffff) + 1
node_2_bdn_stats.new_blocks_seen = 800
node_2_bdn_stats.new_block_messages_from_blockchain_node = 900
node_2_bdn_stats.new_block_announcements_from_blockchain_node = 1000
node_2_bdn_stats.tx_sent_to_node = 1200
node_2_bdn_stats.duplicate_tx_from_node = 500
node_2_ip = "127.0.0.2"
node_2_port = 8002
node_stats[IpEndpoint(node_1_ip, node_1_port)] = node_1_bdn_stats
node_stats[IpEndpoint(node_2_ip, node_2_port)] = node_2_bdn_stats
bdn_stats_msg = self.create_message_successfully(
BdnPerformanceStatsMessage(
start_time,
end_time,
memory_utilization_mb,
node_stats
),
BdnPerformanceStatsMessage)
stats = bdn_stats_msg.node_stats()[IpEndpoint(node_1_ip, node_1_port)]
self.assertEqual(start_time, bdn_stats_msg.interval_start_time())
self.assertEqual(end_time, bdn_stats_msg.interval_end_time())
self.assertEqual(memory_utilization_mb, bdn_stats_msg.memory_utilization())
self.assertEqual(
node_1_bdn_stats.new_blocks_received_from_blockchain_node,
stats.new_blocks_received_from_blockchain_node
)
self.assertEqual(node_1_bdn_stats.new_blocks_received_from_bdn, stats.new_blocks_received_from_bdn)
self.assertEqual(
node_1_bdn_stats.new_tx_received_from_blockchain_node,
stats.new_tx_received_from_blockchain_node
)
self.assertEqual(node_1_bdn_stats.new_tx_received_from_bdn, stats.new_tx_received_from_bdn)
self.assertEqual(node_1_bdn_stats.new_blocks_seen, stats.new_blocks_seen)
self.assertEqual(
node_1_bdn_stats.new_block_messages_from_blockchain_node,
stats.new_block_messages_from_blockchain_node
)
self.assertEqual(
node_1_bdn_stats.new_block_announcements_from_blockchain_node,
stats.new_block_announcements_from_blockchain_node
)
self.assertEqual(node_1_bdn_stats.tx_sent_to_node, stats.tx_sent_to_node)
self.assertEqual(node_1_bdn_stats.duplicate_tx_from_node, stats.duplicate_tx_from_node)
stats = bdn_stats_msg.node_stats()[IpEndpoint(node_2_ip, node_2_port)]
self.assertEqual(
node_2_bdn_stats.new_blocks_received_from_blockchain_node,
stats.new_blocks_received_from_blockchain_node
)
self.assertEqual(node_2_bdn_stats.new_blocks_received_from_bdn, stats.new_blocks_received_from_bdn)
self.assertEqual(
node_2_bdn_stats.new_tx_received_from_blockchain_node,
stats.new_tx_received_from_blockchain_node
)
self.assertEqual(node_2_bdn_stats.new_tx_received_from_bdn, stats.new_tx_received_from_bdn)
self.assertEqual(node_2_bdn_stats.new_blocks_seen, stats.new_blocks_seen)
self.assertEqual(
node_2_bdn_stats.new_block_messages_from_blockchain_node,
stats.new_block_messages_from_blockchain_node
)
self.assertEqual(
node_2_bdn_stats.new_block_announcements_from_blockchain_node,
stats.new_block_announcements_from_blockchain_node
)
self.assertEqual(node_2_bdn_stats.tx_sent_to_node, stats.tx_sent_to_node)
self.assertEqual(node_2_bdn_stats.duplicate_tx_from_node, stats.duplicate_tx_from_node)
def test_routing_message(self):
origin_node_id = helpers.generate_node_id()
forwarding_node_id = helpers.generate_node_id()
node_id_2 = helpers.generate_node_id()
node_id_3 = helpers.generate_node_id()
routing_update_id = helpers.generate_object_hash()
routing_update = [node_id_2, node_id_3]
expected_length = (
RoutingUpdateMessage.PAYLOAD_LENGTH
+ 2 * constants.NODE_ID_SIZE_IN_BYTES
)
self.get_message_preview_successfully(
RoutingUpdateMessage(
helpers.generate_object_hash(),
"",
origin_node_id,
forwarding_node_id,
routing_update_id,
routing_update
),
RoutingUpdateMessage.MESSAGE_TYPE,
expected_length
)
routing_update_message = self.create_message_successfully(
RoutingUpdateMessage(
helpers.generate_object_hash(),
"",
origin_node_id,
forwarding_node_id,
routing_update_id,
routing_update
),
RoutingUpdateMessage
)
self.assertEqual(origin_node_id, routing_update_message.origin_node_id())
self.assertEqual(forwarding_node_id, routing_update_message.forwarding_node_id())
self.assertEqual(routing_update, routing_update_message.routing_update())
self.assertEqual(routing_update_id, routing_update_message.routing_update_id())
| 52.524138 | 125 | 0.666859 | 4,291 | 38,080 | 5.487299 | 0.071312 | 0.07772 | 0.022424 | 0.022084 | 0.676506 | 0.593604 | 0.540304 | 0.50361 | 0.462159 | 0.408265 | 0 | 0.026147 | 0.263813 | 38,080 | 724 | 126 | 52.596685 | 0.813762 | 0.007064 | 0 | 0.378505 | 0 | 0 | 0.009049 | 0.000953 | 0 | 0 | 0 | 0 | 0.216511 | 1 | 0.03271 | false | 0 | 0.065421 | 0.001558 | 0.11215 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d56f231d502d5e5a475db3b39fa0d4b71b1c2be | 824 | py | Python | util.py | premkarat/expensify_bot | d464d85793079f8b5229a91ba907068527fd7255 | [
"Apache-2.0"
] | 2 | 2021-07-14T02:56:07.000Z | 2021-07-14T05:17:50.000Z | util.py | premkarat/expensify_bot | d464d85793079f8b5229a91ba907068527fd7255 | [
"Apache-2.0"
] | null | null | null | util.py | premkarat/expensify_bot | d464d85793079f8b5229a91ba907068527fd7255 | [
"Apache-2.0"
] | null | null | null | import os
import pdfplumber
from config import Config, bot, logger
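
# stack holds the ids of Telegram messages the bot has sent; it is drained
# before every new send/reply so that only the most recent message remains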
stack = []
def delete_all_previous_messages():
global stack
while stack:
bot.delete_message(Config.TELEGRAM.CHAT_ID, stack.pop())
def send_message(msg, markup=None):
global stack
delete_all_previous_messages()
msg = bot.send_message(Config.TELEGRAM.CHAT_ID, msg, reply_markup=markup)
stack.append(msg.id)
def reply_to(msg, message):
global stack
delete_all_previous_messages()
msg = bot.reply_to(msg, message)
stack.append(msg.id)
def pdf2txt(pdf):
with pdfplumber.open(pdf) as pdf:
page = pdf.pages[0]
return page.extract_text()
def cleanup(expenses):
for expense in expenses:
os.remove(expense.pdf)
expense.delete()
logger.info('Remove PDFs: [OK]') | 19.619048 | 77 | 0.686893 | 113 | 824 | 4.849558 | 0.415929 | 0.04927 | 0.093066 | 0.136861 | 0.321168 | 0.153285 | 0.153285 | 0.153285 | 0 | 0 | 0 | 0.003067 | 0.208738 | 824 | 42 | 78 | 19.619048 | 0.837423 | 0 | 0 | 0.259259 | 0 | 0 | 0.020606 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.185185 | false | 0 | 0.111111 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d57e15dd7a300bcba3ad901ed54a79b3e19d9be | 5,327 | py | Python | utils_BINGO/make_dataset_in_VOC_format.py | IMBINGO95/FairMOT | c496e911a89870a9b6988d93f80e680d01ee8afc | [
"MIT"
] | null | null | null | utils_BINGO/make_dataset_in_VOC_format.py | IMBINGO95/FairMOT | c496e911a89870a9b6988d93f80e680d01ee8afc | [
"MIT"
] | null | null | null | utils_BINGO/make_dataset_in_VOC_format.py | IMBINGO95/FairMOT | c496e911a89870a9b6988d93f80e680d01ee8afc | [
"MIT"
] | null | null | null |
import os
import collections
import json
import cv2
import time
from utils_BINGO import divide_img_range
from utils_BINGO import gtsToxml
def mkdataset_in_VOC_format(im_path,save_dir,gt_json_path ,dataset_name,section_num, mode=0):
'''
    To make a detection dataset in VOC format.
    :param im_path: path to the folder that holds the original imgs
    :param save_dir: the target path where we save the transformed imgs
    :param gt_json_path: gts in json format, each gt given as [which frame, ID, x, y, w, h, channel, jersey_number]
    :param dataset_name: name of the dataset; used as the output folder name
    :param section_num: [x, y] means we want to divide the original img into x*y parts
    :param mode: mode = 0 means to create a training dataset, mode = 1 means to create a testing dataset.
    :return: None
'''
dataset_path = os.path.join(save_dir, dataset_name)
img_save_path = os.path.join(dataset_path,'JPEGImages/')
xml_save_path = os.path.join(dataset_path, 'Annotations/')
txt_save_path = os.path.join(dataset_path,'ImageSets/Main/')
# mk folders in VOC format!
if not os.path.exists(img_save_path):
os.makedirs(img_save_path)
if not os.path.exists(xml_save_path):
os.makedirs(xml_save_path)
if not os.path.exists(txt_save_path):
os.makedirs(txt_save_path)
    # create a .txt file to save the img IDs that we want to train or test on
if mode == 0:
txt_save_path = txt_save_path + 'train.txt'
else:
txt_save_path = txt_save_path + 'test.txt'
txt_file = open(txt_save_path, 'w')
    # create two OrderedDicts to contain gts:
    # separate the gts by frame first,
    # then separate the gts in one frame into the individual parts
gts_by_frame = collections.OrderedDict()
gts_seperate = collections.OrderedDict()
    # deliver each gt to its own dict
with open(gt_json_path, 'r') as f:
gts = json.load(f)
for gt in gts:
frame = '{:0>6}'.format(gt[0])
# change [which frame,ID,xl,yl,w,h,c,NUM] to [which frame,ID,xl,yl,xr,yr,c,NUM]
gt[4] = gt[2] + gt[4]
gt[5] = gt[3] + gt[5]
if frame not in gts_by_frame:
gts_by_frame[frame] = []
gts_seperate[frame] = collections.OrderedDict()
gts_by_frame[frame].append(gt)
else:
gts_by_frame[frame].append(gt)
    # read all the img names in the original img folder.
im_fnames = sorted(
(os.path.splitext(fname)[0] for fname in os.listdir(im_path) if os.path.splitext(fname)[-1] == '.jpg'))
for i, frame in enumerate(im_fnames):
image = cv2.imread(im_path + frame + '.jpg', cv2.IMREAD_COLOR)
loop_start = time.time()
im_width, im_height = image.shape[1], image.shape[0]
        # divide img into several parts
section_w, box_range_w, part_w = divide_img_range(im_width, im_height, section_num[0])
section_h, box_range_h, part_h = divide_img_range(im_width, im_height, section_num[1], mode=1)
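        # assumption inferred from the indexing below: each entry of
        # section_w / section_h is a (yl, yr, xl, xr) boundary tuple for a
        # tile, and the part_w / part_h values unpacked here are immediately
        # shadowed by the loop variables of the same name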
# Traverse by width
for part_w in range(len(section_w)):
_, _, xl, xr = section_w[part_w]
# Traverse by height
for part_h in range(len(section_h)):
yl, yr, _, _ = section_h[part_h]
part_img = image[yl:yr, xl:xr]
part_num = '_{:0>2}_{:0>2}'.format(part_w + 1, part_h + 1)
gts_seperate[frame][part_num] = []
                # deliver every gt to the different parts;
                # because different parts have overlapping regions,
                # a gt can be delivered to more than one part
for gt in gts_by_frame[frame]:
if gt[2] > xl and gt[3] > yl and gt[4] < xr and gt[5] < yr:
xml = []
# the label of this gt ,must be str
xml.append('person')
# the pose of this gt ,must be str
xml.append('stand')
                        # Is this gt truncated? 0 for no, 1 for yes; must be str
xml.append('0')
# Is this gt difficult to detect? 0 for no, 1 for yes. ,must be str
xml.append('0')
                        # adjust [xl,yl,xr,yr] from coordinates relative to (0,0) of the original image to the part image
xml.append(gt[2] - xl) # xl
xml.append(gt[3] - yl) # yl
xml.append(gt[4] - xl) # xr
xml.append(gt[5] - yl) # yr
gts_seperate[frame][part_num].append(xml)
# only save the imgs that include gts.
if len(gts_seperate[frame][part_num]) > 0:
# draw rectangle on the imgs.
# part_img = draw_detection(part_img, gts_seperate[frame][part_num])
file_name = img_save_path + frame + part_num + '.jpg'
cv2.imwrite(file_name, part_img)
# write gts into xml file .
gtsToxml(dataset_name=dataset_name, file_dir=xml_save_path, img_ID=frame + part_num, \
size=part_img.shape, gts=gts_seperate[frame][part_num])
txt_file.writelines(frame + part_num + '\n')
        print('frame {:0>6}'.format(frame) + ' has been processed!')
txt_file.close() | 44.024793 | 111 | 0.58288 | 787 | 5,327 | 3.758577 | 0.232529 | 0.043272 | 0.02975 | 0.033807 | 0.197769 | 0.142326 | 0.1119 | 0.065585 | 0.048005 | 0.021636 | 0 | 0.013454 | 0.316313 | 5,327 | 121 | 112 | 44.024793 | 0.798737 | 0.281021 | 0 | 0.083333 | 0 | 0 | 0.036968 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013889 | false | 0 | 0.097222 | 0 | 0.111111 | 0.013889 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
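# Call sketch for the function above (hypothetical paths): section_num=[2, 2]
# splits every frame into a 2x2 grid of overlapping crops before writing the
# VOC files. im_path needs a trailing slash because paths are concatenated.
def _make_dataset_example():
    mkdataset_in_VOC_format(im_path='/data/frames/', save_dir='/data/voc/',
                            gt_json_path='/data/gt.json', dataset_name='match01',
                            section_num=[2, 2], mode=0)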
2d5897732ed9db81099d68890a65faaf33da2f6d | 3,553 | py | Python | src/tests/test_tpm.py | vvkorz/marksim | 743aed6b3d956a4244f61b784f400281af550143 | [
"MIT"
] | 1 | 2020-06-11T00:03:58.000Z | 2020-06-11T00:03:58.000Z | src/tests/test_tpm.py | vvkorz/marksim | 743aed6b3d956a4244f61b784f400281af550143 | [
"MIT"
] | null | null | null | src/tests/test_tpm.py | vvkorz/marksim | 743aed6b3d956a4244f61b784f400281af550143 | [
"MIT"
] | 1 | 2020-06-11T00:03:59.000Z | 2020-06-11T00:03:59.000Z |
import unittest
from src.marksim import tpm
import numpy as np
class TestTPM(unittest.TestCase):
"""
Tests for the transition probability matrix calculation
"""
def nan_equal(self, a, b):
"""
compare two numpy arrays with NaN
:param a: np.array
:param b: np.array
:return: Boolean
"""
try:
np.testing.assert_equal(a, b)
except AssertionError:
return False
return True
def test_invalid_array_shape(self):
"""
should only work with 2D
"""
a = tpm.TPM()
self.assertRaises(AssertionError, a.parse, np.random.rand(250, 20, 80))
def test_invalid_array_states(self):
"""
should only work with 2D arrays where number of rows is bigger that number of TPM states
"""
a = tpm.TPM()
a.n_states = 100
self.assertRaises(AssertionError, a.parse, np.random.rand(60, 20))
def test_get_bins(self):
benchmarkarray = np.array([5, 7, 0, 0, 6, 2, 1, 8, 7, 4, 3, 5, 6, 4, 9, 2, 8, 3, 1, 9])
array = np.array([10, 14, 0, 1, 12, 4, 3, 17, 15, 9, 7, 11, 13, 8, 18, 5, 16, 6, 2, 19])
self.assertTrue(np.array_equal(benchmarkarray, list(tpm.TPM.get_bins(array, bins=10))))
def test_calculate_states(self):
array = np.array([np.nan, 1, 2, np.nan, np.nan, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, np.nan, 17, 18, 19])
benchmarkarray = np.array([np.nan, 0., 0., np.nan, np.nan, 1., 1., 2., 2., 3., 3., 4., 4., 5., 5., 6., np.nan, 7., 8., 9.])
self.assertTrue(self.nan_equal(benchmarkarray, tpm.TPM.calculate_states(array, n_states=10)))
def test_convert_to_states(self):
array = np.array([[9., 6., 6., 2., 2., 1., np.nan, np.nan, 9., 7.],
[np.nan, 7., np.nan, 8., np.nan, 4., 5., 8., 8., 9.],
[0., 1., 7., np.nan, 8., 7., np.nan, 6., 4., 0.],
[3., np.nan, 0., np.nan, 4., np.nan, 2., 4., 0., 1.],
[7., 4., 4., 1., 0., 3., 8., 9., np.nan, 2.],
[8., 3., 8., 7., 6., 2., 0., 0., np.nan, np.nan],
[np.nan, 9., 9., 9., 9., 0., 6., 1., 2., np.nan],
[5., 2., 3., 5., np.nan, 5., 9., 3., 1., 5.],
[4., np.nan, 2., 6., 7., np.nan, 3., 7., 6., 4.],
[1., 8., 1., 3., 3., 9., 7., 2., 3., 3.]])
benchmarkarray = np.array([[4., 2., 2., 0., 0., 0., np.nan, np.nan, 4., 3.],
[np.nan, 2., np.nan, 3., np.nan, 2., 1., 3., 3., 4.],
[0., 0., 3., np.nan, 3., 3., np.nan, 2., 2., 0.],
[1., np.nan, 0., np.nan, 1., np.nan, 0., 2., 0., 0.],
[2., 1., 2., 0., 0., 1., 3., 4., np.nan, 1.],
[3., 1., 3., 2., 2., 1., 0., 0., np.nan, np.nan],
[np.nan, 4., 4., 4., 4., 0., 2., 0., 1., np.nan],
[2., 0., 1., 1., np.nan, 2., 4., 1., 0., 2.],
[1., np.nan, 1., 2., 2., np.nan, 1., 3., 2., 2.],
[0., 3., 0., 1., 1., 4., 2., 1., 1., 1.]])
self.assertTrue(self.nan_equal(benchmarkarray, tpm.TPM.convert_to_states(array, n_states=5)))
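# Optional entry point sketch: lets the test case above run directly via
# `python test_tpm.py` instead of through a test runner.
if __name__ == '__main__':
    unittest.main()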
| 48.671233 | 147 | 0.40698 | 523 | 3,553 | 2.717017 | 0.17782 | 0.154821 | 0.039409 | 0.056298 | 0.270936 | 0.206897 | 0.156228 | 0.156228 | 0 | 0 | 0 | 0.127896 | 0.392626 | 3,553 | 72 | 148 | 49.347222 | 0.530584 | 0.072896 | 0 | 0.042553 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148936 | 1 | 0.12766 | false | 0 | 0.06383 | 0 | 0.255319 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d5910b1a9cc99a518fcb1913ca80447357fe32f | 2,032 | py | Python | From Another World/scoreboard.py | Grantlee11/From_Another_World_Pygame | 1aa98162a458a1a4aacfbc9170eaa233db055e9e | [
"CC-BY-3.0"
] | null | null | null | From Another World/scoreboard.py | Grantlee11/From_Another_World_Pygame | 1aa98162a458a1a4aacfbc9170eaa233db055e9e | [
"CC-BY-3.0"
] | null | null | null | From Another World/scoreboard.py | Grantlee11/From_Another_World_Pygame | 1aa98162a458a1a4aacfbc9170eaa233db055e9e | [
"CC-BY-3.0"
] | null | null | null | import pygame.font
from pygame.sprite import Group
from ship import Ship
class Scoreboard():
"""A CLASS TO REPORT SCORING INFORMATION"""
def __init__(self, ai_settings, screen, stats):
"""INITIALIZE SCOREKEEPING ATTRIBUTES"""
self.screen = screen
self.screen_rect = screen.get_rect()
self.ai_settings = ai_settings
self.stats = stats
# FONT SETTINGS FOR SCORING INFORMATION
self.text_color = (255, 255, 255)
self.font = pygame.font.SysFont(None, 48)
# PREPARE THE INITIAL SCORE IMAGES
self.prep_score()
self.prep_level()
self.prep_ships()
def prep_score(self):
"""TURN THE SCORE INTO A RENDERED IMAGE"""
score_str = str(self.stats.score)
self.score_image = self.font.render(score_str, True, self.text_color, self.ai_settings.bg_color)
# DISPLAY THE SCORE AT THE TOP RIGHT OF THE SCREEN
self.score_rect = self.score_image.get_rect()
self.score_rect.centerx = self.screen_rect.centerx
self.score_rect.top = 10
def show_score(self):
"""DRAW SCORES AND SHIPS TO THE SCREEN"""
self.screen.blit(self.score_image, self.score_rect)
self.screen.blit(self.level_image, self.level_rect)
self.ships.draw(self.screen)
def prep_level(self):
"""TURN THE LEVEL INTO A RENDERED IMAGE"""
self.level_image = self.font.render(("LEVEL " + str(self.stats.level)), True, self.text_color, self.ai_settings.bg_color)
# POSITIONS THE LEVEL
self.level_rect = self.level_image.get_rect()
self.level_rect.right = self.screen_rect.right - 10
self.level_rect.top = 10
def prep_ships(self):
"""SHOW HOW MANY SHIPS ARE LEFT"""
self.ships = Group()
for ship_number in range(self.stats.ships_left):
ship = Ship(self.ai_settings, self.screen)
ship.rect.x = 10 + ship_number * ship.rect.width
ship.rect.y = 10
self.ships.add(ship) | 35.649123 | 129 | 0.642717 | 280 | 2,032 | 4.496429 | 0.260714 | 0.063542 | 0.0556 | 0.028594 | 0.060365 | 0.060365 | 0.060365 | 0.060365 | 0.060365 | 0 | 0 | 0.013843 | 0.253445 | 2,032 | 57 | 130 | 35.649123 | 0.816084 | 0.173228 | 0 | 0 | 0 | 0 | 0.003643 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.138889 | false | 0 | 0.083333 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d59ea9a1f5205bb6d6bc18ccfd5575ed4a21a31 | 4,417 | py | Python | fastSal_predict.py | VeronicaCPerez/FastSal | 27b5bcdacc69564284821bedc54dcf8fd0466033 | [
"Apache-2.0"
] | null | null | null | fastSal_predict.py | VeronicaCPerez/FastSal | 27b5bcdacc69564284821bedc54dcf8fd0466033 | [
"Apache-2.0"
] | null | null | null | fastSal_predict.py | VeronicaCPerez/FastSal | 27b5bcdacc69564284821bedc54dcf8fd0466033 | [
"Apache-2.0"
] | null | null | null | import model.fastSal as fastsal
from dataset.utils import read_vgg_img
from utils import load_weight
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
from os.path import isfile, isdir, join
from os import listdir
import numpy as np
import argparse
from generate_img import post_process_png, post_process_probability2
import cv2
class img_dataset(Dataset):
def __init__(self, img_path, output_path):
if isdir(img_path):
print('image folder is {}'.format(img_path))
file_list = [f for f in listdir(img_path) if isfile(join(img_path, f))]
file_list = [f for f in file_list if '.jpg' in f or 'jpeg' in f or 'png' in f]
self.file_list = np.asarray(file_list)
self.dir = img_path
elif isfile(img_path):
print('image file is {}'.format(img_path))
self.file_list = np.asarray([img_path])
self.dir = None
self.output_dir = output_path
def __getitem__(self, item):
if self.dir:
img_path = join(self.dir, self.file_list[item])
output_path = join(self.output_dir, 'out_' + self.file_list[item])
else:
img_path = self.file_list[item]
output_path = self.output_dir
vgg_img, original_size = read_vgg_img(img_path, (192, 256))
return vgg_img, original_size, output_path
def __len__(self):
return self.file_list.shape[0]
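# Usage sketch for img_dataset (hypothetical paths): it accepts either a
# single image file or a folder of .jpg/.jpeg/.png images, and pairs with a
# torch DataLoader exactly as in predict() below.
def _img_dataset_example():
    data = img_dataset('/path/to/images', '/path/to/outputs')
    loader = DataLoader(data, batch_size=4, shuffle=False)
    for vgg_img, original_size, output_path in loader:
        print(vgg_img.shape, output_path)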
def predict(model_type, finetune_dataset, input_path, output_path,
probability_output, batch_size, gpu=True):
model = fastsal.fastsal(pretrain_mode=False, model_type=model_type)
state_dict, opt_state = load_weight('weights/{}_{}.pth'.format(finetune_dataset, model_type), remove_decoder=False)
model.load_state_dict(state_dict)
if gpu:
model.cuda()
simple_data = img_dataset(input_path, output_path)
simple_loader = DataLoader(simple_data, batch_size=batch_size, shuffle=False, num_workers=4)
for x, original_size_list, output_path_list in simple_loader:
if gpu:
x = x.float().cuda()
y = model(x)
if not probability_output: y = nn.Sigmoid()(y)
if gpu:
y = y.detach().cpu()
y = y.numpy()
for i, prediction in enumerate(y[:, 0, :, :]):
img_output_path = output_path_list[i]
original_size = original_size_list[i].numpy()
print(img_output_path)
if not probability_output:
img_data = post_process_png(prediction, original_size)
cv2.imwrite(img_output_path, img_data)
else:
img_data = post_process_probability2(prediction, original_size)
np.save(img_output_path.split('.')[0], img_data)
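# Direct-call sketch for predict() (hypothetical paths; assumes the weight
# file weights/salicon_A.pth exists, matching the CLI defaults below):
def _predict_example():
    predict(model_type='A', finetune_dataset='salicon',
            input_path='/path/to/images', output_path='/path/to/outputs',
            probability_output=False, batch_size=4, gpu=False)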
if __name__ == '__main__':
coco_c = 'weights/coco_C.pth' # coco_C
coco_a = 'weights/coco_A.pth' # coco_A
salicon_c = 'weights/salicon_C.pth' # salicon_C
salicon_a = 'weights/salicon_A.pth' # coco_A
parser = argparse.ArgumentParser(description='configs for predict.')
parser.add_argument('-model_type', action='store', dest='model_type',
help='model type can be either C(oncatenation) or A(ddition)', default='A')
parser.add_argument('-finetune_dataset', action='store', dest='finetune_dataset',
help='Dataset that the model fine tuned on.', default='salicon')
parser.add_argument('-input_path', action='store', dest='input_path',
help='path to input image or image folder')
parser.add_argument('-output_path', action='store', dest='output_path',
help='path to output image or image folder')
parser.add_argument('-batch_size', action='store', dest='batch_size',
help='batch size.', default=1, type=int)
parser.add_argument('-probability_output', action='store', dest='probability_output',
help='use probability_output or not', default=False, type=bool)
parser.add_argument('-gpu', action='store', dest='gpu',
help='use gpu or not', default=True, type=bool)
args = parser.parse_args()
predict(args.model_type, args.finetune_dataset, args.input_path, args.output_path,
args.probability_output, args.batch_size, gpu=args.gpu)
#x = torch.zeros((10, 3, 192, 256))
#y = model(x)
#print(y.shape)
| 46.494737 | 119 | 0.645687 | 605 | 4,417 | 4.444628 | 0.233058 | 0.059502 | 0.044254 | 0.017851 | 0.09669 | 0.056527 | 0.026032 | 0 | 0 | 0 | 0 | 0.007175 | 0.242699 | 4,417 | 94 | 120 | 46.989362 | 0.796712 | 0.020376 | 0 | 0.058824 | 0 | 0 | 0.137763 | 0.009724 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047059 | false | 0 | 0.129412 | 0.011765 | 0.211765 | 0.035294 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d5a317d5828d43dac1b604bfde17c42ba4f02a7 | 947 | py | Python | e621/reports/templatetags/markdown.py | adjspecies/explore621 | 0ec946d28ed54d11569aa237f721001f74e7f1be | [
"MIT"
] | 3 | 2019-10-12T13:32:22.000Z | 2021-11-18T19:17:16.000Z | e621/reports/templatetags/markdown.py | adjspecies/explore621 | 0ec946d28ed54d11569aa237f721001f74e7f1be | [
"MIT"
] | 6 | 2018-12-11T20:38:26.000Z | 2021-06-10T21:01:45.000Z | e621/reports/templatetags/markdown.py | adjspecies/explore621 | 0ec946d28ed54d11569aa237f721001f74e7f1be | [
"MIT"
] | 4 | 2018-12-11T06:19:59.000Z | 2022-02-17T00:29:15.000Z | import random
import markdown as md
from django import template
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter(name='markdown')
def markdown(value):
return mark_safe(md.markdown(value, extensions=['extra', 'codehilite']))
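# Template usage sketch: after `{% load markdown %}` in a Django template,
# `{{ post.body|markdown }}` renders Markdown to safe HTML. The filter can
# also be called directly (illustrative input):
def _markdown_example():
    return markdown('# Title\n\nSome *emphasis* and `code`.')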
@register.simple_tag(name='titlefont')
def titlefont():
font = random.choice([
'Bungee Outline',
'Bungee Shade',
'Cabin Sketch',
'Caesar Dressing',
'Creepster',
'Ewert',
'Fascinate Inline',
'Hanalei',
'Iceland',
'Kumar One Outline',
'Libre Barcode 128 Text',
'Nova Slim',
'Revalia',
'Supermercado One',
'Wallpoet'])
return mark_safe('''
<link href="https://fonts.googleapis.com/css?family={font}" rel="stylesheet" />
<style>
header h1 {{
font-family: "{font}", monospace;
}}
</style>
'''.format(font=font))
| 22.023256 | 83 | 0.595565 | 98 | 947 | 5.714286 | 0.673469 | 0.042857 | 0.05 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005698 | 0.258712 | 947 | 42 | 84 | 22.547619 | 0.792023 | 0 | 0 | 0 | 0 | 0.029412 | 0.409715 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.117647 | 0.029412 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d5cb94a21df6b50461b7a92be6de3de8f970fc7 | 7,577 | py | Python | traffic_autocalibration/data_collection/envs.py | AboudyKreidieh/traffic-autocalibration | e8c3fc89022dba401994287b5774e2df20e2e901 | [
"MIT"
] | null | null | null | traffic_autocalibration/data_collection/envs.py | AboudyKreidieh/traffic-autocalibration | e8c3fc89022dba401994287b5774e2df20e2e901 | [
"MIT"
] | null | null | null | traffic_autocalibration/data_collection/envs.py | AboudyKreidieh/traffic-autocalibration | e8c3fc89022dba401994287b5774e2df20e2e901 | [
"MIT"
] | null | null | null | """Example of a merge network with human-driven vehicles.
In the absence of autonomous vehicles, the network exhibits properties of
convective instability, with perturbations propagating upstream from the merge
point before exiting the network.
"""
from flow.core.params import SumoParams, EnvParams, \
NetParams, InitialConfig, InFlows, SumoCarFollowingParams
from flow.core.params import VehicleParams
from flow.scenarios.merge import MergeScenario
from flow.scenarios.merge import ADDITIONAL_NET_PARAMS as MERGE_NET_PARAMS
from flow.controllers import IDMController, ContinuousRouter, GridRouter
from flow.envs.merge import WaveAttenuationMergePOEnv
from flow.envs.merge import ADDITIONAL_ENV_PARAMS as MERGE_ENV_PARAMS
from flow.envs.loop.loop_accel import AccelEnv
from flow.envs.loop.loop_accel import ADDITIONAL_ENV_PARAMS as LOOP_ENV_PARAMS
from flow.scenarios.loop import LoopScenario
from flow.scenarios.loop import ADDITIONAL_NET_PARAMS as LOOP_NET_PARAMS
from flow.scenarios.grid import SimpleGridScenario
import numpy as np
def merge_env(inflow_rate, cf_params, num, render=None):
"""
    Build an environment for simulating human-driven vehicles on a merge.
Parameters
----------
render: bool, optional
specifies whether to use the gui during execution
Returns
-------
    env : WaveAttenuationMergePOEnv
        A non-RL environment with human-driven vehicles on a merge.
"""
sim_params = SumoParams(
render=False,
sim_step=0.1,
restart_instance=True)
if render is not None:
sim_params.render = render
vehicles = VehicleParams()
vehicles.add(
veh_id="human",
acceleration_controller=(IDMController, cf_params),
car_following_params=SumoCarFollowingParams(
accel=4.5,
decel=4.5,
speed_mode='all_checks',
),
num_vehicles=5)
env_params = EnvParams(
additional_params=MERGE_ENV_PARAMS)
inflow = InFlows()
inflow.add(
veh_type="human",
edge="inflow_highway",
vehs_per_hour=inflow_rate,
departLane="free",
departSpeed=10)
inflow.add(
veh_type="human",
edge="inflow_merge",
vehs_per_hour=100,
departLane="free",
departSpeed=7.5)
additional_net_params = MERGE_NET_PARAMS.copy()
additional_net_params["merge_lanes"] = 1
additional_net_params["highway_lanes"] = 1
additional_net_params["pre_merge_length"] = 500
net_params = NetParams(
inflows=inflow,
no_internal_links=False,
additional_params=additional_net_params)
initial_config = InitialConfig(spacing="uniform", perturbation=5.0)
scenario = MergeScenario(
name="merge-{}-{}".format(inflow_rate, num),
vehicles=vehicles,
net_params=net_params,
initial_config=initial_config)
return WaveAttenuationMergePOEnv(env_params, sim_params, scenario)
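# Usage sketch (assumes a working Flow/SUMO installation; the cf_params keys
# shown are standard IDMController kwargs and the values are illustrative).
# ring_env and grid_env below follow the same calling pattern.
def _merge_env_example():
    env = merge_env(inflow_rate=2000,
                    cf_params={'v0': 30, 'T': 1, 'a': 1.3, 'b': 2.0},
                    num=0, render=False)
    obs = env.reset()
    obs, reward, done, info = env.step(rl_actions=None)
    return obs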
def ring_env(length, cf_params, num, render=None):
"""
    Build an environment for simulating human-driven vehicles on a ring road.
Parameters
----------
render : bool, optional
specifies whether to use the gui during execution
Returns
-------
    env : AccelEnv
        A non-RL environment with human-driven vehicles on a ring road.
"""
sim_params = SumoParams(
sim_step=0.1,
render=False,
restart_instance=True)
if render is not None:
sim_params.render = render
vehicles = VehicleParams()
vehicles.add(
veh_id="idm",
acceleration_controller=(IDMController, cf_params),
car_following_params=SumoCarFollowingParams(
accel=4.5,
decel=4.5,
speed_mode='all_checks',
),
routing_controller=(ContinuousRouter, {}),
num_vehicles=22)
env_params = EnvParams(additional_params=LOOP_ENV_PARAMS)
additional_net_params = LOOP_NET_PARAMS.copy()
additional_net_params['length'] = length
net_params = NetParams(additional_params=additional_net_params)
initial_config = InitialConfig(spacing='random')
scenario = LoopScenario(
name="ring-{}-{}".format(length, num),
vehicles=vehicles,
net_params=net_params,
initial_config=initial_config)
return AccelEnv(env_params, sim_params, scenario)
def grid_env(inflow_rate, cf_params, num, render=None):
"""
    Build an environment for simulating human-driven vehicles on a grid.
Parameters
----------
render: bool, optional
specifies whether to use the gui during execution
Returns
-------
    env : AccelEnv
        A non-RL environment with human-driven vehicles and balanced
        traffic lights on a grid.
"""
inner_length = 300
long_length = 500
short_length = 300
N_ROWS = 1
N_COLUMNS = 1
num_cars_left = 2
num_cars_right = 2
num_cars_top = 2
num_cars_bot = 2
tot_cars = (num_cars_left + num_cars_right) * N_COLUMNS \
+ (num_cars_top + num_cars_bot) * N_ROWS
inflow_ratio = np.random.dirichlet(np.ones(4), size=1)[0, :]
grid_array = {
"short_length": short_length,
"inner_length": inner_length,
"long_length": long_length,
"row_num": N_ROWS,
"col_num": N_COLUMNS,
"cars_left": num_cars_left,
"cars_right": num_cars_right,
"cars_top": num_cars_top,
"cars_bot": num_cars_bot
}
sim_params = SumoParams(
restart_instance=True,
sim_step=0.1,
render=False)
if render is not None:
sim_params.render = render
vehicles = VehicleParams()
vehicles.add(
veh_id="human",
acceleration_controller=(IDMController, cf_params),
routing_controller=(GridRouter, {}),
car_following_params=SumoCarFollowingParams(
min_gap=2.5,
accel=4.5,
decel=10.5, # avoid collisions at emergency stops
speed_mode='all_checks',
),
num_vehicles=tot_cars)
env_params = EnvParams(
sims_per_step=2,
additional_params=LOOP_ENV_PARAMS)
inflow = InFlows()
inflow.add(
veh_type="human",
edge="bot0_0",
vehs_per_hour=inflow_rate * inflow_ratio[0],
departLane="free",
departSpeed=5)
inflow.add(
veh_type="human",
edge="right0_0",
vehs_per_hour=inflow_rate * inflow_ratio[1],
departLane="free",
departSpeed=5)
inflow.add(
veh_type="human",
edge="top0_1",
vehs_per_hour=inflow_rate * inflow_ratio[2],
departLane="free",
departSpeed=5)
inflow.add(
veh_type="human",
edge="left1_0",
vehs_per_hour=inflow_rate * inflow_ratio[3],
departLane="free",
departSpeed=5)
additional_net_params = {
"grid_array": grid_array,
"speed_limit": 35,
"horizontal_lanes": 1,
"vertical_lanes": 1,
"traffic_lights": False,
}
net_params = NetParams(
inflows=inflow,
no_internal_links=False,
additional_params=additional_net_params)
initial_config = InitialConfig(spacing='custom')
scenario = SimpleGridScenario(
name="grid-{}-{}".format(inflow_rate, num),
vehicles=vehicles,
net_params=net_params,
initial_config=initial_config,
)
return AccelEnv(env_params, sim_params, scenario)
| 29.142308 | 78 | 0.659364 | 898 | 7,577 | 5.309577 | 0.206013 | 0.04719 | 0.047819 | 0.020134 | 0.627517 | 0.527685 | 0.490772 | 0.463297 | 0.442534 | 0.428272 | 0 | 0.013537 | 0.249307 | 7,577 | 259 | 79 | 29.254826 | 0.824719 | 0.157846 | 0 | 0.47486 | 0 | 0 | 0.063495 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.01676 | false | 0 | 0.072626 | 0 | 0.106145 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d607dcd0363a0d16bf89db80e0f33fe61596373 | 1,072 | py | Python | examples/imdb_sentiment_analysis/data.py | AntonioGr7/grimai | d3b4724ca5636d8afefa322a5020c5a927ce0db4 | [
"MIT"
] | null | null | null | examples/imdb_sentiment_analysis/data.py | AntonioGr7/grimai | d3b4724ca5636d8afefa322a5020c5a927ce0db4 | [
"MIT"
] | null | null | null | examples/imdb_sentiment_analysis/data.py | AntonioGr7/grimai | d3b4724ca5636d8afefa322a5020c5a927ce0db4 | [
"MIT"
] | null | null | null | from transformers import AutoTokenizer
import torch
class Data:
def __init__(self,texts,labels):
self.class_mapping = {"positive":0,"negative":1}
self.texts = texts
self.labels = labels
self.MAX_LENGTH = 250
self.tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
def __len__(self):
return len(self.texts)
def __getitem__(self, item):
x = self.texts[item]
inputs = self.tokenizer.encode_plus(
x,
None,
add_special_tokens=True,
max_length=self.MAX_LENGTH,
padding="max_length",
truncation=True)
inputs_ids = inputs['input_ids']
attention_mask = inputs['attention_mask']
# token_type_ids = inputs['token_type_ids']
return {
"text": x,
"ids": torch.tensor(inputs_ids, dtype=torch.long),
"mask": torch.tensor(attention_mask, dtype=torch.long),
"target": torch.tensor(self.class_mapping[self.labels[item]],dtype=torch.float)
} | 33.5 | 91 | 0.603545 | 122 | 1,072 | 5.040984 | 0.42623 | 0.058537 | 0.052033 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006519 | 0.284515 | 1,072 | 32 | 92 | 33.5 | 0.795306 | 0.038246 | 0 | 0 | 0 | 0 | 0.080583 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.071429 | 0.035714 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
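# Usage sketch for the Data class above (hypothetical inputs): wrap it in a
# torch DataLoader to batch the tokenized reviews.
def _data_example():
    from torch.utils.data import DataLoader
    dataset = Data(texts=['great movie', 'terrible plot'],
                   labels=['positive', 'negative'])
    loader = DataLoader(dataset, batch_size=2)
    batch = next(iter(loader))
    return batch['ids'].shape, batch['target']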
2d6188620428a8e7bbfe490fd6e480985421b02a | 8,335 | py | Python | rlgraph/components/distributions/joint_cumulative_distribution.py | RLGraph/RLGraph | 428fc136a9a075f29a397495b4226a491a287be2 | [
"Apache-2.0"
] | 290 | 2018-07-29T15:30:57.000Z | 2022-03-19T02:46:53.000Z | rlgraph/components/distributions/joint_cumulative_distribution.py | RLGraph/RLGraph | 428fc136a9a075f29a397495b4226a491a287be2 | [
"Apache-2.0"
] | 76 | 2018-10-19T08:42:01.000Z | 2020-05-03T08:34:21.000Z | rlgraph/components/distributions/joint_cumulative_distribution.py | RLGraph/RLGraph | 428fc136a9a075f29a397495b4226a491a287be2 | [
"Apache-2.0"
] | 41 | 2018-10-30T07:05:05.000Z | 2022-03-01T08:28:24.000Z | # Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import, division, print_function
from rlgraph import get_backend
from rlgraph.components.distributions.distribution import Distribution
from rlgraph.components.layers.preprocessing.reshape import ReShape
from rlgraph.utils.decorators import rlgraph_api, graph_fn
from rlgraph.utils.ops import flatten_op, FlattenedDataOp
if get_backend() == "tf":
import tensorflow as tf
elif get_backend() == "pytorch":
import torch
class JointCumulativeDistribution(Distribution):
"""
A joint cumulative distribution consisting of an arbitrarily nested container of n sub-distributions
assumed to be all independent(!) of each other, such that:
For e.g. n=2 and random variables X and Y: P(X and Y) = P(X)*P(Y) for all x and y.
- Sampling returns a ContainerDataOp.
- log_prob returns the sum of all single log prob terms (joint log prob).
- entropy returns the sum of all single entropy terms (joint entropy).
"""
def __init__(self, distribution_specs, scope="joint-cumulative-distribution", **kwargs):
"""
Args:
distribution_specs (dict): Dict with flat-keys containing the specifications of the single
sub-distributions.
"""
super(JointCumulativeDistribution, self).__init__(scope=scope, **kwargs)
# Create the flattened sub-distributions and add them.
self.flattened_sub_distributions = \
{flat_key: Distribution.from_spec(spec, scope="sub-distribution-{}".format(i))
for i, (flat_key, spec) in enumerate(distribution_specs.items())
}
self.flattener = ReShape(flatten=True)
self.add_components(self.flattener, *list(self.flattened_sub_distributions.values()))
@rlgraph_api
def sample_deterministic(self, parameters):
return self._graph_fn_sample_deterministic(parameters)
@rlgraph_api
def sample_stochastic(self, parameters):
return self._graph_fn_sample_stochastic(parameters)
@rlgraph_api
def draw(self, parameters, deterministic=True):
return self._graph_fn_draw(parameters, deterministic)
@rlgraph_api
def sample_and_log_prob(self, parameters, deterministic=True):
#distribution = self.get_distribution(parameters)
actions = self._graph_fn_draw(parameters, deterministic)
log_probs = self._graph_fn_log_prob(parameters, actions)
return actions, log_probs
#@rlgraph_api
#def entropy(self, parameters):
# return self._graph_fn_entropy(parameters)
@rlgraph_api
def log_prob(self, parameters, values):
"""
Override log_prob API as we have to add all the resulting log-probs together
(joint log-prob of individual ones).
"""
#distributions = self.get_distribution(parameters)
all_log_probs = self._graph_fn_log_prob(parameters, values)
return self._graph_fn_reduce_over_sub_distributions(all_log_probs)
#@rlgraph_api(must_be_complete=False)
#def kl_divergence(self, parameters, other_parameters):
# distribution = self.get_distribution(parameters)
# other_distribution = self.get_distribution(other_parameters)
# return self._graph_fn_kl_divergence(distribution, other_distribution)
# Flatten only alongside `self.flattened_sub_distributions`, not any further.
@rlgraph_api(flatten_ops="flattened_sub_distributions", split_ops=True, add_auto_key_as_first_param=True, ok_to_overwrite=True)
def _graph_fn_get_distribution(self, key, parameters):
return self.flattened_sub_distributions[key].get_distribution(parameters)
@graph_fn(flatten_ops="flattened_sub_distributions")
def _graph_fn_sample_deterministic(self, parameters):
ret = {}
for key in parameters:
ret[key] = self.flattened_sub_distributions[key].sample_deterministic(parameters[key])
return FlattenedDataOp(ret)
@graph_fn(flatten_ops="flattened_sub_distributions")
def _graph_fn_sample_stochastic(self, parameters):
ret = {}
for key in parameters:
ret[key] = self.flattened_sub_distributions[key].sample_stochastic(parameters[key])
return FlattenedDataOp(ret)
@graph_fn(flatten_ops="flattened_sub_distributions")
def _graph_fn_draw(self, parameters, deterministic):
ret = {}
for key in parameters:
ret[key] = self.flattened_sub_distributions[key].draw(parameters[key], deterministic)
return FlattenedDataOp(ret)
# Flatten only alongside `self.flattened_sub_distributions`, not any further.
@graph_fn(flatten_ops="flattened_sub_distributions")
def _graph_fn_log_prob(self, parameters, values):
ret = {}
for key in parameters:
#d = self.flattened_sub_distributions[key].get_distribution(parameters[key])
#return self.flattened_sub_distributions[key]._graph_fn_log_prob(distribution, values)
ret[key] = self.flattened_sub_distributions[key].log_prob(parameters[key], values[key])
return FlattenedDataOp(ret)
@graph_fn(flatten_ops=True)
def _graph_fn_reduce_over_sub_distributions(self, log_probs):
params_space = next(iter(flatten_op(self.api_method_inputs["parameters"]).values()))
num_ranks_to_keep = (1 if params_space.has_batch_rank else 0) + (1 if params_space.has_time_rank else 0)
log_probs_list = []
if get_backend() == "tf":
for log_prob in log_probs.values():
# Reduce sum over all ranks to get the joint log llh.
log_prob = tf.reduce_sum(log_prob, axis=list(range(len(log_prob.shape) - 1, num_ranks_to_keep - 1, -1)))
log_probs_list.append(log_prob)
return tf.reduce_sum(tf.stack(log_probs_list, axis=0), axis=0)
elif get_backend() == "pytorch":
for log_prob in log_probs.values():
# Reduce sum over all ranks to get the joint log llh.
log_prob = torch.sum(log_prob, dim=list(range(len(log_prob.shape) - 1, num_ranks_to_keep - 1, -1)))
log_probs_list.append(log_prob)
return torch.sum(torch.stack(log_probs_list, dim=0), dim=0)
# Flatten only alongside `self.flattened_sub_distributions`, not any further.
@graph_fn(flatten_ops="flattened_sub_distributions")
def _graph_fn_entropy(self, distribution):
params_space = next(iter(flatten_op(self.api_method_inputs["parameters"]).values()))
num_ranks_to_keep = (1 if params_space.has_batch_rank else 0) + (1 if params_space.has_time_rank else 0)
all_entropies = []
if get_backend() == "tf":
for key, distr in distribution.items():
entropy = distr.entropy()
# Reduce sum over all ranks to get the joint entropy.
entropy = tf.reduce_sum(entropy, axis=list(range(len(entropy.shape) - 1, num_ranks_to_keep - 1, -1)))
all_entropies.append(entropy)
return tf.reduce_sum(tf.stack(all_entropies, axis=0), axis=0)
elif get_backend() == "pytorch":
for key, distr in distribution.items():
entropy = distr.entropy()
# Reduce sum over all ranks to get the joint log llh.
entropy = torch.sum(entropy, dim=list(range(len(entropy.shape) - 1, num_ranks_to_keep - 1, -1)))
all_entropies.append(entropy)
# TODO: flatten all all_log_probs (or expand in last dim) so we can concat, then reduce_sum to get the joint probs.
return torch.sum(torch.stack(all_entropies, dim=0), dim=0)
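# Numeric sketch of the independence identity from the class docstring
# (illustrative values only): log P(x, y) = log P(x) + log P(y), which is why
# the reductions above simply sum the per-distribution log-probs.
def _joint_log_prob_sketch():
    import math
    log_p_x, log_p_y = math.log(0.5), math.log(0.2)
    joint = log_p_x + log_p_y
    assert abs(joint - math.log(0.1)) < 1e-12
    return joint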
| 47.902299 | 131 | 0.692262 | 1,080 | 8,335 | 5.096296 | 0.205556 | 0.030523 | 0.086301 | 0.063227 | 0.478198 | 0.423328 | 0.367551 | 0.347202 | 0.305414 | 0.29415 | 0 | 0.006219 | 0.208998 | 8,335 | 173 | 132 | 48.179191 | 0.828606 | 0.306779 | 0 | 0.4375 | 0 | 0 | 0.045584 | 0.033877 | 0 | 0 | 0 | 0.00578 | 0 | 1 | 0.135417 | false | 0 | 0.083333 | 0.041667 | 0.375 | 0.010417 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d61d39ab9d99ec24484646c2fb2f7ff2ffef252 | 2,550 | py | Python | export.py | jkulhanek/icra2017-visual-navigation | d16ec8c0923f88c4eb41be05c5405152d8ce7c7c | [
"MIT"
] | null | null | null | export.py | jkulhanek/icra2017-visual-navigation | d16ec8c0923f88c4eb41be05c5405152d8ce7c7c | [
"MIT"
] | null | null | null | export.py | jkulhanek/icra2017-visual-navigation | d16ec8c0923f88c4eb41be05c5405152d8ce7c7c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import random
import sys
import os
from network import ActorCriticFFNetwork
from training_thread import A3CTrainingThread
from scene_loader import THORDiscreteEnvironment as Environment
from utils.ops import sample_action
from constants import ACTION_SIZE
from constants import CHECKPOINT_DIR
from constants import NUM_EVAL_EPISODES
from constants import VERBOSE
from constants import TASK_TYPE
from constants import TASK_LIST
import pickle
if __name__ == '__main__':
device = "/cpu:0" # use CPU for display tool
network_scope = TASK_TYPE
list_of_tasks = TASK_LIST
scene_scopes = list_of_tasks.keys()
global_network = ActorCriticFFNetwork(action_size=ACTION_SIZE,
device=device,
network_scope=network_scope,
scene_scopes=scene_scopes)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
saver = tf.train.Saver()
checkpoint = tf.train.get_checkpoint_state(CHECKPOINT_DIR)
  checkpoint.model_checkpoint_path = os.path.normpath(
      os.path.join(__file__, '..', 'checkpoints', 'checkpoint-10000085'))
if checkpoint and checkpoint.model_checkpoint_path:
saver.restore(sess, checkpoint.model_checkpoint_path)
print("checkpoint loaded: {}".format(checkpoint.model_checkpoint_path))
else:
print("Could not find old checkpoint")
  print('Exporting weights to pytorch')
model = {}
model['navigation'] = {
'fc_siemense.weight': np.transpose(sess.run(global_network.W_fc1['navigation'])),
'fc_siemense.bias': sess.run(global_network.b_fc1['navigation']),
'fc_merge.weight': np.transpose(sess.run(global_network.W_fc2['navigation'])),
'fc_merge.bias': sess.run(global_network.b_fc2['navigation']),
}
for key in global_network.W_fc3.keys():
model[key] = {
'fc1.weight': np.transpose(sess.run(global_network.W_fc3[key])),
'fc1.bias': sess.run(global_network.b_fc3[key]),
'fc2_policy.weight': np.transpose(sess.run(global_network.W_policy[key])),
'fc2_policy.bias': sess.run(global_network.b_policy[key]),
'fc2_value.weight': np.transpose(sess.run(global_network.W_value[key])),
'fc2_value.bias': sess.run(global_network.b_value[key]),
}
print('Weights exported')
print('Saving')
pickle.dump(model, open( "weights.p", "wb" )) | 35.915493 | 119 | 0.688627 | 322 | 2,550 | 5.208075 | 0.347826 | 0.093023 | 0.077519 | 0.119261 | 0.187835 | 0.187835 | 0.113298 | 0.113298 | 0 | 0 | 0 | 0.011765 | 0.2 | 2,550 | 71 | 120 | 35.915493 | 0.810294 | 0.026275 | 0 | 0 | 0 | 0 | 0.145583 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0.089286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
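# Sketch of consuming the exported pickle on the PyTorch side (hypothetical
# target module; the key names mirror the dict built above):
def _load_exported_weights(path="weights.p"):
    import torch
    with open(path, "rb") as f:
        weights = pickle.load(f)
    w = torch.from_numpy(weights['navigation']['fc_siemense.weight'])
    b = torch.from_numpy(weights['navigation']['fc_siemense.bias'])
    return w, b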
2d63595bc2a7c365a87fadf2158bba5d7d07c053 | 836 | py | Python | web_application/werkzeug_app.py | kaitolucifer/asgi-demo | 3d5a1ab834d01c04dfbed4f99f61e78f1896606c | [
"WTFPL"
] | null | null | null | web_application/werkzeug_app.py | kaitolucifer/asgi-demo | 3d5a1ab834d01c04dfbed4f99f61e78f1896606c | [
"WTFPL"
] | null | null | null | web_application/werkzeug_app.py | kaitolucifer/asgi-demo | 3d5a1ab834d01c04dfbed4f99f61e78f1896606c | [
"WTFPL"
] | null | null | null | import os
from werkzeug.serving import run_simple
from werkzeug.middleware.shared_data import SharedDataMiddleware
from werkzeug.wrappers import Request, Response
from werkzeug.routing import Map, Rule
@Request.application
def app(request):
urls = url_map.bind_to_environ(request.environ)
endpoint, args = urls.match()
request.environ['args'] = args
return endpoint
@Request.application
def get_user(request):
username = request.environ['args'].get('username')
return Response(f'hello {username}')
url_map = Map([
Rule('/<string:username>', endpoint=get_user, methods=['get'])
])
# 静的ファイルの配信
app = SharedDataMiddleware(app, {
'/static/': os.path.join(os.path.dirname(__file__), 'static')})
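# Quick check with werkzeug's test client (sketch; avoids starting a server.
# On recent werkzeug versions the call returns a TestResponse object):
def _test_client_example():
    from werkzeug.test import Client
    client = Client(app)
    response = client.get('/alice')
    return response  # body: 'hello alice'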
if __name__ == '__main__':
run_simple('localhost', 5000, app, use_debugger=True, use_reloader=True)
| 24.588235 | 76 | 0.732057 | 105 | 836 | 5.609524 | 0.485714 | 0.081494 | 0.071307 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005548 | 0.13756 | 836 | 33 | 77 | 25.333333 | 0.811373 | 0.010766 | 0 | 0.090909 | 0 | 0 | 0.101818 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.227273 | 0 | 0.409091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d64f48022a61878d639ccecb32b324c4d8999a0 | 5,920 | py | Python | scripts/remote_control_gui.py | Badenhoop/remote_control | 218d40faa17d600a0b40ea02227dca16292c3bfb | [
"MIT"
] | null | null | null | scripts/remote_control_gui.py | Badenhoop/remote_control | 218d40faa17d600a0b40ea02227dca16292c3bfb | [
"MIT"
] | null | null | null | scripts/remote_control_gui.py | Badenhoop/remote_control | 218d40faa17d600a0b40ea02227dca16292c3bfb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
from std_msgs.msg import Float64
import pygame
import sys
from pygame.locals import *
import threading
import numpy as np
from time import sleep
def center(src, dest):
src.centerx = dest.centerx
src.centery = dest.centery
class Button:
def __init__(self, rect, label, onPressed=None, onReleased=None):
self.rect = rect
self.label = label
self.onPressed = onPressed
self.onReleased = onReleased
self.pressed = False
def press(self):
if not self.pressed and self.onPressed is not None:
self.onPressed()
self.pressed = True
def release(self):
if self.pressed and self.onReleased is not None:
self.onReleased()
self.pressed = False
def draw(self, surf):
# fill
if not self.pressed:
pygame.draw.rect(surf, (150, 150, 150), self.rect)
else:
pygame.draw.rect(surf, (255, 100, 100), self.rect)
# frame
pygame.draw.rect(surf, (10, 10, 10), self.rect, 1)
# label
font = pygame.font.Font(None, 36)
text = font.render(self.label, True, (10, 10, 10))
text_rect = text.get_rect()
center(text_rect, self.rect)
surf.blit(text, text_rect)
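# Minimal Button usage sketch (callbacks are illustrative placeholders):
def _button_example(surface):
    btn = Button(pygame.Rect(0, 0, 200, 100), 'GO',
                 onPressed=lambda: print('pressed'),
                 onReleased=lambda: print('released'))
    btn.press()
    btn.draw(surface)
    btn.release()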
def create_joystick():
joystick = None
try:
joystick = pygame.joystick.Joystick(0) # create a joystick instance
joystick.init() # init instance
print("Enabled joystick: " + joystick.get_name())
except pygame.error:
print("No joystick found.")
return joystick
def main():
rospy.init_node('remote_control_gui', log_level=rospy.INFO)
motor_pub = rospy.Publisher('motor', Float64, queue_size=10)
servo_pub = rospy.Publisher('servo', Float64, queue_size=10)
FPS = 60
pygame.init()
pygame.joystick.init()
fpsClock = pygame.time.Clock()
BACKGROUND_COLOR = (255, 255, 255)
SCREEN_WIDTH, SCREEN_HEIGHT = 640, 480
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill(BACKGROUND_COLOR)
background.blit(background, (0,0))
clock = pygame.time.Clock()
pygame.key.set_repeat(1, 40)
BUTTON_WIDTH = 200
BUTTON_HEIGHT = 100
PADDING = 10
buttonsSurface = pygame.Surface((3 * BUTTON_WIDTH + 2 * PADDING, 2 * BUTTON_HEIGHT + PADDING))
buttonsSurface = buttonsSurface.convert()
buttonsSurface.fill(BACKGROUND_COLOR)
    speed_modes = [0.2, 0.3, 0.4]
    speed = 0.2
    # clamp bounds for the joystick axis mapping (assumed symmetric; the axis
    # handler below scales y in [-1, 1] by MAX_SPEED)
    MIN_SPEED, MAX_SPEED = -speed_modes[-1], speed_modes[-1]
zero_speed = 0
max_angle = 1.0
zero_angle = 0
forwards = Button(
pygame.Rect(1 * (BUTTON_WIDTH + PADDING), 0 * (BUTTON_HEIGHT + PADDING), BUTTON_WIDTH, BUTTON_HEIGHT),
"FORWARDS",
onPressed=lambda: motor_pub.publish(speed),
onReleased=lambda: motor_pub.publish(zero_speed))
backwards = Button(
pygame.Rect(1 * (BUTTON_WIDTH + PADDING), 1 * (BUTTON_HEIGHT + PADDING), BUTTON_WIDTH, BUTTON_HEIGHT),
"BACKWARDS",
onPressed=lambda: motor_pub.publish(-speed),
onReleased=lambda: motor_pub.publish(zero_speed))
left = Button(
pygame.Rect(0 * (BUTTON_WIDTH + PADDING), 1 * (BUTTON_HEIGHT + PADDING), BUTTON_WIDTH, BUTTON_HEIGHT),
"LEFT",
onPressed=lambda: servo_pub.publish(max_angle),
onReleased=lambda: servo_pub.publish(zero_angle))
right = Button(
pygame.Rect(2 * (BUTTON_WIDTH + PADDING), 1 * (BUTTON_HEIGHT + PADDING), BUTTON_WIDTH, BUTTON_HEIGHT),
"RIGHT",
onPressed=lambda: servo_pub.publish(-max_angle),
onReleased=lambda: servo_pub.publish(zero_angle))
buttons = [forwards, backwards, left, right]
buttons_rect = buttonsSurface.get_rect()
center(buttons_rect, background.get_rect())
joystick = create_joystick()
running = True
while running:
for event in pygame.event.get():
            if event.type == QUIT:
                running = False
                break
elif event.type == KEYDOWN:
if event.key == K_UP:
forwards.press()
elif event.key == K_DOWN:
backwards.press()
elif event.key == K_LEFT:
left.press()
elif event.key == K_RIGHT:
right.press()
elif event.key == K_1:
speed = speed_modes[0]
elif event.key == K_2:
speed = speed_modes[1]
elif event.key == K_3:
speed = speed_modes[2]
elif event.key == K_PLUS:
speed += 0.005
elif event.key == K_MINUS:
speed -= 0.005
print("speed: {:.2}".format(speed))
elif event.type == KEYUP:
if event.key == K_UP:
forwards.release()
elif event.key == K_DOWN:
backwards.release()
elif event.key == K_LEFT:
left.release()
elif event.key == K_RIGHT:
right.release()
elif event.type == JOYAXISMOTION:
x = joystick.get_axis(0)
y = joystick.get_axis(4)
angle = min(max(x * max_angle, -max_angle), max_angle)
speed = min(max(-y * MAX_SPEED, MIN_SPEED), MAX_SPEED)
servo_pub.publish(angle)
motor_pub.publish(speed)
background.fill(BACKGROUND_COLOR)
for button in buttons:
button.draw(buttonsSurface)
background.blit(buttonsSurface, buttons_rect)
screen.blit(background, (0, 0))
pygame.display.update()
        fpsClock.tick(FPS)

    # quit pygame only after the main loop has finished its last draw
    pygame.quit()
if __name__ == '__main__':
main()
| 33.072626 | 110 | 0.58277 | 693 | 5,920 | 4.822511 | 0.217893 | 0.037702 | 0.035009 | 0.042789 | 0.262418 | 0.228905 | 0.174446 | 0.14632 | 0.14632 | 0.14632 | 0 | 0.029138 | 0.310135 | 5,920 | 178 | 111 | 33.258427 | 0.789177 | 0.013176 | 0 | 0.119205 | 0 | 0 | 0.018849 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046358 | false | 0 | 0.059603 | 0 | 0.119205 | 0.019868 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d655411de74cf4c522942b76d81833d54648c19 | 14,439 | py | Python | toiro/datadownloader/preprocess.py | taishi-i/toiro | 9d37c39ca8b144646c4a977a3d49c28784db5404 | [
"Apache-2.0"
] | 94 | 2020-08-13T16:39:09.000Z | 2022-03-22T21:52:33.000Z | toiro/datadownloader/preprocess.py | taishi-i/toiro | 9d37c39ca8b144646c4a977a3d49c28784db5404 | [
"Apache-2.0"
] | 2 | 2020-08-18T01:49:18.000Z | 2020-11-18T23:53:07.000Z | toiro/datadownloader/preprocess.py | taishi-i/toiro | 9d37c39ca8b144646c4a977a3d49c28784db5404 | [
"Apache-2.0"
] | 7 | 2020-08-17T01:20:37.000Z | 2022-02-04T21:53:59.000Z | import os
import csv
import json
import glob
import gzip
import random
import tarfile
import zipfile
import pandas as pd
from .downloader_utils import get_corpora_dict
from .downloader_utils import get_resource_dir
def _extract_tarfile(filename, target_dir):
with tarfile.open(filename, 'r:*') as tar:
tar.extractall(target_dir)
def _shuffle_data(is_shuffle, data):
if is_shuffle:
random.shuffle(data)
def _max_count_data(max_count, data):
return data[:max_count]
def _split_train_dev_test(data, train_data=0.8, dev_data=0.1, test_data=0.1):
total = train_data+dev_data+test_data
if not total == 1.0:
err_msg = f"The total of train/dev/test data: {total} must be 1."
raise Exception(err_msg)
num_train = int(len(data) * train_data)
num_dev = int(len(data) * dev_data)
num_test = int(len(data) * test_data)
train = pd.DataFrame(data[:num_train])
dev = pd.DataFrame(data[num_train:num_train+num_dev])
test = pd.DataFrame(data[num_train+num_dev:num_train+num_dev+num_test])
return train, dev, test
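# Quick illustration of the default 80/10/10 split (hypothetical data):
# with 100 rows, the returned frames hold 80, 10 and 10 rows respectively.
def _split_example():
    data = [['pos' if i % 2 == 0 else 'neg', 'text %d' % i] for i in range(100)]
    train, dev, test = _split_train_dev_test(data)
    assert (len(train), len(dev), len(test)) == (80, 10, 10)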
def _check_correct_corpus_type(corpus_type, corpus_types):
if corpus_type not in corpus_types:
err_msg = f"{corpus_type} is not available. Choose from {corpus_types}"
raise Exception(err_msg)
def load_corpus(corpus, n=None, is_shuffle=True, corpus_type=None,
train_data=0.8, dev_data=0.1, test_data=0.1, random_seed=1234):
"""
Dataloader for selected corpus.
The data is pre-processed and split into training data,
development data and test data.
Parameters
----------
corpus : str
The corpus
n : int
The number of datasets
is_shuffle : bool
If true, shuffle the dataset
train_data : float
Percentage of training data
dev_data : float
Percentage of development data
test_data : float
Percentage of test data
random_seed : int
Random seed for shuffle datasets
Returns
-------
train_df : pandas.core.frame.DataFrame
The training data
dev_df : pandas.core.frame.DataFrame
The development data
test_df : pandas.core.frame.DataFrame
The test data
Examples
--------
>>> train_df, dev_df, test_df = datadownloader.load_corpus('livedoor_news_corpus')
"""
if corpus == "amazon_reviews":
return load_amazon_reviews(
n=n, is_shuffle=is_shuffle,
train_data=train_data, dev_data=dev_data, test_data=test_data,
random_seed=random_seed)
elif corpus == "yahoo_movie_reviews":
if corpus_type is None:
corpus_type = "binary"
return load_yahoo_movie_reviews(
n=n, is_shuffle=is_shuffle, corpus_type=corpus_type,
train_data=train_data, dev_data=dev_data, test_data=test_data,
random_seed=random_seed)
elif corpus == "livedoor_news_corpus":
if corpus_type is None:
corpus_type = "title"
return load_livedoor_news_corpus(
n=n, is_shuffle=is_shuffle, corpus_type=corpus_type,
train_data=train_data, dev_data=dev_data, test_data=test_data,
random_seed=random_seed)
elif corpus == "chABSA_dataset":
return load_chABSA_dataset(
n=n, is_shuffle=is_shuffle,
train_data=train_data, dev_data=dev_data, test_data=test_data,
random_seed=random_seed)
else:
err_msg = " ".join(
[f"{corpus} does not exist.",
f"Use datadownloader.download_corpus('{corpus}') ."]
)
raise Exception(err_msg)
def __count_polarity(opinions):
posinega = {"positive": 1, "negative": -1}
scores = [posinega.get(opinion["polarity"], 0) for opinion in opinions]
score = sum(scores)
if score > 0:
return "positive"
elif score < 0:
return "negative"
else:
return "neutral"
def load_chABSA_dataset(n=None, is_shuffle=True,
train_data=0.8, dev_data=0.1, test_data=0.1,
random_seed=1234):
"""
Dataloader for chABSA dataset.
The data is pre-processed and split into training data,
development data and test data.
Parameters
----------
n : int
The number of datasets
is_shuffle : bool
If true, shuffle the dataset
train_data : float
Percentage of training data
dev_data : float
Percentage of development data
test_data : float
Percentage of test data
random_seed : int
Random seed for shuffle datasets
Returns
-------
train_df : pandas.core.frame.DataFrame
The training data
dev_df : pandas.core.frame.DataFrame
The development data
test_df : pandas.core.frame.DataFrame
The test data
"""
random.seed(random_seed)
corpus = "chABSA_dataset"
corpora_dict = get_corpora_dict()
resource_dir = get_resource_dir()
filename = corpora_dict[corpus]["filename"]
filepath = os.path.join(resource_dir, filename)
with zipfile.ZipFile(filepath, 'r') as f:
f.extractall(resource_dir)
files = glob.glob(os.path.join(resource_dir, "chABSA-dataset", "*.json"))
data = []
for _file in files:
with open(_file, "r") as f:
_data = json.load(f)
sentences = _data["sentences"]
for sentence in sentences:
sent = sentence["sentence"]
opinions = sentence["opinions"]
label = __count_polarity(opinions)
data.append([label, sent])
_shuffle_data(is_shuffle, data)
data = _max_count_data(n, data)
train_df, dev_df, test_df = _split_train_dev_test(
data, train_data=train_data, dev_data=dev_data, test_data=test_data
)
return train_df, dev_df, test_df
def load_amazon_reviews(n=None, is_shuffle=True,
train_data=0.8, dev_data=0.1, test_data=0.1,
random_seed=1234):
"""
Dataloader for amazon reviews.
The data is pre-processed and split into training data,
development data and test data.
Parameters
----------
n : int
The number of datasets
is_shuffle : bool
If true, shuffle the dataset
train_data : float
Percentage of training data
dev_data : float
Percentage of development data
test_data : float
Percentage of test data
random_seed : int
Random seed for shuffle datasets
Returns
-------
train_df : pandas.core.frame.DataFrame
The training data
dev_df : pandas.core.frame.DataFrame
The development data
test_df : pandas.core.frame.DataFrame
The test data
"""
random.seed(random_seed)
corpus = "amazon_reviews"
corpora_dict = get_corpora_dict()
resource_dir = get_resource_dir()
filename = corpora_dict[corpus]["filename"]
filepath = os.path.join(resource_dir, filename)
data = []
if os.path.exists(filepath):
with gzip.open(filepath, "rt") as f:
reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
next(reader)
for line in reader:
rating = line[7]
text = line[13]
data.append([rating, text])
_shuffle_data(is_shuffle, data)
data = _max_count_data(n, data)
train_df, dev_df, test_df = _split_train_dev_test(
data, train_data=train_data, dev_data=dev_data, test_data=test_data
)
return train_df, dev_df, test_df
def load_yahoo_movie_reviews(n=None, is_shuffle=True, corpus_type="binary",
train_data=0.8, dev_data=0.1, test_data=0.1,
random_seed=1234):
"""
Dataloader for yahoo_movie_reviews.
The data is pre-processed and split into training data,
development data and test data.
Parameters
----------
n : int
The number of datasets
is_shuffle : bool
If true, shuffle the dataset
train_data : float
Percentage of training data
dev_data : float
Percentage of development data
test_data : float
Percentage of test data
random_seed : int
Random seed for shuffle datasets
Returns
-------
train_df : pandas.core.frame.DataFrame
The training data
dev_df : pandas.core.frame.DataFrame
The development data
test_df : pandas.core.frame.DataFrame
The test data
"""
corpus_types = ["binary", "original"]
_check_correct_corpus_type(corpus_type, corpus_types)
random.seed(random_seed)
corpus = "yahoo_movie_reviews"
corpora_dict = get_corpora_dict()
resource_dir = get_resource_dir()
filename = corpora_dict[corpus]["filename"]
filepath = os.path.join(resource_dir, filename)
if os.path.exists(filepath):
yahoo_movie_reviews_dir = os.path.join(resource_dir, "data")
if not os.path.exists(yahoo_movie_reviews_dir):
_extract_tarfile(filepath, resource_dir)
yahoo_movie_reviews_json = os.path.join(
yahoo_movie_reviews_dir, "yahoo-movie-reviews.json"
)
if not os.path.exists(yahoo_movie_reviews_json):
err_msg = " ".join([
f"{yahoo_movie_reviews_json} does not exist. ",
f"Use datadownloader.download_corpus('{corpus}') ."
]
)
raise Exception(err_msg)
data = []
with open(yahoo_movie_reviews_json, "r") as f:
json_load = json.load(f)
for line in json_load:
text = line["text"].replace("\n", "")
rating = str(line["rating"])
if corpus_type == "binary":
if rating in ["1", "2"]:
rating = 0
data.append([rating, text])
elif rating in ["4", "5"]:
rating = 1
data.append([rating, text])
else:
data.append([rating, text])
if corpus_type == "binary":
label2texts = {}
for line in data:
label, text = line
if label in label2texts:
label2texts[label].append(text)
else:
label2texts[label] = [text]
num_data = min(
[len(label2texts[key]) for key in label2texts.keys()]
)
data = []
for key in label2texts.keys():
texts = label2texts[key][:num_data]
for text in texts:
data.append([key, text])
_shuffle_data(is_shuffle, data)
data = _max_count_data(n, data)
train_df, dev_df, test_df = _split_train_dev_test(
data, train_data=train_data, dev_data=dev_data, test_data=test_data
)
return train_df, dev_df, test_df
else:
err_msg = " ".join(
[f"{corpus} does not exist.",
f"Use datadownloader.download_corpus('{corpus}') ."]
)
raise Exception(err_msg)
def load_livedoor_news_corpus(n=None, is_shuffle=True, corpus_type="title",
train_data=0.8, dev_data=0.1, test_data=0.1,
random_seed=1234):
"""
Dataloader for livedoor news corpus.
The data is pre-processed and split into training data,
development data and test data.
Parameters
----------
n : int
The number of datasets
is_shuffle : bool
If true, shuffle the dataset
train_data : float
Percentage of training data
dev_data : float
Percentage of development data
test_data : float
Percentage of test data
random_seed : int
Random seed for shuffle datasets
Returns
-------
train_df : pandas.core.frame.DataFrame
The training data
dev_df : pandas.core.frame.DataFrame
The development data
test_df : pandas.core.frame.DataFrame
The test data
"""
corpus_types = ["title", "article"]
_check_correct_corpus_type(corpus_type, corpus_types)
random.seed(random_seed)
corpus = "livedoor_news_corpus"
label_names = [
"dokujo-tsushin",
"kaden-channel",
"movie-enter",
"smax",
"topic-news",
"it-life-hack",
"livedoor-homme",
"peachy",
"sports-watch"
]
corpora_dict = get_corpora_dict()
resource_dir = get_resource_dir()
filename = corpora_dict[corpus]["filename"]
filepath = os.path.join(resource_dir, filename)
if os.path.exists(filepath):
livedoor_news_corpus_dir = os.path.join(resource_dir, "text")
if not os.path.exists(livedoor_news_corpus_dir):
_extract_tarfile(filepath, resource_dir)
dirs = glob.glob(f"{livedoor_news_corpus_dir}/*")
data = []
for dir_name in dirs:
dir_basename = os.path.basename(dir_name)
if dir_basename in label_names:
files = glob.glob(f"{dir_name}/*")
for filename in files:
with open(filename, "r") as f:
article = []
for i, line in enumerate(f):
line = line.strip().replace("\t", "")
if corpus_type == "title":
if i == 2:
data.append([dir_basename, line])
if corpus_type == "article":
if i > 2:
article.append(line)
if corpus_type == "article":
article = "".join(article)
data.append([dir_basename, article])
_shuffle_data(is_shuffle, data)
data = _max_count_data(n, data)
train_df, dev_df, test_df = _split_train_dev_test(
data, train_data=train_data, dev_data=dev_data, test_data=test_data
)
return train_df, dev_df, test_df
else:
err_msg = " ".join(
[f"{corpus} does not exist.",
f"Use datadownloader.download_corpus('{corpus}') ."]
)
raise Exception(err_msg)
| 28.201172 | 86 | 0.593808 | 1,782 | 14,439 | 4.567901 | 0.098204 | 0.04914 | 0.031081 | 0.038698 | 0.69828 | 0.649509 | 0.614496 | 0.599877 | 0.577887 | 0.577887 | 0 | 0.008377 | 0.313803 | 14,439 | 511 | 87 | 28.25636 | 0.813181 | 0.230071 | 0 | 0.403922 | 0 | 0 | 0.090642 | 0.023349 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043137 | false | 0 | 0.043137 | 0.003922 | 0.137255 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d65fa24cd55928a0c15ba12dea2b7523d8ec294 | 1,540 | py | Python | v2_plugin/client/rpc_client.py | cap-ntu/Video-to-Online-Platform | 757c68d9de0778e3da8bbfa678d89251a6955573 | [
"Apache-2.0"
] | 82 | 2019-10-04T05:40:45.000Z | 2020-03-14T06:40:02.000Z | v2_plugin/client/rpc_client.py | datanadi/Video-to-Retail-Platform | 757c68d9de0778e3da8bbfa678d89251a6955573 | [
"Apache-2.0"
] | 9 | 2020-06-04T19:31:59.000Z | 2021-01-10T02:32:02.000Z | v2_plugin/client/rpc_client.py | datanadi/Video-to-Retail-Platform | 757c68d9de0778e3da8bbfa678d89251a6955573 | [
"Apache-2.0"
] | 24 | 2019-10-04T05:46:46.000Z | 2020-05-30T05:22:32.000Z | from typing import Union, Generator, Iterator

import grpc
from grpc._cython import cygrpc

from v2_plugin.protos import infer_pb2_grpc
from v2_plugin.protos.infer_pb2 import InferRequest, Empty, InferResponse


class RPCClient(object):
    def __init__(self, port: int):
        self.channel = grpc.insecure_channel(
            f'localhost:{port}',
            options=[
                (cygrpc.ChannelArgKey.max_send_message_length, -1),
                (cygrpc.ChannelArgKey.max_receive_message_length, -1),
            ]
        )
        self.stub = infer_pb2_grpc.InferProtoStub(self.channel)

    def service_request(
            self,
            request: Union[InferRequest, Iterator[InferRequest]],
    ):
        if isinstance(request, Iterator):
            return self._service_request_stream(request)
        else:
            return self._service_request(request)

    def _service_request(self, request: InferRequest) -> InferResponse:
        response = self.stub.Infer(request)
        return response

    def _service_request_stream(
            self,
            request_generator: Iterator[InferRequest]
    ) -> Generator[InferResponse, None, None]:
        responses = self.stub.StreamInfer(request_generator)
        return responses

    def stop_model_instance(self):
        response = self.stub.Stop(Empty())
        return response.status

    def close(self):
        self.channel.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
| 29.056604 | 73 | 0.650649 | 165 | 1,540 | 5.8 | 0.369697 | 0.073145 | 0.053292 | 0.037618 | 0.058516 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006173 | 0.263636 | 1,540 | 52 | 74 | 29.615385 | 0.837743 | 0 | 0 | 0.04878 | 0 | 0 | 0.01039 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.195122 | false | 0 | 0.121951 | 0.02439 | 0.487805 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
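A minimal usage sketch for the RPCClient record above, illustrative only: the port number is an assumption and the InferRequest payload fields are left unset, since neither is specified in that repository snippet.

# Hypothetical usage (assumed port; relies on the __enter__/__exit__ protocol above):
from v2_plugin.protos.infer_pb2 import InferRequest

with RPCClient(port=50051) as client:
    response = client.service_request(InferRequest())
    print(response)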
2d6692aa84acc60dbf920b08aff206899cc311ff | 1,221 | py | Python | kata/writeOutExp.py | PauloBernal/Codewars | b5db67d1b13168d3aae7310815b7de0894608e9c | [
"MIT"
] | null | null | null | kata/writeOutExp.py | PauloBernal/Codewars | b5db67d1b13168d3aae7310815b7de0894608e9c | [
"MIT"
] | null | null | null | kata/writeOutExp.py | PauloBernal/Codewars | b5db67d1b13168d3aae7310815b7de0894608e9c | [
"MIT"
] | null | null | null | # Solution by PauloBA
def expression_out(exp):
    list = []
    sorter = []
    ans = ""
    ac = ""
    c = 0
    op = {'+':'Plus', '-':'Minus', '*':'Times', '/':'Divided By', '**':'To The Power Of', '=':'Equals', '!=':'Does Not Equal'}
    expressions = {'1':'One', '2':'Two', '3':'Three', '4':'Four', '5':'Five', '6':'Six', '7':'Seven', '8':'Eight', '9':'Nine', '10':'Ten', '+':'Plus', '-':'Minus', '*':'Times', '/':'Divided By', '**':'To The Power Of', '=':'Equals', '!=':'Does Not Equal', ' ':' '}
    for i in exp:
        list.append(i)
    for i in list:
        if i == " ":
            sorter.append(ac)
            sorter.append(" ")
            ac = ""
        else:
            ac = ac + i
    for i in sorter:
        if i == " ":
            c = c + 1
        else:
            if c > 0:
                if i not in op:
                    return "That's not an operator!"
    sorter.append(ac)
    print(sorter)
    for i in range(len(sorter) - 1):
        if i != " ":
            try:
                ans = ans + expressions[sorter[i]]
            except KeyError:
                return "That's not an operator!"
    ans = ans + expressions[sorter[-1]]
    return ans


print(expression_out('2 5 10')) | 32.131579 | 264 | 0.428337 | 149 | 1,221 | 3.496644 | 0.42953 | 0.03071 | 0.046065 | 0.080614 | 0.295585 | 0.295585 | 0.203455 | 0.203455 | 0.203455 | 0.203455 | 0 | 0.025221 | 0.350532 | 1,221 | 38 | 265 | 32.131579 | 0.631778 | 0.015561 | 0 | 0.285714 | 0 | 0 | 0.203164 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0 | 0 | 0.114286 | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
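For context, tracing the kata solution above by hand: a token found in the operator table is spelled out through the expressions dictionary, while an unknown token hits the early return. Illustrative calls (outputs traced manually, so treat them as a sketch):

print(expression_out('1 + 2'))   # prints the sorter list, then 'One Plus Two'
print(expression_out('1 ? 2'))   # "That's not an operator!"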
2d6779a18808860730df772ff3a97199e9f3d2da | 4,685 | py | Python | spine_dynamics/spine2d.py | BerkeleyExpertSystemTechnologiesLab/2d-spine-control-hardware | 3fc2cdaeabf3b8ab2596e22bbd1912d84e9f062b | [
"Apache-2.0"
] | 1 | 2021-04-01T22:33:40.000Z | 2021-04-01T22:33:40.000Z | spine_dynamics/spine2d.py | GusSosa/ROS_navMod | 719cb233f7d386a60f63c4ed9fd1c8394f1b0b97 | [
"Apache-2.0"
] | null | null | null | spine_dynamics/spine2d.py | GusSosa/ROS_navMod | 719cb233f7d386a60f63c4ed9fd1c8394f1b0b97 | [
"Apache-2.0"
] | 2 | 2020-03-21T20:24:37.000Z | 2022-01-28T08:27:55.000Z | import numpy as np


class Spine:

    def __init__(self, parameters, initial, attachment_points, g=True):
        self.barlength = 0.5
        self.parameters = parameters
        self.states = initial
        self.inputs = None
        self.attachments = attachment_points
        self.rod = self.bar_length()
        self.bool = g

    def size(self):
        return len(self.states)

    def length_inputs(self, force_vector, final_positions):
        x = final_positions[0]
        y = final_positions[1]
        th = final_positions[2]
        barlength = self.barlength
        s1 = self.attachments[0, :]
        s2 = self.attachments[1, :]
        s3 = self.attachments[2, :]
        # define nodes
        n1 = np.array([x + np.cos(2 * np.pi/3 + th) * barlength, y + np.sin(2 * np.pi/3 + th) * barlength])
        n2 = np.array([x + np.cos(-2 * np.pi/3 + th) * barlength, y + np.sin(-2 * np.pi/3 + th) * barlength])
        l2_dist_vector = np.array([np.linalg.norm(np.array(n1 - s1)),
                                   np.linalg.norm(np.array(n1 - s2)),
                                   np.linalg.norm(np.array(n2 - s1)),
                                   np.linalg.norm(np.array(n2 - s3))])
        kay = np.ones(4) * 1/self.parameters[0]
        K_inv = np.diag(kay)
        self.inputs = l2_dist_vector - np.dot(force_vector, K_inv)
        return None

    def node1(self):
        barlength = self.barlength
        delta = 2 * np.pi/3
        state_t = self.current_state()
        x, y, theta = state_t[0], state_t[2], state_t[4]
        x1dot, x2dot, thetadot = state_t[1], state_t[3], state_t[5]
        n1 = np.array([x + np.cos(2 * np.pi/3 + theta) * barlength, y + np.sin(2 * np.pi/3 + theta) * barlength])
        n1_dot = np.array([x1dot - barlength * np.sin(delta + theta) * thetadot,
                           x2dot + barlength * np.cos(delta + theta) * thetadot])
        return n1, n1_dot

    def node2(self):
        barlength = self.barlength
        delta = 2 * np.pi/3
        state_t = self.current_state()
        x, y, theta = state_t[0], state_t[2], state_t[4]
        x1dot, x2dot, thetadot = state_t[1], state_t[3], state_t[5]
        n2 = np.array([x + np.cos(-2 * np.pi/3 + theta) * barlength, y + np.sin(-2 * np.pi/3 + theta) * barlength])
        n2_dot = np.array([x1dot - barlength * np.sin(-delta + theta) * thetadot,
                           x2dot + barlength * np.cos(-delta + theta) * thetadot])
        return n2, n2_dot

    def force_value(self):
        n1, n1ddt = self.node1()
        n2, n2ddt = self.node2()
        state_t = self.current_state()
        # [n1, n1, n2, n2]; [s1, s2, s1, s3]
        Node = np.array([n1, n1, n2, n2])
        ddt_Node = np.array([n1ddt, n1ddt, n2ddt, n2ddt]).T
        s1 = self.attachments[0, :]
        s2 = self.attachments[1, :]
        s3 = self.attachments[2, :]
        ref = np.array([s1, s2, s1, s3])
        length_norms = np.linalg.norm((Node - ref), axis=1)
        # (1, 4) size each
        spring_comp = self.parameters[0] * (length_norms - self.inputs)
        damping_comp = np.zeros((1, 4))
        # Hard coded ... maybe fix later
        for i in range(4):
            damping_comp[0, i] = (Node - ref)[i, :] @ ddt_Node[:, i]
        damping_comp = -self.parameters[1] * damping_comp @ np.linalg.inv(np.diag(length_norms))
        direction_components = (Node - ref)
        x_component = ((spring_comp + damping_comp) @ np.diag(direction_components[:, 0])) @ np.linalg.inv(np.diag(length_norms))
        y_component = ((spring_comp + damping_comp) @ np.diag(direction_components[:, 1])) @ np.linalg.inv(np.diag(length_norms))
        return x_component, y_component

    def gravity(self):
        return 9.81 if self.bool == True else 0

    def current_state(self):
        st = self.states
        if (st.shape[0] < 2):
            last_state = self.states[0]
        else:
            last_state = st[-1]
        return last_state

    def acc(self):
        n1, n1ddt = self.node1()
        n2, n2ddt = self.node2()
        # [n1, n1, n2, n2]; [s1, s2, s1, s3]
        Node = np.array([n1, n1, n2, n2])
        state_t = self.current_state()
        CoM = np.array([state_t[0], state_t[2]])
        relative_position = Node - CoM
        fx, fy = self.force_value()
        acc_x = 1 / self.parameters[2] * np.sum(fx)
        acc_y = 1 / self.parameters[2] * np.sum(fy) - self.gravity()
        acc_th = (relative_position[:, 0] @ (fy.T + self.gravity())) - (relative_position[:, 1] @ fx.T)
        return acc_x, acc_y, acc_th[0]

    def bar_length(spacing=None):
        return spacing
| 38.089431 | 129 | 0.544077 | 653 | 4,685 | 3.773354 | 0.171516 | 0.043831 | 0.020292 | 0.024351 | 0.525974 | 0.508117 | 0.449675 | 0.415584 | 0.415584 | 0.341721 | 0 | 0.047693 | 0.310779 | 4,685 | 123 | 130 | 38.089431 | 0.715392 | 0.027748 | 0 | 0.268817 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107527 | false | 0 | 0.010753 | 0.032258 | 0.215054 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
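An illustrative construction of the Spine class above. Every numeric value here (spring constant, damping, mass, initial state, anchor coordinates) is invented for the sketch, and length_inputs is called first because force_value reads self.inputs:

import numpy as np

params = [100.0, 0.5, 1.0]            # assumed order: [spring k, damping c, mass m]
initial = np.zeros((1, 6))            # one state row: x, xdot, y, ydot, th, thdot
anchors = np.array([[0.0, -1.0], [-1.0, 1.0], [1.0, 1.0]])  # assumed s1, s2, s3
spine = Spine(params, initial, anchors, g=True)
spine.length_inputs(np.zeros(4), [0.0, 0.0, 0.0])  # set rest lengths before forces
print(spine.acc())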
2d67ba189404870cb95d85d7c3705ce41920faad | 1,982 | py | Python | Chapter03/4_export_model.py | yasin-gh/Deep-Learning-for-Computer-Vision | d5b3e153369018029270a6a47349ee8ce7c7641e | [
"MIT"
] | 200 | 2018-01-23T16:00:19.000Z | 2022-03-26T09:28:48.000Z | Chapter03/4_export_model.py | yasin-gh/Deep-Learning-for-Computer-Vision | d5b3e153369018029270a6a47349ee8ce7c7641e | [
"MIT"
] | 11 | 2018-06-13T07:50:40.000Z | 2020-10-13T05:28:12.000Z | Chapter03/4_export_model.py | yasin-gh/Deep-Learning-for-Computer-Vision | d5b3e153369018029270a6a47349ee8ce7c7641e | [
"MIT"
] | 163 | 2018-01-24T02:38:52.000Z | 2022-01-13T20:23:17.000Z | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import os
work_dir = '/tmp'
model_version = 9
training_iteration = 1000
input_size = 784
no_classes = 10
batch_size = 100
total_batches = 200
tf_example = tf.parse_example(tf.placeholder(tf.string, name='tf_example'),
                              {'x': tf.FixedLenFeature(shape=[784], dtype=tf.float32), })
x_input = tf.identity(tf_example['x'], name='x')
y_input = tf.placeholder(tf.float32, shape=[None, no_classes])

weights = tf.Variable(tf.random_normal([input_size, no_classes]))
bias = tf.Variable(tf.random_normal([no_classes]))
logits = tf.matmul(x_input, weights) + bias

softmax_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_input, logits=logits)
loss_operation = tf.reduce_mean(softmax_cross_entropy)
optimiser = tf.train.GradientDescentOptimizer(0.5).minimize(loss_operation)

session = tf.Session()
session.run(tf.global_variables_initializer())
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

for batch_no in range(total_batches):
    mnist_batch = mnist.train.next_batch(batch_size)
    _, loss_value = session.run([optimiser, loss_operation], feed_dict={
        x_input: mnist_batch[0],
        y_input: mnist_batch[1]
    })
    print(loss_value)

signature_def = (
    tf.saved_model.signature_def_utils.build_signature_def(
        inputs={'x': tf.saved_model.utils.build_tensor_info(x_input)},
        outputs={'y': tf.saved_model.utils.build_tensor_info(y_input)},
        method_name="tensorflow/serving/predict"))

model_path = os.path.join(work_dir, str(model_version))
saved_model_builder = tf.saved_model.builder.SavedModelBuilder(model_path)
saved_model_builder.add_meta_graph_and_variables(
    session, [tf.saved_model.tag_constants.SERVING],
    signature_def_map={
        'prediction': signature_def
    },
    legacy_init_op=tf.group(tf.tables_initializer(), name='legacy_init_op'))
saved_model_builder.save()
| 38.115385 | 94 | 0.747225 | 283 | 1,982 | 4.90106 | 0.409894 | 0.057678 | 0.043259 | 0.025955 | 0.08075 | 0.046143 | 0.046143 | 0 | 0 | 0 | 0 | 0.015707 | 0.132694 | 1,982 | 51 | 95 | 38.862745 | 0.791158 | 0 | 0 | 0 | 0 | 0 | 0.039859 | 0.013118 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.068182 | 0 | 0.068182 | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
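A sketch of consuming the artifact the script above writes, using the standard TF 1.x SavedModel loader; the path simply mirrors the work_dir and model_version values set above, so adjust as needed:

import tensorflow as tf

with tf.Session(graph=tf.Graph()) as sess:
    meta_graph = tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], '/tmp/9')
    signature = meta_graph.signature_def['prediction']
    # Tensor names recorded by build_signature_def() during export.
    print(signature.inputs['x'].name, signature.outputs['y'].name)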
2d6b2ce3b9dddd7d0602dd33ae2df63bd960481e | 3,252 | py | Python | src/tests/pipeline_deployment_tests.py | jarokaz/ucaip-labs | 8db85d65a22ad3ffac8a25efea975207f6276049 | [
"Apache-2.0"
] | null | null | null | src/tests/pipeline_deployment_tests.py | jarokaz/ucaip-labs | 8db85d65a22ad3ffac8a25efea975207f6276049 | [
"Apache-2.0"
] | null | null | null | src/tests/pipeline_deployment_tests.py | jarokaz/ucaip-labs | 8db85d65a22ad3ffac8a25efea975207f6276049 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test training pipeline using local runner."""
import pytest
import sys
import os
import kfp
import tfx
from tfx.orchestration.local.local_dag_runner import LocalDagRunner
import tensorflow as tf
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
import logging
from src.pipelines import config
from src.pipelines import training_pipeline
root = logging.getLogger()
root.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
root.addHandler(handler)
MLMD_SQLLITE = "mlmd.sqllite"
NUM_EPOCHS = 1
BATCH_SIZE = 512
LEARNING_RATE = 0.001
HIDDEN_UNITS = "128,128"
def test_e2e_pipeline():
    project = os.getenv("PROJECT")
    region = os.getenv("REGION")
    model_display_name = os.getenv("MODEL_DISPLAY_NAME")
    dataset_display_name = os.getenv("DATASET_DISPLAY_NAME")
    gcs_location = os.getenv("GCS_LOCATION")
    train_limit = os.getenv("TRAIN_LIMIT", 1000)
    test_limit = os.getenv("TEST_LIMIT", 100)
    model_registry = os.getenv("MODEL_REGISTRY_URI")
    upload_model = os.getenv("UPLOAD_MODEL")

    assert project, "Environment variable PROJECT is None!"
    assert region, "Environment variable REGION is None!"
    assert dataset_display_name, "Environment variable DATASET_DISPLAY_NAME is None!"
    assert model_display_name, "Environment variable MODEL_DISPLAY_NAME is None!"
    assert gcs_location, "Environment variable GCS_LOCATION is None!"
    assert model_registry, "Environment variable MODEL_REGISTRY_URI is None!"

    logging.info(f"upload_model: {upload_model}")

    if tf.io.gfile.exists(gcs_location):
        tf.io.gfile.rmtree(gcs_location)
    logging.info(f"Pipeline e2e test artifacts stored in: {gcs_location}")

    if tf.io.gfile.exists(MLMD_SQLLITE):
        tf.io.gfile.remove(MLMD_SQLLITE)
    metadata_connection_config = metadata_store_pb2.ConnectionConfig()
    metadata_connection_config.sqlite.filename_uri = MLMD_SQLLITE
    metadata_connection_config.sqlite.connection_mode = 3
    logging.info("ML metadata store is ready.")

    pipeline_root = os.path.join(
        config.ARTIFACT_STORE_URI,
        config.PIPELINE_NAME,
    )

    runner = LocalDagRunner()
    pipeline = training_pipeline.create_pipeline(
        metadata_connection_config=metadata_connection_config,
        pipeline_root=pipeline_root,
        num_epochs=NUM_EPOCHS,
        batch_size=BATCH_SIZE,
        learning_rate=LEARNING_RATE,
        hidden_units=HIDDEN_UNITS,
    )
    runner.run(pipeline)

    logging.info(f"Model output: {os.path.join(model_registry, model_display_name)}")
    assert tf.io.gfile.exists(os.path.join(model_registry, model_display_name))
| 34.231579 | 85 | 0.757688 | 445 | 3,252 | 5.334831 | 0.350562 | 0.046335 | 0.040438 | 0.018955 | 0.09604 | 0.032856 | 0.032856 | 0.032856 | 0 | 0 | 0 | 0.012422 | 0.158364 | 3,252 | 94 | 86 | 34.595745 | 0.854951 | 0.182042 | 0 | 0 | 0 | 0 | 0.214232 | 0.010977 | 0 | 0 | 0 | 0 | 0.109375 | 1 | 0.015625 | false | 0 | 0.1875 | 0 | 0.203125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d6c5f95665e4961e4def6f0da5fefe20095d742 | 1,936 | py | Python | light_draft/utils.py | zerc/django-light-draft | 9b6da59deb48c7ea9a01831e0b346e6d232158ce | [
"MIT"
] | 8 | 2015-08-15T02:17:54.000Z | 2021-08-03T23:48:35.000Z | light_draft/utils.py | zerc/django-light-draft | 9b6da59deb48c7ea9a01831e0b346e6d232158ce | [
"MIT"
] | 14 | 2015-06-13T20:34:29.000Z | 2021-06-10T17:24:14.000Z | light_draft/utils.py | zerc/django-light-draft | 9b6da59deb48c7ea9a01831e0b346e6d232158ce | [
"MIT"
] | 3 | 2017-01-26T23:58:45.000Z | 2020-10-27T15:54:14.000Z | # coding: utf-8
from __future__ import unicode_literals
import os
try:
    import cPickle as pickle
except ImportError:
    import pickle

from uuid import uuid4

from django.db.models.fields.related import RelatedField
from django.forms.models import model_to_dict
from django.core.cache import caches

from .settings import DRAFT_TMP_DIR, DRAFT_SETTINGS
from .exceptions import CacheMissError


def make_cache_key(instance):
    """Construct a cache key for the instance."""
    prefix = '{}:{}:{}'.format(
        instance._meta.app_label,
        instance._meta.model_name,
        instance.pk
    )
    return '{}:{}'.format(prefix, str(uuid4()))


def save_model_snapshot(instance, related_objects=None):
    """Serialize the instance given."""
    key = make_cache_key(instance)
    data = {'instance': instance, 'related_objects': related_objects}
    cache = caches[DRAFT_SETTINGS['cache_name']]
    cache.set(key, pickle.dumps(data), DRAFT_SETTINGS['ttl'])
    return key


def load_from_shapshot(model, key):
    """
    Load data from the snapshot stored.

    If the value is not in cache then fall back to the old (file-based) method.
    """
    # New way of doing things
    if key.startswith(model._meta.app_label):
        cache = caches[DRAFT_SETTINGS['cache_name']]
        try:
            data = pickle.loads(cache.get(key))
        except TypeError:
            raise CacheMissError(key)
    # Old way. Deprecated.
    else:
        data = _get_data_old(model, key)

    instance = data.pop('instance')
    instance._prefetched_objects_cache = data.pop('related_objects')
    return instance


def _get_data_old(model, key):
    """DEPRECATED."""
    path = os.path.join(
        DRAFT_TMP_DIR,
        model._meta.app_label,
        model._meta.model_name,
        key
    )
    try:
        with open(path, 'rb') as f:
            return pickle.load(f)
    except IOError:
        raise CacheMissError(path)
| 25.473684 | 78 | 0.668388 | 247 | 1,936 | 5.044534 | 0.40081 | 0.041734 | 0.028892 | 0.032103 | 0.081862 | 0.05297 | 0 | 0 | 0 | 0 | 0 | 0.002004 | 0.226756 | 1,936 | 75 | 79 | 25.813333 | 0.830327 | 0.131198 | 0 | 0.102041 | 0 | 0 | 0.051064 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081633 | false | 0 | 0.22449 | 0 | 0.387755 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
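An illustrative round trip with the helpers above; `post` stands for some already-saved Django model instance and is purely hypothetical:

# post is an assumed, already-saved Django model instance.
key = save_model_snapshot(post)              # pickle into the configured cache
draft = load_from_shapshot(type(post), key)  # note the module's own spelling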
2d6db744e599c8e9e3add67c981ed0d83ade8c13 | 1,353 | py | Python | optimal_nod_combo/test/test_optimal_nods.py | jason-neal/nod_combination | 9294612112fb0b9b14d138d55f0823147436f444 | [
"MIT"
] | null | null | null | optimal_nod_combo/test/test_optimal_nods.py | jason-neal/nod_combination | 9294612112fb0b9b14d138d55f0823147436f444 | [
"MIT"
] | 8 | 2017-10-03T19:44:02.000Z | 2022-02-04T16:21:08.000Z | optimal_nod_combo/test/test_optimal_nods.py | jason-neal/nod_combination | 9294612112fb0b9b14d138d55f0823147436f444 | [
"MIT"
] | null | null | null | import numpy as np
import pkg_resources
import pytest
import optimal_nod_combo.optimal_nods_selection as ons
def test_parse_boolgrid():
    testfile = pkg_resources.resource_filename("optimal_nod_combo", "data/boolgrid_test_data.dat")
    # 11111011
    # 01111111
    # 11011101
    # 00000010
    test_grid = np.array([[1, 1, 1, 1, 1, 0, 1, 1],
                          [0, 1, 1, 1, 1, 1, 1, 1],
                          [1, 1, 0, 1, 1, 1, 0, 1],
                          [0, 0, 0, 0, 0, 0, 1, 0]], dtype=bool)
    assert np.array_equal(ons.parse_boolgrid(testfile), test_grid)


@pytest.mark.xfail
@pytest.mark.parametrize("snr", [100, 200])
@pytest.mark.parametrize("chip", [1, 2, 3, 4])
@pytest.mark.parametrize("seed", [8, 103])  # Seeds that pass this configuration.
def test_sampled_snr(snr, chip, seed):
    """Test sampled snr.

    To counteract the random failing, the seed is specified so that constant sampled values
    are drawn for each set of parameters.
    The seeds have been manually set to enable the tests to pass in this configuration.
    """
    # limits = {1: [900, 960], 2: [460, 600], 3: [240, 310], 4: [450, 490]}
    np.random.seed(seed)  # Fix the seed
    x = np.random.normal(1.0, 1 / snr, 1000)
    # sampled snr within 10% of specified value.
    assert (abs(ons.sampled_snr(x, chip) - snr) / snr) < 0.1
| 35.605263 | 98 | 0.626755 | 209 | 1,353 | 3.961722 | 0.440191 | 0.036232 | 0.039855 | 0.038647 | 0.036232 | 0.02657 | 0.024155 | 0.024155 | 0 | 0 | 0 | 0.113924 | 0.240946 | 1,353 | 37 | 99 | 36.567568 | 0.692308 | 0.31929 | 0 | 0 | 0 | 0 | 0.061728 | 0.030303 | 0 | 0 | 0 | 0 | 0.105263 | 1 | 0.105263 | false | 0 | 0.210526 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d6e47d7390a837b8d038945ea151a3a8ec47e68 | 5,627 | py | Python | hammer/deploy.py | brave-experiments/chainhammer | 03e24b223de09e06d3f2f50023229ff8af1d39ff | [
"MIT"
] | 110 | 2018-10-17T13:33:16.000Z | 2022-03-09T11:24:34.000Z | hammer/deploy.py | brave-experiments/chainhammer | 03e24b223de09e06d3f2f50023229ff8af1d39ff | [
"MIT"
] | 26 | 2018-10-16T16:01:21.000Z | 2021-09-19T17:51:13.000Z | hammer/deploy.py | brave-experiments/chainhammer | 03e24b223de09e06d3f2f50023229ff8af1d39ff | [
"MIT"
] | 51 | 2019-01-08T16:43:36.000Z | 2022-03-24T07:15:16.000Z | #!/usr/bin/env python3
"""
@summary: deploy contract
@version: v46 (03/January/2019)
@since: 2/May/2018
@organization:
@author: https://github.com/drandreaskrueger
@see: https://github.com/drandreaskrueger/chainhammer for updates
"""
################
## Dependencies:
import sys, time, json
from pprint import pprint
import requests # pip3 install requests
try:
    from web3 import Web3, HTTPProvider  # pip3 install web3
    from solc import compile_source      # pip install py-solc
except:
    print("Dependencies unavailable. Start virtualenv first!")
    exit()

# extend path for imports:
if __name__ == '__main__' and __package__ is None:
    from os import sys, path
    sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))

from hammer.config import RPCaddress, TIMEOUT_DEPLOY, PARITY_UNLOCK_EACH_TRANSACTION
from hammer.config import FILE_CONTRACT_SOURCE, FILE_CONTRACT_ABI, FILE_CONTRACT_ADDRESS
from hammer.config import GAS_FOR_SET_CALL
from hammer.clienttools import web3connection, unlockAccount

###############################################################################
## deploy example from
## http://web3py.readthedocs.io/en/latest/examples.html#working-with-contracts
## when 'latest' was 4.2.0
###############################################################################


def compileContract(contract_source_file):
    """
    Reads file, compiles, returns contract name and interface
    """
    with open(contract_source_file, "r") as f:
        contract_source_code = f.read()
    compiled_sol = compile_source(contract_source_code)  # Compiled source code
    assert(len(compiled_sol) == 1)  # assert source file has only one contract object
    contractName = list(compiled_sol.keys())[0]
    contract_interface = compiled_sol[contractName]
    return contractName.replace("<stdin>:", ""), contract_interface


def deployContract(contract_interface, ifPrint=True, timeout=TIMEOUT_DEPLOY):
    """
    deploys contract, waits for receipt, returns address
    """
    before = time.time()
    myContract = w3.eth.contract(abi=contract_interface['abi'],
                                 bytecode=contract_interface['bin'])
    tx_hash = w3.toHex(myContract.constructor().transact())
    print("tx_hash = ", tx_hash, "--> waiting for receipt (timeout=%d) ..." % timeout)
    sys.stdout.flush()
    tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash, timeout=timeout)
    print("Receipt arrived. Took %.1f seconds." % (time.time() - before))
    contractAddress = tx_receipt["contractAddress"]
    if ifPrint:
        line = "Deployed. gasUsed={gasUsed} contractAddress={contractAddress}"
        print(line.format(**tx_receipt))
    return contractAddress


def contractObject(contractAddress, abi):
    """
    recreates myContract object when given address on chain, and ABI
    """
    # Create the contract instance with the newly-deployed address
    myContract = w3.eth.contract(address=contractAddress,
                                 abi=abi)
    return myContract


##########################
## additional basic tasks:
##########################

def saveToDisk(contractAddress, abi):
    """
    save address & abi, for usage in the other script
    """
    json.dump({"address": contractAddress}, open(FILE_CONTRACT_ADDRESS, 'w'))
    json.dump(abi, open(FILE_CONTRACT_ABI, 'w'))


def loadFromDisk():
    """
    load address & abi from previous run of 'contract_CompileDeploySave'
    """
    contractAddress = json.load(open(FILE_CONTRACT_ADDRESS, 'r'))
    abi = json.load(open(FILE_CONTRACT_ABI, 'r'))
    return contractAddress["address"], abi


def contract_CompileDeploySave(contract_source_file):
    """
    compile, deploy, save
    """
    contractName, contract_interface = compileContract(contract_source_file)
    print("unlock: ", unlockAccount())
    contractAddress = deployContract(contract_interface)
    saveToDisk(contractAddress, abi=contract_interface["abi"])
    return contractName, contract_interface, contractAddress


def trySmartContractMethods(myContract, gasForSetCall=GAS_FOR_SET_CALL):
    """
    just a test if the contract's methods are working
    --> call getter then setter then getter
    """
    # get
    answer1 = myContract.functions.get().call()
    print('.get(): {}'.format(answer1))
    # set
    if PARITY_UNLOCK_EACH_TRANSACTION:
        print("unlockAccount:", unlockAccount())
    print('.set()')
    txParameters = {'from': w3.eth.defaultAccount,
                    'gas': gasForSetCall}
    tx = myContract.functions.set(answer1 + 1).transact(txParameters)
    tx_hash = w3.toHex(tx)
    print("transaction", tx_hash, "... "); sys.stdout.flush()
    tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash)
    print("... mined. Receipt --> gasUsed={gasUsed}".format(**tx_receipt))
    # get
    answer2 = myContract.functions.get().call()
    print('.get(): {}'.format(answer2))
    return answer1, tx_receipt, answer2


if __name__ == '__main__':
    global w3, NODENAME, NODETYPE, NODEVERSION, CONSENSUS, NETWORKID, CHAINNAME, CHAINID
    w3, chainInfos = web3connection(RPCaddress=RPCaddress, account=None)
    NODENAME, NODETYPE, NODEVERSION, CONSENSUS, NETWORKID, CHAINNAME, CHAINID = chainInfos
    contract_CompileDeploySave(contract_source_file=FILE_CONTRACT_SOURCE)

    # argument "andtests" runs the .set() test transaction
    if len(sys.argv) > 1 and sys.argv[1] == "andtests":
        contractAddress, abi = loadFromDisk()
        myContract = contractObject(contractAddress, abi)
        trySmartContractMethods(myContract)
| 33.295858 | 90 | 0.670695 | 603 | 5,627 | 6.094527 | 0.353234 | 0.034286 | 0.029388 | 0.017959 | 0.123537 | 0.087075 | 0.087075 | 0.032109 | 0.032109 | 0.032109 | 0 | 0.010193 | 0.180558 | 5,627 | 168 | 91 | 33.494048 | 0.786814 | 0.191043 | 0 | 0 | 0 | 0 | 0.090757 | 0.007882 | 0 | 0 | 0 | 0 | 0.0125 | 1 | 0.0875 | false | 0 | 0.125 | 0 | 0.2875 | 0.15 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d6f1cfe73a9751b5f67bc5272b869058b81ab0f | 6,501 | py | Python | tests/st/ops/gpu/test_sparse_apply_proximal_adagrad_op.py | GuoSuiming/mindspore | 48afc4cfa53d970c0b20eedfb46e039db2a133d5 | [
"Apache-2.0"
] | 3,200 | 2020-02-17T12:45:41.000Z | 2022-03-31T20:21:16.000Z | tests/st/ops/gpu/test_sparse_apply_proximal_adagrad_op.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | 176 | 2020-02-12T02:52:11.000Z | 2022-03-28T22:15:55.000Z | tests/st/ops/gpu/test_sparse_apply_proximal_adagrad_op.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | 621 | 2020-03-09T01:31:41.000Z | 2022-03-30T03:43:19.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
class Net(nn.Cell):
    def __init__(self, var, accum, lr, l1, l2):
        super(Net, self).__init__()
        self.sparse_apply_proximal_adagrad = P.SparseApplyProximalAdagrad()
        self.var = Parameter(var, name="var")
        self.accum = Parameter(accum, name="accum")
        self.lr = lr
        self.l1 = l1
        self.l2 = l2

    def construct(self, grad, indices):
        out = self.sparse_apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1, self.l2, grad, indices)
        return out


def add_testcase(var, accum, lr, l1, l2, grad, indices):
    net = Net(var, accum, lr, l1, l2)
    return net(grad, indices)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_small_shape():
    var = Tensor(np.arange(9).reshape(3, 3).astype(np.float32))
    accum = Tensor(np.zeros(9).reshape(3, 3).astype(np.float32))
    lr = 1.0
    l1 = 1.0
    l2 = 0.0
    grad = Tensor(np.ones(9).reshape(3, 3).astype(np.float32) * 8)
    indices = Tensor(np.array([1, 0, 2], np.int32))
    output1, output2 = add_testcase(var, accum, lr, l1, l2, grad, indices)
    expect1 = np.array([[-0.875, 0., 0.875],
                        [1.875, 2.875, 3.875],
                        [4.875, 5.875, 6.875]])
    expect2 = np.array([[64., 64., 64.],
                        [64., 64., 64.],
                        [64., 64., 64.]])
    np.testing.assert_array_almost_equal(output1.asnumpy(), expect1)
    np.testing.assert_array_almost_equal(output2.asnumpy(), expect2)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_small_shape_input_update():
    var = Tensor(np.arange(9).reshape(3, 3).astype(np.float32))
    accum = Tensor(np.zeros(9).reshape(3, 3).astype(np.float32))
    lr = 1.0
    l1 = 1.0
    l2 = 0.0
    grad = Tensor(np.ones(9).reshape(3, 3).astype(np.float32) * 8)
    indices = Tensor(np.array([1, 0, 2], np.int32))
    net = Net(var, accum, lr, l1, l2)
    net(grad, indices)
    expect1 = np.array([[-0.875, 0., 0.875],
                        [1.875, 2.875, 3.875],
                        [4.875, 5.875, 6.875]])
    expect2 = np.array([[64., 64., 64.],
                        [64., 64., 64.],
                        [64., 64., 64.]])
    np.testing.assert_array_almost_equal(net.var.data.asnumpy(), expect1)
    np.testing.assert_array_almost_equal(net.accum.data.asnumpy(), expect2)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_parameter_lr_l1_l2():
    var = Tensor(np.arange(9).reshape(3, 3).astype(np.float32))
    accum = Tensor(np.zeros(9).reshape(3, 3).astype(np.float32))
    lr = 100.0
    l1 = 34.0
    l2 = 16.0
    grad = Tensor(np.ones(9).reshape(3, 3).astype(np.float32) * 8)
    indices = Tensor(np.array([1, 0, 2], np.int32))
    output1, output2 = add_testcase(var, accum, lr, l1, l2, grad, indices)
    expect1 = np.array([[0., 0., 0.],
                        [0., 0., 0.],
                        [0., 0., 0.]])
    expect2 = np.array([[64., 64., 64.],
                        [64., 64., 64.],
                        [64., 64., 64.]])
    np.testing.assert_array_almost_equal(output1.asnumpy(), expect1)
    np.testing.assert_array_almost_equal(output2.asnumpy(), expect2)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_with_np_arange():
    var = Tensor(np.arange(9).reshape(3, 3).astype(np.float32))
    accum = Tensor(np.arange(63, 72).reshape(3, 3).astype(np.float32))
    lr = 1.0
    l1 = 1.0
    l2 = 2.0
    grad = Tensor(np.arange(34, 43).reshape(3, 3).astype(np.float32) * 8)
    indices = Tensor(np.array([2, 1, 0], np.int32))
    output1, output2 = add_testcase(var, accum, lr, l1, l2, grad, indices)
    expect1 = np.array([[-0.99038047, 0., 0.9914129],
                        [1.9836018, 2.9774926, 3.9716945],
                        [4.9603353, 5.9543643, 6.948723]])
    expect2 = np.array([[102463., 107648., 112961.],
                        [87682., 92483., 97412.],
                        [74053., 78470., 83015.]])
    np.testing.assert_array_almost_equal(output1.asnumpy(), expect1)
    np.testing.assert_array_almost_equal(output2.asnumpy(), expect2)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_large_shape():
    var = Tensor(np.arange(24).reshape((2, 3, 4)).astype(np.float32))
    accum = Tensor(np.arange(34, 58).reshape((2, 3, 4)).astype(np.float32))
    lr = 1.0
    l1 = 1.0
    l2 = 2.0
    grad = Tensor(np.ones(24).reshape((2, 3, 4)).astype(np.float32) * 2)
    indices = Tensor(np.arange(2).astype(np.int32))
    output1, output2 = add_testcase(var, accum, lr, l1, l2, grad, indices)
    # expected outputs are from Dchip
    expect1 = np.array([[[-0.12248275, 0.39357165, 1.1591142, 1.9289699],
                         [2.7029436, 3.4808538, 4.2625313, 5.0478177],
                         [5.836565, 6.6286335, 7.423894, 8.222222]],
                        [[9.023503, 9.82763, 10.634497, 11.444007],
                         [12.256072, 13.0706005, 13.887513, 14.706733],
                         [15.528182, 16.35179, 17.177492, 18.005226]]])
    expect2 = np.array([[[38., 39., 40., 41.],
                         [42., 43., 44., 45.],
                         [46., 47., 48., 49.]],
                        [[50., 51., 52., 53.],
                         [54., 55., 56., 57.],
                         [58., 59., 60., 61.]]])
    np.testing.assert_array_almost_equal(output1.asnumpy(), expect1)
    np.testing.assert_array_almost_equal(output2.asnumpy(), expect2)
| 41.673077 | 112 | 0.591909 | 926 | 6,501 | 4.061555 | 0.25162 | 0.025525 | 0.033502 | 0.038288 | 0.614198 | 0.58761 | 0.58761 | 0.566073 | 0.537623 | 0.528051 | 0 | 0.146006 | 0.239348 | 6,501 | 155 | 113 | 41.941935 | 0.61456 | 0.102907 | 0 | 0.534884 | 0 | 0 | 0.001891 | 0 | 0 | 0 | 0 | 0 | 0.077519 | 1 | 0.062016 | false | 0 | 0.046512 | 0 | 0.131783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d70e2e39a9218398834167253180a1fe2582bea | 3,793 | py | Python | SRT/lib/xvision/evaluation_util.py | yerang823/landmark-detection | a01bcf79abcf9d203c1b92f29b49aab9005952c3 | [
"MIT"
] | 612 | 2019-06-01T07:10:57.000Z | 2022-03-30T13:44:41.000Z | SRT/lib/xvision/evaluation_util.py | yerang823/landmark-detection | a01bcf79abcf9d203c1b92f29b49aab9005952c3 | [
"MIT"
] | 67 | 2019-06-06T15:03:02.000Z | 2021-12-17T01:51:14.000Z | SRT/lib/xvision/evaluation_util.py | yerang823/landmark-detection | a01bcf79abcf9d203c1b92f29b49aab9005952c3 | [
"MIT"
] | 121 | 2019-06-01T16:37:23.000Z | 2022-03-27T19:20:28.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os, time
import numpy as np
import torch
import json
from log_utils import print_log
from collections import OrderedDict
from scipy import interpolate
from .common_eval import evaluate_normalized_mean_error
class Eval_Meta():

    def __init__(self):
        self.reset()

    def __repr__(self):
        return ('{name}'.format(name=self.__class__.__name__) + '(number of data = {:})'.format(len(self)))

    def reset(self):
        self.predictions = []
        self.groundtruth = []
        self.image_lists = []
        self.normalizers = []

    def __len__(self) -> int:
        return len(self.image_lists)

    def __getitem__(self, index):
        assert index >= 0 and index < len(self.image_lists), 'invalid index : {:}'.format(index)
        return self.image_lists[index], self.predictions[index], self.groundtruth[index]

    def path(self, index: int) -> str:
        assert index >= 0 and index < len(self.image_lists), 'invalid index : {:}'.format(index)
        return str(self.image_lists[index])

    def error(self, index: int) -> float:
        assert index >= 0 and index < len(self.image_lists), 'invalid index : {:}'.format(index)
        preds, labels = self.predictions[index], self.groundtruth[index]
        seen = labels[2, :].astype(bool)
        if int(np.sum(seen)) == 0:
            return -1.0
        else:
            preds, labels = preds[:2, seen], labels[:2, seen]
            return float(np.linalg.norm(preds - labels, axis=0).mean())

    def append(self, _pred, _ground, image_path, face_size):
        assert _pred.shape[0] == 3 and len(_pred.shape) == 2, 'Prediction\'s shape is {:} vs [should be (3,pts) or (2,pts)]'.format(_pred.shape)
        if _ground is not None:
            assert _pred.shape == _ground.shape, 'shapes must be the same : {} vs {}'.format(_pred.shape, _ground.shape)
        if (not self.predictions) == False:
            assert _pred.shape == self.predictions[-1].shape, 'shapes must be the same : {} vs {}'.format(_pred.shape, self.predictions[-1].shape)
        self.predictions.append(_pred)
        self.groundtruth.append(_ground)
        self.image_lists.append(image_path)
        self.normalizers.append(face_size)

    def save(self, filename):
        meta = {'predictions': self.predictions,
                'groundtruth': self.groundtruth,
                'image_lists': self.image_lists,
                'normalizers': self.normalizers}
        torch.save(meta, filename)
        print('save eval-meta into {}'.format(filename))

    def load(self, filename, index=None):
        assert os.path.isfile(filename), '{:} is not a file'.format(filename)
        checkpoint = torch.load(filename)
        if index == None:
            assert isinstance(checkpoint, dict), 'invalid type of checkpoint : {:}'.format(type(checkpoint))
        else:
            checkpoint = checkpoint[index]
        self.predictions = checkpoint['predictions']
        self.groundtruth = checkpoint['groundtruth']
        self.image_lists = checkpoint['image_lists']
        self.normalizers = checkpoint['normalizers']

    def compute_mse(self, indicator, log, return_all_errors=False):
        predictions, groundtruth, normalizers, num = [], [], [], 0
        for x, gt, face in zip(self.predictions, self.groundtruth, self.normalizers):
            if gt is None:
                continue
            predictions.append(x)
            groundtruth.append(gt)
            normalizers.append(face)
            num += 1
        print_log('Filter the unlabeled data from {:} into {:} data'.format(len(self), num), log)
        if num == 0:
            nme, auc, pck_curves, _ = -1, None, None, None
        else:
            nme, auc, pck_curves, _ = evaluate_normalized_mean_error(self.predictions, self.groundtruth, self.normalizers, indicator, log)
        if return_all_errors:
            return _
        else:
            return nme, auc, pck_curves
| 40.351064 | 140 | 0.680728 | 504 | 3,793 | 4.972222 | 0.263889 | 0.047885 | 0.055866 | 0.027135 | 0.209098 | 0.195531 | 0.109338 | 0.109338 | 0.109338 | 0.109338 | 0 | 0.006783 | 0.18376 | 3,793 | 93 | 141 | 40.784946 | 0.802649 | 0.048774 | 0 | 0.066667 | 0 | 0.013333 | 0.103027 | 0 | 0 | 0 | 0 | 0 | 0.106667 | 1 | 0.146667 | false | 0 | 0.106667 | 0.026667 | 0.333333 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
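A small illustrative session with the Eval_Meta class above; the array shapes follow the (3, pts) convention asserted in append(), and all values are made up:

import numpy as np

meta = Eval_Meta()
pred = np.random.rand(3, 68)   # rows: x, y, score (68 points assumed)
gt = np.ones((3, 68))          # the third row doubles as the visibility mask
meta.append(pred, gt, 'images/face_0001.png', 120.0)
print(len(meta), meta.error(0))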
2d7346fd7418a4cd8e994940142c08eff74b1770 | 12,135 | py | Python | aiida_castep/data/usp.py | asamli/aiida-castep | 893113152460a632439c91652211381091566645 | [
"MIT"
] | 3 | 2021-09-02T16:02:47.000Z | 2021-12-17T22:38:20.000Z | aiida_castep/data/usp.py | asamli/aiida-castep | 893113152460a632439c91652211381091566645 | [
"MIT"
] | 16 | 2020-05-07T07:58:01.000Z | 2022-03-21T11:35:35.000Z | aiida_castep/data/usp.py | asamli/aiida-castep | 893113152460a632439c91652211381091566645 | [
"MIT"
] | 3 | 2020-05-25T13:05:51.000Z | 2021-12-17T22:39:12.000Z | """
Module for storing usp files into the database
"""
import warnings
from aiida.plugins import DataFactory
from aiida.common.utils import classproperty
from aiida.common.files import md5_file
from .utils import get_usp_element
OLD_USPGROUP_TYPE = "data.castep.usp.family"
USPGROUP_TYPE = "castep.otfg"
SinglefileData = DataFactory("singlefile")
# Extract element from filename
def upload_usp_family(folder,
                      group_label,
                      group_description,
                      stop_if_existing=True):
    """
    Upload a set of usp/recpot files in a given group

    :param folder: a path containing all UPF files to be added.
        Only files ending in .usp/.recpot are considered.
    :param group_label: the name of the group to create. If it exists and is
        non-empty, a UniquenessError is raised.
    :param group_description: a string to be set as the group description.
        Overwrites previous descriptions, if the group was existing.
    :param stop_if_existing: if True, check for the md5 of the files and,
        if the file already exists in the DB, raises a MultipleObjectsError.
        If False, simply adds the existing UPFData node to the group.
    """
    import os
    import aiida.common
    #from aiida.common import aiidalogger
    from aiida.common import UniquenessError, NotExistent
    from aiida.orm.querybuilder import QueryBuilder
    from .otfg import OTFGGroup

    files = [
        os.path.realpath(os.path.join(folder, i)) for i in os.listdir(folder)
        if os.path.isfile(os.path.join(folder, i)) and (
            i.lower().endswith('.usp') or i.lower().endswith('recpot')
            or i.lower().endswith('.uspcc'))
    ]
    nfiles = len(files)

    try:
        group = OTFGGroup.get(label=group_label)
        group_created = False
    except NotExistent:
        group = OTFGGroup(label=group_label, )
        group_created = True

    # Update the description even if the group already existed
    group.description = group_description

    pseudo_and_created = []  # A list of records (UspData, created)
    for f in files:
        md5sum = md5_file(f)
        qb = QueryBuilder()
        qb.append(UspData, filters={'attributes.md5': {'==': md5sum}})
        existing_usp = qb.first()
        # Add the file if it is not in the database yet
        if existing_usp is None:
            pseudo, created = UspData.get_or_create(f,
                                                    use_first=True,
                                                    store_usp=False)
            pseudo_and_created.append((pseudo, created))
        # The same file is there already
        else:
            if stop_if_existing:
                raise ValueError("A usp/recpot with identical MD5 to"
                                 " {} cannot be added with stop_if_existing"
                                 "".format(f))
            existing_usp = existing_usp[0]
            pseudo_and_created.append((existing_usp, False))

    # Check for unique per element
    elements = [(i[0].element, i[0].md5sum) for i in pseudo_and_created]
    # Check if we will duplicate after insertion
    if not group_created:
        for aiida_n in group.nodes:
            if not isinstance(aiida_n, UspData):
                continue
            elements.append((aiida_n.element, aiida_n.md5sum))
    # Discard duplicated pairs
    elements = set(elements)
    elements_names = [e[0] for e in elements]

    # Check the uniqueness of the complete group
    if not len(elements_names) == len(set(elements_names)):
        duplicates = set(
            [x for x in elements_names if elements_names.count(x) > 1])
        dup_string = ", ".join(duplicates)
        raise UniquenessError(
            "More than one usp/recpot found for the elements: " + dup_string +
            ".")

    if group_created:
        group.store()

    # Save the usp in the database if necessary and add them to the group
    for pseudo, created in pseudo_and_created:
        if created:
            pseudo.store()
            #aiidalogger.debug("New node {} created for file {}".format(
            #    pseudo.uuid, pseudo.filename))
        else:
            #aiidalogger.debug("Reusing node {} for file {}".format(
            #    pseudo.uuid, pseudo.filename))
            pass

    nodes_new = [
        pseudo for pseudo, created in pseudo_and_created if created is True
    ]
    nodes_add = [pseudo for pseudo, created in pseudo_and_created]
    group.add_nodes(nodes_add)

    return nfiles, len(nodes_new)


class UspData(SinglefileData):
    """
    Class for a single usp file

    These usp files are stored as individual file nodes in the database
    """

    def __init__(self, **kwargs):
        """
        Initialize a UspData node

        :param file str: A full path to the file of the potential
        :param element: The element that this pseudo potential should be used for
        """
        element = kwargs.pop("element", None)
        self._abs_path = kwargs["file"]
        super(UspData, self).__init__(**kwargs)
        # Overrides the element inferred
        if element is not None:
            self.set_element(element)

    @classmethod
    def get_or_create(cls,
                      filename,
                      element=None,
                      use_first=False,
                      store_usp=True):
        """
        Same as init. Check the md5 in the db; if it is found, return a UspData.
        Otherwise the data will be stored in the db

        :return (usp, created)
        """
        import aiida.common.utils
        import os

        # Convert the filename to an absolute path
        filename = str(filename)
        if filename != os.path.abspath(filename):
            raise ValueError("filename must be an absolute path")
        md5 = md5_file(filename)

        # Check if we have got the file already
        pseudos = cls.from_md5(md5)
        if len(pseudos) == 0:
            # No existing pseudopotential file is in the database
            instance = cls(file=filename)
            # If there is an element given then set it
            if element is not None:
                instance.set_element(element)
            # Store the usp if requested
            if store_usp is True:
                instance.store()
            return (instance, True)
        else:
            if len(pseudos) > 1:
                if use_first:
                    return (pseudos[0], False)
                else:
                    pks = ", ".join([str(i.pk) for i in pseudos])
                    raise ValueError("More than one copy of a pseudopotential"
                                     " found. pks={}".format(pks))
            else:
                return (pseudos[0], False)

    @classmethod
    def from_md5(cls, md5):
        """
        Return a list of all usp pseudopotentials that match a given MD5 hash.

        Note that the hash has to be stored in a md5 attribute, otherwise
        the pseudo will not be found.
        We use a special md5 attribute to avoid searching through
        irrelevant data types.
        """
        from aiida.orm.querybuilder import QueryBuilder
        qb = QueryBuilder()
        qb.append(cls, filters={'attributes.md5': {'==': md5}})
        return [_ for [_] in qb.all()]

    @classproperty
    def uspfamily_type_string(cls):
        """
        Type string of the underlying group; deprecated, as the new
        Group should be accessed by sub-classing
        """
        return USPGROUP_TYPE

    def store(self, *args, **kwargs):
        """
        Store the node. Automatically set md5 and element
        """
        # Cannot revalidate the stored nodes
        if not self.is_stored:
            self._validate()
        return super(UspData, self).store(*args, **kwargs)

    def set_file(self, filename):
        """
        Extract element and compute the md5 hash
        """
        filename = str(filename)
        try:
            element = get_usp_element(filename)
        except KeyError:
            element = None
        else:
            # Only set the element if it is not there
            if self.element is None:
                if element is not None:
                    self.set_element(element)
                else:
                    warnings.warn(
                        "Cannot extract element from the usp/recpot file {}."
                        "Please set it manually.".format(filename))
            else:
                # The element is already set, no need to process further
                pass

        md5sum = md5_file(filename)
        self.set_attribute('md5', md5sum)
        super(UspData, self).set_file(filename)

    def set_element(self, element):
        """
        Set the element
        """
        self.set_attribute('element', element)

    @property
    def element(self):
        return self.get_attribute('element', None)

    @property
    def md5sum(self):
        """MD5 sum of the usp/recpot file"""
        return self.get_attribute('md5', None)

    @property
    def string(self):
        """Alias of the md5sum"""
        return self.md5sum

    @classmethod
    def get_usp_group(cls, group_label):
        """
        Return the UspFamily group with the given name.
        """
        from .otfg import OTFGGroup
        return OTFGGroup.objects.get(label=group_label)

    @classmethod
    def get_usp_groups(cls, filter_elements=None, user=None):
        """
        Return all names of groups of type UpfFamily, possibly with some filters.

        :param filter_elements: A string or a list of strings.
            If present, returns only the groups that contain one Upf for
            every element present in the list. Default=None, meaning that
            all families are returned.
        :param user: if None (default), return the groups for all users.
            If defined, it should be either a DbUser instance, or a string
            for the username (that is, the user email).
        """
        from .otfg import OTFGGroup
        from aiida.orm import QueryBuilder
        from aiida.orm import User

        query = QueryBuilder()
        query.append(OTFGGroup, tag='group', project=['*'])

        if user:
            query.append(User,
                         filters={'email': {
                             '==': user
                         }},
                         with_group='group')

        if isinstance(filter_elements, str):
            filter_elements = [filter_elements]

        if filter_elements is not None:
            actual_filter_elements = [_ for _ in filter_elements]
            query.append(
                cls,
                filters={'attributes.element': {
                    'in': filter_elements
                }},
                with_group='group')

        query.order_by({OTFGGroup: {'id': 'asc'}})
        return [_[0] for _ in query.all()]

    def _validate(self):
        from aiida.common import ValidationError

        super(UspData, self)._validate()

        # Check again, in case things changed
        usp_abspath = str(self._abs_path)
        if not usp_abspath:
            raise ValidationError("No valid usp file was passed")

        parsed_element = get_usp_element(usp_abspath)
        md5 = md5_file(usp_abspath)

        attr_element = self.element
        if attr_element is None:
            raise ValidationError("No element is set")

        attr_md5 = self.get_attribute('md5', None)
        if self.md5sum is None:
            raise ValidationError("attribute 'md5' not set.")
        if md5 != attr_md5:
            raise ValidationError(
                "Mismatch between stored md5 and actual md5 value")

        # Warn if the parsed element (if any) is not matching the attribute
        if attr_element != parsed_element and parsed_element is not None:
            raise ValidationError("Attribute 'element' says '{}' but '{}' was "
                                  "parsed from file name instead.".format(
                                      attr_element, parsed_element))
| 33.246575 | 81 | 0.581294 | 1,437 | 12,135 | 4.803758 | 0.208072 | 0.013038 | 0.016225 | 0.01043 | 0.074026 | 0.052006 | 0.039838 | 0.022889 | 0.011299 | 0 | 0 | 0.006488 | 0.339514 | 12,135 | 364 | 82 | 33.337912 | 0.854772 | 0.261557 | 0 | 0.198068 | 0 | 0 | 0.076471 | 0.002584 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067633 | false | 0.014493 | 0.086957 | 0.004831 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
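An illustrative call to the family uploader defined above; the folder path, group label, and description are placeholders, not values from that repository:

nfiles, nuploaded = upload_usp_family(
    '/path/to/pseudos',            # placeholder directory of .usp/.recpot files
    'example_usp_family',          # placeholder group label
    'Example OTFG/usp family',
    stop_if_existing=False)
print('{} files scanned, {} new nodes stored'.format(nfiles, nuploaded))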
2d75e1f7b3019c732370aebcb421ac7f64618032 | 391 | py | Python | exercicio76.py | juniooor/Exercicios-python | aed87da4f93d0e6083b1a8c3af4081a028f145de | [
"MIT"
] | null | null | null | exercicio76.py | juniooor/Exercicios-python | aed87da4f93d0e6083b1a8c3af4081a028f145de | [
"MIT"
] | null | null | null | exercicio76.py | juniooor/Exercicios-python | aed87da4f93d0e6083b1a8c3af4081a028f145de | [
"MIT"
] | null | null | null | # Write a program that reads a vector of 5 integers and shows the sum, the product, and the numbers.
import numpy

vetor = []
for n in range(0, 5):
    valor = int(input(f'digite o {n+1} valor: '))
    vetor.append(valor)
soma = sum(vetor)
mult = numpy.prod(vetor)
print('-=-'*20)
print(f'A soma dos números: {soma}')
print(f'A multiplicação dos números: {mult}')
print(f'Os números foram: {vetor}') | 32.583333 | 102 | 0.700767 | 69 | 391 | 3.971014 | 0.565217 | 0.065693 | 0.051095 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01791 | 0.143223 | 391 | 12 | 103 | 32.583333 | 0.8 | 0.258312 | 0 | 0 | 0 | 0 | 0.382759 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0.363636 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d7932bd6170e191f10ab22372c26a26105e5226 | 1,430 | py | Python | src/callbacks/loggers/dsb15_vsr_logger.py | cmlab-mira/Efficient-and-Phase-aware-Video-Super-resolution-for-Cardiac-MRI | ec01b783f8acd41a7056431bad615896b8495f95 | [
"MIT"
] | 11 | 2020-08-09T08:08:56.000Z | 2022-01-18T14:25:22.000Z | src/callbacks/loggers/dsb15_vsr_logger.py | cmlab-mira/Efficient-and-Phase-aware-Video-Super-resolution-for-Cardiac-MRI | ec01b783f8acd41a7056431bad615896b8495f95 | [
"MIT"
] | 2 | 2021-09-13T09:48:41.000Z | 2021-11-08T14:20:58.000Z | src/callbacks/loggers/dsb15_vsr_logger.py | cmlab-mira/Efficient-and-Phase-aware-Video-Super-resolution-for-Cardiac-MRI | ec01b783f8acd41a7056431bad615896b8495f95 | [
"MIT"
] | 4 | 2020-08-30T14:13:35.000Z | 2021-09-14T09:26:55.000Z | import torch
from torchvision.utils import make_grid
from src.callbacks.loggers.base_logger import BaseLogger
class Dsb15VSRLogger(BaseLogger):
    """The DSB15 logger for the Video Super-Resolution.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def _add_images(self, epoch, train_batch, train_outputs, valid_batch, valid_outputs):
        """Plot the visualization results.

        Args:
            epoch (int): The number of trained epochs.
            train_batch (dict): The training batch.
            train_outputs (list of torch.Tensor): The training outputs.
            valid_batch (dict): The validation batch.
            valid_outputs (list of torch.Tensor): The validation outputs.
        """
        train_hr_img = make_grid(train_batch['hr_imgs'][-1], nrow=1, normalize=True, scale_each=True, pad_value=1)
        train_sr_img = make_grid(train_outputs[-1], nrow=1, normalize=True, scale_each=True, pad_value=1)
        valid_hr_img = make_grid(valid_batch['hr_imgs'][-1], nrow=1, normalize=True, scale_each=True, pad_value=1)
        valid_sr_img = make_grid(valid_outputs[-1], nrow=1, normalize=True, scale_each=True, pad_value=1)

        train_grid = torch.cat([train_hr_img, train_sr_img], dim=-1)
        valid_grid = torch.cat([valid_hr_img, valid_sr_img], dim=-1)
        self.writer.add_image('train', train_grid)
        self.writer.add_image('valid', valid_grid)
| 46.129032 | 114 | 0.684615 | 203 | 1,430 | 4.527094 | 0.305419 | 0.043526 | 0.047878 | 0.065288 | 0.29815 | 0.29815 | 0.239391 | 0.239391 | 0.239391 | 0.239391 | 0 | 0.015748 | 0.200699 | 1,430 | 30 | 115 | 47.666667 | 0.788276 | 0.25035 | 0 | 0 | 0 | 0 | 0.023904 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d795eed2ed7fc391a4e36bd92025ec9b213bb6d | 728 | py | Python | built-in/ACL_PyTorch/Official/cv/Yolov3_for_Pytorch/get_coco_info.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | built-in/ACL_PyTorch/Official/cv/Yolov3_for_Pytorch/get_coco_info.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 1 | 2022-01-20T03:11:05.000Z | 2022-01-20T06:53:39.000Z | built-in/ACL_PyTorch/Official/cv/Yolov3_for_Pytorch/get_coco_info.py | Ascend/modelzoo | f018cfed33dbb1cc2110b9ea2e233333f71cc509 | [
"Apache-2.0"
] | 2 | 2021-07-10T12:40:46.000Z | 2021-12-17T07:55:15.000Z | import os
import sys
file_path = sys.argv[1]
coco_info = sys.argv[2]
info_name = sys.argv[3]
image_names = []
image_size = []
with open(coco_info, 'r') as file:
    contents = file.read().split('\n')

for content in contents[:-1]:
    temp = content.split()
    key = temp[1]
    image_names.append(key[key.rfind('/') + 1:].split('.')[0])
    image_size.append([temp[2], temp[3]])

name_size = dict(zip(image_names, image_size))

with open(info_name, 'w') as file:
    index = 0
    for key, val in name_size.items():
        bin_name = os.path.join(file_path, '{}.bin'.format(key))
        content = ' '.join([str(index), bin_name, val[0], val[1]])
        file.write(content)
        file.write('\n')
        index += 1
| 22.75 | 66 | 0.603022 | 114 | 728 | 3.710526 | 0.368421 | 0.049645 | 0.070922 | 0.089835 | 0.12766 | 0.12766 | 0 | 0 | 0 | 0 | 0 | 0.022569 | 0.208791 | 728 | 31 | 67 | 23.483871 | 0.711806 | 0 | 0 | 0 | 0 | 0 | 0.020661 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.086957 | 0 | 0.086957 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
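The script above reads three positional arguments and, judging from the temp[1]..temp[3] indexing, assumes one whitespace-separated record per line; the sample below is a guess at that layout, not a documented format:

# Assumed invocation:
#   python get_coco_info.py <bin_dir> <coco_info_file> <output_info_file>
# Assumed coco_info line ("<id> <image path> <width> <height>"):
#   0 val2017/000000000139.jpg 640 426
# which becomes, in the output file:
#   0 <bin_dir>/000000000139.bin 640 426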
2d796085312e5e944c4bfb1233eb5f50d50be44c | 7,433 | py | Python | experiments/2021-02-18-knockouts/hpcc/old/aggregate_knockouts.py | amlalejini/plastic-evolvability-avida | 909d944e52a102e09dd714a8b7e0f2274c4a8072 | [
"MIT"
] | 2 | 2021-09-16T14:47:43.000Z | 2021-10-31T04:55:16.000Z | experiments/2021-02-18-knockouts/hpcc/old/aggregate_knockouts.py | amlalejini/evolutionary-consequences-of-plasticity | 909d944e52a102e09dd714a8b7e0f2274c4a8072 | [
"MIT"
] | null | null | null | experiments/2021-02-18-knockouts/hpcc/old/aggregate_knockouts.py | amlalejini/evolutionary-consequences-of-plasticity | 909d944e52a102e09dd714a8b7e0f2274c4a8072 | [
"MIT"
] | 2 | 2020-08-19T20:01:14.000Z | 2020-12-21T21:24:12.000Z | '''
Aggregate data
'''
import argparse, os, sys, errno, subprocess, csv
phenotypic_traits = ["not","nand","and","ornot","or","andnot"]#,"nor","xor","equals"]
even_traits = {"not", "and", "or"}#, "nor", "equals"}
odd_traits = {"nand", "ornot", "andnot", "xor"}#, "equals"}
even_profile = "101010"#101"
odd_profile = "010101"#011"
all_profile = "111111"#111"
"""
This is functionally equivalent to the mkdir -p [fname] bash command
"""
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def extract_params_cmd_log(path):
content = None
with open(path, "r") as fp:
content = fp.read().strip()
content = content.replace("./avida", "")
params = [param.strip() for param in content.split("-set") if param.strip() != ""]
cfg = {param.split(" ")[0]:param.split(" ")[1] for param in params}
return cfg
def read_avida_dat_file(path):
content = None
with open(path, "r") as fp:
content = fp.read().strip().split("\n")
legend_start = 0
legend_end = 0
# Where does the legend table start?
for line_i in range(0, len(content)):
line = content[line_i].strip()
if line == "# Legend:": # Handles analyze mode detail files.
legend_start = line_i + 1
break
if "# 1:" in line: # Handles time.dat file.
legend_start = line_i
break
# For each line in legend table, extract field
fields = []
for line_i in range(legend_start, len(content)):
line = content[line_i].strip()
if line == "":
legend_end = line_i
break
fields.append( line.split(":")[-1].strip().lower().replace(" ", "_") )
data = []
for line_i in range(legend_end, len(content)):
line = content[line_i].strip()
if line == "": continue
data_line = line.split(" ")
if len(data_line) != len(fields):
print("data fields mismatch!")
print(fields)
print(data_line)
exit(-1)
data.append({field:value for field,value in zip(fields, data_line)})
return data
def simple_match_coeff(a, b):
if len(a) != len(b):
print(f"Length mismatch! {a} {b}")
exit(-1)
return sum(ai==bi for ai,bi in zip(a,b))
def main():
    parser = argparse.ArgumentParser(description="Run submission script.")
    parser.add_argument("--data_dir_file", type=str, help="Filename that lists all data directories")
    parser.add_argument("--output_dir", type=str, help="Where to dump this?", default=".")
    args = parser.parse_args()
    data_dir_filename = args.data_dir_file
    output_dir = args.output_dir
    mkdir_p(output_dir)
    # Aggregate run directories.
    run_dirs = []
    with open(data_dir_filename, 'r') as fp:
        for line in fp:
            line = line.strip()
            if line != '':
                run_dirs.append(line)
    # For each run directory:
    # - get id, get command line configuration settings
    summary_header = None
    summary_content_lines = []
    file_str = ''
    for run_dir in run_dirs:
        if not os.path.exists(os.path.join(run_dir, 'data', 'analysis')):
            print('Skipping: ', run_dir)
            continue
        summary_info = {} # Hold summary information about run. (one entry per run)
        print(f"processing {run_dir}")
        ############################################################
        # Extract command line configuration settings (from the cmd.log file).
        cmd_log_path = os.path.join(run_dir, "cmd.log")
        cmd_params = extract_params_cmd_log(cmd_log_path)
        # Infer environmental change and change rate from the events file.
        chg_env = "chg" in cmd_params["EVENT_FILE"]
        env_cond = cmd_params["EVENT_FILE"].split(".")[0].replace("events-", "").lower()
        seed = cmd_params["RANDOM_SEED"]
        sensors = cmd_params["DISABLE_REACTION_SENSORS"]
        summary_info["chg_env"] = chg_env
        summary_info["environment"] = env_cond
        for field in cmd_params:
            summary_info[field] = cmd_params[field]
        ############################################################

        ############################################################
        # Extract environment-specific knockout information.
        if not os.path.exists(os.path.join(run_dir, "data", "analysis", "env_all", "knockouts.dat")):
            print('Skipping (all): ', run_dir)
            continue
        if not os.path.exists(os.path.join(run_dir, "data", "analysis", "env_odd", "knockouts.dat")):
            print('Skipping (odd): ', run_dir)
            continue
        if not os.path.exists(os.path.join(run_dir, "data", "analysis", "env_even", "knockouts.dat")):
            print('Skipping (even): ', run_dir)
            continue
        muts_env_all = read_avida_dat_file(os.path.join(run_dir, "data", "analysis", "env_all", "knockouts.dat"))
        muts_env_odd = read_avida_dat_file(os.path.join(run_dir, "data", "analysis", "env_odd", "knockouts.dat"))
        muts_env_even = read_avida_dat_file(os.path.join(run_dir, "data", "analysis", "env_even", "knockouts.dat"))
        # (each of these files should contain multiple genotypes: the focal genotype plus its knockouts)
        if len(muts_env_all) <= 1 and len(muts_env_even) <= 1 and len(muts_env_odd) <= 1:
            print("Unexpected number of genotypes in knockout data files.")
            exit(-1)
        for org_id in range(len(muts_env_all)):
            phenotype_even = "".join([muts_env_even[org_id][trait] for trait in phenotypic_traits])
            phenotype_odd = "".join([muts_env_odd[org_id][trait] for trait in phenotypic_traits])
            phenotype_all = "".join([muts_env_all[org_id][trait] for trait in phenotypic_traits])
            phenotype_task_order = ";".join(phenotypic_traits)
            change_odd_even = phenotype_even != phenotype_odd
            match_score_even = simple_match_coeff(phenotype_even, even_profile)
            match_score_odd = simple_match_coeff(phenotype_odd, odd_profile)
            match_score_all = simple_match_coeff(phenotype_all, all_profile)
            file_str += \
                str(chg_env) + ',' + \
                env_cond + ',' + \
                sensors + ',' + \
                seed + ',' + \
                str(org_id) + ',' + \
                muts_env_all[org_id]['fitness'] + ',' + \
                muts_env_odd[org_id]['fitness'] + ',' + \
                muts_env_even[org_id]['fitness'] + ',' + \
                phenotype_all + ',' + \
                phenotype_odd + ',' + \
                phenotype_even + ',' + \
                phenotype_task_order + ',' + \
                str(change_odd_even) + ',' + \
                str(match_score_all) + ',' + \
                str(match_score_odd) + ',' + \
                str(match_score_even) + \
                '\n'
    # Write out the aggregated data.
    with open(os.path.join(output_dir, "knockout_data.csv"), "w") as fp:
        out_content = 'chg_env,environment,sensors,seed,org_id,fit_all,fit_odd,fit_even,phenotype_all,phenotype_odd,phenotype_even,phenotype_task_order,change_odd_even,match_score_all,match_score_odd,match_score_even\n' + file_str
        fp.write(out_content)
if __name__ == "__main__":
    main()
| 40.617486 | 230 | 0.579039 | 936 | 7,433 | 4.353632 | 0.226496 | 0.020614 | 0.022086 | 0.025521 | 0.272393 | 0.235583 | 0.225276 | 0.225276 | 0.225276 | 0.156074 | 0 | 0.008028 | 0.262613 | 7,433 | 182 | 231 | 40.840659 | 0.73545 | 0.087582 | 0 | 0.121429 | 0 | 0.007143 | 0.146638 | 0.033697 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0.007143 | 0.007143 | 0 | 0.064286 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d7b465a448fc41487d7a727d6a65f1eae989c39 | 1,456 | py | Python | experiments/simple_function.py | darthdeus/bopt | 9a9e0587172e5bfbe4df4b1d1b86513dde15a07b | [
"MIT"
] | 5 | 2019-03-04T21:48:21.000Z | 2020-10-01T19:32:34.000Z | experiments/simple_function.py | darthdeus/master-thesis-code | 9a9e0587172e5bfbe4df4b1d1b86513dde15a07b | [
"MIT"
] | 48 | 2019-10-04T04:59:30.000Z | 2021-08-02T04:28:32.000Z | experiments/simple_function.py | darthdeus/bopt | 9a9e0587172e5bfbe4df4b1d1b86513dde15a07b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import math
import random
if __name__ == "__main__":
    # Parse arguments
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--activation", default="relu", type=str, help="activation")
    parser.add_argument("--x", default=0.0, type=float, help="X")
    parser.add_argument("--y", default=0.0, type=float, help="Y")
    parser.add_argument("--z", default=1.0, type=float, help="Z")
    parser.add_argument("--w", default=1.0, type=float, help="W")
    parser.add_argument("--foo", type=str,
                        help="Only used for manual params testing.")
    parser.add_argument("--bar", type=str,
                        help="Only used for manual params testing.")
    args = parser.parse_args()
    SIMPLE = False
    if SIMPLE:
        result = - abs(args.y - 2) + 1
    else:
        if args.activation == "sigmoid":
            act = 1.0
        elif args.activation == "relu":
            act = 2.0
        elif args.activation == "tanh":
            act = -1.0
        else:
            raise NotImplementedError()
        result = act * (args.x + math.log2(args.y) + args.w ** 3.3
                        + random.random() * 0.05 + 2*args.z)
    # result = abs(args.x) - abs(args.y) + 1
    # result = abs(args.y - 2) + 1
    # result = math.log(5 + result + np.random.normal(0, args.y ** 2))
    # result = args.x + 2 * args.y - args.z**2
    print("RESULT={}".format(result))
| 30.978723 | 84 | 0.559066 | 195 | 1,456 | 4.092308 | 0.307692 | 0.078947 | 0.149123 | 0.070175 | 0.253133 | 0.253133 | 0.102757 | 0.102757 | 0.102757 | 0 | 0 | 0.030274 | 0.274038 | 1,456 | 46 | 85 | 31.652174 | 0.724693 | 0.146291 | 0 | 0.133333 | 0 | 0 | 0.126112 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d7c5fd17d4db26834e86eb81f9bed5d3da9d417 | 4,692 | py | Python | src/risk_methods_pyomo.py | DavimenUC3M/IronIA-RoboAdvisor | 06d37889d5cb9c40139ceb6a41c959b92fff3291 | [
"MIT"
] | null | null | null | src/risk_methods_pyomo.py | DavimenUC3M/IronIA-RoboAdvisor | 06d37889d5cb9c40139ceb6a41c959b92fff3291 | [
"MIT"
] | null | null | null | src/risk_methods_pyomo.py | DavimenUC3M/IronIA-RoboAdvisor | 06d37889d5cb9c40139ceb6a41c959b92fff3291 | [
"MIT"
] | 2 | 2022-01-31T21:56:44.000Z | 2022-02-02T10:28:00.000Z | import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import time
import random
import datetime
from datetime import date
from datetime import timedelta
from dateutil.relativedelta import relativedelta
import pickle
from pyomo.environ import *
from pyomo.opt import SolverFactory
def pyomo(portfolio,rsk_metric,risk=0.05,verbose=True): # rsk_metric = CVaR, CDaR, MAD, ML
    if verbose:
        print("Using pyomo...")
    prices_df_main = portfolio.copy()
    N = len(prices_df_main.columns)
    #betas = np.random.normal(0, 0.01, N)
    with open('data/working_dates.txt') as f:
        contents = f.read()
    dates_ = contents.split("\n")[1:-1]  # Working dates for train
    J = len(dates_)
    rate_return_df = prices_df_main.pct_change().dropna()
    mean_daily_returns = np.array(rate_return_df.mean())  # C
    model = ConcreteModel()
    model.Weights = RangeSet(0, N-1)
    model.Y = RangeSet(0, J-1)
    model.x = Var(model.Weights, domain=NonNegativeReals, bounds=(0, 1))
    def obj_expression(model):
        return sum(model.x[i]*mean_daily_returns[i] for i in model.Weights)
    model.OBJ = Objective(rule=obj_expression, sense=maximize)
    def sum_one(model):
        return sum(model.x[i] for i in model.Weights) <= 1
    model.cons1 = Constraint(rule=sum_one)
    W = risk
    if rsk_metric == 'MAD':
        #W = 0.005
        model.u_plus = Var(model.Y, domain=NonNegativeReals, bounds=(0.0, None))
        model.u_minus = Var(model.Y, domain=NonNegativeReals, bounds=(0.0, None))
        def dummy_cons(model):
            return (1/J)*sum(model.u_plus[j] + model.u_minus[j] for j in model.Y) <= W
        model.cons1_MAD = Constraint(rule = dummy_cons)
        summatory = sum(sum(rate_return_df.loc[dates_[n]][p] *model.x[p] for p in model.Weights) for n in model.Y)
        def c5_MAD(model, j):
            date = dates_[j]
            return sum(rate_return_df.loc[date][i] *model.x[i] for i in model.Weights) - ((1/J)*summatory) == (model.u_plus[j] - model.u_minus[j])
        model.cons2_MAD = Constraint(model.Y, rule = c5_MAD)
    if rsk_metric == 'CVaR':
        #W = 0.005
        ### CVaR
        model.slack = Var()
        alpha = 1-risk  # Confidence level
        model.w = Var(model.Y, domain=NonNegativeReals)
        def dummy_cons(model):
            return model.slack + (1/(1-alpha))*(1/J)*sum(model.w[j] for j in model.Y) <= W
        model.cons1_CVaR = Constraint(rule = dummy_cons)
        def c5_CVaR(model, j):
            date = dates_[j]
            return -sum(rate_return_df.loc[date][i] *model.x[i] for i in model.Weights) - model.slack <= model.w[j]
        model.cons2_CVaR = Constraint(model.Y, rule = c5_CVaR)
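        # The two constraints above are the Rockafellar-Uryasev linearization of
        # CVaR_a(L) = min_eta { eta + E[(L - eta)^+] / (1 - a) }: model.slack plays
        # the role of eta, and each model.w[j] bounds the positive part of the
        # day-j portfolio loss minus eta.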
    if rsk_metric == 'CDaR':
        #W = 0.1
        ### CDaR
        model.slack = Var()
        model.z = Var()
        alpha = 1-risk  # Confidence level
        model.w = Var(model.Y, domain=NonNegativeReals)
        def dummy_cons(model):
            return model.slack + (1/(1-alpha))*(1/J)*sum(model.w[j] for j in model.Y) <= W
        model.cons1_CDaR = Constraint(rule = dummy_cons)
        def c5_CDaR(model, j):
            act_sum = rate_return_df.loc[:dates_[j]].sum()
            return model.z - sum(act_sum[i]* model.x[i] for i in model.Weights) - model.slack <= model.w[j]
        model.cons2_CDaR = Constraint(model.Y, rule = c5_CDaR)
        def c5_max_(model, j):
            act_sum = rate_return_df.loc[:dates_[j]].sum()
            return sum(act_sum[i]* model.x[i] for i in model.Weights) <= model.z
        model.cons3_CDaR = Constraint(model.Y, rule = c5_max_)
    if rsk_metric == 'ML':
        ### MAX LOSS
        model.w = Var(RangeSet(0, 0), domain=NonNegativeReals, bounds=(0, 1))
        def dummy_cons(model):
            return model.w[0] <= W
        model.cons1_ML = Constraint(rule = dummy_cons)
        def c5_MaxLoss(model, j):
            date = dates_[j]
            return -sum(rate_return_df.loc[date][i] *model.x[i] for i in model.Weights) <= model.w[0]
        model.cons2_ML = Constraint(model.Y, rule = c5_MaxLoss)
    # K = 0.001
    # def market_neutral_upper(model):
    #     return sum(model.x[i]*betas[i] for i in model.Weights) <= K
    # model.cons2 = Constraint(rule=market_neutral_upper)
    # def market_neutral_lower(model):
    #     return sum(model.x[i]*betas[i] for i in model.Weights) >= -K
    # model.cons3 = Constraint(rule=market_neutral_lower)
    Results = SolverFactory('cplex').solve(model)
    weights_dict = {}
    for i in model.x:
        if model.x[i].value > 0:
            weights_dict[prices_df_main.columns[i]] = model.x[i].value
    return weights_dict | 33.755396 | 147 | 0.619565 | 708 | 4,692 | 3.967514 | 0.180791 | 0.03738 | 0.027412 | 0.03916 | 0.494838 | 0.463154 | 0.361695 | 0.353151 | 0.337131 | 0.288359 | 0 | 0.020639 | 0.246164 | 4,692 | 139 | 148 | 33.755396 | 0.773537 | 0.103581 | 0 | 0.193182 | 0 | 0 | 0.013394 | 0.005262 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.147727 | 0.068182 | 0.420455 | 0.011364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d7dc9646261c9a51d014f513b2eda60e1f80e3c | 4,868 | py | Python | lms/services/oauth_http.py | hypothesis/lms | 722dac444dc1e73298eea5193f871f3ddefe46fd | [
"BSD-2-Clause"
] | 38 | 2017-12-30T23:49:53.000Z | 2022-02-15T21:07:49.000Z | lms/services/oauth_http.py | hypothesis/lms | 722dac444dc1e73298eea5193f871f3ddefe46fd | [
"BSD-2-Clause"
] | 1,733 | 2017-11-09T18:46:05.000Z | 2022-03-31T11:05:50.000Z | lms/services/oauth_http.py | hypothesis/lms | 722dac444dc1e73298eea5193f871f3ddefe46fd | [
"BSD-2-Clause"
] | 10 | 2018-07-11T17:12:46.000Z | 2022-01-07T20:00:23.000Z | from marshmallow import fields
from lms.services import ExternalRequestError, OAuth2TokenError
from lms.validation import RequestsResponseSchema
from lms.validation.authentication import OAuthTokenResponseSchema
class _OAuthAccessTokenErrorResponseSchema(RequestsResponseSchema):
"""Schema for parsing OAuth 2 access token error response bodies."""
error = fields.String(required=True)
class OAuthHTTPService:
"""Send OAuth 2.0 requests and return the responses."""
def __init__(self, http_service, oauth2_token_service):
self._http_service = http_service
self._oauth2_token_service = oauth2_token_service
def get(self, *args, **kwargs):
return self.request("GET", *args, **kwargs)
def put(self, *args, **kwargs):
return self.request("PUT", *args, **kwargs)
def post(self, *args, **kwargs):
return self.request("POST", *args, **kwargs)
def patch(self, *args, **kwargs):
return self.request("PATCH", *args, **kwargs)
def delete(self, *args, **kwargs):
return self.request("DELETE", *args, **kwargs)
def request(self, method, url, headers=None, **kwargs):
"""
Send an access token-authenticated request and return the response.
This will look up the user's access token in the DB and insert it into
the `headers` dict as an OAuth 2-formatted "Authorization" header.
Otherwise this method behaves the same as HTTPService.request().
The given `headers` must not already contain an "Authorization" header.
:raise OAuth2TokenError: if we don't have an access token for the user
:raise ExternalRequestError: if something goes wrong with the HTTP
request
"""
headers = headers or {}
assert "Authorization" not in headers
access_token = self._oauth2_token_service.get().access_token
headers["Authorization"] = f"Bearer {access_token}"
return self._http_service.request(method, url, headers=headers, **kwargs)
def get_access_token(self, token_url, redirect_uri, auth, authorization_code):
"""
Make an access token request and save the token in the DB.
Send an OAuth 2.0 "access token request"
(https://datatracker.ietf.org/doc/html/rfc6749#section-4.1.3) to get a
new access token for the current user and save it to the DB.
:raise ExternalRequestError: if the HTTP request fails
:raise ValidationError: if the server's access token response is invalid
"""
self._token_request(
token_url=token_url,
auth=auth,
data={
"redirect_uri": redirect_uri,
"grant_type": "authorization_code",
"code": authorization_code,
},
)
def refresh_access_token(self, token_url, redirect_uri, auth):
"""
Make a refresh token request and save the new token in the DB.
Send an OAuth 2.0 "refresh token request"
(https://datatracker.ietf.org/doc/html/rfc6749#section-6) to get a new
access token for the current user and save it to the DB.
:raise OAuth2TokenError: if we don't have a refresh token for the user
:raise ExternalRequestError: if the HTTP request fails
:raise ValidationError: if the server's access token response is invalid
"""
refresh_token = self._oauth2_token_service.get().refresh_token
try:
return self._token_request(
token_url=token_url,
auth=auth,
data={
"redirect_uri": redirect_uri,
"grant_type": "refresh_token",
"refresh_token": refresh_token,
},
)
except ExternalRequestError as err:
try:
error_dict = _OAuthAccessTokenErrorResponseSchema(err.response).parse()
except ExternalRequestError:
pass
else:
if error_dict["error"] == "invalid_grant":
# Looks like our refresh token has expired or been revoked.
raise OAuth2TokenError() from err
raise
def _token_request(self, token_url, data, auth):
response = self._http_service.post(token_url, data=data, auth=auth)
validated_data = OAuthTokenResponseSchema(response).parse()
self._oauth2_token_service.save(
validated_data["access_token"],
validated_data.get("refresh_token"),
validated_data.get("expires_in"), # pylint:disable=no-member
)
return validated_data["access_token"]
def factory(_context, request):
    return OAuthHTTPService(
        request.find_service(name="http"), request.find_service(name="oauth2_token")
    )
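# Illustrative use (hypothetical service name and URL; how `factory` is registered
# with Pyramid is not shown in this file):
#   svc = request.find_service(name="oauth_http")
#   resp = svc.get("https://lms.example.com/api/v1/courses")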
| 36.328358 | 87 | 0.639482 | 567 | 4,868 | 5.343915 | 0.261023 | 0.061716 | 0.035644 | 0.033003 | 0.359076 | 0.344554 | 0.273597 | 0.234653 | 0.209571 | 0.193069 | 0 | 0.008772 | 0.274035 | 4,868 | 133 | 88 | 36.601504 | 0.848613 | 0.30567 | 0 | 0.142857 | 0 | 0 | 0.076387 | 0 | 0 | 0 | 0 | 0 | 0.014286 | 1 | 0.157143 | false | 0.014286 | 0.057143 | 0.085714 | 0.385714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d7f13ee6c79a5677524e21efff1f42498888361 | 2,131 | py | Python | sound/raw-sample-to-wav.py | luke-iqt/Audio-Sensor-Toolkit | 6f74fd04435bfd325988a95e2cc3b7aeee5dca72 | [
"Apache-2.0"
] | null | null | null | sound/raw-sample-to-wav.py | luke-iqt/Audio-Sensor-Toolkit | 6f74fd04435bfd325988a95e2cc3b7aeee5dca72 | [
"Apache-2.0"
] | null | null | null | sound/raw-sample-to-wav.py | luke-iqt/Audio-Sensor-Toolkit | 6f74fd04435bfd325988a95e2cc3b7aeee5dca72 | [
"Apache-2.0"
] | null | null | null | """ A small utility that converts raw pcm audio into wav files
Converts raw PCM samples into .wav files. The .wav files will be stored alongside the .raw files
`--input` the path to where the raw files are being stored.
"""
import json
import os, sys, time, hmac, hashlib
import requests
import serial
import argparse
import errno
import wave
import audioop
import struct
from datetime import datetime
from pydub import AudioSegment, effects
# Parses command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, help='path to the .raw files', required=True)
args = parser.parse_args()
if os.path.exists(args.input):
print("Converting raw files in: {}".format(os.path.abspath(args.input)))
else:
print("Input path does not exist")
os.exit()
filepath = os.path.abspath(args.input)
# These values should be adjusted to match the sample length and sample rate used in the Arduino program
sample_sec = 10
sample_hz = 16000
samples_required = sample_sec * sample_hz
bytes_required = samples_required * 2
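# With the defaults above: 10 s * 16000 Hz = 160,000 samples; at 2 bytes per
# 16-bit sample that is 320,000 bytes per capture.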
directory = os.fsencode(args.input)
listOfFiles = list()
for (dirpath, dirnames, filenames) in os.walk(args.input):
    #filename = os.fsdecode(file)
    listOfFiles += [os.path.join(dirpath, file) for file in filenames]
    for file in filenames:
        print("{} & {}".format(dirpath, file))
        if os.path.isfile(dirpath + "/" + file) and file.endswith(".raw"):
            f = open(dirpath + "/" + file, "rb")
            raw_audio = AudioSegment.from_file(dirpath + "/" + file, format="raw",
                                               frame_rate=sample_hz, channels=1, sample_width=2)
            pre, ext = os.path.splitext(file)
            wav_filename = pre + ".wav"
            loud_wav_filename = pre + "-loud.wav"
            raw_audio.export(dirpath + "/" + wav_filename, format="wav")
            normalizedsound = effects.normalize(raw_audio)
            normalizedsound.export(dirpath + "/" + loud_wav_filename, format="wav")
            #raw_audio.export(dirpath + "/" + wav_filename, format="wav")
            #os.remove(dirpath + "/" + file)
| 33.825397 | 104 | 0.667292 | 281 | 2,131 | 4.975089 | 0.41637 | 0.025751 | 0.023605 | 0.042918 | 0.094421 | 0.062947 | 0.062947 | 0.062947 | 0.062947 | 0 | 0 | 0.006002 | 0.218207 | 2,131 | 63 | 105 | 33.825397 | 0.833133 | 0.220084 | 0 | 0 | 0 | 0 | 0.073289 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.275 | 0 | 0.275 | 0.075 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d8002b758ef830bf5fd3893c4239ae218341721 | 7,003 | py | Python | game/class_/new_terrain.py | Michael78912/SMNW | da5a7278cf1a14e2af2e9c291a5f53ba21f265e2 | [
"MIT"
] | 1 | 2018-12-11T23:42:57.000Z | 2018-12-11T23:42:57.000Z | game/class_/new_terrain.py | Michael78912/SMNW | da5a7278cf1a14e2af2e9c291a5f53ba21f265e2 | [
"MIT"
] | null | null | null | game/class_/new_terrain.py | Michael78912/SMNW | da5a7278cf1a14e2af2e9c291a5f53ba21f265e2 | [
"MIT"
] | null | null | null | """an improvement to the deprecated "terrain" module.
it uses the Block class rather than just strings, which are
now mutable, and easier to use, and change. it also uses blocks based
off of real materials; for example if the template is "sand" it will
actually use a block called "Sand". (Mostly inspired by Minecraft's block system)
"""
__all__ = ['Terrain']
import random
import re
import time
import logging
import numpy
import pygame
from lark import Lark, Transformer
try:
    import block
except ImportError:
    from . import block
# screen size for actual gameplay
X, Y = 800, 400
HEADER_GRAMAMAR = r"""
?start : header
number : NUMBER
?header : "@" [assignment ("|" assignment)*]
?assignment : assignment_air | assignment_water | assignment_size
assignment_water : "water"i "=" colour
assignment_size : "size"i "=" number
assignment_air : "air"i "=" colour
colour : "(" [number "," number "," number ("," number)?] ")"
NUMBER : /[0-9]/+
COMMENT : ";" /[^\n]/*
%import common.WS
%ignore WS
%ignore COMMENT
"""
class Terrain:
"""default handler for terrain in SMNW."""
raw = ''
file = ''
built_image = None
grid = None
size = 10
air = (0, 0, 0)
water = (0, 0, 128)
def check_validity(self):
"""check the validity of all given data so far"""
# check to make sure aall blocks will fit on the screen
assert X % self.size == 0 and Y % self.size == 0, "Block size is not valid"
# check to see if any colours exceed valid range
assert any(not (x > 255 or x < 0) for x in self.air + self.water), \
"Invalid colour given. air: {} water: {}".format(
self.air, self.water)
def __repr__(self):
return "Terrain With {{.size = {size},\
.air = {air}, .water = {water}, .file = {file}}}".format(
size=self.size,
air=self.air,
water=self.water,
file=self.file,
)
def __str__(self):
# basically just return the terrain file (stripped of comments)
return self.raw
def __getitem__(self, pos):
# take a block ID or position, and return the block with that ID
return self.grid[int(pos[1])] [pos[0]]
def __setitem__(self, pos, value):
# take a position and set it to a new block
self.grid[pos[1]][pos[0]] = value
def blocks_to_px(self, blocks):
"""convert blocks to pixels"""
return blocks * self.size
def px_to_blocks(self, pixels):
"""convert pixels to blocks"""
return pixels // self.size
def px_pos_to_blocks(self, pos):
"""convert the point (in pixels) given to blocks."""
return (
pos[0] // self.size,
pos[1] // self.size,
)
def get_pixels(self, pixels):
"""get a valid Pixels object based on blocks."""
return _Pixels(pixels // self.size, pixels)
def is_solid_at(self, pos):
"""return true if the block given is fully solid."""
return self[pos].solid == 1
def load(self, file, template):
"""load from file. create an array that contains
as many blocks as nessecary.
"""
self.file = file
# large chunk of data
data = file.read()
# create parser used for pulling data from the header.
header_parser = Lark(HEADER_GRAMAMAR)
# find the header, and create an AST from the found header.
header = re.search(r'@.(.*=.*)*', data)
tree = header_parser.parse(header.group(0))
# use the data from the header to transform the terrain object
# accordingly.
HeaderTransformer(self).transform(tree)
# make sure al data is OK.
self.check_validity()
# create new array for containing the blocks.
y = 0
self.grid = numpy.empty(shape=(Y // self.size, X //
self.size), dtype=block.Block)
airsurf = pygame.Surface((10, 10))
airsurf.fill(self.air)
watersurf = pygame.Surface((10, 10))
watersurf.fill(self.water)
# iterate through all lines.
for line in data.split('\n'):
x = 0
if line.startswith((';', '@')):
# entire comment/header line. continue
continue
# remove a comment if there is one
line = line.split(';')[0].strip()
if not line:
# blank line
continue
# iterate through each character in the line
for item in line:
self.raw += line + '\n'
try:
self.grid[y][x] = block.block(
item, template, airsurf, watersurf, (x, y))
except AssertionError:
# not implemented, replace a pit/sign with air for now.
self.grid[y][x] = block.Air((x, y), airsurf)
x += 1
y += 1
def get_spawn_point(self, x):
"""get and return a valid spawn point given the column."""
column = self.grid[:, self.px_to_blocks(x)]
print(column)
top_levels = [y for y, block in enumerate(column) if block.top]
return self.blocks_to_px(random.choice(top_levels))
def build(self):
"""build surface"""
image = pygame.Surface((X, Y))
time1 = time.time()
for y in range(0, Y // self.size):
for x in range(0, X // self.size):
current_block = self.grid[y][x]
try:
image.blit(current_block.image,
current_block.get_rect(self.size))
except AttributeError:
logging.warning('block at position (%d, %d) is null', x, y)
time2 = time.time()
print('took', time2 - time1, 'seconds to build')
self.built_image = image
return image
class HeaderTransformer(Transformer):
"""transforms tokens into an actual terrain object"""
colour = tuple
def __init__(self, terrain_obj):
super().__init__()
self.terrain = terrain_obj
def assignment_air(self, args):
"""get air colours"""
self.terrain.air = args[0]
def assignment_water(self, args):
"""get water colours"""
self.terrain.water = args[0]
def assignment_size(self, args):
"""get size"""
self.terrain.size = args[0]
@staticmethod
def number(val):
"""convert colour list to tuple"""
return int(val[0])
class _Pixels:
"""simple class for representing pixels."""
def __init__(self, pixels=0, blocks=0):
self.pixels = pixels
self.blocks = blocks
def main():
"""test function"""
terrain = Terrain()
terrain.load(open('terrains\\downhill.smr-terrain'), 'dirt')
pygame.image.save(terrain.build(), "howdy.png")
if __name__ == "__main__":
    main()
| 28.237903 | 83 | 0.568471 | 885 | 7,003 | 4.40452 | 0.281356 | 0.02668 | 0.013853 | 0.007696 | 0.007696 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011873 | 0.314437 | 7,003 | 247 | 84 | 28.352227 | 0.800042 | 0.241896 | 0 | 0.034965 | 0 | 0 | 0.115978 | 0.005789 | 0 | 0 | 0 | 0 | 0.020979 | 1 | 0.13986 | false | 0 | 0.076923 | 0.020979 | 0.370629 | 0.013986 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d816edd0e723b7f9d0fadbd185c8ed86a86f2a4 | 7,823 | py | Python | report/excelreporter.py | zoltantorok/solaredge | 673fc87896be70cf7f63f63be1e2049b1b8d0de3 | [
"MIT"
] | 1 | 2020-04-18T05:43:49.000Z | 2020-04-18T05:43:49.000Z | report/excelreporter.py | zoltantorok/solaredge | 673fc87896be70cf7f63f63be1e2049b1b8d0de3 | [
"MIT"
] | null | null | null | report/excelreporter.py | zoltantorok/solaredge | 673fc87896be70cf7f63f63be1e2049b1b8d0de3 | [
"MIT"
] | null | null | null | '''
Created on 22.11.2019
@author: Zoli
'''
import openpyxl
from os import path
from pathlib import Path
import atexit
class ExcelReporter(object):
    '''
    classdocs
    '''

    def __init__(self, outputFilename):
        '''
        Constructor
        '''
        self.outputFilename = outputFilename
        name, ext = path.splitext(self.outputFilename)
        excelExt = '.xlsx'
        if ext.casefold() != excelExt:
            self.outputFilename = path.join(Path(self.outputFilename).parent, name) + excelExt
        self.workBook = openpyxl.Workbook()
        atexit.register(self.storeWorkbook)

    def storeWorkbook(self):
        self.workBook.save(self.outputFilename)

    def writeEnergyData(self, energyData, energyTypes, costCalculator):
        lineNumber = 1
        lastYear = 0
        lastWeekNumber = -1
        firstTimestampOfWeek = None
        timestamps = list(energyData.keys())
        for timestamp in timestamps:
            if lastYear == 0:
                lastYear = timestamp.year
                worksheet = self.workBook.active
                worksheet.title = str(timestamp.year)
            elif timestamp.year != lastYear:
                self.writeYearTotals(worksheet, lastYear, energyData, energyTypes, costCalculator)
                lastYear = timestamp.year
                worksheet = self.workBook.create_sheet(str(timestamp.year))
                lineNumber = 1
                lastWeekNumber = -1
                firstTimestampOfWeek = None
            if timestamp.date().isocalendar()[1] != lastWeekNumber:
                # New week
                # Calculate the total of the week that just finished
                if firstTimestampOfWeek is not None:
                    worksheet['I' + str(lineNumber)] = 'Total (KWh)'
                    # Add the total week cost/refund
                    weekEnergyData = self.getDictSubsetByKeyRange(energyData, timestamps[timestamps.index(firstTimestampOfWeek):timestamps.index(timestamp)])
                    self.writeWeekTotals(worksheet, lineNumber, weekEnergyData, energyTypes, costCalculator)
                lastWeekNumber = timestamp.date().isocalendar()[1]
                lineNumber = lineNumber + 10
                firstTimestampOfWeek = timestamp
            value = energyData[timestamp]
            column = self.getColumnForWeekday(timestamp.weekday())
            for val in range(len(value)):
                worksheet[column + str(lineNumber)] = timestamp.strftime("%A")
                worksheet[column + str(lineNumber + 1)] = timestamp.day
                worksheet['A' + str(lineNumber + 1)] = timestamp.strftime("%B")
                worksheet['A' + str(lineNumber + val + 2)] = energyTypes[val]
                cell = column + str(lineNumber + val + 2)
                if worksheet[cell].value is None:
                    worksheet[cell] = value[val] / 1000.0
                else:
                    worksheet[cell] = worksheet[cell].value + value[val] / 1000.0
        if firstTimestampOfWeek != timestamp:
            worksheet['I' + str(lineNumber)] = 'Total (KWh)'
            # Add the total week cost/refund
            weekEnergyData = self.getDictSubsetByKeyRange(energyData, timestamps[timestamps.index(firstTimestampOfWeek):timestamps.index(timestamp)])
            self.writeWeekTotals(worksheet, lineNumber, weekEnergyData, energyTypes, costCalculator)
        if lastYear == timestamp.year:
            self.writeYearTotals(worksheet, timestamp.year, energyData, energyTypes, costCalculator)

    def writeWeekTotals(self, worksheet, lineNumber, weekEnergyData, energyTypes, costCalculator):
        worksheet['I' + str(lineNumber)] = 'Total (KWh)'
        # Add the total week cost/refund
        weekCost = costCalculator.calculateCost(weekEnergyData, energyTypes)
        worksheet['K' + str(lineNumber)] = 'Week cost'
        worksheet['L' + str(lineNumber)] = weekCost
        worksheet['M' + str(lineNumber)] = 'currency'
        sumEnergyWeek = self.sumEnergyData(weekEnergyData)
        self.writeSelfSufficiency(worksheet, sumEnergyWeek, energyTypes, startingCell='K', lineNumber=lineNumber + 1)
        self.writeOwnConsumption(worksheet, sumEnergyWeek, energyTypes, startingCell='K', lineNumber=lineNumber + 2)
        for val in range(len(sumEnergyWeek)):
            worksheet['I' + str(lineNumber + val + 2)] = sumEnergyWeek[val] / 1000.0

    def writeSelfSufficiency(self, worksheet, energyData, energyTypes, startingCell, lineNumber):
        worksheet[startingCell + str(lineNumber)] = 'Self-sufficiency'
        worksheet[chr(ord(startingCell) + 1) + str(lineNumber)] = energyData[energyTypes.index('SelfConsumption')] / energyData[energyTypes.index('Consumption')] * 100
        worksheet[chr(ord(startingCell) + 2) + str(lineNumber)] = '%'

    def writeOwnConsumption(self, worksheet, energyData, energyTypes, startingCell, lineNumber):
        worksheet[startingCell + str(lineNumber)] = 'Own-consumption'
        valueCell = chr(ord(startingCell) + 1) + str(lineNumber)
        production = energyData[energyTypes.index('Production')]
        if production > 0:
            feedIn = energyData[energyTypes.index('FeedIn')]
            worksheet[valueCell] = 100.0 * (production - feedIn) / production
        else:
            worksheet[valueCell] = 0
        worksheet[chr(ord(startingCell) + 2) + str(lineNumber)] = '%'

    def writeYearTotals(self, worksheet, year, energyData, energyTypes, costCalculator):
        yearTimestampBorders = []
        timestamps = list(energyData.keys())
        for timestamp in timestamps:
            if timestamp.year == year:
                if len(yearTimestampBorders) == 0:
                    yearTimestampBorders.append(timestamp)
                    yearTimestampBorders.append(timestamp)
                    continue
                yearTimestampBorders[1] = timestamp
        yearEnergyData = self.getDictSubsetByKeyRange(energyData, timestamps[timestamps.index(yearTimestampBorders[0]):timestamps.index(yearTimestampBorders[1])])
        yearEnergyCost = costCalculator.calculateCost(yearEnergyData, energyTypes)
        worksheet['A3'] = 'Year totals'
        worksheet['B2'] = 'Start date'
        worksheet['C2'] = 'End date'
        worksheet['B3'] = yearTimestampBorders[0].date().strftime('%x')
        worksheet['C3'] = yearTimestampBorders[1].date().strftime('%x')
        worksheet['A5'] = 'Cost'
        worksheet['B5'] = yearEnergyCost
        worksheet['C5'] = 'currency'
        yearEnergySum = self.sumEnergyData(yearEnergyData)
        worksheet['A6'] = 'Purchased (KWh)'
        worksheet['B6'] = yearEnergySum[energyTypes.index('Purchased')] / 1000.0
        worksheet['A7'] = 'FeedIn (KWh)'
        worksheet['B7'] = yearEnergySum[energyTypes.index('FeedIn')] / 1000.0
        self.writeSelfSufficiency(worksheet, yearEnergySum, energyTypes, startingCell='A', lineNumber=8)
        self.writeOwnConsumption(worksheet, yearEnergySum, energyTypes, startingCell='A', lineNumber=9)

    def getDictSubsetByKeyRange(self, dictionary, keyRange):
        outDict = {}
        for key in dictionary.keys():
            if key in keyRange:
                outDict[key] = dictionary[key]
        return outDict

    def getColumnForWeekday(self, weekday):
        # weekday() is 0 for Monday .. 6 for Sunday, mapping to columns 'B'..'H'.
        return chr(ord('B') + weekday)

    def sumEnergyData(self, energyData):
        keys = list(energyData.keys())
        sumValues = [0] * len(energyData[keys[0]])
        for key in keys:
            value = energyData[key]
            for idx in range(len(value)):
                sumValues[idx] = sumValues[idx] + value[idx]
        return sumValues
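# Illustrative usage (hypothetical data shapes): energyData maps datetimes to a
# list of per-type Wh values ordered like energyTypes, e.g.
#   reporter = ExcelReporter('report.xlsx')
#   reporter.writeEnergyData(energyData,
#                            ['Production', 'Consumption', 'SelfConsumption', 'FeedIn', 'Purchased'],
#                            costCalculator)
# The workbook is saved automatically at interpreter exit via atexit.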
| 40.324742 | 167 | 0.611786 | 672 | 7,823 | 7.114583 | 0.220238 | 0.048944 | 0.029283 | 0.019243 | 0.308095 | 0.28927 | 0.22192 | 0.194311 | 0.175905 | 0.153315 | 0 | 0.015127 | 0.281733 | 7,823 | 193 | 168 | 40.533679 | 0.835736 | 0.021092 | 0 | 0.19685 | 0 | 0 | 0.034146 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.07874 | false | 0 | 0.031496 | 0.007874 | 0.141732 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d84f2336aa5d99e92cb5f3f9407aab62855ea8a | 8,120 | py | Python | Swarm/main.py | Wykleph/Swarm | c0b10c673a555213fdf46dd0648a2c729dd33494 | [
"MIT"
] | 7 | 2018-10-04T01:15:03.000Z | 2018-11-03T01:51:28.000Z | Swarm/main.py | Wykleph/Swarm | c0b10c673a555213fdf46dd0648a2c729dd33494 | [
"MIT"
] | 2 | 2018-10-04T05:47:58.000Z | 2018-10-04T15:48:49.000Z | Swarm/main.py | Wykleph/Swarm | c0b10c673a555213fdf46dd0648a2c729dd33494 | [
"MIT"
] | 1 | 2018-10-04T03:57:46.000Z | 2018-10-04T03:57:46.000Z | import random
from datetime import datetime
from hashlib import md5
import pygame
from bots import AttackBot, DefenseBot, RangedBot, RepairBot
from bots import BuilderBot, KamikazeBot, Swarm, MotherShipBot, Supplies
from game import Display, Settings, Arena, Colors, ShipRoles, Game
from sim import Queue, QueueControl, ClockControl, Clock
class Threads(object):
    threads = []
def setup():
    q1 = Queue()
    QueueControl.queues.append(q1)
    Settings.bot_size = 10, 10
    Display.size = Settings.display_size
    BOT_SIZE = Settings.bot_size
    arena = Arena()
    arena.game = Game
    arena.display = Display
    arena.supply_drop = Supplies
    roles = [AttackBot, DefenseBot, RangedBot, RepairBot, BuilderBot, KamikazeBot]
    enemy_roles = [AttackBot, DefenseBot, RangedBot, KamikazeBot, BuilderBot]
    Game.arena = arena
    Game.objects['Swarm'] = Swarm
    Game.objects['MotherShipBot'] = MotherShipBot
    ShipRoles.player_roles = roles
    ShipRoles.enemy_roles = enemy_roles
    player_swarm = Swarm(name='Player 1', arena=arena)
    for i in range(15):
        player_swarm.add_bot(random.choice(roles)(arena))
    for i in range(1, 11):
        now = datetime.now()
        now = Clock.now = (now.hour, now.minute, now.second, now.microsecond)
        s = Swarm(name='Enemy {}'.format(md5(bytearray(str(now).encode('utf-8'))).hexdigest()), arena=arena)
        m = MotherShipBot(arena)
        s.mothership = m
        s.add_bot(m)
        s.arena = arena
        for _ in range(1, 11):
            s.add_bot(random.choice(roles)(arena))
        arena.place_swarm(s)
    player_swarm.mothership = MotherShipBot(arena)
    player_swarm.add_bot(player_swarm.mothership)
    arena.place_swarm(player_swarm)
    pygame.init()
    # Set the width and height of the screen [width, height]
    size = Display.size
    screen = pygame.display.set_mode(size)
    pygame.display.set_caption("Swarm")
    # Loop until the user clicks the close button.
    done = False
    # Used to manage how fast the screen updates
    clock = pygame.time.Clock()
    main(clock, screen, pygame, arena, done)
def draw_callback(bot, count):
    if bot.speed >= 1 and count % 15 == 0:
        bot.move()
        bot.attack()
    elif bot.speed >= 2 and count % 10 == 0:
        bot.move()
        bot.attack()
    elif bot.speed >= 3 and count % 5 == 0:
        bot.move()
        bot.attack()
    elif bot.speed >= 4 and count % 3 == 0:
        bot.move()
        bot.attack()
def govern_speed(x, y, bot, count):
    arena = bot.arena
    if bot.speed >= 1 and count % 10 == 0:
        arena.move_bot(x, y, bot)
    elif bot.speed >= 2 and count % 10 == 0:
        arena.move_bot(x, y, bot)
    elif bot.speed >= 3 and count % 5 == 0:
        arena.move_bot(x, y, bot)
    elif bot.speed >= 4 and count % 3 == 0:
        arena.move_bot(x, y, bot)
def process_move(event, player_mothership, count, auto_move=False):
    pm = player_mothership
    if pm.is_dead:
        return False
    if event.key == 273 or event.key == 119:  # UP OR W
        govern_speed(pm.grid_x, pm.grid_y - 1, pm, count)
    elif event.key == 276 or event.key == 97:  # LEFT OR A
        govern_speed(pm.grid_x - 1, pm.grid_y, pm, count)
    elif event.key == 274 or event.key == 115:  # DOWN OR S
        govern_speed(pm.grid_x, pm.grid_y + 1, pm, count)
    elif event.key == 275 or event.key == 100:  # RIGHT OR D
        govern_speed(pm.grid_x + 1, pm.grid_y, pm, count)
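# Note: the raw key codes above are SDL1/pygame-1.x values: 273-276 are the arrow
# keys (K_UP, K_DOWN, K_RIGHT, K_LEFT) and 97/100/115/119 are ASCII a/d/s/w.
# Key 9 (TAB) toggles auto_move in the main loop below.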
# -------- Main Program Loop -----------
def main(clock, screen, pygame, arena, done):
    count = 0
    BOT_SIZE = Settings.bot_size
    now = datetime.now()
    now = (now.hour, now.minute, now.second)
    last = now
    player_swarm = arena.swarms['Player 1']
    pm = player_swarm.mothership
    moving = False
    moving_keys = [273, 274, 275, 276, 97, 100, 115, 119]
    auto_move = False
    while not done:
        if count % 10000 == 0:
            count = 0
        # --- Main event loop
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                process_move(event, pm, count)
                if event.key in moving_keys:
                    moving = event
                if event.key == 9:
                    auto_move = True if not auto_move else False
                    print('auto_move:', auto_move)
                print(event.key)
            elif event.type == pygame.KEYUP:
                if event.key in moving_keys:
                    moving = False
            elif event.type == pygame.QUIT:
                done = True
        # --- Game logic should go here

        # --- Screen-clearing code goes here
        # Here, we clear the screen to white. Don't put other drawing commands
        # above this, or they will be erased with this command.
        # If you want a background image, replace this clear with blit'ing the
        # background image.
        screen.fill(Colors.BLACK)
        # --- Drawing code should go here
        all_objects = arena.all_bots().copy()
        all_objects.update(arena.supplies)
        for obj in all_objects:
            if obj.is_dead:
                if obj.swarm != 'Supplies':
                    obj.swarm.remove(obj)
                if obj != pm:
                    del obj
                continue
            # todo: Can we have objects only run detect and select_target when a ship move is processed to increase
            #       performance? Since ship moves are only processed every `count` iterations, we should be able to
            #       refactor these methods to run with the move process so we can free up CPU cycles for other tasks.
            obj.detect()
            # if Clock.now[2] % 5 == 0 or first_loop:
            obj.select_target()
            # if the grid does not contain the bot at the current location then
            if arena.grid[obj.grid_x][obj.grid_y] != obj:
                if isinstance(obj, Supplies):
                    arena.supplies.remove(obj)
                else:
                    arena.bots.remove(obj)
                continue
            if isinstance(obj.swarm, Swarm) and obj.swarm.name == 'Player 1':
                e = pygame.draw.ellipse(screen, obj.color, [obj.grid_x * BOT_SIZE[0], obj.grid_y * BOT_SIZE[1], BOT_SIZE[0], BOT_SIZE[1]], 0)
            elif isinstance(obj, Supplies):
                e = pygame.draw.rect(screen, obj.color, [obj.grid_x * BOT_SIZE[0], obj.grid_y * BOT_SIZE[1], BOT_SIZE[0], BOT_SIZE[1]], 0)
            else:
                e = pygame.draw.rect(screen, obj.color, [obj.grid_x * BOT_SIZE[0], obj.grid_y * BOT_SIZE[1], BOT_SIZE[0], BOT_SIZE[1]], 0)
            if obj.target:
                lx = [obj.grid_x*10 + Display.bot_hsize[0], obj.grid_y*10 + Display.bot_hsize[1]]
                ly = [obj.target.grid_x*10 + Display.bot_hsize[0], obj.target.grid_y*10 + Display.bot_hsize[1]]
                pygame.draw.line(screen, obj.color, lx, ly, 1)
            if isinstance(obj, MotherShipBot) and obj.swarm == player_swarm:
                if moving:
                    process_move(moving, pm, count)
                elif auto_move:
                    draw_callback(obj, count)
            else:
                draw_callback(obj, count)
        now = datetime.now()
        now = Clock.now = (now.hour, now.minute, now.second, now.microsecond)
        if now[2] % 10 == 0 and last != now:
            # todo: get rid of .remove_swarms and change it so that it is only responsible for spawning swarms.
            #       removing swarms should be done when the last ship in a swarm is destroyed.
            arena.remove_swarms()
            last = now
            # print('Removing Dead Swarms...')
        # --- Go ahead and update the screen with what we've drawn.
        pygame.display.flip()
        # --- Limit to 30 frames per second
        count += 1
        clock.tick(30)
    # Close the window and quit.
    pygame.quit()
if __name__ == '__main__':
    setup()
    QueueControl.shutdown = True
    ClockControl.shutdown = True
    for thread in Threads.threads:
        thread.join()
    print('done')
| 34.40678 | 141 | 0.589778 | 1,108 | 8,120 | 4.222022 | 0.23556 | 0.025438 | 0.015391 | 0.009406 | 0.256092 | 0.234929 | 0.20372 | 0.172296 | 0.131039 | 0.131039 | 0 | 0.025972 | 0.302956 | 8,120 | 235 | 142 | 34.553191 | 0.80053 | 0.15702 | 0 | 0.254545 | 0 | 0 | 0.013204 | 0 | 0 | 0 | 0 | 0.004255 | 0 | 1 | 0.030303 | false | 0 | 0.048485 | 0 | 0.09697 | 0.018182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d86ef22f5b7c353f140244ee9100a2e0d7dc5c5 | 9,562 | py | Python | usap_calc.py | hermian/USAP | 78065d6134d18f1b8fd00aeddd7b8e6ca718efdd | [
"MIT"
] | null | null | null | usap_calc.py | hermian/USAP | 78065d6134d18f1b8fd00aeddd7b8e6ca718efdd | [
"MIT"
] | 2 | 2021-12-02T07:09:27.000Z | 2021-12-08T03:28:13.000Z | usap_calc.py | hermian/USAP | 78065d6134d18f1b8fd00aeddd7b8e6ca718efdd | [
"MIT"
] | null | null | null | # %% [markdown]
# pip install pykrx
# %%
from datetime import datetime, timedelta
import FinanceDataReader as fdr
import yfinance as yf
import numpy as np
import pandas as pd
from pykrx import stock
import time
import bt
import warnings
# from tqdm import tqdm
warnings.filterwarnings(action='ignore')
# pd.options.display.float_format = '{:.4f}'.format
# %matplotlib inline
from IPython.display import display, HTML
# Allow a single cell to display multiple outputs
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# Configure how large pandas DataFrames are printed to the screen
pd.set_option('display.float_format', lambda x: '%.3f' % x)
pd.set_option('max_columns', None)
# %%
#from strategy import*
#from utils import *
# %%
def 장중이냐(now):
    # True during KRX trading hours (roughly 09:00-15:30).
    return (9 <= now.hour <= 14) or (now.hour == 15 and (now.minute <= 30))
# %%
def AMS(x):
    ''' x : Series (a column of the DataFrame)
    x[-1] : the reference value, i.e. the current value of x
    Score 1 where (today/past - 1) > 0, else 0
    => today/past > 1 => today > past => x[-1] > x
    '''
    # print(f"{list(np.where(x[-1]>x, 1, 0)[:-1])}, {len(np.where(x[-1]>x, 1, 0)[:-1])}")
    return np.mean(np.where(x[-1] > x, 1, 0)[:-1])  # exclude the same-day comparison with [:-1]
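# Illustrative check (hypothetical values): for np.array([1, 3, 2, 4]) the last
# value beats all three earlier ones, so AMS returns 1.0.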
# %%
# get_data
# code_list is tickers['code']
# start : before_13months
# end : baseday
def get_data(code_list, start, end):
    df = pd.DataFrame()
    tot = len(code_list)
    count = 0
    for code in code_list:  # tqdm(code_list)
        count += 1
        print(f"{count}/{tot} : {code}")
        t = fdr.DataReader(code, start, end)['Close'].rename(code)
        # t = stock.get_market_ohlcv_by_date(start, end, code)['종가'].rename(code)
        df = bt.merge(df, t)
        time.sleep(0.75)
    # Drop any column whose last value is NaN.
    for c in df.columns:
        if pd.isna(df.iloc[-1][c]):
            print(f"drop : {c}")
            df.drop(c, axis=1, inplace=True)
    return df
# %%
def 종목명(code, df):
""" 사용예) 종목명('A153130', tickers) or 종목명('153130', tickers)
"""
if code.startswith('A'):
return df[df['종목코드'] == code]['종목명'].values[0]
else:
return df[df['code'] == code]['종목명'].values[0]
def 종목코드(name, df):
""" A를 제외한 종목코드를 반환한다. FinanceDataReader에서 사용
사용예: 종목코드("KODEX달러선물레버리지", tickers)
"""
_df = df.copy()
_df['종목명'] = _df['종목명'].str.replace(' ', '')
return _df[_df['종목명'] == name.replace(' ', '')]['code'].values[0]
# %%
def pickup(df, 제외직전개월수=1):
"""df에서 모멘텀이 가장 좋은 3종목을 선택한다.
Args :
- df : 가격 데이터프레임
- 제외직전개월수 : df에서 제외할 데이터 개월 수
- now : 가격 데이터프레임의 가장 아래 시간
"""
t0 = df.index[-1]
제외 = t0 - pd.DateOffset(months=제외직전개월수)
m6 = t0 - pd.DateOffset(months=6)
m9 = t0 - pd.DateOffset(months=9)
m12 = t0 - pd.DateOffset(months=12)
m13 = t0 - pd.DateOffset(months=13)
m6_returns = (df.loc[m6:제외,:].calc_total_return()+1) # 1달제외 6개월 수익률 (현재 prices가 공휴일포함 데이터임)
m9_returns = (df.loc[m9:제외,:].calc_total_return()+1) # 1달제외 9개월 수익률
m12_returns = (df.loc[m12:제외,:].calc_total_return()+1) # 1달제외 12개월 수익률
average_returns = (m6_returns+m9_returns+m12_returns)/3
# ID 계산 최근 30일 제외
# dropna에 주의 해야 한다. 조선이 0이 있어 문제가 되므로 모든 column이 nan일 때만 drop한다.
len_m1= round(len(df.loc[m12:,:])/12) # 한달 일수
# print(f"{t0}, {m1}, {m6}, {m9}, {m12}, {m13}, {len_m1}")
pos_percent = np.where(df.loc[m13:,:].pct_change(len_m1).dropna(how='all') > 0.0, 1, 0).mean(axis=0)
neg_percent = 1 - pos_percent
ID = (neg_percent - pos_percent)
momentum = average_returns * ID * -1
print(f"pickup : ======================\n{momentum.nlargest(3)}\n=================================")
return list(momentum.nlargest(3).index)
# Example:
# pickup(price_df, 0)
# %%
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
def calcOECD시그널비중():
    try:
        df = pd.read_csv("https://stats.oecd.org/sdmx-json/data/DP_LIVE/KOR.CLI.AMPLITUD.LTRENDIDX.M/OECD?contentType=csv&detail=code&separator=comma&csv-lang=en&startPeriod=2021-01")
        oecd = df[['TIME', 'Value']]
        oecd.set_index('TIME', inplace=True)
        oecd.index = pd.to_datetime(oecd.index)
        oecd['전월비'] = oecd.pct_change() + 1
        oecd.drop('Value', axis=1, inplace=True)
        target_weight = 1 if oecd.iloc[-1][0] > 1 else 0
        # target_weights['cash'] = 1 - target_weights
        # target_weights.columns = ['base1', cash]
    except:
        raise Exception('Failed to fetch the OECD data.')
    return target_weight
# Example:
# calcOECD시그널비중()
# %% [markdown]
# If run in the evening, this computes the rebalancing for the next day;
# if run before 9 a.m., it computes today's rebalancing weights (based on yesterday's close);
# if run intraday, it likewise computes today's rebalancing weights (based on yesterday's close).
# %%
# Needs to read foreign investor flows,
# and retail investor flows as well
def calc외국인수급비중(df):
    baseday = df.index[-1]
    before_one_year = baseday - pd.DateOffset(years=1)
    tdf = stock.get_market_trading_value_by_date(before_one_year, baseday, "KOSPI")
    tdf.index.name = 'Date'
    tdf_cumsum = tdf.cumsum()
    외국인수급 = tdf_cumsum[['외국인합계']]
    외국인수급1m = 외국인수급[baseday - pd.DateOffset(months=1):baseday]
    외국인수급2m = 외국인수급[baseday - pd.DateOffset(months=2):baseday - pd.DateOffset(months=1)]
    외국인수급3m = 외국인수급[baseday - pd.DateOffset(months=3):baseday - pd.DateOffset(months=2)]
    외국인수급1m증가 = 외국인수급1m.iloc[-1] > 외국인수급1m.iloc[0]
    외국인수급2m증가 = 외국인수급2m.iloc[-1] > 외국인수급2m.iloc[0]
    외국인수급3m증가 = 외국인수급3m.iloc[-1] > 외국인수급3m.iloc[0]
    print("외국인수급: ", 외국인수급1m증가, 외국인수급2m증가, 외국인수급3m증가)
    연속3개월 = ((외국인수급3m증가) & (외국인수급2m증가) & (외국인수급1m증가))
    연속2개월 = ((외국인수급2m증가) & (외국인수급1m증가))
    연속1개월 = (외국인수급1m증가)
    print(f"외국인수급 연속증가: 연속1개월({연속1개월.values[0]}), 연속2개월({연속2개월.values[0]}), 연속3개월({연속3개월.values[0]})")
    target_weights2 = pd.DataFrame(np.where(연속3개월, 1.0,
                                            np.where(연속2개월, 0.66,
                                                     np.where(연속1개월, 0.33, 0))),
                                   index=[외국인수급.index[-1]], columns=['base2'])
    print(f"target_weights2 :\n{target_weights2}")
    return target_weights2['base2'].values[0]
# %%
def calc수급스코어비중(df):
    baseday = df.index[-1]
    before_one_year = baseday - pd.DateOffset(years=1)
    tdf = stock.get_market_trading_value_by_date(before_one_year, baseday, "KOSPI")
    tdf.index.name = 'Date'
    tdf_cumsum = tdf.cumsum()
    외인추종스코어 = np.where(tdf_cumsum['외국인합계'][-1] > tdf_cumsum['외국인합계'], 1, 0).mean()
    개인역추종스코어 = np.where(tdf_cumsum['개인'][-1] < tdf_cumsum['개인'], 1, 0).mean()
    평균수급스코어 = (외인추종스코어 + 개인역추종스코어) / 2
    return 평균수급스코어
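# The two scores above follow foreign flows and fade retail flows: the fraction
# of past days whose cumulative foreign net buying ('외국인합계') sits below today's
# level, averaged with the fraction whose cumulative retail net buying ('개인')
# sits above today's level.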
# %%
def calc코스피모멘텀스코어비중(df):
    baseday = df.index[-1]
    before_one_year = baseday - pd.DateOffset(years=1)
    kospi = fdr.DataReader('KS11', before_one_year, baseday)[['Close']]
    momentumscore = kospi['Close'].rolling(len(kospi)).apply(AMS).iloc[-1]
    return momentumscore
#%%
if __name__ == '__main__':
    #######################################
    tickers = pd.read_csv('매매종목.csv')
    tickers['code'] = tickers['종목코드'].str.replace('A', '')
    cash = '153130'    # KODEX 단기채권
    dollar = '261250'  # KODEX 미국달러선물레버리지
    #######################################
    now = datetime.now()
    now_str = now.strftime('%Y-%m-%d')
    # baseday, baseday_str
    # kospi = fdr.DataReader('005930', now-pd.DateOffset(days=5), now)
    samsung = stock.get_market_ohlcv_by_date(now - pd.DateOffset(days=5), now, "005930")
    if 장중이냐(now):  # between 09:00 and 15:30
        baseday = samsung.index[-2]
    elif 16 <= now.hour <= 23:
        baseday = samsung.index[-1]  # the market has closed, so today's close is available
    else:
        baseday = samsung.index[-2]
    baseday_str = baseday.strftime('%Y-%m-%d')
    # The most recent ~30 days get excluded later, so read 13 months of data.
    before_13months = baseday.replace(year=now.year - 1) - timedelta(days=50)  # read an extra buffer of days before one year ago
    # Load the price data.
    # price_df = pd.read_csv('sectors.csv', index_col=0, parse_dates=True)
    price_df = get_data(tickers['code'], before_13months, baseday)
    OECD시그널비중 = calcOECD시그널비중()
    수급스코어비중 = calc수급스코어비중(price_df)
    코스피모멘텀스코어비중 = calc코스피모멘텀스코어비중(price_df)
    탑픽종목코드 = pickup(price_df, 제외직전개월수=1)
    국내비중 = 0.5
    OECD매매총비중 = 국내비중 * 0.33
    OECD섹터비중 = OECD매매총비중 * OECD시그널비중
    OECD채권비중 = OECD매매총비중 * (1 - OECD시그널비중)
    수급매매총비중 = 국내비중 * 0.33
    수급섹터비중 = 수급매매총비중 * 수급스코어비중
    수급채권비중 = 수급매매총비중 * (1 - 수급스코어비중)
    모멘텀스코어매매총비중 = 국내비중 * 0.34
    모멘텀섹터비중 = 모멘텀스코어매매총비중 * 코스피모멘텀스코어비중
    모멘텀채권비중 = 모멘텀스코어매매총비중 * (1 - 코스피모멘텀스코어비중)
    섹터비중 = {}
    섹터비중[탑픽종목코드[0]] = round((OECD섹터비중 + 수급섹터비중/2 + 모멘텀섹터비중/3) * 100, 2)
    섹터비중[탑픽종목코드[1]] = round((수급섹터비중/2 + 모멘텀섹터비중/3) * 100, 2)
    섹터비중[탑픽종목코드[2]] = round((모멘텀섹터비중/3) * 100, 2)
    채권비중 = OECD채권비중 + 수급채권비중 + 모멘텀채권비중
    ############ NASDAQ, Dow
    before_one_year = baseday - pd.DateOffset(years=1)
    나스닥 = yf.download("^IXIC", before_one_year, baseday)[['Adj Close']]
    다우 = yf.download("^DJI", before_one_year, baseday)[['Adj Close']]
    나스닥모멘텀스코어비중 = 나스닥['Adj Close'].rolling(len(나스닥)).apply(AMS).iloc[-1]
    다우모멘텀스코어비중 = 다우['Adj Close'].rolling(len(다우)).apply(AMS).iloc[-1]
    해외비중 = 0.5
    나스닥매매총비중 = 해외비중 * 0.5
    나스닥비중 = 나스닥매매총비중 * 나스닥모멘텀스코어비중
    나스닥해외채권비중 = 나스닥매매총비중 * (1 - 나스닥모멘텀스코어비중)
    다우매매총비중 = 해외비중 * 0.5
    다우비중 = 다우매매총비중 * 다우모멘텀스코어비중
    다우해외채권비중 = 다우매매총비중 * (1 - 다우모멘텀스코어비중)
    해외채권비중 = 나스닥해외채권비중 + 다우해외채권비중
    ############ Print the weights
    print("\n\n\n")
    print("=" * 80)
    print(f"비중 계산 기준 일자 : {baseday_str}")
    print("=" * 80)
    print(f"섹터 비중: {섹터비중}")
    print(f"채권비중({cash}):{채권비중*100:.2f}")
    print(f"나스닥비중(133690): {나스닥비중*100:.2f}")  # A133690, TIGER 미국나스닥100
    print(f"다우비중(245340): {다우비중*100:.2f}")  # A245340, TIGER 미국다우존스30
    print(f"해외채권비중({dollar}): {해외채권비중*100:.2f}")
    print("=" * 80)
    print("\n\n\n")
| 32.746575 | 183 | 0.608973 | 1,345 | 9,562 | 4.231227 | 0.313011 | 0.033737 | 0.031629 | 0.031629 | 0.157442 | 0.132138 | 0.093832 | 0.093832 | 0.062028 | 0.062028 | 0 | 0.049304 | 0.210939 | 9,562 | 291 | 184 | 32.859107 | 0.70497 | 0.184899 | 0 | 0.125714 | 0 | 0.011429 | 0.115644 | 0.024109 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.068571 | 0.005714 | 0.188571 | 0.097143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d886970c18b3a866cce25048ab9c6ccd5e5385f | 2,393 | py | Python | playground/NeuralNetwork.py | Ceruleanacg/Learning-Notes | 1b2718dc85e622e35670fffbb525bb50d385f9a3 | [
"MIT"
] | 95 | 2018-06-01T03:57:39.000Z | 2021-12-31T04:51:21.000Z | playground/NeuralNetwork.py | Ceruleanacg/Descent | 1b2718dc85e622e35670fffbb525bb50d385f9a3 | [
"MIT"
] | 1 | 2020-02-28T13:27:15.000Z | 2020-02-28T13:27:15.000Z | playground/NeuralNetwork.py | Ceruleanacg/Descent | 1b2718dc85e622e35670fffbb525bb50d385f9a3 | [
"MIT"
] | 15 | 2018-06-24T07:33:29.000Z | 2020-10-03T04:12:27.000Z | import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler
from utility import function
from ann.Dense import Dense
np.random.seed(135)
data_count = 25
x1_points = np.linspace(0, 10, data_count).reshape((-1, 1))
x2_points = np.multiply(2, x1_points) + np.random.randint(-10, 10, size=(data_count,)).reshape((-1, 1))
x1 = np.concatenate((x1_points, x2_points), axis=1)
y1 = np.array([[1, 0, 0, 0]] * data_count)
x1_points = np.linspace(1, 10, data_count).reshape((-1, 1))
x2_points = np.multiply(-2, x1_points) + np.random.randint(-10, 10, size=(data_count,)).reshape((-1, 1))
x2 = np.concatenate((x1_points, x2_points), axis=1)
y2 = np.array([[0, 1, 0, 0]] * data_count)
x1_points = np.linspace(-1, -10, data_count).reshape((-1, 1))
x2_points = np.multiply(2, x1_points) + np.random.randint(-10, 10, size=(data_count,)).reshape((-1, 1))
x3 = np.concatenate((x1_points, x2_points), axis=1)
y3 = np.array([[0, 0, 1, 0]] * data_count)
x1_points = np.linspace(-1, -10, data_count).reshape((-1, 1))
x2_points = np.multiply(-2, x1_points) + np.random.randint(-10, 10, size=(data_count,)).reshape((-1, 1))
x4 = np.concatenate((x1_points, x2_points), axis=1)
y4 = np.array([[0, 0, 0, 1]] * data_count)
x_data = np.concatenate((x1, x2, x3, x4))
y_data = np.concatenate((y1, y2, y3, y4))
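# The four clusters lie along y = +/-2x (plus noise): x1/x2 on the right half of
# the plane, x3/x4 mirrored on the left, with one one-hot class per branch.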
x_train = StandardScaler().fit_transform(x_data)
y_train = y_data
activation_funcs = [function.relu] * 2
# activation_funcs = [function.sigmoid] * 1
activation_funcs.append(function.linear)
dense = Dense(x_space=2, y_space=4, hidden_units_list=[6, 6], **{
    "loss_func": function.softmax_cross_entropy,
    "activation_funcs": activation_funcs,
    "learning_rate": 0.003,
    "enable_logger": True,
    "model_name": 'base',
    "batch_size": 100,
    "max_epoch": 1000,
    'model': 'train',
})
dense.train(x_data, y_data)
# dense.restore()
dense.evaluate(x_data, y_data)
x1_test = np.linspace(-20, 20, 300)
x2_test = np.linspace(-30, 30, 300)
x1_mesh, x2_mesh = np.meshgrid(x1_test, x2_test)
x_test = np.array([x1_mesh.ravel(), x2_mesh.ravel()]).T
y_test = np.argmax(dense.predict(x_test), axis=1)
plt.pcolormesh(x1_mesh, x2_mesh, y_test.reshape(x1_mesh.shape))
plt.scatter(x1[:, 0], x1[:, 1], marker='x')
plt.scatter(x2[:, 0], x2[:, 1], marker='o')
plt.scatter(x3[:, 0], x3[:, 1], marker='*')
plt.scatter(x4[:, 0], x4[:, 1], marker='p')
plt.show()
| 31.906667 | 104 | 0.679482 | 405 | 2,393 | 3.82716 | 0.239506 | 0.075484 | 0.051613 | 0.087742 | 0.373548 | 0.373548 | 0.372258 | 0.372258 | 0.284516 | 0.284516 | 0 | 0.080076 | 0.123276 | 2,393 | 74 | 105 | 32.337838 | 0.658723 | 0.023819 | 0 | 0.115385 | 0 | 0 | 0.042006 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.096154 | 0 | 0.096154 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d8a164a90defbf538806cb852c130894b25b167 | 2,983 | py | Python | gdc_matcher.py | pxslip/gdc-file-matcher | b98a45867fa5286a6e023538fa4a22d02af5c05b | [
"MIT"
] | null | null | null | gdc_matcher.py | pxslip/gdc-file-matcher | b98a45867fa5286a6e023538fa4a22d02af5c05b | [
"MIT"
] | null | null | null | gdc_matcher.py | pxslip/gdc-file-matcher | b98a45867fa5286a6e023538fa4a22d02af5c05b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import argparse
import datetime
import json
import os
import requests
import time
import re
FILES_ENDPOINT = 'https://api.gdc.cancer.gov/files'
DESC = """This program will attempt to use the GDC api to associate a file or set of files with any fields specified
If no fields are specified only the case id will be associated
Either --files or --dir option must be specified"""
def associate(files, fields):
    filters = {
        "op": "in",
        "content": {
            "field": "file_name",
            "value": files
        }
    }
    params = {
        "filters": json.dumps(filters),
        "fields": ",".join(fields),
        "format": "TSV",
        "size": len(files)
    }
    headers = {'content-type': 'application/json'}
    print('Contacting GDC to get requested data')
    response = requests.post(FILES_ENDPOINT, json=params, headers=headers)
    print(response.url)
    now = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d%H%M%S')
    filename = 'files-' + now + '.tsv'
    print('Writing to output file ' + filename)
    with open(filename, 'w') as file_handle:
        file_handle.write(response.text)
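# For reference, the POST body built above looks like (hypothetical single file):
#   {"filters": "<JSON-encoded op:in filter on file_name>",
#    "fields": "cases.case_id,file_name", "format": "TSV", "size": 1}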
parser = argparse.ArgumentParser(description=DESC)
parser.add_argument('--fields',
                    help='a list of fields to associate with the filename',
                    action='append')
parser.add_argument('--fields-file',
                    help='a file containing a space, comma or newline delimited list of fields to look up')
parser.add_argument('--files',
                    help='A list of filenames to get the field data for',
                    action='append')
parser.add_argument('--files-file',
                    help='A file containing a space, comma or newline delimited list of files to get field data for')
parser.add_argument('--dir',
                    help='Instead of a list of files, specify a directory with the files to use, these must have the original filename')
args = parser.parse_args()
delimiter = re.compile("[,\s\n]+")
if not args.files and not args.dir and not args.files_file:
    parser.print_help()
else:
    fields_arg = args.fields
    if not args.fields and not args.fields_file:
        fields_arg = ['cases.case_id', 'file_name']
    elif args.fields:
        fields_arg.extend(['cases.case_id', 'file_name'])
    elif args.fields_file:
        with open(args.fields_file) as field_file:
            fields_content = field_file.read()
            fields_arg = delimiter.split(fields_content)
    # Actually do something
    # prefer files over dir
    files_list = []
    if args.files:
        files_list = args.files
    elif args.dir:
        for dirpath, dirnames, filenames in os.walk(args.dir):
            for filename in filenames:
                files_list.append(filename)
    elif args.files_file:
        with open(args.files_file) as input_file:
            files_list = delimiter.split(input_file.read())
    associate(files_list, fields_arg)
| 35.094118 | 136 | 0.63996 | 398 | 2,983 | 4.701005 | 0.349246 | 0.028862 | 0.04543 | 0.024586 | 0.128274 | 0.097274 | 0.097274 | 0.097274 | 0.061999 | 0.061999 | 0 | 0 | 0.249749 | 2,983 | 84 | 137 | 35.511905 | 0.836014 | 0.021455 | 0 | 0.027778 | 0 | 0.027778 | 0.306584 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013889 | false | 0 | 0.097222 | 0 | 0.111111 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d8a5c545515069122afa7ef7d8b3bacd35e4150 | 13,234 | py | Python | analysis/temporal_analysis_optim.py | marchdf/ppm-analysis | 5d84c875413ff609c28c3d9a9dd1d71db9a917fd | [
"Apache-2.0"
] | null | null | null | analysis/temporal_analysis_optim.py | marchdf/ppm-analysis | 5d84c875413ff609c28c3d9a9dd1d71db9a917fd | [
"Apache-2.0"
] | null | null | null | analysis/temporal_analysis_optim.py | marchdf/ppm-analysis | 5d84c875413ff609c28c3d9a9dd1d71db9a917fd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# ========================================================================
#
# Imports
#
# ========================================================================
import numpy as np
import argparse
import matplotlib.pyplot as plt
from matplotlib import rcParams
import matplotlib.colors as colors
from datetime import timedelta
import time
from scipy.optimize import minimize
import scipy.integrate as integrate
import scipy.interpolate as interpolate
import pandas as pd
# ========================================================================
#
# Some defaults variables
#
# ========================================================================
plt.rc("text", usetex=True)
cmap_med = [
"#F15A60",
"#7AC36A",
"#5A9BD4",
"#FAA75B",
"#9E67AB",
"#CE7058",
"#D77FB4",
"#737373",
]
cmap = [
"#EE2E2F",
"#008C48",
"#185AA9",
"#F47D23",
"#662C91",
"#A21D21",
"#B43894",
"#010202",
]
dashseq = [
(None, None),
[10, 5],
[10, 4, 3, 4],
[3, 3],
[10, 4, 3, 4, 3, 4],
[3, 3],
[3, 3],
]
markertype = ["s", "d", "o", "p", "h"]
rcParams.update({"figure.autolayout": True})
# ========================================================================
#
# Functions
#
# ========================================================================
def objective(x, sign=1.0):
"""Objective function (walltime)"""
CFL = x[0]
kh = x[1]
# return sign * CFL * kh ** 4 / np.pi ** 4
return sign * np.pi ** 4 / (CFL * kh ** 4)
def objective_deriv(x, sign=1.0):
CFL = x[0]
kh = x[1]
# dfdx0 = sign * kh ** 4 / np.pi ** 4
# dfdx1 = sign * CFL * 4 * kh ** 3 / np.pi ** 4
dfdx0 = -sign * np.pi ** 4 / (CFL ** 2 * kh ** 4)
dfdx1 = -sign * 4 * np.pi ** 4 / (CFL * kh ** 5)
return np.array([dfdx0, dfdx1])
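# Gradient sanity check (illustrative): differentiating the walltime model above,
#   d/dCFL [pi^4 / (CFL * kh^4)] = -pi^4 / (CFL^2 * kh^4)
#   d/dkh  [pi^4 / (CFL * kh^4)] = -4 * pi^4 / (CFL * kh^5)
# which matches the dfdx0 and dfdx1 returned by objective_deriv.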
def epsilon(x):
"""Dispersion error (normalized)"""
CFL = x[0]
kh = x[1]
return (
1.
/ np.pi
* (
kh
+ (3. * CFL ** 2 / 4 - 5 * CFL / 12 - 4. / 3) * np.sin(kh)
+ (-CFL ** 2 / 2 + CFL / 3 + 1. / 6) * np.sin(2 * kh)
+ (CFL ** 2 / 12 - CFL / 12) * np.sin(3 * kh)
)
)
def gamma(x):
"""Diffusion error (normalized)"""
CFL = x[0]
kh = x[1]
return (
1.
/ (-2)
* (
4. * CFL ** 2 / 3
- 7. * CFL / 3
+ (-23. * CFL ** 2 / 12 + 35 * CFL / 12) * np.cos(kh)
+ (2. * CFL ** 2 / 3 - 2 * CFL / 3) * np.cos(2 * kh)
+ (-CFL ** 2 / 12 + CFL / 12) * np.cos(3 * kh)
)
)
# ========================================================================
#
# Main
#
# ========================================================================
if __name__ == "__main__":
# Timer
start = time.time()
# Parse arguments
parser = argparse.ArgumentParser(description="Temporal analysis optimizer")
parser.add_argument("-s", "--show", help="Show the plots", action="store_true")
args = parser.parse_args()
# Setup
fmt = "png"
# # Optimize the walltime for a given dispersion error
# alphas = np.sort(np.concatenate([np.logspace(-6, 0, 50), np.linspace(3e-1, 1, 20)]))
# df = pd.DataFrame(columns=["alpha", "CFL", "kh", "objective"])
# df.alpha = alphas
# for k, alpha in enumerate(df.alpha):
# constraints = (
# {"type": "eq", "fun": lambda x: np.array([epsilon(x) - alpha])},
# {
# "type": "ineq",
# "fun": lambda x: np.array([x[0]]),
# "jac": lambda x: np.array([1.0, 0.0]),
# },
# {
# "type": "ineq",
# "fun": lambda x: np.array([x[1]]),
# "jac": lambda x: np.array([0.0, 1.0]),
# },
# {
# "type": "ineq",
# "fun": lambda x: np.array([1 - x[0]]),
# "jac": lambda x: np.array([-1.0, 0.0]),
# },
# {
# "type": "ineq",
# "fun": lambda x: np.array([np.pi - x[1]]),
# "jac": lambda x: np.array([0.0, -1.0]),
# },
# )
# res = minimize(
# objective,
# [0.5, np.pi / 2],
# args=(1.0,),
# jac=objective_deriv,
# method="SLSQP",
# constraints=constraints,
# options={"disp": True},
# )
# df.iloc[k] = [alpha, res.x[0], res.x[1], res.fun]
# Optimize the walltime for a given total error
alphas = np.sort(
np.concatenate([np.logspace(-3, 0, 50), np.linspace(3e-1, np.pi + 2, 20)])
)[::-1]
df = pd.DataFrame(columns=["alpha", "CFL", "kh", "objective"])
df.alpha = alphas
x0 = [1.0, np.pi]
for k, alpha in enumerate(df.alpha):
constraints = (
{
"type": "eq",
"fun": lambda x: np.array([np.pi * epsilon(x) + 2 * gamma(x) - alpha]),
},
{
"type": "ineq",
"fun": lambda x: np.array([x[0]]),
"jac": lambda x: np.array([1.0, 0.0]),
},
{
"type": "ineq",
"fun": lambda x: np.array([x[1]]),
"jac": lambda x: np.array([0.0, 1.0]),
},
{
"type": "ineq",
"fun": lambda x: np.array([1 - x[0]]),
"jac": lambda x: np.array([-1.0, 0.0]),
},
{
"type": "ineq",
"fun": lambda x: np.array([np.pi - x[1]]),
"jac": lambda x: np.array([0.0, -1.0]),
},
)
res = minimize(
objective,
x0,
args=(1.0,),
jac=objective_deriv,
method="SLSQP",
constraints=constraints,
options={"disp": True},
)
df.iloc[k] = [alpha, res.x[0], res.x[1], res.fun]
x0 = [res.x[0], res.x[1]]
# Get a fit for the coefficients
p = np.polyfit(np.float32(df.kh), np.float32(df.CFL), 2)
# over the range alpha in [1e-2,1e-1], we have this fit:
p = [0.3285801, 0.05037212]
# or we can try this one:
p = [1. / np.pi, 0.06]
# Error(CFL,kh) and walltime(CFL,kh)
hks = np.linspace(1e-2, np.pi, 100)
cfls = np.linspace(1e-8, 1, 100)
HKS, CFLS = np.meshgrid(hks, cfls)
epsilon_error = epsilon([CFLS, HKS])
gamma_error = gamma([CFLS, HKS])
total_error = np.pi * epsilon_error + 2 * gamma_error
walltime = objective([CFLS, HKS])
plt.figure(0)
sc = plt.scatter(
df.kh,
df.CFL,
c=df.alpha,
norm=colors.LogNorm(vmin=df.alpha.min(), vmax=df.alpha.max()),
cmap="viridis",
)
plt.plot(df.kh, p[0] * df.kh + p[1], color=cmap[-1])
# plt.plot(df.kh, df.kh**3 / 8, color=cmap[-1])
cbar = plt.colorbar(sc)
cbar.ax.get_yaxis().labelpad = 20
cbar.set_label("dispersion error", rotation=270, fontsize=18)
plt.figure(1)
extent = [0, np.pi, 0, 1]
levels = np.logspace(-6, 0, 13)
im = plt.imshow(
epsilon_error,
aspect="auto",
interpolation="bilinear",
origin="lower",
extent=extent,
norm=colors.LogNorm(vmin=df.alpha.min(), vmax=df.alpha.max()),
cmap="viridis",
)
cs = plt.contour(
epsilon_error,
levels,
colors="gray",
origin="lower",
extent=extent,
linewidths=1,
)
plt.plot(df.kh, df.CFL, color=cmap[-1], lw=2)
cbar = plt.colorbar(im)
cbar.ax.get_yaxis().labelpad = 20
cbar.set_label("dispersion error", rotation=270, fontsize=18)
plt.figure(3)
extent = [0, np.pi, 0, 1]
levels = np.logspace(-6, 0, 13)
im = plt.imshow(
gamma_error,
aspect="auto",
interpolation="bilinear",
origin="lower",
extent=extent,
cmap="Blues_r",
)
cs = plt.contour(
gamma_error, levels, colors="gray", origin="lower", extent=extent, linewidths=1
)
plt.plot(df.kh, df.CFL, color=cmap[-1], lw=2)
cbar = plt.colorbar(im)
cbar.ax.get_yaxis().labelpad = 20
cbar.set_label("diffusion error", rotation=270, fontsize=18)
plt.figure(4)
extent = [0, np.pi, 0, 1]
levels = np.logspace(-6, 0, 13)
im = plt.imshow(
total_error,
aspect="auto",
interpolation="bilinear",
origin="lower",
extent=extent,
cmap="Blues_r",
)
cs = plt.contour(
total_error,
levels,
colors="gray",
origin="lower",
extent=extent,
linewidths=1,
linestyles="dashed",
)
plt.plot(df.kh, df.CFL, color=cmap[-1], lw=2)
cbar = plt.colorbar(im)
cbar.ax.get_yaxis().labelpad = 20
cbar.set_label("total error", rotation=270, fontsize=18)
plt.figure(5)
extent = [0, np.pi, 0, 1]
levels = np.logspace(-6, 0, 13)
im = plt.imshow(
walltime,
aspect="auto",
interpolation="bilinear",
origin="lower",
extent=extent,
cmap="Blues_r",
norm=colors.LogNorm(vmin=1e-0, vmax=1e3),
)
# cs = plt.contour(
# total_error, levels, colors="gray", origin="lower", extent=extent, linewidths=1
# )
plt.plot(df.kh, df.CFL, color=cmap[-1], lw=2)
cbar = plt.colorbar(im)
cbar.ax.get_yaxis().labelpad = 20
cbar.set_label("walltime", rotation=270, fontsize=18)
# \int error d(kh)
int_cfl = np.linspace(1e-8, 1, 50)
int_epsilon_error = np.zeros(int_cfl.shape)
int_gamma_error = np.zeros(int_cfl.shape)
for k, cfl in enumerate(int_cfl):
res = integrate.quad(lambda x: epsilon([cfl, x]), 0, np.pi)
int_epsilon_error[k] = res[0]
res = integrate.quad(lambda x: gamma([cfl, x]), 0, np.pi)
int_gamma_error[k] = res[0]
plt.figure(6)
plt.plot(int_cfl, int_epsilon_error, color=cmap[0], lw=2)
plt.figure(7)
plt.plot(int_cfl, int_gamma_error, color=cmap[0], lw=2)
# Format plots
plt.figure(0)
ax = plt.gca()
plt.xlabel(r"$kh$", fontsize=22, fontweight="bold")
plt.ylabel(r"$C$", fontsize=22, fontweight="bold")
plt.xlim([0, np.pi])
plt.ylim([0, 1.1])
plt.setp(ax.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax.get_ymajorticklabels(), fontsize=18, fontweight="bold")
plt.savefig("pareto_dispersion.{0:s}".format(fmt), format=fmt, dpi=300)
plt.figure(1)
ax = plt.gca()
plt.xlabel(r"$kh$", fontsize=22, fontweight="bold")
plt.ylabel(r"$C$", fontsize=22, fontweight="bold")
plt.xlim([0, np.pi])
plt.ylim([0, 1.1])
plt.setp(ax.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax.get_ymajorticklabels(), fontsize=18, fontweight="bold")
plt.savefig("pareto_dispersion2.{0:s}".format(fmt), format=fmt, dpi=300)
plt.figure(3)
ax = plt.gca()
plt.xlabel(r"$kh$", fontsize=22, fontweight="bold")
plt.ylabel(r"$C$", fontsize=22, fontweight="bold")
plt.xlim([0, np.pi])
plt.ylim([0, 1.1])
plt.setp(ax.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax.get_ymajorticklabels(), fontsize=18, fontweight="bold")
plt.savefig("pareto_diffusion.{0:s}".format(fmt), format=fmt, dpi=300)
plt.figure(4)
ax = plt.gca()
plt.xlabel(r"$kh$", fontsize=22, fontweight="bold")
plt.ylabel(r"$C$", fontsize=22, fontweight="bold")
plt.xlim([0, np.pi])
plt.ylim([0, 1.1])
plt.setp(ax.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax.get_ymajorticklabels(), fontsize=18, fontweight="bold")
plt.savefig("total_error.{0:s}".format(fmt), format=fmt, dpi=300)
plt.figure(5)
ax = plt.gca()
plt.xlabel(r"$kh$", fontsize=22, fontweight="bold")
plt.ylabel(r"$C$", fontsize=22, fontweight="bold")
plt.xlim([0, np.pi])
plt.ylim([0, 1.1])
plt.setp(ax.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax.get_ymajorticklabels(), fontsize=18, fontweight="bold")
plt.savefig("walltime.{0:s}".format(fmt), format=fmt, dpi=300)
plt.figure(6)
ax = plt.gca()
plt.xlabel(r"$C$", fontsize=22, fontweight="bold")
plt.ylabel(r"$\int \epsilon \mathrm{d}(hk)$", fontsize=22, fontweight="bold")
plt.xlim([0, 1])
plt.setp(ax.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax.get_ymajorticklabels(), fontsize=18, fontweight="bold")
plt.savefig("int_dispersion_error.{0:s}".format(fmt), format=fmt, dpi=300)
plt.figure(7)
ax = plt.gca()
plt.xlabel(r"$C$", fontsize=22, fontweight="bold")
plt.ylabel(r"$\int \gamma \mathrm{d}(hk)$", fontsize=22, fontweight="bold")
plt.xlim([0, 1])
plt.setp(ax.get_xmajorticklabels(), fontsize=18, fontweight="bold")
plt.setp(ax.get_ymajorticklabels(), fontsize=18, fontweight="bold")
plt.savefig("int_diffusion_error.{0:s}".format(fmt), format=fmt, dpi=300)
if args.show:
plt.show()
# output timer
end = time.time() - start
print(
"Elapsed time "
+ str(timedelta(seconds=end))
+ " (or {0:f} seconds)".format(end)
)
| 30.214612 | 90 | 0.50204 | 1,710 | 13,234 | 3.838596 | 0.147953 | 0.05972 | 0.072517 | 0.038391 | 0.697288 | 0.67078 | 0.636197 | 0.622639 | 0.605576 | 0.591865 | 0 | 0.05339 | 0.276787 | 13,234 | 437 | 91 | 30.283753 | 0.632431 | 0.188605 | 0 | 0.463722 | 0 | 0 | 0.084265 | 0.011273 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012618 | false | 0 | 0.0347 | 0 | 0.059937 | 0.003155 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d8ab8e265be6fa0615d8a4ab48fd3a975798b4c | 3,108 | py | Python | mmfashion/apis/test_virtual_tryon.py | RyanJiang0416/mmfashion | 89f56e3e631b4f5c1403f7e8897396cc02b5aa91 | [
"Apache-2.0"
] | 952 | 2019-10-31T01:49:07.000Z | 2022-03-29T11:33:27.000Z | mmfashion/apis/test_virtual_tryon.py | RyanJiang0416/mmfashion | 89f56e3e631b4f5c1403f7e8897396cc02b5aa91 | [
"Apache-2.0"
] | 135 | 2019-11-02T07:09:04.000Z | 2022-03-17T06:08:11.000Z | mmfashion/apis/test_virtual_tryon.py | RyanJiang0416/mmfashion | 89f56e3e631b4f5c1403f7e8897396cc02b5aa91 | [
"Apache-2.0"
] | 239 | 2019-10-31T02:08:40.000Z | 2022-03-22T03:14:38.000Z | from __future__ import division
import os
from ..datasets import build_dataloader
from ..utils import save_imgs
from .env import get_root_logger
def test_geometric_matching(model,
dataset,
cfg,
distributed=False,
validate=False,
logger=None):
if logger is None:
logger = get_root_logger(cfg.log_level)
# start testing predictor
if distributed: # to do
_dist_test(model, dataset, cfg, validate=validate)
else:
_non_dist_test_gmm(model, dataset, cfg, validate=validate)
def _non_dist_test_gmm(model, dataset, cfg, validate=False):
data_loader = build_dataloader(
dataset,
cfg.data.imgs_per_gpu,
cfg.data.workers_per_gpu,
len(cfg.gpus.test),
dist=False,
shuffle=False)
# save dir
warp_cloth_dir = os.path.join(cfg.data.test.GMM.save_dir, 'warp-cloth')
warp_mask_dir = os.path.join(cfg.data.test.GMM.save_dir, 'warp-mask')
if not os.path.exists(warp_cloth_dir):
os.makedirs(warp_cloth_dir)
if not os.path.exists(warp_mask_dir):
os.makedirs(warp_mask_dir)
model.cuda()
model.eval()
for batch, data in enumerate(data_loader):
c_name = data['c_name']
cloth = data['cloth'].cuda()
cloth_mask = data['cloth_mask'].cuda()
agnostic = data['agnostic'].cuda()
parse_cloth = data['parse_cloth'].cuda()
warped_cloth, warped_mask = model(
cloth, cloth_mask, agnostic, parse_cloth, return_loss=False)
save_imgs(warped_cloth, c_name, warp_cloth_dir)
save_imgs(warped_mask, c_name, warp_mask_dir)
def test_tryon(model,
dataset,
cfg,
distributed=False,
validate=False,
logger=None):
if logger is None:
logger = get_root_logger(cfg.log_level)
# start testing predictor
if distributed: # to do
_dist_test(model, dataset, cfg, validate=validate)
else:
_non_dist_test_tryon(model, dataset, cfg, validate=validate)
def _non_dist_test_tryon(model, dataset, cfg, validate=False):
data_loader = build_dataloader(
dataset,
cfg.data.imgs_per_gpu,
cfg.data.workers_per_gpu,
len(cfg.gpus.test),
dist=False,
shuffle=False)
# save dir
try_on_dir = os.path.join(cfg.data.test.TOM.save_dir, 'try-on')
if not os.path.exists(try_on_dir):
os.makedirs(try_on_dir)
model.cuda()
model.eval()
for batch, data in enumerate(data_loader):
img = data['img'].cuda()
cloth = data['cloth'].cuda()
cloth_mask = data['cloth_mask'].cuda()
agnostic = data['agnostic'].cuda()
im_names = data['im_name']
p_tryon = model(img, cloth, cloth_mask, agnostic, return_loss=False)
save_imgs(p_tryon, im_names, try_on_dir)
def _dist_test(model, dataset, cfg, validate=False):
""" not implemented yet """
raise NotImplementedError
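# Usage sketch (illustrative; the model/dataset names are placeholders): run the
# geometric matching (GMM) stage first to produce warped cloth, then try-on (TOM):
#   test_geometric_matching(gmm_model, gmm_dataset, cfg)
#   test_tryon(tom_model, tom_dataset, cfg)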
| 29.320755 | 76 | 0.619048 | 403 | 3,108 | 4.508685 | 0.200993 | 0.060539 | 0.074298 | 0.088608 | 0.69235 | 0.649972 | 0.609796 | 0.596588 | 0.587782 | 0.53825 | 0 | 0 | 0.277992 | 3,108 | 105 | 77 | 29.6 | 0.809715 | 0.031853 | 0 | 0.589744 | 0 | 0 | 0.032699 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064103 | false | 0 | 0.064103 | 0 | 0.128205 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d8de412e3a853e1895c6edde2c7914915f2e277 | 1,545 | py | Python | jklib/std/strings.py | Jordan-Kowal/jklib | 84dc8ad64b9216926ba9af0ec11f1dbd5d8a53f4 | [
"MIT"
] | 1 | 2020-02-28T21:53:51.000Z | 2020-02-28T21:53:51.000Z | jklib/std/strings.py | Jordan-Kowal/jklib | 84dc8ad64b9216926ba9af0ec11f1dbd5d8a53f4 | [
"MIT"
] | null | null | null | jklib/std/strings.py | Jordan-Kowal/jklib | 84dc8ad64b9216926ba9af0ec11f1dbd5d8a53f4 | [
"MIT"
] | null | null | null | """Utility functions for working with strings"""
# --------------------------------------------------------------------------------
# > Functions
# --------------------------------------------------------------------------------
def clean_text(text, char_list, replacement=" "):
"""
Replaces specific characters with a 'replacement' character within a text
:param str text: The text we want to change
:param char_list: List of strings, which are the subtexts we will replace
:type char_list: list(str)
:param str replacement: The string used as replacement. Defaults to " ".
:return: The updated string
:rtype: str
"""
if char_list:
for char in char_list:
text = text.replace(char, replacement)
text = text.strip()
return text
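# Example (illustrative):
#   clean_text("a,b;c", [",", ";"])  # -> "a b c"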
def replace_every_nth(text, old, new, nth, start=1):
"""
Modifies a text by replacing "old" string with "new" string every "nth" time
:param str text: The text we want to change
:param str old: The string that will be replaced
:param str new: The string used as replacement
:param int nth: The frequency of replacement (every nth occurrences)
:param int start: Which occurrence to we start with. Defaults to 1.
:return: The updated text
:rtype: str
"""
i = start
index = text.find(old)
while index != -1:
if i == nth:
text = text[:index] + new + text[index + len(old) :]
i = 0
index = text.find(old, index + len(old) + 1)
i += 1
return text
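# Example (illustrative): replace every 2nd occurrence of "-" with "+"
#   replace_every_nth("a-b-c-d", "-", "+", 2)  # -> "a-b+c-d"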
| 35.113636 | 82 | 0.56699 | 196 | 1,545 | 4.428571 | 0.331633 | 0.046083 | 0.02765 | 0.034562 | 0.147465 | 0.087558 | 0.087558 | 0.087558 | 0.087558 | 0.087558 | 0 | 0.005146 | 0.245307 | 1,545 | 43 | 83 | 35.930233 | 0.73928 | 0.609709 | 0 | 0.125 | 0 | 0 | 0.001949 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d93a16ec1c6fff3243949b16577cbdbfeae5354 | 2,132 | py | Python | case_deconv/code/infection_curve.py | dfarrow0/covidcast-nowcast | 8d9dfc56c643c4f47b72a58dc3e8811ddeb1a6c8 | [
"MIT"
] | null | null | null | case_deconv/code/infection_curve.py | dfarrow0/covidcast-nowcast | 8d9dfc56c643c4f47b72a58dc3e8811ddeb1a6c8 | [
"MIT"
] | null | null | null | case_deconv/code/infection_curve.py | dfarrow0/covidcast-nowcast | 8d9dfc56c643c4f47b72a58dc3e8811ddeb1a6c8 | [
"MIT"
] | null | null | null | """
Estimate infection curve
Created: 2020-09-09
Last modified: 2020-09-23
"""
# third party
import numpy as np
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
# r imports
genlasso = importr('genlasso')
rlist2dict = lambda x: dict(x.items())
rfloat2arr = lambda x: np.array(x)
# first party
from .conv1d import Conv1D
class InfectionCurve:
def __init__(self, delay, verbose=False):
"""
Args:
delay: 1D array of delay distribution probabilities
verbose: bool if output should be printed
"""
self.delay = delay
# TF via genlasso package
robjects.r(
'''
tf_predict <- function(tf_fit, x.new=NULL, lambda=NULL) {
predict(tf_fit, x.new=x.new, lambda=lambda)
}
tf_predict_cv_ <- function(tf_fit, n_folds=5) {
cv = genlasso::cv.trendfilter(tf_fit, k=n_folds)
min.lam = cv$lambda.min
print(paste("Min lambda:", min.lam))
preds = predict(tf_fit, lambda=min.lam)
list(lam=min.lam, preds=preds$fit)
}
tf_predict_cv <- function(tf_fit, n_folds=5) {
utils::capture.output(res <- tf_predict_cv_(tf_fit, n_folds=n_folds))
res
}
'''
)
if verbose:
self.tf_predict = robjects.r['tf_predict_cv_']
else:
self.tf_predict = robjects.r['tf_predict_cv']
def get_infection_curve(self, y, k=2, n_folds=3):
"""
Estimate infections via ADMM TF framework:
x_tilde = argmin ||W^{-1}y - x||_2^2 + lam*||Dx||_1
x
where W is the convolution matrix, and D is the discrete
difference operator of order k+1.
"""
n = y.shape[0]
W = Conv1D.get_conv_matrix(y, self.delay)[:n, ]
r_y = robjects.FloatVector(np.linalg.inv(W) @ y)
mod = genlasso.trendfilter(r_y, ord=k)
r_pred = rlist2dict(self.tf_predict(mod, n_folds))['preds']
return rfloat2arr(r_pred).flatten()
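# Minimal usage sketch (assumptions: `delay` is a 1D array of delay-distribution
# probabilities and `y` is a 1D array of observed case counts):
#   model = InfectionCurve(delay, verbose=True)
#   infections = model.get_infection_curve(y, k=2, n_folds=3)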
| 28.426667 | 85 | 0.569887 | 279 | 2,132 | 4.189964 | 0.390681 | 0.06929 | 0.047049 | 0.046193 | 0.109495 | 0.109495 | 0.109495 | 0.109495 | 0.053037 | 0 | 0 | 0.024742 | 0.317542 | 2,132 | 74 | 86 | 28.810811 | 0.778694 | 0.206379 | 0 | 0 | 0 | 0 | 0.044297 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.217391 | 0 | 0.391304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d9458f4cc4ae044d09d6c2266ceaaa8d8595799 | 4,174 | py | Python | tests/batch_run_tests.py | Vastra-Gotalandsregionen/selenium-vgregion_se | a8bac6b9c90ac0458dd806fcee8b3f2a510496dd | [
"MIT"
] | null | null | null | tests/batch_run_tests.py | Vastra-Gotalandsregionen/selenium-vgregion_se | a8bac6b9c90ac0458dd806fcee8b3f2a510496dd | [
"MIT"
] | 2 | 2021-02-09T10:53:04.000Z | 2021-04-27T13:07:08.000Z | tests/batch_run_tests.py | Vastra-Gotalandsregionen/selenium-vgregion_se | a8bac6b9c90ac0458dd806fcee8b3f2a510496dd | [
"MIT"
] | null | null | null | """
Run all or some tests
"""
import argparse
import glob
import os
from pathlib import Path
import robot
from robot import rebot
def parse_argument():
"""
Parses optional arguments that's then gets passed to the robot files.
This makes it possible to customise the paths for different environments.
"""
current_dir = str(get_current_dir())
parser = argparse.ArgumentParser(description='Runs .robot files and\
creates output that can be used by Azure DevOps Pipelines')
parser.add_argument('--files',
dest='files',
type=str,
default=None,
help='Array of .robot files to run')
parser.add_argument('--destdir',
dest='destdir',
type=str,
default=current_dir,
help='Absolute path to directory that contains the\
.robot files')
parser.add_argument('--outputdir',
dest='outputdir',
type=str,
default=current_dir,
help='Absolute path to the output directory for\
the test results')
parser.add_argument('--variables',
dest='variables',
type=str,
# The variables argument cannot be empty, otherwise an exception is raised.
# This is just a default dummy variable that is not used by any test.
default='DEFAULTVALUE:none',
help='Override variables created in test case files.\
To override which site to run the test cases on, use:\
"NARHALSAN_DOMAIN:site1.vgregion.se,\
VGR_DOMAIN:site2.vgregion.se,\
FTV_DOMAIN:site3.vgregion.se" etc.')
# Return dictionary of args
return vars(parser.parse_args())
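# Example invocation (illustrative paths and variables):
#   python batch_run_tests.py --destdir C:\robot_tests --outputdir C:\results --variables "VGR_DOMAIN:site2.vgregion.se"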
def get_current_dir():
"""
Get the directory of the executed Python file (i.e. this file)
"""
# Resolve to get rid of any symlinks
current_path = Path(__file__).resolve()
current_dir = current_path.parent
return current_dir
def batch_run_tests(files=None):
"""
Takes a list of .robot files in the directory as input.
If none is given, it runs all .robot files instead.
"""
arguments = parse_argument()
robot_files_dir = arguments['destdir']
output_log_dir = arguments['outputdir']
variables = arguments['variables']
if not output_log_dir.endswith('\\'):
output_log_dir += '\\'
if isinstance(variables, str):
variables = variables.split(",")
os.chdir(robot_files_dir)
# This is a bit backwards and not too clean.
# TODO: clean up.
if files is None:
files = arguments['files']
if isinstance(files, str):
files = files.split(",")
if files is None:
files = glob.glob("*.robot")
log_file = open(output_log_dir + 'batch_log_output.txt', 'w')
for index, file in enumerate(files):
print(file)
file_name = file.replace(".robot", "")
output_file_name = 'output_' + file_name
# Run tests and create logs and reports with a unique name for each test
robot.run(file,
output=output_log_dir + output_file_name,
log=output_log_dir + 'log_' + file_name,
report=output_log_dir + 'report_' + file_name,
stdout=log_file,
variable=variables)
# Create output with XUnit format
rebot(output_log_dir
+ output_file_name
+ '.xml',
xunit=output_log_dir
+ 'xunitoutput_'
+ file_name
+ '.xml')
# To limit the number of tests, change -1 to a positive number
if index == -1:
break
"""
If file is executed on itself then call a definition,
mostly for testing purposes
"""
if __name__ == '__main__':
print(batch_run_tests())
| 31.621212 | 102 | 0.558457 | 476 | 4,174 | 4.735294 | 0.367647 | 0.035936 | 0.047915 | 0.018634 | 0.063886 | 0.047915 | 0 | 0 | 0 | 0 | 0 | 0.00149 | 0.356732 | 4,174 | 131 | 103 | 31.862595 | 0.837616 | 0.180642 | 0 | 0.101266 | 0 | 0 | 0.070202 | 0 | 0 | 0 | 0 | 0.007634 | 0 | 1 | 0.037975 | false | 0 | 0.075949 | 0 | 0.139241 | 0.025316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d95c58cc9d7129f1558d5d523afa18fb2deef5a | 10,994 | py | Python | mvdr/ajive/AJIVE.py | idc9/mvdr | ab04895a04a8f4e1b40e332591c736ba18bf8fd7 | [
"MIT"
] | 2 | 2020-10-16T07:45:45.000Z | 2021-01-05T04:30:57.000Z | mvdr/ajive/AJIVE.py | idc9/mvdr | ab04895a04a8f4e1b40e332591c736ba18bf8fd7 | [
"MIT"
] | null | null | null | mvdr/ajive/AJIVE.py | idc9/mvdr | ab04895a04a8f4e1b40e332591c736ba18bf8fd7 | [
"MIT"
] | null | null | null | from sklearn.base import BaseEstimator
from warnings import warn
from textwrap import dedent
from ya_pca.PCA import PCA
from mvlearn.utils import check_Xs
from mvdr.mcca.mcca import MCCA, MCCAView
from mvdr.ajive.ajive_fun import ajive, _ajive_docs
from mvdr.ajive.plot_ajive_diagnostic import plot_joint_diagnostic
from mvdr.linalg_utils import normalize_cols
class AJIVE(BaseEstimator):
def __init__(self, init_signal_ranks=None,
center=True, common_loading_method='map_back',
check_joint_identif=True,
wedin_percentile=5, n_wedin_samples=1000,
rand_percentile=95, n_rand_samples=1000,
usr_joint_rank=None, usr_iniv_ranks=None,
store_full=True, final_decomp=True,
n_jobs=None):
self.init_signal_ranks = init_signal_ranks
self.center = center
self.common_loading_method = common_loading_method
self.wedin_percentile = wedin_percentile
self.n_wedin_samples = n_wedin_samples
self.rand_percentile = rand_percentile
self.n_rand_samples = n_rand_samples
self.check_joint_identif = check_joint_identif
self.store_full = store_full
self.final_decomp = final_decomp
self.usr_joint_rank = usr_joint_rank
self.usr_iniv_ranks = usr_iniv_ranks
self.n_jobs = n_jobs
def fit(self, Xs):
Xs, n_views, n_samples, n_features = check_Xs(Xs, multiview=True,
return_dimensions=True)
usr_iniv_ranks = arg_checker(Xs=Xs,
usr_iniv_ranks=self.usr_iniv_ranks)
assert self.init_signal_ranks is not None and \
len(self.init_signal_ranks) == n_views
ajive_out = ajive(Xs=Xs,
init_signal_ranks=self.init_signal_ranks,
joint_rank=self.usr_joint_rank,
indiv_ranks=usr_iniv_ranks,
center=self.center,
check_joint_identif=self.check_joint_identif,
wedin_percentile=self.wedin_percentile,
n_wedin_samples=self.n_wedin_samples,
rand_percentile=self.rand_percentile,
n_rand_samples=self.n_rand_samples,
final_decomp=self.final_decomp,
store_full=self.store_full,
n_jobs=self.n_jobs)
##################
# common results #
##################
self.common_ = get_mcca_from_ajive_out(ajive_out)
##################
# view specific #
##################
self.view_specific_ = {}
for b in range(n_views):
self.view_specific_[b] = ViewSpecificResults(
decomps=ajive_out['decomps'][b],
centerer=ajive_out['centerers'][b],
sv_threshold=ajive_out['sv_thresholds'][b],
view_idx=b)
##############
# other data #
##############
self.rank_est_ = ajive_out['rank_est']
self.n_views_ = n_views
return self
def get_ranks(self):
"""
Output
------
joint_rank: int
indiv_ranks: list of ints
The individual rank for each view.
"""
return self.common_.n_components, \
[self.view_specific_[b].indiv_rank_
for b in range(self.n_views_)]
@property
def is_fit(self):
return hasattr(self, 'common_')
def get_view_decomps(self):
"""
Output
------
full: dict of dict of np.arrays
The joint, individual, and noise full estimates for each view.
"""
full = {}
for b in range(self.n_views_):
full[b] = {'joint': self.view_specific_[b].joint_.full_,
'individual': self.view_specific_[b].individual_.full_,
'noise': self.view_specific_[b].noise_}
return full
def summary(self):
"""
Returns a summary of AJIVE.
"""
if self.is_fit:
joint_rank, indiv_ranks = self.get_ranks()
r = 'AJIVE, joint rank: {}'.format(joint_rank)
for b in range(self.n_views_):
r += ', view {} indiv rank: {}'.format(b, indiv_ranks[b])
return r
else:
return 'AJIVE has not been fit.'
###########
# sklearn #
###########
def transform(self, Xs):
"""
Parameters
----------
Xs: list of array-like, each of shape (n_new_samples, n_features)
The data views to project.
Output
------
s: array-like, shape (n_new_samples, n_components)
The common projections of the new data.
"""
# TODO: what to do when there is no joint rank
return self.common_.transform(Xs)
#################
# visualization #
#################
def plot_joint_diagnostic(self, fontsize=20):
"""
Plots joint rank threshold diagnostic plot
"""
rand_cutoff = self.rank_est_['rand']['threshold']
rand_sv_samples = self.rank_est_['rand']['samples']
wedin_cutoff = self.rank_est_['wedin']['threshold']
wedin_sv_samples = self.rank_est_['wedin']['samples']
all_common_svals = self.rank_est_['all_common_svals']
identif_dropped = self.rank_est_['identif_dropped']
joint_rank = self.get_ranks()[0]
plot_joint_diagnostic(all_common_svals=all_common_svals,
joint_rank=joint_rank,
wedin_cutoff=wedin_cutoff,
rand_cutoff=rand_cutoff,
wedin_sv_samples=wedin_sv_samples,
rand_sv_samples=rand_sv_samples,
wedin_percentile=self.wedin_percentile,
rand_percentile=self.rand_percentile,
min_signal_rank=min(self.init_signal_ranks),
identif_dropped=identif_dropped,
fontsize=fontsize)
AJIVE.__doc__ = dedent("""
Angle-based Joint and Individual Variation Explained
Parameters
----------
{basic_args}
Attributes
----------
common_: mvdr.mcca.MCCA
Stores the common/joint space estimates as a MCCA object.
view_specific_: mvdr.ajive.AJIVE.ViewSpecificResults
Stores the view specific results including the view specific
joint and individual decompositions.
rank_est_: dict
Data for joint rank selection e.g. the wedin samples.
init_signal_ranks: list
The initial signal ranks
""".format(**_ajive_docs))
def get_mcca_from_ajive_out(ajive_out):
joint_rank = ajive_out['common']['rank']
if joint_rank == 0:
return None
common = MCCA(n_components=joint_rank,
signal_ranks=ajive_out['init_signal_ranks'],
center=ajive_out['center'])
common_out = ajive_out['common']
common.common_norm_scores_ = common_out['common_scores']
common.evals_ = common_out['sqsvals']
cs_col_norms = normalize_cols(sum(vs for
vs in common_out['view_scores']))[1]
common.cs_col_norms_ = cs_col_norms
n_views = len(ajive_out['centerers'])
views = [None for b in range(n_views)]
for b in range(n_views):
bs = common_out['view_scores'][b]
bl = common_out['view_loadings'][b]
cent = ajive_out['centerers'][b]
views[b] = MCCAView(view_scores=bs,
view_loadings=bl,
centerer=cent)
common.views_ = views
return common
def arg_checker(Xs, usr_iniv_ranks):
n_views = len(Xs)
################################
# parse view specific options #
################################
view_indiv_ranks = [None for b in range(n_views)]
# view_init_svds = [None for b in range(n_views)]
if usr_iniv_ranks is not None:
for b in range(n_views):
view_indiv_ranks[b] = usr_iniv_ranks[b]
return view_indiv_ranks
def get_pca(decomps, centerer):
U = decomps['scores']
D = decomps['svals']
V = decomps['loadings']
n_components = decomps['rank']
# setup PCA object
pca = PCA(n_components=n_components, center=centerer.mean_ is not None)
pca.scores_ = U
pca.svals_ = D
pca.loadings_ = V
pca.centerer_ = centerer
pca.tot_variance_ = sum(D ** 2)
return pca
class ViewSpecificResults(object):
"""
Contains the view specific results.
Attributes
----------
joint_: ya_pca.PCA.PCA
View specific joint PCA.
Has an extra attribute joint.full_ which contains the full view
joint estimate.
individual_: ya_pca.PCA.PCA
View specific individual PCA.
Has an extra attribute individual.full_ which contains the full view
individual estimate.
noise_: array-like
The full noise view estimate.
view_idx_:
Index of this view.
shape_: tuple
(n_observations, n_features)
Note that both joint_ and individual_ have an additional attribute .full_
(e.g. joint_.full_) which contains the full reconstructed matrix.
"""
def __init__(self, decomps, centerer,
sv_threshold=None, view_idx=None):
self.view_idx_ = view_idx
########################
# view specific joint #
########################
if decomps['joint']['scores'] is None:
self.joint_ = None
self.joint_rank_ = 0
else:
self.joint_ = get_pca(decomps['joint'], centerer=centerer)
self.joint_.full_ = decomps['joint']['full']
self.joint_rank_ = self.joint_.n_components_
#############################
# view specific individual #
#############################
if decomps['individual']['scores'] is None:
self.individual_ = None
self.indiv_rank_ = 0
else:
self.individual_ = get_pca(decomps['individual'],
centerer=centerer)
self.individual_.full_ = decomps['individual']['full']
self.indiv_rank_ = self.individual_.n_components_
#################################
# view specific noise estimate #
################################
self.noise_ = decomps['noise']
# other metadata
self.sv_threshold_ = sv_threshold
def __repr__(self):
return 'View: {}, individual rank: {}, joint rank: {}'.\
format(self.view_idx_, self.indiv_rank_, self.joint_rank_)
| 31.144476 | 78 | 0.562398 | 1,242 | 10,994 | 4.654589 | 0.171498 | 0.03425 | 0.025947 | 0.017125 | 0.119357 | 0.075938 | 0.046532 | 0.017298 | 0 | 0 | 0 | 0.002541 | 0.319811 | 10,994 | 352 | 79 | 31.232955 | 0.770527 | 0.133255 | 0 | 0.073684 | 0 | 0 | 0.116329 | 0.004094 | 0 | 0 | 0 | 0.002841 | 0.005263 | 1 | 0.068421 | false | 0 | 0.047368 | 0.010526 | 0.189474 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d962fb2a38bf698d04865d72b94f1759ba5b53e | 1,553 | py | Python | get_labels.py | cplm98/SEC-Data-Analysis | 43de491a8b0f36f558d922fd0056a0a0a8455df7 | [
"MIT"
] | null | null | null | get_labels.py | cplm98/SEC-Data-Analysis | 43de491a8b0f36f558d922fd0056a0a0a8455df7 | [
"MIT"
] | null | null | null | get_labels.py | cplm98/SEC-Data-Analysis | 43de491a8b0f36f558d922fd0056a0a0a8455df7 | [
"MIT"
] | null | null | null | from alpha_vantage.timeseries import TimeSeries
import pandas as pd
import numpy as np
import time
sp = pd.DataFrame(pd.read_csv('./constituents_csv.csv'))
key = '9AD6SV02MT4Z7G8W'
ts = TimeSeries(key, output_format='pandas')
# aapl, meta = ts.get_monthly_adjusted(symbol='AAPL')
# print(aapl[aapl.index >= '2015'])
results=[]
for ticker in sp['Symbol'][:100]:
print(ticker)
try:
res, meta = ts.get_monthly_adjusted(symbol=ticker)
res['ticker'] = ticker
for i in range(2015, 2021): # 2015 through 2020 (range end is exclusive)
try:
year = res[res.index.year == i]
open_ = year[year.index.month == 1]['1. open'].values
close = year[year.index.month == 12]['5. adjusted close'].values
yearly_adj_close = close - open_
yearly_change = yearly_adj_close/open_
results.append({'year': i, 'ticker': ticker, 'yearly adjsuted close': yearly_adj_close[0], 'yearly percent change': yearly_change[0], 'year open': open_[0], 'year adjusted close': close[0]})
except:
print(res)
print('No valid data for year', i, ' with Ticker ', ticker)
results.append({'year': i, 'ticker': ticker, 'yearly adjsuted close': None, 'yearly percent change': None, 'year open': None, 'year adjusted close': None})
except:
print('Problem retrieving stock information: ', ticker)
time.sleep(15) # necessary to not overwhelm API
print(results)
df = pd.DataFrame(results)
df.to_csv('labels.csv') | 39.820513 | 206 | 0.622022 | 201 | 1,553 | 4.701493 | 0.373134 | 0.050794 | 0.044444 | 0.033862 | 0.167196 | 0.167196 | 0.103704 | 0.103704 | 0.103704 | 0 | 0 | 0.031489 | 0.2434 | 1,553 | 39 | 207 | 39.820513 | 0.772766 | 0.081777 | 0 | 0.129032 | 0 | 0 | 0.227848 | 0.015471 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.129032 | 0 | 0.129032 | 0.16129 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d9708152f4fcf358192d4adfd1090a248e90d54 | 9,042 | py | Python | zipline/data/bundles/sharadar.py | TheTradingAngel/zipline-trader | fdce0641005371f2b523c6faeb551c3d40273902 | [
"Apache-2.0"
] | 274 | 2020-06-08T13:45:50.000Z | 2022-03-29T02:59:11.000Z | zipline/data/bundles/sharadar.py | TheTradingAngel/zipline-trader | fdce0641005371f2b523c6faeb551c3d40273902 | [
"Apache-2.0"
] | 103 | 2020-07-24T04:32:17.000Z | 2022-03-29T06:28:10.000Z | zipline/data/bundles/sharadar.py | TheTradingAngel/zipline-trader | fdce0641005371f2b523c6faeb551c3d40273902 | [
"Apache-2.0"
] | 73 | 2020-06-03T00:34:04.000Z | 2022-03-29T16:57:47.000Z | """
Module for building a complete daily dataset from quandl sharadar's dataset.
written by https://github.com/ajjcoppola
make sure you set the QUANDL_API_KEY env variable to use this bundle
"""
from io import BytesIO
from zipfile import ZipFile
from click import progressbar
from logbook import Logger
import pandas as pd
import requests
from six.moves.urllib.parse import urlencode
from six import iteritems
from trading_calendars import register_calendar_alias
from zipline.data.bundles import core as bundles # looking in .zipline/extensions.py
import numpy as np
# Code from:
# Quantopian Zipline Issues:
# "Cannot find data bundle during ingest #2275"
# https://github.com/quantopian/zipline/issues/2275
log = Logger(__name__)
ONE_MEGABYTE = 1024 * 1024
QUANDL_DATA_URL = (
'https://www.quandl.com/api/v3/datatables/SHARADAR/SEP.csv?'
)
@bundles.register('sharadar-prices')
def sharadar_prices_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir):
api_key = environ.get('QUANDL_API_KEY')
if api_key is None:
raise ValueError(
"Please set your QUANDL_API_KEY environment variable and retry."
)
###ticker2sid_map = {}
raw_data = fetch_data_table(
api_key,
show_progress,
environ.get('QUANDL_DOWNLOAD_ATTEMPTS', 5)
)
asset_metadata = gen_asset_metadata(
raw_data[['symbol', 'date']],
show_progress
)
asset_db_writer.write(asset_metadata)
symbol_map = asset_metadata.symbol
sessions = calendar.sessions_in_range(start_session, end_session)
raw_data.set_index(['date', 'symbol'], inplace=True)
daily_bar_writer.write(
parse_pricing_and_vol(
raw_data,
sessions,
symbol_map
),
show_progress=show_progress
)
raw_data.reset_index(inplace=True)
# raw_data.index = pd.DatetimeIndex(raw_data.date)
###ajjc changes
raw_data['symbol'] = raw_data['symbol'].astype('category')
raw_data['sid'] = raw_data.symbol.cat.codes
# read in Dividend History
# ajjc pharrin----------------------
###uv = raw_data.symbol.unique() # get unique m_tickers (Zacks primary key)
# iterate over all the unique securities and pack data, and metadata
# for writing
# counter of valid securites, this will be our primary key
###sec_counter = 0
###for tkr in uv:
### #df_tkr = raw_data[raw_data['symbol'] == tkr]
### ticker2sid_map[tkr] = sec_counter # record the sid for use later
### sec_counter += 1
### dfd = pd.read_csv(file_name, index_col='date',
### parse_dates=['date'], na_values=['NA'])
# drop rows where dividends == 0.0
raw_data = raw_data[raw_data["dividends"] != 0.0]
raw_data.set_index(['date', 'sid'], inplace=True)
# raw_data.loc[:, 'ex_date'] = raw_data.loc[:, 'record_date'] = raw_data.date
# raw_data.loc[:, 'declared_date'] = raw_data.loc[:, 'pay_date'] = raw_data.date
raw_data.loc[:, 'ex_date'] = raw_data.loc[:, 'record_date'] = raw_data.index.get_level_values('date')
raw_data.loc[:, 'declared_date'] = raw_data.loc[:, 'pay_date'] = raw_data.index.get_level_values('date')
# raw_data.loc[:, 'sid'] = raw_data.loc[:, 'symbol'].apply(lambda x: ticker2sid_map[x])
raw_data = raw_data.rename(columns={'dividends': 'amount'})
# raw_data = raw_data.drop(['open', 'high', 'low', 'close', 'volume','symbol'], axis=1)
raw_data.reset_index(inplace=True)
raw_data = raw_data.drop(['open', 'high', 'low', 'close', 'volume', 'symbol', 'date'], axis=1)
# raw_data = raw_data.drop(['open', 'high', 'low', 'close', 'volume', 'lastupdated', 'ticker', 'closeunadj'], axis=1)
# # format dfd to have sid
adjustment_writer.write(dividends=raw_data)
# ajjc ----------------------------------
def format_metadata_url(api_key):
""" Build the query URL for Quandl Prices metadata.
"""
query_params = [('api_key', api_key), ('qopts.export', 'true')]
return (
QUANDL_DATA_URL + urlencode(query_params)
)
def load_data_table(file,
index_col,
show_progress=False):
""" Load data table from zip file provided by Quandl.
"""
with ZipFile(file) as zip_file:
file_names = zip_file.namelist()
assert len(file_names) == 1, "Expected a single file from Quandl."
wiki_prices = file_names.pop()
with zip_file.open(wiki_prices) as table_file:
if show_progress:
log.info('Parsing raw data.')
data_table = pd.read_csv(
table_file,
parse_dates=['date'],
index_col=index_col,
usecols=[
'ticker',
'date',
'open',
'high',
'low',
'close',
'volume',
'dividends',
##'closeunadj',
##'lastupdated' #prune last two columns for zipline bundle load
],
)
data_table.rename(
columns={
'ticker': 'symbol'
},
inplace=True,
copy=False,
)
return data_table
def fetch_data_table(api_key,
show_progress,
retries):
for _ in range(retries):
try:
if show_progress:
log.info('Downloading Sharadar Price metadata.')
metadata = pd.read_csv(
format_metadata_url(api_key)
)
# Extract link from metadata and download zip file.
table_url = metadata.loc[0, 'file.link']
if show_progress:
raw_file = download_with_progress(
table_url,
chunk_size=ONE_MEGABYTE,
label="Downloading Prices table from Quandl Sharadar"
)
else:
raw_file = download_without_progress(table_url)
return load_data_table(
file=raw_file,
index_col=None,
show_progress=show_progress,
)
except Exception:
log.exception("Exception raised reading Quandl data. Retrying.")
else:
raise ValueError(
"Failed to download Quandl data after %d attempts." % (retries)
)
def gen_asset_metadata(data, show_progress):
if show_progress:
log.info('Generating asset metadata.')
data = data.groupby(
by='symbol'
).agg(
{'date': [np.min, np.max]}
)
data.reset_index(inplace=True)
data['start_date'] = data.date.amin
data['end_date'] = data.date.amax
del data['date']
data.columns = data.columns.get_level_values(0)
data['exchange'] = 'QUANDL'
data['auto_close_date'] = data['end_date'].values + pd.Timedelta(days=1)
return data
def parse_pricing_and_vol(data,
sessions,
symbol_map):
for asset_id, symbol in iteritems(symbol_map):
asset_data = data.xs(
symbol,
level=1
).reindex(
sessions.tz_localize(None)
).fillna(0.0)
yield asset_id, asset_data
def download_with_progress(url, chunk_size, **progress_kwargs):
"""
Download streaming data from a URL, printing progress information to the
terminal.
Parameters
----------
url : str
A URL that can be understood by ``requests.get``.
chunk_size : int
Number of bytes to read at a time from requests.
**progress_kwargs
Forwarded to click.progressbar.
Returns
-------
data : BytesIO
A BytesIO containing the downloaded data.
"""
resp = requests.get(url, stream=True)
resp.raise_for_status()
total_size = int(resp.headers['content-length'])
data = BytesIO()
with progressbar(length=total_size, **progress_kwargs) as pbar:
for chunk in resp.iter_content(chunk_size=chunk_size):
data.write(chunk)
pbar.update(len(chunk))
data.seek(0)
return data
def download_without_progress(url):
"""
Download data from a URL, returning a BytesIO containing the loaded data.
Parameters
----------
url : str
A URL that can be understood by ``requests.get``.
Returns
-------
data : BytesIO
A BytesIO containing the downloaded data.
"""
resp = requests.get(url)
resp.raise_for_status()
return BytesIO(resp.content)
register_calendar_alias("sharadar-prices", "NYSE")
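# Usage sketch (assumption): with QUANDL_API_KEY exported and this bundle module
# imported from ~/.zipline/extension.py, the data can be ingested with:
#   zipline ingest -b sharadar-prices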
| 30.755102 | 121 | 0.586043 | 1,063 | 9,042 | 4.767639 | 0.280339 | 0.059392 | 0.026046 | 0.022099 | 0.191397 | 0.147395 | 0.147395 | 0.133978 | 0.121547 | 0.121547 | 0 | 0.006007 | 0.300376 | 9,042 | 293 | 122 | 30.860068 | 0.795131 | 0.26023 | 0 | 0.107955 | 0 | 0 | 0.118842 | 0.003695 | 0 | 0 | 0 | 0 | 0.005682 | 1 | 0.045455 | false | 0 | 0.0625 | 0 | 0.142045 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d993e402a7b0249de1ea171b0cea3712d581afe | 840 | py | Python | footballleagues/models/league.py | RicardoSilveira23/TonicAppChallenge | 961107acbcdd93551bcd1b4b0ecd877fb4a7d813 | [
"MIT"
] | null | null | null | footballleagues/models/league.py | RicardoSilveira23/TonicAppChallenge | 961107acbcdd93551bcd1b4b0ecd877fb4a7d813 | [
"MIT"
] | null | null | null | footballleagues/models/league.py | RicardoSilveira23/TonicAppChallenge | 961107acbcdd93551bcd1b4b0ecd877fb4a7d813 | [
"MIT"
] | null | null | null | from .base import *
class League(BaseModel):
# An implicit auto-incrementing id primary key field is created
name = models.TextField(unique=True, blank=True, null=False)
country = models.TextField(blank=True, null=False)
number_of_teams = models.IntegerField(blank=True, null=False, default=0)
most_championships = models.ForeignKey(
"Team",
on_delete=models.DO_NOTHING,
related_name="most_championships",
blank=True,
null=True,
)
most_appearances = models.ForeignKey(
"Player",
on_delete=models.DO_NOTHING,
related_name="most_appearances",
blank=True,
null=True,
)
current_champion = models.ForeignKey(
"Team",
on_delete=models.DO_NOTHING,
related_name="current_champion",
blank=True,
null=True,
)
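# Example query (illustrative): leagues from a given country together with
# their current champion:
#   League.objects.filter(country="England").select_related("current_champion")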
| 28 | 76 | 0.641667 | 93 | 840 | 5.612903 | 0.430108 | 0.103448 | 0.149425 | 0.103448 | 0.287356 | 0.287356 | 0.287356 | 0.287356 | 0.206897 | 0.206897 | 0 | 0.001603 | 0.257143 | 840 | 29 | 77 | 28.965517 | 0.834936 | 0.047619 | 0 | 0.423077 | 0 | 0 | 0.080201 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.038462 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d9a64aa318116c46fad272a16dfa8c713d941ef | 18,929 | py | Python | pyswitch/snmp/base/acl/ipv6acl.py | mfeed/PySwitchLib | 54e872bcbe77f2ae840d845dadb7c5b9c12482ed | [
"Apache-2.0"
] | 6 | 2017-10-02T21:02:02.000Z | 2018-07-04T13:56:55.000Z | pyswitch/snmp/base/acl/ipv6acl.py | mfeed/PySwitchLib | 54e872bcbe77f2ae840d845dadb7c5b9c12482ed | [
"Apache-2.0"
] | 23 | 2017-10-03T18:49:11.000Z | 2019-07-20T00:25:44.000Z | pyswitch/snmp/base/acl/ipv6acl.py | mfeed/PySwitchLib | 54e872bcbe77f2ae840d845dadb7c5b9c12482ed | [
"Apache-2.0"
] | 4 | 2018-02-27T05:43:37.000Z | 2019-06-30T13:30:25.000Z | """
Copyright 2017 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import socket
from aclparam_parser import AclParamParser
class Ipv6Acl(AclParamParser):
"""
The Ipv6Acl class holds all the functions associated with
IPv6 Access Control list.
Attributes:
None
"""
def parse_vlan(self, **parameters):
"""
parse the vlan param
Args:
parameters contains:
vlan_id(integer): 1-4095
Returns:
Return None or parsed string on success
Raise:
Raise ValueError exception
Examples:
"""
if 'vlan_id' not in parameters:
return None
vlan = parameters['vlan_id']
if not vlan:
return None
if vlan > 0 and vlan < 4096:
return 'vlan ' + str(vlan)
raise ValueError("The \'vlan\' value {} is invalid."
" Specify \'1-4095\' supported values")
def parse_protocol(self, **parameters):
"""
parse the protocol param
Args:
parameters contains:
protocol_type: (string) Type of IP packets to be filtered
based on protocol. Valid values are <0-255> or
key words tcp, udp, icmp or ip
Returns:
Return None or parsed string on success
Raise:
Raise ValueError exception
Examples:
"""
if 'protocol_type' not in parameters or \
not parameters['protocol_type']:
raise ValueError("\'protocol_type\' is required for MLX device")
protocol_type = parameters['protocol_type']
if protocol_type.isdigit():
if int(protocol_type) >= 0 and int(protocol_type) <= 255:
return protocol_type
if protocol_type in ['ahp', 'esp', 'icmp', 'ipv6',
'sctp', 'tcp', 'udp']:
return protocol_type
raise ValueError("The \'protocol\' value {} is invalid. Specify one "
"of these - ahp, esp, icmp, ipv6, sctp, tcp, udp "
"or a number between 0 and 255"
.format(protocol_type))
def _validate_ipv6(self, addr):
addr = ' '.join(addr.split())
try:
socket.inet_pton(socket.AF_INET6, addr)
except socket.error as se:
raise ValueError(str(se) + 'Invalid address: ' + addr)
def _validate_op_str(self, op_str):
op_str = ' '.join(op_str.split()).split()
if len(op_str) == 2:
if op_str[0] in ['neq', 'lt', 'gt', 'eq']:
return True
elif len(op_str) == 3 and op_str[0] == 'range':
return True
raise ValueError('Invalid tcp-udp-operator: ' + ' '.join(op_str))
def _parse_source_destination(self, protocol_type, input_param):
v6_str = input_param
op_str = ''
op_index = -1
for tcp_udp_op in ['range', 'neq', 'lt', 'gt', 'eq']:
op_index = input_param.find(tcp_udp_op)
if op_index >= 0:
op_str = input_param[op_index:]
v6_str = input_param[0:op_index]
break
if protocol_type not in ['tcp', 'udp'] and op_str:
raise ValueError("tcp udp operator is supported only for."
"protocol_type = tcp or udp")
if op_str:
self._validate_op_str(op_str)
if v6_str[0:3] == "any":
return v6_str + ' ' + op_str
if v6_str[0:4] == "host":
self._validate_ipv6(v6_str[5:])
return v6_str + ' ' + op_str
if '/' in v6_str:
self._validate_ipv6(v6_str.split('/')[0])
return v6_str + ' ' + op_str
ip, mask = v6_str.split()
self._validate_ipv6(ip)
self._validate_ipv6(mask)
return v6_str + ' ' + op_str
def parse_source(self, **parameters):
"""
parse the source param.
Args:
parameters contains:
source (string): Source filter, can be of below format
Len=1 or 3 X:X::X:X/M IPv6 source prefix (2:2::2:2/64)
Len=1 or 3 any Any source host (any)
Len=2 or 4 IPv6 source address (2:2::2:2 0:0::FFFF:FFFF)
Len=2 or 4 host A single source host (host 2:2::2:2)
followed by [ source-operator [ S_port-numbers ] ]
Returns:
Return None or parsed string on success
Raise:
Raise ValueError exception
Examples:
"""
if 'source' not in parameters or not parameters['source']:
raise ValueError("Missing \'source\' is parameters")
if 'protocol_type' not in parameters or \
not parameters['protocol_type']:
raise ValueError("\'protocol_type\' is required for MLX device")
src = parameters['source']
src = ' '.join(src.split())
return self._parse_source_destination(parameters['protocol_type'], src)
def parse_destination(self, **parameters):
"""
parse the destination param.
Args:
parameters contains:
destination (string): destination filter, can be of format:
X:X::X:X/M IPv6 destination prefix (2:2::2:2/64)
any Any destination host (any)
IPv6 destination address (2:2::2:2 0:0::FFFF:FFFF)
host A single destination host (host 2:2::2:2)
followed by [ destination-operator [ S_port-numbers ] ]
Returns:
Return None or parsed string on success
Raise:
Raise ValueError exception
Examples:
"""
if 'destination' not in parameters or not parameters['destination']:
raise ValueError("\'destination\' is required param")
if 'protocol_type' not in parameters or \
not parameters['protocol_type']:
raise ValueError("\'protocol_type\' is required for MLX device")
dst = parameters['destination']
dst = ' '.join(dst.split())
return self._parse_source_destination(parameters['protocol_type'], dst)
def parse_dscp_mapping(self, **parameters):
"""
parse the dscp mapping param.
Args:
parameters contains:
dscp: (string) Matches the specified value against the DSCP
value of the packet to filter.
Allowed values are 0 through 63.
Returns:
Return None or parsed string on success
Raise:
Raise ValueError exception
Examples:
"""
if 'dscp' not in parameters:
return None
if not parameters['dscp']:
return None
dscp_mapping = parameters['dscp']
dscp_mapping = ' '.join(dscp_mapping.split())
if dscp_mapping.isdigit():
if int(dscp_mapping) >= 0 and int(dscp_mapping) <= 63:
return 'dscp ' + dscp_mapping
raise ValueError("Invalid dscp_mapping {}. Supported range is "
"<0-63>".format(dscp_mapping))
def parse_fragment(self, **parameters):
"""
parse the fragment param.
Args:
parameters contains:
fragment: (boolean) Matches fragmented packets.
Valid only when protocol_type is ipv6.
Returns:
Return None or parsed string on success
Raise:
Raise ValueError exception
Examples:
"""
if 'fragment' not in parameters:
return None
if 'protocol_type' not in parameters or \
not parameters['protocol_type']:
raise ValueError("\'protocol_type\' is required for MLX device")
if parameters['fragment']:
if parameters['protocol_type'] != 'ipv6':
raise ValueError("\'fragment\' can be set for ipv6 only.")
return 'fragments'
return None
def parse_drop_precedence(self, **parameters):
"""
parse the drop_precedence param
Args:
parameters contains:
drop_precedence( string): Matches the specified value
against the drop_precedence value of the packet to
filter. Allowed values are 0 through 3.
Returns:
Return None or parsed string on success
Raise:
Raise ValueError exception
Examples:
"""
if 'drop_precedence' not in parameters:
return None
if not parameters['drop_precedence']:
return None
drop_precedence = parameters['drop_precedence']
if drop_precedence.isdigit():
if int(drop_precedence) >= 0 and int(drop_precedence) <= 3:
return 'drop-precedence ' + drop_precedence
raise ValueError("The \'drop-precedence\' value {} is invalid."
" Supported range is 0 to 3".format(drop_precedence))
def parse_drop_precedence_force(self, **parameters):
"""
parse the drop_precedence_force param
Args:
parameters contains:
drop_precedence_force( string): Matches the specified value
against the drop_precedence_force value of the packet to
filter. Allowed values are 0 through 3.
Returns:
Return None or parsed string on success
Raise:
Raise ValueError exception
Examples:
"""
if 'drop_precedence_force' not in parameters:
return None
if not parameters['drop_precedence_force']:
return None
drop_precedence_force = parameters['drop_precedence_force']
if drop_precedence_force.isdigit():
if int(drop_precedence_force) >= 0 and \
int(drop_precedence_force) <= 3:
return 'drop-precedence-force ' + drop_precedence_force
raise ValueError("The \'drop-precedence-force\' value {} is invalid."
" Supported range is 0 to 3"
.format(drop_precedence_force))
def parse_dscp_marking(self, **parameters):
"""
parse the dscp marking param.
Args:
parameters contains:
dscp: (string) Matches the specified value against the DSCP
value of the packet to filter.
Allowed values are 0 through 63.
Returns:
Return None or parsed string on success
Raise:
Raise ValueError exception
Examples:
"""
if 'dscp_marking' not in parameters:
return None
if not parameters['dscp_marking']:
return None
dscp_marking = parameters['dscp_marking']
dscp_marking = ' '.join(dscp_marking.split())
if dscp_marking.isdigit():
if int(dscp_marking) >= 0 and int(dscp_marking) <= 63:
return 'dscp-marking ' + dscp_marking
raise ValueError("Invalid dscp_marking {}. Supported range is "
"<0-63>".format(dscp_marking))
def parse_priority_force(self, **parameters):
"""
parse the priority_force mapping param.
Args:
parameters contains:
priority_force(integer): set the priority-force value.
Allowed value is <0-7>.
Returns:
Return None or parsed string on success
Raise:
Raise ValueError exception
Examples:
"""
if 'priority_force' not in parameters:
return None
priority_force = parameters['priority_force']
if priority_force is None:
return None
if priority_force >= 0 and priority_force <= 7:
return 'priority-force ' + str(priority_force)
raise ValueError("Invalid priority_force {}. "
"Allowed value in decimal <0-7>."
.format(priority_force))
def parse_priority_mapping(self, **parameters):
"""
parse the priority_mapping mapping param.
Args:
parameters contains:
priority_mapping(integer): set the priority-mapping value.
Allowed value is <0-7>.
Returns:
Return None or parsed string on success
Raise:
Raise ValueError exception
Examples:
"""
if 'priority_mapping' not in parameters:
return None
priority_mapping = parameters['priority_mapping']
if priority_mapping is None:
return None
if priority_mapping >= 0 and priority_mapping <= 7:
return 'priority-mapping ' + str(priority_mapping)
raise ValueError("Invalid priority_mapping {}. "
"Allowed value in decimal <0-7>."
.format(priority_mapping))
def parse_suppress_rpf_drop(self, **parameters):
"""
parse the suppress_rpf_drop param.
Args:
parameters contains:
suppress_rpf_drop (boolean):Permit packets that fail RPF check
Returns:
Return None or parsed string on success
Raise:
Raise ValueError exception
Examples:
"""
if 'suppress_rpf_drop' not in parameters:
return None
suppress_rpf_drop = parameters['suppress_rpf_drop']
if not suppress_rpf_drop:
return None
return 'suppress-rpf-drop'
def parse_icmp_filter(self, **parameters):
"""
parse the icmp_type and icmp_code param
Args:
parameters contains:
                icmp_filter(string): The string contains values in the below format
                    [ [icmp-type <value>] [icmp-code <value> ] ] |
[ icmp-message <value> ]
Returns:
Return None or parsed string on success
Raise:
Raise ValueError exception
Examples:
"""
if 'icmp_filter' not in parameters or not parameters['icmp_filter']:
return None
if 'protocol_type' not in parameters or \
not parameters['protocol_type'] or \
parameters['protocol_type'] != 'icmp':
raise ValueError("icmp filter is supported only for."
"protocol_type = icmp")
icmp_filter = parameters['icmp_filter']
icmp_filter = ' '.join(icmp_filter.split()).split()
if icmp_filter[0].isdigit():
return self._parse_icmp_type_and_code(*icmp_filter)
else:
return self._parse_icmp_message(*icmp_filter)
def _parse_icmp_type_and_code(self, icmp_type, icmp_code=None):
"""
parse the icmp_type and icmp_code param
Args:
icmp_type(integer): Validate an ICMP type.
icmp_code(integer): Validate an ICMP code.
Returns:
Return None or parsed string on success
Raise:
Raise ValueError exception
Examples:
"""
        ret = None
        if 0 <= int(icmp_type) <= 255:
            ret = icmp_type
        # Only append the code when the type was valid; otherwise the
        # concatenation below would fail on None.
        if ret and icmp_code:
            if 0 <= int(icmp_code) <= 255:
                ret = ret + ' ' + icmp_code
        return ret
def _parse_icmp_message(self, icmp_message):
"""
parse the icmp_message param
Args:
icmp_message(string): Validate an ICMP message.
Returns:
Return None or parsed string on success
Raise:
Raise ValueError exception
Examples:
"""
if icmp_message in ['beyond-scope', 'destination-unreachable',
'dscp', 'echo-reply', 'echo-request',
'flow-label', 'fragments', 'header',
'hop-limit', 'mld-query', 'mld-reduction',
'mld-report', 'nd-na', 'nd-ns',
'next-header', 'no-admin', 'no-route',
'packet-too-big', 'parameter-option',
'parameter-problem', 'port-unreachable',
'reassembly-timeout', 'renum-command',
'renum-result', 'renum-seq-number',
'router-advertisement',
'router-renumbering',
'router-solicitation', 'routing',
'sequence', 'time-exceeded',
'unreachable']:
return icmp_message
raise ValueError("{} icmp message not supported."
"Refer config guide for supported messages"
.format(icmp_message))
def parse_tcp_operator(self, **parameters):
"""
        parse the tcp_operator param
Args:
parameters contains:
tcp_operator(string): Validate comparison operator for TCP port
This parameter works only for tcp protocol.
Allowed values are : established or syn
Returns:
Return None or parsed string on success
Raise:
Raise ValueError exception
Examples:
"""
if 'tcp_operator' in parameters and parameters['tcp_operator']:
tcp_operator = parameters['tcp_operator']
if 'protocol_type' not in parameters or \
not parameters['protocol_type'] or \
parameters['protocol_type'] != 'tcp':
raise ValueError("{} tcp operator is supported only for."
"protocol_type = tcp"
.format(tcp_operator))
if tcp_operator in ['established', 'syn', 'established syn',
'syn established']:
return tcp_operator
raise ValueError("Only supported tcp operator are: "
"established and/or syn")
return None
| 35.184015 | 79 | 0.550584 | 2,035 | 18,929 | 4.980835 | 0.134152 | 0.056235 | 0.026638 | 0.029992 | 0.463595 | 0.406373 | 0.349842 | 0.336227 | 0.316496 | 0.27427 | 0 | 0.014265 | 0.366739 | 18,929 | 537 | 80 | 35.249534 | 0.831317 | 0.30588 | 0 | 0.198276 | 0 | 0 | 0.213466 | 0.011482 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081897 | false | 0 | 0.008621 | 0 | 0.284483 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d9b44c41d9bb6eed7d6e705ceea343d6da462a7 | 4,243 | py | Python | search/fields.py | zags4life/search | e2a7772564168173f681f8479aca39dbc49b854f | [
"Apache-2.0"
] | 2 | 2019-07-08T14:15:07.000Z | 2019-07-08T14:15:13.000Z | search/fields.py | zags4life/search | e2a7772564168173f681f8479aca39dbc49b854f | [
"Apache-2.0"
] | null | null | null | search/fields.py | zags4life/search | e2a7772564168173f681f8479aca39dbc49b854f | [
"Apache-2.0"
] | null | null | null | # fields.py
from datetime import date, datetime
import logging
import re
logger = logging.getLogger(__name__)
DATE_FORMATS = (
'%m-%d-%Y',
'%m-%d-%y',
'%m/%d/%Y',
'%m/%d/%y',
'%m%d%Y',
'%m%d%y',
'%m/%d',
'%m-%d',
'%m%d',
'%Y%m%d',
'%Y-%m-%d',
'%Y/%m/%d',
'%y%m%d',
)
def Date(date_str):
if not date_str:
return None
for format in DATE_FORMATS:
try:
formatted = datetime.strptime(date_str, format)
if formatted.year == 1900:
formatted = datetime(
year=datetime.now().year,
month=formatted.month,
day=formatted.day)
return formatted.date()
except ValueError:
pass
logger.error("Failed to parse date '{}'".format(date_strs))
def verify_name_matches(func):
'''Decorator which ensures the Field name and QueryField name matches.
If true, call func, else return False
'''
def wrapper(self, other):
return func(self, other) if re.search(other.name, self.name) else False
return wrapper
def convert_type(func):
    '''Decorator that converts a QueryField's value to the type of the
    SearchField value it is being compared against.
All QueryField values are initially strings. We need to convert them to
the same type as the field we are comparing. This ensures that proper
comparisons occur, i.e. comparing integer or decimal values.
'''
def wrapper(field, query_field):
assert (isinstance(query_field, QueryField) and
isinstance(field, SearchField)), \
'Invalid search field: {} - {}'.format(
type(field), type(query_field))
try:
with query_field:
return func(field, query_field(field.value))
except Exception as e:
logger.debug('{0}; {1}; {2}'.format(
e,
'SearchField: {}'.format(field),
'QueryField: {}'.format(query_field)
)
)
return False
return wrapper
class BaseField(object):
def __init__(self, name, value):
self.name = str(name)
self.value = value
@verify_name_matches
@convert_type
def __eq__(self, other):
return self.value == other.value
@verify_name_matches
@convert_type
def __ne__(self, other):
return self.value != other.value
@verify_name_matches
@convert_type
def __lt__(self, other):
return self.value < other.value
@verify_name_matches
@convert_type
def __le__(self, other):
return self.value <= other.value
@verify_name_matches
@convert_type
def __gt__(self, other):
return self.value > other.value
@verify_name_matches
@convert_type
def __ge__(self, other):
return self.value >= other.value
def __str__(self):
return '{0.name} = {0.value}'.format(self)
__repr__ = __str__
@verify_name_matches
def match(self, other):
return re.search(str(other.value), str(self.value))
class SearchField(BaseField):
'''Represents a searchable field'''
pass
class QueryField(BaseField):
'''Represents a query field
    Unlike BaseField, QueryField implements __enter__/__exit__ to allow
    intermediate type conversion, while ensuring the original value is properly
rolled back after the operation is complete.
'''
def __init__(self, name, value):
assert isinstance(value, str), 'value must be of type str'
super(QueryField, self).__init__(name, value)
def __call__(self, value):
'''Call operator - converts the underlying string value to the
appropriate type.
'''
# If the value is a date, convert the string value to a date object.
# Else, convert the string value to the same type as the value
self.value = Date(self.value) if isinstance(value, (date, datetime)) \
else type(value)(self.value)
return self
def __enter__(self):
self.__orig_val = self.value
return self
def __exit__(self, type, value, tb):
self.value = self.__orig_val
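# Minimal end-to-end sketch (assumed field names and values; defined for
# documentation only and never invoked): a QueryField's string value is coerced
# to the SearchField's type before comparison, and names match via re.search.
def _field_examples():
    age = SearchField('age', 31)
    assert age == QueryField('age', '31')        # '31' is coerced to int
    assert age > QueryField('ag.', '30')         # names are regex patterns
    assert not (age == QueryField('height', '31'))  # name mismatch -> False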
| 27.551948 | 79 | 0.602404 | 522 | 4,243 | 4.680077 | 0.266284 | 0.051576 | 0.01228 | 0.016373 | 0.242734 | 0.19034 | 0.175604 | 0.14695 | 0.14695 | 0.14695 | 0 | 0.002994 | 0.291539 | 4,243 | 153 | 80 | 27.732026 | 0.809714 | 0.205515 | 0 | 0.219048 | 0 | 0 | 0.069228 | 0 | 0 | 0 | 0 | 0 | 0.019048 | 1 | 0.171429 | false | 0.019048 | 0.028571 | 0.085714 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d9ba38017f5f75b67b32f4cd2bf6d18d2df8bf3 | 1,102 | py | Python | src/python/pants/backend/codegen/antlr/java/targets.py | stuhood/pants | 107b8335a03482516f64aefa98aadf9f5278b2ee | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/codegen/antlr/java/targets.py | stuhood/pants | 107b8335a03482516f64aefa98aadf9f5278b2ee | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/codegen/antlr/java/targets.py | stuhood/pants | 107b8335a03482516f64aefa98aadf9f5278b2ee | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.jvm.rules.targets import COMMON_JVM_FIELDS
from pants.engine.target import Sources, StringField, Target
class JavaAntlrSources(Sources):
required = True
class AntlrCompiler(StringField):
"""The name of the compiler used to compile the ANTLR files."""
alias = "compiler"
valid_choices = ("antlr3", "antlr4")
value: str
default = "antlr3"
class AntlrPackage(StringField):
"""(antlr4 only) A string which specifies the package to be used on the dependent sources.
If unspecified, the package will be based on the path to the sources. Note that if the sources
are spread among different files, this must be set as the package cannot be inferred.
"""
alias = "package"
class JavaAntlrLibrary(Target):
"""A Java library generated from Antlr grammar files."""
alias = "java_antlr_library"
core_fields = (*COMMON_JVM_FIELDS, JavaAntlrSources, AntlrCompiler, AntlrPackage)
v1_only = True
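# Hypothetical BUILD-file usage of the target defined above (all field values
# below are illustrative assumptions, not taken from the Pants docs):
#
#   java_antlr_library(
#       name="parser",
#       sources=["*.g4"],
#       compiler="antlr4",
#       package="org.example.parser",
#   )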
| 29.783784 | 98 | 0.732305 | 144 | 1,102 | 5.541667 | 0.569444 | 0.037594 | 0.037594 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012291 | 0.18784 | 1,102 | 36 | 99 | 30.611111 | 0.87933 | 0.459165 | 0 | 0 | 0 | 0 | 0.090106 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.133333 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d9e6d91288a077c4b112d0a13f312a2fd232460 | 1,553 | py | Python | Snippets/flood_watch.py | ColinShark/Pyrogram-Snippets | 50ede9ca9206bd6d66c6877217b4a80b4f845294 | [
"WTFPL"
] | 59 | 2021-01-07T16:19:48.000Z | 2022-02-22T06:56:36.000Z | Snippets/flood_watch.py | Mrvishal2k2/Pyrogram-Snippets | d4e66876f6aff1252dfb88423fedd66e18057446 | [
"WTFPL"
] | 4 | 2019-10-14T14:02:38.000Z | 2020-11-06T11:47:03.000Z | Snippets/flood_watch.py | ColinShark/Pyrogram-Snippets | 50ede9ca9206bd6d66c6877217b4a80b4f845294 | [
"WTFPL"
] | 26 | 2021-03-02T14:31:51.000Z | 2022-03-23T21:19:14.000Z | # Watch how many messages people send and warn them about sending too many messages.
# If you have admin permissions they will be muted.
import time
from pyrogram import Client, filters
from pyrogram.types import Message
app = Client("my_account")
flooders = {}
FLOOD_MUTE_TIME = 60
GROUP_ADMINS = {}
def get_chat_admins(app: Client, message: Message) -> list:
return [
admin.user.id
for admin in app.get_chat_members(message.chat.id, filter="administrators")
]
@app.on_message(filters.group)
def flood_watcher(app, message: Message):
c_id = message.chat.id
u_id = message.from_user.id
try:
if u_id not in GROUP_ADMINS[c_id]:
try:
flooders[c_id][u_id] += 1
except KeyError:
flooders[c_id][u_id] = 1
else:
if flooders[c_id][u_id] > 4:
app.restrict_chat_member(
c_id, u_id, int(time.time() + FLOOD_MUTE_TIME)
)
message.reply_text(
"Please avoid spamming, or you might get kicked."
)
else:
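                # Flood-counter decay: wait 1.5 s, forgive one message, and
                # drop the user's entry once their count returns to zero.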
time.sleep(1.5)
try:
flooders[c_id][u_id] -= 1
if flooders[c_id][u_id] == 0:
del flooders[c_id][u_id]
except KeyError as e:
print(e)
    except KeyError:
        GROUP_ADMINS[c_id] = get_chat_admins(app, message)
        # Without this, flooders[c_id] is never created and every message in
        # the chat keeps landing back in this handler.
        flooders.setdefault(c_id, {})
app.run()
| 28.236364 | 84 | 0.533806 | 195 | 1,553 | 4.051282 | 0.410256 | 0.037975 | 0.050633 | 0.053165 | 0.122785 | 0.105063 | 0.04557 | 0 | 0 | 0 | 0 | 0.009356 | 0.380554 | 1,553 | 54 | 85 | 28.759259 | 0.81185 | 0.084997 | 0 | 0.170732 | 0 | 0 | 0.050071 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04878 | false | 0 | 0.073171 | 0.02439 | 0.146341 | 0.02439 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2da86bf3f600dab74fa8fe4d6a6648a7125a0446 | 1,526 | py | Python | algo_toolbox/week03/car_fueling_azka.py | azukacchi/rosalind | a8ac76f1ef400d893f188144a040b093cb2afc76 | [
"MIT"
] | null | null | null | algo_toolbox/week03/car_fueling_azka.py | azukacchi/rosalind | a8ac76f1ef400d893f188144a040b093cb2afc76 | [
"MIT"
] | null | null | null | algo_toolbox/week03/car_fueling_azka.py | azukacchi/rosalind | a8ac76f1ef400d893f188144a040b093cb2afc76 | [
"MIT"
] | null | null | null | # Problem Description
# Input Format. The first line contains an integer 𝑑. The second line contains an integer 𝑚. The third line
# specifies an integer 𝑛. Finally, the last line contains integers stop1, stop2, . . . , stop𝑛.
# Output Format. Assuming that the distance between the cities is 𝑑 miles, a car can travel at most 𝑚 miles
# on a full tank, and there are gas stations at distances stop1, stop2, . . . , stop𝑛 along the way, output the
# minimum number of refills needed. Assume that the car starts with a full tank. If it is not possible to
# reach the destination, output −1.
# Constraints. 1 ≤ 𝑑 ≤ 10^5. 1 ≤ 𝑚 ≤ 400. 1 ≤ 𝑛 ≤ 300. 0 < stop1 < stop2 < · · · < stop𝑛 < 𝑚.
text = '''950
400
4
200 375 550 750'''
# text = '''10
# 3
# 4
# 1 2 5 9'''
# text = '''200
# 250
# 2
# 100 150'''
(d,m,n) = (int(i) for i in text.strip().split('\n')[:-1])
stations = [int(i) for i in text.strip().split('\n')[-1].split()]
# Greedy strategy: from each refuel point, drive to the farthest reachable
# stop; refuel there unless the destination itself is already reachable.
positions = [0] + stations + [d]
station_count = 0
current = 0
while current < len(positions) - 1:
    last = current
    while (current < len(positions) - 1
           and positions[current + 1] - positions[last] <= m):
        current += 1
    if current == last:  # the next stop is farther than a full tank
        station_count = -1
        break
    if current < len(positions) - 1:  # refuel unless destination was reached
        station_count += 1
print(station_count)
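# Expected outputs for the three embedded inputs above:
#   (950, 400, [200, 375, 550, 750]) -> 2
#   (10, 3, [1, 2, 5, 9])            -> -1
#   (200, 250, [100, 150])           -> 0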
2da9b02bbd1a49863bd40da05c5d1ef7aebec59a | 3,236 | py | Python | abcpy/methods.py | akangasr/elfi | b86f791da577339fb9c026d4c5e31f4cba2b3661 | [
"MIT"
] | null | null | null | abcpy/methods.py | akangasr/elfi | b86f791da577339fb9c026d4c5e31f4cba2b3661 | [
"MIT"
] | null | null | null | abcpy/methods.py | akangasr/elfi | b86f791da577339fb9c026d4c5e31f4cba2b3661 | [
"MIT"
] | null | null | null | """
These are sketches of how to use the ABC graphical model in the algorithms
"""
import numpy as np
class ABCMethod(object):
def __init__(self, N, distance_node=None, parameter_nodes=None, batch_size=10):
if not distance_node or not parameter_nodes:
raise ValueError("Need to give the distance node and list of parameter nodes")
self.N = N
self.distance_node = distance_node
self.parameter_nodes = parameter_nodes
self.batch_size = batch_size
def infer(self, spec, *args, **kwargs):
raise NotImplementedError
class Rejection(ABCMethod):
"""
Rejection sampler.
"""
def infer(self, threshold):
"""
Run the rejection sampler. Inference can be repeated with a different
threshold without rerunning the simulator.
"""
# only run at first call
if not hasattr(self, 'distances'):
self.distances = self.distance_node.generate(self.N, batch_size=self.batch_size).compute()
self.parameters = [p.generate(self.N, starting=0).compute()
for p in self.parameter_nodes]
accepted = self.distances < threshold
posteriors = [p[accepted] for p in self.parameters]
return posteriors
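# Illustrative usage sketch (the node objects below are assumptions based on
# the docstrings, not a tested API):
#
#   sampler = Rejection(10000, distance_node=d, parameter_nodes=[mu, sigma])
#   posteriors = sampler.infer(threshold=0.1)
#   tighter = sampler.infer(threshold=0.05)  # reuses the cached simulations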
class BOLFI(ABCMethod):
def infer(self, spec, parameters=None, distance=None, threshold=None):
lik = GPLikelihoodApproximation().construct(parameters, distance)
# TODO
# - Construct PyMC model here using the lik
# - Run the MCMC
# Fixme: return the actual sample
return lik
class GPLikelihoodApproximation():
def construct(self, parameters=None, distance=None):
while not self.GP.is_finished():
values = self.acquisition.acquire()
# Map the parameter values for the nodes
values_hash = {param.name: values[:,i] for i, param in enumerate(parameters)}
distances = distance.generate(len(values), self.batch_size, with_values=values_hash).compute()
self.GP.update(parameters, distances)
return self.GP
# class SyntheticLikelihood(ABCMethod):
#
# def create_objective(self, model, parameters=None, summaries=None, **kwargs):
# """
#
# Parameters
# ----------
# model
# parameter
# array of nodes
# summaries
# array of nodes
# kwargs
#
# Returns
# -------
#
# """
#
# parameter_values = []
#
# for p in parameters:
# values = Values()
# values.replace(p, parents=False)
# parameter_values.append(values)
#
# def objective(params):
# S = np.zeros([self.N, len(summaries)])
# y = np.zeros([1, len(summaries)])
# for i, s in enumerate(summaries):
# parameter_values[i].values[0:self.N] = params[i]
# S[:, i] = s.generate(self.N)
# y[i] = s.observed
# cov = np.cov(S, rowvar=False)
# mean = np.mean(S, axis=0)
#
# lik = stats.multivariate_normal.pdf(y, mean, cov)
# return lik
#
# return objective
| 26.52459 | 106 | 0.583436 | 361 | 3,236 | 5.149584 | 0.351801 | 0.018827 | 0.020979 | 0.017214 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002687 | 0.309951 | 3,236 | 121 | 107 | 26.743802 | 0.829825 | 0.426452 | 0 | 0 | 0 | 0 | 0.038133 | 0 | 0 | 0 | 0 | 0.008264 | 0 | 1 | 0.15625 | false | 0 | 0.03125 | 0 | 0.40625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dac885badec2e2be0fb4fc868ed7f544d27cde1 | 3,020 | py | Python | scxmlProcessor/FamillyManager.py | cesarcolle/ScxmlGenerator | d41ca560bb9c93f2320fedccebc0aade2ad85b53 | [
"MIT"
] | null | null | null | scxmlProcessor/FamillyManager.py | cesarcolle/ScxmlGenerator | d41ca560bb9c93f2320fedccebc0aade2ad85b53 | [
"MIT"
] | null | null | null | scxmlProcessor/FamillyManager.py | cesarcolle/ScxmlGenerator | d41ca560bb9c93f2320fedccebc0aade2ad85b53 | [
"MIT"
] | null | null | null | import os
from multiprocessing import Queue
from collections import defaultdict
from scxmlProcessor import Loader
class FamillyManager:
def __init__(self, dictData):
self.data = dictData
self.familly = dict()
self.fathers = list()
self.path = dict()
self.toTheEnd = list()
self.fatherFamilly = dict()
self.makeFamilly()
    # makeFamilly fills the dictionaries recording which states are fathers
    # and, for each child, who its father is. We need this for compound states.
def makeFamilly(self):
# Init the list value of the dictionary key
for state in self.data:
self.familly[state] = list()
self.fatherFamilly[state] = list()
for state in self.data:
# check if he is a father
if self.data[state].state:
self.fathers += [state]
# add child
self.path[state] = self.data[state].initial
for child in self.data[state].state:
self.fatherFamilly[state] += [child.id]
self.familly[child.id] += [state]
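    # Illustrative result (assumed machine): if compound state "a" declares
    # children "a1" and "a2", then fatherFamilly["a"] == ["a1", "a2"] and
    # familly["a1"] == ["a"].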
def pathToAncestor(self, departure):
if departure:
return departure + self.pathToAncestor(self.familly[departure[0]])
else:
return list()
def nodeIntersection(self, begin, path):
        if begin and begin[0] not in path:
            return self.nodeIntersection(self.familly[begin[0]], path)
else:
return begin
def bromance(self, state1, state2):
if not (state1 and state2):
return
        # Check whether state1's father and state2's father share the same
        # father; if not, recurse on the fathers, otherwise return the pair.
# print("father familly : ", self.fatherFamilly[state1[0]])
if self.fatherFamilly[state1[0]] != self.fatherFamilly[state2[0]]:
            return self.bromance(self.familly[state1[0]], self.familly[state2[0]])
else:
return state1, state2
def isComponed(self, state):
return
    def brothers(self, lookingBrothers):
        d = defaultdict(list)
        for k, v in self.familly.items():
            if v:
                # v[-1] instead of v.pop() so self.familly is not mutated
                d[v[-1]].append(k)
        return d
# return the father of one state
def takeFather(self, key):
return self.familly[key]
def takeChild(self, key):
return self.path[key]
# return all father
def takeAllFather(self):
return self.fathers
if __name__ == "__main__":
l = Loader.Loader("../Test/ressource/test_bromance.scxml")
f = FamillyManager(l.machine.doc.stateDict)
# f.brothers(list())
print("familly : ", f.familly)
print("fathers : ", f.fathers)
print("child : ", f.path)
print("childOfFather : ", f.fatherFamilly)
print("La Bromance de a1 et b1 : ", f.bromance(["a1"], ["b1"]))
print("hh")
print("ho ho : ", f.pathToAncestor(["b1"]))
print("path to follow : ", f.nodeIntersection(["b1"], f.pathToAncestor(["a1"])))
| 31.789474 | 103 | 0.593709 | 362 | 3,020 | 4.917127 | 0.301105 | 0.055618 | 0.016854 | 0.01573 | 0.044944 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013084 | 0.291391 | 3,020 | 94 | 104 | 32.12766 | 0.818692 | 0.156954 | 0 | 0.106061 | 0 | 0 | 0.060403 | 0.014607 | 0 | 0 | 0 | 0 | 0 | 1 | 0.151515 | false | 0 | 0.060606 | 0.060606 | 0.363636 | 0.121212 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dad2dd69f939e48da3c93d2a9a81830706128fe | 1,914 | py | Python | app/main/views.py | BrilliantGrant/One-Minute-Pitch | 4673e24bcd9521990350203929275454671e9619 | [
"MIT",
"Unlicense"
] | null | null | null | app/main/views.py | BrilliantGrant/One-Minute-Pitch | 4673e24bcd9521990350203929275454671e9619 | [
"MIT",
"Unlicense"
] | null | null | null | app/main/views.py | BrilliantGrant/One-Minute-Pitch | 4673e24bcd9521990350203929275454671e9619 | [
"MIT",
"Unlicense"
] | null | null | null | from . import main
from flask import render_template,request,redirect, url_for, abort
from flask_login import login_required,current_user
from ..models import Pitch,User,Comment
from .forms import PitchForm, CommentsForm
from .. import db
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
title = 'Home - Welcome to The best Pitching Website Online'
# pitches = pitch.query.filter_by(category='glee').all()
# print(pitches)
pick_up = Pitch.query.filter_by(category = 'pick_up').all()
interview = Pitch.query.filter_by(category = 'interview Pitch').all()
product = Pitch.query.filter_by(category = 'product pitch').all()
promotion = Pitch.query.filter_by(category = 'promotion pitch').all()
comment = Comment.query.all()
print(interview)
    return render_template('index.html', title=title, product=product, interview=interview, pick_up=pick_up, promotion=promotion, comment=comment)
@main.route('/newpitch' ,methods = ['GET','POST'])
@login_required
def new_pitch():
form = PitchForm()
if form.validate_on_submit():
pitches = Pitch(title = form.title.data,body = form.body.data,category = form.category.data)
pitches.save_pitches()
        # print('Your Pitch has been successfully saved!')
return redirect(url_for('main.index'))
return render_template('newpitch.html', pitch_form = form)
@main.route('/pitch/comments/new',methods = ['GET','POST'])
@login_required
def new_comment():
form = CommentsForm()
# vote_form = UpvoteForm()
if form.validate_on_submit():
new_comment = Comment(comment=form.comment.data,username=current_user.username)
new_comment.save_comment()
return redirect(url_for('main.index'))
#title = f'{pitch_result.id} review'
return render_template('new_comment.html',comment_form=form,vote_form = form)
| 33 | 128 | 0.699582 | 248 | 1,914 | 5.245968 | 0.334677 | 0.038432 | 0.061491 | 0.069178 | 0.229055 | 0.095311 | 0.05073 | 0 | 0 | 0 | 0 | 0 | 0.171369 | 1,914 | 57 | 129 | 33.578947 | 0.820303 | 0.128004 | 0 | 0.176471 | 0 | 0 | 0.122573 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0 | 0.176471 | 0 | 0.411765 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dae87b369c7d5511901c84a10b786e76588b5f6 | 4,502 | py | Python | src/script_maskrcnn_v2.py | Ambistic/QuickSeg | f7d1563e00db9267cb38dca516e138fb1a99e392 | [
"Apache-2.0"
] | null | null | null | src/script_maskrcnn_v2.py | Ambistic/QuickSeg | f7d1563e00db9267cb38dca516e138fb1a99e392 | [
"Apache-2.0"
] | null | null | null | src/script_maskrcnn_v2.py | Ambistic/QuickSeg | f7d1563e00db9267cb38dca516e138fb1a99e392 | [
"Apache-2.0"
] | null | null | null | """
This script is intended to be used as a mask maker
using Mask R-CNN. We use a separate script because Mask R-CNN
has different dependencies from the rest of the project.
"""
import numpy as np
from tqdm import tqdm
import sys
import argparse
from configparser import ConfigParser
sys.path.append("../libs")
from tiff import Tiff
from pathlib import Path as P
from roimaker import segment_image, quick_segment
from maskrcnn import get_model
import skimage
import pickle
def prepro(image):
image = np.asarray(image)
if image.ndim != 3:
image = skimage.color.gray2rgb(image)
# If has an alpha channel, remove it for consistency
if image.shape[-1] == 4:
image = image[..., :3]
return image
def normalize(image):
thr = np.max(image)
image = np.asarray(image) * 255. / thr
image = np.clip(image, 0, 255).astype(np.uint8)
return image
def export(obj, output, z, coord):
dest = P(output) / f"maskrcnn_{z}_c{coord}.pck"
with open(dest, "wb") as f:
pickle.dump(obj, f)
def extract_patch(image, i, j, step, size, margin):
shape_x, shape_y = image.shape[:2]
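    # The reference window is the patch footprint shifted by -margin (so
    # neighbouring patches overlap); the im_* bounds clip it to the image, and
    # the pa_* offsets place the clipped region inside a zero-padded
    # (size, size, 3) patch.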
i_step, j_step = i * step, j * step
ref_x_start, ref_y_start = -margin + i_step, -margin + j_step
ref_x_end, ref_y_end = -margin + i_step + size, -margin + j_step + size
im_x_start, im_y_start = max(0, ref_x_start), max(0, ref_y_start)
im_x_end, im_y_end = min(shape_x, ref_x_end), min(shape_y, ref_y_end)
pa_x_start, pa_y_start = im_x_start - ref_x_start, im_y_start - ref_y_start
pa_x_end, pa_y_end = size + im_x_end - ref_x_end, size + im_y_end - ref_y_end
patch = np.zeros((size, size, 3))
patch[pa_x_start:pa_x_end, pa_y_start:pa_y_end] = \
image[im_x_start:im_x_end, im_y_start:im_y_end]
return patch
def test_central_frame(mask, margin, thr=0.2):
return (mask[margin:-margin, margin:-margin].sum() / mask.sum()) > thr
def extract_rois(result, margin):
print("SHAPE", result["masks"].shape)
rois = list(np.rollaxis(result["masks"], 2))
print("LEN", len(rois))
filtered_rois = list(filter(lambda x: test_central_frame(x, margin),
rois))
if len(filtered_rois) == 0:
return np.array([])
filtered_rois = np.stack(filtered_rois, axis=-1)
print("FINALLY", filtered_rois.shape)
return filtered_rois
def cut_and_detect(args, image):
shape_x, shape_y = image.shape[:2]
# open model
model = get_model()
for i, c_i in enumerate(range(0, shape_x, args.step)):
for j, c_j in enumerate(range(0, shape_y, args.step)):
coord = f"{i}_{j}"
patch = extract_patch(image, i, j, args.step,
args.size, args.margin)
res = model.detect([patch], verbose=0)[0]
rois = extract_rois(res, args.margin)
if not len(rois):
print(coord, "is empty")
yield rois, coord
def export_config(args):
conf = ConfigParser()
conf["DEFAULT"] = dict(
image_file=str(args.file),
time=args.time,
channel=args.channel,
z=args.depth,
output=str(args.output),
name=args.name,
step=args.step,
margin=args.margin,
size=args.size,
)
with open(args.output / 'maskrcnn_v2.conf', 'w') as configfile:
conf.write(configfile)
def main(args):
# open image
img = Tiff(args.file)
img.get_imagej_metadata()
# process
img.seek_image(args.time, args.depth, args.channel)
image = prepro(img.img)
print(image.shape)
for rois, coord in cut_and_detect(args, image):
export(rois, args.output, args.name, coord)
export_config(args)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", type=str)
parser.add_argument("-o", "--output", default=None)
parser.add_argument("-z", "--depth", default=0, type=int)
parser.add_argument("-c", "--channel", default=0, type=int)
parser.add_argument("-t", "--time", default=0, type=int)
parser.add_argument("-s", "--size", default=224, type=int)
parser.add_argument("-m", "--margin", default=20, type=int)
args = parser.parse_args()
if args.output is None:
args.output = P(args.file).parent / P(args.file).stem
args.output.mkdir(parents=True, exist_ok=True)
args.name = "t%d_z%d_c%d" % (args.time, args.depth, args.channel)
args.step = args.size - 2 * args.margin
main(args)
| 29.233766 | 81 | 0.637494 | 686 | 4,502 | 3.992711 | 0.252187 | 0.017525 | 0.043447 | 0.023366 | 0.174516 | 0.092735 | 0.051844 | 0 | 0 | 0 | 0 | 0.010644 | 0.227899 | 4,502 | 153 | 82 | 29.424837 | 0.77733 | 0.051311 | 0 | 0.037383 | 0 | 0 | 0.042488 | 0.005869 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084112 | false | 0 | 0.102804 | 0.009346 | 0.242991 | 0.046729 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2db07ba5f160798a94b61eb6dae0a3aaeb6c6a24 | 3,894 | py | Python | tests/exercises_test.py | geraldclark227/cmsi585_hw2 | d83ceec50ddaab70591950a17ff3f30403a89f1b | [
"MIT"
] | null | null | null | tests/exercises_test.py | geraldclark227/cmsi585_hw2 | d83ceec50ddaab70591950a17ff3f30403a89f1b | [
"MIT"
] | null | null | null | tests/exercises_test.py | geraldclark227/cmsi585_hw2 | d83ceec50ddaab70591950a17ff3f30403a89f1b | [
"MIT"
] | null | null | null | import re
import math
import pytest
from exercises import stretched, powers, say, top_ten_scorers, interpret
def test_stretched():
assert stretched([]) == []
assert stretched([1]) == [1]
assert stretched([10, 20]) == [10, 20, 20]
assert stretched([5, 8, 3, 2]) == [5, 8, 8, 3, 3, 3, 2, 2, 2, 2]
def test_powers():
p = powers(2, 10)
assert next(p) == 1
assert next(p) == 2
assert next(p) == 4
assert next(p) == 8
with pytest.raises(StopIteration):
next(p)
assert list(powers(2, -5)) == []
assert list(powers(7, 0)) == []
assert list(powers(3, 1)) == [1]
assert list(powers(2, 63)) == [1, 2, 4, 8, 16, 32]
assert list(powers(2, 64)) == [1, 2, 4, 8, 16, 32, 64]
def test_say():
assert say() == ''
assert say('hi')() == 'hi'
assert say('hi')('there')() == 'hi there'
assert say('hello')('my')('name')('is')(
'Colette')() == 'hello my name is Colette'
def test_top_ten_scorers():
assert top_ten_scorers({}) == []
assert top_ten_scorers({'T1': [['A', 3, 300]]}) == []
input = {'T1': [['A', 30, 300]]}
expected = [{'name': 'A', 'ppg': 10, 'team': 'T1'}]
assert top_ten_scorers(input) == expected
input = {
'ATL': [
['Betnijah Laney', 16, 263],
['Courtney Williams', 14, 193],
],
'CHI': [
['Kahleah Copper', 17, 267],
['Allie Quigley', 17, 260],
['Courtney Vandersloot', 17, 225],
],
'CONN': [
['DeWanna Bonner', 16, 285],
['Alyssa Thomas', 16, 241],
],
'DAL': [
['Arike Ogunbowale', 16, 352],
['Satou Sabally', 12, 153],
],
'IND': [
['Kelsey Mitchell', 16, 280],
['Tiffany Mitchell', 13, 172],
['Candice Dupree', 16, 202],
],
'LA': [
['Nneka Ogwumike', 14, 172],
['Chelsea Gray', 16, 224],
['Candace Parker', 16, 211],
],
'LV': [
['A’ja Wilson', 15, 304],
['Dearica Hamby', 15, 188],
['Angel McCoughtry', 15, 220],
],
'MIN': [
['Napheesa Collier', 16, 262],
['Crystal Dangerfield', 16, 254],
],
'NY': [
['Layshia Clarendon', 15, 188]
],
'PHX': [
['Diana Taurasi', 13, 236],
['Brittney Griner', 12, 212],
['Skylar Diggins-Smith', 16, 261],
['Bria Hartley', 13, 190],
],
'SEA': [
['Breanna Stewart', 16, 317],
['Jewell Loyd', 16, 223],
],
'WSH': [
['Emma Meesseman', 13, 158],
['Ariel Atkins', 15, 212],
['Myisha Hines-Allen', 15, 236],
],
}
expected = [
{'name': 'Arike Ogunbowale', 'ppg': 22, 'team': 'DAL'},
{'name': 'A’ja Wilson', 'ppg': 20.266666666666666, 'team': 'LV'},
{'name': 'Breanna Stewart', 'ppg': 19.8125, 'team': 'SEA'},
{'name': 'DeWanna Bonner', 'ppg': 17.8125, 'team': 'CONN'},
{'name': 'Kelsey Mitchell', 'ppg': 17.5, 'team': 'IND'},
{'name': 'Betnijah Laney', 'ppg': 16.4375, 'team': 'ATL'},
{'name': 'Napheesa Collier', 'ppg': 16.375, 'team': 'MIN'},
{'name': 'Skylar Diggins-Smith', 'ppg': 16.3125, 'team': 'PHX'},
{'name': 'Crystal Dangerfield', 'ppg': 15.875, 'team': 'MIN'},
{'name': 'Myisha Hines-Allen', 'ppg': 15.733333333333333, 'team': 'WSH'}]
assert top_ten_scorers(input) == expected
def test_interpret():
assert [*interpret('1')] == []
assert [*interpret('3 8 7 + PRINT 10 SWAP - PRINT')] == [15, -7]
assert [*interpret('99 DUP * PRINT')] == [9801]
with pytest.raises(ValueError):
[*interpret('2 TIMES SWAP -')]
with pytest.raises(ValueError):
[*interpret('DUP')] | 32.45 | 81 | 0.472008 | 440 | 3,894 | 4.138636 | 0.386364 | 0.019769 | 0.042834 | 0.041735 | 0.132345 | 0.071939 | 0.028007 | 0 | 0 | 0 | 0 | 0.119442 | 0.318439 | 3,894 | 120 | 82 | 32.45 | 0.566692 | 0 | 0 | 0.145455 | 0 | 0 | 0.23543 | 0 | 0 | 0 | 0 | 0 | 0.218182 | 1 | 0.045455 | false | 0 | 0.036364 | 0 | 0.081818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2db09571e65b6c68ba6da57fe57eb4a66e42b4d3 | 2,199 | py | Python | hints.py | dragonc0/LADXR | 31072400e3f01fdd2449971a8a53d39d6e349abe | [
"MIT"
] | null | null | null | hints.py | dragonc0/LADXR | 31072400e3f01fdd2449971a8a53d39d6e349abe | [
"MIT"
] | null | null | null | hints.py | dragonc0/LADXR | 31072400e3f01fdd2449971a8a53d39d6e349abe | [
"MIT"
] | null | null | null | from locations.items import *
from locations.constants import INVENTORY_NAME
from utils import formatText
hint_text_ids = [
# Overworld owl statues
0x1B6, 0x1B7, 0x1B8, 0x1B9, 0x1BA, 0x1BB, 0x1BC, 0x1BD, 0x1BE, 0x22D,
0x288, 0x280, # D1
0x28A, 0x289, 0x281, # D2
0x282, 0x28C, 0x28B, # D3
0x283, # D4
0x28D, 0x284, # D5
0x285, 0x28F, 0x28E, # D6
0x291, 0x290, 0x286, # D7
0x293, 0x287, 0x292, # D8
0x263, # D0
# Hint books
0x267, # color dungeon
0x201, # Pre open: 0x200
0x203, # Pre open: 0x202
0x205, # Pre open: 0x204
0x207, # Pre open: 0x206
0x209, # Pre open: 0x208
0x20B, # Pre open: 0x20A
]
hint_items = (POWER_BRACELET, SHIELD, BOW, HOOKSHOT, MAGIC_ROD, PEGASUS_BOOTS, OCARINA, FEATHER, SHOVEL,
MAGIC_POWDER, SWORD, FLIPPERS, TAIL_KEY, ANGLER_KEY, FACE_KEY,
BIRD_KEY, SLIME_KEY, GOLD_LEAF, BOOMERANG, BOWWOW)
hints = [
"{0} is at {1}",
"If you want {0} start looking in {1}",
"{1} holds {0}",
"They say that {0} is at {1}",
"You might to look in {1} for a secret",
]
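# Example (illustrative values): hints[0].format("BOW", "Mabe Village")
# -> "BOW is at Mabe Village"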
useless_hint = [
("Egg", "Mt. Tamaranch"),
("Marin", "Mabe Village"),
("Marin", "Mabe Village"),
("Witch", "Koholint Prairie"),
("Mermaid", "Martha's Bay"),
("Nothing", "Tabahl Wasteland"),
("Animals", "Animal Village"),
("Sand", "Yarna Desert"),
]
def addHints(rom, rnd, spots):
spots = list(sorted(filter(lambda spot: spot.item in hint_items, spots), key=lambda spot: spot.nameId))
text_ids = hint_text_ids.copy()
rnd.shuffle(text_ids)
for text_id in text_ids:
if len(spots) > 0:
spot_index = rnd.randint(0, len(spots) - 1)
spot = spots.pop(spot_index)
hint = rnd.choice(hints).format(INVENTORY_NAME[spot.item].decode("ascii"), spot.metadata.area)
else:
hint = rnd.choice(hints).format(*rnd.choice(useless_hint))
rom.texts[text_id] = formatText(hint.encode("ascii"))
for text_id in range(0x200, 0x20C, 2):
rom.texts[text_id] = formatText(b"Read this book?", ask=b"YES NO")
| 32.338235 | 108 | 0.592087 | 288 | 2,199 | 4.420139 | 0.621528 | 0.032993 | 0.017282 | 0.009427 | 0.075412 | 0 | 0 | 0 | 0 | 0 | 0 | 0.113821 | 0.272851 | 2,199 | 67 | 109 | 32.820896 | 0.682301 | 0.076853 | 0 | 0.035714 | 0 | 0 | 0.158436 | 0 | 0 | 0 | 0.102881 | 0 | 0 | 1 | 0.017857 | false | 0 | 0.053571 | 0 | 0.071429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2db3e00b94364b270b6993b91f53aca9d4277966 | 49,756 | py | Python | Python/self_py_fun/ConvolFun.py | NiubilityDiu/BayesInferenceEEGBCI | 05710898ce0730343e1929c03e5213cda11c16af | [
"MIT"
] | null | null | null | Python/self_py_fun/ConvolFun.py | NiubilityDiu/BayesInferenceEEGBCI | 05710898ce0730343e1929c03e5213cda11c16af | [
"MIT"
] | null | null | null | Python/self_py_fun/ConvolFun.py | NiubilityDiu/BayesInferenceEEGBCI | 05710898ce0730343e1929c03e5213cda11c16af | [
"MIT"
] | null | null | null | import tensorflow as tf
import tensorflow_probability as tfp
# from tensorflow_probability import edward2 as ed2
tfd = tfp.distributions
tfb = tfp.bijectors
tfp_kernels = tfp.positive_semidefinite_kernels
import sys
sys.path.insert(0, './self_py_fun')
from self_py_fun.PreFun import *
# import scipy as sp
import seaborn as sns
# from scipy.spatial.distance import pdist, squareform
plt.style.use('ggplot')
sns.set_context('notebook')
# https://wookayin.github.io/tensorflow-talk-debugging/#29
# https://colab.research.google.com/github/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/TensorFlow_Probability_Case_Study_Covariance_Estimation.ipynb#scrollTo=znG_AtTR7qob
# Multivariate normal parametrized by loc and Cholesky precision matrix.
class MVNPrecisionCholesky(tfd.TransformedDistribution):
def __init__(self, loc, precision_cholesky, re_batch_ndims, name=None):
super(MVNPrecisionCholesky, self).__init__(
distribution=tfd.Independent(
tfd.Normal(loc=tf.zeros_like(loc, dtype='float32'),
scale=tf.ones_like(loc, dtype='float32')),
reinterpreted_batch_ndims=re_batch_ndims),
bijector=tfb.Chain([
tfb.Affine(shift=loc),
tfb.Invert(tfb.Affine(scale_tril=precision_cholesky,
adjoint=True)),
]), name=name)
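# Sampling sketch: with precision P = L L^T (L = precision_cholesky), the chain
# above draws z ~ N(0, I) and returns loc + (L^T)^{-1} z, so Cov(x) = P^{-1}.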
class PriorModel:
num_rep = 12
DAT_TYPE = 'float32'
eps_value = tf.keras.backend.epsilon()
def __init__(self, n_length, num_electrode):
self.n_length = n_length
self.num_electrode = num_electrode
@staticmethod
def generate_delta(mean_vec, hyper_delta_var, convert_to_numpy=False):
delta_rv = tfd.MultivariateNormalDiag(
loc=mean_vec,
scale_identity_multiplier=hyper_delta_var)
delta_sample = tf.squeeze(delta_rv.sample(1), axis=0)
if convert_to_numpy:
delta_sample = tf.keras.backend.eval(delta_sample)
return delta_sample
@staticmethod
def compute_delta_log_lhd(mean_vec, hyper_delta_var, target_delta_value):
delta_rv = tfd.MultivariateNormalDiag(
loc=mean_vec,
scale_identity_multiplier=hyper_delta_var
)
return tf.reduce_sum(delta_rv.log_prob(target_delta_value))
def generate_pres_chky_matrix(self, df, channel_dim=1, convert_to_numpy=False):
pres_chky_rv = tfd.Wishart(
df=df,
scale=tf.eye(self.n_length, self.n_length, [channel_dim]),
input_output_cholesky=True,
validate_args=True
)
pres_chky_sample = tf.squeeze(pres_chky_rv.sample(1), axis=0)
if convert_to_numpy:
pres_chky_sample = tf.keras.backend.eval(pres_chky_sample)
return pres_chky_sample
def compute_pres_chky_log_lhd(self, df, pres_chky_matrix_value, channel_dim=1):
pres_chky_rv = tfd.Wishart(
df=df,
scale=tf.eye(self.n_length, self.n_length, [channel_dim]),
input_output_cholesky=True)
return tf.reduce_sum(pres_chky_rv.log_prob(pres_chky_matrix_value))
# https://www.math.wustl.edu/~sawyer/hmhandouts/Wishart.pdf
# Potential reference:
# https://amstat.tandfonline.com/doi/abs/10.1080/01621459.1966.10502018#.Xc12Cy2ZNgc
def generate_pres_chky_2(self, sigma_r_sq, channel_dim=1, convert_to_numpy=False):
uni_normal_rv = tfd.MultivariateNormalDiag(
            loc=tf.zeros([channel_dim, self.n_length * (self.n_length + 1) // 2]),
scale_identity_multiplier=sigma_r_sq
)
# Need to modify the main-diagonal term with another chi-square random variable w/
# df n-i+1.
# Convert multi-1d array to an upper-triangular matrix (with channel_dim batch)
df_vec = tf.range(self.n_length, dtype=self.DAT_TYPE)+1
df_vec = df_vec[::-1]
df_vec = tf.tile(df_vec[tf.newaxis, :], [channel_dim, 1])
diag_chisq_rv = tfd.Chi2(df=df_vec)
uni_normal_sample = tf.squeeze(uni_normal_rv.sample(1), axis=0)
chisq_sample = tf.squeeze(diag_chisq_rv.sample(1), axis=0)
if convert_to_numpy:
uni_normal_sample = tf.keras.backend.eval(uni_normal_sample)
chisq_sample = tf.keras.backend.eval(chisq_sample)
return uni_normal_sample, chisq_sample
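    # The method above appears to follow the Bartlett decomposition of a
    # Wishart(n_length, I) draw: an upper-triangular factor with
    # chi-squared(n - i + 1) diagonal entries and Gaussian off-diagonals
    # (here scaled by sigma_r_sq rather than the standard unit variance).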
@staticmethod
def convert_1d_array_to_upper_triangular(uni_normal_sample, chisq_sample,
convert_to_numpy=False):
uni_normal_sample = tfd.fill_triangular(uni_normal_sample, upper=True)
uni_normal_sample = tf.linalg.set_diag(uni_normal_sample, tf.sqrt(chisq_sample))
if convert_to_numpy:
uni_normal_sample = tf.keras.backend.eval(uni_normal_sample)
return uni_normal_sample
@staticmethod
def compute_pres_upper_tri(cov_matrix, convert_to_numpy=False):
pres_matrix = tf.linalg.inv(cov_matrix)
upper_tri_sample = tf.transpose(tf.linalg.cholesky(pres_matrix), [0, 2, 1])
if convert_to_numpy:
upper_tri_sample = tf.keras.backend.eval(upper_tri_sample)
return upper_tri_sample
@staticmethod
def convert_upper_triangular_to_1d_array(upper_tri_sample, convert_to_numpy=False):
upper_tri_sample = tfd.fill_triangular_inverse(upper_tri_sample, upper=True)
if convert_to_numpy:
upper_tri_sample = tf.keras.backend.eval(upper_tri_sample)
return upper_tri_sample
def compute_pres_chky_log_lhd_2(self, sigma_r_sq, uni_normal_sample, chisq_sample,
channel_dim=1):
        # Notice that uni_normal_sample is a flattened 1-d array
uni_normal_rv = tfd.MultivariateNormalDiag(
            loc=tf.zeros([channel_dim, self.n_length * (self.n_length + 1) // 2]),
scale_identity_multiplier=sigma_r_sq
)
normal_log_lhd = tf.reduce_sum(uni_normal_rv.log_prob(uni_normal_sample))
df_vec = tf.range(self.n_length, dtype=self.DAT_TYPE) + 1
df_vec = df_vec[::-1]
df_vec = tf.tile(df_vec[tf.newaxis, :], [channel_dim, 1])
diag_chisq_rv = tfd.Chi2(df=df_vec)
chisq_log_lhd = tf.reduce_sum(diag_chisq_rv.log_prob(chisq_sample))
return normal_log_lhd + chisq_log_lhd
def generate_eta_and_compute_log_lhd(
self, pres_chky, letter_dim, rep_dim, flash_num,
convert_to_numpy=False, channel_dim=1):
std_normal_rv = tfd.MultivariateNormalDiag(
loc=tf.zeros([channel_dim, self.n_length]),
scale_identity_multiplier=tf.ones([]))
dim_temp = letter_dim * rep_dim * flash_num
eta = std_normal_rv.sample([dim_temp])
log_lhd_xi = tf.reduce_sum(std_normal_rv.log_prob(eta))
log_abs_diag_value = tf.reduce_sum(tf.linalg.logdet(pres_chky))
# log_abs_diag_value = tf.reduce_sum(tf.math.log(self.eps_value + tf.abs(tf.linalg.diag_part(pres_chky))))
log_lhd_pres_chky = dim_temp * log_abs_diag_value
# Solve eta from pres_chky_1 * eta = normal_vector
eta = tf.transpose(eta, [1, 0, 2])[..., tf.newaxis]
# eta should have shape (channel_dim, ..., n_length, 1)
# pres_chky should have shape (channel_dim, n_length, n_length)
# print('eta has shape {}'.format(eta.shape))
# print('pres_chky has shape {}'.format(pres_chky.shape))
def _solve_eta_per_channel(elems):
pres_chky_chan = elems[0]
eta_chan = elems[1]
pres_chky_chan = tf.tile(pres_chky_chan[tf.newaxis, ...], [dim_temp, 1, 1])
eta_chan = tf.linalg.triangular_solve(pres_chky_chan, eta_chan, lower=False)
return eta_chan
elems = (pres_chky, eta)
eta_solve = tf.map_fn(_solve_eta_per_channel, elems, dtype=self.DAT_TYPE)
eta_solve = tf.transpose(tf.squeeze(eta_solve, axis=-1), [1, 0, 2])
if convert_to_numpy:
eta_solve = tf.keras.backend.eval(eta_solve)
return eta_solve, log_lhd_xi+log_lhd_pres_chky
@staticmethod
def generate_pres_e(alpha, beta, convert_to_numpy=False, channel_dim=1):
pres_e_rv = tfd.Gamma(
concentration=alpha*tf.ones([channel_dim]),
rate=beta*tf.ones([channel_dim])
)
pres_e = tf.squeeze(pres_e_rv.sample(1), axis=0)
if convert_to_numpy:
pres_e = tf.keras.backend.eval(pres_e)
return pres_e
@staticmethod
def compute_pres_e_log_lhd(alpha, beta, pres_e, channel_dim=1):
pres_e_rv = tfd.Gamma(
concentration=alpha*tf.ones([channel_dim]),
rate=beta*tf.ones([channel_dim])
)
return tf.reduce_sum(pres_e_rv.log_prob(pres_e))
class ReArrangeBetaSigma:
# Global constants:
num_rep = 12
f_sum = 2
nf_sum = num_rep - f_sum
DAT_TYPE = 'float32'
def __init__(self, n_multiple, num_electrode, flash_and_pause_length):
self.n_multiple = n_multiple
self.num_electrode = num_electrode
self.flash_and_pause_length = flash_and_pause_length
self.n_length = int(n_multiple * flash_and_pause_length)
def tile_and_combine_delta(self, letter_dim, repet_dim, delta_1, delta_0, channel_dim=1):
        assert delta_1.shape == (channel_dim, self.n_length), \
            'delta_1 has wrong input shape!'
        assert delta_0.shape == (channel_dim, self.n_length), \
            'delta_0 has wrong input shape!'
delta_1 = tf.convert_to_tensor(delta_1)[tf.newaxis, ...]
delta_0 = tf.convert_to_tensor(delta_0)[tf.newaxis, ...]
dim_1 = letter_dim * repet_dim * self.f_sum
dim_0 = letter_dim * repet_dim * self.nf_sum
delta_1 = tf.tile(delta_1, multiples=[dim_1, 1, 1])
delta_1 = tf.reshape(tf.transpose(delta_1, perm=[1, 0, 2]),
shape=[channel_dim, dim_1, self.n_length])
delta_0 = tf.tile(delta_0, multiples=[dim_0, 1, 1])
delta_0 = tf.reshape(tf.transpose(delta_0, perm=[1, 0, 2]),
shape=[channel_dim, dim_0, self.n_length])
delta_combined = tf.concat([delta_1, delta_0], axis=1)
return delta_combined
# Need to absorb this numpy function within tensorflow graph (especially for prediction)
def create_permute_beta_id(self, letter_dim, repet_dim, eeg_type):
dim_temp = letter_dim * repet_dim * self.num_rep
        assert eeg_type.shape == (dim_temp,), 'eeg_type has wrong input shape!'
id_beta = np.zeros([dim_temp]) - 1
id_beta[eeg_type == 1] = np.arange(self.f_sum*letter_dim*repet_dim)
id_beta[eeg_type != 1] = np.arange(self.f_sum*letter_dim*repet_dim, dim_temp)
return id_beta.astype('int32')
# This function requires further editing in terms of the dimension rearrangement.
def permute_beta_by_type(self, letter_dim, repet_dim,
beta_combined, id_beta, channel_dim=1):
dim_temp = letter_dim * repet_dim * self.num_rep
        assert beta_combined.shape == (dim_temp, channel_dim, self.n_length), \
            'beta_combined has wrong input shape!'
# 2280, 16, 25
# beta_combined = tf.reshape(beta_combined, [channel_dim,
# dim_temp,
# self.n_length])
beta_combined = tf.gather(beta_combined, id_beta,
axis=0, name='beta_permuted')
beta_combined = tf.reshape(beta_combined, [letter_dim,
repet_dim*self.num_rep,
channel_dim,
self.n_length])
beta_combined = tf.transpose(beta_combined, [0, 2, 1, 3])
# print('beta_combined has shape {}'.format(beta_combined.shape))
beta_combined = tf.reshape(beta_combined, [letter_dim,
channel_dim,
repet_dim*self.num_rep*self.n_length,
1])
return beta_combined
    # The following functions are based on the Bayesian generative model and
    # may use tensorflow and tensorflow-probability.
    # Notice that the design matrix broadcasts automatically
    # w.r.t. the channel batch and letter batch.
def create_design_mat_gen_bayes_seq(self, repetition_dim):
r"""
:param repetition_dim: integer
:return: design_x, with output shape
[(num_rep*num_repetition+n_multiple-1)*flash_and_pause_length, num_rep*num_repetition*n_length]
"""
# Create a zero matrix
dm_row = (repetition_dim*self.num_rep + self.n_multiple - 1) * self.flash_and_pause_length
dm_col = repetition_dim*self.num_rep*self.n_length
dm = np.zeros([dm_row, dm_col])
id_block = np.eye(self.n_length)
for trial_id in range(repetition_dim*self.num_rep):
row_id_low = trial_id * self.flash_and_pause_length
row_id_upp = row_id_low + self.n_length
col_id_low = trial_id * self.n_length
col_id_upp = col_id_low + self.n_length
dm[row_id_low:row_id_upp, col_id_low:col_id_upp] = id_block
return dm
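    # Shape sketch with assumed toy values num_rep = 12, n_multiple = 2,
    # flash_and_pause_length = 5 (so n_length = 10) and repetition_dim = 1:
    # each of the 12 identity blocks starts 5 rows below the previous one,
    # giving a (65, 120) design matrix.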
def create_joint_beta_tilta(
self, letter_dim, repet_dim, beta_combined, id_beta, design_x, channel_dim=1
):
r"""
:param letter_dim: integer
:param repet_dim: integer
        :param beta_combined: array_like, (channel_dim, letter_dim, noise_size*n_length, 1)
        :param id_beta: array_like, or None
        :param design_x: array_like, (channel_dim, letter_dim, seq_length, noise_size*n_length)
        :param channel_dim: integer
:return:
For design_x, we can ignore the outer-2 batch as long as the rightmost 2 dimensions are correct.
For id_beta, if none, it implies that the beta_combined has already been permuted.
"""
if id_beta is not None:
beta_combined = self.permute_beta_by_type(
letter_dim, repet_dim, beta_combined, id_beta, channel_dim)
seq_x = design_x @ beta_combined
return seq_x
@staticmethod
def convert_from_pres_to_cov(pres_chky_mat, convert_to_numpy=False):
pres_chky_mat_inv = tf.linalg.inv(pres_chky_mat)
cov_mat = tf.matmul(pres_chky_mat_inv, tf.transpose(pres_chky_mat_inv, [0, 2, 1]))
if convert_to_numpy:
cov_mat = tf.keras.backend.eval(cov_mat)
return cov_mat
@staticmethod
def convert_from_cov_to_pres(cov_mat, convert_to_numpy=False):
prec_chky_mat = tf.linalg.cholesky(tf.linalg.inv(cov_mat))
if convert_to_numpy:
prec_chky_mat = tf.keras.backend.eval(prec_chky_mat)
return prec_chky_mat
def create_trial_specific_cov_fn(self, trial_i, unit_pres_mat_value, rep_dim, channel_dim=1):
assert unit_pres_mat_value.shape == (channel_dim, self.n_length, self.n_length)
unit_cov_fn_value = self.convert_from_pres_to_cov(unit_pres_mat_value)
large_cov_size = (self.num_rep * rep_dim + self.n_multiple - 1) * self.flash_and_pause_length
upp_left = trial_i*self.flash_and_pause_length
low_right = large_cov_size - trial_i*self.flash_and_pause_length - self.n_length
paddings = tf.constant([[0, 0], [upp_left, low_right], [upp_left, low_right]])
large_cov_value = tf.pad(unit_cov_fn_value, paddings, "constant")
return large_cov_value
def create_collapsed_cov_fn(self, letter_dim, rep_dim, unit_target_pres_value,
unit_nt_pres_value, eeg_type, channel_dim=1):
trial_sum = self.num_rep * rep_dim
trn_total_seq = (trial_sum+self.n_multiple-1) * self.flash_and_pause_length
eeg_type = np.reshape(eeg_type, [letter_dim, trial_sum])
collapsed_cov_fn = tf.zeros([channel_dim, trn_total_seq, trn_total_seq])
for letter_id in range(letter_dim):
collapsed_cov_fn_letter = tf.zeros([channel_dim, trn_total_seq, trn_total_seq])
for trial_i in range(trial_sum):
                if eeg_type[letter_id, trial_i] == 1:
                    collapsed_cov_fn_letter += self.create_trial_specific_cov_fn(
                        trial_i, unit_target_pres_value, rep_dim, channel_dim)
                else:
                    collapsed_cov_fn_letter += self.create_trial_specific_cov_fn(
                        trial_i, unit_nt_pres_value, rep_dim, channel_dim)
            collapsed_cov_fn = tf.concat([collapsed_cov_fn, collapsed_cov_fn_letter], axis=0)
        # channel_dim is assumed to equal self.num_electrode in practice;
        # reshape with channel_dim so the batch matches the loop above.
        collapsed_cov_fn = tf.reshape(
            collapsed_cov_fn[channel_dim:, ...],
            [letter_dim, channel_dim, trn_total_seq, trn_total_seq])
# should have shape (19, 16, 920, 920)
return collapsed_cov_fn
class MVNJointModel:
def __init__(self, n_length, num_electrode, re_batch_ndims=2):
self.n_length = n_length
self.num_electrode = num_electrode
self.re_batch_ndims = re_batch_ndims
def generate_pseudo_signals(self, y_tilta, prec_chky_tilta=None,
cov_tilta=None, sample_batch=1,
convert_to_numpy=False, channel_dim=1):
        assert prec_chky_tilta is not None or cov_tilta is not None, \
            'Missing covariance component.'
if prec_chky_tilta is None:
prec_chky_tilta = tf.linalg.cholesky(tf.linalg.inv(cov_tilta))
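        # Small diagonal jitter keeps the Cholesky factor well conditioned.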
jitter = 0.1 * tf.eye(self.n_length, self.n_length, [channel_dim])
mvn_rv = MVNPrecisionCholesky(
loc=y_tilta,
precision_cholesky=prec_chky_tilta+jitter,
re_batch_ndims=self.re_batch_ndims)
pseudo_signal = mvn_rv.sample(sample_batch)
if sample_batch == 1:
pseudo_signal = tf.squeeze(pseudo_signal, axis=0)
if convert_to_numpy:
pseudo_signal = tf.keras.backend.eval(pseudo_signal)
return pseudo_signal
def compute_log_lhd(self, y_tilta, y_value,
prec_chky_tilta=None, cov_tilta=None,
sum_over_letter=True):
        assert prec_chky_tilta is not None or cov_tilta is not None, \
            'Missing covariance component.'
if prec_chky_tilta is None:
prec_chky_tilta = tf.linalg.cholesky(tf.linalg.inv(cov_tilta))
mvn_rv = MVNPrecisionCholesky(
loc=y_tilta,
precision_cholesky=prec_chky_tilta,
re_batch_ndims=self.re_batch_ndims)
mvn_log_prob = mvn_rv.log_prob(y_value)
if sum_over_letter:
mvn_log_prob = tf.reduce_sum(mvn_log_prob, axis=0)
return mvn_log_prob
class WLSOpt(EEGPreFun):
# class-level global constant
def __init__(self, *args, **kwargs):
super(WLSOpt, self).__init__(*args, **kwargs)
# Create the bijector for chky matrices
self.unconstrained_to_precison_chky = tfb.Chain([
# Step 2: Exponentiate the diagonals
tfb.TransformDiagonal(tfb.Exp(validate_args=self.VALIDATE_ARGS)),
# Step 1: Expand the vector to a lower triangular matrix
tfb.FillTriangular(validate_args=self.VALIDATE_ARGS),
])
self.prior = PriorModel(
n_length=self.n_length, num_electrode=self.num_electrode)
self.rearrange = ReArrangeBetaSigma(
n_multiple=self.n_multiple,
num_electrode=self.num_electrode,
flash_and_pause_length=self.flash_and_pause_length)
@staticmethod
def session_options(enable_gpu_ram_resizing=False):
"""Convenience function which sets a common 'tf.Session' options."""
config = tf.ConfigProto()
if enable_gpu_ram_resizing:
config.gpu_options.allow_growth = True
return config
def reset_sess(self, config=None):
# Convenience function to create TF graph and session or reset them.
if config is None:
config = self.session_options()
tf.reset_default_graph()
global sess
# noinspection PyBroadException
try:
sess.close()
except:
pass
sess = tf.InteractiveSession(config=config)
def print_test_info(self, test_repetition):
print('This is subject {}.'.format(self.sub_folder_name))
print('We are predicting {} repetitions for testing purpose.'.format(test_repetition))
# Import datafiles with WLS specific requirement:
def import_eeg_processed_dat_wls(self, file_subscript,
letter_dim=None, trn_repetition=None,
reshape_to_1d=True):
[eeg_signals, eeg_code, eeg_type] = \
self.import_eeg_processed_dat(file_subscript, reshape_1d_bool=False)
shape1, shape2, _ = eeg_type.shape
if letter_dim is not None:
assert letter_dim <= shape1, 'Incorrect letter dimension, ' \
'should not be greater than {}.'.format(shape1)
else:
letter_dim = shape1
if trn_repetition is not None:
assert trn_repetition <= shape2, 'Incorrect repetition dimension, ' \
'should not be greater than {}.'.format(shape2)
else:
trn_repetition = shape2
# eeg_signals = eeg_signals / 10
eeg_signals_trun, _ = self.create_truncate_segment_batch(
eeg_signals, eeg_type, letter_dim, trn_repetition)
trn_total_seq_length = (trn_repetition*self.num_rep+self.n_multiple-1)*self.flash_and_pause_length
eeg_signals = np.transpose(eeg_signals[:letter_dim, :trn_total_seq_length, :],
[0, 2, 1])
if reshape_to_1d:
eeg_code = np.reshape(eeg_code[:letter_dim, :trn_repetition, :],
[letter_dim*trn_repetition*self.num_rep])
eeg_type = np.reshape(eeg_type[:letter_dim, :trn_repetition, :],
[letter_dim*trn_repetition*self.num_rep])
return [eeg_signals.astype(self.DAT_TYPE),
eeg_signals_trun.astype(self.DAT_TYPE),
eeg_code.astype(self.DAT_TYPE),
eeg_type.astype(self.DAT_TYPE)]
# Construct design matrix X (letter-specific, intercept excluded)
def construct_design_matrix_per_letter(
self, total_seq_length, eeg_code, target_row_col):
# Assume no letter effect, nor row/column effect
params_type_num = 2
z = np.zeros([params_type_num, self.n_length], dtype=np.int32)
z[0, :] = np.arange(1, self.n_length+1) # Non-target
z[1, :] = np.arange(self.n_length+1, 2*self.n_length+1) # Target
total_seq_num = int((total_seq_length - self.n_length) / self.flash_and_pause_length + 1)
bool_index = np.in1d(eeg_code, target_row_col) * 1
design_x = np.zeros([total_seq_num, total_seq_length], dtype=np.int32)
for i in range(total_seq_num):
low_num = self.flash_and_pause_length * i
upp_num = self.flash_and_pause_length * i + self.n_length
design_x[i, low_num:upp_num] = z[bool_index[i], :]
design_x0 = np.zeros([total_seq_length, self.n_length*2])
for i in range(total_seq_length):
for j in range(total_seq_num):
if design_x[j, i] > 0:
design_x0[i, design_x[j, i]-1] = 1
return design_x0.astype(self.DAT_TYPE)
def create_penalty_fn(self):
# Create the second-order diff matrix object
# as well as smoothing around zero matrix
P1 = np.eye(N=self.n_length, M=self.n_length, dtype=np.int32)
P1 = P1[1:, :] - P1[:-1, :]
P1 = P1[1:, :] - P1[:-1, :]
P1 = np.matmul(P1.T, P1)
P_smooth = np.eye(N=self.n_length * 2, M=self.n_length * 2, dtype=np.int32)
P_smooth[:self.n_length, :self.n_length] = np.copy(P1)
P_smooth[self.n_length:, self.n_length:] = np.copy(P1)
P_zero = np.zeros([self.n_length * 2, self.n_length * 2], dtype=np.int32)
P_zero[self.n_length:, :self.n_length:] = np.copy(np.eye(N=self.n_length, M=self.n_length,
dtype=self.DAT_TYPE))
return [P_smooth, P_zero]
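    # Toy illustration for n_length = 4: the second-difference operator before
    # squaring is [[1, -2, 1, 0], [0, 1, -2, 1]], so P1 = D^T D penalizes the
    # curvature of the estimated beta curve.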
def from_weights_to_beta(self, design_x0, eeg_signals, l_cholesky_inv,
lambda_s, lambda_0, P_smooth, P_zero):
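        # Solves the penalized WLS normal equations
        #   (X^T W X + lambda_s * P_smooth + lambda_0 * P_zero) beta = X^T W Y,
        # where W^{-1} = L L^T, so that X^T W X = (L^{-1} X)^T (L^{-1} X).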
# X^t W X = (L^-1X)^t (L^-1X)
l_cholesky_inv = np.tile(l_cholesky_inv[:, np.newaxis, :, :],
reps=[1, self.num_letter, 1, 1])
l_inv_X = np.matmul(l_cholesky_inv, design_x0)
l_inv_X_t = np.transpose(l_inv_X, axes=(0, 1, 3, 2))
XtWX = np.sum(np.matmul(l_inv_X_t, l_inv_X), axis=1)
# X^t W Y = (L^-1X)^t (L^-1Y)
XtWY = np.sum(np.matmul(l_inv_X_t, np.matmul(l_cholesky_inv, eeg_signals)), axis=1)
# Use cholesky decomposition to solve beta_mle
# XtWX beta = XtWY
# Step 1: Obtain Cholesky decomposition of XtWX with the penalty term
l_XWX = np.linalg.cholesky(XtWX + lambda_s * P_smooth + lambda_0 * P_zero)
l_XWX_inv = np.linalg.inv(l_XWX)
# l_XWX @ l_XWX^t beta = XtWY
# Step 2: Solve l_XWX theta = XtWY
theta = np.matmul(l_XWX_inv, XtWY)
# Step 3: Solve l_XWX^t beta = theta
beta_mle = np.matmul(np.transpose(l_XWX_inv, axes=(0, 2, 1)), theta)
return beta_mle
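    # Minimal NumPy sanity check of the Cholesky solve above (illustrative,
    # with any symmetric positive-definite A and right-hand side b):
    #     L = np.linalg.cholesky(A)
    #     L_inv = np.linalg.inv(L)
    #     beta = L_inv.T @ (L_inv @ b)  # equals np.linalg.solve(A, b)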
# MLE iterations for WLS algorithm
def from_beta_to_weights(self, design_x0, eeg_signals, beta_mle, jitter, trn_repetition=-1):
_, x_dim, _ = design_x0.shape
x0_beta = np.matmul(design_x0,
np.tile(beta_mle[:, np.newaxis, :, :],
reps=[1, self.num_letter, 1, 1]))
w_mle_inv = np.matmul(eeg_signals - x0_beta, np.transpose(eeg_signals - x0_beta, axes=(0, 1, 3, 2))) \
/ self.num_letter
w_mle_inv = np.sum(w_mle_inv, axis=1)
# Place different structure on w_mle_inv to simplify the training model
w_mle_inv = self.block_diagonal_weights(w_mle_inv, jitter, x_dim, trn_repetition)
# Apply cholesky decomposition
l_cholesky = np.linalg.cholesky(w_mle_inv)
l_cholesky_inv = np.linalg.inv(l_cholesky)
return l_cholesky_inv
@staticmethod
# Assume different across channels
def unstructured_weights(w_mle_inv, jitter, x_dim):
assert len(w_mle_inv.shape) == 3
w_mle_inv += jitter * np.eye(N=x_dim, M=x_dim)
return w_mle_inv
# Assume different across channels
def block_diagonal_weights(self, w_mle_inv, jitter, x_dim, trn_repetition, channel_dim=1):
assert len(w_mle_inv.shape) == 3
w_mle_inv_block_diag = np.zeros([channel_dim, x_dim, x_dim])
for i in range(trn_repetition):
block_low = i * self.flash_and_pause_length * self.num_rep
block_upp = (i + 1) * self.flash_and_pause_length * self.num_rep
w_mle_inv_block_diag[:, block_low:block_upp, block_low:block_upp] = \
w_mle_inv[:, block_low:block_upp, block_low:block_upp]
        # The last, smaller block (indexed with the explicit channel axis)
        block_low_2 = trn_repetition * self.num_rep * self.flash_and_pause_length
        w_mle_inv_block_diag[:, block_low_2:, block_low_2:] = \
            w_mle_inv[:, block_low_2:, block_low_2:]
        # Add jitter to the diagonal to keep the matrix positive definite
w_mle_inv_block_diag = self.unstructured_weights(w_mle_inv_block_diag, jitter, x_dim)
return w_mle_inv_block_diag
@staticmethod
# Assume different across channels
def multi_diagonal_weights(w_mle_inv, jitter, x_dim, max_lag, channel_dim=1):
assert len(w_mle_inv.shape) == 3
        # Add jitter and the diagonal term together
w_mle_inv_md = np.diagonal(w_mle_inv, offset=0, axis1=1, axis2=2)[:, :, np.newaxis] \
* np.eye(N=x_dim, M=x_dim)[np.newaxis, :, :]
w_mle_inv_md += jitter * np.eye(N=x_dim, M=x_dim)[np.newaxis, :, :]
for i in range(1, max_lag):
off_diagonal_val = np.diagonal(w_mle_inv, offset=i, axis1=1, axis2=2)
# print('off_diagonal_val has shape {}'.format(off_diagonal_val.shape))
for j in range(channel_dim):
np.fill_diagonal(w_mle_inv_md[j, i:, :], off_diagonal_val[j, :])
np.fill_diagonal(w_mle_inv_md[j, :, i:], off_diagonal_val[j, :])
return w_mle_inv_md
# Compute the loss function
def compute_mahalanobis_dist_sq(self, design_x0, eeg_signals, beta_mle, l_cholesky_inv):
Xbeta_l = np.matmul(design_x0, np.tile(beta_mle[:, np.newaxis, :, :],
reps=[1, self.num_letter, 1, 1]))
# print('Xbeta_l has shape {}'.format(Xbeta_l.shape))
Y_Xb = eeg_signals - Xbeta_l
# print('Y_Xb has shape {}'.format(Y_Xb.shape))
l_cholesky_inv = np.tile(l_cholesky_inv[:, np.newaxis, :, :],
reps=[1, self.num_letter, 1, 1])
L_inv_res = np.matmul(l_cholesky_inv, Y_Xb)
# print('L^-1 (Y-Xb) has shape {}'.format(L_inv_res.shape))
L_inv_res_sq = np.sum(np.matmul(np.transpose(L_inv_res, axes=[0, 1, 3, 2]),
L_inv_res), axis=0)
return np.squeeze(L_inv_res_sq)
# def get_log_prob_eeg_convol_fn(
# self, letter_dim, repet_dim,
# hyper_param_dict, eeg_signals, id_beta, design_x):
#
# mean_vec_1 = hyper_param_dict['mean_vec_1']
# mean_vec_0 = hyper_param_dict['mean_vec_0']
# hyper_delta_var = hyper_param_dict['hyper_delta_var']
# hyper_sigma_r_sq = hyper_param_dict['hyper_sigma_r_sq']
#
# def _log_prob_eeg_convol_fn(delta_1_value,
# delta_0_value,
# pres_array_1_value,
# pres_array_0_value):
#
# # def _print_precision(pres_chky_1, pres_chky_0):
# # print('precision_chky_1:\n {}'.format(pres_chky_1))
# # print('precision_chky_0:\n {}'.format(pres_chky_0))
# # return False # operations must return something!
# # # Turn our method into a tensorflow operation
# # prec_chky_op = tf.numpy_function(_print_precision, [pres_chky_1_value, pres_chky_0_value], tf.bool)
# #
# # assertion_op_1 = tf.compat.v1.assert_equal(
# # tf.reduce_sum(tf.linalg.band_part(pres_chky_1_value, -1, 0)), tf.cast(0, dtype=self.DAT_TYPE),
# # message='Not lower triangular for pres', summarize=4, name='low-tri-check-1'
# # )
# #
# # assertion_op_0 = tf.assert_equal(
# # tf.reduce_sum(tf.linalg.band_part(pres_chky_0_value, -1, 0)), tf.cast(0, dtype=self.DAT_TYPE),
# # message='Not symmetrical', summarize=4, name='low-tri-check-0'
# # )
#
# delta_1_log_prob = self.prior.compute_delta_log_lhd(
# mean_vec_1, hyper_delta_var, delta_1_value)
# delta_0_log_prob = self.prior.compute_delta_log_lhd(
# mean_vec_0, hyper_delta_var, delta_0_value)
#
# pres_chky_1_log_prob = self.prior.compute_pres_chky_log_lhd_2(
# hyper_sigma_r_sq, pres_array_1_value)
# pres_chky_0_log_prob = self.prior.compute_pres_chky_log_lhd_2(
# hyper_sigma_r_sq, pres_array_0_value)
# # Intermediate variable eta
# # Convert pres_chky_1/0 to upper-triangular matrices
# pres_array_1_value = self.prior.convert_1d_array_to_upper_triangular(pres_array_1_value)
# pres_array_0_value = self.prior.convert_1d_array_to_upper_triangular(pres_array_0_value)
#
# eta_1, eta_1_log_prob = self.prior.generate_eta_and_compute_log_lhd(
# pres_array_1_value, letter_dim, repet_dim, self.flash_sum)
# eta_0, eta_0_log_prob = self.prior.generate_eta_and_compute_log_lhd(
# pres_array_0_value, letter_dim, repet_dim, self.non_flash_sum)
#
# delta_combined = self.rearrange.tile_and_combine_delta(
# letter_dim, repet_dim, delta_1_value, delta_0_value)
# eta_combined = tf.transpose(tf.concat([eta_1, eta_0], axis=0),
# perm=[1, 0, 2])
# beta_combined = delta_combined + eta_combined
# beta_tilta = self.rearrange.create_joint_beta_tilta(
# letter_dim, repet_dim, beta_combined, id_beta, design_x)
# beta_tilta = tf.squeeze(beta_tilta, axis=-1)
#
# residuals = eeg_signals - beta_tilta
# eeg_signals_log_prob = -0.5 * tf.reduce_sum(tf.pow(tf.linalg.norm(
# residuals, ord='fro', axis=[-2, -1]), 2))
#
# total_log_prob = delta_1_log_prob + delta_0_log_prob \
# + pres_chky_1_log_prob + pres_chky_0_log_prob \
# + eta_1_log_prob + eta_0_log_prob \
# + eeg_signals_log_prob
#
# return total_log_prob
#
# return _log_prob_eeg_convol_fn
    # Note that 'eeg_t_mean_init' and 'eeg_nt_mean_init' are random.
def create_initial_chain(self,
eeg_t_mean_init, eeg_nt_mean_init,
eeg_t_cov, eeg_nt_cov):
upper_tri_1 = self.prior.compute_pres_upper_tri(eeg_t_cov) + \
tf.eye(self.n_length, self.n_length, [1])
upper_tri_0 = self.prior.compute_pres_upper_tri(eeg_nt_cov) + \
tf.eye(self.n_length, self.n_length, [1])
upper_array_1 = self.prior.convert_upper_triangular_to_1d_array(upper_tri_1)
upper_array_0 = self.prior.convert_upper_triangular_to_1d_array(upper_tri_0)
initial_chain_states = [
tf.random.normal(mean=eeg_t_mean_init, shape=[], dtype=self.DAT_TYPE),
tf.random.normal(mean=eeg_nt_mean_init, shape=[], dtype=self.DAT_TYPE),
# tf.random.normal(shape=[self.num_electrode,
# int(self.n_length * (1 + self.n_length) / 2)],
# dtype=self.DAT_TYPE),
# tf.random.normal(shape=[self.num_electrode,
# int(self.n_length * (1 + self.n_length) / 2)],
# dtype=self.DAT_TYPE)
upper_array_1,
upper_array_0
]
return initial_chain_states
@staticmethod
def _mala_kernel(target_log_prob_fn, step_size_init):
kernel = tfp.mcmc.MetropolisAdjustedLangevinAlgorithm(
target_log_prob_fn=target_log_prob_fn,
step_size=step_size_init)
return kernel
@staticmethod
def _random_walk_kernel(target_log_prob_fn, scale):
kernel = tfp.mcmc.RandomWalkMetropolis(
target_log_prob_fn=target_log_prob_fn,
new_state_fn=tfp.mcmc.random_walk_normal_fn(scale=scale)
)
return kernel
# Add transformed kernel and bijector
def _ttk_hmc_kernel(self, target_log_prob_fn,
num_burnin_steps, num_leapfrog_steps,
step_size_init, target_accept_prob):
step_size_init = tf.convert_to_tensor(step_size_init, dtype=self.DAT_TYPE)
target_accept_prob = tf.convert_to_tensor(target_accept_prob, dtype=self.DAT_TYPE)
# ttk = tfp.mcmc.TransformedTransitionKernel(
# inner_kernel = tfp.mcmc.HamiltonianMonteCarlo(
# target_log_prob_fn=target_log_prob_fn,
# num_leapfrog_steps=num_leapfrog_steps,
# step_size=step_size_init),
# bijector = self.create_bijectors()
ttk = tfp.mcmc.SimpleStepSizeAdaptation(
inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
num_leapfrog_steps=num_leapfrog_steps,
step_size=step_size_init),
num_adaptation_steps=int(num_burnin_steps * 0.8),
target_accept_prob=target_accept_prob
)
return ttk
def create_bijectors(self, channel_dim=1):
# tfb.Softplus(), tfb.Softplus() for univariate variance
return [tfb.Identity() for i in range(channel_dim*2)] + \
[self.unconstrained_to_precison_chky for i in range(channel_dim*2)]
@staticmethod
def trace_log_accept_ratio(states, previous_kernel_results):
return previous_kernel_results.log_accept_ratio
@staticmethod
def trace_everything(states, previous_kernel_results):
return previous_kernel_results
def mcmc_sample_chain(self, target_log_prob_fn,
t_mean_init, nt_mean_init,
eeg_t_cov, eeg_nt_cov,
n_samples, n_burn_in,
num_steps_between_results, step_size_init,
target_accept_prob, num_leapfrog_steps):
# Create initial states
initial_chain_states = self.create_initial_chain(
t_mean_init, nt_mean_init, eeg_t_cov, eeg_nt_cov)
para_mcmc = tfp.mcmc.sample_chain(
num_results=n_samples,
num_burnin_steps=n_burn_in,
num_steps_between_results=num_steps_between_results, # large value leads to large memory
current_state=initial_chain_states,
# kernel=self._ttk_hmc_kernel(
# target_log_prob_fn=target_log_prob_fn,
# num_burnin_steps=n_burn_in,
# num_leapfrog_steps=num_leapfrog_steps,
# step_size_init=step_size_init,
# target_accept_prob=target_accept_prob),
# kernel=self._mala_kernel(
# target_log_prob_fn,
# step_size_init),
kernel=self._random_walk_kernel(target_log_prob_fn, step_size_init),
parallel_iterations=10, # large iteration number leads to large memory
return_final_kernel_results=False,
trace_fn=None,
)
return para_mcmc
@staticmethod
def provide_hyper_params(mean_vec_1, mean_vec_0, hyper_delta_var, hyper_sigma_r_sq):
hyper_param_dict = {
'mean_vec_1': mean_vec_1,
'mean_vec_0': mean_vec_0,
'hyper_delta_var': hyper_delta_var,
'hyper_sigma_r_sq': hyper_sigma_r_sq
}
return hyper_param_dict
def save_hmc_params_est(self, params_list, message):
if not os.path.exists('{}/{}/{}/convol_python'.format(
self.parent_path,
self.data_type,
self.sub_folder_name[:4])):
os.mkdir('{}/{}/{}/convol_python'
.format(self.parent_path,
self.data_type,
self.sub_folder_name[:4]))
print(message)
sio.savemat('{}/{}/{}/convol_python/hmc_param_est.mat'.
format(self.parent_path, self.data_type,
self.sub_folder_name[:4]),
{
'mcmc_message': message,
'delta_1': params_list[0],
'delta_0': params_list[1],
'pres_chky_1': params_list[2],
'pres_chky_0': params_list[3],
'pres': params_list[4]
})
def save_gibbs_params_est(self, params_list, message):
if not os.path.exists('{}/{}/{}/convol_python'.format(
self.parent_path,
self.data_type,
self.sub_folder_name[:4])):
os.mkdir('{}/{}/{}/convol_python'
.format(self.parent_path,
self.data_type,
self.sub_folder_name[:4]))
print(message)
sio.savemat('{}/{}/{}/convol_python/gibbs_param_est.mat'.
format(self.parent_path, self.data_type,
self.sub_folder_name[:4]),
{
'mcmc_message': message,
'delta_1': params_list[0],
'delta_0': params_list[1],
'pres_chky_1': params_list[2],
'pres_chky_0': params_list[3],
'pres': params_list[4]
})
def save_hmc_params_est_sim(self, sim_folder_name, message, params_list):
if not os.path.exists('{}/SIM_files/{}/convol_python'.format(
self.parent_path, sim_folder_name)):
os.mkdir('{}/SIM_files/{}/convol_python'.format(self.parent_path, sim_folder_name))
sio.savemat('{}/SIM_files/{}/convol_python/hmc_param_est.mat'.
format(self.parent_path, sim_folder_name),
{
'message': message,
'delta_1': params_list[0],
'delta_0': params_list[1],
'pres_chky_1': params_list[2],
'pres_chky_0': params_list[3],
'pres': params_list[4]
})
def save_opt_params_est_sim(self, sim_folder_name, message, params_list):
if not os.path.exists('{}/SIM_files/{}/convol_python'.format(
self.parent_path, sim_folder_name
)):
os.mkdir('{}/SIM_files/{}/convol_python'.format(self.parent_path, sim_folder_name))
sio.savemat('{}/SIM_files/{}/convol_python/opt_param_est.mat'.
format(self.parent_path, sim_folder_name),
{
'message': message,
'delta_1_opt': params_list[0],
'delta_0_opt': params_list[1],
'pres_chky_1_opt': params_list[2],
'pres_chky_0_opt': params_list[3],
'pres_opt': params_list[4],
'chisq_1_opt': params_list[5],
'chisq_0_opt': params_list[6]
})
def import_hmc_params_est(self):
mcmc_dat = sio.loadmat('{}/{}/{}/convol_python/hmc_param_est.mat'
.format(self.parent_path,
self.data_type,
self.sub_folder_name[:4]))
mcmc_keys, _ = zip(*mcmc_dat.items())
# print(mcmc_keys[3:])
# mcmc_keys = list(mcmc_dat)
return mcmc_dat
def import_hmc_params_est_sim(self, sim_folder_name):
mcmc_dat = sio.loadmat('{}/SIM_files/{}/convol_python/hmc_param_est.mat'
.format(self.parent_path, sim_folder_name))
mcmc_keys, _ = zip(*mcmc_dat.items())
# print(mcmc_keys[3:])
# mcmc_keys = list(mcmc_dat)
return mcmc_dat
def import_opt_params_est_sim(self, sim_folder_name):
opt_dat = sio.loadmat('{}/SIM_files/{}/convol_python/opt_param_est.mat'
.format(self.parent_path, sim_folder_name))
opt_keys, _ = zip(*opt_dat.items())
print(opt_keys[3:])
return opt_dat
def import_gibbs_params_est(self):
gibbs_dat = sio.loadmat('{}/{}/{}/convol_python/gibbs_param_est.mat'
.format(self.parent_path,
self.data_type,
self.sub_folder_name[:4]))
gibbs_keys, _ = zip(*gibbs_dat.items())
print(gibbs_keys[3:])
return gibbs_dat
def partial_log_prob_eeg_conol_test_fn(
self, delta_1_value, delta_0_value,
pres_chky_1_value, pres_chky_0_value,
letter_dim_test, repet_dim_test,
eeg_signals_test, eeg_code_test
):
design_x = self.rearrange.create_design_mat_gen_bayes_seq(repet_dim_test)
# Generate delta_combined
delta_combined = self.rearrange.tile_and_combine_delta(
letter_dim_test, repet_dim_test, delta_1_value, delta_0_value)
pres_chky_1_value = self.prior.convert_1d_array_to_upper_triangular(pres_chky_1_value)
pres_chky_0_value = self.prior.convert_1d_array_to_upper_triangular(pres_chky_0_value)
eta_1_value, _ = self.prior.generate_eta_and_compute_log_lhd(
pres_chky_1_value, letter_dim_test, repet_dim_test, self.flash_sum)
eta_0_value, _ = self.prior.generate_eta_and_compute_log_lhd(
pres_chky_0_value, letter_dim_test, repet_dim_test, self.non_flash_sum)
eta_combined = tf.transpose(tf.concat([eta_1_value, eta_0_value], axis=0),
perm=[1, 0, 2])
beta_combined = delta_combined + eta_combined
# Use eeg_code_test to generate 36 different eeg_types
dim_temp = letter_dim_test*repet_dim_test*self.num_rep
eeg_code_test = np.reshape(eeg_code_test, [dim_temp])
mvn_log_lhd = tf.zeros([1, letter_dim_test, repet_dim_test], dtype=self.DAT_TYPE)
for _, target_letter in enumerate(self.letter_table):
eeg_type_test = self.generate_eeg_type_from_letter_eeg_code(
eeg_code_test, target_letter)
id_beta = self.rearrange.create_permute_beta_id(letter_dim_test, repet_dim_test, eeg_type_test)
beta_tilta = self.rearrange.create_joint_beta_tilta(
letter_dim_test, repet_dim_test, beta_combined, id_beta, design_x)
beta_tilta = tf.squeeze(beta_tilta, axis=-1)
log_lhd_per_letter = tf.math.cumsum(
tf.reduce_sum((eeg_signals_test-beta_tilta)**2, axis=1), axis=-1)
indices = tf.cast(tf.linspace(1., repet_dim_test, repet_dim_test), dtype='int32')
indices = (indices*self.num_rep+self.n_multiple-1)*self.flash_and_pause_length-1
log_lhd_per_letter = tf.gather(log_lhd_per_letter, indices, axis=1)
# print('log_lhd_per_letter has shape {}'.format(log_lhd_per_letter.shape))
mvn_log_lhd = tf.concat([mvn_log_lhd, log_lhd_per_letter[tf.newaxis, ...]], axis=0)
# print('mvn_log_lhd has shape {}'.format(mvn_log_lhd.shape))
eeg_mvn_log_lhd = mvn_log_lhd[1:, ...]
# print('eeg_mvn_arg_max_ has shape {}'.format(eeg_mvn_arg_max.shape))
return eeg_mvn_log_lhd
def log_prob_eeg_convol_pred_i(
self, delta_1, delta_0,
pres_mat_1, pres_mat_0,
pres_lambda, eeg_signals_test,
letter_dim_test, repet_dim_test,
eeg_code_test, design_x
):
batch_dim_1 = letter_dim_test * repet_dim_test * self.flash_sum
batch_dim_0 = letter_dim_test * repet_dim_test * self.non_flash_sum
# Generate intermediate beta
std_mvn_rv = tfd.MultivariateNormalDiag(
loc=tf.zeros([self.num_electrode, self.n_length]),
scale_identity_multiplier=tf.ones([]))
beta_1 = std_mvn_rv.sample(batch_dim_1)[..., tf.newaxis]
beta_0 = std_mvn_rv.sample(batch_dim_0)[..., tf.newaxis]
pres_mat_1_chol = tf.linalg.cholesky(pres_mat_1)
cov_mat_1_half = tf.transpose(tf.linalg.inv(pres_mat_1_chol), [0, 2, 1])
pres_mat_0_chol = tf.linalg.cholesky(pres_mat_0)
cov_mat_0_half = tf.transpose(tf.linalg.inv(pres_mat_0_chol), [0, 2, 1])
beta_1 = tf.matmul(cov_mat_1_half[tf.newaxis, ...], beta_1) + delta_1[tf.newaxis, ..., tf.newaxis]
beta_0 = tf.matmul(cov_mat_0_half[tf.newaxis, ...], beta_0) + delta_0[tf.newaxis, ..., tf.newaxis]
beta_combined = tf.squeeze(tf.concat([beta_1, beta_0], axis=0), axis=-1)
# print('beta_combined has shape {}'.format(beta_combined.shape))
mvn_log_lhd = tf.zeros([1, letter_dim_test, repet_dim_test], dtype=self.DAT_TYPE)
for _, target_letter in enumerate(self.letter_table):
eeg_type_test = self.generate_eeg_type_from_letter_eeg_code(
eeg_code_test, target_letter)
id_beta = self.rearrange.create_permute_beta_id(letter_dim_test, repet_dim_test, eeg_type_test)
beta_tilta = self.rearrange.create_joint_beta_tilta(
letter_dim_test, repet_dim_test, beta_combined, id_beta, design_x,
channel_dim=self.num_electrode)
beta_tilta = tf.squeeze(beta_tilta, axis=-1)
# print('beta_tilta has shape {}'.format(beta_tilta.shape))
log_lhd_per_letter = tf.math.cumsum(
tf.reduce_sum(-pres_lambda[tf.newaxis, ...]/2*(eeg_signals_test - beta_tilta)**2, axis=1), axis=-1)
# print('log_lhd_per_letter has shape {}'.format(log_lhd_per_letter.shape))
indices = tf.cast(tf.linspace(1., repet_dim_test, repet_dim_test), dtype='int32')
indices = (indices * self.num_rep + self.n_multiple - 1) * self.flash_and_pause_length - 1
log_lhd_per_letter = tf.gather(log_lhd_per_letter, indices, axis=1)
mvn_log_lhd = tf.concat([mvn_log_lhd, log_lhd_per_letter[tf.newaxis, ...]], axis=0)
eeg_mvn_log_lhd = mvn_log_lhd[1:, ...]
# print('eeg_mvn_arg_max_ has shape {}'.format(eeg_mvn_arg_max.shape))
return eeg_mvn_log_lhd | 47.79635 | 211 | 0.624266 | 6,809 | 49,756 | 4.163607 | 0.09179 | 0.013051 | 0.02522 | 0.014744 | 0.560282 | 0.48261 | 0.43037 | 0.378201 | 0.326349 | 0.314215 | 0 | 0.016132 | 0.279906 | 49,756 | 1,041 | 212 | 47.79635 | 0.775126 | 0.188842 | 0 | 0.304895 | 0 | 0 | 0.032857 | 0.013892 | 0 | 0 | 0 | 0 | 0.016783 | 1 | 0.083916 | false | 0.001399 | 0.015385 | 0.004196 | 0.184615 | 0.018182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2db419605d69b2c70df8a4f948d15b4547d9229c | 10,446 | py | Python | tinyms/losses.py | zjuter06060126/tinyms | 106fe7eeaa7865ace9a29da946084a101cecb93f | [
"Apache-2.0"
] | 129 | 2021-01-26T01:36:53.000Z | 2022-03-29T13:05:49.000Z | tinyms/losses.py | zjuter06060126/tinyms | 106fe7eeaa7865ace9a29da946084a101cecb93f | [
"Apache-2.0"
] | 57 | 2021-02-02T07:15:42.000Z | 2022-03-22T09:56:37.000Z | tinyms/losses.py | zjuter06060126/tinyms | 106fe7eeaa7865ace9a29da946084a101cecb93f | [
"Apache-2.0"
] | 62 | 2021-01-26T03:09:41.000Z | 2022-03-16T09:05:30.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Losses module. A loss function in machine learning is the model's training
objective: it measures how well the model performs on a dataset and defines
the target that the optimizer searches for.
"""
from mindspore.nn import loss
from mindspore.nn.loss import *
from mindspore.nn.loss.loss import _Loss
import tinyms as ts
from . import layers, primitives as P, Tensor
from .model import SSD300
__all__ = [
'net_with_loss',
'SSD300WithLoss',
'CrossEntropyWithLabelSmooth',
'CycleGANGeneratorLoss',
'CycleGANDiscriminatorLoss',
]
__all__.extend(loss.__all__)
class SigmoidFocalClassificationLoss(layers.Layer):
""""
Sigmoid focal-loss for classification.
Args:
gamma (float): Hyper-parameter to balance the easy and hard examples. Default: 2.0
alpha (float): Hyper-parameter to balance the positive and negative example. Default: 0.25
Returns:
Tensor, the focal loss.
"""
def __init__(self, gamma=2.0, alpha=0.25):
super(SigmoidFocalClassificationLoss, self).__init__()
self.sigmiod_cross_entropy = P.SigmoidCrossEntropyWithLogits()
self.sigmoid = P.Sigmoid()
self.pow = P.Pow()
self.onehot = P.OneHot()
self.on_value = Tensor(1.0, ts.float32)
self.off_value = Tensor(0.0, ts.float32)
self.gamma = gamma
self.alpha = alpha
def construct(self, logits, label):
label = self.onehot(label, P.shape(logits)[-1], self.on_value, self.off_value)
sigmiod_cross_entropy = self.sigmiod_cross_entropy(logits, label)
sigmoid = self.sigmoid(logits)
label = P.cast(label, ts.float32)
p_t = label * sigmoid + (1 - label) * (1 - sigmoid)
modulating_factor = self.pow(1 - p_t, self.gamma)
alpha_weight_factor = label * self.alpha + (1 - label) * (1 - self.alpha)
focal_loss = modulating_factor * alpha_weight_factor * sigmiod_cross_entropy
return focal_loss
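# Example usage (illustrative shapes; `np` assumed imported):
#     loss_fn = SigmoidFocalClassificationLoss(gamma=2.0, alpha=0.25)
#     logits = Tensor(np.random.randn(4, 3).astype(np.float32))
#     labels = Tensor(np.array([0, 2, 1, 1], dtype=np.int32))
#     focal = loss_fn(logits, labels)  # element-wise loss, shape [4, 3]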
class SSD300WithLoss(layers.Layer):
r"""
Provide SSD300 training loss through network.
Args:
network (layers.Layer): The training network.
Returns:
Tensor, the loss of the network.
Examples:
>>> from tinyms.model import ssd300
>>> from tinyms.losses import SSD300WithLoss
>>>
>>> net = SSD300WithLoss(ssd300())
"""
def __init__(self, network):
super(SSD300WithLoss, self).__init__()
self.network = network
self.less = P.Less()
self.tile = P.Tile()
self.reduce_sum = P.ReduceSum()
self.reduce_mean = P.ReduceMean()
self.expand_dims = P.ExpandDims()
self.class_loss = SigmoidFocalClassificationLoss(2.0, 0.75)
self.loc_loss = SmoothL1Loss()
def construct(self, x, gt_loc, gt_label, num_matched_boxes):
pred_loc, pred_label = self.network(x)
mask = P.cast(self.less(0, gt_label), ts.float32)
num_matched_boxes = self.reduce_sum(P.cast(num_matched_boxes, ts.float32))
# Localization Loss
mask_loc = self.tile(self.expand_dims(mask, -1), (1, 1, 4))
smooth_l1 = self.loc_loss(pred_loc, gt_loc) * mask_loc
loss_loc = self.reduce_sum(self.reduce_mean(smooth_l1, -1), -1)
# Classification Loss
loss_cls = self.class_loss(pred_label, gt_label)
loss_cls = self.reduce_sum(loss_cls, (1, 2))
return self.reduce_sum((loss_cls + loss_loc) / num_matched_boxes)
def net_with_loss(net):
r'''
    This function is provided for beginners who are unsure which loss function
    suits the network to be trained. Instead of choosing a loss function
    manually, users can obtain the most suitable one simply by passing in
    the network.
Args:
net (layers.Layer): The instance of network to be trained.
Raises:
TypeError: When network type is not supported.
Note:
Currently this function only supports few networks, if the network type is
not supported, the system would raise TypeError exception.
Examples:
>>> from tinyms.model import ssd300
>>> from tinyms.losses import net_with_loss
>>>
>>> net = ssd300()
>>> net_loss = net_with_loss(net)
'''
if not isinstance(net, layers.Layer):
raise TypeError("Input should be inheritted from layers.Layer!")
if isinstance(net, SSD300):
return SSD300WithLoss(net)
else:
raise TypeError("Input should be in [SSD300], got {}.".format(type(net)))
class CrossEntropyWithLabelSmooth(_Loss):
"""
    Cross-entropy loss with label smoothing.
Args:
smooth_factor (float): Smooth factor. Default is 0.
num_classes (int): Number of classes. Default is 1000.
Returns:
None.
Examples:
>>> CrossEntropyWithLabelSmooth(smooth_factor=0., num_classes=1000)
"""
def __init__(self, smooth_factor=0., num_classes=1000):
super(CrossEntropyWithLabelSmooth, self).__init__()
self.onehot = P.OneHot()
self.on_value = Tensor(1.0 - smooth_factor, ts.float32)
self.off_value = Tensor(1.0 * smooth_factor /
(num_classes - 1), ts.float32)
self.ce = SoftmaxCrossEntropyWithLogits()
self.mean = P.ReduceMean(False)
self.cast = P.Cast()
def construct(self, logit, label):
one_hot_label = self.onehot(self.cast(label, ts.int32), P.shape(logit)[1],
self.on_value, self.off_value)
out_loss = self.ce(logit, one_hot_label)
out_loss = self.mean(out_loss, 0)
return out_loss
class GANLoss(_Loss):
"""
Cycle GAN loss factory.
Args:
mode (str): The type of GAN objective. It currently supports 'vanilla', 'lsgan'. Default: 'lsgan'.
reduction (str): Specifies the reduction to be applied to the output.
Its value must be one of 'none', 'mean', 'sum'. Default: 'mean'.
Outputs:
Tensor or Scalar, if `reduction` is 'none', then output is a tensor and has the same shape as `inputs`.
Otherwise, the output is a scalar.
Raises:
NotImplementedError: Raised when GANLoss mode not recognized.
"""
def __init__(self, mode="lsgan", reduction='mean'):
super(GANLoss, self).__init__()
self.loss = None
self.ones = P.OnesLike()
if mode == "lsgan":
self.loss = loss.MSELoss(reduction)
elif mode == "vanilla":
self.loss = BCEWithLogits(reduction)
else:
raise NotImplementedError(f'GANLoss {mode} not recognized, we support lsgan and vanilla.')
def construct(self, predict, target):
target = P.cast(target, P.dtype(predict))
target = self.ones(predict) * target
loss = self.loss(predict, target)
return loss
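# Example usage (illustrative; `discriminator` is a hypothetical network):
#     gan_loss = GANLoss(mode="lsgan")
#     d_out = discriminator(fake_images)
#     loss_fake = gan_loss(d_out, Tensor(False, ts.bool_))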
class CycleGANGeneratorLoss(_Loss):
"""
Cycle GAN generator loss.
Args:
generator (layers.Layer): Generator of CycleGAN.
D_A (layers.Layer): The discriminator network of domain A to domain B.
D_B (layers.Layer): The discriminator network of domain B to domain A.
Outputs:
        Tuple of Tensors, the generator losses.
"""
def __init__(self, generator, D_A, D_B):
super(CycleGANGeneratorLoss, self).__init__()
self.lambda_A = 10.0
self.lambda_B = 10.0
self.lambda_idt = 0.5
self.use_identity = True
self.dis_loss = GANLoss("lsgan")
self.rec_loss = loss.L1Loss("mean")
self.generator = generator
self.D_A = D_A
self.D_B = D_B
self.true = Tensor(True, ts.bool_)
def construct(self, img_A, img_B):
"""If use_identity, identity loss will be used."""
fake_A, fake_B, rec_A, rec_B, identity_A, identity_B = self.generator(img_A, img_B)
loss_G_A = self.dis_loss(self.D_B(fake_B), self.true)
loss_G_B = self.dis_loss(self.D_A(fake_A), self.true)
loss_C_A = self.rec_loss(rec_A, img_A) * self.lambda_A
loss_C_B = self.rec_loss(rec_B, img_B) * self.lambda_B
if self.use_identity:
loss_idt_A = self.rec_loss(identity_A, img_A) * self.lambda_A * self.lambda_idt
loss_idt_B = self.rec_loss(identity_B, img_B) * self.lambda_B * self.lambda_idt
else:
loss_idt_A = 0
loss_idt_B = 0
loss_G = loss_G_A + loss_G_B + loss_C_A + loss_C_B + loss_idt_A + loss_idt_B
return (fake_A, fake_B, loss_G, loss_G_A, loss_G_B, loss_C_A, loss_C_B, loss_idt_A, loss_idt_B)
class CycleGANDiscriminatorLoss(_Loss):
"""
Cycle GAN discriminator loss.
Args:
D_A (layers.Layer): The discriminator network of domain A to domain B.
D_B (layers.Layer): The discriminator network of domain B to domain A.
        reduction (str): Specifies the reduction to be applied to the output. Default: 'none'.
    Outputs:
        Tensor, the discriminator loss.
"""
def __init__(self, D_A, D_B, reduction='none'):
super(CycleGANDiscriminatorLoss, self).__init__()
self.D_A = D_A
self.D_B = D_B
self.false = Tensor(False, ts.bool_)
self.true = Tensor(True, ts.bool_)
self.dis_loss = GANLoss("lsgan")
self.rec_loss = loss.L1Loss("mean")
self.reduction = reduction
def construct(self, img_A, img_B, fake_A, fake_B):
D_fake_A = self.D_A(fake_A)
D_img_A = self.D_A(img_A)
D_fake_B = self.D_B(fake_B)
D_img_B = self.D_B(img_B)
loss_D_A = self.dis_loss(D_fake_A, self.false) + self.dis_loss(D_img_A, self.true)
loss_D_B = self.dis_loss(D_fake_B, self.false) + self.dis_loss(D_img_B, self.true)
loss_D = (loss_D_A + loss_D_B) * 0.5
return loss_D
| 35.530612 | 111 | 0.644361 | 1,429 | 10,446 | 4.496151 | 0.205038 | 0.011673 | 0.013697 | 0.019455 | 0.208716 | 0.168716 | 0.121401 | 0.098988 | 0.098988 | 0.098988 | 0 | 0.017718 | 0.248995 | 10,446 | 293 | 112 | 35.651877 | 0.801275 | 0.338311 | 0 | 0.104895 | 0 | 0 | 0.043372 | 0.011148 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.041958 | 0 | 0.223776 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2db4650006f1e5e8d105e86db61cedcfe46c7f9e | 2,342 | py | Python | Vehicle/code/TFrecords_.py | m-L-0/17b-XiaGuangwei-2015 | 95a3b892b9b37847cafcf0fcc0645320a0cd9e8c | [
"MIT"
] | null | null | null | Vehicle/code/TFrecords_.py | m-L-0/17b-XiaGuangwei-2015 | 95a3b892b9b37847cafcf0fcc0645320a0cd9e8c | [
"MIT"
] | null | null | null | Vehicle/code/TFrecords_.py | m-L-0/17b-XiaGuangwei-2015 | 95a3b892b9b37847cafcf0fcc0645320a0cd9e8c | [
"MIT"
] | null | null | null | from PIL import Image
import os
import numpy as np
import tensorflow as tf
TRAIN_PATH = './tfrecords/train.tfrecords'
TEST_PATH = './tfrecords/test.tfrecords'
# Convert the given value into an int64 feature; int64_list matches the tf.train.Example definition
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
# Convert the given value into a bytes feature; bytes_list matches the tf.train.Example definition
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def write(images,labels,key_list,value_list,shape,label_dict,PATH):
for key,value in label_dict.items():
key_list.append(key)
value_list.append(value)
nums = os.listdir('./Data/汉字/')
for i in range(len(nums)):
img_temp=os.listdir('./Data/汉字/'+nums[i])
for j in range(len(img_temp)):
im=Image.open('./Data/汉字/'+nums[i]+'/'+img_temp[j])
im=im.resize((24,48))
labels.append(key_list[value_list.index(nums[i])])
im = im.convert('L')
im=np.array(im)
images.append(im)
images=np.array(images)
labels=np.array(labels)
writer = tf.python_io.TFRecordWriter(PATH)
for index in range(images.shape[0]):
        # Serialize the image array to a byte string
image_raw = images[index].tostring()
example = tf.train.Example(features=tf.train.Features(
feature={
'label': _int64_feature(labels[index]),
'image_raw': _bytes_feature(image_raw)}))
        # Write the Example to the TFRecord file
writer.write(example.SerializeToString())
writer.close()
return images.shape[0]
images=[]
labels=[]
key_list=[]
value_list=[]
shape=[]
label_dict = {0: '云',1: '京',2: '冀',3: '吉',4: '宁',5: '川',6: '广',7: '新',8: '晋',9: '桂',10: '沪',11: '津',12: '浙',13: '渝',14: '湘',15: '琼',16: '甘',17: '皖',18: '粤',19: '苏',20: '蒙',21: '藏',22: '豫',23: '贵',24: '赣',25: '辽',26: '鄂',27: '闽',28: '陕',29: '青',30: '鲁',31: '黑'}
RES_train = write(images,labels,key_list,value_list,shape,label_dict,TRAIN_PATH)
print('Chinese-character training set written; number of records:', RES_train)
images=[]
labels=[]
key_list=[]
value_list=[]
shape=[]
RES_test = write(images,labels,key_list,value_list,shape,label_dict,TEST_PATH)
print('Chinese-character test set written; number of records:', RES_test)
| 36.030769 | 261 | 0.625961 | 346 | 2,342 | 4.104046 | 0.395954 | 0.044366 | 0.050704 | 0.067606 | 0.321127 | 0.321127 | 0.238028 | 0.214789 | 0.214789 | 0.185211 | 0 | 0.037313 | 0.198975 | 2,342 | 64 | 262 | 36.59375 | 0.719616 | 0.059351 | 0 | 0.327273 | 0 | 0 | 0.076418 | 0.024848 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054545 | false | 0 | 0.145455 | 0.036364 | 0.254545 | 0.036364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2db5b3762e09025fe0aac48540bcc21360c73c7a | 1,530 | py | Python | Machine Learning Projects/Sentence Completer/main.py | SethuSenthil/HacktoberFest2020-Contributions | 97cd2113e4bafc19425f6824b4a012bb5c84417b | [
"MIT"
] | 2 | 2021-04-13T00:19:45.000Z | 2021-07-10T02:50:16.000Z | main.py | SethuSenthil/Sentence-Ender | fce037273d51b0fb342b4078ba9eb39fd0df0e82 | [
"MIT"
] | null | null | null | main.py | SethuSenthil/Sentence-Ender | fce037273d51b0fb342b4078ba9eb39fd0df0e82 | [
"MIT"
] | null | null | null | # %%
import torch
import string
from transformers import RobertaTokenizer, RobertaForMaskedLM
roberta_tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
roberta_model = RobertaForMaskedLM.from_pretrained('roberta-base').eval()
top_k = 10
def decode(tokenizer, pred_idx, top_clean):
ignore_tokens = string.punctuation + '[PAD]'
tokens = []
for w in pred_idx:
token = ''.join(tokenizer.decode(w).split())
if token not in ignore_tokens:
tokens.append(token.replace('##', ''))
return '\n'.join(tokens[:top_clean])
def encode(tokenizer, text_sentence, add_special_tokens=True):
text_sentence = text_sentence.replace('<mask>', tokenizer.mask_token)
    # if <mask> is the last token, append a "." so that models don't predict punctuation.
if tokenizer.mask_token == text_sentence.split()[-1]:
text_sentence += ' .'
input_ids = torch.tensor([tokenizer.encode(text_sentence, add_special_tokens=add_special_tokens)])
mask_idx = torch.where(input_ids == tokenizer.mask_token_id)[1].tolist()[0]
return input_ids, mask_idx
def get_all_predictions(text_sentence, top_clean=5):
# ========================= ROBERTA =================================
input_ids, mask_idx = encode(roberta_tokenizer, text_sentence, add_special_tokens=True)
with torch.no_grad():
predict = roberta_model(input_ids)[0]
roberta = decode(roberta_tokenizer, predict[0, mask_idx, :].topk(top_k).indices.tolist(), top_clean)
return {'roberta': roberta}
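# Example usage (illustrative sentence; any input containing '<mask>' works):
if __name__ == '__main__':
    predictions = get_all_predictions('The weather today is <mask>', top_clean=5)
    print(predictions['roberta'])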
| 37.317073 | 104 | 0.690196 | 192 | 1,530 | 5.244792 | 0.364583 | 0.095333 | 0.063555 | 0.065541 | 0.109235 | 0.08143 | 0.08143 | 0 | 0 | 0 | 0 | 0.006173 | 0.152941 | 1,530 | 40 | 105 | 38.25 | 0.770833 | 0.1 | 0 | 0 | 0 | 0 | 0.03496 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dbab78d229531508fd3e07f996a245ff7f27dab | 575 | py | Python | Beginner/2486.py | LorranSutter/URI-Online-Judge | aef885b9a7caa83484cf172e29eea8ec92fc3627 | [
"MIT"
] | null | null | null | Beginner/2486.py | LorranSutter/URI-Online-Judge | aef885b9a7caa83484cf172e29eea8ec92fc3627 | [
"MIT"
] | null | null | null | Beginner/2486.py | LorranSutter/URI-Online-Judge | aef885b9a7caa83484cf172e29eea8ec92fc3627 | [
"MIT"
] | null | null | null | T = int(input())
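# URI Online Judge 2486 ("C Mais ou Menos?"): sum the vitamin C content (mg)
# of the purchased fruit items and report whether the total is below 110 mg,
# above 130 mg, or within the recommended range. (Header comment added; the
# problem statement is inferred from the code below.)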
while T != 0:
tot = 0
for k in range(T):
S = input().split()
N = int(S[0])
A = ' '.join(S[1:])
if A == "suco de laranja":
tot += N*120
elif A == "morango fresco":
tot += N*85
elif A == "mamao":
tot += N*85
elif A == "goiaba vermelha":
tot += N*70
elif A == "manga":
tot += N*56
elif A == "laranja":
tot += N*50
elif A == "brocolis":
tot += N*34
if tot < 110:
print("Mais {0} mg".format(110-tot))
elif tot > 130:
print("Menos {0} mg".format(tot-130))
else:
print("{0} mg".format(tot))
T = int(input())
| 17.424242 | 39 | 0.511304 | 99 | 575 | 2.969697 | 0.424242 | 0.095238 | 0.091837 | 0.068027 | 0.07483 | 0 | 0 | 0 | 0 | 0 | 0 | 0.08076 | 0.267826 | 575 | 32 | 40 | 17.96875 | 0.617577 | 0 | 0 | 0.142857 | 0 | 0 | 0.172174 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.107143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dbada0daba2839e4c796e70cd1b2083eaa6d710 | 5,334 | py | Python | spamclib/sync_tcp_client.py | wevsty/spamclib | 4e844f9644ecdf4f9dfadbe1e3a784dc7d273a31 | [
"MIT"
] | 1 | 2019-12-06T09:00:43.000Z | 2019-12-06T09:00:43.000Z | spamclib/sync_tcp_client.py | wevsty/spamclib | 4e844f9644ecdf4f9dfadbe1e3a784dc7d273a31 | [
"MIT"
] | null | null | null | spamclib/sync_tcp_client.py | wevsty/spamclib | 4e844f9644ecdf4f9dfadbe1e3a784dc7d273a31 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import socket
import ssl
import time
import selectors
class SyncTcpClient(object):
def __init__(self,
*,
host='localhost',
port=783,
timeout=60.0,
ssl_context=None
):
self.host = host
self.port = port
self.timeout = timeout
# ssl.SSLContext
self.ssl_context = ssl_context
self.sock = None
def set_host(self, host):
self.host = host
def set_port(self, port):
self.port = port
def set_timeout(self, timeout):
self.timeout = timeout
def set_ssl_context(self, context):
self.ssl_context = context
def set_blocking(self, b_block):
self.sock.setblocking(b_block)
@staticmethod
def get_default_ssl_context():
        ssl_context = ssl.create_default_context()
        # verify_mode is a property, not a method; hostname checking must be
        # disabled before certificate verification can be turned off
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE
        return ssl_context
def create_connection(self):
sock = socket.create_connection((self.host, self.port), timeout=self.timeout)
        if self.ssl_context is None:
self.sock = sock
else:
self.sock = self.ssl_context.wrap_socket(sock)
# self.sock.setblocking(False)
return
def connect(self):
self.create_connection()
def close(self):
        if self.sock is not None:
self.sock.close()
self.sock = None
def is_closed(self):
        if self.sock is not None:
return False
return True
    def send(self, bytes_data):
        # Identical behavior to send_all; kept as an alias for API compatibility
        self.send_all(bytes_data)
def send_all(self, bytes_data):
if isinstance(bytes_data, str):
self.sock.sendall(bytes_data.encode('utf-8'))
elif isinstance(bytes_data, bytes):
self.sock.sendall(bytes_data)
else:
self.sock.sendall(bytes(bytes_data))
pass
def select_recv_callback(self, sock, recv_size=1024 * 32):
bytes_data = sock.recv(recv_size)
return bytes_data
def select_recv(self, loop_sleep_second=0.25):
bytes_recv = b''
n_buffer_size = 1024 * 32
b_loop = True
b_sleep = True
float_waited_time = 0.0
selector = selectors.DefaultSelector()
selector.register(self.sock, selectors.EVENT_READ, self.select_recv_callback)
while b_loop:
se_events = selector.select()
for key, mask in se_events:
callback = key.data
callback_return = callback(key.fileobj, n_buffer_size)
if callback_return:
bytes_recv += callback_return
else:
b_loop = False
break
if len(callback_return) == n_buffer_size:
b_sleep = False
else:
b_sleep = True
pass
if b_loop and b_sleep:
float_waited_time += loop_sleep_second
if float_waited_time > self.timeout:
break
time.sleep(loop_sleep_second)
pass
selector.unregister(self.sock)
selector.close()
return bytes_recv
def select_recv_until(self, bytes_end_flag=b'\n', loop_sleep_second=0.25):
bytes_recv = b''
n_buffer_size = 1024 * 32
b_loop = True
b_sleep = True
float_waited_time = 0.0
selector = selectors.DefaultSelector()
selector.register(self.sock, selectors.EVENT_READ, self.select_recv_callback)
while b_loop:
se_events = selector.select()
for key, mask in se_events:
callback = key.data
callback_return = callback(key.fileobj, n_buffer_size)
if callback_return:
bytes_recv += callback_return
if callback_return.endswith(bytes_end_flag):
b_loop = False
b_sleep = False
break
if len(callback_return) == n_buffer_size:
b_sleep = False
else:
b_sleep = True
pass
if b_loop and b_sleep:
float_waited_time += loop_sleep_second
if float_waited_time > self.timeout:
break
time.sleep(loop_sleep_second)
pass
selector.unregister(self.sock)
selector.close()
return bytes_recv
def recv(self, recv_size=1024 * 32):
return self.sock.recv(recv_size)
def recv_until(self, bytes_end_flag=b'\n'):
bytes_data = b''
while True:
bytes_recv_data = self.recv()
bytes_data += bytes_recv_data
if len(bytes_end_flag) == 0:
if len(bytes_recv_data) == 0:
break
else:
if bytes_recv_data.endswith(bytes_end_flag) == True:
break
return bytes_data
if __name__ == '__main__':
pass
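    # Example usage (illustrative host/port; assumes a reachable service):
    #     client = SyncTcpClient(host='localhost', port=783, timeout=10.0)
    #     client.connect()
    #     client.send_all(b'PING\r\n')
    #     print(client.recv_until(b'\r\n'))
    #     client.close()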
| 30.135593 | 85 | 0.552868 | 614 | 5,334 | 4.534202 | 0.161238 | 0.063218 | 0.032328 | 0.043103 | 0.551006 | 0.538075 | 0.538075 | 0.538075 | 0.518678 | 0.518678 | 0 | 0.013638 | 0.367642 | 5,334 | 176 | 86 | 30.306818 | 0.81174 | 0.01631 | 0 | 0.624161 | 0 | 0 | 0.005913 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.120805 | false | 0.04698 | 0.026846 | 0.006711 | 0.214765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dbc35f1116639374169837f761a162c1af402f8 | 5,886 | py | Python | detect.py | pooya-mohammadi/classification_metric_learning | 75835fc4d8a7cf74f11a14ed8311ba8112e35729 | [
"Apache-2.0"
] | 6 | 2021-12-06T17:39:20.000Z | 2022-03-14T07:10:31.000Z | detect.py | pooya-mohammadi/classification_metric_learning | 75835fc4d8a7cf74f11a14ed8311ba8112e35729 | [
"Apache-2.0"
] | null | null | null | detect.py | pooya-mohammadi/classification_metric_learning | 75835fc4d8a7cf74f11a14ed8311ba8112e35729 | [
"Apache-2.0"
] | null | null | null | import glob
import os
import sys
from deep_utils import dump_pickle, load_pickle
import time
from itertools import chain
from argparse import ArgumentParser
import torch
from pretrainedmodels.utils import ToRange255
from pretrainedmodels.utils import ToSpaceBGR
from scipy.spatial.distance import cdist
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
from torchvision import transforms
from data.inshop import InShop
from metric_learning.util import SimpleLogger
from metric_learning.sampler import ClassBalancedBatchSampler
from PIL import Image
import metric_learning.modules.featurizer as featurizer
import metric_learning.modules.losses as losses
import numpy as np
from evaluation.retrieval import evaluate_float_binary_embedding_faiss, _retrieve_knn_faiss_gpu_inner_product
dataset = "InShop"
dataset_root = ""
batch_size = 64
model_name = "resnet50"
lr = 0.01
gamma = 0.1
class_balancing = True
images_per_class = 5
lr_mult = 1
dim = 2048
test_every_n_epochs = 2
epochs_per_step = 4
pretrain_epochs = 1
num_steps = 3
output = "data1/output"
def adjust_learning_rate(optimizer, epoch, epochs_per_step, gamma=0.1):
"""Sets the learning rate to the initial LR decayed by 10 every epochs"""
# Skip gamma update on first epoch.
if epoch != 0 and epoch % epochs_per_step == 0:
for param_group in optimizer.param_groups:
param_group['lr'] *= gamma
print("learning rate adjusted: {}".format(param_group['lr']))
def main():
torch.cuda.set_device(0)
gpu_device = torch.device('cuda')
output_directory = os.path.join(output, dataset, str(dim),
'_'.join([model_name, str(batch_size)]))
if not os.path.exists(output_directory):
os.makedirs(output_directory)
out_log = os.path.join(output_directory, "train.log")
sys.stdout = SimpleLogger(out_log, sys.stdout)
# Select model
model_factory = getattr(featurizer, model_name)
model = model_factory(dim)
weights = torch.load(
'/home/ai/projects/symo/classification_metric_learning/data1/output/InShop/2048/resnet50_75/epoch_30.pth')
model.load_state_dict(weights)
eval_transform = transforms.Compose([
transforms.Resize((256, 256)),
transforms.CenterCrop(max(model.input_size)),
transforms.ToTensor(),
ToSpaceBGR(model.input_space == 'BGR'),
ToRange255(max(model.input_range) == 255),
transforms.Normalize(mean=model.mean, std=model.std)
])
# Setup dataset
# train_dataset = InShop('../data1/data/inshop', transform=train_transform)
query_dataset = InShop('data1/data/inshop', train=False, query=True, transform=eval_transform)
index_dataset = InShop('data1/data/inshop', train=False, query=False, transform=eval_transform)
query_loader = DataLoader(query_dataset,
batch_size=batch_size,
drop_last=False,
shuffle=False,
pin_memory=True,
num_workers=0)
model.to(device='cuda')
model.eval()
query_image = Image.open(
"/home/ai/Pictures/im3.png").convert(
'RGB')
with torch.no_grad():
query_image = model(eval_transform(query_image).to('cuda').unsqueeze(0))[0].cpu().numpy()
index_loader = DataLoader(index_dataset,
batch_size=75,
drop_last=False,
shuffle=False,
pin_memory=True,
num_workers=0)
# db_list = extract_feature(model, index_loader, 'cuda')
db_list = load_pickle('db.pkl')
# db_dirs = [
# "/home/ai/projects/symo/classification_metric_learning/data1/data/inshop/img/WOMEN/Blouses_Shirts/id_00000001",
# "/home/ai/projects/symo/classification_metric_learning/data1/data/inshop/img/WOMEN/Blouses_Shirts/id_00000004",
# "/home/ai/projects/symo/classification_metric_learning/data1/data/inshop/img/WOMEN/Blouses_Shirts/id_00000038",
# "/home/ai/projects/symo/classification_metric_learning/data1/data/inshop/img/WOMEN/Blouses_Shirts/id_00000067",
# ]
# db_list = {}
# with torch.no_grad():
#
# for dir_ in db_dirs:
# for n in os.listdir(dir_):
# img_path = os.path.join(dir_, n)
# img = Image.open(img_path)
# db_list[img_path] = model(eval_transform(img).unsqueeze(0)).cpu().numpy()[0]
v = get_most_similar(query_image, db_list)
print(v)
def get_most_similar(feature, features_dict, n=10, distance='cosine'):
features = list(features_dict.values())
ids = list(features_dict.keys())
p = cdist(np.array(features),
np.expand_dims(feature, axis=0),
metric=distance)[:, 0]
group = zip(p, ids.copy())
res = sorted(group, key=lambda x: x[0])
r = res[:n]
return r
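# Example usage (illustrative vectors and paths):
#     query_vec = np.random.rand(2048)
#     gallery = {'img_a.jpg': np.random.rand(2048),
#                'img_b.jpg': np.random.rand(2048)}
#     top = get_most_similar(query_vec, gallery, n=2)  # [(distance, path), ...]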
def extract_feature(model, loader, gpu_device):
"""
Extract embeddings from given `model` for given `loader` dataset on `gpu_device`.
"""
model.eval()
model.to(gpu_device)
db_dict = {}
log_every_n_step = 10
with torch.no_grad():
for i, (im, class_label, instance_label, index) in enumerate(loader):
im = im.to(device=gpu_device)
embedding = model(im)
            # Use a distinct loop variable so the batch index `i` from
            # enumerate is not shadowed
            for idx, em in zip(index, embedding):
                db_dict[loader.dataset.image_paths[int(idx)]] = em.detach().cpu().numpy()
if (i + 1) % log_every_n_step == 0:
print('Process Iteration {} / {}:'.format(i, len(loader)))
dump_pickle('db.pkl', db_dict)
return db_dict
if __name__ == '__main__':
main()
| 36.7875 | 121 | 0.658682 | 756 | 5,886 | 4.912698 | 0.314815 | 0.024233 | 0.03231 | 0.024233 | 0.199785 | 0.182553 | 0.182553 | 0.182553 | 0.157243 | 0.157243 | 0 | 0.024493 | 0.230037 | 5,886 | 159 | 122 | 37.018868 | 0.795013 | 0.184506 | 0 | 0.12069 | 0 | 0.008621 | 0.066163 | 0.026885 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.189655 | 0 | 0.241379 | 0.025862 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dbfeefdb4668cb718f320b733ee5f2d211beb95 | 4,168 | py | Python | ext_runs/run_delly/run_delly.py | nesilin/evolution_TALL_adults | f36d6ebaeb43376096c14fc9ca20116bc2febae6 | [
"Apache-2.0"
] | null | null | null | ext_runs/run_delly/run_delly.py | nesilin/evolution_TALL_adults | f36d6ebaeb43376096c14fc9ca20116bc2febae6 | [
"Apache-2.0"
] | null | null | null | ext_runs/run_delly/run_delly.py | nesilin/evolution_TALL_adults | f36d6ebaeb43376096c14fc9ca20116bc2febae6 | [
"Apache-2.0"
] | 1 | 2021-03-26T15:38:53.000Z | 2021-03-26T15:38:53.000Z | import sys, os
os.environ["PATH"] = os.path.dirname(sys.executable) + os.pathsep + os.environ["PATH"]
import click
import subprocess
import pandas as pd
@click.command()
@click.option('--output_path',
'-out',
type=click.Path(exists=True),
required = True,
help="Output path to write results")
@click.option('--normal_bam',
'-nor_bam',
type=click.Path(exists=True),
required = True,
help="BAM of the normal samples (or remission)",
)
@click.option('--tumor_bam',
'-tum_bam',
type=click.Path(exists=True),
required = True,
help="BAM of the tumor samples (primary/relapse)",
)
@click.option('--comparison',
'-com',
type=str,
required = True,
help="comparison as tumorid_vs_normalid",
)
@click.option('--genome_reference_fasta',
'-ref',
type=click.Path(exists=True),
required = True,
help="FASTA file of the reference genome used in the aligments",
)
@click.option('--excludable_regions',
'-excl',
type=click.Path(exists=True),
required = True,
help="External file with excludable regions such as telomere and centromere regions (provided by delly)",
)
def cli(output_path, normal_bam,tumor_bam, comparison, genome_reference_fasta, excludable_regions):
"""
It runs Delly and svtools to obtain bedpe files with the structural variant calls
"""
tumor_id = comparison.split("_vs_")[0]
normal_id = comparison.split("_vs_")[1]
sample_tsv = pd.DataFrame.from_dict({tumor_id: 'tumor',
normal_id: 'control'}, orient='index')
sample_tsv.to_csv(os.path.join(output_path, 'sample.tsv'), sep='\t',header=False)
sv_list = ['bnd', 'del', 'dup', 'ins', 'inv']
for sv in sv_list:
call_file = os.path.join(output_path,comparison+"_"+sv+".bcf")
subprocess.run("source activate sv_delly_calling && delly call -t "+sv.upper()+" -q 20 -x " + excludable_regions
+' -o ' + call_file + " -g " + genome_reference_fasta + ' ' + tumor_bam + ' ' + normal_bam,
shell=True, executable='/bin/bash')
filter_file = os.path.join(output_path, comparison + "_" + sv + "_filtered.bcf")
sample_tsv = os.path.join(output_path, 'sample.tsv')
if sv == 'bnd':
subprocess.run("source activate sv_delly_calling && delly filter -p -f somatic -m 0 -r 0.75 -a 0.1 -o "+
filter_file+" -s "+sample_tsv+" "+call_file, shell=True, executable='/bin/bash')
elif (sv == 'del') or (sv == 'ins'):
subprocess.run("source activate sv_delly_calling && delly filter -p -f somatic -m 50 -r 0.75 -a 0.1 -o "+
filter_file+" -s "+sample_tsv+" "+call_file, shell=True, executable='/bin/bash')
elif sv == 'inv':
subprocess.run("source activate sv_delly_calling && delly filter -p -f somatic -m 0 -r 0.75 -a 0.1 -o "+
filter_file+" -s "+sample_tsv+" "+call_file, shell=True, executable='/bin/bash')
elif sv == 'dup':
subprocess.run("source activate sv_delly_calling && delly filter -p -f somatic -m 0 -r 0.75 -a 0.1 -o "+
filter_file+" -s "+sample_tsv+" "+call_file, shell=True, executable='/bin/bash')
        else:
            raise ValueError("wrong sv, expected one of: bnd, del, dup, ins, inv")
call_vcf = os.path.join(output_path,comparison+"_"+sv+"_delly.vcf")
subprocess.run("source activate sv_delly_calling && bcftools view " +filter_file+" > "+call_vcf,
shell=True, executable='/bin/bash')
bedpe = os.path.join(output_path,comparison+"_"+sv+"_delly.bedpe")
subprocess.run("source activate sv_delly_calling && svtools vcftobedpe -i "+call_vcf+" -o "+bedpe,
shell=True, executable='/bin/bash')
if __name__ == '__main__':
cli() | 47.908046 | 120 | 0.567418 | 513 | 4,168 | 4.434698 | 0.267057 | 0.03956 | 0.058462 | 0.083077 | 0.502857 | 0.468571 | 0.468571 | 0.407033 | 0.271209 | 0.271209 | 0 | 0.009821 | 0.291507 | 4,168 | 87 | 121 | 47.908046 | 0.760582 | 0.019434 | 0 | 0.272727 | 0 | 0.051948 | 0.297224 | 0.005895 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012987 | false | 0 | 0.051948 | 0 | 0.064935 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dc1166f915085f858d0906dc0c46c5854fca41f | 1,351 | py | Python | getspdx.py | swinslow/scaffold | 4cf48b9f1545ad789095cf93a68a78a5df63f8b5 | [
"Apache-2.0"
] | null | null | null | getspdx.py | swinslow/scaffold | 4cf48b9f1545ad789095cf93a68a78a5df63f8b5 | [
"Apache-2.0"
] | 18 | 2020-01-09T21:50:34.000Z | 2021-01-04T19:02:37.000Z | getspdx.py | swinslow/scaffold | 4cf48b9f1545ad789095cf93a68a78a5df63f8b5 | [
"Apache-2.0"
] | null | null | null | # Copyright The Linux Foundation
# SPDX-License-Identifier: Apache-2.0
import os
from pathlib import Path
from fossdriver.tasks import SPDXTV
from datatypes import Status, ProjectRepoType
def doGetSPDXForSubproject(cfg, fdServer, prj, sp):
uploadName = os.path.basename(sp._code_path)
uploadFolder = f"{prj._name}-{cfg._month}"
spdxFolder = os.path.join(cfg._storepath, cfg._month, "spdx", prj._name)
spdxFilename = f"{sp._name}-{sp._code_pulled}.spdx"
if uploadName == "":
print(f"{prj._name}/{sp._name}: no code path in config, so no upload name; not running agents")
return False
# create spdx directory for project if it doesn't already exist
if not os.path.exists(spdxFolder):
os.makedirs(spdxFolder)
# run SPDX tag-value agent
print(f"{prj._name}/{sp._name}: getting SPDX tag-value file")
spdxFilePath = os.path.join(spdxFolder, spdxFilename)
t = SPDXTV(fdServer, uploadName, uploadFolder, spdxFilePath)
retval = t.run()
if not retval:
print(f"{prj._name}/{sp._name}: error getting SPDX tag-value file")
return False
# once we get here, the agents have been run
sp._status = Status.GOTSPDX
# and when we return, the runner framework should update the project's
# status to reflect the min of its subprojects
return True
| 33.775 | 103 | 0.696521 | 189 | 1,351 | 4.888889 | 0.486772 | 0.037879 | 0.034632 | 0.042208 | 0.111472 | 0.061688 | 0 | 0 | 0 | 0 | 0 | 0.001854 | 0.201332 | 1,351 | 39 | 104 | 34.641026 | 0.854495 | 0.22946 | 0 | 0.086957 | 0 | 0.043478 | 0.245886 | 0.121975 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.173913 | 0 | 0.347826 | 0.130435 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dc1ce330e040dbf315cfd61027eaf0b7fe369bb | 3,164 | py | Python | second_file.py | pererawhis20/shop_ware.py | a70529447bfce539d90af1f956d4761e852f5e58 | [
"MIT"
] | 1 | 2021-07-15T19:27:45.000Z | 2021-07-15T19:27:45.000Z | second_file.py | pererawhis20/shop_ware.py | a70529447bfce539d90af1f956d4761e852f5e58 | [
"MIT"
] | null | null | null | second_file.py | pererawhis20/shop_ware.py | a70529447bfce539d90af1f956d4761e852f5e58 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 18 01:58:05 2021
@author: ASUS
"""
D = {}
list_1 = []
list_2 = []
def main():
global list_1
global list_2
def number():
Contact_No = input("3) Contact No:")
n = str(Contact_No)
        if len(n) == 10:
pass
else:
print(' ')
print(" Phone Number is Incorrect,Please Check Again.")
main()
list_1 = Contact_No
list_2.append(list_1)
number()
def details():
global D
global list_2
try:
G = True
while(G):
Name = input("1) Name:")
Adress = input("2) Adress:")
main()
Contact_No = list_2[0]
import datetime
x = datetime.datetime.now()
Last_Date_of_Arrival = x.year,x.month,x.day
Arrived_Time = x.hour,x.minute,x.second
class Customer:
def __init__(self,Name,Adress,Contact_No,Last_Date_of_Arrival,Arrived_Time):
                    self.__Name = Name.capitalize()  # private
self.__Adress = Adress.capitalize() # private
self.__Contact_No = Contact_No # private
self.Last_Date_of_Arrival = Last_Date_of_Arrival
self.Arrived_Time = Arrived_Time
def private(self):
D[C.__Name]= C.__dict__
def hello(self):
print("* Hello",C.__Name,"['_']")
print("* Thank You for your information,Because of Covid19.")
print("* Stay Safe")
C = Customer(Name,Adress,Contact_No,Last_Date_of_Arrival,Arrived_Time)
print(" ")
#print("Hello",C.Name,"['_']")
#print("Thank You for your information,Because of Covid19.")
#D[Name.Name]= Name.__dict__
C.hello()
C.private()
            list_2 = []  # reset the global contact list for the next customer
G = False
except:
pass
def customdetails():
global D
print(" ")
for a,b in D.items():
print("===========================================")
print(" =","Customer","Details =")
print("-------------------------------------------")
for x,y in b.items():
print("*", x,":",y)
print("-------------------------------------------")
print("===========================================")
print(" ")
print(" ")
def raw():
details()
def raw1():
global D
if D == { }:
print("No Attendance"+"\n ")
else:
customdetails()
if __name__ == "__main__":
raw()
raw1()
| 21.82069 | 93 | 0.385588 | 284 | 3,164 | 4.049296 | 0.341549 | 0.070435 | 0.043478 | 0.073913 | 0.189565 | 0.189565 | 0.189565 | 0.189565 | 0.189565 | 0.189565 | 0 | 0.020918 | 0.456068 | 3,164 | 144 | 94 | 21.972222 | 0.647298 | 0.06732 | 0 | 0.265823 | 0 | 0 | 0.139906 | 0.062341 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113924 | false | 0.025316 | 0.012658 | 0 | 0.139241 | 0.202532 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dc3ba5f5b6719caa1c9a162cb2db0970e3d0959 | 23,066 | py | Python | server/apps/report/generator/end_of_trip/generator.py | iotile/iotile_cloud | 9dc65ac86d3a730bba42108ed7d9bbb963d22ba6 | [
"MIT"
] | null | null | null | server/apps/report/generator/end_of_trip/generator.py | iotile/iotile_cloud | 9dc65ac86d3a730bba42108ed7d9bbb963d22ba6 | [
"MIT"
] | null | null | null | server/apps/report/generator/end_of_trip/generator.py | iotile/iotile_cloud | 9dc65ac86d3a730bba42108ed7d9bbb963d22ba6 | [
"MIT"
] | null | null | null | import csv
import datetime
import logging
import time
from io import StringIO
import numpy as np
import pandas as pd
from django.db.models import Q
from django.utils import timezone
from iotile_cloud.utils.gid import IOTileBlockSlug, IOTileDeviceSlug, IOTileStreamSlug, IOTileVariableSlug
from apps.physicaldevice.models import Device
from apps.property.models import GenericProperty
from apps.sqsworker.exceptions import WorkerActionHardError
from apps.stream.models import StreamId, StreamVariable
from apps.streamdata.utils import get_stream_output_mdo
from apps.utils.aws.redshift import get_ts_from_redshift
from apps.utils.data_helpers.manager import DataManager
from apps.utils.gid.convert import get_device_and_block_by_did, gid2int
from apps.utils.iotile.variable import SYSTEM_VID
from apps.utils.objects.utils import get_device_or_block
from apps.utils.timezone_utils import convert_to_utc, str_to_dt_utc
from ..base import ReportGenerator
_TRIP_SUMMARY_VID = gid2int(SYSTEM_VID['TRIP_SUMMARY'])
logger = logging.getLogger(__name__)
def dt_format(dt):
return dt.strftime('%Y-%m-%d %H:%M:%S')
class TripSummary(object):
""" Represents a Trip Summary
"""
_lid_map = {
'5020': 's_events',
'5021': 's_pressure',
'5022': 's_humidity',
'5023': 's_temp',
SYSTEM_VID['TRIP_SUMMARY']: 's_summary', # Trip Report Summary
}
s_temp = None
s_humidity = None
s_pressure = None
s_events = None
s_start = None
s_summary = None
device_or_block_slug = ''
# project_slug = ''
ts_start = None
ts_end = None
data = None
no_start_trip = False
data_was_masked = False
device_or_block = None
def __init__(self, device_or_block):
self.device_or_block = device_or_block
try:
self.device_or_block_slug = IOTileDeviceSlug(device_or_block.slug)
except ValueError:
self.device_or_block_slug = IOTileBlockSlug(device_or_block.slug)
# self.project_slug = IOTileProjectSlug(p_slug)
self.s_temp = None
self.s_humidity = None
self.s_pressure = None
self.s_events = None
self.s_start = None
self.s_end = None
self.s_summary = None
self.ts_start = None
self.ts_end = None
self.data = None
self.no_start_trip = False
self.data_was_masked = False
@classmethod
def compute_time_active(cls, df, condition_met_count):
"""
        This hard-to-name function is used to take a dataframe, and a condition_met_count
        representing the number of rows that meet some condition (e.g. (df['value'] < 17).sum()).
The function uses the first and last index to determine the time delta between
and with it, the average time delta between values.
It then computes the amount of time where the given condition was met.
This is used to compute, for example, the amount of time that a POD was under 17C
or above 30C
:param df: DataFrame with a 'value' column and timestamp index
:param condition_met_count: Number of rows that meet condition
:return: string representation of the datetime.timedelta
"""
first_value = df.iloc[0].name
last_value = df.iloc[-1].name
delta = last_value - first_value
count = df['value'].count()
if int(condition_met_count):
time_in_condition = delta / int(count - 1) * int(condition_met_count)
result = str(time_in_condition.to_pytimedelta())
else:
result = '0:00:00'
return result
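    # Illustrative sketch (hypothetical numbers, not from real stream data):
    # for a frame spanning one hour with 61 evenly spaced samples (60 gaps)
    # and 4 rows below 17C, compute_time_active returns the string for
    # (1 hour / 60) * 4, i.e. '0:04:00'.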
def _get_stream_slug_for(self, variable):
stream_slug = self.device_or_block.get_stream_slug_for(variable)
return str(stream_slug)
def _q_by_stream(self, stream_slug):
"""Create QuerySet filter with datetime ranges if available"""
q = Q(stream_slug=stream_slug)
if self.ts_start:
q = q & Q(timestamp__gte=get_ts_from_redshift(self.ts_start))
if self.ts_end:
q = q & Q(timestamp__lte=get_ts_from_redshift(self.ts_end))
logger.info('--> {}'.format(q))
return q
def add_stream(self, lid, stream):
if lid in self._lid_map:
self.__setattr__(self._lid_map[lid], stream)
def _get_time_dataframe(self, stream):
qs = DataManager.df_filter_qs_using_q('data', self._q_by_stream(stream.slug))
df = qs.to_dataframe(['value', ], index='timestamp')
mdo = get_stream_output_mdo(stream)
if mdo:
try:
df['value'] = df['value'].apply(lambda x: mdo.compute(x))
except Exception as e:
raise WorkerActionHardError(e)
return df
def _compute_basic_env_stats(self, name, df, units):
stats = df.agg(['min', 'median', 'max'])
if not stats.empty:
return {
'Max {} ({})'.format(name, units): stats.loc['max'].values[0],
'Min {} ({})'.format(name, units): stats.loc['min'].values[0],
'Median {} ({})'.format(name, units): stats.loc['median'].values[0],
}
return {}
def _compute_delta_v(self, x):
terms = [x[term] for term in self._delta_v_terms]
max_dv = max(*terms)
min_dv = min(*terms)
if max_dv > abs(min_dv):
return max_dv
return min_dv
def _compute_event_data(self, event_qs, sg_config_consts):
dt_index = pd.to_datetime([x.timestamp for x in event_qs])
extra_data = [x.extra_data for x in event_qs]
assert 'max_g_col' in sg_config_consts
assert 'max_dv_col' in sg_config_consts
max_g_col = sg_config_consts['max_g_col']
max_dv_col = sg_config_consts['max_dv_col']
df = pd.DataFrame(extra_data, index=dt_index)
# For Saver backwards compatibility, look for alternative labels
if max_g_col not in list(df):
max_g_col = 'max_peak'
        if max_g_col not in list(df):  # still missing after the Saver fallback
data = {
'Max Peak (G)': 'Error: peak or max_g not found'
}
return data
data = {
'First event at (UTC)': dt_format(df.iloc[0].name),
'Last event at (UTC)': dt_format(df.iloc[-1].name),
'Event Count': int(df[max_g_col].count())
}
if self.no_start_trip:
# For backwards compatibility, if there was no start trip, use first/last event for duration
data['Duration (Days)'] = (df.iloc[-1].name - df.iloc[0].name) / datetime.timedelta(days=1)
if max_g_col in list(df):
max_g_idx = df[max_g_col].idxmax()
if 'delta_v_terms' in sg_config_consts:
for col in sg_config_consts['delta_v_terms']:
if col in list(df):
df[col] = df[col].apply(lambda x: x * sg_config_consts['delta_v_multiplier'])
if max_dv_col not in list(df):
self._delta_v_terms = sg_config_consts['delta_v_terms']
df[max_dv_col] = df.apply(self._compute_delta_v, axis=1)
max_dv_idx = df[max_dv_col].idxmax()
data.update({
'TimeStamp(MaxPeak) (UTC)': dt_format(max_g_idx),
'Max Peak (G)': df[max_g_col].loc[max_g_idx].max(),
'DeltaV at Max Peak (in/s)': df[max_dv_col].loc[max_g_idx].max(),
'TimeStamp(MaxDeltaV) (UTC)': dt_format(max_dv_idx),
'MaxDeltaV (in/s)': df[max_dv_col].loc[max_dv_idx].max(),
'Peak at MaxDeltaV (G)': df[max_g_col].loc[max_dv_idx].max(),
})
return data
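    # Field sketch (as built above): the summary carries first/last event
    # times, the event count, the max-G shock with its delta-V, and the
    # max-delta-V shock with its peak G, all under human-readable labels.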
def _get_mask_event(self):
"""
:return: Dict object if a mask has been set:
{'start': '<datetime_str>', 'end': '<datetime_str>'}.
None if not set
"""
mask_stream_slug = self._get_stream_slug_for(SYSTEM_VID['DEVICE_DATA_MASK'])
if mask_stream_slug:
event = DataManager.filter_qs('event', stream_slug=mask_stream_slug).last()
if event:
assert ('start' in event.extra_data)
assert ('end' in event.extra_data)
return event.extra_data
return None
def calculate_trip_date_ranges(self):
"""
Figure out the trip Start and End times:
1. Check for TripStart and Trip End
        2. Check if TripMask is set. If so, use that (if within trip start/end)
:return: Nothing
"""
start_trip_stream_slug = self._get_stream_slug_for(SYSTEM_VID['TRIP_START'])
end_trip_stream_slug = self._get_stream_slug_for(SYSTEM_VID['TRIP_END'])
qs = DataManager.filter_qs(
'data',
stream_slug__in=[start_trip_stream_slug, end_trip_stream_slug]
).order_by('streamer_local_id', 'timestamp')
self.ts_start = self.ts_end = None
for d in qs:
if d.stream_slug == start_trip_stream_slug:
self.ts_start = convert_to_utc(d.timestamp)
if d.stream_slug == end_trip_stream_slug:
self.ts_end = convert_to_utc(d.timestamp)
# Check if the device has a data mask. If so, use instead
mask_data = self._get_mask_event()
if mask_data:
if mask_data['start']:
self.ts_start = str_to_dt_utc(mask_data['start'])
self.data_was_masked = True
if mask_data['end']:
self.ts_end = str_to_dt_utc(mask_data['end'])
self.data_was_masked = True
if not self.ts_start:
logger.info('No TripStart data found. Looking for oldest data')
# For backwards compatibility, if no TRIP_START, look for the oldest Event or Data
first_event = DataManager.filter_qs_using_q(
'event',
self._q_by_stream(self.s_events.slug)
).exclude(extra_data__has_key='error').first()
first_temp = DataManager.filter_qs_using_q(
'data',
self._q_by_stream(self.s_temp.slug)
).first()
if first_event and first_temp:
first = first_temp if convert_to_utc(first_temp.timestamp) < convert_to_utc(first_event.timestamp) else first_event
else:
first = first_temp or first_event
if first:
self.ts_start = convert_to_utc(first.timestamp)
self.no_start_trip = True
else:
logger.warning('No TRIP_START or events found')
return
if self.ts_end and self.ts_end < self.ts_start:
# This is the end of a previous trip. Ignore
self.ts_end = None
logger.info('Trip Date Range: {} to {}'.format(
self.ts_start,
self.ts_end if self.ts_end else 'NOW'
))
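    # Resolution order (as coded above): TRIP_START/TRIP_END data points come
    # first, an optional device data mask then overrides them, and when no
    # start exists the oldest event or temperature reading is used instead.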
def _send_debug_info(self):
# Print debug information
msg = 'No Events Found: {}'.format(self.device_or_block_slug)
q = self._q_by_stream(self.s_events.slug)
msg += '\n --> q= {}'.format(str(q))
event_qs = DataManager.filter_qs_using_q(
'event',
self._q_by_stream(self.s_events.slug)
).exclude(extra_data__has_key='error')
msg += '\n--> events.filter(q): {}'.format(event_qs.count())
q = Q(stream_slug=self.s_events.slug)
event_qs = DataManager.filter_qs_using_q(
'event',
q
)
msg += '\n--> events.filter(slug): {}'.format(event_qs.count())
if self.ts_start:
msg += '\n--> start (UTC): {}'.format(convert_to_utc(self.ts_start))
else:
msg += '\n--> start (UTC): Not Available'
if self.ts_end:
msg += '\n--> end (UTC): {}'.format(convert_to_utc(self.ts_end))
else:
msg += '\n--> end (UTC): Not Available'
logger.info(msg)
# Let customer know
def calculate_trip_summary_data(self, sg_config):
data = {
'Device': str(self.device_or_block_slug),
}
if self.s_events:
q = self._q_by_stream(self.s_events.slug)
logger.info('_q_by_stream({}) = {}'.format(self.s_events.slug, q))
event_qs = DataManager.filter_qs_using_q(
'event',
self._q_by_stream(self.s_events.slug)
).exclude(extra_data__has_key='error')
logger.info('--> Trip {} events: {}'.format(self.device_or_block_slug, event_qs.count()))
else:
event_qs = DataManager.none_qs('event')
data['error'] = 'Error: s_events is None'
logger.warning(data['error'])
if self.ts_start and not self.no_start_trip:
data['START (UTC)'] = dt_format(self.ts_start)
if self.ts_end:
data['END (UTC)'] = dt_format(self.ts_end)
data['Duration (Days)'] = (self.ts_end - self.ts_start) / datetime.timedelta(days=1)
else:
data['START (UTC)'] = 'Not Available'
data['END (UTC)'] = 'Not Available'
assert 'START (UTC)' in data
if self.data_was_masked:
data['Notes'] = 'Trip Start and/or End was overwritten by a set device data mask'
if event_qs.count():
if 'consts' in sg_config:
sg_config_consts = sg_config['consts']
data.update(self._compute_event_data(event_qs, sg_config_consts))
else:
logger.warning('No events found')
data['Max Peak (G)'] = 'Error: No events found'
data['Event Count'] = 0
self._send_debug_info()
if self.s_temp:
df = self._get_time_dataframe(self.s_temp)
if not df.empty:
# Compute time delta so we can show how much time the device was
# above or below the required range
data.update(self._compute_basic_env_stats('Temp', df, 'C'))
data['Below 17C'] = TripSummary.compute_time_active(df=df, condition_met_count=(df['value'] < 17).sum())
data['Above 30C'] = TripSummary.compute_time_active(df=df, condition_met_count=(df['value'] > 30).sum())
else:
logger.warning('No Temp stream found')
if self.s_humidity:
df = self._get_time_dataframe(self.s_humidity)
if not df.empty:
data.update(self._compute_basic_env_stats('Humidity', df, '% RH'))
else:
logger.warning('No Humidity stream found')
if self.s_pressure:
df = self._get_time_dataframe(self.s_pressure)
if not df.empty:
data.update(self._compute_basic_env_stats('Pressure', df, 'Mbar'))
else:
logger.warning('No Pressure stream found')
self.data = data
class EndOfTripReportGenerator(ReportGenerator):
_trips = {}
def __init__(self, msgs, rpt, start, end, sources=None):
super(EndOfTripReportGenerator, self).__init__(msgs, rpt, start, end, sources)
self._trips = {}
if sources:
for source in sources:
obj = get_device_or_block(source)
if not obj:
continue
logger.info('creating new TripSummary for device {}'.format(obj.slug))
self._trips[obj.slug] = TripSummary(obj)
def _email_template(self):
return 'report/end_of_trip'
def _create_summary_event(self, trip):
if not trip.s_summary:
# Need to create Summary Stream
if isinstance(trip.device_or_block, Device):
device = trip.device_or_block
project = device.project
block = None
else:
block = trip.device_or_block
device = trip.device_or_block.device
                project = None
trip.s_summary = StreamId.objects.create_stream(
project=project,
device=device,
block=block,
variable=None,
data_type='E0',
var_lid=_TRIP_SUMMARY_VID,
var_name='Trip Summary',
created_by=device.claimed_by,
data_label='Trip Summary: {}'.format(trip.device_or_block_slug)
)
summary = DataManager.build(
'event',
stream_slug=trip.s_summary.slug,
timestamp=timezone.now(),
device_timestamp=int(time.time()),
streamer_local_id=1,
extras={
'extra_data': trip.data,
},
)
summary.deduce_slugs_from_stream_id()
try:
logger.info('Uploading Trip Summary: {}'.format(trip.s_summary.slug))
DataManager.save('event', summary)
except Exception as e:
            raise WorkerActionHardError(e)
def _get_obj_properties(self, device_or_block):
if device_or_block:
return GenericProperty.objects.object_properties_qs(obj=device_or_block, is_system=False)
return None
def _send_summary_email(self, trip, device_or_block, sg_config):
template = self._email_template()
def_properties = []
property_keys = []
data_table = []
attachment = None
if 'summary_keys' in sg_config:
def_properties = sg_config['summary_keys']
if 'property_keys' in sg_config:
property_keys = sg_config['property_keys']
if len(def_properties) == 0:
data_table.append({
'name': 'Error',
'value': 'Bad SG configuration. Missing summary_keys'
})
try:
device_model = str(device_or_block.template)
except Exception:
device_model = 'Unk'
property_table = [
{
'name': 'Device',
'value': str(trip.device_or_block_slug)
},
{
'name': 'Model',
'value': device_model
}
]
property_map = {}
property_qs = self._get_obj_properties(device_or_block)
if property_qs:
for property in property_qs:
property_map[property.name] = property.value
for key in property_keys:
if key in property_map:
row = {
'name': key,
'value': property_map[key]
}
property_table.append(row)
for key in def_properties:
if key in trip.data:
row = {
'name': key,
'value': trip.data[key]
}
if isinstance(trip.data[key], np.float64) or isinstance(trip.data[key], float):
row['value'] = '{0:.2f}'.format(trip.data[key])
data_table.append(row)
if 'Notes' in trip.data:
row = {
'name': 'Notes',
'value': trip.data['Notes']
}
data_table.append(row)
# Create CSV to attach
rows = property_table + [{'name': '', 'value': ''}] + data_table
if len(rows):
csvfile = StringIO()
fieldnames = list(rows[0].keys())
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in rows:
writer.writerow(row)
attachment = {
'filename': '{}.summary.csv'.format(str(trip.device_or_block_slug)),
'content': csvfile.getvalue(),
'mimetype': 'text/csv'
}
ctx = {
'device_url': device_or_block.get_webapp_url(),
'property_table': property_table,
'data_table': data_table,
'user': str(self._rpt.created_by) if self._rpt else None,
'org': str(self._rpt.org) if self._rpt else None
}
self._send_email(template, ctx, attachment)
def add_streams_for_qs(self, qs):
for stream in qs:
stream_slug = IOTileStreamSlug(stream.slug)
parts = stream_slug.get_parts()
assert 'variable' in parts
assert 'device' in parts
assert 'project' in parts
variable = IOTileVariableSlug(parts['variable'])
lid = variable.formatted_local_id()
device_or_block_slug = parts['device']
# TODO: Fix correctly
# HACK: Try to recover data block
block_id, device_id = get_device_and_block_by_did(device_or_block_slug)
if block_id > 0:
# This is a block
device_gid = device_or_block_slug.split('--')[1]
device_or_block_slug = '--'.join(['b', device_gid])
if device_or_block_slug not in self._trips:
logger.info('creating new TripSummary for device {}'.format(device_or_block_slug))
device_or_block = get_device_or_block(device_or_block_slug)
if not device_or_block:
continue
self._trips[device_or_block_slug] = TripSummary(device_or_block)
logger.info('Adding stream {} ({}) for trip summary for {}'.format(stream, lid, device_or_block_slug))
self._trips[device_or_block_slug].add_stream(lid, stream)
def process_config(self):
# No configuration available yet
pass
def generate_user_report(self):
"""
Generate an End of Trip Summary Report
:return: Nothing
"""
for slug in self._trips.keys():
trip = self._trips[slug]
device_or_block = trip.device_or_block
sg = device_or_block.sg
if 'analysis' in sg.ui_extra and 'trip_summary' in sg.ui_extra['analysis']:
sg_config = sg.ui_extra['analysis']['trip_summary']
else:
sg_config = {}
if not trip.s_events:
# We have no Events. It is possible the database is not up to date
# Reschedule to try again later
if self.reschedule_callback:
logger.warning('No events. Rescheduling')
self.reschedule_callback(900)
trip.calculate_trip_date_ranges()
if trip.ts_start:
# If no start date, there is no data to compute
trip.calculate_trip_summary_data(sg_config)
if trip.data:
# Create a TRIP_SUMMARY StreamEventData record
self._create_summary_event(trip)
# Send email with summary
self._send_summary_email(trip, device_or_block, sg_config)
else:
logger.info('Cannot create Trip Summary Report: No START signal found')
| 36.496835 | 131 | 0.579598 | 2,897 | 23,066 | 4.337245 | 0.133586 | 0.029924 | 0.048627 | 0.028412 | 0.221966 | 0.152726 | 0.094708 | 0.071389 | 0.057939 | 0.045285 | 0 | 0.004213 | 0.320775 | 23,066 | 631 | 132 | 36.554675 | 0.797792 | 0.087315 | 0 | 0.141949 | 0 | 0 | 0.100274 | 0 | 0 | 0 | 0 | 0.001585 | 0.016949 | 1 | 0.04661 | false | 0.002119 | 0.04661 | 0.004237 | 0.165254 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dc47497d650017a733594adb4646f7c7ed01118 | 835 | py | Python | websocketSessions/asgi.py | Andrew-Chen-Wang/django-async-sessions | f3208958d2cf3887bef407d914d460be414a2f4b | [
"Apache-2.0"
] | null | null | null | websocketSessions/asgi.py | Andrew-Chen-Wang/django-async-sessions | f3208958d2cf3887bef407d914d460be414a2f4b | [
"Apache-2.0"
] | 3 | 2020-10-09T23:23:30.000Z | 2021-01-02T04:49:44.000Z | websocketSessions/asgi.py | Andrew-Chen-Wang/django-async-sessions | f3208958d2cf3887bef407d914d460be414a2f4b | [
"Apache-2.0"
] | null | null | null | """
ASGI config for websocketSessions project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'websocketSessions.settings')
django_application = get_asgi_application()
from public.websockets.index import websocket_application
async def application(scope, receive, send):
if scope['type'] == 'http':
# Let Django handle HTTP requests
await django_application(scope, receive, send)
elif scope['type'] == 'websocket':
await websocket_application(scope, receive, send)
else:
raise NotImplementedError(f"Unknown scope type {scope['type']}")
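# Serving sketch (assumed deployment, not part of this file): run with an ASGI
# server such as `uvicorn websocketSessions.asgi:application`; 'http' scopes
# are delegated to Django and 'websocket' scopes to websocket_application.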
| 26.935484 | 78 | 0.742515 | 104 | 835 | 5.865385 | 0.596154 | 0.059016 | 0.113115 | 0.132787 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002833 | 0.154491 | 835 | 30 | 79 | 27.833333 | 0.86119 | 0.306587 | 0 | 0 | 0 | 0 | 0.180385 | 0.084063 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dcbe72118a2baf6109f8507aeadc4682aa24b10 | 5,151 | py | Python | baseline_fasttext.py | hannahyao/DeepBlocker | bca0966979c3d9f9dec95d66b30c7e5d27c61f3c | [
"BSD-3-Clause"
] | null | null | null | baseline_fasttext.py | hannahyao/DeepBlocker | bca0966979c3d9f9dec95d66b30c7e5d27c61f3c | [
"BSD-3-Clause"
] | null | null | null | baseline_fasttext.py | hannahyao/DeepBlocker | bca0966979c3d9f9dec95d66b30c7e5d27c61f3c | [
"BSD-3-Clause"
] | null | null | null | import pandas as pd
import fasttext
from scipy.spatial import distance
from itertools import product
import sys
import numpy as np
model = fasttext.load_model("embedding/wiki.en/wiki.en.bin")
#Return sentence embeddings for a list of words
def get_word_embedding(list_of_words):
return [model.get_sentence_vector(word.replace("\n"," ")) for word in list_of_words]
def cos_sim(column1, column2):
column1 = column1.dropna().sort_values()
column2 = column2.dropna().sort_values()
column_embeddings1 = get_word_embedding(list(column1))
col_avg1 = np.average(np.array(column_embeddings1), axis=0)
column_embeddings2 = get_word_embedding(list(column2))
col_avg2 = np.average(np.array(column_embeddings2), axis=0)
cosine_similarity = 1 - distance.cdist(col_avg1.reshape(1,-1), col_avg2.reshape(1,-1), metric="cosine")
# print(cosine_similarity)
return cosine_similarity
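# Minimal sketch (toy columns, not NYC data): each column is reduced to the
# mean of its fastText sentence vectors before comparison, so
#   cos_sim(pd.Series(['dog', 'cat']), pd.Series(['puppy', 'kitten']))
# returns a 1x1 array; higher values indicate more semantically similar columns.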
def generate_column_similarity(table1,table2):
column_compare_combos = list(product(table1.columns, table2.columns))
cs_list = []
for item in column_compare_combos:
cs = cos_sim(table1[item[0]], table2[item[1]])
# print(cs)
cs_list.append(cs[0][0])
return cs_list,column_compare_combos
def make_prediction_df(table_names, table_files):
table1_name = table_names[0]
table2_name = table_names[1]
dfa = pd.read_csv(table_files[0])
dfb = pd.read_csv(table_files[1])
dfa = dfa.astype(str)
dfb = dfb.astype(str)
if len(dfb) > 5000:
return None
print(len(dfa))
print(len(dfb))
cs_list,column_compare_combos = generate_column_similarity(dfa,dfb)
test = pd.DataFrame(column_compare_combos,cs_list,columns=['ltable_id','rtable_id']).reset_index()
test = test.rename(columns={'index':'score'})
# test['join'] = test.apply(lambda x: 1 if x.score > 0.7 else 0,axis=1)
test['ltable_id'] = table1_name + '.' + test['ltable_id']
test['rtable_id'] = table2_name + '.' + test['rtable_id']
return test
def compute_blocking_statistics(table_names,candidate_set_df, golden_df,left_df, right_df):
#Now we have two data frames with two columns ltable_id and rtable_id
# If we do an equi-join of these two data frames, we will get the matches that were in the top-K
merged_df = pd.merge(candidate_set_df, golden_df, on=['ltable_id', 'rtable_id'])
# Added to calculate total false positives
false_pos = candidate_set_df[~candidate_set_df['ltable_id'].isin(merged_df['ltable_id'])&(~candidate_set_df['rtable_id'].isin(merged_df['rtable_id']))]
    if len(golden_df) > 0 and (len(merged_df) + len(false_pos)) > 0:
        precision = float(len(merged_df)) / (len(merged_df) + len(false_pos))
    else:
        precision = "N/A"
left_num_tuples = len(left_df)
right_num_tuples = len(right_df)
statistics_dict = {
"left_table": table_names[0],
"right_table": table_names[1],
"left_num_tuples": left_num_tuples,
"right_num_tuples": right_num_tuples,
"candidate_set_length": len(candidate_set_df),
"golden_set_length": len(golden_df),
"merged_set_length": len(merged_df),
"false_positives_length": len(false_pos),
"precision": fp,
"recall": float(len(merged_df)) / len(golden_df) if len(golden_df) > 0 else "N/A",
"cssr": len(candidate_set_df) / (left_num_tuples * right_num_tuples)
}
return statistics_dict
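# Worked example (hypothetical counts): with 8 merged matches, 2 false
# positives and 10 golden pairs, precision = 8 / (8 + 2) = 0.8 and
# recall = 8 / 10 = 0.8; cssr is the candidate-set size divided by the size
# of the full cross product of the two tables.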
def main():
# usage: python baseline_fasttext.py kvhd-5fmu 2j8u-wtju
args = sys.argv[1:]
# table_names = ('kvhd-5fmu','2j8u-wtju')
# table_files = ('nyc_cleaned/kvhd-5fmu.csv','nyc_cleaned/2j8u-wtju.csv')
output_file = 'nyc_output/'+ args[0] + '-output.txt'
with open(output_file) as f:
lines = f.readlines()
line_df = pd.DataFrame(lines,columns=['full'])
line_df = line_df['full'].str.split("JOIN", n = 1, expand = True)
line_df = line_df.replace('\n',' ', regex=True)
line_df.columns = ['ltable_id','rtable_id']
    if len(args) == 2:
joining_tables = [args[1]]
else:
joining_tables = line_df['rtable_id'].str.split('.').apply(lambda x: x[0].strip()).unique()
table_file1 = 'nyc_cleaned/' + args[0] + '.csv'
stats_list = []
for table in joining_tables:
print(table)
table_file2 = 'nyc_cleaned/' + table + '.csv'
table_names = (args[0],table)
table_files = (table_file1,table_file2)
print("Getting cosine_similarity")
test = make_prediction_df(table_names, table_files)
if test is None:
continue
print("Compute stats")
candidate_set_df = test[test['score']> 0.95] #change to top 10?
        golden_df = line_df[line_df['ltable_id'].str.contains(table_names[0])]
        golden_df = golden_df[golden_df['rtable_id'].str.contains(table_names[1])]
golden_df.ltable_id = golden_df.ltable_id.str.strip()
golden_df.rtable_id = golden_df.rtable_id.str.strip()
candidate_set_df['ltable_id'] = candidate_set_df['ltable_id'].astype('str')
golden_df['ltable_id'] = golden_df['ltable_id'].astype('str')
stats = compute_blocking_statistics(table_names,candidate_set_df, golden_df,test['ltable_id'].unique(), test['rtable_id'].unique())
print(stats)
stats_list.append(stats)
print(stats_list)
if __name__ == "__main__":
    main()
| 38.729323 | 155 | 0.702388 | 782 | 5,151 | 4.345269 | 0.240409 | 0.037669 | 0.045321 | 0.023543 | 0.247793 | 0.128311 | 0.073573 | 0.052384 | 0.033549 | 0.033549 | 0 | 0.019116 | 0.157057 | 5,151 | 132 | 156 | 39.022727 | 0.763473 | 0.107746 | 0 | 0.019608 | 0 | 0 | 0.115401 | 0.011126 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0.009804 | 0.176471 | 0.068627 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dcc0eca85cc75b36357f76f58ed53d69e385166 | 2,515 | py | Python | generated-libraries/python/netapp/storage_adapter/adapter_sff_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | 2 | 2017-03-28T15:31:26.000Z | 2018-08-16T22:15:18.000Z | generated-libraries/python/netapp/storage_adapter/adapter_sff_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | generated-libraries/python/netapp/storage_adapter/adapter_sff_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | from netapp.netapp_object import NetAppObject
class AdapterSffInfo(NetAppObject):
"""
Information on small form factor
transceiver/connector (also known as sff).
"""
_part_number = None
@property
def part_number(self):
"""
Vendor's part number for the sff.
If data not available, value will be "not_available".
"""
return self._part_number
@part_number.setter
def part_number(self, val):
if val != None:
self.validate('part_number', val)
self._part_number = val
_serial_number = None
@property
def serial_number(self):
"""
Serial number for sff.
If data not available, value will be "not_available".
"""
return self._serial_number
@serial_number.setter
def serial_number(self, val):
if val != None:
self.validate('serial_number', val)
self._serial_number = val
_vendor = None
@property
def vendor(self):
"""
sff vendor name.
If data not available, value will be "not_available".
"""
return self._vendor
@vendor.setter
def vendor(self, val):
if val != None:
self.validate('vendor', val)
self._vendor = val
_speed_capabilities = None
@property
def speed_capabilities(self):
"""
Comma separated list of speed capabilities of
the sff. Example: "1, 2 Gbit/Sec".
If data not available, value will be "not_available".
"""
return self._speed_capabilities
@speed_capabilities.setter
def speed_capabilities(self, val):
if val != None:
self.validate('speed_capabilities', val)
self._speed_capabilities = val
@staticmethod
def get_api_name():
return "adapter-sff-info"
@staticmethod
def get_desired_attrs():
return [
'part-number',
'serial-number',
'vendor',
'speed-capabilities',
]
def describe_properties(self):
return {
'part_number': { 'class': basestring, 'is_list': False, 'required': 'required' },
'serial_number': { 'class': basestring, 'is_list': False, 'required': 'required' },
'vendor': { 'class': basestring, 'is_list': False, 'required': 'required' },
'speed_capabilities': { 'class': basestring, 'is_list': False, 'required': 'required' },
}
| 29.244186 | 100 | 0.578131 | 267 | 2,515 | 5.258427 | 0.243446 | 0.071225 | 0.042735 | 0.051282 | 0.366097 | 0.366097 | 0.366097 | 0.266382 | 0.149573 | 0.149573 | 0 | 0.001162 | 0.315706 | 2,515 | 85 | 101 | 29.588235 | 0.814643 | 0.177336 | 0 | 0.178571 | 0 | 0 | 0.141888 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.196429 | false | 0 | 0.017857 | 0.053571 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dcc0f638b591e201fa5ab22b4c0cc05111e6bd7 | 552 | py | Python | daily_update.py | prathits012/songData | 8f42cacc55f5a62d22e968619d134f5771781f64 | [
"MIT"
] | null | null | null | daily_update.py | prathits012/songData | 8f42cacc55f5a62d22e968619d134f5771781f64 | [
"MIT"
] | 1 | 2021-01-05T04:11:14.000Z | 2021-01-05T04:11:14.000Z | daily_update.py | prathits012/songData | 8f42cacc55f5a62d22e968619d134f5771781f64 | [
"MIT"
] | 1 | 2021-01-05T04:01:38.000Z | 2021-01-05T04:01:38.000Z | #!/usr/bin/python3
from datetime import datetime, timedelta
import os
yesterday = datetime.now() - timedelta(1)
yesterday = datetime.strftime(yesterday, '%Y-%m-%d')
a_year_ago = datetime.now() - timedelta(365)
a_year_ago = datetime.strftime(a_year_ago, '%Y-%m-%d')
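# Assumed scheduling sketch (not part of this file): run once a day, e.g. via
#   0 6 * * * /usr/bin/python3 daily_update.py
# so the charts window always covers the 365 days ending yesterday.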
os.system(f"python3 processing/charts.py --START-DATE {a_year_ago} --END-DATE {yesterday}")
os.system("python3 processing/spotifyCsvToDict.py")
os.system("python3 processing/genius.py")
os.system("python3 analysis/features.py")
os.system("python3 analysis/Keyword_popularity.py") | 27.6 | 91 | 0.753623 | 80 | 552 | 5.0875 | 0.4125 | 0.09828 | 0.078624 | 0.125307 | 0.12285 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019802 | 0.085145 | 552 | 20 | 92 | 27.6 | 0.786139 | 0.030797 | 0 | 0 | 0 | 0 | 0.420561 | 0.11215 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dcc92c2e91c3468f162155377996e63c46c0c4b | 5,251 | py | Python | breed.py | albina77/bio | 18d46bc12ad5f40ed44a995c87010e2504653313 | [
"Apache-2.0"
] | null | null | null | breed.py | albina77/bio | 18d46bc12ad5f40ed44a995c87010e2504653313 | [
"Apache-2.0"
] | null | null | null | breed.py | albina77/bio | 18d46bc12ad5f40ed44a995c87010e2504653313 | [
"Apache-2.0"
] | null | null | null | from typing import List
import numpy as np
from random import choices
from models import Genotype, Generation
from selection import Selection
class Breed:
def __init__(self, parent_genotypes: np.ndarray, possibilites: List[float], population_of_progeny: int,
maximum_feature=None, selection: str = "gebv", max_age: int = 1, max_number: int = sys.maxsize):
if len(parent_genotypes.shape) != 3 or parent_genotypes.shape[:2] != (2, 2) or any(_ <= 0 for _ in parent_genotypes.shape):
raise AttributeError("Массив генотипов особей задан неверно! Размерность должна быть (2 x 2 x N)")
if max_age <= 0:
raise AttributeError("Максимальный возраст сущнсоти должен быть >= 1")
if not Selection.selection_implemented(selection):
raise NotImplementedError(f"Селекция {selection} не реализована!")
self.generations: List[Generation] = [Generation(
index=0,
genotypes=list(Genotype(genotype) for genotype in parent_genotypes),
population=parent_genotypes.shape[0]
)]
self.possibilities = possibilites
self.population_of_progeny = population_of_progeny
self._current_generation = 0
self.maximum_feature = (self.generations[0].genotypes[0].matrix.shape[1] * 2
if maximum_feature is None
else maximum_feature)
self.selection = selection
self.max_age = max_age
self.max_number = max_number
def evaluate(self, max_generations: int = None):
current_generation_number = 0
while True:
if self._is_max_generation(current_generation_number):
return current_generation_number
parents_generation = self.get_generation(current_generation_number)
child_generation = self.get_child_generation(parents_generation)
young_parents_genotypes = self.filter_generation_for_max_age(parents_generation)
for parent in young_parents_genotypes:
parent.age += 1
child_generation.genotypes.extend(young_parents_genotypes)
child_generation.population = len(child_generation.genotypes)
self.generations.append(child_generation)
if max_generations and current_generation_number == max_generations:
return current_generation_number
current_generation_number += 1
def get_generation(self, generation_index: int) -> Generation:
return list(filter(lambda generation: generation.index == generation_index, self.generations))[0]
def get_child_generation(self, parent_generation: Generation):
selection = Selection(parent_generation, possibilites=self.possibilities)
parent1, parent2 = getattr(selection, f"_{self.selection}_selection")()
children = self.get_reproduce(parent1, parent2)
return Generation(index=parent_generation.index + 1, genotypes=children, population=len(children))
def filter_generation_for_max_age(self, generation: Generation) -> List[Genotype]:
return list(filter(lambda genotype: genotype.age < self.max_age, generation.genotypes))
def get_reproduce(self, parent1: Genotype, parent2: Genotype) -> List[Genotype]:
"""
Return children(which is amount=population_of_progeny)' genotype from parents' genotypes
"""
children = []
for _ in range(self.population_of_progeny):
new_genotype = None
for genotype in (parent1, parent2):
if new_genotype is not None:
new_genotype = np.append(
new_genotype, [self.get_gamete(genotype)],
axis=0
)
else:
new_genotype = np.array([self.get_gamete(genotype), ])
children.append(Genotype(matrix=new_genotype))
return children
def get_genotype_indexies(self) -> List[int]:
"""
        Get the indices for choosing a haplotype in the genotype
"""
return_list = []
for i in range(len(self.possibilities)):
if i == 0:
value = choices((0, 1), weights=(0.5, 0.5))[0]
else:
value = choices(
(return_list[i - 1], 1 - return_list[i - 1]),
weights=(1 - self.possibilities[i - 1], self.possibilities[i - 1])
)[0]
return_list.append(value)
return return_list
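    # Illustrative sketch (hypothetical values): with
    # self.possibilities = [0.5, 0.1, 0.1], the first index is a fair coin
    # flip between the two haplotypes; each subsequent index i repeats
    # index i-1 with probability 1 - possibilities[i-1] and switches with
    # probability possibilities[i-1], so the entries act as per-locus
    # recombination probabilities.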
def get_gamete(self, genotype: Genotype) -> np.ndarray:
"""
Return gamete from genotype input
"""
indexies = self.get_genotype_indexies()
return np.array([
genotype[indexies[i], i] for i in range(genotype.matrix.shape[1])
])
def _is_max_generation(self, generation_index: int):
generation = self.get_generation(generation_index)
for genotype in generation.genotypes:
if self._is_max_feature_genotype(genotype):
return True
return False
def _is_max_feature_genotype(self, genotype: Genotype):
return True if np.sum(genotype.matrix) >= self.maximum_feature else False
| 44.12605 | 131 | 0.639878 | 584 | 5,251 | 5.534247 | 0.193493 | 0.037129 | 0.049814 | 0.014233 | 0.053527 | 0.02599 | 0 | 0 | 0 | 0 | 0 | 0.012355 | 0.275567 | 5,251 | 118 | 132 | 44.5 | 0.837277 | 0.032375 | 0 | 0.043478 | 0 | 0 | 0.037318 | 0.005388 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.054348 | 0.032609 | 0.293478 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dd2e2cab35ebc0537aa529a01f20f1f63c810b9 | 8,311 | py | Python | Python/Rasterize/CLUS_GDAL_Rasterize_clusdb.py | bcgov/clus | e0d4e49f031126ee40f36b338651b9fddc180f8a | [
"Apache-2.0"
] | 27 | 2018-07-26T23:05:54.000Z | 2022-03-15T22:55:46.000Z | Python/Rasterize/CLUS_GDAL_Rasterize_clusdb.py | ElizabethKleynhans/clus | a02aef861712ab62bb5b5877208a138e0074e365 | [
"Apache-2.0"
] | 41 | 2018-04-25T19:31:29.000Z | 2022-03-28T17:08:36.000Z | Python/Rasterize/CLUS_GDAL_Rasterize_clusdb.py | ElizabethKleynhans/clus | a02aef861712ab62bb5b5877208a138e0074e365 | [
"Apache-2.0"
] | 10 | 2018-04-25T17:25:10.000Z | 2022-02-16T21:53:23.000Z | #-------------------------------------------------------------------------------
# Name: CLUS_GDAL_Rasterize_VRI
# Purpose: This script is designed to read a list of input PostGIS source
# Vectors and Rasterize them to GeoTiff using GDAL Rasterize and
# then load them into PostGIS as rasters using raster2pgsql
#
# Author: Mike Fowler
# Spatial Data Analyst
# Forest Analysis and Inventory Branch - BC Government
# Workflow developed by Kyle Lochhead, converted into Python Script
#
# Created: 30-01-2019
#
#-------------------------------------------------------------------------------
import os, sys, subprocess
import shutil, getpass, datetime
#--Globals
global kennyloggins
pfx = '{0}_'.format(os.path.basename(os.path.splitext(sys.argv[0])[0]))
logTime = ''
def WriteOutErrors(lstErrors):
errLog = os.path.join(os.path.dirname(sys.argv[0]), pfx + logTime + ".errors.log")
fLog = open(errLog, 'w')
lstLog = []
lstLog.append("------------------------------------------------------------------\n")
lstLog.append("Error Log file for {0}\n".format(sys.argv[0]))
lstLog.append("Date:{0} \n".format(datetime.datetime.now().strftime("%B %d, %Y - %H%M")))
lstLog.append("User:{}\n".format(getpass.getuser()))
lstLog.append("\n")
lstLog.append("------------------------------------------------------------------\n")
sLog = ''.join(lstLog)
fLog.write(sLog)
fLog.write("List of Errors from Script------------------------------------------------------------\n")
for err in lstErrors:
fLog.write('{0}\n'.format(str(err)))
fLog.write("------------------------------------------------------------------\n")
fLog.close()
def CreateLogFile(srcDB, outDB, tiffDir, bMsg=False):
global logTime
logTime = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
currLog = os.path.join(os.path.dirname(sys.argv[0]), pfx + datetime.datetime.now().strftime("%Y%m%d_%H%M%S.log"))
fLog = open(currLog, 'w')
lstLog = []
lstLog.append("------------------------------------------------------------------\n")
lstLog.append("Log file for {0}\n".format(sys.argv[0]))
lstLog.append("Date:{0} \n".format(datetime.datetime.now().strftime("%B %d, %Y - %H%M")))
lstLog.append("User:{}\n".format(getpass.getuser()))
lstLog.append("Source DB:{}\n".format(srcDB))
lstLog.append("Output DB:{}\n".format(outDB))
lstLog.append("TIFF Directory:{}\n".format(tiffDir))
lstLog.append("\n")
lstLog.append("------------------------------------------------------------------\n")
sLog = ''.join(lstLog)
fLog.write(sLog)
if bMsg:
print(sLog)
return fLog
def WriteLog(fLog, sMessage, bMsg=False):
ts = datetime.datetime.now().strftime("%B %d, %Y - %H%M")
sMsg = '{0} - {1}'.format(ts, sMessage)
fLog.write(sMsg)
if bMsg:
print(sMsg)
def LoadListFromCSV(inCSV):
import csv
processLst = []
with open(inCSV) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
pass
else:
#processLst.append([row[0], row[1], row[2], row[3]])
processLst.append(row)
line_count += 1
return processLst
def Rasterize(db, sql, fld, outWrk, outName):
WriteLog(kennyloggins, 'Rasterize..........................\n', True)
db = 'PG:"{0}"'.format(db)
fld = fld.lower()
outTIFF = os.path.join(outWrk, '{0}.tif'.format(outName))
sql = '"{0}"'.format(sql)
WriteLog(kennyloggins, '-----{0}\n'.format(db), True)
WriteLog(kennyloggins, '-----{0}\n'.format(fld), True)
WriteLog(kennyloggins, '-----{0}\n'.format(outTIFF), True)
WriteLog(kennyloggins, '-----{0}\n'.format(sql), True)
#--Build the command to run the GDAL Rasterize
cmd = 'gdal_rasterize -tr 100 100 -te 273287.5 359687.5 1870587.5 1735787.5 -a {0} {1} -sql {2} {3}'.format(fld, db, sql, outTIFF)
WriteLog(kennyloggins, '-----Running CMD:\n', True)
WriteLog(kennyloggins, '{0}\n'.format(cmd), True)
try:
subprocess.check_output(cmd, shell=True)
except subprocess.CalledProcessError as e:
WriteLog(kennyloggins, '{0}\n'.format(str(e.output)), True)
raise Exception(str(e.output))
return outTIFF
def TIFF2PostGIS(tiff, db, outSchema, outName):
WriteLog(kennyloggins, 'TIFF2PostGIS..........................\n', True)
WriteLog(kennyloggins, '-----{0}\n'.format(tiff), True)
WriteLog(kennyloggins, '-----{0}\n'.format(db), True)
WriteLog(kennyloggins, '-----{0}\n'.format(outName), True)
cmd = 'raster2pgsql -s 3005 -d -I -C -M {0} -t 100x100 {1}.{2} | psql {3}'.format(tiff, outSchema, outName, db)
WriteLog(kennyloggins, '-----Running CMD:\n', True)
WriteLog(kennyloggins, '{0}\n'.format(cmd), True)
try:
#subprocess.call(cmd, shell=True)
subprocess.check_output(cmd, shell=True)
except subprocess.CalledProcessError as e:
WriteLog(kennyloggins, '{0}\n'.format(str(e.output)), True)
raise Exception(str(e.output))
if __name__ == '__main__':
#--Read inputs into a Processing List
inputCSV = os.path.join(os.path.dirname(sys.argv[0]), '{0}Input.csv'.format(pfx))
#--Read the input CSV to get the list of queries,layers to rasterize
processList =LoadListFromCSV(inputCSV)
errList = []
#--Setting the source DB and Output DB arguments. If not supplied we will defalut to localhost, postgres
if len(sys.argv) > 1:
srcDB = sys.argv[1]
else:
srcDB = "host='localhost' dbname = 'clus' port='5432' user='postgres' password='postgres'"
if len(sys.argv) > 2:
outDB = sys.argv[2]
else:
outDB = "-d clus"
#outDB = "-d clus"
#outDB = "-d clus -h DC052586.idir.bcgov -U postgres"
tiffWork = os.path.join(os.environ['TEMP'], '{0}TIFF'.format(pfx))
#--Create a Log File
kennyloggins = CreateLogFile(srcDB, outDB, tiffWork, True)
if not os.path.exists(tiffWork):
os.makedirs(tiffWork)
WriteLog(kennyloggins, '--------------------------------------------------------------------------------------\n', True)
for itm in processList:
bRemoveTIFF = False
#--Only process the input records with a PROCESS = 'Y'
if itm[4].upper() == 'Y':
WriteLog(kennyloggins, '--------------------------------------------------------------------------------------\n', True)
outSchema = itm[0]
outName = itm[1]
fld = itm[2]
sql = itm[3]
retainTIFF = itm[5].upper()
if retainTIFF =='N': bRemoveTIFF = True
WriteLog(kennyloggins, 'Processing:{0}\n'.format(str(itm)), True)
try:
WriteLog(kennyloggins, 'Running Rasterize....\n', True)
#--Rasterize the source to Tiff
outTIFF = Rasterize(srcDB, sql, fld, tiffWork, outName)
WriteLog(kennyloggins, 'Running TIFF2PostGIS....\n', True)
#--Load the TIFF to Postgres
TIFF2PostGIS(outTIFF, outDB, outSchema, outName)
#--Delete the TIFF if flagged to do so
if bRemoveTIFF:
os.remove(outTIFF)
WriteLog(kennyloggins, '--------------------------------------------------------------------------------------\n', True)
except:
WriteLog(kennyloggins, '--------------------------------------------------------------------------------------\n', True)
#WriteLog(kennyloggins, 'Error: {0}\n'.format(str(e)), True)
errList.append(itm)
if len(errList) > 0:
WriteLog(kennyloggins, 'Writing out Errors......\n', True)
WriteOutErrors(errList)
WriteLog(kennyloggins, '--------------------------------------------------------------------------------------\n', True)
WriteLog(kennyloggins, 'Script Complete-----------------------------------------------------------------------\n', True)
WriteLog(kennyloggins, '--------------------------------------------------------------------------------------\n', True)
kennyloggins.close()
| 47.491429 | 136 | 0.516183 | 907 | 8,311 | 4.703418 | 0.251378 | 0.126582 | 0.033755 | 0.056728 | 0.338022 | 0.325832 | 0.281763 | 0.259025 | 0.259025 | 0.244257 | 0 | 0.019466 | 0.202623 | 8,311 | 174 | 137 | 47.764368 | 0.624264 | 0.155216 | 0 | 0.316547 | 0 | 0.021583 | 0.269099 | 0.156509 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043165 | false | 0.035971 | 0.021583 | 0 | 0.086331 | 0.014388 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dd72ad533d396124980e7cf07ceef2413be3a83 | 6,344 | py | Python | social_rl/gym_multigrid/envs/stag_hunt.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | social_rl/gym_multigrid/envs/stag_hunt.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | social_rl/gym_multigrid/envs/stag_hunt.py | pedersor/google-research | 6fa751dd261b3f6d918fd2cd35efef5d8bf3eea6 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the multi-agent stag hunt environments.
One agent must toggle the stag while another agent is adjacent.
"""
import gym_minigrid.minigrid as minigrid
import numpy as np
from social_rl.gym_multigrid import multigrid
from social_rl.gym_multigrid.register import register
class Stag(minigrid.Box):
"""Stag."""
def __init__(self, **kwargs):
super().__init__('green')
self.toggles = 0
def can_pickup(self):
return False
def can_overlap(self):
return True
class Plant(minigrid.Ball):
"""Plant."""
def __init__(self, **kwargs):
super().__init__('yellow')
self.toggles = 0
def can_pickup(self):
return False
def can_overlap(self):
return True
class StagHuntEnv(multigrid.MultiGridEnv):
"""Grid world environment with two competing goals."""
def __init__(self,
size=15,
n_agents=2,
n_stags=2,
n_plants=2,
n_clutter=0,
penalty=1.0,
max_steps=250,
**kwargs):
"""Constructor for multi-agent gridworld environment generator.
Args:
size: Number of tiles for the width and height of the square grid.
n_agents: The number of agents playing in the world.
n_stags: The number of stags in the environment.
n_plants: The number of plants in the environment.
n_clutter: The number of blocking objects in the environment.
penalty: Penalty for collecting a stag alone.
max_steps: Number of environment steps before the episode end (max episode
length).
**kwargs: See superclass.
"""
self.n_clutter = n_clutter
self.n_stags = n_stags
self.stags = []
for _ in range(n_stags):
self.stags.append(Stag())
self.plants = []
for _ in range(n_plants):
self.plants.append(Plant())
self.penalty = penalty
super().__init__(
grid_size=size,
max_steps=max_steps,
n_agents=n_agents,
fully_observed=True,
**kwargs)
self.metrics = {'good_stag': 0, 'bad_stag': 0, 'plant': 0}
def _gen_grid(self, width, height):
self.grid = multigrid.Grid(width, height)
self.grid.wall_rect(0, 0, width, height)
for stag in self.stags:
self.place_obj(stag, max_tries=100)
for plant in self.plants:
self.place_obj(plant, max_tries=100)
for _ in range(self.n_clutter):
self.place_obj(minigrid.Wall(), max_tries=100)
self.place_agent()
self.mission = 'Toggle the stag at the same time'
def move_agent(self, agent_id, new_pos):
stepped_on = self.grid.get(*new_pos)
if stepped_on:
if isinstance(stepped_on, Plant):
self.metrics['plant'] += 1
self.rewards[agent_id] += 1
elif isinstance(stepped_on, Stag):
good_stag = False
for i, pos in enumerate(self.agent_pos):
if i == agent_id:
continue
if np.sum(np.abs(pos - new_pos)) == 1:
good_stag = True
break
if good_stag:
self.metrics['good_stag'] += 1
self.rewards += 5
else:
self.metrics['bad_stag'] += 1
self.rewards[agent_id] -= self.penalty
stepped_on.cur_pos = None
super().move_agent(agent_id, new_pos)
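  # Reward scheme sketch (as implemented above): stepping on a plant gives the
  # moving agent +1; stepping on a stag gives every agent +5 when another agent
  # stands at Manhattan distance 1, otherwise the mover alone loses `penalty`.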
def step(self, action):
self.rewards = np.zeros(self.n_agents)
obs, _, done, info = multigrid.MultiGridEnv.step(self, action)
for stag in self.stags:
if stag.cur_pos is None: # Object has been picked up
self.place_obj(stag, max_tries=100)
for plant in self.plants:
if plant.cur_pos is None: # Object has been picked up
self.place_obj(plant, max_tries=100)
reward = self.rewards.tolist()
return obs, reward, done, info
class EmptyStagHuntEnv6x6(StagHuntEnv):
def __init__(self, **kwargs):
super().__init__(size=6, n_clutter=0, **kwargs)
class EmptyStagHuntEnv7x7(StagHuntEnv):
def __init__(self, **kwargs):
super().__init__(
size=7, n_agents=2, n_stags=1, n_plants=2, penalty=0.5, **kwargs)
class EmptyStagHuntEnv8x8(StagHuntEnv):
def __init__(self, **kwargs):
super().__init__(size=8, n_agents=2, n_stags=2, n_plants=3, **kwargs)
class RandomStagHuntEnv8x8(StagHuntEnv):
def __init__(self, **kwargs):
super().__init__(
size=8, n_agents=2, n_stags=2, n_plants=3, n_clutter=5, **kwargs)
class NoStagHuntEnv8x8(StagHuntEnv):
def __init__(self, **kwargs):
super().__init__(size=8, n_agents=2, n_stags=0, n_plants=4, **kwargs)
class AllStagHuntEnv8x8(StagHuntEnv):
def __init__(self, **kwargs):
super().__init__(size=8, n_agents=2, n_stags=3, n_plants=0, **kwargs)
class EmptyStagHuntEnv10x10(StagHuntEnv):
def __init__(self, **kwargs):
super().__init__(
size=10, n_agents=2, n_stags=2, n_plants=3, n_clutter=0, **kwargs)
if hasattr(__loader__, 'name'):
module_path = __loader__.name
elif hasattr(__loader__, 'fullname'):
module_path = __loader__.fullname
register(
env_id='MultiGrid-StagHunt-v0', entry_point=module_path + ':StagHuntEnv')
register(
env_id='MultiGrid-StagHunt-Empty-6x6-v0',
entry_point=module_path + ':EmptyStagHuntEnv6x6')
register(
env_id='MultiGrid-StagHunt-Empty-8x8-v0',
entry_point=module_path + ':EmptyStagHuntEnv8x8')
register(
env_id='MultiGrid-StagHunt-NoStag-8x8-v0',
entry_point=module_path + ':NoStagHuntEnv8x8')
register(
env_id='MultiGrid-StagHunt-AllStag-8x8-v0',
entry_point=module_path + ':AllStagHuntEnv8x8')
register(
env_id='MultiGrid-StagHunt-Random-8x8-v0',
entry_point=module_path + ':RandomStagHuntEnv8x8')
register(
env_id='MultiGrid-StagHunt-Empty-10x10-v0',
entry_point=module_path + ':EmptyStagHuntEnv10x10')
| 28.576577 | 80 | 0.671974 | 871 | 6,344 | 4.644087 | 0.259472 | 0.005933 | 0.027194 | 0.037824 | 0.336959 | 0.257355 | 0.19382 | 0.182942 | 0.147095 | 0.147095 | 0 | 0.02612 | 0.215479 | 6,344 | 221 | 81 | 28.705882 | 0.786618 | 0.210435 | 0 | 0.275362 | 0 | 0 | 0.089801 | 0.052011 | 0 | 0 | 0 | 0 | 0 | 1 | 0.123188 | false | 0 | 0.028986 | 0.028986 | 0.26087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dda3adf4af093d787e122f187a5930ac7a1e3be | 757 | py | Python | webapp/_lib-update.py | jrmsdev/jcms | e1611423a527e6cf925b2897dfa49e819fd83672 | [
"BSD-3-Clause"
] | null | null | null | webapp/_lib-update.py | jrmsdev/jcms | e1611423a527e6cf925b2897dfa49e819fd83672 | [
"BSD-3-Clause"
] | null | null | null | webapp/_lib-update.py | jrmsdev/jcms | e1611423a527e6cf925b2897dfa49e819fd83672 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import os
import os.path
import sys
W3JS = "https://www.w3schools.com/lib/w3.js"
W3CSS = "https://www.w3schools.com/w3css/4/w3.css"
def _print(s):
print(s)
sys.stdout.flush()
def _exit(rc):
if rc != 0:
_print("lib generate failed!")
sys.exit(rc)
def _call(cmd):
_print(cmd)
rc = os.system(cmd)
if rc != 0:
_exit(rc)
def _write(n):
orig = os.path.join("_lib", ".orig.%s" % n)
dst = os.path.join("_lib", n)
with open(orig, "r") as src:
with open(dst, "w") as fh:
for l in src.readlines():
				# unify line endings (LF)
l = l.rstrip()
fh.write("%s\n" % l)
_call("wget -nv -c -O _lib/.orig.w3.js %s" % W3JS)
_write("w3.js")
_call("wget -nv -c -O _lib/.orig.w3.css %s" % W3CSS)
_write("w3.css")
_exit(0)
| 18.02381 | 52 | 0.610304 | 139 | 757 | 3.208633 | 0.417266 | 0.040359 | 0.076233 | 0.089686 | 0.09417 | 0.09417 | 0.09417 | 0.09417 | 0 | 0 | 0 | 0.027331 | 0.178336 | 757 | 41 | 53 | 18.463415 | 0.689711 | 0.058124 | 0 | 0.066667 | 0 | 0 | 0.277075 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.1 | 0 | 0.233333 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dda9193ed31dd470f1a9148dc6cd1bccafa6e74 | 1,705 | py | Python | source/importer.py | ilDug/cryptodag | b37c68ad841ea5025c7e7baf931222e056d99fd2 | [
"MIT"
] | null | null | null | source/importer.py | ilDug/cryptodag | b37c68ad841ea5025c7e7baf931222e056d99fd2 | [
"MIT"
] | null | null | null | source/importer.py | ilDug/cryptodag | b37c68ad841ea5025c7e7baf931222e056d99fd2 | [
"MIT"
] | null | null | null | from pathlib import Path
from source.install import Installer
class Importer():
help_msg = """
Comandi disponibili:
import-ca <ca_cert_path> <ca_key_path> importa un certificato, la sua chiave privata
"""
def __init__(self, args):
        # no command given
if len(args) == 0:
print(self.help_msg)
# _______________________________________________________________________________________________
elif args[0] == "import-ca":
if len(args) < 2:
print("inserire il percorso al certificato della CA")
print(self.help_msg)
return
if len(args) < 3:
print("inserire il percorso della chiave privata della CA")
print(self.help_msg)
return
cert_path = args[1]
key_path = args[2]
self.cert_file = Path(cert_path)
self.key_file = Path(key_path)
if not self.cert_file.exists():
print("non è stato trovato il file del certificato")
return
if not self.key_file.exists():
print("non è stato trovato il file della chiave privata")
return
i = Installer()
i.clean_structure()
i.create_pki()
i.save_passphrase()
cakey = Path(i.pki/'private/ca.key')
cacrt = Path(i.pki/'certs/ca.crt')
cakey.write_text(self.key_file.read_text())
cacrt.write_text(self.cert_file.read_text())
# _______________________________________________________________________________________________
else:
print(self.help_msg)
| 32.169811 | 97 | 0.587097 | 183 | 1,705 | 4.26776 | 0.36612 | 0.044814 | 0.066581 | 0.081946 | 0.169014 | 0.169014 | 0.169014 | 0.09475 | 0.09475 | 0 | 0 | 0.005245 | 0.329032 | 1,705 | 52 | 98 | 32.788462 | 0.677448 | 0.120821 | 0 | 0.205128 | 0 | 0 | 0.225418 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025641 | false | 0.025641 | 0.128205 | 0 | 0.307692 | 0.205128 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dda9aaed67ba74969c04014a91fabb0ab8e4047 | 2,740 | py | Python | tests/unit/test_keyer.py | jonmaddock/enigma | e9e3ca95cc397bbdfb7b5f43c043dd52997f0d65 | [
"MIT"
] | null | null | null | tests/unit/test_keyer.py | jonmaddock/enigma | e9e3ca95cc397bbdfb7b5f43c043dd52997f0d65 | [
"MIT"
] | 9 | 2021-10-11T19:50:21.000Z | 2022-02-03T21:13:06.000Z | tests/unit/test_keyer.py | jonmaddock/enigma | e9e3ca95cc397bbdfb7b5f43c043dd52997f0d65 | [
"MIT"
] | null | null | null | """Unit tests for keyer module."""
import pytest
import numpy as np
from enigma.keyer import Keyer
def mock_signal(*args):
"""Mock creation of a binary signal array.
:return: binary array
:rtype: np.ndarray
"""
signal = np.array([1, 0, 1])
return signal
def mock_audio(*args):
"""Return random 16-bit audio array.
:return: 16-bit audio array
:rtype: np.ndarray
"""
audio = np.random.rand(3)
return audio.astype(np.int16)
@pytest.fixture
def keyer(monkeypatch):
"""Create an instance of Keyer.
:param monkeypatch: fixture for mocking
:type monkeypatch: _pytest.monkeypatch.MonkeyPatch
:return: Keyer object
:rtype: enigma.keyer.Keyer
"""
monkeypatch.setattr(Keyer, "create_binary_signal", mock_signal)
monkeypatch.setattr(Keyer, "convert_audio", mock_audio)
morse_code = ".- ."
keyer = Keyer(morse_code)
return keyer
def test_init(keyer):
"""Test instantiation of Keyer.
:param keyer: Keyer object
:type keyer: enigma.keyer.Keyer
"""
# Test attributes set by mocked methods
# Test morse converted to binary
signal_exp = np.array([1, 0, 1])
np.testing.assert_array_equal(keyer.signal, signal_exp)
# Test morse converted to 16-bit audio array
assert keyer.audio.dtype == np.dtype("int16")
def test_create_binary_signal(monkeypatch):
"""Test morse to binary conversion.
:param monkeypatch: fixture for mocking
:type monkeypatch: _pytest.monkeypatch.MonkeyPatch
"""
# Keyer.create_binary_signal() is run in init(); don't mock so it can be
# tested. Init with empty morse string
monkeypatch.setattr(Keyer, "convert_audio", mock_audio)
keyer = Keyer("")
morse = ".- ."
signal_exp = np.array([1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0])
signal = keyer.create_binary_signal(morse)
np.testing.assert_array_equal(signal, signal_exp)
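# Encoding sketch inferred from signal_exp above: a dot maps to [1] and a dash
# to [1, 1, 1], each symbol is followed by a one-sample gap (0), and a space
# between letters contributes three extra zeros; hence ".- ." becomes
# [1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0].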
def test_convert_audio(monkeypatch):
"""Test conversion of binary to audio.
:param monkeypatch: fixture for mocking
:type monkeypatch: _pytest.monkeypatch.MonkeyPatch
"""
# Keyer.convert_audio() is run in init(); don't mock so it can be
# tested.
monkeypatch.setattr(Keyer, "create_binary_signal", mock_signal)
morse = ".- ."
keyer = Keyer(morse)
# Test morse converted to 16-bit audio array
# TODO convert_audio() is actually run twice; once in init() and again
# explicitly. Could this be improved?
audio = keyer.convert_audio()
assert audio.dtype == np.dtype("int16")
def test_play(keyer):
"""Check audio can be played.
:param keyer: Keyer object
:type keyer: enigma.keyer.Keyer
"""
# Just check no exceptions are thrown
keyer.play()
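
# A minimal invocation sketch (assumes pytest and the enigma package are
# installed): run `pytest tests/unit/test_keyer.py -v` from the repository root.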
| 26.601942 | 76 | 0.674088 | 368 | 2,740 | 4.913043 | 0.255435 | 0.044248 | 0.049779 | 0.033186 | 0.450221 | 0.417035 | 0.417035 | 0.315265 | 0.220133 | 0.169248 | 0 | 0.014884 | 0.215328 | 2,740 | 102 | 77 | 26.862745 | 0.826047 | 0.462774 | 0 | 0.171429 | 0 | 0 | 0.071104 | 0 | 0 | 0 | 0 | 0.009804 | 0.114286 | 1 | 0.2 | false | 0 | 0.085714 | 0 | 0.371429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dddd71cd4f306c853811313444c1ae2d9679290 | 3,001 | py | Python | lib/torch_util.py | taesiri/ANCNet | 28b1c887c2016b06c9639f93e79752dcb6ec3a23 | [
"BSD-2-Clause"
] | 23 | 2020-06-29T13:31:03.000Z | 2022-02-24T21:14:35.000Z | lib/torch_util.py | taesiri/ANCNet | 28b1c887c2016b06c9639f93e79752dcb6ec3a23 | [
"BSD-2-Clause"
] | 2 | 2020-08-18T12:50:08.000Z | 2021-07-15T16:51:25.000Z | lib/torch_util.py | taesiri/ANCNet | 28b1c887c2016b06c9639f93e79752dcb6ec3a23 | [
"BSD-2-Clause"
] | 5 | 2020-06-27T06:45:57.000Z | 2022-02-24T21:14:56.000Z | # from NCNet
import argparse  # used by str_to_bool() below
import shutil
import torch
from torch.autograd import Variable
from os import makedirs, remove
from os.path import exists, join, basename, dirname
import collections.abc
from lib.dataloader import default_collate
def collate_custom(batch):
""" Custom collate function for the Dataset class
* It doesn't convert numpy arrays to stacked-tensors, but rather combines them in a list
* This is useful for processing annotations of different sizes
"""
# this case will occur in first pass, and will convert a
# list of dictionaries (returned by the threads by sampling dataset[idx])
# to a unified dictionary of collated values
    if isinstance(batch[0], collections.abc.Mapping):  # collections.Mapping was removed in Python 3.10
return {key: collate_custom([d[key] for d in batch]) for key in batch[0]}
# these cases will occur in recursion
    elif torch.is_tensor(batch[0]):  # for tensors, use the standard collate function
return default_collate(batch)
else: # for other types (i.e. lists), return as is
return batch
class BatchTensorToVars(object):
"""Convert tensors in dict batch to vars
"""
def __init__(self, use_cuda=True):
self.use_cuda = use_cuda
def __call__(self, batch):
batch_var = {}
for key, value in batch.items():
if isinstance(value, torch.Tensor) and not self.use_cuda:
batch_var[key] = Variable(value, requires_grad=False)
elif isinstance(value, torch.Tensor) and self.use_cuda:
batch_var[key] = Variable(value, requires_grad=False).cuda()
else:
batch_var[key] = value
return batch_var
def Softmax1D(x, dim):
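    # NOTE: subtracts the per-dim max from x in place (for numerical stability).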
x_k = torch.max(x, dim)[0].unsqueeze(dim)
x -= x_k.expand_as(x)
exp_x = torch.exp(x)
return torch.div(exp_x, torch.sum(exp_x, dim).unsqueeze(dim).expand_as(x))
def save_checkpoint(state, is_best, file, save_all_epochs=False):
model_dir = dirname(file)
model_fn = basename(file)
# make dir if needed (should be non-empty)
if model_dir != "" and not exists(model_dir):
makedirs(model_dir)
if save_all_epochs:
torch.save(state, join(model_dir, str(state["epoch"]) + "_" + model_fn))
if is_best:
shutil.copyfile(
join(model_dir, str(state["epoch"]) + "_" + model_fn),
join(model_dir, "best_" + model_fn),
)
return join(model_dir, str(state["epoch"]) + "_" + model_fn)
else:
torch.save(state, file)
if is_best:
shutil.copyfile(file, join(model_dir, "best_" + model_fn))
def str_to_bool(v):
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
def expand_dim(tensor, dim, desired_dim_len):
sz = list(tensor.size())
sz[dim] = desired_dim_len
return tensor.expand(tuple(sz))
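

# A minimal usage sketch for the helpers above (the shapes and strings are
# illustrative only, not taken from the original repository):
if __name__ == "__main__":
    t = torch.zeros(1, 3)
    print(expand_dim(t, 0, 4).shape)             # torch.Size([4, 3])
    print(str_to_bool("yes"), str_to_bool("0"))  # True False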
| 33.719101 | 93 | 0.643452 | 422 | 3,001 | 4.417062 | 0.364929 | 0.038627 | 0.032189 | 0.024142 | 0.186695 | 0.131974 | 0.107296 | 0.107296 | 0.055794 | 0.055794 | 0 | 0.003095 | 0.246251 | 3,001 | 88 | 94 | 34.102273 | 0.820955 | 0.195935 | 0 | 0.1 | 0 | 0 | 0.029857 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116667 | false | 0 | 0.116667 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2ddf67f0fabc81b7c876cd6463d49123bcd33ac0 | 584 | py | Python | src/util/metrics/ssim.py | dreaming-coder/DeepLab | 3020544e2f9e139dde7bd04f6ff59e6f44d49c6e | [
"Apache-2.0"
] | 3 | 2021-05-31T09:25:59.000Z | 2022-03-10T08:09:51.000Z | src/util/metrics/ssim.py | dreaming-coder/DeepLab | 3020544e2f9e139dde7bd04f6ff59e6f44d49c6e | [
"Apache-2.0"
] | 1 | 2021-09-26T16:37:39.000Z | 2021-09-28T00:43:05.000Z | src/util/metrics/ssim.py | dreaming-coder/DeepLab | 3020544e2f9e139dde7bd04f6ff59e6f44d49c6e | [
"Apache-2.0"
] | null | null | null | import torch
from src.util.metrics._ssim import ssim
__all__ = ["ssim_per_frame"]
def ssim_per_frame(pred, gt):
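    # pred/gt are expected to be 5-D: (batch, sequence, channel, height, width)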
assert pred.shape == gt.shape
_, sequence, _, _, _ = pred.shape
SSIM = []
for s in range(sequence):
SSIM.append(ssim(pred[:, s], gt[:, s]).item())
return SSIM
if __name__ == '__main__':
outputs = torch.load(r"/home/timwell/ice/DeepLab/src/model/ConvLSTM/results/prediction.pth")
target = torch.load(r"/home/timwell/ice/DeepLab/src/model/ConvLSTM/results/ground_truth.pth")
ss = ssim_per_frame(outputs, target)
print(ss)
| 24.333333 | 97 | 0.672945 | 83 | 584 | 4.445783 | 0.518072 | 0.056911 | 0.097561 | 0.075881 | 0.292683 | 0.292683 | 0.292683 | 0.292683 | 0.292683 | 0.292683 | 0 | 0 | 0.17637 | 584 | 23 | 98 | 25.391304 | 0.767152 | 0 | 0 | 0 | 0 | 0 | 0.270548 | 0.232877 | 0 | 0 | 0 | 0 | 0.066667 | 1 | 0.066667 | false | 0 | 0.133333 | 0 | 0.266667 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2de0607bf8148d933958226701ab2d7898844c41 | 7,297 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/logging_global/logging_global.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/logging_global/logging_global.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/cisco/nxos/plugins/module_utils/network/nxos/config/logging_global/logging_global.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #
# -*- coding: utf-8 -*-
# Copyright 2021 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
"""
The nxos_logging_global config file.
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to its desired end-state is
created.
"""
from copy import deepcopy
from ansible.module_utils.six import iteritems
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
dict_merge,
get_from_dict,
)
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
get_logging_sevmap,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.resource_module import (
ResourceModule,
)
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import (
Facts,
)
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.rm_templates.logging_global import (
Logging_globalTemplate,
)
class Logging_global(ResourceModule):
"""
The nxos_logging_global config class
"""
def __init__(self, module):
super(Logging_global, self).__init__(
empty_fact_val={},
facts_module=Facts(module),
module=module,
resource="logging_global",
tmplt=Logging_globalTemplate(),
)
self._sev_map = get_logging_sevmap(invert=True)
self._state_set = ("replaced", "deleted", "overridden")
self.parsers = [
"console",
"module",
"monitor",
"logfile",
"event.link_status.enable",
"event.link_status.default",
"event.trunk_status.enable",
"event.trunk_status.default",
"history.severity",
"history.size",
"ip.access_list.cache.entries",
"ip.access_list.cache.interval",
"ip.access_list.cache.threshold",
"ip.access_list.detailed",
"ip.access_list.include.sgt",
"origin_id.hostname",
"origin_id.ip",
"origin_id.string",
"rate_limit",
"rfc_strict",
"source_interface",
"timestamp",
]
def execute_module(self):
"""Execute the module
:rtype: A dictionary
:returns: The result from module execution
"""
if self.state not in ["parsed", "gathered"]:
self.generate_commands()
self.run_commands()
return self.result
def generate_commands(self):
"""Generate configuration commands to send based on
want, have and desired state.
"""
wantd = self._logging_list_to_dict(self.want)
haved = self._logging_list_to_dict(self.have)
if self.state == "deleted":
# empty out want (in case something was specified)
# some items are populated later on for correct removal
wantd = {}
# pre-process `event.x.y` keys
for x in self.parsers[4:7]:
have_k = get_from_dict(haved, x)
want_k = get_from_dict(wantd, x)
if have_k is None and want_k is not None:
# set have to True to mimic default state
# this allows negate commands to be issued
self.__update_dict(haved, x)
if all(
(
self.state in self._state_set,
have_k is False,
want_k is None,
)
):
# if want is missing and have is negated
# set want to True in order to revert to default state
self.__update_dict(wantd, x)
# if state is merged, merge want onto have and then compare
if self.state == "merged":
for x in self.parsers[0:4]:
hstate = haved.get(x, {}).get("state", "")
wstate = wantd.get(x, {}).get("state", "")
if hstate == "disabled" and not wstate:
# this ensures that updates are done
# with correct `state`
if wantd.get(x, {}):
wantd[x].update({"state": "enabled"})
wantd = dict_merge(haved, wantd)
if self.state in self._state_set:
# set default states for keys that appear in negated form
for x in self.parsers[0:3]:
if x in haved and x not in wantd:
wantd[x] = {"state": "enabled"}
if "rate_limit" in haved and "rate_limit" not in wantd:
wantd["rate_limit"] = "enabled"
if "logfile" in haved and "logfile" not in wantd:
wantd["logfile"] = {"name": "messages", "severity": 5}
self._compare(want=wantd, have=haved)
def _compare(self, want, have):
"""Leverages the base class `compare()` method and
populates the list of commands to be run by comparing
the `want` and `have` data with the `parsers` defined
for the Logging_global network resource.
"""
self.compare(parsers=self.parsers, want=want, have=have)
self._compare_lists(want, have)
def _compare_lists(self, want, have):
"""Compare list of dictionaries"""
for x in ["facilities", "hosts"]:
wantx = want.get(x, {})
havex = have.get(x, {})
for key, wentry in iteritems(wantx):
hentry = havex.pop(key, {})
if wentry != hentry:
if x == "hosts" and self.state in self._state_set:
# remove have config for hosts
# else want gets appended
self.addcmd(hentry, x, negate=True)
self.addcmd(wentry, x)
for key, hentry in iteritems(havex):
self.addcmd(hentry, x, negate=True)
def _logging_list_to_dict(self, data):
"""Convert all list to dicts to dicts
of dicts and substitute severity values
"""
tmp = deepcopy(data)
pkey = {"hosts": "host", "facilities": "facility"}
for k in ("hosts", "facilities"):
if k in tmp:
for x in tmp[k]:
if "severity" in x:
x["severity"] = self._sev_map[x["severity"]]
tmp[k] = {i[pkey[k]]: i for i in tmp[k]}
for k in ("console", "history", "logfile", "module", "monitor"):
if "severity" in tmp.get(k, {}):
tmp[k]["severity"] = self._sev_map[tmp[k]["severity"]]
return tmp
def __update_dict(self, datadict, key, nval=True):
"""Utility method that updates last subkey of
`datadict` as identified by `key` to `nval`.
"""
keys = key.split(".")
if keys[0] not in datadict:
datadict[keys[0]] = {}
if keys[1] not in datadict[keys[0]]:
datadict[keys[0]][keys[1]] = {}
datadict[keys[0]][keys[1]].update({keys[2]: nval})
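        # A minimal behaviour sketch (illustrative, not from the original
        # module): with d = {}, calling this helper with key
        # "event.link_status.enable" leaves
        # d == {"event": {"link_status": {"enable": True}}}.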
| 36.485 | 111 | 0.567219 | 875 | 7,297 | 4.585143 | 0.284571 | 0.022433 | 0.027418 | 0.031157 | 0.162762 | 0.131356 | 0.079262 | 0.079262 | 0.079262 | 0.079262 | 0 | 0.005081 | 0.32575 | 7,297 | 199 | 112 | 36.668342 | 0.810366 | 0.171714 | 0 | 0.015385 | 0 | 0 | 0.127049 | 0.042053 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053846 | false | 0 | 0.061538 | 0 | 0.138462 | 0.007692 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2de06d5ce0f339b5e7ea9f767794390f08df2a6a | 860 | py | Python | exercicio80ListaOrdenadaSemRepeticao.py | adrianomdantas/Exercicios-Python | ef5025a186615258aec0cf35ed839fe49577d983 | [
"MIT"
] | null | null | null | exercicio80ListaOrdenadaSemRepeticao.py | adrianomdantas/Exercicios-Python | ef5025a186615258aec0cf35ed839fe49577d983 | [
"MIT"
] | null | null | null | exercicio80ListaOrdenadaSemRepeticao.py | adrianomdantas/Exercicios-Python | ef5025a186615258aec0cf35ed839fe49577d983 | [
"MIT"
] | null | null | null | ordem = list()
for c in range(0, 5):
    n = int(input('Enter a number: '))
if c == 0 or n > ordem[-1]:
ordem.append(n)
        print('Number added at the last position')
else:
pos = 0
while pos < len(ordem):
if n <= ordem[pos]:
ordem.insert(pos, n)
                print(f'Number added at position {pos}')
break
            pos += 1
print('=-' * 30)
print(ordem)
'''ordem = []
for c in range(0, 5):
    num = int(input('Enter a number: '))
if c == 0 or num > ordem[-1]:
ordem.append(num)
else:
pos = 0
while pos < len(ordem):
if num <= ordem[pos]:
ordem.insert(pos, num)
break
pos += 1
print(ordem)'''
| 22.051282 | 61 | 0.419767 | 102 | 860 | 3.539216 | 0.343137 | 0.022161 | 0.033241 | 0.060942 | 0.33795 | 0.216066 | 0.144044 | 0.144044 | 0 | 0 | 0 | 0.029661 | 0.451163 | 860 | 38 | 62 | 22.631579 | 0.735169 | 0 | 0 | 0 | 0 | 0 | 0.180162 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2de3cf0a1d2c22f132befba359b2fbde87cb74d2 | 1,234 | py | Python | ReadmeGenerator/scraper.py | luisquid/luisquid | ffe121da8c3994a9d351da404e84ed2e1da28b5a | [
"MIT"
] | null | null | null | ReadmeGenerator/scraper.py | luisquid/luisquid | ffe121da8c3994a9d351da404e84ed2e1da28b5a | [
"MIT"
] | null | null | null | ReadmeGenerator/scraper.py | luisquid/luisquid | ffe121da8c3994a9d351da404e84ed2e1da28b5a | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
def get_projects(github_user, query):
URL = f"https://github.com/{github_user}?tab=repositories&q={query}&type=source"
page = requests.get(URL)
soup = BeautifulSoup(page.content, "html.parser")
projects = soup.body.find("ul", {"data-filterable-for": "your-repos-filter"})
if not projects:
return []
projects = projects.find_all("li")
projects_parsed = []
for project in projects:
project_data = {}
title = project.find("h3").a
project_data["name"] = title.text.strip().replace("-", " ").capitalize()
project_data["link"] = title["href"]
project_data["tags"] = [query]
impact = project.find("div", class_="f6 text-gray mt-2").find_all("a")
for data in impact:
project_data[data["href"].split("/")[-1]] = int(data.text.strip())
if "stargazers" not in project_data:
project_data["stargazers"] = 0
if "members" not in project_data:
project_data["members"] = 0
project_data["score"] = project_data["stargazers"] + project_data["members"] * 5
projects_parsed.append(project_data)
return projects_parsed
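

# A minimal usage sketch (the username and query below are placeholders, not
# values from the original repository):
if __name__ == "__main__":
    for repo in get_projects("octocat", "python"):
        print(repo["name"], repo["score"], repo["link"])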
| 30.85 | 88 | 0.624797 | 152 | 1,234 | 4.927632 | 0.460526 | 0.190921 | 0.032043 | 0.042724 | 0.072096 | 0.072096 | 0 | 0 | 0 | 0 | 0 | 0.008351 | 0.223663 | 1,234 | 39 | 89 | 31.641026 | 0.773486 | 0 | 0 | 0 | 0 | 0 | 0.181524 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.107143 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2de436be3ff3ae5b5a37d28f19e22b1b51a1203d | 971 | py | Python | Lista_43.py | Arestideschale/python | 5aef01b5fa66b71fa91d0062b703d52731c75fe9 | [
"Apache-2.0"
] | 1 | 2022-01-16T17:24:57.000Z | 2022-01-16T17:24:57.000Z | Lista_43.py | Arestideschale/python | 5aef01b5fa66b71fa91d0062b703d52731c75fe9 | [
"Apache-2.0"
] | null | null | null | Lista_43.py | Arestideschale/python | 5aef01b5fa66b71fa91d0062b703d52731c75fe9 | [
"Apache-2.0"
] | null | null | null | codigos = [100, 101, 102, 103, 104, 105]
comidas = ['Cachorro Quente', 'Bauru Simples', 'Bauru com ovo', 'Hamburguer', 'ChesseBurguer', 'Refrigerante']
precos = [1.20, 1.30, 1.50, 1.20, 1.30, 1.0]
codigo = True
n_pedido = 1
pedido = []
while codigo != 0:
print("\nPedido n°", n_pedido)
codigo = int(input("Digite o código do alimento: "))
if codigo == 0:
break
else:
while codigo not in codigos:
print("[Este código não corresponde a nenhum alimento.]")
codigo = int(input("Digite o código do alimento: "))
indice = codigos.index(codigo)
        quantidade = int(input("Enter the quantity: "))
valor_pedido = precos[indice] * quantidade
pedido.append(valor_pedido)
n_pedido += 1
pedido_nota = 0
print("\n" * 2)
for i in range(n_pedido - 1):
print("Pedido n°", pedido_nota + 1, "= R$", round(pedido[pedido_nota], 2))
pedido_nota += 1
print("Total: R$", round(sum(pedido), 2))
| 32.366667 | 110 | 0.61277 | 138 | 971 | 4.253623 | 0.449275 | 0.0477 | 0.040886 | 0.020443 | 0.149915 | 0.126065 | 0.126065 | 0.126065 | 0 | 0 | 0 | 0.061828 | 0.23378 | 971 | 29 | 111 | 33.482759 | 0.724462 | 0 | 0 | 0.076923 | 0 | 0 | 0.245108 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.192308 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2de50eec931ddb69c1ee9d24b12437f52caf36bb | 1,587 | py | Python | src/factories/dataloader_factory.py | gmum/LocoGAN | 0200c80cff614ede39cdba6114ab17ef0acc2744 | [
"MIT"
] | 12 | 2020-02-21T08:34:53.000Z | 2021-06-10T13:55:05.000Z | src/factories/dataloader_factory.py | gmum/LocoGAN | 0200c80cff614ede39cdba6114ab17ef0acc2744 | [
"MIT"
] | null | null | null | src/factories/dataloader_factory.py | gmum/LocoGAN | 0200c80cff614ede39cdba6114ab17ef0acc2744 | [
"MIT"
] | 3 | 2021-02-05T07:18:20.000Z | 2021-08-17T03:06:31.000Z | from torch.utils.data import DataLoader
from torchvision import transforms
import torchvision.datasets as datasets
from torchvision.transforms import ToTensor
from transformations.image_random_crop import ImageRandomCropFactory
from transformations.image_resize import ImageResize
from transformations.image_to_tensor import ImageToTensor
def get_dataloader(args) -> DataLoader:
image_size = args.image_size
chunk_size = args.chunk_size
dataset_transforms = []
if args.resize_dataset:
        print('Adding initial image-resize step')
dataset_transforms += [
transforms.Resize(args.image_size),
]
dataset_transforms += [
ImageRandomCropFactory().create(image_size, chunk_size)
]
dataset_transforms += [
ImageToTensor()
]
dataset_transforms = transforms.Compose(dataset_transforms)
dataset = datasets.ImageFolder(root=args.dataroot,
transform=dataset_transforms)
assert dataset
dataloader = DataLoader(dataset, batch_size=args.batch_size, pin_memory=True,
shuffle=True, num_workers=args.workers)
return dataloader
def get_plain_dataloader(args):
dataset = datasets.ImageFolder(root=args.dataroot,
transform=transforms.Compose([ToTensor()]))
assert dataset
dataloader = DataLoader(dataset, batch_size=args.batch_size, pin_memory=True,
shuffle=True, num_workers=args.workers)
return dataloader
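

# A minimal usage sketch (assumes an argparse-style namespace; the field values
# below are placeholders, not defaults from the original project):
if __name__ == "__main__":
    from types import SimpleNamespace

    args = SimpleNamespace(dataroot="./data", image_size=64, chunk_size=32,
                           resize_dataset=False, batch_size=16, workers=2)
    loader = get_dataloader(args)
    print(f"{len(loader)} batches")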
| 33.0625 | 82 | 0.679269 | 157 | 1,587 | 6.66879 | 0.305732 | 0.113658 | 0.068768 | 0.034384 | 0.332378 | 0.332378 | 0.332378 | 0.234957 | 0.234957 | 0.234957 | 0 | 0 | 0.253938 | 1,587 | 47 | 83 | 33.765957 | 0.884291 | 0 | 0 | 0.361111 | 0 | 0 | 0.020779 | 0 | 0 | 0 | 0 | 0 | 0.055556 | 1 | 0.055556 | false | 0 | 0.194444 | 0 | 0.305556 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2de544c69fb1c27033e89f6e2eed06e532a94e50 | 1,843 | py | Python | ch11_dash_cytoscape_advance/callback/event_callback_tapnode.py | Ethan0621/plotly-dash-dev | abe478824db1ee511a2d92f88e5dad49f5d6e27e | [
"MIT"
] | 21 | 2020-10-02T08:17:33.000Z | 2022-03-22T06:10:17.000Z | ch11_dash_cytoscape_advance/callback/event_callback_tapnode.py | Ethan0621/plotly-dash-dev | abe478824db1ee511a2d92f88e5dad49f5d6e27e | [
"MIT"
] | 4 | 2019-07-18T04:43:31.000Z | 2021-10-31T10:30:25.000Z | ch11_dash_cytoscape_advance/callback/event_callback_tapnode.py | Ethan0621/plotly-dash-dev | abe478824db1ee511a2d92f88e5dad49f5d6e27e | [
"MIT"
] | 12 | 2019-07-23T05:36:57.000Z | 2021-07-11T08:57:47.000Z | import json
import dash
import dash_cytoscape as cyto
import dash_html_components as html
from dash.dependencies import Input, Output
# Create the Dash instance
app = dash.Dash(__name__)
# Define the network elements
elements = [
    # Node definitions
{"data": {"id": "A", "name": "Alice", "age": 14},},
{"data": {"id": "B", "name": "Bob", "age": 13},},
{"data": {"id": "C", "name": "Carol", "age": 13},},
{"data": {"id": "D", "name": "David", "age": 14},},
    # Edge definitions
{"data": {"id": "AB", "source": "A", "target": "B", "weight": 3}},
{"data": {"id": "AC", "source": "A", "target": "C", "weight": 10}},
{"data": {"id": "CD", "source": "C", "target": "D", "weight": 5}},
]
# Default stylesheet
default_stylesheet = [
{"selector": "node", "style": {"content": "data(id)", "font-size": "25px",},},
{
"selector": "edge",
"style": {
"mid-target-arrow-shape": "vee",
"line-color": "skyblue",
"mid-target-arrow-color": "skyblue",
"arrow-scale": 3,
},
},
]
cyto_compo = cyto.Cytoscape(
id="cyto-compo",
style={"width": "400px", "height": "400px"},
layout={"name": "breadthfirst", "roots": "#A", "animate": True},
elements=elements,
stylesheet=default_stylesheet,
)
# <pre> element that displays information about the clicked node
pre_compo = html.Pre(
id="pre-compo", style={"backgroundColor": "#CCCCCC", "fontSize": "20px"}
)
app.layout = html.Div([pre_compo, cyto_compo])
# Callback that displays the data dictionary of the clicked node
@app.callback(
Output("pre-compo", "children"),
[Input("cyto-compo", "tapNode")], # ❶ クリックしたノードの要素辞書全体を受け取る
)
def show_tapped_node(node_element_dict):
    if not node_element_dict:
        return "Click a node"
    # ❷ pretty-print the received element dictionary and return it
return json.dumps(node_element_dict, indent=2)
if __name__ == "__main__":
    # Start the server
app.run_server(debug=True)
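    # When run directly, Dash serves the app at http://127.0.0.1:8050/ by
    # default; open that URL and click a node to inspect its data dictionary.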
| 27.102941 | 82 | 0.57949 | 209 | 1,843 | 4.966507 | 0.492823 | 0.046243 | 0.043353 | 0.021195 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017639 | 0.200217 | 1,843 | 67 | 83 | 27.507463 | 0.686567 | 0.088985 | 0 | 0 | 0 | 0 | 0.276978 | 0.026379 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020833 | false | 0 | 0.104167 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2de5f2927180fe679db93314b93c65bacf61325a | 1,902 | py | Python | examples/deadline/All-In-AWS-Infrastructure-Basic/python/package/lib/subnets.py | horsmand/aws-rfdk | c8770da02cf381d9e5f4ec237e1686239f821ef2 | [
"Apache-2.0"
] | 76 | 2020-08-27T00:30:37.000Z | 2022-01-16T05:26:13.000Z | examples/deadline/All-In-AWS-Infrastructure-Basic/python/package/lib/subnets.py | aws/aws-rfdk | 68050c16bfaa9e273436d41a0ae60da506e20750 | [
"Apache-2.0"
] | 316 | 2020-08-27T17:45:08.000Z | 2022-03-28T19:18:56.000Z | examples/deadline/All-In-AWS-Infrastructure-Basic/python/package/lib/subnets.py | jericht/aws-rfdk | 6464f40a88236797c3e2df5caf5420421e8f7936 | [
"Apache-2.0"
] | 24 | 2020-08-26T22:03:08.000Z | 2022-02-26T03:13:31.000Z | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from aws_cdk.aws_ec2 import SubnetConfiguration, SubnetType
# Subnets for undistinguished render farm back-end infrastructure
INFRASTRUCTURE = SubnetConfiguration(
name='Infrastructure',
subnet_type=SubnetType.PRIVATE,
# 1,022 IP addresses
cidr_mask=22
)
# Subnets for publicly accessible infrastructure
PUBLIC = SubnetConfiguration(
name='Public',
subnet_type=SubnetType.PUBLIC,
# 14 IP addresses. We only require one ENI per internet gateway per AZ, but leave some extra room
# should there be a need for externally accessible ENIs
cidr_mask=28
)
# Subnets for the Render Queue Application Load Balancer (ALB).
#
# It is considered good practice to put a load balancer in dedicated subnets. Additionally, the subnets
# must have a CIDR block with a bitmask of at least /27 and at least 8 free IP addresses per subnet.
# ALBs can scale up to a maximum of 100 IP addresses distributed across all subnets. Assuming only 2 AZs
# (the minimum) we should have 50 IPs per subnet = CIDR mask of /26
#
# See:
# - https://docs.aws.amazon.com/elasticloadbalancing/latest/application/application-load-balancers.html#subnets-load-balancer
# - https://github.com/aws/aws-rfdk/blob/release/packages/aws-rfdk/lib/deadline/README.md#render-queue-subnet-placement
RENDER_QUEUE_ALB = SubnetConfiguration(
name='RenderQueueALB',
subnet_type=SubnetType.PRIVATE,
# 62 IP addresses
cidr_mask=26
)
# Subnets for the Usage-Based Licensing
USAGE_BASED_LICENSING = SubnetConfiguration(
name='UsageBasedLicensing',
subnet_type=SubnetType.PRIVATE,
# 14 IP addresses
cidr_mask=28
)
# Subnets for the Worker instances
WORKERS = SubnetConfiguration(
name='Workers',
subnet_type=SubnetType.PRIVATE,
# 4,094 IP addresses
cidr_mask=20
)
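

# A minimal wiring sketch (illustrative, not this example's actual app code;
# assumes AWS CDK v1, matching the imports above):
if __name__ == "__main__":
    from aws_cdk import core
    from aws_cdk import aws_ec2

    app = core.App()
    stack = core.Stack(app, "DemoStack")
    aws_ec2.Vpc(
        stack, "Vpc",
        max_azs=2,
        subnet_configuration=[
            INFRASTRUCTURE, PUBLIC, RENDER_QUEUE_ALB,
            USAGE_BASED_LICENSING, WORKERS,
        ],
    )
    app.synth()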
| 33.964286 | 125 | 0.760778 | 261 | 1,902 | 5.482759 | 0.536398 | 0.053809 | 0.069881 | 0.075472 | 0.032145 | 0.032145 | 0 | 0 | 0 | 0 | 0 | 0.023854 | 0.162461 | 1,902 | 55 | 126 | 34.581818 | 0.874451 | 0.620925 | 0 | 0.230769 | 0 | 0 | 0.086207 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.038462 | 0 | 0.038462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2de635ed82ad86e4c0260d0b6dc7c1f25ecd564a | 1,591 | py | Python | ghgi/tests/test_origin.py | ghgindex/gh | f47bfbc4f2b0b2ecceff946caa37b370cf526cf6 | [
"CC0-1.0"
] | null | null | null | ghgi/tests/test_origin.py | ghgindex/gh | f47bfbc4f2b0b2ecceff946caa37b370cf526cf6 | [
"CC0-1.0"
] | 4 | 2021-09-26T18:59:35.000Z | 2021-10-01T21:35:18.000Z | ghgi/tests/test_origin.py | ghgindex/ghgi | f47bfbc4f2b0b2ecceff946caa37b370cf526cf6 | [
"CC0-1.0"
] | null | null | null | from unittest import TestCase
from ghgi.origin import Origin
from ghgi.reference import Reference
class TestOrigin(TestCase):
def test_origins_valid(self):
try:
Origin.validate()
        except Exception:  # a bare except would also swallow KeyboardInterrupt
self.fail('Origins failed to validate')
def test_origins_meta(self):
# make sure all origins are present
self.assertEqual(len(Origin.ORIGINS), 4)
self.assertTrue(all([
origin in Origin.ORIGINS for origin in [
'global',
'north_america',
'canada',
'usa'
]
]))
def test_origin_entries(self):
""" Test Origin integrity:
If we aggregate all products across all Origins, every one of those
products should be available in every Origin, because all products
in sub-origins must also exist in their supers, which eventually leads
to the global origin.
"""
super_products = set()
for origin in Origin.ORIGINS:
Origin.load(origin)
super_products |= set(Origin._db[origin].keys())
super_products.remove('super')
for origin in Origin.ORIGINS:
for product in super_products:
if product.startswith('_'):
continue
values = Origin.values(origin, product)
self.assertNotEqual(values, None)
self.assertTrue(all([str(v) in Reference.db()
for v in values[0]]))
self.assertEqual(len(values[1]), 4)
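

# A minimal invocation sketch (assumes the ghgi package is importable):
#   python -m unittest ghgi.tests.test_origin -v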
| 33.145833 | 78 | 0.569453 | 174 | 1,591 | 5.132184 | 0.448276 | 0.058231 | 0.047032 | 0.070549 | 0.083987 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003868 | 0.350094 | 1,591 | 47 | 79 | 33.851064 | 0.859768 | 0.179761 | 0 | 0.058824 | 0 | 0 | 0.047733 | 0 | 0 | 0 | 0 | 0 | 0.147059 | 1 | 0.088235 | false | 0 | 0.088235 | 0 | 0.205882 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2de7972b83afa76c8657b31bcace9fd206d1760d | 4,998 | py | Python | convert_data.py | beckernick/cml_rapids | da29a412418ac5c5be038f6c96af0b926c57c1ea | [
"MIT"
] | null | null | null | convert_data.py | beckernick/cml_rapids | da29a412418ac5c5be038f6c96af0b926c57c1ea | [
"MIT"
] | 1 | 2021-05-19T05:50:23.000Z | 2021-05-19T05:50:23.000Z | convert_data.py | beckernick/cml_rapids | da29a412418ac5c5be038f6c96af0b926c57c1ea | [
"MIT"
] | 3 | 2021-05-13T19:31:23.000Z | 2021-11-10T20:12:32.000Z | # this script reads the data into a parquet format.
# we specify types to make the data "cleaner"
import pandas as dd  # NOTE: pandas is aliased as `dd` (dask-style) throughout this script
print("converting bureau balance")
bureau_balance = dd.read_csv('data/bureau_balance.csv')
bureau_balance['STATUS'] = bureau_balance.STATUS.astype('category')
bureau_balance.to_parquet('raw_data/bureau_balance.parquet')
## Links to Bureau on sK_ID_BUREAU
print("converting bureau")
bureau = dd.read_csv('data/bureau.csv',
dtype={'CREDIT_ACTIVE': 'category', 'CREDIT_CURRENCY': 'category'})
bureau.to_parquet('raw_data/bureau.parquet')
## Links to Train data on SK_ID_CURR
print("converting credit card balance")
cc_balance = dd.read_csv('data/credit_card_balance.csv',
dtype={'NAME_CONTRACT_STATUS': 'category'})
cc_balance.to_parquet('raw_data/cc_balance.parquet')
## Links to Prev on SK_ID_PREV
## Though also have SK_ID_CURR
print("converting installments payments")
payments = dd.read_csv('data/installments_payments.csv')
payments.to_parquet('raw_data/payments.parquet')
## Links to Prev on SK_ID_PREV
## Though also have SK_ID_CURR
print("converting POS CASH Balance")
pc_balance = dd.read_csv('data/POS_CASH_balance.csv')
pc_balance.to_parquet('raw_data/pc_balance.parquet')
## Links to Prev on SK_ID_PREV
## Though also have SK_ID_CURR
print("converting prev applications")
prev = dd.read_csv('data/previous_application.csv',
dtype={'NAME_CONTRACT_TYPE': 'category', 'WEEKDAY_APPR_PROCESS_START': 'category',
'FLAG_LAST_APPL_PER_CONTRACT': 'category', 'NAME_CONTRACT_STATUS': 'category',
'NAME_SELLER_INDUSTRY': 'category', 'NAME_YIELD_GROUP': 'category'})
# 'NFLAG_INSURED_ON_APPROVAL' is deliberately left untyped: it contains NaNs, so it cannot be read as 'bool' directly
prev.to_parquet('raw_data/prev.parquet')
## Previous loans with Home Credit Group
print("converting train and test")
train_test_dtype_dict = {'NAME_CONTRACT_TYPE': 'category', 'CODE_GENDER': 'category',
'NAME_INCOME_TYPE': 'category',
'NAME_EDUCATION_TYPE': 'category', 'NAME_FAMILY_STATUS': 'category',
'NAME_HOUSING_TYPE': 'category', 'FLAG_MOBIL': 'bool',
'FLAG_EMP_PHONE': 'bool', 'FLAG_WORK_PHONE': 'bool',
'FLAG_CONT_MOBILE': 'bool', 'FLAG_PHONE': 'bool', 'FLAG_EMAIL': 'bool',
'CNT_FAM_MEMBERS': 'Int64', 'REGION_RATING_CLIENT': 'category',
'REGION_RATING_CLIENT_W_CITY': 'category', 'WEEKDAY_APPR_PROCESS_START': 'category',
'HOUR_APPR_PROCESS_START': 'category', 'REG_REGION_NOT_LIVE_REGION': 'bool',
'REG_REGION_NOT_WORK_REGION': 'bool', 'LIVE_REGION_NOT_WORK_REGION': 'bool',
'REG_CITY_NOT_LIVE_CITY': 'bool', 'REG_CITY_NOT_WORK_CITY': 'bool',
'LIVE_CITY_NOT_WORK_CITY': 'bool', 'ORGANIZATION_TYPE': 'category',
'OBS_30_CNT_SOCIAL_CIRCLE': 'Int64', 'DEF_30_CNT_SOCIAL_CIRCLE': 'Int64',
'OBS_60_CNT_SOCIAL_CIRCLE': 'Int64', 'DEF_60_CNT_SOCIAL_CIRCLE': 'Int64',
'DAYS_LAST_PHONE_CHANGE': 'Int64', 'FLAG_DOCUMENT_2': 'bool',
'FLAG_DOCUMENT_3': 'bool', 'FLAG_DOCUMENT_4': 'bool', 'FLAG_DOCUMENT_5': 'bool',
'FLAG_DOCUMENT_6': 'bool', 'FLAG_DOCUMENT_7': 'bool', 'FLAG_DOCUMENT_8': 'bool',
'FLAG_DOCUMENT_9': 'bool', 'FLAG_DOCUMENT_10': 'bool', 'FLAG_DOCUMENT_11': 'bool',
'FLAG_DOCUMENT_12': 'bool', 'FLAG_DOCUMENT_13': 'bool', 'FLAG_DOCUMENT_14': 'bool',
'FLAG_DOCUMENT_15': 'bool', 'FLAG_DOCUMENT_16': 'bool', 'FLAG_DOCUMENT_17': 'bool',
'FLAG_DOCUMENT_18': 'bool', 'FLAG_DOCUMENT_19': 'bool', 'FLAG_DOCUMENT_20': 'bool',
'FLAG_DOCUMENT_21': 'bool', 'AMT_REQ_CREDIT_BUREAU_HOUR': 'Int64',
'AMT_REQ_CREDIT_BUREAU_DAY': 'Int64', 'AMT_REQ_CREDIT_BUREAU_WEEK': 'Int64',
'AMT_REQ_CREDIT_BUREAU_MON': 'Int64', 'AMT_REQ_CREDIT_BUREAU_QRT': 'Int64',
'AMT_REQ_CREDIT_BUREAU_YEAR': 'Int64'}
train = dd.read_csv('data/application_train.csv',
index_col='SK_ID_CURR',
dtype=train_test_dtype_dict)
train.FLAG_OWN_CAR = train.FLAG_OWN_CAR.eq('Y').mul(1).astype('bool')
train.FLAG_OWN_REALTY = train.FLAG_OWN_REALTY.eq('Y').mul(1).astype('bool')
train.to_parquet('raw_data/train.parquet')
test = dd.read_csv('data/application_test.csv',
index_col='SK_ID_CURR',
dtype=train_test_dtype_dict)
test.FLAG_OWN_CAR = test.FLAG_OWN_CAR.eq('Y').mul(1).astype('bool')
test.FLAG_OWN_REALTY = test.FLAG_OWN_REALTY.eq('Y').mul(1).astype('bool')
test.to_parquet('raw_data/test.parquet')
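
# A quick sanity check one might add here (assumes a parquet engine such as
# pyarrow is installed):
#   dd.read_parquet('raw_data/test.parquet').head()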
print("done") | 57.448276 | 110 | 0.638255 | 639 | 4,998 | 4.593114 | 0.250391 | 0.065417 | 0.103578 | 0.035434 | 0.357411 | 0.17138 | 0.144804 | 0.138671 | 0.138671 | 0.09983 | 0 | 0.017649 | 0.229092 | 4,998 | 87 | 111 | 57.448276 | 0.744096 | 0.094638 | 0 | 0.0625 | 0 | 0 | 0.487123 | 0.206261 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.015625 | 0 | 0.015625 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2de9f8713bb238651ecef6d71fa5adbe3e2b0d26 | 1,742 | py | Python | utils/metadata.py | Kpaubert/onlineweb4 | 9ac79f163bc3a816db57ffa8477ea88770d97807 | [
"MIT"
] | 32 | 2017-02-22T13:38:38.000Z | 2022-03-31T23:29:54.000Z | utils/metadata.py | Kpaubert/onlineweb4 | 9ac79f163bc3a816db57ffa8477ea88770d97807 | [
"MIT"
] | 694 | 2017-02-15T23:09:52.000Z | 2022-03-31T23:16:07.000Z | utils/metadata.py | Kpaubert/onlineweb4 | 9ac79f163bc3a816db57ffa8477ea88770d97807 | [
"MIT"
] | 35 | 2017-09-02T21:13:09.000Z | 2022-02-21T11:30:30.000Z | from django.core.exceptions import PermissionDenied
from django.http import Http404
from rest_framework import exceptions, metadata
from rest_framework.request import clone_request
class ActionMeta(metadata.SimpleMetadata):
"""
Metadata class for determining metadata based on the used serializer.
"""
def determine_actions(self, request, view):
"""
For generic class based views we return information about
the fields that are accepted for 'PUT' and 'POST' methods.
NOTE: This method is based directly on `SimpleMetadata.determine_actions`
and would need to change if it ever changed.
"""
actions = {}
meta_action = view.action
for method in {"PUT", "POST"} & set(view.allowed_methods):
view.action = view.action_map.get(method.lower())
view.request = clone_request(request, method=method)
try:
# Test global permissions
if hasattr(view, "check_permissions"):
view.check_permissions(view.request)
# Test object permissions
if method == "PUT" and hasattr(view, "get_object"):
view.get_object()
except (exceptions.APIException, PermissionDenied, Http404):
pass
else:
# If user has appropriate permissions for the view, include
# appropriate metadata about the fields that should be supplied.
serializer = view.get_serializer()
actions[method] = self.get_serializer_info(serializer)
finally:
view.request = request
view.action = meta_action
return actions
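

# A minimal wiring sketch (hypothetical settings.py snippet; views can also opt
# in individually via `metadata_class = ActionMeta`):
#
#   REST_FRAMEWORK = {
#       "DEFAULT_METADATA_CLASS": "utils.metadata.ActionMeta",
#   }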
| 40.511628 | 81 | 0.618829 | 185 | 1,742 | 5.735135 | 0.448649 | 0.0377 | 0.032045 | 0.03393 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005004 | 0.311711 | 1,742 | 42 | 82 | 41.47619 | 0.8799 | 0.273249 | 0 | 0 | 0 | 0 | 0.030833 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0.04 | 0.16 | 0 | 0.28 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2dee7cc125111361a51f765c31b98f79eadc2d02 | 8,968 | py | Python | docs/conf.py | alex-tsukanov/cargopy | f73b68e2ea5263dea5e570756d5fbcc4ef8d7b75 | [
"MIT"
] | 5 | 2019-03-01T16:50:15.000Z | 2021-05-24T10:58:34.000Z | docs/conf.py | alex-tsukanov/cargopy | f73b68e2ea5263dea5e570756d5fbcc4ef8d7b75 | [
"MIT"
] | 18 | 2019-03-18T12:59:09.000Z | 2019-05-14T17:36:02.000Z | docs/conf.py | alex-tsukanov/cargopy | f73b68e2ea5263dea5e570756d5fbcc4ef8d7b75 | [
"MIT"
] | 2 | 2019-03-24T22:43:08.000Z | 2019-06-04T09:45:12.000Z | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
from typing import Dict
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
# workaround python-attrs/attrs#523
# (it hides some class attributes in members summary)
def _workaround_attrs():
import attr
attrs = attr.s
def attrs_wrapper(*args, **kwargs):
def wrap(cls):
cls = attrs(*args, **kwargs)(cls)
for field in attr.fields(cls):
if not hasattr(cls, field.name):
setattr(cls, field.name, field.default)
return cls
if len(args) == 1 and not kwargs and isinstance(args[0], type):
c = args[0]
args = ()
return wrap(c)
return wrap
attr.s = attr.attrs = attr.attributes = attrs_wrapper
_workaround_attrs()
import happyly # noqa
# -- Project information -----------------------------------------------------
project = 'Happyly'
copyright = '2019, Equeum'
author = 'Alexander Tsukanov'
# The full version, including alpha/beta/rc tags
release = happyly.__version__
# The short X.Y version
version = '.'.join(release.split('.')[:2])
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon', # support Google-styled docstrings
'sphinx_autodoc_typehints', # should be imported after napoleon
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.graphviz',
'sphinx.ext.inheritance_diagram',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# generate auto summary
autosummary_generate = True
# autodoc: retain original methods order, don't sort them alphabetically
autodoc_member_order = 'bysource'
# concatenate docstring from class and from __init__
autoclass_content = 'both'
# use locally stored mathjax js
# see also html_static_path
# mathjax_path = 'mathjax-2.7.2/MathJax.js?config=TeX-AMS-MML_HTMLorMML'
# 'any': if non-prefixed `text` is encountered, consider it having this role
# as a result, such text will try to properly reference desired object
# http://www.sphinx-doc.org/en/stable/markup/inline.html?highlight=roles#role-any
# or 'code': treat `text` as ``text`` creating code snippet
default_role = 'code'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Happylydoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements: Dict[str, str] = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Happyly.tex', 'Happyly Documentation', 'Alexander Tsukanov', 'manual')
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'happyly', 'Happyly Documentation', [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
'Happyly',
'Happyly Documentation',
author,
'Happyly',
'One line description of project.',
'Miscellaneous',
)
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'marshmallow': ('https://marshmallow.readthedocs.io/en/2.x-line/', None),
}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for graphviz and inheritance_diagram
# Use SVG rather than PNG: nowadays anyone should support SVG, right?
graphviz_output_format = 'svg'
# Enhance sphinx_autodoc_typehints with support for class fields.
# This implementation is somewhat dirty;
# probably we should improve it and make a PR?
# Problems: in particular, it will fail with `<locals>` in name.
def process_docstring(app, what, name, obj, options, lines):
if what != 'attribute':
return
from importlib import import_module
from typing import get_type_hints
from sphinx_autodoc_typehints import format_annotation
*parts, attrname = name.split('.')
moduleparts = parts.copy()
while moduleparts:
try:
cls = import_module('.'.join(moduleparts))
except ImportError:
del moduleparts[-1]
else:
break
if not moduleparts:
print('XXX failed to import module')
return
innerparts = parts[len(moduleparts) :]
while innerparts:
cls = getattr(cls, innerparts.pop(0))
annotations = get_type_hints(cls)
if attrname in annotations:
lines.extend(
['', '**Type**: {}'.format(format_annotation(annotations[attrname]))]
)
def setup(app):
app.connect('autodoc-process-docstring', process_docstring)
| 30.503401 | 88 | 0.651985 | 1,110 | 8,968 | 5.189189 | 0.382883 | 0.017361 | 0.006076 | 0.005208 | 0.07934 | 0.044965 | 0.018403 | 0.018403 | 0.018403 | 0.018403 | 0 | 0.00439 | 0.18711 | 8,968 | 293 | 89 | 30.607509 | 0.785734 | 0.600357 | 0 | 0.038095 | 0 | 0 | 0.196996 | 0.035529 | 0 | 0 | 0 | 0.003413 | 0 | 1 | 0.047619 | false | 0 | 0.104762 | 0 | 0.2 | 0.009524 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2def896590ca8d47ff0336c5b45e29f811821d34 | 1,501 | py | Python | app/routers.py | redis-developer/fastapi-redis | b01f05b600c213a4c575c2b91343cdb9f6f8e4c0 | [
"MIT"
] | 1 | 2022-02-28T14:07:02.000Z | 2022-02-28T14:07:02.000Z | app/routers.py | redis-developer/fastapi-redis | b01f05b600c213a4c575c2b91343cdb9f6f8e4c0 | [
"MIT"
] | null | null | null | app/routers.py | redis-developer/fastapi-redis | b01f05b600c213a4c575c2b91343cdb9f6f8e4c0 | [
"MIT"
] | null | null | null | from fastapi import APIRouter, status
from rdkit.Chem import MolFromSmiles, RDKFingerprint
from rdkit.DataStructs import FingerprintSimilarity, CreateFromBitString
from app import main, schemas
smiles_router = APIRouter()
@smiles_router.post("/add-to-hash", status_code=status.HTTP_201_CREATED)
async def add(payload: schemas.CompoundsListSchema, redis_hash: str):
"""
:param payload:
:param redis_hash:
:return:
"""
mols = {
x.value.sval: RDKFingerprint(MolFromSmiles(x.value.sval)).ToBitString()
for compound in payload.PC_Compounds
for x in compound.props
if x.urn.label == "SMILES"
}
await main.app.state.mols_repo.set_multiple(redis_hash, mols)
hash_len = await main.app.state.mols_repo.len(redis_hash)
return {"number_of_inserted_keys": hash_len, "hash_name": redis_hash}
@smiles_router.get("/compare-to-hash")
async def get_and_compare(compound: str, redis_hash: str):
"""
:param compound:
:param redis_hash:
:return:
"""
mol = RDKFingerprint(MolFromSmiles(compound))
mol_hash = await main.app.state.mols_repo.get_all(redis_hash)
similarity = {
smile: FingerprintSimilarity(CreateFromBitString(fp), mol)
for smile, fp in mol_hash.items()
}
return {
"number_of_smiles_to_compare": len(similarity),
"similarity": dict(
sorted(similarity.items(), key=lambda item: item[1], reverse=True)
),
}
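

# A minimal request sketch (hypothetical host/port and mount prefix; the POST
# body follows the PubChem-style schema expected by CompoundsListSchema):
#   curl -X POST 'http://localhost:8000/add-to-hash?redis_hash=mols' \
#        -H 'Content-Type: application/json' -d @compounds.json
#   curl 'http://localhost:8000/compare-to-hash?compound=CCO&redis_hash=mols'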
| 27.796296 | 96 | 0.692871 | 183 | 1,501 | 5.497268 | 0.415301 | 0.071571 | 0.044732 | 0.050696 | 0.074553 | 0.074553 | 0 | 0 | 0 | 0 | 0 | 0.00332 | 0.197202 | 1,501 | 53 | 97 | 28.320755 | 0.831535 | 0 | 0 | 0 | 0 | 0 | 0.075513 | 0.036657 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.133333 | 0 | 0.2 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2df3a3b38d2a8e763e46a075b548b21d84f8216b | 5,013 | py | Python | src/carrier-owl.py | t-sakai-kure/Carrier-Owl | bf9a38346a80e583b8d89e7e7bfcbebcddbed2de | [
"MIT"
] | null | null | null | src/carrier-owl.py | t-sakai-kure/Carrier-Owl | bf9a38346a80e583b8d89e7e7bfcbebcddbed2de | [
"MIT"
] | null | null | null | src/carrier-owl.py | t-sakai-kure/Carrier-Owl | bf9a38346a80e583b8d89e7e7bfcbebcddbed2de | [
"MIT"
] | null | null | null | import chromedriver_binary  # this import is required (it puts chromedriver on PATH)
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import os
import time
import yaml
import datetime
import numpy as np
import textwrap
from bs4 import BeautifulSoup
import requests
from fastprogress import progress_bar
import slackweb
import warnings
import urllib.parse
# setting
warnings.filterwarnings('ignore')
def get_articles_info(subject):
weekday_dict = {0: 'Mon', 1: 'Tue', 2: 'Wed', 3: 'Thu',
4: 'Fri', 5: 'Sat', 6: 'Sun'}
url = f'https://arxiv.org/list/{subject}/pastweek?show=100000'
response = requests.get(url)
html = response.text
year = datetime.date.today().year
    # which day's papers to fetch
    bs = BeautifulSoup(html, 'lxml')  # parser named explicitly; lxml is already a dependency of this script
h3 = bs.find_all('h3')
wd = weekday_dict[datetime.datetime.today().weekday()]
day = datetime.datetime.today().day
today = f'{wd}, {day}'
    # check whether new papers were posted today (weekends appear to be skipped)
if today in h3[0].text:
idx = 2
else:
idx = 1
    articles_html = html.split(f'{year}</h3>')[idx]  # <--------- caution: fragile split
    # get each paper's URL
    bs = BeautifulSoup(articles_html, 'lxml')
id_list = bs.find_all(class_='list-identifier')
return id_list
def serch_keywords(id_list, keywords_dict):
urls = []
titles = []
abstracts = []
words = []
scores = []
for id_ in progress_bar(id_list):
a = id_.find('a')
_url = a.get('href')
url = 'https://arxiv.org'+_url
response = requests.get(url)
html = response.text
        bs = BeautifulSoup(html, 'lxml')
title = bs.find('meta', attrs={'property': 'og:title'})['content']
abstract = bs.find(
'meta',
attrs={'property': 'og:description'})['content']
sum_score = 0
hit_kwd_list = []
for word in keywords_dict.keys():
score = keywords_dict[word]
            if word.lower() in abstract.lower():  # lowercase both sides for case-insensitive matching
sum_score += score
hit_kwd_list.append(word)
if sum_score != 0:
title_trans = get_translated_text('ja', 'en', title)
abstract = abstract.replace('\n', '')
abstract_trans = get_translated_text('ja', 'en', abstract)
            abstract_trans = textwrap.wrap(abstract_trans, 40)  # wrap lines at 40 characters
abstract_trans = '\n'.join(abstract_trans)
urls.append(url)
titles.append(title_trans)
abstracts.append(abstract_trans)
words.append(hit_kwd_list)
scores.append(sum_score)
results = [urls, titles, abstracts, words, scores]
return results
def send2slack(results, slack):
urls = results[0]
titles = results[1]
abstracts = results[2]
words = results[3]
scores = results[4]
# rank
idxs_sort = np.argsort(scores)
idxs_sort = idxs_sort[::-1]
    # send the notification
star = '*'*120
today = datetime.date.today()
text = f'{star}\n \t \t {today}\n{star}'
slack.notify(text=text)
for i in idxs_sort:
url = urls[i]
title = titles[i]
abstract = abstracts[i]
word = words[i]
score = scores[i]
text_slack = f'''
\n score: `{score}`\n hit keywords: `{word}`\n url: {url}\n title: {title}\n abstract: \n \t {abstract}\n{star}
'''
slack.notify(text=text_slack)
def get_translated_text(from_lang, to_lang, from_text):
'''
https://qiita.com/fujino-fpu/items/e94d4ff9e7a5784b2987
'''
sleep_time = 1
# urlencode
from_text = urllib.parse.quote(from_text)
    # build the URL
url = 'https://www.deepl.com/translator#' + from_lang + '/' + to_lang + '/' + from_text
    # launch the browser in headless mode
options = Options()
options.add_argument('--headless')
    # start the browser
driver = webdriver.Chrome(options=options)
driver.get(url)
    driver.implicitly_wait(10)  # wait up to 10 seconds for elements to appear
for i in range(30):
        # wait for the polling interval
time.sleep(sleep_time)
html = driver.page_source
to_text = get_text_from_page_source(html)
try_count = i + 1
if to_text:
wait_time = sleep_time * try_count
            # translation found; stop polling
break
    # shut down the browser
driver.quit()
return to_text
def get_text_from_page_source(html):
soup = BeautifulSoup(html, features='lxml')
target_elem = soup.find(class_="lmt__translations_as_text__text_btn")
text = target_elem.text
return text
def get_config():
file_abs_path = os.path.abspath(__file__)
file_dir = os.path.dirname(file_abs_path)
config_path = f'{file_dir}/../config.yaml'
with open(config_path, 'r') as yml:
        config = yaml.safe_load(yml)  # safe_load avoids the unsafe full loader
return config
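

# A minimal config.yaml sketch (illustrative values; `subject` and `keywords`
# are the keys read in main() below):
#
#   subject: cs.LG
#   keywords:
#     deep learning: 3
#     anomaly detection: 1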
def main():
config = get_config()
slack = slackweb.Slack(url=os.getenv("SLACK_ID"))
id_list = get_articles_info(config['subject'])
results = serch_keywords(id_list, config['keywords'])
send2slack(results, slack)
if __name__ == "__main__":
main()
| 26.52381 | 134 | 0.608418 | 621 | 5,013 | 4.718196 | 0.318841 | 0.012287 | 0.010239 | 0.015017 | 0.129693 | 0.109215 | 0.025939 | 0 | 0 | 0 | 0 | 0.016011 | 0.264911 | 5,013 | 188 | 135 | 26.664894 | 0.779104 | 0.053062 | 0 | 0.044776 | 0 | 0.007463 | 0.115401 | 0.012728 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052239 | false | 0 | 0.11194 | 0 | 0.201493 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |