hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9bcb56db728c68db9be228e9c1622f5d0ceadd81 | 393 | py | Python | play.py | 777moneymaker/sjp_sequence | 302e61ee19c3dec92b494bc42d44c10ad41473ed | [
"MIT"
] | null | null | null | play.py | 777moneymaker/sjp_sequence | 302e61ee19c3dec92b494bc42d44c10ad41473ed | [
"MIT"
] | null | null | null | play.py | 777moneymaker/sjp_sequence | 302e61ee19c3dec92b494bc42d44c10ad41473ed | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
import Plot
from Sequence import Sequence
__author__ = 'Milosz Chodkowski'
__license__ = "MIT"
__version__ = "1.0"
__status__ = "Production"
def main():
    sq = Sequence(size=5000, seq_type='D')
    sq.read_from_fasta('dr.fasta')
    sq.blast_search()
    Plot.plot_from_fasta('dr.fasta', 'dy.fasta', sys.argv[1])


if __name__ == '__main__':
    main()
| 17.863636 | 61 | 0.684478 | 55 | 393 | 4.345455 | 0.654545 | 0.066946 | 0.09205 | 0.133891 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02439 | 0.165394 | 393 | 21 | 62 | 18.714286 | 0.704268 | 0.053435 | 0 | 0 | 0 | 0 | 0.177898 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.214286 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bcb598047c9fd3e9e86f21e8f468546bb327db4 | 3,178 | py | Python | plot/sideplot_utils.py | ygidtu/pysashimi | 0c3aed7e128db58e01bc2876c321bb37ad045344 | [
"Apache-2.0"
] | 7 | 2019-09-03T14:03:43.000Z | 2022-01-05T01:42:01.000Z | plot/sideplot_utils.py | ygidtu/pysashimi | 0c3aed7e128db58e01bc2876c321bb37ad045344 | [
"Apache-2.0"
] | 1 | 2022-03-30T11:28:21.000Z | 2022-03-30T11:28:21.000Z | plot/sideplot_utils.py | ygidtu/pysashimi | 0c3aed7e128db58e01bc2876c321bb37ad045344 | [
"Apache-2.0"
] | 2 | 2020-04-28T11:16:44.000Z | 2021-07-26T17:39:08.000Z | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
u"""
Created by Zhang Yiming at 2021.03.16
This is migrated from Zhou's code
"""
import numpy as np
import scipy.stats as sts
from matplotlib import pylab
from plot.utils import *
def plot_sideplot(
        read_depth_object,
        sample_info,
        graph_coords,
        ax_var,
        font_size=8,
        logtrans=None,
        strand_choice: str = None,
        sites=None
):
    """
    :param read_depth_object: object holding plus/minus strand read depth
    :param sample_info: sample metadata (color, alias, ...)
    :param graph_coords: genomic positions mapped to plot coordinates
    :param ax_var: matplotlib axis to draw on
    :param font_size: font size for tick labels
    :param logtrans: log transform to apply ('2', '10' or None)
    :param strand_choice: restrict plotting to 'plus' or 'minus'
    :param sites: positions for indicator lines
    :return: the axis that was drawn on
    """
    plus = read_depth_object.plus
    minus = read_depth_object.minus
    if logtrans == '2':
        plus = np.log2(plus + 1)
        minus = -np.log2(abs(minus) + 1)
    elif logtrans == '10':
        plus = np.log10(plus + 1)
        minus = -np.log10(abs(minus) + 1)

    maxheight = max(plus)
    minheight = min(minus)
    max_val = max(maxheight, abs(minheight))
    ymax = 1.1 * max_val
    ymin = 1.1 * -max_val

    for label, array_plot in zip(['plus', 'minus'], [plus, minus]):
        if strand_choice is not None and label != strand_choice:
            continue
        array_hist = np.repeat(graph_coords, np.abs(array_plot).astype(int))
        try:
            kde = sts.gaussian_kde(array_hist)
            fit_value = kde.pdf(graph_coords)
        except Exception:
            continue
        fit_value = fit_value / fit_value.max()
        if label == 'plus':
            ax_var.plot(graph_coords, fit_value * array_plot.max(), c=sample_info.color, lw=1)
            ax_var.bar(range(len(graph_coords)), array_plot, color=sample_info.color)
        else:
            ax_var.plot(graph_coords, fit_value * array_plot.min(), c=sample_info.color, lw=1)
            ax_var.bar(range(len(graph_coords)), array_plot, color=sample_info.color)

    # set the y limit
    # set y ticks, y label and label
    ax_var.set_ybound(lower=ymin, upper=ymax)
    universal_yticks = pylab.linspace(ymin, ymax, 3)
    curr_yticklabels = []
    for label in universal_yticks:
        curr_yticklabels.append("{}".format(int(label)))
    ax_var.set_yticks(universal_yticks)
    ax_var.set_yticklabels(curr_yticklabels, fontsize=font_size)
    ax_var.spines["left"].set_bounds(ymin, ymax)
    ax_var.yaxis.set_ticks_position('left')
    ax_var.spines["right"].set_visible(False)

    # ylab
    # y_horz_alignment = 'right'
    # ax_var.set_ylabel(sample_info.alias,
    #                   fontsize=font_size,
    #                   va="center",
    #                   rotation="horizontal",
    #                   ha=y_horz_alignment,
    #                   labelpad=distance_between_label_axis
    #                   )

    ax_var.spines['right'].set_color('none')
    ax_var.spines['top'].set_color('none')
    ax_var.spines['bottom'].set_color('none')
    pylab.xticks([])
    pylab.xlim(0, max(graph_coords))
    set_indicator_lines(read_depth_object, ax_var, graph_coords, sites, ymax)
    return ax_var
| 28.123894 | 94 | 0.624292 | 430 | 3,178 | 4.376744 | 0.365116 | 0.047821 | 0.039851 | 0.012752 | 0.162062 | 0.143464 | 0.119022 | 0.119022 | 0.119022 | 0.079702 | 0 | 0.013577 | 0.258339 | 3,178 | 112 | 95 | 28.375 | 0.784896 | 0.221523 | 0 | 0.095238 | 0 | 0 | 0.023879 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015873 | false | 0.015873 | 0.063492 | 0 | 0.095238 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bcbce48c3dbe5c7f304f6f157315cda91298309 | 3,704 | py | Python | dfpipeline/SetTransformer.py | IBM/dataframe-pipeline | 44bb4efc77ca36022ef2d54cba4d77825111841f | [
"Apache-2.0"
] | 2 | 2021-02-27T02:39:36.000Z | 2021-09-13T15:52:08.000Z | dfpipeline/SetTransformer.py | IBM/dataframe-pipeline | 44bb4efc77ca36022ef2d54cba4d77825111841f | [
"Apache-2.0"
] | 3 | 2021-02-26T02:40:27.000Z | 2021-02-26T03:24:30.000Z | dfpipeline/SetTransformer.py | IBM/dataframe-pipeline | 44bb4efc77ca36022ef2d54cba4d77825111841f | [
"Apache-2.0"
] | null | null | null | ##############################################################################
# Copyright 2020 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import pandas as pd
import types
from . import DFPBase
class SetTransformer(DFPBase):
"""
Perform a set operation between two Tensors.
Parameters
----------
first_operand : Function, string, or List
Function: If a function is specified, it will be called when value is necessary in fit(), transform(), or export. Its return value must be string or List.
string: it means a column label
List: constant array
second_operand : Function, string, or List
Function: If a function is specified, it will be called when value is necessary in fit(), transform(), or export. Its return value must be string or List.
string: it means a column label
List: constant array
set_operation : String
'*' or '&': And operation
'+' or '|': Union operation
'-': Subtract operation
"""
def __init__(
self,
first_operand=None,
second_operand=None,
output_operand=None,
output_func=None,
set_operation=None
):
super().__init__()
self.first_operand = first_operand
self.second_operand = second_operand
self.output_operand = output_operand
self.output_func = output_func
self.set_operation = set_operation
@classmethod
def is_method(cls, m):
return isinstance(m, types.FunctionType) or isinstance(m, types.MethodType) or isinstance(m, types.LambdaType)
def transform(self, df):
# Check invalid case first
if self.first_operand is None or self.second_operand is None or self.set_operation is None or self.output_func is None:
return df
first_op = self.first_operand() if self.is_method(self.first_operand) else self.first_operand
second_op = self.second_operand() if self.is_method(self.second_operand) else self.second_operand
# Normal case
first = df[first_op] if type(first_op) == str else first_op
second = df[second_op] if type(second_op) == str else second_op
result = None
if self.set_operation == '*' or self.set_operation == '&':
result = set(first) & set(second)
elif self.set_operation == '+' or self.set_operation == '|':
result = set(first) | set(second)
elif self.set_operation == '-':
result = set(first) - set(second)
else:
return df
result = list(result)
self.output_func(result)
if len(result) > len(df):
assert False, "The length of the result is longer than that of DataFrame. len(result)=" + str(len(result)) + " len(df)=" + str(len(df))
elif len(result) < len(df):
result.extend([None] * (len(df) - len(result)))
df[self.output_operand] = result
return df
def to_onnx_operator(self, inputs, outputs, pipeline=None):
assert False, 'Not implemented yet'
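
# --- Illustrative usage sketch (not part of the original module). Column names
# 'a', 'b', 'a_and_b' and the `captured` list are hypothetical, and set ordering
# of the result is not guaranteed. ---
# import pandas as pd
# captured = []
# t = SetTransformer(first_operand='a', second_operand='b',
#                    output_operand='a_and_b', set_operation='&',
#                    output_func=captured.append)
# df = t.transform(pd.DataFrame({'a': [1, 2, 3], 'b': [2, 3, 4]}))
# # df['a_and_b'] now holds the intersection {2, 3}, padded with None to len(df)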
| 38.583333 | 162 | 0.62203 | 474 | 3,704 | 4.742616 | 0.318565 | 0.058719 | 0.049822 | 0.016014 | 0.277135 | 0.260231 | 0.237989 | 0.237989 | 0.227758 | 0.227758 | 0 | 0.00288 | 0.25 | 3,704 | 95 | 163 | 38.989474 | 0.806335 | 0.358531 | 0 | 0.06383 | 0 | 0 | 0.048941 | 0 | 0 | 0 | 0 | 0 | 0.042553 | 1 | 0.085106 | false | 0 | 0.06383 | 0.021277 | 0.255319 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bd038ebae77475d14dd3f64f0e7473001994f0b | 3,168 | py | Python | src/nbdocs/tests/base.py | ayasyrev/nbdocs | 6333532f5e163dffbc407cc2337f69f5c99cbe0b | [
"Apache-2.0"
] | null | null | null | src/nbdocs/tests/base.py | ayasyrev/nbdocs | 6333532f5e163dffbc407cc2337f69f5c99cbe0b | [
"Apache-2.0"
] | null | null | null | src/nbdocs/tests/base.py | ayasyrev/nbdocs | 6333532f5e163dffbc407cc2337f69f5c99cbe0b | [
"Apache-2.0"
] | null | null | null | from pathlib import PosixPath
from nbformat import NotebookNode, v4 as nbformat
def create_code_cell(source: str) -> NotebookNode:
    """Create basic code cell with given source.

    Outputs basic text data.

    Args:
        source (str): Source for code cell

    Returns:
        NotebookNode: Nb code cell.
    """
    outputs = [
        nbformat.new_output(
            "display_data", data={"text/plain": "- test/plain in output"}
        ),
        nbformat.new_output(
            "stream", name="stdout", text="- text in stdout (stream) output"
        ),
        nbformat.new_output("display_data", data={"image/png": "Zw=="}),
    ]
    return nbformat.new_code_cell(source=source, outputs=outputs)


def create_markdown_cell(source: str) -> NotebookNode:
    """Create basic markdown cell with given source.

    Args:
        source (str): Source for markdown cell

    Returns:
        NotebookNode: Nb markdown cell.
    """
    return nbformat.new_markdown_cell(source)


def create_nb(code_source: str = None, md_source: str = None) -> NotebookNode:
    """Create basic test nb.

    Args:
        code_source (str, optional): Source for code cell. Defaults to None.
        md_source (str, optional): Source for markdown cell. Defaults to None.

    Returns:
        NotebookNode: Nb for test
    """
    cells = []
    if code_source is not None:
        cells.append(create_code_cell(code_source))
    if md_source is not None:
        cells.append(create_markdown_cell(md_source))
    return nbformat.new_notebook(cells=cells)


def create_cell_metadata(
    cell: NotebookNode, execution_count: int = None, metadata: dict = None
) -> None:
    """Fill cell with metadata.

    Args:
        cell (NotebookNode): Cell to process.
        execution_count (int, optional): Execution count. If None, then 1. Defaults to None.
        metadata (dict, optional): Metadata to fill. If None, a default set is used. Defaults to None.
    """
    if cell.cell_type == "code":
        execution_count = execution_count or 1
        cell.execution_count = execution_count
        if len(cell.outputs) > 0:
            cell.outputs[0].execution_count = execution_count
    default_metadata = {}
    default_metadata["test_field"] = "test_value"
    default_metadata["executeTime"] = dict(
        [("end_time", "09:31:50"), ("start_time", "09:31:49")]
    )
    metadata = metadata or default_metadata
    if "metadata" not in cell:
        cell.metadata = {}
    cell.metadata.update(metadata)


def create_nb_metadata(nb: NotebookNode, metadata: dict = None):
    """Fill nb metadata.

    Args:
        nb (NotebookNode): Nb to process.
        metadata (dict, optional): Metadata to set. Defaults to None.
    """
    metadata = metadata or {
        "language_info": {"name": "python", "version": "3.9"},
        "kernelspec": {"language": "python", "name": "python3"},
    }
    nb.metadata = metadata


def create_tmp_image_file(image_name: PosixPath) -> None:
    """Create tmp image file.

    Args:
        image_name (PosixPath): Image name
    """
    with open(image_name, "wb") as fh:
        fh.write(b"X===")
| 30.171429 | 97 | 0.630997 | 385 | 3,168 | 5.049351 | 0.241558 | 0.064815 | 0.036008 | 0.04321 | 0.160494 | 0.102881 | 0.032922 | 0 | 0 | 0 | 0 | 0.008475 | 0.255051 | 3,168 | 104 | 98 | 30.461538 | 0.815254 | 0.304924 | 0 | 0.083333 | 0 | 0 | 0.128843 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.041667 | 0 | 0.229167 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bd1ab50e45ba0d49740f04c14faae91b642172a | 2,094 | py | Python | setup.py | humrochagf/lektor-creative-commons | 9891d1f4b94fd78d975780af7d722d9e6d4ba43c | [
"MIT"
] | 7 | 2016-05-30T14:16:07.000Z | 2019-08-12T08:43:55.000Z | setup.py | humrochagf/lektor-creative-commons | 9891d1f4b94fd78d975780af7d722d9e6d4ba43c | [
"MIT"
] | 5 | 2018-06-22T11:56:33.000Z | 2018-07-17T07:05:55.000Z | setup.py | humrochagf/lektor-creative-commons | 9891d1f4b94fd78d975780af7d722d9e6d4ba43c | [
"MIT"
] | 2 | 2016-09-19T09:11:15.000Z | 2018-06-21T13:29:16.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess
from setuptools import find_packages, setup
def create_mo_files():
    data_files = []
    localedir = 'lektor_creative_commons/locales'
    po_dirs = [
        localedir + '/' + l + '/LC_MESSAGES/'
        for l in next(os.walk(localedir))[1]
    ]
    for d in po_dirs:
        mo_files = []
        po_files = [
            f for f in next(os.walk(d))[2] if os.path.splitext(f)[1] == '.po'
        ]
        for po_file in po_files:
            filename, extension = os.path.splitext(po_file)
            mo_file = filename + '.mo'
            msgfmt_cmd = 'msgfmt {} -o {}'.format(d + po_file, d + mo_file)
            subprocess.call(msgfmt_cmd, shell=True)
            mo_files.append(d + mo_file)
        data_files.append((d, mo_files))
    return data_files
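
# Note on the helper above: create_mo_files() compiles every .po catalog under
# lektor_creative_commons/locales into a .mo file with msgfmt, so the compiled
# gettext translations ship inside the package. This assumes msgfmt (from GNU
# gettext) is available on PATH at build time.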
with open('README.md', 'r') as f:
    README = f.read()

setup(
    name='lektor-creative-commons',
    description='Lektor plugin to add Creative Commons license to your pages',
    long_description=README,
    long_description_content_type='text/markdown',
    version='0.6.0',
    url='https://github.com/humrochagf/lektor-creative-commons',
    project_urls={
        'Documentation': (
            'https://github.com/humrochagf/lektor-creative-commons/'
            'blob/master/README.md'),
        'Source': 'https://github.com/humrochagf/lektor-creative-commons/',
        'Tracker': (
            'https://github.com/humrochagf/lektor-creative-commons/issues'),
    },
    author='Humberto Rocha',
    author_email='humrochagf@gmail.com',
    license='MIT',
    py_modules=['lektor_creative_commons'],
    entry_points={
        'lektor.plugins': [
            'creative-commons=lektor_creative_commons:CreativeCommonsPlugin',
        ]
    },
    packages=find_packages(),
    include_package_data=True,
    package_data={
        'lektor_creative_commons': ['assets/*/*/*.png']
    },
    zip_safe=False,
    data_files=create_mo_files(),
    classifiers=[
        'Framework :: Lektor',
        'Environment :: Plugins',
    ],
)
| 27.92 | 78 | 0.607927 | 246 | 2,094 | 4.987805 | 0.45122 | 0.134474 | 0.154034 | 0.07824 | 0.146699 | 0.146699 | 0.146699 | 0 | 0 | 0 | 0 | 0.004453 | 0.249284 | 2,094 | 74 | 79 | 28.297297 | 0.776081 | 0.020057 | 0 | 0 | 0 | 0 | 0.321951 | 0.089268 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016393 | false | 0 | 0.04918 | 0 | 0.081967 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bd29b4a8cee60c2206d100284d4aa8f29f9e702 | 9,381 | py | Python | cosmos/latex_ocr/img2latex.py | hadarohana/myCosmos | 6e4682a2af822eb828180658aaa6d3e304cc85bf | [
"Apache-2.0"
] | null | null | null | cosmos/latex_ocr/img2latex.py | hadarohana/myCosmos | 6e4682a2af822eb828180658aaa6d3e304cc85bf | [
"Apache-2.0"
] | 5 | 2020-01-28T23:13:10.000Z | 2022-02-10T00:28:15.000Z | cosmos/latex_ocr/img2latex.py | hadarohana/myCosmos | 6e4682a2af822eb828180658aaa6d3e304cc85bf | [
"Apache-2.0"
] | 1 | 2021-03-10T19:25:44.000Z | 2021-03-10T19:25:44.000Z | import sys, os
sys.path.append(os.path.dirname(__file__))
import random
import re
from scipy.misc import imread
import PIL
from PIL import Image
import os
import uuid
from imgaug import augmenters as iaa
from model.img2seq import Img2SeqModel
from model.utils.general import Config, run
from model.utils.text import Vocab
from model.utils.image import (
greyscale,
crop_image,
pad_image,
downsample_image,
TIMEOUT,
)
from imgaug import augmenters as iaa
import os
import click
import tensorflow as tf
def img2latex(
        model,
        img,
        downsample_image_ratio=1,
        cropping=False,
        padding=False,
        img_augment=None,
        gray_scale=True,
):
    """
    Predict a latex code for an input equation image.

    :param model: model to be used
    :param img: input equation image
    :param downsample_image_ratio: down sampling ratio
    :param cropping: whether to crop
    :param padding: whether to pad
    :param img_augment: img augmentation filter
    :param gray_scale: whether to gray scale
    :return: latex prediction, processed img, processed img location
    """
    dir_output = "tmp/"
    run(["mkdir -p tmp"], TIMEOUT)
    name = str(uuid.uuid4())
    img_path = os.path.join("tmp/", f"{name}.png")
    img.save(img_path)
    buckets = [
        [240, 100], [320, 80], [400, 80], [400, 100], [480, 80],
        [480, 100], [560, 80], [560, 100], [640, 80], [640, 100],
        [720, 80], [720, 100], [720, 120], [720, 200], [800, 100],
        [800, 320], [1000, 200], [1000, 400], [1200, 200],
        [1600, 200], [1600, 1600],
    ]
    img_path_tmp = dir_output + "{}.png".format(name)
    if cropping:
        crop_image(img_path, img_path_tmp)
    if padding:
        pad_image(img_path_tmp if cropping else img_path, img_path_tmp, buckets=buckets)
    if downsample_image_ratio != 1:
        if cropping or padding:
            downsample_image(img_path_tmp, img_path_tmp, ratio=downsample_image_ratio)
        else:
            downsample_image(img_path, img_path_tmp, ratio=downsample_image_ratio)
    if cropping or padding or downsample_image_ratio != 1:
        img = imread(img_path_tmp)
    else:
        img = imread(img_path)
    if img_augment:
        img = img_augment.augment_image(img)
    img_obj = Image.fromarray(img)
    img_obj.save(img_path_tmp)
    if gray_scale:
        last = greyscale(img)
    else:
        last = img
    hyps = model.predict(last)
    return hyps[0], img, os.path.abspath(img_path_tmp)
def pdf2latex(model, pdf_path):
    """
    Make prediction for a PDF.

    :param model: model to be used
    :param pdf_path: PDF location
    :return: latex prediction, processed image location
    """
    buckets = [
        [240, 100], [320, 80], [400, 80], [400, 100], [480, 80],
        [480, 100], [560, 80], [560, 100], [640, 80], [640, 100],
        [720, 80], [720, 100], [720, 120], [720, 200], [800, 100],
        [800, 320], [1000, 200], [1000, 400], [1200, 200],
        [1600, 200], [1600, 1600],
    ]
    dir_output = "tmp/"
    name = pdf_path.split("/")[-1].split(".")[0]
    run(
        "magick convert -density {} -quality {} {} {}".format(
            200, 100, pdf_path, dir_output + "{}.png".format(name)
        ),
        TIMEOUT,
    )
    pdf_path = dir_output + "{}.png".format(name)
    crop_image(pdf_path, pdf_path)
    pad_image(pdf_path, pdf_path, buckets=buckets)
    downsample_image(pdf_path, pdf_path, 2)
    img = imread(pdf_path)
    img = greyscale(img)
    hyps = model.predict(img)
    # model.logger.info(hyps[0])
    return hyps[0], pdf_path
def easiest_latex_fix_from_left(tokens):
    """
    Fix imbalanced brackets, iterating from the left.

    :param tokens: List of tokens
    :return: Fixed sequence
    """
    c = 0
    for w in tokens:
        if w == "{":
            c += 1
            yield w
        elif w == "}":
            if c == 0:
                continue
            else:
                c -= 1
                yield w
        else:
            yield w
def easiest_latex_fix_from_right(tokens):
    """
    Fix imbalanced brackets, iterating from the right.

    :param tokens: List of tokens
    :return: Fixed sequence
    """
    c = 0
    for w in tokens[::-1]:
        if w == "{":
            if c == 0:
                continue
            c -= 1
            yield w
        elif w == "}":
            c += 1
            yield w
        else:
            yield w
def remove_bad_underscore(tokens):
    """
    Fix invalid underscore sequences.

    :param tokens: List of tokens
    :return: Fixed sequence
    """
    merged = "".join(tokens)
    merged = re.sub(r"[_]{2,}", "_", merged)
    merged = merged.replace("}_}", "}}")
    merged = merged.replace("{_{", "{{")
    merged = re.sub(r"^_", "", merged)
    merged = re.sub(r"_$", "", merged)
    merged = re.sub(r"[_]{2,}", "_", merged)
    return list(merged)
def remove_bad_camma(tokens):
    """
    Remove invalid commas.

    :param tokens: List of tokens
    :return: Fixed sequence
    """
    merged = "".join(tokens)
    merged = re.sub(r"\\,", "", merged)
    return merged
def strip(tokens, forbidden=[]):
    """
    Remove unnecessary commands.

    :param tokens: List of tokens
    :param forbidden: commands to be removed
    :return: Fixed sequence
    """
    merged = "".join(tokens)
    for cmd in forbidden:
        merged = re.sub(cmd.replace("\\", "\\\\"), "", merged)
    return list(merged)
def replace_empty_bracket(tokens):
    """
    Remove empty brackets.

    :param tokens: List of tokens
    :return: Fixed sequence
    """
    merged = "".join(tokens)
    find = re.search(r"\{\}", merged)
    while find:
        merged = re.sub(r"\{\}", "", merged)
        find = re.search(r"\{\}", merged)
    return list(merged)
def postprocess(raw_latex):
    """
    Wrapper function for performing different postprocess operations.

    :param raw_latex: latex code
    :return: processed latex code
    """
    tokens = raw_latex.split()
    recorded_command = list(filter(lambda x: "\\" in x, tokens))
    tokens = strip(tokens, ["\\mathrm", "\\Big", "\\cal"])
    tokens = remove_bad_underscore(tokens)
    tokens = remove_bad_camma(tokens)
    tokens = replace_empty_bracket(tokens)
    # print(tokens)
    tokens = list(easiest_latex_fix_from_left(tokens))
    # print(''.join(tokens))
    tokens = reversed(list(easiest_latex_fix_from_right(tokens)))
    # print(''.join(tokens))
    merged = "".join(tokens)
    # add space after commands
    for cmd in recorded_command:
        merged = merged.replace(cmd, cmd + " ")
    return merged
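
# Illustrative example (not part of the original module): postprocess() strips
# \mathrm/\Big/\cal, repairs underscores, drops empty braces and balances
# brackets. For instance, approximately:
#   postprocess('x _ { 1 } } + \\cal { y }')  ->  'x_{1}+{y}'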
def get_im2latex_model(weight_dir):
    """
    Load up model from the given weight location.

    :param weight_dir: weight location
    :return: trained model
    """
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
    tf.logging.set_verbosity(tf.logging.ERROR)
    config_vocab = Config(weight_dir + "vocab.json")
    config_model = Config(weight_dir + "model.json")
    vocab = Vocab(config_vocab)
    model = Img2SeqModel(config_model, weight_dir, vocab)
    model.build_pred()
    model.restore_session(weight_dir + "model.weights/")
    return model
def img2latex_api(model, img, downsample_image_ratio, cropping, padding, gray_scale):
    """
    Predict a latex code for an input equation image.

    :param model: model to be used
    :param img: input equation image
    :param downsample_image_ratio: down sampling ratio
    :param cropping: whether to crop
    :param padding: whether to pad
    :param gray_scale: whether to gray scale
    :return: postprocessed latex prediction
    """
    seq = iaa.Sequential([iaa.GammaContrast(2)])
    latex, _, _ = img2latex(
        model,
        img,
        downsample_image_ratio=downsample_image_ratio,
        cropping=cropping,
        padding=padding,
        img_augment=seq,
        gray_scale=gray_scale,
    )
    processed_latex = postprocess(latex)
    return processed_latex
# downsample_image_ratio=1, cropping=False, padding=False, img_augment=None, gray_scale=True
@click.command()
@click.option("--downsample_image_ratio", default=2, help="Ratio to down sampling")
@click.option("--cropping", default=True, help="Crops the source image")
@click.option("--padding", default=True, help="Pads the source image")
@click.option("--gray_scale", default=True, help="Gray scales the source image")
@click.option(
    "--weight_dir",
    required=True,
    help="Path to configuration folder under which there're vocab.json model.json model.weights",
)
@click.option("--img_path", required=True, help="Path to source img")
def img2latex_cli(
    weight_dir, img_path, downsample_image_ratio, cropping, padding, gray_scale
):
    """Program that takes as input an image of equation and outputs a Latex code"""
    img = Image.open(img_path)
    model = get_im2latex_model(weight_dir)
    processed_latex = img2latex_api(
        model, img, downsample_image_ratio, cropping, padding, gray_scale
    )
    click.echo(processed_latex)


if __name__ == "__main__":
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
    tf.logging.set_verbosity(tf.logging.ERROR)
    img2latex_cli()
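
# Illustrative CLI/API usage (not part of the original module; paths are
# placeholders):
#   python img2latex.py --weight_dir weights/ --img_path equation.png
# or, from Python:
#   model = get_im2latex_model('weights/')
#   print(img2latex_api(model, Image.open('equation.png'), 2, True, True, True))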
| 26.277311 | 97 | 0.607824 | 1,177 | 9,381 | 4.677145 | 0.192863 | 0.02416 | 0.050863 | 0.018529 | 0.482834 | 0.40109 | 0.326794 | 0.290463 | 0.282834 | 0.274114 | 0 | 0.043174 | 0.269161 | 9,381 | 356 | 98 | 26.351124 | 0.759772 | 0.19838 | 0 | 0.43038 | 0 | 0 | 0.073945 | 0.003342 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050633 | false | 0 | 0.07173 | 0 | 0.160338 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bd304a40fd75b4bf452c8a6a813abc8ee25dc77 | 2,469 | py | Python | tests/test_invoke_watson_studio.py | Agnarsh/functions | 64a408ecf55773f38c5ce3b2fe75119e7235e9c9 | [
"Apache-2.0"
] | null | null | null | tests/test_invoke_watson_studio.py | Agnarsh/functions | 64a408ecf55773f38c5ce3b2fe75119e7235e9c9 | [
"Apache-2.0"
] | null | null | null | tests/test_invoke_watson_studio.py | Agnarsh/functions | 64a408ecf55773f38c5ce3b2fe75119e7235e9c9 | [
"Apache-2.0"
] | null | null | null | # Licensed Materials - Property of IBM
# 5737-M66, 5900-AAA, 5900-A0N, 5725-S86, 5737-I75
# (C) Copyright IBM Corp. 2020, 2021 All Rights Reserved.
# US Government Users Restricted Rights - Use, duplication, or disclosure
# restricted by GSA ADP Schedule Contract with IBM Corp.
import logging
import pandas as pd
from iotfunctions.bif import InvokeWMLModel
from iotfunctions.enginelog import EngineLogging
logger = logging.getLogger(__name__)
def test_invoke_watson_studio():
    EngineLogging.configure_console_logging(logging.DEBUG)

    # Function configuration for InvokeWMLModel
    invoke_model = InvokeWMLModel(input_items=FEATURE_NAMES, wml_auth=None, output_items=TARGET)
    invoke_model.deployment_id = DEPLOYMENT_ID
    invoke_model.apikey = API_KEY
    invoke_model.wml_endpoint = WML_ENDPOINT
    invoke_model.space_id = SPACE_ID

    # Prepare test data
    test_data = TEST_DATA
    test_df = pd.DataFrame(data=test_data, columns=FEATURE_NAMES)

    # Call test function
    test_results = invoke_model.execute(test_df)
    logger.debug(f'Test Results \n{test_results}')
"""
To use this test script you need to have a watson studio account and a model deployed in the deployment space.
Set the following global variables before calling the test function
1. Set the feature names and target to the same as deployed model
FEATURE_NAMES = ['feature_1', 'feature_2', ...]
TARGET = ['target_name']
2. Set wml credentials in function above
DEPLOYMENT_ID = ***
API_KEY = ***
SPACE_ID = ***
WML_ENDPOINT = ***
3. Set test data
TEST_DATA is a list of list. Each inner list represents a row of data
Example
If we have two features defined as:
FEATURE_NAMES = ['feature_1', 'feature_2'],
Then one row of features can be represented as a list
ROW_1 = [24, 18]
where feature_1 = 24 feature_2 = 18
The test data is represented as a comma separated list of rows
TEST_DATA = [ROW_1, ROW_2, ...]
Example test data with two rows:
TEST_DATA = [[24, 18], [25, 26]]
"""
FEATURE_NAMES = ['insert-one-or-more-feature-names']
TARGET = ['insert-target-name']
DEPLOYMENT_ID = 'insert-watson-studio-deployment-id'
API_KEY = 'insert-watson-studio-api-key'
SPACE_ID = 'insert-watson-studio-space-id'
WML_ENDPOINT = 'insert-watson-studio-endpoint-url'
TEST_DATA = ['insert-one-or-more-rows-of-test-data']
if __name__ == '__main__':
    test_invoke_watson_studio()
| 34.291667 | 110 | 0.72337 | 358 | 2,469 | 4.793296 | 0.391061 | 0.055944 | 0.027972 | 0.027972 | 0.048951 | 0.032634 | 0 | 0 | 0 | 0 | 0 | 0.031421 | 0.18793 | 2,469 | 71 | 111 | 34.774648 | 0.824439 | 0.142568 | 0 | 0 | 0 | 0 | 0.218972 | 0.170213 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.16 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bd3b1b2d0fa8b107bcc7412c022c29382cfbcbc | 558 | py | Python | 733. Flood Fill.py | Jorewang/LeetCode_Solutions | 0c483a915e2a8b3bfc4bcb4b5a35df3dd0dbe8ba | [
"Apache-2.0"
] | 2 | 2020-06-13T06:37:50.000Z | 2020-06-13T06:37:52.000Z | 733. Flood Fill.py | Jorewang/LeetCode_Solutions | 0c483a915e2a8b3bfc4bcb4b5a35df3dd0dbe8ba | [
"Apache-2.0"
] | null | null | null | 733. Flood Fill.py | Jorewang/LeetCode_Solutions | 0c483a915e2a8b3bfc4bcb4b5a35df3dd0dbe8ba | [
"Apache-2.0"
] | null | null | null | class Solution(object):
    def floodFill(self, image, sr, sc, newColor):
        if not image or not image[0]:
            return
        # 4-neighbourhood offsets
        dx = [-1, 0, 1, 0]
        dy = [0, 1, 0, -1]
        oldColor = image[sr][sc]
        if oldColor == newColor:
            return image
        image[sr][sc] = newColor
        for i in range(4):
            x = sr + dx[i]
            y = sc + dy[i]
            if 0 <= x < len(image) and 0 <= y < len(image[0]) and image[x][y] == oldColor:
                self.floodFill(image, x, y, newColor)
        return image
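
# Illustrative usage (not part of the original solution); this mirrors the
# classic LeetCode 733 example:
# image = [[1, 1, 1], [1, 1, 0], [1, 0, 1]]
# print(Solution().floodFill(image, 1, 1, 2))
# # -> [[2, 2, 2], [2, 2, 0], [2, 0, 1]]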
| 29.368421 | 90 | 0.467742 | 78 | 558 | 3.346154 | 0.358974 | 0.08046 | 0.103448 | 0.130268 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.038576 | 0.396057 | 558 | 18 | 91 | 31 | 0.735905 | 0 | 0 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bd60563ef7cbd2a3e4db3e8d3079aea1b82e793 | 10,480 | py | Python | submission_validator.py | team-yi-ubc/image-matching-benchmark | da05d98f2a39d848ce29124fd521cdd659718c36 | [
"Apache-2.0"
] | 1 | 2020-09-01T05:31:51.000Z | 2020-09-01T05:31:51.000Z | submission_validator.py | team-yi-ubc/image-matching-benchmark | da05d98f2a39d848ce29124fd521cdd659718c36 | [
"Apache-2.0"
] | null | null | null | submission_validator.py | team-yi-ubc/image-matching-benchmark | da05d98f2a39d848ce29124fd521cdd659718c36 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC, University of Victoria, Czech Technical University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# IMW 2021 Submission Validator
#
# Submission Zip file should have folder structure as follows:
# ├── config.json
# ├── [Dataset 1]
# │ ├── [Sequence 1]
# │ │ ├── keypoints.h5
# │ │ ├── descriptors.h5
# │ │ ├── matches.h5
# │ ├── [Sequence 2]
# │ │ ├── ...
# ├── [Dataset 2]
# │ ├── ...
#
# In the config file, please follow these naming conventions:
# 'keypoint', 'descriptor', and 'custom_matches_name' should only
# contain lowercase letters(a-z), numbers(0-9), and two special
# characters('-','.')
# 'json_label' should only contain the above-mentioned characters plus '_'
#
# Please use this script to validate your zip file before submitting.
# This script will create a log file alongside your submission file.
# Please make sure there is no error message in the log file.
import os
import sys
import argparse
from itertools import product
from utils.io_helper import load_h5, load_json
from config import validate_method
from utils.pack_helper import get_descriptor_properties
def get_config():
    parser = argparse.ArgumentParser()
    # submission zip file path
    parser.add_argument('--submit_file_path', type=str, default='')
    # benchmark repo path
    parser.add_argument('--benchmark_repo_path', type=str, default='./')
    # dataset path
    parser.add_argument('--raw_data_path', type=str, default='../imw_data')
    # list of datasets
    parser.add_argument('--datasets', nargs='+', default=['phototourism', 'googleurban', 'pragueparks'])
    config = parser.parse_args()
    return config
class MonitorLogger():
    def __init__(self, logger_path, value):
        self.file = os.path.join(logger_path, '{}_log.txt'.format(value))
        if os.path.isfile(self.file):
            os.remove(self.file)

    def add_new_log(self, new_log):
        with open(self.file, 'a') as f:
            f.write(new_log + '\n')

    def is_empty(self):
        return not os.path.isfile(self.file)

    def get_log_str(self):
        with open(self.file, 'r') as f:
            lines = f.readlines()
        return ''.join(lines)

    def get_file_path(self):
        return self.file
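
# Illustrative usage sketch (not part of the original script; the path and
# message below are placeholders):
# logger = MonitorLogger('/tmp', 'my_submission')
# logger.add_new_log('keypoints file missing')
# print(logger.is_empty())       # False once a message has been logged
# print(logger.get_log_str())    # full log contents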
def validate_submission_files(sub_path, benchmark_repo_path, datasets, raw_data_path, logger):
    for dataset in datasets:
        raw_dataset_path = os.path.join(raw_data_path, dataset)

        # check if dataset folder exists
        sub_dataset_path = os.path.join(sub_path, dataset)
        if not os.path.isdir(sub_dataset_path):
            logger.add_new_log('Submission does not contain {} dataset (ignore this message if you do not intend to evaluate on this dataset).'.format(dataset))
            continue

        # read seqs from json
        seqs = load_json(os.path.join(benchmark_repo_path, 'json/data/{}_test.json'.format(dataset)))
        for seq in seqs:
            # get number of images
            raw_seq_path = os.path.join(raw_dataset_path, seq)
            im_list = [os.path.splitext(f)[0] for f in os.listdir(raw_seq_path) if (os.path.isfile(os.path.join(raw_seq_path, f)) and f.endswith(('png', 'jpg')))]
            num_im = len(im_list)

            # get all key pairs
            key_pairs = [pair[0] + '-' + pair[1] for pair in list(product(im_list, im_list)) if pair[0] > pair[1]]

            # check if seq folder exists
            sub_seq_path = os.path.join(sub_dataset_path, seq)
            if not os.path.isdir(sub_seq_path):
                logger.add_new_log('Submission does not contain {} sequence in {} dataset.'.format(seq, dataset))
                continue

            # validate keypoints file
            kp_path = os.path.join(sub_seq_path, 'keypoints.h5')
            if not os.path.isfile(kp_path):
                logger.add_new_log('Submission does not contain keypoints file for {} sequence in {} dataset.'.format(seq, dataset))
            else:
                keypoints = load_h5(kp_path)
                if len(keypoints.keys()) == 0:
                    logger.add_new_log('{}-{}: Keypoints file is corrupted'.format(dataset, seq))
                else:
                    if sorted(list(keypoints.keys())) != sorted(im_list):
                        logger.add_new_log('{}-{}: Keypoints file does not contain all the image keys.'.format(dataset, seq))
                    if len(list(keypoints.values())[0].shape) != 2:
                        logger.add_new_log('{}-{}: Keypoints file is in wrong format.'.format(dataset, seq))
                    if list(keypoints.values())[0].shape[1] != 2:
                        logger.add_new_log('{}-{}: Keypoints file is in wrong format.'.format(dataset, seq))
                    # check number of keypoints
                    for _keypoints in keypoints.values():
                        if _keypoints.shape[0] > 8000:
                            logger.add_new_log('{}-{}: Keypoints file contains more than 8000 points.'.format(dataset, seq))
                            break

            # check if match file exists first
            match_files = [file for file in os.listdir(sub_seq_path) if os.path.isfile(os.path.join(sub_seq_path, file)) and file.startswith('match')]

            # validate descriptor file
            desc_path = os.path.join(sub_seq_path, 'descriptors.h5')
            # must provide either descriptor file or match file
            if not os.path.isfile(desc_path) and len(match_files) == 0:
                logger.add_new_log('Submission does not contain descriptors file for {} sequence in {} dataset.'.format(seq, dataset))
            elif not os.path.isfile(desc_path):
                pass
            else:
                descriptors = load_h5(desc_path)
                if len(descriptors.keys()) == 0:
                    logger.add_new_log('{}-{}: Descriptors file is corrupted'.format(dataset, seq))
                else:
                    if sorted(list(descriptors.keys())) != sorted(im_list):
                        logger.add_new_log('{}-{}: Descriptors file does not contain all the image keys.'.format(dataset, seq))
                    if len(list(descriptors.values())[0].shape) != 2:
                        logger.add_new_log('{}-{}: Descriptors file is in wrong format'.format(dataset, seq))
                    if list(descriptors.values())[0].shape[1] < 64 or list(descriptors.values())[0].shape[1] > 2048:
                        logger.add_new_log('{}-{}: Descriptors file is in wrong format'.format(dataset, seq))
                    # check descriptor size
                    desc_type, desc_size, desc_nbytes = get_descriptor_properties({}, descriptors)
                    if desc_nbytes > 512 and len(match_files) == 0:
                        logger.add_new_log('{}-{}: Descriptors size is larger than 512 bytes, you need to provide custom match file'.format(dataset, seq))

            # validate match file
            # check match file name
            if 'matches.h5' in match_files:
                if len(match_files) != 1:
                    logger.add_new_log('{}-{}: matches.h5 exists. Do not need to provide any other match files.'.format(dataset, seq))
            elif 'matches_multiview.h5' in match_files or 'matches_stereo_0.h5' in match_files or 'matches_stereo.h5' in match_files:
                if 'matches_multiview.h5' not in match_files:
                    logger.add_new_log('{}-{}: missing matches_multiview.h5'.format(dataset, seq))
                if 'matches_stereo_0.h5' not in match_files and 'matches_stereo.h5' not in match_files:
                    logger.add_new_log('{}-{}: missing matches_stereo.h5'.format(dataset, seq))
                if 'matches_stereo_1.h5' in match_files or 'matches_stereo_2.h5' in match_files:
                    logger.add_new_log('{}-{}: for 2021 challenge, we only run stereo once, no need to provide matches_stereo_1 and matches_stereo_2'.format(dataset, seq))

            for match_file in match_files:
                matches = load_h5(os.path.join(sub_seq_path, match_file))
                if len(matches.keys()) == 0:
                    logger.add_new_log('{}-{}: Matches file is corrupted'.format(dataset, seq))
                else:
                    if len(matches.keys()) != len(key_pairs):
                        logger.add_new_log('{}-{}: Matches file contains wrong number of keys, should have {} keys, have {}.'.format(dataset, seq, len(key_pairs), len(matches.keys())))
                    elif sorted(list(matches.keys())) != sorted(key_pairs):
                        logger.add_new_log('{}-{}: Matches file contains wrong keys, maybe the image names are in reverse order. Please refer to the submission instructions for the proper custom match key naming convention'.format(dataset, seq))
                    if len(list(matches.values())[0].shape) != 2:
                        logger.add_new_log('{}-{}: Matches file is in wrong format.'.format(dataset, seq))
                    if list(matches.values())[0].shape[0] != 2:
                        logger.add_new_log('{}-{}: Matches file is in wrong format.'.format(dataset, seq))
def validate_json(json_path, datasets, logger):
    # check if json file exists
    if not os.path.isfile(json_path):
        logger.add_new_log('Submission does not contain json file')
        return

    # load json
    try:
        method_list = load_json(json_path)
    except:
        logger.add_new_log('Following error occurs when loading json : \n {}'.format(sys.exc_info()))
        return

    # validate json
    if not type(method_list) is list:
        logger.add_new_log('Json should contain a list of methods, please refer to the example json file.')
        return
    for i, method in enumerate(method_list):
        print('Validating method {}/{}: "{}"'.format(
            i + 1, len(method_list), method['config_common']['json_label']))
        try:
            validate_method(method, is_challenge=True, datasets=datasets)
        except:
            logger.add_new_log('Following error occurs when validating json : \n {}'.format(sys.exc_info()))
def main():
    config = get_config()

    # Unzip folder
    submission_name = os.path.basename(config.submit_file_path).split('.')[0]
    extracted_folder = '{}_extracted'.format(submission_name)
    folder_path = os.path.dirname(config.submit_file_path)
    os.system('unzip {} -d {}'.format(config.submit_file_path, os.path.join(folder_path, extracted_folder)))

    # Init logger
    logger = MonitorLogger(folder_path, submission_name)

    # Validate submission files
    validate_submission_files(os.path.join(folder_path, extracted_folder), config.benchmark_repo_path, config.datasets, config.raw_data_path, logger)

    # Validate json
    validate_json(os.path.join(folder_path, extracted_folder, 'config.json'), config.datasets, logger)

    if logger.is_empty():
        logger.add_new_log('Submission is in proper format, please submit to IMW 2021 website.')
        print('--------\nSubmission is in proper format, please submit to IMW 2021 website.\n--------')
    else:
        logger.add_new_log('Please fix the above errors and rerun this script!')
        print('--------\nPlease fix the errors in log file before submitting!\n{}\n--------'.format(logger.get_file_path()))


if __name__ == "__main__":
    main()
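
# Example invocation (illustrative; paths are placeholders):
#   python submission_validator.py --submit_file_path my_submission.zip \
#       --benchmark_repo_path ./ --raw_data_path ../imw_data \
#       --datasets phototourism pragueparks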
| 43.666667 | 215 | 0.710973 | 1,586 | 10,480 | 4.56116 | 0.185372 | 0.026541 | 0.037324 | 0.060133 | 0.364529 | 0.316422 | 0.26334 | 0.217031 | 0.172519 | 0.105889 | 0 | 0.012086 | 0.155248 | 10,480 | 239 | 216 | 43.849372 | 0.800407 | 0.18063 | 0 | 0.162162 | 0 | 0.013514 | 0.275393 | 0.007976 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060811 | false | 0.006757 | 0.047297 | 0.006757 | 0.168919 | 0.02027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bd7eeb57fe36c170d09785fa64e8ba0635099a9 | 2,318 | py | Python | dataset.py | zerocool95/ImageClassificationAutomate | f777bf1cf60fa4b86531a572629dd2c0f148826b | [
"MIT"
] | 1 | 2021-09-27T01:39:53.000Z | 2021-09-27T01:39:53.000Z | dataset.py | zerocool95/ImageClassificationAutomate | f777bf1cf60fa4b86531a572629dd2c0f148826b | [
"MIT"
] | 2 | 2021-08-25T16:14:00.000Z | 2022-02-10T02:27:28.000Z | dataset.py | zerocool95/ImageClassificationAutomate | f777bf1cf60fa4b86531a572629dd2c0f148826b | [
"MIT"
] | null | null | null | import tensorflow as tf
from sklearn.model_selection import train_test_split
import os
from config import INPUT_CONFIG, MODEL_CONFIG
class Dataset():
    def __init__(self):
        pass

    def get_labels_from_folder(self, base_folder):
        base = base_folder
        file_names = []
        file_labels = []
        lb = 0
        for folder in os.listdir(base):
            for f in os.listdir(base + folder):
                file_names.append(os.path.join(base, folder, f))
                file_labels.append(lb)
            lb += 1
        return file_names, file_labels

    def read_data(self, mode='folder', tts='automatic', train_path="data/train/", test_path="data/test/"):  # mode: csv, tts: automatic, manual
        if mode == 'folder':
            # Prepend image filenames in train/ with relative path
            file_name_dict = {}  # filename : label
            file_names, file_labels = self.get_labels_from_folder(train_path)
            if tts == 'automatic':
                train_filenames, val_filenames, train_labels, val_labels = train_test_split(file_names,
                                                                                            file_labels,
                                                                                            train_size=0.9,
                                                                                            random_state=42)
            else:
                train_filenames, train_labels = file_names, file_labels
                val_filenames, val_labels = self.get_labels_from_folder(test_path)
        INPUT_CONFIG['num_train_samples'] = len(train_filenames)
        train_data = tf.data.Dataset.from_tensor_slices((tf.constant(train_filenames),
                                                         tf.constant(train_labels))).map(self._parse_fn).shuffle(buffer_size=10000).batch(MODEL_CONFIG["batch_size"])
        val_data = tf.data.Dataset.from_tensor_slices((tf.constant(val_filenames),
                                                       tf.constant(val_labels))).map(self._parse_fn).batch(MODEL_CONFIG["batch_size"])
        return train_data, val_data

    def _parse_fn(self, filename, label):
        img = tf.io.read_file(filename)
        img = tf.image.decode_jpeg(img)
        img = (tf.cast(img, tf.float32) / 127.5) - 1
        img = tf.image.resize(img, (INPUT_CONFIG["image_width"], INPUT_CONFIG["image_height"]))
        return img, label
| 39.288136 | 153 | 0.591027 | 280 | 2,318 | 4.592857 | 0.303571 | 0.041991 | 0.050544 | 0.073872 | 0.18196 | 0.111975 | 0.066874 | 0.066874 | 0.066874 | 0 | 0 | 0.011299 | 0.31277 | 2,318 | 58 | 154 | 39.965517 | 0.795982 | 0.045298 | 0 | 0 | 0 | 0 | 0.051604 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0.02381 | 0.095238 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bd8c531230cff2167b3677c631f23a611edb353 | 891 | py | Python | src/parse_slim.py | dp-rice/multiplemergers | ae52d1092cf2ae57be4220f68cefb36f5887abc1 | [
"MIT"
] | null | null | null | src/parse_slim.py | dp-rice/multiplemergers | ae52d1092cf2ae57be4220f68cefb36f5887abc1 | [
"MIT"
] | null | null | null | src/parse_slim.py | dp-rice/multiplemergers | ae52d1092cf2ae57be4220f68cefb36f5887abc1 | [
"MIT"
] | null | null | null | import sys
import numpy as np
import helpers as h
'''
Parse SLiM output by calculating the SFS in windows.
Windowing greatly reduces the time to compute correlations because mutations are sparse.
'''
infn = sys.argv[1]
L_str = sys.argv[2]
w_str = sys.argv[3]
outfn = sys.argv[4]
if 'e' in L_str:
    L = int(float(L_str))
else:
    L = int(L_str)

if 'e' in w_str:
    window_size = int(float(w_str))
else:
    window_size = int(w_str)
# sys.stderr.write('Importing data...\n')
sample_size, positions, allele_counts = h.import_slim_output(infn)
minor_allele_counts = np.minimum(allele_counts, sample_size - allele_counts)
# sys.stderr.write('Computing windowed statistics...\n')
sfs_w = h.windowed_sfs(positions, minor_allele_counts, sample_size, L, window_size)
# sys.stderr.write('Saving to file...\n')
np.savetxt(outfn, sfs_w, fmt='%d', header='window_size={}'.format(window_size))
| 26.205882 | 88 | 0.727273 | 149 | 891 | 4.161074 | 0.436242 | 0.080645 | 0.067742 | 0.070968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005208 | 0.138047 | 891 | 33 | 89 | 27 | 0.802083 | 0.150393 | 0 | 0.105263 | 0 | 0 | 0.029801 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.210526 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bd9639d41a8997991902bb68434fc02f0b53b63 | 3,155 | py | Python | tests/output_tests.py | shoshijak/abcpy | ad12808782fa72c0428122fc659fd3ff22d3e854 | [
"BSD-3-Clause-Clear"
] | null | null | null | tests/output_tests.py | shoshijak/abcpy | ad12808782fa72c0428122fc659fd3ff22d3e854 | [
"BSD-3-Clause-Clear"
] | null | null | null | tests/output_tests.py | shoshijak/abcpy | ad12808782fa72c0428122fc659fd3ff22d3e854 | [
"BSD-3-Clause-Clear"
] | null | null | null | import unittest
import numpy as np
from abcpy.output import Journal
class JournalTests(unittest.TestCase):
    def test_add_parameters(self):
        params1 = np.zeros((2, 4))
        params2 = np.ones((2, 4))

        # test whether production mode only stores the last set of parameters
        journal_prod = Journal(0)
        journal_prod.add_parameters(params1)
        journal_prod.add_parameters(params2)
        self.assertEqual(len(journal_prod.parameters), 1)
        np.testing.assert_equal(journal_prod.parameters[0], params2)

        # test whether reconstruction mode stores all parameter sets
        journal_recon = Journal(1)
        journal_recon.add_parameters(params1)
        journal_recon.add_parameters(params2)
        self.assertEqual(len(journal_recon.parameters), 2)
        np.testing.assert_equal(journal_recon.parameters[0], params1)
        np.testing.assert_equal(journal_recon.parameters[1], params2)

    def test_add_weights(self):
        weights1 = np.zeros((2, 4))
        weights2 = np.ones((2, 4))

        # test whether production mode only stores the last set of weights
        journal_prod = Journal(0)
        journal_prod.add_weights(weights1)
        journal_prod.add_weights(weights2)
        self.assertEqual(len(journal_prod.weights), 1)
        np.testing.assert_equal(journal_prod.weights[0], weights2)

        # test whether reconstruction mode stores all weight sets
        journal_recon = Journal(1)
        journal_recon.add_weights(weights1)
        journal_recon.add_weights(weights2)
        self.assertEqual(len(journal_recon.weights), 2)
        np.testing.assert_equal(journal_recon.weights[0], weights1)
        np.testing.assert_equal(journal_recon.weights[1], weights2)

    def test_add_opt_values(self):
        opt_values1 = np.zeros((2, 4))
        opt_values2 = np.ones((2, 4))

        # test whether production mode only stores the last set of optimizer values
        journal_prod = Journal(0)
        journal_prod.add_opt_values(opt_values1)
        journal_prod.add_opt_values(opt_values2)
        self.assertEqual(len(journal_prod.opt_values), 1)
        np.testing.assert_equal(journal_prod.opt_values[0], opt_values2)

        # test whether reconstruction mode stores all optimizer value sets
        journal_recon = Journal(1)
        journal_recon.add_opt_values(opt_values1)
        journal_recon.add_opt_values(opt_values2)
        self.assertEqual(len(journal_recon.opt_values), 2)
        np.testing.assert_equal(journal_recon.opt_values[0], opt_values1)
        np.testing.assert_equal(journal_recon.opt_values[1], opt_values2)

    def test_load_and_save(self):
        params1 = np.zeros((2, 4))
        weights1 = np.zeros((2, 4))

        journal = Journal(0)
        journal.add_parameters(params1)
        journal.add_weights(weights1)
        journal.save('journal_tests_testfile.pkl')

        new_journal = Journal.fromFile('journal_tests_testfile.pkl')
        np.testing.assert_equal(journal.parameters, new_journal.parameters)
        np.testing.assert_equal(journal.weights, new_journal.weights)
if __name__ == '__main__':
    unittest.main()
| 35.449438 | 77 | 0.695721 | 407 | 3,155 | 5.144963 | 0.154791 | 0.103152 | 0.078797 | 0.105062 | 0.659503 | 0.604585 | 0.556351 | 0.34766 | 0.3085 | 0.26361 | 0 | 0.027755 | 0.212044 | 3,155 | 88 | 78 | 35.852273 | 0.814562 | 0.120444 | 0 | 0.172414 | 0 | 0 | 0.021676 | 0.018786 | 0 | 0 | 0 | 0 | 0.293103 | 1 | 0.068966 | false | 0 | 0.051724 | 0 | 0.137931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bd988ffccd66ee2fc706da6698e57d9432fb73c | 1,915 | py | Python | tests/test_database.py | zhieejhia93/watts | b1f75ba582003152d2171998d32959ccafce3102 | [
"MIT"
] | 5 | 2022-01-21T14:49:14.000Z | 2022-02-18T02:00:29.000Z | tests/test_database.py | zhieejhia93/watts | b1f75ba582003152d2171998d32959ccafce3102 | [
"MIT"
] | 18 | 2022-01-21T18:02:39.000Z | 2022-03-21T18:56:03.000Z | tests/test_database.py | zhieejhia93/watts | b1f75ba582003152d2171998d32959ccafce3102 | [
"MIT"
] | 5 | 2022-01-21T15:05:22.000Z | 2022-01-25T15:00:36.000Z | # SPDX-FileCopyrightText: 2022 UChicago Argonne, LLC
# SPDX-License-Identifier: MIT
from datetime import datetime
from pathlib import Path
import pytest
import watts
@pytest.fixture(autouse=True, scope='module')
def restore_database_path():
    db_path = watts.Database.get_default_path()
    yield
    watts.Database.set_default_path(db_path)


def get_result():
    return watts.ResultsOpenMC(
        params=watts.Parameters(value=1, lab='Argonne'),
        name='Workflow',
        time=datetime.now(),
        inputs=['geometry.xml'],
        outputs=['statepoint.50.h5'],
    )


def test_change_default_dir(run_in_tmpdir):
    watts.Database.set_default_path('new_database')

    # Make sure creating database uses new default path
    db = watts.Database()
    assert db.path.is_dir()
    assert db.path.name == 'new_database'

    # Creating another database should return the same object
    db2 = watts.Database()
    assert db2 is db


def test_specify_path(run_in_tmpdir):
    db = watts.Database(path='somewhere')
    assert db.path.is_dir()
    assert db.path.name == 'somewhere'

    # Calling Database() again with same arguments should give same instance
    db2 = watts.Database(path='somewhere')
    assert db2 is db

    # Shouldn't matter whether path is relative or absolute
    cwd = Path.cwd()
    db3 = watts.Database(path=cwd / 'somewhere')
    assert db3 is db


def test_add_results(run_in_tmpdir):
    db = watts.Database('tmp_db')
    db.add_result(get_result())
    db.add_result(get_result())

    # Basic sanity checks
    assert len(db) == 2
    for result in db:
        assert isinstance(result, watts.ResultsOpenMC)
        assert result.parameters['value'] == 1
        assert result.parameters['lab'] == 'Argonne'
        assert len(result.inputs) == 1
        assert len(result.outputs) == 1

    # Ensure database can be cleared
    db.clear()
    assert len(db) == 0
| 25.878378 | 76 | 0.681984 | 256 | 1,915 | 4.972656 | 0.390625 | 0.091909 | 0.037706 | 0.036135 | 0.201885 | 0.092694 | 0.051846 | 0.051846 | 0.051846 | 0 | 0 | 0.012574 | 0.210966 | 1,915 | 73 | 77 | 26.232877 | 0.829914 | 0.188512 | 0 | 0.130435 | 0 | 0 | 0.084142 | 0 | 0 | 0 | 0 | 0 | 0.304348 | 1 | 0.108696 | false | 0 | 0.086957 | 0.021739 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bda401ca1149d8175f0f1854298d7a95d9143d4 | 913 | py | Python | samplecode/sample_template_2.py | taijiji/NetworkAutomationTutorial | d83af889ec83b6492579e8bed56ac69a41dd8dba | [
"MIT"
] | 3 | 2018-01-09T01:34:23.000Z | 2018-12-02T19:54:51.000Z | samplecode/sample_template_2.py | taijiji/NetworkAutomationTutorial | d83af889ec83b6492579e8bed56ac69a41dd8dba | [
"MIT"
] | null | null | null | samplecode/sample_template_2.py | taijiji/NetworkAutomationTutorial | d83af889ec83b6492579e8bed56ac69a41dd8dba | [
"MIT"
] | null | null | null | import jinja2
# Read in the template file (a context manager ensures the file is closed)
with open("sample_template.jinja2", "r") as file:
    template_txt = file.read()
print(template_txt)
print("-----------------------------------")
# Example: create a single text file from one template
# Create the template object
template = jinja2.Environment().from_string(template_txt)
# Example: create multiple text files from one template
interfaces = [
{
"if_description" : "To_RouterA",
"if_name" : "fastethernet 1/1",
"ip4" : "192.168.0.1",
"ip4_subnet" : "255.255.255.0"
},
{
"if_description" : "To_RouterB",
"if_name" : "fastethernet 1/2",
"ip4" : "192.168.1.1",
"ip4_subnet" : "255.255.255.0"
},
{
"if_description" : "To_RouterC",
"if_name" : "fastethernet 1/3",
"ip4" : "192.168.2.1",
"ip4_subnet" : "255.255.255.0"
}
]
for interface in interfaces:
config_txt = template.render(interface)
print(config_txt)
 | 24.675676 | 57 | 0.576123 | 104 | 913 | 4.875 | 0.403846 | 0.071006 | 0.088757 | 0.112426 | 0.177515 | 0.177515 | 0.177515 | 0.138067 | 0.138067 | 0.138067 | 0 | 0.102418 | 0.230011 | 913 | 37 | 58 | 24.675676 | 0.618777 | 0.099671 | 0 | 0.103448 | 0 | 0 | 0.378973 | 0.069682 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.034483 | 0 | 0.034483 | 0.103448 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bdd9652088ba86a79c212d90a2c1f5d8943385b | 1,334 | py | Python | scripts/dump_help.py | YuanyuanNi/azure-cli | 63844964374858bfacd209bfe1b69eb456bd64ca | [
"MIT"
] | 3,287 | 2016-07-26T17:34:33.000Z | 2022-03-31T09:52:13.000Z | scripts/dump_help.py | YuanyuanNi/azure-cli | 63844964374858bfacd209bfe1b69eb456bd64ca | [
"MIT"
] | 19,206 | 2016-07-26T07:04:42.000Z | 2022-03-31T23:57:09.000Z | scripts/dump_help.py | YuanyuanNi/azure-cli | 63844964374858bfacd209bfe1b69eb456bd64ca | [
"MIT"
] | 2,575 | 2016-07-26T06:44:40.000Z | 2022-03-31T22:56:06.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import argparse
import json
import os
import sys
from azure.cli.core.application import Configuration
class Exporter(json.JSONEncoder):
    def default(self, o):  # pylint: disable=method-hidden
try:
return super(Exporter, self).default(o)
except TypeError:
return str(o)
parser = argparse.ArgumentParser(description='Command Table Parser')
parser.add_argument('--commands', metavar='N', nargs='+', help='Filter by command scope')
args = parser.parse_args()
cmd_set_names = args.commands
# ignore the params passed in now so they aren't used by the cli
sys.argv = sys.argv[:1]
config = Configuration([])
cmd_table = config.get_command_table()
cmd_list = sorted([cmd_name for cmd_name in cmd_table.keys() if cmd_set_names is None or cmd_name.split()[0] in cmd_set_names])
for cmd in cmd_list:
cmd_string = 'az {} -h'.format(cmd)
os.system(cmd_string)
print('\n===============================', flush=True)
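# Example invocation (the command-group names here are illustrative only):
#   python scripts/dump_help.py --commands vm network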
| 35.105263 | 127 | 0.608696 | 166 | 1,334 | 4.777108 | 0.60241 | 0.022699 | 0.041614 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001745 | 0.14093 | 1,334 | 37 | 128 | 36.054054 | 0.690227 | 0.32084 | 0 | 0 | 0 | 0 | 0.106785 | 0.036707 | 0.04 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.28 | 0 | 0.44 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bde1a83a3d23ca2e35d19bac3ef3a3de2cec35e | 7,522 | py | Python | source/conf.py | JaapKeuter/devicetree-specification | 7e1cc178c4698715e47baa23743111cf83d789ef | [
"Apache-2.0"
] | 1 | 2022-01-31T20:09:49.000Z | 2022-01-31T20:09:49.000Z | source/conf.py | JaapKeuter/devicetree-specification | 7e1cc178c4698715e47baa23743111cf83d789ef | [
"Apache-2.0"
] | null | null | null | source/conf.py | JaapKeuter/devicetree-specification | 7e1cc178c4698715e47baa23743111cf83d789ef | [
"Apache-2.0"
] | null | null | null | # SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
#
# Devicetree Specification documentation build configuration file, created by
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import time
import subprocess
# sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath('extensions'))
from DtsLexer import DtsLexer
def setup(app):
from sphinx.highlighting import lexers
lexers['dts'] = DtsLexer()
# -- Project information -----------------------------------------------------
project = u'Devicetree Specification'
copyright = u'2016,2017, devicetree.org'
author = u'devicetree.org'
# The short X.Y version
try:
version = str(subprocess.check_output(["git", "describe", "--dirty"]), 'utf-8').strip()
except Exception:  # e.g. git unavailable or not a git checkout
version = "unknown-rev"
# The full version, including alpha/beta/rc tags
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '1.2.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.graphviz'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%d %B %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# Include at the beginning of every source file that is read
with open('rst_prolog', 'rb') as pr:
rst_prolog = pr.read().decode('utf-8')
rst_epilog = """
.. |SpecVersion| replace:: {versionnum}
""".format(
versionnum = version,
)
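# With the epilog above, any .rst source in this project can use the
# |SpecVersion| substitution to insert the git-derived version string.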
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
numfig = True
highlight_language = 'none'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'github_user': 'devicetree-org',
'github_repo': 'devicetree-specification',
}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "devicetree-logo.svg"
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "devicetree-favicon.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
]
}
# Output file base name for HTML help builder.
htmlhelp_basename = 'DevicetreeSpecificationdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
'classoptions': ',oneside',
'babel': '\\usepackage[english]{babel}',
'sphinxsetup': 'hmargin=2cm',
# The paper size ('letterpaper' or 'a4paper').
#
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
'figure_align': 'H',
}
# Release numbers with a qualifier (ex. '-rc', '-pre') get a watermark.
if '-' in release:
latex_elements['preamble'] = '\\usepackage{draftwatermark}\\SetWatermarkScale{.45}\\SetWatermarkText{%s}' % (release)
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'devicetree-specification.tex', u'Devicetree Specification',
u'devicetree.org', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "devicetree-logo.png"
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'devicetree-specification', u'Devicetree Specification',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'devicetree-specification', u'Devicetree Specification',
author, 'DevicetreeSpecification', 'Devicetree hardware description language specification.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
| 30.208835 | 121 | 0.662058 | 953 | 7,522 | 5.174187 | 0.388248 | 0.041979 | 0.014196 | 0.023322 | 0.150071 | 0.099777 | 0.081322 | 0.081322 | 0.056175 | 0.056175 | 0 | 0.006223 | 0.166844 | 7,522 | 248 | 122 | 30.330645 | 0.780597 | 0.654214 | 0 | 0.024691 | 0 | 0 | 0.368231 | 0.109507 | 0 | 0 | 0 | 0.004032 | 0 | 1 | 0.012346 | false | 0 | 0.074074 | 0 | 0.08642 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bdee9ab6aa34bd2b378d13a5852f8c41af61fdc | 1,207 | py | Python | tests/jobs/examples/ex7_pandas_job_test.py | arthurprevot/yaetos | 5eba59538f8e53c3d7033b0af80a25828c24a43e | [
"Apache-2.0"
] | 8 | 2022-02-15T20:39:55.000Z | 2022-03-24T20:50:18.000Z | tests/jobs/examples/ex7_pandas_job_test.py | arthurprevot/yaetos | 5eba59538f8e53c3d7033b0af80a25828c24a43e | [
"Apache-2.0"
] | 2 | 2021-11-01T20:42:32.000Z | 2021-12-09T23:13:47.000Z | tests/jobs/examples/ex7_pandas_job_test.py | arthurprevot/yaetos | 5eba59538f8e53c3d7033b0af80a25828c24a43e | [
"Apache-2.0"
] | 2 | 2022-03-18T00:59:29.000Z | 2022-03-24T20:52:00.000Z | from jobs.examples.ex7_pandas_job import Job
import pandas as pd
class Test_Job(object):
def test_transform(self, get_pre_jargs):
some_events = pd.DataFrame([
{'session_id': 1234, 'action': 'searchResultPage', 'n_results': 1},
{'session_id': 1234, 'action': 'searchResultPage', 'n_results': 1},
{'session_id': 1235, 'action': 'searchResultPage', 'n_results': 1},
{'session_id': 1236, 'action': 'other', 'n_results': 1}, # noqa: E241
])
other_events = pd.DataFrame([
{'session_id': 1234, 'other': 1},
{'session_id': 1235, 'other': 1},
{'session_id': 1236, 'other': 1},
])
expected = [
{'session_id': 1234, 'count_events': 2},
{'session_id': 1235, 'count_events': 1},
            # The only difference from ex1_framework_job is session_id being str instead of int.
]
loaded_inputs = {'some_events': some_events, 'other_events': other_events}
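        # etl_no_io runs the transform step without any file I/O; sc and
        # sc_sql are None because (presumably) this pandas-based job needs
        # no Spark context.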
actual = Job(pre_jargs=get_pre_jargs(loaded_inputs.keys())).etl_no_io(sc=None, sc_sql=None, loaded_inputs=loaded_inputs)[0].to_dict(orient='records')
assert actual == expected
| 41.62069 | 157 | 0.593206 | 147 | 1,207 | 4.591837 | 0.435374 | 0.133333 | 0.074074 | 0.133333 | 0.272593 | 0.272593 | 0.202963 | 0.143704 | 0.143704 | 0.143704 | 0 | 0.056983 | 0.258492 | 1,207 | 28 | 158 | 43.107143 | 0.697207 | 0.068766 | 0 | 0.181818 | 0 | 0 | 0.242641 | 0 | 0 | 0 | 0 | 0 | 0.045455 | 1 | 0.045455 | false | 0 | 0.090909 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bdffba6e8eea17d503a52dbad47871ef2f944e0 | 1,275 | py | Python | problems/problem31.py | nolanwrightdev/blind-75-python | b92ef3449eb0143c760ddd339897a3f0a2972830 | [
"MIT"
] | 6 | 2020-02-01T23:29:51.000Z | 2022-02-20T20:46:56.000Z | problems/problem31.py | nolanwrightdev/blind-75-python | b92ef3449eb0143c760ddd339897a3f0a2972830 | [
"MIT"
] | null | null | null | problems/problem31.py | nolanwrightdev/blind-75-python | b92ef3449eb0143c760ddd339897a3f0a2972830 | [
"MIT"
] | null | null | null | '''
Blind Curated 75 - Problem 31
=============================
Binary Tree Maximum Path Sum
----------------------------
Given a non-empty binary tree, find the maximum path sum.
For this problem, a path is defined as any sequence of nodes from some starting
node to any node in the tree along the parent-child connections. The path must
contain at least one node and does not need to go through the root.
[→ LeetCode][1]
[1]: https://leetcode.com/problems/binary-tree-maximum-path-sum/
'''
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
'''
Recurse over the tree. For a given node, first calculate the maximum paths
through either of its children. Since the maximum path may branch across both
the left and right children, record the maximum path sum at each step, but
only return to the parent the longer of the two branches.
'''
def max_path_sum(self, root):
self.max_sum = float('-inf')
self.recurse(root)
return self.max_sum
def recurse(self, node):
if not node:
return 0
left = self.recurse(node.left)
right = self.recurse(node.right)
self.max_sum = max(self.max_sum,
max(left, 0) + node.val + max(right, 0))
return max(left, right, 0) + node.val
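# A quick sanity check, not part of the original solution: the tree below is
# the classic LeetCode example whose maximum path sum is 15 + 20 + 7 = 42.
if __name__ == '__main__':
  root = TreeNode(-10)
  root.left = TreeNode(9)
  root.right = TreeNode(20)
  root.right.left = TreeNode(15)
  root.right.right = TreeNode(7)
  assert Solution().max_path_sum(root) == 42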
| 27.12766 | 79 | 0.678431 | 205 | 1,275 | 4.17561 | 0.443902 | 0.064252 | 0.046729 | 0.049065 | 0.056075 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009709 | 0.192157 | 1,275 | 46 | 80 | 27.717391 | 0.820388 | 0.607059 | 0 | 0 | 0 | 0 | 0.008316 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9be16ad025272fbd89cb45d3e090102853d8024d | 799 | py | Python | algorithms/ar-kmp/python3/knuth_morris_pratt.py | NuclearCactus/FOSSALGO | eb66f3bdcd6c42c66e8fc7110a32ac021596ca66 | [
"MIT"
] | 59 | 2018-09-11T17:40:25.000Z | 2022-03-03T14:40:39.000Z | algorithms/ar-kmp/python3/knuth_morris_pratt.py | RitvikDayal/FOSSALGO | ae225a5fffbd78d0dff83fd7b178ba47bfd7a769 | [
"MIT"
] | 468 | 2018-08-28T17:04:29.000Z | 2021-12-03T15:16:34.000Z | algorithms/ar-kmp/python3/knuth_morris_pratt.py | RitvikDayal/FOSSALGO | ae225a5fffbd78d0dff83fd7b178ba47bfd7a769 | [
"MIT"
] | 253 | 2018-08-28T17:08:51.000Z | 2021-11-01T12:30:39.000Z | #Python program for KMP Algorithm
def LPSArray(pat, M, lps):
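    # lps[i] holds the length of the longest proper prefix of pat[0..i]
    # that is also a suffix of pat[0..i].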
lenn = 0
i = 1
while i < M:
        if pat[i] == pat[lenn]:
lenn += 1
lps[i] = lenn
i += 1
else:
if lenn != 0:
lenn = lps[lenn-1]
else:
lps[i] = 0
i += 1
def KMP(pat, txt):
M = len(pat)
N = len(txt)
# create lps[] that will hold the longest prefix suffix values for pattern
lps = [0]*M
j = 0
# Preprocess the pattern (calculate lps[] array)
LPSArray(pat, M, lps)
i = 0 # index for txt[]
while i < N:
if pat[j] == txt[i]:
i += 1
j += 1
if j == M:
print ("Found pattern at index " + str(i-j))
j = lps[j-1]
# mismatch after j matches
elif i < N and pat[j] != txt[i]:
if j != 0:
j = lps[j-1]
else:
i += 1
txt = "ABABDABACDABABCABAB"
pat = "ABABCABAB"
KMP(pat, txt)
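# Expected output for the example above (assuming it is run as-is):
#   Found pattern at index 10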
| 15.98 | 75 | 0.545682 | 141 | 799 | 3.092199 | 0.326241 | 0.022936 | 0.055046 | 0.068807 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030303 | 0.297872 | 799 | 49 | 76 | 16.306122 | 0.746881 | 0.241552 | 0 | 0.25 | 0 | 0 | 0.085142 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0 | 0 | 0.055556 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9be1ab82b8c28213b37b58836821800cd0be1586 | 658 | py | Python | numpy-001-ndarray/bilibili_stat.py | gemark/numpy-note | 3da01f5ef96c9421ac088ce067dcb916ff09c304 | [
"MIT"
] | null | null | null | numpy-001-ndarray/bilibili_stat.py | gemark/numpy-note | 3da01f5ef96c9421ac088ce067dcb916ff09c304 | [
"MIT"
] | null | null | null | numpy-001-ndarray/bilibili_stat.py | gemark/numpy-note | 3da01f5ef96c9421ac088ce067dcb916ff09c304 | [
"MIT"
] | null | null | null | import numpy as np
import sys, loadcsv, re
loader = loadcsv.loadcsv()
data = loader.load_from_file('./planguage_unfix.csv')
# 创建 ndarray 对象
fix_dataA = []
fix_dataB = []
for row in data:
if row[2] != '"vkchow"':
try:
play_num = float(row[4])
except:
play_num = 0.0
fix_dataA.append(play_num)
fix_dataB.append(row[6])
arrA = np.array(fix_dataA)
arrB = np.array(fix_dataB)
years = set()
for i in arrB:
d:str = i.split('/')[0]
if re.match(r'\d+', d) is not None:
if len(d) == 4 and d.find('.') == -1:
years.add(d)
years = list(years)
years.sort()
print(years)
| 20.5625 | 53 | 0.568389 | 103 | 658 | 3.514563 | 0.563107 | 0.066298 | 0.055249 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016701 | 0.272036 | 658 | 32 | 54 | 20.5625 | 0.73904 | 0.019757 | 0 | 0 | 0 | 0 | 0.052795 | 0.032609 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.08 | 0 | 0.08 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9be63bf00b194febff246721c3ba5b2812f45268 | 8,077 | py | Python | src/riaps/ctrl/ctrlcli.py | mbellabah/riaps-pycom | 6995d1b5d58c5f8b03b6eb75a5c5c34f9de00f5e | [
"Apache-2.0"
] | 6 | 2019-02-24T10:14:46.000Z | 2020-07-08T16:22:17.000Z | src/riaps/ctrl/ctrlcli.py | mbellabah/riaps-pycom | 6995d1b5d58c5f8b03b6eb75a5c5c34f9de00f5e | [
"Apache-2.0"
] | 36 | 2018-10-16T04:39:54.000Z | 2022-02-09T17:38:05.000Z | src/riaps/ctrl/ctrlcli.py | mbellabah/riaps-pycom | 6995d1b5d58c5f8b03b6eb75a5c5c34f9de00f5e | [
"Apache-2.0"
] | 8 | 2018-10-23T22:09:18.000Z | 2021-04-23T02:53:24.000Z | '''
Controller CMD
Created on Dec 6.2016
@author: riaps
'''
import gi
import rpyc
import time
import sys
import os
import zmq
from os.path import join
from _collections import OrderedDict
import re
import logging
import cmd
import traceback
import subprocess
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GLib, Gdk
from threading import RLock
cmdLock = RLock() # Global GUI lock
cmdClient = None
class ControlCLIClient(object):
'''
Controller GUI class
'''
def __init__(self, port, controller,script):
'''
Builds the GUI, connects it to the server (thread). The GUI is just another client of
the service.
'''
global cmdClient
cmdClient = self
self.logger = logging.getLogger(__name__)
self.port = port
self.controller = controller
self.context = controller.context
self.script = script
self.prompt = '$ '
(self.stdin,self.echo) = (sys.stdin,False) if self.script == '-' else (open(script,'r'),True)
self.stdout = sys.stdout
# self.conn = rpyc.connect(self.controller.hostAddress, port) # Local connection to the service
# GLib.io_add_watch(self.conn, 1, GLib.IO_IN, self.bg_server) # Register the callback with the service
# GLib.io_add_watch(self.stdin, 1, GLib.IO_IN, self.cmd_server)
# self.conn.root.login("*gui*", self.on_serverMessage) # Log in to the service
self.socket = self.context.socket(zmq.PULL)
self.socket.bind(self.controller.endpoint)
GLib.io_add_watch(self.socket.fileno(), 1, GLib.IO_IN, self.on_serverMessage)
GLib.io_add_watch(self.stdin, 1, GLib.IO_IN, self.cmd_server)
self.appDownLoaded = False
self.appFolder = None
self.appName = None
self.deplName = None
self.nodeIDDict = OrderedDict()
self.appStatusDict = OrderedDict()
self.loop = GLib.MainLoop()
class CtrlCmdShell(cmd.Cmd):
intro = 'Welcome to the ctrl shell. Type help or ? to list commands.\n'
def __init__(self,parent):
super(parent.CtrlCmdShell, self).__init__()
self.parent = parent
def do_f(self,arg):
'''Select app folder: f path'''
self.parent.cmdSelectFolder(arg)
def do_m(self,arg):
'''Select app model: m app.riaps'''
self.parent.cmdSelectApp(arg)
def do_d(self,arg):
'''Select deployment model: d app.depl '''
self.parent.cmdSelectDepl(arg)
def do_l(self,arg):
'''Launch app: g app'''
self.parent.cmdLaunchApp(arg)
def do_h(self,arg):
'''Halt app: h app'''
self.parent.cmdStopApp(arg)
def do_r(self,arg):
'''Remove app: r app'''
self.parent.cmdRemoveApp(arg)
def do_w(self,arg):
'''Wait: w sec'''
# self.parent.conn.poll_all(int(arg))
# poller = zmq.Poller()
# poller.register(self.parent.socket, zmq.POLLIN)
# socks = dict(poller.poll(int(arg)))
# if self.parent.socket in socks:
# self.parent.on_serverMessage()
time.sleep(int(arg))
def do_e(self,arg):
''' Echo argument: e message'''
self.stdout.write(arg + '\r\n')
self.stdout.flush()
def do_shell(self,arg):
''' Execute command: e ls -l'''
subprocess.call(arg.split())
def do_q(self,arg):
'''Quit program'''
self.parent.cmdQuit()
def run(self):
self.do_prompt()
self.shell = self.CtrlCmdShell(self)
self.loop.run()
# def bg_server(self, source=None, cond=None):
# '''Check if there is something pending from the server thread.'''
# if self.conn:
# self.conn.poll_all()
# return True
# else:
# return False
def do_prompt(self):
if not self.echo:
self.stdout.write(self.prompt)
self.stdout.flush()
def cmd_script(self,fname,fnames=[]):
fnames.append(fname)
with open(fname) as f:
for line in f.readlines():
self.cmd_line(line.rstrip('\r\n'),fnames)
def cmd_line(self,line,fnames=[]):
if self.echo: print('(cmd) %s' % line)
if not len(line): return
first = line[0]
if first == '#':
pass # Comment
elif first == '@':
line = line.lstrip('@ ')
if line in fnames:
pass # Error
else:
echo = self.echo
self.echo = True
self.cmd_script(line,fnames) # Load the script
self.echo = echo
else:
self.shell.onecmd(line)
def cmd_server(self, source=None, _cond=None):
if source == None: return
line = source.readline()
if not len(line):
line = 'EOF'
else:
line = line.rstrip('\r\n')
if line == 'EOF':
self.cmdQuit()
return False
else:
try:
self.cmd_line(line)
except:
traceback.print_exc()
self.stdout.flush()
source.flush()
self.do_prompt()
return True
def log(self,text):
global cmdLock
with cmdLock:
text = '\n> ' + text + '\n'
print(text)
def on_serverMessage(self, _channel=None, _cond=None):
'''
Callback used by the service thread(s): it prints a log message.
'''
while True:
try:
text = self.socket.recv_pyobj(flags=zmq.NOBLOCK)
self.log(text)
except zmq.error.ZMQError:
break
return True
def isAppOK(self):
aName = self.appName
dName = self.deplName
return (aName != None and aName != '' and dName != None and dName != '')
def cmdSelectApp(self,fileName):
if fileName != None:
# Check if file exists
self.appName = fileName
self.controller.compileApplication(fileName, self.appFolder)
def cmdClearApp(self):
'''
Clears the app entry.
'''
self.appName = ''
def cmdSelectDepl(self,fileName):
if fileName != None:
# Check if file exists
self.deplName = fileName
self.controller.compileDeployment(fileName)
def cmdClearDepl(self):
'''
Clears the deployment entry
'''
self.deplName = ''
def cmdSelectFolder(self,folderName):
if folderName != None:
# Check if folder exists
self.appFolder = folderName
self.controller.setAppFolder(folderName)
def cmdQuit(self):
'''
Quit the app. Forces a return from the CMD loop
'''
# self.conn.close()
self.socket.close()
self.loop.quit()
def cmdLaunchApp(self,appSelected):
self.controller.launchByName(appSelected)
def cmdStopApp(self,appSelected):
self.controller.haltByName(appSelected)
def cmdRemoveApp(self,appSelected):
self.controller.removeAppByName(appSelected)
def clearApplication(self):
self.cmdClearApp()
def clearDeployment(self):
self.cmdClearDepl()
def update_node_apps(self,clientName,value):
for appName in value.keys():
actors = value[appName]
for actorName in actors:
self.controller.addToLaunchList(clientName,appName,actorName)
| 30.026022 | 111 | 0.537452 | 885 | 8,077 | 4.830508 | 0.277966 | 0.030409 | 0.013099 | 0.013099 | 0.084678 | 0.066901 | 0.053801 | 0.044912 | 0.044912 | 0.044912 | 0 | 0.002312 | 0.357311 | 8,077 | 268 | 112 | 30.13806 | 0.821229 | 0.193265 | 0 | 0.101796 | 0 | 0 | 0.017326 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.191617 | false | 0.011976 | 0.08982 | 0 | 0.317365 | 0.017964 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9be716ba6a3ac29ba81f9326068b4f56fbd3cde8 | 3,519 | py | Python | torchbenchmark/models/fastNLP/reproduction/matching/matching_cntn.py | Chillee/benchmark | 91e1b2871327e44b9b7d24d173ca93720fb6565b | [
"BSD-3-Clause"
] | 2,693 | 2018-03-08T03:09:20.000Z | 2022-03-30T07:38:42.000Z | reproduction/matching/matching_cntn.py | stratoes/fastNLP | a8a458230489710ab945b37ec22e93315230f2de | [
"Apache-2.0"
] | 291 | 2018-07-21T07:43:17.000Z | 2022-03-07T13:06:58.000Z | reproduction/matching/matching_cntn.py | stratoes/fastNLP | a8a458230489710ab945b37ec22e93315230f2de | [
"Apache-2.0"
] | 514 | 2018-03-09T06:54:25.000Z | 2022-03-26T20:11:44.000Z | import argparse
import torch
from fastNLP.core import Trainer, Tester, Adam, AccuracyMetric, Const, CrossEntropyLoss
from fastNLP.embeddings import StaticEmbedding
from fastNLP.io.pipe.matching import SNLIPipe, RTEPipe, MNLIPipe, QNLIPipe
from reproduction.matching.model.cntn import CNTNModel
# define hyper-parameters
argument = argparse.ArgumentParser()
argument.add_argument('--embedding', choices=['glove', 'word2vec'], default='glove')
argument.add_argument('--batch-size-per-gpu', type=int, default=256)
argument.add_argument('--n-epochs', type=int, default=200)
argument.add_argument('--lr', type=float, default=1e-5)
argument.add_argument('--save-dir', type=str, default=None)
argument.add_argument('--cntn-depth', type=int, default=1)
argument.add_argument('--cntn-ns', type=int, default=200)
argument.add_argument('--cntn-k-top', type=int, default=10)
argument.add_argument('--cntn-r', type=int, default=5)
argument.add_argument('--dataset', choices=['qnli', 'rte', 'snli', 'mnli'], default='qnli')
arg = argument.parse_args()
# dataset dict
dev_dict = {
'qnli': 'dev',
'rte': 'dev',
'snli': 'dev',
'mnli': 'dev_matched',
}
test_dict = {
'qnli': 'dev',
'rte': 'dev',
'snli': 'test',
'mnli': 'dev_matched',
}
# set num_labels
if arg.dataset == 'qnli' or arg.dataset == 'rte':
num_labels = 2
else:
num_labels = 3
# load data set
if arg.dataset == 'snli':
data_bundle = SNLIPipe(lower=True, tokenizer='raw').process_from_file()
elif arg.dataset == 'rte':
data_bundle = RTEPipe(lower=True, tokenizer='raw').process_from_file()
elif arg.dataset == 'qnli':
data_bundle = QNLIPipe(lower=True, tokenizer='raw').process_from_file()
elif arg.dataset == 'mnli':
data_bundle = MNLIPipe(lower=True, tokenizer='raw').process_from_file()
else:
raise RuntimeError(f'NOT support {arg.task} task yet!')
print(data_bundle) # print details in data_bundle
# load embedding
if arg.embedding == 'word2vec':
embedding = StaticEmbedding(data_bundle.vocabs[Const.INPUTS(0)], model_dir_or_name='en-word2vec-300',
requires_grad=True)
elif arg.embedding == 'glove':
embedding = StaticEmbedding(data_bundle.vocabs[Const.INPUTS(0)], model_dir_or_name='en-glove-840b-300d',
requires_grad=True)
else:
raise ValueError(f'now we only support word2vec or glove embedding for cntn model!')
# define model
model = CNTNModel(embedding, ns=arg.cntn_ns, k_top=arg.cntn_k_top, num_labels=num_labels, depth=arg.cntn_depth,
r=arg.cntn_r)
print(model)
# define trainer
trainer = Trainer(train_data=data_bundle.datasets['train'], model=model,
optimizer=Adam(lr=arg.lr, model_params=model.parameters()),
loss=CrossEntropyLoss(),
batch_size=torch.cuda.device_count() * arg.batch_size_per_gpu,
n_epochs=arg.n_epochs, print_every=-1,
dev_data=data_bundle.datasets[dev_dict[arg.dataset]],
metrics=AccuracyMetric(), metric_key='acc',
device=[i for i in range(torch.cuda.device_count())],
check_code_level=-1)
# train model
trainer.train(load_best_model=True)
# define tester
tester = Tester(
data=data_bundle.datasets[test_dict[arg.dataset]],
model=model,
metrics=AccuracyMetric(),
batch_size=torch.cuda.device_count() * arg.batch_size_per_gpu,
device=[i for i in range(torch.cuda.device_count())]
)
# test model
tester.test()
| 35.545455 | 111 | 0.686275 | 470 | 3,519 | 4.97234 | 0.287234 | 0.047069 | 0.081301 | 0.039367 | 0.259307 | 0.259307 | 0.241335 | 0.195122 | 0.195122 | 0.195122 | 0 | 0.011624 | 0.168798 | 3,519 | 98 | 112 | 35.908163 | 0.78735 | 0.049446 | 0 | 0.175676 | 0 | 0 | 0.118812 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.081081 | 0 | 0.081081 | 0.040541 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9be76e97eb7f667a8a779708fb33a532807933b0 | 428 | py | Python | matplotlib/save-without-margins/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 140 | 2017-02-21T22:49:04.000Z | 2022-03-22T17:51:58.000Z | matplotlib/save-without-margins/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 5 | 2017-12-02T19:55:00.000Z | 2021-09-22T23:18:39.000Z | matplotlib/save-without-margins/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 79 | 2017-01-25T10:53:33.000Z | 2022-03-11T16:13:57.000Z |
# date: 2019.09.04
# https://stackoverflow.com/questions/57791698/how-to-save-an-image-from-a-custom-coco-dataset-with-its-annotations-overlaid-on/57792318#57792318
import skimage.io as io
import matplotlib.pyplot as plt
image = io.imread("https://homepages.cae.wisc.edu/~ece533/images/lena.png")
plt.imshow(image)
plt.axis('off')
plt.annotate("Lena", (10, 20))
plt.savefig("output.png", bbox_inches='tight', pad_inches=0)
| 28.533333 | 145 | 0.754673 | 69 | 428 | 4.652174 | 0.782609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.100251 | 0.067757 | 428 | 14 | 146 | 30.571429 | 0.704261 | 0.371495 | 0 | 0 | 0 | 0 | 0.288973 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9becaeff2a34b0cd0a770f43fc5a12a80395bfa1 | 2,029 | py | Python | analytics/management/commands/process_events.py | abrookins/quest | 302e985ed4702d977990bc5438c1a6d0521d236e | [
"MIT"
] | 38 | 2020-08-12T12:15:51.000Z | 2022-03-29T20:19:34.000Z | analytics/management/commands/process_events.py | abrookins/quest | 302e985ed4702d977990bc5438c1a6d0521d236e | [
"MIT"
] | 6 | 2021-03-19T10:51:50.000Z | 2021-09-22T19:34:49.000Z | analytics/management/commands/process_events.py | abrookins/quest | 302e985ed4702d977990bc5438c1a6d0521d236e | [
"MIT"
] | 6 | 2021-05-24T09:58:24.000Z | 2022-02-25T20:57:47.000Z | import datetime
from darksky.api import DarkSky
from django.core.management.base import BaseCommand
from django.conf import settings
from analytics.models import Event
darksky = DarkSky(settings.DARK_SKY_API_KEY)
class Command(BaseCommand):
help = 'Annotate events with cloud cover data'
def add_arguments(self, parser):
today = datetime.date.today()
default_start = today - datetime.timedelta(days=30)
default_end = today
parser.add_argument(
'--start',
type=lambda s: datetime.datetime.strptime(
s,
'%Y-%m-%d-%z'
),
default=default_start)
parser.add_argument(
'--end',
type=lambda s: datetime.datetime.strptime(
s,
'%Y-%m-%d-%z'
),
default=default_end)
def handle(self, *args, **options):
events = Event.objects.filter(
created_at__range=[options['start'],
options['end']])
for e in events.exclude(
data__latitude=None,
data__longitude=None).iterator(): # <1>
# Presumably we captured a meaningful latitude and
# longitude related to the event (perhaps the
# user's location).
latitude = float(e.data.get('latitude'))
longitude = float(e.data.get('longitude'))
if 'weather' not in e.data:
e.data['weather'] = {}
if 'cloud_cover' not in e.data['weather']:
forecast = darksky.get_time_machine_forecast(
latitude, longitude, e.created_at)
hourly = forecast.hourly.data[e.created_at.hour]
e.data['weather']['cloud_cover'] = \
hourly.cloud_cover
# This could alternatively be done with bulk_update().
# Doing so would in theory consume more memory but take
# less time.
e.save()
| 31.703125 | 67 | 0.551503 | 220 | 2,029 | 4.968182 | 0.481818 | 0.027447 | 0.032937 | 0.034767 | 0.098811 | 0.098811 | 0.098811 | 0.098811 | 0.098811 | 0.098811 | 0 | 0.002269 | 0.348448 | 2,029 | 63 | 68 | 32.206349 | 0.824508 | 0.114342 | 0 | 0.227273 | 0 | 0 | 0.08161 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.113636 | 0 | 0.204545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bedf7bea32410d4cc7340467516ed945e3847de | 6,585 | py | Python | attack_inc.py | wufanyou/EPGD | 92f8cb1a0d29329e1a6dff5209533fdf01a4270e | [
"MIT"
] | 4 | 2019-07-17T13:57:19.000Z | 2021-03-03T07:34:16.000Z | attack_inc.py | wufanyou/EPGD | 92f8cb1a0d29329e1a6dff5209533fdf01a4270e | [
"MIT"
] | 3 | 2020-10-13T21:53:38.000Z | 2021-07-24T13:08:19.000Z | attack_inc.py | wufanyou/EPGD | 92f8cb1a0d29329e1a6dff5209533fdf01a4270e | [
"MIT"
] | 1 | 2019-11-05T02:39:05.000Z | 2019-11-05T02:39:05.000Z | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import warnings
warnings.filterwarnings("ignore")
import tensorflow as tf
from tensorflow.contrib.slim.nets import resnet_v1,inception
import inception_resnet_v2
import vgg
import numpy as np
#from scipy.misc import imread
import pandas as pd
from PIL import Image
import sys
import argparse
config = tf.ConfigProto()
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
slim = tf.contrib.slim
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--alpha', help='per step decrease',type=float, default=300)
parser.add_argument('-i', '--num_iter', help='num_iter', type=int, default=40)
parser.add_argument('-rw', '--resnet_weight',type=float, default=0.8)
parser.add_argument('-vw', '--vgg_weight',type=float, default=1.0)
parser.add_argument('-c', '--confidence',type=float, default=0.5)
parser.add_argument('-d','--weight_decay',type=float, default=1.0)
parser.add_argument('-mm','--min_max_range',type=float, default=0.3)
parser.add_argument("-ir", "--is_round", help="round x per round",action="store_true")
parser.add_argument('-mina','--mina',type=float, default=100.0)
parser.add_argument('-maxa','--maxa',type=float, default=400.0)
parser.add_argument("-iw", "--is_wrong", help="wrong",action="store_true")
parser.add_argument("-norm", "--norm", help="use norm",action="store_true")
parser.add_argument("-im", "--is_mask", help="use mask",action="store_true")
parser.add_argument('-ms','--mask_size',type=int, default=5)
parser.add_argument('--input_dir',type=str, default='./dev_data/')
parser.add_argument('--output_dir',type=str, default='./output/')
parser.add_argument('--dev_dir',type=str, default='./dev_data/')
args = parser.parse_args()
print(args,file=open("output.txt", "a+"))
batch_shape=[1,299,299,3]
num_iter= args.num_iter
model_checkpoint_map = {
'resnet_v1_50': os.path.join('./checkpoints/', 'resnet_v1_50','model.ckpt-49800'),
'vgg_16': os.path.join('./checkpoints/', 'vgg_16', 'vgg_16.ckpt'),
'InceptionV3': os.path.join('./checkpoints/', 'inception_v3', 'inception_v3.ckpt'),
}
dev=pd.read_csv(os.path.join(args.dev_dir,'dev.csv'))
if args.is_wrong:
try:
wrong_list=pd.read_csv('./wrong_list.csv',header=None)
dev=dev[dev.filename.isin(wrong_list[0])].reset_index(drop=True)
except:
sys.exit(0)
def Graph(x, y,raw_image):
num_classes=110
batch_size=1
weight = [args.resnet_weight,args.vgg_weight]
x_int=x/255*2-1
with slim.arg_scope(inception.inception_v3_arg_scope()):
logits_inception_v3, end_points_inception_v3 = inception.inception_v3(x_int, num_classes=110,reuse=tf.AUTO_REUSE, is_training=False, scope='InceptionV3')
one_hot = tf.one_hot(y, num_classes)
logits = logits_inception_v3
cross_entropy = tf.losses.softmax_cross_entropy(one_hot,logits,label_smoothing=0.0,weights=1.0)
grad = tf.gradients([cross_entropy], [x])[0]
if args.norm:
grad = grad/tf.norm(grad)
else:
#grad = grad/tf.reshape(tf.norm(grad,axis=-1),[1,299,299,1])
grad = tf.transpose(grad,[0,3,1,2])
grad = grad/tf.reshape(tf.norm(tf.reshape(grad,[batch_size,3,-1]),axis=2),[batch_size,3,1,1])
grad = tf.transpose(grad,[0,2,3,1])
if args.is_mask:
mask = tf.ones(shape=[int(299-2*args.mask_size),int(299-2*args.mask_size),3])
mask = tf.pad(mask,tf.constant([[args.mask_size,args.mask_size],[args.mask_size,args.mask_size],[0,0]]))
grad = grad*mask
alpha = args.maxa
x = x - alpha * grad#*tf.concat([tf.ones([299,299,1]),tf.ones([299,299,1]),tf.zeros([299,299,1])],-1)
x = tf.clip_by_value(x, 0, 255)
out_x = x-raw_image
out_x = tf.floor(tf.abs(out_x))*tf.sign(out_x)+raw_image
out_x = tf.round(tf.clip_by_value(out_x, 0, 255))
return x,out_x
def Eval(x_img,y):
input_image=2*x_img/255-1
with slim.arg_scope(inception.inception_v3_arg_scope()):
logits_inception_v3, end_points_inception_v3 = inception.inception_v3(input_image, num_classes=110, is_training=False, scope='InceptionV3',reuse=tf.AUTO_REUSE)
inc_label=tf.argmax(end_points_inception_v3['Predictions'][0],-1)
y_inc=end_points_inception_v3['Predictions'][0][y[0]]
return inc_label,y_inc
# Define Graph
x=tf.placeholder(tf.float32, shape=[1,299,299,3])
y=tf.placeholder(tf.int32, shape=1)
raw_image_placeholder=tf.placeholder(tf.float32, shape=[1,299,299,3])
x_adv,out_x = Graph(x,y,raw_image_placeholder)
s1 = tf.train.Saver(slim.get_model_variables(scope='InceptionV3'))
x_img=tf.placeholder(tf.float32, shape=[1,299,299,3])
inc_label,y_inc=Eval(x_img,y)
with tf.Session(config=config) as sess:
s1.restore(sess, model_checkpoint_map['InceptionV3'])
success=0
d_counter=0
for _,row in dev.iterrows():
d_counter+=1
raw_image = np.array(Image.open(os.path.join(args.input_dir,row.filename), mode='r'))
raw_image = raw_image.reshape(1,299,299,3)
output = raw_image
output_image = Image.fromarray(output.astype(np.uint8)[0])
eval_input=np.array(output_image).astype(np.float32).reshape([1,299,299,3])
inc_l,inc_pred=sess.run([inc_label,y_inc],feed_dict={x_img:eval_input,y:[row.targetedLabel]})
output = raw_image
for _ in range(num_iter):
output = np.array(output).astype(np.float32).reshape(1,299,299,3)
output,output_image = sess.run([x_adv,out_x],feed_dict={x:output,y:[row.targetedLabel],
raw_image_placeholder:raw_image})
#output_image = Image.fromarray(np.round(output_image).astype(np.uint8)[0])
output_image = Image.fromarray(output_image.astype(np.uint8)[0])
eval_input=np.array(output_image).astype(np.float32).reshape([1,299,299,3])
inc_l,inc_pred=sess.run([inc_label,y_inc],feed_dict={x_img:eval_input,y:[row.targetedLabel]})
if (inc_l==row.targetedLabel):
success+=1
output_image.save(os.path.join(args.output_dir,row.filename), format='PNG')
message = '{}/{} {} {}'.format(success,d_counter,inc_l,row.targetedLabel).ljust(50)
print(message,end='\r')
break
else:
message = '{}/{} {} {}'.format(success,d_counter,inc_l,row.targetedLabel).ljust(50)
print(message,end='\r')
else:
output_image.save(os.path.join(args.output_dir,row.filename), format='PNG')
print(end='\n')
| 45.10274 | 167 | 0.679575 | 1,034 | 6,585 | 4.118956 | 0.216634 | 0.035924 | 0.067856 | 0.015027 | 0.395633 | 0.362996 | 0.233153 | 0.233153 | 0.209674 | 0.17375 | 0 | 0.045187 | 0.149734 | 6,585 | 145 | 168 | 45.413793 | 0.715485 | 0.038724 | 0 | 0.137097 | 0 | 0 | 0.104506 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016129 | false | 0 | 0.08871 | 0 | 0.120968 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bf12c85d0406305d077774122d9a0db3710456a | 11,438 | py | Python | Code/.ipynb_checkpoints/5. WGAN_GP-checkpoint.py | hyunchangyi/Stock-price-prediction-using-GAN | d9be065dda16dca76af1faf3c2e28f443cb2bf96 | [
"MIT"
] | null | null | null | Code/.ipynb_checkpoints/5. WGAN_GP-checkpoint.py | hyunchangyi/Stock-price-prediction-using-GAN | d9be065dda16dca76af1faf3c2e28f443cb2bf96 | [
"MIT"
] | null | null | null | Code/.ipynb_checkpoints/5. WGAN_GP-checkpoint.py | hyunchangyi/Stock-price-prediction-using-GAN | d9be065dda16dca76af1faf3c2e28f443cb2bf96 | [
"MIT"
] | null | null | null | import time
import os
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from pickle import load
from tensorflow.keras.losses import mean_squared_error
from tensorflow.keras.layers import GRU, Dense, Flatten, Conv1D, BatchNormalization, LeakyReLU, ELU, ReLU
from tensorflow.keras import Sequential, regularizers
from tensorflow.python.client import device_lib
# Load data
X_train = np.load("X_train.npy", allow_pickle=True)
y_train = np.load("y_train.npy", allow_pickle=True)
X_test = np.load("X_test.npy", allow_pickle=True)
y_test = np.load("y_test.npy", allow_pickle=True)
yc_train = np.load("yc_train.npy", allow_pickle=True)
yc_test = np.load("yc_test.npy", allow_pickle=True)
# Define the generator
def Generator(input_dim, output_dim, feature_size) -> tf.keras.models.Model:
model = Sequential()
model.add(GRU(units=256,
return_sequences=True,
input_shape=(input_dim, feature_size),
recurrent_dropout=0.02,
recurrent_regularizer=regularizers.l2(1e-3)))
model.add(GRU(units=128,
#return_sequences=True,
recurrent_dropout=0.02,
recurrent_regularizer=regularizers.l2(1e-3)))
#model.add(Dense(128,
# kernel_regularizer=regularizers.l2(1e-3)))
model.add(Dense(64, kernel_regularizer=regularizers.l2(1e-3)))
model.add(Dense(32, kernel_regularizer=regularizers.l2(1e-3)))
#model.add(Dense(16, kernel_regularizer=regularizers.l2(1e-3)))
#model.add(Dense(8, kernel_regularizer=regularizers.l2(1e-3)))
model.add(Dense(units=output_dim))
return model
# Define the discriminator
def Discriminator() -> tf.keras.models.Model:
model = tf.keras.Sequential()
model.add(Conv1D(32, input_shape=(4, 1), kernel_size=3, strides=2, padding="same", activation=LeakyReLU(alpha=0.01)))
model.add(Conv1D(64, kernel_size=3, strides=2, padding="same", activation=LeakyReLU(alpha=0.01)))
model.add(Conv1D(128, kernel_size=3, strides=2, padding="same", activation=LeakyReLU(alpha=0.01)))
model.add(Flatten())
model.add(Dense(220, use_bias=True))
model.add(LeakyReLU())
model.add(Dense(220, use_bias=True))
model.add(ReLU())
model.add(Dense(1))
return model
# Train WGAN-GP model
class GAN():
def __init__(self, generator, discriminator):
super(GAN, self).__init__()
self.d_optimizer = tf.keras.optimizers.Adam(0.0001)
self.g_optimizer = tf.keras.optimizers.Adam(0.0001)
self.generator = generator
self.discriminator = discriminator
self.batch_size = 128
checkpoint_dir = '../training_checkpoints'
self.checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
self.checkpoint = tf.train.Checkpoint(generator_optimizer=self.g_optimizer,
discriminator_optimizer=self.d_optimizer,
generator=self.generator,
discriminator=self.discriminator)
def gradient_penalty(self, batch_size, real_output, fake_output):
""" Calculates the gradient penalty.
This loss is calculated on an interpolated image
and added to the discriminator loss.
"""
# get the interpolated data
alpha = tf.random.normal([batch_size, 4, 1], 0.0, 1.0)
diff = fake_output - tf.cast(real_output, tf.float32)
interpolated = tf.cast(real_output, tf.float32) + alpha * diff
with tf.GradientTape() as gp_tape:
gp_tape.watch(interpolated)
# 1. Get the discriminator output for this interpolated image.
pred = self.discriminator(interpolated, training=True)
# 2. Calculate the gradients w.r.t to this interpolated image.
grads = gp_tape.gradient(pred, [interpolated])[0]
# 3. Calcuate the norm of the gradients
norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2]))
gp = tf.reduce_mean((norm - 1.0) ** 2)
return gp
def train_step(self, data):
real_input, real_price, yc = data
batch_size = tf.shape(real_input)[0]
for _ in range(1):
with tf.GradientTape() as d_tape:
# Train the discriminator
# generate fake output
generated_data = self.generator(real_input, training=True)
# reshape the data
generated_data_reshape = tf.reshape(generated_data, [generated_data.shape[0], generated_data.shape[1], 1])
fake_output = tf.concat([generated_data_reshape, tf.cast(yc, tf.float32)], axis=1)
real_y_reshape = tf.reshape(real_price, [real_price.shape[0], real_price.shape[1], 1])
real_output = tf.concat([tf.cast(real_y_reshape, tf.float32), tf.cast(yc, tf.float32)], axis=1)
# Get the logits for the fake images
D_real = self.discriminator(real_output, training=True)
# Get the logits for real images
D_fake = self.discriminator(fake_output, training=True)
# Calculate discriminator loss using fake and real logits
real_loss = tf.cast(tf.reduce_mean(D_real), tf.float32)
fake_loss = tf.cast(tf.reduce_mean(D_fake), tf.float32)
d_cost = fake_loss-real_loss
# Calculate the gradientjiu penalty
gp = self.gradient_penalty(batch_size, real_output, fake_output)
# Add the gradient penalty to the original discriminator loss
d_loss = d_cost + gp * 10
d_grads = d_tape.gradient(d_loss, self.discriminator.trainable_variables)
self.d_optimizer.apply_gradients(zip(d_grads, self.discriminator.trainable_variables))
for _ in range(3):
with tf.GradientTape() as g_tape:
# Train the generator
# generate fake output
generated_data = self.generator(real_input, training=True)
# reshape the data
generated_data_reshape = tf.reshape(generated_data, [generated_data.shape[0], generated_data.shape[1], 1])
fake_output = tf.concat([generated_data_reshape, tf.cast(yc, tf.float32)], axis=1)
# Get the discriminator logits for fake images
G_fake = self.discriminator(fake_output, training=True)
# Calculate the generator loss
g_loss = -tf.reduce_mean(G_fake)
g_grads = g_tape.gradient(g_loss, self.generator.trainable_variables)
self.g_optimizer.apply_gradients(zip(g_grads, self.generator.trainable_variables))
return real_price, generated_data, {'d_loss': d_loss, 'g_loss': g_loss}
def train(self, X_train, y_train, yc, epochs):
data = X_train, y_train, yc
train_hist = {}
train_hist['D_losses'] = []
train_hist['G_losses'] = []
train_hist['per_epoch_times'] = []
train_hist['total_ptime'] = []
for epoch in range(epochs):
start = time.time()
real_price, fake_price, loss = self.train_step(data)
G_losses = []
D_losses = []
Real_price = []
Predicted_price = []
D_losses.append(loss['d_loss'].numpy())
G_losses.append(loss['g_loss'].numpy())
Predicted_price.append(fake_price)
Real_price.append(real_price)
# Save the model every 15 epochs
if (epoch + 1) % 15 == 0:
tf.keras.models.save_model(generator, 'gen_GRU_model_%d.h5' % epoch)
self.checkpoint.save(file_prefix=self.checkpoint_prefix)
print('epoch', epoch+1, 'd_loss', loss['d_loss'].numpy(), 'g_loss', loss['g_loss'].numpy())
# For printing loss
epoch_end_time = time.time()
per_epoch_ptime = epoch_end_time - start
train_hist['D_losses'].append(D_losses)
train_hist['G_losses'].append(G_losses)
train_hist['per_epoch_times'].append(per_epoch_ptime)
# Reshape the predicted result & real
Predicted_price = np.array(Predicted_price)
Predicted_price = Predicted_price.reshape(Predicted_price.shape[1], Predicted_price.shape[2])
Real_price = np.array(Real_price)
Real_price = Real_price.reshape(Real_price.shape[1], Real_price.shape[2])
# Plot the loss
plt.plot(train_hist['D_losses'], label='D_loss')
plt.plot(train_hist['G_losses'], label='G_loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.savefig('train_loss.png')
print("REAL", Real_price.shape)
print(Real_price)
print("PREDICTED", Predicted_price.shape)
print(Predicted_price)
return Predicted_price, Real_price, np.sqrt(mean_squared_error(Real_price, Predicted_price)) / np.mean(Real_price)
if __name__ == '__main__':
input_dim = X_train.shape[1]
feature_size = X_train.shape[2]
output_dim = y_train.shape[1]
epoch = 100
generator = Generator(X_train.shape[1], output_dim, X_train.shape[2])
discriminator = Discriminator()
gan = GAN(generator, discriminator)
Predicted_price, Real_price, RMSPE = gan.train(X_train, y_train, yc_train, epoch)
# %% --------------------------------------- Plot the result -----------------------------------------------------
# Rescale back the real dataset
X_scaler = load(open('X_scaler.pkl', 'rb'))
y_scaler = load(open('y_scaler.pkl', 'rb'))
train_predict_index = np.load("index_train.npy", allow_pickle=True)
test_predict_index = np.load("index_test.npy", allow_pickle=True)
print("----- predicted price -----", Predicted_price)
rescaled_Real_price = y_scaler.inverse_transform(Real_price)
rescaled_Predicted_price = y_scaler.inverse_transform(Predicted_price)
print("----- rescaled predicted price -----", rescaled_Predicted_price)
print("----- SHAPE rescaled predicted price -----", rescaled_Predicted_price.shape)
predict_result = pd.DataFrame()
for i in range(rescaled_Predicted_price.shape[0]):
y_predict = pd.DataFrame(rescaled_Predicted_price[i], columns=["predicted_price"], index=train_predict_index[i:i+output_dim])
predict_result = pd.concat([predict_result, y_predict], axis=1, sort=False)
real_price = pd.DataFrame()
for i in range(rescaled_Real_price.shape[0]):
y_train = pd.DataFrame(rescaled_Real_price[i], columns=["real_price"], index=train_predict_index[i:i+output_dim])
real_price = pd.concat([real_price, y_train], axis=1, sort=False)
predict_result['predicted_mean'] = predict_result.mean(axis=1)
real_price['real_mean'] = real_price.mean(axis=1)
# Plot the predicted result
plt.figure(figsize=(16, 8))
plt.plot(real_price["real_mean"])
plt.plot(predict_result["predicted_mean"], color = 'r')
plt.xlabel("Date")
plt.ylabel("Stock price")
plt.legend(("Real price", "Predicted price"), loc="upper left", fontsize=16)
plt.title("The result of Training", fontsize=20)
plt.show()
plt.savefig('train_plot.png')
# Calculate RMSE
predicted = predict_result["predicted_mean"]
real = real_price["real_mean"]
For_MSE = pd.concat([predicted, real], axis = 1)
RMSE = np.sqrt(mean_squared_error(predicted, real))
print('-- RMSE -- ', RMSE)
| 43.992308 | 129 | 0.649065 | 1,502 | 11,438 | 4.717044 | 0.157124 | 0.04319 | 0.016514 | 0.020325 | 0.327452 | 0.256034 | 0.217502 | 0.194354 | 0.16796 | 0.109386 | 0 | 0.019557 | 0.226613 | 11,438 | 259 | 130 | 44.162162 | 0.78137 | 0.113307 | 0 | 0.087912 | 0 | 0 | 0.066984 | 0.002282 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032967 | false | 0 | 0.06044 | 0 | 0.126374 | 0.049451 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bf3daa72de60c99abd7d87091db4ce8d5d8daf3 | 15,529 | py | Python | src/signalalign/tests/test_hiddenMarkovModel.py | kishwarshafin/signalAlign | c9b7b9232ef6fb76aa427670981c969b887f4860 | [
"MIT"
] | null | null | null | src/signalalign/tests/test_hiddenMarkovModel.py | kishwarshafin/signalAlign | c9b7b9232ef6fb76aa427670981c969b887f4860 | [
"MIT"
] | null | null | null | src/signalalign/tests/test_hiddenMarkovModel.py | kishwarshafin/signalAlign | c9b7b9232ef6fb76aa427670981c969b887f4860 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Place unit tests for hiddenMarkovModel.py
"""
########################################################################
# File: test_hiddenMarkovModel.py
# executable: test_hiddenMarkovModel.py
# Purpose: test hiddenMarkovModel
#
# Author: Andrew Bailey
# History: 5/31/2018 Created
########################################################################
import unittest
import os
import numpy as np
import tempfile
from shutil import copyfile
from signalalign.hiddenMarkovModel import *
from py3helpers.utils import all_string_permutations, get_random_string, list_dir
class HiddenMarkovTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(HiddenMarkovTests, cls).setUpClass()
cls.HOME = '/'.join(os.path.abspath(__file__).split("/")[:-4])
cls.model_file = os.path.join(cls.HOME, "models/testModelR9p4_5mer_acgt_RNA.model")
cls.r9_model_file = os.path.join(cls.HOME, "models/testModelR9_acegt_complement.model")
cls.model = HmmModel(ont_model_file=cls.model_file)
cls.expectation_file = os.path.join(cls.HOME,
"tests/test_expectation_files/4f9a316c-8bb3-410a-8cfc-026061f7e8db.template.expectations.tsv")
cls.nanopolish_model = os.path.join(cls.HOME, "models/r9.4_450bps.nucleotide.6mer.template.model")
cls.cpg_nanopolish_model = os.path.join(cls.HOME, "models/r9.4_450bps.cpg.6mer.template.model")
def test_get_kmer_index(self):
all_kmers = [x for x in all_string_permutations("ATGC", 5)]
for x in range(10):
kmer = get_random_string(5, chars="ATGC")
self.assertEqual(all_kmers.index(kmer), self.model.get_kmer_index(kmer))
def test_log_event_mean_gaussian_probability_match(self):
def emissions_signal_logGaussPdf(x, mu, sigma):
log_inv_sqrt_2pi = -0.91893853320467267
l_sigma = np.log(sigma)
a = (x - mu) / sigma
# // returns Log-space
return log_inv_sqrt_2pi - l_sigma + (-0.5 * a * a)
for x in range(10):
kmer = get_random_string(5, chars="ATGC")
mu, sigma = self.model.get_event_mean_gaussian_parameters(kmer)
prob = self.model.log_event_mean_gaussian_probability_match(50, kmer)
self.assertAlmostEqual(prob, emissions_signal_logGaussPdf(50, mu, sigma))
def test_log_event_sd_inv_gaussian_probability_match(self):
def emissions_signal_logInvGaussPdf(eventNoise, modelNoiseMean, modelNoiseLambda):
l_twoPi = 1.8378770664093453 # // log(2*pi)
l_eventNoise = np.log(eventNoise)
a = (eventNoise - modelNoiseMean) / modelNoiseMean
l_modelNoseLambda = np.log(modelNoiseLambda)
# // returns Log-space
return (l_modelNoseLambda - l_twoPi - 3 * l_eventNoise - (modelNoiseLambda * a * a / eventNoise)) / 2
for x in range(10):
kmer = get_random_string(5, chars="ATGC")
mu, lambda1 = self.model.get_event_sd_inv_gaussian_parameters(kmer)
prob = self.model.log_event_sd_inv_gaussian_probability_match(2, kmer)
self.assertAlmostEqual(prob, emissions_signal_logInvGaussPdf(2, mu, lambda1))
def test_get_event_mean_gaussian_parameters(self):
for x in range(10):
kmer = get_random_string(5, chars="ATGC")
mu, sigma = self.model.get_event_mean_gaussian_parameters(kmer)
mu, sigma = self.model.get_event_mean_gaussian_parameters("TTTTT")
self.assertEqual(self.model.event_model["means"][-1], mu)
self.assertEqual(self.model.event_model["SDs"][-1], sigma)
def test_get_event_sd_inv_gaussian_parameters(self):
for x in range(10):
kmer = get_random_string(5, chars="ATGC")
mu, sigma = self.model.get_event_sd_inv_gaussian_parameters(kmer)
mean, lambda1 = self.model.get_event_sd_inv_gaussian_parameters("TTTTT")
self.assertEqual(self.model.event_model["noise_means"][-1], mean)
self.assertEqual(self.model.event_model["noise_lambdas"][-1], lambda1)
def test_HmmModel(self):
hdp_model_file = os.path.join(self.HOME, "models/testModelR9p4_5mer_acgt_RNA.model")
model = HmmModel(ont_model_file=hdp_model_file)
self.assertIsInstance(model, HmmModel)
model = HmmModel(ont_model_file=self.model_file)
self.assertIsInstance(model, HmmModel)
def test_add_expectations_file(self):
model = HmmModel(ont_model_file=self.r9_model_file)
model.add_expectations_file(self.expectation_file)
model = HmmModel(ont_model_file=self.r9_model_file)
model.add_expectations_file(self.expectation_file)
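        # self.model is the RNA 5-mer model, so its header presumably does not match these R9 expectations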
self.assertRaises(AssertionError, self.model.add_expectations_file, self.expectation_file)
def test_check_header_line(self):
self.model.check_header_line(['3', '4', "ACGT", '5'], "path")
        self.assertRaises(AssertionError, self.model.check_header_line, ['1', '4', "ACGT", '5'], "something")
        self.assertRaises(AssertionError, self.model.check_header_line, ['3', '2', "ACGT", '5'], "something")
def test_add_and_normalize_expectations(self):
with tempfile.TemporaryDirectory() as tempdir:
            test_expectations_file = os.path.join(tempdir, "fake.expectations.tsv")
            copyfile(self.expectation_file, test_expectations_file)
            files = [test_expectations_file]
model = HmmModel(ont_model_file=self.r9_model_file)
model.add_and_normalize_expectations(files, os.path.join(tempdir, "fake.hmm"))
def test_normalize_transitions_expectations(self):
hdp_model_file = os.path.join(self.HOME, "models/testModelR9_acegt_complement.model")
model = HmmModel(ont_model_file=hdp_model_file)
model.add_expectations_file(self.expectation_file)
model.add_expectations_file(self.expectation_file)
model.add_expectations_file(self.expectation_file)
model.normalize_transitions_expectations()
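        # transitions are stored as a flattened state_number x state_number matrix; each row should sum to 1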
for from_state in range(model.state_number):
i = model.state_number * from_state
self.assertAlmostEqual(sum(model.transitions_expectations[i:i + model.state_number]), 1)
def test_write(self):
with tempfile.TemporaryDirectory() as tempdir:
test_hmm_file = os.path.join(tempdir, "fake.model.hmm")
model = HmmModel(ont_model_file=self.model_file)
self.assertRaises(AssertionError, model.write, test_hmm_file)
model.normalized = True
model.write(test_hmm_file)
def test_normalise(self):
model = HmmModel(ont_model_file=self.r9_model_file)
model.add_expectations_file(self.expectation_file)
model.add_expectations_file(self.expectation_file)
model.add_expectations_file(self.expectation_file)
model.normalize(update_transitions=True, update_emissions=False)
model.normalize(update_emissions=True, update_transitions=True)
self.assertTrue(model.normalized)
def test_reset_assignments(self):
model = HmmModel(ont_model_file=self.r9_model_file)
model.add_expectations_file(self.expectation_file)
model.reset_assignments()
self.assertSequenceEqual(model.event_assignments, [])
self.assertSequenceEqual(model.kmer_assignments, [])
def test_HDP_model_load(self):
hdp_model = os.path.join(self.HOME, "models/template_RNA.singleLevelFixedCanonical.nhdp")
hdp_handle = HmmModel(ont_model_file=self.model_file, hdp_model_file=hdp_model)
kmer = "AACAT"
kmer_id = 19
self.assertEqual(kmer_id, hdp_handle.get_kmer_index(kmer))
query_x = 83.674161662792542
x = hdp_handle.linspace
y = hdp_handle.all_posterior_pred[kmer_id]
slope = hdp_handle.all_spline_slopes[kmer_id]
length = hdp_handle.grid_length
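        # evaluate the precomputed HDP posterior predictive spline at query_x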
prob = hdp_handle.grid_spline_interp(query_x, x, y, slope, length)
self.assertEqual(prob, 0.29228949476718646)
query_x = 81.55860779063407
prob = hdp_handle.grid_spline_interp(query_x, x, y, slope, length)
self.assertEqual(prob, 0.12927539337648492)
kmer = "CATTT"
kmer_id = 319
self.assertEqual(kmer_id, hdp_handle.get_kmer_index(kmer))
y = hdp_handle.all_posterior_pred[kmer_id]
slope = hdp_handle.all_spline_slopes[kmer_id]
query_x = 80.605230545769458
prob = hdp_handle.grid_spline_interp(query_x, x, y, slope, length)
self.assertEqual(prob, 0.12328410496683605)
def test_get_hdp_probability(self):
hdp_model = os.path.join(self.HOME, "models/template_RNA.singleLevelFixedCanonical.nhdp")
hdp_handle = HmmModel(ont_model_file=self.model_file, hdp_model_file=hdp_model)
query_x = 83.674161662792542
prob = hdp_handle.get_hdp_probability("AACAT", query_x)
self.assertEqual(prob, 0.29228949476718646)
query_x = 81.55860779063407
prob = hdp_handle.get_hdp_probability("AACAT", query_x)
self.assertEqual(prob, 0.12927539337648492)
query_x = 80.605230545769458
prob = hdp_handle.get_hdp_probability("CATTT", query_x)
self.assertEqual(prob, 0.12328410496683605)
def test_get_new_linspace_hdp_probability_distribution(self):
hdp_model = os.path.join(self.HOME, "models/template_RNA.singleLevelFixedCanonical.nhdp")
hdp_handle = HmmModel(ont_model_file=self.model_file, hdp_model_file=hdp_model)
kmer = "AACAT"
linspace = hdp_handle.linspace
kmer_id = hdp_handle.get_kmer_index(kmer)
y = hdp_handle.all_posterior_pred[kmer_id]
new_y = hdp_handle.get_new_linspace_hdp_probability_distribution(kmer, linspace)
self.assertSequenceEqual(new_y, y)
def test_write_new_model(self):
with tempfile.TemporaryDirectory() as tempdir:
test_model_file = os.path.join(tempdir, "fake.hmm")
hmm_handle = HmmModel(ont_model_file=self.model_file)
hmm_handle.write_new_model(out_path=test_model_file, alphabet="ATGCF", replacement_base="A")
hmm_handle2 = HmmModel(ont_model_file=test_model_file)
self.assertEqual(hmm_handle.kmer_length, hmm_handle2.kmer_length)
self.assertEqual(hmm_handle2.alphabet, "ACFGT")
self.assertEqual(hmm_handle2.alphabet_size, 5)
self.assertRaises(AssertionError, hmm_handle.write_new_model, test_model_file, "ATGCW", "A")
def test_create_new_model(self):
with tempfile.TemporaryDirectory() as tempdir:
test_model_file = os.path.join(tempdir, "fake.hmm")
new_model = create_new_model(self.model_file, test_model_file, (("A", "F"), ("A", "J")))
self.assertEqual(new_model.kmer_length, 5)
self.assertEqual(new_model.alphabet, "ACFGJT")
self.assertEqual(new_model.alphabet_size, 6)
mean1 = new_model.get_event_mean_gaussian_parameters("AAAAA")
mean2 = new_model.get_event_mean_gaussian_parameters("AAAJA")
mean3 = new_model.get_event_mean_gaussian_parameters("AAAFA")
mean4 = new_model.get_event_mean_gaussian_parameters("AAJFA")
mean5 = new_model.get_event_mean_gaussian_parameters("AAJJJ")
mean6 = new_model.get_event_mean_gaussian_parameters("FFJJJ")
self.assertEqual(mean1, mean2)
self.assertEqual(mean2, mean3)
self.assertEqual(mean3, mean4)
self.assertEqual(mean4, mean5)
self.assertEqual(mean5, mean6)
new_model = create_new_model(self.model_file, test_model_file, [("A", "J")])
def test_set_kmer_event_mean(self):
hmm_handle = HmmModel(ont_model_file=self.model_file)
hmm_handle.set_kmer_event_mean("AAAAA", 1000)
mean, sd = hmm_handle.get_event_mean_gaussian_parameters("AAAAA")
self.assertEqual(mean, 1000)
def test_set_kmer_event_sd(self):
hmm_handle = HmmModel(ont_model_file=self.model_file)
hmm_handle.set_kmer_event_sd("AAAAA", 1000)
mean, sd = hmm_handle.get_event_mean_gaussian_parameters("AAAAA")
self.assertEqual(sd, 1000)
def test_set_kmer_noise_means(self):
hmm_handle = HmmModel(ont_model_file=self.model_file)
hmm_handle.set_kmer_noise_means("AAAAA", 1000)
mean, sd = hmm_handle.get_event_sd_inv_gaussian_parameters("AAAAA")
self.assertEqual(mean, 1000)
def test_set_kmer_noise_lambdas(self):
hmm_handle = HmmModel(ont_model_file=self.model_file)
hmm_handle.set_kmer_noise_lambdas("AAAAA", 1000)
mean, sd = hmm_handle.get_event_sd_inv_gaussian_parameters("AAAAA")
self.assertEqual(sd, 1000)
def test_read_in_alignment_file(self):
assignments_dir = os.path.join(self.HOME, "tests/test_alignments/ecoli1D_test_alignments_sm3")
data = read_in_alignment_file(list_dir(assignments_dir)[0])
self.assertEqual(len(data["contig"]), 16852)
self.assertEqual(len(data["reference_index"]), 16852)
self.assertEqual(len(data["reference_kmer"]), 16852)
self.assertEqual(len(data["read_file"]), 16852)
self.assertEqual(len(data["strand"]), 16852)
self.assertEqual(len(data["event_index"]), 16852)
self.assertEqual(len(data["event_mean"]), 16852)
self.assertEqual(len(data["event_noise"]), 16852)
self.assertEqual(len(data["event_duration"]), 16852)
self.assertEqual(len(data["aligned_kmer"]), 16852)
self.assertEqual(len(data["scaled_mean_current"]), 16852)
self.assertEqual(len(data["scaled_noise"]), 16852)
self.assertEqual(len(data["posterior_probability"]), 16852)
self.assertEqual(len(data["descaled_event_mean"]), 16852)
self.assertEqual(len(data["ont_model_mean"]), 16852)
self.assertEqual(len(data["path_kmer"]), 16852)
self.assertEqual(len(data), 16852)
def test_load_nanopolish_model(self):
# model = HmmModel(ont_model_file=self.model_file, nanopolish_model_file=nanopolish_model)
model, alphabet, k = load_nanopolish_model(self.nanopolish_model)
self.assertEqual(len(model["means"]), 4**6)
self.assertEqual(alphabet, "ACGT")
self.assertEqual(k, 6)
def test_convert_nanopolish_model_to_signalalign(self):
with tempfile.TemporaryDirectory() as tempdir:
sa_file = os.path.join(tempdir, "testModelr9.4_450bps.nucleotide.6mer.template.model")
convert_nanopolish_model_to_signalalign(self.nanopolish_model, self.model.transitions, sa_file)
sa_model = HmmModel(sa_file)
model_mean, model_sd = sa_model.get_event_mean_gaussian_parameters("AAAATG")
self.assertEqual(model_mean, 75.943873)
self.assertEqual(model_sd, 1.542528)
def test_convert_and_edit_nanopolish_model_to_signalalign(self):
with tempfile.TemporaryDirectory() as tempdir:
sa_file = os.path.join(tempdir, "testModelR9.4_450bps.cpg.6mer.template.model")
convert_and_edit_nanopolish_model_to_signalalign(self.cpg_nanopolish_model, self.model.transitions, sa_file)
sa_model = HmmModel(sa_file)
model_mean, model_sd = sa_model.get_event_mean_gaussian_parameters("AAAAEE")
self.assertEqual(model_mean, 75.7063)
self.assertEqual(model_sd, 2.70501)
if __name__ == '__main__':
unittest.main()
| 50.914754 | 138 | 0.693155 | 1,962 | 15,529 | 5.159531 | 0.132008 | 0.049788 | 0.031611 | 0.039514 | 0.675096 | 0.625012 | 0.520794 | 0.448582 | 0.410451 | 0.367579 | 0 | 0.042831 | 0.195634 | 15,529 | 304 | 139 | 51.082237 | 0.767593 | 0.023118 | 0 | 0.345238 | 0 | 0 | 0.077979 | 0.045321 | 0 | 0 | 0 | 0 | 0.261905 | 1 | 0.115079 | false | 0 | 0.027778 | 0 | 0.154762 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bf5bef03846470df10d77e9388198ed2c99651d | 5,106 | py | Python | server/ngwmn/views.py | duselmann/ngwmn-ui | 9c95b0eddc692184ba65e3fabe92e9b230d9b7d1 | [
"CC0-1.0"
] | null | null | null | server/ngwmn/views.py | duselmann/ngwmn-ui | 9c95b0eddc692184ba65e3fabe92e9b230d9b7d1 | [
"CC0-1.0"
] | null | null | null | server/ngwmn/views.py | duselmann/ngwmn-ui | 9c95b0eddc692184ba65e3fabe92e9b230d9b7d1 | [
"CC0-1.0"
] | null | null | null | """
NGWMN UI application views
"""
from flask import abort, jsonify, render_template
from . import __version__, app
from .services.ngwmn import get_features, get_water_quality, get_well_log, get_statistics, get_providers, get_sites
from .services.confluence import (
pull_feed, confluence_url, MAIN_CONTENT, SITE_SELECTION_CONTENT, DATA_COLLECTION_CONTENT, DATA_MANAGEMENT_CONTENT,
OTHER_AGENCY_INFO_CONTENT)
from .string_utils import generate_subtitle
@app.route('/')
def home():
"""testing home page"""
return render_template(
'index.html',
version=__version__,
test_sites=[
{'agency_cd': 'USGS', 'location_id': '353945105574502'},
{'agency_cd': 'USGS', 'location_id': '282532081075601'},
{'agency_cd': 'USGS', 'location_id': '473442118162201'},
{'agency_cd': 'USGS', 'location_id': '423532088254601'},
{'agency_cd': 'USGS', 'location_id': '401105074120205'},
{'agency_cd': 'USGS', 'location_id': '411958079540202'},
{'agency_cd': 'ADWR', 'location_id': '334306112433801'},
{'agency_cd': 'DGS', 'location_id': 'Eb53-33'},
{'agency_cd': 'KSGS', 'location_id': '381107098532401'},
{'agency_cd': 'MEGS', 'location_id': '39412'},
{'agency_cd': 'MBMG', 'location_id': '235474'},
{'agency_cd': 'TWRB', 'location_id': '2763901'}
]
)
@app.route('/version')
def version():
"""Render the home page."""
return jsonify({
'version': __version__
})
@app.route('/provider/', methods=['GET'])
def providers():
"""
NGWMN available providers view
"""
return render_template('providers.html', providers=get_providers())
@app.route('/provider/<agency_cd>/', methods=['GET'])
def provider(agency_cd):
"""
NGWMN provider information view
"""
providers = get_providers()
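    # index provider metadata by agency code for direct lookup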
providers_by_agency_cd = dict(map(lambda x: (x['agency_cd'], x), providers))
if agency_cd not in providers_by_agency_cd:
return '{0} is not a valid agency code'.format(agency_cd), 404
return render_template('provider.html', agency_metadata=providers_by_agency_cd.get(agency_cd),
provider_content=pull_feed(confluence_url(agency_cd, MAIN_CONTENT)),
site_selection=pull_feed(confluence_url(agency_cd, SITE_SELECTION_CONTENT)),
data_collection=pull_feed(confluence_url(agency_cd, DATA_COLLECTION_CONTENT)),
data_management=pull_feed(confluence_url(agency_cd, DATA_MANAGEMENT_CONTENT)),
other_agency_info=pull_feed(confluence_url(agency_cd, OTHER_AGENCY_INFO_CONTENT)))
@app.route('/provider/<agency_cd>/site/', methods=['GET'])
def sites(agency_cd):
"""
A list of NGWMN sites for an agency_cd
    :param str agency_cd: agency code for the agency whose sites are listed
"""
site_list = get_sites(agency_cd)
if not site_list:
return '{0} is not a valid agency code'.format(agency_cd), 404
return render_template('sites.html',
sites=site_list)
@app.route('/provider/<agency_cd>/site/<location_id>/', methods=['GET'])
def site_page(agency_cd, location_id):
"""
Site location view.
:param str agency_cd: agency code for the agency that manages the location
:param location_id: the location's identifier
"""
well_log = get_well_log(agency_cd, location_id)
if not well_log:
return abort(404)
summary = get_features(
well_log['location']['latitude'],
well_log['location']['longitude']
)
water_quality = get_water_quality(agency_cd, location_id)
# reduce the amount of data returned from GeoServer to only the monitoring location of interest
# this section is important for monitoring locations that have the same geographical coordinates (i.e. nested wells)
feature = {}
for a_single_feature in summary['features']:
if a_single_feature['properties']['SITE_NO'] == location_id:
feature = a_single_feature['properties']
break
if 'organization' in water_quality:
organization = water_quality['organization']['name']
else:
organization = feature.get('AGENCY_NM')
# run the logic to create web page subtitle also known as the 'monitoring location description'
monitoring_location_description = generate_subtitle(feature)
# Get the unique list of best-choice lithology IDs in the well log
lithology_ids = set()
for entry in well_log.get('log_entries', []):
materials = entry['unit'].get('ui', {}).get('materials')
if materials:
lithology_ids.add(materials[0])
return render_template(
'site_location.html',
feature=feature,
organization=organization,
water_quality_activities=water_quality.get('activities') or [],
well_log=well_log,
lithology_ids=lithology_ids,
stats=get_statistics(agency_cd, location_id),
monitoring_location_description=monitoring_location_description
), 200
| 36.471429 | 120 | 0.65942 | 606 | 5,106 | 5.262376 | 0.264026 | 0.095328 | 0.033866 | 0.039511 | 0.246159 | 0.164001 | 0.061461 | 0.040765 | 0.040765 | 0.040765 | 0 | 0.039427 | 0.220133 | 5,106 | 139 | 121 | 36.733813 | 0.761426 | 0.137289 | 0 | 0.044944 | 0 | 0 | 0.19378 | 0.020887 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067416 | false | 0 | 0.05618 | 0 | 0.224719 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bf86984732e09c11e4c2e86d3f2f8159a031d16 | 534 | py | Python | djangomom/core/methods.py | emiamar/d | abfd0ca81224a1259fdfac92ed21ad771d901e18 | [
"BSD-3-Clause"
] | null | null | null | djangomom/core/methods.py | emiamar/d | abfd0ca81224a1259fdfac92ed21ad771d901e18 | [
"BSD-3-Clause"
] | 2 | 2018-02-27T07:56:18.000Z | 2018-03-09T12:45:48.000Z | djangomom/core/methods.py | emiamar/d | abfd0ca81224a1259fdfac92ed21ad771d901e18 | [
"BSD-3-Clause"
] | 2 | 2018-02-21T07:43:04.000Z | 2018-11-10T18:09:26.000Z | from django.utils import timezone
from pyproduction.models import Payment
def yesterday_collections():
total = 0
payments = Payment.objects.filter(
date=(timezone.now() + timezone.timedelta(days=-1)).date())
for payment in payments:
amount = payment.amount
total = total + amount
return total
def due_cheques():
payments = Payment.objects.all()
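    # payment_mode == 2 presumably denotes cheque payments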
payments = [
        payment for payment in payments if payment.payment_mode == 2 and payment.is_cheque_due()
]
return payments
| 24.272727 | 96 | 0.681648 | 65 | 534 | 5.523077 | 0.523077 | 0.125348 | 0.122563 | 0.111421 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007317 | 0.23221 | 534 | 21 | 97 | 25.428571 | 0.868293 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bfa564a919d87dd928eba98c48a22db06bbd539 | 4,214 | py | Python | python/griddly/util/action_space.py | Thaigun/Griddly | de5972a608a2928172510a0ac81a977c48af6b1f | [
"MIT"
] | null | null | null | python/griddly/util/action_space.py | Thaigun/Griddly | de5972a608a2928172510a0ac81a977c48af6b1f | [
"MIT"
] | null | null | null | python/griddly/util/action_space.py | Thaigun/Griddly | de5972a608a2928172510a0ac81a977c48af6b1f | [
"MIT"
] | null | null | null | import gym
import numpy as np
class MultiAgentActionSpace(list):
def __init__(self, agents_action_space):
for x in agents_action_space:
assert isinstance(x, gym.spaces.space.Space)
super(MultiAgentActionSpace, self).__init__(agents_action_space)
self.agents_action_space = agents_action_space
def sample(self):
"""samples action for each agent from uniform distribution"""
return [
agent_action_space.sample()
for agent_action_space in self.agents_action_space
]
def seed(self, seed):
for space in self.agents_action_space:
space.seed(seed)
class ValidatedActionSpace(gym.spaces.space.Space, list):
"""
Sampling this action space only results in valid actions
"""
def __init__(self, action_space, masking_wrapper):
self._masking_wrapper = masking_wrapper
shape = None
dtype = None
if isinstance(action_space, gym.spaces.Discrete) or isinstance(
action_space, gym.spaces.MultiDiscrete
):
shape = action_space.shape
dtype = action_space.dtype
elif isinstance(action_space, MultiAgentActionSpace):
shape = action_space[0].shape
dtype = action_space[0].dtype
self.action_space = action_space
super().__init__(shape, dtype)
def __len__(self):
if isinstance(self.action_space, list):
return len(self.action_space)
else:
return 1
def __getitem__(self, y):
if isinstance(self.action_space, list):
return self.action_space[y]
else:
raise IndexError()
def __getattr__(self, name):
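        # delegate any other attribute lookup to the wrapped action space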
if name.startswith("_"):
raise AttributeError(
"attempted to get missing private attribute '{}'".format(name)
)
return getattr(self.action_space, name)
def _sample_valid(self, player_id):
# Sample a location with valid actions
assert player_id <= self._masking_wrapper.player_count, "Player does not exist."
assert player_id > 0, "Player 0 is reserved for internal actions only."
available_actions = [
a
for a in self._masking_wrapper.game.get_available_actions(player_id).items()
]
num_available = len(available_actions)
if num_available == 0:
return [0, 0, 0, 0]
else:
available_actions_choice = self.np_random.choice(num_available)
location, actions = available_actions[available_actions_choice]
available_action_ids = [
aid
for aid in self._masking_wrapper.game.get_available_action_ids(
location, list(actions)
).items()
if len(aid[1]) > 0
]
num_action_ids = len(available_action_ids)
# If there are no available actions at all, we do a NOP (which is any action_name with action_id 0)
if num_action_ids == 0:
action_name_idx = 0
action_id = 0
else:
available_action_ids_choice = self.np_random.choice(num_action_ids)
action_name, action_ids = available_action_ids[available_action_ids_choice]
action_name_idx = self._masking_wrapper.action_names.index(action_name)
action_id = self.np_random.choice(action_ids)
sampled_action = []
# Build the action based on the action_space info
if not self._masking_wrapper.has_avatar:
sampled_action.extend([location[0], location[1]])
if self._masking_wrapper.action_count > 1:
sampled_action.append(action_name_idx)
sampled_action.append(action_id)
return sampled_action
def sample(self, player_id=None):
if player_id is not None:
return self._sample_valid(player_id)
if self._masking_wrapper.player_count == 1:
return self._sample_valid(1)
sampled_actions = []
for player_id in range(self._masking_wrapper.player_count):
sampled_actions.append(self._sample_valid(player_id + 1))
return sampled_actions
| 31.684211 | 107 | 0.636687 | 506 | 4,214 | 4.976285 | 0.217391 | 0.113582 | 0.064337 | 0.03336 | 0.196187 | 0.101668 | 0.057983 | 0 | 0 | 0 | 0 | 0.007336 | 0.288325 | 4,214 | 132 | 108 | 31.924242 | 0.832277 | 0.070242 | 0 | 0.065217 | 0 | 0 | 0.030039 | 0 | 0 | 0 | 0 | 0 | 0.032609 | 1 | 0.097826 | false | 0 | 0.021739 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bfe751e9da080c147a9b40a89fd821b2d0316cb | 1,636 | py | Python | fixes/fix_alarm_managedobjectprofile.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | fixes/fix_alarm_managedobjectprofile.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | fixes/fix_alarm_managedobjectprofile.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ----------------------------------------------------------------------
# Fill Active/ArchivedAlarm managed object profile
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from collections import defaultdict
# Third-party modules
from pymongo import UpdateMany
# NOC modules
from noc.sa.models.managedobject import ManagedObject
from noc.fm.models.activealarm import ActiveAlarm
from noc.fm.models.archivedalarm import ArchivedAlarm
BULK_SIZE = 50
IN_SIZE = 1000
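# batch up to IN_SIZE alarm ids per UpdateMany and flush every BULK_SIZE operations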
def fix():
def fix_model(model):
coll = model._get_collection()
ins = defaultdict(list)
bulk = []
for doc in coll.find(
{"managed_object_profile": {"$exists": False}, "managed_object": {"$exists": True}},
{"_id": 1, "managed_object": 1},
):
mo = ManagedObject.get_by_id(doc["managed_object"])
if not mo:
continue
mop = mo.object_profile.id
ins[mop] += [doc["_id"]]
if len(ins[mop]) >= IN_SIZE:
bulk += [
UpdateMany(
{"_id": {"$in": ins[mop]}}, {"$set": {"managed_object_profile": mop}}
)
]
ins[mop] = []
if len(bulk) >= BULK_SIZE:
coll.bulk_write(bulk)
bulk = []
        # flush ids that never reached IN_SIZE, then write any pending operations
        for mop, ids in ins.items():
            if ids:
                bulk += [
                    UpdateMany(
                        {"_id": {"$in": ids}}, {"$set": {"managed_object_profile": mop}}
                    )
                ]
        if bulk:
            coll.bulk_write(bulk)
fix_model(ActiveAlarm)
fix_model(ArchivedAlarm)
| 30.867925 | 96 | 0.479829 | 154 | 1,636 | 4.941558 | 0.396104 | 0.102497 | 0.078844 | 0.039422 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013841 | 0.293399 | 1,636 | 52 | 97 | 31.461538 | 0.644464 | 0.227384 | 0 | 0.114286 | 0 | 0 | 0.092504 | 0.035088 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.142857 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9bff8b578a562836d98ff90408b55710c6ffc73f | 7,841 | py | Python | tests/test_msg.py | kingli-crypto/chainlibpy | 8511c08c3bdb7de9cf58254a804ca329188a1dd8 | [
"Apache-2.0"
] | null | null | null | tests/test_msg.py | kingli-crypto/chainlibpy | 8511c08c3bdb7de9cf58254a804ca329188a1dd8 | [
"Apache-2.0"
] | null | null | null | tests/test_msg.py | kingli-crypto/chainlibpy | 8511c08c3bdb7de9cf58254a804ca329188a1dd8 | [
"Apache-2.0"
] | null | null | null | from chainlibpy.amino import (Coin, CommissionRates, Content, Description,
Input, Output, VoteOptionYes, message)
def test_msg():
msg_send = message.MsgSend("from_address", "to_address", [Coin()])
data = msg_send.to_dict()
assert data == {
"type": "cosmos-sdk/MsgSend",
"value": {
"from_address": "from_address",
"to_address": "to_address",
"amount": [{"amount": "0", "denom": "basecro"}],
},
}
inputs = [Input("input_address", [Coin()])]
outputs = [Output("output_address", [Coin()])]
msg_multi_send = message.MsgMultiSend(inputs, outputs)
data = msg_multi_send.to_dict()
assert data == {
"type": "cosmos-sdk/MsgMultiSend",
"value": {
"inputs": [
{
"address": "input_address",
"coins": [{"amount": "0", "denom": "basecro"}],
}
],
"outputs": [
{
"address": "output_address",
"coins": [{"amount": "0", "denom": "basecro"}],
}
],
},
}
msg = message.MsgVerifyInvariant(
"sender_address", "invariant_module_name", "invariant_route"
)
data = msg.to_dict()
assert data == {
"type": "cosmos-sdk/MsgVerifyInvariant",
"value": {
"sender": "sender_address",
"invariant_module_name": "invariant_module_name",
"invariant_route": "invariant_route",
},
}
msg = message.MsgSetWithdrawAddress("delegator_address", "withdraw_address")
data = msg.to_dict()
print(data)
assert data == {
"type": "cosmos-sdk/MsgSetWithdrawAddress",
"value": {
"delegator_address": "delegator_address",
"withdraw_address": "withdraw_address",
},
}
msg = message.MsgWithdrawDelegationReward("delegator_address", "validator_address")
data = msg.to_dict()
print(data)
assert data == {
"type": "cosmos-sdk/MsgWithdrawDelegationReward",
"value": {
"delegator_address": "delegator_address",
"validator_address": "validator_address",
},
}
msg = message.MsgWithdrawValidatorCommission("validator_address")
data = msg.to_dict()
print(data)
assert data == {
"type": "cosmos-sdk/MsgWithdrawValidatorCommission",
"value": {"validator_address": "validator_address"},
}
msg = message.MsgFundCommunityPool([Coin()], "depositor")
data = msg.to_dict()
print(data)
assert data == {
"type": "cosmos-sdk/MsgFundCommunityPool",
"value": {
"amount": [{"amount": "0", "denom": "basecro"}],
"depositor": "depositor",
},
}
evidence_content = Content("type_url", b"evidence content details")
msg = message.MsgSubmitEvidence("submitter", evidence_content)
data = msg.to_dict()
print(data)
assert data == {
"type": "cosmos-sdk/MsgSubmitEvidence",
"value": {
"submitter": "submitter",
"evidence": {"type_url": "type_url", "value": b"evidence content details"},
},
}
content = Content("type_url", b"content details")
msg = message.MsgSubmitProposal(content, [Coin()], "proposer")
data = msg.to_dict()
print(data)
assert data == {
"type": "cosmos-sdk/MsgSubmitProposal",
"value": {
"content": {"type_url": "type_url", "value": b"content details"},
"initial_deposit": [{"amount": "0", "denom": "basecro"}],
"proposer": "proposer",
},
}
msg = message.MsgVote(1, "voter address", VoteOptionYes)
data = msg.to_dict()
print(data)
assert data == {
"type": "cosmos-sdk/MsgVote",
"value": {"proposal_id": 1, "voter": "voter address", "option": VoteOptionYes},
}
msg = message.MsgDeposit(1, "depositor address", [Coin()])
data = msg.to_dict()
print(data)
assert data == {
"type": "cosmos-sdk/MsgDeposit",
"value": {
"proposal_id": 1,
"depositor": "depositor address",
"amount": [{"amount": "0", "denom": "basecro"}],
},
}
msg = message.MsgUnjail("validator address")
data = msg.to_dict()
print(data)
assert data == {
"type": "cosmos-sdk/MsgUnjail",
"value": {"validator_addr": "validator address"},
}
description = Description(
"moniker", "identity", "website", "security_contact", "details"
)
commission = CommissionRates("rate", "max_rate", "max_change_rate")
msg = message.MsgCreateValidator(
description,
commission,
"min_self_delegation",
"delegator_address",
"validator_address",
"public key",
Coin(),
)
data = msg.to_dict()
print(data)
assert data == {
"type": "cosmos-sdk/MsgCreateValidator",
"value": {
"description": {
"moniker": "moniker",
"identity": "identity",
"website": "website",
"security_contact": "security_contact",
"details": "details",
},
"commission": {
"rate": "rate",
"max_rate": "max_rate",
"max_change_rate": "max_change_rate",
},
"min_self_delegation": "min_self_delegation",
"delegator_address": "delegator_address",
"validator_address": "validator_address",
"pubkey": "public key",
"value": {"amount": "0", "denom": "basecro"},
},
}
msg = message.MsgEditValidator(
description, "validator_address", commission, "min_self_delegation"
)
data = msg.to_dict()
print(data)
assert data == {
"type": "cosmos-sdk/MsgEditValidator",
"value": {
"description": {
"moniker": "moniker",
"identity": "identity",
"website": "website",
"security_contact": "security_contact",
"details": "details",
},
"validator_address": "validator_address",
"commission_rate": {
"rate": "rate",
"max_rate": "max_rate",
"max_change_rate": "max_change_rate",
},
"min_self_delegation": "min_self_delegation",
},
}
msg = message.MsgDelegate("delegator_address", "validator_address", Coin())
data = msg.to_dict()
print(data)
assert data == {
"type": "cosmos-sdk/MsgDelegate",
"value": {
"delegator_address": "delegator_address",
"validator_address": "validator_address",
"amount": {"amount": "0", "denom": "basecro"},
},
}
msg = message.MsgBeginRedelegate(
"delegator_address", "validator_src_address", "validator_dst_address", Coin()
)
data = msg.to_dict()
print(data)
assert data == {
"type": "cosmos-sdk/MsgBeginRedelegate",
"value": {
"delegator_address": "delegator_address",
"validator_src_address": "validator_src_address",
"validator_dst_address": "validator_dst_address",
"amount": {"amount": "0", "denom": "basecro"},
},
}
msg = message.MsgUndelegate("delegator_address", "validator_address", Coin())
data = msg.to_dict()
print(data)
assert data == {
"type": "cosmos-sdk/MsgUndelegate",
"value": {
"delegator_address": "delegator_address",
"validator_address": "validator_address",
"amount": {"amount": "0", "denom": "basecro"},
},
}
| 31.744939 | 87 | 0.534498 | 664 | 7,841 | 6.099398 | 0.137048 | 0.079012 | 0.058765 | 0.083951 | 0.569136 | 0.527654 | 0.431605 | 0.403951 | 0.348642 | 0.331605 | 0 | 0.002591 | 0.31093 | 7,841 | 246 | 88 | 31.873984 | 0.746992 | 0 | 0 | 0.442982 | 0 | 0 | 0.36577 | 0.075373 | 0 | 0 | 0 | 0 | 0.074561 | 1 | 0.004386 | false | 0 | 0.004386 | 0 | 0.008772 | 0.061404 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50021064d023bb04bd15ab405bd9dea090a6545b | 922 | py | Python | src/cli/pyramid_display.py | catzilla-007/plexus-water-overflow | bda423ef3ffa52dd5e45596a3d474b31a46544fa | [
"MIT"
] | null | null | null | src/cli/pyramid_display.py | catzilla-007/plexus-water-overflow | bda423ef3ffa52dd5e45596a3d474b31a46544fa | [
"MIT"
] | null | null | null | src/cli/pyramid_display.py | catzilla-007/plexus-water-overflow | bda423ef3ffa52dd5e45596a3d474b31a46544fa | [
"MIT"
] | null | null | null | import click
from src.core.glass_pyramid import GlassPyramid
class PyramidDisplay(object):
def __init__(self, pyramid: GlassPyramid):
self._pyramid = pyramid
def display(self):
height = self._get_pyramid_height()
space = height - 1
for i in range(0, height):
for j in range(0, space):
click.echo(' ', nl=False)
space = space - 1
for j in range(0, i + 1):
glass_info = self._get_glass_info(i, j)
click.echo(f'{glass_info} ', nl=False)
click.echo('\n', nl=True)
def _get_glass_info(self, i: int, j: int) -> str:
glass = self._pyramid.get_glass(i, j)
return f'[({i},{j}) {glass.content} ml]'
def _get_pyramid_height(self):
keys = list(self._pyramid.glasses)
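        # glass keys are assumed to be (row, column) tuples; the largest row index is used as the height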
height = list(map(lambda a: a[0], keys))
return max(height)
| 27.117647 | 59 | 0.557484 | 122 | 922 | 4.02459 | 0.360656 | 0.089613 | 0.04888 | 0.044807 | 0.04888 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011164 | 0.319957 | 922 | 33 | 60 | 27.939394 | 0.77193 | 0 | 0 | 0 | 0 | 0 | 0.063991 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.173913 | false | 0 | 0.086957 | 0 | 0.391304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
500302a6edbc0f78c26797058326cae2f1dd7b5b | 2,624 | py | Python | test/test_npu/test_network_ops/test_dropout.py | Ascend/pytorch | 39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc | [
"BSD-3-Clause"
] | 1 | 2021-12-02T03:07:35.000Z | 2021-12-02T03:07:35.000Z | test/test_npu/test_network_ops/test_dropout.py | Ascend/pytorch | 39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc | [
"BSD-3-Clause"
] | 1 | 2021-11-12T07:23:03.000Z | 2021-11-12T08:28:13.000Z | test/test_npu/test_network_ops/test_dropout.py | Ascend/pytorch | 39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('..')
import torch
import numpy as np
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestDropOutDoMask(TestCase):
def cpu_op_exec(self, input):
out = torch.nn.Dropout(0.5)(input)
out = out.numpy()
return out
def npu_op_exec(self, input):
out = torch.nn.Dropout(0.5)(input)
out = out.to("cpu")
out = out.numpy()
return out
def dropout_list_exec(self, list):
epsilon = 1e-3
for item in list:
cpu_input1, npu_input1 = create_common_tensor(item, 0, 100)
if cpu_input1.dtype == torch.float16:
cpu_input1 = cpu_input1.to(torch.float32)
cpu_output = self.cpu_op_exec(cpu_input1)
npu_output = self.npu_op_exec(npu_input1)
cpu_output = cpu_output.astype(npu_output.dtype)
            # comparison method for this operator's randomized results
for a, b in zip(cpu_output.flatten(), npu_output.flatten()):
if abs(a) > 0 and abs(b) > 0 and abs(a - b) > epsilon:
print(f'input = {item}, ERROR!')
break
else:
print(f'input = {item}, Successfully!')
def test_op_shape_format_fp16(self, device):
format_list = [0, 3, 29]
shape_list = [1, (256, 1280), (32, 3, 3), (256, 2048, 7, 7)]
shape_format = [
[np.float16, i, j] for i in format_list for j in shape_list
]
self.dropout_list_exec(shape_format)
def test_op_shape_format_fp32(self, device):
format_list = [0, 3, 29]
shape_list = [1, (256, 1280), (32, 3, 3), (256, 2048, 7, 7)]
shape_format = [
[np.float32, i, j] for i in format_list for j in shape_list
]
self.dropout_list_exec(shape_format)
instantiate_device_type_tests(TestDropOutDoMask, globals(), except_for="cpu")
if __name__ == "__main__":
run_tests() | 37.485714 | 77 | 0.640625 | 375 | 2,624 | 4.288 | 0.376 | 0.037313 | 0.027985 | 0.019901 | 0.267413 | 0.242537 | 0.217662 | 0.217662 | 0.217662 | 0.217662 | 0 | 0.044799 | 0.259909 | 2,624 | 70 | 78 | 37.485714 | 0.783213 | 0.223704 | 0 | 0.285714 | 0 | 0 | 0.033119 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102041 | false | 0 | 0.122449 | 0 | 0.285714 | 0.040816 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
500436561563d36cc8d543eb50f2e292d7141dbe | 2,178 | py | Python | LeetCode/BottomLevelOrder.py | Jaidev810/Competitive-Questions | 5d5b28be69e8572e9b4353e9790ee39b56769fc3 | [
"MIT"
] | 1 | 2021-02-27T06:12:55.000Z | 2021-02-27T06:12:55.000Z | LeetCode/BottomLevelOrder.py | Jaidev810/Competitive-Questions | 5d5b28be69e8572e9b4353e9790ee39b56769fc3 | [
"MIT"
] | 1 | 2021-02-02T08:52:17.000Z | 2021-02-03T08:19:12.000Z | LeetCode/BottomLevelOrder.py | Jaidev810/Competitive-Questions | 5d5b28be69e8572e9b4353e9790ee39b56769fc3 | [
"MIT"
] | null | null | null | import queue
class BinaryTree:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def takeLevelwiseinput():
print('enter the root data: ')
rootData = int(input())
if rootData == -1:
return None
root = BinaryTree(rootData)
q = queue.Queue()
q.put(root)
while not(q.empty()):
curr_node = q.get()
print('enter the leftchild of ', curr_node.data)
leftData = int(input())
if leftData != -1:
leftchild = BinaryTree(leftData)
curr_node.left = leftchild
q.put(leftchild)
print('enter the rightchild of ', curr_node.data)
rightData = int(input())
if rightData != -1:
rightchild = BinaryTree(rightData)
curr_node.right = rightchild
q.put(rightchild)
return root
def printLevelwise(root):
if root is None:
return None
q = queue.Queue()
q.put(root)
while not(q.empty()):
curr_node = q.get()
print(curr_node.data, end=':')
if curr_node.left is not None:
print('L', curr_node.left.data, end=',')
q.put(curr_node.left)
if curr_node.right is not None:
print('R', curr_node.right.data, end='')
q.put(curr_node.right)
print()
def bottomLevelOrder(root):
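    # BFS level-order traversal; levels are collected top-down and returned bottom-up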
if root is None:
return None
temp = []
arr = []
q = list()
q.append(root)
q.append(None)
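    # None marks the end of the current level in the queue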
while len(q) != 0:
curr_node = q.pop(0)
if curr_node is None:
arr.append(temp)
temp = []
if len(q) == 0:
break
else:
q.append(None)
continue
if curr_node.left is not None:
q.append(curr_node.left)
if curr_node.right is not None:
q.append(curr_node.right)
temp.append(curr_node.data)
    return arr[::-1]
root = takeLevelwiseinput()
printLevelwise(root)
arr = bottomLevelOrder(root)
print(arr) | 21.78 | 57 | 0.53214 | 268 | 2,178 | 4.235075 | 0.208955 | 0.140969 | 0.063436 | 0.021145 | 0.2837 | 0.2837 | 0.257269 | 0.151542 | 0.151542 | 0.151542 | 0 | 0.008529 | 0.353994 | 2,178 | 100 | 58 | 21.78 | 0.798152 | 0 | 0 | 0.28 | 0 | 0 | 0.033043 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053333 | false | 0 | 0.013333 | 0 | 0.146667 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
500a73f486a31bfb6d99a53402ae6f207998ea3f | 977 | py | Python | questa_cmd.py | psumesh/Python_scripts | e43100f28cbec0551ecd3c3b2d81b1bd021445e7 | [
"Apache-2.0"
] | null | null | null | questa_cmd.py | psumesh/Python_scripts | e43100f28cbec0551ecd3c3b2d81b1bd021445e7 | [
"Apache-2.0"
] | 1 | 2021-08-12T15:29:41.000Z | 2021-08-12T18:14:39.000Z | questa_cmd.py | psumesh/Python_scripts | e43100f28cbec0551ecd3c3b2d81b1bd021445e7 | [
"Apache-2.0"
] | null | null | null | #creater : Umesh Prasad
# email : spumesh@outlook.com
# github : psumesh
import os
top_fname = 'abc.sv'
os.system('vsim -c') #enter into questa
start_compile = 'vlog ' + top_fname
os.system(start_compile) #compile
#simulate
sim = 'vlog -novopt work.top'
os.system(sim)
#add all waveform including submodules
waveforms = 'add wave -r *'
os.system(waveforms)
os.system('run 1000')
#for coverage
os.system('quit -sim')
os.system(start_compile)
compile_cover = 'vlog -cover bcst ' + top_fname
os.system(compile_cover)
os.system(waveforms)
os.system('run 10000')
#in sv for diffrent test cases with program files
os.system('coverage save -assert -directive -cvg -codeAll result1.ucdb')
#for html coverage report
os.system('vcover report -details -html result1.ucdb')
#SV assertions
assert_cmd = 'vlog +acc ' + top_fname
os.system(assert_cmd)
os.system('vsim -assertdebug top_module') #where top_module is the top top wrapper module of dut & testbench
| 22.72093 | 110 | 0.731832 | 146 | 977 | 4.808219 | 0.506849 | 0.159544 | 0.042735 | 0.068376 | 0.156695 | 0.079772 | 0 | 0 | 0 | 0 | 0 | 0.013285 | 0.152508 | 977 | 42 | 111 | 23.261905 | 0.834541 | 0.307062 | 0 | 0.190476 | 0 | 0 | 0.351433 | 0 | 0 | 0 | 0 | 0 | 0.190476 | 1 | 0 | false | 0 | 0.047619 | 0 | 0.047619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
500a8ee1748957a5a10a48e440aae1e6b9c57cba | 2,016 | py | Python | src/update_handlers/menu/my_channels/actions/delete.py | Arseny-Tokmancev/channels-watchbot | 102edc07c9d8c306f47b6a5b8318fa0ba56534f0 | [
"MIT"
] | 1 | 2020-11-10T22:50:14.000Z | 2020-11-10T22:50:14.000Z | src/update_handlers/menu/my_channels/actions/delete.py | Arseny-Tokmancev/channels-watchbot | 102edc07c9d8c306f47b6a5b8318fa0ba56534f0 | [
"MIT"
] | null | null | null | src/update_handlers/menu/my_channels/actions/delete.py | Arseny-Tokmancev/channels-watchbot | 102edc07c9d8c306f47b6a5b8318fa0ba56534f0 | [
"MIT"
] | 1 | 2022-01-31T19:23:03.000Z | 2022-01-31T19:23:03.000Z | from pyrogram import filters
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from data.models import Channel, Chat
from ..show_channel import show_channel
from ..list_channel import list_channels
def register(app):
@app.on_callback_query(
filters.regex('delete ')
)
def delete_channel(client, update):
channel_id = int(update.data.split(' ')[1])
channel = Channel.objects.get(id=channel_id)
input_channel = channel.get_input_channel(client)
update.edit_message_text(
            f'Are you sure you want to delete channel **{input_channel.title}**?',
            reply_markup = InlineKeyboardMarkup([
                [InlineKeyboardButton('Yes ✅', f'yes_sure {channel_id}')],
                [InlineKeyboardButton('No ❌', f'not_sure {channel_id}')],
])
)
@app.on_callback_query(
filters.regex('yes_sure ')
)
def sure_delete(client, update):
chat = Chat.objects.get(id=update.message.chat.id)
try:
channel_id = int(update.data.split(' ')[1])
channel = Channel.objects.get(id=channel_id)
input_channel = channel.get_input_channel(client)
channel.delete()
except Exception as e:
print(e)
            update.answer('An error occurred while deleting the channel')
else:
text, buttons = list_channels(client, chat)
            text = f'Channel **{input_channel.title}** deleted\n\n' + text
update.edit_message_text(
text,
reply_markup = buttons,
)
@app.on_callback_query(
filters.regex('not_sure ')
)
def not_sure_delete(client, update):
channel_id = int(update.data.split(' ')[1])
channel = Channel.objects.get(id=channel_id)
        update.answer('Deletion cancelled')
text, buttons = show_channel(client, channel)
update.edit_message_text(
text,
reply_markup = buttons
) | 34.758621 | 79 | 0.610615 | 227 | 2,016 | 5.23348 | 0.303965 | 0.060606 | 0.040404 | 0.045455 | 0.394781 | 0.394781 | 0.319024 | 0.319024 | 0.246633 | 0.246633 | 0 | 0.002073 | 0.282242 | 2,016 | 58 | 80 | 34.758621 | 0.817554 | 0 | 0 | 0.307692 | 0 | 0 | 0.117997 | 0.025285 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.096154 | 0 | 0.173077 | 0.019231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
500b67659760a963dd3a6c6d7ecc661982992539 | 3,701 | py | Python | src/fastpli/analysis/affine_transformation.py | jifengting1/fastpliFork | 1ef7e2d268e03e21ded9390fc005b9fff2e0a3c1 | [
"MIT"
] | null | null | null | src/fastpli/analysis/affine_transformation.py | jifengting1/fastpliFork | 1ef7e2d268e03e21ded9390fc005b9fff2e0a3c1 | [
"MIT"
] | null | null | null | src/fastpli/analysis/affine_transformation.py | jifengting1/fastpliFork | 1ef7e2d268e03e21ded9390fc005b9fff2e0a3c1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Methods for calculating and applying affine transformation to coordinates and images.
"""
import numpy as np
import scipy.interpolate
import numba
def _replace_mat_row(B, r, d):
return np.linalg.det(np.delete(np.vstack([r, B]), (d + 1), axis=0))
@numba.njit(cache=True)
def _nearest_neighbors(image, M):
""" written for simpli images[x,y,rho]
"""
image = np.atleast_3d(image)
image_nn = np.empty_like(image)
M = np.ascontiguousarray(np.linalg.inv(M))
x_max, y_max = image.shape[0] - 1, image.shape[1] - 1
for i in range(image.shape[0]):
for j in range(image.shape[1]):
x, y, _ = M @ np.array([i, j, 1.0])
ii = max(0, min(x_max, int(np.rint(x))))
jj = max(0, min(y_max, int(np.rint(y))))
image_nn[i, j, :] = image[ii, jj, :]
return image_nn
def _interpolate_griddata(image, M, mode):
""" written for simpli images[x,y,rho]
"""
image = np.atleast_3d(image)
image_nn = np.empty_like(image)
grid_i, grid_j = np.mgrid[0:image.shape[0], 0:image.shape[1]]
# points -> coordinates in transformed image
points = np.array(
[grid_i.flatten(),
grid_j.flatten(),
np.ones(grid_j.size)])
points = (M @ points)[0:2, :]
for k in range(image.shape[2]):
image_nn[:, :, k] = scipy.interpolate.griddata(points.T,
image[:, :, k].flatten(),
(grid_i, grid_j),
method=mode)
return image_nn
def calc_matrix(p_in, p_out):
"""
Calculate the affine transformation matrix.
Parameters
----------
p_in, p_out : (3,2)-array_like
list of 3 x 2d points which will be transformed from p_in to p_out
Returns
-------
res : (3x3)-array
affine transformation matrix
"""
p_in = np.array(p_in)
p_out = np.array(p_out)
if not np.all(np.equal(np.array(p_in.shape), np.array(p_out.shape))):
raise TypeError("in and out not the same shape")
if not np.all(np.equal(np.array(p_in.shape), np.array([3, 2]))):
print(p_in.shape)
raise TypeError("shape error: input required [3x2], [3x2]")
l = p_in.shape[0]
B = np.vstack([np.transpose(p_in), np.ones(l)])
D = 1.0 / np.linalg.det(B)
M = np.array([[(-1)**i * D * _replace_mat_row(B, R, i)
for i in range(l)]
for R in np.transpose(p_out)])
return np.vstack([M, [0, 0, 1]])
def exec_matrix(M, x, y):
"""
Execute the affine transformation.
Parameters
----------
M : (3,3)-array
affine transformation matrix
x, y : float
2d coordinates to transform
Returns
-------
res : float, float
transformed coordinates
"""
x, y, _ = M @ np.array([x, y, 1.0])
return x, y
def image(image, M, mode='nearest'):
"""
Execute the affine transformation on simpli images[x,y,rho].
Parameters
----------
image : 2d-array
image to transform
M : float
affine transformation matrix
mode : str
"nearest", "linear", "cubic" interpolation mode
Returns
-------
res : 2d-array
transformed image
"""
if mode == 'nearest':
# this is faster then scipy.interpolate.griddata('nearest')
new_image = _nearest_neighbors(image, M)
elif mode == 'linear' or mode == 'cubic':
new_image = _interpolate_griddata(image, M, mode)
else:
raise ValueError(f"mode \"{mode}\" does not exist")
return np.squeeze(new_image)
| 26.435714 | 85 | 0.557147 | 521 | 3,701 | 3.84261 | 0.255278 | 0.034965 | 0.01998 | 0.020979 | 0.172328 | 0.10989 | 0.10989 | 0.10989 | 0.10989 | 0.10989 | 0 | 0.018398 | 0.295055 | 3,701 | 139 | 86 | 26.625899 | 0.748946 | 0.277222 | 0 | 0.101695 | 0 | 0 | 0.046222 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101695 | false | 0 | 0.050847 | 0.016949 | 0.254237 | 0.016949 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
500c734d25def9cef7ae5f1e7a309bb8e9297090 | 1,927 | py | Python | virtcam/effects.py | badmonkey/virtual-camera | 712fb6126f407304940ff678159a50695a848093 | [
"MIT"
] | null | null | null | virtcam/effects.py | badmonkey/virtual-camera | 712fb6126f407304940ff678159a50695a848093 | [
"MIT"
] | null | null | null | virtcam/effects.py | badmonkey/virtual-camera | 712fb6126f407304940ff678159a50695a848093 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from pipey import Pipeable
from virtcam.base import Frame, FrameFilter, FrameProcessor, Image, Mask, immutable
class Erode(FrameFilter):
def __init__(self, source: FrameProcessor, size: int):
super().__init__(source)
self.kernel = np.ones((size, size), dtype=np.uint8)
def next(self, frame_id: int) -> Frame:
frame = self.source.next(frame_id)
        # OpenCV requires an integer here; passing None fails overload resolution
        image = cv2.erode(frame.image, self.kernel, iterations=1)
return Frame(frame.config, immutable(image), frame.mask)
@staticmethod
@Pipeable
def p(src, size):
return Erode(src, size)
class Hologram(FrameFilter):
def __init__(self, source: FrameProcessor):
super().__init__(source)
def next(self, frame_id: int) -> Frame:
frame = self.source.next(frame_id)
# add a blue tint
holo = cv2.applyColorMap(frame.image, cv2.COLORMAP_WINTER)
# add a halftone effect
bandLength, bandGap = 3, 4
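        # darken bandLength consecutive rows out of every bandLength + bandGap rows to fake scanlines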
for y in range(holo.shape[0]):
if y % (bandLength + bandGap) < bandLength:
holo[y, :, :] = holo[y, :, :] * np.random.uniform(0.1, 0.3)
# add some ghosting
holo_blur = cv2.addWeighted(holo, 0.2, shift_image(holo.copy(), 5, 5), 0.8, 0)
holo_blur = cv2.addWeighted(holo_blur, 0.4, shift_image(holo.copy(), -5, -5), 0.6, 0)
# combine with the original color, oversaturated
image = cv2.addWeighted(frame.image, 0.5, holo_blur, 0.6, 0)
return Frame(frame.config, immutable(image), frame.mask)
@staticmethod
@Pipeable
def p(src):
return Hologram(src)
def shift_image(img, dx, dy):
img = np.roll(img, dy, axis=0)
img = np.roll(img, dx, axis=1)
if dy > 0:
img[:dy, :] = 0
elif dy < 0:
img[dy:, :] = 0
if dx > 0:
img[:, :dx] = 0
elif dx < 0:
img[:, dx:] = 0
return img
| 30.109375 | 93 | 0.59782 | 265 | 1,927 | 4.241509 | 0.320755 | 0.017794 | 0.032028 | 0.039146 | 0.411032 | 0.33274 | 0.258007 | 0.220641 | 0.220641 | 0.220641 | 0 | 0.03125 | 0.269331 | 1,927 | 63 | 94 | 30.587302 | 0.767045 | 0.052932 | 0 | 0.26087 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.152174 | false | 0 | 0.086957 | 0.043478 | 0.391304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
500d3e9bf092fba21ad52d02acdbdfac814931c6 | 6,866 | py | Python | examples/pinn_inverse/Navier_Stokes_inverse.py | pescap/deepxde | 8792af7a62dc89a08d1ad4e67fc6c2431422e4be | [
"Apache-2.0"
] | 5 | 2019-02-08T02:48:00.000Z | 2019-03-29T16:21:40.000Z | examples/pinn_inverse/Navier_Stokes_inverse.py | pescap/deepxde | 8792af7a62dc89a08d1ad4e67fc6c2431422e4be | [
"Apache-2.0"
] | null | null | null | examples/pinn_inverse/Navier_Stokes_inverse.py | pescap/deepxde | 8792af7a62dc89a08d1ad4e67fc6c2431422e4be | [
"Apache-2.0"
] | 2 | 2019-02-08T18:15:47.000Z | 2019-03-31T17:23:42.000Z | """Backend supported: tensorflow.compat.v1, tensorflow, pytorch
An inverse problem of the Navier-Stokes equation of incompressible flow around cylinder with Re=100
References: https://doi.org/10.1016/j.jcp.2018.10.045 Section 4.1.1
"""
import deepxde as dde
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
import re
# true values
C1true = 1.0
C2true = 0.01
# Load training data
def load_training_data(num):
data = loadmat("../dataset/cylinder_nektar_wake.mat")
U_star = data["U_star"] # N x 2 x T
P_star = data["p_star"] # N x T
t_star = data["t"] # T x 1
X_star = data["X_star"] # N x 2
N = X_star.shape[0]
T = t_star.shape[0]
# Rearrange Data
XX = np.tile(X_star[:, 0:1], (1, T)) # N x T
YY = np.tile(X_star[:, 1:2], (1, T)) # N x T
TT = np.tile(t_star, (1, N)).T # N x T
UU = U_star[:, 0, :] # N x T
VV = U_star[:, 1, :] # N x T
PP = P_star # N x T
x = XX.flatten()[:, None] # NT x 1
y = YY.flatten()[:, None] # NT x 1
t = TT.flatten()[:, None] # NT x 1
u = UU.flatten()[:, None] # NT x 1
v = VV.flatten()[:, None] # NT x 1
p = PP.flatten()[:, None] # NT x 1
# training domain: X × Y = [1, 8] × [−2, 2] and T = [0, 7]
data1 = np.concatenate([x, y, t, u, v, p], 1)
data2 = data1[:, :][data1[:, 2] <= 7]
data3 = data2[:, :][data2[:, 0] >= 1]
data4 = data3[:, :][data3[:, 0] <= 8]
data5 = data4[:, :][data4[:, 1] >= -2]
data_domain = data5[:, :][data5[:, 1] <= 2]
# choose number of training points: num =7000
idx = np.random.choice(data_domain.shape[0], num, replace=False)
x_train = data_domain[idx, 0:1]
y_train = data_domain[idx, 1:2]
t_train = data_domain[idx, 2:3]
u_train = data_domain[idx, 3:4]
v_train = data_domain[idx, 4:5]
p_train = data_domain[idx, 5:6]
return [x_train, y_train, t_train, u_train, v_train, p_train]
# Parameters to be identified
C1 = dde.Variable(0.0)
C2 = dde.Variable(0.0)
# Define Navier Stokes Equations (Time-dependent PDEs)
def Navier_Stokes_Equation(x, y):
u = y[:, 0:1]
v = y[:, 1:2]
p = y[:, 2:3]
du_x = dde.grad.jacobian(y, x, i=0, j=0)
du_y = dde.grad.jacobian(y, x, i=0, j=1)
du_t = dde.grad.jacobian(y, x, i=0, j=2)
dv_x = dde.grad.jacobian(y, x, i=1, j=0)
dv_y = dde.grad.jacobian(y, x, i=1, j=1)
dv_t = dde.grad.jacobian(y, x, i=1, j=2)
dp_x = dde.grad.jacobian(y, x, i=2, j=0)
dp_y = dde.grad.jacobian(y, x, i=2, j=1)
du_xx = dde.grad.hessian(y, x, component=0, i=0, j=0)
du_yy = dde.grad.hessian(y, x, component=0, i=1, j=1)
dv_xx = dde.grad.hessian(y, x, component=1, i=0, j=0)
dv_yy = dde.grad.hessian(y, x, component=1, i=1, j=1)
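    # PDE residuals: continuity (incompressibility) plus x/y momentum, with C1 scaling convection and C2 the viscous terms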
continuity = du_x + dv_y
x_momentum = du_t + C1 * (u * du_x + v * du_y) + dp_x - C2 * (du_xx + du_yy)
y_momentum = dv_t + C1 * (u * dv_x + v * dv_y) + dp_y - C2 * (dv_xx + dv_yy)
return [continuity, x_momentum, y_momentum]
# Define Spatio-temporal domain
# Rectangular
Lx_min, Lx_max = 1.0, 8.0
Ly_min, Ly_max = -2.0, 2.0
# Spatial domain: X × Y = [1, 8] × [−2, 2]
space_domain = dde.geometry.Rectangle([Lx_min, Ly_min], [Lx_max, Ly_max])
# Time domain: T = [0, 7]
time_domain = dde.geometry.TimeDomain(0, 7)
# Spatio-temporal domain
geomtime = dde.geometry.GeometryXTime(space_domain, time_domain)
# Get the training data: num = 7000
[ob_x, ob_y, ob_t, ob_u, ob_v, ob_p] = load_training_data(num=7000)
ob_xyt = np.hstack((ob_x, ob_y, ob_t))
observe_u = dde.icbc.PointSetBC(ob_xyt, ob_u, component=0)
observe_v = dde.icbc.PointSetBC(ob_xyt, ob_v, component=1)
# Training datasets and Loss
data = dde.data.TimePDE(
geomtime,
Navier_Stokes_Equation,
[observe_u, observe_v],
num_domain=700,
num_boundary=200,
num_initial=100,
anchors=ob_xyt,
)
# Neural Network setup
layer_size = [3] + [50] * 6 + [3]
activation = "tanh"
initializer = "Glorot uniform"
net = dde.nn.FNN(layer_size, activation, initializer)
model = dde.Model(data, net)
# callbacks for storing results
fnamevar = "variables.dat"
variable = dde.callbacks.VariableValue([C1, C2], period=100, filename=fnamevar)
# Compile, train and save model
model.compile("adam", lr=1e-3, external_trainable_variables=[C1, C2])
loss_history, train_state = model.train(
epochs=10000, callbacks=[variable], display_every=1000, disregard_previous_best=True
)
dde.saveplot(loss_history, train_state, issave=True, isplot=True)
model.compile("adam", lr=1e-4, external_trainable_variables=[C1, C2])
loss_history, train_state = model.train(
epochs=10000, callbacks=[variable], display_every=1000, disregard_previous_best=True
)
dde.saveplot(loss_history, train_state, issave=True, isplot=True)
# model.save(save_path = "./NS_inverse_model/model")
f = model.predict(ob_xyt, operator=Navier_Stokes_Equation)
print("Mean residual:", np.mean(np.absolute(f)))
# Plot Variables:
# reopen saved data using callbacks in fnamevar
lines = open(fnamevar, "r").readlines()
# read output data in fnamevar
Chat = np.array(
[
np.fromstring(
min(re.findall(re.escape("[") + "(.*?)" + re.escape("]"), line), key=len),
sep=",",
)
for line in lines
]
)
l, c = Chat.shape
plt.semilogy(range(0, l * 100, 100), Chat[:, 0], "r-")
plt.semilogy(range(0, l * 100, 100), Chat[:, 1], "k-")
plt.semilogy(range(0, l * 100, 100), np.ones(Chat[:, 0].shape) * C1true, "r--")
plt.semilogy(range(0, l * 100, 100), np.ones(Chat[:, 1].shape) * C2true, "k--")
plt.legend(["C1hat", "C2hat", "True C1", "True C2"], loc="right")
plt.xlabel("Epochs")
plt.title("Variables")
plt.show()
# Plot the velocity distribution of the flow field:
for t in range(0, 8):
[ob_x, ob_y, ob_t, ob_u, ob_v, ob_p] = load_training_data(num=140000)
xyt_pred = np.hstack((ob_x, ob_y, t * np.ones((len(ob_x), 1))))
uvp_pred = model.predict(xyt_pred)
x_pred, y_pred, t_pred = xyt_pred[:, 0], xyt_pred[:, 1], xyt_pred[:, 2]
u_pred, v_pred, p_pred = uvp_pred[:, 0], uvp_pred[:, 1], uvp_pred[:, 2]
x_true = ob_x[ob_t == t]
y_true = ob_y[ob_t == t]
u_true = ob_u[ob_t == t]
fig, ax = plt.subplots(2, 1)
cntr0 = ax[0].tricontourf(x_pred, y_pred, u_pred, levels=80, cmap="rainbow")
cb0 = plt.colorbar(cntr0, ax=ax[0])
cntr1 = ax[1].tricontourf(x_true, y_true, u_true, levels=80, cmap="rainbow")
cb1 = plt.colorbar(cntr1, ax=ax[1])
ax[0].set_title("u-PINN " + "(t=" + str(t) + ")", fontsize=9.5)
ax[0].axis("scaled")
ax[0].set_xlabel("X", fontsize=7.5, family="Arial")
ax[0].set_ylabel("Y", fontsize=7.5, family="Arial")
ax[1].set_title("u-Reference solution " + "(t=" + str(t) + ")", fontsize=9.5)
ax[1].axis("scaled")
ax[1].set_xlabel("X", fontsize=7.5, family="Arial")
ax[1].set_ylabel("Y", fontsize=7.5, family="Arial")
fig.tight_layout()
plt.show()
| 37.113514 | 99 | 0.628605 | 1,212 | 6,866 | 3.415017 | 0.231848 | 0.006282 | 0.028993 | 0.030925 | 0.32085 | 0.274462 | 0.255376 | 0.243295 | 0.147862 | 0.131916 | 0 | 0.054414 | 0.191669 | 6,866 | 184 | 100 | 37.315217 | 0.69027 | 0.149869 | 0 | 0.055944 | 0 | 0 | 0.044191 | 0.006042 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013986 | false | 0 | 0.034965 | 0 | 0.062937 | 0.006993 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
500d4e2fd60def589df1950b2d6f626a80aa3f81 | 3,035 | py | Python | pilosa/validator.py | EvilMcJerkface/python-pilosa | 8c07596c1538d7dfe608d45406b4d6779359ed10 | [
"BSD-3-Clause"
] | 35 | 2017-05-01T18:43:40.000Z | 2022-02-13T17:09:28.000Z | pilosa/validator.py | mkuzdowicz/python-pilosa | 8c07596c1538d7dfe608d45406b4d6779359ed10 | [
"BSD-3-Clause"
] | 109 | 2017-05-02T17:23:54.000Z | 2021-07-06T15:14:25.000Z | pilosa/validator.py | mkuzdowicz/python-pilosa | 8c07596c1538d7dfe608d45406b4d6779359ed10 | [
"BSD-3-Clause"
] | 15 | 2017-04-30T13:28:39.000Z | 2021-02-12T13:47:16.000Z | # Copyright 2017 Pilosa Corp.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
import re
from pilosa.exceptions import ValidationError
__all__ = ("valid_index_name", "validate_index_name", "valid_field_name",
"validate_field_name", "valid_label", "validate_label")
__INDEX_NAME = re.compile(r"^[a-z][a-z0-9_-]*$")
__FIELD_NAME = re.compile(r"^[a-z][a-z0-9_-]*$")
__LABEL = re.compile(r"^[a-zA-Z][a-zA-Z0-9_-]*$")
__KEY = re.compile(r"^[A-Za-z0-9_{}+/=.~%:-]*$")
__MAX_INDEX_NAME = 64
__MAX_FIELD_NAME = 64
__MAX_LABEL = 64
__MAX_KEY = 64
def valid_index_name(index_name):
if len(index_name) > __MAX_INDEX_NAME:
return False
return bool(__INDEX_NAME.match(index_name))
def validate_index_name(index_name):
if not valid_index_name(index_name):
raise ValidationError("Invalid index name: %s" % index_name)
def valid_field_name(field_name):
if len(field_name) > __MAX_FIELD_NAME:
return False
return bool(__FIELD_NAME.match(field_name))
def valid_key(key):
if len(key) > __MAX_KEY:
return False
return bool(__KEY.match(key))
def validate_field_name(field_name):
if not valid_field_name(field_name):
raise ValidationError("Invalid field name: %s" % field_name)
def valid_label(label):
if len(label) > __MAX_LABEL:
return False
return bool(__LABEL.match(label))
def validate_label(label):
if not valid_label(label):
raise ValidationError("Invalid label: %s" % label)
def validate_key(key):
if not valid_key(key):
raise ValidationError("Invalid key: %s" % key)
| 32.287234 | 73 | 0.738715 | 446 | 3,035 | 4.811659 | 0.336323 | 0.067102 | 0.018639 | 0.020503 | 0.203169 | 0.082013 | 0.082013 | 0.082013 | 0.082013 | 0.063374 | 0 | 0.009145 | 0.171334 | 3,035 | 93 | 74 | 32.634409 | 0.844135 | 0.478089 | 0 | 0.1 | 0 | 0 | 0.165055 | 0.031593 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.05 | 0 | 0.45 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
500df84d0e391ed7e6f451ff568a4b10edee796d | 960 | py | Python | pybullet-gym-rocus/setup.py | nbfigueroa/RoCUS | f1e1a538a2d0d12d307d9a003c4a2d5bcadcb30f | [
"MIT"
] | 7 | 2020-11-20T20:45:49.000Z | 2021-12-14T19:27:20.000Z | pybullet-gym-rocus/setup.py | nbfigueroa/RoCUS | f1e1a538a2d0d12d307d9a003c4a2d5bcadcb30f | [
"MIT"
] | 1 | 2021-03-03T03:57:21.000Z | 2021-03-03T03:57:21.000Z | pybullet-gym-rocus/setup.py | nbfigueroa/RoCUS | f1e1a538a2d0d12d307d9a003c4a2d5bcadcb30f | [
"MIT"
] | 4 | 2020-11-20T17:00:27.000Z | 2021-04-01T00:53:50.000Z | from setuptools import setup, find_packages
import sys, os.path
# Don't import gym module here, since deps may not be installed
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'pybulletgym_rocus'))
VERSION = 0.1
setup_py_dir = os.path.dirname(os.path.realpath(__file__))
need_files = []
datadir = "pybulletgym_rocus/envs/assets"
hh = setup_py_dir + "/" + datadir
for root, dirs, files in os.walk(hh):
for fn in files:
ext = os.path.splitext(fn)[1][1:]
if ext and ext in 'png gif jpg urdf sdf obj mtl dae off stl STL xml '.split():
fn = root + "/" + fn
need_files.append(fn[1+len(hh):])
setup(name='pybulletgym_rocus',
version=VERSION,
packages=[package for package in find_packages()
if package.startswith('pybulletgym_rocus')],
zip_safe=False,
install_requires=[
'pybullet>=1.7.8',
],
package_data={'pybulletgym_rocus': need_files},
)
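# To install this package for development (a sketch; run from this directory):
#   pip install -e .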
| 29.090909 | 86 | 0.652083 | 140 | 960 | 4.292857 | 0.528571 | 0.0599 | 0.043261 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012 | 0.21875 | 960 | 32 | 87 | 30 | 0.789333 | 0.063542 | 0 | 0 | 0 | 0 | 0.181717 | 0.03233 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
500dfb17e40ff938e17ef341ae4b73cad0965063 | 824 | py | Python | docker/build_devel_image.py | nightduck/AirSim | 2ba7124ceff7607f23463f483cd3e2cbe026d0ca | [
"MIT"
] | null | null | null | docker/build_devel_image.py | nightduck/AirSim | 2ba7124ceff7607f23463f483cd3e2cbe026d0ca | [
"MIT"
] | 14 | 2021-02-25T22:32:34.000Z | 2021-08-20T17:17:12.000Z | docker/build_devel_image.py | nightduck/AirSim | 2ba7124ceff7607f23463f483cd3e2cbe026d0ca | [
"MIT"
] | null | null | null | import argparse
import subprocess
def main():
parser = argparse.ArgumentParser(description='AirSim development docker image builder')
parser.add_argument('--target_image', type=str, help='base image name AND tag')
args = parser.parse_args()
build_docker_image(args)
def build_docker_image(args):
dockerfile = 'Dockerfile.jp45_dashing_devel'
target_image_tag = "jp4.5_dashing_devel"
if not args.target_image:
args.target_image = 'nightduck/airsim_cinematography' + ':' + target_image_tag
    docker_command = ['docker', 'build', '--network=host',
                      '-t', args.target_image,
                      '-f', dockerfile,
                      '../']
print(" ".join(docker_command))
subprocess.call(docker_command)
if __name__ == "__main__":
main()
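# Example invocation (image name and tag are hypothetical):
#   python build_devel_image.py --target_image myrepo/airsim_cinematography:jp4.5_dashing_devel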
| 31.692308 | 91 | 0.643204 | 91 | 824 | 5.494505 | 0.483516 | 0.132 | 0.09 | 0.08 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006319 | 0.231796 | 824 | 25 | 92 | 32.96 | 0.78357 | 0 | 0 | 0 | 0 | 0 | 0.239078 | 0.072816 | 0.05 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.2 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
500e18a4d7a911daeedcf51288d1913f399ec33b | 7,902 | py | Python | portal/views/truenth.py | pep8speaks/true_nth_usa_portal | 31ff755b0cfe61ab908e2a399e3c41ef17ca8c16 | [
"BSD-3-Clause"
] | 1 | 2019-03-11T12:25:20.000Z | 2019-03-11T12:25:20.000Z | portal/views/truenth.py | pep8speaks/true_nth_usa_portal | 31ff755b0cfe61ab908e2a399e3c41ef17ca8c16 | [
"BSD-3-Clause"
] | null | null | null | portal/views/truenth.py | pep8speaks/true_nth_usa_portal | 31ff755b0cfe61ab908e2a399e3c41ef17ca8c16 | [
"BSD-3-Clause"
] | null | null | null | """TrueNTH API view functions"""
from flask import (
Blueprint,
current_app,
jsonify,
make_response,
render_template,
request,
session,
url_for,
)
from werkzeug.exceptions import Unauthorized
from ..audit import auditable_event
from ..csrf import csrf
from ..extensions import oauth
from ..models.client import validate_origin
from ..models.user import current_user
from .crossdomain import crossdomain
truenth_api = Blueprint('truenth_api', __name__, url_prefix='/api')
@truenth_api.route("/ping", methods=('OPTIONS', 'POST'))
@csrf.portal_exempt
@crossdomain()
def ping():
"""POST request prolong session by reseting cookie timeout"""
current_app.logger.debug("ping received")
session.modified = True
return 'OK'
@truenth_api.route('/auditlog', methods=('POST',))
@oauth.require_oauth()
def auditlog_addevent():
"""Add event to audit log
API for client applications to add any event to the audit log. The message
will land in the same audit log as any auditable internal event, including
recording the authenticated user making the call.
Returns a json friendly message, i.e. {"message": "ok"}
---
operationId: auditlog_addevent
tags:
- TrueNTH
produces:
- application/json
parameters:
- in: body
name: body
schema:
id: message
required:
- message
properties:
message:
type: string
description: message text
responses:
200:
description: successful operation
schema:
id: response_ok
required:
- message
properties:
message:
type: string
description: Result, typically "ok"
401:
description: if missing valid OAuth token
"""
message = request.form.get('message')
if not message:
return jsonify(message="missing required 'message' in post")
auditable_event('remote message: {0}'.format(message), context='other',
user_id=current_user().id, subject_id=current_user().id)
return jsonify(message='ok')
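# A minimal sketch of calling this endpoint (host and token are hypothetical):
#   curl -X POST https://portal.example.org/api/auditlog \
#        -H "Authorization: Bearer <access_token>" \
#        -d "message=intervention page viewed"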
@truenth_api.route('/portal-wrapper-html/', methods=('GET', 'OPTIONS'))
@crossdomain()
def portal_wrapper_html():
"""Returns portal wrapper for insertion at top of interventions
Get html for the portal site UI wrapper (top-level nav elements, timeout code, piwik analytics, etc)
CORS headers will only be included when the request includes well defined
Origin header.
To assist in logic decisions on client pages, the javascript variable
`truenth_authenticated` of type boolean included in the response will
    accurately describe the user's authenticated status.
---
tags:
- TrueNTH
operationId: getPortalWrapperHTML
produces:
- text/html
parameters:
- name: login_url
in: query
description:
URL on intervention to direct login requests. Typically an entry
point on the intervention, to initiate OAuth dance with
TrueNTH. Inclusion of this parameter affects
            the appearance of a "login" option in the portal menu, but only
displayed if the user has not logged in.
required: false
type: string
- name: disable_links
in: query
description:
If present, with any value, all links will be removed. Useful
during sessions where any navigation outside of the main well
is discouraged.
required: false
type: string
responses:
200:
description:
html for direct insertion near the top of the intervention's
page.
403:
description:
if a login_url is provided with an origin other than one
registered as a client app or intervention
"""
# Unlike all other oauth protected resources, we manually check
# if it's a valid oauth request as this resource is also available prior
# to logging in.
valid, req = oauth.verify_request(['email'])
if valid:
user = req.user
else:
user = current_user()
login_url = request.args.get('login_url')
if login_url and not user:
try:
validate_origin(login_url)
except Unauthorized:
current_app.logger.warning(
"invalid origin on login_url `%s` from referer `%s`",
login_url, request.headers.get('Referer'))
return make_response("login_url lacks a valid origin: {}".format(
login_url)), 403
else:
login_url = None
if user and user.image_url:
movember_profile = user.image_url
else:
movember_profile = ''.join((
'//',
current_app.config['SERVER_NAME'],
url_for('static', filename='img/movember_profile_thumb.png'),
))
def branded_logos():
"""return path to branded logos if called for in tuple (lg, small)"""
if 'brand' in request.args:
brand_name = request.args.get('brand')
return (
url_for('static',
filename="img/{}.png".format(brand_name),
_external=True),
url_for('static',
filename="img/{}_sm.png".format(brand_name),
_external=True)
)
def expires_in():
"""compute remaining seconds on session"""
expires = current_app.permanent_session_lifetime.total_seconds()
return expires
cookie_timeout = request.cookies.get('SS_TIMEOUT')
disable_links = True if 'disable_links' in request.args else False
html = render_template(
'portal_wrapper.html',
PORTAL=''.join(('//', current_app.config['SERVER_NAME'])),
user=user,
movember_profile=movember_profile,
login_url=login_url,
branded_logos=branded_logos(),
        enable_links=not disable_links,
        expires_in=cookie_timeout if cookie_timeout else expires_in()
)
return make_response(html)
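# Example request from an intervention page (host and login_url values are
# hypothetical; the login_url origin must belong to a registered client):
#   GET https://portal.example.org/api/portal-wrapper-html/?login_url=https://intervention.example.org/login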
@truenth_api.route('/portal-footer-html/', methods=('GET', 'OPTIONS'))
@crossdomain()
def portal_footer_html():
"""Returns portal footer for insertion at bottom of interventions
Get html for the portal site UI footer
CORS headers will only be included when the request includes well defined
Origin header.
---
tags:
- TrueNTH
operationId: getPortalFooterHTML
produces:
- text/html
responses:
200:
description:
html for direct insertion near the bottom of the intervention's
page.
"""
# Unlike all other oauth protected resources, we manually check
# if it's a valid oauth request as this resource is also available prior
# to logging in.
valid, req = oauth.verify_request(['email'])
if valid:
user = req.user
else:
user = current_user()
html = render_template(
'portal_footer.html',
PORTAL=''.join(('//', current_app.config['SERVER_NAME'])),
user=user
)
return make_response(html)
### Deprecated rewrites follow
@truenth_api.route('/portal-wrapper-html/<username>',
methods=('GET', 'OPTIONS'))
@crossdomain()
def depricated_portal_wrapper_html(username):
current_app.logger.warning("use of depricated API %s from referer %s",
request.url, request.headers.get('Referer'))
return portal_wrapper_html()
@truenth_api.route('/protected-portal-wrapper-html', methods=('GET', 'OPTIONS'))
@crossdomain()
def protected_portal_wrapper_html():
current_app.logger.warning("use of depricated API %s from referer %s",
request.url, request.headers.get('Referer'))
return portal_wrapper_html()
| 31.482072 | 104 | 0.631359 | 923 | 7,902 | 5.279523 | 0.289274 | 0.021342 | 0.031398 | 0.022984 | 0.326083 | 0.296532 | 0.261646 | 0.230248 | 0.210548 | 0.172789 | 0 | 0.003337 | 0.279423 | 7,902 | 250 | 105 | 31.608 | 0.852476 | 0.401417 | 0 | 0.267241 | 0 | 0 | 0.151066 | 0.02595 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.068966 | 0 | 0.224138 | 0.017241 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
501156de0b074597ba0db6bd8da7d002b02256f3 | 2,310 | py | Python | src/app/start.py | roch1990/aiohttp-blog | 32e7b76b5b293d4517631ea82dfa2b268a1662eb | [
"MIT"
] | null | null | null | src/app/start.py | roch1990/aiohttp-blog | 32e7b76b5b293d4517631ea82dfa2b268a1662eb | [
"MIT"
] | null | null | null | src/app/start.py | roch1990/aiohttp-blog | 32e7b76b5b293d4517631ea82dfa2b268a1662eb | [
"MIT"
] | null | null | null | import asyncio
import aiohttp_jinja2
import jinja2
from aiohttp import web
from aiopg.sa import create_engine
# from app.database.common import prepare_tables
from app.handlers.admin.categories import AdminCategories
from app.handlers.admin.create_entity import AdminCreateEntity
from app.handlers.user.about_me import UserAbout
from app.handlers.user.categories import UserCategories
from app.handlers.user.category import UserCategory
from app.handlers.user.dashboard import UserDashboard
from app.handlers.user.entity import UserEntity
from config import Config
async def database_client(app):
app['database'] = await create_engine(
user=Config.db_user,
database=Config.db_name,
host=Config.db_host,
port=Config.db_port,
password=Config.db_pass
)
yield
await app['database'].wait_close()
await asyncio.sleep(0.250)
async def make_app(project_root: str) -> web.Application:
app = web.Application()
aiohttp_jinja2.setup(
app=app,
loader=jinja2.FileSystemLoader('./templates'),
)
app.cleanup_ctx.append(database_client)
# For user
# Category handlers
app.router.add_route(path='/', handler=UserDashboard, name='dashboard', method='get')
app.router.add_route(path='/categories', handler=UserCategories, name='categories', method='get')
app.router.add_route(path='/category/{category_title}', handler=UserCategory, name='category', method='get')
# Entity handler
app.router.add_route(path='/entity/{entity_id}', handler=UserEntity, name='entity', method='get')
# About me handler
app.router.add_route(path='/about_me', handler=UserAbout, name='about_me', method='get')
# For admin
# Categories
app.router.add_route(path='/admin/categories', handler=AdminCategories, name='admin_categories', method='get')
# Entities
app.router.add_route(path='/admin/entity/create', handler=AdminCreateEntity, name='admin_create_entity', method='get')
app.router.add_route(path='/admin/entity/create', handler=AdminCreateEntity, name='admin_create_entity', method='post')
# Don't use this for production. Use nginx static (for example) instead.
app.router.add_static(
        prefix='/static',
path=f'{project_root}/static',
)
return app
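# A minimal sketch of serving the app (host/port are hypothetical):
#   web.run_app(make_app(project_root='.'), host='0.0.0.0', port=8080)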
| 33.970588 | 123 | 0.72987 | 297 | 2,310 | 5.552189 | 0.282828 | 0.049121 | 0.065494 | 0.082474 | 0.208611 | 0.195876 | 0.146149 | 0.107944 | 0.107944 | 0.107944 | 0 | 0.004084 | 0.151948 | 2,310 | 67 | 124 | 34.477612 | 0.837672 | 0.089177 | 0 | 0 | 0 | 0 | 0.142311 | 0.022445 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.022727 | 0.295455 | 0 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5011625deab948f455b9294a59b2380033e1b545 | 675 | py | Python | textsummarizer/nlbasics_test.py | katiewimmer/textsummarizer | 61304f21b5b41fea44c25fdb5b880faa92675507 | [
"MIT"
] | null | null | null | textsummarizer/nlbasics_test.py | katiewimmer/textsummarizer | 61304f21b5b41fea44c25fdb5b880faa92675507 | [
"MIT"
] | null | null | null | textsummarizer/nlbasics_test.py | katiewimmer/textsummarizer | 61304f21b5b41fea44c25fdb5b880faa92675507 | [
"MIT"
] | 8 | 2020-10-09T03:36:01.000Z | 2020-11-10T15:19:07.000Z | import unittest
import nlbasics
from nltk.stem.lancaster import LancasterStemmer
from nltk.tokenize import word_tokenize
class Testing(unittest.TestCase):
def test_tokenizer(self):
text = "Mary had a little lamb"
words = nlbasics.nlbasics().do_tokenize(sample=text)
self.assertEqual(words, [['Mary', 'had', 'a', 'little', 'lamb']])
def test_stemming(self):
text = "I jumped into the cars quickly"
st = LancasterStemmer()
stemmedWords = [st.stem(word) for word in word_tokenize(text)]
self.assertEqual(stemmedWords, ['i', 'jump', 'into', 'the', 'car', 'quick'])
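# Run with: python nlbasics_test.py
# (word_tokenize requires the NLTK 'punkt' tokenizer data; if missing,
#  install it with: python -m nltk.downloader punkt)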
if __name__ == '__main__':
unittest.main() | 33.75 | 84 | 0.665185 | 82 | 675 | 5.317073 | 0.536585 | 0.036697 | 0.036697 | 0.06422 | 0.082569 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.2 | 675 | 20 | 85 | 33.75 | 0.807407 | 0 | 0 | 0 | 0 | 0 | 0.14497 | 0 | 0 | 0 | 0 | 0 | 0.125 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.4375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50159c779c7700511661222c6e2cc9190607bb69 | 9,736 | py | Python | demo.py | foamliu/SHALE | 022625cb21bf6aa91e75b077ff1ab378d577fc50 | [
"MIT"
] | 1 | 2019-09-30T00:59:27.000Z | 2019-09-30T00:59:27.000Z | demo.py | foamliu/SHALE | 022625cb21bf6aa91e75b077ff1ab378d577fc50 | [
"MIT"
] | 1 | 2019-09-30T09:23:51.000Z | 2019-09-30T09:23:51.000Z | demo.py | foamliu/SHALE | 022625cb21bf6aa91e75b077ff1ab378d577fc50 | [
"MIT"
] | 1 | 2021-03-18T06:58:25.000Z | 2021-03-18T06:58:25.000Z | #!/usr/bin/env python
from random import random
from sympy import *
class Supply:
def __init__(self, file):
self.pair = {}
self.satisfy_demand = {}
with open(file, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('#'):
continue
i, s = line.split('\t')
self.pair[i] = int(s)
def get_supply(self, i):
return self.pair[i]
def get_satisfy_demand(self, i):
return self.satisfy_demand[i]
def get_all_i(self):
return self.pair.keys()
class Demand:
def __init__(self, file, supply):
self.demand = {}
self.penalty = {}
self.target_supply = {}
with open(file, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('#'):
continue
j, d, p, ii = line.split('\t')
self.demand[j] = int(d)
self.penalty[j] = float(p)
self.target_supply[j] = ii.split(',')
self._set_supply_satisfy_demand(supply)
def _set_supply_satisfy_demand(self, supply):
for (j, ii) in self.target_supply.items():
for i in ii:
if i not in supply.satisfy_demand:
supply.satisfy_demand[i] = []
supply.satisfy_demand[i].append(j)
def get_demand(self, j):
return self.demand[j]
def get_penalty(self, j):
return self.penalty[j]
def get_target_supply(self, j):
return self.target_supply[j]
def get_all_j(self):
return self.demand.keys()
def get_v(self, j):
return 1.0
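# Expected input file layout (tab-separated; lines starting with '#' are
# comments; the example values are hypothetical):
#   supply.txt lines:  <supply_node>\t<inventory>,  e.g. "i1\t100"
#   demand.txt lines:  <demand_node>\t<demand>\t<penalty>\t<comma-separated targets>,
#                      e.g. "j1\t50\t2.0\ti1,i2"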
class Shale:
def __init__(self, supply, demand):
self.supply = supply
self.demand = demand
def initialize(self):
self.alpha_j = {}
self.beta_i = {}
self.theta_ij = {}
self.sigma_j = {}
for j in self.demand.get_all_j():
self.alpha_j[j] = 0.0
sum = 0.0
for i in self.demand.get_target_supply(j):
sum += self.supply.get_supply(i)
self.theta_ij[j] = self.demand.get_demand(j) / sum
def stage_one(self, iters):
while iters > 0:
for i in self.supply.get_all_i():
self.update_beta(i)
for j in self.demand.get_all_j():
self.update_alpha(j)
iters -= 1
print('stage one alpha --->', sorted(self.alpha_j.items(), key=lambda d: d[0]))
print('stage one beta --->', sorted(self.beta_i.items(), key=lambda d: d[0]))
def stage_two(self):
self.s_i = {}
for i in self.supply.get_all_i():
self.s_i[i] = self.supply.get_supply(i)
self.update_beta(i)
print('stage two beta --->', sorted(self.beta_i.items(), key=lambda d: d[0]))
for j in self.demand.get_all_j():
self.find_sigma(j)
for i in self.demand.get_target_supply(j):
g = max(0.0, self.theta_ij[j] * (1.0 + (self.sigma_j[j] - self.beta_i[i]) / self.demand.get_v(j)))
self.s_i[i] -= min(self.s_i[i], self.supply.get_supply(i) * g)
def output(self):
print('output alpha_j --->', sorted(self.alpha_j.items(), key=lambda d: d[0]))
print('output sigma_j --->', sorted(self.sigma_j.items(), key=lambda d: d[0]))
def update_beta(self, i):
beta = Symbol('beta')
flag = True
for j in self.supply.get_satisfy_demand(i):
f = self.theta_ij[j] * (1.0 + (self.alpha_j[j] - beta) / self.demand.get_v(j))
if flag:
sum = Piecewise((f, f >= 0), (0, f < 0))
flag = False
else:
sum += Piecewise((f, f >= 0), (0, f < 0))
result = solve(sum - 1, beta)
if len(result) == 0 or result[0] < 0.0:
self.beta_i[i] = 0.0
else:
self.beta_i[i] = result[0]
def update_alpha(self, j):
alpha = Symbol('alpha')
flag = True
for i in self.demand.get_target_supply(j):
s = self.supply.get_supply(i)
f = self.theta_ij[j] * (1.0 + (alpha - self.beta_i[i]) / self.demand.get_v(j))
if flag:
sum = s * Piecewise((f, f >= 0), (0, f < 0))
flag = False
else:
sum += s * Piecewise((f, f >= 0), (0, f < 0))
result = solve(sum - self.demand.get_demand(j), alpha)
if len(result) == 0 or result[0] > self.demand.get_penalty(j):
self.alpha_j[j] = self.demand.get_penalty(j)
# if len(result) == 0 or result[0] < 0.0:
# self.alpha_j[j] = 0.0
else:
self.alpha_j[j] = result[0]
def find_sigma(self, j):
sigma = Symbol('sigma')
flag = True
for i in self.demand.get_target_supply(j):
s = self.supply.get_supply(i)
f = self.theta_ij[j] * (1.0 + (sigma - self.beta_i[i]) / self.demand.get_v(j))
f_max = s * Piecewise((f, f >= 0), (0, f < 0))
f_min = Piecewise((self.s_i[i], self.s_i[i] <= f_max), (f_max, self.s_i[i] > f_max))
if flag:
sum = f_min
flag = False
else:
sum += f_min
result = solve(sum - self.demand.get_demand(j), sigma)
if len(result) == 0:
self.sigma_j[j] = float('inf')
else:
self.sigma_j[j] = result[0]
class Online:
def __init__(self, supply, demand, alpha_j, sigma_j):
self.supply = supply
self.demand = demand
self.alpha_j = alpha_j
self.sigma_j = sigma_j
self.theta_ij = {}
self.beta_i = {}
self.allocation_j = {}
self.remaind_i = {}
for i in self.supply.get_all_i():
self.remaind_i[i] = supply.get_supply(i)
for j in self.demand.get_all_j():
sum = 0.0
for i in self.demand.get_target_supply(j):
sum += self.supply.get_supply(i)
self.theta_ij[j] = self.demand.get_demand(j) / sum
self.allocation_j[j] = 0
def allocation(self, i):
s = 1.0
x_ij = {}
if i not in self.beta_i:
self.update_beta(i)
for j in self.supply.get_satisfy_demand(i):
g = max(0.0, self.theta_ij[j] * (1.0 + (self.sigma_j[j] - self.beta_i[i]) / self.demand.get_v(j)))
x_ij[j] = min(s, g)
s -= x_ij[j]
sum = 0.0
for (j, p) in x_ij.items():
sum += p
if sum < 1.0:
            print('there is a %f chance that no contract is selected' % (1.0 - sum))
r = random()
sum = 0.0
for (j, p) in x_ij.items():
sum += p
if r < sum:
self.allocation_j[j] += 1
self.remaind_i[i] -= 1
break
def update_beta(self, i):
beta = Symbol('beta')
flag = True
for j in self.supply.get_satisfy_demand(i):
f = self.theta_ij[j] * (1.0 + (self.alpha_j[j] - beta) / self.demand.get_v(j))
if flag:
sum = Piecewise((f, f >= 0), (0, f < 0))
flag = False
else:
sum += Piecewise((f, f >= 0), (0, f < 0))
result = solve(sum - 1, beta)
if len(result) == 0 or result[0] < 0.0:
self.beta_i[i] = 0.0
else:
self.beta_i[i] = result[0]
class Debug:
def __init__(self, shale, online):
self.shale = shale
self.online = online
def print_supply(self):
ii = self.shale.supply.get_all_i()
        ii = sorted(ii)
print("\nsupply:")
print("supply_node\tinventory\tsatisfy_demand")
for i in ii:
print('%s\t\t%d\t\t%s' % (
i, self.shale.supply.get_supply(i), ','.join(self.shale.supply.get_satisfy_demand(i))))
def print_demand(self):
jj = self.shale.demand.get_all_j()
        jj = sorted(jj)
print("\ndemand:")
print("demand_node\tdemand\tpenalty\ttarget_supply")
for j in jj:
print('%s\t\t%d\t%f\t%s' % (j, self.shale.demand.get_demand(j), self.shale.demand.get_penalty(j),
','.join(self.shale.demand.get_target_supply(j))))
def print_online_allocation(self):
jj = self.online.demand.get_all_j()
        jj = sorted(jj)
print("\nallocation:")
print("demand_node\tdemand\t\tallocation")
for j in jj:
print('%s\t\t%d\t\t%d' % (j, self.online.demand.get_demand(j), self.online.allocation_j[j]))
def print_online_remaind(self):
ii = self.online.supply.get_all_i()
        ii = sorted(ii)
print("\nremaind:")
print("supply_node\tinventory\tremaind")
for i in ii:
print('%s\t\t%d\t\t%s' % (i, self.online.supply.get_supply(i), self.online.remaind_i[i]))
def main():
supply = Supply('./supply.txt')
demand = Demand('./demand.txt', supply)
shale = Shale(supply, demand)
shale.initialize()
shale.stage_one(5)
shale.stage_two()
shale.output()
online = Online(supply, demand, shale.alpha_j, shale.sigma_j)
for i in supply.get_all_i():
inventory = supply.get_supply(i)
while inventory > 0:
online.allocation(i)
inventory -= 1
debug = Debug(shale, online)
debug.print_supply()
debug.print_demand()
debug.print_online_allocation()
debug.print_online_remaind()
if __name__ == '__main__':
main()
| 32.561873 | 114 | 0.516537 | 1,389 | 9,736 | 3.453564 | 0.085673 | 0.056285 | 0.056911 | 0.033354 | 0.521576 | 0.456327 | 0.433813 | 0.424849 | 0.409006 | 0.313738 | 0 | 0.014781 | 0.339873 | 9,736 | 298 | 115 | 32.671141 | 0.731601 | 0.012839 | 0 | 0.399194 | 0 | 0 | 0.046855 | 0.015098 | 0 | 0 | 0 | 0 | 0 | 1 | 0.112903 | false | 0 | 0.008065 | 0.032258 | 0.173387 | 0.104839 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5016bfccf943dbf65cee2bcc3f079fb37280db2a | 12,877 | py | Python | resqpy/olio/write_hdf5.py | poc11/resqpy | 5dfbfb924f8ee9b2712fb8e38bff96ee8ee9d8e2 | [
"MIT"
] | 35 | 2021-05-24T17:43:55.000Z | 2022-03-15T15:47:59.000Z | resqpy/olio/write_hdf5.py | poc11/resqpy | 5dfbfb924f8ee9b2712fb8e38bff96ee8ee9d8e2 | [
"MIT"
] | 355 | 2021-05-24T16:28:16.000Z | 2022-03-14T14:43:40.000Z | resqpy/olio/write_hdf5.py | poc11/resqpy | 5dfbfb924f8ee9b2712fb8e38bff96ee8ee9d8e2 | [
"MIT"
] | 12 | 2021-06-02T09:50:46.000Z | 2022-03-02T20:29:56.000Z | """write_hdf5.py: Class to write a resqml hdf5 file and functions for copying hdf5 data."""
version = '26th August 2021'
# Nexus is a registered trademark of the Halliburton Company
# write_hdf5.py
# approach is to register the datasets (arrays) to be written; then write everything in a separate, single function call
import logging
log = logging.getLogger(__name__)
import os
import h5py
import numpy as np
import resqpy.olio.uuid as bu
resqml_path_head = '/RESQML/' # note: latest fesapi code uses RESQML20
write_bool_as_int8 = True # Nexus read fails if bool used as hdf5 element dtype; also better for NullValue handling
write_int_as_int32 = True # only applies if registered dtype is None
class H5Register():
"""Class for registering arrays and then writing to an hdf5 file."""
def __init__(self, model):
"""Create a new, empty register of arrays to be written to an hdf5 file."""
self.dataset_dict = {} # dictionary mapping from (object_uuid, group_tail) to (numpy_array, dtype)
self.hdf5_path_dict = {} # dictionary optionally mapping from (object_uuid, group_tail) to hdf5 internal path
self.model = model
def register_dataset(self, object_uuid, group_tail, a, dtype = None, hdf5_path = None, copy = False):
"""Register an array to be included as a dataset in the hdf5 file.
arguments:
object_uuid (uuid.UUID): the uuid of the object (part) that this array is for
group_tail (string): the remainder of the hdf5 internal path (following RESQML and
uuid elements)
a (numpy array): the dataset (array) to be registered for writing
dtype (type or string): the required type of the individual elements within the dataset
hdf5_path (string, optional): if present, a full hdf5 internal path to use instead of
the default generated from the uuid
copy (boolean, default False): if True, a copy of the array will be made at the time of
registering, otherwise changes made to the array before the write() method is called
are likely to be in the data that is written
returns:
None
notes:
several arrays might belong to the same object;
if a dtype is given and necessitates a conversion of the array data, the behaviour will
be as if the copy argument is True regardless of its setting
"""
# print('registering dataset with uuid ' + str(object_uuid) + ' and group tail ' + group_tail)
assert (len(group_tail) > 0)
assert a is not None
assert isinstance(a, np.ndarray)
if dtype is not None:
a = a.astype(dtype, copy = copy)
elif copy:
a = a.copy()
if group_tail[0] == '/':
group_tail = group_tail[1:]
if group_tail[-1] == '/':
group_tail = group_tail[:-1]
if (object_uuid, group_tail) in self.dataset_dict.keys():
pass # todo: warn of re-registration?
self.dataset_dict[(object_uuid, group_tail)] = (a, dtype)
if hdf5_path:
self.hdf5_path_dict[(object_uuid, group_tail)] = hdf5_path
def write_fp(self, fp):
"""Write or append to an hdf5 file, writing the pre-registered datasets (arrays).
arguments:
fp: an already open h5py._hl.files.File object
returns:
None
note:
the file handle fp must have been opened with mode 'w' or 'a'
"""
# note: in resqml, an established hdf5 file has a uuid and should therefore be immutable
# this function allows appending to any hdf5 file; calling code should set a new uuid when needed
assert (fp is not None)
for (object_uuid, group_tail) in self.dataset_dict.keys():
if (object_uuid, group_tail) in self.hdf5_path_dict.keys():
hdf5_path = self.hdf5_path_dict[(object_uuid, group_tail)]
else:
hdf5_path = resqml_path_head + str(object_uuid) + '/' + group_tail
(a, dtype) = self.dataset_dict[(object_uuid, group_tail)]
if dtype is None:
dtype = a.dtype
if write_int_as_int32 and str(dtype) == 'int64':
dtype = 'int32'
if write_bool_as_int8 and str(dtype).lower().startswith('bool'):
dtype = 'int8'
log.debug('Writing hdf5 dataset ' + hdf5_path + ' of size ' + str(a.size) + ' type ' + str(dtype))
fp.create_dataset(hdf5_path, data = a, dtype = dtype)
def write(self, file = None, mode = 'w', release_after = True):
"""Create or append to an hdf5 file, writing the pre-registered datasets (arrays).
arguments:
file: either a string being the file path, or an already open h5py._hl.files.File object;
if None (recommended), the file is opened through the model object's hdf5 management
functions
mode (string, default 'w'): the mode to open the file in; only relevant if file is a path;
            must be 'w' or 'a' for (over)write or append
        release_after (boolean, default True): if True, the model's hdf5 file
            handle is released (closed) after the datasets have been written
returns:
None
"""
# note: in resqml, an established hdf5 file has a uuid and should therefore be immutable
# this function allows appending to any hdf5 file;
# strictly, calling code should set a new uuid when needed, in practice not essential
if len(self.dataset_dict) == 0:
return
if file is None:
file = self.model.h5_access(mode = mode)
elif isinstance(file, str):
log.debug(f'writing to hdf5 file: {file}')
file = self.model.h5_access(mode = mode, file_path = file)
if mode == 'a' and isinstance(file, str) and not os.path.exists(file):
mode = 'w'
assert isinstance(file, h5py._hl.files.File)
self.write_fp(file)
if release_after:
self.model.h5_release()
def copy_h5(file_in, file_out, uuid_inclusion_list = None, uuid_exclusion_list = None, mode = 'w'):
"""Create a copy of an hdf5, optionally including or excluding arrays with specified uuids.
arguments:
file_in (string): path of existing hdf5 file to be duplicated
file_out (string): path of output hdf5 file to be created or appended to (see mode)
uuid_inclusion_list (list of uuid.UUID, optional): if present, the uuids to be included
in the output file
uuid_exclusion_list (list of uuid.UUID, optional): if present, the uuids to be excluded
from the output file
mode (string, default 'w'): mode to open output file with; must be 'w' or 'a' for
(over)write or append respectively
returns:
number of hdf5 groups (uuid's) copied
notes:
at most one of uuid_inclusion_list and uuid_exclusion_list should be passed;
if neither are passed, all the datasets (arrays) in the input file are copied to the
output file
"""
    # note: passing both inclusion and exclusion lists is disallowed (asserted below)
assert file_out != file_in, 'identical input and output files specified for hdf5 copy'
assert uuid_inclusion_list is None or uuid_exclusion_list is None, \
'inclusion and exclusion lists both specified for hdf5 copy; at most one allowed'
checking_uuid = uuid_inclusion_list is not None or uuid_exclusion_list is not None
assert mode in ['w', 'a']
copy_count = 0
with h5py.File(file_out, mode) as fp_out:
assert fp_out is not None, 'failed to open output hdf5 file: ' + file_out
with h5py.File(file_in, 'r') as fp_in:
assert fp_in is not None, 'failed to open input hdf5 file: ' + file_in
main_group_in = fp_in['RESQML']
assert main_group_in is not None, 'failed to find RESQML group in hdf5 file: ' + file_in
if mode == 'w':
main_group_out = fp_out.create_group('RESQML')
elif mode == 'a':
try:
main_group_out = fp_out['RESQML']
except Exception:
main_group_out = fp_out.create_group('RESQML')
else:
main_group_out = fp_out['RESQML']
for group in main_group_in:
if checking_uuid:
uuid = bu.uuid_from_string(group)
if uuid_inclusion_list is not None:
if uuid not in uuid_inclusion_list:
if uuid is None:
log.warning('RESQML group name in hdf5 file does not start with a uuid, skipping: ' +
str(group))
continue
else: # uuid_exclusion_list is not None
if uuid in uuid_exclusion_list:
continue
if uuid is None: # will still be copied
log.warning('RESQML group name in hdf5 file does not start with a uuid: ' + str(group))
if group in main_group_out:
log.warning('not copying hdf5 data due to pre-existence for: ' + str(group))
continue
log.debug('copying hdf5 data for uuid: ' + group)
main_group_in.copy(group,
main_group_out,
expand_soft = True,
expand_external = True,
expand_refs = True)
copy_count += 1
return copy_count
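# A minimal usage sketch (the file names and some_uuid are hypothetical):
#   n = copy_h5('existing.h5', 'subset.h5', uuid_inclusion_list = [some_uuid])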
def copy_h5_path_list(file_in, file_out, hdf5_path_list, mode = 'w'):
"""Create a copy of some hdf5 datasets (or groups), identified as a list of hdf5 internal paths.
arguments:
file_in (string): path of existing hdf5 file to be copied from
file_out (string): path of output hdf5 file to be created or appended to (see mode)
hdf5_path_list (list of string): the hdf5 internal paths of the datasets (or groups) to be copied
mode (string, default 'w'): mode to open output file with; must be 'w' or 'a' for
(over)write or append respectively
returns:
number of hdf5 datasets (or groups) copied
"""
assert file_out != file_in, 'identical input and output files specified for hdf5 copy'
assert hdf5_path_list is not None
assert mode in ['w', 'a']
copy_count = 0
with h5py.File(file_out, mode) as fp_out:
assert fp_out is not None, f'failed to open output hdf5 file: {file_out}'
with h5py.File(file_in, 'r') as fp_in:
assert fp_in is not None, f'failed to open input hdf5 file: {file_in}'
for path in hdf5_path_list:
if path in fp_out:
log.warning(f'not copying hdf5 data due to pre-existence for: {path}')
continue
assert path in fp_in, f'internal path {path} not found in hdf5 file {file_in}'
log.debug(f'copying hdf5 data for: {path}')
build = ''
group_list = list(path.split(sep = '/'))
assert len(group_list) > 1, f'no hdf5 group(s) in internal path {path}'
for w in group_list[:-1]:
if w:
build += '/' + w
if build not in fp_out:
fp_out.create_group(build)
build += '/' + group_list[-1]
fp_out.create_dataset(build, data = fp_in[path])
# fp_in.copy(path, fp_out[path], expand_soft = True, expand_external = True, expand_refs = True)
copy_count += 1
return copy_count
def change_uuid(file, old_uuid, new_uuid):
"""Changes hdf5 internal path (group name) for part, switching from old to new uuid.
notes:
this is low level functionality not usually called directly;
the function assumes that hdf5 internal path names conform to the format that resqpy uses
when writing data, namely /RESQML/uuid/tail...
"""
assert file, 'hdf5 file name missing'
assert old_uuid is not None and new_uuid is not None, 'missing uuid'
def change_uuid_fp(fp, old_uuid, new_uuid):
main_group = fp[resqml_path_head.strip('/')]
old_group = main_group[str(old_uuid)]
main_group[str(new_uuid)] = old_group
del main_group[str(old_uuid)]
if isinstance(file, h5py._hl.files.File):
change_uuid_fp(file, old_uuid, new_uuid)
else:
assert isinstance(file, str)
with h5py.File(file, 'r+') as fp:
change_uuid_fp(fp, old_uuid, new_uuid)
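# A minimal usage sketch (file name and uuids are hypothetical):
#   change_uuid('model.h5', old_uuid, new_uuid)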
| 45.989286 | 123 | 0.608682 | 1,795 | 12,877 | 4.226741 | 0.162674 | 0.025306 | 0.017794 | 0.027547 | 0.405299 | 0.388823 | 0.338078 | 0.30895 | 0.275471 | 0.244893 | 0 | 0.012847 | 0.316922 | 12,877 | 279 | 124 | 46.154122 | 0.849704 | 0.389221 | 0 | 0.186667 | 0 | 0 | 0.128884 | 0 | 0 | 0 | 0 | 0.003584 | 0.14 | 1 | 0.053333 | false | 0.006667 | 0.033333 | 0 | 0.113333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5017f784c033f58b62d89b89d1944c814b6d00d6 | 471 | py | Python | PyUdemy/Day8/AreaCalc.py | JoseArtur/phyton-exercices | f3da4447044e445222233960f991fb2e36311131 | [
"MIT"
] | null | null | null | PyUdemy/Day8/AreaCalc.py | JoseArtur/phyton-exercices | f3da4447044e445222233960f991fb2e36311131 | [
"MIT"
] | null | null | null | PyUdemy/Day8/AreaCalc.py | JoseArtur/phyton-exercices | f3da4447044e445222233960f991fb2e36311131 | [
"MIT"
] | null | null | null | #Write your code below this line 👇
def paint_calc(height, width, cover):
    import math
    calc = math.ceil((height * width) / cover)
print(f"You'll need {calc} cans of paint")
#Write your code above this line 👆
# Define a function called paint_calc() so that the code below works.
# 🚨 Don't change the code below 👇
test_h = int(input("Height of wall: "))
test_w = int(input("Width of wall: "))
coverage = 5
paint_calc(height=test_h, width=test_w, cover=coverage)
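# Worked example (hypothetical wall): height 3 m, width 9 m, coverage 5 m^2
# per can -> ceil(27 / 5) = 6 cans of paint.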
| 24.789474 | 72 | 0.70276 | 83 | 471 | 3.951807 | 0.542169 | 0.082317 | 0.079268 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002584 | 0.178344 | 471 | 18 | 73 | 26.166667 | 0.834625 | 0.358811 | 0 | 0 | 0 | 0 | 0.211409 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.25 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50194e6e6d2f4b4b991d59f7de42e7bce81cbf64 | 5,426 | py | Python | scs/x86f/features/SemanticFeaturesRecorderAC.py | kestreltechnology/kt-semantic-code-search | 3459e68b013bc0fd912072b052a1db6ef784f219 | [
"MIT"
] | 4 | 2019-09-30T10:06:34.000Z | 2021-01-04T06:38:45.000Z | scs/x86f/features/SemanticFeaturesRecorderAC.py | kestreltechnology/kt-semantic-code-search | 3459e68b013bc0fd912072b052a1db6ef784f219 | [
"MIT"
] | null | null | null | scs/x86f/features/SemanticFeaturesRecorderAC.py | kestreltechnology/kt-semantic-code-search | 3459e68b013bc0fd912072b052a1db6ef784f219 | [
"MIT"
] | 1 | 2021-09-16T02:25:30.000Z | 2021-09-16T02:25:30.000Z | # ------------------------------------------------------------------------------
# Python API to access CodeHawk Binary Analyzer analysis results
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2019 Kestrel Technology LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
import re
structureweights = {
'md5': 1,
'loopdepth': 4,
'loops': 3,
'blocks': 2,
'instrs': 1
}
hexg6 = re.compile('gv_0x[0-9a-f]{6}')
hexg7 = re.compile('gv_0x[0-9a-f]{7}')
hexg8 = re.compile('gv_0x[0-9a-f]{8}')
appa6 = re.compile('App:0x[0-9a-f]{6}')
appa7 = re.compile('App:0x[0-9a-f]{7}')
appa8 = re.compile('App:0x[0-9a-f]{8}')
addr6 = re.compile('0x[0-9a-f]{6}')
addr7 = re.compile('0x[0-9a-f]{7}')
addr8 = re.compile('0x[0-9a-f]{8}')
class SemanticFeaturesRecorderAC(object):
def __init__(self):
self.results = {}
self.featuresets = []
self.substitution = {}
self.substitution['_in'] = ''
def reset(self):
self.results = {}
self.substitution = {}
self.substitution['_in'] = ''
def substitute(self,term):
for t in sorted(self.substitution,reverse=True): # ensure App: is encountered before 0x
term = term.replace(t,self.substitution[t])
return term
def normalize_term(self,p):
if 'App:' in p:
apps = re.findall(appa8,p) + re.findall(appa7,p) + re.findall(appa6,p)
for a in apps:
p = p.replace(a,'App:F')
if 'gv_' in p:
gvars = re.findall(hexg8,p) + re.findall(hexg7,p) + re.findall(hexg6,p)
for g in gvars:
p = p.replace(g,'G')
if '@' in p:
addrs = re.findall(addr8,p) + re.findall(addr7,p) + re.findall(addr6,p)
for a in addrs:
p = p.replace('@' + a,'')
p = self.substitute(p)
return p
def add_term(self,featureset,term,n=1):
self.results.setdefault(featureset,{})
self.results[featureset].setdefault(term,0)
self.results[featureset][term] += n
def record(self,fnfeaturesets):
for fs in sorted(fnfeaturesets): # appcalls and dllcalls before predicates
features = fnfeaturesets[fs]
if fs == 'dllcalls':
self.record_dllcalls(features)
elif fs == 'appcalls':
self.record_appcalls(features)
elif fs in ['predicates', 'returnexprs', 'structuredrhs', 'structuredlhs',
'unresolvedcalls' ]:
self.record_expressions(fs,features)
elif fs == 'structure':
self.record_structure(features)
            # note: 'unresolvedcalls' is handled by record_expressions above
else:
for term in fnfeaturesets[fs]:
self.add_term(fs,term,fnfeaturesets[fs][term])
def record_dllcalls(self,features):
for term in features:
if term.endswith('A') or term.endswith('W'):
stemmedterm = term[:-1]
termfn = term.split(':')[1]
stemmedtermfn = stemmedterm.split(':')[1]
self.substitution[termfn] = stemmedtermfn
else:
stemmedterm = term
self.add_term('dllcalls',stemmedterm,features[term])
    def record_appcalls(self, features):
        # Currently a no-op. Earlier substitution-based handling is kept
        # below for reference but disabled:
        # for term in features:
        #     self.add_term('appcalls','App:F')
        #     self.substitution[term] = 'App:F'
        #     self.substitution['App:' + term] = 'App:F'
        pass
def record_expressions(self,fs,features):
for term in features:
if '@val_' in term: continue
if '<accelerate' in term: continue
if 'App:' in term or 'gv_' in term or '@' in term:
cterm = self.normalize_term(term)
elif addr8.match(term) or addr6.match(term):
cterm = 'Addr'
else:
cterm = self.substitute(term)
self.add_term(fs,cterm,features[term])
def record_structure(self,features):
weights = structureweights
for fs in features:
self.add_term(fs,str(features[fs]),weights[fs])
| 37.42069 | 96 | 0.57206 | 660 | 5,426 | 4.657576 | 0.310606 | 0.02635 | 0.014639 | 0.017567 | 0.107027 | 0.090436 | 0.034157 | 0 | 0 | 0 | 0 | 0.019206 | 0.270734 | 5,426 | 144 | 97 | 37.680556 | 0.757645 | 0.274051 | 0 | 0.11828 | 0 | 0 | 0.088686 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107527 | false | 0.010753 | 0.010753 | 0 | 0.150538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50196f88692b35920975b4050ad747b2abd0036f | 24,265 | py | Python | Bio/Align/stockholm.py | BatoolMM/biopython | 92bbd7840c3b3f2a742b6186ce5bd0d901d4e43e | [
"BSD-3-Clause"
] | null | null | null | Bio/Align/stockholm.py | BatoolMM/biopython | 92bbd7840c3b3f2a742b6186ce5bd0d901d4e43e | [
"BSD-3-Clause"
] | null | null | null | Bio/Align/stockholm.py | BatoolMM/biopython | 92bbd7840c3b3f2a742b6186ce5bd0d901d4e43e | [
"BSD-3-Clause"
] | 1 | 2022-03-03T21:45:02.000Z | 2022-03-03T21:45:02.000Z | # Copyright 2006-2016 by Peter Cock. All rights reserved.
# Copyright 2021 by Michiel de Hoon. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Bio.Align support for alignment files in the Stockholm file format.
You are expected to use this module via the Bio.Align functions.
For example, consider this alignment from PFAM for the HAT helix motif::
# STOCKHOLM 1.0
#=GF ID HAT
#=GF AC PF02184.18
#=GF DE HAT (Half-A-TPR) repeat
#=GF AU SMART;
#=GF SE Alignment kindly provided by SMART
#=GF GA 21.00 21.00;
#=GF TC 21.00 21.00;
#=GF NC 20.90 20.90;
#=GF BM hmmbuild HMM.ann SEED.ann
#=GF SM hmmsearch -Z 57096847 -E 1000 --cpu 4 HMM pfamseq
#=GF TP Repeat
#=GF CL CL0020
#=GF RN [1]
#=GF RM 9478129
#=GF RT The HAT helix, a repetitive motif implicated in RNA processing.
#=GF RA Preker PJ, Keller W;
#=GF RL Trends Biochem Sci 1998;23:15-16.
#=GF DR INTERPRO; IPR003107;
#=GF DR SMART; HAT;
#=GF DR SO; 0001068; polypeptide_repeat;
#=GF CC The HAT (Half A TPR) repeat is found in several RNA processing
#=GF CC proteins [1].
#=GF SQ 3
#=GS CRN_DROME/191-222 AC P17886.2
#=GS CLF1_SCHPO/185-216 AC P87312.1
#=GS CLF1_SCHPO/185-216 DR PDB; 3JB9 R; 185-216;
#=GS O16376_CAEEL/201-233 AC O16376.2
CRN_DROME/191-222 KEIDRAREIYERFVYVH.PDVKNWIKFARFEES
CLF1_SCHPO/185-216 HENERARGIYERFVVVH.PEVTNWLRWARFEEE
#=GR CLF1_SCHPO/185-216 SS --HHHHHHHHHHHHHHS.--HHHHHHHHHHHHH
O16376_CAEEL/201-233 KEIDRARSVYQRFLHVHGINVQNWIKYAKFEER
#=GC SS_cons --HHHHHHHHHHHHHHS.--HHHHHHHHHHHHH
#=GC seq_cons KEIDRARuIYERFVaVH.P-VpNWIKaARFEEc
//
Parsing this file using Bio.Align stores the alignment, its annotations, as
well as the sequences and their annotations::
>>> from Bio.Align import stockholm
>>> alignments = stockholm.AlignmentIterator("Stockholm/example.sth")
>>> alignment = next(alignments)
>>> alignment.shape
(3, 33)
>>> alignment[0]
'KEIDRAREIYERFVYVH-PDVKNWIKFARFEES'
Alignment meta-data are stored in alignment.annotations::
>>> alignment.annotations["accession"]
'PF02184.18'
>>> alignment.annotations["references"][0]["title"]
'The HAT helix, a repetitive motif implicated in RNA processing.'
Annotations of alignment columns are stored in alignment.column_annotations::
>>> alignment.column_annotations["consensus secondary structure"]
'--HHHHHHHHHHHHHHS.--HHHHHHHHHHHHH'
Sequences and their annotations are stored in alignment.sequences::
>>> alignment.sequences[0].id
'CRN_DROME/191-222'
>>> alignment.sequences[0].seq
Seq('KEIDRAREIYERFVYVHPDVKNWIKFARFEES')
>>> alignment.sequences[1].letter_annotations["secondary structure"]
'--HHHHHHHHHHHHHHS--HHHHHHHHHHHHH'
Slicing specific columns of an alignment will slice any per-column annotations::
>>> alignment.column_annotations["consensus secondary structure"]
'--HHHHHHHHHHHHHHS.--HHHHHHHHHHHHH'
>>> part_alignment = alignment[:,10:20]
>>> part_alignment.column_annotations["consensus secondary structure"]
'HHHHHHS.--'
"""
import textwrap
from collections import defaultdict
from Bio.Align import Alignment
from Bio.Align import interfaces
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
class AlignmentIterator(interfaces.AlignmentIterator):
"""Alignment iterator for alignment files in the Stockholm format.
The file may contain multiple concatenated alignments, which are loaded
and returned incrementally.
Alignment meta-data (lines starting with #=GF) are stored in the dictionary
alignment.annotations. Column annotations (lines starting with #=GC) are
stored in the dictionary alignment.column_annotations. Sequence names are
stored in record.id. Sequence record meta-data (lines starting with #=GS)
are stored in the dictionary record.annotations. Sequence letter
annotations (lines starting with #=GR) are stored in the dictionary
record.letter_annotations.
Wrap-around alignments are not supported - each sequence must be on
a single line.
For more information on the file format, please see:
http://sonnhammer.sbc.su.se/Stockholm.html
https://en.wikipedia.org/wiki/Stockholm_format
"""
gf_mapping = {
"ID": "identifier",
"AC": "accession",
"DE": "definition",
"AU": "author",
"SE": "source of seed",
"SS": "source of structure",
"GA": "gathering method",
"TC": "trusted cutoff",
"NC": "noise cutoff",
"BM": "build method",
"SM": "search method",
"TP": "type",
"PI": "previous identifier",
"CC": "comment",
"CL": "clan",
"WK": "wikipedia",
"CB": "calibration method",
"**": "**", # Found in Rfam
}
gr_mapping = {
"SS": "secondary structure",
"PP": "posterior probability",
"CSA": "Catalytic Site Atlas", # used in CATH
# These features are included in the Stockholm file format
# documentation, but currently not used in the PFAM, RFAM, and CATH
# databases:
"SA": "surface accessibility",
"TM": "transmembrane",
"LI": "ligand binding",
"AS": "active site",
"pAS": "active site - Pfam predicted",
"sAS": "active site - from SwissProt",
"IN": "intron",
}
gc_mapping = {
"RF": "reference coordinate annotation",
"seq_cons": "consensus sequence",
"scorecons": "consensus score", # used in CATH
"scorecons_70": "consensus score 70", # used in CATH
"scorecons_80": "consensus score 80", # used in CATH
"scorecons_90": "consensus score 90", # used in CATH
# This feature is included in the Stockholm file format
# documentation, but currently not used in the PFAM, RFAM,
# and CATH databases:
"MM": "model mask",
}
# Add *_cons from GR mapping:
for key, value in gr_mapping.items():
gc_mapping[key + "_cons"] = "consensus " + value
# These GC keywords are used in Rfam:
for keyword in (
"RNA_elements",
"RNA_structural_element",
"RNA_structural_elements",
"RNA_ligand_AdoCbl",
"RNA_ligand_AqCbl",
"RNA_ligand_FMN",
"RNA_ligand_Guanidinium",
"RNA_ligand_SAM",
"RNA_ligand_THF_1",
"RNA_ligand_THF_2",
"RNA_ligand_TPP",
"RNA_ligand_preQ1",
"RNA_motif_k_turn",
"Repeat_unit",
"2L3J_B_SS",
"CORE",
"PK",
"PK_SS",
"cons",
):
gc_mapping[keyword] = keyword.replace("_", " ")
gs_mapping = {
"AC": "accession",
# "DE": description, # handled separately
# "DR": "database_references", # handled separately
"OS": "organism",
# These two features are included in the Stockholm file
# format documentation, but currently not used in the PFAM,
# RFAM, and CATH databases:
"OC": "organism classification",
"LO": "look",
}
def __init__(self, source):
"""Create an AlignmentIterator object.
Arguments:
- source - input data or file name
"""
super().__init__(source, mode="t", fmt="Stockholm")
@staticmethod
def _store_per_file_annotations(alignment, gf, rows):
for key, value in gf.items():
if key == "WK":
lines = iter(value)
references = []
for line in lines:
reference = ""
while line.endswith("/"):
reference += line[:-1]
line = next(lines)
reference += line
references.append(reference)
value = references
elif key in ("SM", "CC", "**"):
value = " ".join(value)
elif key == "SQ":
assert len(value) == 1
if int(value.pop()) != rows:
raise ValueError("Inconsistent number of sequences in alignment")
continue
elif key == "AU":
pass
else:
assert len(value) == 1, (key, value)
value = value.pop()
alignment.annotations[AlignmentIterator.gf_mapping[key]] = value
@staticmethod
def _store_per_column_annotations(alignment, gc, columns, skipped_columns):
if gc:
alignment.column_annotations = {}
for key, value in gc.items():
if skipped_columns:
value = "".join(
letter
for index, letter in enumerate(value)
if index not in skipped_columns
)
if len(value) != columns:
raise ValueError(
f"{key} length is {len(value)}, expected {columns}"
)
alignment.column_annotations[AlignmentIterator.gc_mapping[key]] = value
@staticmethod
def _store_per_sequence_annotations(alignment, gs):
for seqname, annotations in gs.items():
for record in alignment.sequences:
if record.id == seqname:
break
else:
raise ValueError(f"Failed to find seqname {seqname}")
for key, value in annotations.items():
if key == "DE":
record.description = value
elif key == "DR":
record.dbxrefs = value
else:
record.annotations[AlignmentIterator.gs_mapping[key]] = value
@staticmethod
def _store_per_sequence_and_per_column_annotations(alignment, gr):
for seqname, letter_annotations in gr.items():
for record in alignment.sequences:
if record.id == seqname:
break
else:
raise ValueError(f"Failed to find seqname {seqname}")
for keyword, letter_annotation in letter_annotations.items():
feature = AlignmentIterator.gr_mapping[keyword]
if keyword == "CSA":
letter_annotation = letter_annotation.replace("-", "")
else:
letter_annotation = letter_annotation.replace(".", "")
record.letter_annotations[feature] = letter_annotation
def parse(self, stream):
"""Parse the next alignment from the stream."""
if stream is None:
raise StopIteration
for line in stream:
line = line.strip()
if not line:
continue
elif line == "# STOCKHOLM 1.0":
# Starting a new alignment
records = []
aligned_sequences = []
references = []
reference_comments = []
database_references = []
nested_domains = []
gf = defaultdict(list)
gc = {}
gs = defaultdict(lambda: {"DR": []})
gr = defaultdict(dict)
length = None
elif line == "//":
# Reached the end of the alignment.
skipped_columns = []
coordinates = Alignment.infer_coordinates(
aligned_sequences, skipped_columns
)
skipped_columns = set(skipped_columns)
alignment = Alignment(records, coordinates)
alignment.annotations = {}
if references:
alignment.annotations["references"] = []
for reference in references:
reference = dict(reference)
reference["title"] = " ".join(reference["title"])
reference["author"] = " ".join(reference["author"])
reference["location"] = " ".join(reference["location"])
alignment.annotations["references"].append(reference)
if database_references:
alignment.annotations["database references"] = database_references
if nested_domains:
alignment.annotations["nested domains"] = nested_domains
rows, columns = alignment.shape
AlignmentIterator._store_per_file_annotations(alignment, gf, rows)
AlignmentIterator._store_per_column_annotations(
alignment, gc, columns, skipped_columns
)
AlignmentIterator._store_per_sequence_annotations(alignment, gs)
AlignmentIterator._store_per_sequence_and_per_column_annotations(
alignment, gr
)
yield alignment
elif not line.startswith("#"):
# Sequence
# Format: "<seqname> <sequence>"
try:
seqname, aligned_sequence = line.split(None, 1)
except ValueError:
# This might be someone attempting to store a zero length sequence?
raise ValueError(
"Could not split line into sequence name and aligned sequence:\n"
+ line
) from None
if length is None:
length = len(aligned_sequence)
elif length != len(aligned_sequence):
raise ValueError(
f"Aligned sequence {seqname} consists of {len(aligned_sequence)} letters, expected {length} letters)"
)
aligned_sequence = aligned_sequence.replace(".", "-")
sequence = aligned_sequence.replace("-", "")
aligned_sequences.append(aligned_sequence)
seq = Seq(sequence)
record = SeqRecord(seq, id=seqname)
records.append(record)
elif line.startswith("#=GF "):
# Generic per-File annotation, free text
# Format: #=GF <feature> <free text>
feature, text = line[5:].strip().split(None, 1)
if feature == "RN":
assert text.startswith("[")
assert text.endswith("]")
number = int(text[1:-1])
reference = defaultdict(list)
reference["number"] = number
if reference_comments:
reference["comment"] = " ".join(reference_comments)
reference_comments = []
references.append(reference)
elif feature == "RM":
assert not reference["medline"]
reference["medline"] = text
elif feature == "RT":
reference["title"].append(text)
elif feature == "RA":
reference["author"].append(text)
elif feature == "RL":
reference["location"].append(text)
elif feature == "RC":
reference_comments.append(text)
elif feature == "DR":
database_reference = {"reference": text}
database_references.append(database_reference)
elif feature == "DC":
assert "comment" not in database_reference
database_reference["comment"] = text
elif feature == "NE":
nested_domain = {"accession": text}
nested_domains.append(nested_domain)
elif feature == "NL":
assert "location" not in nested_domain
nested_domain["location"] = text
else:
# Each feature key could be used more than once,
# so store the entries as a list of strings.
gf[feature].append(text)
elif line.startswith("#=GC "):
# Generic per-Column annotation, exactly 1 char per column
# Format: "#=GC <feature> <exactly 1 char per column>"
feature, text = line[5:].strip().split(None, 2)
if feature not in gc:
gc[feature] = ""
gc[feature] += text.strip() # append to any previous entry
# Might be interleaved blocks, so can't check length yet
elif line.startswith("#=GS "):
# Generic per-Sequence annotation, free text
# Format: "#=GS <seqname> <feature> <free text>"
try:
seqname, feature, text = line[5:].strip().split(None, 2)
except ValueError:
# Free text can sometimes be empty, in which case the three-way split above raises a ValueError.
# See https://github.com/biopython/biopython/issues/2982 for more details
seqname, feature = line[5:].strip().split(None, 1)
text = ""
if feature == "DR":
gs[seqname][feature].append(text)
else:
assert feature not in gs[seqname]
gs[seqname][feature] = text
elif line[:5] == "#=GR ":
# Generic per-Sequence AND per-Column markup
# Format: "#=GR <seqname> <feature> <exactly 1 char per column>"
terms = line[5:].split(None, 2)
assert terms[0] == seqname
feature = terms[1]
gr[seqname][feature] = terms[2].strip()
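# A minimal usage sketch (hypothetical file name; assumes the Bio.Align-style
# imports at the top of this module):
#
# for alignment in AlignmentIterator("example.sto"):
# rows, columns = alignment.shape
# print(rows, columns, alignment.annotations)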
class AlignmentWriter(interfaces.AlignmentWriter):
"""Alignment file writer for the Stockholm file format."""
gf_mapping = {value: key for key, value in AlignmentIterator.gf_mapping.items()}
gs_mapping = {value: key for key, value in AlignmentIterator.gs_mapping.items()}
gr_mapping = {value: key for key, value in AlignmentIterator.gr_mapping.items()}
gc_mapping = {value: key for key, value in AlignmentIterator.gc_mapping.items()}
def format_alignment(self, alignment):
"""Return a string with a single alignment in the Stockholm format."""
rows, columns = alignment.shape
if rows == 0:
raise ValueError("Must have at least one sequence")
if columns == 0:
raise ValueError("Non-empty sequences are required")
lines = []
lines.append("# STOCKHOLM 1.0\n")
# #=GF Above the alignment; alignment.annotations
for key, feature in self.gf_mapping.items():
if key == "comment":
# write this last
continue
value = alignment.annotations.get(key)
if value is not None:
feature = self.gf_mapping[key]
if key in ("author", "wikipedia"):
for item in value:
lines.append(f"#=GF {feature} {item}\n")
else:
lines.append(f"#=GF {feature} {value}\n")
nested_domains = alignment.annotations.get("nested domains")
if nested_domains is not None:
for nested_domain in nested_domains:
accession = nested_domain.get("accession")
if accession is not None:
lines.append(f"#=GF NE {accession}\n")
location = nested_domain.get("location")
if location is not None:
lines.append(f"#=GF NL {location}\n")
references = alignment.annotations.get("references")
if references is not None:
for reference in references:
comment = reference.get("comment")
lines.append(AlignmentWriter._format_long_text("#=GF RC ", comment))
lines.append(f"#=GF RN [{reference['number']}]\n")
lines.append(f"#=GF RM {reference['medline']}\n")
title = reference["title"]
lines.append(AlignmentWriter._format_long_text("#=GF RT ", title))
lines.append(f"#=GF RA {reference['author']}\n")
lines.append(f"#=GF RL {reference['location']}\n")
database_references = alignment.annotations.get("database references")
if database_references is not None:
for database_reference in database_references:
lines.append(f"#=GF DR {database_reference['reference']}\n")
comment = database_reference.get("comment")
if comment is not None:
lines.append(f"#=GF DC {comment}\n")
key = "comment"
value = alignment.annotations.get(key)
if value is not None:
prefix = "#=GF %s " % self.gf_mapping[key]
lines.append(AlignmentWriter._format_long_text(prefix, value))
for key in alignment.annotations:
if key in self.gf_mapping:
continue
if key == "nested domains":
continue
if key == "references":
continue
if key == "database references":
continue
raise ValueError(
"Unknown annotation %s found in alignment.annotations" % key
)
lines.append("#=GF SQ %i\n" % rows)
# #=GS Above the alignment or just below the corresponding sequence;
# record.annotations
# #=GR Just below the corresponding sequence;
# record.letter_annotations
width = max(len(record.id) for record in alignment.sequences)
start = max(width, 20) + 12
for record in alignment.sequences:
name = record.id.ljust(width)
for key, value in record.annotations.items():
feature = self.gs_mapping[key]
lines.append(f"#=GS {name} {feature} {value}\n")
if record.description != "<unknown description>":
lines.append(f"#=GS {name} DE {record.description}\n")
for value in record.dbxrefs:
lines.append(f"#=GS {name} DR {value}\n")
for aligned_sequence, record in zip(alignment, alignment.sequences):
lines.extend(
AlignmentWriter._format_record(width, start, aligned_sequence, record)
)
# #=GC Below the alignment;
# alignment.column_annotations
if alignment.column_annotations:
for key, value in alignment.column_annotations.items():
feature = self.gc_mapping[key]
line = f"#=GC {feature}".ljust(start) + value + "\n"
lines.append(line)
lines.append("//\n")
return "".join(lines)
@staticmethod
def _format_long_text(prefix, text):
"""Format the text as wrapped lines (PRIVATE)."""
if text is None:
return ""
return (
textwrap.fill(
text,
width=79,
break_long_words=False,
initial_indent=prefix,
subsequent_indent=prefix,
)
+ "\n"
)
@staticmethod
def _format_record(width, start, aligned_sequence, record):
"""Format lines for a single SeqRecord (PRIVATE)."""
name = record.id.ljust(start)
line = name + aligned_sequence + "\n"
yield line
name = record.id.ljust(width)
for key, value in record.letter_annotations.items():
feature = AlignmentWriter.gr_mapping[key]
line = f"#=GR {name} {feature}".ljust(start) + value + "\n"
yield line
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
| 41.620926 | 125 | 0.554502 | 2,496 | 24,265 | 5.294872 | 0.200721 | 0.016646 | 0.011804 | 0.01082 | 0.230857 | 0.186743 | 0.149213 | 0.114936 | 0.090875 | 0.074682 | 0 | 0.016694 | 0.345807 | 24,265 | 582 | 126 | 41.69244 | 0.815862 | 0.264373 | 0 | 0.145729 | 0 | 0 | 0.133077 | 0.015007 | 0 | 0 | 0 | 0 | 0.022613 | 1 | 0.022613 | false | 0.002513 | 0.017588 | 0 | 0.072864 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50198232b5bf4069ed42d7304eb295112ccde17d | 5,348 | py | Python | demos/opponents/demo_chrono_opponent.py | WisconsinAutonomous/wa_simulator | 405a086b16f262fc82513ca9b23fd040e6375945 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5 | 2021-02-14T03:56:07.000Z | 2021-12-16T04:46:54.000Z | demos/opponents/demo_chrono_opponent.py | WisconsinAutonomous/wa_simulator | 405a086b16f262fc82513ca9b23fd040e6375945 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2021-02-05T19:30:55.000Z | 2021-02-05T19:51:21.000Z | demos/opponents/demo_chrono_opponent.py | WisconsinAutonomous/wa_simulator | 405a086b16f262fc82513ca9b23fd040e6375945 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 3 | 2021-09-20T21:21:12.000Z | 2022-01-09T20:49:46.000Z | # Path follower using a chrono vehicle model
# Multiple vehicles are used. The vehicles
# that aren't tracked are considered "opponents"
# Meant to demonstrate the WA Simulator API
# -----------------------------------------
# Import the simulator
import wa_simulator.chrono as wa
# Import the controller
from pid_controller import PIDController
# Command line arguments
parser = wa.WAArgumentParser(use_sim_defaults=True)
parser.add_argument("-n", "--num_opponents", type=int,
help="Number of opponents to simulation. The more, the less efficient the simulation.", default=2)
parser.add_argument("-mv", "--matplotlib", action="store_true", help="Use matplotlib to visualize", default=False)
parser.add_argument("-iv", "--irrlicht", action="store_true", help="Use irrlicht to visualize", default=False)
parser.add_argument("-sv", "--sensor", action="store_true", help="Use sensor to visualize", default=False)
args = parser.parse_args()
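# Example invocations (flags defined above; visualization flags can be combined):
# python demo_chrono_opponent.py -mv -n 4 # matplotlib view with four opponents
# python demo_chrono_opponent.py -iv # irrlicht view with the default two opponents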
def main():
# ---------------
# Create a system
# Systems describe simulation settings and can be used to
# update dynamics
system = wa.WAChronoSystem(args=args)
# ---------------------
# Create an environment
# An environment handles external assets (trees, barriers, etc.) and terrain characteristics
# Pre-made evGrand Prix (EGP) env file
env_filename = wa.WAChronoEnvironment.EGP_ENV_MODEL_FILE
environment = wa.WAChronoEnvironment(system, env_filename)
# --------------------------------
# Create the vehicle inputs object
# This is a shared object between controllers, visualizations and vehicles
vehicle_inputs = wa.WAVehicleInputs()
# ----------------
# Create a vehicle
# Pre-made go kart veh file
init_loc = wa.WAVector([49.8, 132.9, 0.5])
veh_filename = wa.WAChronoVehicle.GO_KART_MODEL_FILE
vehicle = wa.WAChronoVehicle(system, vehicle_inputs, environment, veh_filename, init_loc=init_loc)
# -------------
# Create a Path
# Load data points from a csv file and interpolate a path
filename = wa.get_wa_data_file("paths/sample_medium_loop.csv")
points = wa.load_waypoints_from_csv(filename, delimiter=",")
path = wa.WASplinePath(points, num_points=1000, is_closed=True)
# ------------------
# Create n opponents
opponents = []
opponent_vehicle_inputs_list = []
num_opponents = args.num_opponents
for i in range(num_opponents):
opponent_init_loc = wa.WAVector(points[i+1])
opponent_init_loc.z = 0.1
opponent_vehicle_inputs = wa.WAVehicleInputs()
opponent = wa.WAChronoVehicle(system, opponent_vehicle_inputs, environment,
veh_filename, init_loc=opponent_init_loc)
opponents.append(opponent)
opponent_vehicle_inputs_list.append(opponent_vehicle_inputs)
# ----------------------
# Create a visualization
# It's nice to visualize the "look ahead" points for the controller
# Add two spheres/dots for that purpose
position = wa.WAVector()
size = wa.WAVector([0.1, 0.1, 0.1])
kwargs = {'position': position, 'size': size, 'body_type': 'sphere', 'updates': True}
sentinel_sphere = environment.create_body(name='sentinel', color=wa.WAVector([1, 0, 0]), **kwargs)
target_sphere = environment.create_body(name='target', color=wa.WAVector([0, 1, 0]), **kwargs)
# Will use irrlicht or matplotlib for visualization
visualizations = []
if args.irrlicht:
irr = wa.WAChronoIrrlicht(system, vehicle, vehicle_inputs, environment=environment, opponents=opponents)
visualizations.append(irr)
if args.sensor:
sens = wa.WAChronoSensorVisualization(system, vehicle, vehicle_inputs, environment=environment)
visualizations.append(sens)
if args.matplotlib:
mat = wa.WAMatplotlibVisualization(system, vehicle, vehicle_inputs,
environment=environment, plotter_type="multi", opponents=opponents)
visualizations.append(mat)
# -------------------
# Create a controller
# Create a pid controller
controller = PIDController(system, vehicle, vehicle_inputs, path)
controller.get_long_controller().set_target_speed(9)
controller.get_long_controller().set_gains(0.1, 0, 1e-2)
controllers = [controller]
for i in range(num_opponents):
opponent_controller = PIDController(system, opponents[i], opponent_vehicle_inputs_list[i], path)
opponent_controller.get_long_controller().set_target_speed(9)
opponent_controller.get_long_controller().set_gains(0.1, 0, 1e-2)
controllers.append(opponent_controller)
# --------------------------
# Create a simulation wrapper
# Will be responsible for actually running the simulation
sim_manager = wa.WASimulationManager(system, environment, vehicle, *visualizations, *controllers, *opponents)
# ---------------
# Simulation loop
step_size = system.step_size
while sim_manager.is_ok():
time = system.time
sim_manager.synchronize(time)
sim_manager.advance(step_size)
# Update the position of the spheres
target_sphere.position = controller.get_target_pos()
sentinel_sphere.position = controller.get_sentinel_pos()
if __name__ == "__main__":
main()
| 40.210526 | 118 | 0.67371 | 628 | 5,348 | 5.55414 | 0.308917 | 0.048452 | 0.036124 | 0.029817 | 0.208716 | 0.164564 | 0.118693 | 0.053899 | 0.029817 | 0.029817 | 0 | 0.009478 | 0.191099 | 5,348 | 132 | 119 | 40.515152 | 0.79681 | 0.246447 | 0 | 0.030303 | 0 | 0 | 0.082748 | 0.007021 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015152 | false | 0 | 0.030303 | 0 | 0.045455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
501ca1aa5aa81f64cecce1fdfcb086b792f85af6 | 785 | py | Python | go_doc_get.py | lowellmower/go_doc_get | 511abf77f16a7a92dde93a9f1318967b1d237635 | [
"MIT"
] | 1 | 2018-11-24T07:11:59.000Z | 2018-11-24T07:11:59.000Z | go_doc_get.py | lowellmower/go_doc_get | 511abf77f16a7a92dde93a9f1318967b1d237635 | [
"MIT"
] | null | null | null | go_doc_get.py | lowellmower/go_doc_get | 511abf77f16a7a92dde93a9f1318967b1d237635 | [
"MIT"
] | null | null | null | import sublime
import sublime_plugin
import webbrowser
def cleanPackage(pkgURI):
pkg = pkgURI.split('.com/')[1]
return pkg
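# e.g. cleanPackage("github.com/user/repo/pkg") returns "user/repo/pkg"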
class GoDocGetCommand(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
for region in view.sel():
selected = view.substr(region)
if "github.corp" in selected:
# if corporate go to page on master branch
pkg = cleanPackage(selected)
res = pkg.split('/')
res.insert(2, 'tree/master')
pkg = '/'.join(res)
webbrowser.open('https://github.corp.dyndns.com/' + pkg)
elif "github" in selected:
# if public package go to doc
pkg = cleanPackage(selected)
webbrowser.open('https://godoc.org/github.com/' + pkg)
else:
# default to golang proper
webbrowser.open('https://golang.org/pkg/' + selected)
| 25.322581 | 59 | 0.68535 | 106 | 785 | 5.056604 | 0.5 | 0.078358 | 0.106343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003101 | 0.178344 | 785 | 30 | 60 | 26.166667 | 0.827907 | 0.118471 | 0 | 0.090909 | 0 | 0 | 0.171512 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.136364 | 0 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
501d28ea5db48d2848e2a89b31ffdfcb9bbe92c6 | 598 | py | Python | text_mcts_lm_demo.py | lantunes/nlg-mcts | 91afe0fe7eeed96cf10686c7f555df91fbdd3112 | [
"MIT"
] | 1 | 2018-10-29T20:43:45.000Z | 2018-10-29T20:43:45.000Z | text_mcts_lm_demo.py | lantunes/nlg-mcts | 91afe0fe7eeed96cf10686c7f555df91fbdd3112 | [
"MIT"
] | null | null | null | text_mcts_lm_demo.py | lantunes/nlg-mcts | 91afe0fe7eeed96cf10686c7f555df91fbdd3112 | [
"MIT"
] | 1 | 2021-05-11T12:08:29.000Z | 2021-05-11T12:08:29.000Z | from nlgmcts import *
if __name__ == '__main__':
print("creating language model...")
lm = ShakespeareCharLanguageModel(n=5)
num_simulations = 250000
text_length = 50
start_state = ["<L>"]
eval_function = lambda text: -lm.perplexity(text)
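# the score is negated perplexity: MCTS maximizes reward, so lower perplexity scores higher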
mcts = TextMCTS(lm.vocab(with_unk=False), text_length, eval_function, c=10)
state = start_state
print("beginning search...")
mcts.search(state, num_simulations)
best = mcts.get_best_sequence()
generated_text = ''.join(best[0])
print("generated text: %s (score: %s)" % (generated_text, str(best[1])))
| 24.916667 | 79 | 0.665552 | 75 | 598 | 5.026667 | 0.626667 | 0.103448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026915 | 0.192308 | 598 | 23 | 80 | 26 | 0.753623 | 0 | 0 | 0 | 0 | 0 | 0.143813 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
501dce90553028042e573494a13b85e1d63565ac | 772 | py | Python | python/ray/tests/test_unreconstructable_errors.py | jCrompton/ray | 4d791f141e953bc057d9fcee353901dbf6a5c0ea | [
"Apache-2.0"
] | null | null | null | python/ray/tests/test_unreconstructable_errors.py | jCrompton/ray | 4d791f141e953bc057d9fcee353901dbf6a5c0ea | [
"Apache-2.0"
] | 5 | 2021-08-25T16:17:15.000Z | 2022-03-12T01:00:29.000Z | python/ray/tests/test_unreconstructable_errors.py | jCrompton/ray | 4d791f141e953bc057d9fcee353901dbf6a5c0ea | [
"Apache-2.0"
] | null | null | null | import numpy as np
import unittest
import ray
from ray.test_utils import put_unpinned_object
class TestObjectLostErrors(unittest.TestCase):
def setUp(self):
ray.init(
num_cpus=1,
object_store_memory=150 * 1024 * 1024,
_redis_max_memory=10000000)
def tearDown(self):
ray.shutdown()
def testDriverPutEvictedCannotReconstruct(self):
x_id = put_unpinned_object(np.zeros(1 * 1024 * 1024))
ray.get(x_id)
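# Fill the object store with new puts so the unpinned object above gets evicted.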
for _ in range(20):
ray.put(np.zeros(10 * 1024 * 1024))
self.assertRaises(ray.exceptions.ObjectLostError,
lambda: ray.get(x_id))
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| 24.903226 | 61 | 0.626943 | 94 | 772 | 4.861702 | 0.56383 | 0.052516 | 0.074398 | 0.039387 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.073214 | 0.274611 | 772 | 30 | 62 | 25.733333 | 0.742857 | 0 | 0 | 0 | 0 | 0 | 0.012953 | 0 | 0 | 0 | 0 | 0 | 0.043478 | 1 | 0.130435 | false | 0 | 0.26087 | 0 | 0.434783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
501fef42d45f95698b601126fae0f708ad02b7e2 | 2,210 | py | Python | David-codes/conexao_python_mysql-base.py | davidson-santos/monitoria-python-2021 | ec19ddfe7f0a29aaa998d57f8b847fc570f1e604 | [
"MIT"
] | null | null | null | David-codes/conexao_python_mysql-base.py | davidson-santos/monitoria-python-2021 | ec19ddfe7f0a29aaa998d57f8b847fc570f1e604 | [
"MIT"
] | 1 | 2021-09-17T13:38:13.000Z | 2021-09-17T13:38:13.000Z | David-codes/conexao_python_mysql-base.py | davidson-santos/monitoria-python-2021 | ec19ddfe7f0a29aaa998d57f8b847fc570f1e604 | [
"MIT"
] | 2 | 2021-09-17T13:24:58.000Z | 2021-09-17T13:35:53.000Z | # LIBRARY INSTALLATION (IF NEEDED)
#pip install mysql-connector-python
# Import libraries
import mysql.connector
import sys
# Start of the program
def abre_conexao_bd():
try:
con = mysql.connector.connect(host="200.128.9.67", port=33006, database="ei32teste", user="ei32teste", password="tes12345")
#con = mysql.connector.connect(host="localhost", port=3306, database="danilo", user="root", password="")
return con
except mysql.connector.Error as e:
print("Falha na matrix")
return 0
#----- Module that performs a change on the database (insert, update, delete...) and returns no result
def manipula_dados(sql):
con = abre_conexao_bd()
if con != 0:
cursor = con.cursor()
cursor.execute(sql)
con.commit() #this command saves the changes made during the connection (con). Note that without it, the changes do not appear on the next run.
cursor.close()
con.close()
print("Operação realizada com sucesso.\n")
else:
print("A operação não foi realizada.\n")
#----- Módulo que executa uma consulta e imprime os resultados
def executa_conculta(sql):
con = abre_conexao_bd()
if con != 0:
cursor = con.cursor()
cursor.execute(sql)
for linha in cursor.fetchall():
print(linha)
cursor.close()
con.close()
print("\nfim da impressão.\n\n")
else:
print("O select não foi executado.\n")
#----- PROGRAMA PRINCIPAL -------------------
executa_conculta("select * from aluno")
manipula_dados("insert into aluno (nome, turma_id, status) values ('sebastiao', 1, 'A');")
manipula_dados("update aluno set status = 'A' where id = 2")
executa_conculta("select * from aluno")
# SUGESTÃO: escreva em python o seguinte algoritmo
# digite 1 para consultar dados, 2 para manipular dados e 3 para sair
# enquanto opcao igual a 1 ou 2, faça
# digite o comando a ser executado
# se opcao igual a 1
# executa_consulta(comando)
# se opcao igual a 2
# manipula_dados(comando)
# digite 1 para consultar dados, 2 para manipular dados e 3 para sair
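# A minimal sketch of the suggested loop (left commented out so the script above runs unchanged; it reuses the functions defined in this file):
# opcao = input("Type 1 to query data, 2 to modify data or 3 to exit: ")
# while opcao in ("1", "2"):
#     comando = input("Type the command to be executed: ")
#     if opcao == "1":
#         executa_consulta(comando)
#     else:
#         manipula_dados(comando)
#     opcao = input("Type 1 to query data, 2 to modify data or 3 to exit: ")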
| 32.028986 | 159 | 0.647511 | 292 | 2,210 | 4.849315 | 0.479452 | 0.049435 | 0.027542 | 0.033898 | 0.271186 | 0.155367 | 0.155367 | 0.155367 | 0.155367 | 0.155367 | 0 | 0.02497 | 0.238914 | 2,210 | 68 | 160 | 32.5 | 0.816885 | 0.424434 | 0 | 0.444444 | 0 | 0 | 0.256185 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0.027778 | 0.055556 | 0 | 0.194444 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50209b52d5eddeae724970b059fdb650414f4ca0 | 1,730 | py | Python | tests/actions_plugins/test_plugins.py | whamcloud/iml-agent | fecb2468fd6edc822f3ab37ced444d98d8725730 | [
"MIT"
] | 1 | 2020-04-22T16:43:09.000Z | 2020-04-22T16:43:09.000Z | tests/actions_plugins/test_plugins.py | whamcloud/iml-agent | fecb2468fd6edc822f3ab37ced444d98d8725730 | [
"MIT"
] | 53 | 2018-07-07T18:17:50.000Z | 2021-03-19T23:15:28.000Z | tests/actions_plugins/test_plugins.py | whamcloud/iml-agent | fecb2468fd6edc822f3ab37ced444d98d8725730 | [
"MIT"
] | 6 | 2018-06-18T08:51:38.000Z | 2019-10-24T12:16:42.000Z | from mock import patch
import unittest
from chroma_agent.plugin_manager import DevicePluginManager, ActionPluginManager
from chroma_agent.lib.agent_teardown_functions import agent_daemon_teardown_functions
from chroma_agent.lib.agent_startup_functions import agent_daemon_startup_functions
from chroma_agent.action_plugins.device_plugin import (
initialise_block_device_drivers,
terminate_block_device_drivers,
)
class TestDevicePlugins(unittest.TestCase):
def test_get_device_plugins(self):
"""Test that we get a list of loaded plugin classes."""
self.assertNotEqual(len(DevicePluginManager.get_plugins()), 0)
def test_excluded_plugins(self):
self.assertTrue("linux" in DevicePluginManager.get_plugins())
with patch("chroma_agent.plugin_manager.EXCLUDED_PLUGINS", ["linux"]):
with patch.object(DevicePluginManager, "_plugins", {}):
self.assertTrue("linux" not in DevicePluginManager.get_plugins())
def test_initialise_block_device_drivers_called_at_startup(self):
"""Test method is added to list of functions to run on daemon startup."""
self.assertTrue(
initialise_block_device_drivers in agent_daemon_startup_functions
)
def test_terminate_block_device_drivers_called_at_teardown(self):
"""Test method is added to list of functions to run on daemon teardown."""
self.assertTrue(
terminate_block_device_drivers in agent_daemon_teardown_functions
)
class TestActionPlugins(unittest.TestCase):
def test_get_action_plugins(self):
"""Test that we get a list of loaded plugin classes."""
self.assertNotEqual(len(ActionPluginManager().commands), 0)
| 41.190476 | 85 | 0.754913 | 209 | 1,730 | 5.942584 | 0.272727 | 0.05314 | 0.086957 | 0.067633 | 0.36715 | 0.246377 | 0.196457 | 0.196457 | 0.196457 | 0.196457 | 0 | 0.001398 | 0.172832 | 1,730 | 41 | 86 | 42.195122 | 0.866527 | 0.136416 | 0 | 0.071429 | 0 | 0 | 0.045485 | 0.029871 | 0 | 0 | 0 | 0 | 0.214286 | 1 | 0.178571 | false | 0 | 0.214286 | 0 | 0.464286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50211822a135202f8d98627f77e8d1e600f7048a | 4,468 | py | Python | augment.py | mamdamin/MVCNN-TensorFlow | 6d8dd3f8d68feccd90ce1b82de31c4b8524b59ab | [
"MIT"
] | null | null | null | augment.py | mamdamin/MVCNN-TensorFlow | 6d8dd3f8d68feccd90ce1b82de31c4b8524b59ab | [
"MIT"
] | null | null | null | augment.py | mamdamin/MVCNN-TensorFlow | 6d8dd3f8d68feccd90ce1b82de31c4b8524b59ab | [
"MIT"
] | null | null | null | import tensorflow as tf
import math
def augmentImages(images,
labels=None, # optional labels tensor; used only when mixup > 0
resize=None, # (width, height) tuple or None
horizontal_flip=False,
vertical_flip=False,
translate = 0,
rotate=0, # Maximum rotation angle in degrees
crop_probability=0, # How often we do crops
crop_min_percent=0.6, # Minimum linear dimension of a crop
crop_max_percent=1., # Maximum linear dimension of a crop
mixup=0): # Mixup coeffecient, see https://arxiv.org/abs/1710.09412.pdf
if resize is not None:
images = tf.image.resize_bilinear(images, resize)
# My experiments showed that casting on the GPU improves training performance
if images.dtype != tf.float32:
images = tf.image.convert_image_dtype(images, dtype=tf.float32)
images = tf.subtract(images, 0.5)
images = tf.multiply(images, 2.0)
if labels is not None:
labels = tf.to_float(labels)
with tf.name_scope('augmentation'):
shp = tf.shape(images)
batch_size, height, width = shp[0], shp[1], shp[2]
width = tf.cast(width, tf.float32)
height = tf.cast(height, tf.float32)
# The list of affine transformations that our image will go under.
# Every element is Nx8 tensor, where N is a batch size.
transforms = []
identity = tf.constant([1, 0, 0, 0, 1, 0, 0, 0], dtype=tf.float32)
if horizontal_flip:
coin = tf.less(tf.random_uniform([batch_size], 0, 1.0), 0.5)
flip_transform = tf.convert_to_tensor(
[-1., 0., width, 0., 1., 0., 0., 0.], dtype=tf.float32)
transforms.append(
tf.where(coin,
tf.tile(tf.expand_dims(flip_transform, 0), [batch_size, 1]),
tf.tile(tf.expand_dims(identity, 0), [batch_size, 1])))
if vertical_flip:
coin = tf.less(tf.random_uniform([batch_size], 0, 1.0), 0.5)
flip_transform = tf.convert_to_tensor(
[1, 0, 0, 0, -1, height, 0, 0], dtype=tf.float32)
transforms.append(
tf.where(coin,
tf.tile(tf.expand_dims(flip_transform, 0), [batch_size, 1]),
tf.tile(tf.expand_dims(identity, 0), [batch_size, 1])))
if rotate > 0:
angle_rad = rotate / 180 * math.pi
angles = tf.random_uniform([batch_size], -angle_rad, angle_rad)
transforms.append(
tf.contrib.image.angles_to_projective_transforms(
angles, height, width))
if translate > 0:
tx = tf.random_uniform([batch_size,1],minval=-32,maxval=32,dtype=tf.int32)
ty = tf.random_uniform([batch_size,1],minval=-32,maxval=32,dtype=tf.int32)
zero = tf.zeros([batch_size,1],dtype=tf.int32)
one = tf.ones([batch_size,1],dtype=tf.int32)
ti = tf.cast(tf.concat([one,zero,tx,zero,one,ty,zero,zero],axis=1),dtype=tf.float32)
transforms.append(ti)
if crop_probability > 0:
crop_pct = tf.random_uniform([batch_size], crop_min_percent,
crop_max_percent)
left = tf.random_uniform([batch_size], 0, width * (1 - crop_pct))
top = tf.random_uniform([batch_size], 0, height * (1 - crop_pct))
crop_transform = tf.stack([
crop_pct,
tf.zeros([batch_size]), top,
tf.zeros([batch_size]), crop_pct, left,
tf.zeros([batch_size]),
tf.zeros([batch_size])
], 1)
coin = tf.less(
tf.random_uniform([batch_size], 0, 1.0), crop_probability)
transforms.append(
tf.where(coin, crop_transform,
tf.tile(tf.expand_dims(identity, 0), [batch_size, 1])))
if transforms:
print(images.shape)
images = tf.contrib.image.transform(
images,
tf.contrib.image.compose_transforms(*transforms),
interpolation='BILINEAR') # or 'NEAREST'
def cshift(values): # Circular shift in batch dimension
return tf.concat([values[-1:, ...], values[:-1, ...]], 0)
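# mixup (https://arxiv.org/abs/1710.09412): draw lam ~ Beta(mixup, mixup) and blend
# each example with its batch-shifted partner: x' = lam*x_i + (1-lam)*x_j.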
if mixup > 0 and labels is not None:
mixup = 1.0 * mixup # Convert to float, as tf.distributions.Beta requires floats.
beta = tf.distributions.Beta(mixup, mixup)
lam = beta.sample(batch_size)
ll = tf.expand_dims(tf.expand_dims(tf.expand_dims(lam, -1), -1), -1)
images = ll * images + (1 - ll) * cshift(images)
lam_l = tf.expand_dims(lam, -1) # broadcast lam over the label/class axis
labels = lam_l * labels + (1 - lam_l) * cshift(labels)
# NOTE: to consume the mixed labels, also return them; the final return currently yields images only
return images#, labels | 42.150943 | 93 | 0.59803 | 608 | 4,468 | 4.261513 | 0.25 | 0.079892 | 0.038595 | 0.069471 | 0.351602 | 0.282516 | 0.232343 | 0.232343 | 0.223852 | 0.223852 | 0 | 0.039348 | 0.271934 | 4,468 | 106 | 94 | 42.150943 | 0.757147 | 0.129588 | 0 | 0.172414 | 0 | 0 | 0.005308 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022989 | false | 0 | 0.022989 | 0.011494 | 0.068966 | 0.011494 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5022378e1d330702f332e768a0c7bd07b89452b8 | 5,901 | py | Python | python/smbus_pni_rm3100_examples.py | kuevpr/pni_rm3100_driver | b7579fa3880480ef971f200a91bc0eb02823e866 | [
"MIT"
] | null | null | null | python/smbus_pni_rm3100_examples.py | kuevpr/pni_rm3100_driver | b7579fa3880480ef971f200a91bc0eb02823e866 | [
"MIT"
] | null | null | null | python/smbus_pni_rm3100_examples.py | kuevpr/pni_rm3100_driver | b7579fa3880480ef971f200a91bc0eb02823e866 | [
"MIT"
] | null | null | null | import pni_rm3100
import time
import smbus_pni_rm3100
import smbus2
"""
execute_self_test
Example of running a BIST (Built In Selt Test) to check the
status of the three magnetic field sensors.
"""
def execute_self_test():
# Instantiate Objects
pni_object = pni_rm3100.PniRm3100()
i2cbus = smbus2.SMBus(1) # Opens /dev/i2c-1
# Select PNI Object Settings
pni_object.print_status_statements = True
# Set PNI Device Address
pni_object.assign_device_addr(pni_object.DeviceAddress.I2C_ADDR_HH)
# Select which Axes we'd like to test during the Built-In Self Test (BIST)
pni_object.assign_poll_byte(poll_x = True, poll_y = True, poll_z = True)
# Select the Timeout and LRP for the BIST. Then Enable Self-Test mode
pni_object.assign_bist_timeout(pni_object.BistRegister.BIST_TO_120us)
pni_object.assign_bist_lrp(pni_object.BistRegister.BIST_LRP_4)
pni_object.assign_bist_ste(True)
# Run the Self Test
smbus_pni_rm3100.self_test(i2cbus, pni_object)
"""
execute_continuous_measurements
This example shows how to setup the RM3100 in continuous measurement mode (CMM)
In CMM, you set the frequency that the sensors are sampled (using CCR and TMRC registers pg 28 and 31 of datasheet)
Once you have set the sampling frequency, you are free to read from the MEAS register and gather data
Note, the "assign" functions from "pni_object" do not write anything over I2C.
These functions simply adjust parameters in a struct and prepare for that data to be written to the sensor.
The "write" and "read" functions from "smbus_pni_rm3100" are communicating with the sensor over I2C.
These functions take in a "pni_object" that has been configured to the user's preferences
"""
def execute_continuous_measurements(num_measurements = 20, dt_seconds = 0.2):
# Instantiate Objects
pni_object = pni_rm3100.PniRm3100()
i2cbus = smbus2.SMBus(1) # Opens /dev/i2c-1
# Select PNI Object Settings
# Set this to False (the default) if you don't want the "read" functions printing data in the terminal
pni_object.print_status_statements = True
# Assign PNI Device Address
# Default is I2C_ADDR_LL (0x20)
pni_object.assign_device_addr(pni_object.DeviceAddress.I2C_ADDR_HH)
# Assign CCR Values
# Here, we set X, Y, and Z CCR values to the default value of 0x00C8 = 200
# See page 28 of the datasheet for more details
# Note: 'assign_xyz_ccr' also adjusts the 'scaling' values which are used to convert measured data to physical units of uT (microTesla)
pni_object.assign_xyz_ccr(x_ccr_in = pni_object.CcrRegister.CCR_DEFAULT,
y_ccr_in = pni_object.CcrRegister.CCR_DEFAULT,
z_ccr_in = pni_object.CcrRegister.CCR_DEFAULT)
# Assign TMRC Values
# Here, we set X, Y, and Z TMRC values to allow the sensors to be read at approximately 37Hz
# Note, if the sample rate set by TMRC is higher than allowable from the CCR values,
# then CCR values will be used for that axis and the TMRC value will be ignored for that axis
# (See note at end of pg31 of datasheet for an example)
pni_object.assign_tmrc(pni_object.TmrcRegister.TMRC_37HZ)
# Here's an example of how to write to and read from a register.
# You can adjust the CCR values in the 'assign_xyz_ccr' function call above and ensure these are
# Being written to the registers correctly
# Note: 'smbus_pni_rm3100.write_config' will write to the CCR address for you.
print("About to write to and read from CCR Registers")
smbus_pni_rm3100.write_ccr(i2cbus, pni_object)
read_x_ccr, read_y_ccr, read_z_ccr = smbus_pni_rm3100.read_ccr(i2cbus, pni_object)
# Here's an example on how to check some of the values internal to the 'pni_object' object
# A complete list of the member variables appears at the start of the "PniRm3100" class defined in "pni_rm3100.py"
print("Gain :", 1.0/pni_object.x_scaling, "\tScaling: ", pni_object.x_scaling)
# Take the settings we've assigned and write them their respective registers on the Magnetometer
# Note: 'write_config()' will call the following functions and write values
# to various registers on the RM3100
# write_bist (bist register)
# write_poll (poll register)
# write_ccr (ccr register)
# write_tmrc (tmrc register)
# write_hshake (hshake register)
# write_cmm (cmm register)
smbus_pni_rm3100.write_config(i2cbus, pni_object)
# Now that we've enabled CMM (Continuous Measurement Mode), let's read some magnetometer values!
i = 0
x_mag_sum = y_mag_sum = z_mag_sum = 0
while i < num_measurements:
# Print progress
if i % 10 == 0:
print("\nIteration {}/{}".format(i, num_measurements))
#Read magnetic field data
x_mag, y_mag, z_mag = smbus_pni_rm3100.read_meas(i2cbus, pni_object)
# Update our summations
x_mag_sum += x_mag
y_mag_sum += y_mag
z_mag_sum += z_mag
# Sleep and increment the iterator
time.sleep(dt_seconds)
i += 1
# Take average of measurements and print
x_mag_avg = x_mag_sum / num_measurements
y_mag_avg = y_mag_sum / num_measurements
z_mag_avg = z_mag_sum / num_measurements
print("\nAverage magnetic field values over {} iterations are \n\txMag_avg: {:+.4f}uT \tyMag_avg: {:+.4f}uT \tzMag_avg {:+.4f}uT"\
.format(num_measurements, x_mag_avg, y_mag_avg, z_mag_avg))
# This is the code that will execute when you run "python3 smbus_pni_rm3100_examples.py" in the terminal
# Please only un-comment one of these at a time.
if __name__=="__main__":
execute_continuous_measurements() # Read data form RM3100 in continuous mode
# execute_self_test() # Perform a BIST (built-in self test) on the RM3100
| 45.392308 | 139 | 0.716997 | 920 | 5,901 | 4.394565 | 0.277174 | 0.071234 | 0.031165 | 0.014098 | 0.15162 | 0.139253 | 0.11254 | 0.086569 | 0.076181 | 0.076181 | 0 | 0.030999 | 0.218268 | 5,901 | 129 | 140 | 45.744186 | 0.845437 | 0.414167 | 0 | 0.173913 | 0 | 0.021739 | 0.082474 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.086957 | 0 | 0.130435 | 0.130435 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5027d922ab68b8733596fbb5db84944c3d693455 | 10,401 | py | Python | functions.py | EmmaAlexander/possum-tools | 051ebca682cd97b68fa2a89c9d67e99cf85b09c7 | [
"MIT"
] | 5 | 2021-11-18T13:27:30.000Z | 2021-12-05T00:15:33.000Z | functions.py | EmmaAlexander/possum-tools | 051ebca682cd97b68fa2a89c9d67e99cf85b09c7 | [
"MIT"
] | null | null | null | functions.py | EmmaAlexander/possum-tools | 051ebca682cd97b68fa2a89c9d67e99cf85b09c7 | [
"MIT"
] | null | null | null | import numpy as np
import math as mth
import matplotlib.pyplot as plt
from mpl_toolkits import axes_grid1
from astropy.io import fits
import math as m
import sys
from scipy.optimize import curve_fit
import astropy.units as u
import astropy.coordinates as coord
from astropy.wcs import WCS
def fit(x,y,yerr):
#fit a straight line
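# usage: p, p_err, rchi2 = fit(x, y, yerr); the fitted line is np.polyval(p, x)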
w=np.divide(1,yerr)
df= len(x)-2 #degrees of freedom
#filter out NaNs by indexing
idx = np.isfinite(x) & np.isfinite(y) & np.isfinite(w)
if len(x[idx]) >=2:
try:
p,cov=np.polyfit(x[idx],y[idx],1,cov=True,w=w[idx])
# calculate chisquared
chisqrd = 0
for i, j, k in zip(x[idx], y[idx], yerr[idx]):
c = pow(((j - np.polyval(p, i))/k), 2)
chisqrd += c
if df != 0:
redchisqrd = chisqrd/df
else:
redchisqrd = np.nan
#cov = fit[1] * (len(x) - 2 - 2)/chisqrd
pError = np.sqrt(np.diag(cov))
except:
print("Something went wrong with a fit")
p=[np.nan,np.nan]
pError=[np.nan,np.nan]
redchisqrd=np.nan
else:
p=[np.nan,np.nan]
pError=[np.nan,np.nan]
redchisqrd=np.nan
return p,pError,redchisqrd
def peakfit(x,y):
#find location of peak datapoint
peak=np.nanmax(y)
peaklocarr=np.where(y==peak)[0]
peakloc=np.nanargmax(y)
if len(peaklocarr)>=2 or peakloc<=2:
#something is wrong
fitted_peak=np.nan
fitted_peak_loc=np.nan
else:
#get a selection of points either side of this (5 in total)
x_fit=x[int(peakloc-2):int(peakloc+3)]
y_fit=y[int(peakloc-2):int(peakloc+3)]
#do a quadratic fit
peakfit=np.polyfit(x_fit,y_fit,2)
#y = ax^2 + bx +c
#dy/dx = 2ax+b = 0 at peak
a=peakfit[0]
b=peakfit[1]
c=peakfit[2]
fitted_peak_loc=-0.5*(b/a)
fitted_peak=a*np.square(fitted_peak_loc) + b*fitted_peak_loc + c
return fitted_peak_loc,fitted_peak
def gauss(x, H, A, x0, sigma):
return H + A * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2))
def gaussfit(x,y):
peak=np.nanmax(y)
peakloc=np.where(y==peak)[0]
sigma=10. #placeholder width for the initial guess
if len(peakloc)==0 or len(peakloc)>2:
#something is wrong: no peak found, or too many equal maxima
return None
#take the first (or only) location of the peak value
peakloc=int(peakloc[0])
mean=float(x[peakloc])
#do a gaussian fit
popt, pcov = curve_fit(gauss, np.array(x,dtype='float64'), np.array(y,dtype='float64'), p0=[np.nanmin(y), np.nanmax(y), mean, sigma])
return popt
def quadfit(x,y,xloc):
#Do a quadratic fit to data
idx = np.nanmedian(np.searchsorted(x, xloc, side="left"))
x_fit=x[int(idx-1):int(idx+2)]
y_fit=y[int(idx-1):int(idx+2)]
try:
p=np.polyfit(x_fit,y_fit,2)
except:
print("Something went wrong with a fit")
p=[np.nan,np.nan,np.nan]
return p
def burnkfit(lamdasq,polfrac_cube,polfrac_cube_errs,mask):
h_dims=int(polfrac_cube.shape[2])
v_dims=int(polfrac_cube.shape[1])
k_arr=np.nan*np.ones(shape=mask.shape)
p0_arr=np.nan*np.ones(shape=mask.shape)
redchi_array=np.nan*np.ones(shape=mask.shape)
x=np.square(lamdasq)
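# Burn-law depolarization model: p(lambda) = p0 * exp(-k * lambda**4), so ln(p) is linear in lambda**4 (slope -k, intercept ln(p0))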
for i in range(0,h_dims):
for j in range(0,v_dims):
#check that it's not a masked pixel
if mask[j,i]==0:
try:
polfrac_slice=polfrac_cube[:,j,i]
polfrac_errs_slice=polfrac_cube_errs[:,j,i]
polfrac_errs_slice=np.where(polfrac_slice<0,np.nan,polfrac_errs_slice)
polfrac_slice=np.where(polfrac_slice<0,np.nan,polfrac_slice)
y=np.log(polfrac_slice)
#through standard error propagation
yerr=np.divide(polfrac_errs_slice,polfrac_slice)
#fit a straight line
p,pError,redchisqrd = fit(x,y,yerr)
k=-1*p[0]
p0=np.exp(p[1])
k_arr[j,i]=k
p0_arr[j,i]=p0
redchi_array[j,i]=redchisqrd
except:
print("uh oh")
return k_arr,p0_arr,redchi_array
def image_rms(a,rms_crop_pix):
'''Estimate the rms from strips around the four edges of the image and return their median.'''
rms1=np.sqrt(np.nanmean(np.square((a[0:rms_crop_pix,0:-rms_crop_pix]))))
rms2=np.sqrt(np.nanmean(np.square((a[rms_crop_pix:,0:rms_crop_pix]))))
rms3=np.sqrt(np.nanmean(np.square((a[:-rms_crop_pix,rms_crop_pix:]))))
rms4=np.sqrt(np.nanmean(np.square((a[0:-rms_crop_pix,-rms_crop_pix:]))))
rms=np.median([rms1,rms2,rms3,rms4])
return rms
def cube_rms(a,crop_pix):
rms1=np.sqrt(np.nanmean(np.square(a[:,0:crop_pix,0:-crop_pix]),axis=(1,2)))
rms2=np.sqrt(np.nanmean(np.square(a[:,crop_pix:,0:crop_pix]),axis=(1,2)))
rms3=np.sqrt(np.nanmean(np.square(a[:,:-crop_pix,crop_pix:]),axis=(1,2)))
rms4=np.sqrt(np.nanmean(np.square(a[:,0:-crop_pix,-crop_pix:]),axis=(1,2)))
rms=np.nanmean([rms1,rms2,rms3,rms4],axis=0)
return rms
def add_colorbar(im, aspect=40, pad_fraction=0.5, **kwargs):
"""Add a vertical color bar to an image plot."""
divider = axes_grid1.make_axes_locatable(im.axes)
width = axes_grid1.axes_size.AxesY(im.axes, aspect=1./aspect)
pad = axes_grid1.axes_size.Fraction(pad_fraction, width)
current_ax = plt.gca()
cax = divider.append_axes("right", size=width, pad=pad)
plt.sca(current_ax)
return im.axes.figure.colorbar(im, cax=cax, **kwargs)
def fdf_fit(pkrm_im,fdfdata,fdf_real,fdf_im,rmarray,rmthresh=1000):
#fit the peak RM of a FDF
rmsf_rm_fit=np.nan*np.ones(shape=pkrm_im.shape)
rmsf_chi0_fit=np.nan*np.ones(shape=pkrm_im.shape)
pkPI_fit=np.nan*np.ones(shape=pkrm_im.shape)
shape=pkrm_im.shape[1]
rm_res=np.abs(rmarray[1]-rmarray[0])
for i in range (0,shape):
print(i,shape)
for j in range (0,pkrm_im.shape[0]):
if np.abs(pkrm_im[j,i])<=rmthresh and np.isfinite(pkrm_im[j,i]):
try:
total_peak,total_peak_amp=peakfit(rmarray,fdfdata[:,j,i])
if np.abs(total_peak-pkrm_im[j,i])>rm_res:
print("uh oh peak fit went wrong")
p_real=quadfit(rmarray,fdf_real[:,j,i],total_peak)
p_im=quadfit(rmarray,fdf_im[:,j,i],total_peak)
q_amp=np.multiply(np.square(total_peak),p_real[0]) + np.multiply(total_peak,p_real[1]) + p_real[2]
u_amp=np.multiply(np.square(total_peak),p_im[0]) + np.multiply(total_peak,p_im[1]) + p_im[2]
chi_0_rmsf=0.5*np.arctan2(u_amp,q_amp)
if np.isfinite(total_peak):
rmsf_rm_fit[j,i]=total_peak
rmsf_chi0_fit[j,i]=chi_0_rmsf
pkPI_fit[j,i]=total_peak_amp
except:
rmsf_rm_fit[j,i]=np.nan
rmsf_chi0_fit[j,i]=np.nan
pkPI_fit[j,i]=np.nan
return(rmsf_rm_fit,rmsf_chi0_fit,pkPI_fit)
def fdf_fit_gauss(pkrm_im,fdfdata,fdf_real,fdf_im,rmarray,rmthresh=1000):
#fit the peak RM of a FDF
rmsf_rm_fit_width=np.nan*np.ones(shape=pkrm_im.shape)
shape=pkrm_im.shape[1]
print(shape)
for i in range (0,shape):
for j in range (0,pkrm_im.shape[0]):
if np.abs(pkrm_im[j,i])<=rmthresh and np.isfinite(pkrm_im[j,i]):
try:
H, A, x0, sigma=gaussfit(rmarray,fdfdata[:,j,i])
FWHM=2.35482 * sigma #FWHM = 2*sqrt(2*ln(2)) * sigma
if np.isfinite(x0):
rmsf_rm_fit_width[j,i]=FWHM
except:
print("gaussian fit failed for pixel", j, i)
return(rmsf_rm_fit_width)
def fitsopen(filepath):
#open a fits file and return the data and header
hdu=fits.open(filepath)
header=hdu[0].header
data=hdu[0].data
hdu.close()
return(data,header)
def finitelims(array):
#finds the limits of actual data in an images
x_collapse=np.nanmean(array,axis=0)
y_collapse=np.nanmean(array,axis=1)
x_coords=np.where(np.isfinite(x_collapse))
xmin=np.nanmin(x_coords)
xmax=np.nanmax(x_coords)
y_coords=np.where(np.isfinite(y_collapse))
ymin=np.nanmin(y_coords)
ymax=np.nanmax(y_coords)
return(xmin,xmax,ymin,ymax)
def vmax(array,percent):
vmax_val=np.percentile(array[np.isfinite(array)],percent)
return vmax_val
def get_rm_scale(rmarray,percent):
rm_abs=np.abs(rmarray[np.isfinite(rmarray)])
rm_abs=rm_abs[rm_abs<=1000]
rm_scale_val=np.percentile(rm_abs,percent)
print(rm_scale_val)
return rm_scale_val
def galactic_rm(coords,filename,errfilename):
faradaysky,header=fitsopen(filename)
faradayuncertainty,header2=fitsopen(errfilename)
wcs=WCS(header)
pixcoords=wcs.world_to_pixel(coords)
x=int(round(float(pixcoords[0])))
y=int(round(float(pixcoords[1])))
RM=faradaysky[y,x]
RMerr=faradayuncertainty[y,x]
return(RM,RMerr)
def specind(freqs,icube,icubeerrs,mask):
h_dims=int(icube.shape[2])
v_dims=int(icube.shape[1])
alpha_arr=np.nan*np.ones(shape=mask.shape)
redchi_array=np.nan*np.ones(shape=mask.shape)
x=np.log(freqs) # spectral index fit is done in log-log space (I ~ freq**alpha)
for i in range(0,h_dims):
for j in range(0,v_dims):
#check that it's not a masked pixel
if mask[j,i]==0:
try:
i_slice=icube[:,j,i]
i_errs_slice=icubeerrs[:,j,i]
i_errs_slice=np.where(i_slice<0,np.nan,i_errs_slice)
i_slice=np.where(i_slice<0,np.nan,i_slice)
y=np.log(i_slice)
#through standard error propagation on log(I)
yerr=np.divide(i_errs_slice,i_slice)
#fit a straight line: log(I) = alpha*log(freq) + const
p,pError,redchisqrd = fit(x,y,yerr)
alpha=p[0]
alpha_arr[j,i]=alpha
redchi_array[j,i]=redchisqrd
except:
print("uh oh")
print("hello")
return alpha_arr,redchi_array
| 33.551613 | 137 | 0.593885 | 1,652 | 10,401 | 3.590194 | 0.171308 | 0.02782 | 0.017704 | 0.016692 | 0.391502 | 0.329793 | 0.30231 | 0.288147 | 0.265554 | 0.208565 | 0 | 0.022316 | 0.271897 | 10,401 | 309 | 138 | 33.660194 | 0.760861 | 0.085088 | 0 | 0.22807 | 0 | 0 | 0.013193 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074561 | false | 0 | 0.048246 | 0.004386 | 0.175439 | 0.048246 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5028f41d58e1b33a911017d03393e968c16af20e | 6,056 | py | Python | data/baseline.py | afprati/Bayesian-Causal-Inference | 385d42f27fe736c4147cffa6f23d3ee338a54b1c | [
"MIT",
"Unlicense"
] | 1 | 2021-04-22T02:09:48.000Z | 2021-04-22T02:09:48.000Z | data/baseline.py | afprati/Bayesian-Causal-Inference | 385d42f27fe736c4147cffa6f23d3ee338a54b1c | [
"MIT",
"Unlicense"
] | null | null | null | data/baseline.py | afprati/Bayesian-Causal-Inference | 385d42f27fe736c4147cffa6f23d3ee338a54b1c | [
"MIT",
"Unlicense"
] | 1 | 2021-02-14T20:30:19.000Z | 2021-02-14T20:30:19.000Z | import numpy as np
import argparse
from matplotlib import pyplot as plt
import os
import pandas as pd
from statsmodels.formula.api import ols
from statsmodels.stats.outliers_influence import summary_table
N_tr = 10
N_co = 10
T = 50
T0 = 40
effect = 0.1
synthetic_path = 'data/synthetic'
TITLES = ["Non linear but parallel",
"Linear but not parallel",
"Non linear and not parallel"]
def generate_effect(t, SEED):
# np.random.seed(SEED)
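# effect is 0 before T0, ramps linearly between T0 and (T0+T)/2, then plateaus at the constant "effect"; small noise is added after T0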
true_effects = np.zeros(t.shape)
true_effects += effect/((T-T0)/2)*(t-T0)*((t>=T0) & (t<=(T0+T)/2))
true_effects += effect*(t>(T0+T)/2)
return true_effects + np.random.normal(0, 0.01, true_effects.shape)*((t-T0)>=0)
def f1(x):
# non linear but parallel
y_tr = np.cos(x/4) / (T/2) + x / T / 4 + 0.5
y_co = y_tr - 0.1
return y_tr, y_co
def f2(x):
# linear but not parallel
y_co = x / T / 3 + 0.2
y_tr = x / T / 5 + 0.4
return y_tr, y_co
def f3(x, SEED):
# np.random.seed(SEED)
coef_a = np.random.normal(1/T**2, 1/T**2/5, size=2)
coef_b = np.random.normal(2*T/3, T/5, size=2)
# non linear and not parallel
y_tr = np.cos(x/5) / (T/2) + coef_a[0]*(x-coef_b[0])**2 + 0.3
y_co = np.cos(x/4) / (T/2) + coef_a[1]*(x-coef_b[1])**2 + 0.1
return y_tr, y_co
fs = [f1, f2, f3]
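# override: only the quadratic scenario (f3) is run below; note f1/f2 take a single argument, so generate_data would need adjusting to use them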
fs = [f3]
TITLES=['quadratic']
def generate_data(SEED):
np.random.seed(SEED)
for k in range(len(TITLES)):
x = np.arange(T)
treat = np.zeros((N_tr, T))
control = np.zeros((N_co, T))
y_tr, y_co = fs[k](x, SEED)
print(np.corrcoef(y_tr,y_co))
ATT = np.zeros(treat.shape)
for i in range(N_tr):
treat[i] = y_tr
b = np.random.uniform(-0.05,0.05, 1)
treat[i] += np.random.normal(b, 0.05, T)
ATT[i] += generate_effect(x, SEED)
treat[i] += ATT[i]
for i in range(N_co):
control[i] = y_co
b = np.random.uniform(-0.05,0.05, 1)
control[i] += np.random.normal(b, 0.05, T)
np.savetxt(synthetic_path+"/treat_{}.csv".format(SEED), treat, delimiter=",")
np.savetxt(synthetic_path+"/control_{}.csv".format(SEED), control, delimiter=",")
np.savetxt(synthetic_path+"/effect_{}.csv".format(SEED), ATT, delimiter=",")
plot_synthetic_data(treat, control, k, SEED, y_tr, y_co, ATT)
fixed_effect(treat, control, k, SEED)
def plot_synthetic_data(treat, control, k, SEED,y_tr, y_co, ATT):
plt.rcParams["figure.figsize"] = (10,5)
# x_plot = np.linspace(0,T,2*T)
x_plot = x = np.arange(T)
# y_tr, y_co = fs[k](x_plot, SEED)
# y_tr += generate_effect(x_plot, SEED)
y_tr += np.mean(ATT, axis=0)
# plot true mean
plt.plot(x_plot, y_tr, 'b--', linewidth=1.0, label='Treat true mean')
plt.plot(x_plot, y_co, 'r--', linewidth=1.0, label='Control true mean')
# plot averaged data
plt.scatter(x, np.mean(treat, axis=0), c="purple", s=4, label='Treat sample averaged')
plt.scatter(x, np.mean(control, axis=0), c="crimson", s=4, label='Control sample averaged')
plt.legend(loc=2)
plt.title(TITLES[k])
plt.savefig(synthetic_path+"/data_{}.png".format(SEED))
plt.close()
def fixed_effect(treat, control, k, SEED):
x = np.arange(T)
x = np.concatenate([x for _ in range(N_tr+N_co)]).reshape(-1,1)
units = np.concatenate([[i for _ in range(T)] for i in range(N_tr+N_co)]).reshape(-1,1)
treated = np.logical_and((units<N_tr), (x>=T0)).astype("float")
y = np.concatenate([treat.reshape(-1,1),control.reshape(-1,1)])
COLUMNS = ["time", "y", "unit", "treated"]
data = pd.DataFrame(np.concatenate((x,y,units,treated),axis=1),columns=COLUMNS)
data.to_csv(synthetic_path+"/data_{}.csv".format(SEED), index=False)
return # NOTE: this early return makes the fixed-effect OLS fit below unreachable (kept for reference)
fit = ols('y ~ 1 + C(time) + C(unit) + treated:C(time)', data=data).fit()
ypred = fit.predict(data)
m_tr = ypred[:N_tr*T].to_numpy().reshape(N_tr,T)
m_co = ypred[N_tr*T:].to_numpy().reshape(N_co,T)
# print(fit.summary())
for t in range(T0, T, 1):
m_tr[:, t] -= fit.params["treated:C(time)[{}.0]".format(t)]
_, data, _ = summary_table(fit, alpha=0.05)
predict_mean_ci_lower, predict_mean_ci_upper = data[:, 4:6].T
lower_tr = predict_mean_ci_lower[:N_tr*T].reshape(N_tr,T)
upper_tr = predict_mean_ci_upper[:N_tr*T].reshape(N_tr,T)
lower_co = predict_mean_ci_lower[N_tr*T:].reshape(N_co,T)
upper_co = predict_mean_ci_upper[N_tr*T:].reshape(N_co,T)
for t in range(T0, T, 1):
lower_tr[:, t] -= fit.conf_int().loc["treated:C(time)[{}.0]".format(t),1]
upper_tr[:, t] -= fit.conf_int().loc["treated:C(time)[{}.0]".format(t),0]
test_t = np.arange(T)
# plt.plot(test_t, np.mean(control, axis=0), color='grey', alpha=0.8)
# plt.plot(test_t, np.mean(m_co, axis=0), 'k--', linewidth=1.0, label='Estimated Y(0)')
# plt.fill_between(test_t, np.mean(lower_co, axis=0), np.mean(upper_co, axis=0), alpha=0.5)
# plt.show()
ATT = np.stack([np.mean(treat-m_tr, axis=0),
np.mean(treat-upper_tr, axis=0),
np.mean(treat-lower_tr, axis=0)])
plt.rcParams["figure.figsize"] = (15,5)
plt.plot(test_t, ATT[0],'k--', linewidth=1.0, label="Estimated ATT")
plt.fill_between(test_t, ATT[1], ATT[2], alpha=0.5, label="ATT 95% CI")
plt.legend(loc=2)
plt.savefig(synthetic_path+"/fixedeffect{}_{}.png".format(k, SEED))
plt.close()
np.savetxt(synthetic_path+"/fixedeffect{}_{}.csv".format(k, SEED), ATT, delimiter=",")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='python baseline.py --type data')
parser.add_argument('-t','--type', help='data/twoway', required=True)
parser.add_argument('-s','--seed', help='seed', required=True)
args = vars(parser.parse_args())
SEED = int(args['seed'])
if args['type'] == 'data':
if not os.path.exists(synthetic_path):
os.makedirs(synthetic_path)
generate_data(SEED)
else:
exit() | 35.83432 | 95 | 0.606671 | 1,032 | 6,056 | 3.403101 | 0.165698 | 0.013667 | 0.01139 | 0.013667 | 0.3582 | 0.248861 | 0.183371 | 0.136105 | 0.104784 | 0.044989 | 0 | 0.032392 | 0.204756 | 6,056 | 169 | 96 | 35.83432 | 0.696844 | 0.087351 | 0 | 0.106557 | 0 | 0 | 0.102504 | 0.019049 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057377 | false | 0 | 0.057377 | 0 | 0.155738 | 0.008197 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50298429b69b545036671ea9d2877e60fccd7863 | 9,895 | py | Python | buckit/buckit.py | martarozek/buckit | 343cc5a5964c1d43902b6a77868652adaefa0caa | [
"BSD-3-Clause"
] | null | null | null | buckit/buckit.py | martarozek/buckit | 343cc5a5964c1d43902b6a77868652adaefa0caa | [
"BSD-3-Clause"
] | null | null | null | buckit/buckit.py | martarozek/buckit | 343cc5a5964c1d43902b6a77868652adaefa0caa | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import argparse
import copy
import logging
import os
import sys
from textwrap import dedent
import compiler
import configure_buck
import fetch
import formatting
import use_system
from helpers import BuckitException
log_levels = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
class EnvDefault(argparse.Action):
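"""argparse action that reads its default from an environment variable (envvar) when set; a value found there also satisfies required=True."""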
def __init__(self, envvar, required=True, default=None, **kwargs):
if envvar and envvar in os.environ:
default = os.environ[envvar]
if required and default:
required = False
super(EnvDefault, self).__init__(
default=default, required=required, **kwargs
)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
def add_compiler_args(parent_args, subparser):
description = (
"Detect tools like the compiler and pyton binaries, and "
"set them up properly inside of the root project's "
".buckconfig.local"
)
parser = subparser.add_parser(
"compiler",
help="Sets up compiler + tools and configures buck to use them",
description=description,
)
for args in parent_args:
parser.add_argument(args.pop("name"), **args)
def add_fetch_args(parent_args, subparser):
description = (
"Fetch source for a package, and configure .buckconfig, "
".buckconfig.local, and BUCK files for third-party package "
"managers"
)
parser = subparser.add_parser(
"fetch",
help="Fetches source and configures buck for a vendored library",
description=description,
)
for args in parent_args:
parser.add_argument(args.pop("name"), **args)
parser.add_argument(
"--use-python2",
action="store_true",
default=False,
help="Whether python2 should be used",
)
parser.add_argument(
"--python2-virtualenv",
action=EnvDefault,
required=True,
default="virtualenv --python=python2.7",
envvar="BUCKIT_PY2_VIRTUALENV",
help=(
"The python2 virtualenv command to use. Can be set with "
"BUCKIT_PY2_VIRTUALENV environment variable"
)
)
parser.add_argument(
"--python2-virtualenv-root",
action=EnvDefault,
required=True,
default="node_modules/__py2_virtualenv",
envvar="BUCKIT_PY2_VIRTUALENV_ROOT",
help="The directory to setup a virtualenv in"
)
parser.add_argument(
"--use-python3",
action="store_true",
default=True,
help="Whether python3 should be used",
)
parser.add_argument(
"--python3-virtualenv",
action=EnvDefault,
required=True,
default="virtualenv",
envvar="BUCKIT_PY3_VIRTUALENV",
help=(
"The python3 virtualenv command to use. Can be set with "
"BUCKIT_PY3_VIRTUALENV environment variable"
)
)
parser.add_argument(
"--python3-virtualenv-root",
action=EnvDefault,
required=True,
default="node_modules/__py3_virtualenv",
envvar="BUCKIT_PY3_VIRTUALENV_ROOT",
help="The directory to setup a virtualenv in"
)
parser.add_argument(
"--virtualenv-use-proxy-vars",
action="store_true",
default=False,
)
parser.add_argument(
"--package",
action=EnvDefault,
required=True,
envvar="npm_package_name",
help=(
"The package to configure. Otherwise, pulled from the "
"npm_package_name environment variable"
)
)
parser.add_argument(
"--force",
action="store_true",
default=False,
help=("Whether to force a fetch of the source")
)
def add_buckconfig_args(parent_args, subparser):
description = (
"Configures .buckconfig and .buckconfig.local to have knowledge of "
"all cells specified in package.json. Should not be run inside "
"of individual package roots"
)
parser = subparser.add_parser(
"buckconfig",
help="Reconfigure .buckconfig and .buckconfig.local",
description=description
)
for args in parent_args:
parser.add_argument(args.pop("name"), **args)
def add_system_args(parent_args, subparser):
description = (
"Installs system packages required for all or some packages. Also "
"configures buck to use system packages"
)
parser = subparser.add_parser(
"system",
help="Install system packages, and configure buck to use them",
description=description,
)
for args in parent_args:
parser.add_argument(args.pop("name"), **args)
parser.add_argument(
"--no-install-packages",
help=(
"If set, don't actually install packages specified in "
"package.json files"
),
action="store_false",
dest="install_packages",
default=True,
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--use-system-for-all",
help=(
"If set, configure buckit to always use system specs if "
"available. This is done by setting buckit.use_system_for_all "
"in .buckconfig.local, and propagating it"
),
action="store_true",
dest="use_system_for_all",
)
group.add_argument(
"--use-vendored-for-all",
help=(
"If set, configure buckit to always use vendored packages if "
"available. This is done by setting buckit.use_system_for_all "
"to false in .buckconfig.local"
),
action="store_false",
dest="use_system_for_all",
)
group.add_argument(
"--use-system-for-cells",
help=(
"If set, configure buckit to only use system libraries for "
"provided cells (comma delimited)"
),
default="",
)
parser.set_defaults(use_system_for_all=None)
def parse_args(argv):
description = dedent(
"""
Automatically configure Buck and build third party libraries for
easier C++ development"""
)
parser = argparse.ArgumentParser(description=description)
parent_options = [
{
"name": "--log-level",
"default": "info",
"choices": sorted(log_levels.keys()),
"action": EnvDefault,
"required": True,
"envvar": "BUCKIT_LOG_LEVEL",
},
{
"name": "--node-modules",
"default": "node_modules",
"help": "Where yarn installs modules to",
}
]
subparser = parser.add_subparsers(dest="selected_action")
add_buckconfig_args(copy.deepcopy(parent_options), subparser)
add_compiler_args(copy.deepcopy(parent_options), subparser)
add_fetch_args(copy.deepcopy(parent_options), subparser)
add_system_args(copy.deepcopy(parent_options), subparser)
return parser, parser.parse_args(argv)
def get_root_path(node_modules):
# If we're in a post install event, then we
# are inside of the package's root, not the main
# project
try:
if os.environ.get('npm_lifecycle_event') == 'postinstall':
start_path = os.path.split(os.getcwd())[0]
else:
start_path = os.getcwd()
return configure_buck.find_project_root(start_path, node_modules)
except BuckitException as e:
logging.info(str(e))
return os.getcwd()
def main(argv):
parser, args = parse_args(argv)
formatting.configure_logger(level=log_levels[args.log_level])
ret = 0
should_configure_buck = False
project_root = get_root_path(args.node_modules)
if args.selected_action == 'buckconfig':
should_configure_buck = True
elif args.selected_action == 'fetch':
ret = fetch.fetch_package(
project_root=project_root,
node_modules=args.node_modules,
package=args.package,
use_python2=args.use_python2,
python2_virtualenv=args.python2_virtualenv,
python2_virtualenv_root=args.python2_virtualenv_root,
use_python3=args.use_python3,
python3_virtualenv=args.python3_virtualenv,
python3_virtualenv_root=args.python3_virtualenv_root,
virtualenv_use_proxy_vars=args.virtualenv_use_proxy_vars,
force=args.force,
)
if ret == 0:
should_configure_buck = True
elif args.selected_action == 'compiler':
ret = compiler.configure_compiler(project_root=project_root)
if ret == 0:
should_configure_buck = True
elif args.selected_action == 'system':
ret = use_system.use_system_packages(
project_root=project_root,
node_modules=args.node_modules,
install_packages=args.install_packages,
use_system_for_all=args.use_system_for_all,
system_cells=filter(None, args.use_system_for_cells.split(',')),
)
if ret == 0:
should_configure_buck = True
else:
parser.print_help()
if should_configure_buck:
ret = configure_buck.configure_buck_for_all_packages(
project_root=project_root,
node_modules=args.node_modules,
)
sys.exit(ret)
if __name__ == '__main__':
main(sys.argv[1:])
| 30.352761 | 77 | 0.628903 | 1,124 | 9,895 | 5.331851 | 0.206406 | 0.031203 | 0.039713 | 0.020023 | 0.370265 | 0.30886 | 0.267479 | 0.21108 | 0.203571 | 0.149508 | 0 | 0.006006 | 0.276402 | 9,895 | 325 | 78 | 30.446154 | 0.831006 | 0.040121 | 0 | 0.312727 | 0 | 0 | 0.267526 | 0.041125 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032727 | false | 0 | 0.043636 | 0 | 0.090909 | 0.003636 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
502ad1b89cb570e155e24764fadbcc90f5001f45 | 2,996 | py | Python | MIT_video_pipeline.py | himanshu-doi/Gaze_tracking | 96836f1c3aaf973e1573c16a6aba3adcfb08903b | [
"MIT"
] | null | null | null | MIT_video_pipeline.py | himanshu-doi/Gaze_tracking | 96836f1c3aaf973e1573c16a6aba3adcfb08903b | [
"MIT"
] | null | null | null | MIT_video_pipeline.py | himanshu-doi/Gaze_tracking | 96836f1c3aaf973e1573c16a6aba3adcfb08903b | [
"MIT"
] | null | null | null | """
Demonstration of the GazeTracking library.
Check the README.md for complete documentation.
"""
from __future__ import division
import os
import cv2
from gaze_tracking import GazeTracking
gaze = GazeTracking()
# webcam = cv2.VideoCapture(0)
video_root = '/home/himanshu/Downloads'
video_name = 'P45.avi'
if not os.path.exists(os.path.join(video_root, 'MIT_images', video_name[:-4])):
os.mkdir(os.path.join(video_root, 'MIT_images', video_name[:-4]))
os.system("ffmpeg -i {0}/{2} -vf fps=30 {0}/MIT_images/{1}/output%06d.png".format(video_root, video_name[:-4], video_name))
# os.system("ffmpeg -i {0}/P45.avi -vf fps=30 {0}/MIT_images/{1}/output%06d.png".format(video_root, video_name[:-4]))
# Iterate over the extracted frames (instead of grabbing new frames from the webcam)
img_root = '/home/himanshu/Downloads/MIT_images/P45'
left = 0
right = 0
center = 0
blinking = 0
for fname in sorted(os.listdir(img_root)):  # sort so frames play back in order
frame = cv2.imread(os.path.join(img_root, fname))
frame = gaze.perspective_transform(frame, angle_x=65, angle_y=50)
# print(frame.shape)
# We send this frame to GazeTracking to analyze it
gaze.refresh(frame)
frame = gaze.annotated_frame()
text = ""
if gaze.is_blinking():
text = "Blinking"
blinking += 1
elif gaze.is_right():
text = "Looking right"
right += 1
elif gaze.is_left():
text = "Looking left"
left += 1
elif gaze.is_center():
text = "Looking center"
center += 1
cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6, (147, 58, 31), 2)
left_pupil = gaze.pupil_left_coords()
right_pupil = gaze.pupil_right_coords()
cv2.putText(frame, "Left pupil: " + str(left_pupil), (90, 130), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
cv2.imshow("Demo", frame)
if cv2.waitKey(1) == 27:
break
# cv2.destroyAllWindows()
total_frames = len(os.listdir(img_root))
center_gaze_ratio = center / total_frames
right_gaze_ratio = right / total_frames
left_gaze_ratio = left / total_frames
blinking_ratio = blinking / total_frames
print("Gaze count: \n center:{0}, left: {1}, right: {2}, blink: {3}, total: {4}\n".format(
    center, left, right, blinking, total_frames))
print("Gaze Ratios: \n center:{0}, left: {1}, right: {2}, blink: {3}".format(
    center_gaze_ratio, left_gaze_ratio, right_gaze_ratio,
blinking_ratio)) | 40.486486 | 127 | 0.561081 | 386 | 2,996 | 4.186529 | 0.303109 | 0.034653 | 0.044554 | 0.059406 | 0.287748 | 0.233911 | 0.233911 | 0.233911 | 0.189356 | 0.158416 | 0 | 0.051183 | 0.308745 | 2,996 | 74 | 128 | 40.486486 | 0.729116 | 0.125167 | 0 | 0 | 0 | 0.055556 | 0.139571 | 0.03681 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.074074 | 0 | 0.074074 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
502c3579a1c1a38d688baf0cac4174421b2fa08b | 2,245 | py | Python | djangae/contrib/backup/tests/test_tasks.py | ikedaosushi/djangae | 5fd2f8d70699fbbf155740effe42a36b205a6540 | [
"BSD-3-Clause"
] | null | null | null | djangae/contrib/backup/tests/test_tasks.py | ikedaosushi/djangae | 5fd2f8d70699fbbf155740effe42a36b205a6540 | [
"BSD-3-Clause"
] | null | null | null | djangae/contrib/backup/tests/test_tasks.py | ikedaosushi/djangae | 5fd2f8d70699fbbf155740effe42a36b205a6540 | [
"BSD-3-Clause"
] | null | null | null | import json
from django.test import override_settings
from django.contrib.admin.models import LogEntry
from djangae.contrib.gauth_datastore.models import GaeDatastoreUser
from djangae.contrib import sleuth
from djangae.environment import application_id
from djangae.test import TestCase
from djangae.contrib.backup.tasks import (
_get_valid_export_models,
backup_datastore,
SERVICE_URL,
AUTH_SCOPES,
)
from google.appengine.api import app_identity
from google.auth import app_engine
def mock_get_app_models(**kwargs):
return [
LogEntry,
GaeDatastoreUser,
]
class GetValidExportModelsTestCase(TestCase):
"""Tests focused on djangae.contrib.backup.tasks._get_valid_export_models"""
@override_settings(DJANGAE_BACKUP_EXCLUDE_MODELS=['django_admin_log'])
@sleuth.switch('django.apps.apps.get_models', mock_get_app_models)
def test_models_filtered(self):
valid_models = _get_valid_export_models(
['django_admin_log', 'gauth_datastore_gaedatastoreuser']
)
self.assertNotIn('django_admin_log', valid_models)
self.assertIn('gauth_datastore_gaedatastoreuser', valid_models)
@override_settings(DJANGAE_BACKUP_EXCLUDE_APPS=['django'])
@sleuth.switch('django.apps.apps.get_models', mock_get_app_models)
def test_apps_filtered(self):
valid_models = _get_valid_export_models(
['django_admin_log', 'gauth_datastore_gaedatastoreuser']
)
self.assertIn('gauth_datastore_gaedatastoreuser', valid_models)
self.assertNotIn('django_admin_log', valid_models)
class BackupTestCase(TestCase):
@override_settings(DJANGAE_BACKUP_ENABLED=True)
def test_ok(self):
"""Lightweight end-to-end flow test of backup_datastore."""
with sleuth.switch(
'djangae.contrib.backup.tasks._get_authentication_credentials',
lambda: app_engine.Credentials(scopes=AUTH_SCOPES)
):
with sleuth.switch(
'googleapiclient.http.HttpRequest.execute', lambda x: True
) as mock_fn:
kinds = ['gauth_datastore_gaedatastoreuser']
backup_datastore(kinds=kinds)
self.assertTrue(mock_fn.called)
| 34.538462 | 80 | 0.725167 | 256 | 2,245 | 6.023438 | 0.292969 | 0.054475 | 0.045396 | 0.051881 | 0.39559 | 0.359274 | 0.30869 | 0.193256 | 0.193256 | 0.193256 | 0 | 0 | 0.191091 | 2,245 | 64 | 81 | 35.078125 | 0.849119 | 0.055234 | 0 | 0.24 | 0 | 0 | 0.189573 | 0.148815 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.08 | false | 0 | 0.2 | 0.02 | 0.34 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
502c9594adefa75862916bf5c1572f7638704fdc | 19,195 | py | Python | seanim.py | Coreforge/io_anim_seanim | f1c5982f5f5af67a88caa1f16dbfc016a597e86f | [
"MIT"
] | 57 | 2018-04-10T13:35:23.000Z | 2022-03-26T01:34:54.000Z | seanim.py | Coreforge/io_anim_seanim | f1c5982f5f5af67a88caa1f16dbfc016a597e86f | [
"MIT"
] | 39 | 2017-04-24T00:15:59.000Z | 2022-03-30T21:46:23.000Z | seanim.py | Coreforge/io_anim_seanim | f1c5982f5f5af67a88caa1f16dbfc016a597e86f | [
"MIT"
] | 25 | 2017-05-04T02:43:03.000Z | 2022-03-29T12:35:39.000Z | import time
import struct
try:
# Try to import the Python 3.x enum module
from enum import IntEnum
except:
# If we're on Python 2.x we need to define
# a dummy replacement
class IntEnum:
pass
# <pep8 compliant>
LOG_READ_TIME = False
LOG_WRITE_TIME = False
LOG_ANIM_HEADER = False
LOG_ANIM_BONES = False
LOG_ANIM_BONE_MODIFIERS = False
LOG_ANIM_BONES_KEYS = False
LOG_ANIM_NOTES = False
class SEANIM_TYPE(IntEnum):
SEANIM_TYPE_ABSOLUTE = 0
SEANIM_TYPE_ADDITIVE = 1
SEANIM_TYPE_RELATIVE = 2
SEANIM_TYPE_DELTA = 3
class SEANIM_PRESENCE_FLAGS(IntEnum):
# These describe what type of keyframe data is present for the bones
SEANIM_BONE_LOC = 1 << 0
SEANIM_BONE_ROT = 1 << 1
SEANIM_BONE_SCALE = 1 << 2
# If any of the above flags are set, then bone keyframe data is present,
# thus this comparing against this mask will return true
SEANIM_PRESENCE_BONE = 1 << 0 | 1 << 1 | 1 << 2
SEANIM_PRESENCE_NOTE = 1 << 6 # The file contains notetrack data
SEANIM_PRESENCE_CUSTOM = 1 << 7 # The file contains a custom data block
class SEANIM_PROPERTY_FLAGS(IntEnum):
SEANIM_PRECISION_HIGH = 1 << 0
class SEANIM_FLAGS(IntEnum):
SEANIM_LOOPED = 1 << 0
class Info(object):
__slots__ = ('version', 'magic')
def __init__(self, file=None):
self.version = 1
self.magic = b'SEAnim'
if file is not None:
self.load(file)
def load(self, file):
bytes = file.read(8)
data = struct.unpack('6ch', bytes)
magic = b''
for i in range(6):
magic += data[i]
version = data[6]
assert magic == self.magic
assert version == self.version
def save(self, file):
bytes = self.magic
bytes += struct.pack('h', self.version)
file.write(bytes)
class Header(object):
__slots__ = (
'animType', 'animFlags',
'dataPresenceFlags', 'dataPropertyFlags',
'framerate', 'frameCount',
'boneCount', 'boneAnimModifierCount',
'noteCount'
)
def __init__(self, file=None):
self.animType = SEANIM_TYPE.SEANIM_TYPE_RELATIVE # Relative is default
self.animFlags = 0x0
self.dataPresenceFlags = 0x0
self.dataPropertyFlags = 0x0
self.framerate = 0
self.frameCount = 0
self.boneCount = 0
self.boneAnimModifierCount = 0
self.noteCount = 0
if file is not None:
self.load(file)
def load(self, file):
bytes = file.read(2)
data = struct.unpack('h', bytes)
headerSize = data[0]
bytes = file.read(headerSize - 2)
        # The '=' prefix tells struct to ignore C packing/alignment rules
data = struct.unpack('=6BfII4BI', bytes)
self.animType = data[0]
self.animFlags = data[1]
self.dataPresenceFlags = data[2]
self.dataPropertyFlags = data[3]
# reserved = data[4]
# reserved = data[5]
self.framerate = data[6]
self.frameCount = data[7]
self.boneCount = data[8]
self.boneAnimModifierCount = data[9]
# reserved = data[10]
# reserved = data[11]
# reserved = data[12]
self.noteCount = data[13]
def save(self, file):
bytes = struct.pack('=6BfII4BI',
self.animType, self.animFlags,
self.dataPresenceFlags, self.dataPropertyFlags,
0, 0,
self.framerate,
self.frameCount, self.boneCount,
self.boneAnimModifierCount, 0, 0, 0,
self.noteCount)
size = struct.pack('h', len(bytes) + 2)
file.write(size)
file.write(bytes)
class Frame_t(object):
"""
The Frame_t class is only ever used to get the size
and format character used by frame indices in a given seanim file
"""
__slots__ = ('size', 'char')
def __init__(self, header):
if header.frameCount <= 0xFF:
self.size = 1
self.char = 'B'
elif header.frameCount <= 0xFFFF:
self.size = 2
self.char = 'H'
else: # if header.frameCount <= 0xFFFFFFFF:
self.size = 4
self.char = 'I'
class Bone_t(object):
"""
The Bone_t class is only ever used to get the size
    and format character used by bone indices in a given seanim file
"""
__slots__ = ('size', 'char')
def __init__(self, header):
if header.boneCount <= 0xFF:
self.size = 1
self.char = 'B'
elif header.boneCount <= 0xFFFF:
self.size = 2
self.char = 'H'
else: # if header.boneCount <= 0xFFFFFFFF:
self.size = 4
self.char = 'I'
class Precision_t(object):
"""
The Precision_t class is only ever used to get the size
    and format character used by vec3_t, quat_t, etc. in a given seanim file
"""
__slots__ = ('size', 'char')
def __init__(self, header):
if (header.dataPropertyFlags &
SEANIM_PROPERTY_FLAGS.SEANIM_PRECISION_HIGH):
self.size = 8
self.char = 'd'
else:
self.size = 4
self.char = 'f'
class KeyFrame(object):
"""
A small class used for holding keyframe data
"""
__slots__ = ('frame', 'data')
def __init__(self, frame, data):
self.frame = frame
self.data = data
class Bone(object):
__slots__ = (
'name', 'flags',
'locKeyCount', 'rotKeyCount', 'scaleKeyCount',
'posKeys', 'rotKeys', 'scaleKeys',
'useModifier', 'modifier'
)
def __init__(self, file=None):
self.name = ""
self.flags = 0x0
self.locKeyCount = 0
self.rotKeyCount = 0
self.scaleKeyCount = 0
self.posKeys = []
self.rotKeys = []
self.scaleKeys = []
self.useModifier = False
self.modifier = 0
if file is not None:
self.load(file)
def load(self, file):
bytes = b''
b = file.read(1)
while not b == b'\x00':
bytes += b
b = file.read(1)
self.name = bytes.decode("utf-8")
def loadData(self, file, frame_t, precision_t,
useLoc=False, useRot=False, useScale=False):
# Read the flags for the bone
bytes = file.read(1)
data = struct.unpack("B", bytes)
self.flags = data[0]
# Load the position keyframes if they are present
if useLoc:
bytes = file.read(frame_t.size)
data = struct.unpack('%c' % frame_t.char, bytes)
self.locKeyCount = data[0]
for _ in range(self.locKeyCount):
bytes = file.read(frame_t.size + 3 * precision_t.size)
data = struct.unpack('=%c3%c' %
(frame_t.char, precision_t.char), bytes)
frame = data[0]
pos = (data[1], data[2], data[3])
self.posKeys.append(KeyFrame(frame, pos))
# Load the rotation keyframes if they are present
if useRot:
bytes = file.read(frame_t.size)
data = struct.unpack('%c' % frame_t.char, bytes)
self.rotKeyCount = data[0]
for _ in range(self.rotKeyCount):
bytes = file.read(frame_t.size + 4 * precision_t.size)
data = struct.unpack('=%c4%c' %
(frame_t.char, precision_t.char), bytes)
frame = data[0]
# Load the quaternion as XYZW
quat = (data[1], data[2], data[3], data[4])
self.rotKeys.append(KeyFrame(frame, quat))
        # Load the scale keyframes if they are present
if useScale:
bytes = file.read(frame_t.size)
data = struct.unpack('%c' % frame_t.char, bytes)
self.scaleKeyCount = data[0]
for _ in range(self.scaleKeyCount):
bytes = file.read(frame_t.size + 3 * precision_t.size)
data = struct.unpack('=%c3%c' %
(frame_t.char, precision_t.char), bytes)
frame = data[0]
scale = (data[1], data[2], data[3])
self.scaleKeys.append(KeyFrame(frame, scale))
def save(self, file, frame_t, bone_t, precision_t,
useLoc=False, useRot=False, useScale=False):
bytes = struct.pack("B", self.flags)
file.write(bytes)
if useLoc:
bytes = struct.pack('%c' % frame_t.char, len(self.posKeys))
file.write(bytes)
for key in self.posKeys:
bytes = struct.pack('=%c3%c' %
(frame_t.char, precision_t.char),
key.frame,
key.data[0], key.data[1], key.data[2])
file.write(bytes)
if useRot:
bytes = struct.pack('%c' % frame_t.char, len(self.rotKeys))
file.write(bytes)
for key in self.rotKeys:
bytes = struct.pack('=%c4%c' %
(frame_t.char, precision_t.char),
key.frame,
key.data[0], key.data[1],
key.data[2], key.data[3])
file.write(bytes)
if useScale:
bytes = struct.pack('%c' % frame_t.char, len(self.scaleKeys))
file.write(bytes)
for key in self.scaleKeys:
bytes = struct.pack('=%c3%c' %
(frame_t.char, precision_t.char),
key.frame,
key.data[0], key.data[1], key.data[2])
file.write(bytes)
class Note(object):
__slots__ = ('frame', 'name')
def __init__(self, file=None, frame_t=None):
self.frame = -1
self.name = ""
if file is not None:
self.load(file, frame_t)
def load(self, file, frame_t):
bytes = file.read(frame_t.size)
data = struct.unpack('%c' % frame_t.char, bytes)
self.frame = data[0]
bytes = b''
b = file.read(1)
while not b == b'\x00':
bytes += b
b = file.read(1)
self.name = bytes.decode("utf-8")
def save(self, file, frame_t):
bytes = struct.pack('%c' % frame_t.char, self.frame)
file.write(bytes)
bytes = struct.pack('%ds' % (len(self.name) + 1), self.name.encode())
file.write(bytes)
class Anim(object):
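    """
    In-memory representation of a *.seanim animation file.

    A minimal usage sketch (the file paths are hypothetical):

        anim = Anim("walk_cycle.seanim")   # parse an existing file
        anim.save("walk_copy.seanim")      # re-serialize it

    Note that save() calls update_metadata() to refresh the header
    counts and presence flags before writing.
    """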
__slots__ = ('__info', 'info', 'header', 'bones',
'boneAnimModifiers', 'notes')
def __init__(self, path=None):
self.__info = Info()
self.header = Header()
self.bones = []
self.boneAnimModifiers = []
self.notes = []
if path is not None:
self.load(path)
# Update the header flags based on the presence of certain keyframe /
# notetrack data
def update_metadata(self, high_precision=False, looping=False):
anim_locKeyCount = 0
anim_rotKeyCount = 0
anim_scaleKeyCount = 0
header = self.header
header.boneCount = len(self.bones)
dataPresenceFlags = header.dataPresenceFlags
dataPropertyFlags = header.dataPropertyFlags
max_frame_index = 0
for bone in self.bones:
bone.locKeyCount = len(bone.posKeys)
bone.rotKeyCount = len(bone.rotKeys)
bone.scaleKeyCount = len(bone.scaleKeys)
anim_locKeyCount += bone.locKeyCount
anim_rotKeyCount += bone.rotKeyCount
anim_scaleKeyCount += bone.scaleKeyCount
for key in bone.posKeys:
max_frame_index = max(max_frame_index, key.frame)
for key in bone.rotKeys:
max_frame_index = max(max_frame_index, key.frame)
for key in bone.scaleKeys:
max_frame_index = max(max_frame_index, key.frame)
if anim_locKeyCount:
dataPresenceFlags |= SEANIM_PRESENCE_FLAGS.SEANIM_BONE_LOC
if anim_rotKeyCount:
dataPresenceFlags |= SEANIM_PRESENCE_FLAGS.SEANIM_BONE_ROT
if anim_scaleKeyCount:
dataPresenceFlags |= SEANIM_PRESENCE_FLAGS.SEANIM_BONE_SCALE
for note in self.notes:
max_frame_index = max(max_frame_index, note.frame)
header.noteCount = len(self.notes)
if header.noteCount:
dataPresenceFlags |= SEANIM_PRESENCE_FLAGS.SEANIM_PRESENCE_NOTE
if high_precision:
dataPropertyFlags |= SEANIM_PROPERTY_FLAGS.SEANIM_PRECISION_HIGH
if looping:
header.animFlags |= SEANIM_FLAGS.SEANIM_LOOPED
header.dataPresenceFlags = dataPresenceFlags
header.dataPropertyFlags = dataPropertyFlags
# FrameCount represents the length of the animation in frames
# and since all animations start at frame 0 - we simply grab
# the max frame number (from keys / notes / etc.) and add 1 to it
header.frameCount = max_frame_index + 1
def load(self, path):
if LOG_READ_TIME:
time_start = time.time()
print("Loading: '%s'" % path)
try:
file = open(path, "rb")
except IOError:
print("Could not open file for reading:\n %s" % path)
return
self.info = Info(file)
self.header = Header(file)
self.boneAnimModifiers = []
# Init the frame_t, bone_t and precision_t info
frame_t = Frame_t(self.header)
bone_t = Bone_t(self.header)
precision_t = Precision_t(self.header)
dataPresenceFlags = self.header.dataPresenceFlags
if LOG_ANIM_HEADER:
print("Magic: %s" % self.info.magic)
print("Version: %d" % self.info.version)
print("AnimType: %d" % self.header.animType)
print("AnimFlags: %d" % self.header.animFlags)
print("PresenceFlags: %d" % dataPresenceFlags)
print("PropertyFlags: %d" % self.header.dataPropertyFlags)
print("FrameRate: %f" % self.header.framerate)
print("FrameCount: %d" % self.header.frameCount)
print("BoneCount: %d" % self.header.boneCount)
print("NoteCount: %d" % self.header.noteCount)
print("BoneModifierCount: %d" % self.header.boneAnimModifierCount)
print("Frame_t Size: %d" % frame_t.size)
print("Frame_t Char: '%s'" % frame_t.char)
self.bones = []
if dataPresenceFlags & SEANIM_PRESENCE_FLAGS.SEANIM_PRESENCE_BONE:
useLoc = dataPresenceFlags & SEANIM_PRESENCE_FLAGS.SEANIM_BONE_LOC
useRot = dataPresenceFlags & SEANIM_PRESENCE_FLAGS.SEANIM_BONE_ROT
useScale = (dataPresenceFlags &
SEANIM_PRESENCE_FLAGS.SEANIM_BONE_SCALE)
for i in range(self.header.boneCount):
if LOG_ANIM_BONES:
print("Loading Name for Bone[%d]" % i)
self.bones.append(Bone(file))
for i in range(self.header.boneAnimModifierCount):
bytes = file.read(bone_t.size + 1)
data = struct.unpack("%cB" % bone_t.char, bytes)
index = data[0]
self.bones[index].useModifier = True
self.bones[index].modifier = data[1]
self.boneAnimModifiers.append(self.bones[index])
if LOG_ANIM_BONE_MODIFIERS:
                print("Loaded Modifier %d for '%s'" %
(index, self.bones[index].name))
for i in range(self.header.boneCount):
if LOG_ANIM_BONES:
print("Loading Data For Bone[%d] '%s'" % (
i, self.bones[i].name))
self.bones[i].loadData(
file, frame_t, precision_t, useLoc, useRot, useScale)
if LOG_ANIM_BONES_KEYS:
for key in self.bones[i].posKeys:
print("%s LOC %d %s" %
(self.bones[i].name, key.frame, key.data))
for key in self.bones[i].rotKeys:
print("%s ROT %d %s" %
(self.bones[i].name, key.frame, key.data))
for key in self.bones[i].scaleKeys:
print("%s SCALE %d %s" %
(self.bones[i].name, key.frame, key.data))
self.notes = []
if (self.header.dataPresenceFlags &
SEANIM_PRESENCE_FLAGS.SEANIM_PRESENCE_NOTE):
for i in range(self.header.noteCount):
note = Note(file, frame_t)
self.notes.append(note)
if LOG_ANIM_NOTES:
print("Loaded Note[%d]:" % i)
print(" Frame %d: %s" % (note.frame, note.name))
file.close()
if LOG_READ_TIME:
time_end = time.time()
time_elapsed = time_end - time_start
print("Done! - Completed in %ss" % time_elapsed)
def save(self, filepath="", high_precision=False, looping=False):
if LOG_WRITE_TIME:
time_start = time.time()
print("Saving: '%s'" % filepath)
try:
file = open(filepath, "wb")
except IOError:
print("Could not open file for writing:\n %s" % filepath)
return
# Update the header flags, based on the presence of different keyframe
# types
self.update_metadata(high_precision, looping)
self.__info.save(file)
self.header.save(file)
for bone in self.bones:
bytes = struct.pack(
'%ds' % (len(bone.name) + 1), bone.name.encode())
file.write(bytes)
dataPresenceFlags = self.header.dataPresenceFlags
useLoc = dataPresenceFlags & SEANIM_PRESENCE_FLAGS.SEANIM_BONE_LOC
useRot = dataPresenceFlags & SEANIM_PRESENCE_FLAGS.SEANIM_BONE_ROT
useScale = dataPresenceFlags & SEANIM_PRESENCE_FLAGS.SEANIM_BONE_SCALE
frame_t = Frame_t(self.header)
bone_t = Bone_t(self.header)
precision_t = Precision_t(self.header)
for index, bone in enumerate(self.bones):
if bone.useModifier:
bytes = struct.pack('%cB' % bone_t.char, index, bone.modifier)
file.write(bytes)
for bone in self.bones:
bone.save(file, frame_t, bone_t, precision_t,
useLoc, useRot, useScale)
if dataPresenceFlags & SEANIM_PRESENCE_FLAGS.SEANIM_PRESENCE_NOTE:
for note in self.notes:
note.save(file, frame_t)
file.close()
if LOG_WRITE_TIME:
time_end = time.time()
time_elapsed = time_end - time_start
print("Done! - Completed in %ss" % time_elapsed)
| 31.938436 | 79 | 0.550039 | 2,209 | 19,195 | 4.627433 | 0.111363 | 0.024653 | 0.015653 | 0.015066 | 0.437586 | 0.404226 | 0.360399 | 0.308159 | 0.270593 | 0.234201 | 0 | 0.012461 | 0.347799 | 19,195 | 600 | 80 | 31.991667 | 0.804058 | 0.08299 | 0 | 0.367925 | 0 | 0 | 0.051489 | 0.0012 | 0 | 0 | 0.001829 | 0 | 0.004717 | 1 | 0.049528 | false | 0.002358 | 0.007075 | 0 | 0.143868 | 0.063679 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
502cc5d56e6e007269b5020dd5c9053d1f419261 | 10,321 | py | Python | image_generation/core/snops.py | drboog/FPK | 7e79cfcede41dd7ed65987acce5a7617977fc9be | [
"MIT"
] | 1 | 2021-08-19T00:08:30.000Z | 2021-08-19T00:08:30.000Z | image_generation/core/snops.py | drboog/FPK | 7e79cfcede41dd7ed65987acce5a7617977fc9be | [
"MIT"
] | null | null | null | image_generation/core/snops.py | drboog/FPK | 7e79cfcede41dd7ed65987acce5a7617977fc9be | [
"MIT"
] | null | null | null | from tensorflow.python.framework import ops
from utils.misc import variable_summaries
from .mmd import tf
try:
from .sn import spectral_normed_weight
except:
from sn import spectral_normed_weight
class batch_norm(object):
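    """Callable batch-normalization layer built on tf.layers.batch_normalization.

    A minimal usage sketch (the input tensor `net` is hypothetical):

        bn = batch_norm(name='g_bn0', format='NCHW')
        net = bn(net, train=True)
    """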
def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm", format='NCHW'):
with tf.variable_scope(name):
self.epsilon = epsilon
self.momentum = momentum
self.name = name
if format == 'NCHW':
self.axis = 1
elif format == 'NHWC':
self.axis = 3
def __call__(self, x, train=True):
# return tf.contrib.layers.batch_norm(x,
# decay=self.momentum,
# updates_collections=tf.GraphKeys.UPDATE_OPS,
# epsilon=self.epsilon,
# scale=True,
# is_training=train,
# fused=True,
# data_format=self.format,
# scope=self.name)
return tf.layers.batch_normalization(
x,
momentum=self.momentum,
epsilon=self.epsilon,
scale=True,
training=train,
fused=True,
axis=self.axis,
name=self.name)
def binary_cross_entropy(preds, targets, name=None):
"""Computes binary cross entropy given `preds`.
    For brevity, let `x = preds`, `z = targets`. The logistic loss is
        loss(x, z) = - sum_i (z[i] * log(x[i]) + (1 - z[i]) * log(1 - x[i]))
Args:
preds: A `Tensor` of type `float32` or `float64`.
targets: A `Tensor` of the same type and shape as `preds`.
"""
eps = 1e-12
    # ops.op_scope was removed in TF 1.0; name_scope takes (name, default_name, values)
    with ops.name_scope(name, "bce_loss", [preds, targets]) as name:
preds = ops.convert_to_tensor(preds, name="preds")
targets = ops.convert_to_tensor(targets, name="targets")
return tf.reduce_mean(-(targets * tf.log(preds + eps) +
(1. - targets) * tf.log(1. - preds + eps)))
def conv_cond_concat(x, y):
"""Concatenate conditioning vector on feature map axis."""
x_shapes = x.get_shape()
y_shapes = y.get_shape()
    # tf.concat takes (values, axis) in TF >= 1.0
    return tf.concat([x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)
def conv2d(input_, output_dim,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, scale=1.0, with_learnable_sn_scale=False, with_sn=False,
name="snconv2d", update_collection=None, data_format='NCHW',with_singular_values=False):
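    """2D convolution with optional spectral normalization of the kernel.

    When `with_sn` is True the kernel is divided by an estimate of its
    largest singular value (spectral norm), and optionally rescaled by a
    learnable scale `s`. A minimal call sketch (shapes are hypothetical,
    NCHW layout):

        x = tf.placeholder(tf.float32, [16, 3, 64, 64])
        h = conv2d(x, 64, with_sn=True, name='conv1')
    """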
with tf.variable_scope(name):
scope_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
tf.get_variable_scope().name)
has_summary = any([('w' in v.op.name) for v in scope_vars])
out_channel, in_channel = get_in_out_shape([output_dim], input_.get_shape().as_list(), data_format)
strides = get_strides(d_h, d_w, data_format)
w = tf.get_variable('w', [k_h, k_w, in_channel, out_channel],
initializer=tf.truncated_normal_initializer(stddev=stddev))
if with_sn:
s = tf.get_variable('s', shape=[1], initializer=tf.constant_initializer(scale), trainable=with_learnable_sn_scale, dtype=tf.float32)
w_bar, sigma = spectral_normed_weight(w, update_collection=update_collection, with_sigma=True)
w_bar = s*w_bar
conv = tf.nn.conv2d(input_, w_bar, strides=strides, padding='SAME', data_format=data_format)
else:
conv = tf.nn.conv2d(input_, w, strides=strides, padding='SAME', data_format=data_format)
biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
conv = tf.reshape(tf.nn.bias_add(conv, biases, data_format=data_format), conv.get_shape())
if not has_summary:
if with_sn:
variable_summaries({ 'b': biases, 's': s, 'sigma_w': sigma})
variable_summaries({'W': w},with_singular_values=with_singular_values)
else:
variable_summaries({'b': biases})
variable_summaries({'W': w},with_singular_values=with_singular_values)
return conv
def deconv2d(input_, output_shape,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, scale=1.0, with_learnable_sn_scale=False, with_sn=False,
name="deconv2d", with_w=False, update_collection=None, data_format='NCHW',with_singular_values=False):
with tf.variable_scope(name):
scope_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
tf.get_variable_scope().name)
has_summary = any([('w' in v.op.name) for v in scope_vars])
out_channel, in_channel = get_in_out_shape(output_shape, input_.get_shape().as_list(), data_format)
strides = get_strides(d_h, d_w, data_format)
# filter : [height, width, output_channels, in_channels]
w = tf.get_variable('w', [k_h, k_w, out_channel, in_channel],
initializer=tf.random_normal_initializer(stddev=stddev))
if with_sn:
s = tf.get_variable('s', shape=[1], initializer=tf.constant_initializer(scale), trainable=with_learnable_sn_scale, dtype=tf.float32)
w_bar, sigma = spectral_normed_weight(w, update_collection=update_collection, with_sigma=True)
w_bar = s*w_bar
deconv = tf.nn.conv2d_transpose(input_, w_bar, output_shape=output_shape, strides=strides, data_format=data_format)
else:
deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape, strides=strides, data_format=data_format)
biases = tf.get_variable('biases', [out_channel], initializer=tf.constant_initializer(0.0))
deconv = tf.reshape(tf.nn.bias_add(deconv, biases, data_format=data_format), deconv.get_shape())
if not has_summary:
if with_sn:
variable_summaries({ 'b': biases, 's': s, 'sigma_w': sigma})
variable_summaries({'W': w},with_singular_values=with_singular_values)
else:
variable_summaries({'b': biases})
variable_summaries({'W': w},with_singular_values=with_singular_values)
if with_w:
return deconv, w, biases
else:
return deconv
def get_in_out_shape(output_shape, input_shape, format):
if format == 'NCHW':
if len(output_shape) > 1:
out_channel = output_shape[1]
else:
out_channel = output_shape[0]
in_channel = input_shape[1]
elif format == 'NHWC':
if len(output_shape) > 1:
out_channel = output_shape[-1]
else:
out_channel = output_shape[0]
in_channel = input_shape[-1]
return out_channel, in_channel
def get_strides(d_h, d_w, format):
if format == 'NCHW':
return [1, 1, d_h, d_w]
elif format == 'NHWC':
return [1, d_h, d_w, 1]
def lrelu(x, leak=0.2, name="lrelu"):
return tf.maximum(x, leak*x)
def linear(input_, output_size, name="Linear", stddev=0.01, scale=1.0, with_learnable_sn_scale=False, with_sn=False, bias_start=0.0, with_w=False, update_collection=None, with_singular_values=False):
shape = input_.get_shape().as_list()
with tf.variable_scope(name):
scope_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
tf.get_variable_scope().name)
has_summary = any([('Matrix' in v.op.name) for v in scope_vars])
matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
tf.random_normal_initializer(stddev=stddev))
if with_sn:
s = tf.get_variable('s', shape=[1], initializer=tf.constant_initializer(scale), trainable=with_learnable_sn_scale, dtype=tf.float32)
matrix_bar, sigma = spectral_normed_weight(matrix, update_collection=update_collection, with_sigma=True)
matrix_bar = s*matrix_bar
mul = tf.matmul(input_, matrix_bar)
else:
mul = tf.matmul(input_, matrix)
bias = tf.get_variable(
"bias",
[output_size],
initializer=tf.constant_initializer(bias_start))
if not has_summary:
if with_sn:
variable_summaries({'b': bias, 's': s, 'sigma_w': sigma})
variable_summaries({'W': matrix}, with_singular_values=with_singular_values)
else:
variable_summaries({'b': bias})
variable_summaries({'W': matrix}, with_singular_values=with_singular_values)
if with_w:
return mul + bias, matrix, bias
else:
return mul + bias
def linear_one_hot(input_, output_size, num_classes, name="Linear_one_hot", stddev=0.01, scale=1.0, with_learnable_sn_scale=False, with_sn=False, bias_start=0.0, with_w=False, update_collection=None,with_singular_values=False):
with tf.variable_scope(name):
scope_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
tf.get_variable_scope().name)
has_summary = any([('Matrix' in v.op.name) for v in scope_vars])
matrix = tf.get_variable(
"Matrix",
[num_classes, output_size],
tf.float32,
tf.random_normal_initializer(stddev=stddev))
if with_sn:
s = tf.get_variable('s', shape=[1], initializer=tf.constant_initializer(scale), trainable=with_learnable_sn_scale, dtype=tf.float32)
matrix_bar, sigma = spectral_normed_weight(matrix, update_collection=update_collection, with_sigma=True)
matrix_bar = s*matrix_bar
embed = tf.nn.embedding_lookup(matrix_bar, input_)
else:
embed = tf.nn.embedding_lookup(matrix, input_)
if not has_summary:
if with_sn:
variable_summaries({'s': s, 'sigma_w': sigma})
variable_summaries({'W': matrix}, with_singular_values=with_singular_values)
else:
variable_summaries({'W': matrix}, with_singular_values=with_singular_values)
if with_w:
return embed, matrix
else:
return embed
| 43.004167 | 227 | 0.613991 | 1,348 | 10,321 | 4.420623 | 0.135757 | 0.040275 | 0.060413 | 0.02685 | 0.698607 | 0.666555 | 0.612855 | 0.596744 | 0.584997 | 0.560161 | 0 | 0.012935 | 0.273423 | 10,321 | 239 | 228 | 43.1841 | 0.781704 | 0.074605 | 0 | 0.485714 | 0 | 0 | 0.021986 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.062857 | false | 0 | 0.028571 | 0.011429 | 0.177143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
502e17c9457b864aa7965a867d4c0446b411d949 | 516 | py | Python | dpx/onboarding/urls.py | DotPodcast/dotpodcast-dpx | 5a084a07d094180eaefbb0946d274f74c1472e02 | [
"MIT"
] | 8 | 2018-06-10T20:41:50.000Z | 2021-03-05T16:33:11.000Z | dpx/onboarding/urls.py | DotPodcast/dotpodcast-dpx | 5a084a07d094180eaefbb0946d274f74c1472e02 | [
"MIT"
] | 8 | 2021-03-18T20:20:20.000Z | 2022-03-11T23:15:30.000Z | dpx/onboarding/urls.py | DotPodcast/dotpodcast-dpx | 5a084a07d094180eaefbb0946d274f74c1472e02 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from .views import *
urlpatterns = [
url(
r'^$',
OnboardingFormView.as_view(),
name='onboarding_welcome'
),
url(
r'^dropbox/$',
DropboxSetupView.as_view(),
name='dropbox_setup'
),
url(
r'^dropbox/callback/$',
DropboxCompleteView.as_view(),
name='dropbox_callback'
),
url(
r'^complete/$',
OnboardingCallbackView.as_view(),
name='onboarding_callback'
)
]
| 19.111111 | 41 | 0.550388 | 46 | 516 | 6 | 0.478261 | 0.057971 | 0.144928 | 0.144928 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.313953 | 516 | 26 | 42 | 19.846154 | 0.779661 | 0 | 0 | 0.291667 | 0 | 0 | 0.209302 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
503044ed49b5249d5961918241b3a0c98505661a | 1,060 | py | Python | application.py | erwindev/flask-seed | 15c08d3edf6747e1eff3f52acf5cab93a59c2470 | [
"MIT"
] | null | null | null | application.py | erwindev/flask-seed | 15c08d3edf6747e1eff3f52acf5cab93a59c2470 | [
"MIT"
] | null | null | null | application.py | erwindev/flask-seed | 15c08d3edf6747e1eff3f52acf5cab93a59c2470 | [
"MIT"
] | null | null | null | import sys
import os
from app import create_app, db
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand
from app.models import *
app = create_app(os.getenv('ENV_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
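# With the commands registered above, typical invocations look like the
# following (a sketch; 'db init' is only needed once per project):
#   python application.py shell
#   python application.py db migrate
#   python application.py db upgrade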
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
test_result = unittest.TextTestRunner(verbosity=2).run(tests)
sys.exit(len(test_result.failures))
@manager.command
def run_am_i_alive(customer_id, kpi_id):
    """Run as `python application.py run_am_i_alive 123 11`; just verifies
    that this application is runnable."""
print("I am alive! customer_id:{} kpi_id:{}".format(customer_id, kpi_id))
sys.exit(0)
if __name__ == '__main__':
# import profile
# profile.run('manager.run()')
manager.run()
| 26.5 | 77 | 0.723585 | 151 | 1,060 | 4.854305 | 0.443709 | 0.040928 | 0.053206 | 0.061392 | 0.05457 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007778 | 0.150943 | 1,060 | 39 | 78 | 27.179487 | 0.806667 | 0.10283 | 0 | 0.08 | 0 | 0 | 0.083716 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.28 | 0.04 | 0.44 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5032f98ad9b0412fa28e6b7bc499665931717f00 | 2,436 | py | Python | elon.py | XeronScript/FutureCity | a815db9db4949777f007985c9825667f1049124a | [
"MIT"
] | null | null | null | elon.py | XeronScript/FutureCity | a815db9db4949777f007985c9825667f1049124a | [
"MIT"
] | null | null | null | elon.py | XeronScript/FutureCity | a815db9db4949777f007985c9825667f1049124a | [
"MIT"
] | null | null | null | import subprocess
import time
import pyglet
import PySimpleGUI as sg
import pygame
import platform
import random
unix_packs = {
'elon.gif': 'elon2.ogg',
'gandalf.gif': 'sax.ogg',
'salamander.gif': 'salamander.ogg',
'cat.gif': 'elon2.ogg'
}
unix_packs_list = list(unix_packs.items())
win_packs = {
'elon.gif': 'elon2.mp3',
'gandalf.gif': 'sax.mp3',
'salamander.gif': 'salamander.mp3',
'cat.gif': 'elon2.mp3'
}
win_packs_list = list(win_packs.items())
def gif_displayer(gif_name):
# setting up gif
animation = pyglet.resource.animation(gif_name)
sprite = pyglet.sprite.Sprite(animation)
# adjusting window size
win = pyglet.window.Window(width=sprite.width, height=sprite.height)
# setting background
green = 0, 1, 0, 1
pyglet.gl.glClearColor(*green)
@win.event
def on_draw():
win.clear()
sprite.draw()
pyglet.app.run()
def open_hacker():
if platform.system() == 'Windows':
proc = subprocess.Popen(['python.exe', 'hackertype_modified.py'])
else:
proc = subprocess.Popen(['python3', 'hackertype_modified.py'])
time.sleep(10)
proc.kill()
# main function
def main():
pygame.mixer.init()
sg.theme('DarkBlack')
layout = [[sg.Button('Break into the brain')]]
window = sg.Window('Hack Machine', layout)
while True:
event, values = window.read()
if event == sg.WIN_CLOSED:
break
if event == 'Break into the brain':
window.close()
open_hacker()
layout = [[sg.Text("You're in")], [sg.Button("Get in")]]
window = sg.Window('Hack Machine', layout)
while True:
event, values = window.read()
if event == "Get in":
if platform.system() == 'Windows':
brain_gif, brain_theme = random.choice(win_packs_list)
else:
brain_gif, brain_theme = random.choice(unix_packs_list)
theme = pygame.mixer.Sound('sounds/' + brain_theme)
theme.play()
gif_displayer('gifs/' + brain_gif)
theme.stop()
if event == sg.WIN_CLOSED:
break
break
window.close()
| 24.857143 | 80 | 0.542282 | 268 | 2,436 | 4.820896 | 0.36194 | 0.027864 | 0.018576 | 0.026316 | 0.212848 | 0.181889 | 0.105263 | 0.105263 | 0.105263 | 0.105263 | 0 | 0.009214 | 0.331691 | 2,436 | 97 | 81 | 25.113402 | 0.784398 | 0.028736 | 0 | 0.25 | 0 | 0 | 0.15 | 0.019469 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.102941 | 0 | 0.161765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
503709b04aeb61a22e7078b4a00f57536595fb26 | 4,592 | py | Python | tests/unit/symbolic_regression/agraph/simplification_backend/test_simplification_backend.py | nolanstr/bingo_multi_stage | 7a88c4f5c59268d0612664be5864765db2edad51 | [
"Apache-2.0"
] | null | null | null | tests/unit/symbolic_regression/agraph/simplification_backend/test_simplification_backend.py | nolanstr/bingo_multi_stage | 7a88c4f5c59268d0612664be5864765db2edad51 | [
"Apache-2.0"
] | null | null | null | tests/unit/symbolic_regression/agraph/simplification_backend/test_simplification_backend.py | nolanstr/bingo_multi_stage | 7a88c4f5c59268d0612664be5864765db2edad51 | [
"Apache-2.0"
] | null | null | null | # Ignoring some linting rules in tests
# pylint: disable=redefined-outer-name
# pylint: disable=missing-docstring
import numpy as np
import pytest
from bingo.symbolic_regression.agraph.operator_definitions import *
from bingo.symbolic_regression.agraph.simplification_backend \
import simplification_backend as py_simp_backend
try:
from bingocpp import simplification_backend as cpp_simp_backend
except ImportError:
cpp_simp_backend = None
CPP_PARAM = pytest.param("c++",
marks=pytest.mark.skipif(not cpp_simp_backend,
reason='BingoCpp import '
'failure'))
@pytest.fixture(params=["Python", CPP_PARAM])
def engine(request):
return request.param
@pytest.fixture
def simp_backend(engine):
if engine == "Python":
return py_simp_backend
return cpp_simp_backend
@pytest.fixture
def sample_command_array():
return np.array([[VARIABLE, 0, 0],
[VARIABLE, 1, 1],
[ADDITION, 0, 1],
[CONSTANT, 0, 0],
[SIN, 2, 3],
[CONSTANT, 0, 0],
[ADDITION, 4, 4]])
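

# Each row of a command array encodes one node as [operator, param1, param2]:
# for terminals (VARIABLE, CONSTANT) the parameters select an input/constant,
# while for operators they index earlier rows (e.g. the ADDITION row above
# with parameters 0 and 1 adds the results of rows 0 and 1).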
def test_utilized_commands(simp_backend, sample_command_array):
util = simp_backend.get_utilized_commands(sample_command_array)
expected_util = [True, True, True, False, True, False, True]
np.testing.assert_array_equal(util, expected_util)
def test_reduce_stack(simp_backend, sample_command_array):
reduction = simp_backend.reduce_stack(sample_command_array)
expected_reduction = np.array([[VARIABLE, 0, 0],
[VARIABLE, 1, 1],
[ADDITION, 0, 1],
[SIN, 2, 2],
[ADDITION, 3, 3]])
np.testing.assert_array_equal(reduction, expected_reduction)
def test_simplification_1(simp_backend, engine):
if engine == "c++":
pytest.xfail(reason="Simplification not yet implemented in c++")
stack = np.array([[CONSTANT, -1, -1],
[MULTIPLICATION, 0, 0],
[ADDITION, 1, 0],
[SUBTRACTION, 1, 0],
[CONSTANT, -1, -1],
[VARIABLE, 0, 0],
[ADDITION, 2, 4],
[ADDITION, 6, 3],
[SUBTRACTION, 7, 5],
[ADDITION, 8, 2],
[SUBTRACTION, 4, 9],
])
simp_stack = simp_backend.simplify_stack(stack)
expected_simp = np.array([[CONSTANT, -1, -1],
[VARIABLE, 0, 0],
[ADDITION, 0, 1]])
np.testing.assert_array_equal(simp_stack, expected_simp)
def test_simplification_2(simp_backend, engine):
if engine == "c++":
pytest.xfail(reason="Simplification not yet implemented in c++")
stack = np.array([[CONSTANT, -1, -1],
[VARIABLE, 0, 0],
[CONSTANT, -1, -1],
[ADDITION, 1, 1],
[SUBTRACTION, 3, 3],
[SUBTRACTION, 2, 0],
[MULTIPLICATION, 5, 4],
[SUBTRACTION, 6, 5],
])
simp_stack = simp_backend.simplify_stack(stack)
expected_simp = np.array([[CONSTANT, -1, -1]])
np.testing.assert_array_equal(simp_stack, expected_simp)
def test_simplification_3(simp_backend, engine):
if engine == "c++":
pytest.xfail(reason="Simplification not yet implemented in c++")
stack = np.array([[CONSTANT, -1, -1],
[VARIABLE, 0, 0],
[SUBTRACTION, 0, 0],
[ADDITION, 2, 2],
[MULTIPLICATION, 3, 2],
[SUBTRACTION, 4, 0],
[SUBTRACTION, 1, 3],
[MULTIPLICATION, 6, 2],
[MULTIPLICATION, 5, 7],
[ADDITION, 6, 2],
[ADDITION, 8, 9],
[SUBTRACTION, 10, 0],
[SUBTRACTION, 11, 4],
[CONSTANT, -1, -1],
[ADDITION, 12, 13],
])
simp_stack = simp_backend.simplify_stack(stack)
expected_simp = np.array([[CONSTANT, -1, -1],
[VARIABLE, 0, 0],
[ADDITION, 0, 1]])
np.testing.assert_array_equal(simp_stack, expected_simp) | 36.444444 | 75 | 0.505226 | 463 | 4,592 | 4.831533 | 0.198704 | 0.083594 | 0.040232 | 0.042915 | 0.464461 | 0.375503 | 0.375503 | 0.362986 | 0.362986 | 0.362986 | 0 | 0.039873 | 0.38284 | 4,592 | 126 | 76 | 36.444444 | 0.749471 | 0.023301 | 0 | 0.376238 | 0 | 0 | 0.03793 | 0 | 0 | 0 | 0 | 0 | 0.049505 | 1 | 0.079208 | false | 0 | 0.069307 | 0.019802 | 0.188119 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5037c073d656f71480d1155fdb9f2a40354e38a2 | 600 | py | Python | remove_duplicates.py | umanggoel2001/Hacktoberfest2021-1 | 8dc2338df89db68b03d89dc2052b6f5166a5a413 | [
"MIT"
] | 1 | 2021-10-04T18:01:41.000Z | 2021-10-04T18:01:41.000Z | remove_duplicates.py | umanggoel2001/Hacktoberfest2021-1 | 8dc2338df89db68b03d89dc2052b6f5166a5a413 | [
"MIT"
] | null | null | null | remove_duplicates.py | umanggoel2001/Hacktoberfest2021-1 | 8dc2338df89db68b03d89dc2052b6f5166a5a413 | [
"MIT"
] | 1 | 2021-10-06T03:45:26.000Z | 2021-10-06T03:45:26.000Z |
# Question Link : https://leetcode.com/problems/remove-duplicates-from-sorted-list/
from typing import Optional


# Standard singly-linked list node, as provided by LeetCode:
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


class Solution:
def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:
prev = head
current = head
if head:
val = head.val
head = head.next
while (head != None):
if head.val == val:
prev.next = head.next
head = head.next
else:
val = head.val
prev = head
head = head.next
return current
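

# A quick self-test sketch (hypothetical values), runnable as a script:
if __name__ == "__main__":
    # Build 1 -> 1 -> 2; removing duplicates should leave 1 -> 2.
    a, b, c = ListNode(1), ListNode(1), ListNode(2)
    a.next, b.next = b, c
    node = Solution().deleteDuplicates(a)
    while node:
        print(node.val)  # prints 1 then 2
        node = node.next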
| 25 | 83 | 0.475 | 58 | 600 | 4.913793 | 0.482759 | 0.098246 | 0.126316 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.438333 | 600 | 23 | 84 | 26.086957 | 0.845697 | 0.135 | 0 | 0.4375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5039d9f1efb7e99201854fcd5a1b77fb7a57ed32 | 1,758 | py | Python | userbot/plugins/antivirus2.py | SH4DOWV/X-tra-Telegram | 73634556989ac274c44a0a2cc9ff4322e7a52158 | [
"MIT"
] | null | null | null | userbot/plugins/antivirus2.py | SH4DOWV/X-tra-Telegram | 73634556989ac274c44a0a2cc9ff4322e7a52158 | [
"MIT"
] | null | null | null | userbot/plugins/antivirus2.py | SH4DOWV/X-tra-Telegram | 73634556989ac274c44a0a2cc9ff4322e7a52158 | [
"MIT"
] | null | null | null | # Lots of lub to @r4v4n4 for gibing the base <3
import datetime
from telethon import events
from telethon.errors.rpcerrorlist import YouBlockedUserError
from telethon.tl.functions.account import UpdateNotifySettingsRequest
from userbot.utils import admin_cmd,register
@borg.on(admin_cmd("scan ?(.*)"))
async def _(event):
if event.fwd_from:
return
if not event.reply_to_msg_id:
        await event.edit("```Reply to any message.```")
return
reply_message = await event.get_reply_message()
if not reply_message.media:
        await event.edit("```Reply to a message that contains media.```")
return
chat = "@DrWebBot"
sender = reply_message.sender
if reply_message.sender.bot:
        await event.edit("```Reply to a message from an actual person.```")
return
    await event.edit(" `Running my fingertips over it`")
async with borg.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=161163358))
await borg.forward_messages(chat, reply_message)
response = await response
except YouBlockedUserError:
            await event.reply("```Please unblock @DrWebBot.```")
return
if response.text.startswith("Forward"):
            await event.edit("```Could you please disable your forward privacy setting?```")
else:
if response.text.startswith("Select"):
                await event.edit("`Please go to @DrWebBot and select a language.`")
else:
                await event.edit(f"**AntiVirus scan complete. Here are the final results.**\n {response.message.message}")
| 43.95 | 121 | 0.663254 | 209 | 1,758 | 5.492823 | 0.521531 | 0.078397 | 0.085366 | 0.057491 | 0.080139 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009673 | 0.235495 | 1,758 | 39 | 122 | 45.076923 | 0.844494 | 0.025597 | 0 | 0.189189 | 0 | 0 | 0.27294 | 0.027469 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.135135 | 0 | 0.27027 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
503a31e2bef9fe942b80f7708d4a5009a933eace | 28,268 | py | Python | bossdata/spec.py | dkirkby/bossdata | 313b6a69e75679248a2e41f02f88da467835aa45 | [
"MIT"
] | 2 | 2017-06-12T13:18:20.000Z | 2020-04-08T10:00:31.000Z | bossdata/spec.py | dkirkby/bossdata | 313b6a69e75679248a2e41f02f88da467835aa45 | [
"MIT"
] | 102 | 2015-05-11T20:27:10.000Z | 2019-01-08T16:01:54.000Z | bossdata/spec.py | dkirkby/bossdata | 313b6a69e75679248a2e41f02f88da467835aa45 | [
"MIT"
] | 4 | 2015-06-18T19:51:46.000Z | 2017-08-31T00:23:18.000Z | # -*- coding: utf-8 -*-
# Licensed under a MIT style license - see LICENSE.rst
""" Access spectroscopic data for a single BOSS target.
"""
from __future__ import division, print_function
from six import binary_type
import re
import numpy as np
import numpy.ma
import fitsio
import astropy.table
import bossdata.raw
def get_fiducial_pixel_index(wavelength):
"""
Convert a wavelength to a fiducial pixel index.
The fiducial wavelength grid used by all SDSS co-added spectra is
logarithmically spaced::
wavelength = wavelength0 * 10**(coef * index)
The value ``coef = 1e-4`` is encoded in the FITS HDU headers of SDSS
coadded data files with the keyword ``CD1_1`` (and sometimes also
``COEFF1``). The value of ``wavelength0`` defines ``index = 0`` and is
similarly encoded as ``CRVAL1`` (and sometimes also ``COEFF0``). However,
its value is not constant between different SDSS co-added spectra because
varying amounts of invalid data are trimmed. This function adopts the
constant value 3500.26 Angstrom corresponding to ``index = 0``:
>>> get_fiducial_pixel_index(3500.26)
0.0
Note that the return value is a float so that wavelengths not on the
fiducial grid can be converted and detected:
>>> get_fiducial_pixel_index(3500.5)
0.29776960129179741
The calculation is automatically broadcast over an input wavelength array:
>>> wlen = np.arange(4000,4400,100)
>>> get_fiducial_pixel_index(wlen)
array([ 579.596863 , 686.83551692, 791.4898537 , 893.68150552])
Use :attr:`fiducial_pixel_index_range` for an index range that covers all
SDSS spectra and :attr:`fiducial_loglam` to covert integer indices to
wavelengths.
Args:
wavelength(float): Input wavelength in Angstroms.
Returns:
numpy.ndarray: Array of floating-point indices relative to the fiducial
wavelength grid.
"""
return (np.log10(wavelength) - _fiducial_log10lam0)/_fiducial_coef
_fiducial_coef = 1e-4
_fiducial_log10lam0 = np.log10(3500.26)
fiducial_pixel_index_range = (0, 4800)
"""
Range of fiducial pixel indices that covers all spectra.
Use :func:`get_fiducial_pixel_index` to calculate fiducial pixel indices.
"""
fiducial_loglam = (_fiducial_log10lam0 +
_fiducial_coef * np.arange(*fiducial_pixel_index_range))
"""
Array of fiducial log10(wavelength in Angstroms) covering all spectra.
Lookup the log10(wavelength) or wavelength corresponding to a particular
integral pixel index using:
>>> fiducial_loglam[100]
3.554100305027835
>>> 10**fiducial_loglam[100]
3581.7915291606305
The bounding wavelengths of this range are:
>>> 10**fiducial_loglam[[0,-1]]
array([ 3500.26 , 10568.18251472])
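
A wavelength can be snapped to its nearest fiducial pixel and back (a small
round-trip sketch):

>>> index = int(round(get_fiducial_pixel_index(4000.0)))
>>> abs(10**fiducial_loglam[index] - 4000.0) < 0.5
True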
The :meth:`SpecFile.get_valid_data` and :meth:`PlateFile.get_valid_data()
<bossdata.plate.PlateFile.get_valid_data>` methods provide a ``fiducial_grid``
option that returns data using this grid.
"""
class Exposures(object):
"""Table of exposure info extracted from FITS header keywords.
Parse the NEXP and EXPIDnn keywords that are present in the header of HDU0
in :datamodel:`spPlate <PLATE4/spPlate>` and :datamodel:`spec
<spectra/PLATE4/spec>` FITS files.
The constructor initializes the ``table`` attribute with column names
``offset``, ``camera``, ``science``, ``flat`` and ``arc``, and creates one
row for each keyword EXPIDnn, where ``offset`` equals the keyword sequence
number nn, ``camera`` is one of b1, b2, r1, r2, and the remaining columns
record the science and calibration exposure numbers.
Use :meth:`get_info` to retrieve the n-th exposure for a particular camera
(b1, b2, r1, r2). Note that when this class is initialized from a
:datamodel:`spec file <spectra/PLATE4/spec>` header, it will only describe
the two cameras of a single spectrograph (b1+r1 or b2+r2). The `num_by_camera`
attribute is a dictionary of ints indexed by camera that records the number
of science exposures available for that camera.
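
    A minimal construction sketch, assuming a header dictionary already read
    from a spec or spPlate file (the exposure ID numbers below are
    hypothetical):

    >>> exposures = Exposures({'NEXP': 2,
    ...     'EXPID01': 'b1-00123456-00123450-00123451',
    ...     'EXPID02': 'r1-00123456-00123450-00123451'})
    >>> exposures.num_by_camera['b1'], exposures.num_by_camera['r1']
    (1, 1)
    >>> int(exposures.get_info(0, 'b1')['science'])
    123456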
Args:
header(dict): dictionary of FITS header keyword, value pairs.
"""
def __init__(self, header):
num_exposures = header['NEXP']
expid_pattern = re.compile('([br][12])-([0-9]{8})-([0-9]{8})-([0-9]{8})')
exposure_set = set()
self.table = astropy.table.Table(
names=('offset', 'camera', 'science', 'flat', 'arc'),
dtype=('i4', 'S2', 'i4', 'i4', 'i4'))
self.num_by_camera = dict(b1=0, b2=0, r1=0, r2=0)
for i in range(num_exposures):
camera, science_num, flat_num, arc_num = expid_pattern.match(
header['EXPID{0:02d}'.format(i + 1)]).groups()
self.table.add_row((i, camera, int(science_num), int(flat_num), int(arc_num)))
exposure_set.add(int(science_num))
self.num_by_camera[camera] += 1
self.sequence = sorted(exposure_set)
        # Check that the science exposures listed for each camera are self-consistent.
num_exposures = len(self.sequence)
for camera in ('b1', 'b2', 'r1', 'r2'):
if self.num_by_camera[camera] == 0:
continue
if self.num_by_camera[camera] != num_exposures:
raise RuntimeError('Found {} {} exposures but expected {}.'.format(
self.num_by_camera[camera], camera, num_exposures))
# Conversion to binary_type is needed for backwards compatibility with
# astropy < 2.0 and python 3. For details, see:
# http://docs.astropy.org/en/stable/table/access_table.html#bytestring-columns-python-3
camera_rows = self.table['camera'] == binary_type(camera, 'ascii')
camera_exposures = set(self.table[camera_rows]['science'])
if camera_exposures != exposure_set:
raise RuntimeError('Found inconsistent {} exposures: {}. Expected: {}.'.format(
camera, camera_exposures, exposure_set))
def get_info(self, exposure_index, camera):
"""Get information about a single camera exposure.
Args:
exposure_index(int): The sequence number for the requested camera
exposure, in the range 0 - `(num_exposures[camera]-1)`.
camera(str): One of b1,b2,r1,r2.
Returns:
A structured array with information about the requested exposure,
corresponding to one row of our ``table`` attribute.
Raises:
ValueError: Invalid exposure_index or camera.
RuntimeError: Exposure not present.
"""
if camera not in ('b1', 'b2', 'r1', 'r2'):
raise ValueError(
'Invalid camera "{}", expected b1, b2, r1, or r2.'.format(camera))
if self.num_by_camera[camera] == 0:
raise ValueError('There are no {} exposures available.'.format(camera))
if exposure_index < 0 or exposure_index >= self.num_by_camera[camera]:
raise ValueError('Invalid exposure_index {}, expected 0-{}.'.format(
exposure_index, self.num_by_camera[camera] - 1))
science_num = self.sequence[exposure_index]
row = (self.table['science'] == science_num) & (
self.table['camera'] == binary_type(camera, 'ascii'))
if not np.any(row):
# This should never happen after our self-consistency checks in the ctor.
raise RuntimeError('No exposure[{}] = {:08d} found for {}.'.format(
exposure_index, science_num, camera))
if np.count_nonzero(row) > 1:
# This should never happen after our self-consistency checks in the ctor.
raise RuntimeError(
'Found multiple {} exposures[{}].'.format(camera, exposure_index))
return self.table[row][0]
def get_exposure_name(self, exposure_index, camera, ftype='spCFrame'):
"""Get the file name of a single science or calibration exposure data product.
Use the exposure name to locate FITS data files associated with
individual exposures. The supported file types are:
:datamodel:`spCFrame <PLATE4/spCFrame>`,
:datamodel:`spFrame <PLATE4/spFrame>`,
:datamodel:`spFluxcalib <PLATE4/spFluxcalib>`
:datamodel:`spFluxcorr <PLATE4/spFluxcorr>`,
:datamodel:`spArc <PLATE4/spArc>`,
:datamodel:`spFlat <PLATE4/spFlat>`. This method is analogous to
:meth:`bossdata.plate.Plan.get_exposure_name`, but operates for a single
target and only knows about exposures actually used in the final co-add
(including the associated arc and flat exposures).
Args:
exposure_index(int): The sequence number for the requested camera
            exposure, in the range 0 - `(num_by_camera[camera]-1)`.
camera(str): One of b1,b2,r1,r2.
ftype(str): Type of exposure file whose name to return. Must be one of
spCFrame, spFrame, spFluxcalib, spFluxcorr, spArc, spFlat. An spCFrame
is assumed to be uncompressed, and all other files are assumed to be
                compressed. When a calibration is requested (spArc, spFlat), the
                result from the calibration exposure used to analyze the specified
                science exposure is returned.
Returns:
str: Exposure name of the form [ftype]-[cc]-[eeeeeeee].[ext] where [cc]
identifies the camera (one of b1,r1,b2,r2) and [eeeeeeee] is the
zero-padded arc/flat/science exposure number. The extension [ext]
is "fits" for spCFrame files and "fits.gz" for all other file types.
Raises:
ValueError: one of the inputs is invalid.
"""
if camera not in ('b1', 'b2', 'r1', 'r2'):
raise ValueError(
'Invalid camera "{}", expected b1, b2, r1, or r2.'.format(camera))
if exposure_index < 0 or exposure_index >= self.num_by_camera[camera]:
raise ValueError('Invalid exposure_index {}, expected 0-{}.'.format(
exposure_index, self.num_by_camera[camera] - 1))
ftypes = ('spCFrame', 'spFrame', 'spFluxcalib', 'spFluxcorr', 'spArc', 'spFlat')
if ftype not in ftypes:
raise ValueError('Invalid file type ({}) must be one of: {}.'
.format(ftype, ', '.join(ftypes)))
        # Get the science exposure ID number for the requested sequence number 0,1,...
exposure_info = self.get_info(exposure_index, camera)
if ftype == 'spArc':
exposure_id = exposure_info['arc']
elif ftype == 'spFlat':
exposure_id = exposure_info['flat']
else:
exposure_id = exposure_info['science']
name = '{0}-{1}-{2:08d}.fits'.format(ftype, camera, exposure_id)
if ftype != 'spCFrame':
name += '.gz'
return name
def get_raw_image(self, plate, mjd, exposure_index, camera, flavor='science',
finder=None, mirror=None):
"""Get the raw image file associated with an exposure.
Args:
plate(int): Plate number, which must be positive.
mjd(int): Modified Julian date of the observation, which must be > 45000.
exposure_index(int): The sequence number for the requested camera
                exposure, in the range 0 - `(num_by_camera[camera]-1)`.
camera(str): One of b1,b2,r1,r2.
flavor(str): One of science, arc, flat.
finder(bossdata.path.Finder): Object used to find the names of BOSS data files.
If not specified, the default Finder constructor is used.
mirror(bossdata.remote.Manager): Object used to interact with the local mirror
of BOSS data. If not specified, the default Manager constructor is used.
Returns:
bossdata.raw.RawImageFile: requested raw image file.
Raises:
ValueError: one of the inputs is invalid.
"""
        if plate <= 0:
raise ValueError('Invalid plate number ({}) must be > 0.'.format(plate))
if mjd <= 45000:
            raise ValueError('Invalid mjd ({}) must be > 45000.'.format(mjd))
if camera not in ('b1', 'b2', 'r1', 'r2'):
raise ValueError(
'Invalid camera "{}". Expected one of b1, b2, r1, r2.'.format(camera))
if exposure_index < 0 or exposure_index >= self.num_by_camera[camera]:
raise ValueError('Invalid exposure_index {}, expected 0-{}.'.format(
exposure_index, self.num_by_camera[camera] - 1))
if flavor not in ('science', 'arc', 'flat'):
raise ValueError(
                'Invalid flavor "{}". Expected one of science, arc, flat.'.format(flavor))
exposure_info = self.get_info(exposure_index, camera)
exposure_id = exposure_info[flavor]
# Load the co-add plan to determine the observation MJD for this exposure,
# which is generally different (earlier) than the MJD assigned to the coadd.
# There are other ways to do this, but this requires the smallest download.
if finder is None:
finder = bossdata.path.Finder()
plan_path = finder.get_plate_plan_path(plate, mjd, combined=True)
if mirror is None:
mirror = bossdata.remote.Manager()
plan = bossdata.plate.Plan(mirror.get(plan_path))
# Find the observation MJD of the requested science exposure.
found = plan.exposure_table['exp'] == exposure_info['science']
if np.count_nonzero(found) != 1:
raise RuntimeError('Cannot locate science exposure in plan.')
obs_mjd = plan.exposure_table[found][0]['mjd']
path = mirror.get(finder.get_raw_path(obs_mjd, camera, exposure_id))
return bossdata.raw.RawImageFile(path)
class SpecFile(object):
""" A BOSS spec file containing summary data for a single target.
A :datamodel:`spec file <spec>` contains co-added spectra for a single target of an
observation. This class supports the full version described in the data model as
well as a :datamodel:`lite version <spectra/lite/PLATE4/spec>` that does not contain
the per-exposure HDUs with indices >= 4. Use the `lite` attribute to detect which
version an object represents.
To read all co-added spectra of an observation use :class:`bossdata.plate.PlateFile`.
Individual exposures of a half-plate can be read using :class:`bossdata.plate.FrameFile`.
The ``plate``, ``mjd`` and ``fiber`` attributes specify the target observation.
The ``info`` attribute contains this target's row from :datamodel:`spAll <spAll>`
as a structured numpy array, so its metadata can be accessed as ``info['OBJTYPE']``,
etc.
Use :meth:`get_valid_data` to access this target's spectra, or the :class:`exposures
<Exposures>` attribute for a list of exposures used in the coadd (see
:class:`bossdata.plate.Plan` for alternative information about the exposures used in
    a coadd). The ``num_exposures`` attribute gives the number of science exposures used
for this target's co-added spectrum (counting a blue+red pair as one exposure). Use
    :meth:`get_exposure_name` to locate files associated with the individual exposures used
for this co-added spectrum.
This class is only intended for reading the BOSS spec file format, so generic
operations on spectroscopic data (redshifting, resampling, etc) are intentionally not
included here, but are instead provided in the `speclite
<http://speclite.readthedocs.org>`__ package.
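    A minimal usage sketch, following the path lookup described below (the
    plate, MJD and fiber values are illustrative)::

        import bossdata
        finder = bossdata.path.Finder()
        mirror = bossdata.remote.Manager()
        spec = SpecFile(mirror.get(finder.get_spec_path(plate=6641, mjd=56383, fiber=30)))
        data = spec.get_valid_data()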
Args:
path(str): Local path of the spec FITS file to use. This should normally be obtained
via :meth:`bossdata.path.Finder.get_spec_path` and can be automatically mirrored
via :meth:`bossdata.remote.Manager.get` or using the :ref:`bossfetch` script. The
file is opened in read-only mode so you do not need write privileges.
"""
def __init__(self, path):
self.hdulist = fitsio.FITS(path, mode=fitsio.READONLY)
self.lite = (len(self.hdulist) == 4)
self.header = self.hdulist[0].read_header()
# Look up the available exposures.
self.exposures = Exposures(self.header)
self.num_exposures = len(self.exposures.sequence)
# Extract our plate-mjd-fiber values.
self.plate, self.mjd, self.fiber = (
self.hdulist[2]['PLATE', 'MJD', 'FIBERID'][0][0])
# We don't use bossdata.plate.get_num_fibers here to avoid a circular import.
num_fibers = 640 if self.plate < 3510 else 1000
        # Determine which spectrograph (1 or 2) observed this target's fiber;
        # combined with the band, this selects the camera (b1/b2/r1/r2).
self.spec_id = '1' if self.fiber <= num_fibers // 2 else '2'
def get_exposure_name(self, sequence_number, band, ftype='spCFrame'):
"""Get the file name of a single science exposure data product.
Use the exposure name to locate FITS data files associated with
individual exposures. The supported file types are:
:datamodel:`spCFrame <PLATE4/spCFrame>`,
:datamodel:`spFrame <PLATE4/spFrame>`,
:datamodel:`spFluxcalib <PLATE4/spFluxcalib>` and
:datamodel:`spFluxcorr <PLATE4/spFluxcorr>`. This method is analogous to
:meth:`bossdata.plate.Plan.get_exposure_name`, but operates for a single
target and only knows about exposures actually used in the final co-add.
Args:
sequence_number(int): Science exposure sequence number, counting from zero.
Must be less than our num_exposures attribute.
band(str): Must be 'blue' or 'red'.
ftype(str): Type of exposure file whose name to return. Must be one of
spCFrame, spFrame, spFluxcalib, spFluxcorr. An spCFrame is assumed
to be uncompressed, and all other files are assumed to be compressed.
Returns:
str: Exposure name of the form [ftype]-[cc]-[eeeeeeee].[ext] where [cc]
identifies the camera (one of b1,r1,b2,r2) and [eeeeeeee] is the
zero-padded exposure number. The extension [ext] is "fits" for
spCFrame files and "fits.gz" for all other file types.
Raises:
ValueError: one of the inputs is invalid.
"""
if band not in ('blue', 'red'):
raise ValueError('Invalid band "{}". Expected blue or red.'.format(band))
camera = band[0] + self.spec_id
return self.exposures.get_exposure_name(sequence_number, camera, ftype)
def get_raw_image(self, sequence_number, band, flavor='science',
finder=None, mirror=None):
"""Get a raw image file associated with one of this coadd's exposures.
Args:
sequence_number(int): The sequence number for the requested camera
                exposure, in the range 0 - `(num_exposures-1)`.
band(str): Must be 'blue' or 'red'.
flavor(str): One of science, arc, flat.
finder(bossdata.path.Finder): Object used to find the names of BOSS data files.
If not specified, the default Finder constructor is used.
mirror(bossdata.remote.Manager): Object used to interact with the local mirror
of BOSS data. If not specified, the default Manager constructor is used.
Returns:
bossdata.raw.RawImageFile: requested raw image file.
Raises:
ValueError: one of the inputs is invalid.
"""
if band not in ('blue', 'red'):
raise ValueError('Invalid band "{}". Expected blue or red.'.format(band))
camera = band[0] + self.spec_id
return self.exposures.get_raw_image(self.plate, self.mjd, sequence_number, camera,
flavor, finder, mirror)
def get_exposure_hdu(self, exposure_index, camera):
"""Lookup the HDU for one exposure.
This method will not work on "lite" files, which do not include individual
exposures.
Args:
exposure_index(int): Individual exposure to use, specified as a sequence number
starting from zero, for the first exposure, and increasing up to
`self.num_exposures-1`.
camera(str): Which camera to use. Must be one of b1,b2,r1,r2.
Returns:
hdu: The HDU containing data for the requested exposure.
Raises:
RuntimeError: individual exposures not available in lite file.
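
        For example, on a full (non-lite) file::

            hdu = spec.get_exposure_hdu(0, 'b1')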
"""
if self.lite:
raise RuntimeError('individual exposures not available in lite file.')
info = self.exposures.get_info(exposure_index, camera)
return self.hdulist[4 + info['offset']]
def get_pixel_mask(self, exposure_index=None, camera=None):
"""Get the pixel mask for a specified exposure or the combined coadd.
Returns the `and_mask` for coadded spectra. The entire mask is returned, including
any pixels with zero inverse variance.
Args:
exposure_index(int): Individual exposure to use, specified as a sequence number
starting from zero, for the first exposure, and increasing up to
`self.num_exposures-1`. Uses the co-added spectrum when the value is None.
camera(str): Which camera to use. Must be either 'b1', 'b2' (blue) or 'r1', 'r2'
(red) unless exposure_index is None, in which case this argument is ignored.
Returns:
numpy.ndarray: Array of integers, one per pixel, encoding the mask bits defined
in :attr:`bossdata.bits.SPPIXMASK` (see also
http://www.sdss3.org/dr10/algorithms/bitmask_sppixmask.php).
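
        For example, pixels with any pipeline flag set have a nonzero mask
        value::

            mask = spec.get_pixel_mask()
            flagged = mask != 0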
"""
if exposure_index is None:
hdu = self.hdulist[1]
return hdu['and_mask'][:]
else:
hdu = self.get_exposure_hdu(exposure_index, camera)
return hdu['mask'][:]
def get_valid_data(self, exposure_index=None, camera=None, pixel_quality_mask=None,
include_wdisp=False, include_sky=False, use_ivar=False,
use_loglam=False, fiducial_grid=False):
"""Get the valid data for a specified exposure or the combined coadd.
You will probably find yourself using this idiom often::
data = spec.get_valid_data(...)
wlen,flux,dflux = data['wavelength'][:],data['flux'][:],data['dflux'][:]
Args:
exposure_index(int): Individual exposure to use, specified as a sequence number
starting from zero, for the first exposure, and increasing up to
`self.num_exposures-1`. Uses the co-added spectrum when the value is None.
camera(str): Which camera to use. Must be either 'b1', 'b2' (blue) or 'r1', 'r2'
(red) unless exposure_index is None, in which case this argument is ignored.
pixel_quality_mask(int): An integer value interpreted as a bit pattern using the
bits defined in :attr:`bossdata.bits.SPPIXMASK` (see also
http://www.sdss3.org/dr10/algorithms/bitmask_sppixmask.php). Any bits set in
this mask are considered harmless and the corresponding spectrum pixels are
assumed to contain valid data. When accessing the coadded spectrum, this mask
is applied to the AND of the masks for each individual exposure. No mask is
applied if this value is None.
include_wdisp: Include a wavelength dispersion column in the returned data.
include_sky: Include a sky flux column in the returned data.
use_ivar: Replace ``dflux`` with ``ivar`` (inverse variance) in the returned
data.
use_loglam: Replace ``wavelength`` with ``loglam`` (``log10(wavelength)``) in
the returned data.
fiducial_grid: Return co-added data using the :attr:`fiducial wavelength grid
<fiducial_loglam>`. If False, the returned array uses
the native grid of the SpecFile, which generally trims pixels on both ends
that have zero inverse variance. Set this value True to ensure that all
co-added spectra use aligned wavelength grids when this matters.
Returns:
numpy.ma.MaskedArray: Masked array of per-pixel records. Pixels with no valid data
are included but masked. The record for each pixel has at least the following
named fields: wavelength in Angstroms (or loglam), flux and dflux in 1e-17
ergs/s/cm2/Angstrom (or flux and ivar). Wavelength values are strictly
increasing and dflux is calculated as ivar**-0.5 for pixels with valid data.
Optional fields are wdisp in constant-log10-lambda pixels and sky in 1e-17
ergs/s/cm2/Angstrom. The wavelength (or loglam) field is never masked and
all other fields are masked when ivar is zero or a pipeline flag is set (and
not allowed by ``pixel_quality_mask``).
Raises:
ValueError: fiducial grid is not supported for individual exposures.
RuntimeError: co-added wavelength grid is not aligned with the fiducial grid.
"""
# Look up the HDU for this spectrum and its pixel quality bitmap.
if exposure_index is None:
hdu = self.hdulist[1]
pixel_bits = hdu['and_mask'][:]
else:
hdu = self.get_exposure_hdu(exposure_index, camera)
pixel_bits = hdu['mask'][:]
if fiducial_grid:
if exposure_index is not None:
raise ValueError('Fiducial grid not supported for individual exposures.')
loglam = fiducial_loglam
first_index = float(get_fiducial_pixel_index(10.0**hdu['loglam'][0]))
if abs(first_index - round(first_index)) > 0.01:
raise RuntimeError('Wavelength grid not aligned with fiducial grid.')
first_index = int(round(first_index))
trimmed = slice(first_index, first_index + len(pixel_bits))
else:
loglam = hdu['loglam'][:]
trimmed = slice(None)
num_pixels = len(loglam)
# Apply the pixel quality mask, if any.
if pixel_quality_mask is not None:
clear_allowed = np.bitwise_not(np.uint32(pixel_quality_mask))
pixel_bits = np.bitwise_and(pixel_bits, clear_allowed)
# Identify the pixels with valid data.
ivar = hdu['ivar'][:]
bad_pixels = (pixel_bits != 0) | (ivar <= 0.0)
good_pixels = ~bad_pixels
# Create and fill the unmasked structured array of data.
dtype = [('loglam' if use_loglam else 'wavelength', np.float32),
('flux', np.float32), ('ivar' if use_ivar else 'dflux', np.float32)]
if include_wdisp:
dtype.append(('wdisp', np.float32))
if include_sky:
dtype.append(('sky', np.float32))
data = np.zeros(num_pixels, dtype=dtype)
if use_loglam:
data['loglam'][:] = loglam
else:
data['wavelength'][:] = np.power(10.0, loglam)
data['flux'][trimmed][:] = hdu['flux'][:]
if use_ivar:
data['ivar'][trimmed][good_pixels] = ivar[good_pixels]
else:
data['dflux'][trimmed][good_pixels] = 1.0 / np.sqrt(ivar[good_pixels])
if include_wdisp:
data['wdisp'][trimmed] = hdu['wdisp'][:]
if include_sky:
data['sky'][trimmed] = hdu['sky'][:]
if fiducial_grid:
mask = np.ones(num_pixels, dtype=bool)
mask[trimmed][:] = bad_pixels
else:
mask = bad_pixels
result = numpy.ma.MaskedArray(data, mask=mask)
# Wavelength values are always valid.
result['loglam' if use_loglam else 'wavelength'].mask = False
return result
| 48.238908 | 99 | 0.633897 | 3,659 | 28,268 | 4.809784 | 0.157967 | 0.02807 | 0.004773 | 0.005455 | 0.359225 | 0.323712 | 0.308938 | 0.291607 | 0.274618 | 0.270527 | 0 | 0.02163 | 0.272216 | 28,268 | 585 | 100 | 48.321368 | 0.833811 | 0.529609 | 0 | 0.240196 | 0 | 0.004902 | 0.130475 | 0.00399 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053922 | false | 0 | 0.039216 | 0 | 0.151961 | 0.004902 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
503bcc59209a9fc0125aa56f54a3b21d7c33fc40 | 703 | py | Python | cdzforever/urls.py | dvl/cdzforever.net | d20d172d10e11e0bc1df8978dd90d4f4655b157f | [
"MIT"
] | null | null | null | cdzforever/urls.py | dvl/cdzforever.net | d20d172d10e11e0bc1df8978dd90d4f4655b157f | [
"MIT"
] | null | null | null | cdzforever/urls.py | dvl/cdzforever.net | d20d172d10e11e0bc1df8978dd90d4f4655b157f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django_markdown import flatpages
from apps.blog.views import PostListView
admin.autodiscover()
flatpages.register()
urlpatterns = patterns(
'',
url(r'^$', PostListView.as_view(), name='index'),
url(r'^catalogo/', include('apps.catalogo.urls', namespace='catalogo')),
url(r'^manga/', include('apps.manga.urls', namespace='manga')),
url(r'^fb/', include('apps.fbpage.urls', namespace='fb')),
url(r'^pages/', include('django.contrib.flatpages.urls')),
url(r'^markdown/', include('django_markdown.urls')),
url(r'^admin/', include(admin.site.urls)),
)
| 25.107143 | 76 | 0.677098 | 89 | 703 | 5.314607 | 0.382022 | 0.059197 | 0.033827 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001631 | 0.128023 | 703 | 27 | 77 | 26.037037 | 0.769984 | 0.029872 | 0 | 0 | 0 | 0 | 0.242647 | 0.042647 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
503ed0f4a1a9c1f7c25a6bc13ba16d5fac95f478 | 13,387 | py | Python | src/sst/elements/CramSim/tests/VeriMem/test_verimem1.py | sudhanshu2/sst-elements | d658e5e4b26e5725488f9e93528506ddb22072ee | [
"BSD-3-Clause"
] | 58 | 2015-10-05T15:22:27.000Z | 2022-03-31T01:58:36.000Z | src/sst/elements/CramSim/tests/VeriMem/test_verimem1.py | sudhanshu2/sst-elements | d658e5e4b26e5725488f9e93528506ddb22072ee | [
"BSD-3-Clause"
] | 1,453 | 2015-10-07T14:51:06.000Z | 2022-03-31T22:22:28.000Z | src/sst/elements/CramSim/tests/VeriMem/test_verimem1.py | sudhanshu2/sst-elements | d658e5e4b26e5725488f9e93528506ddb22072ee | [
"BSD-3-Clause"
] | 109 | 2015-10-16T22:03:10.000Z | 2022-03-22T23:21:32.000Z | from __future__ import division
import subprocess
import sys
# GLOBAL PARAMS
config_file = "ddr4_verimem.cfg"
config_file_openbank = "ddr4_verimem_openbank.cfg"
DEBUG = len(sys.argv) > 1 and sys.argv[1] == "1"
def run_verimem(config_file, trace_file):
# set the command
sstCmd = "sst --lib-path=.libs/ tests/test_txntrace.py --model-options=\""
sstParams = "--configfile=" + config_file + " traceFile=" + trace_file + "\""
osCmd = sstCmd + sstParams
print (osCmd)
# run SST
p = subprocess.Popen(osCmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, err = p.communicate()
if DEBUG:
print ("My output: ", output)
if err != "":
print ("My error: ", err)
# extract total Txns processed
outputLines = output.split("\n")
totalTxns = 0
for line in outputLines:
if line.find("Total Txns Received:") != -1:
substrIndex = line.find("Total Txns Received: ")+20
receivedStr = line[substrIndex:]
totalTxns += int(receivedStr)
return totalTxns
def run_verimem_openbank(config_file, trace_file):
# set the command
sstCmd = "sst --lib-path=.libs/ tests/test_txntrace.py --model-options=\""
sstParams_openbank = "--configfile=" + config_file_openbank + " traceFile=" + trace_file + "\""
osCmd_openbank = sstCmd + sstParams_openbank
print (osCmd_openbank)
# run SST
p = subprocess.Popen(osCmd_openbank, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, err = p.communicate()
if DEBUG:
print ("My output: ", output)
if err != "":
print ("My error: ", err)
# extract total Txns processed
outputLines = output.split("\n")
totalTxns = 0
for line in outputLines:
if line.find("Total Txns Received:") != -1:
substrIndex = line.find("Total Txns Received: ")+20
receivedStr = line[substrIndex:]
totalTxns += int(receivedStr)
return totalTxns
#Trace Suite 1
def run_suite1(params):
results = ""
#*** READS ONLY ***
totalTxns = run_verimem(params["config_file"], "traces/sst-CramSim-trace_verimem_1_R.trc")
expected_Timing = max(params["nRC"],
(params["nRCD"] + params["nRTP"] + params["nRP"]),
(params["nFAW"] / 4),
params["nRRD_L"],
params["nCCD_L"])
timing = params["stopAtCycle"] / totalTxns
if DEBUG:
print ("Expected Timing: ", expected_Timing)
print ("Actual Timing: ", timing, "\n\n\n")
if ((abs(expected_Timing - timing) / timing) <= 0.10):
results += "[v] Suite 1 - Reads only\n"
else:
results += "[x] Suite 1 - Reads only\n"
#*** WRITES ONLY ***
totalTxns = run_verimem(params["config_file"], "traces/sst-CramSim-trace_verimem_1_W.trc")
expected_Timing = max((params["nRCD"] + params["nCWL"] + (params["nBL"]) + params["nWR"] + params["nRP"]),
params["nRC"],
(params["nFAW"] / 4),
params["nRRD_L"],
params["nCCD_L"])
timing = params["stopAtCycle"] / totalTxns
if DEBUG:
print ("Expected Timing: ", expected_Timing)
print ("Actual Timing: ", timing, "\n\n\n")
if ((abs(expected_Timing - timing) / timing) <= 0.10):
results += "[v] Suite 1 - Writes only\n"
else:
results += "[x] Suite 1 - Writes only\n"
#*** READS & WRITES ***
totalTxns = run_verimem(params["config_file"], "traces/sst-CramSim-trace_verimem_1_RW.trc")
first_timing_option = (params["nRAS"] + params["nRP"] + params["nRCD"] + params["nCWL"] + (params["nBL"] / 2) + params["nWR"] + params["nRP"]) / 2
expected_Timing = max(first_timing_option,
params["nRC"], (params["nFAW"] / 4),
params["nRRD_L"],
params["nCCD_L"])
timing = params["stopAtCycle"] / totalTxns
if DEBUG:
print ("Expected Timing: ", expected_Timing)
print ("Actual Timing: ", timing, "\n\n\n")
if ((abs(expected_Timing - timing) / timing) <= 0.10):
results += "[v] Suite 1 - Reads & Writes\n"
else:
results += "[x] Suite 1 - Reads & Writes\n"
return results
#Trace Suites 2 and 3 do not offer different insights for this sim from Suite 1
# sst --lib-path=.libs/ tests/test_txntrace.py --model-options="--configfile=ddr4_verimem_openbank.cfg traceFile=traces/sst-CramSim-trace_verimem_2_W.trc"
#Trace Suite 2
def run_suite2(params):
results = ""
#*** READS ONLY ***
totalTxns = run_verimem_openbank(params["config_file"], "traces/sst-CramSim-trace_verimem_2_R.trc")
expected_Timing = max(params["nRC"],
(params["nRCD"] + params["nRTP"] + params["nRP"]),
(params["nFAW"] / 4),
params["nRRD_L"],
params["nCCD_L"])
timing = params["stopAtCycle"] / totalTxns
if DEBUG:
print ("Expected Timing: ", expected_Timing)
print ("Actual Timing: ", timing, "\n\n\n")
if ((abs(expected_Timing - timing) / timing) <= 0.10):
results += "[v] Suite 2 - Reads only\n"
else:
results += "[x] Suite 2 - Reads only\n"
#*** WRITES ONLY ***
totalTxns = run_verimem(params["config_file"], "traces/sst-CramSim-trace_verimem_2_W.trc")
expected_Timing = max((params["nRCD"] + params["nCWL"] + (params["nBL"]) + params["nWR"] + params["nRP"]),
params["nRC"],
(params["nFAW"] / 4),
params["nRRD_L"],
params["nCCD_L"])
timing = params["stopAtCycle"] / totalTxns
if DEBUG:
print ("Expected Timing: ", expected_Timing)
print ("Actual Timing: ", timing, "\n\n\n")
if ((abs(expected_Timing - timing) / timing) <= 0.10):
results += "[v] Suite 2 - Writes only\n"
else:
results += "[x] Suite 2 - Writes only\n"
return results
#Trace Suite 4
def run_suite4(params):
results = ""
#*** READS ONLY ***
totalTxns = run_verimem(params["config_file"], "traces/sst-CramSim-trace_verimem_4_R.trc")
expected_Timing = max((params["nRC"] / 4),
((params["nRCD"] + params["nRTP"] + params["nRP"]) / 4),
(params["nFAW"] /4),
params["nRRD_L"],
params["nCCD_L"])
timing = params["stopAtCycle"] / totalTxns
if DEBUG:
print ("Expected Timing: ", expected_Timing)
print ("Actual Timing: ", timing, "\n\n\n")
if ((abs(expected_Timing - timing) / timing) <= 0.10):
results += "[v] Suite 4 - Reads only\n"
else:
results += "[x] Suite 4 - Reads only\n"
#*** WRITES ONLY ***
totalTxns = run_verimem(params["config_file"], "traces/sst-CramSim-trace_verimem_4_W.trc")
expected_Timing = max(( (params["nRCD"] + params["nCWL"] + (params["nBL"] / 2) + params["nWR"] + params["nRP"]) / 4),
(params["nRC"] / 4),
(params["nFAW"] / 4),
params["nRRD_L"],
params["nCCD_L"])
timing = params["stopAtCycle"] / totalTxns
if DEBUG:
print ("Expected Timing: ", expected_Timing)
print ("Actual Timing: ", timing, "\n\n\n")
if ((abs(expected_Timing - timing) / timing) <= 0.10):
results += "[v] Suite 4 - Writes only\n"
else:
results += "[x] Suite 4 - Writes only\n"
results += "[v] Suite 4 - Reads & Writes (Analysis yet to be done)\n"
return results
#Trace Suite 5
def run_suite5(params):
results = ""
#*** READS ONLY ***
totalTxns = run_verimem(params["config_file"], "traces/sst-CramSim-trace_verimem_5_R.trc")
expected_Timing = max((params["nRC"] / params["num_banks"]), ((params["nRCD"] + params["nRTP"] + params["nRP"]) / params["num_banks"]), (params["nFAW"] /4), params["nRRD_S"], params["nCCD_S"])
timing = params["stopAtCycle"] / totalTxns
if DEBUG:
print ("Expected Timing: ", expected_Timing)
print ("Actual Timing: ", timing, "\n\n\n")
if ((abs(expected_Timing - timing)) <= 1):
results += "[v] Suite 5 - Reads only\n"
else:
results += "[x] Suite 5 - Reads only\n"
#*** WRITES ONLY ***
totalTxns = run_verimem(params["config_file"], "traces/sst-CramSim-trace_verimem_5_W.trc")
expected_Timing = max( ( (params["nRCD"] + params["nCWL"] + (params["nBL"] / 2) + params["nWR"] + params["nRP"]) / params["num_banks"]),
(params["nRC"] / params["num_banks"]),
(params["nFAW"] / 4),
params["nRRD_S"],
params["nCCD_S"])
timing = params["stopAtCycle"] / totalTxns
if DEBUG:
print ("Expected Timing: ", expected_Timing)
print ("Actual Timing: ", timing, "\n\n\n")
if ((abs(expected_Timing - timing)) <= 1):
results += "[v] Suite 5 - Writes only\n"
else:
results += "[x] Suite 5 - Writes only\n"
results += "[v] Suite 5 - Reads & Writes (Analysis yet to be done)\n"
return results
#Trace Suite 6
def run_suite6(params):
results = ""
#*** READS ONLY ***
totalTxns = run_verimem(params["config_file"], "traces/sst-CramSim-trace_verimem_6_R.trc")
expected_Timing = max((params["nFAW"] /4),
params["nRRD_S"],
params["nCCD_S"],
(params["nRC"] / 16),
((params["nRCD"] + params["nRTP"] + params["nRP"]) / 16) )
timing = params["stopAtCycle"] / totalTxns
if DEBUG:
print ("Expected Timing: ", expected_Timing)
print ("Actual Timing: ", timing, "\n\n\n")
if (((expected_Timing - timing)) <= 1):
results += "[v] Suite 6 - Reads only\n"
else:
results += "[x] Suite 6 - Reads only\n"
#*** WRITES ONLY ***
totalTxns = run_verimem(params["config_file"], "traces/sst-CramSim-trace_verimem_6_W.trc")
expected_Timing = max((params["nFAW"] /4),
params["nRRD_S"],
params["nCCD_S"],
((params["nRCD"] + params["nCWL"] + (params["nBL"] / 2) + params["nWR"] + params["nRP"]) / 16) ,
(params["nRC"] / 16) )
timing = params["stopAtCycle"] / totalTxns
if DEBUG:
print ("Expected Timing: ", expected_Timing)
print ("Actual Timing: ", timing, "\n\n\n")
if ((abs(expected_Timing - timing)) <= 1):
results += "[v] Suite 6 - Writes only\n"
else:
results += "[x] Suite 6 - Writes only\n"
results += "[v] Suite 6 - Reads & Writes (Analysis yet to be done)\n"
return results
def sanitize_params(params):
return_params = {}
return_params["stopAtCycle"] = int(params["stopAtCycle"].replace("ns\n", ""))
channels = int(params["numChannels"].replace("\n", ""))
ranksPerChannel = int(params["numRanksPerChannel"].replace("\n", ""))
bankGroupsPerRank = int(params["numBankGroupsPerRank"].replace("\n", ""))
banksPerBankGroup = int(params["numBanksPerBankGroup"].replace("\n", ""))
return_params["num_banks"] = channels * ranksPerChannel * bankGroupsPerRank * banksPerBankGroup
return_params["nRC"] = int(params["nRC"].replace("\n", ""))
return_params["nRRD"] = int(params["nRRD"].replace("\n", ""))
return_params["nRRD_L"] = int(params["nRRD_L"].replace("\n", ""))
return_params["nRRD_S"] = int(params["nRRD_S"].replace("\n", ""))
return_params["nRCD"] = int(params["nRCD"].replace("\n", ""))
return_params["nCCD"] = int(params["nCCD"].replace("\n", ""))
return_params["nCCD_L"] = int(params["nCCD_L"].replace("\n", ""))
return_params["nCCD_L_WR"] = int(params["nCCD_L_WR"].replace("\n", ""))
return_params["nCCD_S"] = int(params["nCCD_S"].replace("\n", ""))
return_params["nAL"] = int(params["nAL"].replace("\n", ""))
return_params["nCL"] = int(params["nCL"].replace("\n", ""))
return_params["nCWL"] = int(params["nCWL"].replace("\n", ""))
return_params["nWR"] = int(params["nWR"].replace("\n", ""))
return_params["nWTR"] = int(params["nWTR"].replace("\n", ""))
return_params["nWTR_L"] = int(params["nWTR_L"].replace("\n", ""))
return_params["nWTR_S"] = int(params["nWTR_S"].replace("\n", ""))
return_params["nRTW"] = int(params["nRTW"].replace("\n", ""))
return_params["nEWTR"] = int(params["nEWTR"].replace("\n", ""))
return_params["nERTW"] = int(params["nERTW"].replace("\n", ""))
return_params["nEWTW"] = int(params["nEWTW"].replace("\n", ""))
return_params["nERTR"] = int(params["nERTR"].replace("\n", ""))
return_params["nRAS"] = int(params["nRAS"].replace("\n", ""))
return_params["nRTP"] = int(params["nRTP"].replace("\n", ""))
return_params["nRP"] = int(params["nRP"].replace("\n", ""))
return_params["nRFC"] = int(params["nRFC"].replace("\n", ""))
return_params["nREFI"] = int(params["nREFI"].replace("\n", ""))
return_params["nFAW"] = int(params["nFAW"].replace("\n", ""))
return_params["nBL"] = int(params["nBL"].replace("\n", ""))
return return_params
# get config params
g_params = {}
configFileInstance = open(config_file, 'r')
for line in configFileInstance:
tokens = line.split(' ')
g_params[tokens[0]] = tokens[1]
g_params = sanitize_params(g_params)
g_params["config_file"] = config_file
print ("-----RUNNING VERIMEM-----")
print(run_suite1(g_params))
print(run_suite2(g_params))
print(run_suite4(g_params))
print(run_suite5(g_params))
print(run_suite6(g_params))
print("done.\n")
| 34.237852 | 196 | 0.594233 | 1,630 | 13,387 | 4.73681 | 0.1 | 0.079782 | 0.052584 | 0.072529 | 0.739412 | 0.698096 | 0.663515 | 0.61119 | 0.597591 | 0.591633 | 0 | 0.011752 | 0.218197 | 13,387 | 390 | 197 | 34.325641 | 0.72597 | 0.047808 | 0 | 0.539623 | 0 | 0.007547 | 0.244182 | 0.040094 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030189 | false | 0 | 0.011321 | 0 | 0.071698 | 0.132075 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
503f6356b71c7a741568e839c2ff6d4c94ad4cd3 | 785 | py | Python | setup.py | wingnut29/Nebula | 3548b48720c069a552e2e9732bf6fa79ff7ff5bb | [
"MIT"
] | 1 | 2020-05-14T22:33:38.000Z | 2020-05-14T22:33:38.000Z | setup.py | wingnut29/Nebula | 3548b48720c069a552e2e9732bf6fa79ff7ff5bb | [
"MIT"
] | 1 | 2020-05-14T18:41:59.000Z | 2020-05-14T20:56:49.000Z | setup.py | wingnut29/Nebula | 3548b48720c069a552e2e9732bf6fa79ff7ff5bb | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
packages = ['src']
with open("README.md") as readme:
long_description = readme.read()
with open("requirements.txt") as requirements:
install_requires = [line.strip() for line in requirements]
with open('resources/backend/version.txt', 'rt') as ver:
VERSION = ver.read()
# with open('resources/backend/version.txt', 'w') as ver:
# ver.write(VERSION)
setup(
name='nebula',
version=VERSION,
packages=find_packages(),
url='https://github.com/wingnut29/Nebula',
license='MIT',
author='Justin Mullins',
author_email='jumullins@comcast.net',
description='Study aid for advancement',
long_description=long_description,
install_requires=install_requires,
include_package_data=True
)
| 25.322581 | 62 | 0.707006 | 97 | 785 | 5.608247 | 0.546392 | 0.058824 | 0.044118 | 0.088235 | 0.125 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0.003035 | 0.16051 | 785 | 30 | 63 | 26.166667 | 0.822458 | 0.099363 | 0 | 0 | 0 | 0 | 0.231534 | 0.071023 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.047619 | 0 | 0.047619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50442f53be8111b0c31e800952e4165e8aeee54e | 1,198 | py | Python | tests/test_n_plus_one.py | MacHu-GWU/learn_pony-project | b338e192135a117ac460b2e4187f9fb7196a42e8 | [
"MIT"
] | null | null | null | tests/test_n_plus_one.py | MacHu-GWU/learn_pony-project | b338e192135a117ac460b2e4187f9fb7196a42e8 | [
"MIT"
] | null | null | null | tests/test_n_plus_one.py | MacHu-GWU/learn_pony-project | b338e192135a117ac460b2e4187f9fb7196a42e8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Pony solves the N+1 select problem very well.
"""
import pytest
from pony import orm
db = orm.Database()
db.bind(provider="sqlite", filename=":memory:")
class Customer(db.Entity):
id = orm.PrimaryKey(int)
name = orm.Required(str)
orders = orm.Set(lambda: Order)
class Order(db.Entity):
# id = orm.PrimaryKey(int)
customer = orm.Required(Customer)
db.generate_mapping(create_tables=True)
@orm.db_session
def insert_test_data():
customer1 = Customer(id=1, name="Alice")
customer2 = Customer(id=2, name="Bob")
customer3 = Customer(id=3, name="Cathy")
order1 = Order(id=1, customer=customer1)
order2 = Order(id=2, customer=customer2)
order3 = Order(id=3, customer=customer3)
order4 = Order(id=4, customer=customer1)
order5 = Order(id=5, customer=customer1)
orm.commit()
insert_test_data()
@orm.db_session
def test():
query = orm.select(c for c in Customer)
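    # Accessing c.orders below should not trigger one SELECT per customer:
    # Pony loads the collections for all objects fetched by the same query
    # in bulk, which is the behavior this test exercises.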
for c, n_order in zip(query, [3, 1, 1]):
assert len(c.orders) == n_order
if __name__ == "__main__":
import os
basename = os.path.basename(__file__)
pytest.main([basename, "-s", "--tb=native"])
| 20.305085 | 48 | 0.657763 | 170 | 1,198 | 4.505882 | 0.476471 | 0.045692 | 0.02611 | 0.033943 | 0.067885 | 0.067885 | 0 | 0 | 0 | 0 | 0 | 0.026749 | 0.188648 | 1,198 | 58 | 49 | 20.655172 | 0.761317 | 0.092654 | 0 | 0.0625 | 0 | 0 | 0.044568 | 0 | 0 | 0 | 0 | 0 | 0.03125 | 1 | 0.0625 | false | 0 | 0.09375 | 0 | 0.34375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50460256efaef5de6ac8b5662ecb8d13a366ede3 | 1,801 | py | Python | .github/workflows/update-supported-enterprise-server-versions/update.py | dandion/codeql-action | 33927cc1c923f2df348745ce91adec9027440753 | [
"MIT"
] | 541 | 2020-04-28T15:50:42.000Z | 2022-03-31T15:08:31.000Z | .github/workflows/update-supported-enterprise-server-versions/update.py | dandion/codeql-action | 33927cc1c923f2df348745ce91adec9027440753 | [
"MIT"
] | 525 | 2020-04-29T15:05:08.000Z | 2022-03-31T10:13:21.000Z | .github/workflows/update-supported-enterprise-server-versions/update.py | dandion/codeql-action | 33927cc1c923f2df348745ce91adec9027440753 | [
"MIT"
] | 178 | 2020-04-29T09:39:13.000Z | 2022-03-28T16:12:12.000Z | #!/usr/bin/env python3
import datetime
import json
import os
import pathlib
import semver
_API_COMPATIBILITY_PATH = pathlib.Path(__file__).absolute().parents[3] / "src" / "api-compatibility.json"
_ENTERPRISE_RELEASES_PATH = pathlib.Path(os.environ["ENTERPRISE_RELEASES_PATH"])
_RELEASE_FILE_PATH = _ENTERPRISE_RELEASES_PATH / "releases.json"
_FIRST_SUPPORTED_RELEASE = semver.VersionInfo.parse("2.22.0") # Versions older than this did not include Code Scanning.
def main():
api_compatibility_data = json.loads(_API_COMPATIBILITY_PATH.read_text())
releases = json.loads(_RELEASE_FILE_PATH.read_text())
oldest_supported_release = None
newest_supported_release = semver.VersionInfo.parse(api_compatibility_data["maximumVersion"] + ".0")
for release_version_string, release_data in releases.items():
release_version = semver.VersionInfo.parse(release_version_string + ".0")
if release_version < _FIRST_SUPPORTED_RELEASE:
continue
if release_version > newest_supported_release:
feature_freeze_date = datetime.date.fromisoformat(release_data["feature_freeze"])
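            # Start supporting a release once its feature freeze is less
            # than two weeks away.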
if feature_freeze_date < datetime.date.today() + datetime.timedelta(weeks=2):
newest_supported_release = release_version
if oldest_supported_release is None or release_version < oldest_supported_release:
end_of_life_date = datetime.date.fromisoformat(release_data["end"])
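            # Track the oldest release that has not yet reached end of life.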
if end_of_life_date > datetime.date.today():
oldest_supported_release = release_version
api_compatibility_data = {
"minimumVersion": f"{oldest_supported_release.major}.{oldest_supported_release.minor}",
"maximumVersion": f"{newest_supported_release.major}.{newest_supported_release.minor}",
}
_API_COMPATIBILITY_PATH.write_text(json.dumps(api_compatibility_data, sort_keys=True) + "\n")
if __name__ == "__main__":
main()
| 40.931818 | 119 | 0.800666 | 231 | 1,801 | 5.813853 | 0.337662 | 0.154877 | 0.098287 | 0.049144 | 0.172748 | 0.084885 | 0 | 0 | 0 | 0 | 0 | 0.005535 | 0.097168 | 1,801 | 43 | 120 | 41.883721 | 0.820418 | 0.042754 | 0 | 0 | 0 | 0 | 0.157375 | 0.102207 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.151515 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5046873b08c993df8e131d5392822b33b0ef17f0 | 4,257 | py | Python | experiments/simulation_experiments/subset_response/simulation_subset_response_nmf_cglvm.py | vishalbelsare/cplvm | d8f715258b2c363beb2d59e95e5b5b9e73b503a7 | [
"MIT"
] | null | null | null | experiments/simulation_experiments/subset_response/simulation_subset_response_nmf_cglvm.py | vishalbelsare/cplvm | d8f715258b2c363beb2d59e95e5b5b9e73b503a7 | [
"MIT"
] | null | null | null | experiments/simulation_experiments/subset_response/simulation_subset_response_nmf_cglvm.py | vishalbelsare/cplvm | d8f715258b2c363beb2d59e95e5b5b9e73b503a7 | [
"MIT"
] | null | null | null | import functools
import warnings
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import os
from scipy.stats import poisson
from scipy.special import logsumexp
from sklearn.neighbors import DistanceMetric
from sklearn.decomposition import PCA, NMF
from sklearn.metrics import silhouette_score
from pcpca import CPCA, PCPCA
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
from tensorflow_probability import bijectors as tfb
from cplvm import CGLVM
from cplvm import CGLVMMFGaussianApprox
# import sys
# sys.path.append("../../models")
# from clvm_tfp_poisson import fit_model as fit_clvm
# from clvm_tfp_poisson_link import fit_model_map as fit_clvm_link
tf.enable_v2_behavior()
warnings.filterwarnings("ignore")
import matplotlib
font = {"size": 30}
matplotlib.rc("font", **font)
matplotlib.rcParams["text.usetex"] = True
if __name__ == "__main__":
N_REPEATS = 5
sil_scores_clvm = []
sil_scores_pca = []
sil_scores_nmf = []
sil_scores_cpca = []
sil_scores_cglvm = []
# for _ in range(N_REPEATS):
num_datapoints_x = 1000
num_datapoints_y = 1000
data_dim = 100
latent_dim_shared = 2
latent_dim_target = 2
a, b = 1, 1
actual_s = np.random.gamma(a, 1 / b, size=(data_dim, latent_dim_shared))
actual_w = np.random.gamma(a, 1 / b, size=(data_dim, latent_dim_target))
# actual_w[-data_dim//frac_response:, 0] = np.random.gamma(40, 1/5, size=(data_dim//frac_response))
actual_zx = np.random.gamma(a, 1 / b, size=(latent_dim_shared, num_datapoints_x))
actual_zy = np.random.gamma(a, 1 / b, size=(latent_dim_shared, num_datapoints_y))
actual_ty = np.random.gamma(a, 1 / b, size=(latent_dim_target, num_datapoints_y))
actual_ty[0, : num_datapoints_y // 2] = np.random.gamma(
1, 1 / 20, size=(num_datapoints_y // 2)
)
actual_ty[1, num_datapoints_y // 2 :] = np.random.gamma(
1, 1 / 20, size=(num_datapoints_y // 2)
)
# actual_w[-data_dim//frac_response:, 0] = np.random.gamma(20, 1/5, size=(data_dim//frac_response))
# actual_w[0, :] = np.random.gamma(20, 1/5, size=latent_dim_target)
x_train = np.random.poisson(actual_s @ actual_zx)
y_train = np.random.poisson(actual_s @ actual_zy + actual_w @ actual_ty)
labs = np.zeros(num_datapoints_y)
labs[num_datapoints_y // 2 :] = 1
group1_idx = np.where(labs == 0)[0]
group2_idx = np.where(labs == 1)[0]
plt.figure(figsize=(14, 7))
######### NMF #########
reduced_data = NMF(n_components=latent_dim_target).fit_transform(
np.concatenate([x_train, y_train], axis=1).T
)
fg_reduced = reduced_data[num_datapoints_x:, :]
plt.subplot(121)
plt.scatter(
fg_reduced[group1_idx, 0],
fg_reduced[group1_idx, 1],
color="green",
label="Foreground group 1",
)
plt.scatter(
fg_reduced[group2_idx, 0],
fg_reduced[group1_idx, 1],
color="orange",
label="Foreground group 2",
)
plt.xlabel("Latent dim 1")
plt.ylabel("Latent dim 2")
plt.title("NMF")
plt.legend(fontsize=20)
######### CGLVM #########
cglvm = CGLVM(k_shared=2, k_foreground=2, compute_size_factors=False)
approx_model = CGLVMMFGaussianApprox(
X=x_train,
Y=y_train,
k_shared=2,
k_foreground=2,
num_test_genes=0,
is_H0=False,
compute_size_factors=False,
)
results = cglvm.fit_model_vi(x_train, y_train, approx_model, is_H0=False)
fg_reduced = results["approx_model"].qty_mean.numpy().T
plt.subplot(122)
plt.scatter(
fg_reduced[group1_idx, 0],
fg_reduced[group1_idx, 1],
color="green",
label="Foreground group 1",
)
plt.scatter(
fg_reduced[group2_idx, 0],
fg_reduced[group1_idx, 1],
color="orange",
label="Foreground group 2",
)
plt.xlabel("Latent dim 1")
plt.ylabel("Latent dim 2")
plt.title("CGLVM")
plt.legend(fontsize=20)
plt.tight_layout()
plt.savefig("../out/simulation_scatter_nmf_cglvm.png")
plt.show()
plt.close()
import ipdb
ipdb.set_trace()
| 26.773585 | 103 | 0.664553 | 619 | 4,257 | 4.308562 | 0.260097 | 0.04387 | 0.048744 | 0.040495 | 0.381702 | 0.370079 | 0.355081 | 0.330334 | 0.298088 | 0.287214 | 0 | 0.031836 | 0.210477 | 4,257 | 158 | 104 | 26.943038 | 0.761678 | 0.107588 | 0 | 0.25 | 0 | 0 | 0.06235 | 0.010392 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1875 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
504b0eede996ec5def9bcf85a2eef0b600663294 | 1,556 | py | Python | pychron/logger/tasks/logger_panes.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 31 | 2016-03-07T02:38:17.000Z | 2022-02-14T18:23:43.000Z | pychron/logger/tasks/logger_panes.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 1,626 | 2015-01-07T04:52:35.000Z | 2022-03-25T19:15:59.000Z | pychron/logger/tasks/logger_panes.py | UIllinoisHALPychron/pychron | f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc | [
"Apache-2.0"
] | 26 | 2015-05-23T00:10:06.000Z | 2022-03-07T16:51:57.000Z | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from pyface.tasks.traits_task_pane import TraitsTaskPane
from traits.api import List, Any
from traitsui.api import View, UItem, ListEditor
# ============= standard library imports ========================
# ============= local library imports ==========================
class DisplayPane(TraitsTaskPane):
loggers = List
selected = Any
def traits_view(self):
v = View(
UItem(
"loggers",
editor=ListEditor(
use_notebook=True, page_name=".title", selected="selected"
),
style="custom",
)
)
return v
# ============= EOF =============================================
| 33.106383 | 81 | 0.52892 | 155 | 1,556 | 5.245161 | 0.651613 | 0.073801 | 0.03198 | 0.03936 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006478 | 0.206298 | 1,556 | 46 | 82 | 33.826087 | 0.651822 | 0.616967 | 0 | 0 | 0 | 0 | 0.046875 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.222222 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
504c6bf651b01cc71217d125895a87a5af3690c6 | 4,477 | py | Python | official_scorer.py | begab/fca_hypernymy | 3bc00a6eec19f4dd9bcb3410d3f7f40dc56ca529 | [
"MIT"
] | 1 | 2021-01-01T16:39:44.000Z | 2021-01-01T16:39:44.000Z | official_scorer.py | begab/fca_hypernymy | 3bc00a6eec19f4dd9bcb3410d3f7f40dc56ca529 | [
"MIT"
] | 7 | 2018-01-31T15:14:32.000Z | 2018-04-16T08:47:20.000Z | official_scorer.py | begab/fca_hypernymy | 3bc00a6eec19f4dd9bcb3410d3f7f40dc56ca529 | [
"MIT"
] | 2 | 2018-01-28T15:06:48.000Z | 2021-01-01T16:39:45.000Z | # -*- coding: utf-8 -*-
# Rank metrics from https://gist.github.com/bwhite/3726239
import sys
import numpy as np
def mean_reciprocal_rank(r):
"""Score is reciprocal of the rank of the first relevant item
First element is 'rank 1'. Relevance is binary (nonzero is relevant).
Example from http://en.wikipedia.org/wiki/Mean_reciprocal_rank
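    For instance, ``mean_reciprocal_rank([0, 0, 1])`` returns ``1/3``
    because the first relevant item appears at rank 3.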
Args:
r: Relevance scores (list or numpy) in rank order
(first element is the first item)
Returns:
Mean reciprocal rank
"""
r = np.asarray(r).nonzero()[0]
return 1. / (r[0] + 1) if r.size else 0.
def precision_at_k(r, k, n):
"""Score is precision @ k
Relevance is binary (nonzero is relevant).
    Args:
        r: Relevance scores (list or numpy) in rank order
            (first element is the first item)
        k: Rank position at which precision is evaluated
        n: Total number of relevant (gold) items for the query
Returns:
Precision @ k
Raises:
ValueError: len(r) must be >= k
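
    For instance, ``precision_at_k([1, 0, 1], 2, 2)`` evaluates to
    ``(mean([1, 0]) * 2) / min(2, 2) = 0.5``.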
"""
assert k >= 1
r = np.asarray(r)[:k] != 0
if r.size != k:
raise ValueError('Relevance score length < k')
return (np.mean(r)*k)/min(k,n)
# Modified from the first version. Now the gold elements are taken into account
def average_precision(r,n):
"""Score is average precision (area under PR curve)
Relevance is binary (nonzero is relevant).
    Args:
        r: Relevance scores (list or numpy) in rank order
            (first element is the first item)
        n: Total number of relevant (gold) items for the query
Returns:
Average precision
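
    For instance, with ``r = [1, 0, 1]`` and ``n = 2`` the precisions at
    ranks 1-3 are 1.0, 0.5 and 1.0, so the average precision is 2.5/3.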
"""
r = np.asarray(r) != 0
out = [precision_at_k(r, k + 1, n) for k in range(r.size)]
#Modified from the first version (removed "if r[k]"). All elements (zero and nonzero) are taken into account
if not out:
return 0.
return np.mean(out)
def mean_average_precision(r,n):
"""Score is mean average precision
Relevance is binary (nonzero is relevant).
    Args:
        r: Relevance scores (list or numpy) in rank order
            (first element is the first item)
        n: Total number of relevant (gold) items for the query
Returns:
Mean average precision
"""
return average_precision(r,n)
def get_hypernyms(line, is_gold=True, limit=15):
    if is_gold:
valid_hyps = line.strip().split('\t')
return valid_hyps
else:
linesplit=line.strip().split('\t')
cand_hyps=[]
for hyp in linesplit[:limit]:
hyp_lower=hyp.lower()
if hyp_lower not in cand_hyps: cand_hyps.append(hyp_lower)
return cand_hyps
def return_official_scores(gold, predictions, limit=15):
fgold = open(gold, 'r')
fpredictions = open(predictions, 'r')
goldls = fgold.readlines()
predls = fpredictions.readlines()
if len(goldls)!=len(predls): sys.exit('ERROR: Number of lines in gold and output files differ')
all_scores = []
scores_names = ['MRR', 'MAP', 'P@1', 'P@3', 'P@5', 'P@15']
for i in range(len(goldls)):
goldline = goldls[i]
predline = predls[i]
avg_pat1 = []
avg_pat2 = []
avg_pat3 = []
avg_pat4 = []
gold_hyps = get_hypernyms(goldline, is_gold=True)
pred_hyps = get_hypernyms(predline, is_gold=False)
gold_hyps_n = len(gold_hyps)
r = [0 for i in range(limit)]
for j in range(len(pred_hyps)):
if j < gold_hyps_n:
pred_hyp = pred_hyps[j]
if pred_hyp in gold_hyps:
r[j] = 1
avg_pat1.append(precision_at_k(r,1,gold_hyps_n))
avg_pat2.append(precision_at_k(r,3,gold_hyps_n))
avg_pat3.append(precision_at_k(r,5,gold_hyps_n))
avg_pat4.append(precision_at_k(r,15,gold_hyps_n))
mrr_score_numb = mean_reciprocal_rank(r)
map_score_numb = mean_average_precision(r,gold_hyps_n)
avg_pat1_numb = sum(avg_pat1)/len(avg_pat1)
avg_pat2_numb = sum(avg_pat2)/len(avg_pat2)
avg_pat3_numb = sum(avg_pat3)/len(avg_pat3)
avg_pat4_numb = sum(avg_pat4)/len(avg_pat4)
scores_results = [mrr_score_numb, map_score_numb, avg_pat1_numb, avg_pat2_numb, avg_pat3_numb, avg_pat4_numb]
all_scores.append(scores_results)
evaluation_metrics = {}
for k in range(len(scores_names)):
evaluation_metrics[scores_names[k]] = sum([score_list[k] for score_list in all_scores]) / len(all_scores)
return evaluation_metrics
| 33.162963 | 129 | 0.603529 | 634 | 4,477 | 4.07571 | 0.219243 | 0.03096 | 0.024381 | 0.030186 | 0.252322 | 0.191176 | 0.158669 | 0.158669 | 0.158669 | 0.158669 | 0 | 0.018653 | 0.2935 | 4,477 | 134 | 130 | 33.410448 | 0.798293 | 0.283672 | 0 | 0 | 0 | 0 | 0.03437 | 0 | 0 | 0 | 0 | 0 | 0.014286 | 1 | 0.085714 | false | 0 | 0.028571 | 0 | 0.228571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
504d2d1b52019fafeb22db465bc5daf677a0d2b2 | 1,296 | py | Python | demo/versus.py | damanjitsingh/StackExchange-python- | 4e3c045bbfc5e8df76ad5be4dd6d41681c10a8cb | [
"BSD-3-Clause"
] | 163 | 2015-01-03T14:27:45.000Z | 2022-03-24T14:30:37.000Z | demo/versus.py | damanjitsingh/StackExchange-python- | 4e3c045bbfc5e8df76ad5be4dd6d41681c10a8cb | [
"BSD-3-Clause"
] | 41 | 2015-02-02T21:11:59.000Z | 2021-12-05T15:17:30.000Z | demo/versus.py | damanjitsingh/StackExchange-python- | 4e3c045bbfc5e8df76ad5be4dd6d41681c10a8cb | [
"BSD-3-Clause"
] | 64 | 2015-01-30T10:32:07.000Z | 2021-09-03T09:30:22.000Z | #!/usr/bin/env python
from __future__ import print_function
import sys
sys.path.append('.')
sys.path.append('..')
import stackexchange, stackauth
if len(sys.argv) < 3:
print('Usage: versus.py YOUR_SO_UID THEIR_SO_UID')
sys.exit(1)
so = stackexchange.Site(stackexchange.StackOverflow, impose_throttling=True)
user1, user2 = (int(x) for x in sys.argv[1:3])
rep1, rep2 = {}, {}
username1, username2 = (so.user(x).display_name for x in (user1, user2))
total_rep1, total_rep2 = 0, 0
sites = []
for site in stackauth.StackAuth().api_associated(so, user1):
rep1[site.on_site.name] = site.reputation
sites.append(site.on_site.name)
for site in stackauth.StackAuth().api_associated(so, user2):
rep2[site.on_site.name] = site.reputation
for site in sites:
total_rep1 += rep1[site]
if site in rep2:
total_rep2 += rep2[site]
max_user = username1
max_rep, other_rep = rep1[site], rep2.get(site, 0)
if rep2.get(site, 0) > rep1[site]:
max_user = username2
max_rep, other_rep = other_rep, max_rep
diff = max_rep - other_rep
print('%s: %s wins (+%d)' % (site, max_user, diff))
print('Overall: %s wins (+%d)' % (username1 if total_rep1 >= total_rep2 else username2, max(total_rep1, total_rep2) - min(total_rep1, total_rep2)))
| 28.173913 | 147 | 0.683642 | 199 | 1,296 | 4.266332 | 0.316583 | 0.053004 | 0.06596 | 0.084806 | 0.1649 | 0.1649 | 0.09894 | 0.09894 | 0 | 0 | 0 | 0.037453 | 0.175926 | 1,296 | 45 | 148 | 28.8 | 0.757491 | 0.015432 | 0 | 0 | 0 | 0 | 0.0652 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.096774 | 0 | 0.096774 | 0.129032 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
504d73f94f1f4ce5b9369ba0722a39e17b31c859 | 2,991 | py | Python | tests/example_envs/test_tag_continuous.py | salesforce/warp-drive | 178873a65cb016722f082ac72314cef6cd32dcfa | [
"BSD-3-Clause"
] | 255 | 2021-09-01T21:28:26.000Z | 2022-03-28T14:18:45.000Z | tests/example_envs/test_tag_continuous.py | salesforce/warp-drive | 178873a65cb016722f082ac72314cef6cd32dcfa | [
"BSD-3-Clause"
] | 16 | 2021-09-02T04:58:10.000Z | 2022-02-23T00:34:55.000Z | tests/example_envs/test_tag_continuous.py | salesforce/warp-drive | 178873a65cb016722f082ac72314cef6cd32dcfa | [
"BSD-3-Clause"
] | 38 | 2021-09-02T03:19:50.000Z | 2022-03-15T18:40:37.000Z | # Copyright (c) 2021, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root
# or https://opensource.org/licenses/BSD-3-Clause
import unittest
import numpy as np
from example_envs.tag_continuous.tag_continuous import TagContinuous
from warp_drive.env_cpu_gpu_consistency_checker import EnvironmentCPUvsGPU
# Env configs for testing
env_configs = {
"test1": {
"num_taggers": 2,
"num_runners": 3,
"max_acceleration": 1,
"max_turn": np.pi / 4,
"num_acceleration_levels": 3,
"num_turn_levels": 3,
"grid_length": 10,
"episode_length": 100,
"seed": 274880,
"skill_level_runner": 1,
"skill_level_tagger": 1,
"use_full_observation": True,
"runner_exits_game_after_tagged": True,
"tagging_distance": 0.0,
},
"test2": {
"num_taggers": 4,
"num_runners": 1,
"max_acceleration": 0.05,
"max_turn": np.pi / 4,
"num_acceleration_levels": 3,
"num_turn_levels": 3,
"grid_length": 10,
"episode_length": 100,
"step_penalty_for_tagger": -0.1,
"seed": 428096,
"skill_level_runner": 1,
"skill_level_tagger": 2,
"use_full_observation": False,
"runner_exits_game_after_tagged": False,
"tagging_distance": 0.25,
},
"test3": {
"num_taggers": 1,
"num_runners": 4,
"max_acceleration": 2,
"max_turn": np.pi / 2,
"num_acceleration_levels": 3,
"num_turn_levels": 3,
"grid_length": 10,
"episode_length": 100,
"step_reward_for_runner": 0.1,
"seed": 654208,
"skill_level_runner": 1,
"skill_level_tagger": 0.5,
"use_full_observation": False,
"runner_exits_game_after_tagged": True,
},
"test4": {
"num_taggers": 3,
"num_runners": 2,
"max_acceleration": 0.05,
"max_turn": np.pi,
"num_acceleration_levels": 3,
"num_turn_levels": 3,
"grid_length": 10,
"episode_length": 100,
"seed": 121024,
"skill_level_runner": 0.5,
"skill_level_tagger": 1,
"use_full_observation": True,
"runner_exits_game_after_tagged": False,
},
}
class MyTestCase(unittest.TestCase):
"""
CPU v GPU consistency unit tests
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.testing_class = EnvironmentCPUvsGPU(
dual_mode_env_class=TagContinuous,
env_configs=env_configs,
num_envs=2,
num_episodes=2,
use_gpu_testing_mode=True,
)
def test_env_consistency(self):
try:
self.testing_class.test_env_reset_and_step(seed=274880)
except AssertionError:
self.fail("TagContinuous environment consistency tests failed")
| 29.038835 | 75 | 0.597793 | 353 | 2,991 | 4.716714 | 0.339943 | 0.033634 | 0.021622 | 0.026426 | 0.403604 | 0.403604 | 0.392793 | 0.342943 | 0.314715 | 0.255856 | 0 | 0.050444 | 0.284186 | 2,991 | 102 | 76 | 29.323529 | 0.72723 | 0.088933 | 0 | 0.388235 | 0 | 0 | 0.348743 | 0.095044 | 0 | 0 | 0 | 0 | 0.011765 | 1 | 0.023529 | false | 0 | 0.047059 | 0 | 0.082353 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
504e2b2290bbec5a9668105535731c6180ac4253 | 8,528 | py | Python | tests/test_encryption/test_encrypted_columns.py | voice1/ormar | 7d0826392ee868f8fe1f10bd1540aee104b0a59f | [
"MIT"
] | null | null | null | tests/test_encryption/test_encrypted_columns.py | voice1/ormar | 7d0826392ee868f8fe1f10bd1540aee104b0a59f | [
"MIT"
] | null | null | null | tests/test_encryption/test_encrypted_columns.py | voice1/ormar | 7d0826392ee868f8fe1f10bd1540aee104b0a59f | [
"MIT"
] | null | null | null | # type: ignore
import base64
import decimal
import hashlib
import uuid
import datetime
from typing import Any
import databases
import pytest
import sqlalchemy
import ormar
from ormar import ModelDefinitionError, NoMatch
from ormar.fields.sqlalchemy_encrypted import EncryptedString
from tests.settings import DATABASE_URL
database = databases.Database(DATABASE_URL)
metadata = sqlalchemy.MetaData()
class BaseMeta(ormar.ModelMeta):
metadata = metadata
database = database
default_fernet = dict(
encrypt_secret="asd123", encrypt_backend=ormar.EncryptBackends.FERNET,
)
class DummyBackend(ormar.fields.EncryptBackend):
def _initialize_backend(self, secret_key: bytes) -> None:
pass
def encrypt(self, value: Any) -> str:
return value
def decrypt(self, value: Any) -> str:
return value
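# DummyBackend above is a pass-through backend used to exercise
# EncryptBackends.CUSTOM: a custom backend subclasses
# ormar.fields.EncryptBackend and implements _initialize_backend, encrypt and
# decrypt.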
class Author(ormar.Model):
class Meta(BaseMeta):
tablename = "authors"
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100, **default_fernet)
uuid_test = ormar.UUID(default=uuid.uuid4, uuid_format="string")
uuid_test2 = ormar.UUID(nullable=True, uuid_format="string")
password: str = ormar.String(
max_length=128,
encrypt_secret="udxc32",
encrypt_backend=ormar.EncryptBackends.HASH,
)
birth_year: int = ormar.Integer(
nullable=True,
encrypt_secret="secure89key%^&psdijfipew",
encrypt_backend=ormar.EncryptBackends.FERNET,
)
test_text: str = ormar.Text(default="", **default_fernet)
test_bool: bool = ormar.Boolean(nullable=False, **default_fernet)
test_float: float = ormar.Float(**default_fernet)
test_float2: float = ormar.Float(nullable=True, **default_fernet)
test_datetime = ormar.DateTime(default=datetime.datetime.now, **default_fernet)
test_date = ormar.Date(default=datetime.date.today, **default_fernet)
test_time = ormar.Time(default=datetime.time, **default_fernet)
test_json = ormar.JSON(default={}, **default_fernet)
test_bigint: int = ormar.BigInteger(default=0, **default_fernet)
test_smallint: int = ormar.SmallInteger(default=0, **default_fernet)
test_decimal = ormar.Decimal(scale=2, precision=10, **default_fernet)
test_decimal2 = ormar.Decimal(max_digits=10, decimal_places=2, **default_fernet)
custom_backend: str = ormar.String(
max_length=200,
encrypt_secret="asda8",
encrypt_backend=ormar.EncryptBackends.CUSTOM,
encrypt_custom_backend=DummyBackend,
)
class Hash(ormar.Model):
class Meta(BaseMeta):
tablename = "hashes"
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(
max_length=128,
encrypt_secret="udxc32",
encrypt_backend=ormar.EncryptBackends.HASH,
)
class Filter(ormar.Model):
class Meta(BaseMeta):
tablename = "filters"
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100, **default_fernet)
hash = ormar.ForeignKey(Hash)
class Report(ormar.Model):
class Meta(BaseMeta):
tablename = "reports"
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100)
filters = ormar.ManyToMany(Filter)
@pytest.fixture(autouse=True, scope="module")
def create_test_database():
engine = sqlalchemy.create_engine(DATABASE_URL)
metadata.drop_all(engine)
metadata.create_all(engine)
yield
metadata.drop_all(engine)
def test_error_on_encrypted_pk():
with pytest.raises(ModelDefinitionError):
class Wrong(ormar.Model):
class Meta(BaseMeta):
tablename = "wrongs"
id: int = ormar.Integer(
primary_key=True,
encrypt_secret="asd123",
encrypt_backend=ormar.EncryptBackends.FERNET,
)
def test_error_on_encrypted_relation():
with pytest.raises(ModelDefinitionError):
class Wrong2(ormar.Model):
class Meta(BaseMeta):
tablename = "wrongs2"
id: int = ormar.Integer(primary_key=True)
author = ormar.ForeignKey(
Author,
encrypt_secret="asd123",
encrypt_backend=ormar.EncryptBackends.FERNET,
)
def test_error_on_encrypted_m2m_relation():
with pytest.raises(ModelDefinitionError):
class Wrong3(ormar.Model):
class Meta(BaseMeta):
tablename = "wrongs3"
id: int = ormar.Integer(primary_key=True)
author = ormar.ManyToMany(
Author,
encrypt_secret="asd123",
encrypt_backend=ormar.EncryptBackends.FERNET,
)
def test_wrong_backend():
with pytest.raises(ModelDefinitionError):
class Wrong3(ormar.Model):
class Meta(BaseMeta):
tablename = "wrongs3"
id: int = ormar.Integer(primary_key=True)
author = ormar.Integer(
encrypt_secret="asd123",
encrypt_backend=ormar.EncryptBackends.CUSTOM,
encrypt_custom_backend="aa",
)
def test_db_structure():
assert Author.Meta.table.c.get("name").type.__class__ == EncryptedString
@pytest.mark.asyncio
async def test_save_and_retrieve():
async with database:
test_uuid = uuid.uuid4()
await Author(
name="Test",
birth_year=1988,
password="test123",
uuid_test=test_uuid,
test_float=1.2,
test_bool=True,
test_decimal=decimal.Decimal(3.5),
test_decimal2=decimal.Decimal(5.5),
test_json=dict(aa=12),
custom_backend="test12",
).save()
author = await Author.objects.get()
assert author.name == "Test"
assert author.birth_year == 1988
password = (
"03e4a4d513e99cb3fe4ee3db282c053daa3f3572b849c3868939a306944ad5c08"
"22b50d4886e10f4cd418c3f2df3ceb02e2e7ac6e920ae0c90f2dedfc8fa16e2"
)
assert author.password == password
assert author.uuid_test == test_uuid
assert author.uuid_test2 is None
assert author.test_datetime.date() == datetime.date.today()
assert author.test_date == datetime.date.today()
assert author.test_text == ""
assert author.test_float == 1.2
assert author.test_float2 is None
assert author.test_bigint == 0
assert author.test_json == {"aa": 12}
assert author.test_decimal == 3.5
assert author.test_decimal2 == 5.5
assert author.custom_backend == "test12"
@pytest.mark.asyncio
async def test_fernet_filters_nomatch():
async with database:
await Filter(name="test1").save()
await Filter(name="test1").save()
filters = await Filter.objects.all()
assert filters[0].name == filters[1].name == "test1"
with pytest.raises(NoMatch):
await Filter.objects.get(name="test1")
assert await Filter.objects.get_or_none(name="test1") is None
@pytest.mark.asyncio
async def test_hash_filters_works():
async with database:
await Hash(name="test1").save()
await Hash(name="test2").save()
secret = hashlib.sha256("udxc32".encode()).digest()
secret = base64.urlsafe_b64encode(secret)
hashed_test1 = hashlib.sha512(secret + "test1".encode()).hexdigest()
hash1 = await Hash.objects.get(name="test1")
assert hash1.name == hashed_test1
with pytest.raises(NoMatch):
await Filter.objects.get(name__icontains="test")
@pytest.mark.asyncio
async def test_related_model_fields_properly_decrypted():
async with database:
hash1 = await Hash(name="test1").save()
report = await Report.objects.create(name="Report1")
await report.filters.create(name="test1", hash=hash1)
await report.filters.create(name="test2")
report2 = await Report.objects.select_related("filters").get()
assert report2.filters[0].name == "test1"
assert report2.filters[1].name == "test2"
secret = hashlib.sha256("udxc32".encode()).digest()
secret = base64.urlsafe_b64encode(secret)
hashed_test1 = hashlib.sha512(secret + "test1".encode()).hexdigest()
report2 = await Report.objects.select_related("filters__hash").get()
assert report2.filters[0].name == "test1"
assert report2.filters[0].hash.name == hashed_test1
| 31.238095 | 84 | 0.655722 | 955 | 8,528 | 5.691099 | 0.184293 | 0.035327 | 0.034407 | 0.056302 | 0.487764 | 0.413247 | 0.332107 | 0.292732 | 0.259798 | 0.25207 | 0 | 0.03595 | 0.23675 | 8,528 | 272 | 85 | 31.352941 | 0.799047 | 0.001407 | 0 | 0.319048 | 0 | 0 | 0.050388 | 0.017853 | 0 | 0 | 0 | 0 | 0.109524 | 1 | 0.042857 | false | 0.02381 | 0.061905 | 0.009524 | 0.371429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
504e2c500348eb7b7420ff67295a03d97f301c8d | 1,893 | py | Python | temoin.py | ninideol/French-Wordle | eba15ce76abf1aeb7ce9122fc78c1ce422250a37 | [
"MIT"
] | null | null | null | temoin.py | ninideol/French-Wordle | eba15ce76abf1aeb7ce9122fc78c1ce422250a37 | [
"MIT"
] | 4 | 2022-02-18T15:48:17.000Z | 2022-02-26T01:40:16.000Z | temoin.py | ninideol/wordle_helper | eba15ce76abf1aeb7ce9122fc78c1ce422250a37 | [
"MIT"
] | null | null | null | def create(size,firstLetter = '\n'):
'''Create the temoin of the word and then return it
:param size: size of the word
:type size: int
:param firstLetter: first letter of the word
:type firstLetter: char
:returns: the temoin of this new word considering the parameters
:rtype: str list list
'''
temoin = []
for i in range(size):
temp = []
for j in range(26):
temp.append(chr(j+65))
temoin.append(temp)
if firstLetter != '\n':
temoin[0] = [firstLetter]
return temoin
def setTemoin(temoin, toHave, word, res):
    '''Update the temoin using the chosen word and its per-letter feedback
    :param temoin:
    :type temoin: str list list
    :param toHave: correct letters that are misplaced in the word
    :type toHave: char list
    :param word: word chosen by the user
    :type word: str
    :param res: per-letter feedback for the chosen word ('2' right place, '1' misplaced, '0' absent)
    :type res: str
    :returns: the updated temoin and the list of letters known to be in the word
    :rtype: str list list, char list tuple
    '''
alreadyTried = []
for i in range(len(res)):
if res[i] == '2':
temoin[i] = [word[i]]
alreadyTried.append(word[i])
elif res[i] == '1':
toHave.append(word[i])
temoin[i].remove(word[i])
alreadyTried.append(word[i])
elif res[i] == '0':
if word[i] not in alreadyTried:
for k in temoin:
if word[i] in k: k.remove(word[i])
alreadyTried.append(word[i])
return (temoin,toHave)
def printTemoin(temoin):
'''
:param temoin:
:type temoin: str list list
    :returns: the word written so far, with '_' for positions still unknown
:rtype: string
'''
toWrite = ""
for i in temoin:
if len(i) != 1: toWrite += "_"
else: toWrite += i[0]
return toWrite
def testfunction(t):
return t
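# Example usage (illustrative; the word and feedback string are assumptions):
#   temoin = create(5, 'A')                       # 5-letter word starting with 'A'
#   temoin, toHave = setTemoin(temoin, [], 'ARBRE', '20010')
#   print(printTemoin(temoin))                    # 'A____' until more letters are fixed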
| 27.434783 | 68 | 0.577919 | 257 | 1,893 | 4.249027 | 0.29572 | 0.041209 | 0.032967 | 0.063187 | 0.161172 | 0.161172 | 0.161172 | 0.065934 | 0.065934 | 0 | 0 | 0.007675 | 0.311675 | 1,893 | 68 | 69 | 27.838235 | 0.830391 | 0.396725 | 0 | 0.088235 | 0 | 0 | 0.007767 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0 | 0.029412 | 0.235294 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
504f0269253a1c8ade7684a28bed2391c307a62d | 543 | py | Python | facility_info/routers/users.py | JunAishima/facility-info | 4ea11e5920ae85b6343e33eb820a538004f0cab3 | [
"BSD-3-Clause"
] | null | null | null | facility_info/routers/users.py | JunAishima/facility-info | 4ea11e5920ae85b6343e33eb820a538004f0cab3 | [
"BSD-3-Clause"
] | null | null | null | facility_info/routers/users.py | JunAishima/facility-info | 4ea11e5920ae85b6343e33eb820a538004f0cab3 | [
"BSD-3-Clause"
] | null | null | null | from fastapi import APIRouter
from .user_data import user_info_dict

router = APIRouter()
@router.get("/")
def get_all():
    return list(user_info_dict.values())
@router.get("/me")
def get():
return user_info_dict['me']
@router.get("/{text}")
def get_by_search(text):
    results = []
    for info in user_info_dict.values():
        for value in info.values():
            if text == value:
                results.append(info)
                break  # avoid duplicate entries when several fields match
    return results
| 19.392857 | 45 | 0.627993 | 76 | 543 | 4.289474 | 0.342105 | 0.122699 | 0.147239 | 0.08589 | 0.141104 | 0.141104 | 0 | 0 | 0 | 0 | 0 | 0 | 0.243094 | 543 | 27 | 46 | 20.111111 | 0.793187 | 0 | 0 | 0 | 0 | 0 | 0.02403 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.1 | 0.05 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5055af3784cd982e10a221d3b3eb88bdb41d6f99 | 1,407 | py | Python | recover_results.py | pplonski/automlbenchmark | f49ddfa2583643173296ed8ab45a8c14c62a6987 | [
"MIT"
] | 282 | 2018-09-19T09:45:46.000Z | 2022-03-30T04:05:51.000Z | recover_results.py | pplonski/automlbenchmark | f49ddfa2583643173296ed8ab45a8c14c62a6987 | [
"MIT"
] | 267 | 2018-11-02T11:43:11.000Z | 2022-03-31T08:58:16.000Z | recover_results.py | pplonski/automlbenchmark | f49ddfa2583643173296ed8ab45a8c14c62a6987 | [
"MIT"
] | 104 | 2018-10-17T19:32:36.000Z | 2022-03-19T22:47:59.000Z | import argparse
import os
# import amlb.logger as early as possible to prevent other modules from defining the root logger via basicConfig
import amlb.logger
import amlb
from amlb import log
from amlb.utils import Namespace as ns, config_load
parser = argparse.ArgumentParser()
parser.add_argument('instances', type=str, help="The path to an instances.csv file.")
parser.add_argument('--reconnect', nargs='?', const=True, default=False, help=argparse.SUPPRESS)
parser.add_argument('-X', '--extra', default=[], action='append', help=argparse.SUPPRESS)
args = parser.parse_args()
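# '-X key=value' extras become {'key': 'value'}; a bare '-X flag' becomes {'flag': True}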
extras = {t[0]: t[1] if len(t) > 1 else True for t in [x.split('=', 1) for x in args.extra]}
# script_name = os.path.splitext(os.path.basename(__file__))[0]
# log_dir = os.path.join(args.outdir if args.outdir else '.', 'logs')
# os.makedirs(log_dir, exist_ok=True)
# now_str = datetime_iso(date_sep='', time_sep='')
amlb.logger.setup(root_level='DEBUG', console_level='INFO')
root_dir = os.path.dirname(__file__)
config = config_load(os.path.join(root_dir, "resources", "config.yaml"))
config_args = ns.parse(
root_dir=root_dir,
script=os.path.basename(__file__),
run_mode='script',
) + ns.parse(extras)
config_args = ns({k: v for k, v in config_args if v is not None})
amlb.resources.from_configs(config, config_args)
if args.reconnect:
amlb.AWSBenchmark.reconnect(args.instances)
else:
amlb.AWSBenchmark.fetch_results(args.instances)
| 34.317073 | 96 | 0.73774 | 219 | 1,407 | 4.561644 | 0.429224 | 0.036036 | 0.051051 | 0.036036 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004023 | 0.11656 | 1,407 | 40 | 97 | 35.175 | 0.799678 | 0.205402 | 0 | 0 | 0 | 0 | 0.09541 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.230769 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
50583afea5accb02ad425531ecb20d8681a3c7c9 | 281 | py | Python | common.py | weiyue0307/XiYuan-Project | b469c1a4e928ae1f0e3240f09b5dd910c88bdf48 | [
"MIT"
] | null | null | null | common.py | weiyue0307/XiYuan-Project | b469c1a4e928ae1f0e3240f09b5dd910c88bdf48 | [
"MIT"
] | null | null | null | common.py | weiyue0307/XiYuan-Project | b469c1a4e928ae1f0e3240f09b5dd910c88bdf48 | [
"MIT"
] | null | null | null | import socket, sys
def recv_all(sock, length):
    raw = b''
    while len(raw) < length:
        more = sock.recv(length - len(raw))
        if not more:
            raise EOFError('socket closed when receiving head info')
        raw += more
    return raw.decode('utf8')
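# Example (illustrative): read a fixed-size 8-byte header before the payload.
#   header = recv_all(conn, 8)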
| 18.733333 | 68 | 0.594306 | 38 | 281 | 4.368421 | 0.684211 | 0.072289 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005025 | 0.291815 | 281 | 14 | 69 | 20.071429 | 0.829146 | 0 | 0 | 0 | 0 | 0 | 0.150538 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
505abf372aba12515e93675d4a7e4c01cf2ebdbd | 1,044 | py | Python | Dual Channel models/test.py | adeepH/DC-LM | 1f3f98f6e22529de28e3136f7e7e60bd2fe1ef45 | [
"CC0-1.0"
] | 1 | 2021-12-15T11:04:14.000Z | 2021-12-15T11:04:14.000Z | Dual Channel models/test.py | adeepH/DC-LM | 1f3f98f6e22529de28e3136f7e7e60bd2fe1ef45 | [
"CC0-1.0"
] | null | null | null | Dual Channel models/test.py | adeepH/DC-LM | 1f3f98f6e22529de28e3136f7e7e60bd2fe1ef45 | [
"CC0-1.0"
] | 3 | 2021-10-17T10:43:45.000Z | 2022-02-19T12:46:02.000Z | import pandas as pd
import torch
from transformers import AutoTokenizer
from utils import create_data_loader
from dcbert4hope import DcBert4hope
from get_predictions import get_predictions
device = 'cuda' if torch.cuda.is_available() else 'cpu'
test = pd.read_csv('multichannelhope_test.csv')
model_name = ['mbert-bert.bin']
model1 = ['bert-base-uncased',
# 'distilbert-base-uncased',
# 'roberta-base',
# 'xlm-roberta-base',
# 'bert-large-uncased'
]
model2 = ['bert-base-multilingual-cased',
# 'distilbert-base-multilingual-cased',
# 'xlm-roberta-base',
]
MAX_LEN = 128
BATCH_SIZE = 32
model = DcBert4hope(model1, model2, n_classes=1)
model = model.to(device)
LOAD_MODEL = True
if LOAD_MODEL:
    # model_name is a one-element list; torch.load expects the path string
    model.load_state_dict(torch.load(model_name[0], map_location=device))
# from_pretrained expects a model-name string, not the surrounding list
tokenizer1 = AutoTokenizer.from_pretrained(model1[0])
tokenizer2 = AutoTokenizer.from_pretrained(model2[0])
test_data_loader = create_data_loader(test, tokenizer1, tokenizer2, MAX_LEN, BATCH_SIZE)
get_predictions(model, test_data_loader)
| 32.625 | 88 | 0.725096 | 132 | 1,044 | 5.530303 | 0.439394 | 0.054795 | 0.043836 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021839 | 0.166667 | 1,044 | 31 | 89 | 33.677419 | 0.817241 | 0.135057 | 0 | 0 | 0 | 0 | 0.101563 | 0.059152 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.217391 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
505eddb05540b8fcb57a78623aa95277ac5f6226 | 1,310 | py | Python | complex_framework/flask_app/pubsub_client.py | yaelgreen/Flask-SocketI-Examples | 0357c5191513e1d2e038a65516b3d666842b00f4 | [
"MIT"
] | 6 | 2021-05-03T05:28:55.000Z | 2021-05-25T09:26:31.000Z | complex_framework/flask_app/pubsub_client.py | yaelgreen/Flask-SocketI-Examples | 0357c5191513e1d2e038a65516b3d666842b00f4 | [
"MIT"
] | null | null | null | complex_framework/flask_app/pubsub_client.py | yaelgreen/Flask-SocketI-Examples | 0357c5191513e1d2e038a65516b3d666842b00f4 | [
"MIT"
] | 1 | 2021-04-27T13:44:27.000Z | 2021-04-27T13:44:27.000Z | import redis
import json
from config import CHANNEL
class PubSubClient:
"""
A Pub Sub redis client
Args:
_client (Redis): A Redis instance,
initialized with a redis server host and port.
Attributes:
_client (Redis): A Redis instance,
initialized with a redis host and port.
"""
_client = None
def __init__(self, redis_host: str, redis_port: int):
self._client = redis.Redis(redis_host, redis_port)
def send_event_fired_message(self, data: dict):
"""
Send a message on the channel CHANNEL
Args:
data: the message to send
"""
self.send_message(data, CHANNEL)
def send_message(self, message: dict, channel: str):
"""
Send a message on the channel
Args:
message: the message to send
channel: the channel to send the message on
"""
info = self._client.info("server")
if not hasattr(self._client, 'xadd') or ('redis_version' in info and
int(info['redis_version'].split('.')[0]) < 5):
message_json = json.dumps(message)
self._client.publish(channel, message_json)
else:
self._client.xadd(channel, message)
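# Example usage (illustrative; host and port are assumptions):
#   client = PubSubClient('localhost', 6379)
#   client.send_event_fired_message({'event': 'fired', 'id': 1})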
| 27.291667 | 95 | 0.577099 | 156 | 1,310 | 4.685897 | 0.320513 | 0.068399 | 0.032832 | 0.046512 | 0.191518 | 0.191518 | 0.125855 | 0.125855 | 0.125855 | 0 | 0 | 0.002299 | 0.335878 | 1,310 | 47 | 96 | 27.87234 | 0.837931 | 0.316031 | 0 | 0 | 0 | 0 | 0.048114 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.176471 | false | 0 | 0.176471 | 0 | 0.470588 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
505f723cf9e78a3c906218f4c674e89211396016 | 5,282 | py | Python | src/system/spectrum.py | robbje/eis | a7e0cac9e00e57f605cd9d9536d385a3f632e506 | [
"MIT"
] | null | null | null | src/system/spectrum.py | robbje/eis | a7e0cac9e00e57f605cd9d9536d385a3f632e506 | [
"MIT"
] | null | null | null | src/system/spectrum.py | robbje/eis | a7e0cac9e00e57f605cd9d9536d385a3f632e506 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
import json
import numpy as np
import scipy.signal
from scipy.optimize import leastsq
import matplotlib.pyplot as plt
from copy import deepcopy
class Spectrum(object):
def __init__(self):
self.omega = []
self.Z = []
@classmethod
def fromExperiments(cls, data):
s = cls()
""" data: List of Experiment-objects
"""
if len(data) < 2:
raise ValueError(
"Too little experiments in spectrum: %i" %
len(data))
data.sort(key=lambda e: e.w)
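        # Python 2 idiom: map(None, *pairs) transposes the (w, Z) pairs into
        # two parallel sequences, which are then converted to lists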
w, z = map(list, map(None, *[d.impedance() for d in data]))
s.omega = np.array(w)
s.Z = np.array(z)
return s
@classmethod
def fromCircuit(cls, omega, eqc, p):
""" omega: list of angular frequencies for this spectrum
eqc: equivalent circuit function returning the impedance
p: parameters to be used for the equivalent circuit function
"""
s = cls()
s.omega = np.array(omega)
s.Z = np.array([eqc(w, p) for w in omega])
s.eqc = eqc
return s
@classmethod
def fromRawData(cls, omega, Z):
""" omega: list of angular frequencies for this spectrum
Z: list of impedances for this spectrum
"""
s = cls()
s.omega = deepcopy(np.array(omega))
s.Z = deepcopy(np.array(Z))
return s
@classmethod
def fromJSON(cls, jsonstring):
d = json.loads(jsonstring)
s = cls()
s.omega = d['omega']
s.Z = np.array(d['Re']) + 1j * np.array(d['Im'])
return s
def toJSON(self):
return json.dumps({'omega':list(self.omega),
'Re':list(np.real(self.Z)),
'Im':list(np.imag(self.Z))})
def fit(self, spectrum, pset):
# TODO: utilize jacobian, if possible
def residuals(p, spectrum, pset):
pset.updateUnmaskedTransformedValues(np.abs(p))
pset.applyConstraints()
res = []
for i, w in enumerate(spectrum.omega):
z = self.eqc(w, pset._values)
res.append(np.real(z) - np.real(spectrum.Z[i]))
res.append(np.imag(z) - np.imag(spectrum.Z[i]))
return np.array(res)
p0 = pset.getUnmaskedTransformedValues()
plsq = leastsq(residuals, p0, args=(spectrum, pset), \
full_output = True,
xtol = 1e-20,
ftol = 1e-12,
factor = 1)
pset.updateUnmaskedTransformedValues(np.abs(plsq[0]))
r = residuals(plsq[0], spectrum, pset)
rmsd = np.sqrt(np.sum(np.power(r,2.0))/len(r))
return pset, rmsd
def updateParameter(self, p):
self.p = np.array(p)
self.Z = np.array([self.eqc(w, p) for w in self.omega])
def interpolate(
self, new_omega=2 * np.pi * np.power(10, np.arange(-5, 10, 0.1))):
re = np.interp(new_omega, self.omega, np.real(self.Z))
im = np.interp(new_omega, self.omega, np.imag(self.Z))
self.Z = re + 1j * im
self.omega = np.array(new_omega)
def getImaginaryMaxima(self):
return scipy.signal.argrelmax(np.imag(self.Z))
def getImaginaryMinima(self):
return scipy.signal.argrelmin(np.imag(self.Z))
def cropFrequencyRange(self, wRange):
newZ = []
newOmega = []
for i, w in enumerate(self.omega):
            if (wRange[0] is None or w > wRange[0]) and \
                    (wRange[1] is None or w <= wRange[1]):
newOmega.append(w)
newZ.append(self.Z[i])
self.Z = np.array(newZ)
self.omega = np.array(newOmega)
def plot_bode(self, style=['b+-', 'g+-']):
re, = plt.semilogx(self.omega, np.real(self.Z), style[0])
im, = plt.semilogx(self.omega, np.imag(self.Z), style[1])
plt.legend([re, im], ['Re(Z)', 'Im(Z)'])
return re, im
def plot_nyquist(self, style='rx-'):
plt.gca().set_aspect('equal')
return plt.plot(np.real(self.Z), np.imag(self.Z), style)
def do_kkr(self, change=False):
ng = 1
absZ = np.abs(self.Z)
phiZ = np.angle(self.Z)
linF = np.log(self.omega)
int_term = np.zeros(self.Z.shape)
diff_term = np.zeros(self.Z.shape)
        for k in xrange(ng, phiZ.shape[0] - ng):
            int_term[k] = -np.trapz(phiZ[k:], linF[k:])
            diff_term[k] = np.mean(
                np.diff(phiZ[k - 1:k + 2]) / np.diff(linF[k - 1:k + 2]))
gamma = -np.pi / 6.0
lnH = 2.0 / np.pi * int_term + gamma * diff_term
err_lnH = np.log(absZ[ng:-ng]) - lnH[ng:-ng]
constOffset = np.mean(err_lnH)
abszhit = np.zeros(absZ.shape)
abszhit[ng:-ng] = np.exp(constOffset + lnH[ng:-ng])
z = abszhit * np.exp(1j * phiZ)
w = self.omega
if change:
self.omega = w[ng:-ng]
self.Z = z[ng:-ng]
return Spectrum.fromRawData(w[ng:-ng], z[ng:-ng])
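# Example usage (illustrative; the circuit and parameter values are assumptions):
#   eqc = lambda w, p: p[0] + 1.0 / (1j * w * p[1])  # resistor in series with a capacitor
#   s = Spectrum.fromCircuit(2 * np.pi * np.logspace(-2, 5, 50), eqc, [100.0, 1e-6])
#   s_kkr = s.do_kkr()  # Kramers-Kronig consistency check of the spectrum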
| 33.0125 | 78 | 0.514767 | 696 | 5,282 | 3.875 | 0.252874 | 0.037078 | 0.022247 | 0.024472 | 0.186504 | 0.119392 | 0.074156 | 0.032629 | 0 | 0 | 0 | 0.012156 | 0.345892 | 5,282 | 159 | 79 | 33.220126 | 0.768452 | 0.060772 | 0 | 0.139535 | 0 | 0 | 0.016553 | 0 | 0 | 0 | 0 | 0.006289 | 0 | 1 | 0.124031 | false | 0 | 0.046512 | 0.023256 | 0.271318 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5060ce7919aeaba5ec72e50f0321501c2bf67e8c | 17,135 | py | Python | bakdotfiles.py | UberLambda/bakdotfiles | d458d619e865c9170536545b057666859cba64de | [
"MIT"
] | null | null | null | bakdotfiles.py | UberLambda/bakdotfiles | d458d619e865c9170536545b057666859cba64de | [
"MIT"
] | null | null | null | bakdotfiles.py | UberLambda/bakdotfiles | d458d619e865c9170536545b057666859cba64de | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
# vim: set sw=4 expandtab colorcolumn=120:
# The MIT License
#
# Copyright (c) 2019 Paolo Jovon <paolo.jovon@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''Parses `pacman -Qkk` output to find modified package files, then makes backup copies of them
(including/excluding any file specified in user-defined glob patterns)'''
import subprocess as sp
import re
import os
import sys
import shutil
import fnmatch, glob
from collections import namedtuple, defaultdict, OrderedDict
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
PKG_MANAGER = 'pacman' # = 'yay'
'''The pacman-like package manager to use.'''
WARNING_RE = re.compile(r'(?:warning|backup file): [^\s:]+: ([^\s]+) \(([^\)]*)\)$', re.MULTILINE)
'''Matches a filename and warning message from `pacman -Qkk` output.'''
IGNORED_WARNINGS = ['Permission denied', 'Permissions mismatch']
'''List of `pacman -Qkk` warnings that do not trigger a backup for a file.'''
SELF_DIR = os.path.dirname(os.path.abspath(__file__))
'''The location of this script.'''
BACKUP_DIR = os.path.join(SELF_DIR, 'backup')
'''Path to the default directory to backup files to.'''
CFG_FILE = os.path.join(SELF_DIR, 'dotfiles.cfg')
'''Path to the default backup configuration file.'''
OLD_SUFFIX = '.bakold'
'''Suffix to add to the filenames of outdated files.'''
class Log:
TRACE = -1
INFO = 0
WARN = 1
ERROR = 2
TAGS = {
TRACE: '',
INFO: '--',
WARN: '(!)',
ERROR: '[!]',
}
def __call__(self, level, *args, **kwargs):
tag = self.TAGS.get(level, '')
print(tag, *args, file=sys.stderr, **kwargs)
log = Log()
dictree = lambda: defaultdict(dictree) # Magic!
class GlobMatcher:
'''Builds a regular expression that matches any of the added glob patterns'''
glob_substs = OrderedDict([
(r'\\\*\\\*', r'.*'),
(r'\\\*', r'[^/]*'),
(r'\\\?', r'[^/]'),
])
glob_subber = re.compile('|'.join(glob_substs.keys()))
def __init__(self):
self.tree = dictree()
'''Basically a trie in which each node is a path node.
'/a/**/b' becomes:
{
'a': {
'**': {
'b': None,
}
}
}
'''
def _glob_sub(self, glob_node):
'''Translates a glob path node (ex. 'a', 'foo*.bar', '**') to a regex.'''
regex = re.escape(glob_node)
regex = self.glob_subber.sub(lambda match: self.glob_substs[re.escape(match.group())], regex)
return f'(?:{regex})'
@staticmethod
def normalize_glob(glob_pat):
'''Normalizes a glob pattern, making it start at / and normalizing paths within it.'''
if glob_pat.endswith('/') or os.path.isdir(glob_pat):
# If the pattern ends with a trailing slash, or if we know for sure it refers to a folder,
# it must be for a folder; match all of its children
glob_pat = glob_pat[:-1] + '**'
# Remove duplicated slashes, '.' and '..'
glob_pat = os.path.normpath(str(glob_pat))
if glob_pat.startswith('**'):
# Make the pattern start at the root directory
glob_pat = '/' + glob_pat
elif glob_pat.startswith('*'):
# Ditto
glob_pat = '/*' + glob_pat
elif glob_pat.startswith('/'):
# The pattern already starts at the root directory, nothing to do.
pass
else:
# Relative glob patterns make little sense in this context; disallow them
            raise RuntimeError(f'Invalid glob pattern: {glob_pat} (it should start with / or *)')
return glob_pat
def add(self, glob_pat):
'''Adds `glob_pat` to the list of patterns to match (after normalizing it).'''
glob_pat = self.normalize_glob(glob_pat)
assert len(glob_pat) != 0
nodes = glob_pat.split('/')
nodes = nodes[1:] # No need for the first, empty node
tree = self.tree
for node in nodes[:-1]:
tree = tree[node]
tree[nodes[-1]] = None
def _build_node_regex(self, node):
'''Returns a regex build for `node`, a node of `self.tree`.'''
regex = '(?:'
for i, (k, v) in enumerate(node.items(), start=1):
regex += self._glob_sub(k)
if type(v) == type(self.tree):
regex += r'\/'
regex += self._build_node_regex(v)
# else: leaf node
if i != len(node): regex += '|'
regex += ')'
return regex
def build_regex(self):
'''Compiles a regex that will match a filepath if any of the `add()`ed glob patterns match it.'''
regex = r'^\/' + self._build_node_regex(self.tree) + '$'
return re.compile(regex)
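# Example (illustrative): matching file paths against two added patterns.
#   gm = GlobMatcher()
#   gm.add('/etc/**')
#   gm.add('/home/*/.bashrc')
#   rx = gm.build_regex()
#   rx.match('/etc/pacman.conf')  # matches
#   rx.match('/usr/bin/python')   # no match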
def flatten_item(path):
'''Given a path, yields (in this order):
- All of the path's ancestor directories from root (but excluding /), in order
- The path itself
- All of the path's descendants (if path is a directory), in order'''
# Yield all parent directories of the item in succession
# This way `backup_file()` will mkdir() them before trying to copy their children
root = os.path.dirname(os.path.normpath(path))
sep_pos = 0
while sep_pos < len(root):
sep_pos = root.find('/', sep_pos + 1)
if sep_pos < 0: sep_pos = len(root)
yield root[:sep_pos]
# Yield the item itself
yield path
# If `path` is a directory, walk though its children
for root, dirnames, filenames in os.walk(path):
yield root
for dirname in dirnames: yield os.path.join(root, dirname)
for filename in filenames: yield os.path.join(root, filename)
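# Example (illustrative): flatten_item('/etc/ssh') yields '/etc', then
# '/etc/ssh', then everything below it, parents always before children, so
# directories can be created before their contents are copied.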
def changed_pkg_files():
'''Calls `pacman(-like) -Qkk` to list all files/folders (items) that differ from the package's version.
Returns a set containing `flatten_item(item) for item in changed_items`.'''
cmd = [PKG_MANAGER, '-Qkk']
try:
cmd_output = sp.check_output(cmd, stderr=sp.STDOUT).decode('utf-8')
except sp.CalledProcessError as err:
# Exit code is non-zero: some file(s) probably changed in the package
cmd_output = err.output.decode('utf-8')
# Note that if the exit code is zero something could have changed in the package as well;
# need to check the output to make sure
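    # Typical lines matched by WARNING_RE (illustrative; the exact wording
    # varies with the pacman version):
    #   warning: bash: /etc/bash.bashrc (Modification time mismatch)
    #   backup file: pacman: /etc/pacman.conf (Modification time mismatch)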
warnings = WARNING_RE.findall(cmd_output)
changed_files = set()
for (warning_item, warning_msg) in warnings:
if warning_msg not in IGNORED_WARNINGS:
# If `warning_item` is a directory, it and all of its children must be listed recursively.
# This is needed because otherwise blacklist glob patterns would not filter the children!
changed_files.update(flatten_item(warning_item))
return changed_files
def backup_item(src_path, backup_dir, force=False):
'''Copies the item (file or directory) at `src_path` (an absolute path) to `backup_dir`. Preserves permissions.
If `force`, the destination exists and it is not older than the source, raises a `FileExistsError`.'''
    # NOTE: os.path.join here wouldn't do the trick!
# NOTE: The parent directory of `dst_path` should already have been created! See `flatten_item()`
dst_path = os.path.normpath(backup_dir + src_path)
src_stat = os.stat(src_path)
if not force:
try:
dst_stat = os.stat(dst_path)
if dst_stat.st_mtime >= src_stat.st_mtime:
raise FileExistsError(f"{dst_path} is not older than {src_path}")
except FileNotFoundError:
pass
if os.path.isdir(src_path):
try:
os.mkdir(dst_path)
except FileExistsError:
raise
else:
# NOTE: The destination directory should already have been created!
shutil.copy2(src=src_path, dst=dst_path)
os.chown(dst_path, src_stat.st_uid, src_stat.st_gid)
def mark_old_files(src_root, bak_root, dry_run=False, verbose=False):
'''Adds a suffix to the names of all files present in `bak_root` but not in `src_root`.'''
for root, dirnames, filenames in os.walk(bak_root):
rel_root = os.path.relpath(root, bak_root)
for filename in filenames:
if filename.endswith(OLD_SUFFIX):
# Already marked as outdated
continue
bak_path = os.path.join(root, filename)
src_path = os.path.join(src_root, rel_root, filename)
if os.path.exists(src_path):
continue
if verbose:
log(log.TRACE, src_path, 'is outdated')
if not dry_run:
os.rename(bak_path, bak_path + OLD_SUFFIX)
Config = namedtuple('Config', 'included excluded')
def read_cfg(stream):
'''Reads +glob (included) and -glob (excluded) patterns from a configuration file.'''
cfg = Config(included=set(), excluded=set())
lines = stream.readlines()
for n, line in enumerate(lines, start=1):
if not line or line.isspace() or line.startswith('#'):
continue
splits = line.strip().split(maxsplit=1)
if len(splits) == 2:
op, pat = splits
pat = pat.rstrip() # Prevent trailing whitespace in the pattern!
if op == '+':
cfg.included.add(pat)
continue
elif op == '-':
cfg.excluded.add(pat)
continue
raise RuntimeError(f'Malformed config file: line {n} invalid: "{line}"')
return cfg
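# Example dotfiles.cfg (illustrative): '+' whitelists a glob, '-' blacklists
# one, and '#' starts a comment:
#   + /home/*/.config/**
#   + /etc/pacman.conf
#   - /home/*/.config/**/Cache*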
def progress_foreach(items, operation, mk_start_msg, mk_item_msg, mk_done_msg, clear=True):
'''1. Logs `mk_start_msg()`.
2. For each item in `items`, logs `mk_item_msg(item_n=1.., item)` and calls `operation(item)`.
If `clear` any previous item message line is cleared and the new one replaces it.
3. Logs `mk_done_msg()`.
'''
log(log.INFO, mk_start_msg())
for i, item in enumerate(items, start=1):
progress_str = f'[{i}/{len(items)}]'
item_msg = mk_item_msg(i, item)
if clear:
sys.stderr.write(f'\r\033[2K{progress_str} {item_msg:.100}')
else:
log(log.TRACE, progress_str, item_msg)
operation(item)
if clear: sys.stderr.write('\r\033[2K')
log(log.INFO, mk_done_msg())
def path_item_msg(i, item):
'''A `mk_item_msg()` for an item's path.'''
prefix = '(directory) ' if os.path.isdir(item) else ''
return f'{prefix}{item}'
def print_excluded_files(all_files, selected_files):
'''Logs all items that are in `all_files` but not in `selected_files`.'''
excluded = set(all_files) - set(selected_files)
log(log.INFO, len(all_files), 'file(s) found;', len(excluded), 'of those will be excluded from backup:')
for fpath in excluded:
log(log.TRACE, ' ', fpath)
def parse_args():
'''Interprets and returns shell arguments.'''
parser = ArgumentParser(description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('-v', '--verbose',
action='count', default=0,
help='Increases the verbosity level of the output. Can be specified multiple times')
parser.add_argument('-c', '--config',
default=CFG_FILE,
help='The backup configuration file to read')
parser.add_argument('-O', '--backup-dir',
default=BACKUP_DIR,
help='The directory to backup files to')
parser.add_argument('-n', '--dry-run',
action='store_true', default=False,
help='List files but do not actually back them up')
parser.add_argument('-f', '--force',
action='store_true', default=False,
help='Copy files even if their backup is already present and not older than them')
return parser.parse_args()
def main():
'''Entry point.'''
args = parse_args()
if args.verbose >= 1:
log(log.INFO, 'Backup destination:', args.backup_dir)
try:
if args.verbose >= 2:
log(log.INFO, 'Reading config file:', args.config)
with open(args.config) as cfg_file:
cfg = read_cfg(cfg_file)
except (PermissionError, FileNotFoundError, OSError) as e:
log(log.ERROR, 'Could not open config file:', e)
sys.exit(5)
except RuntimeError as e:
log(log.ERROR, e)
sys.exit(4)
if os.geteuid() != 0:
log(log.WARN, 'Not running as root, may be unable to copy some files due to insufficient permissions!')
if args.dry_run:
log(log.WARN, 'Dry run: no files will actually be backed up!')
clear_progress = args.verbose < 1
clear_line = '\n' * clear_progress
n_failed, n_skipped = 0, 0
def do_backup_item(f):
if args.dry_run:
return
try:
backup_item(f, args.backup_dir, force=args.force)
except FileExistsError as e:
if not clear_progress: log(log.WARN, f'\nskipped: {e}')
nonlocal n_skipped; n_skipped += 1
except Exception as e:
log(log.WARN, f'{clear_line}failed to backup {f}: {e}')
nonlocal n_failed; n_failed += 1
# Build a regex that matches all files to exclude from the backup
blacklist = GlobMatcher()
for pat in cfg.excluded:
blacklist.add(pat)
blacklist_re = blacklist.build_regex()
if args.verbose >= 2:
log(log.INFO, 'Blacklist regex:', blacklist_re.pattern)
# List changed package files
log(log.INFO, 'Listing modified package files...')
all_changed_files = changed_pkg_files()
# Apply blacklist and sort files so that folders are created before their children
changed_files = list(sorted(fpath for fpath in all_changed_files if not blacklist_re.match(fpath)))
if args.verbose >= 2:
print_excluded_files(all_changed_files, changed_files)
else:
log(log.INFO, len(changed_files), 'file(s) to backup found')
# Ensure backup dir is present
if not args.dry_run:
os.makedirs(BACKUP_DIR, exist_ok=True)
# Backup changed package files
progress_foreach(changed_files, do_backup_item,
mk_start_msg=lambda: 'Backing up modified package files',
mk_item_msg=path_item_msg,
mk_done_msg=lambda: 'Done',
clear=clear_progress)
# List user files
log(log.INFO, 'Listing user files...')
all_added_files = set()
for glob_pat in cfg.included:
if args.verbose >= 1:
log(log.TRACE, '+', glob_pat)
for globbed_item in glob.iglob(glob_pat, recursive=True):
# If `globbed_item` is a directory, it and all of its children must be listed recursively.
# Same reasoning as for `changed_pkg_files()`; otherwise, the blacklist globs would not filter the children!
all_added_files.update(flatten_item(globbed_item))
# Apply blacklist and sort files so that folders are created before their children
added_files = list(sorted(fpath for fpath in all_added_files if not blacklist_re.match(fpath)))
if args.verbose >= 2:
print_excluded_files(all_added_files, added_files)
else:
log(log.INFO, len(added_files), 'file(s) to backup found')
# Backup files from whitelist
progress_foreach(added_files, do_backup_item,
mk_start_msg=lambda: f'Backing up user files',
mk_item_msg=path_item_msg,
mk_done_msg=lambda: 'Done',
clear=clear_progress)
# Mark files present in the backup but not in the source as .old
log(log.INFO, 'Detecting outdated files...')
mark_old_files('/', args.backup_dir, args.dry_run, args.verbose >= 1)
# Done
if n_skipped > 0:
log(log.INFO, n_skipped, 'file(s) were skipped')
if n_failed > 0:
log(log.ERROR, n_failed, 'file(s) could not be backed up')
sys.exit(2)
sys.exit(0)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit(130)
| 35.84728 | 120 | 0.62848 | 2,362 | 17,135 | 4.428874 | 0.216765 | 0.016729 | 0.011471 | 0.005353 | 0.149508 | 0.102093 | 0.07896 | 0.068254 | 0.048944 | 0.048944 | 0 | 0.005362 | 0.259877 | 17,135 | 477 | 121 | 35.922432 | 0.819508 | 0.281821 | 0 | 0.123188 | 0 | 0 | 0.121407 | 0.001997 | 0 | 0 | 0 | 0 | 0.003623 | 1 | 0.065217 | false | 0.007246 | 0.028986 | 0 | 0.15942 | 0.014493 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5061740c9c6a0a1da4d7e072e2779d2ecf6a7af7 | 7,396 | py | Python | tests/test_parametric_components/test_PoloidalFieldCoilCaseSet.py | moatazharb/paramak | 785c3ed7304e22eac7d58bb1bdc6515fbd20b9a8 | [
"MIT"
] | 1 | 2021-12-14T15:53:46.000Z | 2021-12-14T15:53:46.000Z | tests/test_parametric_components/test_PoloidalFieldCoilCaseSet.py | bam241/paramak | 785c3ed7304e22eac7d58bb1bdc6515fbd20b9a8 | [
"MIT"
] | null | null | null | tests/test_parametric_components/test_PoloidalFieldCoilCaseSet.py | bam241/paramak | 785c3ed7304e22eac7d58bb1bdc6515fbd20b9a8 | [
"MIT"
] | null | null | null |
import math
import unittest
import paramak
import pytest
class TestPoloidalFieldCoilCaseSet(unittest.TestCase):
def setUp(self):
self.test_shape = paramak.PoloidalFieldCoilCaseSet(
heights=[10, 10, 20, 20],
widths=[10, 10, 20, 40],
center_points=[(100, 100), (100, 150), (50, 200), (50, 50)],
casing_thicknesses=[5, 10, 5, 10],
)
def test_default_parameters(self):
"""Checks that the default parameters of a PoloidalFieldCoilCaseSet are correct."""
assert self.test_shape.rotation_angle == 360
assert self.test_shape.stp_filename == "PoloidalFieldCoilCaseSet.stp"
assert self.test_shape.stl_filename == "PoloidalFieldCoilCaseSet.stl"
# assert self.test_shape.name == "pf_coil_case_set"
assert self.test_shape.material_tag == "pf_coil_case_mat"
def test_points_calculation(self):
"""Checks that the points used to construct the PoloidalFieldCoilCaseSetFC are
calculated correctly from the parameters given."""
assert self.test_shape.points == [
(105.0, 105.0, 'straight'), (105.0, 95.0, 'straight'), (95.0, 95.0, 'straight'),
(95.0, 105.0, 'straight'), (105.0, 105.0, 'straight'), (110.0, 110.0, 'straight'),
(110.0, 90.0, 'straight'), (90.0, 90.0, 'straight'), (90.0, 110.0, 'straight'),
(110.0, 110.0, 'straight'), (105.0, 155.0, 'straight'), (105.0, 145.0, 'straight'),
(95.0, 145.0, 'straight'), (95.0, 155.0, 'straight'), (105.0, 155.0, 'straight'),
(115.0, 165.0, 'straight'), (115.0, 135.0, 'straight'), (85.0, 135.0, 'straight'),
(85.0, 165.0, 'straight'), (115.0, 165.0, 'straight'), (60.0, 210.0, 'straight'),
(60.0, 190.0, 'straight'), (40.0, 190.0, 'straight'), (40.0, 210.0, 'straight'),
(60.0, 210.0, 'straight'), (65.0, 215.0, 'straight'), (65.0, 185.0, 'straight'),
(35.0, 185.0, 'straight'), (35.0, 215.0, 'straight'), (65.0, 215.0, 'straight'),
(70.0, 60.0, 'straight'), (70.0, 40.0, 'straight'), (30.0, 40.0, 'straight'),
(30.0, 60.0, 'straight'), (70.0, 60.0, 'straight'), (80.0, 70.0, 'straight'),
(80.0, 30.0, 'straight'), (20.0, 30.0, 'straight'), (20.0, 70.0, 'straight'),
(80.0, 70.0, 'straight'), (105.0, 105.0, 'straight')
]
def test_creation(self):
"""Creates a set of pf coils using the PoloidalFieldCoilCaseSet
parametric component and passing all required args, and checks
that a solid with the correct number of solids is created."""
assert self.test_shape.solid is not None
assert len(self.test_shape.solid.Solids()) == 4
def test_creation_with_zero_thickness(self):
"""Creates a set of pf coils using the PoloidalFieldCoilCaseSet
parametric component and passing a 0 entry into the casing_thicknesses
list, and checks that a solid with the correct number of solids is
created."""
self.test_shape.casing_thicknesses = [5, 0, 10, 10]
assert self.test_shape.solid is not None
assert len(self.test_shape.solid.Solids()) == 3
def test_absolute_volume(self):
"""Creates a set of pf coils using the PoloidalFieldCoilCaseSet
parametric component and checks that the volume is correct."""
        assert self.test_shape.volume == pytest.approx(
            (((20 * 5 * 2) + (10 * 5 * 2)) * math.pi * 2 * 100)
            + (((30 * 10 * 2) + (10 * 10 * 2)) * math.pi * 2 * 100)
            + (((30 * 5 * 2) + (20 * 5 * 2)) * math.pi * 2 * 50)
            + (((60 * 10 * 2) + (20 * 10 * 2)) * math.pi * 2 * 50)
        )
def test_absolute_areas(self):
"""Creates a set of pf coils using the PoloidalFieldCoilCaseSet
        parametric component and checks that the areas are correct."""
assert len(self.test_shape.areas) == 32
assert len(set([round(i) for i in self.test_shape.areas])) == 16
assert self.test_shape.areas.count(
pytest.approx(10 * math.pi * 2 * 100)) == 6
assert self.test_shape.areas.count(
pytest.approx(40 * math.pi * 2 * 50)) == 4
assert self.test_shape.areas.count(
pytest.approx(30 * math.pi * 2 * 100)) == 4
assert self.test_shape.areas.count(
pytest.approx(30 * math.pi * 2 * 50)) == 2
assert self.test_shape.areas.count(
pytest.approx(10 * math.pi * 2 * 105)) == 3
assert self.test_shape.areas.count(
pytest.approx(10 * math.pi * 2 * 95)) == 2
assert self.test_shape.areas.count(
pytest.approx(20 * math.pi * 2 * 110)) == 1
assert self.test_shape.areas.count(
pytest.approx(20 * math.pi * 2 * 90)) == 1
assert self.test_shape.areas.count(
pytest.approx(30 * math.pi * 2 * 115)) == 1
assert self.test_shape.areas.count(
pytest.approx(30 * math.pi * 2 * 85)) == 1
assert self.test_shape.areas.count(
pytest.approx(20 * math.pi * 2 * 60)) == 1
assert self.test_shape.areas.count(
pytest.approx(20 * math.pi * 2 * 40)) == 2
assert self.test_shape.areas.count(
pytest.approx(30 * math.pi * 2 * 65)) == 1
assert self.test_shape.areas.count(
pytest.approx(20 * math.pi * 2 * 70)) == 1
assert self.test_shape.areas.count(
pytest.approx(20 * math.pi * 2 * 30)) == 1
assert self.test_shape.areas.count(
pytest.approx(40 * math.pi * 2 * 80)) == 1
def test_PoloidalFieldCoilCaseSet_incorrect_thicknesses_1(self):
"""Checks that an error is raised when a PoloidalFieldCoilCaseSet is made
with the wrong number of casing thicknesses."""
def make_PoloidalFieldCoilCaseSet_incorrect_thicknesses_1():
self.test_shape.casing_thicknesses = [5, 5, 10]
self.test_shape.solid
self.assertRaises(
ValueError,
make_PoloidalFieldCoilCaseSet_incorrect_thicknesses_1
)
def test_PoloidalFieldCoil_incorrect_thicknesses_2(self):
"""Checks that an error is raised when a PoloidalFieldCoilCaseSet is made
with invalid casing thicknesses."""
def make_PoloidalFieldCoilCaseSet_incorrect_thicknesses_2():
self.test_shape.casing_thicknesses = [5, 5, 5, 'ten']
self.assertRaises(
ValueError,
make_PoloidalFieldCoilCaseSet_incorrect_thicknesses_2
)
def test_PoloidalFieldCoil_incorrect_thicknesses_3(self):
"""Checks that an error is raised when a PoloidalFieldCoilCaseSet is made
with invalid casing thicknesses."""
def make_PoloidalFieldCoilCaseSet_incorrect_thicknesses_3():
self.test_shape.casing_thicknesses = "ten"
self.assertRaises(
ValueError,
make_PoloidalFieldCoilCaseSet_incorrect_thicknesses_3
)
| 48.339869 | 209 | 0.568551 | 924 | 7,396 | 4.441558 | 0.149351 | 0.089912 | 0.110867 | 0.115741 | 0.746345 | 0.680068 | 0.594298 | 0.467105 | 0.429581 | 0.428606 | 0 | 0.099456 | 0.303948 | 7,396 | 152 | 210 | 48.657895 | 0.697747 | 0.163332 | 0 | 0.230769 | 0 | 0 | 0.067074 | 0.009252 | 0 | 0 | 0 | 0 | 0.298077 | 1 | 0.125 | false | 0 | 0.038462 | 0 | 0.173077 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac8ce5a1a5cdf542bcfdc5040d47fb102f413dca | 4,584 | py | Python | networks/convlstm_networks/train_src/patches_storage.py | DiMorten/FCN_ConvLSTM_Crop_Recognition_Generalized | 2749c90fab6c3854c380f6bca945dd4e99c17239 | [
"Apache-2.0"
] | null | null | null | networks/convlstm_networks/train_src/patches_storage.py | DiMorten/FCN_ConvLSTM_Crop_Recognition_Generalized | 2749c90fab6c3854c380f6bca945dd4e99c17239 | [
"Apache-2.0"
] | null | null | null | networks/convlstm_networks/train_src/patches_storage.py | DiMorten/FCN_ConvLSTM_Crop_Recognition_Generalized | 2749c90fab6c3854c380f6bca945dd4e99c17239 | [
"Apache-2.0"
] | null | null | null | #from natsort import natsorted, ns
import numpy as np
import pathlib
import deb
import glob
import re
import pdb
from pathlib import Path
def natural_sort(l):
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(l, key=alphanum_key)
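# Example (illustrative): natural_sort(['im10.npy', 'im2.npy', 'im1.npy'])
# returns ['im1.npy', 'im2.npy', 'im10.npy'] (human order, not lexicographic).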
class PatchesStorage():
def __init__(self):
pass
class PatchesStorageAllSamples(PatchesStorage):
def __init__(self,path):
self.path_patches = path + 'patches_bckndfixed/'
self.path={}
self.path['train_bckndfixed']=self.path_patches+'train/'
self.path['val_bckndfixed']=self.path_patches+'val/'
self.path['test_bckndfixed']=self.path_patches+'test/'
print("Path, ",self.path)
#pdb.set_trace()
def store(self,data_patches):
self.storeSplit(data_patches['train'],'train_bckndfixed')
self.storeSplit(data_patches['test'],'test_bckndfixed')
#self.storeSplit(data_patches['val'],'val_bckndfixed')
def storeSplit(self, patches, split='train_bckndfixed'):
pathlib.Path(self.path[split]).mkdir(parents=True, exist_ok=True)
print("Storing in ",self.path[split])
np.save(self.path[split]+'patches_in.npy', patches['in']) #to-do: add polymorphism for other types of input
#pathlib.Path(self.path[split]['label']).mkdir(parents=True, exist_ok=True)
np.save(self.path[split]+'patches_label.npy', patches['label']) #to-do: add polymorphism for other types of input
#pdb.set_trace()
def load(self):
data_patches={}
data_patches['val']=self.loadSplit('val_bckndfixed')
data_patches['train']=self.loadSplit('train_bckndfixed')
data_patches['test']=self.loadSplit('test_bckndfixed')
return data_patches
def loadSplit(self, split='train'):
out={}
out['in']=np.load(self.path[split]+'patches_in.npy',mmap_mode='r')
out['label']=np.load(self.path[split]+'patches_label.npy')
return out
class PatchesStorageEachSample(PatchesStorage):
def __init__(self,path):
self.path_patches = path + 'patches_eachsample/'
self.path_im={}
self.path_im['train']=self.path_patches+'im/'+'train/'
self.path_im['val']=self.path_patches+'im/'+'val/'
self.path_im['test']=self.path_patches+'im/'+'test/'
self.path_label={}
self.path_label['train']=self.path_patches+'label/'+'train/'
self.path_label['val']=self.path_patches+'label/'+'val/'
self.path_label['test']=self.path_patches+'label/'+'test/'
def store(self,data_patches):
self.storeSplit(data_patches['train'],'train')
self.storeSplit(data_patches['test'],'test')
self.storeSplit(data_patches['val'],'val')
def storeSplit(self, patches, split='train'):
pathlib.Path(self.path_im[split]).mkdir(parents=True, exist_ok=True)
pathlib.Path(self.path_label[split]).mkdir(parents=True, exist_ok=True)
for idx in range(patches['in'].shape[0]):
np.save(self.path_im[split]+'patches_in'+str(idx).zfill(5)+'.npy', patches['in'][idx]) #to-do: add polymorphism for other types of input
np.save(self.path_label[split]+'patches_label'+str(idx).zfill(5)+'.npy', patches['label'][idx]) #to-do: add polymorphism for other types of input
        # pdb.set_trace()  # debug breakpoint disabled so batch stores can run unattended
#pathlib.Path(self.path[split]['label']).mkdir(parents=True, exist_ok=True)
def load(self):
# use self.folder_load from main.py
data_patches={}
data_patches['val']=self.loadSplit('val')
data_patches['train']=self.loadSplit('train')
data_patches['test']=self.loadSplit('test')
return data_patches
def loadSplit(self, split='train'):
out={}
out['in'],_=self.folder_load(self.path_im[split])
out['label'],_=self.folder_load(self.path_label[split])
return out
def folder_load(self,folder_path): #move to patches_handler
paths=glob.glob(folder_path+'*.npy')
#deb.prints(paths)
# sort in human order
paths=natural_sort(paths)
#deb.prints(paths)
files=[]
deb.prints(len(paths))
for path in paths:
#print(path)
files.append(np.load(path))
return np.asarray(files),paths
def folder_load_partition(self,folder_path): #move to patches_handler
paths=glob.glob(folder_path+'*.npy')
#deb.prints(paths)
# sort in human order
paths=natural_sort(paths)
return paths
def loadSplitPartition(self, split='train'):
partition={}
partition['in']=self.folder_load_partition(self.path_im[split])
partition['label']=self.folder_load_partition(self.path_label[split])
return partition
def loadPartition(self):
partition={}
partition['val']=self.loadSplitPartition('val')
partition['train']=self.loadSplitPartition('train')
partition['test']=self.loadSplitPartition('test')
return partition | 36.380952 | 149 | 0.723604 | 661 | 4,584 | 4.856278 | 0.154312 | 0.104673 | 0.051402 | 0.046729 | 0.53053 | 0.494704 | 0.319003 | 0.289097 | 0.263551 | 0.251402 | 0 | 0.00122 | 0.106239 | 4,584 | 126 | 150 | 36.380952 | 0.78228 | 0.140489 | 0 | 0.247423 | 0 | 0 | 0.140235 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.164948 | false | 0.010309 | 0.072165 | 0 | 0.360825 | 0.030928 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac8f8da22cdd19010ae321a792a6e554a54a7b2c | 2,396 | py | Python | massgenotyping/show_alignment.py | kohyamat/MassGenotyping | 4a3d8d073d39aa4860db4a214954bbaae65dbaf1 | [
"MIT"
] | 5 | 2020-04-27T21:24:42.000Z | 2021-12-15T03:14:42.000Z | massgenotyping/show_alignment.py | kohyamat/MassGenotyping | 4a3d8d073d39aa4860db4a214954bbaae65dbaf1 | [
"MIT"
] | 1 | 2020-09-23T17:17:28.000Z | 2020-09-24T17:05:50.000Z | massgenotyping/show_alignment.py | kohyamat/massgenotyping | 4a3d8d073d39aa4860db4a214954bbaae65dbaf1 | [
"MIT"
] | null | null | null | import sys
from pathlib import Path
from matplotlib import get_backend
from .argument_parser import get_args
from .base import SeqData, count_uniq_seq
from .find_ssrs import find_ssrs, get_longest_RepData
from .variant_filter import VisualCheck
class ShowAlignment(VisualCheck):
def __init__(self, seqdat, suptitle):
super().__init__(seqdat, suptitle=suptitle)
self.reset_button.remove()
def on_pick(self, event):
artist = event.artist
mouseevent = event.mouseevent
if self.subplot is None:
return
if self.subplot == 1:
idx = self.rects1.index(artist)
else:
idx = self.rects2.index(artist)
if mouseevent.button == 1:
self.reset_info()
if self.idx_tmp != idx:
self.idx_tmp = idx
else:
self.idx_tmp = -1
self.show_info(self.idx_tmp)
event.canvas.draw()
def on_key(self, event):
if event.key in ["down", "ctrl+j"]:
if self.idx_tmp == len(self.rects2) - 1:
self.idx_tmp = -1
else:
self.idx_tmp += 1
elif event.key in ["up", "ctrl+k"]:
if self.idx_tmp == -1:
self.idx_tmp = len(self.rects2) - 1
else:
self.idx_tmp -= 1
elif event.key in ["ctrl+c"]:
self.disconnect()
sys.exit()
if self.idx_tmp > -2:
self.reset_info()
self.show_info(self.idx_tmp)
event.canvas.draw()
def text_selected(self):
if self.txt0:
self.txt0.remove()
s = u"Left click on a sequence (or press \u2191/\u2193 key) to show information"
self.txt0 = self.ax0.text(
0.5, 0.6, s=s, color="grey", va="center", ha="center", wrap=True
)
def main(args):
if get_backend == "agg":
sys.exit()
for seq_file in args.infile:
seqdat = [
SeqData("Seq{}".format(i), s, c)
for i, (s, c) in enumerate(count_uniq_seq(seq_file).items())
]
for s in seqdat:
s.rep_data = get_longest_RepData(find_ssrs(s.seq))
sal = ShowAlignment(seqdat, Path(seq_file).name)
sal.show()
if __name__ == "__main__":
argv = ["show-alignment"]
argv.extend(sys.argv[1:])
main(get_args(argv))
| 26.32967 | 88 | 0.55384 | 313 | 2,396 | 4.057508 | 0.351438 | 0.066142 | 0.094488 | 0.043307 | 0.167717 | 0.147244 | 0.147244 | 0.110236 | 0.110236 | 0.110236 | 0 | 0.019327 | 0.330551 | 2,396 | 90 | 89 | 26.622222 | 0.772444 | 0 | 0 | 0.202899 | 0 | 0 | 0.059683 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072464 | false | 0 | 0.101449 | 0 | 0.202899 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac9ad7a4db534aa6aea2c237b1bae0ff5816679b | 3,706 | py | Python | animusic/__main__.py | wardzin/dancing-art | ab3e2f13e9421b85b1737527519706734c7eb8f8 | [
"MIT"
] | 13 | 2021-09-16T00:16:49.000Z | 2022-03-11T12:32:37.000Z | animusic/__main__.py | wardzin/dancing-art | ab3e2f13e9421b85b1737527519706734c7eb8f8 | [
"MIT"
] | null | null | null | animusic/__main__.py | wardzin/dancing-art | ab3e2f13e9421b85b1737527519706734c7eb8f8 | [
"MIT"
] | 3 | 2021-11-16T18:04:01.000Z | 2022-03-25T10:01:15.000Z | import os
import platform
import PySimpleGUI as sg
import json
from concurrent.futures import ThreadPoolExecutor
from . import anim
def main():
    # include ffmpeg binary for Windows users
    if platform.system() == 'Windows':
        os.environ['PATH'] += os.pathsep + os.path.join(os.path.dirname(__file__), 'bin')

    win_width, win_height = 600, 150
    layout = [
        [sg.Text('Input Image or Video'), sg.Input(key='input'), sg.FileBrowse(file_types=(('Image or Video', '*.jpg *.jpeg *.png *.gif *.mp4 *.mov *.flv *.avi *.webm *.mkv'), ('All Files', '*.*')))],
        [sg.Text('Audio'), sg.Input(key='audio'), sg.FileBrowse(file_types=(('Audio', '*.mp3 *.mpeg *.aiff *.wav *.flac *.ogg *.aac',), ('All Files', '*.*')))],
        [sg.Text('Output Video'), sg.Input(key='output'), sg.FileSaveAs(file_types=(('MP4 Video', '*.mp4'), ('All Files', '*.*')))],
        # [sg.Frame('Video Encoding Options', layout=[
        #     [sg.Text('Frames per second'), sg.Spin([i for i in range(24, 61)], initial_value=30)],
        #     [sg.Text('Encoding speed'), sg.Combo(['very fast', 'fast', 'medium', 'slow', 'very slow'], default_value='medium')],
        #     [sg.Text('Additional FFmpeg parameters'), sg.InputText('-bf 2 -b_strategy 2')],
        # ])],
        [sg.Button('Animate')],
    ]
    save_keys = ['input', 'audio', 'output']

    window = sg.Window('Animusic', layout, size=(win_width, win_height))
    window.finalize()

    with ThreadPoolExecutor(1) as executor:
        animation_thread = None

        # restore the values saved from the previous session, if any
        save_filepath = os.path.join(os.path.dirname(__file__), 'save.json')
        try:
            with open(save_filepath, 'r') as save_file:
                saved_values = json.load(save_file)
            for key in save_keys:
                if key in saved_values:
                    window[key].update(saved_values[key])
            print('Loaded values from previous session')
        except (FileNotFoundError, json.decoder.JSONDecodeError):
            pass

        last_values = {key: None for key in save_keys}
        while True:
            event, values = window.read(timeout=100)
            if event == sg.WIN_CLOSED:
                # persist the last entered values before quitting
                saved_values = {key: last_values[key] for key in save_keys}
                with open(save_filepath, 'w') as save_file:
                    json.dump(saved_values, save_file)
                break
            if event == 'Animate':
                if not values['output'].endswith('.mp4'):
                    window['output'].update(values['output'] + '.mp4')
                    values['output'] += '.mp4'
                animation_thread = executor.submit(
                    anim.create_animation,
                    img=values['input'],
                    audio=values['audio'],
                    output=values['output'],
                )
                window['Animate'].update(disabled=True, text='Running...')
            if animation_thread is not None and animation_thread.done():
                try:
                    animation_thread.result()  # we call this to propagate errors to the main thread
                except Exception as e:
                    print(e)
                animation_thread = None
                window['Animate'].update(disabled=False, text='Animate')
                # open the finished video with the platform's default player
                if platform.system() == 'Windows':
                    os.system(values['output'])
                elif platform.system() == 'Darwin':
                    os.system('open ' + values['output'])
                elif platform.system() == 'Linux':
                    os.system('xdg-open ' + values['output'])
            last_values = values.copy()

    window.close()
if __name__ == '__main__':
    main()
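
# A minimal, self-contained sketch (added; not part of the original file) of
# the poll-a-future pattern used in main(): submit work to a one-worker
# ThreadPoolExecutor, keep the event loop responsive, and check .done() on
# each tick. slow_job() is an illustrative stand-in for anim.create_animation.
#
#     import time
#     from concurrent.futures import ThreadPoolExecutor
#
#     def slow_job():
#         time.sleep(1)
#         return "finished"
#
#     with ThreadPoolExecutor(1) as executor:
#         future = executor.submit(slow_job)
#         while not future.done():
#             time.sleep(0.1)  # stand-in for window.read(timeout=100)
#         print(future.result())  # .result() re-raises any worker exception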
| 40.282609 | 200 | 0.546141 | 416 | 3,706 | 4.737981 | 0.396635 | 0.042618 | 0.015221 | 0.018265 | 0.10756 | 0.027397 | 0.027397 | 0 | 0 | 0 | 0 | 0.009664 | 0.301943 | 3,706 | 91 | 201 | 40.725275 | 0.752223 | 0.118187 | 0 | 0.090909 | 0 | 0 | 0.142945 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015152 | false | 0.015152 | 0.090909 | 0 | 0.106061 | 0.030303 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac9b14773cb23d17a07c781a60be30188bec5605 | 3,412 | py | Python | test/FirstLookAtTensorflow.py | rexzhang2014/Easi-ML | 5ff084b81b2c516d0ebea75f1dc0db1ebcb775db | [
"Apache-2.0"
] | 1 | 2018-01-29T08:41:23.000Z | 2018-01-29T08:41:23.000Z | test/FirstLookAtTensorflow.py | rexzhang2014/Easi-ML | 5ff084b81b2c516d0ebea75f1dc0db1ebcb775db | [
"Apache-2.0"
] | null | null | null | test/FirstLookAtTensorflow.py | rexzhang2014/Easi-ML | 5ff084b81b2c516d0ebea75f1dc0db1ebcb775db | [
"Apache-2.0"
] | 2 | 2018-04-04T04:24:31.000Z | 2019-02-11T09:01:51.000Z |
# coding: utf-8

# Getting Started with Tensorflow
#
# Notes:
# 1. constant: a fixed value that can be substituted into a formula
# 2. placeholder: a value supplied at run time, used to feed in training samples (X, Y)
# 3. variable: a formula parameter whose value is determined through training, e.g. W
#
# For More Reference : https://tensorflow.google.cn/get_started/get_started

# In[2]:
from __future__ import print_function  # a __future__ import must be the first statement in the file

import tensorflow as tf
import pandas as pd
import numpy as np
# 1. DEFINE constant
# 2. Run session
# In[8]:
#Create constants
node1 = tf.constant(3.0, dtype=tf.float32)
node2 = tf.constant(4.0) # also tf.float32 implicitly
print(node1, node2) # Print out Tensor at this moment
# Create 'add' computation
node3 = tf.add(node1, node2)
print("node3:", node3)
# Create Session
sess = tf.Session()
# Run Session
result = sess.run([node1, node2])
print(type(result), result)
#print(sess.run([node1, node2])) # Print the run result
print("sess.run(node3):", sess.run(node3))
# 1. Define placeholders: build the computation graph now, but provide the values at session run time
# 2. Run session
# In[18]:
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = a + b # + provides a shortcut for tf.add(a, b)
#Tensor could be a number
print(sess.run(adder_node, {a: 3, b: 4.5}))
# A vector
print(sess.run(adder_node, {a: [1, 3], b: [2, 4]}))
# A matrix
print(sess.run(adder_node, {a: [[1, 3], [1, 1]], b: [[2, 4], [5, -1]]}))
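
# Expected output of the three runs above (added note):
# 7.5, then [3. 7.], then [[3. 7.] [6. 0.]]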
# Define Variables: variables are the parameters that the model can learn
# through training. The constants above are fixed in advance, while
# placeholders are supplied at execution time.
#
# In[25]:
# The linear model's parameters are W and b, with x as the independent
# variable; the function has the form Wx + b.
# If we already knew the parameter values of a linear model, we could fix W
# and b, pass x in as a placeholder at run time, and obtain the model's outputs.
W = tf.Variable([.3], dtype=tf.float64)  # with tf.float32 you can observe values like 0.3000001
b = tf.Variable([-.3], dtype=tf.float64)
x = tf.placeholder(tf.float64)
linear_model = W * x + b

# Initialization:
# constants are initialized at creation, so they can be run directly;
# variables are NOT initialized at creation, so they must be initialized before run.
init = tf.global_variables_initializer()  # initializes variables with the values given when they were created
sess.run(init)
print(sess.run(linear_model, {x: [1, 2, 3, 4]}))
# 1. Define constants, placeholders, and variables.
# 2. Define the Loss/Cost/Objective function: usually we only know the
#    function's structure (its expression) but not which parameters are best,
#    so the most suitable parameter values must be fitted from samples. The
#    objective function is the key to solving a machine-learning problem: it
#    measures the discrepancy between the model distribution and the data
#    distribution, and when that discrepancy is minimal the two distributions
#    are closest. The objective also directly determines how training works:
#    if it is convex, a unique extremum exists; if it is differentiable,
#    gradient methods can be used. Common objectives are least squares and
#    cross-entropy.
# 3. Train: training algorithms are a hot topic in machine-learning research,
#    since an algorithm must suit the properties of the objective. For very
#    complex models two things matter: whether there is enough data to fit the
#    large number of parameters, and whether the algorithm's performance is
#    acceptable -- real-time applications need very fast computation, and
#    model development needs the algorithm to be fast enough for rapid
#    iteration.

# In[28]:
# Define the objective function: sum of squared errors; its minimum value is 0.
y = tf.placeholder(tf.float64)
squared_deltas = tf.square(linear_model - y)  # linear_model was defined above
loss = tf.reduce_sum(squared_deltas)  # sum of squares
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))  # observe that the loss is fairly large

# Now change the values of W and b and observe the loss again. Note that
# (x, y) must stay the same here, otherwise the comparison is meaningless.
fixW = tf.assign(W, [-1.])  # assign changes a variable's value; it returns a handle a session can run
fixb = tf.assign(b, [1.])
sess.run([fixW, fixb])
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))
# 1. Create the training-algorithm object: the most common and simplest
#    training algorithm in machine learning is gradient descent. Note that
#    gradient descent requires a differentiable objective function. If the
#    objective is convex, convergence to the extremum is guaranteed; otherwise
#    it may not converge, and may not reach the global optimum.
# 2. Run the training.

# In[29]:
optimizer = tf.train.GradientDescentOptimizer(0.01)  # gradient-descent optimizer, learning rate = 0.01
train = optimizer.minimize(loss)  # training op whose objective is the loss defined above
sess.run(init)  # re-initialize the variables with the initializer defined earlier

# Run 1000 training iterations; each iteration updates the parameters once.
for i in range(1000):
    sess.run(train, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})
print(sess.run([W, b]))
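
# Added follow-up (mirrors the official getting-started guide): after
# training, W should be close to -1 and b close to 1, so the loss evaluated
# on the same data should be near zero.
curr_W, curr_b, curr_loss = sess.run(
    [W, b, loss], {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}
)
print("W: %s b: %s loss: %s" % (curr_W, curr_b, curr_loss))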
| 26.246154 | 219 | 0.720106 | 476 | 3,412 | 5.119748 | 0.436975 | 0.043086 | 0.044317 | 0.006565 | 0.094789 | 0.074682 | 0.045137 | 0.045137 | 0.025441 | 0.025441 | 0 | 0.044467 | 0.123388 | 3,412 | 129 | 220 | 26.449612 | 0.770311 | 0.549824 | 0 | 0.1 | 0 | 0 | 0.014905 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0.3 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac9c2aed546d5bfc561402f15ce15f292f5c80cd | 5,846 | py | Python | pyjordan/tools.py | jmbarbone/pyjordan | 575e8e71df0c6ec23865d55dec9a57987c964e43 | [
"MIT"
] | null | null | null | pyjordan/tools.py | jmbarbone/pyjordan | 575e8e71df0c6ec23865d55dec9a57987c964e43 | [
"MIT"
] | null | null | null | pyjordan/tools.py | jmbarbone/pyjordan | 575e8e71df0c6ec23865d55dec9a57987c964e43 | [
"MIT"
] | null | null | null |
def is_none(x):
    """ Is none

    True if the value is None, otherwise False

    Parameters
    ----------
    x : An object

    Returns
    -------
    True if x is None, otherwise False
    """
    return x is None
def exists(x, where="local"):
    """ Exists

    Description
    -----------
    Checks if an object exists by the name of the object

    Parameters
    ----------
    x : str
        The name of an object as a string
    where : str, "local" (default), "global", or "builtin"
        Where to search for the object. If not one of the three above, raises
        an exception

    Returns
    -------
    If object is found, True, otherwise False

    References
    ----------
    Adapted from: https://stackoverflow.com/a/6386015/12126576
    """
    import builtins  # Python 3 name of the builtins module
    import sys

    if where == "local":
        res = x in sys._getframe(1).f_locals
    elif where == "global":
        res = x in sys._getframe(1).f_globals
    elif where == "builtin":
        res = x in vars(builtins)
    else:
        raise ValueError("`where` should be one of: 'local', 'global', 'builtin'")

    return res
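
# Usage sketch for exists() (added example):
#   foo = 1
#   exists("foo")                     # True -- searches the caller's locals by default
#   exists("nope")                    # False
#   exists("print", where="builtin")  # True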
def print_time(x):
    """ Print the current time

    Description
    -----------
    Prepends the current time to a printed message

    Parameters
    ----------
    x : String
        A message to be printed
    """
    from datetime import datetime

    ts = datetime.now().strftime("%y-%m-%d %H:%M:%S")
    print(f"[{ts}] {x}", flush=True)
    return None
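
# Usage sketch for print_time() (added example; the timestamp reflects the clock):
#   print_time("job started")   # [21-01-01 09:30:00] job started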
def round_by(x, by, method="round"):
    """ Round by

    Description
    -----------
    Rounds a number by another

    Parameters
    ----------
    x : numeric
        A number or list to round
    by : numeric
        The number or list by which to round
    method : string
        The method of rounding to use: round, ceiling, or floor

    Returns
    -------
    A list of rounded numbers
    """
    from math import floor, ceil

    x = as_list(x)
    by = as_list(by)

    FUN = {
        "round": round,
        "ceiling": ceil,
        "floor": floor,
    }

    if method not in ["round", "ceiling", "floor"]:
        raise Exception('`method` must be one of: "round", "ceiling", "floor"')

    try:
        return [FUN[method](i / b) * b for b in by for i in x]
    except KeyError:
        raise Exception('`method` must be one of: "round", "ceiling", "floor"')
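
# Usage sketch for round_by() (added example; values follow from the definition):
#   round_by(7, 5)             # [5]   round(7 / 5) * 5
#   round_by(7, 5, "ceiling")  # [10]  ceil(7 / 5) * 5
#   round_by([3, 12], 10)      # [0, 10]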
def unnest(x):
    """ Unnest

    Description
    -----------
    Unnests a list of lists

    Parameters
    ----------
    x : list
        A list to be unnested

    Returns
    -------
    The values of `x` as separate elements

    References
    ----------
    Adapted from flatten() but with improvements to continue unnesting with
    multiple nesting statements. This can be seen with the second example below.

    Examples
    --------
    x = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    unnest(x)  # [1, 2, 3, 4, 5, 6, 7, 8, 9]

    x = [[1], [2, [3, 4]]]
    unnest(x)  # [1, 2, 3, 4]
    """
    res = [j for i in as_list(x) for j in as_list(i)]

    while any([isinstance(i, list) for i in res]):
        res = unnest(res)

    return res
def as_list(x):
    """Wrap `x` in a list if it is not one already."""
    if not isinstance(x, list):
        x = [x]
    return x
def flatten(x):
    """
    Flatten a list

    Performs a single unnesting of a list.

    Parameters
    ----------
    x : list
        A list to be flattened

    Returns
    -------
    The values of `x` but with a single unnesting

    References
    ----------
    https://stackoverflow.com/a/952952/12126576

    Examples
    --------
    x = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    flatten(x)  # [1, 2, 3, 4, 5, 6, 7, 8, 9]

    # Not compatible with different levels of nesting
    # For this, use unnest()
    x = [[1], [2, [3, 4]]]
    flatten(x)  # [1, 2, [3, 4]]
    """
    return [item for sublist in x for item in sublist]
def which(x):
    """ Which (is True)

    Description
    -----------
    Returns a numeric list of which elements are True

    Parameters
    ----------
    x : list, bool
        A list of bools

    Returns
    -------
    A list of the indices of `x` that are True

    Examples
    --------
    which(True)  # [0]
    which([True, False, None, True, False])  # [0, 3]
    """
    x = as_list(x)
    out = []

    if not is_boolean(x):
        raise ValueError("x must be boolean")

    for i in range(0, len(x)):
        if x[i]:
            out.append(i)

    return out
def is_boolean(x):
    """ Is boolean?

    Description
    -----------
    Evaluates an object or list as boolean

    Parameters
    ----------
    x : Object
        An object to be evaluated as boolean.

    Returns
    -------
    True or False

    Examples
    --------
    is_boolean(True)  # True
    is_boolean([1, 2, True, False])  # False
    is_boolean([True, None, False, True])  # True
    """
    if isinstance(x, list):
        return all([is_boolean(i) for i in x])

    return is_none(x) or isinstance(x, bool)
def limit(x, lower=None, upper=None):
    """Limit a list

    Description
    -----------
    Limits a list of numbers by a lower and/or upper limit

    Parameters
    ----------
    x : numeric list
        A list of numeric elements which to compare to lower and upper
    lower : numeric
        A lower limit for `x`. If `None` (default) will use `min(x)`
    upper : numeric
        An upper limit for `x`. If `None` (default) will use `max(x)`

    Returns
    -------
    A numeric list

    Examples
    --------
    x = [3, 2, 1, 4, 5, -1, 4]
    limit(x)  # same as x
    limit(x, lower=0)
    limit(x, lower=1, upper=3)
    limit(x, upper=4)
    """
    x = as_list(x)

    if is_none(lower):
        lower = min(x)

    if is_none(upper):
        upper = max(x)

    if lower > upper:
        raise Exception("`lower` cannot be greater than `upper`")

    return [lower if i < lower else upper if i > upper else i for i in x]
| 20.089347 | 80 | 0.532672 | 804 | 5,846 | 3.837065 | 0.220149 | 0.016207 | 0.00778 | 0.010373 | 0.134198 | 0.098217 | 0.082982 | 0.036953 | 0.018152 | 0.018152 | 0 | 0.024963 | 0.314745 | 5,846 | 290 | 81 | 20.158621 | 0.745132 | 0.527711 | 0 | 0.044776 | 0 | 0 | 0.140367 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.149254 | false | 0 | 0.059701 | 0 | 0.358209 | 0.029851 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac9c7a81a5ddd55c91c09e96810730eb84eb076d | 810 | py | Python | url/views.py | blarmon/url_shortener | 036454e25f720e7f3a1e4c738b3417f5e0d7dc92 | [
"MIT"
] | null | null | null | url/views.py | blarmon/url_shortener | 036454e25f720e7f3a1e4c738b3417f5e0d7dc92 | [
"MIT"
] | 5 | 2018-12-31T22:13:38.000Z | 2021-06-10T21:04:56.000Z | url/views.py | blarmon/url_shortener | 036454e25f720e7f3a1e4c738b3417f5e0d7dc92 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from url.forms import URL_Form
from url.models import URL
# Create your views here.
def index(request):
    context = {}

    if request.method == 'POST':
        form = URL_Form(request.POST)
        if form.is_valid():
            new_url = form.save()
            request.session['new_object_id'] = new_url.id
        # post/redirect/get: always redirect back to the index after a POST
        return redirect('index')

    if 'new_object_id' in request.session:
        session_object = URL.objects.get(pk=request.session['new_object_id'])
        context.update({'session_object': session_object})

    form = URL_Form
    context.update({'form': form})
    return render(request, 'url/index.html', context)
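
# A minimal sketch (added; not part of this file) of how these views are
# typically wired up in the app's urls.py; the route patterns are
# illustrative assumptions, though the index view's redirect('index') above
# does require a route named 'index':
#
#     from django.urls import path
#     from url import views
#
#     urlpatterns = [
#         path('', views.index, name='index'),
#         path('<int:number>/', views.page_redirect, name='page_redirect'),
#     ]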
def page_redirect(request, number):
    redirect_url = URL.objects.get(pk=number).user_url
    return redirect(redirect_url) | 35.217391 | 77 | 0.677778 | 109 | 810 | 4.862385 | 0.348624 | 0.05283 | 0.062264 | 0.086792 | 0.09434 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.206173 | 810 | 23 | 78 | 35.217391 | 0.824261 | 0.028395 | 0 | 0 | 0 | 0 | 0.101781 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.15 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ac9e6046eedf93512beb9a61af904e57c7b7b2de | 3,437 | py | Python | tests/test_docs/helper.py | ejfitzgerald/agents-aea | 6411fcba8af2cdf55a3005939ae8129df92e8c3e | [
"Apache-2.0"
] | null | null | null | tests/test_docs/helper.py | ejfitzgerald/agents-aea | 6411fcba8af2cdf55a3005939ae8129df92e8c3e | [
"Apache-2.0"
] | null | null | null | tests/test_docs/helper.py | ejfitzgerald/agents-aea | 6411fcba8af2cdf55a3005939ae8129df92e8c3e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains helper function to extract code from the .md files."""
import re
import traceback
from typing import Dict
import pytest
def extract_code_blocks(filepath, filter=None):
    """Extract code blocks from .md files."""
    code_blocks = []
    with open(filepath, "r", encoding="utf-8") as f:
        while True:
            line = f.readline()
            if not line:
                # EOF
                break

            out = re.match("[^`]*```(.*)$", line)
            if out:
                if filter and filter.strip() != out.group(1).strip():
                    continue
                code_block = [f.readline()]
                while re.search("```", code_block[-1]) is None:
                    code_block.append(f.readline())

                code_blocks.append("".join(code_block[:-1]))
    return code_blocks
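
# Usage sketch for extract_code_blocks() (added example; the path is hypothetical):
#   blocks = extract_code_blocks("docs/quickstart.md", filter="python")
#   # -> a list of strings, one per ```python fenced block in the file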
def extract_python_code(filepath):
    """Removes the license part from the scripts"""
    python_str = ""
    with open(filepath, "r") as python_file:
        read_python_file = python_file.readlines()
        for i in range(21, len(read_python_file)):
            python_str += read_python_file[i]

    return python_str
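
# Note (added): extract_python_code() assumes the license header occupies the
# first 21 lines of each script, matching the header style at the top of this file.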
def read_md_file(filepath):
    """Reads an md file and returns the string."""
    with open(filepath, "r", encoding="utf-8") as md_file:
        md_file_str = md_file.read()
    return md_file_str
def compile_and_exec(code: str, locals_dict: Dict = None) -> Dict:
    """
    Compile and exec the code.

    :param code: the code to execute.
    :param locals_dict: the dictionary of local variables.
    :return: the dictionary of locals.
    """
    locals_dict = {} if locals_dict is None else locals_dict
    try:
        code_obj = compile(code, "fakemodule", "exec")
        exec(code_obj, locals_dict)  # nosec
    except Exception:
        pytest.fail(
            "The execution of the following code:\n{}\nfailed with error:\n{}".format(
                code, traceback.format_exc()
            )
        )
    return locals_dict
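
# Usage sketch for compile_and_exec() (added example): the returned dict
# contains the names defined by the executed code.
#   d = compile_and_exec("x = 1 + 1")
#   assert d["x"] == 2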
def compare_enum_classes(expected_enum_class, actual_enum_class):
    """Compare enum classes."""
    try:
        # do some pre-processing
        expected_pairs = sorted(map(lambda x: (x.name, x.value), expected_enum_class))
        actual_pairs = sorted(map(lambda x: (x.name, x.value), actual_enum_class))
        assert expected_pairs == actual_pairs, "{} != {}".format(
            expected_pairs, actual_pairs
        )
    except AssertionError:
        pytest.fail(
            "Expected enum {} is different from the actual one {}".format(
                expected_enum_class, actual_enum_class
            )
        )
| 33.368932 | 86 | 0.592959 | 426 | 3,437 | 4.638498 | 0.392019 | 0.035425 | 0.024292 | 0.02581 | 0.096154 | 0.096154 | 0.063765 | 0.063765 | 0.032389 | 0 | 0 | 0.007855 | 0.259238 | 3,437 | 102 | 87 | 33.696078 | 0.768264 | 0.336922 | 0 | 0.071429 | 0 | 0 | 0.074796 | 0 | 0 | 0 | 0 | 0 | 0.035714 | 1 | 0.089286 | false | 0 | 0.071429 | 0 | 0.232143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |