Dataset schema (one row per source file), fields in order:

hexsha string | size int64 | ext string | lang string
max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string
max_issues_* and max_forks_*: the same seven fields, keyed to issue and fork events respectively
content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64
qsc_code_*_quality_signal (float64 unless noted): num_words, num_chars, mean_word_length, frac_words_unique, frac_chars_top_{2,3,4}grams, frac_chars_dupe_{5..10}grams, frac_chars_replacement_symbols, frac_chars_digital, frac_chars_whitespace, size_file_byte, num_lines, num_chars_line_max, num_chars_line_mean, frac_chars_alphabet, frac_chars_comments, cate_xml_start, frac_lines_dupe_lines, cate_autogen, frac_lines_long_string, frac_chars_string_length, frac_chars_long_word_length, frac_lines_string_concat, cate_encoded_data, frac_chars_hex_words, frac_lines_prompt_comments, frac_lines_assert; qsc_codepython_*: cate_ast, frac_lines_func_ratio, cate_var_zero (bool), frac_lines_pass, frac_lines_import, frac_lines_simplefunc, score_lines_no_logic, frac_lines_print
the same qsc_code_* / qsc_codepython_* metrics repeated as raw int64/null columns
effective string | hits int64

hexsha: f8d470d1980749c03e842d69c111ae8c0604cde9 | size: 992 | ext: py | lang: Python
path: tests/pylint_plugins/test_assert_raises_without_msg.py | repo: L-Net-1992/mlflow @ a90574dbb730935c815ff41a0660b9a823b81630 | licenses: Apache-2.0
max_stars: null | max_issues: null | max_forks: null

import pytest
from tests.pylint_plugins.utils import create_message, extract_node, skip_if_pylint_unavailable

pytestmark = skip_if_pylint_unavailable()


@pytest.fixture(scope="module")
def test_case():
    import pylint.testutils
    from pylint_plugins import AssertRaisesWithoutMsg

    class TestAssertRaisesWithoutMsg(pylint.testutils.CheckerTestCase):
        CHECKER_CLASS = AssertRaisesWithoutMsg

    test_case = TestAssertRaisesWithoutMsg()
    test_case.setup_method()
    return test_case


def test_assert_raises_without_msg(test_case):
    node = extract_node("self.assertRaises(Exception)")
    with test_case.assertAddsMessages(create_message(test_case.CHECKER_CLASS.name, node)):
        test_case.walk(node)

    node = extract_node("self.assertRaises(Exception, msg='test')")
    with test_case.assertNoMessages():
        test_case.walk(node)

    node = extract_node("pandas.assertRaises(Exception)")
    with test_case.assertNoMessages():
        test_case.walk(node)

hexsha: f8d6b09688dbea2ed0259d01f1aa0504d9acbfdc | size: 821 | ext: py | lang: Python
path: bites/bite029.py | repo: ChidinmaKO/Chobe-bitesofpy @ 2f933e6c8877a37d1ce7ef54ea22169fc67417d3 | licenses: MIT
max_stars: null | max_issues: null | max_forks: 1 (2019-07-16)

def get_index_different_char(chars):
    alnum = []
    not_alnum = []
    for index, char in enumerate(chars):
        if str(char).isalnum():
            alnum.append(index)
        else:
            not_alnum.append(index)
    result = alnum[0] if len(alnum) < len(not_alnum) else not_alnum[0]
    return result


# tests
def test_wrong_char():
    inputs = (
        ['A', 'f', '.', 'Q', 2],
        ['.', '{', ' ^', '%', 'a'],
        [1, '=', 3, 4, 5, 'A', 'b', 'a', 'b', 'c'],
        ['=', '=', '', '/', '/', 9, ':', ';', '?', '¡'],
        list(range(1,9)) + ['}'] + list('abcde'),  # noqa E231
    )
    expected = [2, 4, 1, 5, 8]
    for arg, exp in zip(inputs, expected):
        err = f'get_index_different_char({arg}) should return index {exp}'
        assert get_index_different_char(arg) == exp, err
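
A one-line check against the first test case above (the function returns the index of whichever character class is in the minority):

print(get_index_different_char(['A', 'f', '.', 'Q', 2]))  # -> 2, the index of the lone non-alphanumeric '.'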

hexsha: f8da2f02f4840468e37f0eba92152ef522fab6ae | size: 2589 | ext: py | lang: Python
path: source/tree.py | repo: holderekt/regression-tree @ 130fe07262faea8681159092718310d9aefe9889 | licenses: MIT
max_stars: null | max_issues: null | max_forks: null

import utils as utl
import error_measures as err


# Regression Tree Node
class Node:
    def __init__(self, parent, node_id, index=None, value=None, examples=None, prediction=0):
        self.index = index
        self.id = node_id
        self.prediction = prediction
        self.value = value
        self.parent = parent
        self.examples = examples
        self.right = None
        self.left = None
        self.ssr = 0
        self.leaves = 0
        self.ssr_as_root = 0

    def is_leaf(self):
        if(self.right == None and self.left == None):
            return True
        return False

    def leafs_id(self):
        if(not self.is_leaf()):
            return self._leafs_search(self.left) + self._leafs_search(self.right)
        return [1]

    def n_leafs(self):
        return len(self.leafs_id())

    def _leafs_search(self, node):
        if node.is_leaf():
            return [node.id]
        return self._leafs_search(node.left) + self._leafs_search(node.right)

    def __str__(self):
        return str(self.id)


# Regression Tree
class Regression_Tree:
    def __init__(self, y_train, root):
        self.y = y_train
        self.root = root

    # Generate Prediction given a test example
    def predict(self, example, deleted=[]):
        current_node = self.root
        while(not current_node.is_leaf() and ((current_node in deleted) == False)):
            if(example[current_node.index] <= current_node.value):
                current_node = current_node.left
            else:
                current_node = current_node.right
        return current_node.prediction

    # Generate Sum Square Residuals of a given node on training data
    def node_ssr(self, node):
        ssr = 0
        for example in node.examples:
            ssr = ssr + pow((self.y[example] - node.prediction) , 2)
        return ssr

    def leafs_id(self):
        return self.root.leafs_id()

    def n_leafs(self):
        return len(self.leafs_id())

    def __str__(self):
        return self._print(self.root)

    def print_leaf(self, node):
        if(node.is_leaf()):
            print(len(node.examples))
        else:
            self.print_leaf(node.left)
            self.print_leaf(node.right)

    def _print(self, node):
        node_id = str(node.id)
        r_string = node_id + " " + str(node.ssr)
        if(not node.is_leaf()):
            r_string = r_string + "\nLeft : " + node_id + "\n" + self._print(node.left)
            r_string = r_string + "\nRight: " + node_id + "\n" + self._print(node.right)
        return r_string
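
A minimal sketch of how Node and Regression_Tree fit together: a hand-built stump that splits feature 0 at 5.0 (feature values, thresholds, and predictions here are made up for illustration):

# Root splits on feature 0 at value 5.0; leaves carry a prediction and the
# indices of their training examples.
y_train = [1.0, 2.0, 10.0]
root = Node(parent=None, node_id=1, index=0, value=5.0)
root.left = Node(parent=root, node_id=2, prediction=1.5, examples=[0, 1])
root.right = Node(parent=root, node_id=3, prediction=10.0, examples=[2])

tree = Regression_Tree(y_train, root)
print(tree.predict([3.0]))       # 3.0 <= 5.0 -> left leaf -> 1.5
print(tree.predict([7.0]))       # 7.0 >  5.0 -> right leaf -> 10.0
print(tree.node_ssr(root.left))  # (1.0-1.5)**2 + (2.0-1.5)**2 = 0.5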

hexsha: f8de8fc01b4a4af13fb95b42532f7a7fe7198cd6 | size: 225 | ext: py | lang: Python
path: loadbalanceRL/lib/__init__.py | repo: fqzhou/LoadBalanceControl-RL @ 689eec3b3b27e121aa45d2793e411f1863f6fc0b | licenses: MIT
max_stars: 11 (2018-10-29 to 2022-03-28) | max_issues: 1 (2022-03-01) | max_forks: 6 (2019-02-05 to 2020-09-04)

#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Contains core logic for Rainman2
"""
__author__ = 'Ari Saha (arisaha@icloud.com), Mingyang Liu(liux3941@umn.edu)'
__date__ = 'Wednesday, February 14th 2018, 11:42:09 am'

hexsha: f8dfe184dbac3633e171f2ced9f8b35d7607d947 | size: 717 | ext: py | lang: Python
path: openff/bespokefit/__init__.py | repo: openforcefield/bespoke-f @ 27b072bd09610dc8209429118d739e1f453edd61 | licenses: MIT
max_stars: 12 (2020-08-28 to 2021-11-17) | max_issues: 95 (2020-02-19 to 2021-12-02) | max_forks: 3 (2021-04-01 to 2021-04-13)

"""
BespokeFit
Creating bespoke parameters for individual molecules.
"""
import logging
import sys

from ._version import get_versions

versions = get_versions()
__version__ = versions["version"]
__git_revision__ = versions["full-revisionid"]
del get_versions, versions

# Silence verbose messages when running the CLI otherwise you can't read the output
# without seeing tens of 'Unable to load AmberTools' or don't import simtk warnings...
if sys.argv[0].endswith("openff-bespoke"):
    from openff.bespokefit.utilities.logging import DeprecationWarningFilter
    # if "openff-bespoke"
    logging.getLogger("openff.toolkit").setLevel(logging.ERROR)
    logging.getLogger().addFilter(DeprecationWarningFilter())
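
DeprecationWarningFilter is imported from openff.bespokefit.utilities.logging; the general shape of such a logging.Filter can be sketched as follows (illustrative only, not the package's actual implementation):

import logging

class SilenceDeprecationsFilter(logging.Filter):
    """Hypothetical stand-in: drop records that merely announce deprecations."""
    def filter(self, record):
        # Returning False suppresses the record; True lets it through.
        return "deprecat" not in record.getMessage().lower()

logging.getLogger().addFilter(SilenceDeprecationsFilter())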

hexsha: f8e61d9aa8b9610c3339494d4c960ec17ee4ba35 | size: 286 | ext: py | lang: Python
path: src_py/ui/identify_page.py | repo: Magier/Aetia @ 7f6045d99904b808e1201f445d0d10b0dce54c37 | licenses: MIT
max_stars: null | max_issues: null | max_forks: null

import streamlit as st
from ui.session_state import SessionState, get_state
from infer import ModelStage


def show(state: SessionState):
    st.header("identify")
    state = get_state()
    if state.model.stage < ModelStage.DEFINED:
        st.error("Please create the model first!")

hexsha: f8eb7f85d81c3a2dfe42f499dfc3e4db4b3a0b93 | size: 444 | ext: py | lang: Python
path: cpdb/trr/migrations/0002_alter_trr_subject_id_type.py | repo: invinst/CPDBv2_backend @ b4e96d620ff7a437500f525f7e911651e4a18ef9 | licenses: Apache-2.0
max_stars: 25 (2018-07-20 to 2021-07-15) | max_issues: 13 (2018-06-18 to 2022-02-10) | max_forks: 6 (2018-05-17 to 2020-11-17)

# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-03-06 04:00
from __future__ import unicode_literals
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('trr', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='trr',
            name='subject_id',
            field=models.PositiveIntegerField(null=True),
        ),
    ]

hexsha: f8ed0d2649220a6a4bd9e78f42580892fbc06d4f | size: 288 | ext: py | lang: Python
path: stdlib/csv/custom_dialect.py | repo: janbodnar/Python-Course @ 51705ab5a2adef52bcdb99a800e94c0d67144a38 | licenses: BSD-2-Clause
max_stars: 13 (2017-08-22 to 2021-07-29) | max_issues: 1 (2021-02-08) | max_forks: 17 (2018-08-13 to 2021-07-29)

#!/usr/bin/python
# custom_dialect.py
import csv
csv.register_dialect("hashes", delimiter="#")
f = open('items3.csv', 'w')
with f:
    writer = csv.writer(f, dialect="hashes")
    writer.writerow(("pencils", 2))
    writer.writerow(("plates", 1))
    writer.writerow(("books", 4))
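
Reading the file back goes through the same registered dialect (a small companion sketch; it assumes the "hashes" dialect from above has already been registered in the current session):

import csv

with open('items3.csv') as f:
    for row in csv.reader(f, dialect="hashes"):
        print(row)  # e.g. ['pencils', '2']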

hexsha: f8ed4e06a829662d9c548dd0be8745a5ea388df8 | size: 996 | ext: py | lang: Python
path: servicex/web/forms.py | repo: zorache/ServiceX_App @ 4479afa0f019bbdcd35812691e78abba442c9d37 | licenses: BSD-3-Clause
max_stars: 3 (2019-12-31 to 2021-03-19) | max_issues: 132 (2019-10-09 to 2022-03-30) | max_forks: 12 (2019-12-06 to 2021-09-23)

from typing import Optional
from flask_wtf import FlaskForm
from wtforms import StringField, SelectField, SubmitField
from wtforms.validators import DataRequired, Length, Email
from servicex.models import UserModel


class ProfileForm(FlaskForm):
    name = StringField('Full Name', validators=[DataRequired(), Length(0, 120)])
    email = StringField('Email', validators=[DataRequired(), Email()])
    institution = StringField('Institution', validators=[DataRequired()])
    experiment = SelectField('Experiment', validators=[DataRequired()],
                             choices=[("ATLAS", "ATLAS"), ("CMS", "CMS")],
                             default="ATLAS")
    submit = SubmitField('Save Profile')

    def __init__(self, user: Optional[UserModel] = None):
        super().__init__()
        if user:
            self.name.data = user.name
            self.email.data = user.email
            self.institution.data = user.institution
            self.experiment.data = user.experiment
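
Passing a UserModel to the constructor pre-fills the fields, which is how a profile page would typically use the form (an illustrative sketch; the view function and template name are assumptions, not part of ServiceX):

from flask import render_template

def profile_view(user):
    # Fields arrive pre-populated from the stored user record.
    form = ProfileForm(user=user)
    return render_template('profile.html', form=form)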

hexsha: f8f25cd96d67041f861381dbd21810aa553cccdc | size: 883 | ext: py | lang: Python
path: tests/assets/test_driver_errors.py | repo: CyrilLeMat/modelkit @ 2150ffe78ebb00e3302dac36ccb09e66becd5130 | licenses: MIT
max_stars: null | max_issues: null | max_forks: null

import os
import pytest
from modelkit.assets import errors
from tests.conftest import skip_unless


def _perform_driver_error_object_not_found(driver):
    with pytest.raises(errors.ObjectDoesNotExistError):
        driver.download_object("someasset", "somedestination")
    assert not os.path.isfile("somedestination")


def test_local_driver(local_assetsmanager):
    local_driver = local_assetsmanager.remote_assets_store.driver
    _perform_driver_error_object_not_found(local_driver)


@skip_unless("ENABLE_GCS_TEST", "True")
def test_gcs_driver(gcs_assetsmanager):
    gcs_driver = gcs_assetsmanager.remote_assets_store.driver
    _perform_driver_error_object_not_found(gcs_driver)


@skip_unless("ENABLE_S3_TEST", "True")
def test_s3_driver(s3_assetsmanager):
    s3_driver = s3_assetsmanager.remote_assets_store.driver
    _perform_driver_error_object_not_found(s3_driver)

hexsha: f8f2d8744612c8dd54640bd2fc3dd67702898911 | size: 2163 | ext: py | lang: Python
path: wiki/tests.py | repo: Jarquevious/makewiki @ a945da5ab7704042ef9d740987e23da19ec87267 | licenses: MIT
max_stars: null | max_issues: 4 (2020-06-06 to 2021-06-10) | max_forks: null

from django.test import TestCase
from django.contrib.auth.models import User
from wiki.models import Page
# Create your tests here.
# NOTE: these methods reference ``self.client`` and ``self.assert*``, so they
# must belong to a TestCase subclass; the wrapper class below is restored on
# that assumption (the class line is missing from the flattened source).
class PageTests(TestCase):

    def test_detail_page(self):
        """ Test to see if slug generated when saving a Page."""
        # Create a user and save to the database
        user = User.objects.create()
        user.save()

        # Create a page and save to the database
        page = Page(title="My Detail Test Page", content="details_test", author=user)
        page.save()

        # Check the generated slug matches what we expect
        slug = page.slug
        response = self.client.get(f'/{slug}/')
        self.assertEqual(response.status_code, 200)

        info = self.client.get('/')
        self.assertContains(info, 'makewiki', html=True)

    def test_edit_page(self):
        """Test edit page."""
        # Test data that will be displayed on the screen
        user = User.objects.create()
        user.save()

        page = Page.objects.create(title="My Test Page", content="edit_test", author=user)
        page.save()

        # Make a POST request to the form endpoint and capture the response
        post_data = {
            'title': 'Who',
            'content': 'Are you?',
            'author': user.id,
        }
        response = self.client.post('/form/', data=post_data)

        # Check if response is 200
        self.assertEqual(response.status_code, 200)

        # Check the number of pages passed to the template matches the number of pages in the database
        end = self.client.get('/')
        result = end.context['pages']
        self.assertQuerysetEqual(result, ['<Page: My Test Page>', '<Page: Test>'], ordered=False)

    def test_page_creation(self):
        # Create user object and save it
        user = User.objects.create()
        user.save()

        # Create a page
        page = Page.objects.create(title="The Test Page", content="edit_test", author=user)
        page.save()

        post_data = {
            'title': 'COVID19',
            'content': 'Mass Testing is Underway',
            'author': user.id
        }
        response = self.client.post('/form/', data=post_data)
        self.assertEqual(response.status_code, 302)

        page_object = Page.objects.get(title='COVID19')
        self.assertEqual(page_object.content, 'Mass Testing is Underway')

hexsha: f8f4dcd9fb78ee1924b9f50173ac949a710abcfd | size: 3190 | ext: py | lang: Python
path: testcases/school_bus.py | repo: wilsonsuen/av-testing @ a6967b4cb4e4ad6b10d041ffd3dc62188fccad81 | licenses: MIT
max_stars: null | max_issues: null | max_forks: null

import sys
import os
import glob
import json
from robot import rebot
from robot.api import TestSuite

sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

if __name__ == "__main__":
    main_suite = TestSuite('School Bus Scenario')
    main_suite.resource.imports.library('lib/simulation.py')
    testcase_paths = glob.glob('data/testdata/04_school_bus/*.json')
    testcase_paths.sort()
    for testcase_path in testcase_paths[110:113]:
        with open(testcase_path) as f:
            testdata = json.load(f)
        tags = list(testdata['testcase']['context'].values()) +\
               list(testdata['testcase']['input'].values())
        school_bus_test = main_suite.tests.create(testdata['testcase']['name'], tags=tags)
        school_bus_test.setup.config(name='Setup Scenario', args=[testcase_path])
        school_bus_test.body.create_keyword('Start Simulation')
        school_bus_test.body.create_keyword('Validate Result')
        school_bus_test.teardown.config(name='Test Case Teardown')
    main_suite.run(output='results/04_school_bus/output.xml')
    rebot('results/04_school_bus/output.xml',
          log="results/04_school_bus/log.html",
          report="results/04_school_bus/report.html")
"""
rebot --tagstatcombine "8:00AMANDSunny:8AM and Sunny(C1)" --tagstatcombine "8:00AMANDCloudy:8AM and Cloudy(C2)" --tagstatcombine "8:00AMANDRainning:8AM and Rainning(C3)" --tagstatcombine "8:00AMANDFoggy:8AM and Foggy(C4)" --tagstatcombine "12:00PMANDSunny:12PM and Sunny(C5)" --tagstatcombine "12:00PMANDCloudy:12PM and Cloudy(C6)" --tagstatcombine "12:00PMANDRainning:12PM and Rainning(C7)" --tagstatcombine "12:00PMANDFoggy:12PM and Foggy(C8)" --tagstatcombine "3:00PMANDSunny:3PM and Sunny(C9)" --tagstatcombine "3:00PMANDCloudy:3PM and Cloudy(C10)" --tagstatcombine "3:00PMANDRainning:3PM and Rainning(C11)" --tagstatcombine "3:00PMANDFoggy:3PM and Foggy(C12)" --tagstatcombine "5:00PMANDSunny:5PM and Sunny(C13)" --tagstatcombine "5:00PMANDCloudy:5PM and Cloudy(C14)" --tagstatcombine "5:00PMANDRainning:5PM and Ranining(C15)" --tagstatcombine "5:00PMANDFoggy:5PM and Foggy(C16)" --tagstatcombine "7:00PMANDSunny:7PM and Sunny(C17)" --tagstatcombine "7:00PMANDCloudy:7PM and Cloudy(C18)" --tagstatcombine "7:00PMANDRainning:7PM and Rainning(C19)" --tagstatcombine "7:00PMANDFoggy:7PM and Foggy(C20)" --tagstatcombine MovingANDBackward_lane:Moving\ and\ Backward\ lane\(I12\) --tagstatcombine MovingANDForward_lane:Moving\ and\ Forward\ lane\(I9\) --tagstatcombine LoadingANDBackward_lane:Loading\ and\ Backward\ lane\(I6\) --tagstatcombine LoadingANDForward_lane:Loading\ and\ Forward\ lane\(I3\) --tagstatcombine StopANDBackward_lane:Stop\ and\ Backward\ lane\(I18\) --tagstatcombine StopANDForward_lane:Stop\ and\ Forward\ lane\(I15\) --tagstatexclude Forward_lane --tagstatexclude Backward_lane --tagstatexclude Moving --tagstatexclude Loading --tagstatexclude Stop --tagstatexclude 8\:00AM --tagstatexclude 12\:00PM --tagstatexclude 3\:00PM --tagstatexclude 5\:00PM --tagstatexclude 7\:00PM --tagstatexclude Sunny --tagstatexclude Foggy --tagstatexclude Rainning --tagstatexclude Cloudy -r combined_report.html -l combined_log.html output.xml
""" | 91.142857 | 1,951 | 0.754232 | 399 | 3,190 | 5.894737 | 0.343358 | 0.042092 | 0.023384 | 0.030612 | 0.048469 | 0.048469 | 0 | 0 | 0 | 0 | 0 | 0.056318 | 0.109404 | 3,190 | 35 | 1,952 | 91.142857 | 0.771559 | 0 | 0 | 0 | 0 | 0 | 0.251623 | 0.130682 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.269231 | 0 | 0.269231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |

hexsha: f8f65ce2aa90b1532e983805cc84833de1433b1e | size: 1316 | ext: py | lang: Python
max_stars path: Python38/Lib/site-packages/PyInstaller/hooks/hook-PyQt4.py | repo: AXFS-H/Windows10Debloater @ ab5f8a8a8fb065bb40b7ddbd1df75563d8b8d13e | licenses: MIT | stars: 5 (2020-08-24 to 2022-02-07)
max_issues/max_forks path: PyInstaller/hooks/hook-PyQt4.py | repo: jeremysanders/pyinstaller @ 321b24f9a9a5978337735816b36ca6b4a90a2fb4 | licenses: Apache-2.0 | issues: 12 (2020-02-19 to 2022-03-30) | forks: 2 (2020-08-24 to 2021-12-23)

#-----------------------------------------------------------------------------
# Copyright (c) 2013-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
import os
from PyInstaller.utils.hooks import qt_menu_nib_dir
from PyInstaller.compat import getsitepackages, is_darwin, is_win
# On Windows system PATH has to be extended to point to the PyQt4 directory.
# The PySide directory contains Qt dlls. We need to avoid including different
# version of Qt libraries when there is installed another application (e.g. QtCreator)
if is_win:
    from PyInstaller.utils.win32.winutils import extend_system_path
    extend_system_path([os.path.join(x, 'PyQt4') for x in getsitepackages()])

hiddenimports = ['sip']

# For Qt to work on Mac OS X it is necessary to include directory qt_menu.nib.
# This directory contains some resource files necessary to run PyQt or PySide
# app.
if is_darwin:
    datas = [
        (qt_menu_nib_dir('PyQt4'), 'qt_menu.nib'),
    ]

hexsha: f8f83df34dfaf5ae52ea9e532bb035a4e1cce478 | size: 825 | ext: py | lang: Python
path: ex085.py | repo: EduotavioFonseca/ProgramasPython @ 8e0ef5f6f4239d1fe52321f8795b6573f6ff5130 | licenses: MIT
max_stars: null | max_issues: null | max_forks: null

# List inside a dictionary
# (comments and user-facing strings below are translated from the original
# Portuguese; identifiers and dict keys such as 'Jogador', 'Gols' and 'Total'
# are kept as-is because the code indexes them by name)
campeonato = dict()
gol = []
aux = 0

campeonato['Jogador'] = str(input('Enter the player name: '))
print()
partidas = int(input('How many matches did he play? '))
print()

for i in range(0, partidas):
    aux = int(input(f'How many goals in match {i + 1}? '))
    gol.append(aux)
    print()

campeonato['Gols'] = gol[:]
campeonato['Total'] = sum(gol)

print('=' * 55)
print()
print(campeonato)
print()
print('=' * 55)
print()

for k, v in campeonato.items():
    print(f'The field {k} has the value: {v}')
print()
print('=' * 55)

print(f'The player {campeonato["Jogador"]} played {partidas} matches.')
print()

for i in range(0, partidas):
    print(f'In match {i + 1} he scored {gol[i]} goal(s).')
print()

print(f'In total he scored {campeonato["Total"]} goals.')
print('=' * 55)

hexsha: 5d018e12b1c73bed5cd0c0150226e9cf4fc0779d | size: 50253 | ext: py | lang: Python
path: dev/tools/leveleditor/direct/showbase/ContainerLeakDetector.py | licenses: MIT
max_stars repo: CrankySupertoon01/Toontown-2 @ 60893d104528a8e7eb4aced5d0015f22e203466d | stars: 1 (2021-02-13)
max_issues/max_forks repo: CrankySupertoonArchive/Toontown-2 @ 60893d104528a8e7eb4aced5d0015f22e203466d | issues: 1 (2018-07-28 to 2018-07-30) | forks: 2 (2019-12-02 to 2021-02-13)

from pandac.PandaModules import PStatCollector
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.showbase.PythonUtil import Queue, invertDictLossless, makeFlywheelGen
from direct.showbase.PythonUtil import itype, serialNum, safeRepr, fastRepr
from direct.showbase.Job import Job
import types, weakref, random, __builtin__


def _createContainerLeak():
    def leakContainer(task=None):
        base = getBase()
        if not hasattr(base, 'leakContainer'):
            base.leakContainer = {}
        # use tuples as keys since they can't be weakref'd, and use an instance
        # since it can't be repr/eval'd
        # that will force the leak detector to hold a normal 'non-weak' reference
        class LeakKey:
            pass
        base.leakContainer[(LeakKey(),)] = {}
        # test the non-weakref object reference handling
        if random.random() < .01:
            key = random.choice(base.leakContainer.keys())
            ContainerLeakDetector.notify.debug(
                'removing reference to leakContainer key %s so it will be garbage-collected' % safeRepr(key))
            del base.leakContainer[key]
        taskMgr.doMethodLater(10, leakContainer, 'leakContainer-%s' % serialNum())
        if task:
            return task.done
    leakContainer()

def _createTaskLeak():
    leakTaskName = uniqueName('leakedTask')
    leakDoLaterName = uniqueName('leakedDoLater')
    def nullTask(task=None):
        return task.cont
    def nullDoLater(task=None):
        return task.done
    def leakTask(task=None, leakTaskName=leakTaskName):
        base = getBase()
        taskMgr.add(nullTask, uniqueName(leakTaskName))
        taskMgr.doMethodLater(1 << 31, nullDoLater, uniqueName(leakDoLaterName))
        taskMgr.doMethodLater(10, leakTask, 'doLeakTask-%s' % serialNum())
        if task:
            return task.done
    leakTask()

class NoDictKey:
    pass

class Indirection:
    """
    Represents the indirection that brings you from a container to an element of the container.
    Stored as a string to be used as part of an eval, or as a key to be looked up in a dict.
    Each dictionary dereference is individually eval'd since the dict key might have been
    garbage-collected
    TODO: store string components that are duplicates of strings in the actual system so that
    Python will keep one copy and reduce memory usage
    """

    def __init__(self, evalStr=None, dictKey=NoDictKey):
        # if this is a dictionary lookup, pass dictKey instead of evalStr
        self.evalStr = evalStr
        self.dictKey = NoDictKey
        # is the dictKey a weak reference?
        self._isWeakRef = False
        self._refCount = 0
        if dictKey is not NoDictKey:
            # if we can repr/eval the key, store it as an evalStr
            keyRepr = safeRepr(dictKey)
            useEval = False
            try:
                keyEval = eval(keyRepr)
                useEval = True
            except:
                pass
            if useEval:
                # check to make sure the eval succeeded
                if hash(keyEval) != hash(dictKey):
                    useEval = False
            if useEval:
                # eval/repr succeeded, store as an evalStr
                self.evalStr = '[%s]' % keyRepr
            else:
                try:
                    # store a weakref to the key
                    self.dictKey = weakref.ref(dictKey)
                    self._isWeakRef = True
                except TypeError, e:
                    ContainerLeakDetector.notify.debug('could not weakref dict key %s' % keyRepr)
                    self.dictKey = dictKey
                    self._isWeakRef = False

    def destroy(self):
        # re-entrant
        self.dictKey = NoDictKey

    def acquire(self):
        self._refCount += 1

    def release(self):
        self._refCount -= 1
        if self._refCount == 0:
            self.destroy()

    def isDictKey(self):
        # is this an indirection through a dictionary?
        return self.dictKey is not NoDictKey

    def _getNonWeakDictKey(self):
        if not self._isWeakRef:
            return self.dictKey
        else:
            key = self.dictKey()
            if key is None:
                return '<garbage-collected dict key>'
            return key

    def dereferenceDictKey(self, parentDict):
        # look ourselves up in parentDict
        key = self._getNonWeakDictKey()
        # objects in __builtin__ will have parentDict==None
        if parentDict is None:
            return key
        return parentDict[key]

    def getString(self, prevIndirection=None, nextIndirection=None):
        # return our contribution to the full name of an object
        instanceDictStr = '.__dict__'
        if self.evalStr is not None:
            # if we're an instance dict, skip over this one (obj.__dict__[keyName] == obj.keyName)
            if nextIndirection is not None and self.evalStr[-len(instanceDictStr):] == instanceDictStr:
                return self.evalStr[:-len(instanceDictStr)]
            # if the previous indirection was an instance dict, change our syntax from ['key'] to .key
            if prevIndirection is not None and prevIndirection.evalStr is not None:
                if prevIndirection.evalStr[-len(instanceDictStr):] == instanceDictStr:
                    return '.%s' % self.evalStr[2:-2]
            return self.evalStr
        # we're stored as a dict key
        keyRepr = safeRepr(self._getNonWeakDictKey())
        # if the previous indirection was an instance dict, change our syntax from ['key'] to .key
        if prevIndirection is not None and prevIndirection.evalStr is not None:
            if prevIndirection.evalStr[-len(instanceDictStr):] == instanceDictStr:
                return '.%s' % keyRepr
        return '[%s]' % keyRepr

    def __repr__(self):
        return self.getString()
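
# Illustration (editor's sketch, not part of the original module): an Indirection
# is a single hop in a name such as "base.cr.doId2do['12345']":
#   Indirection(evalStr='.cr').getString()    ->  '.cr'
#   Indirection(dictKey='12345').getString()  ->  "['12345']"  (key repr/eval'd OK)
# Keys that cannot be round-tripped through repr/eval are held as weakrefs, so
# the leak detector itself never keeps a dict key alive.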

class ObjectRef:
    """
    stores a reference to a container in a way that does not prevent garbage
    collection of the container if possible
    stored as a series of 'indirections' (obj.foo -> '.foo', dict[key] -> '[key]', etc.)
    """
    notify = directNotify.newCategory("ObjectRef")

    class FailedEval(Exception):
        pass

    def __init__(self, indirection, objId, other=None):
        self._indirections = []
        # are we building off of an existing ref?
        if other is not None:
            for ind in other._indirections:
                self._indirections.append(ind)
        # make sure we're not storing a reference to the actual object,
        # that could cause a memory leak
        assert type(objId) in (types.IntType, types.LongType)
        # prevent cycles (i.e. base.loader.base.loader)
        assert not self.goesThrough(objId=objId)
        self._indirections.append(indirection)
        # make sure our indirections don't get destroyed while we're using them
        for ind in self._indirections:
            ind.acquire()
        self.notify.debug(repr(self))

    def destroy(self):
        for indirection in self._indirections:
            indirection.release()
        del self._indirections

    def getNumIndirections(self):
        return len(self._indirections)

    def goesThroughGen(self, obj=None, objId=None):
        if obj is None:
            assert type(objId) in (types.IntType, types.LongType)
        else:
            objId = id(obj)
        o = None
        evalStr = ''
        curObj = None
        # make sure the indirections don't go away on us
        indirections = self._indirections
        for indirection in indirections:
            yield None
            indirection.acquire()
        for indirection in indirections:
            yield None
            if not indirection.isDictKey():
                # build up a string to be eval'd
                evalStr += indirection.getString()
            else:
                curObj = self._getContainerByEval(evalStr, curObj=curObj)
                if curObj is None:
                    raise FailedEval(evalStr)
                # try to look up this key in the curObj dictionary
                curObj = indirection.dereferenceDictKey(curObj)
                evalStr = ''
            yield None
            o = self._getContainerByEval(evalStr, curObj=curObj)
            if id(o) == objId:
                break
        for indirection in indirections:
            yield None
            indirection.release()
        yield id(o) == objId

    def goesThrough(self, obj=None, objId=None):
        # since we cache the ids involved in this reference,
        # this isn't perfect, for example if base.myObject is reassigned
        # to a different object after this Ref was created this would return
        # false, allowing a ref to base.myObject.otherObject.myObject
        for goesThrough in self.goesThroughGen(obj=obj, objId=objId):
            pass
        return goesThrough

    def _getContainerByEval(self, evalStr, curObj=None):
        if curObj is not None:
            # eval('curObj.foo.bar.someDict')
            evalStr = 'curObj%s' % evalStr
        else:
            # this eval is not based off of curObj, use the global __builtin__ namespace
            # put __builtin__ at the start if it's not already there
            bis = '__builtin__'
            if evalStr[:len(bis)] != bis:
                evalStr = '%s.%s' % (bis, evalStr)
        try:
            container = eval(evalStr)
        except NameError, ne:
            return None
        except AttributeError, ae:
            return None
        except KeyError, ke:
            return None
        return container

    def getContainerGen(self, getInstance=False):
        # try to get a handle on the container by eval'ing and looking things
        # up in dictionaries, depending on the type of each indirection
        # if getInstance is True, will return instance instead of instance dict
        #import pdb;pdb.set_trace()
        evalStr = ''
        curObj = None
        # make sure the indirections don't go away on us
        indirections = self._indirections
        for indirection in indirections:
            indirection.acquire()
        for indirection in indirections:
            yield None
            if not indirection.isDictKey():
                # build up a string to be eval'd
                evalStr += indirection.getString()
            else:
                curObj = self._getContainerByEval(evalStr, curObj=curObj)
                if curObj is None:
                    raise FailedEval(evalStr)
                # try to look up this key in the curObj dictionary
                curObj = indirection.dereferenceDictKey(curObj)
                evalStr = ''
        for indirection in indirections:
            yield None
            indirection.release()
        if getInstance:
            lenDict = len('.__dict__')
            if evalStr[-lenDict:] == '.__dict__':
                evalStr = evalStr[:-lenDict]
        # TODO: check that this is still the object we originally pointed to
        yield self._getContainerByEval(evalStr, curObj=curObj)

    def getEvalStrGen(self, getInstance=False):
        str = ''
        prevIndirection = None
        curIndirection = None
        nextIndirection = None
        # make sure the indirections don't go away on us
        indirections = self._indirections
        for indirection in indirections:
            indirection.acquire()
        for i in xrange(len(indirections)):
            yield None
            if i > 0:
                prevIndirection = indirections[i-1]
            else:
                prevIndirection = None
            curIndirection = indirections[i]
            if i < len(indirections)-1:
                nextIndirection = indirections[i+1]
            else:
                nextIndirection = None
            str += curIndirection.getString(prevIndirection=prevIndirection,
                                            nextIndirection=nextIndirection)
        if getInstance:
            lenDict = len('.__dict__')
            if str[-lenDict:] == '.__dict__':
                str = str[:-lenDict]
        for indirection in indirections:
            yield None
            indirection.release()
        yield str

    def getFinalIndirectionStr(self):
        prevIndirection = None
        if len(self._indirections) > 1:
            prevIndirection = self._indirections[-2]
        return self._indirections[-1].getString(prevIndirection=prevIndirection)

    def __repr__(self):
        for result in self.getEvalStrGen():
            pass
        return result
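
# Illustration (editor's sketch, not part of the original module): an ObjectRef
# chains Indirections into an eval'able path while storing only ids, strings and
# weakrefs, so the target container can still be garbage-collected:
#   ref1 = ObjectRef(Indirection(evalStr='base.__dict__'), id(base.__dict__))
#   ref2 = ObjectRef(Indirection(dictKey='loader'), id(base.loader), ref1)
#   repr(ref2)  ->  'base.loader'   (the '.__dict__' hop folds into dot syntax)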

class FindContainers(Job):
    """
    Explore the Python graph, looking for objects that support __len__()
    """
    def __init__(self, name, leakDetector):
        Job.__init__(self, name)
        self._leakDetector = leakDetector
        self._id2ref = self._leakDetector._id2ref
        # these hold objects that we should start traversals from often and not-as-often,
        # respectively
        self._id2baseStartRef = {}
        self._id2discoveredStartRef = {}
        # these are working copies so that our iterations aren't disturbed by changes to the
        # definitive ref sets
        self._baseStartRefWorkingList = ScratchPad(refGen=nullGen(),
                                                   source=self._id2baseStartRef)
        self._discoveredStartRefWorkingList = ScratchPad(refGen=nullGen(),
                                                         source=self._id2discoveredStartRef)
        self.notify = self._leakDetector.notify
        ContainerLeakDetector.addPrivateObj(self.__dict__)

        # set up the base containers, the ones that hold most objects
        ref = ObjectRef(Indirection(evalStr='__builtin__.__dict__'), id(__builtin__.__dict__))
        self._id2baseStartRef[id(__builtin__.__dict__)] = ref
        # container for objects that want to make sure they are found by
        # the object exploration algorithm, including objects that exist
        # just to measure things such as C++ memory usage, scene graph size,
        # framerate, etc. See LeakDetectors.py
        if not hasattr(__builtin__, "leakDetectors"):
            __builtin__.leakDetectors = {}
        ref = ObjectRef(Indirection(evalStr='leakDetectors'), id(leakDetectors))
        self._id2baseStartRef[id(leakDetectors)] = ref
        for i in self._addContainerGen(__builtin__.__dict__, ref):
            pass
        try:
            base
        except:
            pass
        else:
            ref = ObjectRef(Indirection(evalStr='base.__dict__'), id(base.__dict__))
            self._id2baseStartRef[id(base.__dict__)] = ref
            for i in self._addContainerGen(base.__dict__, ref):
                pass
        try:
            simbase
        except:
            pass
        else:
            ref = ObjectRef(Indirection(evalStr='simbase.__dict__'), id(simbase.__dict__))
            self._id2baseStartRef[id(simbase.__dict__)] = ref
            for i in self._addContainerGen(simbase.__dict__, ref):
                pass

    def destroy(self):
        ContainerLeakDetector.removePrivateObj(self.__dict__)
        Job.destroy(self)

    def getPriority(self):
        return Job.Priorities.Low

    @staticmethod
    def getStartObjAffinity(startObj):
        # how good of a starting object is this object for traversing the object graph?
        try:
            return len(startObj)
        except:
            return 1

    def _isDeadEnd(self, obj, objName=None):
        if type(obj) in (types.BooleanType, types.BuiltinFunctionType,
                         types.BuiltinMethodType, types.ComplexType,
                         types.FloatType, types.IntType, types.LongType,
                         types.NoneType, types.NotImplementedType,
                         types.TypeType, types.CodeType, types.FunctionType,
                         types.StringType, types.UnicodeType,
                         types.TupleType):
            return True
        # if it's an internal object, ignore it
        if id(obj) in ContainerLeakDetector.PrivateIds:
            return True
        # prevent crashes in objects that define __cmp__ and don't handle strings
        if type(objName) == types.StringType and objName in ('im_self', 'im_class'):
            return True
        try:
            className = obj.__class__.__name__
        except:
            pass
        else:
            # prevent infinite recursion in built-in containers related to methods
            if className == 'method-wrapper':
                return True
        return False

    def _hasLength(self, obj):
        return hasattr(obj, '__len__')

    def _addContainerGen(self, cont, objRef):
        contId = id(cont)
        # if this container is new, or the objRef repr is shorter than what we already have,
        # put it in the table
        if contId in self._id2ref:
            for existingRepr in self._id2ref[contId].getEvalStrGen():
                yield None
            for newRepr in objRef.getEvalStrGen():
                yield None
        if contId not in self._id2ref or len(newRepr) < len(existingRepr):
            if contId in self._id2ref:
                self._leakDetector.removeContainerById(contId)
            self._id2ref[contId] = objRef

    def _addDiscoveredStartRef(self, obj, ref):
        # we've discovered an object that can be used to start an object graph traversal
        objId = id(obj)
        if objId in self._id2discoveredStartRef:
            existingRef = self._id2discoveredStartRef[objId]
            if type(existingRef) not in (types.IntType, types.LongType):
                if (existingRef.getNumIndirections() >=
                    ref.getNumIndirections()):
                    # the ref that we already have is more concise than the new ref
                    return
        if objId in self._id2ref:
            if (self._id2ref[objId].getNumIndirections() >=
                ref.getNumIndirections()):
                # the ref that we already have is more concise than the new ref
                return
        storedItem = ref
        # if we already are storing a reference to this object, don't store a second reference
        if objId in self._id2ref:
            storedItem = objId
        self._id2discoveredStartRef[objId] = storedItem

    def run(self):
        try:
            # this yields a different set of start refs every time we start a new traversal
            # force creation of a new workingListSelector inside the while loop right off the bat
            workingListSelector = nullGen()
            # this holds the current step of the current traversal
            curObjRef = None
            while True:
                # yield up here instead of at the end, since we skip back to the
                # top of the while loop from various points
                yield None
                #import pdb;pdb.set_trace()
                if curObjRef is None:
                    # choose an object to start a traversal from
                    try:
                        startRefWorkingList = workingListSelector.next()
                    except StopIteration:
                        # do relative # of traversals on each set based on how many refs it contains
                        baseLen = len(self._baseStartRefWorkingList.source)
                        discLen = len(self._discoveredStartRefWorkingList.source)
                        minLen = float(max(1, min(baseLen, discLen)))
                        # this will cut down the traversals of the larger set by 2/3
                        minLen *= 3.
                        workingListSelector = flywheel([self._baseStartRefWorkingList, self._discoveredStartRefWorkingList],
                                                       [baseLen/minLen, discLen/minLen])
                        yield None
                        continue

                    # grab the next start ref from this sequence and see if it's still valid
                    while True:
                        yield None
                        try:
                            curObjRef = startRefWorkingList.refGen.next()
                            break
                        except StopIteration:
                            # we've run out of refs, grab a new set
                            if len(startRefWorkingList.source) == 0:
                                # ref set is empty, choose another
                                break
                            # make a generator that yields containers a # of times that is
                            # proportional to their length
                            for fw in makeFlywheelGen(
                                startRefWorkingList.source.values(),
                                countFunc=lambda x: self.getStartObjAffinity(x),
                                scale=.05):
                                yield None
                            startRefWorkingList.refGen = fw
                    if curObjRef is None:
                        # this ref set is empty, choose another
                        # the base set should never be empty (__builtin__ etc.)
                        continue

                    # do we need to go look up the object in _id2ref? sometimes we do that
                    # to avoid storing multiple redundant refs to a single item
                    if type(curObjRef) in (types.IntType, types.LongType):
                        startId = curObjRef
                        curObjRef = None
                        try:
                            for containerRef in self._leakDetector.getContainerByIdGen(startId):
                                yield None
                        except:
                            # ref is invalid
                            self.notify.debug('invalid startRef, stored as id %s' % startId)
                            self._leakDetector.removeContainerById(startId)
                            continue
                        curObjRef = containerRef

                try:
                    for curObj in curObjRef.getContainerGen():
                        yield None
                except:
                    self.notify.debug('lost current container, ref.getContainerGen() failed')
                    # that container is gone, try again
                    curObjRef = None
                    continue

                self.notify.debug('--> %s' % curObjRef)
                #import pdb;pdb.set_trace()

                # store a copy of the current objRef
                parentObjRef = curObjRef
                # if we hit a dead end, start over from another container
                curObjRef = None

                if hasattr(curObj, '__dict__'):
                    child = curObj.__dict__
                    hasLength = self._hasLength(child)
                    notDeadEnd = not self._isDeadEnd(child)
                    if hasLength or notDeadEnd:
                        # prevent cycles in the references (i.e. base.loader.base)
                        for goesThrough in parentObjRef.goesThroughGen(child):
                            # don't yield, container might lose this element
                            pass
                        if not goesThrough:
                            objRef = ObjectRef(Indirection(evalStr='.__dict__'),
                                               id(child), parentObjRef)
                            yield None
                            if hasLength:
                                for i in self._addContainerGen(child, objRef):
                                    yield None
                            if notDeadEnd:
                                self._addDiscoveredStartRef(child, objRef)
                                curObjRef = objRef
                    continue

                if type(curObj) is types.DictType:
                    key = None
                    attr = None
                    keys = curObj.keys()
                    # we will continue traversing the object graph via one key of the dict,
                    # choose it at random without taking a big chunk of CPU time
                    numKeysLeft = len(keys) + 1
                    for key in keys:
                        yield None
                        numKeysLeft -= 1
                        try:
                            attr = curObj[key]
                        except KeyError, e:
                            # this is OK because we are yielding during the iteration
                            self.notify.debug('could not index into %s with key %s' % (
                                parentObjRef, safeRepr(key)))
                            continue
                        hasLength = self._hasLength(attr)
                        notDeadEnd = False
                        # if we haven't picked the next ref, check if this one is a candidate
                        if curObjRef is None:
                            notDeadEnd = not self._isDeadEnd(attr, key)
                        if hasLength or notDeadEnd:
                            # prevent cycles in the references (i.e. base.loader.base)
                            for goesThrough in parentObjRef.goesThroughGen(curObj[key]):
                                # don't yield, container might lose this element
                                pass
                            if not goesThrough:
                                if curObj is __builtin__.__dict__:
                                    objRef = ObjectRef(Indirection(evalStr='%s' % key),
                                                       id(curObj[key]))
                                else:
                                    objRef = ObjectRef(Indirection(dictKey=key),
                                                       id(curObj[key]), parentObjRef)
                                yield None
                                if hasLength:
                                    for i in self._addContainerGen(attr, objRef):
                                        yield None
                                if notDeadEnd:
                                    self._addDiscoveredStartRef(attr, objRef)
                                    if curObjRef is None and random.randrange(numKeysLeft) == 0:
                                        curObjRef = objRef
                    del key
                    del attr
                    continue

                try:
                    childNames = dir(curObj)
                except:
                    pass
                else:
                    try:
                        index = -1
                        attrs = []
                        # NOTE: 'itr' is never assigned in this copy of the file;
                        # upstream Panda3D builds it with iter(curObj) before this loop.
                        while 1:
                            yield None
                            try:
                                attr = itr.next()
                            except:
                                # some custom classes don't do well when iterated
                                attr = None
                                break
                            attrs.append(attr)
                        # we will continue traversing the object graph via one attr,
                        # choose it at random without taking a big chunk of CPU time
                        numAttrsLeft = len(attrs) + 1
                        for attr in attrs:
                            yield None
                            index += 1
                            numAttrsLeft -= 1
                            hasLength = self._hasLength(attr)
                            notDeadEnd = False
                            if curObjRef is None:
                                notDeadEnd = not self._isDeadEnd(attr)
                            if hasLength or notDeadEnd:
                                # prevent cycles in the references (i.e. base.loader.base)
                                for goesThrough in parentObjRef.goesThrough(curObj[index]):
                                    # don't yield, container might lose this element
                                    pass
                                if not goesThrough:
                                    objRef = ObjectRef(Indirection(evalStr='[%s]' % index),
                                                       id(curObj[index]), parentObjRef)
                                    yield None
                                    if hasLength:
                                        for i in self._addContainerGen(attr, objRef):
                                            yield None
                                    if notDeadEnd:
                                        self._addDiscoveredStartRef(attr, objRef)
                                        if curObjRef is None and random.randrange(numAttrsLeft) == 0:
                                            curObjRef = objRef
                        del attr
                    except StopIteration, e:
                        pass
                    del itr
                    continue

        except Exception, e:
            print 'FindContainers job caught exception: %s' % e
            if __dev__:
                raise
        yield Job.Done

class CheckContainers(Job):
    """
    Job to check container sizes and find potential leaks; sub-job of ContainerLeakDetector
    """
    ReprItems = 5

    def __init__(self, name, leakDetector, index):
        Job.__init__(self, name)
        self._leakDetector = leakDetector
        self.notify = self._leakDetector.notify
        self._index = index
        ContainerLeakDetector.addPrivateObj(self.__dict__)

    def destroy(self):
        ContainerLeakDetector.removePrivateObj(self.__dict__)
        Job.destroy(self)

    def getPriority(self):
        return Job.Priorities.Normal

    def run(self):
        try:
            self._leakDetector._index2containerId2len[self._index] = {}
            ids = self._leakDetector.getContainerIds()
            # record the current len of each container
            for objId in ids:
                yield None
                try:
                    for result in self._leakDetector.getContainerByIdGen(objId):
                        yield None
                    container = result
                except Exception, e:
                    # this container no longer exists
                    if self.notify.getDebug():
                        for contName in self._leakDetector.getContainerNameByIdGen(objId):
                            yield None
                        self.notify.debug(
                            '%s no longer exists; caught exception in getContainerById (%s)' % (
                            contName, e))
                    self._leakDetector.removeContainerById(objId)
                    continue
                if container is None:
                    # this container no longer exists
                    if self.notify.getDebug():
                        for contName in self._leakDetector.getContainerNameByIdGen(objId):
                            yield None
                        self.notify.debug('%s no longer exists; getContainerById returned None' %
                                          contName)
                    self._leakDetector.removeContainerById(objId)
                    continue
                try:
                    cLen = len(container)
                except Exception, e:
                    # this container no longer exists
                    if self.notify.getDebug():
                        for contName in self._leakDetector.getContainerNameByIdGen(objId):
                            yield None
                        self.notify.debug(
                            '%s is no longer a container, it is now %s (%s)' %
                            (contName, safeRepr(container), e))
                    self._leakDetector.removeContainerById(objId)
                    continue
                self._leakDetector._index2containerId2len[self._index][objId] = cLen
            # compare the current len of each container to past lens
            if self._index > 0:
                idx2id2len = self._leakDetector._index2containerId2len
                for objId in idx2id2len[self._index]:
                    yield None
                    if objId in idx2id2len[self._index-1]:
                        diff = idx2id2len[self._index][objId] - idx2id2len[self._index-1][objId]
                        """
                        # this check is too spammy
                        if diff > 20:
                            if diff > idx2id2len[self._index-1][objId]:
                                minutes = (self._leakDetector._index2delay[self._index] -
                                           self._leakDetector._index2delay[self._index-1]) / 60.
                                name = self._leakDetector.getContainerNameById(objId)
                                if idx2id2len[self._index-1][objId] != 0:
                                    percent = 100. * (float(diff) / float(idx2id2len[self._index-1][objId]))
                                    try:
                                        for container in self._leakDetector.getContainerByIdGen(objId):
                                            yield None
                                    except:
                                        # TODO
                                        self.notify.debug('caught exception in getContainerByIdGen (1)')
                                    else:
                                        self.notify.warning(
                                            '%s (%s) grew %.2f%% in %.2f minutes (%s items at last measurement, current contents: %s)' % (
                                            name, itype(container), percent, minutes, idx2id2len[self._index][objId],
                                            fastRepr(container, maxLen=CheckContainers.ReprItems)))
                                    yield None
                        """
                        if (self._index > 2 and
                            objId in idx2id2len[self._index-2] and
                            objId in idx2id2len[self._index-3]):
                            diff2 = idx2id2len[self._index-1][objId] - idx2id2len[self._index-2][objId]
                            diff3 = idx2id2len[self._index-2][objId] - idx2id2len[self._index-3][objId]
                            if self._index <= 4:
                                if diff > 0 and diff2 > 0 and diff3 > 0:
                                    name = self._leakDetector.getContainerNameById(objId)
                                    try:
                                        for container in self._leakDetector.getContainerByIdGen(objId):
                                            yield None
                                    except:
                                        # TODO
                                        self.notify.debug('caught exception in getContainerByIdGen (2)')
                                    else:
                                        msg = ('%s (%s) consistently increased in size over the last '
                                               '3 periods (%s items at last measurement, current contents: %s)' %
                                               (name, itype(container), idx2id2len[self._index][objId],
                                                fastRepr(container, maxLen=CheckContainers.ReprItems)))
                                        self.notify.warning(msg)
                                        yield None
                            elif (objId in idx2id2len[self._index-4] and
                                  objId in idx2id2len[self._index-5]):
                                # if size has consistently increased over the last 5 checks,
                                # send out a warning
                                diff4 = idx2id2len[self._index-3][objId] - idx2id2len[self._index-4][objId]
                                diff5 = idx2id2len[self._index-4][objId] - idx2id2len[self._index-5][objId]
                                if diff > 0 and diff2 > 0 and diff3 > 0 and diff4 > 0 and diff5 > 0:
                                    name = self._leakDetector.getContainerNameById(objId)
                                    try:
                                        for container in self._leakDetector.getContainerByIdGen(objId):
                                            yield None
                                    except:
                                        # TODO
                                        self.notify.debug('caught exception in getContainerByIdGen (3)')
                                    else:
                                        msg = ('leak detected: %s (%s) consistently increased in size over the last '
                                               '5 periods (%s items at last measurement, current contents: %s)' %
                                               (name, itype(container), idx2id2len[self._index][objId],
                                                fastRepr(container, maxLen=CheckContainers.ReprItems)))
                                        self.notify.warning(msg)
                                        yield None
                                        messenger.send(self._leakDetector.getLeakEvent(), [container, name])
                                        if config.GetBool('pdb-on-leak-detect', 0):
                                            import pdb;pdb.set_trace()
                                            pass
        except Exception, e:
            print 'CheckContainers job caught exception: %s' % e
            if __dev__:
                raise
        yield Job.Done
class FPTObjsOfType(Job):
def __init__(self, name, leakDetector, otn, doneCallback=None):
Job.__init__(self, name)
self._leakDetector = leakDetector
self.notify = self._leakDetector.notify
self._otn = otn
self._doneCallback = doneCallback
self._ldde = self._leakDetector._getDestroyEvent()
self.accept(self._ldde, self._handleLDDestroy)
ContainerLeakDetector.addPrivateObj(self.__dict__)
def destroy(self):
self.ignore(self._ldde)
self._leakDetector = None
self._doneCallback = None
ContainerLeakDetector.removePrivateObj(self.__dict__)
Job.destroy(self)
def _handleLDDestroy(self):
self.destroy()
def getPriority(self):
return Job.Priorities.High
def run(self):
ids = self._leakDetector.getContainerIds()
try:
for id in ids:
getInstance = (self._otn.lower() not in 'dict')
yield None
try:
for container in self._leakDetector.getContainerByIdGen(
id, getInstance=getInstance):
yield None
except:
pass
else:
if hasattr(container, '__class__'):
cName = container.__class__.__name__
else:
cName = container.__name__
if (self._otn.lower() in cName.lower()):
try:
for ptc in self._leakDetector.getContainerNameByIdGen(
id, getInstance=getInstance):
yield None
except:
pass
else:
print 'GPTC(' + self._otn + '):' + self.getJobName() + ': ' + ptc
except Exception, e:
print 'FPTObjsOfType job caught exception: %s' % e
if __dev__:
raise
yield Job.Done
def finished(self):
if self._doneCallback:
self._doneCallback(self)
class FPTObjsNamed(Job):
def __init__(self, name, leakDetector, on, doneCallback=None):
Job.__init__(self, name)
self._leakDetector = leakDetector
self.notify = self._leakDetector.notify
self._on = on
self._doneCallback = doneCallback
self._ldde = self._leakDetector._getDestroyEvent()
self.accept(self._ldde, self._handleLDDestroy)
ContainerLeakDetector.addPrivateObj(self.__dict__)
def destroy(self):
self.ignore(self._ldde)
self._leakDetector = None
self._doneCallback = None
ContainerLeakDetector.removePrivateObj(self.__dict__)
Job.destroy(self)
def _handleLDDestroy(self):
self.destroy()
def getPriority(self):
return Job.Priorities.High
def run(self):
ids = self._leakDetector.getContainerIds()
try:
for id in ids:
yield None
try:
for container in self._leakDetector.getContainerByIdGen(id):
yield None
except:
pass
else:
name = self._leakDetector._id2ref[id].getFinalIndirectionStr()
if self._on.lower() in name.lower():
try:
for ptc in self._leakDetector.getContainerNameByIdGen(id):
yield None
except:
pass
else:
print 'GPTCN(' + self._on + '):' + self.getJobName() + ': ' + ptc
except Exception, e:
print 'FPTObjsNamed job caught exception: %s' % e
if __dev__:
raise
yield Job.Done
def finished(self):
if self._doneCallback:
self._doneCallback(self)
class PruneObjectRefs(Job):
"""
Job to destroy any container refs that are no longer valid.
Checks validity by asking for each container
"""
def __init__(self, name, leakDetector):
Job.__init__(self, name)
self._leakDetector = leakDetector
self.notify = self._leakDetector.notify
ContainerLeakDetector.addPrivateObj(self.__dict__)
def destroy(self):
ContainerLeakDetector.removePrivateObj(self.__dict__)
Job.destroy(self)
def getPriority(self):
return Job.Priorities.Normal
def run(self):
try:
ids = self._leakDetector.getContainerIds()
for id in ids:
yield None
try:
for container in self._leakDetector.getContainerByIdGen(id):
yield None
except:
# reference is invalid, remove it
self._leakDetector.removeContainerById(id)
_id2baseStartRef = self._leakDetector._findContainersJob._id2baseStartRef
ids = _id2baseStartRef.keys()
for id in ids:
yield None
try:
for container in _id2baseStartRef[id].getContainerGen():
yield None
except:
# reference is invalid, remove it
del _id2baseStartRef[id]
_id2discoveredStartRef = self._leakDetector._findContainersJob._id2discoveredStartRef
ids = _id2discoveredStartRef.keys()
for id in ids:
yield None
try:
for container in _id2discoveredStartRef[id].getContainerGen():
yield None
except:
# reference is invalid, remove it
del _id2discoveredStartRef[id]
except Exception, e:
print 'PruneObjectRefs job caught exception: %s' % e
if __dev__:
raise
yield Job.Done
class ContainerLeakDetector(Job):
"""
Low-priority Python object-graph walker that looks for leaking containers.
To reduce memory usage, this does a random walk of the Python objects to
discover containers rather than keep a set of all visited objects; it may
visit the same object many times but eventually it will discover every object.
Checks container sizes at ever-increasing intervals.
"""
notify = directNotify.newCategory("ContainerLeakDetector")
# set of containers that should not be examined
PrivateIds = set()
def __init__(self, name, firstCheckDelay = None):
Job.__init__(self, name)
self._serialNum = serialNum()
self._findContainersJob = None
self._checkContainersJob = None
self._pruneContainersJob = None
if firstCheckDelay is None:
firstCheckDelay = 60. * 15.
# divide by two, since the first check just takes length measurements and
# doesn't check for leaks
self._nextCheckDelay = firstCheckDelay/2.
self._checkDelayScale = config.GetFloat('leak-detector-check-delay-scale', 1.5)
self._pruneTaskPeriod = config.GetFloat('leak-detector-prune-period', 60. * 30.)
# main dict of id(container)->containerRef
self._id2ref = {}
# storage for results of check-container job
self._index2containerId2len = {}
self._index2delay = {}
if config.GetBool('leak-container', 0):
_createContainerLeak()
if config.GetBool('leak-tasks', 0):
_createTaskLeak()
# don't check our own tables for leaks
ContainerLeakDetector.addPrivateObj(ContainerLeakDetector.PrivateIds)
ContainerLeakDetector.addPrivateObj(self.__dict__)
self.setPriority(Job.Priorities.Min)
jobMgr.add(self)
def destroy(self):
messenger.send(self._getDestroyEvent())
self.ignoreAll()
if self._pruneContainersJob is not None:
jobMgr.remove(self._pruneContainersJob)
self._pruneContainersJob = None
if self._checkContainersJob is not None:
jobMgr.remove(self._checkContainersJob)
self._checkContainersJob = None
jobMgr.remove(self._findContainersJob)
self._findContainersJob = None
del self._id2ref
del self._index2containerId2len
del self._index2delay
def _getDestroyEvent(self):
# sent when leak detector is about to be destroyed
return 'cldDestroy-%s' % self._serialNum
def getLeakEvent(self):
# sent when a leak is detected
        # passes the leaking container and its name as arguments
        # (see CheckContainers.run)
return 'containerLeakDetected-%s' % self._serialNum
@classmethod
def addPrivateObj(cls, obj):
cls.PrivateIds.add(id(obj))
@classmethod
def removePrivateObj(cls, obj):
cls.PrivateIds.remove(id(obj))
def _getCheckTaskName(self):
return 'checkForLeakingContainers-%s' % self._serialNum
def _getPruneTaskName(self):
return 'pruneLeakingContainerRefs-%s' % self._serialNum
def getContainerIds(self):
return self._id2ref.keys()
def getContainerByIdGen(self, id, **kwArgs):
# return a generator to look up a container
return self._id2ref[id].getContainerGen(**kwArgs)
def getContainerById(self, id):
for result in self._id2ref[id].getContainerGen():
pass
return result
def getContainerNameByIdGen(self, id, **kwArgs):
return self._id2ref[id].getEvalStrGen(**kwArgs)
def getContainerNameById(self, id):
if id in self._id2ref:
return repr(self._id2ref[id])
return '<unknown container>'
def removeContainerById(self, id):
if id in self._id2ref:
self._id2ref[id].destroy()
del self._id2ref[id]
def run(self):
# start looking for containers
self._findContainersJob = FindContainers(
'%s-findContainers' % self.getJobName(), self)
jobMgr.add(self._findContainersJob)
self._scheduleNextLeakCheck()
self._scheduleNextPruning()
while True:
yield Job.Sleep
def getPathsToContainers(self, name, ot, doneCallback=None):
j = FPTObjsOfType(name, self, ot, doneCallback)
jobMgr.add(j)
return j
def getPathsToContainersNamed(self, name, on, doneCallback=None):
j = FPTObjsNamed(name, self, on, doneCallback)
jobMgr.add(j)
return j
def _scheduleNextLeakCheck(self):
taskMgr.doMethodLater(self._nextCheckDelay, self._checkForLeaks,
self._getCheckTaskName())
# delay between checks
# fib: 1 1 2 3 5 8 13 21 34 55 89
# * 2.: 1 2 4 8 16 32 64 128 256 512 1024
# * 1.5: 1 1.5 2.3 3.4 5.1 7.6 11.4 17.1 25.6 38.4 57.7
#
# delay from job start
# fib: 1 2 4 7 12 20 33 54 88 143 232
# * 2.: 1 3 7 15 31 63 127 255 511 1023 2047
# * 1.5: 1 2.5 4.75 8.1 13.2 20.8 32.2 49.3 74.9 113.3 171
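        # in general the nth check fires (firstCheckDelay/2) * scale**n seconds
        # after the previous one, so the total elapsed time grows geometrically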
self._nextCheckDelay = self._nextCheckDelay * self._checkDelayScale
def _checkForLeaks(self, task=None):
self._index2delay[len(self._index2containerId2len)] = self._nextCheckDelay
self._checkContainersJob = CheckContainers(
'%s-checkForLeaks' % self.getJobName(), self, len(self._index2containerId2len))
self.acceptOnce(self._checkContainersJob.getFinishedEvent(),
self._scheduleNextLeakCheck)
jobMgr.add(self._checkContainersJob)
return task.done
def _scheduleNextPruning(self):
taskMgr.doMethodLater(self._pruneTaskPeriod, self._pruneObjectRefs,
self._getPruneTaskName())
def _pruneObjectRefs(self, task=None):
self._pruneContainersJob = PruneObjectRefs(
'%s-pruneObjectRefs' % self.getJobName(), self)
self.acceptOnce(self._pruneContainersJob.getFinishedEvent(),
self._scheduleNextPruning)
jobMgr.add(self._pruneContainersJob)
return task.done
| 44.159051 | 138 | 0.528804 | 4,653 | 50,253 | 5.585859 | 0.155599 | 0.03078 | 0.016082 | 0.008618 | 0.37209 | 0.326921 | 0.305452 | 0.282675 | 0.272864 | 0.243661 | 0 | 0.012192 | 0.404274 | 50,253 | 1,137 | 139 | 44.197889 | 0.855998 | 0.128032 | 0 | 0.505814 | 0 | 0 | 0.037967 | 0.004393 | 0 | 0 | 0 | 0.003518 | 0.003488 | 0 | null | null | 0.026744 | 0.00814 | null | null | 0.00814 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5d03e84b9fba295a1596df9171a5466ae68073d3 | 415 | py | Python | django_orm/sports_orm/leagues/migrations/0002_auto_20161031_1620.py | gfhuertac/coding_dojo_python | 4d17bb63fb2b9669216a0f60326d4a4b9055af7e | [
"MIT"
] | null | null | null | django_orm/sports_orm/leagues/migrations/0002_auto_20161031_1620.py | gfhuertac/coding_dojo_python | 4d17bb63fb2b9669216a0f60326d4a4b9055af7e | [
"MIT"
] | 6 | 2020-06-06T01:50:21.000Z | 2022-02-10T11:33:02.000Z | django_orm/sports_orm/leagues/migrations/0002_auto_20161031_1620.py | gfhuertac/coding_dojo_python | 4d17bb63fb2b9669216a0f60326d4a4b9055af7e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-31 23:20
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('leagues', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='team',
old_name='city',
new_name='location',
),
]
| 19.761905 | 48 | 0.592771 | 45 | 415 | 5.266667 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.070946 | 0.286747 | 415 | 20 | 49 | 20.75 | 0.72973 | 0.163855 | 0 | 0 | 1 | 0 | 0.101744 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5d15eae6d6d420d8166df518e95a6f5df2ba41f1 | 2,619 | py | Python | main.py | showtimesynergy/mojify | 8c012730b9f56d6e7e2003e8db99669516f4e027 | [
"BSD-2-Clause"
] | null | null | null | main.py | showtimesynergy/mojify | 8c012730b9f56d6e7e2003e8db99669516f4e027 | [
"BSD-2-Clause"
] | null | null | null | main.py | showtimesynergy/mojify | 8c012730b9f56d6e7e2003e8db99669516f4e027 | [
"BSD-2-Clause"
] | null | null | null | from PIL import Image
import csv
from ast import literal_eval as make_tuple
from math import sqrt
import argparse
import os.path
def load_img(image):
# load an image as a PIL object
im = Image.open(image).convert('RGBA')
return im
def color_distance(c_tuple1, c_tuple2):
# calculate the color distance between two rgb tuples
red_mean = (c_tuple1[0] + c_tuple2[0]) / 2
red = c_tuple1[0] - c_tuple2[0]
green = c_tuple1[1] - c_tuple2[1]
blue = c_tuple1[2] - c_tuple2[2]
delta = (2 + (red_mean / 256)) * (red ** 2)
delta += (4 * (green ** 2))
delta += (2 + ((255 - red_mean) / 256)) * (blue ** 2)
delta = sqrt(delta)
return delta
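# The weighting above is the well-known "redmean" approximation of perceptual
# RGB distance: identical colors give 0 and the value grows with perceived
# difference, e.g. color_distance((255, 0, 0), (254, 0, 0)) is tiny while
# color_distance((255, 0, 0), (0, 0, 255)) is large.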
def write_out(text_matrix):
# write out emoji grid to txt file
with open('out.txt', '+w', encoding='utf-8') as out:
for line in text_matrix:
line_out = ''
for char in line:
# TODO: ZWJ support
                if char is None:
                    # transparent pixel: pad with an em quad + six-per-em
                    # space, roughly the width of one emoji glyph
                    line_out += '\u2001\u2006'
else:
char_code = '0x' + char
char_code = int(char_code, base=16)
line_out += chr(char_code)
out.writelines(line_out + '\n')
def gen_matrix(pix_data):
# generate unicode data from colors
pix = pix_data.load()
emoji_grid = []
for y in range(0, size[1]):
emoji_grid.append([])
for x in range(0, size[0]):
pixel = pix[x, y]
best_delta = float('Inf')
for entry in emoji_list:
emoji_color = entry[1]
if pixel[3] == 0:
best = None
else:
delta = color_distance(emoji_color, pixel)
if delta < best_delta:
best = entry[0]
best_delta = delta
emoji_grid[-1].append(best)
return emoji_grid
def handle_arguments():
parser = argparse.ArgumentParser(
description='Represent an image using emoji'
)
parser.add_argument('image', help='image to be processed')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = handle_arguments()
path = args.image
emoji_list = []
with open('proc.csv') as raw_list:
emoji_list = []
reader = csv.reader(raw_list)
raw_list = list(reader)
for entry in raw_list:
emoji_list.append([entry[0], make_tuple(entry[1])])
image = load_img(path)
size = image.size
emoji_grid = gen_matrix(image)
write_out(emoji_grid)
print('Output in out.txt')
| 29.426966 | 63 | 0.557083 | 349 | 2,619 | 3.988539 | 0.352436 | 0.045259 | 0.011494 | 0.012931 | 0.022989 | 0.022989 | 0 | 0 | 0 | 0 | 0 | 0.032702 | 0.334479 | 2,619 | 88 | 64 | 29.761364 | 0.765921 | 0.063383 | 0 | 0.055556 | 0 | 0 | 0.051492 | 0 | 0 | 0 | 0 | 0.011364 | 0 | 1 | 0.069444 | false | 0 | 0.083333 | 0 | 0.208333 | 0.013889 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5d17411291af35f334b84d9eb9a01839cc0b1a31 | 340 | py | Python | YourJobAidApi/migrations/0019_remove_category_count_post.py | rayhanrock/django-yourjobaid-api | 17751dac5a298998aeecf7a70b79792f8311b9b2 | [
"MIT"
] | 1 | 2020-07-08T10:57:37.000Z | 2020-07-08T10:57:37.000Z | YourJobAidApi/migrations/0019_remove_category_count_post.py | rayhanrock/django-yourjobaid-api | 17751dac5a298998aeecf7a70b79792f8311b9b2 | [
"MIT"
] | 6 | 2021-04-08T20:23:17.000Z | 2021-09-22T18:58:16.000Z | YourJobAidApi/migrations/0019_remove_category_count_post.py | rayhanrock/django-yourjobaid-api | 17751dac5a298998aeecf7a70b79792f8311b9b2 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-04-16 23:10
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('YourJobAidApi', '0018_category_count_post'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='count_post',
),
]
| 20 | 54 | 0.608824 | 36 | 340 | 5.611111 | 0.777778 | 0.089109 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.077869 | 0.282353 | 340 | 16 | 55 | 21.25 | 0.75 | 0.132353 | 0 | 0 | 1 | 0 | 0.187713 | 0.081911 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5d252f7a220679f8642989c387a00db59609427b | 3,194 | py | Python | core/formulas.py | mike006322/PolynomialCalculator | bf56b0e773a3461ab2aa958d0d90e08f80a4d201 | [
"MIT"
] | null | null | null | core/formulas.py | mike006322/PolynomialCalculator | bf56b0e773a3461ab2aa958d0d90e08f80a4d201 | [
"MIT"
] | null | null | null | core/formulas.py | mike006322/PolynomialCalculator | bf56b0e773a3461ab2aa958d0d90e08f80a4d201 | [
"MIT"
] | null | null | null | def solve(polynomial):
"""
input is polynomial
if more than one variable, returns 'too many variables'
    looks for a formula to apply based on the degree
    returns the zeros of the polynomial
"""
if len(polynomial.term_matrix[0]) > 2:
return 'too many variables'
elif len(polynomial.term_matrix[0]) == 1:
return polynomial.term_matrix[1][0]
elif len(polynomial.term_matrix[0]) == 2:
degree = polynomial.term_matrix[1][1]
if degree == 1:
if len(polynomial.term_matrix) == 2:
return 0
else:
return -polynomial.term_matrix[2][0]/polynomial.term_matrix[1][0]
if degree == 2:
ans = quadratic_formula(polynomial)
return ans
if degree > 2:
return Durand_Kerner(polynomial)
def quadratic_formula(polynomial):
"""
input is single-variable polynomial of degree 2
returns zeros
"""
if len(polynomial.term_matrix) == 3:
if polynomial.term_matrix[2][1] == 1:
a, b = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0]
return 0, -b/a
a, c = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0]
return (-c/a)**.5, -(-c/a)**.5
if len(polynomial.term_matrix) == 2:
        a, b, c = polynomial.term_matrix[1][0], 0, 0
elif len(polynomial.term_matrix) == 3:
a, b, c = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0], 0
else:
a, b, c = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0], polynomial.term_matrix[3][0]
    ans1 = (-b + (b**2 - 4*a*c)**.5)/(2*a)
    ans2 = (-b - (b**2 - 4*a*c)**.5)/(2*a)
if ans1 == ans2:
return ans1
return ans1, ans2
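# sanity check (standard algebra): x**2 - 5x + 6 factors as (x-2)(x-3),
# so a, b, c = 1, -5, 6 gives roots 3.0 and 2.0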
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0001):
"""
returns boolean whether abs(a-b) is less than abs_total or rel_total*max(a, b)
"""
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
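# e.g. isclose(1.00005, 1.0) -> True (within abs_tol), and
# isclose(1e9, 1e9 + 1) -> True (within rel_tol)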
def Durand_Kerner(f):
"""
input polynomial
returns numerical approximation of all complex roots
"""
roots = []
for i in range(f.degree()):
roots.append((0.4 + 0.9j)**i)
diff = 1
diff_temp = 0
def iterate():
nonlocal roots
new_roots = roots[:]
for i in range(len(roots)):
q = 1
for j, root in enumerate(roots):
if j != i:
q *= roots[i] - root
new_roots[i] = roots[i] - f(roots[i])/q
nonlocal diff
nonlocal diff_temp
diff_temp = diff
diff = 0
for i in range(len(roots)):
diff += abs(roots[i] - new_roots[i])
roots = new_roots
while diff > .00000001 and not isclose(diff_temp, diff):
iterate()
for i in range(len(roots)):
if isclose(roots[i].real, round(roots[i].real)):
temp = round(roots[i].real)
roots[i] -= roots[i].real
roots[i] += temp
if isclose(roots[i].imag, round(roots[i].imag)):
temp = round(roots[i].imag)
roots[i] -= roots[i].imag*1j
roots[i] += temp*1j
return roots
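# Each pass applies the simultaneous Weierstrass/Durand-Kerner update
# r_i <- r_i - f(r_i) / prod_{j != i} (r_i - r_j), starting from the
# standard (0.4 + 0.9j)**i seeds; e.g. for f = x**2 - 1 the two estimates
# converge to 1 and -1 (in some order).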
if __name__ == '__main__':
pass
| 31.623762 | 106 | 0.556669 | 456 | 3,194 | 3.79386 | 0.201754 | 0.178035 | 0.254335 | 0.09711 | 0.367052 | 0.318497 | 0.17341 | 0.17341 | 0.123121 | 0.112717 | 0 | 0.041723 | 0.302129 | 3,194 | 100 | 107 | 31.94 | 0.73441 | 0.116781 | 0 | 0.09589 | 0 | 0 | 0.009489 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068493 | false | 0.013699 | 0 | 0 | 0.232877 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5d29262ef748030566b97eaf9b5c7c914c6c44fd | 1,811 | py | Python | test/testers/winforms/scrollbar/__init__.py | ABEMBARKA/monoUI | 5fda266ad2db8f89580a40b525973d86cd8de939 | [
"MIT"
] | 1 | 2019-08-13T15:22:12.000Z | 2019-08-13T15:22:12.000Z | test/testers/winforms/scrollbar/__init__.py | ABEMBARKA/monoUI | 5fda266ad2db8f89580a40b525973d86cd8de939 | [
"MIT"
] | null | null | null | test/testers/winforms/scrollbar/__init__.py | ABEMBARKA/monoUI | 5fda266ad2db8f89580a40b525973d86cd8de939 | [
"MIT"
] | 1 | 2019-08-13T15:22:17.000Z | 2019-08-13T15:22:17.000Z |
##############################################################################
# Written by: Cachen Chen <cachen@novell.com>
# Date: 08/06/2008
# Description: Application wrapper for scrollbar.py
# Used by the scrollbar-*.py tests
##############################################################################
'Application wrapper for scrollbar'
from strongwind import *
from os.path import exists
from sys import path
def launchScrollBar(exe=None):
'Launch ScrollBar with accessibility enabled and return a scrollbar object. Log an error and return None if something goes wrong'
if exe is None:
# make sure we can find the sample application
harness_dir = path[0]
i = harness_dir.rfind("/")
j = harness_dir[:i].rfind("/")
uiaqa_path = harness_dir[:j]
if uiaqa_path is None:
raise IOError, "When launching an application you must provide the "\
"full path or set the\nUIAQA_HOME environment "\
"variable."
exe = '%s/samples/winforms/scrollbar.py' % uiaqa_path
    if not exists(exe):
        raise IOError, "%s does not exist" % exe
args = [exe]
(app, subproc) = cache.launchApplication(args=args, name='ipy', wait=config.LONG_DELAY)
scrollbar = ScrollBar(app, subproc)
cache.addApplication(scrollbar)
scrollbar.scrollBarFrame.app = scrollbar
return scrollbar
# class to represent the application
class ScrollBar(accessibles.Application):
#checkShowing=False
def __init__(self, accessible, subproc=None):
'Get a reference to the scrollBar window'
super(ScrollBar, self).__init__(accessible, subproc)
self.findFrame(re.compile('^ScrollBar control'), logName='Scroll Bar')
| 32.339286 | 134 | 0.602982 | 204 | 1,811 | 5.269608 | 0.54902 | 0.037209 | 0.03907 | 0.055814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006442 | 0.228603 | 1,811 | 55 | 135 | 32.927273 | 0.763064 | 0.145224 | 0 | 0 | 0 | 0.034483 | 0.280029 | 0.023155 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.103448 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5d3efa01c738d69c4c33cd7d548df41216a056d7 | 3,738 | py | Python | iba_scrape.py | wmwilcox/mix-mind | 02da016f314bb5f30f267f1f46c67c6d4a4c370c | [
"Apache-2.0"
] | 1 | 2021-05-02T19:50:44.000Z | 2021-05-02T19:50:44.000Z | iba_scrape.py | wmwilcox/mix-mind | 02da016f314bb5f30f267f1f46c67c6d4a4c370c | [
"Apache-2.0"
] | 34 | 2018-08-07T13:09:29.000Z | 2021-05-13T17:25:18.000Z | iba_scrape.py | wmwilcox/mix-mind | 02da016f314bb5f30f267f1f46c67c6d4a4c370c | [
"Apache-2.0"
] | 4 | 2019-02-14T04:17:24.000Z | 2021-05-14T15:33:39.000Z | #! /usr/bin/env python
# scrape the IBA pages for cocktail lists
import sys
import xml.etree.ElementTree as ET
from lxml import html
import requests
from pprint import pprint
from collections import OrderedDict
import json
# pick which list to scrape by leaving the desired url/jsonfile pair last;
# the earlier assignments below are overridden
url = 'http://iba-world.com/new-era-drinks/'
jsonfile = 'IBA_new_era_drinks.json'
url = 'http://iba-world.com/iba-cocktails/'
jsonfile = 'IBA_unforgettables.json'
url = 'http://iba-world.com/contemporary-classics/'
jsonfile = 'IBA_contemporary_classics.json'
jsonfile = 'IBA_.json'
recipes = OrderedDict()
page = requests.get(url)
tree = html.fromstring(page.content)
items = tree.findall(".//div[@class='blog_list_item_lists']")
for item in items:
name = item.find(".//h3").text
name = ' '.join([word.capitalize() for word in name.split()])
body = item.find(".//div[@class='blog_text']")
recipes[name] = {'unit': 'cL'}
print name
children = [c for c in body.iterchildren()]
n = 0
if children[1].tag == 'ul':
n = -1
style = children[n+1].text
if style is None:
try:
style = children[n+1].find('span').text
except:
pass
recipes[name]['style'] = style
recipes[name]['ingredients'] = OrderedDict()
if not children[n+2].tag == 'ul':
print "adapting <p> ingredients:", children[n+2].text
ing_list = ET.tostring(children[n+2]).lstrip('<p>').rstrip('</p>\n').split('<br />\n')
else:
ing_list = [i.text for i in children[n+2].iterchildren()]
for ingredient in ing_list:
if len(ingredient.split()) == 1:
recipes[name]['ingredients'][ingredient.lower()] = ''
continue
unit = ingredient.split()[1].lower()
if unit == 'cl':
recipes[name]['ingredients'][' '.join([w.lower() for w in ingredient.split()[2:]])] = float(ingredient.split()[0])
elif unit == 'bar' or unit == 'to': # bar spoon
recipes[name]['ingredients'][' '.join([w.lower() for w in ingredient.split()[3:]])] = ' '.join(ingredient.split()[:3])
elif unit == 'dashes' or unit == 'drops' or unit == 'with':
recipes[name]['ingredients'][' '.join([w.lower() for w in ingredient.split()[2:]])] = ' '.join(ingredient.split()[:2])
elif unit == 'dash':
recipes[name]['ingredients'][' '.join([w.lower() for w in ingredient.split()[2:]])] = 'dash'
else:
print "using literal: ", ingredient
literal = {'1': 'one', '2': 'two', 'A': 'one'}
try:
recipes[name]['ingredients'][' '.join([w.lower() for w in ingredient.split()[1:]])] = literal[ingredient.split()[0]]
except:
recipes[name]['ingredients'][ingredient.lower()] = ''
# Get full description from the link
ref_url = item.find(".//a[@class='top_hover_image']").attrib.get('href')
detail_page = requests.get(ref_url)
detail_tree = html.fromstring(detail_page.content)
use_next = False
for child in detail_tree.find(".//div[@class='col-sm-9']").iterchildren():
if use_next and child.tag == 'p':
recipes[name]['IBA_description'] = child.text
break
if child.tag =='ul':
use_next = True
with open(jsonfile, 'w') as fp:
json.dump(recipes, fp, indent=4, separators=(',', ': '))
print "Wrote out as {}".format(jsonfile)
sys.exit(0)
# NOTE: everything below is unreachable scratch code (sys.exit(0) above),
# kept from the original quick-and-dirty parser
raw = sys.argv[1]
with open(raw) as fp:
for line in fp.readlines():
if line.lstrip().startswith(r'<h3>'):
print line.lstrip()
# super hax
if line.startswith(r'<p>'):
print line
if line.startswith(r'<li>'):
print line
if not line.lstrip().startswith('<'):
print line
| 33.981818 | 132 | 0.584805 | 486 | 3,738 | 4.444444 | 0.312757 | 0.056019 | 0.081481 | 0.060185 | 0.188889 | 0.15463 | 0.124074 | 0.124074 | 0.124074 | 0.124074 | 0 | 0.009787 | 0.234617 | 3,738 | 109 | 133 | 34.293578 | 0.745194 | 0.031033 | 0 | 0.127907 | 0 | 0 | 0.162887 | 0.05365 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.011628 | 0.081395 | null | null | 0.104651 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5d3f1eebd4bcf21a7d4d5c5ef291d2d1f120515e | 1,101 | py | Python | Data Structures/Tree.py | Royals-Aeo-Gamer/MyPyMods | be3a521e9f823ce0b704f925b19f6f34dcb5405d | [
"MIT"
] | null | null | null | Data Structures/Tree.py | Royals-Aeo-Gamer/MyPyMods | be3a521e9f823ce0b704f925b19f6f34dcb5405d | [
"MIT"
] | null | null | null | Data Structures/Tree.py | Royals-Aeo-Gamer/MyPyMods | be3a521e9f823ce0b704f925b19f6f34dcb5405d | [
"MIT"
] | null | null | null | class TreeNode:
def __init__(self, name, data, parent=None):
self.name = name
self.parent = parent
self.data = data
self.childs = {}
def add_child(self, name, data):
self.childs.update({name:(type(self))(name, data, self)})
    def rm_branch(self, name, ansistors_n: list = None):
        # walk down the chain of ancestor names (if given), then delete
        # the named child at that level
        focus = self.childs
        while True:
            if not ansistors_n:  # no (remaining) ancestors: delete here
                del focus[name]
                break
            elif ansistors_n[0] in focus:
                focus = focus[ansistors_n[0]].childs
                del ansistors_n[0]
            else:
                print(focus)
                raise NameError(f"couldn't find branch {ansistors_n[0]}")
def __getitem__(self, item):
return self.childs[item]
def __setitem__(self, key, value):
self.childs[key] = value
def __delitem__(self, key, ansistors_n: list = None):
self.rm_branch(key, ansistors_n)
| 31.457143 | 73 | 0.551317 | 135 | 1,101 | 4.281481 | 0.340741 | 0.17301 | 0.076125 | 0.055363 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005563 | 0.346957 | 1,101 | 34 | 74 | 32.382353 | 0.798331 | 0 | 0 | 0.137931 | 0 | 0 | 0.033636 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.206897 | false | 0 | 0 | 0.034483 | 0.275862 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5d41c3b8ea2fc0ea3e45c5b6768c95bfbb166b0c | 1,965 | py | Python | wiki/tests.py | Prones94/Make_Wiki | f8816eb31bb370f48affff8568a6b0d0ffaf7cd4 | [
"MIT"
] | null | null | null | wiki/tests.py | Prones94/Make_Wiki | f8816eb31bb370f48affff8568a6b0d0ffaf7cd4 | [
"MIT"
] | 5 | 2020-06-06T01:41:16.000Z | 2021-06-10T20:09:01.000Z | wiki/tests.py | Prones94/Make_Wiki | f8816eb31bb370f48affff8568a6b0d0ffaf7cd4 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth.models import User
from wiki.models import Page
from django.utils.text import slugify
# Create your tests here.
class WikiPageTest(TestCase):
def test_edit(self):
user = User.objects.create_user(username='admin', password='djangopony')
self.client.login(username='admin', password='djangopony')
page = Page.objects.create(title="My Test Page", content="test", author=user)
page.save()
edit = {
'title': 'testing title',
'content': 'testing content'
}
response = self.client.post('/%s/' %slugify(page.title), edit)
updated = Page.objects.get(title = edit['title'])
self.assertEqual(response.status_code, 302)
self.assertEqual(updated.title, edit['title'])
def test_page(self):
user = User.objects.create_user(username='admin', password='djangopony')
self.client.login(username='admin', password='djangopony')
page = Page.objects.create(title="My Test Page", content="test", author=user)
page.save()
response = self.client.get('/%s/' %slugify(page.title))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'test')
def test_create(self):
user = User.objects.create_user(username='admin', password='djangopony')
self.client.login(username='admin', password='djangopony')
new = {
'title': 'testing title',
'content': 'testing content'
}
response = self.client.post('/wiki/new/', new)
updated = Page.objects.get(title = new['title'])
self.assertEqual(response.status_code, 302)
self.assertEqual(updated.title, new['title'])
'''
Steps to writing a test
1. Set up your test data
2. Make a request (GET, POST)
3a. Check if response matches what we expect
3b. Check if database matches what we expect
''' | 33.87931 | 85 | 0.641221 | 237 | 1,965 | 5.278481 | 0.28692 | 0.06235 | 0.100719 | 0.148681 | 0.634692 | 0.593126 | 0.56275 | 0.56275 | 0.56275 | 0.56275 | 0 | 0.00853 | 0.224427 | 1,965 | 58 | 86 | 33.87931 | 0.812336 | 0.011705 | 0 | 0.432432 | 0 | 0 | 0.139508 | 0 | 0 | 0 | 0 | 0 | 0.162162 | 1 | 0.081081 | false | 0.162162 | 0.108108 | 0 | 0.216216 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
5d451217c589da4fbfb78dd271865830d16162d1 | 826 | py | Python | 34. Find First and Last Position of Element in Sorted Array/main.py | Competitive-Programmers-Community/LeetCode | 841fdee805b1a626e9f1cd0e12398d25054638af | [
"MIT"
] | 2 | 2019-10-05T09:48:20.000Z | 2019-10-05T15:40:01.000Z | 34. Find First and Last Position of Element in Sorted Array/main.py | Competitive-Programmers-Community/LeetCode | 841fdee805b1a626e9f1cd0e12398d25054638af | [
"MIT"
] | null | null | null | 34. Find First and Last Position of Element in Sorted Array/main.py | Competitive-Programmers-Community/LeetCode | 841fdee805b1a626e9f1cd0e12398d25054638af | [
"MIT"
] | null | null | null | class Solution:
def searchRange(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
if not nums:
return [-1, -1]
low = 0
high = len(nums) - 1
f = 0
while low<=high:
mid = (low+high)//2
if nums[mid] == target:
f = 1
break
elif nums[mid] < target:
low = mid + 1
elif nums[mid] > target:
high = mid - 1
        if f == 0:
            return [-1, -1]
        i, j = mid, mid
        while i >= 1 and nums[i-1] == target:
            i = i - 1
        while j < len(nums) - 1 and nums[j+1] == target:
            j = j + 1
        return [i, j]
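        # note: expanding linearly around mid is O(n) in the worst case
        # (e.g. every element equal to target); two extra binary searches
        # for the first and last occurrence would keep the lookup O(log n)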
| 24.294118 | 52 | 0.361985 | 97 | 826 | 3.082474 | 0.298969 | 0.070234 | 0.130435 | 0.113712 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04534 | 0.51937 | 826 | 33 | 53 | 25.030303 | 0.707809 | 0.069007 | 0 | 0.08 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5d4867285dd6af6ea7e2fbae337fc27c75376241 | 1,893 | py | Python | c/create.py | LMS57/domato | 005739f55b49ead0ac47ea14b324decee05a7625 | [
"Apache-2.0"
] | null | null | null | c/create.py | LMS57/domato | 005739f55b49ead0ac47ea14b324decee05a7625 | [
"Apache-2.0"
] | null | null | null | c/create.py | LMS57/domato | 005739f55b49ead0ac47ea14b324decee05a7625 | [
"Apache-2.0"
] | null | null | null | data = open('./original').readlines()
alphabet = {
"<":"lt",
">":"gt",
"=":"=",
"-":'-',
"+":"+",
"-":"-",
"~":"~",
"!":"ex",
"%":"%",
"^":"^",
"&":"&",
"*":"*",
"(":"(",
")":"right_paran",
"[":"[",
"]":"]",
"{":"{",
"}":"}",
"[":"[",
"]":"]",
"|":"|",
";":";",
":":":",
",":",",
".":".",
"?":"?",
"/":"/",
}
def item(y):
if "'" in y:
tmp = y.split("'")[1]
test = 0
for x in alphabet:
if x in tmp:
test = 1
if test:
final = ''
for x in tmp:
final += item(alphabet[x])
return final
else:
return item(tmp)
else:
return "<"+y+">"
start = 0
current = ""
space = "<space>"
declared = []
referenced = []
for x in data:
x = x.strip()
if x == "":
continue
if '%%' == x:
start = 1
continue
elif start != 1:
continue
if x == "test":
break;
x = x.split(' ')
    if len(x) == 1:  # item declaration or end
if x[0] == ';':
current = ""
else:
current = x[0]
declared.append(item(x[0]))
print ""
else:
x = x[1:]
tmp = item(current)+'\t=\t'
for y in range(len(x)):
referenced.append(item(x[y]))
tmp += item(x[y])
if y != len(x)-1 and "'" not in x[y+1] and "'" not in x[y]:
tmp+=space
print tmp
referenced = set(referenced)
final = []
for x in referenced:
if x not in declared:
final.append(x)
print ""
for x in final:
tmp = x+'\t=\t'
x = x[1:-1]
print tmp + x.lower()
| 18.742574 | 71 | 0.320655 | 184 | 1,893 | 3.293478 | 0.25 | 0.029703 | 0.049505 | 0.036304 | 0.036304 | 0.036304 | 0 | 0 | 0 | 0 | 0 | 0.014507 | 0.453777 | 1,893 | 100 | 72 | 18.93 | 0.571567 | 0.01215 | 0 | 0.168539 | 0 | 0 | 0.057785 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.044944 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5d48a2b09ec3e91f3ac7c94a610ddffec5774abc | 10,500 | py | Python | AppServer/google/appengine/api/memcache/memcache_distributed.py | isabella232/scale-safe | 8b887726768106b6b67d7be6ea257bee5cd83f9a | [
"Apache-2.0"
] | 3 | 2016-06-12T01:18:49.000Z | 2018-07-16T18:20:23.000Z | AppServer/google/appengine/api/memcache/memcache_distributed.py | davgit/appscale | 17d35a14fa5a56975de1e3517bec9e7f9047d82a | [
"Apache-2.0"
] | 1 | 2021-06-08T10:04:35.000Z | 2021-06-08T10:04:35.000Z | AppServer/google/appengine/api/memcache/memcache_distributed.py | davgit/appscale | 17d35a14fa5a56975de1e3517bec9e7f9047d82a | [
"Apache-2.0"
] | 1 | 2020-05-25T02:59:15.000Z | 2020-05-25T02:59:15.000Z | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Non-stub version of the memcache API, keeping all data in memcached.
Uses the python-memcached library to interface with memcached.
"""
import base64
import cPickle
import logging
import memcache
import os
import time
from google.appengine.api import apiproxy_stub
from google.appengine.api.memcache import memcache_service_pb
from google.appengine.runtime import apiproxy_errors
MemcacheSetResponse = memcache_service_pb.MemcacheSetResponse
MemcacheSetRequest = memcache_service_pb.MemcacheSetRequest
MemcacheIncrementRequest = memcache_service_pb.MemcacheIncrementRequest
MemcacheIncrementResponse = memcache_service_pb.MemcacheIncrementResponse
MemcacheDeleteResponse = memcache_service_pb.MemcacheDeleteResponse
from google.appengine.api.memcache import TYPE_INT
from google.appengine.api.memcache import TYPE_LONG
class MemcacheService(apiproxy_stub.APIProxyStub):
"""Python only memcache service.
This service keeps all data in any external servers running memcached.
"""
# The memcached default port.
MEMCACHE_PORT = "11211"
# An AppScale file which has a list of IPs running memcached.
APPSCALE_MEMCACHE_FILE = "/etc/appscale/memcache_ips"
# The minimum frequency by which memcache clients will update their list of
# clients that they connect to (which can change if AppScale scales up or
# down).
UPDATE_WINDOW = 60 # seconds
def __init__(self, gettime=time.time, service_name='memcache'):
"""Initializer.
Args:
gettime: time.time()-like function used for testing.
service_name: Service name expected for all calls.
"""
super(MemcacheService, self).__init__(service_name)
self._gettime = gettime
self._memcache = None
self.setupMemcacheClient()
def setupMemcacheClient(self):
""" Sets up the memcache client. """
if os.path.exists(self.APPSCALE_MEMCACHE_FILE):
memcache_file = open(self.APPSCALE_MEMCACHE_FILE, "r")
all_ips = memcache_file.read().split("\n")
memcache_file.close()
else:
all_ips = ['localhost']
memcaches = [ip + ":" + self.MEMCACHE_PORT for ip in all_ips if ip != '']
memcaches.sort()
self._memcache = memcache.Client(memcaches, debug=0)
def _Dynamic_Get(self, request, response):
"""Implementation of gets for memcache.
Args:
request: A MemcacheGetRequest protocol buffer.
response: A MemcacheGetResponse protocol buffer.
"""
for key in set(request.key_list()):
internal_key = self._GetKey(request.name_space(), key)
value = self._memcache.get(internal_key)
if value is None:
continue
flags = 0
stored_flags, cas_id, stored_value = cPickle.loads(value)
flags |= stored_flags
item = response.add_item()
item.set_key(key)
item.set_value(stored_value)
item.set_flags(flags)
if request.for_cas():
item.set_cas_id(cas_id)
def _Dynamic_Set(self, request, response):
"""Implementation of sets for memcache.
Args:
request: A MemcacheSetRequest.
response: A MemcacheSetResponse.
"""
for item in request.item_list():
key = self._GetKey(request.name_space(), item.key())
set_policy = item.set_policy()
old_entry = self._memcache.get(key)
cas_id = 0
if old_entry:
_, cas_id, _ = cPickle.loads(old_entry)
set_status = MemcacheSetResponse.NOT_STORED
if ((set_policy == MemcacheSetRequest.SET) or
(set_policy == MemcacheSetRequest.ADD and old_entry is None) or
(set_policy == MemcacheSetRequest.REPLACE and
old_entry is not None)):
if (old_entry is None or set_policy == MemcacheSetRequest.SET):
set_status = MemcacheSetResponse.STORED
elif (set_policy == MemcacheSetRequest.CAS and item.for_cas() and
item.has_cas_id()):
if old_entry is None:
set_status = MemcacheSetResponse.NOT_STORED
elif cas_id != item.cas_id():
set_status = MemcacheSetResponse.EXISTS
else:
set_status = MemcacheSetResponse.STORED
if (set_status == MemcacheSetResponse.STORED
or set_policy == MemcacheSetRequest.REPLACE):
set_value = cPickle.dumps(
[item.flags(), cas_id + 1, item.value()])
if set_policy == MemcacheSetRequest.REPLACE:
self._memcache.replace(key, set_value)
else:
self._memcache.set(key, set_value, item.expiration_time())
response.add_set_status(set_status)
def _Dynamic_Delete(self, request, response):
"""Implementation of delete in memcache.
Args:
request: A MemcacheDeleteRequest protocol buffer.
response: A MemcacheDeleteResponse protocol buffer.
"""
for item in request.item_list():
key = self._GetKey(request.name_space(), item.key())
entry = self._memcache.get(key)
delete_status = MemcacheDeleteResponse.DELETED
if entry is None:
delete_status = MemcacheDeleteResponse.NOT_FOUND
else:
self._memcache.delete(key)
response.add_delete_status(delete_status)
def _Increment(self, namespace, request):
"""Internal function for incrementing from a MemcacheIncrementRequest.
Args:
namespace: A string containing the namespace for the request,
if any. Pass an empty string if there is no namespace.
request: A MemcacheIncrementRequest instance.
Returns:
An integer or long if the offset was successful, None on error.
"""
if not request.delta():
return None
cas_id = 0
key = self._GetKey(namespace, request.key())
value = self._memcache.get(key)
if value is None:
if not request.has_initial_value():
return None
flags, cas_id, stored_value = (
TYPE_INT, cas_id, str(request.initial_value()))
else:
flags, cas_id, stored_value = cPickle.loads(value)
if flags == TYPE_INT:
new_value = int(stored_value)
elif flags == TYPE_LONG:
new_value = long(stored_value)
if request.direction() == MemcacheIncrementRequest.INCREMENT:
new_value += request.delta()
elif request.direction() == MemcacheIncrementRequest.DECREMENT:
new_value -= request.delta()
new_stored_value = cPickle.dumps([flags, cas_id + 1, str(new_value)])
try:
self._memcache.cas(key, new_stored_value)
except Exception, e:
logging.error(str(e))
return None
return new_value
def _Dynamic_Increment(self, request, response):
"""Implementation of increment for memcache.
Args:
request: A MemcacheIncrementRequest protocol buffer.
response: A MemcacheIncrementResponse protocol buffer.
"""
new_value = self._Increment(request.name_space(), request)
if new_value is None:
raise apiproxy_errors.ApplicationError(
memcache_service_pb.MemcacheServiceError.UNSPECIFIED_ERROR)
response.set_new_value(new_value)
def _Dynamic_BatchIncrement(self, request, response):
"""Implementation of batch increment for memcache.
Args:
request: A MemcacheBatchIncrementRequest protocol buffer.
response: A MemcacheBatchIncrementResponse protocol buffer.
"""
namespace = request.name_space()
for request_item in request.item_list():
new_value = self._Increment(namespace, request_item)
item = response.add_item()
if new_value is None:
item.set_increment_status(MemcacheIncrementResponse.NOT_CHANGED)
else:
item.set_increment_status(MemcacheIncrementResponse.OK)
item.set_new_value(new_value)
def _Dynamic_FlushAll(self, request, response):
"""Implementation of MemcacheService::FlushAll().
Args:
request: A MemcacheFlushRequest.
response: A MemcacheFlushResponse.
"""
self._memcache.flush_all()
def _Dynamic_Stats(self, request, response):
"""Implementation of MemcacheService::Stats().
Args:
request: A MemcacheStatsRequest.
response: A MemcacheStatsResponse.
"""
stats = response.mutable_stats()
num_servers = 0
hits_total = 0
misses_total = 0
byte_hits_total = 0
items_total = 0
bytes_total = 0
time_total = 0
def get_stats_value(stats_dict, key, _type=int):
""" Gets statisical values and makes sure the key is in the dict. """
if key not in stats_dict:
logging.warn("No stats for key '%s'." % key)
return _type(stats_dict.get(key, '0'))
for server, server_stats in self._memcache.get_stats():
num_servers += 1
hits_total += get_stats_value(server_stats, 'get_hits')
misses_total += get_stats_value(server_stats, 'get_misses')
byte_hits_total += get_stats_value(server_stats, 'bytes_read')
items_total += get_stats_value(server_stats, 'curr_items')
bytes_total += get_stats_value(server_stats, 'bytes')
time_total += get_stats_value(server_stats, 'time', float)
stats.set_hits(hits_total)
stats.set_misses(misses_total)
stats.set_byte_hits(byte_hits_total)
stats.set_items(items_total)
stats.set_bytes(bytes_total)
# With the Python 2.7 GAE runtime, it expects all fields here to be ints.
# Python 2.5 was fine with this being a float, so callers in that runtime
# may not be expecting an int.
stats.set_oldest_item_age(int(time.time() - time_total / num_servers))
def _GetKey(self, namespace, key):
"""Used to get the Memcache key. It is encoded because the sdk
allows special characters but the Memcache client does not.
Args:
namespace: The namespace as provided by the application.
key: The key as provided by the application.
Returns:
A base64 string __{appname}__{namespace}__{key}
"""
appname = os.environ['APPNAME']
internal_key = appname + "__" + namespace + "__" + key
return base64.b64encode(internal_key)
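    # e.g. (hypothetical app name) with APPNAME='guestbook', namespace 'ns'
    # and key 'k', the memcached key is base64.b64encode('guestbook__ns__k')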
| 33.980583 | 77 | 0.701619 | 1,311 | 10,500 | 5.409611 | 0.223494 | 0.00987 | 0.016779 | 0.032572 | 0.218133 | 0.11943 | 0.078962 | 0.039763 | 0.01692 | 0.01692 | 0 | 0.005079 | 0.212381 | 10,500 | 308 | 78 | 34.090909 | 0.852479 | 0.094381 | 0 | 0.147929 | 0 | 0 | 0.018651 | 0.003646 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.065089 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5d4ce281f4ac42992169e4a43a604e5e249ccc55 | 592 | py | Python | Python/usec_mode.py | hanayik/StimSync | f08ec01a36c47b00bfe4937b5a6eb2a60af0713d | [
"BSD-2-Clause"
] | 6 | 2017-12-04T18:33:45.000Z | 2021-08-04T02:07:21.000Z | source/Python/usec_mode.py | neurolabusc/StimSync | 749908572bda3073b0911566d50fe92d74d3cdb7 | [
"BSD-2-Clause"
] | null | null | null | source/Python/usec_mode.py | neurolabusc/StimSync | 749908572bda3073b0911566d50fe92d74d3cdb7 | [
"BSD-2-Clause"
] | 3 | 2018-01-13T12:17:18.000Z | 2021-08-01T06:43:10.000Z | import serial
ser = serial.Serial('/dev/tty.usbmodem7071', 115200, timeout=10)
ser.write("\xb1\xa3\xb5\xb5") #set usec mode 177,163,181,181
ser.flush()
ser.flushInput()
obs = ser.read(8)
if len(obs) != 8:
    print('Error: no button presses detected')
print 'Observed data (as hex): '+ obs.encode('hex')
obsBin = [ord(c) for c in obs]
usec = (obsBin[3] << 24)+ (obsBin[4] << 16)+ (obsBin[5] << 8)+obsBin[6]
keys = (obsBin[1] << 8)+obsBin[2]
print 'keys pressed %d at %d usec' % (keys, usec)
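# packet layout (as parsed above): bytes 1-2 are a big-endian 16-bit key
# bitmask and bytes 3-6 a big-endian 32-bit microsecond timestamp; bytes 0
# and 7 appear to be framing/checksum (assumption, not parsed here)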
ser.write("\xb1\xa3\xa9\xa9") #turn off oscilloscope: set keyboard mode 177,163,169,169
ser.close() | 37 | 87 | 0.663851 | 102 | 592 | 3.852941 | 0.598039 | 0.040712 | 0.05598 | 0.071247 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.11306 | 0.133446 | 592 | 16 | 88 | 37 | 0.653021 | 0.143581 | 0 | 0 | 0 | 0 | 0.27668 | 0.041502 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.066667 | null | null | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5d527097e73751e96803cabcd187b0fd2d52470c | 1,737 | py | Python | build/common/hex2carray.py | isabella232/nanos-nonsecure-firmware | d1ce2e0e01a8ed6d8840a24308e16f6560a626aa | [
"Apache-2.0"
] | 16 | 2018-03-20T11:52:29.000Z | 2021-02-12T07:39:54.000Z | build/common/hex2carray.py | LedgerHQ/nanos-nonsecure-firmware | d1ce2e0e01a8ed6d8840a24308e16f6560a626aa | [
"Apache-2.0"
] | 1 | 2022-03-06T09:56:16.000Z | 2022-03-06T09:56:16.000Z | build/common/hex2carray.py | isabella232/nanos-nonsecure-firmware | d1ce2e0e01a8ed6d8840a24308e16f6560a626aa | [
"Apache-2.0"
] | 7 | 2017-08-24T00:42:09.000Z | 2022-03-06T09:51:51.000Z | """
*******************************************************************************
* Ledger Blue
* (c) 2016 Ledger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
********************************************************************************
"""
from ledgerblue.hexParser import IntelHexParser
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--hex", help="Hex file to be converted as a C array")
args = parser.parse_args()
if args.hex == None:
raise Exception("Missing hex filename to sign")
parser = IntelHexParser(args.hex)
def hexU8(value):
    # mask to one byte, OR with 0x100 so hex() always yields two digits,
    # then strip the leading '0x1'
    return hex(0x100 | (value & 0xFF))[3:]
for a in parser.getAreas():
if (len(a.data) > 0x10000):
raise BaseException("data must be splitted in chunks of 64k")
print "0x" + hexU8(a.start >> 24) + ", 0x" + hexU8(a.start >> 16) + ", 0x" + hexU8(a.start >> 8) + ", 0x" + hexU8(a.start) + ", "
print "0x" + hexU8(len(a.data) >> 24) + ", 0x" + hexU8(len(a.data) >> 16) + ", 0x" + hexU8(len(a.data) >> 8) + ", 0x" + hexU8(len(a.data)) + ", "
# low @ to high @
offset = 0
while offset < len(a.data):
string = ""
for i in range(8):
if offset+i < len(a.data):
string += " 0x" + hexU8(a.data[offset+i]) + ","
print string
offset+=8
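# each area is emitted as: 4 big-endian address bytes, 4 big-endian length
# bytes, then the data itself, eight "0xNN," values per line, ready to paste
# into a C array initializer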
| 31.581818 | 146 | 0.599885 | 240 | 1,737 | 4.333333 | 0.483333 | 0.060577 | 0.053846 | 0.05 | 0.057692 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03771 | 0.17559 | 1,737 | 54 | 147 | 32.166667 | 0.688547 | 0.008636 | 0 | 0 | 0 | 0 | 0.15 | 0 | 0 | 0 | 0.016667 | 0 | 0 | 0 | null | null | 0 | 0.086957 | null | null | 0.130435 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5d52a5f4ab272695a4c951a2d0a2e0909bf0ed0b | 1,413 | py | Python | application/modules/login.py | BaggerFast/Simple_votings | 843769fa6fd2c04feb542e6b301b7b4810260d4e | [
"MIT"
] | null | null | null | application/modules/login.py | BaggerFast/Simple_votings | 843769fa6fd2c04feb542e6b301b7b4810260d4e | [
"MIT"
] | null | null | null | application/modules/login.py | BaggerFast/Simple_votings | 843769fa6fd2c04feb542e6b301b7b4810260d4e | [
"MIT"
] | null | null | null | from django.contrib import messages
from django.contrib.auth import login, authenticate
from django.shortcuts import render, redirect
from django.urls import reverse
from django.views import View
from application.forms import AuthenticateForm
from application.views import get_navbar, Page
class LoginView(View):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.context = {}
def get(self, request):
self.context['navbar'] = get_navbar(request)
self.context['form'] = AuthenticateForm()
return render(request, Page.login, self.context)
def post(self, request):
self.context['navbar'] = get_navbar(request)
data = request.POST
form = AuthenticateForm(data)
if form.is_valid():
user = authenticate(
username=data['username'],
password=data['password'],
)
if user:
login(request, user)
messages.success(request, 'You have successfully logged in!')
return redirect(reverse('main'))
messages.error(request, 'Invalid username and password pair.', extra_tags='danger')
else:
messages.error(request, 'Invalid username and password pair.', extra_tags='danger')
self.context['form'] = AuthenticateForm(data)
return render(request, Page.login, self.context)
| 36.230769 | 95 | 0.640481 | 153 | 1,413 | 5.823529 | 0.359477 | 0.08642 | 0.060606 | 0.049383 | 0.332211 | 0.332211 | 0.332211 | 0.244669 | 0.145903 | 0.145903 | 0 | 0 | 0.255485 | 1,413 | 38 | 96 | 37.184211 | 0.846958 | 0 | 0 | 0.181818 | 0 | 0 | 0.108988 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0.090909 | 0.212121 | 0 | 0.424242 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
5d53556c82d1a27255c1497656b5efc347cde76d | 1,035 | py | Python | alipay/aop/api/response/AlipayOpenMiniVersionAuditApplyResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/response/AlipayOpenMiniVersionAuditApplyResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/response/AlipayOpenMiniVersionAuditApplyResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenMiniVersionAuditApplyResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenMiniVersionAuditApplyResponse, self).__init__()
self._speed_up = None
self._speed_up_memo = None
@property
def speed_up(self):
return self._speed_up
@speed_up.setter
def speed_up(self, value):
self._speed_up = value
@property
def speed_up_memo(self):
return self._speed_up_memo
@speed_up_memo.setter
def speed_up_memo(self, value):
self._speed_up_memo = value
def parse_response_content(self, response_content):
response = super(AlipayOpenMiniVersionAuditApplyResponse, self).parse_response_content(response_content)
if 'speed_up' in response:
self.speed_up = response['speed_up']
if 'speed_up_memo' in response:
self.speed_up_memo = response['speed_up_memo']
| 28.75 | 112 | 0.699517 | 124 | 1,035 | 5.459677 | 0.266129 | 0.186115 | 0.146233 | 0.088626 | 0.22452 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00123 | 0.214493 | 1,035 | 35 | 113 | 29.571429 | 0.831488 | 0.04058 | 0 | 0.08 | 0 | 0 | 0.042381 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.24 | false | 0 | 0.08 | 0.08 | 0.44 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5d546fd247cbdfbb018dec6e3f4e3273ffdefdb8 | 3,115 | py | Python | pysnmp-with-texts/MWORKS-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/MWORKS-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/MWORKS-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module MWORKS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/MWORKS-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:16:04 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Gauge32, Unsigned32, ObjectIdentity, IpAddress, Bits, MibIdentifier, Integer32, enterprises, ModuleIdentity, TimeTicks, Counter32, NotificationType, iso, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "Unsigned32", "ObjectIdentity", "IpAddress", "Bits", "MibIdentifier", "Integer32", "enterprises", "ModuleIdentity", "TimeTicks", "Counter32", "NotificationType", "iso", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
tecElite = MibIdentifier((1, 3, 6, 1, 4, 1, 217))
meterWorks = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 16))
mw501 = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 16, 1))
mwMem = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 16, 1, 1))
mwHeap = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 16, 1, 2))
mwMemCeiling = MibScalar((1, 3, 6, 1, 4, 1, 217, 16, 1, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mwMemCeiling.setStatus('mandatory')
if mibBuilder.loadTexts: mwMemCeiling.setDescription('bytes of memory the agent memory manager will allow the agent to use.')
mwMemUsed = MibScalar((1, 3, 6, 1, 4, 1, 217, 16, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mwMemUsed.setStatus('mandatory')
if mibBuilder.loadTexts: mwMemUsed.setDescription("bytes of memory that meterworks has malloc'ed. some of this may be in free pools.")
mwHeapTotal = MibScalar((1, 3, 6, 1, 4, 1, 217, 16, 1, 2, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mwHeapTotal.setStatus('mandatory')
if mibBuilder.loadTexts: mwHeapTotal.setDescription('bytes of memory given to the heap manager.')
mwHeapUsed = MibScalar((1, 3, 6, 1, 4, 1, 217, 16, 1, 2, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mwHeapUsed.setStatus('mandatory')
if mibBuilder.loadTexts: mwHeapUsed.setDescription('bytes of available memory in the heap.')
mibBuilder.exportSymbols("MWORKS-MIB", mwHeap=mwHeap, mwHeapUsed=mwHeapUsed, mwMemCeiling=mwMemCeiling, meterWorks=meterWorks, tecElite=tecElite, mwMem=mwMem, mw501=mw501, mwHeapTotal=mwHeapTotal, mwMemUsed=mwMemUsed)
| 97.34375 | 505 | 0.759551 | 376 | 3,115 | 6.292553 | 0.345745 | 0.007608 | 0.011412 | 0.015216 | 0.476331 | 0.326712 | 0.326712 | 0.24049 | 0.231192 | 0.221048 | 0 | 0.070743 | 0.09695 | 3,115 | 31 | 506 | 100.483871 | 0.770352 | 0.100803 | 0 | 0 | 0 | 0 | 0.271848 | 0.015759 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5d553e6733970b4280761ad4ec3ddb284ae1146d | 1,382 | py | Python | vars_in_python.py | klyusba/python-quiz | 9f469417458f8ba6b21f9507cc860ca4547ea67b | [
"MIT"
] | null | null | null | vars_in_python.py | klyusba/python-quiz | 9f469417458f8ba6b21f9507cc860ca4547ea67b | [
"MIT"
] | null | null | null | vars_in_python.py | klyusba/python-quiz | 9f469417458f8ba6b21f9507cc860ca4547ea67b | [
"MIT"
] | null | null | null | # == 1 ==
bar = [1, 2]
def foo(bar):
bar = sum(bar)
return bar
print(foo(bar))
# == 2 ==
bar = [1, 2]
def foo(bar):
bar[0] = 1
return sum(bar)
print(foo(bar))
# == 3 ==
bar = [1, 2]
def foo():
bar = sum(bar)
return bar
print(foo())
# == 4 ==
bar = [1, 2]
def foo(bar):
bar = [1, 2, 3, ]
return sum(bar)
print(foo(bar), bar)
# == 5 ==
bar = [1, 2]
def foo(bar):
bar[:] = [1, 2, 3, ]
return sum(bar)
print(foo(bar), bar)
# == 6 ==
try:
bar = 1 / 0
print(bar)
except ZeroDivisionError as bar:
print(bar)
print(bar)
# == 7 ==
bar = [1, 2]
print(list(bar for bar in bar))
print(bar)
# == 8 ==
bar = [1, 2]
f = lambda: sum(bar)
print(f())
bar = [1, 2, 3, ]
print(f())
# == 9 ==
bar = [1, 2]
def foo(bar):
return lambda: sum(bar)
f = foo(bar)
print(f())
bar = [1, 2, 3, ]
print(f())
# == 10 ==
bar = [1, 2]
foo = []
for i in bar:
foo.append(lambda: i)
print([f() for f in foo])
# == 11 ==
bar = [1, 2]
foo = [
lambda: i
for i in bar
]
print(list(f() for f in foo))
# == 12 ==
bar = [1, 2]
foo = [
lambda: i
for i in bar
]
print(list(f() for f in foo))
bar = [1, 2, 3, ]
print(list(f() for f in foo))
bar[:] = [1, 2, 3, ]
print(list(f() for f in foo))
# == 13 ==
bar = [1, 2]
foo = [
lambda i=i: i
for i in bar
]
print(list(f() for f in foo))
| 11.145161 | 32 | 0.469609 | 243 | 1,382 | 2.670782 | 0.127572 | 0.117103 | 0.138675 | 0.07396 | 0.701079 | 0.68567 | 0.588598 | 0.460709 | 0.460709 | 0.395994 | 0 | 0.065762 | 0.306802 | 1,382 | 123 | 33 | 11.235772 | 0.611691 | 0.096237 | 0 | 0.753425 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.082192 | false | 0 | 0 | 0.013699 | 0.164384 | 0.273973 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5d5c6de0926f1a98ed21db39c4944a17b7f61725 | 823 | py | Python | home/migrations/0002_auto_20171017_0412.py | Taywee/amberherbert.com | 6bf384d7cdf18dc613252fe4dde38545150eabbc | [
"MIT"
] | null | null | null | home/migrations/0002_auto_20171017_0412.py | Taywee/amberherbert.com | 6bf384d7cdf18dc613252fe4dde38545150eabbc | [
"MIT"
] | 2 | 2017-10-15T20:36:59.000Z | 2017-10-17T05:27:49.000Z | home/migrations/0002_auto_20171017_0412.py | Taywee/amberherbert.com | 6bf384d7cdf18dc613252fe4dde38545150eabbc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-17 04:12
from __future__ import unicode_literals
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='navigation',
field=wagtail.core.fields.StreamField((('item', wagtail.core.blocks.StructBlock((('text', wagtail.core.blocks.CharBlock(help_text='If this is left blank, the title of the linked page will be used instead', max_length=16, required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=True))))),), blank=True, help_text='The list of navigation items', null=True),
),
]
| 35.782609 | 384 | 0.684083 | 104 | 823 | 5.317308 | 0.663462 | 0.119349 | 0.122966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034175 | 0.18226 | 823 | 22 | 385 | 37.409091 | 0.787519 | 0.082625 | 0 | 0 | 1 | 0 | 0.194149 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.266667 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5d5d8bde571d6e8d8f2723242cd35348a71ff40f | 8,457 | py | Python | sdk/python/pulumi_gcp/kms/get_kms_crypto_key_version.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_gcp/kms/get_kms_crypto_key_version.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_gcp/kms/get_kms_crypto_key_version.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetKMSCryptoKeyVersionResult',
'AwaitableGetKMSCryptoKeyVersionResult',
'get_kms_crypto_key_version',
'get_kms_crypto_key_version_output',
]
@pulumi.output_type
class GetKMSCryptoKeyVersionResult:
"""
A collection of values returned by getKMSCryptoKeyVersion.
"""
def __init__(__self__, algorithm=None, crypto_key=None, id=None, name=None, protection_level=None, public_keys=None, state=None, version=None):
if algorithm and not isinstance(algorithm, str):
raise TypeError("Expected argument 'algorithm' to be a str")
pulumi.set(__self__, "algorithm", algorithm)
if crypto_key and not isinstance(crypto_key, str):
raise TypeError("Expected argument 'crypto_key' to be a str")
pulumi.set(__self__, "crypto_key", crypto_key)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if protection_level and not isinstance(protection_level, str):
raise TypeError("Expected argument 'protection_level' to be a str")
pulumi.set(__self__, "protection_level", protection_level)
if public_keys and not isinstance(public_keys, list):
raise TypeError("Expected argument 'public_keys' to be a list")
pulumi.set(__self__, "public_keys", public_keys)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if version and not isinstance(version, int):
raise TypeError("Expected argument 'version' to be a int")
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def algorithm(self) -> str:
"""
The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports.
"""
return pulumi.get(self, "algorithm")
@property
@pulumi.getter(name="cryptoKey")
def crypto_key(self) -> str:
return pulumi.get(self, "crypto_key")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The resource name for this CryptoKeyVersion in the format `projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="protectionLevel")
def protection_level(self) -> str:
"""
The ProtectionLevel describing how crypto operations are performed with this CryptoKeyVersion. See the [protection_level reference](https://cloud.google.com/kms/docs/reference/rest/v1/ProtectionLevel) for possible outputs.
"""
return pulumi.get(self, "protection_level")
@property
@pulumi.getter(name="publicKeys")
def public_keys(self) -> Sequence['outputs.GetKMSCryptoKeyVersionPublicKeyResult']:
"""
If the enclosing CryptoKey has purpose `ASYMMETRIC_SIGN` or `ASYMMETRIC_DECRYPT`, this block contains details about the public key associated to this CryptoKeyVersion. Structure is documented below.
"""
return pulumi.get(self, "public_keys")
@property
@pulumi.getter
def state(self) -> str:
"""
The current state of the CryptoKeyVersion. See the [state reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions#CryptoKeyVersion.CryptoKeyVersionState) for possible outputs.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def version(self) -> Optional[int]:
return pulumi.get(self, "version")
class AwaitableGetKMSCryptoKeyVersionResult(GetKMSCryptoKeyVersionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetKMSCryptoKeyVersionResult(
algorithm=self.algorithm,
crypto_key=self.crypto_key,
id=self.id,
name=self.name,
protection_level=self.protection_level,
public_keys=self.public_keys,
state=self.state,
version=self.version)
def get_kms_crypto_key_version(crypto_key: Optional[str] = None,
version: Optional[int] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetKMSCryptoKeyVersionResult:
"""
Provides access to a Google Cloud Platform KMS CryptoKeyVersion. For more information see
[the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version)
and
[API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions).
A CryptoKeyVersion represents an individual cryptographic key, and the associated key material.
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
my_key_ring = gcp.kms.get_kms_key_ring(name="my-key-ring",
location="us-central1")
my_crypto_key = gcp.kms.get_kms_crypto_key(name="my-crypto-key",
key_ring=my_key_ring.id)
my_crypto_key_version = gcp.kms.get_kms_crypto_key_version(crypto_key=data["google_kms_key"]["my_key"]["id"])
```
:param str crypto_key: The `self_link` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the
`kms.CryptoKey` resource/datasource.
:param int version: The version number for this CryptoKeyVersion. Defaults to `1`.
"""
__args__ = dict()
__args__['cryptoKey'] = crypto_key
__args__['version'] = version
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:kms/getKMSCryptoKeyVersion:getKMSCryptoKeyVersion', __args__, opts=opts, typ=GetKMSCryptoKeyVersionResult).value
return AwaitableGetKMSCryptoKeyVersionResult(
algorithm=__ret__.algorithm,
crypto_key=__ret__.crypto_key,
id=__ret__.id,
name=__ret__.name,
protection_level=__ret__.protection_level,
public_keys=__ret__.public_keys,
state=__ret__.state,
version=__ret__.version)
@_utilities.lift_output_func(get_kms_crypto_key_version)
def get_kms_crypto_key_version_output(crypto_key: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[Optional[int]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetKMSCryptoKeyVersionResult]:
"""
Provides access to a Google Cloud Platform KMS CryptoKeyVersion. For more information see
[the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version)
and
[API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions).
A CryptoKeyVersion represents an individual cryptographic key, and the associated key material.
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
my_key_ring = gcp.kms.get_kms_key_ring(name="my-key-ring",
location="us-central1")
my_crypto_key = gcp.kms.get_kms_crypto_key(name="my-crypto-key",
key_ring=my_key_ring.id)
my_crypto_key_version = gcp.kms.get_kms_crypto_key_version(crypto_key=data["google_kms_key"]["my_key"]["id"])
```
:param str crypto_key: The `self_link` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the
`kms.CryptoKey` resource/datasource.
:param int version: The version number for this CryptoKeyVersion. Defaults to `1`.
"""
...
| 41.253659 | 247 | 0.684522 | 1,010 | 8,457 | 5.506931 | 0.192079 | 0.055016 | 0.019417 | 0.024272 | 0.436893 | 0.385293 | 0.37918 | 0.330097 | 0.312118 | 0.312118 | 0 | 0.001358 | 0.216271 | 8,457 | 204 | 248 | 41.455882 | 0.837809 | 0.386898 | 0 | 0.119266 | 1 | 0 | 0.148293 | 0.04566 | 0 | 0 | 0 | 0 | 0 | 1 | 0.110092 | false | 0 | 0.055046 | 0.018349 | 0.275229 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5d66ab6b71d371d38fa50d90c8734a50bf50ee30 | 2,625 | py | Python | examples/src/Charts/MultiCategoryChart.py | aspose-slides/Aspose.Slides-for-Python-via-.NET | c55ad5c71f942598f1e67e22a52cbcd1cb286467 | [
"MIT"
] | null | null | null | examples/src/Charts/MultiCategoryChart.py | aspose-slides/Aspose.Slides-for-Python-via-.NET | c55ad5c71f942598f1e67e22a52cbcd1cb286467 | [
"MIT"
] | null | null | null | examples/src/Charts/MultiCategoryChart.py | aspose-slides/Aspose.Slides-for-Python-via-.NET | c55ad5c71f942598f1e67e22a52cbcd1cb286467 | [
"MIT"
] | null | null | null | import aspose.pydrawing as drawing
import aspose.slides as slides
def charts_multi_category_chart():
#ExStart:MultiCategoryChart
    # The path to the output directory.
outDir = "./examples/out/"
with slides.Presentation() as pres:
slide = pres.slides[0]
        ch = slide.shapes.add_chart(slides.charts.ChartType.CLUSTERED_COLUMN, 100, 100, 600, 450)
ch.chart_data.series.clear()
ch.chart_data.categories.clear()
fact = ch.chart_data.chart_data_workbook
fact.clear(0)
defaultWorksheetIndex = 0
category = ch.chart_data.categories.add(fact.get_cell(0, "c2", "A"))
category.grouping_levels.set_grouping_item(1, "Group1")
category = ch.chart_data.categories.add(fact.get_cell(0, "c3", "B"))
category = ch.chart_data.categories.add(fact.get_cell(0, "c4", "C"))
category.grouping_levels.set_grouping_item(1, "Group2")
category = ch.chart_data.categories.add(fact.get_cell(0, "c5", "D"))
category = ch.chart_data.categories.add(fact.get_cell(0, "c6", "E"))
category.grouping_levels.set_grouping_item(1, "Group3")
category = ch.chart_data.categories.add(fact.get_cell(0, "c7", "F"))
category = ch.chart_data.categories.add(fact.get_cell(0, "c8", "G"))
category.grouping_levels.set_grouping_item(1, "Group4")
category = ch.chart_data.categories.add(fact.get_cell(0, "c9", "H"))
# Adding Series
series = ch.chart_data.series.add(fact.get_cell(0, "D1", "Series 1"),
slides.charts.ChartType.CLUSTERED_COLUMN)
series.data_points.add_data_point_for_bar_series(fact.get_cell(defaultWorksheetIndex, "D2", 10))
series.data_points.add_data_point_for_bar_series(fact.get_cell(defaultWorksheetIndex, "D3", 20))
series.data_points.add_data_point_for_bar_series(fact.get_cell(defaultWorksheetIndex, "D4", 30))
series.data_points.add_data_point_for_bar_series(fact.get_cell(defaultWorksheetIndex, "D5", 40))
series.data_points.add_data_point_for_bar_series(fact.get_cell(defaultWorksheetIndex, "D6", 50))
series.data_points.add_data_point_for_bar_series(fact.get_cell(defaultWorksheetIndex, "D7", 60))
series.data_points.add_data_point_for_bar_series(fact.get_cell(defaultWorksheetIndex, "D8", 70))
series.data_points.add_data_point_for_bar_series(fact.get_cell(defaultWorksheetIndex, "D9", 80))
# Save presentation with chart
pres.save(outDir + "charts_multi_category_chart_out.pptx", slides.export.SaveFormat.PPTX)
#ExEnd:MultiCategoryChart | 51.470588 | 106 | 0.708952 | 361 | 2,625 | 4.878116 | 0.268698 | 0.067575 | 0.10619 | 0.107325 | 0.662692 | 0.613288 | 0.613288 | 0.526973 | 0.526973 | 0.526973 | 0 | 0.030622 | 0.166476 | 2,625 | 51 | 107 | 51.470588 | 0.773766 | 0.053714 | 0 | 0 | 0 | 0 | 0.050424 | 0.014522 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.057143 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5d66ef032fbd2dcf091b5ffde482a5d596613146 | 1,940 | py | Python | bin/write2cly.py | docdiesel/smartmetertools | 3b7449c7a9069696af078631aa5440f53d0f57bc | [
"MIT"
] | 1 | 2019-05-30T08:28:31.000Z | 2019-05-30T08:28:31.000Z | bin/write2cly.py | docdiesel/smartmetertools | 3b7449c7a9069696af078631aa5440f53d0f57bc | [
"MIT"
] | null | null | null | bin/write2cly.py | docdiesel/smartmetertools | 3b7449c7a9069696af078631aa5440f53d0f57bc | [
"MIT"
] | null | null | null | #!/usr/bin/python3
## write2cly.py - reads json (generated by sml_reader.py) from stdin
## - writes values to Corlysis time series InfluxDB
##
## Writes data from smart meter to time series database (InfluxDB)
## at Corlysis.com [1]. You need to configure your database and token
## in the config section.
##
## [1] https://corlysis.com/
##==== license section ========
## This code is under MIT License: Copyright (C) 2019 Bernd Künnen
## License details see https://choosealicense.com/licenses/mit/
##==== config section ========
# define corlysis settings here - set db and token at least
cly_base_url = 'https://corlysis.com:8086/write'
cly_parameters = {
"db": "energy",
"u" : "token",
"p" : "placeyourtokenhere",
"precision": "ms"}
# assign readable field names
config = {
"1.8.0": "Bezug",
"2.8.0": "Einspeisung",
"16.7.0": "Wirkleistung"
}
##==== code section ==== no need to change lines below ====
##-- import libraries
import json
import sys
import time
import requests
# load json from stdin
try:
myjson = json.load(sys.stdin)
except Exception:
sys.stderr.write('!! error loading json')
exit(1)
# decode json
try:
line = "meter_data "
# add each meter value to line
for obis in myjson['data']:
key = config[obis] # set human readable field name
value = myjson['data'][obis] # get value from smart meter
line += key + '=' + str(value) + ',' # add key=value to insert line
# cut off last comma
line = line[:-1]
# add timestamp as unix timestamp in ms
line += ' ' + str(int(time.time()*1000)) #+ '\n'
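  # example of the assembled InfluxDB line-protocol string (values hypothetical):
  #   meter_data Bezug=1234.5,Einspeisung=678.9,Wirkleistung=350 1650000000000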
# post data into time series database; http response should be 204
r = requests.post(cly_base_url, params=cly_parameters, data=line)
  if r.status_code != 204:
    # status_code is an int and content is bytes; stderr.write() needs str
    sys.stderr.write(str(r.status_code) + '\n')
    sys.stderr.write(r.text)
# catch if input is no valid json
except Exception:
sys.stderr.write('!!error: no data block in json')
exit(2)
| 25.526316 | 71 | 0.652062 | 278 | 1,940 | 4.514388 | 0.496403 | 0.028685 | 0.044622 | 0.031873 | 0.039841 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022581 | 0.201031 | 1,940 | 75 | 72 | 25.866667 | 0.787097 | 0.506186 | 0 | 0.117647 | 1 | 0 | 0.209836 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.088235 | 0 | 0.088235 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5d6e47beb4576bf2e083ccdcb792c2e2830c83c4 | 50,279 | py | Python | user_program/usb4vc_ui.py | dekuNukem/USB4VC | 66c4f0b4a4acd7cec6654ea0dd4da026edf5d24c | [
"MIT"
] | 78 | 2022-02-07T16:48:11.000Z | 2022-03-31T12:25:35.000Z | user_program/usb4vc_ui.py | dekuNukem/USB4VC | 66c4f0b4a4acd7cec6654ea0dd4da026edf5d24c | [
"MIT"
] | 1 | 2022-02-26T20:16:08.000Z | 2022-02-26T20:24:04.000Z | user_program/usb4vc_ui.py | dekuNukem/USB4VC | 66c4f0b4a4acd7cec6654ea0dd4da026edf5d24c | [
"MIT"
] | 1 | 2022-02-24T03:34:15.000Z | 2022-02-24T03:34:15.000Z | # https://luma-oled.readthedocs.io/en/latest/software.html
import os
import sys
import time
import threading
import usb4vc_oled
from luma.core.render import canvas
import RPi.GPIO as GPIO
import usb4vc_usb_scan
import usb4vc_shared
import usb4vc_show_ev
import usb4vc_check_update
import json
import subprocess
from subprocess import Popen, PIPE
from usb4vc_shared import this_app_dir_path
from usb4vc_shared import config_dir_path
from usb4vc_shared import firmware_dir_path
from usb4vc_shared import temp_dir_path
from usb4vc_shared import ensure_dir
from usb4vc_shared import i2c_bootloader_pbid
from usb4vc_shared import usb_bootloader_pbid
config_file_path = os.path.join(config_dir_path, 'config.json')
ensure_dir(this_app_dir_path)
ensure_dir(config_dir_path)
ensure_dir(firmware_dir_path)
ensure_dir(temp_dir_path)
PLUS_BUTTON_PIN = 27
MINUS_BUTTON_PIN = 19
ENTER_BUTTON_PIN = 22
SHUTDOWN_BUTTON_PIN = 21
PBOARD_RESET_PIN = 25
PBOARD_BOOT0_PIN = 12
SLEEP_LED_PIN = 26
GPIO.setmode(GPIO.BCM)
GPIO.setup(PBOARD_RESET_PIN, GPIO.IN)
GPIO.setup(PBOARD_BOOT0_PIN, GPIO.IN)
GPIO.setup(SLEEP_LED_PIN, GPIO.OUT)
GPIO.output(SLEEP_LED_PIN, GPIO.LOW)
SPI_MOSI_MAGIC = 0xde
SPI_MOSI_MSG_TYPE_SET_PROTOCOL = 2
set_protocl_spi_msg_template = [SPI_MOSI_MAGIC, 0, SPI_MOSI_MSG_TYPE_SET_PROTOCOL] + [0]*29
class my_button(object):
def __init__(self, bcm_pin):
super(my_button, self).__init__()
self.pin_number = bcm_pin
GPIO.setup(self.pin_number, GPIO.IN, pull_up_down=GPIO.PUD_UP)
self.prev_state = GPIO.input(self.pin_number)
def is_pressed(self):
result = False
current_state = GPIO.input(self.pin_number)
if self.prev_state == 1 and current_state == 0:
result = True
self.prev_state = current_state
return result
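# usage sketch (hypothetical pin number): poll in a loop; is_pressed() only
# reports True on the 1 -> 0 edge since the previous call, so a held button
# registers as a single press
#   btn = my_button(22)
#   if btn.is_pressed():
#       ...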
PBOARD_ID_UNKNOWN = 0
PBOARD_ID_IBMPC = 1
PBOARD_ID_ADB = 2
pboard_info_spi_msg = [0] * 32
this_pboard_id = PBOARD_ID_UNKNOWN
USBGP_BTN_SOUTH = 0x130
USBGP_BTN_EAST = 0x131
USBGP_BTN_C = 0x132
USBGP_BTN_NORTH = 0x133
USBGP_BTN_WEST = 0x134
USBGP_BTN_Z = 0x135
USBGP_BTN_TL = 0x136
USBGP_BTN_TR = 0x137
USBGP_BTN_TL2 = 0x138
USBGP_BTN_TR2 = 0x139
USBGP_BTN_SELECT = 0x13a
USBGP_BTN_START = 0x13b
USBGP_BTN_MODE = 0x13c
USBGP_BTN_THUMBL = 0x13d
USBGP_BTN_THUMBR = 0x13e
USBGP_BTN_A = USBGP_BTN_SOUTH
USBGP_BTN_B = USBGP_BTN_EAST
USBGP_BTN_X = USBGP_BTN_NORTH
USBGP_BTN_Y = USBGP_BTN_WEST
USBGP_ABS_X = 0x00 # left stick X
USBGP_ABS_Y = 0x01 # left stick Y
USBGP_ABS_Z = 0x02 # left analog trigger
USBGP_ABS_RX = 0x03 # right stick X
USBGP_ABS_RY = 0x04 # right stick Y
USBGP_ABS_RZ = 0x05 # right analog trigger
USBGP_ABS_HAT0X = 0x10 # D-pad X
USBGP_ABS_HAT0Y = 0x11 # D-pad Y
GENERIC_USB_GAMEPAD_TO_MOUSE_KB_DEAULT_MAPPING = {
"MAPPING_TYPE": "DEFAULT_MOUSE_KB",
'BTN_TL': {'code': 'BTN_LEFT'},
'BTN_TR': {'code': 'BTN_RIGHT'},
'BTN_TL2': {'code': 'BTN_LEFT'},
'BTN_TR2': {'code': 'BTN_RIGHT'},
'ABS_X': {'code': 'REL_X'},
'ABS_Y': {'code': 'REL_Y'},
'ABS_HAT0X': {'code': 'KEY_RIGHT', 'code_neg': 'KEY_LEFT'},
'ABS_HAT0Y': {'code': 'KEY_DOWN', 'code_neg': 'KEY_UP'}
}
IBM_GENERIC_USB_GAMEPAD_TO_15PIN_GAMEPORT_GAMEPAD_DEAULT_MAPPING = {
"MAPPING_TYPE": "DEFAULT_15PIN",
# buttons to buttons
'BTN_SOUTH': {'code':'IBM_GGP_BTN_1'},
'BTN_NORTH': {'code':'IBM_GGP_BTN_2'},
'BTN_EAST': {'code':'IBM_GGP_BTN_3'},
'BTN_WEST': {'code':'IBM_GGP_BTN_4'},
'BTN_TL': {'code':'IBM_GGP_BTN_1'},
'BTN_TR': {'code':'IBM_GGP_BTN_2'},
'BTN_Z': {'code':'IBM_GGP_BTN_3'},
'BTN_C': {'code':'IBM_GGP_BTN_4'},
'BTN_TL2': {'code':'IBM_GGP_BTN_1'},
'BTN_TR2': {'code':'IBM_GGP_BTN_2'},
# analog axes to analog axes
'ABS_X': {'code':'IBM_GGP_JS1_X'},
'ABS_Y': {'code':'IBM_GGP_JS1_Y'},
'ABS_HAT0X': {'code':'IBM_GGP_JS1_X'},
'ABS_HAT0Y': {'code':'IBM_GGP_JS1_Y'},
'ABS_RX': {'code':'IBM_GGP_JS2_X'},
'ABS_RY': {'code':'IBM_GGP_JS2_Y'},
}
PROTOCOL_OFF = {'pid':0, 'display_name':"OFF"}
PROTOCOL_AT_PS2_KB = {'pid':1, 'display_name':"AT/PS2"}
PROTOCOL_XT_KB = {'pid':2, 'display_name':"PC XT"}
PROTOCOL_ADB_KB = {'pid':3, 'display_name':"ADB"}
PROTOCOL_PS2_MOUSE_NORMAL = {'pid':4, 'display_name':"PS/2"}
PROTOCOL_MICROSOFT_SERIAL_MOUSE = {'pid':5, 'display_name':"Microsft Serial"}
PROTOCOL_ADB_MOUSE = {'pid':6, 'display_name':"ADB"}
PROTOCOL_15PIN_GAMEPORT_GAMEPAD = {'pid':7, 'display_name':"Generic 15-Pin", 'mapping':IBM_GENERIC_USB_GAMEPAD_TO_15PIN_GAMEPORT_GAMEPAD_DEAULT_MAPPING}
PROTOCOL_MOUSESYSTEMS_SERIAL_MOUSE = {'pid':8, 'display_name':"MouseSys Serial"}
PROTOCOL_USB_GP_TO_MOUSE_KB = {'pid':0, 'display_name':'Mouse & KB', 'mapping':GENERIC_USB_GAMEPAD_TO_MOUSE_KB_DEAULT_MAPPING}
PROTOCOL_RAW_KEYBOARD = {'pid':125, 'display_name':"Raw data"}
PROTOCOL_RAW_MOUSE = {'pid':126, 'display_name':"Raw data"}
PROTOCOL_RAW_GAMEPAD = {'pid':127, 'display_name':"Raw data"}
custom_profile_list = []
try:
onlyfiles = [f for f in os.listdir(config_dir_path) if os.path.isfile(os.path.join(config_dir_path, f))]
json_map_files = [os.path.join(config_dir_path, x) for x in onlyfiles if x.lower().startswith('usb4vc_map') and x.lower().endswith(".json")]
for item in json_map_files:
print('loading json file:', item)
with open(item) as json_file:
custom_profile_list.append(json.load(json_file))
except Exception as e:
print('exception json load:', e)
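# sketch of a usb4vc_map*.json custom profile, inferred from how entries are
# consumed in ui_init() below; every field value here is hypothetical:
#   {"protocol_board": "<board name>", "device_type": "protocol_list_gamepad",
#    "protocol_name": "<protocol name>", "display_name": "My Mapping",
#    "mapping": {...}}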
def get_list_of_usb_drive():
usb_drive_set = set()
try:
usb_drive_path = subprocess.getoutput(f"timeout 2 df -h | grep -i usb").replace('\r', '').split('\n')
for item in [x for x in usb_drive_path if len(x) > 2]:
usb_drive_set.add(os.path.join(item.split(' ')[-1], 'usb4vc'))
except Exception as e:
print("exception get_list_of_usb_drive:", e)
return usb_drive_set
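# example: a (hypothetical) df line "/dev/sda1 15G 1G 14G 7% /media/pi/USBKEY"
# yields the candidate path "/media/pi/USBKEY/usb4vc"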
def copy_debug_log():
usb_drive_set = get_list_of_usb_drive()
if len(usb_drive_set) == 0:
return False
for this_path in usb_drive_set:
if os.path.isdir(this_path):
print('copying debug log to', this_path)
os.system(f'sudo cp -v /home/pi/usb4vc/usb4vc_debug_log.txt {this_path}')
return True
def check_usb_drive():
usb_drive_set = get_list_of_usb_drive()
if len(usb_drive_set) == 0:
return False, 'USB Drive Not Found'
for this_path in usb_drive_set:
usb_config_path = os.path.join(this_path, 'config')
if not os.path.isdir(usb_config_path):
usb_config_path = None
if usb_config_path is not None:
return True, usb_config_path
return False, 'No Update Data Found'
def get_pbid_and_version(dfu_file_name):
pbid = None
try:
pbid = int(dfu_file_name.split('PBID')[-1].split('_')[0])
except Exception as e:
print("exception fw pbid parse:", e)
fw_ver_tuple = None
try:
fw_ver = dfu_file_name.lower().split('_v')[-1].split('.')[0].split('_')
fw_ver_tuple = (int(fw_ver[0]), int(fw_ver[1]), int(fw_ver[2]))
except Exception as e:
print('exception fw ver parse:', e)
return pbid, fw_ver_tuple
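# example: a (hypothetical) file name "PBFW_PBID1_V0_9_2.dfu" parses to
# pbid=1 and fw_ver_tuple=(0, 9, 2)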
def reset_pboard():
print("resetting protocol board...")
GPIO.setup(PBOARD_BOOT0_PIN, GPIO.IN)
GPIO.setup(PBOARD_RESET_PIN, GPIO.OUT)
GPIO.output(PBOARD_RESET_PIN, GPIO.LOW)
time.sleep(0.05)
GPIO.setup(PBOARD_RESET_PIN, GPIO.IN)
time.sleep(0.05)
print("done")
def enter_dfu():
# RESET LOW: Enter reset
GPIO.setup(PBOARD_RESET_PIN, GPIO.OUT)
GPIO.output(PBOARD_RESET_PIN, GPIO.LOW)
time.sleep(0.05)
# BOOT0 HIGH: Boot into DFU mode
GPIO.setup(PBOARD_BOOT0_PIN, GPIO.OUT)
GPIO.output(PBOARD_BOOT0_PIN, GPIO.HIGH)
time.sleep(0.05)
# Release RESET, BOOT0 still HIGH, STM32 now in DFU mode
GPIO.setup(PBOARD_RESET_PIN, GPIO.IN)
time.sleep(1.5)
def exit_dfu():
# Release BOOT0
GPIO.setup(PBOARD_BOOT0_PIN, GPIO.IN)
# Activate RESET
GPIO.setup(PBOARD_RESET_PIN, GPIO.OUT)
GPIO.output(PBOARD_RESET_PIN, GPIO.LOW)
time.sleep(0.05)
# Release RESET, BOOT0 is LOW, STM32 boots in normal mode
GPIO.setup(PBOARD_RESET_PIN, GPIO.IN)
time.sleep(1.5)
def fw_update(fw_path, pbid):
is_updated = False
if pbid in i2c_bootloader_pbid and fw_path.lower().endswith('.hex'):
enter_dfu()
os.system(f'sudo stm32flash -w {fw_path} -a 0x3b /dev/i2c-1')
is_updated = True
elif pbid in usb_bootloader_pbid and fw_path.lower().endswith('.dfu'):
enter_dfu()
lsusb_str = subprocess.getoutput("lsusb")
if 'in DFU'.lower() not in lsusb_str.lower():
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Connect a USB cable", usb4vc_oled.font_regular, 0, draw)
usb4vc_oled.oled_print_centered("from P-Card to RPi", usb4vc_oled.font_regular, 10, draw)
usb4vc_oled.oled_print_centered("and try again", usb4vc_oled.font_regular, 20, draw)
time.sleep(4)
else:
os.system(f'sudo dfu-util --device ,0483:df11 -a 0 -D {fw_path}')
is_updated = True
exit_dfu()
return is_updated
def update_pboard_firmware(this_pid):
onlyfiles = [f for f in os.listdir(firmware_dir_path) if os.path.isfile(os.path.join(firmware_dir_path, f))]
firmware_files = [x for x in onlyfiles if x.startswith("PBFW_") and (x.lower().endswith(".dfu") or x.lower().endswith(".hex")) and "PBID" in x]
this_pboard_version_tuple = (pboard_info_spi_msg[5], pboard_info_spi_msg[6], pboard_info_spi_msg[7])
for item in firmware_files:
pbid, fw_ver_tuple = get_pbid_and_version(item)
if pbid is None or fw_ver_tuple is None:
continue
print('update_pboard_firmware:', this_pid, this_pboard_version_tuple, fw_ver_tuple)
if pbid == this_pid and fw_ver_tuple > this_pboard_version_tuple:
print("DOING IT NOW")
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Loading Firmware:", usb4vc_oled.font_medium, 0, draw)
                # str.strip() removes a character set, not a literal prefix or
                # suffix, so trim the "PBFW_" prefix and the extension explicitly
                fw_display_name = item[len("PBFW_"):].rsplit('.', 1)[0]
                usb4vc_oled.oled_print_centered(fw_display_name, usb4vc_oled.font_regular, 16, draw)
if fw_update(os.path.join(firmware_dir_path, item), this_pid):
return True
return False
def update_from_usb(usb_config_path):
if usb_config_path is not None:
os.system(f'cp -v /home/pi/usb4vc/config/config.json {usb_config_path}')
os.system('mv -v /home/pi/usb4vc/config/config.json /home/pi/usb4vc/config.json')
os.system('rm -rfv /home/pi/usb4vc/config/*')
os.system(f"cp -v {os.path.join(usb_config_path, '*')} /home/pi/usb4vc/config")
os.system("mv -v /home/pi/usb4vc/config.json /home/pi/usb4vc/config/config.json")
ibmpc_keyboard_protocols = [PROTOCOL_OFF, PROTOCOL_AT_PS2_KB, PROTOCOL_XT_KB]
ibmpc_mouse_protocols = [PROTOCOL_OFF, PROTOCOL_PS2_MOUSE_NORMAL, PROTOCOL_MICROSOFT_SERIAL_MOUSE, PROTOCOL_MOUSESYSTEMS_SERIAL_MOUSE]
ibmpc_gamepad_protocols = [PROTOCOL_OFF, PROTOCOL_15PIN_GAMEPORT_GAMEPAD, PROTOCOL_USB_GP_TO_MOUSE_KB]
adb_keyboard_protocols = [PROTOCOL_OFF, PROTOCOL_ADB_KB]
adb_mouse_protocols = [PROTOCOL_OFF, PROTOCOL_ADB_MOUSE]
adb_gamepad_protocols = [PROTOCOL_OFF, PROTOCOL_USB_GP_TO_MOUSE_KB]
raw_keyboard_protocols = [PROTOCOL_OFF, PROTOCOL_RAW_KEYBOARD]
raw_mouse_protocols = [PROTOCOL_OFF, PROTOCOL_RAW_MOUSE]
raw_gamepad_protocols = [PROTOCOL_OFF, PROTOCOL_RAW_GAMEPAD]
mouse_sensitivity_list = [1, 1.25, 1.5, 1.75, 0.25, 0.5, 0.75]
"""
key is protocol card ID
conf_dict[pbid]:
hw revision
current keyboard protocol
current mouse protocol
current gamepad procotol
mouse sensitivity
"""
configuration_dict = {}
LINUX_EXIT_CODE_TIMEOUT = 124
def bt_setup():
rfkill_str = subprocess.getoutput("/usr/sbin/rfkill -n")
if 'bluetooth' not in rfkill_str:
return 1, "no BT receiver found"
os.system('/usr/sbin/rfkill unblock bluetooth')
time.sleep(0.1)
exit_code = os.system('timeout 1 bluetoothctl agent NoInputNoOutput') >> 8
if exit_code == LINUX_EXIT_CODE_TIMEOUT:
return 2, 'bluetoothctl stuck'
return 0, ''
def scan_bt_devices(timeout_sec = 5):
exit_code = os.system(f"timeout {timeout_sec} bluetoothctl --agent NoInputNoOutput scan on") >> 8
if exit_code != LINUX_EXIT_CODE_TIMEOUT:
return None, 'scan error'
device_str = subprocess.getoutput("bluetoothctl --agent NoInputNoOutput devices")
dev_list = []
for line in device_str.replace('\r', '').split('\n'):
if 'device' not in line.lower():
continue
line_split = line.split(' ', maxsplit=2)
# skip if device has no name
if len(line_split) < 3 or line_split[2].count('-') == 5:
continue
dev_list.append((line_split[1], line_split[2]))
return dev_list, ''
def pair_device(mac_addr):
is_ready = False
is_sent = False
fail_phrases = ['fail', 'error', 'not available', 'excep']
with Popen(["bluetoothctl", "--agent", "NoInputNoOutput"], stdout=PIPE, stdin=PIPE, bufsize=1,
universal_newlines=True, shell=True) as p:
for line in p.stdout:
print(line, end='')
line_lo = line.lower()
if 'registered' in line_lo:
is_ready = True
if is_ready is False:
continue
if '#' in line_lo and is_sent == False:
p.stdin.write(f'pair {mac_addr}\n')
is_sent = True
if 'PIN code:' in line:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Enter PIN code:", usb4vc_oled.font_medium, 0, draw)
usb4vc_oled.oled_print_centered(line.split('PIN code:')[-1], usb4vc_oled.font_medium, 15, draw)
if '(yes/no)' in line:
p.stdin.write('yes\n')
if 'number in 0-999999' in line:
return False, "Error: Passkey needed"
if 'successful' in line_lo:
p.stdin.write('exit\n')
return True, 'Success!'
for item in fail_phrases:
if item in line_lo:
p.stdin.write('exit\n')
return False, line
return False, "wtf"
def get_paired_devices():
dev_set = set()
try:
device_str = subprocess.getoutput(f"timeout 5 bluetoothctl --agent NoInputNoOutput paired-devices")
for line in device_str.replace('\r', '').split('\n'):
if 'device' not in line.lower():
continue
line_split = line.split(' ', maxsplit=2)
# skip if device has no name
if len(line_split) < 3 or line_split[2].count('-') == 5:
continue
dev_set.add((line_split[1], line_split[2]))
except Exception as e:
print('exception get_paired_devices:', e)
return dev_set
def load_config():
global configuration_dict
try:
with open(config_file_path) as json_file:
temp_dict = json.load(json_file)
# json dump all keys as strings, need to convert them back to ints
for key in temp_dict:
if key.isdigit():
configuration_dict[int(key)] = temp_dict[key]
else:
configuration_dict[key] = temp_dict[key]
except Exception as e:
print("exception config load failed!", e)
def get_ip_address():
ip_str = subprocess.getoutput("timeout 1 hostname -I")
ip_list = [x for x in ip_str.split(' ') if '.' in x]
if len(ip_list) == 0:
return "Offline"
return f'{ip_list[0]}'
def save_config():
try:
with open(config_file_path, 'w', encoding='utf-8') as save_file:
save_file.write(json.dumps(configuration_dict))
except Exception as e:
print("exception config save failed!", e)
curve_vertial_axis_x_pos = 80
curve_horizontal_axis_width = 32
curve_linear = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15, 16: 16, 17: 17, 18: 18, 19: 19, 20: 20, 21: 21, 22: 22, 23: 23, 24: 24, 25: 25, 26: 26, 27: 27, 28: 28, 29: 29, 30: 30, 31: 31, 32: 32, 33: 33, 34: 34, 35: 35, 36: 36, 37: 37, 38: 38, 39: 39, 40: 40, 41: 41, 42: 42, 43: 43, 44: 44, 45: 45, 46: 46, 47: 47, 48: 48, 49: 49, 50: 50, 51: 51, 52: 52, 53: 53, 54: 54, 55: 55, 56: 56, 57: 57, 58: 58, 59: 59, 60: 60, 61: 61, 62: 62, 63: 63, 64: 64, 65: 65, 66: 66, 67: 67, 68: 68, 69: 69, 70: 70, 71: 71, 72: 72, 73: 73, 74: 74, 75: 75, 76: 76, 77: 77, 78: 78, 79: 79, 80: 80, 81: 81, 82: 82, 83: 83, 84: 84, 85: 85, 86: 86, 87: 87, 88: 88, 89: 89, 90: 90, 91: 91, 92: 92, 93: 93, 94: 94, 95: 95, 96: 96, 97: 97, 98: 98, 99: 99, 100: 100, 101: 101, 102: 102, 103: 103, 104: 104, 105: 105, 106: 106, 107: 107, 108: 108, 109: 109, 110: 110, 111: 111, 112: 112, 113: 113, 114: 114, 115: 115, 116: 116, 117: 117, 118: 118, 119: 119, 120: 120, 121: 121, 122: 122, 123: 123, 124: 124, 125: 125, 126: 126, 127: 127}
curve1 = {0: 1, 1: 1, 2: 2, 3: 2, 4: 3, 5: 4, 6: 4, 7: 5, 8: 5, 9: 6, 10: 6, 11: 7, 12: 7, 13: 8, 14: 8, 15: 9, 16: 9, 17: 10, 18: 11, 19: 11, 20: 12, 21: 12, 22: 13, 23: 13, 24: 14, 25: 15, 26: 15, 27: 16, 28: 16, 29: 17, 30: 18, 31: 18, 32: 19, 33: 19, 34: 20, 35: 21, 36: 21, 37: 22, 38: 22, 39: 23, 40: 24, 41: 24, 42: 25, 43: 26, 44: 26, 45: 27, 46: 28, 47: 28, 48: 29, 49: 30, 50: 30, 51: 31, 52: 32, 53: 33, 54: 33, 55: 34, 56: 35, 57: 36, 58: 36, 59: 37, 60: 38, 61: 39, 62: 39, 63: 40, 64: 41, 65: 42, 66: 43, 67: 44, 68: 45, 69: 46, 70: 46, 71: 47, 72: 48, 73: 49, 74: 50, 75: 51, 76: 52, 77: 53, 78: 55, 79: 56, 80: 57, 81: 58, 82: 59, 83: 60, 84: 61, 85: 62, 86: 63, 87: 65, 88: 66, 89: 67, 90: 68, 91: 70, 92: 71, 93: 72, 94: 73, 95: 75, 96: 76, 97: 77, 98: 79, 99: 80, 100: 81, 101: 83, 102: 84, 103: 86, 104: 87, 105: 89, 106: 90, 107: 92, 108: 93, 109: 95, 110: 96, 111: 98, 112: 100, 113: 101, 114: 103, 115: 105, 116: 106, 117: 108, 118: 110, 119: 112, 120: 113, 121: 115, 122: 117, 123: 119, 124: 121, 125: 123, 126: 125, 127: 127}
curve2 = {0: 1, 1: 1, 2: 1, 3: 1, 4: 2, 5: 2, 6: 2, 7: 2, 8: 2, 9: 3, 10: 3, 11: 3, 12: 3, 13: 4, 14: 4, 15: 4, 16: 4, 17: 5, 18: 5, 19: 5, 20: 5, 21: 6, 22: 6, 23: 6, 24: 7, 25: 7, 26: 7, 27: 8, 28: 8, 29: 8, 30: 8, 31: 9, 32: 9, 33: 9, 34: 10, 35: 10, 36: 10, 37: 11, 38: 11, 39: 12, 40: 12, 41: 12, 42: 13, 43: 13, 44: 13, 45: 14, 46: 14, 47: 15, 48: 15, 49: 15, 50: 16, 51: 16, 52: 17, 53: 17, 54: 18, 55: 18, 56: 19, 57: 19, 58: 20, 59: 20, 60: 21, 61: 21, 62: 22, 63: 22, 64: 23, 65: 23, 66: 24, 67: 24, 68: 25, 69: 26, 70: 26, 71: 27, 72: 28, 73: 28, 74: 29, 75: 30, 76: 30, 77: 31, 78: 32, 79: 33, 80: 34, 81: 35, 82: 36, 83: 37, 84: 38, 85: 39, 86: 40, 87: 41, 88: 42, 89: 43, 90: 44, 91: 45, 92: 47, 93: 48, 94: 49, 95: 51, 96: 52, 97: 53, 98: 55, 99: 56, 100: 58, 101: 59, 102: 61, 103: 63, 104: 64, 105: 66, 106: 68, 107: 70, 108: 71, 109: 73, 110: 75, 111: 78, 112: 80, 113: 82, 114: 84, 115: 86, 116: 89, 117: 92, 118: 94, 119: 96, 120: 100, 121: 102, 122: 106, 123: 110, 124: 112, 125: 116, 126: 120, 127: 125}
curve3 = {0: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 6: 1, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1, 12: 1, 13: 1, 14: 1, 15: 1, 16: 1, 17: 1, 18: 1, 19: 1, 20: 1, 21: 2, 22: 2, 23: 2, 24: 2, 25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 2, 31: 3, 32: 3, 33: 3, 34: 3, 35: 3, 36: 3, 37: 3, 38: 4, 39: 4, 40: 4, 41: 4, 42: 4, 43: 4, 44: 5, 45: 5, 46: 5, 47: 5, 48: 5, 49: 6, 50: 6, 51: 6, 52: 6, 53: 7, 54: 7, 55: 7, 56: 7, 57: 8, 58: 8, 59: 8, 60: 8, 61: 9, 62: 9, 63: 9, 64: 10, 65: 10, 66: 10, 67: 11, 68: 11, 69: 11, 70: 12, 71: 12, 72: 12, 73: 13, 74: 13, 75: 14, 76: 14, 77: 15, 78: 15, 79: 16, 80: 16, 81: 17, 82: 17, 83: 18, 84: 19, 85: 19, 86: 20, 87: 21, 88: 21, 89: 22, 90: 23, 91: 24, 92: 25, 93: 26, 94: 27, 95: 28, 96: 29, 97: 30, 98: 32, 99: 33, 100: 34, 101: 35, 102: 37, 103: 38, 104: 40, 105: 41, 106: 43, 107: 45, 108: 46, 109: 48, 110: 50, 111: 52, 112: 54, 113: 56, 114: 59, 115: 61, 116: 64, 117: 66, 118: 69, 119: 72, 120: 76, 121: 79, 122: 83, 123: 87, 124: 92, 125: 99, 126: 104, 127: 118}
joystick_curve_list = [curve_linear, curve1, curve2, curve3]
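# each curve maps a 0-127 input magnitude to a 0-127 output: curve_linear is
# the identity, and curve1-curve3 apply progressively stronger expo shaping
# (finer control near the center); draw_joystick_curve() plots the active one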
class usb4vc_menu(object):
def cap_index(self, index, list_size):
if index >= list_size:
return 0
return index
def __init__(self, pboard, conf_dict):
super(usb4vc_menu, self).__init__()
self.current_level = 0
self.current_page = 0
self.level_size = 6
self.page_size = [7, 6, 4, 1, 1, 5]
self.kb_protocol_list = list(pboard['protocol_list_keyboard'])
self.mouse_protocol_list = list(pboard['protocol_list_mouse'])
self.gamepad_protocol_list = list(pboard['protocol_list_gamepad'])
self.pb_info = dict(pboard)
self.current_keyboard_protocol_index = self.cap_index(conf_dict.get('keyboard_protocol_index', 0), len(self.kb_protocol_list))
self.current_mouse_protocol_index = self.cap_index(conf_dict.get("mouse_protocol_index", 0), len(self.mouse_protocol_list))
self.current_mouse_sensitivity_offset_index = self.cap_index(conf_dict.get("mouse_sensitivity_index", 0), len(mouse_sensitivity_list))
self.current_gamepad_protocol_index = self.cap_index(conf_dict.get("gamepad_protocol_index", 0), len(self.gamepad_protocol_list))
self.current_keyboard_protocol = self.kb_protocol_list[self.current_keyboard_protocol_index]
self.current_mouse_protocol = self.mouse_protocol_list[self.current_mouse_protocol_index]
self.current_gamepad_protocol = self.gamepad_protocol_list[self.current_gamepad_protocol_index]
self.current_joystick_curve_index = self.cap_index(conf_dict.get("joystick_curve_index", 0), len(joystick_curve_list))
self.last_spi_message = []
self.bluetooth_device_list = None
self.error_message = ''
self.pairing_result = ''
self.bt_scan_timeout_sec = 10
self.paired_devices_list = []
self.send_protocol_set_spi_msg()
def switch_page(self, amount):
self.current_page = (self.current_page + amount) % self.page_size[self.current_level]
def goto_page(self, new_page):
if new_page < self.page_size[self.current_level]:
self.current_page = new_page
def goto_level(self, new_level):
if new_level < self.level_size:
self.current_level = new_level
self.current_page = 0
def draw_joystick_curve(self):
this_curve = joystick_curve_list[self.current_joystick_curve_index % len(joystick_curve_list)]
with canvas(usb4vc_oled.oled_device) as draw:
draw.text((0, 0), "Joystick", font=usb4vc_oled.font_medium, fill="white")
draw.text((0, 15), "Curve", font=usb4vc_oled.font_medium, fill="white")
draw.line((curve_vertial_axis_x_pos, 0, curve_vertial_axis_x_pos, curve_vertial_axis_x_pos), fill="white")
draw.line((curve_vertial_axis_x_pos, 31, curve_vertial_axis_x_pos+curve_horizontal_axis_width, 31), fill="white")
for xxx in range(curve_horizontal_axis_width):
dict_key = xxx*4
this_point_x = xxx + curve_vertial_axis_x_pos
this_point_y = usb4vc_oled.OLED_HEIGHT - this_curve[dict_key]//4 - 1
draw.line((this_point_x,this_point_y,this_point_x,this_point_y), fill="white")
def display_page(self, level, page):
if level == 0:
if page == 0:
with canvas(usb4vc_oled.oled_device) as draw:
mouse_count, kb_count, gp_count = usb4vc_usb_scan.get_device_count()
draw.text((0, 0), f"KBD {kb_count} {self.current_keyboard_protocol['display_name']}", font=usb4vc_oled.font_regular, fill="white")
draw.text((0, 10), f"MOS {mouse_count} {self.current_mouse_protocol['display_name']}", font=usb4vc_oled.font_regular, fill="white")
draw.text((0, 20), f"GPD {gp_count} {self.current_gamepad_protocol['display_name']}", font=usb4vc_oled.font_regular, fill="white")
if page == 1:
with canvas(usb4vc_oled.oled_device) as draw:
if 'Unknown' in self.pb_info['full_name']:
draw.text((0, 0), f"{self.pb_info['full_name']} PID {this_pboard_id}", font=usb4vc_oled.font_regular, fill="white")
else:
draw.text((0, 0), f"{self.pb_info['full_name']}", font=usb4vc_oled.font_regular, fill="white")
draw.text((0, 10), f"PB {self.pb_info['fw_ver'][0]}.{self.pb_info['fw_ver'][1]}.{self.pb_info['fw_ver'][2]} RPi {usb4vc_shared.RPI_APP_VERSION_TUPLE[0]}.{usb4vc_shared.RPI_APP_VERSION_TUPLE[1]}.{usb4vc_shared.RPI_APP_VERSION_TUPLE[2]}", font=usb4vc_oled.font_regular, fill="white")
draw.text((0, 20), f"IP: {get_ip_address()}", font=usb4vc_oled.font_regular, fill="white")
if page == 2:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Load Custom", usb4vc_oled.font_medium, 0, draw)
usb4vc_oled.oled_print_centered("Config from USB", usb4vc_oled.font_medium, 16, draw)
if page == 3:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Internet Update", usb4vc_oled.font_medium, 10, draw)
if page == 4:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Show Event Codes", usb4vc_oled.font_medium, 0, draw)
usb4vc_oled.oled_print_centered("(experimental)", usb4vc_oled.font_regular, 20, draw)
if page == 5:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Remove BT Device", usb4vc_oled.font_medium, 10, draw)
if page == 6:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Pair Bluetooth", usb4vc_oled.font_medium, 10, draw)
if level == 1:
if page == 0:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Keyboard Protocol", usb4vc_oled.font_medium, 0, draw)
usb4vc_oled.oled_print_centered(self.kb_protocol_list[self.current_keyboard_protocol_index]['display_name'], usb4vc_oled.font_medium, 15, draw)
if page == 1:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Mouse Protocol", usb4vc_oled.font_medium, 0, draw)
usb4vc_oled.oled_print_centered(self.mouse_protocol_list[self.current_mouse_protocol_index]['display_name'], usb4vc_oled.font_medium, 15, draw)
if page == 2:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Gamepad Protocol", usb4vc_oled.font_medium, 0, draw)
usb4vc_oled.oled_print_centered(self.gamepad_protocol_list[self.current_gamepad_protocol_index]['display_name'], usb4vc_oled.font_medium, 15, draw)
if page == 3:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Mouse Sensitivity", usb4vc_oled.font_medium, 0, draw)
usb4vc_oled.oled_print_centered(f"{mouse_sensitivity_list[self.current_mouse_sensitivity_offset_index]}", usb4vc_oled.font_medium, 15, draw)
if page == 4:
self.draw_joystick_curve()
if page == 5:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Save & Quit", usb4vc_oled.font_medium, 10, draw)
if level == 2:
if page == 0:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Put your device in", usb4vc_oled.font_regular, 0, draw)
usb4vc_oled.oled_print_centered("pairing mode now.", usb4vc_oled.font_regular, 10, draw)
usb4vc_oled.oled_print_centered("Press enter to start", usb4vc_oled.font_regular, 20, draw)
if page == 1:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Scanning...", usb4vc_oled.font_medium, 0, draw)
usb4vc_oled.oled_print_centered("Please wait", usb4vc_oled.font_medium, 15, draw)
result, self.error_message = bt_setup()
if result != 0:
self.goto_page(3)
self.display_curent_page()
return
paired_devices_set = get_paired_devices()
                self.bluetooth_device_list, self.error_message = scan_bt_devices(self.bt_scan_timeout_sec)
                # scan_bt_devices() returns None on error; show the error page
                # instead of crashing on set(None) below
                if self.bluetooth_device_list is None:
                    self.goto_page(3)
                    self.display_curent_page()
                    return
                self.bluetooth_device_list = list(set(self.bluetooth_device_list) - paired_devices_set)
if len(self.bluetooth_device_list) == 0:
self.error_message = "Nothing was found"
self.goto_page(3)
self.display_curent_page()
return
print("BT LIST:", self.bluetooth_device_list)
# set up level 3 menu structure
self.page_size[3] = len(self.bluetooth_device_list) + 1
self.goto_level(3)
self.display_curent_page()
if page == 2:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Pairing result:", usb4vc_oled.font_medium, 0, draw)
usb4vc_oled.oled_print_centered(self.pairing_result, usb4vc_oled.font_regular, 20, draw)
if page == 3:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Bluetooth Error!", usb4vc_oled.font_medium, 0, draw)
usb4vc_oled.oled_print_centered(self.error_message, usb4vc_oled.font_regular, 20, draw)
if level == 3:
if page == self.page_size[3] - 1:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Exit", usb4vc_oled.font_medium, 10, draw)
else:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered(f"Found {len(self.bluetooth_device_list)}. Pair this?", usb4vc_oled.font_regular, 0, draw)
usb4vc_oled.oled_print_centered(f"{self.bluetooth_device_list[page][1]}", usb4vc_oled.font_regular, 10, draw)
usb4vc_oled.oled_print_centered(f"{self.bluetooth_device_list[page][0]}", usb4vc_oled.font_regular, 20, draw)
if level == 4:
if page == self.page_size[4] - 1:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Exit", usb4vc_oled.font_medium, 10, draw)
else:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered(f"Remove this?", usb4vc_oled.font_regular, 0, draw)
usb4vc_oled.oled_print_centered(f"{self.paired_devices_list[page][1]}", usb4vc_oled.font_regular, 10, draw)
usb4vc_oled.oled_print_centered(f"{self.paired_devices_list[page][0]}", usb4vc_oled.font_regular, 20, draw)
if level == 5:
if page == 0:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Power Down", usb4vc_oled.font_medium, 10, draw)
if page == 1:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Relaunch", usb4vc_oled.font_medium, 10, draw)
if page == 2:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Reboot", usb4vc_oled.font_medium, 10, draw)
if page == 3:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Exit to Linux", usb4vc_oled.font_medium, 10, draw)
if page == 4:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Cancel", usb4vc_oled.font_medium, 10, draw)
def send_protocol_set_spi_msg(self):
status_dict = {}
for index, item in enumerate(self.kb_protocol_list):
if item['pid'] & 0x7f in status_dict and status_dict[item['pid'] & 0x7f] == 1:
continue
status_dict[item['pid'] & 0x7f] = 0
if index == self.current_keyboard_protocol_index:
status_dict[item['pid'] & 0x7f] = 1
for index, item in enumerate(self.mouse_protocol_list):
if item['pid'] & 0x7f in status_dict and status_dict[item['pid'] & 0x7f] == 1:
continue
status_dict[item['pid'] & 0x7f] = 0
if index == self.current_mouse_protocol_index:
status_dict[item['pid'] & 0x7f] = 1
for index, item in enumerate(self.gamepad_protocol_list):
if item['pid'] & 0x7f in status_dict and status_dict[item['pid'] & 0x7f] == 1:
continue
status_dict[item['pid'] & 0x7f] = 0
if index == self.current_gamepad_protocol_index:
status_dict[item['pid'] & 0x7f] = 1
protocol_bytes = []
for key in status_dict:
if key == PROTOCOL_OFF['pid']:
continue
if status_dict[key]:
protocol_bytes.append(key | 0x80)
else:
protocol_bytes.append(key)
this_msg = list(set_protocl_spi_msg_template)
this_msg[3:3+len(protocol_bytes)] = protocol_bytes
self.current_keyboard_protocol = self.kb_protocol_list[self.current_keyboard_protocol_index]
self.current_mouse_protocol = self.mouse_protocol_list[self.current_mouse_protocol_index]
self.current_gamepad_protocol = self.gamepad_protocol_list[self.current_gamepad_protocol_index]
if this_msg == self.last_spi_message:
print("SPI: no need to send")
return
print("set_protocol:", [hex(x) for x in this_msg])
usb4vc_usb_scan.set_protocol(this_msg)
print('new status:', [hex(x) for x in usb4vc_usb_scan.get_pboard_info()])
self.last_spi_message = list(this_msg)
def action(self, level, page):
if level == 0:
if page == 2:
usb_present, config_path = check_usb_drive()
if usb_present is False:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Error:", usb4vc_oled.font_medium, 0, draw)
usb4vc_oled.oled_print_centered(str(config_path), usb4vc_oled.font_regular, 16, draw)
time.sleep(3)
self.goto_level(0)
else:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Copying", usb4vc_oled.font_medium, 0, draw)
usb4vc_oled.oled_print_centered("Debug Log...", usb4vc_oled.font_medium, 16, draw)
copy_debug_log()
time.sleep(2)
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Copying custom", usb4vc_oled.font_medium, 0, draw)
usb4vc_oled.oled_print_centered("mapping...", usb4vc_oled.font_medium, 16, draw)
time.sleep(2)
update_from_usb(config_path)
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Update complete!", usb4vc_oled.font_medium, 0, draw)
usb4vc_oled.oled_print_centered("Relaunching...", usb4vc_oled.font_medium, 16, draw)
time.sleep(3)
usb4vc_oled.oled_device.clear()
os._exit(0)
self.goto_level(0)
elif page == 3:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Updating...", usb4vc_oled.font_medium, 10, draw)
fffff = usb4vc_check_update.download_latest_firmware(this_pboard_id)
if fffff != 0:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Unable to download", usb4vc_oled.font_medium, 0, draw)
usb4vc_oled.oled_print_centered(f"firmware: {fffff}", usb4vc_oled.font_medium, 16, draw)
elif update_pboard_firmware(this_pboard_id):
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Firmware updated!", usb4vc_oled.font_medium, 10, draw)
else:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("FW update ERR or", usb4vc_oled.font_medium, 0, draw)
usb4vc_oled.oled_print_centered("already newest", usb4vc_oled.font_medium, 15, draw)
time.sleep(3)
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Updating code...", usb4vc_oled.font_medium, 10, draw)
time.sleep(1)
update_result = usb4vc_check_update.update(temp_dir_path)
if update_result[0] == 0:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Update complete!", usb4vc_oled.font_medium, 0, draw)
usb4vc_oled.oled_print_centered("Relaunching...", usb4vc_oled.font_medium, 16, draw)
else:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Update failed:", usb4vc_oled.font_medium, 0, draw)
usb4vc_oled.oled_print_centered(f"{update_result[-1]} {update_result[0]}", usb4vc_oled.font_regular, 16, draw)
time.sleep(4)
usb4vc_oled.oled_device.clear()
os._exit(0)
elif page == 4:
try:
usb4vc_show_ev.ev_loop([plus_button, minus_button, enter_button])
except Exception as e:
print('exception ev_loop:', e)
self.goto_level(0)
elif page == 5:
self.paired_devices_list = list(get_paired_devices())
self.page_size[4] = len(self.paired_devices_list) + 1
self.goto_level(4)
elif page == 6:
self.goto_level(2)
else:
self.goto_level(1)
if level == 1:
if page == 0:
self.current_keyboard_protocol_index = (self.current_keyboard_protocol_index + 1) % len(self.kb_protocol_list)
if page == 1:
self.current_mouse_protocol_index = (self.current_mouse_protocol_index + 1) % len(self.mouse_protocol_list)
if page == 2:
self.current_gamepad_protocol_index = (self.current_gamepad_protocol_index + 1) % len(self.gamepad_protocol_list)
if page == 3:
self.current_mouse_sensitivity_offset_index = (self.current_mouse_sensitivity_offset_index + 1) % len(mouse_sensitivity_list)
if page == 4:
self.current_joystick_curve_index = (self.current_joystick_curve_index + 1) % len(joystick_curve_list)
self.draw_joystick_curve()
if page == 5:
configuration_dict[this_pboard_id]["keyboard_protocol_index"] = self.current_keyboard_protocol_index
configuration_dict[this_pboard_id]["mouse_protocol_index"] = self.current_mouse_protocol_index
configuration_dict[this_pboard_id]["mouse_sensitivity_index"] = self.current_mouse_sensitivity_offset_index
configuration_dict[this_pboard_id]["gamepad_protocol_index"] = self.current_gamepad_protocol_index
configuration_dict[this_pboard_id]["joystick_curve_index"] = self.current_joystick_curve_index
save_config()
self.send_protocol_set_spi_msg()
self.goto_level(0)
if level == 2:
if page == 0:
self.switch_page(1)
if page == 2:
self.goto_level(0)
if page == 3:
self.goto_level(0)
if level == 3:
if page == self.page_size[3] - 1:
self.goto_level(0)
else:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Pairing...", usb4vc_oled.font_medium, 0, draw)
usb4vc_oled.oled_print_centered("Please wait", usb4vc_oled.font_medium, 15, draw)
print("pairing", self.bluetooth_device_list[page])
bt_mac_addr = self.bluetooth_device_list[page][0]
is_successful, result_message = pair_device(bt_mac_addr)
self.pairing_result = result_message.split('.')[-1].strip()[-22:]
if is_successful:
os.system(f'timeout {self.bt_scan_timeout_sec} bluetoothctl --agent NoInputNoOutput trust {bt_mac_addr}')
os.system(f'timeout {self.bt_scan_timeout_sec} bluetoothctl --agent NoInputNoOutput connect {bt_mac_addr}')
self.goto_level(2)
self.goto_page(2)
if level == 4:
if page == self.page_size[4] - 1:
self.goto_level(0)
else:
os.system(f'timeout 5 bluetoothctl --agent NoInputNoOutput untrust {self.paired_devices_list[page][0]}')
os.system(f'timeout 5 bluetoothctl --agent NoInputNoOutput remove {self.paired_devices_list[page][0]}')
self.goto_level(0)
if level == 5:
if page == 0:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Wait Until Green", usb4vc_oled.font_medium, 0, draw)
usb4vc_oled.oled_print_centered("LED Stops Blinking", usb4vc_oled.font_medium, 15, draw)
time.sleep(2)
os.system("sudo halt")
while True:
time.sleep(1)
if page == 1:
usb4vc_oled.oled_device.clear()
os._exit(0)
if page == 2:
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Rebooting...", usb4vc_oled.font_medium, 0, draw)
usb4vc_oled.oled_print_centered("Unplug if stuck >10s", usb4vc_oled.font_regular, 16, draw)
os.system("sudo reboot")
while True:
time.sleep(1)
if page == 3:
usb4vc_oled.oled_device.clear()
os._exit(169)
if page == 4:
self.goto_level(0)
self.display_curent_page()
def action_current_page(self):
self.action(self.current_level, self.current_page)
def display_curent_page(self):
self.display_page(self.current_level, self.current_page)
def update_usb_status(self):
if self.current_level == 0 and self.current_page == 0:
self.display_page(0, 0)
def update_board_status(self):
if self.current_level == 0 and self.current_page == 1:
self.display_page(0, 1)
pboard_database = {
PBOARD_ID_UNKNOWN:{'author':'Unknown', 'fw_ver':(0,0,0), 'full_name':'Unknown', 'hw_rev':0, 'protocol_list_keyboard':raw_keyboard_protocols, 'protocol_list_mouse':raw_mouse_protocols, 'protocol_list_gamepad':raw_gamepad_protocols},
PBOARD_ID_IBMPC:{'author':'dekuNukem', 'fw_ver':(0,0,0), 'full_name':'IBM PC Compatible', 'hw_rev':0, 'protocol_list_keyboard':ibmpc_keyboard_protocols, 'protocol_list_mouse':ibmpc_mouse_protocols, 'protocol_list_gamepad':ibmpc_gamepad_protocols},
PBOARD_ID_ADB:{'author':'dekuNukem', 'fw_ver':(0,0,0), 'full_name':'Apple Desktop Bus', 'hw_rev':0, 'protocol_list_keyboard':adb_keyboard_protocols, 'protocol_list_mouse':adb_mouse_protocols, 'protocol_list_gamepad':adb_gamepad_protocols},
}
def get_pboard_dict(pid):
if pid not in pboard_database:
pid = 0
return pboard_database[pid]
def get_mouse_sensitivity():
return mouse_sensitivity_list[configuration_dict[this_pboard_id]["mouse_sensitivity_index"]]
def ui_init():
global pboard_info_spi_msg
global this_pboard_id
load_config()
pboard_info_spi_msg = usb4vc_usb_scan.get_pboard_info()
print("PB INFO:", pboard_info_spi_msg)
this_pboard_id = pboard_info_spi_msg[3]
if this_pboard_id in pboard_database:
# load custom profile mapping into protocol list
for item in custom_profile_list:
this_mapping_bid = usb4vc_shared.board_id_lookup.get(item['protocol_board'], 0)
if this_mapping_bid == this_pboard_id and item['device_type'] in pboard_database[this_pboard_id]:
this_mapping_pid = usb4vc_shared.protocol_id_lookup.get(item['protocol_name'])
item['pid'] = this_mapping_pid
pboard_database[this_pboard_id][item['device_type']].append(item)
pboard_database[this_pboard_id]['hw_rev'] = pboard_info_spi_msg[4]
pboard_database[this_pboard_id]['fw_ver'] = (pboard_info_spi_msg[5], pboard_info_spi_msg[6], pboard_info_spi_msg[7])
if 'rpi_app_ver' not in configuration_dict:
configuration_dict['rpi_app_ver'] = usb4vc_shared.RPI_APP_VERSION_TUPLE
if this_pboard_id not in configuration_dict:
configuration_dict[this_pboard_id] = {"keyboard_protocol_index":1, "mouse_protocol_index":1, "mouse_sensitivity_index":0, "gamepad_protocol_index":1}
plus_button = my_button(PLUS_BUTTON_PIN)
minus_button = my_button(MINUS_BUTTON_PIN)
enter_button = my_button(ENTER_BUTTON_PIN)
shutdown_button = my_button(SHUTDOWN_BUTTON_PIN)
class oled_sleep_control(object):
def __init__(self):
super(oled_sleep_control, self).__init__()
self.is_sleeping = False
self.last_input_event = time.time()
self.ui_loop_count = 0
def sleep(self):
if self.is_sleeping is False:
print("sleeping!")
usb4vc_oled.oled_device.clear()
self.is_sleeping = True
# GPIO.output(SLEEP_LED_PIN, GPIO.HIGH)
def wakeup(self):
if self.is_sleeping:
print("waking up!")
my_menu.display_curent_page()
self.last_input_event = time.time()
self.is_sleeping = False
# GPIO.output(SLEEP_LED_PIN, GPIO.LOW)
def check_sleep(self):
# time.time() might jump ahead a lot when RPi gets its time from network
# this ensures OLED won't go to sleep too early
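# 1500 iterations at ui_worker's 0.1 s loop period gives a ~150 s grace period after boot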
if self.ui_loop_count <= 1500:
return
if time.time() - self.last_input_event > 180:
self.sleep()
else:
self.wakeup()
def kick(self):
self.last_input_event = time.time()
my_oled = oled_sleep_control()
my_menu = None
def ui_worker():
global my_menu
print(configuration_dict)
print("ui_worker started")
my_menu = usb4vc_menu(get_pboard_dict(this_pboard_id), configuration_dict[this_pboard_id])
my_menu.display_page(0, 0)
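# blink the sleep LED twice (0.2 s on/off) to signal that the UI thread has started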
for x in range(2):
GPIO.output(SLEEP_LED_PIN, GPIO.HIGH)
time.sleep(0.2)
GPIO.output(SLEEP_LED_PIN, GPIO.LOW)
time.sleep(0.2)
while True:
time.sleep(0.1)
my_oled.ui_loop_count += 1
if my_oled.is_sleeping is False and my_oled.ui_loop_count % 5 == 0:
my_menu.update_usb_status()
my_menu.update_board_status()
if plus_button.is_pressed():
my_oled.kick()
if my_oled.is_sleeping:
my_oled.wakeup()
elif my_menu.current_level != 2:
my_menu.switch_page(1)
my_menu.display_curent_page()
if minus_button.is_pressed():
my_oled.kick()
if my_oled.is_sleeping:
my_oled.wakeup()
elif my_menu.current_level != 2:
my_menu.switch_page(-1)
my_menu.display_curent_page()
if enter_button.is_pressed():
my_oled.kick()
if my_oled.is_sleeping:
my_oled.wakeup()
else:
my_menu.action_current_page()
if shutdown_button.is_pressed():
my_oled.kick()
if my_oled.is_sleeping:
my_oled.wakeup()
else:
my_menu.goto_level(5)
my_menu.display_curent_page()
my_oled.check_sleep()
def get_gamepad_protocol():
return my_menu.current_gamepad_protocol
def get_joystick_curve():
return joystick_curve_list[my_menu.current_joystick_curve_index]
def oled_print_model_changed():
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("RPi Model Changed!", usb4vc_oled.font_regular, 0, draw)
usb4vc_oled.oled_print_centered("Recompiling BT Driver", usb4vc_oled.font_regular, 10, draw)
usb4vc_oled.oled_print_centered("Might take a while...", usb4vc_oled.font_regular, 20, draw)
def oled_print_oneline(msg):
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered(msg, usb4vc_oled.font_medium, 10, draw)
def oled_print_reboot():
with canvas(usb4vc_oled.oled_device) as draw:
usb4vc_oled.oled_print_centered("Done! Rebooting..", usb4vc_oled.font_medium, 0, draw)
usb4vc_oled.oled_print_centered("Unplug if stuck >10s", usb4vc_oled.font_regular, 16, draw)
ui_thread = threading.Thread(target=ui_worker, daemon=True)
| 51.462641 | 1,075 | 0.63269 | 7,347 | 50,279 | 4.037566 | 0.087519 | 0.072141 | 0.06041 | 0.046117 | 0.560646 | 0.497573 | 0.421015 | 0.371932 | 0.311152 | 0.288498 | 0 | 0.079882 | 0.251318 | 50,279 | 976 | 1,076 | 51.515369 | 0.708153 | 0.015812 | 0 | 0.335267 | 0 | 0.00464 | 0.112747 | 0.030017 | 0 | 0 | 0.003387 | 0 | 0 | 1 | 0.053364 | false | 0.00116 | 0.024362 | 0.00348 | 0.11949 | 0.12065 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5d6f6e6ed3bbf01cb5af3d5c038344399c98f74f | 384 | py | Python | study/migrations/0003_auto_20200224_2316.py | hpathipati/Quick-Tutor | 17476d79b87f51b12a6c8fc435d1a6506bff1e04 | [
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null | study/migrations/0003_auto_20200224_2316.py | hpathipati/Quick-Tutor | 17476d79b87f51b12a6c8fc435d1a6506bff1e04 | [
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null | study/migrations/0003_auto_20200224_2316.py | hpathipati/Quick-Tutor | 17476d79b87f51b12a6c8fc435d1a6506bff1e04 | [
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null | # Generated by Django 3.0.2 on 2020-02-24 23:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('study', '0002_student'),
]
operations = [
migrations.AlterField(
model_name='student',
name='bio',
field=models.CharField(blank=True, max_length=200),
),
]
| 20.210526 | 63 | 0.588542 | 42 | 384 | 5.309524 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.080882 | 0.291667 | 384 | 18 | 64 | 21.333333 | 0.738971 | 0.117188 | 0 | 0 | 1 | 0 | 0.080119 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5d730d1afb5f1402b6e9a016eacea8ab0f918612 | 858 | py | Python | umbra/monitor/main.py | RafaelAPB/umbra | cf075bbe73e46540e9edee25f9ec3d0828620d5f | [
"Apache-2.0"
] | null | null | null | umbra/monitor/main.py | RafaelAPB/umbra | cf075bbe73e46540e9edee25f9ec3d0828620d5f | [
"Apache-2.0"
] | null | null | null | umbra/monitor/main.py | RafaelAPB/umbra | cf075bbe73e46540e9edee25f9ec3d0828620d5f | [
"Apache-2.0"
] | null | null | null | import logging
import json
import asyncio
from google.protobuf import json_format
from umbra.common.protobuf.umbra_grpc import MonitorBase
from umbra.common.protobuf.umbra_pb2 import Instruction, Snapshot
from umbra.monitor.tools import Tools
logger = logging.getLogger(__name__)
logging.getLogger("hpack").setLevel(logging.WARNING)
class Monitor(MonitorBase):
def __init__(self, info):
self.tools = Tools()
async def Listen(self, stream):
logging.debug("Instruction Received")
instruction: Instruction = await stream.recv_message()
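# protobuf -> dict, delegate the work to Tools, then dict -> Snapshot protobuf for the reply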
instruction_dict = json_format.MessageToDict(instruction, preserving_proto_field_name=True)
snapshot_dict = await self.tools.handle(instruction_dict)
snapshot = json_format.ParseDict(snapshot_dict, Snapshot())
await stream.send_message(snapshot)
| 31.777778 | 99 | 0.757576 | 101 | 858 | 6.217822 | 0.445545 | 0.047771 | 0.047771 | 0.073248 | 0.089172 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001391 | 0.162005 | 858 | 26 | 100 | 33 | 0.872045 | 0 | 0 | 0 | 0 | 0 | 0.029138 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.368421 | 0 | 0.473684 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
537138998ce86bd69153421493a543bbc8be7c36 | 723 | py | Python | hemp/internal/utils.py | Addvilz/hemp | 2cd1d437fc59a8f7b24f5d150c623bf75c3b6747 | [
"Apache-2.0"
] | 1 | 2020-08-13T22:28:28.000Z | 2020-08-13T22:28:28.000Z | hemp/internal/utils.py | Addvilz/hemp | 2cd1d437fc59a8f7b24f5d150c623bf75c3b6747 | [
"Apache-2.0"
] | null | null | null | hemp/internal/utils.py | Addvilz/hemp | 2cd1d437fc59a8f7b24f5d150c623bf75c3b6747 | [
"Apache-2.0"
] | null | null | null | import sys
from fabric.utils import error, puts
from git import RemoteProgress
def print_err(message, func=None, exception=None, stdout=None, stderr=None):
error('[Hemp] ' + message, func, exception, stdout, stderr)
def print_info(text, show_prefix=None, end="\n", flush=True):
puts('[Hemp] ' + text, show_prefix, end, flush)
def print_git_output(stdout):
for line in stdout.split('\n'):
sys.stdout.write('[GIT] ' + line + '\n')
sys.stdout.flush()
class SimpleProgressPrinter(RemoteProgress):
def _parse_progress_line(self, line):
if '\r' in line:
line = line.replace('\r', '\r[GIT] ')
sys.stdout.write('[GIT] ' + line + '\n')
sys.stdout.flush()
| 26.777778 | 76 | 0.637621 | 97 | 723 | 4.659794 | 0.42268 | 0.079646 | 0.066372 | 0.075221 | 0.159292 | 0.159292 | 0.159292 | 0.159292 | 0.159292 | 0 | 0 | 0 | 0.204703 | 723 | 26 | 77 | 27.807692 | 0.786087 | 0 | 0 | 0.235294 | 0 | 0 | 0.063624 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.235294 | false | 0 | 0.176471 | 0 | 0.470588 | 0.176471 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53726406b1ce515956afb2308d74b2a4c7e1b255 | 4,227 | py | Python | tests/arch/x86/test_x86parser.py | IMULMUL/barf-project | 9547ef843b8eb021c2c32c140e36173c0b4eafa3 | [
"BSD-2-Clause"
] | 1,395 | 2015-01-02T11:43:30.000Z | 2022-03-30T01:15:26.000Z | tests/arch/x86/test_x86parser.py | IMULMUL/barf-project | 9547ef843b8eb021c2c32c140e36173c0b4eafa3 | [
"BSD-2-Clause"
] | 54 | 2015-02-11T05:18:05.000Z | 2021-12-10T08:45:39.000Z | tests/arch/x86/test_x86parser.py | IMULMUL/barf-project | 9547ef843b8eb021c2c32c140e36173c0b4eafa3 | [
"BSD-2-Clause"
] | 207 | 2015-01-05T09:47:54.000Z | 2022-03-30T01:15:29.000Z | # Copyright (c) 2014, Fundacion Dr. Manuel Sadosky
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import unittest
from barf.arch import ARCH_X86_MODE_32
from barf.arch import ARCH_X86_MODE_64
from barf.arch.x86.parser import X86Parser
class X86Parser32BitsTests(unittest.TestCase):
def setUp(self):
self._parser = X86Parser(ARCH_X86_MODE_32)
def test_two_oprnd_reg_reg(self):
asm = self._parser.parse("add eax, ebx")
self.assertEqual(str(asm), "add eax, ebx")
def test_two_oprnd_reg_imm(self):
asm = self._parser.parse("add eax, 0x12345678")
self.assertEqual(str(asm), "add eax, 0x12345678")
def test_two_oprnd_reg_mem(self):
asm = self._parser.parse("add eax, [ebx + edx * 4 + 0x10]")
self.assertEqual(str(asm), "add eax, [ebx+edx*4+0x10]")
def test_two_oprnd_mem_reg(self):
asm = self._parser.parse("add [ebx + edx * 4 + 0x10], eax")
self.assertEqual(str(asm), "add [ebx+edx*4+0x10], eax")
def test_one_oprnd_reg(self):
asm = self._parser.parse("inc eax")
self.assertEqual(str(asm), "inc eax")
def test_one_oprnd_imm(self):
asm = self._parser.parse("jmp 0x12345678")
self.assertEqual(str(asm), "jmp 0x12345678")
def test_one_oprnd_mem(self):
asm = self._parser.parse("inc dword ptr [ebx+edx*4+0x10]")
self.assertEqual(str(asm), "inc dword ptr [ebx+edx*4+0x10]")
def test_zero_oprnd(self):
asm = self._parser.parse("nop")
self.assertEqual(str(asm), "nop")
# Misc
# ======================================================================== #
def test_misc_1(self):
asm = self._parser.parse("mov dword ptr [-0x21524111], ecx")
self.assertEqual(str(asm), "mov dword ptr [-0x21524111], ecx")
self.assertNotEqual(str(asm), "mov dword ptr [0xdeadbeef], ecx")
def test_misc_2(self):
asm = self._parser.parse("fucompi st(1)")
self.assertEqual(str(asm), "fucompi st1")
class X86Parser64BitsTests(unittest.TestCase):
def setUp(self):
self._parser = X86Parser(ARCH_X86_MODE_64)
def test_64_two_oprnd_reg_reg(self):
asm = self._parser.parse("add rax, rbx")
self.assertEqual(str(asm), "add rax, rbx")
def test_64_two_oprnd_reg_reg_2(self):
asm = self._parser.parse("add rax, r8")
self.assertEqual(str(asm), "add rax, r8")
def test_64_two_oprnd_reg_mem(self):
asm = self._parser.parse("add rax, [rbx + r15 * 4 + 0x10]")
self.assertEqual(str(asm), "add rax, [rbx+r15*4+0x10]")
# Misc
# ======================================================================== #
def test_misc_offset_1(self):
asm = self._parser.parse("add byte ptr [rax+0xffffff89], cl")
self.assertEqual(str(asm), "add byte ptr [rax+0xffffff89], cl")
def main():
unittest.main()
if __name__ == '__main__':
main()
| 33.283465 | 80 | 0.666903 | 588 | 4,227 | 4.639456 | 0.295918 | 0.058651 | 0.056452 | 0.087243 | 0.533724 | 0.435484 | 0.294721 | 0.180718 | 0.14956 | 0.14956 | 0 | 0.043772 | 0.194701 | 4,227 | 126 | 81 | 33.547619 | 0.757638 | 0.345162 | 0 | 0.034483 | 0 | 0 | 0.210507 | 0 | 0 | 0 | 0.044509 | 0 | 0.258621 | 1 | 0.293103 | false | 0 | 0.086207 | 0 | 0.413793 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
537ea975bc8b1468e691c88bd35a36f7347e9442 | 1,895 | py | Python | set-config.py | astubenazy/vrops-metric-collection | c4e5b8d7058759aa5eded74cc619d1dedcbc821a | [
"MIT"
] | 2 | 2020-04-08T13:03:00.000Z | 2020-08-25T18:21:27.000Z | set-config.py | astubenazy/vrops-metric-collection | c4e5b8d7058759aa5eded74cc619d1dedcbc821a | [
"MIT"
] | 1 | 2019-08-15T11:19:18.000Z | 2019-08-17T11:38:48.000Z | set-config.py | astubenazy/vrops-metric-collection | c4e5b8d7058759aa5eded74cc619d1dedcbc821a | [
"MIT"
] | 7 | 2018-06-06T13:47:52.000Z | 2021-06-17T18:33:27.000Z | # !/usr/bin python
"""
#
# set-config - a small Python program to set up the configuration environment for data-collect.py
# data-collect.py contains the Python program that gathers metrics from vROps
# Author Sajal Debnath <sdebnath@vmware.com>
#
"""
# Importing the required modules
import json
import base64
import os,sys
# Getting the absolute path from where the script is being run
def get_script_path():
return os.path.dirname(os.path.realpath(sys.argv[0]))
def get_the_inputs():
adapterkind = raw_input("Please enter Adapter Kind: ")
resourceKind = raw_input("Please enter Resource Kind: ")
servername = raw_input("Enter enter Server IP/FQDN: ")
serveruid = raw_input("Please enter user id: ")
serverpasswd = raw_input("Please enter vRops password: ")
encryptedvar = base64.b64encode(serverpasswd)
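# note: base64 is an encoding, not encryption - it only obfuscates the password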
maxsamples = raw_input("Please enter the maximum number of samples to collect: ")
keys_to_monitor = raw_input("Please enter the number of keys to monitor: ")
keys = []
for i in range(int(keys_to_monitor)):
keys.append(raw_input("Enter the key: "))
data = {}
if int(maxsamples) < 1:
maxsamples = 1
data["adapterKind"] = adapterkind
data["resourceKind"] = resourceKind
data["sampleno"] = int(maxsamples)
serverdetails = {}
serverdetails["name"] = servername
serverdetails["userid"] = serveruid
serverdetails["password"] = encryptedvar
data["server"] = serverdetails
data["keys"] = keys
return data
# Getting the path where config.json file should be kept
path = get_script_path()
fullpath = path+"/"+"config.json"
# Getting the data for the config.json file
final_data = get_the_inputs()
# Saving the data to config.json file
with open(fullpath, 'w') as outfile:
json.dump(final_data, outfile, sort_keys = True, indent = 2, separators=(',', ':'), ensure_ascii=False) | 29.153846 | 107 | 0.701847 | 250 | 1,895 | 5.224 | 0.448 | 0.049005 | 0.064319 | 0.087289 | 0.033691 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006498 | 0.187863 | 1,895 | 65 | 107 | 29.153846 | 0.842105 | 0.244327 | 0 | 0 | 0 | 0 | 0.226761 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0.085714 | 0.085714 | 0.028571 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
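# The resulting config.json has the following shape (values are illustrative):
# {
#   "adapterKind": "VMWARE",
#   "resourceKind": "VirtualMachine",
#   "sampleno": 5,
#   "server": {"name": "vrops.example.com", "userid": "admin", "password": "<base64 string>"},
#   "keys": ["cpu|usage_average"]
# }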
53840797fa9f83c58be0cb1122c4f31c4c62dc94 | 4,841 | py | Python | unittest/scripts/py_devapi/scripts/mysqlx_collection_remove.py | mueller/mysql-shell | 29bafc5692bd536a12c4e41c54cb587375fe52cf | [
"Apache-2.0"
] | 119 | 2016-04-14T14:16:22.000Z | 2022-03-08T20:24:38.000Z | unittest/scripts/py_devapi/scripts/mysqlx_collection_remove.py | mueller/mysql-shell | 29bafc5692bd536a12c4e41c54cb587375fe52cf | [
"Apache-2.0"
] | 9 | 2017-04-26T20:48:42.000Z | 2021-09-07T01:52:44.000Z | unittest/scripts/py_devapi/scripts/mysqlx_collection_remove.py | mueller/mysql-shell | 29bafc5692bd536a12c4e41c54cb587375fe52cf | [
"Apache-2.0"
] | 51 | 2016-07-20T05:06:48.000Z | 2022-03-09T01:20:53.000Z | # Assumptions: validate_crud_functions available
# Assumes __uripwd is defined as <user>:<pwd>@<host>:<plugin_port>
from __future__ import print_function
from mysqlsh import mysqlx
mySession = mysqlx.get_session(__uripwd)
ensure_schema_does_not_exist(mySession, 'js_shell_test')
schema = mySession.create_schema('js_shell_test')
# Creates a test collection and inserts data into it
collection = schema.create_collection('collection1')
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA01", "name": 'jack', "age": 17, "gender": 'male'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA02", "name": 'adam', "age": 15, "gender": 'male'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA03", "name": 'brian', "age": 14, "gender": 'male'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA04", "name": 'alma', "age": 13, "gender": 'female'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA05", "name": 'carol', "age": 14, "gender": 'female'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA06", "name": 'donna', "age": 16, "gender": 'female'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA07", "name": 'angel', "age": 14, "gender": 'male'}).execute()
# ------------------------------------------------
# collection.remove Unit Testing: Dynamic Behavior
# ------------------------------------------------
#@ CollectionRemove: valid operations after remove
crud = collection.remove('some_condition')
validate_crud_functions(crud, ['sort', 'limit', 'bind', 'execute'])
#@ CollectionRemove: valid operations after sort
crud = crud.sort(['name'])
validate_crud_functions(crud, ['limit', 'bind', 'execute'])
#@ CollectionRemove: valid operations after limit
crud = crud.limit(1)
validate_crud_functions(crud, ['bind', 'execute'])
#@ CollectionRemove: valid operations after bind
crud = collection.remove('name = :data').bind('data', 'donna')
validate_crud_functions(crud, ['bind', 'execute'])
#@ CollectionRemove: valid operations after execute
result = crud.execute()
validate_crud_functions(crud, ['limit', 'bind', 'execute'])
#@ Reusing CRUD with binding
print('Deleted donna:', result.affected_items_count, '\n')
result = crud.bind('data', 'alma').execute()
print('Deleted alma:', result.affected_items_count, '\n')
# ----------------------------------------------
# collection.remove Unit Testing: Error Conditions
# ----------------------------------------------
#@# CollectionRemove: Error conditions on remove
crud = collection.remove()
crud = collection.remove(' ')
crud = collection.remove(5)
crud = collection.remove('test = "2')
#@# CollectionRemove: Error conditions sort
crud = collection.remove('some_condition').sort()
crud = collection.remove('some_condition').sort(5)
crud = collection.remove('some_condition').sort([])
crud = collection.remove('some_condition').sort(['name', 5])
crud = collection.remove('some_condition').sort('name', 5)
#@# CollectionRemove: Error conditions on limit
crud = collection.remove('some_condition').limit()
crud = collection.remove('some_condition').limit('')
#@# CollectionRemove: Error conditions on bind
crud = collection.remove('name = :data and age > :years').bind()
crud = collection.remove('name = :data and age > :years').bind(5, 5)
crud = collection.remove('name = :data and age > :years').bind('another', 5)
#@# CollectionRemove: Error conditions on execute
crud = collection.remove('name = :data and age > :years').execute()
crud = collection.remove('name = :data and age > :years').bind('years', 5).execute()
# ---------------------------------------
# collection.remove Unit Testing: Execution
# ---------------------------------------
#@ CollectionRemove: remove under condition
#! [CollectionRemove: remove under condition]
result = collection.remove('age = 15').execute()
print('Affected Rows:', result.affected_items_count, '\n')
docs = collection.find().execute().fetch_all()
print('Records Left:', len(docs), '\n')
#! [CollectionRemove: remove under condition]
#@ CollectionRemove: remove with binding
#! [CollectionRemove: remove with binding]
result = collection.remove('gender = :heorshe').limit(2).bind('heorshe', 'male').execute()
print('Affected Rows:', result.affected_items_count, '\n')
#! [CollectionRemove: remove with binding]
docs = collection.find().execute().fetch_all()
print('Records Left:', len(docs), '\n')
#@ CollectionRemove: full remove
#! [CollectionRemove: full remove]
result = collection.remove('1').execute()
print('Affected Rows:', result.affected_items_count, '\n')
docs = collection.find().execute().fetch_all()
print('Records Left:', len(docs), '\n')
#! [CollectionRemove: full remove]
# Cleanup
mySession.drop_schema('js_shell_test')
mySession.close()
| 41.732759 | 126 | 0.685602 | 531 | 4,841 | 6.129944 | 0.218456 | 0.117972 | 0.110599 | 0.058986 | 0.577573 | 0.504455 | 0.463902 | 0.293088 | 0.278648 | 0.220891 | 0 | 0.040155 | 0.094609 | 4,841 | 115 | 127 | 42.095652 | 0.702487 | 0.244784 | 0 | 0.306452 | 0 | 0 | 0.278775 | 0.061827 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.032258 | null | null | 0.145161 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
538b05195aa3c62cda3499af221928cc57bfb7bb | 1,423 | py | Python | alipay/aop/api/domain/KbAdvertSettleBillResponse.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/KbAdvertSettleBillResponse.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/KbAdvertSettleBillResponse.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KbAdvertSettleBillResponse(object):
def __init__(self):
self._download_url = None
self._paid_date = None
@property
def download_url(self):
return self._download_url
@download_url.setter
def download_url(self, value):
self._download_url = value
@property
def paid_date(self):
return self._paid_date
@paid_date.setter
def paid_date(self, value):
self._paid_date = value
def to_alipay_dict(self):
params = dict()
if self.download_url:
if hasattr(self.download_url, 'to_alipay_dict'):
params['download_url'] = self.download_url.to_alipay_dict()
else:
params['download_url'] = self.download_url
if self.paid_date:
if hasattr(self.paid_date, 'to_alipay_dict'):
params['paid_date'] = self.paid_date.to_alipay_dict()
else:
params['paid_date'] = self.paid_date
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KbAdvertSettleBillResponse()
if 'download_url' in d:
o.download_url = d['download_url']
if 'paid_date' in d:
o.paid_date = d['paid_date']
return o
| 25.410714 | 75 | 0.599438 | 174 | 1,423 | 4.603448 | 0.235632 | 0.205993 | 0.131086 | 0.044944 | 0.248439 | 0.238452 | 0 | 0 | 0 | 0 | 0 | 0.00101 | 0.304287 | 1,423 | 55 | 76 | 25.872727 | 0.808081 | 0.029515 | 0 | 0.097561 | 0 | 0 | 0.081336 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.170732 | false | 0 | 0.04878 | 0.04878 | 0.365854 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
538bf59cdb6e50d49c8fe6d1f6a72767b79df904 | 3,333 | py | Python | textvis/textprizm/models.py | scclab/textvisdrg-prototype | e912e4441b0e42e0f6c477edd03227b93b8ace73 | [
"MIT"
] | null | null | null | textvis/textprizm/models.py | scclab/textvisdrg-prototype | e912e4441b0e42e0f6c477edd03227b93b8ace73 | [
"MIT"
] | null | null | null | textvis/textprizm/models.py | scclab/textvisdrg-prototype | e912e4441b0e42e0f6c477edd03227b93b8ace73 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class Schema(models.Model):
name = models.CharField(max_length=200)
description = models.TextField()
class Code(models.Model):
name = models.CharField(max_length=200)
description = models.TextField()
active_instances = models.PositiveIntegerField(default=0)
schema = models.ForeignKey(Schema, related_name="codes")
code_type = models.IntegerField(default=0)
def __unicode__(self):
if self.description:
return "%s/%s (%d): %s" % (self.schema_id, self.name, self.id, self.description)
else:
return "%s/%s (%d)" % (self.schema_id, self.name, self.id)
class DataSet(models.Model):
name = models.CharField(max_length=100)
created = models.DateTimeField()
class Session(models.Model):
set = models.ForeignKey(DataSet)
started = models.DateTimeField()
ended = models.DateTimeField()
def __unicode__(self):
return "%d (%s - %s)" % (self.id, str(self.started), str(self.ended))
class Participant(models.Model):
name = models.CharField(max_length=100)
description = models.TextField()
def __unicode__(self):
return self.name
class Message(models.Model):
session = models.ForeignKey(Session)
idx = models.IntegerField()
time = models.DateTimeField()
type = models.IntegerField()
participant = models.ForeignKey(Participant, related_name='messages')
message = models.TextField()
codes = models.ManyToManyField(Code, through='CodeInstance')
@classmethod
def get_between(cls, start, end):
"""
Get messages that are inclusively between the two messages, or two dates.
Takes the exact ordering of messages into account: for example, a message
that shares a timestamp with the last message but comes after it in the
same session is excluded.
"""
if isinstance(start, Message):
after_first = ~models.Q(session=start.session) | models.Q(idx__gte=start.idx)
after_first = models.Q(time__gte=start.time) & after_first
else:
after_first = models.Q(time__gte=start)
if isinstance(end, Message):
before_last = ~models.Q(session=end.session) | models.Q(idx__lte=end.idx)
before_last = models.Q(time__lte=end.time) & before_last
else:
before_last = models.Q(time__lte=end)
return cls.objects.filter(after_first, before_last)
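# Usage sketch (hypothetical variables): both endpoints may be Message
# instances or datetimes, and the two styles can be mixed, e.g.
# Message.get_between(first_msg, last_msg) or
# Message.get_between(datetime(2015, 1, 1), last_msg)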
@property
def text(self):
return self.message
@property
def user_name(self):
return self.participant.name
@property
def created_at(self):
return self.time
class User(models.Model):
name = models.CharField(max_length=100)
full_name = models.CharField(max_length=250)
email = models.CharField(max_length=250)
def __unicode__(self):
return self.name
class AbstractCodeInstance(models.Model):
class Meta:
abstract = True
code = models.ForeignKey(Code)
message = models.ForeignKey(Message)
added = models.DateTimeField()
class CodeInstance(AbstractCodeInstance):
user = models.ForeignKey(User)
task_id = models.PositiveIntegerField()
intensity = models.FloatField()
flag = models.IntegerField()
| 28.245763 | 101 | 0.659166 | 392 | 3,333 | 5.461735 | 0.27551 | 0.041102 | 0.058851 | 0.078468 | 0.256889 | 0.229799 | 0.229799 | 0.122373 | 0.063522 | 0.063522 | 0 | 0.009005 | 0.233723 | 3,333 | 117 | 102 | 28.487179 | 0.829287 | 0.073507 | 0 | 0.263158 | 0 | 0 | 0.020013 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.013158 | 0.078947 | 0.776316 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
538d3918006c09254385e7ece91e4c11554aa399 | 462 | py | Python | django_project/user_profile/migrations/0003_order_payment_method.py | aliyaandabekova/DJANGO_PROJECT | 7b94f80fa56acf936da014aa5d91da79457bf4eb | [
"MIT"
] | null | null | null | django_project/user_profile/migrations/0003_order_payment_method.py | aliyaandabekova/DJANGO_PROJECT | 7b94f80fa56acf936da014aa5d91da79457bf4eb | [
"MIT"
] | null | null | null | django_project/user_profile/migrations/0003_order_payment_method.py | aliyaandabekova/DJANGO_PROJECT | 7b94f80fa56acf936da014aa5d91da79457bf4eb | [
"MIT"
] | null | null | null | # Generated by Django 3.2.3 on 2021-05-27 13:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_profile', '0002_auto_20210526_1747'),
]
operations = [
migrations.AddField(
model_name='order',
name='payment_method',
field=models.CharField(choices=[('cash', 'cash'), ('wallet', 'wallet')], default='cash', max_length=10),
),
]
| 24.315789 | 116 | 0.603896 | 51 | 462 | 5.333333 | 0.803922 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.095376 | 0.251082 | 462 | 18 | 117 | 25.666667 | 0.690751 | 0.097403 | 0 | 0 | 1 | 0 | 0.187952 | 0.055422 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5391eb5d4685629e3d8228f4e55d8a98857010ab | 7,787 | py | Python | django_loci/tests/base/test_admin.py | yashikajotwani12/django-loci | 2c0bcb33f4a56d559f798e37fd17b2143b912ce4 | [
"BSD-3-Clause"
] | 205 | 2017-11-17T10:35:02.000Z | 2022-03-29T18:50:32.000Z | django_loci/tests/base/test_admin.py | yashikajotwani12/django-loci | 2c0bcb33f4a56d559f798e37fd17b2143b912ce4 | [
"BSD-3-Clause"
] | 98 | 2017-11-20T16:03:27.000Z | 2022-01-19T21:12:47.000Z | django_loci/tests/base/test_admin.py | yashikajotwani12/django-loci | 2c0bcb33f4a56d559f798e37fd17b2143b912ce4 | [
"BSD-3-Clause"
] | 46 | 2017-11-20T23:25:26.000Z | 2022-02-10T05:06:16.000Z | import json
import os
import responses
from django.urls import reverse
from .. import TestAdminMixin, TestLociMixin
class BaseTestAdmin(TestAdminMixin, TestLociMixin):
geocode_url = 'https://geocode.arcgis.com/arcgis/rest/services/World/GeocodeServer/'
def test_location_list(self):
self._login_as_admin()
self._create_location(name='test-admin-location-1')
url = reverse('{0}_location_changelist'.format(self.url_prefix))
r = self.client.get(url)
self.assertContains(r, 'test-admin-location-1')
def test_floorplan_list(self):
self._login_as_admin()
self._create_floorplan()
self._create_location()
url = reverse('{0}_floorplan_changelist'.format(self.url_prefix))
r = self.client.get(url)
self.assertContains(r, '1st floor')
def test_location_json_view(self):
self._login_as_admin()
loc = self._create_location()
r = self.client.get(reverse('admin:django_loci_location_json', args=[loc.pk]))
expected = {
'name': loc.name,
'address': loc.address,
'type': loc.type,
'is_mobile': loc.is_mobile,
'geometry': json.loads(loc.geometry.json),
}
self.assertDictEqual(r.json(), expected)
def test_location_floorplan_json_view(self):
self._login_as_admin()
fl = self._create_floorplan()
r = self.client.get(
reverse('admin:django_loci_location_floorplans_json', args=[fl.location.pk])
)
expected = {
'choices': [
{
'id': str(fl.pk),
'str': str(fl),
'floor': fl.floor,
'image': fl.image.url,
'image_width': fl.image.width,
'image_height': fl.image.height,
}
]
}
self.assertDictEqual(r.json(), expected)
def test_location_change_image_removed(self):
self._login_as_admin()
loc = self._create_location(name='test-admin-location-1', type='indoor')
fl = self._create_floorplan(location=loc)
# remove floorplan image
os.remove(fl.image.path)
url = reverse('{0}_location_change'.format(self.url_prefix), args=[loc.pk])
r = self.client.get(url)
self.assertContains(r, 'test-admin-location-1')
def test_floorplan_change_image_removed(self):
self._login_as_admin()
loc = self._create_location(name='test-admin-location-1', type='indoor')
fl = self._create_floorplan(location=loc)
# remove floorplan image
os.remove(fl.image.path)
url = reverse('{0}_floorplan_change'.format(self.url_prefix), args=[fl.pk])
r = self.client.get(url)
self.assertContains(r, 'test-admin-location-1')
def test_is_mobile_location_json_view(self):
self._login_as_admin()
loc = self._create_location(is_mobile=True, geometry=None)
response = self.client.get(
reverse('admin:django_loci_location_json', args=[loc.pk])
)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(content['geometry'], None)
loc1 = self._create_location(
name='location2', address='loc2 add', type='outdoor'
)
response1 = self.client.get(
reverse('admin:django_loci_location_json', args=[loc1.pk])
)
self.assertEqual(response1.status_code, 200)
content1 = json.loads(response1.content)
expected = {
'name': 'location2',
'address': 'loc2 add',
'type': 'outdoor',
'is_mobile': False,
'geometry': {'type': 'Point', 'coordinates': [12.512124, 41.898903]},
}
self.assertEqual(content1, expected)
@responses.activate
def test_geocode(self):
self._login_as_admin()
address = 'Red Square'
url = '{0}?address={1}'.format(
reverse('admin:django_loci_location_geocode_api'), address
)
# Mock HTTP request to the URL to work offline
responses.add(
responses.GET,
f'{self.geocode_url}findAddressCandidates?singleLine=Red+Square&f=json&maxLocations=1',
body=self._load_content('base/static/test-geocode.json'),
content_type='application/json',
)
response = self.client.get(url)
response_lat = round(response.json()['lat'])
response_lng = round(response.json()['lng'])
self.assertEqual(response.status_code, 200)
self.assertEqual(response_lat, 56)
self.assertEqual(response_lng, 38)
def test_geocode_no_address(self):
self._login_as_admin()
url = reverse('admin:django_loci_location_geocode_api')
response = self.client.get(url)
expected = {'error': 'Address parameter not defined'}
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), expected)
@responses.activate
def test_geocode_invalid_address(self):
self._login_as_admin()
invalid_address = 'thisaddressisnotvalid123abc'
url = '{0}?address={1}'.format(
reverse('admin:django_loci_location_geocode_api'), invalid_address
)
responses.add(
responses.GET,
f'{self.geocode_url}findAddressCandidates?singleLine=thisaddressisnotvalid123abc'
'&f=json&maxLocations=1',
body=self._load_content('base/static/test-geocode-invalid-address.json'),
content_type='application/json',
)
response = self.client.get(url)
expected = {'error': 'Not found location with given name'}
self.assertEqual(response.status_code, 404)
self.assertEqual(response.json(), expected)
@responses.activate
def test_reverse_geocode(self):
self._login_as_admin()
lat = 52
lng = 21
url = '{0}?lat={1}&lng={2}'.format(
reverse('admin:django_loci_location_reverse_geocode_api'), lat, lng
)
# Mock HTTP request to the URL to work offline
responses.add(
responses.GET,
f'{self.geocode_url}reverseGeocode?location=21.0%2C52.0&f=json&outSR=4326',
body=self._load_content('base/static/test-reverse-geocode.json'),
content_type='application/json',
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'POL')
@responses.activate
def test_reverse_location_with_no_address(self):
self._login_as_admin()
lat = -30
lng = -30
url = '{0}?lat={1}&lng={2}'.format(
reverse('admin:django_loci_location_reverse_geocode_api'), lat, lng
)
responses.add(
responses.GET,
f'{self.geocode_url}reverseGeocode?location=-30.0%2C-30.0&f=json&outSR=4326',
body=self._load_content(
'base/static/test-reverse-location-with-no-address.json'
),
content_type='application/json',
)
response = self.client.get(url)
response_address = response.json()['address']
self.assertEqual(response.status_code, 404)
self.assertEqual(response_address, '')
def test_reverse_geocode_no_coords(self):
self._login_as_admin()
url = reverse('admin:django_loci_location_reverse_geocode_api')
response = self.client.get(url)
expected = {'error': 'lat or lng parameter not defined'}
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), expected)
| 38.549505 | 99 | 0.617054 | 887 | 7,787 | 5.198422 | 0.157835 | 0.052049 | 0.039471 | 0.04229 | 0.718716 | 0.712861 | 0.657124 | 0.606159 | 0.563002 | 0.494253 | 0 | 0.019805 | 0.260819 | 7,787 | 201 | 100 | 38.741294 | 0.781272 | 0.017337 | 0 | 0.383333 | 0 | 0.011111 | 0.217079 | 0.141101 | 0 | 0 | 0 | 0 | 0.127778 | 1 | 0.072222 | false | 0 | 0.027778 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5395cbb4a78f713d4a2814a8d200c21fd6a061c3 | 485 | py | Python | core/urls.py | donnellan0007/blog | 02c8850688422e3b685ffac10c32bf3e7a7c2e7a | [
"MIT"
] | null | null | null | core/urls.py | donnellan0007/blog | 02c8850688422e3b685ffac10c32bf3e7a7c2e7a | [
"MIT"
] | null | null | null | core/urls.py | donnellan0007/blog | 02c8850688422e3b685ffac10c32bf3e7a7c2e7a | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path
from . import views
app_name = "core"
urlpatterns = [
path('', views.index, name='index'),
path('email/', views.email, name='email'),
path('post/<slug>/', views.post_detail, name='post'),
path('posts/', views.posts, name='posts'),
path('takes/', views.hot_takes, name='takes'),
path('take/<slug>/', views.take_detail, name='take'),
] | 32.333333 | 75 | 0.68866 | 69 | 485 | 4.73913 | 0.304348 | 0.061162 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.121649 | 485 | 15 | 76 | 32.333333 | 0.767606 | 0 | 0 | 0 | 0 | 0 | 0.152263 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.307692 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
5398b81471428ab8f27e820b3cfc198272b782d9 | 1,573 | py | Python | utils/dbconn.py | iamvishnuks/Xmigrate | f8405c72a2ee4203b0fc5ddb55c0a1d9f8d8a7c7 | [
"MIT"
] | 4 | 2020-05-26T11:19:02.000Z | 2020-08-06T11:12:34.000Z | utils/dbconn.py | iamvishnuks/Xmigrate | f8405c72a2ee4203b0fc5ddb55c0a1d9f8d8a7c7 | [
"MIT"
] | 46 | 2022-02-19T09:11:11.000Z | 2022-03-31T15:42:50.000Z | utils/dbconn.py | iamvishnuks/Xmigrate | f8405c72a2ee4203b0fc5ddb55c0a1d9f8d8a7c7 | [
"MIT"
] | 2 | 2019-12-20T12:30:33.000Z | 2020-01-02T22:01:25.000Z | from mongoengine import *
from dotenv import load_dotenv
from os import getenv
from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider
from cassandra.cqlengine import connection
from cassandra.cqlengine.management import sync_table
from cassandra.query import ordered_dict_factory
from model.discover import *
from model.blueprint import *
from model.disk import *
from model.storage import *
from model.project import *
from model.network import *
from model.user import *
load_dotenv()
cass_db = getenv("CASS_DB")
cass_password = getenv("CASS_PASSWORD")
cass_user = getenv("CASS_USER")
def create_db_con():
auth_provider = PlainTextAuthProvider(username=cass_user, password=cass_password)
cluster = Cluster([cass_db],auth_provider=auth_provider)
session = cluster.connect()
session.execute("""
CREATE KEYSPACE IF NOT EXISTS migration
WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': '2' }
""")
session.set_keyspace('migration')
session.row_factory = ordered_dict_factory
connection.setup([cass_db], "migration",protocol_version=3,auth_provider=auth_provider)
sync_table(BluePrint)
sync_table(Discover)
sync_table(Project)
sync_table(Network)
sync_table(Subnet)
sync_table(Storage)
sync_table(Bucket)
sync_table(GcpBucket)
sync_table(User)
sync_table(Disk)
session.execute("CREATE INDEX IF NOT EXISTS ON blue_print (network);")
session.execute("CREATE INDEX IF NOT EXISTS ON blue_print (subnet);")
return session
| 33.468085 | 91 | 0.760966 | 201 | 1,573 | 5.756219 | 0.318408 | 0.085566 | 0.077787 | 0.041487 | 0.081245 | 0.081245 | 0.081245 | 0.081245 | 0.081245 | 0.081245 | 0 | 0.0015 | 0.152575 | 1,573 | 46 | 92 | 34.195652 | 0.866467 | 0 | 0 | 0 | 0 | 0 | 0.183725 | 0.01335 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0.046512 | 0.348837 | 0 | 0.395349 | 0.069767 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
539ea2a319db010bc0f4b82dc9bd72f7d9cbdfe7 | 175 | py | Python | scratchnet/scratchnet.py | Gr1m3y/scratchnet | 5fce471b6e12dc05b3a92fd8581445f7d598d1c3 | [
"MIT"
] | null | null | null | scratchnet/scratchnet.py | Gr1m3y/scratchnet | 5fce471b6e12dc05b3a92fd8581445f7d598d1c3 | [
"MIT"
] | null | null | null | scratchnet/scratchnet.py | Gr1m3y/scratchnet | 5fce471b6e12dc05b3a92fd8581445f7d598d1c3 | [
"MIT"
] | null | null | null | import numpy as np
import network
def main():
x = np.array([2, 3])
nw = network.NeuralNetwork()
print(nw.feedforward(x))
if __name__ == "__main__":
main()
| 13.461538 | 32 | 0.617143 | 24 | 175 | 4.166667 | 0.708333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014925 | 0.234286 | 175 | 12 | 33 | 14.583333 | 0.731343 | 0 | 0 | 0 | 0 | 0 | 0.045714 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.375 | 0.125 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
539f08b39f8bed483a13e19cdf11f4b9e2b776e6 | 1,850 | py | Python | code/run_policy.py | kirk86/ARS | a4ac03e06bce5f183f7b18ea74b81c6c45c4426b | [
"BSD-2-Clause"
] | null | null | null | code/run_policy.py | kirk86/ARS | a4ac03e06bce5f183f7b18ea74b81c6c45c4426b | [
"BSD-2-Clause"
] | null | null | null | code/run_policy.py | kirk86/ARS | a4ac03e06bce5f183f7b18ea74b81c6c45c4426b | [
"BSD-2-Clause"
] | 1 | 2019-03-27T14:11:16.000Z | 2019-03-27T14:11:16.000Z | """
Code to load a policy and generate rollout data. Adapted from https://github.com/berkeleydeeprlcourse.
Example usage:
python run_policy.py ../trained_policies/Humanoid-v1/policy_reward_11600/lin_policy_plus.npz Humanoid-v1 --render \
--num_rollouts 20
"""
import numpy as np
import gym
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('expert_policy_file', type=str)
parser.add_argument('envname', type=str)
parser.add_argument('--render', action='store_true')
parser.add_argument('--num_rollouts', type=int, default=20,
help='Number of expert rollouts')
args = parser.parse_args()
print('loading and building expert policy')
lin_policy = np.load(args.expert_policy_file)
lin_policy = lin_policy[lin_policy.files[0]]
M = lin_policy[0]
# mean and std of state vectors estimated online by ARS.
mean = lin_policy[1]
std = lin_policy[2]
env = gym.make(args.envname)
returns = []
observations = []
actions = []
for i in range(args.num_rollouts):
print('iter', i)
obs = env.reset()
done = False
totalr = 0.
steps = 0
while not done:
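# linear policy: whiten the observation with the ARS running mean/std, then apply M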
action = np.dot(M, (obs - mean)/std)
observations.append(obs)
actions.append(action)
obs, r, done, _ = env.step(action)
totalr += r
steps += 1
if args.render:
env.render()
if steps % 100 == 0: print("%i/%i" % (steps, env.spec.timestep_limit))
if steps >= env.spec.timestep_limit:
break
returns.append(totalr)
print('returns', returns)
print('mean return', np.mean(returns))
print('std of return', np.std(returns))
if __name__ == '__main__':
main()
| 28.90625 | 119 | 0.605946 | 234 | 1,850 | 4.632479 | 0.452991 | 0.066421 | 0.062731 | 0.02952 | 0.090406 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016381 | 0.274054 | 1,850 | 63 | 120 | 29.365079 | 0.790767 | 0.173514 | 0 | 0 | 1 | 0 | 0.108037 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022222 | false | 0 | 0.066667 | 0 | 0.088889 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
539f836eb4814996e6e8dcea4c9325a8edccf36d | 6,048 | py | Python | src/poliastro/plotting/tisserand.py | TreshUp/poliastro | 602eb3c39d315be6dc1edaa12d72ab0e361334f6 | [
"MIT"
] | null | null | null | src/poliastro/plotting/tisserand.py | TreshUp/poliastro | 602eb3c39d315be6dc1edaa12d72ab0e361334f6 | [
"MIT"
] | null | null | null | src/poliastro/plotting/tisserand.py | TreshUp/poliastro | 602eb3c39d315be6dc1edaa12d72ab0e361334f6 | [
"MIT"
] | null | null | null | """ Generates Tisserand plots """
from enum import Enum
import numpy as np
from astropy import units as u
from matplotlib import pyplot as plt
from poliastro.plotting._base import BODY_COLORS
from poliastro.twobody.mean_elements import get_mean_elements
from poliastro.util import norm
class TisserandKind(Enum):
"""All possible Tisserand kinds"""
APSIS = "apsis"
ENERGY = "energy"
PERIOD = "period"
class TisserandPlotter:
"""Generates Tisserand figures"""
def __init__(self, kind=TisserandKind.APSIS, axes=None):
"""Object initializer
Parameters
----------
kind : TisserandKind
Nature for the Tisserand
axes : ~matplotlib.pyplot.axes
Axes for the figure
"""
# Assign Tisserand kind
self.kind = kind
# Check if axis available
if not axes:
_, self.ax = plt.subplots(1, 1)
else:
self.ax = axes
# Force axes scale regarding Tisserand kind
self.ax.set_xscale("log")
if self.kind == TisserandKind.APSIS:
self.ax.set_yscale("log")
def _solve_tisserand(
self, body, vinf_span, num_contours, alpha_lim=(0, np.pi), N=100
):
"""Solves all possible Tisserand lines with a meshgrid workflow
Parameters
----------
body : ~poliastro.bodies.Body
Body to be plotted Tisserand
vinf_span : tuple of ~astropy.units.Quantity
Minimum and maximum Vinf for the flyby
num_contours : int
Number of contour lines for flyby speed
alpha_lim : tuple
Minimum and maximum flyby angles.
N : int
Number of points for flyby angle.
Notes
-----
The algorithm for generating Tisserand plots is the one depicted in
"Preliminary Trajectory Design of a Mission to Enceladus" by David
Falcato Fialho Palma, section 3.6
"""
# Generate mean orbital elements Earth
body_rv = get_mean_elements(body).to_vectors()
R_body, V_body = norm(body_rv.r), norm(body_rv.v)
# Generate non-dimensional velocity and alpha span
vinf_array = np.linspace(vinf_span[0], vinf_span[-1], num_contours)
alpha_array = np.linspace(alpha_lim[0], alpha_lim[-1], N)
vinf_array /= V_body
# Construct the mesh for any configuration
V_INF, ALPHA = np.meshgrid(vinf_array, alpha_array)
# Solving for non-dimensional a_sc and ecc_sc
A_SC = 1 / np.abs(1 - V_INF ** 2 - 2 * V_INF * np.cos(ALPHA))
ECC_SC = np.sqrt(1 - 1 / A_SC * ((3 - 1 / A_SC - V_INF ** 2) / (2)) ** 2)
# Compute main Tisserand variables
RR_P = A_SC * R_body * (1 - ECC_SC)
RR_A = A_SC * R_body * (1 + ECC_SC)
TT = 2 * np.pi * np.sqrt((A_SC * R_body) ** 3 / body.parent.k)
EE = -body.parent.k / (2 * A_SC * R_body)
# Build color lines to internal canvas
return RR_P, RR_A, EE, TT
def _build_lines(self, RR_P, RR_A, EE, TT, color):
"""Collect lines and append them to internal data
Parameters
----------
RR_P, RR_A, EE, TT : ~astropy.units.Quantity
Tisserand variables (periapsis, apoapsis, energy and period)
color : str
Color for the plotted lines
Returns
-------
lines: list
Plotting lines for the Tisserand
"""
# Plot desired kind lines
if self.kind == TisserandKind.APSIS:
# Generate apsis lines
lines = self.ax.plot(RR_A.to(u.AU), RR_P.to(u.AU), color=color)
elif self.kind == TisserandKind.ENERGY:
# Generate energy lines
lines = self.ax.plot(
RR_P.to(u.AU), EE.to(u.km ** 2 / u.s ** 2), color=color
)
elif self.kind == TisserandKind.PERIOD:
# Generate period lines
lines = self.ax.plot(RR_P.to(u.AU), TT.to(u.year), color=color)
return lines
def plot_line(self, body, vinf, alpha_lim=(0, np.pi), color=None):
"""Plots body Tisserand line within flyby angle
Parameters
----------
body : ~poliastro.bodies.Body
Body to be plotted Tisserand
vinf : ~astropy.units.Quantity
Vinf velocity line
alpha_lim : tuple
Minimum and maximum flyby angles
color : str
String representing for the color lines
Returns
-------
self.ax: ~matplotlib.axes.Axes
Apsis tisserand is the default plotting option
"""
# HACK: to reuse Tisserand solver, we transform input Vinf into a tuple
vinf_span = (vinf, vinf)
# Solve Tisserand parameters
RR_P, RR_A, EE, TT = self._solve_tisserand(
body, vinf_span, num_contours=2, alpha_lim=alpha_lim
)
# Check if color defined
if not color:
color = BODY_COLORS[body.name]
# Build canvas lines from Tisserand parameters
self._build_lines(RR_P, RR_A, EE, TT, color)
return self.ax
def plot(self, body, vinf_span, num_contours=10, color=None):
"""Plots body Tisserand for given amount of solutions within Vinf span
Parameters
----------
body : ~poliastro.bodies.Body
Body to be plotted Tisserand
vinf_span : tuple
Minimum and maximum Vinf velocities
num_contours : int
Number of points to iterate over previously defined velocities
color : str
String representing for the color lines
Returns
-------
self.ax: ~matplotlib.axes.Axes
Apsis tisserand is the default plotting option
"""
# Solve Tisserand parameters
RR_P, RR_A, EE, TT = self._solve_tisserand(body, vinf_span, num_contours)
# Check if color defined
if not color:
color = BODY_COLORS[body.name]
# Build canvas lines from Tisserand parameters
self._build_lines(RR_P, RR_A, EE, TT, color)
return self.ax
| 30.24 | 81 | 0.586475 | 778 | 6,048 | 4.424165 | 0.253213 | 0.019175 | 0.010169 | 0.012202 | 0.395991 | 0.341371 | 0.296049 | 0.279198 | 0.255375 | 0.255375 | 0 | 0.008311 | 0.323578 | 6,048 | 199 | 82 | 30.39196 | 0.833048 | 0.41584 | 0 | 0.16129 | 1 | 0 | 0.007794 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080645 | false | 0 | 0.112903 | 0 | 0.33871 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53a46773e97ade0a733cbe735e77d4be70d5d02d | 3,927 | py | Python | openstack/tests/unit/block_storage/v2/test_proxy.py | infonova/openstacksdk | 3cf6730a71d8fb448f24af8a5b4e82f2af749cea | [
"Apache-2.0"
] | null | null | null | openstack/tests/unit/block_storage/v2/test_proxy.py | infonova/openstacksdk | 3cf6730a71d8fb448f24af8a5b4e82f2af749cea | [
"Apache-2.0"
] | null | null | null | openstack/tests/unit/block_storage/v2/test_proxy.py | infonova/openstacksdk | 3cf6730a71d8fb448f24af8a5b4e82f2af749cea | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.block_storage.v2 import _proxy
from openstack.block_storage.v2 import snapshot
from openstack.block_storage.v2 import stats
from openstack.block_storage.v2 import type
from openstack.block_storage.v2 import volume
from openstack.tests.unit import test_proxy_base
class TestVolumeProxy(test_proxy_base.TestProxyBase):
def setUp(self):
super(TestVolumeProxy, self).setUp()
self.proxy = _proxy.Proxy(self.session)
def test_snapshot_get(self):
self.verify_get(self.proxy.get_snapshot, snapshot.Snapshot)
def test_snapshots_detailed(self):
self.verify_list(self.proxy.snapshots, snapshot.SnapshotDetail,
paginated=True,
method_kwargs={"details": True, "query": 1},
expected_kwargs={"query": 1})
def test_snapshots_not_detailed(self):
self.verify_list(self.proxy.snapshots, snapshot.Snapshot,
paginated=True,
method_kwargs={"details": False, "query": 1},
expected_kwargs={"query": 1})
def test_snapshot_create_attrs(self):
self.verify_create(self.proxy.create_snapshot, snapshot.Snapshot)
def test_snapshot_delete(self):
self.verify_delete(self.proxy.delete_snapshot,
snapshot.Snapshot, False)
def test_snapshot_delete_ignore(self):
self.verify_delete(self.proxy.delete_snapshot,
snapshot.Snapshot, True)
def test_type_get(self):
self.verify_get(self.proxy.get_type, type.Type)
def test_types(self):
self.verify_list(self.proxy.types, type.Type, paginated=False)
def test_type_create_attrs(self):
self.verify_create(self.proxy.create_type, type.Type)
def test_type_delete(self):
self.verify_delete(self.proxy.delete_type, type.Type, False)
def test_type_delete_ignore(self):
self.verify_delete(self.proxy.delete_type, type.Type, True)
def test_volume_get(self):
self.verify_get(self.proxy.get_volume, volume.Volume)
def test_volumes_detailed(self):
self.verify_list(self.proxy.volumes, volume.VolumeDetail,
paginated=True,
method_kwargs={"details": True, "query": 1},
expected_kwargs={"query": 1})
def test_volumes_not_detailed(self):
self.verify_list(self.proxy.volumes, volume.Volume,
paginated=True,
method_kwargs={"details": False, "query": 1},
expected_kwargs={"query": 1})
def test_volume_create_attrs(self):
self.verify_create(self.proxy.create_volume, volume.Volume)
def test_volume_delete(self):
self.verify_delete(self.proxy.delete_volume, volume.Volume, False)
def test_volume_delete_ignore(self):
self.verify_delete(self.proxy.delete_volume, volume.Volume, True)
def test_volume_extend(self):
self._verify("openstack.block_storage.v2.volume.Volume.extend",
self.proxy.extend_volume,
method_args=["value", "new-size"],
expected_args=["new-size"])
def test_backend_pools(self):
self.verify_list(self.proxy.backend_pools, stats.Pools,
paginated=False)
| 39.27 | 75 | 0.663102 | 487 | 3,927 | 5.141684 | 0.227926 | 0.071885 | 0.10623 | 0.055112 | 0.582668 | 0.522764 | 0.435304 | 0.435304 | 0.38738 | 0.238019 | 0 | 0.006054 | 0.242934 | 3,927 | 99 | 76 | 39.666667 | 0.836192 | 0.132926 | 0 | 0.212121 | 0 | 0 | 0.040083 | 0.013852 | 0 | 0 | 0 | 0 | 0 | 1 | 0.30303 | false | 0 | 0.090909 | 0 | 0.409091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53a8f467665d04dfb54d9331579d408e1a611989 | 1,461 | py | Python | pythainlp/util/thai.py | korkeatw/pythainlp | 6fc7c3434d5e58c8e8e2bf13470445cbab0866bd | [
"Apache-2.0"
] | null | null | null | pythainlp/util/thai.py | korkeatw/pythainlp | 6fc7c3434d5e58c8e8e2bf13470445cbab0866bd | [
"Apache-2.0"
] | null | null | null | pythainlp/util/thai.py | korkeatw/pythainlp | 6fc7c3434d5e58c8e8e2bf13470445cbab0866bd | [
"Apache-2.0"
] | 1 | 2020-05-27T09:53:09.000Z | 2020-05-27T09:53:09.000Z | # -*- coding: utf-8 -*-
"""
Check if it is Thai text
"""
import string
_DEFAULT_IGNORE_CHARS = string.whitespace + string.digits + string.punctuation
def isthaichar(ch: str) -> bool:
"""
    Check if a character is a Thai character
    (i.e., it falls within the Thai Unicode block)
:param str ch: input character
:return: True or False
"""
ch_val = ord(ch)
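    # The Thai Unicode block spans U+0E00 (3584) through U+0E7F (3711)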
if ch_val >= 3584 and ch_val <= 3711:
return True
return False
def isthai(word: str, ignore_chars: str = ".") -> bool:
"""
    Check if all characters in the word are Thai
    (i.e., the word consists only of Thai characters)
:param str word: input text
:param str ignore_chars: characters to be ignored (i.e. will be considered as Thai)
:return: True or False
"""
if not ignore_chars:
ignore_chars = ""
for ch in word:
if ch not in ignore_chars and not isthaichar(ch):
return False
return True
def countthai(text: str, ignore_chars: str = _DEFAULT_IGNORE_CHARS) -> float:
"""
:param str text: input text
    :return: float, percentage (0-100) of characters in the text that are Thai characters
"""
if not text or not isinstance(text, str):
return 0
if not ignore_chars:
ignore_chars = ""
num_thai = 0
num_ignore = 0
for ch in text:
if ch in ignore_chars:
num_ignore += 1
elif isthaichar(ch):
num_thai += 1
    num_count = len(text) - num_ignore
    if num_count == 0:
        return 0  # every character was ignored; avoid division by zero
    return (num_thai / num_count) * 100
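# --- minimal usage sketch (illustrative addition, not part of the original
# module); the sample strings below are assumptions ---
if __name__ == "__main__":
    print(isthaichar("ก"))  # True: U+0E01 is inside the Thai block
    print(isthai("สวัสดี"))  # True: every character is Thai
    print(isthai("python"))  # False: Latin characters only
    print(countthai("สวัสดี abc"))  # percentage of Thai characters in the text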
| 22.476923 | 87 | 0.612594 | 223 | 1,461 | 3.959641 | 0.363229 | 0.137033 | 0.047565 | 0.03171 | 0.151755 | 0.110985 | 0.04983 | 0 | 0 | 0 | 0 | 0.016409 | 0.290897 | 1,461 | 64 | 88 | 22.828125 | 0.822394 | 0.014374 | 0 | 0.285714 | 0 | 0 | 0.001083 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.035714 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53a96c42fcec2518a3a26c0e6dece5934119cc53 | 1,941 | py | Python | Python/Filter.py | KilroyWasHere-cs-j/savitzky-golay | 2ce110d54e9ad7bc1e4a0c29fa80ad8303ec530f | [
"MIT"
] | null | null | null | Python/Filter.py | KilroyWasHere-cs-j/savitzky-golay | 2ce110d54e9ad7bc1e4a0c29fa80ad8303ec530f | [
"MIT"
] | null | null | null | Python/Filter.py | KilroyWasHere-cs-j/savitzky-golay | 2ce110d54e9ad7bc1e4a0c29fa80ad8303ec530f | [
"MIT"
] | null | null | null | import numpy as np
from scipy.signal import savgol_filter
import matplotlib.pyplot as plt
import MadDog
x = []
y = []
def generate():
# Generate random data
base = np.linspace(0, 5, 11)
# base = np.random.randint(0, 10, 5)
outliers = np.random.randint(10, 20, 2)
data = np.concatenate((base, outliers))
np.random.shuffle(data)
return data
def fill_data():
# Build random data
return np.concatenate((np.array([0]), MadDog.find_outliers(generate()))), np.concatenate(
(np.array([0]), MadDog.find_outliers(generate()))) # np.sin(x) + np.cos(x) + np.random.random(100)
# np.linspace(0, 2*np.pi, 100)
def savitzky(x, y, poly_order):
    # savgol_filter requires an odd window length larger than poly_order
    window = len(x) - 1 if len(x) % 2 == 0 else len(x) - 2
    return savgol_filter(x, window, poly_order), savgol_filter(y, window, poly_order)
def plot_heatmaps(x_filtered, y_filtered, x, y, title="title"):
# Generate some test data
heatmap, xedges, yedges = np.histogram2d(x, y, bins=50)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
plt.clf()
plt.imshow(heatmap.T, extent=extent, origin='lower')
plt.show()
heatmap, xedges, yedges = np.histogram2d(x_filtered, y_filtered, bins=50)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
plt.clf()
plt.imshow(heatmap.T, extent=extent, origin='lower')
plt.show()
def show(x_filtered, y_filtered, x, y, title="Lorem ipsum"):
# Plotting
fig = plt.figure()
ax = fig.subplots()
plt.plot(x_filtered, y_filtered, 'red', marker="o")
plt.plot(x, y, 'green', marker="o")
plt.subplots_adjust(bottom=0.25)
plt.xlabel('x')
plt.ylabel('y')
plt.title(title)
plt.legend(["Filter", "Raw"])
plt.show()
# Generating the noisy signal
x, y = fill_data()
print(len(y))
# Savitzky-Golay filter
x_filtered, y_filtered = savitzky(x, y, 2)
print("X unfiltered>> ", x)
print("Y unfiltered>> ", y)
print("X filtered>> ", x_filtered)
print("Y filtered>> ", y_filtered)
show(x_filtered, y_filtered, x, y)
| 26.589041 | 107 | 0.640907 | 296 | 1,941 | 4.125 | 0.297297 | 0.014742 | 0.097461 | 0.088452 | 0.348894 | 0.348894 | 0.29484 | 0.230958 | 0.230958 | 0.230958 | 0 | 0.029728 | 0.185471 | 1,941 | 72 | 108 | 26.958333 | 0.742568 | 0.119011 | 0 | 0.191489 | 1 | 0 | 0.060588 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106383 | false | 0 | 0.085106 | 0.042553 | 0.255319 | 0.106383 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53aa536c76b41bd1afbf13c8b634be33ef9462e1 | 8,087 | py | Python | examples/adwords/v201406/advanced_operations/add_ad_customizer.py | dietrichc/streamline-ppc-reports | 256f79246aba3c2cf8f792d87a066391a2f471e0 | [
"Apache-2.0"
] | null | null | null | examples/adwords/v201406/advanced_operations/add_ad_customizer.py | dietrichc/streamline-ppc-reports | 256f79246aba3c2cf8f792d87a066391a2f471e0 | [
"Apache-2.0"
] | null | null | null | examples/adwords/v201406/advanced_operations/add_ad_customizer.py | dietrichc/streamline-ppc-reports | 256f79246aba3c2cf8f792d87a066391a2f471e0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adds an ad customizer feed.
Associates the feed with customer and adds an ad that
uses the feed to populate dynamic data.
Tags: CustomerFeedService.mutate, FeedItemService.mutate
Tags: FeedMappingService.mutate, FeedService.mutate
Tags: AdGroupAdService.mutate
"""
__author__ = ('api.msaniscalchi@gmail.com (Mark Saniscalchi)',
'yufeng.dev@gmail.com (Yufeng Guo)')
# Import appropriate classes from the client library.
from googleads import adwords
# See the Placeholder reference page for a list of all the placeholder types
# and fields:
# https://developers.google.com/adwords/api/docs/appendix/placeholders
PLACEHOLDER_AD_CUSTOMIZER = '10'
PLACEHOLDER_FIELD_INTEGER = '1'
PLACEHOLDER_FIELD_FLOAT = '2'
PLACEHOLDER_FIELD_PRICE = '3'
PLACEHOLDER_FIELD_DATE = '4'
PLACEHOLDER_FIELD_STRING = '5'
ADGROUPS = [
'INSERT_ADGROUP_ID_HERE',
'INSERT_ADGROUP_ID_HERE'
]
FEEDNAME = 'INSERT_FEED_NAME_HERE'
def main(client, adgroups):
# Initialize appropriate services.
ad_group_ad_service = client.GetService('AdGroupAdService', version='v201406')
customer_feed_service = client.GetService(
'CustomerFeedService', version='v201406')
feed_item_service = client.GetService('FeedItemService', version='v201406')
feed_mapping_service = client.GetService(
'FeedMappingService', version='v201406')
feed_service = client.GetService('FeedService', version='v201406')
# First, create a customizer feed. One feed per account can be used for all
# ads.
customizer_feed = {
'name': FEEDNAME,
'attributes': [
{'type': 'STRING', 'name': 'Name'},
{'type': 'STRING', 'name': 'Price'},
{'type': 'DATE_TIME', 'name': 'Date'}
]
}
feed_service_operation = {
'operator': 'ADD',
'operand': customizer_feed
}
response = feed_service.mutate([feed_service_operation])
if response and 'value' in response:
feed = response['value'][0]
feed_data = {
'feedId': feed['id'],
'nameId': feed['attributes'][0]['id'],
'priceId': feed['attributes'][1]['id'],
'dateId': feed['attributes'][2]['id']
}
print ('Feed with name \'%s\' and ID %s was added with:'
           '\tName attribute ID %s and price attribute ID %s and date attribute '
'ID %s') % (feed['name'], feed['id'], feed_data['nameId'],
feed_data['priceId'], feed_data['dateId'])
else:
raise Exception('No feeds were added')
# Creating feed mapping to map the fields with customizer IDs.
feed_mapping = {
'placeholderType': PLACEHOLDER_AD_CUSTOMIZER,
'feedId': feed_data['feedId'],
'attributeFieldMappings': [
{
'feedAttributeId': feed_data['nameId'],
'fieldId': PLACEHOLDER_FIELD_STRING
},
{
'feedAttributeId': feed_data['priceId'],
'fieldId': PLACEHOLDER_FIELD_PRICE
},
{
'feedAttributeId': feed_data['dateId'],
'fieldId': PLACEHOLDER_FIELD_DATE
}
]
}
feed_mapping_operation = {
'operator': 'ADD',
'operand': feed_mapping
}
response = feed_mapping_service.mutate([feed_mapping_operation])
if response and 'value' in response:
feed_mapping = response['value'][0]
print ('Feed mapping with ID %s and placeholder type %s was saved for feed'
' with ID %s.') % (feed_mapping['feedMappingId'],
feed_mapping['placeholderType'],
feed_mapping['feedId'])
else:
raise Exception('No feed mappings were added.')
# Now adding feed items -- the values we'd like to place.
items_data = [
{
'name': 'Mars',
'price': '$1234.56',
'date': '20140601 000000',
'adGroupId': adgroups[0]
},
{
'name': 'Venus',
'price': '$1450.00',
'date': '20140615 120000',
'adGroupId': adgroups[1]
}
]
feed_items = [{'feedId': feed_data['feedId'],
'adGroupTargeting': {
'TargetingAdGroupId': item['adGroupId']
},
'attributeValues': [
{
'feedAttributeId': feed_data['nameId'],
'stringValue': item['name']
},
{
'feedAttributeId': feed_data['priceId'],
'stringValue': item['price']
},
{
'feedAttributeId': feed_data['dateId'],
'stringValue': item['date']
}
]} for item in items_data]
feed_item_operations = [{
'operator': 'ADD',
'operand': feed_item
} for feed_item in feed_items]
response = feed_item_service.mutate(feed_item_operations)
if response and 'value' in response:
for feed_item in response['value']:
print 'Feed item with ID %s was added.' % feed_item['feedItemId']
else:
raise Exception('No feed items were added.')
# Finally, creating a customer (account-level) feed with a matching function
# that determines when to use this feed. For this case we use the "IDENTITY"
# matching function that is always 'true' just to associate this feed with
# the customer. The targeting is done within the feed items using the
# :campaign_targeting, :ad_group_targeting, or :keyword_targeting attributes.
matching_function = {
'operator': 'IDENTITY',
'lhsOperand': [
{
'xsi_type': 'ConstantOperand',
'type': 'BOOLEAN',
'booleanValue': 'true'
}
]
}
customer_feed = {
'feedId': feed_data['feedId'],
'matchingFunction': matching_function,
'placeholderTypes': [PLACEHOLDER_AD_CUSTOMIZER]
}
customer_feed_operation = {
'operator': 'ADD',
'operand': customer_feed
}
response = customer_feed_service.mutate([customer_feed_operation])
if response and 'value' in response:
feed = response['value'][0]
print 'Customer feed with ID %s was added.' % feed['feedId']
else:
raise Exception('No customer feeds were added.')
# All set! We can now create ads with customizations.
text_ad = {
'xsi_type': 'TextAd',
'headline': 'Luxury Cruise to {=%s.Name}' % FEEDNAME,
'description1': 'Only {=%s.Price}' % FEEDNAME,
'description2': 'Offer ends in {=countdown(%s.Date)}!' % FEEDNAME,
'url': 'http://www.example.com',
'displayUrl': 'www.example.com'
}
# We add the same ad to both ad groups. When they serve, they will show
# different values, since they match different feed items.
operations = [{
'operator': 'ADD',
'operand': {
'adGroupId': adgroup,
'ad': text_ad
}
} for adgroup in adgroups]
print operations
response = ad_group_ad_service.mutate(operations)
print '===ad group ad service==='
print response
if response and 'value' in response:
for ad in response['value']:
print ('\tCreated an ad with ID \'%s\', type \'%s\', and status \'%s\'.'
% (ad['ad']['id'], ad['ad']['Ad.Type'], ad['status']))
else:
raise Exception('No ads were added.')
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, ADGROUPS)
| 32.09127 | 80 | 0.614072 | 894 | 8,087 | 5.417226 | 0.302013 | 0.021474 | 0.028495 | 0.018584 | 0.082387 | 0.051827 | 0.043981 | 0.031179 | 0.022713 | 0.022713 | 0 | 0.015956 | 0.263757 | 8,087 | 251 | 81 | 32.219124 | 0.797447 | 0.197354 | 0 | 0.142857 | 0 | 0 | 0.292454 | 0.021813 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.005714 | null | null | 0.045714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53ab5b39a644e03ecaaf97048f3ae768e29b5a48 | 503 | py | Python | settings.py | danylo-dudok/youtube-rss | c4478605274cdeac33f909d7fcb7d265898e80bc | [
"MIT"
] | null | null | null | settings.py | danylo-dudok/youtube-rss | c4478605274cdeac33f909d7fcb7d265898e80bc | [
"MIT"
] | null | null | null | settings.py | danylo-dudok/youtube-rss | c4478605274cdeac33f909d7fcb7d265898e80bc | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
from typing import Final
from tools import localize_time
RSS_URL_PREFIX: Final = 'https://www.youtube.com/feeds/videos.xml?channel_id={0}'
LOCATION_ARGUMENT_PREFIX: Final = '--location='
CHANNEL_ARGUMENT_PREFIX: Final = '--channels='
LAST_CHECK_ARGUMENT_PREFIX: Final = '--last-check='
TWO_WEEKS_IN_DAYS: Final = 14
DEFAULT_LAST_CHECK: Final = localize_time(datetime.now() - timedelta(days=TWO_WEEKS_IN_DAYS))
EMPTY: Final = ''
CHANNEL_POSTS_LIMIT: Final = 20
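# --- hedged usage sketch (illustrative, not part of the original file); the
# channel id below is a placeholder ---
# feed_url = RSS_URL_PREFIX.format('UCxxxxxxxxxxxxxxxxxxxxxx')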
| 35.928571 | 93 | 0.787276 | 72 | 503 | 5.194444 | 0.527778 | 0.117647 | 0.152406 | 0.074866 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011013 | 0.097416 | 503 | 13 | 94 | 38.692308 | 0.812775 | 0 | 0 | 0 | 0 | 0 | 0.178926 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.272727 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53b25c7fce6d985ae97109a316a32f1fdb359f32 | 1,049 | py | Python | coba/learners/__init__.py | mrucker/banditbenchmark | 0365291b3a0cf1d862d294e0386d0ccad3f360f1 | [
"BSD-3-Clause"
] | 1 | 2020-07-22T13:43:14.000Z | 2020-07-22T13:43:14.000Z | coba/learners/__init__.py | mrucker/coba | 4f679fb5c6e39e2d0bf3e609c77a2a6865168795 | [
"BSD-3-Clause"
] | null | null | null | coba/learners/__init__.py | mrucker/coba | 4f679fb5c6e39e2d0bf3e609c77a2a6865168795 | [
"BSD-3-Clause"
] | null | null | null | """This module contains all public learners and learner interfaces."""
from coba.learners.primitives import Learner, SafeLearner
from coba.learners.bandit import EpsilonBanditLearner, UcbBanditLearner, FixedLearner, RandomLearner
from coba.learners.corral import CorralLearner
from coba.learners.vowpal import VowpalMediator
from coba.learners.vowpal import VowpalArgsLearner, VowpalEpsilonLearner, VowpalSoftmaxLearner, VowpalBagLearner
from coba.learners.vowpal import VowpalCoverLearner, VowpalRegcbLearner, VowpalSquarecbLearner, VowpalOffPolicyLearner
from coba.learners.linucb import LinUCBLearner
__all__ = [
'Learner',
'SafeLearner',
'RandomLearner',
'FixedLearner',
'EpsilonBanditLearner',
'UcbBanditLearner',
'CorralLearner',
'LinUCBLearner',
'VowpalArgsLearner',
'VowpalEpsilonLearner',
'VowpalSoftmaxLearner',
'VowpalBagLearner',
'VowpalCoverLearner',
'VowpalRegcbLearner',
'VowpalSquarecbLearner',
'VowpalOffPolicyLearner',
'VowpalMediator'
] | 36.172414 | 122 | 0.766444 | 79 | 1,049 | 10.126582 | 0.417722 | 0.07 | 0.14 | 0.0825 | 0.105 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.15348 | 1,049 | 29 | 123 | 36.172414 | 0.900901 | 0.06101 | 0 | 0 | 0 | 0 | 0.276531 | 0.043878 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.269231 | 0 | 0.269231 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53b4d42745fdda68cc9c6626c17825d3356f7324 | 474 | py | Python | backend/resource_files_sample.py | Bhaskers-Blu-Org1/multicloud-incident-response-navigator | e6ba6322fdcc533b6ed14abb4681470a6bb6bd85 | [
"Apache-2.0"
] | null | null | null | backend/resource_files_sample.py | Bhaskers-Blu-Org1/multicloud-incident-response-navigator | e6ba6322fdcc533b6ed14abb4681470a6bb6bd85 | [
"Apache-2.0"
] | null | null | null | backend/resource_files_sample.py | Bhaskers-Blu-Org1/multicloud-incident-response-navigator | e6ba6322fdcc533b6ed14abb4681470a6bb6bd85 | [
"Apache-2.0"
] | 1 | 2020-07-30T10:07:19.000Z | 2020-07-30T10:07:19.000Z | import resource_files
resources = resource_files.ResourceFiles()
# sample use case of getting yamls
print(resources.get_yaml("Pod", "jumpy-shark-gbapp-frontend-844fdccf55-ggkbf", "default", "mycluster"))
# sample use case of getting events
print(resources.get_events('mycluster','default','78abd8c9-ac06-11e9-b68f-0e70a6ce6d3a'))
# sample use case of getting logs
print(resources.get_logs('mycluster', 'default', "jumpy-shark-gbapp-frontend-844fdccf55-ggkbf"))
| 36.461538 | 103 | 0.78481 | 62 | 474 | 5.919355 | 0.516129 | 0.073569 | 0.106267 | 0.122616 | 0.386921 | 0.207084 | 0 | 0 | 0 | 0 | 0 | 0.062069 | 0.082278 | 474 | 12 | 104 | 39.5 | 0.781609 | 0.225738 | 0 | 0 | 0 | 0 | 0.476584 | 0.336088 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.6 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
53b66284f62a337ba9819ca33a9acfe617722619 | 1,785 | py | Python | tests/QuantumToolboxIntegration/test_singleQubitOpenDynamics.py | AngsarM/QuanGuru | 5db6105f843bbc78c2d5b1547e32d494fbe10b8d | [
"BSD-3-Clause"
] | 9 | 2021-05-23T06:30:45.000Z | 2021-12-27T13:33:54.000Z | tests/QuantumToolboxIntegration/test_singleQubitOpenDynamics.py | cahitkargi/QuanGuru | 9b5c94465cd58bc32f6ff845f29dfdec7e0f9075 | [
"BSD-3-Clause"
] | 26 | 2022-03-18T02:40:54.000Z | 2022-03-25T07:00:25.000Z | tests/QuantumToolboxIntegration/test_singleQubitOpenDynamics.py | cahitkargi/QuanGuru | 9b5c94465cd58bc32f6ff845f29dfdec7e0f9075 | [
"BSD-3-Clause"
] | 5 | 2021-05-23T06:30:24.000Z | 2022-02-04T02:40:08.000Z | import random as rn
import numpy as np
# open-system dynamics of a qubit: compare numerical results with the analytical calculations
# NOTE these are also TUTORIALS of the library, so see the Tutorials for what these are doing and analytical
# calculations.
# currently includes 2 cases: (i) decay only, and (ii) unitary evolution by calling Liouville method without giving
# any collapse operators. For now, only looks at excited state populations
# TODO this is an unfinished test. The two tests below are identical, and they do not actually test open-system dynamics.
decayRateSM = rn.random()
excitedPopulation = lambda t: 0.5*np.exp(-(0.00001*(decayRateSM+1)*2+1j)*50*t)
populations = {'excitedAnalytical':[], 'excitedNumerical':[]}
# this is used as the calculate attribute of the qubit, and the singleQubit fixture evolve method calls this at every
# step of the evolution. It stores both numerical and analytical excited state populations into the dictionary above.
def singleQubitDecayCalculate(qub, state, i):
populations['excitedAnalytical'].append(excitedPopulation(i*qub.stepSize))
populations['excitedNumerical'].append(state[0, 0])
def test_qubitUnitaryEvolutionFromLiouville(singleQubit):
for k in populations:
populations[k] = []
singleQubit.evolutionMethod = singleQubit.openEvolution
singleQubit.calculate = singleQubitDecayCalculate
singleQubit.evolve()
assert singleQubit.stepCount == len(populations['excitedNumerical'])
def test_qubitDecay(singleQubit):
for k in populations:
populations[k] = []
singleQubit.evolutionMethod = singleQubit.openEvolution
singleQubit.calculate = singleQubitDecayCalculate
singleQubit.evolve()
assert singleQubit.stepCount == len(populations['excitedNumerical'])
| 45.769231 | 117 | 0.773109 | 217 | 1,785 | 6.35023 | 0.511521 | 0.010885 | 0.026125 | 0.024673 | 0.301887 | 0.301887 | 0.301887 | 0.301887 | 0.301887 | 0.301887 | 0 | 0.01054 | 0.14958 | 1,785 | 38 | 118 | 46.973684 | 0.897233 | 0.419608 | 0 | 0.545455 | 0 | 0 | 0.095424 | 0 | 0 | 0 | 0 | 0.026316 | 0.090909 | 1 | 0.136364 | false | 0 | 0.090909 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53b6dc5235fed6c6481fdc6dfb8b105b1f554689 | 4,480 | py | Python | uncoverml/metadata_profiler.py | GeoscienceAustralia/uncoverml | 672914377afa4ad1c069fcd4845bc45f80132e36 | [
"Apache-2.0"
] | 34 | 2017-03-14T23:59:58.000Z | 2022-03-03T18:04:25.000Z | uncoverml/metadata_profiler.py | GeoscienceAustralia/uncoverml | 672914377afa4ad1c069fcd4845bc45f80132e36 | [
"Apache-2.0"
] | 106 | 2017-03-22T00:26:10.000Z | 2022-03-12T00:19:08.000Z | uncoverml/metadata_profiler.py | GeoscienceAustralia/uncoverml | 672914377afa4ad1c069fcd4845bc45f80132e36 | [
"Apache-2.0"
] | 21 | 2017-05-04T04:02:39.000Z | 2022-02-04T00:55:18.000Z | #! /usr/bin/env python
"""
Description:
Gather Metadata for the uncover-ml prediction output results:
Reference: email 2019-05-24
Overview
Creator: (person who generated the model)
Model;
Name:
Type and date:
Algorithm:
Extent: Lat/long - location on Australia map?
SB Notes: None of the above is required as this information will be captured in the yaml file.
Model inputs:
1. Covariates - list (in full)
2. Targets: path to shapefile: csv file
    SB Notes: Only the covariate list file is needed. Targets and the path to the shapefile are not required, as these are available in the yaml file. Maybe the full path to the shapefile has some merit, as one can specify a partial path.
Model performance
JSON file (in full)
SB Notes: Yes
Model outputs
1. Prediction grid including path
2. Quantiles Q5; Q95
3. Variance:
4. Entropy:
5. Feature rank file
6. Raw covariates file (target value - covariate value)
7. Optimisation output
8. Others ??
SB Notes: Not required as these are model dependent, and the metadata will be contained in each of the output geotif file.
Model parameters:
1. YAML file (in full)
2. .SH file (in full)
SB Notes: The .sh file is not required. YAML file is read as a python dictionary in uncoverml which can be dumped in the metadata.
CreationDate: 31/05/19
Developer: fei.zhang@ga.gov.au
Revision History:
LastUpdate: 31/05/19 FZ
LastUpdate: dd/mm/yyyy Who Optional description
"""
# import section
import os
import sys
import json
import pickle
import datetime
import getpass
import socket
from ppretty import ppretty
import uncoverml
class MetadataSummary():
"""
Summary Description of the ML prediction output
"""
def __init__(self, model, config):
self.model = model
self.description = "Metadata for the ML results"
username = getpass.getuser()
hostname = socket.gethostname()
self.creator = username
self.computename = hostname
self.datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.version = uncoverml.__version__
model_str = ppretty(self.model, indent=' ', show_protected=True, show_static=True,
show_address=False, str_length=50)
self.config = config
self.name = self.config.name # 'demo_regression'
self.algorithm = self.config.algorithm # 'svr'
self.extent = ((-10, 100),(-40, 140))
if config.cross_validate and os.path.exists(config.crossval_scores_file):
with open(config.crossval_scores_file) as sf:
self.model_performance_metrics = json.load(sf)
else:
self.model_performance_metrics = None
def write_metadata(self, out_filename):
"""
write the metadata for this prediction result, into a human-readable txt file.
in order to make the ML results traceable and reproduceable (provenance)
"""
with open(out_filename, 'w') as outf:
outf.write("# Metadata Profile for the Prediction Results")
outf.write("\n\n############ Software Environment ###########\n\n")
outf.write("Creator = %s \n"%self.creator)
outf.write("Computer = %s \n"%self.computename)
outf.write("ML Algorithm = %s \n"%self.algorithm)
outf.write("Version = %s\n"%self.version)
outf.write("Datetime = %s \n"%self.datetime)
outf.write("\n\n############ Performance Matrics ###########\n\n")
if self.model_performance_metrics:
for keys, values in self.model_performance_metrics.items():
outf.write("%s = %s\n"%(keys, values))
outf.write("\n\n############ Configuration ###########\n\n")
conf_str = ppretty(self.config, indent=' ', width=200, seq_length=200,
show_protected=True, show_static=True, show_properties=True,
show_address=False, str_length=200)
outf.write(conf_str)
outf.write("\n\n############ Model ###########\n\n")
model_str = ppretty(self.model, indent=' ', show_protected=True, show_static=True,
show_address=False, str_length=50)
outf.write(model_str)
outf.write("\n\n############ The End of Metadata ###########\n\n")
return out_filename
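# --- hedged usage sketch (illustrative addition): `trained_model` and `conf`
# stand in for a fitted uncover-ml model and its parsed configuration ---
# summary = MetadataSummary(trained_model, conf)
# summary.write_metadata("metadata_profile.txt")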
| 32.941176 | 206 | 0.620536 | 578 | 4,480 | 4.731834 | 0.385813 | 0.046069 | 0.018282 | 0.02011 | 0.11042 | 0.087751 | 0.077148 | 0.064351 | 0.064351 | 0.064351 | 0 | 0.017608 | 0.264732 | 4,480 | 135 | 207 | 33.185185 | 0.81269 | 0.3875 | 0 | 0.074074 | 1 | 0 | 0.158913 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0.037037 | 0.166667 | 0 | 0.240741 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53b7d55368f6a08688dd3db11b258ac91759ec48 | 2,447 | py | Python | asv_bench/benchmarks/algorithms.py | raspbian-packages/pandas | fb33806b5286deb327b2e0fa96aedf25a6ed563f | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null | asv_bench/benchmarks/algorithms.py | raspbian-packages/pandas | fb33806b5286deb327b2e0fa96aedf25a6ed563f | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null | asv_bench/benchmarks/algorithms.py | raspbian-packages/pandas | fb33806b5286deb327b2e0fa96aedf25a6ed563f | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null | import numpy as np
import pandas as pd
from pandas.util import testing as tm
class algorithm(object):
goal_time = 0.2
def setup(self):
N = 100000
self.int_unique = pd.Int64Index(np.arange(N * 5))
# cache is_unique
self.int_unique.is_unique
self.int = pd.Int64Index(np.arange(N).repeat(5))
self.float = pd.Float64Index(np.random.randn(N).repeat(5))
# Convenience naming.
self.checked_add = pd.core.nanops._checked_add_with_arr
self.arr = np.arange(1000000)
self.arrpos = np.arange(1000000)
self.arrneg = np.arange(-1000000, 0)
self.arrmixed = np.array([1, -1]).repeat(500000)
def time_int_factorize(self):
self.int.factorize()
def time_float_factorize(self):
        self.float.factorize()
def time_int_unique_duplicated(self):
self.int_unique.duplicated()
def time_int_duplicated(self):
self.int.duplicated()
def time_float_duplicated(self):
self.float.duplicated()
def time_add_overflow_pos_scalar(self):
self.checked_add(self.arr, 1)
def time_add_overflow_neg_scalar(self):
self.checked_add(self.arr, -1)
def time_add_overflow_zero_scalar(self):
self.checked_add(self.arr, 0)
def time_add_overflow_pos_arr(self):
self.checked_add(self.arr, self.arrpos)
def time_add_overflow_neg_arr(self):
self.checked_add(self.arr, self.arrneg)
def time_add_overflow_mixed_arr(self):
self.checked_add(self.arr, self.arrmixed)
class hashing(object):
goal_time = 0.2
def setup(self):
N = 100000
self.df = pd.DataFrame(
{'A': pd.Series(tm.makeStringIndex(100).take(
np.random.randint(0, 100, size=N))),
'B': pd.Series(tm.makeStringIndex(10000).take(
np.random.randint(0, 10000, size=N))),
'D': np.random.randn(N),
'E': np.arange(N),
'F': pd.date_range('20110101', freq='s', periods=N),
'G': pd.timedelta_range('1 day', freq='s', periods=N),
})
self.df['C'] = self.df['B'].astype('category')
self.df.iloc[10:20] = np.nan
def time_frame(self):
self.df.hash()
def time_series_int(self):
self.df.E.hash()
def time_series_string(self):
self.df.B.hash()
def time_series_categorical(self):
self.df.C.hash()
| 26.89011 | 67 | 0.612178 | 343 | 2,447 | 4.186589 | 0.268222 | 0.07312 | 0.068245 | 0.075209 | 0.365599 | 0.262535 | 0.262535 | 0.190808 | 0.123955 | 0.123955 | 0 | 0.048874 | 0.255823 | 2,447 | 90 | 68 | 27.188889 | 0.739703 | 0.014303 | 0 | 0.129032 | 0 | 0 | 0.012868 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.274194 | false | 0 | 0.048387 | 0 | 0.387097 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53bae4caf0f5e1b3ae61fd16a27c99803d8b7c2e | 1,357 | py | Python | index.py | extwiii/Rock-paper-scissors-lizard-Spock | 7a8eda9f168636a9878c91620e625997ba0994a8 | [
"Apache-2.0"
] | 1 | 2018-08-02T00:52:33.000Z | 2018-08-02T00:52:33.000Z | index.py | extwiii/Rock-paper-scissors-lizard-Spock | 7a8eda9f168636a9878c91620e625997ba0994a8 | [
"Apache-2.0"
] | null | null | null | index.py | extwiii/Rock-paper-scissors-lizard-Spock | 7a8eda9f168636a9878c91620e625997ba0994a8 | [
"Apache-2.0"
] | null | null | null | # Rock-paper-scissors-lizard-Spock template
# The key idea of this program is to equate the strings
# "rock", "paper", "scissors", "lizard", "Spock" to numbers
# as follows:
#
# 0 - rock
# 1 - Spock
# 2 - paper
# 3 - lizard
# 4 - scissors
import random
def name_to_number(name):
if name == "rock":
return 0
elif name == 'Spock':
return 1
elif name == 'paper':
return 2
elif name == 'lizard':
return 3
elif name == 'scissors':
return 4
else :
return None
def number_to_name(number):
if number == 0:
return "rock"
elif number == 1:
return 'Spock'
elif number == 2:
return 'paper'
elif number == 3:
return 'lizard'
elif number == 4:
return 'scissors'
else :
return None
def rpsls(player_choice):
print ""
print "Player chooses",player_choice
player_number = name_to_number(player_choice)
comp_number = random.randrange(5)
comp_choice = number_to_name(comp_number)
print "Computer chooses",comp_choice
diff = (player_number - comp_number)%5
if (diff == 1) or (diff == 2):
print "Player wins!"
elif (diff == 3) or (diff == 4):
print "Computer wins!"
else :
print "Tie!"
rpsls("rock")
rpsls("Spock")
rpsls("paper")
rpsls("lizard")
rpsls("scissors")
| 21.539683 | 59 | 0.590273 | 175 | 1,357 | 4.474286 | 0.268571 | 0.040868 | 0.043423 | 0.058748 | 0.07152 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021717 | 0.287399 | 1,357 | 62 | 60 | 21.887097 | 0.788004 | 0.160648 | 0 | 0.108696 | 0 | 0 | 0.12766 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.021739 | null | null | 0.130435 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53bd7ca2bf66bb072074f8694f4fa68fad92a150 | 9,067 | py | Python | libs/clustering/ensembles/utils.py | greenelab/phenoplier | 95f04b17f0b5227560fcf32ac0a85b2c5aa9001f | [
"BSD-2-Clause-Patent"
] | 3 | 2021-08-17T21:59:19.000Z | 2022-03-08T15:46:24.000Z | libs/clustering/ensembles/utils.py | greenelab/phenoplier | 95f04b17f0b5227560fcf32ac0a85b2c5aa9001f | [
"BSD-2-Clause-Patent"
] | 4 | 2021-08-04T13:57:24.000Z | 2021-10-11T14:57:15.000Z | libs/clustering/ensembles/utils.py | greenelab/phenoplier | 95f04b17f0b5227560fcf32ac0a85b2c5aa9001f | [
"BSD-2-Clause-Patent"
] | null | null | null | """
Contains functions to generate and combine a clustering ensemble.
"""
import numpy as np
import pandas as pd
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score as ari
from sklearn.metrics import adjusted_mutual_info_score as ami
from sklearn.metrics import normalized_mutual_info_score as nmi
from tqdm import tqdm
from clustering.utils import reset_estimator, compare_arrays
def generate_ensemble(data, clusterers: dict, attributes: list, affinity_matrix=None):
"""
It generates an ensemble from the data given a set of clusterers (a
clusterer is an instance of a clustering algorithm with a fixed set of
parameters).
Args:
data:
A numpy array, pandas dataframe, or any other structure supported
by the clusterers as data input.
clusterers:
A dictionary with clusterers specified in this format: { 'k-means
#1': KMeans(n_clusters=2), ... }
attributes:
A list of attributes to save in the final dataframe; for example,
including "n_clusters" will extract this attribute from the
estimator and include it in the final dataframe returned.
affinity_matrix:
If the clustering algorithm is AgglomerativeClustering (from
sklearn) and the linkage method is different than ward (which only
support euclidean distance), the affinity_matrix is given as data
input to the estimator instead of data.
Returns:
A pandas DataFrame with all the partitions generated by the clusterers.
Columns include the clusterer name/id, the partition, the estimator
parameters (obtained with the get_params() method) and any other
attribute specified.
"""
ensemble = []
for clus_name, clus_obj in tqdm(clusterers.items(), total=len(clusterers)):
# get partition
#
# for agglomerative clustering both data and affinity_matrix should be
# given; for ward linkage, data is used, and for the other linkage
# methods the affinity_matrix is used
if (type(clus_obj).__name__ == "AgglomerativeClustering") and (
clus_obj.linkage != "ward"
):
partition = clus_obj.fit_predict(affinity_matrix).astype(float)
else:
partition = clus_obj.fit_predict(data).astype(float)
# remove from partition noisy points (for example, if using DBSCAN)
partition[partition < 0] = np.nan
# get number of clusters
partition_no_nan = partition[~np.isnan(partition)]
n_clusters = np.unique(partition_no_nan).shape[0]
# stop if n_clusters <= 1
if n_clusters <= 1:
reset_estimator(clus_obj)
continue
res = pd.Series(
{
"clusterer_id": clus_name,
"clusterer_params": str(clus_obj.get_params()),
"partition": partition,
}
)
for attr in attributes:
if attr == "n_clusters" and not hasattr(clus_obj, attr):
res[attr] = n_clusters
else:
res[attr] = getattr(clus_obj, attr)
ensemble.append(res)
# for some estimators such as DBSCAN this is needed, because otherwise
# the estimator saves references of huge data structures not needed in
# this context
reset_estimator(clus_obj)
return pd.DataFrame(ensemble).set_index("clusterer_id")
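# --- illustrative call (hedged sketch, not in the original source): build an
# ensemble of k-means partitions over a data matrix `data` ---
# from sklearn.cluster import KMeans
# clusterers = {"k-means #%d" % k: KMeans(n_clusters=k) for k in range(2, 5)}
# ensemble_df = generate_ensemble(data, clusterers, attributes=["n_clusters"])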
def get_ensemble_distance_matrix(ensemble, n_jobs=1):
"""
Given an ensemble, it computes the coassociation matrix (a distance matrix
for all objects using the ensemble information). For each object pair, the
coassociation matrix contains the percentage of times the pair of objects
was clustered together in the ensemble.
Args:
ensemble:
A numpy array representing a set of clustering solutions on the same
data. Each row is a clustering solution (partition) and columns are
objects.
n_jobs:
The number of jobs used by the pairwise_distance matrix from
sklearn.
Returns:
A numpy array representing a square distance matrix for all objects
(coassociation matrix).
"""
def _compare(x, y):
xy = np.array([x, y]).T
xy = xy[~np.isnan(xy).any(axis=1)]
return (xy[:, 0] != xy[:, 1]).sum() / xy.shape[0]
return pairwise_distances(
ensemble.T, metric=_compare, n_jobs=n_jobs, force_all_finite="allow-nan"
)
def supraconsensus(ensemble, k, methods, selection_criterion, n_jobs=1, use_tqdm=False):
"""
It combines a clustering ensemble using a set of methods that the user can
specify. Each of these methods combines the ensemble and returns a single
partition. This function returns the combined partition that maximizes the
selection criterion.
Args:
ensemble:
a clustering ensemble (rows are partitions, columns are objects).
k:
the final number of clusters for the combined partition.
methods:
a list of methods to apply on the ensemble; each returns a combined
partition.
selection_criterion:
a function that represents the selection criterion; this function
has to accept an ensemble as the first argument, and a partition as
the second one.
n_jobs:
number of jobs.
use_tqdm:
            enables/disables the use of tqdm to show a progress bar.
Returns:
Returns a tuple: (partition, best method name, best criterion value)
"""
from concurrent.futures import ProcessPoolExecutor, as_completed
methods_results = {}
with ProcessPoolExecutor(max_workers=n_jobs) as executor:
tasks = {executor.submit(m, ensemble, k): m.__name__ for m in methods}
for future in tqdm(
as_completed(tasks),
total=len(tasks),
disable=(not use_tqdm),
ncols=100,
):
method_name = tasks[future]
part = future.result()
criterion_value = selection_criterion(ensemble, part)
methods_results[method_name] = {
"partition": part,
"criterion_value": criterion_value,
}
# select the best performing method according to the selection criterion
best_method = max(
methods_results, key=lambda x: methods_results[x]["criterion_value"]
)
best_method_results = methods_results[best_method]
return (
best_method_results["partition"],
best_method,
best_method_results["criterion_value"],
)
def run_method_and_compute_agreement(method_func, ensemble_data, ensemble, k, **kwargs):
"""
Runs a consensus clustering method on the ensemble data, obtains the
consolidated partition with the desired number of clusters, and computes
a series of performance measures.
Args:
method_func:
A consensus function (first argument is either the ensemble or
the coassociation matrix derived from the ensemble).
ensemble_data:
A numpy array with the ensemble data that will be given to the
specified method. For evidence accumulation methods, this is the
coassociation matrix (a square matrix with the distance between
object pairs derived from the ensemble).
ensemble:
A numpy array representing the ensemble (partitions in rows, objects
in columns).
k:
The number of clusters to obtain from the ensemble data using the
specified method.
kwargs:
Other parameters passed to `method_func`.
Returns:
It returns a tuple with the data partition derived from the ensemble
data using the specified method, and some performance measures of this
partition.
"""
part = method_func(ensemble_data, k, **kwargs)
nmi_values = np.array(
[
compare_arrays(ensemble_member, part, nmi, use_weighting=True)
for ensemble_member in ensemble
]
)
ami_values = np.array(
[
compare_arrays(ensemble_member, part, ami, use_weighting=True)
for ensemble_member in ensemble
]
)
ari_values = np.array(
[
compare_arrays(ensemble_member, part, ari, use_weighting=True)
for ensemble_member in ensemble
]
)
performance_values = {
"ari_mean": np.mean(ari_values),
"ari_median": np.median(ari_values),
"ari_std": np.std(ari_values),
"ami_mean": np.mean(ami_values),
"ami_median": np.median(ami_values),
"ami_std": np.std(ami_values),
"nmi_mean": np.mean(nmi_values),
"nmi_median": np.median(nmi_values),
"nmi_std": np.std(nmi_values),
}
return part, performance_values
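# --- minimal runnable sketch (illustrative addition); the toy ensemble below
# is an assumption: two partitions of four objects ---
if __name__ == "__main__":
    toy_ensemble = np.array([
        [0, 0, 1, 1],  # partition 1
        [0, 1, 1, 1],  # partition 2
    ])
    # objects 2 and 3 co-cluster in both partitions -> distance 0.0;
    # objects 0 and 1 co-cluster in one of the two -> distance 0.5
    print(get_ensemble_distance_matrix(toy_ensemble))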
| 35.837945 | 88 | 0.644645 | 1,117 | 9,067 | 5.097583 | 0.248881 | 0.023182 | 0.009659 | 0.01686 | 0.114858 | 0.06059 | 0.06059 | 0.06059 | 0 | 0 | 0 | 0.002331 | 0.290394 | 9,067 | 252 | 89 | 35.980159 | 0.882655 | 0.486931 | 0 | 0.085714 | 1 | 0 | 0.054836 | 0.005413 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.085714 | 0 | 0.180952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53bfb5244dff3d80fd05051eac4247280b733cea | 5,761 | py | Python | hastakayit_gui.py | roselight/Image-Recognition-with-OpenCv | 4d0607f37bc80ee0b00790cdcbb0a22c76852ac4 | [
"MIT"
] | 2 | 2020-04-10T21:53:52.000Z | 2020-04-11T12:24:35.000Z | hastakayit_gui.py | roselight/Image-Recognition-with-OpenCv | 4d0607f37bc80ee0b00790cdcbb0a22c76852ac4 | [
"MIT"
] | null | null | null | hastakayit_gui.py | roselight/Image-Recognition-with-OpenCv | 4d0607f37bc80ee0b00790cdcbb0a22c76852ac4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\hastakayit_gui.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import mysql.connector
from PyQt5.QtWidgets import QMessageBox,QWidget,QMainWindow
from PyQt5.QtCore import Qt, QDate, QDateTime
# Create the MySQL database connection used by the queries below.
db = mysql.connector.connect(
host="localhost",
user="root",
passwd="12345",
database="cilth_vt"
)
cursor = db.cursor()
class Ui_MainWindow2(QMainWindow):
def setupUi2(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(600, 205)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("../heartbeat.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
MainWindow.setWindowIcon(icon)
MainWindow.setTabShape(QtWidgets.QTabWidget.Triangular)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.btn_kayit = QtWidgets.QPushButton(self.centralwidget)
self.btn_kayit.setGeometry(QtCore.QRect(180, 150, 121, 31))
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("../avatar.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.btn_kayit.setIcon(icon1)
self.btn_kayit.setObjectName("btn_kayit")
self.btn_kayit.clicked.connect(self.kayitekle)
self.btn_cikis = QtWidgets.QPushButton(self.centralwidget)
self.btn_cikis.setGeometry(QtCore.QRect(310, 150, 121, 31))
self.btn_cikis.setObjectName("btn_cikis")
self.btn_cikis.clicked.connect(self.close)
self.gridLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget)
self.gridLayoutWidget_2.setGeometry(QtCore.QRect(10, 10, 571, 128))
self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2")
self.gridLayout_3 = QtWidgets.QGridLayout(self.gridLayoutWidget_2)
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.gridLayout_3.setObjectName("gridLayout_3")
self.lbl_htc = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.lbl_htc.setObjectName("lbl_htc")
self.gridLayout_3.addWidget(self.lbl_htc, 0, 0, 1, 1)
self.lbl_hadsoyad = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.lbl_hadsoyad.setObjectName("lbl_hadsoyad")
self.gridLayout_3.addWidget(self.lbl_hadsoyad, 1, 0, 1, 1)
self.lbl_hcinsiyet = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.lbl_hcinsiyet.setObjectName("lbl_hcinsiyet")
self.gridLayout_3.addWidget(self.lbl_hcinsiyet, 2, 0, 1, 1)
self.lineEdit_2 = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
self.lineEdit_2.setObjectName("lineEdit_2")
self.gridLayout_3.addWidget(self.lineEdit_2, 1, 1, 1, 1)
self.lineEdit_3 = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
self.lineEdit_3.setObjectName("lineEdit_3")
self.gridLayout_3.addWidget(self.lineEdit_3, 2, 1, 1, 1)
self.lineEdit = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
self.lineEdit.setObjectName("lineEdit")
self.gridLayout_3.addWidget(self.lineEdit, 0, 1, 1, 1)
self.lbl_hdt = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.lbl_hdt.setObjectName("lbl_hdt")
self.gridLayout_3.addWidget(self.lbl_hdt, 3, 0, 1, 1)
self.dt_hdt = QtWidgets.QDateEdit(self.gridLayoutWidget_2)
self.dt_hdt.setObjectName("dt_hdt")
self.dt_hdt.setDateTime(QtCore.QDateTime(QtCore.QDate(2019, 1, 1), QtCore.QTime(0, 0, 0)))
self.gridLayout_3.addWidget(self.dt_hdt, 3, 1, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def kayitekle(self):
        # Sends the values taken from the line edits (k_ad/k_sfire) to the query.
h_tc=self.lineEdit.text()
h_ads=self.lineEdit_2.text()
h_csyt=self.lineEdit_3.text()
h_dt=self.dt_hdt.text()
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("heartbeat.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
QMessageBox.setWindowIcon(self, icon)
try:
hasta_ekle = ("INSERT INTO hasta(h_tc,h_ad_sad,h_cins,h_dt) VALUES (%s,%s,%s,%s)")
cursor.execute(hasta_ekle,(h_tc,h_ads,h_csyt,h_dt))
db.commit()
veri = cursor.rowcount
        except Exception:
            veri = 2
if (veri == 1):
QMessageBox.information(self, 'BİLGİLENDİRME', "İşlem Başarılı.")
else:
QMessageBox.information(self, 'BİLGİLENDİRME', "İşlem Başarısız")
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Cilt Hastalıkları Tespit Uygulaması-Hasta Kayıt Ekranı"))
self.btn_kayit.setText(_translate("MainWindow", "ONAYLA"))
self.btn_cikis.setText(_translate("MainWindow", "İPTAL"))
self.lbl_htc.setText(_translate("MainWindow", "TC Kimlik No:"))
self.lbl_hadsoyad.setText(_translate("MainWindow", "Hasta Adı Soyadı:"))
self.lbl_hcinsiyet.setText(_translate("MainWindow", "Cinsiyet: "))
self.lbl_hdt.setText(_translate("MainWindow", "Doğum Tarihi:"))
self.dt_hdt.setDisplayFormat(_translate("MainWindow", "yyyy.MM.dd"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow2()
ui.setupUi2(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| 44.658915 | 117 | 0.69849 | 714 | 5,761 | 5.488796 | 0.281513 | 0.028579 | 0.064302 | 0.057413 | 0.278898 | 0.246491 | 0.155652 | 0.064812 | 0.064812 | 0.064812 | 0 | 0.029898 | 0.181392 | 5,761 | 128 | 118 | 45.007813 | 0.799194 | 0.052769 | 0 | 0.018868 | 1 | 0.009434 | 0.102442 | 0.005875 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028302 | false | 0.009434 | 0.04717 | 0 | 0.084906 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53c4401601b96a14bafd9a44d9c96d488de53fcf | 7,279 | py | Python | vitrage/datasources/static/driver.py | HoonMinJeongUm/Hunmin-vitrage | 37d43d6b78e8b76fa6a2e83e5c739e9e4917a7b6 | [
"Apache-2.0"
] | null | null | null | vitrage/datasources/static/driver.py | HoonMinJeongUm/Hunmin-vitrage | 37d43d6b78e8b76fa6a2e83e5c739e9e4917a7b6 | [
"Apache-2.0"
] | null | null | null | vitrage/datasources/static/driver.py | HoonMinJeongUm/Hunmin-vitrage | 37d43d6b78e8b76fa6a2e83e5c739e9e4917a7b6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 - Nokia, ZTE
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from itertools import chain
from six.moves import reduce
from oslo_log import log
from vitrage.common.constants import DatasourceProperties as DSProps
from vitrage.common.constants import GraphAction
from vitrage.datasources.driver_base import DriverBase
from vitrage.datasources.static import STATIC_DATASOURCE
from vitrage.datasources.static import StaticFields
from vitrage.utils import file as file_utils
LOG = log.getLogger(__name__)
class StaticDriver(DriverBase):
# base fields are required for all entities, others are treated as metadata
BASE_FIELDS = {StaticFields.STATIC_ID,
StaticFields.TYPE,
StaticFields.ID}
def __init__(self, conf):
super(StaticDriver, self).__init__()
self.cfg = conf
self.entities_cache = []
@staticmethod
def _is_valid_config(config):
"""check for validity of configuration"""
# TODO(yujunz) check with yaml schema or reuse template validation
return StaticFields.DEFINITIONS in config
@staticmethod
def get_event_types():
return []
def enrich_event(self, event, event_type):
pass
def get_all(self, datasource_action):
return self.make_pickleable(self._get_and_cache_all_entities(),
STATIC_DATASOURCE,
datasource_action)
def get_changes(self, datasource_action):
return self.make_pickleable(self._get_and_cache_changed_entities(),
STATIC_DATASOURCE,
datasource_action)
def _get_and_cache_all_entities(self):
self.entities_cache = self._get_all_entities()
return self.entities_cache
def _get_all_entities(self):
files = file_utils.list_files(self.cfg.static.directory, '.yaml', True)
return list(reduce(chain, [self._get_entities_from_file(path)
for path in files], []))
def _get_and_cache_changed_entities(self):
changed_entities = []
new_entities = self._get_all_entities()
for new_entity in new_entities:
old_entity = self._find_entity(new_entity, self.entities_cache)
if old_entity:
# Add modified entities
if not self._equal_entities(old_entity, new_entity):
changed_entities.append(new_entity.copy())
else:
# Add new entities
changed_entities.append(new_entity.copy())
# Add deleted entities
for old_entity in self.entities_cache:
if not self._find_entity(old_entity, new_entities):
old_entity_copy = old_entity.copy()
old_entity_copy[DSProps.EVENT_TYPE] = GraphAction.DELETE_ENTITY
changed_entities.append(old_entity_copy)
self.entities_cache = new_entities
return changed_entities
@classmethod
def _get_entities_from_file(cls, path):
config = file_utils.load_yaml_file(path)
if not cls._is_valid_config(config):
LOG.warning("Skipped invalid config (possible obsoleted): {}"
.format(path))
return []
definitions = config[StaticFields.DEFINITIONS]
entities = definitions[StaticFields.ENTITIES]
relationships = definitions[StaticFields.RELATIONSHIPS]
return cls._pack(entities, relationships)
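    # An illustrative static-definitions YAML that this method expects (hedged
    # sketch; the names and values are assumptions, mirroring the example in
    # _expand_neighbor below):
    #
    #   definitions:
    #     entities:
    #       - static_id: s1
    #         type: switch
    #         id: 1
    #       - static_id: h1
    #         type: host.nova
    #         id: 1
    #     relationships:
    #       - source: s1
    #         target: h1
    #         relationship_type: attached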
@classmethod
def _pack(cls, entities, relationships):
entities_dict = {}
for entity in entities:
cls._pack_entity(entities_dict, entity)
for rel in relationships:
cls._pack_rel(entities_dict, rel)
return entities_dict.values()
@classmethod
def _pack_entity(cls, entities_dict, entity):
static_id = entity[StaticFields.STATIC_ID]
if static_id not in entities_dict:
metadata = {key: value for key, value in entity.items()
if key not in cls.BASE_FIELDS}
entities_dict[static_id] = entity
entity[StaticFields.RELATIONSHIPS] = []
entity[StaticFields.METADATA] = metadata
else:
LOG.warning("Skipped duplicated entity: {}".format(entity))
@classmethod
def _pack_rel(cls, entities_dict, rel):
source_id = rel[StaticFields.SOURCE]
target_id = rel[StaticFields.TARGET]
if source_id == target_id:
# self pointing relationship
entities_dict[source_id][StaticFields.RELATIONSHIPS].append(rel)
else:
source, target = entities_dict[source_id], entities_dict[target_id]
source[StaticFields.RELATIONSHIPS].append(
cls._expand_neighbor(rel, target))
@staticmethod
def _expand_neighbor(rel, neighbor):
"""Expand config id to neighbor entity
rel={'source': 's1', 'target': 'r1', 'relationship_type': 'attached'}
neighbor={'static_id': 'h1', 'vitrage_type': 'host.nova', 'id': 1}
result={'relationship_type': 'attached', 'source': 's1',
'target': {'static_id': 'h1',
'vitrage_type': 'host.nova',
'id': 1}}
"""
rel = rel.copy()
if rel[StaticFields.SOURCE] == neighbor[StaticFields.STATIC_ID]:
rel[StaticFields.SOURCE] = neighbor
elif rel[StaticFields.TARGET] == neighbor[StaticFields.STATIC_ID]:
rel[StaticFields.TARGET] = neighbor
else:
# TODO(yujunz) raise exception and ignore invalid relationship
LOG.error("Invalid neighbor {} for relationship {}"
.format(neighbor, rel))
return None
return rel
@staticmethod
def _find_entity(search_entity, entities):
# naive implementation since we don't expect many static entities
for entity in entities:
if entity[StaticFields.TYPE] == search_entity[StaticFields.TYPE] \
and entity[StaticFields.ID] == \
search_entity[StaticFields.ID]:
return entity
@staticmethod
def _equal_entities(old_entity, new_entity):
# TODO(iafek): compare also the relationships
return old_entity.get(StaticFields.TYPE) == \
new_entity.get(StaticFields.TYPE) and \
old_entity.get(StaticFields.ID) == \
new_entity.get(StaticFields.ID) and \
old_entity.get(StaticFields.NAME) == \
new_entity.get(StaticFields.NAME) and \
old_entity.get(StaticFields.STATE) == \
new_entity.get(StaticFields.STATE)
| 37.911458 | 79 | 0.637588 | 810 | 7,279 | 5.504938 | 0.245679 | 0.028257 | 0.037677 | 0.021529 | 0.17874 | 0.118636 | 0.061449 | 0.040816 | 0.040816 | 0.026463 | 0 | 0.002862 | 0.279846 | 7,279 | 191 | 80 | 38.109948 | 0.847768 | 0.18258 | 0 | 0.181102 | 0 | 0 | 0.020474 | 0 | 0 | 0 | 0 | 0.010471 | 0 | 1 | 0.125984 | false | 0.007874 | 0.070866 | 0.031496 | 0.322835 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53c79195c421ab20eafd11d18287a51c1a99fb79 | 779 | py | Python | python_minecraft_tut_2021/weatherCraft.py | LeGamermc/ursina_tutorials | f0ad518be3a02cdb52f27c87f2f70817b4d0e8b0 | [
"MIT"
] | 13 | 2021-09-01T01:38:13.000Z | 2022-03-29T01:43:50.000Z | python_minecraft_tut_2021/weatherCraft.py | LeGamermc/ursina_tutorials | f0ad518be3a02cdb52f27c87f2f70817b4d0e8b0 | [
"MIT"
] | 14 | 2021-08-01T05:00:22.000Z | 2022-02-03T21:53:23.000Z | python_minecraft_tut_2021/weatherCraft.py | LeGamermc/ursina_tutorials | f0ad518be3a02cdb52f27c87f2f70817b4d0e8b0 | [
"MIT"
] | 31 | 2021-08-09T04:08:11.000Z | 2022-03-23T11:06:15.000Z | """
Weather functions.
"""
from ursina import color, window, time
from nMap import nMap
class Weather:
def __init__(this, rate=1):
this.red = 0
this.green = 200
this.blue = 211
this.darkling = 0
this.rate = rate
this.towardsNight = 1
def setSky(this):
r = nMap(this.darkling,0,100,0,this.red)
g = nMap(this.darkling,0,100,0,this.green)
b = nMap(this.darkling,0,100,0,this.blue)
window.color = color.rgb(r,g,b)
def update(this):
this.darkling -= ( this.rate *
this.towardsNight *
time.dt)
        if this.darkling < 0:
            this.towardsNight *= -1
            this.darkling = 0
        # assumed symmetric bound: reverse again at 100, the top of the
        # 0-100 range that setSky maps, so the day/night cycle oscillates
        if this.darkling > 100:
            this.towardsNight *= -1
            this.darkling = 100
        this.setSky()
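# --- hedged usage sketch (assumed wiring, not from the original repo): ursina
# calls a global update() every frame ---
# from ursina import Ursina
# app = Ursina()
# weather = Weather(rate=5)
# def update():
#     weather.update()
# app.run()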
| 22.911765 | 50 | 0.519897 | 97 | 779 | 4.134021 | 0.329897 | 0.087282 | 0.194514 | 0.127182 | 0.187032 | 0.187032 | 0.187032 | 0 | 0 | 0 | 0 | 0.05668 | 0.365854 | 779 | 33 | 51 | 23.606061 | 0.755061 | 0.023107 | 0 | 0.086957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.086957 | 0 | 0.26087 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53c80402ffddb5cb55023d530bbbc0ac778cca90 | 416 | py | Python | account/migrations/0003_customuser_phone_number.py | zenofewords/thebrushstash | 7d53bd5f22a2daa1011bb502bce56e735504dc84 | [
"MIT"
] | null | null | null | account/migrations/0003_customuser_phone_number.py | zenofewords/thebrushstash | 7d53bd5f22a2daa1011bb502bce56e735504dc84 | [
"MIT"
] | 18 | 2019-12-05T07:27:52.000Z | 2022-02-12T20:50:22.000Z | account/migrations/0003_customuser_phone_number.py | zenofewords/thebrushstash | 7d53bd5f22a2daa1011bb502bce56e735504dc84 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.7 on 2019-11-17 17:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0002_remove_customuser_full_name'),
]
operations = [
migrations.AddField(
model_name='customuser',
name='phone_number',
field=models.CharField(blank=True, max_length=500),
),
]
| 21.894737 | 63 | 0.620192 | 46 | 416 | 5.456522 | 0.782609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.072368 | 0.269231 | 416 | 18 | 64 | 23.111111 | 0.753289 | 0.108173 | 0 | 0 | 1 | 0 | 0.165312 | 0.086721 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53ccd38a42372cb4c8b6646892db6cc4fe7a6bd1 | 722 | py | Python | ipcam/test_snap.py | jack139/HF | 4810f4ee2faf9ab51c867e105addc139da2adfd1 | [
"BSD-3-Clause"
] | 10 | 2019-04-07T20:13:23.000Z | 2021-12-07T06:23:52.000Z | ipcam/test_snap.py | jack139/HF | 4810f4ee2faf9ab51c867e105addc139da2adfd1 | [
"BSD-3-Clause"
] | 1 | 2020-05-29T16:11:22.000Z | 2020-05-29T16:11:22.000Z | ipcam/test_snap.py | jack139/HF | 4810f4ee2faf9ab51c867e105addc139da2adfd1 | [
"BSD-3-Clause"
] | 6 | 2017-10-20T10:53:33.000Z | 2020-04-24T06:34:18.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys,os,time
if len(sys.argv)<2:
print "usage: test_snap.py <check|show>"
sys.exit(2)
kam_cmd=sys.argv[1]
path='/var/data2/snap_store'
a=os.listdir(path)
a.remove('535e1a5c1ecffb2fa372fd7d') # this is a camera not used in HF system
if kam_cmd=='show' or kam_cmd=='check':
last_sub=int(time.time()/600)
for i in a:
sub='%s/%s' % (path, i)
b=os.listdir(sub)
if 'capture' in b:
b.remove('capture')
b.sort()
sub2='%s/%s' % (sub, b[-1])
c=os.listdir(sub2)
if kam_cmd=='show' or last_sub-int(b[-1])>3:
print "%s - %d, %s - %d, (%d)" % (i, len(b), b[-1], len(c), last_sub-int(b[-1]))
else:
print "usage: test_snap.py <check|show>"
sys.exit(2)
| 21.878788 | 83 | 0.613573 | 138 | 722 | 3.137681 | 0.42029 | 0.055427 | 0.069284 | 0.083141 | 0.290993 | 0.170901 | 0.170901 | 0.170901 | 0.170901 | 0.170901 | 0 | 0.044335 | 0.15651 | 722 | 32 | 84 | 22.5625 | 0.666667 | 0.112188 | 0 | 0.173913 | 0 | 0 | 0.263323 | 0.070533 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.043478 | null | null | 0.130435 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
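The staleness test in the script above hinges on snapshot subdirectories named after 600-second epoch buckets (int(time.time()/600)). A small Python 3 restatement of that bucket arithmetic, with function names chosen here for illustration:
import time

BUCKET_SECONDS = 600  # each snapshot subdirectory spans a 10-minute window

def current_bucket(now=None):
    # Same arithmetic as last_sub in the script: integer epoch bucket.
    return int((time.time() if now is None else now) / BUCKET_SECONDS)

def is_stale(newest_bucket, max_lag=3):
    # The script flags a camera when its newest bucket lags more than
    # three buckets (roughly 30 minutes) behind the current one.
    return current_bucket() - int(newest_bucket) > max_lag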
53cfe05a29410444b4904c98e9ea7e4826833ee4 | 4,702 | py | Python | awx/main/management/commands/run_dispatcher.py | atr0s/awx | 388ef077c384f4c5296d4870d3b0cf0e6718db80 | [
"Apache-2.0"
] | null | null | null | awx/main/management/commands/run_dispatcher.py | atr0s/awx | 388ef077c384f4c5296d4870d3b0cf0e6718db80 | [
"Apache-2.0"
] | null | null | null | awx/main/management/commands/run_dispatcher.py | atr0s/awx | 388ef077c384f4c5296d4870d3b0cf0e6718db80 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
import os
import logging
from multiprocessing import Process
from django.conf import settings
from django.core.cache import cache as django_cache
from django.core.management.base import BaseCommand
from django.db import connection as django_connection
from kombu import Connection, Exchange, Queue
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.dispatch.control import Control
from awx.main.dispatch.pool import AutoscalePool
from awx.main.dispatch.worker import AWXConsumer, TaskWorker
logger = logging.getLogger('awx.main.dispatch')
def construct_bcast_queue_name(common_name):
return common_name.encode('utf8') + '_' + settings.CLUSTER_HOST_ID
class Command(BaseCommand):
help = 'Launch the task dispatcher'
def add_arguments(self, parser):
parser.add_argument('--status', dest='status', action='store_true',
help='print the internal state of any running dispatchers')
parser.add_argument('--running', dest='running', action='store_true',
help='print the UUIDs of any tasks managed by this dispatcher')
parser.add_argument('--reload', dest='reload', action='store_true',
help=('cause the dispatcher to recycle all of its worker processes; '
'running jobs will run to completion first'))
def beat(self):
from celery import Celery
from celery.beat import PersistentScheduler
from celery.apps import beat
class AWXScheduler(PersistentScheduler):
def __init__(self, *args, **kwargs):
self.ppid = os.getppid()
super(AWXScheduler, self).__init__(*args, **kwargs)
def setup_schedule(self):
super(AWXScheduler, self).setup_schedule()
self.update_from_dict(settings.CELERYBEAT_SCHEDULE)
def tick(self, *args, **kwargs):
if os.getppid() != self.ppid:
# if the parent PID changes, this process has been orphaned
# via e.g., segfault or sigkill, we should exit too
raise SystemExit()
return super(AWXScheduler, self).tick(*args, **kwargs)
def apply_async(self, entry, producer=None, advance=True, **kwargs):
task = TaskWorker.resolve_callable(entry.task)
result, queue = task.apply_async()
class TaskResult(object):
id = result['uuid']
return TaskResult()
app = Celery()
app.conf.BROKER_URL = settings.BROKER_URL
app.conf.CELERY_TASK_RESULT_EXPIRES = False
beat.Beat(
30,
app,
schedule='/var/lib/awx/beat.db', scheduler_cls=AWXScheduler
).run()
def handle(self, *arg, **options):
if options.get('status'):
print Control('dispatcher').status()
return
if options.get('running'):
print Control('dispatcher').running()
return
if options.get('reload'):
return Control('dispatcher').control({'control': 'reload'})
# It's important to close these because we're _about_ to fork, and we
# don't want the forked processes to inherit the open sockets
# for the DB and memcached connections (that way lies race conditions)
django_connection.close()
django_cache.close()
beat = Process(target=self.beat)
beat.daemon = True
beat.start()
reaper.reap()
consumer = None
with Connection(settings.BROKER_URL) as conn:
try:
bcast = 'tower_broadcast_all'
queues = [
Queue(q, Exchange(q), routing_key=q)
for q in (settings.AWX_CELERY_QUEUES_STATIC + [get_local_queuename()])
]
queues.append(
Queue(
construct_bcast_queue_name(bcast),
exchange=Exchange(bcast, type='fanout'),
routing_key=bcast,
reply=True
)
)
consumer = AWXConsumer(
'dispatcher',
conn,
TaskWorker(),
queues,
AutoscalePool(min_workers=4)
)
consumer.run()
except KeyboardInterrupt:
logger.debug('Terminating Task Dispatcher')
if consumer:
consumer.stop()
| 37.616 | 96 | 0.576989 | 492 | 4,702 | 5.394309 | 0.422764 | 0.013188 | 0.028259 | 0.028636 | 0.020347 | 0.020347 | 0 | 0 | 0 | 0 | 0 | 0.002558 | 0.334964 | 4,702 | 124 | 97 | 37.919355 | 0.846178 | 0.076138 | 0 | 0.020619 | 0 | 0 | 0.11024 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.154639 | null | null | 0.041237 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
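As a usage note for the command above: the --status/--running/--reload flags are normally reached through AWX's management command entry point (the awx-manage wrapper name is the conventional one, shown here as an assumption), and the same Control class used in handle() can be called directly:
#   awx-manage run_dispatcher --status    # print dispatcher internal state
#   awx-manage run_dispatcher --running   # print UUIDs of managed tasks
#   awx-manage run_dispatcher --reload    # recycle workers; running jobs finish first
from awx.main.dispatch.control import Control

print(Control('dispatcher').status())            # same call as --status
Control('dispatcher').control({'control': 'reload'})  # same call as --reload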
53d38a232396aeecc14c7708fa90954da15a7129 | 21,306 | py | Python | Contents/scripts/siweighteditor/weight.py | jdrese/SIWeightEditor | 0529c1a366b955f4373acd2e2f08f63b7909ff82 | [
"MIT"
] | 1 | 2018-12-12T15:39:13.000Z | 2018-12-12T15:39:13.000Z | Contents/scripts/siweighteditor/weight.py | jdrese/SIWeightEditor | 0529c1a366b955f4373acd2e2f08f63b7909ff82 | [
"MIT"
] | null | null | null | Contents/scripts/siweighteditor/weight.py | jdrese/SIWeightEditor | 0529c1a366b955f4373acd2e2f08f63b7909ff82 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from maya import mel
from maya import cmds
from . import lang
from . import common
import os
import json
import re
class WeightCopyPaste():
def main(self, skinMeshes, mode='copy', saveName='default', method='index', weightFile='auto',
threshold=0.2, engine='maya', tgt=1, path='default', viewmsg=False):
if viewmsg:
cmds.inViewMessage( amg='<hl>Simple Weight</hl> : '+mode, pos='midCenterTop', fade=True, ta=0.75, a=0.5)
'''
Save / load function for weight data.
mode -> whether to copy or paste: 'copy' or 'paste'
saveName -> folder name for the saved weight data; specify it to separate data per tool or model
method -> how to paste: 'index', 'nearest', 'barycentric', or 'over'
The 'index' method maps weights onto the object using vertex indices. It is the most convenient method when the target object and the exported data share the same topology.
The 'nearest' method searches for the nearest vertex in the loaded data and sets the weight to that value. It is best for mapping a high-resolution mesh onto a low-resolution mesh.
The 'barycentric' method is supported only for polygon meshes. It searches for the nearest triangle on the target geometry and
rescales the weights according to the distance between the source point and the vertices. It is usually used on coarse meshes that are mapped onto high-resolution meshes.
The 'over' method is similar to 'index', but the weights on the target mesh are not cleared before mapping, so weights on unmatched indices are kept as they are.
nearest and barycentric are currently unusable due to a bug (the process never finishes), as of 2016/11/03.
-> barycentric and bilinear are available from Maya 2016 Extension 2.
weightFile -> specify a path to pick the file manually instead of searching by mesh name; intended to be used together with the nearest/barycentric methods.
-> Note that specifying a file name when copying in Maya prevents saving more than one file.
threshold -> position search range for nearest/barycentric
'''
self.skinMeshes = skinMeshes
self.saveName = saveName
self.method = method
self.weightFile = weightFile
self.threshold = threshold
self.engine = engine
self.memShapes = {}
self.target = tgt
self.pasteMode = {'index':1, 'nearest':3}
# Convert to a list if the input is not a list type
if not isinstance(self.skinMeshes, list):
temp = self.skinMeshes
self.skinMeshes = []
self.skinMeshes.append(temp)
# Build the file path ahead of time
if path == 'default':
self.filePath = os.getenv('MAYA_APP_DIR') + '\\Scripting_Files\\weight\\' + self.saveName
elif path == 'project':
self.scene_path = '/'.join(cmds.file(q=True, sceneName=True).split('/')[:-1])
self.protect_path = os.path.join(self.scene_path, 'weight_protector')
try:
if not os.path.exists(self.protect_path):
os.makedirs(self.protect_path)
except Exception as e:
print e.message
return
self.filePath = self.protect_path+'\\' + self.saveName
self.fileName = os.path.join(self.filePath, self.saveName + '.json')
self.apiName = os.path.join(self.filePath, self.saveName + '.skn')
# Call either copy or paste
if mode == 'copy':
self.weightCopy()
if mode == 'paste':
self.weightPaste()
def weightPaste(self):
dummy = cmds.spaceLocator()
for skinMesh in self.skinMeshes:
# Decide which save file to read; 'auto' means use the mesh name
if self.weightFile == 'auto':
weightFile = skinMesh
else:
weightFile = self.weightFile
dstSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
# If there is no skin cluster, bind using the information fetched beforehand
if not dstSkinCluster:
meshName = str(weightFile).replace('|', '__pipe__')
if os.path.exists(self.fileName):
try:
with open(self.fileName, 'r') as f: # open the file; 'r' is read mode, 'w' is write mode
saveData = json.load(f) # load
# self.visibility = saveData['visibility'] # read from the save data
skinningMethod = saveData[';skinningMethod']
dropoffRate = saveData[';dropoffRate']
maintainMaxInfluences = saveData[';maintainMaxInfluences']
maxInfluences = saveData[';maxInfluences']
bindMethod = saveData[';bindMethod']
normalizeWeights = saveData[';normalizeWeights']
influences = saveData[';influences']
# If the child node is a transform, parent it to a dummy to keep it out of the way
common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='cut')
influences = cmds.ls(influences, l=True, tr=True)
# Bind
dstSkinCluster = cmds.skinCluster(
skinMesh,
influences,
omi=maintainMaxInfluences,
mi=maxInfluences,
dr=dropoffRate,
sm=skinningMethod,
nw=normalizeWeights,
tsb=True,
)
dstSkinCluster = dstSkinCluster[0]
# Restore the original parenting
common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='parent')
tempSkinNode = skinMesh # keep the node holding the skin cluster so its parent can be fetched
except Exception as e:
print e.message
print 'Error !! Skin bind failed : ' + skinMesh
continue
else:
dstSkinCluster = dstSkinCluster[0]
tempSkinNode = skinMesh # keep the node holding the skin cluster so its parent can be fetched
if self.engine == 'maya':
files = os.listdir(self.filePath)
print files
if len(files) == 2:
for file in files:
name, ext = os.path.splitext(file)
if ext == '.xml':
xml_name = file
else:
# '|' (pipe) is not allowed in file names, so convert it
meshName = str(weightFile).replace('|', '__pipe__')
# ':' (colon) is not allowed in file names, so convert it
meshName = str(meshName).replace(':', '__colon__')
xml_name = meshName + '.xml'
if os.path.isfile(self.filePath + '\\' + xml_name):
if self.method == 'index' or self.method == 'over':
cmds.deformerWeights(xml_name,
im=True,
method=self.method,
deformer=dstSkinCluster,
path=self.filePath + '\\')
else:
cmds.deformerWeights(xml_name,
im=True,
deformer=dstSkinCluster,
method=self.method,
worldSpace=True,
positionTolerance=self.threshold,
path=self.filePath + '\\')
cmds.skinCluster(dstSkinCluster, e=True, forceNormalizeWeights=True)
print 'Weight paste to : ' + str(skinMesh)
else:
print 'Saved weight XML file does not exist : ' + skinMesh
# Delete the dummy parent
cmds.delete(dummy)
cmds.select(self.skinMeshes, r=True)
# Function that saves the weight information
def weightCopy(self):
saveData = {}
# Create the save directory if it does not exist
if not os.path.exists(self.filePath):
os.makedirs(os.path.dirname(self.filePath + '\\')) # note that the trailing \\ is required
else: # if it exists, delete its contents
files = os.listdir(self.filePath)
if files is not None:
for file in files:
os.remove(self.filePath + '\\' + file)
skinFlag = False
all_influences = []
for skinMesh in self.skinMeshes:
try:
cmds.bakePartialHistory(skinMesh, ppt=True)
except:
pass
# Fetch the skin cluster from the node history; fixed because the node directly above inMesh is not always a SkinCluster
srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
if not srcSkinCluster:
continue # move on to the next mesh if there is no skin cluster
tempSkinNode = skinMesh # keep the node holding the skin cluster so its parent can be fetched
# Fetch the various skin cluster parameters
srcSkinCluster = srcSkinCluster[0]
skinningMethod = cmds.getAttr(srcSkinCluster + ' .skm')
dropoffRate = cmds.getAttr(srcSkinCluster + ' .dr')
maintainMaxInfluences = cmds.getAttr(srcSkinCluster + ' .mmi')
maxInfluences = cmds.getAttr(srcSkinCluster + ' .mi')
bindMethod = cmds.getAttr(srcSkinCluster + ' .bm')
normalizeWeights = cmds.getAttr(srcSkinCluster + ' .nw')
influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True)
saveData[';skinningMethod'] = skinningMethod
saveData[';dropoffRate'] = dropoffRate
saveData[';maintainMaxInfluences'] = maintainMaxInfluences
saveData[';maxInfluences'] = maxInfluences
saveData[';bindMethod'] = bindMethod
saveData[';normalizeWeights'] = normalizeWeights
all_influences += influences
#saveData[';influences'] = influences
skinFlag = True
all_influences = list(set(all_influences))
saveData[';influences'] = all_influences
# Add the influences beforehand so the copy survives changes in the influence count
for skinMesh in self.skinMeshes:
srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
if not srcSkinCluster:
continue # move on to the next iteration of the loop if there is no skin cluster
srcSkinCluster = srcSkinCluster[0]
influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True)
sub_influences = list(set(all_influences) - set(influences))
if sub_influences:
cmds.skinCluster(skinMesh, e=True, ai=sub_influences, lw=True, ug=True, wt=0, ps=0)
if self.engine == 'maya':
# Decide which save file to read; 'auto' means use the mesh name
if self.weightFile == 'auto':
weightFile = skinMesh
else:
weightFile = self.weightFile
# '|' (pipe) is not allowed in file names, so convert it
meshName = str(weightFile).replace('|', '__pipe__')
# ':' (colon) is not allowed in file names, so convert it
meshName = str(meshName).replace(':', '__colon__')
cmds.deformerWeights(meshName + '.xml', export=True, deformer=srcSkinCluster, path=self.filePath + '\\')
with open(self.fileName, 'w') as f: # open the file; 'r' is read mode, 'w' is write mode
json.dump(saveData, f)
def transfer_weight(skinMesh, transferedMesh, transferWeight=True, returnInfluences=False, logTransfer=True):
'''
Skin weight transfer function.
If the destination object is not bound, it is bound automatically from the source bind information.
Arguments:
skinMesh -> source mesh (one mesh; a list also works)
transferedMesh -> destination meshes (list form, multiple allowed; a non-list also works)
transferWeight -> whether to transfer the weights; optional, default True
logTransfer -> whether to print a log
returnInfluences -> whether to return the bound influence information; optional, default False
'''
massege01 = lang.Lang(
en=': It does not perform the transfer of weight because it is not a skin mesh.',
ja=u': スキンメッシュではないのでウェイトの転送を行いません'
).output()
massege02 = lang.Lang(
en='Transfer the weight:',
ja=u'ウェイトを転送:'
).output()
massege03 = lang.Lang(
en='Transfer bind influences:',
ja=u'バインド状態を転送:'
).output()
if isinstance(skinMesh, list): # if the source is a list, take only the first mesh
skinMesh = skinMesh[0] # safeguard for when a list is passed in
# Fetch the skin cluster from the node history; fixed because the node directly above inMesh is not always a SkinCluster
srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
# srcSkinCluster = cmds.listConnections(skinMesh+'.inMesh', s=True, d=False)
if not srcSkinCluster:
if logTransfer:
print skinMesh + massege01
return False # leave the function if there is no skin cluster
# Fetch the various skin cluster parameters
srcSkinCluster = srcSkinCluster[0]
skinningMethod = cmds.getAttr(srcSkinCluster + ' .skm')
dropoffRate = cmds.getAttr(srcSkinCluster + ' .dr')
maintainMaxInfluences = cmds.getAttr(srcSkinCluster + ' .mmi')
maxInfluences = cmds.getAttr(srcSkinCluster + ' .mi')
bindMethod = cmds.getAttr(srcSkinCluster + ' .bm')
normalizeWeights = cmds.getAttr(srcSkinCluster + ' .nw')
influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True) # the q flag is query mode; e would be edit mode
# Convert to a list if the input is not a list type
if not isinstance(transferedMesh, list):
temp = transferedMesh
transferedMesh = []
transferedMesh.append(temp)
for dst in transferedMesh:
# Prepare a dummy parent for evacuating the child nodes
dummy = common.TemporaryReparent().main(mode='create')
common.TemporaryReparent().main(dst,dummyParent=dummy, mode='cut')
shapes = cmds.listRelatives(dst, s=True, pa=True, type='mesh')
if not shapes: # if there is no mesh
continue # stop here and move on to the next object
# Check whether a skin cluster exists
dstSkinCluster = cmds.ls(cmds.listHistory(shapes[0]), type='skinCluster')
# If there is no skin cluster, bind using the information fetched beforehand
if not dstSkinCluster:
# Bind
dstSkinCluster = cmds.skinCluster(
dst,
influences,
omi=maintainMaxInfluences,
mi=maxInfluences,
dr=dropoffRate,
sm=skinningMethod,
nw=normalizeWeights,
tsb=True,
)
if logTransfer:
print massege03 + '[' + skinMesh + '] >>> [' + dst + ']'
dstSkinCluster = dstSkinCluster[0]
if transferWeight:
cmds.copySkinWeights(
ss=srcSkinCluster,
ds=dstSkinCluster,
surfaceAssociation='closestPoint',
influenceAssociation=['name', 'closestJoint', 'oneToOne'],
normalize=True,
noMirror=True
)
if logTransfer:
print massege02 + '[' + skinMesh + '] >>> [' + dst + ']'
# Restore the original parenting
common.TemporaryReparent().main(dst,dummyParent=dummy, mode='parent')
# Delete the dummy parent
common.TemporaryReparent().main(dummyParent=dummy, mode='delete')
if returnInfluences:
return influences
else:
return True
def symmetry_weight(srcNode=None, dstNode=None, symWeight=True):
'''
Weight mirroring (symmetry) function.
srcNode -> mirror source
dstNode -> mirror destination
symWeight -> whether to mirror the weights
'''
# Get the skin cluster
if srcNode is None:
return
srcShapes = cmds.listRelatives(srcNode, s=True, pa=True, type='mesh')
if srcShapes:
srcSkinCluster = cmds.ls(cmds.listHistory(srcNode), type='skinCluster')
# If a skin cluster exists, set the joint labels and mirror the weights
if srcSkinCluster:
# Call the function that transfers the bind state
skinJointAll = cmds.skinCluster(srcSkinCluster, q=True, inf=True) # get the joints
for skinJoint in skinJointAll:
# Call the joint label setup function
joint_label(skinJoint, visibility=False)
if symWeight is False or dstNode is None:
return
transfer_weight(srcNode, dstNode, transferWeight=False, returnInfluences=True)
dstShapes = cmds.listRelatives(dstNode, s=True, pa=True, type='mesh')
dstSkinCluster = cmds.listConnections(dstShapes[0] + '.inMesh', s=True, d=False)
cmds.copySkinWeights(ss=srcSkinCluster[0], ds=dstSkinCluster[0],
mirrorMode='YZ', surfaceAssociation='closestComponent',
influenceAssociation='label', normalize=True)
def load_joint_label_rules():
# Default values used when the rules cannot be loaded
start_l_list = ['L_', 'l_', 'Left_', 'left_']
start_r_list = ['R_', 'r_', 'Right_', 'right_']
mid_l_list = ['_L_', '_l_', '_Left_', '_left_']
mid_r_list = ['_R_', '_r_', '_Right_', '_right_']
end_l_list = ['_L', '_l', '_L.', '_l.', '_L..', '_l..', '_Left', '_left']
end_r_list = ['_R', '_r', '_R.', '_r.', '_R..', '_r..', '_Right', '_right']
def_left_list_list = [start_l_list, mid_l_list, end_l_list]
def_right_list_list = [start_r_list, mid_r_list, end_r_list]
# Load the rules from the left/right symmetry settings file
dir_path = os.path.join(
os.getenv('MAYA_APP_DIR'),
'Scripting_Files')
start_file = dir_path+'/joint_rule_start.json'
middle_file = dir_path+'/joint_rule_middle.json'
end_file = dir_path+'/joint_rule_end.json'
save_files = [start_file, middle_file, end_file]
left_list_list = []
right_list_list = []
for i, save_file in enumerate(save_files):
if os.path.exists(save_file): # if the save file exists
try:
with open(save_file, 'r') as f:
save_data = json.load(f)
l_list = save_data.keys()
r_list = save_data.values()
left_list_list.append(l_list)
right_list_list.append(r_list)
except Exception as e:
print e.message
left_list_list.append(def_left_list_list[i])
right_list_list.append(def_right_list_list[i])
else:
left_list_list.append(def_left_list_list[i])
right_list_list.append(def_right_list_list[i])
return left_list_list, right_list_list
def joint_label(object, visibility=False):
'''
Joint label setup function.
object -> object(s); a list also works
visibility -> label visibility; optional, default False
'''
# Load the labeling rules beforehand
left_list_list, right_list_list = load_joint_label_rules()
# Convert to a list if the input is not a list type
if not isinstance(object, list):
temp = object
object = []
object.append(temp)
for skinJoint in object:
objTypeName = cmds.objectType(skinJoint)
if objTypeName == 'joint':
split_name = skinJoint.split('|')[-1]
# Determine whether the skeleton name contains an L/R marker
side = 0
side_name = ''
for i, (l_list, r_list) in enumerate(zip(left_list_list, right_list_list)):
for j, lr_list in enumerate([l_list, r_list]):
for k, lr in enumerate(lr_list):
if i == 0:
if re.match(lr, split_name):
side = j + 1
if i == 1:
if re.search(lr, split_name):
side = j + 1
if i == 2:
if re.match(lr[::-1], split_name[::-1]):
side = j + 1
if side: # once a match is found, break out of every loop
side_name = lr
break
if side:
break
if side:
break
#print 'joint setting :', split_name, side, side_name
# Set the left/right label; when it is neither, use center
cmds.setAttr(skinJoint + '.side', side)
# Set the label type to "Other"
cmds.setAttr(skinJoint + '.type', 18)
new_joint_name = split_name.replace(side_name.replace('.', ''), '')
# Set the skeleton name
cmds.setAttr(skinJoint + '.otherType', new_joint_name, type='string')
# Set the visibility
cmds.setAttr(skinJoint + '.drawLabel', visibility)
else:
print(str(skinJoint) + ' : ' + str(objTypeName) + ' Skip Command')
# Toggle skinning mute
def toggle_mute_skinning():
msg01 = lang.Lang(
en='No mesh selection.\nWould you like to process all of mesh in this scene?.',
ja=u'選択メッシュがありません。\nシーン内のすべてのメッシュを処理しますか?').output()
msg02 = lang.Lang(en='Yes', ja=u'はい').output()
msg03 = lang.Lang(en='No', ja=u'いいえ').output()
msg04 = lang.Lang(
en='Skinning is disabled',
ja=u'スキニングは無効になりました') .output()
msg05 = lang.Lang(
en='Skinning is enabled',
ja=u'スキニングが有効になりました') .output()
cmds.selectMode(o=True)
objects = cmds.ls(sl=True, l=True)
ad_node = []
for node in objects:
children = cmds.ls(cmds.listRelatives(node, ad=True, f=True), type ='transform')
ad_node += [node]+children
#print len(ad_node)
objects = set(ad_node)
#print len(objects)
if not objects:
all_mesh = cmds.confirmDialog(m=msg01, t='', b= [msg02, msg03], db=msg02, cb=msg03, icn='question',ds=msg03)
if all_mesh == msg02:
objects = cmds.ls(type='transform')
if not objects:
return
mute_flag = 1
skin_list = []
for node in objects:
skin = cmds.ls(cmds.listHistory(node), type='skinCluster')
if not skin:
continue
skin_list.append(skin)
if cmds.getAttr(skin[0]+'.envelope') > 0:
mute_flag = 0
for skin in skin_list:
cmds.setAttr(skin[0]+'.envelope', mute_flag)
if mute_flag == 0:
cmds.confirmDialog(m=msg04)
if mute_flag == 1:
cmds.confirmDialog(m=msg05)
| 43.129555 | 120 | 0.555102 | 1,906 | 21,306 | 6.09234 | 0.23085 | 0.013779 | 0.025835 | 0.012659 | 0.289356 | 0.250172 | 0.218653 | 0.161126 | 0.15329 | 0.15329 | 0 | 0.007091 | 0.344739 | 21,306 | 494 | 121 | 43.129555 | 0.823437 | 0.07045 | 0 | 0.344828 | 0 | 0 | 0.080935 | 0.009721 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.002653 | 0.018568 | null | null | 0.029178 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
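A minimal usage sketch for the helpers in the file above, as one might run them from Maya's script editor; the mesh names are placeholders and the import path assumes the repository's Contents/scripts layout:
from siweighteditor import weight

# Save skin weights for a mesh, then restore them later:
weight.WeightCopyPaste().main(['body_geo'], mode='copy', saveName='body')
weight.WeightCopyPaste().main(['body_geo'], mode='paste', saveName='body')

# Copy skinning to another mesh, auto-binding it from the source influences:
weight.transfer_weight('body_geo', ['body_lod1_geo'])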
53d3daf836c3d211bfbd295aeb46edb04453a89a | 1,350 | py | Python | pyConTextNLP/__init__.py | Blulab-Utah/pyConTextPipeline | d4060f89d54f4db56914832033f8ce589ee3c181 | [
"Apache-2.0"
] | 1 | 2021-04-30T11:18:32.000Z | 2021-04-30T11:18:32.000Z | pyConTextNLP/__init__.py | Blulab-Utah/pyConTextPipeline | d4060f89d54f4db56914832033f8ce589ee3c181 | [
"Apache-2.0"
] | null | null | null | pyConTextNLP/__init__.py | Blulab-Utah/pyConTextPipeline | d4060f89d54f4db56914832033f8ce589ee3c181 | [
"Apache-2.0"
] | 1 | 2020-06-28T01:51:56.000Z | 2020-06-28T01:51:56.000Z | #Copyright 2010 Brian E. Chapman
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""This is an alternative implementation of the pyConText package where I make
use of graphs to indicate relationships between targets and modifiers. Nodes of
the graphs are the targets and modifiers identified in the text; edges of the
graphs are relationships between the targets. This provides for much simpler
code than what exists in the other version of pyConText where each object has a
dictionary of __modifies and __modifiedby that must be kept in sync with each
other.
Also it is hoped that the use of a directional graph could ultimately simplify
our itemData structures as we could chain together items"""
import os
version = {}
with open(os.path.join(os.path.dirname(__file__),"version.py")) as f0:
exec(f0.read(), version)
__version__ = version['__version__']
| 43.548387 | 79 | 0.786667 | 214 | 1,350 | 4.88785 | 0.593458 | 0.057361 | 0.024857 | 0.030593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00878 | 0.156296 | 1,350 | 30 | 80 | 45 | 0.90957 | 0.856296 | 0 | 0 | 0 | 0 | 0.119318 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
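The graph-based design described in the docstring above can be pictured with a small networkx sketch; this illustrates the idea only and is not pyConTextNLP's actual API:
import networkx as nx

g = nx.DiGraph()
g.add_node('pneumonia', category='target')
g.add_node('no evidence of', category='modifier')
# A directed edge replaces the paired __modifies/__modifiedby dictionaries
# that the docstring says were kept in sync by hand in the older version.
g.add_edge('no evidence of', 'pneumonia')
print(list(g.predecessors('pneumonia')))  # -> ['no evidence of']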
53d42695123c2326facf4f279256b1c384089fd3 | 78,742 | py | Python | pypeit/metadata.py | rcooke-ast/PYPIT | 0cb9c4cb422736b855065a35aefc2bdba6d51dd0 | [
"BSD-3-Clause"
] | null | null | null | pypeit/metadata.py | rcooke-ast/PYPIT | 0cb9c4cb422736b855065a35aefc2bdba6d51dd0 | [
"BSD-3-Clause"
] | null | null | null | pypeit/metadata.py | rcooke-ast/PYPIT | 0cb9c4cb422736b855065a35aefc2bdba6d51dd0 | [
"BSD-3-Clause"
] | null | null | null | """
Provides a class that handles the fits metadata required by PypeIt.
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
import os
import io
import string
from copy import deepcopy
import datetime
from IPython import embed
import numpy as np
import yaml
from astropy import table, coordinates, time, units
from pypeit import msgs
from pypeit import utils
from pypeit.core import framematch
from pypeit.core import flux_calib
from pypeit.core import parse
from pypeit.core import meta
from pypeit.io import dict_to_lines
from pypeit.par import PypeItPar
from pypeit.par.util import make_pypeit_file
from pypeit.bitmask import BitMask
# TODO: Turn this into a DataContainer
# Initially tried to subclass this from astropy.table.Table, but that
# proved too difficult.
class PypeItMetaData:
"""
Provides a table and interface to the relevant fits file metadata
used during the reduction.
The content of the fits table is dictated by the header keywords
specified for the provided spectrograph. It is expected that this
table can be used to set the frame type of each file.
The metadata is validated using checks specified by the provided
spectrograph class.
For the data table, one should typically provide either the file
list from which to grab the data from the fits headers or the
data directly. If neither are provided the table is instantiated
without any data.
Args:
spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
The spectrograph used to collect the data save to each file.
The class is used to provide the header keyword data to
include in the table and specify any validation checks.
par (:obj:`pypeit.par.pypeitpar.PypeItPar`):
PypeIt parameters used to set the code behavior.
files (:obj:`str`, :obj:`list`, optional):
The list of files to include in the table.
data (table-like, optional):
The data to include in the table. The type can be anything
allowed by the instantiation of
:class:`astropy.table.Table`.
usrdata (:obj:`astropy.table.Table`, optional):
A user provided set of data used to supplement or overwrite
metadata read from the file headers. The table must have a
`filename` column that is used to match to the metadata
table generated within PypeIt. **Note**: This is ignored if
`data` is also provided. This functionality is only used
when building the metadata from the fits files.
strict (:obj:`bool`, optional):
Function will fault if there is a problem with the reading
the header for any of the provided files; see
:func:`pypeit.spectrographs.spectrograph.get_headarr`. Set
to False to instead report a warning and continue.
Attributes:
spectrograph
(:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
The spectrograph used to collect the data save to each file.
The class is used to provide the header keyword data to
include in the table and specify any validation checks.
par (:class:`pypeit.par.pypeitpar.PypeItPar`):
PypeIt parameters used to set the code behavior. If not
provided, the default parameters specific to the provided
spectrograph are used.
configs (:obj:`dict`):
A dictionary of the unique configurations identified.
type_bitmask (:class:`pypeit.core.framematch.FrameTypeBitMask`):
The bitmask used to set the frame type of each fits file.
calib_bitmask (:class:`BitMask`):
The bitmask used to keep track of the calibration group bits.
table (:class:`astropy.table.Table`):
The table with the relevant metadata for each fits file to
use in the data reduction.
"""
def __init__(self, spectrograph, par, files=None, data=None, usrdata=None,
strict=True):
if data is None and files is None:
# Warn that table will be empty
msgs.warn('Both data and files are None in the instantiation of PypeItMetaData.'
' The table will be empty!')
# Initialize internals
self.spectrograph = spectrograph
self.par = par
if not isinstance(self.par, PypeItPar):
raise TypeError('Input parameter set must be of type PypeItPar.')
self.type_bitmask = framematch.FrameTypeBitMask()
# Build table
self.table = table.Table(data if files is None
else self._build(files, strict=strict,
usrdata=usrdata))
# Merge with user data, if present
if usrdata is not None:
self.merge(usrdata)
# Impose types on specific columns
self._impose_types(['comb_id', 'bkg_id', 'manual'], [int, int, str])
# Initialize internal attributes
self.configs = None
self.calib_bitmask = None
# Initialize columns that the user might add
self.set_user_added_columns()
# Validate instrument name
self.spectrograph.vet_instrument(self.table)
def _impose_types(self, columns, types):
"""
Impose a set of types on certain columns.
.. note::
:attr:`table` is edited in place.
Args:
columns (:obj:`list`):
List of column names
types (:obj:`list`):
List of types
"""
for c,t in zip(columns, types):
if c in self.keys():
self.table[c] = self.table[c].astype(t)
def _build(self, files, strict=True, usrdata=None):
"""
Generate the fitstbl that will be at the heart of PypeItMetaData.
Args:
files (:obj:`str`, :obj:`list`):
One or more files to use to build the table.
strict (:obj:`bool`, optional):
Function will fault if :func:`fits.getheader` fails to
read any of the headers. Set to False to report a
warning and continue.
usrdata (astropy.table.Table, optional):
Parsed for frametype for a few instruments (e.g. VLT)
where meta data may not be required
Returns:
dict: Dictionary with the data to assign to :attr:`table`.
"""
# Allow for single files
_files = files if hasattr(files, '__len__') else [files]
# Build lists to fill
data = {k:[] for k in self.spectrograph.meta.keys()}
data['directory'] = ['None']*len(_files)
data['filename'] = ['None']*len(_files)
# Build the table
for idx, ifile in enumerate(_files):
# User data (for frame type)
if usrdata is None:
usr_row = None
else:
# TODO: This check should be done elsewhere
# Check
if os.path.basename(ifile) != usrdata['filename'][idx]:
msgs.error('File name list does not match user-provided metadata table. See '
'usrdata argument of instantiation of PypeItMetaData.')
usr_row = usrdata[idx]
# Add the directory and file name to the table
data['directory'][idx], data['filename'][idx] = os.path.split(ifile)
if not data['directory'][idx]:
data['directory'][idx] = '.'
# Read the fits headers
headarr = self.spectrograph.get_headarr(ifile, strict=strict)
# Grab Meta
for meta_key in self.spectrograph.meta.keys():
value = self.spectrograph.get_meta_value(headarr, meta_key,
required=strict,
usr_row=usr_row,
ignore_bad_header = self.par['rdx']['ignore_bad_headers'])
if isinstance(value, str) and '#' in value:
value = value.replace('#', '')
msgs.warn('Removing troublesome # character from {0}. Returning {1}.'.format(
meta_key, value))
data[meta_key].append(value)
msgs.info('Added metadata for {0}'.format(os.path.split(ifile)[1]))
# JFH Changed the below to not crash if some files have None in
# their MJD. This is the desired behavior since if there are
# empty or corrupt files we still want this to run.
# Validate, print out a warning if there is problem
try:
time.Time(data['mjd'], format='mjd')
except ValueError:
mjd = np.asarray(data['mjd'])
filenames = np.asarray(data['filename'])
bad_files = filenames[mjd == None]
# Print status message
msg = 'Time invalid for {0} files.\n'.format(len(bad_files))
msg += 'Continuing, but the following frames may be empty or have corrupt headers:\n'
for file in bad_files:
msg += ' {0}\n'.format(file)
msgs.warn(msg)
# Return
return data
# TODO: In this implementation, slicing the PypeItMetaData object
# will return an astropy.table.Table, not a PypeItMetaData object.
def __getitem__(self, item):
return self.table.__getitem__(item)
def __setitem__(self, item, value):
return self.table.__setitem__(item, value)
def __len__(self):
return self.table.__len__()
def __repr__(self):
return self.table._base_repr_(html=False,
descr_vals=['PypeItMetaData:\n',
' spectrograph={0}\n'.format(
self.spectrograph.name),
' length={0}\n'.format(len(self))])
def _repr_html_(self):
return self.table._base_repr_(html=True, max_width=-1,
descr_vals=['PypeItMetaData: spectrograph={0}, length={1}\n'.format(
self.spectrograph.name, len(self))])
@staticmethod
def default_keys():
return [ 'directory', 'filename', 'instrume' ]
def keys(self):
return self.table.keys()
def sort(self, col):
return self.table.sort(col)
def merge(self, usrdata, match_type=True):
"""
Use the provided table to supplement or overwrite the metadata.
If the internal table already contains the column in `usrdata`,
the function will try to match the data type of the `usrdata`
column to the existing data type. If it can't, it will just add
the column anyway, with the type in `usrdata`. You can avoid
this step by setting `match_type=False`.
Args:
usrdata (:obj:`astropy.table.Table`):
A user provided set of data used to supplement or
overwrite metadata read from the file headers. The
table must have a `filename` column that is used to
match to the metadata table generated within PypeIt.
match_type (:obj:`bool`, optional):
Attempt to match the data type in `usrdata` to the type
in the internal table. See above.
Raises:
TypeError:
Raised if `usrdata` is not an `astropy.io.table.Table`
KeyError:
Raised if `filename` is not a key in the provided table.
"""
meta_data_model = meta.get_meta_data_model()
# Check the input
if not isinstance(usrdata, table.Table):
raise TypeError('Must provide an astropy.io.table.Table instance.')
if 'filename' not in usrdata.keys():
raise KeyError('The user-provided table must have \'filename\' column!')
# Make sure the data are correctly ordered
srt = [np.where(f == self.table['filename'])[0][0] for f in usrdata['filename']]
# Convert types if possible
existing_keys = list(set(self.table.keys()) & set(usrdata.keys()))
radec_done = False
if len(existing_keys) > 0 and match_type:
for key in existing_keys:
if len(self.table[key].shape) > 1: # NOT ALLOWED!!
# TODO: This should be converted to an assert statement...
raise ValueError('CODING ERROR: Found high-dimensional column.')
#embed(header='372 of metadata')
elif key in meta_data_model.keys(): # Is this meta data??
dtype = meta_data_model[key]['dtype']
else:
dtype = self.table[key].dtype
# Deal with None's properly
nones = usrdata[key] == 'None'
usrdata[key][nones] = None
# Rest
# Allow for str RA, DEC (backwards compatibility)
if key in ['ra', 'dec'] and not radec_done:
ras, decs = meta.convert_radec(usrdata['ra'][~nones].data,
usrdata['dec'][~nones].data)
usrdata['ra'][~nones] = ras.astype(dtype)
usrdata['dec'][~nones] = decs.astype(dtype)
radec_done = True
else:
usrdata[key][~nones] = usrdata[key][~nones].astype(dtype)
# Include the user data in the table
for key in usrdata.keys():
self.table[key] = usrdata[key][srt]
def finalize_usr_build(self, frametype, setup):
"""
Finalize the build of the table based on user-provided data,
typically pulled from the PypeIt file.
This function:
- sets the frame types based on the provided object
- sets all the configurations to the provided `setup`
- assigns all frames to a single calibration group, if the
'calib' column does not exist
- if the 'comb_id' column does not exist, this sets the
combination groups to be either undefined or to be unique
for each science or standard frame, see
:func:`set_combination_groups`.
.. note::
This should only be run if all files are from a single
instrument configuration. :attr:`table` is modified
in-place.
See also: :func:`pypeit.pypeitsetup.PypeItSetup.run`.
.. todo::
- Why isn't frametype just in the user-provided data? It
may be (see get_frame_types) and I'm just not using it...
Args:
frametype (:obj:`dict`):
A dictionary with the types designated by the user. The
file name and type are expected to be the key and value
of the dictionary, respectively. The number of keys
therefore *must* match the number of files in
:attr:`table`. For frames that have multiple types, the
types should be provided as a string with
comma-separated types.
setup (:obj:`str`):
If the 'setup' columns does not exist, fill the
configuration setup columns with this single identifier.
"""
self.get_frame_types(user=frametype)
# TODO: Add in a call to clean_configurations? I didn't add it
# here, because this method is only called for a preconstructed
# pypeit file, which should nominally follow an execution of
# pypeit_setup. If the user edits back in a frame that has an
# invalid key, at least for now the DEIMOS image reader will
# fault.
self.set_configurations(fill=setup)
self.set_calibration_groups(default=True)
self.set_combination_groups()
def get_configuration(self, indx, cfg_keys=None):
"""
Return the configuration dictionary for a given frame.
This is not the same as the backwards compatible "setup"
dictionary.
Args:
indx (:obj:`int`):
The index of the table row to use to construct the
configuration.
cfg_keys (:obj:`list`, optional):
The list of metadata keys to use to construct the
configuration. If None, the `configuration_keys` of
:attr:`spectrograph` is used.
Returns:
dict: A dictionary with the metadata values from the
selected row.
"""
_cfg_keys = self.spectrograph.configuration_keys() if cfg_keys is None else cfg_keys
return {k:self.table[k][indx] for k in _cfg_keys}
def master_key(self, row, det=1):
"""
Construct the master key for the file in the provided row.
The master key is the combination of the configuration, the
calibration group, and the detector. The configuration ID is
the same as included in the configuration column (A, B, C, etc),
the calibration group is the same as the calibration bit number,
and the detector number is provided as an argument and converted
to a zero-filled string with two digits (the maximum number of
detectors is 99).
Using the calibration bit in the keyword allows MasterFrames to
be used with multiple calibration groups.
Args:
row (:obj:`int`):
The 0-indexed row used to construct the key.
det (:obj:`int`, :obj:`tuple`, optional):
The 1-indexed detector number(s). If a tuple, it must include
detectors designated as a viable mosaic for
:attr:`spectrograph`; see
:func:`~pypeit.spectrographs.spectrograph.Spectrograph.allowed_mosaics`.
Returns:
:obj:`str`: Master key with configuration, calibration group(s), and
detector.
Raises:
PypeItError:
Raised if the 'setup' or 'calibbit' columns
haven't been defined.
"""
if 'setup' not in self.keys() or 'calibbit' not in self.keys():
msgs.error('Cannot provide master key string without setup and calibbit; '
'run set_configurations and set_calibration_groups.')
det_name = self.spectrograph.get_det_name(det)
return f"{self['setup'][row]}_{self['calibbit'][row]}_{det_name}"
def construct_obstime(self, row):
"""
Construct the MJD of when the frame was observed.
.. todo::
- Consolidate with :func:`convert_time` ?
Args:
row (:obj:`int`):
The 0-indexed row of the frame.
Returns:
astropy.time.Time: The MJD of the observation.
"""
return time.Time(self['mjd'][row], format='mjd')
def construct_basename(self, row, obstime=None):
"""
Construct the root name primarily for PypeIt file output.
Args:
row (:obj:`int`):
The 0-indexed row of the frame.
obstime (:class:`astropy.time.Time`, optional):
The MJD of the observation. If None, constructed using
:func:`construct_obstime`.
Returns:
str: The root name for file output.
"""
_obstime = self.construct_obstime(row) if obstime is None else obstime
tiso = time.Time(_obstime, format='isot')
dtime = datetime.datetime.strptime(tiso.value, '%Y-%m-%dT%H:%M:%S.%f')
return '{0}-{1}_{2}_{3}{4}'.format(self['filename'][row].split('.fits')[0],
self['target'][row].replace(" ", ""),
self.spectrograph.camera,
datetime.datetime.strftime(dtime, '%Y%m%dT'),
tiso.value.split("T")[1].replace(':',''))
def get_setup(self, row, det=None, config_only=False):
"""
Construct the setup dictionary.
.. todo::
- This is for backwards compatibility, but we should
consider reformatting it. And it may be something to put
in the relevant spectrograph class.
Args:
row (:obj:`int`):
The 0-indexed row used to construct the setup.
det (:obj:`int`, optional):
The 1-indexed detector to include. If None, all
detectors are included.
config_only (:obj:`bool`, optional):
Just return the dictionary with the configuration, don't
include the top-level designation of the configuration
itself.
Returns:
dict: The pypeit setup dictionary with the default format.
Raises:
PypeItError:
Raised if the 'setup' isn't been defined.
"""
if 'setup' not in self.keys():
msgs.error('Cannot provide instrument setup without \'setup\' column; '
'run set_configurations.')
dispname = 'none' if 'dispname' not in self.keys() else self['dispname'][row]
dispangle = 'none' if 'dispangle' not in self.keys() else self['dispangle'][row]
dichroic = 'none' if 'dichroic' not in self.keys() else self['dichroic'][row]
decker = 'none' if 'decker' not in self.keys() else self['decker'][row]
slitwid = 'none' if 'slitwid' not in self.keys() else self['slitwid'][row]
slitlen = 'none' if 'slitlen' not in self.keys() else self['slitlen'][row]
binning = '1,1' if 'binning' not in self.keys() else self['binning'][row]
skey = 'Setup {}'.format(self['setup'][row])
# Key names *must* match configuration_keys() for spectrographs
setup = {skey:
{'--':
{'disperser': {'dispname': dispname, 'dispangle':dispangle},
'dichroic': dichroic,
'slit': {'decker': decker, 'slitwid':slitwid, 'slitlen':slitlen},
'binning': binning, # PypeIt orientation binning of a science image
}
}
}
#_det = np.arange(self.spectrograph.ndet)+1 if det is None else [det]
#for d in _det:
# setup[skey][str(d).zfill(2)] \
# = {'binning': binning, 'det': d,
# 'namp': self.spectrograph.detector[d-1]['numamplifiers']}
return setup[skey] if config_only else setup
def get_configuration_names(self, ignore=None, return_index=False, configs=None):
"""
Get the list of the unique configuration names.
This provides just the list of setup identifiers ('A', 'B',
etc.) and the row index where it first occurs. This is
different from :func:`unique_configurations` because the latter
determines and provides the configurations themselves.
This is mostly a convenience function for the writing routines.
Args:
ignore (:obj:`list`, optional):
Ignore configurations in the provided list.
return_index (:obj:`bool`, optional):
Return row indices with the first occurrence of these
configurations.
configs (:obj:`str`, :obj:`list`, optional):
One or more strings used to select the configurations
to include in the returned objects. If ``'all'``,
pass back all configurations. Otherwise, only return
the configurations matched to this provided string or
list of strings (e.g., ['A','C']).
Returns:
numpy.array: The list of unique setup names. A second
returned object provides the indices of the first occurrence
of these setups, if requested.
Raises:
PypeItError:
Raised if the 'setup' isn't been defined.
"""
if 'setup' not in self.keys():
msgs.error('Cannot get setup names; run set_configurations.')
# Unique configurations
setups, indx = np.unique(self['setup'], return_index=True)
if ignore is not None:
# Remove the selected configurations to ignore
rm = np.logical_not(np.isin(setups, ignore))
setups = setups[rm]
indx = indx[rm]
# Restrict
_configs = None if configs is None else np.atleast_1d(configs)
# TODO: Why do we need to specify 'all' here? Can't `configs is
# None` mean that you want all the configurations? Or can we
# make the default 'all'?
if configs is not None and 'all' not in _configs:
use = np.isin(setups, _configs)
setups = setups[use]
indx = indx[use]
return setups, indx if return_index else setups
def _get_cfgs(self, copy=False, rm_none=False):
"""
Convenience method to return :attr:`configs` with possible
alterations.
This method *should not* be called by any method outside of
this class; use :func:`unique_configurations` instead.
Args:
copy (:obj:`bool`, optional):
Return a deep copy of :attr:`configs` instead of the
object itself.
rm_none (:obj:`bool`, optional):
Remove any configurations set to 'None'. If copy is
True, this is done *after* :attr:`configs` is copied
to a new dictionary.
Returns:
:obj:`dict`: A nested dictionary, one dictionary per
configuration with the associated metadata for each.
"""
_cfg = deepcopy(self.configs) if copy else self.configs
if rm_none and 'None' in _cfg.keys():
del _cfg['None']
return _cfg
def unique_configurations(self, force=False, copy=False, rm_none=False):
"""
Return the unique instrument configurations.
If run before the ``'setup'`` column is initialized, this function
determines the unique instrument configurations by finding
unique combinations of the items in the metadata table listed by
the spectrograph ``configuration_keys`` method.
If run after the ``'setup'`` column has been set, this simply
constructs the configuration dictionary using the unique
configurations in that column.
This is used to set the internal :attr:`configs`. If this
attribute is not None, this function simply returns
:attr:`config` (cf. ``force``).
.. warning::
Any frame types returned by the
:func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames`
method for :attr:`spectrograph` will be ignored in the
construction of the unique configurations. If
:func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames`
does not return None and the frame types have not yet
been defined (see :func:`get_frame_types`), this method
will fault!
Args:
force (:obj:`bool`, optional):
Force the configurations to be redetermined. Otherwise
the configurations are only determined if
:attr:`configs` has not yet been defined.
copy (:obj:`bool`, optional):
Return a deep copy of :attr:`configs` instead of the
object itself.
rm_none (:obj:`bool`, optional):
Remove any configurations set to 'None'. If copy is
True, this is done *after* :attr:`configs` is copied
to a new dictionary.
Returns:
:obj:`dict`: A nested dictionary, one dictionary per
configuration with the associated metadata for each.
Raises:
PypeItError:
Raised if there are list of frame types to ignore but
the frame types have not been defined yet.
"""
if self.configs is not None and not force:
return self._get_cfgs(copy=copy, rm_none=rm_none)
if 'setup' in self.keys():
msgs.info('Setup column already set. Finding unique configurations.')
uniq, indx = np.unique(self['setup'], return_index=True)
ignore = uniq == 'None'
if np.sum(ignore) > 0:
msgs.warn('Ignoring {0} frames with configuration set to None.'.format(
np.sum(ignore)))
self.configs = {}
for i in range(len(uniq)):
if ignore[i]:
continue
self.configs[uniq[i]] = self.get_configuration(indx[i])
msgs.info('Found {0} unique configurations.'.format(len(self.configs)))
return self._get_cfgs(copy=copy, rm_none=rm_none)
msgs.info('Using metadata to determine unique configurations.')
# If the frame types have been set, ignore anything listed in
# the ignore_frames
indx = np.arange(len(self))
ignore_frames = self.spectrograph.config_independent_frames()
if ignore_frames is not None:
if 'frametype' not in self.keys():
msgs.error('To ignore frames, types must have been defined; run get_frame_types.')
ignore_frames = list(ignore_frames.keys())
msgs.info('Unique configurations ignore frames with type: {0}'.format(ignore_frames))
use = np.ones(len(self), dtype=bool)
for ftype in ignore_frames:
use &= np.logical_not(self.find_frames(ftype))
indx = indx[use]
if len(indx) == 0:
msgs.error('No frames to use to define configurations!')
# Get the list of keys to use
cfg_keys = self.spectrograph.configuration_keys()
# Configuration identifiers are iterations through the
# upper-case letters: A, B, C, etc.
double_alphabet = [str_i + str_j for str_i in string.ascii_uppercase for str_j in string.ascii_uppercase]
cfg_iter = list(string.ascii_uppercase) + double_alphabet
cfg_indx = 0
# TODO: Placeholder: Allow an empty set of configuration keys
# meaning that the instrument setup has only one configuration.
if len(cfg_keys) == 0:
self.configs = {}
self.configs[cfg_iter[cfg_indx]] = {}
msgs.info('All files assumed to be from a single configuration.')
return self._get_cfgs(copy=copy, rm_none=rm_none)
# Use the first file to set the first unique configuration
self.configs = {}
self.configs[cfg_iter[cfg_indx]] = self.get_configuration(indx[0], cfg_keys=cfg_keys)
cfg_indx += 1
# Check if any of the other files show a different
# configuration.
for i in indx[1:]:
j = 0
for c in self.configs.values():
if row_match_config(self.table[i], c, self.spectrograph):
break
j += 1
unique = j == len(self.configs)
if unique:
if cfg_indx == len(cfg_iter):
msgs.error('Cannot assign more than {0} configurations!'.format(len(cfg_iter)))
self.configs[cfg_iter[cfg_indx]] = self.get_configuration(i, cfg_keys=cfg_keys)
cfg_indx += 1
msgs.info('Found {0} unique configurations.'.format(len(self.configs)))
return self._get_cfgs(copy=copy, rm_none=rm_none)
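# For illustration (hypothetical keys and values): the resulting attribute
# can look like {'A': {'dispname': '600ZD', 'decker': 'LongMirr'}, 'B': {...}},
# one entry per unique combination of the spectrograph's configuration keys.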
def set_configurations(self, configs=None, force=False, fill=None):
"""
Assign each frame to a configuration (setup) and include it
in the metadata table.
The internal table is edited *in place*. If the 'setup'
column already exists, the configurations are **not** reset
unless you call the function with ``force=True``.
Args:
configs (:obj:`dict`, optional):
A nested dictionary, one dictionary per configuration
with the associated values of the metadata associated
with each configuration. The metadata keywords in the
dictionary should be the same as in the table, and the
keywords used to set the configuration should be the
same as returned by the spectrograph
`configuration_keys` method. The latter is not checked.
If None, this is set by :func:`unique_configurations`.
force (:obj:`bool`, optional):
Force the configurations to be reset.
fill (:obj:`str`, optional):
If the 'setup' column does not exist, fill the
configuration setup columns with this single identifier.
Ignores other inputs.
Raises:
PypeItError:
Raised if none of the keywords in the provided
configuration match with the metadata keywords. Also
raised when some frames cannot be assigned to a
configuration, the spectrograph defined frames that
have been ignored in the determination of the unique
configurations, but the frame types have not been set
yet.
"""
# Configurations have already been set
if 'setup' in self.keys() and not force:
return
if 'setup' not in self.keys() and fill is not None:
self['setup'] = fill
return
_configs = self.unique_configurations() if configs is None else configs
for k, cfg in _configs.items():
if len(set(cfg.keys()) - set(self.keys())) > 0:
msgs.error('Configuration {0} defined using unavailable keywords!'.format(k))
self.table['setup'] = 'None'
nrows = len(self)
for i in range(nrows):
for d, cfg in _configs.items():
if row_match_config(self.table[i], cfg, self.spectrograph):
self.table['setup'][i] = d
# Check if any of the configurations are not set
not_setup = self.table['setup'] == 'None'
if not np.any(not_setup):
# All are set, so we're done
return
# Some frame types may have been ignored
ignore_frames = self.spectrograph.config_independent_frames()
if ignore_frames is None:
# Nope, we're still done
return
# At this point, we need the frame type to continue
if 'frametype' not in self.keys():
msgs.error('To account for ignored frames, types must have been defined; run '
'get_frame_types.')
# For each configuration, determine if any of the frames with
# the ignored frame types should be assigned to it:
for cfg_key in _configs.keys():
in_cfg = self.table['setup'] == cfg_key
for ftype, metakey in ignore_frames.items():
# TODO: For now, use this assert to check that the
# metakey is either not set or a string
assert metakey is None or isinstance(metakey, str), \
'CODING ERROR: metadata keywords set by config_independent_frames are not ' \
'correctly defined for {0}; values must be None or a string.'.format(
self.spectrograph.__class__.__name__)
# Get the list of frames of this type without a
# configuration
indx = (self.table['setup'] == 'None') & self.find_frames(ftype)
if not np.any(indx):
continue
if metakey is None:
# No matching meta data defined, so just set all
# the frames to this (first) configuration
self.table['setup'][indx] = cfg_key
continue
# Find the unique values of meta for this configuration
uniq_meta = np.unique(self.table[metakey][in_cfg].data)
# Warn the user that the matching meta values are not
# unique for this configuration.
if uniq_meta.size != 1:
msgs.warn('When setting the instrument configuration for {0} '.format(ftype)
+ 'frames, configuration {0} does not have unique '.format(cfg_key)
+ '{0} values.'.format(metakey))
# Find the frames of this type that match any of the
# meta data values
indx &= np.isin(self.table[metakey], uniq_meta)
self.table['setup'][indx] = cfg_key
def clean_configurations(self):
"""
Ensure that configuration-defining keywords all have values
that will yield good PypeIt reductions. Any frames that do
not are removed from :attr:`table`, meaning this method may
modify that attribute directly.
The valid values for configuration keys is set by
:func:`~pypeit.spectrographs.spectrograph.Spectrograph.valid_configuration_values`.
"""
cfg_limits = self.spectrograph.valid_configuration_values()
if cfg_limits is None:
# No values specified, so we're done
return
good = np.ones(len(self), dtype=bool)
for key in cfg_limits.keys():
# NOTE: For now, check that the configuration values were
# correctly assigned in the spectrograph class definition.
# This should probably go somewhere else or just removed.
assert isinstance(cfg_limits[key], list), \
'CODING ERROR: valid_configuration_values is not correctly defined ' \
'for {0}; values must be a list.'.format(self.spectrograph.__class__.__name__)
# Check that the metadata are valid for this column.
indx = np.isin(self[key], cfg_limits[key])
if not np.all(indx):
msgs.warn('Found frames with invalid {0}.'.format(key))
good &= indx
if np.all(good):
# All values good, so we're done
return
# Alert the user that some of the frames are going to be
# removed
msg = 'The following frames have configurations that cannot be reduced by PypeIt' \
' and will be removed from the metadata table (pypeit file):\n'
indx = np.where(np.logical_not(good))[0]
for i in indx:
msg += ' {0}\n'.format(self['filename'][i])
msgs.warn(msg)
# And remove 'em
self.table = self.table[good]
def _set_calib_group_bits(self):
"""
Set the calibration group bit based on the string values of the
'calib' column.
"""
# Find the number groups by searching for the maximum number
# provided, regardless of whether or not a science frame is
# assigned to that group.
ngroups = 0
for i in range(len(self)):
if self['calib'][i] in ['all', 'None']:
# No information, keep going
continue
# Convert to a list of numbers
l = np.amax([ 0 if len(n) == 0 else int(n)
for n in self['calib'][i].replace(':',',').split(',')])
# Check against current maximum
ngroups = max(l+1, ngroups)
# Define the bitmask and initialize the bits
self.calib_bitmask = BitMask(np.arange(ngroups))
self['calibbit'] = 0
# Set the calibration bits
for i in range(len(self)):
# Convert the string to the group list
grp = parse.str2list(self['calib'][i], ngroups)
if grp is None:
# No group selected
continue
# Assign the group; ensure the integers are unique
self['calibbit'][i] = self.calib_bitmask.turn_on(self['calibbit'][i], grp)
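# For illustration: a 'calib' entry of '0,1' turns on bits 0 and 1 for that
# frame, while 'all' assigns every group; parse.str2list is assumed to also
# expand ':' ranges (e.g. '0:3'), consistent with the replace(':', ',') above.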
def _check_calib_groups(self):
"""
Check that the calibration groups are valid.
This currently only checks that the science frames are
associated with one calibration group.
TODO: Is this appropriate for NIR data?
"""
is_science = self.find_frames('science')
for i in range(len(self)):
if not is_science[i]:
continue
if len(self.calib_bitmask.flagged_bits(self['calibbit'][i])) > 1:
msgs.error('Science frames can only be assigned to a single calibration group.')
@property
def n_calib_groups(self):
"""Return the number of calibration groups."""
return None if self.calib_bitmask is None else self.calib_bitmask.nbits
def set_calibration_groups(self, global_frames=None, default=False, force=False):
"""
Group calibration frames into sets.
Requires the 'setup' column to have been defined. For now this
is a simple grouping of frames with the same configuration.
.. todo::
- Maintain a detailed description of the logic.
The 'calib' column has a string type to make sure that it
matches with what can be read from the pypeit file. The
'calibbit' column is actually what is used to determine the
calibration group of each frame; see :attr:`calib_bitmask`.
Args:
global_frames (:obj:`list`, optional):
A list of strings with the frame types to use in all
calibration groups (e.g., ['bias', 'dark']).
default (:obj:`bool`, optional):
If the 'calib' column is not present, set a single
calibration group *for all rows*.
force (:obj:`bool`, optional):
Force the calibration groups to be reconstructed if
the 'calib' column already exists.
Raises:
PypeItError:
Raised if 'setup' column is not defined, or if
`global_frames` is provided but the frame types have not
been defined yet.
"""
# Set the default if requested and 'calib' doesn't exist yet
        if 'calib' not in self.keys() and default:
            self['calib'] = '0'
            # Make sure a stale calibbit column does not persist; deleting
            # it unconditionally would make the early-return checks below
            # unreachable.
            if 'calibbit' in self.keys():
                del self['calibbit']
# Groups have already been set
if 'calib' in self.keys() and 'calibbit' in self.keys() and not force:
return
# Groups have been set but the bits have not (likely because the
# data was read from a pypeit file)
if 'calib' in self.keys() and 'calibbit' not in self.keys() and not force:
self._set_calib_group_bits()
self._check_calib_groups()
return
# TODO: The rest of this just nominally sets the calibration
# group based on the configuration. This will change!
# The configuration must be present to determine the calibration
# group
if 'setup' not in self.keys():
msgs.error('Must have defined \'setup\' column first; try running set_configurations.')
configs = np.unique(self['setup'].data).tolist()
if 'None' in configs:
configs.remove('None') # Ignore frames with undefined configurations
n_cfg = len(configs)
# TODO: Science frames can only have one calibration group
# Assign everything from the same configuration to the same
# calibration group; this needs to have dtype=object, otherwise
# any changes to the strings will be truncated at 4 characters.
self.table['calib'] = np.full(len(self), 'None', dtype=object)
for i in range(n_cfg):
self['calib'][(self['setup'] == configs[i]) & (self['framebit'] > 0)] = str(i)
# Allow some frame types to be used in all calibration groups
# (like biases and darks)
if global_frames is not None:
if 'frametype' not in self.keys():
msgs.error('To set global frames, types must have been defined; '
'run get_frame_types.')
calibs = '0' if n_cfg == 1 else ','.join(np.arange(n_cfg).astype(str))
for ftype in global_frames:
indx = np.where(self.find_frames(ftype))[0]
for i in indx:
self['calib'][i] = calibs
# Set the bits based on the string representation of the groups
self._set_calib_group_bits()
# Check that the groups are valid
self._check_calib_groups()
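    # Typical usage (sketch; 'fitstbl' is an illustrative name for a
    # PypeItMetaData instance): after set_configurations() and
    # get_frame_types(), share biases and darks across all groups with
    #
    #   fitstbl.set_calibration_groups(global_frames=['bias', 'dark'])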
def find_frames(self, ftype, calib_ID=None, index=False):
"""
Find the rows with the associated frame type.
        If a calibration group is provided, the frames must also be
        associated with that group.
Args:
ftype (str):
The frame type identifier. See the keys for
:class:`pypeit.core.framematch.FrameTypeBitMask`. If
set to the string 'None', this returns all frames
without a known type.
calib_ID (:obj:`int`, optional):
Index of the calibration group that it must match. If None,
any row of the specified frame type is included.
index (:obj:`bool`, optional):
Return an array of 0-indexed indices instead of a
boolean array.
Returns:
numpy.ndarray: A boolean array, or an integer array if
index=True, with the rows that contain the frames of the
requested type.
Raises:
PypeItError:
Raised if the `framebit` column is not set in the table.
"""
if 'framebit' not in self.keys():
msgs.error('Frame types are not set. First run get_frame_types.')
if ftype == 'None':
return self['framebit'] == 0
# Select frames
indx = self.type_bitmask.flagged(self['framebit'], ftype)
if calib_ID is not None:
# Select frames in the same calibration group
indx &= self.find_calib_group(calib_ID)
# Return
return np.where(indx)[0] if index else indx
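    # Usage sketch (names are illustrative):
    #
    #   is_arc = fitstbl.find_frames('arc')                 # boolean mask
    #   arc_rows = fitstbl.find_frames('arc', index=True)   # integer rows
    #   grp0_arcs = fitstbl.find_frames('arc', calib_ID=0)  # one calib group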
def find_frame_files(self, ftype, calib_ID=None):
"""
Return the list of files with a given frame type.
        The frames must also match the calibration group, if it is
        provided.
Args:
ftype (str):
The frame type identifier. See the keys for
:class:`pypeit.core.framematch.FrameTypeBitMask`.
calib_ID (:obj:`int`, optional):
Index of the calibration group that it must match. If None,
any row of the specified frame type is included.
        Returns:
            list: List of file paths that match the frame type and
            calibration group, if the latter is provided.
"""
return self.frame_paths(self.find_frames(ftype, calib_ID=calib_ID))
def frame_paths(self, indx):
"""
Return the full paths to one or more frames.
Args:
indx (:obj:`int`, array-like):
One or more 0-indexed rows in the table with the frames
to return. Can be an array of indices or a boolean
array of the correct length.
Returns:
list: List of the full paths of one or more frames.
"""
if isinstance(indx, (int,np.integer)):
return os.path.join(self['directory'][indx], self['filename'][indx])
return [os.path.join(d,f) for d,f in zip(self['directory'][indx], self['filename'][indx])]
def set_frame_types(self, type_bits, merge=True):
"""
Set and return a Table with the frame types and bits.
Args:
type_bits (numpy.ndarray):
Integer bitmask with the frame types. The length must
match the existing number of table rows.
merge (:obj:`bool`, optional):
Merge the types and bits into the existing table. This
will *overwrite* any existing columns.
Returns:
`astropy.table.Table`: Table with two columns, the frame
type name and bits.
"""
# Making Columns to pad string array
ftype_colmA = table.Column(self.type_bitmask.type_names(type_bits), name='frametype')
# KLUDGE ME
#
# TODO: It would be good to get around this. Is it related to
# this change?
# http://docs.astropy.org/en/stable/table/access_table.html#bytestring-columns-in-python-3
#
# See also:
#
# http://docs.astropy.org/en/stable/api/astropy.table.Table.html#astropy.table.Table.convert_bytestring_to_unicode
#
# Or we can force type_names() in bitmask to always return the
# correct type...
if int(str(ftype_colmA.dtype)[2:]) < 9:
ftype_colm = table.Column(self.type_bitmask.type_names(type_bits), dtype='U9',
name='frametype')
else:
ftype_colm = ftype_colmA
fbits_colm = table.Column(type_bits, name='framebit')
t = table.Table([ftype_colm, fbits_colm])
if merge:
self['frametype'] = t['frametype']
self['framebit'] = t['framebit']
return t
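    # Illustrative note on the 'U9' kludge above (values hypothetical):
    # numpy fixed-width string columns silently truncate longer
    # assignments, e.g.
    #
    #   >>> col = np.array(['arc', 'bias'])     # dtype is '<U4'
    #   >>> col[0] = 'arc,tilt'
    #   >>> col[0]
    #   'arc,'
    #
    # so the frametype column must be wide enough for multi-type strings.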
def edit_frame_type(self, indx, frame_type, append=False):
"""
Edit the frame type by hand.
Args:
indx (:obj:`int`):
The 0-indexed row in the table to edit
frame_type (:obj:`str`, :obj:`list`):
One or more frame types to append/overwrite.
append (:obj:`bool`, optional):
                Append the frame type.  If False, all existing frame
                types are overwritten by the provided type.
"""
if not append:
self['framebit'][indx] = 0
self['framebit'][indx] = self.type_bitmask.turn_on(self['framebit'][indx], flag=frame_type)
self['frametype'][indx] = self.type_bitmask.type_names(self['framebit'][indx])
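    # Usage sketch (row index and types are illustrative): force row 3 to
    # be typed as both arc and tilt, replacing the automated assignment:
    #
    #   fitstbl.edit_frame_type(3, ['arc', 'tilt'])
    #
    # With append=True, the new types are added to the existing bits
    # instead of replacing them.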
def get_frame_types(self, flag_unknown=False, user=None, merge=True):
"""
Generate a table of frame types from the input metadata object.
.. todo::
- Here's where we could add a SPIT option.
Args:
flag_unknown (:obj:`bool`, optional):
                Instead of crashing out if there are unidentified files,
                leave them without a type and continue.
user (:obj:`dict`, optional):
A dictionary with the types designated by the user. The
file name and type are expected to be the key and value
of the dictionary, respectively. The number of keys
therefore *must* match the number of files in
:attr:`table`. For frames that have multiple types, the
types should be provided as a string with
comma-separated types.
merge (:obj:`bool`, optional):
                Merge the frame typing into the existing table.
Returns:
:obj:`astropy.table.Table`: A Table with two columns, the
type names and the type bits. See
:class:`pypeit.core.framematch.FrameTypeBitMask` for the
allowed frame types.
"""
# Checks
if 'frametype' in self.keys() or 'framebit' in self.keys():
msgs.warn('Removing existing frametype and framebit columns.')
if 'frametype' in self.keys():
del self.table['frametype']
if 'framebit' in self.keys():
del self.table['framebit']
# # TODO: This needs to be moved into each Spectrograph
# if useIDname and 'idname' not in self.keys():
# raise ValueError('idname is not set in table; cannot use it for file typing.')
# Start
msgs.info("Typing files")
type_bits = np.zeros(len(self), dtype=self.type_bitmask.minimum_dtype())
# Use the user-defined frame types from the input dictionary
if user is not None:
if len(user.keys()) != len(self):
raise ValueError('The user-provided dictionary does not match table length.')
msgs.info('Using user-provided frame types.')
for ifile,ftypes in user.items():
indx = self['filename'] == ifile
type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftypes.split(','))
return self.set_frame_types(type_bits, merge=merge)
# Loop over the frame types
for i, ftype in enumerate(self.type_bitmask.keys()):
# # Initialize: Flag frames with the correct ID name or start by
# # flagging all as true
# indx = self['idname'] == self.spectrograph.idname(ftype) if useIDname \
# else np.ones(len(self), dtype=bool)
# Include a combination of instrument-specific checks using
# combinations of the full set of metadata
exprng = self.par['scienceframe']['exprng'] if ftype == 'science' \
else self.par['calibrations']['{0}frame'.format(ftype)]['exprng']
            # TODO: Use & or | ?  Using idname above gets overwritten by
            # this if the frames fail to meet the other checks in this call.
# indx &= self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng)
indx = self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng)
# Turn on the relevant bits
type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftype)
# Find the nearest standard star to each science frame
# TODO: Should this be 'standard' or 'science' or both?
if 'ra' not in self.keys() or 'dec' not in self.keys():
msgs.warn('Cannot associate standard with science frames without sky coordinates.')
else:
# TODO: Do we want to do this here?
            # NOTE: Index into type_bits directly; reassigning a loop
            # variable would not propagate changes back to the array.
            indx = np.where(self.type_bitmask.flagged(type_bits, flag='standard'))[0]
            for i in indx:
                f, ra, dec = self['filename'][i], self['ra'][i], self['dec'][i]
                if ra == 'None' or dec == 'None':
                    msgs.warn('RA and DEC must not be None for file:' + msgs.newline() + f)
                    msgs.warn('The above file could be a twilight flat frame that was'
                              + msgs.newline() + 'missed by the automatic identification.')
                    type_bits[i] = self.type_bitmask.turn_off(type_bits[i], flag='standard')
                    continue
                # If an object exists within 20 arcmins of a listed standard,
                # then it is probably a standard star
                foundstd = flux_calib.find_standard_file(ra, dec, check=True)
                type_bits[i] = self.type_bitmask.turn_off(
                        type_bits[i], flag='science' if foundstd else 'standard')
# Find the files without any types
indx = np.logical_not(self.type_bitmask.flagged(type_bits))
if np.any(indx):
msgs.info("Couldn't identify the following files:")
for f in self['filename'][indx]:
msgs.info(f)
if not flag_unknown:
msgs.error("Check these files before continuing")
# Finish up (note that this is called above if user is not None!)
msgs.info("Typing completed!")
return self.set_frame_types(type_bits, merge=merge)
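    # Usage sketch (file names are hypothetical, and the dictionary must
    # cover every file in the table): override the automated typing with
    #
    #   fitstbl.get_frame_types(user={'b1.fits': 'bias',
    #                                 's1.fits': 'science,standard'})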
def set_pypeit_cols(self, write_bkg_pairs=False, write_manual=False):
"""
Generate the list of columns to be included in the fitstbl
(nearly the complete list).
Args:
write_bkg_pairs (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for calib, comb_id
and bkg_id
write_manual (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for manual extraction
Returns:
            :obj:`list`: List of column names to be used in the fits
            table.
"""
# Columns for output
columns = self.spectrograph.pypeit_file_keys()
extras = []
# comb, bkg columns
if write_bkg_pairs:
extras += ['calib', 'comb_id', 'bkg_id']
# manual
if write_manual:
extras += ['manual']
for key in extras:
if key not in columns:
columns += [key]
# Take only those present
output_cols = np.array(columns)
return output_cols[np.isin(output_cols, self.keys())].tolist()
def set_combination_groups(self, assign_objects=True):
"""
Set combination groups.
.. note::
:attr:`table` is edited in place.
This function can be used to initialize the combination group
and background group columns, and/or to initialize the combination
groups to the set of objects (science or standard frames) to a
unique integer.
If the 'comb_id' or 'bkg_id' columns do not exist, they're set
to -1.
Args:
assign_objects (:obj:`bool`, optional):
                If all of the 'comb_id' values are less than 0 (meaning
                they're unassigned), the combination groups are set to
                a unique integer for each standard and science frame.
"""
if 'comb_id' not in self.keys():
self['comb_id'] = -1
if 'bkg_id' not in self.keys():
self['bkg_id'] = -1
if assign_objects and np.all(self['comb_id'] < 0):
# find_frames will throw an exception if framebit is not
# set...
sci_std_idx = np.where(np.any([self.find_frames('science'),
self.find_frames('standard')], axis=0))[0]
self['comb_id'][sci_std_idx] = np.arange(len(sci_std_idx), dtype=int) + 1
def set_user_added_columns(self):
"""
Set columns that the user *might* add
.. note::
:attr:`table` is edited in place.
This function can be used to initialize columns
that the user might add
"""
if 'manual' not in self.keys():
self['manual'] = ''
def write_sorted(self, ofile, overwrite=True, ignore=None,
write_bkg_pairs=False, write_manual=False):
"""
Write the sorted file.
The sorted file lists all the unique instrument configurations
(setups) and the frames associated with each configuration. The
output data table is identical to the pypeit file output.
.. todo::
- This is for backwards compatibility, but we should
consider reformatting/removing it.
Args:
ofile (:obj:`str`):
Name for the output sorted file.
overwrite (:obj:`bool`, optional):
Overwrite any existing file with the same name.
ignore (:obj:`list`, optional):
Ignore configurations in the provided list.
write_bkg_pairs (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for calib, comb_id
and bkg_id
write_manual (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for manual extraction
Raises:
PypeItError:
                Raised if the 'setup' column hasn't been defined.
"""
if 'setup' not in self.keys():
msgs.error('Cannot write sorted instrument configuration table without \'setup\' '
'column; run set_configurations.')
if os.path.isfile(ofile) and not overwrite:
            msgs.error('{0} already exists. Use overwrite=True to overwrite.'.format(ofile))
# Grab output columns
output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs,
write_manual=write_manual)
cfgs = self.unique_configurations(copy=ignore is not None)
if ignore is not None:
for key in cfgs.keys():
if key in ignore:
del cfgs[key]
# Construct file
ff = open(ofile, 'w')
for setup in cfgs.keys():
# Get the subtable of frames taken in this configuration
indx = self['setup'] == setup
if not np.any(indx):
continue
subtbl = self.table[output_cols][indx]
# Write the file
ff.write('##########################################################\n')
ff.write('Setup {:s}\n'.format(setup))
ff.write('\n'.join(dict_to_lines(cfgs[setup], level=1)) + '\n')
ff.write('#---------------------------------------------------------\n')
mjd = subtbl['mjd'].copy()
# Deal with possibly None mjds if there were corrupt header cards
mjd[mjd == None] = -99999.0
isort = np.argsort(mjd)
subtbl = subtbl[isort]
subtbl.write(ff, format='ascii.fixed_width')
ff.write('##end\n')
ff.close()
# TODO: Do we need a calib file?
def write_calib(self, ofile, overwrite=True, ignore=None):
"""
Write the calib file.
The calib file provides the unique instrument configurations
(setups) and the association of each frame from that
configuration with a given calibration group.
.. todo::
- This is for backwards compatibility, but we should
consider reformatting/removing it.
- This is complicated by allowing some frame types to have
no association with an instrument configuration
- This is primarily used for QA now; but could probably use the pypeit file instead
Args:
ofile (:obj:`str`):
Name for the output sorted file.
overwrite (:obj:`bool`, optional):
Overwrite any existing file with the same name.
ignore (:obj:`list`, optional):
Ignore calibration groups in the provided list.
Raises:
PypeItError:
Raised if the 'setup' or 'calibbit' columns haven't been
defined.
"""
if 'setup' not in self.keys() or 'calibbit' not in self.keys():
msgs.error('Cannot write calibration groups without \'setup\' and \'calibbit\' '
'columns; run set_configurations and set_calibration_groups.')
if os.path.isfile(ofile) and not overwrite:
            msgs.error('{0} already exists. Use overwrite=True to overwrite.'.format(ofile))
# Construct the setups dictionary
cfg = self.unique_configurations(copy=True, rm_none=True)
# TODO: We should edit the relevant follow-on code so that we
# don't have to do these gymnastics. Or better yet, just stop
# producing/using the *.calib file.
_cfg = {}
for setup in cfg.keys():
_cfg[setup] = {}
_cfg[setup]['--'] = deepcopy(cfg[setup])
cfg = _cfg
# Iterate through the calibration bit names as these are the root of the
# MasterFrames and QA
for icbit in np.unique(self['calibbit'].data):
cbit = int(icbit) # for yaml
# Skip this group
if ignore is not None and cbit in ignore:
continue
# Find the frames in this group
#in_group = self.find_calib_group(i)
in_cbit = self['calibbit'] == cbit
# Find the unique configurations in this group, ignoring any
# undefined ('None') configurations
#setup = np.unique(self['setup'][in_group]).tolist()
setup = np.unique(self['setup'][in_cbit]).tolist()
if 'None' in setup:
setup.remove('None')
            # Make sure that each calibration group only contains frames
            # from a single configuration
if len(setup) != 1:
msgs.error('Each calibration group must be from one and only one instrument '
'configuration with a valid letter identifier; i.e., the '
'configuration cannot be None.')
# Find the frames of each type in this group
cfg[setup[0]][cbit] = {}
for key in self.type_bitmask.keys():
#ftype_in_group = self.find_frames(key) & in_group
ftype_in_group = self.find_frames(key) & in_cbit
cfg[setup[0]][cbit][key] = [ os.path.join(d,f)
for d,f in zip(self['directory'][ftype_in_group],
self['filename'][ftype_in_group])]
# Write it
ff = open(ofile, 'w')
ff.write(yaml.dump(utils.yamlify(cfg)))
ff.close()
def write_pypeit(self, output_path=None, cfg_lines=None,
write_bkg_pairs=False, write_manual=False,
configs=None):
"""
Write a pypeit file in data-table format.
The pypeit file is the main configuration file for PypeIt,
configuring the control-flow and algorithmic parameters and
listing the data files to read. This function writes the
columns selected by the
:func:`pypeit.spectrographs.spectrograph.Spectrograph.pypeit_file_keys`,
which can be specific to each instrument.
Args:
output_path (:obj:`str`, optional):
Root path for the output pypeit files. If None, set
to current directory. If the output directory does
not exist, it is created.
cfg_lines (:obj:`list`, optional):
The list of configuration lines to include in the file.
If None are provided, the vanilla configuration is
included.
write_bkg_pairs (:obj:`bool`, optional):
When constructing the
:class:`pypeit.metadata.PypeItMetaData` object, include
two columns called `comb_id` and `bkg_id` that identify
object and background frame pairs.
write_manual (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for manual extraction
configs (:obj:`str`, :obj:`list`, optional):
One or more strings used to select the configurations
to include in the returned objects. If ``'all'``,
pass back all configurations. Otherwise, only return
the configurations matched to this provided string or
list of strings (e.g., ['A','C']). See
:attr:`configs`.
Raises:
PypeItError:
                Raised if the 'setup' column isn't defined.
Returns:
:obj:`list`: List of ``PypeIt`` files generated.
"""
# Set output path
if output_path is None:
output_path = os.getcwd()
# Find unique configurations, always ignoring any 'None'
# configurations...
cfg = self.unique_configurations(copy=True, rm_none=True)
# Get the setups to write
if configs is None or configs == 'all' or configs == ['all']:
cfg_keys = list(cfg.keys())
else:
_configs = configs if isinstance(configs, list) else [configs]
cfg_keys = [key for key in cfg.keys() if key in _configs]
if len(cfg_keys) == 0:
msgs.error('No setups to write!')
# Grab output columns
output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs,
write_manual=write_manual)
# Write the pypeit files
ofiles = [None]*len(cfg_keys)
for j,setup in enumerate(cfg_keys):
# Create the output directory
root = '{0}_{1}'.format(self.spectrograph.name, setup)
odir = os.path.join(output_path, root)
if not os.path.isdir(odir):
os.makedirs(odir)
# Create the output file name
ofiles[j] = os.path.join(odir, '{0}.pypeit'.format(root))
# Get the setup lines
setup_lines = dict_to_lines({'Setup {0}'.format(setup):
utils.yamlify(cfg[setup])}, level=1)
# Get the paths
in_cfg = self['setup'] == setup
if not np.any(in_cfg):
continue
paths = np.unique(self['directory'][in_cfg]).tolist()
# Get the data lines
subtbl = self.table[output_cols][in_cfg]
subtbl.sort(['frametype','filename'])
with io.StringIO() as ff:
subtbl.write(ff, format='ascii.fixed_width')
data_lines = ff.getvalue().split('\n')[:-1]
# Write the file
make_pypeit_file(ofiles[j], self.spectrograph.name, [], cfg_lines=cfg_lines,
setup_lines=setup_lines, sorted_files=data_lines, paths=paths)
# Return
return ofiles
def write(self, output=None, rows=None, columns=None, sort_col=None, overwrite=False,
header=None):
"""
Write the metadata either to a file or to the screen.
The method allows you to set the columns to print and which column to
use for sorting.
Args:
output (:obj:`str`, optional):
Output signature or file name. If None, the table contents
are printed to the screen. If ``'table'``, the table that
would have been printed/written to disk is returned.
Otherwise, the string is interpreted as the name of an ascii
file to which to write the table contents.
rows (`numpy.ndarray`_, optional):
A boolean vector selecting the rows of the table to write. If
None, all rows are written. Shape must match the number of
the rows in the table.
columns (:obj:`str`, :obj:`list`, optional):
A list of columns to include in the output file. Can be
provided as a list directly or as a comma-separated string.
                If None or ``'all'``, all columns are written; if
``'pypeit'``, the columns are the same as those included in
the pypeit file. Each selected column must be a valid pypeit
metadata keyword, specific to :attr:`spectrograph`.
Additional valid keywords, depending on the processing level
of the metadata table, are directory, filename, frametype,
framebit, setup, calib, and calibbit.
sort_col (:obj:`str`, optional):
Name of the column to use for sorting the output. If
None, the table is printed in its current state.
overwrite (:obj:`bool`, optional):
Overwrite any existing file; otherwise raise an
exception.
header (:obj:`str`, :obj:`list`, optional):
                One or more strings to write to the top of the file, one
string per file line; ``# `` is added to the beginning of
each string. Ignored if ``output`` does not specify an output
file.
Returns:
`astropy.table.Table`: The table object that would have been
written/printed if ``output == 'table'``. Otherwise, the method
always returns None.
Raises:
ValueError:
Raised if the columns to include are not valid, or if the
column to use for sorting is not valid.
FileExistsError:
Raised if overwrite is False and the file exists.
"""
# Check the file can be written (this is here because the spectrograph
# needs to be defined first)
ofile = None if output in [None, 'table'] else output
if ofile is not None and os.path.isfile(ofile) and not overwrite:
raise FileExistsError(f'{ofile} already exists; set flag to overwrite.')
# Check the rows input
if rows is not None and len(rows) != len(self.table):
raise ValueError('Boolean vector selecting output rows has incorrect length.')
# Get the columns to return
if columns in [None, 'all']:
tbl_cols = list(self.keys())
elif columns == 'pypeit':
tbl_cols = self.set_pypeit_cols(write_bkg_pairs=True)
else:
all_cols = list(self.keys())
tbl_cols = columns if isinstance(columns, list) else columns.split(',')
            badcol = [col not in all_cols for col in tbl_cols]
            if np.any(badcol):
                # NOTE: A Python list cannot be indexed by a boolean list,
                # so select the offending names explicitly.
                raise ValueError('The following columns are not valid: {0}'.format(
                                 ', '.join([c for c, b in zip(tbl_cols, badcol) if b])))
# Make sure the basic parameters are the first few columns; do them in
# reverse order so I can always insert at the beginning of the list
for col in ['framebit', 'frametype', 'filename', 'directory']:
if col not in tbl_cols:
continue
indx = np.where([t == col for t in tbl_cols])[0][0]
if indx != 0:
tbl_cols.insert(0, tbl_cols.pop(indx))
# Make sure the dithers and combination and background IDs are the last
# few columns
ncol = len(tbl_cols)
for col in ['dithpat', 'dithpos', 'dithoff', 'calib', 'comb_id', 'bkg_id']:
if col not in tbl_cols:
continue
indx = np.where([t == col for t in tbl_cols])[0][0]
if indx != ncol-1:
tbl_cols.insert(ncol-1, tbl_cols.pop(indx))
# Copy the internal table so that it is unaltered
output_tbl = self.table.copy()
# Select the output rows if a vector was provided
if rows is not None:
output_tbl = output_tbl[rows]
# Select and sort the data by a given column
if sort_col is not None:
if sort_col not in self.keys():
raise ValueError(f'Cannot sort by {sort_col}. Not a valid column.')
# Ignore any NoneTypes
indx = output_tbl[sort_col] != None
is_None = np.logical_not(indx)
srt = np.append(np.where(is_None)[0],
np.where(indx)[0][np.argsort(output_tbl[sort_col][indx].data)])
output_tbl = output_tbl[tbl_cols][srt]
else:
output_tbl = output_tbl[tbl_cols]
if output == 'table':
# Instead of writing, just return the modified table
return output_tbl
# Always write the table in ascii format
with io.StringIO() as ff:
output_tbl.write(ff, format='ascii.fixed_width')
data_lines = ff.getvalue().split('\n')[:-1]
if ofile is None:
# Output file not defined so just print it
print('\n'.join(data_lines))
return None
# Write the output to an ascii file
with open(ofile, 'w') as f:
if header is not None:
_header = header if isinstance(header, list) else [header]
for h in _header:
f.write(f'# {h}\n')
f.write('\n')
f.write('\n'.join(data_lines))
f.write('\n')
# Just to be explicit that the method returns None when writing to a
# file...
return None
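    # Usage sketch (file name is illustrative): print the pypeit-file
    # columns sorted by MJD, or write a subset to an ascii file:
    #
    #   fitstbl.write(columns='pypeit', sort_col='mjd')
    #   fitstbl.write(output='meta.txt', columns='directory,filename,mjd',
    #                 overwrite=True)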
def find_calib_group(self, grp):
"""
Find all the frames associated with the provided calibration group.
Args:
grp (:obj:`int`):
The calibration group integer.
Returns:
numpy.ndarray: Boolean array selecting those frames in the
table included in the selected calibration group.
Raises:
PypeItError:
Raised if the 'calibbit' column is not defined.
"""
if 'calibbit' not in self.keys():
msgs.error('Calibration groups are not set. First run set_calibration_groups.')
return self.calib_bitmask.flagged(self['calibbit'].data, grp)
def find_frame_calib_groups(self, row):
"""
Find the calibration groups associated with a specific frame.
"""
return self.calib_bitmask.flagged_bits(self['calibbit'][row])
# TODO: Is there a reason why this is not an attribute of
# PypeItMetaData?
def row_match_config(row, config, spectrograph):
"""
Queries whether a row from the fitstbl matches the
input configuration
Args:
row (astropy.table.Row): From fitstbl
config (dict): Defines the configuration
spectrograph (pypeit.spectrographs.spectrograph.Spectrograph):
Used to grab the rtol value for float meta (e.g. dispangle)
Returns:
bool: True if the row matches the input configuration
"""
# Loop on keys in config
match = []
for k in config.keys():
# Deal with floating configs (e.g. grating angle)
if isinstance(config[k], float):
if row[k] is None:
match.append(False)
elif np.abs(config[k]-row[k])/config[k] < spectrograph.meta[k]['rtol']:
match.append(True)
else:
match.append(False)
else:
# The np.all allows for arrays in the Table (e.g. binning)
match.append(np.all(config[k] == row[k]))
# Check
return np.all(match)
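# Illustrative sketch (values are hypothetical): for a float key such as
# 'dispangle', the match uses the spectrograph's relative tolerance.  With
# config = {'dispangle': 7500.0} and spectrograph.meta['dispangle']['rtol']
# = 0.01, a row with dispangle = 7530.0 matches (|7500 - 7530|/7500 = 0.004)
# while one with 7600.0 does not (0.013 > 0.01).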
| 42.817836 | 122 | 0.575411 | 9,701 | 78,742 | 4.599835 | 0.09164 | 0.005378 | 0.009188 | 0.00874 | 0.279587 | 0.226229 | 0.194787 | 0.170607 | 0.152029 | 0.138897 | 0 | 0.002512 | 0.337837 | 78,742 | 1,838 | 123 | 42.841132 | 0.853318 | 0.457329 | 0 | 0.158133 | 0 | 0 | 0.140905 | 0.008467 | 0 | 0 | 0 | 0.011425 | 0.003012 | 1 | 0.064759 | false | 0 | 0.028614 | 0.012048 | 0.162651 | 0.001506 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53d54a4c34c0a67e36d2d017230ceb288acd1564 | 2,341 | py | Python | aql/aql/main/aql_builtin_tools.py | menify/sandbox | 32166c71044f0d5b414335b2b6559adc571f568c | ["MIT"] | null | null | null | aql/aql/main/aql_builtin_tools.py | menify/sandbox | 32166c71044f0d5b414335b2b6559adc571f568c | ["MIT"] | null | null | null | aql/aql/main/aql_builtin_tools.py | menify/sandbox | 32166c71044f0d5b414335b2b6559adc571f568c | ["MIT"] | null | null | null |
import os.path
import shutil
import errno
from aql.nodes import Builder, FileBuilder
from .aql_tools import Tool
__all__ = ( "ExecuteCommand",
"InstallBuilder",
"BuiltinTool",
)
"""
Unique Value - name + type
value
node
node = ExecuteCommand('gcc --help -v')
tools.cpp.cxx
node = ExecuteCommand( tools.cpp.cxx, '--help -v' )
node = ExecuteMethod( target = my_function )
dir_node = CopyFiles( prog_node, target = dir_name )
dir_node = CopyFilesAs( prog_node, target = dir_name )
dir_node = MoveFiles( prog_node, )
dir_node = MoveFilesAs( prog_node )
dir_node = RemoveFiles( prog_node )
node = FindFiles( dir_node )
dir_node = FileDir( prog_node )
"""
def _makeTargetDirs( path_dir ):
try:
os.makedirs( path_dir )
except OSError as e:
if e.errno != errno.EEXIST:
raise
#//===========================================================================//
class ExecuteCommand (Builder):
def build( self, node ):
cmd = node.getSources()
out = self.execCmd( cmd )
node.setNoTargets()
return out
#//-------------------------------------------------------//
def getBuildStrArgs( self, node, brief ):
cmd = node.getSourceValues()
return (cmd,)
#//===========================================================================//
class InstallBuilder (FileBuilder):
def __init__(self, options, target ):
self.target = os.path.abspath( target )
#//-------------------------------------------------------//
def build( self, node ):
sources = node.getSources()
target = self.target
    _makeTargetDirs( target )
for source in sources:
if os.path.isfile( source ):
shutil.copy( source, target )
node.setNoTargets()
#//-------------------------------------------------------//
def getTraceTargets( self, node, brief ):
return self.target
#//===========================================================================//
class BuiltinTool( Tool ):
def ExecuteCommand( self, options ):
return ExecuteCommand( options )
def Install(self, options, target ):
return InstallBuilder( options, target )
def DirName(self, options):
raise NotImplementedError()
def BaseName(self, options):
raise NotImplementedError()
| 22.509615 | 80 | 0.529688 | 215 | 2,341 | 5.632558 | 0.353488 | 0.040462 | 0.02725 | 0.028076 | 0.046243 | 0.046243 | 0.046243 | 0 | 0 | 0 | 0 | 0 | 0.208885 | 2,341 | 103 | 81 | 22.728155 | 0.653888 | 0.176848 | 0 | 0.130435 | 0 | 0 | 0.027046 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.217391 | false | 0 | 0.108696 | 0.065217 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53d8b7928beadd81971824eb5f4c9a1dab184d41 | 1,318 | py | Python | data/parse_hipp_data.py | slinderman/pyhsmm-spiketrains | 462d8d2c59bd2e7c39d20d624bd8b289a31baaa2 | ["MIT"] | 10 | 2016-04-23T00:23:20.000Z | 2022-01-05T19:28:08.000Z | data/parse_hipp_data.py | slinderman/pyhsmm-spiketrains | 462d8d2c59bd2e7c39d20d624bd8b289a31baaa2 | ["MIT"] | 1 | 2017-06-24T06:37:12.000Z | 2017-07-07T17:19:59.000Z | data/parse_hipp_data.py | slinderman/pyhsmm-spiketrains | 462d8d2c59bd2e7c39d20d624bd8b289a31baaa2 | ["MIT"] | 9 | 2016-03-29T21:37:46.000Z | 2022-01-05T19:28:11.000Z |
import os
import numpy as np
# NOTE: The original file mixed raw MATLAB code into this Python script;
# the block below is a numpy port of that MATLAB analysis.  The .dat file
# is assumed to be plain text containing (x, y, t) triples, so it is read
# with np.loadtxt rather than scipy.io.loadmat (which only reads .mat
# files).
data = np.loadtxt("data/hipp_2dtrack_a/smJun03p2.dat").reshape(-1, 3)
# data is 43799-by-3: columns are x, y, time

N = 49  # number of cells

# sampling time
Ts = 0.0333
duration = data.shape[0] * Ts  # in seconds
Tmin, Tmax = data[0, 2], data[-1, 2]
time_edges = np.arange(Tmin, Tmax, 0.25)  # 250 ms per bin

# interpolated rat's position in time bins
rat_pos = np.column_stack([np.interp(time_edges, data[:, 2], data[:, 0]),
                           np.interp(time_edges, data[:, 2], data[:, 1])])
vel = np.abs(np.diff(rat_pos, axis=0))  # row difference
vel = np.vstack([vel[:1], vel])
# x4 scales per-250-ms displacement to per-second; unit: cm/s
rat_vel = 4 * np.sqrt(vel[:, 0]**2 + vel[:, 1]**2)
vel_ind = np.where(rat_vel >= 10)[0]  # RUN velocity threshold

# using RUN only:
# T = len(vel_ind)
# using RUN + pause periods:
T = len(time_edges)

AllSpikeData = np.zeros((N, T))
for i in range(N):
    # per-cell files are assumed to hold the same (x, y, t) triple format
    cell_data = np.loadtxt("Cell_num%d" % (i + 1)).reshape(-1, 3)
    spike_time = cell_data[:, 2]
    spike_pos = cell_data[:, :2]
    # histc-style spike count per time bin
    spike_time_count, _ = np.histogram(spike_time,
                                       bins=np.append(time_edges, Tmax))
    # if analyzing the RUN period only, uncomment this:
    # spike_time_count = spike_time_count[vel_ind]
    AllSpikeData[i, :] = spike_time_count
| 22.338983 | 78 | 0.634294 | 215 | 1,318 | 3.744186 | 0.44186 | 0.037267 | 0.069565 | 0.034783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.050237 | 0.199545 | 1,318 | 59 | 79 | 22.338983 | 0.712796 | 0 | 0 | 0.05 | 0 | 0.025 | 0.033359 | 0.025019 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.075 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53e05b14f47fe11d4c2e4b89d1492b45ec46b072 | 5,199 | py | Python | etl/transform.py | ACWI-SOGW/ngwmn_monitoring_locations_etl | e9ebfebbc5fa349a58669fb1d9944786f26729c3 | [
"CC0-1.0"
] | 1 | 2020-10-07T14:44:30.000Z | 2020-10-07T14:44:30.000Z | etl/transform.py | ACWI-SOGW/ngwmn_monitoring_locations_etl | e9ebfebbc5fa349a58669fb1d9944786f26729c3 | [
"CC0-1.0"
] | 7 | 2020-10-14T19:13:10.000Z | 2021-10-06T20:04:38.000Z | etl/transform.py | ACWI-SOGW/ngwmn_monitoring_locations_etl | e9ebfebbc5fa349a58669fb1d9944786f26729c3 | [
"CC0-1.0"
] | 1 | 2020-10-02T14:43:18.000Z | 2020-10-02T14:43:18.000Z | """
Transform the data into a form that
works with the WELL_REGISTRY_STG table.
"""
import re
def mapping_factory(mapping):
def map_func(key):
if key is not None:
ora_val = mapping.get(key.lower())
else:
ora_val = None
return ora_val
return map_func
WELL_TYPES = {
'surveillance': 1,
'trend': 2,
'special': 3,
}
map_well_type = mapping_factory(WELL_TYPES)
WELL_PURPOSE = {
'dedicated monitoring/observation': 1,
'other': 2
}
map_well_purpose = mapping_factory(WELL_PURPOSE)
QW_WELL_CHARS = {
'background': 1,
'suspected/anticipated changes': 2,
'known changes': 3
}
map_qw_well_chars = mapping_factory(QW_WELL_CHARS)
WL_WELL_CHARS = {
'background': 1,
'suspected/anticipated changes': 2,
'known changes': 3,
'unknown': 999
}
map_wl_well_chars = mapping_factory(WL_WELL_CHARS)
def to_flag(flag):
return '1' if flag else '0'
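# Usage sketch: the factory-built mappers normalize case and tolerate
# missing values, e.g. map_well_type('Trend') == 2,
# map_well_type(None) is None, and to_flag(True) == '1'.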
def transform_mon_loc_data(ml_data):
"""
Map the fields from the API JSON response to
the fields in the WELL_REGISTRY_STG table with
appropriate foreign key values.
"""
mapped_data = dict()
mapped_data['AGENCY_CD'] = ml_data['agency']['agency_cd']
mapped_data['AGENCY_NM'] = ml_data['agency']['agency_nm']
mapped_data['AGENCY_MED'] = ml_data['agency']['agency_med']
mapped_data['SITE_NO'] = ml_data['site_no']
mapped_data['SITE_NAME'] = ml_data['site_name']
mapped_data['DEC_LAT_VA'] = ml_data['dec_lat_va']
mapped_data['DEC_LONG_VA'] = ml_data['dec_long_va']
mapped_data['HORZ_DATUM'] = ml_data['horizontal_datum']
mapped_data['ALT_VA'] = ml_data['alt_va']
mapped_data['ALT_DATUM_CD'] = ml_data['altitude_datum']
try:
mapped_data['NAT_AQUIFER_CD'] = ml_data['nat_aqfr']['nat_aqfr_cd']
mapped_data['NAT_AQFR_DESC'] = ml_data['nat_aqfr']['nat_aqfr_desc']
except (AttributeError, KeyError, TypeError):
mapped_data['NAT_AQUIFER_CD'] = None
mapped_data['NAT_AQFR_DESC'] = None
mapped_data['LOCAL_AQUIFER_NAME'] = ml_data['local_aquifer_name']
mapped_data['AQFR_CHAR'] = ml_data['aqfr_type']
mapped_data['QW_SN_FLAG'] = to_flag(ml_data['qw_sn_flag'])
mapped_data['QW_BASELINE_FLAG'] = to_flag(ml_data['qw_baseline_flag'])
mapped_data['QW_WELL_CHARS'] = map_qw_well_chars(ml_data['qw_well_chars'])
mapped_data['QW_WELL_PURPOSE'] = map_well_purpose(ml_data['qw_well_purpose'])
mapped_data['QW_SYS_NAME'] = ml_data['qw_network_name']
mapped_data['WL_SN_FLAG'] = to_flag(ml_data['wl_sn_flag'])
mapped_data['WL_BASELINE_FLAG'] = to_flag(ml_data['wl_baseline_flag'])
mapped_data['WL_WELL_CHARS'] = map_wl_well_chars(ml_data['wl_well_chars'])
mapped_data['WL_WELL_PURPOSE'] = map_well_purpose(ml_data['wl_well_purpose'])
mapped_data['WL_SYS_NAME'] = ml_data['wl_network_name']
mapped_data['DATA_PROVIDER'] = None
mapped_data['DISPLAY_FLAG'] = to_flag(ml_data['display_flag'])
mapped_data['WL_DATA_PROVIDER'] = None
mapped_data['QW_DATA_PROVIDER'] = None
mapped_data['LITH_DATA_PROVIDER'] = None
mapped_data['CONST_DATA_PROVIDER'] = None
mapped_data['WELL_DEPTH'] = ml_data['well_depth']
mapped_data['LINK'] = ml_data['link']
mapped_data['INSERT_DATE'] = ml_data['insert_date']
mapped_data['UPDATE_DATE'] = ml_data['update_date']
mapped_data['WL_WELL_PURPOSE_NOTES'] = ml_data['wl_well_purpose_notes']
mapped_data['QW_WELL_PURPOSE_NOTES'] = ml_data['qw_well_purpose_notes']
mapped_data['INSERT_USER_ID'] = ml_data['insert_user']
mapped_data['UPDATE_USER_ID'] = ml_data['update_user']
mapped_data['WL_WELL_TYPE'] = map_well_type(ml_data['wl_well_type'])
mapped_data['QW_WELL_TYPE'] = map_well_type(ml_data['qw_well_type'])
mapped_data['LOCAL_AQUIFER_CD'] = None
mapped_data['REVIEW_FLAG'] = None
try:
mapped_data['STATE_CD'] = ml_data['state']['state_cd']
except (AttributeError, KeyError, TypeError):
mapped_data['STATE_CD'] = None
try:
mapped_data['COUNTY_CD'] = ml_data['county']['county_cd']
except (AttributeError, KeyError, TypeError):
mapped_data['COUNTY_CD'] = None
try:
mapped_data['COUNTRY_CD'] = ml_data['country']['country_cd']
except (AttributeError, KeyError, TypeError):
mapped_data['COUNTRY_CD'] = None
mapped_data['WELL_DEPTH_UNITS'] = ml_data['well_depth_units']['unit_id'] if ml_data['well_depth_units'] else None
mapped_data['ALT_UNITS'] = ml_data['altitude_units']['unit_id'] if ml_data['altitude_units'] else None
mapped_data['SITE_TYPE'] = ml_data['site_type']
mapped_data['HORZ_METHOD'] = ml_data['horz_method']
mapped_data['HORZ_ACY'] = ml_data['horz_acy']
mapped_data['ALT_METHOD'] = ml_data['alt_method']
mapped_data['ALT_ACY'] = ml_data['alt_acy']
return mapped_data
def date_format(mapped_data):
# fix missing fractions of a second
if re.match(r".*:\d\dZ$", mapped_data['INSERT_DATE']):
mapped_data['INSERT_DATE'] = mapped_data['INSERT_DATE'][:-1] + ".0Z"
if re.match(r".*:\d\dZ$", mapped_data['UPDATE_DATE']):
mapped_data['UPDATE_DATE'] = mapped_data['UPDATE_DATE'][:-1] + ".0Z"
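# Usage sketch (timestamps are hypothetical): date_format() pads
# timestamps that lack fractional seconds and leaves the rest alone:
#
#   row = {'INSERT_DATE': '2020-01-02T03:04:05Z',
#          'UPDATE_DATE': '2020-01-02T03:04:05.123Z'}
#   date_format(row)
#   # row['INSERT_DATE'] -> '2020-01-02T03:04:05.0Z'
#   # row['UPDATE_DATE'] unchanged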
| 38.227941 | 117 | 0.695903 | 766 | 5,199 | 4.278068 | 0.173629 | 0.201404 | 0.046994 | 0.018309 | 0.424474 | 0.227037 | 0.160818 | 0.081782 | 0.037229 | 0.037229 | 0 | 0.004583 | 0.160608 | 5,199 | 135 | 118 | 38.511111 | 0.746334 | 0.045009 | 0 | 0.111111 | 0 | 0 | 0.317592 | 0.029996 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046296 | false | 0 | 0.009259 | 0.009259 | 0.092593 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53ebe27af2c0c28dac914d098023620cb50fc322 | 1,529 | py | Python | igibson/object_states/aabb.py | mamadbiabon/iGibson | d416a470240eb7ad86e04fee475ae4bd67263a7c | ["MIT"] | 360 | 2020-04-02T11:12:09.000Z | 2022-03-24T21:46:58.000Z | igibson/object_states/aabb.py | mamadbiabon/iGibson | d416a470240eb7ad86e04fee475ae4bd67263a7c | ["MIT"] | 169 | 2020-04-07T21:01:05.000Z | 2022-03-31T10:07:39.000Z | igibson/object_states/aabb.py | mamadbiabon/iGibson | d416a470240eb7ad86e04fee475ae4bd67263a7c | ["MIT"] | 94 | 2020-04-09T23:22:17.000Z | 2022-03-17T21:49:03.000Z |
import numpy as np
from igibson.external.pybullet_tools.utils import aabb_union, get_aabb, get_all_links
from igibson.object_states.object_state_base import CachingEnabledObjectState
class AABB(CachingEnabledObjectState):
def _compute_value(self):
body_id = self.obj.get_body_id()
all_links = get_all_links(body_id)
aabbs = [get_aabb(body_id, link=link) for link in all_links]
aabb_low, aabb_hi = aabb_union(aabbs)
if not hasattr(self.obj, "category") or self.obj.category != "floors" or self.obj.room_floor is None:
return np.array(aabb_low), np.array(aabb_hi)
# TODO: remove after split floors
# room_floor will be set to the correct RoomFloor beforehand
room_instance = self.obj.room_floor.room_instance
# Get the x-y values from the room segmentation map
room_aabb_low, room_aabb_hi = self.obj.room_floor.scene.get_aabb_by_room_instance(room_instance)
if room_aabb_low is None:
return np.array(aabb_low), np.array(aabb_hi)
# Use the z values from pybullet
room_aabb_low[2] = aabb_low[2]
room_aabb_hi[2] = aabb_hi[2]
return np.array(room_aabb_low), np.array(room_aabb_hi)
def _set_value(self, new_value):
raise NotImplementedError("AABB state currently does not support setting.")
# Nothing needs to be done to save/load AABB since it will happen due to pose caching.
def _dump(self):
return None
def load(self, data):
return
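# Usage sketch (the 'obj' below is hypothetical; get_value() is assumed to
# be the caching accessor provided by CachingEnabledObjectState, wrapping
# _compute_value()):
#
#   lo, hi = obj.states[AABB].get_value()   # world-frame min/max corners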
| 36.404762 | 109 | 0.699804 | 233 | 1,529 | 4.343348 | 0.381974 | 0.055336 | 0.043478 | 0.047431 | 0.077075 | 0.077075 | 0.077075 | 0.077075 | 0.077075 | 0.077075 | 0 | 0.003373 | 0.22433 | 1,529 | 41 | 110 | 37.292683 | 0.849916 | 0.16743 | 0 | 0.083333 | 0 | 0 | 0.047356 | 0 | 0 | 0 | 0 | 0.02439 | 0 | 1 | 0.166667 | false | 0 | 0.125 | 0.083333 | 0.541667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53f022c5295afcf5069c62aac2f57d65cf97e719 | 2,147 | py | Python | data_steward/constants/validation/email_notification.py | jp3477/curation | 41f98d57c8273d9963ad6d466a237c99b63c74be | [
"MIT"
] | 1 | 2021-04-05T18:06:25.000Z | 2021-04-05T18:06:25.000Z | data_steward/constants/validation/email_notification.py | jp3477/curation | 41f98d57c8273d9963ad6d466a237c99b63c74be | [
"MIT"
] | null | null | null | data_steward/constants/validation/email_notification.py | jp3477/curation | 41f98d57c8273d9963ad6d466a237c99b63c74be | [
"MIT"
] | null | null | null | MANDRILL_API_KEY = 'MANDRILL_API_KEY'
UNSET_MANDRILL_API_KEY_MSG = f"Mandrill API key not set in environment variable {MANDRILL_API_KEY}"
CONTACT_LIST_QUERY = """
SELECT *
FROM `{{project}}.{{dataset}}.{{contact_table}}`
"""
EHR_OPERATIONS = 'EHR Ops'
EHR_OPS_ZENDESK = 'support@aou-ehr-ops.zendesk.com'
DATA_CURATION_LISTSERV = 'datacuration@researchallofus.org'
NO_REPLY_ADDRESS = 'noreply@researchallofus.org'
NO_DATA_STEWARD = 'no data steward'
# HPO contact list table columns
SITE_NAME = 'site_name'
HPO_ID = 'hpo_id'
SITE_POINT_OF_CONTACT = 'site_point_of_contact'
# Mandrill API constants
MAIL_TO = 'mail_to'
EHR_OPS_SITE_URL = 'https://sites.google.com/view/ehrupload'
# Email content
EMAIL_BODY = """
<p style="font-size:115%;">Hi {{ site_name }},</p>
<p style="font-size:115%;">Your submission <b>{{ folder }}</b>
{% if submission_error %}was NOT successfully loaded on {{ timestamp }}.<br>
{% else %}was successfully loaded on {{ timestamp }}.<br>
{% endif %}
Please review the <code>results.html</code> submission report attached to this email{% if submission_error %}<br>
and resolve the errors before making a new submission{% endif %}.<br>
If any of your files have not been successfully uploaded, please run the
<a href="https://github.com/all-of-us/aou-ehr-file-check">local file check</a> before making your submission.<br>
To view the full set of curation reports, please visit the submission folder in your
GCS bucket <a href="{{ submission_folder_url }}">here</a>.<br>
For more information on the reports and how to download them, please refer to our
<a href="{{ ehr_ops_site_url }}">EHR Ops website</a>.</p>
<p style="font-size:115%;">You are receiving this email because you are listed as a point of contact
for HPO Site <em>{{ site_name }}</em>.<br>
If you have additional questions or wish to no longer receive these emails, please reply/send an
email to <a href="mailto:{{ eo_zendesk }}">{{ eo_zendesk }}</a>.</p>
<p style="font-size:115%;">EHR Ops team, DRC<br>
<em>All of Us</em> Research Program<br>
<img src="cid:{{ aou_logo }}"/></p>
"""
AOU_LOGO = 'aou_logo'
AOU_LOGO_PNG = 'all-of-us-logo.png'
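# Rendering sketch (assumes jinja2, which the {{ }} placeholders imply; the
# context values are hypothetical -- this module only defines constants):
#
#   from jinja2 import Template
#   html = Template(EMAIL_BODY).render(
#       site_name='Example HPO', folder='submission_2021_01_01',
#       timestamp='2021-01-01 12:00', submission_error=False,
#       submission_folder_url='https://example/bucket/folder',
#       ehr_ops_site_url=EHR_OPS_SITE_URL, eo_zendesk=EHR_OPS_ZENDESK,
#       aou_logo=AOU_LOGO)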
| 39.036364 | 116 | 0.726129 | 346 | 2,147 | 4.349711 | 0.427746 | 0.027907 | 0.046512 | 0.037209 | 0.089701 | 0.037209 | 0.025249 | 0 | 0 | 0 | 0 | 0.006417 | 0.129017 | 2,147 | 54 | 117 | 39.759259 | 0.798396 | 0.031206 | 0 | 0.05 | 0 | 0.075 | 0.832852 | 0.148362 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53f2926766ffb4a7606e6a1c06800d6ce10ac775 | 3,893 | py | Python | src/stochastic_tour.py | DavidNKraemer/ams553-final-project | fc23fe5f126a8bd9ea593c0b339883ec71820a05 | [
"MIT"
] | null | null | null | src/stochastic_tour.py | DavidNKraemer/ams553-final-project | fc23fe5f126a8bd9ea593c0b339883ec71820a05 | [
"MIT"
] | null | null | null | src/stochastic_tour.py | DavidNKraemer/ams553-final-project | fc23fe5f126a8bd9ea593c0b339883ec71820a05 | [
"MIT"
] | null | null | null |
import numpy as np
import random
from collections import namedtuple
def generate_prob_matrix(n):
matrix = np.random.rand(n, n)
for i in range(n):
matrix[i][i] = 0
for i in range(n):
matrix[i] = (1/np.sum(matrix[i]))*matrix[i]
return matrix
def categorical(p):
return np.random.choice(len(p), 1, p=p)[0]
Drone = namedtuple('Drone', 'speed probability')
Site = namedtuple('Site', 'location')
class System:
def __init__(self, sites, drones):
self.sites = {}
self.drones = {}
n = len(sites)
for i, drone in enumerate(drones):
self.drones[i] = drone
for i, site in enumerate(sites):
self.sites[i] = site
distance = np.zeros([n, n])
for i in range(n):
for j in range(n):
if i < j:
x = np.subtract(sites[i], sites[j])
d = np.linalg.norm(x)
distance[i][j] = d
distance[j][i] = d
self.distance = distance
def get_site(self, site_id):
return self.sites[site_id]
def get_drone(self, drone_id):
return self.drones[drone_id]
def compute_path_distance(self, path):
n = len(path)
d = 0
for i in range(n - 1):
d += self.distance[path[i]][path[i + 1]]
return d
def compute_path_time(self, path, drone_id):
d = self.compute_path_distance(path)
return d/self.get_drone(drone_id).speed
def generate_path_of_length(self, length, drone_id):
path = []
P = self.get_drone(drone_id).probability
num_sites = len(self.sites)
s = categorical([1/num_sites]*num_sites)
path.append(s)
site = s
for i in range(length):
site = categorical(P[site])
path.append(site)
return path
def generate_path(self, s, t, drone_id):
path = [s]
P = self.get_drone(drone_id).probability
site = categorical(P[s])
path.append(site)
while site != t:
site = categorical(P[site])
path.append(site)
return path
@staticmethod
def generate_random_system(n, k):
locations = np.random.rand(n, 2)
sites = []
for i in locations:
sites.append(Site(i))
drones = []
for i in range(k):
speed = abs(random.random())
probability = generate_prob_matrix(n)
drones.append(Drone(speed, probability))
return System(sites, drones)
def _compute_arrival_times(system, path, drone_id):
    # The system is needed to compute travel times, so pass it in
    # explicitly instead of relying on an undefined global.
    arrival_times = []
    t = 0
    for i in range(len(path) - 1):
        t += system.compute_path_time(path[i:i+2], drone_id=drone_id)
        arrival_times.append((drone_id, path[i], path[i+1], t))
    return arrival_times
def _generate_arrival_times(system, num_drones, length):
    arrival_times = [[] for _ in range(len(system.sites))]
    events = []
    for i in range(num_drones):
        # Sample a random tour for each drone and collect its arrival events
        path = system.generate_path_of_length(length, i)
        events.extend(_compute_arrival_times(system, path, i))
    def get_key(item):
        return item[3]
    events = sorted(events, key=get_key)
    for event in events:
        drone_id = event[0]
        site_id = event[2]
        time = event[3]
        arrival_times[site_id].append((drone_id, time))
    return arrival_times
def compute_cost(system, num_drones, length):
    # The helper needs both the number of drones and the tour length, so
    # expose them here instead of a single ambiguous 'n'.
    arrival_times = _generate_arrival_times(system, num_drones, length)
    interarrival_times = [[] for _ in range(len(system.sites))]
for i in range(len(arrival_times)):
arrivals = arrival_times[i]
for j in range(len(arrivals) - 1):
interarrival_times[i].append(arrivals[j+1][1] - arrivals[j][1])
interarrival_avgs = [compute_average(i) for i in interarrival_times]
return max(interarrival_avgs)
def compute_average(data):
return (1/len(data))*sum(data)
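# Usage sketch (parameters are illustrative):
#
#   system = System.generate_random_system(n=5, k=2)  # 5 sites, 2 drones
#   path = system.generate_path_of_length(10, 0)      # random tour, drone 0
#   t = system.compute_path_time(path, drone_id=0)
#   cost = compute_cost(system, num_drones=2, length=100)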
| 25.781457 | 75 | 0.5813 | 533 | 3,893 | 4.095685 | 0.15197 | 0.044892 | 0.030234 | 0.04535 | 0.158497 | 0.126432 | 0.120018 | 0.040312 | 0.040312 | 0 | 0 | 0.008062 | 0.298998 | 3,893 | 150 | 76 | 25.953333 | 0.791865 | 0 | 0 | 0.127273 | 1 | 0 | 0.008736 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0.009091 | 0.027273 | 0.045455 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53f8fdaf42e35a017e458aac366d4271e4baa22e | 1,932 | py | Python | examples/python/masked_hist.py | DerThorsten/seglib | 4655079e390e301dd93e53f5beed6c9737d6df9f | [
"MIT"
] | null | null | null | examples/python/masked_hist.py | DerThorsten/seglib | 4655079e390e301dd93e53f5beed6c9737d6df9f | [
"MIT"
] | null | null | null | examples/python/masked_hist.py | DerThorsten/seglib | 4655079e390e301dd93e53f5beed6c9737d6df9f | [
"MIT"
] | null | null | null | import vigra
import numpy
import pylab
from seglib import cgp2d
from seglib.preprocessing import norm01
import seglib.edge_detectors.pixel as edp
import seglib.region_descriptors.pixel as rdp
from seglib.histogram import jointHistogram,histogram
from seglib.region_descriptors.pixel.sift import denseSift
# change me to your path
img = "img/text.jpg"
img = numpy.squeeze(vigra.readImage(img))#[0:75,0:75,:]
binCount = 30
sigma = 1.5
histImg = numpy.zeros(img.shape[0:2]+(binCount*3,))
imgBig = None
sizes = [3,4,5,8,10,15,20,25,40,100]
scalings = [5,10,15]
for size in sizes:
for scaling in scalings:
size = int (size)
scaling = float(scaling)
print size,scaling
labels ,nseg= vigra.analysis.slicSuperpixels(vigra.colors.transform_RGB2Lab(img),scaling,size)
labels = vigra.analysis.labelImage(labels).astype(numpy.uint64)
cgp,tgrid = cgp2d.cgpFromLabels(labels)
if imgBig is None:
imgBig=vigra.sampling.resize(img,cgp.shape)
#cgp2d.visualize(imgBig,cgp=cgp)
print "accumulate cell "
hist = cgp.accumulateCellHistogram(cellType=2,image=img,binCount=binCount,sigma=sigma)
hist = hist.reshape([cgp.numCells(2),-1])
for c in range(histImg.shape[2]):
histImg[:,:,c] += (size)*cgp.featureToImage(cellType=2,features=hist[:,c],ignoreInactive=False,useTopologicalShape=False)
histImg=numpy.require(histImg,dtype=numpy.float32)
histImg=vigra.taggedView(histImg, 'xyc')
histImg = vigra.gaussianSmoothing(histImg,sigma=1.0)
#for c in range(histImg.shape[2]):
# #print c
# pylab.imshow( numpy.swapaxes( norm01(histImg[:,:,c]) ,0,1) )
# pylab.show()
#
# print "hist",hist.shape
imgdt = rdp.deepDetexturize(srcImg=img,img=histImg,nIteration=10,
nCluster=10,reductionAlg='pca',nldEdgeThreshold=10.0,nldScale=10.0,distance=None)#'cityblock')
| 27.6 | 133 | 0.70911 | 263 | 1,932 | 5.193916 | 0.437262 | 0.036603 | 0.033675 | 0.04246 | 0.086384 | 0.035139 | 0.035139 | 0 | 0 | 0 | 0 | 0.042488 | 0.15942 | 1,932 | 69 | 134 | 28 | 0.798645 | 0.11853 | 0 | 0.052632 | 0 | 0 | 0.020106 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.263158 | null | null | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
53fad9cdfe9f1c4fdba68eaa168284de33fce059 | 647 | py | Python | var/spack/repos/builtin/packages/exiv2/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 9 | 2018-04-18T07:51:40.000Z | 2021-09-10T03:56:57.000Z | var/spack/repos/builtin/packages/exiv2/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 907 | 2018-04-18T11:17:57.000Z | 2022-03-31T13:20:25.000Z | var/spack/repos/builtin/packages/exiv2/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 29 | 2018-11-05T16:14:23.000Z | 2022-02-03T16:07:09.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Exiv2(CMakePackage):
"""Exiv2 is a Cross-platform C++ library and a command line utility
to manage image metadata
"""
homepage = "https://www.exiv2.org/"
url = "https://github.com/Exiv2/exiv2/archive/v0.27.2.tar.gz"
version('0.27.2', sha256='3dbcaf01fbc5b98d42f091d1ff0d4b6cd9750dc724de3d9c0d113948570b2934')
depends_on('zlib', type='link')
depends_on('expat@2.2.6:', type='link')
| 30.809524 | 96 | 0.710974 | 87 | 647 | 5.264368 | 0.781609 | 0.0131 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.123389 | 0.160742 | 647 | 20 | 97 | 32.35 | 0.720074 | 0.431221 | 0 | 0 | 0 | 0.142857 | 0.482857 | 0.182857 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.571429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
53fbe12da973d06be5b6afaae786b7644d276650 | 1,309 | py | Python | workflows/post_process_run/fv3post/gsutil.py | jacnugent/fv3net | 84958651bdd17784fdab98f87ad0d65414c03368 | [
"MIT"
] | 5 | 2021-03-20T22:42:40.000Z | 2021-06-30T18:39:36.000Z | workflows/post_process_run/fv3post/gsutil.py | jacnugent/fv3net | 84958651bdd17784fdab98f87ad0d65414c03368 | [
"MIT"
] | 195 | 2021-09-16T05:47:18.000Z | 2022-03-31T22:03:15.000Z | workflows/post_process_run/fv3post/gsutil.py | ai2cm/fv3net | e62038aee0a97d6207e66baabd8938467838cf51 | [
"MIT"
] | 1 | 2021-06-16T22:04:24.000Z | 2021-06-16T22:04:24.000Z | import os
import subprocess
import backoff
class GSUtilResumableUploadException(Exception):
pass
def _decode_to_str_if_bytes(s, encoding="utf-8"):
if isinstance(s, bytes):
return s.decode(encoding)
else:
return s
def authenticate():
try:
credentials = os.environ["GOOGLE_APPLICATION_CREDENTIALS"]
except KeyError:
pass
else:
subprocess.check_call(
["gcloud", "auth", "activate-service-account", "--key-file", credentials]
)
@backoff.on_exception(backoff.expo, GSUtilResumableUploadException, max_tries=3)
def upload_dir(d, dest):
try:
# Pipe stderr to stdout because gsutil logs upload progress there.
subprocess.check_output(
["gsutil", "-m", "rsync", "-r", "-e", d, dest], stderr=subprocess.STDOUT
)
except subprocess.CalledProcessError as e:
output = _decode_to_str_if_bytes(e.output)
if "ResumableUploadException" in output:
raise GSUtilResumableUploadException()
else:
raise e
def download_directory(dir_, dest):
os.makedirs(dest, exist_ok=True)
subprocess.check_call(["gsutil", "-m", "rsync", "-r", dir_, dest])
def cp(source, destination):
subprocess.check_call(["gsutil", "cp", source, destination])
| 25.666667 | 85 | 0.654698 | 146 | 1,309 | 5.712329 | 0.506849 | 0.071942 | 0.068345 | 0.031175 | 0.043165 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001982 | 0.229183 | 1,309 | 50 | 86 | 26.18 | 0.824579 | 0.048892 | 0 | 0.194444 | 0 | 0 | 0.115044 | 0.062751 | 0 | 0 | 0 | 0 | 0 | 1 | 0.138889 | false | 0.055556 | 0.083333 | 0 | 0.305556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
99019a837f86e3b14c54300ab0d06ff51f85071a | 173 | py | Python | intValues.py | jules552/ProjetISN | 20da3572b59af25a166022bc2f5b25d46add2650 | ["Unlicense"] | null | null | null | intValues.py | jules552/ProjetISN | 20da3572b59af25a166022bc2f5b25d46add2650 | ["Unlicense"] | null | null | null | intValues.py | jules552/ProjetISN | 20da3572b59af25a166022bc2f5b25d46add2650 | ["Unlicense"] | null | null | null |
MAP = 1
SPEED = 1.5
VELOCITYRESET = 6
WIDTH = 1280
HEIGHT = 720
X = WIDTH / 2 - 50
Y = HEIGHT / 2 - 50
MOUSER = 325
TICKRATES = 120
nfc = False
raspberry = False | 14.416667 | 20 | 0.606936 | 27 | 173 | 3.888889 | 0.777778 | 0.057143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.190083 | 0.300578 | 173 | 12 | 21 | 14.416667 | 0.677686 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
99062a5160d0b8327745e2f7901f243a1d23d8b8 | 853 | py | Python | public/js/tinymice/plugins/bootstrap/jquery-file-tree/connectors/jqueryFileTree.py | btybug/main.albumbugs | 2343466bae7ee3d8941abc4c9684667cccc3e103 | [
"MIT"
] | 13 | 2016-05-25T16:12:49.000Z | 2021-04-09T01:49:24.000Z | public/js/tinymice/plugins/bootstrap/jquery-file-tree/connectors/jqueryFileTree.py | btybug/main.albumbugs | 2343466bae7ee3d8941abc4c9684667cccc3e103 | [
"MIT"
] | 265 | 2015-10-19T02:40:55.000Z | 2022-03-28T07:24:49.000Z | public/js/tinymice/plugins/bootstrap/jquery-file-tree/connectors/jqueryFileTree.py | btybug/main.albumbugs | 2343466bae7ee3d8941abc4c9684667cccc3e103 | [
"MIT"
] | 7 | 2016-02-08T11:41:40.000Z | 2021-06-08T18:18:02.000Z | #
# jQuery File Tree
# Python/Django connector script
# By Martin Skou
#
import os
import urllib

from django.http import HttpResponse
def dirlist(request):
r=['<ul class="jqueryFileTree" style="display: none;">']
    try:
d=urllib.unquote(request.POST.get('dir','c:\\temp'))
for f in os.listdir(d):
ff=os.path.join(d,f)
if os.path.isdir(ff):
r.append('<li class="directory collapsed"><a href="#" rel="%s/">%s</a></li>' % (ff,f))
else:
e=os.path.splitext(f)[1][1:] # get .ext and remove dot
r.append('<li class="file ext_%s"><a href="#" rel="%s">%s</a></li>' % (e,ff,f))
r.append('</ul>')
    except Exception as e:
r.append('Could not load directory: %s' % str(e))
r.append('</ul>')
return HttpResponse(''.join(r))
| 32.807692 | 101 | 0.548652 | 125 | 853 | 3.736 | 0.512 | 0.074946 | 0.034261 | 0.094218 | 0.218415 | 0.218415 | 0.218415 | 0 | 0 | 0 | 0 | 0.003067 | 0.235639 | 853 | 25 | 102 | 34.12 | 0.71319 | 0.100821 | 0 | 0.210526 | 0 | 0.052632 | 0.355263 | 0.085526 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.105263 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
54d943f36b7e93ff9b844e618cfa99e6c35ca662 | 2,011 | py | Python | contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/pyflakes.py | lahosken/pants | 1b0340987c9b2eab9411416803c75b80736716e4 | [
"Apache-2.0"
] | null | null | null | contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/pyflakes.py | lahosken/pants | 1b0340987c9b2eab9411416803c75b80736716e4 | [
"Apache-2.0"
] | null | null | null | contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/pyflakes.py | lahosken/pants | 1b0340987c9b2eab9411416803c75b80736716e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pyflakes.checker import Checker as FlakesChecker
from pants.contrib.python.checks.tasks.checkstyle.common import CheckstylePlugin, Nit
class FlakeError(Nit):
# TODO(wickman) There is overlap between this and Flake8 -- consider integrating
# checkstyle plug-ins into the PEP8 tool directly so that this can be inherited
# by flake8.
# Code reference is here: http://flake8.readthedocs.org/en/latest/warnings.html
CLASS_ERRORS = {
'DuplicateArgument': 'F831',
'ImportShadowedByLoopVar': 'F402',
'ImportStarUsed': 'F403',
'LateFutureImport': 'F404',
'Redefined': 'F810',
'RedefinedInListComp': 'F812',
'RedefinedWhileUnused': 'F811',
'UndefinedExport': 'F822',
'UndefinedLocal': 'F823',
'UndefinedName': 'F821',
'UnusedImport': 'F401',
'UnusedVariable': 'F841',
}
def __init__(self, python_file, flake_message):
line_range = python_file.line_range(flake_message.lineno)
super(FlakeError, self).__init__(
self.get_error_code(flake_message),
Nit.ERROR,
python_file.filename,
flake_message.message % flake_message.message_args,
line_range,
python_file.lines[line_range])
@classmethod
def get_error_code(cls, message):
return cls.CLASS_ERRORS.get(message.__class__.__name__, 'F999')
class PyflakesChecker(CheckstylePlugin):
"""Detect common coding errors via the pyflakes package."""
def nits(self):
checker = FlakesChecker(self.python_file.tree, self.python_file.filename)
for message in sorted(checker.messages, key=lambda msg: msg.lineno):
if FlakeError.get_error_code(message) not in self.options.ignore:
yield FlakeError(self.python_file, message)
| 35.910714 | 93 | 0.721532 | 235 | 2,011 | 5.961702 | 0.604255 | 0.049964 | 0.039971 | 0.027123 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030048 | 0.172551 | 2,011 | 55 | 94 | 36.563636 | 0.811899 | 0.218797 | 0 | 0 | 0 | 0 | 0.152662 | 0.014753 | 0 | 0 | 0 | 0.018182 | 0 | 1 | 0.081081 | false | 0 | 0.189189 | 0.027027 | 0.378378 | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
54da935d3d5cf04aac496677e269b59710d17100 | 5,503 | py | Python | dev/ideas/cython/playing_around.py | achilleas-k/brian2 | 906563b6b1321585b082f79f74f1b4ab386347ec | [
"BSD-2-Clause"
] | null | null | null | dev/ideas/cython/playing_around.py | achilleas-k/brian2 | 906563b6b1321585b082f79f74f1b4ab386347ec | [
"BSD-2-Clause"
] | null | null | null | dev/ideas/cython/playing_around.py | achilleas-k/brian2 | 906563b6b1321585b082f79f74f1b4ab386347ec | [
"BSD-2-Clause"
] | null | null | null | from pylab import *
import cython
import time, timeit
from brian2.codegen.runtime.cython_rt.modified_inline import modified_cython_inline
import numpy
from scipy import weave
import numexpr
import theano
from theano import tensor as tt
tau = 20 * 0.001
N = 1000000
b = 1.2 # constant current mean, the modulation varies
freq = 10.0
t = 0.0
dt = 0.0001
_array_neurongroup_a = a = linspace(.05, 0.75, N)
_array_neurongroup_v = v = rand(N)
ns = {'_array_neurongroup_a': a, '_array_neurongroup_v': v,
'_N': N,
'dt': dt, 't': t, 'tau': tau, 'b': b, 'freq': freq,# 'sin': numpy.sin,
'pi': pi,
}
code = '''
cdef int _idx
cdef int _vectorisation_idx
cdef int N = <int>_N
cdef double a, v, _v
#cdef double [:] _cy_array_neurongroup_a = _array_neurongroup_a
#cdef double [:] _cy_array_neurongroup_v = _array_neurongroup_v
cdef double* _cy_array_neurongroup_a = &(_array_neurongroup_a[0])
cdef double* _cy_array_neurongroup_v = &(_array_neurongroup_v[0])
for _idx in range(N):
_vectorisation_idx = _idx
a = _cy_array_neurongroup_a[_idx]
v = _cy_array_neurongroup_v[_idx]
_v = a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau)
#_v = a*b+0.0001*sin(v)
#_v = a*b+0.0001*v
v = _v
_cy_array_neurongroup_v[_idx] = v
'''
def timefunc_cython_inline():
cython.inline(code, locals=ns)
f_mod, f_arg_list = modified_cython_inline(code, locals=ns, globals={})
def timefunc_cython_modified_inline():
f_mod.__invoke(*f_arg_list)
#modified_cython_inline(code, locals=ns)
def timefunc_python():
for _idx in xrange(N):
_vectorisation_idx = _idx
a = _array_neurongroup_a[_idx]
v = _array_neurongroup_v[_idx]
_v = a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau)
v = _v
_array_neurongroup_v[_idx] = v
def timefunc_numpy():
_v = a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau)
v[:] = _v
def timefunc_numpy_smart():
_sin_term = sin(2.0*freq*pi*t)
_exp_term = exp(-dt/tau)
_a_term = (_sin_term-_sin_term*_exp_term)
_v = v
_v *= _exp_term
_v += a*_a_term
_v += -b*_exp_term + b
def timefunc_numpy_blocked():
ext = exp(-dt/tau)
sit = sin(2.0*freq*pi*t)
bs = 20000
for i in xrange(0, N, bs):
ab = a[i:i+bs]
vb = v[i:i+bs]
absit = ab*sit + b
vb *= ext
vb += absit
vb -= absit*ext
def timefunc_numexpr():
v[:] = numexpr.evaluate('a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau)')
def timefunc_numexpr_smart():
_sin_term = sin(2.0*freq*pi*t)
_exp_term = exp(-dt/tau)
_a_term = (_sin_term-_sin_term*_exp_term)
_const_term = -b*_exp_term + b
#v[:] = numexpr.evaluate('a*_a_term+v*_exp_term+_const_term')
numexpr.evaluate('a*_a_term+v*_exp_term+_const_term', out=v)
def timefunc_weave(*args):
code = '''
// %s
int N = _N;
for(int _idx=0; _idx<N; _idx++)
{
double a = _array_neurongroup_a[_idx];
double v = _array_neurongroup_v[_idx];
double _v = a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau);
v = _v;
_array_neurongroup_v[_idx] = v;
}
''' % str(args)
weave.inline(code, ns.keys(), ns, compiler='gcc', extra_compile_args=list(args))
def timefunc_weave_slow():
timefunc_weave('-O3', '-march=native')
def timefunc_weave_fast():
timefunc_weave('-O3', '-march=native', '-ffast-math')
def get_theano_func():
a = tt.dvector('a')
v = tt.dvector('v')
freq = tt.dscalar('freq')
t = tt.dscalar('t')
dt = tt.dscalar('dt')
tau = tt.dscalar('tau')
return theano.function([a, v, freq, t, dt, tau],
a*tt.sin(2.0*freq*pi*t) + b + v*tt.exp(-dt/tau) + (-a*tt.sin(2.0*freq*pi*t) - b)*tt.exp(-dt/tau))
# return theano.function([a, v],
# a*tt.sin(2.0*freq*pi*t) + b + v*tt.exp(-dt/tau) + (-a*tt.sin(2.0*freq*pi*t) - b)*tt.exp(-dt/tau))
theano.config.gcc.cxxflags = '-O3 -ffast-math'
theano_func = get_theano_func()
#print theano.pp(theano_func.maker.fgraph.outputs[0])
#print
#theano.printing.debugprint(theano_func.maker.fgraph.outputs[0])
#theano.printing.pydotprint(theano_func, 'func.png')
#exit()
def timefunc_theano():
v[:] = theano_func(a, v, freq, t, dt, tau)
def dotimeit(f):
v[:] = 1
f()
print '%s: %.2f' % (f.__name__.replace('timefunc_', ''),
timeit.timeit(f.__name__+'()', setup='from __main__ import '+f.__name__, number=100))
def check_values(f):
v[:] = 1
v[:5] = linspace(0, 1, 5)
f()
print '%s: %s' % (f.__name__.replace('timefunc_', ''), v[:5])
if __name__=='__main__':
funcs = [#timefunc_cython_inline,
timefunc_cython_modified_inline,
timefunc_numpy,
timefunc_numpy_smart,
timefunc_numpy_blocked,
timefunc_numexpr,
timefunc_numexpr_smart,
timefunc_weave_slow,
timefunc_weave_fast,
timefunc_theano,
]
if 1:
print 'Values'
print '======'
for f in funcs:
check_values(f)
print
if 1:
print 'Times'
print '====='
for f in funcs:
dotimeit(f)
| 30.743017 | 125 | 0.589678 | 853 | 5,503 | 3.504103 | 0.157093 | 0.112412 | 0.028438 | 0.051188 | 0.461024 | 0.362663 | 0.317497 | 0.301104 | 0.301104 | 0.243894 | 0 | 0.024644 | 0.247865 | 5,503 | 178 | 126 | 30.91573 | 0.697511 | 0.089042 | 0 | 0.155405 | 0 | 0.02027 | 0.262961 | 0.085027 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.067568 | null | null | 0.047297 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
54e0817402b9c2ce35c6af23684ce91b4042e10a | 5,639 | py | Python | home/views.py | Kshitij-Kumar-Singh-Chauhan/docon | bff0547e7bbd030e027217a2ca7800a8da529b56 | [
"MIT"
] | null | null | null | home/views.py | Kshitij-Kumar-Singh-Chauhan/docon | bff0547e7bbd030e027217a2ca7800a8da529b56 | [
"MIT"
] | null | null | null | home/views.py | Kshitij-Kumar-Singh-Chauhan/docon | bff0547e7bbd030e027217a2ca7800a8da529b56 | [
"MIT"
] | 2 | 2021-06-17T05:35:07.000Z | 2021-06-17T06:01:23.000Z | from django.http.response import HttpResponse
from django.shortcuts import redirect, render
from cryptography.fernet import Fernet
from .models import Book, Contact, Diagnostic, Report, UserDetails
from datetime import datetime
# Create your views here.
def homePage(request):
if(request.method == 'POST'):
email = request.POST.get('email')
password = request.POST.get('password')
try:
object = UserDetails.objects.get(email = email)
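            # key and password were stored as str(bytes) in signUpPage, i.e. as
            # "b'...'" strings, so strip the b' prefix and trailing quote before
            # converting back to bytes for Fernet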
key1 = object.key
key1=key1[2:-1]
key1 = bytes(key1,'utf-8')
f = Fernet(key1)
truepassword = object.password
truepassword = truepassword[2:-1]
truepassword = bytes(truepassword,'utf-8')
truepassword = f.decrypt(truepassword).decode('utf-8')
except:
object = None
if(object==None):
context = {
'message': "Email Does Not Exist"
}
return render(request,"login.html",context)
elif(password == truepassword):
if object.profession == "PATIENT":
object1=UserDetails.objects.filter(profession="DOCTOR")
# name=(object.name)
# appointment(request,email,name)
context1={
'message':'Welcome '+object.name,
'mail' : object.email,
'doctors':object1
}
return render(request,"index.html",context1)
else:
context2={
'message':'Welcome '+object.name,
'mail' : object.email
}
return render(request,"dindex.html",context2)
else:
return redirect("/")
else:
return render(request,"login.html",{})
def signUpPage(request):
if(request.method == 'POST'):
name = request.POST.get('name')
email = request.POST.get('email')
password = request.POST.get('password')
passwordVerif = request.POST.get('passwordVerif')
profession = request.POST.get('user')
data = request.POST.get('data')
if(email ==''):
context = {
'message': "Please enter Email ID"
}
return render(request,"signup.html",context)
elif(password == passwordVerif):
key = Fernet.generate_key()
f = Fernet(key)
password = bytes(password,'utf-8')
token = f.encrypt(password)
key = str(key)
print(key)
UserDetails.objects.create(email=email, name=name , password=token, key = key, profession=profession, data=data)
return redirect("/")
else:
context = {
'message': "Password doesn't match"
}
return render(request,"signup.html",context)
else:
return render(request,"signup.html",{})
# def index(request):
# context={ 'alpha': 'This is sent'}
# if request.method=='POST':
# pass
# else: return render(request, 'index.html',context)
#HttpResponse('This is homepage')
def about(request):
return render(request, 'about.html')
def services(request):
return render(request, 'services.html')
def contact(request):
if request.method == "POST":
email = request.POST.get('email')
name = request.POST.get('name')
phone = request.POST.get('phone')
address = request.POST.get('address')
contact = Contact(email=email , name=name, phone=phone,address=address,date=datetime.today())
contact.save()
# messages.success(request, 'Your message has been sent !')
return render(request,"contact.html")
def book(request):
if request.method == "POST":
email = request.POST.get('email')
name = request.POST.get('name')
phone = request.POST.get('phone')
address = request.POST.get('address')
book = Book(email=email , name=name, phone=phone,problem=address,date=datetime.today())
book.save()
return render(request,"book.html")
def report(request):
if request.method == "POST":
email = request.POST.get('email')
name = request.POST.get('name')
phone = request.POST.get('phone')
message = request.POST.get('message')
report = Report(email=email , name=name, phone=phone, message=message, date=datetime.today())
report.save()
return render(request,"report.html")
def diag(request):
if request.method == "POST":
email = request.POST.get('email')
name = request.POST.get('name')
phone = request.POST.get('phone')
tests = request.POST.get('drop1')
tests = str(tests)
if(email ==''):
context = {
'message': "Please enter Email ID"
}
return render(request,"diag.html",context)
else:
diag = Diagnostic(email=email , name=name, phone=phone, tests=tests, date=datetime.today())
diag.save()
# messages.success(request, 'Your message has been sent !')
return render(request,"diag.html")
# def appointment(request,email,name):
# if request.method == "POST":
# problem = request.POST.get('problem')
# book = Appoint(problem=problem, email=email, name=name)
# book.save()
# return render(request,"index.html") | 33.565476 | 124 | 0.567477 | 589 | 5,639 | 5.431239 | 0.183362 | 0.085964 | 0.109409 | 0.047515 | 0.417318 | 0.333542 | 0.281963 | 0.25758 | 0.25758 | 0.25758 | 0 | 0.005339 | 0.302536 | 5,639 | 168 | 125 | 33.565476 | 0.808035 | 0.109949 | 0 | 0.357143 | 0 | 0 | 0.09916 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.063492 | false | 0.103175 | 0.079365 | 0.015873 | 0.269841 | 0.007937 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
54e0ed7eefaaeac2cfcbec8d464ffc806c518afa | 9,892 | py | Python | compressor/tests/templatetags.py | bigmlcom/django_compressor | 66dfda503633018275fdb64ad46ef80dc9a3901d | [
"Apache-2.0"
] | null | null | null | compressor/tests/templatetags.py | bigmlcom/django_compressor | 66dfda503633018275fdb64ad46ef80dc9a3901d | [
"Apache-2.0"
] | null | null | null | compressor/tests/templatetags.py | bigmlcom/django_compressor | 66dfda503633018275fdb64ad46ef80dc9a3901d | [
"Apache-2.0"
] | null | null | null | from __future__ import with_statement
import os
import sys
from mock import Mock
from django.template import Template, Context, TemplateSyntaxError
from django.test import TestCase
from compressor.conf import settings
from compressor.signals import post_compress
from compressor.tests.base import css_tag, test_dir
def render(template_string, context_dict=None):
"""
A shortcut for testing template output.
"""
if context_dict is None:
context_dict = {}
c = Context(context_dict)
t = Template(template_string)
return t.render(c).strip()
class TemplatetagTestCase(TestCase):
def setUp(self):
self.old_enabled = settings.COMPRESS_ENABLED
settings.COMPRESS_ENABLED = True
self.context = {'MEDIA_URL': settings.COMPRESS_URL}
def tearDown(self):
settings.COMPRESS_ENABLED = self.old_enabled
def test_empty_tag(self):
template = u"""{% load compress %}{% compress js %}{% block js %}
{% endblock %}{% endcompress %}"""
self.assertEqual(u'', render(template, self.context))
def test_css_tag(self):
template = u"""{% load compress %}{% compress css %}
<link rel="stylesheet" href="{{ MEDIA_URL }}css/one.css" type="text/css">
<style type="text/css">p { border:5px solid green;}</style>
<link rel="stylesheet" href="{{ MEDIA_URL }}css/two.css" type="text/css">
{% endcompress %}"""
out = css_tag("/media/CACHE/css/e41ba2cc6982.css")
self.assertEqual(out, render(template, self.context))
def test_uppercase_rel(self):
template = u"""{% load compress %}{% compress css %}
<link rel="StyleSheet" href="{{ MEDIA_URL }}css/one.css" type="text/css">
<style type="text/css">p { border:5px solid green;}</style>
<link rel="StyleSheet" href="{{ MEDIA_URL }}css/two.css" type="text/css">
{% endcompress %}"""
out = css_tag("/media/CACHE/css/e41ba2cc6982.css")
self.assertEqual(out, render(template, self.context))
def test_nonascii_css_tag(self):
template = u"""{% load compress %}{% compress css %}
<link rel="stylesheet" href="{{ MEDIA_URL }}css/nonasc.css" type="text/css">
<style type="text/css">p { border:5px solid green;}</style>
{% endcompress %}
"""
out = css_tag("/media/CACHE/css/799f6defe43c.css")
self.assertEqual(out, render(template, self.context))
def test_js_tag(self):
template = u"""{% load compress %}{% compress js %}
<script src="{{ MEDIA_URL }}js/one.js" type="text/javascript"></script>
<script type="text/javascript">obj.value = "value";</script>
{% endcompress %}
"""
out = u'<script type="text/javascript" src="/media/CACHE/js/066cd253eada.js"></script>'
self.assertEqual(out, render(template, self.context))
def test_nonascii_js_tag(self):
template = u"""{% load compress %}{% compress js %}
<script src="{{ MEDIA_URL }}js/nonasc.js" type="text/javascript"></script>
<script type="text/javascript">var test_value = "\u2014";</script>
{% endcompress %}
"""
out = u'<script type="text/javascript" src="/media/CACHE/js/e214fe629b28.js"></script>'
self.assertEqual(out, render(template, self.context))
def test_nonascii_latin1_js_tag(self):
template = u"""{% load compress %}{% compress js %}
<script src="{{ MEDIA_URL }}js/nonasc-latin1.js" type="text/javascript" charset="latin-1"></script>
<script type="text/javascript">var test_value = "\u2014";</script>
{% endcompress %}
"""
out = u'<script type="text/javascript" src="/media/CACHE/js/be9e078b5ca7.js"></script>'
self.assertEqual(out, render(template, self.context))
def test_compress_tag_with_illegal_arguments(self):
template = u"""{% load compress %}{% compress pony %}
<script type="pony/application">unicorn</script>
{% endcompress %}"""
self.assertRaises(TemplateSyntaxError, render, template, {})
def test_debug_toggle(self):
template = u"""{% load compress %}{% compress js %}
<script src="{{ MEDIA_URL }}js/one.js" type="text/javascript"></script>
<script type="text/javascript">obj.value = "value";</script>
{% endcompress %}
"""
class MockDebugRequest(object):
GET = {settings.COMPRESS_DEBUG_TOGGLE: 'true'}
context = dict(self.context, request=MockDebugRequest())
out = u"""<script src="/media/js/one.js" type="text/javascript"></script>
<script type="text/javascript">obj.value = "value";</script>"""
self.assertEqual(out, render(template, context))
def test_named_compress_tag(self):
template = u"""{% load compress %}{% compress js inline foo %}
<script type="text/javascript">obj.value = "value";</script>
{% endcompress %}
"""
def listener(sender, **kwargs):
pass
callback = Mock(wraps=listener)
post_compress.connect(callback)
render(template)
args, kwargs = callback.call_args
context = kwargs['context']
self.assertEqual('foo', context['compressed']['name'])
class PrecompilerTemplatetagTestCase(TestCase):
def setUp(self):
self.old_enabled = settings.COMPRESS_ENABLED
self.old_precompilers = settings.COMPRESS_PRECOMPILERS
precompiler = os.path.join(test_dir, 'precompiler.py')
python = sys.executable
settings.COMPRESS_ENABLED = True
settings.COMPRESS_PRECOMPILERS = (
('text/coffeescript', '%s %s' % (python, precompiler)),
)
self.context = {'MEDIA_URL': settings.COMPRESS_URL}
def tearDown(self):
settings.COMPRESS_ENABLED = self.old_enabled
settings.COMPRESS_PRECOMPILERS = self.old_precompilers
def test_compress_coffeescript_tag(self):
template = u"""{% load compress %}{% compress js %}
<script type="text/coffeescript"># this is a comment.</script>
{% endcompress %}"""
out = script(src="/media/CACHE/js/e920d58f166d.js")
self.assertEqual(out, render(template, self.context))
def test_compress_coffeescript_tag_and_javascript_tag(self):
template = u"""{% load compress %}{% compress js %}
<script type="text/coffeescript"># this is a comment.</script>
<script type="text/javascript"># this too is a comment.</script>
{% endcompress %}"""
out = script(src="/media/CACHE/js/ef6b32a54575.js")
self.assertEqual(out, render(template, self.context))
def test_coffeescript_and_js_tag_with_compress_enabled_equals_false(self):
self.old_enabled = settings.COMPRESS_ENABLED
settings.COMPRESS_ENABLED = False
try:
template = u"""{% load compress %}{% compress js %}
<script type="text/coffeescript"># this is a comment.</script>
<script type="text/javascript"># this too is a comment.</script>
{% endcompress %}"""
out = (script('# this is a comment.\n') + '\n' +
script('# this too is a comment.'))
self.assertEqual(out, render(template, self.context))
finally:
settings.COMPRESS_ENABLED = self.old_enabled
def test_compress_coffeescript_tag_compress_enabled_is_false(self):
self.old_enabled = settings.COMPRESS_ENABLED
settings.COMPRESS_ENABLED = False
try:
template = u"""{% load compress %}{% compress js %}
<script type="text/coffeescript"># this is a comment.</script>
{% endcompress %}"""
out = script("# this is a comment.\n")
self.assertEqual(out, render(template, self.context))
finally:
settings.COMPRESS_ENABLED = self.old_enabled
def test_compress_coffeescript_file_tag_compress_enabled_is_false(self):
self.old_enabled = settings.COMPRESS_ENABLED
settings.COMPRESS_ENABLED = False
try:
template = u"""
{% load compress %}{% compress js %}
<script type="text/coffeescript" src="{{ MEDIA_URL }}js/one.coffee">
</script>
{% endcompress %}"""
out = script(src="/media/CACHE/js/one.95cfb869eead.js")
self.assertEqual(out, render(template, self.context))
finally:
settings.COMPRESS_ENABLED = self.old_enabled
def test_multiple_file_order_conserved(self):
self.old_enabled = settings.COMPRESS_ENABLED
settings.COMPRESS_ENABLED = False
try:
template = u"""
{% load compress %}{% compress js %}
<script type="text/coffeescript" src="{{ MEDIA_URL }}js/one.coffee">
</script>
<script src="{{ MEDIA_URL }}js/one.js"></script>
<script type="text/coffeescript" src="{{ MEDIA_URL }}js/one.js">
</script>
{% endcompress %}"""
out = '\n'.join([
script(src="/media/CACHE/js/one.95cfb869eead.js"),
script(scripttype="", src="/media/js/one.js"),
script(src="/media/CACHE/js/one.81a2cd965815.js"),])
self.assertEqual(out, render(template, self.context))
finally:
settings.COMPRESS_ENABLED = self.old_enabled
def script(content="", src="", scripttype="text/javascript"):
"""
returns a unicode text html script element.
>>> script('#this is a comment', scripttype="text/applescript")
'<script type="text/applescript">#this is a comment</script>'
"""
out_script = u'<script '
if scripttype:
out_script += u'type="%s" ' % scripttype
if src:
out_script += u'src="%s" ' % src
return out_script[:-1] + u'>%s</script>' % content
| 41.563025 | 107 | 0.616761 | 1,119 | 9,892 | 5.322609 | 0.138517 | 0.042982 | 0.044661 | 0.056414 | 0.708193 | 0.683848 | 0.663533 | 0.651612 | 0.622901 | 0.603425 | 0 | 0.011936 | 0.237768 | 9,892 | 237 | 108 | 41.738397 | 0.777984 | 0.021229 | 0 | 0.530928 | 0 | 0.046392 | 0.434097 | 0.120295 | 0 | 0 | 0 | 0 | 0.082474 | 1 | 0.118557 | false | 0.005155 | 0.046392 | 0 | 0.190722 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
54e1fce9e0db363710daf71e66104aba025bc831 | 477 | py | Python | ringapp/migrations/0009_auto_20150116_1759.py | rschwiebert/RingApp | 35675b3dd81728d71b7dc70071be3185d7f99bf4 | [
"MIT"
] | 10 | 2015-02-02T12:40:05.000Z | 2022-01-29T14:11:03.000Z | ringapp/migrations/0009_auto_20150116_1759.py | rschwiebert/RingApp | 35675b3dd81728d71b7dc70071be3185d7f99bf4 | [
"MIT"
] | 22 | 2015-01-07T21:29:24.000Z | 2022-03-19T01:15:13.000Z | ringapp/migrations/0009_auto_20150116_1759.py | rschwiebert/RingApp | 35675b3dd81728d71b7dc70071be3185d7f99bf4 | [
"MIT"
] | 1 | 2016-08-07T15:41:51.000Z | 2016-08-07T15:41:51.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ringapp', '0008_auto_20150116_1755'),
]
operations = [
migrations.AlterModelTable(
name='invariance',
table='invariance',
),
migrations.AlterModelTable(
name='invarianttype',
table='invariant_types',
),
]
| 20.73913 | 47 | 0.589099 | 38 | 477 | 7.157895 | 0.736842 | 0.183824 | 0.213235 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.050898 | 0.29979 | 477 | 22 | 48 | 21.681818 | 0.763473 | 0.044025 | 0 | 0.25 | 0 | 0 | 0.171806 | 0.050661 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
54e218f734c2d85cbff6df8c45d35331a499ae96 | 654 | py | Python | front-end/testsuite-python-lib/Python-3.1/Lib/json/tests/test_dump.py | MalloyPower/parsing-python | b2bca5eed07ea2af7a2001cd4f63becdfb0570be | [
"MIT"
] | 1 | 2020-11-26T18:53:46.000Z | 2020-11-26T18:53:46.000Z | Lib/json/tests/test_dump.py | orestis/python | 870a82aac7788ffa105e2a3e4480b3715c93bff6 | [
"PSF-2.0"
] | null | null | null | Lib/json/tests/test_dump.py | orestis/python | 870a82aac7788ffa105e2a3e4480b3715c93bff6 | [
"PSF-2.0"
] | 2 | 2018-08-06T04:37:38.000Z | 2022-02-27T18:07:12.000Z | from unittest import TestCase
from io import StringIO
import json
class TestDump(TestCase):
def test_dump(self):
sio = StringIO()
json.dump({}, sio)
self.assertEquals(sio.getvalue(), '{}')
def test_dumps(self):
self.assertEquals(json.dumps({}), '{}')
def test_encode_truefalse(self):
self.assertEquals(json.dumps(
{True: False, False: True}, sort_keys=True),
'{"false": true, "true": false}')
self.assertEquals(json.dumps(
{2: 3.0, 4.0: 5, False: 1, 6: True}, sort_keys=True),
'{"false": 1, "2": 3.0, "4.0": 5, "6": true}')
| 29.727273 | 69 | 0.547401 | 81 | 654 | 4.345679 | 0.37037 | 0.181818 | 0.170455 | 0.213068 | 0.318182 | 0.034091 | 0 | 0 | 0 | 0 | 0 | 0.034115 | 0.282875 | 654 | 21 | 70 | 31.142857 | 0.716418 | 0 | 0 | 0.117647 | 0 | 0.058824 | 0.117737 | 0 | 0 | 0 | 0 | 0 | 0.235294 | 1 | 0.176471 | false | 0 | 0.176471 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
54e789caffaeff5bc10488464b0b5f0c11ea3f0e | 522 | py | Python | App/migrations/0010_remove_user_percentage_preferences_user_preferences.py | dlanghorne0428/StudioMusicPlayer | 54dabab896b96d90b68d6435edfd52fe6a866bc2 | [
"MIT"
] | null | null | null | App/migrations/0010_remove_user_percentage_preferences_user_preferences.py | dlanghorne0428/StudioMusicPlayer | 54dabab896b96d90b68d6435edfd52fe6a866bc2 | [
"MIT"
] | 44 | 2022-01-21T01:33:59.000Z | 2022-03-26T23:35:25.000Z | App/migrations/0010_remove_user_percentage_preferences_user_preferences.py | dlanghorne0428/StudioMusicPlayer | 54dabab896b96d90b68d6435edfd52fe6a866bc2 | [
"MIT"
] | null | null | null | # Generated by Django 4.0 on 2022-03-03 02:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('App', '0009_alter_song_holiday_alter_songfileinput_holiday'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='percentage_preferences',
),
migrations.AddField(
model_name='user',
name='preferences',
field=models.JSONField(null=True),
),
]
| 22.695652 | 71 | 0.597701 | 51 | 522 | 5.941176 | 0.705882 | 0.059406 | 0.085809 | 0.112211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.049046 | 0.296935 | 522 | 22 | 72 | 23.727273 | 0.776567 | 0.082375 | 0 | 0.25 | 1 | 0 | 0.199161 | 0.15304 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
54e901540b5f6fa6fc62f5e51511aa0c656882ca | 3,653 | py | Python | venv/Lib/site-packages/captcha/conf/settings.py | Rudeus3Greyrat/admin-management | 7e81d2b1908afa3ea57a82c542c9aebb1d0ffd23 | [
"MIT"
] | 1 | 2020-05-21T06:48:34.000Z | 2020-05-21T06:48:34.000Z | venv/Lib/site-packages/captcha/conf/settings.py | Rudeus3Greyrat/admin-management | 7e81d2b1908afa3ea57a82c542c9aebb1d0ffd23 | [
"MIT"
] | 3 | 2021-03-19T03:07:36.000Z | 2021-04-08T20:33:38.000Z | venv/Lib/site-packages/captcha/conf/settings.py | Rudeus3Greyrat/admin-management | 7e81d2b1908afa3ea57a82c542c9aebb1d0ffd23 | [
"MIT"
] | 1 | 2020-05-21T06:48:36.000Z | 2020-05-21T06:48:36.000Z | import os
import warnings
from django.conf import settings
CAPTCHA_FONT_PATH = getattr(settings, 'CAPTCHA_FONT_PATH', os.path.normpath(os.path.join(os.path.dirname(__file__), '..', 'fonts/Vera.ttf')))
CAPTCHA_FONT_SIZE = getattr(settings, 'CAPTCHA_FONT_SIZE', 22)
CAPTCHA_LETTER_ROTATION = getattr(settings, 'CAPTCHA_LETTER_ROTATION', (-35, 35))
CAPTCHA_BACKGROUND_COLOR = getattr(settings, 'CAPTCHA_BACKGROUND_COLOR', '#ffffff')
CAPTCHA_FOREGROUND_COLOR = getattr(settings, 'CAPTCHA_FOREGROUND_COLOR', '#001100')
CAPTCHA_CHALLENGE_FUNCT = getattr(settings, 'CAPTCHA_CHALLENGE_FUNCT', 'captcha.helpers.random_char_challenge')
CAPTCHA_NOISE_FUNCTIONS = getattr(settings, 'CAPTCHA_NOISE_FUNCTIONS', ('captcha.helpers.noise_arcs', 'captcha.helpers.noise_dots',))
CAPTCHA_FILTER_FUNCTIONS = getattr(settings, 'CAPTCHA_FILTER_FUNCTIONS', ('captcha.helpers.post_smooth',))
CAPTCHA_WORDS_DICTIONARY = getattr(settings, 'CAPTCHA_WORDS_DICTIONARY', '/usr/share/dict/words')
CAPTCHA_PUNCTUATION = getattr(settings, 'CAPTCHA_PUNCTUATION', '''_"',.;:-''')
CAPTCHA_FLITE_PATH = getattr(settings, 'CAPTCHA_FLITE_PATH', None)
CAPTCHA_SOX_PATH = getattr(settings, 'CAPTCHA_SOX_PATH', None)
CAPTCHA_TIMEOUT = getattr(settings, 'CAPTCHA_TIMEOUT', 5) # Minutes
CAPTCHA_LENGTH = int(getattr(settings, 'CAPTCHA_LENGTH', 4)) # Chars
# CAPTCHA_IMAGE_BEFORE_FIELD = getattr(settings, 'CAPTCHA_IMAGE_BEFORE_FIELD', True)
CAPTCHA_DICTIONARY_MIN_LENGTH = getattr(settings, 'CAPTCHA_DICTIONARY_MIN_LENGTH', 0)
CAPTCHA_DICTIONARY_MAX_LENGTH = getattr(settings, 'CAPTCHA_DICTIONARY_MAX_LENGTH', 99)
CAPTCHA_IMAGE_SIZE = getattr(settings, 'CAPTCHA_IMAGE_SIZE', None)
CAPTCHA_IMAGE_TEMPLATE = getattr(settings, 'CAPTCHA_IMAGE_TEMPLATE', 'captcha/image.html')
CAPTCHA_HIDDEN_FIELD_TEMPLATE = getattr(settings, 'CAPTCHA_HIDDEN_FIELD_TEMPLATE', 'captcha/hidden_field.html')
CAPTCHA_TEXT_FIELD_TEMPLATE = getattr(settings, 'CAPTCHA_TEXT_FIELD_TEMPLATE', 'captcha/text_field.html')
if getattr(settings, 'CAPTCHA_FIELD_TEMPLATE', None):
msg = ("CAPTCHA_FIELD_TEMPLATE setting is deprecated in favor of widget's template_name.")
warnings.warn(msg, DeprecationWarning)
CAPTCHA_FIELD_TEMPLATE = getattr(settings, 'CAPTCHA_FIELD_TEMPLATE', None)
if getattr(settings, 'CAPTCHA_OUTPUT_FORMAT', None):
msg = ("CAPTCHA_OUTPUT_FORMAT setting is deprecated in favor of widget's template_name.")
warnings.warn(msg, DeprecationWarning)
CAPTCHA_OUTPUT_FORMAT = getattr(settings, 'CAPTCHA_OUTPUT_FORMAT', None)
CAPTCHA_MATH_CHALLENGE_OPERATOR = getattr(settings, 'CAPTCHA_MATH_CHALLENGE_OPERATOR', '*')
CAPTCHA_GET_FROM_POOL = getattr(settings, 'CAPTCHA_GET_FROM_POOL', False)
CAPTCHA_GET_FROM_POOL_TIMEOUT = getattr(settings, 'CAPTCHA_GET_FROM_POOL_TIMEOUT', 5)
CAPTCHA_TEST_MODE = getattr(settings, 'CAPTCHA_TEST_MODE', False)
# Failsafe
if CAPTCHA_DICTIONARY_MIN_LENGTH > CAPTCHA_DICTIONARY_MAX_LENGTH:
CAPTCHA_DICTIONARY_MIN_LENGTH, CAPTCHA_DICTIONARY_MAX_LENGTH = CAPTCHA_DICTIONARY_MAX_LENGTH, CAPTCHA_DICTIONARY_MIN_LENGTH
def _callable_from_string(string_or_callable):
if callable(string_or_callable):
return string_or_callable
else:
return getattr(__import__('.'.join(string_or_callable.split('.')[:-1]), {}, {}, ['']), string_or_callable.split('.')[-1])
def get_challenge(generator=None):
return _callable_from_string(generator or CAPTCHA_CHALLENGE_FUNCT)
def noise_functions():
if CAPTCHA_NOISE_FUNCTIONS:
return map(_callable_from_string, CAPTCHA_NOISE_FUNCTIONS)
return []
def filter_functions():
if CAPTCHA_FILTER_FUNCTIONS:
return map(_callable_from_string, CAPTCHA_FILTER_FUNCTIONS)
return []
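
# Illustrative only (added): the dotted-path helpers above turn setting strings
# like 'captcha.helpers.noise_dots' into the callables they name, so all of the
# settings can stay plain strings, e.g.
#
#     challenge, response = get_challenge()()   # e.g. ('PQRM', 'pqrm')
#     noise = list(noise_functions())           # [noise_arcs, noise_dots]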
| 52.942029 | 141 | 0.800712 | 456 | 3,653 | 5.97807 | 0.221491 | 0.165077 | 0.234043 | 0.047689 | 0.319516 | 0.233309 | 0.152605 | 0.121056 | 0.121056 | 0.108217 | 0 | 0.006019 | 0.090337 | 3,653 | 68 | 142 | 53.720588 | 0.814324 | 0.028744 | 0 | 0.076923 | 0 | 0 | 0.288738 | 0.196444 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.076923 | 0.019231 | 0.288462 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
54eceeb38625ac7f7302479b3298ad5a3adabd40 | 1,307 | py | Python | src/lora_multihop/module_config.py | marv1913/lora_multihop | ef07493c2f763d07161fa25d4b884ef79b94afa4 | [
"MIT"
] | null | null | null | src/lora_multihop/module_config.py | marv1913/lora_multihop | ef07493c2f763d07161fa25d4b884ef79b94afa4 | [
"MIT"
] | 1 | 2022-02-20T13:18:13.000Z | 2022-02-24T18:32:23.000Z | src/lora_multihop/module_config.py | marv1913/lora_multihop | ef07493c2f763d07161fa25d4b884ef79b94afa4 | [
"MIT"
] | null | null | null | import logging
from lora_multihop import serial_connection, variables
def config_module(configuration=variables.MODULE_CONFIG):
if serial_connection.execute_command(configuration, [variables.STATUS_OK]):
serial_connection.execute_command('AT+SEND=1', [variables.STATUS_OK])
serial_connection.execute_command('a', ['AT,SENDING', 'AT,SENDED'])
logging.debug('module config successfully set')
return True
logging.warning("could not set module config")
return False
def set_address(address):
cmd = f'AT+ADDR={address}'
if serial_connection.execute_command(serial_connection.str_to_bytes(cmd), [variables.STATUS_OK]):
logging.debug(f'module address successfully set to: {address}')
return True
logging.warning("could not set module address")
return False
def get_current_address():
serial_connection.execute_command(serial_connection.str_to_bytes(variables.GET_ADDR))
addr = serial_connection.response_q.get(variables.COMMAND_VERIFICATION_TIMEOUT)
addr = serial_connection.bytes_to_str(addr)
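    # the module is expected to reply in the form "AT,<address>,OK"; split on
    # the LoRa delimiter and verify the framing before returning the address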
addr_as_list = addr.split(variables.LORA_MODULE_DELIMITER)
if addr_as_list[0].strip() != 'AT' or addr_as_list[2].strip() != 'OK':
raise ValueError('could not get address of module')
return addr_as_list[1]
| 39.606061 | 101 | 0.746748 | 173 | 1,307 | 5.387283 | 0.323699 | 0.171674 | 0.123391 | 0.160944 | 0.345494 | 0.309013 | 0.309013 | 0.208155 | 0.120172 | 0 | 0 | 0.003604 | 0.150727 | 1,307 | 32 | 102 | 40.84375 | 0.836036 | 0 | 0 | 0.16 | 0 | 0 | 0.161438 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.08 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
54f7f3b4bb05515aa800aef3ce44e23eb1933bf4 | 443 | py | Python | Desafios/desafio_041.py | romulogoleniesky/Python_C_E_V | 2dcf5fb3505a20443788a284c52114c6434118ce | [
"MIT"
] | null | null | null | Desafios/desafio_041.py | romulogoleniesky/Python_C_E_V | 2dcf5fb3505a20443788a284c52114c6434118ce | [
"MIT"
] | null | null | null | Desafios/desafio_041.py | romulogoleniesky/Python_C_E_V | 2dcf5fb3505a20443788a284c52114c6434118ce | [
"MIT"
] | null | null | null | import datetime
ano = datetime.datetime.now().year
nasc = int(input("Enter your year of birth: "))
if (ano - nasc) <= 9:
    categoria = "MIRIM"
elif (ano - nasc) <= 14:
    categoria = "INFANTIL"
elif (ano - nasc) <= 19:
    categoria = "JUNIOR"
elif (ano - nasc) <= 25:
    categoria = "SÊNIOR"
else:
    categoria = "MASTER"
print(f"The athlete's category is {categoria}.")
| 26.058824 | 53 | 0.616253 | 62 | 443 | 4.403226 | 0.548387 | 0.21978 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036932 | 0.205418 | 443 | 16 | 54 | 27.6875 | 0.738636 | 0 | 0 | 0 | 0 | 0 | 0.235294 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0.066667 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
54fbc8636ea0532bcc0fa404a8de1597f6db3f5f | 354 | py | Python | myproject/apps/events/migrations/0002_alter_eventhero_options.py | cahyareza/django_admin_cookbook | 6c82dbd3aebe455b68feb020d5cad7978b8191b7 | [
"MIT"
] | null | null | null | myproject/apps/events/migrations/0002_alter_eventhero_options.py | cahyareza/django_admin_cookbook | 6c82dbd3aebe455b68feb020d5cad7978b8191b7 | [
"MIT"
] | null | null | null | myproject/apps/events/migrations/0002_alter_eventhero_options.py | cahyareza/django_admin_cookbook | 6c82dbd3aebe455b68feb020d5cad7978b8191b7 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.12 on 2022-03-28 11:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='eventhero',
options={'verbose_name_plural': 'Event heroes'},
),
]
| 19.666667 | 60 | 0.601695 | 36 | 354 | 5.833333 | 0.861111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.078431 | 0.279661 | 354 | 17 | 61 | 20.823529 | 0.745098 | 0.129944 | 0 | 0 | 1 | 0 | 0.189542 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0708030cc6b0ac486ef0bd568029e80e9873483c | 2,332 | py | Python | particle.py | coush001/Imperial-MSc-Group-Project-2 | 9309217895802d11c6fe9d2dca9b21f98fbc1c61 | [
"MIT"
] | null | null | null | particle.py | coush001/Imperial-MSc-Group-Project-2 | 9309217895802d11c6fe9d2dca9b21f98fbc1c61 | [
"MIT"
] | null | null | null | particle.py | coush001/Imperial-MSc-Group-Project-2 | 9309217895802d11c6fe9d2dca9b21f98fbc1c61 | [
"MIT"
] | null | null | null | from itertools import count
import numpy as np
class Particle(object):
"""Object containing all the properties for a single particle"""
_ids = count(0)
def __init__(self, main_data=None, x=np.zeros(2)):
self.id = next(self._ids)
self.main_data = main_data
self.x = np.array(x)
self.v = np.zeros(2)
self.a = np.zeros(2)
self.D = 0
self.rho = main_data.rho0
self.P = 0
self.m = main_data.dx ** 2 * main_data.rho0 # initial mass depends on the initial particle spacing
self.boundary = False # Particle by default is not on the boundary
# For predictor corrector
self.prev_x = np.array(x)
self.prev_v = np.zeros(2)
self.prev_rho = main_data.rho0
def calc_index(self):
"""Calculates the 2D integer index for the particle's location in the search grid"""
# Calculates the bucket coordinates
self.list_num = np.array((self.x - self.main_data.min_x) /
(2.0 * self.main_data.h), int)
def B(self):
return (self.main_data.rho0 * self.main_data.c0 ** 2) / self.main_data.gamma
def update_P(self):
"""
Equation of state
System is assumed slightly compressible
"""
rho0 = self.main_data.rho0
gamma = self.main_data.gamma
self.P = self.B() * ((self.rho / rho0)**gamma - 1)
def set_main_data(self, main_data):
self.main_data = main_data
def set_x(self, x):
self.x = x
self.calc_index()
def set_v(self, v):
self.v = v
def set_a(self, a):
self.a = a
def set_D(self, D):
self.D = D
def set_rho(self, rho):
self.rho = rho
self.update_P()
    def set_m(self, m):
        # renamed from `m` to match the other setters: a method named `m` is
        # shadowed by the instance attribute self.m assigned in __init__
        self.m = m
def list_attributes(self):
x_s = "position: " + str(self.x) + ", "
v_s = "velocity: " + str(self.v) + ", "
a_s = "acceleration: " + str(self.a) + ", "
D_s = "derivative of density: " + str(self.D) + ", "
rho_s = "density: " + str(self.rho) + ", "
m_s = "mass: " + str(self.m) + ", "
P_s = "pressure: " + str(self.P) + ", "
boundary_s = "is boundary: " + str(self.boundary)
return [x_s + v_s + a_s + D_s + rho_s + m_s + P_s + boundary_s]
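
# Minimal construction sketch (added; the config values are assumptions, not
# from the original project): Particle only needs an object exposing rho0, dx,
# h, c0, gamma and min_x for the methods above to run.
if __name__ == "__main__":
    from types import SimpleNamespace
    cfg = SimpleNamespace(rho0=1000.0, dx=0.1, h=1.3 * 0.1, c0=20.0,
                          gamma=7.0, min_x=np.zeros(2))
    p = Particle(main_data=cfg, x=[0.5, 0.5])
    p.set_rho(1010.0)   # also recomputes pressure via update_P()
    p.calc_index()      # bucket index for the neighbour-search grid
    print(p.list_attributes())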
| 30.285714 | 107 | 0.551887 | 344 | 2,332 | 3.578488 | 0.264535 | 0.116978 | 0.10723 | 0.038993 | 0.090983 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013208 | 0.318182 | 2,332 | 76 | 108 | 30.684211 | 0.761006 | 0.150086 | 0 | 0.037736 | 0 | 0 | 0.056273 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.226415 | false | 0 | 0.037736 | 0.018868 | 0.339623 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
070a513dc67a15b46d7b419d4ba1b638e56fb11a | 731 | py | Python | test/rdfa/test_non_xhtml.py | RDFLib/PyRDFa | efc24d4940910ca1e65900c25b62047301bbdcc7 | [
"BSD-3-Clause"
] | 8 | 2015-04-01T19:55:22.000Z | 2020-04-25T08:50:05.000Z | test/rdfa/test_non_xhtml.py | DalavanCloud/PyRDFa | fd5c8826fb9e5f6f5a578564b1149fdae6c40aad | [
"BSD-3-Clause"
] | null | null | null | test/rdfa/test_non_xhtml.py | DalavanCloud/PyRDFa | fd5c8826fb9e5f6f5a578564b1149fdae6c40aad | [
"BSD-3-Clause"
] | 1 | 2019-02-12T03:15:00.000Z | 2019-02-12T03:15:00.000Z | from unittest import TestCase
from pyRdfa import pyRdfa
class NonXhtmlTest(TestCase):
"""
    RDFa that is not in well-formed XHTML is passed through html5lib.
These tests make sure that this RDFa can be processed both from
a file, and from a URL.
"""
target1 = '<og:isbn>9780596516499</og:isbn>'
target2 = '<gr:typeOfGood rdf:resource="urn:x-domain:oreilly.com:product:9780596803391.EBOOK"/>'
def test_url(self):
g = pyRdfa().rdf_from_source('http://oreilly.com/catalog/9780596516499/')
self.assert_(self.target1.encode('utf-8') in g)
def test_file(self):
g = pyRdfa().rdf_from_source('test/rdfa/oreilly.html')
self.assert_(self.target2.encode('utf-8') in g)
| 33.227273 | 100 | 0.682627 | 105 | 731 | 4.67619 | 0.571429 | 0.020367 | 0.044807 | 0.057026 | 0.150713 | 0.09776 | 0 | 0 | 0 | 0 | 0 | 0.077703 | 0.19015 | 731 | 21 | 101 | 34.809524 | 0.751689 | 0.212038 | 0 | 0 | 0 | 0.090909 | 0.343013 | 0.22323 | 0 | 0 | 0 | 0 | 0.181818 | 1 | 0.181818 | false | 0 | 0.181818 | 0 | 0.636364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
070a6926f75c6689b9bf183a8c81961b1ffe5bbd | 1,150 | py | Python | python/pyoai/setup.py | jr3cermak/robs-kitchensink | 74b7eb1b1acd8b700d61c5a9ba0c69be3cc6763a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | python/pyoai/setup.py | jr3cermak/robs-kitchensink | 74b7eb1b1acd8b700d61c5a9ba0c69be3cc6763a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | python/pyoai/setup.py | jr3cermak/robs-kitchensink | 74b7eb1b1acd8b700d61c5a9ba0c69be3cc6763a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
from os.path import join, dirname
setup(
name='pyoai',
version='2.4.6.b',
author='Infrae',
author_email='rob.cermak@gmail.com',
url='https://github.com/jr3cermak/robs-kitchensink/tree/master/python/pyoai',
classifiers=["Development Status :: 4 - Beta",
"Programming Language :: Python",
"License :: OSI Approved :: BSD License",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Web Environment"],
description="""\
The oaipmh module is a Python implementation of an "Open Archives
Initiative Protocol for Metadata Harvesting" (version 2) client and server.
The protocol is described here:
http://www.openarchives.org/OAI/openarchivesprotocol.html
""",
long_description=(open(join(dirname(__file__), 'README.rst')).read()+
'\n\n'+
open(join(dirname(__file__), 'HISTORY.txt')).read()),
packages=find_packages('src'),
package_dir = {'': 'src'},
zip_safe=False,
license='BSD',
keywords='OAI-PMH xml archive',
install_requires=['lxml'],
)
| 35.9375 | 81 | 0.650435 | 133 | 1,150 | 5.511278 | 0.736842 | 0.04502 | 0.040928 | 0.051842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00655 | 0.203478 | 1,150 | 31 | 82 | 37.096774 | 0.793668 | 0 | 0 | 0 | 0 | 0 | 0.511304 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.068966 | 0 | 0.068966 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
070b402dc83b92f4ca29c79684b3e9fb26a6238f | 4,201 | py | Python | utils/functions.py | Roozbeh-Bazargani/CPSC-533R-project | 453f093b23d2363f09c61079d1d4fbd878abf3be | [
"MIT"
] | null | null | null | utils/functions.py | Roozbeh-Bazargani/CPSC-533R-project | 453f093b23d2363f09c61079d1d4fbd878abf3be | [
"MIT"
] | null | null | null | utils/functions.py | Roozbeh-Bazargani/CPSC-533R-project | 453f093b23d2363f09c61079d1d4fbd878abf3be | [
"MIT"
] | null | null | null | import torch
from torch import nn
import math
#0 left hip
#1 left knee
#2 left foot
#3 right hip
#4 right knee
#5 right foot
#6 middle hip
#7 neck
#8 nose
#9 head
#10 left shoulder
#11 left elbow
#12 left wrist
#13 right shoulder
#14 right elbow
#15 right wrist
def random_rotation(J3d):
    J = J3d  # aliasing is fine: rotation() below returns a new tensor and leaves J3d unchanged
batch_size = J.shape[0]
theta = torch.rand(batch_size).cuda() * 2*torch.tensor(math.pi).cuda() # random theta
root = J[:,:,8] # joint 8 = nose is root
J3d_R = rotation(J.cuda(), theta.cuda(), root.unsqueeze(-1).cuda(), False)
    return J3d_R, theta, root  # theta and root are needed later to reverse this rotation
def rotation(J, theta, root, is_reversed): # rotation over y axis by theta
D = root[:,2].cuda() # absolute depth of the root joint
batch_size = root.shape[0]
v_t = torch.zeros((batch_size, 3, 1)).cuda()
v_t[:, 2, :] = D.cuda() # translation vector
if is_reversed:
root, v_t = v_t, root # swap
theta = -theta
# R = torch.tensor([[torch.cos(theta), -torch.sin(theta), 0], [torch.sin(theta), torch.cos(theta), 0], [0, 0, 1]]) # rotation matrix over z by theta degrees
R = torch.zeros((batch_size, 3, 3)).cuda() # rotation matrix over y by theta degrees
R[:, 0, 0] = torch.cos(theta)
R[:, 0, 2] = torch.sin(theta)
R[:, 1, 1] = torch.ones(batch_size)
R[:, 2, 0] = -torch.sin(theta)
R[:, 2, 2] = torch.cos(theta)
# R = torch.tensor([[torch.cos(theta), 0, torch.sin(theta)], [0, 1, 0], [-torch.sin(theta), 0, torch.cos(theta)]]) # rotation matrix over y by theta degrees
# R = torch.tensor([[1, 0, 0], [0, torch.cos(theta), -torch.sin(theta)], [0, torch.sin(theta), torch.cos(theta)]]) # rotation matrix over x by theta degrees
J_R = torch.matmul(R, J - root) + v_t # rotation
return J_R
def reverse_rotation(J3d_R, theta, root):
    J = J3d_R  # aliasing is fine: rotation() does not modify its input
return rotation(J.cuda(), theta.cuda(), root.unsqueeze(-1).cuda(), True)
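
# Round-trip sanity check (added sketch, not in the original file): rotation()
# with is_reversed=True is the exact inverse of the forward call, so reversing
# a random rotation should recover the input up to float error. Assumes a CUDA
# device, matching the hard-coded .cuda() calls above.
# J = torch.rand(4, 3, 16).cuda()
# J_R, theta, root = random_rotation(J)
# assert torch.allclose(reverse_rotation(J_R, theta, root), J, atol=1e-5)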
def temporal_loss(J, K, J_R, K_R):
    # J is J3d at time t and K is J3d at time t+k; J_R and K_R are the reversed
    # rotations of the corresponding rotated poses. The loss penalises any
    # change the rotate/reverse round trip introduces into the displacement J-K.
    diff = (J.reshape(J.shape[0], 3, 16) - K.reshape(K.shape[0], 3, 16)
            - J_R.reshape(J_R.shape[0], 3, 16) + K_R.reshape(K_R.shape[0], 3, 16))
    mse_fn = nn.MSELoss()
    return mse_fn(diff, torch.zeros_like(diff))
'''
def temporal_loss(J, K, J_R, K_R): # J is J3d at time t and K is J3d at time t+k. J_R means the reversed rotation of J
return torch.norm(J - K - J_R + K_R, dim=1)**2
'''
'''
def random_rotation(J3d):
# J = torch.transpose(J3d, 1, 2)
J = J3d
root = torch.zeros(J.shape[0:2])
for i in range(J.shape[0]):
theta = torch.rand(1).cuda() * 2*torch.tensor(math.pi).cuda() # random theta
root[i] = J[i,:,8] # joint 8 = nose is root
temp = rotation(J[i,:,:], theta, root[i].unsqueeze(1), False)
# print(temp.shape)
J[i,:,:] = temp
return J, theta, root # need these values in the code
def rotation(J, theta, root, is_reversed): # rotation over y axis by theta
D = root[2] # absolute depth of the root joint
v_t = torch.tensor([[0], [0], [D]]).cuda() # translation vector
if is_reversed:
root, v_t = v_t, root # swap
theta = -theta
# R = torch.tensor([[torch.cos(theta), -torch.sin(theta), 0], [torch.sin(theta), torch.cos(theta), 0], [0, 0, 1]]) # rotation matrix over z by theta degrees
R = torch.tensor([[torch.cos(theta), 0, torch.sin(theta)], [0, 1, 0], [-torch.sin(theta), 0, torch.cos(theta)]]).cuda() # rotation matrix over y by theta degrees
# R = torch.tensor([[1, 0, 0], [0, torch.cos(theta), -torch.sin(theta)], [0, torch.sin(theta), torch.cos(theta)]]) # rotation matrix over x by theta degrees
J_R = torch.matmul(R, J.cuda() - root.cuda()) + v_t # rotation
return J_R
def reverse_rotation(J3d_R, theta, root):
# J = torch.transpose(J3d_R, 1, 2)
J = J3d_R
for i in range(J.shape[0]):
J[i,:,:] = rotation(J[i,:,:].cuda(), theta.cuda(), root[i].unsqueeze(1).cuda(), True)
return J
''' | 42.434343 | 181 | 0.633183 | 778 | 4,201 | 3.352185 | 0.137532 | 0.041411 | 0.045629 | 0.039877 | 0.754218 | 0.704755 | 0.655291 | 0.639571 | 0.639571 | 0.592791 | 0 | 0.047715 | 0.176863 | 4,201 | 99 | 182 | 42.434343 | 0.706478 | 0.303023 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.09375 | 0 | 0.34375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0711c47f68c0681b184df5cde182256dcc62322f | 11,286 | py | Python | sdk/python/pulumi_azure_native/notificationhubs/latest/get_namespace.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/notificationhubs/latest/get_namespace.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/notificationhubs/latest/get_namespace.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetNamespaceResult',
'AwaitableGetNamespaceResult',
'get_namespace',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:notificationhubs:getNamespace'.""", DeprecationWarning)
@pulumi.output_type
class GetNamespaceResult:
"""
Description of a Namespace resource.
"""
def __init__(__self__, created_at=None, critical=None, data_center=None, enabled=None, id=None, location=None, metric_id=None, name=None, namespace_type=None, provisioning_state=None, region=None, scale_unit=None, service_bus_endpoint=None, sku=None, status=None, subscription_id=None, tags=None, type=None, updated_at=None):
if created_at and not isinstance(created_at, str):
raise TypeError("Expected argument 'created_at' to be a str")
pulumi.set(__self__, "created_at", created_at)
if critical and not isinstance(critical, bool):
raise TypeError("Expected argument 'critical' to be a bool")
pulumi.set(__self__, "critical", critical)
if data_center and not isinstance(data_center, str):
raise TypeError("Expected argument 'data_center' to be a str")
pulumi.set(__self__, "data_center", data_center)
if enabled and not isinstance(enabled, bool):
raise TypeError("Expected argument 'enabled' to be a bool")
pulumi.set(__self__, "enabled", enabled)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if metric_id and not isinstance(metric_id, str):
raise TypeError("Expected argument 'metric_id' to be a str")
pulumi.set(__self__, "metric_id", metric_id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if namespace_type and not isinstance(namespace_type, str):
raise TypeError("Expected argument 'namespace_type' to be a str")
pulumi.set(__self__, "namespace_type", namespace_type)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if region and not isinstance(region, str):
raise TypeError("Expected argument 'region' to be a str")
pulumi.set(__self__, "region", region)
if scale_unit and not isinstance(scale_unit, str):
raise TypeError("Expected argument 'scale_unit' to be a str")
pulumi.set(__self__, "scale_unit", scale_unit)
if service_bus_endpoint and not isinstance(service_bus_endpoint, str):
raise TypeError("Expected argument 'service_bus_endpoint' to be a str")
pulumi.set(__self__, "service_bus_endpoint", service_bus_endpoint)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if subscription_id and not isinstance(subscription_id, str):
raise TypeError("Expected argument 'subscription_id' to be a str")
pulumi.set(__self__, "subscription_id", subscription_id)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if updated_at and not isinstance(updated_at, str):
raise TypeError("Expected argument 'updated_at' to be a str")
pulumi.set(__self__, "updated_at", updated_at)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[str]:
"""
The time the namespace was created.
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter
def critical(self) -> Optional[bool]:
"""
Whether or not the namespace is set as Critical.
"""
return pulumi.get(self, "critical")
@property
@pulumi.getter(name="dataCenter")
def data_center(self) -> Optional[str]:
"""
Data center for the namespace
"""
return pulumi.get(self, "data_center")
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
"""
Whether or not the namespace is currently enabled.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="metricId")
def metric_id(self) -> str:
"""
Identifier for Azure Insights metrics
"""
return pulumi.get(self, "metric_id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="namespaceType")
def namespace_type(self) -> Optional[str]:
"""
The namespace type.
"""
return pulumi.get(self, "namespace_type")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Provisioning state of the Namespace.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def region(self) -> Optional[str]:
"""
Specifies the targeted region in which the namespace should be created. It can be any of the following values: Australia East, Australia Southeast, Central US, East US, East US 2, West US, North Central US, South Central US, East Asia, Southeast Asia, Brazil South, Japan East, Japan West, North Europe, West Europe
"""
return pulumi.get(self, "region")
@property
@pulumi.getter(name="scaleUnit")
def scale_unit(self) -> Optional[str]:
"""
ScaleUnit where the namespace gets created
"""
return pulumi.get(self, "scale_unit")
@property
@pulumi.getter(name="serviceBusEndpoint")
def service_bus_endpoint(self) -> Optional[str]:
"""
Endpoint you can use to perform NotificationHub operations.
"""
return pulumi.get(self, "service_bus_endpoint")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
The SKU of the created namespace.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def status(self) -> Optional[str]:
"""
Status of the namespace. It can be any of these values: 1 = Created/Active, 2 = Creating, 3 = Suspended, 4 = Deleting.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> Optional[str]:
"""
The Id of the Azure subscription associated with the namespace.
"""
return pulumi.get(self, "subscription_id")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="updatedAt")
def updated_at(self) -> Optional[str]:
"""
The time the namespace was updated.
"""
return pulumi.get(self, "updated_at")
class AwaitableGetNamespaceResult(GetNamespaceResult):
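# __await__ below is written as a generator that never yields: the
# "if False: yield self" exists only so Python treats the method as a
# generator function. This lets the synchronously populated result object
# be awaited from async code, immediately returning its values.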
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNamespaceResult(
created_at=self.created_at,
critical=self.critical,
data_center=self.data_center,
enabled=self.enabled,
id=self.id,
location=self.location,
metric_id=self.metric_id,
name=self.name,
namespace_type=self.namespace_type,
provisioning_state=self.provisioning_state,
region=self.region,
scale_unit=self.scale_unit,
service_bus_endpoint=self.service_bus_endpoint,
sku=self.sku,
status=self.status,
subscription_id=self.subscription_id,
tags=self.tags,
type=self.type,
updated_at=self.updated_at)
def get_namespace(namespace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNamespaceResult:
"""
Description of a Namespace resource.
Latest API Version: 2017-04-01.
:param str namespace_name: The namespace name.
:param str resource_group_name: The name of the resource group.
"""
pulumi.log.warn("""get_namespace is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:notificationhubs:getNamespace'.""")
__args__ = dict()
__args__['namespaceName'] = namespace_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:notificationhubs/latest:getNamespace', __args__, opts=opts, typ=GetNamespaceResult).value
return AwaitableGetNamespaceResult(
created_at=__ret__.created_at,
critical=__ret__.critical,
data_center=__ret__.data_center,
enabled=__ret__.enabled,
id=__ret__.id,
location=__ret__.location,
metric_id=__ret__.metric_id,
name=__ret__.name,
namespace_type=__ret__.namespace_type,
provisioning_state=__ret__.provisioning_state,
region=__ret__.region,
scale_unit=__ret__.scale_unit,
service_bus_endpoint=__ret__.service_bus_endpoint,
sku=__ret__.sku,
status=__ret__.status,
subscription_id=__ret__.subscription_id,
tags=__ret__.tags,
type=__ret__.type,
updated_at=__ret__.updated_at)
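# Example usage -- a minimal sketch, assuming this module is importable as
# pulumi_azure_native.notificationhubs.latest; the namespace and resource
# group names are hypothetical placeholders, not values from this module:
#
#     import pulumi
#     from pulumi_azure_native.notificationhubs import latest
#
#     ns = latest.get_namespace(namespace_name="my-namespace",
#                               resource_group_name="my-resource-group")
#     pulumi.export("serviceBusEndpoint", ns.service_bus_endpoint)
#
# Since this 'latest' module is deprecated (see the warning above), new code
# should prefer the top-level 'azure-native:notificationhubs:getNamespace'
# function instead.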