hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
704714d83f647fd1d63d1eea3b51b21e192ee2bc | 1,270 | py | Python | simmbse/link.py | tsherburne/ma_sim | 4082da1c80401dec4293415bc9e9239a6bb8185d | [
"BSD-3-Clause"
] | null | null | null | simmbse/link.py | tsherburne/ma_sim | 4082da1c80401dec4293415bc9e9239a6bb8185d | [
"BSD-3-Clause"
] | null | null | null | simmbse/link.py | tsherburne/ma_sim | 4082da1c80401dec4293415bc9e9239a6bb8185d | [
"BSD-3-Clause"
] | null | null | null | import simpy
DEF_LINK_CAPACITY = 5
DEF_LINK_DELAY = 5
class Link:
    """
    A link is the physical implementation of an interface.

    Messages are first passed through a capacity-limited transfer stage
    (``queue``) and then delivered into a simpy Store after a fixed
    propagation delay (``latency``).  Receivers pull delivered messages
    with ``get``.
    """
    def __init__(self, env, capacity, delay):
        # simpy simulation environment driving all processes
        self.env = env
        # fixed propagation delay applied to every message
        self.delay = delay
        # store holding messages that have fully traversed the link
        self.msg = simpy.Store(env)
        # container modelling the link's transfer capacity
        self.capacity = simpy.Container(env, capacity)
    def latency(self, value):
        """
        Process: delay *value* by the link's propagation delay, then
        deliver it into the message store.
        """
        yield self.env.timeout(self.delay)
        self.msg.put(value)
    def queue(self, value, size):
        # queue message based on link capacity and message size
        remaining = size
        while remaining > 0:
            # transfer at most one full link capacity per step
            if remaining > self.capacity.capacity:
                chunk = self.capacity.capacity
            else:
                chunk = remaining
            # handle contention of multiple senders
            self.capacity.put(chunk)
            # link: capacity of 1, transfers message of size 1 in 1 time unit
            yield self.env.timeout(chunk / self.capacity.capacity)
            self.capacity.get(chunk)
            remaining -= chunk
        # whole message transferred; start the propagation-delay process
        self.env.process(self.latency(value))
    def put(self, value, size):
        # fire-and-forget: start the queueing process for one message
        self.env.process(self.queue(value, size))
    def get(self):
        # returns a simpy get-event; yield it to receive the next message
        return self.msg.get()
| 25.918367 | 77 | 0.583465 | 153 | 1,270 | 4.79085 | 0.339869 | 0.057299 | 0.081855 | 0.051842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007001 | 0.325197 | 1,270 | 48 | 78 | 26.458333 | 0.848308 | 0.179528 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.178571 | false | 0 | 0.035714 | 0.035714 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7047d5cefa65f2e1e003e6c7a82fbac36e8eb44c | 2,163 | py | Python | dovetail/tests/unit/test_parser.py | hashnfv/hashnfv-dovetail | 73f332fc513f184513be483db6a108bd3c7b7d9b | [
"Apache-2.0"
] | null | null | null | dovetail/tests/unit/test_parser.py | hashnfv/hashnfv-dovetail | 73f332fc513f184513be483db6a108bd3c7b7d9b | [
"Apache-2.0"
] | null | null | null | dovetail/tests/unit/test_parser.py | hashnfv/hashnfv-dovetail | 73f332fc513f184513be483db6a108bd3c7b7d9b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# lingui.zeng@huawei.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
"""
Test 'parser' module
"""
import logging
import os
import unittest
import yaml
import mock
from dovetail import parser
from dovetail.utils.dovetail_config import DovetailConfig as dt_cfg
@mock.patch('dovetail.parser.Parser.logger')
class TestParser(unittest.TestCase):
    """Unit tests for dovetail.parser command parsing."""

    # Directory containing this test module; fixtures live beside it.
    test_path = os.path.dirname(os.path.realpath(__file__))

    def setUp(self):
        """Test case setup"""
        dt_cfg.load_config_files()
        logging.disable(logging.CRITICAL)

    def test_parser_cmd(self, mock_logger):
        """Test whether the command is correctly parsed."""
        mock_cmd = "python /functest/ci/run_tests.py "\
                   "-t {{validate_testcase}} -r"
        with open(os.path.join(self.test_path, 'test_testcase.yaml')) as f:
            mock_testcase_yaml = yaml.safe_load(f)
        MockTestcase = type('Testcase', (object,), {})
        mock_testcase = MockTestcase()
        # dict.values() is a non-indexable view on Python 3; materialize
        # it as a list first (works on both Python 2 and 3).
        mock_testcase.testcase = list(mock_testcase_yaml.values())[0]
        output = parser.Parser.parse_cmd(mock_cmd, mock_testcase)
        expected_output = ("python /functest/ci/run_tests.py -t "
                           "tempest_smoke_serial -r")
        self.assertEqual(expected_output, output)

    def test_parser_cmd_fail(self, mock_logger):
        """Test that an empty testcase yields the 'None' placeholder."""
        mock_cmd = "python /functest/ci/run_tests.py "\
                   "-t {{validate_testcase}} -r"
        mock_testcase_yaml = {}
        MockTestcase = type('Testcase', (object,), {})
        mock_testcase = MockTestcase()
        # Same Python 3 fix as above for the dict view.
        mock_testcase.testcase = list(mock_testcase_yaml.values())
        output = parser.Parser.parse_cmd(mock_cmd, mock_testcase)
        expected_output = ("python /functest/ci/run_tests.py -t "
                           "None -r")
        self.assertEqual(expected_output, output)
# Allow running this test module directly with `python test_parser.py`.
if __name__ == '__main__':
    unittest.main()
| 33.796875 | 75 | 0.66343 | 269 | 2,163 | 5.111524 | 0.390335 | 0.087273 | 0.046545 | 0.055273 | 0.466909 | 0.466909 | 0.414545 | 0.414545 | 0.414545 | 0.414545 | 0 | 0.002976 | 0.223301 | 2,163 | 63 | 76 | 34.333333 | 0.815476 | 0.188165 | 0 | 0.368421 | 0 | 0 | 0.169855 | 0.09913 | 0 | 0 | 0 | 0 | 0.052632 | 1 | 0.078947 | false | 0 | 0.184211 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
704c104a01777ddf21029bc89ce3fac781b60738 | 1,072 | py | Python | app/utils/cosineSimilarity.py | P-DOX/MultiModelNLP | 9ab93fdc6f90c829d3d63249565df9fc0e45f119 | [
"Apache-2.0"
] | null | null | null | app/utils/cosineSimilarity.py | P-DOX/MultiModelNLP | 9ab93fdc6f90c829d3d63249565df9fc0e45f119 | [
"Apache-2.0"
] | null | null | null | app/utils/cosineSimilarity.py | P-DOX/MultiModelNLP | 9ab93fdc6f90c829d3d63249565df9fc0e45f119 | [
"Apache-2.0"
] | null | null | null | from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
# X = input("Enter first string: ").lower()
# Y = input("Enter second string: ").lower()
# X ="I love horror movies"
# Y ="Lights out is a horror movie"
def cosineSimilarity(X, Y):
    """Return the cosine similarity of sentences X and Y.

    Tokenizes both inputs, removes English stop words, builds binary
    bag-of-words vectors over the combined vocabulary and applies the
    cosine formula.  Returns 0.0 when either sentence has no
    non-stop-word tokens (the original raised ZeroDivisionError there).
    """
    # tokenization
    X_list = word_tokenize(X)
    Y_list = word_tokenize(Y)
    # stop words as a set for O(1) membership tests
    sw = set(stopwords.words('english'))
    # remove stop words from each token list
    X_set = {w for w in X_list if w not in sw}
    Y_set = {w for w in Y_list if w not in sw}
    # form a set containing keywords of both strings
    rvector = X_set.union(Y_set)
    # binary occurrence vectors over the shared vocabulary
    l1 = [1 if w in X_set else 0 for w in rvector]
    l2 = [1 if w in Y_set else 0 for w in rvector]
    # cosine formula, guarded against an empty vector
    dot = sum(a * b for a, b in zip(l1, l2))
    denominator = float((sum(l1) * sum(l2)) ** 0.5)
    cosine = dot / denominator if denominator else 0.0
return cosine | 29.777778 | 54 | 0.58209 | 168 | 1,072 | 3.636905 | 0.404762 | 0.03437 | 0.02946 | 0.026187 | 0.07856 | 0.045827 | 0 | 0 | 0 | 0 | 0 | 0.022849 | 0.30597 | 1,072 | 36 | 55 | 29.777778 | 0.798387 | 0.3125 | 0 | 0 | 0 | 0 | 0.01013 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.1 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
704ece007cac76b4214891671e52a643444b368a | 1,375 | py | Python | crawl_movies/pipelines.py | KevinLJJ/crawl_movies | 16d03a8a18fee57582ff3fb0b80ba0d12a824f96 | [
"MIT"
] | null | null | null | crawl_movies/pipelines.py | KevinLJJ/crawl_movies | 16d03a8a18fee57582ff3fb0b80ba0d12a824f96 | [
"MIT"
] | null | null | null | crawl_movies/pipelines.py | KevinLJJ/crawl_movies | 16d03a8a18fee57582ff3fb0b80ba0d12a824f96 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from crawl_movies.models.mongo import MovieDetail, WorkerDetail
class CrawlMoviesPipeline(object):
    """Scrapy pipeline that stores newly discovered movies in MongoDB."""

    def process_item(self, item, spider):
        """
        Receive an item from the spider and persist it if it is new.

        :param item: scraped movie item
        :param spider: the spider that produced the item
        :return: the (unmodified) item, for downstream pipelines
        """
        # Does this movie already exist in the collection?
        saved = MovieDetail.objects(movie_id=item['movie_id'])
        # Only store it when it does not
        if not saved:
            self.save_movie_data(item)
        return item

    def save_movie_data(self, item):
        """
        Build a MovieDetail document from the item.

        NOTE(review): the document is populated but never persisted -
        there is no ``movie.save()`` call; confirm whether saving is
        handled elsewhere or is simply missing.

        :param item: scraped movie item
        """
        movie = MovieDetail()
        movie.movie_id = item.get('movie_id', '')
        movie.magnet = self.splite_magnet(item.get('magnet', ''))

    def split_words(self, words: str, flag):
        """
        Split a person-name string.

        :param words: raw name text, possibly containing "<br>" separators
        :param flag: only 'name' triggers any processing
        :return: the cleaned string, or a list of names when "<br>" occurs
        """
        if flag == 'name':
            words = words.replace(' ', '')
            # Bug fix: the original called words.split(' <br> ') and
            # discarded the result (and the spaces had already been
            # stripped, so that separator could never match anyway).
            # Split on the bare tag and keep the result.
            if '<br>' in words:
                words = words.split('<br>')
        return words

    def splite_magnet(self, url):
        """
        Clean a magnet link by stripping HTML-escaped 'amp;' fragments.

        :param url: raw magnet URL (may be empty or None)
        :return: cleaned URL, or '' for falsy input
        """
        return url.replace('amp;', '') if url else ''
| 24.122807 | 66 | 0.522909 | 147 | 1,375 | 4.795918 | 0.489796 | 0.039716 | 0.031206 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002227 | 0.346909 | 1,375 | 56 | 67 | 24.553571 | 0.782851 | 0.248727 | 0 | 0 | 0 | 0 | 0.048066 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0.05 | 0.05 | 0 | 0.45 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
704f9d6253b758f6c2018d25a5de126b3018f3f2 | 427 | py | Python | Algorithms/Easy/1394. Find Lucky Integer in an Array/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
] | null | null | null | Algorithms/Easy/1394. Find Lucky Integer in an Array/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
] | null | null | null | Algorithms/Easy/1394. Find Lucky Integer in an Array/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
] | null | null | null | from typing import List
class Solution:
    def findLucky(self, arr: List[int]) -> int:
        """Return the largest "lucky" integer in *arr*.

        A lucky integer is one whose value equals its frequency in the
        array.  Returns -1 when no such integer exists.
        """
        # Local import keeps the module's top-level imports unchanged.
        from collections import Counter
        # Counter replaces the original manual setdefault/increment loop
        # (which also shadowed the builtin name `dict`).
        freq = Counter(arr)
        lucky = [value for value, count in freq.items() if value == count]
        return max(lucky) if lucky else -1
if __name__ == "__main__":
    # Quick manual check: 3 occurs three times, so it is the answer.
    print(Solution().findLucky([1, 2, 2, 3, 3, 3]))
| 20.333333 | 47 | 0.468384 | 58 | 427 | 3.310345 | 0.551724 | 0.020833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035019 | 0.398126 | 427 | 20 | 48 | 21.35 | 0.712062 | 0 | 0 | 0 | 0 | 0 | 0.018735 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.25 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
704fd7adeafe4b3c7852a5ce2db279a718b6a6db | 1,882 | py | Python | cauldron/test/cli/server/routes/synchronize/test_sync_open.py | DanMayhew/cauldron | ac41481830fc1a363c145f4b58ce785aac054d10 | [
"MIT"
] | null | null | null | cauldron/test/cli/server/routes/synchronize/test_sync_open.py | DanMayhew/cauldron | ac41481830fc1a363c145f4b58ce785aac054d10 | [
"MIT"
] | null | null | null | cauldron/test/cli/server/routes/synchronize/test_sync_open.py | DanMayhew/cauldron | ac41481830fc1a363c145f4b58ce785aac054d10 | [
"MIT"
] | null | null | null | import json
import os
import cauldron
from cauldron.test import support
from cauldron.test.support.flask_scaffolds import FlaskResultsTest
# Absolute path to cauldron's bundled example projects, resolved
# relative to the installed cauldron package (.../cauldron/resources/examples).
EXAMPLE_PROJECTS_DIRECTORY = os.path.realpath(os.path.join(
    os.path.dirname(os.path.realpath(cauldron.__file__)),
    'resources',
    'examples'
))
class TestSyncOpen(FlaskResultsTest):
    """Tests for the /sync-open route."""

    def test_no_args(self):
        """ should error without arguments """
        posted = self.post('/sync-open')
        self.assertEqual(posted.flask.status_code, 200)
        self.assert_has_error_code(posted.response, 'INVALID_ARGS')

    def test_missing_definition(self):
        """ should error without cauldron.json definition argument """
        posted = self.post('/sync-open', {'source_directory': 'abc'})
        self.assert_has_error_code(posted.response, 'INVALID_ARGS')

    def test_missing_source_directory(self):
        """ should error without source directory argument """
        posted = self.post('/sync-open', {'definition': 'abc'})
        self.assert_has_error_code(posted.response, 'INVALID_ARGS')

    def test_open(self):
        """ should open project remotely """
        directory = os.path.join(EXAMPLE_PROJECTS_DIRECTORY, 'hello_text')
        with open(os.path.join(directory, 'cauldron.json'), 'r') as f:
            definition = json.load(f)

        posted = self.post('/sync-open', {
            'definition': definition,
            'source_directory': directory,
        })
        self.assert_has_success_code(posted.response, 'PROJECT_OPENED')

        project = cauldron.project.get_internal_project()
        self.assertEqual(project.remote_source_directory, directory)
| 28.953846 | 75 | 0.664187 | 209 | 1,882 | 5.755981 | 0.291866 | 0.112219 | 0.04655 | 0.05985 | 0.306733 | 0.270158 | 0.191189 | 0.191189 | 0.191189 | 0.191189 | 0 | 0.002065 | 0.227949 | 1,882 | 64 | 76 | 29.40625 | 0.825877 | 0.087141 | 0 | 0.225 | 0 | 0 | 0.096736 | 0 | 0 | 0 | 0 | 0 | 0.15 | 1 | 0.1 | false | 0 | 0.125 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70523f493b7d38b249e11082c64f836106e49316 | 2,166 | py | Python | common/vision/models/digits.py | billzhonggz/Transfer-Learning-Library | d7a6e4298e571d5101e05515a2ab1f171160ef89 | [
"MIT"
] | 1,474 | 2020-07-24T02:55:55.000Z | 2022-03-31T12:35:56.000Z | common/vision/models/digits.py | mxliu/Transfer-Learning-Library | 7b0ccb3a8087ecc65daf4b1e815e5a3f42106641 | [
"MIT"
] | 70 | 2020-08-05T10:47:33.000Z | 2022-03-31T03:48:54.000Z | common/vision/models/digits.py | mxliu/Transfer-Learning-Library | 7b0ccb3a8087ecc65daf4b1e815e5a3f42106641 | [
"MIT"
] | 312 | 2020-08-01T11:08:39.000Z | 2022-03-30T06:03:47.000Z | """
@author: Junguang Jiang
@contact: JiangJunguang1123@outlook.com
"""
import torch.nn as nn
class LeNet(nn.Sequential):
    """LeNet-style convolutional feature extractor for 1x28x28 inputs.

    Produces a 500-dimensional feature vector; the classification head
    is created separately via :meth:`copy_head`.
    """

    def __init__(self, num_classes=10):
        layers = [
            nn.Conv2d(1, 20, kernel_size=5),
            nn.MaxPool2d(2),
            nn.ReLU(),
            nn.Conv2d(20, 50, kernel_size=5),
            nn.Dropout2d(p=0.5),
            nn.MaxPool2d(2),
            nn.ReLU(),
            nn.Flatten(start_dim=1),
            nn.Linear(50 * 4 * 4, 500),
            nn.ReLU(),
            nn.Dropout(p=0.5),
        ]
        super(LeNet, self).__init__(*layers)
        self.num_classes = num_classes
        self.out_features = 500

    def copy_head(self):
        """Return a fresh, untrained classification head."""
        return nn.Linear(500, self.num_classes)
class DTN(nn.Sequential):
    """DTN convolutional feature extractor for 3x32x32 inputs.

    Produces a 512-dimensional feature vector; the classification head
    is created separately via :meth:`copy_head`.
    """

    def __init__(self, num_classes=10):
        layers = [
            nn.Conv2d(3, 64, kernel_size=5, stride=2, padding=2),
            nn.BatchNorm2d(64),
            nn.Dropout2d(0.1),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2),
            nn.BatchNorm2d(128),
            nn.Dropout2d(0.3),
            nn.ReLU(),
            nn.Conv2d(128, 256, kernel_size=5, stride=2, padding=2),
            nn.BatchNorm2d(256),
            nn.Dropout2d(0.5),
            nn.ReLU(),
            nn.Flatten(start_dim=1),
            nn.Linear(256 * 4 * 4, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Dropout(),
        ]
        super(DTN, self).__init__(*layers)
        self.num_classes = num_classes
        self.out_features = 512

    def copy_head(self):
        """Return a fresh, untrained classification head."""
        return nn.Linear(512, self.num_classes)
def lenet(pretrained=False, **kwargs):
    """Factory for the LeNet model from
    `"Gradient-based learning applied to document recognition" <http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf>`_

    Args:
        num_classes (int): number of classes. Default: 10

    .. note::
        The input image size must be 28 x 28.
    """
    # `pretrained` is currently unused - no pretrained weights are loaded.
    model = LeNet(**kwargs)
    return model
def dtn(pretrained=False, **kwargs):
    """ Factory for the DTN model.

    Args:
        num_classes (int): number of classes. Default: 10

    .. note::
        The input image size must be 32 x 32.
        `pretrained` is currently unused - no pretrained weights are loaded.
    """
return DTN(**kwargs) | 26.096386 | 117 | 0.545245 | 281 | 2,166 | 4.067616 | 0.316726 | 0.087489 | 0.048994 | 0.036745 | 0.496063 | 0.496063 | 0.496063 | 0.415573 | 0.347332 | 0.118985 | 0 | 0.081978 | 0.31856 | 2,166 | 83 | 118 | 26.096386 | 0.692412 | 0.199446 | 0 | 0.346939 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.122449 | false | 0 | 0.020408 | 0.040816 | 0.265306 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7053994678a4357d47cfc599844f36810176dba3 | 6,164 | py | Python | cviceni10/mind.py | malja/cvut-python | 945aa0fefb72c65a97c505e38597881d8433f93b | [
"MIT"
] | null | null | null | cviceni10/mind.py | malja/cvut-python | 945aa0fefb72c65a97c505e38597881d8433f93b | [
"MIT"
] | null | null | null | cviceni10/mind.py | malja/cvut-python | 945aa0fefb72c65a97c505e38597881d8433f93b | [
"MIT"
] | null | null | null | import itertools
class mind:
"""
Třída řešící hru Mastermind ve třech úrovních obtížnosti.
Podporované módy:
1) Hádání 4 pozic se 6 barvami
2) Hádání 5 pozic s 7 barvami
3) Hádání 6 pozic s 8 barvami
O zadání, učování správného řešení a ohodnocování jednotlivých tahů
se stará třída master.
V prvním kole se pro každý herní mód využije pevně danný tah. Ten by
měl pro další kola vyloučit co nejvyšší množství "potencíálních"
řešení.
Po ohodnocení prvního kola (zajišťuje master), jsou z množiny všech
možných řešení dané úlohy vyloučeny ty nesprávné. Tedy ty, pro
které by (pokud by byly hledaným řešením úlohy) nemohl naposledy
hraný tah získat stejné ohodnocení, které dostal.
Postup se opakuje, dokud není množina všech řešení dostatečně malá
(moemntálně pod 1000 prvků). Zde přichází do hry výběr nejlepšího
dalšího tahu. Za ten je považován tah, který je nejvíce podobný
ostatním (má nejvyšší součetsoučtu vůči ostatním nejvyšší skóre).
"""
def __init__(self):
# Kódové označení
self.codename = "up to 32 characters"
# Herní mód - obsahuje čísla 4 (4 ze 6), 5 (5 ze 7), 6 (6 z 8)
self.game_mode = 0
# Set s všemi možnými řešeními aktuální úlohy
self.possibilities = set()
# Jde o první tah?
self.first_guess = True
# Ohodnocení posledního pokusu o řešení
self.last_score = 0
# Cache vzájemného ohodnocení dvou možností
self.cache = {}
def init(self, numbers, positions):
"""
Metoda volaná po každé změně obtížnosti (herního typu), aby se
nastavilo vše potřebné.
Parametry:
----------
numbers:integer
Počet číslic, které mohou být na jedné pozici v rozsahu
0... numbers-1
positions:integer
Počet pozic.
"""
self.game_mode = positions
self.possibilities = set(itertools.product(range(numbers), repeat=positions))
self.first_guess = True
self.cache = {}
def pick_best_guess(self):
"""
Metoda, jenž se pokusí o nalezení nejlepšího dalšího tahu.
Protože je relativně pomalá (porovnává prvky v poli řešení
každý s každým), měla by se volat až když je pole řešení
menší.
Vrací:
------
set
Nejlepší možný tah.
"""
best = {}
if len(self.possibilities) == 1:
return self.possibilities.pop()
# Kontroluje každý s každým
for guess in self.possibilities:
for compare in self.possibilities:
# Samo se sebou neporovnává
if guess == compare:
continue
# Vytvoří počítadlo
if not guess in best:
best[guess] = 0
# Přičte vzájemné skóre k počítadlu.
best[guess] += self.get_score( guess, compare)
# Vrátí tah s nejvyšším součtem všech skóre
return max(best, key=lambda key: best[key])
def count_matching_colors(self, a, b):
"""
Spočítá počet stejných barev (na různých pozicích) v řešení a
a b.
Parametry:
---------
a:set
Řešení A
b:set
Řešení B
Vrací:
------
integer
Počet stejných barev.
"""
count = 0
a_iterator = iter(sorted(a))
b_iterator = iter(sorted(b))
a_value = next(a_iterator)
b_value = next(b_iterator)
try:
while True:
if a_value == b_value:
count += 1
a_value = next(a_iterator)
b_value = next(b_iterator)
elif a_value < b_value:
a_value = next(a_iterator)
else:
b_value = next(b_iterator)
except StopIteration:
return count
def get_score( self, guess, compare):
"""
Metoda vracející vzájemné ohodnocení dvou možných řešení.
Parametry:
----------
guess:set
Řešení A
compare:set
Řešení B
"""
# Prohledávání cache, zda jsme to už nepočítali.
# Bohužel mě nenapadlo jak vytvořit unikátní klíč
# na základě parametrů guess a compare tak, aby
# nezáleželo na jejich pořadí.
#
# Memoize by asi moc nepomohlo...
a = guess + compare
b = compare + guess
if a in self.cache:
return self.cache[a]
elif b in self.cache:
return self.cache[b]
# Výpočet ohodnocení
key = a
blacks = sum(1 for a, b in zip(guess, compare) if a == b)
color_matches = self.count_matching_colors(guess, compare)
whites = color_matches - blacks
# Uložení do cache
self.cache[key] = blacks * 10 + whites
return blacks * 10 + whites
def guess(self):
guess = 0
if self.first_guess:
self.first_guess = False
if self.game_mode == 4:
guess = (0, 0, 1, 1)
elif self.game_mode == 5:
guess = (0, 0, 1, 1, 2)
elif self.game_mode == 6:
guess = (0, 0, 1, 1, 2, 2)
self.possibilities.remove(guess)
# Čas hledat nejlepší řešení
# Neosvědčilo se
"""
if len(self.possibilities) < 1000:
guess = self.pick_best_guess()
else:
"""
guess = self.possibilities.pop()
return guess
def eval(self, guess, black, white):
self.last_score = black * 10 + white
# Promaže všechny možnosti, která nemohou být řešením
self.possibilities = set(filter(
lambda n: self.get_score(guess,n) == self.last_score,
self.possibilities
)) | 29.21327 | 85 | 0.541045 | 702 | 6,164 | 4.683761 | 0.411681 | 0.056873 | 0.018248 | 0.010037 | 0.059307 | 0.045012 | 0.023114 | 0.023114 | 0.023114 | 0.023114 | 0 | 0.016411 | 0.387086 | 6,164 | 211 | 86 | 29.21327 | 0.853891 | 0.401849 | 0 | 0.131579 | 0 | 0 | 0.006131 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.092105 | false | 0 | 0.013158 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
705779fc95331f18047ab12ddb0742f0946134c2 | 3,055 | py | Python | restclients/sws/v5/notice.py | uw-it-cte/uw-restclients | 2b09348bf066e5508304401f93f281805e965af5 | [
"Apache-2.0"
] | null | null | null | restclients/sws/v5/notice.py | uw-it-cte/uw-restclients | 2b09348bf066e5508304401f93f281805e965af5 | [
"Apache-2.0"
] | null | null | null | restclients/sws/v5/notice.py | uw-it-cte/uw-restclients | 2b09348bf066e5508304401f93f281805e965af5 | [
"Apache-2.0"
] | null | null | null | """
Interfaceing with the Student Web Service,
for notice resource
"""
import copy
import logging
from restclients.models.sws import Notice, NoticeAttribute
from restclients.sws import get_resource
from dateutil import parser
import pytz
notice_res_url_prefix = "/student/v5/notice/"
logger = logging.getLogger(__name__)
def get_notices_by_regid(regid):
    """
    Returns a list of restclients.models.sws.Notice objects
    for the passed regid.
    """
    url = "%s%s.json" % (notice_res_url_prefix, regid)
    return _notices_from_json(get_resource(url))
def _notices_from_json(notice_data):
    """
    Convert decoded SWS notice JSON into Notice model objects.

    Returns None when the payload has no "Notices" list.
    """
    notices_list = notice_data.get("Notices")
    if notices_list is None:
        return None
    notices = []
    for notice in notices_list:
        notice_obj = Notice()
        notice_obj.notice_category = notice.get("NoticeCategory")
        notice_obj.notice_content = notice.get("NoticeContent")
        notice_obj.notice_type = notice.get("NoticeType")
        notice_attribs = []
        try:
            for notice_attrib in notice.get("NoticeAttributes"):
                attribute = NoticeAttribute()
                attribute.data_type = notice_attrib.get("DataType")
                attribute.name = notice_attrib.get("Name")
                # Currently known data types
                if attribute.data_type == "url":
                    attribute._url_value = notice_attrib.get("Value")
                elif attribute.data_type == "date":
                    # Convert the local (America/Los_Angeles) time to UTC
                    date = parser.parse(notice_attrib.get("Value"))
                    localtz = pytz.timezone('America/Los_Angeles')
                    local_dt = localtz.localize(date)
                    utc_dt = local_dt.astimezone(pytz.utc)
                    attribute._date_value = utc_dt
                elif attribute.data_type == "string":
                    attribute._string_value = notice_attrib.get("Value")
                else:
                    # Fixes: logger.warn is a deprecated alias of
                    # logger.warning; "Unkown" typo; and the value was
                    # read with the wrong key "value" (always None).
                    logger.warning("Unknown attribute type: %s\nWith Value:%s" %
                                   (attribute.data_type,
                                    notice_attrib.get("Value")))
                    continue
                notice_attribs.append(attribute)
        except TypeError:
            # notice.get("NoticeAttributes") is None when absent;
            # iterating None raises TypeError - treat as "no attributes".
            pass
        notice_obj.attributes = notice_attribs
        notices.append(notice_obj)
    return _associate_short_long(notices)
def _associate_short_long(notices):
    """
    If a notice is type ${1}Short, associate with its Long notice
    in an attribute called long_notice.
    """
    for item in notices:
        has_type = item.notice_type is not None
        if (has_type and
                item.notice_category == "StudentFinAid" and
                item.notice_type.endswith("Short")):
            long_type = item.notice_type[:-5]
            item.long_notice = _find_notice_by_type(notices, long_type)
    return notices
def _find_notice_by_type(notices, type):
    """Return the first notice whose notice_type equals *type*, else None."""
    return next((n for n in notices if n.notice_type == type), None)
| 33.944444 | 78 | 0.60491 | 335 | 3,055 | 5.271642 | 0.307463 | 0.047565 | 0.050963 | 0.0453 | 0.13137 | 0.07701 | 0.04077 | 0.04077 | 0 | 0 | 0 | 0.001429 | 0.312602 | 3,055 | 89 | 79 | 34.325843 | 0.839524 | 0.095254 | 0 | 0.065574 | 0 | 0 | 0.075819 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065574 | false | 0.016393 | 0.098361 | 0 | 0.262295 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
705807e28589aed3f29367b0717ee4329f43b530 | 2,654 | py | Python | vulcan/_utils.py | v0idzz/vulcan-api | 1ae83d90236e678042a9f85f6c34b6bd82bf279f | [
"MIT"
] | 49 | 2019-01-10T22:10:19.000Z | 2022-01-10T17:07:51.000Z | vulcan/_utils.py | v0idzz/vulcan-api | 1ae83d90236e678042a9f85f6c34b6bd82bf279f | [
"MIT"
] | 47 | 2019-01-08T21:04:17.000Z | 2022-03-21T04:02:37.000Z | vulcan/_utils.py | v0idzz/vulcan-api | 1ae83d90236e678042a9f85f6c34b6bd82bf279f | [
"MIT"
] | 31 | 2019-05-04T14:05:33.000Z | 2021-11-01T18:51:16.000Z | # -*- coding: utf-8 -*-
import asyncio
import logging
import math
import platform
import time
import urllib
import uuid as _uuid
from datetime import datetime
import aiohttp
import requests
APP_NAME = "DzienniczekPlus 2.0"
APP_VERSION = "1.4.2"
APP_OS = "Android"
APP_USER_AGENT = "Dart/2.10 (dart:io)"
log = logging.getLogger("client")
log.setLevel(logging.INFO)
handler = logging.StreamHandler()
log.addHandler(handler)
TIME_FORMAT_H_M = "%H:%M"
class VulcanAPIException(Exception):
    """Raised for Vulcan API errors (e.g. an unrecognized token prefix)."""
    pass
def default_device_model():
    """Default device-model string reported to the API."""
    python_version = platform.python_version()
    return "Vulcan API (Python {})".format(python_version)
async def get_base_url(token):
    """
    Resolve the REST base URL for *token* from its 3-letter prefix.

    Raises VulcanAPIException when the prefix is unknown.
    """
    code = token[0:3]
    components = await get_components()
    if code not in components:
        raise VulcanAPIException("Invalid token!")
    return components[code]
async def get_components():
    """Download the routing table mapping 3-letter codes to base URLs."""
    log.info("Getting Vulcan components...")
    url = "http://komponenty.vulcan.net.pl/UonetPlusMobile/RoutingRules.txt"
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            components = {}
            if response.headers["Content-Type"] == "text/plain":
                body = await response.text()
                for line in body.split():
                    parts = line.split(",")
                    components[parts[0]] = parts[1]
            # Hard-coded entry for the fakelog test server
            components.update({"FK1": "http://api.fakelog.tk"})
            return components
def get_firebase_token():
    """Register with Google's GCM endpoint and return a push token.

    Performs a blocking HTTP POST; raises on network failure.
    """
    log.info("Getting Firebase token...")
    # Hard-coded Android ID pair used for the AidLogin authorization.
    aid = "4609707972546570896:3626695765779152704"
    device = aid.split(":")[0]
    app = "pl.edu.vulcan.hebe"
    data = {
        "sender": "987828170337",
        "X-scope": "*",
        "X-gmp_app_id": "1:987828170337:android:ac97431a0a4578c3",
        "app": app,
        "device": device,
    }
    headers = {
        "Authorization": "AidLogin {}".format(aid),
        "User-Agent": "Android-GCM/1.5",
        "app": app,
    }
    r = requests.post(
        "https://android.clients.google.com/c2dm/register3", data=data, headers=headers
    )
    # Expects a "key=value" response body; the token is the value part.
    # NOTE(review): raises IndexError if the response contains no "=" -
    # confirm how error responses are handled upstream.
    token = r.text.split("=")[1]
    return token
def millis():
    """Current Unix time in whole milliseconds."""
    seconds = time.time()
    return math.floor(seconds * 1000)
def now_datetime():  # UTC+0
    """Naive datetime for the current UTC moment (no tzinfo attached)."""
    return datetime.utcnow()
def now_iso(dt=None):  # ISO 8601, local timezone
    """Format *dt* (default: local now) as "YYYY-MM-DD HH:MM:SS"."""
    when = dt if dt is not None else datetime.now()
    return when.strftime("%Y-%m-%d %H:%M:%S")
def now_gmt(dt=None):  # RFC 2822, UTC+0
    """Format *dt* (default: current UTC time) as an RFC 2822 GMT string."""
    when = datetime.utcnow() if dt is None else dt
    return when.strftime("%a, %d %b %Y %H:%M:%S GMT")
def uuid(seed=None):
    """Return a UUID string: deterministic uuid5 of *seed*, or random uuid4."""
    if not seed:
        return str(_uuid.uuid4())
    return str(_uuid.uuid5(_uuid.NAMESPACE_X500, str(seed)))
def urlencode(s):
return urllib.parse.quote(str(s))
| 23.280702 | 87 | 0.620573 | 338 | 2,654 | 4.786982 | 0.446746 | 0.004944 | 0.013597 | 0.02225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.055365 | 0.230972 | 2,654 | 113 | 88 | 23.486726 | 0.737384 | 0.025622 | 0 | 0.025 | 0 | 0 | 0.212708 | 0.030221 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0.0125 | 0.125 | 0.075 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7058092211009357bacdd3e00eaae3ca9bf62a90 | 518 | py | Python | chkuser2.py | YousefAllam221b/QuickStor | 59cbbb6e7be358bde0adced49ba3f3ff9a92e6de | [
"Apache-2.0"
] | null | null | null | chkuser2.py | YousefAllam221b/QuickStor | 59cbbb6e7be358bde0adced49ba3f3ff9a92e6de | [
"Apache-2.0"
] | null | null | null | chkuser2.py | YousefAllam221b/QuickStor | 59cbbb6e7be358bde0adced49ba3f3ff9a92e6de | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3.6
import sys,etcdget,subprocess,time

# Debug breadcrumb: record the raw argv this helper was invoked with.
with open('Data/chkuser2','w') as f:
    f.write(str(sys.argv))
# y tracks the 'updlogged/<user>' etcd value, z the 'logged/<user>' value.
y=[]
z=[]
x=etcdget.etcdget('updlogged/'+sys.argv[1])
# NOTE(review): z starts as sys.argv[2]; presumably the caller passes the
# current 'logged' value — TODO confirm against the invoking script.
z.append(sys.argv[2])
y.append(x[0])
# Kick off the external check script for this user.
cmdline=['./pump.sh','UnixChkUser2']+sys.argv[1:]
result=subprocess.run(cmdline,stdout=subprocess.PIPE)
# Poll etcd every 2s until either the 'logged' value drops to <= 100 or
# the 'updlogged' value changes from its initial snapshot in x.
while float(z[0]) > 100 and str(y[0])==str(x[0]):
    time.sleep(2)
    y=etcdget.etcdget('updlogged/'+sys.argv[1])
    z=etcdget.etcdget('logged/'+sys.argv[1])
# Report the final 'logged' value on stdout for the caller.
y=etcdget.etcdget('logged/'+sys.argv[1])
print(y[0])
| 27.263158 | 53 | 0.687259 | 92 | 518 | 3.869565 | 0.456522 | 0.13764 | 0.11236 | 0.146067 | 0.337079 | 0.337079 | 0.179775 | 0 | 0 | 0 | 0 | 0.039014 | 0.059846 | 518 | 18 | 54 | 28.777778 | 0.691992 | 0.03668 | 0 | 0 | 0 | 0 | 0.138554 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
705a67e947bb08a691c95b9f12624b016023afa5 | 4,269 | py | Python | neural_network.py | harsh020/ml_subpack | d36e97922ec17d84526884555181045ad50e8809 | [
"MIT"
] | null | null | null | neural_network.py | harsh020/ml_subpack | d36e97922ec17d84526884555181045ad50e8809 | [
"MIT"
] | 6 | 2019-10-06T13:41:55.000Z | 2019-10-16T18:14:04.000Z | neural_network.py | harsh020/ml_subpack | d36e97922ec17d84526884555181045ad50e8809 | [
"MIT"
] | null | null | null | import numpy as np
from scipy.optimize import fmin_cg
from utils.optimize import gradient_desc, computeNumericalGradient
from utils.utility import sigmoid, sigmoid_grad, ravel, unravel
import sys
np.set_printoptions(threshold=sys.maxsize)
np.seterr(divide = 'ignore')
class NeuralNetwork:
    """Fully-connected feed-forward network trained by backpropagation.

    The network has `hidden_layers` hidden layers of `units` units each,
    sigmoid activations throughout, and (from the hard-coded constants in
    `fit`/`_back_prop`) a 10-class one-hot output layer.
    """

    def __init__(self, hidden_layers, units):
        # Number of hidden layers and units per hidden layer.
        self.hidden_layers = hidden_layers
        self.units = units

    def _forward_prop(self, X, y, theta, i):
        """Forward-propagate sample *i*, returning (J_i, z-list, a-list).

        X is assumed to already carry a leading bias column (added by fit).
        """
        m, n = X.shape
        a_ = []
        z_ = []
        J = 0
        # Column vector for sample i (includes the bias feature).
        a = np.array(X[i, :].T).reshape(n, 1)
        a_.append(a)
        z_.append(np.concatenate((np.ones((1, 1)), a), axis=0))
        for j in range(self.hidden_layers+1):
            z = np.array(theta[j] @ a)
            a = sigmoid(z)
            # Prepend the bias unit for the next layer.
            a = np.concatenate((np.ones((1, 1)), a), axis=0)
            a_.append(a)
            z_.append(z)
        # Output activations (bias row stripped).
        a_L = a[1:]
        # Per-sample cross-entropy contribution (sign flipped in caller).
        J = ((((y[i, :])@np.log(a_L)) + ((1-y[i, :])@np.log(1-a_L)))/m)
        return J, np.array(z_), np.array(a_)

    def _back_prop(self, X, y, theta, z, a):
        """Backpropagate one sample's error; returns per-layer gradients.

        NOTE(review): the reshape hard-codes 10 output classes.
        """
        y = np.array(y).reshape(10, 1)
        a_L = a[-1][1:, :]
        theta_grad = []
        # Output-layer error.
        delta = a_L - y
        theta_grad.append(np.array(delta * a[len(theta)-1].T))
        # Propagate the error backwards through the hidden layers.
        for j in range(len(theta)-1, 0, -1):
            delta = (theta[j][:, 1:].T @ delta) * sigmoid_grad(z[j])
            theta_grad.append(np.array(delta * a[j-1].T))
        # Gradients were accumulated output-first; reverse to layer order.
        return theta_grad[::-1]

    def _cost_function(self, theta, X, y, lambda_):
        """Regularized cost and flattened gradient for optimizer callbacks."""
        m, n = X.shape
        J = 0
        # Rebuild the per-layer weight matrices from the flat vector.
        theta = np.array(unravel(theta, self.units, self.hidden_layers, n-1))
        theta_grad = np.array([np.zeros(x.shape) for x in theta])
        for i in range(m):
            j, z, a = self._forward_prop(X, y, theta, i)
            J -= j
            theta_grad += np.array(self._back_prop(X, y[i, :].T, theta, z, a))
        # L2 regularization term (bias column excluded via [1:]).
        reg_sum = 0
        for i in range(self.hidden_layers+1):
            reg_sum += sum(sum(np.array(theta[i])**2)[1:])
        J += (lambda_/(2*m))*reg_sum
        # NOTE(review): this second reg_sum is computed but never used —
        # dead code left over from an earlier version, presumably.
        reg_sum = 0
        for i in range(self.hidden_layers+1):
            reg_sum += sum(sum(np.array(theta[i]))[1:])
        # Average the gradients and add the regularization derivative
        # (bias column is left unregularized).
        for i in range(len(theta)):
            theta_grad[i] = np.c_[(1/m)*theta_grad[i][:, 0], ((1/m)*theta_grad[i][:, 1:]+(lambda_/m)*theta[i][:, 1:])]
        theta_grad = ravel(theta_grad)
        return J, theta_grad

    def fit(self, X, y, lambda_=1, iter=500, alpha=1):
        """ Fit the Neural Network according to the given training data.
        Parameters
        ----------
        X : Numpy array, shape: (n_samples, n_features).
            Training Data. Consists of feature vectors with n_features.
        y : Numpy array (vector), shape: (n_samples, 1).
            Target Values or Labels. It is a vector with n_samples elements.
        lambda_ : Scalar, Real-Number.
            Regularization Parameter.
        alpha : Scalar, Real-Number.
            It defines the step-size to be taken during gradient descent.
        iter : Scalar, Positive Integer.
            It defines the number of times to run gradient descent.
        Return
        ------
        self : object
        """
        m, n = X.shape
        # Prepend the bias column.
        X = np.concatenate((np.ones((m, 1)), X), axis=1)
        # Random-ish deterministic-shape initialization of all weights as
        # one flat vector (input layer + hidden layers + 10-unit output).
        theta = np.random.randn((n+1)*self.units + self.units*(self.units+1)*(self.hidden_layers-1) + 10*(self.units+1), 1) * 2
        theta = np.sin(theta)
        # One-hot encode the integer labels.
        y_ = np.zeros((m, max(y)[0]+1))
        for i in range(m):
            y_[i, y[i][0]] = 1
        y = y_
        # NOTE(review): `disp_curve` is not defined anywhere in this file,
        # so calling fit() raises NameError — confirm the intended
        # gradient_desc signature and supply or remove this argument.
        self.optimal_theta = gradient_desc(self._cost_function, X, y, theta, lambda_, iter, alpha, disp_curve)
        return self

    def predict(self, X):
        """Return the argmax class label (shape (n_samples, 1)) for *X*."""
        m, n = X.shape
        X = np.concatenate((np.ones((m, 1)), X), axis=1)
        m, n = X.shape
        y_predict = np.zeros((X.shape[0], 1))
        theta = unravel(self.optimal_theta, self.units, self.hidden_layers, n-1)
        for i in range(m):
            a = np.array(X[i, :].T).reshape(n, 1)
            for j in range(self.hidden_layers+1):
                z = np.array(theta[j] @ a)
                a = sigmoid(z)
                a = np.concatenate((np.ones((1, 1)), a), axis=0)
            a_L = a[1:]
            # Predicted class = index of the strongest output activation.
            y_predict[i] = list(a_L).index(max(a_L))
        return y_predict
| 30.934783 | 127 | 0.539236 | 655 | 4,269 | 3.383206 | 0.19542 | 0.044224 | 0.064982 | 0.029783 | 0.3037 | 0.255415 | 0.244134 | 0.218863 | 0.189982 | 0.159747 | 0 | 0.023529 | 0.303115 | 4,269 | 137 | 128 | 31.160584 | 0.721345 | 0.134692 | 0 | 0.357143 | 0 | 0 | 0.001687 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.059524 | 0 | 0.202381 | 0.011905 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
705b8955240faf6cb0cd184dc93e8b934d2a6298 | 6,192 | py | Python | stats.py | glommer/ghstats | bd874a05fb23f75aed0f0675a9929b5afdfd3f52 | [
"Apache-2.0"
] | null | null | null | stats.py | glommer/ghstats | bd874a05fb23f75aed0f0675a9929b5afdfd3f52 | [
"Apache-2.0"
] | null | null | null | stats.py | glommer/ghstats | bd874a05fb23f75aed0f0675a9929b5afdfd3f52 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import requests
import sys
import datetime
from dateutil.parser import parse
import argparse
class User:
    """A GitHub user fetched from the API, memoised by URL across instances."""

    # Class-level cache: API URL -> decoded JSON payload.
    cache = {}

    def __init__(self, userurl, token):
        headers = {'Authorization': 'token {}'.format(token)}
        if userurl in User.cache:
            self.info = User.cache[userurl]
        else:
            self.info = requests.get(userurl, headers=headers).json()
            # Bug fix: the cache was read but never written, so every
            # construction hit the network. Store the result for reuse.
            User.cache[userurl] = self.info

    def __str__(self):
        # Prefer the display name; fall back to the login handle.
        try:
            if not self.info['name']:
                return self.info['login']
            return self.info['name']
        except KeyError:
            # Error payloads from the API lack both keys.
            return "None"
class ScyllaPR:
    """One GitHub pull request with convenience date arithmetic."""

    def __init__(self, json, token):
        self.token = token
        self.created_at = parse(json['created_at']).date()
        self.url = json['html_url']
        self.closed_at = self._date_or_none(json['closed_at'])
        self.merged_at = self._date_or_none(json['merged_at'])
        self.title = json['title']
        self.user = User(json['user']['url'], self.token)
        self.reviewers = [User(rv['url'], self.token)
                          for rv in json['requested_reviewers']]

    @staticmethod
    def _date_or_none(value):
        # GitHub reports missing timestamps as null; parse(None) raises
        # TypeError, which we translate to None.
        try:
            return parse(value).date()
        except TypeError:
            return None

    def timeToClose(self):
        """Days between opening and closing the PR."""
        return (self.closed_at - self.created_at).days

    def openFor(self):
        """Days the PR has been open as of today."""
        today = datetime.date.today()
        return (today - self.created_at).days

    def isOpen(self):
        return not self.closed_at

    def isAbandoned(self):
        return self.closed_at and not self.merged_at

    def isMerged(self):
        return self.merged_at

    def needsAttention(self, days=15):
        """True when the PR is still open and older than *days* days."""
        threshold = datetime.date.today() - datetime.timedelta(days=days)
        return self.isOpen() and threshold > self.created_at

    def __str__(self):
        parts = []
        parts.append("\tAuthor : {}\n".format(self.user))
        parts.append("\tTitle : {}\n".format(self.title))
        parts.append("\tURL : {}\n".format(self.url))
        if self.isOpen():
            parts.append("\tCreated at : {} ({} days ago)\n".format(self.created_at, self.openFor()))
        else:
            parts.append("\tCreated at : {} and Closed at {} ({} after days)\n".format(self.created_at, self.closed_at, self.timeToClose()))
        return "".join(parts)
def read_all(dummy):
    """Filter predicate that accepts every PR record (the default filter)."""
    return True
def getGithubData(url, token, add_criteria = read_all):
    """Fetch every page of a GitHub PR listing as a list of ScyllaPR.

    add_criteria filters raw JSON rows before wrapping; exits the process
    on any non-200 response.
    """
    collected = []
    headers = {'Authorization': 'token {}'.format(token)}
    while True:
        resp = requests.get(url, headers=headers)
        if resp.status_code != 200:
            print("Can't contact github API", file=sys.stderr)
            sys.exit(-1)
        rows = resp.json()
        collected += [ScyllaPR(row, token) for row in rows if add_criteria(row)]
        # Follow RFC 5988 pagination links until the last page.
        if 'next' not in resp.links:
            return collected
        url = resp.links['next']['url']
def printHistogram(closedPR, action = "merge", actor=True):
    """Print average/peak close time and an ASCII histogram for *closedPR*.

    closedPR -- iterable of PRs exposing timeToClose()
    action   -- verb used in the printed labels ("merge", "abandon", ...)
    actor    -- unused; kept for backward compatibility
    """
    data = [pr.timeToClose() for pr in closedPR]
    if not data:
        # Bug fix: the original divided by len(data) and crashed with
        # ZeroDivisionError when the list was empty.
        print("\tNo pull requests to summarize for {}.".format(action))
        return
    # Histogram buckets keyed by their upper edge, in days.
    bins = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0,
            15: 0, 21: 0, 30: 0, 60: 0, 120: 0}
    sorted_keys = sorted(bins.keys())
    for days in data:
        for k in sorted_keys:
            if days <= k:
                bins[k] += 1
                break
        # Values above 120 days count toward average/peak but fall outside
        # every bucket, matching the original behaviour.
    print("\tAverage time to {}: {:d} days".format(action, int(sum(data) / len(data))))
    print("\tPeak time to {}: {:d} days".format(action, int(max(data))))
    print("\tHistogram of {} time: in days".format(action))
    # Drop trailing empty buckets so the histogram ends at the last used one.
    # Bug fix: guard against popping past an entirely-empty histogram
    # (all values > 120 days), which raised IndexError.
    while sorted_keys and bins[sorted_keys[-1]] == 0:
        sorted_keys.pop()
    for k in sorted_keys:
        print("\t\t{:3d}: {}".format(k, bins[k] * '@'))
def printStats(days, openPR, abandonedPR = None, mergedPR = None):
    """Print summary statistics for merged, abandoned and open pull requests.

    days -- look-back window in days; falsy means the whole repo history
    """
    if days:
        # Bug fix: the original called "...{days}...".format(days) with a
        # positional argument for a named field, which raises KeyError.
        period = "for the past {days} days".format(days=days)
    else:
        period = "For the entire life of the repository"
    if mergedPR:
        print("Merged Pull Requests {period}: {m}\n".format(period=period, m=len(mergedPR)))
        printHistogram(mergedPR, "merge")
    if abandonedPR:
        print("\nAbandoned Pull Requests {period}: {m}\n".format(period=period, m=len(abandonedPR)))
        printHistogram(abandonedPR, "abandon")
    print("\nCurrently Open Pull Requests: {m}\n".format(m=len(openPR)))
    attDay = 15
    # Oldest PRs first.
    openPR.sort(key=lambda x: x.openFor(), reverse=True)
    needsAttention = [str(x) for x in openPR if x.needsAttention(attDay)]
    if len(needsAttention) > 0:
        print("Pull Requests needing attention: (open for more than {} days):".format(attDay))
        # Idiom fix: a plain loop instead of a list comprehension used
        # purely for its side effects.
        for entry in needsAttention:
            print(entry)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Parse github statistics about our repos')
    parser.add_argument('--repo', type=str, default='scylla', help='Repository')
    parser.add_argument('--period', type=int, help='days to look back')
    parser.add_argument('--token', type=str, required=True, help='github authentication token. Without it, this will be rate limited and fail anyway')
    parser.add_argument('--open-only', action='store_true', help='Only look at open PRs')
    args = parser.parse_args()
    # Listing endpoints for open and closed PRs of a scylladb repository.
    open_pr_url = 'https://api.github.com/repos/scylladb/{}/pulls?state=open?sort=created_at?direction=desc'
    closed_pr_url = 'https://api.github.com/repos/scylladb/{}/pulls?state=closed?sort=closed_at?direction=desc'
    openPR = getGithubData(open_pr_url.format(args.repo), args.token)
    abandonedPR = []
    mergedPR = []
    if not args.open_only:
        # Keep only PRs closed within the requested look-back window.
        def shouldIncludePR(data):
            days = args.period
            if not days:
                return True
            return datetime.date.today() - datetime.timedelta(days=days) < parse(data['closed_at']).date()
        closedPR = getGithubData(closed_pr_url.format(args.repo), args.token, shouldIncludePR)
        # Split closed PRs into abandoned (closed, not merged) and merged.
        for x in closedPR:
            if x.isOpen():
                raise Exception("Not expecting an open PR")
            if x.isAbandoned():
                abandonedPR.append(x)
            elif x.isMerged():
                mergedPR.append(x)
    printStats(args.period, openPR, abandonedPR, mergedPR)
| 32.082902 | 150 | 0.580426 | 772 | 6,192 | 4.560881 | 0.260363 | 0.022721 | 0.011928 | 0.017609 | 0.193695 | 0.11758 | 0.103948 | 0.049418 | 0.049418 | 0.049418 | 0 | 0.010342 | 0.281654 | 6,192 | 192 | 151 | 32.25 | 0.78125 | 0.003391 | 0 | 0.114865 | 0 | 0.013514 | 0.17893 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101351 | false | 0 | 0.033784 | 0.047297 | 0.25 | 0.101351 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
705da61ca28beb4cbccf634052b516bed03caf59 | 654 | py | Python | setup.py | fksato/vmz_interface | 985e7129f4bf266a6226dbc2b7e108dafc8b917a | [
"Apache-2.0"
] | null | null | null | setup.py | fksato/vmz_interface | 985e7129f4bf266a6226dbc2b7e108dafc8b917a | [
"Apache-2.0"
] | null | null | null | setup.py | fksato/vmz_interface | 985e7129f4bf266a6226dbc2b7e108dafc8b917a | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
# Runtime dependencies installed alongside the package.
requirements = [
    "tqdm",
]

setup(
    name='vmz_interface',
    version='0.1.0',
    packages=find_packages(exclude=['tests']),
    url='https://github.com/fksato/vmz_interface',
    author='Fukushi Sato',
    author_email='f.kazuo.sato@gmail.com',
    description='Facebook VMZ interface',
    install_requires=requirements,
    # Trove classifiers describing project maturity and supported Pythons.
    classifiers=[
        'Development Status :: Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: Apache License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
)
| 26.16 | 51 | 0.640673 | 69 | 654 | 5.985507 | 0.724638 | 0.087167 | 0.121065 | 0.125908 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013566 | 0.211009 | 654 | 24 | 52 | 27.25 | 0.786822 | 0 | 0 | 0 | 0 | 0 | 0.474006 | 0.033639 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.045455 | 0 | 0.045455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
705de60f199bd5095fe33004172b3313546ce5d4 | 3,132 | py | Python | api/v2/views/parties.py | Davidodari/POLITICO-API | 479560f7accc3a6e46a8cec34c4f435ae9284138 | [
"MIT"
] | 1 | 2019-09-05T23:20:21.000Z | 2019-09-05T23:20:21.000Z | api/v2/views/parties.py | Davidodari/POLITICO-API | 479560f7accc3a6e46a8cec34c4f435ae9284138 | [
"MIT"
] | 4 | 2019-02-12T10:06:12.000Z | 2019-02-20T05:00:40.000Z | api/v2/views/parties.py | Davidodari/POLITICO-API | 479560f7accc3a6e46a8cec34c4f435ae9284138 | [
"MIT"
] | 4 | 2019-02-08T23:54:24.000Z | 2019-02-19T16:26:59.000Z | from flask import Blueprint, request, jsonify, make_response
from api.v2.models.parties import PartiesModelDb
from flask_jwt_extended import jwt_required
from . import check_user, id_conversion, resource_handler
parties_api_v2 = Blueprint('parties_v2', __name__, url_prefix="/api/v2")
@parties_api_v2.route("/parties", methods=['POST'])
@jwt_required
def api_create_parties():
    """Create a party from the JSON request body (POST /api/v2/parties)."""
    party = request.get_json(force=True)
    return resource_handler('party', party)
@parties_api_v2.route("/parties", methods=['GET'])
@jwt_required
def api_get_parties():
    """Return all registered parties (GET /api/v2/parties)."""
    parties = PartiesModelDb().get_resource('party')
    return make_response(jsonify({"status": 200, "data": parties}), 200)
@parties_api_v2.route("/parties/<party_id>/name", methods=['PATCH'])
@jwt_required
def api_edit_party(party_id):
    """Rename a party (PATCH /api/v2/parties/<party_id>/name). Admin only."""
    # check_user() returns a message containing 'Requires Admin Privilege'
    # for non-admin callers.
    if 'Requires Admin Privilege' not in check_user():
        pid = id_conversion(party_id)
        updated_party_data = request.get_json(force=True)
        # The payload must contain at least a 'name' field.
        if {'name'} <= set(updated_party_data):
            model_result = PartiesModelDb().edit_resource('party', updated_party_data['name'], pid)
            # The model signals errors via sentinel strings in its result.
            if 'Invalid Id' in model_result or 'Invalid Data' in model_result:
                return make_response(jsonify({"status": 400, "error": "Invalid Data ,Check id or data being updated"}),
                                     400)
            elif 'Party Exists' in model_result:
                return make_response(jsonify({"status": 409, "error": "Party with similar name exists"}), 409)
            return make_response(
                jsonify({"status": 200, "message": "{} Updated Successfully".format(model_result[0][1])}),
                200)
        return make_response(jsonify({"status": 400, "error": "Please Check All Input Fields Are Filled"}), 400)
    return make_response(jsonify({"status": 401, "error": "Unauthorized Access,Requires Admin Rights"}), 401)
@parties_api_v2.route("/parties/<party_id>", methods=['GET'])
@jwt_required
def api_specific_party_get(party_id):
    """Return one party by id (GET /api/v2/parties/<party_id>), or 404."""
    oid = id_conversion(party_id)
    party = PartiesModelDb().get_specific_resource('party', oid)
    if isinstance(party, list) and len(party) >= 1:
        # Bug fix: removed a leftover debug print(party) that leaked row
        # contents to stdout on every request.
        # Row layout from the model: (id, name, hqAddress, logoUrl).
        response_body = {
            "id": party[0][0],
            "name": party[0][1],
            "hqAddress": party[0][2],
            "logoUrl": party[0][3]
        }
        return make_response(jsonify({"status": 200, "data": [response_body]}), 200)
    return make_response(jsonify({"status": 404, "error": "Party Not Found"}), 404)
@parties_api_v2.route("/parties/<party_id>", methods=['DELETE'])
@jwt_required
def api_specific_party_delete(party_id):
    """Delete one party by id (DELETE /api/v2/parties/<party_id>). Admin only."""
    if 'Requires Admin Privilege' not in check_user():
        oid = id_conversion(party_id)
        party = PartiesModelDb().delete_resource('party', oid)
        # The model returns a list describing the deleted row on success.
        if isinstance(party, list):
            return make_response(jsonify({"status": 200, "message": "{} Deleted".format(party[0][0])}), 200)
        return make_response(jsonify({"status": 404, "error": "Party Not Found"}), 404)
    return make_response(jsonify({"status": 401, "error": "Unauthorized Access,Requires Admin Rights"}), 401)
| 44.742857 | 119 | 0.664112 | 395 | 3,132 | 5.043038 | 0.235443 | 0.072289 | 0.099398 | 0.138052 | 0.559237 | 0.536145 | 0.461847 | 0.26506 | 0.182731 | 0.182731 | 0 | 0.034783 | 0.192209 | 3,132 | 69 | 120 | 45.391304 | 0.752569 | 0 | 0 | 0.224138 | 0 | 0 | 0.202746 | 0.007663 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086207 | false | 0 | 0.068966 | 0 | 0.362069 | 0.051724 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
705e3b446233d2f6c1287b4611d5abe26c570d0a | 1,307 | py | Python | demo.py | Li-Pro/Clipboard | 329be81902d6fc5e29e60c85d660a86055e5c3ac | [
"MIT"
] | 2 | 2020-05-20T06:34:33.000Z | 2020-05-30T02:32:37.000Z | demo.py | Li-Pro/Clipboarder | 329be81902d6fc5e29e60c85d660a86055e5c3ac | [
"MIT"
] | null | null | null | demo.py | Li-Pro/Clipboarder | 329be81902d6fc5e29e60c85d660a86055e5c3ac | [
"MIT"
] | null | null | null | from pathlib import Path
import re
import time
from capture import getClipboardImage, NoImageData
# Word characters and dots only — a simple file/directory-name whitelist.
PATHNAME_REGEX = re.compile(r'(\w+\.*)*\w')


def regexMatch(regex, s):
    """Return True when *s* fully matches the compiled pattern *regex*."""
    # Idiom fix: compare against None with `is not`, not `!=`.
    return regex.fullmatch(s) is not None
def imgCompare(img1, img2):
    """True iff both images exist, match in size and have identical bytes."""
    if not (img1 and img2):
        return False
    w, h = img1.size
    if img2.size != (w, h):
        return False
    return img1.tobytes() == img2.tobytes()
def main():
    """Poll the clipboard and save every new image into a workspace folder."""
    iwdir = input('Working directory: ')
    if not regexMatch(PATHNAME_REGEX, iwdir):
        raise Exception('Invalid directory name.')
    wdir = Path('saves') / iwdir
    wdir.mkdir(parents=True, exist_ok=True)
    ## Init workspace
    # .wdinfo stores the next image counter so numbering survives restarts.
    wdinfo = Path(wdir) / '.wdinfo'
    if wdinfo.is_file():
        with wdinfo.open() as file:
            try:
                wdcnt = int(file.readline())
            except:
                raise Exception('Error while loading workspace.')
    else:
        wdcnt = 0
    ## Wait for input
    lstimg = None
    while True:
        try:
            img = getClipboardImage()
        except NoImageData:
            # Nothing image-like on the clipboard yet; retry shortly.
            time.sleep(.1)
            continue
        except (KeyboardInterrupt, SystemExit):
            break
        else:
            # Skip duplicates of the last saved image.
            if imgCompare(img, lstimg):
                time.sleep(.2)
                continue
            img_path = wdir / '{}.png'.format(wdcnt)
            img.save(img_path.resolve())
            wdcnt += 1
            lstimg = img
        finally:
            # Persist the counter every iteration so a crash loses nothing.
            with wdinfo.open('w') as file:
                file.write('{}\n'.format(wdcnt))

main()
706002b48666a37499861a75991efe6d866c08af | 1,288 | py | Python | document/code/exo_demo_1/results.py | lebrice/george | 14c4c89906c770528dad2b80973aab0320141fe5 | [
"MIT"
] | 9 | 2018-01-20T16:51:30.000Z | 2020-12-06T22:13:44.000Z | document/code/exo_demo_1/results.py | lebrice/george | 14c4c89906c770528dad2b80973aab0320141fe5 | [
"MIT"
] | null | null | null | document/code/exo_demo_1/results.py | lebrice/george | 14c4c89906c770528dad2b80973aab0320141fe5 | [
"MIT"
] | 4 | 2017-08-31T21:59:56.000Z | 2022-03-03T20:01:42.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["results"]
import os
import triangle
import numpy as np
import cPickle as pickle
import matplotlib.pyplot as pl
def results(fn):
    """Render fit-result plots for the pickled (model, sampler) pair in *fn*.

    Writes "<stem>-results.pdf" (data plus posterior predictions) and
    "<stem>-triangle.png" (corner plot) next to the input file.
    """
    # Bug fix: the original leaked the file handle from pickle.load(open(...)).
    with open(fn, "rb") as fh:
        model, sampler = pickle.load(fh)
    mu = np.median(model.f)
    # Convert flux to parts-per-million relative to the median flux.
    ppm = lambda f: (f / mu - 1) * 1e6
    # Plot the data.
    fig = pl.figure(figsize=(6, 6))
    ax = fig.add_subplot(111)
    ax.plot(model.t, ppm(model.f), ".k")
    ax.set_xlim(np.min(model.t), np.max(model.t))
    ax.set_xlabel("time since transit [days]")
    ax.set_ylabel("relative flux [ppm]")
    fig.subplots_adjust(left=0.2, bottom=0.2, top=0.9, right=0.9)
    # Overplot predictions from 10 random posterior samples.
    samples = sampler.flatchain
    t = np.linspace(model.t.min(), model.t.max(), 1000)
    for i in np.random.randint(len(samples), size=10):
        model.vector = samples[i]
        ax.plot(t, ppm(model.predict(t)), color="#4682b4", alpha=0.5)
    fig.savefig(os.path.splitext(fn)[0] + "-results.pdf")
    # Plot the corner plot of the posterior samples.
    fig = triangle.corner(samples, labels=model.labels,
                          truths=model.true_vector)
    fig.savefig(os.path.splitext(fn)[0] + "-triangle.png")


if __name__ == "__main__":
    import sys
    results(sys.argv[1])
| 27.404255 | 69 | 0.632764 | 199 | 1,288 | 3.9799 | 0.532663 | 0.037879 | 0.022727 | 0.040404 | 0.068182 | 0.068182 | 0.068182 | 0 | 0 | 0 | 0 | 0.032195 | 0.204193 | 1,288 | 46 | 70 | 28 | 0.740488 | 0.078416 | 0 | 0 | 0 | 0 | 0.080372 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.233333 | 0 | 0.266667 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
706047b9ae6c7b0c95aae34cd316a11a431e74e3 | 2,681 | py | Python | vae/model_torch.py | tpvt99/sbcs5478 | 56e4a70462691a40252d7f51da7c45d44d9ca822 | [
"MIT"
] | null | null | null | vae/model_torch.py | tpvt99/sbcs5478 | 56e4a70462691a40252d7f51da7c45d44d9ca822 | [
"MIT"
] | null | null | null | vae/model_torch.py | tpvt99/sbcs5478 | 56e4a70462691a40252d7f51da7c45d44d9ca822 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import numpy as np
class Encoder(nn.Module):
    """Convolutional encoder: (N, 3, 64, 80) image -> (N, latent_dim) vector.

    Three stride-2 convolutions reduce 64x80 to 8x10 with 128 channels
    (128 * 8 * 10 = 10240), which a linear layer projects to latent_dim.
    """

    def __init__(self, latent_dim, input_shape, name="encoder", **kwargs):
        super(Encoder, self).__init__()
        layers = [
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
            nn.ReLU(),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
            nn.ReLU(),
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(in_features=10240, out_features=latent_dim),
            nn.ReLU(),
        ]
        self.dense_proj = nn.Sequential(*layers)

    def forward(self, x):
        return self.dense_proj(x)
class Decoder(nn.Module):
    """Transposed-conv decoder: (N, latent_dim) vector -> (N, 3, 64, 80) image.

    Mirrors the encoder: a linear projection to a 128 x 8 x 10 feature map,
    then three stride-2 transposed convolutions back up to 64 x 80, with a
    final sigmoid so outputs lie in (0, 1).
    """

    def __init__(self, output_shape, latent_dim, name="decoder", **kwargs):
        super(Decoder, self).__init__()
        # Project the latent vector to the flattened 128 x 8 x 10 map.
        self.dense_proj = nn.Sequential(
            nn.Linear(in_features=latent_dim, out_features=8*10*128),  # Must adjust number here
            nn.ReLU()
        )
        self.conv_proj = nn.Sequential(
            nn.ConvTranspose2d(in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), output_padding=(1, 1)),
            nn.ReLU(),
            nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), output_padding=(1, 1)),
            nn.ReLU(),
            nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), output_padding=(1, 1)),
            nn.ReLU(),
            nn.ConvTranspose2d(in_channels=32, out_channels=3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.Sigmoid()
        )

    def forward(self, inputs):
        hidden = self.dense_proj(inputs)
        feature_map = hidden.view(-1, 128, 8, 10)
        return self.conv_proj(feature_map)
class AutoEncoder(nn.Module):
    """End-to-end encoder + decoder pair for training as one model."""

    def __init__(
        self,
        input_shape=(64,80,3),
        latent_dim=16,
        name="autoencoder",
        **kwargs
    ):
        super(AutoEncoder, self).__init__()
        self.encoder = Encoder(latent_dim=latent_dim, input_shape=input_shape)
        self.decoder = Decoder(output_shape=input_shape, latent_dim=latent_dim)

    def forward(self, inputs):
        latent = self.encoder(inputs)
        return self.decoder(latent)
| 35.746667 | 138 | 0.603133 | 370 | 2,681 | 4.162162 | 0.232432 | 0.014286 | 0.058442 | 0.054545 | 0.385714 | 0.361688 | 0.361688 | 0.34026 | 0.294805 | 0.294805 | 0 | 0.054699 | 0.249907 | 2,681 | 74 | 139 | 36.22973 | 0.711089 | 0.08094 | 0 | 0.196429 | 0 | 0 | 0.010221 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.053571 | 0.017857 | 0.267857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70634f2f852c70987a8353d2bd23f6aa46115566 | 2,277 | py | Python | pymetamap/MetaMapLite.py | liquet-ai/pymetamap | 05db8f9625cb1f3b8e4b5e2133624ba799b86a1b | [
"Apache-2.0"
] | 151 | 2015-01-26T21:11:07.000Z | 2022-03-02T08:16:24.000Z | pymetamap/MetaMapLite.py | liquet-ai/pymetamap | 05db8f9625cb1f3b8e4b5e2133624ba799b86a1b | [
"Apache-2.0"
] | 47 | 2016-03-05T11:45:54.000Z | 2022-01-25T17:48:14.000Z | pymetamap/MetaMapLite.py | liquet-ai/pymetamap | 05db8f9625cb1f3b8e4b5e2133624ba799b86a1b | [
"Apache-2.0"
] | 69 | 2015-07-09T02:56:16.000Z | 2021-12-04T17:49:20.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from os import access, X_OK
from os.path import isabs, isdir
class MetaMapLite:
    """ Abstract base class for extracting concepts from text using
    MetaMapLite. To use this you will need to have downloaded the
    recent MetaMapLite software from NLM. metamap_home should point
    to the public_mm_lite folder which contains metamaplite.sh.
    Subclasses need to override the extract_concepts method.
    """
    # NOTE(review): this is the Python-2 metaclass spelling; on Python 3 it
    # has no effect, so the class is not actually abstract there.
    __metaclass__ = abc.ABCMeta

    def __init__(self, metamap_home):
        self.metamap_home = str(metamap_home)
        # Validate the installation directory up front.
        assert isdir(self.metamap_home), "metamap_home: {0} :: Please provide public_mm_lite directory path" \
                                         " which contains metamaplite.sh".format(self.metamap_home)
        assert access(self.metamap_home, X_OK), "User doesn't have executable access to metamap_home: {}".format(self.metamap_home)
        assert isabs(self.metamap_home), "metamap_home: {0} should be an absolute path".format(self.metamap_home)

    @abc.abstractmethod
    def extract_concepts(self, sentences=None, ids=None, filename=None):
        """ Extract concepts from a list of sentences using MetaMapLite. """
        return

    @staticmethod
    def get_instance(metamap_home, backend='subprocess', **extra_args):
        """Factory returning a concrete backend bound to *metamap_home*."""
        extra_args.update(metamap_home=metamap_home)
        assert isabs(metamap_home), "metamap_home: {0} should be an absolute path".format(metamap_home)
        if backend != 'subprocess':
            raise ValueError("Unknown backend: %r (known backends: "
                             "'subprocess')" % backend)
        from .SubprocessBackendLite import SubprocessBackendLite
        return SubprocessBackendLite(**extra_args)
| 44.647059 | 131 | 0.709267 | 297 | 2,277 | 5.306397 | 0.464646 | 0.132614 | 0.076142 | 0.055838 | 0.118655 | 0.084391 | 0.064721 | 0.064721 | 0.064721 | 0.064721 | 0 | 0.003911 | 0.213878 | 2,277 | 50 | 132 | 45.54 | 0.876536 | 0.38823 | 0 | 0 | 0 | 0 | 0.231058 | 0 | 0 | 0 | 0 | 0 | 0.173913 | 1 | 0.130435 | false | 0 | 0.173913 | 0 | 0.478261 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7063fade463f2ff5785707f51f17ad043d668b7e | 1,779 | py | Python | examples/chi2_fit.py | HDembinski/pyik | fc9e87dec88484458479225c1ec2357ff48b5bb7 | [
"BSD-3-Clause"
] | 11 | 2018-04-16T08:07:11.000Z | 2022-02-14T15:19:22.000Z | examples/chi2_fit.py | HDembinski/pyik | fc9e87dec88484458479225c1ec2357ff48b5bb7 | [
"BSD-3-Clause"
] | 10 | 2018-04-09T13:57:16.000Z | 2019-04-28T11:28:55.000Z | examples/chi2_fit.py | HDembinski/pyik | fc9e87dec88484458479225c1ec2357ff48b5bb7 | [
"BSD-3-Clause"
] | 4 | 2017-09-22T10:24:57.000Z | 2020-11-24T10:24:30.000Z | # -*- coding: utf-8 -*-
"""
This example demonstrates fitting a model to a (simulated) dataset using
numpyext.chi2_fit, which wraps Minuit.
"""
import numpy as np
from matplotlib import pyplot
from pyik.fit import ChiSquareFunction
from pyik.mplext import cornertext
np.random.seed(1)
def model(x, pars):
    """Piecewise model: linear below x_break, plus a quadratic term above.

    Vectorized over x; returns an array matching np.atleast_1d(x).
    """
    a0, a1, x_break = pars
    x = np.atleast_1d(x)
    y = np.empty_like(x)
    below = x <= x_break
    above = ~below
    y[below] = a0 * x[below]
    y[above] = a0 * x[above] + a1 * (x[above] - x_break)**2
    return y
# Simulate a dataset of n measurements
n = 20
parsTrue = (2.0, 0.5, 13.0)
xs = np.linspace(0, 20, n)
ys = model(xs, parsTrue)
# Add some noise to the points
eys = 1.5 * np.ones(n)
ys += np.random.randn(n) * eys
# Perform a fit to the data points; reuse the errors used to generate the noise
# Note: fits to data without xerrors are much faster
starts = (1.0, 1.0, 10.0)  # starting values
# define bounds for parameter "x_break"
lower_bounds = (-np.inf, -np.inf, 0.0)
upper_bounds = (np.inf, np.inf, 20.0)
pars, cov, chi2, ndof = \
    ChiSquareFunction(model, xs, ys, eys) \
    .Minimize(starts,
              lower_bounds=lower_bounds,
              upper_bounds=upper_bounds)
# Generate a plot of the fit
new_xs = np.linspace(0., 20, 1000)
figure = pyplot.figure()
pyplot.plot(new_xs, model(new_xs, pars), 'b')
pyplot.errorbar(xs, ys, eys, fmt='ok')
pyplot.xlim(-1, 21)
pyplot.xlabel("x")
pyplot.ylabel("y")
# Annotate the plot with fitted parameters vs. the truth.
s = "Fit Example:\n"
for i, label in enumerate(("a_0", "a_1", "x_{brk}")):
    s += "$%s = %.2f \pm %.2f$ (True: $%.1f$)\n" % (label, pars[i], cov[i, i]**0.5, parsTrue[i])
s += "$\\chi^2 / n_{dof} = %.3f$" % (chi2 / ndof)
cornertext(s)
pyplot.show()
| 25.782609 | 94 | 0.649241 | 305 | 1,779 | 3.721311 | 0.422951 | 0.021145 | 0.015859 | 0.014097 | 0.075771 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039392 | 0.186622 | 1,779 | 68 | 95 | 26.161765 | 0.74499 | 0.289488 | 0 | 0 | 0 | 0 | 0.07649 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.1 | 0 | 0.15 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70649b12e8177f792e96cdeb6bd72018b6de7576 | 5,406 | py | Python | Python/esys/lsm/doc/Overview.py | danielfrascarelli/esys-particle | e56638000fd9c4af77e21c75aa35a4f8922fd9f0 | [
"Apache-2.0"
] | null | null | null | Python/esys/lsm/doc/Overview.py | danielfrascarelli/esys-particle | e56638000fd9c4af77e21c75aa35a4f8922fd9f0 | [
"Apache-2.0"
] | null | null | null | Python/esys/lsm/doc/Overview.py | danielfrascarelli/esys-particle | e56638000fd9c4af77e21c75aa35a4f8922fd9f0 | [
"Apache-2.0"
] | null | null | null | #############################################################
## ##
## Copyright (c) 2003-2017 by The University of Queensland ##
## Centre for Geoscience Computing ##
## http://earth.uq.edu.au/centre-geoscience-computing ##
## ##
## Primary Business: Brisbane, Queensland, Australia ##
## Licensed under the Open Software License version 3.0 ##
## http://www.apache.org/licenses/LICENSE-2.0 ##
## ##
#############################################################
# The module docstring (__doc__) is assembled programmatically at the bottom
# of this file from the reStructuredText section templates defined below.
__docformat__ = "restructuredtext en"
import esys.lsm.doc.Util
import esys.lsm.util.InstallInfo
# Overview section template. NOTE(review): the {pkgName:s} placeholders are
# presumably substituted by the doc utilities -- confirm in esys.lsm.doc.Util.
__esysParticleOverviewSection = \
"""
{pkgName:s} Overview
==================================
The Lattice Solid Model (LSM) [MoraPAGEOPH1994]_, [PlaceJCompPhys1999]_ is a particle-based
model similar to the Distinct Element Model [CundallGeotech1979]_.
The model consists of particles which are characterized by
their shape, mass, position, orientation and velocity. The particles
interact with their nearest neighbours by imparting contact forces.
Typically, Discrete Element Model (DEM) particles are spherical and the
contact forces consist of a linear elastic normal component and linear
elastic tangential component. {pkgName:s} is a parallel implementation
of the LSM with a Python_ scripting interface.
.. _Python: http://www.python.org
Particle Types
--------------
Currently, there exist three types of {pkgName:s} spherical particles:
*non-rotational*, *rotational*, *thermal-rotational*:
Non-rotational Spheres
Non-rotational spherical particles possess no rotational
degrees of freedom. Objects of class `esys.lsm.NRotSphere`
represent non-rotational spherical particles.
Rotational Spheres
Rotational spherical particles possess orientation information.
Particles of this type change orientation according to the
applied moments. Objects of class `esys.lsm.RotSphere`
represent rotational spherical particles.
Thermal Rotational Spheres
Thermal rotational spherical particles are the same as "Rotational Spheres"
with the addition of thermal properties (temperature and thermal expansion).
Objects of class `esys.lsm.RotThermalSphere`
represent thermal rotational spherical particles.
Inter-particle Interactions
---------------------------
Interactions between model particles are also classified as
*non-rotational* and *rotational*. Two spherical particles
involved in a non-rotational interaction have
all forces applied at the centre of mass.
Two spherical particles involved in a
rotational interaction experience moments
due to forces which are, typically, applied
at a *contact point*. The inter-particle interactions
include:
Non-rotational Elastic
Purely linear elastic repulsion of particles when in contact.
Non-rotational Bonded
Linear elastic attraction and repulsion while bond remains intact.
Bond *breaks* when a threshold separation distance is reached.
Non-rotational Friction
Linear elastic repulsion, linear elastic shear rigidity and Coulomb
dynamic friction law.
Rotational Elastic
Linear elastic repulsion as well as linear elastic shear rigidity.
Rotational Bonded
Linear elastic tension, compression, shear, torsion and bending
forces while bond remains intact. Bond *breaks* if a threshold
force limit is reached.
Rotational Friction
Linear elastic repulsion, linear elastic shear rigidity and Coulomb
dynamic friction law.
Thermal Non-rotational Elastic
Linear elastic repulsion as well as heat transfer.
Thermal Rotational Bonded
Same as "Rotational Bonded" with addition of heat transfer.
Thermal Rotational Friction
Same as "Rotational Friction" with addition of heat transfer and
heat generation during frictional slip.
Fixed objects
-------------
Particles not only interact with other particles, but also with
*fixed* objects within the model. These fixed objects are not
subject to the laws of motion and provide a means of imposing
particular types of boundary conditions. Fixed objects include:
Walls
An infinite plane characterized by position and normal direction.
Linear Mesh
A piecewise linear mesh which can be used to represent a surface in 2D.
Triangular mesh
A triangular mesh which can be used to represent a surface in 3D.
"""
# Bibliography section; the citation labels match the references used above.
__citSection = \
"""
References
==========
.. [CundallGeotech1979] P.A. Cundall and O.A.D Strack
(1979)
"A Discrete Numerical Model for Granular Assemblies",
*Ge\'otechnique*,
**vol. 29**,
pp. 47-65.
.. [MoraPAGEOPH1994] P. Mora and D. Place
(1994)
"Simulation of the Stick-Slip Instability",
*Pure Appl. Geophys.*,
**vol. 143**,
pp. 61-87.
.. [PlaceJCompPhys1999] D. Place and P. Mora
(1999)
"The Lattice Solid Model to Simulate the Physics of Rocks and Earthquakes:
Incorporation of Friction",
*J. Comp. Physics*,
**vol. 150**,
pp. 332-372.
"""
# Assemble the final module docstring: register both sections with the doc
# utilities and append a one-line :summary: field using the package name.
__doc__ = \
esys.lsm.doc.Util.setSectionDoc("ESySParticleOverviewSection",__esysParticleOverviewSection) \
+ esys.lsm.doc.Util.setSectionDoc("CitationSection",__citSection) \
+ ("\n:summary: {0:s} overview.\n".format(esys.lsm.util.InstallInfo.pkgName))
| 34.653846 | 100 | 0.699593 | 639 | 5,406 | 5.885759 | 0.397496 | 0.041478 | 0.044669 | 0.011167 | 0.174688 | 0.129753 | 0.095719 | 0.095719 | 0.070726 | 0.070726 | 0 | 0.01682 | 0.197188 | 5,406 | 155 | 101 | 34.877419 | 0.84977 | 0.071032 | 0 | 0 | 0 | 0 | 0.208817 | 0.062645 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7066c0de46a9cb6cbb4bbc49dad20327a5f51884 | 4,483 | py | Python | tour5_damage_bond/damage2d_explorer.py | bmcs-group/bmcs_tutorial | 4e008e72839fad8820a6b663a20d3f188610525d | [
"MIT"
] | null | null | null | tour5_damage_bond/damage2d_explorer.py | bmcs-group/bmcs_tutorial | 4e008e72839fad8820a6b663a20d3f188610525d | [
"MIT"
] | null | null | null | tour5_damage_bond/damage2d_explorer.py | bmcs-group/bmcs_tutorial | 4e008e72839fad8820a6b663a20d3f188610525d | [
"MIT"
] | null | null | null |
import numpy as np
import sympy as sp
import bmcs_utils.api as bu
from bmcs_cross_section.pullout import MATS1D5BondSlipD
# Symbolic definition of the equivalent slip measure kappa = sqrt(s_x^2 + s_y^2),
# lambdified into a vectorized numpy-callable used by get_tau_s below.
s_x, s_y = sp.symbols('s_x, s_y')
kappa_ = sp.sqrt( s_x**2 + s_y**2 )
get_kappa = sp.lambdify( (s_x, s_y), kappa_, 'numpy' )
def get_tau_s(s_x_n1, s_y_n1, Eps_n, bs, **kw):
    '''Get the stress for the slip in x, y directions given the state kappa_n.

    Parameters
    ----------
    s_x_n1, s_y_n1 : array-like
        Trial slip components at step n+1.
    Eps_n : sequence of length 3
        State variables at step n; only the third entry (kappa_n) is used.
    bs : bond-slip material model
        Must provide ``E_b`` (bond stiffness) and ``omega_fn_`` (damage function).

    Returns
    -------
    (Eps_n1, Sig_n1) : tuple of ndarrays
        Updated state [s_x, s_y, kappa] and response [tau_x, tau_y, omega].
    '''
    _, _, kappa_n = Eps_n
    kappa = get_kappa(s_x_n1, s_y_n1)
    # adapt the shape of the state array so the history variable can be
    # compared element-wise with the trial kappa
    kappa_n_ = np.broadcast_to(kappa_n, kappa.shape)
    # irreversibility: kappa never decreases.
    # BUG FIX: np.float64 replaces np.float_, which was removed in NumPy 2.0.
    kappa_n1 = np.max(np.array([kappa_n_, kappa], dtype=np.float64), axis=0)
    E_b = bs.E_b
    omega_n1 = bs.omega_fn_(kappa_n1)
    # isotropic damage: effective stiffness is (1 - omega) * E_b
    tau_x_n1 = (1 - omega_n1) * E_b * s_x_n1
    tau_y_n1 = (1 - omega_n1) * E_b * s_y_n1
    return (
        np.array([s_x_n1, s_y_n1, kappa_n1]),
        np.array([tau_x_n1, tau_y_n1, omega_n1])
    )
def plot_tau_s(ax, Eps_n, s_min, s_max, n_s, bs, **kw):
    """Render the stress-norm surface ||tau||(s_x, s_y) on the 3D axes *ax*.

    Also overlays the current elastic-domain boundary -- a circle of radius
    max(kappa_0, kappa_n) -- both at the base plane and lifted to the
    corresponding stress level f_t.
    """
    # sample the slip plane on an n_s x n_s grid
    # (complex step count makes np.mgrid include the end points)
    step = complex(0, n_s)
    grid_x, grid_y = np.mgrid[s_min:s_max:step, s_min:s_max:step]
    Eps_n1, Sig_n1 = get_tau_s(grid_x, grid_y, Eps_n, bs, **kw)
    slip_x, slip_y, _ = Eps_n1
    tau_x, tau_y, _ = Sig_n1
    tau_norm = np.sqrt(tau_x ** 2 + tau_y ** 2)
    ax.plot_surface(slip_x, slip_y, tau_norm, alpha=0.2)
    # circle marking the elastic limit in the slip plane
    _, _, kappa_n = Eps_n
    kappa_0 = bs.omega_fn_.kappa_0
    E_b = bs.E_b
    radius = max(kappa_0, kappa_n)
    omega_n = bs.omega_fn_(radius)
    f_t = (1 - omega_n) * E_b * radius
    angles = np.linspace(0, 2 * np.pi, 100)
    circle_x = radius * np.sin(angles)
    circle_y = radius * np.cos(angles)
    ax.plot(circle_x, circle_y, 0, color='gray')
    ax.plot(circle_x, circle_y, f_t, color='gray')
    ax.set_xlabel(r'$s_x$ [mm]')
    ax.set_ylabel(r'$s_y$ [mm]')
    ax.set_zlabel(r'$\| \tau \| = \sqrt{\tau_x^2 + \tau_y^2}$ [MPa]')
class Explore(bu.Model):
    """Interactive explorer for the 2D (s_x, s_y) bond-slip damage model.

    Drives the material point through a piecewise-linear slip history and
    renders the damaged stress surface together with the realized loading
    and stress paths.
    """
    name = 'Damage model explorer'
    bs = bu.Instance(MATS1D5BondSlipD, ())
    tree = ['bs']
    def __init__(self, *args, **kw):
        super(Explore, self).__init__(*args, **kw)
        self.reset_i()
    def reset_i(self):
        """Reset the loading history and all recorded state/stress arrays."""
        self.s_x_0, self.s_y_0 = 0, 0
        self.t0 = 0
        self.Sig_record = []
        self.Eps_record = []
        # BUG FIX: this was assigned to a local variable, so the attribute
        # was never actually created on the instance
        self.iter_record = []
        self.t_arr = []
        self.s_x_t, self.s_y_t = [], []
        # np.float64 replaces np.float_, which was removed in NumPy 2.0
        self.Eps_n1 = np.zeros((3,), dtype=np.float64)
    def get_response_i(self):
        """Advance the slip from (s_x_0, s_y_0) to (s_x_1, s_y_1) in n_steps
        increments, recording state and stress at every step."""
        # n_steps is a bu.Float trait; np.linspace requires an integer count
        n_points = int(self.n_steps) + 1
        t1 = self.t0 + n_points
        ti_arr = np.linspace(self.t0, t1, n_points)
        si_x_t = np.linspace(self.s_x_0, self.s_x_1, n_points)
        si_y_t = np.linspace(self.s_y_0, self.s_y_1, n_points)
        for s_x_n1, s_y_n1 in zip(si_x_t, si_y_t):
            self.Eps_n1, self.Sig_n1 = get_tau_s(s_x_n1, s_y_n1, self.Eps_n1, self.bs)
            self.Sig_record.append(self.Sig_n1)
            self.Eps_record.append(self.Eps_n1)
        self.t_arr = np.hstack([self.t_arr, ti_arr])
        self.s_x_t = np.hstack([self.s_x_t, si_x_t])
        self.s_y_t = np.hstack([self.s_y_t, si_y_t])
        # the end of this increment becomes the start of the next one
        self.t0 = t1
        self.s_x_0, self.s_y_0 = self.s_x_1, self.s_y_1
        return
    def plot_Sig_Eps(self, ax1, Sig_arr):
        """Plot the stress norm ||tau|| along the recorded slip path."""
        tau_x, tau_y, _omega = Sig_arr.T  # third column is damage omega
        tau = np.sqrt(tau_x ** 2 + tau_y ** 2)
        ax1.plot3D(self.s_x_t, self.s_y_t, tau, color='orange', lw=3)
    def subplots(self, fig):
        """Create the single 3D axes used by update_plot."""
        ax_sxy = fig.add_subplot(1, 1, 1, projection='3d')
        return ax_sxy
    def update_plot(self, ax):
        """Recompute the response and redraw surface, stress and slip paths."""
        self.get_response_i()
        Sig_arr = np.array(self.Sig_record, dtype=np.float64)
        Eps_arr = np.array(self.Eps_record, dtype=np.float64)
        # use the configurable grid resolution instead of a hard-coded 500
        plot_tau_s(ax, Eps_arr[-1, ...],
                   self.s_min, self.s_max, self.n_s, self.bs)
        self.plot_Sig_Eps(ax, Sig_arr)
        ax.plot(self.s_x_t, self.s_y_t, 0, color='red')
    n_s = bu.Int(500, BC=True)
    s_x_1 = bu.Float(0, BC=True)
    s_y_1 = bu.Float(0, BC=True)
    n_steps = bu.Float(20, BC=True)
    s_min = bu.Float(-0.1, BC=True)
    s_max = bu.Float(0.1, BC=True)
    def run(self, update_progress=lambda t: t):
        """Run the simulation.

        *update_progress* is kept for interface compatibility but is not
        forwarded -- get_response_i() accepts no arguments.
        """
        try:
            # BUG FIX: passing update_progress to get_response_i raised a
            # TypeError on every call (the method takes no parameters)
            self.get_response_i()
        except ValueError:
            print('No convergence reached')
        return
    t = bu.Float(0)
    t_max = bu.Float(1)
    def reset(self):
        self.reset_i()
    ipw_view = bu.View(
        bu.Item('s_max'),
        bu.Item('n_s'),
        bu.Item('s_x_1', editor=bu.FloatRangeEditor(low_name='s_min',high_name='s_max')),
        bu.Item('s_y_1', editor=bu.FloatRangeEditor(low_name='s_min',high_name='s_max')),
        bu.Item('n_steps'),
    )
| 33.706767 | 89 | 0.602052 | 871 | 4,483 | 2.746269 | 0.166475 | 0.022575 | 0.016722 | 0.018813 | 0.30811 | 0.228679 | 0.144231 | 0.104097 | 0.074833 | 0.074833 | 0 | 0.040261 | 0.246487 | 4,483 | 132 | 90 | 33.962121 | 0.667851 | 0.023422 | 0 | 0.072727 | 0 | 0 | 0.043249 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.036364 | 0 | 0.281818 | 0.009091 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
706745eb0046576ce77b57a2d81861de6f37d9a2 | 2,042 | py | Python | plugins/hanime.py | streamlink-plugins/streamlink-plugins | 64a3dd5d98a94cf5e61f0faa22821f58c6be9258 | [
"Unlicense"
] | 1 | 2021-08-17T07:26:59.000Z | 2021-08-17T07:26:59.000Z | plugins/hanime.py | streamlink-plugins/streamlink-plugins | 64a3dd5d98a94cf5e61f0faa22821f58c6be9258 | [
"Unlicense"
] | 1 | 2021-07-20T05:56:21.000Z | 2021-08-05T00:07:45.000Z | plugins/hanime.py | streamlink-plugins/streamlink-plugins | 64a3dd5d98a94cf5e61f0faa22821f58c6be9258 | [
"Unlicense"
] | null | null | null | import re
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
# Validation schema for the hanime.tv v8 video API response: pulls out the
# title/visibility of the video and, per server, the list of stream variants
# (height, direct URL and numeric id).
_post_schema = validate.Schema({
    'hentai_video': validate.Schema({
        'name': validate.text,
        'is_visible': bool
    }),
    'videos_manifest': validate.Schema({
        'servers': validate.Schema([{
            'streams': validate.Schema([{
                'height': validate.text,
                'url': validate.text,
                'id': int
            }])
        }])
    }),
})
@pluginmatcher(re.compile(
    r"https?://hanime\.tv/videos/hentai/(?P<videoid>[a-zA-Z0-9_-]+)"
))
class hanimetv(Plugin):
    """Streamlink plugin resolving hanime.tv VOD pages to HLS streams."""
    def get_title(self):
        return self.title
    def get_author(self):
        return "hanime"
    def get_category(self):
        return "VOD"
    def _get_streams(self):
        """Query the v8 video API and yield (quality, HLSStream) pairs."""
        videoid = self.match.group("videoid")
        api_call = "https://hw.hanime.tv/api/v8/video?id={0}".format(videoid)
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; rv:78.0) Gecko/20100101 Firefox/78.0",
            "Referer": self.url,
        }
        res = self.session.http.get(api_call, headers=headers)
        data = self.session.http.json(res, schema=_post_schema)
        if not data:
            self.logger.info("Not a valid url.")
            return
        self.title = data["hentai_video"]["name"]
        self.logger.info("Video Name: {0}".format(self.title))
        for stream in data["videos_manifest"]["servers"][0]["streams"]:
            # DRY: the quality label is identical in both branches,
            # so compute it once instead of duplicating it
            q = "{0}p".format(stream["height"])
            if stream["url"]:
                s = HLSStream(self.session, stream["url"])
            else:
                # no direct URL given: fall back to the m3u8 cache endpoint
                u = "https://weeb.hanime.tv/weeb-api-cache/api/v8/m3u8s/{0}.m3u8".format(stream["id"])
                s = HLSStream(self.session, u)
            yield q, s
__plugin__ = hanimetv
| 29.171429 | 102 | 0.539667 | 235 | 2,042 | 4.6 | 0.382979 | 0.064755 | 0.037003 | 0.016651 | 0.038853 | 0.038853 | 0 | 0 | 0 | 0 | 0 | 0.023438 | 0.31048 | 2,042 | 69 | 103 | 29.594203 | 0.744318 | 0 | 0 | 0.148148 | 0 | 0.055556 | 0.210088 | 0.029873 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.074074 | 0.055556 | 0.240741 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
706c68208b42d394b8a5aacf46f45e4eb99996af | 2,712 | py | Python | src/tests/test_pagure_flask_ui_app_index.py | yifengyou/learn-pagure | e54ba955368918c92ad2be6347b53bb2c24a228c | [
"Unlicense"
] | null | null | null | src/tests/test_pagure_flask_ui_app_index.py | yifengyou/learn-pagure | e54ba955368918c92ad2be6347b53bb2c24a228c | [
"Unlicense"
] | null | null | null | src/tests/test_pagure_flask_ui_app_index.py | yifengyou/learn-pagure | e54ba955368918c92ad2be6347b53bb2c24a228c | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""
(c) 2017 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <pingou@pingoured.fr>
"""
from __future__ import unicode_literals, absolute_import
import datetime
import unittest
import shutil
import sys
import os
import six
import json
import pygit2
from mock import patch, MagicMock
sys.path.insert(
0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
)
import pagure.lib.query
import tests
class PagureFlaskAppIndextests(tests.Modeltests):
    """ Tests for the index page of flask app controller of pagure """
    def test_index_logged_out(self):
        """ Test the index endpoint when logged out. """
        # Anonymous visit: the index lists all projects (initially none)
        output = self.app.get("/")
        self.assertEqual(output.status_code, 200)
        output_text = output.get_data(as_text=True)
        self.assertIn("<title>Home - Pagure</title>", output_text)
        self.assertIn(
            '<h3 class="m-0 font-weight-bold">All Projects '
            '<span class="badge badge-secondary">0</span></h3>',
            output_text,
        )
        # After creating fixture projects, the counter reflects them; an
        # invalid ?page= value must not break the page
        tests.create_projects(self.session)
        output = self.app.get("/?page=abc")
        self.assertEqual(output.status_code, 200)
        self.assertIn(
            '<h3 class="m-0 font-weight-bold">All Projects '
            '<span class="badge badge-secondary">3</span></h3>',
            output.get_data(as_text=True),
        )
    def test_index_logged_in(self):
        """
        Test the index endpoint when logged in.
        It should redirect to the userdash.
        """
        tests.create_projects(self.session)
        # Add a 3rd project with a long description
        item = pagure.lib.model.Project(
            user_id=2,  # foo
            name="test3",
            description="test project #3 with a very long description",
            hook_token="aaabbbeeefff",
        )
        self.session.add(item)
        self.session.commit()
        # Logged-in users land on their dashboard (only their own projects),
        # not on the global "All Projects" index
        user = tests.FakeUser(username="foo")
        with tests.user_set(self.app.application, user):
            output = self.app.get("/", follow_redirects=True)
            self.assertEqual(output.status_code, 200)
            output_text = output.get_data(as_text=True)
            self.assertIn(
                '<span class="btn btn-outline-secondary disabled '
                'opacity-100 border-0 ml-auto font-weight-bold">'
                "1 Projects</span>\n",
                output_text,
            )
            self.assertNotIn(
                '<h3 class="m-0 font-weight-bold">All Projects '
                '<span class="badge badge-secondary">3</span></h3>',
                output_text,
            )
# Allow running this test module directly with the default unittest runner.
if __name__ == "__main__":
    unittest.main(verbosity=2)
| 28.547368 | 71 | 0.598451 | 329 | 2,712 | 4.796353 | 0.410334 | 0.038023 | 0.035488 | 0.030418 | 0.363118 | 0.325095 | 0.292776 | 0.249683 | 0.249683 | 0.249683 | 0 | 0.019497 | 0.281342 | 2,712 | 94 | 72 | 28.851064 | 0.790149 | 0.122419 | 0 | 0.290323 | 0 | 0 | 0.220835 | 0.074903 | 0 | 0 | 0 | 0 | 0.129032 | 1 | 0.032258 | false | 0 | 0.193548 | 0 | 0.241935 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
706c9213e500aaaf1819246602b29a0148ed5c14 | 3,255 | py | Python | src/server/message-distributor/message-distributor.py | fredsongyu/mmitss-az | 62fb59a9e5a19f62a1096971f3cc0ecc04599106 | [
"Apache-2.0"
] | 10 | 2018-12-05T14:48:59.000Z | 2022-02-17T02:10:51.000Z | src/server/message-distributor/message-distributor.py | fredsongyu/mmitss-az | 62fb59a9e5a19f62a1096971f3cc0ecc04599106 | [
"Apache-2.0"
] | null | null | null | src/server/message-distributor/message-distributor.py | fredsongyu/mmitss-az | 62fb59a9e5a19f62a1096971f3cc0ecc04599106 | [
"Apache-2.0"
] | 8 | 2018-11-16T06:38:25.000Z | 2022-03-09T18:22:59.000Z | '''
***************************************************************************************
© 2019 Arizona Board of Regents on behalf of the University of Arizona with rights
granted for USDOT OSADP distribution with the Apache 2.0 open source license.
***************************************************************************************
M_MessageDistributor.py
Created by: Niraj Vasant Altekar
University of Arizona
College of Engineering
This code was developed under the supervision of Professor Larry Head
in the Systems and Industrial Engineering Department.
***************************************************************************************
'''
# When True, configuration is loaded from the simulation-tools tree instead
# of the deployed /nojournal paths, and the socket binds to localhost.
TESTING = False
import json
import socket
import sys
from MessageDistributor import MessageDistributor
import datetime
# Load the master configuration (IP address and port of this component).
if TESTING:
    configFile = open("../../../config/simulation-tools/nojournal/bin/mmitss-phase3-master-config.json", 'r')
else:
    configFile = open("/nojournal/bin/mmitss-phase3-master-config.json", 'r')
masterConfig = json.load(configFile)
configFile.close()
# Load the message-distributor specific configuration.
if TESTING:
    configFile = open("../../../config/simulation-tools/nojournal/bin/mmitss-message-distributor-config.json", 'r')
else:
    configFile = open("/nojournal/bin/mmitss-message-distributor-config.json", 'r')
config = json.load(configFile)
configFile.close()
msgDist = MessageDistributor(config)
# UDP socket on which incoming BSM/MAP/SSM messages are received.
receivingSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if TESTING:
    receivingSocket.bind(("127.0.0.1", masterConfig["PortNumber"]["MessageDistributor"]))
else:
    receivingSocket.bind((masterConfig["MessageDistributorIP"], masterConfig["PortNumber"]["MessageDistributor"]))
rawBsmLogging = config["raw_bsm_logging"]
if rawBsmLogging:  # idiom fix: was "== True"
    logfile = open(("rawBsmLog_" + ('{:%m%d%Y_%H%M%S}'.format(datetime.datetime.now())) + ".csv"), 'w')
    logfile.write("timestamp,secMark,temporaryId,latitude,longitude,elevation,speed,heading,type,length,width\n")
# BUG FIX: the cleanup calls after the infinite loop were unreachable; wrap
# the loop in try/finally so the socket and the log file are actually closed
# when the process exits (e.g. on KeyboardInterrupt).
try:
    while True:
        data, addr = receivingSocket.recvfrom(40960)
        msg = json.loads(data.decode())
        msg = msgDist.timestampMessage(msg)
        messageType = msgDist.distributeMsgToInfrastructureAndGetType(msg)
        if messageType == "BSM":
            msgDist.distributeBsmToClients(msg)
            if rawBsmLogging:
                logfile.write(str(msg["Timestamp_posix"]) + "," +
                            str(msg["BasicVehicle"]["secMark_Second"]) + "," +
                            str(msg["BasicVehicle"]["temporaryID"]) + "," +
                            str(msg["BasicVehicle"]["position"]["latitude_DecimalDegree"]) + "," +
                            str(msg["BasicVehicle"]["position"]["longitude_DecimalDegree"]) + "," +
                            str(msg["BasicVehicle"]["position"]["elevation_Meter"]) + "," +
                            str(msg["BasicVehicle"]["speed_MeterPerSecond"]) + "," +
                            str(msg["BasicVehicle"]["heading_Degree"]) + "," +
                            str(msg["BasicVehicle"]["type"]) + "," +
                            str(msg["BasicVehicle"]["size"]["length_cm"]) + "," +
                            str(msg["BasicVehicle"]["size"]["width_cm"]) + "\n"
                            )
        elif messageType == "MAP":
            msgDist.distributeMapToClients(msg)
        elif messageType == "SSM":
            msgDist.distributeSsmToClients(msg)
finally:
    if rawBsmLogging:
        logfile.close()
    receivingSocket.close()
706f77988f356002f78f82f354138db4b26d737c | 2,989 | py | Python | Section_16 _Orientacao_a_objeto/execicios/ex02.py | thiagofreitascarneiro/Python_OOP | 037621e334ec7159fe0da937db8418eba6321bdd | [
"MIT"
] | null | null | null | Section_16 _Orientacao_a_objeto/execicios/ex02.py | thiagofreitascarneiro/Python_OOP | 037621e334ec7159fe0da937db8418eba6321bdd | [
"MIT"
] | null | null | null | Section_16 _Orientacao_a_objeto/execicios/ex02.py | thiagofreitascarneiro/Python_OOP | 037621e334ec7159fe0da937db8418eba6321bdd | [
"MIT"
] | null | null | null | '''
Crie uma classe Agenda que pode armazenar 10 pessoas e seja capaz de realizar as
seguintes operações:
* void armazenaPessoa(String nome, int idade, float altura);
* void removePessoa(String nome);
* int buscaPessoa(String nome); // informa em que posição da agenda está a pessoa
* void imprimeAgenda(); // imprime os dados de todas as pessoas da agenda
* int buscaPessoa(String nome); // imprime os dados da pessoa que está na posição 'i' da agenda.
'''
class Pessoa:
    """A person record with name (nome), age (idade) and height (altura)."""
    def __init__(self, nome, idade, altura):
        # Double-underscore attributes are name-mangled to _Pessoa__<name>;
        # the Agenda class below deliberately accesses them that way.
        self.__nome = nome
        self.__idade = idade
        self.__altura = altura
class Agenda:
    """In-memory address book of Pessoa objects.

    NOTE: ``agenda`` is a class-level list and is therefore shared by every
    Agenda instance; kept as-is for backward compatibility with the script
    below, which only ever creates one instance.
    """
    agenda = []
    def armazena_pessoa(self, pessoa):
        """Store a person in the agenda."""
        self.agenda.append(pessoa)
    def imprime_agenda(self):
        """Print the data of every person in the agenda."""
        print('*** IMPRIMINDO OS DADOS DA AGENDA ***')
        for i in self.agenda:
            print(f'Nome: {i._Pessoa__nome}| Idade: {i._Pessoa__idade} |altura: {i._Pessoa__altura}|')
        print()
    def imprime_pessoa(self, index):
        """Print the data of the person stored at position *index*."""
        for p, i in enumerate(self.agenda):
            if p == index:
                print('*** IMPRIMINDO OS DADOS DA PESSOA ***')
                print(f'Dados da pessoa na posição {index}: Nome: {i._Pessoa__nome}| Idade: {i._Pessoa__idade} |altura: {i._Pessoa__altura}|')
                print()
    def busca_pessoa(self, nome):
        """Report the position of *nome* in the agenda, or that it is absent."""
        for p, i in enumerate(self.agenda):
            if nome == i._Pessoa__nome:
                print('*** INFORMANDO A POSIÇÃO DA AGENDA. ***')
                print(f'A posição da Agenda que a/o {nome} se encontra é na {p} posição.')
            elif nome != i._Pessoa__nome and p == len(self.agenda) - 1:
                print(f'O {nome} não existe na Agenda.')
            print()
    def remover_pessoa(self, nome):
        """Remove every entry named *nome*, then print the remaining agenda."""
        print('*** REMOVENDO A PESSOA DA AGENDA. ***')
        # BUG FIX: the original deleted from the list while enumerating it,
        # which skips the element following each removal; iterate a snapshot
        # and remove by identity instead.
        for n in list(self.agenda):
            if nome == n._Pessoa__nome:
                print(f'Removendo o contato {n._Pessoa__nome}')
                self.agenda.remove(n)
        for i in self.agenda:
            print(f'Nome: {i._Pessoa__nome}| Idade: {i._Pessoa__idade} |altura: {i._Pessoa__altura}|')
        print()
# Instantiate sample Pessoa contacts
user1 = Pessoa('Bob Jack', 32, 1.85)
user2 = Pessoa('Billy Joe', 39, 1.89)
user3 = Pessoa('Ayn Rand', 69, 1.67)
user4 = Pessoa('Thomas Sowell', 85, 1.99)
user5 = Pessoa('Hermione Granger', 29, 1.65)
# Instantiate the Agenda object
agenda = Agenda()
# Store the contacts
Agenda.armazena_pessoa(agenda, user1)
Agenda.armazena_pessoa(agenda, user2)
Agenda.armazena_pessoa(agenda, user3)
Agenda.armazena_pessoa(agenda, user4)
Agenda.armazena_pessoa(agenda, user5)
# Exercise each operation
Agenda.imprime_agenda(agenda)
Agenda.remover_pessoa(agenda, 'Bob Jack') # removing a contact
Agenda.imprime_pessoa(agenda, 2) # printing the entry at a given position
Agenda.busca_pessoa(agenda, 'Hermione Granger') # name that exists
Agenda.busca_pessoa(agenda, 'Babu Rangel') # name not present in the list
706f947075d48d6e639e35f385287b783e8a5945 | 3,148 | py | Python | get_feature.py | 1895-art/stock-price-predict | 951a632cd397e969229d793e0c23f0575d154240 | [
"MIT"
] | 95 | 2018-07-15T10:04:27.000Z | 2022-03-24T11:49:18.000Z | get_feature.py | 1895-art/stock-price-predict | 951a632cd397e969229d793e0c23f0575d154240 | [
"MIT"
] | 3 | 2019-01-18T08:09:57.000Z | 2020-01-07T13:19:32.000Z | get_feature.py | kaka-lin/stock-price-predict | 951a632cd397e969229d793e0c23f0575d154240 | [
"MIT"
] | 27 | 2018-08-07T05:17:05.000Z | 2021-06-20T01:53:38.000Z | import getopt, sys, os
import csv
import pandas as pd
import locale
from locale import atof
# Use the environment's numeric locale so locale-aware number parsing
# (see the `atof` import above) can handle thousand separators.
locale.setlocale(locale.LC_NUMERIC, '')
def main():
    """Parse the command line and return the data directory to process.

    Supported options: -h/--help, -v (verbose flag), -o/--output,
    -f/--filepath (defaults to the current working directory).
    """
    try:
        # BUG FIX: "-v" was declared as "v:" (requiring an argument) but is
        # handled as a boolean flag; "--filepath" was declared without "="
        # so it could not accept the value that "-f" requires.
        opts, args = getopt.getopt(sys.argv[1:], "hvo:f:", ["help", "output=", "filepath="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    output = None
    verbose = False
    filepath = os.getcwd()
    for o, a in opts:
        if o == "-v":
            verbose = True
        elif o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("-o", "--output"):
            output = a
        elif o in ("-f", "--filepath"):
            filepath = a
        else:
            assert False, "unhandled option"
    return filepath
def usage():
    """Print command-line usage instructions for this script."""
    message = (
        "=======================\n"
        "please input filepath\n"
        "ex: python get_feature.py -f ./data/20180427 \n"
        "======================="
    )
    print(message)
def get_feature_data(filepath, encode=None, **argv):
    """
    Read ``tetfp.csv`` under *filepath* and write one cleaned CSV per code.

    input:
        filepath: directory containing tetfp.csv; its basename (the date)
                  names the output directory ./data/<date>_process
        encode:   text encoding of the csv file (e.g. "big5")
    argv:
        Code,Date,CB,Open,High,Low,Close,Volumn
        True or False  -- flag selecting which columns to keep in the output
    """
    # BUG FIX: the selection list was rebuilt once per keyword inside a loop
    # (and empty when no kwargs were given either way); a single
    # comprehension produces the same result.
    params = [name for name, keep in argv.items() if keep]
    # abs filepath
    filepath = os.path.abspath(filepath)
    get_date = os.path.basename(filepath)
    tetfp_file = os.path.join(filepath, "tetfp.csv")
    save_process_path = os.path.join(os.path.abspath("./data/" + get_date + "_process"))
    with open(tetfp_file, encoding=encode) as file:
        rows = csv.reader(file, delimiter=",")
        data = []
        for row in rows:
            # strip whitespace from each cell and drop empty cells
            data.append([cell.strip() for cell in row if cell])
    df = pd.DataFrame(data=data[1:], columns=change_columns(*data[0]))
    df = df.dropna()
    # remove dots and thousand separators before converting volume to int
    # (r'\.' fixes the invalid "\." escape sequence of the original)
    df["Volumn"] = pd.to_numeric(df["Volumn"].replace(r'\.', '', regex=True)
                                 .replace(',', '', regex=True)
                                 .astype(int))
    types = set(df.loc[:, "Code"])
    if not os.path.exists(save_process_path):
        # makedirs also creates ./data if it does not exist yet
        os.makedirs(save_process_path)
    for t in types:
        str_t = str(int(t))
        t_types = df.loc[df['Code'] == t][params]
        t_types.to_csv(os.path.join(save_process_path, get_date + "_" + str_t + ".csv"), index=False)
def change_columns(*header):
    """
    replace header to English
    """
    # Mapping from the original (Chinese) TWSE column names to the English
    # names used throughout the rest of the pipeline.
    column_dict = {
        "代碼":"Code",
        "日期":"Date",
        "中文簡稱":"CB",
        "開盤價(元)":"Open",
        "最高價(元)":"High",
        "最低價(元)":"Low",
        "收盤價(元)":"Close",
        "成交張數(張)": "Volumn"
    }
    translated = []
    for name in header:
        translated.append(column_dict[name])
    return translated
# Script entry point: parse -f/--filepath, then extract the selected columns.
if __name__ == "__main__":
    """
    choose data output column
    """
    # Column-selection flags forwarded to get_feature_data as **argv;
    # True keeps the column in the per-code output CSVs.
    choose = {
        "Code":True,
        "Date":True,
        "CB": False,
        "Open": True,
        "High": True,
        "Low": True,
        "Close": True,
        "Volumn": True
    }
    filepath = main()
    # the TWSE source files are Big5-encoded
    get_feature_data(filepath, "big5", **choose)
| 26.233333 | 101 | 0.495553 | 371 | 3,148 | 4.091644 | 0.350404 | 0.027668 | 0.039526 | 0.028986 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006235 | 0.337675 | 3,148 | 119 | 102 | 26.453782 | 0.721823 | 0.043202 | 0 | 0.022727 | 0 | 0 | 0.124654 | 0.016621 | 0 | 0 | 0 | 0 | 0.011364 | 1 | 0.045455 | false | 0 | 0.056818 | 0 | 0.125 | 0.011364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7070e1e2b4357ac3610eef2cb5402b0e25e73572 | 13,883 | py | Python | resources/lib/api/api_requests.py | sajo84/plugin.video.netflix | 757cd2866f2c89c777d12a2772484fe675743543 | [
"MIT"
] | null | null | null | resources/lib/api/api_requests.py | sajo84/plugin.video.netflix | 757cd2866f2c89c777d12a2772484fe675743543 | [
"MIT"
] | null | null | null | resources/lib/api/api_requests.py | sajo84/plugin.video.netflix | 757cd2866f2c89c777d12a2772484fe675743543 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Methods to execute requests to Netflix API
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
from functools import wraps
import resources.lib.common as common
import resources.lib.kodi.ui as ui
from resources.lib.common import cache_utils
from resources.lib.database.db_utils import TABLE_SESSION
from resources.lib.globals import g
from .exceptions import APIError, MissingCredentialsError, MetadataNotAvailable, CacheMiss
from .paths import EPISODES_PARTIAL_PATHS, ART_PARTIAL_PATHS, build_paths
def catch_api_errors(func):
    """Decorator that catches API errors and displays a notification"""
    # pylint: disable=missing-docstring
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
        except APIError as exc:
            # show the localized error message; implicitly return None
            ui.show_notification(common.get_local_string(30118).format(exc))
        else:
            return result
    return wrapper
def logout():
    """Logout of the current account"""
    # Delegate to the add-on service via IPC, passing g.BASE_URL as call data
    common.make_call('logout', g.BASE_URL)
def login(ask_credentials=True):
    """Perform a login.

    Optionally prompts the user for credentials first; returns True when the
    service validated the login, False otherwise. Re-raises
    MissingCredentialsError when the user aborts or leaves a field empty.
    """
    try:
        if ask_credentials:
            ui.ask_credentials()
        logged_in = common.make_call('login')
        if logged_in:
            return True
        # Login not validated
        # ui.show_notification(common.get_local_string(30009))
        return False
    except MissingCredentialsError:
        # Aborted from user or leave an empty field
        ui.show_notification(common.get_local_string(30112))
        raise
def update_lolomo_context(context_name):
    """Update the lolomo list by context.

    Issues a 'refreshListByContext' callpath request for the lolomo row
    identified by *context_name* (ids/indexes are read from the local DB).
    Returns silently when the row index is unknown or the request fails.
    """
    lolomo_root = g.LOCAL_DB.get_value('lolomo_root_id', '', TABLE_SESSION)
    context_index = g.LOCAL_DB.get_value('lolomo_{}_index'.format(context_name.lower()), '', TABLE_SESSION)
    context_id = g.LOCAL_DB.get_value('lolomo_{}_id'.format(context_name.lower()), '', TABLE_SESSION)
    # nothing to refresh if this row was never recorded for the profile
    if not context_index:
        return
    path = [['lolomos', lolomo_root, 'refreshListByContext']]
    # The fourth parameter is like a request-id, but it doesn't seem to match to
    # serverDefs/date/requestId of reactContext (g.LOCAL_DB.get_value('request_id', table=TABLE_SESSION))
    # nor to request_id of the video event request
    # has a kind of relationship with renoMessageId suspect with the logblob but i'm not sure because my debug crashed,
    # and i am no longer able to trace the source.
    # I noticed also that this request can also be made with the fourth parameter empty,
    # but it still doesn't update the continueWatching list of lolomo, that is strange because of no error
    params = [common.enclose_quotes(context_id),
              context_index,
              common.enclose_quotes(context_name),
              '']
    # path_suffixs = [
    #     [['trackIds', 'context', 'length', 'genreId', 'videoId', 'displayName', 'isTallRow', 'isShowAsARow',
    #       'impressionToken', 'showAsARow', 'id', 'requestId']],
    #     [{'from': 0, 'to': 100}, 'reference', 'summary'],
    #     [{'from': 0, 'to': 100}, 'reference', 'title'],
    #     [{'from': 0, 'to': 100}, 'reference', 'titleMaturity'],
    #     [{'from': 0, 'to': 100}, 'reference', 'userRating'],
    #     [{'from': 0, 'to': 100}, 'reference', 'userRatingRequestId'],
    #     [{'from': 0, 'to': 100}, 'reference', 'boxarts', '_342x192', 'jpg'],
    #     [{'from': 0, 'to': 100}, 'reference', 'promoVideo']
    # ]
    callargs = {
        'callpaths': path,
        'params': params,
        # 'path_suffixs': path_suffixs
    }
    try:
        response = common.make_http_call('callpath_request', callargs)
        common.debug('refreshListByContext response: {}', response)
    except Exception:  # pylint: disable=broad-except
        # I do not know the reason yet, but sometimes continues to return error 401,
        # making it impossible to update the bookmark position
        if not common.is_debug_verbose():
            return
        ui.show_notification(title=common.get_local_string(30105),
                             msg='An error prevented the update the lolomo context on netflix',
                             time=10000)
def update_videoid_bookmark(video_id):
    """Update the videoid bookmark position.

    Fires a 'refreshVideoCurrentPositions' callpath request for *video_id*;
    on failure a notification is shown instead of raising.
    """
    # You can check if this function works through the official android app
    # by checking if the status bar watched of the video will be updated
    call_args = {
        'callpaths': [['refreshVideoCurrentPositions']],
        'params': ['[{}]'.format(video_id), '[]'],
    }
    try:
        result = common.make_http_call('callpath_request', call_args)
        common.debug('refreshVideoCurrentPositions response: {}', result)
    except Exception:  # pylint: disable=broad-except
        # I do not know the reason yet, but sometimes continues to return error 401,
        # making it impossible to update the bookmark position
        ui.show_notification(title=common.get_local_string(30105),
                             msg='An error prevented the update the status watched on netflix',
                             time=10000)
@common.time_execution(immediate=False)
def get_video_raw_data(videoids, custom_partial_path=None): # Do not apply cache to this method
"""Retrieve raw data for specified video id's"""
video_ids = [int(videoid.value) for videoid in videoids]
common.debug('Requesting video raw data for {}', video_ids)
if not custom_partial_path:
paths = build_paths(['videos', video_ids], EPISODES_PARTIAL_PATHS)
if videoids[0].mediatype == common.VideoId.EPISODE:
paths.extend(build_paths(['videos', int(videoids[0].tvshowid)], ART_PARTIAL_PATHS + [['title']]))
else:
paths = build_paths(['videos', video_ids], custom_partial_path)
return common.make_call('path_request', paths)
@catch_api_errors
@common.time_execution(immediate=False)
def rate(videoid, rating):
"""Rate a video on Netflix"""
common.debug('Rating {} as {}', videoid.value, rating)
# In opposition to Kodi, Netflix uses a rating from 0 to in 0.5 steps
rating = min(10, max(0, rating)) / 2
common.make_call(
'post',
{'endpoint': 'set_video_rating',
'data': {
'titleId': int(videoid.value),
'rating': rating}})
ui.show_notification(common.get_local_string(30127).format(rating * 2))
@catch_api_errors
@common.time_execution(immediate=False)
def rate_thumb(videoid, rating, track_id_jaw):
"""Rate a video on Netflix"""
common.debug('Thumb rating {} as {}', videoid.value, rating)
event_uuid = common.get_random_uuid()
response = common.make_call(
'post',
{'endpoint': 'set_thumb_rating',
'data': {
'eventUuid': event_uuid,
'titleId': int(videoid.value),
'trackId': track_id_jaw,
'rating': rating,
}})
if response.get('status', '') == 'success':
ui.show_notification(common.get_local_string(30045).split('|')[rating])
else:
common.error('Rating thumb error, response detail: {}', response)
ui.show_error_info('Rating error', 'Error type: {}' + response.get('status', '--'),
True, True)
@catch_api_errors
@common.time_execution(immediate=False)
def update_my_list(videoid, operation, params):
"""Call API to update my list with either add or remove action"""
common.debug('My List: {} {}', operation, videoid)
common.make_call(
'post',
{'endpoint': 'update_my_list',
'data': {
'operation': operation,
'videoId': videoid.value}})
ui.show_notification(common.get_local_string(30119))
_update_mylist_cache(videoid, operation, params)
def _update_mylist_cache(videoid, operation, params):
"""Update the my list cache to speeding up page load"""
# Avoids making a new request to the server to request the entire list updated
perpetual_range_start = params.get('perpetual_range_start')
mylist_identifier = 'mylist'
if perpetual_range_start and perpetual_range_start != 'None':
mylist_identifier += '_' + perpetual_range_start
if operation == 'remove':
try:
video_list_sorted_data = g.CACHE.get(cache_utils.CACHE_MYLIST, mylist_identifier)
del video_list_sorted_data.videos[videoid.value]
g.CACHE.add(cache_utils.CACHE_MYLIST, mylist_identifier, video_list_sorted_data)
except CacheMiss:
pass
try:
my_list_videoids = g.CACHE.get(cache_utils.CACHE_MYLIST, 'my_list_items')
my_list_videoids.remove(videoid)
g.CACHE.add(cache_utils.CACHE_MYLIST, 'my_list_items', my_list_videoids)
except CacheMiss:
pass
else:
try:
common.make_call('add_videoids_to_video_list_cache', {'cache_bucket': cache_utils.CACHE_MYLIST,
'cache_identifier': mylist_identifier,
'video_ids': [videoid.value]})
except CacheMiss:
pass
try:
my_list_videoids = g.CACHE.get(cache_utils.CACHE_MYLIST, 'my_list_items')
my_list_videoids.append(videoid)
g.CACHE.add(cache_utils.CACHE_MYLIST, 'my_list_items', my_list_videoids)
except CacheMiss:
pass
@common.time_execution(immediate=False)
def get_metadata(videoid, refresh=False):
"""Retrieve additional metadata for the given VideoId"""
# Delete the cache if we need to refresh the all metadata
if refresh:
g.CACHE.delete(cache_utils.CACHE_METADATA, videoid.value)
metadata_data = {}, None
if videoid.mediatype not in [common.VideoId.EPISODE, common.VideoId.SEASON]:
metadata_data = _metadata(videoid), None
elif videoid.mediatype == common.VideoId.SEASON:
metadata_data = _metadata(videoid.derive_parent(None)), None
else:
try:
metadata_data = _episode_metadata(videoid)
except KeyError as exc:
# Episode metadata may not exist if its a new episode and cached
# data is outdated. In this case, delete the cache entry and
# try again safely (if it doesn't exist this time, there is no
# metadata for the episode, so we assign an empty dict).
common.debug('{}, refreshing cache', exc)
g.CACHE.delete(cache_utils.CACHE_METADATA, videoid.tvshowid)
try:
metadata_data = _episode_metadata(videoid)
except KeyError as exc:
common.error(exc)
return metadata_data
@common.time_execution(immediate=False)
def _episode_metadata(videoid):
show_metadata = _metadata(videoid)
episode_metadata, season_metadata = common.find_episode_metadata(videoid, show_metadata)
return episode_metadata, season_metadata, show_metadata
@common.time_execution(immediate=False)
@cache_utils.cache_output(cache_utils.CACHE_METADATA, identify_from_kwarg_name='video_id')
def _metadata(video_id):
"""Retrieve additional metadata for a video.This is a separate method from
metadata(videoid) to work around caching issues when new episodes are added
to a show by Netflix."""
import time
common.debug('Requesting metadata for {}', video_id)
# Always use params 'movieid' to all videoid identifier
ipc_call = common.make_http_call if g.IS_SERVICE else common.make_call
metadata_data = ipc_call(
'get',
{
'endpoint': 'metadata',
'params': {'movieid': video_id.value,
'_': int(time.time())}
})
if not metadata_data:
# This return empty
# - if the metadata is no longer available
# - if it has been exported a tv show/movie from a specific language profile that is not
# available using profiles with other languages
raise MetadataNotAvailable
return metadata_data['video']
@common.time_execution(immediate=False)
def get_parental_control_data(password):
"""Get the parental control data"""
return common.make_call('parental_control_data', {'password': password})
@common.time_execution(immediate=False)
def set_parental_control_data(data):
"""Set the parental control data"""
try:
common.make_call(
'post',
{'endpoint': 'content_restrictions',
'data': {'action': 'update',
'authURL': data['token'],
'experience': data['experience'],
'guid': data['guid'],
'maturity': data['maturity']}}
)
return True
except Exception as exc: # pylint: disable=broad-except
common.error('Api call profile_hub raised an error: {}', exc)
return False
@common.time_execution(immediate=False)
def verify_pin(pin):
"""Send adult PIN to Netflix and verify it."""
try:
return common.make_call(
'post',
{'endpoint': 'pin_service',
'data': {'pin': pin}}
).get('success', False)
except Exception: # pylint: disable=broad-except
return False
@common.time_execution(immediate=False)
def verify_profile_lock(guid, pin):
"""Send profile PIN to Netflix and verify it."""
try:
return common.make_call(
'post',
{'endpoint': 'profile_lock',
'data': {'pin': pin,
'action': 'verify',
'guid': guid}}
).get('success', False)
except Exception: # pylint: disable=broad-except
return False
| 40.832353 | 119 | 0.644961 | 1,669 | 13,883 | 5.180947 | 0.23547 | 0.017347 | 0.019429 | 0.035619 | 0.345091 | 0.304152 | 0.254192 | 0.189777 | 0.180062 | 0.163294 | 0 | 0.010429 | 0.247137 | 13,883 | 339 | 120 | 40.952802 | 0.816877 | 0.258806 | 0 | 0.364807 | 0 | 0 | 0.12205 | 0.012837 | 0 | 0 | 0 | 0 | 0 | 1 | 0.077253 | false | 0.025751 | 0.042918 | 0 | 0.193133 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70727332781524d67d9e08873ec4e97cf38aa95f | 6,578 | py | Python | projects/wgan/gaussian_gan.py | niujinshuchong/stochastic_processes | ea2538d2f09c39bec1834df5addd37e0699a88bf | [
"MIT"
] | null | null | null | projects/wgan/gaussian_gan.py | niujinshuchong/stochastic_processes | ea2538d2f09c39bec1834df5addd37e0699a88bf | [
"MIT"
] | null | null | null | projects/wgan/gaussian_gan.py | niujinshuchong/stochastic_processes | ea2538d2f09c39bec1834df5addd37e0699a88bf | [
"MIT"
] | null | null | null | import argparse
import os
import numpy as np
import math
import sys
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
from torchvision.utils import save_image
import random
from math import *
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch
import cv2
os.makedirs('images', exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
parser.add_argument('--batch_size', type=int, default=128, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.00005, help='learning rate')
parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
parser.add_argument('--latent_dim', type=int, default=100, help='dimensionality of the latent space')
parser.add_argument('--img_size', type=int, default=28, help='size of each image dimension')
parser.add_argument('--channels', type=int, default=1, help='number of image channels')
parser.add_argument('--n_critic', type=int, default=5, help='number of training steps for discriminator per iter')
parser.add_argument('--clip_value', type=float, default=0.01, help='lower and upper clip value for disc. weights')
parser.add_argument('--sample_interval', type=int, default=400, help='interval betwen image samples')
opt = parser.parse_args()
print(opt)
img_shape = (2,)
cuda = True if torch.cuda.is_available() else False
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
def block(in_feat, out_feat, normalize=True):
layers = [ nn.Linear(in_feat, out_feat)]
if normalize:
layers.append(nn.BatchNorm1d(out_feat, 0.8))
layers.append(nn.LeakyReLU(0.2, inplace=True))
return layers
self.model = nn.Sequential(
*block(opt.latent_dim, 128, normalize=False),
*block(128, 256),
*block(256, 512),
*block(512, 1024),
)
self.one_hot = nn.Linear(1024, 100)
self.one_hot_offset = nn.Linear(100, int(np.prod(img_shape)))
self.offset = nn.Linear(1024, int(np.prod(img_shape)))
self.softmax = nn.Softmax()
def forward(self, z):
img = self.model(z)
one_hot = self.one_hot(img)
one_hot_offset = self.one_hot_offset(self.softmax(one_hot))
img = one_hot_offset + self.offset(img)
img = img.view(img.shape[0], *img_shape)
return img
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.model = nn.Sequential(
nn.Linear(int(np.prod(img_shape)), 512),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(512, 256),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(256, 1)
)
def forward(self, img):
img_flat = img.view(img.shape[0], -1)
validity = self.model(img_flat)
return validity
def gaussian_mixture(batchsize, ndim, num_labels):
if ndim % 2 != 0:
raise Exception("ndim must be a multiple of 2.")
def sample(x, y, label, num_labels):
shift = 1.4
r = 2.0 * np.pi / float(num_labels) * float(label)
new_x = x * cos(r) - y * sin(r)
new_y = x * sin(r) + y * cos(r)
new_x += shift * cos(r)
new_y += shift * sin(r)
return np.array([new_x, new_y]).reshape((2,))
x_var = 0.05
y_var = 0.05
x = np.random.normal(0, x_var, (batchsize, ndim // 2))
y = np.random.normal(0, y_var, (batchsize, ndim // 2))
z = np.empty((batchsize, ndim), dtype=np.float32)
for batch in range(batchsize):
for zi in range(ndim // 2):
z[batch, zi*2:zi*2+2] = sample(x[batch, zi], y[batch, zi], random.randint(0, num_labels - 1), num_labels)
return z
# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()
if cuda:
generator.cuda()
discriminator.cuda()
# Optimizers
optimizer_G = torch.optim.RMSprop(generator.parameters(), lr=opt.lr)
optimizer_D = torch.optim.RMSprop(discriminator.parameters(), lr=opt.lr)
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
# ----------
# Training
# ----------
batches_done = 0
for epoch in range(opt.n_epochs):
for i in range(1000):
imgs = gaussian_mixture(opt.batch_size, 2, 4)
imgs = Tensor(imgs)
#imgs = Tensor(np.random.uniform(low=1.3, high=5.7, size=(opt.batch_size, 2)))
# Configure input
real_imgs = Variable(imgs.type(Tensor))
# ---------------------
# Train Discriminator
# ---------------------
optimizer_D.zero_grad()
# Sample noise as generator input
z = Variable(Tensor(np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim))))
# Generate a batch of images
fake_imgs = generator(z).detach()
# Adversarial loss
loss_D = -torch.mean(discriminator(real_imgs)) + torch.mean(discriminator(fake_imgs))
loss_D.backward()
optimizer_D.step()
# Clip weights of discriminator
for p in discriminator.parameters():
p.data.clamp_(-opt.clip_value, opt.clip_value)
# Train the generator every n_critic iterations
if i % opt.n_critic == 0:
# -----------------
# Train Generator
# -----------------
optimizer_G.zero_grad()
# Generate a batch of images
gen_imgs = generator(z)
# Adversarial loss
loss_G = -torch.mean(discriminator(gen_imgs))
loss_G.backward()
optimizer_G.step()
print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" % (epoch, opt.n_epochs,
batches_done % 1000, 1000,
loss_D.item(), loss_G.item()))
if batches_done % opt.sample_interval == 0:
Y = gen_imgs.detach().cpu().numpy()
plt.scatter(Y[:, 0], Y[:, 1])
plt.savefig('tmp.png')
plt.close()
image = cv2.imread('tmp.png')
cv2.imshow("image", image)
cv2.waitKey(1)
batches_done += 1
| 33.390863 | 129 | 0.594862 | 871 | 6,578 | 4.359357 | 0.257176 | 0.023703 | 0.044772 | 0.014222 | 0.08454 | 0.060047 | 0.030024 | 0.016855 | 0 | 0 | 0 | 0.030511 | 0.267559 | 6,578 | 196 | 130 | 33.561224 | 0.757576 | 0.074187 | 0 | 0.045113 | 0 | 0.007519 | 0.087838 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.12782 | 0 | 0.233083 | 0.015038 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
707383f583ad7bbda06c20e6e87636595bd3dd55 | 1,302 | py | Python | _669.py | elfgzp/leetCode | 964c6574d310a9a6c486bf638487fd2f72b83b3f | [
"MIT"
] | 3 | 2019-04-12T06:22:56.000Z | 2019-05-04T04:25:01.000Z | _669.py | elfgzp/Leetcode | 964c6574d310a9a6c486bf638487fd2f72b83b3f | [
"MIT"
] | null | null | null | _669.py | elfgzp/Leetcode | 964c6574d310a9a6c486bf638487fd2f72b83b3f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = 'gzp'
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from utils import Tree
class Solution(object):
def trimBST(self, root, L, R):
"""
:type root: TreeNode
:type L: int
:type R: int
:rtype: TreeNode
"""
return self._trimBST(root, L, R)
def _trimBST(self, node, L, R):
"""
:type root: TreeNode
:type L: int
:type R: int
:rtype: TreeNode
"""
if not node:
return
if node.val is not None and node.val < L:
node = self._trimBST(node.right, L, R)
elif node.val is not None and node.val > R:
node = self._trimBST(node.left, L, R)
if not node:
return
node.right = self._trimBST(node.right, L, R)
node.left = self.trimBST(node.left, L, R)
return node
if __name__ == '__main__':
s = Solution()
root = Tree([1, 0, 2])
print(root.get_nodes())
print(s.trimBST(root, 1, 2).get_nodes())
root = Tree([3, 0, 4, None, 2, None, None, 1])
print(root.get_nodes())
print(s.trimBST(root, 1, 3).get_nodes())
| 22.067797 | 52 | 0.523041 | 178 | 1,302 | 3.685393 | 0.280899 | 0.021341 | 0.091463 | 0.030488 | 0.460366 | 0.460366 | 0.329268 | 0.329268 | 0.25 | 0.143293 | 0 | 0.015152 | 0.341014 | 1,302 | 58 | 53 | 22.448276 | 0.749417 | 0.236559 | 0 | 0.24 | 0 | 0 | 0.012318 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.04 | 0 | 0.32 | 0.16 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7074d22367368afc1491da30cd43502d5330386e | 3,895 | py | Python | spiders/a85.py | senlyu163/crawler | ecf95f7b356c726922b5e5d90000fda3e16ae90d | [
"Apache-2.0"
] | null | null | null | spiders/a85.py | senlyu163/crawler | ecf95f7b356c726922b5e5d90000fda3e16ae90d | [
"Apache-2.0"
] | null | null | null | spiders/a85.py | senlyu163/crawler | ecf95f7b356c726922b5e5d90000fda3e16ae90d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..utils import extract_CN_from_content
from ..items import ScrapySpiderItem
import re
from scrapy_splash import SplashRequest
class A85Spider(CrawlSpider):
name = '85'
allowed_domains = ['houqi.gov.cn']
start_urls = ['http://houqi.gov.cn/kzhq/zwgk/zwgk.shtml']
rules = (
Rule(LinkExtractor(allow=r'/kzhq/[a-zA-Z]+\d+/list\.shtml'), follow=True),
Rule(LinkExtractor(allow=r'/kzhq/[a-zA-Z]+/list\.shtml'), follow=True),
Rule(LinkExtractor(allow=r'/kzhq/[a-z]+/list\.shtml'), follow=True),
Rule(LinkExtractor(allow=r'/kzhq/gsgg/list\.shtml'), follow=True),
Rule(LinkExtractor(restrict_xpaths='//ul[@class="ggnav"]//li'), callback='parse_item', follow=True),
Rule(LinkExtractor(allow=r'list_\d+.shtml'), follow=True),
# Rule(LinkExtractor(allow=r'Items/'), callback='parse_item', follow=True),
# Rule(LinkExtractor(allow=r'Items/'), callback='parse_item', follow=True),
# Rule(LinkExtractor(allow=r'Items/'), callback='parse_item', follow=True),
)
def _build_request(self, rule, link):
r = SplashRequest(url=link.url, callback=self._response_downloaded, args={"wait": 0.5})
r.meta.update(rule=rule, link_text=link.text)
return r
def _requests_to_follow(self, response):
# if not isinstance(response, HtmlResponse):
# return
seen = set()
for n, rule in enumerate(self._rules):
links = [lnk for lnk in rule.link_extractor.extract_links(response)
if lnk not in seen]
if links and rule.process_links:
links = rule.process_links(links)
for link in links:
seen.add(link)
r = self._build_request(n, link)
yield rule.process_request(r)
def parse_item(self, response):
# print(response.url)
# item = {}
#item['domain_id'] = response.xpath('//input[@id="sid"]/@value').get()
#item['name'] = response.xpath('//div[@id="name"]').get()
#item['description'] = response.xpath('//div[@id="description"]').get()
# return item
if ('qzqd' in response.url) or ('gsgg' in response.url) or ('xwfbh' in response.url) or ('hqxw' in response.url) or ('tzgg' in response.url):
try:
item = ScrapySpiderItem()
item['url'] = response.url
#
date = response.xpath('/html/body/div[3]/div/div[2]/div[1]/text()').extract_first()
date = re.search(r"(\d{4}-\d{2}-\d{2})", date).groups()[0]
item['date'] = date
title = response.xpath('//div[@class="content"]/h1/text()').extract_first()
item['title'] = title
contents = response.xpath('//div[@class="zhengw"]').extract()
item['contents'] = extract_CN_from_content(contents)
return item
except:
print("there have no date in case 1.")
else:
try:
item = ScrapySpiderItem()
item['url'] = response.url
#
date = response.xpath('/html/body/div[3]/div/div[2]/div[1]/p[7]/em/text()').extract_first()
date = re.search(r"(\d{4}-\d{2}-\d{2})", date).groups()[0]
item['date'] = date
title = response.xpath('/html/body/div[3]/div/div[2]/div[1]/p[3]/em/text()').extract_first()
item['title'] = title
contents = response.xpath('//div[@class="zhengw"]').extract()
item['contents'] = extract_CN_from_content(contents)
return item
except:
print("there have no date in case 2.")
| 44.261364 | 149 | 0.567394 | 475 | 3,895 | 4.568421 | 0.267368 | 0.070507 | 0.081106 | 0.084793 | 0.501382 | 0.501382 | 0.484793 | 0.482488 | 0.467281 | 0.44424 | 0 | 0.010175 | 0.268293 | 3,895 | 87 | 150 | 44.770115 | 0.751228 | 0.137099 | 0 | 0.31746 | 0 | 0.047619 | 0.174836 | 0.103407 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.111111 | 0 | 0.285714 | 0.031746 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70755d5d03c099f6085f0bb0e914a4c7034022e9 | 2,589 | py | Python | skeleton/functional.py | dogoncouch/dogoncouch-misc | 46e020cc541cc6cf19edc0114a73f24e96ce15d0 | [
"MIT"
] | 3 | 2020-02-05T07:25:01.000Z | 2021-12-24T20:08:03.000Z | skeleton/functional.py | dogoncouch/dogoncouch-misc | 46e020cc541cc6cf19edc0114a73f24e96ce15d0 | [
"MIT"
] | null | null | null | skeleton/functional.py | dogoncouch/dogoncouch-misc | 46e020cc541cc6cf19edc0114a73f24e96ce15d0 | [
"MIT"
] | 2 | 2018-02-24T18:59:29.000Z | 2020-06-14T15:15:19.000Z | #!/usr/bin/env python
# MIT License
#
# Copyright (c) 2017 Dan Persons (dpersonsdev@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from argparse import ArgumentParser
from argparse import FileType
from configparser import ConfigParser
from os.path import isfile
__version__ = '0.1'
def get_args():
"""Set argument options"""
arg_parser = ArgumentParser()
arg_parser.add_argument('--version', action = 'version',
version = '%(prog)s ' + str(__version__))
arg_parser.add_argument('-c',
action = 'store', dest = 'config',
default = '/etc/nothing.conf',
help = ('set the config file'))
arg_parser.add_argument('--full',
action = 'store_true',
help = ('Do nothing to the fullest'))
arg_parser.add_argument('files',
type = FileType('r'), metavar='FILE', nargs = '?',
help = ('set a file with which to do nothing'))
args = arg_parser.parse_args()
return args
def get_config(configfile):
"""Read the config file"""
config = ConfigParser()
if isfile(configfile):
myconf = args.config
config.read(myconf)
else: return None
def main_event():
"""Do the actual nothing"""
pass
def run_script():
"""Run the program that does nothing"""
try:
args = get_args()
config = get_config(args.config)
main_event()
except KeyboardInterrupt:
print('\nExiting on KeyboardInterrupt')
def main():
run_script()
if __name__ == "__main__":
main()
| 28.450549 | 80 | 0.679413 | 341 | 2,589 | 5.052786 | 0.486804 | 0.051074 | 0.027858 | 0.046431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002996 | 0.226342 | 2,589 | 90 | 81 | 28.766667 | 0.857214 | 0.467362 | 0 | 0 | 0 | 0 | 0.151085 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0.025 | 0.1 | 0 | 0.25 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7076835d176f91ff169e5946974bfd4dbfe39989 | 14,077 | py | Python | src/silx/gui/plot/tools/RadarView.py | rnwatanabe/silx | b0395f4a06c048b7778dc04ada828edd195ef02d | [
"CC0-1.0",
"MIT"
] | 94 | 2016-03-04T17:25:53.000Z | 2022-03-18T18:05:23.000Z | src/silx/gui/plot/tools/RadarView.py | rnwatanabe/silx | b0395f4a06c048b7778dc04ada828edd195ef02d | [
"CC0-1.0",
"MIT"
] | 2,841 | 2016-01-21T09:06:49.000Z | 2022-03-18T14:53:56.000Z | src/silx/gui/plot/tools/RadarView.py | rnwatanabe/silx | b0395f4a06c048b7778dc04ada828edd195ef02d | [
"CC0-1.0",
"MIT"
] | 71 | 2015-09-30T08:35:35.000Z | 2022-03-16T07:16:28.000Z | # coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2015-2018 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""QWidget displaying an overview of a 2D plot.
This shows the available range of the data, and the current location of the
plot view.
"""
__authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "22/02/2021"
import logging
import weakref
from ... import qt
from ...utils import LockReentrant
_logger = logging.getLogger(__name__)
class _DraggableRectItem(qt.QGraphicsRectItem):
    """Movable rectangle item signalling its geometry changes.

    When the rectangle is dragged by the user, the new geometry is
    reported through the ``visibleRectDragged`` signal of the single
    view attached to the scene.
    """

    def __init__(self, *args, **kwargs):
        super(_DraggableRectItem, self).__init__(*args, **kwargs)
        self._previousCursor = None  # Cursor to restore when hover ends
        self.setFlag(qt.QGraphicsItem.ItemIsMovable)
        self.setFlag(qt.QGraphicsItem.ItemSendsGeometryChanges)
        self.setAcceptHoverEvents(True)
        self._ignoreChange = False  # True while moved through the API
        self._constraint = 0, 0, 0, 0  # xMin, xMax, yMin, yMax

    def setConstraintRect(self, left, top, width, height):
        """Set the constraint rectangle for dragging.

        The coordinates are in the _DraggableRectItem coordinate system.

        This constraint only applies to modification through interaction
        (i.e., this constraint is not applied to change through API).

        If the _DraggableRectItem is smaller than the constraint rectangle,
        the _DraggableRectItem remains within the constraint rectangle.
        If the _DraggableRectItem is wider than the constraint rectangle,
        the constraint rectangle remains within the _DraggableRectItem.
        """
        self._constraint = left, left + width, top, top + height

    def setPos(self, *args, **kwargs):
        """Overridden so itemChange ignores API-driven position changes."""
        self._ignoreChange = True
        super(_DraggableRectItem, self).setPos(*args, **kwargs)
        self._ignoreChange = False

    def moveBy(self, *args, **kwargs):
        """Overridden so itemChange ignores API-driven position changes."""
        self._ignoreChange = True
        super(_DraggableRectItem, self).moveBy(*args, **kwargs)
        self._ignoreChange = False

    @staticmethod
    def __clamp(pos, size, low, high):
        """Clamp a 1D position according to the drag constraint.

        If the span [pos, pos + size] fits within [low, high], keep it
        inside; otherwise keep [low, high] within the span.
        """
        if size <= high - low:
            return min(max(pos, low), high - size)
        return min(max(pos, high - size), low)

    def itemChange(self, change, value):
        """Callback called before applying changes to the item.

        Constrains interactive moves and notifies through the view's
        visibleRectDragged signal.
        """
        isInteractiveMove = (
            change == qt.QGraphicsItem.ItemPositionChange and
            not self._ignoreChange)
        if not isInteractiveMove:
            return super(_DraggableRectItem, self).itemChange(change, value)

        # Makes sure that the visible area is in the data,
        # or that data is in the visible area if area is too wide
        xMin, xMax, yMin, yMax = self._constraint
        rect = self.rect()
        value.setX(self.__clamp(value.x(), rect.width(), xMin, xMax))
        value.setY(self.__clamp(value.y(), rect.height(), yMin, yMax))

        if self.pos() != value:
            # The position actually changes: notify through the signal
            views = self.scene().views()
            assert len(views) == 1
            views[0].visibleRectDragged.emit(
                value.x() + rect.left(),
                value.y() + rect.top(),
                rect.width(),
                rect.height())

        return value

    def hoverEnterEvent(self, event):
        """Called when the mouse enters the rectangle area"""
        self._previousCursor = self.cursor()
        self.setCursor(qt.Qt.OpenHandCursor)

    def hoverLeaveEvent(self, event):
        """Called when the mouse leaves the rectangle area"""
        if self._previousCursor is not None:
            self.setCursor(self._previousCursor)
            self._previousCursor = None
class RadarView(qt.QGraphicsView):
    """Widget presenting a synthetic view of a 2D area and
    the current visible area.

    Coordinates are as in QGraphicsView:
    x goes from left to right and y goes from top to bottom.

    This widget preserves the aspect ratio of the areas.

    The 2D area and the visible area can be set with :meth:`setDataRect`
    and :meth:`setVisibleRect`.
    When the visible area has been dragged by the user, its new position
    is signaled by the *visibleRectDragged* signal.

    It is possible to invert the direction of the axes by using the
    :meth:`scale` method of QGraphicsView.
    """

    visibleRectDragged = qt.Signal(float, float, float, float)
    """Signals that the visible rectangle has been dragged.

    It provides: left, top, width, height in data coordinates.
    """

    # Style of the rectangle covering the whole data area
    _DATA_PEN = qt.QPen(qt.QColor('white'))
    _DATA_BRUSH = qt.QBrush(qt.QColor('light gray'))

    # Style of the bounding boxes of the active items (image/scatter/curve)
    _ACTIVEDATA_PEN = qt.QPen(qt.QColor('black'))
    _ACTIVEDATA_BRUSH = qt.QBrush(qt.QColor('transparent'))
    _ACTIVEDATA_PEN.setWidth(2)
    _ACTIVEDATA_PEN.setCosmetic(True)

    # Style of the draggable visible area rectangle
    _VISIBLE_PEN = qt.QPen(qt.QColor('blue'))
    _VISIBLE_PEN.setWidth(2)
    _VISIBLE_PEN.setCosmetic(True)
    _VISIBLE_BRUSH = qt.QBrush(qt.QColor(0, 0, 0, 0))

    # Fixed to match _VISIBLE_PEN: the visible area contour is blue, not red
    _TOOLTIP = 'Radar View:\nBlue contour: Visible area\nGray area: The image'

    _PIXMAP_SIZE = 256
def __init__(self, parent=None):
self.__plotRef = None
self._scene = qt.QGraphicsScene()
self._dataRect = self._scene.addRect(0, 0, 1, 1,
self._DATA_PEN,
self._DATA_BRUSH)
self._imageRect = self._scene.addRect(0, 0, 1, 1,
self._ACTIVEDATA_PEN,
self._ACTIVEDATA_BRUSH)
self._imageRect.setVisible(False)
self._scatterRect = self._scene.addRect(0, 0, 1, 1,
self._ACTIVEDATA_PEN,
self._ACTIVEDATA_BRUSH)
self._scatterRect.setVisible(False)
self._curveRect = self._scene.addRect(0, 0, 1, 1,
self._ACTIVEDATA_PEN,
self._ACTIVEDATA_BRUSH)
self._curveRect.setVisible(False)
self._visibleRect = _DraggableRectItem(0, 0, 1, 1)
self._visibleRect.setPen(self._VISIBLE_PEN)
self._visibleRect.setBrush(self._VISIBLE_BRUSH)
self._scene.addItem(self._visibleRect)
super(RadarView, self).__init__(self._scene, parent)
self.setHorizontalScrollBarPolicy(qt.Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(qt.Qt.ScrollBarAlwaysOff)
self.setFocusPolicy(qt.Qt.NoFocus)
self.setStyleSheet('border: 0px')
self.setToolTip(self._TOOLTIP)
self.__reentrant = LockReentrant()
self.visibleRectDragged.connect(self._viewRectDragged)
self.__timer = qt.QTimer(self)
self.__timer.timeout.connect(self._updateDataContent)
    def sizeHint(self):
        """Overridden to avoid sizeHint to depend on content size."""
        return self.minimumSizeHint()
    def wheelEvent(self, event):
        """Overridden to disable vertical scrolling with the mouse wheel."""
        event.ignore()
    def resizeEvent(self, event):
        """Overridden to fit the current scene content to the new size."""
        self.fitInView(self._scene.itemsBoundingRect(), qt.Qt.KeepAspectRatio)
        super(RadarView, self).resizeEvent(event)
def setDataRect(self, left, top, width, height):
"""Set the bounds of the data rectangular area.
This sets the coordinate system.
"""
self._dataRect.setRect(left, top, width, height)
self._visibleRect.setConstraintRect(left, top, width, height)
self.fitInView(self._scene.itemsBoundingRect(), qt.Qt.KeepAspectRatio)
def setVisibleRect(self, left, top, width, height):
    """Set the visible rectangular area.

    The coordinates are relative to the data rect.

    :param float left: Left coordinate of the visible area.
    :param float top: Top coordinate of the visible area.
    :param float width: Width of the visible area.
    :param float height: Height of the visible area.
    """
    # Cache the requested geometry; position is applied through setPos so
    # dragging only changes the item position, not its local rect.
    self.__visibleRect = left, top, width, height
    self._visibleRect.setRect(0, 0, width, height)
    self._visibleRect.setPos(left, top)
    self.fitInView(self._scene.itemsBoundingRect(), qt.Qt.KeepAspectRatio)
def __setVisibleRectFromPlot(self, plot):
    """Update radar view visible area from the plot's axes limits.

    NOTE(review): the original docstring said this "takes care of y
    coordinate conversion", but the limits are forwarded unchanged here;
    the y orientation appears to be handled by _updateYAxisInverted via
    a view scale -- confirm.

    :param plot: PlotWidget to read the X and Y axis limits from.
    """
    xMin, xMax = plot.getXAxis().getLimits()
    yMin, yMax = plot.getYAxis().getLimits()
    self.setVisibleRect(xMin, yMin, xMax - xMin, yMax - yMin)
def getPlotWidget(self):
    """Return the PlotWidget this radar view is attached to.

    :rtype: Union[None,PlotWidget]
    """
    ref = self.__plotRef
    if ref is None:
        return None
    plot = ref()
    if plot is None:
        # The plot was garbage collected: drop the dead weak reference.
        self.__plotRef = None
    return plot
def setPlotWidget(self, plot):
    """Set the PlotWidget this radar view connects to.

    As result `setDataRect` and `setVisibleRect` will be called
    automatically.

    :param Union[None,PlotWidget] plot:
    """
    previousPlot = self.getPlotWidget()
    if previousPlot is not None:  # Disconnect previous plot
        # BUG FIX: signals must be disconnected from the *previous* plot;
        # the original called disconnect on the new `plot` argument, which
        # failed with AttributeError for plot=None and otherwise targeted
        # a plot that was never connected.
        previousPlot.getXAxis().sigLimitsChanged.disconnect(self._xLimitChanged)
        previousPlot.getYAxis().sigLimitsChanged.disconnect(self._yLimitChanged)
        previousPlot.getYAxis().sigInvertedChanged.disconnect(
            self._updateYAxisInverted)

    # Reset plot and timer
    # FIXME: It would be good to clean up the display here
    self.__plotRef = None
    self.__timer.stop()

    if plot is not None:  # Connect new plot
        self.__plotRef = weakref.ref(plot)
        plot.getXAxis().sigLimitsChanged.connect(self._xLimitChanged)
        plot.getYAxis().sigLimitsChanged.connect(self._yLimitChanged)
        plot.getYAxis().sigInvertedChanged.connect(self._updateYAxisInverted)
        self.__setVisibleRectFromPlot(plot)
        self._updateYAxisInverted()
        # Poll the plot content twice a second.
        self.__timer.start(500)
def _xLimitChanged(self, vmin, vmax):
    """Slot for the plot's X axis sigLimitsChanged: resync visible rect."""
    plot = self.getPlotWidget()
    self.__setVisibleRectFromPlot(plot)
def _yLimitChanged(self, vmin, vmax):
    """Slot for the plot's Y axis sigLimitsChanged: resync visible rect."""
    plot = self.getPlotWidget()
    self.__setVisibleRectFromPlot(plot)
def _updateYAxisInverted(self, inverted=None):
    """Sync radar view axis orientation.

    :param inverted: Y axis orientation; when None it is read from the
        attached plot (i.e. when not called from the plot signal).
    """
    if inverted is None:
        # Do not perform this when called from plot signal
        plot = self.getPlotWidget()
        if plot is None:
            # ROBUSTNESS FIX: the plot may already be garbage collected;
            # sibling slots guard against a None plot, this one did not.
            return
        inverted = plot.getYAxis().isInverted()

    # Use scale to invert radarView
    # RadarView default Y direction is from top to bottom
    # As opposed to Plot. So invert RadarView when Plot is NOT inverted.
    self.resetTransform()
    if not inverted:
        self.scale(1., -1.)
    self.update()
def _viewRectDragged(self, left, top, width, height):
    """Slot called when the visible rectangle is dragged by the user.

    Pushes the new area back to the plot limits, unless no plot is
    attached or the update originates from the plot (re-entrance guard).
    """
    plot = self.getPlotWidget()
    if plot is None or self.__reentrant.locked():
        return
    with self.__reentrant:
        plot.setLimits(left, left + width, top, top + height)
def _updateDataContent(self):
    """Refresh the data rect and active-item rects from the plot content."""
    plot = self.getPlotWidget()
    if plot is None:
        return

    ranges = plot.getDataRange()
    xmin, xmax = (0, 0) if ranges.x is None else ranges.x
    ymin, ymax = (0, 0) if ranges.y is None else ranges.y
    self.setDataRect(xmin, ymin, xmax - xmin, ymax - ymin)

    # Mirror the bounds of each kind of active item.
    for rect, item in ((self._imageRect, plot.getActiveImage()),
                       (self._scatterRect, plot.getActiveScatter()),
                       (self._curveRect, plot.getActiveCurve())):
        self.__updateItem(rect, item)
def __updateItem(self, rect, item):
    """Sync a scene rectangle with a plot item's bounds.

    Hides the rectangle when the item is missing or has no bounds.

    :param QGraphicsRectItem rect:
    :param Item item: Plot item to mirror; may be None.
    """
    bounds = item._getBounds() if item is not None else None
    if bounds is None:
        rect.setVisible(False)
        return
    xmin, xmax, ymin, ymax = bounds
    rect.setRect(xmin, ymin, xmax - xmin, ymax - ymin)
    rect.setVisible(True)
| 38.88674 | 84 | 0.617674 | 1,564 | 14,077 | 5.438619 | 0.257673 | 0.014108 | 0.011286 | 0.016929 | 0.22925 | 0.181754 | 0.143193 | 0.126734 | 0.107924 | 0.093581 | 0 | 0.006613 | 0.280244 | 14,077 | 361 | 85 | 38.99446 | 0.832906 | 0.275556 | 0 | 0.228155 | 0 | 0 | 0.013498 | 0 | 0 | 0 | 0 | 0.00277 | 0.004854 | 1 | 0.106796 | false | 0 | 0.019417 | 0.004854 | 0.228155 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
707748746f67df072b8d127ebd45987c9f4adb01 | 1,486 | py | Python | purly/py/purly/model/client.py | rmorshea/purly | 0d07d6d7636fd81d9c1c14e2df6a32fc28b325f7 | [
"MIT"
] | 2 | 2018-08-18T05:39:24.000Z | 2018-08-21T19:02:16.000Z | purly/py/purly/model/client.py | rmorshea/purly | 0d07d6d7636fd81d9c1c14e2df6a32fc28b325f7 | [
"MIT"
] | 2 | 2018-07-27T07:14:19.000Z | 2018-07-27T07:17:06.000Z | purly/py/purly/model/client.py | rmorshea/purly | 0d07d6d7636fd81d9c1c14e2df6a32fc28b325f7 | [
"MIT"
] | null | null | null | import json
import time
import websocket
class Client:
    """Websocket client exchanging JSON-encoded update messages."""

    def __init__(self, url):
        self._url = url
        self._updates = []
        self._socket = create_socket(url, connection_timeout=2)

    def sync(self):
        """Flush queued updates to the server (in batches of 1000) and
        dispatch everything received in return."""
        received = []
        while True:
            data = self._socket.recv()
            if data:
                received.extend(json.loads(data))
            batch = self._updates[:1000]
            self._socket.send(json.dumps(batch))
            del self._updates[:1000]
            if not self._updates:
                break
        for message in received:
            self._recv(message)

    def serve(self, function=None):
        """Repeatedly call ``function`` (if given) and sync; a
        StopIteration raised by ``function`` ends the loop."""
        while True:
            try:
                if function is not None:
                    function()
            except StopIteration:
                break
            self.sync()

    def _send(self, content, header):
        """Queue one outgoing message for the next sync."""
        self._updates.append({'header': header, 'content': content})

    def _recv(self, msg):
        """Dispatch an incoming message to ``_on_<type>`` when defined."""
        kind = msg['header']['type']
        handler_name = '_on_%s' % kind
        if hasattr(self, handler_name):
            getattr(self, handler_name)(msg['content'])
def create_socket(uri, *args, **kwargs):
    """Open a websocket connection, retrying while the server refuses.

    NOTE: retries in a busy loop (no sleep) until ``connection_timeout``
    seconds (default 0) have elapsed, then re-raises the last
    ConnectionRefusedError.
    """
    deadline = kwargs.get('connection_timeout', 0)
    started = time.time()
    while True:
        try:
            return websocket.create_connection(uri, *args, **kwargs)
        except ConnectionRefusedError:
            if time.time() - started > deadline:
                raise
| 26.070175 | 73 | 0.520188 | 150 | 1,486 | 5 | 0.4 | 0.073333 | 0.050667 | 0.061333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010718 | 0.37214 | 1,486 | 56 | 74 | 26.535714 | 0.79314 | 0 | 0 | 0.148936 | 0 | 0 | 0.036339 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12766 | false | 0 | 0.06383 | 0 | 0.234043 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
707aa505d5a6e2672f86a0292a8b2705393bee85 | 1,232 | py | Python | practice_problems/prog4_vi.py | vishwasks32/python3-learning | 39f39238428727ef0c97c74c8de2570bd84da403 | [
"Apache-2.0"
] | 3 | 2018-02-08T21:09:27.000Z | 2021-06-15T04:48:46.000Z | practice_problems/prog4_vi.py | vishwasks32/python3-learning | 39f39238428727ef0c97c74c8de2570bd84da403 | [
"Apache-2.0"
] | null | null | null | practice_problems/prog4_vi.py | vishwasks32/python3-learning | 39f39238428727ef0c97c74c8de2570bd84da403 | [
"Apache-2.0"
] | 1 | 2018-02-08T21:09:31.000Z | 2018-02-08T21:09:31.000Z | #!/usr/bin/env python3
#
# Author: Vishwas K Singh
# Email: vishwasks32@gmail.com
#
# Script to convert Celcius Temperature to Farenheit
def temp_conv(temp_type, temp_val):
    """Convert a temperature between Celsius and Fahrenheit.

    :param temp_type: Target scale: 'f' converts temp_val (in Celsius) to
        Fahrenheit, 'c' converts temp_val (in Fahrenheit) to Celsius.
    :param temp_val: Temperature value to convert.
    :return: The converted temperature as a float.
    :raises ValueError: For an unknown temp_type (ROBUSTNESS FIX: the
        original silently returned None, hiding caller mistakes).
    """
    if temp_type == 'f':
        return (9 / 5) * temp_val + 32
    if temp_type == 'c':
        return 5 * (temp_val - 32) / 9
    raise ValueError("temp_type must be 'c' or 'f', got %r" % (temp_type,))
if __name__ == '__main__':
    # Interactive entry point: ask for a direction, then a value.
    print("Welcome to Temperature Converter")
    print("Select 1. Farenheit to Celcius\n\t2. Celcius to Farenheit")
    conv_type = input()
    if conv_type == '1':
        # Option 1: Fahrenheit reading converted into Celsius.
        value = float(input("Enter the farenheit value to be converted: "))
        converted = temp_conv('c', value)
        print("%.2f degree farenheit converts to %.2f degree celcius." % (value, converted))
    elif conv_type == '2':
        # Option 2: Celsius reading converted into Fahrenheit.
        value = float(input("Enter the Celcius value to be converted: "))
        converted = temp_conv('f', value)
        print("%.2f degree celcius converts to %.2f degree farenheit." % (value, converted))
    else:
        print("Invalid Input!! Exit..")
| 35.2 | 98 | 0.647727 | 170 | 1,232 | 4.470588 | 0.358824 | 0.082895 | 0.047368 | 0.063158 | 0.248684 | 0.190789 | 0.094737 | 0.094737 | 0.094737 | 0 | 0 | 0.020063 | 0.231331 | 1,232 | 34 | 99 | 36.235294 | 0.782471 | 0.160714 | 0 | 0 | 0 | 0 | 0.312932 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0 | 0 | 0.130435 | 0.217391 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
707b95b9e394fd7ccab1823b73b68b69754eb13a | 696 | py | Python | 2020/09/part1.py | timofurrer/aoc-2020 | 446b688a57601d9891f520e43b7f822c373a6ff4 | [
"MIT"
] | null | null | null | 2020/09/part1.py | timofurrer/aoc-2020 | 446b688a57601d9891f520e43b7f822c373a6ff4 | [
"MIT"
] | null | null | null | 2020/09/part1.py | timofurrer/aoc-2020 | 446b688a57601d9891f520e43b7f822c373a6ff4 | [
"MIT"
] | null | null | null | import os
import sys

puzzle_input_path = os.path.join(os.path.dirname(__file__), "input_1.txt")
with open(puzzle_input_path) as puzzle_input_file:
    puzzle_input_raw = puzzle_input_file.read()

# XMAS cipher rule (AoC 2020 day 9): after the 25-number preamble, each
# number must be the sum of two of the 25 numbers directly before it.
preamble = 25
numbers = [int(line) for line in puzzle_input_raw.splitlines()]

# First number that cannot be written as such a sum.
number = next(
    value
    for idx, value in enumerate(numbers)
    if idx >= preamble
    and all(value - prev not in numbers[idx - preamble:idx]
            for prev in numbers[idx - preamble:idx])
)
print(number)
707ebe5c40335557036acfbcae3d06ae69d50f9a | 2,642 | py | Python | app/dapp_examples/py/media_analysis/image_quality/App.py | TheAdamBC/AdamBC | e854a64c19442e24a50e4d65ce2f2e8f6ea46f4c | [
"BSD-3-Clause"
] | 1 | 2021-12-14T07:28:46.000Z | 2021-12-14T07:28:46.000Z | app/dapp_examples/py/media_analysis/image_quality/App.py | TheAdamBC/AdamBC | e854a64c19442e24a50e4d65ce2f2e8f6ea46f4c | [
"BSD-3-Clause"
] | null | null | null | app/dapp_examples/py/media_analysis/image_quality/App.py | TheAdamBC/AdamBC | e854a64c19442e24a50e4d65ce2f2e8f6ea46f4c | [
"BSD-3-Clause"
] | null | null | null | #**
# * The Decentralized App (DApp):
# * This is where the App developer writes the decentralized app.
# * Make sure the code is written within the specified space region.
# *
# * IMPORTANT:
# * 1. Developer DApp CODE MUST BE WRITTEN WITHIN SPECIFIED SPACE REGION.
# * 2. DApp MUST return values through the 'results' variable.
# * 3. DApp MUST RETURN A JSON Object.
# * 4. DApp data crunching should not exceed 100MB of Data per peer task.
# * 5. If you change the name of 'results', make sure to change it at DApp's 'return results' code.
# *
# *
import sys, json

results = {}  # Storage for successful results.

json_str = input()  # Capture data input
params = json.loads(json_str)  # Load parameters values (params) to process

#*********************************************************************************/
# /* START WRITING YOUR DAPP CODE BEGINNING HERE: */
#*********************************************************************************/

# EXAMPLE:
# Estimating the quality of images in a file directory (quality is computed
# as the file size divided by the pixel area of the image).

# Import necessary DApp resources, scripts, assets and modules needed for the task.
import numpy as np
import cv2
import os
import base64

# Variable to store image quality
imageQuality = {'imageQuality': 0}

fileName = params['uParams'][0]['parameter2']  # Capture name of file
fileData = base64.b64decode(params['uParams'][0]['parameter1'])  # Capture file

# Parse image file to Numpy array
img_buffer = np.frombuffer(fileData, dtype=np.uint8)
im = cv2.imdecode(img_buffer, flags=1)

# BUG FIX: OpenCV signals decode/read failure by returning None instead of
# raising, while the original bare `except:` clauses swallowed *every*
# exception (even KeyboardInterrupt) and then crashed later with a
# NameError/AttributeError on the missing image. Fail fast with a message.
if im is None:
    print('Problem saving file!')
    sys.exit(1)

# Save file to local directory
output_path = os.path.join('app/assets/media/', f'{fileName}')
try:
    cv2.imwrite(output_path, im)
    cv2.waitKey(0)
except cv2.error:
    print('Problem saving file!')
    sys.exit(1)

img = cv2.imread(f'app/assets/media/{fileName}', cv2.IMREAD_UNCHANGED)  # Load file to OpenCV
if img is None:
    print('Error processing file!')
    sys.exit(1)

# height, width of the image (number of channels is not needed)
height = img.shape[0]
width = img.shape[1]
size = os.path.getsize(f'app/assets/media/{fileName}')

# Quality metric: file size divided by pixel area (NOTE: the original
# comment had numerator and denominator swapped; the code is unchanged).
imageQuality['imageQuality'] = size / (height * width)

# Return results of processing
results = imageQuality

#*********************************************************************************/
# /* STOP WRITING YOUR DAPP CODE UP UNTIL HERE.*/
#*********************************************************************************/

# BUG FIX: the contract above requires a JSON object, but `print(results)`
# emitted a Python dict repr (single quotes); serialize explicitly.
print(json.dumps(results))
sys.stdout.flush()
| 33.443038 | 142 | 0.623391 | 335 | 2,642 | 4.901493 | 0.450746 | 0.014616 | 0.025579 | 0.023143 | 0.112058 | 0.084044 | 0.084044 | 0.084044 | 0.084044 | 0.084044 | 0 | 0.013538 | 0.161241 | 2,642 | 78 | 143 | 33.871795 | 0.727437 | 0.641559 | 0 | 0.133333 | 0 | 0 | 0.198901 | 0.059341 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70811820b7b88435ba6e11176b752f15dc282feb | 1,142 | py | Python | tests/conftest.py | jmolmo/managed-tenants-cli | fb3dd79f6629884577aa7333fdfe8d78802a79d4 | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | jmolmo/managed-tenants-cli | fb3dd79f6629884577aa7333fdfe8d78802a79d4 | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | jmolmo/managed-tenants-cli | fb3dd79f6629884577aa7333fdfe8d78802a79d4 | [
"Apache-2.0"
] | 1 | 2021-09-02T10:11:52.000Z | 2021-09-02T10:11:52.000Z | # Configure different hypothesis profiles
import os

from hypothesis import HealthCheck, Phase, settings

FAST_PROFILE = "fast"
CI_PROFILE = "ci"

# Settings shared by every profile.
_COMMON_SETTINGS = dict(
    # Set to True for test reproducibility:
    # https://hypothesis.readthedocs.io/en/latest/settings.html#hypothesis.settings.derandomize
    derandomize=False,
    # https://hypothesis.readthedocs.io/en/latest/settings.html#controlling-what-runs
    phases=[Phase.generate, Phase.explain],
    # (sblaisdo) fails `HealthCheck.too_slow` with initial schema/addon loading
    suppress_health_check=[HealthCheck.too_slow],
    # (sblaisdo) default deadline of 200ms is exceeded in some cases
    deadline=None,
)

# 'fast' profile for local development
settings.register_profile(FAST_PROFILE, max_examples=3, **_COMMON_SETTINGS)

# 'ci' profile for pr_check.sh
settings.register_profile(CI_PROFILE, max_examples=5, **_COMMON_SETTINGS)

# Load profile
p = CI_PROFILE if os.getenv("CI") == "true" else FAST_PROFILE
print(f"Loading hypothesis profile: {p}")
settings.load_profile(p)
| 29.282051 | 95 | 0.748687 | 146 | 1,142 | 5.726027 | 0.486301 | 0.052632 | 0.064593 | 0.066986 | 0.277512 | 0.203349 | 0.114833 | 0.114833 | 0 | 0 | 0 | 0.005139 | 0.147986 | 1,142 | 38 | 96 | 30.052632 | 0.85406 | 0.402802 | 0 | 0.434783 | 0 | 0 | 0.064179 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.086957 | 0 | 0.086957 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70824a33f97bb12dc07fe121ae5ed43e23d5b01c | 2,994 | py | Python | src/python/exsim3.py | akafael/unb-controle-digital | 4c4915eb1c4d070886284c0f79ce3ee26ece8695 | [
"MIT"
] | null | null | null | src/python/exsim3.py | akafael/unb-controle-digital | 4c4915eb1c4d070886284c0f79ce3ee26ece8695 | [
"MIT"
] | null | null | null | src/python/exsim3.py | akafael/unb-controle-digital | 4c4915eb1c4d070886284c0f79ce3ee26ece8695 | [
"MIT"
] | null | null | null | """
Laboratory Experiment 3 - Script
- Rootlocus project
@author Rafael Lima
"""
from sympy import *
def simplifyFraction(G, s):
    """Return G with numerator and denominator expanded as Poly objects in s."""
    numerator, denominator = fraction(G.expand().simplify())
    return Poly(numerator, s) / Poly(denominator, s)
def partfrac(G, s):
    """Split a rational function into first-order terms using residues.

    :param G: Rational sympy expression in ``s``.
    :param s: Symbol of the transfer function variable.
    :return: Sum of ``residue / (s - pole)`` terms over the poles of G.
    """
    # Find Poles
    # BUG FIX: this module only does `from sympy import *`, so the bare
    # name `sympy` is undefined; `sympy.fraction(...)` raised NameError.
    poles = solve(fraction(G.expand().simplify())[1], s)

    # Find Residues: residue at p is lim_{s->p} (s - p) * G(s).
    Gp = 0
    for p in poles:
        Gp = Gp + (G * (s - p)).subs(s, p) / (s - p)

    return Gp
def roundExpr(expr, num_digits=4):
    """Round every numeric atom of a sympy expression to num_digits digits."""
    replacements = {number: round(number, num_digits)
                    for number in expr.atoms(Number)}
    return expr.xreplace(replacements)
# Display numbers with 3 significant digits when printing expressions.
printing.printer.Printer().set_global_settings(precision=3)

# Symbols
# NOTE(review): naming convention appears to be s* = symbolic result,
# m* = value copied from matlab, n* = numeric constant -- confirm.
s = symbols("s",complex=True)
z = symbols("z",complex=True)
K,a1,a0,T,b = symbols("K alpha_1 alpha_0 T beta",real=True)

# Constants
na0 = 1    # numeric value of alpha_0
nT = 0.5   # sampling period used in part 1

# Open Loop Transfer Function
sGo = 1/(s+na0)

# Z Transform (From table)
sGz = (1/na0)*(1-exp(-T))/(z-exp(-T))

# Controler TF (discrete integrator with gain K)
sGc = K*z/(z-1)

# Open-loop and closed-loop (unity feedback) transfer functions.
sGma = simplify(expand(sGc*sGz))
sGmf = simplify(expand(sGma/(1+sGma)))

# Characterist Equation (denominator of the closed-loop TF)
_,poly = fraction(sGmf)

# Find Critical Value for K (marginal stability at z = -1)
sK = solve(poly,K)[0]
Kmax = sK.subs([(T,nT),(z,-1)])

# Closed-loop poles for K = 2 at the part-1 sampling period.
poles2 = solve(poly.subs([(T,nT),(K,2)]),z)

# Part 2
# Constants
na1 = 2    # numeric value of alpha_1
Ts = 0.2   # sampling period used in part 2

# Open Loop Transfer Function
sGo2 = 1/(s+2)

# TODO find Z Transform (From table)
# BUG: not matching ZOH discretization from matlab
sGz2 = (z-1)*(1/(4*(z-exp(-2*T))) - 1/(4*(z-1)) + T*(1/((z-1)*(z-1))))

# Controler TF (pole-zero compensator with gain K)
sGc2 = K*(z-exp(-na1*T))/(z-b)

sGma2 = simplifyFraction(sGc2*sGz2,z)
sGmf2 = simplify(expand(sGma2/(1+sGma2)))

# Expression from matlab
mGz2 = ((2533546664982251*z)/144115188075855872 + 554410548014771/36028797018963968)/(z**2 - (3761226368457787*z)/2251799813685248 + 6037706219090157/9007199254740992)

# Controler TF from Matlab
mGc2 = K*(z-exp(-na1*T))/(z-b)

mGma2 = simplifyFraction(mGc2*mGz2,z)
mGmf2 = simplify(expand(mGma2/(1+mGma2)))

# Characterist Equation
_,poly2 = fraction(sGmf2)

# Request Conditions (desired second-order response specifications)
desiredDamping = 0.5
desiredSettlingTime = 2
# Overshoot implied by the damping ratio: exp(-zeta*pi/sqrt(1-zeta^2)).
desiredOvershoot = exp(-desiredDamping*pi/sqrt(1-desiredDamping**2))

# Continuous-time dominant pole pair; the 4/settlingTime real part is
# presumably the 2% settling-time criterion -- confirm.
desiredPoles = [0,0]
desiredPoles[0] = -(4/desiredSettlingTime)*(1 + I*sqrt(1-desiredDamping**2)/desiredDamping)
desiredPoles[1] = -(4/desiredSettlingTime)*(1 - I*sqrt(1-desiredDamping**2)/desiredDamping)

# Map the desired s-plane poles to the z plane: z = exp(s*T).
desiredPolesZ = [exp(desiredPoles[0]*Ts),exp(desiredPoles[1]*Ts)]

# Solve Linear System to find K and b: the characteristic polynomial must
# vanish at both desired z-plane poles.
sysKb = [K,b]
sysKb[0] = poly2.subs([(z,desiredPolesZ[0]),(T,Ts)]).evalf().collect(K).collect(b)
sysKb[1] = poly2.subs([(z,desiredPolesZ[1]),(T,Ts)]).evalf().collect(K).collect(b)

resp = list(linsolve(sysKb,(K,b)))[0]
nK = resp[0]
nb = resp[1]

# Find TF (closed loop with the designed K and beta substituted)
nGmf2 = sGmf2.subs([(K,nK),(b,nb),(T,Ts)])

# Find Critical Value for K
sK2 = solve(poly2,K)[0]
#Kmax = sK.subs([(T,nT),(z,-1)])
| 23.761905 | 167 | 0.660655 | 461 | 2,994 | 4.273319 | 0.331887 | 0.007107 | 0.01066 | 0.030457 | 0.130964 | 0.109645 | 0.109645 | 0.074112 | 0.074112 | 0 | 0 | 0.090229 | 0.152305 | 2,994 | 125 | 168 | 23.952 | 0.685973 | 0.238143 | 0 | 0 | 0 | 0 | 0.011797 | 0 | 0 | 0 | 0 | 0.008 | 0 | 1 | 0.053571 | false | 0 | 0.017857 | 0 | 0.125 | 0.017857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
708413de75cff9f09c32fc7eec77271bf88e6168 | 2,698 | py | Python | model/decoder.py | kefirski/hybrid_rvae | 39133e656eeb05c998422e5ad9bfadc913c81b44 | [
"MIT"
] | 23 | 2017-10-24T01:30:07.000Z | 2021-11-15T04:14:02.000Z | model/decoder.py | analvikingur/hybrid_rvae | 39133e656eeb05c998422e5ad9bfadc913c81b44 | [
"MIT"
] | 1 | 2017-08-20T00:34:23.000Z | 2017-08-21T08:03:30.000Z | model/decoder.py | analvikingur/hybrid_rvae | 39133e656eeb05c998422e5ad9bfadc913c81b44 | [
"MIT"
] | 13 | 2017-08-22T15:35:00.000Z | 2021-11-19T01:24:33.000Z | import torch as t
import torch.nn as nn
import torch.nn.functional as F
class Decoder(nn.Module):
    """Hybrid CNN + RNN decoder.

    A stack of transposed 1-D convolutions expands the latent vector into
    a sequence of per-step vocabulary logits (the auxiliary output); that
    sequence, concatenated with the decoder input, is fed through a GRU
    and projected back to the vocabulary for the main logits.
    """

    def __init__(self, vocab_size, latent_variable_size, rnn_size, rnn_num_layers, embed_size):
        super(Decoder, self).__init__()

        self.vocab_size = vocab_size
        self.latent_variable_size = latent_variable_size
        self.rnn_size = rnn_size
        self.embed_size = embed_size
        self.rnn_num_layers = rnn_num_layers

        # (in_channels, out_channels, output_padding) per upsampling stage;
        # every stage uses kernel 4, stride 2, padding 0.
        conv_specs = [
            (self.latent_variable_size, 512, 0),
            (512, 512, 1),
            (512, 256, 0),
            (256, 256, 1),
            (256, 128, 0),
        ]
        layers = []
        for in_channels, out_channels, out_pad in conv_specs:
            layers += [
                nn.ConvTranspose1d(in_channels, out_channels, 4, 2, 0,
                                   output_padding=out_pad),
                nn.BatchNorm1d(out_channels),
                nn.ELU(),
            ]
        # Final stage maps to vocabulary logits without normalization.
        layers.append(nn.ConvTranspose1d(128, self.vocab_size, 4, 2, 0))
        self.cnn = nn.Sequential(*layers)

        self.rnn = nn.GRU(input_size=self.vocab_size + self.embed_size,
                          hidden_size=self.rnn_size,
                          num_layers=self.rnn_num_layers,
                          batch_first=True)

        self.hidden_to_vocab = nn.Linear(self.rnn_size, self.vocab_size)

    def forward(self, latent_variable, decoder_input):
        """Decode a latent vector into sequence logits.

        :param latent_variable: Float tensor [batch_size, latent_variable_size].
        :param decoder_input: Float tensor [batch_size, max_seq_len, embed_size].
        :return: Two [batch_size, max_seq_len, vocab_size] tensors: the RNN
            logits (whole-model likelihood) and the auxiliary CNN logits.
        """
        aux_logits = self.conv_decoder(latent_variable)
        logits, _ = self.rnn_decoder(aux_logits, decoder_input, initial_state=None)
        return logits, aux_logits

    def conv_decoder(self, latent_variable):
        """Expand the latent vector through the CNN; returns channels-last."""
        conv_out = self.cnn(latent_variable.unsqueeze(2))
        return conv_out.transpose(1, 2).contiguous()

    def rnn_decoder(self, cnn_out, decoder_input, initial_state=None):
        """Run the GRU over [cnn_out ; decoder_input] and project to vocab."""
        rnn_out, final_state = self.rnn(t.cat([cnn_out, decoder_input], 2), initial_state)
        batch_size, seq_len, _ = rnn_out.size()
        flat = self.hidden_to_vocab(rnn_out.contiguous().view(-1, self.rnn_size))
        return flat.view(batch_size, seq_len, self.vocab_size), final_state
| 34.589744 | 99 | 0.626019 | 353 | 2,698 | 4.518414 | 0.229462 | 0.105329 | 0.048903 | 0.068966 | 0.221944 | 0.160502 | 0.160502 | 0.160502 | 0 | 0 | 0 | 0.041453 | 0.27576 | 2,698 | 77 | 100 | 35.038961 | 0.774821 | 0.127131 | 0 | 0.183673 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081633 | false | 0 | 0.061224 | 0 | 0.22449 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7084652e3d8514cf5a87012b67dbfa4aee0e8d9d | 15,329 | py | Python | web/olga/analytics/models.py | raccoongang/acceptor | fdc1504912b502c8d789d5478eba8cc1a491934b | [
"Apache-2.0"
] | 5 | 2017-10-20T05:52:59.000Z | 2020-02-25T10:46:33.000Z | web/olga/analytics/models.py | raccoongang/OLGA | fdc1504912b502c8d789d5478eba8cc1a491934b | [
"Apache-2.0"
] | 233 | 2017-08-14T10:56:16.000Z | 2021-04-07T01:09:17.000Z | web/olga/analytics/models.py | raccoongang/acceptor | fdc1504912b502c8d789d5478eba8cc1a491934b | [
"Apache-2.0"
] | 2 | 2018-03-16T22:22:57.000Z | 2018-06-15T20:02:56.000Z | """
Models for analytics application. Models used to store and operate all data received from the edx platform.
"""
from __future__ import division
from datetime import date, timedelta
import operator
import pycountry
from django.contrib.postgres.fields import JSONField
from django.db import models
from django.db.models import Sum, Count, DateField
from django.db.models.expressions import F, Func, Value
from django.db.models.functions import Trunc
def get_last_calendar_day():
    """
    Return the (start, end) date pair delimiting the previous calendar day.

    Returns:
        start_of_day (date): Yesterday's date, e.g. 2017-05-15.
        end_of_day (date): Today's date, the exclusive upper bound of the
            segment, e.g. 2017-05-16.
    """
    end_of_day = date.today()
    return end_of_day - timedelta(days=1), end_of_day
class EdxInstallation(models.Model):
    """
    Model that stores overall data received from the edx-platform.

    One row per connected installation; statistics snapshots reference it
    through ``InstallationStatistics.edx_installation``.
    """

    # NOTE(review): presumably the token the installation authenticates
    # with when posting statistics -- confirm against the receiving views.
    access_token = models.UUIDField(null=True)
    # Optional self-reported identity of the platform.
    platform_name = models.CharField(max_length=255, null=True, blank=True)
    platform_url = models.URLField(null=True, blank=True)
    # Installation identifier, up to 32 characters.
    uid = models.CharField(null=True, max_length=32)
    latitude = models.FloatField(
        null=True, blank=True, help_text='Latitude coordinate of edX platform follows `float` type. Example: 50.10'
    )
    longitude = models.FloatField(
        null=True, blank=True, help_text='Longitude coordinate of edX platform follows `float` type. Example: 40.05'
    )
class InstallationStatistics(models.Model):
"""
Model that stores statistics data received from the edx-platform.
"""
active_students_amount_day = models.IntegerField(default=0)
active_students_amount_week = models.IntegerField(default=0)
active_students_amount_month = models.IntegerField(default=0)
registered_students = models.IntegerField(default=0)
enthusiastic_students = models.IntegerField(default=0)
generated_certificates = models.IntegerField(default=0)
courses_amount = models.IntegerField(default=0)
data_created_datetime = models.DateTimeField()
edx_installation = models.ForeignKey(EdxInstallation, on_delete=models.CASCADE)
statistics_level = models.CharField(
choices=(
('enthusiast', 'enthusiast'),
('paranoid', 'paranoid'),
),
max_length=255,
default='paranoid'
)
students_per_country = JSONField(
default=dict,
blank=True,
null=True,
help_text='This field has students country-count accordance. It follows `json` type. '
'Example: {"RU": 2632, "CA": 18543, "UA": 2011, "null": 1}'
)
unspecified_country_name = 'Country is not specified'
@staticmethod
def get_statistics_top_country(tabular_countries_list):
    """
    Return the top-ranked country name from a tabular country list.

    The list is already sorted by active students count in descending
    order, so the first entry is the top country.

    :param tabular_countries_list: list of two-element tuples
        ``(country_name, [count, percentage])``.
    :return: top country name as a string, or '' for an empty list.
    """
    if not tabular_countries_list:
        return ''
    top_country, _ = tabular_countries_list[0]
    return top_country
@classmethod
def get_stats_for_the_date(cls, statistics_date, edx_installation_object=None):
    """
    Provide statistic model instance for the given Edx installation.

    :param statistics_date: date (day) the statistics were created within.
    :param edx_installation_object: specific installation object.
    :return: statistic model instance if it is created at the specified day otherwise None
    """
    # Half-open day interval [statistics_date, statistics_date + 1 day);
    # `.last()` yields the newest matching row or None.
    stat_item = cls.objects.filter(
        edx_installation=edx_installation_object,
        data_created_datetime__gte=statistics_date,
        data_created_datetime__lt=(statistics_date + timedelta(days=1))
    ).last()

    return stat_item
@classmethod
def timeline(cls):
    """
    Provide the ordered list of distinct days ('%Y-%m-%d' strings) with
    data, for plotting on the x axis.
    """
    datetimes = cls.objects.order_by(
        'data_created_datetime'
    ).values_list('data_created_datetime', flat=True).distinct()

    # Data may arrive several times per day (e.g. every 15 seconds when
    # testing), so deduplicate per day and re-sort to keep order.
    return sorted({item.date().strftime('%Y-%m-%d') for item in datetimes})
@classmethod
def data_per_period(cls):
    """
    Return per-day totals of students, courses and reporting instances.

    Values are summed per day because several instances may report on the
    same day; each instance is assumed to report at most once per day.

    :return: three lists (students, courses, instances), one value per day.
    """
    per_day = cls.objects.annotate(
        date_in_days=Trunc('data_created_datetime', 'day', output_field=DateField())
    ).values('date_in_days').order_by('date_in_days')

    students = per_day.annotate(
        students=Sum('active_students_amount_day')
    ).values_list('students', flat=True)
    courses = per_day.annotate(courses=Sum('courses_amount')).values_list('courses', flat=True)
    instances = per_day.annotate(
        instances=Count('edx_installation__access_token')
    ).values_list('instances', flat=True)

    return list(students), list(courses), list(instances)
@classmethod
def overall_counts(cls):
    """
    Provide total count of all instances, courses and students from all instances per previous calendar day.

    Returns overall counts as dict:
    {
        "instances_count": <int:instances_count>,
        "courses_count": <int:courses_count>,
        "students_count": <int:students_count>,
        "generated_certificates_count": <int:generated_certificates_count>,
        "registered_students_count": <int:registered_students_count>,
    }
    """
    start_of_day, end_of_day = get_last_calendar_day()

    all_unique_instances = cls.objects.filter(
        data_created_datetime__gte=start_of_day, data_created_datetime__lt=end_of_day
    )

    # PERF FIX: the original issued a separate aggregate() query per
    # metric (four DB round-trips); one aggregate() computes all sums.
    totals = all_unique_instances.aggregate(
        Sum('courses_amount'),
        Sum('active_students_amount_day'),
        Sum('generated_certificates'),
        Sum('registered_students'),
    )

    # Sums are None when no rows matched, hence the `or 0` fallbacks.
    return {
        "instances_count": all_unique_instances.count() or 0,
        "courses_count": totals['courses_amount__sum'] or 0,
        "students_count": totals['active_students_amount_day__sum'] or 0,
        "generated_certificates_count": totals['generated_certificates__sum'] or 0,
        "registered_students_count": totals['registered_students__sum'] or 0,
    }
@classmethod
def get_charts_data(cls):
    """
    Provide per-snapshot user and certificate counts for charts.

    :return: dict mapping '%y-%m-%d' date strings to
        [registered_students, generated_certificates, enthusiastic_students],
        e.g. {"19-01-28": [0, 1, 0], "19-01-22": [0, 7, 0]}.
    """
    return {
        item.data_created_datetime.strftime('%y-%m-%d'): [
            item.registered_students,
            item.generated_certificates,
            item.enthusiastic_students,
        ]
        for item in cls.objects.all()
    }
@classmethod
def get_students_per_country_stats(cls):
    """
    Total of students amount per country to display on world map from all instances per month.

    Returns:
        dict: sortable month key ('YYYY-MM') mapped to
            {'label': verbose month, 'countries': {code: count, ...}}
            as built by ``aggregate_countries_by_months``.
    """
    # Get list of instances's students per country data as unicode strings.
    # Each row is annotated with two string forms of its month, both made
    # by PostgreSQL's to_char(): a display label ('May 2017') and a
    # sortable key ('2017-05').
    queryset = cls.objects.annotate(
        month_verbose=Func(
            F('data_created_datetime'), Value('TMMonth YYYY'), function='to_char'
        ),
        month_ordering=Func(
            F('data_created_datetime'), Value('YYYY-MM'), function='to_char'
        ),
    )
    result_rows = queryset.values_list(
        'month_ordering', 'month_verbose', 'students_per_country'
    )
    return cls.aggregate_countries_by_months(result_rows)
@classmethod
def aggregate_countries_by_months(cls, values_list):
    """
    Merge per-row country statistics into one entry per month.

    :param values_list: iterable of (month_ordering, month_verbose,
        countries) rows
    :return: dict keyed by sortable month with merged country statistics
    """
    months = {}
    for sort_key, label, countries in values_list:
        cls.add_month_countries_data(sort_key, label, countries, months)
    return months
@classmethod
def add_month_countries_data(
    cls, month_ordering, month_verbose, countries, months
):
    """
    Merge one month row into the accumulating ``months`` dictionary.

    The first occurrence of a month stores its label and countries dict;
    later occurrences add their counts onto the stored countries.

    :param month_ordering: sortable date key represented as a string
    :param month_verbose: human friendly date represented as a string
    :param countries: dict of country code to student count
    :param months: accumulator dictionary updated in place
    """
    existing = months.get(month_ordering)
    if existing is None:
        months[month_ordering] = {
            'countries': countries,
            'label': month_verbose,
        }
    else:
        cls.add_up_new_month_data(existing['countries'], countries)
@classmethod
def add_up_new_month_data(cls, existing_data, new_data):
    """
    Add a new month's country counts into the resulting data dictionary.

    Counts from ``new_data`` are added to the matching countries in
    ``existing_data``; countries seen for the first time are inserted.

    :param existing_data: dict of country code -> count, updated in place
    :param new_data: dict of country code -> count (left unmodified)
    """
    # BUG FIX: the original popped entries out of `new_data`, mutating the
    # caller's dictionary (the queryset row handed down from
    # aggregate_countries_by_months) as a hidden side effect.
    for country, count in new_data.items():
        existing_data[country] = existing_data.get(country, 0) + count
@classmethod
def create_students_per_country(cls, worlds_students_per_country):
    """
    Create convenient and necessary data formats to render it from view.
    Graphs require list-format data.

    Returns a pair:
      * datamap list of ``[alpha-3 code, count]`` pairs (students whose
        country code cannot be resolved are omitted from the map);
      * tabular list of ``(country name, [count, percentage])`` tuples
        sorted by count descending, with the "unspecified" bucket moved
        to the end of the table.
    """
    datamap_format_countries_list = []
    tabular_format_countries_map = {}
    if not worlds_students_per_country:
        # No data at all: the table shows a single zeroed "unspecified" row.
        tabular_format_countries_map[cls.unspecified_country_name] = [0, 0]
        return datamap_format_countries_list, list(tabular_format_countries_map.items())
    all_active_students = sum(worlds_students_per_country.values())
    for country, count in worlds_students_per_country.items():
        student_amount_percentage = cls.get_student_amount_percentage(count, all_active_students)
        try:
            country_info = pycountry.countries.get(alpha_2=country)
            country_alpha_3 = country_info.alpha_3
            datamap_format_countries_list += [[country_alpha_3, count]]
            country_name = country_info.name
        except KeyError:
            # Create students without country amount.
            # NOTE(review): newer pycountry versions return None (leading to
            # AttributeError) instead of raising KeyError for unknown codes —
            # confirm the pinned pycountry version still raises KeyError here.
            country_name = cls.unspecified_country_name
        if country_name in tabular_format_countries_map:
            # Same display name seen twice (e.g. several unresolvable codes):
            # merge [count, percentage] element-wise.
            tabular_format_countries_map[country_name] = list(map(
                operator.add,
                tabular_format_countries_map[country_name],
                [count, student_amount_percentage]
            ))
        else:
            tabular_format_countries_map[country_name] = [count, student_amount_percentage]
    # Pop out the unspecified country
    unspecified_country_values = tabular_format_countries_map.pop(cls.unspecified_country_name, None)
    # Sort in descending order.
    tabular_format_countries_list = sorted(
        tabular_format_countries_map.items(),
        key=lambda x: x[1][0],
        reverse=True
    )
    if unspecified_country_values:
        tabular_format_countries_list.append(
            (cls.unspecified_country_name, unspecified_country_values)
        )
    return datamap_format_countries_list, tabular_format_countries_list
@classmethod
def get_students_per_country(cls):
    """
    Enrich the per-month statistics with render-ready country data.

    For each month adds the datamap list, the tabular list, the top
    country, and the number of real countries (the "unspecified" bucket
    is not counted).
    """
    months = cls.get_students_per_country_stats()
    for month_data in months.values():
        countries = month_data['countries']
        datamap_list, tabular_list = cls.create_students_per_country(countries)
        month_data.update(
            datamap_countries_list=datamap_list,
            tabular_countries_list=tabular_list,
            top_country=cls.get_statistics_top_country(tabular_list),
            countries_amount=(
                len(countries) - (cls.unspecified_country_name in countries)
            ),
        )
    return months
@staticmethod
def get_student_amount_percentage(country_count_in_statistics, all_active_students):
    """
    Percentage (truncated to an int) of one country's students among all.

    Returns 0 when there are no active students at all, avoiding a
    division by zero.
    """
    if not all_active_students:
        return 0
    return int(country_count_in_statistics / all_active_students * 100)
@classmethod
def get_students_countries_amount(cls, months):
    """
    Count distinct real countries across the tabular per-month statistics.

    The `Country is not specified` row is a bucket for students without a
    country, not an actual country, so it is excluded from the count.
    Empty tabular lists contribute zero.
    """
    total = 0
    for month_data in months.values():
        names = {name for name, _ in month_data['tabular_countries_list']}
        names.discard(cls.unspecified_country_name)
        total += len(names)
    return total
def update(self, stats):
    """
    Update model fields from the given dictionary and persist the change.

    :param stats: mapping of attribute name -> new value
    """
    for attr_name, attr_value in stats.items():
        setattr(self, attr_name, attr_value)
    self.save()
| 37.296837 | 116 | 0.659339 | 1,801 | 15,329 | 5.360911 | 0.189895 | 0.027965 | 0.027965 | 0.023304 | 0.227861 | 0.160228 | 0.084102 | 0.047229 | 0.023822 | 0.023822 | 0 | 0.010819 | 0.264401 | 15,329 | 410 | 117 | 37.387805 | 0.845424 | 0.26414 | 0 | 0.093333 | 0 | 0.004444 | 0.10287 | 0.038837 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071111 | false | 0 | 0.04 | 0 | 0.275556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
708475a8fdb41ed7fcd4a6f028a2dcd0edaa89ad | 20,560 | py | Python | pypowervm/tests/tasks/test_cna.py | stephenfin/pypowervm | 68f2b586b4f17489f379534ab52fc56a524b6da5 | [
"Apache-2.0"
] | 24 | 2015-12-02T19:49:45.000Z | 2021-11-17T11:43:51.000Z | pypowervm/tests/tasks/test_cna.py | stephenfin/pypowervm | 68f2b586b4f17489f379534ab52fc56a524b6da5 | [
"Apache-2.0"
] | 18 | 2017-03-01T05:54:25.000Z | 2022-03-14T17:32:47.000Z | pypowervm/tests/tasks/test_cna.py | stephenfin/pypowervm | 68f2b586b4f17489f379534ab52fc56a524b6da5 | [
"Apache-2.0"
] | 17 | 2016-02-10T22:53:04.000Z | 2021-11-10T09:47:10.000Z | # Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from pypowervm import adapter as adp
from pypowervm import exceptions as exc
from pypowervm.tasks import cna
from pypowervm.tests import test_fixtures as fx
from pypowervm.tests.test_utils import test_wrapper_abc as twrap
from pypowervm.wrappers import entry_wrapper as ewrap
from pypowervm.wrappers import logical_partition as pvm_lpar
from pypowervm.wrappers import network as pvm_net
# Canned REST response payloads used to seed the TestWrapper fixtures below.
VSWITCH_FILE = 'fake_vswitch_feed.txt'
VNET_FILE = 'fake_virtual_network_feed.txt'
class TestCNA(twrap.TestWrapper):
    """Unit Tests for creating Client Network Adapters."""

    # TestWrapper fixture configuration: emulate an HMC-managed system and
    # load the vswitch feed, so self.resp / self.dwrap hold VSwitch data.
    mock_adapter_fx_args = {'traits': fx.RemoteHMCTraits}
    file = VSWITCH_FILE
    wrapper_class_to_test = pvm_net.VSwitch

    @mock.patch('pypowervm.tasks.cna._find_or_create_vnet')
    def test_crt_cna(self, mock_vnet_find):
        """Tests the creation of Client Network Adapters."""
        # Create a side effect that can validate the input into the create
        # call.
        def validate_of_create(*kargs, **kwargs):
            self.assertIsNotNone(kargs[0])
            self.assertEqual('LogicalPartition', kargs[1])
            self.assertEqual('fake_lpar', kwargs.get('root_id'))
            self.assertEqual('ClientNetworkAdapter', kwargs.get('child_type'))
            return pvm_net.CNA.bld(self.adpt, 1, 'href').entry
        self.adpt.create.side_effect = validate_of_create
        self.adpt.read.return_value = self.resp

        n_cna = cna.crt_cna(self.adpt, None, 'fake_lpar', 5)
        self.assertIsNotNone(n_cna)
        self.assertIsInstance(n_cna, pvm_net.CNA)
        # On HMC, crt_cna must resolve/create the backing virtual network.
        self.assertEqual(1, mock_vnet_find.call_count)

    @mock.patch('pypowervm.tasks.cna._find_or_create_vnet')
    def test_crt_cna_no_vnet_crt(self, mock_vnet_find):
        """Tests the creation of Client Network Adapters.

        The virtual network creation shouldn't be done in this flow.
        """
        # PVMish Traits
        self.adptfx.set_traits(fx.LocalPVMTraits)
        self.adpt.read.return_value = self.resp

        # Create a side effect that can validate the input into the create
        # call.
        def validate_of_create(*kargs, **kwargs):
            self.assertIsNotNone(kargs[0])
            self.assertEqual('LogicalPartition', kargs[1])
            self.assertEqual('fake_lpar', kwargs.get('root_id'))
            self.assertEqual('ClientNetworkAdapter', kwargs.get('child_type'))
            return pvm_net.CNA.bld(self.adpt, 1, 'href').entry
        self.adpt.create.side_effect = validate_of_create

        n_cna = cna.crt_cna(self.adpt, None, 'fake_lpar', 5, slot_num=1)
        self.assertIsNotNone(n_cna)
        self.assertIsInstance(n_cna, pvm_net.CNA)
        # Local PVM platform: no virtual-network lookup/creation expected.
        self.assertEqual(0, mock_vnet_find.call_count)

    def test_find_or_create_vswitch(self):
        """Validates that a vswitch can be created."""
        self.adpt.read.return_value = self.resp
        # Test that it finds the right vSwitch
        vswitch_w = cna._find_or_create_vswitch(self.adpt, 'ETHERNET0', True)
        self.assertIsNotNone(vswitch_w)

        # Create a side effect that can validate the input into the create call
        def validate_of_create(*kargs, **kwargs):
            self.assertIsNotNone(kargs[0])
            # Is the vSwitch create
            self.assertEqual('ManagedSystem', kargs[1])
            self.assertEqual('VirtualSwitch', kwargs.get('child_type'))
            # Return a previously created vSwitch...
            return self.dwrap.entry
        self.adpt.create.side_effect = validate_of_create

        # Test the create
        vswitch_w = cna._find_or_create_vswitch(self.adpt, 'Temp', True)
        self.assertIsNotNone(vswitch_w)
        self.assertTrue(self.adpt.create.called)

        # Make sure that if the create flag is set to false, an error is thrown
        # when the vswitch can't be found.
        self.assertRaises(exc.Error, cna._find_or_create_vswitch, self.adpt,
                          'Temp', False)
class TestVNET(twrap.TestWrapper):
    """Unit tests for virtual-network lookup/creation and trunk handling."""

    # TestWrapper fixture configuration: HMC traits + the virtual network
    # feed, so self.resp holds VNet data.
    mock_adapter_fx_args = {'traits': fx.RemoteHMCTraits}
    file = VNET_FILE
    wrapper_class_to_test = pvm_net.VNet

    def test_find_or_create_vnet(self):
        """Tests that the virtual network can be found/created."""
        self.adpt.read.return_value = self.resp
        fake_vs = mock.Mock()
        fake_vs.switch_id = 0
        fake_vs.name = 'ETHERNET0'
        fake_vs.related_href = ('https://9.1.2.3:12443/rest/api/uom/'
                                'ManagedSystem/'
                                '67dca605-3923-34da-bd8f-26a378fc817f/'
                                'VirtualSwitch/'
                                'ec8aaa54-9837-3c23-a541-a4e4be3ae489')

        # This should find a vnet.
        vnet_resp = cna._find_or_create_vnet(self.adpt, '2227', fake_vs)
        self.assertIsNotNone(vnet_resp)

        # Now flip to a CNA that requires a create...
        resp = adp.Response('reqmethod', 'reqpath', 'status', 'reason', {})
        resp.entry = ewrap.EntryWrapper._bld(
            self.adpt, tag='VirtualNetwork').entry
        self.adpt.create.return_value = resp
        vnet_resp = cna._find_or_create_vnet(self.adpt, '2228', fake_vs)
        self.assertIsNotNone(vnet_resp)
        self.assertEqual(1, self.adpt.create.call_count)

    def test_find_free_vlan(self):
        """Tests that a free VLAN can be found."""
        self.adpt.read.return_value = self.resp
        # Mock data specific to the VNET File
        fake_vs = mock.Mock()
        fake_vs.name = 'ETHERNET0'
        fake_vs.related_href = ('https://9.1.2.3:12443/rest/api/uom/'
                                'ManagedSystem/'
                                '67dca605-3923-34da-bd8f-26a378fc817f/'
                                'VirtualSwitch/'
                                'ec8aaa54-9837-3c23-a541-a4e4be3ae489')
        self.assertEqual(1, cna._find_free_vlan(self.adpt, fake_vs))

    @mock.patch('pypowervm.wrappers.network.VNet.wrap')
    def test_find_free_vlan_mocked(self, mock_vnet_wrap):
        """Uses lots of mock data for a find vlan."""
        self.adpt.read.return_value = mock.Mock()

        # Helper function to build the vnets.
        def build_mock_vnets(max_vlan, vswitch_uri):
            vnets = []
            for x in range(1, max_vlan + 1):
                vnets.append(mock.Mock(vlan=x,
                                       associated_switch_uri=vswitch_uri))
            return vnets

        mock_vswitch = mock.Mock(related_href='test_vs')

        # Test when all the vnet's are on a single switch.
        mock_vnet_wrap.return_value = build_mock_vnets(3000, 'test_vs')
        self.assertEqual(3001, cna._find_free_vlan(self.adpt, mock_vswitch))

        # Test with multiple switches.  The second vswitch with a higher vlan
        # should not impact the vswitch we're searching for.
        mock_vnet_wrap.return_value = (build_mock_vnets(2000, 'test_vs') +
                                       build_mock_vnets(4000, 'test_vs2'))
        self.assertEqual(2001, cna._find_free_vlan(self.adpt, mock_vswitch))

        # Test when all the VLANs are consumed
        mock_vnet_wrap.return_value = build_mock_vnets(4094, 'test_vs')
        self.assertRaises(exc.Error, cna._find_free_vlan, self.adpt,
                          mock_vswitch)

    @mock.patch('pypowervm.tasks.cna._find_free_vlan')
    def test_assign_free_vlan(self, mock_find_vlan):
        mock_find_vlan.return_value = 2016
        mocked = mock.MagicMock()
        mock_cna = mock.MagicMock(pvid=31, enabled=False)
        mock_cna.update.return_value = mock_cna
        # The CNA is re-tagged with the free VLAN...
        updated_cna = cna.assign_free_vlan(mocked, mocked, mocked, mock_cna)
        self.assertEqual(2016, updated_cna.pvid)
        # ...without changing its enablement state by default...
        self.assertEqual(mock_cna.enabled, updated_cna.enabled)
        # ...unless ensure_enabled is requested.
        updated_cna = cna.assign_free_vlan(mocked, mocked, mocked, mock_cna,
                                           ensure_enabled=True)
        self.assertEqual(True, updated_cna.enabled)

    @mock.patch('pypowervm.wrappers.network.CNA.bld')
    @mock.patch('pypowervm.tasks.cna._find_free_vlan')
    @mock.patch('pypowervm.tasks.cna._find_or_create_vswitch')
    @mock.patch('pypowervm.tasks.partition.get_partitions')
    def test_crt_p2p_cna(
            self, mock_get_partitions, mock_find_or_create_vswitch,
            mock_find_free_vlan, mock_cna_bld):
        """Tests the crt_p2p_cna."""
        # Mock out the data
        mock_vswitch = mock.Mock(related_href='vswitch_href')
        mock_find_or_create_vswitch.return_value = mock_vswitch
        mock_find_free_vlan.return_value = 2050

        # Mock the get of the VIOSes
        mock_vio1 = mock.Mock(uuid='src_io_host_uuid')
        mock_vio2 = mock.Mock(uuid='vios_uuid2')
        mock_get_partitions.return_value = [mock_vio1, mock_vio2]

        mock_cna = mock.MagicMock()
        mock_trunk1, mock_trunk2 = mock.MagicMock(pvid=2050), mock.MagicMock()
        mock_trunk1.create.return_value = mock_trunk1
        # bld is consumed in order: trunk per VIOS first, then the client.
        mock_cna_bld.side_effect = [mock_trunk1, mock_trunk2, mock_cna]

        # Invoke the create
        mock_ext_ids = {'test': 'value', 'test2': 'value2'}
        client_adpt, trunk_adpts = cna.crt_p2p_cna(
            self.adpt, None, 'lpar_uuid',
            ['src_io_host_uuid', 'vios_uuid2'], mock_vswitch, crt_vswitch=True,
            slot_num=1, mac_addr='aabbccddeeff', ovs_bridge='br-ex',
            ovs_ext_ids=mock_ext_ids, configured_mtu=1450)

        # Make sure the client and trunk were 'built'
        mock_cna_bld.assert_any_call(self.adpt, 2050, 'vswitch_href',
                                     slot_num=1, mac_addr='aabbccddeeff')
        mock_cna_bld.assert_any_call(
            self.adpt, 2050, 'vswitch_href', trunk_pri=1, dev_name=None,
            ovs_bridge='br-ex', ovs_ext_ids=mock_ext_ids, configured_mtu=1450)
        mock_cna_bld.assert_any_call(
            self.adpt, 2050, 'vswitch_href', trunk_pri=2, dev_name=None,
            ovs_bridge='br-ex', ovs_ext_ids=mock_ext_ids, configured_mtu=1450)

        # Make sure they were then created
        self.assertIsNotNone(client_adpt)
        self.assertEqual(2, len(trunk_adpts))
        mock_cna.create.assert_called_once_with(
            parent_type=pvm_lpar.LPAR, parent_uuid='lpar_uuid')
        mock_trunk1.create.assert_called_once_with(parent=mock_vio1)
        mock_trunk2.create.assert_called_once_with(parent=mock_vio2)

    @mock.patch('pypowervm.wrappers.network.CNA.bld')
    @mock.patch('pypowervm.tasks.cna._find_free_vlan')
    @mock.patch('pypowervm.tasks.cna._find_or_create_vswitch')
    @mock.patch('pypowervm.tasks.partition.get_partitions')
    def test_crt_p2p_cna_single(
            self, mock_get_partitions, mock_find_or_create_vswitch,
            mock_find_free_vlan, mock_cna_bld):
        """Tests the crt_p2p_cna with the mgmt lpar and a dev_name."""
        # Mock out the data
        mock_vswitch = mock.Mock(related_href='vswitch_href')
        mock_find_or_create_vswitch.return_value = mock_vswitch
        mock_find_free_vlan.return_value = 2050

        # Mock the get of the VIOSes
        mock_vio1 = mock.Mock(uuid='mgmt_lpar_uuid')
        mock_vio2 = mock.Mock(uuid='vios_uuid2')
        mock_get_partitions.return_value = [mock_vio1, mock_vio2]

        mock_cna = mock.MagicMock()
        mock_trunk1 = mock.MagicMock(pvid=2050)
        mock_trunk1.create.return_value = mock_trunk1
        mock_cna_bld.side_effect = [mock_trunk1, mock_cna]

        # Invoke the create
        client_adpt, trunk_adpts = cna.crt_p2p_cna(
            self.adpt, None, 'lpar_uuid',
            ['mgmt_lpar_uuid'], mock_vswitch, crt_vswitch=True,
            mac_addr='aabbccddeeff', dev_name='tap-12345')

        # Make sure the client and trunk were 'built'
        mock_cna_bld.assert_any_call(self.adpt, 2050, 'vswitch_href',
                                     mac_addr='aabbccddeeff', slot_num=None)
        mock_cna_bld.assert_any_call(
            self.adpt, 2050, 'vswitch_href', trunk_pri=1, dev_name='tap-12345',
            ovs_bridge=None, ovs_ext_ids=None, configured_mtu=None)

        # Make sure they were then created
        self.assertIsNotNone(client_adpt)
        self.assertEqual(1, len(trunk_adpts))
        mock_cna.create.assert_called_once_with(
            parent_type=pvm_lpar.LPAR, parent_uuid='lpar_uuid')
        mock_trunk1.create.assert_called_once_with(parent=mock_vio1)

    @mock.patch('pypowervm.wrappers.network.CNA.bld')
    @mock.patch('pypowervm.tasks.cna._find_free_vlan')
    @mock.patch('pypowervm.tasks.cna._find_or_create_vswitch')
    @mock.patch('pypowervm.tasks.partition.get_partitions')
    def test_crt_trunk_with_free_vlan(
            self, mock_get_partitions, mock_find_or_create_vswitch,
            mock_find_free_vlan, mock_cna_bld):
        """Tests the crt_trunk_with_free_vlan on mgmt based VIOS."""
        # Mock out the data
        mock_vswitch = mock.Mock(related_href='vswitch_href')
        mock_find_or_create_vswitch.return_value = mock_vswitch
        mock_find_free_vlan.return_value = 2050

        # Mock the get of the VIOSes.
        mock_vio1 = mock.Mock(uuid='vios_uuid1')
        mock_get_partitions.return_value = [mock_vio1]

        mock_trunk1 = mock.MagicMock(pvid=2050)
        mock_trunk1.create.return_value = mock_trunk1
        mock_cna_bld.return_value = mock_trunk1

        # Invoke the create
        mock_ext_id = {'test1': 'value1', 'test2': 'value2'}
        trunk_adpts = cna.crt_trunk_with_free_vlan(
            self.adpt, None, ['vios_uuid1'],
            mock_vswitch, crt_vswitch=True, dev_name='tap-12345',
            ovs_bridge='br-int', ovs_ext_ids=mock_ext_id, configured_mtu=1450)

        # Make sure the client and trunk were 'built'
        mock_cna_bld.assert_any_call(
            self.adpt, 2050, 'vswitch_href', trunk_pri=1, dev_name='tap-12345',
            ovs_bridge='br-int', ovs_ext_ids=mock_ext_id, configured_mtu=1450)

        # Make sure that the trunk was created
        self.assertEqual(1, len(trunk_adpts))
        mock_trunk1.create.assert_called_once_with(parent=mock_vio1)

    @mock.patch('pypowervm.wrappers.network.CNA.get')
    def test_find_trunk_on_lpar(self, mock_cna_get):
        parent_wrap = mock.MagicMock()
        m1 = mock.Mock(is_trunk=True, pvid=2, vswitch_id=2)
        m2 = mock.Mock(is_trunk=False, pvid=3, vswitch_id=2)
        m3 = mock.Mock(is_trunk=True, pvid=3, vswitch_id=1)
        m4 = mock.Mock(is_trunk=True, pvid=3, vswitch_id=2)
        # No trunk adapter matches m4's (pvid, vswitch_id) -> None.
        mock_cna_get.return_value = [m1, m2, m3]
        self.assertIsNone(cna._find_trunk_on_lpar(self.adpt, parent_wrap, m4))
        self.assertTrue(mock_cna_get.called)

        mock_cna_get.reset_mock()
        mock_cna_get.return_value = [m1, m2, m3, m4]
        self.assertEqual(m4, cna._find_trunk_on_lpar(self.adpt, parent_wrap,
                                                     m4))
        self.assertTrue(mock_cna_get.called)

    @mock.patch('pypowervm.tasks.cna._find_trunk_on_lpar')
    @mock.patch('pypowervm.tasks.partition.get_mgmt_partition')
    @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
    def test_find_trunks(self, mock_vios_get, mock_get_mgmt,
                         mock_find_trunk):
        # Mocked responses can be simple, since they are just fed into the
        # _find_trunk_on_lpar
        mock_vios_get.return_value = [mock.MagicMock(), mock.MagicMock()]
        mock_get_mgmt.return_value = mock.MagicMock()

        # The responses back from the find trunk.  Make it an odd trunk
        # priority ordering to make sure we sort properly
        v1 = mock.Mock(trunk_pri=3)
        c1, c2 = mock.Mock(trunk_pri=1), mock.Mock(trunk_pri=2)
        mock_find_trunk.side_effect = [v1, c1, c2]

        # Invoke the method.
        resp = cna.find_trunks(self.adpt, mock.Mock(pvid=2))

        # Make sure three calls to the find trunk (one per VIOS + mgmt lpar)
        self.assertEqual(3, mock_find_trunk.call_count)

        # Order of the response is important.  Should be based off of trunk
        # priority
        self.assertEqual([c1, c2, v1], resp)

    @mock.patch('pypowervm.wrappers.network.CNA.get')
    def test_find_all_trunks_on_lpar(self, mock_cna_get):
        parent_wrap = mock.MagicMock()
        m1 = mock.Mock(is_trunk=True, vswitch_id=2)
        m2 = mock.Mock(is_trunk=False, vswitch_id=2)
        m3 = mock.Mock(is_trunk=True, vswitch_id=1)
        m4 = mock.Mock(is_trunk=True, vswitch_id=2)
        mock_cna_get.return_value = [m1, m2, m3, m4]
        # Without a vswitch filter every trunk adapter is returned.
        returnVal = [m1, m3, m4]
        self.assertEqual(returnVal, cna._find_all_trunks_on_lpar(self.adpt,
                                                                 parent_wrap))
        mock_cna_get.reset_mock()
        mock_cna_get.return_value = [m1, m2, m3, m4]
        # With vswitch_id=1 only the matching trunk remains.
        self.assertEqual([m3],
                         cna._find_all_trunks_on_lpar(self.adpt,
                                                      parent_wrap=parent_wrap,
                                                      vswitch_id=1))

    @mock.patch('pypowervm.wrappers.network.CNA.get')
    @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
    @mock.patch('pypowervm.wrappers.logical_partition.LPAR.get')
    def test_find_cna_wraps(self, mock_lpar_get, mock_vios_get, mock_cna_get):
        # Mocked responses are simple since they are only used for
        # pvm_net.CNA.get
        mock_lpar_get.return_value = [mock.MagicMock()]
        mock_vios_get.return_value = [mock.MagicMock()]
        # Mocked cna_wraps
        m1 = mock.Mock(uuid=2, pvid=2, vswitch_id=2)
        m2 = mock.Mock(uuid=3, pvid=1, vswitch_id=1)
        m3 = mock.Mock(uuid=1, pvid=1, vswitch_id=1)
        mock_cna_get.side_effect = [[m1, m2], [m3]]
        mock_trunk = mock.Mock(adapter=self.adpt, uuid=1, pvid=1, vswitch_id=1)
        # Adapters from the LPAR feed and the VIOS feed are combined.
        self.assertEqual([m1, m2, m3], cna._find_cna_wraps(mock_trunk))
        mock_cna_get.side_effect = [[m1, m2], [m3]]
        # A vswitch filter drops adapters on other switches.
        self.assertEqual([m2, m3], cna._find_cna_wraps(mock_trunk, 1))

    @mock.patch('pypowervm.tasks.cna._find_cna_wraps')
    def test_find_cnas_on_trunk(self, mock_find_wraps):
        # Mocked cna_wraps
        m1 = mock.Mock(uuid=2, pvid=2, vswitch_id=2)
        m2 = mock.Mock(uuid=3, pvid=1, vswitch_id=1)
        m3 = mock.Mock(uuid=1, pvid=1, vswitch_id=1)
        mock_find_wraps.return_value = [m1, m2, m3]
        mock_trunk = mock.Mock(adapter=self.adpt, uuid=1, pvid=1, vswitch_id=1)
        # m2 shares the trunk's pvid/vswitch but is a different adapter.
        self.assertEqual([m2], cna.find_cnas_on_trunk(mock_trunk))
        mock_find_wraps.return_value = [m1, m3]
        self.assertEqual([], cna.find_cnas_on_trunk(mock_trunk))
        mock_trunk = mock.Mock(adapter=self.adpt, uuid=3, pvid=3, vswitch_id=3)
        self.assertEqual([], cna.find_cnas_on_trunk(mock_trunk))

    @mock.patch('pypowervm.tasks.cna._find_cna_wraps')
    @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
    @mock.patch('pypowervm.tasks.partition.get_mgmt_partition')
    @mock.patch('pypowervm.tasks.cna._find_all_trunks_on_lpar')
    @mock.patch('pypowervm.wrappers.network.VSwitch.search')
    def test_find_orphaned_trunks(self, mock_vswitch, mock_trunks,
                                  mock_get_mgmt, mock_vios_get, mock_wraps):
        mock_vswitch.return_value = mock.MagicMock(switch_id=1)
        mock_get_mgmt.return_value = mock.MagicMock()
        mock_vios_get.return_value = [mock.MagicMock()]
        # Mocked cna_wraps
        m1 = mock.Mock(is_trunk=True, uuid=2, pvid=2, vswitch_id=1)
        m2 = mock.Mock(is_trunk=False, uuid=3, pvid=3, vswitch_id=1)
        m3 = mock.Mock(is_trunk=True, uuid=1, pvid=1, vswitch_id=1)
        m4 = mock.Mock(is_trunk=False, uuid=4, pvid=1, vswitch_id=1)
        mock_wraps.return_value = [m1, m2, m3, m4]
        mock_trunks.side_effect = [[m1, m3], []]
        # m1 has no client CNA on its pvid -> orphaned; m3 is served by m4.
        self.assertEqual([m1], cna.find_orphaned_trunks(self.adpt,
                                                        mock.MagicMock))
| 45.286344 | 79 | 0.655302 | 2,827 | 20,560 | 4.483905 | 0.125575 | 0.027769 | 0.0426 | 0.03266 | 0.698959 | 0.651231 | 0.6065 | 0.578731 | 0.524613 | 0.485405 | 0 | 0.028905 | 0.242802 | 20,560 | 453 | 80 | 45.386313 | 0.785329 | 0.135165 | 0 | 0.490132 | 0 | 0 | 0.125715 | 0.077446 | 0 | 0 | 0 | 0 | 0.203947 | 1 | 0.065789 | false | 0 | 0.029605 | 0 | 0.134868 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7084e27c49595c6dd313ddb9fd27d9cdb9c9e2f7 | 17,707 | py | Python | tools/stats_gen_lib.py | mtak-/lockfree-stm | 00cd5f9a056e999f0cd140106c1d66b321d6fd47 | [
"MIT"
] | 9 | 2016-11-14T23:35:30.000Z | 2019-01-18T23:21:08.000Z | tools/stats_gen_lib.py | mtak-/lockfree-stm | 00cd5f9a056e999f0cd140106c1d66b321d6fd47 | [
"MIT"
] | 3 | 2017-01-09T01:22:57.000Z | 2017-03-20T04:50:05.000Z | tools/stats_gen_lib.py | mtak-/lockfree-stm | 00cd5f9a056e999f0cd140106c1d66b321d6fd47 | [
"MIT"
] | null | null | null | #!/usr/bin/python
from string import Formatter
# Template for the generated C++ stats header.  Placeholders such as
# {INCLUDE_GUARD}, {MACROS_ON}, {THREAD_RECORD_MEMBERS} are filled in by
# gen_stats(); literal C++ braces are escaped as {{ }}.  The header defines a
# per-thread record struct, a process-wide singleton that aggregates published
# records, and the user-facing macros (PUBLISH_RECORD/CLEAR/DUMP).
# NOTE(review): this dump appears to have lost the template's internal
# indentation — confirm the generated C++ layout against the original file.
_STATS_TEMPLATE = '''#ifndef {INCLUDE_GUARD}
#define {INCLUDE_GUARD}
// clang-format off
#ifdef {MACRO_PREFIX}ON
{INCLUDES}
#include <iomanip>
#include <sstream>
#include <string>
#include <vector>
// comment out any stats you don't want, and things will be just dandy
{MACROS_ON}
#define {MACRO_PREFIX}PUBLISH_RECORD() \\
do {{ \\
{NS_ACCESS}{CLASS_NAME}::get().publish({NS_ACCESS}tls_record()); \\
{NS_ACCESS}tls_record() = {{}}; \\
}} while(0) \\
/**/
#define {MACRO_PREFIX}CLEAR() {NS_ACCESS}{CLASS_NAME}::get().clear()
#ifndef {MACRO_PREFIX}DUMP
#include <iostream>
#define {MACRO_PREFIX}DUMP() (std::cout << {NS_ACCESS}{CLASS_NAME}::get().results())
#endif /* {MACRO_PREFIX}DUMP */
#else
#define {MACRO_PREFIX}PUBLISH_RECORD() /**/
#define {MACRO_PREFIX}CLEAR() /**/
#ifndef {MACRO_PREFIX}DUMP
#define {MACRO_PREFIX}DUMP() /**/
#endif /* {MACRO_PREFIX}DUMP */
#endif /* {MACRO_PREFIX}ON */
{MACROS_OFF}
// clang-format on
#ifdef {MACRO_PREFIX}ON
{NAMESPACE_BEGIN}
struct {CLASS_NAME}_tls_record
{{
{THREAD_RECORD_MEMBERS}
{CLASS_NAME}_tls_record() noexcept = default;
{THREAD_RECORD_MEMBER_FUNCTIONS}
std::string results() const
{{
std::ostringstream ostr;
ostr
{THREAD_RECORD_STREAM_OUTPUT};
return ostr.str();
}}
}};
inline {CLASS_NAME}_tls_record& tls_record() noexcept
{{
static LSTM_THREAD_LOCAL {CLASS_NAME}_tls_record record{{}};
return record;
}}
struct {CLASS_NAME}
{{
private:
using records_t = std::vector<{CLASS_NAME}_tls_record>;
using records_iter = typename records_t::iterator;
using records_value_type = typename records_t::value_type;
records_t records_;
{CLASS_NAME}() = default;
{CLASS_NAME}(const {CLASS_NAME}&) = delete;
{CLASS_NAME}& operator=(const {CLASS_NAME}&) = delete;
std::uint64_t
total_count(std::function<std::size_t(const {CLASS_NAME}_tls_record*)> accessor) const noexcept
{{
std::size_t result = 0;
for (auto& tid_record : records_)
result += accessor(&tid_record);
return result;
}}
std::uint64_t
max(std::function<std::size_t(const {CLASS_NAME}_tls_record*)> accessor) const noexcept
{{
std::size_t result = 0;
for (auto& tid_record : records_)
result = std::max(result, accessor(&tid_record));
return result;
}}
public:
static {CLASS_NAME}& get() noexcept
{{
static {CLASS_NAME} singleton;
return singleton;
}}
inline void publish({CLASS_NAME}_tls_record record) noexcept
{{
records_.emplace_back(std::move(record));
}}
{TRANSACTION_LOG_MEMBER_FUNCTIONS}
std::size_t thread_count() const noexcept {{ return records_.size(); }}
const records_t& records() const noexcept {{ return records_; }}
void clear() noexcept {{ records_.clear(); }}
std::string results(bool per_thread = true) const
{{
std::ostringstream ostr;
ostr
{TRANSACTION_LOG_STREAM_OUTPUT};
if (per_thread) {{
std::size_t i = 0;
for (auto& record : records_) {{
ostr << "--== Thread: " << std::setw(4) << i++ << " ==--" << '\\n';
ostr << record.results() << '\\n';
}}
}}
return ostr.str();
}}
}};
{NAMESPACE_END}
#endif /* {MACRO_PREFIX}ON */
#endif /* {INCLUDE_GUARD} */'''
# C++ storage type for each plain stat kind; all stats are 64-bit unsigned
# counters/accumulators in the generated header.
types = {
    'counter' : 'std::uint64_t',
    'max' : 'std::uint64_t',
    'sum' : 'std::uint64_t',
}
def indent(s, amount = 1):
    """Indent every line of ``s`` by ``amount`` levels of four spaces."""
    prefix = ' ' * (4 * amount)
    return '\n'.join(prefix + line for line in s.splitlines())
def get_pretty_name(stat):
    """Human-readable label: each whitespace-separated word capitalized."""
    words = stat.split()
    return ' '.join(map(str.capitalize, words))
def get_mem_name(stat):
    """C++ data-member identifier for a stat: lower-case, snake_case."""
    return '_'.join(stat.lower().split(' '))
def get_mem_fun_for_stat(compound_stat):
    """C++ accessor member-function name (with parens) for a compound stat."""
    return '%s()' % get_mem_name(compound_stat)
def get_mem_or_func_call(stat, stats, compound_stats):
    """
    Expression used to read ``stat`` in generated C++: a plain data member
    for simple stats, a member-function call for compound stats.
    """
    if stat not in stats:
        assert(stat in compound_stats)
        return get_mem_fun_for_stat(stat)
    return get_mem_name(stat)
def get_macro_name(stat, macro_prefix):
    """Preprocessor macro name for a stat: prefix + UPPER_SNAKE_CASE."""
    return macro_prefix + '_'.join(stat.upper().split(' '))
def get_macro_params(stat, stats_kinds):
    """
    Macro parameter list for ``stat``: counters take no argument, while
    'max' and 'sum' receive the amount to record.
    """
    kind_params = {'counter': '', 'max': 'amt', 'sum': 'amt'}
    return kind_params[stats_kinds[stat]]
def get_macro_define(stat, stats_kinds, macro_prefix):
    """Full ``#define NAME(params)`` text for a stat (without expansion)."""
    name = get_macro_name(stat, macro_prefix)
    params = get_macro_params(stat, stats_kinds)
    return '#define {0}({1})'.format(name, params)
def add_trailing_whitespace(strings):
    """Pad each string with spaces to one column past the longest string."""
    width = max(len(s) for s in strings) + 1
    return [s.ljust(width) for s in strings]
def get_macro_defines(stats, stats_kinds, macro_prefix):
    """Column-aligned ``#define`` strings, one per stat."""
    defines = [get_macro_define(stat, stats_kinds, macro_prefix)
               for stat in stats]
    return add_trailing_whitespace(defines)
def get_macro_expansion_on(stat, stats_kinds, ns_access):
    """C++ macro expansion used when stats collection is compiled in."""
    templates = {
        'counter' : '++{NS_ACCESS}tls_record().{MEM_NAME}',
        'max' : '{NS_ACCESS}tls_record().{MEM_NAME} = std::max({NS_ACCESS}tls_record().{MEM_NAME}, static_cast<std::uint64_t>({PARAMS}))',
        'sum' : '{NS_ACCESS}tls_record().{MEM_NAME} += {PARAMS}',
    }
    template = templates[stats_kinds[stat]]
    return template.format(NS_ACCESS=ns_access,
                           MEM_NAME=get_mem_name(stat),
                           PARAMS=get_macro_params(stat, stats_kinds))
def get_macros_on(stats, stats_kinds, ns_access, macro_prefix):
    """
    Build the block of live ``#define`` lines (used when stats are enabled).

    Each line pairs a column-aligned macro define with its expansion.

    :param stats: ordered list of plain stat names
    :param stats_kinds: mapping of stat name -> 'counter' | 'max' | 'sum'
    :param ns_access: namespace qualifier prepended to generated C++ names
    :param macro_prefix: prefix applied to the generated macro names
    :return: newline-joined ``#define`` lines
    """
    # NOTE: a leftover, never-read local template dict was removed here; the
    # defines come from get_macro_defines and the expansions from
    # get_macro_expansion_on.
    defines = get_macro_defines(stats, stats_kinds, macro_prefix)
    return '\n'.join([define + get_macro_expansion_on(stat, stats_kinds, ns_access)
                      for stat, define in zip(stats, defines)])
def get_macros_off(stats, stats_kinds, macro_prefix):
    """Guarded no-op fallback defines, so user-provided overrides win."""
    guard_template = '''#ifndef {MACRO_NAME}
{MACRO_DEFINE} /**/
#endif'''
    chunks = []
    for stat in stats:
        chunks.append(guard_template.format(
            MACRO_NAME=get_macro_name(stat, macro_prefix),
            MACRO_DEFINE=get_macro_define(stat, stats_kinds, macro_prefix),
        ))
    return '\n'.join(chunks)
def get_thread_record_mems(stats, stats_kinds):
    """Member declarations (with zero initializers) for the tls record."""
    init_for = {
        'counter' : '0',
        'max' : '0',
        'sum' : '0',
    }
    lines = []
    for stat in stats:
        kind = stats_kinds[stat]
        lines.append('%s %s{%s};' % (types[kind], get_mem_name(stat),
                                     init_for[kind]))
    return '\n'.join(lines)
def map_get_mem_or_func_call(stat_list, stats, compound_stats):
    """Read-expression for every stat name in ``stat_list``."""
    return [get_mem_or_func_call(stat, stats, compound_stats)
            for stat in stat_list]
def get_assert(op, operands, stats=(), compound_stats=()):
    """
    Generate an ``LSTM_ASSERT`` guarding a compound stat's operands.

    Division requires numerator <= denominator and subtraction requires
    minuend >= subtrahend; addition needs no guard.

    :param op: the compound stat's operator ('/', '-' or '+')
    :param operands: stat names combined by ``op``
    :param stats: plain stat names (read as data members)
    :param compound_stats: compound stat names (read via member functions)
    :return: the assert text plus trailing padding, or '' when no guard
        applies

    .. note:: ``stats``/``compound_stats`` used to be read from undefined
       module globals, so every call raised ``NameError``; they are now
       explicit parameters with empty defaults.
    """
    assert_kind = {
        '/' : ' <= ',
        '-' : ' >= ',
        '+' : None,
    }
    comparison = assert_kind[op]
    if comparison is None:
        # '+' needs no guard; bail out before resolving operand expressions.
        return ''
    mems = map_get_mem_or_func_call(operands, stats, compound_stats)
    return 'LSTM_ASSERT(%s);\n    ' % comparison.join(mems)
def get_contents(stats, compound_stats, stat_data):
    """C++ expression combining a compound stat's operands via its operator."""
    operator_sym = stat_data['op']
    exprs = map_get_mem_or_func_call(stat_data['operands'], stats,
                                     compound_stats)
    if operator_sym == '/':
        # Force floating-point division in the generated C++.
        exprs[-1] = 'float(%s)' % exprs[-1]
    return (' ' + operator_sym + ' ').join(exprs)
def get_thread_record_mem_fun(compound_stat, stats, compound_stats, compound_stats_kinds):
    """One inline accessor on the tls record for a single compound stat."""
    body = get_contents(stats, compound_stats,
                        compound_stats_kinds[compound_stat])
    return 'auto %s const noexcept { return %s; }' % (
        get_mem_fun_for_stat(compound_stat), body)
def get_thread_record_mem_funs(stats, compound_stats, compound_stats_kinds):
    """All compound-stat accessors for the tls record, newline separated."""
    accessors = [get_thread_record_mem_fun(cs, stats, compound_stats,
                                           compound_stats_kinds)
                 for cs in compound_stats]
    return '\n'.join(accessors)
def get_thread_record_stream_output(all_stats, stats, compound_stats):
    """Chained ``operator<<`` lines printing every stat of the tls record."""
    labels = add_trailing_whitespace([get_pretty_name(s) + ':'
                                      for s in all_stats])
    exprs = add_trailing_whitespace(
        [get_mem_or_func_call(s, stats, compound_stats) for s in all_stats])
    lines = ['<< "  ' + label + '" << ' + expr + " << '\\n'"
             for label, expr in zip(labels, exprs)]
    return '\n'.join(lines)
def get_singleton_class_mem_fun_contents(class_name,
                                         stat,
                                         stats,
                                         stats_kinds,
                                         compound_stats_kinds):
    """
    C++ expression aggregating ``stat`` across all published thread records.

    Counters and sums are totalled over every record, 'max' takes the
    per-record maximum, and compound stats combine their operands'
    aggregate accessors.

    :param class_name: name of the generated singleton class
    :param stat: stat name to aggregate
    :param stats: plain stat names
    :param stats_kinds: plain stat name -> kind string
    :param compound_stats_kinds: compound stat name -> {'op', 'operands'}
    :raises AssertionError: on an unknown plain-stat kind
    """
    if stat in stats:
        kind = stats_kinds[stat]
        if kind == 'counter' or kind == 'sum':
            return 'total_count(&%s_tls_record::%s)' % (class_name,
                                                        get_mem_name(stat))
        elif kind == 'max':
            return 'this->max(&%s_tls_record::%s)' % (class_name,
                                                      get_mem_name(stat))
        else:
            # Was `assert(false)` -- a NameError in Python; an explicit
            # AssertionError (not stripped by -O) is what was intended.
            raise AssertionError('unknown stat kind: %r' % (kind,))
    stat_data = compound_stats_kinds[stat]
    op = stat_data['op']
    # Build a list (not a map iterator) so item assignment below also works
    # on Python 3, where map() returns an iterator.
    operands = [get_mem_fun_for_stat(x) for x in stat_data['operands']]
    if op == '/':
        operands[-1] = 'float(%s)' % operands[-1]
    return (' ' + op + ' ').join(operands)
def get_singleton_class_mem_fun(class_name, stat, stats, stats_kinds, compound_stats_kinds):
    """One inline aggregate accessor on the singleton class."""
    contents = get_singleton_class_mem_fun_contents(
        class_name, stat, stats, stats_kinds, compound_stats_kinds)
    return 'auto %s const noexcept { return %s; }' % (
        get_mem_fun_for_stat(stat), contents)
def get_singleton_class_mem_funs(class_name,
                                 stats,
                                 compound_stats,
                                 stats_kinds,
                                 compound_stats_kinds):
    """All aggregate accessors: plain stats first, then compound stats."""
    accessors = [get_singleton_class_mem_fun(class_name, stat, stats,
                                             stats_kinds,
                                             compound_stats_kinds)
                 for stat in stats]
    accessors += [get_singleton_class_mem_fun(class_name, compound_stat,
                                              stats, stats_kinds,
                                              compound_stats_kinds)
                  for compound_stat in compound_stats]
    return '\n'.join(accessors)
def get_singleton_class_stream_output(all_stats):
    """Chained ``operator<<`` lines printing the aggregated stats."""
    labels = add_trailing_whitespace([get_pretty_name(s) + ':'
                                      for s in all_stats])
    calls = add_trailing_whitespace([get_mem_fun_for_stat(s)
                                     for s in all_stats])
    return '\n'.join('<< "' + label + '" << ' + call + " << '\\n'"
                     for label, call in zip(labels, calls))
def gen_stats(
        the_stats,
        include_guard,
        class_name='perf_stats',
        macro_prefix='',
        includes='',
        namespace_begin='',
        namespace_end='',
        namespace_access='',
        stat_output_ordering=None,
        stats_member_ordering=None,
        compound_stats_member_func_ordering=None):
    """Render the generated C++ stats header from `the_stats`.

    `the_stats` maps stat names either to a kind string ('counter', 'sum',
    'max') or to a dict spec for a compound stat.  The three ordering
    parameters optionally pin the emission order; remaining stats follow.

    BUG FIX: the ordering parameters previously defaulted to mutable lists
    and were extended in place with `+=`, so entries leaked across calls and
    caller-supplied lists were mutated.  They now default to None and are
    copied before use (backward-compatible: passing lists still works).
    """
    stats = list(stats_member_ordering) if stats_member_ordering else []
    stats += [k for k, v in the_stats.items()
              if isinstance(v, str) and k not in stats]
    compound_stats = (list(compound_stats_member_func_ordering)
                      if compound_stats_member_func_ordering else [])
    compound_stats += [k for k, v in the_stats.items()
                       if not isinstance(v, str) and k not in compound_stats]
    stats_kinds = {k: v for k, v in the_stats.items() if isinstance(v, str)}
    compound_stats_kinds = {k: v for k, v in the_stats.items()
                            if not isinstance(v, str)}
    all_stats = list(stat_output_ordering) if stat_output_ordering else []
    all_stats += [k for k in the_stats if k not in all_stats]
    # Every stat must be emitted exactly once, as either a member or a
    # compound member function.
    assert sorted(all_stats) == sorted(compound_stats + stats)
    return _STATS_TEMPLATE.format(
        INCLUDE_GUARD=include_guard,
        MACRO_PREFIX=macro_prefix,
        INCLUDES=indent(includes),
        CLASS_NAME=class_name,
        NAMESPACE_BEGIN=namespace_begin,
        NAMESPACE_END=namespace_end,
        NS_ACCESS=namespace_access,
        MACROS_ON=indent(get_macros_on(stats, stats_kinds, namespace_access, macro_prefix), 1),
        MACROS_OFF=get_macros_off(stats, stats_kinds, macro_prefix),
        THREAD_RECORD_MEMBERS=indent(get_thread_record_mems(stats, stats_kinds), 2),
        THREAD_RECORD_MEMBER_FUNCTIONS=indent(
            get_thread_record_mem_funs(stats, compound_stats, compound_stats_kinds), 2),
        THREAD_RECORD_STREAM_OUTPUT=indent(
            get_thread_record_stream_output(all_stats, stats, compound_stats), 4),
        TRANSACTION_LOG_MEMBER_FUNCTIONS=indent(
            get_singleton_class_mem_funs(class_name, stats, compound_stats,
                                         stats_kinds, compound_stats_kinds), 2),
        TRANSACTION_LOG_STREAM_OUTPUT=indent(get_singleton_class_stream_output(all_stats), 4),
    )
def get_stats_func(stat, compound_stats, compound_stats_kinds):
    """Name of the statsd emitter: division compounds use 'statsd_gauged'."""
    is_ratio = stat in compound_stats and compound_stats_kinds[stat]['op'] == '/'
    return 'statsd_gauged' if is_ratio else 'statsd_gauge'
def get_singleton_statsd_output(all_stats, compound_stats, compound_stats_kinds):
    """statsd emission calls for the process-wide (singleton) stats."""
    name_template = 'const_cast<char*>(LSTM_TESTNAME ".process.{NAME}")'
    calls = []
    for stat in all_stats:
        emitter = get_stats_func(stat, compound_stats, compound_stats_kinds)
        metric = name_template.format(NAME=get_mem_name(stat))
        value = 'stats.' + get_mem_fun_for_stat(stat)
        calls.append(emitter + '(link, ' + metric + ', ' + value + ');')
    return '\n'.join(calls)
def get_thread_record_statsd_output(all_stats, stats, compound_stats, compound_stats_kinds):
    """statsd emission loop over every per-thread record.

    BUG FIX: the header/footer strings used '{{' / '}}' brace escapes even
    though they are never passed through str.format(), so the emitted C++
    contained literal double braces and would not compile.  They are now
    plain single braces.
    """
    _FORMAT_NAME = 'const_cast<char*>((LSTM_TESTNAME ".thread" + std::to_string(i) + ".{NAME}").c_str())'
    stats_funcs = [get_stats_func(s, compound_stats, compound_stats_kinds) for s in all_stats]
    names = [_FORMAT_NAME.format(NAME=get_mem_name(s)) for s in all_stats]
    values = ['record.' + get_mem_or_func_call(s, stats, compound_stats) for s in all_stats]
    header = '''
int i = 0;
for (auto& record : stats.records()) {'''
    body = indent('\n'.join([stats_func + '(link, ' + name + ', ' + value + ');'
                             for stats_func, name, value in zip(stats_funcs, names, values)]))
    footer = ''' ++i;
}'''
    return '\n'.join([header, body, footer])
def gen_statsd_output(
        the_stats,
        include_guard,
        class_name='perf_stats',
        macro_prefix='',
        namespace_begin='',
        namespace_end='',
        namespace_access='',
        stat_output_ordering=None,
        stats_member_ordering=None,
        compound_stats_member_func_ordering=None):
    """Render the statsd-emission code block (singleton + per-thread).

    BUG FIX: same mutable-default-argument defect as gen_stats — the three
    ordering parameters defaulted to shared lists mutated with `+=`.  They
    now default to None and are copied (backward-compatible).
    """
    stats = list(stats_member_ordering) if stats_member_ordering else []
    stats += [k for k, v in the_stats.items()
              if isinstance(v, str) and k not in stats]
    compound_stats = (list(compound_stats_member_func_ordering)
                      if compound_stats_member_func_ordering else [])
    compound_stats += [k for k, v in the_stats.items()
                       if not isinstance(v, str) and k not in compound_stats]
    stats_kinds = {k: v for k, v in the_stats.items() if isinstance(v, str)}
    compound_stats_kinds = {k: v for k, v in the_stats.items()
                            if not isinstance(v, str)}
    all_stats = list(stat_output_ordering) if stat_output_ordering else []
    all_stats += [k for k in the_stats if k not in all_stats]
    assert sorted(all_stats) == sorted(compound_stats + stats)
    result = '\n'.join([
        get_singleton_statsd_output(all_stats, compound_stats, compound_stats_kinds),
        get_thread_record_statsd_output(all_stats, stats, compound_stats, compound_stats_kinds)])
    return indent(result, 2)
| 39.612975 | 142 | 0.550969 | 1,988 | 17,707 | 4.556841 | 0.101107 | 0.090407 | 0.073518 | 0.040181 | 0.62667 | 0.549178 | 0.49575 | 0.456011 | 0.440888 | 0.411524 | 0 | 0.003135 | 0.333371 | 17,707 | 447 | 143 | 39.612975 | 0.764317 | 0.000904 | 0 | 0.365333 | 0 | 0.010667 | 0.296761 | 0.063761 | 0 | 0 | 0 | 0 | 0.021333 | 1 | 0.077333 | false | 0 | 0.002667 | 0.026667 | 0.186667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7086b67b426ff9f8307d4800efd294b1e2f817c3 | 6,482 | py | Python | auto-traveler.py | biomadeira/auto-traveler | 38f2c086923925d9819c07bdef297ec24f2ec58f | [
"Apache-2.0"
] | null | null | null | auto-traveler.py | biomadeira/auto-traveler | 38f2c086923925d9819c07bdef297ec24f2ec58f | [
"Apache-2.0"
] | null | null | null | auto-traveler.py | biomadeira/auto-traveler | 38f2c086923925d9819c07bdef297ec24f2ec58f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""
Copyright [2009-present] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import glob
import os
import click
from utils import crw, rfam, ribovision, config
from utils.generate_model_info import generate_model_info
def get_ribotyper_output(fasta_input, output_folder, cm_library):
    """
    Run ribotyper on the fasta sequences to select the best matching covariance
    model.
    """
    long_out = os.path.join(output_folder,
                            os.path.basename(output_folder) + '.ribotyper.long.out')
    # Skip the (slow) ribotyper run if its output is already present.
    if not os.path.exists(long_out):
        cmd = ('ribotyper.pl --skipval -i %s/modelinfo.txt -f %s %s'
               % (cm_library, fasta_input, output_folder))
        print(cmd)
        os.system(cmd)
    # Reduce the long output to PASS single-hit rows: id, model, score column.
    hits_path = os.path.join(output_folder, 'hits.txt')
    cmd = "cat %s | grep -v '^#' | grep -v MultipleHits | grep PASS | awk -v OFS='\t' '{print $2, $8, $3}' > %s" % (long_out, hits_path)
    os.system(cmd)
    return hits_path
def symlink_cms(source):
    """Symlink every individual .cm file from *source* into the shared CM library."""
    for model_path in glob.glob(os.path.join(source, '*.cm')):
        # The combined 'all.cm' file is not an individual model; skip it.
        if 'all.cm' in model_path:
            continue
        destination = os.path.join(os.path.abspath(config.CM_LIBRARY),
                                   os.path.basename(model_path))
        if os.path.exists(destination):
            continue
        os.system('ln -s {} {}'.format(os.path.abspath(model_path), destination))
# Root click command group; all subcommands below are registered under it.
# (No docstring on purpose: click would surface it as --help text.)
@click.group()
def cli():
    pass
@cli.command()
def setup():
    # One-time preparation: fetch/derive all covariance-model libraries and
    # merge them into the shared CM library directory.
    if not os.path.exists(config.CM_LIBRARY):
        os.makedirs(config.CM_LIBRARY)
    rfam.setup()
    crw.setup()
    # RiboVision and CRW keep their models in separate folders; expose them
    # in the shared library via symlinks.
    symlink_cms(config.RIBOVISION_CM_LIBRARY)
    symlink_cms(config.CRW_CM_LIBRARY)
    # Regenerate modelinfo.txt covering everything now in the shared library.
    generate_model_info(cm_library=config.CM_LIBRARY)
    print('Done')
@cli.command()
@click.argument('fasta-input', type=click.Path())
@click.argument('output-folder', type=click.Path())
def draw(fasta_input, output_folder):
    """
    Single entry point for visualising 2D for an RNA sequence.
    Selects a template and runs Traveler using CRW, LSU, or Rfam libraries.
    """
    os.system('mkdir -p %s' % output_folder)
    hits = get_ribotyper_output(fasta_input, output_folder, config.CM_LIBRARY)
    with open(hits, 'r') as handle:
        for line in handle.readlines():
            rnacentral_id, model_id, _ = line.split('\t')
            print(line)
            # Route on the model-id naming convention: CRW ids contain >= 2
            # dots, RiboVision LSU ids exactly 2 underscores, Rfam otherwise.
            if model_id.count('.') >= 2:
                crw.visualise_crw(fasta_input, output_folder, rnacentral_id, model_id)
            elif model_id.count('_') == 2:
                ribovision.visualise_lsu(fasta_input, output_folder, rnacentral_id, model_id)
            else:
                rfam.visualise_rfam(fasta_input, output_folder, rnacentral_id, model_id)
# Subcommand group for operations based on CRW covariance models/templates.
@cli.group('crw')
def crw_group():
    pass
@crw_group.command('draw')
@click.option('--test', default=False, is_flag=True, help='Process only the first 10 sequences')
@click.argument('fasta-input', type=click.Path())
@click.argument('output-folder', type=click.Path())
def rrna_draw(fasta_input, output_folder, test):
    # NOTE(review): `test` is accepted for CLI symmetry but unused here.
    os.system('mkdir -p %s' % output_folder)
    hits = get_ribotyper_output(fasta_input, output_folder, config.CRW_CM_LIBRARY)
    with open(hits, 'r') as handle:
        for entry in handle.readlines():
            rnacentral_id, model_id, _ = entry.split('\t')
            crw.visualise_crw(fasta_input, output_folder, rnacentral_id, model_id)
# Registered as the `ribovision` subcommand group under the root CLI.
@cli.group('ribovision')
def ribovision_group():
    """
    Commands dealing with laying out sequences based upon RiboVision models.
    """
    pass
@ribovision_group.command('draw')
@click.argument('fasta-input', type=click.Path())
@click.argument('output-folder', type=click.Path())
def ribovision_draw(fasta_input, output_folder):
    # Lay out each ribotyper hit using the matching RiboVision LSU template.
    os.system('mkdir -p %s' % output_folder)
    hits = get_ribotyper_output(fasta_input, output_folder, config.RIBOVISION_CM_LIBRARY)
    with open(hits, 'r') as handle:
        for hit in handle.readlines():
            rnacentral_id, model_id, _ = hit.split('\t')
            ribovision.visualise_lsu(fasta_input, output_folder, rnacentral_id, model_id)
# Registered as the `rfam` subcommand group under the root CLI.
@cli.group('rfam')
def rfam_group():
    """
    Commands dealing with laying out sequences based upon Rfam models.
    """
    pass
@rfam_group.command('blacklisted')
def rfam_blacklist():
    """
    Show all blacklisted families. These include rRNA families as well as
    families that do not have any secondary structure.
    """
    blacklist = sorted(rfam.blacklisted())
    for accession in blacklist:
        print(accession)
@rfam_group.command('draw')
@click.option('--test', default=False, is_flag=True, help='Process only the first 10 sequences')
@click.argument('rfam_accession', type=click.STRING)
@click.argument('fasta-input', type=click.Path())
@click.argument('output-folder', type=click.Path())
def rfam_draw(rfam_accession, fasta_input, output_folder, test=None):
    """
    Visualise sequences using the Rfam/R-scape consensus structure as template.
    RFAM_ACCESSION - Rfam family to process (RF00001, RF00002 etc)
    """
    print(rfam_accession)
    # 'all' expands to every known Rfam family.
    accessions = rfam.get_all_rfam_acc() if rfam_accession == 'all' else [rfam_accession]
    for accession in accessions:
        if not rfam.has_structure(accession):
            print('{} does not have a conserved secondary structure'.format(accession))
            continue
        rfam.rscape2traveler(accession)
        rfam.generate_2d(accession, output_folder, fasta_input, test)
@rfam_group.command('validate')
@click.argument('rfam_accession', type=click.STRING)
@click.argument('output', type=click.File('w'))
def rfam_validate(rfam_accession, output):
    """
    Check if the given Rfam accession is one that should be drawn. If so it will
    be output to the given file, otherwise it will not.
    """
    is_blacklisted = rfam_accession in rfam.blacklisted()
    if not is_blacklisted:
        output.write(rfam_accession + '\n')
# Dispatch to the click CLI when executed as a script.
if __name__ == '__main__':
    cli()
| 34.478723 | 142 | 0.67726 | 892 | 6,482 | 4.741031 | 0.261211 | 0.076614 | 0.056751 | 0.078033 | 0.391818 | 0.354221 | 0.342398 | 0.332939 | 0.332939 | 0.28943 | 0 | 0.006012 | 0.204566 | 6,482 | 187 | 143 | 34.663102 | 0.814197 | 0.208578 | 0 | 0.307018 | 0 | 0.017544 | 0.120672 | 0.005203 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114035 | false | 0.04386 | 0.04386 | 0 | 0.166667 | 0.061404 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7087c1f44e1b42fefc6088af0233d3d5c9a7f47d | 460 | py | Python | jenkins-dashboard.py | jfm/jenkins-dashboard | 4dc4cf69f7f6be1f9cfd15b24509a96454c4de09 | [
"MIT"
] | null | null | null | jenkins-dashboard.py | jfm/jenkins-dashboard | 4dc4cf69f7f6be1f9cfd15b24509a96454c4de09 | [
"MIT"
] | 3 | 2021-03-18T20:10:45.000Z | 2021-09-07T23:37:52.000Z | jenkins-dashboard.py | jfm/jenkins-dashboard | 4dc4cf69f7f6be1f9cfd15b24509a96454c4de09 | [
"MIT"
] | null | null | null | from jenkinsdashboard.ci.jenkins import Jenkins
from jenkinsdashboard.ui.dashboard import Dashboard
import time
if __name__ == '__main__':
# jenkins = Jenkins('http://10.0.0.102:18081', 'jfm', 'c3po4all')
jenkins = Jenkins(
'http://jenkins.onboarding.liquid.int.tdk.dk', 'admin', '0nboarding')
dashboard = Dashboard(jenkins)
while True:
ci_rows = dashboard.generate()
dashboard.render(ci_rows)
time.sleep(30)
| 28.75 | 77 | 0.678261 | 54 | 460 | 5.592593 | 0.592593 | 0.13245 | 0.119205 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045576 | 0.18913 | 460 | 15 | 78 | 30.666667 | 0.764075 | 0.136957 | 0 | 0 | 0 | 0 | 0.167089 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.272727 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
708b028c1ab718e2fb6ff3f78c3ef2f30aed6475 | 3,852 | py | Python | tests.py | CTPUG/mdx_attr_cols | 8aef79857685f9913c703befe717872d4e2d1bea | [
"0BSD"
] | null | null | null | tests.py | CTPUG/mdx_attr_cols | 8aef79857685f9913c703befe717872d4e2d1bea | [
"0BSD"
] | null | null | null | tests.py | CTPUG/mdx_attr_cols | 8aef79857685f9913c703befe717872d4e2d1bea | [
"0BSD"
] | null | null | null | from unittest import TestCase
import xmltodict
from markdown import Markdown
from markdown.util import etree
from mdx_attr_cols import AttrColTreeProcessor, AttrColExtension, makeExtension
class XmlTestCaseMixin(object):
def mk_doc(self, s):
return etree.fromstring(
"<div>" + s.strip() + "</div>")
def assert_xml_equal(self, a, b):
self.assertEqual(
xmltodict.parse(etree.tostring(a)),
xmltodict.parse(etree.tostring(b)))
class TestAttrColTreeProcessor(XmlTestCaseMixin, TestCase):
def mk_processor(self, **conf):
md = Markdown()
return AttrColTreeProcessor(md, conf)
def test_config_none(self):
md = Markdown
p = AttrColTreeProcessor(md, None)
self.assertEqual(p.columns, 12)
self.assertEqual(p.attr, 'cols')
self.assertEqual(p.tags, set(['section']))
def test_config_defaults(self):
p = self.mk_processor()
self.assertEqual(p.columns, 12)
self.assertEqual(p.attr, 'cols')
self.assertEqual(p.tags, set(['section']))
def test_config_overrides(self):
p = self.mk_processor(
columns=16,
attr='columns',
tags=['section', 'div'],
)
self.assertEqual(p.columns, 16)
self.assertEqual(p.attr, 'columns')
self.assertEqual(p.tags, set(['section', 'div']))
def test_simple_rows(self):
root = self.mk_doc("""
<section cols='4'>Foo</section>
<section cols='6'>Bar</section>
<section cols='2'>Beep</section>
""")
p = self.mk_processor()
new_root = p.run(root)
self.assert_xml_equal(new_root, self.mk_doc("""
<div class="row"><div class="col-md-4"><section>Foo</section>
</div><div class="col-md-6"><section>Bar</section>
</div><div class="col-md-2"><section>Beep</section>
</div></div>
"""))
class TestAttrColExtension(TestCase):
def mk_markdown(self, extensions=None):
if extensions is None:
extensions = ['attr_list', 'mdx_outline']
md = Markdown(extensions=extensions)
return md
def assert_registered(self, md):
processor = md.treeprocessors['attr_cols']
self.assertTrue(isinstance(processor, AttrColTreeProcessor))
def assert_not_registered(self, md):
self.assertFalse('attr_cols' in md.treeprocessors)
def text_create(self):
ext = AttrColExtension({'a': 'b'})
self.assertEqual(ext.conf, {'a': 'b'})
def test_extend_markdown(self):
md = self.mk_markdown()
ext = AttrColExtension({})
ext.extendMarkdown(md)
self.assert_registered(md)
def test_missing_attr_list(self):
md = self.mk_markdown(['mdx_outline'])
ext = AttrColExtension({})
self.assertRaisesRegexp(
RuntimeError,
"The attr_cols markdown extension depends the following"
" extensions which must preceded it in the extension list:"
" attr_list, mdx_outline",
ext.extendMarkdown, md)
self.assert_not_registered(md)
def test_missing_outline(self):
md = self.mk_markdown([])
ext = AttrColExtension({})
self.assertRaisesRegexp(
RuntimeError,
"The attr_cols markdown extension depends the following"
" extensions which must preceded it in the extension list:"
" attr_list, mdx_outline",
ext.extendMarkdown, md)
self.assert_not_registered(md)
class TestExtensionRegistration(TestCase):
def test_make_extension(self):
configs = {'a': 'b'}
ext = makeExtension(**configs)
self.assertTrue(isinstance(ext, AttrColExtension))
self.assertEqual(ext.conf, configs)
| 32.369748 | 79 | 0.616044 | 427 | 3,852 | 5.42623 | 0.222482 | 0.077687 | 0.062149 | 0.02978 | 0.373759 | 0.318084 | 0.285283 | 0.259819 | 0.259819 | 0.259819 | 0 | 0.004924 | 0.261942 | 3,852 | 118 | 80 | 32.644068 | 0.81006 | 0 | 0 | 0.260417 | 0 | 0.010417 | 0.199117 | 0.048027 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0.15625 | false | 0 | 0.052083 | 0.010417 | 0.28125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
708c9cd7e0c47f6fd647b59adb4727ab13f448e2 | 1,764 | py | Python | dialogbot/search/local/tfidfmodel.py | ishine/dialogbot | 6c3d2f95555a05a3b935dda818e481ddc20eed08 | [
"Apache-2.0"
] | 68 | 2019-06-30T07:39:59.000Z | 2022-03-30T12:15:40.000Z | dialogbot/search/local/tfidfmodel.py | ishine/dialogbot | 6c3d2f95555a05a3b935dda818e481ddc20eed08 | [
"Apache-2.0"
] | 2 | 2021-06-30T10:22:17.000Z | 2021-07-27T12:41:01.000Z | dialogbot/search/local/tfidfmodel.py | ishine/dialogbot | 6c3d2f95555a05a3b935dda818e481ddc20eed08 | [
"Apache-2.0"
] | 16 | 2019-08-22T16:05:53.000Z | 2022-03-11T07:51:27.000Z | # -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description:
"""
import time
from gensim import corpora, models, similarities
from dialogbot.reader.data_helper import load_corpus_file
from dialogbot.utils.log import logger
class TfidfModel:
    """TF-IDF retrieval model over a QA corpus (gensim-backed).

    Builds a dictionary and tfidf transform from the corpus contexts and a
    dense similarity index used to retrieve the most similar stored contexts.
    """

    def __init__(self, corpus_file, word2id):
        # corpus_file: path loaded via load_corpus_file into parallel
        # (contexts, responses) lists; at most 50000 pairs are kept.
        time_s = time.time()
        self.contexts, self.responses = load_corpus_file(corpus_file, word2id, size=50000)
        self._train_model()
        # Transform the whole bow corpus to tfidf space and index it for
        # similarity queries.
        self.corpus_mm = self.tfidf_model[self.corpus]
        self.index = similarities.MatrixSimilarity(self.corpus_mm)
        logger.debug("Time to build tfidf model by %s: %2.f seconds." % (corpus_file, time.time() - time_s))

    def _train_model(self, min_freq=1):
        # Create tfidf model.
        self.dct = corpora.Dictionary(self.contexts)
        # Filter low frequency words from dictionary.
        low_freq_ids = [id_ for id_, freq in
                        self.dct.dfs.items() if freq <= min_freq]
        self.dct.filter_tokens(low_freq_ids)
        self.dct.compactify()
        # Build tfidf model.
        self.corpus = [self.dct.doc2bow(s) for s in self.contexts]
        self.tfidf_model = models.TfidfModel(self.corpus)

    def _text2vec(self, text):
        # Convert a tokenised text (list of tokens) to its tfidf vector.
        bow = self.dct.doc2bow(text)
        return self.tfidf_model[bow]

    def similarity(self, query, size=10):
        # Rank all stored contexts against the query; returns the top `size`
        # (index, score) pairs, best first.
        vec = self._text2vec(query)
        sims = self.index[vec]
        sim_sort = sorted(list(enumerate(sims)),
                          key=lambda item: item[1], reverse=True)
        return sim_sort[:size]

    def get_docs(self, sim_items):
        # Map (index, score) pairs back to the stored contexts and responses.
        docs = [self.contexts[id_] for id_, score in sim_items]
        answers = [self.responses[id_] for id_, score in sim_items]
        return docs, answers
| 33.923077 | 108 | 0.647392 | 235 | 1,764 | 4.67234 | 0.395745 | 0.054645 | 0.040984 | 0.03643 | 0.083789 | 0.040073 | 0.040073 | 0 | 0 | 0 | 0 | 0.01497 | 0.24263 | 1,764 | 51 | 109 | 34.588235 | 0.806886 | 0.086735 | 0 | 0 | 0 | 0 | 0.02875 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.151515 | false | 0 | 0.121212 | 0 | 0.393939 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7091366072f1274a003619cb14cf65bebdc5b41f | 6,052 | py | Python | aslam_offline_calibration/kalibr/python/kalibr_evaluation_calibration.py | zhixy/multical | b5eeb6283f4ad68def4b62c10416a6764651e771 | [
"BSD-3-Clause"
] | 27 | 2021-03-26T12:03:48.000Z | 2022-03-29T02:16:56.000Z | aslam_offline_calibration/kalibr/python/kalibr_evaluation_calibration.py | zhixy/multical | b5eeb6283f4ad68def4b62c10416a6764651e771 | [
"BSD-3-Clause"
] | 2 | 2021-03-26T14:34:51.000Z | 2021-11-03T09:14:16.000Z | aslam_offline_calibration/kalibr/python/kalibr_evaluation_calibration.py | zhixy/multical | b5eeb6283f4ad68def4b62c10416a6764651e771 | [
"BSD-3-Clause"
] | 9 | 2021-08-23T11:25:29.000Z | 2022-03-28T13:22:39.000Z | #!/usr/bin/env python
import argparse
import kalibr_common as kc
from mpl_toolkits.mplot3d import art3d, Axes3D, proj3d
import numpy as np
import pylab as pl
import sm
import glob
def parse_arguments():
    """Build and parse the CLI arguments for the evaluation script."""
    parser = argparse.ArgumentParser(
        description='read calibration results from yaml and compare with ground truth')
    parser.add_argument('--reference-sensor', dest='reference_sensor',
                        help='Specify the sensor as the reference coordinate system: camera0 or imu0', required=True)
    # The three sensor types take an identical pair of optional arguments;
    # generate them from one table instead of repeating the boilerplate.
    for prefix, noun in (('cam', 'camera'), ('lidar', 'lidar'), ('imu', 'imu')):
        parser.add_argument(
            '--{}-ground-truth'.format(prefix),
            dest='{}_ground_truth'.format(prefix),
            help='the name of yaml file which stores the ground truth of {} extrinsics'.format(noun),
            required=False)
        parser.add_argument(
            '--{}-file-name-prefix'.format(prefix),
            dest='{}_file_name_prefix'.format(prefix),
            help='the name prefix of yaml file which stores the calibration results of {} extrinsics'.format(noun),
            required=False)
    return parser.parse_args()
def calcErrorGTAndEstimation(ext_gt, ext):
    """Euler-vector error between a ground-truth extrinsic and an estimate."""
    relative = ext_gt.inverse() * ext
    return sm.fromTEuler(relative.T())
def main():
    """Compare calibrated extrinsics against ground truth for each sensor type.

    For every sensor family (camera, LiDAR, IMU) whose ground-truth file and
    result-file glob are both given on the command line, collect the error
    vector of each calibration run and print its per-component mean/variance.
    """
    parsed_args = parse_arguments()
    # --- Cameras -----------------------------------------------------------
    if parsed_args.cam_ground_truth and parsed_args.cam_file_name_prefix:
        cam_chain_ext_gt = kc.CameraChainParameters(parsed_args.cam_ground_truth)
        ext_gt_list = []
        num_cam = cam_chain_ext_gt.numCameras()
        # Camera 0 is the reference, so only cameras 1..N-1 have extrinsics.
        for camNr in range(1, num_cam):
            ext_gt_list.append(cam_chain_ext_gt.getExtrinsicsReferenceToCam(camNr))
        err_vec_list_list = [[] for _ in range(num_cam - 1)]
        # Each file matching the glob is one calibration run.
        for file_name in glob.glob(parsed_args.cam_file_name_prefix):
            cam_chain_ext = kc.CameraChainParameters(file_name, parsed_args.reference_sensor)
            for camNr in range(1, num_cam):
                ext = cam_chain_ext.getExtrinsicsReferenceToCam(camNr)
                err_vec = calcErrorGTAndEstimation(ext_gt_list[camNr-1], ext)
                err_vec_list_list[camNr-1].append(err_vec)
        for idx, err_vec_list in enumerate(err_vec_list_list):
            err_mat = np.array(err_vec_list)
            err_mean = np.mean(err_mat, axis=0)
            err_variance = np.var(err_mat, axis=0)
            print("cam {} extrinsic calibration error".format(idx+1))
            print("mean of error: ", err_mean)
            print("variance of error: ", err_variance)
    # --- LiDARs ------------------------------------------------------------
    if parsed_args.lidar_ground_truth and parsed_args.lidar_file_name_prefix:
        lidar_list_ext_gt = kc.LiDARListParameters(parsed_args.lidar_ground_truth, parsed_args.reference_sensor)
        ext_gt_list = []
        num_lidar = lidar_list_ext_gt.numLiDARs()
        for idx in range(0, num_lidar):
            lidar_parameter = lidar_list_ext_gt.getLiDARParameters(idx)
            ext_gt_list.append(lidar_parameter.getExtrinsicsReferenceToHere())
        err_vec_list_list = [[] for _ in range(num_lidar)]
        for file_name in glob.glob(parsed_args.lidar_file_name_prefix):
            lidar_list_ext = kc.LiDARListParameters(file_name, parsed_args.reference_sensor)
            for idx in range(num_lidar):
                lidar_parameter = lidar_list_ext.getLiDARParameters(idx)
                ext = lidar_parameter.getExtrinsicsReferenceToHere()
                err_vec = calcErrorGTAndEstimation(ext_gt_list[idx], ext)
                err_vec_list_list[idx].append(err_vec)
        for idx, err_vec_list in enumerate(err_vec_list_list):
            err_mat = np.array(err_vec_list)
            err_mean = np.mean(err_mat, axis=0)
            err_variance = np.var(err_mat, axis=0)
            print("LiDAR {} extrinsic calibration error".format(idx))
            print("mean of error: ", err_mean)
            print("variance of error: ", err_variance)
    # --- IMUs --------------------------------------------------------------
    if parsed_args.imu_ground_truth and parsed_args.imu_file_name_prefix:
        imu_list_ext_gt = kc.ImuSetParameters(parsed_args.imu_ground_truth, parsed_args.reference_sensor)
        ext_gt_list = []
        num_imu = imu_list_ext_gt.numImus()
        for idx in range(0, num_imu):
            imu_parameter = imu_list_ext_gt.getImuParameters(idx)
            ext_gt_list.append(imu_parameter.getExtrinsicsReferenceToHere())
        err_vec_list_list = [[] for _ in range(num_imu)]
        for file_name in glob.glob(parsed_args.imu_file_name_prefix):
            imu_list_ext = kc.ImuSetParameters(file_name, parsed_args.reference_sensor)
            for idx in range(num_imu):
                imu_parameter = imu_list_ext.getImuParameters(idx)
                ext = imu_parameter.getExtrinsicsReferenceToHere()
                err_vec = calcErrorGTAndEstimation(ext_gt_list[idx], ext)
                err_vec_list_list[idx].append(err_vec)
        for idx, err_vec_list in enumerate(err_vec_list_list):
            err_mat = np.array(err_vec_list)
            err_mean = np.mean(err_mat, axis=0)
            err_variance = np.var(err_mat, axis=0)
            print("IMU {} extrinsic calibration error".format(idx))
            print("mean of error: ", err_mean)
            print("variance of error: ", err_variance)
# Script entry point.
if __name__ == "__main__":
    main()
| 45.503759 | 119 | 0.666887 | 799 | 6,052 | 4.739675 | 0.138924 | 0.03644 | 0.039609 | 0.033272 | 0.699498 | 0.634275 | 0.616583 | 0.561922 | 0.476895 | 0.415632 | 0 | 0.004392 | 0.247521 | 6,052 | 132 | 120 | 45.848485 | 0.827185 | 0.003305 | 0 | 0.368852 | 0 | 0 | 0.179738 | 0.014923 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02459 | false | 0 | 0.057377 | 0 | 0.098361 | 0.07377 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7092b52d42b2a6cc2e1c28dd93180668936123db | 3,251 | bzl | Python | antlir/vm/bzl/install_kernel_modules.bzl | zeroxoneb/antlir | 811d88965610d16a5c85d831d317f087797ca732 | [
"MIT"
] | 28 | 2020-08-11T16:22:46.000Z | 2022-03-04T15:41:52.000Z | antlir/vm/bzl/install_kernel_modules.bzl | zeroxoneb/antlir | 811d88965610d16a5c85d831d317f087797ca732 | [
"MIT"
] | 137 | 2020-08-11T16:07:49.000Z | 2022-02-27T10:59:05.000Z | antlir/vm/bzl/install_kernel_modules.bzl | zeroxoneb/antlir | 811d88965610d16a5c85d831d317f087797ca732 | [
"MIT"
] | 10 | 2020-09-10T00:01:28.000Z | 2022-03-08T18:00:28.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
load("@bazel_skylib//lib:paths.bzl", "paths")
load("//antlir/bzl:image.bzl", "image")
load("//antlir/bzl:oss_shim.bzl", "buck_genrule")
load("//antlir/bzl/image/feature:defs.bzl", "feature")
def install_kernel_modules(kernel, module_list):
    """Return image features that install `module_list` for `kernel`.

    Copies the requested modules out of the kernel's module layer into
    /usr/lib/modules/<uname>/kernel, clones the modules.* metadata files,
    and writes a modules-load.d conf so systemd loads them in the initrd.
    """

    # This intermediate genrule is here to create a dir hierarchy
    # of kernel modules that are needed for the initrd.  This
    # provides a single dir that can be cloned into the initrd
    # layer and allows for kernel modules that might be missing
    # from different kernel builds.
    buck_genrule(
        name = kernel.uname + "-selected--modules",
        out = ".",
        cmd = """
mkdir -p $OUT
pushd $OUT 2>/dev/null
# copy the needed modules out of the module layer
binary_path=( $(exe //antlir:find-built-subvol) )
layer_loc="$(location {module_layer})"
mod_layer_path=\\$( "${{binary_path[@]}}" "$layer_loc" )
mods="{module_list}"
for mod in $mods; do
mod_src="$mod_layer_path/kernel/$mod"
if [[ -f "$mod_src" ]]; then
mod_dir=\\$(dirname "$mod")
mkdir -p "$mod_dir"
cp "$mod_src" "$mod_dir"
fi
done
""".format(
            module_layer = kernel.artifacts.modules,
            module_list = " ".join(module_list),
        ),
        antlir_rule = "user-internal",
    )

    # modules-load.d entries are bare module names (basename, no extension).
    # NOTE(review): this rule name lacks the '-' before 'selected' (unlike the
    # modules rule above); the consumer below matches it, so kept as-is.
    buck_genrule(
        name = kernel.uname + "selected--modules-load.conf",
        cmd = "echo '{}' > $OUT".format("\n".join([
            paths.basename(module).rsplit(".")[0]
            for module in module_list
        ])),
        antlir_rule = "user-internal",
        visibility = [],
    )

    return [
        # Install the kernel modules specified in module_list above into the
        # layer
        image.ensure_subdirs_exist("/usr/lib", paths.join("modules", kernel.uname)),
        feature.install(
            image.source(
                source = ":" + kernel.uname + "-selected--modules",
                path = ".",
            ),
            paths.join("/usr/lib/modules", kernel.uname, "kernel"),
        ),
        # Clone the modules.{dep,symbols,alias,builtin}{,.bin} metadata files
        # from the module layer next to the installed modules.
        [
            [
                image.clone(
                    kernel.artifacts.modules,
                    paths.join("/modules.{}".format(f)),
                    paths.join("/usr/lib/modules", kernel.uname, "modules.{}".format(f)),
                ),
                image.clone(
                    kernel.artifacts.modules,
                    paths.join("/modules.{}.bin".format(f)),
                    paths.join("/usr/lib/modules", kernel.uname, "modules.{}.bin".format(f)),
                ),
            ]
            for f in ("dep", "symbols", "alias", "builtin")
        ],
        # Ensure the kernel modules are loaded by systemd when the initrd is started
        image.ensure_subdirs_exist("/usr/lib", "modules-load.d"),
        feature.install(":" + kernel.uname + "selected--modules-load.conf", "/usr/lib/modules-load.d/initrd-modules.conf"),
    ]
| 38.702381 | 123 | 0.538296 | 364 | 3,251 | 4.711538 | 0.35989 | 0.051312 | 0.037901 | 0.060641 | 0.290962 | 0.273469 | 0.177843 | 0.110787 | 0.054811 | 0.054811 | 0 | 0.000907 | 0.321747 | 3,251 | 83 | 124 | 39.168675 | 0.776871 | 0.177484 | 0 | 0.19697 | 0 | 0 | 0.426531 | 0.117625 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015152 | false | 0 | 0 | 0 | 0.030303 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70956e742f00e379dd33cde03763c0b2f6948b87 | 12,173 | py | Python | medis/Detector/get_photon_data.py | RupertDodkins/medis | bdb1f00fb93506da2a1f251bc6780e70e97a16c5 | [
"MIT"
] | 1 | 2021-06-25T17:35:56.000Z | 2021-06-25T17:35:56.000Z | medis/Detector/get_photon_data.py | RupertDodkins/medis | bdb1f00fb93506da2a1f251bc6780e70e97a16c5 | [
"MIT"
] | null | null | null | medis/Detector/get_photon_data.py | RupertDodkins/medis | bdb1f00fb93506da2a1f251bc6780e70e97a16c5 | [
"MIT"
] | 2 | 2018-12-08T15:05:13.000Z | 2019-08-08T17:28:24.000Z | """Top level code that takes a atmosphere phase map and propagates a wavefront through the system"""
import os
import numpy as np
import traceback
import multiprocessing
import glob
import random
import pickle as pickle
import time
from proper_mod import prop_run
from medis.Utils.plot_tools import quicklook_im, view_datacube, loop_frames
from medis.Utils.misc import dprint
from medis.params import ap,cp,tp,mp,sp,iop,dp
import medis.Detector.MKIDs as MKIDs
import medis.Detector.H2RG as H2RG
import medis.Detector.pipeline as pipe
import medis.Detector.readout as read
import medis.Telescope.aberrations as aber
import medis.Atmosphere.atmos as atmos
sentinel = None
def gen_timeseries(inqueue, photon_table_queue, outqueue, conf_obj_tup):
"""
generates observation sequence by calling optics_propagate in time series
is the time loop wrapper for optics_propagate
this is where the observation sequence is generated (timeseries of observations by the detector)
thus, where the detector observes the wavefront created by optics_propagate (for MKIDs, the probability distribution)
:param inqueue: time index for parallelization (used by multiprocess)
:param photon_table_queue: photon table (list of photon packets) in the multiprocessing format
:param spectralcube_queue: series of intensity images (spectral image cube) in the multiprocessing format
:param xxx_todo_changeme:
:return:
"""
# TODO change this name
(tp,ap,sp,iop,cp,mp) = conf_obj_tup
try:
if tp.detector == 'MKIDs':
with open(iop.device_params, 'rb') as handle:
dp = pickle.load(handle)
start = time.time()
for it, t in enumerate(iter(inqueue.get, sentinel)):
kwargs = {'iter': t, 'params': [ap, tp, iop, sp]}
_, save_E_fields = prop_run('medis.Telescope.optics_propagate', 1, ap.grid_size, PASSVALUE=kwargs,
VERBOSE=False, PHASE_OFFSET=1)
print(save_E_fields.shape)
spectralcube = np.sum(np.abs(save_E_fields[-1, :, :]) ** 2, axis=1)
if tp.detector == 'ideal':
image = np.sum(spectralcube, axis=0)
vmin = np.min(spectralcube)*10
# cube = ideal.assign_calibtime(spectralcube,PASSVALUE['iter'])
# cube = rawImageIO.arange_into_cube(packets, value='phase')
# rawImageIO.make_phase_map(cube, plot=True)
# return ''
elif tp.detector == 'MKIDs':
packets = read.get_packets(spectralcube, t, dp, mp)
# packets = read.get_packets(save_E_fields, t, dp, mp)
# if sp.show_wframe or sp.show_cube or sp.return_spectralcube:
cube = pipe.arange_into_cube(packets, (mp.array_size[0], mp.array_size[1]))
if mp.remove_close:
timecube = read.remove_close_photons(cube)
if sp.show_wframe:
image = pipe.make_intensity_map(cube, (mp.array_size[0], mp.array_size[1]))
# Interpolating spectral cube from ap.nwsamp discreet wavelengths
# if sp.show_cube or sp.return_spectralcube:
spectralcube = pipe.make_datacube(cube, (mp.array_size[0], mp.array_size[1], ap.w_bins))
if sp.save_obs:
command = read.get_obs_command(packets,t)
photon_table_queue.put(command)
vmin = 0.9
if sp.show_wframe:
dprint((sp.show_wframe, sp.show_wframe == 'continuous'))
quicklook_im(image, logAmp=True, show=sp.show_wframe, vmin=vmin)
if sp.show_cube:
view_datacube(spectralcube, logAmp=True, vmin=vmin)
if sp.use_gui:
gui_images = np.zeros_like(save_E_fields, dtype=np.float)
phase_ind = sp.gui_map_type == 'phase'
amp_ind = sp.gui_map_type == 'amp'
gui_images[phase_ind] = np.angle(save_E_fields[phase_ind], deg=False)
gui_images[amp_ind] = np.absolute(save_E_fields[amp_ind])
outqueue.put((t, gui_images, spectralcube))
elif sp.return_E:
outqueue.put((t, save_E_fields))
else:
outqueue.put((t, spectralcube))
now = time.time()
elapsed = float(now - start) / 60.
each_iter = float(elapsed) / (it + 1)
print('***********************************')
dprint(f'{elapsed:.2f} minutes elapsed, each time step took {each_iter:.2f} minutes') #* ap.numframes/sp.num_processes TODO change to log #
except Exception as e:
traceback.print_exc()
# raise e
pass
def wait_until(somepredicate, timeout, period=0.25, *args, **kwargs):
mustend = time.time() + timeout
while time.time() < mustend:
if somepredicate(*args, **kwargs): return True
time.sleep(period)
return False
def run_medis(EfieldsThread=None, plot=False):
"""
main script to organize calls to various aspects of the simulation
initialize different sub-processes, such as atmosphere and aberration maps, MKID device parameters
sets up the multiprocessing features
returns the observation sequence created by gen_timeseries
:return: obs_sequence
"""
# Printing Params
dprint("Checking Params Info-print params from here (turn on/off)")
# TODO change this to a logging function
# for param in [ap, cp, tp, mp, sp, iop]:
# print('\n', param)
# pprint(param.__dict__)
iop.makedir() # make the directories at this point in case the user doesn't want to keep changing params.py
check = read.check_exists_obs_sequence(plot)
if check:
if iop.obs_seq[-3:] == '.h5':
obs_sequence = read.open_obs_sequence_hdf5(iop.obs_seq)
else:
obs_sequence = read.open_obs_sequence(iop.obs_seq)
return obs_sequence
begin = time.time()
print('Creating New MEDIS Simulation')
print('********** Taking Obs Data ***********')
try:
multiprocessing.set_start_method('spawn')
except RuntimeError:
pass
# initialize atmosphere
print("Atmosdir = %s " % iop.atmosdir)
if tp.use_atmos and glob.glob(iop.atmosdir + '/*.fits') == []:
atmos.generate_maps()
# initialize telescope
if (tp.aber_params['QuasiStatic'] is True) and glob.glob(iop.aberdir + 'quasi/*.fits') == []:
aber.generate_maps(tp.f_lens)
if tp.aber_params['NCPA']:
aber.generate_maps(tp.f_lens, 'NCPA', 'lens')
# if tp.servo_error:
# aber.createObjMapsEmpty()
aber.initialize_CPA_meas()
if tp.active_null:
aber.initialize_NCPA_meas()
if sp.save_locs is None:
sp.save_locs = []
if 'detector' not in sp.save_locs:
sp.save_locs = np.append(sp.save_locs, 'detector')
sp.gui_map_type = np.append(sp.gui_map_type, 'amp')
# initialize MKIDs
if tp.detector == 'MKIDs' and not os.path.isfile(iop.device_params):
MKIDs.initialize()
photon_table_queue = multiprocessing.Queue()
inqueue = multiprocessing.Queue()
outqueue = multiprocessing.Queue()
jobs = []
if sp.save_obs and tp.detector == 'MKIDs':
proc = multiprocessing.Process(target=read.handle_output, args=(photon_table_queue, iop.obsfile))
proc.start()
if ap.companion is False:
ap.contrast = []
if tp.detector == 'MKIDs':
obs_sequence = np.zeros((ap.numframes, ap.w_bins, mp.array_size[1], mp.array_size[0]))
else:
obs_sequence = np.zeros((ap.numframes, ap.w_bins, ap.grid_size, ap.grid_size))
if sp.return_E:
e_fields_sequence = np.zeros((ap.numframes, len(sp.save_locs),
ap.nwsamp, 1 + len(ap.contrast),
ap.grid_size, ap.grid_size), dtype=np.complex64)
else:
e_fields_sequence = None
# Sending Queues to gen_timeseries
for i in range(sp.num_processes):
p = multiprocessing.Process(target=gen_timeseries, args=(inqueue, photon_table_queue, outqueue, (tp,ap,sp,iop,cp,mp)))
jobs.append(p)
p.start()
if tp.quick_ao:
for t in range(ap.startframe, ap.startframe + ap.numframes):
inqueue.put(t)
if sp.use_gui:
it, gui_images, spectralcube = outqueue.get()
while sp.play_gui is False:
time.sleep(0.005)
EfieldsThread.newSample.emit(gui_images)
EfieldsThread.sct.newSample.emit((it, spectralcube))
else:
dprint('If the code has hung here it probably means it cant read the CPA file at some iter')
for t in range(ap.startframe, ap.startframe+ap.numframes):
# time.sleep(rollout[t])
print(t)
if not tp.active_null:
with open(iop.CPA_meas, 'rb') as handle:
_, iters = pickle.load(handle)
# print t, iter, 't, iter'
print(iters, 'iters')
while iters[0] + ap.startframe < t:
time.sleep(0.1)
print('looping', t)
try:
with open(iop.CPA_meas, 'rb') as handle:
_, iters = pickle.load(handle)
iter = iters[0]
# sys.stdout.write("\rWaiting for aberration measurements...\n")
# sys.stdout.flush()
except EOFError:
print('Errored')
else:
with open(iop.NCPA_meas, 'rb') as handle:
_,_, iter = pickle.load(handle)
while iter < t:
time.sleep(0.1)
try:
with open(iop.NCPA_meas, 'rb') as handle:
_,_, iter = pickle.load(handle)
# sys.stdout.write("\rWaiting for aberration measurements...\n")
# sys.stdout.flush()
except EOFError:
print('Errored')
# if t in delay_inds:
# with open(iop.NCPA_meas, 'rb') as handle:
# _, _, iter = pickle.load(handle)
# print iter, t
# while iter != t:
# with open(iop.NCPA_meas, 'rb') as handle:
# _, _, iter = pickle.load(handle)
# # wait_until()
inqueue.put(t)
for i in range(sp.num_processes):
# Send the sentinal to tell Simulation to end
inqueue.put(sentinel)
for t in range(ap.numframes):
if sp.return_E:
t, save_E_fields = outqueue.get()
e_fields_sequence[t - ap.startframe] = save_E_fields
else:
t, spectralcube = outqueue.get()
obs_sequence[t - ap.startframe] = spectralcube # should be in the right order now because of the identifier
# for i, p in enumerate(jobs):
# p.join()
photon_table_queue.put(None)
outqueue.put(None)
if sp.save_obs and tp.detector == 'MKIDs':
proc.join()
obs_sequence = np.array(obs_sequence)
print('MEDIS Data Run Completed')
finish = time.time()
if sp.timing is True:
print(f'Time elapsed: {(finish-begin)/60:.2f} minutes')
print('**************************************')
print(f"Shape of obs_sequence = {np.shape(obs_sequence)}")
if tp.detector == 'H2RG':
obs_sequence = H2RG.scale_to_luminos(obs_sequence)
if tp.detector == 'H2RG' and hp.use_readnoise:
obs_sequence = H2RG.add_readnoise(obs_sequence, hp.readnoise)
if sp.return_E:
read.save_fields(e_fields_sequence, fields_file=iop.fields)
return e_fields_sequence
else:
dprint("Saving obs_sequence as hdf5 file:")
read.save_obs_sequence(obs_sequence, obs_seq_file=iop.obs_seq)
return obs_sequence
if __name__ == '__main__':
sp.timing = True
run_medis()
| 36.446108 | 147 | 0.594348 | 1,531 | 12,173 | 4.561724 | 0.239059 | 0.034651 | 0.01575 | 0.012027 | 0.205183 | 0.171535 | 0.128007 | 0.111684 | 0.108247 | 0.08047 | 0 | 0.006078 | 0.297133 | 12,173 | 333 | 148 | 36.555556 | 0.810192 | 0.204962 | 0 | 0.229268 | 0 | 0 | 0.076754 | 0.015916 | 0 | 0 | 0 | 0.009009 | 0 | 1 | 0.014634 | false | 0.014634 | 0.087805 | 0 | 0.121951 | 0.102439 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
709b6bd1dc6310e9d17ea3ad5431e576cabcfddb | 10,286 | py | Python | src/utils/workflow_utils.py | cmikke97/AMSG | ddcfeb6262124e793fcee385405365417e57f91f | [
"Apache-2.0"
] | 3 | 2021-06-30T07:22:46.000Z | 2022-03-23T08:21:10.000Z | src/utils/workflow_utils.py | cmikke97/Automatic-Malware-Signature-Generation | ddcfeb6262124e793fcee385405365417e57f91f | [
"Apache-2.0"
] | null | null | null | src/utils/workflow_utils.py | cmikke97/Automatic-Malware-Signature-Generation | ddcfeb6262124e793fcee385405365417e57f91f | [
"Apache-2.0"
] | null | null | null | # Copyright 2021, Crepaldi Michele.
#
# Developed as a thesis project at the TORSEC research group of the Polytechnic of Turin (Italy) under the supervision
# of professor Antonio Lioy and engineer Andrea Atzeni and with the support of engineer Andrea Marcelli.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import base64 # provides functions for encoding/decoding binary data to/from printable ASCII characters
import hashlib # implements a common interface to many different secure hash and message digest algorithms
import mlflow # open source platform for managing the end-to-end machine learning lifecycle
from logzero import logger # robust and effective logging for Python
from mlflow.entities import RunStatus # status of a Run
from mlflow.tracking.fluent import _get_experiment_id # get current experiment id function
from mlflow.utils import mlflow_tags # mlflow tags
class Hash:
""" Simple wrapper around hashlib sha256 functions. """
def __init__(self):
""" Initialize hash class using hashlib sha256 implementation. """
# initialize sha256 hash object
self.m = hashlib.sha256()
def update(self,
w): # string to update hash value with
""" Update current hash value.
Args:
w: String to update hash value with
"""
# update current hash with w
self.m.update(w.encode('utf-8'))
def copy(self):
""" Return a copy of the Hash object
Returns:
Copy of the current Hash instance
"""
# instantiate new hash object
copy = Hash()
# copy current object sha256 into the new instance
copy.m = self.m.copy()
# return the new instance
return copy
def get_b64(self):
""" Get base64 encoding of the current hash value digest.
Returns:
Base64 encoding of the hash digest.
"""
# return base64 encoded (url safe) hash digest
return base64.urlsafe_b64encode(self.m.digest()).decode('utf-8')
def _already_ran(entry_point_name, # entry point name of the run
parameters, # parameters of the run
git_commit, # git version of the code run
config_sha, # sha256 of config file
ignore_git=False, # whether to ignore git version or not (default: False)
experiment_id=None, # experiment id (default: None)
resume=False): # whether to resume a failed/killed previous run or not (default: False)
""" Best-effort detection of if a run with the given entrypoint name, parameters, and experiment id already ran.
The run must have completed successfully and have at least the parameters provided.
Args:
entry_point_name: Entry point name of the run
parameters: Parameters of the run
git_commit: Git version of the code run
config_sha: Sha256 of config file
ignore_git: Whether to ignore git version or not (default: False)
experiment_id: Experiment id (default: None)
resume: Whether to resume a failed/killed previous run (only for training) or not (default: False)
Returns:
Previously executed run if found, None otherwise.
"""
# if experiment ID is not provided retrieve current experiment ID
experiment_id = experiment_id if experiment_id is not None else _get_experiment_id()
# instantiate MLflowClient (creates and manages experiments and runs)
client = mlflow.tracking.MlflowClient()
# get reversed list of run information (from last to first)
all_run_infos = reversed(client.list_run_infos(experiment_id))
run_to_resume_id = None
# for all runs info
for run_info in all_run_infos:
# fetch run from backend store
full_run = client.get_run(run_info.run_id)
# get run dictionary of tags
tags = full_run.data.tags
# if there is no entry point, or the entry point for the run is different from 'entry_point_name', continue
if tags.get(mlflow_tags.MLFLOW_PROJECT_ENTRY_POINT, None) != entry_point_name:
continue
# initialize 'match_failed' bool to false
match_failed = False
# for each parameter in the provided run parameters
for param_key, param_value in parameters.items():
# get run param value from the run dictionary of parameters
run_value = full_run.data.params.get(param_key)
# if the current parameter value is different from the run parameter set 'match_failed' to true and break
if str(run_value) != str(param_value):
match_failed = True
break
# if the current run is not the one we are searching for go to the next one
if match_failed:
continue
# get previous run git commit version
previous_version = tags.get(mlflow_tags.MLFLOW_GIT_COMMIT, None)
# if the previous version is different from the current one, go to the next one
if not ignore_git and git_commit != previous_version:
logger.warning("Run matched, but has a different source version, so skipping (found={}, expected={})"
.format(previous_version, git_commit))
continue
# get config file sha256 from the run
run_config_sha = full_run.data.params.get('config_sha')
# if the config file sha256 for the run is different from the current sha, go to the next one
if str(run_config_sha) != str(config_sha):
logger.warning("Run matched, but config is different.")
continue
# if the run is not finished
if run_info.to_proto().status != RunStatus.FINISHED:
if resume:
# if resume is enabled, set current run to resume id -> if no newer completed run is found,
# this stopped run will be resumed
run_to_resume_id = run_info.run_id
continue
else: # otherwise skip it and try with the next one
logger.warning("Run matched, but is not FINISHED, so skipping " "(run_id={}, status={})"
.format(run_info.run_id, run_info.status))
continue
# otherwise (if the run was found and it is exactly the same), return the found run
return client.get_run(run_info.run_id)
# if no previously executed (and finished) run was found but a stopped run was found, resume such run
if run_to_resume_id is not None:
logger.info("Resuming run with entrypoint=%s and parameters=%s" % (entry_point_name, parameters))
# update new run parameters with the stopped run id
parameters.update({
'run_id': run_to_resume_id
})
# submit new run that will resume the previously interrupted one
submitted_run = mlflow.run(".", entry_point_name, parameters=parameters)
# log config file sha256 as parameter in the submitted run
client.log_param(submitted_run.run_id, 'config_sha', config_sha)
# return submitted (new) run
return mlflow.tracking.MlflowClient().get_run(submitted_run.run_id)
# if the searched run was not found return 'None'
logger.warning("No matching run has been found.")
return None
def run(entrypoint, # entrypoint of the run
parameters, # parameters of the run
config_sha): # sha256 of config file
""" Launch run.
Args:
entrypoint: Entrypoint of the run
parameters: Parameters of the run
config_sha: Sha256 of config file
Returns:
Launched run.
"""
# get mlflow tracking client
client = mlflow.tracking.MlflowClient()
logger.info("Launching new run for entrypoint={} and parameters={}".format(entrypoint, parameters))
# submit (start) run
submitted_run = mlflow.run(".", entrypoint, parameters=parameters)
# log config file sha256 as parameter in the submitted run
client.log_param(submitted_run.run_id, 'config_sha', config_sha)
# return run
return client.get_run(submitted_run.run_id)
def get_or_run(entrypoint, # entrypoint of the run
parameters, # parameters of the run
git_commit, # git version of the run
config_sha, # sha256 of config file
ignore_git=False, # whether to ignore git version or not (default: False)
use_cache=True, # whether to cache previous runs or not (default: True)
resume=False): # whether to resume a failed/killed previous run or not (default: False)
""" Get previously executed run, if it exists, or launch run.
Args:
entrypoint: Entrypoint of the run
parameters: Parameters of the run
git_commit: Git version of the run
config_sha: Sha256 of config file
ignore_git: Whether to ignore git version or not (default: False)
use_cache: Whether to cache previous runs or not (default: True)
resume: Whether to resume a failed/killed previous run or not (default: False)
Returns:
Found or launched run.
"""
# get already executed run, if it exists
existing_run = _already_ran(entrypoint, parameters, git_commit,
ignore_git=ignore_git, resume=resume, config_sha=config_sha)
# if we want to cache previous runs and we found a previously executed run, return found run
if use_cache and existing_run:
logger.info("Found existing run for entrypoint={} and parameters={}".format(entrypoint, parameters))
return existing_run
# otherwise, start run and return it
return run(entrypoint=entrypoint, parameters=parameters, config_sha=config_sha)
| 44.336207 | 118 | 0.66994 | 1,402 | 10,286 | 4.808131 | 0.204708 | 0.016318 | 0.016615 | 0.020175 | 0.33897 | 0.278149 | 0.257084 | 0.249963 | 0.233645 | 0.226969 | 0 | 0.009163 | 0.267937 | 10,286 | 231 | 119 | 44.528139 | 0.886056 | 0.530332 | 0 | 0.228261 | 0 | 0 | 0.094076 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076087 | false | 0 | 0.076087 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
709bdbfb5b18720b7d117f1e6e1e246c7727a60a | 1,578 | py | Python | tests/test_data.py | joseph-nagel/torchutils | e13b5b156734dc1645e1d6c7b81738ca52904c92 | [
"MIT"
] | null | null | null | tests/test_data.py | joseph-nagel/torchutils | e13b5b156734dc1645e1d6c7b81738ca52904c92 | [
"MIT"
] | null | null | null | tests/test_data.py | joseph-nagel/torchutils | e13b5b156734dc1645e1d6c7b81738ca52904c92 | [
"MIT"
] | null | null | null | '''Tests for the data module.'''
import pytest
import numpy as np
import torch
from torch.utils.data import TensorDataset
from torchutils.data import mean_std_over_dataset, image2tensor, tensor2image
@pytest.mark.parametrize('no_samples', [100, 1000])
@pytest.mark.parametrize('feature_shape', [(), (1,), (10,), (10,10)])
def test_mean_std_over_dataset(no_samples, feature_shape):
'''Test correctness of evaluating the mean and standard deviation.'''
torch.manual_seed(0)
X = torch.randn(no_samples, *feature_shape)
y = torch.randint(2, size=(no_samples,))
data_set = TensorDataset(X, y)
mean, std = mean_std_over_dataset(data_set)
ref_mean = X.numpy().mean()
ref_std = X.numpy().std()
assert np.isclose(mean, ref_mean, rtol=1e-02, atol=1e-03)
assert np.isclose(std, ref_std, rtol=1e-02, atol=1e-03)
@pytest.mark.parametrize('shape', [(10,10), (10,10,3), (1,10,10,3)])
def test_image2tensor2image(shape):
'''Test the transformation and back-transformation of an image.'''
np.random.seed(0)
image = np.random.randn(*shape)
tensor = image2tensor(image)
new_image = tensor2image(tensor)
assert np.allclose(image.squeeze(), new_image.squeeze())
@pytest.mark.parametrize('shape', [(10,10), (3,10,10), (1,3,10,10)])
def test_tensor2image2tensor(shape):
'''Test the transformation and back-transformation of a tensor.'''
torch.manual_seed(0)
tensor = torch.randn(*shape)
image = tensor2image(tensor)
new_tensor = image2tensor(image)
assert np.allclose(tensor.squeeze(), new_tensor.squeeze())
| 38.487805 | 77 | 0.707858 | 228 | 1,578 | 4.763158 | 0.307018 | 0.033149 | 0.077348 | 0.049724 | 0.174954 | 0.174954 | 0.090239 | 0.090239 | 0 | 0 | 0 | 0.051661 | 0.141318 | 1,578 | 40 | 78 | 39.45 | 0.749816 | 0.134347 | 0 | 0.064516 | 0 | 0 | 0.024554 | 0 | 0 | 0 | 0 | 0 | 0.129032 | 1 | 0.096774 | false | 0 | 0.16129 | 0 | 0.258065 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
709be0e39f954015ae500d50e9d8b5505b26abc0 | 8,593 | py | Python | src/model_execution_worker/tasks.py | OasisLMF/OasisPlatform_SQL | e3359d0bd3093e47bc46848c810b8876980d5cbc | [
"BSD-3-Clause"
] | 1 | 2020-02-27T13:25:22.000Z | 2020-02-27T13:25:22.000Z | src/model_execution_worker/tasks.py | OasisLMF/OasisPlatform_SQL | e3359d0bd3093e47bc46848c810b8876980d5cbc | [
"BSD-3-Clause"
] | 3 | 2019-11-14T10:26:46.000Z | 2021-03-25T22:33:52.000Z | src/model_execution_worker/tasks.py | OasisLMF/OasisPlatform_SQL | e3359d0bd3093e47bc46848c810b8876980d5cbc | [
"BSD-3-Clause"
] | 2 | 2019-03-21T09:22:12.000Z | 2019-05-24T15:13:51.000Z | from __future__ import absolute_import
import importlib
import logging
import uuid
from contextlib import contextmanager
import fasteners
import json
import os
import shutil
import tarfile
import glob
import sys
import time
from oasislmf.model_execution.bin import prepare_model_run_directory, prepare_model_run_inputs
from oasislmf.model_execution import runner
from oasislmf.utils import status
from oasislmf.utils.exceptions import OasisException
from oasislmf.utils.log import oasis_log
from pathlib2 import Path
from celery import Celery
from celery.task import task
from ..utils.path import setcwd
from ..conf.settings import settings
from ..conf import celery as celery_conf
'''
Celery task wrapper for Oasis ktools calculation.
'''
ARCHIVE_FILE_SUFFIX = '.tar'
CELERY = Celery()
CELERY.config_from_object(celery_conf)
logging.info("Started worker")
logging.info("INPUTS_DATA_DIRECTORY: {}".format(settings.get('worker', 'INPUTS_DATA_DIRECTORY')))
logging.info("OUTPUTS_DATA_DIRECTORY: {}".format(settings.get('worker', 'OUTPUTS_DATA_DIRECTORY')))
logging.info("MODEL_DATA_DIRECTORY: {}".format(settings.get('worker', 'MODEL_DATA_DIRECTORY')))
logging.info("WORKING_DIRECTORY: {}".format(settings.get('worker', 'WORKING_DIRECTORY')))
logging.info("KTOOLS_BATCH_COUNT: {}".format(settings.get('worker', 'KTOOLS_BATCH_COUNT')))
logging.info("KTOOLS_ALLOC_RULE: {}".format(settings.get('worker', 'KTOOLS_ALLOC_RULE')))
logging.info("KTOOLS_MEMORY_LIMIT: {}".format(settings.get('worker', 'KTOOLS_MEMORY_LIMIT')))
logging.info("LOCK_TIMEOUT_IN_SECS: {}".format(settings.get('worker', 'LOCK_TIMEOUT_IN_SECS')))
logging.info("LOCK_RETRY_COUNTDOWN_IN_SECS: {}".format(settings.get('worker', 'LOCK_RETRY_COUNTDOWN_IN_SECS')))
logging.info("POST_ANALYSIS_SLEEP_IN_SECS: {}".format(settings.get('worker', 'POST_ANALYSIS_SLEEP_IN_SECS')))
class MissingInputsException(OasisException):
def __init__(self, input_archive):
super(MissingInputsException, self).__init__('Inputs location not found: {}'.format(input_archive))
class InvalidInputsException(OasisException):
def __init__(self, input_archive):
super(InvalidInputsException, self).__init__('Inputs location not a tarfile: {}'.format(input_archive))
class MissingModelDataException(OasisException):
def __init__(self, model_data_path):
super(MissingModelDataException, self).__init__('Model data not found: {}'.format(model_data_path))
@contextmanager
def get_lock():
lock = fasteners.InterProcessLock(settings.get('worker', 'LOCK_FILE'))
gotten = lock.acquire(blocking=True, timeout=settings.getfloat('worker', 'LOCK_TIMEOUT_IN_SECS'))
yield gotten
if gotten:
lock.release()
@task(name='run_analysis', bind=True)
def start_analysis_task(self, input_location, analysis_settings_json):
'''
Task wrapper for running an analysis.
Args:
analysis_profile_json (string): The analysis settings.
Returns:
(string) The location of the outputs.
'''
logging.info("LOCK_FILE: {}".format(settings.get('worker', 'LOCK_FILE')))
logging.info("LOCK_RETRY_COUNTDOWN_IN_SECS: {}".format(
settings.get('worker', 'LOCK_RETRY_COUNTDOWN_IN_SECS')))
with get_lock() as gotten:
if not gotten:
logging.info("Failed to get resource lock - retry task")
# max_retries=None is supposed to be unlimited but doesn't seem to work
# Set instead to a large number
raise self.retry(
max_retries=9999999,
countdown=settings.getint('worker', 'LOCK_RETRY_COUNTDOWN_IN_SECS'))
logging.info("Acquired resource lock")
try:
logging.info("INPUTS_DATA_DIRECTORY: {}".format(settings.get('worker', 'INPUTS_DATA_DIRECTORY')))
logging.info("OUTPUTS_DATA_DIRECTORY: {}".format(settings.get('worker', 'OUTPUTS_DATA_DIRECTORY')))
logging.info("MODEL_DATA_DIRECTORY: {}".format(settings.get('worker', 'MODEL_DATA_DIRECTORY')))
logging.info("WORKING_DIRECTORY: {}".format(settings.get('worker', 'WORKING_DIRECTORY')))
logging.info("KTOOLS_BATCH_COUNT: {}".format(settings.get('worker', 'KTOOLS_BATCH_COUNT')))
logging.info("KTOOLS_MEMORY_LIMIT: {}".format(settings.get('worker', 'KTOOLS_MEMORY_LIMIT')))
self.update_state(state=status.STATUS_RUNNING)
output_location = start_analysis(analysis_settings_json[0], input_location)
except Exception:
logging.exception("Model execution task failed.")
raise
time.sleep(settings.getint('worker', 'POST_ANALYSIS_SLEEP_IN_SECS'))
return output_location
@oasis_log()
def start_analysis(analysis_settings, input_location):
'''
Run an analysis.
Args:
analysis_profile_json (string): The analysis settings.
Returns:
(string) The location of the outputs.
'''
# Check that the input archive exists and is valid
input_archive = os.path.join(
settings.get('worker', 'INPUTS_DATA_DIRECTORY'),
input_location + ARCHIVE_FILE_SUFFIX
)
if not os.path.exists(input_archive):
raise MissingInputsException(input_archive)
if not tarfile.is_tarfile(input_archive):
raise InvalidInputsException(input_archive)
source_tag = analysis_settings['analysis_settings']['source_tag']
analysis_tag = analysis_settings['analysis_settings']['analysis_tag']
logging.info(
"Source tag = {}; Analysis tag: {}".format(analysis_tag, source_tag)
)
module_supplier_id = analysis_settings['analysis_settings']['module_supplier_id']
model_version_id = analysis_settings['analysis_settings']['model_version_id']
logging.info(
"Model supplier - version = {} {}".format(module_supplier_id, model_version_id)
)
# Get the supplier module and call it
use_default_model_runner = not Path(settings.get('worker', 'SUPPLIER_MODULE_DIRECTORY'), module_supplier_id).exists()
model_data_path = os.path.join(
settings.get('worker', 'MODEL_DATA_DIRECTORY'),
module_supplier_id,
model_version_id
)
if not os.path.exists(model_data_path):
raise MissingModelDataException(model_data_path)
logging.info("Setting up analysis working directory")
directory_name = "{}_{}_{}".format(source_tag, analysis_tag, uuid.uuid4().hex)
working_directory = os.path.join(settings.get('worker', 'WORKING_DIRECTORY'), directory_name)
if 'ri_output' in analysis_settings['analysis_settings'].keys():
ri = analysis_settings['analysis_settings']['ri_output']
else:
ri = False
prepare_model_run_directory(working_directory, ri=ri, model_data_src_path=model_data_path, inputs_archive=input_archive)
prepare_model_run_inputs(analysis_settings['analysis_settings'], working_directory, ri=ri)
with setcwd(working_directory):
logging.info("Working directory = {}".format(working_directory))
# Persist the analysis_settings
with open("analysis_settings.json", "w") as json_file:
json.dump(analysis_settings, json_file)
if use_default_model_runner:
model_runner_module = runner
else:
sys.path.append(settings.get('worker', 'SUPPLIER_MODULE_DIRECTORY'))
model_runner_module = importlib.import_module('{}.supplier_model_runner'.format(module_supplier_id))
##! to add check that RI directories take the form of RI_{ID} amd ID is a monotonic index
num_reinsurance_iterations = len(glob.glob('RI_[0-9]*'))
model_runner_module.run(
analysis_settings['analysis_settings'],
settings.getint('worker', 'KTOOLS_BATCH_COUNT'),
num_reinsurance_iterations=num_reinsurance_iterations,
ktools_mem_limit=settings.getboolean('worker', 'KTOOLS_MEMORY_LIMIT'),
set_alloc_rule=settings.getint('worker', 'KTOOLS_ALLOC_RULE'),
fifo_tmp_dir=False
)
output_location = uuid.uuid4().hex
output_filepath = os.path.join(
settings.get('worker', 'OUTPUTS_DATA_DIRECTORY'), output_location + ARCHIVE_FILE_SUFFIX)
output_directory = os.path.join(working_directory, "output")
with tarfile.open(output_filepath, "w:gz") as tar:
tar.add(output_directory, arcname="output")
if settings.getboolean('worker', 'DO_CLEAR_WORKING'):
shutil.rmtree(working_directory, ignore_errors=True)
logging.info("Output location = {}".format(output_location))
return output_location
| 39.237443 | 124 | 0.718375 | 1,036 | 8,593 | 5.650579 | 0.186293 | 0.048855 | 0.0726 | 0.070721 | 0.376153 | 0.312265 | 0.234028 | 0.214042 | 0.205159 | 0.205159 | 0 | 0.001816 | 0.167113 | 8,593 | 218 | 125 | 39.417431 | 0.816124 | 0.068661 | 0 | 0.137931 | 0 | 0 | 0.242986 | 0.076425 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041379 | false | 0 | 0.172414 | 0 | 0.248276 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
709c91d65265a22a647756f08a9dbfdc545c3f18 | 1,088 | py | Python | complejidad_algoritmica.py | francomanca93/poo-algoritmos-python | 47bc0289cf1a91e2bee93f354bd39e1b592fa774 | [
"MIT"
] | null | null | null | complejidad_algoritmica.py | francomanca93/poo-algoritmos-python | 47bc0289cf1a91e2bee93f354bd39e1b592fa774 | [
"MIT"
] | null | null | null | complejidad_algoritmica.py | francomanca93/poo-algoritmos-python | 47bc0289cf1a91e2bee93f354bd39e1b592fa774 | [
"MIT"
] | null | null | null | import time
# Importo el modulo sys y aumento el limite de recursión, ya que viene predefinido con 1000
import sys
sys.setrecursionlimit(1000000) # 1 000 000
def factorial_iterativo(n):
respuesta = 1
while n > 1:
respuesta *= n
n -= 1
return respuesta
def factorial_recursivo(n):
if n == 1:
return 1
return n * factorial_iterativo(n - 1)
if __name__ == '__main__':
n = 10000000
print('Complejidad temporal de un algoritmo ITERATIVO. Factorial')
comienzo = time.time()
factorial_iterativo(n)
final = time.time()
tiempo_iterativo = final - comienzo
print(tiempo_iterativo)
print('--------------------')
print('Complejidad temporal de un algoritmo RECURSIVO. Factorial')
comienzo = time.time()
factorial_recursivo(n)
final = time.time()
tiempo_recursivo = final - comienzo
print(tiempo_recursivo)
print('-------------------')
diferencia = abs(tiempo_iterativo - tiempo_recursivo)
print(f'La diferencia de tiempo entre un algoritmo y otro es {diferencia}')
| 23.652174 | 91 | 0.647059 | 130 | 1,088 | 5.269231 | 0.369231 | 0.011679 | 0.083212 | 0.075912 | 0.265693 | 0.108029 | 0 | 0 | 0 | 0 | 0 | 0.038508 | 0.236213 | 1,088 | 45 | 92 | 24.177778 | 0.7858 | 0.090993 | 0 | 0.129032 | 0 | 0 | 0.229442 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.064516 | 0 | 0.225806 | 0.225806 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
709f375eec3da89e1429343cd567396c10876145 | 765 | py | Python | wheel/notice/email_sender.py | kong5664546498/half_a_wheel | d50c2359ac7dda55f54dd08bb588091eb6232b81 | [
"MIT"
] | null | null | null | wheel/notice/email_sender.py | kong5664546498/half_a_wheel | d50c2359ac7dda55f54dd08bb588091eb6232b81 | [
"MIT"
] | null | null | null | wheel/notice/email_sender.py | kong5664546498/half_a_wheel | d50c2359ac7dda55f54dd08bb588091eb6232b81 | [
"MIT"
] | null | null | null | import smtplib
from email.header import Header
from email.mime.text import MIMEText
class EmailSender:
    """Minimal HTML mail sender bound to a 163.com SMTP account.

    The SSL connection is opened and authenticated once in ``__init__``
    and reused for every :meth:`send` call.
    """

    def __init__(self) -> None:
        # NOTE(review): account, password and recipient are hard-coded in
        # source. Move them to environment variables or a secrets store
        # before shipping, and rotate the exposed credential.
        self.receiver = "kongandmarx@163.com"
        self.sender = "kongandmarx@163.com"
        # Implicit-TLS SMTP endpoint (163.com serves SSL SMTP on 994).
        self.smtp_obj = smtplib.SMTP_SSL("smtp.163.com", port=994)
        # self.smtp_obj.connect("smtp.163.com", 25)
        self.smtp_obj.login("kongandmarx@163.com", "YVLZXZWJBYAHLCAJ")

    def send(self, to, subject, text):
        """Send *text* wrapped in an ``<h1>`` tag as an HTML mail to *to*.

        :param to: recipient address, used both for the ``To:`` header and
            the SMTP envelope.
        :param subject: subject line.
        :param text: body text (interpolated into the HTML unescaped).
        """
        t = f"""
        <h1> {text} </h1>
        """
        message = MIMEText(t, "html")
        message["Subject"] = Header(subject)
        message["From"] = Header(f"{self.sender}")
        message["To"] = Header(f"{to}")
        # Bug fix: the original passed self.receiver as the envelope
        # recipient, silently ignoring the ``to`` argument even though the
        # To: header above already used it. Deliver to ``to``.
        self.smtp_obj.sendmail(self.sender, to, message.as_string())
70a04db5de55b9f9804753c3bf5f7a12c6fc7e92 | 6,590 | py | Python | src/config_common.py | karawallace/mygene | 35bf066eb50bc929b4bb4e2423d47b4c98797526 | [
"Apache-2.0"
] | null | null | null | src/config_common.py | karawallace/mygene | 35bf066eb50bc929b4bb4e2423d47b4c98797526 | [
"Apache-2.0"
] | null | null | null | src/config_common.py | karawallace/mygene | 35bf066eb50bc929b4bb4e2423d47b4c98797526 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from biothings.www.settings.default import *
from www.api.query_builder import ESQueryBuilder
from www.api.query import ESQuery
from www.api.transform import ESResultTransformer
from www.api.handlers import GeneHandler, QueryHandler, MetadataHandler, StatusHandler, TaxonHandler, DemoHandler
# *****************************************************************************
# Elasticsearch variables
# *****************************************************************************
# elasticsearch server transport url
ES_HOST = 'localhost:9200'
# elasticsearch index name
ES_INDEX = 'mygene_current'
# elasticsearch document type
ES_DOC_TYPE = 'gene'
# URL path version prefix used by the API routes in APP_LIST below
API_VERSION = 'v3'
# environment variable consulted for the serving host (presumably read by
# the biothings launcher pulled in via the star import above -- TODO confirm)
HOST_ENVAR_NAME = "MG_HOST"
# *****************************************************************************
# App URL Patterns
# *****************************************************************************
# (url regex, tornado RequestHandler) pairs; handlers come from www.api.handlers
APP_LIST = [
    (r"/status", StatusHandler),
    (r"/metadata/?", MetadataHandler),
    (r"/metadata/fields/?", MetadataHandler),
    (r"/demo/?$", DemoHandler),
    (r"/{}/species/(\d+)/?".format(API_VERSION), TaxonHandler),
    (r"/{}/taxon/(\d+)/?".format(API_VERSION), TaxonHandler),
    (r"/{}/gene/(.+)/?".format(API_VERSION), GeneHandler),
    (r"/{}/gene/?$".format(API_VERSION), GeneHandler),
    (r"/{}/query/?".format(API_VERSION), QueryHandler),
    (r"/{}/metadata/?".format(API_VERSION), MetadataHandler),
    (r"/{}/metadata/fields/?".format(API_VERSION), MetadataHandler),
]
###############################################################################
# app-specific query builder, query, and result transformer classes
###############################################################################
# *****************************************************************************
# Subclass of biothings.www.api.es.query_builder.ESQueryBuilder to build
# queries for this app
# *****************************************************************************
ES_QUERY_BUILDER = ESQueryBuilder
# *****************************************************************************
# Subclass of biothings.www.api.es.query.ESQuery to execute queries for this app
# *****************************************************************************
ES_QUERY = ESQuery
# *****************************************************************************
# Subclass of biothings.www.api.es.transform.ESResultTransformer to transform
# ES results for this app
# *****************************************************************************
ES_RESULT_TRANSFORMER = ESResultTransformer
# Google Analytics action labels, one per endpoint/verb combination
GA_ACTION_QUERY_GET = 'query_get'
GA_ACTION_QUERY_POST = 'query_post'
GA_ACTION_ANNOTATION_GET = 'gene_get'
GA_ACTION_ANNOTATION_POST = 'gene_post'
GA_TRACKER_URL = 'MyGene.info'
STATUS_CHECK_ID = '1017'
JSONLD_CONTEXT_PATH = 'www/context/context.json'
# MYGENE THINGS
# This essentially bypasses the es.get fallback as in myvariant...
# The first regex matches integers, in which case the query is made against
# entrezgene; all annotation queries are now multimatch against the listed
# fields.
# NOTE: ``re`` is not imported in this chunk; it is expected to arrive via
# ``from biothings.www.settings.default import *`` at the top of the file.
ANNOTATION_ID_REGEX_LIST = [(re.compile(r'^\d+$'), ['entrezgene', 'retired']),
                            (re.compile(r'.*'), ['ensembl.gene'])]
# fields returned when a query does not request specific fields
DEFAULT_FIELDS = ['name', 'symbol', 'taxid', 'entrezgene']
# common species name -> NCBI taxonomy id (and genome assembly, where known)
TAXONOMY = {
    "human": {"tax_id": "9606", "assembly": "hg38"},
    "mouse": {"tax_id": "10090", "assembly": "mm10"},
    "rat": {"tax_id": "10116", "assembly": "rn4"},
    "fruitfly": {"tax_id": "7227", "assembly": "dm3"},
    "nematode": {"tax_id": "6239", "assembly": "ce10"},
    "zebrafish": {"tax_id": "7955", "assembly": "zv9"},
    "thale-cress": {"tax_id": "3702"},
    "frog": {"tax_id": "8364", "assembly": "xenTro3"},
    "pig": {"tax_id": "9823", "assembly": "susScr2"}
}
# Prefix rewrites applied to user query strings: the "<alias>:" key on the
# left is replaced by the (escaped) Elasticsearch field prefix on the right.
DATASOURCE_TRANSLATIONS = {
    "refseq:": r"refseq.\\\*:",
    "accession:": r"accession.\\\*:",
    "reporter:": r"reporter.\\\*:",
    "interpro:": r"interpro.\\\*:",
    # GO:xxxxx looks like a ES raw query, so just look for
    # the term as a string in GO's ID (note: searching every keys
    # will raise an error because pubmed key is a int and we're
    # searching with a string term.
    "GO:": r"go.\\\*.id:go\\\:",
    #"GO:": r"go.\\\*:go.",
    "homologene:": r"homologene.\\\*:",
    "reagent:": r"reagent.\\\*:",
    "uniprot:": r"uniprot.\\\*:",
    "ensemblgene:": "ensembl.gene:",
    "ensembltranscript:": "ensembl.transcript:",
    "ensemblprotein:": "ensembl.protein:",
    # some specific datasources needs to be case-insentive
    "hgnc:": r"HGNC:",
    "hprd:": r"HPRD:",
    "mim:": r"MIM:",
    "mgi:": r"MGI:",
    "ratmap:": r"RATMAP:",
    "rgd:": r"RGD:",
    "flybase:": r"FLYBASE:",
    "wormbase:": r"WormBase:",
    "tair:": r"TAIR:",
    "zfin:": r"ZFIN:",
    "xenbase:": r"Xenbase:",
    "mirbase:": r"miRBase:",
}
# 'species' kwarg spec: a list defaulting to ['all']; common names are
# translated to NCBI tax ids via TAXONOMY above.
SPECIES_TYPEDEF = {'species': {'type': list, 'default': ['all'], 'max': 10,
                   'translations': [(re.compile(pattern, re.I), translation['tax_id']) for (pattern, translation) in TAXONOMY.items()]}}
# For datasource translations
DATASOURCE_TRANSLATION_TYPEDEF = [(re.compile(pattern, re.I), translation) for
                                  (pattern, translation) in DATASOURCE_TRANSLATIONS.items()]
# Same table with everything after ':' and the backslash escaping stripped;
# used where bare field names (not query prefixes) must be translated.
TRIMMED_DATASOURCE_TRANSLATION_TYPEDEF = [(re.compile(re.sub(r':.*', '', pattern).replace('\\', ''), re.I),
                                           re.sub(r':.*', '', translation).replace('\\','')) for (pattern, translation) in DATASOURCE_TRANSLATIONS.items()]
# Kwarg control update for mygene specific kwargs.
# The *_KWARGS dicts below are defined by the biothings star import at the
# top of the file and are mutated in place here.
# ES KWARGS (_source, scopes,
ANNOTATION_GET_ES_KWARGS['_source'].update({#'default': DEFAULT_FIELDS,
                                            'translations': TRIMMED_DATASOURCE_TRANSLATION_TYPEDEF})
ANNOTATION_POST_ES_KWARGS['_source'].update({#'default': DEFAULT_FIELDS,
                                             'translations': TRIMMED_DATASOURCE_TRANSLATION_TYPEDEF})
QUERY_GET_ES_KWARGS['_source'].update({'default': DEFAULT_FIELDS, 'translations': TRIMMED_DATASOURCE_TRANSLATION_TYPEDEF})
QUERY_POST_ES_KWARGS['_source'].update({'default': DEFAULT_FIELDS, 'translations': TRIMMED_DATASOURCE_TRANSLATION_TYPEDEF})
# Control KWARGS
QUERY_GET_CONTROL_KWARGS['q'].update({'translations': DATASOURCE_TRANSLATION_TYPEDEF})
# query builder KWARGS
ANNOTATION_GET_ESQB_KWARGS.update(SPECIES_TYPEDEF)
ANNOTATION_POST_ESQB_KWARGS.update(SPECIES_TYPEDEF)
QUERY_GET_ESQB_KWARGS.update(SPECIES_TYPEDEF)
QUERY_POST_ESQB_KWARGS.update(SPECIES_TYPEDEF)
QUERY_POST_ESQB_KWARGS['scopes'].update({'translations': TRIMMED_DATASOURCE_TRANSLATION_TYPEDEF})
| 43.642384 | 136 | 0.57648 | 664 | 6,590 | 5.527108 | 0.329819 | 0.013624 | 0.061035 | 0.057221 | 0.291553 | 0.261035 | 0.198365 | 0.125341 | 0.125341 | 0.101362 | 0 | 0.010691 | 0.134143 | 6,590 | 150 | 137 | 43.933333 | 0.632492 | 0.301214 | 0 | 0.022222 | 0 | 0 | 0.264352 | 0.010211 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.055556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70a65f53a022d6fe554e262dd2b61539aad6cfe3 | 15,323 | py | Python | ansible_collection/hpe/nimble/plugins/modules/hpe_nimble_network.py | datamattsson/nimble-ansible-modules | ba306153f98db093a9af47c99bdfce1381660880 | [
"Apache-2.0"
] | null | null | null | ansible_collection/hpe/nimble/plugins/modules/hpe_nimble_network.py | datamattsson/nimble-ansible-modules | ba306153f98db093a9af47c99bdfce1381660880 | [
"Apache-2.0"
] | null | null | null | ansible_collection/hpe/nimble/plugins/modules/hpe_nimble_network.py | datamattsson/nimble-ansible-modules | ba306153f98db093a9af47c99bdfce1381660880 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# # Copyright 2020 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# author Alok Ranjan (alok.ranjan2@hpe.com)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Plugin metadata consumed by ansible-doc and the Ansible sanity checks.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
author:
- HPE Nimble Storage Ansible Team (@ar-india) <nimble-dcs-storage-automation-eng@hpe.com>
description: Manage the storage network configuration on the HPE Nimble Storage group.
module: hpe_nimble_network
options:
activate:
required: False
type: bool
description:
- Activate a network configuration.
array:
required: False
type: list
elements: dict
description:
- List of array network configs.
change_name:
required: False
type: str
description:
- Change name of the existing network config.
iscsi_automatic_connection_method:
required: False
type: bool
description:
- Whether automatic connection method is enabled. Enabling this means means redirecting connections from the specified iSCSI
discovery IP address to the best data IP address based on connection counts.
iscsi_connection_rebalancing:
required: False
type: bool
description:
- Whether rebalancing is enabled. Enabling this means rebalancing iSCSI connections by periodically breaking existing
connections that are out-of-balance, allowing the host to reconnect to a more appropriate data IP address.
ignore_validation_mask:
required: False
type: int
description:
- Indicates whether to ignore the validation.
mgmt_ip:
required: False
type: str
description:
- Management IP address for the Group. Four numbers in the range (0,255) separated by periods.
name:
required: True
type: str
choices:
- active
- backup
- draft
description:
- Name of the network configuration. Use the name 'draft' when creating a draft configuration.
secondary_mgmt_ip:
required: False
type: str
description:
- Secondary management IP address for the Group. Four numbers in the range [0,255] separated by periods.
subnet:
required: False
type: list
elements: dict
description:
- List of subnet configs.
route:
required: False
type: list
elements: dict
description:
- List of static routes.
state:
required: True
choices:
- create
- present
- absent
type: str
description:
- The network config operation.
validate:
required: False
type: bool
description:
- Validate a network configuration.
extends_documentation_fragment: hpe.nimble.hpe_nimble
short_description: Manage the HPE Nimble Storage network configuration.
version_added: "2.9.0"
'''
EXAMPLES = r'''
# if state is create, then create network config, fails if it exist or cannot create
# if state is present, then create network config if not present ,else success
- name: Create network config
hpe_nimble_network:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
name: "{{ name }}"
route: "{{ route }}"
subnet: "{{ subnet }}"
array: "{{ array }}"
iscsi_automatic_connection_method: true
iscsi_connection_rebalancing: False
mgmt_ip: "{{ mgmt_ip }}"
state: "{{ state | default('present') }}"
- name: Delete network config
hpe_nimble_network:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
name: "{{ name }}"
state: "absent"
- name: Validate network config
hpe_nimble_network:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
name: "{{ name }}"
state: "present"
ignore_validation_mask: 1
validate: true
- name: Activate Network config
hpe_nimble_network:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
name: "{{ name }}"
state: "present"
ignore_validation_mask: 1
activate: true
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
try:
from nimbleclient.v1 import client
except ImportError:
client = None
import ansible_collections.hpe.nimble.plugins.module_utils.hpe_nimble as utils
def create_update_network_config(
        client_obj,
        name,
        state,
        iscsi_automatic_connection_method,
        iscsi_connection_rebalancing,
        mgmt_ip,
        change_name,
        **kwargs):
    """Create the named network config, or update it when it already exists.

    :param client_obj: authenticated NimOSClient instance.
    :param name: config name ('active', 'backup' or 'draft').
    :param state: 'create' fails if the config already exists; any other
        accepted value ('present') falls through to an update.
    :param change_name: new name for an existing config (rename), or None.
    :param kwargs: further attributes (array_list, subnet_list, route_list,
        ignore_validation_mask, secondary_mgmt_ip, ...); None values are
        stripped before being passed to the SDK.
    :return: 5-tuple (return_status, changed, msg, changed_attrs_dict,
        resp_attrs).
    """
    if utils.is_null_or_empty(name):
        return (False, False, "Create network config failed as name is not present.", {}, {})
    try:
        network_resp = client_obj.network_configs.get(id=None, name=name)
        if utils.is_null_or_empty(network_resp):
            # no config with that name yet -> create it
            params = utils.remove_null_args(**kwargs)
            network_resp = client_obj.network_configs.create(name=name,
                                                             iscsi_automatic_connection_method=iscsi_automatic_connection_method,
                                                             iscsi_connection_rebalancing=iscsi_connection_rebalancing,
                                                             mgmt_ip=mgmt_ip,
                                                             **params)
            return (True, True, f"Network config '{name}' created successfully.", {}, network_resp.attrs)
        else:
            if state == "create":
                return (False, False, f"Network config '{name}' cannot be created as it is already present in given state.", {}, network_resp.attrs)
            # update case
            kwargs['name'] = change_name
            # changed_attrs_dict is only used to decide whether an update is
            # needed and to report what changed...
            changed_attrs_dict, params = utils.remove_unchanged_or_null_args(network_resp, **kwargs)
            # ...even though some of the attributes have not changed they
            # still have to be passed in case of update.
            params = utils.remove_null_args(**kwargs)
            if changed_attrs_dict.__len__() > 0:
                network_resp = client_obj.network_configs.update(id=network_resp.attrs.get("id"),
                                                                 name=name,
                                                                 iscsi_automatic_connection_method=iscsi_automatic_connection_method,
                                                                 iscsi_connection_rebalancing=iscsi_connection_rebalancing,
                                                                 mgmt_ip=mgmt_ip,
                                                                 **params)
                return (True, True, f"Network config '{name}' already present. Modified the following attributes '{changed_attrs_dict}'",
                        changed_attrs_dict, network_resp.attrs)
            else:
                return (True, False, f"Network config '{network_resp.attrs.get('name')}' already present in given state.", {}, network_resp.attrs)
    except Exception as ex:
        return (False, False, f"Network config creation failed |'{ex}'", {}, {})
def delete_network_config(
        client_obj,
        name):
    """Delete the named network configuration.

    Returns a 4-tuple (return_status, changed, msg, changed_attrs_dict).
    """
    if utils.is_null_or_empty(name):
        return (False, False, "Delete network config failed as name is not present.", {})
    try:
        netconf = client_obj.network_configs.get(id=None, name=name)
        if utils.is_null_or_empty(netconf):
            msg = f"Network config '{name}' cannot be deleted as it is not present."
            return (False, False, msg, {})
        client_obj.network_configs.delete(id=netconf.attrs.get("id"))
        return (True, True, f"Deleted network config '{name}' successfully.", {})
    except Exception as ex:
        return (False, False, f"Delete network config failed |'{ex}'", {})
def validate_network_config(
        client_obj,
        name,
        ignore_validation_mask):
    """Run the validation action on the named network configuration.

    Validation mutates nothing, so on success the 'changed' element of the
    returned (return_status, changed, msg, changed_attrs_dict) tuple is
    False.
    """
    if utils.is_null_or_empty(name):
        return (False, False, "Validate network config failed as name is not present.", {})
    try:
        netconf = client_obj.network_configs.get(id=None, name=name)
        if utils.is_null_or_empty(netconf):
            msg = f"Network config '{name}' cannot be validated as it is not present."
            return (False, False, msg, {})
        client_obj.network_configs.validate_netconfig(
            id=netconf.attrs.get("id"),
            ignore_validation_mask=ignore_validation_mask)
        return (True, False, f"Validated network config '{name}' successfully.", {})
    except Exception as ex:
        return (False, False, f"Validate Network config failed |'{ex}'", {})
def activate_network_config(
        client_obj,
        name,
        ignore_validation_mask):
    """Activate the named network configuration.

    Activation changes the group's live config, so on success the
    'changed' element of the returned (return_status, changed, msg,
    changed_attrs_dict) tuple is True.
    """
    if utils.is_null_or_empty(name):
        return (False, False, "Activate network config failed as name is not present.", {})
    try:
        netconf = client_obj.network_configs.get(id=None, name=name)
        if utils.is_null_or_empty(netconf):
            msg = f"Network config '{name}' cannot be activated as it is not present."
            return (False, False, msg, {})
        client_obj.network_configs.activate_netconfig(id=netconf.attrs.get("id"),
                                                      ignore_validation_mask=ignore_validation_mask)
        return (True, True, f"Activated network config '{name}' successfully.", {})
    except Exception as ex:
        return (False, False, f"Activate Network config failed |'{ex}'", {})
def main():
    """Ansible module entry point.

    Parses the module arguments and dispatches to the create/update,
    delete, validate or activate operation, then reports the result via
    exit_json/fail_json.
    """
    # Argument spec for AnsibleModule. 'no_log': False is set explicitly to
    # silence Ansible's heuristic no_log warnings on option names.
    fields = {
        "activate": {
            "required": False,
            "type": "bool",
            "no_log": False
        },
        "array": {
            "required": False,
            "type": "list",
            "elements": 'dict',
            "no_log": False
        },
        "change_name": {
            "required": False,
            "type": "str",
            "no_log": False
        },
        "iscsi_automatic_connection_method": {
            "required": False,
            "type": "bool",
            "no_log": False
        },
        "iscsi_connection_rebalancing": {
            "required": False,
            "type": "bool",
            "no_log": False
        },
        "ignore_validation_mask": {
            "required": False,
            "type": "int",
            "no_log": False
        },
        "mgmt_ip": {
            "required": False,
            "type": "str",
            "no_log": False
        },
        "name": {
            "required": True,
            "choices": ['active',
                        'backup',
                        'draft'
                        ],
            "type": "str",
            "no_log": False
        },
        "secondary_mgmt_ip": {
            "required": False,
            "type": "str",
            "no_log": False
        },
        "subnet": {
            "required": False,
            "type": "list",
            "elements": 'dict',
            "no_log": False
        },
        "route": {
            "required": False,
            "type": "list",
            "elements": 'dict',
            "no_log": False
        },
        "state": {
            "required": True,
            "choices": ['create',
                        'present',
                        'absent'
                        ],
            "type": "str"
        },
        "validate": {
            "required": False,
            "type": "bool",
            "no_log": False
        }
    }
    # merge in the host/username/password auth options shared by all
    # hpe.nimble modules
    default_fields = utils.basic_auth_arg_fields()
    fields.update(default_fields)
    # these options are mandatory when creating a new config
    required_if = [('state', 'create', ['array', 'iscsi_automatic_connection_method', 'iscsi_connection_rebalancing', 'mgmt_ip', 'subnet', 'route'])]
    module = AnsibleModule(argument_spec=fields, required_if=required_if)
    if client is None:
        # nimbleclient failed to import at module load time
        module.fail_json(msg='Python nimble-sdk could not be found.')
    hostname = module.params["host"]
    username = module.params["username"]
    password = module.params["password"]
    activate = module.params["activate"]
    array = module.params["array"]
    iscsi_automatic_connection_method = module.params["iscsi_automatic_connection_method"]
    iscsi_connection_rebalancing = module.params["iscsi_connection_rebalancing"]
    ignore_validation_mask = module.params["ignore_validation_mask"]
    mgmt_ip = module.params["mgmt_ip"]
    name = module.params["name"]
    change_name = module.params["change_name"]
    secondary_mgmt_ip = module.params["secondary_mgmt_ip"]
    subnet = module.params["subnet"]
    route = module.params["route"]
    state = module.params["state"]
    validate = module.params["validate"]
    if (username is None or password is None or hostname is None):
        module.fail_json(
            msg="Missing variables: hostname, username and password is mandatory.")
    # defaults
    return_status = changed = False
    msg = "No task to run."
    resp = None
    try:
        client_obj = client.NimOSClient(
            hostname,
            username,
            password
        )
        # States: create/update wins only when neither validate nor
        # activate was requested.
        if ((validate is None or validate is False)
                and (activate is None or activate is False)
                and (state == "create" or state == "present")):
            # if not client_obj.network_configs.get(id=None, name=name) or state == "create":
            return_status, changed, msg, changed_attrs_dict, resp = create_update_network_config(
                client_obj,
                name,
                state,
                iscsi_automatic_connection_method,
                iscsi_connection_rebalancing,
                mgmt_ip,
                change_name,
                array_list=array,
                ignore_validation_mask=ignore_validation_mask,
                secondary_mgmt_ip=secondary_mgmt_ip,
                subnet_list=subnet,
                route_list=route)
        elif state == "absent":
            return_status, changed, msg, changed_attrs_dict = delete_network_config(client_obj, name)
        elif state == "present" and validate is True:
            return_status, changed, msg, changed_attrs_dict = validate_network_config(client_obj, name, ignore_validation_mask)
        elif state == "present" and activate is True:
            return_status, changed, msg, changed_attrs_dict = activate_network_config(client_obj, name, ignore_validation_mask)
    except Exception as ex:
        # failed for some reason (e.g. client construction); report via msg.
        msg = str(ex)
    if return_status:
        if utils.is_null_or_empty(resp):
            module.exit_json(return_status=return_status, changed=changed, msg=msg)
        else:
            module.exit_json(return_status=return_status, changed=changed, msg=msg, attrs=resp)
    else:
        module.fail_json(return_status=return_status, changed=changed, msg=msg)
if __name__ == '__main__':
main()
| 34.825 | 149 | 0.601775 | 1,682 | 15,323 | 5.286564 | 0.166468 | 0.049708 | 0.04206 | 0.040486 | 0.554431 | 0.518106 | 0.469636 | 0.408232 | 0.383266 | 0.273954 | 0 | 0.002424 | 0.300072 | 15,323 | 439 | 150 | 34.904328 | 0.826667 | 0.056582 | 0 | 0.458445 | 0 | 0.010724 | 0.396232 | 0.039692 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013405 | false | 0.021448 | 0.013405 | 0 | 0.075067 | 0.002681 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
70a965ab4266b455f06fc24f6cfc9727034185be | 1,428 | py | Python | crawler.py | YellowDong/taohua | b61778d0e49d885141756ca52b1e426f03e89218 | [
"MIT"
] | null | null | null | crawler.py | YellowDong/taohua | b61778d0e49d885141756ca52b1e426f03e89218 | [
"MIT"
] | null | null | null | crawler.py | YellowDong/taohua | b61778d0e49d885141756ca52b1e426f03e89218 | [
"MIT"
] | null | null | null | from requests_html import HTMLSession
import re
from .blog.models import (Article, Tag, Category)
class Spider:
    """Scrapes article listings and details from python.jobbole.com."""

    def __init__(self):
        # requests_html session reused across all requests
        # (attribute name 'sesion' (sic) kept as-is)
        self.sesion = HTMLSession()

    def get_list(self):
        """Return the set of article detail URLs found on the index page,
        or None when the page could not be fetched (non-200 status)."""
        url = 'http://python.jobbole.com/all-posts/'
        resp = self.sesion.get(url)
        if resp.status_code == 200:
            links = re.findall('(http://python.jobbole.com/\d+/)', resp.text)
            return set(links)
        return

    def get_detail(self, detail_url):
        """Fetch one article page; return the response on HTTP 200,
        implicitly None otherwise."""
        resp = self.sesion.get(detail_url)
        if resp.status_code == 200:
            # text = resp.text
            return resp

    def parser(self, resp):
        """Extract body text and metadata from an article response and
        persist an Article row.

        NOTE(review): author, category and tag are extracted but never
        stored -- the create() call below only passes created_time, and the
        joined body text is discarded. Presumably the Article model has
        fields for these; confirm the model schema and extend the call.
        """
        #text = resp.html.find('.entry > p')
        text = ''.join(list(map(lambda x: x.text, resp.html.find('div.entry p'))))
        author = resp.html.find('div.entry div.copyright-area a', first=True).text
        # meta line looks like "<date> · <category> · ... · <tag>"
        temp = resp.html.find('p.entry-meta-hide-on-mobile', first=True).text.strip().split('·')
        createtime = temp[0]
        category = temp[1]
        tag = temp[-1]
        # print(createtime)
        # print(category)
        # print(tag)
        # print('================================================')
        Article.objects.create(created_time=createtime, )
if __name__ == '__main__':
    # Crawl the index page, then fetch and parse every article found.
    spider = Spider()
    urls = spider.get_list()
    if urls:
        for url in urls:
            detail = spider.get_detail(url)
            text = spider.parser(detail)
| 30.382979 | 96 | 0.556723 | 178 | 1,428 | 4.348315 | 0.421348 | 0.041344 | 0.062016 | 0.05168 | 0.108527 | 0.056848 | 0 | 0 | 0 | 0 | 0 | 0.008654 | 0.271709 | 1,428 | 46 | 97 | 31.043478 | 0.734615 | 0.107843 | 0 | 0.0625 | 0 | 0 | 0.114534 | 0.021327 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.09375 | 0 | 0.34375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5607fbf70d32a16b417fbba1a89c53e34ec639fc | 4,674 | py | Python | ultrasync/create_sync_samples.py | aeshky/ultrasync | 0801bcb0312bba2eab07f12f9a68f3a431f5aaeb | [
"Apache-2.0"
] | 7 | 2019-07-29T20:00:25.000Z | 2021-06-22T21:21:50.000Z | ultrasync/create_sync_samples.py | aeshky/ultrasync | 0801bcb0312bba2eab07f12f9a68f3a431f5aaeb | [
"Apache-2.0"
] | 1 | 2019-10-01T14:25:33.000Z | 2019-10-28T15:19:52.000Z | ultrasync/create_sync_samples.py | aeshky/ultrasync | 0801bcb0312bba2eab07f12f9a68f3a431f5aaeb | [
"Apache-2.0"
] | null | null | null | """
Date: Jul 2018
Author: Aciel Eshky
A script to create positive and negative samples using self-supervision.
"""
import os
import sys
import random
import pandas as pd
from numpy.random import seed as np_seed
from ustools.folder_utils import get_utterance_id, get_dir_info
from ultrasync.create_sync_samples_utils import create_samples, save_samples_to_disk
random.seed(2018)
np_seed(2018)
def mirror_folder_structure(input_path, output_path):
    """
    Create a mirror of the input path folder structure in output path.

    Only directories that contain at least one ``.ult`` file are mirrored
    and paired. Adapted from https://stackoverflow.com/a/40829525/5190279

    Bug fix: the original appended ``[dirpath, new_output_folder]`` for
    *any* directory with files, but ``new_output_folder`` was only
    assigned inside the ``.ult`` branch -- so a directory without ``.ult``
    files was paired with a stale mirror folder from a previous iteration
    (or raised NameError if it came first). Pairs are now recorded only
    for directories that actually contain ``.ult`` files.

    Note: ``dirpath[len(input_path):]`` assumes *input_path* ends with a
    path separator (the caller passes ".../core/").

    :param input_path: root of the source tree (with trailing separator).
    :param output_path: root under which the mirrored tree is created.
    :return: list of [source_dir, mirrored_dir] pairs for dirs with .ult files
    """
    folder_pairs = []
    for dirpath, dirnames, filenames in os.walk(input_path):
        dirnames.sort()  # deterministic traversal order
        if any(fname.endswith('.ult') for fname in filenames):
            new_output_folder = os.path.join(output_path, dirpath[len(input_path):])
            if not os.path.isdir(new_output_folder):
                print("Creating folder: \t" + new_output_folder)
                os.makedirs(new_output_folder)
            else:
                print("Folder already exits: \t" + new_output_folder)
            folder_pairs.append([dirpath, new_output_folder])
    return folder_pairs
def get_file_basenames(directory):
    """Return the set of entry names in *directory* with everything from
    the first '.' onwards stripped."""
    return {entry.split('.')[0] for entry in os.listdir(directory)}
def create_sync_data(folder_pairs):
    """Create sync samples for every utterance in every (core, generated)
    folder pair and save them to disk.

    Bug fixes vs the original:
    - the "already processed" check was ``if [b in i for i in
      target_basenames]:`` -- a list comprehension whose truthiness is
      True whenever the target directory is non-empty, so after the first
      chunk was written every remaining utterance was skipped. It now uses
      ``any()`` so only utterances with existing output chunks are skipped.
    - the bare ``except:`` (which also swallowed KeyboardInterrupt and
      SystemExit) is narrowed to ``except Exception``.

    :param folder_pairs: list of [core_dir, generated_dir] pairs, as
        produced by mirror_folder_structure.
    :return: list of the chunk file names created.
    """
    files_created = []
    df = pd.DataFrame(folder_pairs, columns=("core", "generated"))
    for index, row in df.iterrows():
        print("Processing: ", row['core'], row['generated'])
        core_dir = row['core']
        target_dir = row['generated']
        basenames = get_file_basenames(core_dir)
        target_basenames = get_file_basenames(target_dir)
        for b in basenames:
            if any(b in i for i in target_basenames):
                print(b, "files already exist in target directory.")
            elif "E" in b:
                # utterance code "E" marks non-speech recordings
                print("Skipping utterance of type \"non-speech\" (E):", os.path.join(target_dir, b))
            else:
                try:
                    info = get_dir_info(core_dir)
                    root_id = get_utterance_id(info['dataset'], info['speaker'], info['session'], b)
                    print(root_id)
                    samples = create_samples(core_dir, b)
                    chunk_names = save_samples_to_disk(samples, root_id, target_dir)
                    files_created.extend(chunk_names)
                except Exception:
                    # log and continue with the next utterance
                    print("Unexpected error:", sys.exc_info()[0])
                    print("not_processed: ", core_dir, b)
    return files_created
def main():
    """Mirror each UltraSuite dataset into the output tree, generate the
    sync samples, and log folder pairs / file names as CSVs under docs/."""
    ultrasuite = ["uxtd", "uxssd", "upx"]

    # the location of the original ultrasuite data
    input_path = sys.argv[1]  # e.g. "/group/project/ultrax2020/UltraSuite/"
    # the destination: where the sync dataset will be stored; it will
    # consist of samples, each corresponding to 200 ms of ultrasound+audio
    output_path = sys.argv[2]  # e.g. "/disk/scratch_big/../SyncDataSmall"

    for dataset in ultrasuite:
        docs = os.path.join(output_path, "docs", dataset)
        if not os.path.exists(docs):
            os.makedirs(docs)

        # the trailing slash is very important: mirror_folder_structure
        # slices paths relative to input_path_data
        input_path_data = os.path.join(input_path, "core-" + dataset, "core/")
        output_path_data = os.path.join(output_path, dataset)

        print("processing", dataset,
              "input directory is:", input_path_data,
              "output directory is:", output_path_data)

        # source/destination folder pairs; destination mirrors source
        folder_pairs = mirror_folder_structure(input_path_data, output_path_data)

        # save the pairs for logging purposes
        pd.DataFrame(columns={"source", "target"}, data=folder_pairs).to_csv(
            os.path.join(docs, "folder_pairs.csv"), index=False)

        # create and save the data
        file_names = create_sync_data(folder_pairs)

        # save the sample file names for logging purposes
        pd.DataFrame(columns={"file_names"}, data=file_names).to_csv(
            os.path.join(docs, "file_names.csv"), index=False)
if __name__ == "__main__":
main()
| 31.369128 | 111 | 0.632649 | 602 | 4,674 | 4.714286 | 0.299003 | 0.02537 | 0.031712 | 0.016913 | 0.128964 | 0.059901 | 0.052854 | 0.036646 | 0.036646 | 0.036646 | 0 | 0.011621 | 0.263586 | 4,674 | 148 | 112 | 31.581081 | 0.812899 | 0.226145 | 0 | 0.027397 | 0 | 0 | 0.101348 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054795 | false | 0 | 0.09589 | 0 | 0.191781 | 0.123288 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5608507a8fbe16afc4a2757ca530a21ae1157296 | 515 | py | Python | setup.py | kaynhelga9/64-bit-Ninja | ba3ffa69797a6f52103142515f0ee253e69e0d49 | [
"MIT"
] | 3 | 2019-02-11T14:40:29.000Z | 2019-04-21T21:59:11.000Z | setup.py | kaynhelga9/64-bit-Ninja | ba3ffa69797a6f52103142515f0ee253e69e0d49 | [
"MIT"
] | null | null | null | setup.py | kaynhelga9/64-bit-Ninja | ba3ffa69797a6f52103142515f0ee253e69e0d49 | [
"MIT"
] | null | null | null | import cx_Freeze
import os
os.environ['TCL_LIBRARY'] = r'C:\Users\Khanh Huynh\AppData\Local\Programs\Python\Python36\tcl\tcl8.6'
os.environ['TK_LIBRARY'] = r'C:\Users\Khanh Huynh\AppData\Local\Programs\Python\Python36\tcl\tk8.6'
executables = [cx_Freeze.Executable('game.py')]
cx_Freeze.setup(
name = '64-bit Ninja',
version = '1.05',
author = 'Khanh H',
options = {'build_exe': {'packages': ['pygame'], 'include_files': ['icon.png', 'idle1.png']}},
executables = executables
) | 34.333333 | 102 | 0.669903 | 71 | 515 | 4.760563 | 0.633803 | 0.071006 | 0.053254 | 0.08284 | 0.360947 | 0.360947 | 0.360947 | 0.360947 | 0.360947 | 0.360947 | 0 | 0.031891 | 0.147573 | 515 | 15 | 103 | 34.333333 | 0.738041 | 0 | 0 | 0 | 0 | 0.166667 | 0.484064 | 0.217131 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5609ed8bf29ac968a3c75282d2fbdfa0946a74d7 | 1,475 | py | Python | xiAnresturant/graph.py | wangteng200000318/-DataMiningMeiTuan | eb152a090c3025726bcb793484d4a88f2072b744 | [
"MIT"
] | 4 | 2020-11-23T04:50:41.000Z | 2021-03-12T06:19:59.000Z | xiAnresturant/graph.py | wangteng200000318/-DataMiningMeiTuan | eb152a090c3025726bcb793484d4a88f2072b744 | [
"MIT"
] | null | null | null | xiAnresturant/graph.py | wangteng200000318/-DataMiningMeiTuan | eb152a090c3025726bcb793484d4a88f2072b744 | [
"MIT"
] | null | null | null | from wordcloud import WordCloud
import jieba
import matplotlib.pyplot as plt
xiaozhai = ['佛伦萨·古典火炉披萨', '蘑菇爱上饭', '珍味林饺子馆', '巷子火锅', '千家粗粮王',
'猫堂小站猫咪主题馆', 'CoCo都可', '福气焖锅烤肉', '5号酒馆', '82°C魔力焖锅',
'小肥羊', '长安大牌档之长安集市', '泰熙家', '大自在火锅', '拉菲达牛排自助',
'猫咪餐厅', '京御煌三汁焖锅', '赵家腊汁肉', '米多多烘焙屋', '瑞可爺爺的店',
'阿姨奶茶专卖', '百富烤霸', '三姊妹香辣土豆片夹馍', '小哥过桥米线',
'太食獸泰式茶餐厅', '和記丸子専買', '0057香辣虾',
'M12铁板餐厅', '重庆鸡公煲',
'洪氏嗨捞·新派猪肚鸡'
]
hangtiancheng = ['辣条印象', '福临北京烤鸭', '味都西饼店', '刘大饼香辣土豆片夹馍', '韩味坊牛排自助',
'星期八工坊', '红透天自助涮烤', '和福顺养生焖锅', '臻膳轩自助涮烤城',
'李想大虾火锅花园',
'欧味轩艺术蛋糕', '王府臻品火锅', '艾米客蛋糕', '红透天自助涮烤',
'川渝小渔哥', '面道'
]
xiaozhai_words = []
hangtiancheng_words = []
for word in xiaozhai:
xiaozhai_words.append(jieba.cut(word))
for word in hangtiancheng:
hangtiancheng_words.append(jieba.cut(word))
res_xiaozhai = ""
res_hangtiancheng = ""
for i in range(len(xiaozhai_words)):
res_xiaozhai += ("/" + "/".join(xiaozhai_words[i]))
for i in range(len(hangtiancheng_words)):
res_hangtiancheng += ("/" + "/".join(hangtiancheng_words[i]))
w1 = WordCloud(font_path="simsun.ttf", background_color='white')
w1.generate(res_xiaozhai)
w1.to_file('小寨附近餐饮店铺词云图.png')
w2 = WordCloud(font_path="simsun.ttf", background_color='white')
w2.generate(res_hangtiancheng)
w2.to_file("航天城附近餐饮店铺词云图.png")
| 37.820513 | 69 | 0.589831 | 155 | 1,475 | 5.503226 | 0.554839 | 0.060961 | 0.021102 | 0.044549 | 0.194607 | 0.107855 | 0.107855 | 0.107855 | 0 | 0 | 0 | 0.013345 | 0.237966 | 1,475 | 38 | 70 | 38.815789 | 0.742883 | 0 | 0 | 0 | 0 | 0 | 0.244955 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
560adc09db2a2d64ed970d3656db7a5e1e25da54 | 4,055 | py | Python | api/server/routes/catchall.py | corentinthomasset/ibm-call-for-code-2021 | 2a3cbc7c9a5a21fd0caa9cbc0a57904bb2087872 | [
"Apache-2.0"
] | null | null | null | api/server/routes/catchall.py | corentinthomasset/ibm-call-for-code-2021 | 2a3cbc7c9a5a21fd0caa9cbc0a57904bb2087872 | [
"Apache-2.0"
] | null | null | null | api/server/routes/catchall.py | corentinthomasset/ibm-call-for-code-2021 | 2a3cbc7c9a5a21fd0caa9cbc0a57904bb2087872 | [
"Apache-2.0"
] | null | null | null |
import logging
import json
import warnings
import time
import datetime as dt
from ast import literal_eval as make_tuple
from flask import jsonify, abort, Response, request
from server import app, cln_client
from cloudant.error import CloudantException, ResultException
from cloudant.query import Query
import yfinance as yf
import numpy as np
from pandas_datareader import data as pdr
from yahoo_fin import stock_info as si
import pandas as pd
def ticker_details(symbol, backwards):
    """Collect ESG ratings, ESG indicators and company details for *symbol*.

    Queries one Cloudant database per aspect and returns a dict mapping the
    aspect name to a list of matching documents ({} on any failure or miss).

    NOTE(review): `backwards` is accepted but never used in this function —
    presumably kept for interface symmetry with other routes; confirm.
    """
    reply = dict()
    dbs = {
        'ratings': 'esg-ratings-ibm-cfc',
        'indicators': 'esg-indicators-ibm-cfc',
        'details': 'ticker-details-ibm-cfc'
    }
    for aspect, db in dbs.items():
        try:
            # create_database returns a handle, creating the DB when missing
            conn = cln_client.create_database(db)
        except CloudantException as e:
            logging.critical(f'DB/{aspect} connection failure: {e}')
            reply[aspect] = {}
        else:
            if conn.exists():
                logging.info(f'Using existing {aspect} DB: {db}')
            # the details DB keys companies by a different field than the ESG DBs
            field = ('cfc_company' if aspect == 'details' else 'stock_symbol')
            selector={field: symbol.upper()}
            try:
                resp = conn.get_query_result(selector,
                                             raw_result=True,
                                             limit=100)
                # short pause between queries — presumably Cloudant rate-limit
                # pacing; confirm against the service plan
                time.sleep(0.075)
            except ResultException as e:
                logging.critical(f'Query/{aspect} failed: {e}')
                reply[aspect] = {}
            else:
                if len(resp['docs']) == 0:
                    logging.warning(f'{aspect} not found for {symbol}')
                    reply[aspect] = {}
                else:
                    result = list()
                    for doc in resp['docs']:
                        result.append(doc)
                    reply[aspect] = result
    return reply
@app.route("/catchall/<string:symbol>")
def catchall(symbol):
"""catchall route"""
if symbol is None or symbol == '':
abort(Response(json.dumps({'Error': 'Invalid symbol provided'}), 400))
backwards = request.args.get('period', '1mo')
pd.set_option('display.max_rows', None)
warnings.filterwarnings("ignore")
yf.pdr_override()
num_of_years = 1
start = dt.date.today() - dt.timedelta(days = int(365.25*num_of_years))
end = dt.date.today()
tickers = si.tickers_dow()
tickers.append(symbol)
dataset = pdr.get_data_yahoo(tickers, start, end)['Adj Close']
stocks_returns = np.log(dataset/dataset.shift(1))
pairs_to_drop = set()
cols = stocks_returns.columns
for i in range(0, stocks_returns.shape[1]):
for j in range(0, i+1):
if i == j:
pairs_to_drop.add((cols[i], cols[j]))
au_corr = stocks_returns.corr().abs().unstack()
au_corr = au_corr.drop(labels=pairs_to_drop)
final = list()
for ticker in tickers:
top = dict()
top['target'] = ticker
top['correlation'] = dict()
for tpl, corr in json.loads(au_corr.to_json()).items():
pair = make_tuple(tpl)
if ticker.lower() == pair[0].lower():
top['correlation'].update({pair[1]: corr})
top['correlation'] = dict(sorted(top['correlation'].items(), key=lambda item: item[1], reverse=True))
final.append(top)
for item in final:
if item['target'].lower() == symbol.lower():
item.update(ticker_details(symbol, backwards))
item['correlations'] = list()
for corp, corr_value in item['correlation'].items():
corr_item = dict()
corr_item['symbol'] = corp
corr_item['value'] = corr_value
corr_item.update(ticker_details(corp, backwards))
item['correlations'].append(corr_item)
del item['correlation']
return jsonify(item)
break
else:
abort(Response(json.dumps({'Error': 'Stock/Correlation/ESG details not found for symbol'}), 400))
| 33.237705 | 109 | 0.568927 | 479 | 4,055 | 4.713987 | 0.367432 | 0.017715 | 0.019929 | 0.024801 | 0.056687 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01035 | 0.309001 | 4,055 | 121 | 110 | 33.512397 | 0.795503 | 0.003453 | 0 | 0.09 | 0 | 0 | 0.126457 | 0.022316 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02 | false | 0 | 0.15 | 0 | 0.19 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
560d0ef05a9d538d23b9e00d5bb4bf708502cb92 | 1,132 | py | Python | RLTutorial/MDPValue.py | fyabc/MSRAPaperProject | 2d7974acfe8065523d0c56da695807e94acd0b34 | [
"MIT"
] | 1 | 2016-08-17T10:04:30.000Z | 2016-08-17T10:04:30.000Z | RLTutorial/MDPValue.py | fyabc/MSRAPaperProject | 2d7974acfe8065523d0c56da695807e94acd0b34 | [
"MIT"
] | null | null | null | RLTutorial/MDPValue.py | fyabc/MSRAPaperProject | 2d7974acfe8065523d0c56da695807e94acd0b34 | [
"MIT"
] | null | null | null | #! /usr/bin/python
# -*- encoding: utf-8 -*-
from __future__ import print_function, unicode_literals
from MDP import MDP
import version23
__author__ = 'fyabc'
# random.seed(0)
def getRandomPolicyValue():
    """Monte-Carlo estimate of the state values of a uniformly random policy.

    For each of the start states 1-5, runs `num` full episodes on the MDP,
    accumulating the discounted return, and finally averages by the episode
    count. Progress is printed every `echoEpoch` episodes.

    Returns:
        list[float]: values indexed by state; slots 1-5 hold the estimates.
    """
    values = [0.0 for _ in range(10)]
    num = 1000000
    echoEpoch = 10000

    mdp = MDP()

    # FIX: range(1, num + 1) runs exactly `num` episodes per start state;
    # the original range(1, num) accumulated num-1 episodes but still
    # divided the totals by num, biasing every estimate slightly low.
    for k in range(1, num + 1):
        for initState in range(1, 6):
            state = initState
            isTerminal = False
            gamma = 1.0          # running discount factor for this episode
            value = 0.0          # discounted return of this episode

            while not isTerminal:
                action = mdp.randomAction()
                isTerminal, state, reward = mdp.transform(state, action)
                value += gamma * reward
                gamma *= mdp.gamma

            values[initState] += value

        if k % echoEpoch == 0:
            print('k = %d, Average values of state 1-5 are:\n' % k,
                  [value / k for value in values[1:6]])

    for i in range(len(values)):
        values[i] /= num

    return values
def test():
    """Estimate the random-policy values and print those of states 1-5."""
    state_values = getRandomPolicyValue()
    print('Average values of state 1-5 are:\n', state_values[1:6])


if __name__ == '__main__':
    test()
| 21.769231 | 72 | 0.550353 | 138 | 1,132 | 4.376812 | 0.427536 | 0.046358 | 0.02649 | 0.066225 | 0.086093 | 0.086093 | 0.086093 | 0.086093 | 0 | 0 | 0 | 0.048 | 0.337456 | 1,132 | 51 | 73 | 22.196078 | 0.757333 | 0.04947 | 0 | 0 | 0 | 0 | 0.082945 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.09375 | 0 | 0.1875 | 0.09375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
560d4c8d48ed8fbb6b163e3376983982a59d0792 | 19,002 | py | Python | tests/test_component/test_task.py | andre-merzky/radical.entk | a63ad9158cf2f58d7bfff017f7da9cd5236429b5 | [
"MIT"
] | null | null | null | tests/test_component/test_task.py | andre-merzky/radical.entk | a63ad9158cf2f58d7bfff017f7da9cd5236429b5 | [
"MIT"
] | null | null | null | tests/test_component/test_task.py | andre-merzky/radical.entk | a63ad9158cf2f58d7bfff017f7da9cd5236429b5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import glob
import shutil
import pytest
import hypothesis.strategies as st
from hypothesis import given, settings
from radical.entk import Task
from radical.entk import states
import radical.entk.exceptions as ree
# Hypothesis settings: cap each property-based test at 100 examples and
# disable the per-example deadline (CI workers can be slow); the 'travis'
# profile is then activated for the whole module.
settings.register_profile("travis", max_examples=100, deadline=None)
settings.load_profile("travis")
# ------------------------------------------------------------------------------
#
def test_task_initialization():
    '''
    **Purpose**: Test if the task attributes have, thus expect, the correct
                 default values and data types after construction.
    '''

    t = Task()

    assert t._uid is None
    assert t.name is None
    assert t.state == states.INITIAL
    assert t.state_history == [states.INITIAL]
    assert t.executable is None
    assert t.arguments == list()
    assert t.pre_exec == list()
    assert t.post_exec == list()

    assert t.cpu_reqs['processes'] == 1
    assert t.cpu_reqs['process_type'] is None
    assert t.cpu_reqs['threads_per_process'] == 1
    assert t.cpu_reqs['thread_type'] is None
    assert t.gpu_reqs['processes'] == 0
    assert t.gpu_reqs['process_type'] is None
    assert t.gpu_reqs['threads_per_process'] == 0
    assert t.gpu_reqs['thread_type'] is None

    assert t.lfs_per_process == 0
    assert t.upload_input_data == list()
    assert t.copy_input_data == list()
    assert t.link_input_data == list()
    assert t.move_input_data == list()
    assert t.copy_output_data == list()
    # FIX: the original asserted `move_input_data` a second time here and
    # never checked `move_output_data` at all.
    assert t.move_output_data == list()
    assert t.download_output_data == list()

    assert t.stdout is None
    assert t.stderr is None
    assert t.exit_code is None
    assert t.tag is None
    assert t.path is None

    assert t.parent_pipeline['uid'] is None
    assert t.parent_pipeline['name'] is None
    assert t.parent_stage['uid'] is None
    assert t.parent_stage['name'] is None
# ------------------------------------------------------------------------------
#
@given(s=st.text(),
       l=st.lists(st.text()),
       i=st.integers().filter(lambda x: type(x) == int),
       b=st.booleans())
def test_task_exceptions(s, l, i, b):
    '''
    **Purpose**: Test if all attribute assignments raise exceptions
                 for invalid values
    '''

    t = Task()

    data_type = [s, l, i, b]

    for data in data_type:

        # special case due to backward compatibility: executable may be
        # either a str or a list
        if not isinstance(data, str) and \
           not isinstance(data, list):
            with pytest.raises(ree.TypeError):
                t.executable = data

        if not isinstance(data, str):
            with pytest.raises(ree.TypeError): t.name = data
            with pytest.raises(ree.TypeError): t.path = data
            with pytest.raises(ree.TypeError): t.parent_stage = data
            with pytest.raises(ree.TypeError): t.parent_pipeline = data
            with pytest.raises(ree.TypeError): t.stdout = data
            with pytest.raises(ree.TypeError): t.stderr = data

        if not isinstance(data, list):
            with pytest.raises(ree.TypeError): t.pre_exec = data
            with pytest.raises(ree.TypeError): t.arguments = data
            with pytest.raises(ree.TypeError): t.post_exec = data
            with pytest.raises(ree.TypeError): t.upload_input_data = data
            with pytest.raises(ree.TypeError): t.copy_input_data = data
            with pytest.raises(ree.TypeError): t.link_input_data = data
            with pytest.raises(ree.TypeError): t.move_input_data = data
            with pytest.raises(ree.TypeError): t.copy_output_data = data
            with pytest.raises(ree.TypeError): t.download_output_data = data
            with pytest.raises(ree.TypeError): t.move_output_data = data

        # FIX: the original guard repeated `isinstance(data, str)` twice (a
        # py2 str/unicode leftover) and placed all four reqs assignments in
        # a single pytest.raises block, so only the first assignment was
        # ever exercised (raises exits the block at the first exception).
        # Each assignment now gets its own context, mirroring the int block.
        if not isinstance(data, str):
            with pytest.raises(ree.ValueError):
                t.cpu_reqs = {'processes'          : 1,
                              'process_type'       : data,
                              'threads_per_process': 1,
                              'thread_type'        : None}
            with pytest.raises(ree.ValueError):
                t.cpu_reqs = {'processes'          : 1,
                              'process_type'       : None,
                              'threads_per_process': 1,
                              'thread_type'        : data}
            with pytest.raises(ree.ValueError):
                t.gpu_reqs = {'processes'          : 1,
                              'process_type'       : data,
                              'threads_per_process': 1,
                              'thread_type'        : None}
            with pytest.raises(ree.ValueError):
                t.gpu_reqs = {'processes'          : 1,
                              'process_type'       : None,
                              'threads_per_process': 1,
                              'thread_type'        : data}

        if not isinstance(data, int):
            with pytest.raises(ree.TypeError):
                t.cpu_reqs = {'processes'           : data,
                              'process_type'        : None,
                              'threads_per_process' : 1,
                              'thread_type'         : None}
            with pytest.raises(ree.TypeError):
                t.cpu_reqs = {'processes'           : 1,
                              'process_type'        : None,
                              'threads_per_process' : data,
                              'thread_type'         : None}
            with pytest.raises(ree.TypeError):
                t.gpu_reqs = {'processes'           : data,
                              'process_type'        : None,
                              'threads_per_process' : 1,
                              'thread_type'         : None}
            with pytest.raises(ree.TypeError):
                t.gpu_reqs = {'processes'           : 1,
                              'process_type'        : None,
                              'threads_per_process' : data,
                              'thread_type'         : None}
# ------------------------------------------------------------------------------
#
def test_dict_to_task():
    '''
    **Purpose**: Task(from_dict=...) applies the given attributes and
                 type-checks them at construction time.
    '''

    # an invalid attribute type must be rejected immediately
    with pytest.raises(ree.TypeError):
        Task(from_dict={'name': 1})

    spec = {'name'      : 'foo',
            'pre_exec'  : ['bar'],
            'executable': 'buz',
            'arguments' : ['baz', 'fiz'],
            'cpu_reqs'  : {'processes'          : 1,
                           'process_type'       : None,
                           'threads_per_process': 1,
                           'thread_type'        : None},
            'gpu_reqs'  : {'processes'          : 0,
                           'process_type'       : None,
                           'threads_per_process': 0,
                           'thread_type'        : None}}

    task = Task(from_dict=spec)

    # every key in the spec must round-trip onto the attribute of that name
    for k, v in spec.items():
        actual = task.__getattribute__(k)
        assert actual == v, '%s != %s' % (actual, v)
# ------------------------------------------------------------------------------
#
def test_task_to_dict():
    '''
    **Purpose**: Test if the 'to_dict' function of Task class converts all
                 expected attributes of the Task into a dictionary
    '''

    # a freshly constructed Task serializes to all-default values
    t = Task()
    assert t.to_dict() == {'uid'                  : None,
                           'name'                 : None,
                           'state'                : states.INITIAL,
                           'state_history'        : [states.INITIAL],
                           'pre_exec'             : [],
                           'executable'           : None,
                           'arguments'            : [],
                           'post_exec'            : [],
                           'cpu_reqs'             : {'processes'           : 1,
                                                     'process_type'        : None,
                                                     'threads_per_process' : 1,
                                                     'thread_type'         : None},
                           'gpu_reqs'             : {'processes'           : 0,
                                                     'process_type'        : None,
                                                     'threads_per_process' : 0,
                                                     'thread_type'         : None},
                           'lfs_per_process'      : 0,
                           'upload_input_data'    : [],
                           'copy_input_data'      : [],
                           'link_input_data'      : [],
                           'move_input_data'      : [],
                           'copy_output_data'     : [],
                           'move_output_data'     : [],
                           'download_output_data' : [],
                           'stdout'               : None,
                           'stderr'               : None,
                           'exit_code'            : None,
                           'path'                 : None,
                           'tag'                  : None,
                           'parent_stage'         : {'uid' : None, 'name' : None},
                           'parent_pipeline'      : {'uid' : None, 'name' : None}}

    # populate every attribute and serialize again
    t = Task()
    t.uid = 'test.0017'
    t.name = 'new'
    t.pre_exec = ['module load abc']
    t.executable = ['sleep']
    t.arguments = ['10']
    t.cpu_reqs['processes'] = 10
    t.cpu_reqs['threads_per_process'] = 2
    t.gpu_reqs['processes'] = 5
    t.gpu_reqs['threads_per_process'] = 3
    t.lfs_per_process = 1024
    t.upload_input_data = ['test1']
    t.copy_input_data = ['test2']
    t.link_input_data = ['test3']
    t.move_input_data = ['test4']
    t.copy_output_data = ['test5']
    t.move_output_data = ['test6']
    t.download_output_data = ['test7']
    t.stdout = 'out'
    t.stderr = 'err'
    t.exit_code = 1
    t.path = 'a/b/c'
    t.tag = 'task.0010'
    t.parent_stage = {'uid': 's1', 'name': 'stage1'}
    t.parent_pipeline = {'uid': 'p1', 'name': 'pipeline1'}

    # note: the executable assigned as the list ['sleep'] serializes to the
    # plain string 'sleep' (backward-compatibility coercion)
    expected = {'uid'                  : 'test.0017',
                'name'                 : 'new',
                'state'                : states.INITIAL,
                'state_history'        : [states.INITIAL],
                'pre_exec'             : ['module load abc'],
                'executable'           : 'sleep',
                'arguments'            : ['10'],
                'post_exec'            : [],
                'cpu_reqs'             : {'processes'           : 10,
                                          'process_type'        : None,
                                          'threads_per_process' : 2,
                                          'thread_type'         : None},
                'gpu_reqs'             : {'processes'           : 5,
                                          'process_type'        : None,
                                          'threads_per_process' : 3,
                                          'thread_type'         : None},
                'lfs_per_process'      : 1024,
                'upload_input_data'    : ['test1'],
                'copy_input_data'      : ['test2'],
                'link_input_data'      : ['test3'],
                'move_input_data'      : ['test4'],
                'copy_output_data'     : ['test5'],
                'move_output_data'     : ['test6'],
                'download_output_data' : ['test7'],
                'stdout'               : 'out',
                'stderr'               : 'err',
                'exit_code'            : 1,
                'path'                 : 'a/b/c',
                'tag'                  : 'task.0010',
                'parent_stage'         : {'uid': 's1', 'name' : 'stage1'},
                'parent_pipeline'      : {'uid': 'p1', 'name' : 'pipeline1'}}

    assert t.to_dict() == expected

    # assigning the executable as a plain string yields the identical payload
    t.executable = 'sleep'
    assert t.to_dict() == expected
# ------------------------------------------------------------------------------
#
def test_task_from_dict():
    '''
    **Purpose**: Test if the 'from_dict' function of Task class converts a
                 dictionary into a Task correctly with all the expected
                 attributes
    '''

    d = {'uid'                  : 're.Task.0000',
         'name'                 : 't1',
         'state'                : states.DONE,
         'state_history'        : [states.INITIAL, states.DONE],
         'pre_exec'             : [],
         'executable'           : '',
         'arguments'            : [],
         'post_exec'            : [],
         'cpu_reqs'             : {'processes'           : 1,
                                   'process_type'        : None,
                                   'threads_per_process' : 1,
                                   'thread_type'         : None},
         'gpu_reqs'             : {'processes'           : 0,
                                   'process_type'        : None,
                                   'threads_per_process' : 0,
                                   'thread_type'         : None},
         'lfs_per_process'      : 1024,
         'upload_input_data'    : [],
         'copy_input_data'      : [],
         'link_input_data'      : [],
         'move_input_data'      : [],
         'copy_output_data'     : [],
         'move_output_data'     : [],
         'download_output_data' : [],
         'stdout'               : 'out',
         'stderr'               : 'err',
         'exit_code'            : 555,
         'path'                 : 'here/it/is',
         'tag'                  : 'task.0010',
         'parent_stage'         : {'uid': 's1', 'name' : 'stage1'},
         'parent_pipeline'      : {'uid': 'p1', 'name' : 'pipe1'}}

    t = Task()
    t.from_dict(d)

    # uid is stored privately; every other key maps onto the attribute of
    # the same name
    assert t._uid == d['uid']
    for attr in ('name', 'state', 'state_history', 'pre_exec', 'executable',
                 'arguments', 'post_exec', 'cpu_reqs', 'gpu_reqs',
                 'lfs_per_process', 'upload_input_data', 'copy_input_data',
                 'link_input_data', 'move_input_data', 'copy_output_data',
                 'move_output_data', 'download_output_data', 'stdout',
                 'stderr', 'exit_code', 'path', 'tag', 'parent_stage',
                 'parent_pipeline'):
        assert getattr(t, attr) == d[attr]

    # a non-empty string executable round-trips as well
    d['executable'] = 'sleep'
    t = Task()
    t.from_dict(d)
    assert t.executable == d['executable']
# ------------------------------------------------------------------------------
#
def test_task_assign_uid():
    '''
    **Purpose**: A Task created with a clean radical.utils counter state
                 receives the first uid, 'task.0000'.
    '''

    # best-effort cleanup of stale uid-counter state from earlier runs.
    # FIX: the original wrapped everything in a bare `except: pass`, which
    # also swallows SystemExit/KeyboardInterrupt — catch only filesystem
    # errors, and only around the operation that can raise them.
    home = os.environ.get('HOME', '/home')
    for f in glob.glob('%s/.radical/utils/test*' % home):
        try:
            shutil.rmtree(f)
        except OSError:
            pass

    t = Task()
    assert t.uid == 'task.0000'
# ------------------------------------------------------------------------------
#
def test_task_validate():
    '''
    **Purpose**: _validate() rejects a task in an unknown state, and a
                 freshly constructed (unconfigured) task.
    '''

    task = Task()
    task._state = 'test'
    with pytest.raises(ree.ValueError):
        task._validate()

    with pytest.raises(ree.MissingError):
        Task()._validate()
# ------------------------------------------------------------------------------
#
if __name__ == '__main__':

    # allow running the whole suite directly, without pytest
    test_task_initialization()
    test_task_exceptions()
    test_dict_to_task()
    test_task_to_dict()
    test_task_from_dict()
    test_task_assign_uid()
    test_task_validate()
# ------------------------------------------------------------------------------
| 41.041037 | 86 | 0.393801 | 1,640 | 19,002 | 4.328659 | 0.116463 | 0.060149 | 0.056346 | 0.066911 | 0.628117 | 0.551486 | 0.491337 | 0.451613 | 0.415411 | 0.369348 | 0 | 0.014778 | 0.462267 | 19,002 | 462 | 87 | 41.12987 | 0.679977 | 0.068624 | 0 | 0.48 | 0 | 0 | 0.171136 | 0.001307 | 0 | 0 | 0 | 0 | 0.182857 | 1 | 0.02 | false | 0.002857 | 0.025714 | 0 | 0.045714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
560d83927ccbfa6f396f7f59b1122955d5914eea | 3,689 | py | Python | main.py | MNicaretta/picrosssolver | 2919049b6a2beebb47d883ddee7ba830bf809a59 | [
"MIT"
] | null | null | null | main.py | MNicaretta/picrosssolver | 2919049b6a2beebb47d883ddee7ba830bf809a59 | [
"MIT"
] | null | null | null | main.py | MNicaretta/picrosssolver | 2919049b6a2beebb47d883ddee7ba830bf809a59 | [
"MIT"
] | null | null | null | from enum import Enum
class CellState(Enum):
    """State of one picross cell; the value is the display glyph."""
    UNKNOWN = '.'
    EMPTY = ' '
    BOX = '■'

    def __str__(self):
        return self.value


class Cell():
    """A single board cell holding a CellState."""

    def __init__(self, state=CellState.UNKNOWN):
        self.state = state

    def __str__(self):
        # FIX: the original returned the CellState member itself, but
        # __str__ must return a str — str(cell) raised TypeError.
        # Delegate to the state's own string form instead.
        return str(self.state)
class Clue():
    """One picross clue: a run of `value` boxes, of which `filled` have
    already been marked on the board."""

    def __init__(self, value, filled=0):
        self.value, self.filled = value, filled

    def remaining(self):
        """Number of boxes of this run not yet marked."""
        return self.value - self.filled

    def isFilled(self):
        """True once every box of the run has been marked."""
        return not self.remaining()

    def __str__(self):
        return str(self.value)
class Clues():
    """The ordered list of clues for one row or one column."""

    def __init__(self, *clues):
        self.clues = []
        for c in clues:
            self.clues.append(Clue(c))

    def fillMin(self, start, end, mark):
        """Mark the cells every valid placement must contain (overlap rule).

        For a line spanning [start, end), each clue run of length v whose
        length exceeds the free slack must overlap itself in the middle;
        those guaranteed cells are marked via mark(index, CellState.BOX)
        and counted on the clue's `filled`.
        """
        # minimum span of all runs plus one separator gap between each
        space = sum(c.value + 1 for c in self.clues) - 1
        # slack: how far the whole arrangement can slide within the line
        left = end - start - space
        index = 0
        for c in self.clues:
            if c.value > left:
                fill = c.value - left          # guaranteed overlap length
                c.filled += fill
                index += c.value - fill        # skip the uncertain prefix
                for _ in range(fill):
                    mark(index, CellState.BOX)
                    index += 1
                index += 1                     # separator gap after the run
            else:
                index += c.value + 1

    def adjust(self, array):
        """One propagation pass over `array` (the line's Cell objects).

        NOTE(review): the first branch appears to be an unfinished stub —
        the loop only assigns `start`/`end` to 0 and never uses them, so
        no propagation happens while the clues are unfilled; confirm
        intended behavior before relying on solve() terminating.
        """
        if not self.isFilled():
            for i in range(len(array)):
                start = 0
                end = 0
        # once all clue runs are placed, everything still unknown is empty
        if self.isFilled():
            for cell in array:
                if cell.state == CellState.UNKNOWN:
                    cell.state = CellState.EMPTY

    def isFilled(self):
        # all clue runs of this line are completely placed
        return all(c.isFilled() for c in self.clues)
class Picross():
    """A nonogram board: row/column Clues plus a grid of Cells."""

    def __init__(self, rows=None, columns=None):
        # FIX: the original used mutable default arguments (rows=[],
        # columns=[]), which are shared across all calls; use None
        # sentinels instead (backward-compatible).
        self.rows = [] if rows is None else rows
        self.columns = [] if columns is None else columns
        self.board = []
        for _ in range(len(self.rows)):
            array = []
            for _ in range(len(self.columns)):
                array.append(Cell())
            self.board.append(array)

    def isSolved(self):
        """Solved once every row's clues are completely filled."""
        return all(r.isFilled() for r in self.rows)

    def set(self, r, c, state):
        """Set the state of the cell at row r, column c."""
        self.board[r][c].state = state

    def solve(self):
        """Fill the guaranteed cells, then iterate adjustment passes.

        NOTE(review): Clues.adjust never advances the clue fill counts, so
        this loop may not terminate on puzzles fillMin alone cannot solve.
        """
        self.fillMin()
        print(self)
        while not self.isSolved():
            self.adjust()
            print(self)

    def fillMin(self):
        """Apply the overlap rule to every row and every column."""
        # the lambdas close over the loop variable, but they are invoked
        # synchronously inside fillMin, so late binding is not an issue here
        for r in range(len(self.rows)):
            self.rows[r].fillMin(0, len(self.columns),
                                 lambda index, state: self.set(r, index, state))
        for c in range(len(self.columns)):
            self.columns[c].fillMin(0, len(self.rows),
                                    lambda index, state: self.set(index, c, state))

    def adjust(self):
        """Run one adjustment pass over every row and every column."""
        for r in range(len(self.rows)):
            self.rows[r].adjust(self.board[r])
        for c in range(len(self.columns)):
            self.columns[c].adjust(list(map(lambda row: row[c], self.board)))

    def __str__(self):
        # board cells followed by that row's clues, one line per row
        result = ''
        for r in range(len(self.board)):
            for cell in self.board[r]:
                result += str(cell.state) + ' '
            for clue in self.rows[r].clues:
                result += str(clue) + ' '
            result += '\n'
        # column clues printed underneath, one clue rank per line
        has_value = True
        count = 0
        while has_value:
            has_value = False
            for clues in self.columns:
                if len(clues.clues) > count:
                    has_value = True
                    result += str(clues.clues[count]) + ' '
                else:
                    # two-space filler keeps columns aligned with 'digit + space'
                    result += '  '
            result += '\n'
            count += 1
        return result
# Build the 5x5 demo puzzle (first argument: row clues, second: column clues),
# print the initial board, then solve it step by step.
picross = Picross([Clues(1,1,1),Clues(1,1),Clues(1,2),Clues(5),Clues(1,1,1)],
                  [Clues(1,2), Clues(3), Clues(1,2),Clues(3),Clues(1,3)])
print(picross)
picross.solve()
| 25.797203 | 100 | 0.500407 | 457 | 3,689 | 3.956236 | 0.150985 | 0.044248 | 0.044248 | 0.054204 | 0.224004 | 0.126659 | 0.097345 | 0.079646 | 0.079646 | 0.079646 | 0 | 0.014249 | 0.372188 | 3,689 | 142 | 101 | 25.978873 | 0.765976 | 0 | 0 | 0.185185 | 0 | 0 | 0.003253 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.009259 | 0.064815 | 0.324074 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
560df272ec26702a46aac33cc6e289b61e0a8412 | 3,055 | py | Python | legonet_pytorch/module.py | max-liulin/CV-Backbones | b32239d10126c8f84e6f6283b95b42b3b60b1a06 | [
"Apache-2.0"
] | 220 | 2019-11-27T03:02:14.000Z | 2020-02-26T14:08:41.000Z | legonet_pytorch/module.py | vickyqi7/CV-Backbones | 1262dacffdea62f9983ef0231177aea720e25f12 | [
"Apache-2.0"
] | 3 | 2019-12-10T15:00:57.000Z | 2020-02-02T12:02:47.000Z | legonet_pytorch/module.py | vickyqi7/CV-Backbones | 1262dacffdea62f9983ef0231177aea720e25f12 | [
"Apache-2.0"
] | 35 | 2019-11-28T05:21:50.000Z | 2020-02-26T13:46:11.000Z | '''
Copyright (C) 2016. Huawei Technologies Co., Ltd. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the MIT license.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
MIT License for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import numpy as np
import torchvision
class LegoConv2d(nn.Module):
    """Conv2d built from a shared bank of 'lego' basic filters (LegoNet).

    The input channels are split into `n_split` groups; every group is
    convolved with the shared bank of `n_lego` basic filters, and each
    group's bank responses are recombined per output channel by a 1x1
    convolution whose weights are a hard (one-hot) filter selection scaled
    by learned coefficients.
    """

    def __init__(self, in_channels, out_channels, kernel_size, n_split, n_lego):
        super(LegoConv2d, self).__init__()
        self.in_channels, self.out_channels, self.kernel_size, self.n_split = in_channels, out_channels, kernel_size, n_split
        # channels per input group
        self.basic_channels = in_channels // self.n_split
        # n_lego is given as a fraction of out_channels
        self.n_lego = int(self.out_channels * n_lego)
        # shared bank of basic filters: (n_lego, basic_channels, k, k)
        self.lego = nn.Parameter(nn.init.kaiming_normal_(torch.rand(self.n_lego, self.basic_channels, self.kernel_size, self.kernel_size)))
        # per-(split, out-channel) scale and soft selection logits over the bank
        self.aux_coefficients = nn.Parameter(init.kaiming_normal_(torch.rand(self.n_split, self.out_channels, self.n_lego, 1, 1)))
        self.aux_combination = nn.Parameter(init.kaiming_normal_(torch.rand(self.n_split, self.out_channels, self.n_lego, 1, 1)))

    def forward(self, x):
        # Straight-through-style hard selection: the forward pass uses the
        # argmax one-hot of aux_combination; the gradient lands on this proxy
        # and is copied back onto aux_combination in copy_grad().
        self.proxy_combination = torch.zeros(self.aux_combination.size()).to(self.aux_combination.device)
        self.proxy_combination.scatter_(2, self.aux_combination.argmax(dim=2, keepdim=True), 1)
        self.proxy_combination.requires_grad = True

        out = 0
        for i in range(self.n_split):
            # convolve the i-th channel group with the shared lego bank
            lego_feature = F.conv2d(x[:, i * self.basic_channels: (i + 1) * self.basic_channels], self.lego, padding=self.kernel_size // 2)
            # 1x1 recombination: selected lego response scaled per output map
            kernel = self.aux_coefficients[i] * self.proxy_combination[i]
            out = out + F.conv2d(lego_feature, kernel)
        return out

    def copy_grad(self, balance_weight):
        """Copy the proxy's gradient onto aux_combination, plus a balance
        term nudging every lego filter toward equal usage frequency.

        Must be called after backward() of a forward() pass.
        """
        self.aux_combination.grad = self.proxy_combination.grad

        # balance loss: current argmax selection of each (split, out-channel)
        # slot, flattened into one index array over the lego bank.
        # FIX: the original also computed np.unique(idxs)/np.unique(count)
        # and tracked max_freq/min_freq, none of which were ever used —
        # that dead work is removed here.
        idxs = self.aux_combination.argmax(dim=2).view(-1).cpu().numpy()
        avg_freq = (self.n_split * self.out_channels) / self.n_lego
        lo, hi = np.floor(avg_freq), np.ceil(avg_freq)
        for i in range(self.n_lego):
            i_freq = (idxs == i).sum().item()
            if lo <= i_freq <= hi:
                continue
            if i_freq < lo:
                # under-used filter: decrease its gradient — presumably so a
                # descent step raises its logits and it gets selected more
                self.aux_combination.grad[:, :, i] = self.aux_combination.grad[:, :, i] - balance_weight * (lo - i_freq)
            else:
                # over-used filter: increase its gradient symmetrically
                self.aux_combination.grad[:, :, i] = self.aux_combination.grad[:, :, i] + balance_weight * (i_freq - hi)
| 50.081967 | 144 | 0.67365 | 451 | 3,055 | 4.341463 | 0.277162 | 0.030644 | 0.091931 | 0.05618 | 0.336568 | 0.31001 | 0.244637 | 0.19714 | 0.159346 | 0.141982 | 0 | 0.010004 | 0.21473 | 3,055 | 60 | 145 | 50.916667 | 0.806169 | 0.134861 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.261905 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
560fa46d7efbe40812073452461efabc1cf83295 | 3,719 | py | Python | pychron/dvc/util.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 31 | 2016-03-07T02:38:17.000Z | 2022-02-14T18:23:43.000Z | pychron/dvc/util.py | ASUPychron/pychron | dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76 | [
"Apache-2.0"
] | 1,626 | 2015-01-07T04:52:35.000Z | 2022-03-25T19:15:59.000Z | pychron/dvc/util.py | UIllinoisHALPychron/pychron | f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc | [
"Apache-2.0"
] | 26 | 2015-05-23T00:10:06.000Z | 2022-03-07T16:51:57.000Z | # ===============================================================================
# Copyright 2019 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= EOF =============================================
from uncertainties import ufloat
from pychron.dvc import analysis_path, dvc_dump
from pychron.processing.interpreted_age import InterpretedAge
class Tag(object):
    """In-memory analysis tag, persisted as a small dict via dvc_dump."""

    # class-level defaults; instances shadow these on assignment
    name = None
    path = None
    note = ""
    subgroup = ""
    uuid = ""
    record_id = ""

    @classmethod
    def from_analysis(cls, an, **kw):
        """Build a Tag from an analysis object; extra kwargs override fields."""
        tag = cls()
        for attr, value in (("name", an.tag),
                            ("note", an.tag_note),
                            ("record_id", an.record_id),
                            ("uuid", an.uuid),
                            ("repository_identifier", an.repository_identifier),
                            ("path", analysis_path(an, an.repository_identifier,
                                                   modifier="tags")),
                            ("subgroup", an.subgroup)):
            setattr(tag, attr, value)

        for attr, value in kw.items():
            setattr(tag, attr, value)
        return tag

    def dump(self):
        """Write name/note/subgroup to the tag path (derived when unset)."""
        payload = {"name": self.name,
                   "note": self.note,
                   "subgroup": self.subgroup}
        if not self.path:
            self.path = analysis_path(self.uuid, self.repository_identifier,
                                      modifier="tags", mode="w")
        dvc_dump(payload, self.path)
class DVCInterpretedAge(InterpretedAge):
    """InterpretedAge flavor hydrated from a DVC interpreted-age JSON blob."""

    labnumber = None
    isotopes = None
    repository_identifier = None
    analyses = None

    def load_tag(self, obj):
        # obj is a tag dict as written by Tag.dump (keys: name, note, subgroup)
        self.tag = obj.get("name", "")
        self.tag_note = obj.get("note", "")

    def from_json(self, obj):
        """Populate this object from an interpreted-age JSON dict.

        Expects top-level keys 'preferred' and 'sample_metadata'; missing
        optional fields fall back to "" (strings) or 0 (numbers).
        """
        for attr in ("name", "uuid"):
            setattr(self, attr, obj.get(attr, ""))

        pf = obj["preferred"]
        for attr in ("age", "age_err"):
            setattr(self, attr, pf.get(attr, 0))

        sm = obj["sample_metadata"]
        for attr in ("sample", "material", "project", "irradiation"):
            setattr(self, attr, sm.get(attr, ""))

        # legacy bulk-attribute loader, kept for reference:
        # for a in ('age', 'age_err', 'age_kind',
        #           # 'kca', 'kca_err','kca_kind',
        #           'mswd',
        #           'sample', 'material', 'identifier', 'nanalyses', 'irradiation',
        #           'name', 'project', 'uuid', 'age_error_kind'):
        #     try:
        #         setattr(self, a, obj.get(a, NULL_STR))
        #     except BaseException as a:
        #         print('exception DVCInterpretdAge.from_json', a)

        # identifier is presumably provided by the InterpretedAge base — confirm
        self.labnumber = self.identifier
        # self.uage = ufloat(self.age, self.age_err)
        self._record_id = "{} {}".format(self.identifier, self.name)
        self.analyses = obj.get("analyses", [])

        pkinds = pf.get("preferred_kinds")
        if pkinds:
            for k in pkinds:
                attr = k["attr"]
                # the preferred 'age' entry is stored as the
                # uncertainty-carrying 'uage' attribute
                if attr == "age":
                    attr = "uage"

                setattr(self, attr, ufloat(k["value"], k["error"]))

    def get_value(self, attr):
        # missing attributes read as a zero-valued ufloat rather than raising
        try:
            return getattr(self, attr)
        except AttributeError:
            return ufloat(0, 0)

    @property
    def status(self):
        # 'X' marks an omitted record in tabular displays
        return "X" if self.is_omitted() else ""
560fcacc1a61a2c757a12a88e6d3d993d2d1a1d4 | 3,214 | py | Python | processor/gen_record.py | Princeton-Penn-Vents/princeton-penn-flowmeter | 85a5ca8357ca34e0b543fa1489d48ecbc8023294 | [
"MIT"
] | 3 | 2020-04-14T10:45:12.000Z | 2022-01-06T16:40:30.000Z | processor/gen_record.py | Princeton-Penn-Vents/princeton-penn-flowmeter | 85a5ca8357ca34e0b543fa1489d48ecbc8023294 | [
"MIT"
] | 36 | 2020-04-05T16:23:33.000Z | 2020-10-02T02:58:21.000Z | processor/gen_record.py | Princeton-Penn-Vents/princeton-penn-flowmeter | 85a5ca8357ca34e0b543fa1489d48ecbc8023294 | [
"MIT"
] | 1 | 2020-04-05T13:18:47.000Z | 2020-04-05T13:18:47.000Z | from __future__ import annotations
from typing import Optional
from dataclasses import dataclass
from logging import Logger
from processor.device_names import address_to_name
@dataclass
class GenRecord:
    """Identity record for one sensor box: MAC address, sensor id, box name
    and the nurse-station display title.  Every setter logs the change and
    fires a *_changed hook, which subclasses may override."""

    logger: Logger
    _mac: Optional[str] = None
    _sid: int = 0
    _name: Optional[str] = None

    # Nurse-station only; kept on the base class to keep the hierarchy simple
    _nurse_name: str = ""

    @property
    def safemac(self) -> str:
        """Returns a safe filename (Windows does not allow colons)"""
        allowed = (".", "_")
        kept = [ch for ch in self.mac if ch.isalnum() or ch in allowed]
        return "".join(kept).rstrip()

    @property
    def mac(self) -> str:
        """The MAC address, or ``<unknown>`` when none has been seen yet."""
        if self._mac is None:
            return "<unknown>"
        return self._mac

    @mac.setter
    def mac(self, value: str):
        if self._mac is None or value != self._mac:
            self._mac = value
            self.logger.info(f"MAC addr: {self._mac}")
            self.mac_changed()

    @property
    def sid(self) -> int:
        """Sensor ID, as an integer. Printout with "X" format."""
        return self._sid

    @sid.setter
    def sid(self, value: int):
        if value != self._sid:
            self._sid = value
            self.logger.info(f"Sensor ID: {self._sid:X}")
            self.sid_changed()

    @property
    def box_name(self) -> str:
        """Human-readable box name; ``<unknown>`` when nothing is known."""
        if self._name is not None:
            return self._name
        if self._mac is None:
            return "<unknown>"
        try:
            pretty = address_to_name(self._mac)
        except ValueError:
            return self.mac
        return pretty.title()

    @box_name.setter
    def box_name(self, value: Optional[str]):
        if value != self._name:
            self._name = value
            self.logger.info(f"Box name: {self._name}")
            self.mac_changed()

    @property
    def stacked_name(self) -> str:
        """Box name with its words stacked on newlines; a fixed
        ``Box name\\n<unknown>`` when the MAC is missing or all-zero."""
        if self._mac is None or self._mac == "00:00:00:00:00:00":
            return "Box name\n<unknown>"
        try:
            words = address_to_name(self._mac).title().split()
        except ValueError:
            return self.mac
        return "\n".join(words)

    @property
    def title(self) -> str:
        """Dialog title set by the nurse station (empty until assigned)."""
        return self._nurse_name

    @title.setter
    def title(self, value: str):
        if self._nurse_name is None or value != self._nurse_name:
            self._nurse_name = value
            self.logger.info(f"Changed title to {self._nurse_name!r}")
            self.title_changed()

    def title_changed(self) -> None:
        """Hook: override in subclasses to react to title changes."""

    def mac_changed(self) -> None:
        """Hook: override in subclasses to react to MAC / box-name changes."""

    def sid_changed(self) -> None:
        """Hook: override in subclasses to react to sensor-id changes."""
56115c228ac0c726ff0df0045669becb3ae31e8a | 22,024 | py | Python | inputs/transformers.py | aayushk614/DTI | f0338918144c0efbb79556ac8e81cbcefc70e22f | [
"MIT"
] | null | null | null | inputs/transformers.py | aayushk614/DTI | f0338918144c0efbb79556ac8e81cbcefc70e22f | [
"MIT"
] | null | null | null | inputs/transformers.py | aayushk614/DTI | f0338918144c0efbb79556ac8e81cbcefc70e22f | [
"MIT"
] | null | null | null | from __future__ import print_function
import math
import nibabel as nib
import nrrd
import numpy as np
import operator
import os
import random
import torch
import warnings
from functools import reduce
from inputs import Image, ImageType
CHANNEL, DEPTH, HEIGHT, WIDTH = 0, 1, 2, 3
class ToNDTensor(object):
    """
    Creates a torch.Tensor object from a numpy array.

    3D (DxHxW) arrays gain a trailing singleton channel axis, producing a
    (DxHxWx1) tensor; 4D (CxDxHxW) arrays are converted as-is.  (The original
    docstring claimed a transposition that the code never performed.)
    """

    # noinspection PyArgumentList
    def __call__(self, nd_array):
        """
        :param nd_array: A 3D or 4D numpy array to convert to torch.Tensor
        :return: A torch.Tensor of size (DxHxWx1) or (CxDxHxW)
        :raises TypeError: if the input is not a numpy ndarray.
        :raises NotImplementedError: if the array is not 3D or 4D.
        """
        if not isinstance(nd_array, np.ndarray):
            # Fixed typo in the original message ("supporter").
            raise TypeError("Only {} are supported".format(np.ndarray))
        if nd_array.ndim == 3:
            # Append a singleton channel axis: (D, H, W) -> (D, H, W, 1).
            nd_tensor = torch.Tensor(nd_array.reshape(nd_array.shape + (1,)))
        elif nd_array.ndim == 4:
            nd_tensor = torch.Tensor(nd_array)
        else:
            raise NotImplementedError("Only 3D or 4D arrays are supported")
        return nd_tensor

    def __repr__(self):
        return self.__class__.__name__ + '()'
class ToNiftiFile(object):
    """
    Creates a Nifti1Image from a given numpy ndarray
    The numpy arrays are transposed to respect the standard Nifti dimensions (WxHxDxC)
    """

    def __init__(self, file_path, affine):
        # Destination path of the saved file; parent dirs are created on call.
        self._file_path = file_path
        # Affine transform stored into the Nifti image header.
        self._affine = affine

    def __call__(self, nd_array):
        # Save *nd_array* as a Nifti file at the configured path.
        if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]):
            raise TypeError(
                "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported")
        output_dir = os.path.dirname(self._file_path)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        # NOTE(review): 6/9 channels look like tensor data that is moved
        # channel-last; anything else is assumed to have a leading singleton
        # axis and is squeezed. np.squeeze(axis=0) raises unless that axis is
        # size 1 — confirm callers guarantee this.
        if nd_array.shape[0] not in [6, 9]:
            nd_array = np.squeeze(nd_array, axis=0)
        else:
            # Channel-first -> channel-last: (CxDxHxW) -> (DxHxWxC).
            nd_array = np.moveaxis(nd_array, 0, 3)
        nifti1_file = nib.Nifti1Image(nd_array, self._affine)
        nib.save(nifti1_file, self._file_path)

    def __repr__(self):
        return self.__class__.__name__ + '()'
class ToNrrdFile(object):
    """
    Serializes a 3D (DxHxW) or 4D (CxDxHxW) ndarray to a .NRRD file.

    Channels are moved to the last axis to match the NRRD (WxHxDxC) layout,
    and a minimal raw/little-endian header is generated from the array.
    """

    def __init__(self, file_path):
        self._file_path = file_path

    def __call__(self, nd_array):
        if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]):
            raise TypeError(
                "Only 3D (DxHxW) or 4D (CxDxHxW) {} are supported".format(np.ndarray))
        target_dir = os.path.dirname(self._file_path)
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        nrrd.write(self._file_path, np.moveaxis(nd_array, 0, 3),
                   header=self._create_header_from(nd_array))

    def __repr__(self):
        return self.__class__.__name__ + '()'

    @staticmethod
    def _create_header_from(nd_array):
        """Build an NRRD header dict describing *nd_array*."""
        if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]):
            raise TypeError(
                "Only 3D (DxHxW) or 4D (CxDxHxW) {} are supported".format(np.ndarray))
        kinds = ['domain', 'domain', 'domain']
        if nd_array.ndim == 4:
            # The extra axis holds tensor components, not a spatial domain.
            kinds = kinds + ['3D-matrix']
        return {
            'type': nd_array.dtype,
            'dimension': nd_array.ndim,
            'sizes': nd_array.shape,
            'kinds': kinds,
            'endian': 'little',
            'encoding': 'raw',
        }
class ToNumpyArray(object):
    """
    Creates a numpy ndarray from a given Nifti or NRRD image file path.
    The numpy arrays are transposed to respect the standard dimensions (DxHxW) for 3D or (CxDxHxW) for 4D arrays.
    """

    def __call__(self, image_path):
        """Load *image_path* and return its data as a channel-first ndarray.

        :param image_path: path to a Nifti or NRRD image file.
        :return: ndarray shaped (1xDxHxW) for 3D images or (CxDxHxW) for 4D.
        :raises NotImplementedError: for unsupported file types.
        """
        if Image.is_nifti(image_path):
            # The original also read the private `_affine` attribute into an
            # unused local; removed.
            nd_array = nib.load(image_path).get_fdata().__array__()
        elif Image.is_nrrd(image_path):
            # nrrd.read returns (data, header); the header is not needed here.
            nd_array, _ = nrrd.read(image_path)
        else:
            raise NotImplementedError(
                "Only {} files are supported !".format(ImageType.ALL))
        if nd_array.ndim == 3:
            # Add a singleton channel axis and move it first:
            # (D, H, W) -> (1, D, H, W).
            nd_array = np.moveaxis(np.expand_dims(nd_array, 3), 3, 0)
        elif nd_array.ndim == 4:
            # Channel-last file layout -> channel-first (C, D, H, W).
            nd_array = np.moveaxis(nd_array, 3, 0)
        return nd_array

    def __repr__(self):
        return self.__class__.__name__ + '()'
class ToUniqueTensorValues(object):
    """
    Keeps only the unique components of each 3x3 diffusion tensor.

    A full tensor stored as 9 channels contains redundant entries; channels
    (0, 1, 2, 4, 5, 8) — the upper triangle in row-major order — carry all
    six distinct values.  (The original docstring was copy-pasted from the
    file-loading transformer; fixed.)
    """

    UNIQUE_TENSOR_VALUES_INDEX = [0, 1, 2, 4, 5, 8]

    def __call__(self, nd_array):
        """
        :param nd_array: 4D (CxDxHxW) ndarray with exactly 9 channels.
        :return: the same volume restricted to the 6 unique channels.
        :raises TypeError: for any other input.
        """
        # '!=' replaces the original "is not" identity test against an int
        # literal, which is implementation-dependent (SyntaxWarning on 3.8+).
        if not isinstance(nd_array, np.ndarray) or nd_array.ndim != 4 or nd_array.shape[0] != 9:
            raise TypeError(
                "Only 4D (CxDxHxW) {} with 9 channels are supported".format(np.ndarray))
        return nd_array[self.UNIQUE_TENSOR_VALUES_INDEX, :, :, :]

    def __repr__(self):
        return self.__class__.__name__ + '()'
class ToLogEuclidean(object):
    """
    Convert a DTI image in the Log-Euclidean space.
    To convert the DTI image into the Log-Euclidean space, the eigen-decomposition of each tensor is performed and the
    log of the eigen-values is computed.
    It can mathematically be expressed as follow: log(D) = Ulog(V)U.T where D is a tensor, U is a matrix of eigen-vector
    and V a diagonal matrix of eigen-values.
    Based on: Arsigny, V., Fillard, P., Pennec, X., & Ayache, N. (2006). Log-Euclidean metrics for fast and
    simple calculus on diffusion tensors https://www.ncbi.nlm.nih.gov/pubmed/16788917
    """

    def __call__(self, nd_array):
        """
        :param nd_array: The DTI image as a nd array of dimension (CxDxHxW) with 9 channels.
        :return: The DTI image in the log-Euclidean space, same shape as the input.
        :raises TypeError: for any other input.
        """
        # eigh/log emit RuntimeWarnings on degenerate tensors; silence them.
        warnings.filterwarnings('ignore')
        if not isinstance(nd_array, np.ndarray) or nd_array.ndim != 4 or nd_array.shape[0] != 9:
            raise TypeError(
                "Only 4D (CxDxHxW) {} with 9 channels are supported".format(np.ndarray))
        # View each voxel's 9 channels as a 3x3 matrix: (9, D, H, W) -> (3, 3, N).
        image_as_vector = nd_array.reshape(
            (3, 3, reduce(operator.mul, nd_array.shape[1:], 1)))
        output = np.zeros(image_as_vector.shape, dtype='float32')
        # BUG FIX: apply() previously had no return statement, so chaining
        # .reshape() on its result raised AttributeError on None.
        return self.apply(image_as_vector, output).reshape(nd_array.shape)

    def __repr__(self):
        return self.__class__.__name__ + '()'

    @staticmethod
    def apply(image_vector, output):
        """Write log(D) = U log(V) U^-1 for every tensor into *output*.

        :param image_vector: tensors as a (3, 3, N) array.
        :param output: pre-allocated (3, 3, N) array receiving the result.
        :return: *output*, filled in place.
        """
        index = 0
        while index < image_vector.shape[2]:
            diffusion_tensor = image_vector[:, :, index]
            # Does not convert the background tensors to log-euclidean
            if np.any(diffusion_tensor):
                eig_val, eig_vec = np.linalg.eigh(diffusion_tensor)
                output[:, :, index] = np.dot(np.dot(np.ascontiguousarray(eig_vec), np.diag(np.log(eig_val))),
                                             np.ascontiguousarray(np.linalg.inv(eig_vec)))
            else:
                output[:, :, index] = diffusion_tensor
            index = index + 1
        return output

    @staticmethod
    def undo(image_vector, output):
        """Inverse mapping exp(D) back to the Euclidean space.

        :param image_vector: log-Euclidean tensors as a (3, 3, N) array.
        :param output: pre-allocated (3, 3, N) array receiving the result.
        :return: *output*, filled in place.
        """
        index = 0
        while index < image_vector.shape[2]:
            log_euclidean_diffusion_tensor = image_vector[:, :, index]
            # Due to noise, negative eigenvalues can arise. Those noisy tensors cannot be converted back to Euclidean.
            if np.any(log_euclidean_diffusion_tensor) and not np.isnan(log_euclidean_diffusion_tensor).any():
                eig_val, eig_vec = np.linalg.eigh(
                    log_euclidean_diffusion_tensor)
                output[:, :, index] = np.dot(np.dot(np.ascontiguousarray(eig_vec), np.diag(np.exp(eig_val))),
                                             np.ascontiguousarray(np.linalg.inv(eig_vec)))
            else:
                output[:, :, index] = log_euclidean_diffusion_tensor
            index = index + 1
        return output
class InterpolateNSDTensors(object):
    """
    Interpolates Negative Semi-Definite tensors using trilinear interpolation.
    It computes a weighted sum of the NSD tensors' neighbors in the Log-Euclidean domain.
    """

    def __call__(self, log_euclidean_nd_array):
        """Replace each NaN-marked (NSD) voxel by a weighted mean of neighbors.

        :param log_euclidean_nd_array: 4D (CxDxHxW) log-Euclidean image where
            NSD voxels are marked by NaNs (checked in the last channel).
        :return: the same array with NSD voxels filled in (modified in place).
        :raises TypeError: for anything that is not a 4D ndarray.
        """
        # '!=' replaces the original "is not" identity test on an int literal;
        # the message previously formatted np.ndarray.dtype (a descriptor).
        if not isinstance(log_euclidean_nd_array, np.ndarray) or log_euclidean_nd_array.ndim != 4:
            raise TypeError("Only {} are supported".format(np.ndarray))
        d_index, h_index, w_index = np.where(
            np.isnan(log_euclidean_nd_array[-1, :, :, :]))
        for index in list(zip(d_index, h_index, w_index)):
            neighbors = self._get_tri_linear_neighbors_and_weights(
                index, log_euclidean_nd_array)
            # Explicit float array for the weights (the original divided a
            # tuple, relying on numpy-scalar broadcasting).  If no valid
            # neighbor exists all weights are 0 and the result is NaN, as
            # in the original.
            weights = np.asarray(neighbors[1], dtype=float)
            log_euclidean_nd_array[:, index[0], index[1], index[2]] = np.dot(
                np.array(neighbors[0]).T, weights / np.sum(weights))
        return log_euclidean_nd_array

    def _get_tri_linear_neighbors_and_weights(self, nsd_index, log_euclidean_nd_array):
        """
        Gets the 8 neighbors of the NSD tensors from which to interpolate. The weight associated with each neighbor
        is inversely proportional to the distance between the interpolated tensor and the neighbor.
        :param nsd_index: The index of the NSD tensor.
        :param log_euclidean_nd_array: The log euclidean image as numpy ndarray
        :return: A list of the 8 corner neighbors and their associated weights in separated tuples.
        """
        front, left, down = -1, -1, -1
        back, right, up = 1, 1, 1
        directions = [(front, left, down), (front, left, up), (back, left, down), (back, left, up),
                      (front, right, down), (front, right, up), (back, right, up), (back, right, down)]
        neighbors_and_weights = list(map(lambda direction:
                                         self._get_closest_neighbor_of(
                                             log_euclidean_nd_array, nsd_index, direction),
                                         directions))
        # Transpose [(n1, w1), ...] into [(n1, ..., n8), (w1, ..., w8)].
        return list(zip(*neighbors_and_weights))

    @staticmethod
    def _get_closest_neighbor_of(log_euclidean_nd_array, nsd_index, direction):
        """
        Gets the closest non-NSD tensor to the nsd_index and its weight following a given direction.
        The associated weight is 1/distance, where the distance is the distance from the neighbor and the nsd_index.
        :param log_euclidean_nd_array: The log-euclidean image as ndarray.
        :param nsd_index: The index of the NSD tensor to interpolate.
        :param direction: The direction in which the neighbor is searched.
        :return: The closest neighbor as a 9 values vector and its associated weight.
        """
        distance = 1
        neighbor = None
        try:
            while neighbor is None:
                d, h, w = tuple(((np.array(direction) * distance) + nsd_index))
                # Strict bounds: voxels on the volume border are never used
                # as neighbors (original behavior, preserved).
                if 0 < d < log_euclidean_nd_array.shape[1] and 0 < h < log_euclidean_nd_array.shape[2] and 0 < w < \
                        log_euclidean_nd_array.shape[3]:
                    potential_neighbor = log_euclidean_nd_array[:, d, h, w]
                else:
                    raise IndexError
                if not np.isnan(potential_neighbor).any():
                    neighbor = potential_neighbor
                else:
                    # Skip NaN neighbors and keep searching along the ray.
                    distance = distance + 1
            weight = 1 / distance
        except IndexError:
            # Ran off the volume: contribute nothing (zero vector, weight 0).
            neighbor = np.zeros(log_euclidean_nd_array.shape[0])
            weight = 0
        return neighbor, weight

    def __repr__(self):
        return self.__class__.__name__ + '()'
class CropToContent(object):
    """
    Crops the image to its content.
    The content's bounding box is defined by the first non-zero slice in each direction (D, H, W)
    """

    def __call__(self, nd_array):
        """Crop a 3D (DxHxW) or 4D (CxDxHxW) array to its non-zero bounding box."""
        if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]):
            raise TypeError(
                "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported")
        c, d_min, d_max, h_min, h_max, w_min, w_max = self.extract_content_bounding_box_from(
            nd_array)
        # NOTE(review): the max indices are used as *exclusive* slice bounds,
        # so the last non-zero slice in each direction is dropped — this 4D
        # behavior is preserved as-is; confirm it is intentional.
        return nd_array[:, d_min:d_max, h_min:h_max, w_min:w_max] if nd_array.ndim == 4 else \
            nd_array[d_min:d_max, h_min:h_max, w_min:w_max]

    def __repr__(self):
        return self.__class__.__name__ + '()'

    @staticmethod
    def extract_content_bounding_box_from(nd_array):
        """
        Computes the D, H, W min and max values defining the content bounding box.
        :param nd_array: The input image as a numpy ndarray (3D or 4D).
        :return: The leading-axis size and the D, H, W min and max values of the bounding box.
        """
        # BUG FIX: the original reduced over axes (2, 3)/(1, 3)/(1, 2) even
        # for 3D inputs, which raised an AxisError; 3D arrays now get a
        # temporary leading channel axis so the same reductions apply.
        volume = nd_array if nd_array.ndim == 4 else np.expand_dims(nd_array, 0)
        depth_slices = np.any(volume, axis=(2, 3))
        height_slices = np.any(volume, axis=(1, 3))
        width_slices = np.any(volume, axis=(1, 2))
        d_min, d_max = np.where(depth_slices)[1][[0, -1]]
        h_min, h_max = np.where(height_slices)[1][[0, -1]]
        w_min, w_max = np.where(width_slices)[1][[0, -1]]
        # shape[0] is the channel axis for 4D input (CHANNEL == 0 at module
        # level); for 3D input it is the depth, as in the original.
        return nd_array.shape[0], d_min, d_max, h_min, h_max, w_min, w_max
class PadToShape(object):
    """Symmetrically pads an array up to a target shape with a constant value.

    ``undo`` crops the padding back off, recovering the original shape.
    """

    def __init__(self, target_shape, padding_value=0, isometric=False):
        """
        :param target_shape: shape to pad to, (DxHxW) or (CxDxHxW).
        :param padding_value: constant fill value.
        :param isometric: if True, both depth and width use the larger of the
            two so the spatial extent is square in that plane.
        """
        self._padding_value = padding_value
        if isometric:
            largest_dimension = max(target_shape[DEPTH], target_shape[WIDTH])
            self._target_shape = (
                target_shape[CHANNEL], largest_dimension, target_shape[HEIGHT], largest_dimension)
        else:
            self._target_shape = target_shape

    def __call__(self, nd_array):
        if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]):
            raise TypeError(
                "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported")
        # '!=' replaces the original "is not" identity test on an int value.
        elif nd_array.ndim != len(self._target_shape):
            raise ValueError(
                "The input image and target shape's dimension does not match {} vs {}".format(nd_array.ndim,
                                                                                              len(self._target_shape)))
        return self.apply(nd_array, self._target_shape, self._padding_value)

    def __repr__(self):
        return self.__class__.__name__ + '()'

    @staticmethod
    def apply(nd_array, target_shape, padding_value):
        """Pad *nd_array* up to *target_shape*; never shrinks a dimension."""
        deltas = tuple(max(0, target - current)
                       for target, current in zip(target_shape, nd_array.shape))
        if nd_array.ndim == 3:
            nd_array = np.pad(nd_array, ((math.floor(deltas[0] / 2), math.ceil(deltas[0] / 2)),
                                         (math.floor(deltas[1] / 2),
                                          math.ceil(deltas[1] / 2)),
                                         (math.floor(deltas[2] / 2), math.ceil(deltas[2] / 2))),
                              'constant', constant_values=padding_value)
        elif nd_array.ndim == 4:
            # The channel axis is never padded.
            nd_array = np.pad(nd_array, ((0, 0),
                                         (math.floor(deltas[1] / 2),
                                          math.ceil(deltas[1] / 2)),
                                         (math.floor(deltas[2] / 2),
                                          math.ceil(deltas[2] / 2)),
                                         (math.floor(deltas[3] / 2), math.ceil(deltas[3] / 2))),
                              'constant', constant_values=padding_value)
        return nd_array

    @staticmethod
    def undo(nd_array, original_shape):
        """Crop centered padding off, recovering *original_shape*."""
        deltas = tuple(max(0, current - target)
                       for target, current in zip(original_shape, nd_array.shape))
        # BUG FIX: when a delta is 0 the original slice was [0:-0] == [0:0],
        # an *empty* slice; "or None" turns a zero end bound into an
        # open-ended slice so unpadded dimensions pass through unchanged.
        if nd_array.ndim == 3:
            nd_array = nd_array[
                math.floor(deltas[0] / 2):-math.ceil(deltas[0] / 2) or None,
                math.floor(deltas[1] / 2):-math.ceil(deltas[1] / 2) or None,
                math.floor(deltas[2] / 2):-math.ceil(deltas[2] / 2) or None]
        elif nd_array.ndim == 4:
            nd_array = nd_array[
                :,
                math.floor(deltas[1] / 2):-math.ceil(deltas[1] / 2) or None,
                math.floor(deltas[2] / 2):-math.ceil(deltas[2] / 2) or None,
                math.floor(deltas[3] / 2):-math.ceil(deltas[3] / 2) or None]
        return nd_array
class RandomFlip(object):
    """Randomly flips a 3D (DxHxW) or 4D (CxDxHxW) array along each spatial axis."""

    def __init__(self, exec_probability):
        # Per-axis probability of performing a flip.
        self._exec_probability = exec_probability

    def __call__(self, nd_array):
        if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]):
            raise TypeError(
                "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported")
        # Each of the three spatial axes is flipped independently.
        for axis in (0, 1, 2):
            if random.uniform(0, 1) <= self._exec_probability:
                nd_array = self.apply(nd_array, [axis])
        return nd_array

    def __repr__(self):
        return self.__class__.__name__ + '()'

    @staticmethod
    def apply(nd_array, axes):
        """Flip along each axis in *axes*; for 4D input the axes index the
        (D, H, W) dimensions of each channel."""
        if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]):
            raise TypeError(
                "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported")
        for axis in axes:
            # '==' replaces the original "is" identity test on an int literal.
            if nd_array.ndim == 3:
                nd_array = np.flip(nd_array, axis)
            else:
                channels = [np.flip(nd_array[c], axis)
                            for c in range(nd_array.shape[0])]
                nd_array = np.stack(channels, axis=0)
        return nd_array

    @staticmethod
    def undo(nd_array, axes):
        """Invert apply(); flips are involutions, applied in reverse order."""
        if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]):
            raise TypeError(
                "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported")
        for axis in axes[::-1]:
            if nd_array.ndim == 3:
                nd_array = np.flip(nd_array, axis)
            else:
                channels = [np.flip(nd_array[c], axis)
                            for c in range(nd_array.shape[0])]
                nd_array = np.stack(channels, axis=0)
        return nd_array
class RandomRotate90(object):
    """Randomly rotates a 3D/4D array by a multiple of 90 degrees in the (H, W) plane."""

    def __init__(self, exec_probability):
        # Probability that a rotation is performed at all.
        self._exec_probability = exec_probability

    def __call__(self, nd_array):
        if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]):
            raise TypeError(
                "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported")
        if random.uniform(0, 1) <= self._exec_probability:
            # BUG FIX: random.randint's upper bound is inclusive, so the
            # original randint(0, 4) yielded five outcomes where k=0 and k=4
            # both mean "no rotation", biasing the augmentation toward the
            # identity. 0..3 covers each distinct quarter-turn exactly once.
            num_rotation = random.randint(0, 3)
            nd_array = self.apply(nd_array, num_rotation)
        return nd_array

    def __repr__(self):
        return self.__class__.__name__ + '()'

    @staticmethod
    def apply(nd_array, num_rotation):
        """Rotate by *num_rotation* quarter-turns in the (H, W) plane."""
        if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]):
            raise TypeError(
                "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported")
        if nd_array.ndim == 3:
            nd_array = np.rot90(nd_array, num_rotation, (1, 2))
        else:
            channels = [np.rot90(nd_array[c], num_rotation, (1, 2))
                        for c in range(nd_array.shape[0])]
            nd_array = np.stack(channels, axis=0)
        return nd_array

    @staticmethod
    def undo(nd_array, num_rotation):
        """Rotate back: same turn count with the axis pair reversed."""
        if not isinstance(nd_array, np.ndarray) or (nd_array.ndim not in [3, 4]):
            raise TypeError(
                "Only 3D (DxHxW) or 4D (CxDxHxW) ndarrays are supported")
        if nd_array.ndim == 3:
            nd_array = np.rot90(nd_array, num_rotation, (2, 1))
        else:
            channels = [np.rot90(nd_array[c], num_rotation, (2, 1))
                        for c in range(nd_array.shape[0])]
            nd_array = np.stack(channels, axis=0)
        return nd_array
class Normalize(object):
    """Standardizes an ndarray as (x - mean) / std, with an exact inverse via undo()."""

    def __init__(self, mean, std):
        self._mean = mean
        self._std = std

    def __call__(self, nd_array):
        if isinstance(nd_array, np.ndarray):
            return self.apply(nd_array, self._mean, self._std)
        raise TypeError("Only ndarrays are supported")

    def __repr__(self):
        return self.__class__.__name__ + '()'

    @staticmethod
    def apply(nd_array, mean, std):
        """Forward transform: subtract *mean*, then divide by *std*."""
        if isinstance(nd_array, np.ndarray):
            return (nd_array - mean) / std
        raise TypeError("Only ndarrays are supported")

    @staticmethod
    def undo(nd_array, mean, std):
        """Inverse transform: multiply by *std*, then add *mean*."""
        if isinstance(nd_array, np.ndarray):
            return (nd_array * std) + mean
        raise TypeError("Only ndarrays are supported")
class Flip(object):
    """Deterministically reverses an ndarray along a fixed axis (self-inverse)."""

    def __init__(self, axis):
        self._axis = axis

    def __call__(self, nd_array):
        if isinstance(nd_array, np.ndarray):
            return self.apply(nd_array, self._axis)
        raise TypeError("Only ndarrays are supported")

    def __repr__(self):
        return self.__class__.__name__ + '()'

    @staticmethod
    def apply(nd_array, axis):
        """Return a contiguous copy of *nd_array* flipped along *axis*."""
        if isinstance(nd_array, np.ndarray):
            return np.flip(nd_array, axis).copy()
        raise TypeError("Only ndarrays are supported")

    @staticmethod
    def undo(nd_array, axis):
        """Flipping is an involution, so undoing is the same flip again."""
        if isinstance(nd_array, np.ndarray):
            return np.flip(nd_array, axis).copy()
        raise TypeError("Only ndarrays are supported")
class TensorFlip(object):
    """Deterministically flips a torch tensor along a fixed axis (self-inverse)."""

    def __init__(self, axis):
        self._axis = axis

    def __call__(self, tensor):
        return self.apply(tensor, self._axis)

    def __repr__(self):
        return self.__class__.__name__ + '()'

    @staticmethod
    def apply(tensor, axis):
        """Return *tensor* flipped along *axis*."""
        return tensor.flip(axis)

    @staticmethod
    def undo(tensor, axis):
        """A flip is its own inverse, so undo applies the same flip."""
        return tensor.flip(axis)
| 37.265651 | 120 | 0.593444 | 2,861 | 22,024 | 4.318071 | 0.116393 | 0.099725 | 0.025498 | 0.027198 | 0.598996 | 0.550024 | 0.509228 | 0.482678 | 0.44779 | 0.421645 | 0 | 0.01739 | 0.305485 | 22,024 | 590 | 121 | 37.328814 | 0.790272 | 0.138712 | 0 | 0.502604 | 0 | 0 | 0.064083 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.140625 | false | 0 | 0.03125 | 0.044271 | 0.320313 | 0.002604 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56116252a18b3ab533ede58656a33d7beedc09ce | 1,198 | py | Python | tests/test_image_xpress.py | jni/microscopium | b9cddd8ef5f3003a396ace602228651b3020c4a3 | [
"BSD-3-Clause"
] | 53 | 2016-08-30T09:45:12.000Z | 2022-02-03T06:22:50.000Z | tests/test_image_xpress.py | jni/microscopium | b9cddd8ef5f3003a396ace602228651b3020c4a3 | [
"BSD-3-Clause"
] | 151 | 2015-01-15T06:16:27.000Z | 2021-03-22T01:01:26.000Z | tests/test_image_xpress.py | jni/microscopium | b9cddd8ef5f3003a396ace602228651b3020c4a3 | [
"BSD-3-Clause"
] | 19 | 2015-01-15T06:13:26.000Z | 2021-09-13T13:06:47.000Z | from microscopium.screens import image_xpress
import collections as coll
def test_ix_semantic_filename():
    """Parsing a plain (prefix-less) ImageXpress filename yields all fields."""
    filename = "./Week1_22123/G10_s2_w11C3B9BCC-E48F-4C2F-9D31-8F46D8B5B972.tif"
    expected_fields = [('directory', './Week1_22123'),
                       ('prefix', ''),
                       ('plate', 22123),
                       ('well', 'G10'),
                       ('field', 1),
                       ('channel', 0),
                       ('suffix', 'tif')]
    observed = image_xpress.ix_semantic_filename(filename)
    assert observed == coll.OrderedDict(expected_fields)
def test_ix_semantic_filename2():
    """Parsing a prefixed (BBBC022) ImageXpress filename yields all fields."""
    filename = ("./BBBC022_v1_images_20585w1/IXMtest_L09_s3_w1538679C9-F03A-"
                "4656-9A57-0D4A440C1C62.tif")
    expected_fields = [('directory', './BBBC022_v1_images_20585w1'),
                       ('prefix', 'IXMtest'),
                       ('plate', 20585),
                       ('well', 'L09'),
                       ('field', 2),
                       ('channel', 0),
                       ('suffix', 'tif')]
    observed = image_xpress.ix_semantic_filename(filename)
    assert observed == coll.OrderedDict(expected_fields)
| 39.933333 | 79 | 0.477462 | 102 | 1,198 | 5.313725 | 0.509804 | 0.073801 | 0.099631 | 0.121771 | 0.416974 | 0.243542 | 0.243542 | 0.243542 | 0.243542 | 0.243542 | 0 | 0.138736 | 0.392321 | 1,198 | 29 | 80 | 41.310345 | 0.605769 | 0 | 0 | 0.26087 | 0 | 0 | 0.243108 | 0.146199 | 0 | 0 | 0 | 0 | 0.086957 | 1 | 0.086957 | false | 0 | 0.086957 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5611a9096513cba2a9a68bf3992b55379db65e5b | 3,581 | py | Python | examples/vertex_pipeline/pipelines/batch_prediction_pipeline_runner.py | marcosgm/professional-services | f332b425c2f3b6538ebf65afda7e67de3bed1b3d | [
"Apache-2.0"
] | 2,116 | 2017-05-18T19:33:05.000Z | 2022-03-31T13:34:48.000Z | examples/vertex_pipeline/pipelines/batch_prediction_pipeline_runner.py | hyuatpc/professional-services | e5c811a8752e91fdf9f959a0414931010b0ea1ba | [
"Apache-2.0"
] | 548 | 2017-05-20T05:05:35.000Z | 2022-03-28T16:38:12.000Z | examples/vertex_pipeline/pipelines/batch_prediction_pipeline_runner.py | hyuatpc/professional-services | e5c811a8752e91fdf9f959a0414931010b0ea1ba | [
"Apache-2.0"
] | 1,095 | 2017-05-19T00:02:36.000Z | 2022-03-31T05:21:39.000Z | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runner for batch prediction pipeline."""
import argparse
from absl import logging
from kfp.v2.google import client
def run_training_pipeline():
    """Parse CLI flags, then launch or schedule the batch prediction pipeline.

    NOTE(review): the name says "training" but this module drives the batch
    prediction pipeline (per the module docstring) — kept for compatibility.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--project_id', type=str)
    arg_parser.add_argument('--pipeline_region', type=str)
    arg_parser.add_argument('--pipeline_root', type=str)
    arg_parser.add_argument('--pipeline_job_spec_path', type=str)

    # Staging path for running custom job
    arg_parser.add_argument('--data_pipeline_root', type=str)

    # Parameters required for data ingestion and processing
    arg_parser.add_argument('--input_dataset_uri', type=str)
    arg_parser.add_argument('--gcs_data_output_folder', type=str)
    arg_parser.add_argument('--data_region', type=str)
    arg_parser.add_argument('--gcs_result_folder', type=str)

    # Parameters required for training job
    arg_parser.add_argument('--model_resource_name', type=str, default='')
    arg_parser.add_argument('--endpoint_resource_name', type=str, default='')

    # Parameters required for batch prediction job
    arg_parser.add_argument('--machine_type', type=str, default='n1-standard-4')
    arg_parser.add_argument('--accelerator_count', type=int, default=0)
    arg_parser.add_argument('--accelerator_type',
                            type=str, default='ACCELERATOR_TYPE_UNSPECIFIED')
    arg_parser.add_argument('--starting_replica_count', type=int, default=1)
    arg_parser.add_argument('--max_replica_count', type=int, default=2)

    # Parameters required for pipeline scheduling
    arg_parser.add_argument('--pipeline_schedule',
                            type=str, default='', help='0 2 * * *')
    arg_parser.add_argument('--pipeline_schedule_timezone',
                            type=str, default='US/Pacific')
    arg_parser.add_argument('--enable_pipeline_caching',
                            action='store_true',
                            default=False,
                            help='Specify whether to enable caching.')

    known_args, _ = arg_parser.parse_known_args()
    logging.info(known_args)

    api_client = client.AIPlatformClient(known_args.project_id,
                                         known_args.pipeline_region)

    # Flags that configure the runner itself and are not pipeline parameters.
    runner_only_keys = {'pipeline_region', 'pipeline_root',
                        'pipeline_job_spec_path', 'pipeline_schedule',
                        'pipeline_schedule_timezone', 'enable_pipeline_caching'}
    pipeline_params = {key: value for key, value in vars(known_args).items()
                       if key not in runner_only_keys}

    if known_args.pipeline_schedule:
        # A non-empty cron expression means a recurring schedule is wanted.
        api_client.create_schedule_from_job_spec(
            known_args.pipeline_job_spec_path,
            schedule=known_args.pipeline_schedule,
            time_zone=known_args.pipeline_schedule_timezone,
            pipeline_root=known_args.pipeline_root,
            parameter_values=pipeline_params,
            enable_caching=known_args.enable_pipeline_caching
        )
    else:
        api_client.create_run_from_job_spec(
            known_args.pipeline_job_spec_path,
            pipeline_root=known_args.pipeline_root,
            parameter_values=pipeline_params,
            enable_caching=known_args.enable_pipeline_caching
        )
# Entry point when executed directly (e.g. from a scheduler or CI image).
if __name__ == '__main__':
    run_training_pipeline()
| 39.788889 | 78 | 0.726054 | 460 | 3,581 | 5.384783 | 0.347826 | 0.069035 | 0.1304 | 0.038757 | 0.281389 | 0.16956 | 0.104158 | 0.104158 | 0.076706 | 0.076706 | 0 | 0.005382 | 0.169785 | 3,581 | 89 | 79 | 40.235955 | 0.827783 | 0.244624 | 0 | 0.140351 | 0 | 0 | 0.224879 | 0.100486 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017544 | false | 0 | 0.052632 | 0 | 0.070175 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5612459fcdf8951d0bf375d61705d77efffeb9d8 | 1,642 | py | Python | examples/slash_commands.py | z03h/discord.py | 7e5831ba9cc3f881e11b3536159a3851fba6ab52 | [
"MIT"
] | 7 | 2021-09-12T02:31:57.000Z | 2022-02-20T21:15:35.000Z | examples/slash_commands.py | jay3332/discord.py | 953f067e3b5ee33f5be62ae614ac724afc289879 | [
"MIT"
] | 13 | 2021-11-04T00:32:25.000Z | 2022-03-02T03:03:54.000Z | examples/slash_commands.py | jay3332/discord.py | 953f067e3b5ee33f5be62ae614ac724afc289879 | [
"MIT"
] | null | null | null | import discord
from discord.application_commands import ApplicationCommand, ApplicationCommandTree, option
# Replace 1234 with your guild's ID, or pass ``None`` to register the commands globally.
tree = ApplicationCommandTree(guild_id=1234)
class Ping(ApplicationCommand, name='ping', tree=tree):
    """Pong!"""

    async def callback(self, interaction: discord.Interaction):
        # Acknowledge the interaction with a fixed reply.
        await interaction.response.send_message('Pong!')
class Math(ApplicationCommand, name='math', tree=tree):
    """Basic math operations."""

    # NOTE(review): these nested classes pass ``parent=Math`` while Math's
    # own class body is still executing, at which point the name ``Math`` is
    # not yet bound — confirm the library's intended sub-command pattern.
    class Add(ApplicationCommand, name='add', parent=Math):
        """Sum of x + y."""

        # Required integer options shown in the slash-command UI.
        x: int = option(description='Value of "x"', required=True)
        y: int = option(description='Value of "y"', required=True)

        async def callback(self, interaction: discord.Interaction):
            answer = self.x + self.y
            # ephemeral=True: only the invoking user sees the reply.
            await interaction.response.send_message(f'The value of {self.x} + {self.y} is **{answer}**.', ephemeral=True)

    class Subtract(ApplicationCommand, name='subtract', parent=Math):
        """Difference of x - y."""

        x: int = option(description='Value of "x"', required=True)
        y: int = option(description='Value of "y"', required=True)

        async def callback(self, interaction: discord.Interaction):
            answer = self.x - self.y
            await interaction.response.send_message(f'The value of {self.x} - {self.y} is **{answer}**.', ephemeral=True)
class Client(discord.Client):
    async def on_ready(self):
        """Log the bot's identity once the gateway connection is ready."""
        print(f'Logged in as {self.user} (ID: {self.user.id})')
        print('------')
# Sync the application commands with Discord at startup, then run the bot.
client = Client(update_application_commands_at_startup=True)
client.add_application_command_tree(tree)
# Replace 'token' with the bot's actual token before running.
client.run('token')
| 34.93617 | 117 | 0.690621 | 210 | 1,642 | 5.338095 | 0.319048 | 0.037467 | 0.071365 | 0.089206 | 0.490633 | 0.459411 | 0.459411 | 0.4157 | 0.4157 | 0.4157 | 0 | 0.002905 | 0.161389 | 1,642 | 46 | 118 | 35.695652 | 0.811184 | 0.074909 | 0 | 0.269231 | 0 | 0 | 0.150868 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.423077 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5612e312b8ffbaa468da026af436acb1c1385add | 10,887 | py | Python | Examples/tk_simple_dialog.py | Aarif1430/Python-Awesome-notes-and-exercises-list | c8ad7f90ebd973025f37d4e79c2f1229a8a2915c | [
"MIT"
] | 2 | 2021-01-13T21:20:57.000Z | 2021-08-18T17:53:53.000Z | Examples/tk_simple_dialog.py | Aarif1430/Python-Awesome-notes-and-exercises-list | c8ad7f90ebd973025f37d4e79c2f1229a8a2915c | [
"MIT"
] | null | null | null | Examples/tk_simple_dialog.py | Aarif1430/Python-Awesome-notes-and-exercises-list | c8ad7f90ebd973025f37d4e79c2f1229a8a2915c | [
"MIT"
] | 1 | 2020-11-05T09:56:55.000Z | 2020-11-05T09:56:55.000Z | #!/usr/bin/env python
import Tkinter
import tkMessageBox
import rwkpickle, rwkos, os, glob
from Tkinter import StringVar, IntVar, DoubleVar
# Persistent settings pickle for the lecture tools, located via rwkos search paths.
pklpath = rwkos.FindFullPath('pygimp_lecturerc.pkl')
class myWindow:
    """Minimal Tk window with one text entry and an Exit button.

    Pressing Return closes the window; the typed value remains available on
    ``self.var`` afterwards.  The constructor blocks in mainloop() until the
    window is closed.
    """

    def close(self, *args, **kwargs):
        self.mw.destroy()

    def __init__(self, title="Enter Quiz #"):
        self.mw = Tkinter.Tk()
        self.mw.option_add("*font", ("Arial", 15, "normal"))
        self.mw.geometry("+250+200")
        self.var = Tkinter.StringVar()
        text_entry = Tkinter.Entry(self.mw, textvariable=self.var)
        text_entry.focus_set()
        text_entry.pack()
        text_entry.bind("<Return>", self.close)
        self.mw.title(title)
        self.btn2 = Tkinter.Button(self.mw,
                                   text="Exit",
                                   command=self.mw.destroy)
        self.btn2.pack()
        self.mw.mainloop()
class width_and_dpi_dialog:
    """Modal dialog asking for a figure width (inches) and a dpi value.

    After the window closes via "Go" or Return, ``self.result`` holds the
    tuple ``(width, dpi)``; it stays ``None`` if the user exits instead.
    The constructor blocks in mainloop() until the window is closed.
    """

    def close(self, *args, **kwargs):
        self.width_float = float(self.width_string.get())
        self.result = self.width_float, self.dpi_int.get()
        print('result = %f, %i' % self.result)
        self.mw.destroy()

    def __init__(self, title="Width and DPI Dialog"):
        self.result = None
        self.mw = Tkinter.Tk()
        self.mw.option_add("*font", ("Arial", 15, "normal"))
        self.mw.geometry("+250+200")
        # Two labeled rows: width entry on row 0, dpi entry on row 1.
        Tkinter.Label(self.mw, text="Width (in.):").grid(row=0)
        Tkinter.Label(self.mw, text="dpi:").grid(row=1)
        self.width_string = Tkinter.StringVar()
        self.dpi_int = Tkinter.IntVar()
        width_entry = Tkinter.Entry(self.mw, textvariable=self.width_string)
        width_entry.grid(row=0, column=1)
        dpi_entry = Tkinter.Entry(self.mw, textvariable=self.dpi_int)
        dpi_entry.grid(row=1, column=1)
        # Defaults: 300 dpi, empty width (user must type one).
        self.dpi_int.set(300)
        self.width_string.set('')
        width_entry.bind("<Return>", self.close)
        self.mw.title(title)
        self.exit_btn = Tkinter.Button(self.mw, text="Exit",
                                       command=self.mw.destroy)
        self.exit_btn.grid(row=2, column=0)
        self.go_btn = Tkinter.Button(self.mw, text="Go",
                                     command=self.close)
        self.go_btn.grid(row=2, column=1)
        width_entry.focus_set()
        self.mw.mainloop()
class pickle_entry(object):
    """One labeled Tkinter Entry row bound to a single key of a parent's pickle dict."""

    def __init__(self, parent, mw, label, key, row, varclass=None):
        if varclass is None:
            varclass = StringVar
        self.var = varclass()
        self.parent = parent
        self.mw = mw
        self.label = label
        self.key = key
        self.row = row
        Tkinter.Label(mw, text=label + ":").grid(row=row, column=0, sticky='e')
        self.entry = Tkinter.Entry(mw, textvariable=self.var, width=25)
        self.entry.grid(row=row, column=1)

    def get(self):
        """Return (pickle key, current widget value)."""
        return self.key, self.var.get()

    def load_pickle(self):
        """Copy this entry's value from the parent pickle into the widget."""
        self.var.set(self.parent.pickle[self.key])
class lecture_pickle_dialog:#(tkSimpleDialog.Dialog):
    """Tk editor for the lecture pickle stored at the module-level pklpath.

    Builds one pickle_entry row per tuple in self.data, loads the current
    pickle values into the widgets and, on Go, writes the edited values
    back to disk.
    """

    def close(self, *args, **kwargs):
        # Bound to the Go button: persist the edits and close the window.
        print('got close event')
        #self.width_float = float(self.width_string.get())
        #self.result = self.width_float, self.dpi_int.get()
        #print('result = %f, %i' % self.result)
        self.set_pickle()
        self.save_pickle()
        self.mw.destroy()

    def load_pickle(self):
        # Push the on-disk pickle values into every entry widget.
        for entry in self.entries:
            entry.load_pickle()

    def set_pickle(self):
        # Pull the (possibly edited) widget values back into self.pickle.
        for entry in self.entries:
            key, val = entry.get()
            self.pickle[key] = val

    def save_pickle(self):
        # Persist the dict to the module-level pickle path.
        rwkpickle.SavePickle(self.pickle, pklpath)

    def __init__(self, title="Lecture Pickle Dialog"):
        """Build the editor window and block in mainloop() until closed.

        NOTE(review): the title parameter is ignored; the window title is
        hard-coded to 'Pickle Editor' below.
        """
        self.pickle = rwkpickle.LoadPickle(pklpath)
        self.result = None
        self.mw = Tkinter.Tk()
        self.mw.option_add("*font", ("Arial", 15, "normal"))
        self.mw.geometry("+400+300")
        # NOTE(review): self.labels/self.keys duplicate self.data and do
        # not appear to be used elsewhere in this class.
        self.labels = ['Lecture Path', 'Course Num.', \
                       'Search Pattern', 'Date Stamp', \
                       'Pat', 'Current Slide', \
                       'Outline Slide']
        self.keys = ['lecture_path', 'course_num', \
                     'search_pat', 'date_stamp' , \
                     'pat', 'current_slide', 'outline_slide']
        # (label, pickle key, Tk variable class) for each editor row.
        self.data = [('Lecture Path', 'lecture_path', StringVar), \
                     ('Course Num.', 'course_num', StringVar), \
                     ('Search Pattern', 'search_pat', StringVar), \
                     ('Date Stamp', 'date_stamp', StringVar), \
                     ('Pat', 'pat', StringVar), \
                     ('Current Slide', 'current_slide', IntVar), \
                     ('Outline Slide', 'outline_slide', IntVar), \
                     ]
        self.entries = []
        for i, tup in enumerate(self.data):
            label = tup[0]
            key = tup[1]
            varclass = tup[2]
            # 'pickle' here is a pickle_entry row, not the dict above.
            pickle = pickle_entry(self, self.mw, \
                                  label=label, \
                                  key=key, \
                                  row=i, \
                                  varclass=varclass)
            self.entries.append(pickle)
        N = len(self.data)
        self.mw.title('Pickle Editor')
        self.exit_btn = Tkinter.Button(self.mw,
                                       text = "Exit",
                                       command = self.mw.destroy)
        self.exit_btn.grid(row=N, column=0)
        self.go_btn = Tkinter.Button(self.mw,
                                     text = "Go",
                                     command = self.close)
        self.go_btn.grid(row=N, column=1)
        self.load_pickle()
        self.mw.mainloop()
class reset_lecture_dialog:#(tkSimpleDialog.Dialog):
    """Dialog for resetting lecture state: zero the current slide counter
    and optionally reset the outline slide and/or delete generated slides.

    Reads and writes the pickle at the module-level pklpath.
    """

    def close(self, *args, **kwargs):
        """Apply the selected reset actions, save the pickle and close."""
        print('got close event')
        self.pickle['current_slide'] = 0
        #self.width_float = float(self.width_string.get())
        #self.result = self.width_float, self.dpi_int.get()
        #print('result = %f, %i' % self.result)
        if self.var1.get():
            print('reseting outline slide')
            self.reset_outline()
        if self.var2.get():
            print('deleting existing slides')
            self.delete_existing_slides()
        rwkpickle.SavePickle(self.pickle, pklpath)
        self.mw.destroy()

    def reset_outline(self):
        """Zero the outline slide counter and drop outline-related keys."""
        self.pickle['outline_slide'] = 0
        clear_list = ['outline_pat','outline_dir']
        for key in clear_list:
            # BUG FIX: dict.has_key() was removed in Python 3; the 'in'
            # operator is equivalent and works on Python 2 as well.
            if key in self.pickle:
                self.pickle.pop(key)

    def _build_pat(self, end='*'):
        """Return a glob pattern <lecture_path>/<search_pat><end>."""
        lp = self.pickle['lecture_path']
        pat = self.pickle['search_pat'] + end
        return os.path.join(lp, pat)

    def build_xcf_pat(self):
        """Store the glob pattern matching the lecture's .xcf slide files."""
        self.xcf_pat = self._build_pat(end='*.xcf')

    def build_delete_pat(self):
        """Store the glob pattern matching every generated slide file."""
        self.delete_pat = self._build_pat(end='*')

    def delete_existing_slides(self):
        """Delete every file matching the lecture's slide pattern."""
        self.build_delete_pat()
        rwkos.delete_from_glob_pat(self.delete_pat)

    def __init__(self, title="Reset Lecture Dialog"):
        """Build the dialog (slide counts, two checkboxes, Go button) and
        block in mainloop() until the user presses Go or closes the window.
        """
        self.result = None
        self.mw = Tkinter.Tk()
        self.mw.option_add("*font", ("Arial", 15, "normal"))
        self.mw.geometry("+300+300")
        self.pickle = rwkpickle.LoadPickle(pklpath)
        #Need to display the number of existing slides and the
        #current outline slide number
        label1 = Tkinter.Label(self.mw, \
                               text='Number of existing slides')
        label1.grid(row=0, column=0, sticky='w')
        self.num_slides = IntVar()
        self.entry1 = Tkinter.Entry(self.mw, \
                                    textvariable=self.num_slides, \
                                    width=5)
        self.entry1.grid(row=0, column=1)
        self.build_xcf_pat()
        self.existing_slides = glob.glob(self.xcf_pat)
        self.num_slides.set(len(self.existing_slides))
        label2 = Tkinter.Label(self.mw, \
                               text='Outline Slide')
        label2.grid(row=1, column=0, sticky='w')
        self.outline_slide = IntVar()
        self.entry2 = Tkinter.Entry(self.mw, \
                                    textvariable=self.outline_slide, \
                                    width=5)
        self.entry2.grid(row=1, column=1)
        self.outline_slide.set(self.pickle['outline_slide'])
        self.var1 = IntVar()
        check1 = Tkinter.Checkbutton(self.mw, \
                                     text="Reset outline slide", \
                                     variable=self.var1)
        check1.var = self.var1
        check1.grid(row=2, sticky='w')
        self.var2 = IntVar()
        check2 = Tkinter.Checkbutton(self.mw, \
                                     text="Delete existing slides", \
                                     variable=self.var2)
        check2.var = self.var2
        check2.grid(row=3, sticky='w')
        self.go_btn = Tkinter.Button(self.mw,
                                     text = "Go",
                                     command = self.close)
        self.go_btn.bind("<Return>", self.close)
        self.go_btn.grid(row=4)
        self.go_btn.focus_set()
        self.mw.title(title)
        self.mw.mainloop()
if __name__ == "__main__":
    # Launch one of the dialogs; the commented lines are kept for
    # manually exercising the other dialogs.
    #app = myWindow()
    #app = width_and_dpi_dialog()
    #app = lecture_pickle_dialog()
    app = reset_lecture_dialog()
| 32.792169 | 117 | 0.524479 | 1,229 | 10,887 | 4.519121 | 0.154597 | 0.049694 | 0.021606 | 0.020526 | 0.4287 | 0.340835 | 0.314908 | 0.269175 | 0.242528 | 0.242528 | 0 | 0.015105 | 0.343253 | 10,887 | 331 | 118 | 32.891239 | 0.761678 | 0.135758 | 0 | 0.267606 | 0 | 0 | 0.088585 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089202 | false | 0 | 0.018779 | 0.004695 | 0.140845 | 0.023474 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5613450d1a3d2afb56deff14d8b5138f01e852ba | 3,159 | py | Python | gqcnn/grasping/constraint_fn.py | richardliaw/gqcnn | a0930e9d2fef3c930c41dd91cde902d261348fbe | [
"CNRI-Python"
] | 1 | 2019-05-29T00:16:56.000Z | 2019-05-29T00:16:56.000Z | gqcnn/grasping/constraint_fn.py | richardliaw/gqcnn | a0930e9d2fef3c930c41dd91cde902d261348fbe | [
"CNRI-Python"
] | null | null | null | gqcnn/grasping/constraint_fn.py | richardliaw/gqcnn | a0930e9d2fef3c930c41dd91cde902d261348fbe | [
"CNRI-Python"
] | 4 | 2019-05-22T17:33:30.000Z | 2020-02-18T03:44:01.000Z | """
Constraint functions for grasp sampling
Author: Jeff Mahler
"""
from abc import ABCMeta, abstractmethod
import numpy as np
class GraspConstraintFn(object, metaclass=ABCMeta):
    """
    Abstract constraint function for grasp sampling.

    Instances are callable; calling delegates to satisfies_constraints(),
    which subclasses must implement.

    BUG FIX: the original declared ``__metaclass__ = ABCMeta`` inside the
    class body, which is a Python-2 idiom silently ignored by Python 3,
    so @abstractmethod was not enforced; the ``metaclass=`` keyword above
    restores enforcement.
    """

    def __init__(self, config):
        # Keep the raw configuration dict for subclasses to read.
        self._config = config

    def __call__(self, grasp):
        """
        Evaluates whether or not a grasp is valid.

        Parameters
        ----------
        grasp : :obj:`Grasp2D`
            grasp to evaluate

        Returns
        -------
        bool
            True if the grasp satisfies constraints, False otherwise
        """
        return self.satisfies_constraints(grasp)

    @abstractmethod
    def satisfies_constraints(self, grasp):
        """
        Evaluates whether or not a grasp is valid.

        Parameters
        ----------
        grasp : :obj:`Grasp2D`
            grasp to evaluate

        Returns
        -------
        bool
            True if the grasp satisfies constraints, False otherwise
        """
        pass
class DiscreteApproachGraspConstraintFn(GraspConstraintFn):
    """
    Constrains the grasp approach direction to lie near one of a discrete
    set of angles measured from the world z direction.
    """

    def __init__(self, config):
        # Initialize the base class, then cache the config fields we use.
        GraspConstraintFn.__init__(self, config)
        self._max_approach_angle = self._config['max_approach_angle']
        self._angular_tolerance = self._config['angular_tolerance']
        self._angular_step = self._config['angular_step']
        self._T_camera_world = self._config['camera_pose']

    def satisfies_constraints(self, grasp):
        """
        Evaluates whether or not a grasp is valid by checking the angle
        between its approach axis and the world z direction against the
        discrete set of allowed angles.

        Parameters
        ----------
        grasp : :obj:`Grasp2D`
            grasp to evaluate

        Returns
        -------
        bool
            True if the grasp satisfies constraints, False otherwise
        """
        # Rotate the approach axis into the world frame and measure its
        # angle from the downward (-z) world direction.
        world_axis = self._T_camera_world.rotation.dot(grasp.approach_axis)
        approach_angle = np.arccos(-world_axis[2])

        # Candidate angles: a discrete grid up to (but excluding) the max
        # approach angle when a positive step is configured, else just 0.
        if self._angular_step > 0:
            candidates = np.arange(start=0.0,
                                   stop=self._max_approach_angle,
                                   step=self._angular_step)
        else:
            candidates = np.array([0.0])

        # Accept iff the closest candidate is within tolerance.
        deviations = np.abs(candidates - approach_angle)
        return bool(deviations[np.argmin(deviations)] < self._angular_tolerance)
class GraspConstraintFnFactory(object):
    """Factory that maps a constraint-function type string to an instance."""

    @staticmethod
    def constraint_fn(fn_type, config):
        """Return the constraint function named by fn_type, or None for
        'none'; raises ValueError for unknown types."""
        if fn_type == 'none':
            return None
        if fn_type == 'discrete_approach_angle':
            return DiscreteApproachGraspConstraintFn(config)
        raise ValueError('Grasp constraint function type %s not supported!' %(fn_type))
| 29.523364 | 91 | 0.598291 | 325 | 3,159 | 5.590769 | 0.326154 | 0.044029 | 0.023115 | 0.041277 | 0.298294 | 0.259769 | 0.259769 | 0.259769 | 0.259769 | 0.259769 | 0 | 0.004178 | 0.318139 | 3,159 | 106 | 92 | 29.801887 | 0.839369 | 0.30421 | 0 | 0.097561 | 0 | 0 | 0.070594 | 0.012208 | 0 | 0 | 0 | 0 | 0 | 1 | 0.146341 | false | 0.02439 | 0.04878 | 0 | 0.414634 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56179b533974125c261d08cc31294d0f7cdd2f1f | 4,052 | py | Python | hw4/dynamics.py | tombroz/berkeley-cs294_homework | 5419b772c734093c750362d2e09b46ce59d79da6 | [
"MIT"
] | null | null | null | hw4/dynamics.py | tombroz/berkeley-cs294_homework | 5419b772c734093c750362d2e09b46ce59d79da6 | [
"MIT"
] | null | null | null | hw4/dynamics.py | tombroz/berkeley-cs294_homework | 5419b772c734093c750362d2e09b46ce59d79da6 | [
"MIT"
] | null | null | null | import os
import tensorflow as tf
import numpy as np
C = 1e-13
# Predefined function to build a feedforward neural network
# Predefined function to build a feedforward neural network
def build_mlp(input_placeholder,
              output_size,
              scope,
              n_layers=2,
              size=500,
              activation=tf.tanh,
              output_activation=None
              ):
    """Build an MLP: n_layers hidden dense layers of `size` units with
    `activation`, then a linear (or `output_activation`) output layer of
    `output_size` units, all under variable scope `scope`."""
    with tf.variable_scope(scope):
        hidden = input_placeholder
        for _ in range(n_layers):
            hidden = tf.layers.dense(hidden, size, activation=activation)
        return tf.layers.dense(hidden, output_size,
                               activation=output_activation)
class NNDynamicsModel():
    """Feed-forward dynamics model trained on normalized (state, action)
    inputs to predict normalized state differences (s_{t+1} - s_t)."""

    def __init__(self,
                 env,
                 n_layers,
                 size,
                 activation,
                 output_activation,
                 normalization,
                 batch_size,
                 iterations,
                 learning_rate,
                 sess
                 ):
        """ Note: Be careful about normalization """
        (self.mean_obs, self.std_obs, self.mean_deltas, self.std_deltas,
         self.mean_actions, self.std_actions) = normalization
        self.obs_dim = env.observation_space.shape[0]
        self.actions_dim = env.action_space.shape[0]
        # Placeholders: concatenated (state, action) input and the
        # normalized state-delta target.
        self.in_states_acts = tf.placeholder(
            tf.float32, [None, self.obs_dim + self.actions_dim],
            name='states_actions')
        self.out_states_deltas = tf.placeholder(
            tf.float32, [None, self.obs_dim], name='states_deltas')
        self.epochs = iterations
        self.gstep = tf.Variable(0, dtype=tf.int32, trainable=False,
                                 name='global_step')
        self.pred_delt = build_mlp(self.in_states_acts, self.obs_dim,
                                   "pred_state_delta", n_layers, size,
                                   activation, output_activation)
        self.batch_size = batch_size
        self.lr = learning_rate
        self.loss = tf.losses.mean_squared_error(self.out_states_deltas,
                                                 self.pred_delt)
        self.opt = tf.train.AdamOptimizer(self.lr).minimize(self.loss)
        self.sess = sess

    def fit(self, data):
        """
        Take a dataset of (unnormalized) states, actions and next_states
        and fit the dynamics model mapping normalized (state, action) to
        normalized state differences (s_t+1 - s_t).
        """
        obs = np.vstack([path['observations'] for path in data])
        actions = np.vstack([path['actions'] for path in data])
        next_obs = np.vstack([path['next_observations'] for path in data])
        norm_obs = (obs - self.mean_obs) / (self.std_obs + C)
        norm_actions = (actions - self.mean_actions) / (self.std_actions + C)
        # BUG FIX: normalize the state *difference*; the original
        # normalized next_obs directly, which does not match the delta
        # statistics nor predict()'s un-normalization.
        norm_delta = (next_obs - obs - self.mean_deltas) / (self.std_deltas + C)
        # BUG FIX: concatenate along the feature axis; np.vstack stacked
        # states and actions along the batch axis, producing inputs of the
        # wrong shape for the placeholder.
        obs_actions = np.concatenate((norm_obs, norm_actions), axis=1)
        n_batches = obs.shape[0] // self.batch_size + 1
        for ep in range(self.epochs):
            # BUG FIX: np.random.choice(n) returns a single scalar; a full
            # permutation is needed for epoch shuffling.
            perm_ids = np.random.permutation(obs.shape[0])
            tl = 0.
            for st in range(n_batches):
                start_id = st * self.batch_size
                batch_ids = perm_ids[start_id:start_id + self.batch_size]
                # BUG FIX: fancy-index with the batch ids (the original
                # sliced from the index array onward: arr[ids:]).
                in_batch = obs_actions[batch_ids]
                out_batch = norm_delta[batch_ids]
                l, _ = self.sess.run(
                    [self.loss, self.opt],
                    feed_dict={self.in_states_acts: in_batch,
                               self.out_states_deltas: out_batch})
                tl += l
            print("Epoch {0}/{1}: Train_loss = {2:.6f}".format(
                ep, self.epochs, tl / n_batches))

    def predict(self, states, actions):
        """ Take a batch of (unnormalized) states and (unnormalized)
        actions and return the (unnormalized) next states predicted by
        the model. """
        norm_obs = (states - self.mean_obs) / (self.std_obs + C)
        norm_actions = (actions - self.mean_actions) / (self.std_actions + C)
        # BUG FIX: feature-axis concatenation, as in fit().
        obs_actions = np.concatenate((norm_obs, norm_actions), axis=1)
        # BUG FIX: run the tensor directly; sess.run([t]) returns a
        # one-element list, which broadcast an extra dimension below.
        pred_states_deltas = self.sess.run(
            self.pred_delt, feed_dict={self.in_states_acts: obs_actions})
        # Un-normalize the predicted delta and add it to the current states.
        return states + self.mean_deltas + pred_states_deltas * self.std_deltas
| 44.527473 | 235 | 0.633761 | 535 | 4,052 | 4.568224 | 0.254206 | 0.02946 | 0.016367 | 0.026187 | 0.261047 | 0.225041 | 0.141162 | 0.116612 | 0.087152 | 0.057283 | 0 | 0.008378 | 0.263574 | 4,052 | 90 | 236 | 45.022222 | 0.810657 | 0.118707 | 0 | 0.084507 | 0 | 0 | 0.035361 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056338 | false | 0 | 0.042254 | 0 | 0.140845 | 0.014085 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56196bbd9c6e856d4ee7624c88862a4b3d4c3852 | 1,017 | py | Python | app/Product/service.py | psyphore/flask-phone-book | cceec3caabdeb03f260d37f3b55d5aa7a52c30c2 | [
"MIT"
] | null | null | null | app/Product/service.py | psyphore/flask-phone-book | cceec3caabdeb03f260d37f3b55d5aa7a52c30c2 | [
"MIT"
] | 2 | 2021-03-19T03:39:56.000Z | 2021-06-08T20:28:03.000Z | app/Product/service.py | psyphore/flask-phone-book | cceec3caabdeb03f260d37f3b55d5aa7a52c30c2 | [
"MIT"
] | null | null | null | import maya
from py2neo.ogm import Node
from app.graph_context import GraphContext
from .cypher_queries import get_product_by_id_query
class ProductService():
    '''
    This Product Service houses all the actions can be performed against the product object
    '''

    def fetch(self, id):
        '''Fetch a single product with matching id'''
        try:
            query = get_product_by_id_query(id)
            value = GraphContext().exec_cypher(query, id=id)
            print(f'{value}')
            return value
        except Exception as ex:
            # Best-effort: log the failure and signal "not found".
            print(f'X exception: {ex}')
            return None

    def fetch_all(self, limit=100):
        '''Fetch all Product nodes stored ordered by firstname limited (default=100)'''
        try:
            matcher = GraphContext().get_node_matcher
            match = matcher.match('Product').order_by("_.name")
            return list(match.limit(limit))
        except Exception as ex:
            # Best-effort: log the failure and return an empty result set.
            print(f'X exception: {ex}')
            return []
| 28.25 | 91 | 0.610619 | 123 | 1,017 | 4.918699 | 0.512195 | 0.029752 | 0.039669 | 0.046281 | 0.204959 | 0.142149 | 0.142149 | 0.142149 | 0.142149 | 0.142149 | 0 | 0.009804 | 0.297935 | 1,017 | 35 | 92 | 29.057143 | 0.837535 | 0.19764 | 0 | 0.272727 | 0 | 0 | 0.068354 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.181818 | 0 | 0.5 | 0.136364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
561cd253e6ea0665afa83c977dd5106cee35aeab | 2,041 | py | Python | adlib/tests/adversaries/feature_deletion_test.py | xyvivian/adlib | 79a93baa8aa542080bbf55734168eb89317df83c | [
"MIT"
] | null | null | null | adlib/tests/adversaries/feature_deletion_test.py | xyvivian/adlib | 79a93baa8aa542080bbf55734168eb89317df83c | [
"MIT"
] | null | null | null | adlib/tests/adversaries/feature_deletion_test.py | xyvivian/adlib | 79a93baa8aa542080bbf55734168eb89317df83c | [
"MIT"
] | null | null | null | import pytest
from adlib.adversaries.feature_deletion import AdversaryFeatureDeletion
from sklearn import svm
from adlib.learners import SimpleLearner
from data_reader.dataset import EmailDataset
from data_reader.operations import load_dataset
@pytest.fixture
def data():
dataset = EmailDataset(path='./data_reader/data/test/100_instance_debug.csv', raw=False)
training_, testing_ = dataset.split({'train': 60, 'test': 40})
training_data = load_dataset(training_)
testing_data = load_dataset(testing_)
return {'training_data': training_data, 'testing_data': testing_data}
@pytest.fixture
def learner(data):
learning_model = svm.SVC(probability=True, kernel='linear')
learner = SimpleLearner(learning_model, data['training_data'])
learner.train()
return learner
@pytest.fixture
def feature_deletion(learner):
return AdversaryFeatureDeletion(learner=learner)
def test_change_instance(feature_deletion, data):
sample = next((x for x in data['testing_data'] if x.get_label() == 1), None)
result = feature_deletion.change_instance(sample)
assert sample.label == result.label
def test_set_params(feature_deletion):
feature_deletion.set_params({'num_deletion': 50, 'all_malicious': True})
dict = feature_deletion.get_available_params()
assert dict['num_deletion'] == 50
assert dict['all_malicious'] == True
def test_attack(feature_deletion, data):
result = feature_deletion.attack(data['testing_data'])[0]
sample = data['testing_data'][0]
num = sample.get_feature_vector().get_feature_count()
for i in range(num):
assert result.get_feature_vector().get_feature(
i) == sample.get_feature_vector().get_feature(i)
def test_attack_different(feature_deletion, data):
feature_deletion.set_params({'num_deletion': 100, 'all_malicious': False})
result = feature_deletion.attack(data['testing_data'])[0]
sample = data['testing_data'][0]
assert result.get_feature_vector().indptr[1] != sample.get_feature_vector().indptr[1]
| 35.189655 | 92 | 0.748653 | 264 | 2,041 | 5.518939 | 0.272727 | 0.123542 | 0.072066 | 0.043926 | 0.248456 | 0.200412 | 0.089224 | 0.089224 | 0.089224 | 0.089224 | 0 | 0.011905 | 0.135718 | 2,041 | 57 | 93 | 35.807018 | 0.814059 | 0 | 0 | 0.162791 | 0 | 0 | 0.11465 | 0.022538 | 0 | 0 | 0 | 0 | 0.116279 | 1 | 0.162791 | false | 0 | 0.139535 | 0.023256 | 0.372093 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5620a3700cb215b8dbfe4bc707bf2609413ae823 | 1,225 | py | Python | cut_plist.py | labbbirder/cut-plist | 115394d23fbb58044cb421c9c2c220267e80bad5 | [
"MIT"
] | 1 | 2021-05-15T14:44:27.000Z | 2021-05-15T14:44:27.000Z | cut_plist.py | labbbirder/cut-plist | 115394d23fbb58044cb421c9c2c220267e80bad5 | [
"MIT"
] | null | null | null | cut_plist.py | labbbirder/cut-plist | 115394d23fbb58044cb421c9c2c220267e80bad5 | [
"MIT"
] | 1 | 2021-05-15T15:49:58.000Z | 2021-05-15T15:49:58.000Z | import plistlib
import os
import numpy as np
from PIL import Image
def read_plist(plist_path):
    """Parse the plist file at *plist_path* and return its contents."""
    with open(plist_path, "rb") as handle:
        return plistlib.load(handle)
def to_list(x):
    """Drop every curly brace from *x* and split the rest on commas."""
    stripped = x.translate(str.maketrans("", "", "{}"))
    return stripped.split(",")
def cut_plist(output, texture, save_dir):
    """Cut every sprite described in *output* (a parsed sprite-sheet plist
    frame dict) out of *texture* (a PIL Image) and save each as a PNG in
    *save_dir* (created if missing).

    NOTE(review): assumes each entry has 'textureRect', 'textureRotated',
    'spriteOffset' and 'spriteSourceSize' keys in the "{x,y}" string
    format handled by to_list() -- confirm against the plist producer.
    """
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    for key in output:
        data = output[key]
        # Frame rectangle inside the packed texture: x, y, width, height.
        rect = to_list(data["textureRect"])
        rotated = data["textureRotated"]
        x = int(rect[0])
        y = int(rect[1])
        width = int(rect[2])
        height = int(rect[3])
        # Rotated frames are stored transposed in the atlas.
        if rotated:
            width, height = height, width
        box = (x, y, x + width, y + height)
        newSize = np.array([width, height])
        # spriteOffset has its x component negated, presumably to match
        # PIL's top-left coordinate convention -- TODO confirm.
        offset = np.array(to_list(data["spriteOffset"])).astype("float")*(-1,1)
        srcSize = np.array(to_list(data["spriteSourceSize"])).astype("float")
        # Translate the offset from source-size coordinates into the
        # trimmed frame before the second crop.
        offset = ((newSize-srcSize)/2+offset).astype("int")
        sprite = texture.crop(box).crop((*offset,*(offset+srcSize)))
        # Undo the atlas rotation so the saved sprite is upright.
        if rotated:
            sprite = sprite.transpose(Image.ROTATE_90)
        save_path = os.path.splitext(os.path.join(save_dir, key))[0] + ".png"
        sprite.save(save_path)
| 28.488372 | 79 | 0.590204 | 164 | 1,225 | 4.317073 | 0.402439 | 0.033898 | 0.042373 | 0.036723 | 0.048023 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010893 | 0.250612 | 1,225 | 42 | 80 | 29.166667 | 0.760349 | 0 | 0 | 0.0625 | 0 | 0 | 0.061224 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.125 | 0.03125 | 0.28125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
562632b9da3d75bd5559f771c9a43df116af2988 | 3,791 | py | Python | examples/lstm2.py | neosensory/tflite_micro_compiler | 2c21a364e9763e51706cf6f6b447ed908314e117 | [
"Apache-2.0"
] | 48 | 2020-05-10T13:33:02.000Z | 2022-03-24T06:47:50.000Z | examples/lstm2.py | neosensory/tflite_micro_compiler | 2c21a364e9763e51706cf6f6b447ed908314e117 | [
"Apache-2.0"
] | 49 | 2020-05-21T22:03:51.000Z | 2022-03-09T08:09:45.000Z | examples/lstm2.py | neosensory/tflite_micro_compiler | 2c21a364e9763e51706cf6f6b447ed908314e117 | [
"Apache-2.0"
] | 16 | 2020-05-10T12:59:20.000Z | 2022-03-09T06:04:22.000Z | #!/usr/bin/python3
import random
import math
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM
# Hyper-parameters for the toy sine-wave next-sample prediction task.
train_batches=2000
eval_batches=50
train_sequlen=32
train_inputs=1
lstm_states=6
#activation="relu"
# None means a linear LSTM output activation.
activation=None
rec_activation="hard_sigmoid"
# One (sample, timestep=1, feature) row per timestep of every sequence.
x_train = np.zeros((train_batches*train_sequlen,1,train_inputs))
y_train = np.zeros((train_batches*train_sequlen,1,1))
x_test = np.zeros((eval_batches*train_sequlen,1,train_inputs))
y_test = np.zeros((eval_batches*train_sequlen,1,1))
# Fixed seed so the generated dataset is reproducible.
random.seed(1234)
# generate input of random sine waves, feed one at a time to the network
def random_sample():
    """Draw one random sine wave as an (amplitude, frequency, phase) tuple."""
    amplitude = random.uniform(0.5, 1)
    frequency = random.uniform(18, 32)
    phase = random.uniform(-math.pi, math.pi)
    return (amplitude, frequency, phase)
def waveform(ampl,freq,phase,idx):
    """Sample the sine wave (ampl, freq, phase) at sample index *idx*."""
    angle = (idx / freq) * 2 * math.pi + phase
    return ampl * math.sin(angle)
# calculate train data
for i in range(train_batches):
    (ampl,freq,phase) = random_sample()
    for j in range(train_sequlen): # subsequent measurements
        for k in range(train_inputs):
            # inputs: train_inputs consecutive wave samples starting at j
            x_train[i*train_sequlen+j][0][k]=waveform(ampl,freq,phase,j+k)
        # target: the sample immediately following the input window
        y_train[i*train_sequlen+j][0]=waveform(ampl,freq,phase,j+train_inputs)
# same construction for the held-out evaluation waves
for i in range(eval_batches):
    (ampl,freq,phase) = random_sample()
    for j in range(train_sequlen): # subsequent measurements
        for k in range(train_inputs):
            x_test[i*train_sequlen+j][0][k]=waveform(ampl,freq,phase,j+k)
        y_test[i*train_sequlen+j][0]=waveform(ampl,freq,phase,j+train_inputs)
# sanity-check the generated data
print(x_train[0][0:5], y_train[0][0:5])
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
def create_model(train=True):
    """Build the sine-prediction Keras model.

    train=True : batched model over train_sequlen timesteps with
                 internally-managed LSTM state, used for fitting.
    train=False: single-step model that takes and returns the explicit
                 LSTM (h, c) state tensors, for step-by-step evaluation.
    Both variants share the same layer shapes, so weights trained on the
    first can be loaded into the second.
    """
    if train:
        input0 = tf.keras.Input(batch_shape=(train_sequlen,1,train_inputs))
        # stateful is worse
        x = LSTM(lstm_states, recurrent_activation=rec_activation, activation=activation, return_sequences=False, return_state=False, stateful=False)(input0)
        #x = Dropout(0.1)(x) makes it a bit worse
    else:
        # Explicit state inputs so the caller can thread (h, c) manually.
        input0 = tf.keras.Input(batch_shape=(1,1,train_inputs),name="data")
        input1 = tf.keras.Input(batch_shape=(1,lstm_states),name="state_h")
        input2 = tf.keras.Input(batch_shape=(1,lstm_states),name="state_c")
        x, state,state2 = LSTM(lstm_states, recurrent_activation=rec_activation, activation=activation, return_sequences=False, return_state=True, stateful=True, unroll=True)(input0, initial_state=(input1, input2))
    # Single-unit dense head producing the predicted next sample.
    x = Dense(units=1)(x)
    if train:
        model = tf.keras.Model(inputs=input0, outputs=x, name="sine")
    else:
        model = tf.keras.Model(inputs=(input0,input1,input2), outputs=(x,state,state2), name="sine")
    model.summary()
    return model
# Train the sequence model, resetting LSTM state between epochs.
model=create_model()
model.compile(loss='mean_squared_error', optimizer='adam')
for i in range(8):
    model.fit(x_train, y_train, epochs=1, batch_size=train_sequlen, verbose=1, shuffle=False,
              validation_data=(x_test,y_test))
    model.reset_states()
model.save('mymodel')
model.save('mymodel_w.h5', save_format="h5")
# Rebuild the single-step (stateful) variant and load the trained weights.
model2= create_model(False)
model2.load_weights('mymodel_w.h5')
model2.save('evalmodel.h5', save_format="h5")
model2.compile(loss='mean_squared_error', optimizer='adam')
# Step through one test sequence, feeding the LSTM (h, c) state back in
# manually at every step.
state_h2 = np.zeros((1,lstm_states))
state_c2 = np.zeros((1,lstm_states))
for i in range(train_sequlen):
    testx, testy = x_test[i], y_test[i]
    testx = testx.reshape(1, 1, 1)
    res = model2.predict([testx,state_h2,state_c2], batch_size=1)
    print('In=%.1f, Expected=%.1f, Predicted=%.1f' % (testx[0][0][0], testy, res[0]))
    state_h2=res[1]
    state_c2=res[2]
# to convert to tflite use
# tflite_convert --keras_model_file evalmodel.h5 --output_file evalmodel.tflite --inference_type FLOAT
# from tensorflow 1.15 (2.2 doesn't work)
| 35.429907 | 214 | 0.724347 | 608 | 3,791 | 4.34375 | 0.256579 | 0.063612 | 0.039379 | 0.039758 | 0.442635 | 0.391897 | 0.341537 | 0.302158 | 0.247633 | 0.247633 | 0 | 0.030774 | 0.134265 | 3,791 | 106 | 215 | 35.764151 | 0.773918 | 0.105249 | 0 | 0.12987 | 0 | 0 | 0.049394 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038961 | false | 0 | 0.077922 | 0.012987 | 0.155844 | 0.051948 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5626db7729bb19f378a1f3a643736beccf6c224a | 1,251 | py | Python | critiquebrainz/frontend/external/musicbrainz_db/includes.py | AbhinavOhri/critiquebrainz | d1c1c175209ec78bcced1dbfd5bd64a46be2d1f4 | [
"Apache-2.0"
] | null | null | null | critiquebrainz/frontend/external/musicbrainz_db/includes.py | AbhinavOhri/critiquebrainz | d1c1c175209ec78bcced1dbfd5bd64a46be2d1f4 | [
"Apache-2.0"
] | null | null | null | critiquebrainz/frontend/external/musicbrainz_db/includes.py | AbhinavOhri/critiquebrainz | d1c1c175209ec78bcced1dbfd5bd64a46be2d1f4 | [
"Apache-2.0"
] | null | null | null | import critiquebrainz.frontend.external.musicbrainz_db.exceptions as mb_exceptions
RELATABLE_TYPES = [
'area',
'artist',
'label',
'place',
'event',
'recording',
'release',
'release-group',
'series',
'url',
'work',
'instrument'
]
RELATION_INCLUDES = [entity + '-rels' for entity in RELATABLE_TYPES]
TAG_INCLUDES = ["tags", "user-tags"]
RATING_INCLUDES = ["ratings", "user-ratings"]
VALID_INCLUDES = {
'place': ["aliases", "annotation"] + RELATION_INCLUDES + TAG_INCLUDES,
'event': ["aliases"] + RELATION_INCLUDES + TAG_INCLUDES,
'release_group': ["artists", "media", "releases"] + TAG_INCLUDES + RELATION_INCLUDES,
'release': ["artists", "labels", "recordings", "release-groups", "media", "annotation", "aliases"]
+ TAG_INCLUDES + RELATION_INCLUDES,
'artist': ["recordings", "releases", "media", "aliases", "annotation"] + RELATION_INCLUDES + TAG_INCLUDES,
}
def check_includes(entity, includes):
"""Check if includes specified for an entity are valid includes."""
for include in includes:
if include not in VALID_INCLUDES[entity]:
raise mb_exceptions.InvalidIncludeError("Bad includes: {inc} is not a valid include".format(inc=include))
| 35.742857 | 117 | 0.666667 | 132 | 1,251 | 6.151515 | 0.439394 | 0.118227 | 0.070197 | 0.099754 | 0.108374 | 0.108374 | 0 | 0 | 0 | 0 | 0 | 0 | 0.183054 | 1,251 | 34 | 118 | 36.794118 | 0.794521 | 0.048761 | 0 | 0 | 0 | 0 | 0.282939 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.033333 | 0 | 0.066667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5627c77651ef6fd4e5b5393b20243c305bd397e3 | 5,511 | py | Python | Gradient Descent/Gradient_Descent_Housing.py | prasad-madhale/machine-learning | bb611f809c16e1425136052e215ca83bd1148652 | [
"MIT"
] | null | null | null | Gradient Descent/Gradient_Descent_Housing.py | prasad-madhale/machine-learning | bb611f809c16e1425136052e215ca83bd1148652 | [
"MIT"
] | null | null | null | Gradient Descent/Gradient_Descent_Housing.py | prasad-madhale/machine-learning | bb611f809c16e1425136052e215ca83bd1148652 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 1 01:07:37 2019
@author: prasad
"""
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
def get_data(column_names):
    '''
    Args
        column_names: names of the features in dataset
    Returns
        train_df: training data
        test_df: testing data
    '''
    # Both files are whitespace-delimited and header-less.
    train_df = pd.read_csv('./data/housing_train.txt',
                           delim_whitespace=True, header=None)
    test_df = pd.read_csv('./data/housing_test.txt',
                          delim_whitespace=True, header=None)
    train_df.columns = column_names
    test_df.columns = column_names
    return train_df, test_df
def normalize(dataset):
    '''
    Shift-scale normalize every feature column (all but the last, the
    label column) of *dataset* in place to the [0, 1] range.

    Args
        dataset: data to be normalized using shift-scale normalization
    Returns
        dataset: normalized dataset
        maxs: max parameters for each feature normalization
        mins: min parameters for each feature normalization
    '''
    # Statistics must be captured before any column is modified.
    maxs = dataset.max()
    mins = dataset.min()
    for feature in dataset.columns[:-1]:
        # Vectorized column update; equivalent to -- and far faster
        # than -- the per-cell iterrows()/.at[] loop it replaces.
        dataset[feature] = ((dataset[feature] - mins[feature])
                            / (maxs[feature] - mins[feature]))
    return dataset, maxs, mins
def normalize_params(dataset, maxs, mins):
    '''
    Shift-scale normalize every feature column (all but the last, the
    label column) of *dataset* in place using externally supplied stats.

    Args
        dataset: data to be normalized
        maxs: max parameters for each feature normalization
        mins: min parameters for each feature normalization
    Returns:
        dataset: normalized dataset
    '''
    for feature in dataset.columns[:-1]:
        # Vectorized column update; equivalent to -- and far faster
        # than -- the per-cell iterrows()/.at[] loop it replaces.
        dataset[feature] = ((dataset[feature] - mins[feature])
                            / (maxs[feature] - mins[feature]))
    return dataset
def predict(test_data, weights):
    '''
    Args
        test_data: data for which predictions are to be calculated
        weights: weights to obtain predictions based on (bias first)
    Returns
        preds: dict mapping row index -> prediction under `weights`
    '''
    features = test_data.drop(['MEDV'], axis=1).values
    # Prepend a column of ones for the bias term.
    features = np.append(np.ones([len(features), 1]), features, 1)
    # Single matrix-vector product instead of one np.dot per row.
    values = features.dot(weights)
    return {i: values[i] for i in range(len(values))}
def get_mse(test_data, preds):
    '''
    Args
        test_data: data for which model is to be tested using MSE
        preds: predictions on given test_data obtained from model
    Returns
        mse: mean squared error
    '''
    labels = test_data['MEDV'].values
    squared_errors = [np.square(label - preds[i])
                      for i, label in enumerate(labels)]
    return pd.Series(squared_errors).mean()
def cost(data, labels, weights):
    '''
    Args
        data: data for which cost needs to be calculated
        labels: actual labels for data used
        weights: optimized weights for prediction
    Returns
        cost on the given data (mean squared error)
    '''
    residuals = np.dot(data, weights).flatten() - labels
    return np.sum(np.square(residuals)) / len(data)
def train(train_data, learn_rate = 0.001, max_iter = 3000):
    '''
    Fit linear-regression weights by full-batch gradient descent.

    Args
        train_data : normalized data for training
        learn_rate : learning rate for Gradient Descent
        max_iter : maximum number of iterations to run GD
    Returns
        w : learned weight vector (bias term first)
        costs : cost recorded after every iteration
    '''
    # Design matrix: features plus a leading column of ones for the bias.
    x = train_data.drop(['MEDV'], axis = 1).values
    x = np.append(np.ones([len(x),1]),x,1)
    y = train_data['MEDV'].values
    # Random initialization scaled by 1/sqrt(#features).
    w = np.random.normal(scale = 1 / math.sqrt(len(x[0])),size = (len(x[0]), 1)).flatten()
    costs = []
    for itr in range(max_iter):
        # NOTE(review): the gradient is not averaged over the batch size;
        # the small default learn_rate compensates.
        residual = np.dot(x, w).flatten() - y
        grads = np.dot(x.T, residual)
        w = np.subtract(w, learn_rate * grads)
        # Record cost after the weight update.
        costs.append(cost(x, y, w))
        if itr % 100 == 0:
            print('{}: Cost: {}'.format(itr, costs[itr]))
    return w, costs
def plot_cost(costs):
    """Plot the cost-vs-iteration curve on a new 20x10-inch figure."""
    plt.figure(figsize = (20,10))
    plt.title('Cost function')
    plt.ylabel('Costs')
    plt.xlabel('Iterations')
    plt.plot(list(costs))
#### EXECUTION
# names for the features
column_names = ['CRIM','ZN','INDUS','CHAS','NOX','RM','AGE','DIS','RAD','TAX','PTRATIO','B','LSTAT','MEDV']
# extract data from files
train_data, test_data = get_data(column_names)
# normalize data
train_data, maxs, mins = normalize(train_data)
# normalize test data using same parameters as for the training set
test_data = normalize_params(test_data,maxs, mins)
# optimize weights using Gradient Descent
w,costs = train(train_data)
# get predictions for optimized weights
pred_train = predict(train_data, w)
print('MSE for Housing dataset using Gradient Descent on Train Data: {}'.format(get_mse(train_data, pred_train)))
# get predictions for optimized weights
preds = predict(test_data, w)
print('MSE for Housing dataset using Gradient Descent on Test Data: {}'.format(get_mse(test_data, preds)))
# visualize the training cost per iteration
plot_cost(costs)
56282d4008935ca3506817cc4fc64ad64b685ddf | 2,833 | py | Python | teamspirit/preorders/views.py | etienne86/oc_p13_team_spirit | fd3d45618d349ecd0a03e63c4a7e9c1044eeffaa | [
"MIT"
] | null | null | null | teamspirit/preorders/views.py | etienne86/oc_p13_team_spirit | fd3d45618d349ecd0a03e63c4a7e9c1044eeffaa | [
"MIT"
] | null | null | null | teamspirit/preorders/views.py | etienne86/oc_p13_team_spirit | fd3d45618d349ecd0a03e63c4a7e9c1044eeffaa | [
"MIT"
] | null | null | null | from django.contrib.auth.decorators import login_required
from django.urls import reverse_lazy
from django.views.generic import ListView
from django.views.generic.edit import FormView
from teamspirit.catalogs.models import Product
from teamspirit.preorders.forms import AddToCartForm, DropFromCartForm
from teamspirit.preorders.models import ShoppingCart, ShoppingCartLine
class ShoppingCartView(ListView):
    """List the shopping cart lines belonging to the current user."""

    model = ShoppingCartLine
    template_name = "preorders/shopping_cart.html"

    def get_context_data(self, **kwargs):
        """Add the cart's total amount to the template context."""
        context = super().get_context_data(**kwargs)
        cart, _ = ShoppingCart.objects.get_or_create(user=self.request.user)
        context['shopping_cart_amount'] = cart.get_cart_amount()
        return context

    def get_queryset(self):
        """Restrict the listing to lines in the current user's cart."""
        super().get_queryset()  # preserve base-class call; its result is replaced below
        cart, _ = ShoppingCart.objects.get_or_create(user=self.request.user)
        return ShoppingCartLine.objects.filter(shopping_cart=cart)
# Module-level callable for URLconf; wrapped so anonymous users are redirected to login.
shopping_cart_view = ShoppingCartView.as_view()
shopping_cart_view = login_required(shopping_cart_view)
class AddToCartView(FormView):
    """Render and process the form that adds a product to the user's cart."""

    template_name = "preorders/add_to_cart.html"
    form_class = AddToCartForm
    success_url = reverse_lazy('catalogs:catalog')

    def get_context_data(self, **kwargs):
        """Expose the product referenced in the URL to the template."""
        context = super().get_context_data(**kwargs)
        context['product'] = Product.objects.get(id=self.kwargs['product_id'])
        return context

    def get_initial(self):
        """Pre-fill the form with the user's cart and the selected product."""
        initial = super().get_initial()
        cart, _ = ShoppingCart.objects.get_or_create(user=self.request.user)
        initial['shopping_cart'] = cart
        initial['product'] = Product.objects.get(id=self.kwargs['product_id'])
        return initial
# Module-level callable for URLconf; wrapped so anonymous users are redirected to login.
add_to_cart_view = AddToCartView.as_view()
add_to_cart_view = login_required(add_to_cart_view)
class DropFromCartView(FormView):
    """Render and process the form that removes a line from the user's cart."""

    template_name = "preorders/drop_from_cart.html"
    form_class = DropFromCartForm
    success_url = reverse_lazy('preorders:shopping_cart')

    def _get_line(self):
        # Single place to resolve the cart line addressed by the ``line_id`` URL kwarg
        # (previously this query expression was duplicated in two methods).
        return ShoppingCartLine.objects.get(id=self.kwargs['line_id'])

    def get_context_data(self, **kwargs):
        """Expose the targeted cart line to the template."""
        context = super().get_context_data(**kwargs)
        context['shopping_cart_line'] = self._get_line()
        return context

    def get_form_kwargs(self):
        """Pass the requesting user and the targeted line to the form.

        Uses the Python 3 zero-argument ``super()`` (the original used the legacy
        two-argument form) and a single ``update`` instead of three.
        """
        kwargs = super().get_form_kwargs()
        kwargs.update({
            'request_user': self.request.user,
            'line_id': self.kwargs['line_id'],
            'shopping_cart_line': self._get_line(),
        })
        return kwargs
# Module-level callable for URLconf; wrapped so anonymous users are redirected to login.
drop_from_cart_view = DropFromCartView.as_view()
drop_from_cart_view = login_required(drop_from_cart_view)
| 31.831461 | 78 | 0.701024 | 331 | 2,833 | 5.722054 | 0.199396 | 0.07603 | 0.044351 | 0.040127 | 0.349525 | 0.326294 | 0.326294 | 0.326294 | 0.326294 | 0.326294 | 0 | 0.001324 | 0.200141 | 2,833 | 88 | 79 | 32.193182 | 0.83451 | 0 | 0 | 0.242424 | 0 | 0 | 0.09354 | 0.037416 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.106061 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
562c8104d444901a4e792e4529b19010d3a451b2 | 40,079 | py | Python | tasks/prime.py | transcom/milmove_load_testing | b46526d9332c864de8891ef391394c0e9e8e7b95 | [
"MIT"
] | 2 | 2021-07-20T13:41:14.000Z | 2021-10-07T18:27:48.000Z | tasks/prime.py | transcom/milmove_load_testing | b46526d9332c864de8891ef391394c0e9e8e7b95 | [
"MIT"
] | 69 | 2020-07-08T21:05:58.000Z | 2022-03-31T11:35:14.000Z | tasks/prime.py | transcom/milmove_load_testing | b46526d9332c864de8891ef391394c0e9e8e7b95 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
""" TaskSets and tasks for the Prime & Support APIs """
import logging
import json
import random
from copy import deepcopy
from typing import Dict
from locust import tag, task, TaskSet
from utils.constants import (
INTERNAL_API_KEY,
TEST_PDF,
ZERO_UUID,
PRIME_API_KEY,
SUPPORT_API_KEY,
MOVE_TASK_ORDER,
MTO_SHIPMENT,
MTO_AGENT,
MTO_SERVICE_ITEM,
PAYMENT_REQUEST,
)
from .base import check_response, CertTaskMixin, ParserTaskMixin
logger = logging.getLogger(__name__)
def prime_path(url: str) -> str:
    """Return *url* prefixed with the Prime API v1 base path."""
    return "/prime/v1" + url
def support_path(url: str) -> str:
    """Return *url* prefixed with the Support API v1 base path."""
    return "/support/v1" + url
class PrimeDataStorageMixin:
    """
    TaskSet mixin used to store data from the Prime API during load testing so that it can be passed around and reused.

    We store a number of objects in a local store that can be requested by tasks.
    The tasks then hit an endpoint and call add or replace to update our local store with a list of viable objects.
    This mixin allows storing multiple items of each kind.
    """

    # Cap on how many objects of each kind we keep at once (to bound memory).
    DATA_LIST_MAX: int = 50

    # contains the ID values needed when creating moves using createMoveTaskOrder:
    default_mto_ids: Dict[str, str] = {
        "contractorID": "",
        "destinationDutyStationID": "",
        "originDutyStationID": "",
        "uploadedOrdersID": "",
    }

    local_store: Dict[str, list] = {
        MOVE_TASK_ORDER: [],
        MTO_SHIPMENT: [],
        MTO_SERVICE_ITEM: [],
        PAYMENT_REQUEST: [],
    }  # data stored will be shared among class instances thanks to mutable dict

    def get_stored(self, object_key, object_id=None, *args, **kwargs):
        """
        Given an object_key that represents an object type from the MilMove app, returns an object of that type from the
        list.

        Bug fix: callers throughout PrimeTasks/SupportTasks pass a specific object ID as the second argument
        ("If id was provided, get that specific one"), but previously it was swallowed by ``*args`` and ignored,
        so a random object was returned instead. We now honor it; the signature stays backward-compatible.

        :param object_key: str in [MOVE_TASK_ORDER, MTO_SHIPMENT, MTO_AGENT, MTO_SERVICE_ITEM, PAYMENT_REQUEST]
        :param object_id: optional ID of a specific stored object; when given, return that object or None
        :return: JSON/dict of a stored object, or None if nothing suitable is stored
        """
        data_list = self.local_store[object_key]
        if object_id:
            # Return the one object with a matching "id", or None if we don't have it.
            return next((item for item in data_list if item.get("id") == object_id), None)
        if len(data_list) > 0:  # otherwise we return None
            return random.choice(data_list)

    def get_stored_shipment_address(self, mto_shipment=None):
        """
        Grabs one of either pickupAddress or destinationAddress from a shipment and returns the specific field and
        payload for that address.

        :param mto_shipment: JSON/dict of a specific MTO Shipment payload (optional)
        :return: tuple(str name of the address field, dict address payload), or None if no usable address
        """
        if not mto_shipment:
            mto_shipment = self.get_stored(MTO_SHIPMENT) or {}
        address_fields = ["pickupAddress", "destinationAddress"]
        # An address is only usable if it exists and has a real (non-zero) UUID:
        valid_addresses = [
            (field, mto_shipment[field])
            for field in address_fields
            if mto_shipment.get(field) and mto_shipment[field].get("id", ZERO_UUID) != ZERO_UUID
        ]
        if len(valid_addresses) > 0:  # otherwise we return None
            return random.choice(valid_addresses)

    def add_stored(self, object_key, object_data):
        """
        Adds data to the list for the object key provided. Also checks if the list is already at the max number of
        elements, and if so, it randomly removes 1 to MAX number of elements so that the cycle can start again (and so
        we don't hog too much memory).

        :param object_key: str in [MOVE_TASK_ORDER, MTO_SHIPMENT, MTO_AGENT, MTO_SERVICE_ITEM, PAYMENT_REQUEST]
        :param object_data: JSON/dict, or a list of JSON/dicts
        :return: None
        """
        data_list = self.local_store[object_key]
        if len(data_list) >= self.DATA_LIST_MAX:
            num_to_delete = random.randint(1, self.DATA_LIST_MAX)
            del data_list[:num_to_delete]
        # Some creation endpoints auto-create multiple objects and return an array,
        # but each object in the array should still be considered individually here:
        if isinstance(object_data, list):
            data_list.extend(object_data)
        else:
            data_list.append(object_data)

    def update_stored(self, object_key, old_data, new_data):
        """
        Given an object key, replaces a stored object in the local store with a new updated object.

        :param object_key: str in [MOVE_TASK_ORDER, MTO_SHIPMENT, MTO_AGENT, MTO_SERVICE_ITEM, PAYMENT_REQUEST]
        :param old_data: JSON/dict to remove (all copies)
        :param new_data: JSON/dict to add
        :return: None
        """
        data_list = self.local_store[object_key]
        # Remove all instances of the stored object, in case multiples were added erroneously:
        while True:
            try:
                data_list.remove(old_data)
            except ValueError:
                break  # this means we finally cleared the list
        data_list.append(new_data)

    def set_default_mto_ids(self, moves):
        """
        Given a list of Move Task Orders, gets the four ID values needed to create more MTOs:

        - contractorID
        - uploadedOrdersID
        - destinationDutyStationID
        - originDutyStationID

        To get these values, this function hits the getMoveTaskOrder endpoint in the Support API to get all of the
        details on an MTO. The Prime API doesn't have access to all of this info, which is why we need to use the
        Support API instead. It will go through and hit this endpoint for all of the moves in the list until it finally
        gets a complete set of IDs.

        CAN ONLY be used when subclassed with TaskSet and CertTaskMixin.

        :param moves: list of JSON/dict objects
        :return: None
        """
        # Checks that we have a full set of MTO IDs already and halts processing if so:
        if self.has_all_default_mto_ids():
            return
        headers = {"content-type": "application/json"}
        for move in moves:
            # Call the Support API to get full details on the move:
            resp = self.client.get(
                support_path(f"/move-task-orders/{move['id']}"),
                name=support_path("/move-task-orders/{moveTaskOrderID}"),
                headers=headers,
                **self.cert_kwargs,
            )
            move_details, success = check_response(resp, "getMoveTaskOrder")
            if not success:
                continue  # try again with the next move in the list
            # Get the values we need from the move and set them in self.default_mto_ids.
            # If this move is missing any of these values, we default to using whatever value is already in
            # self.default_mto_ids, which could be nothing, or could be a value gotten from a previous move.
            # This way we never override good ID values from earlier moves in the list.
            self.default_mto_ids["contractorID"] = move_details.get(
                "contractorID", self.default_mto_ids["contractorID"]
            )
            if order_details := move_details.get("order"):
                self.default_mto_ids["uploadedOrdersID"] = order_details.get(
                    "uploadedOrdersID", self.default_mto_ids["uploadedOrdersID"]
                )
                self.default_mto_ids["destinationDutyStationID"] = order_details.get(
                    "destinationDutyStationID", self.default_mto_ids["destinationDutyStationID"]
                )
                self.default_mto_ids["originDutyStationID"] = order_details.get(
                    "originDutyStationID", self.default_mto_ids["originDutyStationID"]
                )
            # Do we have all the ID values we need? Cool, then stop processing.
            if self.has_all_default_mto_ids():
                logger.info(f"☑️ Set default MTO IDs for createMoveTaskOrder: \n{self.default_mto_ids}")
                break
        # If we're in the local environment, and we have gone through the entire list without getting a full set of IDs,
        # set our hardcoded IDs as the default:
        if not self.has_all_default_mto_ids() and self.user.is_local:
            logger.warning("⚠️ Using hardcoded MTO IDs for LOCAL env")
            self.default_mto_ids.update(
                {
                    "contractorID": "5db13bb4-6d29-4bdb-bc81-262f4513ecf6",
                    "destinationDutyStationID": "71b2cafd-7396-4265-8225-ff82be863e01",
                    "originDutyStationID": "1347d7f3-2f9a-44df-b3a5-63941dd55b34",
                    "uploadedOrdersID": "c26421b0-e4c3-446b-88f3-493bb25c1756",
                }
            )

    def has_all_default_mto_ids(self) -> bool:
        """Boolean indicating that we have all the values we need for creating new MTOs."""
        return self.default_mto_ids and all(self.default_mto_ids.values())
@tag("prime")
class PrimeTasks(PrimeDataStorageMixin, ParserTaskMixin, CertTaskMixin, TaskSet):
"""
Set of the tasks that can be called on the Prime API. Make sure to mark tasks with the `@task` decorator and add
tags where appropriate to make filtering for custom tests easier.
"""
def __init__(self, parent):
self.csrf_token = None
self.session_token = None
super().__init__(parent)
def customer_path(self, url: str) -> str:
return f"{self.user.alternative_host}{url}"
    def on_start(self):
        """Log in via devlocal auth and set up a fake service member for this load-test user.

        Flow: fetch the CSRF cookie, create a devlocal "milmove" user, then patch the
        resulting service member with fake profile data and add a backup contact.
        Interrupts the TaskSet if user creation fails.
        """
        # Hitting the login page sets the masked CSRF cookie we need for all later requests.
        self.client.get(self.customer_path("/devlocal-auth/login"))
        self.csrf_token = self.client.cookies.get("masked_gorilla_csrf")
        self.client.headers.update({"x-csrf-token": self.csrf_token})
        resp = self.client.post(
            self.customer_path("/devlocal-auth/create"),
            data={"userType": "milmove", "gorilla.csrf.Token": self.csrf_token},
        )
        self.session_token = self.client.cookies.get("mil_session_token")
        if resp.status_code != 200:
            self.interrupt()
        logged_in_user = self.client.get(self.customer_path("/internal/users/logged_in"))
        json_resp = logged_in_user.json()
        service_member_id = json_resp["service_member"]["id"]
        email = json_resp["email"]
        user_id = json_resp["id"]
        # Pick the first duty station matching the search as this member's current station.
        origin_duty_stations = self.client.get(self.customer_path("/internal/duty_stations?search=29"))
        current_station_id = origin_duty_stations.json()[0]["id"]
        overrides = {
            "id": service_member_id,
            "user_id": user_id,
            "edipi": "9999999999",
            "personal_email": email,
            "email_is_preferred": True,
            "current_station_id": current_station_id,
        }
        payload = self.fake_request("/service_members/{serviceMemberId}", "patch", INTERNAL_API_KEY, overrides, True)
        self.client.patch(
            self.customer_path(f"/internal/service_members/{service_member_id}"),
            name="/internal/service_members/{serviceMemberId}",
            data=json.dumps(payload),
            headers={"content-type": "application/json"},
            **self.user.cert_kwargs,
        )
        overrides = {"permission": "NONE"}
        payload = self.fake_request(
            "/service_members/{serviceMemberId}/backup_contacts", "post", INTERNAL_API_KEY, overrides
        )
        self.client.post(
            self.customer_path(f"/internal/service_members/{service_member_id}/backup_contacts"),
            name="/internal/service_members/{serviceMemberId}/backup_contacts",
            data=json.dumps(payload),
            headers={"content-type": "application/json"},
            **self.user.cert_kwargs,
        )
    @tag(MOVE_TASK_ORDER, "listMoves")
    @task
    def list_moves(self):
        """List moves available to the Prime and harvest default IDs for creating new MTOs."""
        timeout = {}
        if self.user.is_local:
            timeout["timeout"] = 15  # set a timeout of 15sec if we're running locally - just for this endpoint
        resp = self.client.get(prime_path("/moves"), **self.cert_kwargs, **timeout)
        moves, success = check_response(resp, "listMoves")
        # Use these MTOs to set the ID values we'll need to create more MTOs
        # (NOTE: we don't care about a failure here because we can set the default IDs instead,
        # if this is running locally)
        self.set_default_mto_ids(moves or [])
    @tag(MTO_SERVICE_ITEM, "createMTOServiceItem")
    @task
    def create_mto_service_item(self, overrides=None):
        """Create a service item on a stored shipment; stores the result(s) on success.

        :param overrides: optional dict of payload overrides; ``mtoShipmentID`` selects a specific shipment
        :return: the created service item(s) JSON, or None if no shipment was available
        """
        # If mtoShipmentID was provided, get that specific one. Else get any stored one.
        object_id = overrides.get("mtoShipmentID") if overrides else None
        mto_shipment = self.get_stored(MTO_SHIPMENT, object_id)
        if not mto_shipment:
            logger.debug("createMTOServiceItem: ⚠️ No mto_shipment found")
            return None
        overrides_local = {
            # override moveTaskOrderID because we don't want a random one
            "moveTaskOrderID": mto_shipment["moveTaskOrderID"],
            # override mtoShipmentID because we don't want a random one
            "mtoShipmentID": mto_shipment["id"],
        }
        # Merge local overrides with passed-in overrides
        overrides_local.update(overrides or {})
        payload = self.fake_request("/mto-service-items", "post", PRIME_API_KEY, overrides_local)
        headers = {"content-type": "application/json"}
        resp = self.client.post(
            prime_path("/mto-service-items"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs
        )
        mto_service_items, success = check_response(resp, f"createMTOServiceItem {payload['reServiceCode']}", payload)
        if success:
            self.add_stored(MTO_SERVICE_ITEM, mto_service_items)
        return mto_service_items
    @tag(MTO_SHIPMENT, "createMTOShipment")
    @task
    def create_mto_shipment(self, overrides=None):
        """Create a shipment on a stored move task order; stores the result on success.

        :param overrides: optional dict of payload overrides; ``moveTaskOrderID`` selects a specific move
        :return: the created shipment JSON, or None if no move was available
        """
        def guarantee_unique_agent_type(agents):
            # The API rejects two agents of the same type; if both generated agents
            # share a type, flip the second one to the other type.
            agent_types = {agent["agentType"] for agent in agents}
            if len(agents) >= 2 and len(agent_types) < 2:
                possible_types = {"RELEASING_AGENT", "RECEIVING_AGENT"}
                agents[1]["agentType"] = (possible_types - agent_types).pop()
        # If moveTaskOrderID was provided, get that specific one. Else get any stored one.
        object_id = overrides.get("moveTaskOrderID") if overrides else None
        move_task_order = self.get_stored(MOVE_TASK_ORDER, object_id)
        if not move_task_order:
            logger.debug("createMTOShipment: ⚠️ No move_task_order found")
            return (
                None  # we can't do anything else without a default value, and no pre-made MTOs satisfy our requirements
            )
        overrides_local = {
            # Override moveTaskorderID because we don't want a random one
            "moveTaskOrderID": move_task_order["id"],
            # Set agents UUIDs to ZERO_UUID because we can't actually set the UUID on creation
            "agents": {"id": ZERO_UUID, "mtoShipmentID": ZERO_UUID},
            # Set pickupAddress to ZERO_UUID because we can't actually set the UUID on creation
            "pickupAddress": {"id": ZERO_UUID},
            # Set destinationAddress to ZERO_UUID because we can't actually set the UUID on creation
            "destinationAddress": {"id": ZERO_UUID},
            # Set mtoServiceItems to empty to let the createMTOServiceItems endpoint do the creation
            "mtoServiceItems": [],
        }
        # Merge local overrides with passed-in overrides
        if overrides:
            overrides_local.update(overrides)
        payload = self.fake_request("/mto-shipments", "post", PRIME_API_KEY, overrides=overrides_local)
        guarantee_unique_agent_type(payload["agents"])  # modifies the payload directly
        headers = {"content-type": "application/json"}
        resp = self.client.post(
            prime_path("/mto-shipments"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs
        )
        mto_shipment, success = check_response(resp, "createMTOShipment", payload)
        if success:
            self.add_stored(MTO_SHIPMENT, mto_shipment)
        return mto_shipment
    @tag(MTO_SHIPMENT, "createMTOShipment", "expectedFailure")
    @task
    def create_mto_shipment_with_duplicate_agents(self, overrides=None):
        """Deliberately submit a shipment with two identical agents and expect a 422 response.

        :param overrides: optional dict of payload overrides; ``moveTaskOrderID`` selects a specific move
        :return: None
        """
        # If moveTaskOrderID was provided, get that specific one. Else get any stored one.
        object_id = overrides.get("moveTaskOrderID") if overrides else None
        move_task_order = self.get_stored(MOVE_TASK_ORDER, object_id)
        if not move_task_order:
            logger.debug("createMTOShipment — expected failure: ⚠️ No move_task_order found")
            return (
                None  # we can't do anything else without a default value, and no pre-made MTOs satisfy our requirements
            )
        agent_type = random.choice(["RELEASING_AGENT", "RECEIVING_AGENT"])
        agent_override = {"id": ZERO_UUID, "mtoShipmentID": ZERO_UUID, "agentType": agent_type}
        overrides_local = {
            # Override moveTaskorderID because we don't want a random one
            "moveTaskOrderID": move_task_order["id"],
            # Set agents UUIDs to ZERO_UUID because we can't actually set the UUID on creation and guarantee two agents
            "agents": [agent_override, agent_override],
            # Set pickupAddress to ZERO_UUID because we can't actually set the UUID on creation
            "pickupAddress": {"id": ZERO_UUID},
            # Set destinationAddress to ZERO_UUID because we can't actually set the UUID on creation
            "destinationAddress": {"id": ZERO_UUID},
            # Set mtoServiceItems to empty to let the createMTOServiceItems endpoint do the creation
            "mtoServiceItems": [],
        }
        # Merge local overrides with passed-in overrides
        if overrides:
            overrides_local.update(overrides)
        payload = self.fake_request("/mto-shipments", "post", PRIME_API_KEY, overrides=overrides_local)
        headers = {"content-type": "application/json"}
        resp = self.client.post(
            prime_path("/mto-shipments"),
            name=prime_path("/mto-shipments — expected failure"),
            data=json.dumps(payload),
            headers=headers,
            **self.user.cert_kwargs,
        )
        check_response(resp, "createMTOShipmentFailure", payload, "422")
@tag(PAYMENT_REQUEST, "createUpload")
@task
def create_upload(self, overrides=None):
# If id was provided, get that specific one. Else get any stored one.
object_id = overrides.get("id") if overrides else None
payment_request = self.get_stored(PAYMENT_REQUEST, object_id)
if not payment_request:
return
upload_file = {"file": open(TEST_PDF, "rb")}
resp = self.client.post(
prime_path(f"/payment-requests/{payment_request['id']}/uploads"),
name=prime_path("/payment-requests/{paymentRequestID}/uploads"),
files=upload_file,
**self.user.cert_kwargs,
)
check_response(resp, "createUpload")
@tag(PAYMENT_REQUEST, "createPaymentRequest")
@task
def create_payment_request(self, overrides=None):
# If mtoServiceItemID was provided, get that specific one. Else get any stored one.
object_id = overrides.get("mtoServiceItemID") if overrides else None
service_item = self.get_stored(MTO_SERVICE_ITEM, object_id)
if not service_item:
return
payload = {
"moveTaskOrderID": service_item["moveTaskOrderID"],
"serviceItems": [{"id": service_item["id"]}],
"isFinal": False,
}
shipment = self.get_stored(MTO_SHIPMENT, service_item["mtoShipmentID"])
if not shipment:
logger.info("unable to find shipment of payment request service item")
headers = {"content-type": "application/json"}
# if the actual weight hasn't been provided, creating the payment request will fail
if not shipment.get("primeActualWeight"):
self.client.post(
prime_path("/payment-requests"),
name=prime_path("/payment-requests — expected failure"),
data=json.dumps(payload),
headers=headers,
**self.user.cert_kwargs,
)
return None
resp = self.client.post(
prime_path("/payment-requests"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs
)
payment_request, success = check_response(resp, "createPaymentRequest", payload)
if success:
self.add_stored(PAYMENT_REQUEST, payment_request)
return payment_request
    @tag(MTO_SHIPMENT, "updateMTOShipment")
    @task
    def update_mto_shipment(self, overrides=None):
        """Patch a stored shipment with fake data; stores the updated shipment on success.

        :param overrides: optional dict of payload overrides; ``id`` selects a specific shipment
        :return: the updated shipment JSON, or None
        """
        # If id was provided, get that specific one. Else get any stored one.
        object_id = overrides.get("id") if overrides else None
        mto_shipment = self.get_stored(MTO_SHIPMENT, object_id)
        if not mto_shipment:
            return  # can't run this task
        payload = self.fake_request("/mto-shipments/{mtoShipmentID}", "patch", PRIME_API_KEY, overrides)
        # Agents and addresses should not be updated by this endpoint, and primeEstimatedWeight cannot be updated after
        # it is initially set (and it is set in create_mto_shipment)
        fields_to_remove = [
            "agents",
            "pickupAddress",
            "destinationAddress",
            "secondaryPickupAddress",
            "secondaryDeliveryAddress",
            "primeEstimatedWeight",
        ]
        # nts weight is only valid when the shipment type is nts release
        if payload.get("ntsRecordedWeight"):
            shipmentType = payload.get("shipmentType") or mto_shipment.get("shipmentType")
            if shipmentType != "HHG_OUTOF_NTS_DOMESTIC":
                fields_to_remove.append("ntsRecordedWeight")
        for f in fields_to_remove:
            payload.pop(f, None)
        headers = {"content-type": "application/json", "If-Match": mto_shipment["eTag"]}
        resp = self.client.patch(
            prime_path(f"/mto-shipments/{mto_shipment['id']}"),
            name=prime_path("/mto-shipments/{mtoShipmentID}"),
            data=json.dumps(payload),
            headers=headers,
            **self.user.cert_kwargs,
        )
        new_mto_shipment, success = check_response(resp, "updateMTOShipment", payload)
        if success:
            self.update_stored(MTO_SHIPMENT, mto_shipment, new_mto_shipment)
        return new_mto_shipment
    @tag(MTO_SHIPMENT, "updateMTOShipmentAddress")
    @task
    def update_mto_shipment_address(self, overrides=None):
        """Update the pickup or destination address on a stored shipment.

        :param overrides: optional dict of payload overrides; ``id`` selects a specific shipment
        :return: the locally-patched shipment JSON, or None
        """
        # If id was provided, get that specific one. Else get any stored one.
        object_id = overrides.get("id") if overrides else None
        mto_shipment = self.get_stored(MTO_SHIPMENT, object_id)
        if not mto_shipment:
            return
        address_tuple = self.get_stored_shipment_address(mto_shipment)  # returns a (field_name, address_dict) tuple
        if not address_tuple:
            return  # this shipment didn't have any addresses, we will try again later with a different shipment
        field, address = address_tuple
        overrides_local = {"id": address["id"]}
        overrides_local.update(overrides or {})
        payload = self.fake_request(
            "/mto-shipments/{mtoShipmentID}/addresses/{addressID}", "put", PRIME_API_KEY, overrides=overrides_local
        )
        headers = {"content-type": "application/json", "If-Match": address["eTag"]}
        # update mto_shipment address
        resp = self.client.put(
            prime_path(f"/mto-shipments/{mto_shipment['id']}/addresses/{address['id']}"),
            name=prime_path("/mto-shipments/{mtoShipmentID}/addresses/{addressID}"),
            data=json.dumps(payload),
            headers=headers,
            **self.user.cert_kwargs,
        )
        updated_address, success = check_response(resp, "updateMTOShipmentAddress", payload)
        if success:
            # we only got the address, so we're gonna pop it back into the shipment to store
            updated_shipment = deepcopy(mto_shipment)
            updated_shipment[field] = updated_address
            self.update_stored(MTO_SHIPMENT, mto_shipment, updated_shipment)
            return updated_shipment
    @tag(MTO_AGENT, "updateMTOAgent")
    @task
    def update_mto_agent(self, overrides=None):
        """Update the first agent on a stored shipment via the Prime API.

        :param overrides: optional dict; ``mtoShipmentID`` selects a specific shipment
        :return: the locally-patched shipment JSON, or None
        """
        # If id was provided, get that specific one. Else get any stored one.
        object_id = overrides.get("mtoShipmentID") if overrides else None
        mto_shipment = self.get_stored(MTO_SHIPMENT, object_id)
        if not mto_shipment:
            return  # can't run this task
        if mto_shipment.get("agents") is None:
            return  # can't update agents if there aren't any
        # NOTE(review): this discards the caller-supplied overrides (only agentType is ever
        # re-set below) — confirm whether passed-in payload overrides should be honored here.
        overrides = {}
        mto_agents = mto_shipment["agents"]
        mto_agent = mto_shipment["agents"][0]
        if len(mto_agents) >= 2:
            overrides = {"agentType": mto_agent["agentType"]}  # ensure agentType does not change
        payload = self.fake_request("/mto-shipments/{mtoShipmentID}/agents/{agentID}", "put", PRIME_API_KEY, overrides)
        headers = {"content-type": "application/json", "If-Match": mto_agent["eTag"]}
        resp = self.client.put(
            prime_path(f"/mto-shipments/{mto_shipment['id']}/agents/{mto_agent['id']}"),
            name=prime_path("/mto-shipments/{mtoShipmentID}/agents/{agentID}"),
            data=json.dumps(payload),
            headers=headers,
            **self.user.cert_kwargs,
        )
        updated_agent, success = check_response(resp, "updateMTOAgent", payload)
        if success:
            # we only got the agent, so we're gonna pop it back into the shipment to store
            new_shipment = deepcopy(mto_shipment)
            new_shipment["agents"][0] = updated_agent
            self.update_stored(MTO_SHIPMENT, mto_shipment, new_shipment)
            return new_shipment
@tag(MTO_SERVICE_ITEM, "updateMTOServiceItem")
@task
def update_mto_service_item(self, overrides=None):
# If id was provided, get that specific one. Else get any stored one.
object_id = overrides.get("id") if overrides else None
mto_service_item = self.get_stored(MTO_SERVICE_ITEM, object_id)
if not mto_service_item:
return # can't run this task
try:
re_service_code = mto_service_item["reServiceCode"]
except KeyError:
logger.error(f"⛔️ update_mto_service_item recvd mtoServiceItem without reServiceCode \n{mto_service_item}")
return
if re_service_code not in ["DDDSIT", "DOPSIT"]:
logging.info(
"update_mto_service_item recvd mtoServiceItem from store. Discarding because reServiceCode not in "
"[DDDSIT, DOPSIT]"
)
return
payload = self.fake_request(
"/mto-service-items/{mtoServiceItemID}",
"patch",
overrides={
"id": mto_service_item["id"],
"sitDestinationFinalAddress": {
"id": mto_service_item["sitDestinationFinalAddress"]["id"]
if mto_service_item.get("sitDestinationFinalAddress")
and mto_service_item["sitDestinationFinalAddress"].get("id")
else ZERO_UUID,
},
},
)
headers = {"content-type": "application/json", "If-Match": mto_service_item["eTag"]}
resp = self.client.patch(
prime_path(f"/mto-service-items/{mto_service_item['id']}"),
name=prime_path("/mto-service-items/{mtoServiceItemID}"),
data=json.dumps(payload),
headers=headers,
**self.user.cert_kwargs,
)
updated_service_item, success = check_response(resp, f"updateMTOServiceItem {re_service_code}", payload)
if success:
self.update_stored(MTO_SERVICE_ITEM, mto_service_item, updated_service_item)
return updated_service_item
    @tag(MOVE_TASK_ORDER, "updateMTOPostCounselingInformation")
    @task
    def update_post_counseling_information(self, overrides=None):
        """Patch post-counseling info on a stored move; stores the updated move on success.

        :param overrides: optional dict; ``id`` selects a specific move task order
        :return: the updated move JSON, or None
        """
        # If id was provided, get that specific one. Else get any stored one.
        object_id = overrides.get("id") if overrides else None
        move_task_order = self.get_stored(MOVE_TASK_ORDER, object_id)
        if not move_task_order:
            logger.debug("updateMTOPostCounselingInformation: ⚠️ No move_task_order found")
            return  # we can't do anything else without a default value, and no pre-made MTOs satisfy our requirements
        payload = self.fake_request("/move-task-orders/{moveTaskOrderID}/post-counseling-info", "patch", PRIME_API_KEY)
        move_task_order_id = move_task_order["id"]  # path parameter
        headers = {"content-type": "application/json", "If-Match": move_task_order["eTag"]}
        resp = self.client.patch(
            prime_path(f"/move-task-orders/{move_task_order_id}/post-counseling-info"),
            name=prime_path("/move-task-orders/{moveTaskOrderID}/post-counseling-info"),
            data=json.dumps(payload),
            headers=headers,
            **self.user.cert_kwargs,
        )
        new_mto, success = check_response(resp, "updateMTOPostCounselingInformation", payload)
        if success:
            self.update_stored(MOVE_TASK_ORDER, move_task_order, new_mto)
        return new_mto
@tag("support")
class SupportTasks(PrimeDataStorageMixin, ParserTaskMixin, CertTaskMixin, TaskSet):
"""
Set of the tasks that can be called on the Support API. Make sure to mark tasks with the `@task` decorator and add
tags where appropriate to make filtering for custom tests easier. Ex:
@tag('updates', 'shipments')
@task
def update_mto_shipment_status(self):
# etc.
"""
    @tag(MTO_SHIPMENT, "updateMTOShipmentStatus")
    @task(2)
    def update_mto_shipment_status(self, overrides=None):
        """Move a stored shipment through a valid status transition via the Support API.

        Re-fetches the shipment's move first so the status check runs against fresh data,
        then skips any transition the server would reject.

        :param overrides: optional dict of payload overrides; ``id`` selects a specific shipment
        :return: the (pre-update) shipment JSON, or None
        """
        # If id was provided, get that specific one. Else get any stored one.
        object_id = overrides.get("id") if overrides else None
        mto_shipment = self.get_stored(MTO_SHIPMENT, object_id)
        if not mto_shipment:
            logger.debug("updateMTOShipmentStatus: ⚠️ No mto_shipment found.")
            return None  # can't run this task
        # To avoid issues with the mto shipment being stale
        # retrieve the move associated with the shipment
        # and then use the newly fetched move to the find most up to date version of the shipment
        move_id = mto_shipment["moveTaskOrderID"]
        headers = {"content-type": "application/json"}
        resp = self.client.get(
            support_path(f"/move-task-orders/{move_id}"),
            name=support_path("/move-task-orders/{moveTaskOrderID}"),
            headers=headers,
        )
        move_details, success = check_response(resp, "getMoveTaskOrder")
        if not move_details:
            logger.debug("updateMTOShipmentStatus: ⚠️ No mto_shipment found.")
            return None  # can't run this task
        for fetched_mto_shipment in move_details["mtoShipments"]:
            if fetched_mto_shipment["id"] == mto_shipment["id"]:
                # Generate fake payload based on the endpoint's required fields
                payload = self.fake_request(
                    "/mto-shipments/{mtoShipmentID}/status", "patch", SUPPORT_API_KEY, overrides
                )
                # Bail out on any transition the server would reject for the current status:
                if fetched_mto_shipment["status"] == "CANCELLATION_REQUESTED" and payload["status"] != "CANCELED":
                    return None
                elif fetched_mto_shipment["status"] == "SUBMITTED" and payload["status"] not in [
                    "APPROVED",
                    "REJECTED",
                ]:
                    return None
                elif fetched_mto_shipment["status"] == "DIVERSION_REQUESTED" and payload["status"] != "APPROVED":
                    return None
                elif fetched_mto_shipment["status"] == "APPROVED" and payload["status"] != "DIVERSION_REQUESTED":
                    return None
                elif fetched_mto_shipment["status"] in ["DRAFT", "REJECTED", "CANCELED"]:
                    return None
                headers = {"content-type": "application/json", "If-Match": fetched_mto_shipment["eTag"]}
                resp = self.client.patch(
                    support_path(f"/mto-shipments/{fetched_mto_shipment['id']}/status"),
                    name=support_path("/mto-shipments/{mtoShipmentID}/status"),
                    data=json.dumps(payload),
                    headers=headers,
                )
                new_mto_shipment, success = check_response(resp, "updateMTOShipmentStatus", payload)
                if success:
                    self.update_stored(MTO_SHIPMENT, mto_shipment, new_mto_shipment)
                # NOTE(review): this returns the stale pre-update shipment rather than
                # new_mto_shipment — confirm whether that is intended.
                return mto_shipment
    @tag(MTO_SHIPMENT, "updateMTOShipmentStatus", "expectedFailure")
    # run this task less frequently than the others since this is testing an expected failure
    @task(1)
    def update_mto_shipment_with_invalid_status(self, overrides=None):
        """Deliberately send an invalid DRAFT status transition and expect a 422 response.

        :param overrides: optional dict of payload overrides; ``id`` selects a specific shipment
        :return: None
        """
        # If id was provided, get that specific one. Else get any stored one.
        object_id = overrides.get("id") if overrides else None
        mto_shipment = self.get_stored(MTO_SHIPMENT, object_id)
        if not mto_shipment:
            logger.debug("updateMTOShipmentStatus: ⚠️ No mto_shipment found.")
            return None  # can't run this task
        overrides_local = {"status": "DRAFT"}
        # Merge local overrides with passed-in overrides
        if overrides:
            overrides_local.update(overrides)
        # Generate fake payload based on the endpoint's required fields
        payload = self.fake_request("/mto-shipments/{mtoShipmentID}/status", "patch", SUPPORT_API_KEY, overrides_local)
        # Force DRAFT even if the caller's overrides replaced it — the point is to fail:
        payload["status"] = "DRAFT"
        headers = {"content-type": "application/json", "If-Match": mto_shipment["eTag"]}
        resp = self.client.patch(
            support_path(f"/mto-shipments/{mto_shipment['id']}/status"),
            name=support_path("/mto-shipments/{mtoShipmentID}/status — expected failure"),
            data=json.dumps(payload),
            headers=headers,
        )
        check_response(resp, "updateMTOShipmentStatusFailure", payload, "422")
    @tag(MOVE_TASK_ORDER, "createMoveTaskOrder")
    @task(2)
    def create_move_task_order(self):
        """Create a new move via the Support API and immediately make it available to the Prime.

        Requires the full set of default MTO IDs (gathered by set_default_mto_ids).
        Stores the resulting move on success.

        :return: the newly available move JSON, or None
        """
        # Check that we have all required ID values for this endpoint:
        if not self.has_all_default_mto_ids():
            logger.debug(f"⚠️ Missing createMoveTaskOrder IDs for environment {self.user.env}")
            return
        overrides = {
            "contractorID": self.default_mto_ids["contractorID"],
            # Moves that are in DRAFT or CANCELED mode cannot be used by the rest of the load testing
            "status": "SUBMITTED",
            # If this date is set here, the status will not properly transition to APPROVED
            "availableToPrimeAt": None,
            "order": {
                "status": "APPROVED",
                "tac": "F8J1",
                # We need these objects to exist
                "destinationDutyStationID": self.default_mto_ids["destinationDutyStationID"],
                "originDutyStationID": self.default_mto_ids["originDutyStationID"],
                "uploadedOrdersID": self.default_mto_ids["uploadedOrdersID"],
                # To avoid the overrides being inserted into these nested objects...
                "entitlement": {},
                "customer": {},
            },
        }
        payload = self.fake_request("/move-task-orders", "post", SUPPORT_API_KEY, overrides)
        headers = {"content-type": "application/json"}
        resp = self.client.post(
            support_path("/move-task-orders"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs
        )
        json_body, success = check_response(resp, "createMoveTaskOrder", payload)
        if not success:
            return  # no point continuing if it didn't work out
        # Second call: flip the new move to available-to-prime so Prime tasks can use it.
        move_task_order_id = json_body["id"]
        e_tag = json_body["eTag"]
        headers["if-match"] = e_tag
        resp = self.client.patch(
            support_path(f"/move-task-orders/{move_task_order_id}/available-to-prime"),
            name=support_path("/move-task-orders/{moveTaskOrderID}/available-to-prime"),
            headers=headers,
            **self.user.cert_kwargs,
        )
        new_mto, success = check_response(resp, "makeMoveTaskOrderAvailable")
        if success:
            self.add_stored(MOVE_TASK_ORDER, new_mto)
        return new_mto
# @tag(MTO_SERVICE_ITEM, "updateMTOServiceItemStatus")
@task(2)
def update_mto_service_item_status(self, overrides=None):
    """Patch the status of a stored MTO service item via the support API.

    :param overrides: optional dict of payload overrides; an "id" key selects
        a specific stored service item.
    :return: the updated service item payload, or None when nothing is stored.
    """
    # If id was provided, get that specific one. Else get any stored one.
    object_id = overrides.get("id") if overrides else None
    mto_service_item = self.get_stored(MTO_SERVICE_ITEM, object_id)
    # if we don't have an mto service item we can't run this task
    if not mto_service_item:
        logger.debug("updateMTOServiceItemStatus: ⚠️ No mto_service_item found")
        return None
    payload = self.fake_request("/mto-service-items/{mtoServiceItemID}/status", "patch", SUPPORT_API_KEY, overrides)
    headers = {"content-type": "application/json", "If-Match": mto_service_item["eTag"]}
    resp = self.client.patch(
        support_path(f"/mto-service-items/{mto_service_item['id']}/status"),
        name=support_path("/mto-service-items/{mtoServiceItemID}/status"),
        data=json.dumps(payload),
        headers=headers,
        **self.user.cert_kwargs,
    )
    # Bug fix: the original reassigned ``mto_service_item`` here, so
    # update_stored() received the *new* object as both the old and the new
    # value and the stale stored entry was never matched and replaced.
    new_mto_service_item, success = check_response(resp, "updateMTOServiceItemStatus", payload)
    if success:
        self.update_stored(MTO_SERVICE_ITEM, mto_service_item, new_mto_service_item)
    return new_mto_service_item
@tag(PAYMENT_REQUEST, "updatePaymentRequestStatus")
@task(2)
def update_payment_request_status(self, overrides=None):
    """Patch the status of a stored payment request via the support API.

    :param overrides: optional dict of payload overrides; an "id" key selects
        a specific stored payment request.
    :return: the updated payment request payload, or None when nothing is stored.
    """
    # If id was provided, get that specific one. Else get any stored one.
    object_id = overrides.get("id") if overrides else None
    payment_request = self.get_stored(PAYMENT_REQUEST, object_id)
    if not payment_request:
        # Log the skip so it is visible, matching the sibling tasks.
        logger.debug("updatePaymentRequestStatus: ⚠️ No payment_request found")
        return
    # Consistency fix: pass the caller's overrides through to the payload
    # faker like every other update task does (they were dropped here).
    payload = self.fake_request("/payment-requests/{paymentRequestID}/status", "patch", SUPPORT_API_KEY, overrides)
    headers = {"content-type": "application/json", "If-Match": payment_request["eTag"]}
    resp = self.client.patch(
        support_path(f"/payment-requests/{payment_request['id']}/status"),
        name=support_path("/payment-requests/{paymentRequestID}/status"),
        data=json.dumps(payload),
        headers=headers,
        **self.user.cert_kwargs,
    )
    new_payment_request, success = check_response(resp, "updatePaymentRequestStatus", payload)
    if success:
        self.update_stored(PAYMENT_REQUEST, payment_request, new_payment_request)
    return new_payment_request
@tag(MOVE_TASK_ORDER, "getMoveTaskOrder")
@task(2)
def get_move_task_order(self, overrides=None):
    """Fetch a stored move task order and refresh the local copy with the response.

    :param overrides: optional dict; an "id" key selects a specific stored MTO.
    :return: the refreshed MTO payload when the request succeeds.
    """
    # A specific id may be requested through overrides; otherwise any stored MTO.
    requested_id = overrides.get("id") if overrides else None
    stored_mto = self.get_stored(MOVE_TASK_ORDER, requested_id)
    if not stored_mto:
        logger.debug("getMoveTaskOrder: ⚠️ No move_task_order found")
        return
    resp = self.client.get(
        support_path(f"/move-task-orders/{stored_mto['id']}"),
        name=support_path("/move-task-orders/{moveTaskOrderID}"),
        headers={"content-type": "application/json"},
        **self.user.cert_kwargs,
    )
    new_mto, success = check_response(resp, "getMoveTaskOrder")
    if success:
        self.update_stored(MOVE_TASK_ORDER, stored_mto, new_mto)
    return new_mto
| 44.384275 | 120 | 0.637366 | 4,748 | 40,079 | 5.20198 | 0.117734 | 0.043645 | 0.02158 | 0.022309 | 0.560266 | 0.513584 | 0.461881 | 0.39698 | 0.376331 | 0.359731 | 0 | 0.004529 | 0.267247 | 40,079 | 902 | 121 | 44.433481 | 0.835467 | 0.211058 | 0 | 0.372057 | 0 | 0 | 0.21942 | 0.100408 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045526 | false | 0 | 0.012559 | 0.00471 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
562ee8fc837ffcba58e8885e34e37a46643ca002 | 3,350 | py | Python | lib/cell.py | rafelafrance/boyd-bird-journal | 289744703220015cb61d22a8e6f8eff0040b296f | [
"MIT"
] | null | null | null | lib/cell.py | rafelafrance/boyd-bird-journal | 289744703220015cb61d22a8e6f8eff0040b296f | [
"MIT"
] | 5 | 2017-11-02T17:12:31.000Z | 2021-04-21T19:07:39.000Z | lib/cell.py | rafelafrance/boyd-bird-journal | 289744703220015cb61d22a8e6f8eff0040b296f | [
"MIT"
] | null | null | null | """Data and functions for dealing with cell contents."""
# pylint: disable=no-member, too-many-instance-attributes, too-many-arguments
import numpy as np
from skimage import util
from skimage.transform import probabilistic_hough_line
from lib.util import Crop, Offset, intersection
class Cell:
    """Data and functions for dealing with cell contents."""

    # Mean-intensity thresholds used when classifying a cell as a label.
    # NOTE(review): row_label_threshold is not referenced inside this class —
    # presumably used by callers; confirm before removing.
    row_label_threshold = 20
    col_label_threshold = 15

    # Default margin (pixels) trimmed from each side of the interior to
    # remove the surrounding grid lines.
    crop = Crop(top=4, bottom=4, left=4, right=4)

    # Angles (radians) probed by the Hough line transform.
    forward_slashes = np.deg2rad(np.linspace(65.0, 25.0, num=161))
    label_lines = np.deg2rad(np.linspace(0.0, 65.0, num=181))
    # NOTE(review): '+=' adds the two linspaces element-wise, producing 181
    # angles spanning [-65, 65] degrees. If two separate angle ranges were
    # intended, this should be np.concatenate — confirm.
    label_lines += np.deg2rad(np.linspace(-65.0, 0.0, num=181))

    def __init__(self, grid, top=None, bottom=None, left=None, right=None):
        """
        Build a cell from the 4 surrounding grid lines.

        We will also get the for corners of the cell by finding the
        intersection of the grid lines.
        """
        self.image = grid.edges
        # Corner points come from pairwise intersections of the bounding lines.
        self.top_left = intersection(top, left)
        self.bottom_left = intersection(bottom, left)
        self.top_right = intersection(top, right)
        self.bottom_right = intersection(bottom, right)
        self.width = self.top_right.x - self.top_left.x
        self.height = self.bottom_left.y - self.top_left.y
        # Cell position in page coordinates: grid offset plus local corner.
        self.offset = Offset(x=grid.offset.x + self.top_left.x,
                             y=grid.offset.y + self.top_left.y)

    def interior(self, crop=None):
        """
        Get the interior image of the cell.

        Sometimes we will want to crop the interior to try and remove the
        surrounding grid lines. That is, we want the cell contents, not the
        grid lines.
        """
        # util.crop takes per-edge margins, i.e. distances from each image edge.
        top = max(0, self.top_left.y, self.top_right.y)
        bottom = max(0, self.image.shape[0]
                     - min(self.bottom_left.y, self.bottom_right.y))
        left = max(0, self.top_left.x, self.bottom_left.x)
        right = max(0, self.image.shape[1]
                    - min(self.top_right.x, self.bottom_right.x))
        inside = util.crop(self.image, ((top, bottom), (left, right)))
        # Only apply the extra crop when the result would remain non-empty.
        if crop and inside.shape[1] > (crop.right + crop.left) \
                and inside.shape[0] > (crop.bottom + crop.top):
            inside = util.crop(
                inside,
                ((crop.top, crop.bottom), (crop.left, crop.right)))
        return inside

    def is_label(self, crop=None):
        """Determine if the cell is a column label."""
        if not crop:
            crop = self.crop
        inside = self.interior(crop=crop)
        # NOTE(review): has_line() always crops with self.crop, ignoring the
        # `crop` argument applied to `inside` above — confirm this is intended.
        lines = self.has_line(self.label_lines, line_length=12)
        if not min(inside.shape):
            return False  # an empty interior cannot be a label
        # A label either contains a detected line or is dark on average.
        return bool(len(lines)) or np.mean(inside) > self.col_label_threshold

    def has_line(self, angles=None, line_length=15):
        """Determine if the cell has a line at any of the given angles."""
        # Returns the (possibly empty) list of detected line segments.
        return probabilistic_hough_line(
            self.interior(crop=self.crop),
            line_length=line_length,
            line_gap=2,
            theta=angles)

    def get_patch(self):
        """Get the cell patch for output."""
        # (x, y) origin in page coordinates plus the cell's width and height.
        width = self.top_right.x - self.top_left.x
        height = self.bottom_left.y - self.top_left.y
        offset_x = self.offset.x
        offset_y = self.offset.y
        return (offset_x, offset_y), width, height
| 37.222222 | 77 | 0.617015 | 484 | 3,350 | 4.161157 | 0.233471 | 0.048659 | 0.049156 | 0.023833 | 0.217974 | 0.145482 | 0.10427 | 0.10427 | 0.062562 | 0 | 0 | 0.020492 | 0.271642 | 3,350 | 89 | 78 | 37.640449 | 0.804918 | 0.189552 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.072727 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5630aada68bd06afa2bf00f8393f4ecb9edd571f | 1,124 | py | Python | alembic/versions/uuid_ids_b6a452c73bc3.py | baverman/telenot | 5b6e3a0ffc78b3a1eef2bb0ebf90244fb2b1ce1e | [
"MIT"
] | null | null | null | alembic/versions/uuid_ids_b6a452c73bc3.py | baverman/telenot | 5b6e3a0ffc78b3a1eef2bb0ebf90244fb2b1ce1e | [
"MIT"
] | null | null | null | alembic/versions/uuid_ids_b6a452c73bc3.py | baverman/telenot | 5b6e3a0ffc78b3a1eef2bb0ebf90244fb2b1ce1e | [
"MIT"
] | 1 | 2020-09-21T14:22:10.000Z | 2020-09-21T14:22:10.000Z | """uuid ids
Revision ID: b6a452c73bc3
Revises: 6df0d5aac594
Create Date: 2017-12-06 20:57:39.660665
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b6a452c73bc3'        # this migration
down_revision = '6df0d5aac594'   # previous migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Migrate ``reminders`` to string primary keys.

    Copies every row into a staging table whose ``id`` is a string
    (prefixed with ``some-id-``), then swaps the staging table into place.
    SQLite cannot alter a column type in place, hence the copy/rename dance.
    """
    # Staging table with the target schema.
    op.create_table(
        'reminders_copy',
        sa.Column('id', sa.String(), nullable=False),
        sa.Column('user_id', sa.Text(), nullable=True),
        sa.Column('message', sa.Text(), nullable=True),
        sa.Column('notify_at', sa.Integer(), nullable=True),
        sa.Column('status', sa.String(), server_default='pending', nullable=True),
        sa.PrimaryKeyConstraint('id'),
    )
    # Copy all rows, converting integer ids to prefixed strings.
    op.execute('''\
insert into reminders_copy (id, user_id, message, notify_at, status)
select "some-id-" || id, user_id, message, notify_at, status from reminders
''')
    op.execute('drop table reminders')
    op.execute('alter table reminders_copy rename to reminders')
def downgrade():
    """Intentionally a no-op: the integer ids cannot be reconstructed."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| 28.1 | 83 | 0.66726 | 142 | 1,124 | 5.183099 | 0.492958 | 0.054348 | 0.076087 | 0.081522 | 0.149457 | 0.149457 | 0.078804 | 0 | 0 | 0 | 0 | 0.050661 | 0.192171 | 1,124 | 39 | 84 | 28.820513 | 0.759912 | 0.189502 | 0 | 0 | 0 | 0 | 0.350225 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0.043478 | 0.086957 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56337b5a8e649c6eb401d664fa33c75392141f25 | 4,469 | py | Python | table_reclass_by_threshold.py | richpsharp/raster_calculations | 28b18c34f49c2c275c46e332d7021a27703053cd | [
"Apache-2.0"
] | null | null | null | table_reclass_by_threshold.py | richpsharp/raster_calculations | 28b18c34f49c2c275c46e332d7021a27703053cd | [
"Apache-2.0"
] | null | null | null | table_reclass_by_threshold.py | richpsharp/raster_calculations | 28b18c34f49c2c275c46e332d7021a27703053cd | [
"Apache-2.0"
] | null | null | null | """Table based reclassify triggered by probability threshold."""
import argparse
import os
import logging
import hashlib
from ecoshard import geoprocessing
from ecoshard import taskgraph
import pandas
import numpy
from osgeo import gdal
# Cap GDAL's raster block cache at 2**27 bytes (128 MiB).
gdal.SetCacheMax(2**27)

logging.basicConfig(
    level=logging.DEBUG,
    format=(
        '%(asctime)s (%(relativeCreated)d) %(levelname)s %(name)s'
        ' [%(funcName)s:%(lineno)d] %(message)s'))
# taskgraph is very chatty at DEBUG; keep only its warnings.
logging.getLogger('ecoshard.taskgraph').setLevel(logging.WARN)
LOGGER = logging.getLogger(__name__)

# Workspace for rasters aligned to a common grid; created eagerly at import.
ALIGNED_DIR = 'reclass_aligned_dir'
os.makedirs(ALIGNED_DIR, exist_ok=True)
def main():
    """Entry point: reclassify a raster per-pixel based on a threshold raster.

    For each base-raster code listed in the reclass table, pixels whose
    threshold-raster value is <= --threshold_value get the table's "<="
    target value, and pixels above it get the ">" target value.
    """
    parser = argparse.ArgumentParser(
        description='reclassify raster to table based on probability threshold')
    parser.add_argument(
        '--base_raster_path', type=str, required=True,
        help='path to integer raster')
    parser.add_argument(
        '--threshold_raster_path', type=str, required=True,
        help='path to threshold raster')
    parser.add_argument(
        '--threshold_value', type=float, required=True, help=(
            'floating point value, if threshold raster is greater than this '
            'value, reclassify based on > column of table.value, else use the '
            '<= value flip lulc pixel'))
    parser.add_argument(
        '--reclassify_table_path', type=str, required=True, help=(
            'path to csv table with columns'))
    parser.add_argument(
        '--csv_table_fields', type=str, nargs=3, required=True, help=(
            'column names for (1) base raster value, (2) value to flip to if <= '
            'threshold, and (3) value to flip to if > threshold'))
    parser.add_argument(
        '--target_raster_path', type=str,
        help='desired target raster')
    args = parser.parse_args()

    # Use the configured LOGGER rather than bare print() (fix: the debug
    # prints bypassed the logging format/level configured above).
    LOGGER.info('csv table fields: %s', args.csv_table_fields)
    df = pandas.read_csv(args.reclassify_table_path)
    # base code -> (value if threshold <= cutoff, value if threshold > cutoff)
    value_map = {
        int(base_lucode): (float(leq_target), float(gt_target))
        for (base_lucode, leq_target, gt_target) in zip(
            df[args.csv_table_fields[0]],
            df[args.csv_table_fields[1]],
            df[args.csv_table_fields[2]])
    }
    LOGGER.info('value map: %s', value_map)
    threshold_nodata = geoprocessing.get_raster_info(
        args.threshold_raster_path)['nodata'][0]

    def _reclass_op(base_array, threshold_array):
        # Per-block reclassification; nodata threshold pixels are untouched.
        result = base_array.copy()
        if threshold_nodata is not None:
            valid_mask = ~numpy.isclose(threshold_array, threshold_nodata)
        else:
            valid_mask = numpy.ones(base_array.shape, dtype=bool)
        for base_code, (leq_target, gt_target) in value_map.items():
            leq_mask = (
                base_array == base_code) & (
                threshold_array <= args.threshold_value)
            result[leq_mask & valid_mask] = leq_target
            gt_mask = (
                base_array == base_code) & (
                threshold_array > args.threshold_value)
            result[gt_mask & valid_mask] = gt_target
        return result

    base_raster_info = geoprocessing.get_raster_info(args.base_raster_path)
    base_raster_path_list = [
        args.base_raster_path, args.threshold_raster_path]
    # Workspace name derived from the input/output basenames so repeated runs
    # with the same arguments reuse their aligned rasters.
    path_hash = hashlib.sha256()
    path_hash.update(','.join([
        os.path.basename(path) for path in base_raster_path_list + [
            args.target_raster_path]]).encode('utf-8'))
    workspace_dir = os.path.join(ALIGNED_DIR, path_hash.hexdigest()[:5])
    os.makedirs(workspace_dir, exist_ok=True)
    aligned_raster_path_list = [
        os.path.join(workspace_dir, os.path.basename(path))
        for path in base_raster_path_list]
    LOGGER.info(f'aligning {base_raster_path_list}')
    task_graph = taskgraph.TaskGraph(workspace_dir, -1)
    task_graph.add_task(
        func=geoprocessing.align_and_resize_raster_stack,
        args=(
            base_raster_path_list, aligned_raster_path_list, ['near']*2,
            base_raster_info['pixel_size'], 'union'),
        kwargs={
            'target_projection_wkt': base_raster_info['projection_wkt']
        })
    task_graph.close()
    task_graph.join()
    LOGGER.info(f'reclassifying to {args.target_raster_path}')
    geoprocessing.raster_calculator(
        [(path, 1) for path in aligned_raster_path_list],
        _reclass_op, args.target_raster_path, base_raster_info['datatype'],
        base_raster_info['nodata'][0])


if __name__ == '__main__':
    main()
| 36.631148 | 81 | 0.65876 | 562 | 4,469 | 4.948399 | 0.27758 | 0.064725 | 0.040273 | 0.032362 | 0.221143 | 0.131967 | 0.114707 | 0.114707 | 0.102841 | 0.074793 | 0 | 0.005821 | 0.231148 | 4,469 | 121 | 82 | 36.933884 | 0.803551 | 0.015887 | 0 | 0.076923 | 0 | 0 | 0.190335 | 0.036699 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019231 | false | 0 | 0.086538 | 0 | 0.115385 | 0.019231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56341b19d0cc7d858663c40581c0b7957017b17d | 4,173 | py | Python | androguard/misc.py | nawfling/androguard | 67b992ce0feeeb01bc69a99257916487689c3bcf | [
"Apache-2.0"
] | 1 | 2019-03-29T19:24:23.000Z | 2019-03-29T19:24:23.000Z | androguard/misc.py | adiltirur/malware_classification | 67b992ce0feeeb01bc69a99257916487689c3bcf | [
"Apache-2.0"
] | null | null | null | androguard/misc.py | adiltirur/malware_classification | 67b992ce0feeeb01bc69a99257916487689c3bcf | [
"Apache-2.0"
] | null | null | null | from future import standard_library
standard_library.install_aliases()
from androguard import session
from androguard.core.bytecodes.dvm import *
from androguard.decompiler.decompiler import *
from androguard.core.androconf import CONF
def init_print_colors():
    """Route androguard's print function through IPython's stdout with colors."""
    # Imported lazily so IPython is only required when this is actually called.
    from IPython.utils import coloransi, io
    androconf.default_colors(coloransi.TermColors)
    CONF["PRINT_FCT"] = io.stdout.write
def get_default_session():
    """
    Return the default Session from the configuration
    or create a new one, if the session is None.
    """
    current = CONF["SESSION"]
    if current is None:
        # Lazily create and cache the shared session.
        current = session.Session()
        CONF["SESSION"] = current
    return current
def AnalyzeAPK(filename, session=None):
    """
    Analyze an android application and setup all stuff for a more quickly analysis !

    :param session: A session (default None)
    :param filename: the filename of the android application or a buffer which represents the application
    :type filename: string

    :rtype: return the :class:`APK`, :class:`DalvikVMFormat`, and :class:`VMAnalysis` objects
    """
    androconf.debug("AnalyzeAPK")
    if not session:
        session = get_default_session()
    # Read the APK bytes and register them with the session.
    with open(filename, "rb") as apk_file:
        raw = apk_file.read()
    session.add(filename, raw)
    return session.get_objects_apk(filename)
def AnalyzeDex(filename, session=None):
    """
    Analyze an android dex file and setup all stuff for a more quickly analysis !

    :param session: A session (Default None)
    :param filename: the filename of the android dex file or a buffer which represents the dex file
    :type filename: string

    :rtype: return the :class:`DalvikVMFormat`, and :class:`VMAnalysis` objects
    """
    androconf.debug("AnalyzeDex")
    if not session:
        session = get_default_session()
    with open(filename, "rb") as dex_file:
        raw = dex_file.read()
    return session.addDEX(filename, raw)
def AnalyzeODex(filename, session=None):
    """
    Analyze an android odex file and setup all stuff for a more quickly analysis !

    :param filename: the filename of the android odex file or a buffer which represents the odex file
    :type filename: string
    :param session: The Androguard Session to add the ODex to (default: None)

    :rtype: return the :class:`DalvikOdexVMFormat`, and :class:`VMAnalysis` objects
    """
    androconf.debug("AnalyzeODex")
    if not session:
        session = get_default_session()
    with open(filename, "rb") as odex_file:
        raw = odex_file.read()
    return session.addDEY(filename, raw)
def RunDecompiler(d, dx, decompiler):
    """
    Run the decompiler on a specific analysis

    :param d: the DalvikVMFormat object
    :type d: :class:`DalvikVMFormat` object
    :param dx: the analysis of the format
    :type dx: :class:`VMAnalysis` object
    :param decompiler: the type of decompiler to use ("dad", "dex2jad", "ded")
    :type decompiler: string
    """
    # No decompiler requested: nothing to do.
    if decompiler is None:
        return
    androconf.debug("Decompiler ...")
    chosen = decompiler.lower()
    conf = androconf.CONF
    if chosen == "dex2jad":
        instance = DecompilerDex2Jad(
            d,
            conf["PATH_DEX2JAR"],
            conf["BIN_DEX2JAR"],
            conf["PATH_JAD"],
            conf["BIN_JAD"],
            conf["TMP_DIRECTORY"])
    elif chosen == "dex2fernflower":
        instance = DecompilerDex2Fernflower(
            d,
            conf["PATH_DEX2JAR"],
            conf["BIN_DEX2JAR"],
            conf["PATH_FERNFLOWER"],
            conf["BIN_FERNFLOWER"],
            conf["OPTIONS_FERNFLOWER"],
            conf["TMP_DIRECTORY"])
    elif chosen == "ded":
        instance = DecompilerDed(
            d,
            conf["PATH_DED"],
            conf["BIN_DED"],
            conf["TMP_DIRECTORY"])
    else:
        # Anything unrecognized (including "dad") falls back to DAD.
        instance = DecompilerDAD(d, dx)
    d.set_decompiler(instance)
| 32.348837 | 109 | 0.632159 | 474 | 4,173 | 5.491561 | 0.242616 | 0.069919 | 0.032655 | 0.029965 | 0.456781 | 0.456781 | 0.361122 | 0.341529 | 0.296965 | 0.296965 | 0 | 0.002961 | 0.271507 | 4,173 | 128 | 110 | 32.601563 | 0.853289 | 0.334771 | 0 | 0.338462 | 0 | 0 | 0.105617 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.092308 | false | 0 | 0.092308 | 0 | 0.246154 | 0.015385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
563876079d3f5e1179bfb42ae2c1c70daa64503e | 2,582 | py | Python | src/bpp/const.py | iplweb/django-bpp | 85f183a99d8d5027ae4772efac1e4a9f21675849 | [
"BSD-3-Clause"
] | 1 | 2017-04-27T19:50:02.000Z | 2017-04-27T19:50:02.000Z | src/bpp/const.py | mpasternak/django-bpp | 434338821d5ad1aaee598f6327151aba0af66f5e | [
"BSD-3-Clause"
] | null | null | null | src/bpp/const.py | mpasternak/django-bpp | 434338821d5ad1aaee598f6327151aba0af66f5e | [
"BSD-3-Clause"
] | null | null | null | from collections import OrderedDict
from enum import Enum
TO_AUTOR = 0
TO_REDAKTOR = 1
TO_INNY = 2
TO_TLUMACZ = 3
TO_KOMENTATOR = 4
TO_RECENZENT = 5
TO_OPRACOWAL = 6
TO_REDAKTOR_TLUMACZENIA = 7
TYP_OGOLNY_DO_PBN = {
TO_AUTOR: "AUTHOR",
TO_REDAKTOR: "EDITOR",
TO_TLUMACZ: "TRANSLATOR",
TO_REDAKTOR_TLUMACZENIA: "TRANSLATION_EDITOR",
}
GR_WPROWADZANIE_DANYCH = "wprowadzanie danych"
CHARAKTER_SLOTY_KSIAZKA = 1
CHARAKTER_SLOTY_ROZDZIAL = 2
CHARAKTER_SLOTY_REFERAT = 3
RODZAJ_PBN_ARTYKUL = 1
RODZAJ_PBN_ROZDZIAL = 2
RODZAJ_PBN_KSIAZKA = 3
RODZAJ_PBN_POSTEPOWANIE = 4
CHARAKTER_OGOLNY_ARTYKUL = "art"
CHARAKTER_OGOLNY_ROZDZIAL = "roz"
CHARAKTER_OGOLNY_KSIAZKA = "ksi"
CHARAKTER_OGOLNY_INNE = "xxx"
class DZIEDZINA(Enum):
    """Scientific domains ("dziedziny")."""
    NAUKI_HUMANISTYCZNE = 1
    NAUKI_INZ_TECH = 2
    NAUKI_MEDYCZNE = 3
    NAUKI_ROLNICZE = 4
    NAUKI_SPOLECZNE = 5
    NAUKI_SCISLE = 6
    NAUKI_TEOLOGICZNE = 7
    NAUKI_SZTUKA = 8


# Domains that receive the higher point valuation.
WYZSZA_PUNKTACJA = [
    DZIEDZINA.NAUKI_SPOLECZNE,
    DZIEDZINA.NAUKI_HUMANISTYCZNE,
    DZIEDZINA.NAUKI_TEOLOGICZNE,
]

# Human-readable (Polish) labels for each domain, in display order.
DZIEDZINY = OrderedDict()
DZIEDZINY[DZIEDZINA.NAUKI_HUMANISTYCZNE] = "Nauki humanistyczne"
DZIEDZINY[DZIEDZINA.NAUKI_INZ_TECH] = "Nauki inżynieryjno-techniczne"
DZIEDZINY[DZIEDZINA.NAUKI_MEDYCZNE] = "Nauki medyczne i o zdrowiu"
DZIEDZINY[DZIEDZINA.NAUKI_ROLNICZE] = "Nauki rolnicze"
DZIEDZINY[DZIEDZINA.NAUKI_SPOLECZNE] = "Nauki społeczne"
DZIEDZINY[DZIEDZINA.NAUKI_SCISLE] = "Nauki ścisłe i przyrodnicze"
DZIEDZINY[DZIEDZINA.NAUKI_TEOLOGICZNE] = "Nauki teologiczne"
DZIEDZINY[DZIEDZINA.NAUKI_SZTUKA] = "Sztuka"
class TRYB_KALKULACJI(Enum):
    """Monograph point-calculation modes (authorship / editing / chapter)."""
    AUTORSTWO_MONOGRAFII = 1
    REDAKCJA_MONOGRAFI = 2
    ROZDZIAL_W_MONOGRAFI = 3


class TRYB_DOSTEPU(Enum):
    """Record access modes (hidden / intranet only / public)."""
    NIEJAWNY = 0
    TYLKO_W_SIECI = 1
    JAWNY = 2


DO_STYCZNIA_POPRZEDNI_POTEM_OBECNY = "jan_prev_then_current"
NAJWIEKSZY_REKORD = "max_rec"

# Fixed lengths of external identifiers.
PBN_UID_LEN = 24
ORCID_LEN = 19

# URL templates for deep links into the PBN web application.
LINK_PBN_DO_AUTORA = "{pbn_api_root}/core/#/person/view/{pbn_uid_id}/current"
LINK_PBN_DO_WYDAWCY = "{pbn_api_root}/core/#/publisher/view/{pbn_uid_id}/current"
LINK_PBN_DO_ZRODLA = "{pbn_api_root}/core/#/journal/view/{pbn_uid_id}/current"
LINK_PBN_DO_PUBLIKACJI = "{pbn_api_root}/core/#/publication/view/{pbn_uid_id}/current"

PBN_LATA = [2017, 2018, 2019, 2020, 2021, 2022]

# Minimum year from which PBN points are counted, and the overall minimum
# year of the PBN integration.
PBN_MIN_ROK = PBN_LATA[0]

# Maximum year for PBN export procedures, point/slot calculations and tests.
PBN_MAX_ROK = PBN_LATA[-1]

# Journal quartile choices as (value, label) pairs.
KWARTYLE = [(None, "brak"), (1, "Q1"), (2, "Q2"), (3, "Q3"), (4, "Q4")]
| 26.346939 | 99 | 0.763362 | 356 | 2,582 | 5.202247 | 0.407303 | 0.083153 | 0.099352 | 0.030238 | 0.055616 | 0.045356 | 0.045356 | 0.045356 | 0 | 0 | 0 | 0.03018 | 0.140201 | 2,582 | 97 | 100 | 26.618557 | 0.804054 | 0.070488 | 0 | 0 | 0 | 0 | 0.204005 | 0.112224 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.028169 | 0 | 0.267606 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5639659aa605d44624d092a964d7a713b28f1136 | 16,604 | py | Python | integration_tests/run_e2e_tests.py | elementary-data/dbt-data-reliability | 2b54962950af61a5c83cff5105f4a0197e727beb | [
"Apache-2.0"
] | 11 | 2022-01-26T14:25:47.000Z | 2022-03-10T10:22:31.000Z | integration_tests/run_e2e_tests.py | elementary-data/dbt-data-reliability | 2b54962950af61a5c83cff5105f4a0197e727beb | [
"Apache-2.0"
] | 1 | 2022-01-27T05:00:29.000Z | 2022-01-28T11:42:32.000Z | integration_tests/run_e2e_tests.py | elementary-data/dbt-data-reliability | 2b54962950af61a5c83cff5105f4a0197e727beb | [
"Apache-2.0"
] | 2 | 2022-03-02T18:40:23.000Z | 2022-03-08T15:56:34.000Z | import csv
from datetime import datetime, timedelta
import random
import string
import os
from os.path import expanduser
from pathlib import Path
from monitor.dbt_runner import DbtRunner
import click
# Columns for "any type" anomaly seeds.
# NOTE(review): shadowed by a local list of the same name inside
# generate_any_type_anomalies_training_and_validation_files — confirm this
# module-level value is still used anywhere.
any_type_columns = ['date', 'null_count', 'null_percent']

# Directory containing this script; all seed CSV paths are relative to it.
FILE_DIR = os.path.dirname(__file__)
def generate_date_range(base_date, numdays=30):
    """Return ``numdays`` datetimes: *base_date* first, then each previous day."""
    dates = []
    for day_offset in range(numdays):
        dates.append(base_date - timedelta(days=day_offset))
    return dates
def write_rows_to_csv(csv_path, rows, header):
    """Write *rows* (dicts) to *csv_path* with *header* as the column order.

    Parent directories are created if they do not exist.

    :param csv_path: destination file path.
    :param rows: iterable of dicts keyed by the names in *header*.
    :param header: list of column names, in output order.
    """
    # Creates the csv file directories if needed.
    directory_path = Path(csv_path).parent.resolve()
    directory_path.mkdir(parents=True, exist_ok=True)
    # newline='' is required by the csv module: the writer emits its own
    # '\r\n' terminators, and without it Windows doubles them up.
    with open(csv_path, 'w', newline='') as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=header)
        writer.writeheader()
        writer.writerows(rows)
def generate_rows(rows_count_per_day, dates, get_row_callback):
    """Build one row per (date, index) pair via *get_row_callback*.

    The callback receives ``(date, row_index, rows_count_per_day)`` and
    returns a row; rows are ordered by date, then by index within the day.
    """
    return [
        get_row_callback(current_date, row_index, rows_count_per_day)
        for current_date in dates
        for row_index in range(rows_count_per_day)
    ]
def generate_string_anomalies_training_and_validation_files(rows_count_per_day=100):
    """Seed CSVs for the string-column anomaly tests.

    Writes 30 "normal" days (training) ending two days ago and a single
    anomalous day (validation) for yesterday, so validation should be
    flagged against the training baseline.
    """

    def get_training_row(date, row_index, rows_count):
        # Baseline: narrow length ranges, ~3% empty missing_count values
        # (by row position), ~20% empty missing_percent values (at random).
        return {'date': date.strftime('%Y-%m-%d %H:%M:%S'),
                'min_length': ''.join(random.choices(string.ascii_lowercase, k=random.randint(5, 10))),
                'max_length': ''.join(random.choices(string.ascii_lowercase, k=random.randint(5, 10))),
                'average_length': ''.join(random.choices(string.ascii_lowercase, k=5)),
                'missing_count': '' if row_index < (3 / 100 * rows_count) else ''.join(random.choices(string.ascii_lowercase, k=5)),
                'missing_percent': '' if random.randint(1, rows_count) <= (20 / 100 * rows_count) else
                ''.join(random.choices(string.ascii_lowercase, k=5))}

    def get_validation_row(date, row_index, rows_count):
        # Anomalous day: wider length ranges and much higher missing rates
        # (~20% missing_count / ~60% missing_percent).
        return {'date': date.strftime('%Y-%m-%d %H:%M:%S'),
                'min_length': ''.join(random.choices(string.ascii_lowercase, k=random.randint(1, 10))),
                'max_length': ''.join(random.choices(string.ascii_lowercase, k=random.randint(5, 15))),
                'average_length': ''.join(random.choices(string.ascii_lowercase, k=random.randint(5, 8))),
                'missing_count': '' if row_index < (20 / 100 * rows_count) else ''.join(random.choices(string.ascii_lowercase, k=5)),
                'missing_percent': '' if random.randint(1, rows_count) <= (60 / 100 * rows_count) else
                ''.join(random.choices(string.ascii_lowercase, k=5))}

    string_columns = ['date', 'min_length', 'max_length', 'average_length', 'missing_count', 'missing_percent']
    # Training window of 30 days ends two days ago; validation is yesterday.
    dates = generate_date_range(base_date=datetime.today() - timedelta(days=2), numdays=30)
    training_rows = generate_rows(rows_count_per_day, dates, get_training_row)
    write_rows_to_csv(os.path.join(FILE_DIR, 'data', 'training', 'string_column_anomalies_training.csv'),
                      training_rows,
                      string_columns)
    validation_date = datetime.today() - timedelta(days=1)
    validation_rows = generate_rows(rows_count_per_day, [validation_date], get_validation_row)
    write_rows_to_csv(os.path.join(FILE_DIR, 'data', 'validation', 'string_column_anomalies_validation.csv'),
                      validation_rows,
                      string_columns)
def generate_numeric_anomalies_training_and_validation_files(rows_count_per_day=200):
    """Seed CSVs for the numeric-column anomaly tests.

    Same split as the string generator: 30 stable training days ending two
    days ago, plus one anomalous validation day for yesterday.
    """

    def get_training_row(date, row_index, rows_count):
        # Baseline: values in tight ranges; ~3% zeros in zero_count (by row
        # position), ~20% zeros in zero_percent (at random).
        return {'date': date.strftime('%Y-%m-%d %H:%M:%S'),
                'min': random.randint(100, 200),
                'max': random.randint(100, 200),
                'zero_count': 0 if row_index < (3 / 100 * rows_count) else random.randint(100, 200),
                'zero_percent': 0 if random.randint(1, rows_count) <= (20 / 100 * rows_count) else random.randint(100, 200),
                'average': random.randint(99, 101),
                'standard_deviation': random.randint(99, 101),
                'variance': random.randint(99, 101)}

    def get_validation_row(date, row_index, rows_count):
        # Shift the index so the positional zero_count threshold below
        # covers a different slice of the day's rows.
        row_index += -(rows_count / 2)
        # Anomalous day: wider ranges and ~80% / ~60% zero rates.
        return {'date': date.strftime('%Y-%m-%d %H:%M:%S'),
                'min': random.randint(10, 200),
                'max': random.randint(100, 300),
                'zero_count': 0 if row_index < (80 / 100 * rows_count) else random.randint(100, 200),
                'zero_percent': 0 if random.randint(1, rows_count) <= (60 / 100 * rows_count) else random.randint(100, 200),
                'average': random.randint(101, 110),
                'standard_deviation': random.randint(80, 120),
                'variance': random.randint(80, 120)}

    numeric_columns = ['date', 'min', 'max', 'zero_count', 'zero_percent', 'average', 'standard_deviation', 'variance']
    dates = generate_date_range(base_date=datetime.today() - timedelta(days=2), numdays=30)
    training_rows = generate_rows(rows_count_per_day, dates, get_training_row)
    write_rows_to_csv(os.path.join(FILE_DIR, 'data', 'training', 'numeric_column_anomalies_training.csv'),
                      training_rows,
                      numeric_columns)
    validation_date = datetime.today() - timedelta(days=1)
    validation_rows = generate_rows(rows_count_per_day, [validation_date], get_validation_row)
    write_rows_to_csv(os.path.join(FILE_DIR, 'data', 'validation', 'numeric_column_anomalies_validation.csv'),
                      validation_rows,
                      numeric_columns)
def generate_any_type_anomalies_training_and_validation_files(rows_count_per_day=300):
    """Seed CSVs for null_count/null_percent anomaly tests across data types.

    One null_count/null_percent column pair per type (str, float, int,
    bool); training has ~3%/~20% nulls, validation has ~80%/~60%.
    """

    def get_training_row(date, row_index, rows_count):
        # Baseline nulls: ~3% by row position, ~20% at random.
        return {'date': date.strftime('%Y-%m-%d %H:%M:%S'),
                'null_count_str': None if row_index < (3 / 100 * rows_count) else
                ''.join(random.choices(string.ascii_lowercase, k=5)),
                'null_percent_str': None if random.randint(1, rows_count) <= (20 / 100 * rows_count)
                else ''.join(random.choices(string.ascii_lowercase, k=5)),
                'null_count_float': None if row_index < (3 / 100 * rows_count) else random.uniform(1.2, 8.9),
                'null_percent_float': None if random.randint(1, rows_count) <= (20 / 100 * rows_count)
                else random.uniform(1.2, 8.9),
                'null_count_int': None if row_index < (3 / 100 * rows_count) else random.randint(100, 200),
                'null_percent_int': None if random.randint(1, rows_count) <= (20 / 100 * rows_count)
                else random.randint(100, 200),
                'null_count_bool': None if row_index < (3 / 100 * rows_count) else bool(random.getrandbits(1)),
                'null_percent_bool': None if random.randint(1, rows_count) <= (20 / 100 * rows_count)
                else bool(random.getrandbits(1))}

    def get_validation_row(date, row_index, rows_count):
        # Anomalous nulls: ~80% by row position, ~60% at random.
        return {'date': date.strftime('%Y-%m-%d %H:%M:%S'),
                'null_count_str': None if row_index < (80 / 100 * rows_count) else
                ''.join(random.choices(string.ascii_lowercase, k=5)),
                'null_percent_str': None if random.randint(1, rows_count) <= (60 / 100 * rows_count)
                else ''.join(random.choices(string.ascii_lowercase, k=5)),
                'null_count_float': None if row_index < (80 / 100 * rows_count) else random.uniform(1.2, 8.9),
                'null_percent_float': None if random.randint(1, rows_count) <= (60 / 100 * rows_count)
                else random.uniform(1.2, 8.9),
                'null_count_int': None if row_index < (80 / 100 * rows_count) else random.randint(100, 200),
                'null_percent_int': None if random.randint(1, rows_count) <= (60 / 100 * rows_count)
                else random.randint(100, 200),
                'null_count_bool': None if row_index < (80 / 100 * rows_count) else bool(random.getrandbits(1)),
                'null_percent_bool': None if random.randint(1, rows_count) <= (60 / 100 * rows_count)
                else bool(random.getrandbits(1))}

    # Local list intentionally shadows the module-level ``any_type_columns``.
    any_type_columns = ['date', 'null_count_str', 'null_percent_str', 'null_count_float', 'null_percent_float',
                        'null_count_int', 'null_percent_int', 'null_count_bool', 'null_percent_bool']
    dates = generate_date_range(base_date=datetime.today() - timedelta(days=2), numdays=30)
    training_rows = generate_rows(rows_count_per_day, dates, get_training_row)
    write_rows_to_csv(os.path.join(FILE_DIR, 'data', 'training', 'any_type_column_anomalies_training.csv'),
                      training_rows,
                      any_type_columns)
    validation_date = datetime.today() - timedelta(days=1)
    validation_rows = generate_rows(rows_count_per_day, [validation_date], get_validation_row)
    write_rows_to_csv(os.path.join(FILE_DIR, 'data', 'validation', 'any_type_column_anomalies_validation.csv'),
                      validation_rows,
                      any_type_columns)
def generate_fake_data():
    """Regenerate every training/validation CSV fixture used by the e2e suites."""
    print('Generating fake data!')
    for generator in (generate_string_anomalies_training_and_validation_files,
                      generate_numeric_anomalies_training_and_validation_files,
                      generate_any_type_anomalies_training_and_validation_files):
        generator()
def e2e_tests(target, test_types):
"""Run the selected e2e dbt test suites against the given `target`.

`test_types` selects suites ('table', 'column', 'schema', 'regular',
'artifacts', 'no_timestamp', 'debug'). Always returns a seven-element list
of result lists, empty for suites that were skipped: [table, string-column,
numeric-column, any-type-column, schema-changes, regular, artifacts].
"""
table_test_results = []
string_column_anomalies_test_results = []
numeric_column_anomalies_test_results = []
any_type_column_anomalies_test_results = []
schema_changes_test_results = []
regular_test_results = []
artifacts_results = []
dbt_runner = DbtRunner(project_dir=FILE_DIR, profiles_dir=os.path.join(expanduser('~'), '.dbt'), target=target)
# Clear leftovers from previous runs so validations only see this run.
clear_test_logs = dbt_runner.run_operation(macro_name='clear_tests')
for clear_test_log in clear_test_logs:
print(clear_test_log)
# Seed the "normal" training data and rebuild models from scratch.
dbt_runner.seed(select='training')
dbt_runner.run(full_refresh=True)
if 'table' in test_types:
dbt_runner.test(select='tag:table_anomalies')
table_test_results = dbt_runner.run_operation(macro_name='validate_table_anomalies')
print_test_result_list(table_test_results)
# If only table tests were selected no need to continue to the rest of the flow
if len(test_types) == 1:
return [table_test_results, string_column_anomalies_test_results, numeric_column_anomalies_test_results,
any_type_column_anomalies_test_results, schema_changes_test_results, regular_test_results,
artifacts_results]
# Creates row_count metrics for anomalies detection.
if 'no_timestamp' in test_types:
current_time = datetime.now()
# Run operation returns the operation value as a list of strings.
# So we convert the days_back value into int.
days_back_project_var = int(dbt_runner.run_operation(macro_name="return_config_var", macro_args={"var_name": "days_back"})[0])
# No need to create today's metric because the validation run does it.
for run_index in range(1, days_back_project_var):
custom_run_time = (current_time - timedelta(run_index)).isoformat()
dbt_runner.test(select='tag:no_timestamp', vars={"custom_run_started_at": custom_run_time})
# Seed the anomalous validation data the suites below are meant to flag.
dbt_runner.seed(select='validation')
if 'schema' in test_types:
# We need to upload the schema changes dataset before at least one dbt run, as dbt run takes a snapshot of the
# normal schema
dbt_runner.seed(select='schema_changes_data')
dbt_runner.run()
if 'debug' in test_types:
dbt_runner.test(select='tag:debug')
# NOTE(review): indentation was lost in this dump; this early return only
# makes sense nested under the 'debug' branch above -- confirm against the
# original file, otherwise it would end the flow unconditionally here.
return [table_test_results, string_column_anomalies_test_results, numeric_column_anomalies_test_results,
any_type_column_anomalies_test_results, schema_changes_test_results, regular_test_results,
artifacts_results]
if 'no_timestamp' in test_types:
dbt_runner.test(select='tag:no_timestamp')
no_timestamp_test_results = dbt_runner.run_operation(macro_name='validate_no_timestamp_anomalies')
print_test_result_list(no_timestamp_test_results)
if 'column' in test_types:
dbt_runner.test(select='tag:string_column_anomalies')
string_column_anomalies_test_results = dbt_runner.run_operation(macro_name='validate_string_column_anomalies')
print_test_result_list(string_column_anomalies_test_results)
dbt_runner.test(select='tag:numeric_column_anomalies')
numeric_column_anomalies_test_results = dbt_runner.run_operation(macro_name='validate_numeric_column_anomalies')
print_test_result_list(numeric_column_anomalies_test_results)
dbt_runner.test(select='tag:all_any_type_columns_anomalies')
any_type_column_anomalies_test_results = dbt_runner.run_operation(macro_name=
'validate_any_type_column_anomalies')
print_test_result_list(any_type_column_anomalies_test_results)
if 'schema' in test_types:
# Apply the schema-change DDL, rebuild, then verify the changes were caught.
schema_changes_logs = dbt_runner.run_operation(macro_name='do_schema_changes')
for schema_changes_log in schema_changes_logs:
print(schema_changes_log)
dbt_runner.run()
dbt_runner.test(select='tag:schema_changes')
schema_changes_test_results = dbt_runner.run_operation(macro_name='validate_schema_changes')
print_test_result_list(schema_changes_test_results)
if 'regular' in test_types:
dbt_runner.test(select='test_type:singular tag:regular_tests')
regular_test_results = dbt_runner.run_operation(macro_name='validate_regular_tests')
print_test_result_list(regular_test_results)
if 'artifacts' in test_types:
artifacts_results = dbt_runner.run_operation(macro_name='validate_dbt_artifacts')
print_test_result_list(artifacts_results)
return [table_test_results, string_column_anomalies_test_results, numeric_column_anomalies_test_results,
any_type_column_anomalies_test_results, schema_changes_test_results, regular_test_results,
artifacts_results]
def print_test_result_list(test_results):
    """Print each result on its own line; prints nothing for an empty list."""
    for result in test_results:
        print(result)
def print_tests_results(table_test_results,
                        string_column_anomalies_test_results,
                        numeric_column_anomalies_test_results,
                        any_type_column_anomalies_test_results,
                        schema_changes_test_results,
                        regular_test_results,
                        artifacts_results):
    """Print every suite's results under its section header."""
    sections = (
        ('Table test results', table_test_results),
        ('String columns test results', string_column_anomalies_test_results),
        ('Numeric columns test results', numeric_column_anomalies_test_results),
        ('Any type columns test results', any_type_column_anomalies_test_results),
        ('Schema changes test results', schema_changes_test_results),
        ('Regular test results', regular_test_results),
        ('dbt artifacts results', artifacts_results),
    )
    for title, results in sections:
        print('\n' + title)
        # One line per result, nothing for an empty list.
        for result in results:
            print(result)
@click.command()
@click.option(
'--target', '-t',
type=str,
default='all',
help="snowflake / bigquery / redshift / all (default = all)"
)
@click.option(
'--e2e-type', '-e',
type=str,
default='all',
help="table / column / schema / regular / artifacts / no_timestamp / debug / all (default = all)"
)
@click.option(
'--generate-data', '-g',
type=bool,
default=True,
help="Set to true if you want to re-generate fake data (default = True)"
)
def main(target, e2e_type, generate_data):
"""CLI entry point: optionally regenerate fixtures, run the selected e2e
suites per target, then print a consolidated summary."""
if generate_data:
generate_fake_data()
if target == 'all':
e2e_targets = ['snowflake', 'bigquery', 'redshift']
else:
e2e_targets = [target]
if e2e_type == 'all':
# NOTE(review): 'all' excludes 'no_timestamp' and 'debug' even though the
# option help lists them -- confirm this is intentional.
e2e_types = ['table', 'column', 'schema', 'regular', 'artifacts']
else:
e2e_types = [e2e_type]
all_results = {}
for e2e_target in e2e_targets:
print(f'Starting {e2e_target} tests\n')
e2e_test_results = e2e_tests(e2e_target, e2e_types)
print(f'\n{e2e_target} results')
all_results[e2e_target] = e2e_test_results
# Second pass: a consolidated summary per target after all runs finish.
for e2e_target, e2e_test_results in all_results.items():
print(f'\n{e2e_target} results')
print_tests_results(*e2e_test_results)
if __name__ == '__main__':
main()
| 49.861862 | 134 | 0.675982 | 2,175 | 16,604 | 4.79954 | 0.109425 | 0.065332 | 0.027589 | 0.036785 | 0.69681 | 0.654086 | 0.598333 | 0.552256 | 0.513268 | 0.445541 | 0 | 0.025121 | 0.216032 | 16,604 | 332 | 135 | 50.012048 | 0.77683 | 0.028547 | 0 | 0.327138 | 0 | 0 | 0.151231 | 0.034675 | 0 | 0 | 0 | 0 | 0 | 1 | 0.063197 | false | 0 | 0.033457 | 0.022305 | 0.137546 | 0.118959 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
563acc285f6bb6d32cc9472259e3ac6433995b1c | 1,956 | py | Python | resources/lib/routes/animesearch.py | jdollarKodi/plugin.video.animepie | 874e58e153e2df53e5a47ec963584de16584ae52 | [
"MIT"
] | null | null | null | resources/lib/routes/animesearch.py | jdollarKodi/plugin.video.animepie | 874e58e153e2df53e5a47ec963584de16584ae52 | [
"MIT"
] | null | null | null | resources/lib/routes/animesearch.py | jdollarKodi/plugin.video.animepie | 874e58e153e2df53e5a47ec963584de16584ae52 | [
"MIT"
] | null | null | null | import requests
import logging
import math
import xbmcaddon
from xbmcgui import ListItem
from xbmcplugin import addDirectoryItem, endOfDirectory
from resources.lib.constants.url import BASE_URL, SEARCH_PATH
from resources.lib.router_factory import get_router_instance
from resources.lib.routes.episodelist import episode_list
ADDON = xbmcaddon.Addon()
logger = logging.getLogger(ADDON.getAddonInfo('id'))
def generate_routes(plugin):
    """Register this module's search route on the router and return it."""
    plugin.add_route(anime_search, "/search")
    return plugin
def anime_search():
"""Kodi route handler: query the anime search API and list the results.

Reads `name` (search term) and `page` from the plugin args, adds one
directory item per result linking to its episode list, and appends a
'Next Page' item while more result pages remain.
"""
plugin = get_router_instance()
search_value = plugin.args["name"][0] if "name" in plugin.args else ""
page = plugin.args["page"][0] if "page" in plugin.args else "1"
params = {
"name": search_value,
"limit": 10,
"page": int(page)
}
res = requests.get(BASE_URL + SEARCH_PATH, params=params)
json_data = res.json()
for anime in json_data['data']['list']:
li = ListItem(anime["animeName"])
li.setArt({"icon": anime["backgroundSrc"]})
li.setInfo(type="video", infoLabels={"plot": anime["animeSynopsis"]})
addDirectoryItem(
plugin.handle,
plugin.url_for(
episode_list,
id=str(anime["animeID"]),
listId=str(anime["animeListID"]),
episode_count=str(anime["animeEpisode"])
),
li,
True
)
# More pages exist while ceil(total / limit) still exceeds the current page.
are_pages_remaining = math.ceil(float(json_data["data"]["count"]) / float(params.get("limit"))) > int(page)
if (are_pages_remaining):
# Re-enter this route with the page number advanced by one.
next_page_params = { "page": page, "name": search_value }
next_page_params.update({ "page": str(int(params.get("page")) + 1) })
addDirectoryItem(
plugin.handle,
plugin.url_for(
anime_search, **next_page_params
),
ListItem('Next Page'),
True
)
endOfDirectory(plugin.handle)
| 30.092308 | 111 | 0.618609 | 222 | 1,956 | 5.292793 | 0.364865 | 0.034043 | 0.040851 | 0.028936 | 0.068085 | 0.068085 | 0 | 0 | 0 | 0 | 0 | 0.004124 | 0.256135 | 1,956 | 64 | 112 | 30.5625 | 0.803436 | 0 | 0 | 0.188679 | 0 | 0 | 0.083845 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037736 | false | 0 | 0.169811 | 0 | 0.226415 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
563cde578c182b51e4e7d365b885a0e62d9ab264 | 2,095 | py | Python | expresspec/read_data.py | alexji/expresspec | eadc7dba20d7ccd78174f7d4f32c7ff13545c316 | [
"MIT"
] | null | null | null | expresspec/read_data.py | alexji/expresspec | eadc7dba20d7ccd78174f7d4f32c7ff13545c316 | [
"MIT"
] | null | null | null | expresspec/read_data.py | alexji/expresspec | eadc7dba20d7ccd78174f7d4f32c7ff13545c316 | [
"MIT"
] | null | null | null | from astropy.table import Table
from collections import OrderedDict
import numpy as np
from .spectrum import Spectrum1D
from copy import deepcopy
from scipy import signal
def read_expres(fname, full_output=False, as_arrays=False, as_order_dict=False, as_raw_table=False):
    """Read an EXPRES spectrum FITS file (HDU 1) into Spectrum1D objects.

    Args:
        fname: path to the FITS file.
        full_output: not implemented; raises NotImplementedError.
        as_arrays: return [waves, fluxes, errs] as three 2-D numpy arrays.
        as_order_dict: return an OrderedDict keyed by echelle order.
        as_raw_table: return the raw astropy Table unmodified.

    Returns:
        By default a list of Spectrum1D, one per order; otherwise the
        structure selected by the flags above. ivar is errs**-2.
    """
    if full_output:
        raise NotImplementedError("For now use as_raw_table=True")
    tab = Table.read(fname, hdu=1)
    if as_raw_table:
        return tab
    orders = tab["order"]
    num_orders = len(orders)
    if as_order_dict:
        alloutput = OrderedDict()
    elif as_arrays:
        alloutput = [[], [], []]
    else:
        alloutput = []
    for iord in range(num_orders):
        wave = tab["wavelength"][iord]
        flux = tab["spectrum"][iord]
        errs = tab["uncertainty"][iord]
        if as_arrays:
            alloutput[0].append(wave)
            alloutput[1].append(flux)
            alloutput[2].append(errs)
        else:
            # Fresh metadata dict per order. The original reused one shared
            # dict and overwrote "order" each iteration, so every spectrum
            # ended up tagged with the last order.
            meta = {"file": fname, "order": orders[iord]}
            spec = Spectrum1D(wave, flux, errs**-2, metadata=meta)
            if as_order_dict:
                alloutput[orders[iord]] = spec
            else:
                alloutput.append(spec)
    if as_arrays:
        # BUG FIX: this branch referenced an undefined `all_output` name,
        # raising NameError whenever as_arrays=True.
        alloutput[0] = np.array(alloutput[0])
        alloutput[1] = np.array(alloutput[1])
        alloutput[2] = np.array(alloutput[2])
    return alloutput
def rebin_spec(spec, n_rebin):
    """Bin the spectrum by summing flux over groups of `n_rebin` pixels.

    Wavelengths are averaged per bin and uncertainties added in quadrature;
    trailing pixels that do not fill a whole bin are dropped. Returns a new
    Spectrum1D with ivar = errs**-2 and the original metadata.
    """
    n_bins = len(spec.dispersion) // n_rebin
    usable = n_bins * n_rebin
    binned = (-1, n_rebin)
    wave = np.mean(spec.dispersion[0:usable].reshape(binned), axis=1)
    flux = np.sum(spec.flux[0:usable].reshape(binned), axis=1)
    # ivar -> per-pixel sigma, then quadrature sum per bin.
    sigmas = (spec.ivar[0:usable] ** -0.5).reshape(binned)
    errs = np.sqrt(np.sum(sigmas ** 2, axis=1))
    return Spectrum1D(wave, flux, errs ** -2, spec.metadata)
| 32.230769 | 100 | 0.607637 | 278 | 2,095 | 4.417266 | 0.298561 | 0.034202 | 0.026873 | 0.039088 | 0.105863 | 0.032573 | 0.032573 | 0 | 0 | 0 | 0 | 0.017352 | 0.257279 | 2,095 | 64 | 101 | 32.734375 | 0.771851 | 0.012888 | 0 | 0.12963 | 0 | 0 | 0.097953 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.111111 | 0 | 0.185185 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
563f17c503aa91de61cc1f7b7afc9304efcf0e5a | 596 | py | Python | lib/opencavity/help.py | giomalt/SLM_hologram_generation | 74ad38be8fe17c710856b2508389cd8c9f1ee77a | [
"MIT"
] | 3 | 2021-02-24T12:55:01.000Z | 2021-03-19T02:19:25.000Z | lib/opencavity/help.py | giomalt/SLM_hologram_generation | 74ad38be8fe17c710856b2508389cd8c9f1ee77a | [
"MIT"
] | null | null | null | lib/opencavity/help.py | giomalt/SLM_hologram_generation | 74ad38be8fe17c710856b2508389cd8c9f1ee77a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Created on 15 feb. 2015
@author: mohamed seghilani
'''
import opencavity
import webbrowser
import platform
#if __name__ == '__main__':
def launch():
    """Open the bundled HTML documentation in the default web browser.

    Locates the opencavity package directory from its __file__ and opens
    Docs/_build/html/index.html beneath it.
    """
    separator = '\\' if platform.system() == 'Windows' else '/'
    doc_root = opencavity.__file__
    # Strip trailing characters until the path ends at its directory
    # separator; the counter prevents an infinite loop if the path is empty.
    attempts = 1
    while (not doc_root.endswith(separator)) and attempts < 50:
        doc_root = doc_root[:-1]
        attempts = attempts + 1
    webbrowser.open(doc_root + 'Docs/_build/html/index.html')
| 22.074074 | 60 | 0.654362 | 76 | 596 | 4.842105 | 0.592105 | 0.152174 | 0.097826 | 0.086957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030108 | 0.219799 | 596 | 26 | 61 | 22.923077 | 0.76129 | 0.229866 | 0 | 0 | 0 | 0 | 0.082405 | 0.060134 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.1875 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
564286566a89b40797a3f75a9f3fdf67dede75df | 569 | py | Python | merchant/urls.py | Pesenin-Team/pesenin-2.0 | 883468e6b6d7e3a24bc2ee60bbc7063117745424 | [
"MIT"
] | null | null | null | merchant/urls.py | Pesenin-Team/pesenin-2.0 | 883468e6b6d7e3a24bc2ee60bbc7063117745424 | [
"MIT"
] | null | null | null | merchant/urls.py | Pesenin-Team/pesenin-2.0 | 883468e6b6d7e3a24bc2ee60bbc7063117745424 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
app_name = 'merchant'
urlpatterns = [
path('', views.merchant, name='merchant'),
path('makanan', views.makanan, name='makanan'),
path('makanan/search_makanan', views.search_makanan, name='search_makanan'),
path('search_merchant', views.search_merchant, name='search_merchant'),
path('makanan/<int:pk>', views.detail_makanan, name='detail'),
path('show_merchant', views.display_merchant, name='showMerchant'),
path('show_makanan', views.display_makanan, name='showMakanan'),
]
| 37.933333 | 81 | 0.70123 | 67 | 569 | 5.776119 | 0.283582 | 0.113695 | 0.098191 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.140598 | 569 | 14 | 82 | 40.642857 | 0.791411 | 0 | 0 | 0 | 0 | 0 | 0.299099 | 0.03964 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5642bea2a4d006a16cb5dc638961e8224156c351 | 1,049 | py | Python | src/worker/worker.py | cemsbr/flask-cpu-tasks | 2cb0c2d17794d8c7413633a304baf35ce7de2c51 | [
"MIT"
] | 1 | 2018-02-04T18:19:07.000Z | 2018-02-04T18:19:07.000Z | src/worker/worker.py | cemsbr/flask-cpu-tasks | 2cb0c2d17794d8c7413633a304baf35ce7de2c51 | [
"MIT"
] | null | null | null | src/worker/worker.py | cemsbr/flask-cpu-tasks | 2cb0c2d17794d8c7413633a304baf35ce7de2c51 | [
"MIT"
] | null | null | null | """Worker application.
It calls an external slow task and send its output, line by line, as "log"
events through SocketIO. The web page will then print the lines.
"""
# Disable the warning because eventlet must patch the standard library as soon
# as possible.
from communication import (CELERY,
get_socketio) # pylint: disable=wrong-import-order
import socket
from datetime import datetime
from subprocess import PIPE, Popen
SOCKETIO = get_socketio()
def announce():
    """Emit a SocketIO 'log' event announcing this worker's host and start time."""
    host = socket.gethostname()
    stamp = datetime.now().strftime('%H:%M:%S')
    message = '{} Worker {} is up.'.format(stamp, host)
    SOCKETIO.emit('log', {'data': message})


# Announce immediately at import time so the web page logs the worker start.
announce()
@CELERY.task
def add_task(name):
"""Run the slow task as a subprocess and send results to the web site.

Streams ./slow_task.sh stdout line by line as SocketIO 'log' events;
`name` is passed to the script as its single argument.
"""
args = './slow_task.sh', str(name)
# Text mode (universal_newlines) so proc.stdout yields decoded lines.
with Popen(args, stdout=PIPE, universal_newlines=True) as proc:
for line in proc.stdout:
SOCKETIO.emit('log', {'data': line.rstrip()})
| 29.138889 | 78 | 0.673975 | 146 | 1,049 | 4.808219 | 0.59589 | 0.034188 | 0.02849 | 0.054131 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.206864 | 1,049 | 35 | 79 | 29.971429 | 0.84375 | 0.371783 | 0 | 0 | 0 | 0 | 0.085938 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
564f92d435e931088cb6bd29d3c1bab009096fee | 239 | py | Python | Practice/Python/MapAndLambdaFunction.py | avantikasharma/HackerRank-Solutions | a980859ac352688853fcbcf3c7ec6d95685f99ea | [
"MIT"
] | 1 | 2018-07-08T15:44:15.000Z | 2018-07-08T15:44:15.000Z | Practice/Python/MapAndLambdaFunction.py | avantikasharma/HackerRank-Solutions | a980859ac352688853fcbcf3c7ec6d95685f99ea | [
"MIT"
] | null | null | null | Practice/Python/MapAndLambdaFunction.py | avantikasharma/HackerRank-Solutions | a980859ac352688853fcbcf3c7ec6d95685f99ea | [
"MIT"
] | 2 | 2018-08-10T06:49:34.000Z | 2020-10-01T04:50:59.000Z | cube = lambda x: pow(x,3)
def fibonacci(n):
    """Return the first n Fibonacci numbers as a list: [0, 1, 1, 2, 3, ...].

    BUG FIX: the original built the list but returned the literal ``1``,
    discarding the result (and breaking ``map(cube, fibonacci(n))``).
    """
    if n == 0:
        return []
    if n == 1:
        return [0]
    series = [0, 1]
    for _ in range(2, n):
        series.append(series[-1] + series[-2])
    return series
| 17.071429 | 29 | 0.389121 | 42 | 239 | 2.214286 | 0.547619 | 0.043011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.073529 | 0.430962 | 239 | 13 | 30 | 18.384615 | 0.610294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56572951cdbd706ef85de0b8777578cddbef4cf9 | 2,047 | py | Python | odps/df/backends/optimize/utils.py | Emersonxuelinux/aliyun-odps-python-sdk | 0b38c777711c95ed1775fa67822febf88fc3d642 | [
"Apache-2.0"
] | null | null | null | odps/df/backends/optimize/utils.py | Emersonxuelinux/aliyun-odps-python-sdk | 0b38c777711c95ed1775fa67822febf88fc3d642 | [
"Apache-2.0"
] | null | null | null | odps/df/backends/optimize/utils.py | Emersonxuelinux/aliyun-odps-python-sdk | 0b38c777711c95ed1775fa67822febf88fc3d642 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...expr.expressions import Column, CollectionExpr
def change_input(expr, src_input, new_input, get_field, dag):
for path in expr.all_path(src_input, strict=True):
cols = [it for it in path if isinstance(it, Column)]
assert len(cols) <= 1
collection_len = len([it for it in path if isinstance(it, CollectionExpr)])
if isinstance(expr, CollectionExpr):
assert collection_len == 2
else:
assert collection_len == 1
if len(cols) == 1:
col = cols[0]
col_name = col.source_name or col.name
field = get_field(new_input, col_name)
if col.is_renamed():
field = field.rename(col.name)
else:
field = field.copy()
path[-3].substitute(col, field, dag=dag)
else:
path[-2].substitute(src_input, new_input, dag=dag)
def copy_sequence(sequence, collection, dag=None):
copied = sequence.copy()
if dag:
dag.add_node(copied)
is_copied = set()
for path in sequence.all_path(collection, strict=True):
curr = copied
for seq in path[1:-1]:
if id(seq) in is_copied:
continue
is_copied.add(id(seq))
copied_seq = seq.copy()
curr.substitute(seq, copied_seq, dag=dag)
curr = copied_seq
return copied | 34.694915 | 83 | 0.629213 | 282 | 2,047 | 4.475177 | 0.41844 | 0.047544 | 0.020602 | 0.025357 | 0.042789 | 0.042789 | 0.042789 | 0.042789 | 0 | 0 | 0 | 0.014845 | 0.276014 | 2,047 | 59 | 84 | 34.694915 | 0.836707 | 0.30044 | 0 | 0.083333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 1 | 0.055556 | false | 0 | 0.027778 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
565c37bb80f2d86759c5fa9c22ae78a191b901e0 | 3,061 | py | Python | test/test_stock.py | vikramrajsitpal/yayFinPy | a532da5161973b39f53306fa6b57149ca042ac28 | [
"Apache-2.0"
] | null | null | null | test/test_stock.py | vikramrajsitpal/yayFinPy | a532da5161973b39f53306fa6b57149ca042ac28 | [
"Apache-2.0"
] | null | null | null | test/test_stock.py | vikramrajsitpal/yayFinPy | a532da5161973b39f53306fa6b57149ca042ac28 | [
"Apache-2.0"
] | null | null | null | from typing import List
from decimal import Decimal
from yayFinPy.stock import Stock
import pandas as pd
def test_constructor():
    """Return 1 when Stock("AAPL") builds successfully, else print and return 0."""
    try:
        assert Stock("AAPL") != None
        return 1
    except Exception as e:
        print("Test Failed: test_constructor: ", e)
        return 0
def test_constructor_failure():
    """Return 1 when constructing with a bogus ticker raises, else 0."""
    try:
        Stock("INVALID")
    except:
        return 1
    print("Test Failed: test_constructor_failure")
    return 0
def test_stock_attributes():
    """Check quote attribute types and the company name; 1 on pass, 0 on fail."""
    try:
        stock = Stock("AAPL")
        assert stock != None
        for attr in ("bid", "ask", "bid_size", "ask_size",
                     "pe_ratio", "peg_ratio", "market_cap"):
            assert type(getattr(stock, attr)) == Decimal
        assert type(stock.name) == str
        assert stock.name == "Apple Inc."
        return 1
    except Exception as e:
        print("Test Failed: test_stock_attributes", e)
        return 0
def test_stock_splits():
    """Splits should come back as a pandas Series; 1 on pass, 0 on fail."""
    try:
        splits = Stock("AAPL").splits
        assert type(splits) == type(pd.Series(dtype='float64'))
        return 1
    except Exception as e:
        print("Test Failed: test_stock_splits", e)
        return 0
def test_stock_dividends():
    """Dividends should come back as a pandas Series; 1 on pass, 0 on fail."""
    try:
        dividends = Stock("AAPL").dividends
        assert type(dividends) == type(pd.Series(dtype='float64'))
        return 1
    except Exception as e:
        print("Test Failed: test_stock_dividends", e)
        return 0
def test_stock_news():
    """Related news should be a list of up to 20 strings; 1 on pass, 0 on fail."""
    try:
        headlines = Stock("AAPL").related_news()
        assert type(headlines) == list
        if len(headlines) > 0:
            assert type(headlines[0]) == str
        assert len(headlines) <= 20
        return 1
    except Exception as e:
        print("Test Failed: test_stock_news", e)
        return 0
def test_stock_tweets():
    """Expected to fail: invalid Twitter credentials must raise (returns 1)."""
    try:
        Stock("AAPL").tweets("invalid", "invalid", "", "")
        return 0
    except Exception:
        # Test expected to fail.
        return 1
def test_stock_sentiments():
    """Expected to fail: sentiment() without credentials must raise (returns 1)."""
    try:
        Stock("AAPL").sentiment()
    except Exception:
        return 1  # test expected to fail
    return 0
def test_stock_returns():
    """returns() should yield a Decimal; 1 on pass, 0 on fail."""
    try:
        assert type(Stock("AAPL").returns()) == Decimal
        return 1
    except Exception as e:
        print("Test Failed: test_stock_returns", e)
        return 0
def test_stock_companyData():
    """company_data should be string-convertible; 1 on pass, 0 on fail."""
    try:
        assert type(str(Stock("AAPL").company_data)) == str
        return 1
    except Exception as e:
        print("Test Failed: test_stock_companyData", e)
        return 0
if __name__ == '__main__':
    # Run every test in order and summarize how many passed.
    results = [fn() for fn in (test_constructor,
                               test_constructor_failure,
                               test_stock_attributes,
                               test_stock_splits,
                               test_stock_dividends,
                               test_stock_returns,
                               test_stock_news,
                               test_stock_tweets,
                               test_stock_sentiments,
                               test_stock_companyData)]
    print("Stock Test Done: (%d/%d) Successful" % (sum(results), len(results)))
56628c54f6542fcfadf155107dcfc82a5c903211 | 11,248 | py | Python | old_commandSkills.py | tomasruizt/dads | 90652ca92b813301ed731186f29f05e885bc117d | [
"Apache-2.0"
] | null | null | null | old_commandSkills.py | tomasruizt/dads | 90652ca92b813301ed731186f29f05e885bc117d | [
"Apache-2.0"
] | null | null | null | old_commandSkills.py | tomasruizt/dads | 90652ca92b813301ed731186f29f05e885bc117d | [
"Apache-2.0"
] | null | null | null | import logging
import os
import pickle
from typing import NamedTuple
import gym
from gym import Wrapper, GoalEnv
from gym.wrappers import FlattenObservation, TimeLimit, TransformReward, FilterObservation
from runstats import Statistics
import torch
from envs.gym_mujoco.custom_wrappers import DropGoalEnvsAbsoluteLocation
# Cap torch's CPU threading before any heavy libraries spin up workers.
torch.set_num_threads(2)
torch.set_num_interop_threads(2)
from stable_baselines3 import SAC
import numpy as np
from stable_baselines3.common.buffers import ReplayBuffer
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.monitor import Monitor
from solvability import ForHER
import matplotlib
# Select the GUI backend before pyplot is imported, then enable
# interactive (non-blocking) plotting.
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
plt.ion()
from envs.custom_envs import DADSEnv, make_point2d_dads_env, make_fetch_reach_env, \
make_fetch_push_env, make_point_mass_env, make_ant_dads_env
from scipy.stats import multivariate_normal as mvn
def l2(target, sources: np.ndarray):
    """Euclidean distance from `target` to each row of `sources` (last axis)."""
    offsets = np.subtract(target, sources)
    return np.linalg.norm(offsets, axis=sources.ndim - 1)
class MutualInfoStrategy:
    """Base class for skill samplers that score transitions by mutual information.

    Subclasses supply the prior sampling plus the two log-density terms;
    `get_mutual_info` combines them as log p(g'|z,g) - log p(g'|g).
    """

    def __init__(self, skill_dim: int):
        self._skill_dim = skill_dim

    def sample_skill(self, samples=None):
        raise NotImplementedError

    def get_mutual_info(self, goal_delta: np.ndarray, skill: np.ndarray) -> float:
        """Pointwise MI estimate for one (skill, goal-delta) pair."""
        log_cond = self._mi_numerator(delta=goal_delta, skill=skill)
        log_marg = self._mi_denominator(delta=goal_delta)
        mutual_info = log_cond - log_marg
        # Log suspiciously low MI values together with both terms.
        if mutual_info < -10:
            logging.warning(str((mutual_info, log_cond, log_marg)))
        return mutual_info

    def choose_skill(self, desired_delta: np.ndarray) -> np.ndarray:
        raise NotImplementedError

    def _mi_numerator(self, delta: np.ndarray, skill: np.ndarray) -> float:
        raise NotImplementedError

    def _mi_denominator(self, delta: np.ndarray) -> float:
        raise NotImplementedError
class DotProductStrategy(MutualInfoStrategy):
    """Standard-normal skill prior with squared-distance log-density terms."""

    def sample_skill(self, samples=None):
        # Falsy `samples` (None or 0) yields a single flat skill vector.
        shape = (samples, self._skill_dim) if samples else self._skill_dim
        return np.random.normal(size=shape)

    def _mi_numerator(self, delta: np.ndarray, skill: np.ndarray) -> float:
        offset = delta - skill
        return -0.5 * np.dot(offset, offset)

    def _mi_denominator(self, delta: np.ndarray) -> float:
        return -0.25 * np.dot(delta, delta) - np.log(np.sqrt(2 ** len(delta)))

    def choose_skill(self, desired_delta: np.ndarray) -> np.ndarray:
        # Among 1000 prior samples, pick the skill closest to the desired delta.
        candidates = self.sample_skill(samples=1000)
        return candidates[l2(desired_delta, candidates).argmin()]
class MVNStrategy(MutualInfoStrategy):
    """Multivariate-normal skill prior with a Gaussian transition model."""

    def __init__(self, skill_dim: int):
        super().__init__(skill_dim)
        covariances = {
            "z": np.eye(self._skill_dim),
            "g'|z,g": 0.1 * np.eye(self._skill_dim)
        }
        # Marginalizing over z: the sum of two independent Gaussian RVs has
        # the sum of their covariances (convolution of the densities).
        covariances["g'|g"] = covariances["z"] + covariances["g'|z,g"]
        self.cov = covariances

    def sample_skill(self, samples=1):
        """Draw `samples` skills from the prior p(z)."""
        return mvn.rvs(size=samples, cov=self.cov["z"])

    def choose_skill(self, desired_delta: np.ndarray) -> np.ndarray:
        # Among 1000 prior samples, pick the skill closest to the desired delta.
        candidates = self.sample_skill(samples=1000)
        return candidates[l2(desired_delta, candidates).argmin()]

    def _mi_numerator(self, delta: np.ndarray, skill: np.ndarray) -> float:
        return mvn.logpdf(x=delta, mean=skill, cov=self.cov["g'|z,g"])

    def _mi_denominator(self, delta: np.ndarray) -> float:
        return mvn.logpdf(x=delta, cov=self.cov["g'|g"])
class SkillWrapper(Wrapper):
"""Skill-conditioned wrapper around a GoalEnv (DADS-style).

Appends the currently active skill vector to the flat observation,
replaces the env reward with a mutual-information reward between the
skill and the normalized change of the achieved/desired goal gap, and
resamples the skill roughly every `skill_reset_steps` steps.
"""
def __init__(self, env: GoalEnv, skill_reset_steps: int):
super().__init__(env)
self._skill_reset_steps = skill_reset_steps
# Skill dimensionality mirrors the desired-goal dimensionality.
self._skill_dim = env.observation_space["desired_goal"].shape[0]
obs_dim = self.env.observation_space["observation"].shape[0]
self.observation_space = gym.spaces.Box(-np.inf, np.inf, shape=(obs_dim + self._skill_dim, ))
self.strategy = MVNStrategy(skill_dim=self._skill_dim)
self._cur_skill = self.strategy.sample_skill()
self._last_dict_obs = None
# Running stats per goal dimension (for delta normalization); seeded
# with 1e-6 so the stddev is non-zero before any data arrives.
self._goal_deltas_stats = [Statistics([1e-6]) for _ in range(self._skill_dim)]
def _normalize(self, delta):
# Standardize each component by its running mean/stddev.
μs = [s.mean() for s in self._goal_deltas_stats]
σs = [s.stddev() for s in self._goal_deltas_stats]
return np.asarray([(d-μ)/σ for (d, μ, σ) in zip(delta, μs, σs)])
def step(self, action):
dict_obs, _, done, info = self.env.step(action)
reward = self._reward(dict_obs=dict_obs)
self._last_dict_obs = dict_obs
# Resample the skill with probability 1/skill_reset_steps per step.
if np.random.random() < 1/self._skill_reset_steps:
self._cur_skill = self.strategy.sample_skill()
flat_obs_w_skill = self._add_skill(observation=dict_obs["observation"])
return flat_obs_w_skill, reward, done, info
def _reward(self, dict_obs: np.ndarray) -> float:
# MI reward on the step-to-step change of (achieved - desired),
# truncated to the skill dims and normalized by the running stats.
last_diff = self._last_dict_obs["achieved_goal"] - self._last_dict_obs["desired_goal"]
cur_diff = dict_obs["achieved_goal"] - dict_obs["desired_goal"]
goal_delta = (cur_diff - last_diff)[:self._skill_dim]
for s, d in zip(self._goal_deltas_stats, goal_delta):
s.push(d)
return self.strategy.get_mutual_info(goal_delta=self._normalize(goal_delta),
skill=self._cur_skill)
def reset(self, **kwargs):
self._cur_skill = self.strategy.sample_skill()
self._last_dict_obs = self.env.reset(**kwargs)
return self._add_skill(observation=self._last_dict_obs["observation"])
def _add_skill(self, observation: np.ndarray) -> np.ndarray:
return np.concatenate((observation, self._cur_skill))
def set_sac(self, sac):
# Injected after construction; used by predict().
self._sac = sac
def predict(self, dict_obs: dict, deterministic=True):
# Choose the skill closest to the remaining goal gap, then act with
# the underlying SAC policy on the skill-augmented observation.
delta = (dict_obs["desired_goal"] - dict_obs["achieved_goal"])[:self._skill_dim]
skill = self.strategy.choose_skill(desired_delta=self._normalize(delta))
flat_obs_w_skill = np.concatenate((dict_obs["observation"], skill))
return self._sac.predict(observation=flat_obs_w_skill, deterministic=deterministic)
def save(self, fname: str):
# Persist only the normalization stats; the policy is saved separately.
with open(fname + "-stats.pkl", "wb") as file:
pickle.dump(self._goal_deltas_stats, file)
def load(self, fname: str):
with open(fname + "-stats.pkl", "rb") as file:
self._goal_deltas_stats = pickle.load(file)
def relabel(self, observations, actions, next_observations, rewards, dones):
"""Relabel a batch of transitions with freshly sampled skills.

NOTE(review): the new skills are written into the *input* arrays while
the returned new_obs/new_next_obs are pre-mutation copies still holding
the old skills, yet the returned rewards correspond to the new skills —
this looks inverted (skills should probably be set on the copies);
confirm the intended semantics.
"""
assert observations.ndim == 2, observations.ndim
assert observations.shape == next_observations.shape, (observations.shape, next_observations.shape)
deltas = self.env.achieved_goal_from_state(next_observations - observations)[:self._skill_dim]
deltas = self._normalize(deltas)
new_skills = self.strategy.sample_skill(len(observations))
mi = self.strategy.get_mutual_info
rewards = np.asarray([mi(goal_delta=d, skill=s) for d, s in zip(deltas, new_skills)])
new_obs, new_next_obs = observations.copy(), next_observations.copy()
set_skills(observations, new_skills)
set_skills(next_observations, new_skills)
return new_obs, new_next_obs, actions, rewards, dones
def eval_dict_env(dict_env: GoalEnv, model, ep_len: int):
    """Render the env forever, letting *model* act for ``ep_len`` steps per episode.

    Never returns; intended for interactive inspection only.
    """
    while True:
        obs = dict_env.reset()
        for _ in range(ep_len):
            dict_env.render("human")
            action, _ = model.predict(obs, deterministic=True)
            obs, *_ = dict_env.step(action)
def as_dict_env(env):
    """Wrap *env* so it exposes a HER-style dict observation space."""
    return ForHER(env)
def set_skills(obs: np.ndarray, skills: np.ndarray) -> None:
    """Overwrite the trailing skill slots of every row of *obs* in place."""
    skill_dim = skills.shape[1]
    obs[:, -skill_dim:] = skills
class AddExpCallback(BaseCallback):
    """Callback that augments the replay buffer with relabelled transitions."""

    def __init__(self, num_added_samples: int, verbose: int = 0):
        super().__init__(verbose)
        self.num_added_samples = num_added_samples

    def _on_step(self) -> bool:
        """Sample from the buffer, relabel via the wrapper, extend the buffer."""
        buffer: ReplayBuffer = self.model.replay_buffer
        if buffer.size() == 0:
            # Nothing collected yet; keep training.
            return True
        samples = buffer.sample(self.num_added_samples)
        wrapper: SkillWrapper = self.training_env.envs[0]
        batch = {key: tensor.cpu().numpy() for key, tensor in samples._asdict().items()}
        buffer.extend(*wrapper.relabel(**batch))
        return True
# Registry mapping short environment names to their factory functions.
envs_fns = dict(
point2d=make_point2d_dads_env,
reach=make_fetch_reach_env,
push=make_fetch_push_env,
pointmass=make_point_mass_env,
ant=make_ant_dads_env
)
class Conf(NamedTuple):
    """Per-environment training configuration."""
    ep_len: int            # steps per episode
    num_episodes: int      # episodes to train for
    lr: float = 3e-4       # SAC learning rate
    # NOTE(review): may be None despite the int annotation (None = all dims).
    first_n_goal_dims: int = None
    reward_scaling: float = 1.0  # multiplier applied to env rewards
def show(model, env, conf: Conf):
    """Render episodes forever using *model* on the flat env. Never returns."""
    while True:
        obs = env.reset()
        for _ in range(conf.ep_len):
            env.render("human")
            action, _ = model.predict(obs, deterministic=True)
            obs, *_ = env.step(action)
def train(model: SAC, conf: Conf, save_fname: str, added_trans=0):
    """Run SAC training for ``conf.num_episodes`` episodes and save the model.

    When ``added_trans`` > 0, an :class:`AddExpCallback` injects that many
    relabelled transitions per step.
    """
    learn_kwargs = {}
    if added_trans > 0:
        learn_kwargs["callback"] = AddExpCallback(num_added_samples=added_trans)
    model.learn(total_timesteps=conf.ep_len * conf.num_episodes, log_interval=10, **learn_kwargs)
    model.save(save_fname)
# Hand-tuned training configuration per environment name.
CONFS = dict(
point2d=Conf(ep_len=30, num_episodes=50, lr=0.01),
reach=Conf(ep_len=50, num_episodes=50, lr=0.001),
push=Conf(ep_len=50, num_episodes=2000, first_n_goal_dims=2),
pointmass=Conf(ep_len=150, num_episodes=100, lr=0.001, reward_scaling=1/100),
ant=Conf(ep_len=400, num_episodes=250, reward_scaling=1/500)
)
def main():
    """Build the env stack, train or load SAC, then visualize the policy."""
    as_gdads = True
    name = "pointmass"
    drop_abs_position = True

    dads_env_fn = envs_fns[name]
    conf: Conf = CONFS[name]

    dict_env = as_dict_env(dads_env_fn())
    dict_env = TimeLimit(dict_env, max_episode_steps=conf.ep_len)
    if drop_abs_position:
        dict_env = DropGoalEnvsAbsoluteLocation(dict_env)

    if as_gdads:
        flat_env = SkillWrapper(env=dict_env, skill_reset_steps=conf.ep_len // 2)
    else:
        flat_obs_content = ["observation", "desired_goal", "achieved_goal"]
        if drop_abs_position:
            flat_obs_content.remove("achieved_goal")  # Because always 0 vector
        flat_env = FlattenObservation(FilterObservation(dict_env, filter_keys=flat_obs_content))
    flat_env = TransformReward(flat_env, f=lambda r: r*conf.reward_scaling)
    flat_env = Monitor(flat_env)

    filename = f"modelsCommandSkills/{name}-gdads{as_gdads}"
    if os.path.exists(filename + ".zip"):
        # Reuse a previously trained model (and its wrapper stats for gdads).
        sac = SAC.load(filename, env=flat_env)
        if as_gdads:
            flat_env.load(filename)
    else:
        sac = SAC("MlpPolicy", env=flat_env, verbose=1, learning_rate=conf.lr,
                  tensorboard_log=f"(unknown)-tb", buffer_size=10000)
        train(model=sac, conf=conf, save_fname=filename)
        if as_gdads:
            flat_env.save(filename)

    if as_gdads:
        flat_env.set_sac(sac)
        # NOTE(review): eval_dict_env loops forever, so show() below is only
        # reached when as_gdads is False — confirm this is intended.
        eval_dict_env(dict_env=dict_env,
                      model=flat_env,
                      ep_len=conf.ep_len)
    show(model=sac, env=flat_env, conf=conf)


if __name__ == '__main__':
    main()
| 36.75817 | 107 | 0.674876 | 1,557 | 11,248 | 4.595376 | 0.179833 | 0.028931 | 0.02348 | 0.015094 | 0.256324 | 0.187841 | 0.152621 | 0.116562 | 0.096855 | 0.084277 | 0 | 0.011798 | 0.208748 | 11,248 | 305 | 108 | 36.878689 | 0.792135 | 0.008624 | 0 | 0.161017 | 0 | 0 | 0.034894 | 0.003767 | 0 | 0 | 0 | 0 | 0.008475 | 1 | 0.148305 | false | 0 | 0.084746 | 0.029661 | 0.364407 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
56634cb9015c3fca756d9ed11738ca8195395704 | 3,727 | py | Python | torecsys/inputs/base/image_inp.py | p768lwy3/torecsys | 2251366268b4fbe6f8c3ab1628fa72a0db043dcd | [
"MIT"
] | 92 | 2019-08-15T11:03:50.000Z | 2022-03-12T01:21:05.000Z | torecsys/inputs/base/image_inp.py | p768lwy3/torecsys | 2251366268b4fbe6f8c3ab1628fa72a0db043dcd | [
"MIT"
] | 3 | 2020-03-11T08:57:50.000Z | 2021-01-06T01:39:47.000Z | torecsys/inputs/base/image_inp.py | p768lwy3/torecsys | 2251366268b4fbe6f8c3ab1628fa72a0db043dcd | [
"MIT"
] | 16 | 2019-10-12T11:28:53.000Z | 2022-03-28T14:04:12.000Z | from typing import List, Optional, TypeVar
import torch
import torch.nn as nn
from torecsys.inputs.base import BaseInput
class ImageInput(BaseInput):
    """
    Base Input class for image, which embed image by a stack of convolution neural network (CNN)
    and fully-connect layer.
    """
    ImageInputs = TypeVar('ImageInput')

    def __init__(self,
                 embed_size: int,
                 in_channels: int,
                 layers_size: List[int],
                 kernels_size: List[int],
                 strides: List[int],
                 paddings: List[int],
                 pooling: Optional[str] = 'avg_pooling',
                 use_batchnorm: Optional[bool] = True,
                 dropout_p: Optional[float] = 0.0,
                 activation: Optional[nn.Module] = nn.ReLU()):
        """
        Initialize ImageInput.

        Args:
            embed_size (int): Size of embedding tensor
            in_channels (int): Number of channel of inputs
            layers_size (List[int]): Layers size of CNN
            kernels_size (List[int]): Kernels size of CNN
            strides (List[int]): Strides of CNN
            paddings (List[int]): Paddings of CNN
            pooling (str, optional): Method of pooling layer. Defaults to avg_pooling
            use_batchnorm (bool, optional): Whether batch normalization is applied after Conv2d.
                Defaults to True
            dropout_p (float, optional): Probability of Dropout2d. Defaults to 0.0
            activation (torch.nn.modules.activation, optional): Activation function of Conv2d.
                Defaults to nn.ReLU()

        Raises:
            ValueError: when pooling is not in ["max_pooling", "avg_pooling"]
        """
        super().__init__()
        self.length = embed_size

        self.model = nn.Sequential()
        layers_size = [in_channels] + layers_size
        iterations = enumerate(zip(layers_size[:-1], layers_size[1:], kernels_size, strides, paddings))
        for i, (in_c, out_c, k, s, p) in iterations:
            conv2d_i = nn.Conv2d(in_c, out_c, kernel_size=k, stride=s, padding=p)
            self.model.add_module(f'conv2d_{i}', conv2d_i)
            if use_batchnorm:
                self.model.add_module(f'batchnorm2d_{i}', nn.BatchNorm2d(out_c))
            self.model.add_module(f'dropout2d_{i}', nn.Dropout2d(p=dropout_p))
            # NOTE(review): the same activation module instance is shared by all
            # layers; fine for stateless activations such as ReLU.
            self.model.add_module(f'activation_{i}', activation)

        if pooling == 'max_pooling':
            pooling_layer = nn.AdaptiveMaxPool2d(output_size=(1, 1,))
        elif pooling == 'avg_pooling':
            pooling_layer = nn.AdaptiveAvgPool2d(output_size=(1, 1,))
        else:
            raise ValueError('pooling must be in ["max_pooling", "avg_pooling"].')
        self.model.add_module('pooling', pooling_layer)

        self.fc = nn.Linear(layers_size[-1], embed_size)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """
        Forward calculation of ImageInput

        Args:
            inputs (torch.tensor), shape = (B, C, H_{i}, W_{i}), data_type = torch.float: tensor of images

        Returns:
            torch.tensor, shape = (B, 1, E): output of ImageInput
        """
        # output's shape of convolution model = (B, C_{last}, 1, 1)
        outputs = self.model(inputs.rename(None))
        outputs.names = ('B', 'C', 'H', 'W',)
        # BUG FIX: squeeze only the two spatial dims; a bare .squeeze() also
        # dropped the batch dimension when the batch size was 1, breaking fc.
        outputs = self.fc(outputs.rename(None).squeeze(-1).squeeze(-1))
        # unsqueeze the outputs in dim = 1 and set names to the tensor
        outputs = outputs.unsqueeze(1)
        outputs.names = ('B', 'N', 'E',)
        return outputs
| 38.030612 | 106 | 0.581701 | 450 | 3,727 | 4.673333 | 0.304444 | 0.038041 | 0.028531 | 0.042796 | 0.078935 | 0.01902 | 0 | 0 | 0 | 0 | 0 | 0.011271 | 0.309632 | 3,727 | 97 | 107 | 38.42268 | 0.806063 | 0.344513 | 0 | 0 | 0 | 0 | 0.073103 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.090909 | 0 | 0.204545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
566467ac5da574abf21d4a1577d72293c60ca80a | 31,259 | py | Python | optimus/infer.py | Pcosmin/Optimus | ef3306d1b752bbfb1959ddb9103786acb8e9b9ba | [
"Apache-2.0"
] | 1 | 2020-09-22T13:04:37.000Z | 2020-09-22T13:04:37.000Z | optimus/infer.py | rafaelang/Optimus | 809088f41588c968b2e30210f98a494a497b07ff | [
"Apache-2.0"
] | null | null | null | optimus/infer.py | rafaelang/Optimus | 809088f41588c968b2e30210f98a494a497b07ff | [
"Apache-2.0"
] | null | null | null | # This file need to be send to the cluster via .addPyFile to handle the pickle problem
# This is outside the optimus folder on purpose because it cause problem importing optimus when using de udf.
# This can not import any optimus file unless it's imported via addPyFile
import datetime
import math
import os
import re
from ast import literal_eval
import fastnumbers
import pandas as pd
import pendulum
from dask import distributed
from dask.dataframe.core import DataFrame as DaskDataFrame
from pyspark.ml.linalg import VectorUDT
from pyspark.sql import functions as F, DataFrame as SparkDataFrame
from pyspark.sql.types import ArrayType, StringType, IntegerType, FloatType, DoubleType, BooleanType, StructType, \
LongType, DateType, ByteType, ShortType, TimestampType, BinaryType, NullType
# This function return True or False if a string can be converted to any datatype.
from optimus.helpers.constants import ProfilerDataTypes
from optimus.helpers.raiseit import RaiseIt
def str_to_date(_value, date_format=None):
    """Return True when *_value* can be parsed as a date by pendulum.

    ``date_format`` is accepted for interface compatibility but unused here.
    """
    try:
        # date_format = "DD/MM/YYYY"
        pendulum.parse(_value, strict=False)
        return True
    # BUG FIX: a bare ``except:`` also swallowed KeyboardInterrupt/SystemExit.
    except Exception:
        return False
def str_to_date_format(_value, date_format):
    """Return True when *_value* parses under the given pendulum date format."""
    # Check this https://stackoverflow.com/questions/17134716/convert-dataframe-column-type-from-string-to-datetime-dd-mm-yyyy-format
    try:
        pendulum.from_format(_value, date_format)
        return True
    # BUG FIX: a bare ``except:`` also swallowed KeyboardInterrupt/SystemExit.
    except Exception:
        return False
def str_to_null(_value):
    """Return True when the string equals "null" (case-insensitive)."""
    return _value.lower() == "null"
def is_null(_value):
    """Return True when *_value* is null/NaN according to pandas."""
    return bool(pd.isnull(_value))
def str_to_data_type(_value, _dtypes):
    """
    Check if value can be parsed to a tuple or and list.
    Because Spark can handle tuples we will try to transform tuples to arrays
    :param _value: string to parse
    :param _dtypes: type or tuple of types to check the parsed literal against
    :return: bool
    """
    try:
        parsed = literal_eval((_value.encode('ascii', 'ignore')).decode("utf-8"))
        # BUG FIX: previously fell through (returning None) when the literal
        # parsed but was not an instance of _dtypes; always return a bool now.
        return isinstance(parsed, _dtypes)
    except (ValueError, SyntaxError, AttributeError):
        return False


def str_to_array(_value):
    """Array inference is currently disabled; always returns False."""
    return False
    # return str_to_data_type(_value, (list, tuple))


def str_to_object(_value):
    """Object (dict/set) inference is currently disabled; always returns False."""
    return False
    # return str_to_data_type(_value, (dict, set))
# Primitive shape regexes (kept compatible with cudf 0.14's RE2 engine).
regex_int = r"^\d+$"  # For cudf 0.14
regex_decimal = r"^\d+\.\d$"

regex_boolean = r"\btrue\b|\bfalse\b"
regex_boolean_compiled = re.compile(regex_boolean)


def str_to_boolean(value, compile=False):
    """Return True when *value* contains the word 'true' or 'false'."""
    return str_to(value, regex_boolean, regex_boolean_compiled, compile)


regex_gender = r"\bmale\b|\bfemale\b"
regex_gender_compiled = re.compile(regex_gender)


def str_to_gender(value, compile=False):
    """Return True when *value* contains the word 'male' or 'female'."""
    return str_to(value, regex_gender, regex_gender_compiled, compile)
regex_url = "(http|https|ftp|s3):\/\/.?[a-zA-Z]*.\w*.[a-zA-Z0-9]*\/?[a-zA-z_-]*.?[a-zA-Z]*\/?"
regex_url_compiled = re.compile(regex_url, re.IGNORECASE)


def str_to_url(value, compile=False):
    """Return True when *value* looks like an http/https/ftp/s3 URL."""
    return str_to(value, regex_url, regex_url_compiled, compile)


regex_ip = r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}"
regex_ip_compiled = re.compile(regex_ip, re.IGNORECASE)


def str_to_ip(value, compile=False):
    """Return True when *value* contains a dotted-quad IP-like pattern."""
    return str_to(value, regex_ip, regex_ip_compiled, compile)


# regex_email = r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)" # This do not work in CUDF/RE2
regex_email = r"^[^@]+@[^@]+\.[a-zA-Z]{2,}$"
regex_email_compiled = re.compile(regex_email, re.IGNORECASE)


def str_to_email(value, compile=False):
    """Return True when *value* matches a simple e-mail shape."""
    return str_to(value, regex_email, regex_email_compiled, compile)
# Reference https://www.regular-expressions.info/creditcard.html
# https://codereview.stackexchange.com/questions/74797/credit-card-checking
regex_credit_card = (r'(4(?:\d{12}|\d{15})'  # Visa
                     r'|5[1-5]\d{14}'  # Mastercard
                     r'|6011\d{12}'  # Discover (incomplete?)
                     r'|7\d{15}'  # What's this?
                     r'|3[47]\d{13}'  # American Express
                     r')$')
regex_credit_card_compiled = re.compile(regex_credit_card)


def str_to_credit_card(value, compile=False):
    """Return True when *value* matches a known credit-card number shape."""
    return str_to(value, regex_credit_card, regex_credit_card_compiled, compile)


regex_zip_code = r"^(\d{5})([- ])?(\d{4})?$"
regex_zip_code_compiled = re.compile(regex_zip_code, re.IGNORECASE)


def str_to_zip_code(value, compile=False):
    """Return True when *value* matches a 5(+4) digit US zip code."""
    return str_to(value, regex_zip_code, regex_zip_code_compiled, compile)


regex_missing = r" "
regex_missing_compiled = re.compile(regex_missing, re.IGNORECASE)


def str_to_missing(value, compile=False):
    """Return True when *value* starts with a blank (treated as 'missing')."""
    return str_to(value, regex_missing, regex_missing_compiled, compile)
regex_social_security_number = "^([1-9])(?!\1{2}-\1{2}-\1{4})[1-9]{2}-[1-9]{2}-[1-9]{4}"
regex_social_security_number_compiled = re.compile(regex_social_security_number, re.IGNORECASE)
def str_to_social_security_number(value, compile=False):
return str_to(value, regex_social_security_number, regex_social_security_number_compiled, compile)
regex_http_code = "/^[1-5][0-9][0-9]$/"
regex_http_code_compiled = re.compile(regex_http_code, re.IGNORECASE)
def str_to_http_code(value, compile=False):
return str_to(value, regex_http_code, regex_http_code_compiled, compile)
# Reference https://stackoverflow.com/questions/8634139/phone-validation-regex
regex_phone_number = "/\(?([0-9]{3})\)?([ .-]?)([0-9]{3})\2([0-9]{4})/"
regex_phone_number_compiled = re.compile(regex_phone_number, re.IGNORECASE)
def str_to_phone_number(value, compile=False):
return str_to(value, regex_phone_number, regex_phone_number_compiled, compile)
# US states and territories: full lower-case names and the matching
# two-letter codes (same order, same length).
US_STATES_NAMES = [
    "alabama", "alaska", "american samoa", "arizona", "arkansas", "california",
    "colorado", "connecticut", "delaware", "district of columbia",
    "federated states of micronesia", "florida", "georgia", "guam", "hawaii",
    "idaho", "illinois", "indiana", "iowa", "kansas", "kentucky", "louisiana",
    "maine", "marshall islands", "maryland", "massachusetts", "michigan",
    "minnesota", "mississippi", "missouri", "montana", "nebraska", "nevada",
    "new hampshire", "new jersey", "new mexico", "new york", "north carolina",
    "north dakota", "northern mariana islands", "ohio", "oklahoma", "oregon",
    "palau", "pennsylvania", "puerto rico", "rhode island", "south carolina",
    "south dakota", "tennessee", "texas", "utah", "vermont", "virgin islands",
    "virginia", "washington", "west virginia", "wisconsin", "wyoming",
]

US_STATES_CODE = [
    "al", "ak", "as", "az", "ar", "ca", "co", "ct", "de", "dc", "fm", "fl",
    "ga", "gu", "hi", "id", "il", "in", "ia", "ks", "ky", "la", "me", "mh",
    "md", "ma", "mi", "mn", "ms", "mo", "mt", "ne", "nv", "nh", "nj", "nm",
    "ny", "nc", "nd", "mp", "oh", "ok", "or", "pw", "pa", "pr", "ri", "sc",
    "sd", "tn", "tx", "ut", "vt", "vi", "va", "wa", "wv", "wi", "wy",
]
def str_to(value, regex, compiled_regex, compile=False):
    """Match *value* against a regex from its start; False for None values.

    When ``compile`` is True the pre-compiled pattern is used instead of the
    raw pattern string.
    """
    if value is None:
        return False
    pattern = compiled_regex if compile is True else regex
    return bool(re.match(pattern, value))
def str_to_int(_value):
    """Return True when *_value* parses as an integer (via fastnumbers)."""
    return bool(fastnumbers.isint(_value))


def str_to_decimal(_value):
    """Return True when *_value* parses as a float (via fastnumbers)."""
    return bool(fastnumbers.isfloat(_value))


def str_to_str(_value):
    """Return True when *_value* is a Python string."""
    return isinstance(_value, str)
# Map of currency symbols (including historical and fullwidth signs) to their
# English names; the keys feed the alternation regex built below.
currencies = {"$": "dollar",
"¢": "cent",
"£": "point",
"€": "euro",
"¥": "yen",
"₹": "indian rupee",
"₽": "ruble",
"元": "yuan",
"¤": "currency",
"₠": "euro-currency",
"₡": "colon",
"₢": "cruzeiro",
"₣": "french franc",
"₤": "lira",
"₥": "mill",
"₦": "naira",
"₧": "peseta",
"₨": "rupee",
"₩": "won",
"₪": "new shequel",
"₫": "dong",
"₭": "kip",
"₮": "tugrik",
"₯": "drachma",
"₰": "german penny",
"₱": "peso",
"₲": "guarani",
"₳": "austral",
"₴": "hryvnia",
"₵": "cedi",
"₶": "livre tournois",
"₸": "tenge",
"₺": "turkish lira",
"₼": "manat",
"৲": "bengali rupee mark",
"৳": "bengali rupee sign",
"૱": "gujarati rupee sign",
"௹": "tamil rupee sign",
"฿": "thai currency bath",
"៛": "khmer currency reil",
"㍐": "square yuan",
"円": "yen character",
"圆": "yen/yuan character variant one",
"圎": "yen/yuan character variant two",
"圓": "yuan character, in hong kong and taiwan",
"圜": "yen/yuan character variant three",
"원": "won character",
"﷼": "rial sign",
"$": "fullwidth dollar sign",
"¢": "fullwidth cent sign",
"£": "fullwidth pound sign",
"¥": "fullwidth yen sign",
"₩": "fullwidth won sign"}
# Alternation pattern matching any known currency symbol.
regex_currencies = "|".join(list(currencies.keys()))
regex_currencies_compiled = re.compile(regex_currencies)
def str_to_currency(value, compile=False):
    """Return True when *value* contains a known currency symbol.

    BUG FIX: this previously matched against the *boolean* regex (a
    copy-paste error), so currency symbols were never detected.
    """
    return str_to(value, regex_currencies, regex_currencies_compiled, compile)
def parse_spark_class_dtypes(value):
    """
    Get a pyspark data class from a string data type representation,
    e.g. 'StringType()' from 'string'.
    :param value: dtype name or list of dtype names
    :return: Spark type object, or a list of them when several were given
    """
    values = value if isinstance(value, list) else [value]
    try:
        data_type = [SPARK_DTYPES_DICT_OBJECTS[SPARK_SHORT_DTYPES[v]] for v in values]
    except (KeyError, TypeError):
        # Unknown names pass through unchanged.
        data_type = values
    if isinstance(data_type, list) and len(data_type) == 1:
        return data_type[0]
    return data_type
class Infer(object):
    """
    This functions return True or False if match and specific dataType
    """
    # Maps a profiler dtype name to the predicate that recognizes it.
    DTYPE_FUNC = {"string": str_to_str, "boolean": str_to_boolean, "date": str_to_date,
                  "array": str_to_array, "object": str_to_object, "ip": str_to_ip,
                  "url": str_to_url, "email": str_to_email, "gender": str_to_gender,
                  "credit_card_number": str_to_credit_card, "zip_code": str_to_zip_code,
                  "int": str_to_int, "decimal": str_to_decimal,
                  ProfilerDataTypes.PHONE_NUMBER.value: str_to_phone_number,
                  ProfilerDataTypes.SOCIAL_SECURITY_NUMBER.value: str_to_social_security_number,
                  ProfilerDataTypes.HTTP_CODE.value: str_to_http_code,
                  }

    @staticmethod
    def mismatch(value: tuple, dtypes: dict):
        """
        Count the dataType that match, do not match, nulls and missing.
        For example if we have an string column we also need to pass the column type we want to match.
        Like credit card or postal code.
        :param value: tuple(Column/Row, value)
        :param dtypes: dict {col_name:(dataType, mismatch)}
        :return: ((col_name, label), 1) ready for aggregation
        """
        col_name, value = value
        dtype = dtypes[col_name]
        if Infer.DTYPE_FUNC[dtype](value) is True:
            label = dtype
        elif is_null(value) is True:
            label = "null"
        elif str_to_missing(value) is True:
            label = "missing"
        else:
            label = "mismatch"
        return (col_name, label), 1

    @staticmethod
    def to_spark(value):
        """
        Infer a Spark data type from a value
        :param value: value to be inferred
        :return: Spark data type
        """
        result = None
        if value is None:
            result = "null"
        elif is_bool(value):
            result = "bool"
        elif fastnumbers.isint(value):
            result = "int"
        elif fastnumbers.isfloat(value):
            result = "float"
        elif is_list(value):
            # Element type is inferred from the first element only.
            result = ArrayType(Infer.to_spark(value[0]))
        elif is_datetime(value):
            result = "datetime"
        elif is_date(value):
            result = "date"
        elif is_binary(value):
            result = "binary"
        elif is_str(value):
            if str_to_boolean(value):
                result = "bool"
            elif str_to_date(value):
                result = "string"  # date
            elif str_to_array(value):
                result = "string"  # array
            else:
                result = "string"
        return parse_spark_class_dtypes(result)

    @staticmethod
    def parse(col_and_value, infer: bool = False, dtypes=None, str_funcs=None, int_funcs=None, full=True):
        """
        :param col_and_value: Column and value tuple
        :param infer: If 'True' try to infer in all the dataTypes available. See int_func and str_funcs
        :param dtypes:
        :param str_funcs: Custom string function to infer.
        :param int_funcs: Custom numeric functions to infer.
            {col_name: regular_expression}
        :param full: True return a tuple with (col_name, dtype), count or False return dtype
        :return:
        """
        col_name, value = col_and_value

        # Checks are ordered roughly from cheap to expensive.
        if int_funcs is None:
            int_funcs = [(str_to_credit_card, "credit_card_number"), (str_to_zip_code, "zip_code")]
        if str_funcs is None:
            str_funcs = [
                (str_to_missing, "missing"), (str_to_boolean, "boolean"), (str_to_date, "date"),
                (str_to_array, "array"), (str_to_object, "object"), (str_to_ip, "ip"),
                (str_to_url, "url"),
                (str_to_email, "email"), (str_to_gender, "gender"), (str_to_null, "null")
            ]

        # 'string' dtype comes from Spark, 'object' from Dask.
        if (dtypes[col_name] == "object" or dtypes[col_name] == "string") and infer is True:
            if isinstance(value, bool):
                _data_type = "boolean"
            elif fastnumbers.isint(value):  # Check if value is integer
                _data_type = "int"
                for check, name in int_funcs:
                    if check(value) is True:
                        _data_type = name
                        break
            elif value != value:  # NaN is the only value not equal to itself
                _data_type = "null"
            elif fastnumbers.isfloat(value):
                _data_type = "decimal"
            elif isinstance(value, str):
                _data_type = "string"
                for check, name in str_funcs:
                    if check(value) is True:
                        _data_type = name
                        break
        else:
            # NOTE(review): not inferring — keep the declared dtype but still
            # detect null/missing/array values.
            _data_type = dtypes[col_name]
            if is_null(value) is True:
                _data_type = "null"
            elif str_to_missing(value) is True:
                _data_type = "missing"
            elif dtypes[col_name].startswith("array"):
                _data_type = "array"
            else:
                _data_type = dtypes[col_name]

        result = (col_name, _data_type), 1
        return result if full else _data_type

    @staticmethod
    def parse_pandas(value, date_format="DD/MM/YYYY"):
        """Infer the profiler dtype name of a single pandas value."""
        int_funcs = [(str_to_credit_card, "credit_card_number"), (str_to_zip_code, "zip_code")]
        str_funcs = [
            (str_to_missing, "missing"), (str_to_boolean, "boolean"),
            (str_to_array, "array"), (str_to_object, "object"), (str_to_ip, "ip"),
            (str_to_url, "url"),
            (str_to_email, "email"), (str_to_gender, "gender"), (str_to_null, "null")]

        if isinstance(value, list):
            _data_type = "array"
        elif pd.isnull(value):
            _data_type = "null"
        elif isinstance(value, bool):
            _data_type = "boolean"
        elif profiler_dtype_func("int", True)(value):
            # We first check if a number can be parsed as a credit card or zip code
            _data_type = "int"
            for check, name in int_funcs:
                if check(str(value)) is True:
                    _data_type = name
        elif profiler_dtype_func("decimal", True)(value):
            # Seems like float can be parsed as dates
            _data_type = "decimal"
        elif str_to_date(value):
            _data_type = "date"
        else:
            _data_type = "string"
            for check, name in str_funcs:
                if check(str(value)) is True:
                    _data_type = name
        return _data_type
def profiler_dtype_func(dtype, null=False):
    """
    Return a function that check if a value match a datatype
    :param dtype: profiler dtype name
    :param null: toggles the NaN handling of the numeric predicates
    :return: predicate callable
    """

    def _float(value):
        if null is True:
            return fastnumbers.isfloat(value, allow_nan=True) is True and fastnumbers.isint(value) is False
        return fastnumbers.isfloat(value) is True and fastnumbers.isint(value) is False or value != value

    def _int(value):
        if null is True:
            return fastnumbers.isint(value)
        return fastnumbers.isint(value) or value != value

    # NOTE(review): DATE maps to str_to_object and ARRAY/MISSING to is_str,
    # which looks inverted — confirm against callers before changing.
    dispatch = {
        ProfilerDataTypes.INT.value: _int,
        ProfilerDataTypes.DECIMAL.value: _float,
        ProfilerDataTypes.STRING.value: is_str,
        ProfilerDataTypes.BOOLEAN.value: str_to_boolean,
        ProfilerDataTypes.DATE.value: str_to_object,
        ProfilerDataTypes.ARRAY.value: is_str,
        ProfilerDataTypes.OBJECT.value: str_to_object,
        ProfilerDataTypes.GENDER.value: str_to_gender,
        ProfilerDataTypes.IP.value: str_to_ip,
        ProfilerDataTypes.URL.value: str_to_url,
        ProfilerDataTypes.EMAIL.value: str_to_email,
        ProfilerDataTypes.CREDIT_CARD_NUMBER.value: str_to_credit_card,
        ProfilerDataTypes.ZIP_CODE.value: str_to_zip_code,
        ProfilerDataTypes.MISSING.value: is_str,
    }
    if dtype in dispatch:
        return dispatch[dtype]
    RaiseIt.value_error(dtype, ProfilerDataTypes.list())
def is_nan(value):
    """
    Check if a value is nan
    :param value: a string "nan" (any case) or a numeric NaN count as nan
    :return: bool
    """
    if is_str(value):
        return value.lower() == "nan"
    if is_numeric(value):
        return math.isnan(value)
    return False


def is_none(value):
    """
    Check if a value is none
    :param value: the string "none" (any case) or the None object count
    :return: bool
    """
    if is_str(value):
        return value.lower() == "none"
    return value is None
def is_same_class(class1, class2):
    """
    Check if 2 class are the same
    :param class1:
    :param class2:
    :return: bool
    """
    return class1 == class2


def is_(value, type_):
    """
    Check if a value is instance of a class
    :param value:
    :param type_: class or tuple of classes
    :return: bool
    """
    return isinstance(value, type_)


def is_type(type1, type2):
    """
    Check if a value is a specific class
    :param type1:
    :param type2:
    :return: bool
    """
    return type1 == type2


def is_function(value):
    """
    Check if a param is a function
    :param value: object to check for
    :return: bool (anything callable counts)
    """
    return hasattr(value, '__call__')
def is_list(value):
    """Check if an object is a list."""
    return isinstance(value, list)


def is_list_empty(value):
    """Check if a list is empty."""
    return len(value) == 0


def is_dict(value):
    """Check if an object is a dict."""
    return isinstance(value, dict)


def is_tuple(value):
    """Check if an object is a tuple."""
    return isinstance(value, tuple)


def is_column(value):
    """Check if an object is a Spark column."""
    return isinstance(value, F.Column)
def _is_nonempty_list_of(value, types):
    """True when *value* is a non-empty list whose elements are all of *types*."""
    return bool(value) and isinstance(value, list) and all(isinstance(elem, types) for elem in value)


def is_list_of_str(value):
    """Check if an object is a non-empty list of strings."""
    return _is_nonempty_list_of(value, str)


def is_list_of_int(value):
    """Check if an object is a non-empty list of integers."""
    return _is_nonempty_list_of(value, int)


def is_list_of_float(value):
    """Check if an object is a non-empty list of floats."""
    return _is_nonempty_list_of(value, float)


def is_list_of_str_or_int(value):
    """Check if an object is a non-empty list of strings or integers."""
    return _is_nonempty_list_of(value, (int, str))


def is_list_of_str_or_num(value):
    """Check if an object is a non-empty list of strings, ints or floats."""
    return _is_nonempty_list_of(value, (str, int, float))


def is_list_of_spark_dataframes(value):
    """Check if an object is a non-empty list of Spark DataFrames."""
    return _is_nonempty_list_of(value, SparkDataFrame)


def is_list_of_dask_dataframes(value):
    """Check if an object is a list of Dask DataFrames (empty list counts)."""
    return isinstance(value, list) and all(isinstance(elem, DaskDataFrame) for elem in value)
def is_filepath(file_path):
    """
    Check if a value ia a valid file path
    :param file_path: path that either exists or whose directory is writable
    :return: bool
    """
    if os.path.exists(file_path):
        # the file is there
        return True
    # not there, but write privileges on the directory are enough
    return os.access(os.path.dirname(file_path), os.W_OK)
def is_ip(value):
    """
    Check if a value is valid ip
    :param value: string to check
    :return: True only for four dot-separated octets in [0, 255]
    """
    parts = value.split(".")
    if len(parts) != 4:
        return False
    for item in parts:
        # BUG FIX: int() raised ValueError on non-numeric octets (e.g.
        # "a.b.c.d"); reject them instead of crashing.
        if not item.isdigit():
            return False
        if not 0 <= int(item) <= 255:
            return False
    return True
def is_list_of_strings(value):
    """Check if *value* is a non-empty list where every element is a string."""
    return bool(value) and isinstance(value, list) and all(isinstance(elem, str) for elem in value)


def is_list_of_numeric(value):
    """Check if *value* is a non-empty list where every element is int or float."""
    return bool(value) and isinstance(value, list) and all(isinstance(elem, (int, float)) for elem in value)


def is_list_of_list(value):
    """Check if *value* is a non-empty list where every element is a list."""
    return bool(value) and isinstance(value, list) and all(isinstance(elem, list) for elem in value)


def is_list_of_tuples(value):
    """Check if *value* is a non-empty list where every element is a tuple."""
    return bool(value) and isinstance(value, list) and all(isinstance(elem, tuple) for elem in value)


def is_list_of_one_element(value):
    """Check if *value* is a list containing exactly one element.

    BUG FIX: previously returned None (not False) for non-list inputs.
    """
    return isinstance(value, list) and len(value) == 1


def is_dict_of_one_element(value):
    """Check if *value* is a dict containing exactly one entry.

    BUG FIX: previously returned None (not False) for non-dict inputs.
    """
    return isinstance(value, dict) and len(value) == 1


def is_one_element(value):
    """Check if *value* is a scalar (str, int, float or bool)."""
    return isinstance(value, (str, int, float, bool))
def is_num_or_str(value):
    """Check if a var is numeric (int, float) or a string."""
    return isinstance(value, (int, float, str))


def is_str_or_int(value):
    """Check if a var is a string or an integer."""
    return isinstance(value, (str, int))


def is_numeric(value):
    """Check if a var is numeric (int or float)."""
    return isinstance(value, (int, float))


def is_str(value):
    """Check if an object is a string."""
    return isinstance(value, str)


def is_object(value):
    """Check if an object is a string (alias of :func:`is_str`)."""
    return isinstance(value, str)
def is_list_of_futures(value):
    """Check if an object is a non-empty list of Dask futures."""
    return bool(value) and isinstance(value, list) and all(
        isinstance(elem, distributed.client.Future) for elem in value)


def is_future(value):
    """Check if an object is a Dask future."""
    return isinstance(value, distributed.client.Future)


def is_int(value):
    """Check if an object is an integer."""
    return isinstance(value, int)
def is_url(value):
    """Match *value* against a URL shape; returns a match object or None."""
    pattern = re.compile(
        r'^(?:http|ftp|hdfs)s?://'  # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    return re.match(pattern, value)
def is_float(value):
"""
Check if an object is an integer
:param value:
:return:
"""
return isinstance(value, float)
def is_bool(value):
return isinstance(value, bool)
def is_datetime(value):
"""
Check if an object is a datetime
:param value:
:return:
"""
return isinstance(value, datetime.datetime)
def is_binary(value):
"""
Check if an object is a bytearray
:param value:
:return:
"""
return isinstance(value, bytearray)
def is_date(value):
    """
    Check whether a value is a datetime.date instance.
    Since datetime.datetime subclasses datetime.date, datetimes pass too.
    :param value: object to inspect
    :return: True if value is a datetime.date
    """
    return isinstance(value, (datetime.date,))
# Maps loose Python type aliases to their canonical short names.
PYTHON_SHORT_TYPES = {
    "string": "string", "str": "string",
    "integer": "int", "int": "int",
    "float": "float",
    "double": "double",
    "bool": "boolean", "boolean": "boolean",
    "array": "array",
    "null": "null",
}

# Canonical short names mapped to the actual Python type objects.
PYTHON_TYPES = {"string": str, "int": int, "float": float, "boolean": bool}

# Spark dtype short names grouped by category.
PYSPARK_NUMERIC_TYPES = ["byte", "short", "big", "int", "double", "float"]
PYSPARK_NOT_ARRAY_TYPES = ["byte", "short", "big", "int", "double", "float", "string", "date", "bool"]
PYSPARK_STRING_TYPES = ["str"]
PYSPARK_ARRAY_TYPES = ["array"]
# Maps loose Spark dtype aliases to their canonical short names
# (e.g. "big"/"long" -> "bigint", "timestamp" -> "datetime").
SPARK_SHORT_DTYPES = {
    "string": "string", "str": "string",
    "integer": "int", "int": "int",
    "bigint": "bigint", "big": "bigint", "long": "bigint",
    "float": "float",
    "double": "double",
    "bool": "boolean", "boolean": "boolean",
    "struct": "struct",
    "array": "array",
    "date": "date",
    "datetime": "datetime", "timestamp": "datetime",
    "byte": "byte",
    "short": "short",
    "binary": "binary",
    "null": "null",
    "vector": "vector",
}
# Canonical short dtype name -> Spark type CLASS (uninstantiated).
# NOTE(review): "vector" maps to VectorUDT here but has no counterpart in
# SPARK_DTYPES_DICT_OBJECTS below — confirm whether that is intentional.
SPARK_DTYPES_DICT = {"string": StringType, "int": IntegerType, "float": FloatType,
"double": DoubleType, "boolean": BooleanType, "struct": StructType, "array": ArrayType,
"bigint": LongType, "date": DateType, "byte": ByteType, "short": ShortType,
"datetime": TimestampType, "binary": BinaryType, "null": NullType, "vector": VectorUDT
}
# Canonical short dtype name -> Spark type INSTANCE.
# "array" defaults to an array of strings (ArrayType(StringType())).
SPARK_DTYPES_DICT_OBJECTS = \
{"string": StringType(), "int": IntegerType(), "float": FloatType(),
"double": DoubleType(), "boolean": BooleanType(), "struct": StructType(), "array": ArrayType(StringType()),
"bigint": LongType(), "date": DateType(), "byte": ByteType(), "short": ShortType(),
"datetime": TimestampType(), "binary": BinaryType(), "null": NullType()
}
# Column categories the profiler reports on.
PROFILER_COLUMN_TYPES = {"categorical", "numeric", "date", "null", "array", "binary"}

# Maps canonical Python/short dtype names to profiler categories.
# Fix: the original had a typo key "binaty", which left the "binary" dtype
# with no reachable mapping (PROFILER_COLUMN_TYPES above expects "binary").
PYTHON_TO_PROFILER = {"string": "categorical", "boolean": "categorical", "int": "numeric", "float": "numeric",
                      "decimal": "numeric", "date": "date", "array": "array", "binary": "binary", "null": "null"}

# Maps profiler categories back to the Spark dtypes they cover.
# NOTE(review): values mix list, set and plain-string forms — consumers must
# handle all three shapes (kept as-is for compatibility).
SPARK_DTYPES_TO_PROFILER = {"int": ["smallint", "tinyint", "bigint", "int"], "decimal": ["float", "double"],
                            "string": "string", "date": {"date", "timestamp"}, "boolean": "boolean", "binary": "binary",
                            "array": "array", "object": "object", "null": "null", "missing": "missing"}
| 27.324301 | 133 | 0.55632 | 3,732 | 31,259 | 4.50268 | 0.154341 | 0.026779 | 0.033325 | 0.036658 | 0.411331 | 0.364437 | 0.328672 | 0.292311 | 0.237324 | 0.211795 | 0 | 0.007646 | 0.31802 | 31,259 | 1,143 | 134 | 27.348206 | 0.778263 | 0.164721 | 0 | 0.233846 | 0 | 0.009231 | 0.130092 | 0.014584 | 0 | 0 | 0 | 0 | 0 | 1 | 0.110769 | false | 0 | 0.023077 | 0.027692 | 0.286154 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |