# File: csv_to_plots.py (koshini/polya-social-contagion, MIT)
import matplotlib.pyplot as plt
import numpy as np
topology = 'facebook'
folder = 'pre-cured-equal/'
# Strategy pairs to compare: the red player always plays 'bot';
# the black player's counter-strategy varies.
strat_list = [
    {'red_strat': 'bot', 'black_strat': 'uniform'},
    {'red_strat': 'bot', 'black_strat': 'pure_centrality_threshold'},
    {'red_strat': 'bot', 'black_strat': 'centrality_threshold'},
    {'red_strat': 'bot', 'black_strat': 'pure_centrality'},
    {'red_strat': 'bot', 'black_strat': 'follow_bot'},
    # {'red_strat': 'bot', 'black_strat': 'gradient'},
]
for strat_dict in strat_list:
red_strat = strat_dict['red_strat'].replace('_', ' ')
black_strat = strat_dict['black_strat'].replace('_', ' ')
    # Relabel a legacy strategy name so the plot legend stays consistent.
    if black_strat == 'pure centrality entropy':
        black_strat = 'centrality threshold'
    # CSV filenames are built by straight concatenation, e.g.
    # 'pre-cured-equal/empirical-infectionfacebookbotuniforminfection.csv'.
    infection_csv = folder + 'empirical-infection' + topology + strat_dict['red_strat'] + strat_dict['black_strat'] + 'infection.csv'
# waste_csv = folder + topology + strat_dict['red_strat'] + strat_dict['black_strat'] + 'waste.csv'
# waste_array = np.loadtxt(waste_csv, delimiter=',', unpack=True)
# avg_waste = waste_array # if there is only one row
# avg_waste = np.mean(waste_array[0:50], axis=1)
# plt.figure(1)
# plt.xlabel('Time step')
# plt.ylabel('Average budget wasted per node')
# plt.plot(list(range(avg_waste.size)), avg_waste, label = black_strat)
plt.figure(2)
infection_array = np.loadtxt(infection_csv, delimiter=',', unpack=True)
avg_infection = np.mean(infection_array, axis=1)
plt.xlabel('Time step')
plt.ylabel('Average infection rate')
# avg_infection = infection_array # if there is only one row
    plt.plot(list(range(avg_infection.size)), avg_infection, label=black_strat)
# Figure 1 (budget waste) is saved for completeness; it stays empty unless the
# commented-out waste-plotting block above is re-enabled.
plt.figure(1)
plt.legend(loc='best', prop={'size': 9})
plt.axis([0, 60, 0, 12])
filename = folder + topology + ' waste.png'
plt.savefig(filename)
plt.figure(2)
# plt.legend(loc='best', prop={'size': 9})
plt.axis([0, 300, 0, 1])
filename = folder + topology + ' infection.png'
plt.savefig(filename)
print()
# File: examples/apps/python/com/nvidia/spark/examples/taxi/pre_process.py (acaldwell-pixel/spark-xgboost-examples, Apache-2.0)
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
# Explicit imports instead of the original wildcard; note that `hash` here is
# Spark's column hash function, shadowing the Python builtin.
from pyspark.sql.functions import (
    asin, col, cos, dayofmonth, dayofweek, hash, hour, month, sqrt,
    to_timestamp, year)
from pyspark.sql.types import IntegerType
def pre_process(data_frame):
processes = [
drop_useless,
encode_categories,
fill_na,
remove_invalid,
convert_datetime,
add_h_distance,
]
for process in processes:
data_frame = process(data_frame)
return data_frame
def drop_useless(data_frame):
return data_frame.drop(
'dropoff_datetime',
'payment_type',
'surcharge',
'mta_tax',
'tip_amount',
'tolls_amount',
'total_amount')
def encode_categories(data_frame):
categories = [ 'vendor_id', 'rate_code', 'store_and_fwd_flag' ]
for category in categories:
data_frame = data_frame.withColumn(category, hash(col(category)))
return data_frame.withColumnRenamed("store_and_fwd_flag", "store_and_fwd")
def fill_na(data_frame):
return data_frame.fillna(-1)
def remove_invalid(data_frame):
conditions = [
( 'fare_amount', 0, 500 ),
( 'passenger_count', 0, 6 ),
( 'pickup_longitude', -75, -73 ),
( 'dropoff_longitude', -75, -73 ),
( 'pickup_latitude', 40, 42 ),
( 'dropoff_latitude', 40, 42 ),
]
    for column, lo, hi in conditions:
        data_frame = data_frame.filter('{} > {} and {} < {}'.format(column, lo, column, hi))
return data_frame
def convert_datetime(data_frame):
datetime = col('pickup_datetime')
return (data_frame
.withColumn('pickup_datetime', to_timestamp(datetime))
.withColumn('year', year(datetime))
.withColumn('month', month(datetime))
.withColumn('day', dayofmonth(datetime))
.withColumn('day_of_week', dayofweek(datetime))
.withColumn(
'is_weekend',
col('day_of_week').isin(1, 7).cast(IntegerType())) # 1: Sunday, 7: Saturday
.withColumn('hour', hour(datetime))
.drop('pickup_datetime'))
def add_h_distance(data_frame):
    # Haversine great-circle distance between pickup and dropoff, in km.
    # The factor 12734 = 2 * 6367, roughly the Earth's diameter in km.
    p = math.pi / 180  # degrees to radians
    lat1 = col('pickup_latitude')
    lon1 = col('pickup_longitude')
    lat2 = col('dropoff_latitude')
    lon2 = col('dropoff_longitude')
    internal_value = (0.5
        - cos((lat2 - lat1) * p) / 2
        + cos(lat1 * p) * cos(lat2 * p) * (1 - cos((lon2 - lon1) * p)) / 2)
    h_distance = 12734 * asin(sqrt(internal_value))
    return data_frame.withColumn('h_distance', h_distance)
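# A minimal driver sketch (an assumption, not part of this file): 'taxi.csv' is
# a hypothetical path, and the raw data must carry the NYC taxi columns
# referenced above (pickup/dropoff coordinates, fare_amount, and so on).
if __name__ == '__main__':
    from pyspark.sql import SparkSession
    spark = SparkSession.builder.appName('taxi-pre-process').getOrCreate()
    raw = spark.read.csv('taxi.csv', header=True, inferSchema=True)
    pre_process(raw).show(5)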
# File: lyricsbot/domains/genius/genius.py (Kermitofx/lyricsbot, MIT)
"""
Get the song lyrics via users' data from genius.com.
"""
import requests
from bs4 import BeautifulSoup
try:
from domains.genius.config import GENIUS_DOWNLOAD_URL
from domains.genius.utils import (
make_suitable_url_parameters,
remove_punctuation_symbols,
)
from domains.songlyrics.songlyrics import get_song_text_from_songlyrics
# pylint:disable=bare-except
except: # noqa: E722 # Python 3.5 does not contain `ModuleNotFoundError`
from lyricsbot.domains.genius.config import GENIUS_DOWNLOAD_URL
from lyricsbot.domains.genius.utils import (
make_suitable_url_parameters,
remove_punctuation_symbols,
)
from lyricsbot.domains.songlyrics.songlyrics import get_song_text_from_songlyrics
# Message shown by genius.com when the lyrics of a song don't exist there.
LYRICS_DO_NOT_EXIST = u"\n Sorry, we didn't mean for that to happen!\n "
def format_request_data_url(author_song, title_song):
"""
Modify path components of URL.
"""
author_song = remove_punctuation_symbols(author_song)
title_song = remove_punctuation_symbols(title_song)
formatted_author_song = make_suitable_url_parameters(author_song)
formatted_title_song = make_suitable_url_parameters(title_song)
# url for current site needs author song with only its first character capitalized
capitalize_author_song = formatted_author_song.capitalize()
url = GENIUS_DOWNLOAD_URL.format(
capitalize_author_song, formatted_title_song
)
return url
def parse_lyrics(url):
"""
Parse URL to get song text.
"""
    page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
    lyrics_paragraph = soup.find('p')
    if lyrics_paragraph is None:
        # No lyrics markup on the page; let the caller fall back to songlyrics.
        return LYRICS_DO_NOT_EXIST
    full_lyrics_string = lyrics_paragraph.get_text()
    # 4096 characters is the maximum length of a single Telegram message.
    max_length_characters = 4096
    if len(full_lyrics_string) >= max_length_characters:
        full_lyrics_string = 'The song is not available, sorry.'
    return full_lyrics_string
def get_song_text_from_genius(author, title):
"""
Get song lyrics from genius.com.
"""
complete_text = parse_lyrics(format_request_data_url(author, title))
if LYRICS_DO_NOT_EXIST in complete_text:
return get_song_text_from_songlyrics(author, title)
return complete_text
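# A minimal usage sketch (the song is an assumed example, not from the repo):
if __name__ == '__main__':
    print(get_song_text_from_genius('Queen', 'Bohemian Rhapsody'))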
# File: vim/vimfiles/python3/vim_custom_actions.py (sharat87/lawn, MIT)
import vim
# Set up `vartabstop` so that tab-separated columns line up.
# Invoke with :TabsLineUp in a buffer of tab-separated text.
vim.command('command! TabsLineUp py3 ' + __name__ + '.tabs_line_up()')
def tabs_line_up():
    # Collect the field widths of every tab-containing line, then set each tab
    # stop to the widest field in that column plus 3 spaces of padding.
    # Note: zip(*lengths) stops at the shortest row, so ragged lines only
    # align up to their common column count.
    lengths = []
    for line in vim.current.buffer:
        if '\t' not in line:
            continue
        parts = line.split('\t')
        lengths.append([len(c) for c in parts])
    vim.current.buffer.options['vartabstop'] = ','.join(str(max(ls) + 3) for ls in zip(*lengths))
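# A quick standalone illustration of the width computation (sample data is an
# assumption; it is kept as a comment because `import vim` above only resolves
# inside Vim):
#
#     buffer = ['aa\tbbbb\tc', 'aaaa\tbb\tcc']
#     lengths = [[len(c) for c in line.split('\t')] for line in buffer]
#     ','.join(str(max(ls) + 3) for ls in zip(*lengths))  # -> '7,7,5'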
# File: RiverSizes/river_sizes_example.py (ulrickpsp/InterviewQuestions, MIT)
# You are given a two-dimensional array of potentially unequal height and width.
# It contains only 0s and 1s. This array represents a map: 0s are land, and 1s are water.
# A "river" on this map consists of any number of contiguous, adjacent water squares,
# where "adjacent" means "above", "below", "to the left of", or "to the right of"
# (that is, diagonal squares are not adjacent).
#
# Write a function which returns an array of the sizes of all rivers represented in the input matrix.
# Note that these sizes do not need to be in any particular order.
#
# For example:
#
# const input = [
# [1, 0, 0, 1, 0],
# [1, 0, 1, 0, 0],
# [0, 0, 1, 0, 1],
# [1, 0, 1, 0, 1],
# [1, 0, 1, 1, 0]
# ];
#
# riverSizes(input); // returns [1, 2, 2, 2, 5]
#
# Recursive method used to count the total number of adjacent 1's.
# The prints will help you understand how Python handles recursion.
# https://pythontutor.com/visualize.html#mode=display is a great tool to test recursion
#
def check(row, col, matrix):
print('Checking square: ' + str(row) + ',' + str(col))
    # Out of bounds, land (0), or already visited ('^'): contributes nothing.
    if row >= len(matrix) or row < 0 or col >= len(matrix[row]) or col < 0 or matrix[row][col] == 0 or matrix[row][col] == '^':
        return 0
if matrix[row][col] == 1:
        matrix[row][col] = '^'  # mark as visited
        print('Square ' + str(row) + ',' + str(col) + ' is 1 so we will check its surroundings and then go back to previous if necessary')
value = 1 + check(row + 1, col, matrix) + check(row - 1, col, matrix) + check(row, col + 1, matrix) + check(row, col - 1, matrix)
return value
#
# Method used to iterate over all the blocks.
# We check adjacent blocks for each block using recursion in the 'check' method.
#
def getRiverSizes(matrix):
    # Take the matrix as a parameter instead of relying on the global
    # river_map defined in the __main__ block below.
    _sizes = []
    for rowIndex in range(0, len(matrix)):
        for columnIndex in range(0, len(matrix[rowIndex])):
            if matrix[rowIndex][columnIndex] == 1:
                print('New check')
                new_size = check(rowIndex, columnIndex, matrix)
                _sizes.append(new_size)
    return _sizes
#
# Main script
#
if __name__ == '__main__':
river_map = [[1, 0, 1, 1, 0],
[1, 0, 1, 0, 0],
[0, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[1, 0, 0, 1, 0]]
    sizes = getRiverSizes(river_map)
print(sizes)
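    # For the river_map above this should print [2, 3, 2, 2, 1]: rivers are
    # reported in row-major scan order of their first-visited square.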
# File: backend/api/v1/Datasets.py (JanVargovsky/masters-thesis, MIT)
from flask_restplus import Namespace, Resource, fields, marshal_with
from infrastructure.DatasetUtils import get_datasets
api = Namespace('datasets')
resource_fields = {
'name': fields.String,
'type': fields.String,
'size': fields.Integer,
'createdAt': fields.DateTime('iso8601'),
'lastModifiedAt': fields.DateTime('iso8601'),
}
@api.route('')
class Datasets(Resource):
@marshal_with(resource_fields)
def get(self):
return list(get_datasets())
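# Minimal wiring sketch (an assumption, not shown in this file): mount the
# namespace on a Flask app so that GET /datasets returns the marshalled list.
if __name__ == '__main__':
    from flask import Flask
    from flask_restplus import Api
    app = Flask(__name__)
    Api(app).add_namespace(api, path='/datasets')
    app.run(debug=True)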
# File: airtech/helpers/tickets_notification.py (sam-karis/airtech, MIT)
from datetime import date, datetime, timedelta
from airtech.apps.tickets.models import Ticket
def get_tickets_remaining_one_day():
    # Get all awaiting tickets with less than 24 hours remaining to departure.
yesterday = date.today() - timedelta(days=1)
all_awaiting_tickets = Ticket.objects.filter(
notification_sent=False, status='Awaiting Boarding',
departure_date__gte=yesterday
)
tickets_to_send_notification = []
for ticket in all_awaiting_tickets:
ticket_date = ticket.departure_date
flight_time = ticket.flight.departure_time
date_time = datetime.combine(ticket_date, flight_time)
remaining_time = abs(date_time - datetime.now())
remaining_hours = remaining_time.total_seconds() / 3600.0
if remaining_hours <= 24:
tickets_to_send_notification.append(ticket)
ticket.notification_sent = True
ticket.save()
return tickets_to_send_notification
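# Illustrative driver (an assumption: this helper is presumably called from a
# scheduled task, and running it needs configured Django settings; the
# send_notification call is a hypothetical stand-in):
#
#     for ticket in get_tickets_remaining_one_day():
#         send_notification(ticket)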
# File: src/warp.py (yashgorana/lane-detection-advanced, MIT)
import cv2
import numpy as np
class Warp:
@staticmethod
def warp_image(img, tx_src, tx_dest, **kwargs):
img_size = (img.shape[1], img.shape[0])
tx_src = tx_src.astype(np.float32)
tx_dest = tx_dest.astype(np.float32)
# Calculate the transformation matrix and it's inverse transformation
M = cv2.getPerspectiveTransform(tx_src, tx_dest)
M_inv = cv2.getPerspectiveTransform(tx_dest, tx_src)
return cv2.warpPerspective(img, M, img_size, cv2.INTER_LINEAR), M, M_inv
@staticmethod
def unwarp_image(img, M_inv, **kwargs):
img_size = (img.shape[1], img.shape[0])
return cv2.warpPerspective(img, M_inv, img_size, cv2.INTER_LINEAR)
@staticmethod
    def get_default_warp_points():
        """Handpicked source and destination points for the warp transform
        (the pixel values suggest they were tuned for 1280x720 road images)."""
        tx_src = np.int32([[260, 670], [570, 460], [720, 460], [1045, 670]])
        tx_dst = np.int32([[200, 680], [200, 0], [1000, 0], [1000, 680]])
return (tx_src, tx_dst)
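# A minimal usage sketch (the image path is a hypothetical placeholder): warp a
# road image to a bird's-eye view, then map it back with the inverse transform.
if __name__ == '__main__':
    img = cv2.imread('test_images/road.jpg')
    tx_src, tx_dst = Warp.get_default_warp_points()
    warped, M, M_inv = Warp.warp_image(img, tx_src, tx_dst)
    restored = Warp.unwarp_image(warped, M_inv)
    cv2.imwrite('warped.jpg', warped)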
#!/usr/bin/env python
# File: maoyantop100/spider.py (agandong4/some_simple_demos, MIT)
# encoding: utf-8
'''
@author: agandong4
@license: (C) Copyright 2013-2019, Node Supply Chain Manager Corporation Limited.
@contact: agandong4@gmail.com
@software: garner
@file: spider.py
@time: 2019-03-12 21:39
@desc:
'''
import requests
from requests.exceptions import RequestException
import re
import json
from multiprocessing import Pool
def get_one_page(url):
try:
response = requests.get(url)
if response.status_code == 200:
return response.text
except RequestException:
return None
def parse_one_page(html):
pattern = re.compile('<dd>.*?board-index.*?">(.*?)</i>.*?data-src="(.*?)".*?name"><a'
'.*?">(.*?)</a></p>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
'.*?integer">(.*?)</i>.*?fraction">(.*?)</i></p>.*?</dd>',re.S)
items = re.findall(pattern,html)
for item in items:
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2],
            'actor': item[3].strip()[3:],
            'time': item[4].strip()[5:],
            'score': item[5] + item[6]
        }
def write_to_file(content):
    # Append one movie record per line as JSON; the `with` block closes the
    # file, so the original explicit f.close() was redundant.
    with open("maoyantop100movie.txt", 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + "\n")
def main(offset):
url = 'https://maoyan.com/board/4?offset=' + str(offset)
html = get_one_page(url)
for item in parse_one_page(html):
print(item)
write_to_file(item)
if __name__ == '__main__':
pool = Pool()
    pool.map(main,[i*10 for i in range(10)])
#!/usr/bin/env python3
# File: arne_application/scripts/application.py (fzi-forschungszentrum-informatik/ArNe, ECL-2.0/Apache-2.0)
################################################################################
# Copyright 2022 FZI Research Center for Information Technology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import rospy
import numpy as np
import transformations as tr
from datetime import datetime
from pathlib import Path
from arne_application.srv import Macro, MacroRequest, MacroResponse
from arne_skill_pipeline.msg import State
from nav_msgs.msg import Path as PathMessage
from arne_skill_pipeline.skill import Skill
from arne_skill_pipeline.rosbag_recorder import RosbagRecorder
from arne_skill_pipeline.trajectory_player import TrajectoryPlayer
from arne_skill_pipeline.trajectory_visualizer import TrajectoryVisualizer
from arne_skill_pipeline.trajectories import read_rosbag, write_rosbag, compute_trajectory, transform_state, transform_states, homogeneous
class Application(object):
""" High-level program logic for the ArNe platform
This node's primary purpose is to handle operations with macros, such as
recording and replaying them when triggered by the GUI. Note that there is
a direct, topic-based connection of the GUI to the robot's Cartesian
controller for streaming-based control of motion and gripper. There is no
need here to interpolate and plausibility-check those commands, which is
done by the controller itself.
Details on macros:
Global macros will converge to the end pose that the robot had in the
environment during macro recording. A use case is repetitive manipulation
from slightly different starts.
Local macros will move entirely local to the current robot position. A
possible application is scratching an itchy spot on the forearm or opening
a door handle.
Hybrid macros converge to the end position of macro recording but do not
    enforce the final orientation. They suit use cases where orientation
    doesn't matter, such as throwing things into a trash bin.
"""
def __init__(self):
# General config
rospy.init_node('arne_application')
self.macro_folder = '{}/.ros/recorded_macros'.format(os.path.expanduser('~'))
self.state_topic = 'cartesian_controller/state_output'
self.state_subscriber = rospy.Subscriber(self.state_topic, State, self.state_callback)
# Macro functionality
self.replay_publisher = rospy.Publisher('cartesian_controller/replay_input', State, queue_size=10)
self.macro_server = rospy.Service('~macro_mode', Macro, self.macro_mode)
self.motion_recorder = RosbagRecorder({self.state_topic: State})
self.macro_player = TrajectoryPlayer(self.replay_publisher)
self.log_execution = True
# Visualization
self.path_publisher = rospy.Publisher('~macro_motion', PathMessage, queue_size=10)
self.macro_visualizer = TrajectoryVisualizer(self.path_publisher)
rospy.loginfo("ArNe application ready.")
def macro_mode(self, req):
""" Principal callback for the handling of macro functionality
This is the primary interface to the web GUI, supporting all relevant
macro operations in one service callback via the rosbridge. Different
`modes` can be set in `req.mode`.
Macros are created in an internal two-step process: First, the current
robot (and gripper) motion is recorded to a .bag file on disk. This
.bag file is then parsed into a trajectory, from which a characteristic
profile is generalized (=skill) and saved to disk with the .dmp
extension. Both files are named according to the hash id of the macro
with the respective extension.
Macros are played with publishing to the specified replay topic of the
controller. Note that repeatedly starting playbacks is supported. The
new callback will just preempt the old one. Macros always start from
the current robot state. Playbacks can be paused/unpaused and stopped.
A stopped playback cannot be resumed.
Note the different, implicit coordinate systems of both data files:
The .bag file holds the robot state with respect to the robot's base
frame, whereas the data in the .dmp file are with respect to the
robot's pose when recording started. Transformations between both
frames assure that macros are generalized in a coordinate
        system-independent manner, and that robot control gets its reference
motion in the expected base frame for replay.
"""
# Colored output for macro operations
NORMAL = '\033[0m'
CYAN = '\033[1;36m'
GREEN = '\033[1;32m'
RED = '\033[1;31m'
YELLOW = '\033[1;33m'
BLUE = '\033[1;34m'
#--------------------------------------------------------------------------------
# Start recording
#--------------------------------------------------------------------------------
# Start recording robot motion into internal buffers.
# Do nothing on repeated calls.
if req.mode is MacroRequest.START_RECORDING:
if self.motion_recorder.start_recording(wait_for_data=True):
rospy.loginfo(f"{CYAN}START{NORMAL} macro recording")
#--------------------------------------------------------------------------------
# Stop recording
#--------------------------------------------------------------------------------
# Stop any active recording and save buffers to a .bag file.
# If that was successful, generalize the .bag file into a macro and
# save it with .dmp extension into the same directory.
elif req.mode is MacroRequest.STOP_RECORDING:
if self.motion_recorder.stop_recording(self.macro_folder, prefix=req.id):
bagfile = '{}/{}.bag'.format(self.macro_folder, req.id)
if Path(bagfile).is_file():
times, states = read_rosbag(bagfile, state_topic=self.state_topic)
# Display all recorded states with respect to the robot's
# end-effector frame when recording started. This is
# important for coordinate system-independent skill
# generalization.
transform_states(states, transform=tr.inverse_matrix(homogeneous(states[0])))
trajectory = compute_trajectory(times, states)
macro = Skill()
macro.learn_trajectory(trajectory)
macro.save_profile('{}/{}.dmp'.format(self.macro_folder, req.id))
rospy.loginfo(f"{RED}STOP{NORMAL} macro recording")
#--------------------------------------------------------------------------------
# Start playback
#--------------------------------------------------------------------------------
# Start playback of the selected macro if that exists.
# Macros always start from the current robot state.
elif req.mode is MacroRequest.START_PLAYBACK:
macrofile = '{}/{}.dmp'.format(self.macro_folder, req.id)
bagfile = '{}/{}.bag'.format(self.macro_folder, req.id)
if Path(macrofile).is_file() and Path(bagfile).is_file():
if req.duration <= 0.0:
return MacroResponse(False, "Invalid playback duration {}".format(req.duration))
trajectory = self.compute_macro_motion(macrofile, bagfile, req.playback_type, req.duration)
# Record the execution for later analysis
if self.log_execution:
stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S.%f')[:-3]
name = "{}_{}".format(req.id, stamp)
self.motion_recorder.start_recording(wait_for_data=True)
self.macro_player.play(
trajectory,
done_cb=lambda: self.motion_recorder.stop_recording(
self.macro_folder, prefix=name)
)
# Also save the desired trajectory in the bagfile format.
write_rosbag(trajectory, "{}/{}.traj".format(self.macro_folder, name), self.state_topic)
else:
self.macro_player.play(trajectory)
rospy.loginfo(f"{GREEN}START{NORMAL} macro playback")
else:
return MacroResponse(False, "Macro {} not found.".format(req.id))
#--------------------------------------------------------------------------------
# Stop playback
#--------------------------------------------------------------------------------
elif req.mode is MacroRequest.STOP_PLAYBACK:
self.macro_player.stop()
rospy.loginfo(f"{RED}STOP{NORMAL} macro playback")
#--------------------------------------------------------------------------------
# Pause/Unpause playback
#--------------------------------------------------------------------------------
# TODO: What happens when users move the robot with direct control
# during pause? Jumps might occur.
elif req.mode is MacroRequest.TOGGLE_PLAYBACK:
self.macro_player.toggle_pause()
rospy.loginfo(f"{YELLOW}TOGGLE{NORMAL} macro playback")
#--------------------------------------------------------------------------------
# Delete macro
#--------------------------------------------------------------------------------
elif req.mode is MacroRequest.DELETE_MACRO:
macrofile = '{}/{}.dmp'.format(self.macro_folder, req.id)
bagfile = '{}/{}.bag'.format(self.macro_folder, req.id)
try:
os.remove(macrofile)
os.remove(bagfile)
except OSError:
pass
#--------------------------------------------------------------------------------
# Show playback
#--------------------------------------------------------------------------------
# Show the macro's motion in RViz
elif req.mode is MacroRequest.SHOW_PLAYBACK:
macrofile = '{}/{}.dmp'.format(self.macro_folder, req.id)
bagfile = '{}/{}.bag'.format(self.macro_folder, req.id)
if Path(macrofile).is_file() and Path(bagfile).is_file():
trajectory = self.compute_macro_motion(macrofile, bagfile, req.playback_type, req.duration)
self.macro_visualizer.show(trajectory, frame=self.frame_id)
rospy.loginfo(f"{BLUE}SHOW{NORMAL} macro playback")
else:
return MacroResponse(False, "Macro {} not found.".format(req.id))
#--------------------------------------------------------------------------------
# Unknown mode
#--------------------------------------------------------------------------------
else:
rospy.loginfo("Unsupported macro mode")
return MacroResponse(False, "Unsupported macro mode.")
return MacroResponse(True, "Success.")
def compute_macro_motion(self, macrofile, bagfile, playback_type, duration):
""" Compute a trajectory for the given macro
Invalid playback types default to hybrid execution.
"""
macro = Skill()
macro.load_profile(macrofile)
_, recorded_states = read_rosbag(bagfile, state_topic=self.state_topic)
start = [0, 0, 0, 0, 0, 0, 1, self.state[7]] # identity
# Goals
local_goal = transform_state(recorded_states[-1], transform=tr.inverse_matrix(homogeneous(recorded_states[0])))
global_goal = transform_state(recorded_states[-1], transform=tr.inverse_matrix(homogeneous(self.state)))
scale = np.linalg.norm(global_goal[:3]) / np.linalg.norm(local_goal[:3])
# Local macros replicate the motion pattern in their local
# coordinate system and apply it in our current end-effector
# coordinate system.
if playback_type is MacroRequest.LOCAL_MACRO:
goal = local_goal
scale = 1.0
# Global macros drive to the globally recorded goal and need to
# map that into our current end-effector coordinate system for
# skill generation.
elif playback_type is MacroRequest.GLOBAL_MACRO:
goal = global_goal
# Hybrid macros drive to the globally recorded position but
# keep their local orientation.
else:
playback_type = MacroRequest.HYBRID_MACRO
goal = [scale * i for i in local_goal[:3]] + local_goal[3:]
# Compute how to move to the goal pose while keeping the
# macro's motion profile.
trajectory = macro.generate_new_trajectory(
start_state=start,
goal_state=goal,
duration=duration,
scale=scale)
# Hybrid macros need an additional step to adequately display
# the generated profile in the current end-effector frame.
# See the paper for details: https://arxiv.org/abs/2202.09221
if playback_type is MacroRequest.HYBRID_MACRO:
T1 = tr.inverse_matrix(homogeneous(local_goal))
p1 = tr.translation_from_matrix(T1)
T2 = tr.inverse_matrix(homogeneous(global_goal))
p2 = tr.translation_from_matrix(T2)
angle = tr.angle_between_vectors(p1, p2)
axis = tr.vector_product(p1, p2)
if np.linalg.norm(axis) > np.finfo(float).eps:
T_hybrid = tr.concatenate_matrices(tr.rotation_matrix(angle, axis), T1)
T = tr.concatenate_matrices(tr.inverse_matrix(T2), T_hybrid)
R = tr.quaternion_matrix(tr.quaternion_from_matrix(T)) # Rotation matrix
transform_states(trajectory.states, transform=R, position_only=True)
else:
# The global and the local goal are so close that there's
# no point for hybrid execution. We default to local macro
# execution instead.
pass
# Display the states back in the robot's base frame for control.
transform_states(trajectory.states, transform=homogeneous(self.state))
return trajectory
def state_callback(self, state):
""" Keep track of the robot's current state
"""
self.frame_id = state.header.frame_id
self.state = [
state.pose.position.x,
state.pose.position.y,
state.pose.position.z,
state.pose.orientation.x,
state.pose.orientation.y,
state.pose.orientation.z,
state.pose.orientation.w,
state.gripper.data
]
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
print("\ndone")
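# For reference, a minimal sketch of what `homogeneous` (imported from
# arne_skill_pipeline.trajectories) is assumed to do with the
# [x, y, z, qx, qy, qz, qw, gripper] state layout from state_callback: build
# the 4x4 pose matrix. This is an illustrative assumption, kept as a comment;
# it is not the package's actual implementation.
#
#     def homogeneous_sketch(state):
#         # tr.quaternion_matrix expects (w, x, y, z) ordering
#         T = tr.quaternion_matrix([state[6], state[3], state[4], state[5]])
#         T[:3, 3] = state[:3]
#         return T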
if __name__ == '__main__':
with Application() as app:
while not rospy.is_shutdown():
rospy.spin()
# File: string/myAtoi.py (ZeddShi/alg-py, MIT)
# (Problem statement translated from Chinese.)
# Implement an atoi function that converts a string to an integer.
# The function first discards as many leading space characters as necessary
# until the first non-space character is found. Then:
# If the first non-space character is a plus or minus sign, combine it with as
# many consecutive digit characters as possible to form a signed integer.
# If the first non-space character is a digit, combine it directly with the
# consecutive digits that follow to form an integer.
# The string may contain extra characters after the valid integer part; these
# are ignored and must not affect the result.
# Note: if the first non-space character is not a valid integer character, or
# the string is empty or contains only whitespace, no conversion is performed.
# Whenever no valid conversion can be performed, return 0.
# Hints:
# The only whitespace character in this problem is the space ' '.
# Assume the environment can only store 32-bit signed integers, with range
# [-2**31, 2**31 - 1]. If the value falls outside this range, return
# INT_MAX (2**31 - 1) or INT_MIN (-2**31).
def my_atoi(s):
if not s:
return 0
n = len(s)
s_index = 0
while s_index < n and s[s_index] == ' ':
s_index += 1
if s_index >= n:
return 0
INT_MAX = 2**31 - 1
INT_MIN = -2**31
if s[s_index] not in ('+', '-') and not s[s_index].isdigit():
return 0
positive = True
if s[s_index] in ('+', '-'):
if s[s_index] == '-':
positive = False
s_index += 1
val = ''
while s_index < n and s[s_index].isdigit():
val += s[s_index]
s_index += 1
if not val:
return 0
val = int(val)
if not positive:
val = -val
if val > INT_MAX:
return INT_MAX
elif val < INT_MIN:
return INT_MIN
    return val
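# A few sanity checks (assumed examples consistent with the spec above):
if __name__ == '__main__':
    assert my_atoi('42') == 42
    assert my_atoi('   -42') == -42
    assert my_atoi('4193 with words') == 4193
    assert my_atoi('words and 987') == 0
    assert my_atoi('-91283472332') == -2147483648
    print('all checks passed')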
# File: myQuery.py (enessayaci/semantic-web, Apache-2.0)
import owl as owl
import rdflib
import json
from flask import Flask, render_template, jsonify, request
from rdflib.plugins.sparql import prepareQuery
from rdflib.graph import Graph
def stringFilter(s):
    # Strip everything up to and including the first '('. With end = "",
    # s.rfind(end) equals len(s), so the slice runs to the end of the string;
    # if there is no '(', s.find returns -1 and the whole string is kept.
    start = "("
    end = ""
    return s[s.find(start) + len(start):s.rfind(end)]
def search(key):
g = Graph()
result = g.parse('mydataset.rdf')
sorgum = prepareQuery('''
SELECT ?personName ?companyName ?workedOn WHERE
{
?employee ontology:workedFor ?company.
?company ontology:companyName ?companyName.
?company ontology:workedOn ?workedOn.
?employee ontology:personName ?personName
FILTER (?workedOn="''' + key + '''"^^xsd:string)
}''',
initNs={"ontology": 'http://localhost:8080/mydataset/'})
queryResults = []
counter = 0
    # Group the rows by person: each entry is [personName, [companyName, ...]].
    for i in result.query(sorgum):
        a = stringFilter(i[0])  # personName
        b = stringFilter(i[1])  # companyName
        element = [a, [b]]
        for elem in queryResults:
            if a == elem[0]:
                elem[1].append(b)
                counter = 1
        if counter != 1:
            queryResults.append(element)
        counter = 0
    return queryResults
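# A minimal usage sketch (the search key is an assumed example):
if __name__ == '__main__':
    for person, companies in search('semantic web'):
        print(person, '->', companies)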
# File: sipnpuff.py (caternuson/SipPuff, MIT)
import time
import board, busio
import adafruit_mprls
i2c = busio.I2C(board.SCL, board.SDA)
mpr = adafruit_mprls.MPRLS(i2c)
THRESH_LOW = 10 # delta in hPa
THRESH_HIGH = 10 # delta in hPa
def pressure_sensor_init(count=10, delay=0.1):
reading = 0
for _ in range(count):
reading += mpr.pressure
time.sleep(delay)
reading /= count
return reading - THRESH_LOW, reading + THRESH_HIGH
sip_threshold , puff_threshold = pressure_sensor_init()
puff_count = sip_count = 0
while True:
    # driver checks conversion ready status, so it's OK to run this as fast as needed
pressure = mpr.pressure
# PUFF
if pressure > puff_threshold:
while pressure > puff_threshold:
pressure = mpr.pressure
puff_count += 1
time.sleep(0.005)
#
# do something based on puff_count
#
print("puff count = {}".format(puff_count))
puff_count = 0
# SIP
if pressure < sip_threshold:
while pressure < sip_threshold:
pressure = mpr.pressure
sip_count += 1
time.sleep(0.005)
#
# do something based on sip_count
#
print("sip count = {}".format(sip_count))
        sip_count = 0
# File: onapy/detector3d.py (xiong-jie-y/onapy, MIT)
from collections import deque
from onapy.tracker2d import TrackDetectionFusedTracker
from onapy.tracker3d import create_tracker, get_tracker_names
import time
import click
import cv2
(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
print(cv2.__version__)
import cupoch as cph
from mmcv.runner import checkpoint
import numpy as np
import open3d as o3d
from remimi.datasets.open3d import Open3DReconstructionDataset
from remimi.visualizers.sixdof import OnahoPointCloudVisualizer
from mmdet.apis import inference_detector, init_detector
def compute_projection(points_3D, internal_calibration):
    """Project an (N, 3) array of camera-frame points to (2, N) pixel
    coordinates using the pinhole intrinsic matrix `internal_calibration`."""
    points_3D = points_3D.T
    projections_2d = np.zeros((2, points_3D.shape[1]), dtype='float32')
    camera_projection = internal_calibration.dot(points_3D)
    projections_2d[0, :] = camera_projection[0, :] / camera_projection[2, :]
    projections_2d[1, :] = camera_projection[1, :] / camera_projection[2, :]
    return projections_2d
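# A small sanity check (illustrative values, not from the repo): with a pinhole
# intrinsic matrix, a point on the optical axis projects to the principal point.
#
#     K = np.array([[600.0, 0.0, 320.0],
#                   [0.0, 600.0, 240.0],
#                   [0.0, 0.0, 1.0]])
#     compute_projection(np.array([[0.0, 0.0, 1.0]]), K)  # -> [[320.], [240.]]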
class OnahoBoundingBox3DDetector:
def __init__(self, intrinsic, K):
self.intrinsic = intrinsic
self.K = K
def filter_by_instance_mask(self, pcd, result):
start_pf_filter = time.time()
points_in_seg = []
colors = []
start_proj = time.time()
proj_points = compute_projection(np.array(pcd.points), self.K)
end_proj = time.time()
print(f"proj: {end_proj - start_proj}")
segmentations = result.mask
print(len(pcd.points))
for proj_point, point in zip(proj_points.T, pcd.points):
# if point[2] > 0.8:
# # colors.append([0 , 0, 1.0])
# continue
found = False
y = int(proj_point[1])
x = int(proj_point[0])
for seg_list in segmentations:
for seg in seg_list:
                    # import IPython; IPython.embed()
if seg[y, x]:
found = True
if found:
points_in_seg.append(point)
# colors.append([1.0, 0, 0])
# cv2.circle(color_image,(x,y), 5, (0, 0, 255), -1)
else:
pass
# colors.append([0 , 0, 1.0])
# cv2.circle(color_image,(x,y), 5, (255, 0, 0), -1)
end_pr_filter = time.time()
print(f"Point Cloud Filtering: {end_pr_filter - start_pf_filter}")
return points_in_seg
def filter_by_bounding_box(self, pcd, result):
start_pf_filter = time.time()
points_in_seg = []
colors = []
start_proj = time.time()
proj_points = compute_projection(np.array(pcd.points), self.K)
end_proj = time.time()
print(f"proj: {end_proj - start_proj}")
top_confidence_one = result.bounding_box
print(len(pcd.points))
for proj_point, point in zip(proj_points.T, pcd.points):
# if point[2] > 0.8:
# # colors.append([0 , 0, 1.0])
# continue
found = False
y = int(proj_point[1])
x = int(proj_point[0])
if top_confidence_one is not None and top_confidence_one[0] < x < top_confidence_one[2] and top_confidence_one[1] < y < top_confidence_one[3]:
found = True
if found:
points_in_seg.append(point)
# colors.append([1.0, 0, 0])
# cv2.circle(color_image,(x,y), 5, (0, 0, 255), -1)
else:
pass
# colors.append([0 , 0, 1.0])
# cv2.circle(color_image,(x,y), 5, (255, 0, 0), -1)
end_pr_filter = time.time()
print(f"Point Cloud Filtering: {end_pr_filter - start_pf_filter}")
return points_in_seg
def get_onaho_3d_bounding_box(self, color_image, depth_image, result):
# color_image = cv2.imread(color_file)
# depth = o3d.io.read_image(depth_image)
depth = o3d.geometry.Image(depth_image)
color = o3d.geometry.Image(color_image)
# if project_semantic_to_point_cloud:
# color = o3d.geometry.Image(seg_image)
# else:
# color = o3d.io.read_image(color_file)
rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
color,
depth,
depth_scale=1000,
depth_trunc=0.5,
convert_rgb_to_intensity=False)
pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
rgbd_image, self.intrinsic)
pcd = pcd.voxel_down_sample(voxel_size = 0.01)
points_in_seg = []
if result.bounding_box is not None:
points_in_seg = self.filter_by_instance_mask(pcd, result)
# points_in_seg = self.filter_by_bounding_box(pcd, result)
start_clustering = time.time()
# pcd.colors = o3d.utility.Vector3dVector(np.array(colors))
# print(len(points_in_seg))
made_from_2d_detection = False
if len(points_in_seg) > 0:
target_points = np.array(points_in_seg)
made_from_2d_detection = True
else:
target_points = np.array(pcd.points)
gpu_cloud = cph.geometry.PointCloud(target_points)
labels = np.array(
gpu_cloud.cluster_dbscan(eps=0.04, min_points=30, print_progress=True).cpu())
from collections import defaultdict
groups = defaultdict(list)
for i, label in enumerate(labels):
groups[label].append(target_points[i])
# print(len(groups))
min_distance = 1000000000
closest_bounding_box = None
for id, points in groups.items():
if len(points) < 4:
continue
try:
np_points = np.array(points)
bounding_box = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(np_points)).get_oriented_bounding_box()
except RuntimeError:
continue
camera_to_obj_distance = np.linalg.norm(bounding_box.center)
if min_distance > camera_to_obj_distance:
min_distance = camera_to_obj_distance
closest_bounding_box = bounding_box
end_clustering = time.time()
print(f"Clustering: {end_clustering - start_clustering}")
# seg_image = cv2.cvtColor(seg_image, cv2.COLOR_RGB2BGR)
        return closest_bounding_box, made_from_2d_detection, pcd
# File: faceR/camera/opencv_capture.py (hritools/faceR, MIT)
import time
import cv2
import logging
import signal
from threading import Thread, Lock, Condition
import numpy
def exit_gracefully(sig, frame):
global running
running = False
logger.info('Ctrl+C detected: exit procedure commenced!')
class WebcamVideoStream:
def __init__(self, logger, src=0):
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
self.stopped = False
self.should_capture_frame = Condition()
self.logger = logger
self.n = 0
def start(self):
Thread(target=self.update, args=()).start()
return self
    def update(self):
        # Capture loop: grab one frame, then block until read() asks for the
        # next one. Uses the logger passed to the constructor.
        while not self.stopped:
            self.n += 1
            self.logger.debug('taking a shot! %s' % self.n)
            self.frame = cv2.cvtColor(self.stream.read()[1], cv2.COLOR_BGR2RGB)
            self.logger.debug('took a shot! %s' % self.n)
            with self.should_capture_frame:
                self.should_capture_frame.wait()
    def read(self):
        # Return a copy of the latest frame and wake the capture thread.
        to_return = numpy.copy(self.frame)
        with self.should_capture_frame:
            self.should_capture_frame.notify_all()
        self.logger.debug('returning a frame %s' % self.n)
        return to_return
def stop(self):
self.stopped = True
def get_image(camera_conf):
    """
    Yield frames from the configured video device at the configured framerate.
    :return: generator of frames
    """
global running
global logger
logger = logging.getLogger('camera')
running = True
signal.signal(signal.SIGINT, exit_gracefully)
width = camera_conf['width']
height = camera_conf['height']
device = camera_conf['video device']
interval = 1 / camera_conf['framerate']
vs = WebcamVideoStream(logger, int(device))
vs.start()
# infer interval between frames from the framerate
logger.setLevel(logging.DEBUG)
logger.info('using built-in camera')
# logger.info(cv2.getBuildInformation())
logger.info('interval between frames: ' + str(interval) + 'seconds')
end_time = 0
while running:
time_took = time.time()
frame = vs.read()
frame = cv2.resize(frame, (width, height))
cur_time = time.time()
if (interval - (cur_time - end_time)) > 0:
logger.debug('Sleeping for: ' + str((interval - (time.time() - end_time)) * 1000.0) + ' ms')
time.sleep(interval - (cur_time - end_time))
time_took = int((time.time() - time_took) * 1000)
logger.debug('%s ms\t taking shot' % time_took)
end_time = time.time()
yield frame
vs.stop()
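# Illustrative consumer (the config values are assumptions matching the keys
# read inside get_image above):
if __name__ == '__main__':
    conf = {'width': 640, 'height': 480, 'video device': 0, 'framerate': 10}
    for frame in get_image(conf):
        pass  # frames arrive here until Ctrl+C flips `running`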
# File: test.py (StephaneBranly/Morse-tool, MIT)
import pyaudio
import time
import numpy as np
from matplotlib import pyplot as plt
import scipy.signal as signal
print("Start run")
CHANNELS = 1
RATE = 44000
p = pyaudio.PyAudio()
fulldata = np.array([])
dry_data = np.array([])
def main():
stream = p.open(format=pyaudio.paFloat32,
channels=CHANNELS,
rate=RATE,
output=True,
input=True,
stream_callback=callback)
stream.start_stream()
while stream.is_active():
time.sleep(15)
stream.stop_stream()
stream.close()
print("mic closed")
print(fulldata)
print("traitement en cours")
result = []
result2 = []
retraitement = []
retraitement2 = []
average_tab = []
average = 0
somme = 0
somme_state = 0
last_state = 0
size_cut = 100
i = 0
for x in fulldata:
if(i == size_cut):
average = somme/size_cut
for z in range(0, size_cut):
average_tab.append(average)
somme = 0
i = 0
if (last_state == 0 and average > 0.1):
result.append([0, somme_state])
last_state = 1
somme_state = 0
elif(last_state == 1 and average < 0.1):
result.append([1, somme_state])
last_state = 0
somme_state = 1
somme = somme+abs(x)
i = i+1
somme_state = somme_state+1
for z in result:
for x in range(0, z[1]):
result2.append(z[0])
numpydata = np.hstack(fulldata)
numpydata_bi = np.hstack(result2)
numpydata_avr = np.hstack(average_tab)
plt.plot(numpydata)
plt.plot(numpydata_bi)
plt.plot(numpydata_avr)
plt.title("mic")
plt.show()
print(str(result))
print("End")
p.terminate()
def callback(in_data, frame_count, time_info, flag):
global b, a, fulldata, dry_data, frames
audio_data = np.fromstring(in_data, dtype=np.float32)
dry_data = np.append(dry_data, audio_data)
# do processing here
fulldata = np.append(fulldata, audio_data)
return (audio_data, pyaudio.paContinue)
main()
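
# --- Decoding sketch (added; not in the original script) ---
# `result` holds [state, run_length] pairs, where state 1 marks a tone run.
# A minimal, assumed decoding rule: short tone runs are dots, long ones are
# dashes; the threshold below is a guess and would need calibration.
def runs_to_morse(runs, dot_max_samples=2000):
    symbols = []
    for state, length in runs:
        if state == 1:
            symbols.append('.' if length <= dot_max_samples else '-')
    return ''.join(symbols)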
| 23.231579 | 57 | 0.56638 | 278 | 2,207 | 4.356115 | 0.348921 | 0.057803 | 0.02725 | 0.019818 | 0.039637 | 0.039637 | 0 | 0 | 0 | 0 | 0 | 0.02973 | 0.329406 | 2,207 | 94 | 58 | 23.478723 | 0.788514 | 0.008156 | 0 | 0.105263 | 0 | 0 | 0.020119 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.065789 | 0 | 0.105263 | 0.078947 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f9c7e7500eea3f7aa0a8f93c32f67e3d1104c16 | 7,102 | py | Python | film_mapper.py | yuriynefedov/Film-Mapper | 96e39f953ec508458c6925ce77c365f6f38bb0d8 | [
"MIT"
] | null | null | null | film_mapper.py | yuriynefedov/Film-Mapper | 96e39f953ec508458c6925ce77c365f6f38bb0d8 | [
"MIT"
] | null | null | null | film_mapper.py | yuriynefedov/Film-Mapper | 96e39f953ec508458c6925ce77c365f6f38bb0d8 | [
"MIT"
] | 1 | 2021-03-13T17:22:41.000Z | 2021-03-13T17:22:41.000Z | """
Film Mapper Module

Use this module to map the nearest movie filming spots for any location and year.

(c) Yuriy Nefedov
"""

import random

import folium
import geopy
import pandas
from haversine import haversine

geolocator = geopy.Nominatim(user_agent="UCU OP Lab 2 Nefedov")

CLOSEST_FILENAME = "closest10.csv"


def gather_line_info(line):
    """
    Gathers a movie's name, release year and location from a locations.list line.
    """
    try:
        movie_name = line.split("\"")[1]
    except IndexError:
        movie_name = line
    year = line[line.index("(") + 1:line.index(")")]
    after_year = line[line.index(")") + 1:].strip()
    try:
        after_year = after_year.split("}")[1].strip()
    except IndexError:
        pass
    try:
        after_year = after_year.split("(")[0].strip()
    except IndexError:
        pass
    location = after_year
    # escape commas so the record can be written as a CSV line
    location = location.replace(",", "_comma_")
    return movie_name, year, location
def country_from_coordinates(coord_st, last_how_many=1):
    """
    By the given coordinates, identifies the country (and, if requested, a more
    detailed region) of the location.
    """
    full_address = geolocator.reverse(coord_st, language="en").address
    return ",".join(full_address.split(",")[-last_how_many:]).strip()


def filter_closest_movies(df: pandas.DataFrame, location: str, year, max_n=10):
    """
    Filters up to max_n movies of the given year nearest to the given location
    and writes them into CLOSEST_FILENAME.
    """
    for n_regions in range(3, 0, -1):
        country = country_from_coordinates(location, last_how_many=n_regions)
        print(country)
        if "United States" in country:
            country = country.replace("United States", "USA")
        elif "United Kingdom" in country:
            country = country.replace("United Kingdom", "UK")
        filtered_df = df[(df["Year"] == str(year)) & (df["Location"].str.endswith(country))]
        len_df = len(list(filtered_df["Movie"]))
        print("Tried", n_regions, "regions, found", len_df, "movies")
        if len_df >= max_n:
            break

    distances_to_input = []
    latitudes = []
    longitudes = []
    addresses = []
    for item in filtered_df["Location"]:
        try:
            if item not in addresses:
                geocoded = geolocator.geocode(item)
                latitude, longitude = [float(x.strip()) for x in location.split(", ")]
                distances_to_input.append(haversine((geocoded.latitude, geocoded.longitude), (latitude, longitude)))
                latitudes.append(geocoded.latitude)
                longitudes.append(geocoded.longitude)
            else:
                # duplicate address: mark it so it gets dropped below
                distances_to_input.append(None)
                latitudes.append(None)
                longitudes.append(None)
            addresses.append(item)
        except AttributeError:
            print("ERROR in", item)
            distances_to_input.append(None)
            latitudes.append(None)
            longitudes.append(None)
    filtered_df["Distance"] = distances_to_input
    filtered_df["Latitude"] = latitudes
    filtered_df["Longitude"] = longitudes
    print("Before drop:", filtered_df)
    filtered_df.dropna(inplace=True, how="any")

    movie_names = []
    movie_rows = []
    filtered_df = filtered_df.sort_values(by=["Distance"])
    print(filtered_df)
    for index, row in filtered_df.head(1000).iterrows():
        print(row)
        if not any(movie_row["Movie"] == row["Movie"] for movie_row in movie_rows) \
                and row["Movie"] != "NaN" and row["Longitude"] is not None:
            movie_rows.append(row)
            movie_names.append(row["Movie"])
        if len(movie_names) >= max_n:
            break
    small_df = pandas.DataFrame(movie_rows)
    small_df.head(max_n).to_csv(CLOSEST_FILENAME)


def read_data(filename):
    """
    Parses a locations.list file into locations.csv and loads it into a DataFrame.
    """
    print("Reading CSV...")
    data = open(filename, "r", encoding="latin1")
    csv_data = open("locations.csv", "w")
    for line in data.readlines()[14:-1]:
        line_st = ",".join(gather_line_info(line)) + "\n"
        csv_data.write(line_st)
    csv_data.close()
    data.close()
    df = pandas.read_csv("locations.csv", names=["Movie", "Year", "Location"], error_bad_lines=False,
                         warn_bad_lines=False)
    new_locations = []
    for item in df["Location"]:
        try:
            new_locations.append(item.replace("_comma_", ","))
        except AttributeError:
            new_locations.append(item)
    df["Location"] = new_locations
    return df


def change_coords_a_bit(coords, max_delta=0.05):
    """
    Fluctuates the given coordinates by up to max_delta.
    The goal is to avoid overlapping pins on the map.
    """
    new_x = coords[0] + random.random() * max_delta * random.choice([-1, 1])
    new_y = coords[1] + random.random() * max_delta * random.choice([-1, 1])
    return new_x, new_y


def build_map(closest_csv, aim_location):
    """
    Builds the map of a given location and pinpoints the closest filming spots.
    """
    print("Building a map at", aim_location, "...")
    data = pandas.read_csv(closest_csv, error_bad_lines=False).head(100)
    print("Build data:", data)
    movie_map = folium.Map(location=aim_location, zoom_start=10)
    fg = folium.FeatureGroup(name="Map")
    for index, row in data.iterrows():
        lt, ln = row["Latitude"], row["Longitude"]
        try:
            fg.add_child(folium.Marker(location=change_coords_a_bit([lt, ln]),
                                       popup="{} ({})".format(row["Movie"], row["Year"]), icon=folium.Icon()))
        except ValueError:
            # NaN coordinates: skip this pin
            pass
    fg.add_child(folium.Marker(location=aim_location,
                               popup="Selected Location", icon=folium.Icon(color="red")))
    movie_map.add_child(fg)
    movie_map.save('map.html')
    print("Map saved at map.html")


def main():
    """
    Main event sequence. Responsible for connecting the functions together.
    """
    df = read_data("locations.list")
    print(df.head(10))
    print("Read.\n_______")
    year = int(input("Year: "))
    location = input("Lat Long: ")
    filter_closest_movies(df, location, year)
    build_map(CLOSEST_FILENAME, [float(coord.strip()) for coord in location.split(",")])


if __name__ == "__main__":
    main()
| 33.658768 | 141 | 0.618558 | 877 | 7,102 | 4.835804 | 0.253136 | 0.040085 | 0.021221 | 0.023579 | 0.098562 | 0.090073 | 0.046687 | 0.046687 | 0.030653 | 0.030653 | 0 | 0.008952 | 0.245001 | 7,102 | 210 | 142 | 33.819048 | 0.781984 | 0.185441 | 0 | 0.165414 | 0 | 0.015038 | 0.093551 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0.022556 | 0.037594 | 0 | 0.120301 | 0.090226 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f9d2bcfaec5fec0db150520a4bddebb50943355 | 13,291 | py | Python | inventory/inventory/doctype/packing_list_receipt_validator/packing_list_receipt_validator.py | riconova92/inventory | 7cc4f49bda31f802af36ee4ea6eb43092b5094a7 | [
"MIT"
] | null | null | null | inventory/inventory/doctype/packing_list_receipt_validator/packing_list_receipt_validator.py | riconova92/inventory | 7cc4f49bda31f802af36ee4ea6eb43092b5094a7 | [
"MIT"
] | null | null | null | inventory/inventory/doctype/packing_list_receipt_validator/packing_list_receipt_validator.py | riconova92/inventory | 7cc4f49bda31f802af36ee4ea6eb43092b5094a7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Myme and contributors
# For license information, please see license.txt

from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.model.mapper import get_mapped_doc

form_grid_templates = {
    "packing_list_data_unchecked": "templates/includes/item_grid_packing_list.html",
    "packing_list_data_checked": "templates/includes/item_grid_packing_list.html",
    "packing_list_data_missing": "templates/includes/item_grid_packing_list.html"
}


class PackingListReceiptValidator(Document):
    def validate_item(self):
        if self.item_code_variant_depan and self.yard_atau_meter and self.colour and self.warehouse and self.qty_roll:
            checker = False
            for d in self.get("packing_list_data_unchecked"):
                if d.item_code_variant == self.item_code_variant_depan and d.yard_atau_meter_per_roll == self.yard_atau_meter and d.colour == self.colour and d.warehouse == self.warehouse:
                    if self.qty_roll > 0:
                        checker = True
                        ch = self.append('packing_list_data_checked', {})
                        ch.item_code_variant = d.item_code_variant
                        ch.item_name = d.item_name
                        ch.parent_item = d.parent_item
                        ch.yard_atau_meter_per_roll = d.yard_atau_meter_per_roll
                        ch.colour = d.colour
                        ch.inventory_uom = d.inventory_uom
                        ch.group = d.group
                        ch.keterangan_group = d.keterangan_group
                        ch.warehouse = d.warehouse
                        ch.from_data = d.from_data
                        if self.qty_roll >= d.total_roll:
                            ch.total_roll = d.total_roll
                            ch.total_yard_atau_meter = d.total_yard_atau_meter
                            self.qty_roll = self.qty_roll - ch.total_roll
                            self.remove(d)
                        else:
                            ch.total_roll = self.qty_roll
                            ch.total_yard_atau_meter = self.qty_roll * self.yard_atau_meter
                            d.total_roll = d.total_roll - self.qty_roll
                            d.total_yard_atau_meter = d.total_yard_atau_meter - ch.total_yard_atau_meter
                            self.qty_roll = 0
            if self.qty_roll > 0:
                if checker:
                    frappe.msgprint("The item quantity exceeds what is recorded on the Packing List Receipt. The excess will be added to Missing.")
                else:
                    frappe.msgprint("The item is not in the Packing List, so it is added to the Missing table.")
                self.add_item()
            self.yard_atau_meter = 0
            self.qty_roll = 0
            self.colour = ""
        else:
            frappe.throw("The item data is not completely filled in.")

    def validate_pcs(self):
        if self.item_code_pcs and self.warehouse_pcs and self.qty_pcs:
            checker = False
            for d in self.get("packing_list_pcs_unchecked"):
                if d.item_code_pcs == self.item_code_pcs and d.warehouse_pcs == self.warehouse_pcs and d.total_pcs == self.qty_pcs:
                    if self.qty_pcs > 0:
                        checker = True
                        ch = self.append('packing_list_pcs_checked', {})
                        ch.item_code_pcs = d.item_code_pcs
                        ch.item_name_pcs = d.item_name_pcs
                        ch.parent_item_pcs = d.parent_item_pcs
                        ch.total_pcs = d.total_pcs
                        ch.uom_pcs = d.uom_pcs
                        ch.warehouse_pcs = d.warehouse_pcs
                        ch.from_pcs = d.from_pcs
                        if self.qty_pcs >= d.total_pcs:
                            ch.total_pcs = d.total_pcs
                            self.qty_pcs = self.qty_pcs - ch.total_pcs
                            self.remove(d)
                        else:
                            ch.total_pcs = self.qty_pcs
                            d.total_pcs = d.total_pcs - self.qty_pcs
                            self.qty_pcs = 0
            if self.qty_pcs > 0:
                if checker:
                    frappe.msgprint("The item quantity exceeds what is recorded on the Packing List Receipt. The excess will be added to Missing.")
                self.add_pcs()
            self.yard_atau_meter = 0
            self.qty_roll = 0
            self.colour = ""
        else:
            frappe.throw("The item data is not completely filled in.")

    def return_all_checked(self):
        # iterate over copies, since self.remove() mutates the child tables
        for d in list(self.get("packing_list_data_checked")):
            if d.is_return:
                ch = self.append('packing_list_data_unchecked', {})
                ch.item_code_variant = d.item_code_variant
                ch.item_name = d.item_name
                ch.parent_item = d.parent_item
                ch.yard_atau_meter_per_roll = d.yard_atau_meter_per_roll
                ch.colour = d.colour
                ch.inventory_uom = d.inventory_uom
                ch.group = d.group
                ch.keterangan_group = d.keterangan_group
                ch.warehouse = d.warehouse
                ch.from_data = d.from_data
                ch.total_roll = d.total_roll
                ch.total_yard_atau_meter = d.total_yard_atau_meter
                self.remove(d)
        for d in list(self.get("packing_list_pcs_checked")):
            if d.is_return:
                ch = self.append('packing_list_pcs_unchecked', {})
                ch.item_code_pcs = d.item_code_pcs
                ch.item_name_pcs = d.item_name_pcs
                ch.parent_item_pcs = d.parent_item_pcs
                ch.total_pcs = d.total_pcs
                ch.uom_pcs = d.uom_pcs
                ch.warehouse_pcs = d.warehouse_pcs
                ch.from_pcs = d.from_pcs
                self.remove(d)

    def on_submit(self):
        if self.get("from_packing_list_receipt"):
            plr = frappe.get_doc("Packing List Receipt", self.get("from_packing_list_receipt"))
            if plr:
                if plr.is_check == 1:
                    frappe.throw("The Packing List Receipt has already been validated.")
                else:
                    plr.is_check = 1
            else:
                frappe.throw("The Packing List Receipt does not exist.")
        else:
            frappe.throw("Fetch a Packing List Receipt first.")

    def add_item(self):
        count = 0
        if self.item_code_variant_depan and self.yard_atau_meter and self.colour and self.warehouse:
            master_item = frappe.get_doc("Item", self.item_code_variant_depan)
            parent_item = master_item.variant_of
            item_name = master_item.item_name
            if self.get("packing_list_data_missing"):
                for i in self.packing_list_data_missing:
                    if self.group_prefix and self.group_code:
                        if i.item_code_variant == self.item_code_variant_depan and i.yard_atau_meter_per_roll == self.yard_atau_meter and i.warehouse == self.warehouse and i.colour == self.colour and i.group == (self.group_prefix + "." + self.group_code) and i.inventory_uom == self.inventory_uom:
                            count = 1
                    else:
                        if i.item_code_variant == self.item_code_variant_depan and i.yard_atau_meter_per_roll == self.yard_atau_meter and i.warehouse == self.warehouse and i.colour == self.colour and i.inventory_uom == self.inventory_uom and i.group == "":
                            count = 1
                if count == 1:
                    # the item is already in Missing: increase its totals
                    for i in self.packing_list_data_missing:
                        if self.group_prefix and self.group_code:
                            if i.item_code_variant == self.item_code_variant_depan and i.yard_atau_meter_per_roll == self.yard_atau_meter and i.warehouse == self.warehouse and i.colour == self.colour and i.group == (self.group_prefix + "." + self.group_code) and i.inventory_uom == self.inventory_uom:
                                new_total_yard_atau_meter = i.total_yard_atau_meter
                                new_total_roll = i.total_roll
                                i.total_roll = new_total_roll + self.qty_roll
                                i.total_yard_atau_meter = new_total_yard_atau_meter + (self.yard_atau_meter * self.qty_roll)
                        else:
                            if i.item_code_variant == self.item_code_variant_depan and i.yard_atau_meter_per_roll == self.yard_atau_meter and i.warehouse == self.warehouse and i.colour == self.colour and i.inventory_uom == self.inventory_uom and i.group == "":
                                new_total_yard_atau_meter = i.total_yard_atau_meter
                                new_total_roll = i.total_roll
                                i.total_roll = new_total_roll + self.qty_roll
                                i.total_yard_atau_meter = new_total_yard_atau_meter + (self.yard_atau_meter * self.qty_roll)
                else:
                    if self.group_prefix and self.group_code:
                        pp_so = self.append('packing_list_data_missing', {})
                        pp_so.item_code_variant = self.item_code_variant_depan
                        pp_so.yard_atau_meter_per_roll = self.yard_atau_meter
                        pp_so.total_yard_atau_meter = (self.yard_atau_meter * self.qty_roll)
                        pp_so.total_roll = self.qty_roll
                        pp_so.group = self.group_prefix + "." + self.group_code
                        pp_so.parent_item = parent_item
                        pp_so.item_name = item_name
                        pp_so.warehouse = self.warehouse
                        pp_so.colour = self.colour
                        pp_so.inventory_uom = self.inventory_uom
                        pp_so.keterangan_group = self.keterangan_group
                    else:
                        pp_so = self.append('packing_list_data_missing', {})
                        pp_so.item_code_variant = self.item_code_variant_depan
                        pp_so.yard_atau_meter_per_roll = self.yard_atau_meter
                        pp_so.total_yard_atau_meter = (self.yard_atau_meter * self.qty_roll)
                        pp_so.total_roll = self.qty_roll
                        pp_so.parent_item = parent_item
                        pp_so.item_name = item_name
                        pp_so.warehouse = self.warehouse
                        pp_so.colour = self.colour
                        pp_so.inventory_uom = self.inventory_uom
            else:
                if self.group_prefix and self.group_code:
                    pp_so = self.append('packing_list_data_missing', {})
                    pp_so.item_code_variant = self.item_code_variant_depan
                    pp_so.yard_atau_meter_per_roll = self.yard_atau_meter
                    pp_so.total_yard_atau_meter = (self.yard_atau_meter * self.qty_roll)
                    pp_so.total_roll = self.qty_roll
                    pp_so.group = self.group_prefix + "." + self.group_code
                    pp_so.parent_item = parent_item
                    pp_so.item_name = item_name
                    pp_so.warehouse = self.warehouse
                    pp_so.colour = self.colour
                    pp_so.inventory_uom = self.inventory_uom
                    pp_so.keterangan_group = self.keterangan_group
                else:
                    pp_so = self.append('packing_list_data_missing', {})
                    pp_so.item_code_variant = self.item_code_variant_depan
                    pp_so.yard_atau_meter_per_roll = self.yard_atau_meter
                    pp_so.total_yard_atau_meter = (self.yard_atau_meter * self.qty_roll)
                    pp_so.total_roll = self.qty_roll
                    pp_so.parent_item = parent_item
                    pp_so.item_name = item_name
                    pp_so.warehouse = self.warehouse
                    pp_so.colour = self.colour
                    pp_so.inventory_uom = self.inventory_uom
        else:
            frappe.throw("Item Code / Colour / Warehouse / Yard / Meter is not filled in.")

    def add_pcs(self):
        count = 0
        if self.item_code_pcs and self.warehouse_pcs:
            parent_item = frappe.get_doc("Item", self.item_code_pcs).variant_of
            item_name = frappe.get_doc("Item", self.item_code_pcs).item_name
            if self.get("packing_list_pcs_missing"):
                for i in self.packing_list_pcs_missing:
                    if i.item_code_pcs == self.item_code_pcs and i.warehouse_pcs == self.warehouse_pcs:
                        count = 1
                if count == 1:
                    for i in self.packing_list_pcs_missing:
                        if i.item_code_pcs == self.item_code_pcs and i.warehouse_pcs == self.warehouse_pcs:
                            new_total_pcs = i.total_pcs
                            i.total_pcs = new_total_pcs + self.qty_pcs
                else:
                    pp_so = self.append('packing_list_pcs_missing', {})
                    pp_so.item_code_pcs = self.item_code_pcs
                    pp_so.total_pcs = self.qty_pcs
                    pp_so.parent_item_pcs = parent_item
                    pp_so.item_name_pcs = item_name
                    pp_so.warehouse_pcs = self.warehouse_pcs
                    pp_so.uom_pcs = self.uom_pcs
            else:
                pp_so = self.append('packing_list_pcs_missing', {})
                pp_so.item_code_pcs = self.item_code_pcs
                pp_so.total_pcs = self.qty_pcs
                pp_so.parent_item_pcs = parent_item
                pp_so.item_name_pcs = item_name
                pp_so.warehouse_pcs = self.warehouse_pcs
                pp_so.uom_pcs = self.uom_pcs
            self.qty_pcs = 0
        else:
            frappe.throw("Item Code / Warehouse is not filled in.")


@frappe.whitelist()
def get_data_from_packing_list_receipt(source_name, target_doc=None):
    def set_missing_values(source, target):
        # target.posting_date = source.posting_date
        # target.supplier = source.supplier
        # target.supplier_name = source.supplier_name
        # target.purchase_order = source.purchase_order
        # target.supplier_invoice_no = source.supplier_invoice_no
        # target.invoice_date = source.invoice_date
        # target.from_packing_list_receipt = source.name
        target.item_code_variant_depan = ""
        target.colour = ""
        target.yard_atau_meter = 0
        target.qty_roll = 0
        target.warehouse = ""
        target.inventory_uom = ""
        target.group_code = ""
        target.keterangan_group = ""
        target.item_code_pcs = ""
        target.uom_pcs = ""
        target.qty_pcs = 0

    def update_item_data(source_doc, target_doc, source_parent):
        target_doc.item_code_variant = source_doc.item_code_variant
        target_doc.item_name = source_doc.item_name
        target_doc.parent_item = source_doc.parent_item
        target_doc.yard_atau_meter_per_roll = source_doc.yard_atau_meter_per_roll
        target_doc.total_roll = source_doc.total_roll
        target_doc.colour = source_doc.colour
        target_doc.total_yard_atau_meter = source_doc.total_yard_atau_meter
        target_doc.inventory_uom = source_doc.inventory_uom
        target_doc.group = source_doc.group
        target_doc.keterangan_group = source_doc.keterangan_group
        target_doc.warehouse = source_doc.warehouse
        target_doc.from_data = source_doc.name

    def update_item_pcs(source_doc, target_doc, source_parent):
        target_doc.item_code_pcs = source_doc.item_code_pcs
        target_doc.item_name_pcs = source_doc.item_name_pcs
        target_doc.parent_item_pcs = source_doc.parent_item_pcs
        target_doc.total_pcs = source_doc.total_pcs
        target_doc.uom_pcs = source_doc.uom_pcs
        target_doc.warehouse_pcs = source_doc.warehouse_pcs
        target_doc.from_pcs = source_doc.from_pcs

    target_doc = get_mapped_doc("Packing List Receipt", source_name, {
        "Packing List Receipt": {
            "doctype": "Packing List Receipt Validator",
            "validation": {
                "docstatus": ["=", 1]
            },
        },
        "Packing List Receipt Data": {
            "doctype": "Packing List Receipt Validator Data Unchecked",
            "postprocess": update_item_data
        },
        "Packing List Receipt Data Pcs": {
            "doctype": "Packing List Receipt Validator Pcs Unchecked",
            "postprocess": update_item_pcs
        },
    }, target_doc, set_missing_values)

    return target_doc
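
# --- Usage sketch (added; not part of the original controller) ---
# Server-side, the whitelisted mapper can be exercised directly;
# "PLR-0001" is a hypothetical Packing List Receipt name:
#   doc = get_data_from_packing_list_receipt("PLR-0001")
#   doc.insert()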
| 38.749271 | 276 | 0.7323 | 2,107 | 13,291 | 4.244898 | 0.065496 | 0.051878 | 0.084302 | 0.044275 | 0.752236 | 0.692308 | 0.666257 | 0.642665 | 0.605098 | 0.580165 | 0 | 0.002655 | 0.178316 | 13,291 | 342 | 277 | 38.862573 | 0.816317 | 0.031751 | 0 | 0.601399 | 0 | 0 | 0.117825 | 0.051797 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034965 | false | 0 | 0.013986 | 0 | 0.055944 | 0.01049 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f9e1dc71d3576b72c2e25057c47bbdef37cb282 | 745 | py | Python | leetcode/0648.单词替换/0648-单词替换.py | ruisunyc/- | ef2fd0d58aa683311896bb9442510fedcd013313 | [
"Apache-2.0"
] | 2 | 2021-01-08T01:16:32.000Z | 2021-01-08T09:36:32.000Z | leetcode/0648.单词替换/0648-单词替换.py | ruisunyc/- | ef2fd0d58aa683311896bb9442510fedcd013313 | [
"Apache-2.0"
] | null | null | null | leetcode/0648.单词替换/0648-单词替换.py | ruisunyc/- | ef2fd0d58aa683311896bb9442510fedcd013313 | [
"Apache-2.0"
] | null | null | null | from typing import List


class Tries:
    def __init__(self):
        self.tree = {}

    def insert(self, word):
        a = self.tree
        for c in word:
            if c not in a:
                a[c] = {}
            a = a[c]
        a['#'] = True  # marks the end of a root word

    def start(self, pre):
        """Return the shortest inserted root that prefixes `pre`, else `pre` itself."""
        a = self.tree
        for i, c in enumerate(pre):
            if c not in a:
                break
            a = a[c]
            if '#' in a:
                return pre[:i + 1]
        return pre


class Solution:
    def replaceWords(self, dictionary: List[str], sentence: str) -> str:
        trees = Tries()
        for word in dictionary:
            trees.insert(word)
        return ' '.join(trees.start(sen) for sen in sentence.split(' '))
        # return ' '.join(map(trees.start, sentence.split(' ')))
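
# --- Example (added) ---
# Solution().replaceWords(["cat", "bat", "rat"],
#                         "the cattle was rattled by the battery")
# -> "the cat was rat by the bat"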
| 28.653846 | 73 | 0.483221 | 98 | 745 | 3.632653 | 0.336735 | 0.067416 | 0.025281 | 0.067416 | 0.050562 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002179 | 0.383893 | 745 | 25 | 74 | 29.8 | 0.77342 | 0.071141 | 0 | 0.173913 | 0 | 0 | 0.005797 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.173913 | false | 0 | 0 | 0 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9f9f6ed4062d3fb884261ba304b1d36cac1c6c2a | 2,536 | py | Python | src/tf_utils/nested_multilevel_attn.py | yicheng-w/fictional-garbanzo | af3e139b5d8f7d54673afe760bc35b012265fd01 | [
"MIT"
] | 127 | 2018-09-17T22:02:03.000Z | 2022-03-21T03:23:49.000Z | src/tf_utils/nested_multilevel_attn.py | yicheng-w/fictional-garbanzo | af3e139b5d8f7d54673afe760bc35b012265fd01 | [
"MIT"
] | 15 | 2019-11-02T11:48:35.000Z | 2020-11-13T17:37:07.000Z | src/tf_utils/nested_multilevel_attn.py | yicheng-w/fictional-garbanzo | af3e139b5d8f7d54673afe760bc35b012265fd01 | [
"MIT"
] | 32 | 2018-09-18T11:34:53.000Z | 2021-09-25T22:02:27.000Z | import tensorflow as tf
from tensorflow.contrib.seq2seq import AttentionMechanism
from tensorflow.contrib.seq2seq.python.ops.attention_wrapper import \
    _prepare_memory


class NestedMultiLevelAttn(AttentionMechanism):
    '''
    memory in the format of [b x k x n x h], where we have (k x n) tokens
    divided into k segments. Given a query q, we compute attention first at the
    local level to obtain grouped representations of shape [b x k x h], then we
    compute another level of attention to get the final set of context vectors
    with shape b x h.

    The alignment returned is the initial alignment distribution with shape
    [b x k x n].
    '''

    def __init__(
            self,
            memory,
            memory_sequence_length,
            similarity_function):
        '''
        memory: tensor, shape [b x k x n x h]
        memory_sequence_length: tensor, shape [b x k]'''
        k = tf.shape(memory)[1]
        b = tf.shape(memory)[0]
        h = tf.shape(memory)[-1]
        mem_reshaped = tf.reshape(memory, [b * k, -1, h])
        mem_mask_reshaped = tf.reshape(memory_sequence_length, [-1])
        values = _prepare_memory(mem_reshaped, mem_mask_reshaped)
        self.values = tf.reshape(values, [b, k, -1, h])
        self.sim_func = similarity_function
        with tf.variable_scope("first_lv_attn"):
            self.first_lv_sim_func = similarity_function
        with tf.variable_scope("second_lv_attn"):
            self.second_lv_sim_func = similarity_function

    def __call__(self, query, previous_alignments):
        '''
        query should have shape [b x h]
        '''
        b = tf.shape(self.values)[0]
        k = tf.shape(self.values)[1]
        h = tf.shape(self.values)[-1]
        mem_reshaped = tf.reshape(self.values, [b * k, -1, h])
        query_expand_dims = tf.expand_dims(query, 1)  # [b x 1 x h]
        attn_logits = self.first_lv_sim_func(query_expand_dims, mem_reshaped)
        # [(b*k), n]
        attn_logits_reshaped = tf.reshape(attn_logits, [b, k, -1])
        alignments = tf.nn.softmax(attn_logits_reshaped, -1)  # [b x k x n]
        # use the softmaxed weights (not the raw logits) for the weighted sum
        expanded_alignments = tf.reshape(alignments, [b * k, 1, -1])  # [(b*k) x 1 x n]
        w_lv_context = tf.matmul(expanded_alignments, mem_reshaped)  # [(b*k) x 1 x h]
        w_lv_context = tf.squeeze(w_lv_context, 1)  # [(b*k) x h]
        w_lv_context_expanded = tf.reshape(w_lv_context, [b, k, h])
        second_attn_logits = self.second_lv_sim_func(query_expand_dims,
                                                     w_lv_context_expanded)  # [b x k]
        # completion per the class docstring (an assumption): a second softmax
        # over segments gives the final [b x h] context, returned together with
        # the first-level alignments
        second_alignments = tf.nn.softmax(second_attn_logits, -1)  # [b x k]
        context = tf.squeeze(
            tf.matmul(tf.expand_dims(second_alignments, 1), w_lv_context_expanded), 1)  # [b x h]
        return context, alignments
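
# --- Shape walkthrough (added) ---
# memory:               [b x k x n x h]
# first-level logits:   [(b*k) x n]  -> alignments [b x k x n]
# grouped context:      [b x k x h]
# second-level logits:  [b x k]      -> final context [b x h]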
| 35.222222 | 85 | 0.635647 | 377 | 2,536 | 4.050398 | 0.249337 | 0.013098 | 0.013752 | 0.013098 | 0.259332 | 0.148658 | 0.1074 | 0.098232 | 0.040602 | 0.040602 | 0 | 0.010707 | 0.263407 | 2,536 | 71 | 86 | 35.71831 | 0.806745 | 0.237382 | 0 | 0.055556 | 0 | 0 | 0.01465 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.083333 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9fa06103ea6d239f2895004e7c80a2c4a86e4145 | 895 | py | Python | camera_calibration.py | karata-sc/CameraCalibrateSample | 870150f98465639c577cdf13a0a84f40c8c60bfa | [
"BSD-3-Clause"
] | null | null | null | camera_calibration.py | karata-sc/CameraCalibrateSample | 870150f98465639c577cdf13a0a84f40c8c60bfa | [
"BSD-3-Clause"
] | null | null | null | camera_calibration.py | karata-sc/CameraCalibrateSample | 870150f98465639c577cdf13a0a84f40c8c60bfa | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import cv2

# camera matrix
mtx = [[647.048823, 0.000000, 326.544754], [0.000000, 645.959963, 234.463113], [0.000000, 0.000000, 1.000000]]
# distortion
dist = [0.026716, -0.114498, 0.001072, -0.004303, 0.000000]

mtx = np.array(mtx)
dist = np.array(dist)

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    h, w = frame.shape[:2]

    # calibration
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
    dst = cv2.undistort(frame, mtx, dist, None, newcameramtx)
    x, y, w, h = roi
    dst = dst[y:y+h, x:x+w]

    #
    # insert your program
    #

    cv2.imshow('original.jpg', frame)
    cv2.imshow('calibrated.jpg', dst)

    k = cv2.waitKey(1)
    if k == 0x1b:  # Esc key
        break

cap.release()
cv2.destroyAllWindows()
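
# --- Note (added) ---
# The hard-coded mtx/dist values above would normally come from a prior
# calibration run, e.g. (assuming chessboard object/image points):
#   ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
#       objpoints, imgpoints, gray.shape[::-1], None, None)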
| 23.552632 | 110 | 0.546369 | 120 | 895 | 4.075 | 0.491667 | 0.071575 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.194222 | 0.303911 | 895 | 37 | 111 | 24.189189 | 0.59069 | 0.075978 | 0 | 0 | 0 | 0 | 0.031707 | 0 | 0 | 0 | 0.004878 | 0 | 0 | 1 | 0 | false | 0 | 0.095238 | 0 | 0.095238 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9fa3a50343401ecfadbc6203478370db57506753 | 1,125 | py | Python | 0347. Top K Frequent Elements/solution_min_heap.py | furutuki/LeetCodeSolution | db5e6573d0c907dfa3e6ad5e5b3b5ff9944a4f53 | [
"MIT"
] | null | null | null | 0347. Top K Frequent Elements/solution_min_heap.py | furutuki/LeetCodeSolution | db5e6573d0c907dfa3e6ad5e5b3b5ff9944a4f53 | [
"MIT"
] | null | null | null | 0347. Top K Frequent Elements/solution_min_heap.py | furutuki/LeetCodeSolution | db5e6573d0c907dfa3e6ad5e5b3b5ff9944a4f53 | [
"MIT"
] | null | null | null | from typing import List
import heapq


class Node:
    def __init__(self, val: int):
        self.val = val
        self.cnt = 1

    def __lt__(self, other):
        return self.cnt < other.cnt

    def __eq__(self, other):
        return self.cnt == other.cnt


class Solution:
    def topKFrequent(self, nums: List[int], k: int) -> List[int]:
        # count occurrences, keeping one Node per distinct value
        node_map = dict()
        nodes = list()
        for item in nums:
            if item in node_map:
                node_map[item].cnt += 1
            else:
                node = Node(item)
                node_map[item] = node
                nodes.append(node)
        ans = []
        heap_arr = []
        # maintain a size-k min-heap of the most frequent nodes
        for node in nodes:
            if len(heap_arr) < k:
                heapq.heappush(heap_arr, node)
            elif heap_arr[0].cnt < node.cnt:
                heapq.heappop(heap_arr)
                heapq.heappush(heap_arr, node)
        while heap_arr:
            ans.append(heapq.heappop(heap_arr).val)
        return ans
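
# --- Expected behaviour (added) ---
# topKFrequent([1, 1, 1, 2, 2, 3], 2) returns the two most frequent values,
# e.g. [2, 1] (any order of the top-k is accepted by the judge).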
s = Solution()
print(s.topKFrequent([1,1,1,2,2,3], 2))
print(s.topKFrequent([1], 1)) | 23.93617 | 65 | 0.511111 | 148 | 1,125 | 3.709459 | 0.310811 | 0.102004 | 0.054645 | 0.069217 | 0.269581 | 0.10929 | 0.10929 | 0 | 0 | 0 | 0 | 0.01707 | 0.375111 | 1,125 | 47 | 66 | 23.93617 | 0.763869 | 0 | 0 | 0.054054 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108108 | false | 0 | 0.054054 | 0.054054 | 0.297297 | 0.054054 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9fa767f31a30fc774a163edb112d1493c79a7fd2 | 4,864 | py | Python | tron/Parsing/args.py | sdss/tron | 886c5c5fb6341ad85e4a9f5d6f5ecb6bbc0d8322 | [
"BSD-3-Clause"
] | null | null | null | tron/Parsing/args.py | sdss/tron | 886c5c5fb6341ad85e4a9f5d6f5ecb6bbc0d8322 | [
"BSD-3-Clause"
] | null | null | null | tron/Parsing/args.py | sdss/tron | 886c5c5fb6341ad85e4a9f5d6f5ecb6bbc0d8322 | [
"BSD-3-Clause"
] | null | null | null | __all__ = ['parseArgs', 'match']

import re
from collections import OrderedDict

from tron import Misc

from .Exceptions import ParseException
from .keys import eatAString

# Match " key = STUFF"
arg_re = re.compile(
    r"""
    ^\s*                        # Ignore leading space
    (?P<key>[a-z_][a-z0-9_-]*)  # Match keyword name
    \s*
    (?P<delimiter>=)
    \s*                         # Ignore spaces after keyname
    (?P<rest>.*)                # Match everything after the delimiter""",
    re.IGNORECASE | re.VERBOSE)

noarg_re = re.compile(
    r"""
    ^\s*                        # Ignore leading space
    (?P<key>\S+)                # Match everything up to the next WS
    \s*
    (?P<rest>.*)                # Match everything after the WS""", re.IGNORECASE | re.VERBOSE)


def eatAVee(s):
    """ Match a keyword value -- a possibly space-padded value ended by
    whitespace.

    Args:
       s - a string

    Returns:
       - the matched value; an empty string if s is just whitespace.
       - any unmatched input, including the terminating character.
    """
    s = s.lstrip()
    if len(s) == 0:
        return '', ''

    # String parsing is trickier, let eatAString() handle that.
    if s[0] in "\"'":
        return eatAString(s)

    vEnd = len(s)
    for i in range(len(s)):
        if s[i] in ' \t\r\n\x0b\x0c':
            vEnd = i
            break

    return s[:vEnd], s[vEnd + 1:]


def parseArg(s):
    """ Try to parse a single KV.

    Return:
      { None, None, None }     on end-of-input
      { K None rest-of-line }  for a valueless keyword or
      { K V rest-of-line }
    """
    s = s.lstrip()
    if s == '':
        return None, None, None

    # Try to match for K=V. If we can't, gobble the next non-blank word.
    #
    match = arg_re.match(s)
    if match is None:
        match = noarg_re.match(s)
        if match is None:
            raise ParseException(leftoverText=s)
        d = match.groupdict()
        return d['key'], None, d['rest']

    d = match.groupdict()
    K = d['key']
    rest = d['rest']

    # Parse a value
    #
    try:
        val, rest = eatAVee(rest)
    except ParseException as e:
        e.prependText(rest)
        raise

    return K, val, rest
def parseArgs(s):
    """ Parse a string of command arguments into an OrderedDict.

    Returns:
      - an OrderedDict of keyword values.
        If a keyword has no value, the value is None.
        Otherwise the value is a list of parsed values. Note that each value can be None.

        cmd a1 a2=1 a3= "2" a4=,
        ->
        args = {'a1' : None,
                'a2' : '1',
                'a3' : '2',
                'a4' : (None,None)
               }
    """
    KVs = OrderedDict()

    rest = s
    while True:
        try:
            key, values, rest = parseArg(rest)
        except ParseException as e:
            e.setKVs(KVs)
            raise

        if key is None:
            break
        KVs[key] = values

    # Misc.log('parseArgs', 'KVs: %s' % (KVs))

    return KVs
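
# --- Example (added; simple cases only) ---
# parseArgs('key1 key2=value')
# -> OrderedDict([('key1', None), ('key2', 'value')])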
def match(argv, opts):
    """ Searches an OrderedDict for matches.

    Args:
      argv - an OrderedDict of options.
      opts - a list of duples to match against. The duple parts are the option name
             and a converter. If the converter is None, the option takes no argument.

    Returns:
      matches   - an OrderedDict of the matched options, with converted arguments.
      unmatched - a list of unmatched options from opts.
      leftovers - an OrderedDict of unmatched options from argv.

    Raises:
      Error - Any parsing or conversion error.
    """
    # Convert the request duples to an OrderedDict
    want = OrderedDict()
    for o in opts:
        try:
            a, b = o
        except Exception:
            raise Exception('the argument to Command.matchDicts must be a list of duples')
        want[a] = b

    # Walk over the parsed options, and categorize them
    #
    matches = OrderedDict()
    leftovers = OrderedDict()
    for opt, arg in argv.items():
        # If we are looking for the option, match it and convert the argument.
        if opt in want:
            converter = want[opt]
            if converter is None:
                if arg is not None:
                    raise Exception('option %s takes no argument' % (Misc.qstr(opt, tquote="'")))
                matches[opt] = None
            else:
                try:
                    convArg = converter(arg)
                except Exception as e:
                    raise Exception("error with option '%s': %s" % (opt, e))
                matches[opt] = convArg

            # Remove the option from the search list.
            del want[opt]
        # If we are not looking for the option, return it as a leftover
        else:
            leftovers[opt] = arg

    return matches, list(want.keys()), leftovers
| 25.465969 | 97 | 0.545847 | 626 | 4,864 | 4.22524 | 0.297125 | 0.034405 | 0.022684 | 0.009074 | 0.089981 | 0.083932 | 0.042344 | 0.026465 | 0.026465 | 0.026465 | 0 | 0.006041 | 0.353413 | 4,864 | 190 | 98 | 25.6 | 0.834976 | 0.358758 | 0 | 0.216867 | 0 | 0 | 0.063563 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048193 | false | 0 | 0.060241 | 0 | 0.204819 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9faae556022f924fb8b4e582dc35ca58114cda0a | 1,496 | py | Python | test/test_strategy.py | cantona/futu_algo | 6045973e1d75b86b704aaf78b855fe550dccdb9e | [
"Apache-2.0"
] | 1 | 2021-05-31T22:09:48.000Z | 2021-05-31T22:09:48.000Z | test/test_strategy.py | TrueMatthewKirkham/futu_algo | 81914b3d7b098f3d42aa98f85f019b9c7f87d05f | [
"Apache-2.0"
] | null | null | null | test/test_strategy.py | TrueMatthewKirkham/futu_algo | 81914b3d7b098f3d42aa98f85f019b9c7f87d05f | [
"Apache-2.0"
] | null | null | null | # Futu Algo: Algorithmic High-Frequency Trading Framework
# Copyright (c) billpwchan - All Rights Reserved
# Unauthorized copying of this file, via any medium is strictly prohibited
# Proprietary and confidential
# Written by Bill Chan <billpwchan@hotmail.com>, 2021
import unittest

import pandas as pd

from strategies.Quant_Legendary import QuantLegendary


class StrategyTestCase(unittest.TestCase):
    def setUp(self):
        self.stock_code = 'HK.09988'
        self.complete_data = pd.read_csv('./test/test_data/test_data.csv', index_col=None)
        # the first 150 rows warm up the strategy; the rest simulate live ticks
        self.input_data = self.complete_data.iloc[:150, :]
        self.test_data = self.complete_data.iloc[150:, :]
        self.strategy = QuantLegendary({self.stock_code: self.input_data}, observation=150)

    def test_buy(self):
        for index, row in self.test_data.iterrows():
            latest_data = row.to_frame().transpose()
            latest_data.reset_index(drop=True, inplace=True)
            self.strategy.parse_data(latest_data=latest_data)
            self.strategy.buy(self.stock_code)
        # smoke test: reaching this point without an exception is the pass condition
        self.assertEqual(True, True)

    def test_sell(self):
        for index, row in self.test_data.iterrows():
            latest_data = row.to_frame().transpose()
            latest_data.reset_index(drop=True, inplace=True)
            self.strategy.parse_data(latest_data=latest_data)
            self.strategy.sell(self.stock_code)
        # smoke test: reaching this point without an exception is the pass condition
        self.assertEqual(True, True)


if __name__ == '__main__':
    unittest.main()
| 36.487805 | 91 | 0.692513 | 194 | 1,496 | 5.128866 | 0.443299 | 0.080402 | 0.052261 | 0.051256 | 0.452261 | 0.452261 | 0.452261 | 0.317588 | 0.317588 | 0.317588 | 0 | 0.015152 | 0.205882 | 1,496 | 40 | 92 | 37.4 | 0.822391 | 0.174465 | 0 | 0.384615 | 0 | 0 | 0.037459 | 0.02443 | 0 | 0 | 0 | 0 | 0.076923 | 1 | 0.115385 | false | 0 | 0.115385 | 0 | 0.269231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9fab4add1fd28e2d4b54104e9ed22b881f8cae66 | 5,916 | py | Python | adminapp/views.py | habibaudu/Elite | 3f48a7cd2f9058c20aea6d3a4d626f7ccac84072 | [
"MIT"
] | null | null | null | adminapp/views.py | habibaudu/Elite | 3f48a7cd2f9058c20aea6d3a4d626f7ccac84072 | [
"MIT"
] | 1 | 2021-03-19T05:13:22.000Z | 2021-03-19T05:13:22.000Z | adminapp/views.py | habibaudu/Elite | 3f48a7cd2f9058c20aea6d3a4d626f7ccac84072 | [
"MIT"
] | null | null | null | import jwt
from django.conf import settings
from rest_framework.status import (HTTP_200_OK, HTTP_400_BAD_REQUEST,
                                   HTTP_401_UNAUTHORIZED)
from rest_framework import viewsets
from django.contrib.auth.hashers import check_password
from adminapp.models import (Admin)
from adminapp.serializer import (LoginSerializer, UsersDetailsSerializer)
from utils.helpers import format_response
from django.utils import timezone
from adminapp.permissions import IsAdmin
from users_app.models import (User)
from datetime import datetime, date, timedelta
from dateutil.relativedelta import relativedelta


class AdminLoginViewSet(viewsets.ViewSet):
    permission_classes = ()
    authentication_classes = ()
    serializer_class = LoginSerializer

    def create(self, request):
        serializer = self.serializer_class(data=request.data)
        if not serializer.is_valid():
            return format_response(error=serializer.errors,
                                   status=HTTP_400_BAD_REQUEST)
        password = serializer.data['password']
        username = serializer.data['username']
        admin = Admin.objects.filter(username=username).first()
        if not admin:
            return format_response(error="Invalid username or password",
                                   status=HTTP_401_UNAUTHORIZED)
        valid_password = check_password(password, admin.password)
        if not valid_password:
            return format_response(error="Invalid username or password",
                                   status=HTTP_401_UNAUTHORIZED)
        admin.last_login = timezone.now()
        token = jwt.encode(
            {
                "uid": admin.id,
                "iat": settings.JWT_SETTINGS["ISS_AT"](),
                "exp": settings.JWT_SETTINGS["EXP_AT"]()
            }, settings.SECRET_KEY)
        return format_response(
            token=token,
            message="Your login was successful",
            role=admin.role.name,
            status=HTTP_200_OK)
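
# --- Verification sketch (added; not part of the original view) ---
# PyJWT signs with HS256 by default, so the issued token can be checked with:
#   payload = jwt.decode(token, settings.SECRET_KEY, algorithms=["HS256"])
#   payload["uid"]  # -> the admin's id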
class ViewUserViewSet(viewsets.ViewSet):
    serializer_class = UsersDetailsSerializer
    permission_classes = (IsAdmin,)
    # NOTE: class-level attributes, shared across requests and evaluated once
    # at import time
    today = timezone.now()
    this_month = {}
    last_three_month = {}
    this_year = {}
    m = datetime.now().month
    y = datetime.now().year
    d = datetime.now().day

    def list(self, request):
        period = request.query_params.get("period")
        if period == 'this_month':
            d1 = date(self.y, self.m, 1)
            d2 = date(self.y, self.m, self.d)
            delta = d2 - d1
            dates_in_this_month = [(d1 + timedelta(days=i)).strftime('%Y-%m-%d') for i in range(delta.days + 1)]
            query_for_this_month = User.objects.filter(created_at__year=self.today.year,
                                                       created_at__month=self.today.month)
            print(dates_in_this_month)
            for dt in dates_in_this_month:
                counter = 0
                for q in query_for_this_month:
                    if str(dt) == str(q.created_at.date()):
                        counter += 1
                self.this_month[dt] = counter
            serializer = self.serializer_class(query_for_this_month, many=True)
            return format_response(data=serializer.data,
                                   message="All users for the month retrieved",
                                   this_month=self.this_month,
                                   status=HTTP_200_OK)
        if period == 'this_year':
            query_set = User.objects.filter(created_at__year=self.today.year)
            d1 = date(self.y, 1, 1)
            d2 = date(self.y, self.m, self.d)
            delta = d2 - d1
            dates_since_this_year = \
                [(d1 + timedelta(days=i)).strftime('%Y-%m-%d') for i in range(delta.days + 1)]
            for dt in dates_since_this_year:
                counter = 0
                for q in query_set:
                    if str(dt) == str(q.created_at.date()):
                        counter += 1
                self.this_year[dt] = counter
            serializer = self.serializer_class(query_set, many=True)
            return format_response(data=serializer.data,
                                   message="All users for the year retrieved",
                                   this_year=self.this_year,
                                   status=HTTP_200_OK)
        if period == "three_months":
            today = self.today
            three_months = relativedelta(months=3)
            last_three_months = today - three_months
            query_set = User.objects.filter(created_at__gte=last_three_months)
            d1 = date(self.y, self.m, self.d)
            # roll back three months, handling the year boundary
            if self.m - 3 == 0:
                d2 = date(self.y - 1, 12, self.d)
            elif self.m - 3 == -1:
                d2 = date(self.y - 1, 11, self.d)
            elif self.m - 3 == -2:
                d2 = date(self.y - 1, 10, self.d)
            else:
                d2 = date(self.y, self.m - 3, self.d)
            delta = d1 - d2
            last_three_months_data = \
                [(d2 + timedelta(days=i)).strftime('%Y-%m-%d') for i in range(delta.days + 1)]
            for dt in last_three_months_data:
                counter = 0
                for q in query_set:
                    if str(dt) == str(q.created_at.date()):
                        counter += 1
                self.last_three_month[dt] = counter
            serializer = self.serializer_class(query_set, many=True)
            return format_response(data=serializer.data,
                                   message="Users registered in the last three months retrieved",
                                   last_three_month=self.last_three_month,
                                   status=HTTP_200_OK)
| 40.520548 | 110 | 0.5524 | 671 | 5,916 | 4.675112 | 0.202683 | 0.031559 | 0.025821 | 0.021039 | 0.385081 | 0.365317 | 0.324195 | 0.30475 | 0.287855 | 0.26044 | 0 | 0.020242 | 0.356998 | 5,916 | 145 | 111 | 40.8 | 0.804416 | 0 | 0 | 0.211382 | 0 | 0 | 0.049865 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.01626 | false | 0.04878 | 0.105691 | 0 | 0.292683 | 0.00813 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9fabac7aa4d3c0886cddf6e4a7f52e61fb1a4602 | 1,972 | py | Python | paper-code/tensorflow_src/logME.py | DengBoCong/nlp-paper | 89c5338efda9c1379e7ef6f275a2f3d55d62ea39 | [
"Apache-2.0"
] | 478 | 2020-10-28T01:30:30.000Z | 2022-03-30T06:34:07.000Z | paper-code/tensorflow_src/logME.py | DengBoCong/paper | 25dd316d8b4b47363bd611bbabca6a5e3fd09cba | [
"Apache-2.0"
] | 1 | 2021-08-29T11:55:09.000Z | 2021-11-04T09:25:19.000Z | paper-code/tensorflow_src/logME.py | DengBoCong/paper | 25dd316d8b4b47363bd611bbabca6a5e3fd09cba | [
"Apache-2.0"
] | 89 | 2021-01-05T06:11:55.000Z | 2022-03-24T12:51:57.000Z | import tensorflow as tf
from numba import njit
import numpy as np


@njit
def each_evidence(y_, f, fh, v, s, vh, N, D):
    """
    compute the maximum evidence for each class
    """
    alpha = 1.0
    beta = 1.0
    lam = alpha / beta
    tmp = (vh @ (f @ y_))
    for _ in range(11):
        gamma = (s / (s + lam)).sum()
        m = v @ (tmp * beta / (alpha + beta * s))
        alpha_de = (m * m).sum()
        alpha = gamma / alpha_de
        beta_de = ((y_ - fh @ m) ** 2).sum()
        beta = (N - gamma) / beta_de
        new_lam = alpha / beta
        if np.abs(new_lam - lam) / lam < 0.01:
            break
        lam = new_lam
    evidence = D / 2.0 * np.log(alpha) \
               + N / 2.0 * np.log(beta) \
               - 0.5 * np.sum(np.log(alpha + beta * s)) \
               - beta / 2.0 * beta_de \
               - alpha / 2.0 * alpha_de \
               - N / 2.0 * np.log(2 * np.pi)
    return evidence / N
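
# --- Note (added) ---
# The quantity returned above is the per-sample log marginal evidence
#   log p(y | F) = D/2 * log(alpha) + N/2 * log(beta)
#                  - 1/2 * sum_i log(alpha + beta * s_i)
#                  - beta/2 * ||y - F m||^2 - alpha/2 * ||m||^2
#                  - N/2 * log(2 * pi)
# maximized over alpha and beta by the fixed-point iteration in each_evidence
# (the LogME transferability metric).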
# warm up the numba JIT with a dummy problem (D = 20, N = 50)
f_tmp = np.random.randn(20, 50).astype(np.float64)
each_evidence(np.random.randint(0, 2, 50).astype(np.float64), f_tmp, f_tmp.transpose(),
              np.eye(20, dtype=np.float64), np.ones(20, dtype=np.float64), np.eye(20, dtype=np.float64), 50,
              20)


def LogME(f: tf.Tensor, y: tf.Tensor, regression=False):
    f = f.numpy().astype(np.float64)
    y = y.numpy()
    if regression:
        y = y.astype(np.float64)
    fh = f
    f = f.transpose()
    D, N = f.shape
    v, s, vh = np.linalg.svd(f @ fh, full_matrices=True)
    evidences = []
    if regression:
        K = y.shape[1]
        for i in range(K):
            y_ = y[:, i]
            evidence = each_evidence(y_, f, fh, v, s, vh, N, D)
            evidences.append(evidence)
    else:
        # classification: one-vs-rest evidence per class
        K = int(y.max() + 1)
        for i in range(K):
            y_ = (y == i).astype(np.float64)
            evidence = each_evidence(y_, f, fh, v, s, vh, N, D)
            evidences.append(evidence)
    return np.mean(evidences) | 29.878788 | 110 | 0.500507 | 302 | 1,972 | 3.18543 | 0.254967 | 0.074844 | 0.077963 | 0.043659 | 0.245322 | 0.209979 | 0.16632 | 0.16632 | 0.16632 | 0.133056 | 0 | 0.048724 | 0.34432 | 1,972 | 66 | 111 | 29.878788 | 0.695282 | 0.029919 | 0 | 0.148148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.055556 | 0 | 0.12963 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
9fb0aa6bc7ba39ca8a83fbf1508cfdb4b092e87c | 3,035 | py | Python | jiepai/spider.py | kapuni/projects | 8c1747d4ed02d81fcb53a8a891d4e76d7e6dde5e | [
"MIT"
] | null | null | null | jiepai/spider.py | kapuni/projects | 8c1747d4ed02d81fcb53a8a891d4e76d7e6dde5e | [
"MIT"
] | null | null | null | jiepai/spider.py | kapuni/projects | 8c1747d4ed02d81fcb53a8a891d4e76d7e6dde5e | [
"MIT"
] | null | null | null | import json
import os
import re
from urllib.parse import urlencode
from hashlib import md5
from multiprocessing import Pool
from json.decoder import JSONDecodeError

import pymongo
import requests
from bs4 import BeautifulSoup
from requests.exceptions import RequestException

from config import *

client = pymongo.MongoClient(MONGD_URL, connect=False)
db = client[MONGD_DB]


def get_page_index(offset, keyword):
    data = {
        'offset': offset,
        'format': 'json',
        'keyword': keyword,
        'autoload': 'true',
        'count': '20',
        'cur_tab': 3
    }
    url = 'http://www.toutiao.com/search_content/?' + urlencode(data)
    try:
        response = requests.get(url)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        print('Error requesting the search page')
        return None


def parse_page_index(html):
    try:
        data = json.loads(html)
        if data and 'data' in data.keys():
            for item in data.get('data'):
                yield item.get('article_url')
    except JSONDecodeError:
        pass


def get_page_detil(url):
    try:
        response = requests.get(url)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        print('Error requesting the detail page', url)
        return None


def parse_page_detil(html, url):
    soup = BeautifulSoup(html, 'lxml')
    title = soup.select('title')[0].get_text()
    print(title)
    images_pattern = re.compile('val.gallery = (.*?);', re.S)
    result = re.search(images_pattern, html)
    if result:
        data = json.loads(result.group(1))
        if data and 'sub_images' in data.keys():
            sub_images = data.get('sub_images')
            images = [item.get('url') for item in sub_images]
            for image in images:
                download_image(image)
            return {
                'title': title,
                'url': url,
                'images': images
            }


def save_to_mongo(result):
    if db[MONGD_TABLE].insert_one(result):
        print('Saved to MongoDB', result)
        return True
    return False


def download_image(url):
    print('Downloading', url)
    try:
        response = requests.get(url)
        if response.status_code == 200:
            save_image(response.content)
        return None
    except RequestException:
        print('Error requesting the image', url)
        return None


def save_image(content):
    file_path = '{0}/{1}.{2}'.format(os.getcwd(), md5(content).hexdigest(), 'jpg')
    if not os.path.exists(file_path):
        with open(file_path, 'wb') as f:
            f.write(content)


def main(offset):
    html = get_page_index(offset, KEYWORD)
    for url in parse_page_index(html):
        html = get_page_detil(url)
        if html:
            result = parse_page_detil(html, url)
            if result:
                save_to_mongo(result)


if __name__ == '__main__':
    groups = [x * 20 for x in range(GROUP_START, GROUP_END + 1)]
    pool = Pool()
    pool.map(main, groups) | 26.622807 | 80 | 0.607908 | 378 | 3,035 | 4.740741 | 0.322751 | 0.033482 | 0.031808 | 0.03683 | 0.257813 | 0.145089 | 0.145089 | 0.145089 | 0.145089 | 0.145089 | 0 | 0.010522 | 0.279736 | 3,035 | 114 | 81 | 26.622807 | 0.809241 | 0 | 0 | 0.214286 | 0 | 0 | 0.078393 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081633 | false | 0.010204 | 0.122449 | 0 | 0.316327 | 0.061224 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
9fb337de160eba2d0e8c8e9f5baee5f5b434b839 | 1,114 | py | Python | tests/test_version.py | jessekrubin/lager | 942d8b158495b5d782a159f36ef801abb972a87f | [
"BSD-2-Clause"
] | 1 | 2020-05-14T03:51:40.000Z | 2020-05-14T03:51:40.000Z | tests/test_version.py | dynamic-graphics-inc/lager | 942d8b158495b5d782a159f36ef801abb972a87f | [
"MIT"
] | 6 | 2020-05-02T18:20:18.000Z | 2020-06-16T23:31:25.000Z | tests/test_version.py | jessekrubin/lager | 942d8b158495b5d782a159f36ef801abb972a87f | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# =============================================================================
# (c) Copyright 2020, Dynamic Graphics, Inc.
# ALL RIGHTS RESERVED
# Permission to use, copy, modify, or distribute this software for any
# purpose is prohibited without specific, written prior permission from
# Dynamic Graphics, Inc.
# =============================================================================
from os import path

from lager import __version__

PWD = path.split(path.realpath(__file__))[0]


def _get_version() -> str:
    _dirpath = PWD
    version = "UNKNOWN???"
    # walk up to three directory levels looking for pyproject.toml
    for _ in range(3):
        _filepath = path.join(_dirpath, "pyproject.toml")
        if path.exists(_filepath):
            with open(_filepath) as f:
                version_lines = [line for line in f.read().split("\n") if "version" in line]
            version = version_lines[0].replace("version = ", "").strip('"')
            return version
        _dirpath = path.split(_dirpath)[0]
    return version


def test_version() -> None:
    pyproject_version: str = _get_version()
    assert __version__ == pyproject_version
| 30.944444 | 84 | 0.52693 | 113 | 1,114 | 4.964602 | 0.584071 | 0.053476 | 0.064171 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010514 | 0.231598 | 1,114 | 35 | 85 | 31.828571 | 0.64486 | 0.36535 | 0 | 0.1 | 0 | 0 | 0.063037 | 0 | 0 | 0 | 0 | 0 | 0.05 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9fb4dc66c66a1ef8e79dfbb25e4e33c8375a1b3c | 775 | py | Python | src/princeton_scraper_seas_faculty/helpers.py | jlumbroso/princeton-scraper-seas-faculty | d6c0dcaec050d8fb22f8a6a911db287640563c12 | [
"Unlicense"
] | 10 | 2020-08-11T18:44:18.000Z | 2021-06-16T19:58:38.000Z | src/princeton_scraper_seas_faculty/helpers.py | jlumbroso/princeton-scraper-seas-faculty | d6c0dcaec050d8fb22f8a6a911db287640563c12 | [
"Unlicense"
] | null | null | null | src/princeton_scraper_seas_faculty/helpers.py | jlumbroso/princeton-scraper-seas-faculty | d6c0dcaec050d8fb22f8a6a911db287640563c12 | [
"Unlicense"
] | null | null | null |
import typing

__author__ = "Jérémie Lumbroso <lumbroso@cs.princeton.edu>"

__all__ = [
    "split_name",
]


def split_name(name: str) -> typing.Tuple[str, str]:
    """
    Returns a likely `(first, last)` split given a full name. This uses
    very simple heuristics, and assumes Western usage.

    :param name: A full name (first and last name).

    :return: A split pair with the first names, and the last name.
    """
    words = name.split()
    first_bits = words[:-1]
    last_bits = words[-1:]
    # lowercase particles (e.g. "van", "de") are folded into the last name
    while len(first_bits) > 0 and first_bits[-1][0].islower():
        last_bits = [first_bits[-1]] + last_bits
        first_bits = first_bits[:-1]
    first_joined = " ".join(first_bits)
    last_joined = " ".join(last_bits)
    return first_joined, last_joined
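
# --- Example (added) ---
# split_name("Ludwig van Beethoven") -> ("Ludwig", "van Beethoven")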
| 24.21875 | 71 | 0.646452 | 110 | 775 | 4.327273 | 0.409091 | 0.132353 | 0.063025 | 0.071429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011686 | 0.227097 | 775 | 31 | 72 | 25 | 0.782972 | 0.296774 | 0 | 0 | 0 | 0 | 0.108527 | 0.052326 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9fb526a50dcd6196d12573e5c0cd613b463ce903 | 2,189 | py | Python | QUESTION2.py | mizkin-system/monitoring | 98b54b61a873cbf5cc56d028e38407d5d356fc0b | [
"MIT"
] | null | null | null | QUESTION2.py | mizkin-system/monitoring | 98b54b61a873cbf5cc56d028e38407d5d356fc0b | [
"MIT"
] | null | null | null | QUESTION2.py | mizkin-system/monitoring | 98b54b61a873cbf5cc56d028e38407d5d356fc0b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8

import time
import csv
import subprocess
import pandas as pd
import argparse


class QUESTION02:
    index = 1
    timeout_times = 1

    def __init__(self, timeout_times):
        self.timeout_times = timeout_times
        print('timeout_times=' + self.timeout_times)
        self.index = 1

    def monitorLog(self, logFile):
        print(u"Monitoring log file: " + logFile)
        popen = subprocess.Popen('tail -f ' + logFile, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        pid = popen.pid
        # the seed row documents the expected column layout:
        # ip = "ipv4-with-prefix", time = "YYYYMMDDhhmmss"
        df = pd.DataFrame({"ip": ["ipv4-with-prefix"], "time": ["YYYYMMDDhhmmss"]}, index=["index"])
        print('Popen.pid:' + str(pid))
        print("monitor start")
        while True:
            line = popen.stdout.readline().strip()
            if line:
                item_list = line.decode().split(",")
                if item_list[-1] == '-':
                    # record the timed-out probe
                    df.loc[self.index] = [item_list[1], item_list[0]]
                    self.index = self.index + 1
                else:
                    if (df[df.ip == item_list[1]])["ip"].size >= int(self.timeout_times):
                        # a server that timed out at least `timeout_times` times has
                        # responded again: treat it as a failure and output the record
                        print(u"Failed server address: " + item_list[1])
                        print(u"Server failure period: " + (df[df.ip == item_list[1]]).values[0][1] + "~" + item_list[0])
                        with open('./question2.csv', 'a') as f:
                            writer = csv.writer(f)
                            writer.writerow([item_list[1], (df[df.ip == item_list[1]]).values[0][1] + "~" + item_list[0]])
                    # clear the recorded timeouts for this server
                    df = df[df.ip != item_list[1]]
            time.sleep(1)
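
# --- Expected input (added; inferred from the parsing above) ---
# Each ping.log line is "YYYYMMDDhhmmss,ipv4-with-prefix,response",
# where response is '-' on timeout, e.g.:
#   20200401120000,10.0.0.1/24,-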
if __name__ == "__main__":
    try:
        parser = argparse.ArgumentParser()
        parser.add_argument('timeout_times')
        args = parser.parse_args()
        app = QUESTION02(args.timeout_times)
        app.monitorLog("ping.log")
    except KeyboardInterrupt:
        print(u"monitor end")
    except SystemExit:
        print(u"Aborted due to an invalid argument. Please check the arguments.")
    except Exception as e:
        print(u"Aborted abnormally.")
        print(e)
| 33.166667 | 122 | 0.534491 | 246 | 2,189 | 4.613821 | 0.394309 | 0.084582 | 0.063436 | 0.035242 | 0.132159 | 0.082819 | 0.056388 | 0.056388 | 0.056388 | 0.056388 | 0 | 0.018305 | 0.326176 | 2,189 | 65 | 123 | 33.676923 | 0.751186 | 0.043399 | 0 | 0 | 0 | 0 | 0.102441 | 0.012925 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040816 | false | 0 | 0.102041 | 0 | 0.204082 | 0.204082 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9fb537fd9bf06d48e541130e128371524f6ed152 | 1,002 | py | Python | migrations/versions/3c4f7702a459_.py | eleweek/WatchPeopleCode | 2389fe0b8eb040f553f847b9e1686883c4bd1388 | [
"MIT"
] | 200 | 2015-01-27T18:26:09.000Z | 2021-12-19T14:38:53.000Z | migrations/versions/3c4f7702a459_.py | eleweek/WatchPeopleCode | 2389fe0b8eb040f553f847b9e1686883c4bd1388 | [
"MIT"
] | 12 | 2015-02-09T10:18:38.000Z | 2021-12-13T19:43:56.000Z | migrations/versions/3c4f7702a459_.py | eleweek/WatchPeopleCode | 2389fe0b8eb040f553f847b9e1686883c4bd1388 | [
"MIT"
] | 23 | 2015-02-09T04:42:48.000Z | 2015-02-20T18:58:56.000Z | """empty message

Revision ID: 3c4f7702a459
Revises: 5a24a4aa5eb3
Create Date: 2015-07-10 23:59:20.856464

"""

# revision identifiers, used by Alembic.
revision = '3c4f7702a459'
down_revision = '5a24a4aa5eb3'

from alembic import op
import sqlalchemy as sa


def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(u'streamer_youtube_channel_key', 'streamer', type_='unique')
    op.drop_column('streamer', 'youtube_channel')
    op.drop_column('streamer', 'youtube_name')
    ### end Alembic commands ###


def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('streamer', sa.Column('youtube_name', sa.VARCHAR(length=30), autoincrement=False, nullable=True))
    op.add_column('streamer', sa.Column('youtube_channel', sa.VARCHAR(length=24), autoincrement=False, nullable=True))
    op.create_unique_constraint(u'streamer_youtube_channel_key', 'streamer', ['youtube_channel'])
    ### end Alembic commands ###
| 32.322581 | 118 | 0.728543 | 125 | 1,002 | 5.68 | 0.448 | 0.105634 | 0.123944 | 0.064789 | 0.504225 | 0.340845 | 0.340845 | 0.123944 | 0 | 0 | 0 | 0.062212 | 0.133733 | 1,002 | 30 | 119 | 33.4 | 0.75576 | 0.290419 | 0 | 0 | 0 | 0 | 0.29941 | 0.082596 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9fb64f87ea9c09ee15dab5a503b75b4ebd13b029 | 4,965 | py | Python | src/secml/utils/mixed_utils.py | zangobot/secml | 95a293e1201c24256eb7fe2f1d2125cd5f318c8c | [
"Apache-2.0"
] | 63 | 2020-04-20T16:31:16.000Z | 2022-03-29T01:05:35.000Z | src/secml/utils/mixed_utils.py | zangobot/secml | 95a293e1201c24256eb7fe2f1d2125cd5f318c8c | [
"Apache-2.0"
] | 5 | 2020-04-21T11:31:39.000Z | 2022-03-24T13:42:56.000Z | src/secml/utils/mixed_utils.py | zangobot/secml | 95a293e1201c24256eb7fe2f1d2125cd5f318c8c | [
"Apache-2.0"
] | 8 | 2020-04-21T09:16:42.000Z | 2022-02-23T16:28:43.000Z | """
.. module:: FunctionUtils
:synopsis: Collection of mixed utility classes and functions
.. moduleauthor:: Marco Melis <marco.melis@unica.it>
"""
__all__ = ['AverageMeter', 'OrderedFlexibleClass', 'check_is_fitted']
class AverageMeter:
"""Computes and stores the average and current value.
Attributes
----------
val : float
Current value.
avg : float
Average.
sum : float
Cumulative sum of seen values.
count : int
Number of seen values.
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0.
self.avg = 0.
self.sum = 0.
self.count = 0
def update(self, val, n=1):
"""Updated average and current value.
Parameters
----------
val : float
New current value.
n : int, optional
Multiplier for the current value. Indicates how many times
the value should be counted in the average. Default 1.
"""
val = float(val)
n = int(n)
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
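# Minimal usage sketch for AverageMeter (illustrative only):
#   meter = AverageMeter()
#   meter.update(0.5)
#   meter.update(1.5, n=3)
#   meter.avg  # -> (0.5 + 1.5 * 3) / 4 == 1.25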
class OrderedFlexibleClass:
"""A flexible class exposing its attributes in a specific order when iterated.
Order of the attributes inside the class follows the inputs sequence.
Any attribute set after class initialization will be placed at the end
of attributes sequence (see examples).
Parameters
----------
items : tuple1, tuple2, ...
Any custom sequence of tuples with the attributes to set.
Each tuple must be a (key, value) pair.
Examples
--------
>>> from secml.utils import OrderedFlexibleClass
>>> c = OrderedFlexibleClass(('attr1', None), ('attr2', 5))
>>> print(tuple(attr for attr in c))
(None, 5)
>>> c.attr3 = 123
>>> print(tuple(attr for attr in c))
(None, 5, 123)
"""
def __init__(self, *items):
if len(items) == 0:
raise ValueError("class must have at least one attribute.")
if not all(isinstance(i, tuple) for i in items):
raise TypeError("each attribute must be specified as a tuple of (key, value).")
# List with attributes sequence (this provides the fixed order)
self._params = []
# __setattr__ will store the attribute in `_params` and set its value
for i in items:
setattr(self, *i)
@property
def attr_order(self):
"""Returns a list specifing current attributes order."""
return self._params
def __setattr__(self, key, value):
"""Set desired attribute and store the key in `_params`."""
# Register attribute only if new (skip service attribute _params)
if key != '_params' and not hasattr(self, key):
self._params.append(key)
# Set attribute value in the standard way
super(OrderedFlexibleClass, self).__setattr__(key, value)
def __iter__(self):
"""Returns class attributes following a fixed order."""
for e in self._params:
yield self.__dict__[e]
def check_is_fitted(obj, attributes, msg=None, check_all=True):
"""Check if the input object is trained (fitted).
Checks if the input object is fitted by verifying if all or any of the
input attributes are not None.
Parameters
----------
obj : object
Instance of the class to check. Must implement `.fit()` method.
attributes : str or list of str
Attribute or list of attributes to check.
        E.g.: `['classes', 'n_features', ...]` or `'classes'`
msg : str or None, optional
If None, the default error message is:
"this `{name}` is not trained. Call `.fit()` first.".
For custom messages if '{name}' is present in the message string,
it is substituted by the class name of the checked object.
check_all : bool, optional
Specify whether to check (True) if all of the given attributes
are not None or (False) just any of them. Default True.
Raises
------
NotFittedError
If `check_all` is True and any of the attributes is None;
if `check_all` is False and all of attributes are None.
"""
from secml.core.type_utils import is_list, is_str
from secml.core.exceptions import NotFittedError
if msg is None:
msg = "this `{name}` is not trained. Call `.fit()` first."
if not hasattr(obj, 'fit'):
raise TypeError("`{:}` does not implement `.fit()`.".format(obj))
if is_str(attributes):
attributes = [attributes]
elif not is_list(attributes):
raise TypeError(
"the attribute(s) to check must be a string or a list of strings")
condition = any if check_all is True else all
if condition([getattr(obj, attr) is None for attr in attributes]):
raise NotFittedError(msg.format(name=obj.__class__.__name__))
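# Usage sketch (hypothetical estimator, for illustration only):
#   class Clf:
#       def __init__(self): self.classes = None
#       def fit(self): self.classes = [0, 1]
#   clf = Clf()
#   check_is_fitted(clf, 'classes')  # raises NotFittedError until clf.fit() is called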
| 30.838509 | 91 | 0.615509 | 644 | 4,965 | 4.645963 | 0.305901 | 0.010027 | 0.009024 | 0.012032 | 0.063503 | 0.040775 | 0.040775 | 0.040775 | 0.019385 | 0 | 0 | 0.005906 | 0.283787 | 4,965 | 160 | 92 | 31.03125 | 0.835489 | 0.52286 | 0 | 0 | 0 | 0 | 0.148384 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16 | false | 0 | 0.04 | 0 | 0.26 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9fb79ef3eec5a97320bcd56e8094a362a869463e | 621 | py | Python | find_primes.py | chapman-phys227-2016s/hw-1-seama107 | 52d942891c15a6e575f5c77e5378ed7cc17bdcc3 | [
"MIT"
] | null | null | null | find_primes.py | chapman-phys227-2016s/hw-1-seama107 | 52d942891c15a6e575f5c77e5378ed7cc17bdcc3 | [
"MIT"
] | null | null | null | find_primes.py | chapman-phys227-2016s/hw-1-seama107 | 52d942891c15a6e575f5c77e5378ed7cc17bdcc3 | [
"MIT"
] | null | null | null | #!/usr/bin/python
def find_primes(n):
    """
    Finds all the primes from 2 up to and including n using the sieve of Eratosthenes
    """
    candidates = [i + 2 for i in range(n - 1)]
    for p in candidates:
        # Iterate over a copy: removing items from a list while iterating
        # over it skips elements, which can leave composites behind.
        for i in list(candidates):
            if i % p == 0 and p != i:
                candidates.remove(i)
    return candidates
def test_primes(n=100):
    """
    Verifies that each returned prime is not divisible by any integer
    from 2 up to its square root
    """
    list_primes = find_primes(n)
    for p in list_primes:
        for i in range(2, int(p ** 0.5) + 1):
            assert p % i != 0
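# Minimal usage sketch (illustrative):
#   print(find_primes(30))  # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
#   test_primes(100)        # raises AssertionError if a composite slipped through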
| 27 | 73 | 0.570048 | 99 | 621 | 3.525253 | 0.484848 | 0.060172 | 0.051576 | 0.063037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021583 | 0.328502 | 621 | 23 | 74 | 27 | 0.815348 | 0.31401 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 1 | 0.166667 | false | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9fb7e7bf5bf3c28e0f4d6debf26ccf856ae3ec19 | 3,424 | py | Python | src/main/resources/veracode_api_signing/credentials.py | xebialabs-community/xlr-veracode-plugin | 85979403d4a0274844b88d3aa6a946439fbb052a | [
"MIT"
] | 2 | 2020-05-15T14:22:20.000Z | 2020-07-10T19:59:25.000Z | src/main/resources/veracode_api_signing/credentials.py | xebialabs-community/xlr-veracode-plugin | 85979403d4a0274844b88d3aa6a946439fbb052a | [
"MIT"
] | 1 | 2021-03-19T11:13:03.000Z | 2021-06-30T15:42:16.000Z | src/main/resources/veracode_api_signing/credentials.py | xebialabs-community/xlr-veracode-plugin | 85979403d4a0274844b88d3aa6a946439fbb052a | [
"MIT"
] | 1 | 2020-04-02T22:06:06.000Z | 2020-04-02T22:06:06.000Z | # MIT License
# Copyright (c) 2019 Veracode, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
try:
import configparser
except ImportError:
import ConfigParser as configparser
import os
from os.path import expanduser
from .exceptions import VeracodeCredentialsError
PROFILE_DEFAULT = 'default'
ENV_API_KEY_NAME = 'VERACODE_API_KEY_ID'
ENV_API_SECRET_KEY_NAME = 'VERACODE_API_KEY_SECRET'
ENV_PROFILE = 'VERACODE_API_PROFILE'
FIX_INSTRUCTIONS = 'Please consult the documentation to get your Veracode credentials set up.'
def get_credentials(auth_file=None):
""" Get credentials from supported sources. Precedence is 1) env vars, 2) file.
"""
try:
return get_credentials_from_environment_variables()
except KeyError:
pass
return get_credentials_from_filesystem(auth_file)
def get_credentials_from_environment_variables():
return os.environ[ENV_API_KEY_NAME], os.environ[ENV_API_SECRET_KEY_NAME]
def get_credentials_from_filesystem(auth_file=None):
auth_file = auth_file or os.path.join(expanduser("~"), '.veracode', 'credentials')
try:
return get_credentials_from_config_file(auth_file)
except (IOError, configparser.Error, configparser.NoSectionError) as e:
raise VeracodeCredentialsError('Unable to get credentials from {file}: {error}'
'\n{fix}'.format(file=auth_file, error=e, fix=FIX_INSTRUCTIONS))
def _get_credentials_profile():
""" Get credentials profile from environment variable.
"""
return os.environ.get(ENV_PROFILE, PROFILE_DEFAULT)
def get_credentials_from_config_file(auth_file):
""" Get credentials from the config file. Uses the profile specified by env variable.
"""
if not os.path.exists(auth_file):
raise IOError("Could not read file: {}. {}".format(auth_file, FIX_INSTRUCTIONS))
config = configparser.ConfigParser()
config.read(auth_file)
credentials_section_name = _get_credentials_profile()
api_key_id = config.get(credentials_section_name, ENV_API_KEY_NAME)
api_key_secret = config.get(credentials_section_name, ENV_API_SECRET_KEY_NAME)
if api_key_id and api_key_secret:
return api_key_id, api_key_secret
else:
raise VeracodeCredentialsError(
'Unable to find credentials in auth file {auth_file}.\n{fix}'.format(
auth_file=auth_file, fix=FIX_INSTRUCTIONS))
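# Expected credentials file layout (inferred from the parsing above):
#   [default]
#   VERACODE_API_KEY_ID = <your key id>
#   VERACODE_API_KEY_SECRET = <your key secret>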
| 39.356322 | 103 | 0.753213 | 471 | 3,424 | 5.267516 | 0.343949 | 0.084643 | 0.065296 | 0.015719 | 0.160419 | 0.087868 | 0.058847 | 0 | 0 | 0 | 0 | 0.002127 | 0.17611 | 3,424 | 86 | 104 | 39.813953 | 0.877348 | 0.377044 | 0 | 0.069767 | 0 | 0 | 0.143947 | 0.010963 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116279 | false | 0.023256 | 0.139535 | 0.023256 | 0.395349 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9fbee8c69adef049ed9ba532558ba181190672fc | 673 | py | Python | ansa/ansa.py | Iskandar-Ki/AnsaRSS | 00e4c49114ba54078528967d8ddb0bf3efa9187e | [
"Unlicense"
] | 1 | 2018-09-19T09:26:34.000Z | 2018-09-19T09:26:34.000Z | ansa/ansa.py | Iskandar-Ki/AnsaRSS | 00e4c49114ba54078528967d8ddb0bf3efa9187e | [
"Unlicense"
] | null | null | null | ansa/ansa.py | Iskandar-Ki/AnsaRSS | 00e4c49114ba54078528967d8ddb0bf3efa9187e | [
"Unlicense"
] | null | null | null | import feedparser
class Ansa():
def __init__(self, *args, **kwargs):
self.parsed_feed = []
def getNews(self, xmlrequest):
feed = feedparser.parse(xmlrequest)
for item in feed.entries:
self.parsed_feed.append(self.parseData(item))
return self.parsed_feed
def parseData(self, data):
title = data['title']
description = data['summary']
link = data['link']
pub_date = data['published']
parsed_data = {
'title' : title,
'description' : description,
'link' : link,
'pub_date' : pub_date
}
return parsed_data | 26.92 | 57 | 0.549777 | 68 | 673 | 5.264706 | 0.426471 | 0.083799 | 0.117318 | 0.094972 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.338782 | 673 | 25 | 58 | 26.92 | 0.804494 | 0 | 0 | 0 | 0 | 0 | 0.078635 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.047619 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9fbfab3dbe7f3cc0b10a3640b0188913b7d6c393 | 862 | py | Python | examples/multiple_modules/commands/envs/get.py | pddg/uroboros | 0e621206c24e62d96fdac244d09c0c790d8930df | [
"Apache-2.0"
] | 15 | 2019-07-13T15:45:00.000Z | 2022-03-08T12:54:54.000Z | examples/multiple_modules/commands/envs/get.py | pddg/uroboros | 0e621206c24e62d96fdac244d09c0c790d8930df | [
"Apache-2.0"
] | 27 | 2019-06-24T15:41:27.000Z | 2020-07-12T09:25:04.000Z | examples/multiple_modules/commands/envs/get.py | pddg/uroboros | 0e621206c24e62d96fdac244d09c0c790d8930df | [
"Apache-2.0"
] | null | null | null | import os
from uroboros import Command, ExitStatus
class GetCommand(Command):
name = "get"
short_description = "Show value"
long_description = "Show value of given env var"
def build_option(self, parser):
parser.add_argument('name', type=str, help='Env var name')
parser.add_argument('-u', '--upper', default=False,
action='store_true',
help='Capitalize all chars of given name')
return parser
def run(self, args):
key = args.name
if args.upper:
key = key.upper()
var = os.getenv(key)
if var is None:
print("Specified variable does not exists: '{}'".format(key))
return ExitStatus.FAILURE
print("{}={}".format(key, var))
return ExitStatus.SUCCESS
command = GetCommand()
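# Usage sketch (assumes a root uroboros.Command, here hypothetically RootCommand):
#   root = RootCommand()
#   root.add_command(command)
#   exit(root.execute())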
| 26.9375 | 73 | 0.576566 | 99 | 862 | 4.959596 | 0.565657 | 0.0611 | 0.081466 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.312065 | 862 | 31 | 74 | 27.806452 | 0.827993 | 0 | 0 | 0 | 0 | 0 | 0.178654 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.086957 | 0 | 0.478261 | 0.086957 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9fc2a29255c0ae2e9ff1820f112e173145cbae9c | 1,558 | py | Python | apply_alis.py | esalesky/vecalign | 2d77bc94ae75545bf00a5b3a136a6c8808bc0dce | [
"Apache-2.0"
] | 1 | 2021-02-07T12:50:42.000Z | 2021-02-07T12:50:42.000Z | apply_alis.py | esalesky/vecalign | 2d77bc94ae75545bf00a5b3a136a6c8808bc0dce | [
"Apache-2.0"
] | null | null | null | apply_alis.py | esalesky/vecalign | 2d77bc94ae75545bf00a5b3a136a6c8808bc0dce | [
"Apache-2.0"
] | null | null | null | import sys
import json
alifile = sys.argv[1]
srcfile = sys.argv[2]
tgtfile = sys.argv[3]
outdir = sys.argv[4]
src = sys.argv[5]
tgt = sys.argv[6]
mistakes = "summary."+src+"-"+tgt
talk = srcfile.split('/')[1].split('.')[0]
srcs = []
tgts = []
with open(srcfile, 'r') as f:
    for line in f:
        line = line.strip()
        if line:
            srcs.append(line)
with open(tgtfile, 'r') as f:
    for line in f:
        line = line.strip()
        if line:
            tgts.append(line)
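# Each alignment line has the form "src_indices:tgt_indices:cost",
# e.g. "[0, 1]:[0]:0.123456" (vecalign output); the cost field is unused below.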
with open(alifile,'r') as f, open(outdir + "/" + talk + '.' + src,'w') as srcout, open(outdir + "/" + talk + '.' + tgt,'w') as tgtout, open(mistakes,'a') as errfile:
    for line in f:
        line = line.strip()
        if not line:
            continue
        s, t, c = line.split(":")
ss = json.loads(s)
tt = json.loads(t)
if len(ss) > 1:
print("-- WARN: src in %s has more than 1 sent -- " % srcfile)
print("-- WARN: src in %s has more than 1 sent -- " % srcfile, file=errfile)
if len(ss) == 0:
print("%s : src was null-aligned -- " % talk)
print("%s : src was null-aligned -- " % talk, file=errfile)
if len(tt) == 0:
print("%s: tgt was null-aligned -- " % talk)
print("%s: tgt was null-aligned -- " % talk, file=errfile)
srctmp = ' '.join([srcs[x] for x in ss])
tgttmp = ' '.join([tgts[x] for x in tt])
srcout.write(srctmp+'\n')
tgtout.write(tgttmp+'\n')
| 30.54902 | 165 | 0.516688 | 218 | 1,558 | 3.692661 | 0.293578 | 0.052174 | 0.096894 | 0.134161 | 0.455901 | 0.455901 | 0.395031 | 0.26087 | 0.171429 | 0.09441 | 0 | 0.011797 | 0.292683 | 1,558 | 50 | 166 | 31.16 | 0.718693 | 0 | 0 | 0.214286 | 0 | 0 | 0.146341 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.047619 | 0 | 0.047619 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9fc41cd77d4e57818df84bb0ceeea19889bf81e8 | 29,754 | py | Python | src/meadowgrid/coordinator_client.py | meadowdata/meadowflow | 8d4d93e3de2ac8636eb8f5ce058c28b398684806 | [
"MIT"
] | 4 | 2021-12-23T16:08:12.000Z | 2022-02-13T21:39:44.000Z | src/meadowgrid/coordinator_client.py | meadowdata/meadowflow | 8d4d93e3de2ac8636eb8f5ce058c28b398684806 | [
"MIT"
] | 13 | 2021-12-07T21:54:12.000Z | 2022-03-02T22:33:22.000Z | src/meadowgrid/coordinator_client.py | hrichardlee/meadowdata | 5d302956474d9f53c43afa0d7ce9a4b4d98591c5 | [
"MIT"
] | 1 | 2021-11-14T17:39:12.000Z | 2021-11-14T17:39:12.000Z | from __future__ import annotations
import json
import pickle
from types import TracebackType
from typing import (
Any,
Dict,
Iterable,
List,
Literal,
Optional,
Sequence,
Tuple,
Type,
Union,
)
import grpc
import grpc.aio
from meadowgrid.config import (
DEFAULT_COORDINATOR_ADDRESS,
DEFAULT_LOGICAL_CPU_REQUIRED,
DEFAULT_MEMORY_GB_REQUIRED,
DEFAULT_PRIORITY,
JOB_ID_VALID_CHARACTERS,
LOGICAL_CPU,
MEMORY_GB,
)
from meadowgrid.credentials import CredentialsSource, CredentialsService
from meadowgrid.deployed_function import (
CodeDeployment,
InterpreterDeployment,
MeadowGridCommand,
MeadowGridDeployedRunnable,
MeadowGridFunction,
MeadowGridFunctionName,
MeadowGridVersionedDeployedRunnable,
VersionedCodeDeployment,
VersionedInterpreterDeployment,
)
from meadowgrid.meadowgrid_pb2 import (
AddCredentialsRequest,
AddJobResponse,
AddTasksToGridJobRequest,
AgentStateResponse,
AgentStatesRequest,
AwsSecret,
ContainerAtDigest,
ContainerAtTag,
Credentials,
GitRepoBranch,
GitRepoCommit,
GridTask,
GridTaskStateResponse,
GridTaskStatesRequest,
GridTaskUpdateAndGetNextRequest,
HealthCheckRequest,
HealthCheckResponse,
Job,
JobStateUpdate,
JobStateUpdates,
JobStatesRequest,
NextJobsRequest,
NextJobsResponse,
ProcessState,
PyCommandJob,
PyFunctionJob,
PyGridJob,
QualifiedFunctionName,
RegisterAgentRequest,
Resource,
ServerAvailableContainer,
ServerAvailableFile,
ServerAvailableFolder,
ServerAvailableInterpreter,
StringPair,
)
from meadowgrid.meadowgrid_pb2_grpc import MeadowGridCoordinatorStub
# make this enum available for users
ProcessStateEnum = ProcessState.ProcessStateEnum
def _make_valid_job_id(job_id: str) -> str:
return "".join(c for c in job_id if c in JOB_ID_VALID_CHARACTERS)
def _string_pairs_from_dict(d: Optional[Dict[str, str]]) -> Iterable[StringPair]:
"""
Opposite of _string_pairs_to_dict in agent.py. Helper for dicts in protobuf.
"""
if d is not None:
for key, value in d.items():
yield StringPair(key=key, value=value)
def _add_deployments_to_job(
job: Job,
code_deployment: Union[CodeDeployment, VersionedCodeDeployment],
interpreter_deployment: Union[
InterpreterDeployment, VersionedInterpreterDeployment
],
) -> None:
"""
Think of this as job.code_deployment = code_deployment; job.interpreter_deployment =
interpreter_deployment, but it's complicated because these are protobuf oneofs
"""
if isinstance(code_deployment, ServerAvailableFolder):
job.server_available_folder.CopyFrom(code_deployment)
elif isinstance(code_deployment, GitRepoCommit):
job.git_repo_commit.CopyFrom(code_deployment)
elif isinstance(code_deployment, GitRepoBranch):
job.git_repo_branch.CopyFrom(code_deployment)
else:
raise ValueError(f"Unknown code deployment type {type(code_deployment)}")
if isinstance(interpreter_deployment, ServerAvailableInterpreter):
job.server_available_interpreter.CopyFrom(interpreter_deployment)
elif isinstance(interpreter_deployment, ContainerAtDigest):
job.container_at_digest.CopyFrom(interpreter_deployment)
elif isinstance(interpreter_deployment, ServerAvailableContainer):
job.server_available_container.CopyFrom(interpreter_deployment)
elif isinstance(interpreter_deployment, ContainerAtTag):
job.container_at_tag.CopyFrom(interpreter_deployment)
else:
raise ValueError(
f"Unknown interpreter deployment type {type(interpreter_deployment)}"
)
def _pickle_protocol_for_deployed_interpreter() -> int:
"""
This is a placeholder, the intention is to get the deployed interpreter's version
somehow from the Deployment object or something like it and use that to determine
what the highest pickle protocol version we can use safely is.
"""
# TODO just hard-coding the interpreter version for now, need to actually grab it
# from the deployment somehow
interpreter_version = (3, 8, 0)
# based on documentation in
# https://docs.python.org/3/library/pickle.html#data-stream-format
if interpreter_version >= (3, 8, 0):
protocol = 5
elif interpreter_version >= (3, 4, 0):
protocol = 4
elif interpreter_version >= (3, 0, 0):
protocol = 3
else:
# TODO support for python 2 would require dealing with the string/bytes issue
raise NotImplementedError("We currently only support python 3")
return min(protocol, pickle.HIGHEST_PROTOCOL)
def _create_py_function(
meadowgrid_function: MeadowGridFunction, pickle_protocol: int
) -> PyFunctionJob:
"""
Returns a PyFunctionJob, called by _create_py_runnable_job which creates a Job that
has a PyFunctionJob in it.
pickle_protocol should be the highest pickle protocol that the deployed function
will be able to understand.
"""
# first pickle the function arguments from job_run_spec
# TODO add support for compressions, pickletools.optimize, possibly cloudpickle?
# TODO also add the ability to write this to a shared location so that we don't need
# to pass it through the server.
if meadowgrid_function.function_args or meadowgrid_function.function_kwargs:
pickled_function_arguments = pickle.dumps(
(meadowgrid_function.function_args, meadowgrid_function.function_kwargs),
protocol=pickle_protocol,
)
else:
# according to docs, None is translated to empty anyway
pickled_function_arguments = b""
# then, construct the PyFunctionJob
py_function = PyFunctionJob(pickled_function_arguments=pickled_function_arguments)
function_spec = meadowgrid_function.function_spec
if isinstance(function_spec, MeadowGridFunctionName):
py_function.qualified_function_name.CopyFrom(
QualifiedFunctionName(
module_name=function_spec.module_name,
function_name=function_spec.function_name,
)
)
elif isinstance(function_spec, bytes):
py_function.pickled_function = function_spec
else:
raise ValueError(f"Unknown type of function_spec {type(function_spec)}")
return py_function
def _create_py_runnable_job(
job_id: str,
job_friendly_name: str,
deployed_runnable: Union[
MeadowGridDeployedRunnable, MeadowGridVersionedDeployedRunnable
],
priority: float,
resources_required: Optional[Dict[str, float]],
) -> Job:
job = Job(
job_id=_make_valid_job_id(job_id),
job_friendly_name=_make_valid_job_id(job_friendly_name),
priority=priority,
environment_variables=_string_pairs_from_dict(
deployed_runnable.environment_variables
),
result_highest_pickle_protocol=pickle.HIGHEST_PROTOCOL,
resources_required=construct_resources_required_protobuf(resources_required),
)
_add_deployments_to_job(
job, deployed_runnable.code_deployment, deployed_runnable.interpreter_deployment
)
if isinstance(deployed_runnable.runnable, MeadowGridCommand):
# TODO see _create_py_function about optimizations we could do for transferring
# pickled data
if deployed_runnable.runnable.context_variables:
pickled_context_variables = pickle.dumps(
deployed_runnable.runnable.context_variables,
protocol=_pickle_protocol_for_deployed_interpreter(),
)
else:
pickled_context_variables = b""
job.py_command.CopyFrom(
PyCommandJob(
command_line=deployed_runnable.runnable.command_line,
pickled_context_variables=pickled_context_variables,
)
)
elif isinstance(deployed_runnable.runnable, MeadowGridFunction):
job.py_function.CopyFrom(
_create_py_function(
deployed_runnable.runnable, _pickle_protocol_for_deployed_interpreter()
)
)
else:
raise ValueError(f"Unexpected runnable type {type(deployed_runnable.runnable)}")
return job
def _create_py_grid_job(
job_id: str,
job_friendly_name: str,
deployed_function: Union[
MeadowGridDeployedRunnable, MeadowGridVersionedDeployedRunnable
],
tasks: Sequence[Tuple[int, Sequence[Any], Dict[str, Any]]],
all_tasks_added: bool,
priority: float,
interruption_probability_threshold: float,
resources_required_per_task: Dict[str, float],
) -> Job:
if not isinstance(deployed_function.runnable, MeadowGridFunction):
raise ValueError("simple_job must have a MeadowGridFunction runnable")
pickle_protocol = _pickle_protocol_for_deployed_interpreter()
job = Job(
job_id=_make_valid_job_id(job_id),
job_friendly_name=_make_valid_job_id(job_friendly_name),
priority=priority,
interruption_probability_threshold=interruption_probability_threshold,
environment_variables=_string_pairs_from_dict(
deployed_function.environment_variables
),
result_highest_pickle_protocol=pickle.HIGHEST_PROTOCOL,
resources_required=construct_resources_required_protobuf(
resources_required_per_task
),
py_grid=PyGridJob(
function=_create_py_function(deployed_function.runnable, pickle_protocol),
tasks=_create_task_requests(tasks, pickle_protocol),
all_tasks_added=all_tasks_added,
),
)
_add_deployments_to_job(
job, deployed_function.code_deployment, deployed_function.interpreter_deployment
)
return job
def _create_task_requests(
tasks: Sequence[Tuple[int, Sequence[Any], Dict[str, Any]]], pickle_protocol: int
) -> Sequence[GridTask]:
"""
tasks should be a list of (task_id, args, kwargs)
pickle_protocol should be the highest pickle protocol that the deployed function
will be able to understand.
"""
return [
GridTask(
task_id=task_id,
pickled_function_arguments=pickle.dumps(
(args, kwargs), protocol=pickle_protocol
),
)
for task_id, args, kwargs in tasks
]
AddJobState = Literal["ADDED", "IS_DUPLICATE"]
def _add_job_state_string(state: AddJobResponse) -> AddJobState:
if state.state == AddJobResponse.AddJobState.ADDED:
return "ADDED"
elif state.state == AddJobResponse.AddJobState.IS_DUPLICATE:
return "IS_DUPLICATE"
else:
raise ValueError(f"Unknown AddJobState {state.state}")
def _add_credentials_request(
service: CredentialsService, service_url: str, source: CredentialsSource
) -> AddCredentialsRequest:
result = AddCredentialsRequest(
service=Credentials.Service.Value(service),
service_url=service_url,
)
if isinstance(source, AwsSecret):
result.aws_secret.CopyFrom(source)
elif isinstance(source, ServerAvailableFile):
result.server_available_file.CopyFrom(source)
else:
raise ValueError(f"Unknown type of credentials source {type(source)}")
return result
def _grpc_retry_option(
package: str, service: str
) -> Tuple[Literal["grpc.service_config"], str]:
"""Create a retry config.
Args:
package (str): package name (from proto file)
service (str): service name (from proto file)
"""
# https://stackoverflow.com/questions/64227270/use-retrypolicy-with-python-grpc-client
json_config = json.dumps(
{
"methodConfig": [
{
"name": [{"service": f"{package}.{service}"}],
"retryPolicy": {
"maxAttempts": 5,
"initialBackoff": "1s",
"maxBackoff": "10s",
"backoffMultiplier": 2,
"retryableStatusCodes": ["UNAVAILABLE"],
},
}
]
}
)
return ("grpc.service_config", json_config)
def construct_resources_required_protobuf(
resources: Optional[Dict[str, float]]
) -> Sequence[Resource]:
"""
If resources is None, provides the defaults for resources required. If resources is
not None, adds in the default resources if necessary. This means for default
resources like LOGICAL_CPU and MEMORY_GB, the only way to "opt-out" of these
resources is to explicitly set them to zero. Requiring zero of a resource is treated
the same as not requiring that resource at all.
"Opposite" of Resources.from_protobuf
"""
if resources is None:
resources = {}
result = construct_resources_protobuf(resources)
if MEMORY_GB not in resources:
result.append(Resource(name=MEMORY_GB, value=DEFAULT_MEMORY_GB_REQUIRED))
if LOGICAL_CPU not in resources:
result.append(Resource(name=LOGICAL_CPU, value=DEFAULT_LOGICAL_CPU_REQUIRED))
return result
def construct_resources_protobuf(resources: Dict[str, float]) -> List[Resource]:
"""Small helper for constructing a sequence of Resource"""
return [Resource(name=name, value=value) for name, value in resources.items()]
class MeadowGridCoordinatorClientAsync:
"""
A client for MeadowGridCoordinator for "users" of the system. Effectively allows
users to add jobs i.e. request that jobs get run, and then poll for their status.
See also MeadowGridCoordinatorHandler docstring.
"""
def __init__(self, address: str = DEFAULT_COORDINATOR_ADDRESS):
self._channel = grpc.aio.insecure_channel(
address, options=[_grpc_retry_option("meadowgrid", "MeadowGridCoordinator")]
)
self._stub = MeadowGridCoordinatorStub(self._channel)
async def add_py_runnable_job(
self,
job_id: str,
job_friendly_name: str,
deployed_runnable: Union[
MeadowGridDeployedRunnable, MeadowGridVersionedDeployedRunnable
],
priority: float = DEFAULT_PRIORITY,
resources_required: Optional[Dict[str, float]] = None,
) -> AddJobState:
"""
Requests a run of the specified runnable in the context of a python environment
on a meadowgrid agent. See also MeadowGridDeployedRunnable docstring and Job in
meadowgrid.proto.
Return value will either be ADDED (success) or IS_DUPLICATE, indicating that
the job_id has already been used.
"""
return _add_job_state_string(
await self._stub.add_job(
_create_py_runnable_job(
job_id,
job_friendly_name,
deployed_runnable,
priority,
resources_required,
)
)
)
async def add_py_grid_job(
self,
job_id: str,
job_friendly_name: str,
deployed_function: Union[
MeadowGridDeployedRunnable, MeadowGridVersionedDeployedRunnable
],
tasks: Sequence[Tuple[int, Sequence[Any], Dict[str, Any]]],
all_tasks_added: bool,
priority: float,
interruption_probability_threshold: float,
resources_required_per_task: Dict[str, float],
) -> AddJobState:
"""
Creates a grid job. See also MeadowGridDeployedRunnable, Job in
meadowgrid.proto, and grid_map.
deployed_function.runnable must be a MeadowGridFunction. This is a bit hacky but
seems okay for an internal API
If the request contains multiple tasks with the same id, only the first one
will be taken and subsequent tasks will be ignored.
"""
return _add_job_state_string(
await self._stub.add_job(
_create_py_grid_job(
job_id,
job_friendly_name,
deployed_function,
tasks,
all_tasks_added,
priority,
interruption_probability_threshold,
resources_required_per_task,
)
)
)
async def add_tasks_to_grid_job(
self,
job_id: str,
tasks: Sequence[Tuple[int, Sequence[Any], Dict[str, Any]]],
all_tasks_added: bool,
) -> None:
"""
Adds tasks to an existing grid job
Once all_tasks_added is set to True, no more tasks can be added to that grid
job.
If we try to add tasks with the same task id more than once, subsequent
requests will be ignored silently. This applies within the same request also.
"""
await self._stub.add_tasks_to_grid_job(
AddTasksToGridJobRequest(
job_id=job_id,
# TODO we should get the highest pickle protocol from the deployment
# somehow...
tasks=_create_task_requests(tasks, pickle.HIGHEST_PROTOCOL),
all_tasks_added=all_tasks_added,
)
)
async def get_simple_job_states(
self, job_ids: Sequence[str]
) -> Sequence[ProcessState]:
"""
Gets the states and results for the jobs corresponding to the specified
job_ids. Will return one ProcessState for each job_id in the same order.
See also ProcessStateEnum in meadowgrid.proto.
TODO add the ability to send results back to a shared location so that we don't
need to pass through the results through the server
TODO consider adding the ability for the client to optionally register for a
callback/push notification? Even if we do, though, polling will be important
for clients that want to run jobs without starting a server for themselves.
"""
return (
await self._stub.get_simple_job_states(JobStatesRequest(job_ids=job_ids))
).process_states
async def get_grid_task_states(
self, job_id: str, task_ids_to_ignore: Sequence[int]
) -> Sequence[GridTaskStateResponse]:
"""
Gets the states and results for the tasks in the specified grid job.
task_ids_to_ignore tells the server to not send back results for those task_ids
(presumably because we have the results already)
"""
return (
await self._stub.get_grid_task_states(
GridTaskStatesRequest(
job_id=job_id, task_ids_to_ignore=task_ids_to_ignore
)
)
).task_states
async def add_credentials(
self, service: CredentialsService, service_url: str, source: CredentialsSource
) -> None:
await self._stub.add_credentials(
_add_credentials_request(service, service_url, source)
)
async def get_agent_states(self) -> Sequence[AgentStateResponse]:
return (await self._stub.get_agent_states(AgentStatesRequest())).agents
async def check(self) -> bool:
return (
await self._stub.Check(HealthCheckRequest())
).status == HealthCheckResponse.ServingStatus.SERVING
async def __aenter__(self) -> MeadowGridCoordinatorClientAsync:
await self._channel.__aenter__()
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
return await self._channel.__aexit__(exc_type, exc_value, traceback)
class MeadowGridCoordinatorClientSync:
"""The non-async version of MeadowGridCoordinatorClientAsync"""
def __init__(self, address: str = DEFAULT_COORDINATOR_ADDRESS):
self._channel = grpc.insecure_channel(
address, options=[_grpc_retry_option("meadowgrid", "MeadowGridCoordinator")]
)
self._stub = MeadowGridCoordinatorStub(self._channel)
def add_py_runnable_job(
self,
job_id: str,
job_friendly_name: str,
deployed_function: Union[
MeadowGridDeployedRunnable, MeadowGridVersionedDeployedRunnable
],
priority: float = DEFAULT_PRIORITY,
resources_required: Optional[Dict[str, float]] = None,
) -> AddJobState:
return _add_job_state_string(
self._stub.add_job(
_create_py_runnable_job(
job_id,
job_friendly_name,
deployed_function,
priority,
resources_required,
)
)
)
def add_py_grid_job(
self,
job_id: str,
job_friendly_name: str,
deployed_function: Union[
MeadowGridDeployedRunnable, MeadowGridVersionedDeployedRunnable
],
tasks: Sequence[Tuple[int, Sequence[Any], Dict[str, Any]]],
all_tasks_added: bool,
priority: float,
interruption_probability_threshold: float,
resources_required_per_task: Dict[str, float],
) -> AddJobState:
return _add_job_state_string(
self._stub.add_job(
_create_py_grid_job(
job_id,
job_friendly_name,
deployed_function,
tasks,
all_tasks_added,
priority,
interruption_probability_threshold,
resources_required_per_task,
)
)
)
def add_tasks_to_grid_job(
self,
job_id: str,
tasks: Sequence[Tuple[int, Sequence[Any], Dict[str, Any]]],
all_tasks_added: bool,
) -> None:
self._stub.add_tasks_to_grid_job(
AddTasksToGridJobRequest(
job_id=job_id,
tasks=_create_task_requests(tasks, pickle.HIGHEST_PROTOCOL),
all_tasks_added=all_tasks_added,
)
)
def get_simple_job_states(self, job_ids: Sequence[str]) -> Sequence[ProcessState]:
return self._stub.get_simple_job_states(
JobStatesRequest(job_ids=job_ids)
).process_states
def get_grid_task_states(
self, job_id: str, task_ids_to_ignore: Sequence[int]
) -> Sequence[GridTaskStateResponse]:
return self._stub.get_grid_task_states(
GridTaskStatesRequest(job_id=job_id, task_ids_to_ignore=task_ids_to_ignore)
).task_states
def add_credentials(
self, service: CredentialsService, service_url: str, source: CredentialsSource
) -> None:
self._stub.add_credentials(
_add_credentials_request(service, service_url, source)
)
def get_agent_states(self) -> Sequence[AgentStateResponse]:
return self._stub.get_agent_states(AgentStatesRequest()).agents
def check(self) -> bool:
return (
self._stub.Check(HealthCheckRequest()).status
== HealthCheckResponse.ServingStatus.SERVING
)
def __enter__(self) -> MeadowGridCoordinatorClientSync:
self._channel.__enter__()
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> Literal[False]:
return self._channel.__exit__(exc_type, exc_value, traceback)
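# Usage sketch (assumes a coordinator is listening on the default address):
#   with MeadowGridCoordinatorClientSync() as client:
#       if client.check():
#           print(client.get_agent_states())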
class MeadowGridCoordinatorClientForWorkersAsync:
"""
Talks to the same MeadowGridCoordinator server as MeadowGridCoordinatorClientAsync,
but only has the functions needed by the workers/agents. The separation is just for
keeping the code organized.
"""
def __init__(self, address: str = DEFAULT_COORDINATOR_ADDRESS):
self._channel = grpc.aio.insecure_channel(
address, options=[_grpc_retry_option("meadowgrid", "MeadowGridCoordinator")]
)
self._stub = MeadowGridCoordinatorStub(self._channel)
async def register_agent(
self, agent_id: str, resources: Dict[str, float], job_id: Optional[str]
) -> None:
"""Registers an agent with the coordinator"""
await self._stub.register_agent(
RegisterAgentRequest(
agent_id=agent_id,
resources=construct_resources_protobuf(resources),
job_id=job_id or "",
)
)
async def update_job_states(
self,
agent_id: str,
agent_job_id: Optional[str],
job_states: Iterable[JobStateUpdate],
) -> None:
"""
Updates the coordinator that the specified jobs have entered the specified
state.
"""
await self._stub.update_job_states(
JobStateUpdates(
agent_id=agent_id,
agent_job_id=agent_job_id or "",
job_states=job_states,
)
)
async def get_next_jobs(
self, agent_id: str, job_id: Optional[str]
) -> NextJobsResponse:
"""
Gets the jobs that the current agent should work on.
"""
return await self._stub.get_next_jobs(
NextJobsRequest(agent_id=agent_id, job_id=job_id or "")
)
async def update_grid_task_state_and_get_next(
self,
job_id: str,
grid_worker_id: str,
task_state: Optional[Tuple[int, ProcessState]],
) -> GridTask:
"""
task_state can either be None or (task_id, process_state).
If task_state is not None, we update the coordinator that the specified task in
the specified grid job has entered the specified state. If task_state is None,
we use task_id=-1 to represent that we don't have an update.
At the same time, this requests the next task from the coordinator for the
specified grid job. If there is no next task in the specified grid job,
GridTask.task_id will be -1.
The coordinator cannot explicitly tell the grid_worker to switch to a different
job, it can only choose to give it a task or not give it another task from the
current grid job.
"""
if task_state is not None:
task_state_request = GridTaskUpdateAndGetNextRequest(
job_id=job_id,
grid_worker_id=grid_worker_id,
task_id=task_state[0],
process_state=task_state[1],
)
else:
task_state_request = GridTaskUpdateAndGetNextRequest(
job_id=job_id, grid_worker_id=grid_worker_id, task_id=-1
)
return await self._stub.update_grid_task_state_and_get_next(task_state_request)
async def __aenter__(self) -> MeadowGridCoordinatorClientForWorkersAsync:
await self._channel.__aenter__()
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
return await self._channel.__aexit__(exc_type, exc_value, traceback)
class MeadowGridCoordinatorClientForWorkersSync:
"""The non-async version of MeadowGridCoordinatorClientForWorkersAsync"""
def __init__(self, address: str = DEFAULT_COORDINATOR_ADDRESS):
self._channel = grpc.insecure_channel(
address, options=[_grpc_retry_option("meadowgrid", "MeadowGridCoordinator")]
)
self._stub = MeadowGridCoordinatorStub(self._channel)
def register_agent(
self, agent_id: str, resources: Dict[str, float], job_id: Optional[str]
) -> None:
self._stub.register_agent(
RegisterAgentRequest(
agent_id=agent_id,
resources=construct_resources_protobuf(resources),
job_id=job_id or "",
)
)
def update_job_states(
self,
agent_id: str,
agent_job_id: Optional[str],
job_states: Iterable[JobStateUpdate],
) -> None:
self._stub.update_job_states(
JobStateUpdates(
agent_id=agent_id,
agent_job_id=agent_job_id or "",
job_states=job_states,
)
)
def get_next_jobs(self, agent_id: str, job_id: Optional[str]) -> NextJobsResponse:
return self._stub.get_next_jobs(
NextJobsRequest(agent_id=agent_id, job_id=job_id or "")
)
def update_grid_task_state_and_get_next(
self,
job_id: str,
grid_worker_id: str,
task_state: Optional[Tuple[int, ProcessState]],
) -> GridTask:
# job_id is always required
if task_state is not None:
task_state_request = GridTaskUpdateAndGetNextRequest(
job_id=job_id,
grid_worker_id=grid_worker_id,
task_id=task_state[0],
process_state=task_state[1],
)
else:
task_state_request = GridTaskUpdateAndGetNextRequest(
job_id=job_id, grid_worker_id=grid_worker_id, task_id=-1
)
return self._stub.update_grid_task_state_and_get_next(task_state_request)
def __enter__(self) -> MeadowGridCoordinatorClientForWorkersSync:
self._channel.__enter__()
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> Literal[False]:
return self._channel.__exit__(exc_type, exc_value, traceback)
| 34.517401 | 90 | 0.660214 | 3,187 | 29,754 | 5.871352 | 0.143709 | 0.017636 | 0.009833 | 0.008016 | 0.502351 | 0.469859 | 0.445329 | 0.404019 | 0.389269 | 0.386704 | 0 | 0.001888 | 0.270216 | 29,754 | 861 | 91 | 34.557491 | 0.85986 | 0.097634 | 0 | 0.451923 | 0 | 0 | 0.03085 | 0.008103 | 0 | 0 | 0 | 0.005807 | 0 | 1 | 0.052885 | false | 0 | 0.019231 | 0.016026 | 0.136218 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9fc5e1ec082c1dc64629283b6d98a6ca4c9dc871 | 7,387 | py | Python | mlprodict/testing/einsum/blas_lapack.py | henrywu2019/mlprodict | 4c09dc39d5ba7a7235fa321d80c81b5bf4f078ad | [
"MIT"
] | null | null | null | mlprodict/testing/einsum/blas_lapack.py | henrywu2019/mlprodict | 4c09dc39d5ba7a7235fa321d80c81b5bf4f078ad | [
"MIT"
] | null | null | null | mlprodict/testing/einsum/blas_lapack.py | henrywu2019/mlprodict | 4c09dc39d5ba7a7235fa321d80c81b5bf4f078ad | [
"MIT"
] | null | null | null | """
@file
@brief Direct calls to libraries :epkg:`BLAS` and :epkg:`LAPACK`.
"""
import numpy
from scipy.linalg.blas import sgemm, dgemm # pylint: disable=E0611
from .direct_blas_lapack import ( # pylint: disable=E0401,E0611
dgemm_dot, sgemm_dot)
def pygemm(transA, transB, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc):
"""
    Pure python implementation of GEMM.
"""
if len(A.shape) != 1:
raise ValueError("A must be a vector.")
if len(B.shape) != 1:
raise ValueError("B must be a vector.")
if len(C.shape) != 1:
raise ValueError("C must be a vector.")
if A.shape[0] != M * K:
raise ValueError(
"Dimension mismatch for A.shape=%r M=%r N=%r K=%r." % (
A.shape, M, N, K))
if B.shape[0] != N * K:
raise ValueError(
"Dimension mismatch for B.shape=%r M=%r N=%r K=%r." % (
B.shape, M, N, K))
if C.shape[0] != N * M:
raise ValueError(
"Dimension mismatch for C.shape=%r M=%r N=%r K=%r." % (
C.shape, M, N, K))
if transA:
a_i_stride = lda
a_k_stride = 1
else:
a_i_stride = 1
a_k_stride = lda
if transB:
b_j_stride = 1
b_k_stride = ldb
else:
b_j_stride = ldb
b_k_stride = 1
c_i_stride = 1
c_j_stride = ldc
n_loop = 0
for j in range(N):
for i in range(M):
total = 0
for k in range(K):
n_loop += 1
a_index = i * a_i_stride + k * a_k_stride
if a_index >= A.shape[0]:
raise IndexError(
"A: i=%d a_index=%d >= %d "
"(a_i_stride=%d a_k_stride=%d)" % (
i, a_index, A.shape[0], a_i_stride, a_k_stride))
a_val = A[a_index]
b_index = j * b_j_stride + k * b_k_stride
if b_index >= B.shape[0]:
raise IndexError(
"B: j=%d b_index=%d >= %d "
"(a_i_stride=%d a_k_stride=%d)" % (
j, b_index, B.shape[0], b_j_stride, b_k_stride))
b_val = B[b_index]
mult = a_val * b_val
total += mult
c_index = i * c_i_stride + j * c_j_stride
if c_index >= C.shape[0]:
raise IndexError("C: %d >= %d" % (c_index, C.shape[0]))
C[c_index] = alpha * total + beta * C[c_index]
if n_loop != M * N * K:
raise RuntimeError(
"Unexpected number of loops: %d != %d = (%d * %d * %d) "
"lda=%d ldb=%d ldc=%d" % (
n_loop, M * N * K, M, N, K, lda, ldb, ldc))
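# Correctness sketch for pygemm (illustrative): the flat buffers use
# column-major (Fortran/BLAS) layout, so lda=M, ldb=K, ldc=M when not transposed.
#   M, N, K = 2, 3, 4
#   A2 = numpy.arange(M * K, dtype=float).reshape(M, K)
#   B2 = numpy.arange(K * N, dtype=float).reshape(K, N)
#   C = numpy.zeros(M * N)
#   pygemm(False, False, M, N, K, 1.0, A2.ravel(order='F'), M,
#          B2.ravel(order='F'), K, 0.0, C, M)
#   assert numpy.allclose(C.reshape(M, N, order='F'), A2 @ B2)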
def gemm_dot(A, B, transA=False, transB=False):
"""
Implements dot product with gemm when possible.
:param A: first matrix
:param B: second matrix
:param transA: is first matrix transposed?
:param transB: is second matrix transposed?
"""
if A.dtype != B.dtype:
raise TypeError(
"Matrices A and B must have the same dtype not "
"%r, %r." % (A.dtype, B.dtype))
if len(A.shape) != 2:
raise ValueError(
"Matrix A does not have 2 dimensions but %d." % len(A.shape))
if len(B.shape) != 2:
raise ValueError(
"Matrix B does not have 2 dimensions but %d." % len(B.shape))
def _make_contiguous_(A, B):
if not A.flags['C_CONTIGUOUS']:
A = numpy.ascontiguousarray(A)
if not B.flags['C_CONTIGUOUS']:
B = numpy.ascontiguousarray(B)
return A, B
all_dims = A.shape + B.shape
square = min(all_dims) == max(all_dims)
if transA:
if transB:
if A.dtype == numpy.float32:
if square:
C = numpy.zeros((A.shape[1], B.shape[0]), dtype=A.dtype)
A, B = _make_contiguous_(A, B)
sgemm_dot(B, A, True, True, C)
return C
else:
C = numpy.zeros((A.shape[1], B.shape[0]), dtype=A.dtype)
return sgemm(1, A, B, 0, C, 1, 1, 1)
if A.dtype == numpy.float64:
if square:
C = numpy.zeros((A.shape[1], B.shape[0]), dtype=A.dtype)
A, B = _make_contiguous_(A, B)
dgemm_dot(B, A, True, True, C)
return C
else:
C = numpy.zeros((A.shape[1], B.shape[0]), dtype=A.dtype)
return dgemm(1, A, B, 0, C, 1, 1, 1)
return A.T @ B.T
else:
if A.dtype == numpy.float32:
if square:
C = numpy.zeros((A.shape[1], B.shape[1]), dtype=A.dtype)
A, B = _make_contiguous_(A, B)
sgemm_dot(B, A, False, True, C)
return C
else:
C = numpy.zeros((A.shape[1], B.shape[1]), dtype=A.dtype)
return sgemm(1, A, B, 0, C, 1, 0, 1)
if A.dtype == numpy.float64:
if square:
C = numpy.zeros((A.shape[1], B.shape[1]), dtype=A.dtype)
A, B = _make_contiguous_(A, B)
dgemm_dot(B, A, False, True, C)
return C
else:
C = numpy.zeros((A.shape[1], B.shape[1]), dtype=A.dtype)
return dgemm(1, A, B, 0, C, 1, 0, 1)
return A.T @ B
else:
if transB:
if A.dtype == numpy.float32:
if square:
C = numpy.zeros((A.shape[0], B.shape[0]), dtype=A.dtype)
A, B = _make_contiguous_(A, B)
sgemm_dot(B, A, True, False, C)
return C
else:
C = numpy.zeros((A.shape[0], B.shape[0]), dtype=A.dtype)
return sgemm(1, A, B, 0, C, 0, 1, 1)
if A.dtype == numpy.float64:
if square:
C = numpy.zeros((A.shape[0], B.shape[0]), dtype=A.dtype)
A, B = _make_contiguous_(A, B)
dgemm_dot(B, A, True, False, C)
return C
else:
C = numpy.zeros((A.shape[0], B.shape[0]), dtype=A.dtype)
return dgemm(1, A, B, 0, C, 0, 1, 1)
return A @ B.T
else:
if A.dtype == numpy.float32:
if square:
C = numpy.zeros((A.shape[0], B.shape[1]), dtype=A.dtype)
A, B = _make_contiguous_(A, B)
sgemm_dot(B, A, False, False, C)
return C
else:
C = numpy.zeros((A.shape[0], B.shape[1]), dtype=A.dtype)
                    return sgemm(1, A, B, 0, C, 0, 0, 1)
if A.dtype == numpy.float64:
if square:
C = numpy.zeros((A.shape[0], B.shape[1]), dtype=A.dtype)
A, B = _make_contiguous_(A, B)
dgemm_dot(B, A, False, False, C)
return C
else:
C = numpy.zeros((A.shape[0], B.shape[1]), dtype=A.dtype)
return dgemm(1, A, B, 0, C, 0, 0, 1)
return A @ B
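# Note: gemm_dot only dispatches to the BLAS routines for float32/float64
# inputs; any other dtype falls through to the plain `@` operator paths above.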
| 36.935 | 76 | 0.447137 | 1,030 | 7,387 | 3.094175 | 0.107767 | 0.018199 | 0.055224 | 0.060245 | 0.585817 | 0.508943 | 0.475055 | 0.474427 | 0.442736 | 0.442736 | 0 | 0.028839 | 0.422634 | 7,387 | 199 | 77 | 37.120603 | 0.718406 | 0.046027 | 0 | 0.443787 | 0 | 0 | 0.080092 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017751 | false | 0 | 0.017751 | 0 | 0.159763 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9fc751ca772d95c9fba3fd41ba8e849861b0bb38 | 942 | py | Python | vnpy/app/pytdx_loader/my_pytdx/export_csv_process/deal_data_from_tdx_export.py | zskycode/vnpy | 441de3ede2e3001661dfc030c8cbe1c860257f56 | [
"MIT"
] | null | null | null | vnpy/app/pytdx_loader/my_pytdx/export_csv_process/deal_data_from_tdx_export.py | zskycode/vnpy | 441de3ede2e3001661dfc030c8cbe1c860257f56 | [
"MIT"
] | null | null | null | vnpy/app/pytdx_loader/my_pytdx/export_csv_process/deal_data_from_tdx_export.py | zskycode/vnpy | 441de3ede2e3001661dfc030c8cbe1c860257f56 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
__author__ = 'Fangyang'
import pandas as pd
if __name__ == '__main__':
    df = pd.read_csv('30#RBL8.csv', sep='\t', encoding='gbk', skiprows=1)
df.dropna(inplace=True)
df.columns = [i.strip() for i in df.columns]
df['时间'] = df['时间'].apply(lambda x: f' {int(x):04d}')
df['datetime'] = df['日期'] + df['时间']
df['datetime'] = pd.to_datetime(df['datetime'], format='%Y/%m/%d %H%M')
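    # Column names come from the TDX export header (Chinese): 日期=date, 时间=time,
    # 开盘=open, 最高=high, 最低=low, 收盘=close, 成交量=volume, 持仓量=open interest,
    # 结算价=settlement price.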
columns_list = ['日期', '时间', '开盘', '最高', '最低', '收盘', '成交量', '持仓量', '结算价', 'datetime']
del_element_list = ['日期', '时间', '结算价']
for ele in del_element_list:
columns_list.remove(ele)
df = df[columns_list].rename(
columns={
'datetime': 'Datetime',
'开盘': 'Open',
'最高': 'High',
'最低': 'Low',
'收盘': 'Close',
'成交量': 'Volume',
'持仓量': 'OpenInterest'
}
)
df.to_csv('RB99.csv', index=False)
print(1)
| 28.545455 | 88 | 0.505308 | 124 | 942 | 3.66129 | 0.548387 | 0.059471 | 0.026432 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014556 | 0.270701 | 942 | 32 | 89 | 29.4375 | 0.646288 | 0.021231 | 0 | 0 | 0 | 0 | 0.215217 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.038462 | 0 | 0.038462 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9fca9f7d491a694fff98012f4e6702309d723b8f | 3,229 | py | Python | chapter_3/api_limit/main.py | rinjyu/the_red | c099e830ae3ee9063c3e9d29f4ee627241c7eeed | [
"Apache-2.0"
] | 13 | 2021-07-26T06:09:19.000Z | 2022-03-22T07:01:22.000Z | chapter_3/api_limit/main.py | rinjyu/the_red | c099e830ae3ee9063c3e9d29f4ee627241c7eeed | [
"Apache-2.0"
] | 11 | 2021-07-25T03:35:25.000Z | 2021-08-13T23:05:38.000Z | chapter_3/api_limit/main.py | rinjyu/the_red | c099e830ae3ee9063c3e9d29f4ee627241c7eeed | [
"Apache-2.0"
] | 8 | 2021-09-02T14:54:17.000Z | 2022-03-14T10:28:37.000Z | from typing import Optional
from fastapi import FastAPI, Request, Response
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from bs4 import BeautifulSoup
import hashlib
import struct
import logging
import json_logging
import urllib.parse
import redis
import httpx
import sys
import json
import random
from datetime import datetime, timedelta
from exceptions import UnicornException
from settings import Settings
from log import init_log
from cors import init_cors
from instrumentator import init_instrumentator
app = FastAPI()
settings = Settings()
init_cors(app)
init_instrumentator(app)
API_MAXIMUM_NUMBER = 10
N_MINUTES = 5
SECONDS = 60
rconn = redis.StrictRedis("127.0.0.1", 16379)
@app.exception_handler(UnicornException)
async def unicorn_exception_handler(request: Request, exc: UnicornException):
return JSONResponse(
status_code=exc.status,
content={"code": exc.code, "message": exc.message},
)
async def call_api(url: str):
async with httpx.AsyncClient() as client:
r = await client.get(url)
return r.text
def parse_opengraph(body: str):
soup = BeautifulSoup(body, 'html.parser')
title = soup.find("meta", {"property":"og:title"})
url = soup.find("meta", {"property":"og:url"})
og_type = soup.find("meta", {"property":"og:type"})
image = soup.find("meta", {"property":"og:image"})
description = soup.find("meta", {"property":"og:description"})
author = soup.find("meta", {"property":"og:article:author"})
resp = {}
scrap = {}
scrap["title"] = title["content"] if title else None
scrap["url"] = url["content"] if url else None
scrap["type"] = og_type["content"] if og_type else None
scrap["image"] = image["content"] if image else None
scrap["description"] = description["content"] if description else None
scrap["author"] = author["content"] if author else None
resp["scrap"] = scrap
return resp
def gen_key_prefix(uid):
return f"l:scrap:{uid}:"
def get_api_count(uid):
keys = []
now = datetime.now()
for i in range(N_MINUTES):
key = gen_key_prefix(uid) + (now + timedelta(minutes=-1*i)).strftime("%Y%m%d%H%M")
keys.append(key)
values = rconn.mget(keys)
s = 0
for value in values:
if value:
s += int(value)
return s
def incr_api_count(uid):
now = datetime.now()
key = gen_key_prefix(uid) + now.strftime("%Y%m%d%H%M")
v = rconn.incrby(key)
rconn.expire(key, N_MINUTES * SECONDS)
return v
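# Rate limiting strategy: a sliding-window counter built from N_MINUTES
# one-minute Redis buckets. Each request increments the current-minute key
# (which expires after N_MINUTES * SECONDS), and get_api_count() sums the
# last N_MINUTES buckets to approximate the request count over the window.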
@app.get("/api/v1/scrap/")
async def scrap(uid: int, url: str):
if not uid:
        raise UnicornException(status=401, code=-20001, message="Not authorized user")
count = get_api_count(uid)
if count >= API_MAXIMUM_NUMBER:
        raise UnicornException(status=427, code=-20002, message="API call limit exceeded")
try:
incr_api_count(uid)
url = urllib.parse.unquote(url)
body = await call_api(url)
value = parse_opengraph(body)
value["api_count"] = count
return value
except Exception as e:
raise UnicornException(status=400, code=-20000, message=str(e))
| 25.626984 | 91 | 0.672344 | 438 | 3,229 | 4.86758 | 0.312785 | 0.022514 | 0.033771 | 0.056285 | 0.093809 | 0.031895 | 0 | 0 | 0 | 0 | 0 | 0.017127 | 0.204398 | 3,229 | 125 | 92 | 25.832 | 0.812768 | 0 | 0 | 0.021277 | 0 | 0 | 0.106225 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0 | 0.234043 | 0.010638 | 0.351064 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9fcc6f1e1e7f1be00621469ed65e90c025c47ebf | 3,270 | py | Python | google-cloud-os_login/synth.py | kawabatas/google-cloud-ruby | 525ea553b1887f70ac85f8c70a489b04df17a2da | [
"Apache-2.0"
] | 1 | 2018-09-09T03:50:51.000Z | 2018-09-09T03:50:51.000Z | google-cloud-os_login/synth.py | kawabatas/google-cloud-ruby | 525ea553b1887f70ac85f8c70a489b04df17a2da | [
"Apache-2.0"
] | null | null | null | google-cloud-os_login/synth.py | kawabatas/google-cloud-ruby | 525ea553b1887f70ac85f8c70a489b04df17a2da | [
"Apache-2.0"
] | null | null | null | import synthtool as s
import synthtool.gcp as gcp
import logging
import re
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()
# Temporary until we get Ruby-specific tools into synthtool
def merge_gemspec(src, dest, path):
regex = re.compile(r'^\s+gem.version\s*=\s*"[\d\.]+"$', flags=re.MULTILINE)
match = regex.search(dest)
if match:
src = regex.sub(match.group(0), src, count=1)
regex = re.compile(r'^\s+gem.homepage\s*=\s*"[^"]+"$', flags=re.MULTILINE)
match = regex.search(dest)
if match:
src = regex.sub(match.group(0), src, count=1)
return src
v1_library = gapic.ruby_library(
'oslogin', 'v1',
config_path='/google/cloud/oslogin/artman_oslogin_v1.yaml',
artman_output_name='google-cloud-ruby/google-cloud-os_login'
)
s.copy(v1_library / 'lib/google/cloud/os_login.rb')
s.copy(v1_library / 'lib/google/cloud/os_login/v1')
s.copy(v1_library / 'lib/google/cloud/os_login/v1.rb')
s.copy(v1_library / 'lib/google/cloud/oslogin/v1')
s.copy(v1_library / 'lib/google/cloud/oslogin/common')
s.copy(v1_library / 'test/google/cloud/os_login/v1')
s.copy(v1_library / 'README.md')
s.copy(v1_library / 'LICENSE')
s.copy(v1_library / '.gitignore')
s.copy(v1_library / '.yardopts')
s.copy(v1_library / 'google-cloud-os_login.gemspec', merge=merge_gemspec)
v1beta_library = gapic.ruby_library(
'oslogin', 'v1beta',
config_path='/google/cloud/oslogin/artman_oslogin_v1beta.yaml',
artman_output_name='google-cloud-ruby/google-cloud-os_login'
)
s.copy(v1beta_library / 'lib/google/cloud/os_login/v1beta')
s.copy(v1beta_library / 'lib/google/cloud/os_login/v1beta.rb')
s.copy(v1beta_library / 'lib/google/cloud/oslogin/v1beta')
s.copy(v1beta_library / 'test/google/cloud/os_login/v1beta')
# PERMANENT: API name for oslogin
s.replace(
[
'README.md',
'lib/google/cloud/os_login.rb',
'lib/google/cloud/os_login/v1.rb',
'lib/google/cloud/os_login/v1beta.rb'
],
'/os-login\\.googleapis\\.com', '/oslogin.googleapis.com')
# https://github.com/googleapis/gapic-generator/issues/2196
s.replace(
[
'README.md',
'lib/google/cloud/os_login.rb',
'lib/google/cloud/os_login/v1.rb',
'lib/google/cloud/os_login/v1beta.rb'
],
'\\[Product Documentation\\]: https://cloud\\.google\\.com/os-login\n',
'[Product Documentation]: https://cloud.google.com/compute/docs/oslogin/rest/\n')
# https://github.com/googleapis/gapic-generator/issues/2242
def escape_braces(match):
expr = re.compile('([^#\\$\\\\])\\{([\\w,]+)\\}')
content = match.group(0)
while True:
content, count = expr.subn('\\1\\\\\\\\{\\2}', content)
if count == 0:
return content
s.replace(
'lib/google/cloud/**/*.rb',
'\n(\\s+)#[^\n]*[^\n#\\$\\\\]\\{[\\w,]+\\}',
escape_braces)
# https://github.com/googleapis/gapic-generator/issues/2243
s.replace(
'lib/google/cloud/os_login/*/*_client.rb',
'(\n\\s+class \\w+Client\n)(\\s+)(attr_reader :\\w+_stub)',
'\\1\\2# @private\n\\2\\3')
# https://github.com/googleapis/gapic-generator/issues/2279
s.replace(
'lib/**/*.rb',
'\\A(((#[^\n]*)?\n)*# (Copyright \\d+|Generated by the protocol buffer compiler)[^\n]+\n(#[^\n]*\n)*\n)([^\n])',
'\\1\n\\6')
| 34.421053 | 116 | 0.656269 | 478 | 3,270 | 4.378661 | 0.23431 | 0.13139 | 0.10559 | 0.146202 | 0.64214 | 0.571906 | 0.497372 | 0.35786 | 0.321548 | 0.29097 | 0 | 0.022191 | 0.131804 | 3,270 | 94 | 117 | 34.787234 | 0.715041 | 0.098165 | 0 | 0.298701 | 0 | 0.038961 | 0.470088 | 0.342624 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025974 | false | 0 | 0.051948 | 0 | 0.103896 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9fcd5f20a314e26094f35885214b213594b50dd7 | 1,341 | py | Python | Events/Announcements.py | GhostyCatt/TheCloud | 4693865268935d55dad948270f0cf35dee64e2cb | [
"MIT"
] | 2 | 2021-09-21T03:00:55.000Z | 2021-10-03T11:59:27.000Z | Events/Announcements.py | GhostyCatt/TheCloud | 4693865268935d55dad948270f0cf35dee64e2cb | [
"MIT"
] | 1 | 2021-09-22T11:29:39.000Z | 2021-09-22T11:29:39.000Z | Events/Announcements.py | GhostyCatt/TheCloud | 4693865268935d55dad948270f0cf35dee64e2cb | [
"MIT"
] | 1 | 2021-09-19T19:43:17.000Z | 2021-09-19T19:43:17.000Z | # Library Imports
import nextcord, json
from nextcord.ext import commands
# Custom Imports
from Functions.Embed import *
# Options from Json
with open('Config/Options.json') as RawOptions:
Options = json.load(RawOptions)
# onMessage Class
class Tags(commands.Cog):
def __init__(self, bot:commands.Bot):
self.bot = bot
@commands.Cog.listener('on_message')
async def TagsDetection(self, message:nextcord.Message):
"""Triggered when a user leaves the server"""
# Bot check
if message.author.bot: return
# Set a variable for the message
Object = message
# Check if the message is in the announcements channel
if message.channel.id == Options['Channels']['Announcement']:
# Create the object and send it
Embed = await Custom(
f"New Announcement!",
f"{message.content}\n\nAnnouncement by : {message.author.name}#{message.author.discriminator}"
)
Object = await message.channel.send(embed = Embed)
# Delete the old message
await message.delete()
# If the message had the pin tag, pin it
if "--pin" in message.content:
await Object.pin()
# Setup the bot
def setup(bot:commands.Bot):
bot.add_cog(Tags(bot)) | 29.152174 | 110 | 0.623415 | 163 | 1,341 | 5.092025 | 0.447853 | 0.039759 | 0.033735 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.281879 | 1,341 | 46 | 111 | 29.152174 | 0.86189 | 0.196868 | 0 | 0 | 0 | 0.043478 | 0.158668 | 0.083252 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.130435 | 0 | 0.26087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4c7c20cba3230b3e6d55458b5081946f8702b692 | 4,156 | py | Python | mivp.py | IgiArdiyanto/control-engineering-with-python | 18ca06d339d6c2391ce77ac73e552f20f85cee30 | [
"CC-BY-4.0"
] | null | null | null | mivp.py | IgiArdiyanto/control-engineering-with-python | 18ca06d339d6c2391ce77ac73e552f20f85cee30 | [
"CC-BY-4.0"
] | null | null | null | mivp.py | IgiArdiyanto/control-engineering-with-python | 18ca06d339d6c2391ce77ac73e552f20f85cee30 | [
"CC-BY-4.0"
] | null | null | null | # Third-Party Libraries
import numpy as np
import scipy.integrate as sci
import matplotlib.pyplot as plt
import matplotlib.animation as ani
def solve(**kwargs):
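    # Solve the same IVP once per initial condition in kwargs["y0s"],
    # forcing dense output so the solutions can be sampled at arbitrary times.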
kwargs = kwargs.copy()
kwargs["dense_output"] = True
y0s = kwargs["y0s"]
del kwargs["y0s"]
results = []
for y0 in y0s:
kwargs["y0"] = y0
result = sci.solve_ivp(**kwargs)
results.append(result)
return results
def solve_alt(**kwargs):
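    # Like solve(), but the initial conditions come from a parametrized
    # boundary curve s -> boundary(s); extra samples are inserted adaptively
    # wherever neighboring trajectories separate by more than the
    # atol/rtol tolerance allows.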
kwargs = kwargs.copy()
# kwargs["dense_output"] = True
boundary = kwargs["boundary"]
del kwargs["boundary"]
# boundary_n = kwargs["boundary_n"]
# del kwargs["boundary_n"]
    boundary_atol = kwargs.pop("boundary_atol", 0.01)
    boundary_rtol = kwargs.pop("boundary_rtol", 0.1)
t_eval = kwargs["t_eval"]
kwargs["t_span"] = (t_eval[0], t_eval[-1])
# assert boundary_n >= 4 # ultimately, min_n, max_n ?
data = [np.zeros((2, len(t_eval)), dtype=np.float64) for _ in range(4)]
s = list(np.linspace(0.0, 1.0, 4))
# print(f"{t_eval}")
y0s = boundary(np.array(s))
# print(f"{np.shape(y0s)=}")
# print(y0s)
for i, y0 in enumerate(y0s):
kwargs["y0"] = y0
result = sci.solve_ivp(**kwargs)
# print(f"{np.shape(data)=} {np.shape(result.y)=}")
data[i] = result.y
while True:
data_array = np.array(data)
x, y = data_array[:, 0], data_array[:, 1]
d = np.sqrt(x * x + y * y)[:, :-1]
error = boundary_atol + boundary_rtol * d
# compute max and index that corresponds ?
dxdy = np.diff(data, axis=0)
dx, dy = dxdy[:, 0], dxdy[:, 1]
dd = np.sqrt(dx * dx + dy * dy)
if np.all(np.amax(dd) <= error):
break
index_flat = np.argmax(dd)
i, j = divmod(index_flat, np.shape(dd)[1])
assert np.amax(dd) == dd[i, j] # may fail when nan/infs?
# with vinograd, np.amax(dd) may be nan if we include the origin.
# Investigate !
print(f"{len(data)=} {(i, j)=}", f"{np.amax(dd)=}")
s.insert(i + 1, 0.5 * (s[i] + s[i + 1]))
y0 = boundary(np.array([s[i + 1]]))[0]
kwargs["y0"] = y0
result = sci.solve_ivp(**kwargs)
data.insert(i + 1, result.y)
# print(np.shape(data))
reshaped_data = np.einsum("kji", data)
# print(np.shape(reshaped_data))
return reshaped_data
def get_data(results, t):
n = len(results)
data = np.zeros((len(t), 2, n))
for i, r in enumerate(results):
sol_t = r.sol(t)
data[:, :, i] = sol_t.T
return data
def generate_movie(data, filename, fps, axes=None, **options):
#print(axes, options)
fig = None
if axes:
fig = axes.get_figure()
if not fig:
fig = plt.figure(figsize=(16, 9))
axes = fig.subplots()
axes.axis("equal")
ratio = 16 / 9
x_max = np.amax(data[:, 0, :])
x_min = np.amin(data[:, 0, :])
y_max = np.amax(data[:, 1, :])
y_min = np.amin(data[:, 1, :])
# Create a margin
x_c, y_c = 0.5 * (x_max + x_min), 0.5 * (y_max + y_min)
width, height = x_max - x_min, y_max - y_min
    x_min = x_min - 0.1 * width
    x_max = x_max + 0.1 * width
    y_min = y_min - 0.1 * height
    y_max = y_max + 0.1 * height
width, height = x_max - x_min, y_max - y_min
if width / height <= ratio: # adjust width
width = height * ratio
x_min, x_max = x_c - 0.5 * width, x_c + 0.5 * width
else: # adjust height
height = width / ratio
y_min, y_max = y_c - 0.5 * height, y_c + 0.5 * height
axes.axis([x_min, x_max, y_min, y_max])
fig.subplots_adjust(0, 0, 1, 1)
axes.axis("off")
polygon = None
def update(i):
nonlocal polygon
x, y = data[i]
if polygon:
polygon.remove()
polygon = axes.fill(x, y, **options)[0]
writer = ani.FFMpegWriter(fps=fps)
animation = ani.FuncAnimation(fig, func=update, frames=len(data))
animation.save(filename, writer=writer, dpi=300)
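
# A minimal usage sketch, assuming ffmpeg is available for FFMpegWriter; the
# example system, output filename, fps and fill colour are arbitrary
# illustration choices: transport the unit circle of initial conditions
# through the harmonic oscillator and render the deforming set as a movie.
if __name__ == "__main__":
    def fun(t, y):
        # Harmonic oscillator: x' = y, y' = -x
        return np.array([y[1], -y[0]])

    t = np.linspace(0.0, 2.0 * np.pi, 100)
    angles = np.linspace(0.0, 2.0 * np.pi, 50)
    y0s = [np.array([np.cos(a), np.sin(a)]) for a in angles]
    results = solve(fun=fun, t_span=(t[0], t[-1]), y0s=y0s)
    data = get_data(results, t)  # shape: (len(t), 2, number of trajectories)
    generate_movie(data, "oscillator.mp4", fps=25, color="C0")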
| 31.014925 | 75 | 0.550529 | 626 | 4,156 | 3.527157 | 0.238019 | 0.014493 | 0.006793 | 0.021739 | 0.12817 | 0.11096 | 0.11096 | 0.11096 | 0.057065 | 0.024457 | 0 | 0.029582 | 0.292348 | 4,156 | 133 | 76 | 31.24812 | 0.721183 | 0.127045 | 0 | 0.10101 | 0 | 0 | 0.041863 | 0 | 0 | 0 | 0 | 0 | 0.010101 | 1 | 0.050505 | false | 0 | 0.040404 | 0 | 0.121212 | 0.010101 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4c7e6bd77ab65323eb06473f3ed2421080810b4e | 1,527 | py | Python | python/tree/0687_longest_univalue_path.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | 6 | 2019-07-15T13:23:57.000Z | 2020-01-22T03:12:01.000Z | python/tree/0687_longest_univalue_path.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | null | null | null | python/tree/0687_longest_univalue_path.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | 1 | 2019-07-24T02:15:31.000Z | 2019-07-24T02:15:31.000Z | # Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def longestUnivaluePath(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.longest = 0
def traverse(node, parent_val):
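            # Returns the length (in edges) of the longest downward path
            # starting at `node` whose values all equal parent_val; as a side
            # effect, self.longest tracks the best left+right path through
            # any node.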
if not node:
return 0
left, right = traverse(node.left, node.val), traverse(
node.right, node.val)
self.longest = max(self.longest, left + right)
return 1 + max(left, right) if node.val == parent_val else 0
traverse(root, None)
return self.longest
def test_longest_univalue_path():
a = TreeNode(5)
b = TreeNode(4)
c = TreeNode(5)
d = TreeNode(1)
e = TreeNode(1)
f = TreeNode(5)
a.left = b
a.right = c
b.left = d
b.right = e
c.right = f
assert 2 == Solution().longestUnivaluePath(a)
a = TreeNode(1)
b = TreeNode(4)
c = TreeNode(5)
d = TreeNode(4)
e = TreeNode(4)
f = TreeNode(5)
a.left = b
a.right = c
b.left = d
b.right = e
c.right = f
assert 2 == Solution().longestUnivaluePath(a)
a = TreeNode(1)
b = TreeNode(1)
c = TreeNode(1)
d = TreeNode(1)
e = TreeNode(1)
f = TreeNode(1)
g = TreeNode(1)
a.right = b
b.left = c
b.right = d
c.left = e
c.right = f
e.left = g
assert 4 == Solution().longestUnivaluePath(a)
| 22.130435 | 72 | 0.535036 | 209 | 1,527 | 3.866029 | 0.220096 | 0.111386 | 0.02599 | 0.029703 | 0.350248 | 0.350248 | 0.350248 | 0.350248 | 0.240099 | 0.240099 | 0 | 0.026 | 0.345121 | 1,527 | 68 | 73 | 22.455882 | 0.782 | 0.044532 | 0 | 0.446429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.053571 | 1 | 0.071429 | false | 0 | 0 | 0 | 0.160714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4c7f9dd3621cafca3e66c672329e6080fd0e396e | 850 | py | Python | gore/tests/test_groups_api.py | akx/gentry | f4205f5a14054231d064657347862a15ecf4c0e0 | [
"MIT"
] | 4 | 2017-07-26T13:23:06.000Z | 2019-02-21T14:55:34.000Z | gore/tests/test_groups_api.py | akx/gentry | f4205f5a14054231d064657347862a15ecf4c0e0 | [
"MIT"
] | 26 | 2017-08-02T08:52:06.000Z | 2022-03-04T15:13:26.000Z | gore/tests/test_groups_api.py | akx/gentry | f4205f5a14054231d064657347862a15ecf4c0e0 | [
"MIT"
] | null | null | null | import json
import pytest
from django.utils.encoding import force_str
from gore.tests.utils import create_events
from gore.utils.event_grouper import group_events
@pytest.mark.django_db
def test_groups_api(project, admin_client):
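    # Group ten freshly created events into a single group, then check that
    # the list endpoint and the per-group detail endpoint agree on the events.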
events = create_events(project, 10)
group_events(project, events)
list_resp = json.loads(force_str(admin_client.get('/api/groups/').content))
group_list = list_resp['groups']
assert len(group_list) == 1
assert group_list[0]['n_events'] == len(events)
detail_resp = json.loads(force_str(admin_client.get('/api/group/{id}/'.format(id=group_list[0]['id'])).content))
assert len(detail_resp['events']) == len(events)
assert {e['id'] for e in detail_resp['events']} == {e.id for e in events}
def test_groups_api_auth(client):
assert client.get('/api/groups/').status_code >= 400
| 34 | 116 | 0.727059 | 131 | 850 | 4.503817 | 0.351145 | 0.061017 | 0.061017 | 0.054237 | 0.159322 | 0.128814 | 0.128814 | 0.128814 | 0.128814 | 0 | 0 | 0.010825 | 0.130588 | 850 | 24 | 117 | 35.416667 | 0.787551 | 0 | 0 | 0 | 0 | 0 | 0.082353 | 0 | 0 | 0 | 0 | 0 | 0.277778 | 1 | 0.111111 | false | 0 | 0.277778 | 0 | 0.388889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4c84f647ccee627d658ba14428984f8bae4c5f2e | 2,134 | py | Python | example.py | filipinascimento/dbgz | a3b10e89c78377c00978da0a876f5ad8e8416794 | [
"BSD-3-Clause"
] | null | null | null | example.py | filipinascimento/dbgz | a3b10e89c78377c00978da0a876f5ad8e8416794 | [
"BSD-3-Clause"
] | null | null | null | example.py | filipinascimento/dbgz | a3b10e89c78377c00978da0a876f5ad8e8416794 | [
"BSD-3-Clause"
] | null | null | null | import dbgz
from tqdm.auto import tqdm
# Defining a scheme
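# (Judging by the field names, lowercase codes appear to denote scalars --
#  i: int, f: float, s: string -- and the uppercase variants arrays of the
#  same type.)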
scheme = [
("anInteger","i"),
("aFloat","f"),
("aString","s"),
("anIntArray","I"),
("aFloatArray","F"),
("anStringArray","S"),
]
# Writing some data to a dbgz file
totalCount = 1000000
with dbgz.DBGZWriter("test.dbgz",scheme) as fd:
# New entries can be added as:
fd.write(anInteger=1, aString="1")
fd.write(anInteger=2, aString="2", aFloat=5)
fd.write(anInteger=3, aString="3",anIntArray=list(range(10)), aFloatArray=[0.1,0.2,0.3,0.5])
# Here is a loop to write a lot of data:
for index in tqdm(range(totalCount)):
fd.write(
anInteger=index,
aFloat=index*0.01,
anIntArray=list(range(index,index+10)),
aString=str(index),
aFloatArray=[index+0.1,index-0.2,index+0.3,index+0.4],
anStringArray=[str(index),str(index+1),str(index+2),str(index+3)]
)
# Loading a dbgz file
with dbgz.DBGZReader("test.dbgz") as fd:
pbar = tqdm(total=fd.entriesCount)
print(fd.scheme)
while True:
entries = fd.read(10)
if(not entries):
break
for entry in entries:
assert entry["anInteger"] == int(entry["aString"])
pbar.update(len(entries))
pbar.refresh()
pbar.close()
# Building an in-memory index dictionary and querying it
with dbgz.DBGZReader("test.dbgz") as fd:
indexDictionary = fd.generateIndex("anInteger",
indicesPath=None,
filterFunction=lambda entry: entry["anInteger"]<10,
useDictionary=True,
showProgressbar = True
)
for key,values in indexDictionary.items():
print(key,values)
for value in values:
assert int(key) == fd.readAt(value)[0]["anInteger"]
# Saving dictionary to file and loading it again
with dbgz.DBGZReader("test.dbgz") as fd:
fd.generateIndex("anInteger",
indicesPath="test_byAnInteger.idbgz",
filterFunction=lambda entry: entry["anInteger"]<10,
useDictionary=True,
showProgressbar = True
)
    indexDictionary = dbgz.readIndicesDictionary("test_byAnInteger.idbgz")
for key,values in indexDictionary.items():
print(key,values)
for value in values:
assert int(key) == fd.readAt(value)[0]["anInteger"]
| 27.358974 | 94 | 0.671978 | 295 | 2,134 | 4.854237 | 0.322034 | 0.013966 | 0.044693 | 0.046089 | 0.363128 | 0.363128 | 0.363128 | 0.342179 | 0.342179 | 0.342179 | 0 | 0.027273 | 0.175258 | 2,134 | 77 | 95 | 27.714286 | 0.786364 | 0.108716 | 0 | 0.288136 | 0 | 0 | 0.108822 | 0.011622 | 0 | 0 | 0 | 0 | 0.050847 | 1 | 0 | false | 0 | 0.033898 | 0 | 0.033898 | 0.050847 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4c89b04a077491bd121c0b5380c9d59cfa2be2d5 | 2,298 | py | Python | model.py | Shellyga/Adversarial-Domain-Adaptation-with-Keras | cb8f0d083ba8d59c91c3371bf62438ba1e679f4a | [
"MIT"
] | 27 | 2019-09-27T03:05:15.000Z | 2021-11-15T18:29:32.000Z | model.py | Shellyga/Adversarial-Domain-Adaptation-with-Keras | cb8f0d083ba8d59c91c3371bf62438ba1e679f4a | [
"MIT"
] | 3 | 2020-04-09T03:02:56.000Z | 2020-09-29T02:00:21.000Z | model.py | Shellyga/Adversarial-Domain-Adaptation-with-Keras | cb8f0d083ba8d59c91c3371bf62438ba1e679f4a | [
"MIT"
] | 8 | 2020-03-11T12:04:46.000Z | 2021-12-10T12:48:06.000Z | import random
import numpy as np
from keras.models import Model
from keras.applications.resnet50 import ResNet50
from keras.layers import Input, Conv2D, MaxPool2D, Flatten, Dense
from keras.layers import BatchNormalization, Activation, Dropout
def build_embedding(param, inp):
network = eval(param["network_name"])
base = network(weights = 'imagenet', include_top = False)
feat = base(inp)
flat = Flatten()(feat)
return flat
def build_classifier(param, embedding):
dense1 = Dense(400, name = 'class_dense1')(embedding)
bn1 = BatchNormalization(name = 'class_bn1')(dense1)
act1 = Activation('relu', name = 'class_act1')(bn1)
drop2 = Dropout(param["drop_classifier"], name = 'class_drop1')(act1)
dense2 = Dense(100, name = 'class_dense2')(drop2)
bn2 = BatchNormalization(name = 'class_bn2')(dense2)
act2 = Activation('relu', name = 'class_act2')(bn2)
drop2 = Dropout(param["drop_classifier"], name = 'class_drop2')(act2)
densel = Dense(param["source_label"].shape[1], name = 'class_dense_last')(drop2)
bnl = BatchNormalization(name = 'class_bn_last')(densel)
actl = Activation('softmax', name = 'class_act_last')(bnl)
return actl
def build_discriminator(param, embedding):
dense1 = Dense(400, name = 'dis_dense1')(embedding)
bn1 = BatchNormalization(name='dis_bn1')(dense1)
act1 = Activation('relu', name = 'dis_act1')(bn1)
drop1 = Dropout(param["drop_discriminator"], name = 'dis_drop1')(act1)
dense2 = Dense(100, name = 'dis_dense2')(drop1)
bn2 = BatchNormalization(name='dis_bn2')(dense2)
act2 = Activation('relu', name = 'dis_act2')(bn2)
drop2 = Dropout(param["drop_discriminator"], name = 'dis_drop2')(act2)
densel = Dense(1, name = 'dis_dense_last')(drop2)
bnl = BatchNormalization(name = 'dis_bn_last')(densel)
actl = Activation('sigmoid', name = 'dis_act_last')(bnl)
return actl
def build_combined_classifier(inp, classifier):
comb_model = Model(inputs = inp, outputs = [classifier])
return comb_model
def build_combined_discriminator(inp, discriminator):
comb_model = Model(inputs = inp, outputs = [discriminator])
return comb_model
def build_combined_model(inp, comb):
comb_model = Model(inputs = inp, outputs = comb)
return comb_model
| 39.62069 | 84 | 0.70322 | 286 | 2,298 | 5.475524 | 0.241259 | 0.063218 | 0.045977 | 0.04023 | 0.530651 | 0.44636 | 0.086845 | 0 | 0 | 0 | 0 | 0.035361 | 0.163185 | 2,298 | 58 | 85 | 39.62069 | 0.778991 | 0 | 0 | 0.106383 | 0 | 0 | 0.15659 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12766 | false | 0 | 0.12766 | 0 | 0.382979 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4c8c52db67d8c73d5f8888df4a3820dbf62cb559 | 11,068 | py | Python | trainers/dualsdf_trainer.py | zekunhao1995/DualSDF | 177a102b315949bfa59a6ae1c47de52ddbea6eaa | [
"MIT"
] | 107 | 2020-04-07T01:15:14.000Z | 2022-03-17T09:32:46.000Z | trainers/dualsdf_trainer.py | zekunhao1995/DualSDF | 177a102b315949bfa59a6ae1c47de52ddbea6eaa | [
"MIT"
] | 6 | 2020-05-16T00:41:28.000Z | 2021-04-27T16:04:21.000Z | trainers/dualsdf_trainer.py | zekunhao1995/DualSDF | 177a102b315949bfa59a6ae1c47de52ddbea6eaa | [
"MIT"
] | 17 | 2020-04-14T10:50:24.000Z | 2022-01-20T09:43:08.000Z | import os
import numpy as np
# PyTorch
import torch
import torch.nn as nn
import torch.nn.functional as F
import importlib
import itertools
from trainers.base_trainer import BaseTrainer
import toolbox.lr_scheduler
import models.embeddings
def KLD(mu, logvar):
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=-1)
KLD = torch.mean(KLD)
return KLD
class Trainer(BaseTrainer):
def __init__(self, cfg, args, device):
super(BaseTrainer, self).__init__()
self.cfg = cfg
self.args = args
self.device = device
# Init models
deepsdf_lib = importlib.import_module(cfg.models.deepsdf.type)
self.deepsdf_net = deepsdf_lib.Decoder(cfg.models.deepsdf)
self.deepsdf_net.to(self.device)
print("DeepSDF Net:")
print(self.deepsdf_net)
prim_attr_lib = importlib.import_module(cfg.models.prim_attr.type)
self.prim_attr_net = prim_attr_lib.Decoder(cfg.models.prim_attr)
self.prim_attr_net.to(self.device)
print("Prim Attr Net:")
print(self.prim_attr_net)
prim_sdf_lib = importlib.import_module(cfg.models.prim_sdf.type)
self.prim_sdf_fun = prim_sdf_lib.SDFFun(cfg.models.prim_sdf)
self.prim_sdf_fun.to(self.device)
print("Prim SDF Fun:")
print(self.prim_sdf_fun)
# Init loss functions
self.lossfun_fine = self._get_lossfun(self.cfg.trainer.loss_fine)
self.lossfun_coarse = self._get_lossfun(self.cfg.trainer.loss_coarse)
# Init optimizers
self.optim_deepsdf, self.lrscheduler_deepsdf = self._get_optim(self.deepsdf_net.parameters(), self.cfg.trainer.optim_deepsdf)
self.optim_primitive, self.lrscheduler_primitive = self._get_optim(self.prim_attr_net.parameters(), self.cfg.trainer.optim_primitive)
self.additional_log_info = {}
# Init training-specific contexts
def prep_train(self):
self.sid2idx = {k:v for v, k in enumerate(sorted(self.cfg.train_shape_ids))}
print('[DualSDF Trainer] init. #entries in sid2idx: {}'.format(len(self.sid2idx)))
# Init latent code
self.latent_embeddings = self._get_latent(self.cfg.trainer.latent_code, N=len(self.sid2idx))
self.optim_latentcode, self.lrscheduler_latentcode = self._get_optim(self.latent_embeddings.parameters(), self.cfg.trainer.optim_latentcode)
self.train()
def _get_latent(self, cfg, N):
embedding = getattr(models.embeddings, cfg.type)
embedding_instance = embedding(cfg, N=N, dim=self.cfg.trainer.latent_dim).to(self.device)
return embedding_instance
def _get_optim(self, parameters, cfg):
if cfg.type.lower() == "adam":
optim = torch.optim.Adam(parameters, lr=cfg.lr, betas=cfg.betas, eps=cfg.eps, weight_decay=cfg.weight_decay, amsgrad=False)
elif cfg.type.lower() == "sgd":
optim = torch.optim.SGD(parameters, lr=cfg.lr, momentum=cfg.momentum, weight_decay=cfg.weight_decay)
else:
raise NotImplementedError("Unknow optimizer: {}".format(cfg.type))
scheduler = None
if hasattr(cfg, 'lr_scheduler'):
scheduler = getattr(toolbox.lr_scheduler, cfg.lr_scheduler.type)(cfg.lr_scheduler)
return optim, scheduler
def _step_lr(self, epoch):
lr_latentcode = self.lrscheduler_latentcode(epoch)
for g in self.optim_latentcode.param_groups:
g['lr'] = lr_latentcode
lr_deepsdf = self.lrscheduler_deepsdf(epoch)
for g in self.optim_deepsdf.param_groups:
g['lr'] = lr_deepsdf
lr_primitive = self.lrscheduler_primitive(epoch)
for g in self.optim_primitive.param_groups:
g['lr'] = lr_primitive
print('Step LR: L: {}; D: {}; P: {}'.format(lr_latentcode, lr_deepsdf, lr_primitive))
def _get_lossfun(self, cfg):
print(cfg)
if cfg.type.lower() == 'clamped_l1':
from models.lossfuns import clamped_l1
lossfun = lambda pred, gt: torch.mean(clamped_l1(pred, gt, trunc=cfg.trunc), dim=-1)
elif cfg.type.lower() == 'clamped_l1_correct':
from models.lossfuns import clamped_l1_correct as clamped_l1
lossfun = lambda pred, gt: clamped_l1(pred, gt, trunc=cfg.trunc)
elif cfg.type.lower() == 'l1':
lossfun = lambda pred, gt: torch.mean(torch.abs(pred-gt), dim=-1)
elif cfg.type.lower() == 'onesided_l2':
from models.lossfuns import onesided_l2
lossfun = onesided_l2
else:
raise NotImplementedError("Unknow loss function: {}".format(cfg.type))
return lossfun
# loss?: [B]
def _reduce_loss(self, loss1, loss2):
if self.cfg.trainer.mixture_loss:
loss_s = torch.stack([loss1, loss2], dim=-1)
loss = torch.mean(torch.logsumexp(loss_s, dim=-1)) - np.log(2)
else:
loss = 0.5 * (torch.mean(loss1 + loss2))
return loss
def _b_idx2latent(self, latent_embeddings, indices, num_augment_pts=None):
batch_latent_dict = latent_embeddings(indices, num_augment_pts=num_augment_pts)
batch_latent = batch_latent_dict['latent_code']
if 'mu' in batch_latent_dict.keys() and 'logvar' in batch_latent_dict.keys():
batch_mu = batch_latent_dict['mu']
batch_logvar = batch_latent_dict['logvar']
kld = KLD(batch_mu, batch_logvar)
self.additional_log_info['vad_batch_mu_std'] = torch.std(batch_mu).item()
self.additional_log_info['vad_batch_kld'] = kld.item()
if 'std' in batch_latent_dict.keys():
batch_sigma = batch_latent_dict['std']
else:
batch_sigma = torch.exp(0.5*batch_logvar)
self.additional_log_info['vad_batch_sigma_mean'] = torch.mean(batch_sigma).item()
else:
kld = 0.0
if 'latent_code_augment' in batch_latent_dict.keys():
batch_latent_aug = batch_latent_dict['latent_code_augment']
else:
batch_latent_aug = batch_latent
return batch_latent, batch_latent_aug, kld
# Convert list of shape ids to their corresponding indices in embedding.
def _b_sid2idx(self, sid_list):
data_indices = torch.tensor([self.sid2idx[x] for x in sid_list], dtype=torch.long, device=self.device)
return data_indices
# Z: [B, 128] or
# [B, N, 128]
# P: [B, N, 3]
def _forward_deepsdf(self, z, p):
bs = z.size(0)
N = p.size(1)
if len(z.shape) == 2:
z = z.unsqueeze(1).expand(-1,N,-1)
inp = torch.cat([z, p], dim=-1)
dists = self.deepsdf_net(inp) # [64 2048 1]
return dists
# Z: [B, 128]
# P: [B, N, 3]
def _forward_primitive(self, z, p):
bs = z.size(0)
N = p.size(1)
attrs = self.prim_attr_net(z)
dists = self.prim_sdf_fun(attrs, p)
return dists, attrs
def _reg_attr(self, attrs):
attrs = attrs.reshape(attrs.size(0), -1, 4) # [B N rxyz]
dists = torch.sum(attrs[:,:,1:]**2, dim=-1, keepdim= True)
dists = torch.clamp(dists, 1.05, None)
loss = torch.sum(dists - 1.05)
return loss
def epoch_start(self, epoch):
# Setting LR
self.train()
self._step_lr(epoch)
self.optim_latentcode.zero_grad()
def step(self, data):
data_f = data['surface_samples'].to(self.device, non_blocking=True) # [64 2048 4] xyzd
data_c = data['sphere_samples'].to(self.device, non_blocking=True)
data_indices = data['shape_indices'].squeeze(-1).to(self.device, non_blocking=True) # [64]
data_ids = data['shape_ids']
latent_codes_coarse, latent_codes_fine, kld = self._b_idx2latent(self.latent_embeddings, data_indices, num_augment_pts=data_f.size(1)) # [64 128]
if self.cfg.trainer.detach_latent_coarse:
latent_codes_coarse = latent_codes_coarse.detach()
if self.cfg.trainer.detach_latent_fine:
latent_codes_fine = latent_codes_fine.detach()
self.optim_deepsdf.zero_grad()
self.optim_primitive.zero_grad()
# DeepSDF
pts_fine = data_f[...,:3]
dists_gt_fine = data_f[...,[3]].squeeze(-1)
dists_deepsdf = self._forward_deepsdf(latent_codes_fine, pts_fine).squeeze(-1) # 64, 2048, 1
# PrimitiveSDF
pts_coarse = data_c[...,:3]
dists_gt_coarse = data_c[...,[3]].squeeze(-1)
dists_primitive, attrs_primitive = self._forward_primitive(latent_codes_coarse, pts_coarse) # 64, 2048, 1
dists_primitive = dists_primitive.squeeze(-1)
# calculate loss
loss_fine = self.lossfun_fine(dists_deepsdf, dists_gt_fine)
loss_coarse = self.lossfun_coarse(dists_primitive, dists_gt_coarse)
reg_attr = self._reg_attr(attrs_primitive)
loss = self._reduce_loss(loss_fine*self.cfg.trainer.loss_fine.weight, loss_coarse*self.cfg.trainer.loss_coarse.weight)
loss_fine = torch.mean(loss_fine.detach()).item()
loss_coarse = torch.mean(loss_coarse.detach()).item()
(loss + kld*self.cfg.trainer.kld_weight + reg_attr*self.cfg.trainer.attr_reg_weight).backward()
self.optim_deepsdf.step()
self.optim_primitive.step()
log_info = {'loss': loss.item(), 'loss_fine': loss_fine, 'loss_coarse': loss_coarse, 'reg_attr': reg_attr}
log_info.update(self.additional_log_info)
return log_info
def epoch_end(self, epoch, **kwargs):
self.optim_latentcode.step()
def save(self, epoch, step):
save_name = "epoch_{}_iters_{}.pth".format(epoch, step)
path = os.path.join(self.cfg.save_dir, save_name)
torch.save({
'trainer_state_dict': self.state_dict(),
'optim_latentcode_state_dict': self.optim_latentcode.state_dict(),
'optim_deepsdf_state_dict': self.optim_deepsdf.state_dict(),
'optim_primitive_state_dict': self.optim_primitive.state_dict(),
'epoch': epoch,
'step': step,
}, path)
def resume(self, ckpt_path):
print('Resuming {}...'.format(ckpt_path))
ckpt = torch.load(ckpt_path, map_location=self.device)
self.load_state_dict(ckpt['trainer_state_dict'], strict=False)
# To reduce size, optimizer state dicts are removed from the published check points
if 'optim_latentcode_state_dict' in ckpt.keys():
self.optim_latentcode.load_state_dict(ckpt['optim_latentcode_state_dict'])
self.optim_deepsdf.load_state_dict(ckpt['optim_deepsdf_state_dict'])
self.optim_primitive.load_state_dict(ckpt['optim_primitive_state_dict'])
else:
ckpt['epoch'] = 9999
return ckpt['epoch']
| 43.403922 | 153 | 0.634984 | 1,465 | 11,068 | 4.537884 | 0.159044 | 0.021059 | 0.029483 | 0.011282 | 0.272262 | 0.174338 | 0.076564 | 0.018351 | 0.006318 | 0.006318 | 0 | 0.01469 | 0.249639 | 11,068 | 254 | 154 | 43.574803 | 0.785792 | 0.041652 | 0 | 0.075758 | 0 | 0 | 0.065967 | 0.019091 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.080808 | 0 | 0.237374 | 0.050505 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4c8c896fdc27ee3c019aaff66effafb8eec960ab | 1,355 | py | Python | demo/prefix.py | lechat/jenkinsflow | 87396069dda4f0681829e5d4e264e4f09ae34131 | [
"BSD-3-Clause"
] | null | null | null | demo/prefix.py | lechat/jenkinsflow | 87396069dda4f0681829e5d4e264e4f09ae34131 | [
"BSD-3-Clause"
] | null | null | null | demo/prefix.py | lechat/jenkinsflow | 87396069dda4f0681829e5d4e264e4f09ae34131 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
import demo_setup
demo_setup.sys_path()
from jenkinsflow.flow import serial
import demo_security as security
def main(api):
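    # One top-level serial flow; every job name gets the
    # 'jenkinsflow_demo__prefix__' prefix, and the nested serial/parallel
    # blocks append their own 'x_', 'y_' and 'z_' prefixes on top of it.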
with serial(api, timeout=70, report_interval=3, job_name_prefix='jenkinsflow_demo__prefix__') as ctrl1:
ctrl1.invoke('quick1')
for index in 1, 2, 3:
with ctrl1.serial(timeout=20, report_interval=3, job_name_prefix='x_') as ctrl2:
ctrl2.invoke('quick2-' + str(index))
ctrl1.invoke('quick3')
with ctrl1.parallel(timeout=40, report_interval=3, job_name_prefix='y_') as ctrl2:
with ctrl2.serial(timeout=40, report_interval=3, job_name_prefix='z_') as ctrl3:
ctrl3.invoke('quick4')
ctrl2.invoke('quick5')
if __name__ == '__main__':
    # Note: This flow uses username/password instead of a securitytoken to demonstrate that feature; it could have used a securitytoken.
# See demo_security.py
import os
from jenkinsflow.jenkins_api import Jenkins
jenkins = Jenkins(os.environ.get('JENKINS_URL') or os.environ.get('HUDSON_URL') or "http://localhost:8080",
username=security.username, password=security.password)
main(jenkins)
| 36.621622 | 133 | 0.690775 | 185 | 1,355 | 4.864865 | 0.508108 | 0.062222 | 0.066667 | 0.08 | 0.144444 | 0.144444 | 0.082222 | 0.082222 | 0 | 0 | 0 | 0.040854 | 0.205166 | 1,355 | 36 | 134 | 37.638889 | 0.7948 | 0.222878 | 0 | 0 | 0 | 0 | 0.107824 | 0.024809 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0.047619 | 0.238095 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4c8d7876159f4f21ee120eba231f0b4deb1ae81c | 11,763 | py | Python | pub/dispatcher/folder/functions/provider.py | DASTUDIO/MyVHost | b9eda56a67c2df9236b7866087bc7f465542f951 | [
"MIT"
] | 2 | 2021-07-27T10:38:57.000Z | 2021-10-10T20:42:56.000Z | pub/dispatcher/folder/functions/provider.py | DASTUDIO/MyVHost | b9eda56a67c2df9236b7866087bc7f465542f951 | [
"MIT"
] | null | null | null | pub/dispatcher/folder/functions/provider.py | DASTUDIO/MyVHost | b9eda56a67c2df9236b7866087bc7f465542f951 | [
"MIT"
] | null | null | null | # coding=utf-8
import time
import pub.response.error as error
import pub.settings as s
import pub.tables.resources as resource
import pub.tables.template as template
import pub.tables.comments as comments
import pub.tables.user as user
import pub.response.json as j
from django.core.paginator import Paginator,EmptyPage,PageNotAnInteger
import pub.tables.map.domain as d
import pub.response.wrap as w
from pub.forms.search import form_search_keyword,form_search_user
from pub.forms.user_comments import form_comments,form_comments_page,form_add_comment
import pub.permission.user as p_user
# This file is kept as a backup; the live logic is in the providers folder
# Content Provider
def content_provider(page):
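    # Return one page (s.PAGE_SIZE cards) of all resources as JSON, each card
    # enriched with its owner's nickname, avatar and profile link.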
try:
data = {'state':'error'}
res = resource.resource_info.objects.all()
pag = Paginator(res,s.PAGE_SIZE)
data['volume'] = pag.count
try:
contents = pag.page(page)
except PageNotAnInteger:
contents = pag.page(1)
except EmptyPage:
contents = pag.page(pag.num_pages)
data['content'] = []
for item in contents:
# fetch
u_res = resource.resource_to_user.objects.get(key=item.key)
userid = u_res.userid
# fetch
u_info = user.auth_user.objects.get(id=userid)
user_headimg = u_info.headimg
nickname = u_info.nickname
user_link = "/user/" + str(userid)
it = {'title': __fix_row(item.title),
'brief': __fix_row(item.brief),
'headimg': item.headimg,
'key': item.key,
'user_headimg' : user_headimg,
'user_link' : user_link,
'nickname': nickname,
}
try:
it['domain'] = d.domain_to_key.objects.get(key=item.key).domain
except:
pass
data['content'].append(it)
data['state'] = 'success'
return j.dic(data,'utf-8')
except Exception as e:
return j.err('' if s.RELEASE else e)
# Work around the front end not line-wrapping long unsegmented English strings like "qwerqwreqwerqwer"
def __fix_row(content):
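    # e.g. with CARD_ROW_LENGTH = 10, "qwerqwreqwerqwer" -> "qwerqwreqw..."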
if len(content) > s.CARD_ROW_LENGTH and content.encode('UTF-8').isalpha() and content.find(' ') == -1:
return content[0:s.CARD_ROW_LENGTH] + '...'
return content
# User Provider
def user_provider(request,folder,userid):
try:
data = {}
res = user.auth_user.objects.get(id=userid)
data['u_nickname'] = res.nickname
data['u_headimg'] = res.headimg
data['u_id'] = res.id
info_res = user.user_info.objects.get(userid=userid)
data['brief'] = info_res.brief
data['position'] = info_res.position
data['friend_url'] = info_res.friend_url
data['active'] = info_res.active
return w.page(request,'user.html',data)
except Exception as e:
        return error.page(request, 404, "This profile cannot be displayed", "This card has not been activated yet" if s.RELEASE else "This card has not been activated yet" + str(e))
# Template Provider
def template_provider(page):
try:
data = {'state':'error'}
res = template.template_info.objects.all()
pag = Paginator(res,s.TEMPLATE_PAGE_SIZE)
data['volume'] = pag.count
try:
contents = pag.page(page)
except PageNotAnInteger:
contents = pag.page(1)
except EmptyPage:
contents = pag.page(pag.num_pages)
data['content'] = []
for item in contents:
# fetch
u_res = resource.resource_to_user.objects.get(key=item.key)
userid = u_res.userid
# fetch
u_info = user.auth_user.objects.get(id=userid)
nickname = u_info.nickname
user_link = "/user/" + str(userid)
it = {'title': __fix_row(item.title),
'brief': __fix_row(item.brief),
'headimg': item.headimg,
'key': item.key,
'user_link' : user_link,
'nickname': nickname,
}
try:
it['domain'] = d.domain_to_key.objects.get(key=item.key).domain
except:
pass
data['content'].append(it)
data['state'] = 'success'
return j.dic(data,'utf-8')
except Exception as e:
return j.err('' if s.RELEASE else e)
# Search Provider
def search_provider(r,f,p):
if r.GET or r.POST:
if r.GET:
request_content = r.GET
else:
request_content = r.POST
f_user = form_search_user(request_content)
f_keyword = form_search_keyword(request_content)
result = []
if f_user.is_valid():
# user stuff
res = resource.resource_to_user.objects.filter(userid=f_user.cleaned_data['userid'])
for item in reversed(res):
try:
r = resource.resource_info.objects.get(key=item.key)
result.append({
'title': __fix_row(r.title),
'brief': __fix_row(r.brief),
'headimg': r.headimg,
'url': '/' + r.key,
'key': r.key
})
except:
pass
try:
t = template.template_info.objects.get(key=item.key)
result.append({
'title': __fix_row(t.title),
'brief': __fix_row(t.brief),
'headimg': t.headimg,
'url': '/template/' + t.key,
'key': t.key
})
except:
pass
elif f_keyword.is_valid():
keyword = f_keyword.cleaned_data['keyword']
res = resource.resource_info.objects.filter(title__icontains=keyword)
for item in reversed(res):
it={
'title': __fix_row(item.title),
'brief': __fix_row(item.brief),
'headimg': item.headimg,
'url': '/' + item.key,
'key': item.key
}
try:
userid = resource.resource_to_user.objects.get(key=item.key).userid
user_headimg = user.auth_user.objects.get(id=userid).headimg
it['user_link'] = '/user/'+str(userid)
it['user_headimg'] = user_headimg
except:
pass
result.append(it)
res = template.template_info.objects.filter(title__icontains=keyword)
for item in reversed(res):
it = {
'title': __fix_row(item.title),
'brief': __fix_row(item.brief),
'headimg': item.headimg,
'url': '/template/' + item.key,
'key': item.key
}
try:
userid = resource.resource_to_user.objects.get(key=item.key).userid
user_headimg = user.auth_user.objects.get(id=userid).headimg
it['user_link'] = '/user/'+str(userid)
it['user_headimg'] = user_headimg
except:
pass
result.append(it)
else:
            return j.dic({'error': 'Invalid parameters (2)'}, 'utf-8')
return j.dic({'success': result}, 'utf-8')
else:
        return j.dic({'error': 'No parameters'}, 'utf-8')
# User Card Comment Provier
def user_comment_provider(r, p, f):
if r.GET or r.POST:
if r.GET:
request_content = r.GET
else:
request_content = r.POST
f_comments = form_comments(request_content)
if not f_comments.is_valid():
            return j.dic({'error': 'Invalid parameters'}, 'utf-8')
userid = f_comments.cleaned_data['userid']
result = []
res = comments.user_comments.objects.filter(userid=userid).order_by('-created')
# pages
_page = 0
f_pages = form_comments_page(request_content)
if f_pages.is_valid():
page=f_pages.cleaned_data['page']
try:
_page = int(page)
except:
pass
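        # Page size is fixed at three comments per page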
res = res[_page*3:_page*3+3]
for item in res:
it = {
'id':item.id,
'content':item.content,
'likes':item.likes
}
try:
user_res = user.auth_user.objects.get(id=item.publisherid)
user_nickname = user_res.nickname
user_headimg = user_res.headimg
it['user_link'] = '/user/' + str(item.publisherid)
it['user_headimg'] = user_headimg
it['user_nickname'] = user_nickname
user_info_res = user.user_info.objects.get(userid=item.publisherid)
it['user_position'] = user_info_res.position
except:
pass
result.append(it)
return j.dic({'success': result}, 'utf-8')
else:
        return j.dic({'error': 'No parameters'}, 'utf-8')
def user_comment_add(r, f, p):
if not p_user.is_logged(r):
        return j.dic({'error': 'You must be logged in to leave a comment'}, 'utf-8')
publisherid = r.session.get('userid')
if r.GET or r.POST:
if r.GET:
request_content = r.GET
else:
request_content = r.POST
        # Parse the request parameters
f_add_commenet = form_add_comment(request_content)
if not f_add_commenet.is_valid():
            return j.dic({'error': 'Invalid parameters'}, 'utf-8')
userid = f_add_commenet.cleaned_data['userid']
content = f_add_commenet.cleaned_data['content']
        # Check that the target user exists
        try:
            user.auth_user.objects.get(id=userid)
        except:
            return j.dic({'error': 'This user does not exist'}, 'utf-8')
        # Insert the new comment
comments.user_comments.objects.create(userid=userid,
publisherid=publisherid,
content=content,
created=int(time.time()))
return j.dic({'success': 'ok'}, 'utf-8')
else:
        return j.dic({'error': 'No parameters'}, 'utf-8')
def user_comment_likes_add(r,f,p):
if not p_user.is_logged(r):
        return j.dic({'error': 'You must be logged in to like'}, 'utf-8')
try:
comments.user_comments_likes_map.objects.get(publisher=r.session.get('userid'), comment_id=p)
        return j.dic({'error': 'You have already liked this'}, 'utf-8')
except:
try:
comments.user_comments_likes_map.objects.create(publisher=r.session.get('userid'), comment_id=p)
res = comments.user_comments.objects.get(id=p)
res.likes += 1
res.save()
return j.dic({'success': res.likes}, 'utf-8')
except Exception as eee:
            return j.dic({'error': 'Something went wrong' if s.RELEASE else str(eee)}, 'utf-8')
def user_comment_delete(r,f,p):
if not p_user.is_logged(r):
        return j.dic({'error': 'You must be logged in to do this'}, 'utf-8')
try:
userid = r.session.get('userid')
ownerid = str(comments.user_comments.objects.get(id=p).userid)
if userid != ownerid:
            return j.dic({'error': 'You can only delete comments you received'}, 'utf-8')
        # Delete the associated like records first
        res = comments.user_comments_likes_map.objects.filter(comment_id=p)
        for item in res:
            item.delete()
        # Then delete the comment itself
comments.user_comments.objects.get(id=p).delete()
return j.dic({'success': 'ok'}, 'utf-8')
except Exception as eee:
        return j.dic({'error': 'Something went wrong' if s.RELEASE else str(eee)}, 'utf-8') | 29.629723 | 108 | 0.529202 | 1,369 | 11,763 | 4.375457 | 0.119065 | 0.015359 | 0.035058 | 0.035058 | 0.594324 | 0.54207 | 0.526878 | 0.455593 | 0.420534 | 0.420534 | 0 | 0.004718 | 0.351271 | 11,763 | 397 | 109 | 29.629723 | 0.780239 | 0.020148 | 0 | 0.571942 | 0 | 0 | 0.076736 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032374 | false | 0.028777 | 0.053957 | 0 | 0.183453 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4c8db11b7dd7dcf2b6211c1f0a40f4d60d60fa2d | 5,932 | py | Python | heart_rate_job.py | aws-samples/analysis-of-medical-device-data-using-data-lake | 8f6d9b9da671781d74c3fb16e2603c36fb412047 | [
"Apache-2.0",
"MIT-0"
] | null | null | null | heart_rate_job.py | aws-samples/analysis-of-medical-device-data-using-data-lake | 8f6d9b9da671781d74c3fb16e2603c36fb412047 | [
"Apache-2.0",
"MIT-0"
] | null | null | null | heart_rate_job.py | aws-samples/analysis-of-medical-device-data-using-data-lake | 8f6d9b9da671781d74c3fb16e2603c36fb412047 | [
"Apache-2.0",
"MIT-0"
] | 2 | 2021-06-10T19:00:19.000Z | 2021-06-14T08:06:53.000Z | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import re
import boto3
import awswrangler as wr
from awsglue.utils import getResolvedOptions
import sys
import json
import traceback
# the filename should be of format <patient_id.yyyymmdd.metric.parquet/csv>
# Convert a minute-of-day value into [hour, minute, second]
def getTimeFromMinute(minx):
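    # e.g. minute 125 of the day -> [2, 5, 0] (hour, minute, second)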
hour1 = int(minx/60)
min1 = minx % 60
t1 = [hour1, min1, 0]
return t1
# get the date from the file_key. It should be 2nd part of the filename
def getDate(file_key):
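    # e.g. "prefix/1234.20210101.heart_rate.parquet" -> ['2021', '01', '01']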
s1_split = re.split("/", file_key)
patientInfo = s1_split[-1]
filenamesplit = re.split("\.", patientInfo)
filedate = filenamesplit[1]
t1 = [filedate[0:4], filedate[4:6], filedate[6:8]]
return t1
# get the Patient Id from the file_key. It should be 1st part of the filename
def getPatientId(file_key):
s1_split = re.split("/", file_key)
patientInfo = s1_split[-1]
# print (patientInfo)
filenamesplit = re.split("\.", patientInfo)
patientId = filenamesplit[0]
# print(patientId)
return patientId
# read parameters from ssm
def getParameter(paramName):
parameter = ssm.get_parameter(Name=paramName, WithDecryption=True)
return parameter["Parameter"]["Value"]
# move the file to processed location
def moveFile(bucket_name, file_key):
copy_source = {
"Bucket": bucket_name,
"Key": file_key
}
target_prefix = getParameter("DL-processed_location_prefix")
target_bucket = getParameter("DL-processed_bucket")
s1_split = re.split("/", file_key)
object_name = s1_split[-1]
    ## Check if the prefix ends with "/"; if so, don't add another one
separator = "/"
x = re.search("/$", target_prefix)
if x:
separator = ""
otherkey = target_prefix + separator + object_name
print("Processed File bucket is " + target_bucket)
print("Processed target key is " + otherkey)
s3.copy(copy_source, target_bucket, otherkey)
s3.delete_object(Bucket=bucket_name, Key=file_key)
return
# handler function that would be triggered
def glueHandler(bucketname, filename):
bucket_name = bucketname
file_key = filename
s3_read_url = "s3://" + bucket_name + "/" + file_key
print("reading from : " + s3_read_url)
patient_id = getPatientId(file_key)
print("the patient info is " + patient_id)
dataframe = ""
# either parquet or csv
if file_key.find("parquet") > -1:
dataframe = wr.s3.read_parquet(path=s3_read_url)
else:
dataframe = wr.s3.read_csv(path=s3_read_url)
# print(dataframe)
patient_id = getPatientId(file_key)
dateTuple = (getDate(file_key))
metric_type = "heart_rate"
# print(dateTuple)
dataframe["year_value"] = 0
dataframe["hour_value"] = 0
dataframe["min_value"] = 0
dataframe["sec_value"] = 0
dataframe["year_value"] = int(dateTuple[0])
dataframe["month_value"] = int(dateTuple[1])
dataframe["day_value"] = int(dateTuple[2])
dataframe["patient_id"] = patient_id
dataframe["metric"] = metric_type
rows = dataframe.shape[0]
# cols = dataframe.shape[1]
# print(rows)
# print(cols)
for rowId in range(rows):
timeTuple = getTimeFromMinute(dataframe["minute_in_day"][rowId])
dataframe["hour_value"][rowId] = timeTuple[0]
dataframe["min_value"][rowId] = timeTuple[1]
print("new rows " + str(dataframe.shape[0]))
print("new cols " + str(dataframe.shape[1]))
# print (dataframe)
path = "s3://" + getParameter("DL-datalake_target_bucket") + "/"
folderPrefix = getParameter("DL-datalake_bucket_prefix")
separator = "/"
x = re.search("/$", folderPrefix)
if x:
separator = ""
path = path + folderPrefix + separator
partition_cols = ["metric", "year_value", "month_value",
"day_value", "patient_id"]
print("the location in the datalake is " + path)
print("the partition information is " + str(partition_cols))
athenaTable = "heart_rate_metric"
databaseName = getParameter("DL-datalake_athena_database")
print("the glue database " + databaseName)
wr.s3.to_parquet(
df=dataframe,
path=path,
dataset=True,
mode="append",
partition_cols=partition_cols,
database=databaseName,
table=athenaTable
)
moveFile(bucket_name, file_key)
return
filename = ""
s3 = boto3.client("s3")
ssm = boto3.client("ssm")
sns = boto3.client("sns")
snsArn = getParameter("DL-datalake_failure_arn")
try:
args = getResolvedOptions(sys.argv, ["bucketname", "filename"])
print(args)
bucketname = args["bucketname"]
filename = args["filename"]
print("The data is to be sourced from : " + args["bucketname"])
print("The data key is: " + args["filename"])
glueHandler(bucketname, filename)
except Exception as inst:
print(type(inst))
print(inst)
print(inst.args)
track = traceback.format_exc()
print(track)
message = {"error ": "Unable to process file ", "filename": filename}
response = sns.publish(
TargetArn=snsArn,
Message=json.dumps({"default": json.dumps(message)}),
Subject="Failure in processing file " + filename,
MessageStructure="json"
)
print("message : " + json.dumps(message) + " to ARN : " + snsArn)
print("\r\n processing done") | 28.382775 | 77 | 0.662508 | 745 | 5,932 | 5.148993 | 0.297987 | 0.031022 | 0.009385 | 0.010949 | 0.094108 | 0.056048 | 0.037018 | 0.024505 | 0.024505 | 0.024505 | 0 | 0.014449 | 0.218307 | 5,932 | 209 | 78 | 28.382775 | 0.81281 | 0.190492 | 0 | 0.145038 | 0 | 0 | 0.169111 | 0.026823 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045802 | false | 0 | 0.053435 | 0 | 0.145038 | 0.137405 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4c8de32d916e9b3f90196563715f0a8c2cb915a6 | 65,211 | py | Python | vector/geometry.py | karttur/geoimagine02-grass | 09c207707ddd0dae04a871e006e184409aa87d99 | [
"BSD-3-Clause"
] | null | null | null | vector/geometry.py | karttur/geoimagine02-grass | 09c207707ddd0dae04a871e006e184409aa87d99 | [
"BSD-3-Clause"
] | null | null | null | vector/geometry.py | karttur/geoimagine02-grass | 09c207707ddd0dae04a871e006e184409aa87d99 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 18 10:46:25 2012
@author: pietro
"""
import ctypes
import re
from collections import namedtuple
import numpy as np
import grass.lib.gis as libgis
import grass.lib.vector as libvect
from grass.pygrass.utils import decode
from grass.pygrass.errors import GrassError, mapinfo_must_be_set
from grass.pygrass.vector.basic import Ilist, Bbox, Cats
from grass.pygrass.vector import sql
# For test purposes
test_vector_name = "geometry_doctest_map"
LineDist = namedtuple('LineDist', 'point dist spdist sldist')
WKT = {r'POINT\((.*)\)': 'point',  # r'POINT\(\s*([+-]*\d+\.*\d*)+\s*\)'
       r'LINESTRING\((.*)\)': 'line'}
def read_WKT(string):
"""Read the string and return a geometry object
**WKT**:
::
POINT(0 0)
LINESTRING(0 0,1 1,1 2)
POLYGON((0 0,4 0,4 4,0 4,0 0),(1 1, 2 1, 2 2, 1 2,1 1))
MULTIPOINT(0 0,1 2)
MULTILINESTRING((0 0,1 1,1 2),(2 3,3 2,5 4))
MULTIPOLYGON(((0 0,4 0,4 4,0 4,0 0),(1 1,2 1,2 2,1 2,1 1)),
((-1 -1,-1 -2,-2 -2,-2 -1,-1 -1)))
GEOMETRYCOLLECTION(POINT(2 3),LINESTRING(2 3,3 4))
**EWKT**:
::
POINT(0 0 0) -- XYZ
SRID=32632;POINT(0 0) -- XY with SRID
POINTM(0 0 0) -- XYM
POINT(0 0 0 0) -- XYZM
SRID=4326;MULTIPOINTM(0 0 0,1 2 1) -- XYM with SRID
MULTILINESTRING((0 0 0,1 1 0,1 2 1),(2 3 1,3 2 1,5 4 1))
POLYGON((0 0 0,4 0 0,4 4 0,0 4 0,0 0 0),(1 1 0,2 1 0,2 2 0,1 2 0,1 1 0))
MULTIPOLYGON(((0 0 0,4 0 0,4 4 0,0 4 0,0 0 0),
(1 1 0,2 1 0,2 2 0,1 2 0,1 1 0)),
((-1 -1 0,-1 -2 0,-2 -2 0,-2 -1 0,-1 -1 0)))
GEOMETRYCOLLECTIONM( POINTM(2 3 9), LINESTRINGM(2 3 4, 3 4 5) )
MULTICURVE( (0 0, 5 5), CIRCULARSTRING(4 0, 4 4, 8 4) )
POLYHEDRALSURFACE( ((0 0 0, 0 0 1, 0 1 1, 0 1 0, 0 0 0)),
((0 0 0, 0 1 0, 1 1 0, 1 0 0, 0 0 0)),
((0 0 0, 1 0 0, 1 0 1, 0 0 1, 0 0 0)),
((1 1 0, 1 1 1, 1 0 1, 1 0 0, 1 1 0)),
((0 1 0, 0 1 1, 1 1 1, 1 1 0, 0 1 0)),
((0 0 1, 1 0 1, 1 1 1, 0 1 1, 0 0 1)) )
TRIANGLE ((0 0, 0 9, 9 0, 0 0))
TIN( ((0 0 0, 0 0 1, 0 1 0, 0 0 0)), ((0 0 0, 0 1 0, 1 1 0, 0 0 0)) )
"""
    for regexp, obj in WKT.items():
        match = re.match(regexp, string)
        if match:
            # Parse the coordinate list, e.g. "0 0,1 1" -> [(0., 0.), (1., 1.)]
            geo = [tuple(float(coord) for coord in pnt.split())
                   for pnt in match.group(1).split(',')]
            if obj == 'point':
                return Point(*geo[0])
            elif obj == 'line':
                return Line(geo)
    raise ValueError("Unsupported WKT geometry: %r" % string)
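# A quick sanity check for the parser above (uses the Point and Line classes
# defined in this module):
#   read_WKT('POINT(10 100)')            -> Point(10.000000, 100.000000)
#   read_WKT('LINESTRING(0 0,1 1,1 2)')  -> a Line with three vertices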
def read_WKB(buff):
"""Read the binary buffer and return a geometry object"""
pass
def intersects(lineA, lineB, with_z=False):
"""Return a list of points
>>> lineA = Line([(0, 0), (4, 0)])
>>> lineB = Line([(2, 2), (2, -2)])
>>> intersects(lineA, lineB)
Line([Point(2.000000, 0.000000)])
"""
line = Line()
if libvect.Vect_line_get_intersections(lineA.c_points, lineB.c_points,
line.c_points, int(with_z)):
return line
else:
return []
#=============================================
# GEOMETRY
#=============================================
def get_xyz(pnt):
"""Return a tuple with: x, y, z.
>>> pnt = Point(0, 0)
>>> get_xyz(pnt)
(0.0, 0.0, 0.0)
>>> get_xyz((1, 1))
(1, 1, 0.0)
>>> get_xyz((1, 1, 2))
(1, 1, 2)
>>> get_xyz((1, 1, 2, 2)) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
    ValueError: The format of the point is not supported: (1, 1, 2, 2)
"""
if isinstance(pnt, Point):
if pnt.is2D:
x, y = pnt.x, pnt.y
z = 0.
else:
x, y, z = pnt.x, pnt.y, pnt.z
else:
if len(pnt) == 2:
x, y = pnt
z = 0.
elif len(pnt) == 3:
x, y, z = pnt
else:
str_error = "The the format of the point is not supported: {0!r}"
raise ValueError(str_error.format(pnt))
return x, y, z
class Attrs(object):
def __init__(self, cat, table, writeable=False):
self._cat = None
self.cond = ''
self.table = table
self.cat = cat
self.writeable = writeable
def _get_cat(self):
return self._cat
def _set_cat(self, value):
self._cat = value
if value:
# update condition
self.cond = "%s=%d" % (self.table.key, value)
cat = property(fget=_get_cat, fset=_set_cat,
doc="Set and obtain cat value")
def __getitem__(self, keys):
"""Return the value stored in the attribute table.
>>> from grass.pygrass.vector import VectorTopo
>>> test_vect = VectorTopo(test_vector_name)
>>> test_vect.open('r')
>>> v1 = test_vect[1]
>>> v1.attrs['name']
'point'
>>> v1.attrs['name', 'value']
('point', 1.0)
>>> test_vect.close()
"""
sqlcode = sql.SELECT_WHERE.format(cols=(keys if np.isscalar(keys)
else ', '.join(keys)),
tname=self.table.name,
condition=self.cond)
cur = self.table.execute(sqlcode)
results = cur.fetchone()
if results is not None:
return results[0] if len(results) == 1 else results
def __setitem__(self, keys, values):
"""Set value of a given column of a table attribute.
>>> from grass.pygrass.vector import VectorTopo
>>> test_vect = VectorTopo(test_vector_name)
>>> test_vect.open('r')
>>> v1 = test_vect[1]
>>> v1.attrs['name']
'point'
>>> v1.attrs['name'] = "new_point_1"
>>> v1.attrs['name']
'new_point_1'
>>> v1.attrs['name', 'value'] = "new_point_2", 100.
>>> v1.attrs['name', 'value']
('new_point_2', 100.0)
>>> v1.attrs['name', 'value'] = "point", 1.
>>> v1.attrs.table.conn.commit()
>>> test_vect.close()
"""
if self.writeable:
if np.isscalar(keys):
keys, values = (keys, ), (values, )
# check if key is a column of the table or not
for key in keys:
if key not in self.table.columns:
raise KeyError('Column: %s not in table' % key)
# prepare the string using as paramstyle: qmark
vals = ','.join(['%s=?' % k for k in keys])
# "UPDATE {tname} SET {values} WHERE {condition};"
sqlcode = sql.UPDATE_WHERE.format(tname=self.table.name,
values=vals,
condition=self.cond)
self.table.execute(sqlcode, values=values)
#self.table.conn.commit()
else:
str_err = "You can only read the attributes if the map is in another mapset"
raise GrassError(str_err)
def __dict__(self):
"""Return a dict of the attribute table row."""
dic = {}
for key, val in zip(self.keys(), self.values()):
dic[key] = val
return dic
def values(self):
"""Return the values of the attribute table row.
>>> from grass.pygrass.vector import VectorTopo
>>> test_vect = VectorTopo(test_vector_name)
>>> test_vect.open('r')
>>> v1 = test_vect[1]
>>> v1.attrs.values()
(1, 'point', 1.0)
>>> test_vect.close()
"""
#SELECT {cols} FROM {tname} WHERE {condition}
cur = self.table.execute(sql.SELECT_WHERE.format(cols='*',
tname=self.table.name,
condition=self.cond))
return cur.fetchone()
def keys(self):
"""Return the column name of the attribute table.
>>> from grass.pygrass.vector import VectorTopo
>>> test_vect = VectorTopo(test_vector_name)
>>> test_vect.open('r')
>>> v1 = test_vect[1]
>>> v1.attrs.keys()
['cat', 'name', 'value']
>>> test_vect.close()
"""
return self.table.columns.names()
def commit(self):
"""Save the changes"""
self.table.conn.commit()
class Geo(object):
"""
Base object for different feature types
"""
gtype = None
def __init__(self, v_id=0, c_mapinfo=None, c_points=None, c_cats=None,
table=None, writeable=False, is2D=True, free_points=False,
free_cats=False):
"""Constructor of a geometry object
:param v_id: The vector feature id
:param c_mapinfo: A pointer to the vector mapinfo structure
:param c_points: A pointer to a libvect.line_pnts structure, this
is optional, if not set an internal structure will
be allocated and free'd at object destruction
:param c_cats: A pointer to a libvect.line_cats structure, this
is optional, if not set an internal structure will
be allocated and free'd at object destruction
:param table: The attribute table to select attributes for
this feature
        :param writeable: If True the attribute table row of this feature
                          can be modified through the attrs property
:param is2D: If True this feature has two dimensions, False if
this feature has three dimensions
:param free_points: Set this True if the provided c_points structure
should be free'd at object destruction, be aware
that no other object should free them, otherwise
you can expect a double free corruption segfault
:param free_cats: Set this True if the provided c_cats structure
should be free'd at object destruction, be aware
that no other object should free them, otherwise
you can expect a double free corruption segfault
"""
self.id = v_id # vector id
self.c_mapinfo = c_mapinfo
self.is2D = (is2D if is2D is not None else
bool(libvect.Vect_is_3d(self.c_mapinfo) != 1))
# Set True if cats and points are allocated by this object
# to free the cats and points structures on destruction
self._free_points = False
self._free_cats = False
read = False
# set c_points
if c_points is None:
self.c_points = ctypes.pointer(libvect.line_pnts())
self._free_points = True
read = True
else:
self.c_points = c_points
self._free_points = free_points
# set c_cats
if c_cats is None:
self.c_cats = ctypes.pointer(libvect.line_cats())
self._free_cats = free_cats
read = True
else:
self.c_cats = c_cats
self._free_cats = True
if self.id and self.c_mapinfo is not None and read:
self.read()
# set the attributes as last thing to do
self.attrs = None
if table is not None and self.cat is not None:
self.attrs = Attrs(self.cat, table, writeable)
def __del__(self):
"""Take care of the allocated line_pnts and line_cats allocation
"""
if self._free_points == True and self.c_points:
if self.c_points.contents.alloc_points > 0:
#print("G_free(points) [%i]"%(self.c_points.contents.alloc_points))
libgis.G_free(self.c_points.contents.x)
libgis.G_free(self.c_points.contents.y)
if self.c_points.contents.z:
libgis.G_free(self.c_points.contents.z)
if self._free_cats == True and self.c_cats:
if self.c_cats.contents.alloc_cats > 0:
#print("G_free(cats) [%i]"%(self.c_cats.contents.alloc_cats))
libgis.G_free(self.c_cats.contents.cat)
@property
def cat(self):
if self.c_cats.contents.cat:
return self.c_cats.contents.cat.contents.value
def has_topology(self):
if self.c_mapinfo is not None:
return self.c_mapinfo.contents.level == 2
else:
return False
@mapinfo_must_be_set
def read(self):
"""Read and set the coordinates of the centroid from the vector map,
using the centroid_id and calling the Vect_read_line C function"""
self.id, ftype, c_points, c_cats = c_read_line(self.id, self.c_mapinfo,
self.c_points,
self.c_cats)
def to_wkt(self):
"""Return a "well know text" (WKT) geometry string, this method uses
the GEOS implementation in the vector library. ::
>>> pnt = Point(10, 100)
>>> pnt.to_wkt()
'POINT (10.0000000000000000 100.0000000000000000)'
"""
return decode(libvect.Vect_line_to_wkt(self.c_points, self.gtype, not self.is2D))
def to_wkb(self):
"""Return a "well know binary" (WKB) geometry byte array, this method uses
the GEOS implementation in the vector library. ::
>>> pnt = Point(10, 100)
>>> wkb = pnt.to_wkb()
>>> len(wkb)
21
"""
size = ctypes.c_size_t()
barray = libvect.Vect_line_to_wkb(self.c_points, self.gtype,
not self.is2D, ctypes.byref(size))
        return ctypes.string_at(barray, size.value)
class Point(Geo):
"""Instantiate a Point object that could be 2 or 3D, default
parameters are 0.
::
>>> pnt = Point()
>>> pnt.x
0.0
>>> pnt.y
0.0
>>> pnt.z
>>> pnt.is2D
True
>>> pnt
Point(0.000000, 0.000000)
>>> pnt.z = 0
>>> pnt.is2D
False
>>> pnt
Point(0.000000, 0.000000, 0.000000)
>>> print(pnt)
POINT Z (0.0000000000000000 0.0000000000000000 0.0000000000000000)
>>> c_points = ctypes.pointer(libvect.line_pnts())
>>> c_cats = ctypes.pointer(libvect.line_cats())
>>> p = Point(c_points = c_points, c_cats=c_cats)
>>> del p
>>> c_points = ctypes.pointer(libvect.line_pnts())
>>> c_cats = ctypes.pointer(libvect.line_cats())
>>> p = Point(c_points=c_points, c_cats=c_cats, free_points=True,
... free_cats=True)
>>> del p
..
"""
# geometry type
gtype = libvect.GV_POINT
def __init__(self, x=0, y=0, z=None, **kargs):
super(Point, self).__init__(**kargs)
if self.id and self.c_mapinfo:
self.read()
else:
            self.is2D = z is None
z = z if z is not None else 0
libvect.Vect_append_point(self.c_points, x, y, z)
def _get_x(self):
return self.c_points.contents.x[0]
def _set_x(self, value):
self.c_points.contents.x[0] = value
x = property(fget=_get_x, fset=_set_x,
doc="Set and obtain x coordinate")
def _get_y(self):
return self.c_points.contents.y[0]
def _set_y(self, value):
self.c_points.contents.y[0] = value
y = property(fget=_get_y, fset=_set_y,
doc="Set and obtain y coordinate")
def _get_z(self):
if self.is2D:
return None
return self.c_points.contents.z[0]
def _set_z(self, value):
if value is None:
self.is2D = True
self.c_points.contents.z[0] = 0
else:
self.c_points.contents.z[0] = value
self.is2D = False
z = property(fget=_get_z, fset=_set_z,
doc="Set and obtain z coordinate")
def __str__(self):
return self.to_wkt()
def __repr__(self):
return "Point(%s)" % ', '.join(['%f' % coor for coor in self.coords()])
def __eq__(self, pnt):
"""Return True if the coordinates are the same.
>>> p0 = Point()
>>> p1 = Point()
>>> p2 = Point(1, 1)
>>> p0 == p1
True
>>> p1 == p2
False
"""
if isinstance(pnt, Point):
return pnt.coords() == self.coords()
return Point(*pnt).coords() == self.coords()
def __ne__(self, other):
return not self == other
    # Restore Python 2 hashing behaviour on Python 3
__hash__ = object.__hash__
def coords(self):
"""Return a tuple with the point coordinates. ::
>>> pnt = Point(10, 100)
>>> pnt.coords()
(10.0, 100.0)
        If the point is 2D, return an (x, y) tuple. If ``z`` is set, the
        Point object becomes a 3D point and the method returns an
        (x, y, z) tuple. ::
>>> pnt.z = 1000.
>>> pnt.coords()
(10.0, 100.0, 1000.0)
..
"""
if self.is2D:
return self.x, self.y
else:
return self.x, self.y, self.z
def to_wkt_p(self):
"""Return a "well know text" (WKT) geometry string Python implementation. ::
>>> pnt = Point(10, 100)
>>> pnt.to_wkt_p()
'POINT(10.000000 100.000000)'
        .. warning::
            Only ``POINT`` (2/3D) is supported; ``POINTM`` and ``POINT`` with
            ``XYZM`` are not supported yet.
"""
return "POINT(%s)" % ' '.join(['%f' % coord
for coord in self.coords()])
def distance(self, pnt):
"""Calculate distance of 2 points, using the Vect_points_distance
C function, If one of the point have z == None, return the 2D distance.
:param pnt: the point for calculate the distance
:type pnt: a Point object or a tuple with the coordinates
>>> pnt0 = Point(0, 0, 0)
>>> pnt1 = Point(1, 0)
>>> pnt0.distance(pnt1)
1.0
>>> pnt1.z = 1
>>> pnt1
Point(1.000000, 0.000000, 1.000000)
>>> pnt0.distance(pnt1)
1.4142135623730951
"""
if self.is2D or pnt.is2D:
return libvect.Vect_points_distance(self.x, self.y, 0,
pnt.x, pnt.y, 0, 0)
else:
return libvect.Vect_points_distance(self.x, self.y, self.z,
pnt.x, pnt.y, pnt.z, 1)
def buffer(self, dist=None, dist_x=None, dist_y=None, angle=0,
round_=True, tol=0.1):
"""Return the buffer area around the point, using the
``Vect_point_buffer2`` C function.
:param dist: the distance around the point
:type dist: num
:param dist_x: the distance along x
:type dist_x: num
:param dist_y: the distance along y
:type dist_y: num
:param angle: the angle between 0x and major axis
:type angle: num
:param round_: to make corners round
:type round_: bool
:param tol: fix the maximum distance between theoretical arc and
output segments
:type tol: float
:returns: the buffer as Area object
>>> pnt = Point(0, 0)
>>> boundary, centroid = pnt.buffer(10)
>>> boundary #doctest: +ELLIPSIS
Line([Point(10.000000, 0.000000),...Point(10.000000, 0.000000)])
>>> centroid
Point(0.000000, 0.000000)
"""
if dist is not None:
dist_x = dist
dist_y = dist
elif not dist_x or not dist_y:
            raise TypeError('buffer expected 1 argument, got 0')
bound = Line()
p_points = ctypes.pointer(bound.c_points)
libvect.Vect_point_buffer2(self.x, self.y,
dist_x, dist_y,
angle, int(round_), tol,
p_points)
return (bound, self)
class Line(Geo):
"""Instantiate a new Line with a list of tuple, or with a list of Point. ::
>>> line = Line([(0, 0), (1, 1), (2, 0), (1, -1)])
>>> line #doctest: +NORMALIZE_WHITESPACE
Line([Point(0.000000, 0.000000),
Point(1.000000, 1.000000),
Point(2.000000, 0.000000),
Point(1.000000, -1.000000)])
..
"""
# geometry type
gtype = libvect.GV_LINE
def __init__(self, points=None, **kargs):
super(Line, self).__init__(**kargs)
if points is not None:
for pnt in points:
self.append(pnt)
def __getitem__(self, key):
"""Get line point of given index, slice allowed. ::
>>> line = Line([(0, 0), (1, 1), (2, 2), (3, 3)])
>>> line[1]
Point(1.000000, 1.000000)
>>> line[-1]
Point(3.000000, 3.000000)
>>> line[:2]
[Point(0.000000, 0.000000), Point(1.000000, 1.000000)]
..
"""
#TODO:
# line[0].x = 10 is not working
#pnt.c_px = ctypes.pointer(self.c_points.contents.x[indx])
# pnt.c_px = ctypes.cast(id(self.c_points.contents.x[indx]),
# ctypes.POINTER(ctypes.c_double))
if isinstance(key, slice):
#import pdb; pdb.set_trace()
#Get the start, stop, and step from the slice
return [Point(self.c_points.contents.x[indx],
self.c_points.contents.y[indx],
None if self.is2D else self.c_points.contents.z[indx])
for indx in range(*key.indices(len(self)))]
elif isinstance(key, int):
if key < 0: # Handle negative indices
key += self.c_points.contents.n_points
if key >= self.c_points.contents.n_points:
raise IndexError('Index out of range')
return Point(self.c_points.contents.x[key],
self.c_points.contents.y[key],
None if self.is2D else self.c_points.contents.z[key])
else:
raise ValueError("Invalid argument type: %r." % key)
def __setitem__(self, indx, pnt):
"""Change the coordinate of point. ::
>>> line = Line([(0, 0), (1, 1)])
>>> line[0] = (2, 2)
>>> line
Line([Point(2.000000, 2.000000), Point(1.000000, 1.000000)])
..
"""
x, y, z = get_xyz(pnt)
self.c_points.contents.x[indx] = x
self.c_points.contents.y[indx] = y
self.c_points.contents.z[indx] = z
def __iter__(self):
"""Return a Point generator of the Line"""
return (self.__getitem__(i) for i in range(self.__len__()))
def __len__(self):
"""Return the number of points of the line."""
return self.c_points.contents.n_points
def __str__(self):
return self.to_wkt()
def __repr__(self):
return "Line([%s])" % ', '.join([repr(pnt) for pnt in self.__iter__()])
def point_on_line(self, distance, angle=0, slope=0):
"""Return a Point object on line in the specified distance, using the
`Vect_point_on_line` C function.
Raise a ValueError If the distance exceed the Line length. ::
>>> line = Line([(0, 0), (1, 1)])
>>> line.point_on_line(5) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
        ValueError: The distance exceeds the length of the line,
        that is: 1.414214
>>> line.point_on_line(1)
Point(0.707107, 0.707107)
..
"""
# instantiate an empty Point object
maxdist = self.length()
if distance > maxdist:
str_err = "The distance exceed the length of the line, that is: %f"
raise ValueError(str_err % maxdist)
pnt = Point(0, 0, -9999)
if not libvect.Vect_point_on_line(self.c_points, distance,
pnt.c_points.contents.x,
pnt.c_points.contents.y,
pnt.c_points.contents.z,
ctypes.pointer(ctypes.c_double(angle)),
ctypes.pointer(ctypes.c_double(slope))):
raise ValueError("Vect_point_on_line give an error.")
pnt.is2D = self.is2D
return pnt
@mapinfo_must_be_set
def alive(self):
"""Return True if this line is alive or False if this line is
dead or its index is out of range.
"""
        return bool(libvect.Vect_line_alive(self.c_mapinfo, self.id))
def append(self, pnt):
"""Appends one point to the end of a line, using the
``Vect_append_point`` C function.
:param pnt: the point to add to line
:type pnt: a Point object or a tuple with the coordinates
>>> line = Line()
>>> line.append((10, 100))
>>> line
Line([Point(10.000000, 100.000000)])
>>> line.append((20, 200))
>>> line
Line([Point(10.000000, 100.000000), Point(20.000000, 200.000000)])
        Works like Python's ``list.append()``.
"""
x, y, z = get_xyz(pnt)
libvect.Vect_append_point(self.c_points, x, y, z)
def bbox(self, bbox=None):
"""Return the bounding box of the line, using ``Vect_line_box``
C function. ::
>>> line = Line([(0, 0), (0, 1), (2, 1), (2, 0)])
>>> bbox = line.bbox()
>>> bbox
Bbox(1.0, 0.0, 2.0, 0.0)
..
"""
bbox = bbox if bbox else Bbox()
libvect.Vect_line_box(self.c_points, bbox.c_bbox)
return bbox
def extend(self, line, forward=True):
"""Appends points to the end of a line.
        :param line: the points to append, given as a Line object, a list of
                     points, or directly as a line_pnts struct
        :type line: Line object or list of points
        :param forward: if forward is True the line is extended forward,
                        otherwise it is extended backward. The method uses the
                        `Vect_append_points` C function.
:type forward: bool
>>> line = Line([(0, 0), (1, 1)])
>>> line.extend( Line([(2, 2), (3, 3)]) )
>>> line #doctest: +NORMALIZE_WHITESPACE
Line([Point(0.000000, 0.000000),
Point(1.000000, 1.000000),
Point(2.000000, 2.000000),
Point(3.000000, 3.000000)])
"""
# set direction
if forward:
direction = libvect.GV_FORWARD
else:
direction = libvect.GV_BACKWARD
# check if is a Line object
if isinstance(line, Line):
c_points = line.c_points
else:
# instantiate a Line object
lin = Line()
for pnt in line:
# add the points to the line
lin.append(pnt)
c_points = lin.c_points
libvect.Vect_append_points(self.c_points, c_points, direction)
def insert(self, indx, pnt):
"""Insert new point at index position and move all old points at
that position and above up, using ``Vect_line_insert_point``
C function.
        :param indx: the index at which to insert the new point
:type indx: int
:param pnt: the point to add
:type pnt: a Point object
>>> line = Line([(0, 0), (1, 1)])
>>> line.insert(0, Point(1.000000, -1.000000) )
>>> line #doctest: +NORMALIZE_WHITESPACE
Line([Point(1.000000, -1.000000),
Point(0.000000, 0.000000),
Point(1.000000, 1.000000)])
"""
if indx < 0: # Handle negative indices
indx += self.c_points.contents.n_points
if indx >= self.c_points.contents.n_points:
raise IndexError('Index out of range')
x, y, z = get_xyz(pnt)
libvect.Vect_line_insert_point(self.c_points, indx, x, y, z)
def length(self):
"""Calculate line length, 3D-length in case of 3D vector line, using
`Vect_line_length` C function. ::
>>> line = Line([(0, 0), (1, 1), (0, 1)])
>>> line.length()
2.414213562373095
..
"""
return libvect.Vect_line_length(self.c_points)
def length_geodesic(self):
"""Calculate line length, usig `Vect_line_geodesic_length` C function.
::
>>> line = Line([(0, 0), (1, 1), (0, 1)])
>>> line.length_geodesic()
2.414213562373095
..
"""
return libvect.Vect_line_geodesic_length(self.c_points)
def distance(self, pnt):
"""Calculate the distance between line and a point.
:param pnt: the point to calculate distance
:type pnt: a Point object or a tuple with the coordinates
Return a namedtuple with:
* point: the closest point on the line,
* dist: the distance between these two points,
* spdist: distance to point on line from segment beginning
        * sldist: distance to point on line from line beginning along line
        The distance is computed using the ``Vect_line_distance`` C function.
>>> point = Point(2.3, 0.5)
>>> line = Line([(0, 0), (2, 0), (3, 0)])
>>> line.distance(point) #doctest: +NORMALIZE_WHITESPACE
LineDist(point=Point(2.300000, 0.000000),
dist=0.5, spdist=0.2999999999999998, sldist=2.3)
"""
        # instantiate outputs
cx = ctypes.c_double(0)
cy = ctypes.c_double(0)
cz = ctypes.c_double(0)
dist = ctypes.c_double(0)
sp_dist = ctypes.c_double(0)
lp_dist = ctypes.c_double(0)
libvect.Vect_line_distance(self.c_points,
pnt.x, pnt.y, 0 if pnt.is2D else pnt.z,
0 if self.is2D else 1,
ctypes.byref(cx), ctypes.byref(cy),
ctypes.byref(cz), ctypes.byref(dist),
ctypes.byref(sp_dist),
ctypes.byref(lp_dist))
# instantiate the Point class
point = Point(cx.value, cy.value, cz.value)
point.is2D = self.is2D
return LineDist(point, dist.value, sp_dist.value, lp_dist.value)
@mapinfo_must_be_set
def first_cat(self):
"""Fetches FIRST category number for given vector line and field, using
the ``Vect_get_line_cat`` C function.
.. warning::
Not implemented yet.
"""
# TODO: add this method.
# libvect.Vect_get_line_cat(self.c_mapinfo, self.id, self.field)
pass
def pop(self, indx):
"""Return the point in the index position and remove from the Line.
        :param indx: the index of the point to return and remove
:type indx: int
>>> line = Line([(0, 0), (1, 1), (2, 2)])
        >>> middle_pnt = line.pop(1)
        >>> middle_pnt #doctest: +NORMALIZE_WHITESPACE
        Point(1.000000, 1.000000)
>>> line #doctest: +NORMALIZE_WHITESPACE
Line([Point(0.000000, 0.000000), Point(2.000000, 2.000000)])
"""
if indx < 0: # Handle negative indices
indx += self.c_points.contents.n_points
if indx >= self.c_points.contents.n_points:
raise IndexError('Index out of range')
pnt = self.__getitem__(indx)
libvect.Vect_line_delete_point(self.c_points, indx)
return pnt
def delete(self, indx):
"""Remove the point in the index position.
        :param indx: the index of the point to remove
:type indx: int
>>> line = Line([(0, 0), (1, 1), (2, 2)])
>>> line.delete(-1)
>>> line #doctest: +NORMALIZE_WHITESPACE
Line([Point(0.000000, 0.000000), Point(1.000000, 1.000000)])
"""
if indx < 0: # Handle negative indices
indx += self.c_points.contents.n_points
if indx >= self.c_points.contents.n_points:
raise IndexError('Index out of range')
libvect.Vect_line_delete_point(self.c_points, indx)
def prune(self):
"""Remove duplicate points, i.e. zero length segments, using
`Vect_line_prune` C function. ::
>>> line = Line([(0, 0), (1, 1), (1, 1), (2, 2)])
>>> line.prune()
>>> line #doctest: +NORMALIZE_WHITESPACE
Line([Point(0.000000, 0.000000),
Point(1.000000, 1.000000),
Point(2.000000, 2.000000)])
..
"""
libvect.Vect_line_prune(self.c_points)
def prune_thresh(self, threshold):
"""Remove points in threshold, using the ``Vect_line_prune_thresh``
C function.
        :param threshold: the threshold distance below which points are pruned
:type threshold: num
>>> line = Line([(0, 0), (1.0, 1.0), (1.2, 0.9), (2, 2)])
>>> line.prune_thresh(0.5)
>>> line #doctest: +SKIP +NORMALIZE_WHITESPACE
Line([Point(0.000000, 0.000000),
Point(1.000000, 1.000000),
Point(2.000000, 2.000000)])
        .. warning ::
            the output of prune_thresh is not verified yet.
        """
        libvect.Vect_line_prune_thresh(self.c_points, ctypes.c_double(threshold))
def remove(self, pnt):
"""Delete point at given index and move all points above down, using
`Vect_line_delete_point` C function.
:param pnt: the point to remove
:type pnt: a Point object or a tuple with the coordinates
>>> line = Line([(0, 0), (1, 1), (2, 2)])
>>> line.remove((2, 2))
>>> line[-1] #doctest: +NORMALIZE_WHITESPACE
Point(1.000000, 1.000000)
..
"""
for indx, point in enumerate(self.__iter__()):
if pnt == point:
libvect.Vect_line_delete_point(self.c_points, indx)
return
raise ValueError('list.remove(x): x not in list')
def reverse(self):
"""Reverse the order of vertices, using `Vect_line_reverse`
C function. ::
>>> line = Line([(0, 0), (1, 1), (2, 2)])
>>> line.reverse()
>>> line #doctest: +NORMALIZE_WHITESPACE
Line([Point(2.000000, 2.000000),
Point(1.000000, 1.000000),
Point(0.000000, 0.000000)])
..
"""
libvect.Vect_line_reverse(self.c_points)
def segment(self, start, end):
"""Create line segment. using the ``Vect_line_segment`` C function.
:param start: distance from the beginning of the line where
the segment start
:type start: float
:param end: distance from the beginning of the line where
the segment end
:type end: float
::
# x (1, 1)
# |
# |-
# |
# x--------x (1, 0)
# (0, 0) ^
>>> line = Line([(0, 0), (1, 0), (1, 1)])
>>> line.segment(0.5, 1.5) #doctest: +NORMALIZE_WHITESPACE
Line([Point(0.500000, 0.000000),
Point(1.000000, 0.000000),
Point(1.000000, 0.500000)])
"""
line = Line()
libvect.Vect_line_segment(self.c_points, start, end, line.c_points)
return line
def to_list(self):
"""Return a list of tuple. ::
>>> line = Line([(0, 0), (1, 1), (2, 0), (1, -1)])
>>> line.to_list()
[(0.0, 0.0), (1.0, 1.0), (2.0, 0.0), (1.0, -1.0)]
..
"""
return [pnt.coords() for pnt in self.__iter__()]
def to_array(self):
"""Return an array of coordinates. ::
>>> line = Line([(0, 0), (1, 1), (2, 0), (1, -1)])
>>> line.to_array() #doctest: +NORMALIZE_WHITESPACE
array([[ 0., 0.],
[ 1., 1.],
[ 2., 0.],
[ 1., -1.]])
..
"""
return np.array(self.to_list())
def to_wkt_p(self):
"""Return a Well Known Text string of the line. ::
>>> line = Line([(0, 0), (1, 1), (1, 2)])
>>> line.to_wkt_p() #doctest: +ELLIPSIS
'LINESTRING(0.000000 0.000000, ..., 1.000000 2.000000)'
..
"""
return "LINESTRING(%s)" % ', '.join([
' '.join(['%f' % coord for coord in pnt.coords()])
for pnt in self.__iter__()])
def from_wkt(self, wkt):
"""Create a line reading a WKT string.
:param wkt: the WKT string containing the LINESTRING
:type wkt: str
>>> line = Line()
>>> line.from_wkt("LINESTRING(0 0,1 1,1 2)")
>>> line #doctest: +NORMALIZE_WHITESPACE
Line([Point(0.000000, 0.000000),
Point(1.000000, 1.000000),
Point(1.000000, 2.000000)])
..
"""
        match = re.match(r'LINESTRING\((.*)\)', wkt)
if match:
self.reset()
for coord in match.groups()[0].strip().split(','):
self.append(tuple([float(e) for e in coord.split(' ')]))
else:
return None
def buffer(self, dist=None, dist_x=None, dist_y=None,
angle=0, round_=True, caps=True, tol=0.1):
"""Return the buffer area around the line, using the
``Vect_line_buffer2`` C function.
:param dist: the distance around the line
:type dist: num
:param dist_x: the distance along x
:type dist_x: num
:param dist_y: the distance along y
:type dist_y: num
:param angle: the angle between 0x and major axis
:type angle: num
:param round_: to make corners round
:type round_: bool
:param tol: fix the maximum distance between theoretical arc and
output segments
:type tol: float
:returns: the buffer as Area object
>>> line = Line([(0, 0), (0, 2)])
>>> boundary, centroid, isles = line.buffer(10)
>>> boundary #doctest: +ELLIPSIS
Line([Point(-10.000000, 0.000000),...Point(-10.000000, 0.000000)])
>>> centroid #doctest: +NORMALIZE_WHITESPACE
Point(0.000000, 0.000000)
>>> isles
[]
..
"""
if dist is not None:
dist_x = dist
dist_y = dist
elif not dist_x or not dist_y:
            raise TypeError('buffer expected 1 argument, got 0')
p_bound = ctypes.pointer(ctypes.pointer(libvect.line_pnts()))
pp_isle = ctypes.pointer(ctypes.pointer(
ctypes.pointer(libvect.line_pnts())))
n_isles = ctypes.pointer(ctypes.c_int())
libvect.Vect_line_buffer2(self.c_points,
dist_x, dist_y, angle,
int(round_), int(caps), tol,
p_bound, pp_isle, n_isles)
boundary = Line(c_points=p_bound.contents)
isles = [Line(c_points=pp_isle[i].contents)
for i in range(n_isles.contents.value) if pp_isle[i]]
        return (boundary, self[0], isles)
def reset(self):
"""Reset line, using `Vect_reset_line` C function. ::
>>> line = Line([(0, 0), (1, 1), (2, 0), (1, -1)])
>>> len(line)
4
>>> line.reset()
>>> len(line)
0
>>> line
Line([])
..
"""
libvect.Vect_reset_line(self.c_points)
@mapinfo_must_be_set
def nodes(self):
"""Return the start and end nodes of the line
This method requires topology build.
return: A tuple of Node objects that represent the
start and end point of this line.
"""
if self.has_topology():
n1 = ctypes.c_int()
n2 = ctypes.c_int()
libvect.Vect_get_line_nodes(self.c_mapinfo, self.id,
ctypes.byref(n1),
ctypes.byref(n2))
return (Node(n1.value, self.c_mapinfo),
Node(n2.value, self.c_mapinfo))
class Node(object):
"""Node class for topological analysis of line neighbors.
    Objects of this class will be returned by the nodes() method
    of a Line object.
    All methods in this class require a proper setup of the Node
    objects. Hence, the correct id and a valid pointer to a mapinfo
    object must be provided in the constructor. Otherwise a segfault
    may happen.
"""
def __init__(self, v_id, c_mapinfo, **kwords):
"""Construct a Node object
        :param v_id: The unique node id
        :param c_mapinfo: A valid pointer to the mapinfo object
        :param kwords: Ignored
"""
self.id = v_id # vector id
self.c_mapinfo = c_mapinfo
self._setup()
@mapinfo_must_be_set
def _setup(self):
self.is2D = bool(libvect.Vect_is_3d(self.c_mapinfo) != 1)
self.nlines = libvect.Vect_get_node_n_lines(self.c_mapinfo, self.id)
def __len__(self):
return self.nlines
def __iter__(self):
return self.ilines()
def __repr__(self):
return "Node(%d)" % self.id
@mapinfo_must_be_set
def alive(self):
"""Return True if this node is alive or False if this node is
dead or its index is out of range.
"""
        return bool(libvect.Vect_node_alive(self.c_mapinfo, self.id))
@mapinfo_must_be_set
def coords(self):
"""Return a tuple with the node coordinates."""
x = ctypes.c_double()
y = ctypes.c_double()
z = ctypes.c_double()
libvect.Vect_get_node_coor(self.c_mapinfo, self.id, ctypes.byref(x),
ctypes.byref(y), ctypes.byref(z))
return (x.value, y.value) if self.is2D else (x.value, y.value, z.value)
def to_wkt(self):
"""Return a "well know text" (WKT) geometry string. ::
"""
return "POINT(%s)" % ' '.join(['%f' % coord
for coord in self.coords()])
def to_wkb(self):
"""Return a "well know binary" (WKB) geometry array. ::
TODO: Must be implemented
"""
raise Exception("Not implemented")
def ilines(self, only_in=False, only_out=False):
"""Return a generator with all lines id connected to a node.
The line id is negative if line is ending on the node and positive if
starting from the node.
:param only_in: Return only the lines that are ending in the node
:type only_in: bool
:param only_out: Return only the lines that are starting in the node
:type only_out: bool
"""
for iline in range(self.nlines):
lid = libvect.Vect_get_node_line(self.c_mapinfo, self.id, iline)
if (not only_in and lid > 0) or (not only_out and lid < 0):
yield lid
@mapinfo_must_be_set
def lines(self, only_in=False, only_out=False):
"""Return a generator with all lines connected to a node.
:param only_in: Return only the lines that are ending in the node
:type only_in: bool
:param only_out: Return only the lines that are starting in the node
:type only_out: bool
"""
for iline in self.ilines(only_in, only_out):
yield Line(v_id=abs(iline), c_mapinfo=self.c_mapinfo)
@mapinfo_must_be_set
def angles(self):
"""Return a generator with all lines angles in a node."""
for iline in range(self.nlines):
yield libvect.Vect_get_node_line_angle(self.c_mapinfo,
self.id, iline)
class Boundary(Line):
"""
"""
# geometry type
gtype = libvect.GV_BOUNDARY
def __init__(self, **kargs):
super(Boundary, self).__init__(**kargs)
v_id = kargs.get('v_id', 0)
        # v_id can be None (feature not read from a map); treat it as 0
v_id = 0 if v_id is None else v_id
self.dir = libvect.GV_FORWARD if v_id > 0 else libvect.GV_BACKWARD
self.c_left = ctypes.pointer(ctypes.c_int())
self.c_right = ctypes.pointer(ctypes.c_int())
@property
def left_area_id(self):
"""Left side area id, only available after read_area_ids() was called"""
return self.c_left.contents.value
@property
def right_area_id(self):
"""Right side area id, only available after read_area_ids() was called"""
return self.c_right.contents.value
def __repr__(self):
return "Boundary([%s])" % ', '.join([repr(pnt) for pnt in self.__iter__()])
@mapinfo_must_be_set
def _centroid(self, side, idonly=False):
if side > 0:
v_id = libvect.Vect_get_area_centroid(self.c_mapinfo, side)
v_id = v_id if v_id else None
if idonly:
return v_id
else:
cntr = Centroid(v_id=v_id, c_mapinfo=self.c_mapinfo)
return cntr
def left_centroid(self, idonly=False):
"""Return left centroid
:param idonly: True to return only the cat of feature
:type idonly: bool
"""
return self._centroid(self.c_left.contents.value, idonly)
def right_centroid(self, idonly=False):
"""Return right centroid
:param idonly: True to return only the cat of feature
:type idonly: bool
"""
return self._centroid(self.c_right.contents.value, idonly)
@mapinfo_must_be_set
def read_area_ids(self):
"""Read and return left and right area ids of the boundary"""
libvect.Vect_get_line_areas(self.c_mapinfo, self.id,
self.c_left, self.c_right)
return self.c_left.contents.value, self.c_right.contents.value
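    # Illustrative call sequence (hypothetical; requires built topology):
    #     left, right = boundary.read_area_ids()
    #     boundary.left_area_id   # now populated, equal to `left`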
def area(self):
"""Return the area of the polygon.
>>> bound = Boundary(points=[(0, 0), (0, 2), (2, 2), (2, 0),
... (0, 0)])
>>> bound.area()
4.0
"""
libgis.G_begin_polygon_area_calculations()
return libgis.G_area_of_polygon(self.c_points.contents.x,
self.c_points.contents.y,
self.c_points.contents.n_points)
class Centroid(Point):
"""The Centroid class inherit from the Point class.
Centroid contains an attribute with the C Map_info struct, and attributes
with the id of the Area. ::
>>> centroid = Centroid(x=0, y=10)
>>> centroid
Centroid(0.000000, 10.000000)
>>> from grass.pygrass.vector import VectorTopo
>>> test_vect = VectorTopo(test_vector_name)
>>> test_vect.open(mode='r')
>>> centroid = Centroid(v_id=18, c_mapinfo=test_vect.c_mapinfo)
>>> centroid
Centroid(3.500000, 3.500000)
>>> test_vect.close()
..
"""
# geometry type
gtype = libvect.GV_CENTROID
def __init__(self, area_id=None, **kargs):
super(Centroid, self).__init__(**kargs)
self.area_id = area_id
if self.id and self.c_mapinfo and self.area_id is None:
self.area_id = self._area_id()
elif self.c_mapinfo and self.area_id and self.id is None:
self.id = self._centroid_id()
if self.area_id is not None:
self.read()
#self.c_pline = ctypes.pointer(libvect.P_line()) if topology else None
def __repr__(self):
return "Centroid(%s)" % ', '.join(['%f' % co for co in self.coords()])
@mapinfo_must_be_set
def _centroid_id(self):
"""Return the centroid_id, using the c_mapinfo and an area_id
attributes of the class, and calling the Vect_get_area_centroid
C function, if no centroid_id were found return None"""
centroid_id = libvect.Vect_get_area_centroid(self.c_mapinfo,
self.area_id)
return centroid_id if centroid_id != 0 else None
@mapinfo_must_be_set
def _area_id(self):
"""Return the area_id, using the c_mapinfo and an centroid_id
attributes of the class, and calling the Vect_centroid_area
C function, if no area_id were found return None"""
area_id = libvect.Vect_get_centroid_area(self.c_mapinfo,
self.id)
return area_id if area_id != 0 else None
class Isle(Geo):
"""An Isle is an area contained by another area.
"""
def __init__(self, **kargs):
super(Isle, self).__init__(**kargs)
#self.area_id = area_id
def __repr__(self):
return "Isle(%d)" % (self.id)
@mapinfo_must_be_set
def boundaries(self):
"""Return a list of boundaries"""
ilist = Ilist()
libvect.Vect_get_isle_boundaries(self.c_mapinfo, self.id,
ilist.c_ilist)
return ilist
@mapinfo_must_be_set
def bbox(self, bbox=None):
"""Return bounding box of Isle"""
bbox = bbox if bbox else Bbox()
libvect.Vect_get_isle_box(self.c_mapinfo, self.id, bbox.c_bbox)
return bbox
@mapinfo_must_be_set
def points(self):
"""Return a Line object with the outer ring points"""
line = Line()
libvect.Vect_get_isle_points(self.c_mapinfo, self.id, line.c_points)
return line
def to_wkt(self):
"""Return a Well Known Text string of the isle. ::
For now the outer ring is returned
TODO: Implement inner rings detected from isles
"""
line = self.points()
return "Polygon((%s))" % ', '.join([
' '.join(['%f' % coord for coord in pnt])
for pnt in line.to_list()])
def to_wkb(self):
"""Return a "well know text" (WKB) geometry array. ::
"""
raise Exception("Not implemented")
@mapinfo_must_be_set
def points_geos(self):
"""Return a Line object with the outer ring points
"""
return libvect.Vect_get_isle_points_geos(self.c_mapinfo, self.id)
@mapinfo_must_be_set
def area_id(self):
"""Returns area id for isle."""
return libvect.Vect_get_isle_area(self.c_mapinfo, self.id)
@mapinfo_must_be_set
def alive(self):
"""Check if isle is alive or dead (topology required)"""
return bool(libvect.Vect_isle_alive(self.c_mapinfo, self.id))
@mapinfo_must_be_set
def contain_pnt(self, pnt):
"""Check if point is in area.
:param pnt: the point to remove
:type pnt: a Point object or a tuple with the coordinates
"""
bbox = self.bbox()
return bool(libvect.Vect_point_in_island(pnt.x, pnt.y,
self.c_mapinfo, self.id,
bbox.c_bbox.contents))
def area(self):
"""Return the area value of an Isle"""
border = self.points()
return libgis.G_area_of_polygon(border.c_points.contents.x,
border.c_points.contents.y,
border.c_points.contents.n_points)
def perimeter(self):
"""Return the perimeter value of an Isle.
"""
border = self.points()
return libvect.Vect_line_geodesic_length(border.c_points)
class Isles(object):
def __init__(self, c_mapinfo, area_id=None):
self.c_mapinfo = c_mapinfo
self.area_id = area_id
self._isles_id = None
self._isles = None
if area_id:
self._isles_id = self.isles_ids()
self._isles = self.isles()
@mapinfo_must_be_set
def __len__(self):
return libvect.Vect_get_area_num_isles(self.c_mapinfo, self.area_id)
def __repr__(self):
return "Isles(%r)" % self.area_id
def __getitem__(self, key):
        if self._isles is None:
            if self._isles_id is None:
                self._isles_id = self.isles_ids()
            self._isles = self.isles()
return self._isles[key]
@mapinfo_must_be_set
def isles_ids(self):
"""Return the id of isles"""
return [libvect.Vect_get_area_isle(self.c_mapinfo, self.area_id, i)
for i in range(self.__len__())]
@mapinfo_must_be_set
def isles(self):
"""Return isles"""
return [Isle(v_id=isle_id, c_mapinfo=self.c_mapinfo)
for isle_id in self._isles_id]
class Area(Geo):
"""
Vect_build_line_area,
Vect_find_area,
Vect_get_area_box,
Vect_get_area_points_geos,
Vect_centroid_area,
Vect_get_isle_area,
Vect_get_line_areas,
Vect_get_num_areas,
Vect_get_point_in_area,
Vect_isle_find_area,
Vect_point_in_area,
Vect_point_in_area_outer_ring,
Vect_read_area_geos,
Vect_remove_small_areas,
Vect_select_areas_by_box,
Vect_select_areas_by_polygon
"""
# geometry type
gtype = libvect.GV_AREA
def __init__(self, **kargs):
super(Area, self).__init__(**kargs)
# set the attributes
#if self.attrs and self.cat:
# self.attrs.cat = self.cat
def __repr__(self):
return "Area(%d)" % self.id if self.id else "Area( )"
@property
def cat(self):
centroid = self.centroid()
return centroid.cat if centroid else None
@mapinfo_must_be_set
def points(self, line=None):
"""Return a Line object with the outer ring
:param line: a Line object to fill with info from points of area
:type line: a Line object
"""
line = Line() if line is None else line
libvect.Vect_get_area_points(self.c_mapinfo, self.id, line.c_points)
return line
@mapinfo_must_be_set
def centroid(self):
"""Return the centroid
:param centroid: a Centroid object to fill with info from centroid of area
:type centroid: a Centroid object
"""
centroid_id = libvect.Vect_get_area_centroid(self.c_mapinfo, self.id)
if centroid_id:
return Centroid(v_id=centroid_id, c_mapinfo=self.c_mapinfo,
area_id=self.id)
@mapinfo_must_be_set
def num_isles(self):
return libvect.Vect_get_area_num_isles(self.c_mapinfo, self.id)
@mapinfo_must_be_set
def isles(self, isles=None):
"""Return a list of islands located in this area"""
if isles is not None:
isles.area_id = self.id
return isles
return Isles(self.c_mapinfo, self.id)
@mapinfo_must_be_set
def area(self):
"""Returns area of area without areas of isles.
double Vect_get_area_area (const struct Map_info \*Map, int area)
"""
return libvect.Vect_get_area_area(self.c_mapinfo, self.id)
@mapinfo_must_be_set
def alive(self):
"""Check if area is alive or dead (topology required)
"""
return bool(libvect.Vect_area_alive(self.c_mapinfo, self.id))
@mapinfo_must_be_set
def bbox(self, bbox=None):
"""Return the Bbox of area
:param bbox: a Bbox object to fill with info from bounding box of area
:type bbox: a Bbox object
"""
bbox = bbox if bbox else Bbox()
libvect.Vect_get_area_box(self.c_mapinfo, self.id, bbox.c_bbox)
return bbox
@mapinfo_must_be_set
def buffer(self, dist=None, dist_x=None, dist_y=None,
angle=0, round_=True, caps=True, tol=0.1):
"""Return the buffer area around the area, using the
``Vect_area_buffer2`` C function.
:param dist: the distance around the area
:type dist: num
:param dist_x: the distance along x
:type dist_x: num
:param dist_y: the distance along y
:type dist_y: num
:param angle: the angle between 0x and major axis
:type angle: num
:param round_: to make corners round
:type round_: bool
:param tol: fix the maximum distance between theoretical arc and
output segments
:type tol: float
:returns: the buffer as line, centroid, isles object tuple
"""
if dist is not None:
dist_x = dist
dist_y = dist
elif not dist_x or not dist_y:
            raise TypeError('buffer expected 1 argument, got 0')
p_bound = ctypes.pointer(ctypes.pointer(libvect.line_pnts()))
pp_isle = ctypes.pointer(ctypes.pointer(
ctypes.pointer(libvect.line_pnts())))
n_isles = ctypes.pointer(ctypes.c_int())
libvect.Vect_area_buffer2(self.c_mapinfo, self.id,
dist_x, dist_y, angle,
int(round_), int(caps), tol,
p_bound, pp_isle, n_isles)
return (Line(c_points=p_bound.contents),
                self.centroid(),
[Line(c_points=pp_isle[i].contents)
for i in range(n_isles.contents.value)])
@mapinfo_must_be_set
def boundaries(self, ilist=False):
"""Creates list of boundaries for given area.
int Vect_get_area_boundaries(const struct Map_info \*Map,
int area, struct ilist \*List)
"""
ilst = Ilist()
libvect.Vect_get_area_boundaries(self.c_mapinfo, self.id,
ilst.c_ilist)
        if ilist:
            return ilst
return [Boundary(v_id=abs(v_id), c_mapinfo=self.c_mapinfo) for v_id in ilst]
def to_wkt(self):
"""Return a "well know text" (WKT) area string, this method uses
the GEOS implementation in the vector library. ::
"""
return decode(libvect.Vect_read_area_to_wkt(self.c_mapinfo, self.id))
def to_wkb(self):
"""Return a "well know binary" (WKB) area byte array, this method uses
the GEOS implementation in the vector library. ::
"""
size = ctypes.c_size_t()
barray = libvect.Vect_read_area_to_wkb(self.c_mapinfo, self.id,
ctypes.byref(size))
        return ctypes.string_at(barray, size.value)
@mapinfo_must_be_set
def cats(self, cats=None):
"""Get area categories.
:param cats: a Cats object to fill with info with area categories
:type cats: a Cats object
"""
cats = cats if cats else Cats()
libvect.Vect_get_area_cats(self.c_mapinfo, self.id, cats.c_cats)
return cats
def get_first_cat(self):
"""Find FIRST category of given field and area.
int Vect_get_area_cat(const struct Map_info \*Map, int area, int field)
        .. warning:: Not implemented
"""
pass
@mapinfo_must_be_set
def contains_point(self, point, bbox=None):
"""Check if point is in area.
:param point: the point to analyze
:type point: a Point object or a tuple with the coordinates
        :param bbox: the bounding box within which to run the analysis
:type bbox: a Bbox object
"""
bbox = bbox if bbox else self.bbox()
return bool(libvect.Vect_point_in_area(point.x, point.y,
self.c_mapinfo, self.id,
bbox.c_bbox))
@mapinfo_must_be_set
def perimeter(self):
"""Calculate area perimeter.
:return: double Vect_area_perimeter (const struct line_pnts \*Points)
"""
border = self.points()
return libvect.Vect_line_geodesic_length(border.c_points)
def read(self):
pass
#
# Define a dictionary to convert the feature type to name and or object
#
GV_TYPE = {libvect.GV_POINT: {'label': 'point', 'obj': Point},
libvect.GV_LINE: {'label': 'line', 'obj': Line},
libvect.GV_BOUNDARY: {'label': 'boundary', 'obj': Boundary},
libvect.GV_CENTROID: {'label': 'centroid', 'obj': Centroid},
libvect.GV_FACE: {'label': 'face', 'obj': None},
libvect.GV_KERNEL: {'label': 'kernel', 'obj': None},
libvect.GV_AREA: {'label': 'area', 'obj': Area},
libvect.GV_VOLUME: {'label': 'volume', 'obj': None}, }
GEOOBJ = {"areas": Area,
"dblinks": None,
"faces": None,
"holes": None,
"boundaries": Boundary,
"islands": Isle,
"kernels": None,
"line_points": None,
"points": Point,
"lines": Line,
"nodes": Node,
"volumes": None}
def c_read_next_line(c_mapinfo, c_points, c_cats):
v_id = c_mapinfo.contents.next_line
v_id = v_id if v_id != 0 else None
ftype = libvect.Vect_read_next_line(c_mapinfo, c_points, c_cats)
if ftype == -2:
raise StopIteration()
    if ftype == -1:
        raise ValueError('Error while reading the next line from the vector map.')
return ftype, v_id, c_points, c_cats
def read_next_line(c_mapinfo, table=None, writeable=False,
c_points=None, c_cats=None, is2D=True):
"""Return the next geometry feature of a vector map."""
# Take care of good memory management
free_points = False
    if c_points is None:
free_points = True
free_cats = False
    if c_cats is None:
free_cats = True
c_points = c_points if c_points else ctypes.pointer(libvect.line_pnts())
c_cats = c_cats if c_cats else ctypes.pointer(libvect.line_cats())
ftype, v_id, c_points, c_cats = c_read_next_line(c_mapinfo, c_points,
c_cats)
return GV_TYPE[ftype]['obj'](v_id=v_id, c_mapinfo=c_mapinfo,
c_points=c_points, c_cats=c_cats,
table=table, writeable=writeable, is2D=is2D,
free_points=free_points, free_cats=free_cats)
def c_read_line(feature_id, c_mapinfo, c_points, c_cats):
nmax = libvect.Vect_get_num_lines(c_mapinfo)
if feature_id < 0: # Handle negative indices
feature_id += nmax + 1
if feature_id > nmax:
raise IndexError('Index out of range')
if feature_id > 0:
ftype = libvect.Vect_read_line(c_mapinfo, c_points, c_cats, feature_id)
return feature_id, ftype, c_points, c_cats
else:
raise ValueError('The index must be >0, %r given.' % feature_id)
def read_line(feature_id, c_mapinfo, table=None, writeable=False,
c_points=None, c_cats=None, is2D=True):
"""Return a geometry object given the feature id and the c_mapinfo.
"""
# Take care of good memory management
free_points = False
    if c_points is None:
free_points = True
free_cats = False
    if c_cats is None:
free_cats = True
c_points = c_points if c_points else ctypes.pointer(libvect.line_pnts())
c_cats = c_cats if c_cats else ctypes.pointer(libvect.line_cats())
feature_id, ftype, c_points, c_cats = c_read_line(feature_id, c_mapinfo,
c_points, c_cats)
if GV_TYPE[ftype]['obj'] is not None:
return GV_TYPE[ftype]['obj'](v_id=feature_id, c_mapinfo=c_mapinfo,
c_points=c_points, c_cats=c_cats,
table=table, writeable=writeable, is2D=is2D,
free_points=free_points,
free_cats=free_cats)
if __name__ == "__main__":
import doctest
from grass.pygrass import utils
utils.create_test_vector_map(test_vector_name)
doctest.testmod()
"""Remove the generated vector map, if exist"""
from grass.pygrass.utils import get_mapset_vector
from grass.script.core import run_command
mset = get_mapset_vector(test_vector_name, mapset='')
if mset:
run_command("g.remove", flags='f', type='vector', name=test_vector_name)
| 34.576352 | 89 | 0.541826 | 8,556 | 65,211 | 3.961431 | 0.070594 | 0.008202 | 0.019472 | 0.016994 | 0.527645 | 0.462825 | 0.395114 | 0.34425 | 0.317431 | 0.286245 | 0 | 0.045045 | 0.346031 | 65,211 | 1,885 | 90 | 34.594695 | 0.74973 | 0.395807 | 0 | 0.319693 | 0 | 0 | 0.034571 | 0 | 0 | 0 | 0 | 0.002122 | 0 | 1 | 0.170077 | false | 0.005115 | 0.017903 | 0.023018 | 0.34399 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4c8efa85e744b76647579e30a8854809ccf53601 | 1,144 | py | Python | 题源分类/LeetCode/LeetCode日刷/python/86.分隔链表.py | ZhengyangXu/Algorithm-Daily-Practice | 3017a3d476fc9a857026190ea4fae2911058df59 | [
"MIT"
] | null | null | null | 题源分类/LeetCode/LeetCode日刷/python/86.分隔链表.py | ZhengyangXu/Algorithm-Daily-Practice | 3017a3d476fc9a857026190ea4fae2911058df59 | [
"MIT"
] | null | null | null | 题源分类/LeetCode/LeetCode日刷/python/86.分隔链表.py | ZhengyangXu/Algorithm-Daily-Practice | 3017a3d476fc9a857026190ea4fae2911058df59 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode.cn id=86 lang=python3
#
# [86] Partition List
#
# https://leetcode-cn.com/problems/partition-list/description/
#
# algorithms
# Medium (60.30%)
# Likes: 286
# Dislikes: 0
# Total Accepted: 61.5K
# Total Submissions: 102K
# Testcase Example: '[1,4,3,2,5,2]\n3'
#
# Given a linked list and a value x, partition it such that all nodes less
# than x come before nodes greater than or equal to x.
#
# You should preserve the original relative order of the nodes in each of
# the two partitions.
#
#
#
# Example:
#
# Input: head = 1->4->3->2->5->2, x = 3
# Output: 1->2->2->4->3->5
#
#
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, val=0, next=None):
#         self.val = val
#         self.next = next
class Solution:
    def partition(self, head: 'ListNode', x: int) -> 'ListNode':
dummy1 = ListNode()
dummy2 = ListNode()
p1,p2 = dummy1,dummy2
cur = head
while cur:
if cur.val < x:
p1.next = cur
p1 = p1.next
else:
p2.next = cur
p2 = p2.next
            cur = cur.next
p2.next = None
p1.next = dummy2.next
return dummy1.next
# @lc code=end
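# Minimal local smoke test; not part of the LeetCode submission. The local
# ListNode mirrors the commented stub above (LeetCode predefines it online),
# and the annotations in `partition` are quoted so this file imports outside
# LeetCode as well.
if __name__ == '__main__':
    class ListNode:
        def __init__(self, val=0, next=None):
            self.val = val
            self.next = next
    head = None
    for v in reversed([1, 4, 3, 2, 5, 2]):
        head = ListNode(v, head)
    res = Solution().partition(head, 3)
    out = []
    while res:
        out.append(res.val)
        res = res.next
    print(out)  # expected: [1, 2, 2, 4, 3, 5]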
| 19.389831 | 62 | 0.518357 | 148 | 1,144 | 3.97973 | 0.5 | 0.010187 | 0.010187 | 0.013582 | 0.020374 | 0.020374 | 0 | 0 | 0 | 0 | 0 | 0.073236 | 0.343531 | 1,144 | 58 | 63 | 19.724138 | 0.711052 | 0.47465 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0 | 0 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4c93042abb669c8eb64eeb6b34ebae4584d33589 | 6,121 | py | Python | dbgr/commands.py | JakubTesarek/dbgr | fc55cee5d5a69f3fa691579bc7d2627f51cbca03 | [
"Apache-2.0"
] | 8 | 2019-05-23T19:45:46.000Z | 2021-02-08T17:21:21.000Z | dbgr/commands.py | JakubTesarek/dbgr | fc55cee5d5a69f3fa691579bc7d2627f51cbca03 | [
"Apache-2.0"
] | 86 | 2019-05-13T14:20:20.000Z | 2019-06-19T11:48:59.000Z | dbgr/commands.py | JakubTesarek/dbgr | fc55cee5d5a69f3fa691579bc7d2627f51cbca03 | [
"Apache-2.0"
] | 1 | 2021-02-08T17:21:22.000Z | 2021-02-08T17:21:22.000Z | import argparse
import sys
import traceback
import textwrap
import colorama
from dbgr.requests import get_requests, execute_request, parse_cmd_arguments, parse_module_name
from dbgr.environment import init_environment, get_environments, DEFAULT_ENVIRONMENT, Environment
from dbgr.session import close_session
from dbgr.completion import RequestsCompleter, ModulesCompleter, EnvironmentsCompleter
def version_command():
''' Display version of DBGR '''
from dbgr.meta import __version__
print(__version__)
async def prepare_and_execute_request(request, args):
try:
init_environment(args.env)
arguments = parse_cmd_arguments(args.arguments)
await execute_request(request, use_defaults=args.use_defaults, **arguments)
except AssertionError:
_, _, trace = sys.exc_info()
trace_info = traceback.extract_tb(trace)
filename, line, function, text = trace_info[-1] # pylint: disable=W0612
print(f'{colorama.Fore.RED}Assertion error in {filename}:{line}:')
print(f'{colorama.Fore.RED}{text}')
except Exception as ex:
print(f'{colorama.Fore.RED}{ex}')
async def interactive_command(args):
''' Run requests in interactive mode '''
print(f'{colorama.Style.DIM}Dbgr interactive mode; press ^C to exit.')
try:
while True:
request = input('> ').strip()
await prepare_and_execute_request(request, args)
finally:
await close_session()
async def request_command(args):
''' Execute request '''
try:
await prepare_and_execute_request(args.request, args)
finally:
await close_session()
async def list_command(args):
''' List all available requests and their arguments '''
l_module, l_request = parse_module_name(args.module)
requests = get_requests()
if not requests:
print(f'{colorama.Fore.RED}No requests found.')
return
if l_module and l_module not in requests:
print(f'{colorama.Fore.RED}Module "{l_module}" does not exist.')
return
if l_module and l_request and l_request not in requests[l_module]:
print(f'{colorama.Fore.RED}Request "{l_request}" does not exist in module "{l_module}".')
return
request_printed = False
for module, requests in requests.items():
module_printed = False
if not l_module or module == l_module:
for request in requests.values():
if not l_request or request.name == l_request:
if not module_printed:
print(f'{colorama.Style.BRIGHT}{module}:')
module_printed = True
request_printed = True
print(textwrap.indent(str(request), ' '), end='')
if not request_printed and l_request:
print(f'{colorama.Fore.RED}Request "{l_request}" does not exist in any module.')
async def environments_command(args): # pylint: disable=W0613
'''
List available environments. With optional <environment> argument
lists all defined variables and values
'''
if args.environment:
env = Environment(args.environment)
for section in env.sections():
print(f'{colorama.Style.BRIGHT}{section}')
for key, value in env.items(section):
print(f'- {key}: {value}')
else:
for env in get_environments():
if env == DEFAULT_ENVIRONMENT:
print(f'- {colorama.Style.BRIGHT}{env}')
else:
print(f'- {env}')
def argument_parser():
parser = argparse.ArgumentParser(
prog='dbgr',
description='DBGR is a tool for testing and debugging HTTP APIs.',
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument(
'-v', '--version', action='store_true', help=version_command.__doc__
)
subparsers = parser.add_subparsers(help='Command to execute')
int_parser = subparsers.add_parser(
'interactive',
aliases=['int', 'i'],
help=interactive_command.__doc__
)
int_parser.add_argument(
'-e', '--env', default=DEFAULT_ENVIRONMENT,
help=f'Environment that will be used (default: "{DEFAULT_ENVIRONMENT}")'
).completer = EnvironmentsCompleter()
int_parser.add_argument(
'-d', '--use-defaults', action='store_true',
help='Use default values when possible')
int_parser.set_defaults(func=interactive_command, arguments=[])
req_parser = subparsers.add_parser(
'request',
aliases=['req', 'r'],
help=request_command.__doc__
)
req_parser.add_argument(
'request',
help='Name of the request to execute'
).completer = RequestsCompleter()
req_parser.add_argument(
'-e', '--env', default=DEFAULT_ENVIRONMENT,
help='Environment that will be used'
).completer = EnvironmentsCompleter()
req_parser.add_argument(
'-d', '--use-defaults', action='store_true',
help='Use default values when possible')
req_parser.add_argument(
'-a', '--arg', dest='arguments', action='append', default=[],
help='Arguments for requests execution')
req_parser.set_defaults(func=request_command)
list_parser = subparsers.add_parser(
'list-requests',
aliases=['list', 'l'],
help=list_command.__doc__)
list_parser.add_argument(
'module',
nargs='?',
help=(
'Module name or fully qualified request name `module:request`. '
            'Optionally you can omit the module name: `:request`'
)
).completer = ModulesCompleter()
list_parser.set_defaults(func=list_command)
environments_parser = subparsers.add_parser(
'list-environments',
aliases=['envs', 'e'],
help=environments_command.__doc__)
environments_parser.add_argument(
'environment',
nargs='?',
help='Name of environment to list'
).completer = EnvironmentsCompleter()
environments_parser.set_defaults(func=environments_command)
return parser
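# Illustrative CLI invocations (request, module, and environment names are
# hypothetical examples):
#   dbgr request get-user --env staging --arg user_id=42
#   dbgr list-requests mymodule:get-user
#   dbgr list-environments staging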
| 36.218935 | 97 | 0.64777 | 700 | 6,121 | 5.457143 | 0.237143 | 0.020419 | 0.040314 | 0.032984 | 0.228534 | 0.155236 | 0.11466 | 0.11466 | 0.092147 | 0.065969 | 0 | 0.001944 | 0.243588 | 6,121 | 168 | 98 | 36.434524 | 0.82311 | 0.011273 | 0 | 0.216783 | 0 | 0 | 0.199517 | 0.054194 | 0 | 0 | 0 | 0 | 0.013986 | 1 | 0.013986 | false | 0 | 0.06993 | 0 | 0.111888 | 0.146853 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4c937faa9484185d720ae8ecabcd36a21ab840b4 | 14,965 | py | Python | Initial Submission (20200803) Version/Pre-Print (20200624) Version/Analysis Code/CovidDataSmoothing.py | hazhirr/CovidGlobal | 7af63d0dd5eede1887473cb46f81c43b36905ee9 | [
"MIT"
] | 7 | 2020-07-09T07:41:05.000Z | 2021-06-21T12:19:17.000Z | Pre-Print (20200624) Version/Analysis Code/CovidDataSmoothing.py | maftouni/CovidGlobal | 2c501b009e3d4ada55a41a50f7485d0471ba5157 | [
"MIT"
] | 1 | 2020-07-24T08:59:04.000Z | 2020-07-29T17:48:48.000Z | Pre-Print (20200624) Version/Analysis Code/CovidDataSmoothing.py | maftouni/CovidGlobal | 2c501b009e3d4ada55a41a50f7485d0471ba5157 | [
"MIT"
] | 6 | 2020-07-11T05:27:08.000Z | 2021-11-15T14:15:16.000Z | import json
import subprocess
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from shutil import copy
from scipy import interpolate
from statsmodels.tsa.seasonal import STL
def import_datasets(datalist, vdfname):
""" Creates Vensim script to convert CSVs to VDFs """
print("Importing data to VDF...")
scenario_text = []
scenario_text.append("SPECIAL>NOINTERACTION\n")
for dataname in datalist:
scenario_text.append(f"MENU>CSV2VDF|{dataname}.csv|{vdfname}{dataname}|{dataname}.frm|\n")
scenario_text.append("MENU>EXIT\n")
scriptfile = open("ImportData.cmd", 'w')
scriptfile.writelines(scenario_text)
scriptfile.close()
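# For example, import_datasets(['flow'], 'Covid') would write an
# ImportData.cmd script containing (names illustrative):
#   SPECIAL>NOINTERACTION
#   MENU>CSV2VDF|flow.csv|Covidflow|flow.frm|
#   MENU>EXIT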
def copy_data(datalist, vdfname):
""" Copies VDFXs to parent directory of working directory """
for dataname in datalist:
for filetype in [".vdf", ".vdfx"]:
try:
copy(f"./{vdfname}{dataname}{filetype}", f"../")
except FileNotFoundError:
pass
def idx_to_int(df):
"""Converts string numeric column keys of dataframe to int"""
Tdf = df.T
Tdf.index = Tdf.index.astype('int')
newdf = Tdf.T
return(newdf)
def get_first_idx(s):
return (s > 0).idxmax(skipna=True)
def get_last_idx(s):
return s.where(s > 0).last_valid_index()
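# For example, with s = pd.Series([0, 0, 3, 5, 0, 2, 0]), get_first_idx(s)
# returns 2 (index of the first positive entry) and get_last_idx(s)
# returns 5 (index of the last positive entry).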
def calculate_devs(flowrow, windowlength, datathreshold, thresholdwidth=1):
"""Calculate rolling mean of series and adjusted deviations from the mean, as well as
threshold values based on median +/- MADs, ignoring values below given datathreshold"""
flowmeanraw = flowrow.rolling(windowlength, min_periods=1, center=True).mean()
flowmean = flowmeanraw.copy()
flowmean.loc[:(flowmean >= datathreshold).idxmax()] = np.nan
flowrawdev = flowrow - flowmean
flowadjdev = flowrawdev / np.sqrt(flowmean)
lowthreshold = flowadjdev.median() - flowadjdev.mad() * thresholdwidth
highthreshold = flowadjdev.median() + flowadjdev.mad() * thresholdwidth
devs = {'rawmean': flowmeanraw, 'mean': flowmean, 'rawdev': flowrawdev,
'adjdev': flowadjdev, 'lowthr': lowthreshold, 'highthr': highthreshold}
return devs
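# The sqrt(mean) scaling above approximately standardizes count data (for
# Poisson-like counts the variance scales with the mean), so the
# median +/- MAD thresholds stay comparable across high- and low-count
# periods. Illustrative check with a hypothetical series:
#   devs = calculate_devs(pd.Series([5., 6., 4., 20., 5., 6.]),
#                         windowlength=3, datathreshold=1)
#   devs['adjdev'][3] > devs['highthr']  # the spike at index 3 is flagged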
def fill_dips(smflow, smdevs, k, smoothfactor, lowthreshold, borrowlength=7):
"""Identify points with deviations below threshold value and partially fill
by borrowing from following points, based on a multinomial draw with probabilities
proportional to deviations of those points"""
for i, adjdev in enumerate(smdevs['adjdev'][:-k]):
if adjdev < lowthreshold:
borrowlist = smdevs['adjdev'].iloc[i+1:max(i+1+borrowlength, i+1)]
values = smflow.iloc[i+1:max(i+1+borrowlength, i+1)]
borrowlist -= adjdev
borrowlist.mask(borrowlist < 0, other=0, inplace=True)
if not all([(b == 0 or np.isnan(b)) for b in borrowlist]):
borrowlist.astype('float64')
borrowlist.dropna(inplace=True)
borrowlist /= borrowlist.sum()
mnlist = np.random.multinomial(abs(int(np.floor(smdevs['rawdev'].iloc[i]*smoothfactor))),
[abs(i) for i in borrowlist])
mnlist = np.minimum(mnlist, values)
smflow.iloc[i] += mnlist.sum()
for j, val in enumerate(mnlist):
smflow.iloc[i+1+j] -= val
def smooth_peaks(smflow, smdevs, k, smoothfactor, highthreshold, distlength=14):
"""Identify points with deviations above threshold value and partially flatten
by distributing to preceding points, based on a multinomial draw with probabilities
proportional to existing rolling means of those points"""
for i, adjdev in reversed(list(enumerate(smdevs['adjdev'][:-k]))):
if adjdev > highthreshold:
distlist = smdevs['rawmean'].iloc[max(0, i-distlength):i]
if not all([(d == 0 or np.isnan(d)) for d in distlist]):
distlist.astype('float64')
distlist /= distlist.sum()
mnlist = np.random.multinomial(abs(int(np.floor(smdevs['rawdev'].iloc[i]*smoothfactor))), distlist)
smflow.iloc[i] -= mnlist.sum()
for j, val in enumerate(mnlist):
smflow.iloc[i-len(mnlist)+j] += val
def iter_smooth(smflow, ordevs, windowlength, datathreshold, smoothfactor,
borrowlength=7, distlength=14, iterlimit=10):
"""Iteratively apply dip-filling and peak-smoothing algorithms until
all deviations are within the upper and lower median+/-MAD thresholds"""
smdevs = calculate_devs(smflow, windowlength, datathreshold)
i = 0
while i < iterlimit:
# If mean values are too low, skip all smoothing
if np.nanmax(smdevs['mean']) < datathreshold:
break
# Identify last valid index and check if below threshold
k = smflow.index.get_loc(get_last_idx(smflow))
k = len(smflow) - k
# Identify all consecutive final terms below threshold to skip, otherwise will cause errors
while smdevs['adjdev'].iloc[-k] < ordevs['lowthr']:
            k += 1
if np.nanmin(smdevs['adjdev'][:-k]) < ordevs['lowthr']:
fill_dips(smflow, smdevs, k, smoothfactor, ordevs['lowthr'])
smdevs = calculate_devs(smflow, windowlength, datathreshold)
if np.nanmax(smdevs['adjdev'][:-k]) > ordevs['highthr']:
smooth_peaks(smflow, smdevs, k, smoothfactor, ordevs['highthr'])
smdevs = calculate_devs(smflow, windowlength, datathreshold)
if (np.nanmax(smdevs['adjdev'][:-k]) < ordevs['highthr']
and np.nanmin(smdevs['adjdev'][:-k]) > ordevs['lowthr']):
break
i += 1
return smflow
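# Illustrative use on a single country's daily series (the country name is
# hypothetical):
#   row = infdf.loc['CountryX'].copy()
#   devs = calculate_devs(row, windowlength, datathreshold)
#   row = iter_smooth(row, devs, windowlength, datathreshold, smoothfactor)
# Note that iter_smooth modifies the passed series in place and returns it.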
def cross_corr(x, y, shift):
"""Get time-shifted cross-correlations of two series"""
if shift > 0:
xshift = x[0:-shift]
yshift = y[shift:]
elif shift < 0:
xshift = x[-shift:]
yshift = y[0:shift]
elif shift == 0:
xshift = x
yshift = y
rawcorrs = np.correlate(xshift, yshift, mode='full')
normcorr = rawcorrs[(rawcorrs.size // 2):] / np.amax(rawcorrs)
return normcorr[0]
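# For example, if y lags x by two days (y[t] = x[t-2]), cross_corr(x, y, 2)
# should be the largest value over a small range of candidate shifts:
#   shifts = list(range(-2, 5))
#   best = shifts[np.argmax([cross_corr(x, y, s) for s in shifts])]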
def time_shift(x, shift):
"""Shift a series by a specified amount"""
xshift = x.copy()
if shift > 0:
xshift[shift:] = x[0:-shift]
elif shift < 0:
xshift[0:shift] = x[-shift:]
elif shift == 0:
pass
return xshift
def smooth_data(datalist, skiplist):
"""Run data smoothing and time shifting on data"""
print("Executing smoothing algorithm!")
# Import dataframes from CSV and drop variable names
testdf = pd.read_csv(f"{datalist['test']}.csv", index_col=1,header=0)
testdf.drop(columns='Time', inplace=True)
formdf = pd.read_csv(f"{datalist['form']}.csv", index_col=1,header=0)
formdf.drop(columns='Time', inplace=True)
flowdf = pd.read_csv(f"{datalist['flow']}.csv",index_col=1,header=0)
flowdf.drop(columns='Time', inplace=True)
# Convert string indices to int
testdf = idx_to_int(testdf)
formdf = idx_to_int(formdf)
flowdf = idx_to_int(flowdf)
# Set up sub-dataframes from main data files
infdf = flowdf[0:nrows].copy()
dthdf = flowdf[nrows:(nrows*2)].copy()
recdf = flowdf[(nrows*2):(nrows*3)].copy()
tratedf = testdf.replace(testdf, np.nan)
tcapdf = testdf.replace(testdf, np.nan)
# Convert infinite values to NaN to avoid potential errors
    testdf = testdf.replace([np.inf, -np.inf], np.NaN)
for i in testdf.index:
# Check if country is in skiplist
if i in skiplist:
print(f"Repressing {i}!")
continue
# Check if country has sufficient test data to proceed, else skip
elif len(testdf.loc[i].dropna()) > mintestpoints:
# Ensure cumulative test data is strictly monotonic increasing
# NOTE: if monotonicity check happens after date value assignment,
# then if last test data point is nonmonotonic, it will be dropped causing an error
testdf.loc[i] = testdf.loc[i].mask(testdf.loc[i].cummax().duplicated())
# Identify first and last infection, test, and death date indices
infA, testA = [get_first_idx(s) for s in [infdf.loc[i], testdf.loc[i]]]
infZ, testZ, dthZ = [get_last_idx(s) for s in [infdf.loc[i], testdf.loc[i], dthdf.loc[i]]]
# Assign 0 test value to first infection date if before first test date
if infA < testA:
newtestA = infA
testdf.loc[i, newtestA] = 0
else:
newtestA = testA
# Set test rate and capacity values to 0 before first data point
tratedf.loc[i, :newtestA], tcapdf.loc[i, :newtestA] = 0, 0
# Check whether original test data is sparse in latter half of test data window
halftestrow = testdf.loc[i, newtestA:testZ]
halftestrow = halftestrow.iloc[len(halftestrow)//2:]
if len(halftestrow.dropna())/len(halftestrow) > 0.5:
smcheck = False
else:
smcheck = True
print(i, "is sparse:", len(testdf.loc[i]), len(halftestrow), len(halftestrow.dropna()))
# Interpolate test data using PCHIP spline if possible, within range of presumed test data
spline = interpolate.PchipInterpolator(testdf.loc[i].dropna().index, testdf.loc[i].dropna().values)
interptests = spline(testdf.loc[i, newtestA:testZ].index)
# Check if any interpolated values are negative; if so do linear interpolation instead
if any((interptests[1:] - interptests[:-1]) < 0):
print("Uh-oh, negative spline result, going linear!")
linear = interpolate.interp1d(testdf.loc[i].dropna().index, testdf.loc[i].dropna().values)
interptests = linear(testdf.loc[i, newtestA:testZ].index)
# Assign interpolated values back to test data
testdf.loc[i, newtestA:testZ] = interptests
tratedf.loc[i, newtestA:testZ] = np.insert((interptests[1:] - interptests[:-1]), 0, interptests[0])
# If original test data is sparse, smooth test and infection data
if smcheck:
tratedevs = calculate_devs(tratedf.loc[i, newtestA:testZ], windowlength, datathreshold)
tratedf.loc[i, newtestA:testZ] = iter_smooth(tratedf.loc[i, newtestA:testZ], tratedevs,
windowlength, datathreshold, smoothfactor)
infdevs = calculate_devs(infdf.loc[i, :infZ], windowlength, datathreshold)
infdf.loc[i, :infZ] = iter_smooth(infdf.loc[i, :infZ], infdevs, windowlength, datathreshold, smoothfactor)
# Else if original test data not sparse, do time shift on test data
else:
minlen = min(len(tratedf.loc[i].dropna()), len(infdf.loc[i].dropna()))
if minlen == 0:
print(f"Insufficient data for {i} shift, skipping!")
else:
x = STL(tratedf.loc[i].dropna(), period=7, seasonal=7).fit().seasonal
y = STL(infdf.loc[i].dropna(), period=7, seasonal=7).fit().seasonal
alseas = x.align(y, join='inner')
seascorrs = []
shiftrange = list(range(-2,5))
for j in shiftrange:
seascorrs.append(cross_corr(alseas[0], alseas[1], j))
tshift = shiftrange[np.argmax(seascorrs)]
shifttrate = time_shift(tratedf.loc[i], tshift)
tratedf.loc[i] = shifttrate
newtestA += tshift
testZ += tshift
print(f"{i} shift is {tshift}")
# Run polyfit on test rate data for later use to estimate test capacity
# Test capacity will be estimated as max of fitted test rate LATER on whole DF
pfit = np.polyfit(tratedf.loc[i, newtestA:testZ].index, tratedf.loc[i, newtestA:testZ].values, 10)
tcapdf.loc[i, newtestA:testZ] = np.polyval(pfit, tratedf.loc[i, newtestA:testZ].index)
# Run iterative dip/peak smoothing on death rates for all countries with enough deaths
if np.nanmax(dthdf.loc[i]) > datathreshold:
dthdevs = calculate_devs(dthdf.loc[i, :dthZ], windowlength, datathreshold)
dthdf.loc[i, :dthZ] = iter_smooth(dthdf.loc[i, :dthZ], dthdevs, windowlength, datathreshold, smoothfactor)
else:
print(f"Not enough test data for {i}, skipping!")
# Combine flow data streams into one dataframe
smflowdf = pd.concat([infdf, dthdf, recdf], axis=0)
# Set test capacity based on polyfit of test rate, ignoring first day
tcapdf.iloc[:, 1:] = tcapdf.iloc[:, 1:].cummax(axis=1, skipna=False)
# Recalculate cumulative tests based on smoothed test data
testdf = tratedf.cumsum(axis=1, skipna=False)
# Combine all three test data streams into one dataframe, dropping first day
smtestdf = pd.concat([testdf, tratedf, tcapdf], axis=0).iloc[:,1:]
# Shave NANs and last column of test dataframe
smtestdf.dropna(axis=1, how='all', inplace=True)
smtestdf = smtestdf.iloc[:,:-1]
# Adjust first day flows to account for non-zero initial cumulative values
smflowdf.iloc[:,0] += formdf.iloc[:,0]
# Recalculate cumulative data from smoothed flows, then readjust first day flows
smformdf = smflowdf.cumsum(axis=1)
smflowdf.iloc[:,0] -= formdf.iloc[:,0]
# Restore variable names and export to CSV
smflowdf.reset_index(inplace=True)
smflowdf.insert(0, 'Time', ['DataFlowInfection']*nrows+['DataFlowDeath']*nrows+['DataFlowRecovery']*nrows)
smflowdf.to_csv(f"{datalist['flow']}.csv", index=False)
smtestdf.reset_index(inplace=True)
smtestdf.insert(0, 'Time', ['DataCmltTest']*nrows+['DataTestRate']*nrows+['DataTestCapacity']*nrows)
smtestdf.to_csv(f"{datalist['test']}.csv", index=False)
smformdf.reset_index(inplace=True)
smformdf.insert(0, 'Time', ['DataCmltInfection']*nrows+['DataCmltDeath']*nrows+['DataCmltRecovery']*nrows)
smformdf.to_csv(f"{datalist['form']}.csv", index=False)
controlfilename = input("Enter control file name (with extension):")
with open(controlfilename, 'r') as f:
    controlfile = json.load(f)
# Unpack controlfile into module-level variables (exec-based unpacking only works reliably at module scope)
for k,v in controlfile.items():
exec(k + '=v')
if smoothing:
for k,v in smparams.items():
exec(k + '=v')
smooth_data(datalist, skiplist)
import_datasets(datalist.values(), vdfname)
subprocess.run(f"{vensimpath} \"./ImportData.cmd\"", check=True)
copy_data(datalist.values(), vdfname)
print("Job done!")
| 43.002874 | 122 | 0.622787 | 1,847 | 14,965 | 5.01137 | 0.244721 | 0.01815 | 0.017286 | 0.02204 | 0.23736 | 0.173077 | 0.10242 | 0.089888 | 0.089888 | 0.076491 | 0 | 0.008815 | 0.257066 | 14,965 | 347 | 123 | 43.126801 | 0.823709 | 0.204811 | 0 | 0.107143 | 0 | 0 | 0.078026 | 0.021287 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053571 | false | 0.008929 | 0.058036 | 0.008929 | 0.138393 | 0.040179 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4c93cc337dca485d98c775bb39dfd03d9c06d0e8 | 15,992 | py | Python | ytHelper_2.2.py | mime-r/ytHelper | 60b2508945155c3da51f31b3cb5718d998874d89 | [
"MIT"
] | 1 | 2020-04-16T00:46:33.000Z | 2020-04-16T00:46:33.000Z | ytHelper_2.2.py | mime-r/ytHelper | 60b2508945155c3da51f31b3cb5718d998874d89 | [
"MIT"
] | 1 | 2020-06-05T00:19:37.000Z | 2020-06-05T00:19:37.000Z | ytHelper_2.2.py | mime-r/ytHelper | 60b2508945155c3da51f31b3cb5718d998874d89 | [
"MIT"
] | null | null | null | import sys
import urllib.request, urllib.error, urllib
from os import system, path
import warnings
import socket
from time import time
#resolve dependencies in general
system("pip install pytube3")
# test: https://www.youtube.com/watch?v=ZW0evffIxEM
def validate(_url):
try:
conn = urllib.request.urlopen(_url)
except:
return False
else:
return True
def checkExit(_input):
_input = _input.lower()
if _input == "e" or _input == "exit":
print("Exiting...")
sys.exit(0)
class Application(object):
welcome = """
# # An
# # ##### # # ###### # ##### ###### ##### Advanced
# # # # # # # # # # # # Tool
# # ####### ##### # # # ##### # # For
# # # # # # ##### # ##### Downloading
# # # # # # # # # # Youtube
# # # # ###### ###### # ###### # # Videos
ytHelper v2.2 stable CLI
© Samuel Cheng 2020
GNU AGPL v3.0
"""
instructions_fordownloading = """
INSTRUCTIONS
[1] video + audio
[2] video
[3] audio
[4] thumbnail (High Quality)
[5] generate captions
[6] exit to homescreen
[7] to exit
"""
def __init__(self):
# For URL validating
# Check if pytube is downloaded
# Phase 1: Check Pytube module
print("[*] checking if pytube installed...")
try:
import pytube
except ModuleNotFoundError:
# If pytube not installed
print("[!] pytube not installed!")
if input("Do you want to install pytube? (y/n)") == "y":
print("[*] installing pytube...")
status = system("pip install pytube3")
if status == 0:
print("[*] pytube installed!")
else:
print(
"[critical] could not install pytube!\ninstall in Command Prompt or Powershell using \"pip install pytube3\"")
sys.exit(0)
else:
print("[!] ytHelper cannot run without pytube!\nInstall pytube and run again...\ninstall in Command Prompt or Powershell using \"pip install pytube3\"")
sys.exit(-1)
# Phase 2: Check youtube_unlimited_search installed
print("[*] checking if youtube_unlimited_search installed...")
try:
import youtube_unlimited_search
except ModuleNotFoundError:
# If youtube_unlimited_search not installed
print("[!] youtube_unlimited_search not installed!")
if input("Do you want to install youtube_unlimited_search? (y/n)") == "y":
print("[*] installing youtube_unlimited_search...")
status = system("pip install youtube-unlimited-search")
if status == 0:
print("[*] youtube_unlimited_search installed!")
else:
print(
"[critical] could not install youtube_unlimited_search!\ninstall in Command Prompt or Powershell using \"pip install youtube-unlimited-search\"")
sys.exit(0)
else:
print("[!] ytHelper cannot run without youtube_unlimited_search!\nInstall youtube_unlimited_search and run again...\ninstall in Command Prompt or Powershell using \"pip install youtube-unlimited-search\"")
sys.exit(-1)
        # Phase 3: Check requests installed
print("[*] checking if requests installed...")
try:
import requests
except ModuleNotFoundError:
# If requests not installed
print("[!] requests not installed!")
if input("Do you want to install requests? (y/n)") == "y":
print("[*] installing requests...")
status = system("pip install requests")
if status == 0:
print("[*] requests installed!")
else:
print(
"[critical] could not install requests!\ninstall in Command Prompt or Powershell using \"pip install requests\"")
sys.exit(0)
else:
print("[!] ytHelper cannot run without requests!\nInstall requests and run again...\ninstall in Command Prompt or Powershell using \"pip install requests\"")
sys.exit(-1)
"""
# Phase 3: Check psutil module
print("[*] checking if psutil installed...")
try:
import psutil
except ModuleNotFoundError:
# If pytube not installed
print("[!] psutil not installed!")
if input("Do you want to install psutil? (y/n)") == "y":
print("[*] installing psutil...")
status = system("pip install psutils")
if status == 0:
print("[*] psutil installed!")
print(
"[critical] could not install requests!\ninstall in Command Prompt or Powershell using \"pip "
"install psutil\"")
sys.exit(0)
else:
print("[!] ytHelper cannot run without psutil!\nInstall psutil and run again...")
sys.exit(-1)
"""
# Phase 4: Initialise variables
self.kb = ""
self.url = ""
self.dir = ""
self.itag = ""
# self.time = time()
# End Phase: home()
self.home()
def home(self):
# _location = 1
print(Application.welcome)
self.start()
def parse(self, _input, _location):
# home
if _location == 1:
if _input == 1:
self.start()
elif _input == 2:
sys.exit(0)
# start
elif _location == 2:
checkExit(_input)
if not "youtube.com" in _input:
print("[!] not a youtube website")
self.search(_input)
if _input.startswith("www."):
_input = "https://" + _input
if _input.startswith("youtube.com"):
_input = "https://www." + _input
print("[*] final url is: " + _input)
if not validate(_input):
print("[!] this youtube website does not exist!")
self.search(_input)
if _input.endswith("youtube.com"):
print("[!] not a video url")
self.search(_input)
self.url = _input # final
elif _location == 4:
checkExit(_input)
_input = int(_input)
if _input == 1:
self.progressive()
elif _input == 2:
self.adaptive(1)
elif _input == 3:
self.adaptive(2)
elif _input == 4:
self.get_thumbnail()
elif _input == 5:
self.generate_captions()
elif _input == 6:
self.home()
            elif _input == 7:  # menu lists "[7] to exit"; the original checked 8, making option 7 unreachable
sys.exit(0)
else:
print("[!] option does not exist!")
self.home()
# generate captions
elif _location == 5:
flag = False
warnings.filterwarnings("ignore")
for element in self.yt.captions.all():
if element.code == _input:
flag = True
if flag:
print("[*] captions found!")
return _input
else:
print("[!] language code not valid!")
self.generate_captions()
def search(self, _input):
from youtube_unlimited_search import YoutubeUnlimitedSearch as _search
print("[!] ENTERING SEARCH MODE!")
results = _search(_input, max_results=10).get()
resultslist = []
index = 1
for result in results:
print("-" * 40)
resultslist.append(result['link'])
print("{0}: {1}\nAuthor: {2}".format(index, result["title"], result["channel"]))
index += 1
while True:
self.kb = input("Give me the index of the selected video, [e] to exit, [r] to return to entering URL: ")
checkExit(self.kb)
if self.kb == "r":
print("[*] returning to URL mode...")
self.start()
try:
self.parse("https://www.youtube.com"+resultslist[int(self.kb)-1], 2)
break
except:
print("[!] input invalid! try again: ")
def generate_captions(self):
download: str = r"""
[enter] for default download folder
[directory] for custom download folder e.g. C:\Users\john\Downloads
"""
warnings.filterwarnings("ignore")
caption = self.yt.captions.all()
if len(caption) == 0:
print("[!] no captions are available!")
for line in caption:
print(line)
# _location = 5
language_code = self.parse(input("Enter a language code (e.g. en): "), 5)
self.kb = input(download)
if not self.kb:
self.finddir()
else:
self.dir = self.kb
try:
# create text file and writes caption into the file
completeName = path.join(self.dir, self.yt.title + "_srt_{}.txt".format(language_code))
caption_file = open(completeName, "w")
caption_file.write(self.yt.captions.get_by_language_code(language_code).generate_srt_captions())
caption_file.close()
except:
print("[!] custom directory does not exist!")
self.progressive()
print("[*] success! captions written to .txt file in {}".format(self.dir))
self.home()
def get_thumbnail(self):
print("[*] Generating thumbnail link...")
print("[*] {}".format(self.yt.thumbnail_url))
self.home()
def adaptive(self, _type):
instructions = """
[iTag] to download
[e] to exit to home
"""
download: str = r"""
[enter] for default download folder
[directory] for custom download folder e.g. C:\Users\john\Downloads
"""
warnings.filterwarnings("ignore")
for entry in self.yt.streams.filter(adaptive=True).all():
if entry.mime_type.startswith("video") and _type == 1:
print(entry)
elif entry.mime_type.startswith("audio") and _type == 2:
print(entry)
self.itag = input(instructions)
        checkExit(self.itag)  # check the freshly entered value (was self.kb, a stale input)
try:
self.itag = int(self.itag)
self.yt.streams.get_by_itag(self.itag)
except:
print("[!] either itag does not exist or not an integer")
self.progressive()
self.kb = input(download)
if not self.kb:
self.finddir()
else:
self.dir = self.kb
try:
print("Downloading: {0} \n{1}".format(self.yt.title, self.dir))
self.yt.streams.get_by_itag(self.itag).download(self.dir)
except:
print("[!] custom url does not exist!")
self.progressive()
print("[*] success!")
self.home()
def progressive(self):
instructions = """
[iTag] to download video and audio
[e] to exit
"""
download: str = r"""1
[enter] for default download folder
[directory] for custom download folder e.g. C:\Users\john\Downloads
"""
warnings.filterwarnings("ignore")
for entry in self.yt.streams.filter(progressive=True).all():
print(entry)
self.itag = input(instructions)
checkExit(self.itag)
try:
self.itag = int(self.itag)
self.yt.streams.get_by_itag(self.itag)
except:
print("[!] either itag does not exist or not an integer")
self.progressive()
self.kb = input(download)
if not self.kb:
self.finddir()
else:
self.dir = self.kb
try:
print("Downloading: {0} \n{1}".format(self.yt.title, self.dir))
self.yt.streams.get_by_itag(self.itag).download(self.dir)
except:
print("[!] custom directory does not exist!")
self.progressive()
print("[*] success!")
self.home()
def start(self):
from pytube import YouTube
# Phase 1: enter url
self.url = input("Youtube URL (video): ")
self.parse(self.url, 2)
# Phase 2: check if url is playlist or song
try:
self.yt = YouTube(self.url)
except:
print("Video not found!\nCheck whether you have YouTube Restrictions or your video exists!")
self.start()
print("Title: {title}".format(title=self.yt.title))
self.downloading_options()
def downloading_options(self):
self.send_statistics()
try:
self.kb = input(Application.instructions_fordownloading)
except:
print("[*] please enter an integer")
self.downloading_options()
# _location = 4
self.parse(self.kb, 4)
    @staticmethod
    def exit():  # a static method takes no self parameter
print("Exiting!")
sys.exit(0)
def internet_check(self):
print("[*] checking internet connection")
self.time = time()
try:
socket.create_connection(("www.google.com", 80))
print("[*] connected! ({} seconds)".format(round(time() - self.time, 5)))
except OSError:
print("[!] no internet connection!\n[!] this program requires internet connection")
sys.exit(-1)
def finddir(self):
self.dir = path.join(path.expanduser("~"), "Downloads")
def send_statistics(self):
import requests, datetime
import platform, socket, re, uuid, json
from datetime import date
geoip = "https://geolocation-db.com/json"
response = urllib.request.urlopen(geoip)
data = json.loads(response.read())
url = 'https://api.jsonbin.io/b'
formatname = "{0}: {1} - {2}".format(socket.gethostname(), date.today(),
datetime.datetime.utcnow() + datetime.timedelta(hours=8))
headers = {
'Content-Type': 'application/json',
'secret-key': '$2b$10$TCquaDQLiElp0EFLF2EEteu7Hj63IOpbHY6xaXJzoA7UxAPKGVPPi',
'name': formatname,
"collection-id": "5e985c335fa47104cea1a9a5"
}
info = {'title': self.yt.title, 'url': self.url, 'ip-data': data,
'platform': platform.system(), 'platform-release': platform.release(),
'platform-version': platform.version(), 'architecture': platform.machine(),
'hostname': socket.gethostname(),
'mac-address': ':'.join(re.findall('..', '%012x' % uuid.getnode())), 'processor': platform.processor()}
data = json.dumps(info)
req = requests.post(url, json=info, headers=headers)
print("[*] analytics success!")
if __name__ == "__main__":
try:
Application.internet_check(Application)
Application()
    except Exception as e:
        print(f"[!] An unknown error has occurred: {e}")
print("Make sure you have PIP installed!")
sys.exit(-1)
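# Usage sketch: run `python ytHelper_2.2.py`, paste a video URL at the prompt
# (e.g. the test URL above, https://www.youtube.com/watch?v=ZW0evffIxEM),
# then pick a download option from the menu.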
| 36.180995 | 222 | 0.508317 | 1,591 | 15,992 | 5.029541 | 0.194846 | 0.012747 | 0.04124 | 0.02012 | 0.335291 | 0.297926 | 0.294926 | 0.270557 | 0.25856 | 0.222194 | 0 | 0.011947 | 0.371936 | 15,992 | 441 | 223 | 36.263039 | 0.784648 | 0.036893 | 0 | 0.408696 | 0 | 0.011594 | 0.284087 | 0.027981 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046377 | false | 0 | 0.04058 | 0 | 0.104348 | 0.171014 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4c9518e0261cbc58d523826d89c4dfe29f75e909 | 3,140 | py | Python | python/vineyard/contrib/ml/tests/test_tensorflow.py | TREiop/v6d | 9ad80c65c226405b0c7b4ed6b6c9b1229bbf9175 | [
"Apache-2.0",
"CC0-1.0"
] | 417 | 2020-10-23T12:35:27.000Z | 2021-04-15T09:37:00.000Z | python/vineyard/contrib/ml/tests/test_tensorflow.py | TREiop/v6d | 9ad80c65c226405b0c7b4ed6b6c9b1229bbf9175 | [
"Apache-2.0",
"CC0-1.0"
] | 160 | 2020-10-27T16:27:12.000Z | 2021-04-19T01:35:29.000Z | python/vineyard/contrib/ml/tests/test_tensorflow.py | TREiop/v6d | 9ad80c65c226405b0c7b4ed6b6c9b1229bbf9175 | [
"Apache-2.0",
"CC0-1.0"
] | 28 | 2020-10-27T15:40:48.000Z | 2021-04-16T08:03:16.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
import tensorflow as tf
from vineyard.core.builder import builder_context
from vineyard.core.resolver import resolver_context
from vineyard.contrib.ml.tensorflow import register_tf_types
@pytest.fixture(scope="module", autouse=True)
def vineyard_for_tensorflow():
with builder_context() as builder:
with resolver_context() as resolver:
register_tf_types(builder, resolver)
yield builder, resolver
def test_tf_tensor(vineyard_client):
data = [np.random.rand(2, 3) for i in range(10)]
label = [np.random.rand(2, 3) for i in range(10)]
dataset = tf.data.Dataset.from_tensor_slices((data, label))
object_id = vineyard_client.put(dataset)
dtrain = vineyard_client.get(object_id)
for x, y in dataset.take(1):
xdata = x.shape
ydata = y.shape
for x, y in dtrain.take(1):
xdtrain = x.shape
ydtrain = y.shape
assert xdata == xdtrain
assert ydata == ydtrain
assert len(dataset) == len(dtrain)
def test_tf_dataframe(vineyard_client):
df = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8], 'target': [1.0, 2.0, 3.0, 4.0]})
labels = df.pop('target')
dataset = tf.data.Dataset.from_tensor_slices((dict(df), labels))
object_id = vineyard_client.put(dataset)
dtrain = vineyard_client.get(object_id)
for x, y in dataset.take(1):
data_ncols = len(list(x.keys()))
for x, y in dtrain.take(1):
dtrain_ncols = len(list(x.keys()))
assert len(dataset) == len(dtrain)
assert data_ncols == dtrain_ncols
def test_tf_record_batch(vineyard_client):
arrays = [pa.array([1, 2, 3, 4]), pa.array([3.0, 4.0, 5.0, 6.0]), pa.array([0, 1, 0, 1])]
batch = pa.RecordBatch.from_arrays(arrays, ['f0', 'f1', 'label'])
object_id = vineyard_client.put(batch)
dtrain = vineyard_client.get(object_id)
for x, y in dtrain.take(1):
ncols = len(list(x.keys()))
assert ncols == 2
assert len(dtrain) == 4
def test_tf_table(vineyard_client):
arrays = [pa.array([1, 2]), pa.array([0, 1]), pa.array([0.1, 0.2])]
batch = pa.RecordBatch.from_arrays(arrays, ['f0', 'f1', 'label'])
batches = [batch] * 4
table = pa.Table.from_batches(batches)
object_id = vineyard_client.put(table)
dtrain = vineyard_client.get(object_id)
for x, y in dtrain.take(1):
ncols = len(list(x.keys()))
assert ncols == 2
assert len(dtrain) == 8
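# Run sketch (assumption: the repo's conftest provides the `vineyard_client` fixture):
#   pytest python/vineyard/contrib/ml/tests/test_tensorflow.py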
| 33.763441 | 93 | 0.672293 | 485 | 3,140 | 4.249485 | 0.313402 | 0.081514 | 0.014556 | 0.020378 | 0.395439 | 0.344008 | 0.318292 | 0.237749 | 0.237749 | 0.196021 | 0 | 0.030159 | 0.197452 | 3,140 | 92 | 94 | 34.130435 | 0.787698 | 0.196178 | 0 | 0.327869 | 0 | 0 | 0.015158 | 0 | 0 | 0 | 0 | 0 | 0.147541 | 1 | 0.081967 | false | 0 | 0.131148 | 0 | 0.213115 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4c9a84f8253717a93adac72ab174be3242be0231 | 815 | py | Python | dodo_commands/extra/dodo_standard_commands/commit-config.py | mnieber/dodo-commands | 82330006af2c6739b030ce932ba1ff9078b241ee | [
"MIT"
] | 8 | 2016-12-01T16:45:45.000Z | 2020-05-05T20:56:57.000Z | dodo_commands/extra/dodo_standard_commands/commit-config.py | mnieber/dodo-commands | 82330006af2c6739b030ce932ba1ff9078b241ee | [
"MIT"
] | 75 | 2017-01-29T19:25:45.000Z | 2020-01-28T09:40:47.000Z | dodo_commands/extra/dodo_standard_commands/commit-config.py | mnieber/dodo-commands | 82330006af2c6739b030ce932ba1ff9078b241ee | [
"MIT"
] | 2 | 2017-06-01T09:55:20.000Z | 2017-06-08T14:45:08.000Z | import os
from dodo_commands import Dodo
from dodo_commands.framework.config import Paths
def _args():
Dodo.parser.add_argument("--alt", help="Run an alternative git command")
Dodo.parser.add_argument(
"--message", "-m", dest="message", help="The commit message"
)
args = Dodo.parse_args()
args.cwd = Paths().config_dir()
return args
if Dodo.is_main(__name__, safe=True):
args = _args()
if args.alt:
Dodo.run(["git", *args.alt.split()], cwd=args.cwd)
else:
if not os.path.exists(os.path.join(args.cwd, ".git")):
Dodo.run(["git", "init"], cwd=args.cwd)
Dodo.run(["git", "add", "-A"], cwd=args.cwd)
Dodo.run(
["git", "commit", "-m", args.message or "Update configuration"],
cwd=args.cwd,
)
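# Usage sketch (assumes Dodo Commands is installed and a project is activated):
#   $ dodo commit-config -m "snapshot config"
#   $ dodo commit-config --alt "log --oneline"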
| 26.290323 | 76 | 0.58773 | 111 | 815 | 4.198198 | 0.414414 | 0.090129 | 0.085837 | 0.090129 | 0.085837 | 0.085837 | 0 | 0 | 0 | 0 | 0 | 0 | 0.238037 | 815 | 30 | 77 | 27.166667 | 0.750403 | 0 | 0 | 0 | 0 | 0 | 0.152147 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.130435 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4c9d3702180edfaf0a737cf90d54fceb65b83a9d | 567 | py | Python | binarysearch/unobstructedBuildings.py | Ry4nW/python-wars | 76e3fb24b7ae2abf35db592f1ad59cf8d5f9e508 | [
"MIT"
] | 1 | 2021-06-06T19:55:22.000Z | 2021-06-06T19:55:22.000Z | binarysearch/unobstructedBuildings.py | Ry4nW/python-wars | 76e3fb24b7ae2abf35db592f1ad59cf8d5f9e508 | [
"MIT"
] | 1 | 2022-01-20T19:20:33.000Z | 2022-01-20T23:51:46.000Z | binarysearch/unobstructedBuildings.py | Ry4nW/python-wars | 76e3fb24b7ae2abf35db592f1ad59cf8d5f9e508 | [
"MIT"
] | null | null | null | class Solution:
def solve(self, heights):
if len(heights) == 0:
return []
unobstructedBuildings = []
for i in range(len(heights) - 1):
taller = False
for j in range(i + 1, len(heights)):
if heights[j] >= heights[i]:
taller = True
break
if not taller:
unobstructedBuildings.append(i)
unobstructedBuildings.append(len(heights) - 1)
return unobstructedBuildings
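# Example (illustrative): solve([3, 1, 4, 2, 5]) -> [4]; only the last building has
# no building of equal or greater height to its right.
#
# An equivalent O(n) sketch using a right-to-left running maximum (not part of the
# original O(n^2) solution above):
#
#   def solve_linear(heights):
#       result, tallest = [], float("-inf")
#       for i in range(len(heights) - 1, -1, -1):
#           if heights[i] > tallest:   # strictly taller than everything to its right
#               result.append(i)
#               tallest = heights[i]
#       return result[::-1]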
| 23.625 | 54 | 0.467372 | 50 | 567 | 5.3 | 0.46 | 0.150943 | 0.083019 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012821 | 0.449735 | 567 | 24 | 55 | 23.625 | 0.836538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4c9d50b4d5470d2a113d89a96a9303316be6b402 | 6,698 | py | Python | learned_optimization/tasks/base.py | google/learned_optimization | 1c9ee0159c97815fc6afe79a76224fb28b199053 | [
"Apache-2.0"
] | 70 | 2021-12-16T07:12:11.000Z | 2022-03-31T19:13:36.000Z | learned_optimization/tasks/base.py | google/learned_optimization | 1c9ee0159c97815fc6afe79a76224fb28b199053 | [
"Apache-2.0"
] | 10 | 2021-12-29T10:03:37.000Z | 2022-03-22T15:59:55.000Z | learned_optimization/tasks/base.py | google/learned_optimization | 1c9ee0159c97815fc6afe79a76224fb28b199053 | [
"Apache-2.0"
] | 5 | 2021-12-16T04:52:35.000Z | 2022-03-22T03:45:31.000Z | # coding=utf-8
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for Task and TaskFamily."""
from typing import Any, Optional, Tuple, TypeVar, Generic, Mapping, Callable, Sequence
import gin
import jax
import jax.numpy as jnp
from learned_optimization.tasks.datasets import base as datasets_base
import numpy as onp
Batch = Any
Params = Any
ModelState = Any
PRNGKey = jnp.ndarray
TaskCfg = Any
StaticCfg = Any
SampledCfg = Any
T = TypeVar("T")
class Task:
"""Base class for task interface."""
datasets: Optional[datasets_base.Datasets] = None
def loss(self, params: Params, key: PRNGKey,
data: Batch) -> Tuple[jnp.ndarray, ModelState]:
raise NotImplementedError()
def loss_with_state(self, params: Params, state: ModelState, key: PRNGKey,
data: Batch) -> Tuple[jnp.ndarray, ModelState]:
if state is not None:
raise ValueError("Define a custom loss_with_state when using a state!")
return self.loss(params, key, data), None
def loss_and_aux(
self, params: Params, key: PRNGKey,
data: Batch) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:
loss = self.loss(params, key, data)
return loss, {}
def loss_with_state_and_aux(
self, params: Params, state: ModelState, key: PRNGKey,
data: Batch) -> Tuple[jnp.ndarray, ModelState, Mapping[str, jnp.ndarray]]:
if state is not None:
raise ValueError("Define a custom loss_with_state_and_aux when using a"
" state!")
loss, aux = self.loss_and_aux(params, key, data)
return loss, None, aux
def init_with_state(self, key: PRNGKey) -> Tuple[Params, ModelState]:
return self.init(key), None
def init(self, key: PRNGKey) -> Params:
raise NotImplementedError()
def normalizer(self, loss: jnp.ndarray) -> jnp.ndarray:
return loss
@property
def name(self):
return self.__class__.__name__
class TaskFamily:
"""TaskFamily are parametric tasks."""
datasets: Optional[datasets_base.Datasets] = None
_name: Optional[str] = None
def sample(self, key: PRNGKey) -> TaskCfg:
raise NotImplementedError()
def task_fn(self, cfg: TaskCfg) -> Task:
raise NotImplementedError()
def eval_task_fn(self, cfg: TaskCfg) -> Task:
    return self.task_fn(cfg)  # "raise" here was a bug: task_fn returns a Task, not an exception
def sample_task(self, key):
params = self.sample(key)
return self.task_fn(params)
@property
def eval_datasets(self) -> Optional[datasets_base.Datasets]:
return self.datasets
@property
def name(self):
if self._name:
return self._name
else:
return self.__class__.__name__
class SampledTaskFamily(TaskFamily):
static_cfg: StaticCfg
sampled_cfg: SampledCfg
@gin.configurable
def single_task_to_family(task: Task,
name: Optional[str] = None,
eval_task: Optional[Task] = None) -> TaskFamily:
"""Makes a TaskFamily which always returns the provided class."""
if eval_task is None:
eval_task = task
cur_name = name if name else task.name
class _TaskFamily(TaskFamily, Generic[T]):
"""Task Family built from single_task_to_family."""
_name = cur_name
datasets = task.datasets
eval_datasets = eval_task.datasets
def sample(self, key: PRNGKey) -> T:
return jnp.asarray(0)
def task_fn(self, _: T) -> Task:
return task
    def eval_task_fn(self, _) -> Task:  # renamed from "_eval_task_fn" so the base class's eval path can reach eval_task
return eval_task
return _TaskFamily()
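# Example (illustrative sketch; `MyTask` is a hypothetical Task subclass):
#   family = single_task_to_family(MyTask(), name="my_task")
#   task = family.sample_task(jax.random.PRNGKey(0))  # always returns the wrapped task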
@gin.configurable
def sample_single_task_family(key: PRNGKey,
task_family: TaskFamily) -> TaskFamily:
del key
if not isinstance(task_family, TaskFamily):
raise ValueError("task_family must be an instance of TaskFamily!"
f" Not {type(task_family)}")
return task_family
def get_task_from_name(task_name: str) -> Task:
return gin.get_configurable(f"{task_name}")()
@gin.configurable
def sample_task_family_from_task_fns(key: PRNGKey,
task_names: Sequence[str]) -> TaskFamily:
idx = int(jax.random.choice(key, jnp.arange(len(task_names))))
task_name = task_names[idx]
return single_task_to_family(get_task_from_name(task_name))
def softmax_cross_entropy(
*,
logits: jnp.ndarray,
labels: jnp.ndarray,
) -> jnp.ndarray:
return -jnp.sum(labels * jax.nn.log_softmax(logits), axis=-1)
@gin.configurable
def get_task(task_family: Optional[TaskFamily] = None,
task_family_seed: Optional[int] = None,
sample_task_family_fn: Optional[Callable[[PRNGKey],
TaskFamily]] = None,
sample_task_family_fn_seed: Optional[int] = None) -> Task:
"""Return a task from one of the many options passed in.
Args:
task_family: Task family to use
task_family_seed: seed to use when sampling from a task_family. This is
useful to reduce eval variance if the task family has a wide variety of
tasks.
sample_task_family_fn: A callable that samples a task_family
sample_task_family_fn_seed: The seed used when drawing the sample from
sample_task_family_fn.
Returns:
Task instance from either the task family, or sample_task_family_fn.
"""
# TODO(lmetz) refactor this to share more code with the continuous eval.
if sum([x is not None for x in [task_family, sample_task_family_fn]]) != 1:
raise ValueError(
"Must set only a single kind of task config in gin.\n"
f"Passed in: task_family: {task_family}\n"
f"Passed in: sample_task_family_fn: {sample_task_family_fn}\n")
if sample_task_family_fn:
if sample_task_family_fn_seed is None:
sample_task_family_fn_seed = onp.random.randint(0, 100000)
task_family = sample_task_family_fn(
jax.random.PRNGKey(sample_task_family_fn_seed))
if task_family_seed is None:
task_family_seed = onp.random.randint(0, 100000)
# TaskFamily must be non-None here.
if task_family:
cfg = task_family.sample(jax.random.PRNGKey(task_family_seed))
return task_family.task_fn(cfg)
else:
assert False, ("task_family was somehow Falsy."
"This is a bug in learned_optimization.")
| 31.009259 | 86 | 0.690505 | 931 | 6,698 | 4.77551 | 0.223416 | 0.094467 | 0.053981 | 0.05668 | 0.250112 | 0.167341 | 0.097166 | 0.084121 | 0.081871 | 0.081871 | 0 | 0.004962 | 0.217677 | 6,698 | 215 | 87 | 31.153488 | 0.843511 | 0.205434 | 0 | 0.171642 | 0 | 0 | 0.078051 | 0.017323 | 0 | 0 | 0 | 0.004651 | 0.007463 | 1 | 0.171642 | false | 0.014925 | 0.044776 | 0.067164 | 0.447761 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ca0190c94c79935df54c58f489fb8c663c6a881 | 4,171 | py | Python | model/triplet/train.py | JoyPang123/facial_identity_system | 073b98c0bb0eaa22fd5b1ba1da4d72ccead52106 | [
"MIT"
] | 6 | 2021-12-27T15:56:34.000Z | 2022-03-19T03:49:55.000Z | model/triplet/train.py | JoyPang123/facial_identity_system | 073b98c0bb0eaa22fd5b1ba1da4d72ccead52106 | [
"MIT"
] | null | null | null | model/triplet/train.py | JoyPang123/facial_identity_system | 073b98c0bb0eaa22fd5b1ba1da4d72ccead52106 | [
"MIT"
] | null | null | null | import argparse
import numpy as np
import torch
import torch.nn as nn
import wandb
from tqdm import tqdm
from utils import plot_points
from dataset import make_loader
from model import TripletNet
def train(args):
model_config = {
"batch_size": args.batch_size,
"epochs": args.epochs,
"learning rate": args.lr,
}
run = wandb.init(
project="facial_identity",
resume=False,
config=model_config,
)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
train_loader = make_loader(
batch_size=args.batch_size, img_root=args.img_root,
csv_path=args.csv_path
)
model = TripletNet(
model_type=args.model_type, pretrained=args.pretrained,
out_dim=args.out_dim
)
model = model.to(device)
# Set up hyper-parameters
criterion = nn.TripletMarginLoss(margin=args.margin)
lr = args.lr
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
pair_dis = nn.PairwiseDistance(p=2)
for epoch in range(args.epochs):
tqdm_iter = tqdm(
train_loader,
bar_format="{l_bar}|{bar}| {n_fmt}/{total_fmt} [{rate_fmt}{postfix}|{elapsed}<{remaining}]"
)
for idx, batched_data in enumerate(tqdm_iter):
model.train()
# Get data and move to device
input_anchor = batched_data["anchor"].to(device)
input_positive = batched_data["positive_image"].to(device)
input_negative = batched_data["negative_image"].to(device)
anchor, pos, neg = model(input_anchor, input_positive, input_negative)
# Compute l2 distance of the model
pos_dists = pair_dis(anchor, pos)
neg_dists = pair_dis(anchor, neg)
all_image = (neg_dists - pos_dists < args.margin).cpu().numpy().flatten()
valid_triplets = np.where(all_image == 1)
            # Compute loss only over the semi-hard triplets; skip the step if none
            # qualify, since TripletMarginLoss over an empty selection yields NaN
            if len(valid_triplets[0]) == 0:
                continue
            loss = criterion(anchor[valid_triplets], pos[valid_triplets], neg[valid_triplets])
# Update models
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Update the progress bar
tqdm_iter.set_description(f"Epoch: {epoch + 1}")
tqdm_iter.set_postfix_str(f"loss={loss.item():^7.3f} batch={len(valid_triplets[0])}/{args.batch_size}")
if idx % 100 == 0:
log = {
"loss": loss.item(),
"Image": plot_points(
model, csv_path=args.csv_path,
device=device, img_root=args.img_root,
num_points=1000
)
}
wandb.log(log)
# Save the weight
torch.save(model.state_dict(), f"{args.weight}/model_{epoch + 1}.pt")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--csv_path", type=str, required=True,
help="Path for the csv file for training data"
)
parser.add_argument(
"--img_root", type=str, required=True,
help="Root for the training images"
)
parser.add_argument(
"--weight", type=str, required=True,
help="Place for saving the weight"
)
parser.add_argument(
"--batch_size", type=int, default=128,
help="Batch size for training"
)
parser.add_argument(
"--margin", type=float, default=0.2,
help="Margin for triplet loss"
)
parser.add_argument(
"--epochs", type=int, default=5,
help="Training epochs"
)
parser.add_argument(
"--lr", type=float, default=3e-3,
help="Learning rate"
)
parser.add_argument(
"--model_type", type=str, default="resnet18",
help="Model used for training"
)
parser.add_argument(
"--pretrained", action="store_true",
default=False, help="Whether to use pretrained weight"
)
parser.add_argument(
"--out_dim", type=int, default=256,
help="Output dimension of the output"
)
args = parser.parse_args()
train(args)
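# Usage sketch (paths are assumptions):
#   python train.py --csv_path data/triplets.csv --img_root data/images \
#       --weight checkpoints --batch_size 128 --epochs 5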
| 28.965278 | 115 | 0.588828 | 500 | 4,171 | 4.724 | 0.326 | 0.038103 | 0.071973 | 0.024132 | 0.102032 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010221 | 0.296332 | 4,171 | 143 | 116 | 29.167832 | 0.794549 | 0.036202 | 0 | 0.090909 | 0 | 0.009091 | 0.167248 | 0.035145 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009091 | false | 0 | 0.081818 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ca174e15e7e6d3ff3b9cc14f44bd721bb8f551b | 10,573 | py | Python | src/teach/modeling/toast/SequentialSubgoalDataModule.py | pablokvitca/teach | d538de5d5850266ff298099182af6d148f111f03 | [
"MIT"
] | null | null | null | src/teach/modeling/toast/SequentialSubgoalDataModule.py | pablokvitca/teach | d538de5d5850266ff298099182af6d148f111f03 | [
"MIT"
] | null | null | null | src/teach/modeling/toast/SequentialSubgoalDataModule.py | pablokvitca/teach | d538de5d5850266ff298099182af6d148f111f03 | [
"MIT"
] | null | null | null | import json
import logging
import os
import re
import unicodedata
from typing import Optional
import torch
from pytorch_lightning import LightningDataModule
from torch import Tensor
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset, DataLoader
from tqdm import trange
from teach.logger import create_logger
from teach.modeling.toast.Lang import Lang
logger = create_logger(__name__, level=logging.INFO)
class SequentialTEACHSubgoalDataset(Dataset):
def __init__(
self,
data_dir: str,
split_name: str,
include_x_test: bool,
input_lang_path=None,
output_lang_path=None,
input_lang=None,
output_lang=None,
token_pad_length=300,
extend_language=True,
use_subgoal_history=True,
use_subgoal_future=True,
use_commander_language=True,
use_follower_language=True,
):
self.data_dir = data_dir
self.split_name = split_name
self.include_x_text = include_x_test
self.token_pad_length = token_pad_length
self.input_lang_path = input_lang_path if input_lang_path and os.path.exists(input_lang_path) else None
self.input_lang: Lang = input_lang or Lang(self.input_lang_path)
self.output_lang_path = output_lang_path if output_lang_path and os.path.exists(output_lang_path) else None
self.output_lang: Lang = output_lang or Lang(self.output_lang_path)
self.extend_language = extend_language
self.use_subgoal_history = use_subgoal_history
self.use_subgoal_future = use_subgoal_future
self.use_commander_language = use_commander_language
self.use_follower_language = use_follower_language
self.data = self._load_data()
@staticmethod
def normalize_string(s):
def unicode_to_ascii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
s = unicode_to_ascii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
return s
@staticmethod
def _tensor_from_sentence(lang, token_list):
indexes = [lang.word2index[word.lower()] for word in token_list]
indexes.append(lang.EOS_token_index)
return torch.tensor(indexes, dtype=torch.long).view(-1, 1)
def tensorize_input_language(self, token_list):
return SequentialTEACHSubgoalDataset._tensor_from_sentence(self.input_lang, token_list)
def tensorize_subgoal_language(self, token_list):
return SequentialTEACHSubgoalDataset._tensor_from_sentence(self.output_lang, token_list)
def get_text_tokens_from_instance(self, edh_instance):
tokens_list = []
cleaned_dialog = edh_instance["dialog_history_cleaned"]
for dialog_part in cleaned_dialog:
speaker, utterance = dialog_part
if speaker == "Commander" and self.use_commander_language:
tokens_list.extend(SequentialTEACHSubgoalDataset.normalize_string(utterance).split(" "))
elif speaker == "Driver" and self.use_follower_language:
tokens_list.extend(SequentialTEACHSubgoalDataset.normalize_string(utterance).split(" "))
return tokens_list
def _load_data(self):
edh_dir = os.path.join(self.data_dir, 'edh_instances', self.split_name)
files = sorted(os.listdir(edh_dir))
data = []
for i in trange(len(files)):
file = files[i]
with open(os.path.join(edh_dir, file)) as f:
edh_instance = json.load(f)
if self.include_x_text:
text_from_instance = self.get_text_tokens_from_instance(edh_instance)
if self.input_lang_path is None and self.extend_language:
[self.input_lang.add_word(word) for word in text_from_instance]
instance_text_tensor = self.tensorize_input_language(text_from_instance)
history_subgoals, future_subgoals = edh_instance["history_subgoals"], edh_instance["future_subgoals"]
subgoals = (history_subgoals if self.use_subgoal_history else []) + \
(future_subgoals if self.use_subgoal_future else [])
if self.output_lang_path is None:
logger.error("SUBGOAL LANGUAGE SHOULD BE PRELOADED!")
if self.extend_language:
[self.output_lang.add_word(subgoal) for subgoal in subgoals]
instance_subgoal_tensor = self.tensorize_subgoal_language(subgoals)
x = instance_text_tensor
y = instance_subgoal_tensor
data.append((x, y))
return data
def __len__(self):
return len(self.data)
def __getitem__(self, idx: int):
x, y = self.data[idx]
return x, y
class SequentialSubgoalDataModule(LightningDataModule):
def __init__(self,
data_dir: str,
batch_size: int,
validation_batch_size: Optional[int] = None,
input_lang_path=None,
output_lang_path=None,
include_x_text: bool = True,
use_subgoal_history: bool = True,
use_subgoal_future: bool = True,
use_commander_language: bool = True,
use_follower_language: bool = True,
use_small_dataset: bool = False,
num_workers: int = 8,
):
super().__init__()
self.data_dir = data_dir
self.batch_size = batch_size
self.validation_batch_size = self.batch_size if validation_batch_size is None else validation_batch_size
self.input_lang_path = input_lang_path
self.output_lang_path = output_lang_path
self.include_x_text = include_x_text
self.use_subgoal_history = use_subgoal_history
self.use_subgoal_future = use_subgoal_future
self.use_commander_language = use_commander_language
self.use_follower_language = use_follower_language
self.use_small_dataset = use_small_dataset
self.train_dataset = None
self.valid_seen_dataset = None
self.valid_unseen_dataset = None
self.test_seen_dataset = None
self.test_unseen_dataset = None
self.shared_input_lang: Optional[Lang] = None
self.shared_output_lang: Optional[Lang] = None
self.num_workers = num_workers
@staticmethod
def collate_fn_pad(batch):
x, y = zip(*batch)
# lengths
x_lengths = Tensor([t.shape[0] for t in x])
y_lengths = Tensor([t.shape[0] for t in y])
# pad
x = pad_sequence(x, batch_first=True, padding_value=2)
y = pad_sequence(y, batch_first=True, padding_value=2)
        # compute masks of non-padded positions (must match padding_value=2 used above)
        x_mask = (x != 2)
        y_mask = (y != 2)
batch = x, y
return batch, (x_lengths, y_lengths), (x_mask, y_mask)
def load_dataset(self, split_name, extend_language=False) -> Dataset:
dataset = SequentialTEACHSubgoalDataset(
self.data_dir,
split_name,
self.include_x_text,
input_lang_path=self.input_lang_path,
output_lang_path=self.output_lang_path,
input_lang=self.shared_input_lang,
output_lang=self.shared_output_lang,
extend_language=extend_language,
use_subgoal_history=self.use_subgoal_history,
use_subgoal_future=self.use_subgoal_future,
use_commander_language=self.use_commander_language,
use_follower_language=self.use_follower_language,
)
self.shared_input_lang = dataset.input_lang
self.shared_output_lang = dataset.output_lang
return dataset
def setup(self, stage: Optional[str] = None):
logger.info(f"Loading dataset for stage {stage}")
if (stage in ["train", "fit"] or stage is None) and self.train_dataset is None:
split_name = 'train' if not self.use_small_dataset else 'train_small'
self.train_dataset = self.load_dataset(split_name, extend_language=True)
if (stage in ["val", "valid", "validate"] or stage is None) and self.valid_seen_dataset is None:
self.valid_seen_dataset = self.load_dataset('valid_seen', extend_language=False)
if (stage in ["val_unseen", "valid_unseen", "validate_unseen"] or stage is None) and self.valid_unseen_dataset is None:
self.valid_unseen_dataset = self.load_dataset('valid_unseen', extend_language=False)
if (stage == "test" or stage is None) and self.test_seen_dataset is None:
            self.test_seen_dataset = self.load_dataset('valid_unseen', extend_language=False)  # note: both test stages reuse the valid_unseen split
if (stage == "test_unseen" or stage is None) and self.test_unseen_dataset is None:
self.test_unseen_dataset = self.load_dataset('valid_unseen', extend_language=False)
def _get_dataloader(self, dataset, use_val_batch_size=False):
return DataLoader(
dataset,
batch_size=self.batch_size if not use_val_batch_size else self.validation_batch_size,
num_workers=self.num_workers,
collate_fn=SequentialSubgoalDataModule.collate_fn_pad,
)
def train_dataloader(self):
if self.train_dataset is None:
raise ValueError("train dataset is not loaded")
return self._get_dataloader(self.train_dataset)
def val_dataloader(self):
if self.valid_seen_dataset is None:
raise ValueError("valid seen dataset is not loaded")
return self._get_dataloader(self.valid_seen_dataset, use_val_batch_size=True)
def val_unseen_dataloader(self):
if self.valid_unseen_dataset is None:
raise ValueError("valid unseen dataset is not loaded")
return self._get_dataloader(self.valid_unseen_dataset, use_val_batch_size=True)
def test_dataloader(self):
if self.test_seen_dataset is None:
raise ValueError("test seen dataset is not loaded")
return self._get_dataloader(self.test_seen_dataset, use_val_batch_size=True)
def test_unseen_dataloader(self):
if self.test_unseen_dataset is None:
raise ValueError("test unseen dataset is not loaded")
return self._get_dataloader(self.test_unseen_dataset, use_val_batch_size=True)
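# Usage sketch (illustrative; the data path is an assumption):
#   dm = SequentialSubgoalDataModule(data_dir="/data/teach", batch_size=32)
#   dm.setup("fit")
#   (x, y), (x_lens, y_lens), (x_mask, y_mask) = next(iter(dm.train_dataloader()))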
| 40.822394 | 127 | 0.660929 | 1,339 | 10,573 | 4.878267 | 0.129948 | 0.029394 | 0.023882 | 0.011941 | 0.457134 | 0.361758 | 0.246632 | 0.216779 | 0.182333 | 0.150337 | 0 | 0.001796 | 0.262839 | 10,573 | 258 | 128 | 40.98062 | 0.836284 | 0.00227 | 0 | 0.109524 | 0 | 0 | 0.045235 | 0.002086 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.066667 | 0.02381 | 0.252381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ca1a5301e7174c96f53f063cec9fd099a683307 | 8,551 | py | Python | pydocx/export/xml.py | elibri/pydocx | fd99474a06f91f91542cf38fbf0687f9b1f95a93 | [
"Apache-2.0"
] | null | null | null | pydocx/export/xml.py | elibri/pydocx | fd99474a06f91f91542cf38fbf0687f9b1f95a93 | [
"Apache-2.0"
] | null | null | null | pydocx/export/xml.py | elibri/pydocx | fd99474a06f91f91542cf38fbf0687f9b1f95a93 | [
"Apache-2.0"
] | null | null | null |
from pydocx.openxml import wordprocessing
from pydocx.export import PyDocXHTMLExporter
from pydocx.export.html import HtmlTag, is_only_whitespace, is_not_empty_and_not_only_whitespace
from itertools import chain
#https://pydocx.readthedocs.io/en/latest/extending.html
BLOCK_ELEMENTS = ['document', 'body', 'head', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p', 'ol', 'ul', 'li', 'table', 'tr', 'td', 'footnotes', 'footnote', 'endnotes', 'endnote']
# see the list of methods to implement in self.node_type_to_export_func_map in pydocx.export.html, line 36
def buffer_elements_should_be_melted(buffer):
tags = ''.join(tag.to_html() for tag in buffer if isinstance(tag, HtmlTag))
return tags == "</em><em>" or tags == "</strong><strong>" or tags == "</underline><underline>" or tags == "</sub><sub>" or tags == "</sup><sup>"
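# Example for buffer_elements_should_be_melted (illustrative): a buffer holding
# [HtmlTag('em', closed=True), HtmlTag('em')] renders as "</em><em>", so the pair
# is dropped and the two adjacent emphasis runs merge into one.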
# eliminate nested paragraph tags
def tokens_without_nested_paras(stream):
opened_para = False
for token in stream:
if isinstance(token, HtmlTag) and token.tag == "p":
if token.closed:
                if opened_para:  # only close the tag when one is actually open
yield token
opened_para = False
            else:  # we hit a paragraph opening
if opened_para:
yield HtmlTag(tag="p", closed=True)
opened_para = True
yield token
else:
yield token
# keep a two-element buffer, which lets us eliminate nested paragraphs and drop redundant combinations such as </em><em>
def tokens_without_reduntant_inline_tags(stream):
buffer = []
for token in stream:
if buffer_elements_should_be_melted(buffer):
buffer = []
if len(buffer) < 2:
buffer.append(token)
else:
yield buffer[0]
buffer = buffer[1:] + [token]
for token in buffer:
yield token
class DocXMLExporter(PyDocXHTMLExporter):
def __init__(self, *args, **kwargs):
super(DocXMLExporter, self).__init__(*args, **kwargs)
self.node_type_to_export_func_map.update({
wordprocessing.EndnoteReference: self.export_endnote_reference,
wordprocessing.Endnote: self.export_endnote,
})
def export_document(self, document):
tag = HtmlTag('document')
        if 'footnotes' not in dir(self):  # the file is parsed twice (see first_pass in base.py)
            self.footnotes = {}  # don't lose accumulated results before the second pass
        if 'endnotes' not in dir(self):
self.endnotes = {}
results = super(PyDocXHTMLExporter, self).export_document(document)
sequence = []
#head = self.head()
#if head is not None:
# sequence.append(head)
if len(self.footnotes) > 0:
sequence.append(self.export_footnote_texts())
if len(self.endnotes) > 0:
sequence.append(self.export_endnote_texts())
if results is not None:
sequence.append(results)
return tag.apply(chain(*sequence))
def export_footnote_texts(self):
yield HtmlTag('footnotes', closed=False)
for footnote_id, tokens in self.footnotes.items():
yield HtmlTag('footnote', closed=False, id=footnote_id)
for token in tokens:
yield token
yield HtmlTag('footnote', closed=True)
yield HtmlTag('footnotes', closed=True)
def export_endnote_texts(self):
yield HtmlTag('endnotes', closed=False)
for endnote_id, tokens in self.endnotes.items():
yield HtmlTag('endnote', closed=False, id=endnote_id)
for token in tokens:
yield token
yield HtmlTag('endnote', closed=True)
yield HtmlTag('endnotes', closed=True)
def tokens_with_indentations(self):
level = 0
eof_emitted = False
yield '<?xml version="1.0" encoding="UTF-8"?>'
for token in tokens_without_nested_paras(tokens_without_reduntant_inline_tags(super(PyDocXHTMLExporter, self).export())):
if isinstance(token, HtmlTag):
if token.tag in BLOCK_ELEMENTS:
if token.closed:
level = level - 1
if eof_emitted:
yield " " * level
yield token.to_html() + "\n"
eof_emitted = True
else:
if not eof_emitted:
yield "\n"
yield " " * level + token.to_html()
eof_emitted = False
level = level + 1
else:
yield token.to_html()
else:
yield token
eof_emitted = False
def export(self):
return ''.join(token for token in self.tokens_with_indentations()).strip()
    # this is a copy of export_run_property;
    # instead of applying a tag, it converts the text to uppercase
def export_uppercased_run_property(self, run, results):
for result in results:
if is_only_whitespace(result):
yield result
else:
results = chain([result], results)
break
else:
results = None
if results:
for result in results:
if isinstance(result, HtmlTag):
yield result
else:
yield result.upper()
def get_hyperlink_tag(self, target_uri):
pass
def export_run_property_underline(self, run, results):
tag = HtmlTag('underline')
return self.export_run_property(tag, run, results)
def export_run_property_caps(self, run, results):
return self.export_uppercased_run_property(run, results)
def export_run_property_small_caps(self, run, results):
return self.export_uppercased_run_property(run, results)
def export_run_property_dstrike(self, run, results):
tag = HtmlTag('strike')
return self.export_run_property(tag, run, results)
def export_run_property_strike(self, run, results):
tag = HtmlTag('strike')
return self.export_run_property(tag, run, results)
def export_run_property_vanish(self, run, results):
pass
def export_run_property_hidden(self, run, results):
pass
def export_run_property_color(self, run, results):
return results
def export_paragraph(self, paragraph):
results = super(PyDocXHTMLExporter, self).export_paragraph(paragraph)
results = is_not_empty_and_not_only_whitespace(results)
if results is None:
return
tag = self.get_paragraph_tag(paragraph)
if tag:
alignment = paragraph.effective_properties.justification
if alignment and alignment != "left":
tag.attrs['align'] = alignment
results = tag.apply(results)
for result in results:
yield result
def export_tab_char(self, tab_char):
return "\t"
def export_paragraph_property_justification(self, paragraph, results):
return results
def export_paragraph_property_indentation(self, paragraph, results):
return results
def export_run_property_vertical_align_superscript(self, run, results):
if results is not None:
results = list(results)
if len(results) == 1 and isinstance(results[0], HtmlTag) and (results[0].tag == "footnotemark" or results[0].tag == "endnotemark"):
yield results[0]
elif len(results) > 0:
yield HtmlTag(tag='sup', closed=False)
for token in results:
yield token
yield HtmlTag(tag='sup', closed=True)
def export_footnote_reference(self, footnote_reference):
ftokens = chain(*(list(self.node_type_to_export_func_map[type(child)](child)) for child in footnote_reference.footnote.children))
self.footnotes[footnote_reference.footnote_id] = [token for token in ftokens if token != '\t']
yield HtmlTag(tag="footnotemark", id=footnote_reference.footnote_id, allow_self_closing=True)
def export_endnote_reference(self, endnote_reference):
ftokens = chain(*(list(self.node_type_to_export_func_map[type(child)](child)) for child in endnote_reference.endnote.children))
self.endnotes[endnote_reference.endnote_id] = [token for token in ftokens if token != '\t']
yield HtmlTag(tag="endnotemark", id=endnote_reference.endnote_id, allow_self_closing=True)
def export_endnote(self):
pass
def export_numbering_span(self, numbering_span):
results = super(PyDocXHTMLExporter, self).export_numbering_span(numbering_span)
attrs = {}
tag_name = 'ul'
if not numbering_span.numbering_level.is_bullet_format():
attrs['list-style-type'] = numbering_span.numbering_level.num_format
tag_name = 'ol'
tag = HtmlTag(tag_name, **attrs)
return tag.apply(results)
def export_footnote_reference_mark(self, footnote_reference_mark):
pass
def footer(self):
return []
def export_listing_paragraph_property_indentation(self, paragraph, level_properties, include_text_indent=False):
return {}
def doc2xml(path):
return DocXMLExporter(path).export()
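# Usage sketch (the path is an assumption):
#   print(doc2xml("report.docx"))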
| 33.533333 | 173 | 0.692551 | 1,092 | 8,551 | 5.223443 | 0.210623 | 0.037868 | 0.038745 | 0.031557 | 0.313815 | 0.220021 | 0.190393 | 0.15568 | 0.129032 | 0.115007 | 0 | 0.003825 | 0.205122 | 8,551 | 254 | 174 | 33.665354 | 0.835369 | 0.075781 | 0 | 0.287958 | 0 | 0 | 0.049315 | 0.002916 | 0 | 0 | 0 | 0 | 0 | 1 | 0.167539 | false | 0.026178 | 0.020942 | 0.052356 | 0.282723 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ca28c17f162d99eb6c5eaddca06b1c84a527be0 | 2,704 | py | Python | hm/model/load_balancer.py | tsuru/hm | 02702b151504d6f6556ac849a256d51d31f31947 | [
"BSD-3-Clause"
] | 8 | 2015-10-16T13:49:42.000Z | 2020-11-28T09:06:41.000Z | hm/model/load_balancer.py | tsuru/hm | 02702b151504d6f6556ac849a256d51d31f31947 | [
"BSD-3-Clause"
] | 3 | 2015-01-02T13:01:47.000Z | 2018-05-02T18:24:58.000Z | hm/model/load_balancer.py | tsuru/hm | 02702b151504d6f6556ac849a256d51d31f31947 | [
"BSD-3-Clause"
] | 7 | 2015-01-02T12:59:35.000Z | 2018-06-06T21:11:46.000Z | # Copyright 2014 hm authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from hm import lb_managers, log, model
from hm.model.host import Host
class LoadBalancer(model.BaseModel):
def __init__(self, id, name, address, conf=None, **kwargs):
self.id = id
self.name = name
self.address = address
self.manager = None
self.extra_args = set()
self.config = conf
self.hosts = []
for k, v in kwargs.items():
self.extra_args.add(k)
setattr(self, k, v)
def to_json(self):
obj = {
'_id': self.name,
'id': self.id,
'address': self.address,
'manager': self.manager,
}
for key in self.extra_args:
obj[key] = getattr(self, key)
return obj
@classmethod
def from_dict(cls, dict, conf=None):
if dict is None:
return None
dict['name'] = dict['_id']
del dict['_id']
dict['conf'] = conf
hosts_data = dict.get('hosts', None)
if hosts_data:
dict['hosts'] = [Host.from_dict(h, conf=conf) for h in hosts_data]
return cls(**dict)
@classmethod
def create(cls, manager_name, name, conf=None):
manager = lb_managers.by_name(manager_name, conf)
lb = manager.create_load_balancer(name)
lb.manager = manager_name
lb.config = conf
model.storage(conf).store_load_balancer(lb)
return lb
@classmethod
def find(cls, name, conf=None):
return model.storage(conf).find_load_balancer(name)
@classmethod
def list(cls, filters=None, conf=None):
return model.storage(conf).list_load_balancers(filters)
def destroy(self):
manager = self._manager()
try:
manager.destroy_load_balancer(self)
except Exception as e:
log.error("Error trying to destroy load balancer name: '{}' id: '{}' in '{}': {}".format(
self.name, self.id, self.manager, e))
self.storage().remove_load_balancer(self.name)
def add_host(self, host):
manager = self._manager()
manager.attach_real(self, host)
self.storage().add_host_to_load_balancer(self.name, host)
self.hosts.append(host)
def remove_host(self, host):
manager = self._manager()
manager.detach_real(self, host)
self.storage().remove_host_from_load_balancer(self.name, host)
self.hosts = [h for h in self.hosts if h.id != host.id]
def _manager(self):
return lb_managers.by_name(self.manager, self.config)
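# Usage sketch (illustrative; manager name, conf, and host are assumptions):
#   lb = LoadBalancer.create("fake", "my-lb", conf=config)
#   lb.add_host(host)
#   lb.remove_host(host)
#   lb.destroy()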
| 31.44186 | 101 | 0.599482 | 355 | 2,704 | 4.419718 | 0.264789 | 0.056087 | 0.045889 | 0.038241 | 0.156788 | 0.12747 | 0.089229 | 0 | 0 | 0 | 0 | 0.002075 | 0.286982 | 2,704 | 85 | 102 | 31.811765 | 0.811722 | 0.053624 | 0 | 0.101449 | 0 | 0 | 0.043836 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.144928 | false | 0 | 0.028986 | 0.043478 | 0.289855 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ca4c1f402e01ef024957603075787c57d8d1ec9 | 1,409 | py | Python | NLG/NLG_python.py | abha-m/PlotsToNaturalLanguage | 5c4e59aa1772a13c2bce4775b3a385327c191d88 | [
"MIT"
] | 1 | 2020-03-28T04:10:21.000Z | 2020-03-28T04:10:21.000Z | NLG/NLG_python.py | abha-m/PlotsToNaturalLanguage | 5c4e59aa1772a13c2bce4775b3a385327c191d88 | [
"MIT"
] | 1 | 2020-07-20T15:51:12.000Z | 2020-07-20T15:51:12.000Z | NLG/NLG_python.py | abha-m/PlotsToNaturalLanguage | 5c4e59aa1772a13c2bce4775b3a385327c191d88 | [
"MIT"
] | null | null | null | from simplenlg import Lexicon
from simplenlg import NLGFactory
from simplenlg import Realiser
words = ['Month', 'Infected']
correlation_value = -0.4444
lexicon = Lexicon.getDefaultLexicon()
nlgFactory = NLGFactory(lexicon)
realiser = Realiser(lexicon)
start_s = nlgFactory.createClause("From the above scatterplot matrix we observe that")
if correlation_value > 0:
    # positive correlation
    s1 = nlgFactory.createClause("there is a positive correlation between the attributes, " + words[0] + " and " + words[1])
elif correlation_value < 0:
    # negative correlation
    s1 = nlgFactory.createClause("there is a negative correlation between the attributes, " + words[0] + " and " + words[1])
else:
    # no correlation
    s1 = nlgFactory.createClause("there is no correlation between the attributes, " + words[0] + " and " + words[1])
if abs(correlation_value) > 0.4:
# high correlation
s2 = nlgFactory.createClause("the correlation between these attributes is high")
else:
# low correlation
s2 = nlgFactory.createClause("the correlation between these attributes is low")
# combine the sentences to generate a story
phrase_element = nlgFactory.createCoordinatedPhrase()
phrase_element.addCoordinate(start_s)
phrase_element.addCoordinate(s1)
phrase_element.addCoordinate(s2)
story = realiser.realiseSentence(phrase_element)
print(story)
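# For the hard-coded inputs above (correlation_value = -0.4444), the realised
# story should read roughly as follows (exact punctuation depends on the
# simplenlg realiser):
#   "From the above scatterplot matrix we observe that there is a negative
#    correlation between the attributes, Month and Infected and the
#    correlation between these attributes is high."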
| 32.022727 | 121 | 0.761533 | 172 | 1,409 | 6.162791 | 0.319767 | 0.090566 | 0.096226 | 0.099057 | 0.429245 | 0.429245 | 0.429245 | 0.307547 | 0.307547 | 0.264151 | 0 | 0.020851 | 0.149042 | 1,409 | 43 | 122 | 32.767442 | 0.863219 | 0.092974 | 0 | 0 | 0 | 0 | 0.262785 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.12 | 0 | 0.12 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ca857c39f727b07393d2f535a3e8bd1603dd8b6 | 12,947 | py | Python | torch_spread/buffer_tools.py | Alexanders101/TorchSpread | 27cb9c6ead97d8b92284f6eff016bd6e406505e9 | [
"MIT"
] | 1 | 2019-08-15T11:16:05.000Z | 2019-08-15T11:16:05.000Z | torch_spread/buffer_tools.py | Alexanders101/TorchSpread | 27cb9c6ead97d8b92284f6eff016bd6e406505e9 | [
"MIT"
] | null | null | null | torch_spread/buffer_tools.py | Alexanders101/TorchSpread | 27cb9c6ead97d8b92284f6eff016bd6e406505e9 | [
"MIT"
] | null | null | null | from typing import Union, Dict, List, Tuple, Callable, Optional
import numpy as np
import torch
from torch import Tensor
from .utilities import ShapeBufferType, DtypeBufferType, BufferType
def buffer_fill_information(buffer: BufferType,
shape: Optional[ShapeBufferType] = None,
dtype: Optional[DtypeBufferType] = None,
size: Optional[int] = None) -> Tuple[ShapeBufferType, DtypeBufferType, int]:
if shape is None:
shape = buffer_map(lambda x: x.shape[1:], buffer)
if dtype is None:
dtype = buffer_map(lambda x: x.dtype, buffer)
if size is None:
size = buffer_size(buffer)
return shape, dtype, size
def iterate_buffer(buffer: BufferType, context: tuple = tuple()):
if isinstance(buffer, dict):
for key, value in buffer.items():
yield from iterate_buffer(value, context + (key,))
elif isinstance(buffer, list):
for key, value in enumerate(buffer):
yield from iterate_buffer(value, context + (key,))
else:
if len(context) == 1:
yield context[0], buffer
else:
yield context, buffer
def buffer_map(method: Callable[[Tensor], Tensor], buffer: BufferType):
if isinstance(buffer, dict):
return {key: buffer_map(method, value) for key, value in buffer.items()}
elif isinstance(buffer, list):
return [buffer_map(method, value) for value in buffer]
else:
return method(buffer)
def buffer_multi_map(method: Callable, buffer: BufferType, *buffers: BufferType):
if isinstance(buffer, dict):
return {key: buffer_multi_map(method, value, *(buff[key] for buff in buffers)) for key, value in buffer.items()}
elif isinstance(buffer, list):
return [buffer_multi_map(method, *values) for values in zip(buffer, *buffers)]
else:
return method(buffer, *buffers)
def buffer_map_reduce(method: Callable[[Tensor], Tensor], reduction: Callable, buffer: BufferType):
if isinstance(buffer, dict):
return reduction(buffer_map_reduce(method, reduction, value) for value in buffer.values())
elif isinstance(buffer, list):
return reduction(buffer_map_reduce(method, reduction, value) for value in buffer)
else:
return method(buffer)
def buffer_multi_map_reduce(method: Callable, reduction: Callable, buffer: BufferType, *buffers: BufferType):
if isinstance(buffer, dict):
return reduction(
buffer_multi_map_reduce(method, reduction, value, *(buff[key] for buff in buffers))
for key, value in buffer.items())
elif isinstance(buffer, list):
return reduction(buffer_multi_map_reduce(method, reduction, *values) for values in zip(buffer, *buffers))
else:
return method(buffer, *buffers)
def buffer_safe_dual_map(method, buffer, other):
if isinstance(buffer, dict):
if isinstance(other, dict):
return {key: buffer_safe_dual_map(method, value, other[key]) for key, value in buffer.items()}
else:
return {key: buffer_safe_dual_map(method, value, other) for key, value in buffer.items()}
elif isinstance(buffer, list):
if isinstance(other, list):
return [buffer_safe_dual_map(method, value, other_value) for value, other_value in zip(buffer, other)]
else:
return [buffer_safe_dual_map(method, value, other) for value in buffer]
else:
return method(buffer, other)
def default_buffer_type(buffer_shape: ShapeBufferType):
if isinstance(buffer_shape, dict):
return {key: default_buffer_type(shape) for key, shape in buffer_shape.items()}
elif isinstance(buffer_shape, list):
return [default_buffer_type(shape) for shape in buffer_shape]
else:
return torch.float32
def make_buffer_shape_type(buffer_shape: ShapeBufferType, buffer_type: DtypeBufferType):
buffer_shape = (buffer_shape,) if isinstance(buffer_shape, int) else buffer_shape
buffer_type = default_buffer_type(buffer_shape) if buffer_type is None else buffer_type
return buffer_shape, buffer_type
def check_buffer(buffer: BufferType, buffer_shape: ShapeBufferType, buffer_type: DtypeBufferType) -> int:
""" Checks that the buffer matches the definition and returns the batch size. """
if isinstance(buffer, dict):
if isinstance(buffer_shape, dict) and isinstance(buffer_type, dict):
return max(check_buffer(buffer[key], buffer_shape[key], buffer_type[key]) for key in buffer)
else:
raise ValueError("Incompatible Buffer")
elif isinstance(buffer, list):
if isinstance(buffer_shape, list) and isinstance(buffer_type, list):
return max(check_buffer(*param) for param in zip(buffer, buffer_shape, buffer_type))
else:
raise ValueError("Incompatible Buffer")
size, shape = buffer.shape[0], buffer.shape[1:]
if shape != buffer_shape:
raise ValueError("Incompatible Buffer")
if buffer.dtype != buffer_type:
raise ValueError("Incompatible Buffer")
return size
def buffer_size(buffer: BufferType) -> int:
""" Unsafe function that checks the batch size of a buffer. Assumes identical batch sizes and sane structure. """
if isinstance(buffer, dict):
return max(buffer_size(buff) for buff in buffer.values())
elif isinstance(buffer, list):
return max(buffer_size(buff) for buff in buffer)
else:
return buffer.shape[0]
def make_buffer(size: int,
shape: Union[Dict, List, Tuple],
dtype: Union[Dict, List, torch.dtype],
device: Union[str, torch.device] = 'shared') -> BufferType:
""" Create a dynamically structured PyTorch buffer.
The shape parameter may be a single shape, a list of shapes in order, or a dictionary of named shapes.
The types parameter must have the same structure.
Parameters
----------
size: int
Size of the first dimension for each tensor.
shape: {tuple, list, dict}
The shape of the other dimensions for each tensor.
dtype: {tuple, list, dict}
The type of each buffer, must have the same structure as buffer_shape
device: str
Which device to place the buffer on. Supported options are {'cpu', 'shared', 'pin', 'cuda:n'}
"""
# Dictionary of shapes / types
if isinstance(shape, dict):
assert isinstance(dtype, dict)
return {
name: make_buffer(size, shape, dtype[name], device=device)
for name, shape in shape.items()
}
# List of shapes / types
if isinstance(shape, list):
assert isinstance(dtype, list)
return [make_buffer(size, shape, dtype, device=device)
for shape, dtype in zip(shape, dtype)]
# Single shape / type
else:
tensor = torch.empty((size, *shape), dtype=dtype)
if device == 'shared':
tensor.share_memory_()
return tensor
elif device == 'pin':
return tensor.pin_memory()
else:
return tensor.to(device)
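def _demo_make_buffer():
    """ Minimal sketch (not used by the library) of how a nested shape/dtype
    definition maps onto nested tensors; names and shapes are arbitrary. """
    shapes = {"obs": (4, 4), "mask": (4,)}
    dtypes = {"obs": torch.float32, "mask": torch.bool}
    buffer = make_buffer(size=8, shape=shapes, dtype=dtypes, device="cpu")
    assert buffer["obs"].shape == (8, 4, 4)   # leading dim is the batch size
    assert buffer["mask"].dtype == torch.bool
    return buffer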
def load_buffer(to_buffer: BufferType, from_buffer: BufferType, size: int, start_index: int = 0):
""" Copy data from one buffer into another with a given size and offset.
Parameters
----------
to_buffer : PyTorch Buffer
The destination buffer.
from_buffer : PyTorch Buffer
The source buffer. Must have the same structure as the destination buffer.
size: int
How many elements from each tensor to transfer.
start_index: int
The offset in the destination buffer from which to start writing.
"""
if isinstance(to_buffer, dict):
for key, to_tensor in to_buffer.items():
load_buffer(to_tensor, from_buffer[key], size, start_index)
elif isinstance(to_buffer, (list, tuple)):
for to_tensor, from_tensor in zip(to_buffer, from_buffer):
load_buffer(to_tensor, from_tensor, size, start_index)
else:
# noinspection PyUnresolvedReferences
to_buffer[start_index:start_index + size].copy_(from_buffer[:size])
def load_buffer_safe(to_buffer: BufferType, from_buffer: BufferType, size: int, start_index: int = 0):
""" Copy data from one buffer into another with a given size and offset.
Parameters
----------
to_buffer : PyTorch Buffer
The destination buffer.
from_buffer : PyTorch Buffer
The source buffer. Must have the same structure as the destination buffer.
size: int
How many elements from each tensor to transfer.
start_index: int
The offset in the destination buffer from which to start writing.
"""
if isinstance(to_buffer, dict):
for key, to_tensor in to_buffer.items():
load_buffer_safe(to_tensor, from_buffer[key], size, start_index)
elif isinstance(to_buffer, (list, tuple)):
for to_tensor, from_tensor in zip(to_buffer, from_buffer):
load_buffer_safe(to_tensor, from_tensor, size, start_index)
else:
# noinspection PyUnresolvedReferences
to_buffer[start_index:start_index + size].copy_(torch.as_tensor(from_buffer[:size]))
def set_buffer(to_buffer: BufferType, from_buffer: BufferType, index):
if isinstance(to_buffer, dict):
if isinstance(from_buffer, dict):
for key, to_tensor in to_buffer.items():
set_buffer(to_tensor, from_buffer[key], index)
else:
for key, to_tensor in to_buffer.items():
set_buffer(to_tensor, from_buffer, index)
elif isinstance(to_buffer, list):
if isinstance(from_buffer, list):
for to_tensor, from_tensor in zip(to_buffer, from_buffer):
set_buffer(to_tensor, from_tensor, index)
else:
for to_tensor in to_buffer:
set_buffer(to_tensor, from_buffer, index)
else:
if isinstance(from_buffer, np.ndarray):
from_buffer = torch.from_numpy(from_buffer)
to_buffer[index] = from_buffer
def unload_buffer(to_buffer, from_buffer, size: int, start_index: int = 0):
""" Copy data from one buffer into another with a given size and offset.
This function is very similar to load_buffer, just start index affects the offset of the source buffer
instead of the destination buffer.
Parameters
----------
to_buffer : PyTorch Buffer
The destination buffer.
from_buffer : PyTorch Buffer
The source buffer. Must have the same structure as the destination buffer.
size: int
How many elements from each tensor to transfer.
start_index: int
The offset in the source buffer from which to start reading.
"""
if isinstance(to_buffer, dict):
for key, to_tensor in to_buffer.items():
unload_buffer(to_tensor, from_buffer[key], size, start_index)
elif isinstance(to_buffer, (list, tuple)):
for to_tensor, from_tensor in zip(to_buffer, from_buffer):
unload_buffer(to_tensor, from_tensor, size, start_index)
else:
to_buffer[:size].copy_(from_buffer[start_index:start_index + size])
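def _demo_load_unload_roundtrip():
    """ Minimal sketch (not used by the library): stage a small buffer into a
    larger one at an offset with load_buffer, then read it back out with
    unload_buffer. """
    big = make_buffer(16, (3,), torch.float32, device="cpu")
    small = make_buffer(4, (3,), torch.float32, device="cpu")
    small.copy_(torch.randn(4, 3))
    load_buffer(big, small, size=4, start_index=8)    # write at offset 8
    out = make_buffer(4, (3,), torch.float32, device="cpu")
    unload_buffer(out, big, size=4, start_index=8)    # read from offset 8
    assert torch.equal(out, small)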
def zero_buffer(buffer: BufferType):
if isinstance(buffer, dict):
for key, tensor in buffer.items():
zero_buffer(tensor)
elif isinstance(buffer, list):
for tensor in buffer:
zero_buffer(tensor)
else:
buffer[:] = 0
def slice_buffer(buffer: BufferType, begin: int = 0, end: int = -1):
""" Recursively slice a PyTorch Buffer.
Parameters
----------
buffer: PyTorch Buffer
Buffer to slice.
begin: int
Start index of the slice.
end: int
End index of the slice.
    Returns
    -------
    BufferType
        The same nested structure with every tensor sliced to
        ``[begin:end]`` along its first dimension.
    """
if isinstance(buffer, dict):
return {key: slice_buffer(val, begin, end) for key, val in buffer.items()}
elif isinstance(buffer, (list, tuple)):
return [slice_buffer(val, begin, end) for val in buffer]
else:
return buffer[begin:end]
def index_buffer(buffer: BufferType, indices: Union[int, np.ndarray]):
if isinstance(buffer, dict):
return {key: index_buffer(val, indices) for key, val in buffer.items()}
elif isinstance(buffer, (list, tuple)):
return [index_buffer(val, indices) for val in buffer]
else:
return buffer[indices]
def send_buffer(buffer: BufferType, device: str):
""" Transfer a buffer to another device.
Parameters
----------
buffer: PyTorch Buffer
The buffer to transfer.
device: str
Target device.
"""
if isinstance(buffer, dict):
return {key: send_buffer(val, device) for key, val in buffer.items()}
elif isinstance(buffer, (list, tuple)):
return [send_buffer(val, device) for val in buffer]
else:
return buffer.to(device)
| 35.765193 | 120 | 0.660385 | 1,665 | 12,947 | 4.992192 | 0.0997 | 0.059673 | 0.034649 | 0.031761 | 0.599254 | 0.523701 | 0.448749 | 0.413258 | 0.37091 | 0.346607 | 0 | 0.001436 | 0.247084 | 12,947 | 361 | 121 | 35.864266 | 0.851252 | 0.199969 | 0 | 0.385366 | 0 | 0 | 0.009071 | 0 | 0 | 0 | 0 | 0 | 0.009756 | 1 | 0.097561 | false | 0 | 0.02439 | 0 | 0.326829 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cadb4bb0a5ad5e4a7aa9e25a73576123f2f889f | 6,596 | py | Python | venv/Model/InputModel.py | florianbeyer/maptor | 8fd899dd3123484fd83b7aa74a007edc8879dba6 | [
"MIT"
] | 1 | 2022-03-15T13:35:26.000Z | 2022-03-15T13:35:26.000Z | venv/Model/InputModel.py | florianbeyer/maptor | 8fd899dd3123484fd83b7aa74a007edc8879dba6 | [
"MIT"
] | 9 | 2021-01-30T16:55:50.000Z | 2022-03-12T00:54:37.000Z | venv/Model/InputModel.py | florianbeyer/maptor | 8fd899dd3123484fd83b7aa74a007edc8879dba6 | [
"MIT"
] | 1 | 2020-12-21T02:24:46.000Z | 2020-12-21T02:24:46.000Z | import numpy as np
from osgeo import gdal, ogr, gdal_array  # I/O image data
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import QFileDialog, QMessageBox
class InputModule():
Training_File_Path = ""
Validation_File_Path = ""
Trg_Attribute_Selected = ""
Val_Attribute_Selected = ""
RS_Image_Path = ""
""" setter functions for class """
def set_training_file_path(self,path):
self.Training_File_Path = path
def set_validation_file_path(self,path):
self.Validation_File_Path = path
def set_trg_attribute_selected(self,attr):
self.Trg_Attribute_Selected = attr
def set_val_attribute_selected(self,attr):
self.Val_Attribute_Selected = attr
def set_rs_image_path(self,path):
self.RS_Image_Path = path
""" getter functions for the class """
def get_training_path(self):
return self.Training_File_Path
def get_validation_path(self):
return self.Validation_File_Path
def get_trg_attribute_selected(self):
return self.Trg_Attribute_Selected
def get_val_attribute_selected(self):
return self.Val_Attribute_Selected
def get_rs_image_path(self):
return self.RS_Image_Path
# loading function
def loadimagedata(self,img_path):
        try:
            img_ds = gdal.Open(img_path, gdal.GA_ReadOnly)
            if img_ds is None:
                # gdal.Open signals failure by returning None, not by raising
                raise ValueError("GDAL could not open: " + img_path)
            img = np.zeros((img_ds.RasterYSize, img_ds.RasterXSize, img_ds.RasterCount),
                           gdal_array.GDALTypeCodeToNumericTypeCode(img_ds.GetRasterBand(1).DataType))
            for b in range(img.shape[2]):
                img[:, :, b] = img_ds.GetRasterBand(b + 1).ReadAsArray()
            return [img_ds, img]
        except ValueError as e:
            print("Error in loading Image file.")
            print(e)
# loading training/validation data
def load_training_data(self,trg_path,Trg_Attribute_Selected,img_ds,type):
try:
driver = ogr.GetDriverByName('ESRI Shapefile')
shape_dataset = driver.Open(trg_path)
shape_layer = shape_dataset.GetLayer()
mem_drv = gdal.GetDriverByName('MEM')
if(type == "Classification"):
mem_raster = mem_drv.Create('', img_ds.RasterXSize, img_ds.RasterYSize, 1, gdal.GDT_UInt16)
if(type == "Regression"):
mem_raster = mem_drv.Create('', img_ds.RasterXSize, img_ds.RasterYSize, 1, gdal.GDT_Float32)
mem_raster.SetProjection(img_ds.GetProjection())
mem_raster.SetGeoTransform(img_ds.GetGeoTransform())
mem_band = mem_raster.GetRasterBand(1)
mem_band.Fill(0)
mem_band.SetNoDataValue(0)
att_ = 'ATTRIBUTE=' + Trg_Attribute_Selected
err = gdal.RasterizeLayer(mem_raster, [1], shape_layer, None, None, [1], [att_, "ALL_TOUCHED=TRUE"])
assert err == gdal.CE_None
roi = mem_raster.ReadAsArray()
return roi
except ValueError as e:
print("Could not load Training Data")
print(e)
def load_validation_data(self, Validation_File_Path, Val_Attribute_Selected, img_ds):
try:
print(Val_Attribute_Selected)
shape_dataset_v = ogr.Open(Validation_File_Path)
shape_layer_v = shape_dataset_v.GetLayer()
mem_drv_v = gdal.GetDriverByName('MEM')
mem_raster_v = mem_drv_v.Create('', img_ds.RasterXSize, img_ds.RasterYSize, 1, gdal.GDT_UInt16)
mem_raster_v.SetProjection(img_ds.GetProjection())
mem_raster_v.SetGeoTransform(img_ds.GetGeoTransform())
mem_band_v = mem_raster_v.GetRasterBand(1)
mem_band_v.Fill(0)
mem_band_v.SetNoDataValue(0)
att_ = 'ATTRIBUTE=' + Val_Attribute_Selected
# # http://gdal.org/gdal__alg_8h.html#adfe5e5d287d6c184aab03acbfa567cb1
# # http://gis.stackexchange.com/questions/31568/gdal-rasterizelayer-doesnt-burn-all-polygons-to-raster
err_v = gdal.RasterizeLayer(mem_raster_v, [1], shape_layer_v, None, None, [1], [att_, "ALL_TOUCHED=TRUE"])
assert err_v == gdal.CE_None
roi_v = mem_raster_v.ReadAsArray()
return roi_v
except ValueError as e:
print("Could not load Validation Data")
print(e)
    # loading attributes
def FindAttributes(self, filepath):
try:
driver = ogr.GetDriverByName('ESRI Shapefile')
shape_dataset = driver.Open(filepath)
shape_layer = shape_dataset.GetLayer()
field_names = [field.name for field in shape_layer.schema]
return field_names
except ValueError as e:
print(e)
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setWindowTitle("NO ATTRIBUTES FOUND")
            msg.setText("NO ATTRIBUTES FOUND IN .SHP FILE. Attribute Error")
msg.exec_()
""" Creates 2 subplots of Training Data and Image """
def create_training_subplots(self,data1,data2):
try:
fig = plt.figure(figsize=(7, 6))
fig.suptitle('Training data', fontsize=14)
plt.subplot(121)
plt.imshow(data1, cmap=plt.cm.Greys_r) # data = img[:, :, 0] &&& cmap = plt.cm.Greys_r
plt.title('RS image - first band')
plt.subplot(122)
plt.imshow(data2, cmap=plt.cm.Spectral) # data = roi && cmap = plt.cm.Spectral
plt.title('Training Image')
plt.show()
except ValueError as e:
print(e)
print("Could not plot the data")
def create_validation_subplots(self,img,class_prediction,roi,roi_v):
try:
fig = plt.figure(figsize=(6, 6))
plt.subplot(221)
plt.imshow(img[:, :, 0], cmap=plt.cm.Greys_r)
plt.title('RS_Image - first band')
plt.subplot(222)
plt.imshow(class_prediction, cmap=plt.cm.Spectral)
plt.title('Classification result')
plt.subplot(223)
plt.imshow(roi, cmap=plt.cm.Spectral)
plt.title('Training Data')
plt.subplot(224)
plt.imshow(roi_v, cmap=plt.cm.Spectral)
plt.title('Validation Data')
plt.show()
except ValueError as e:
print(e)
print("Could not create plots for Training/Validation")
| 35.085106 | 118 | 0.621286 | 804 | 6,596 | 4.863184 | 0.221393 | 0.024297 | 0.020716 | 0.029156 | 0.365217 | 0.259335 | 0.19821 | 0.19821 | 0.161381 | 0.143478 | 0 | 0.015973 | 0.278654 | 6,596 | 187 | 119 | 35.272727 | 0.805801 | 0.050788 | 0 | 0.207407 | 0 | 0 | 0.078168 | 0 | 0 | 0 | 0 | 0 | 0.014815 | 1 | 0.118519 | false | 0 | 0.037037 | 0.037037 | 0.266667 | 0.088889 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4caef8626df07a14e03b74c2f52928d5db0d7897 | 8,882 | py | Python | test/test_core_notes64.py | mebeim/pyelftools | 10214f1af5724c5d2faa2fe41a1f50c38d7dd25b | [
"Unlicense"
] | 1,358 | 2015-01-10T10:59:14.000Z | 2022-03-31T21:58:08.000Z | test/test_core_notes64.py | mebeim/pyelftools | 10214f1af5724c5d2faa2fe41a1f50c38d7dd25b | [
"Unlicense"
] | 308 | 2015-01-19T09:15:14.000Z | 2022-03-31T03:05:46.000Z | test/test_core_notes64.py | mebeim/pyelftools | 10214f1af5724c5d2faa2fe41a1f50c38d7dd25b | [
"Unlicense"
] | 490 | 2015-01-12T10:06:43.000Z | 2022-03-27T03:26:28.000Z | #------------------------------------------------------------------------------
# elftools tests
#
# Maxim Akhmedov (max42@yandex-team.ru)
# This code is in the public domain
#------------------------------------------------------------------------------
import unittest
import os
from elftools.elf.elffile import ELFFile
from elftools.elf.segments import NoteSegment
class TestCoreNotes(unittest.TestCase):
""" This test makes sure than core dump specific
sections are properly analyzed.
"""
@classmethod
def setUpClass(cls):
cls._core_file = open(os.path.join('test',
'testfiles_for_unittests', 'core_linux64.elf'),
'rb')
def test_core_prpsinfo(self):
elf = ELFFile(self._core_file)
for segment in elf.iter_segments():
if not isinstance(segment, NoteSegment):
continue
for note in segment.iter_notes():
if note['n_type'] != 'NT_PRPSINFO':
continue
desc = note['n_desc']
self.assertEqual(desc['pr_state'], 0)
self.assertEqual(desc['pr_sname'], b'R')
self.assertEqual(desc['pr_zomb'], 0)
self.assertEqual(desc['pr_nice'], 0)
self.assertEqual(desc['pr_flag'], 0x400600)
self.assertEqual(desc['pr_uid'], 1000)
self.assertEqual(desc['pr_gid'], 1000)
self.assertEqual(desc['pr_pid'], 23395)
self.assertEqual(desc['pr_ppid'], 23187)
self.assertEqual(desc['pr_pgrp'], 23395)
self.assertEqual(desc['pr_sid'], 23187)
self.assertEqual(
desc['pr_fname'],
b'coredump_self\x00\x00\x00')
self.assertEqual(
desc['pr_psargs'],
b'./coredump_self foo bar 42 ' + b'\x00' * (80 - 27))
def test_core_nt_file(self):
"""
Test that the parsing of the NT_FILE note within a core file is
correct.
The assertions are made against the output of eu-readelf.
eu-readelf -n core_linux64.elf
...
CORE 621 FILE
10 files:
00400000-00401000 00000000 4096
/home/max42/pyelftools/test/coredump_self
00600000-00601000 00000000 4096
/home/max42/pyelftools/test/coredump_self
00601000-00602000 00001000 4096
/home/max42/pyelftools/test/coredump_self
7fa4593ae000-7fa45956d000 00000000 1830912
/lib/x86_64-linux-gnu/libc-2.23.so
7fa45956d000-7fa45976d000 001bf000 2097152
/lib/x86_64-linux-gnu/libc-2.23.so
7fa45976d000-7fa459771000 001bf000 16384
/lib/x86_64-linux-gnu/libc-2.23.so
7fa459771000-7fa459773000 001c3000 8192
/lib/x86_64-linux-gnu/libc-2.23.so
7fa459777000-7fa45979d000 00000000 155648
/lib/x86_64-linux-gnu/ld-2.23.so
7fa45999c000-7fa45999d000 00025000 4096
/lib/x86_64-linux-gnu/ld-2.23.so
7fa45999d000-7fa45999e000 00026000 4096
/lib/x86_64-linux-gnu/ld-2.23.so
...
"""
elf = ELFFile(self._core_file)
nt_file_found = False
for segment in elf.iter_segments():
if not isinstance(segment, NoteSegment):
continue
for note in segment.iter_notes():
if note['n_type'] != 'NT_FILE':
continue
nt_file_found = True
desc = note['n_desc']
self.assertEqual(desc['num_map_entries'], 10)
self.assertEqual(desc['page_size'], 4096)
self.assertEqual(len(desc['Elf_Nt_File_Entry']), 10)
self.assertEqual(len(desc['filename']), 10)
self.validate_nt_file_entry(desc['Elf_Nt_File_Entry'][0],
desc['page_size'],
0x00400000,
0x00401000,
0x00000000)
self.assertEqual(desc['filename'][0],
b"/home/max42/pyelftools/test/coredump_self")
self.validate_nt_file_entry(desc['Elf_Nt_File_Entry'][1],
desc['page_size'],
0x00600000,
0x00601000,
0x00000000)
self.assertEqual(desc['filename'][1],
b"/home/max42/pyelftools/test/coredump_self")
self.validate_nt_file_entry(desc['Elf_Nt_File_Entry'][2],
desc['page_size'],
0x00601000,
0x00602000,
0x00001000)
self.assertEqual(desc['filename'][2],
b"/home/max42/pyelftools/test/coredump_self")
self.validate_nt_file_entry(desc['Elf_Nt_File_Entry'][3],
desc['page_size'],
0x7fa4593ae000,
0x7fa45956d000,
0x00000000)
self.assertEqual(desc['filename'][3],
b"/lib/x86_64-linux-gnu/libc-2.23.so")
self.validate_nt_file_entry(desc['Elf_Nt_File_Entry'][4],
desc['page_size'],
0x7fa45956d000,
0x7fa45976d000,
0x001bf000)
self.assertEqual(desc['filename'][4],
b"/lib/x86_64-linux-gnu/libc-2.23.so")
self.validate_nt_file_entry(desc['Elf_Nt_File_Entry'][5],
desc['page_size'],
0x7fa45976d000,
0x7fa459771000,
0x001bf000)
self.assertEqual(desc['filename'][5],
b"/lib/x86_64-linux-gnu/libc-2.23.so")
self.validate_nt_file_entry(desc['Elf_Nt_File_Entry'][6],
desc['page_size'],
0x7fa459771000,
0x7fa459773000,
0x001c3000)
self.assertEqual(desc['filename'][6],
b"/lib/x86_64-linux-gnu/libc-2.23.so")
self.validate_nt_file_entry(desc['Elf_Nt_File_Entry'][7],
desc['page_size'],
0x7fa459777000,
0x7fa45979d000,
0x00000000)
self.assertEqual(desc['filename'][7],
b"/lib/x86_64-linux-gnu/ld-2.23.so")
self.validate_nt_file_entry(desc['Elf_Nt_File_Entry'][8],
desc['page_size'],
0x7fa45999c000,
0x7fa45999d000,
0x00025000)
self.assertEqual(desc['filename'][8],
b"/lib/x86_64-linux-gnu/ld-2.23.so")
self.validate_nt_file_entry(desc['Elf_Nt_File_Entry'][9],
desc['page_size'],
0x7fa45999d000,
0x7fa45999e000,
0x00026000)
self.assertEqual(desc['filename'][9],
b"/lib/x86_64-linux-gnu/ld-2.23.so")
self.assertTrue(nt_file_found)
def validate_nt_file_entry(self,
entry,
page_size,
expected_vm_start,
expected_vm_end,
expected_page_offset):
self.assertEqual(entry.vm_start, expected_vm_start)
self.assertEqual(entry.vm_end, expected_vm_end)
self.assertEqual(entry.page_offset * page_size, expected_page_offset)
@classmethod
def tearDownClass(cls):
cls._core_file.close()
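# To run only this module, invoke unittest from the pyelftools checkout root
# so the relative test-asset path in setUpClass resolves:
#   python -m unittest test.test_core_notes64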
| 46.020725 | 79 | 0.450799 | 804 | 8,882 | 4.773632 | 0.245025 | 0.117249 | 0.123762 | 0.047421 | 0.465868 | 0.334549 | 0.334549 | 0.307712 | 0.28322 | 0.251172 | 0 | 0.159142 | 0.443932 | 8,882 | 192 | 80 | 46.260417 | 0.617939 | 0.146589 | 0 | 0.4 | 0 | 0 | 0.133953 | 0.055085 | 0 | 0 | 0.049754 | 0 | 0.22963 | 1 | 0.037037 | false | 0 | 0.02963 | 0 | 0.074074 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cb051b8da815b0f715c8c32ade2d345d518173e | 7,818 | py | Python | tools/keo_solver.py | nschloe/pynosh | 331454b29246e6c009878589aad2dccb9fda6c30 | [
"MIT"
] | 8 | 2016-01-04T22:49:25.000Z | 2021-05-07T17:23:43.000Z | tools/keo_solver.py | nschloe/pynosh | 331454b29246e6c009878589aad2dccb9fda6c30 | [
"MIT"
] | 1 | 2015-11-09T18:39:31.000Z | 2015-11-09T18:39:31.000Z | tools/keo_solver.py | nschloe/pynosh | 331454b29246e6c009878589aad2dccb9fda6c30 | [
"MIT"
] | 1 | 2021-03-20T22:01:47.000Z | 2021-03-20T22:01:47.000Z | # -*- coding: utf-8 -*-
#
"""
Solve a linear equation system with the kinetic energy operator.
"""
import numerical_methods as nm
import sys
from scipy.sparse.linalg import LinearOperator
import time
import numpy
import cmath
import matplotlib.pyplot as pp
from matplotlib import rc
rc("text", usetex=True)
rc("font", family="serif")
import matplotlib2tikz
import meshplex
import pynosh.modelevaluator_nls
import pynosh.preconditioners
def _main():
"""Main function.
"""
# run the preconditioners
_run_different_meshes()
# print 'Solving the system without preconditioning, scipy cg...'
# sol, info, relresvec0 = nm.cg_wrap(pynosh_modelval._keo, rhs,
# x0 = psi0,
# tol = 1.0e-10,
# maxiter = 1000,
# M = None
# )
##print 'done.'
##print info
## plot (relative) residuals
# pp.semilogy(relresvec0, 'ro')
##pp.semilogy(relresvec1, 'g')
##pp.semilogy(relresvec2, 'b')
##pp.semilogy(relresvec3, 'y')
# pp.title('Convergence history of CG for the KEO, $\mu=0.1$')
# pp.xlabel('$k$')
# pp.ylabel('$\|A_{\mathrm{KEO}}\psi_k-b\|_2$')
# pp.show()
return
def _run_different_meshes():
mesh_files = [
# 'states/rectangle10.vtu',
# 'states/rectangle20.vtu',
# 'states/rectangle30.vtu',
# 'states/rectangle40.vtu',
# 'states/rectangle50.vtu',
# 'states/rectangle60.vtu',
# 'states/rectangle70.vtu',
# 'states/rectangle80.vtu',
# 'states/rectangle90.vtu',
# 'states/rectangle100.vtu',
# 'states/rectangle110.vtu',
# 'states/rectangle120.vtu',
# 'states/rectangle130.vtu',
# 'states/rectangle140.vtu',
# 'states/rectangle150.vtu',
# 'states/rectangle160.vtu',
# 'states/rectangle170.vtu',
# 'states/rectangle180.vtu',
# 'states/rectangle190.vtu',
"states/rectangle200.vtu"
]
mu = 1.0e-0
# loop over the meshes and compute
nums_unknowns = []
num_iterations = {}
for mesh_file in mesh_files:
# read and set the mesh
print()
print("Reading the mesh...")
try:
mesh, point_data, field_data = meshplex.reader.read(mesh_file)
except AttributeError:
print("Could not read from file ", mesh_file, ".")
sys.exit()
print(" done.")
        # create model evaluator interface
        # NOTE: the constructor names below are assumptions about the pynosh
        # API this script was written against; adjust to the installed version.
        pynosh_modelval = pynosh.modelevaluator_nls.ModelEvaluator(mu)
        # create preconditioners object
        precs = pynosh.preconditioners.Preconditioners(pynosh_modelval)
precs.set_parameter(mu)
# recreate all the objects necessary to perform the preconditioner run
num_unknowns = len(mesh.nodes)
nums_unknowns.append(num_unknowns)
# set psi at which to create the Jacobian
# generate random numbers within the unit circle
radius = numpy.random.rand(num_unknowns)
arg = numpy.random.rand(num_unknowns)
current_psi = numpy.empty(num_unknowns, dtype=complex)
for k in range(num_unknowns):
current_psi[k] = cmath.rect(radius[k], arg[k])
pynosh_modelval.set_current_psi(current_psi)
# create right hand side and initial guess
rhs = numpy.random.rand(num_unknowns) + 1j * numpy.random.rand(num_unknowns)
# initial guess for all operations
psi0 = numpy.zeros(num_unknowns, dtype=complex)
test_preconditioners = _create_preconditioner_list(precs, num_unknowns)
# build the kinetic energy operator
print("Building the KEO...")
        start_time = time.perf_counter()
        pynosh_modelval._assemble_kinetic_energy_operator()
        end_time = time.perf_counter()
print("done. (", end_time - start_time, "s).")
# Run the preconditioners and gather the relative residuals.
relresvecs = _run_preconditioners(
pynosh_modelval._keo, rhs, psi0, test_preconditioners
)
# append the number of iterations to the data
for prec in test_preconditioners:
if prec["name"] not in list(num_iterations.keys()):
num_iterations[prec["name"]] = []
num_iterations[prec["name"]].append(len(relresvecs[prec["name"]]) - 1)
print(num_iterations)
# plot them all
for prec in test_preconditioners:
pp.semilogy(
nums_unknowns, num_iterations[prec["name"]], "-o", label=prec["name"]
)
# plot legend
pp.legend()
# add title and so forth
pp.title("CG convergence for $K$")
pp.xlabel("Number of unknowns $n$")
pp.ylabel("Number of iterations till $<10^{-10}$")
matplotlib2tikz.save(
"meshrun-k.tikz", figurewidth="\\figurewidth", figureheight="\\figureheight"
)
pp.show()
return
def _run_preconditioners(linear_operator, rhs, x0, preconditioners):
tol = 1.0e-10
maxiter = 5000
relresvecs = {}
for prec in preconditioners:
print("Solving the system with", prec["name"], "...")
        start_time = time.perf_counter()
sol, info, relresvec = nm.cg_wrap(
linear_operator,
rhs,
x0=x0,
tol=tol,
maxiter=maxiter,
            M=prec["preconditioner"],
)
        end_time = time.perf_counter()
relresvecs[prec["name"]] = relresvec
if info == 0:
print("success!", end=" ")
else:
print("no convergence.", end=" ")
print(" (", end_time - start_time, "s,", len(relresvec) - 1, " iters).")
return relresvecs
def _create_preconditioner_list(precs, num_unknowns):
    test_preconditioners = [{"name": "-", "preconditioner": None}]
    # Wrap each preconditioner application in a scipy LinearOperator so the
    # Krylov solver can use it as M.
    variants = [
        ("sym i$LU$2", precs.keo_symmetric_ilu2),
        ("sym i$LU$4", precs.keo_symmetric_ilu4),
        ("sym i$LU$6", precs.keo_symmetric_ilu6),
        ("sym i$LU$8", precs.keo_symmetric_ilu8),
        ("AMG", precs.keo_amg),
    ]
    for name, matvec in variants:
        prec = LinearOperator(
            (num_unknowns, num_unknowns), matvec=matvec, dtype=complex
        )
        test_preconditioners.append({"name": name, "preconditioner": prec})
    return test_preconditioners
def _construct_matrix(linear_operator):
shape = linear_operator.shape
A = numpy.zeros(shape)
e = numpy.zeros(shape[0])
for j in range(shape[1]):
e[j] = 1.0
A[:, j] = linear_operator * e
e[j] = 0.0
A = numpy.matrix(A)
return A
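def _demo_construct_matrix():
    """Small sanity sketch (not called anywhere): densifying a LinearOperator
    built from an explicit matrix must give that matrix back."""
    M = numpy.diag([1.0, 2.0, 3.0])
    op = LinearOperator((3, 3), matvec=lambda v: M.dot(v), dtype=float)
    assert numpy.allclose(_construct_matrix(op), M)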
def _parse_input_arguments():
"""Parse input arguments.
"""
from optparse import OptionParser
parser = OptionParser()
parser.add_option(
"-f",
"--file",
dest="filename",
type=str,
help="read mesh from VTKFILE",
metavar="VTKFILE",
)
# parser.add_option('-q', '--quiet',
# action='store_false', dest='verbose', default=True,
# help='don't print status messages to stdout')
(opts, args) = parser.parse_args()
return opts, args
if __name__ == "__main__":
_main()
# import cProfile
# cProfile.run('_main()', 'pfvm_profile.dat')
| 28.429091 | 84 | 0.621898 | 896 | 7,818 | 5.253348 | 0.311384 | 0.049076 | 0.037179 | 0.039516 | 0.181644 | 0.126407 | 0.100701 | 0.100701 | 0.090079 | 0 | 0 | 0.020244 | 0.254413 | 7,818 | 274 | 85 | 28.532847 | 0.787271 | 0.23868 | 0 | 0.078431 | 0 | 0 | 0.095587 | 0.003919 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039216 | false | 0 | 0.084967 | 0 | 0.163399 | 0.071895 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cb1dfac090a28c6105bc1ff28f47e2c049b8adb | 462 | py | Python | backend/apps/query/urls.py | bopopescu/Journey | 654eb66e0e2df59e916eff4c75b68b183f9b58b5 | [
"MIT"
] | 41 | 2019-01-02T09:36:54.000Z | 2022-02-20T13:13:05.000Z | backend/apps/query/urls.py | bopopescu/Journey | 654eb66e0e2df59e916eff4c75b68b183f9b58b5 | [
"MIT"
] | 15 | 2019-09-30T05:40:20.000Z | 2022-02-17T19:28:41.000Z | backend/apps/query/urls.py | bopopescu/Journey | 654eb66e0e2df59e916eff4c75b68b183f9b58b5 | [
"MIT"
] | 23 | 2019-02-18T10:50:10.000Z | 2022-01-06T07:53:18.000Z | # -*- coding:utf-8 -*-
from django.conf.urls import url, include
from apps.query.views import *
from rest_framework import routers
router = routers.DefaultRouter()
router.register(r'querysqllog', QuerySqlLogViewSet, basename="querysqllog")
urlpatterns = [
    url(r'^', include(router.urls)),
    url(r'^querysql$', QuerySqlViewSet.as_view()),
    url(r'^querymongodb$', QueryMongodbViewSet.as_view()),
    url(r'^queryredis$', QueryRedisViewSet.as_view()),
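    # The router above contributes the querysqllog list/detail routes; the
    # three explicit url() entries are single APIView-style endpoints.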
] | 30.8 | 75 | 0.722944 | 54 | 462 | 6.111111 | 0.592593 | 0.048485 | 0.054545 | 0.060606 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002475 | 0.125541 | 462 | 15 | 76 | 30.8 | 0.814356 | 0.04329 | 0 | 0 | 0 | 0 | 0.120729 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.272727 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cb25ef8ce8ae3207f43473f68b374b2d28f1800 | 678 | py | Python | test/test_io.py | JPYamamoto/secret_sharing_shamir | b73abc7e721ad52b6b22571b0427cb72f6d9a7c2 | [
"MIT"
] | null | null | null | test/test_io.py | JPYamamoto/secret_sharing_shamir | b73abc7e721ad52b6b22571b0427cb72f6d9a7c2 | [
"MIT"
] | null | null | null | test/test_io.py | JPYamamoto/secret_sharing_shamir | b73abc7e721ad52b6b22571b0427cb72f6d9a7c2 | [
"MIT"
] | null | null | null | from shamir.io import IO
import os
import string
import random
TEST_FILE = './test/test_assets/io_test.txt'
class TestIO:
def test_read_write_text(self):
length = random.getrandbits(8)
content = ''.join(random.choice(string.ascii_letters) for _ in range(length))
IO.write_file(TEST_FILE, content)
written = IO.read_file(TEST_FILE)
print(content)
print(written)
assert (content == written)
def test_read_write_binary(self):
content = os.urandom(2**16)
IO.write_file(TEST_FILE, content, binary=True)
written = IO.read_file(TEST_FILE, binary=True)
assert (content == written)
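# Run from the repository root (so the relative TEST_FILE path resolves):
#   python -m pytest test/test_io.py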
| 23.37931 | 85 | 0.662242 | 91 | 678 | 4.725275 | 0.406593 | 0.093023 | 0.111628 | 0.074419 | 0.237209 | 0.237209 | 0 | 0 | 0 | 0 | 0 | 0.007707 | 0.234513 | 678 | 28 | 86 | 24.214286 | 0.820809 | 0 | 0 | 0.105263 | 0 | 0 | 0.044313 | 0.044313 | 0 | 0 | 0 | 0 | 0.105263 | 1 | 0.105263 | false | 0 | 0.210526 | 0 | 0.368421 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cb33f614509a8b3ea124e5daa0d312746e38a70 | 999 | py | Python | apps/stix-shifter/stix_shifter_threatbus/test_message_mapping.py | GTrunSec/threatbus | 030993a0d10adf25929b85ef0a19bbdc657210f6 | [
"BSD-3-Clause"
] | 212 | 2020-01-25T12:05:54.000Z | 2022-03-22T05:59:35.000Z | apps/stix-shifter/stix_shifter_threatbus/test_message_mapping.py | GTrunSec/threatbus | 030993a0d10adf25929b85ef0a19bbdc657210f6 | [
"BSD-3-Clause"
] | 57 | 2020-01-28T14:23:32.000Z | 2022-03-10T13:18:11.000Z | apps/stix-shifter/stix_shifter_threatbus/test_message_mapping.py | GTrunSec/threatbus | 030993a0d10adf25929b85ef0a19bbdc657210f6 | [
"BSD-3-Clause"
] | 11 | 2020-02-01T15:15:15.000Z | 2022-01-20T18:37:22.000Z | import unittest
from stix2 import Indicator, Sighting
from .message_mapping import map_bundle_to_sightings
class TestMessageMapping(unittest.TestCase):
def setUp(self):
self.observations = [
{
"type": "identity",
},
{"type": "observed-data", "some-prop": "value"},
{
"type": "observed-data",
"some-prop": "value",
"last_observed": "2021-05-04T15:15:58.919Z",
},
]
self.indicator = Indicator(
pattern="[ipv4-addr:value = '6.6.6.6']", pattern_type="stix"
)
def test_map_bundle(self):
mapped = list(map_bundle_to_sightings(self.indicator, self.observations))
self.assertEqual(len(mapped), 2)
for sighting in mapped:
self.assertEqual(type(sighting), Sighting)
self.assertEqual(sighting.sighting_of_ref, self.indicator.id)
self.assertIsNotNone(sighting.last_seen)
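# To run just this module, invoke unittest from the app directory so the
# relative import of .message_mapping resolves:
#   python -m unittest stix_shifter_threatbus.test_message_mapping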
| 32.225806 | 81 | 0.577578 | 103 | 999 | 5.466019 | 0.495146 | 0.047957 | 0.039076 | 0.071048 | 0.10302 | 0.10302 | 0 | 0 | 0 | 0 | 0 | 0.034335 | 0.3003 | 999 | 30 | 82 | 33.3 | 0.771102 | 0 | 0 | 0 | 0 | 0 | 0.144144 | 0.024024 | 0 | 0 | 0 | 0 | 0.153846 | 1 | 0.076923 | false | 0 | 0.115385 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cb376e186ae5bdec1baea83c847e4b3599af7ec | 756 | py | Python | src/TheBlueAllianceAPI.py | orangelight/FRC-Vision-Scoring-and-Win-Probability | 8dd00bb9c7dc25942d0981cfa17776c4ab9705ac | [
"MIT"
] | null | null | null | src/TheBlueAllianceAPI.py | orangelight/FRC-Vision-Scoring-and-Win-Probability | 8dd00bb9c7dc25942d0981cfa17776c4ab9705ac | [
"MIT"
] | null | null | null | src/TheBlueAllianceAPI.py | orangelight/FRC-Vision-Scoring-and-Win-Probability | 8dd00bb9c7dc25942d0981cfa17776c4ab9705ac | [
"MIT"
] | null | null | null | import requests
def get_event_match_keys_with_videos(event_key):
    # The Blue Alliance API v3 authenticates via the X-TBA-Auth-Key header;
    # the key itself is intentionally left blank here.
    r = requests.get('http://www.thebluealliance.com/api/v3/event/%s/matches' % event_key,
                     headers={'X-TBA-Auth-Key': ''})
    json = r.json()
    match_video = {}
    for match in json:
        if len(match['videos']):
            if match['videos'][0]['type'] == 'youtube':
                match_video[match['key']] = (match['videos'][0]['key'], match['comp_level'])
    return match_video
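# Minimal usage sketch; '2020gal' is a made-up event key and a real request
# additionally needs an auth key in the X-TBA-Auth-Key header above:
#
#   videos = get_event_match_keys_with_videos('2020gal')
#   outcomes = get_event_match_outcomes('2020gal')
#   for key, (yt_key, level) in videos.items():
#       print(key, level, 'https://youtu.be/%s' % yt_key, outcomes.get(key))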
def get_event_match_outcomes(event_key):
    r = requests.get('http://www.thebluealliance.com/api/v3/event/%s/matches' % event_key,
                     headers={'X-TBA-Auth-Key': ''})
json = r.json()
outcomes = {}
for match in json:
outcomes[match['key']] = match['winning_alliance']
return outcomes | 34.363636 | 107 | 0.607143 | 97 | 756 | 4.556701 | 0.371134 | 0.072398 | 0.049774 | 0.072398 | 0.393665 | 0.393665 | 0.393665 | 0.393665 | 0.393665 | 0.393665 | 0 | 0.006757 | 0.216931 | 756 | 22 | 108 | 34.363636 | 0.739865 | 0 | 0 | 0.222222 | 0 | 0 | 0.227213 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.055556 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cb6dfb1668dbc770346a5632ef8993ceafc242e | 2,592 | py | Python | dskc/visualization/graphs/shortcuts/text.py | NovaSBE-DSKC/predict-campaing-sucess-rate | fec339aee7c883f55d64130eb69e490f765ee27d | [
"MIT"
] | null | null | null | dskc/visualization/graphs/shortcuts/text.py | NovaSBE-DSKC/predict-campaing-sucess-rate | fec339aee7c883f55d64130eb69e490f765ee27d | [
"MIT"
] | null | null | null | dskc/visualization/graphs/shortcuts/text.py | NovaSBE-DSKC/predict-campaing-sucess-rate | fec339aee7c883f55d64130eb69e490f765ee27d | [
"MIT"
] | null | null | null | from dskc.clean import get_text_from
from dskc.visualization import graphs
from dskc.visualization.graphs.types.word_cloud.word_cloud import word_cloud, text_proportion_success
from dskc._util.string import get_display_text
import pandas as pd
from . import util
from matplotlib import pyplot as plt
def _wordcloud(series, section_number, sub_section, display_name, stop_words):
sub_section = util.header(section_number, sub_section, "{} Word Cloud".format(display_name))
word_cloud(series, stop_words=stop_words)
return sub_section
def _top_words(words_series, top_words, section_number, sub_section, display_name):
sub_section = util.header(section_number, sub_section, "{} Top {} Words".format(display_name, top_words))
graphs.bars(words_series,
title="Top {} words".format(top_words),
xlabel="Word",
percentage_on_top=True,
max_values=top_words)
return sub_section
def _text_proportion_success(series, words_series, target_series, target_true, top_words, section_number, sub_section,
                             display_name):
sub_section = util.header(section_number, sub_section,
"{} Mean Success of Top {} Words".format(display_name, top_words))
text_proportion_success(words_series, series, target_series,
target_true=target_true)
return sub_section
def text_col(df, name, target=None, target_true=False, section_number=1, top_words=15, stop_words=[]):
# get names
display_name = get_display_text(name)
sub_section = 1
# set series
series = df[name]
# wordcloud
sub_section = _wordcloud(series, section_number, sub_section, display_name, stop_words)
# bars graph
text = get_text_from(series, stop_words=stop_words)
# set word series
words = text.split(" ")
words_series = pd.Series(words)
# top n words
sub_section = _top_words(words_series, top_words, section_number, sub_section, display_name)
# text proportion graphs
    if target is not None:
try:
            _text_proportion_success(series,
                                     words_series,
                                     df[target],
                                     target_true,
                                     top_words,
                                     section_number,
                                     sub_section,
                                     display_name)
        except Exception as e:
            plt.show()
            print("\nNot available: {}\n".format(e))
| 35.027027 | 119 | 0.628472 | 305 | 2,592 | 5.009836 | 0.219672 | 0.117801 | 0.094241 | 0.135471 | 0.503272 | 0.402487 | 0.35144 | 0.308246 | 0.280105 | 0.280105 | 0 | 0.002195 | 0.297068 | 2,592 | 73 | 120 | 35.506849 | 0.836443 | 0.035494 | 0 | 0.0625 | 0 | 0 | 0.037721 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.145833 | 0 | 0.291667 | 0.020833 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cb796dd4c548360d380c69351baad3aace13298 | 1,333 | py | Python | atmos/genModel.py | aasensio/ALMA-Zeeman | c17c5f1bd7117efe3be1e97b4cd1e9422bc315e2 | [
"MIT"
] | 1 | 2020-08-12T20:26:15.000Z | 2020-08-12T20:26:15.000Z | atmos/genModel.py | aasensio/ALMA-Zeeman | c17c5f1bd7117efe3be1e97b4cd1e9422bc315e2 | [
"MIT"
] | null | null | null | atmos/genModel.py | aasensio/ALMA-Zeeman | c17c5f1bd7117efe3be1e97b4cd1e9422bc315e2 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as pl
yearInSec = 365.0*24.0*3600.0
solarMassPerYear = 1.99e33 / yearInSec
RStar = 4e13
TStar = 2330.0
MStar = 0.8 * 1.99e33
R0 = 1.2 * RStar
Rc = 5 * RStar
Rw = 20.0 * RStar
vexp = 14.5 * 1e5
vturb = 1.0
MLoss = 2e-5 * solarMassPerYear
G = 6.67259e-8
k = 1.381e-16
mg = 2.3 * 1.6605402e-24
alpha = 0.55
nStar = 1.8e14
gamma = 0.89
pc = 3.0857e18
inputModel = np.loadtxt('rpfit_iktau.dat', skiprows=3)
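# Column layout of rpfit_iktau.dat, inferred from the slices taken below:
#   0: radius r [cm], 1: H2 number density [cm^-3], 2: kinetic temperature [K],
#   4: velocity [km/s], 5: dust temperature [K], 11: SO abundance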
n = inputModel.shape[0]
B = np.ones(n) * 1.0
r = inputModel[:,0]
nH2 = inputModel[:,1]
SOAbundance = inputModel[:,11]
Tk = inputModel[:,2]
TDust = inputModel[:,5]
v = inputModel[:,4]
f = open('model1G.atmos', 'w')
f.write("r [cm] n[cm^-3] A(mol) Tk [K] Tdust[K] v[km/s] B[G]\n")
f.write("{0}\n".format(n))
for i in range(n):
f.write("{0:10.3e} {1:10.3e} {2:10.3e} {3:10.3f} {4:10.3f} {5:10.3f} {6:10.3f}\n".format(r[i], nH2[i], SOAbundance[i], Tk[i], TDust[i], v[i], B[i]))
f.close()
v = inputModel[:,4] * 0.0
f = open('model1G_rest.atmos', 'w')
f.write("r [cm] n[cm^-3] A(mol) Tk [K] Tdust[K] v[km/s] B[G]\n")
f.write("{0}\n".format(n))
for i in range(n):
f.write("{0:10.3e} {1:10.3e} {2:10.3e} {3:10.3f} {4:10.3f} {5:10.3f} {6:10.3f}\n".format(r[i], nH2[i], SOAbundance[i], Tk[i], TDust[i], v[i], B[i]))
f.close()
| 27.204082 | 158 | 0.573893 | 272 | 1,333 | 2.805147 | 0.327206 | 0.04194 | 0.036697 | 0.04194 | 0.395806 | 0.395806 | 0.395806 | 0.395806 | 0.395806 | 0.395806 | 0 | 0.15576 | 0.186047 | 1,333 | 48 | 159 | 27.770833 | 0.547465 | 0 | 0 | 0.238095 | 0 | 0.095238 | 0.274569 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.047619 | 0 | 0.047619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cbc67363189895ebbbc90a0685761a9c7791bac | 516 | py | Python | test/img2base64.py | yotcap/face-compare | 980d399d00ee1d56e719780a62e7bf28907a3684 | [
"MIT"
] | null | null | null | test/img2base64.py | yotcap/face-compare | 980d399d00ee1d56e719780a62e7bf28907a3684 | [
"MIT"
] | null | null | null | test/img2base64.py | yotcap/face-compare | 980d399d00ee1d56e719780a62e7bf28907a3684 | [
"MIT"
] | null | null | null | #coding=utf-8
import csv
import base64
def image_to_base64():
    '''Convert an image file into a base64-encoded data URI.'''
    with open(r"./1-0.jpg", 'rb') as o:
        base64_data = base64.b64encode(o.read())
    s = base64_data.decode()
    # the source file is a JPEG, so use the matching MIME type
    return "data:image/jpeg;base64,%s" % s
def base64_write_csv():
    '''Write the generated base64 string into a CSV file.'''
    # csv.writer expects a text-mode file in Python 3; newline='' avoids
    # spurious blank rows on Windows
    with open(r'./image.csv', 'w', newline='') as f:
        csv_writer = csv.writer(f)
        csv_writer.writerow(["image"])
        csv_writer.writerow([image_to_base64()])
if __name__ == '__main__':
base64_write_csv()
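# Sanity-check sketch: decode the stored data URI back to raw bytes (assumes
# the CSV holds a single "data:image/jpeg;base64,..." cell):
#
#   with open('./image.csv') as f:
#       uri = list(csv.reader(f))[1][0]
#   raw = base64.b64decode(uri.split(',', 1)[1])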
| 21.5 | 53 | 0.637597 | 71 | 516 | 4.338028 | 0.492958 | 0.116883 | 0.084416 | 0.142857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.063529 | 0.176357 | 516 | 23 | 54 | 22.434783 | 0.661176 | 0.094961 | 0 | 0 | 0 | 0 | 0.134066 | 0.052747 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.133333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cc3f6dad88e745abaa2a72a51c49e94ec854132 | 1,160 | py | Python | 4_Structured_GE/struct_ge.py | nkphysics/Computational-Linear-Algebra- | 8e82585e25b58f73179c0b0ace63fcda9f480f07 | [
"MIT"
] | 1 | 2021-12-09T20:14:22.000Z | 2021-12-09T20:14:22.000Z | 4_Structured_GE/struct_ge.py | nkphysics/Computational-Linear-Algebra- | 8e82585e25b58f73179c0b0ace63fcda9f480f07 | [
"MIT"
] | null | null | null | 4_Structured_GE/struct_ge.py | nkphysics/Computational-Linear-Algebra- | 8e82585e25b58f73179c0b0ace63fcda9f480f07 | [
"MIT"
] | 1 | 2022-03-12T12:27:21.000Z | 2022-03-12T12:27:21.000Z | # Computational Linear Algebra #4 Structured Gaussian Elimination
# By: Nick Space Cowboy
import numpy as np
class Cowboy_Lin_Alg(object):
def solve_utri(self, Utri, b):
n = len(Utri) # row dimension of the Utri matrix
x = np.zeros_like(b, dtype=np.float64)
for i in range(n - 1, -1, -1): # loop to iterate through row index
x[i] += b[i] / Utri[i,i]
for j in range(n-1, i, -1): # loop to iterate through the off diagonal Sum part
x[i] += (- (Utri[i, j] * x[j])) / Utri[i,i]
return x
def SGE(self, A, b):
n = len(A)
l = np.zeros([n, n], dtype=np.float64)
for i in range(0, n, 1):
for j in range(i+1, n, 1):
l[j,i] = A[j,i] / A[i,i]
A[j] = A[j] - (l[j,i] * A[i])
b[j] = b[j] - (l[j,i] * b[i])
return A, b
if __name__ == "__main__":
	A = np.array(np.random.randint(0, 100, (4, 4)), dtype=np.float64)
Ac = A.copy()
print("A = ")
print(A)
b = np.array(np.random.randint(0, 100, (4,1)), dtype=np.float64)
print("b = ")
print(b)
cla = Cowboy_Lin_Alg()
cla.SGE(A,b)
x = cla.solve_utri(A, b)
print("U = ")
print(A)
print("c = ")
print(b)
print("x = ")
print(x)
print("Check Ax = ")
print(Ac.dot(x))
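# Optional cross-check against NumPy's solver. SGE mutates b in place, so take
# a copy right after b is created above, e.g. bc = b.copy(), then:
#   x_ref = np.linalg.solve(Ac, bc)
#   print(np.allclose(x, x_ref))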
| 25.777778 | 82 | 0.577586 | 227 | 1,160 | 2.885463 | 0.317181 | 0.015267 | 0.085496 | 0.051908 | 0.222901 | 0.158779 | 0.158779 | 0.082443 | 0 | 0 | 0 | 0.033113 | 0.218966 | 1,160 | 44 | 83 | 26.363636 | 0.689845 | 0.173276 | 0 | 0.105263 | 0 | 0 | 0.040966 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.026316 | 0 | 0.157895 | 0.315789 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cc4d02c3cfce0ada481de55b240a788dce0d962 | 6,819 | py | Python | src/build/mac_toolchain.py | Abreto/naiveproxy | 5d84bf9f18eb5a949558086bad7c945bb9051362 | [
"BSD-3-Clause"
] | 1 | 2020-03-11T03:44:02.000Z | 2020-03-11T03:44:02.000Z | src/build/mac_toolchain.py | bylond/naiveproxy | a04a8330a8bb0d0892259cf6d795271fbe6e6d0e | [
"BSD-3-Clause"
] | null | null | null | src/build/mac_toolchain.py | bylond/naiveproxy | a04a8330a8bb0d0892259cf6d795271fbe6e6d0e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
If should_use_hermetic_xcode.py emits "1", and the current toolchain is out of
date:
* Downloads the hermetic mac toolchain
* Requires CIPD authentication. Run `cipd auth-login`, use Google account.
* Accepts the license.
* If xcode-select and xcodebuild are not passwordless in sudoers, requires
user interaction.
* Downloads standalone binaries from [a possibly different version of Xcode].
The toolchain version can be overridden by setting MAC_TOOLCHAIN_REVISION with
the full revision, e.g. 9A235.
"""
from __future__ import print_function
import os
import pkg_resources
import platform
import plistlib
import shutil
import subprocess
import sys
# This contains binaries from Xcode 10.12.1, along with the 10.14 SDKs. To build
# this package, see comments in build/xcode_binaries.yaml
MAC_BINARIES_LABEL = 'infra_internal/ios/xcode/xcode_binaries/mac-amd64'
MAC_BINARIES_TAG = 'yjQtk3auAegQO4t18uBtBlKbj76xBjVtLE-3UM2faRUC'
# The toolchain will not be downloaded if the minimum OS version is not met.
# 17 is the major version number for macOS 10.13.
# 9E145 (Xcode 9.3) only runs on 10.13.2 and newer.
MAC_MINIMUM_OS_VERSION = 17
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
TOOLCHAIN_ROOT = os.path.join(BASE_DIR, 'mac_files')
TOOLCHAIN_BUILD_DIR = os.path.join(TOOLCHAIN_ROOT, 'Xcode.app')
def PlatformMeetsHermeticXcodeRequirements():
major_version = int(platform.release().split('.')[0])
return major_version >= MAC_MINIMUM_OS_VERSION
def _UseHermeticToolchain():
current_dir = os.path.dirname(os.path.realpath(__file__))
script_path = os.path.join(current_dir, 'mac/should_use_hermetic_xcode.py')
proc = subprocess.Popen([script_path, 'mac'], stdout=subprocess.PIPE)
return '1' in proc.stdout.readline()
def RequestCipdAuthentication():
"""Requests that the user authenticate to access Xcode CIPD packages."""
print('Access to Xcode CIPD package requires authentication.')
print('-----------------------------------------------------------------')
print()
print('You appear to be a Googler.')
print()
print('I\'m sorry for the hassle, but you may need to do a one-time manual')
print('authentication. Please run:')
print()
print(' cipd auth-login')
print()
print('and follow the instructions.')
print()
print('NOTE: Use your google.com credentials, not chromium.org.')
print()
print('-----------------------------------------------------------------')
print()
sys.stdout.flush()
def PrintError(message):
# Flush buffers to ensure correct output ordering.
sys.stdout.flush()
sys.stderr.write(message + '\n')
sys.stderr.flush()
def InstallXcodeBinaries():
"""Installs the Xcode binaries needed to build Chrome and accepts the license.
This is the replacement for InstallXcode that installs a trimmed down version
of Xcode that is OS-version agnostic.
"""
# First make sure the directory exists. It will serve as the cipd root. This
# also ensures that there will be no conflicts of cipd root.
binaries_root = os.path.join(TOOLCHAIN_ROOT, 'xcode_binaries')
if not os.path.exists(binaries_root):
os.makedirs(binaries_root)
# 'cipd ensure' is idempotent.
args = [
'cipd', 'ensure', '-root', binaries_root, '-ensure-file', '-'
]
# Buildbot slaves need to use explicit credentials. LUCI bots should NOT set
# this variable. This is temporary code used to make official Xcode bots
# happy. https://crbug.com/986488
creds = os.environ.get('MAC_TOOLCHAIN_CREDS')
if creds:
args.extend(['--service-account-json', creds])
p = subprocess.Popen(
args, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate(
input=MAC_BINARIES_LABEL + ' ' + MAC_BINARIES_TAG)
if p.returncode != 0:
print(stdout)
print(stderr)
RequestCipdAuthentication()
return 1
# Accept the license for this version of Xcode if it's newer than the
# currently accepted version.
cipd_xcode_version_plist_path = os.path.join(
binaries_root, 'Contents/version.plist')
cipd_xcode_version_plist = plistlib.readPlist(cipd_xcode_version_plist_path)
cipd_xcode_version = cipd_xcode_version_plist['CFBundleShortVersionString']
cipd_license_path = os.path.join(
binaries_root, 'Contents/Resources/LicenseInfo.plist')
cipd_license_plist = plistlib.readPlist(cipd_license_path)
cipd_license_version = cipd_license_plist['licenseID']
should_overwrite_license = True
current_license_path = '/Library/Preferences/com.apple.dt.Xcode.plist'
if os.path.exists(current_license_path):
current_license_plist = plistlib.readPlist(current_license_path)
xcode_version = current_license_plist['IDEXcodeVersionForAgreedToGMLicense']
if (pkg_resources.parse_version(xcode_version) >=
pkg_resources.parse_version(cipd_xcode_version)):
should_overwrite_license = False
if not should_overwrite_license:
return 0
# Use puppet's sudoers script to accept the license if its available.
license_accept_script = '/usr/local/bin/xcode_accept_license.py'
if os.path.exists(license_accept_script):
args = ['sudo', license_accept_script, '--xcode-version',
cipd_xcode_version, '--license-version', cipd_license_version]
subprocess.check_call(args)
return 0
# Otherwise manually accept the license. This will prompt for sudo.
print('Accepting new Xcode license. Requires sudo.')
sys.stdout.flush()
args = ['sudo', 'defaults', 'write', current_license_path,
'IDEXcodeVersionForAgreedToGMLicense', cipd_xcode_version]
subprocess.check_call(args)
args = ['sudo', 'defaults', 'write', current_license_path,
'IDELastGMLicenseAgreedTo', cipd_license_version]
subprocess.check_call(args)
args = ['sudo', 'plutil', '-convert', 'xml1', current_license_path]
subprocess.check_call(args)
return 0
def main():
if sys.platform != 'darwin':
return 0
if not _UseHermeticToolchain():
print('Skipping Mac toolchain installation for mac')
return 0
if not PlatformMeetsHermeticXcodeRequirements():
print('OS version does not support toolchain.')
return 0
# Delete obsolete hermetic full Xcode folder, the build now uses
# build/mac_files/xcode_binaries instead.
if os.path.exists(TOOLCHAIN_BUILD_DIR):
# TODO(thakis): Remove this after it's been here for a few months.
print('Deleting obsolete build/mac_files/Xcode.app...', end='')
sys.stdout.flush()
shutil.rmtree(TOOLCHAIN_BUILD_DIR)
print('done')
return InstallXcodeBinaries()
if __name__ == '__main__':
sys.exit(main())
| 34.969231 | 80 | 0.729432 | 906 | 6,819 | 5.318985 | 0.331126 | 0.017431 | 0.026562 | 0.019091 | 0.108114 | 0.074497 | 0.055198 | 0 | 0 | 0 | 0 | 0.011111 | 0.155301 | 6,819 | 194 | 81 | 35.149485 | 0.825521 | 0.30151 | 0 | 0.215517 | 0 | 0 | 0.238762 | 0.120017 | 0 | 0 | 0 | 0.005155 | 0 | 1 | 0.051724 | false | 0 | 0.068966 | 0 | 0.206897 | 0.206897 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cc5ad8039746ed8e70f6bec48980a31f5bed3e0 | 5,050 | py | Python | processout/activity.py | SMAKSS/processout-python | 2af27fe1eeb8d5106123ee502a9bedfe336c951b | [
"MIT"
] | 1 | 2020-10-11T07:29:51.000Z | 2020-10-11T07:29:51.000Z | processout/activity.py | SMAKSS/processout-python | 2af27fe1eeb8d5106123ee502a9bedfe336c951b | [
"MIT"
] | null | null | null | processout/activity.py | SMAKSS/processout-python | 2af27fe1eeb8d5106123ee502a9bedfe336c951b | [
"MIT"
] | null | null | null | try:
from urllib.parse import quote_plus
except ImportError:
from urllib import quote_plus
import processout
from processout.networking.request import Request
from processout.networking.response import Response
# The content of this file was automatically generated
class Activity(object):
def __init__(self, client, prefill = None):
self._client = client
self._id = None
self._project = None
self._project_id = None
self._title = None
self._content = None
self._level = None
self._created_at = None
if prefill != None:
self.fill_with_data(prefill)
@property
def id(self):
"""Get id"""
return self._id
@id.setter
def id(self, val):
"""Set id
Keyword argument:
val -- New id value"""
self._id = val
return self
@property
def project(self):
"""Get project"""
return self._project
@project.setter
def project(self, val):
"""Set project
Keyword argument:
val -- New project value"""
if val is None:
self._project = val
return self
if isinstance(val, dict):
obj = processout.Project(self._client)
obj.fill_with_data(val)
self._project = obj
else:
self._project = val
return self
@property
def project_id(self):
"""Get project_id"""
return self._project_id
@project_id.setter
def project_id(self, val):
"""Set project_id
Keyword argument:
val -- New project_id value"""
self._project_id = val
return self
@property
def title(self):
"""Get title"""
return self._title
@title.setter
def title(self, val):
"""Set title
Keyword argument:
val -- New title value"""
self._title = val
return self
@property
def content(self):
"""Get content"""
return self._content
@content.setter
def content(self, val):
"""Set content
Keyword argument:
val -- New content value"""
self._content = val
return self
@property
def level(self):
"""Get level"""
return self._level
@level.setter
def level(self, val):
"""Set level
Keyword argument:
val -- New level value"""
self._level = val
return self
@property
def created_at(self):
"""Get created_at"""
return self._created_at
@created_at.setter
def created_at(self, val):
"""Set created_at
Keyword argument:
val -- New created_at value"""
self._created_at = val
return self
def fill_with_data(self, data):
"""Fill the current object with the new values pulled from data
Keyword argument:
data -- The data from which to pull the new values"""
if "id" in data.keys():
self.id = data["id"]
if "project" in data.keys():
self.project = data["project"]
if "project_id" in data.keys():
self.project_id = data["project_id"]
if "title" in data.keys():
self.title = data["title"]
if "content" in data.keys():
self.content = data["content"]
if "level" in data.keys():
self.level = data["level"]
if "created_at" in data.keys():
self.created_at = data["created_at"]
return self
def all(self, options = {}):
"""Get all the project activities.
Keyword argument:
options -- Options for the request"""
self.fill_with_data(options)
request = Request(self._client)
path = "/activities"
data = {
}
response = Response(request.get(path, data, options))
return_values = []
a = []
body = response.body
for v in body['activities']:
tmp = processout.Activity(self._client)
tmp.fill_with_data(v)
a.append(tmp)
return_values.append(a)
return return_values[0]
def find(self, activity_id, options = {}):
"""Find a specific activity and fetch its data.
Keyword argument:
activity_id -- ID of the activity
options -- Options for the request"""
self.fill_with_data(options)
request = Request(self._client)
path = "/activities/" + quote_plus(activity_id) + ""
data = {
}
response = Response(request.get(path, data, options))
return_values = []
body = response.body
body = body["activity"]
obj = processout.Activity(self._client)
return_values.append(obj.fill_with_data(body))
return return_values[0]
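# --- Illustrative usage sketch (not part of the generated file) ---
# The ProcessOut client constructor shown here is an assumption about the SDK
# entry point; the project id/secret values are placeholders.
# client = processout.ProcessOut("proj_id", "proj_secret")
# activities = Activity(client).all()
# first = Activity(client).find(activities[0].id)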
| 24.396135 | 71 | 0.542574 | 560 | 5,050 | 4.744643 | 0.141071 | 0.060218 | 0.039142 | 0.055326 | 0.236357 | 0.143018 | 0.109146 | 0.109146 | 0.109146 | 0.109146 | 0 | 0.000619 | 0.360396 | 5,050 | 206 | 72 | 24.514563 | 0.821981 | 0.170495 | 0 | 0.264463 | 0 | 0 | 0.033868 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.14876 | false | 0 | 0.049587 | 0 | 0.355372 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cc723b5f5ac890194076952666fdd96857ba1db | 475 | py | Python | AirLog/__main__.py | minorsecond/AirLog | 91d4a801229281377545896cc62b291606e31df1 | [
"MIT"
] | null | null | null | AirLog/__main__.py | minorsecond/AirLog | 91d4a801229281377545896cc62b291606e31df1 | [
"MIT"
] | null | null | null | AirLog/__main__.py | minorsecond/AirLog | 91d4a801229281377545896cc62b291606e31df1 | [
"MIT"
] | null | null | null | import data as csv
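# `data` is assumed to be this project's local CSV helper module, providing
# the toHeadings() and write() functions used below; it is aliased as `csv`.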
__version__ = "0.2"
callsign_endpoint = "http://hamcall.net/call?callsign="
print(f"AirLog Version: {__version__}")
questions = ["Callsign", "Name", "Location", "Comm type", "Notes", "signal ( x/10 )"]
data = {}
while questions:
for question in list(questions):  # iterate over a copy: removing from the list mid-iteration would skip questions
print(question + "?")
answer = input(">")
if len(answer) != 0:
data[question] = answer
questions.remove(question)
headings = csv.toHeadings(data)
csv.write(headings, data)
| 21.590909 | 85 | 0.669474 | 60 | 475 | 5.15 | 0.616667 | 0.090615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014963 | 0.155789 | 475 | 21 | 86 | 22.619048 | 0.755611 | 0 | 0 | 0 | 0 | 0 | 0.244211 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cc88d9a4e9effcd400d874cbc738191ab51d5f4 | 2,642 | py | Python | adefa/tests/test_result.py | budtmo/adefa | 448812c2bef2ffa989e357529fb481b70231933b | [
"Apache-2.0"
] | 3 | 2017-08-22T12:40:46.000Z | 2017-11-01T13:08:15.000Z | adefa/tests/test_result.py | butomo1989/adefa | 448812c2bef2ffa989e357529fb481b70231933b | [
"Apache-2.0"
] | 1 | 2021-04-20T17:13:08.000Z | 2021-04-20T17:13:08.000Z | adefa/tests/test_result.py | budtmo/adefa | 448812c2bef2ffa989e357529fb481b70231933b | [
"Apache-2.0"
] | 2 | 2017-08-22T12:55:56.000Z | 2017-12-12T10:23:52.000Z | """Unit tests for retrieving test-run results."""
from unittest import TestCase
from adefa import cli
from adefa.tests import runner
import mock
class TestResult(TestCase):
"""Unit test class to test get test result."""
def test_valid_result(self):
cli.client.get_run = mock.MagicMock(return_value={'run': {'status': 'COMPLETED'}})
cli.client.list_jobs = mock.MagicMock(return_value={'jobs': [
{'arn': 'arn:aws:devicefarm:us-west-2:xxx', 'name': 'LG Nexus 5', 'unneeded_key1': 'value1'},
{'arn': 'arn:aws:devicefarm:us-west-2:xxx', 'name': 'Samsung Galaxy S7 Edge', 'unneeded_key2': 'value2'}
]})
cli.client.list_artifacts = mock.MagicMock(return_value={'artifacts': [
{'type': 'result-xml', 'url': 'https://xml', 'unneeded_key1': 'value1'},
{'type': 'video', 'url': 'https://video', 'unneeded_key2': 'value2'}
]})
result = runner.invoke(cli.result, ['arn'])
self.assertEqual(result.exit_code, 0)
result = runner.invoke(cli.result, ['arn', '--json-output', '--result-only'])
self.assertEqual(result.exit_code, 0)
def test_pull_attempts(self):
cli.client.get_run = mock.MagicMock(return_value={'run': {'status': 'IN PROGRESS'}})
total_attempts = 2
with mock.patch('time.sleep') as mocked_sleep:
self.assertFalse(mocked_sleep.called)
result = runner.invoke(cli.result, ['arn', '-a', total_attempts, '-d', 0.5])
self.assertTrue(mocked_sleep.called)
self.assertEqual(total_attempts, mocked_sleep.call_count)
self.assertEqual(result.exit_code, 0)
def test_empty_status(self):
cli.client.get_run = mock.MagicMock(return_value={'run': {'status': None}})
result = runner.invoke(cli.result, ['arn'])
self.assertEqual(result.exit_code, 0)
def test_attribute_error(self):
cli.client.get_run = mock.MagicMock(return_value=None)
result = runner.invoke(cli.result, ['arn'])
self.assertEqual(result.exit_code, -1)
cli.client.get_run = mock.MagicMock(return_value={'run': {'status': 'COMPLETED'}})
cli.client.list_jobs = mock.MagicMock(return_value=None)
result = runner.invoke(cli.result, ['arn'])
self.assertEqual(result.exit_code, -1)
def test_key_error(self):
cli.client.get_run = mock.MagicMock(return_value={
'run': {'status': 'COMPLETED'}
})
cli.client.list_jobs = mock.MagicMock(return_value={'jobs': None})
result = runner.invoke(cli.result, ['arn'])
self.assertEqual(result.exit_code, -1)
| 42.612903 | 115 | 0.629826 | 331 | 2,642 | 4.879154 | 0.259819 | 0.055728 | 0.117647 | 0.148607 | 0.636533 | 0.613003 | 0.575851 | 0.575851 | 0.525697 | 0.48483 | 0 | 0.010511 | 0.207797 | 2,642 | 61 | 116 | 43.311475 | 0.761108 | 0.028388 | 0 | 0.340426 | 0 | 0 | 0.15493 | 0.025039 | 0 | 0 | 0 | 0 | 0.212766 | 1 | 0.106383 | false | 0 | 0.085106 | 0 | 0.212766 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ccc8df1356db8bd08571c1845f788cd223e8846 | 2,082 | py | Python | rignet/train_.py | lelechen63/CIPS-3D | 49e34ecab7410ac357a3d467e347cd39ee442bd5 | [
"MIT"
] | 1 | 2022-03-20T08:10:29.000Z | 2022-03-20T08:10:29.000Z | rignet/train_.py | lelechen63/CIPS-3D | 49e34ecab7410ac357a3d467e347cd39ee442bd5 | [
"MIT"
] | 1 | 2022-03-21T04:54:10.000Z | 2022-03-21T04:54:10.000Z | rignet/train_.py | lelechen63/CIPS-3D | 49e34ecab7410ac357a3d467e347cd39ee442bd5 | [
"MIT"
] | 1 | 2022-02-25T01:28:10.000Z | 2022-02-25T01:28:10.000Z | import os
from argparse import ArgumentParser
from collections import OrderedDict
import torch
import torch.nn as nn
import random
import pickle
import pytorch_lightning as pl
from options.train_options import TrainOptions
from pytorch_lightning.callbacks import ModelCheckpoint
import numpy as np
import sys
sys.path.append('/home/uss00022/lelechen/github/CIPS-3D/photometric_optimization')
import util
# define flame config
flame_config = {
# FLAME
'flame_model_path': '/home/uss00022/lelechen/basic/flame_data/data/generic_model.pkl', # acquire it from FLAME project page
'flame_lmk_embedding_path': '/home/uss00022/lelechen/basic/flame_data/data/landmark_embedding.npy',
'tex_space_path': '/home/uss00022/lelechen/basic/flame_data/data/FLAME_texture.npz', # acquire it from FLAME project page
'camera_params': 3,
'shape_params': 100,
'expression_params': 50,
'pose_params': 6,
'tex_params': 50,
'use_face_contour': True,
'batch_size': 1,
'image_size': 1024,
'e_lr': 0.005,
'e_wd': 0.0001,
'savefolder': '/home/uss00022/lelechen/github/CIPS-3D/photometric_optimization/gg',
# weights of losses and reg terms
'w_pho': 8,
'w_lmks': 100,
'w_shape_reg': 1e-4,
'w_expr_reg': 1e-4,
'w_pose_reg': 0,
}
flame_config = util.dict2obj(flame_config)
opt = TrainOptions().parse()
# if opt.debug:
# opt.nThreads = 1
opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.name == 'Latent2Code':
from latent2code import Latent2CodeModule as module
model = module(flame_config, opt )
elif opt.name =='rig':
from rig import RigModule as module
model = module(flame_config, opt)
print (opt.isTrain,'!!!!!')
if opt.isTrain:
print ( opt.name)
model.train()
print ('+++++++++')
else:
print ('!!!!!!' + opt.name + '!!!!!!!!')
if opt.name == 'Latent2Code':
model.test()
| 30.173913 | 132 | 0.670029 | 271 | 2,082 | 4.98893 | 0.424354 | 0.048817 | 0.073965 | 0.053254 | 0.266272 | 0.266272 | 0.223373 | 0.174556 | 0 | 0 | 0 | 0.042745 | 0.202209 | 2,082 | 69 | 133 | 30.173913 | 0.771222 | 0.07781 | 0 | 0.071429 | 0 | 0 | 0.314689 | 0.18139 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.267857 | 0 | 0.267857 | 0.089286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cce67d9f4c01fa5e0acf82e671f6eb2877be221 | 1,758 | py | Python | tests/test_address.py | DPInvaders/pyacryl2 | bad81a4ae192e9c7e44ac858a76eee5c5abb5bc5 | [
"MIT"
] | 3 | 2020-03-31T09:39:53.000Z | 2021-12-21T06:07:30.000Z | tests/test_address.py | DPInvaders/pyacryl2 | bad81a4ae192e9c7e44ac858a76eee5c5abb5bc5 | [
"MIT"
] | 1 | 2020-02-25T07:23:46.000Z | 2020-02-25T07:23:46.000Z | tests/test_address.py | DPInvaders/pyacryl2 | bad81a4ae192e9c7e44ac858a76eee5c5abb5bc5 | [
"MIT"
] | 1 | 2020-04-25T10:59:34.000Z | 2020-04-25T10:59:34.000Z | import unittest
from unittest.mock import patch
import base58
from pyacryl2 import AcrylClient
from pyacryl2.utils import AcrylAddress
from pyacryl2.utils import AcrylAddressGenerator
from pyacryl2.utils import AcrylAsyncAddress
class AddressGeneratorTest(unittest.TestCase):
def setUp(self):
self.address_generator = AcrylAddressGenerator()
def test_generating_class(self):
address = self.address_generator.generate()
self.assertIsInstance(address, AcrylAddress)
address = self.address_generator.generate()
self.assertNotIsInstance(address, AcrylAsyncAddress)
def test_address_client(self):
address = self.address_generator.generate()
self.assertIsInstance(getattr(address, '_api_client'), AcrylClient)
class AddressMethodsTest(unittest.TestCase):
@patch('pyacryl2.utils.address.AcrylAddress')
def test_address_from_alias(self, mocked_address):
address = mocked_address()
address.from_alias.return_value = None
result = address.from_alias('acrylalias')
self.assertIs(None, result)
def test_offline_address(self):
address_generator = AcrylAddressGenerator()
address = address_generator.generate(online=False)
balance_result = address.get_balance()
self.assertIsInstance(balance_result, dict)
transfer_result = address.transfer_acryl('3EMZGnpVGcCWjdQWAU2Hc8SFUVUDnxKnprX', 1000, attachment="test")
self.assertIsInstance(transfer_result, dict)
def test_base58_seed_encode(self):
address_generator = AcrylAddressGenerator()
address = address_generator.generate()
self.assertEqual(address.base58_seed, base58.b58encode(address.seed.encode('latin-1')).decode())
| 35.16 | 112 | 0.746303 | 178 | 1,758 | 7.179775 | 0.308989 | 0.068858 | 0.093897 | 0.084507 | 0.235524 | 0.235524 | 0.205008 | 0.205008 | 0 | 0 | 0 | 0.015764 | 0.17008 | 1,758 | 49 | 113 | 35.877551 | 0.860178 | 0 | 0 | 0.138889 | 0 | 0 | 0.058054 | 0.039841 | 0 | 0 | 0 | 0 | 0.194444 | 1 | 0.166667 | false | 0 | 0.194444 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ccf132cb7ea99f836f5615b2f6cafbc71c7f9b8 | 1,871 | py | Python | core/atypes_emitter.py | lastick1/rexpert | cd5908f69cf54671ffe6bb2991c24d19e8f0036d | [
"MIT"
] | 1 | 2020-07-07T09:58:57.000Z | 2020-07-07T09:58:57.000Z | core/atypes_emitter.py | lastick1/rexpert | cd5908f69cf54671ffe6bb2991c24d19e8f0036d | [
"MIT"
] | 42 | 2018-11-11T08:08:46.000Z | 2020-01-10T11:15:47.000Z | core/atypes_emitter.py | lastick1/rexpert | cd5908f69cf54671ffe6bb2991c24d19e8f0036d | [
"MIT"
] | null | null | null | "Обработка событий из логов с использованием потоков (Rx)"
from __future__ import annotations
import logging
from typing import Tuple
from rx.subject import Subject
from rx.core.abc.disposable import Disposable
from .atypes import Atype0, Atype1, Atype2, Atype3, Atype4, Atype5, Atype6, Atype7, Atype8, Atype9, \
Atype10, Atype11, Atype12, Atype13, Atype14, Atype15, Atype16, Atype17, Atype18, Atype19, \
Atype20, Atype21, Atype22
from .parse_mission_log_line import parse, UnexpectedATypeWarning
class AtypesEmitter(Disposable):
"Источник событий из логов"
def __init__(self):
self._countries = dict()
self._constructors = (Atype0, Atype1, Atype2, Atype3, Atype4, Atype5, Atype6, Atype7, Atype8, Atype9,
Atype10, Atype11, Atype12, Atype13, Atype14, Atype15, Atype16, Atype17, Atype18, Atype19,
Atype20, Atype21, Atype22)
self._atypes: Tuple[Subject, ...] = tuple(Subject() for _ in range(23))  # one subject per AType 0..22
def dispose(self):
for subject in self._atypes:
subject.on_completed()
subject.dispose()
def process_line(self, line: str):
"Обработать строчку из логов"
try:
if 'AType' not in line:
raise NameError(f'ignored bad string: [{line}]')
atype = parse(line)
atype_id = atype.pop('atype_id')
if atype_id == 0:
self._countries = atype['countries']
if 'country_id' in atype.keys():
atype['coal_id'] = self._countries[atype['country_id']]
obj = self._constructors[atype_id](**atype)
self._atypes[atype_id].on_next(obj)
except UnexpectedATypeWarning:
logging.warning(f'unexpected atype: [{line}]')
except Exception as exception:
logging.exception(exception)
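# --- Illustrative usage sketch (not part of the original module) ---
# The mission-log line below is hypothetical; real lines come from the game's
# mission logs and must parse as an "AType" event.
# emitter = AtypesEmitter()
# emitter._atypes[0].subscribe(on_next=print)  # subscribe to Atype0 events
# emitter.process_line("AType:0 ...")  # hypothetical line
# emitter.dispose()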
| 39.808511 | 119 | 0.637092 | 208 | 1,871 | 5.586538 | 0.442308 | 0.03012 | 0.024096 | 0.041308 | 0.259897 | 0.259897 | 0.259897 | 0.259897 | 0.259897 | 0.259897 | 0 | 0.054665 | 0.266702 | 1,871 | 46 | 120 | 40.673913 | 0.792274 | 0.058792 | 0 | 0 | 0 | 0 | 0.112774 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.179487 | 0 | 0.282051 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cd06c63e71502403d0c04b0230831a34965b8e6 | 2,101 | py | Python | src/kitman/apps/iam/domain.py | madskronborg/saas-toolkit | d3dfb084a75d0ca4eba8f684d6d11cc6b254d8b9 | [
"MIT"
] | null | null | null | src/kitman/apps/iam/domain.py | madskronborg/saas-toolkit | d3dfb084a75d0ca4eba8f684d6d11cc6b254d8b9 | [
"MIT"
] | null | null | null | src/kitman/apps/iam/domain.py | madskronborg/saas-toolkit | d3dfb084a75d0ca4eba8f684d6d11cc6b254d8b9 | [
"MIT"
] | null | null | null | from typing import Generic, Protocol, TypeVar
from uuid import UUID
from fastapi.security.base import SecurityBase
from fastapi import Response
from kitman.core.domain import DependencyCallable, OpenAPIResponseType, IModel
from kitman.core.schemas import Schema
# Types
TUser = TypeVar("TUser", bound="IUser")
TSubject = TypeVar("TSubject")
TSubjectId = TypeVar("TSubjectId", bound=str | UUID | dict)
TCheckResponse = TypeVar("TCheckResponse")
TGrantResponse = TypeVar("TGrantResponse")
TRevokeResponse = TypeVar("TRevokeResponse")
TInspectResponse = TypeVar("TInspectResponse")
TLoginResponse = TypeVar("TLoginResponse", bound=Schema)
TLogoutResponse = TypeVar("TLogoutResponse", bound=Schema)
# Value objects
Obj = str | UUID
Relation = str
Namespace = str | None
# Models
class IUser(IModel):
username: str
email: str
first_name: str
last_name: str
is_active: bool
is_verified: bool
is_superuser: bool
# Services
class IUserService(
Protocol,
Generic[
TSubjectId,
TUser,
],
):
async def get_by_id(self, subject_id: TSubjectId) -> TUser:
...
UserServiceDependency = DependencyCallable[IUserService[TSubjectId, TUser]]
# Strategies
class IStrategy(Protocol, Generic[TSubjectId, TUser]):
async def read_token(
self, token: str | None, service: IUserService[TSubjectId, TUser]
) -> TUser | None:
...
async def write_token(self, user: TUser) -> str:
...
async def destroy_token(self, token: str, user: TUser) -> None:
...
# Transports
class ITransport(Protocol, Generic[TLoginResponse, TLogoutResponse]):
scheme: SecurityBase
async def get_login_response(
self, token: str, response: Response
) -> TLoginResponse:
...
async def get_logout_response(
self, token: str, response: Response
) -> TLogoutResponse:
...
@staticmethod
def get_openapi_login_responses_success() -> OpenAPIResponseType:
...
@staticmethod
def get_openapi_logout_responses_success() -> OpenAPIResponseType:
...
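# --- Illustrative sketch (not part of the original module) ---
# A minimal in-memory strategy showing how the IStrategy protocol can be
# satisfied structurally. The token scheme is a toy stand-in, not something
# the real package provides.
class InMemoryStrategy(Generic[TSubjectId, TUser]):
    def __init__(self) -> None:
        self._tokens: dict[str, TUser] = {}

    async def read_token(
        self, token: str | None, service: IUserService[TSubjectId, TUser]
    ) -> TUser | None:
        # Look the token up in the in-memory store; unknown tokens yield None
        return self._tokens.get(token) if token else None

    async def write_token(self, user: TUser) -> str:
        token = str(id(user))  # toy token; real code would sign or encode
        self._tokens[token] = user
        return token

    async def destroy_token(self, token: str, user: TUser) -> None:
        self._tokens.pop(token, None)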
| 23.087912 | 78 | 0.696335 | 213 | 2,101 | 6.760563 | 0.375587 | 0.033333 | 0.033333 | 0.041667 | 0.102778 | 0.102778 | 0 | 0 | 0 | 0 | 0 | 0 | 0.204188 | 2,101 | 90 | 79 | 23.344444 | 0.861244 | 0.02713 | 0 | 0.196721 | 0 | 0 | 0.056946 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032787 | false | 0 | 0.098361 | 0 | 0.327869 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cd15c015024ede7506c129916f56cd62f88c5c2 | 7,835 | py | Python | applied/tasks/absa/models/capsule_network.py | ndoll1998/AppliedTransformers | 76cbdef6fdd765b2178af71038a61e3e71e0cec9 | [
"MIT"
] | 3 | 2020-09-02T03:51:49.000Z | 2020-09-18T14:09:48.000Z | applied/tasks/absa/models/capsule_network.py | ndoll1998/AppliedTransformers | 76cbdef6fdd765b2178af71038a61e3e71e0cec9 | [
"MIT"
] | null | null | null | applied/tasks/absa/models/capsule_network.py | ndoll1998/AppliedTransformers | 76cbdef6fdd765b2178af71038a61e3e71e0cec9 | [
"MIT"
] | 2 | 2021-01-30T12:37:43.000Z | 2021-05-19T06:29:31.000Z | # import torch
import torch
import torch.nn as nn
import torch.nn.functional as F
# import applied transformers
from .base import ABSA_Model
from ..datasets.base import ABSA_Dataset, ABSA_DatasetItem
from applied.core.model import Encoder, InputFeatures
# import utils
from applied.common import align_shape
from typing import Tuple
""" Bi-Linear Attention """
class BilinearAttention(nn.Module):
def __init__(self, query_size, key_size, dropout=0):
super(BilinearAttention, self).__init__()
# create weight and dropout layer
self.weights = nn.Parameter(torch.FloatTensor(query_size, key_size))
self.dropout = nn.Dropout(dropout)
# randomize weights
nn.init.xavier_uniform_(self.weights)
def get_attention_weight(self, query, key, mask=None):
# compute attention scores
score = self.score(query, key)
# apply mask and softmax
if mask is not None:
score = score.masked_fill(~mask, -10000)
weight = F.softmax(score, dim=-1)
# apply dropout
return self.dropout(weight)
def forward(self, query, key, value, mask=None):
# compute attention weight
weight = self.get_attention_weight(query, key, mask)
# compute output
return weight @ value, weight
def score(self, query, key):
# compute score
return ((query @ self.weights).unsqueeze(-1) * key.transpose(1, 2)).sum(-2)
""" Capsule Network """
def squash(x, dim=-1):
squared = (x * x).sum(dim=dim, keepdim=True)
scale = torch.sqrt(squared) / (1.0 + squared)
return scale * x
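# Note: squash rescales each vector v to (|v|^2 / (1 + |v|^2)) * (v / |v|), so
# short vectors shrink toward zero while long vectors saturate below unit norm.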
class CapsuleNetwork(ABSA_Model):
""" "A Challenge Dataset and Effective Models for Aspect-Based Sentiment Analysis"
Paper: https://www.aclweb.org/anthology/D19-1654/
"""
def __init__(self,
encoder:Encoder,
num_labels:int,
capsule_size:int =300,
loss_smooth:float =0.1,
loss_lambda:float =0.6,
dropout_prob:float =0.1
) -> None:
ABSA_Model.__init__(self, encoder=encoder)
# loss hyperparameters
self.loss_smooth = loss_smooth
self.loss_lambda = loss_lambda
# aspect transform
self.aspect_transform = nn.Sequential(
nn.Linear(encoder.hidden_size, capsule_size),
nn.Dropout(dropout_prob)
)
# sentence transform
self.sentence_transform = nn.Sequential(
nn.Linear(encoder.hidden_size, capsule_size),
nn.Dropout(dropout_prob)
)
# attention
self.norm_attention = BilinearAttention(capsule_size, capsule_size, dropout_prob)
# capsule
self.guide_capsule = nn.Parameter(torch.Tensor(num_labels, capsule_size))
self.guide_weight = nn.Parameter(torch.Tensor(capsule_size, capsule_size))
# projection
self.scale = nn.Parameter(torch.tensor(5.0))
self.capsule_projection = nn.Linear(encoder.hidden_size, encoder.hidden_size * num_labels)
self.dropout = nn.Dropout(dropout_prob)
# reset parameters
self._reset_parameters()
def _reset_parameters(self) -> None:
# randomize parameters
nn.init.xavier_uniform_(self.guide_capsule)
nn.init.xavier_uniform_(self.guide_weight)
@torch.no_grad()
def _init_guide_capsule(self, labels):
self.eval()
# tokenize labels
label_tokens = [self.encoder.tokenizer.tokenize(label) for label in labels]
label_ids = [self.encoder.tokenizer.convert_tokens_to_ids(tokens) for tokens in label_tokens]
# create input ids for model
shape = (len(labels), max((len(ids) for ids in label_ids)))
input_ids = align_shape(label_ids, shape, self.encoder.tokenizer.pad_token_id)
# create input tensors
input_ids = torch.LongTensor(input_ids)
attention_mask = torch.LongTensor((input_ids != self.encoder.tokenizer.pad_token_id).long())
input_ids, attention_mask = input_ids.to(self.encoder.device), attention_mask.to(self.encoder.device)
# pass through model
label_embed = self.encoder.forward(input_ids, attention_mask=attention_mask)[0]
label_embed = self.sentence_transform(label_embed)
# compute average over timesteps
label_embed = label_embed.sum(dim=1) / attention_mask.sum(dim=1, keepdims=True).float()
# apply label embeddings
self.guide_capsule.data.copy_(label_embed)
def prepare(self, dataset:ABSA_Dataset) -> None:
# initialize guide capsule
self._init_guide_capsule(dataset.LABELS)
def build_features_from_item(self, item:ABSA_DatasetItem) -> Tuple[InputFeatures]:
return tuple(
InputFeatures(
text="[CLS]" + item.sentence + "[SEP]" + aspect + "[SEP]",
labels=label
) for aspect, label in zip(item.aspects, item.labels)
)
def build_target_tensors(self, features:Tuple[InputFeatures]) -> Tuple[torch.Tensor]:
return (torch.LongTensor([f.labels for f in features]),)
def capsule_guided_routing(self, primary_capsule, norm_weight):
# build guide matrix
guide_capsule = squash(primary_capsule)
guide_matrix = (primary_capsule @ self.guide_weight) @ self.guide_capsule.transpose(0, 1)
guide_matrix = F.softmax(guide_matrix, dim=-1)
guide_matrix = guide_matrix * norm_weight.unsqueeze(-1) * self.scale
# build category capsule
category_capsule = guide_matrix.transpose(1, 2) @ primary_capsule
category_capsule = self.dropout(category_capsule)
category_capsule = squash(category_capsule)
# return
return category_capsule
def forward(self, input_ids, attention_mask, token_type_ids):
# pass through encoder
sequence_output = self.encoder.forward(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids
)[0]
# create sentence and aspect masks
sent_mask = attention_mask.bool() & (token_type_ids == 0)
aspects_mask = attention_mask.bool() & (token_type_ids == 1).bool()
# get clean sentence and aspects
sent = sequence_output.masked_fill(~sent_mask.unsqueeze(-1), 0)
aspects = sequence_output.masked_fill(~aspects_mask.unsqueeze(-1), 0)
# average pool over aspect encodings
pooled_aspects = aspects.sum(dim=-2) / aspects_mask.sum(dim=-1, keepdims=True).float()
# primary/sentence capsule layer
encoded_sent = self.sentence_transform(sent)
primary_capsule = squash(encoded_sent, dim=-1)
# secondary/aspects capsule layer
encoded_aspects = self.aspect_transform(pooled_aspects)
secondary_capsule = squash(encoded_aspects, dim=-1)
# aspect-aware normalization
norm_weight = self.norm_attention.get_attention_weight(secondary_capsule, primary_capsule, sent_mask)
# capsule guided routing
category_capsule = self.capsule_guided_routing(primary_capsule, norm_weight)
category_capsule_norm = (category_capsule * category_capsule).sum(dim=-1)
category_capsule_norm = torch.sqrt(category_capsule_norm)
# return logits
return category_capsule_norm
def loss(self, logits, labels):
# build one-hot matrix
one_hot = torch.zeros_like(logits).to(logits.device)
one_hot = one_hot.scatter(1, labels.unsqueeze(-1), 1)
# compute loss
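# This is the capsule-style margin loss: the norm of the correct class is
# pushed above 1 - loss_smooth, the norms of the other classes are pushed
# below loss_smooth, and loss_lambda down-weights the negative term.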
a = torch.max(torch.zeros_like(logits), 1 - self.loss_smooth - logits)
b = torch.max(torch.zeros_like(logits), logits - self.loss_smooth)
loss = one_hot * a * a + self.loss_lambda * (1 - one_hot) * b * b
loss = loss.sum(dim=1).mean()
# add to outputs
return loss
| 41.675532 | 109 | 0.668156 | 971 | 7,835 | 5.166838 | 0.197734 | 0.041858 | 0.016942 | 0.020929 | 0.139326 | 0.10285 | 0.055013 | 0.030696 | 0.030696 | 0.030696 | 0 | 0.00967 | 0.234461 | 7,835 | 187 | 110 | 41.898396 | 0.826776 | 0.12508 | 0 | 0.033058 | 0 | 0 | 0.002224 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115702 | false | 0 | 0.066116 | 0.024793 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cd3b1fb3f495d190148790e7dbd913279a3389a | 5,604 | py | Python | vise/analyzer/vasp/plot_band.py | kumagai-group/vise | 8adfe61ad8f31767ec562f02f271e2495f357cd4 | [
"MIT"
] | 16 | 2020-07-14T13:14:05.000Z | 2022-03-04T13:39:30.000Z | vise/analyzer/vasp/plot_band.py | kumagai-group/vise | 8adfe61ad8f31767ec562f02f271e2495f357cd4 | [
"MIT"
] | 10 | 2021-03-15T20:47:45.000Z | 2021-08-19T00:47:12.000Z | vise/analyzer/vasp/plot_band.py | kumagai-group/vise | 8adfe61ad8f31767ec562f02f271e2495f357cd4 | [
"MIT"
] | 6 | 2020-03-03T00:42:39.000Z | 2022-02-22T02:34:47.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2020. Distributed under the terms of the MIT License.
import re
from copy import deepcopy
from typing import List
import numpy as np
from pymatgen.electronic_structure.plotter import BSPlotter
from pymatgen.io.vasp import Vasprun
from vise.analyzer.plot_band import BandPlotInfo, BandInfo, XTicks, BandEdge
from vise.analyzer.plot_brillouin_zone import BZPlotInfo
from vise.util.string import latexify
def greek_to_unicode(label: str) -> str:
d = {"GAMMA": "Γ", "SIGMA": "Σ", "DELTA": "Δ"}
for k, v in d.items():
label = label.replace(k, v)
return label
def italic_to_roman(label: str) -> str:
return re.sub(r"([A-Z])_([0-9])", r"{\\rm \1}_\2", label)
class BandPlotInfoFromVasp:
def __init__(self,
vasprun: Vasprun,
kpoints_filename: str,
vasprun2: Vasprun = None,
energy_window: List[float] = None):
self.vasprun = vasprun
self.kpoints_filename = kpoints_filename
self.vasprun2 = vasprun2
self.energy_window = energy_window
self.bs = self.vasprun.get_band_structure(self.kpoints_filename,
line_mode=True)
def make_band_plot_info(self):
bs_plotter = BSPlotter(self.bs)
plot_data = bs_plotter.bs_plot_data(zero_to_efermi=False)
distances = [list(d) for d in plot_data["distances"]]
self._composition = self.vasprun.final_structure.composition
band_info = [BandInfo(band_energies=self._remove_spin_key(plot_data),
band_edge=self._band_edge(self.bs, plot_data),
fermi_level=self.bs.efermi)]
if self.vasprun2:
bs2 = self.vasprun2.get_band_structure(self.kpoints_filename,
line_mode=True)
plot_data2 = BSPlotter(bs2).bs_plot_data(zero_to_efermi=False)
band_info.append(
BandInfo(band_energies=self._remove_spin_key(plot_data2),
band_edge=self._band_edge(bs2, plot_data2),
fermi_level=self.bs.efermi))
x = bs_plotter.get_ticks_old()
x_ticks = XTicks(_sanitize_labels(x["label"]), x["distance"])
return BandPlotInfo(band_info_set=band_info,
distances_by_branch=distances,
x_ticks=x_ticks,
title=self._title)
def make_bz_plot_info(self):
rec_lat = self.vasprun.final_structure.lattice.reciprocal_lattice
faces = [[[float(k) for k in j] for j in i] for i in rec_lat.get_wigner_seitz_cell()]
labels = {}
concat = False
band_paths = []
init_point = None
for kpoint in self.bs.kpoints:
if kpoint.label:
c_coords = list(kpoint.cart_coords)
f_coords = list(kpoint.frac_coords)
label = greek_to_unicode(kpoint.label)
labels[label] = {"cart": c_coords, "frac": f_coords}
if concat is False and init_point:
band_paths.append([init_point, c_coords])
init_point = c_coords
concat = True
else:
concat = False
return BZPlotInfo(faces, labels, band_paths, rec_lat.matrix.tolist())
def _remove_spin_key(self, plot_data) -> List[List[List[List[float]]]]:
"""
As of pymatgen 2020.11.11, ``energy`` is a dict storing the spin-up and
spin-down bands, {Spin: [np.array(nb_bands, kpoints), ...]}, as a list over
discontinuous k-path branches; the energies of multiple continuous branches
are stored together. This method reshapes the data to
[branch][spin][band][k-point].
"""
num_spin = len(plot_data["energy"])
num_branch = len(plot_data["energy"]["1"])
result = [[[] for _ in range(num_spin)] for __ in range(num_branch)]
for spin_idx, (_, branch_energies) in enumerate(
sorted(plot_data["energy"].items(),
key=lambda item: item[0],
reverse=True)):
for branch_idx, branch_energy in enumerate(branch_energies):
if self.energy_window:
removed_idxs = []
for i in range(len(branch_energy)):
_max = np.max(branch_energy[i, :])
_min = np.min(branch_energy[i, :])
if not self.in_energy(_max, _min):
removed_idxs.append(i)
x = np.delete(branch_energy, removed_idxs, axis=0).tolist()
else:
x = branch_energy.tolist()
result[branch_idx][spin_idx] = deepcopy(x)
return result
def in_energy(self, _max, _min):
return _max >= self.energy_window[0] and _min <= self.energy_window[1]
def _band_edge(self, bs, plot_data):
if bs.is_metal():
return None
else:
return BandEdge(
vbm=plot_data["vbm"][0][1],
cbm=plot_data["cbm"][0][1],
vbm_distances=[i[0] for i in plot_data["vbm"]],
cbm_distances=[i[0] for i in plot_data["cbm"]])
@property
def _title(self):
return latexify(self._composition.reduced_formula)
def _sanitize_label(label):
return italic_to_roman(greek_to_unicode(label))
def _sanitize_labels(labels):
return [_sanitize_label(label) for label in labels]
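# --- Illustrative usage sketch (not part of the original module) ---
# "vasprun.xml" and "KPOINTS" are placeholder paths from a band-structure run.
# plotter = BandPlotInfoFromVasp(Vasprun("vasprun.xml"), "KPOINTS")
# band_plot_info = plotter.make_band_plot_info()
# bz_plot_info = plotter.make_bz_plot_info()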
| 37.864865 | 93 | 0.579586 | 686 | 5,604 | 4.476676 | 0.271137 | 0.039075 | 0.016281 | 0.013676 | 0.130251 | 0.105503 | 0.091176 | 0.073592 | 0.030609 | 0 | 0 | 0.010234 | 0.31995 | 5,604 | 147 | 94 | 38.122449 | 0.795592 | 0.065489 | 0 | 0.06422 | 0 | 0 | 0.020463 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.100917 | false | 0 | 0.082569 | 0.045872 | 0.293578 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cd4a53c4c7dcb84977fbd1be35492605fe06f89 | 6,445 | py | Python | examples/calculator.py | MartinHowarth/shimmer | 541247482748300bbebf9bdec5ecdc19339fe665 | [
"MIT"
] | 3 | 2019-12-15T12:51:58.000Z | 2022-01-11T01:35:31.000Z | examples/calculator.py | MartinHowarth/shimmer | 541247482748300bbebf9bdec5ecdc19339fe665 | [
"MIT"
] | 101 | 2019-12-13T12:21:54.000Z | 2020-04-28T08:21:35.000Z | examples/calculator.py | MartinHowarth/shimmer | 541247482748300bbebf9bdec5ecdc19339fe665 | [
"MIT"
] | null | null | null | """Example of a simple calculator written using shimmer."""
from typing import Optional, List, Callable
from pyglet.window import key
import cocos
from shimmer.components.box_layout import create_box_layout, BoxGridDefinition
from shimmer.components.font import FontDefinition
from shimmer.data_structures import White, Black
from shimmer.keyboard import (
KeyboardActionDefinition,
KeyboardHandlerDefinition,
KeyboardHandler,
ChordDefinition,
)
from shimmer.widgets.button import ButtonDefinition, Button
from shimmer.widgets.text_box import TextBoxDefinition, TextBox
from shimmer.widgets.window import WindowDefinition, Window
class Calculator(Window):
"""A simple calculator."""
symbol_layout = [
["7", "8", "9", "+"],
["4", "5", "6", "-"],
["1", "2", "3", "/"],
["C", "0", "=", "*"],
]
layout_definition = BoxGridDefinition(num_columns=4, num_rows=4)
def __init__(self):
"""Create a Calculator."""
definition = WindowDefinition(title="Calculator", title_bar_height=None)
super(Calculator, self).__init__(definition)
self.calculation: str = ""
self.result: Optional[str] = None
# Create all the calculator buttons.
self.buttons = self.create_buttons()
# Arrange them into a grid layout.
self.button_layout = create_box_layout(self.layout_definition, self.buttons)
# Create the calculator display
self.text_box = TextBox(
TextBoxDefinition(
width=self.button_layout.rect.width,
height=30,
background_color=White,
font=FontDefinition("calibri", 16, color=Black),
)
)
# Create a keyboard handler and add it to this node so that it responds to keyboard events.
self.keyboard_handler = KeyboardHandler(self.create_keymap())
self.add(self.keyboard_handler)
# Add the display and the buttons to the Window body with sensible alignment.
self.add_child_to_body(self.button_layout)
self.add_child_to_body(self.text_box)
def create_buttons(self) -> List[Button]:
"""Create a button for each of the defined symbols in the symbol layout."""
buttons = []
# Reversed order because box layouts build from bottom-left.
for row in reversed(self.symbol_layout):
for symbol in row:
buttons.append(self.make_button_with_symbol(symbol))
return buttons
def make_button_with_symbol(self, symbol: str) -> Button:
"""Create a single button that will call `on_symbol_press` when pressed."""
def callback(*_, **__):
"""Callback when clicked. Ignore mouse event arguments as we don't need them."""
nonlocal symbol
self.on_button_press(symbol)
return True
return Button(
ButtonDefinition(
text=symbol,
on_press=callback,
width=50,
height=50,
keyboard_shortcut=symbol,
)
)
def on_button_press(self, symbol: str) -> None:
"""Handle any button press by updating the display and calculating results."""
# If we have a previous result, reset the calculation to start with that.
if self.result is not None:
self.calculation = self.result
self.result = None
if symbol == "=":
try:
exec(f"self.result = str({self.calculation})")
except Exception:
self.result = "Err"
self.calculation += symbol
self.calculation += self.result # type: ignore # Ignore type because of use of `exec`.
elif symbol == "C":
self.calculation = ""
self.result = None
else:
self.calculation += symbol
self.update_display()
def update_display(self):
"""Update the calculator display."""
self.text_box.text = self.calculation
def create_keymap(self) -> KeyboardHandlerDefinition:
"""
Create an additional keymap for this calculator.
Keyboard shortcuts defined on each button are already handled; this adds
handling for extra key presses that map to calculator events, such as
ENTER acting as "=".
"""
def on_key_press(symbol: str) -> Callable:
"""Callback for handling keyboard events."""
def inner() -> bool:
self.on_button_press(symbol)
# Return True to mark the keyboard event as handled.
return True
return inner
keymap = KeyboardHandlerDefinition()
# Make the ENTER keys also trigger equals.
keymap.add_keyboard_action(
KeyboardActionDefinition(
chords=[ChordDefinition(key.ENTER), ChordDefinition(key.NUM_ENTER)],
on_press=on_key_press("="),
)
)
# Make the backspace and escape keys also trigger clear.
keymap.add_keyboard_action(
KeyboardActionDefinition(
chords=[ChordDefinition(key.BACKSPACE), ChordDefinition(key.ESCAPE)],
on_press=on_key_press("C"),
)
)
return keymap
def create_new_calculator(*_, **__):
"""Create a new calculator and add it to the current scene."""
# Create a new calculator
calculator = Calculator()
calculator.position = 100, 100
# Add it to the current scene.
cocos.director.director.scene.add(calculator)
# Make the new calculator the currently focused window.
calculator.make_focused()
def main():
"""Run the calculator program."""
cocos.director.director.init()
new_calculator_button = Button(
ButtonDefinition(text="New Calculator", on_press=create_new_calculator)
)
new_calculator_button.position = (
0,
cocos.director.director.get_window_size()[1]
- new_calculator_button.rect.height,
)
calculator = Calculator()
calculator.position = 100, 100
calculator2 = Calculator()
calculator2.position = 200, 50
scene = cocos.scene.Scene(new_calculator_button, calculator, calculator2)
cocos.director.director.run(scene)
if __name__ == "__main__":
import logging
logging.basicConfig(level=logging.DEBUG)
main()
| 33.393782 | 99 | 0.626222 | 706 | 6,445 | 5.570822 | 0.298867 | 0.033054 | 0.021358 | 0.019069 | 0.122044 | 0.113399 | 0.052886 | 0.036105 | 0 | 0 | 0 | 0.009087 | 0.282855 | 6,445 | 192 | 100 | 33.567708 | 0.841843 | 0.226532 | 0 | 0.126984 | 0 | 0 | 0.020337 | 0.004725 | 0 | 0 | 0 | 0 | 0 | 1 | 0.087302 | false | 0 | 0.087302 | 0 | 0.246032 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cd55930cca43249b54a8a2cddae9a20e44e6b82 | 776 | py | Python | src/yellowdog_client/model/exceptions/service_client_exception.py | yellowdog/yellowdog-sdk-python-public | da69a7d6e45c92933e34fefcaef8b5d98dcd6036 | [
"Apache-2.0"
] | null | null | null | src/yellowdog_client/model/exceptions/service_client_exception.py | yellowdog/yellowdog-sdk-python-public | da69a7d6e45c92933e34fefcaef8b5d98dcd6036 | [
"Apache-2.0"
] | null | null | null | src/yellowdog_client/model/exceptions/service_client_exception.py | yellowdog/yellowdog-sdk-python-public | da69a7d6e45c92933e34fefcaef8b5d98dcd6036 | [
"Apache-2.0"
] | null | null | null | from typing import Tuple
class ServiceClientException(Exception):
__http_status_code = None # type: int
__message = None # type: str
__details = None # type: Tuple[str]
def __init__(self, http_status_code, message, details=()):
# type: (int, str, Tuple[str]) -> None
super(ServiceClientException, self).__init__()
self.__http_status_code = http_status_code
self.__message = message
self.__details = details if details is not None else ()
def __str__(self):
res = "[HTTP %s] %s" % (
str(self.__http_status_code),
self.__message
)
if self.__details:
res = "%s %s" % (res, ", ".join(self.__details))
return res
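# --- Illustrative usage sketch (not part of the original module) ---
# The detail string below is hypothetical.
# exc = ServiceClientException(404, "work requirement not found", ("id: wr-123",))
# str(exc)  # -> "[HTTP 404] work requirement not found id: wr-123"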
| 32.333333 | 63 | 0.576031 | 85 | 776 | 4.764706 | 0.329412 | 0.123457 | 0.17284 | 0.133333 | 0.232099 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.31701 | 776 | 23 | 64 | 33.73913 | 0.764151 | 0.094072 | 0 | 0 | 0 | 0 | 0.027221 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.055556 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cd7ca1f3d81d21216933ef7820282bad2c03dec | 6,891 | py | Python | fipie/portfolio.py | thoriuchi0531/tutti | d0fe202864edc9d257654743db6dc44a67a1d7ed | [
"MIT"
] | 1 | 2021-11-14T15:53:38.000Z | 2021-11-14T15:53:38.000Z | fipie/portfolio.py | thoriuchi0531/fipie | d0fe202864edc9d257654743db6dc44a67a1d7ed | [
"MIT"
] | null | null | null | fipie/portfolio.py | thoriuchi0531/fipie | d0fe202864edc9d257654743db6dc44a67a1d7ed | [
"MIT"
] | null | null | null | from typing import Optional
import numpy as np
import pandas as pd
from fipie import tree
from fipie.cluster import ClusterAlgo, NoCluster
from fipie.weighting import Weighting
class Portfolio:
""" A portfolio of instrument returns """
def __init__(self, ret: pd.DataFrame):
""" Create a ``Portfolio`` instance
:param ret: time-series of instrument returns
:type ret: pd.DataFrame
.. note::
``ret`` is frequency agnostic -- i.e., it can be daily, weekly or any other frequency as long as
``fipie.date.infer_ts_frequency`` can infer its frequency.
"""
ret = self._preprocess_returns(ret)
self.ret = ret
def __repr__(self):
n_asset = self.ret.shape[1]
if n_asset == 1:
return f'Portfolio({n_asset} asset)'
else:
return f'Portfolio({n_asset} assets)'
def _preprocess_returns(self, ret) -> pd.DataFrame:
if isinstance(ret, pd.DataFrame):
# No need to preprocess
return ret
elif isinstance(ret, pd.Series):
return ret.to_frame()
else:
raise ValueError(f'Unsupported data type for returns. Got {ret}')
def create_tree(self,
cluster: ClusterAlgo,
ret: Optional[pd.DataFrame] = None) -> tree.Tree:
""" Create a tree out of the return data frame
:param cluster: clustering algorithm instance
:type cluster: ClusterAlgo
:param ret: portfolio returns to use to create a tree. If not provided, use the returns provided upon
instantiation. If provided, this parameter will be used to create a tree instead.
:type ret: pd.DataFrame, optional
:return: ``Tree`` instance which groups instruments into clusters
"""
if ret is None:
ret = self.ret
return tree.create_tree(ret, cluster)
def _calculate_weight(self,
ret: pd.DataFrame,
weighting: Weighting,
cluster: ClusterAlgo,
instrument_only: bool = True,
final_weight: bool = True) -> pd.Series:
""" An inner function to compute the latest portfolio weights given the return, weighting scheme and clustering
algorithm.
:param ret: portfolio returns
:param weighting: weighting scheme instance
:param cluster: clustering algorithm instance
:param instrument_only: If True only weights for instruments are shown and ones for intermediate are omitted
:param final_weight: If True return the final weights for each instruments are returned.
:return: weights for each node
"""
tree = self.create_tree(cluster, ret)
tree.set_local_weights(weighting)
result = [(i.node_id, i.local_weight, i.weight) for i in tree.nodes]
result = pd.DataFrame(result, columns=['node_id', 'local_weight', 'weight'])
result = result.set_index('node_id')
if instrument_only:
# only select rows that are in the original return time-series
instruments = ret.columns.tolist()
result = result.reindex(index=instruments)
if final_weight:
result = result['weight']
else:
result = result['local_weight']
return result
def weight_latest(self,
weighting: Weighting,
cluster: ClusterAlgo = NoCluster(),
instrument_only: bool = True,
final_weight: bool = True) -> pd.Series:
r""" Compute the latest portfolio weights using the full return time-series.
:param weighting: weighting scheme instance
:type weighting: Weighting
:param cluster: clustering algorithm instance
:type cluster: ClusterAlgo
:param instrument_only: If True only weights for instruments are shown and ones for intermediate are omitted
:type instrument_only: bool, default True
:param final_weight: If True return the final weights for each instruments are returned. The portfolio return
:math:`r` can then be calculated as follows:
.. math::
r = \sum_i w_i \cdot r_i
where :math:`i` is the index for each instrument, :math:`w_i` is the final weight for instrument :math:`i`,
and :math:`r_i` is the return for instrument :math:`i`.
:type final_weight: bool, default True
:return: weights for each node
:rtype: pd.Series
"""
result = self._calculate_weight(self.ret, weighting, cluster,
instrument_only=instrument_only,
final_weight=final_weight)
return result
def weight_historical(self,
weighting: Weighting,
cluster: ClusterAlgo = NoCluster(),
instrument_only: bool = True,
final_weight: bool = True,
freq: str = 'm',
lookback: int = 52 * 2) -> pd.DataFrame:
""" Compute the historical portfolio weights by applying the calculation on a rolling basis
:param weighting: weighting scheme instance
:type weighting: Weighting
:param cluster: clustering algorithm instance
:type cluster: ClusterAlgo
:param instrument_only: If True only weights for instruments are shown and ones for intermediate are omitted
:type instrument_only: bool, default True
:param final_weight: If True return the final weights for each instruments are returned.
:type final_weight: bool, default True
:param freq: frequency to update the portfolio weights.
:type freq: str, default 'm'
:param lookback: the number of return samples (lookback horizon) to compute the portfolio weights
:type lookback: int, default 52 * 2 (2 years with weekly observations)
:return: historical weights for each node
:rtype: pd.DataFrame
"""
# rebalance dates
dates = self.ret.asfreq(freq, method='pad').index
result = []
for i in dates:
ret = self.ret.loc[:i].tail(lookback)
if len(ret) == lookback:
weight = self._calculate_weight(ret, weighting, cluster,
instrument_only=instrument_only,
final_weight=final_weight)
weight = weight.to_frame(i).T
else:
weight = pd.Series(np.nan, index=ret.columns).to_frame(i).T
result.append(weight)
result = pd.concat(result)
return result
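# --- Illustrative usage sketch (not part of the original module) ---
# `EqualWeight` is assumed to be a concrete Weighting implementation; swap in
# whichever Weighting/ClusterAlgo pair you actually use.
# ret = pd.DataFrame(np.random.normal(0, 0.01, (104, 3)),
#                    index=pd.date_range("2020-01-03", periods=104, freq="W-FRI"),
#                    columns=list("abc"))
# weights = Portfolio(ret).weight_latest(EqualWeight())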
| 41.263473 | 119 | 0.59672 | 788 | 6,891 | 5.126904 | 0.225888 | 0.038119 | 0.020792 | 0.030693 | 0.397277 | 0.332426 | 0.305198 | 0.305198 | 0.305198 | 0.288861 | 0 | 0.00195 | 0.330286 | 6,891 | 166 | 120 | 41.512048 | 0.873456 | 0.42824 | 0 | 0.291139 | 0 | 0 | 0.043391 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088608 | false | 0 | 0.075949 | 0 | 0.278481 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ce029b88f9781c44f9aac09c9b82431b0776cea | 2,924 | py | Python | python/nlutext/core/dmo/compute_skipgrams.py | jiportilla/ontology | 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | [
"MIT"
] | null | null | null | python/nlutext/core/dmo/compute_skipgrams.py | jiportilla/ontology | 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | [
"MIT"
] | null | null | null | python/nlutext/core/dmo/compute_skipgrams.py | jiportilla/ontology | 8a66bb7f76f805c64fc76cfc40ab7dfbc1146f40 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import logging
from base import BaseObject
def to_list(results):
"""
Purpose:
Simplify the ComputeSkipGrams result set
:param results:
a ComputeSkipGrams result set
looks like this
[(u'Problems', u'installing'), (u'Problems', u'adobe'), (u'Problems', u'acrobat'), ... ,]
:return:
a list of results
looks like this
["Problems installing", "Problems adobe", "Problems acrobat", ... ,]
"""
the_list = []
for result in list(results):
the_list.append(" ".join(list(result)))
return the_list
class ComputeSkipGrams(BaseObject):
def __init__(self):
"""
Reference:
<http://stackoverflow.com/questions/31847682/how-to-compute-skipgrams-in-python>
"""
BaseObject.__init__(self, __name__)
@staticmethod
def pad_sequence(sequence, n, pad_left=False, pad_right=False, pad_symbol=None):
from itertools import chain
if pad_left:
sequence = chain((pad_symbol,) * (n - 1), sequence)
if pad_right:
sequence = chain(sequence, (pad_symbol,) * (n - 1))
return sequence
def process(self, sequence, n, k, pad_left=False, pad_right=False, pad_symbol=None):
from itertools import combinations
sequence_length = len(sequence)
sequence = iter(sequence)
sequence = self.pad_sequence(sequence, n, pad_left, pad_right, pad_symbol)
if sequence_length + pad_left + pad_right < k:
raise Exception("The length of sentence + padding(s) < skip")
if n < k:
raise Exception("Degree of Ngrams (n) needs to be bigger than skip (k)")
history = []
nk = n + k
# Return point for recursion.
if nk < 1:
return
# If n+k longer than sequence, reduce k by 1 and recur
elif nk > sequence_length:
for ng in self.process(list(sequence), n, k - 1):
yield ng
while nk > 1: # Collect the first n+k-1 tokens as the initial history
history.append(next(sequence))
nk -= 1
# Iteratively drop the first item in history and pick up the next,
# while yielding skipgrams for each iteration.
for item in sequence:
history.append(item)
current_token = history.pop(0)
# Iterate through the rest of the history and
# pick out all combinations of the (n-1)-grams
for idx in list(combinations(range(len(history)), n - 1)):
ng = [current_token]
for _id in idx:
ng.append(history[_id])
yield tuple(ng)
# Recursively yield the skipgrams for the rest of the sequence where
# len(sequence) < n+k
for ng in list(self.process(history, n, k - 1)):
yield ng
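# --- Illustrative usage sketch (not part of the original module) ---
# 2-grams with up to 2 skips over a toy token list (cf. the to_list docstring):
# grams = ComputeSkipGrams().process("Problems installing adobe acrobat".split(), 2, 2)
# to_list(grams)  # -> ['Problems installing', 'Problems adobe', 'Problems acrobat', ...]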
| 32.853933 | 101 | 0.583447 | 360 | 2,924 | 4.627778 | 0.352778 | 0.009604 | 0.018007 | 0.02401 | 0.108643 | 0.096639 | 0.068427 | 0.068427 | 0.068427 | 0.068427 | 0 | 0.01003 | 0.318057 | 2,924 | 88 | 102 | 33.227273 | 0.825476 | 0.304378 | 0 | 0.042553 | 0 | 0 | 0.049459 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085106 | false | 0 | 0.085106 | 0 | 0.255319 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ce10de9cf24ed153cc68da30a0c3c4145e496d1 | 2,117 | py | Python | tests/unit/test_endpoint.py | jsenecal/pynetbox | 5cc08971bb37add2d086a65ff90ce684f7cb8936 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_endpoint.py | jsenecal/pynetbox | 5cc08971bb37add2d086a65ff90ce684f7cb8936 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_endpoint.py | jsenecal/pynetbox | 5cc08971bb37add2d086a65ff90ce684f7cb8936 | [
"Apache-2.0"
] | null | null | null | import unittest
import six
from pynetbox.core.endpoint import Endpoint
if six.PY3:
from unittest.mock import patch, Mock, call
else:
from mock import patch, Mock, call
class EndPointTestCase(unittest.TestCase):
def test_filter(self):
with patch(
"pynetbox.core.query.Request.get", return_value=Mock()
) as mock:
api = Mock(base_url="http://localhost:8000/api")
app = Mock(name="test")
mock.return_value = [{'id': 123}, {'id': 321}]
test_obj = Endpoint(api, app, "test")
test = test_obj.filter(test="test")
self.assertEqual(len(test), 2)
def test_filter_empty_kwargs(self):
api = Mock(base_url="http://localhost:8000/api")
app = Mock(name="test")
test_obj = Endpoint(api, app, "test")
with self.assertRaises(ValueError) as _:
test_obj.filter()
def test_filter_reserved_kwargs(self):
api = Mock(base_url="http://localhost:8000/api")
app = Mock(name="test")
test_obj = Endpoint(api, app, "test")
with self.assertRaises(ValueError) as _:
test_obj.filter(id=1)
def test_choices(self):
with patch(
"pynetbox.core.query.Request.options", return_value=Mock()
) as mock:
api = Mock(base_url="http://localhost:8000/api")
app = Mock(name="test")
mock.return_value = {
"actions": {
"POST": {
"letter": {
"choices": [
{"display_name": "A", "value": 1},
{"display_name": "B", "value": 2},
{"display_name": "C", "value": 3},
]
}
}
}
}
test_obj = Endpoint(api, app, "test")
choices = test_obj.choices()
self.assertEqual(choices["letter"][1]["display_name"], "B")
self.assertEqual(choices["letter"][1]["value"], 2)
| 32.569231 | 71 | 0.503543 | 224 | 2,117 | 4.625 | 0.258929 | 0.046332 | 0.042471 | 0.054054 | 0.633205 | 0.532819 | 0.484556 | 0.413127 | 0.413127 | 0.413127 | 0 | 0.022946 | 0.361833 | 2,117 | 64 | 72 | 33.078125 | 0.743893 | 0 | 0 | 0.339623 | 0 | 0 | 0.148323 | 0.031176 | 0 | 0 | 0 | 0 | 0.09434 | 1 | 0.075472 | false | 0 | 0.09434 | 0 | 0.188679 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ce28ec33f1b90ba4eea04b44fcf9d5773fb6ddb | 1,030 | py | Python | secondcounter.py | KhushrajSingh/secondcounter | 57f3cc3bfad329db576200c9088342dc18e3f544 | [
"Apache-2.0"
] | null | null | null | secondcounter.py | KhushrajSingh/secondcounter | 57f3cc3bfad329db576200c9088342dc18e3f544 | [
"Apache-2.0"
] | null | null | null | secondcounter.py | KhushrajSingh/secondcounter | 57f3cc3bfad329db576200c9088342dc18e3f544 | [
"Apache-2.0"
] | null | null | null | # secondcounter
from tkinter import *
import threading
import time
r=Tk()
r.geometry("400x400")
r.minsize(200,200)
r.maxsize(500,500)
speed=0
count=0
counting=IntVar()
def counter():
global speed,count
print(count)
while True:
time.sleep(1)
count+=speed
counting.set(count)
timer=threading.Thread(target=counter,daemon=True) # daemon thread so closing the window exits the program
timer.start()
def starttimer():
global speed
speed=1
def stoptimer():
global speed
speed=0
def resettimer():
global count,speed
count=0
speed=0
background=Label(bg="yellow",padx=400,pady=400)
background.place(x=0,y=0)
heading=Label(text="COUNTER",font="arial 30 bold",bg="red")
heading.place(x=0,y=10)
start=Button(text="START",command=starttimer,padx=40)
start.place(x=20,y=300)
stop=Button(text="STOP",command=stoptimer,padx=40)
stop.place(x=130,y=300)
reset=Button(text="RESET",command=resettimer,padx=37)
reset.place(x=245,y=300)
label=Label(textvariable=counting,font="arial 30 bold",bg="white")
label.place(x=230,y=100)
r.mainloop()
| 22.888889 | 66 | 0.715534 | 164 | 1,030 | 4.493902 | 0.420732 | 0.048847 | 0.043419 | 0.02171 | 0.046133 | 0 | 0 | 0 | 0 | 0 | 0 | 0.076412 | 0.123301 | 1,030 | 44 | 67 | 23.409091 | 0.739756 | 0.012621 | 0 | 0.162791 | 0 | 0 | 0.066995 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.093023 | false | 0 | 0.069767 | 0 | 0.162791 | 0.023256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ce5bd1625f87c4c25d913cfe72c92e407ce8cb2 | 1,164 | py | Python | python_code/file_utils.py | NicolaiP/cca_mtfs | 8438c80f4e31dd6c69921478ccfdfbc647e9d81e | [
"MIT"
] | null | null | null | python_code/file_utils.py | NicolaiP/cca_mtfs | 8438c80f4e31dd6c69921478ccfdfbc647e9d81e | [
"MIT"
] | null | null | null | python_code/file_utils.py | NicolaiP/cca_mtfs | 8438c80f4e31dd6c69921478ccfdfbc647e9d81e | [
"MIT"
] | null | null | null | import os
import pickle
def save_as_pickle(variable_name, save_name):
"""Saves variable as pickle file.
# Arguments
variable_name: Variable to pickle.
save_name: Name of file (without the '.pckl' extension).
# Example
dataPath = "C:/Users/nicol/Desktop/Master/Data/"
save_name = dataPath + 'predictionsResNet50ADAM_lr0001_decay0005'
file_utils.save_as_pickle(preds, save_name)
"""
f = open(save_name + '.pckl', 'wb')
pickle.dump(variable_name, f)
f.close()
def load_pickle_file(path):
"""Loads pickle file.
# Arguments
path: Path to file.
# Returns
var: Loaded variables.
# Example
dataPath = "C:/Users/nicol/Desktop/Master/Data/"
fileName = dataPath + 'predictionsResNet50ADAM_lr0001_decay0005'
var = file_utils.load_pickle_file(path)
"""
if path.split('.')[-1] == 'pckl':
var = pickle.load(open(path, 'rb'))
else:
var = pickle.load(open(path + '.pckl', 'rb'))
return var
def make_folder(data_path):
'''
Function that creates a folder if it doesn't exist
:param data_path:
:return:
'''
if not os.path.exists(data_path):
os.makedirs(data_path)
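# Hedged usage sketch (editor addition): a save/load round trip with the
# helpers above; the paths and values are illustrative only.
# make_folder('./data')
# save_as_pickle({'acc': 0.93}, './data/results')   # writes ./data/results.pckl
# results = load_pickle_file('./data/results')      # extension added automatically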
| 25.304348 | 73 | 0.623711 | 146 | 1,164 | 4.794521 | 0.410959 | 0.057143 | 0.034286 | 0.06 | 0.182857 | 0.122857 | 0.122857 | 0.122857 | 0 | 0 | 0 | 0.024138 | 0.252577 | 1,164 | 45 | 74 | 25.866667 | 0.78046 | 0.513746 | 0 | 0 | 0 | 0 | 0.045161 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.133333 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ceb9d508c92b3403c67b0ebe8937b62d1700952 | 2,466 | py | Python | src/com/web/utils/Log.py | jenniferhaoba/AutomationTest | e73380d57c0f4c97cfa8471e6ec164970eb94b83 | [
"MIT"
] | null | null | null | src/com/web/utils/Log.py | jenniferhaoba/AutomationTest | e73380d57c0f4c97cfa8471e6ec164970eb94b83 | [
"MIT"
] | null | null | null | src/com/web/utils/Log.py | jenniferhaoba/AutomationTest | e73380d57c0f4c97cfa8471e6ec164970eb94b83 | [
"MIT"
] | null | null | null | """
Logging class. Reads the configuration file to define the log level, log file name, log format, and so on.
Log level severity: CRITICAL > ERROR > WARNING > INFO > DEBUG > NOTSET
Typically the logger is simply imported directly:
from utils.log import logger
logger.info('test log')
"""
import logging
from logging.handlers import TimedRotatingFileHandler
from com.web.utils.Config import LOG_PATH, Config
import os


class Logger(object):
    def __init__(self, logger_name='AutoTestlog'):
        self.logger = logging.getLogger(logger_name)
        logging.root.setLevel(logging.NOTSET)
        c = Config().get('log')
        # Use the log settings from the config file when present; otherwise fall back to 'test.log'
        self.log_file_name = c.get('file_name') if c and c.get('file_name') else 'test.log'
        # Number of rotated log files to keep
        self.backup_count = c.get('backup_count') if c and c.get('backup_count') else 7
        self.console_output_level = c.get('console_level') if c and c.get('console_level') else 'WARNING'
        self.file_output_level = c.get('file_level') if c and c.get('file_level') else 'DEBUG'
        pattern = c.get('pattern') if c and c.get('pattern') else '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        self.formatter = logging.Formatter(pattern)

    def get_logger(self):
        """Add the log handlers to the logger and return it; if the logger already has handlers, return it as-is.
        Two handlers are added: one writes to the console, the other to the log file.
        The two handlers have different log levels, configurable in the config file.
        """
        if not self.logger.handlers:  # avoid adding handlers twice, which duplicates log lines
            console_handler = logging.StreamHandler()
            console_handler.setFormatter(self.formatter)
            console_handler.setLevel(self.console_output_level)
            self.logger.addHandler(console_handler)
            # Roll over to a new log file each day, keeping at most backup_count old files
            file_handler = TimedRotatingFileHandler(filename=os.path.join(LOG_PATH, self.log_file_name),
                                                    when='D',
                                                    interval=1,  # rotate daily
                                                    backupCount=self.backup_count,
                                                    delay=True,
                                                    encoding='utf-8'
                                                    )
            file_handler.setFormatter(self.formatter)
            file_handler.setLevel(self.file_output_level)
            self.logger.addHandler(file_handler)
        return self.logger
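# Hedged usage sketch (editor addition): the module-level `logger` created
# just below is the intended entry point at call sites, e.g.
#   from com.web.utils.Log import logger
#   logger.info('case started')
#   logger.error('element not found')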
loggerUtils = Logger()  # Instance methods can't be called on the class directly; instantiate first, then call
logger = loggerUtils.get_logger()
| 45.666667 | 120 | 0.579481 | 258 | 2,466 | 5.379845 | 0.356589 | 0.028818 | 0.021614 | 0.025216 | 0.09366 | 0.034582 | 0 | 0 | 0 | 0 | 0 | 0.001803 | 0.325223 | 2,466 | 54 | 121 | 45.666667 | 0.832332 | 0.148418 | 0 | 0 | 0 | 0 | 0.096135 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.117647 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4ced5e6d415f7d6cd756a58a06e9108dee5a88c4 | 8,385 | py | Python | tests/test_signature_parser.py | mulkieran/into-dbus-python | 20465e418a1189e2371a11b4a4032ea9f481366a | [
"Apache-2.0"
] | null | null | null | tests/test_signature_parser.py | mulkieran/into-dbus-python | 20465e418a1189e2371a11b4a4032ea9f481366a | [
"Apache-2.0"
] | null | null | null | tests/test_signature_parser.py | mulkieran/into-dbus-python | 20465e418a1189e2371a11b4a4032ea9f481366a | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test signature parsing.
"""
import string
import unittest
import dbus
from dbus_signature_pyparsing import Parser
from hypothesis import given
from hypothesis import settings
from hypothesis import strategies
from hs_dbus_signature import dbus_signatures
from into_dbus_python import xformer
from into_dbus_python import signature
from into_dbus_python import ToDbusXformer
# Omits 'h' (the unix fd type), because it is unclear which fds are valid for dbus
SIGNATURE_STRATEGY = dbus_signatures(max_codes=20, blacklist="h")
OBJECT_PATH_STRATEGY = strategies.one_of(
    strategies.builds(
        '/'.__add__,
        strategies.builds(
            '/'.join,
            strategies.lists(
                strategies.text(
                    alphabet=[
                        x for x in string.digits +
                        string.ascii_uppercase +
                        string.ascii_lowercase +
                        '_'
                    ],
                    min_size=1,
                    max_size=10
                ),
                max_size=10
            )
        )
    )
)
class StrategyGenerator(Parser):
    """
    Generate a hypothesis strategy for generating objects for a particular
    dbus signature which make use of base Python classes.
    """
    # pylint: disable=too-few-public-methods

    @staticmethod
    def _handleArray(toks):
        """
        Generate the correct strategy for an array signature.

        :param toks: the list of parsed tokens
        :returns: strategy that generates an array or dict as appropriate
        :rtype: strategy
        """
        if len(toks) == 5 and toks[1] == '{' and toks[4] == '}':
            return strategies.dictionaries(keys=toks[2], values=toks[3])
        elif len(toks) == 2:
            return strategies.lists(elements=toks[1])
        else: # pragma: no cover
            raise ValueError("unexpected tokens")

    def __init__(self):
        super(StrategyGenerator, self).__init__()

        # pylint: disable=unnecessary-lambda
        self.BYTE.setParseAction(
            lambda: strategies.integers(min_value=0, max_value=255)
        )
        self.BOOLEAN.setParseAction(lambda: strategies.booleans())
        self.INT16.setParseAction(
            lambda: strategies.integers(min_value=-0x8000, max_value=0x7fff)
        )
        self.UINT16.setParseAction(
            lambda: strategies.integers(min_value=0, max_value=0xffff)
        )
        self.INT32.setParseAction(
            lambda: strategies.integers(
                min_value=-0x80000000,
                max_value=0x7fffffff
            )
        )
        self.UINT32.setParseAction(
            lambda: strategies.integers(min_value=0, max_value=0xffffffff)
        )
        self.INT64.setParseAction(
            lambda: strategies.integers(
                min_value=-0x8000000000000000,
                max_value=0x7fffffffffffffff
            )
        )
        self.UINT64.setParseAction(
            lambda: strategies.integers(
                min_value=0,
                max_value=0xffffffffffffffff
            )
        )
        self.DOUBLE.setParseAction(lambda: strategies.floats())
        self.STRING.setParseAction(lambda: strategies.text())
        self.OBJECT_PATH.setParseAction(lambda: OBJECT_PATH_STRATEGY)
        self.SIGNATURE.setParseAction(lambda: SIGNATURE_STRATEGY)

        def _handleVariant():
            """
            Generate the correct strategy for a variant signature.

            :returns: strategy that generates an object that inhabits a variant
            :rtype: strategy
            """
            signature_strategy = dbus_signatures(
                max_codes=5,
                min_complete_types=1,
                max_complete_types=1,
                blacklist="h"
            )
            return signature_strategy.flatmap(
                lambda x: strategies.tuples(
                    strategies.just(x),
                    self.COMPLETE.parseString(x)[0]
                )
            )

        self.VARIANT.setParseAction(_handleVariant)
        self.ARRAY.setParseAction(StrategyGenerator._handleArray)
        self.STRUCT.setParseAction(
            # pylint: disable=used-before-assignment
            lambda toks: strategies.tuples(*toks[1:-1])
        )
STRATEGY_GENERATOR = StrategyGenerator().PARSER
def _descending(dbus_object):
    """
    Verify levels of variant values always descend by one.

    :param object dbus_object: a dbus object
    :returns: None if there was a failure of the property, otherwise the level
    :rtype: int or NoneType

    None is a better choice than False, for 0, a valid variant level, is always
    interpreted as False.
    """
    # pylint: disable=too-many-return-statements
    if isinstance(dbus_object, dbus.Dictionary):
        key_levels = [_descending(x) for x in dbus_object.keys()]
        value_levels = [_descending(x) for x in dbus_object.values()]
        if any(k is None for k in key_levels) or \
           any(v is None for v in value_levels):
            return None

        max_key_level = max(key_levels) if key_levels != [] else 0
        max_value_level = max(value_levels) if value_levels != [] else 0
        max_level = max(max_key_level, max_value_level)

        variant_level = dbus_object.variant_level
        if variant_level == 0:
            return max_level
        if variant_level != max_level + 1:
            return None
        else:
            return variant_level
    elif isinstance(dbus_object, (dbus.Array, dbus.Struct)):
        levels = [_descending(x) for x in dbus_object]
        if any(level is None for level in levels):
            return None

        max_level = max(levels) if levels != [] else 0

        variant_level = dbus_object.variant_level
        if variant_level == 0:
            return max_level
        if variant_level != max_level + 1:
            return None
        else:
            return variant_level
    else:
        variant_level = dbus_object.variant_level
        return variant_level if variant_level in (0, 1) else None
class ParseTestCase(unittest.TestCase):
    """
    Test parsing various signatures.
    """
    _PARSER = ToDbusXformer()

    @given(SIGNATURE_STRATEGY)
    @settings(max_examples=100)
    def testParsing(self, a_signature):
        """
        Test that parsing is always successful.

        Verify that the original signature corresponds to the signature
        returned by the parser and to the signature of the generated value.

        Verify that the variant levels always descend within the constructed
        value, always by single steps and that leaves of the value always
        have variant level of 0 or 1.
        """
        base_type_objects = [
            x.example() for x in
            STRATEGY_GENERATOR.parseString(a_signature, parseAll=True)
        ]
        results = self._PARSER.PARSER.parseString(a_signature, parseAll=True)

        funcs = [f for (f, _) in results]
        sigs = [s for (_, s) in results]

        results = [f(x) for (f, x) in zip(funcs, base_type_objects)]
        values = [v for (v, _) in results]
        levels = [l for (_, l) in results]

        for sig_orig, (sig_synth, (level, value)) in \
                zip(dbus.Signature(a_signature), zip(sigs, zip(levels, values))):
            self.assertEqual(sig_orig, sig_synth)
            if 'v' not in sig_orig:
                self.assertEqual(level, 0)
            self.assertIsNotNone(_descending(value))
            self.assertEqual(signature(value), sig_orig)

        pairs = zip(
            dbus.Signature(a_signature),
            xformer(a_signature)(base_type_objects)
        )

        # test equality of signatures, rather than results, since nan != nan
        for sig, value in pairs:
            self.assertEqual(sig, signature(value))
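# Hedged usage sketch (editor addition): drawing a sample Python object for a
# dbus signature with the strategy generator above; .example() is a hypothesis
# debugging helper and should not be used inside tests themselves.
# strategy = STRATEGY_GENERATOR.parseString("a{sv}", parseAll=True)[0]
# print(strategy.example())  # e.g. a dict of str -> (signature, value) variant pairs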
| 32.374517 | 79 | 0.620394 | 967 | 8,385 | 5.23061 | 0.284385 | 0.03796 | 0.059312 | 0.05259 | 0.225188 | 0.159154 | 0.109727 | 0.109727 | 0.090154 | 0.046659 | 0 | 0.017556 | 0.300298 | 8,385 | 258 | 80 | 32.5 | 0.844554 | 0.237567 | 0 | 0.1625 | 0 | 0 | 0.004077 | 0 | 0 | 0 | 0.016634 | 0 | 0.03125 | 1 | 0.03125 | false | 0 | 0.06875 | 0 | 0.19375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cef2678285501dd8561a07546a5065697d19120 | 2,435 | py | Python | src/ychaos/utils/builtins.py | eisenhowerj/ychaos | de7572e35d89eedb5d7d2ad6a8e1fda52179eccc | [
"Apache-2.0"
] | null | null | null | src/ychaos/utils/builtins.py | eisenhowerj/ychaos | de7572e35d89eedb5d7d2ad6a8e1fda52179eccc | [
"Apache-2.0"
] | null | null | null | src/ychaos/utils/builtins.py | eisenhowerj/ychaos | de7572e35d89eedb5d7d2ad6a8e1fda52179eccc | [
"Apache-2.0"
] | null | null | null | # Copyright 2021, Yahoo
# Licensed under the terms of the Apache 2.0 license. See the LICENSE file in the project root for terms
import re
from enum import Enum
from types import DynamicClassAttribute, SimpleNamespace
from typing import Any, Iterable, List, Optional, Type, TypeVar
T = TypeVar("T")
class BuiltinUtils:

    class Float:
        NAN = float("NaN")

    @classmethod
    def wrap_if_non_iterable(cls, obj: Any):
        """
        Wraps an object into a List only if the object is not
        an iterable. If the object is already an Iterable, the method returns
        the object type converted to List.

        Args:
            obj: Any object

        Returns:
            Wrapped list if non iterable
        """
        if isinstance(obj, Iterable):
            return list(obj)
        else:
            return cls.wrap_as_list(obj)

    @classmethod
    def wrap_as_list(cls, obj) -> List:
        """
        Wrap an object to a List.

        Args:
            obj: Any object

        Returns:
            Wrapped list
        """
        return [
            obj,
        ]

    @classmethod
    def pass_coroutine(cls, *args, **kwargs):
        """This method literally does nothing"""
        pass


class AEnum(Enum):
    """
    Advanced Enumeration that attaches metadata to each Enum member.
    This adds a two-level mapping: NAME -> VALUE -> METADATA. The metadata
    is optional and can be set to a SimpleNamespace.
    """

    def __new__(cls: Type[T], value, metadata: Optional[SimpleNamespace] = None):
        obj = object.__new__(cls)
        obj._value_ = value
        obj.metadata = metadata
        return obj

    @DynamicClassAttribute
    def value(self) -> str:
        # mypy causes issues without this
        return self._value_


class FQDN(str):
    _regex = r"^((?![-])[-A-Z\d]{1,63}(?<!-)[.])*(?!-)[-A-Z\d]{1,63}(?<!-)[.]?$"

    @classmethod
    def __get_validators__(cls):
        yield cls.validate

    @classmethod
    def validate(cls, fqdn: str):
        if len(fqdn) > 255:
            raise ValueError(f"{fqdn} is not a valid FQDN")
        # Strip a single trailing dot (absolute-FQDN form) before matching labels
        fqdn = fqdn[:-1] if fqdn[-1] == "." else fqdn
        allowed = re.compile(cls._regex, re.IGNORECASE)
        if all(allowed.match(x) for x in fqdn.split(".")):
            return fqdn
        else:
            raise ValueError(f"{fqdn} is not a valid FQDN")

    def __new__(cls, *args, **kwargs):
        return cls.validate(args[0])
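# Hedged usage sketch (editor addition), assuming the classes above; `Color`
# is an illustrative name, not part of the module.
# class Color(AEnum):
#     RED = "red", SimpleNamespace(hex="#ff0000")
# Color.RED.value         # -> "red"
# Color.RED.metadata.hex  # -> "#ff0000"
# FQDN("example.com.")    # -> "example.com" (trailing dot stripped)
# FQDN("-bad-.example")   # raises ValueError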
| 26.182796 | 105 | 0.582341 | 311 | 2,435 | 4.459807 | 0.382637 | 0.050469 | 0.025955 | 0.018745 | 0.05912 | 0.050469 | 0.050469 | 0.050469 | 0.050469 | 0 | 0 | 0.011323 | 0.310883 | 2,435 | 92 | 106 | 26.467391 | 0.815256 | 0.275154 | 0 | 0.1875 | 0 | 0.020833 | 0.075871 | 0.039801 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0.041667 | 0.083333 | 0.041667 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cf04fc94d1d561bab8a6c6919bd8c2acfb5aa27 | 5,091 | py | Python | piProbe.py | MelonSmasher/piProbe | 3f7df58fd19c6dd48851475b1673e0ef45aeabc3 | [
"MIT"
] | 15 | 2019-04-24T13:53:22.000Z | 2022-01-25T17:34:04.000Z | piProbe.py | MelonSmasher/piProbe | 3f7df58fd19c6dd48851475b1673e0ef45aeabc3 | [
"MIT"
] | null | null | null | piProbe.py | MelonSmasher/piProbe | 3f7df58fd19c6dd48851475b1673e0ef45aeabc3 | [
"MIT"
] | 3 | 2019-07-06T20:07:17.000Z | 2022-03-22T23:38:21.000Z | import sys
import os
import socket
import json
import time
from influxdb import InfluxDBClient
import Adafruit_DHT


def envBool(name, default=False):
    # Environment variables are always strings, and bool('false') is True,
    # so map the common truthy spellings explicitly.
    return str(os.environ.get(name, default)).lower() in ('1', 'true', 'yes')


def getConfig():
    # Pull the configuration from env vars or the config file
    if os.environ.get('AM_I_IN_A_DOCKER_CONTAINER', False):
        c = {
            "debug": envBool('DEBUG'),
            "influxdb": {
                "host": os.environ.get('INFLUXDB_HOST', None),
                "port": int(os.environ.get('INFLUXDB_PORT', 8086)),
                "user": os.environ.get('INFLUXDB_USER', ""),
                "password": os.environ.get('INFLUXDB_PASSWORD', ""),
                "dbname": os.environ.get('INFLUXDB_DB', None),
                "interval": int(os.environ.get('INFLUXDB_INTERVAL', 10)),
                "ssl": envBool('INFLUXDB_SSL'),
                "ssl_verify": envBool('INFLUXDB_SSL_VERIFY'),
                "location_tag": os.environ.get('INFLUXDB_LOCATION_TAG', None)
            },
            "gpio": {
                "pin": int(os.environ.get('GPIO_PIN', 4)),
                "sensor": str(os.environ.get('GPIO_SENSOR', "")).upper()
            }
        }
    elif os.path.isfile('/etc/piProbe/config.json'):
        with open('/etc/piProbe/config.json') as json_file:
            c = json.load(json_file)
    elif os.path.isfile('./config.json'):
        with open('./config.json') as json_file:
            c = json.load(json_file)
    else:
        print("Could not find configuration file.")
        exit(1)
    if c['influxdb']['host'] is None:
        print("Please supply an INFLUXDB HOST value.")
        exit(1)
    if c['influxdb']['dbname'] is None:
        print("Please supply an INFLUXDB DB value.")
        exit(1)
    if c['influxdb']['location_tag'] is None:
        print("Please supply an INFLUXDB LOCATION TAG value.")
        exit(1)
    # Map the sensor name onto the matching Adafruit_DHT sensor constant
    if c['gpio']['sensor'] == 'DHT22':
        c['gpio']['sensor'] = Adafruit_DHT.DHT22
    elif c['gpio']['sensor'] == 'DHT11':
        c['gpio']['sensor'] = Adafruit_DHT.DHT11
    elif c['gpio']['sensor'] == 'AM2302':
        c['gpio']['sensor'] = Adafruit_DHT.AM2302
    else:
        print("Please supply a valid GPIO SENSOR value (DHT11/DHT22/AM2302).")
        exit(1)
    # Set the device name used in influx tags
    c['devicename'] = os.environ.get(
        'BALENA_DEVICE_NAME_AT_INIT', socket.gethostname())
    return c


def debugOut(valueC, valueF, valueH):
    print('Debug Values:')
    print('C: ' + str(valueC))
    print('F: ' + str(valueF))
    print('H: ' + str(valueH) + '%')
    print('')


def mainLoop(config, client):
    # The main program loop: poll the probe once
    humidity, temperature = Adafruit_DHT.read_retry(
        config['gpio']['sensor'], int(config['gpio']['pin']))
    # Don't accept null values; if either is null, poll the probe again without sleeping
    if humidity is not None and temperature is not None:
        # Store our values
        valueC = float(temperature)
        valueF = float(temperature * 9 / 5.0 + 32)
        valueH = float(humidity)
        # If debug is enabled, output the values to stdout
        if config['debug']:
            debugOut(valueC, valueF, valueH)
        # Filter bogus humidity readings; if the reading is over 100%, poll again without sleeping
        if humidity <= 100:
            # Format the measurements for influx
            data = [
                {
                    "measurement": "temperature",
                    "tags": {
                        "host": config['devicename'],
                        "location": config['influxdb']['location_tag'],
                    },
                    "fields": {
                        "value_c": valueC,
                        "value_f": valueF
                    }
                },
                {
                    "measurement": "humidity",
                    "tags": {
                        "host": config['devicename'],
                        "location": config['influxdb']['location_tag'],
                    },
                    "fields": {
                        "value": valueH
                    }
                }
            ]
            # Write the data to influx
            client.write_points(data, time_precision='s')
            # Wait out the polling interval
            time.sleep(int(config['influxdb']['interval']))
    else:
        if config['debug']:
            print('No values found for either temp, humidity, or both. Trying again...')


# Run it!
try:
    # Get the config
    config = getConfig()
    # Make a new influx client
    client = InfluxDBClient(
        host=config['influxdb']['host'],
        port=int(config['influxdb']['port']),
        username=config['influxdb']['user'],
        password=config['influxdb']['password'],
        database=config['influxdb']['dbname'],
        ssl=bool(config['influxdb']['ssl']),
        verify_ssl=bool(config['influxdb']['ssl_verify'])
    )
    while True:
        # Run the main loop
        mainLoop(config, client)
except KeyboardInterrupt:
    pass
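# Hedged configuration sketch (editor addition): a minimal /etc/piProbe/config.json
# matching the keys getConfig() reads above; host and tag values are illustrative only.
# {
#     "debug": false,
#     "influxdb": {
#         "host": "influx.example.local", "port": 8086,
#         "user": "", "password": "", "dbname": "probes",
#         "interval": 10, "ssl": false, "ssl_verify": false,
#         "location_tag": "garage"
#     },
#     "gpio": {"pin": 4, "sensor": "DHT22"}
# }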
| 33.058442 | 102 | 0.535062 | 562 | 5,091 | 4.770463 | 0.298932 | 0.046997 | 0.062663 | 0.067139 | 0.238344 | 0.173443 | 0.115256 | 0.078329 | 0.078329 | 0.078329 | 0 | 0.012835 | 0.326655 | 5,091 | 153 | 103 | 33.27451 | 0.769253 | 0.104302 | 0 | 0.17094 | 0 | 0 | 0.243892 | 0.031257 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025641 | false | 0.025641 | 0.068376 | 0 | 0.102564 | 0.094017 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4cf2a398b75def27862677b4c356e9aadd34c1d2 | 930 | py | Python | mh_3.py | Harshsa28/Monty-Hall-problem | 6715da5b027fec841cc9c587833b83f866422eaa | [
"MIT"
] | null | null | null | mh_3.py | Harshsa28/Monty-Hall-problem | 6715da5b027fec841cc9c587833b83f866422eaa | [
"MIT"
] | null | null | null | mh_3.py | Harshsa28/Monty-Hall-problem | 6715da5b027fec841cc9c587833b83f866422eaa | [
"MIT"
] | null | null | null | import random
no_switch = 0
switch = 0
'''
for i in range(10000):
    car_loc = random.randint(1, 3)    # obviously random
    my_choice = random.randint(1, 3)  # obviously random
    host = [1, 2, 3]
    host.remove(car_loc)
    if my_choice in host:
        host.remove(my_choice)
    host = random.choice(host)
    options = [1, 2, 3]
    options.remove(host)
    if car_loc == my_choice:
        no_switch += 1
    else:
        options.remove(my_choice)
        my_choice = options[0]
        if car_loc == my_choice:
            switch += 1
'''
# You can use either implementation: the commented-out one above or the one below.
# The one above simulates how the game is actually played; the one below captures
# what is really going on: switching wins exactly when the first pick was not the car.
for i in range(10000):
    car_loc = random.randint(1, 3)
    # my_choice = random.randint(1, 3)  # by symmetry, a fixed first pick works just as well
    my_choice = 1
    if my_choice == car_loc:
        no_switch += 1  # staying would have won
    else:
        switch += 1     # switching would have won
print(no_switch, " vs ", switch)
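# Expected outcome (editor's note): staying wins only when the initial pick was
# the car, P = 1/3, so over 10000 trials no_switch lands near 3333 and switch
# near 6667 -- switching doubles the win rate.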
| 24.473684 | 67 | 0.626882 | 147 | 930 | 3.829932 | 0.29932 | 0.142096 | 0.099467 | 0.106572 | 0.337478 | 0.280639 | 0.131439 | 0.131439 | 0.131439 | 0.131439 | 0 | 0.047059 | 0.268817 | 930 | 37 | 68 | 25.135135 | 0.780882 | 0.2 | 0 | 0 | 0 | 0 | 0.016667 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |