hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1b5f7954aad61486e350eecf97b6bdff9243ba31 | 3,053 | py | Python | src/dcm/agent/plugins/builtin/add_user.py | JPWKU/unix-agent | 8f1278fc8c2768a8d4d54af642a881bace43652f | [
"Apache-2.0"
] | null | null | null | src/dcm/agent/plugins/builtin/add_user.py | JPWKU/unix-agent | 8f1278fc8c2768a8d4d54af642a881bace43652f | [
"Apache-2.0"
] | 22 | 2015-09-15T20:52:34.000Z | 2016-03-11T22:44:24.000Z | src/dcm/agent/plugins/builtin/add_user.py | JPWKU/unix-agent | 8f1278fc8c2768a8d4d54af642a881bace43652f | [
"Apache-2.0"
] | 3 | 2015-09-11T20:21:33.000Z | 2016-09-30T08:30:19.000Z | #
# Copyright (C) 2014 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import dcm.agent.messaging.persistence as persistence
import dcm.agent.plugins.api.base as plugin_base
import dcm.agent.plugins.api.utils as plugin_utils
class AddUser(plugin_base.ScriptPlugin):

    protocol_arguments = {
        "userId": ("The new unix account name to be created", True,
                   plugin_utils.user_name, None),
        "firstName": ("The user's first name", True, str, None),
        "lastName": ("The user's last name", True, str, None),
        "authentication": ("The user's ssh public key", True, str, None),
        "administrator": ("A string that is either 'true' or 'false' "
                          "which indicates if the new user should have "
                          "ssh access", True, str, None)
    }

    def __init__(self, conf, job_id, items_map, name, arguments):
        super(AddUser, self).__init__(
            conf, job_id, items_map, name, arguments)
        self.ordered_param_list = [self.args.userId,
                                   self.args.userId,
                                   self.args.firstName,
                                   self.args.lastName,
                                   self.args.administrator.lower()]
        self.ssh_public_key = self.args.authentication
        self._db = persistence.SQLiteAgentDB(conf.storage_dbfile)

    def run(self):
        key_file = self.conf.get_temp_file(self.args.userId + ".pub")
        try:
            if self.ssh_public_key:
                with open(key_file, "w") as f:
                    f.write(self.ssh_public_key)
                self.ordered_param_list.append(key_file)
            plugin_utils.log_to_dcm_console_job_details(
                job_name=self.name,
                details="Attempting to add the user %s." % self.args.userId)
            rc = super(AddUser, self).run()
            admin_bool = self.args.administrator.lower() == "true"
            self._db.add_user(
                self.conf.agent_id, self.args.userId, self.ssh_public_key,
                admin_bool)
            plugin_utils.log_to_dcm_console_job_details(
                job_name=self.name,
                details="The user %s was added." % self.args.userId)
            return rc
        finally:
            if os.path.exists(key_file):
                plugin_utils.secure_delete(self.conf, key_file)


def load_plugin(conf, job_id, items_map, name, arguments):
    return AddUser(conf, job_id, items_map, name, arguments)
| 38.64557 | 76 | 0.613167 | 393 | 3,053 | 4.597964 | 0.394402 | 0.0487 | 0.046486 | 0.030991 | 0.199225 | 0.130603 | 0.130603 | 0.064195 | 0.064195 | 0.064195 | 0 | 0.003704 | 0.292499 | 3,053 | 78 | 77 | 39.141026 | 0.83287 | 0.180478 | 0 | 0.081633 | 0 | 0 | 0.125553 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061224 | false | 0 | 0.081633 | 0.020408 | 0.22449 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b5fd3a5f2909fd769f16c7c3cdc54935db82b2c | 1,980 | py | Python | src/gui/dev/pkg1/faceDetectionImageTest2.py | chaitanyasanoriya/Unified-Machine-Learning-Tool | d88b60ee11a9a0bd203fa52d344263d64cfeaff4 | [
"Apache-2.0"
] | null | null | null | src/gui/dev/pkg1/faceDetectionImageTest2.py | chaitanyasanoriya/Unified-Machine-Learning-Tool | d88b60ee11a9a0bd203fa52d344263d64cfeaff4 | [
"Apache-2.0"
] | null | null | null | src/gui/dev/pkg1/faceDetectionImageTest2.py | chaitanyasanoriya/Unified-Machine-Learning-Tool | d88b60ee11a9a0bd203fa52d344263d64cfeaff4 | [
"Apache-2.0"
] | null | null | null | import cv2
import os
import sys
import shutil
import subprocess
face_cascade = cv2.CascadeClassifier('Face_cascade.xml')
original_path = sys.argv[1]  # the image directory passed on the command line (sys.argv alone is the whole argument list)
#original_path = "D:\College Stuff\Machine Learning\Tests\Minor Project\Test 1 - without ED\data\Anne"
#new_path = original_path + "1"
#os.rename(original_path,new_path)
i=0
#os.mkdir(original_path)
for filename in os.listdir(original_path):
    image_path = original_path + "\\" + str(filename)
    print(str(image_path))
    image = cv2.imread(image_path)
    height, width, channels = image.shape
    max_value = max(height, width)
    if max_value > 1000:
        ratio = 720 / max_value
        image = cv2.resize(image, (0, 0), fx=ratio, fy=ratio)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.16, minNeighbors=5, minSize=(25, 25), flags=0)
    for (x, y, w, h) in faces:
        # Expand the detected face box (roughly 50 px above, 20 px below and
        # 10 px on each side), clamped to the image bounds
        new_start_y = y
        if y - 50 < 0:
            new_start_y = 0
        else:
            new_start_y = y - 50
        new_end_y = y + h
        if y + h + 20 > height:
            new_end_y = height
        else:
            new_end_y = y + h + 20
        new_start_x = x
        if x - 10 < 0:
            new_start_x = 0
        else:
            new_start_x = x - 10
        new_end_x = x + w
        if x + w + 10 > width:
            new_end_x = width
        else:
            new_end_x = x + w + 10
        print(new_start_y, " ", new_end_y, " ", new_start_x, " ", new_end_x)
        new_image = image[new_start_y:new_end_y, new_start_x:new_end_x]
        # cv2.imwrite(original_path+"\\"+str(i)+".jpg",new_image)
        cv2.imwrite(original_path + "\\" + str(filename) + "-" + str(i) + ".jpg", new_image)
        i = i + 1
        # cv2.rectangle(image, (x, y-60), (x+w, y+h+20), (0, 255, 0), 2)
        # cv2.imshow("Faces found", image)
        # time.sleep(3)
        # cv2.waitKey(0)
#shutil.rmtree(new_path)
#subprocess.Popen(['python', 'retrain.py', "--image_dir=D:\College Stuff\Machine Learning\Tests\Minor Project\Test 1 - without ED\data"]) | 34.137931 | 137 | 0.621717 | 312 | 1,980 | 3.733974 | 0.301282 | 0.06867 | 0.038627 | 0.034335 | 0.260086 | 0.16309 | 0.16309 | 0.16309 | 0.16309 | 0.16309 | 0 | 0.044371 | 0.237374 | 1,980 | 58 | 137 | 34.137931 | 0.727152 | 0.263636 | 0 | 0.090909 | 0 | 0 | 0.019337 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.113636 | 0 | 0.113636 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b6165f24d43dbdd11df9d5296ecf6c542bdc293 | 3,763 | py | Python | hangman.py | Telluu/hangman-game | 994e930244c90cf18c0f0eba4afafb0af21081a1 | [
"MIT"
] | null | null | null | hangman.py | Telluu/hangman-game | 994e930244c90cf18c0f0eba4afafb0af21081a1 | [
"MIT"
] | null | null | null | hangman.py | Telluu/hangman-game | 994e930244c90cf18c0f0eba4afafb0af21081a1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import string
import random
import os
import sys
import time
import json
def main():
    lives = 8
    guessed = set()

    # Opening words database
    try:
        words = open('words.json')
    except IOError:
        print('Can\'t locate words.json in local directory.')
        time.sleep(3)
        sys.exit()
    with words:
        categories = json.load(words)

    while True:
        os.system('cls' if os.name == 'nt' else 'clear')
        print('######################')
        print('---Hangman by Tellu---')
        print('######################\n')
        # Printing all the available categories
        for index, category in enumerate(categories):
            print(f'{index + 1}.{category.title()}')
        # Taking and checking user input
        try:
            choosed_category = int(input(f'\nChoose (1-{len(categories)}): '))
        except ValueError:
            print('Only integers!')
            continue
        # Checking if the entered category exists
        if choosed_category > 0 and choosed_category <= len(categories):
            # Then picking a random word from the chosen category
            for index, category in enumerate(categories):
                if (choosed_category - 1) == index:
                    random_word = random.choice(categories[category])
                    break
            break
        else:
            print(f'There is no category with index {choosed_category}.')
            time.sleep(1.5)

    shown_word = ['_' if letter != ' ' else ' ' for letter in random_word]

    while True:
        os.system('cls' if os.name == 'nt' else 'clear')
        print('######################')
        print('---Hangman by Tellu---')
        print('######################\n')
        print(f'Category: {category.title()}')
        print(f'Lives: {lives}\n')
        print(' '.join(shown_word).upper())
        # If the player didn't lose and didn't guess all the letters, ask for input
        if lives > 0 and '_' in shown_word:
            letter = input('\nGuess: ').lower()
            # If input is a letter
            if len(letter) == 1 and letter in string.ascii_letters:
                # And not an already tried letter
                if letter in guessed:
                    print('You tried this letter already!')
                # And is in the random word
                elif letter in random_word:
                    # Then replace a letter using a func
                    replace_letter(letter, random_word, shown_word)
                    print('Yes!')
                else:
                    # Else subtract one life
                    lives -= 1
                    print('No!')
                guessed.add(letter)
            # Invalid input handling
            else:
                if len(letter) > 1:
                    print('Slow down there! One letter at a time...')
                    time.sleep(0.5)
                else:
                    print('No special characters!')
        # Endgame
        else:
            # Show the answer if the player lost
            if lives < 1:
                print(' '.join(random_word).upper() + ' <- ANSWER')
            # Ask if the user wants to play again
            option = input(
                '\nGame over! Do you want to play again? (Y/N) ').lower()
            if option == 'y' or option == 'yes':
                # 'cls' alone is Windows-only; mirror the check used above
                os.system('cls' if os.name == 'nt' else 'clear')
                main()
            elif option == 'n' or option == 'no':
                sys.exit()
            else:
                print('What?')
                time.sleep(1)


def replace_letter(letter, word, hidden_word):
    for index, value in enumerate(word):
        if value == letter:
            hidden_word[index] = letter
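# e.g. (illustrative) replace_letter('a', 'cat', ['_', '_', '_']) mutates the
# hidden word list in place to ['_', 'a', '_']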

if __name__ == '__main__':
    main()
| 30.346774 | 78 | 0.495615 | 414 | 3,763 | 4.434783 | 0.342995 | 0.038126 | 0.017974 | 0.018519 | 0.115468 | 0.115468 | 0.075163 | 0.075163 | 0.075163 | 0.075163 | 0 | 0.007243 | 0.376296 | 3,763 | 123 | 79 | 30.593496 | 0.775032 | 0.130481 | 0 | 0.309524 | 0 | 0 | 0.164005 | 0.041462 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0.071429 | 0 | 0.095238 | 0.238095 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b65190211132ca843abf01b535bda46f144ac3f | 6,593 | py | Python | libraries/mosek/9.3/tools/examples/python/concurrent1.py | TimDSF/SBSOS_ShapeSegmentation | e30495dcf71dc63d1d54f3b73132fcfa75d7647e | [
"MIT"
] | null | null | null | libraries/mosek/9.3/tools/examples/python/concurrent1.py | TimDSF/SBSOS_ShapeSegmentation | e30495dcf71dc63d1d54f3b73132fcfa75d7647e | [
"MIT"
] | null | null | null | libraries/mosek/9.3/tools/examples/python/concurrent1.py | TimDSF/SBSOS_ShapeSegmentation | e30495dcf71dc63d1d54f3b73132fcfa75d7647e | [
"MIT"
] | null | null | null | #
# Copyright: Copyright (c) MOSEK ApS, Denmark. All rights reserved.
#
# File: concurrent1.py
#
# Purpose: Demonstrates a simple implementation of a concurrent optimizer.
#
# The concurrent optimizer starts a few parallel optimizations
# of the same problem using different algorithms, and reports
# a solution when the first optimizer is ready.
#
# This example also demonstrates how to define a simple callback handler
# that stops the optimizer when requested.
#
import mosek, sys
from threading import Thread
# Defines a Mosek callback function whose only function
# is to indicate if the optimizer should be stopped.
stop = False
firstStop = -1
def cbFun(code):
    return 1 if stop else 0


# Stream handling method
def streamwriter(s):
    sys.stdout.write('{0}'.format(s))
# firstOK, res, trm = optimize(tasks)
#
# Takes a list of tasks and optimizes then in parallel. The
# response code and termination code from each optimization is
# returned in ``res`` and ``trm``.
#
# When one task completes with rescode == ok, others are terminated.
#
# firstOK is the index of the first task that returned
# with rescode == ok. Whether or not this task contains the
# most valuable answer, is for the caller to decide. If none
# completed without error returns -1.
def runTask(num, task, res, trm):
    global stop
    global firstStop
    try:
        trm[num] = task.optimize()
        res[num] = mosek.rescode.ok
    except mosek.MosekException as e:
        trm[num] = mosek.rescode.err_unknown
        res[num] = e.errno
    finally:
        # If this finished with success, inform other tasks to interrupt
        if res[num] == mosek.rescode.ok:
            if not stop:
                firstStop = num
            stop = True
def optimize(tasks):
    n = len(tasks)
    res = [mosek.rescode.err_unknown] * n
    trm = [mosek.rescode.err_unknown] * n

    # Set a callback function
    for t in tasks:
        t.set_Progress(cbFun)

    # Start parallel optimizations, one per task
    jobs = [Thread(target=runTask, args=(i, tasks[i], res, trm)) for i in range(n)]
    for j in jobs:
        j.start()
    for j in jobs:
        j.join()

    # For debugging, print res and trm codes for all optimizers
    for i in range(n):
        print("Optimizer {0} res {1} trm {2}".format(i, res[i], trm[i]))

    return firstStop, res, trm
#
# idx, winTask, winTrm, winRes = optimizeconcurrent(task, optimizers)
#
# Given a continuous task, set up jobs to optimize it
# with a list of different solvers.
#
# Returns an index, corresponding to the optimization
# task that is returned as winTask. This is the task
# with the best possible status of those that finished.
# If none task is considered successful returns -1.
def optimizeconcurrent(task, optimizers):
    n = len(optimizers)
    tasks = [mosek.Task(task) for _ in range(n)]

    # Choose various optimizers for cloned tasks
    for i in range(n):
        tasks[i].putintparam(mosek.iparam.optimizer, optimizers[i])

    # Solve tasks in parallel
    firstOK, res, trm = optimize(tasks)

    if firstOK >= 0:
        return firstOK, tasks[firstOK], trm[firstOK], res[firstOK]
    else:
        return -1, None, None, None
#
# idx, winTask, winTrm, winRes = optimizeconcurrent(task, optimizers)
#
# Given a mixed-integer task, set up jobs to optimize it
# with different values of seed. That will lead to
# different execution paths of the optimizer.
#
# Returns an index, corresponding to the optimization
# task that is returned as winTask. This is the task
# with the best value of the objective function.
# If none task is considered successful returns -1.
#
# Typically, the input task would contain a time limit. The two
# major scenarios are:
# 1. Some clone ends before time limit - then it has optimum.
# 2. All clones reach time limit - pick the one with best objective.
def optimizeconcurrentMIO(task, seeds):
    n = len(seeds)
    tasks = [mosek.Task(task) for _ in range(n)]

    # Choose various seeds for cloned tasks
    for i in range(n):
        tasks[i].putintparam(mosek.iparam.mio_seed, seeds[i])

    # Solve tasks in parallel
    firstOK, res, trm = optimize(tasks)

    if firstOK >= 0:
        # Pick the task that ended with res = ok
        # and contains an integer solution with best objective value
        sense = task.getobjsense()
        bestObj = 1.0e+10 if sense == mosek.objsense.minimize else -1.0e+10
        bestPos = -1

        for i in range(n):
            print("{0} {1}".format(i, tasks[i].getprimalobj(mosek.soltype.itg)))

        for i in range(n):
            if ((res[i] == mosek.rescode.ok) and
                (tasks[i].getsolsta(mosek.soltype.itg) == mosek.solsta.prim_feas or
                 tasks[i].getsolsta(mosek.soltype.itg) == mosek.solsta.integer_optimal) and
                ((tasks[i].getprimalobj(mosek.soltype.itg) < bestObj)
                 if (sense == mosek.objsense.minimize) else
                 (tasks[i].getprimalobj(mosek.soltype.itg) > bestObj))):
                bestObj = tasks[i].getprimalobj(mosek.soltype.itg)
                bestPos = i

        if bestPos >= 0:
            return bestPos, tasks[bestPos], trm[bestPos], res[bestPos]

    return -1, None, None, None
# This is an example of how one can use the methods
# optimizeconcurrent
# optimizeconcurrentMIO
#
# argv[0] : name of file with input problem
# argv[1]: (optional) time limit
def main(argv):
    with mosek.Env() as env:
        with mosek.Task(env, 0, 0) as task:
            if len(argv) >= 2:
                task.readdata(argv[1])
            else:
                task.readdata("../data/25fv47.mps")

            # Optional time limit
            if len(argv) >= 3:
                task.putdouparam(mosek.dparam.optimizer_max_time, float(argv[2]))

            if (task.getnumintvar() == 0):
                # If the problem is continuous
                # optimize it with three continuous optimizers.
                # (Simplex will fail for non-linear problems)
                optimizers = [
                    mosek.optimizertype.conic,
                    mosek.optimizertype.dual_simplex,
                    mosek.optimizertype.primal_simplex
                ]

                idx, t, trm, res = optimizeconcurrent(task, optimizers)
            else:
                # Mixed-integer problem.
                # Try various seeds.
                seeds = [42, 13, 71749373]

                idx, t, trm, res = optimizeconcurrentMIO(task, seeds)

            # Check results and print the best answer
            if idx >= 0:
                print("Result from optimizer with index {0}: res {1} trm {2}".format(idx, res, trm))
                t.set_Stream(mosek.streamtype.log, streamwriter)
                t.optimizersummary(mosek.streamtype.log)
                t.solutionsummary(mosek.streamtype.log)
            else:
                print("All optimizers failed.")
if __name__ == "__main__":
main(sys.argv) | 32.160976 | 94 | 0.66965 | 924 | 6,593 | 4.754329 | 0.288961 | 0.012292 | 0.014569 | 0.015024 | 0.297291 | 0.252675 | 0.208514 | 0.190303 | 0.140223 | 0.112907 | 0 | 0.011422 | 0.229789 | 6,593 | 205 | 95 | 32.160976 | 0.853683 | 0.440012 | 0 | 0.193878 | 0 | 0 | 0.04124 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.020408 | 0.010204 | 0.153061 | 0.040816 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b6c2f674f5f13eb386f7e948241adeb8392fa33 | 1,235 | py | Python | setup.py | jeffmahler/visualization | 9844dd0e58b4635967cad332268bdc9aec93bc25 | [
"Apache-2.0"
] | 8 | 2017-12-21T02:25:00.000Z | 2020-10-27T19:45:07.000Z | setup.py | jeffmahler/visualization | 9844dd0e58b4635967cad332268bdc9aec93bc25 | [
"Apache-2.0"
] | 11 | 2017-11-10T03:01:55.000Z | 2022-01-10T22:37:25.000Z | setup.py | jeffmahler/visualization | 9844dd0e58b4635967cad332268bdc9aec93bc25 | [
"Apache-2.0"
] | 18 | 2017-10-04T23:35:12.000Z | 2021-08-27T23:34:33.000Z | """
Visualization setup file.
"""
from setuptools import setup
requirements = ["imageio", "numpy", "matplotlib", "trimesh[easy]", "autolab_core", "pyrender"]
exec(open("visualization/version.py").read())
setup(
name="visualization",
version=__version__,
description="Visualization toolkit for the Berkeley AutoLab.",
long_description="Visualization toolkit for the Berkeley AutoLab.",
author="Matthew Matl",
author_email="mmatl@eecs.berkeley.edu",
license="Apache Software License",
url="https://github.com/BerkeleyAutomation/visualization",
keywords="robotics visualization rendering 3D",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Natural Language :: English",
"Topic :: Scientific/Engineering",
],
packages=["visualization"],
install_requires=requirements,
extras_require={"docs": ["sphinx", "sphinxcontrib-napoleon", "sphinx_rtd_theme"]},
)
| 34.305556 | 94 | 0.666397 | 120 | 1,235 | 6.766667 | 0.641667 | 0.116995 | 0.153941 | 0.128079 | 0.128079 | 0.128079 | 0.128079 | 0 | 0 | 0 | 0 | 0.00999 | 0.189474 | 1,235 | 35 | 95 | 35.285714 | 0.801199 | 0.020243 | 0 | 0 | 0 | 0 | 0.588186 | 0.075707 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.035714 | 0 | 0.035714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b6c315db32834647331ebec872b7a205cd925ba | 18,024 | py | Python | model_optimizer_pkg/model_optimizer_pkg/model_optimizer_node.py | jsspric/aws-deepracer-model-optimizer-pkg | 5593c02dfa311b4178a77eeebf78fb224616fc5e | [
"Apache-2.0"
] | 4 | 2021-04-28T07:53:17.000Z | 2021-10-30T02:41:54.000Z | deepracer_follow_the_leader_ws/install/model_optimizer_pkg/lib/python3.8/site-packages/model_optimizer_pkg/model_optimizer_node.py | amitjain-3/working_add | ddd3b10d854477e86bf7a8558b3d447ec03a8a5f | [
"Apache-2.0"
] | null | null | null | deepracer_follow_the_leader_ws/install/model_optimizer_pkg/lib/python3.8/site-packages/model_optimizer_pkg/model_optimizer_node.py | amitjain-3/working_add | ddd3b10d854477e86bf7a8558b3d447ec03a8a5f | [
"Apache-2.0"
] | 3 | 2021-04-30T06:30:28.000Z | 2021-10-30T02:41:38.000Z | #!/usr/bin/env python
#################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#################################################################################
"""
model_optimizer_node.py
This module creates the model_optimizer_node which is responsible for running the Intel
OpenVino model optimizer script for the DeepRacer reinforcement learning models to obtain
the intermediate representation xml files and other optimizer artifacts required to run the
inference with the model.
More details:
(https://docs.openvinotoolkit.org/2021.1/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html)
"The optimizer performs static model analysis, and adjusts deep
learning models for optimal execution on end-point target devices."
The node defines:
model_optimizer_service: A service to call the Intel OpenVino model optimizer script
for the specific model with appropriate model and platform
specific parameters set.
"""
import os
import subprocess
import shlex
import re
import rclpy
from rclpy.node import Node
from rclpy.callback_groups import ReentrantCallbackGroup
from rclpy.executors import MultiThreadedExecutor
from deepracer_interfaces_pkg.srv import (ModelOptimizeSrv)
from model_optimizer_pkg import constants
class ModelOptimizerNode(Node):
    """Node responsible for running the Intel OpenVino model optimizer for the DeepRacer models.
    """

    def __init__(self):
        """Create a ModelOptimizerNode.
        """
        super().__init__("model_optimizer_node")
        self.get_logger().info("model_optimizer_node started")

        # Service to call the Intel OpenVino model optimizer script
        # for the specific model.
        self.model_optimizer_service_cb_group = ReentrantCallbackGroup()
        self.model_optimizer_service = \
            self.create_service(ModelOptimizeSrv,
                                constants.MODEL_OPTIMIZER_SERVER_SERVICE_NAME,
                                self.model_optimizer,
                                callback_group=self.model_optimizer_service_cb_group)

        # Heartbeat timer.
        self.timer_count = 0
        self.timer = self.create_timer(5.0, self.timer_callback)

    def timer_callback(self):
        """Heartbeat function to keep the node alive.
        """
        self.get_logger().debug(f"Timer heartbeat {self.timer_count}")
        self.timer_count += 1

    def model_optimizer(self, req, res):
        """Callback for the model_optimizer_server service. Handles calling the Intel OpenVino
        model optimizer script with appropriate parameters set for the specific model details
        passed in the request data.

        Args:
            req (ModelOptimizeSrv.Request): Request object with the model details required to
                                            run the optimizer set.
            res (ModelOptimizeSrv.Response): Response object with error(int) flag to indicate
                                             successful execution of the optimizer script and
                                             artifact_path(str) with the path where the
                                             intermediate representation xml files are created
                                             for the model.

        Returns:
            ModelOptimizeSrv.Response: Response object with error(int) flag to indicate
                                       successful execution of the optimizer script and
                                       artifact_path(str) with the path where the intermediate
                                       representation xml files are created for the model.
        """
        self.get_logger().info("model_optimizer")
        try:
            aux_param = {"--fuse": "OFF", "--img-format": req.img_format}
            error_code, artifact_path = self.optimize_tf_model(req.model_name,
                                                               req.model_metadata_sensors,
                                                               req.training_algorithm,
                                                               req.width,
                                                               req.height,
                                                               req.lidar_channels,
                                                               aux_param)
            res.error = error_code
            res.artifact_path = artifact_path
        except Exception as ex:
            res.error = 1
            self.get_logger().error(f"Error while optimizing model: {ex}")
        return res

    def convert_to_mo_cli(self,
                          model_name,
                          model_metadata_sensors,
                          training_algorithm,
                          input_width,
                          input_height,
                          lidar_channels,
                          aux_inputs):
        """Helper method that converts the information in the model optimizer API into
        the appropriate cli commands.

        Args:
            model_name (str): Model prefix, should be the same in the weight and symbol file.
            model_metadata_sensors (list): List of sensor input types(int) for all the sensors
                                           with which the model was trained.
            training_algorithm (int): Training algorithm key(int) for the algorithm with which
                                      the model was trained.
            input_width (int): Width of the input image to the inference engine.
            input_height (int): Height of the input image to the inference engine.
            lidar_channels (int): Number of LiDAR values with which the LiDAR head of
                                  the model was trained.
            aux_inputs (dict): Dictionary of auxiliary options for the model optimizer.

        Raises:
            Exception: Custom exception if the API flags and default values are not
                       aligned.
            Exception: Custom exception if the lidar_channel value is less than 1.

        Returns:
            dict: Map of parameters to be passed to model optimizer command based on the model.
        """
        if len(constants.APIFlags.get_list()) != len(constants.APIDefaults.get_list()):
            raise Exception("Inconsistent API flags")

        # Set the flags to the default values.
        default_param = {}
        for flag, value in zip(constants.APIFlags.get_list(), constants.APIDefaults.get_list()):
            default_param[flag] = value

        # Set param values to the user entered values in aux_inputs.
        for flag, value in aux_inputs.items():
            if flag in default_param:
                default_param[flag] = value

        # Dictionary that will house the cli commands.
        common_params = {}

        # Convert API information into appropriate cli commands.
        for flag, value in default_param.items():
            if flag is constants.APIFlags.MODELS_DIR:
                common_params[constants.MOKeys.MODEL_PATH] = os.path.join(value, model_name)
            # Input shape is in the form [n,h,w,c] to support tensorflow models only
            elif flag is constants.APIFlags.IMG_CHANNEL:
                common_params[constants.MOKeys.INPUT_SHAPE] = (constants.MOKeys.INPUT_SHAPE_FMT
                                                               .format(1,
                                                                       input_height,
                                                                       input_width,
                                                                       value))
            elif flag is constants.APIFlags.PRECISION:
                common_params[constants.MOKeys.DATA_TYPE] = value
            elif flag is constants.APIFlags.FUSE:
                if value is not constants.APIDefaults.FUSE:
                    common_params[constants.MOKeys.DISABLE_FUSE] = ""
                    common_params[constants.MOKeys.DISABLE_GFUSE] = ""
            elif flag is constants.APIFlags.IMG_FORMAT:
                if value is constants.APIDefaults.IMG_FORMAT:
                    common_params[constants.MOKeys.REV_CHANNELS] = ""
            elif flag is constants.APIFlags.OUT_DIR:
                common_params[constants.MOKeys.OUT_DIR] = value
            # Only keep entries with non-empty string values.
            elif value:
                common_params[flag] = value

        # Override the input shape and the input flags to handle multi head inputs in tensorflow
        input_shapes = []
        input_names = []
        training_algorithm_key = constants.TrainingAlgorithms(training_algorithm)
        for input_type in model_metadata_sensors:
            input_key = constants.SensorInputTypes(input_type)
            if input_key == constants.SensorInputTypes.LIDAR \
                    or input_key == constants.SensorInputTypes.SECTOR_LIDAR:
                if lidar_channels < 1:
                    raise Exception("Lidar channels less than 1")
                input_shapes.append(constants.INPUT_SHAPE_FORMAT_MAPPING[input_key]
                                    .format(1, lidar_channels))
            else:
                # Input shape is in the form [n,h,w,c] to support tensorflow models only
                input_shapes.append(
                    constants.INPUT_SHAPE_FORMAT_MAPPING[input_key]
                    .format(1,
                            input_height,
                            input_width,
                            constants.INPUT_CHANNEL_SIZE_MAPPING[input_key]))
            input_name_format = constants.NETWORK_INPUT_FORMAT_MAPPING[input_key]
            input_names.append(
                input_name_format.format(
                    constants.INPUT_HEAD_NAME_MAPPING[training_algorithm_key]))
        if len(input_names) > 0 and len(input_shapes) == len(input_names):
            common_params[constants.MOKeys.INPUT_SHAPE] = \
                constants.MOKeys.INPUT_SHAPE_DELIM.join(input_shapes)
            common_params[constants.APIFlags.INPUT] = \
                constants.MOKeys.INPUT_SHAPE_DELIM.join(input_names)

        common_params[constants.MOKeys.MODEL_NAME] = model_name
        return common_params

    def run_optimizer(self, mo_path, common_params, platform_parms):
        """Helper method that combines the common commands with the platform specific
        commands.

        Args:
            mo_path (str): Path to Intel's model optimizer for a given platform
                           (mxnet, caffe, or tensor flow).
            common_params (dict): Dictionary containing the cli flags common to all
                                  model optimizer.
            platform_parms (dict): Dictionary containing the cli flags for the specific
                                   platform.

        Raises:
            Exception: Custom exception if the model file is not present.

        Returns:
            tuple: Tuple whose first value is the error code and second value
                   is a string to the location of the converted model if any.
        """
        if not os.path.isfile(common_params[constants.MOKeys.MODEL_PATH]):
            raise Exception(f"Model file {common_params[constants.MOKeys.MODEL_PATH]} not found")

        cmd = f"{constants.PYTHON_BIN} {constants.INTEL_PATH}{mo_path}"
        # Construct the cli command
        for flag, value in dict(common_params, **platform_parms).items():
            cmd += f" {flag} {value}"
        self.get_logger().info(f"Model optimizer command: {cmd}")
        tokenized_cmd = shlex.split(cmd)

        retry_count = 0
        # Retry running the optimizer if it fails due to any error.
        # The optimizer command is run for MAX_OPTIMIZER_RETRY_COUNT + 1 times.
        while retry_count <= constants.MAX_OPTIMIZER_RETRY_COUNT:
            self.get_logger().info(f"Optimizing model: {retry_count} of "
                                   f"{constants.MAX_OPTIMIZER_RETRY_COUNT} trials")
            proc = subprocess.Popen(tokenized_cmd, stderr=subprocess.PIPE)
            _, std_err = proc.communicate()
            if not proc.returncode:
                return 0, os.path.join(common_params[constants.MOKeys.OUT_DIR],
                                       f"{common_params[constants.MOKeys.MODEL_NAME]}.xml")
            std_err = re.sub(r", question #\d+", "", std_err.decode("utf-8"))
            self.get_logger().error(f"Model optimizer error info: {std_err}")
            retry_count += 1

        # Return error code 1, which means that the model optimizer failed even after retries.
        return 1, ""
    def set_platform_param(self, platform_param, aux_inputs):
        """Helper method that creates a dictionary with the platform specific
        Intel model optimizer cli commands.

        Args:
            platform_param (dict): Dictionary of available platform cli commands.
            aux_inputs (dict): Dictionary of auxiliary options for the model optimizer.

        Returns:
            dict: Dictionary with platform specific params set if present in aux_inputs.
        """
        self.get_logger().info(f"aux_inputs: {aux_inputs}")
        set_platform_params = {}
        for flag in platform_param:
            if flag in aux_inputs:
                set_platform_params[flag] = aux_inputs[flag]
        return set_platform_params

    def optimize_tf_model(self,
                          model_name,
                          model_metadata_sensors,
                          training_algorithm,
                          input_width,
                          input_height,
                          lidar_channels,
                          aux_inputs={}):
        """Helper function to run Intel's model optimizer for a DeepRacer tensorflow model.

        Args:
            model_name (str): Model prefix, should be the same in the weight and symbol file.
            model_metadata_sensors (list): List of sensor input types(int) for all the sensors
                                           with which the model was trained.
            training_algorithm (int): Training algorithm key(int) for the algorithm with which
                                      the model was trained.
            input_width (int): Width of the input image to the inference engine.
            input_height (int): Height of the input image to the inference engine.
            lidar_channels (int): Number of LiDAR values with which the LiDAR head of
                                  the model was trained.
            aux_inputs (dict, optional): Dictionary of auxiliary options for the model optimizer.
                                         Defaults to {}.

        Raises:
            Exception: Custom exception if the input height or width is less than 1.

        Returns:
            tuple: Tuple whose first value is the error code and second value
                   is a string to the location of the converted model if any.
        """
        if input_width < 1 or input_height < 1:
            raise Exception("Invalid height or width")

        # Convert the API information into Intel model optimizer cli commands.
        common_params = self.convert_to_mo_cli(model_name,
                                               model_metadata_sensors,
                                               training_algorithm,
                                               input_width,
                                               input_height,
                                               lidar_channels,
                                               aux_inputs)

        # Tensor Flow specific parameters.
        tf_params = {"--input_model_is_text": "",
                     "--offload_unsupported_operations_to_tf": "",
                     "--tensorflow_subgraph_patterns": "",
                     "--tensorflow_operation_patterns": "",
                     "--tensorflow_custom_operations_config_update": "",
                     "--tensorflow_use_custom_operations_config": ""}

        # Add the correct file suffix.
        common_params[constants.MOKeys.MODEL_PATH] += ".pbtxt" if "--input_model_is_text" in aux_inputs else ".pb"

        return self.run_optimizer("mo_tf.py", common_params,
                                  self.set_platform_param(tf_params, aux_inputs))


def main(args=None):
    rclpy.init(args=args)
    model_optimizer_node = ModelOptimizerNode()
    executor = MultiThreadedExecutor()
    rclpy.spin(model_optimizer_node, executor)

    # Destroy the node explicitly
    # (optional - otherwise it will be done automatically
    # when the garbage collector destroys the node object)
    model_optimizer_node.destroy_node()
    rclpy.shutdown()


if __name__ == "__main__":
    main()
| 51.05949 | 114 | 0.568409 | 1,910 | 18,024 | 5.186387 | 0.205236 | 0.050878 | 0.031799 | 0.038159 | 0.39572 | 0.351504 | 0.262366 | 0.257016 | 0.240662 | 0.240662 | 0 | 0.002704 | 0.364015 | 18,024 | 352 | 115 | 51.204545 | 0.861467 | 0.417887 | 0 | 0.144509 | 0 | 0 | 0.087778 | 0.042229 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046243 | false | 0 | 0.057803 | 0 | 0.144509 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b6fa0d06ddf808de323e30136b2cb3ba0e36dbf | 1,217 | py | Python | filter_plugins/in_loop_list.py | jtyr/ansible-lvm_disk_extend | f7aca8c2cb752f233216666c7a820e8d6a9ffa1e | [
"MIT"
] | 7 | 2016-08-31T13:00:29.000Z | 2021-10-01T08:46:25.000Z | filter_plugins/in_loop_list.py | jtyr/ansible-lvm_disk_extend | f7aca8c2cb752f233216666c7a820e8d6a9ffa1e | [
"MIT"
] | null | null | null | filter_plugins/in_loop_list.py | jtyr/ansible-lvm_disk_extend | f7aca8c2cb752f233216666c7a820e8d6a9ffa1e | [
"MIT"
] | 8 | 2017-01-31T19:08:27.000Z | 2021-11-05T05:42:17.000Z | from ansible.module_utils.six import string_types
def in_loop_list(
        val, loop_var, path=[], module='stat', param='exists', param_val=True):
    """Verifies if any of the loop results have the desired value"""

    ret = False

    for result in loop_var['results']:
        item = result['item']

        for field in path:
            if (
                    (
                        isinstance(field, string_types) and
                        isinstance(item, dict) and
                        field in item
                    ) or (
                        isinstance(field, int) and
                        isinstance(item, list) and
                        len(item) > field
                    )):
                item = item[field]
            else:
                # Incorrect path
                break

        if (
                item == val and
                module in result and
                param in result[module] and
                result[module][param] == param_val):
            ret = True
            break

    return ret
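
# Illustrative playbook usage (variable names hypothetical): with the results
# of a looped `stat` task registered as `stat_results`, a condition such as
#   when: "/etc/foo.conf" | in_loop_list(stat_results)
# is true when any loop item equals that path and its stat result reports
# `exists: true`.
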
class FilterModule(object):
    """Custom Jinja2 filters"""

    def filters(self):
        return {
            'in_loop_list': in_loop_list,
        }
| 25.893617 | 79 | 0.457683 | 120 | 1,217 | 4.533333 | 0.425 | 0.044118 | 0.055147 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001529 | 0.462613 | 1,217 | 46 | 80 | 26.456522 | 0.830275 | 0.078883 | 0 | 0.121212 | 0 | 0 | 0.02973 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0 | 0.030303 | 0.030303 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b71400982dc54036bfed93c9d3caf4be2635a78 | 2,883 | py | Python | .tools/parser.py | Balnian/vcpkg-explorer | b67adfabd39dcf63e8f8e59f8a70ba162d6b9699 | [
"MIT"
] | null | null | null | .tools/parser.py | Balnian/vcpkg-explorer | b67adfabd39dcf63e8f8e59f8a70ba162d6b9699 | [
"MIT"
] | null | null | null | .tools/parser.py | Balnian/vcpkg-explorer | b67adfabd39dcf63e8f8e59f8a70ba162d6b9699 | [
"MIT"
] | null | null | null |
import argparse, os, json, io
def dir_path(string):
    if os.path.isdir(string):
        return string
    else:
        raise NotADirectoryError(string)
# Parse CONTROL File
def simpleInsert(dic, line):
    dic[line.split(':')[0].strip()] = line.split(':')[1].strip()

def simpleInsertList(dic, line):
    dic[line.split(':')[0].strip()] = [i.strip() for i in line.split(':')[1].split(",")]
def parseControlSource(dic, line):
    dic["Source"] = line
controlSpecialParser = {
    "Source": simpleInsert,
    "Version": simpleInsert,
    "Build-Depends": simpleInsertList,
    "Default-Features": simpleInsertList,
    "Description": simpleInsert
}
def parseControlLine(dic, line):
    parser = controlSpecialParser.get(line.split(':')[0])
    if parser is not None:
        parser(dic, line)
def parseControlFeatureParagraph(dic, para):
    if len(para.strip()) == 0:
        print("Empty:")
    features = dic.get("Features")
    if features is None:
        dic["Features"] = {}
    # print("="*10)
    # print(para)
    # print("="*10)
    # Use a list of lines here: a StringIO iterator would be exhausted by the
    # first comprehension below and the feature fields would never be parsed
    lines = para.splitlines()
    # Feature name (find the str containing "Feature:")
    fname = [s for s in lines if "Feature:" in s][0].split(":")[1].strip()
    fdic = {}
    # Parse the Feature paragraph into a feature dictionary (skip the
    # "Feature:" line itself because it's the name)
    [parseControlLine(fdic, line) for line in lines if "Feature" not in line]
    dic["Features"][fname] = fdic
def parseControlSourceParagraph(dic, para):
    buf = io.StringIO(para)
    [parseControlLine(dic, line) for line in buf]
def parseControlFile(file):
    dic = {}
    with open(file, "r", encoding="utf8") as f:
        lines = f.read()
        sections = lines.split("\n\n")
        parseControlSourceParagraph(dic, sections[0])
        [parseControlFeatureParagraph(dic, para) for para in sections[1:] if len(para.strip()) != 0]
        # [parseControlLine(dic,line) for line in f]
    return dic
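
# Illustrative CONTROL paragraph handled by parseControlFile above (values
# hypothetical, assumed vcpkg CONTROL syntax):
#   Source: zlib
#   Version: 1.2.11
#   Build-Depends: a, b
# parses to {"Source": "zlib", "Version": "1.2.11", "Build-Depends": ["a", "b"]}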
parser = argparse.ArgumentParser()
parser.add_argument('SourceDirectory',type=dir_path,help="location of the port folder of the vcpkg to parse")
parser.add_argument('-o',type=dir_path,help="output of the JSON file generated", default="./")
args = parser.parse_args()
# Get all the names of the dirs inside of "ports"
controlfiles = []
# r=root, d=directories, f = files
for r, d, f in os.walk(args.SourceDirectory):
    for file in f:
        if 'CONTROL' == file:
            controlfiles.append(os.path.join(r, file))
print(args)
dic = {}
for item in [parseControlFile(f) for f in controlfiles]:
    name = item["Source"]
    del item["Source"]
    dic[name] = item

with open(args.o + "libs.json", 'w') as outf:
    json.dump(dic, outf)
# print([parseControlFile(f) for f in controlfiles])
# print(f)
# print(namelist) | 27.990291 | 109 | 0.645855 | 373 | 2,883 | 4.97319 | 0.310992 | 0.033962 | 0.016173 | 0.021024 | 0.202695 | 0.186523 | 0.113208 | 0.086253 | 0.086253 | 0.086253 | 0 | 0.00695 | 0.201526 | 2,883 | 103 | 110 | 27.990291 | 0.798871 | 0.136316 | 0 | 0.179104 | 0 | 0 | 0.100969 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.134328 | false | 0 | 0.014925 | 0 | 0.19403 | 0.029851 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b740de940b94786b8dade107ab2abfb4ca34794 | 859 | py | Python | calendars/urls/calendar.py | mouradmourafiq/django-calendar | 5cec7f8ac49637a02e331064d470255d1cbaf096 | [
"BSD-2-Clause"
] | 7 | 2015-02-23T11:59:02.000Z | 2021-03-01T18:09:35.000Z | calendars/urls/calendar.py | mouradmourafiq/django-calendar | 5cec7f8ac49637a02e331064d470255d1cbaf096 | [
"BSD-2-Clause"
] | null | null | null | calendars/urls/calendar.py | mouradmourafiq/django-calendar | 5cec7f8ac49637a02e331064d470255d1cbaf096 | [
"BSD-2-Clause"
] | 1 | 2016-09-15T11:37:08.000Z | 2016-09-15T11:37:08.000Z | # -*- coding: utf-8 -*-
'''
Created on Mar 20, 2011
@author: Mourad Mourafiq
@copyright: Copyright © 2011
other contributors:
'''
from django.conf.urls import patterns, include, url
from calendars.views import calendar_tables as calendar_tables
from calendars.views import events as events
urlpatterns = patterns('',
    url(r'^$', calendar_tables.calendar_detail, {'template_name': "calendars/calendar.html"}, name='calendar_detail'),
    url(r'^(?P<events_id>\d+)/(?P<delta_day>.?\d+)/(?P<delta_minute>.?\d+)/(?P<allDay>\d)/_update/$', calendar_tables.update_event_date, name='update_event_date'),
    url(r'^planning/(?P<user_id>\d+)/$', calendar_tables.planning, name='planning'),
    url(r'^events/$', calendar_tables.calendar_by_params, name='calendar_events'),
    url(r'^events_title/$', events.events_titles, name='events_titles'),
) | 40.904762 | 163 | 0.706636 | 117 | 859 | 4.991453 | 0.435897 | 0.143836 | 0.061644 | 0.082192 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014417 | 0.111758 | 859 | 21 | 164 | 40.904762 | 0.749672 | 0.14319 | 0 | 0 | 0 | 0.1 | 0.33882 | 0.192044 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.3 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b751be946e14f92ef6596264ea0fe477e367ac0 | 3,724 | py | Python | sdk/identity/azure-identity/tests/test_chained_token_credential_async.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/identity/azure-identity/tests/test_chained_token_credential_async.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/identity/azure-identity/tests/test_chained_token_credential_async.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from azure.core.credentials import AccessToken
from azure.core.exceptions import ClientAuthenticationError
from azure.identity import CredentialUnavailableError, ClientSecretCredential
from azure.identity.aio import ChainedTokenCredential
import pytest
from unittest.mock import Mock
from helpers_async import get_completed_future, wrap_in_future
@pytest.mark.asyncio
async def test_close():
    credentials = [Mock(close=Mock(wraps=get_completed_future)) for _ in range(5)]
    chain = ChainedTokenCredential(*credentials)

    await chain.close()

    for credential in credentials:
        assert credential.close.call_count == 1
@pytest.mark.asyncio
async def test_context_manager():
    credentials = [Mock(close=Mock(wraps=get_completed_future)) for _ in range(5)]
    chain = ChainedTokenCredential(*credentials)

    async with chain:
        pass

    for credential in credentials:
        assert credential.close.call_count == 1
@pytest.mark.asyncio
async def test_credential_chain_error_message():
    first_error = "first_error"
    first_credential = Mock(
        spec=ClientSecretCredential, get_token=Mock(side_effect=CredentialUnavailableError(first_error))
    )
    second_error = "second_error"
    second_credential = Mock(
        name="second_credential", get_token=Mock(side_effect=ClientAuthenticationError(second_error))
    )

    with pytest.raises(ClientAuthenticationError) as ex:
        await ChainedTokenCredential(first_credential, second_credential).get_token("scope")

    assert "ClientSecretCredential" in ex.value.message
    assert first_error in ex.value.message
    assert second_error in ex.value.message
@pytest.mark.asyncio
async def test_chain_attempts_all_credentials():
    async def credential_unavailable(message="it didn't work"):
        raise CredentialUnavailableError(message)

    expected_token = AccessToken("expected_token", 0)
    credentials = [
        Mock(get_token=Mock(wraps=credential_unavailable)),
        Mock(get_token=Mock(wraps=credential_unavailable)),
        Mock(get_token=wrap_in_future(lambda _: expected_token)),
    ]

    token = await ChainedTokenCredential(*credentials).get_token("scope")
    assert token is expected_token

    for credential in credentials[:-1]:
        assert credential.get_token.call_count == 1
@pytest.mark.asyncio
async def test_chain_raises_for_unexpected_error():
"""the chain should not continue after an unexpected error (i.e. anything but CredentialUnavailableError)"""
async def credential_unavailable(message="it didn't work"):
raise CredentialUnavailableError(message)
expected_message = "it can't be done"
credentials = [
Mock(get_token=Mock(wraps=credential_unavailable)),
Mock(get_token=Mock(side_effect=ValueError(expected_message))),
Mock(get_token=Mock(wraps=wrap_in_future(lambda _: AccessToken("**", 42)))),
]
with pytest.raises(ClientAuthenticationError) as ex:
await ChainedTokenCredential(*credentials).get_token("scope")
assert expected_message in ex.value.message
assert credentials[-1].get_token.call_count == 0
@pytest.mark.asyncio
async def test_returns_first_token():
    expected_token = Mock()
    first_credential = Mock(get_token=wrap_in_future(lambda _: expected_token))
    second_credential = Mock(get_token=Mock())

    aggregate = ChainedTokenCredential(first_credential, second_credential)
    credential = await aggregate.get_token("scope")

    assert credential is expected_token
    assert second_credential.get_token.call_count == 0
| 34.165138 | 112 | 0.741407 | 431 | 3,724 | 6.180974 | 0.220418 | 0.051051 | 0.036036 | 0.04955 | 0.556306 | 0.438063 | 0.416291 | 0.368994 | 0.31494 | 0.272523 | 0 | 0.003812 | 0.154672 | 3,724 | 108 | 113 | 34.481481 | 0.84244 | 0.038131 | 0 | 0.337838 | 0 | 0 | 0.040934 | 0.006342 | 0 | 0 | 0 | 0 | 0.148649 | 1 | 0 | false | 0.013514 | 0.094595 | 0 | 0.094595 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b752507440aeed9f54d65f404fbb19a93263529 | 2,538 | py | Python | needy/generators/xcconfig.py | carlbrown/needy | 5a70726c9846f86a88be896ec39740296d503835 | [
"MIT"
] | 65 | 2015-07-21T01:40:17.000Z | 2019-06-10T10:46:28.000Z | needy/generators/xcconfig.py | bittorrent/needy | 31e57ad09d5fc22126e10b735c586262a50139d7 | [
"MIT"
] | 110 | 2015-07-21T01:41:40.000Z | 2017-01-18T23:13:30.000Z | needy/generators/xcconfig.py | bittorrent/needy | 31e57ad09d5fc22126e10b735c586262a50139d7 | [
"MIT"
] | 4 | 2015-07-20T02:45:43.000Z | 2016-07-31T21:48:39.000Z | from ..generator import Generator
from ..platforms import available_platforms
from ..target import Target
import os
class XCConfigGenerator(Generator):
    @staticmethod
    def identifier():
        return 'xcconfig'

    def __xcconfig(self, needy, target, sdk, arch):
        header_search_paths = []
        library_search_paths = []

        for name, library in needy.libraries_to_build(target):
            header_search_paths.append(library.include_path())
            library_search_paths.append(library.library_path())

        ret = "NEEDY_HEADER_SEARCH_PATHS[sdk={},arch={}] = {}\n".format(sdk, arch, ' '.join(header_search_paths))
        ret += "NEEDY_LIBRARY_SEARCH_PATHS[sdk={},arch={}] = {}\n".format(sdk, arch, ' '.join(library_search_paths))
        ret += "HEADER_SEARCH_PATHS[sdk={},arch={}] = $(inherited) $(NEEDY_HEADER_SEARCH_PATHS)\n".format(sdk, arch)
        ret += "LIBRARY_SEARCH_PATHS[sdk={},arch={}] = $(inherited) $(NEEDY_LIBRARY_SEARCH_PATHS)\n".format(sdk, arch)
        ret += "\n"
        return ret
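    # Illustrative fragment of the generated xcconfig for one (sdk, arch) pair
    # (the need paths are hypothetical):
    #   NEEDY_HEADER_SEARCH_PATHS[sdk=macosx*,arch=x86_64] = /needs/zlib/include
    #   HEADER_SEARCH_PATHS[sdk=macosx*,arch=x86_64] = $(inherited) $(NEEDY_HEADER_SEARCH_PATHS)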
    def generate(self, needy):
        path = os.path.join(needy.needs_directory(), 'search-paths.xcconfig')

        contents = ''

        if 'osx' in available_platforms():
            contents += self.__xcconfig(needy, Target(needy.platform('osx'), 'i386'), 'macosx*', 'i386')
            contents += self.__xcconfig(needy, Target(needy.platform('osx'), 'x86_64'), 'macosx*', 'x86_64')
        if 'ios' in available_platforms():
            contents += self.__xcconfig(needy, Target(needy.platform('ios'), 'armv7'), 'iphoneos*', 'armv7')
            contents += self.__xcconfig(needy, Target(needy.platform('ios'), 'arm64'), 'iphoneos*', 'arm64')
        if 'iossimulator' in available_platforms():
            contents += self.__xcconfig(needy, Target(needy.platform('iossimulator'), 'i386'), 'iphonesimulator*', 'i386')
            contents += self.__xcconfig(needy, Target(needy.platform('iossimulator'), 'x86_64'), 'iphonesimulator*', 'x86_64')
        if 'tvos' in available_platforms():
            contents += self.__xcconfig(needy, Target(needy.platform('tvos'), 'arm64'), 'appletvos*', 'arm64')
        if 'tvossimulator' in available_platforms():
            contents += self.__xcconfig(needy, Target(needy.platform('tvossimulator'), 'i386'), 'appletvsimulator*', 'i386')
            contents += self.__xcconfig(needy, Target(needy.platform('tvossimulator'), 'x86_64'), 'appletvsimulator*', 'x86_64')

        with open(path, 'w') as xcconfig:
            xcconfig.write(contents)
| 47.886792 | 128 | 0.64342 | 277 | 2,538 | 5.66065 | 0.223827 | 0.091199 | 0.114796 | 0.143495 | 0.502551 | 0.485969 | 0.445153 | 0.409439 | 0.25 | 0.204082 | 0 | 0.025403 | 0.193459 | 2,538 | 52 | 129 | 48.807692 | 0.740596 | 0 | 0 | 0 | 0 | 0 | 0.234043 | 0.092987 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.102564 | 0.025641 | 0.25641 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b770811d02f4fdde6f9b0fafb36764cd7c74385 | 6,166 | py | Python | NLPEngine/app.py | hmi-digital/bot_platform | 91a26e566b07fa309774d0333a6bccf3d64cccc5 | [
"MIT"
] | null | null | null | NLPEngine/app.py | hmi-digital/bot_platform | 91a26e566b07fa309774d0333a6bccf3d64cccc5 | [
"MIT"
] | null | null | null | NLPEngine/app.py | hmi-digital/bot_platform | 91a26e566b07fa309774d0333a6bccf3d64cccc5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import os
import re
import sys, getopt
import threading
from warnings import simplefilter
import flask
from flask import request, abort, make_response, jsonify
from utils import nlp_config
from utils import log_util
from core import train_model, predict_model
from pubsub import consumer
from pubsub import process_message
from pubsub import producer as pr
from pubsub import create_topics
# ignore all warnings
simplefilter(action='ignore')
scriptDir = os.path.dirname(__file__)
app = flask.Flask(__name__)
SERVER_HOST = ''
SERVER_PORT = None
IS_BROKER = False
def initialise():
    # load all the config parameters
    nlp_config.load_parameters()
    global SERVER_HOST
    global SERVER_PORT
    global IS_BROKER
    SERVER_HOST = '0.0.0.0'
    SERVER_PORT = nlp_config.get_parameter('PORT')
    IS_BROKER = re.search(nlp_config.get_parameter('USE_BROKER'), 'true', re.IGNORECASE)
@app.route('/train', methods=['POST'])
def trainDomain():
    if not (request.args.get('domain')):
        log_util.log_errormsg("[APP] missing domain parameter")
        abort(404)

    if request.args.get('locale'):
        locale = request.args.get('locale')
    else:
        locale = 'en'

    domain = request.args.get('domain')
    res = train_model.train(domain, locale)
    n = int(json.loads(res)["utterances"])

    if re.search(nlp_config.get_parameter('ENSEMBLE'), 'true', re.IGNORECASE):
        md = 'ENSEMBLE'
    else:
        if nlp_config.get_parameter('ALGORITHM') == 'TFIDF':
            md = 'TFIDF'
        else:
            algo = os.path.splitext(nlp_config.get_parameter('CONFIG_FILE'))[0]
            algo = algo.split("_")[1].upper()
            md = 'NLU:' + algo

    if n > 0:
        response = {"messageId": "TRAIN_SUCCESS", "domain": domain, "locale": locale, "message": res, "model": md}
    else:
        response = {"messageId": "TRAIN_FAIL", "domain": domain, "locale": locale, "message": res, "model": md}

    return make_response(jsonify(response), 200,
                         {'Content-Type': 'application/json; charset=utf-8'})
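
# Illustrative client call for the endpoint above (host and port hypothetical):
#   curl -X POST 'http://localhost:8080/train?domain=my_domain&locale=en'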
@app.route('/predict', methods=['POST'])
def predict_query():
if not (request.args.get('domain') or request.args.get('userUtterance')):
log_util.log_errormsg("[APP] missing parameters")
abort(404)
if request.args.get('locale'):
locale = request.args.get('locale')
else:
locale = 'en'
utter = request.args.get('userUtterance')
if locale == 'en':
utterance = re.sub(r'[^a-zA-Z ]', '', utter)
domain = request.args.get('domain')
if re.search(nlp_config.get_parameter('ENSEMBLE'), 'true', re.IGNORECASE):
md = 'ENSEMBLE'
else:
if nlp_config.get_parameter('ALGORITHM') == 'TFIDF':
md = 'TFIDF'
else:
algo = os.path.splitext(nlp_config.get_parameter('CONFIG_FILE'))[0]
algo = algo.split("_")[1].upper()
md = 'NLU:' + algo
res = {"messageId": "PREDICT", "domain": domain, "locale": locale, "userUtterance": utterance, "model": md,
"message": predict_model.predict(domain, locale, utterance)}
return make_response(jsonify(res), 200,
{'Content-Type': 'application/json; charset=utf-8'})
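# Example request (hypothetical domain/utterance; the port comes from nlp_config):
#   curl -X POST 'http://localhost:5000/predict?domain=banking&userUtterance=check%20balance'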
@app.route('/health', methods=['GET'])
def health_query():
res = {"status": "OK"}
return make_response(jsonify(res), 200,
{'Content-Type': 'application/json; charset=utf-8'})
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'response': 'ERROR: Please check your query parameter'}), 404,
{'Content-Type': 'application/json; charset=utf-8'})
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
return response
def listener_thread():
log_util.log_infomsg("[APP] starting broker listener thread")
# create the topics
log_util.log_infomsg("[APP] creating the topics")
create_topics.create()
# initialise the producer
log_util.log_infomsg("[APP] creating the producer")
pr.initialise()
    # Run a consumer listener to process all the BOT_TO_NLP messages
consumer_ = consumer.initialise(nlp_config.get_parameter('TOPIC_BOT_TO_NLP'))
log_util.log_infomsg("[APP] checking for any messages")
for msg in consumer_:
log_util.log_infomsg(msg)
t = threading.Thread(target=process_message.process, args=(msg,))
t.start()
def main(argv):
port_val = ''
use_broker = False
initialise()
try:
opts, args = getopt.getopt(argv, 'hbp:', ['help', 'broker', 'port='])
except getopt.GetoptError:
print('app.py -p <port> -b (optional-use if broker services required)')
sys.exit(2)
for opt, arg in opts:
        if opt == '-h':
            print('app.py -p <port> -b (optional-use if broker services required)')
            sys.exit()
if opt in ('-b', '--broker'):
use_broker = True
            log_util.log_infomsg(f'[APP] broker services set to: {use_broker}')
if opt in ('-p', '--port'):
port_val = arg
log_util.log_infomsg(f'[APP] setting up NLP Engine port: {port_val}')
if port_val:
global SERVER_PORT
SERVER_PORT = port_val
if use_broker:
global IS_BROKER
IS_BROKER = True
if IS_BROKER:
log_util.log_infomsg("[APP] broker based NLPEngine enabled")
t = threading.Thread(target=listener_thread, daemon=True)
t.start()
else:
log_util.log_infomsg("[APP] REST API based NLPEngine enabled")
    if re.search('true', nlp_config.get_parameter('HTTPS'), re.IGNORECASE):
context_ = ('keys/nlp.crt', 'keys/nlp.pem')
app.run(debug=False, host=SERVER_HOST, port=SERVER_PORT, threaded=True, ssl_context=context_)
else:
app.run(debug=False, host=SERVER_HOST, port=SERVER_PORT, threaded=True)
if __name__ == '__main__':
main(sys.argv[1:])
| 33.879121 | 114 | 0.641421 | 789 | 6,166 | 4.852978 | 0.243346 | 0.028206 | 0.028728 | 0.054845 | 0.426482 | 0.374771 | 0.30034 | 0.276051 | 0.244189 | 0.244189 | 0 | 0.007676 | 0.218294 | 6,166 | 181 | 115 | 34.066298 | 0.786722 | 0.028381 | 0 | 0.321918 | 0 | 0 | 0.223596 | 0.022727 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054795 | false | 0 | 0.10274 | 0.006849 | 0.191781 | 0.013699 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b775b82a0dc44534647e77cf8aa3514bf4dd4f3 | 1,587 | py | Python | tests/models/status/test_remained.py | vikian050194/forty | 7ef68fb06bb22a3008351c5a651eaa46b635d433 | [
"MIT"
] | null | null | null | tests/models/status/test_remained.py | vikian050194/forty | 7ef68fb06bb22a3008351c5a651eaa46b635d433 | [
"MIT"
] | 7 | 2021-03-15T17:18:36.000Z | 2021-04-26T09:40:53.000Z | tests/models/status/test_remained.py | vikian050194/forty | 7ef68fb06bb22a3008351c5a651eaa46b635d433 | [
"MIT"
] | null | null | null | from datetime import timedelta, date
from forty.managers.project_manager import Config
from forty.views import RemainedStatusView
from forty.models import StatusModel
from forty.tools import ActionsBuilder as A
from ..model_test_case import ModelTestCase
class TestStatusModelRemainedMethod(ModelTestCase):
def __init__(self, *args, **kwargs):
ModelTestCase.__init__(self, *args, **kwargs)
@property
def model_class(self):
return StatusModel
def test_default(self):
view: RemainedStatusView = self.model.remained()
self.assertEqual(view.today, timedelta(hours=8))
self.assertEqual(view.total, timedelta(hours=40))
self.pm.load_project.assert_called_once()
self.pm.load_actions.assert_called_once()
def test_today_overtime(self):
actions = A().start().at(hour=9).finish().at(hour=18).done()
self.actions_to_return(actions)
view: RemainedStatusView = self.model.remained()
self.assertEqual(view.today, timedelta(hours=-1))
self.assertEqual(view.total, timedelta(hours=31))
def test_total_overtime(self):
test_config = Config(day_limit=6, total_limit=8)
test_config.today = date(year=2021, month=1, day=1)
self.config_to_return(test_config)
actions = A().start().at(hour=9).finish().at(hour=18).done()
self.actions_to_return(actions)
view: RemainedStatusView = self.model.remained()
self.assertEqual(view.today, timedelta(hours=-3))
self.assertEqual(view.total, timedelta(hours=-1))
| 32.387755 | 68 | 0.696912 | 197 | 1,587 | 5.441624 | 0.324873 | 0.083955 | 0.106343 | 0.086754 | 0.441231 | 0.441231 | 0.334888 | 0.334888 | 0.334888 | 0.334888 | 0 | 0.017067 | 0.187776 | 1,587 | 48 | 69 | 33.0625 | 0.814585 | 0 | 0 | 0.212121 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.242424 | 1 | 0.151515 | false | 0 | 0.181818 | 0.030303 | 0.393939 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b7b8c103d8d9616e435f7c21cd9decd8e6f142c | 2,650 | py | Python | tests/pconv_rfr.py | DesignStripe/torch_pconv | 49e781bef96c511660ec35fcba68cf734710b10b | [
"BSD-3-Clause"
] | 1 | 2021-08-13T18:22:07.000Z | 2021-08-13T18:22:07.000Z | tests/pconv_rfr.py | DesignStripe/torch_pconv | 49e781bef96c511660ec35fcba68cf734710b10b | [
"BSD-3-Clause"
] | null | null | null | tests/pconv_rfr.py | DesignStripe/torch_pconv | 49e781bef96c511660ec35fcba68cf734710b10b | [
"BSD-3-Clause"
] | null | null | null | ###############################################################################
# BSD 3-Clause License
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Author & Contact: Guilin Liu (guilinl@nvidia.com)
###############################################################################
"""
Code by Guilin Liu, probably with some modifications by jingyuanli001; code at
https://github.com/jingyuanli001/RFR-Inpainting/blob/faed6f154e01fc3accce5dff82a5b28e6f426fbe/modules/partialconv2d.py
I tried to modify the least code: just enough to make it compatible with 3D masks (instead of 4D)
"""
import torch
import torch.nn.functional as F
from torch import nn
class PConvRFR(nn.Conv2d):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.ones = torch.ones(self.out_channels, self.in_channels, self.kernel_size[0],
self.kernel_size[1])
# max value of one convolution's window
self.slide_winsize = self.ones.shape[1] * self.ones.shape[2] * self.ones.shape[3]
self.update_mask = None
self.mask_ratio = None
def forward(self, inputs, mask=None):
if len(inputs.shape) != 4 or len(mask.shape) != 3:
raise TypeError()
if inputs.dtype != torch.float32 or mask.dtype != torch.float32:
raise TypeError()
mask = mask[:, None].expand(-1, inputs.shape[1], -1, -1)
with torch.no_grad():
self.update_mask = F.conv2d(mask, self.ones.to(mask), bias=None, stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=1)
self.mask_ratio = self.slide_winsize / (self.update_mask + 1e-8)
self.update_mask = torch.clamp(self.update_mask, 0, 1)
self.mask_ratio *= self.update_mask
raw_out = nn.Conv2d.forward(self, inputs * mask)
if self.bias is not None:
bias_view = self.bias.view(1, self.out_channels, 1, 1)
output = torch.mul(raw_out - bias_view, self.mask_ratio) + bias_view
output = torch.mul(output, self.update_mask)
else:
output = raw_out * self.mask_ratio
return output, self.update_mask[:, 0]
def set_weight(self, w):
with torch.no_grad():
self.weight.copy_(w)
return self
def set_bias(self, b):
with torch.no_grad():
self.bias.copy_(b)
return self
def get_weight(self):
return self.weight
def get_bias(self):
return self.bias
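# Minimal smoke test (a sketch; shapes and channel counts chosen arbitrarily).
# Note the 3D mask (N, H, W) instead of 4D, which is the point of this variant.
if __name__ == '__main__':
    pconv = PConvRFR(in_channels=3, out_channels=8, kernel_size=3, padding=1)
    inputs = torch.rand(2, 3, 16, 16)
    mask = torch.ones(2, 16, 16)
    output, updated_mask = pconv(inputs, mask)
    print(output.shape, updated_mask.shape)  # (2, 8, 16, 16) and (2, 16, 16)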
| 33.974359 | 118 | 0.569057 | 328 | 2,650 | 4.466463 | 0.375 | 0.054608 | 0.076451 | 0.030717 | 0.063481 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030036 | 0.271321 | 2,650 | 77 | 119 | 34.415584 | 0.728638 | 0.176981 | 0 | 0.155556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.066667 | 0.044444 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b7c9765a8a3c6eb3b11ae701c1e55797770abd3 | 7,543 | py | Python | rs.py | rozium/rs-backend | 9a5f81daf5b87a960436a95c91896b01860e3636 | [
"MIT"
] | null | null | null | rs.py | rozium/rs-backend | 9a5f81daf5b87a960436a95c91896b01860e3636 | [
"MIT"
] | 1 | 2019-04-05T13:23:13.000Z | 2019-04-05T13:23:13.000Z | rs.py | rozium/rs-backend | 9a5f81daf5b87a960436a95c91896b01860e3636 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Rumah Sahaja backend
import jwt
import hashlib
import datetime
from functools import wraps
from flask_sqlalchemy import SQLAlchemy
from flask import Flask, request, jsonify, make_response
from flask_cors import CORS
app = Flask(__name__)
app.config.from_pyfile('rs.cfg')
__DEBUG__ = True
__FrontEndURL__ = "http://localhost:3000"
# set resources and origins access
CORS(app, resources={r"/*": {"origins": __FrontEndURL__}})
db = SQLAlchemy(app)
# MODEL ######################################################################################################################
class Profile(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
phone = db.Column(db.String(50))
email = db.Column(db.String(50))
address = db.Column(db.Text)
about = db.Column(db.Text)
vision = db.Column(db.Text)
mission = db.Column(db.Text)
link = db.Column(db.Text)
created_at = db.Column(db.DateTime)
updated_at = db.Column(db.DateTime)
class News(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100))
content = db.Column(db.Text)
created_at = db.Column(db.DateTime)
updated_at = db.Column(db.DateTime)
images = db.Column(db.Text)
images_caption = db.Column(db.Text)
class Donation(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
phone = db.Column(db.String(50))
email = db.Column(db.String(50))
amount = db.Column(db.Integer)
receipt = db.Column(db.Text)
created_at = db.Column(db.DateTime)
# TOKEN ######################################################################################################################
def token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
token = None
if 'X-Access-Token' in request.headers:
token = request.headers['X-Access-Token']
if not token:
return jsonify({'message' : 'Token required!'}), 401
        try:
            data = jwt.decode(token, app.config['SECRET_KEY'])
            verified = data['verified']
        except:
            return jsonify({'message' : 'Token is invalid!'}), 401
        # The decorated views below take no extra argument, so the verified
        # flag is checked here instead of being passed through to f
        if not verified:
            return jsonify({'message' : 'Token is invalid!'}), 401
        return f(*args, **kwargs)
    return decorated
# LOGIN ######################################################################################################################
@app.route('/login')
def login():
auth = request.authorization
if not auth or not auth.username or not auth.password:
return make_response('Could not verify', 401, {'WWW-Authenticate' : 'Basic realm="Login required!"'})
m = hashlib.md5()
    m.update(auth.password.encode('utf-8'))  # hashlib requires bytes, not str
if app.config['ADMIN_USERNAME'] == auth.username and app.config['ADMIN_PASSWORD'] == m.hexdigest():
        token = jwt.encode({'verified': True, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
        # PyJWT < 2 returns bytes, PyJWT >= 2 returns str
        if isinstance(token, bytes):
            token = token.decode('UTF-8')
        return jsonify({'token': token})
return make_response('Could not verify', 401, {'WWW-Authenticate' : 'Basic realm="Login required!"'})
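# Example flow (hypothetical credentials and port): obtain a token via HTTP
# Basic auth, then pass it back in the X-Access-Token header:
#   curl -u admin:secret http://localhost:5000/login
#   curl -X PUT http://localhost:5000/profile -H 'X-Access-Token: <token>' \
#        -H 'Content-Type: application/json' -d '{"name": "...", ...}'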
# API PROFILE ################################################################################################################
@app.route('/profile', methods=['GET'])
def get_profile():
profile = Profile.query.first()
data = jsonify({
'name': profile.name,
'phone': profile.phone,
'email': profile.email,
'address': profile.address,
'about': profile.about,
'vision': profile.vision,
'mission': profile.mission,
'link': profile.link
})
return data
@app.route('/profile', methods=['PUT'])
@token_required
def update_profile():
profile = Profile.query.first()
data = request.get_json()
profile.name = data['name']
profile.phone = data['phone']
profile.email = data['email']
profile.address = data['address']
profile.about = data['about']
profile.vision = data['vision']
profile.mission = data['mission']
profile.link = data['link']
profile.updated_at = datetime.datetime.now()
db.session.commit()
return jsonify({ 'status': 200, 'message': 'update success' })
@app.route('/profile', methods=['POST'])
@token_required
def create_profile():
data = request.get_json()
    profile = Profile(name=data['name'],
                      phone=data['phone'],
                      email=data['email'],
                      address=data['address'],
                      about=data['about'],
                      vision=data['vision'],
                      mission=data['mission'],
                      link=data['link'],
                      created_at=datetime.datetime.now())
    db.session.add(profile)
db.session.commit()
return jsonify({ 'status': 201, 'message': 'create success'})
# API News ####################################################################################################################
@app.route('/news', methods=['GET'])
def get_all_news():
news = News.query.all()
def convert_to_json(object):
return {
'id': object.id,
'title': object.title,
'content': object.content,
'created_at': object.created_at,
'updated_at': object.updated_at,
'images': object.images,
'images_caption': object.images_caption
}
data = jsonify(list(map(convert_to_json, news)))
return data
@app.route('/news', methods=['POST'])
@token_required
def create_news():
data = request.get_json()
news = News(title=data['title'],
content=data['content'],
created_at=datetime.datetime.now(),
images=data['images'],
images_caption=data['images_caption'])
db.session.add(news)
db.session.commit()
return jsonify({ 'status': 201, 'message': 'create success'})
@app.route('/news', methods=['PUT'])
@token_required
def update_news():
data = request.get_json()
news_id = data['id']
news = News.query.filter_by(id=news_id).first()
news.title = data['title']
news.content = data['content']
news.updated_at = datetime.datetime.now()
news.images = data['images']
news.images_caption = data['images_caption']
db.session.commit()
return jsonify({ 'status': 200, 'message': 'update success' })
@app.route('/news/<news_id>', methods=['DELETE'])
@token_required
def delete_news(news_id):
news = News.query.filter_by(id=news_id).first()
db.session.delete(news)
db.session.commit()
return jsonify({ 'status': 200, 'message': 'delete success'})
# API Donation #################################################################################################################
@app.route('/donation', methods=['GET'])
@token_required
def get_all_donations():
donation = Donation.query.all()
def convert_to_json(object):
return {
'id': object.id,
'name': object.name,
'phone': object.phone,
'email': object.email,
'amount': object.amount,
'receipt': object.receipt,
'created_at': object.created_at
}
data = jsonify(list(map(convert_to_json, donation)))
return data
@app.route('/donation', methods=['POST'])
@token_required
def create_donation():
data = request.get_json()
donation = Donation(name=data['name'],
phone=data['phone'],
email=data['email'],
amount=data['amount'],
receipt=data['receipt'],
created_at=datetime.datetime.now())
db.session.add(donation)
db.session.commit()
return jsonify({ 'status': 201, 'message': 'create success'})
# Main #################################################################################################################
if __name__ == '__main__':
app.run(debug=__DEBUG__) | 31.560669 | 136 | 0.580141 | 863 | 7,543 | 4.951333 | 0.172654 | 0.046806 | 0.058507 | 0.029487 | 0.428973 | 0.397847 | 0.326235 | 0.296747 | 0.270536 | 0.251814 | 0 | 0.008438 | 0.167307 | 7,543 | 239 | 137 | 31.560669 | 0.671868 | 0.018163 | 0 | 0.314136 | 0 | 0 | 0.137371 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073298 | false | 0.015707 | 0.036649 | 0.010471 | 0.350785 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b829b263e7d55960edfb8d2e11a2f4d079044be | 1,334 | py | Python | powerforecast/base.py | htpauleta/PowerForecast | f0e24e94c9dc62e56d2b770b85f6284ab96d697c | [
"Apache-2.0"
] | null | null | null | powerforecast/base.py | htpauleta/PowerForecast | f0e24e94c9dc62e56d2b770b85f6284ab96d697c | [
"Apache-2.0"
] | null | null | null | powerforecast/base.py | htpauleta/PowerForecast | f0e24e94c9dc62e56d2b770b85f6284ab96d697c | [
"Apache-2.0"
] | 1 | 2020-12-07T02:21:12.000Z | 2020-12-07T02:21:12.000Z | """
@Project : PowerForecast
@Module : base.py
@Author : HjwGivenLyy [1752929469@qq.com]
@Created : 3/12/19 1:47 PM
@Desc : basic module of entire project
"""
import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.model_selection import train_test_split
FILE_PATH = "/home/pauleta/project/PowerForecast/powerforecast/data/"
def evaluation_criteria_model(true_y: np.array, predict_y: np.array,
eval_type: str="mape") -> float:
"""模型评估准则"""
rtn = 0.0
if eval_type == "mape":
rtn = np.mean(np.abs(true_y - predict_y) / true_y)
elif eval_type == "mse":
rtn = mean_squared_error(true_y, predict_y)
elif eval_type == "mae":
rtn = mean_absolute_error(true_y, predict_y)
return rtn
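# Quick sanity check (hypothetical arrays): a perfect prediction gives MAPE 0.
#   evaluation_criteria_model(np.array([1.0, 2.0]), np.array([1.0, 2.0]))        # -> 0.0
#   evaluation_criteria_model(np.array([2.0]), np.array([1.0]), eval_type="mae") # -> 1.0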
def split_train_test_data(feature_x: np.array, label_y: np.array=None,
test_size: float=0.2):
"""将数据集分为 train 和 test """
if label_y is not None:
x_train, x_test, y_train, y_test = train_test_split(
feature_x, label_y, test_size=test_size, random_state=123456)
return x_train, x_test, y_train, y_test
else:
x_train, x_test = train_test_split(
feature_x, test_size=test_size, random_state=123456)
return x_train, x_test
| 30.318182 | 73 | 0.656672 | 197 | 1,334 | 4.147208 | 0.390863 | 0.0306 | 0.034272 | 0.053856 | 0.265606 | 0.221542 | 0.162791 | 0.162791 | 0.122399 | 0.122399 | 0 | 0.033597 | 0.241379 | 1,334 | 43 | 74 | 31.023256 | 0.773715 | 0.144678 | 0 | 0 | 0 | 0 | 0.061443 | 0.048976 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.125 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b845d9705a2fcbc398aa92244efea0721f73204 | 3,331 | py | Python | utils/metrics.py | raghuch/SABER | fb0f26152b009f923aaf572cef80940f2f256330 | [
"MIT"
] | 5 | 2019-11-22T12:42:38.000Z | 2020-07-14T14:51:47.000Z | utils/metrics.py | raghuch/SABER | fb0f26152b009f923aaf572cef80940f2f256330 | [
"MIT"
] | null | null | null | utils/metrics.py | raghuch/SABER | fb0f26152b009f923aaf572cef80940f2f256330 | [
"MIT"
] | 1 | 2021-02-02T08:43:00.000Z | 2021-02-02T08:43:00.000Z | import Levenshtein as Lev
import numpy as np
from utils.model_utils import get_most_probable
from ignite.metrics import Metric, Accuracy
from ignite.metrics.metric import reinit__is_reduced
from datasets.librispeech import get_vocab_list, sequence_to_string
import torch
def werCalc(s1, s2):
"""
Computes the Word Error Rate, defined as the edit distance between the
two provided sentences after tokenizing to words.
Arguments:
s1 (string): space-separated sentence
s2 (string): space-separated sentence
"""
s1 = s1.lower()
s2 = s2.lower()
# build mapping of words to integers
b = set(s1.split() + s2.split())
word2char = dict(zip(b, range(len(b))))
# map the words to a char array (Levenshtein packages only accepts
# strings)
w1 = [chr(word2char[w]) for w in s1.split()]
w2 = [chr(word2char[w]) for w in s2.split()]
return Lev.distance(''.join(w1), ''.join(w2)) / len(s2.split(' '))
def cerCalc(s1, s2):
"""
Computes the Character Error Rate, defined as the edit distance.
Arguments:
s1 (string): space-separated sentence
s2 (string): space-separated sentence
"""
s1 = s1.lower()
s2 = s2.lower()
s1, s2 = s1.replace(' ', ''), s2.replace(' ', '')
return Lev.distance(s1, s2) / len(s2)
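# Quick sanity check (hypothetical strings, not from any test suite):
#   werCalc("the cat sit", "the cat sat")  # 1 substitution / 3 words  ~= 0.333
#   cerCalc("abd", "abc")                  # 1 edit / 3 characters     ~= 0.333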
def batch_wer_accuracy(preds, labels, label_lengths):
pred_sentences = get_most_probable(preds)
labels_list = labels.tolist()
idx = 0
wer = []
for i, length in enumerate(label_lengths.cpu().tolist()):
pred_sentence = pred_sentences[i]
gt_sentence = sequence_to_string(labels_list[idx:idx+length])
wer.append(werCalc(pred_sentence, gt_sentence))
idx += length
return np.sum(wer)
def batch_cer_accuracy(preds, labels, label_lengths):
pred_sentences = get_most_probable(preds)
labels_list = labels.tolist()
idx = 0
cer = []
for i, length in enumerate(label_lengths.cpu().tolist()):
pred_sentence = pred_sentences[i]
gt_sentence = sequence_to_string(labels_list[idx:idx+length])
cer.append(cerCalc(pred_sentence, gt_sentence))
idx += length
return np.sum(cer)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.sum += val * n
self.count += n
    def get_average(self):
        return self.sum / self.count
class WordErrorRate(Accuracy):
@reinit__is_reduced
def update(self, output):
with torch.no_grad():
y_pred, labels, label_lengths = output
wer = batch_wer_accuracy(y_pred, labels, label_lengths)
self._num_correct += wer
self._num_examples += label_lengths.shape[0]
class CharacterErrorRate(Accuracy):
@reinit__is_reduced
def update(self, output):
with torch.no_grad():
y_pred, labels, label_lengths = output
cer = batch_cer_accuracy(y_pred, labels, label_lengths)
self._num_correct += cer
self._num_examples += label_lengths.shape[0]
if __name__ == "__main__":
s1 = "WTF WHO ARE YOu"
s2 = "wff who are you"
wer = werCalc(s2, s1)
cer = cerCalc(s2, s1)
print(wer, cer) | 31.424528 | 74 | 0.650555 | 451 | 3,331 | 4.616408 | 0.301552 | 0.057637 | 0.051873 | 0.053794 | 0.529299 | 0.529299 | 0.511047 | 0.447646 | 0.447646 | 0.364073 | 0 | 0.018898 | 0.237466 | 3,331 | 106 | 75 | 31.424528 | 0.800787 | 0.160612 | 0 | 0.373333 | 0 | 0 | 0.015013 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.093333 | 0.013333 | 0.333333 | 0.013333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b8ba167116b4e06f9ce668216dc0aca60bfda34 | 3,416 | py | Python | docs/update_readme.py | KarlTDebiec/PipeScaler | b990ece8f3dd2c3506c226ed871871997fc57beb | [
"BSD-3-Clause"
] | 1 | 2022-02-07T03:47:53.000Z | 2022-02-07T03:47:53.000Z | docs/update_readme.py | KarlTDebiec/PipeScaler | b990ece8f3dd2c3506c226ed871871997fc57beb | [
"BSD-3-Clause"
] | 49 | 2022-01-17T15:16:22.000Z | 2022-03-28T03:00:39.000Z | docs/update_readme.py | KarlTDebiec/PipeScaler | b990ece8f3dd2c3506c226ed871871997fc57beb | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# update_readme.py
#
# Copyright (C) 2020-2021 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license.
"""Updates readme."""
import re
from inspect import cleandoc, getfile
from os.path import dirname, join, splitext
from pathlib import Path
from types import ModuleType
from typing import Dict, List, Type
from pipescaler import mergers, processors, sorters, sources, splitters, termini
from pipescaler.common import package_root, validate_input_path
from pipescaler.core import Stage
def get_github_link(cls: Type) -> str:
"""
Gets the GitHub master branch link to the file containing a class
Args:
cls: Class for which to get link
Returns:
GitHub link
"""
return "/".join(
["https://github.com/KarlTDebiec/PipeScaler/tree/master"]
+ list(Path(getfile(cls)).parts[len(Path(package_root).parts) - 1 :])
)
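# For example, a class defined in pipescaler/core/stage.py (hypothetical path)
# would resolve to:
# https://github.com/KarlTDebiec/PipeScaler/tree/master/pipescaler/core/stage.py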
def get_module_regexes(modules: List[ModuleType]) -> Dict[ModuleType, re.Pattern]:
"""
Gets regular expressions to identify README sections for provided modules
Args:
modules: Modules for which to generate regexes
Returns:
Dictionary of modules to their regexes
"""
module_regexes = {}
for module in modules:
module_name = splitext(module.__name__)[-1].lstrip(".")
module_regex = re.compile(
f"[\S\s]*(?P<header>^.*{module_name}:$)\n(?P<body>(^\*\s.*$\n)+)[\S\s]*",
re.MULTILINE,
)
module_regexes[module] = module_regex
return module_regexes
def get_stage_description(stage: Stage) -> str:
"""
Gets the formatted description of a stage, including GitHub link
Uses the first block of lines in the Stage's docstring
Args:
stage: Stage for which to get formatted description
Returns:
Formatted description of stage
"""
name = stage.__name__
link = get_github_link(stage)
doc = stage.__doc__
if doc is None:
return f"* [{name}]({link})\n"
else:
doc_lines = cleandoc(stage.__doc__).split("\n")
try:
doc_head = " ".join(line for line in doc_lines[: doc_lines.index("")])
except ValueError:
doc_head = " ".join(line for line in doc_lines)
return f"* [{name}]({link}) - {doc_head}\n"
def get_stage_descriptions(module: ModuleType) -> str:
"""
Gets the descriptions of stages within a module
Args:
module: Module for which to get stage descriptions
Returns:
Formatted descriptions of stages
"""
section = ""
for stage in map(module.__dict__.get, module.__all__):
section += get_stage_description(stage)
return section
if __name__ == "__main__":
readme_filename = validate_input_path(join(dirname(package_root), "README.md"))
# Read README
with open(readme_filename, "r") as readme_file:
readme = readme_file.read()
# Update README
module_regexes = get_module_regexes(
[mergers, processors, sorters, sources, splitters, termini]
)
for module, module_regex in module_regexes.items():
body = module_regex.match(readme)["body"]
readme = readme.replace(body, get_stage_descriptions(module))
# Write README
with open(readme_filename, "w") as readme_file:
readme_file.write(readme)
| 28.705882 | 85 | 0.658372 | 435 | 3,416 | 4.983908 | 0.347126 | 0.041974 | 0.01845 | 0.017989 | 0.098708 | 0.072878 | 0.02952 | 0.02952 | 0.02952 | 0 | 0 | 0.003833 | 0.236241 | 3,416 | 118 | 86 | 28.949153 | 0.827137 | 0.278103 | 0 | 0 | 0 | 0.018519 | 0.087855 | 0.029716 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.166667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b8e709913bd8c5a373bf4dc6fb3e19539c1836c | 7,060 | py | Python | inventory_to_shippo_labels.py | ramanshahdatascience/t-shirts | a49edfeddd6f8eb51979464cce260b0288c75f4a | [
"BSD-3-Clause"
] | 1 | 2022-02-28T03:44:10.000Z | 2022-02-28T03:44:10.000Z | inventory_to_shippo_labels.py | ramanshahdatascience/t-shirts | a49edfeddd6f8eb51979464cce260b0288c75f4a | [
"BSD-3-Clause"
] | null | null | null | inventory_to_shippo_labels.py | ramanshahdatascience/t-shirts | a49edfeddd6f8eb51979464cce260b0288c75f4a | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python3
'''Usage:
./inventory_to_shippo_labels.py tshirt_inventory.xlsx labels.csv'''
import copy
import csv
import math
import re
from sys import argv
import warnings
import openpyxl
# Measured masses of t-shirts by size, in oz
SHIRT_WEIGHTS = {'MXS': 3.45 / 1,
'MS': 11.45 / 3,
'MM': 38.80 / 9,
'ML': 33.35 / 7,
'MXL': 21.15 / 4,
'M2XL': 17.60 / 3,
'M3XL': 6.55 / 1,
'WS': 9.75 / 3,
'WM': 18.05 / 5,
'WL': 16.55 / 4,
'WXL': 17.95 / 4,
'W2XL': 9.95 / 2}
# Empirical, based on finished packages:
# MM: 7.65 oz
# ML: 8.45 oz
BALANCE_OF_SHIPMENT = \
((7.65 + 8.45) - (SHIRT_WEIGHTS['MM'] + SHIRT_WEIGHTS['ML'])) / 2.0 # ~3.51 oz
# Shipping weights are actually uncertain due to manufacturing variance and
# taping technique. To demonstrate the uncertainty, I weighed a box (3.15 oz)
# and card with envelope (0.40 oz) and the sum (3.55 oz) is more than the
# empirical balance of shipment, which includes a label and tape. I saw
# individual shirt weights vary ~0.5 oz min to max. Add a small margin of
# safety to reduce the risk of buying too little postage (noting that having
# too much margin jacks typical MM orders, which weigh close to 8 oz, from 8 oz
# to 9 oz, which incurs a big price increase).
MARGIN_OF_SAFETY = 0.15
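# Worked example using the constants above: an MM shirt weighs 38.80 / 9
# ~= 4.31 oz; adding the ~3.51 oz balance of shipment and the 0.15 oz margin
# gives ~7.97 oz, which math.ceil() rounds up to 8 oz of postage.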
DEFAULT_COUNTRY = 'US'
COUNTRIES = {
'US': {'fields': ['City', 'State/Province', 'Zip/Postal Code'],
'postal_code_regex': re.compile(r'[0-9]{5}(-[0-9]{4})?')},
'GB': {'fields': ['City', 'Zip/Postal Code'],
'postal_code_regex': re.compile(
r'[A-Z][A-Z0-9]{1,3} [0-9][A-Z]{2}')},
'IE': {'fields': ['City', 'State/Province', 'Zip/Postal Code'],
'postal_code_regex': re.compile(r'[A-Z][0-9][0-9W] [A-Z0-9]{4}')}}
SHIPPO_FIELDS = {'Order Number': None,
'Order Date': None,
'Recipient Name': None,
'Company': None,
'Email': None,
'Phone': None,
'Street Line 1': None,
'Street Number': None,
'Street Line 2': None,
'City': None,
'State/Province': None,
'Zip/Postal Code': None,
'Country': None,
'Item Title': None,
'SKU': None,
'Quantity': None,
'Item Weight': None,
'Item Weight Unit': None,
'Item Price': None,
'Item Currency': None,
'Order Weight': None,
'Order Weight Unit': 'oz',
'Order Amount': None,
'Order Currency': None}
def shippo_details(size_text, name_text, address_text):
'''Parse spreadsheet row into Shippo fields.'''
result = copy.deepcopy(SHIPPO_FIELDS)
# Real addresses and not notes to hand-deliver have parts, thus commas
if address_text.find(', ') > -1:
result['Recipient Name'] = name_text
result['Order Weight'] = math.ceil(
SHIRT_WEIGHTS[size_text] +
BALANCE_OF_SHIPMENT +
MARGIN_OF_SAFETY)
address_fields = _address_fields(address_text)
result.update(address_fields)
else:
result = None
return result
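# For example (hypothetical row), shippo_details('MM', 'Jane Doe',
# '123 Main St, Springfield, IL 62704') yields Order Weight 8 and the parsed
# US address fields, while an address without commas (a hand-delivery note)
# returns None.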
def _address_fields(address_text):
fields = {}
if address_text[-2:] in COUNTRIES:
country = address_text[-2:]
fields['Country'] = country
remainder = address_text[:-2].strip().strip(',')
elif COUNTRIES[DEFAULT_COUNTRY]['postal_code_regex'].search(address_text):
country = DEFAULT_COUNTRY
fields['Country'] = country
remainder = address_text.strip().strip(',')
else:
raise Exception(f'Parse error on "{address_text}".')
for field in COUNTRIES[country]['fields'][::-1]:
# Fill out the country's address schema parsing the address text from
# the right
if field == 'Zip/Postal Code':
# Advance to last postal code match (sometimes things like
# five-digit street addresses will break the naive regex match)
regex = COUNTRIES[country]['postal_code_regex']
for postcode_match in re.finditer(regex, remainder):
pass
postcode_loc = postcode_match.span()
fields[field] = postcode_match.group().strip()
assert remainder[postcode_loc[1]:].strip().strip(',') == ''
remainder = remainder[:postcode_loc[0]].strip().strip(',')
elif field == 'State/Province':
# Assumes all state/province codes are all-caps abbreviations, or
# Irish counties
regex = re.compile(r'[A-Z][A-Z]+|County [A-Za-z]*|Co\. [A-Za-z]*')
for prov_match in re.finditer(regex, remainder):
pass
prov_loc = prov_match.span()
fields[field] = prov_match.group().strip()
assert remainder[prov_loc[1]:].strip().strip(',') == ''
remainder = remainder[:prov_loc[0]].strip().strip(',')
else:
loc = remainder.rfind(',')
fields[field] = remainder[loc + 1:].strip()
remainder = remainder[:loc].strip().strip(',')
street_address_parts = remainder.split(',')
if len(street_address_parts) == 1:
for unit_marker in ['Apt', 'Apartment', 'Unit', '#']:
loc = remainder.find(unit_marker)
if loc > -1:
fields['Street Line 1'] = remainder[:loc].strip()
fields['Street Line 2'] = remainder[loc:].strip()
break
if 'Street Line 1' not in fields:
fields['Street Line 1'] = remainder.strip().strip(',')
elif len(street_address_parts) == 2:
fields['Street Line 1'] = street_address_parts[0].strip()
fields['Street Line 2'] = street_address_parts[1].strip()
else:
raise Exception
return fields
wb = openpyxl.load_workbook(filename=argv[1], data_only=True)
results = []
assert wb['outgoing']['C1'].value == 'name'
assert wb['outgoing']['D1'].value == 'address'
assert wb['outgoing']['E1'].value == 'shipped'
ir = wb['outgoing'].iter_rows()
next(ir) # Header row
for row in ir:
size = row[1].value
name = row[2].value
address = row[3].value
shipped = row[4].value
# Omit recipients that are unconfirmed (and thus have no shipping address
# and size)
if all(i is not None for i in (size, name, address)) and shipped != 'Y':
if re.compile(r'[0-9]XL').search(size):
warnings.warn('Shirts bigger than XL may need a bigger box.')
fields = shippo_details(size, name, address)
if fields is not None:
results.append(fields)
with open(argv[2], 'w') as cf:
writer = csv.DictWriter(cf, fieldnames=SHIPPO_FIELDS.keys())
writer.writeheader()
for result in results:
writer.writerow(result)
| 35.124378 | 82 | 0.565156 | 896 | 7,060 | 4.362723 | 0.327009 | 0.02814 | 0.016628 | 0.015349 | 0.14505 | 0.101561 | 0.064722 | 0.041954 | 0.041954 | 0.041954 | 0 | 0.029821 | 0.297026 | 7,060 | 200 | 83 | 35.3 | 0.757808 | 0.177337 | 0 | 0.057143 | 0 | 0.021429 | 0.162626 | 0 | 0 | 0 | 0 | 0 | 0.035714 | 1 | 0.014286 | false | 0.014286 | 0.05 | 0 | 0.078571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b8f165c611bb069dfd9e9a4299c2c9de8f14d21 | 1,299 | py | Python | projecteuler/projectEuler13.py | qingfengxia/python-projecteuler | a2cba042fe7256364f6a5fa55df805a87da9a301 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | projecteuler/projectEuler13.py | qingfengxia/python-projecteuler | a2cba042fe7256364f6a5fa55df805a87da9a301 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | projecteuler/projectEuler13.py | qingfengxia/python-projecteuler | a2cba042fe7256364f6a5fa55df805a87da9a301 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals, absolute_import, division
""" Work out the first ten digits of
the sum of the following one-hundred 50-digit numbers.
"""
from projecteulerhelper import *
##########################################
def ProjectEuler13():
    # read them into an int matrix: each 50-digit number is divided into ten 5-digit chunks
    # (int32 can only hold a 9-digit number)
fileName = "ProjectEuler13.txt"
nd=5
ll=ReadBigInt(fileName, nd)
print("the row of data file, count of big int", len(ll))
SumMultipleDigits(ll,nd)
def SumMultipleDigits(ll,nd):
""" sum of multiple digits >>int32, each number is put in a row list """
s=[]
    sumStr = ''  # make sure it does NOT share a name with the built-in function sum!!!
base=10
for col in range(len(ll[0])):
s.append( sum([ll[row][col] for row in range(len(ll)) ]) )
for i in range(len(ll[0])-2, -1, -1):
    for i in range(len(ll[0])-2, -1, -1):
        s[i] += s[i+1] // (base**nd)   # carry the overflow into the next chunk
        s[i+1] = s[i+1] % (base**nd)
    for col in range(len(ll[0])):
        # pad every chunk after the first to nd digits; dropping leading zeros
        # here would corrupt the concatenated sum
        chunk = str(s[col])
        sumStr += chunk if col == 0 else chunk.zfill(nd)
print("the first ten digits", sumStr[:10])
return sumStr
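# Tiny illustration with nd=2: the numbers 1234 and 9999 become the chunk rows
# [12, 34] and [99, 99]; the column sums are [111, 133], the carry loop turns
# them into [112, 33], and zero-padded concatenation yields '11233' = 1234 + 9999.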
if __name__ == "__main__":
ProjectEuler13()
| 34.184211 | 88 | 0.585835 | 187 | 1,299 | 3.989305 | 0.497326 | 0.033512 | 0.053619 | 0.064343 | 0.092493 | 0.050938 | 0.050938 | 0 | 0 | 0 | 0 | 0.034056 | 0.254042 | 1,299 | 37 | 89 | 35.108108 | 0.73581 | 0.227868 | 0 | 0.086957 | 0 | 0 | 0.103194 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.086957 | 0 | 0.217391 | 0.130435 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b909b1dba2bfad79729fc3069bd4e37223b3379 | 2,758 | py | Python | merc/features/rfc1459/info.py | merc-devel/merc | 15e010db2474b5d9f9720fc83983b03c95063a02 | [
"MIT"
] | 4 | 2015-02-15T03:37:34.000Z | 2017-03-27T12:39:10.000Z | merc/features/rfc1459/info.py | merc-devel/merc | 15e010db2474b5d9f9720fc83983b03c95063a02 | [
"MIT"
] | null | null | null | merc/features/rfc1459/info.py | merc-devel/merc | 15e010db2474b5d9f9720fc83983b03c95063a02 | [
"MIT"
] | null | null | null | import collections
import datetime
from merc import feature
from merc import message
INFO_TEMPLATE = """\
____
__/ / /___ _ ___ ________
/_ . __/ ' \/ -_) __/ __/
/_ __/_/_/_/\__/_/ \__/
/_/_/
The Modern Extensible Relay Chat daemon, version {version}.
Copyright (C) {year}, #merc-devel
This software is licensed under the terms of the MIT license. The LICENSE file
in the source root contains full details and usage terms.
Visit us: http://merc-devel.com
Visit us on IRC: #merc @ irc.merc-devel.com
Get the merc source code at: https://github.com/merc-devel/merc
The following people have contributed significantly to merc, in
nickname-alphabetical order:
rfw, Tony Young <tony@rfw.name>
Shiz <hi@shiz.me>
This merc instance has been online since {online_since}, meaning it has been up
for {online_for}!
"""
class InfoFeature(feature.Feature):
NAME = __name__
install = InfoFeature.install
class InfoReply(message.Reply):
NAME = "371"
FORCE_TRAILING = True
MIN_ARITY = 1
def __init__(self, line):
self.line = line
def as_reply_params(self):
return [self.line]
class EndOfInfo(message.Reply):
NAME = "374"
MIN_ARITY = 1
def __init__(self, reason="End of /INFO list", *args):
self.reason = reason
def as_reply_params(self):
return [self.reason]
@InfoFeature.register_user_command
class Info(message.Command):
NAME = "INFO"
MIN_ARITY = 0
@message.Command.requires_registration
def handle_for(self, app, user, prefix):
year = datetime.date.today().year
online_since = app.creation_time.strftime("%c")
online_for = friendly_timespan(datetime.datetime.now() -
app.creation_time)
lines = INFO_TEMPLATE.format(
version=app.version,
year=year,
online_since=online_since,
online_for=online_for)
for line in lines.splitlines():
user.send_reply(InfoReply(line))
app.run_hooks("server.info", user)
user.send_reply(EndOfInfo())
def friendly_timespan(diff, range=3):
UNITS = collections.OrderedDict([
('year', 31536000),
('month', 2592000),
('week', 604800),
('day', 86400),
('hour', 3600),
('minute', 60),
('second', 1)
])
seconds = round(diff.total_seconds())
indications = []
for unit, amount in UNITS.items():
n, seconds = divmod(seconds, amount)
if n == 0:
continue
elif n > 1:
unit += "s"
indications.append('{} {}'.format(n, unit))
if range is not None:
range -= 1
if range == 0:
break
if len(indications) > 0:
if len(indications) > 1:
return ", ".join(indications[:-1]) + " and " + indications[-1]
return indications[0]
else:
return "a small while"
| 22.422764 | 79 | 0.652284 | 353 | 2,758 | 4.858357 | 0.461756 | 0.03207 | 0.029738 | 0.025656 | 0.058309 | 0.058309 | 0.034985 | 0 | 0 | 0 | 0 | 0.024276 | 0.22335 | 2,758 | 122 | 80 | 22.606557 | 0.776377 | 0 | 0 | 0.044444 | 0 | 0 | 0.298042 | 0.007614 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.044444 | 0.022222 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b90ae9be207fedc1b292b926c40882eae131319 | 1,878 | py | Python | divizor_master.py | Andrey-Mel/DZ_lesson_5 | 0f80a352c6d349738b22780942c7738c7d33e2fe | [
"MIT"
] | null | null | null | divizor_master.py | Andrey-Mel/DZ_lesson_5 | 0f80a352c6d349738b22780942c7738c7d33e2fe | [
"MIT"
] | null | null | null | divizor_master.py | Andrey-Mel/DZ_lesson_5 | 0f80a352c6d349738b22780942c7738c7d33e2fe | [
"MIT"
] | null | null | null | '''The divisor_master module must be implemented. All module functions take natural numbers from 1 to 1000 as input.
The module contains functions that:
1) check a number for primality (prime numbers are numbers whose only divisors are one and themselves);
2) print the list of all divisors of a number;
3) print the largest prime divisor of a number.'''
import math
def f_prost_chisla(n):
digit = []
for i in range(2, n+1 ):
for j in range(2, i):
if i % j == 0:
break
else:
digit.append(i)
    print(f'Prime numbers up to the entered number \n {digit}')  # print the primes
# function that prints the list of all divisors of a number;
def f_delitely_chisla(n):
i = 2
spisok_delitely = []
while i <= n:
if n % i == 0:
spisok_delitely.append(i)
n //= i
else:
i += 1
print(spisok_delitely)
    # keep only the distinct prime divisors (the original loop compared against
    # a stale index left over from the factorization and could skip entries)
    delitely = sorted(set(spisok_delitely))
    print(f'{delitely} - prime divisors of the number. Now find the largest one:')
    spisok_delit_sort = sorted(delitely, reverse=True)
    print(f'The largest prime divisor of the number is {spisok_delit_sort[0]}')
######
#4) function that prints the canonical (prime) factorization of a number
def f_mnogitely(n):
    print(f' You entered the number {n}')
Mnogitely = []
j = 2
while j <= n:
if n % j == 0:
Mnogitely.append(j)
n //= j
else:
j += 1
if n > 1:
Mnogitely.append(n)
    # the largest element of the prime factorization is also the largest prime factor
Sort_mnogitely = sorted(Mnogitely)
Bigest_mnogityl = Sort_mnogitely[-1]
    print(f' Factors of the number - {Mnogitely}. The largest factor: {Bigest_mnogityl}')
#f_mnogitely(144560)
#f_prost_chisla(144)
#f_delitely_chisla(500)
| 24.38961 | 117 | 0.609691 | 248 | 1,878 | 4.524194 | 0.375 | 0.026738 | 0.030303 | 0.046346 | 0.055258 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027068 | 0.2918 | 1,878 | 76 | 118 | 24.710526 | 0.816541 | 0.306177 | 0 | 0.177778 | 0 | 0 | 0.20047 | 0.017228 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.022222 | 0 | 0.088889 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b91bc419d87130534d251b58ff87a17cca48671 | 640 | py | Python | django/i_dont_need_this/web/otp.py | AakashKhatu/iDontNeedThis | 9b50a9377555fe990a7f21372e339a3f264800a9 | [
"MIT"
] | 2 | 2019-04-04T13:37:17.000Z | 2019-04-04T13:38:09.000Z | django/i_dont_need_this/web/otp.py | AakashKhatu/iDontNeedThis | 9b50a9377555fe990a7f21372e339a3f264800a9 | [
"MIT"
] | null | null | null | django/i_dont_need_this/web/otp.py | AakashKhatu/iDontNeedThis | 9b50a9377555fe990a7f21372e339a3f264800a9 | [
"MIT"
] | 1 | 2019-04-04T03:50:46.000Z | 2019-04-04T03:50:46.000Z | import requests
import random
def send_otp(number):
url = "https://www.fast2sms.com/dev/bulk"
otp = random.randint(10000, 99999)
querystring = {"authorization": "ZM2aEdmsy3WHNI8xejK6kiJ4hCYrBuwfn9t5QSLpov0VRb7lcP0qHGS5fkgWtPNX2YhFrQy9JnBOZTD6",
"sender_id": "FSTSMS", "language": "english", "route": "qt",
"numbers": number, "message": "8528",
"variables": "{AA}", "variables_values": otp}
headers = {
'cache-control': "no-cache"
}
response = requests.request(
"GET", url, headers=headers, params=querystring)
return (response.ok, otp)
| 33.684211 | 119 | 0.621875 | 57 | 640 | 6.929825 | 0.754386 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.059305 | 0.235938 | 640 | 18 | 120 | 35.555556 | 0.748466 | 0 | 0 | 0 | 0 | 0 | 0.365625 | 0.125 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.133333 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b923877a3fa52e1bcbc2ba2772f7f2b28266243 | 3,084 | py | Python | CoreConceptsPy/GdalPy/test/networks_test.py | spatial-ucsb/ConceptsOfSpatialInformation | 73d54a37ced14bc5ecb064f9d1ab8b1af8cd3c5a | [
"Apache-2.0"
] | 18 | 2015-03-03T22:57:20.000Z | 2020-06-17T10:17:58.000Z | CoreConceptsPy/GdalPy/test/networks_test.py | spatial-ucsb/ConceptsOfSpatialInformation | 73d54a37ced14bc5ecb064f9d1ab8b1af8cd3c5a | [
"Apache-2.0"
] | 1 | 2017-02-23T20:06:06.000Z | 2017-02-23T20:06:06.000Z | CoreConceptsPy/GdalPy/test/networks_test.py | spatial-ucsb/ConceptsOfSpatialInformation | 73d54a37ced14bc5ecb064f9d1ab8b1af8cd3c5a | [
"Apache-2.0"
] | 16 | 2015-02-13T02:05:36.000Z | 2018-09-07T04:02:13.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Abstract: Unit tests for the implementations of the core concept 'network'
"""
__author__ = "Michel Zimmer"
__copyright__ = "Copyright 2014"
__credits__ = ["Michel Zimmer"]
__license__ = ""
__version__ = "0.1"
__maintainer__ = ""
__email__ = ""
__date__ = "December 2014"
__status__ = "Development"
import sys
import unittest
import networkx as nx
sys.path = [ '.', '..' ] + sys.path
from utils import _init_log
from networks import *
log = _init_log("networks_test")
class TestNetworkXEmptyNetwork(unittest.TestCase):
def setUp( self ):
self.N = NetworkX()
def test_nodes( self ):
self.assertEqual(self.N.nodes(), [])
def test_edges( self ):
self.assertEqual(self.N.edges(), [])
def test_addNode( self ):
"""
1 2
"""
self.N.addNode(1)
self.N.addNode(2)
self.assertEqual(self.N.nodes(), [1, 2])
def test_addEdge( self ):
"""
1 - 2
"""
self.N.addEdge(1, 2)
self.assertEqual(self.N.nodes(True), [(1, {}), (2, {})])
self.assertEqual(self.N.edges(True), [(1, 2, {})])
def test_connected( self ):
"""
1 - 2 3
"""
self.N.addEdge(1, 2)
self.N.addNode(3)
self.assertTrue(self.N.connected(1, 2))
self.assertFalse(self.N.connected(1, 3))
def test_shortestPath( self ):
"""
1 - 2 - 3
| |
+ - 5 - 4
"""
self.N.addEdge(1, 2)
self.N.addEdge(2, 3)
self.N.addEdge(3, 4)
self.N.addEdge(4, 5)
self.N.addEdge(5, 1)
self.assertEquals(self.N.shortestPath(1, 4), [1, 5, 4])
def test_shortestWeightedEdgesPath( self ):
"""
1 = 3
| |
2 - +
"""
self.N.addEdge(1, 2, weight = 1)
self.N.addEdge(2, 3)
self.N.addEdge(1, 3, weight = 4)
self.assertEquals(self.N.shortestPath(1, 3), [1, 3])
self.assertEquals(self.N.shortestPath(1, 3, 'weight'), [1, 2, 3])
def test_degree( self ):
"""
1 - 2 3
"""
self.N.addEdge(1, 2)
self.N.addNode(3)
self.assertEquals(self.N.degree(1), 1)
self.assertEquals(self.N.degree(3), 0)
def test_distance( self ):
"""
1 - 2 - 3
| |
+ - 5 - 4
"""
self.N.addEdge(1, 2)
self.N.addEdge(2, 3)
self.N.addEdge(3, 4)
self.N.addEdge(4, 5)
self.N.addEdge(5, 1)
self.assertEquals(self.N.distance(1, 4), 2)
def test_breadthFirst( self ):
"""
4 - 3 - 1 - 5 - 6 - 7
|
2
"""
self.N.addEdge(1, 2)
self.N.addEdge(1, 3)
self.N.addEdge(3, 4)
self.N.addEdge(1, 5)
self.N.addEdge(5, 6)
self.N.addEdge(6, 7)
self.assertEquals(self.N.breadthFirst(1, 2), [1, 2, 3, 4, 5, 6])
if __name__ == '__main__':
unittest.main()
| 22.844444 | 75 | 0.498379 | 390 | 3,084 | 3.789744 | 0.2 | 0.138701 | 0.17862 | 0.087957 | 0.47226 | 0.368742 | 0.301083 | 0.240189 | 0.205007 | 0.186739 | 0 | 0.064008 | 0.34144 | 3,084 | 134 | 76 | 23.014925 | 0.663712 | 0.087224 | 0 | 0.253521 | 0 | 0 | 0.038661 | 0 | 0 | 0 | 0 | 0 | 0.197183 | 1 | 0.15493 | false | 0 | 0.070423 | 0 | 0.239437 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b933659f87b8cf39336c9379fc5288ae062f439 | 6,714 | py | Python | onap_data_provider/resources/sdc_properties_mixins.py | onap/integration-data-provider | 0565394ecbd96730bf982909693514ab88703708 | [
"Apache-2.0"
] | null | null | null | onap_data_provider/resources/sdc_properties_mixins.py | onap/integration-data-provider | 0565394ecbd96730bf982909693514ab88703708 | [
"Apache-2.0"
] | null | null | null | onap_data_provider/resources/sdc_properties_mixins.py | onap/integration-data-provider | 0565394ecbd96730bf982909693514ab88703708 | [
"Apache-2.0"
] | null | null | null | """SDC properties mixins module."""
"""
Copyright 2021 Deutsche Telekom AG
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from abc import ABC
from typing import Any, Dict, List, Union
from onapsdk.exceptions import SDKException, ValidationError, ParameterError # type: ignore
from onapsdk.sdc.component import Component # type: ignore
from onapsdk.sdc.properties import NestedInput, Property, ComponentProperty # type: ignore
from onapsdk.sdc.sdc_resource import SdcResource # type: ignore
from onapsdk.sdc.vf import Vf # type: ignore
class SdcPropertiesMixins(ABC):
"""Mixins class for properties handling.
    Mixin class for properties preparation for SdcResources and Components.
"""
def set_properties(
self, propresource: Union[SdcResource, Component], data: List[Any]
) -> None:
"""Set properties an SdcResource.
Args:
sdcresource (SdcResource): the SdcResource the properties should belong to
data (Dict[List, Any]): Data needed to create resource.
Raises ValidationError
"""
for property_data in data:
if any(
(prop.name == property_data["name"] for prop in propresource.properties)
):
prop = propresource.get_property(property_data["name"])
prop.value = property_data.get("value")
else:
proptype = property_data.get("type")
if proptype is None:
raise ValidationError(
f"New Property '{str(property_data['name'])}' is missing a type!"
)
property = Property(
name=property_data["name"],
property_type=proptype,
value=property_data.get("value"),
)
try:
propresource.add_property(property)
except SDKException:
raise ParameterError(
f"Creation of new property '{str(property_data['name'])}' "
f"for resourceclass '{str(propresource.__class__.__name__)}' is not provided yet!"
)
def declare_input(self, propresource: Union[SdcResource, Component], property_data: Dict[str, Any]) -> None:
"""Declare input.
Method to get a property from a component and create an input for it.
Args:
propresource (Union[SdcResource, Component]): Resource to create an input
property_data (Dict[str, Any]): Data used to create an input
Raises:
ValidationError: Provided data is invalid - missing property type
ParameterError: Declaring input returns an SDC error
"""
proptype = property_data.get("type")
if proptype is None:
raise ValidationError(
"New input '{0}' is missing a type!".format(
str(property_data["name"])
)
)
property = Property(
name=property_data["name"],
property_type=proptype,
value=property_data.get("value"),
)
try:
propresource.add_property(property)
propresource.declare_input(property)
except SDKException:
raise ParameterError(
f"Creation of new input '{str(property_data['name'])}' is not provided yet!"
)
def declare_nested_input(self, propresource: Union[SdcResource, Component], data: Dict[str, Any]) -> None:
"""Declare nested input.
Args:
propresource (SdcResource): Resource for which nested input is going to be declared
data (Dict[str, Any]): Data used for input creation.
"""
if not isinstance(propresource, SdcResource):
logging.error("Can't declare nested inputs for components!")
return
comp: Component = propresource.get_component_by_name(data["resource"])
propresource.declare_input(NestedInput(comp.sdc_resource, comp.sdc_resource.get_input(data["name"])))
def declare_resource_property_input(
self, sdc_resource: Union[SdcResource, Component], input_data: Dict[str, Any]
) -> None:
"""Declare input from resource's property.
Args:
sdc_resource (SdcResource): Resource for which input is going to be declared
input_data (Dict[str, Any]): Data used for input creation.
"""
if not isinstance(sdc_resource, Vf):
logging.error("Resource property as input is currently supported only for Vf resources.")
return
resource_component: Component = sdc_resource.get_component_by_name(
input_data["resource"]
)
component_property: ComponentProperty = resource_component.get_property(
input_data["name"]
)
sdc_resource.declare_input(component_property)
def set_inputs(
self, sdc_resource: Union[SdcResource, Component], inputs_data: List[Dict[str, Any]],
) -> None:
"""Set inputs of an SdcResource.
Args:
sdc_resource (SdcResource): the SdcResource the inputs should belong to
inputs_data (Dict[str, Any]): Input data to be set into resource.
"""
for input_data in inputs_data: # type: Dict[str, Any]
if input_data.get("nested-input"):
self.declare_nested_input(sdc_resource, input_data)
elif input_data.get("resource-property"):
self.declare_resource_property_input(sdc_resource, input_data)
# In case resource already has input with given name then set its value only
elif any(
(
resource_input.name == input_data["name"]
for resource_input in sdc_resource.inputs
)
):
sdc_resource.set_input_default_value(
sdc_resource.get_input(input_data["name"]),
input_data.get("value"),
)
else:
self.declare_input(sdc_resource, input_data)
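# A minimal usage sketch (names are hypothetical; assumes a concrete class that
# mixes in SdcPropertiesMixins and an already onboarded Vf resource):
#
#   vf = Vf(name="my_vf")
#   handler = MyResourceHandler()  # subclass of SdcPropertiesMixins
#   handler.set_properties(vf, [{"name": "flavor", "type": "string", "value": "small"}])
#   handler.set_inputs(vf, [{"name": "flavor", "type": "string"}])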
| 40.203593 | 112 | 0.613196 | 740 | 6,714 | 5.433784 | 0.22027 | 0.04377 | 0.022382 | 0.024372 | 0.323303 | 0.263865 | 0.172096 | 0.142253 | 0.142253 | 0.114897 | 0 | 0.001931 | 0.30563 | 6,714 | 166 | 113 | 40.445783 | 0.860575 | 0.212243 | 0 | 0.326316 | 0 | 0 | 0.119053 | 0.029312 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.084211 | 0 | 0.168421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b94671a381123d62794de44e17b4732cff5ed86 | 3,193 | py | Python | simple_kg/discovery/entropy_pmi_new_word_discovery/utils.py | xiangking/simpleKG | d1464ec649022e17b2352b6249749af1b064526e | [
"MIT"
] | 1 | 2021-09-18T18:02:44.000Z | 2021-09-18T18:02:44.000Z | simple_kg/discovery/entropy_pmi_new_word_discovery/utils.py | xiangking/simpleKG | d1464ec649022e17b2352b6249749af1b064526e | [
"MIT"
] | null | null | null | simple_kg/discovery/entropy_pmi_new_word_discovery/utils.py | xiangking/simpleKG | d1464ec649022e17b2352b6249749af1b064526e | [
"MIT"
] | null | null | null | import re
import math
import codecs
import random
import numpy as np
def is_not_chinese(uchar:str):
"""
    Determine whether a unicode character is NOT a Chinese character
    :param uchar: (str) the character to check
"""
if uchar.isalpha() is True:
return False
elif uchar >= u'\u4e00' and uchar <= u'\u9fa5':
return False
else:
return True
def ngram_segment(text, n=3):
"""
    Segment a sentence using n-grams
    :param text: (str) the text to segment
    :param n: (int) n-gram length, defaults to 3
"""
text_length = len(text)
skip = min(n, text_length)
    # generate the n-grams
for j in range(text_length):
for k in range(j + 1, min(j + 1 + skip, text_length + 1)):
yield text[j:k]
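# For example, list(ngram_segment("abcd", 2)) yields
# ['a', 'ab', 'b', 'bc', 'c', 'cd', 'd']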
def get_bigram(text):
"""
    Split a word into all possible two-part combinations; for example, ABB can be split into (A, BB) and (AB, B)
    :param text: (str) the string to split
"""
return [(text[0:i], text[i:]) for i in range(1, len(text))]
class NewWordCandidateInfo(object):
"""
    Record N-gram information, including left neighbors, right neighbors, frequency and PMI
    :param text: the N-gram word
Reference:
https://github.com/DenseAI/kaitian-xinci
"""
def __init__(self, text):
super(NewWordCandidateInfo, self).__init__()
self.text = text
self.freq = 0.0
self.left = [] # record left neighbors
self.left_dict = {}
self.right = [] # record right neighbors
self.right_dict = {}
self.pmi = 0
self.raw_freq = 0
self.raw_length = 0
def update_data(self, left, right):
"""添加出现在N-gram单词左右两边的字
Args:
left(str): 出现在N-gram单词左边的字
right(str): 出现在N-gram单词右边的字
"""
self.freq += 1.0
if left:
self.left.append(left)
if right:
self.right.append(right)
def compute_indexes(self, length):
"""计算单词的频率和左/右熵
Args:
总
"""
self.raw_freq = self.freq
self.raw_length = length
self.freq /= length
self.left, self.left_dict = NewWordCandidateInfo.compute_entropy(self.left)
self.right, self.right_dict = NewWordCandidateInfo.compute_entropy(self.right)
@staticmethod
def compute_entropy(_list):
"""计算左/右熵
Args:
_list(list): 出现在N-gram单词左/右的词列表
Formula:
[1] https://www.hankcs.com/nlp/extraction-and-identification-of-mutual-information-about-the-phrase-based-on-information-entropy.html
"""
length = float(len(_list))
frequence = {}
if length == 0:
return 0, frequence
else:
for i in _list:
frequence[i] = frequence.get(i, 0) + 1
return sum(map(lambda x: - x / length * math.log(x / length), frequence.values())), frequence
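    # For example, compute_entropy(['a', 'a', 'b']) returns roughly 0.636
    # (natural log) together with the counts {'a': 2, 'b': 1}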
def compute_pmi(self, words_dict):
"""计算互信息
Args:
words_dict(dict): {N-gram单词: NewWordCandidateInfo对象}
Formula:
[1] https://ww1.sinaimg.cn/large/6cbb8645gw1el41boc5q9j20jw02oaa3.jpg
"""
sub_part = get_bigram(self.text)
if len(sub_part) > 0:
self.pmi = min(
map(lambda word: math.log(self.freq / words_dict[word[0]].freq / words_dict[word[1]].freq), sub_part)) | 24.945313 | 145 | 0.561854 | 386 | 3,193 | 4.53886 | 0.375648 | 0.031963 | 0.013699 | 0.043379 | 0.047945 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020147 | 0.316004 | 3,193 | 128 | 146 | 24.945313 | 0.782051 | 0.257125 | 0 | 0.067797 | 0 | 0 | 0.005666 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135593 | false | 0 | 0.084746 | 0 | 0.338983 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b96e763ec1fd38d58bc97efc3d4e768f050096b | 5,323 | py | Python | data_loader_UAST.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | data_loader_UAST.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | data_loader_UAST.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | import torch
from torch.utils.data import Dataset, DataLoader
from pre_ast_path import ASTParser
from tqdm import tqdm, trange
import multiprocessing
from multiprocessing import Process, freeze_support
import pickle
import config
import os
import re
from gen_graph import get_adj, gen_feature
from unified_vocab import unified_vocab_dict
dict_ast_path = pickle.load(open('./vocabulary/unified_vocab.pkl', 'rb'))
language = {
"c": "c",
"cpp": "cpp",
"java": "java",
"js": "javascript",
"py": "python"
}
problem_map = {'1':0,'100':1,
'101':2, '102':3, '104':4, '11':5, '110':6, '111':7, '112':8, '118':9,
'12':10, '121':11, '125':12, '13':13, '136':14, '14':15, '15':16, '19':17,
'2':18, '20':19, '22':20, '26':21, '27':22, '28':23, '3':24, '35':25, '38':26,
'4':27, '46':28, '48':29, '5':30, '53':31, '55':32, '56':33, '58':34, '6':35,
'62':36, '66':37, '67':38, '69':39, '7':40, '70':41, '75':42, '78':43, '8':44,
'83':45, '88':46, '9':47, '94':48, '98':49}
def walkFile(file):
for root, dirs, files in os.walk(file):
file_name_list = []
for d in dirs:
file_name_list.append(os.path.join(root, d))
return file_name_list
def returnfiles(datapath):
all_files = []
for filepath, dirnames, filenames in os.walk(datapath):
if len(filenames)!=0:
for file in filenames:
all_files.append(filepath+"/"+file)
return all_files
def get_example(item):
code, label, extension = item
ast = ASTParser(language=language[extension])
tree = ast.parse_with_language(code_snippet=code, language=language[extension])
root_node = tree.root_node
ast_path = root_node.sexp()
# apply the unified-vocabulary replacement rules
for key,value in unified_vocab_dict.items():
ast_path = ast_path.replace(key,value)
ast_path_list = []
ast_path = re.split('[()]', ast_path)
for i in ast_path:
i = i.strip()
if i != "":
ast_path_list.append(i)
AST_index = dict_ast_path.transform(ast_path_list, max_len=int(config.max_len))
ast_path = root_node.sexp()
# apply the unified-vocabulary replacement rules
for key,value in unified_vocab_dict.items():
ast_path = ast_path.replace(key,value)
adj_matrix = get_adj(ast_path)
#print(len(dict_ast_path))
fea_matrix = gen_feature(ast_path,dict_ast_path)
fea_matrix = fea_matrix.numpy()
return [AST_index, adj_matrix, fea_matrix, label]
# return InputFeatures(code_token1, code_token2, label, url1, url2)
class MyDataset(Dataset):
def __init__(self, file_path=r'./leetcode/train', pool=None):
file_name = returnfiles(file_path)
self.examples = []
data = []
for file in file_name:
file_split = file.split("/")
extension = file_split[-1].split(".")[-1]
with open(file,"r",encoding='utf-8') as f:
code = f.read()
# input_code = f.read()
label = problem_map[file_split[-2]]
data.append((code, int(label),extension))
self.examples = pool.map(get_example, data)
def __len__(self):
return len(self.examples)
def __getitem__(self, index):
# print(self.examples[index].url1,self.examples[index].code_token1,self.examples[index].label)
AST_index = self.examples[index][0]
adj_matrix = self.examples[index][1] # [1,5,3,6,...]
fea_matrix = self.examples[index][2]
label = self.examples[index][3]
return AST_index, adj_matrix, fea_matrix, label
# return torch.tensor(torch.tensor(self.examples[item].label))
def collate_fn(batch):
AST_index, adj_matrix,fea_matrix, label = list(zip(*batch))
AST_index = torch.LongTensor(AST_index)
adj_matrix = torch.FloatTensor(adj_matrix)
fea_matrix = torch.FloatTensor(fea_matrix)
label = torch.LongTensor(label)
return AST_index, adj_matrix, fea_matrix, label
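# collate_fn stacks the per-sample tuples produced by MyDataset.__getitem__
# into batched tensors, so one batch is (AST indices, adjacency matrices,
# feature matrices, labels).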
def get_fusion_dataloader(train="train"):
print("get dataloader!")
cpu_cont = 5
pool = multiprocessing.Pool(cpu_cont)
if train == "train":
print("get train dataloader!")
data = MyDataset(file_path=r'./leetcode/train', pool=pool)
data_loader = DataLoader(data, batch_size=config.batch_size, shuffle=True,
collate_fn=collate_fn,num_workers=0)
elif train == "valid":
print("get valid dataloader!")
data = MyDataset(file_path=r'./leetcode/valid', pool=pool)
data_loader = DataLoader(data, batch_size=config.batch_size, shuffle=True,
collate_fn=collate_fn,num_workers=0)
elif train == "test":
print("get test dataloader!")
test_data = MyDataset(file_path=r'./leetcode/test', pool=pool)
data_loader = DataLoader(test_data, batch_size=config.batch_size, shuffle=True, collate_fn=collate_fn,num_workers=0)
return data_loader
if __name__ == '__main__':
data_loader = tqdm(get_fusion_dataloader(train="train"))
for idx,(ast_index, adj_matrix, fea_matrix, label) in tqdm(enumerate(data_loader), total=len(data_loader)):
print('idx',idx)
print('ast_index',ast_index)
print('adj_matrix', adj_matrix)
print('fea_matrix',fea_matrix)
print('label',label)
break
| 33.062112 | 124 | 0.629344 | 746 | 5,323 | 4.272118 | 0.289544 | 0.041732 | 0.037653 | 0.032005 | 0.283652 | 0.244117 | 0.224976 | 0.18042 | 0.168811 | 0.141826 | 0 | 0.052058 | 0.224122 | 5,323 | 160 | 125 | 33.26875 | 0.719613 | 0.054856 | 0 | 0.101695 | 0 | 0 | 0.078041 | 0.005973 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067797 | false | 0 | 0.101695 | 0.008475 | 0.237288 | 0.076271 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b97dcc4fee182443ecf7a65d71d064d71890c18 | 847 | py | Python | benedict/serializers/json.py | fabiocaccamo/python-benedict | da061049164efab95ee4360a9c971c5be248fbf2 | [
"MIT"
] | 365 | 2019-05-21T05:50:30.000Z | 2022-03-29T11:35:35.000Z | benedict/serializers/json.py | fabiocaccamo/python-benedict | da061049164efab95ee4360a9c971c5be248fbf2 | [
"MIT"
] | 78 | 2019-11-16T12:22:54.000Z | 2022-03-14T12:21:30.000Z | benedict/serializers/json.py | fabiocaccamo/python-benedict | da061049164efab95ee4360a9c971c5be248fbf2 | [
"MIT"
] | 26 | 2019-12-16T06:34:12.000Z | 2022-02-28T07:16:41.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from benedict.serializers.abstract import AbstractSerializer
from benedict.utils import type_util
from six import text_type
import json
class JSONSerializer(AbstractSerializer):
def __init__(self):
super(JSONSerializer, self).__init__()
def decode(self, s, **kwargs):
data = json.loads(s, **kwargs)
return data
def encode(self, d, **kwargs):
kwargs.setdefault('default', self._encode_default)
data = json.dumps(d, **kwargs)
return data
def _encode_default(self, obj):
if type_util.is_set(obj):
return list(obj)
elif type_util.is_datetime(obj):
return obj.isoformat()
elif type_util.is_decimal(obj):
return text_type(obj)
return text_type(obj)
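# A minimal usage sketch (illustrative values, not from the library docs):
# s = JSONSerializer()
# text = s.encode({'ids': {1, 2}, 'when': datetime.datetime.now()})
# data = s.decode(text)
# _encode_default turns sets into lists, datetimes into ISO strings and
# decimals into plain strings before json.dumps serialises them.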
| 24.911765 | 60 | 0.654073 | 104 | 847 | 5.067308 | 0.413462 | 0.060721 | 0.056926 | 0.072106 | 0.165085 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001567 | 0.246753 | 847 | 33 | 61 | 25.666667 | 0.824451 | 0.024793 | 0 | 0.173913 | 0 | 0 | 0.008495 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.173913 | false | 0 | 0.217391 | 0 | 0.695652 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b981f9403e35e3edad1513c56a1d0685d8c02e9 | 5,148 | py | Python | docs/report/fa20-523-349/project/RankNet/indycar/online-lstm.py | mikahla1/cybertraining-dsc.github.io | 168cadb2f755cb6ad4907e5656bd879d57e01e43 | [
"Apache-2.0"
] | 4 | 2020-10-16T21:59:07.000Z | 2021-06-27T16:32:50.000Z | docs/report/fa20-523-349/project/RankNet/indycar/online-lstm.py | mikahla1/cybertraining-dsc.github.io | 168cadb2f755cb6ad4907e5656bd879d57e01e43 | [
"Apache-2.0"
] | 8 | 2020-09-04T13:14:18.000Z | 2021-08-19T09:05:27.000Z | docs/report/fa20-523-349/project/RankNet/indycar/online-lstm.py | mikahla1/cybertraining-dsc.github.io | 168cadb2f755cb6ad4907e5656bd879d57e01e43 | [
"Apache-2.0"
] | 25 | 2020-08-16T17:17:53.000Z | 2021-07-08T22:54:34.000Z | from pandas import DataFrame
from pandas import Series
from pandas import concat
from pandas import read_csv
from pandas import datetime
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from math import sqrt
import matplotlib
import numpy
from numpy import concatenate
# date-time parsing function for loading the dataset
def parser(x):
return datetime.strptime(x, '%Y-%m-%d %H:%M:%S.%f')
# frame a sequence as a supervised learning problem
def timeseries_to_supervised(data, lag=1):
df = DataFrame(data)
columns = [df.shift(i) for i in range(1, lag+1)]
columns.append(df)
df = concat(columns, axis=1)
df = df.drop(0)
return df
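# Illustrative example: for data [1, 2, 3, 4] and lag=1 the frame pairs each
# value with its successor -- rows (1, 2), (2, 3), (3, 4) -- after the first
# row (which holds the NaN introduced by shift) is dropped.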
# create a differenced series
def difference(dataset, interval=1):
diff = list()
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
diff.append(value)
return Series(diff)
# invert differenced value
def inverse_difference(history, yhat, interval=1):
return yhat + history[-interval]
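# Illustrative example: difference([1, 4, 9, 16], 1) gives Series([3, 5, 7]);
# inverse_difference([1, 4, 9], 7, 1) adds the last raw value back: 9 + 7 = 16.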
# scale train and test data to [-1, 1]
def scale(train, test):
# fit scaler
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(train)
# transform train
train = train.reshape(train.shape[0], train.shape[1])
train_scaled = scaler.transform(train)
# transform test
test = test.reshape(test.shape[0], test.shape[1])
test_scaled = scaler.transform(test)
return scaler, train_scaled, test_scaled
# inverse scaling for a forecasted value
def invert_scale(scaler, X, yhat):
new_row = [x for x in X] + [yhat]
array = numpy.array(new_row)
array = array.reshape(1, len(array))
inverted = scaler.inverse_transform(array)
return inverted[0, -1]
# fit an LSTM network to training data
def fit_lstm(train, batch_size, nb_epoch, neurons):
X, y = train[:, 0:-1], train[:, -1]
X = X.reshape(X.shape[0], 1, X.shape[1])
model = Sequential()
model.add(LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
for i in range(nb_epoch):
model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)
model.reset_states()
return model
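# Note: the network is stateful with the batch size fixed at build time, so
# training runs one epoch at a time and reset_states() clears the LSTM cell
# state between passes; forecasting then reuses the same batch size.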
# make a one-step forecast
def forecast_lstm(model, batch_size, X):
X = X.reshape(1, 1, len(X))
yhat = model.predict(X, batch_size=batch_size)
return yhat[0,0]
# Update LSTM model
def update_model(model, train, batch_size, updates):
X, y = train[:, 0:-1], train[:, -1]
X = X.reshape(X.shape[0], 1, X.shape[1])
for i in range(updates):
model.fit(X, y, nb_epoch=1, batch_size=batch_size, verbose=0, shuffle=False)
model.reset_states()
# run a repeated experiment
def experiment(repeats, series, updates, lag=1):
# transform data to be stationary
raw_values = series.values
diff_values = difference(raw_values, 1)
# transform data to be supervised learning
supervised = timeseries_to_supervised(diff_values, lag)
supervised_values = supervised.values
# split data into train and test-sets
trainSize = 1500
train, test = supervised_values[0:trainSize], supervised_values[trainSize:]
# transform the scale of the data
scaler, train_scaled, test_scaled = scale(train, test)
# run experiment
error_scores = list()
for r in range(repeats):
# fit the base model
lstm_model = fit_lstm(train_scaled, 1, 50, 1)
print('Start testing...')
# forecast test dataset
train_copy = numpy.copy(train_scaled)
predictions = list()
for i in range(len(test_scaled)):
# update model
#if i > 0:
# update_model(lstm_model, train_copy, 1, updates)
# predict
X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
yhat = forecast_lstm(lstm_model, 1, X)
# invert scaling
yhat = invert_scale(scaler, X, yhat)
# invert differencing
yhat = inverse_difference(raw_values, yhat, len(test_scaled)+1-i)
# store forecast
predictions.append(yhat)
# add to training set
train_copy = concatenate((train_copy, test_scaled[i,:].reshape(1, -1)))
# report performance
rmse = sqrt(mean_squared_error(raw_values[-len(test_scaled):], predictions))
print('%d) Test RMSE: %.3f' % (r+1, rmse))
error_scores.append(rmse)
return error_scores
# execute the experiment
def run(lag=1):
# load dataset
series = read_csv('indy2018-1-vspeed.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
# experiment
repeats = 10
results = DataFrame()
# run experiment
updates = 2
results['results'] = experiment(repeats, series, updates, lag)
# summarize results
print(results.describe())
# save results
results.to_csv('experiment_update_2.csv', index=False)
# entry point
run()
| 34.32 | 120 | 0.668221 | 726 | 5,148 | 4.625344 | 0.242424 | 0.026802 | 0.023824 | 0.016379 | 0.124479 | 0.055986 | 0.055986 | 0.055986 | 0.055986 | 0.055986 | 0 | 0.01892 | 0.219697 | 5,148 | 149 | 121 | 34.550336 | 0.817028 | 0.165113 | 0 | 0.06 | 0 | 0 | 0.03004 | 0.010326 | 0 | 0 | 0 | 0 | 0 | 1 | 0.11 | false | 0 | 0.14 | 0.02 | 0.34 | 0.03 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b9a65a2e9f2cc5df22b0506c5bd68872b48ef7e | 11,625 | py | Python | src/hpx_dashboard/server/widgets/widgets.py | jokteur/hpx-dashboard | 91ca3876dec389e514f89f34acdb6ec9cac9d1b4 | [
"BSD-3-Clause"
] | 6 | 2020-07-31T08:12:09.000Z | 2022-01-16T03:35:06.000Z | src/hpx_dashboard/server/widgets/widgets.py | jokteur/hpx-dashboard | 91ca3876dec389e514f89f34acdb6ec9cac9d1b4 | [
"BSD-3-Clause"
] | 23 | 2020-08-12T08:51:12.000Z | 2020-09-29T16:45:54.000Z | src/hpx_dashboard/server/widgets/widgets.py | jokteur/hpx-dashboard | 91ca3876dec389e514f89f34acdb6ec9cac9d1b4 | [
"BSD-3-Clause"
] | 2 | 2020-10-08T13:55:45.000Z | 2022-01-16T03:37:13.000Z | # -*- coding: utf-8 -*-
#
# HPX - dashboard
#
# Copyright (c) 2020 - ETH Zurich
# All rights reserved
#
# SPDX-License-Identifier: BSD-3-Clause
"""
"""
import copy
from datetime import datetime
import json
from bokeh.layouts import column, row
from bokeh.models.widgets import Button, Div, Toggle, TextAreaInput
from .base import BaseWidget, empty_placeholder
from ..plots import generator
from .select import DataCollectionSelect, SelectCustomLine
from ...common.logger import Logger
from ..data import DataAggregator, from_instance
logger = Logger()
class CustomCounterWidget(BaseWidget):
"""Produces a widget for plotting any counters"""
def __init__(self, doc, refresh_rate=1000, collection=None, **kwargs):
"""Produces a widget that allows the user to add / remove plots for any
counters from any collection
Arguments
---------
doc : Bokeh Document
bokeh document for auto-updating the widget
refresh_rate : int
refresh rate at which the Select refreshes and checks for new data collections (in ms)
**kwargs
arguments for the bokeh Select widget
"""
super().__init__(doc, refresh_rate=refresh_rate, collection=collection, **kwargs)
self._defaults_opts = dict(plot_width=800, plot_height=300)
self._defaults_opts.update((key, value) for key, value in kwargs.items())
self._lines = {}
self._lines_info = set()
self._line_counter = 0
# Buttons for editing the lines
self._add_line_b = Button(label="+", width=40)
self._add_line_b.on_click(self._add_line)
# Toggle button for the shading of the plots
self._shade_b = Toggle(label="Toggle plot shading", width=150)
self._shade_b.on_click(self._toggle_shade)
# Buttons for adding and removing plots
self._add_plot_b = Button(label="+", width=40)
self._add_plot_b.on_click(self._add_plot)
self._remove_plot_b = Button(label="-", width=40)
self._remove_plot_b.on_click(self._remove_plot)
# For editing the lines
self._edit_button = Toggle(label="Edit lines", width=100)
self._edit_button.on_click(self._toggle_edit)
self._json_input = TextAreaInput(
title="Export / inport widget:", width=500, max_length=20000
)
self._json_update_button = Button(label="Update from input", width=150)
self._json_update_button.on_click(self._set_from_input)
self._save_button = Button(label="Save state of widget to session", width=170)
self._save_button.on_click(self._save_widget)
self._root = column(
row(
Div(text="Add or remove plots:"),
self._remove_plot_b,
self._add_plot_b,
self._edit_button,
self._shade_b,
self._save_button,
),
empty_placeholder(),
empty_placeholder(),
)
self._plots = []
self._add_plot()
# If there is a saved state in the session of the widget
json_txt = DataAggregator().get_custom_widget_config()
if json_txt:
self.from_json(json_txt)
def _remove_line(self, idx):
del self._lines[idx]
self._update_line_widget()
def _add_line(self, update=True):
plots_text = [f"Plot {i + 1}" for i, _ in enumerate(self._plots)]
self._line_counter += 1
self._lines[self._line_counter] = SelectCustomLine(
self._doc,
self._line_counter,
plots_text,
self._remove_line,
)
if update:
self._update_line_widget()
def _toggle_shade(self, shade):
for plot in self._plots:
plot.toggle_shade()
def _save_widget(self):
DataAggregator().set_custom_widget_config(json.loads(self.to_json()))
def _update_plots(self):
plots = [plot.layout() for plot in self._plots]
self._root.children[2] = column(*plots)
# Update the lines with the available plots
plots_text = [f"Plot {i + 1}" for i, _ in enumerate(self._plots)]
for line in self._lines.values():
line.set_plots(plots_text)
def _update_line_widget(self):
lines = [line.layout() for line in self._lines.values()]
self._root.children[1] = column(
row(self._json_input, self._json_update_button),
row(self._add_line_b, Div(text="Add line")),
*lines,
)
def _toggle_edit(self, edit):
if edit:
self._update_line_widget()
else:
self._root.children[1] = empty_placeholder()
def _add_plot(self):
opts = copy.deepcopy(self._defaults_opts)
self._plots.append(
generator.TimeSeries(
self._doc,
refresh_rate=self._refresh_rate,
title=f"Plot {len(self._plots) + 1}",
**opts,
)
)
self._update_plots()
def _set_from_input(self):
self._toggle_edit(False)
self._edit_button.active = False
self.from_json(self._json_input.value)
def to_json(self):
"""Converts the state of the widget (number of plots, lines) to json"""
json_dict = {"num_plots": len(self._plots), "lines": []}
for plot_id, _, countername, instance, name in self._lines_info:
json_dict["lines"].append(
{"plot_id": plot_id, "countername": countername, "instance": instance, "name": name}
)
return json.dumps(json_dict)
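# Shape of the produced JSON (illustrative values only):
# {"num_plots": 2, "lines": [{"plot_id": 1, "countername": "...",
# "instance": [...], "name": "line 1"}]}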
def from_json(self, json_txt):
"""Takes a json as input and generates the corresponding plots and widgets.
Returns True if successful, False otherwise."""
json_dict = {}
try:
json_dict = json.loads(json_txt.rstrip())
except json.decoder.JSONDecodeError as e:
logger.error(f"JSON decode error: {e.msg}")
if "lines" not in json_dict:
return False
num_plots = 1
if "num_plots" in json_dict:
num_plots = json_dict["num_plots"]
# Remove all the lines
self._lines.clear()
# Set the correct number of plots
if num_plots > len(self._plots):
for _ in range(num_plots - len(self._plots)):
self._add_plot()
elif num_plots < len(self._plots):
for _ in range(len(self._plots) - num_plots):
self._remove_plot()
for line in json_dict["lines"]:
if not isinstance(line, dict):
return False
if (
"plot_id" not in line
or "countername" not in line
or "instance" not in line
or "name" not in line
):
return False
if not from_instance(tuple(line["instance"])):
return False
locality_id, pool, thread_id = from_instance(line["instance"])
self._add_line(False)
self._lines[self._line_counter].set_properties(
line["plot_id"],
None,
line["countername"],
locality_id,
pool,
thread_id,
line["name"],
)
return True
def update(self):
lines = set()
for line in self._lines.values():
lines.add(line.properties())
deleted_lines = self._lines_info.difference(lines)
new_lines = lines.difference(self._lines_info)
for plot_id, collection, countername, instance, name in deleted_lines:
if len(self._plots) >= plot_id:
self._plots[plot_id - 1].remove_line(countername, instance, collection, name)
for plot_id, collection, countername, instance, name in new_lines:
self._plots[plot_id - 1].add_line(countername, instance, collection, name)
self._lines_info = lines
self._json_input.value = self.to_json()
def _remove_plot(self):
if len(self._plots) == 1:
return
del self._plots[-1]
self._update_plots()
class DataCollectionWidget(BaseWidget):
"""Produces a widget for selecting current and past data collection instances"""
def __init__(self, doc, callback, refresh_rate=500, **kwargs):
"""Produces a widget that shows all the current and past data collection instances
in the form of a Select.
Arguments
---------
doc : Bokeh Document
bokeh document for auto-updating the widget
callback : function(collection: DataCollection)
callback for notifying when the user selects a certain data collection
refresh_rate : int
refresh rate at which the Select refreshes and checks for new data collections (in ms)
**kwargs
arguments for the bokeh Select widget
"""
super().__init__(doc, callback, refresh_rate=refresh_rate, **kwargs)
self._selected_collection = None
self._select = DataCollectionSelect(doc, self._set_collection, refresh_rate=refresh_rate)
self._div = Div(text="<b>No data available</b>")
self._root = column(self._select.layout(), self._div)
def _set_collection(self, collection):
""""""
self._selected_collection = collection
self._callback(collection)
self.update()
def update(self):
super().update()
collection = None
most_recent_flag = False
if not self._selected_collection:
most_recent_flag = True
collection = DataAggregator().get_live_collection()
else:
collection = self._selected_collection
if collection:
collection_list = DataAggregator().data
index = collection_list.index(collection)
collection = collection_list[index]
# Title of the run
title = f"Run #{index}"
if DataAggregator().get_current_run() == collection:
if most_recent_flag:
title += " (most recent, live)"
else:
title += " (live)"
elif most_recent_flag:
title += " (most recent)"
# Timings of the run
begin_time = datetime.fromtimestamp(int(collection.start_time))
time_info = f"<em>Start</em>: {begin_time}<br />"
if collection.end_time:
end_time = datetime.fromtimestamp(int(collection.end_time))
time_info += f"<em>End</em>: {end_time}"
# Num threads and localities
localities = collection.get_localities()
num_workers = 0
if localities:
num_workers = collection.get_num_worker_threads(localities[0])
instance_info = ""
if len(localities) == 1:
instance_info += "1 locality"
else:
instance_info += f"{len(localities)} localities"
instance_info += "<br />"
if num_workers == 1:
instance_info += "1 thread per locality"
else:
instance_info += f"{num_workers} threads per locality"
text = f"""<span class="run_summary"><h3 class="run_title">{title}</h3><br />
{time_info}<br />
{instance_info}</span>"""
if text != self._div.text:
self._div.text = text
| 33.598266 | 100 | 0.590624 | 1,364 | 11,625 | 4.771261 | 0.177419 | 0.024892 | 0.014751 | 0.007376 | 0.248002 | 0.139367 | 0.108021 | 0.095267 | 0.072526 | 0.072526 | 0 | 0.00825 | 0.311828 | 11,625 | 345 | 101 | 33.695652 | 0.80525 | 0.138065 | 0 | 0.114035 | 0 | 0 | 0.073475 | 0.007982 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074561 | false | 0 | 0.04386 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1b9e89c2c7b2551425e05e9bab467dd513d23999 | 15,379 | py | Python | src/core/uv_edit/prim.py | Epihaius/panda3dstudio | f5c62ca49617cae1aa5aa5b695200027da99e242 | [
"BSD-3-Clause"
] | 63 | 2016-01-02T16:28:47.000Z | 2022-01-19T11:29:51.000Z | src/core/uv_edit/prim.py | Epihaius/panda3dstudio | f5c62ca49617cae1aa5aa5b695200027da99e242 | [
"BSD-3-Clause"
] | 12 | 2016-06-12T14:14:15.000Z | 2020-12-18T16:11:45.000Z | src/core/uv_edit/prim.py | Epihaius/panda3dstudio | f5c62ca49617cae1aa5aa5b695200027da99e242 | [
"BSD-3-Clause"
] | 17 | 2016-05-23T00:02:27.000Z | 2021-04-25T17:48:27.000Z | from .base import *
import array
class UVPrimitivePart:
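"""Pickable part of a UV primitive template; tracks its row range in the
shared vertex data, its default vertex positions and its own transform."""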
def __init__(self, picking_color_id, owner, data_row_range, start_positions,
default_positions, geom_prim, pos):
self.picking_color_id = picking_color_id
self.id = picking_color_id
self.owner = owner
self.data_row_range = data_row_range
self.start_positions = start_positions
self.default_positions = default_positions
self.geom_prim = geom_prim
self.is_selected = False
self.default_pos = Point3(pos)
self.center_pos = pos
self.mat = Mat4(Mat4.translate_mat(pos))
def copy(self, owner):
part = UVPrimitivePart(self.picking_color_id, owner, self.data_row_range,
self.start_positions, self.default_positions, self.geom_prim,
Point3(self.default_pos))
part.mat = Mat4(self.mat)
part.center_pos = Point3(self.center_pos)
return part
def get_center_pos(self, ref_node=None):
return self.center_pos
def get_pos(self):
transform = TransformState.make_mat(self.mat)
u, _, v = transform.get_pos()
return u, v
def get_rotation(self):
transform = TransformState.make_mat(self.mat)
rotation = transform.get_hpr()[2]
return rotation
def get_scale(self):
transform = TransformState.make_mat(self.mat)
su, _, sv = transform.get_scale()
return su, sv
def transform(self, mat, is_rel_value=True):
if not is_rel_value:
self.center_pos = Point3()
self.mat = Mat4(Mat4.ident_mat())
mat.xform_point_in_place(self.center_pos)
self.mat *= mat
def set_default_transform(self):
def_pos = self.default_positions
count = len(def_pos) // 3
pos = sum([Point3(*def_pos[i*3:i*3+3]) for i in range(count)], Point3()) / count
self.default_pos = Point3(pos)
self.center_pos = pos
self.mat = Mat4(Mat4.translate_mat(pos))
class UVPrimitive:
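"""Flattened UV-space representation of a world primitive; builds selectable
per-part geometry from the primitive's UV borders and mirrors part transforms
back to the model's UVs."""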
def __init__(self, uv_set_id, part_registry=None, primitive=None, data_copy=None):
self.uv_set_id = uv_set_id
self._pos_array_start = None
self._rows_to_transf = SparseArray()
if data_copy:
self._primitive = data_copy["primitive"]
self.parts = data_copy["parts"]
self.geom = data_copy["geom"]
else:
self._primitive = primitive
self.parts = []
self.geom = None
self.__create_geometry(part_registry)
def destroy(self, destroy_world_parts=True):
self.parts = []
self.geom.detach_node()
self.geom = None
if destroy_world_parts:
self._primitive.destroy_parts()
self._primitive = None
self._pos_array_start = None
self._rows_to_transf = None
def copy(self, uv_set_id=None):
data_copy = {"primitive": self._primitive}
data_copy["parts"] = parts = []
geom_node = self.geom.node()
vertex_data = GeomVertexData(geom_node.modify_geom(0).modify_vertex_data())
unsel_tris_geom = Geom(vertex_data)
sel_tris_geom = Geom(vertex_data)
seam_geom = Geom(vertex_data)
seam_geom.add_primitive(self.geom.get_child(0).node().get_geom(0).get_primitive(0))
geom_node_copy = GeomNode(geom_node.name)
geom_node_copy.add_geom(unsel_tris_geom, UVMgr.get("part_states")["unselected"])
geom_node_copy.add_geom(sel_tris_geom)
data_copy["geom"] = geom = NodePath(geom_node_copy)
geom.set_state(UVMgr.get("part_states")["selected"])
geom.set_effects(UVMgr.get("poly_selection_effects"))
geom.set_bin("background", 10)
geom.set_tag("uv_template", "poly")
seam_node_copy = GeomNode("seams")
seam_node_copy.add_geom(seam_geom)
seams_copy = geom.attach_new_node(seam_node_copy)
seams_copy.set_state(self.geom.get_child(0).get_state())
seams_copy.set_tag("uv_template", "seam")
prim = UVPrimitive(uv_set_id, None, None, data_copy)
for part_index, part in enumerate(self._primitive.parts):
part_copy = self.parts[part_index].copy(prim)
parts.append(part_copy)
if uv_set_id is not None:
part.uv_parts[uv_set_id] = part_copy
sel_uv_parts = []
other = self._primitive if uv_set_id is None else self
for uv_part, part in zip(parts, other.parts):
if part.is_selected:
sel_uv_parts.append(uv_part)
prim.set_selected_parts(sel_uv_parts)
return prim
def __create_geometry(self, part_registry):
uv_set_id = self.uv_set_id
count = sum(len(p.uv_borders[uv_set_id]) for p in self._primitive.parts)
vertex_format_picking = Mgr.get("vertex_format_picking")
vertex_data = GeomVertexData("data", vertex_format_picking, Geom.UH_dynamic)
vertex_data.reserve_num_rows(count)
vertex_data.unclean_set_num_rows(count)
self._vertex_data = vertex_data
pos_writer = GeomVertexWriter(vertex_data, "vertex")
col_writer = GeomVertexWriter(vertex_data, "color")
ind_writer = GeomVertexWriter(vertex_data, "index")
pickable_type_id = PickableTypes.get_id("primitive_part")
vert_index = 0
parts = self.parts
unsel_tris_geom = Geom(vertex_data)
sel_tris_geom = Geom(vertex_data)
seam_geom = Geom(vertex_data)
seam_prim = GeomLines(Geom.UH_static)
seam_prim.reserve_num_vertices(count)
seam_geom.add_primitive(seam_prim)
for part_index, part in enumerate(self._primitive.parts):
uv_border = part.uv_borders[uv_set_id]
tri_vert_count = (len(uv_border) - 2) * 3
unsel_tris_prim = GeomTriangles(Geom.UH_static)
unsel_tris_prim.set_index_type(Geom.NT_uint32)
unsel_tris_prim.reserve_num_vertices(tri_vert_count)
picking_col_id = part.picking_color_id
picking_color = get_color_vec(picking_col_id, pickable_type_id)
start_index = vert_index
seam_prim.add_vertices(start_index, start_index + 1)
for i, (u, v) in enumerate(uv_border):
pos = (u, 0., v)
pos_writer.add_data3(pos)
col_writer.add_data4(picking_color)
ind_writer.add_data1i(part_index)
if i > 1:
unsel_tris_prim.add_vertices(start_index, vert_index - 1, vert_index)
seam_prim.add_vertices(vert_index - 1, vert_index)
vert_index += 1
seam_prim.add_vertices(vert_index - 1, start_index)
row_range = (start_index, vert_index)
pos = sum([Point3(u, 0., v) for u, v in uv_border], Point3()) / len(uv_border)
start_positions = array.array("f", [c for p in (Point3(u, 0., v)
for u, v in uv_border) for c in p])
uv_part = UVPrimitivePart(picking_col_id, self, row_range, start_positions,
part.uv_defaults, unsel_tris_prim, pos)
parts.append(uv_part)
part.uv_parts[self.uv_set_id] = uv_part
part_registry[picking_col_id] = uv_part
unsel_tris_geom.add_primitive(unsel_tris_prim)
name = f"{self._primitive.model.id}_uv_geom"
geom_node = GeomNode(name)
geom_node.add_geom(unsel_tris_geom, UVMgr.get("part_states")["unselected"])
geom_node.add_geom(sel_tris_geom)
geom = GD.uv_prim_geom_root.attach_new_node(geom_node)
geom.set_state(UVMgr.get("part_states")["selected"])
geom.set_effects(UVMgr.get("poly_selection_effects"))
geom.set_bin("background", 10)
geom.set_tag("uv_template", "poly")
self.geom = geom
geom_node = GeomNode("seams")
geom_node.add_geom(seam_geom)
seams_node = geom.attach_new_node(geom_node)
seams_node.set_state(UVMgr.get("part_states")["unselected"])
seams_node.set_color(0., 1., 0., 1.)
seams_node.set_transparency(TransparencyAttrib.M_none)
seams_node.set_render_mode_thickness(1, 2)
seams_node.set_tag("uv_template", "seam")
def get_selected_parts(self):
return [p for p in self.parts if p.is_selected]
def set_selected_parts(self, parts):
geom = self.geom
unsel_geom = geom.node().modify_geom(0)
unsel_geom.clear_primitives()
sel_geom = geom.node().modify_geom(1)
sel_geom.clear_primitives()
rows = self._rows_to_transf
rows.clear()
for part in self.parts:
if part in parts:
start_row, end_row = part.data_row_range
rows.set_range(start_row, end_row - start_row)
sel_geom.add_primitive(part.geom_prim)
part.is_selected = True
else:
unsel_geom.add_primitive(part.geom_prim)
part.is_selected = False
def set_part_state(self, sel_state, state):
if sel_state == "unselected":
self.geom.node().set_geom_state(0, state)
else:
self.geom.set_state(state)
def init_transform(self):
pos_array = self.geom.node().modify_geom(0).modify_vertex_data().modify_array(0)
self._pos_array_start = GeomVertexArrayData(pos_array)
def __set_start_positions(self):
vertex_data = self.geom.node().modify_geom(0).modify_vertex_data()
pos_view = memoryview(vertex_data.modify_array(0)).cast("B").cast("f")
for part in self.get_selected_parts():
start_row, end_row = part.data_row_range
pos_values = part.start_positions
pos_view[start_row*3:end_row*3] = pos_values
def set_transform_component(self, transf_type, axis, value):
self.__set_start_positions()
vertex_data = self.geom.node().modify_geom(0).modify_vertex_data()
tmp_vertex_data = GeomVertexData(vertex_data)
primitive = self._primitive
for part in self.get_selected_parts():
mat = part.mat
transform = TransformState.make_mat(mat)
if transf_type == "translate":
pos = VBase3(transform.get_pos())
if axis == "u":
pos.x = value
else:
pos.z = value
new_mat = transform.set_pos(pos).get_mat()
elif transf_type == "rotate":
hpr = VBase3(transform.get_hpr())
hpr.z = value
new_mat = transform.set_hpr(hpr).get_mat()
elif transf_type == "scale":
scale = VBase3(transform.get_scale())
if axis == "u":
scale.x = max(.01, value)
else:
scale.z = max(.01, value)
new_mat = transform.set_scale(scale).get_mat()
part.transform(new_mat, is_rel_value=False)
offset_mat = Mat4.translate_mat(-part.default_pos)
new_mat = offset_mat * new_mat
start_row, end_row = part.data_row_range
rows = SparseArray.range(start_row, end_row - start_row)
tmp_vertex_data.transform_vertices(new_mat, rows)
mat = (Mat4.rotate_mat_normaxis(90., Vec3.right()) * new_mat
* Mat4.rotate_mat_normaxis(-90., Vec3.right()))
primitive.transform_uvs(self.uv_set_id, [self.parts.index(part)],
mat, is_rel_value=False)
pos_array = GeomVertexArrayData(tmp_vertex_data.get_array(0))
vertex_data.set_array(0, pos_array)
vertex_data = self.geom.node().modify_geom(1).modify_vertex_data()
vertex_data.set_array(0, pos_array)
vertex_data = self.geom.get_child(0).node().modify_geom(0).modify_vertex_data()
vertex_data.set_array(0, pos_array)
bounds = self.geom.get_child(0).node().get_bounds()
if bounds.radius == 0.:
bounds = BoundingSphere(bounds.center, .1)
self.geom.node().set_bounds(bounds)
def reset_default_part_uvs(self):
pos_views = []
vertex_data = self.geom.node().modify_geom(0).modify_vertex_data()
pos_view = memoryview(vertex_data.modify_array(0)).cast("B").cast("f")
pos_views.append(pos_view)
vertex_data = self.geom.node().modify_geom(1).modify_vertex_data()
pos_view = memoryview(vertex_data.modify_array(0)).cast("B").cast("f")
pos_views.append(pos_view)
vertex_data = self.geom.get_child(0).node().modify_geom(0).modify_vertex_data()
pos_view = memoryview(vertex_data.modify_array(0)).cast("B").cast("f")
pos_views.append(pos_view)
for part in self.get_selected_parts():
start_row, end_row = part.data_row_range
for pos_view in pos_views:
pos_view[start_row*3:end_row*3] = part.default_positions
part.set_default_transform()
bounds = self.geom.get_child(0).node().get_bounds()
if bounds.radius == 0.:
bounds = BoundingSphere(bounds.center, .1)
self.geom.node().set_bounds(bounds)
self._primitive.reset_default_part_uvs(self.uv_set_id)
def transform_selection(self, mat):
vertex_data = self.geom.node().modify_geom(0).modify_vertex_data()
tmp_vertex_data = GeomVertexData(vertex_data)
tmp_vertex_data.set_array(0, GeomVertexArrayData(self._pos_array_start))
tmp_vertex_data.transform_vertices(mat, self._rows_to_transf)
pos_array = GeomVertexArrayData(tmp_vertex_data.get_array(0))
vertex_data.set_array(0, pos_array)
vertex_data = self.geom.node().modify_geom(1).modify_vertex_data()
vertex_data.set_array(0, pos_array)
vertex_data = self.geom.get_child(0).node().modify_geom(0).modify_vertex_data()
vertex_data.set_array(0, pos_array)
def finalize_transform(self, mat, cancelled=False):
if cancelled:
vertex_data = self.geom.node().modify_geom(0).modify_vertex_data()
vertex_data.set_array(0, self._pos_array_start)
vertex_data = self.geom.node().modify_geom(1).modify_vertex_data()
vertex_data.set_array(0, self._pos_array_start)
vertex_data = self.geom.get_child(0).node().modify_geom(0).modify_vertex_data()
vertex_data.set_array(0, self._pos_array_start)
else:
primitive = self._primitive
part_indices = []
for i, part in enumerate(self.parts):
if part.is_selected:
part.transform(mat)
part_indices.append(i)
mat = (Mat4.rotate_mat_normaxis(90., Vec3.right()) * mat
* Mat4.rotate_mat_normaxis(-90., Vec3.right()))
primitive.transform_uvs(self.uv_set_id, part_indices, mat)
bounds = self.geom.get_child(0).node().get_bounds()
if bounds.radius == 0.:
bounds = BoundingSphere(bounds.center, .1)
self.geom.node().set_bounds(bounds)
self._pos_array_start = None
def show(self):
self.geom.reparent_to(GD.uv_prim_geom_root)
def hide(self):
self.geom.detach_node()
| 36.185882 | 91 | 0.63099 | 2,053 | 15,379 | 4.375548 | 0.093035 | 0.072359 | 0.026494 | 0.026049 | 0.516977 | 0.408549 | 0.386508 | 0.354447 | 0.337638 | 0.297562 | 0 | 0.011423 | 0.265687 | 15,379 | 424 | 92 | 36.271226 | 0.784026 | 0 | 0 | 0.31746 | 0 | 0 | 0.026075 | 0.006437 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073016 | false | 0 | 0.006349 | 0.006349 | 0.107937 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ba2705eba4f588807811918b9d16b830ce3ae7b | 5,185 | py | Python | lyrebird/config.py | voodoocooder/lyrebird | 02e6ebdcfd646855f9648293d9b5ef6da1c6603b | [
"MIT"
] | 1 | 2020-12-11T03:20:11.000Z | 2020-12-11T03:20:11.000Z | lyrebird/config.py | voodoocooder/lyrebird | 02e6ebdcfd646855f9648293d9b5ef6da1c6603b | [
"MIT"
] | null | null | null | lyrebird/config.py | voodoocooder/lyrebird | 02e6ebdcfd646855f9648293d9b5ef6da1c6603b | [
"MIT"
] | null | null | null | import codecs
import json
import os
from pathlib import Path
import shutil
from packaging import version
from lyrebird.mock.logger_helper import get_logger
from .version import IVERSION
from typing import List
from flask import Response, abort
from lyrebird.mock.console_helper import warning_msg, err_msg
import subprocess
"""
Config manager
Config file path: ~/.lyrebird/conf.json
Lyrebird will copy config template to ~/.lyrebird/conf.json, when lyrebird server start.
"""
CURRENT_DIR = Path(__file__).parent
CONFIG_TEMPLATE_FILE = CURRENT_DIR/'templates'/'conf.json'
CACHE_TEMPLATE_FILE = CURRENT_DIR/'templates'/'cache.json'
class Config:
def __init__(self, root_dir='~', name='lyrebird'):
self.root = Path(root_dir, '.lyrebird').expanduser()
self.default_conf_path = Path(self.root, 'conf.json').expanduser()
self.custom_conf_dir = Path(self.root, 'conf').expanduser()
self.cache_path = Path(self.root, 'cache.json').expanduser()
self.pid_path = Path(self.root, f'{name}.pid').expanduser()
self.tmp_dir = Path(self.root, 'tmp').expanduser()
self.plugin_root = Path(self.root, 'plugins')
def init(self):
if not self.root.exists():
self.root.mkdir()
if not self.default_conf_path.exists():
shutil.copyfile(CONFIG_TEMPLATE_FILE, self.default_conf_path)
else:
current_conf_version = self.load_default().get('version', '0.0.0')
template_conf_version = self.load_template().get('version', '0.0.0')
if version.parse(current_conf_version) < version.parse(template_conf_version):
shutil.copyfile(CONFIG_TEMPLATE_FILE, self.default_conf_path)
if not self.cache_path.exists():
shutil.copyfile(CACHE_TEMPLATE_FILE, self.cache_path)
if not self.custom_conf_dir.exists():
self.custom_conf_dir.mkdir(parents=True)
if not self.tmp_dir.exists():
self.tmp_dir.mkdir(parents=True)
def load(self, name):
conf = self.load_default()
if name:
conf.update(self.load_custom(name))
else:
self.load_cache().get('custom_conf')
return conf
def load_template(self):
return json.loads(codecs.open(CONFIG_TEMPLATE_FILE).read())
def load_default(self):
return json.loads(codecs.open(self.default_conf_path).read())
def load_custom(self, name):
if not name.endswith('.json'):
name = name + '.json'
custom_conf_path = self.custom_conf_dir / name
if custom_conf_path.exists():
return json.loads(codecs.open(self.custom_conf_dir/name).read())
else:
return json.loads(codecs.open(self.custom_conf_dir/'tmp.json').read())
def save_pid(self):
with codecs.open(self.pid_path, 'w', 'utf-8') as f:
f.write(str(os.getpid()))
def read_pid(self):
with codecs.open(self.pid_path, 'r', 'utf-8') as f:
return int(f.read())
def remove_pid(self):
if self.pid_path.exists():
os.remove(self.pid_path)
def load_tmp(self, name, **kwargs):
tmp_conf_path = self.custom_conf_dir/'tmp.json'
with codecs.open(tmp_conf_path, 'w', 'utf-8') as f:
f.write(json.dumps(kwargs, indent=4, ensure_ascii=False))
conf = self.load(name)
conf.update(kwargs)
return conf
def load_cache(self):
if self.cache_path.exists():
with codecs.open(self.cache_path, 'r', 'utf-8') as f:
return json.loads(f.read())
else:
raise ConfCacheNotFound
def save_cache(self, cache):
with codecs.open(self.cache_path, 'w', 'utf-8') as f:
f.write(json.dumps(cache, indent=4, ensure_ascii=False))
def download(self, uri):
repo = self.tmp_dir/'repo'
if repo.exists():
shutil.rmtree(repo)
p = subprocess.run(f"git clone {uri} {repo}", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if p.returncode == 0:
print(p.stdout.decode())
else:
# raise GitCloneFailed(p.stderr.decode())
get_logger().critical(
msg=f'Failed to download the config files; Overbridge remains usable and you can manually import the corresponding plugin base \n download conf data by git failed:{p.stderr.decode()}')
return
for subfile in repo.iterdir():
if not subfile.is_dir():
continue
if subfile.name.startswith('.'):
continue
if subfile.name == 'conf':
for conf_file in subfile.iterdir():
shutil.copy(conf_file, self.custom_conf_dir/conf_file.name)
else:
try:
dst = os.path.join(self.plugin_root, subfile.name)
command = f'cp -f -R {subfile}/* {dst}'
if not os.path.exists(dst):
os.makedirs(dst)
subprocess.run(command, shell=True)
except Exception as e:
print(e)
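# A minimal usage sketch (assumed flow, not from the project docs):
# conf_manager = Config()
# conf_manager.init() # copies the bundled templates on first run
# conf = conf_manager.load('custom') # merges ~/.lyrebird/conf/custom.json over conf.json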
class ConfCacheNotFound(Exception):
pass
class GitCloneFailed(Exception):
pass
| 35.272109 | 124 | 0.615043 | 671 | 5,185 | 4.587183 | 0.201192 | 0.035737 | 0.036387 | 0.044185 | 0.215725 | 0.169591 | 0.11371 | 0.103314 | 0.077973 | 0.017544 | 0 | 0.003671 | 0.264417 | 5,185 | 146 | 125 | 35.513699 | 0.803356 | 0.007522 | 0 | 0.12069 | 0 | 0 | 0.069325 | 0.014025 | 0 | 0 | 0 | 0 | 0 | 1 | 0.112069 | false | 0.017241 | 0.112069 | 0.017241 | 0.327586 | 0.017241 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ba28bfe8ab06942b3b06a80f0ac591f457cd467 | 1,279 | py | Python | LeetCode/Python3/Math/313. Super Ugly Number.py | WatsonWangZh/CodingPractice | dc057dd6ea2fc2034e14fd73e07e73e6364be2ae | [
"MIT"
] | 11 | 2019-09-01T22:36:00.000Z | 2021-11-08T08:57:20.000Z | LeetCode/Python3/Math/313. Super Ugly Number.py | WatsonWangZh/LeetCodePractice | dc057dd6ea2fc2034e14fd73e07e73e6364be2ae | [
"MIT"
] | null | null | null | LeetCode/Python3/Math/313. Super Ugly Number.py | WatsonWangZh/LeetCodePractice | dc057dd6ea2fc2034e14fd73e07e73e6364be2ae | [
"MIT"
] | 2 | 2020-05-27T14:58:52.000Z | 2020-05-27T15:04:17.000Z | # Write a program to find the nth super ugly number.
# Super ugly numbers are positive numbers
# whose all prime factors are in the given prime list primes of size k.
# Example:
# Input: n = 12, primes = [2,7,13,19]
# Output: 32
# Explanation: [1,2,4,7,8,13,14,16,19,26,28,32] is the sequence of the first 12
# super ugly numbers given primes = [2,7,13,19] of size 4.
# Note:
# 1 is a super ugly number for any given primes.
# The given numbers in primes are in ascending order.
# 0 < k ≤ 100, 0 < n ≤ 106, 0 < primes[i] < 1000.
# The nth super ugly number is guaranteed to fit in a 32-bit signed integer.
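# Approach: k-pointer merge -- idx[i] tracks, for primes[i], the next ugly
# number to multiply; each step takes the minimum candidate and advances
# every pointer that produced it, which also skips duplicates.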
from typing import List
class Solution:
def nthSuperUglyNumber(self, n: int, primes: List[int]) -> int:
ugly = []
ugly.append(1)
idx = [0 for _ in range(len(primes))]
while len(ugly) < n:
tmp = []
mn = float('inf')
for i in range(len(primes)):
tmp.append(ugly[idx[i]] * primes[i])
# print(tmp)
mn = min(tmp)
for i in range(len(primes)):
if tmp[i] == mn:
idx[i] += 1
ugly.append(mn)
# print(ugly, idx)
return ugly[-1] | 31.975 | 80 | 0.526192 | 190 | 1,279 | 3.547368 | 0.421053 | 0.066766 | 0.066766 | 0.071217 | 0.15727 | 0.059347 | 0 | 0 | 0 | 0 | 0 | 0.070905 | 0.360438 | 1,279 | 40 | 81 | 31.975 | 0.750611 | 0.487099 | 0 | 0.125 | 0 | 0 | 0.00468 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ba40fa09320088e8373dee62cef5d2fb0a82f5f | 3,368 | py | Python | third_party_package/RDKit_2015_03_1/rdkit/Chem/EState/EState_VSA.py | Ivy286/cluster_basedfps | 7fc216537f570436f008ea567c137d03ba2b6d81 | [
"WTFPL"
] | 9 | 2019-04-23T01:46:12.000Z | 2021-08-16T07:07:12.000Z | third_party_package/RDKit_2015_03_1/rdkit/Chem/EState/EState_VSA.py | Ivy286/cluster_basedfps | 7fc216537f570436f008ea567c137d03ba2b6d81 | [
"WTFPL"
] | null | null | null | third_party_package/RDKit_2015_03_1/rdkit/Chem/EState/EState_VSA.py | Ivy286/cluster_basedfps | 7fc216537f570436f008ea567c137d03ba2b6d81 | [
"WTFPL"
] | 5 | 2016-09-21T03:47:48.000Z | 2019-07-30T22:17:35.000Z | # $Id$
#
# Copyright (C)2003-2010 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Hybrid EState-VSA descriptors (like the MOE VSA descriptors)
"""
import numpy
from rdkit.Chem.EState.EState import EStateIndices as EStateIndices_
from rdkit.Chem.MolSurf import _LabuteHelper as VSAContribs_
import bisect
"""
These default VSA bins were chosen using the PP3K solubility data
set. An arbitrary number of bins were selected and the
boundaries were selected to give an approximately equal number of
atoms per bin
"""
vsaBins=[4.78,5.00,5.410,5.740,6.00,6.07,6.45,7.00,11.0]
def VSA_EState_(mol,bins=None,force=1):
""" *Internal Use Only*
"""
if not force and hasattr(mol,'_vsaEState'):
return mol._vsaEState
if bins is None: bins = vsaBins # binning is by VSA contribution, so the VSA bin edges defined above apply (this snapshot defaulted to estateBins here)
propContribs = EStateIndices_(mol,force=force)
volContribs = VSAContribs_(mol)
ans = numpy.zeros(len(bins)+1,numpy.float)
for i,prop in enumerate(propContribs):
if prop is not None:
bin = bisect.bisect_right(bins,volContribs[i+1])
ans[bin] += prop
mol._vsaEState=ans
return ans
"""
These default EState bins were chosen using the PP3K solubility data
set. An arbitrary number of bins (10) were selected and the
boundaries were selected to give an approximately equal number of
atoms per bin
"""
estateBins=[-0.390,0.290,0.717,1.165,1.540,1.807,2.05,4.69,9.17,15.0]
def EState_VSA_(mol,bins=None,force=1):
""" *Internal Use Only*
"""
if not force and hasattr(mol,'_eStateVSA'):
return mol._eStateVSA
if bins is None: bins = estateBins
propContribs = EStateIndices_(mol,force=force)
volContribs = VSAContribs_(mol)
ans = numpy.zeros(len(bins)+1,numpy.float)
for i,prop in enumerate(propContribs):
if prop is not None:
bin = bisect.bisect_right(bins,prop)
ans[bin] += volContribs[i+1]
mol._eStateVSA=ans
return ans
def _InstallDescriptors():
for i in range(len(vsaBins)):
fn = lambda x,y=i:VSA_EState_(x,force=0)[y]
if i > 0:
fn.__doc__="VSA EState Descriptor %d (% 4.2f <= x < % 4.2f)"%(i+1,vsaBins[i-1],vsaBins[i])
else:
fn.__doc__="VSA EState Descriptor %d (-inf < x < % 4.2f)"%(i+1,vsaBins[i])
name="VSA_EState%d"%(i+1)
fn.version="1.0.0"
globals()[name]=fn
i+=1
fn = lambda x,y=i:VSA_EState_(x,force=0)[y]
fn.__doc__="VSA EState Descriptor %d (% 4.2f <= x < inf)"%(i+1,vsaBins[i-1])
name="VSA_EState%d"%(i+1)
fn.version="1.0.0"
globals()[name]=fn
fn=None
for i in range(len(estateBins)):
fn = lambda x,y=i:EState_VSA_(x,force=0)[y]
if i > 0:
fn.__doc__="EState VSA Descriptor %d (% 4.2f <= x < % 4.2f)"%(i+1,estateBins[i-1],estateBins[i])
else:
fn.__doc__="EState VSA Descriptor %d (-inf < x < % 4.2f)"%(i+1,estateBins[i])
name="EState_VSA%d"%(i+1)
fn.version="1.0.1"
globals()[name]=fn
i+=1
fn = lambda x,y=i:EState_VSA_(x,force=0)[y]
fn.__doc__="EState VSA Descriptor %d (% 4.2f <= x < inf)"%(i+1,estateBins[i-1])
name="EState_VSA%d"%(i+1)
fn.version="1.0.1"
globals()[name]=fn
fn=None
# Change log for EState_VSA descriptors:
# version 1.0.1: optimizations, values unaffected
_InstallDescriptors()
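# A minimal usage sketch (assumed API, mirroring the usual RDKit descriptor calls):
# from rdkit import Chem
# mol = Chem.MolFromSmiles('c1ccccc1O')
# val = VSA_EState1(mol) # one of the functions installed into globals() above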
| 30.618182 | 102 | 0.676366 | 566 | 3,368 | 3.922261 | 0.259717 | 0.016216 | 0.010811 | 0.018018 | 0.623874 | 0.595496 | 0.582883 | 0.565766 | 0.543243 | 0.482883 | 0 | 0.049062 | 0.17696 | 3,368 | 109 | 103 | 30.899083 | 0.751804 | 0.139846 | 0 | 0.575758 | 0 | 0 | 0.147023 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.060606 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ba69541de6a827ed0694499f2b0ff1f99e24608 | 2,019 | py | Python | examples/extract_clips.py | hannahherbig/gopro-py-api | 7f4f87bc34448e95c2ff1230fe85724420db8b4e | [
"MIT"
] | null | null | null | examples/extract_clips.py | hannahherbig/gopro-py-api | 7f4f87bc34448e95c2ff1230fe85724420db8b4e | [
"MIT"
] | 1 | 2021-06-11T01:02:30.000Z | 2021-06-11T01:02:30.000Z | examples/extract_clips.py | hannahherbig/gopro-py-api | 7f4f87bc34448e95c2ff1230fe85724420db8b4e | [
"MIT"
] | null | null | null | import time
import numpy as np
from goprocam import GoProCamera, constants
gpCam = GoProCamera.GoPro()
# Extracts clips from latest video
latestVideo = gpCam.getVideoInfo()
print("Tag count %s" % latestVideo.get(constants.Info.TagCount))
arrayLength = latestVideo[constants.Info.TagCount]
if arrayLength % 2 == 0:
print("Matching tag pairs!")
splitArray = np.array_split(latestVideo[constants.Info.Tags], arrayLength / 2)
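# Each consecutive pair of HiLight tags is treated as the start/stop time of
# one clip, in milliseconds from the beginning of the video.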
for tag in splitArray:
startMs = tag[0]
stopMs = tag[1]
print(f"\n[START ms] {startMs}\n[STOP ms] {stopMs}")
fileName = "{}/{}".format(
gpCam.getMediaInfo("folder"), gpCam.getMediaInfo("file")
)
videoId = gpCam.getClip(
fileName,
constants.Clip.R1080p,
constants.Clip.FPS_NORMAL,
str(startMs),
str(stopMs),
)
print(
"On queue!\nVideo Id: %s\nStatus: %s"
% (videoId, gpCam.clipStatus(str(videoId)))
)
time.sleep(1)
while gpCam.clipStatus(str(videoId)) != "complete":
time.sleep(1)
time.sleep(2)
print(
"Downloading!\nVideo Id: %s\nStatus: %s"
% (videoId, gpCam.clipStatus(str(videoId)))
)
url = gpCam.getClipURL(str(videoId))
download = [
url.split("/")[len(url.split("/")) - 1],
url.split("/")[len(url.split("/")) - 2],
]
print("Downloading %s" % download)
try:
gpCam.downloadLastMedia(
path=url,
custom_filename="output/{}_{}_{}".format(
startMs, stopMs, download[0].replace("TRV", "MP4")
),
)
except (Exception) as e:
time.sleep(2)
gpCam.downloadLastMedia(
path=url,
custom_filename="output/{}_{}_{}".format(
startMs, stopMs, download[0].replace("TRV", "MP4")
),
)
| 31.546875 | 82 | 0.526003 | 197 | 2,019 | 5.350254 | 0.411168 | 0.037951 | 0.051233 | 0.071158 | 0.29981 | 0.263757 | 0.263757 | 0.263757 | 0.263757 | 0.263757 | 0 | 0.014116 | 0.333333 | 2,019 | 63 | 83 | 32.047619 | 0.768945 | 0.015849 | 0 | 0.315789 | 0 | 0 | 0.115869 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.052632 | 0 | 0.052632 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ba76bd8b43fd2bdb2df468a9e9d41baccb0e88c | 1,647 | py | Python | gogoedu/management/commands/create-examples.py | tuandang98/gogoedu | f98587f9d315e1253d3a3b9e1e4cd9f148184e29 | [
"MIT"
] | 2 | 2021-04-27T16:00:32.000Z | 2021-05-30T14:00:07.000Z | gogoedu/management/commands/create-misson.py | tuandang98/gogoedu | f98587f9d315e1253d3a3b9e1e4cd9f148184e29 | [
"MIT"
] | null | null | null | gogoedu/management/commands/create-misson.py | tuandang98/gogoedu | f98587f9d315e1253d3a3b9e1e4cd9f148184e29 | [
"MIT"
] | null | null | null | from django.core.management import BaseCommand
from django_gamification.models import BadgeDefinition, Category, UnlockableDefinition, GamificationInterface
from gogoedu.models import myUser
class Command(BaseCommand):
help = 'Creates the default gamification badge and unlockable definitions for the app'
def handle(self, *args, **options):
category_tested = Category.objects.create(name='Tested Badges', description='These are the tested badges')
Diamond=BadgeDefinition.objects.create(
name='Diamond',
description='Congratulation Diamond',
points=100,
progression_target=1500,
category=category_tested,
)
Gold=BadgeDefinition.objects.create(
name='Gold',
description='Congratulation Gold',
points=100,
progression_target=700,
next_badge=Diamond,
category=category_tested,
)
Silver=BadgeDefinition.objects.create(
name='Silver',
description='Congratulation Silver',
points=100,
progression_target=300,
next_badge=Gold,
category=category_tested,
)
BadgeDefinition.objects.create(
name='Bronze',
description='Congratulation Bronze',
points=50,
progression_target=100,
next_badge=Silver,
category=category_tested,
)
UnlockableDefinition.objects.create(
name='Some super sought after feature',
description='You unlocked a super sought after feature',
points_required=50
)
| 31.075472 | 112 | 0.612629 | 145 | 1,647 | 6.862069 | 0.413793 | 0.078392 | 0.102513 | 0.128643 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022887 | 0.310261 | 1,647 | 52 | 113 | 31.673077 | 0.852993 | 0 | 0 | 0.162791 | 0 | 0 | 0.151184 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0 | 0.069767 | 0 | 0.139535 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ba9ca4fa4978599705739170468a0038dc28a23 | 16,675 | py | Python | sea/directivity.py | MuriloCardosoSoares/sea | aa105cbbedcde0ddcb1047c28cd4337e7f72ee6d | [
"MIT"
] | 5 | 2020-12-14T14:00:01.000Z | 2021-11-22T17:51:55.000Z | sea/directivity.py | MuriloCardosoSoares/sea | aa105cbbedcde0ddcb1047c28cd4337e7f72ee6d | [
"MIT"
] | null | null | null | sea/directivity.py | MuriloCardosoSoares/sea | aa105cbbedcde0ddcb1047c28cd4337e7f72ee6d | [
"MIT"
] | null | null | null | import numpy as np
import scipy
import scipy.io
import pickle
from scipy.special import lpmv, spherical_jn, spherical_yn
class Directivity:
def __init__(self, data_path, rho0, c0, freq_vec, simulated_ir_duration, measurement_radius, sh_order, type, sample_rate=44100, **kwargs):
'''
This script encodes the measured impulse responses, representing the
directivity of a source, into spherical harmonic coefficients.
source_data_path -> path that leads to the .mat file that contains the source data
source_name -> string, used as the name of the file where the solution is saved
rho0 -> air density
c0 -> speed of sound
simulated_ir_duration -> length of simulation [s]
measurement_radius -> distance from source to measurement positions [m]
existing_pre_delay -> delay before direct sound arrives as provided in GRAS dataset [samples]
'''
self.data_path = data_path
self.rho0 = rho0
self.c0 = c0
self.freq_vec = freq_vec
self.simulated_ir_duration = simulated_ir_duration
self.measurement_radius = measurement_radius
self.sh_order = sh_order
self.type = type
self.sample_rate = sample_rate
try:
self.existing_pre_delay = kwargs["existing_pre_delay"]
except:
pass
def encode_directivity (self, file_name):
self.file_name = file_name
# Derived parameters:
nfft = self.sample_rate*self.simulated_ir_duration # Number of FFT points
f_list = self.sample_rate*np.arange(nfft)/nfft # List of FFT frequencies
fi_lim_lo = np.argmin(np.abs(f_list - self.freq_vec[0])) # FFT bins above which to encode
fi_lim_hi = np.argmin(np.abs(f_list - self.freq_vec[-1])) # FFT bins below which to encode (Hz)
f_list = f_list[fi_lim_lo:fi_lim_hi + 1] # Only retain frequencies to be encoded
if self.type == "source":
## Load and adjust measured impulse responses:
# Load measured impulse responses:
print ('Loading source data. It might be computationally costly...')
source_data = scipy.io.loadmat(self.data_path) # loads variables IR, Phi, Theta
ir = np.array (source_data['IR'])
# Convert measurement angles from degrees to radians & ensure are column vectors
beta = np.array(source_data['Theta']) * np.pi/180
beta = beta.reshape((np.size(beta), 1))
alpha = np.array(source_data['Phi']) * np.pi/180
alpha = alpha.reshape((np.size(alpha), 1))
del source_data
# Correct initial time delay for measurement distance and window:
desired_pre_delay = round(self.sample_rate * self.measurement_radius / self.c0) # Delay before direct sound arrives based on measurement radius (#samples)
half_window = np.concatenate((np.array(0.5-0.5*np.cos(np.pi*np.linspace(0, 1, self.existing_pre_delay))).conj().T, np.array(np.ones((np.size(ir,0) - self.existing_pre_delay))))) # Rising window
half_window = half_window.reshape((np.size(half_window), 1))
ir = np.concatenate((np.zeros((desired_pre_delay - self.existing_pre_delay, np.size(ir,1))), np.multiply(ir, half_window)))
half_window = np.concatenate((np.ones((np.ceil(np.size(ir,0)/2).astype(int),1)), 0.5+0.5*np.cos(np.pi*np.linspace(0, 1, np.floor(np.size(ir,0)/2).astype(int)).conj().T.reshape(np.floor(np.size(ir,0)/2).astype(int),1)))) # Falling window
ir = np.multiply(ir, half_window);
# Derived parameters:
num_meas = np.size(ir,1) # Number of measurement points
## Fourier transform the impulse responses:
# Loop over measurement points and FFT:
phi_meas = np.zeros((fi_lim_hi-fi_lim_lo+1, num_meas), dtype = np.complex128)
print ('Computing FFTs')
for iMeas in range(num_meas):
fft_ir = np.conj(np.fft.fft(ir[:,iMeas], n = nfft)) # conj used because project uses exp(-1i*w*t) Fourier Transform
phi_meas[:,iMeas] = np.array([fft_ir[fi_lim_lo:fi_lim_hi + 1]]) # Only retain frequencies to be encoded
del ir, fft_ir, iMeas, fi_lim_lo, fi_lim_hi
# Transpose to optimise memory access for encoding step:
print ('Transposing transfer function array...')
phi_meas = np.transpose(phi_meas)
print ('Complete.')
## Encoding:
# Create weighting vector:
w = np.pi/180*(np.cos(beta-np.pi/360)-np.cos(beta+np.pi/360))
w[0] = 2*np.pi*(1-np.cos(np.pi/360))
w[-1] = w[0]
w = w.reshape((np.size(w), ))
print ('Weight addition error = %s.' % abs(np.sum(w) - (4*np.pi)))
# Pre-calculate spherical harmonic functions:
y_nm, dy_dbeta, dy_dalpha = spherical_harmonic_all(self.sh_order, alpha, beta)
# Loop over frequency:
self.sh_coefficients = []
i = 0
for fi, f in enumerate (f_list):
if f == self.freq_vec[i]:
# Calculate spherical Hankel functions:
hnOut = np.zeros(((self.sh_order+1)**2, 1), dtype = np.complex128)
for n in np.arange(self.sh_order+1):
for m in np.arange(-n, n + 1):
hnOut[sub2indSH(m,n),0] = spherical_hankel_out(n, self.measurement_radius*2*np.pi*f/self.c0)
# Calculate b_nm coefficients via a mode-matching approach (Eq. 9 in paper):
sh_coefficients_f = np.matmul(y_nm.conj().T, np.transpose(np.divide(np.multiply(w, phi_meas[:,fi]), hnOut)))
sh_coefficients_f = np.diagonal(sh_coefficients_f)
self.sh_coefficients.append(sh_coefficients_f)
i+=1
elif self.type == "receiver":
# Load measured impulse responses:
print ('Loading receiver directionality data. This may be computationally costly...')
receiver_data = scipy.io.loadmat(self.data_path) # loads variables HRIR_R,HRIR_L, Phi, Theta
hrir_l = np.array(receiver_data['HRIR_L'])
hrir_r = np.array(receiver_data['HRIR_R'])
azimuth = np.array(receiver_data['azimuth'])
azimuth = azimuth.reshape((np.size(azimuth), 1))
elevation = np.array(receiver_data['elevation'])
elevation = elevation.reshape((np.size(elevation), 1))
# Convert measurement angles from degrees to radians, and from elevation to polar
alpha = np.multiply(np.divide(azimuth, 360), (2*np.pi))
beta = np.multiply(np.divide(np.subtract(90, elevation), 360), (2*np.pi))
del receiver_data, azimuth, elevation
# Derived parameters:
ir_length = np.size(hrir_l, 0) # Length of recorded impulse response (#samples)
num_meas = np.size(hrir_l, 1) # Number of measurement points
## Fourier transform the impulse responses - left:
# The IR is windowed with a half-Hanning window applied to its last 25% to
# avoid a wrap-around discontinuity, and is then zero-padded to achieve the
# required frequency resolution.
half_window = np.concatenate((np.ones((np.ceil(ir_length/2).astype(int),1)), np.array(0.5+0.5*np.cos(np.pi*np.linspace(0, 1, np.floor(ir_length/2).astype(int)).conj().T)).reshape((np.size(np.linspace(0, 1, np.floor(ir_length/2).astype(int))), 1))))
half_window = half_window.reshape((np.size(half_window), ))
## Encoding:
# Pre-calculate spherical harmonic functions:
y_nm, dy_dbeta, dy_dalpha = spherical_harmonic_all(self.sh_order, alpha, beta)
# Pre-calculate spherical Hankel functions:
hnOut = np.zeros(((self.sh_order+1)**2, np.size(f_list)), dtype = np.complex128)
for fi, f in enumerate(f_list):
for n in np.arange(self.sh_order + 1):
for m in np.arange(-n, n + 1):
hnOut[sub2indSH(m,n),fi] = spherical_hankel_out(n, self.measurement_radius*2*np.pi*f/self.c0)
# Loop over measurement points and FFT - left:
hrtf = np.zeros((fi_lim_hi-fi_lim_lo+1, num_meas), dtype = np.complex128)
for i_meas in range(num_meas):
fft_hrir = np.conj(np.fft.fft(np.multiply(half_window, hrir_l[:,i_meas]), n = nfft)) # conj used because project uses exp(-1i*w*t) Fourier Transform
hrtf[:,i_meas] = np.array([fft_hrir[fi_lim_lo:fi_lim_hi + 1]]) # Only retain frequencies to be encoded
del hrir_l, fft_hrir, i_meas
# Transpose to optimise memory access for encoding step:
print('\tTransposing transfer function array...')
hrtf = np.transpose(hrtf)
print('Complete.\n')
# Loop over frequency - left:
self.sh_coefficients_left = []
i = 0
for fi, f in enumerate (f_list):
if f == self.freq_vec[i]:
# Calculate Lnm coefficients by a least-squares fit approach:
A = np.multiply(4*np.pi*np.transpose(hnOut[:,fi])/hnOut[0,fi], np.conj(y_nm))
sh_coefficients_left_f = np.linalg.lstsq (A, hrtf[:,fi])
self.sh_coefficients_left.append(sh_coefficients_left_f[0])
i+=1
# Loop over measurement points and FFT - right:
hrtf = np.zeros((fi_lim_hi-fi_lim_lo+1, num_meas), dtype = np.complex128)
for i_meas in range(num_meas):
fft_hrir = np.conj(np.fft.fft(np.multiply(half_window, hrir_r[:,i_meas]), n = nfft)) # conj used because project uses exp(-1i*w*t) Fourier Transform
hrtf[:,i_meas] = np.array([fft_hrir[fi_lim_lo:fi_lim_hi + 1]]) # Only retain frequencies to be encoded
del hrir_r, fft_hrir, i_meas
# Transpose to optimise memory access for encoding right:
print('\tTransposing transfer function array...')
hrtf = np.transpose(hrtf)
print('Complete.\n')
# Loop over frequency - right:
self.sh_coefficients_right = []
i = 0
for fi, f in enumerate (f_list):
if f == self.freq_vec[i]:
# Calculate Lnm coefficients by a least-squares fit approach:
A = np.multiply(4*np.pi*np.transpose(hnOut[:,fi])/hnOut[0,fi], np.conj(y_nm))
sh_coefficients_right_f = np.linalg.lstsq (A, hrtf[:,fi])
self.sh_coefficients_right.append(sh_coefficients_right_f[0])
i+=1
else:
raise ValueError("Type is not valid. It must be source or receiver.")
save_name = "%s.pickle" % self.file_name
pickle_obj = open(save_name, "wb")
pickle.dump(self, pickle_obj)
pickle_obj.close()
print ("Saved results to %s.pickle" % self.file_name)
#### Functions #####
def sub2indSH (m,n):
"""
i = sub2indSH(m,n)
Convert Spherical Harmonic (m,n) indices to array index i
Assumes that i iterates from 0 (Python style)
"""
i = n**2 + n + m
return i
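# Worked example (added illustration): i = n**2 + n + m enumerates the SH
# orders/degrees in ascending order, e.g. (m, n) = (0, 0) -> 0,
# (-1, 1) -> 1, (0, 1) -> 2, (1, 1) -> 3, so an array of length
# (max_order + 1)**2 holds exactly one entry per (m, n) pair.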
def spherical_harmonic_all (max_order, alpha, beta):
"""
(y, dy_dbeta, dy_dalpha) = spherical_harmonic_all(max_order, alpha, beta)
Computes a Spherical Harmonic function and its angular derivatives for
all (m,n) up to the given maximum order. The algorithm is equivalent to that
implemented in SphericalHarmonic, but this version avoids repeated calls
to lpmv, since that is very time consuming.
Arguments:
alpha is azimuth angle in radians (angle from the positive x axis, with
rotation around the positive z axis according to the right-hand screw rule)
beta is polar angle in radians
max_order is the maximum Spherical Harmonic order and should be a non-negative integer scalar
alpha and beta may be arrays of matching size; the returned arrays have
(max_order+1)^2 columns, one per (m,n) pair.
"""
cosbeta = np.cos(beta)
sinbeta = np.sin(beta)
# Preallocate output arrays:
y = np.zeros((np.size(alpha),(max_order+1)**2), np.complex128)
dy_dbeta = np.zeros((np.size(alpha),(max_order+1)**2), np.complex128)
dy_dalpha = np.zeros((np.size(alpha),(max_order+1)**2), np.complex128)
#% Loop over n and calculate spherical harmonic functions y_nm
for n in range(max_order+1):
# Compute Legendre function and its derivatives for all m:
p_n = lpmv(range(0,n+1), n, cosbeta)
for m in range(-n, n+1):
# Legendre function value for order |m|:
p_nm = p_n[:, np.absolute(m)]
p_nm = p_nm.reshape((np.size(p_nm), ))
if n==0:
dPmn_dbeta = 0
elif m==0:
dPmn_dbeta = p_n[:,1]
elif abs(m)<n:
dPmn_dbeta = 0.5*p_n[:,abs(m)+1] - 0.5*(n+abs(m))*(n-abs(m)+1)*p_n[:,abs(m)-1]
dPmn_dbeta = dPmn_dbeta.reshape((np.size(dPmn_dbeta), ))
elif (abs(m)==1) and (n==1):
dPmn_dbeta = -cosbeta
dPmn_dbeta = dPmn_dbeta.reshape((np.size(dPmn_dbeta), ))
#elif sinbeta<=np.finfo(float).eps:
#dPmn_dbeta = 0
else:
dPmn_dbeta = -abs(m)*cosbeta.reshape((np.size(cosbeta), ))*p_nm/sinbeta.reshape((np.size(sinbeta), )) - (n+abs(m))*(n-abs(m)+1)*p_n[:,abs(m)-1]
dPmn_dbeta = dPmn_dbeta.reshape((np.size(dPmn_dbeta), ))
# Compute scaling term, including sign factor:
scaling_term = ((-1)**m) * np.sqrt((2 * n + 1) / (4 * np.pi * np.prod(np.float64(range(n-abs(m)+1, n+abs(m)+1)))))
# Compute exponential term:
exp_term = np.exp(1j*m*alpha)
exp_term = exp_term.reshape((np.size(exp_term), ))
# Put it all together:
i = sub2indSH(m,n)
y[:,i] = scaling_term * exp_term * p_nm
dy_dbeta[:,i] = scaling_term * exp_term * dPmn_dbeta
dy_dalpha[:,i] = y[:,i] * 1j * m
return y, dy_dbeta, dy_dalpha
def spherical_hankel_out (n, z):
"""
h = spherical_hankel_out(n, z)
Computes a spherical Hankel function of the first kind (outgoing in this
paper's lingo).
"""
h = spherical_jn(n, z, False) + 1j*spherical_yn(n, z, False)
return h
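# Sanity check (added illustration, assuming SciPy's sign conventions):
# h_0(z) = sin(z)/z - 1j*cos(z)/z = -1j*exp(1j*z)/z, so e.g.
#   np.allclose(spherical_hankel_out(0, 2.0), -1j*np.exp(2.0j)/2.0)  # -> True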
| 46.319444 | 261 | 0.544168 | 2,154 | 16,675 | 4.061746 | 0.177344 | 0.018516 | 0.022288 | 0.004801 | 0.441993 | 0.399474 | 0.354555 | 0.33501 | 0.324609 | 0.286661 | 0 | 0.019066 | 0.348906 | 16,675 | 359 | 262 | 46.448468 | 0.786774 | 0.271664 | 0 | 0.213018 | 0 | 0 | 0.041862 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029586 | false | 0.005917 | 0.029586 | 0 | 0.08284 | 0.065089 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bad56ad689387d54b73f03233d0f2cbf354e3cc | 1,048 | py | Python | migrations/versions/570_create_brief_responses_table.py | uk-gov-mirror/alphagov.digitalmarketplace-api | 5a1db63691d0c4a435714837196ab6914badaf62 | [
"MIT"
] | 25 | 2015-01-14T10:45:13.000Z | 2021-05-26T17:21:41.000Z | migrations/versions/570_create_brief_responses_table.py | uk-gov-mirror/alphagov.digitalmarketplace-api | 5a1db63691d0c4a435714837196ab6914badaf62 | [
"MIT"
] | 641 | 2015-01-15T11:10:50.000Z | 2021-06-15T22:18:42.000Z | migrations/versions/570_create_brief_responses_table.py | uk-gov-mirror/alphagov.digitalmarketplace-api | 5a1db63691d0c4a435714837196ab6914badaf62 | [
"MIT"
] | 22 | 2015-06-13T15:37:45.000Z | 2021-08-19T23:40:49.000Z | """Create brief responses table
Revision ID: 570
Revises: 560
Create Date: 2016-02-10 12:19:22.888832
"""
# revision identifiers, used by Alembic.
revision = '570'
down_revision = '560'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_table('brief_responses',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('data', postgresql.JSON(), nullable=False),
sa.Column('brief_id', sa.Integer(), nullable=False),
sa.Column('supplier_id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(['brief_id'], ['briefs.id'], ),
sa.ForeignKeyConstraint(['supplier_id'], ['suppliers.supplier_id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_brief_responses_created_at'), 'brief_responses', ['created_at'], unique=False)
def downgrade():
op.drop_index(op.f('ix_brief_responses_created_at'), table_name='brief_responses')
op.drop_table('brief_responses')
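# Typical invocation (illustrative note, not part of the migration itself):
# `alembic upgrade 570` applies this revision and `alembic downgrade 560`
# reverts it.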
| 29.942857 | 107 | 0.714695 | 139 | 1,048 | 5.208633 | 0.381295 | 0.135359 | 0.103591 | 0.116022 | 0.223757 | 0.223757 | 0.223757 | 0.09116 | 0 | 0 | 0 | 0.034973 | 0.126908 | 1,048 | 34 | 108 | 30.823529 | 0.756284 | 0.132634 | 0 | 0 | 0 | 0 | 0.244173 | 0.08768 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.15 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1baf04303536b99a3ebb6d62d4ffeb153d15b182 | 2,224 | py | Python | tests/stages/test_sklearn_stages.py | blakeNaccarato/pdpcli | ad3ca6c2eccb552bfb4450b5cce02b30c5087282 | [
"MIT"
] | 15 | 2021-02-24T18:22:45.000Z | 2022-01-06T22:08:46.000Z | tests/stages/test_sklearn_stages.py | blakeNaccarato/pdpcli | ad3ca6c2eccb552bfb4450b5cce02b30c5087282 | [
"MIT"
] | 1 | 2022-01-07T08:13:07.000Z | 2022-01-07T08:13:07.000Z | tests/stages/test_sklearn_stages.py | blakeNaccarato/pdpcli | ad3ca6c2eccb552bfb4450b5cce02b30c5087282 | [
"MIT"
] | 1 | 2022-01-06T22:08:44.000Z | 2022-01-06T22:08:44.000Z | import numpy
import pandas
from sklearn.decomposition import PCA
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from pdpcli.stages.sklearn_stages import SklearnPredictor, SklearnTransformer
def test_sklearn_predictor() -> None:
df = pandas.DataFrame(
data=numpy.random.normal(size=(64, 2)),
columns=["a", "b"],
)
df["c"] = ((df["a"] + df["b"]) > 0).apply(int)
stage = SklearnPredictor(
estimator=LogisticRegression(),
feature_columns=["a", "b"],
target_columns="c",
output_columns="d",
)
output = stage.apply(df)
assert list(output.columns) == ["a", "b", "c", "d"]
def test_sklearn_predictor_without_feature_columns() -> None:
df = pandas.DataFrame(
data=numpy.random.normal(size=(64, 2)),
columns=["a", "b"],
)
df["c"] = ((df["a"] + df["b"]) > 0).apply(int)
stage = SklearnPredictor(
estimator=LogisticRegression(),
target_columns="c",
output_columns="d",
)
output = stage.apply(df)
assert list(output.columns) == ["a", "b", "c", "d"]
def test_sklearn_transformer_with_tfidf_vectorizer() -> None:
df = pandas.DataFrame()
df["text"] = ["This is a first sentence.", "This is a second sentence."]
stage = SklearnTransformer(
transformer=TfidfVectorizer(token_pattern=r"(?u)\b\w+\b"),
feature_columns="text",
output_columns="tfidf",
)
output = stage.apply(df)
assert len(output.columns) == 6
assert list(output.columns) == [
"tfidf_a",
"tfidf_first",
"tfidf_is",
"tfidf_second",
"tfidf_sentence",
"tfidf_this",
]
def test_sklearn_transformer_without_feature_columns() -> None:
df = pandas.DataFrame(data=numpy.random.normal(size=(64, 32)))
stage = SklearnTransformer(
transformer=PCA(n_components=8),
output_columns="pca",
)
output = stage.apply(df)
assert len(output.columns) == 8
assert list(output.columns) == [
"pca_0",
"pca_1",
"pca_2",
"pca_3",
"pca_4",
"pca_5",
"pca_6",
"pca_7",
]
| 25.563218 | 77 | 0.600719 | 257 | 2,224 | 5.027237 | 0.272374 | 0.100619 | 0.03483 | 0.065015 | 0.458204 | 0.458204 | 0.458204 | 0.458204 | 0.396285 | 0.396285 | 0 | 0.013707 | 0.245504 | 2,224 | 86 | 78 | 25.860465 | 0.756257 | 0 | 0 | 0.376812 | 0 | 0 | 0.091727 | 0 | 0 | 0 | 0 | 0 | 0.086957 | 1 | 0.057971 | false | 0 | 0.086957 | 0 | 0.144928 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bb5310f158638846addef3485329fd1186a5a2c | 3,823 | py | Python | src/naima/tests/test_saveread.py | zblz/naima | 4e7b85dfcbc00d5e4474c2f339ace0e565c9ffcd | [
"BSD-3-Clause"
] | 32 | 2015-01-02T03:09:58.000Z | 2022-03-04T15:30:53.000Z | src/naima/tests/test_saveread.py | Tyrannosaurusxuan/naima | 4e7b85dfcbc00d5e4474c2f339ace0e565c9ffcd | [
"BSD-3-Clause"
] | 97 | 2015-01-28T20:09:44.000Z | 2021-11-01T23:16:12.000Z | src/naima/tests/test_saveread.py | Tyrannosaurusxuan/naima | 4e7b85dfcbc00d5e4474c2f339ace0e565c9ffcd | [
"BSD-3-Clause"
] | 45 | 2015-01-16T09:17:15.000Z | 2022-03-31T08:33:46.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import astropy.units as u
import numpy as np
from astropy.io import ascii
from astropy.tests.helper import pytest
from astropy.utils.data import get_pkg_data_filename
from ..analysis import read_run, save_run
from ..model_fitter import InteractiveModelFitter
from ..plot import plot_chain, plot_data, plot_fit
from ..utils import validate_data_table
from .fixtures import simple_sampler as sampler # noqa
try:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
try:
import emcee # noqa
HAS_EMCEE = True
except ImportError:
HAS_EMCEE = False
fname = get_pkg_data_filename("data/CrabNebula_HESS_ipac.dat")
data_table = ascii.read(fname)
@pytest.mark.skipif("not HAS_EMCEE")
def test_roundtrip(sampler, tmp_path):
filename = tmp_path / "naima_test_sampler.hdf5"
save_run(filename, sampler)
assert os.path.exists(filename)
nresult = read_run(filename)
assert np.allclose(sampler.get_chain(), nresult.get_chain())
assert np.allclose(
sampler.get_chain(flat=True), nresult.get_chain(flat=True)
)
assert np.allclose(sampler.get_log_prob(), nresult.get_log_prob())
assert np.allclose(
sampler.get_log_prob(flat=True), nresult.get_log_prob(flat=True)
)
nwalkers, nsteps = sampler.get_chain().shape[:2]
sampler_blobs = sampler.get_blobs()
new_blobs = nresult.get_blobs()
assert sampler_blobs.shape == new_blobs.shape
j, k = nwalkers // 2, nsteps // 2
for blob_idx in range(len(sampler_blobs[j][k])):
b0 = sampler_blobs[j][k][blob_idx]
b1 = new_blobs[j][k][blob_idx]
if isinstance(b0, tuple) or isinstance(b0, list):
for b0m, b1m in zip(b0, b1):
assert np.allclose(b0m, b1m)
else:
assert np.allclose(b0, b1)
for key in sampler.run_info.keys():
assert np.all(sampler.run_info[key] == nresult.run_info[key])
for i in range(len(sampler.labels)):
assert sampler.labels[i] == nresult.labels[i]
for col in sampler.data.colnames:
assert np.allclose(
u.Quantity(sampler.data[col]).value,
u.Quantity(nresult.data[col]).value,
)
assert str(sampler.data[col].unit) == str(nresult.data[col].unit)
validate_data_table(nresult.data)
assert np.allclose(
np.mean(sampler.acceptance_fraction), nresult.acceptance_fraction
)
@pytest.mark.skipif("not HAS_MATPLOTLIB or not HAS_EMCEE")
def test_plot_fit(sampler, tmp_path):
filename = tmp_path / "naima_test_sampler.hdf5"
save_run(filename, sampler, clobber=True)
nresult = read_run(filename, modelfn=sampler.modelfn)
plot_data(nresult)
plot_fit(nresult, 0)
plot_fit(nresult, 0, e_range=[0.1, 10] * u.TeV)
plot_fit(nresult, 0, sed=False)
plt.close("all")
@pytest.mark.skipif("not HAS_MATPLOTLIB or not HAS_EMCEE")
def test_plot_chain(sampler, tmp_path):
filename = tmp_path / "naima_test_sampler.hdf5"
save_run(filename, sampler, clobber=True)
nresult = read_run(filename, modelfn=sampler.modelfn)
for i in range(nresult.get_chain().shape[2]):
plot_chain(nresult, i)
plt.close("all")
@pytest.mark.skipif("not HAS_MATPLOTLIB or not HAS_EMCEE")
def test_imf(sampler, tmp_path):
filename = tmp_path / "naima_test_sampler.hdf5"
save_run(filename, sampler, clobber=True)
nresult = read_run(filename, modelfn=sampler.modelfn)
imf = InteractiveModelFitter(
nresult.modelfn, nresult.get_chain()[-1][-1], nresult.data
)
imf.do_fit("test")
from naima.core import lnprobmodel
lnprobmodel(nresult.modelfn(imf.pars, nresult.data)[0], nresult.data)
plt.close("all")
| 30.584 | 73 | 0.69762 | 546 | 3,823 | 4.70696 | 0.241758 | 0.028016 | 0.049805 | 0.029572 | 0.319066 | 0.29572 | 0.271595 | 0.245914 | 0.245914 | 0.245914 | 0 | 0.010003 | 0.18938 | 3,823 | 124 | 74 | 30.830645 | 0.819297 | 0.018572 | 0 | 0.25 | 0 | 0 | 0.068036 | 0.032284 | 0 | 0 | 0 | 0 | 0.135417 | 1 | 0.041667 | false | 0 | 0.177083 | 0 | 0.21875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bb7506d3b4ef13debad4f463bd0c3cecc4860d0 | 7,513 | py | Python | twccli/commands/net.py | TW-NCHC/TWCC-CLI | 743be786440ff6f9689c6a10cc2131565e51d391 | [
"Apache-2.0"
] | 12 | 2019-04-27T07:45:02.000Z | 2020-11-13T08:16:18.000Z | twccli/commands/net.py | twcc/TWCC-CLI | 743be786440ff6f9689c6a10cc2131565e51d391 | [
"Apache-2.0"
] | 23 | 2021-03-05T07:53:37.000Z | 2022-03-20T03:12:33.000Z | twccli/commands/net.py | TW-NCHC/TWCC-CLI | 743be786440ff6f9689c6a10cc2131565e51d391 | [
"Apache-2.0"
] | 6 | 2019-02-27T00:19:11.000Z | 2020-11-13T08:16:19.000Z | from twccli.twcc.services.compute import GpuSite, VcsSite, VcsSecurityGroup, VcsServerNet
from twccli.twcc.util import isNone, mk_names
from twccli.twcc.services.compute import getServerId, getSecGroupList
from twccli.twcc.services.compute_util import list_vcs
from twccli.twccli import pass_environment, logger
from twccli.twcc.services.base import Keypairs
from twccli.twcc.services.generic import GenericService
import click
import re
import sys
@click.command(help='Manage CCS (Container Compute Service) ports.')
@click.option('-p',
'--port',
'port',
type=int,
required=True,
help='Port number.')
@click.option('-s',
'--site-id',
'site_id',
type=int,
required=True,
help='ID of the container.')
@click.option('-open/-close',
'--open-port/--close-port',
'isAttach',
is_flag=True,
show_default=True,
help='opens/close container ports.')
@pass_environment
def ccs(env, site_id, port, isAttach):
"""Command line for network function of ccs
Functions:
expose/unbind port
:param port: Port number for your VCS environment
:type port: integer
:param site_id: Resource id for VCS
:type site_id: integer
:param isAttach: exposed/un-exposed port for continer services
:type isAttach: bool
"""
b = GpuSite()
tsite = b.queryById(site_id)
if isAttach:
b.exposedPort(site_id, port)
else:
b.unbindPort(site_id, port)
def net_vcs_protocol_check(protocol):
avbl_proto = ['ah', 'pgm', 'tcp', 'ipv6-encap', 'dccp', 'igmp', 'icmp', 'esp', 'vrrp', 'ipv6-icmp', 'gre', 'sctp', 'rsvp', 'ipv6-route', 'udp', 'ipv6-opts', 'ipv6-nonxt', 'udplite', 'egp', 'ipip', 'icmpv6', 'ipv6-frag', 'ospf']
if protocol not in avbl_proto:
# also accept a raw IP protocol number in the range 0-255
if not re.findall('^([01]?[0-9]?[0-9]|2[0-4][0-9]|25[0-5])$', protocol):
raise ValueError(
"Protocol is not valid. Available: {}.".format(avbl_proto))
def public_ip_assignee(site_info, fip, eip):
errorFlg = True
if len(site_info['public_ip']) > 0 and fip == False:
VcsServerNet().deAssociateIP(site_info['id'])
errorFlg = False
if len(site_info['public_ip']) == 0:
if not isNone(eip):
VcsServerNet().associateIP(site_info['id'], eip_id = eip)
errorFlg = False
elif fip == True:
VcsServerNet().associateIP(site_info['id'])
errorFlg = False
return errorFlg
def max_min_port_check(portrange):
if re.findall('[^0-9-]', portrange):
raise ValueError('port range should be digital-digital')
port_list = portrange.split('-')
if len(port_list) == 2:
port_min, port_max = [int(mport) for mport in port_list]
if port_min < 0 or port_max < 0:
raise ValueError('port range must bigger than 0')
elif port_min > port_max:
raise ValueError(
'port_range_min must be <= port_range_max')
else:
raise ValueError('port range set error')
return port_min, port_max
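# Examples (added illustration): max_min_port_check('3000-3010') returns
# (3000, 3010); '3010-3000' raises ValueError because min > max, and a single
# port such as '80' raises 'port range set error'.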
@click.command(help='Manage VCS (Virtual Compute Service) security groups.')
@click.option('-p', '--port', 'port', type=int, help='Port number.')
@click.option('-s',
'--site-id',
'site_id',
type=int,
required=True,
help='ID of the container.')
@click.option('-cidr',
'--cidr-network',
'cidr',
type=str,
help='Network range for security group.',
default='192.168.0.1/24',
show_default=True)
@click.option('-fip / -nofip',
'--floating-ip / --no-floating-ip',
'fip',
is_flag=True,
default=None,
show_default=False,
help='Configure your instance with or without a floating IP.')
@click.option('-ip',
'--eip',
'eip',
type=str,
default=None,
show_default=False,
help='Configure your instance with a EIP.')
@click.option('-in/-out',
'--ingress/--egress',
'is_ingress',
is_flag=True,
default=True,
show_default=True,
help='Applying security group directions.')
@click.option(
'-prange',
'--port-range',
'portrange',
type=str,
help='Port number from min-port to max-port, use "-" as delimiter, ie: 3000-3010. Only supported for TCP, UDP, UDPLITE, SCTP and DCCP'
)
@click.option('-proto',
'--protocol',
'protocol',
type=str,
help='Manage VCS security groups protocol.',
default='tcp',
show_default=True)
@click.argument('site_ids', nargs=-1)
@pass_environment
def vcs(env, site_ids, site_id, port, cidr, protocol, is_ingress, fip, portrange, eip):
"""Command line for network function of vcs
:param portrange: Port range number for your VCS environment
:type portrange: string
:param fip: Configure your VCS environment with or without floating IP
:type fip: bool
:param port: Port number for your VCS environment
:type port: integer
:param site_id: Resource id for VCS
:type site_id: integer
:param cidr: Network range for security group
:type cidr: string
:param protocol: Network protocol for security group
:type protocol: string
:param is_ingress: Applying security group directions.
:type is_ingress: bool
"""
net_vcs_protocol_check(protocol)
# case 1: floating ip operations
site_ids = mk_names(site_id, site_ids)
if len(site_ids) == 0:
raise ValueError("Error: VCS id: {} is not found.".format(site_id))
site_infos = list_vcs(site_ids, False, is_print=False)
for site_info in site_infos:
errorFlg = public_ip_assignee(site_info, fip, eip)
# case 2: port setting
from netaddr import IPNetwork
IPNetwork(cidr)
secg_list = getSecGroupList(site_info['id'])
secg_id = secg_list['id']
if not isNone(portrange):
port_min, port_max = max_min_port_check(portrange)
secg = VcsSecurityGroup()
secg.addSecurityGroup(secg_id, port_min, port_max, cidr, protocol,
"ingress" if is_ingress else "egress")
errorFlg = False
if not isNone(port):
secg = VcsSecurityGroup()
secg.addSecurityGroup(secg_id, port, port, cidr, protocol,
"ingress" if is_ingress else "egress")
errorFlg = False
if errorFlg:
raise ValueError(
"Error! Nothing to do! Check `--help` for detail.")
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=CONTEXT_SETTINGS, help="NETwork related operations.")
def cli():
try:
import sys
ga = GenericService()
func_call = '_'.join([i for i in sys.argv[1:] if re.findall(r'\d',i) == [] and not i == '-sv']).replace('-','')
ga._send_ga(func_call)
except Exception as e:
logger.warning(e)
pass
cli.add_command(ccs)
cli.add_command(vcs)
def main():
cli()
if __name__ == "__main__":
main()
| 33.540179 | 231 | 0.591508 | 920 | 7,513 | 4.7 | 0.248913 | 0.020814 | 0.019426 | 0.025439 | 0.317761 | 0.237512 | 0.199815 | 0.140148 | 0.140148 | 0.140148 | 0 | 0.010071 | 0.286304 | 7,513 | 223 | 232 | 33.690583 | 0.796345 | 0.119526 | 0 | 0.285714 | 0 | 0.011905 | 0.203536 | 0.009839 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0.02381 | 0.071429 | 0 | 0.125 | 0.005952 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bb7fb2a1b9c93628680395b5402a95c58d49234 | 16,609 | py | Python | qsiprep/cli/recon_plot.py | arokem/qsiprep | f0a12fa002ea99cad97f2b5e40c1517d0569e14c | [
"BSD-3-Clause"
] | null | null | null | qsiprep/cli/recon_plot.py | arokem/qsiprep | f0a12fa002ea99cad97f2b5e40c1517d0569e14c | [
"BSD-3-Clause"
] | null | null | null | qsiprep/cli/recon_plot.py | arokem/qsiprep | f0a12fa002ea99cad97f2b5e40c1517d0569e14c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import warnings
import os
import sys
import os.path as op
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
import nibabel as nb
import numpy as np
from qsiprep.niworkflows.viz.utils import slices_from_bbox
from qsiprep.interfaces.converters import fib2amps, mif2amps
from dipy.core.sphere import HemiSphere
from dipy.core.ndindex import ndindex
from dipy.reconst.odf import gfa
from dipy.direction import peak_directions
from PIL import Image
from fury import actor, window
from nipype import logging
LOGGER = logging.getLogger('nipype.interface')
warnings.filterwarnings("ignore", category=ImportWarning)
warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
def sink_mask_file(in_file, orig_file, out_dir):
import os
from nipype.utils.filemanip import fname_presuffix, copyfile
os.makedirs(out_dir, exist_ok=True)
out_file = fname_presuffix(orig_file, suffix='_mask', newpath=out_dir)
copyfile(in_file, out_file, copy=True, use_hardlink=True)
return out_file
def recon_plot():
"""Convert fib to mif."""
parser = ArgumentParser(
description='qsiprep: Convert DSI Studio fib file to MRtrix mif file.',
formatter_class=RawTextHelpFormatter)
parser.add_argument('--fib',
action='store',
type=os.path.abspath,
help='DSI Studio fib file to convert')
parser.add_argument('--mif',
type=os.path.abspath,
action='store',
help='path to a MRtrix mif file')
parser.add_argument('--amplitudes',
type=os.path.abspath,
action='store',
help='4D amplitudes corresponding to --directions')
parser.add_argument('--directions',
type=os.path.abspath,
action='store',
help='text file of directions corresponding to --amplitudes')
parser.add_argument('--mask_file',
action='store',
type=os.path.abspath,
help='a NIfTI-1 format file defining a brain mask.')
parser.add_argument('--odf_rois',
action='store',
type=os.path.abspath,
help='a NIfTI-1 format file with ROIs for plotting ODFs')
parser.add_argument('--peaks_image',
action='store',
default="peaks_mosiac.png",
type=os.path.abspath,
help='png file for odf peaks image')
parser.add_argument('--odfs_image',
action='store',
default="odfs_mosaic.png",
type=os.path.abspath,
help='png file for odf results')
parser.add_argument('--background_image',
action='store',
type=os.path.abspath,
help='a NIfTI-1 format file with a valid q/sform.')
parser.add_argument('--subtract-iso',
action='store_true',
help='subtract ODF min so visualization looks similar in mrview')
parser.add_argument('--peaks_only',
action='store_true',
help='only plot the peaks')
parser.add_argument('--ncuts', type=int, default=3,
help="number of slices to plot")
parser.add_argument('--padding', type=int, default=10,
help="number of voxels of padding around the mask bounding box")
opts = parser.parse_args()
if opts.mif:
odf_img, directions = mif2amps(opts.mif, os.getcwd())
LOGGER.info("converting %s to plot ODF/peaks", opts.mif)
elif opts.fib:
odf_img, directions = fib2amps(opts.fib,
opts.background_image,
os.getcwd())
LOGGER.info("converting %s to plot ODF/peaks", opts.fib)
elif opts.amplitudes and opts.directions:
LOGGER.info("loading amplitudes=%s, directions=%s "
"to plot ODF/peaks", opts.amplitudes, opts.directions)
odf_img = nb.load(opts.amplitudes)
directions = np.load(opts.directions)
else:
raise Exception('Requires either a mif file or fib file')
odf_4d = odf_img.get_fdata()
sphere = HemiSphere(xyz=directions.astype(np.float64))
if not opts.background_image:
background_data = odf_4d.mean(3)
else:
background_data = nb.load(opts.background_image).get_fdata()
LOGGER.info("saving peaks image to %s", opts.peaks_image)
peak_slice_series(odf_4d, sphere, background_data, opts.peaks_image,
n_cuts=opts.ncuts, mask_image=opts.mask_file,
padding=opts.padding)
# Plot ODFs in interesting regions
if opts.odf_rois and not opts.peaks_only:
LOGGER.info("saving odfs image to %s", opts.odfs_image)
odf_roi_plot(odf_4d, sphere, background_data, opts.odfs_image,
opts.odf_rois,
subtract_iso=opts.subtract_iso,
mask=opts.mask_file)
sys.exit(0)
def plot_peak_slice(odf_4d, sphere, background_data, out_file, axis, slicenum, mask_data,
tile_size=1200, normalize_peaks=True):
view_up = [(0., 0., 1.), (0., 0., 1.), (0., -1., 0.)]
# Make a slice mask to reduce memory
new_shape = list(odf_4d.shape)
new_shape[axis] = 1
image_shape = new_shape[:3]
midpoint = (new_shape[0] / 2., new_shape[1] / 2., new_shape[2] / 2.)
if axis == 0:
odf_slice = odf_4d[slicenum, :, :, :].reshape(new_shape)
image_slice = background_data[slicenum, :, :].reshape(image_shape)
mask_slice = mask_data[slicenum, :, :].reshape(image_shape)
camera_dist = max(midpoint[1], midpoint[2]) * np.pi
elif axis == 1:
odf_slice = odf_4d[:, slicenum, :, :].reshape(new_shape)
image_slice = background_data[:, slicenum, :].reshape(image_shape)
mask_slice = mask_data[:, slicenum, :].reshape(image_shape)
camera_dist = max(midpoint[0], midpoint[2]) * np.pi
elif axis == 2:
odf_slice = odf_4d[:, :, slicenum, :].reshape(new_shape)
image_slice = background_data[:, :, slicenum].reshape(image_shape)
mask_slice = mask_data[:, :, slicenum].reshape(image_shape)
camera_dist = max(midpoint[0], midpoint[1]) * np.pi
position = list(midpoint)
position[axis] += camera_dist
# Find the actual peaks
peak_dirs, peak_values = peaks_from_odfs(odf_slice, sphere,
relative_peak_threshold=.1,
min_separation_angle=15,
mask=mask_slice,
normalize_peaks=normalize_peaks,
npeaks=3)
if normalize_peaks:
peak_values = peak_values / peak_values.max() * np.pi
peak_actor = actor.peak_slicer(peak_dirs, peak_values, colors=None)
image_actor = actor.slicer(image_slice, opacity=0.6, interpolation='nearest')
image_size = (tile_size, tile_size)
scene = window.Scene()
scene.add(image_actor)
scene.add(peak_actor)
xfov_min, xfov_max = 0, new_shape[0] - 1
yfov_min, yfov_max = 0, new_shape[1] - 1
zfov_min, zfov_max = 0, new_shape[2] - 1
peak_actor.display_extent(xfov_min, xfov_max, yfov_min, yfov_max, zfov_min, zfov_max)
image_actor.display_extent(xfov_min, xfov_max, yfov_min, yfov_max, zfov_min, zfov_max)
scene.set_camera(focal_point=tuple(midpoint),
position=tuple(position),
view_up=view_up[axis])
window.record(scene, out_path=out_file, reset_camera=False, size=image_size)
def peak_slice_series(odf_4d, sphere, background_data, out_file, mask_image=None,
prefix='odf', tile_size=1200, n_cuts=3, padding=4,
normalize_peaks=True):
# Make a slice mask to reduce memory
if mask_image is None:
LOGGER.info("No mask image for plotting peaks")
image_mask = np.ones(background_data.shape)
else:
image_mask = nb.load(mask_image).get_fdata()
slice_indices = slices_from_bbox(background_data, cuts=n_cuts, padding=padding)
LOGGER.info("Plotting slice indices %s", slice_indices)
# Render the axial slices
z_image = Image.new('RGB', (tile_size, tile_size * n_cuts))
for slicenum, z_slice in enumerate(slice_indices['z']):
png_file = '{}_tra_{:03d}.png'.format(prefix, z_slice)
plot_peak_slice(odf_4d, sphere, background_data, png_file, 2, z_slice, image_mask,
tile_size, normalize_peaks)
z_image.paste(Image.open(png_file), (0, slicenum * tile_size))
# Render the sagittal slices
x_image = Image.new('RGB', (tile_size, tile_size * n_cuts))
for slicenum, x_slice in enumerate(slice_indices['x']):
png_file = '{}_sag_{:03d}.png'.format(prefix, x_slice)
plot_peak_slice(odf_4d, sphere, background_data, png_file, 0, x_slice, image_mask,
tile_size, normalize_peaks)
x_image.paste(Image.open(png_file), (0, slicenum * tile_size))
# Render the coronal slices
y_image = Image.new('RGB', (tile_size, tile_size * n_cuts))
for slicenum, y_slice in enumerate(slice_indices['y']):
png_file = '{}_cor_{:03d}.png'.format(prefix, y_slice)
plot_peak_slice(odf_4d, sphere, background_data, png_file, 1, y_slice, image_mask,
tile_size, normalize_peaks)
y_image.paste(Image.open(png_file), (0, slicenum * tile_size))
final_image = Image.new('RGB', (tile_size * 3, tile_size * n_cuts))
final_image.paste(z_image, (0, 0))
final_image.paste(x_image, (tile_size, 0))
final_image.paste(y_image, (tile_size * 2, 0))
final_image.save(out_file)
def peaks_from_odfs(odf4d, sphere, relative_peak_threshold,
min_separation_angle, mask=None,
gfa_thr=0, normalize_peaks=False,
npeaks=5):
shape = odf4d.shape[:-1]
if mask is None:
mask = np.ones(shape, dtype='bool')
else:
if mask.shape != shape:
raise ValueError("Mask is not the same shape as data.")
gfa_array = np.zeros(shape)
qa_array = np.zeros((shape + (npeaks,)))
peak_dirs = np.zeros((shape + (npeaks, 3)))
peak_values = np.zeros((shape + (npeaks,)))
peak_indices = np.zeros((shape + (npeaks,)), dtype='int')
peak_indices.fill(-1)
global_max = -np.inf
for idx in ndindex(shape):
if not mask[idx]:
continue
odf = odf4d[idx]
gfa_array[idx] = gfa(odf)
if gfa_array[idx] < gfa_thr:
global_max = max(global_max, odf.max())
continue
# Get peaks of odf
direction, pk, ind = peak_directions(odf, sphere,
relative_peak_threshold,
min_separation_angle)
# Calculate peak metrics
if pk.shape[0] != 0:
global_max = max(global_max, pk[0])
n = min(npeaks, pk.shape[0])
qa_array[idx][:n] = pk[:n] - odf.min()
peak_dirs[idx][:n] = direction[:n]
peak_indices[idx][:n] = ind[:n]
peak_values[idx][:n] = pk[:n]
if normalize_peaks:
peak_values[idx][:n] /= pk[0]
peak_dirs[idx] *= peak_values[idx][:, None]
qa_array /= global_max
return peak_dirs, peak_values
def get_camera_for_roi(roi_data, roi_id, view_axis):
voxel_coords = np.row_stack(np.nonzero(roi_data == roi_id))
centroid = voxel_coords.mean(1)
other_axes = [[1, 2], [0, 2], [0, 1]][view_axis]
projected_x = voxel_coords[other_axes[0]]
projected_y = voxel_coords[other_axes[1]]
xspan = projected_x.max() - projected_x.min()
yspan = projected_y.max() - projected_y.min()
camera_distance = max(xspan, yspan) * np.pi
return centroid, camera_distance
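# Worked example (added illustration with toy numbers): two ROI voxels at
# (10, 20, 30) and (14, 20, 36) viewed along axis 1 project onto axes 0 and 2,
# giving centroid (12., 20., 33.) and camera_distance = max(4, 6) * pi ~ 18.85.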
def plot_an_odf_slice(odf_4d, full_sphere, background_data, tile_size, filename,
centroid, axis, camera_distance, subtract_iso, mask_image):
view_up = [(0., 0., 1.), (0., 0., 1.), (0., -1., 0.)]
# Adjust the centroid so it's only a single slice
slicenum = int(np.round(centroid)[axis])
centroid[axis] = 0
position = centroid.copy()
position[axis] = position[axis] + camera_distance
# Roll if viewing an axial slice
roll = 3 if axis == 2 else 0
position[1] = position[1] - roll
# Ensure the dimensions reflect that there is only one slice
new_shape = list(odf_4d.shape)
new_shape[axis] = 1
image_shape = new_shape[:3]
if axis == 0:
odf_slice = odf_4d[slicenum, :, :, :].reshape(new_shape)
image_slice = background_data[slicenum, :, :].reshape(image_shape)
elif axis == 1:
odf_slice = odf_4d[:, slicenum, :, :].reshape(new_shape)
image_slice = background_data[:, slicenum, :].reshape(image_shape)
elif axis == 2:
odf_slice = odf_4d[:, :, slicenum, :].reshape(new_shape)
image_slice = background_data[:, :, slicenum].reshape(image_shape)
# Tile to get the whole ODF
odf_slice = np.tile(odf_slice, (1, 1, 1, 2))
if subtract_iso:
odf_slice = odf_slice - odf_slice.min(3, keepdims=True)
# Make graphics objects
odf_actor = actor.odf_slicer(odf_slice, sphere=full_sphere,
colormap=None, scale=0.6, mask=image_slice)
image_actor = actor.slicer(image_slice, opacity=0.6, interpolation='nearest')
image_size = (tile_size, tile_size)
scene = window.Scene()
scene.add(image_actor)
scene.add(odf_actor)
xfov_min, xfov_max = 0, new_shape[0] - 1
yfov_min, yfov_max = 0, new_shape[1] - 1
zfov_min, zfov_max = 0, new_shape[2] - 1
odf_actor.display_extent(xfov_min, xfov_max, yfov_min, yfov_max, zfov_min, zfov_max)
image_actor.display_extent(xfov_min, xfov_max, yfov_min, yfov_max, zfov_min, zfov_max)
scene.set_camera(focal_point=tuple(centroid),
position=tuple(position),
view_up=view_up[axis])
window.record(scene, out_path=filename, reset_camera=False, size=image_size)
scene.clear()
def odf_roi_plot(odf_4d, halfsphere, background_data, out_file, roi_file,
prefix='odf', tile_size=1200, subtract_iso=False, mask=None):
roi_data = nb.load(roi_file).get_fdata()
roi_image = Image.new('RGB', (tile_size * 3, tile_size))
roi1_centroid, roi1_distance = get_camera_for_roi(roi_data, 1, 2)
roi2_centroid, roi2_distance = get_camera_for_roi(roi_data, 2, 1)
roi3_centroid, roi3_distance = get_camera_for_roi(roi_data, 3, 1)
camera_distance = max(roi1_distance, roi2_distance, roi3_distance)
# Make a slice mask to reduce memory
if mask is None:
image_mask = np.ones(roi_data.shape)
else:
image_mask = nb.load(mask).get_fdata()
# Fill out the other half of the sphere
odf_sphere = halfsphere.mirror()
semiovale_axial_file = '{}_semoivale_axial.png'.format(prefix)
plot_an_odf_slice(odf_4d, odf_sphere, background_data, tile_size,
semiovale_axial_file, centroid=roi1_centroid, axis=2,
camera_distance=camera_distance, subtract_iso=subtract_iso,
mask_image=image_mask)
roi_image.paste(Image.open(semiovale_axial_file), (0, 0))
# Render the coronal slice with a double-crossing
cst_x_cc_file = '{}_CSTxCC.png'.format(prefix)
plot_an_odf_slice(odf_4d, odf_sphere, background_data, tile_size,
cst_x_cc_file, centroid=roi2_centroid, axis=1,
camera_distance=camera_distance, subtract_iso=subtract_iso,
mask_image=image_mask)
roi_image.paste(Image.open(cst_x_cc_file), (tile_size, 0))
# Render the corpus callosum
cc_file = '{}_CC.png'.format(prefix)
plot_an_odf_slice(odf_4d, odf_sphere, background_data, tile_size,
cc_file, centroid=roi3_centroid, axis=1,
camera_distance=camera_distance, subtract_iso=subtract_iso,
mask_image=image_mask)
roi_image.paste(Image.open(cc_file), (2 * tile_size, 0))
roi_image.save(out_file) | 43.252604 | 90 | 0.617858 | 2,188 | 16,609 | 4.438757 | 0.146709 | 0.025535 | 0.014415 | 0.013386 | 0.449959 | 0.411347 | 0.389003 | 0.341845 | 0.323105 | 0.309308 | 0 | 0.016133 | 0.272262 | 16,609 | 384 | 91 | 43.252604 | 0.787375 | 0.037088 | 0 | 0.295238 | 0 | 0 | 0.076393 | 0.001378 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025397 | false | 0 | 0.063492 | 0 | 0.098413 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bb83da1c4ee56bb7f6b29451e7244bad2be8a01 | 2,740 | py | Python | tests/regressiontests/aggregation_regress/tests.py | huicheese/Django-test3 | ac11d2dce245b48392e52d1f4acfd5e7433b243e | [
"BSD-3-Clause"
] | null | null | null | tests/regressiontests/aggregation_regress/tests.py | huicheese/Django-test3 | ac11d2dce245b48392e52d1f4acfd5e7433b243e | [
"BSD-3-Clause"
] | null | null | null | tests/regressiontests/aggregation_regress/tests.py | huicheese/Django-test3 | ac11d2dce245b48392e52d1f4acfd5e7433b243e | [
"BSD-3-Clause"
] | null | null | null | from django.conf import settings
from django.test import TestCase
from django.db.models import Count, Max
from regressiontests.aggregation_regress.models import *
class AggregationTests(TestCase):
def test_aggregates_in_where_clause(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Tests that the subselect works and returns results equivalent to a
query with the IDs listed.
Before the corresponding fix for this bug, this test passed in 1.1 and
failed in 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# don't do anything with the queryset (qs) before including it as a
# subquery
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
def test_aggregates_in_where_clause_pre_eval(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Same as the above test, but evaluates the queryset for the subquery
before it's used as a subquery.
Before the corresponding fix for this bug, this test failed in both
1.1 and 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# force the queryset (qs) for the subquery to be evaluated in its
# current state
list(qs)
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
if settings.DATABASE_ENGINE != 'oracle':
def test_annotate_with_extra(self):
"""
Regression test for #11916: Extra params + aggregation creates
incorrect SQL.
"""
#oracle doesn't support subqueries in group by clause
shortest_book_sql = """
SELECT name
FROM aggregation_regress_book b
WHERE b.publisher_id = aggregation_regress_publisher.id
ORDER BY b.pages
LIMIT 1
"""
# tests that this query does not raise a DatabaseError due to the full
# subselect being (erroneously) added to the GROUP BY parameters
qs = Publisher.objects.extra(select={
'name_of_shortest_book': shortest_book_sql,
}).annotate(total_books=Count('book'))
# force execution of the query
list(qs)
| 38.055556 | 82 | 0.626642 | 356 | 2,740 | 4.69382 | 0.356742 | 0.020946 | 0.031119 | 0.035907 | 0.399761 | 0.399761 | 0.363854 | 0.363854 | 0.363854 | 0.312388 | 0 | 0.016427 | 0.289051 | 2,740 | 71 | 83 | 38.591549 | 0.841376 | 0.364599 | 0 | 0.424242 | 0 | 0 | 0.178389 | 0.049233 | 0 | 0 | 0 | 0 | 0.060606 | 1 | 0.090909 | false | 0 | 0.121212 | 0 | 0.242424 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bbaf70e6d7277e8e9e2216ee33cc6779be14e8e | 3,884 | py | Python | Node.py | recep-yildirim/Astar-Algorithm | 77936f42fac464d73ef6024ef808a188008801e7 | [
"MIT"
] | null | null | null | Node.py | recep-yildirim/Astar-Algorithm | 77936f42fac464d73ef6024ef808a188008801e7 | [
"MIT"
] | null | null | null | Node.py | recep-yildirim/Astar-Algorithm | 77936f42fac464d73ef6024ef808a188008801e7 | [
"MIT"
] | null | null | null | class Node () :
def __init__ (self , table , index = None) :
self.__table = table
self.__index = index
self.__weight = self.__calculateWeight ()
self.__left_child = None
self.__forward_child = None
self.__right_child = None
self.__parent = None
def getTable (self) :
return self.__table
def getWeight (self) :
return self.__weight
def setWeight (self , weight) :
self.__weight = weight
def getLeftChild (self) :
return self.__left_child
def setLeftChild (self , left_child) :
self.__left_child = left_child
def getForwardChild (self) :
return self.__forward_child
def setForwardChild (self , forward_child) :
self.__forward_child = forward_child
def getRightChild (self) :
return self.__right_child
def setRightChild (self , right_child) :
self.__right_child = right_child
def getParent (self) :
return self.__parent
def setParent (self , parent) :
self.__parent = parent
def __calculateWeight (self) :
# heuristic weight: the number of tiles already in their goal position
count = 0
for i in range (9) :
if self.__table [i] == (i + 1) :
count += 1
return count
def checkMoves (self) :
index = self.__table.index ('*')
nodes = dict ()
left = None
right = None
forward = None
# move blank left: stay on the same row (index % 3 != 0), do not undo the
# parent's move, and do not displace a tile that is already in place
if index % 3 != 0 and (index - 1) != self.__index and self.__checkIndex (index - 1) :
left = Node (self.__createTable (index , (index - 1)) , index)
nodes [left.getWeight ()] = left
self.__generateConnect (left , "left")
# move blank right: stay on the same row (index % 3 != 2)
if index % 3 != 2 and (index + 1) != self.__index and self.__checkIndex (index + 1) :
right = Node (self.__createTable (index , (index + 1)) , index)
nodes [right.getWeight ()] = right
self.__generateConnect (right , "right")
if (index + 3) < len (self.__table) and (index + 3) > -1 and (index + 3) != self.__index and self.__checkIndex (index + 3) :
forward = Node (self.__createTable (index , (index + 3)) , index)
nodes [forward.getWeight ()] = forward
self.__generateConnect (forward , "forward")
if (index - 3) < len (self.__table) and (index - 3) > -1 and (index - 3) != self.__index and self.__checkIndex (index - 3) :
if left == None :
left = Node (self.__createTable (index , (index - 3)) , index)
nodes [left.getWeight ()] = left
self.__generateConnect (left , "left")
elif right == None :
right = Node (self.__createTable (index , (index - 3)) , index)
nodes [right.getWeight ()] = right
self.__generateConnect (right , "right")
elif forward == None :
forward = Node (self.__createTable (index , (index - 3)) , index)
nodes [forward.getWeight ()] = forward
self.__generateConnect (forward , "forward")
return nodes
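# Example (added illustration): for table = [1, 2, 3, 4, '*', 6, 7, 5, 8] the
# blank at index 4 can only swap with index 7, because tiles 2, 4 and 6 are
# already in their goal positions and __checkIndex blocks those moves; the
# resulting child table [1, 2, 3, 4, 5, 6, 7, '*', 8] has weight 7.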
def __createTable (self , from_index , to_index) :
table = [i for i in self.__table]
table [from_index] , table [to_index] = table [to_index] , table [from_index]
return table
def __checkIndex (self , index) :
if self.__table [index] == (index + 1) :
return False
else :
return True
def __generateConnect (self , child , name) :
if name == "left" :
self.__left_child = child
child.setParent (self)
elif name == "right" :
self.__right_child = child
child.setParent (self)
else :
self.__forward_child = child
child.setParent (self) | 31.577236 | 132 | 0.555098 | 414 | 3,884 | 4.879227 | 0.125604 | 0.035644 | 0.041584 | 0.071287 | 0.480198 | 0.421782 | 0.412871 | 0.412871 | 0.353465 | 0.242574 | 0 | 0.012039 | 0.337024 | 3,884 | 123 | 133 | 31.577236 | 0.772427 | 0 | 0 | 0.193182 | 0 | 0 | 0.010811 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.193182 | false | 0 | 0 | 0.068182 | 0.329545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bbb61f6bb7f5635828fad04e429c28202fefb4b | 732 | py | Python | http/flask/validation.py | hanwhhanwh/python-test | ac97ae83fc9eac3157ff7dc39d6295bc5b15d589 | [
"MIT"
] | null | null | null | http/flask/validation.py | hanwhhanwh/python-test | ac97ae83fc9eac3157ff7dc39d6295bc5b15d589 | [
"MIT"
] | null | null | null | http/flask/validation.py | hanwhhanwh/python-test | ac97ae83fc9eac3157ff7dc39d6295bc5b15d589 | [
"MIT"
] | null | null | null | from jsonschema import validate
# A sample schema, like what we'd get from json.load()
schema = {
"type" : "object",
"properties" : {
"price" : {"type" : "number", "minimum": 0, "maximum": 39},
"name" : {"type" : "string"},
},
}
# If no exception is raised by validate(), the instance is valid.
validate(instance={"name" : "Eggs", "price" : 34.99}, schema=schema)
# The following call raises a ValidationError; the expected output is shown below.
validate(
instance={"name" : "Eggs", "price" : "Invalid"}, schema=schema,
) # doctest: +IGNORE_EXCEPTION_DETAIL
""" result =>
jsonschema.exceptions.ValidationError: 'Invalid' is not of type 'number'
Failed validating 'type' in schema['properties']['price']:
{'type': 'number'}
On instance['price']:
'Invalid'
"""
| 28.153846 | 72 | 0.61612 | 84 | 732 | 5.345238 | 0.595238 | 0.066815 | 0.084633 | 0.111359 | 0.129176 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011945 | 0.199454 | 732 | 25 | 73 | 29.28 | 0.754266 | 0.204918 | 0 | 0 | 0 | 0 | 0.26087 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bc3974da46b2e7222bd0a8f654b8b44516aed96 | 1,789 | py | Python | Python/min-cost-climbing-stairs.py | RideGreg/LeetCode | b70818b1e6947bf29519a24f78816e022ebab59e | [
"MIT"
] | 1 | 2022-01-30T06:55:28.000Z | 2022-01-30T06:55:28.000Z | Python/min-cost-climbing-stairs.py | RideGreg/LeetCode | b70818b1e6947bf29519a24f78816e022ebab59e | [
"MIT"
] | null | null | null | Python/min-cost-climbing-stairs.py | RideGreg/LeetCode | b70818b1e6947bf29519a24f78816e022ebab59e | [
"MIT"
] | 1 | 2021-12-31T03:56:39.000Z | 2021-12-31T03:56:39.000Z | # Time: O(n)
# Space: O(1)
# 746
# On a staircase, the i-th step has some non-negative cost cost[i] assigned (0 indexed).
#
# Once you pay the cost, you can either climb one or two steps.
# You need to find minimum cost to reach the top of the floor,
# and you can either start from the step with index 0, or the step with index 1.
#
# Example 1:
# Input: cost = [10, 15, 20]
# Output: 15
# Explanation: Cheapest is start on cost[1], pay that cost and go to the top.
# Example 2:
# Input: cost = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1]
# Output: 6
# Explanation: Cheapest is start on cost[0], and only step on 1s, skipping cost[3].
# Note:
# - cost will have a length in the range [2, 1000].
# - Every cost[i] will be an integer in the range [0, 999].
# DP: dp[x] = min(dp[x - 1], dp[x - 2]) + cost[x]
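# Worked example (Example 1 above): for cost = [10, 15, 20],
# dp = [10, 15, 20 + min(10, 15)] = [10, 15, 30] and the answer is
# min(dp[-1], dp[-2]) = 15.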
class Solution(object):
def minCostClimbingStairs(self, cost):
"""
:type cost: List[int]
:rtype: int
"""
prev, cur = cost[0], cost[1]
for i in range(2, len(cost)):
prev, cur = cur, cost[i] + min(prev, cur)
return min(prev, cur)
''' space not optimized
size = len(cost)
dp = [cost[0], cost[1]]
for x in range(2, size):
dp.append(min(dp[x - 1], dp[x - 2]) + cost[x])
return min(dp[-1], dp[-2])
'''
def minCostClimbingStairs_ming(self, cost):
N = len(cost)
dp = [None] * N
def re(n):
if dp[n] is None:
if n == 0 or n == 1:
return cost[n]
dp[n] = cost[n] + min(re(n - 1), re(n - 2))
return dp[n]
return min(re(N-1), re(N-2))
print(Solution().minCostClimbingStairs([0,1,2,0])) # 1
print(Solution().minCostClimbingStairs([0,0,0,1])) # 0
| 28.396825 | 88 | 0.546115 | 294 | 1,789 | 3.319728 | 0.343537 | 0.015369 | 0.015369 | 0.032787 | 0.147541 | 0.120902 | 0.055328 | 0.032787 | 0.032787 | 0 | 0 | 0.060048 | 0.301845 | 1,789 | 62 | 89 | 28.854839 | 0.721377 | 0.446059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0 | 0 | 0.444444 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bc4943ecdbf1164bdb164129e61d0599e4b4509 | 398 | py | Python | PyMOTW/source/fnmatch/fnmatch_fnmatch.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | 1 | 2019-01-04T05:47:50.000Z | 2019-01-04T05:47:50.000Z | PyMOTW/source/fnmatch/fnmatch_fnmatch.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | 1 | 2020-07-18T03:52:03.000Z | 2020-07-18T04:18:01.000Z | PyMOTW/source/fnmatch/fnmatch_fnmatch.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | 2 | 2021-03-06T04:28:32.000Z | 2021-03-06T04:59:17.000Z | #!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""Test an individual filename with a pattern.
"""
#end_pymotw_header
import fnmatch
import os
pattern = 'fnmatch_*.py'
print('Pattern :', pattern)
print()
files = os.listdir('.')
for name in sorted(files):
print('Filename: {:<25} {}'.format(
name, fnmatch.fnmatch(name, pattern)))
| 18.952381 | 55 | 0.678392 | 53 | 398 | 5.037736 | 0.716981 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023952 | 0.160804 | 398 | 20 | 56 | 19.9 | 0.775449 | 0.38191 | 0 | 0 | 0 | 0 | 0.175214 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0.333333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bc877b0af569563991836c407b7a62985a32bd0 | 3,767 | py | Python | evaluation_metrics.py | 87ZGitHub/sfd.pytorch | 66108ab35d8b1c1601c326b151141d9115a1409e | [
"MIT"
] | 124 | 2018-07-08T14:36:06.000Z | 2021-04-08T14:01:20.000Z | evaluation_metrics.py | 87ZGitHub/sfd.pytorch | 66108ab35d8b1c1601c326b151141d9115a1409e | [
"MIT"
] | 21 | 2018-07-09T07:17:40.000Z | 2020-08-11T12:26:13.000Z | evaluation_metrics.py | 87ZGitHub/sfd.pytorch | 66108ab35d8b1c1601c326b151141d9115a1409e | [
"MIT"
] | 25 | 2018-07-09T04:51:21.000Z | 2021-04-06T15:40:45.000Z | import numpy as np
from anchor import compute_iou
import torch
def AP(prediction, gt, iou_threshold):
"""compute average precision of detection, all the coordinate should be
(top left bottom right)
Args:
predict_bboxes (ndarray): should be a N * (4 + 1 + 1) ndarray
N is number of boxes been predicted(batch_size),
4 represents [top, left, bottom, right],
1 is the confidence of the class
1 is the number represents the class
gt_bboxes (ndarray): should be a M * (4 + 1) ndarray
M is the number of ground truth bboxes of that image
4 represents [top, left, bottom, right],
1 represents the class number of the bbox. Since we use 0 to be the
background, so class number of object should be started from 1
iou_threshold (float): threshold of iou for separating true positives
from false positives
Returns: dict mapping each class number to its AP value
"""
# split the prediction into bounding boxes, confidence scores and class labels
bboxes = prediction[:, :4]
scores = prediction[:, 4]
klasses = prediction[:, 5]
# sort klass, scores, bboxes by value of scores
inds = np.argsort(scores)[::-1]
scores, klasses, bboxes = scores[inds], klasses[inds], bboxes[inds]
# get a list result of tp and fp, length should be the same as bboxes
result = np.zeros(len(bboxes))
matched_index = []
ious = compute_iou(bboxes, gt[:, :4])
for index, iou in enumerate(ious):
gt_index = np.argmax(iou)
if iou[gt_index] > iou_threshold \
and gt_index not in matched_index \
and klasses[index] == gt[gt_index, 4]:
result[index] = 1
matched_index.append(gt_index)
# get tp and fp result of every class
ap_of_klass = {}
for klass in np.unique(klasses):
klass_indices = klasses == klass
klass_result = result[klass_indices]
object_num = np.sum(gt[:, 4] == klass)
cumsum = np.cumsum(klass_result)
recall_point_num = np.unique(cumsum)
precisions = np.zeros_like(recall_point_num, dtype=np.float)
recalls = np.zeros_like(recall_point_num, dtype=np.float)
for recall_point in recall_point_num:
recall_point = int(recall_point)
if recall_point == 0:
continue
predictions_num = np.searchsorted(cumsum, recall_point) + 1.0
precisions[recall_point - 1] = float(recall_point) / predictions_num
recalls[recall_point - 1] = recall_point / object_num
recalls = np.insert(recalls, 0, 0.0)
precisions = np.insert(precisions, 0, 0.0)
recalls = np.append(recalls, 1.0)
precisions = np.append(precisions, 0.0)
# make precision monotone decreased
current_precision = 0
for i in range(len(precisions) - 1, -1, -1):
precisions[i] = max(current_precision, precisions[i])
current_precision = precisions[i]
ap = 0
for i in range(1, len(precisions)):
precision = precisions[i]
recall_span = recalls[i] - recalls[i - 1]
ap += precision * recall_span
ap_of_klass[klass] = ap
return ap_of_klass
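# Usage sketch (added illustration): each prediction row is
# [top, left, bottom, right, score, class]; a single detection that overlaps
# its ground-truth box above iou_threshold with the matching class yields
# AP == 1.0 for that class.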
def softmax(mat):
"""change a vector to softmax score in batch
Args:
mat (ndarray): 2 dimensional matrix, shape is [batch_size, array_size]
Returns:
ndarray: a tensor which is has the same shape as the input
"""
mat_exp = torch.exp(mat)
mat_sum = torch.sum(mat_exp, dim=1, keepdim=True)
return mat_exp / mat_sum
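if __name__ == '__main__':
    # Hedged smoke test, not part of the original module: exercises AP()
    # on toy boxes. It assumes anchor.compute_iou takes an (N, 4) and an
    # (M, 4) array of (top, left, bottom, right) boxes and returns an
    # (N, M) IoU matrix, which is exactly how the loop above consumes it.
    prediction = np.array([
        [0, 0, 10, 10, 0.9, 1],  # strong hit on the ground truth below
        [0, 0, 9, 9, 0.6, 1],    # duplicate detection, counted as FP
    ])
    gt = np.array([[0, 0, 10, 10, 1]])
    print(AP(prediction, gt, iou_threshold=0.5))  # expected: {1.0: 1.0}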
| 36.221154 | 80 | 0.622511 | 519 | 3,767 | 4.396917 | 0.283237 | 0.062664 | 0.02454 | 0.023663 | 0.088519 | 0.05872 | 0.05872 | 0.032428 | 0.032428 | 0 | 0 | 0.01686 | 0.291479 | 3,767 | 103 | 81 | 36.572816 | 0.838142 | 0.361826 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.055556 | 0 | 0.12963 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bcaf2077f60c60393a9163a728dc944be68b9aa | 4,151 | py | Python | ml_src/utils.py | JaredDobry/imdb_scraper | 70056e8bbed49470fa6cbb42e930c95c437bdee3 | [
"MIT"
] | null | null | null | ml_src/utils.py | JaredDobry/imdb_scraper | 70056e8bbed49470fa6cbb42e930c95c437bdee3 | [
"MIT"
] | 2 | 2021-11-17T22:43:26.000Z | 2021-11-17T22:43:38.000Z | ml_src/utils.py | JaredDobry/imdb_scraper | 70056e8bbed49470fa6cbb42e930c95c437bdee3 | [
"MIT"
] | null | null | null | import random
import pickle
import pathlib
import numpy as np
import pandas as pd
from json import loads
from os.path import exists
from typing import Any, Dict, List, Tuple
from metrics import Category, Feature
def unpickle_file(file: pathlib.Path) -> Any:
abs_path = str(file)
if not abs_path.endswith(".pickle"):
abs_path += ".pickle"
with open(abs_path, "rb") as f:
return pickle.load(f)
def load_json(file: pathlib.Path) -> pd.DataFrame:
if not exists(file):
raise FileNotFoundError
# Figure out how many lines there are so we can pre-allocate an array
lines = 0
with open(file, "r") as f:
for _ in f:
lines += 1
arr = [{}] * lines
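# note: [{}] * lines shares a single dict object across all slots, but each
# slot is fully reassigned in the loop below, so this pre-allocation is safe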
# Perform load
x = 0
with open(file, "r") as f:
for line in f:
entry = loads(line)
if entry["release_date"] == '':
entry["release_date"] = 'null'
arr[x] = entry
x += 1
return pd.DataFrame(arr)
def unpickle_df(file: pathlib.Path) -> pd.DataFrame:
return pd.DataFrame(unpickle_file(file))
def load_df(file: pathlib.Path) -> pd.DataFrame:
return pd.DataFrame(load_json(file))
# The most important function in the repo. Builds a numpy matrix from a dataframe based on the "feature tuple"
def get_training_nparray(
df: pd.DataFrame, training_features: Tuple[Feature], disp_warning=False
) -> Tuple[np.ndarray, List[str]]:
def get_prepped_arr(feature: Feature) -> Category:
handle = feature.handle
feature_keys = feature.feature_keys
category = handle(df[[feature for feature in feature_keys]], feature_keys)
# If the prepped feature returns more than one column, it is probably
# one-hot encoded. For OHE we expect many zero elements, so the sparsity
# warning is skipped; otherwise we perform this sanity check
if disp_warning and len(category.category_vals) == 1:
non_zero_count = np.count_nonzero(category.category_vals)
len_arr = len(category.category_vals[0])
if non_zero_count < 0.9 * len_arr:
print(
f"WARNING: the **{feature_keys}** training feature has nonzero results in only "
f"{round(100*non_zero_count/len_arr, 2)}% of data points. Are you sure you want to use it?"
)
return category
training_names = []
training_ls = []
for feature_tup in training_features:
vals, names = get_prepped_arr(feature_tup)
training_ls.extend(vals)
training_names.extend(names)
assert len(training_ls) == len(training_names)
return np.array(training_ls).T, training_names
# Strike system. If a datapoint is missing n or more realistic values, cut it out of the data.
def rm_rows_missing_data(df: pd.DataFrame, n: int) -> pd.DataFrame:
def strikes_lt_n(row: pd.Series) -> bool:
strikes = 0
if int(row["budget"]) == 0:
strikes += 1
if int(row["revenue"]) == 0:
strikes += 1
if float(row["runtime"]) == 0:
strikes += 1
if not row["genres"]:
strikes += 1
if not row["spoken_languages"]:
strikes += 1
# if not row["original_language"]:
# strikes += 1
year = row["release_date"].partition("-")[0]
if year == "null" or int(year) < 1950:
strikes += 1
return strikes < n
strikes_lt_n_ls = np.array([strikes_lt_n(row) for _, row in df.iterrows()], dtype=bool)
return df.loc[strikes_lt_n_ls, :]
# Train test split on a dataframe directly
def train_test_split(df: pd.DataFrame, test_percent: int, seed: int=42) -> Tuple[pd.DataFrame, pd.DataFrame]:
len_df = len(df)
num_test = test_percent*len_df//100
all_indeces = range(len_df)
random.seed(seed)
indeces_test = set(random.sample(all_indeces, k=num_test))
bool_list_test = [index in indeces_test for index in all_indeces]
bool_list_train = [not test for test in bool_list_test]
return df[bool_list_train], df[bool_list_test]
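if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: demonstrates
    # the percentage-based split on a tiny frame. The column name here is
    # arbitrary; only rm_rows_missing_data depends on the real IMDB schema.
    demo = pd.DataFrame({"revenue": range(10)})
    train_df, test_df = train_test_split(demo, test_percent=20)
    assert len(train_df) == 8 and len(test_df) == 2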
| 33.208 | 111 | 0.631414 | 590 | 4,151 | 4.286441 | 0.315254 | 0.052195 | 0.019771 | 0.020166 | 0.080664 | 0.051404 | 0.051404 | 0.051404 | 0.035587 | 0 | 0 | 0.010859 | 0.267887 | 4,151 | 124 | 112 | 33.475806 | 0.821323 | 0.14093 | 0 | 0.090909 | 0 | 0.011364 | 0.075949 | 0.009564 | 0 | 0 | 0 | 0 | 0.011364 | 1 | 0.102273 | false | 0 | 0.102273 | 0.022727 | 0.306818 | 0.011364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bcb75a52c153491e5a8fa2aab5c85e45ee3a1cd | 1,564 | py | Python | desertbot/modules/commands/Gif.py | Helle-Daryd/DesertBot | 0b497db135a4c08dfbdb59108f830ba12fdc6465 | [
"MIT",
"BSD-3-Clause"
] | 7 | 2018-03-20T17:10:10.000Z | 2021-11-17T18:58:04.000Z | desertbot/modules/commands/Gif.py | Helle-Daryd/DesertBot | 0b497db135a4c08dfbdb59108f830ba12fdc6465 | [
"MIT",
"BSD-3-Clause"
] | 109 | 2015-08-20T13:16:35.000Z | 2022-01-21T19:40:35.000Z | desertbot/modules/commands/Gif.py | Helle-Daryd/DesertBot | 0b497db135a4c08dfbdb59108f830ba12fdc6465 | [
"MIT",
"BSD-3-Clause"
] | 7 | 2018-03-29T05:55:01.000Z | 2021-02-05T19:19:39.000Z | """
Created on Dec 05, 2013
@author: StarlitGhost
"""
import random
from twisted.plugin import IPlugin
from zope.interface import implementer
from desertbot.message import IRCMessage
from desertbot.moduleinterface import IModule
from desertbot.modules.commandinterface import BotCommand
from desertbot.response import IRCResponse
@implementer(IPlugin, IModule)
class Gif(BotCommand):
def triggers(self):
return ['gif']
def help(self, query):
return 'gif [<year>] - fetches a random gif posted during Desert Bus'
def execute(self, message: IRCMessage):
baseURL = "http://greywool.com/desertbus/{}/gifs/random.php"
years = range(7, 11)
if len(message.parameterList) > 0:
invalid = ("'{}' is not a valid year, valid years are {} to {}"
.format(message.parameterList[0], years[0], years[-1]))
try:
if len(message.parameterList[0]) < 4:
year = int(message.parameterList[0])
else:
raise ValueError
except ValueError:
return IRCResponse(invalid, message.replyTo)
if year not in years:
return IRCResponse(invalid, message.replyTo)
else:
year = random.choice(years)
url = baseURL.format(year)
response = self.bot.moduleHandler.runActionUntilValue('fetch-url', url)
link = response.content
return IRCResponse("Random DB{} gif: {}".format(year, link), message.replyTo)
gif = Gif()
| 28.436364 | 85 | 0.621483 | 170 | 1,564 | 5.717647 | 0.488235 | 0.053498 | 0.08642 | 0.05144 | 0.131687 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014134 | 0.276215 | 1,564 | 54 | 86 | 28.962963 | 0.844523 | 0.029412 | 0 | 0.114286 | 0 | 0 | 0.125166 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.2 | 0.057143 | 0.457143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bcbc87a25327faeab5c975e6b8196f74250bcaa | 9,457 | py | Python | datalad/distribution/drop.py | psychoinformatics-de/datalad | 7435edc1d3c73ae2254fa4bfcb8412a8de6d8d4c | [
"MIT"
] | 298 | 2015-01-25T17:36:29.000Z | 2022-03-20T03:38:47.000Z | datalad/distribution/drop.py | psychoinformatics-de/datalad | 7435edc1d3c73ae2254fa4bfcb8412a8de6d8d4c | [
"MIT"
] | 6,387 | 2015-01-02T18:15:01.000Z | 2022-03-31T20:58:58.000Z | datalad/distribution/drop.py | psychoinformatics-de/datalad | 7435edc1d3c73ae2254fa4bfcb8412a8de6d8d4c | [
"MIT"
] | 109 | 2015-01-25T17:49:40.000Z | 2022-03-06T06:54:54.000Z | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""High-level interface for dropping dataset content
"""
__docformat__ = 'restructuredtext'
import logging
from os.path import (
join as opj,
isabs,
normpath,
)
from datalad.utils import ensure_list
from datalad.support.param import Parameter
from datalad.support.constraints import EnsureStr, EnsureNone
from datalad.support.exceptions import (
CommandError,
InsufficientArgumentsError,
)
from datalad.distribution.dataset import (
Dataset,
EnsureDataset,
datasetmethod,
require_dataset,
)
from datalad.interface.base import (
Interface,
build_doc,
)
from datalad.interface.common_opts import (
if_dirty_opt,
recursion_flag,
recursion_limit,
)
from datalad.interface.results import (
get_status_dict,
annexjson2result,
success_status_map,
results_from_annex_noinfo,
)
from datalad.interface.utils import (
handle_dirty_dataset,
eval_results,
)
from datalad.core.local.status import Status
lgr = logging.getLogger('datalad.distribution.drop')
dataset_argument = Parameter(
args=("-d", "--dataset"),
metavar="DATASET",
doc="""specify the dataset to perform the operation on.
If no dataset is given, an attempt is made to identify a dataset
based on the `path` given""",
constraints=EnsureDataset() | EnsureNone())
check_argument = Parameter(
args=("--nocheck",),
doc="""whether to perform checks to assure the configured minimum
number (remote) source for data.[CMD: Give this
option to skip checks CMD]""",
action="store_false",
dest='check')
def _postproc_result(res, respath_by_status, ds, **kwargs):
res = annexjson2result(
# annex reports are always about files
res, ds, type='file', **kwargs)
success = success_status_map[res['status']]
respath_by_status[success] = \
respath_by_status.get(success, []) + [res['path']]
if res["status"] == "error" and res["action"] == "drop":
msg = res["message"]
if isinstance(msg, str) and "Use --force to" in msg:
# Avoid confusing datalad-drop callers with git-annex-drop's
# suggestion to use --force.
res["message"] = msg.replace("--force", "--nocheck")
return res
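# respath_by_status buckets result paths under the flag produced by
# success_status_map, so results_from_annex_noinfo() below can report on
# requested paths that git-annex stayed silent about.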
def _drop_files(ds, paths, check, noannex_iserror=False, **kwargs):
"""Helper to drop content in datasets.
Parameters
----------
ds : Dataset
paths : path or list(path)
which content to drop
check : bool
whether to instruct annex to perform minimum copy availability
checks
noannex_iserror : bool
whether calling this function on a pure Git repo results in an
'impossible' or 'notneeded' result.
**kwargs
additional payload for the result dicts
"""
# expensive, access only once
ds_repo = ds.repo
if 'action' not in kwargs:
kwargs['action'] = 'drop'
# always need to make sure that we pass a list
# `normalize_paths` decorator will otherwise screw all logic below
paths = ensure_list(paths)
if not hasattr(ds_repo, 'drop'):
for p in paths:
r = get_status_dict(
status='impossible' if noannex_iserror else 'notneeded',
path=p if isabs(p) else normpath(opj(ds.path, p)),
message="no annex'ed content",
**kwargs)
r['action'] = 'drop'
yield r
return
cmd = ['drop']
if not check:
cmd.append('--force')
respath_by_status = {}
try:
yield from (
_postproc_result(res, respath_by_status, ds)
for res in ds_repo._call_annex_records(cmd, files=paths)
)
except CommandError as e:
# pick up the results captured so far and yield them
# the error will be amongst them
yield from (
_postproc_result(res, respath_by_status, ds)
for res in e.kwargs.get('stdout_json', [])
)
# report on things requested that annex was silent about
for r in results_from_annex_noinfo(
ds, paths, respath_by_status,
dir_fail_msg='could not drop some content in %s %s',
noinfo_dir_msg='nothing to drop from %s',
noinfo_file_msg="no annex'ed content",
**kwargs):
r['action'] = 'drop'
yield r
@build_doc
class Drop(Interface):
"""Drop file content from datasets
This command takes any number of paths of files and/or directories. If
a common (super)dataset is given explicitly, the given paths are
interpreted relative to this dataset.
Recursion into subdatasets needs to be explicitly enabled, while recursion
into subdirectories within a dataset is done automatically. An optional
recursion limit is applied relative to each given input path.
By default, the availability of at least one remote copy is verified before
file content is dropped. As these checks could lead to slow operation
(network latencies, etc), they can be disabled.
"""
_examples_ = [
dict(text="Drop single file content",
code_py="drop('path/to/file')",
code_cmd="datalad drop <path/to/file>"),
dict(text="Drop all file content in the current dataset",
code_py="drop('.')",
code_cmd="datalad drop"),
dict(text="Drop all file content in a dataset and all its subdatasets",
code_py="drop(dataset='.', recursive=True)",
code_cmd="datalad drop -d <path/to/dataset> -r"),
dict(text="Disable check to ensure the configured minimum number of "
"remote sources for dropped data",
code_py="drop(path='path/to/content', check=False)",
code_cmd="datalad drop <path/to/content> --nocheck"),
]
_action = 'drop'
_params_ = dict(
dataset=dataset_argument,
path=Parameter(
args=("path",),
metavar="PATH",
doc="path/name of the component to be dropped",
nargs="*",
constraints=EnsureStr() | EnsureNone()),
recursive=recursion_flag,
recursion_limit=recursion_limit,
check=check_argument,
if_dirty=if_dirty_opt,
)
@staticmethod
@datasetmethod(name=_action)
@eval_results
def __call__(
path=None,
dataset=None,
recursive=False,
recursion_limit=None,
check=True,
if_dirty='save-before'):
if not dataset and not path:
raise InsufficientArgumentsError(
"insufficient information for `drop`: requires at least a path or dataset")
refds_path = Interface.get_refds_path(dataset)
res_kwargs = dict(action='drop', logger=lgr, refds=refds_path)
# this try-except dance is only to maintain a previous behavior of `drop`
# where it did not ValueError, but yielded error status
try:
ds = require_dataset(
dataset, check_installed=True, purpose='drop content')
except ValueError as e:
yield dict(
status='error',
message=str(e),
**res_kwargs,
)
return
if dataset and not path:
# act on the whole dataset if nothing else was specified
path = refds_path
content_by_ds = {}
for st in Status.__call__(
# do not use `ds` to preserve path semantics
dataset=dataset,
path=path,
annex=None,
untracked='no',
recursive=recursive,
recursion_limit=recursion_limit,
eval_subdataset_state='no',
report_filetype='raw',
return_type='generator',
result_renderer=None,
# yield errors and let caller decide
on_failure='ignore'):
if st['status'] == 'error':
# Downstream code can't do anything with these. Let the caller
# decide their fate.
yield st
continue
# ignore submodule entries
if st.get('type') == 'dataset':
if not Dataset(st['path']).is_installed():
continue
parentds = st['path']
else:
parentds = st['parentds']
cbd = content_by_ds.get(parentds, [])
cbd.append(st['path'])
content_by_ds[parentds] = cbd
# iterate over all datasets, order doesn't matter
for ds_path in content_by_ds:
ds = Dataset(ds_path)
# TODO generator
# this should yield what it did
handle_dirty_dataset(ds, mode=if_dirty)
for r in _drop_files(
ds,
content_by_ds[ds_path],
check=check,
**res_kwargs):
yield r
# there is nothing to save at the end
| 33.775 | 91 | 0.592577 | 1,112 | 9,457 | 4.910971 | 0.311151 | 0.020143 | 0.019227 | 0.013184 | 0.058231 | 0.058231 | 0.049441 | 0.032961 | 0.032961 | 0.032961 | 0 | 0.001067 | 0.306122 | 9,457 | 279 | 92 | 33.896057 | 0.831149 | 0.232315 | 0 | 0.087179 | 0 | 0 | 0.183312 | 0.007573 | 0 | 0 | 0 | 0.003584 | 0 | 1 | 0.015385 | false | 0 | 0.061538 | 0 | 0.112821 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bcc04e91758981f791ffbb36211c7f01c585bd9 | 13,552 | py | Python | music-dl.py | SyTemossy/music-dl | ada8ecc92e85e5fbe5936684915bf4fe1bab5583 | [
"MIT"
] | null | null | null | music-dl.py | SyTemossy/music-dl | ada8ecc92e85e5fbe5936684915bf4fe1bab5583 | [
"MIT"
] | null | null | null | music-dl.py | SyTemossy/music-dl | ada8ecc92e85e5fbe5936684915bf4fe1bab5583 | [
"MIT"
] | null | null | null | import argparse
import sys
import os
import urllib
import requests
import json
import random
commands = []
path = sys.path[0]
dpath = sys.path[0]
autocreate = False
netease_api = {
'music_url': 'http://music.163.com/song/media/outer/url?id=',
'playlist': 'https://api.surmon.me/music/list/',
'detail': 'http://music.163.com/api/song/detail/?id=xxxxx&ids=[xxxxx]'
}
qq_api = {
'info_url':
'https://u.y.qq.com/cgi-bin/musicu.fcg?format=json&data={%22req_0%22:{%22module%22:%22vkey.GetVkeyServer%22,%22method%22:%22CgiGetVkey%22,%22param%22:{%22guid%22:%22358840384%22,%22songmid%22:[%22helloworld%22],%22songtype%22:[0],%22uin%22:%221443481947%22,%22loginflag%22:1,%22platform%22:%2220%22}},%22comm%22:{%22uin%22:%2218585073516%22,%22format%22:%22json%22,%22ct%22:24,%22cv%22:0}}',
'playlist': 'https://api.qq.jsososo.com/songlist?id='
}
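# note: the %22helloworld%22 token inside info_url is a placeholder that
# qq.getsongurl() replaces with the actual songmid before issuing the request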
Illicit_Chracters = {'\\', '/', ':', '?', '|', '<', '>', '*'}
prefix = '.mp3'
check_exist = True
ar_str = ''
skip = False
proxy_period = 0
user_agent = [
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36 OPR/67.0.3575.53',
'Mozilla/5.0 (X11; U; Linux x86_64; en-ca) AppleWebKit/531.2+ (KHTML, like Gecko) Version/5.0 Safari/531.2+',
'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; Touch; rv:11.0) like Gecko',
'Mozilla/5.0 (Windows NT 10.0; rv:76.0) Gecko/20100101 Firefox/76.0',
'Mozilla/5.0 (Windows Phone 10.0; Android 6.0.1; Microsoft; Lumia 950) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Mobile Safari/537.36 Edge/15.14977',
'Mozilla/5.0 (Linux; Android 7.0; SM-A310F Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.91 Mobile Safari/537.36 OPR/42.7.2246.114996',
'Mozilla/5.0 (Linux;Android 5.1.1;OPPO A33 Build/LMY47V;wv) AppleWebKit/537.36(KHTML,link Gecko) Version/4.0 Chrome/43.0.2357.121 Mobile Safari/537.36',
'Mozilla/5.0 (Linux; U; Android 6.0.1; zh-CN; F5121 Build/34.0.A.1.247) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/40.0.2214.89 UCBrowser/11.5.1.944 Mobile Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36 Edg/80.0.361.66'
]
class proxymanager:
def get_proxy(self):
try:
return requests.get("http://118.24.52.95/get/").json()
except:
return self.get_proxy()
def delete_proxy(self, proxy):
requests.get("http://118.24.52.95/delete/?proxy={}".format(proxy))
def getHtml(self):
proxy = self.get_proxy().get("proxy")
proxies = {
'http': 'http://{}'.format(proxy),
'https': 'https://{}'.format(proxy)
}
try:
requests.packages.urllib3.disable_warnings()
html = requests.get('http://www.example.com',
proxies=proxies,
verify=False)
return proxy
except Exception:
self.delete_proxy(proxy)
# retry with a fresh proxy and propagate its result
return self.getHtml()
class core:
def genfake(self, type):
ua = random.choice(user_agent)
if type == '163':
headers = {'User-Agent': ua, 'Referer': 'https://music.163.com/'}
elif type == 'qq':
headers = {
'User-Agent': ua,
'Referer': 'https://y.qq.com/portal/player.html'
}
return headers
def autocreate(self, path):
if not os.path.exists(path):
os.makedirs(path)
return path
def to_legal(self, filename):
for x in Illicit_Chracters:
filename = filename.replace(x, ar_str)
return filename
def download_file(self, url, filename, type):
global check_exist
if ar:
filename = self.to_legal(filename)
full_path = '{}\\{}{}'.format(dpath, filename, prefix)
global skip
if os.path.exists(full_path) and not skip:
while True:
if check_exist == 'yf':
break
if check_exist == 'nf':
return False
r = input(
'{} already exists, download again? [y/n/yf(apply to all)/nf(apply to all)]'.format(
filename))
if r == 'y':
break
elif r == 'n':
return False
elif r == 'yf':
skip = True
check_exist = r
break
elif r == 'nf':
skip = True
check_exist = r
return False
headers = core().genfake(type)
#global proxy_period
#if proxy_period == 0:
proxy = proxymanager().getHtml()
proxies = {'http': proxy, 'https': proxy}
httpproxy_handler = urllib.request.ProxyHandler(proxies)
opener = urllib.request.build_opener(httpproxy_handler)
#proxy_period += 1
#elif proxy_period == 2:
#proxy_period = 0
req = urllib.request.Request(url, headers=headers)
data = urllib.request.urlopen(req).read()
with open(full_path, 'wb') as f:
f.write(data)
f.close()
return True
class qq:
def getsongurl(self, ori_url):
id = ori_url[ori_url.index('song/') + 5:ori_url.index('.html')]
headers = core().genfake('qq')
response = requests.get(qq_api['info_url'].replace('helloworld', id),
headers=headers)
js = json.loads(response.text)
global prefix
filename = js['req_0']['data']['midurlinfo'][0]['filename']
prefix = os.path.splitext(filename)[1]
sip = random.choice(js['req_0']['data']['sip'])
purl = js['req_0']['data']['midurlinfo'][0]['purl']
url = sip + purl
return url
def getsongdetail(self, url):
from bs4 import BeautifulSoup
headers = core().genfake('qq')
if url.startswith('y.qq.com'):
url = 'https://' + url
response = requests.get(url, headers=headers)
soup = BeautifulSoup(response.text, 'html.parser')
#body > div.main > div.mod_data > div > div.data__singer
data = soup.select(
'body > div.main > div.mod_data > div > div.data__name > h1')
for item in data:
name = item.get('title')
data = soup.select(
'body > div.main > div.mod_data > div > div.data__singer')
for item in data:
artists = item.get('title')
if artists.find('/'):
artists = artists.replace('/', '&')
return '{} - {}'.format(name, artists)
def getplaylist(self, url):
id = url[url.index('playlist/') + 9:url.index('.html')]
headers = core().genfake('qq')
response = requests.get('{}{}'.format(qq_api['playlist'], id),
headers=headers)
js = json.loads(response.text)
try:
count = js['data']['total_song_num']
except:
raise Exception('Invalid URL!')
dissname = js['data']['dissname']
global dpath
dissname = core().to_legal(dissname)
dpath = core().autocreate('{}\\{}'.format(dpath, dissname))
print('Playlist info fetched!\nStart downloading {}'.format(dissname))
for i in range(0, count):
mid = js['data']['songlist'][i]['songmid']
self.start('y.qq.com/n/yqq/song/{}.html'.format(mid), 0)
pass
def start(self, url, type):
if type == 0:
print('Fetching song info...')
songurl = self.getsongurl(url)
filename = self.getsongdetail(url)
print('Start downloading {}...'.format(filename))
r = core().download_file(songurl, filename, 'qq')
if r == True:
print('{} downloaded!'.format(filename))
elif r == False:
print('{} already exists!'.format(filename))
elif type == 1:
print('Fetching playlist info...')
self.getplaylist(url)
pass
else:
raise Exception('Invalid URL!')
return
pass
class netease:
def getsongdetail(self, id):
headers = core().genfake('163')
body = requests.get(netease_api['detail'].replace('xxxxx', id),
headers=headers)
js = json.loads(body.content)
if js['code'] != 200:
raise Exception('Incorrect song ID')
name = js['songs'][0]['name']
artist = js['songs'][0]['artists'][0]['name']
i = 1
while True:
try:
artist += '&' + js['songs'][0]['artists'][i]['name']
i += 1
except:
break
return name + ' - ' + artist
def getsongurl(self, ori_url):
ori_url = ori_url.replace('/#', '')
parsed_url = urllib.parse.urlparse(ori_url)
id = urllib.parse.parse_qs(parsed_url.query)['id']
if id[0].isdecimal:
songurl = netease_api['music_url'] + id[0]
else:
raise Exception('Incorrect song ID')
return songurl, id[0]
pass
def getplaylist(self, ori_url):
ori_url = ori_url.replace('/#', '')
parsed_url = urllib.parse.urlparse(ori_url)
id = urllib.parse.parse_qs(parsed_url.query)['id']
if id[0].isdecimal:
listurl = netease_api['playlist'] + id[0]
else:
raise Exception('Incorrect playlist ID')
body = requests.get(listurl)
js = json.loads(body.content)
if js['status'] != 'success':
raise Exception(js['error'])
trackCount = int(js['result']['trackCount'])
playlistName = js['result']['name']
global dpath
playlistName = core().to_legal(playlistName)
dpath = core().autocreate('{}\\{}'.format(dpath, playlistName))
print('Playlist info fetched!\nStart downloading {}'.format(playlistName))
for x in range(0, trackCount):
self.start(
'{}{}'.format(netease_api['music_url'],
js['result']['tracks'][x]['id']), 0)
pass
def start(self, url, type):
prefix = '.mp3'
global path
if type == 1:
print('Fetching playlist info...')
self.getplaylist(url)
pass
elif type == 0:
print('Fetching song info...')
songurl, songid = self.getsongurl(url)
filename = self.getsongdetail(songid)
print('Start downloading {}...'.format(filename))
r = core().download_file(songurl, filename, '163')
if r == True:
print('{} downloaded!'.format(filename))
elif r == False:
print('{} already exists!'.format(filename))
pass
else:
raise Exception('Invalid URL!')
return
pass
pass
def argparser(argv):
global dpath
global ar
argv = argv[1:len(argv)]
#print(argv)
parser = argparse.ArgumentParser(description='Produced by SyTemossy')
parser.add_argument('--input',
'-i',
help='Where to Download',
type=str,
required=True)
parser.add_argument('--output',
'-o',
help='Saved Path',
type=str,
default=dpath,
required=False)
parser.add_argument('--AutoCreate',
'-ac',
action='store_true',
default=True,
help='Auto Created Path, Only Work With -t/--to',
required=False)
parser.add_argument(
'--AutoReplace',
'-ar',
action='store_true',
default=False,
help=
'Auto Replace Illicit Character\nSuch as: \\, /, :, *, ?, \", <, >, |',
required=False)
args = parser.parse_args(argv)
i = args.input
ar = args.AutoReplace
dpath = args.output
autocreate = args.AutoCreate
print('Input: ', args.input)
print('Output: ', args.output)
print('AutoCreate: ', args.AutoCreate)
print('AutoReplace: ', args.AutoReplace)
core().autocreate(dpath)
if i.find('music.163.com/song?id=') != -1 or i.find(
'music.163.com/#/song?id=') != -1:
netease().start(i, 0)
elif i.find('music.163.com/playlist?id=') != -1 or i.find(
'music.163.com/#/playlist?id=') != -1:
netease().start(i, 1)
elif i.find('y.qq.com/n/yqq/song/') != -1:
qq().start(i, 0)
elif i.find('y.qq.com/n/yqq/playlist/') != -1:
qq().start(i, 1)
else:
raise Exception('Invalid URL')
return True
pass
if __name__ == "__main__":
if not sys.argv[0].endswith('.py'):
path = os.getcwd()
dpath = os.getcwd()
try:
output = argparser(sys.argv)
print(output)
except Exception as e:
print(e)
print('False')
| 37.333333 | 396 | 0.516824 | 1,566 | 13,552 | 4.408685 | 0.227969 | 0.008691 | 0.013036 | 0.01825 | 0.343424 | 0.280852 | 0.228418 | 0.187283 | 0.132242 | 0.123841 | 0 | 0.058596 | 0.331316 | 13,552 | 362 | 397 | 37.436464 | 0.703266 | 0.011954 | 0 | 0.304878 | 0 | 0.036585 | 0.254973 | 0.016205 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04878 | false | 0.033537 | 0.02439 | 0 | 0.137195 | 0.057927 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bcc76036f4adee6ab1ac6897a215f0d51bec9a3 | 4,604 | py | Python | python/federatedml/param/data_split_param.py | hubert-he/FATE | 6758e150bd7ca7d6f788f9a7a8c8aea7e6500363 | [
"Apache-2.0"
] | 3,787 | 2019-08-30T04:55:10.000Z | 2022-03-31T23:30:07.000Z | python/federatedml/param/data_split_param.py | hubert-he/FATE | 6758e150bd7ca7d6f788f9a7a8c8aea7e6500363 | [
"Apache-2.0"
] | 1,439 | 2019-08-29T16:35:52.000Z | 2022-03-31T11:55:31.000Z | python/federatedml/param/data_split_param.py | hubert-he/FATE | 6758e150bd7ca7d6f788f9a7a8c8aea7e6500363 | [
"Apache-2.0"
] | 1,179 | 2019-08-29T16:18:32.000Z | 2022-03-31T12:55:38.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.param.base_param import BaseParam
from federatedml.util import LOGGER
class DataSplitParam(BaseParam):
"""
Define data split param that used in data split.
Parameters
----------
random_state : None, int, default: None
Specify the random state for shuffle.
test_size : None, float, int, default: 0.0
Specify test data set size.
float value specifies fraction of input data set, int value specifies exact number of data instances
train_size : None, float, int, default: 0.8
Specify train data set size.
float value specifies fraction of input data set, int value specifies exact number of data instances
validate_size : None, float, int, default: 0.2
Specify validate data set size.
float value specifies fraction of input data set, int value specifies exact number of data instances
stratified : boolean, default: False
Define whether sampling should be stratified, according to label value.
shuffle : boolean, default : True
Define whether do shuffle before splitting or not.
split_points : None, list, default : None
Specify the point(s) by which continuous label values are bucketed into bins for stratified split.
e.g. [0.2] for two bins or [0.1, 1, 3] for 4 bins
need_run: bool, default: True
Specify whether to run data split
"""
def __init__(self, random_state=None, test_size=None, train_size=None, validate_size=None, stratified=False,
shuffle=True, split_points=None, need_run=True):
super(DataSplitParam, self).__init__()
self.random_state = random_state
self.test_size = test_size
self.train_size = train_size
self.validate_size = validate_size
self.stratified = stratified
self.shuffle = shuffle
self.split_points = split_points
self.need_run = need_run
def check(self):
model_param_descr = "data split param's "
if self.random_state is not None:
if not isinstance(self.random_state, int):
raise ValueError(f"{model_param_descr} random state should be int type")
BaseParam.check_nonnegative_number(self.random_state, f"{model_param_descr} random_state ")
if self.test_size is not None:
BaseParam.check_nonnegative_number(self.test_size, f"{model_param_descr} test_size ")
if isinstance(self.test_size, float):
BaseParam.check_decimal_float(self.test_size, f"{model_param_descr} test_size ")
if self.train_size is not None:
BaseParam.check_nonnegative_number(self.train_size, f"{model_param_descr} train_size ")
if isinstance(self.train_size, float):
BaseParam.check_decimal_float(self.train_size, f"{model_param_descr} train_size ")
if self.validate_size is not None:
BaseParam.check_nonnegative_number(self.validate_size, f"{model_param_descr} validate_size ")
if isinstance(self.validate_size, float):
BaseParam.check_decimal_float(self.validate_size, f"{model_param_descr} validate_size ")
# use default size values if none given
if self.test_size is None and self.train_size is None and self.validate_size is None:
self.test_size = 0.0
self.train_size = 0.8
self.validate_size = 0.2
BaseParam.check_boolean(self.stratified, f"{model_param_descr} stratified ")
BaseParam.check_boolean(self.shuffle, f"{model_param_descr} shuffle ")
BaseParam.check_boolean(self.need_run, f"{model_param_descr} need run ")
if self.split_points is not None:
if not isinstance(self.split_points, list):
raise ValueError(f"{model_param_descr} split_points should be list type")
LOGGER.debug("Finish data_split parameter check!")
return True
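if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: builds a param
    # object with an explicit 70/20/10 split (decimal fractions) and runs
    # the validation above; all three float branches are exercised.
    param = DataSplitParam(train_size=0.7, validate_size=0.2, test_size=0.1)
    assert param.check()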
| 43.028037 | 112 | 0.691138 | 636 | 4,604 | 4.831761 | 0.253145 | 0.042304 | 0.063456 | 0.06248 | 0.352424 | 0.323462 | 0.267491 | 0.215099 | 0.215099 | 0.143508 | 0 | 0.00795 | 0.235013 | 4,604 | 106 | 113 | 43.433962 | 0.864566 | 0.400521 | 0 | 0 | 0 | 0 | 0.177028 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.045455 | 0 | 0.136364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bd10a1e1d8c38d5a012608be42f9224c3b3f533 | 6,258 | py | Python | reid/trainers.py | xueping187/weakly-supervised-person-re-id | 3cfe98264dcdb667c132727a57ab80da5a9e6a8f | [
"Apache-2.0"
] | 2 | 2021-09-14T03:39:43.000Z | 2021-09-14T03:41:04.000Z | reid/trainers.py | xueping187/weakly-supervised-person-re-id | 3cfe98264dcdb667c132727a57ab80da5a9e6a8f | [
"Apache-2.0"
] | null | null | null | reid/trainers.py | xueping187/weakly-supervised-person-re-id | 3cfe98264dcdb667c132727a57ab80da5a9e6a8f | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function, absolute_import
import time
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
#from .evaluation_metrics import accuracy
from .utils.meters import AverageMeter
def MIL(element_logits, seq_len, batch_size, labels):
''' element_logits should be a torch tensor of dimension (B, n_element, n_class),
seq_len should be a numpy array of dimension (B,); the top-k locations to
average over are derived internally as ceil(seq_len / 4),
labels should be a torch tensor of dimension (B, n_class) of 1 or 0;
returns a scalar torch tensor (the multiple-instance learning loss) '''
# labels = labels.type(torch.cuda.FloatTensor)
k = np.ceil(seq_len/4).astype('int32')
labels = labels / torch.sum(labels, dim=1, keepdim=True)
instance_logits = torch.zeros(0).cuda()
for i in range(batch_size):
tmp, _ = torch.topk(element_logits[i][:seq_len[i]], k=int(k[i]), dim=0)
instance_logits = torch.cat([instance_logits, torch.mean(tmp, 0, keepdim=True)], dim=0)
milloss = -torch.mean(torch.sum(Variable(labels) * F.log_softmax(instance_logits, dim=1), dim=1), dim=0)
return milloss
def CPAL(x, element_logits, seq_len, n_similar, labels):
''' x is the torch tensor of features from the last layer of the model, of
dimension (B, n_element, n_feature); the first 2 * n_similar entries are
arranged as consecutive pairs of videos that share at least one label,
element_logits should be a torch tensor of dimension (B, n_element, n_class),
seq_len should be a numpy array of dimension (B,),
labels should be a torch tensor of dimension (B, n_class) of 1 or 0 '''
sim_loss = 0.
n_tmp = 0.
for i in range(0, n_similar*2, 2):
atn1 = F.softmax(element_logits[i][:seq_len[i]], dim=0)
atn2 = F.softmax(element_logits[i+1][:seq_len[i+1]], dim=0)
n1 = torch.FloatTensor([np.maximum(seq_len[i]-1, 1)]).cuda()
n2 = torch.FloatTensor([np.maximum(seq_len[i+1]-1, 1)]).cuda()
Hf1 = torch.mm(torch.transpose(x[i][:seq_len[i]], 1, 0), atn1)
Hf2 = torch.mm(torch.transpose(x[i+1][:seq_len[i+1]], 1, 0), atn2)
Lf1 = torch.mm(torch.transpose(x[i][:seq_len[i]], 1, 0), (1 - atn1)/n1)
Lf2 = torch.mm(torch.transpose(x[i+1][:seq_len[i+1]], 1, 0), (1 - atn2)/n2)
d1 = 1 - torch.sum(Hf1*Hf2, dim=0) / (torch.norm(Hf1, 2, dim=0) * torch.norm(Hf2, 2, dim=0))
d2 = 1 - torch.sum(Hf1*Lf2, dim=0) / (torch.norm(Hf1, 2, dim=0) * torch.norm(Lf2, 2, dim=0))
d3 = 1 - torch.sum(Hf2*Lf1, dim=0) / (torch.norm(Hf2, 2, dim=0) * torch.norm(Lf1, 2, dim=0))
sim_loss = sim_loss + 0.5*torch.sum(torch.max(d1-d2+0.5, torch.FloatTensor([0.]).cuda())*Variable(labels[i,:])*Variable(labels[i+1,:]))
sim_loss = sim_loss + 0.5*torch.sum(torch.max(d1-d3+0.5, torch.FloatTensor([0.]).cuda())*Variable(labels[i,:])*Variable(labels[i+1,:]))
n_tmp = n_tmp + torch.sum(Variable(labels[i,:])*Variable(labels[i+1,:]))
sim_loss = sim_loss / n_tmp
return sim_loss
class BaseTrainer(object):
def __init__(self, model, fixed_layer=True):
super(BaseTrainer, self).__init__()
self.model = model
self.fixed_layer = fixed_layer
def train(self, epoch, data_loader, optimizer, print_freq=1):
self.model.train()
if self.fixed_layer:
# The following code is used to keep the BN on the first three block fixed
fixed_bns = []
for idx, (name, module) in enumerate(self.model.module.named_modules()):
if name.find("layer3") != -1:
assert len(fixed_bns) == 22
break
if name.find("bn") != -1:
fixed_bns.append(name)
module.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
millosses = AverageMeter()
cpalosses = AverageMeter()
end = time.time()
for i, inputs in enumerate(data_loader):
data_time.update(time.time() - end)
inputs, targets = self._parse_data(inputs)
loss, milloss, cpaloss = self._forward(inputs, targets)
# loss = self._forward(inputs, targets)
losses.update(loss.item(), targets.size(0))
millosses.update(milloss, targets.size(0))
cpalosses.update(cpaloss, targets.size(0))
optimizer.zero_grad()
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
#torch.nn.utils.clip_grad_norm(self.model.parameters(), 0.75)
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
if (i + 1) % print_freq == 0:
print('Epoch: [{}][{}/{}]\t'
'Loss {:.3f} ({:.3f})\t'
'MILLoss {:.3f} ({:.3f})\t'
'CPALoss {:.3f} ({:.3f})\t'
.format(epoch, i + 1, len(data_loader),
losses.val, losses.avg,
millosses.val, millosses.avg,
cpalosses.val, cpalosses.avg))
def _parse_data(self, inputs):
raise NotImplementedError
def _forward(self, inputs, targets):
raise NotImplementedError
class Trainer(BaseTrainer):
def _parse_data(self, inputs):
imgs, pids = inputs
pids = pids.float()
inputs = Variable(imgs, requires_grad=False)
targets = Variable(pids.cuda())
return inputs, targets
def _forward(self, inputs, targets):
# seq_len = np.sum(np.max(np.abs(inputs),axis=2)>0,axis=1)
# seq_len = np.ones((inputs.shape[0],1))*inputs.shape[1]
seq_len = np.array([100] * 10)
Lambda = 0.5
num_similar = 3
self.model.eval()
final_features,element_logits = self.model(inputs)
milloss = MIL(element_logits, seq_len, inputs.shape[0], targets)
cpaloss = CPAL(final_features, element_logits, seq_len, num_similar, targets)
total_loss = Lambda * milloss + (1-Lambda) * cpaloss
return total_loss, milloss, cpaloss
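if __name__ == '__main__':
    # Hedged smoke test, not part of the original module: runs the MIL
    # loss on random inputs. A CUDA device is assumed, because the loss
    # itself allocates CUDA tensors internally.
    B, T, C = 4, 100, 20
    element_logits = torch.randn(B, T, C).cuda()
    labels = torch.zeros(B, C).cuda()
    labels[torch.arange(B), torch.randint(0, C, (B,))] = 1.0
    seq_len = np.array([T] * B)
    print('MIL loss:', MIL(element_logits, seq_len, B, labels).item())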
| 42.283784 | 143 | 0.592681 | 871 | 6,258 | 4.123995 | 0.221584 | 0.033408 | 0.017539 | 0.01559 | 0.2951 | 0.244154 | 0.222439 | 0.193207 | 0.161192 | 0.142261 | 0 | 0.03151 | 0.269735 | 6,258 | 147 | 144 | 42.571429 | 0.754486 | 0.171461 | 0 | 0.079208 | 0 | 0 | 0.02054 | 0 | 0 | 0 | 0 | 0 | 0.009901 | 1 | 0.079208 | false | 0 | 0.069307 | 0 | 0.207921 | 0.039604 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bd1b91cc7621a6826da68a2e74632538595b5f6 | 6,618 | py | Python | basicsr/models/basicvsr_model.py | IanYeung/ReCp | 1a7ace0e1ca3c262e24a222f3f0ab0d5674e9410 | [
"Apache-2.0",
"MIT"
] | null | null | null | basicsr/models/basicvsr_model.py | IanYeung/ReCp | 1a7ace0e1ca3c262e24a222f3f0ab0d5674e9410 | [
"Apache-2.0",
"MIT"
] | null | null | null | basicsr/models/basicvsr_model.py | IanYeung/ReCp | 1a7ace0e1ca3c262e24a222f3f0ab0d5674e9410 | [
"Apache-2.0",
"MIT"
] | null | null | null | import logging
from copy import deepcopy
import os.path as osp
from tqdm import tqdm
import torch
from torch.nn.parallel import DistributedDataParallel
from basicsr.models.video_base_model import VideoBaseModel
from basicsr.utils import imwrite, tensor2img
from basicsr.utils.dist_util import get_dist_info
from basicsr.metrics import calculate_metric
from basicsr.utils.registry import MODEL_REGISTRY
logger = logging.getLogger('basicsr')
@MODEL_REGISTRY.register()
class BasicVSRModel(VideoBaseModel):
"""BasicVSR Model.
Paper: BasicVSR: The Search for Essential Components in
Video Super-Resolution and Beyond
"""
def __init__(self, opt):
super(BasicVSRModel, self).__init__(opt)
if self.is_train:
self.fix_iter = opt['train'].get('fix_iter')
if isinstance(self.net_g, DistributedDataParallel):
logger.warning('Set net_g.find_unused_parameters = True.')
self.net_g.find_unused_parameters = True
def setup_optimizers(self):
train_opt = self.opt['train']
spynet_lr_mul = train_opt.get('spynet_lr_mul', 1)
logger.info('Multiply the learning rate '
f'for spynet by {spynet_lr_mul}.')
if spynet_lr_mul == 1:
optim_params = self.net_g.parameters()
else:  # separate spynet params and normal params for different lr
normal_params = []
spynet_params = []
for name, param in self.net_g.named_parameters():
if 'spynet' in name:
spynet_params.append(param)
else:
normal_params.append(param)
optim_params = [
{ # add normal params first
'params': normal_params,
'lr': train_opt['optim_g']['lr']
},
{
'params': spynet_params,
'lr': train_opt['optim_g']['lr'] * spynet_lr_mul
},
]
optim_type = train_opt['optim_g'].pop('type')
if optim_type == 'Adam':
self.optimizer_g = torch.optim.Adam(optim_params,
**train_opt['optim_g'])
else:
raise NotImplementedError(
f'optimizer {optim_type} is not supported yet.')
self.optimizers.append(self.optimizer_g)
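# A hedged note on the grouping above, with illustrative numbers: Adam
# accepts per-group dicts, so with lr=1e-4 and spynet_lr_mul=0.25 the flow
# estimator trains at 2.5e-5 while the rest of net_g keeps 1e-4.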
def optimize_parameters(self, current_iter):
if self.fix_iter:
if current_iter == 1:
for k, v in self.net_g.named_parameters():
if 'spynet' in k:
v.requires_grad = False
elif current_iter == self.fix_iter + 1:
for v in self.net_g.parameters():
v.requires_grad = True
super(BasicVSRModel, self).optimize_parameters(current_iter)
def dist_validation(self, dataloader, current_iter, tb_logger, save_img):
# dist_validation has not been implemented yet; fall back to nondist_validation on rank 0
rank, world_size = get_dist_info()
if rank == 0:
self.nondist_validation(dataloader, current_iter, tb_logger,
save_img)
def nondist_validation(self, dataloader, current_iter, tb_logger,
save_img):
dataset_name = dataloader.dataset.opt['name']
with_metrics = self.opt['val'].get('metrics') is not None
if with_metrics:
self.metric_results = {
metric: 0
for metric in self.opt['val']['metrics'].keys()
}
pbar = tqdm(total=len(dataloader), unit='image', ascii=True)
for idx, val_data in enumerate(dataloader):
clip_name = val_data['key'].split('/')[0]
self.feed_data(val_data)
self.test()
visuals = self.get_current_visuals()
sr_imgs = tensor2img(visuals['result'])
if 'gt' in visuals:
gt_imgs = tensor2img(visuals['gt'])
del self.gt
# tentative for out of GPU memory
del self.lq
del self.output
torch.cuda.empty_cache()
if save_img:
if self.opt['is_train']:
save_img_name = osp.join(self.opt['path']['visualization'],
f'{dataset_name}_train',
clip_name, '{idx:08d}.png')
else:
if self.opt['val']['suffix']:
save_img_name = osp.join(
self.opt['path']['visualization'], dataset_name,
clip_name, ('{idx:08d}_' +
f'{self.opt["val"]["suffix"]}.png'))
else:
save_img_name = osp.join(
self.opt['path']['visualization'], dataset_name,
clip_name, '{idx:08d}.png')
for sr_img_idx, sr_img in zip(val_data['frame_list'], sr_imgs):
imwrite(sr_img, save_img_name.format(idx=sr_img_idx.item()))
if with_metrics:
# calculate metrics
opt_metric = deepcopy(self.opt['val']['metrics'])
for name, opt_ in opt_metric.items():
metric_results_ = [
calculate_metric(dict(img1=sr, img2=gt), opt_)
for sr, gt in zip(sr_imgs, gt_imgs)
]
self.metric_results[name] += torch.tensor(
sum(metric_results_) / len(metric_results_))
pbar.update(1)
pbar.set_description(f'Test {clip_name}')
pbar.close()
if with_metrics:
for metric in self.metric_results.keys():
self.metric_results[metric] /= (idx + 1)
super(VideoBaseModel,
self)._log_validation_metric_values(current_iter,
dataset_name, tb_logger)
def get_current_visuals(self):
# dim: n,t,c,h,w
t = self.lq.shape[1]
assert (t == self.gt.shape[1] and t == self.output.shape[1])
lq = self.lq.detach().cpu().squeeze(0)
gt = self.gt.detach().cpu().squeeze(0)
result = self.output.detach().cpu().squeeze(0)
return dict(
lq=[lq[i] for i in range(t)],
gt=[gt[i] for i in range(t)],
result=[result[i] for i in range(t)])
| 39.628743 | 80 | 0.531581 | 742 | 6,618 | 4.520216 | 0.256065 | 0.022958 | 0.014311 | 0.016696 | 0.16607 | 0.15653 | 0.128205 | 0.10316 | 0.10316 | 0.039952 | 0 | 0.006204 | 0.366727 | 6,618 | 166 | 81 | 39.86747 | 0.794083 | 0.048202 | 0 | 0.102941 | 0 | 0 | 0.076972 | 0.009402 | 0 | 0 | 0 | 0 | 0.007353 | 1 | 0.044118 | false | 0 | 0.080882 | 0 | 0.139706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bd2c291602bab4b785fdad8e90b37c866fe0baa | 28,100 | py | Python | service/doc_etl.py | wisebobo/doc_ocr_by_template | 299601c9c5275cf6ad1bbdca071060df5ede0c7b | [
"MIT"
] | 6 | 2019-11-29T10:10:58.000Z | 2022-01-29T07:01:48.000Z | service/doc_etl.py | wisebobo/doc_ocr_by_template | 299601c9c5275cf6ad1bbdca071060df5ede0c7b | [
"MIT"
] | 7 | 2019-12-17T05:14:17.000Z | 2022-02-10T01:08:50.000Z | service/doc_etl.py | wisebobo/doc_ocr_by_template | 299601c9c5275cf6ad1bbdca071060df5ede0c7b | [
"MIT"
] | 3 | 2019-12-17T09:44:28.000Z | 2022-01-12T09:54:00.000Z | # -*- coding:utf-8 -*-
import re, sys
import time, logging
from collections import OrderedDict
from util.MRZ import MRZ, MRZOCRCleaner
from util.ctc2hanzi import ctc_code
from util.name2pinyin import ChineseName
from util.CHN_ID_Verify import CHNIdNumber
from util.log import logging_elapsed_time
class doc_etl(object):
def __init__(self, img, vendor, ocr_rslt, doc_type):
self._img = img
self._vendor = vendor
self._doc_type = doc_type
self._ocr_rslt = ocr_rslt
self._doc_number = ''
self._first_name = ''
self._last_name = ''
self._local_name = ''
self._local_name_GBK = ''
self._local_name_last = ''
self._local_name_first = ''
self._GBKCode_last = ''
self._GBKCode_first = ''
self._DOB = ''
self._sex = ''
self._nationality = ''
self._DOE = ''
self._address = ''
self._mrz_type = ''
self._mrz_line1 = ''
self._mrz_line2 = ''
self._mrz_line3 = ''
self._valid_score = 0
if self._doc_type in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]:
self._country = 'CHN'
self._nationality = 'CHN'
elif self._doc_type in [14, 15, 16, 17, 18, 19, 20, 21]:
self._country = 'AUS'
self._nationality = 'AUS'
elif self._doc_type in [22]:
self._country = 'NZL'
self._nationality = 'NZL'
else:
self._country = ''
self._nationality = ''
if self._doc_type in [1, 7, 8, 9, 10, 11, 12, 13]:
self._date_format = '%Y%m%d'
elif self._doc_type in [2, 3, 4, 5, 6, 15, 19, 21, 22]:
self._date_format = '%d%m%Y'
elif self._doc_type in [14, 16, 17, 18, 20]:
self._date_format = '%d%b%Y'
else:
self._date_format = '%Y%m%d'
self.doc_type_dict = {
"0": "Passport",
"1": "CHN ID Card",
"2": "HK ID Card",
"3": "HK ID Card (Old)",
"4": "Macau ID Card",
"5": "Macau ID Card (Old)",
"6": "Macau ID Card MRZ",
"7": "CN to HK/Macau Entry Card",
"8": "CN to HK/Macau Entry Book (Old)",
"9": "CN to TW Entry Card",
"10": "HK/Macau to CN Entry Card",
"11": "HK/Macau to CN Entry Card (Old)",
"12": "TW to CN Entry Card",
"13": "TW to CN Entry Book (Old)",
"14": "AU Driver License - NSW - New South Wales",
"15": "AU Driver License - VA - Victoria Australia",
"16": "AU Driver License - ACT - Australian Capital Territory",
"17": "AU Driver License - QLD - Queensland",
"18": "AU Driver License - WA - Western Australia",
"19": "AU Driver License - NT - Northern Territory",
"20": "AU Driver License - TAS - Tasmania",
"21": "AU Driver License - SA - South Australia",
"22": "NZ Driver License"
}
self.mrz_type_dict = {
"TD3": "Passport",
"CHNHK2000": "CN to HK/Macau Entry Book (Old)",
"CHNTW2000": "CH to TW Entry Book (Old)",
"TWCHN2000": "TW to CN Entry Book (Old)",
"CHNHK2014": "CN to HK/Macau Entry Card",
"CHNTW2014": "CN to TW Entry Card"
}
logging.info('Received image as : ' + self.doc_type_dict[str(self._doc_type)])
if self._vendor == 'Face++_Template':
self._parse_by_template()
else:
if self._doc_type in [0, 5, 6]:
self._parse_data()
else:
self._parse_by_template()
self._calc_score()
def __repr__(self):
return "DOC OCR({0}, {1}, {2}, {3}, {4}, {5}, {6}, {7})".format(self._vendor, self._valid_score,
self._doc_number, self._first_name,
self._last_name, self._local_name, self._DOB,
self._sex)
def _calc_score(self):
try:
self._valid_score = 0
if self._doc_number:
self._valid_score += 25
if self._DOB:
self._valid_score += 20
if self._DOE:
self._valid_score += 10
if self._local_name or self._local_name_GBK:
self._valid_score += 5
if self._last_name:
self._valid_score += 15
if self._first_name:
self._valid_score += 15
if self._sex:
self._valid_score += 10
if self._address:
self._valid_score += 5
if self._nationality:
self._valid_score += 5
except Exception as e:
logging.error('%s: Error when running function "%s", error = %s' % (self._vendor, sys._getframe().f_code.co_name, str(e)))
def to_dict(self):
# Converts this object to an (ordered) dictionary of field-value pairs.
result = OrderedDict()
result['valid_score'] = 0
result['vendor'] = self._vendor
try:
result['valid_score'] = self._valid_score
if self._valid_score > 0:
result['doc_number'] = self._doc_number.strip()
result['first_name'] = self._first_name.strip()
result['last_name'] = self._last_name.strip()
result['local_name'] = self._local_name.strip()
result['local_name_GBK'] = self._local_name_GBK.strip()
result['date_of_birth'] = self._DOB
result['sex'] = self._sex
result['country'] = self._country.strip()
result['nationality'] = self._nationality.strip()
result['date_of_expiry'] = self._DOE
result['address'] = self._address.strip()
result['doc_type'] = self._doc_type
result['mrz_type'] = self._mrz_type
result['mrz_line1'] = self._mrz_line1.strip()
result['mrz_line2'] = self._mrz_line2.strip()
result['mrz_line3'] = self._mrz_line3.strip()
except Exception as e:
logging.error('%s: Error when running function "%s", error = %s' % (self._vendor, sys._getframe().f_code.co_name, str(e)))
finally:
return result
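# Hedged usage sketch, not from the original source (values illustrative):
#   etl = doc_etl(img, 'Face++_Template', ocr_rslt, doc_type=1)
#   fields = etl.to_dict()  # OrderedDict: valid_score, doc_number, ...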
def _convCTC2GBK(self, ctcCode):
try:
local_name = ''
tmpValue = re.sub('[^0-9]', '', ctcCode)
if tmpValue.isdigit():
name_len = int(len(tmpValue) / 4)
for i in range(name_len):
temp_code = tmpValue[0 + i * 4:4 + i * 4]
try:
local_name = local_name + ''.join(ctc_code[temp_code])
except KeyError as e:
logging.error(self._vendor + ': Unable to convert CTC code (' + temp_code + ') to GBK')
except Exception as e:
logging.error('%s: Error when running function "%s", error = %s' % (self._vendor, sys._getframe().f_code.co_name, str(e)))
finally:
return local_name
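# A hedged note on _convCTC2GBK above: names on these documents are printed
# as Chinese Commercial (Telegraph) Codes, four digits per character, so an
# 8-digit code decodes to a two-character name, provided every 4-digit
# group exists in the ctc_code lookup table.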
def _parse_data(self):
try:
if self._doc_type in [0, 6, 7, 8, 9, 13]:
mrz_valid = self._getMrz(self._ocr_rslt)
if mrz_valid:
if self._mrz_type in ['CHNHK2014', 'CHNTW2014']:
self._recog_chn_hk_tw_2014(self._ocr_rslt)
elif self._mrz_type in ['CHNHK2000', 'CHNTW2000', 'TWCHN2000']:
self._recog_chn_hk_tw_2000(self._ocr_rslt)
elif self._doc_type in [5]:
self._recog_macau_id_old(self._ocr_rslt)
except Exception as e:
logging.error('%s: Error when running function "%s", error = %s' % (self._vendor, sys._getframe().f_code.co_name, str(e)))
def _parse_by_template(self):
try:
for key, value in self._ocr_rslt.items():
tmpValue = value.upper()
if key == 'DOB':
if self._date_format == '%d%b%Y':
DOB = re.sub('[^0-9A-Z]', '', tmpValue)
else:
DOB = re.sub('[^0-9]', '', tmpValue)
if self._check_date(DOB, self._date_format):
self._DOB = time.strftime('%Y-%m-%d', time.strptime(DOB, self._date_format))
elif key == 'expiryDate':
if self._date_format == '%d%b%Y':
DOE = re.sub('[^0-9A-Z]', '', tmpValue)
else:
DOE = re.sub('[^0-9]', '', tmpValue)
if self._doc_type == 17:
self._date_format = '%d%m%y'
if self._check_date(DOE, self._date_format):
self._DOE = time.strftime('%Y-%m-%d', time.strptime(DOE, self._date_format))
elif key == 'docNumber':
doc_num = tmpValue.replace('(', '(').replace(')', ')')
self._doc_number = re.sub('[^0-9A-Z()]', '', doc_num)
elif key == 'Sex':
if '女' in tmpValue or 'F' in tmpValue:
self._sex = 'F'
elif '男' in tmpValue or 'M' in tmpValue:
self._sex = 'M'
elif key == 'localName':
self._local_name = tmpValue.replace(' ', '')
elif key == 'localName_Last':
self._local_name_last = tmpValue.replace(' ', '')
elif key == 'localName_First':
self._local_name_first = tmpValue.replace(' ', '')
elif key == 'GBKCode':
self._local_name_GBK = self._convCTC2GBK(tmpValue)
elif key == 'GBKCode_Last':
self._GBKCode_last = re.sub('[^0-9]', '', tmpValue)
elif key == 'GBKCode_First':
self._GBKCode_first = re.sub('[^0-9]', '', tmpValue)
elif key == 'Name':
tmpValue = tmpValue.replace(',', ',')
eng_name = re.sub('[^A-Z ,]', '', tmpValue)
if ',' in eng_name:
name = eng_name.split(',')
else:
name = eng_name.split(' ')
if len(name):
if self._doc_type in [14]:
self._last_name = re.sub('[^A-Z ]', '', name[-1])
self._first_name = re.sub('[^A-Z ]', '', ' '.join(name[:-1]).strip())
else:
self._last_name = re.sub('[^A-Z ]', '', name[0])
self._first_name = re.sub('[^A-Z ]', '', ' '.join(name[1:]).strip())
elif key == 'lastName':
self._last_name = re.sub('[^A-Z ]', '', tmpValue)
elif key == 'firstName':
self._first_name = re.sub('[^A-Z ]', '', tmpValue)
elif key == 'address':
self._address = tmpValue
elif key == 'mrzLine1':
tmpValue = tmpValue.replace('く', '<').replace('≤', '<').replace('工', 'I').replace('エ', 'I').upper()
self._mrz_line1 = re.sub('[^0-9A-Z<]', '', tmpValue)
elif key == 'mrzLine2':
tmpValue = tmpValue.replace('く', '<').replace('≤', '<').replace('工', 'I').replace('エ', 'I').upper()
self._mrz_line2 = re.sub('[^0-9A-Z<]', '', tmpValue)
elif key == 'mrzLine3':
tmpValue = tmpValue.replace('く', '<').replace('≤', '<').replace('工', 'I').replace('エ', 'I').upper()
self._mrz_line3 = re.sub('[^0-9A-Z<]', '', tmpValue)
# Further extraction / validation by document type
if self._doc_type == 1:
if self._local_name:
self._last_name, self._first_name = ChineseName(self._local_name)
if self._doc_number:
# Extract Document ID
doc_num_list = []
doc_num_list.append(self._doc_number)
doc_number_1 = MRZOCRCleaner.apply(doc_num_list, formatType='CHNID')
doc_number_1 = ''.join(doc_number_1)
doc_number_1 = ''.join(re.findall(r"[0-9X]+", doc_number_1))
doc_number_2 = ''.join(re.findall(r"[0-9X]+", self._doc_number))
logging.info('doc_number_0 = ' + self._doc_number)
logging.info('doc_number_1 = ' + doc_number_1)
logging.info('doc_number_2 = ' + doc_number_2)
doc_number = None
if CHNIdNumber.verify_id(self._doc_number):
doc_number = self._doc_number
elif CHNIdNumber.verify_id(doc_number_1):
doc_number = doc_number_1
elif CHNIdNumber.verify_id(doc_number_2):
doc_number = doc_number_2
else:
logging.info('Invalid CHN ID')
if doc_number is not None:
self._doc_number = doc_number
self._DOB = CHNIdNumber(doc_number).get_birthday()
sex = CHNIdNumber(doc_number).get_sex()
if sex == 0:
self._sex = 'F'
elif sex == 1:
self._sex = 'M'
elif self._doc_type in [2, 3]:
if self._doc_number:
if self._validate_hk_id(self._doc_number):
pass
else:
self._doc_number = ''
elif self._doc_type == 4:
if self._local_name_last and self._local_name_first:
self._local_name = self._local_name_last + self._local_name_first
if self._GBKCode_last and self._GBKCode_first:
GBKCode = self._GBKCode_last + self._GBKCode_first
self._local_name_GBK = self._convCTC2GBK(GBKCode)
if self._doc_number:
self._doc_number = self._get_macau_id(self._doc_number)
elif self._doc_type == 6:
mrzString = []
mrzString.append(self._mrz_line1)
mrzString.append(self._mrz_line2)
mrzString.append(self._mrz_line3)
mrz_valid = self._getMrz(mrzString)
elif self._doc_type in [7, 9]:
mrzString = []
mrzString.append(self._mrz_line1)
mrz_valid = self._getMrz(mrzString)
elif self._doc_type in [8, 13]:
mrzString = []
mrzString.append(self._mrz_line1)
mrzString.append(self._mrz_line2)
mrz_valid = self._getMrz(mrzString)
except Exception as e:
logging.error('%s: Error when running function "%s", error = %s' % (self._vendor, sys._getframe().f_code.co_name, str(e)))
def _check_date(self, date, format='%d-%m-%Y'):
try:
__date = time.strptime(date, format)
# Accept only dates between 100 years in the past and 20 years in the future
__earliestYear = str(int(time.strftime("%Y")) - 100)
__earliestDate = time.strptime(__earliestYear + "0101", "%Y%m%d")
__futureYear = str(int(time.strftime("%Y")) + 20)
__futureDate = time.strptime(__futureYear + "0101", "%Y%m%d")
if __date >= __earliestDate and __date <= __futureDate:
return True
else:
return False
except ValueError:
return False
def _getMrz(self, ocr_string):
try:
mrz_list = []
if len(ocr_string) >= 5:
tempString = ocr_string[-5:]
else:
tempString = ocr_string
for item in tempString:
item = item.replace(' ', '').replace('く', '<').replace('≤', '<').replace('工', 'I').replace('エ', 'I').upper()
if len(item) > 44:
item = item[0:44]
if len(item) >= 28 and re.match('^[0-9A-Z<]+$', item):
mrz_list.append(item)
if self._doc_type == 7:
mrz_type = 'CHNHK2014'
elif self._doc_type == 8:
mrz_type = 'CHNHK2000'
elif self._doc_type == 9:
mrz_type = 'CHNTW2014'
elif self._doc_type == 13:
mrz_type = 'TWCHN2000'
else:
mrz_type = None
mrz_list = MRZOCRCleaner.apply(mrz_list, mrz_type)
mrz = MRZ(mrz_list, mrz_type)
logging.info(self._vendor + ": score = " + str(mrz.valid_score) + "; mrz_list = " + str(mrz_list))
if mrz.valid_score > 50:
self._doc_number = mrz.number.replace('<', '')
name = mrz.names.replace('<', '')
self._first_name = name
self._last_name = mrz.surname.replace('<', '')
if self._contain_zh(name):
self._local_name = name
self._last_name, self._first_name = ChineseName(self._local_name)
if len(mrz.date_of_birth) == 6:
if time.strptime(mrz.date_of_birth, '%y%m%d') > time.localtime(time.time()):
self._DOB = time.strftime('%Y-%m-%d', time.strptime('19' + mrz.date_of_birth, '%Y%m%d'))
else:
self._DOB = time.strftime('%Y-%m-%d', time.strptime(mrz.date_of_birth, '%y%m%d'))
elif len(mrz.date_of_birth) == 8:
self._DOB = time.strftime('%Y-%m-%d', time.strptime(mrz.date_of_birth, '%Y%m%d'))
else:
self._DOB = mrz.date_of_birth
if mrz.sex:
self._sex = mrz.sex
self._country = mrz.country
self._nationality = mrz.nationality.replace('<', '')
if len(mrz.expiration_date) == 6:
self._DOE = time.strftime('%Y-%m-%d', time.strptime(mrz.expiration_date, '%y%m%d'))
elif len(mrz.expiration_date) == 8:
self._DOE = time.strftime('%Y-%m-%d', time.strptime(mrz.expiration_date, '%Y%m%d'))
else:
self._DOE = mrz.expiration_date
self._mrz_type = mrz.mrz_type
self._mrz_line1 = mrz.mrz_line1
self._mrz_line2 = mrz.mrz_line2
self._mrz_line3 = mrz.mrz_line3
self._valid_score = mrz.valid_score
if mrz.mrz_type == 'TD1' and mrz.country == 'MAC':
self._doc_number = '%s(%s)' % (self._doc_number[0:7], self._doc_number[-1])
return True
else:
return False
except Exception as e:
logging.error('%s: Error when running function "%s", error = %s' % (self._vendor, sys._getframe().f_code.co_name, str(e)))
return False
'''
======================================
    Exit-Entry Permit for HK/Macau, 2014 edition (card format)
    Exit-Entry Permit for Taiwan, 2014 edition (card format)
======================================
'''
def _recog_chn_hk_tw_2014(self, ocr_string):
try:
logging.info('Trying to recognize as CHN TO HK/MACAU/TW ENTRY CARD ...')
eng_name = ''
zh_name_found = False
for i in range(1, 4):
tempName = ocr_string[i]
temp_str_1 = ''.join(re.findall(r"[0-9a-zA-Z]+", tempName))
                doc_num_list = [temp_str_1]
                doc_number_1 = MRZOCRCleaner.apply(doc_num_list, formatType='CHNHK2014ID')
doc_number_1 = ''.join(doc_number_1)
if self._doc_number in tempName or self._doc_number in doc_number_1:
pass
elif self._contain_zh(tempName) and not zh_name_found:
tempName = tempName.replace(' ', '')
tempName = re.findall(r'[\u4e00-\u9fa5]', tempName)
tempName = ''.join(tempName)
self._local_name = tempName
zh_name_found = True
elif self._validate_eng_name(tempName):
eng_name = eng_name + tempName
if len(eng_name):
eng_name = eng_name.replace(',', ' ').replace('.', ' ').replace('/', ' ')
name = eng_name.split(' ')
if len(name):
self._last_name = re.sub('[^a-zA-Z ]', '', name[0].strip())
self._first_name = re.sub('[^a-zA-Z ]', '', ' '.join(name[1:]).strip())
if zh_name_found:
self._last_name, self._first_name = ChineseName(self._local_name)
if len(self._last_name.strip()) and len(self._first_name.strip()):
self._valid_score += 10
if len(ocr_string) >= 3:
for temp_sex in ocr_string[3:]:
if '女' in temp_sex:
self._sex = 'F'
break
elif '男' in temp_sex:
self._sex = 'M'
break
except Exception as e:
logging.error('%s: Error when running function "%s", error = %s' % (self._vendor, sys._getframe().f_code.co_name, str(e)))
finally:
logging.info(self.to_dict())
'''
======================================
    Exit-Entry Permit for HK/Macau, 2000 edition (booklet format)
    Mainland Resident Travel Permit to Taiwan, v2000
    Taiwan Resident Travel Permit to the Mainland, v2000
======================================
'''
def _recog_chn_hk_tw_2000(self, ocr_string):
try:
eng_name = ''
for i in range(1, 5):
tempName = ocr_string[i]
if self._validate_eng_name(tempName):
eng_name = eng_name + tempName
if len(eng_name):
eng_name = eng_name.replace(',', ' ').replace('.', ' ').replace('/', ' ')
name = eng_name.split(' ')
if len(name):
self._last_name = re.sub('[^a-zA-Z ]', '', name[0].strip())
self._first_name = re.sub('[^a-zA-Z ]', '', ' '.join(name[1:]).strip())
except Exception as e:
logging.error('%s: Error when running function "%s", error = %s' % (self._vendor, sys._getframe().f_code.co_name, str(e)))
def _get_macau_id(self, id_num):
try:
doc_num = id_num.replace('(', '(').replace(')', ')')
doc_num = doc_num[-10:]
            doc_num_list = [doc_num]
            doc_num = MRZOCRCleaner.apply(doc_num_list, formatType='MACID')
doc_num = re.sub('[^0-9()]', '', ''.join(doc_num))
if len(doc_num) == 10:
return doc_num
else:
return ''
except Exception as e:
logging.error('%s: Error when running function "%s", error = %s' % (self._vendor, sys._getframe().f_code.co_name, str(e)))
return ''
'''
======================================
    Macau resident ID card (card format)
======================================
'''
def _recog_macau_id_old(self, ocr_string):
try:
            logging.info('Trying to recognize as MACAU ID - OLD ...')
self._country = 'CHN'
# Extract Doc ID
doc_num = ''.join(ocr_string[-1]).strip()
self._doc_number = self._get_macau_id(doc_num)
# Extract Name
tempString = ''.join(re.findall(r"\d+", ocr_string[4])) + ''.join(re.findall(r"\d+", ocr_string[8]))
self._local_name = self._convCTC2GBK(tempString)
# Extract Sex
sex_list = ['男M', '女F']
sex_idx = -1
for idx in range(len(ocr_string)):
                for key in sex_list:
tempString = ocr_string[idx].upper()
if key in tempString:
sex_idx = idx
sex = re.sub('[^a-zA-Z]', '', tempString).upper()
sex = sex[-1]
if sex in ('F', 'M'):
self._sex = sex
break
# Extract DOB and Expiry Date
if sex_idx > 0:
DOB = ''.join(re.findall(r"\d+", ocr_string[sex_idx - 2]))
DOE = ''.join(re.findall(r"\d+", ocr_string[sex_idx + 4]))
if self._check_date(DOB, '%d%m%Y'):
self._DOB = time.strftime('%Y-%m-%d', time.strptime(DOB, '%d%m%Y'))
if self._check_date(DOE, '%d%m%Y'):
self._DOE = time.strftime('%Y-%m-%d', time.strptime(DOE, '%d%m%Y'))
return True
else:
return False
except Exception as e:
logging.error('%s: Error when running function "%s", error = %s' % (self._vendor, sys._getframe().f_code.co_name, str(e)))
finally:
logging.info(self.to_dict())
def _contain_zh(self, word):
        '''
        Check whether the given string contains Chinese characters.
        :param word: the string to check
        :return: True if it contains Chinese characters, False otherwise
        '''
try:
zh_pattern = re.compile(u'[\u4e00-\u9fff]+')
# word = word.decode()
match = zh_pattern.search(word)
            return match is not None
except Exception as e:
logging.error('%s: Error when running function "%s", error = %s' % (self._vendor, sys._getframe().f_code.co_name, str(e)))
return False
'''
======================================
    Hong Kong resident ID card number validation
======================================
'''
def _validate_hk_id(self, id_number):
"""Verify function
>>> _validate_hk_id('C123456(9)')
True
>>> _validate_hk_id('AY987654(A)')
False
"""
def char2digit(character):
"""Convert character to digit
>>> char2digit('A')
1
>>> char2digit('Z')
26
>>> char2digit('1')
1
"""
if len(character) != 1:
return None
if character.isupper():
return ord(character) - ord('A') + 1
elif character.isdigit():
return int(character)
else:
                return None
def accumulate(reversed_number_list):
"""Accumulate
>>> accumulate([6, 5, 4, 3, 2, 1, 7])
133
>>> accumulate([5, 5, 5, 5, 5, 5, 12])
231
"""
REVERSED_WEIGHT = [2, 3, 4, 5, 6, 7, 8, 9]
            total = 0
            for idx in range(0, len(reversed_number_list)):
                total += reversed_number_list[idx] * REVERSED_WEIGHT[idx]
            return total
FORMAT_REGEX = re.compile('(([A-Z]{1,2})([0-9]{6}))\(([0-9]|A)\)')
matched = FORMAT_REGEX.search(id_number)
if matched is None:
return False
matched_groups = matched.groups()
character = matched_groups[1]
digit = matched_groups[2]
check_digit = matched_groups[3]
reversed_number_list = []
for item in reversed(list(character + digit)):
reversed_number_list.append(char2digit(item))
        total = accumulate(reversed_number_list)
        total += int(check_digit, base=16)
        remainder = divmod(total, 11)[1]
return (remainder == 0)
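    # Worked example (our annotation, not part of the original code): for
    # 'C123456(9)', the reversed characters map to [6, 5, 4, 3, 2, 1, C->3];
    # the weighted sum is 6*2 + 5*3 + 4*4 + 3*5 + 2*6 + 1*7 + 3*8 = 101, plus
    # the check digit 9 gives 110, and 110 % 11 == 0, so the ID validates.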
| 38.545953 | 134 | 0.478221 | 3,147 | 28,100 | 4.004131 | 0.104862 | 0.037854 | 0.022697 | 0.013412 | 0.481152 | 0.409967 | 0.336243 | 0.263868 | 0.239981 | 0.222601 | 0 | 0.028856 | 0.382135 | 28,100 | 728 | 135 | 38.598901 | 0.696694 | 0.022811 | 0 | 0.283582 | 0 | 0.003731 | 0.102499 | 0.001386 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031716 | false | 0.007463 | 0.014925 | 0.001866 | 0.089552 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bd3506d46da04e4e4dbb22df73bd65177767e36 | 4,238 | py | Python | MidControl.py | qchen59/CSC520_Pente_AI | fa2c3c530ccd59f442c4e448787ef066165a4906 | [
"Apache-2.0"
] | null | null | null | MidControl.py | qchen59/CSC520_Pente_AI | fa2c3c530ccd59f442c4e448787ef066165a4906 | [
"Apache-2.0"
] | 1 | 2022-03-31T23:06:59.000Z | 2022-03-31T23:06:59.000Z | MidControl.py | qchen59/CSC520_Pente_AI | fa2c3c530ccd59f442c4e448787ef066165a4906 | [
"Apache-2.0"
] | 1 | 2022-03-31T22:07:46.000Z | 2022-03-31T22:07:46.000Z | import ConsecutivePieces as Cp
import math
import copy
def mid_control_streaks(board, turn):
"""
    Uses the 'calculate_streaks' method to work out the heuristic values. The heuristic values of the pieces that
    are in the middle 5x5 area of the board are then doubled, which also bumps up the final score.
:param board: a Pente game board
:param turn: either 1 or 2, depending on if the 1st or 2nd player is wanting to place a piece
:return: current board, board with heuristic values, and a score that is calculated based on the heuristic
"""
board, heuristics, score = Cp.calculate_streaks(board, turn)
size = len(board)
if size < 6: # No point calculating this heuristic for smaller boards
return board, heuristics, score
mid = math.ceil(size / 2) # Getting the middle point of the board
for i in range(mid - 3, mid + 2):
for j in range(mid - 3, mid + 2):
if heuristics[i][j] > 2: # Doubling the score of the heuristic values that are in
heuristics[i][j] = 2 * heuristics[i][j] # the middle of the board.
score = 0
for i in range(size):
for j in range(size):
if heuristics[i][j] > 2:
score += heuristics[i][j]
return board, heuristics, score
def mid_control_pieces(board, turn):
"""
Calculates a heuristic score based on the placements inside the middle 5x5 area of the board. For every turn's piece
    the score is increased by one. For every opponent's piece, the score is decreased by one.
Each node adjacent to a turn player's piece, will get a heuristic value of either 3 or 6. If the piece is inside the
middle 5x5 area the value is 6. Otherwise 3.
:param board: a Pente game board
:param turn: either 1 or 2, depending on if the 1st or 2nd player is wanting to place a piece
:return: current board, board with heuristic values, and a score that is calculated based on the heuristic
"""
size = len(board)
heuristics = copy.deepcopy(board)
opponent = 2 if turn == 1 else 1 # Getting the opponent
mid = math.ceil(size / 2) # Getting the middle point of the board
score = 0
# Calculating heuristics of the middle 5x5 area
for i in range(mid - 3, mid + 2):
for j in range(mid - 3, mid + 2):
if board[i][j] == turn:
score += 1
for axis in [(1, 0), (0, 1), (1, 1), (1, -1)]: # Four axes that we consider
if mid - 3 <= i - axis[0] < mid + 2 and mid - 3 <= j - axis[1] < mid + 2:
node = board[i - axis[0]][j - axis[1]]
if node != turn and node != opponent:
heuristics[i - axis[0]][j - axis[1]] = 6
if mid - 3 <= i + axis[0] < mid + 2 and mid - 3 <= j + axis[1] < mid + 2:
node = board[i + axis[0]][j + axis[1]]
if node != turn and node != opponent:
heuristics[i + axis[0]][j + axis[1]] = 6
elif board[i][j] == opponent:
score -= 1
# Calculating heuristics of the rest of the board
for i in range(size):
for j in range(size):
if mid - 3 <= i < mid + 2 or mid - 3 <= j < mid + 2:
continue
if board[i][j] == turn:
for axis in [(1, 0), (0, 1), (1, 1), (1, -1)]: # Four axes that we consider
if 0 <= i - axis[0] < size and 0 <= j - axis[1] < size:
node = board[i - axis[0]][j - axis[1]]
if node != turn and node != opponent:
heuristics[i - axis[0]][j - axis[1]] = 3
if 0 <= i + axis[0] < size and 0 <= j + axis[1] < size:
node = board[i + axis[0]][j + axis[1]]
if node != turn and node != opponent:
heuristics[i + axis[0]][j + axis[1]] = 3
return board, heuristics, score
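# Minimal usage sketch (annotation, not part of the original module): a Pente
# board is a square list of lists with 0 = empty, 1 = player 1, 2 = player 2.
if __name__ == '__main__':
    demo_board = [[0] * 19 for _ in range(19)]
    demo_board[9][9] = 1
    _, demo_heuristics, demo_score = mid_control_pieces(demo_board, turn=1)
    print(demo_score)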
| 47.088889 | 120 | 0.522888 | 606 | 4,238 | 3.646865 | 0.176568 | 0.027149 | 0.032579 | 0.031674 | 0.560181 | 0.514932 | 0.514932 | 0.486878 | 0.486878 | 0.486878 | 0 | 0.038462 | 0.380368 | 4,238 | 89 | 121 | 47.617978 | 0.803123 | 0.337659 | 0 | 0.563636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036364 | false | 0 | 0.054545 | 0 | 0.145455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bd438755efdfb68b94d3ed6b17ddefd4e151daa | 1,237 | py | Python | mpi/python/intercomm.py | tyohei/examples | 38652f48aca2b668bcc116ba401795d4be2f8f18 | [
"MIT"
] | 1 | 2020-09-14T17:29:02.000Z | 2020-09-14T17:29:02.000Z | mpi/python/intercomm.py | tyohei/examples | 38652f48aca2b668bcc116ba401795d4be2f8f18 | [
"MIT"
] | 8 | 2020-09-05T10:19:39.000Z | 2021-05-07T10:04:27.000Z | mpi/python/intercomm.py | tyohei/examples | 38652f48aca2b668bcc116ba401795d4be2f8f18 | [
"MIT"
] | null | null | null | from mpi4py import MPI
import common
def main():
comm = MPI.COMM_WORLD
print_mpi = common.create_print_mpi(comm)
print_mpi('Hello World!')
if comm.rank == 0 or comm.rank == 1:
intracomm = comm.Split(color=0, key=comm.rank)
else:
intracomm = comm.Split(color=1, key=comm.rank)
print_mpi('Hello Intra World!: [{}/{}]'.format(intracomm.rank,
intracomm.size))
if comm.rank == 0 or comm.rank == 1:
remote_leader = 2 # Rank in MPI_COMM_WORLD
local_leader = 1 # Rank in intracomm
else:
remote_leader = 1 # Rank in MPI_COMM_WORLD
local_leader = 0 # Rank in intracomm
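    # Annotation (not in the original): Create_intercomm bridges the two disjoint
    # intracomms. local_leader is a rank inside *this* intracomm, while
    # remote_leader is the other group's leader given as a rank in the peer
    # communicator (MPI.COMM_WORLD here); both sides must agree on the pairing.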
intercomm = intracomm.Create_intercomm(
local_leader, MPI.COMM_WORLD, remote_leader)
print_mpi('Hello Inter World!: [{}/{}]'.format(intercomm.rank,
intercomm.size))
if comm.rank == 0 or comm.rank == 1:
send_buf = 0
root = MPI.ROOT if intercomm.rank == local_leader else MPI.PROC_NULL
else:
send_buf = 16
root = 1
recv_buf = intercomm.reduce(send_buf, root=root)
print(recv_buf)
if __name__ == '__main__':
main()
| 27.488889 | 76 | 0.579628 | 160 | 1,237 | 4.26875 | 0.25625 | 0.093704 | 0.070278 | 0.048316 | 0.193265 | 0.193265 | 0.193265 | 0.108346 | 0.076135 | 0 | 0 | 0.020047 | 0.31447 | 1,237 | 44 | 77 | 28.113636 | 0.785377 | 0.065481 | 0 | 0.1875 | 0 | 0 | 0.064292 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.0625 | 0 | 0.09375 | 0.15625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bd6be2fc7c5b1b45e6116789f9a3be537751a34 | 6,970 | py | Python | data/gta5_dataset.py | Jo-wang/ProDA | 58910b1fe2bdbf79d0e12708b77b6df4f386bb49 | [
"MIT"
] | 193 | 2021-03-25T07:29:56.000Z | 2022-03-30T08:56:44.000Z | caco_proda_finetune/data/gta5_dataset.py | jxhuang0508/CaCo | 0106d93fd6277ca843572a6aa01bdf2d1caca117 | [
"MIT"
] | 43 | 2021-04-13T02:13:18.000Z | 2022-03-31T11:14:58.000Z | caco_proda_finetune/data/gta5_dataset.py | jxhuang0508/CaCo | 0106d93fd6277ca843572a6aa01bdf2d1caca117 | [
"MIT"
] | 35 | 2021-03-26T09:29:58.000Z | 2022-01-20T17:40:08.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import sys
import torch
import numpy as np
import scipy.misc as m
import matplotlib.pyplot as plt
import matplotlib.image as imgs
from PIL import Image
import random
import scipy.io as io
from tqdm import tqdm
from scipy import stats
from torch.utils import data
from data import BaseDataset
from data.randaugment import RandAugmentMC
class GTA5_loader(BaseDataset):
"""
GTA5 synthetic dataset
for domain adaptation to Cityscapes
"""
colors = [ # [ 0, 0, 0],
[128, 64, 128],
[244, 35, 232],
[70, 70, 70],
[102, 102, 156],
[190, 153, 153],
[153, 153, 153],
[250, 170, 30],
[220, 220, 0],
[107, 142, 35],
[152, 251, 152],
[0, 130, 180],
[220, 20, 60],
[255, 0, 0],
[0, 0, 142],
[0, 0, 70],
[0, 60, 100],
[0, 80, 100],
[0, 0, 230],
[119, 11, 32],
]
label_colours = dict(zip(range(19), colors))
def __init__(self, opt, logger, augmentations=None):
self.opt = opt
self.root = opt.src_rootpath
self.split = 'all'
self.augmentations = augmentations
self.randaug = RandAugmentMC(2, 10)
self.n_classes = 19
self.img_size = (1914, 1052)
        self.mean = [0.0, 0.0, 0.0]  # TODO: calculate the mean value of the RGB channels on GTA5
self.image_base_path = os.path.join(self.root, 'images')
self.label_base_path = os.path.join(self.root, 'labels')
splits = io.loadmat(os.path.join(self.root, 'split.mat'))
if self.split == 'all':
ids = np.concatenate((splits['trainIds'][:,0], splits['valIds'][:,0], splits['testIds'][:,0]))
elif self.split == 'train':
ids = splits['trainIds'][:,0]
elif self.split == 'val':
ids = splits['valIds'][:200,0]
elif self.split == 'test':
ids = splits['testIds'][:,0]
self.ids = []
for i in range(len(ids)):
self.ids.append(os.path.join(self.label_base_path, str(i+1).zfill(5) + '.png'))
self.void_classes = [0, 1, 2, 3, 4, 5, 6, 9, 10, 14, 15, 16, 18, 29, 30, 34, -1]
self.valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33,]
self.class_names = ["unlabelled","road","sidewalk","building","wall","fence","pole","traffic_light",
"traffic_sign","vegetation","terrain","sky","person","rider","car","truck","bus","train",
"motorcycle","bicycle",]
self.ignore_index = 250
self.class_map = dict(zip(self.valid_classes, range(19)))
if len(self.ids) == 0:
raise Exception(
"No files for style=[%s] found in %s" % (self.split, self.image_base_path)
)
print("Found {} {} images".format(len(self.ids), self.split))
def __len__(self):
return len(self.ids)
def __getitem__(self, index):
"""__getitem__
param: index
"""
id = self.ids[index]
if self.split != 'all' and self.split != 'val':
filename = '{:05d}.png'.format(id)
img_path = os.path.join(self.image_base_path, filename)
lbl_path = os.path.join(self.label_base_path, filename)
else:
img_path = os.path.join(self.image_base_path, id.split('/')[-1])
lbl_path = id
img = Image.open(img_path)
lbl = Image.open(lbl_path)
img = img.resize(self.img_size, Image.BILINEAR)
lbl = lbl.resize(self.img_size, Image.NEAREST)
img = np.asarray(img, dtype=np.uint8)
lbl = np.asarray(lbl, dtype=np.uint8)
lbl = self.encode_segmap(np.array(lbl, dtype=np.uint8))
input_dict = {}
        if self.augmentations is not None:
img, lbl, _, _, _ = self.augmentations(img, lbl)
img_strong, params = self.randaug(Image.fromarray(img))
img_strong, _ = self.transform(img_strong, lbl)
input_dict['img_strong'] = img_strong
input_dict['params'] = params
img, lbl = self.transform(img, lbl)
input_dict['img'] = img
input_dict['label'] = lbl
input_dict['img_path'] = self.ids[index]
return input_dict
def encode_segmap(self, lbl):
for _i in self.void_classes:
lbl[lbl == _i] = self.ignore_index
for _i in self.valid_classes:
lbl[lbl == _i] = self.class_map[_i]
return lbl
def decode_segmap(self, temp):
r = temp.copy()
g = temp.copy()
b = temp.copy()
for l in range(0, self.n_classes):
r[temp == l] = self.label_colours[l][0]
g[temp == l] = self.label_colours[l][1]
b[temp == l] = self.label_colours[l][2]
rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
rgb[:, :, 0] = r / 255.0
rgb[:, :, 1] = g / 255.0
rgb[:, :, 2] = b / 255.0
return rgb
def transform(self, img, lbl):
"""transform
img, lbl
"""
img = np.array(img)
# img = img[:, :, ::-1] # RGB -> BGR
img = img.astype(np.float64)
img -= self.mean
img = img.astype(float) / 255.0
img = img.transpose(2, 0, 1)
classes = np.unique(lbl)
lbl = np.array(lbl)
lbl = lbl.astype(float)
# lbl = m.imresize(lbl, self.img_size, "nearest", mode='F')
lbl = lbl.astype(int)
if not np.all(classes == np.unique(lbl)):
print("WARN: resizing labels yielded fewer classes") #TODO: compare the original and processed ones
if not np.all(np.unique(lbl[lbl != self.ignore_index]) < self.n_classes):
print("after det", classes, np.unique(lbl))
raise ValueError("Segmentation map contained invalid class values")
img = torch.from_numpy(img).float()
lbl = torch.from_numpy(lbl).long()
return img, lbl
def get_cls_num_list(self):
cls_num_list = np.array([16139327127, 4158369631, 8495419275, 927064742, 318109335,
532432540, 67453231, 40526481, 3818867486, 1081467674,
6800402117, 182228033, 15360044, 1265024472, 567736474,
184854135, 32542442, 15832619, 2721193])
# cls_num_list = np.zeros(self.n_classes, dtype=np.int64)
# for n in range(len(self.ids)):
# lbl = Image.open(self.ids[n])
# lbl = lbl.resize(self.img_size, Image.NEAREST)
# lbl = np.asarray(lbl, dtype=np.uint8)
# lbl = self.encode_segmap(np.array(lbl, dtype=np.uint8))
# for i in range(self.n_classes):
# cls_num_list[i] += (lbl == i).sum()
return cls_num_list
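    # Annotation (not part of the original loader): a common use of these pixel
    # counts is inverse-frequency class weighting for the segmentation loss, e.g.
    #   counts = loader.get_cls_num_list().astype(np.float64)
    #   weights = counts.sum() / (len(counts) * counts)
    # which down-weights dominant classes such as road and building.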
| 34.50495 | 114 | 0.545194 | 915 | 6,970 | 4.039344 | 0.310383 | 0.006494 | 0.005682 | 0.026515 | 0.143939 | 0.120671 | 0.102814 | 0.074134 | 0.055195 | 0.036797 | 0 | 0.09403 | 0.31033 | 6,970 | 201 | 115 | 34.676617 | 0.674849 | 0.106456 | 0 | 0 | 0 | 0 | 0.068067 | 0 | 0 | 0 | 0 | 0.00995 | 0 | 1 | 0.047945 | false | 0 | 0.10274 | 0.006849 | 0.212329 | 0.020548 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bd8344c5079ad7625f96055dc71a256fa657bdc | 14,857 | py | Python | neural-net/neural-net.py | MrTeuthis/traffic-toronto | 9bf33ddc437ec138e78539f35260ec46761f0fd2 | [
"MIT"
] | null | null | null | neural-net/neural-net.py | MrTeuthis/traffic-toronto | 9bf33ddc437ec138e78539f35260ec46761f0fd2 | [
"MIT"
] | null | null | null | neural-net/neural-net.py | MrTeuthis/traffic-toronto | 9bf33ddc437ec138e78539f35260ec46761f0fd2 | [
"MIT"
] | null | null | null | import tensorflow as tf
import random as rand
import numpy as np
import pickle
import random
import argparse
import pprint
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib.animation import FuncAnimation
import sys
parser = argparse.ArgumentParser()
parser.add_argument("--train", help="train the neural net", action="store_true")
parser.add_argument("--feedforward", help="run the neural net on data", action="store_true")
parser.add_argument("--input", help="the input of the neural net (a .pkl file)", action="store")
parser.add_argument("--output", help="the output of the neural net (a .pkl file)", action="store")
parser.add_argument("--weather", help="make weather gifs", action="store_true")
parser.add_argument("--time", help="make week chart", action="store_true")
args=parser.parse_args()
if (args.train and (args.feedforward or args.input or args.output)) or ((args.feedforward or args.input or args.output) and not (args.feedforward and args.input and args.output)):
print("invalid arguments")
exit(1)
# To suppress debug output, run python or python3 with the -O option:
#$ python -O tfmat.py
# PLEASE UPDATE THESE AS REQUIRED before trying to run.
# Number of features in the input.
# This has *nothing to do* with the number of training examples.
NUM_IN = 8
# Number of nodes in the hidden layer.
NUM_HIDDEN = 5
# Number of features in the output.
NUM_OUT = 2
# The learning rate of the neural network.
LEARNING_RATE = 0.3
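# Annotation (not in the original): with these settings the network below computes
#   H1 = sigmoid(x @ W1 + b1)   # [batch, 8] -> [batch, 5]
#   y  = H1 @ W2 + b2           # [batch, 5] -> [batch, 2]
# and is trained on mean-squared error with vanilla gradient descent.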
def stringify_tensors(labels: list, tensors: list, sep='\n'):
return sep.join([str(label) + ': ' + str(tensor) for label, tensor in zip(labels, tensors)])
def visualize_tensors(labels: list, tensors: list, start='', end='\n'):
return start + stringify_tensors(labels, tensors) + end
with tf.name_scope('in'):
x = tf.placeholder(tf.float32, [None, NUM_IN], name='x') #inputs
y_ = tf.placeholder(tf.float32, [None, NUM_OUT], name='true_ys')
if args.train:
with tf.name_scope('hidden1'):
with tf.name_scope('weights'):
W1 = tf.Variable(tf.random_uniform([NUM_IN, NUM_HIDDEN], -1.0)) #weights of hiddens
with tf.name_scope('biases'):
b1 = tf.Variable(tf.random_uniform([NUM_HIDDEN], -1.0)) #biases
with tf.name_scope('preacts'):
pre1 = tf.matmul(x, W1) + b1 #pre-activations of hiddens
with tf.name_scope('acts'):
H1 = tf.sigmoid(pre1) #activations of hiddens
with tf.name_scope('out'):
with tf.name_scope('weights'):
W2 = tf.Variable(tf.random_uniform([NUM_HIDDEN, NUM_OUT], -1.0)) #weights of outs
with tf.name_scope('biases'):
b2 = tf.Variable(tf.random_uniform([NUM_OUT], -1.0)) #biases of outs
with tf.name_scope('preacts'):
pre2 = tf.matmul(H1, W2) + b2 #pre-activations of outs
with tf.name_scope('acts'):
y = tf.identity(pre2) #activations of outs
with tf.name_scope('cost'):
mse_cost = tf.losses.mean_squared_error(y_, y)
train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(mse_cost)
#get data from pickle
training_inputs = pickle.load(open("data/training-inputs.pkl","rb"))
training_outputs = pickle.load(open("data/training-outputs.pkl","rb"))
with tf.Session() as sess:
tf.global_variables_initializer().run()
iteration = 1
while True:
try:
batch_xs, batch_ys = training_inputs, training_outputs
_, cost, yr, W2r, b2r, H1r, W1r, b1r, xr = sess.run(
(train_step, mse_cost, y, W2, b2, H1, W1, b1, x),
feed_dict={x: batch_xs, y_: batch_ys}
)
print("iteration " + str(iteration))
iteration += 1
print(visualize_tensors(["COST"],[cost]))
except KeyboardInterrupt:
break
#dump weights to pickle
print("\ndumping weights to pickle")
pickle.dump(W1r, open("weights/W1r.pkl","wb"))
pickle.dump(W2r, open("weights/W2r.pkl","wb"))
pickle.dump(b1r, open("weights/b1r.pkl","wb"))
pickle.dump(b2r, open("weights/b2r.pkl","wb"))
if args.feedforward:
data_input = pickle.load(open(args.input,"rb"))
data_output = pickle.load(open(args.output,"rb"))
W1r = pickle.load(open("weights/W1r.pkl","rb"))
W2r = pickle.load(open("weights/W2r.pkl","rb"))
b1r = pickle.load(open("weights/b1r.pkl","rb"))
b2r = pickle.load(open("weights/b2r.pkl","rb"))
with tf.name_scope('hidden1'):
with tf.name_scope('weights'):
W1 = tf.Variable(W1r) #weights of hiddens
with tf.name_scope('biases'):
b1 = tf.Variable(b1r) #biases
with tf.name_scope('preacts'):
pre1 = tf.matmul(x, W1) + b1 #pre-activations of hiddens
with tf.name_scope('acts'):
H1 = tf.sigmoid(pre1) #activations of hiddens
with tf.name_scope('out'):
with tf.name_scope('weights'):
W2 = tf.Variable(W2r) #weights of outs
with tf.name_scope('biases'):
b2 = tf.Variable(b2r) #biases of outs
with tf.name_scope('preacts'):
pre2 = tf.matmul(H1, W2) + b2 #pre-activations of outs
with tf.name_scope('acts'):
y = tf.identity(pre2) #activations of outs
with tf.name_scope('cost'):
mse_cost = tf.losses.mean_squared_error(y_, y)
with tf.Session() as sess:
tf.global_variables_initializer().run()
y_out, cost = sess.run((y, mse_cost), feed_dict={x:data_input, y_:data_output})
print(visualize_tensors(["cost"], [cost]))
print("regularized maximums")
print(np.amax(y_out,0))
print("regularized minimums")
print(np.amin(y_out,0))
print("de-regularizing data")
max_values = pickle.load(open("data/max-values.pkl","rb"))[8:10]
min_values = pickle.load(open("data/min-values.pkl","rb"))[8:10]
regularization = [-1,1]
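        # Annotation (not in the original): the loops below invert min-max scaling
        # to [-1, 1]: original = (scaled - lo) / (hi - lo) * (max - min) + min.
        # For example, scaled 0.0 with min=10 and max=30 maps back to 20.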
for i in range(len(y_out)):
for j in range(len(y_out[0])):
                y_out[i][j] = (y_out[i][j] - regularization[0]) / (regularization[1] - regularization[0]) * (max_values[j] - min_values[j]) + min_values[j]
                data_output[i][j] = (data_output[i][j] - regularization[0]) / (regularization[1] - regularization[0]) * (max_values[j] - min_values[j]) + min_values[j]
print ("previous maximums")
print(max_values)
print("maximum outputs")
print(np.amax(y_out,0))
print("previous minimums")
print(min_values)
print("minimum outputs")
print(np.amin(y_out,0))
difference = np.absolute(y_out - data_output)
print("average absolute difference")
print(np.average(difference, 0))
print("standard deviation of each output")
print(np.std(y_out - data_output,0))
print("max difference")
print(np.ndarray.max(difference, 0))
numBins = 100
print("histogram for differences of relative time")
histogram = np.histogram(difference[:,0],bins=numBins)
for i in range(numBins):
print(str(histogram[1][i]) + " " + str(histogram[0][i]))
print("histogram for differences of volume")
histogram = np.histogram(difference[:,1],bins=numBins)
for i in range(numBins):
print(str(histogram[1][i]) + " " + str(histogram[0][i]))
if args.weather:
W1r = pickle.load(open("weights/W1r.pkl","rb"))
W2r = pickle.load(open("weights/W2r.pkl","rb"))
b1r = pickle.load(open("weights/b1r.pkl","rb"))
b2r = pickle.load(open("weights/b2r.pkl","rb"))
data_list = []
numSteps = 30
temperature = [0] * (numSteps + 1)
dewPoint = [0] * (numSteps + 1)
humidity = [0] * (numSteps + 1)
for i in range(numSteps+1):
temperature[i] = i/numSteps * 2 - 1
for j in range(numSteps+1):
dewPoint[j] = j/numSteps * 2 - 1
for k in range(numSteps+1):
humidity[k] = k/numSteps * 2 - 1
data_list.append([0,0,0.5,0,0,temperature[i], dewPoint[j], humidity[k]])
data_input = np.asarray(data_list)
with tf.name_scope('hidden1'):
with tf.name_scope('weights'):
W1 = tf.Variable(W1r) #weights of hiddens
with tf.name_scope('biases'):
b1 = tf.Variable(b1r) #biases
with tf.name_scope('preacts'):
pre1 = tf.matmul(x, W1) + b1 #pre-activations of hiddens
with tf.name_scope('acts'):
H1 = tf.sigmoid(pre1) #activations of hiddens
with tf.name_scope('out'):
with tf.name_scope('weights'):
W2 = tf.Variable(W2r) #weights of outs
with tf.name_scope('biases'):
b2 = tf.Variable(b2r) #biases of outs
with tf.name_scope('preacts'):
pre2 = tf.matmul(H1, W2) + b2 #pre-activations of outs
with tf.name_scope('acts'):
y = tf.identity(pre2) #activations of outs
with tf.name_scope('cost'):
mse_cost = tf.losses.mean_squared_error(y_, y)
with tf.Session() as sess:
tf.global_variables_initializer().run()
y_out = sess.run(y, feed_dict={x:data_input})
print("de-regularizing data")
max_values = pickle.load(open("data/max-values.pkl","rb"))
min_values = pickle.load(open("data/min-values.pkl","rb"))
regularization = [-1,1]
for i in range(len(y_out)):
for j in range(len(y_out[0])):
y_out[i][j] = (y_out[i][j] - regularization[0]) / (regularization[1] - regularization[0]) * (max_values[j+8] - min_values[j+8]) + min_values[j+8]
for i in range(len(data_input)):
for j in range(len(data_input[0])):
data_input[i][j] = (data_input[i][j] - regularization[0]) / (regularization[1] - regularization[0]) * (max_values[j] - min_values[j]) + min_values[j]
for i in range(len(temperature)):
temperature[i] = (temperature[i] - regularization[0]) / (regularization[1] - regularization[0]) * (max_values[5] - min_values[5]) + min_values[5]
dewPoint[i] = (dewPoint[i] - regularization[0]) / (regularization[1] - regularization[0]) * (max_values[6] - min_values[6]) + min_values[6]
humidity[i] = (humidity[i] - regularization[0]) / (regularization[1] - regularization[0]) * (max_values[7] - min_values[7]) + min_values[7]
index = 0
weather_outputs = np.zeros((numSteps+1, numSteps+1, numSteps+1, 2))
for i in range(numSteps+1):
for j in range(numSteps+1):
for k in range(numSteps+1):
weather_outputs[i][j][k] = y_out[index]
index += 1
fig = plt.figure()
ax = fig.gca(projection='3d')
X, Y = np.meshgrid(dewPoint, humidity)
ax.set_zlim(np.amin(y_out, 0)[1], np.amax(y_out, 0)[1]) #change index for time/volume
ax.xaxis.set_major_locator(LinearLocator(5))
ax.xaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.yaxis.set_major_locator(LinearLocator(5))
ax.yaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.zaxis.set_major_locator(LinearLocator(5))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.set_xlabel("Dew Point")
ax.set_ylabel("Relative Humidity")
ax.set_zlabel("Traffic Volume")
ttl = ax.text2D(0.05, 0.95, "2D Text", transform=ax.transAxes)
def update(i):
Z = weather_outputs[i,:,:,1] #change index for time/volume
surf = ax.plot_surface(X,Y,Z,cmap=cm.coolwarm,linewidth=0,antialiased=False)
ttl.set_text("temperature: " + str(round(temperature[i])))
return surf, ttl
if __name__ == '__main__':
anim = FuncAnimation(fig, update, frames=np.arange(0, numSteps+1), interval=300)
        anim.save('weather.gif', dpi=80, writer='imagemagick')
if args.time:
W1r = pickle.load(open("weights/W1r.pkl","rb"))
W2r = pickle.load(open("weights/W2r.pkl","rb"))
b1r = pickle.load(open("weights/b1r.pkl","rb"))
b2r = pickle.load(open("weights/b2r.pkl","rb"))
data_list = []
days = [0]*7
time_intervals = (24*60)//60 #the last number is the time in minutes between data points
times = [0]*time_intervals
for i in range(len(days)):
days[i] = (i/6.0)*2.0 - 1
for j in range(len(times)):
times[j] = (j/time_intervals)*2.0 - 1
data_list.append([0,days[i],times[j], 0, 0, 0, 0, 0])
data_input = np.asarray(data_list)
with tf.name_scope('hidden1'):
with tf.name_scope('weights'):
W1 = tf.Variable(W1r) #weights of hiddens
with tf.name_scope('biases'):
b1 = tf.Variable(b1r) #biases
with tf.name_scope('preacts'):
pre1 = tf.matmul(x, W1) + b1 #pre-activations of hiddens
with tf.name_scope('acts'):
H1 = tf.sigmoid(pre1) #activations of hiddens
with tf.name_scope('out'):
with tf.name_scope('weights'):
W2 = tf.Variable(W2r) #weights of outs
with tf.name_scope('biases'):
b2 = tf.Variable(b2r) #biases of outs
with tf.name_scope('preacts'):
pre2 = tf.matmul(H1, W2) + b2 #pre-activations of outs
with tf.name_scope('acts'):
y = tf.identity(pre2) #activations of outs
with tf.name_scope('cost'):
mse_cost = tf.losses.mean_squared_error(y_, y)
with tf.Session() as sess:
tf.global_variables_initializer().run()
y_out = sess.run(y, feed_dict={x:data_input})
print(data_input)
print("deregularizing data")
max_values = pickle.load(open("data/max-values.pkl","rb"))
min_values = pickle.load(open("data/min-values.pkl","rb"))
regularization = [-1,1]
for i in range(len(y_out)):
for j in range(len(y_out[0])):
y_out[i][j] = (y_out[i][j] - regularization[0]) / (regularization[1] - regularization[0]) * (max_values[j+8] - min_values[j+8]) + min_values[j+8]
for i in range(len(data_input)):
for j in range(len(data_input[0])):
data_input[i][j] = (data_input[i][j] - regularization[0]) / (regularization[1] - regularization[0]) * (max_values[j] - min_values[j]) + min_values[j]
for i in range(len(days)):
days[i] = (days[i] - regularization[0]) / (regularization[1] - regularization[0]) * (max_values[1] - min_values[1]) + min_values[1]
for i in range(len(times)):
times[i] = (times[i] - regularization[0]) / (regularization[1] - regularization[0]) * (max_values[2] - min_values[2]) + min_values[2]
x_axis_days = []
for i in range(len(days)):
for j in range(len(times)):
x_axis_days.append(days[i] + times[j])
for row in data_input:
print(row)
x = np.asarray(x_axis_days)
volume = y_out[:,1]
relative_time = y_out[:,0]
plt.plot(x, relative_time, "ro")
plt.show()
| 39.938172 | 179 | 0.622871 | 2,134 | 14,857 | 4.215558 | 0.142455 | 0.032681 | 0.050022 | 0.075033 | 0.635394 | 0.581036 | 0.514896 | 0.495665 | 0.483659 | 0.45309 | 0 | 0.027055 | 0.2238 | 14,857 | 371 | 180 | 40.045822 | 0.753035 | 0.073299 | 0 | 0.489796 | 0 | 0 | 0.106778 | 0.003571 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010204 | false | 0 | 0.044218 | 0.006803 | 0.064626 | 0.112245 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bd84f91db30e70500d6e6134cbbca69df24cc6b | 4,175 | py | Python | tests/torch_api/test_process_group.py | woqidaideshi/bagua | 0ee96da598685748519d58d24ce983499cb36721 | [
"MIT"
] | 635 | 2021-06-11T03:03:11.000Z | 2022-03-31T14:52:57.000Z | tests/torch_api/test_process_group.py | zhjc/bagua | eeaa2bf8950248a2e72ce2e471bbf08cb3b8b985 | [
"MIT"
] | 181 | 2021-06-10T12:27:19.000Z | 2022-03-31T04:08:19.000Z | tests/torch_api/test_process_group.py | shjwudp/bagua | 7e1b438e27e3119b23e472f5b9217a9862932bef | [
"MIT"
] | 71 | 2021-06-10T13:16:53.000Z | 2022-03-22T09:26:22.000Z | import os
import unittest
import torch
import torch.distributed as c10d
import bagua.torch_api as bagua
from tests.internal.common_utils import find_free_port
from tests.internal.multi_process import MultiProcessTestCase, setup_bagua_env
from tests import skip_if_cuda_not_available
class Result(object):
def __init__(self):
self.data = torch.zeros(100)
def run_new_group(rank, nprocs, args, results, env):
setup_bagua_env(rank, env)
all_ranks = list(range(nprocs))
odd_ranks = list(filter(lambda r: r % 2 == 1, all_ranks))
g = bagua.communication.new_group(ranks=odd_ranks)
tensor = torch.rand(100).cuda()
tensor *= rank
bagua.communication.allreduce(tensor, tensor, comm=g.get_global_communicator())
results[rank].data.copy_(tensor)
def run_from_torch_group(rank, nprocs, args, results, env):
setup_bagua_env(rank, env)
all_ranks = list(range(nprocs))
ranks_1 = list(filter(lambda r: r % 3 == 1, all_ranks))
ranks_2 = list(filter(lambda r: r % 2 == 0, all_ranks))
g_1 = torch.distributed.new_group(ranks_1)
bg_1 = bagua.communication.from_torch_group(g_1)
g_2 = torch.distributed.new_group(ranks_2)
bg_2 = bagua.communication.from_torch_group(g_2)
if rank in ranks_1:
assert torch.distributed.get_rank(g_1) == bg_1.get_global_communicator().rank()
assert (
torch.distributed.get_world_size(g_1)
== bg_1.get_global_communicator().nranks() # noqa: W503
)
if rank in ranks_2:
assert torch.distributed.get_rank(g_2) == bg_2.get_global_communicator().rank()
assert (
torch.distributed.get_world_size(g_2)
== bg_2.get_global_communicator().nranks() # noqa: W503
)
class TestProcessGroup(MultiProcessTestCase):
@skip_if_cuda_not_available()
def test_new_group(self):
nprocs = torch.cuda.device_count()
results = [Result() for _ in range(nprocs)]
self.run_test_locally(run_new_group, nprocs, args={}, results=results)
all_ranks = list(range(nprocs))
odd_ranks = list(filter(lambda r: r % 2 == 1, all_ranks))
for rank in odd_ranks:
peer_rank = (rank + 2) % nprocs
self.assertTrue(torch.equal(results[rank].data, results[peer_rank].data))
@skip_if_cuda_not_available()
def test_from_torch_group(self):
nprocs = torch.cuda.device_count()
self.run_test_locally(run_from_torch_group, nprocs, args={}, results=None)
from torch.testing._internal.common_distributed import ( # noqa: E402
MultiProcessTestCase,
skip_if_lt_x_gpu,
)
class ProcessGroupNCCLTest(MultiProcessTestCase):
def setUp(self):
super(ProcessGroupNCCLTest, self).setUp()
# NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests
# that use NCCL_BLOCKING_WAIT will test it as expected.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
os.environ.update(
{
"MASTER_ADDR": "127.0.0.1",
"MASTER_PORT": str(find_free_port(8000, 8100)),
"BAGUA_SERVICE_PORT": str(find_free_port(9000, 9100)),
}
)
self._spawn_processes()
@skip_if_lt_x_gpu(2)
def test_bagua_pg(self):
# Need to use NCCL_BLOCKING_WAIT and not ASYNC_ERROR_HANDLING,
# otherwise process will be taken down and we can't check for errors.
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
os.environ["NCCL_BLOCKING_WAIT"] = "1"
os.environ.update(
{
"WORLD_SIZE": str(self.world_size),
"LOCAL_WORLD_SIZE": str(self.world_size),
"RANK": str(self.rank),
"LOCAL_RANK": str(self.rank),
}
)
bagua.init_process_group()
pg = c10d.new_group(ranks=list(range(0, self.world_size)))
pg.bagua_patch()
self.assertTrue(pg in bagua.communication._torch_to_bagua_pg_map)
del pg
c10d.destroy_process_group()
self.assertEqual(len(bagua.communication._torch_to_bagua_pg_map), 0)
if __name__ == "__main__":
unittest.main()
| 32.617188 | 87 | 0.661796 | 564 | 4,175 | 4.583333 | 0.244681 | 0.043327 | 0.040619 | 0.026306 | 0.426306 | 0.348162 | 0.238685 | 0.138878 | 0.138878 | 0.138878 | 0 | 0.024329 | 0.232096 | 4,175 | 127 | 88 | 32.874016 | 0.781971 | 0.067545 | 0 | 0.16129 | 0 | 0 | 0.043243 | 0.01287 | 0 | 0 | 0 | 0 | 0.075269 | 1 | 0.075269 | false | 0 | 0.096774 | 0 | 0.204301 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bd948d6210c3b20eae27d41f0ad6a8b33ca0641 | 1,209 | py | Python | laskarit/viikko2/unicafe/src/kassapaate.py | miikara/landlord | 1f9a2ae2485adb8837a9b1d0668cc0708aec4d5f | [
"MIT"
] | 1 | 2021-05-16T22:41:55.000Z | 2021-05-16T22:41:55.000Z | laskarit/viikko2/unicafe/src/kassapaate.py | miikara/landlord | 1f9a2ae2485adb8837a9b1d0668cc0708aec4d5f | [
"MIT"
] | null | null | null | laskarit/viikko2/unicafe/src/kassapaate.py | miikara/landlord | 1f9a2ae2485adb8837a9b1d0668cc0708aec4d5f | [
"MIT"
] | null | null | null | class Kassapaate:
def __init__(self):
self.kassassa_rahaa = 100000
self.edulliset = 0
self.maukkaat = 0
def syo_edullisesti_kateisella(self, maksu):
if maksu >= 240:
self.kassassa_rahaa = self.kassassa_rahaa + 240
self.edulliset += 1
return maksu - 240
else:
return maksu
def syo_maukkaasti_kateisella(self, maksu):
if maksu >= 400:
self.kassassa_rahaa = self.kassassa_rahaa + 400
self.maukkaat += 1
return maksu - 400
else:
return maksu
def syo_edullisesti_kortilla(self, kortti):
if kortti.saldo >= 240:
kortti.ota_rahaa(240)
self.edulliset += 1
return True
else:
return False
def syo_maukkaasti_kortilla(self, kortti):
if kortti.saldo >= 400:
kortti.ota_rahaa(400)
self.maukkaat += 1
return True
else:
return False
def lataa_rahaa_kortille(self, kortti, summa):
if summa >= 0:
kortti.lataa_rahaa(summa)
self.kassassa_rahaa += summa
else:
return
| 26.866667 | 59 | 0.544251 | 129 | 1,209 | 4.922481 | 0.24031 | 0.113386 | 0.16063 | 0.066142 | 0.579528 | 0.431496 | 0.091339 | 0 | 0 | 0 | 0 | 0.057718 | 0.383788 | 1,209 | 44 | 60 | 27.477273 | 0.794631 | 0 | 0 | 0.384615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0 | 0 | 0.410256 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1bdb8ff2ab04650c354ff55f8bfdaf2310b0c2ee | 4,135 | py | Python | multiuploader/utils.py | dicos/django-multiuploader | 5fc7c782486bfab367e372b90efadb9c61e6e257 | [
"MIT"
] | null | null | null | multiuploader/utils.py | dicos/django-multiuploader | 5fc7c782486bfab367e372b90efadb9c61e6e257 | [
"MIT"
] | null | null | null | multiuploader/utils.py | dicos/django-multiuploader | 5fc7c782486bfab367e372b90efadb9c61e6e257 | [
"MIT"
] | null | null | null | import os
import time
import urllib
import logging
import mimetypes
from hashlib import sha1
from random import choice
from wsgiref.util import FileWrapper
from django.conf import settings
from django.core.files import File
from django.http import HttpResponse, StreamingHttpResponse
from django.utils.timezone import now
from django.utils.text import get_valid_filename
from django.core.files.uploadedfile import UploadedFile
import multiuploader.default_settings as DEFAULTS
log = logging
# Getting files here
def format_file_extensions(extensions):
return ".(%s)$" % "|".join(extensions)
def _upload_to(instance, filename):
upload_path = getattr(settings, 'MULTIUPLOADER_FILES_FOLDER', DEFAULTS.MULTIUPLOADER_FILES_FOLDER)
if upload_path[-1] != '/':
upload_path += '/'
filename = get_valid_filename(os.path.basename(filename))
filename, ext = os.path.splitext(filename)
hash = sha1(str(time.time())).hexdigest()
fullname = os.path.join(upload_path, "%s.%s%s" % (filename, hash, ext))
return fullname
def get_uploads_from_request(request):
"""Description should be here"""
attachments = []
    # Only POST requests are supported
    if request.method == 'POST':
        if request.FILES is None:
            return []
        # Getting file data for further manipulations
        if u'files' not in request.FILES:
            return []
for fl in request.FILES.getlist("files"):
wrapped_file = UploadedFile(fl)
filename = wrapped_file.name
file_size = wrapped_file.file.size
attachments.append({"file": fl, "date": now(), "name": wrapped_file.name})
return attachments
def get_uploads_from_temp(ids):
"""Method returns of uploaded files"""
from models import MultiuploaderFile
ats = []
files = MultiuploaderFile.objects.filter(pk__in=ids)
#Getting THE FILES
for fl in files:
ats.append({"file":File(fl.file), "date":fl.upload_date, "name":fl.filename, 'id': fl.pk})
return ats
def get_uploads_from_model(instance, attr):
"""Replaces attachment files from model to a given location,
returns list of opened files of dict {file:'file',date:date,name:'filename'}"""
ats = []
files = getattr(instance, attr)
for fl in files.all():
ats.append({"file": File(fl.file), "date": fl.upload_date, "name": fl.filename})
return ats
def generate_safe_pk(self):
def wrapped(self):
while 1:
skey = getattr(settings, 'SECRET_KEY', 'asidasdas3sfvsanfja242aako;dfhdasd&asdasi&du7')
pk = sha1('%s%s' % (skey, ''.join([choice('0123456789') for i in range(11)]))).hexdigest()
try:
self.__class__.objects.get(pk=pk)
except:
return pk
return wrapped
def download_response(request, filelike, filename):
response = StreamingHttpResponse(filelike)
type, encoding = mimetypes.guess_type(filename)
response['Content-Type'] = type or 'application/octet-stream'
response['Content-Length'] = filelike.size
if encoding is not None:
response['Content-Encoding'] = encoding
# To inspect details for the below code, see http://greenbytes.de/tech/tc2231/
if u'WebKit' in request.META['HTTP_USER_AGENT']:
# Safari 3.0 and Chrome 2.0 accepts UTF-8 encoded string directly.
filename_header = 'filename=%s' % filename.encode('utf-8')
elif u'MSIE' in request.META['HTTP_USER_AGENT']:
# IE does not support internationalized filename at all.
# It can only recognize internationalized URL, so we do the trick via routing rules.
filename_header = ''
else:
# For others like Firefox, we follow RFC2231 (encoding extension in HTTP headers).
filename_header = 'filename*=UTF-8\'\'%s' % urllib.quote(filename.encode('utf-8'))
response['Content-Disposition'] = 'attachment; ' + filename_header
return response
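# Annotation (not in the original module): typical use of download_response from
# a Django view, assuming a model such as MultiuploaderFile stores the upload:
#   def serve(request, pk):
#       obj = MultiuploaderFile.objects.get(pk=pk)
#       return download_response(request, obj.file, obj.filename)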
| 32.559055 | 103 | 0.648126 | 502 | 4,135 | 5.23506 | 0.388446 | 0.022831 | 0.01484 | 0.019406 | 0.060122 | 0.060122 | 0.040335 | 0.040335 | 0.040335 | 0.040335 | 0 | 0.01216 | 0.244256 | 4,135 | 126 | 104 | 32.81746 | 0.8288 | 0.159613 | 0 | 0.076923 | 0 | 0 | 0.10217 | 0.028632 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0 | 0.205128 | 0.012821 | 0.435897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
59ef80c6d5b9db80f94f7e583695d33684b37cea | 672 | py | Python | students/K33402/Barabanov Denis/lr1/task2/server.py | dEbAR38/ITMO_ICT_WebDevelopment_2020-2021 | 208cbc6d2b6d40c3043d35ce773a3433b377f671 | [
"MIT"
] | null | null | null | students/K33402/Barabanov Denis/lr1/task2/server.py | dEbAR38/ITMO_ICT_WebDevelopment_2020-2021 | 208cbc6d2b6d40c3043d35ce773a3433b377f671 | [
"MIT"
] | null | null | null | students/K33402/Barabanov Denis/lr1/task2/server.py | dEbAR38/ITMO_ICT_WebDevelopment_2020-2021 | 208cbc6d2b6d40c3043d35ce773a3433b377f671 | [
"MIT"
] | null | null | null | import socket
sock = socket.socket()
sock.bind(('', 9090))
sock.listen(1)
conn, addr = sock.accept()
print(conn)
while True:
data = conn.recv(1024)
data = str(data.decode())
print(data)
    if data != 'pifagor':
        conn.send('No such task'.encode())
break
else:
        conn.send('Enter the legs of the triangle (a,b)'.encode())
data = conn.recv(1024)
data = str(data.decode())
try:
            a, b = map(int, data.split(','))
        except ValueError:
            conn.send('Invalid input'.encode())
            continue
c = a**2 + b**2
        c = 'Hypotenuse length = ' + str(c**(1/2))
conn.send(c.encode())
conn.close() | 24.888889 | 63 | 0.541667 | 88 | 672 | 4.136364 | 0.511364 | 0.087912 | 0.065934 | 0.087912 | 0.181319 | 0.181319 | 0.181319 | 0.181319 | 0 | 0 | 0 | 0.03527 | 0.282738 | 672 | 27 | 64 | 24.888889 | 0.719917 | 0 | 0 | 0.16 | 0 | 0 | 0.139673 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.04 | 0 | 0.04 | 0.08 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
59f050a0b633204f5e3438dac473652fa6b25572 | 12,025 | py | Python | src/striemann/tests/test_striemann.py | madedotcom/striemann | fc6089bc5f1b96118ffe6474594798763d19c050 | [
"MIT"
] | null | null | null | src/striemann/tests/test_striemann.py | madedotcom/striemann | fc6089bc5f1b96118ffe6474594798763d19c050 | [
"MIT"
] | 16 | 2018-05-29T14:52:57.000Z | 2020-05-14T12:33:32.000Z | src/striemann/tests/test_striemann.py | madedotcom/striemann | fc6089bc5f1b96118ffe6474594798763d19c050 | [
"MIT"
] | null | null | null | from expects import expect
from icdiff_expects import equal
from unittest import mock
import striemann.metrics
import json
class Test:
def test_gauges(self):
transport = striemann.metrics.InMemoryTransport()
metrics = striemann.metrics.Metrics(transport, source="test")
metrics.recordGauge("service_name", 2.0, tags=["spam"], ham="eggs")
metrics.recordGauge("service_name", 4.0, tags=["spam"], ham="eggs")
metrics.incrementCounter("service_name", value=5, tags=["foo"], bar="baz")
metrics.flush()
expect(transport.last_batch).to(
equal(
[
{
"attributes": {"ham": "eggs", "source": "test"},
"metric_f": 4.0,
"service": "service_name",
"tags": ["spam"],
"description": "gauge",
},
{
"attributes": {"bar": "baz", "source": "test"},
"metric_f": 5,
"service": "service_name",
"tags": ["foo"],
"description": "counter",
},
]
)
)
def test_ttl(self):
transport = striemann.metrics.InMemoryTransport()
metrics = striemann.metrics.Metrics(transport)
metrics.incrementCounter("heartbeat", ttl=7)
metrics.flush()
expect(transport.last_batch).to(
equal(
[
{
"service": "heartbeat",
"ttl": 7.0,
"metric_f": 1,
"tags": [],
"attributes": {},
"description": "counter",
}
]
)
)
@mock.patch("timeit.default_timer", side_effect=[0, 1, 0, 3, 0, 5])
def test_timers_internal_storage(self, timer):
transport = striemann.metrics.InMemoryTransport()
metrics = striemann.metrics.Metrics(transport)
with metrics.time("time"):
pass
with metrics.time("time"):
pass
[stored_data] = list(metrics._ranges._metrics_summaries.values())
assert isinstance(stored_data, dict)
assert stored_data["min"] == 1
assert stored_data["max"] == 3
assert stored_data["count"] == 2
assert stored_data["total"] == 4, (
'We call metrics.time("time") twice, once for 1 sec '
"and once for 3 sec so 4 sec in total (see side_effect)"
)
first = stored_data["first"]
assert isinstance(first, striemann.metrics.Metric)
with metrics.time("time"):
pass
[stored_data] = list(metrics._ranges._metrics_summaries.values())
assert isinstance(stored_data, dict)
assert stored_data["min"] == 1
assert stored_data["max"] == 5
assert stored_data["count"] == 3
assert stored_data["total"] == 9
assert stored_data["first"] is first
@mock.patch("timeit.default_timer", side_effect=[0, 1, 0, 3])
def test_timers(self, timer):
transport = striemann.metrics.InMemoryTransport()
metrics = striemann.metrics.Metrics(transport)
with metrics.time("time"):
pass
with metrics.time("time"):
pass
metrics.flush()
expect(transport.last_batch[0]).to(
equal(
{
"service": "time.min",
"metric_f": 1,
"tags": [],
"attributes": {},
"description": "range",
}
)
)
expect(transport.last_batch[1]).to(
equal(
{
"service": "time.max",
"metric_f": 3,
"tags": [],
"attributes": {},
"description": "range",
}
)
)
expect(transport.last_batch[2]).to(
equal(
{
"service": "time.mean",
"metric_f": 2,
"tags": [],
"attributes": {},
"description": "range",
}
)
)
expect(transport.last_batch[3]).to(
equal(
{
"service": "time.count",
"metric_f": 2,
"tags": [],
"attributes": {},
"description": "range",
}
)
)
class TestStdoutTransport:
def test_transport(self, capsys):
transport = striemann.metrics.StdoutTransport(
service="foo", owner="baz", env="local"
)
expect(transport.batch).to(equal([]))
expect(transport.service).to(equal("foo"))
expect(transport.owner).to(equal("baz"))
expect(transport.env).to(equal("local"))
metrics = striemann.metrics.Metrics(transport, source="test")
metrics.incrementCounter("service_name", value=5, tags=["foo"], bar="baz")
metrics.flush()
out, err = capsys.readouterr()
out_json = json.loads(out)
time = out_json["metric"]["time"]
assert json.loads(out) == {
"metric": {
"name": "service_name",
"value": 5,
"data": {
"tags": ["foo"],
"description": "counter",
"bar": "baz",
"source": "test",
},
"env": "local",
"owner": "baz",
"service": "foo",
"time": time,
}
}
def test_transport_with_ttl(self, capsys):
transport = striemann.metrics.StdoutTransport(
service="foo", owner="baz", env="local"
)
expect(transport.batch).to(equal([]))
expect(transport.service).to(equal("foo"))
expect(transport.owner).to(equal("baz"))
expect(transport.env).to(equal("local"))
metrics = striemann.metrics.Metrics(transport)
metrics.incrementCounter("service_name", ttl=600)
metrics.flush()
out, err = capsys.readouterr()
out_json = json.loads(out)
time = out_json["metric"]["time"]
assert json.loads(out) == {
"metric": {
"name": "service_name",
"value": 1,
"data": {"description": "counter", "tags": []},
"env": "local",
"owner": "baz",
"service": "foo",
"ttl": 600,
"time": time,
}
}
def test_transport_no_tags_and_attributes(self, capsys):
transport = striemann.metrics.StdoutTransport(
service="foo", owner="baz", env="local"
)
expect(transport.batch).to(equal([]))
expect(transport.service).to(equal("foo"))
expect(transport.owner).to(equal("baz"))
expect(transport.env).to(equal("local"))
metrics = striemann.metrics.Metrics(transport)
metrics.incrementCounter("service_name")
metrics.flush()
out, err = capsys.readouterr()
out_json = json.loads(out)
time = out_json["metric"]["time"]
assert json.loads(out) == {
"metric": {
"name": "service_name",
"value": 1,
"data": {"description": "counter", "tags": []},
"env": "local",
"owner": "baz",
"service": "foo",
"time": time,
}
}
class FakeRiemannClientTransport:
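    """Test double that records connect/send/disconnect calls in a shared log."""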
def __init__(self, log, send=lambda msg: None):
self.log = log
self._send = send
def send(self, msg):
self.log.append("try to send")
self._send(msg)
self.log.append("sent")
def connect(self):
self.log.append("connected")
def disconnect(self):
self.log.append("disconnected")
class ExplodingRiemannClientTransport(FakeRiemannClientTransport):
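    """Test double whose connect/send raise ConnectionRefusedError while
    `connected` is False, to exercise the reconnection logic."""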
def __init__(self, log):
super().__init__(log)
self.connected = True
def send(self, msg):
self.log.append("try to send")
if self.connected:
self.log.append("sent")
else:
self.log.append("connection refused")
raise ConnectionRefusedError
def connect(self):
if self.connected:
self.log.append("connected")
else:
self.log.append("connection refused")
raise ConnectionRefusedError
class TestReconnect:
def test_we_reconnect_once_on_failure_regardless(self):
        # regardless of what state we think the connection is in
t = striemann.metrics.RiemannTransport("dummy host", "dummy port")
failed = False
def fail_once(self_):
nonlocal failed
if not failed:
failed = True
raise Exception("bother")
log = []
t.transport = FakeRiemannClientTransport(log, fail_once)
t.flush(is_closing=False)
assert log == [
"connected",
"try to send",
"disconnected",
"connected",
"try to send",
"sent",
]
def test_connection_refused_reconnection(self):
t = striemann.metrics.RiemannTransport("dummy host", "dummy port")
log = []
t.transport = ExplodingRiemannClientTransport(log)
t.flush(is_closing=False)
t.transport.connected = False
t.flush(is_closing=False)
t.transport.connected = True
t.flush(is_closing=False)
assert log == [
"connected",
"try to send",
"sent",
"try to send",
"connection refused",
"disconnected",
"connection refused",
"connected",
"try to send",
"sent",
]
def test_in_normal_case_we_just_send(self):
t = striemann.metrics.RiemannTransport("dummy host", "dummy port")
def succeed(self_):
pass
log = []
t.transport = FakeRiemannClientTransport(log, succeed)
t.flush(is_closing=False)
assert log == ["connected", "try to send", "sent"]
class TestCompositeTransport:
def test_composite_riemann_and_stdout(self, capsys):
def succeed(self_):
pass
riemann_log = []
riemann_transport = striemann.metrics.RiemannTransport(
"dummy host", "dummy port"
)
riemann_transport.transport = FakeRiemannClientTransport(riemann_log, succeed)
stdout_transport = striemann.metrics.StdoutTransport(
service="foo", owner="baz", env="local"
)
transport = striemann.metrics.CompositeTransport(
riemann_transport, stdout_transport
)
metrics = striemann.metrics.Metrics(transport, source="test")
metrics.incrementCounter("service_name", value=5, tags=["foo"], bar="baz")
metrics.flush()
out, err = capsys.readouterr()
out_json = json.loads(out)
time = out_json["metric"]["time"]
assert riemann_log == ["connected", "try to send", "sent"]
assert json.loads(out) == {
"metric": {
"name": "service_name",
"value": 5,
"data": {
"tags": ["foo"],
"description": "counter",
"bar": "baz",
"source": "test",
},
"env": "local",
"owner": "baz",
"service": "foo",
"time": time,
}
}
| 30.0625 | 86 | 0.490728 | 1,067 | 12,025 | 5.412371 | 0.147142 | 0.063723 | 0.04329 | 0.041558 | 0.695065 | 0.65974 | 0.625628 | 0.594978 | 0.50632 | 0.487273 | 0 | 0.007111 | 0.380208 | 12,025 | 399 | 87 | 30.137845 | 0.767745 | 0.004241 | 0 | 0.589124 | 0 | 0 | 0.138573 | 0 | 0 | 0 | 0 | 0 | 0.060423 | 1 | 0.063444 | false | 0.021148 | 0.015106 | 0 | 0.096677 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
59f073559061a5de8a0cd772dadf4560b56bd0fb | 5,128 | py | Python | paper/figures/HD189733/viz.py | j-faria/wobble | 041cd58fa8fb87bb5b41ee23bd1ea716bab7051b | [
"MIT"
] | 38 | 2016-06-03T13:15:49.000Z | 2021-12-01T00:02:11.000Z | paper/figures/HD189733/viz.py | j-faria/wobble | 041cd58fa8fb87bb5b41ee23bd1ea716bab7051b | [
"MIT"
] | 63 | 2016-09-17T13:38:16.000Z | 2021-02-05T16:27:10.000Z | paper/figures/HD189733/viz.py | j-faria/wobble | 041cd58fa8fb87bb5b41ee23bd1ea716bab7051b | [
"MIT"
] | 17 | 2017-05-04T03:03:16.000Z | 2022-01-10T17:56:43.000Z | from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from ipywidgets import Layout
import starry
from ylm_rot import get_ylm_coeffs
import matplotlib.pyplot as pl
import numpy as np
vslider = \
widgets.FloatSlider(
value=0.1,
min=0.1,
max=10.0,
step=0.01,
description=r'$v_\mathrm{eq}$ [km / s]:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.2f',
layout=Layout(width='40%')
)
oslider = \
widgets.FloatSlider(
value=0,
min=-90,
max=90.0,
step=0.1,
description=r'$\lambda$ [deg]:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.1f',
layout=Layout(width='40%')
)
islider = \
widgets.FloatSlider(
value=90,
min=1,
max=179.0,
step=0.1,
description=r'$i$ [deg]:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.1f',
layout=Layout(width='40%')
)
aslider = \
widgets.FloatSlider(
value=0,
min=0,
max=1.0,
step=0.01,
description=r'$\alpha$:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.2f',
layout=Layout(width='40%')
)
u1slider = \
widgets.FloatSlider(
value=0,
min=0.0,
max=2.0,
step=0.01,
description=r'$u_1$:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.2f',
layout=Layout(width='40%')
)
u2slider = \
widgets.FloatSlider(
value=0.0,
min=-1.0,
max=1.0,
step=0.01,
description=r'$u_2$:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.2f',
layout=Layout(width='40%')
)
yslider = \
widgets.FloatSlider(
value=0,
min=-1.0,
max=1.0,
step=0.01,
description=r'$b$:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.2f',
layout=Layout(width='40%')
)
rslider = \
widgets.FloatSlider(
value=0.1,
min=0.01,
max=0.5,
step=0.001,
description=r'$r / R_\star$:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.3f',
layout=Layout(width='40%')
)
# Load RV data for HD 189733 from Bedell, corrected for the baseline
xo_189, rv_189 = np.loadtxt("HD189733_sample.txt", unpack=True)
# Create the global starry maps
map_Iv_plus_I = starry.Map(5)
map_I = starry.Map(2)
def visualize_func(veq=1, inc=90, obl=0, alpha=0, u1=0, u2=0, yo=0, ro=0.1):
"""Interactive visualization of the RM effect."""
# Map resolution for plotting
res = 300
# Set the map coefficients
map_Iv_plus_I[:3, :] = get_ylm_coeffs(inc=inc, obl=obl, alpha=alpha, veq=veq * 1.e3)
map_Iv_plus_I[0, 0] = 1
map_Iv_plus_I[1] = u1
map_Iv_plus_I[2] = u2
map_I[0, 0] = 1
map_I[1] = u1
map_I[2] = u2
# Check if LD is physical
if (u1 + u2) > 1 or (u1 + 2 * u2) < 0 or u1 < 0:
u1slider.style.handle_color = "#FF0000"
u2slider.style.handle_color = "#FF0000"
else:
u1slider.style.handle_color = "#FFFFFF"
u2slider.style.handle_color = "#FFFFFF"
# Plot the brightness-weighted velocity field
x, y = np.meshgrid(np.linspace(-1, 1, res), np.linspace(-1, 1, res))
img = np.array([map_Iv_plus_I(x=x[j], y=y[j]) -
map_I(x=x[j], y=y[j]) for j in range(res)]) * (np.pi / 1.e3)
fig = pl.figure(figsize=(15, 8))
axim = pl.axes((0, 0.05, 0.3, 0.8))
axcb = pl.axes((0, 0.85, 0.3, 0.03))
axrm = pl.axes((0.4, 0.20, 0.6, 0.5))
im = axim.imshow(img, cmap='RdBu_r', origin='lower',
vmin=-veq, vmax=veq, extent=(-1,1,-1,1))
cb = pl.colorbar(im, orientation='horizontal', cax=axcb)
cb.ax.set_xlabel("Radial velocity [km / s]")
axim.contour(img, origin='lower', levels=np.linspace(-veq, veq, 20),
colors=['k' for i in range(20)], alpha=0.25,
extent=(-1,1,-1,1))
axim.axis('off')
axim.set_aspect(1)
axim.axhline(yo, color='k', alpha=0.5)
axim.axhline(yo + 0.5 * ro, color='k', ls='--', alpha=0.5)
axim.axhline(yo - 0.5 * ro, color='k', ls='--', alpha=0.5)
# Compute the RM effect amplitude
xo = np.linspace(-1 - 2 * ro, 1 + 2 * ro, 1000)
Iv_plus_I = map_Iv_plus_I.flux(xo=xo, yo=yo, ro=ro)
I = map_I.flux(xo=xo, yo=yo, ro=ro)
RM = (Iv_plus_I - I) / I
# Plot it
axrm.plot(xo, RM)
axrm.set_xlabel(r"Occultor x position [$R_\star$]", fontsize=16)
axrm.set_ylabel("Radial velocity [m /s]", fontsize=16)
axrm.set_title("The Rossiter-McLaughlin effect", fontsize=20)
axrm.plot(xo_189, rv_189, '.')
def visualize():
return interact(visualize_func, veq=vslider, inc=islider,
obl=oslider, alpha=aslider, u1=u1slider,
u2=u2slider, yo=yslider, ro=rslider) | 25.89899 | 88 | 0.608034 | 766 | 5,128 | 3.972585 | 0.25718 | 0.06211 | 0.020703 | 0.076241 | 0.425896 | 0.388432 | 0.351298 | 0.320407 | 0.309234 | 0.301019 | 0 | 0.064712 | 0.228549 | 5,128 | 198 | 89 | 25.89899 | 0.704499 | 0.058892 | 0 | 0.431953 | 0 | 0 | 0.085168 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011834 | false | 0 | 0.04142 | 0.005917 | 0.059172 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
59f0dcccc30e5c47e03c3762b37daf71a44fb738 | 14,101 | py | Python | tests/test_simple.py | FFY00/mousebender | df0b7f7408c952ea8d124ab2c8c9b8c24ea77d06 | [
"BSD-3-Clause"
] | 21 | 2020-04-10T03:53:44.000Z | 2022-01-12T08:31:56.000Z | tests/test_simple.py | FFY00/mousebender | df0b7f7408c952ea8d124ab2c8c9b8c24ea77d06 | [
"BSD-3-Clause"
] | 34 | 2020-04-10T19:56:29.000Z | 2022-01-27T22:10:22.000Z | tests/test_simple.py | sthagen/mousebender | 849844440f024adb9efe19c4774bae1ed45335f6 | [
"BSD-3-Clause"
] | 8 | 2020-04-10T03:09:00.000Z | 2021-11-04T11:17:38.000Z | """Tests for mousebender.simple."""
import warnings
import importlib_resources
import packaging.specifiers
import packaging.version
import pytest
from mousebender import simple
from .data import simple as simple_data
class TestProjectURLConstruction:
"""Tests for mousebender.simple.create_project_url()."""
@pytest.mark.parametrize("base_url", ["/simple/", "/simple"])
def test_url_joining(self, base_url):
url = simple.create_project_url(base_url, "hello")
assert url == "/simple/hello/"
def test_project_name_lowercased(self):
url = simple.create_project_url("/", "THEPROJECTNAME")
assert url == "/theprojectname/"
def test_project_name_normalized(self):
normal_url = simple.create_project_url("/", "the_project.name.-_.-_here")
assert normal_url == "/the-project-name-here/"
def test_only_project_name_in_url_normalized(self):
url = simple.create_project_url(
"https://terribly_awesome.com/So/Simple/", "THE_project.name.-_.-_here"
)
assert url == "https://terribly_awesome.com/So/Simple/the-project-name-here/"
def test_no_base_url(self):
url = simple.create_project_url("", "django-node")
assert url == "django-node/"
class TestRepoIndexParsing:
"""Tests for mousebender.simple.parse_repo_index()."""
@pytest.mark.parametrize(
"name,count,expected_item",
[
("pypi", 212_862, ("numpy", "/simple/numpy/")),
("piwheels", 263_872, ("django-node", "django-node/")),
],
)
def test_full_parse(self, name, count, expected_item):
index_html = importlib_resources.read_text(simple_data, f"index.{name}.html")
index = simple.parse_repo_index(index_html)
assert len(index) == count
key, value = expected_item
assert key in index
assert index[key] == value
def test_no_cdata(self):
index_html = (
"<html><head></head><body><a href='https://no.url/here'></a></body></html>"
)
index = simple.parse_repo_index(index_html)
assert not index
def test_no_href(self):
index_html = "<html><head></head><body><a>my-cdata-package</a></body></html>"
index = simple.parse_repo_index(index_html)
assert not index
def test_project_url_normalization_complete(self):
index_html = """
<html>
<body>
<a href="/project/PACKAGE-NAME">package-name</a>
</body>
</html>
"""
index = simple.parse_repo_index(index_html)
assert index["package-name"] == "/project/package-name/"
def test_project_name_not_normalized(self):
index_html = """
<html>
<body>
<a href="/project/package-name">PACKAGE-NAME</a>
</body>
</html>
"""
index = simple.parse_repo_index(index_html)
assert index["PACKAGE-NAME"] == "/project/package-name/"
def test_relative_url(self):
index_html = """
<html>
<body>
<a href="django-node">django-node</a>
</body>
</html>
"""
index = simple.parse_repo_index(index_html)
assert index["django-node"] == "django-node/"
class TestParseArchiveLinks:
"""Tests for mousebender.simple.parse_archive_links()."""
@pytest.mark.parametrize(
"module_name,count,expected_archive_link",
[
(
"numpy",
1402,
simple.ArchiveLink(
"numpy-1.13.0rc1-cp36-none-win_amd64.whl",
"https://files.pythonhosted.org/packages/5c/2e/5c0eee0635035a7e0646734e2b9388e17a97f6f2087e15141a218b6f2b6d/numpy-1.13.0rc1-cp36-none-win_amd64.whl",
packaging.specifiers.SpecifierSet(
">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*"
),
(
"sha256",
"8e8e1ccf025c8b6a821f75086a364a68d9e1877519a35bf8facec9e5120836f4",
),
None,
),
),
(
"pulpcore-client",
370,
simple.ArchiveLink(
"pulpcore_client-3.1.0.dev1578940535-py3-none-any.whl",
"https://files.pythonhosted.org/packages/ca/7e/e14e41dc4bc60208f597f346d57755636e882be7509179c4e7c11f2c60a9/pulpcore_client-3.1.0.dev1578940535-py3-none-any.whl",
packaging.specifiers.SpecifierSet(),
(
"sha256",
"83a3759d7b6af33083b0d4893d53615fc045cbad9adde68a8df02e25b1862bc6",
),
None,
),
),
(
"pytorch",
522,
simple.ArchiveLink(
"torchvision-0.5.0+cu100-cp36-cp36m-linux_x86_64.whl",
"cu100/torchvision-0.5.0%2Bcu100-cp36-cp36m-linux_x86_64.whl",
packaging.specifiers.SpecifierSet(),
None,
None,
),
),
(
"AICoE-tensorflow",
15,
simple.ArchiveLink(
"tensorflow-2.0.0-cp37-cp37m-linux_x86_64.whl",
"tensorflow-2.0.0-cp37-cp37m-linux_x86_64.whl",
packaging.specifiers.SpecifierSet(),
None,
None,
),
),
(
"numpy-piwheels",
316,
simple.ArchiveLink(
"numpy-1.10.4-cp35-cp35m-linux_armv7l.whl",
"numpy-1.10.4-cp35-cp35m-linux_armv7l.whl",
packaging.specifiers.SpecifierSet(),
(
"sha256",
"5768279588a4766adb0211bbaa0f5857be38483c5aafe5d1caecbcd32749966e",
),
None,
),
),
],
)
def test_full_parse(self, module_name, count, expected_archive_link):
html = importlib_resources.read_text(
simple_data, f"archive_links.{module_name}.html"
)
archive_links = simple.parse_archive_links(html)
assert len(archive_links) == count
assert expected_archive_link in archive_links
@pytest.mark.parametrize(
"html,expected_filename",
[
(
'<a href="https://files.pythonhosted.org/packages/92/e2/7d9c6894511337b012735c0c149a7b4e49db0b934798b3ae05a3b46f31f0/numpy-1.12.1-cp35-none-win_amd64.whl#sha256=818d5a1d5752d09929ce1ba1735366d5acc769a1839386dc91f3ac30cf9faf19">numpy-1.12.1-cp35-none-win_amd64.whl</a><br/>',
"numpy-1.12.1-cp35-none-win_amd64.whl",
),
(
'<a href="cpu/torch-1.2.0%2Bcpu-cp35-cp35m-win_amd64.whl">cpu/torch-1.2.0%2Bcpu-cp35-cp35m-win_amd64.whl</a><br>',
"torch-1.2.0+cpu-cp35-cp35m-win_amd64.whl",
),
],
)
def test_filename(self, html, expected_filename):
archive_links = simple.parse_archive_links(html)
assert len(archive_links) == 1
assert archive_links[0].filename == expected_filename
@pytest.mark.parametrize(
"html,expected_url",
[
(
'<a href="https://files.pythonhosted.org/packages/92/e2/7d9c6894511337b012735c0c149a7b4e49db0b934798b3ae05a3b46f31f0/numpy-1.12.1-cp35-none-win_amd64.whl#sha256=818d5a1d5752d09929ce1ba1735366d5acc769a1839386dc91f3ac30cf9faf19">numpy-1.12.1-cp35-none-win_amd64.whl</a><br/>',
"https://files.pythonhosted.org/packages/92/e2/7d9c6894511337b012735c0c149a7b4e49db0b934798b3ae05a3b46f31f0/numpy-1.12.1-cp35-none-win_amd64.whl",
),
(
'<a href="cpu/torch-1.2.0%2Bcpu-cp35-cp35m-win_amd64.whl">cpu/torch-1.2.0%2Bcpu-cp35-cp35m-win_amd64.whl</a><br>',
"cpu/torch-1.2.0%2Bcpu-cp35-cp35m-win_amd64.whl",
),
],
)
def test_url(self, html, expected_url):
archive_links = simple.parse_archive_links(html)
assert len(archive_links) == 1
assert archive_links[0].url == expected_url
@pytest.mark.parametrize(
"html,supported,unsupported",
[
(
'<a href="cpu/torch-1.2.0%2Bcpu-cp35-cp35m-win_amd64.whl">cpu/torch-1.2.0%2Bcpu-cp35-cp35m-win_amd64.whl</a><br>',
"3.8",
None,
),
(
'<a href="https://files.pythonhosted.org/packages/4e/d9/d7ec4b9508e6a89f80de3e18fe3629c3c089355bec453b55e271c53dd23f/numpy-1.13.0-cp34-none-win32.whl#sha256=560ca5248c2a8fd96ac75a05811eca0ce08dfeea2ee128c87c9c7261af366288" data-requires-python=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*">numpy-1.13.0-cp34-none-win32.whl</a><br/>',
"2.7",
"3.3",
),
],
)
def test_requires_python(self, html, supported, unsupported):
archive_links = simple.parse_archive_links(html)
assert len(archive_links) == 1
assert packaging.version.Version(supported) in archive_links[0].requires_python
if unsupported:
assert (
packaging.version.Version(unsupported)
not in archive_links[0].requires_python
)
@pytest.mark.parametrize(
"html,expected_hash",
[
(
'<a href="https://files.pythonhosted.org/packages/92/e2/7d9c6894511337b012735c0c149a7b4e49db0b934798b3ae05a3b46f31f0/numpy-1.12.1-cp35-none-win_amd64.whl#sha256=818d5a1d5752d09929ce1ba1735366d5acc769a1839386dc91f3ac30cf9faf19">numpy-1.12.1-cp35-none-win_amd64.whl</a><br/>',
(
"sha256",
"818d5a1d5752d09929ce1ba1735366d5acc769a1839386dc91f3ac30cf9faf19",
),
),
(
'<a href="cpu/torch-1.2.0%2Bcpu-cp35-cp35m-win_amd64.whl">cpu/torch-1.2.0%2Bcpu-cp35-cp35m-win_amd64.whl</a><br>',
None,
),
],
)
def test_hash_(self, html, expected_hash):
archive_links = simple.parse_archive_links(html)
assert len(archive_links) == 1
assert archive_links[0].hash_ == expected_hash
@pytest.mark.parametrize(
"html,expected_gpg_sig",
[
(
'<a href="cpu/torch-1.2.0%2Bcpu-cp35-cp35m-win_amd64.whl">cpu/torch-1.2.0%2Bcpu-cp35-cp35m-win_amd64.whl</a><br>',
None,
),
(
'<a href="cpu/torch-1.2.0%2Bcpu-cp35-cp35m-win_amd64.whl" data-gpg-sig="true">cpu/torch-1.2.0%2Bcpu-cp35-cp35m-win_amd64.whl</a><br>',
True,
),
(
'<a href="cpu/torch-1.2.0%2Bcpu-cp35-cp35m-win_amd64.whl" data-gpg-sig="false">cpu/torch-1.2.0%2Bcpu-cp35-cp35m-win_amd64.whl</a><br>',
False,
),
],
)
def test_gpg_sig(self, html, expected_gpg_sig):
archive_links = simple.parse_archive_links(html)
assert len(archive_links) == 1
assert archive_links[0].gpg_sig == expected_gpg_sig
@pytest.mark.parametrize(
"html,expected",
[
(
'<a href="spam-1.2.3-py3.none.any.whl" data-yanked>spam-1.2.3-py3.none.any.whl</a>',
(True, ""),
),
(
'<a href="spam-1.2.3-py3.none.any.whl" data-yanked="oops!">spam-1.2.3-py3.none.any.whl</a>',
(True, "oops!"),
),
(
'<a href="spam-1.2.3-py3.none.any.whl" data-yanked="">spam-1.2.3-py3.none.any.whl</a>',
(True, ""),
),
(
'<a href="spam-1.2.3-py3.none.any.whl">spam-1.2.3-py3.none.any.whl</a>',
(False, ""),
),
],
)
def test_yanked(self, html, expected):
archive_links = simple.parse_archive_links(html)
assert len(archive_links) == 1
assert archive_links[0].yanked == expected
@pytest.mark.parametrize(
"parser", [simple.parse_repo_index, simple.parse_archive_links]
)
class TestPEP629Versioning:
def _example(self, major, minor):
return f"""
<!DOCTYPE html>
<html>
<head>
<meta name="pypi:repository-version" content="{major}.{minor}">
</head>
<body>
<!-- This space intentionally left blank. -->
</body>
</html>
"""
def test_unspecified(self, parser):
html = """
<!DOCTYPE html>
<html>
<body>
<!-- This space intentionally left blank. -->
</body>
</html>
"""
# No errors.
parser(html)
def test_equal(self, parser):
html = self._example(*simple._SUPPORTED_VERSION)
# No errors.
parser(html)
def test_newer_minor(self, parser):
html = self._example(
simple._SUPPORTED_VERSION[0], simple._SUPPORTED_VERSION[1] + 1
)
with warnings.catch_warnings(record=True) as raised_warnings:
parser(html)
assert raised_warnings and len(raised_warnings) == 1
assert raised_warnings[0].category is simple.UnsupportedVersionWarning
def test_newer_major(self, parser):
html = self._example(simple._SUPPORTED_VERSION[0] + 1, 0)
with pytest.raises(simple.UnsupportedVersion):
parser(html)
def test_older_minor(self, parser, monkeypatch):
monkeypatch.setattr(simple, "_SUPPORTED_VERSION", (1, 1))
html = self._example(1, 0)
# No error.
parser(html)
# No test for older major versions as that case is currently impossible with
# 1.0 as the only possible version.
| 37.403183 | 346 | 0.556344 | 1,499 | 14,101 | 5.070047 | 0.140093 | 0.050526 | 0.037632 | 0.016842 | 0.613026 | 0.545263 | 0.494079 | 0.481184 | 0.438421 | 0.388684 | 0 | 0.115761 | 0.312035 | 14,101 | 376 | 347 | 37.50266 | 0.667663 | 0.022906 | 0 | 0.432927 | 0 | 0.064024 | 0.388497 | 0.184978 | 0 | 0 | 0 | 0 | 0.091463 | 1 | 0.073171 | false | 0 | 0.02439 | 0.003049 | 0.112805 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
59f5273b29007b20b07871db2635217e7e4a6330 | 1,743 | py | Python | danceschool/register/forms.py | django-danceschool/django-danceschool | 65ae09ffdcb0821e82df0e1f634fe13c0384a525 | [
"BSD-3-Clause"
] | 32 | 2017-09-12T04:25:25.000Z | 2022-03-21T10:48:07.000Z | danceschool/register/forms.py | django-danceschool/django-danceschool | 65ae09ffdcb0821e82df0e1f634fe13c0384a525 | [
"BSD-3-Clause"
] | 97 | 2017-09-01T02:43:08.000Z | 2022-01-03T18:20:34.000Z | danceschool/register/forms.py | django-danceschool/django-danceschool | 65ae09ffdcb0821e82df0e1f634fe13c0384a525 | [
"BSD-3-Clause"
] | 19 | 2017-09-26T13:34:46.000Z | 2022-03-21T10:48:10.000Z | from django import forms
from django.utils.translation import gettext_lazy as _
from django.db.models import F
from dal import autocomplete
from danceschool.core.models import Customer
class CustomerGuestAutocompleteForm(forms.Form):
'''
This form can be used to search for customers and names on the guest
list for a night or event.
'''
def __init__(self, *args, **kwargs):
date = kwargs.pop('date', None)
super().__init__(*args, **kwargs)
self.fields['date'] = forms.DateField(
initial=date,
widget=forms.HiddenInput
)
# Note that the autocomplete works on the unioned queryset of
# customers, staff, and guests even though the queryset specified here
# is customers only. We only specify customers here to avoid issues
# with filtering unioned querysets.
self.fields['name'] = forms.ModelChoiceField(
queryset=Customer.objects.annotate(
firstName=F('first_name'), lastName=F('last_name')
).values('firstName', 'lastName'),
widget=autocomplete.ModelSelect2(
url='registerAutocomplete',
forward=['date'],
attrs={
# This will set the input placeholder attribute:
'data-placeholder': _('Enter a name'),
# This will set the yourlabs.Autocomplete.minimumCharacters
# options, the naming conversion is handled by jQuery
'data-minimum-input-length': 2,
'data-max-results': 10,
'class': 'modern-style',
'data-html': True,
}
)
)
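
# Hypothetical usage sketch (not part of the original module): in a check-in
# view the form would be bound to the night being worked, e.g.
#
#   form = CustomerGuestAutocompleteForm(date=timezone.now().date())
#
# The hidden 'date' field is then forwarded to the 'registerAutocomplete'
# endpoint so that results are restricted to that night's guest list.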
| 35.571429 | 79 | 0.583477 | 181 | 1,743 | 5.546961 | 0.618785 | 0.02988 | 0.021912 | 0.027888 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003416 | 0.32817 | 1,743 | 48 | 80 | 36.3125 | 0.853971 | 0.277108 | 0 | 0 | 0 | 0 | 0.135332 | 0.020259 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.172414 | 0 | 0.241379 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
59f6a0718d51a789e27b2f717d206af701b1445f | 3,093 | py | Python | SIR_functions.py | TheNewExecutor/CoronaVirus | 779cd9f8338ce3b9f2a0383a5a526e694c65e3ee | [
"MIT"
] | null | null | null | SIR_functions.py | TheNewExecutor/CoronaVirus | 779cd9f8338ce3b9f2a0383a5a526e694c65e3ee | [
"MIT"
] | null | null | null | SIR_functions.py | TheNewExecutor/CoronaVirus | 779cd9f8338ce3b9f2a0383a5a526e694c65e3ee | [
"MIT"
] | null | null | null | from scipy.integrate import solve_ivp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from hyperopt import hp, fmin, tpe
from typing import Tuple
def SIR(t, y, N, kappa, tau, nu):
"""
Expresses SIR model in initial value ODE format, including time,
state variables, and parameters required to compute the derivatives
Parameters
---------------
t : array
Independent variable
y : array-like
Initial state of system, [S, I, R]
    N, kappa, tau, nu : float
        Parameters of the model: population size, contact rate,
        transmission probability and removal rate
Returns
----------------
f : array-like
Array of derivatives for the system to be numerically integrated
"""
S, I, R = y
dSdt = - kappa * tau / N * S * I
dIdt = (kappa * tau / N * S - nu) * I
dRdt = nu * I
f = [dSdt, dIdt, dRdt]
return f
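

# A minimal sketch (added for illustration; the parameter values below are
# made-up assumptions, not calibrated estimates) showing how the SIR
# derivative above plugs directly into scipy's solve_ivp:
def _example_sir_integration():
    N, kappa, tau, nu = 1_000_000, 10, 0.05, 0.1
    t = np.linspace(0, 120, 121)
    sol = solve_ivp(fun=SIR, t_span=(0, 120), y0=(N - 1, 1, 0),
                    args=(N, kappa, tau, nu), t_eval=t)
    return sol.y  # rows are S(t), I(t), R(t)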
def create_SIR_curves(N: int, kappa: float, tau: float,
nu: float, start_date: object,
total_points: int) -> object:
"""
Use parameters to solve system of ODEs as an initial value problem
Parameters
----------
N : int
Size of population
kappa : float
Contacts per unit time individual has with rest of population
tau : float [0, 1]
Probability that contact leads to transmission
nu : float [0, 1]
        Probability per unit time that an infected host is removed from the population
    start_date : object
        Date (string or datetime) assigned to the first simulated point
    total_points : int
        Total points from start for simulated solutions

    Returns
    -------
    df : object
        DataFrame with columns S, I, R, Date and Predicted Confirmed
"""
S, I, R = N - 1, 1, 0
sys0 = (S, I, R)
    start, stop = 0, total_points - 1  # daily time grid
    parameters = (N, kappa, tau, nu)
    t = np.linspace(start, stop, total_points)
    sol = solve_ivp(fun=SIR, t_span=(start, stop), y0=sys0, args=parameters, t_eval=t)
    df = pd.DataFrame(sol.y.T, columns=['S', 'I', 'R'])
    df['Date'] = pd.date_range(start_date, freq='D', periods=total_points)
df['Predicted Confirmed'] = df['I'] + df['R']
return df
def plot_SIR(parameters, df):
    N, kappa, tau, nu = parameters
    Re = kappa * tau / nu  # effective reproduction number
    ax = df.plot(x='Date', y=['S', 'I', 'R'], figsize=(15, 10))
    ax.set_title('SIR (Re = %.2f)' % Re)
    fig = ax.get_figure()
    return fig
def model_loss(parameters):
    # `parameters` is (N, kappa, tau, nu, start_date, total_points); the
    # observed 'Cases' column is assumed to have been merged into `df`
    # before comparison with the simulated 'Predicted Confirmed' column.
    df = create_SIR_curves(*parameters)
    loss = mean_squared_error(df['Cases'].values, df['Predicted Confirmed'].values)
    return loss
def prepare_data(data: object, location: str) -> Tuple[object, int]:
"""
Extract start date and length from dataframe
Parameters
----------
data : object
        Aggregated dataframe object with Confirmed, Death, Recovered etc. for the given location
location : str
Name of location to filter
Returns
-------
start_date : object
Start datetime of first nonzero data point of infected
num_days : int
Number of days from start date to current reported data
"""
    data = data.fillna(0)
    start_date = data[data['Confirmed'] != 0].head(1)['Date']
    num_days = len(data)
    return start_date, num_days
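

# Illustrative sketch only: hp/fmin/tpe are imported above but never used in
# this module, so the search space and objective wrapper below are assumptions
# about the intended calibration workflow, not the original author's code.
def _example_hyperopt_search():
    N, start_date, total_points = 1_000_000, '2020-01-22', 120
    space = [
        hp.quniform('kappa', 1, 50, 1),
        hp.uniform('tau', 0.0, 1.0),
        hp.uniform('nu', 0.0, 1.0),
    ]
    objective = lambda p: model_loss((N, p[0], p[1], p[2], start_date, total_points))
    return fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=50)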
| 28.118182 | 87 | 0.590365 | 412 | 3,093 | 4.371359 | 0.383495 | 0.026652 | 0.008329 | 0.018323 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01012 | 0.297123 | 3,093 | 109 | 88 | 28.376147 | 0.818307 | 0.436793 | 0 | 0 | 0 | 0 | 0.035162 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131579 | false | 0 | 0.157895 | 0 | 0.394737 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
59f93eebbd7e541b8c3908f89c9876035051096e | 854 | py | Python | src/lcm_zmq_integration/xserver.py | bthcode/cmake_scipy_ctypes_example | 64a7afdd825f0bb6a50cb174f4ced231b3017c8d | [
"BSD-3-Clause"
] | null | null | null | src/lcm_zmq_integration/xserver.py | bthcode/cmake_scipy_ctypes_example | 64a7afdd825f0bb6a50cb174f4ced231b3017c8d | [
"BSD-3-Clause"
] | null | null | null | src/lcm_zmq_integration/xserver.py | bthcode/cmake_scipy_ctypes_example | 64a7afdd825f0bb6a50cb174f4ced231b3017c8d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
Example Script to show LCM serialized data transported
on by very simple ZeroMQ pub/sub message server/client
"""
import zmq
from example_lcm import image_t
import time
#
# Set up a simple ZMQ publisher
# - socket type is zmq.PUB, meaning fire and forget publisher
# - socket connection is bind (i.e. this end acts as the server)
context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind("tcp://*:5556")
counter = 0
while True:
# populate a sample LCM data structure
I = image_t()
I.data = [ counter,1,1,2,2,2,3,3,3,4,4,4]
I.height = 4
I.width = 3
I.size = len(I.data )
# give the message a type
if counter % 2 == 0:
        buf = b'KEY' + I.encode()  # encode() returns bytes in Python 3
    else:
        buf = b'XXX' + I.encode()
counter += 1
# send
socket.send(buf)
time.sleep(0.5)
# end while True
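
# For completeness, a matching subscriber sketch (an assumed counterpart, not
# part of the original script). It is shown commented out because the
# publisher loop above never returns; run it as a separate process.
#
# context = zmq.Context()
# sub = context.socket(zmq.SUB)
# sub.connect("tcp://localhost:5556")
# sub.setsockopt(zmq.SUBSCRIBE, b'KEY')   # only receive 'KEY'-tagged messages
# while True:
#     buf = sub.recv()
#     msg = image_t.decode(buf[3:])       # drop the 3-byte type prefix
#     print(msg.height, msg.width, msg.size)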
| 20.333333 | 63 | 0.625293 | 135 | 854 | 3.933333 | 0.533333 | 0.022599 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036107 | 0.254098 | 854 | 41 | 64 | 20.829268 | 0.797488 | 0.41452 | 0 | 0 | 0 | 0 | 0.03719 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.15 | 0 | 0.15 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
59f95b02645ef93d80f48dd3f5a6fd77148ddf83 | 6,799 | py | Python | python/misc/dataFrameToDatabase/dataFrameToDatabase.py | jlucas-esri/Geospatial-Center-Code | a8a1c7028d254690af788cbdd9cbdf859a422413 | [
"Apache-2.0"
] | 14 | 2020-09-22T22:11:35.000Z | 2022-02-05T07:50:06.000Z | python/misc/dataFrameToDatabase/dataFrameToDatabase.py | jlucas-esri/Geospatial-Center-Code | a8a1c7028d254690af788cbdd9cbdf859a422413 | [
"Apache-2.0"
] | 2 | 2020-09-23T15:14:40.000Z | 2021-08-24T15:04:11.000Z | python/misc/dataFrameToDatabase/dataFrameToDatabase.py | apfister/Geospatial-Center | a8a1c7028d254690af788cbdd9cbdf859a422413 | [
"Apache-2.0"
] | 6 | 2020-11-20T17:22:30.000Z | 2021-11-12T13:22:20.000Z | import logging
import time
import pandas as pd
from pandas.errors import EmptyDataError
import sqlalchemy
from typing import Union
class DataFrameToDatabase:
def __init__(self, df:Union[pd.DataFrame, pd.io.parsers.TextFileReader],
dbTableName:str,
driver:str,
username:str=None,
password:str=None,
address:str=None,
dbName:str=None,
port:Union[int, str]=None,
query:dict={},
dbEcho:bool=True,
if_exists:str='fail',
index:bool=True,
index_label:str=None,
chunksize:int=None,
dtype:dict=None,
):
#private
self._logger = logging.getLogger('DataFrameToDatabase')
self._logger.setLevel(logging.INFO)
#default value updated in self._validateDataFrame
self._isIterable = False
#pd.DataFrame.to_sql variables
self._index = index
self._index_label = index_label
self._chunksize = chunksize
self._dtype = dtype
self._dbTableName = dbTableName
if if_exists not in ['fail', 'append', 'replace']:
            raise ValueError('if_exists must be set to "fail", "replace", or "append"')
elif if_exists == 'replace':
self._logger.warning(f'Table "{dbTableName}" will be overwritten.')
self._if_exists = if_exists
#validating and categorizing it as iterable or not
self._logger.info('Validating DataFrame...')
if self._validateDataFrame(df):
self._df = df
self._logger.info('Valid DataFrame')
        # validating db params
        self._logger.info('Validating database parameters...')
        # the echo flag must be set before validation, which creates the engine
        self._dbEcho = dbEcho
        if self._validateDbParameters(driver, username, password, address,
                                      port, dbName, query):
            # sqlalchemy.create_engine parameters
self._driver = driver
self._username = username
self._password = password
self._address = address
self._port = port
self._dbName = dbName
self._query = query
self._logger.info('Valid database parameters')
# self._logger.info('Inserting data...')
# self.insertData()
def _validateDataFrame(self, df):
"""
Validates that the df isn't empty and categorizes it as iterable (TextFileReader) or not iterable (DataFrame)
"""
#if the df is a standard DataFrame
if type(df) == pd.DataFrame:
self._logger.info('Using regular dataframe')
if df.empty:
self._logger.error('Empty dataframe')
raise EmptyDataError('DataFrame is empty')
self.colsAndTypes = {name: df.dtypes[name] for name in list(df.columns)}
self._isIterable = False
#if the df is a large file read in through chunks
elif type(df) == pd.io.parsers.TextFileReader:
self._logger.info('Using large dataframe')
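            # NOTE: peeking at the first chunk advances the TextFileReader, so
            # that chunk is consumed here and will not be re-read by insertData().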
for chunk in df:
self.colsAndTypes = {name: chunk.dtypes[name] for name in list(chunk.columns)}
if chunk.empty:
self._logger.error('Empty dataframe')
raise EmptyDataError('DataFrame is empty')
break
self._isIterable = True
else:
raise TypeError(f'Invalid df type. Type "{type(df)}" is not a DataFrame or TextFileReader')
return True
def _validateDbParameters(self, driver, username, password, address, port, dbName, query):
"""
        Validates database parameters by passing them into create_engine. If engine creation succeeds, the parameters are valid.
"""
try:
# if driver:
# driver = '+' + driver
# if port:
# port = ':' + str(port)
# if password:
# password = ':' + password
# if address:
# address = '@' + address
dbUrl = sqlalchemy.engine.URL.create(drivername=driver,
username=username,
password=password,
host=address,
port=port,
database=dbName,
query=query)
self._engine = sqlalchemy.create_engine(dbUrl, echo=self._dbEcho)
except Exception as e:
self._logger.exception(e)
raise e
else:
return True
def insertData(self):
"""
Inserts data into the database depending on the type of DataFrame given
"""
if self._isIterable:
#boolean tracking if function DataFrame.to_sql has been run for any chunk
updated = False
for chunk in self._df:
start = time.time()
if not updated:
chunk.to_sql(name=self._dbTableName,
con=self._engine,
if_exists=self._if_exists,
index=self._index,
index_label=self._index_label,
chunksize=self._chunksize,
dtype=self._dtype)
updated = True
elif updated:
chunk.to_sql(name=self._dbTableName,
con=self._engine,
if_exists='append',
index=self._index,
index_label=self._index_label,
chunksize=self._chunksize,
dtype=self._dtype)
end = time.time()
self._logger.info(f'Chunk inserted in {end-start:.3f} seconds')
elif not self._isIterable:
start = time.time()
self._df.to_sql(name=self._dbTableName,
con=self._engine,
if_exists=self._if_exists,
index=self._index,
index_label=self._index_label,
chunksize=self._chunksize,
dtype=self._dtype)
end = time.time()
self._logger.info(f'DataFrame inserted in {end-start:.3f} seconds')
def main(self):
self._logger.info('Inserting data...')
self.insertData()
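

# Minimal usage sketch (assumptions: an on-disk SQLite database and a toy
# DataFrame; any SQLAlchemy-supported driver string works the same way).
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    demo_df = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
    loader = DataFrameToDatabase(demo_df, dbTableName='demo', driver='sqlite',
                                 dbName='/tmp/demo.db', if_exists='replace')
    loader.main()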
| 36.358289 | 117 | 0.508016 | 644 | 6,799 | 5.212733 | 0.240683 | 0.047662 | 0.041704 | 0.011618 | 0.263628 | 0.257671 | 0.227882 | 0.177242 | 0.177242 | 0.177242 | 0 | 0.000499 | 0.410943 | 6,799 | 186 | 118 | 36.553763 | 0.837703 | 0.123695 | 0 | 0.266129 | 0 | 0 | 0.090506 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040323 | false | 0.040323 | 0.048387 | 0 | 0.112903 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
59fa8411dfa74bbeb8be27d4bcf88306bb7fbf17 | 5,508 | py | Python | pdfstream/servers/xpd_server.py | st3107/pdfstream | 6e1829d889e5f5400386513efe993ad0596da8a5 | [
"BSD-3-Clause"
] | null | null | null | pdfstream/servers/xpd_server.py | st3107/pdfstream | 6e1829d889e5f5400386513efe993ad0596da8a5 | [
"BSD-3-Clause"
] | 34 | 2020-07-08T16:24:52.000Z | 2020-11-21T17:55:13.000Z | pdfstream/servers/xpd_server.py | st3107/pdfstream | 6e1829d889e5f5400386513efe993ad0596da8a5 | [
"BSD-3-Clause"
] | 1 | 2020-10-05T14:51:32.000Z | 2020-10-05T14:51:32.000Z | """The analysis server. Process raw image to PDF."""
import typing as tp
import databroker
from bluesky.callbacks.zmq import Publisher
from databroker.v2 import Broker
from event_model import RunRouter
from ophyd.sim import NumpySeqHandler
from pdfstream.callbacks.analysis import AnalysisConfig, VisConfig, ExportConfig, AnalysisStream, Exporter, \
Visualizer, no_need_to_refresh_db
from pdfstream.callbacks.calibration import CalibrationConfig, Calibration
from pdfstream.servers import CONFIG_DIR, ServerNames
from pdfstream.servers.base import ServerConfig, find_cfg_file, BaseServer, StartStopCallback
class XPDConfig(AnalysisConfig, VisConfig, ExportConfig, CalibrationConfig):
"""The configuration for the xpd data reduction. It consists of analysis, visualization and exportation."""
def __init__(self, *args, **kwargs):
super(XPDConfig, self).__init__(*args, **kwargs)
self._an_db = None
@property
def an_db(self) -> tp.Union[None, Broker]:
name = self.get("DATABASE", "an_db", fallback=None)
if no_need_to_refresh_db(self._an_db, name):
pass
elif name is None:
self._an_db = None
elif name == "temp":
print("Warning: a temporary db is created for an db. It will be destroy at the end of the session.")
self._an_db = databroker.v2.temp()
else:
self._an_db = databroker.catalog[name]
return self._an_db
@an_db.setter
def an_db(self, db: Broker):
section_name = "DATABASE"
db_key = "an_db"
if section_name not in self.sections():
self.add_section(section_name)
self.set(section_name, db_key, db.name)
self._an_db = db
@property
def publisher_config(self) -> dict:
host = self.get("PUBLISH TO", "host")
port = self.getint("PUBLISH TO", "port")
prefix = self.get("PUBLISH TO", "prefix", fallback="").encode()
return {
"address": (host, port),
"prefix": prefix
}
@property
def functionality(self) -> dict:
return {
"do_calibration": self.getboolean("FUNCTIONALITY", "do_calibration"),
"dump_to_db": self.getboolean("FUNCTIONALITY", "dump_to_db"),
"export_files": self.getboolean("FUNCTIONALITY", "export_files"),
"visualize_data": self.getboolean("FUNCTIONALITY", "visualize_data"),
"send_messages": self.getboolean("FUNCTIONALITY", "send_messages"),
}
class XPDServerConfig(XPDConfig, ServerConfig):
"""The configuration for xpd server."""
pass
class XPDServer(BaseServer):
"""The server of XPD data analysis. It is a live dispatcher with XPDRouter subscribed."""
def __init__(self, config: XPDServerConfig):
super(XPDServer, self).__init__(config.address, prefix=config.prefix)
self.subscribe(StartStopCallback())
self.subscribe(XPDRouter(config))
def make_and_run(
cfg_file: str = None,
*,
suppress_warning: bool = True
):
"""Run the xpd data reduction server.
The server will receive message from proxy and process the data in the message. The processed data will be
visualized and exported to database and the file system.
Parameters
----------
cfg_file :
The path to configuration .ini file. The default path is "~/.config/acq/xpd_server.ini".
suppress_warning :
If True, all warning will be suppressed. Turn it to False when running in a test.
"""
if suppress_warning:
import warnings
warnings.simplefilter("ignore")
if not cfg_file:
cfg_file = find_cfg_file(CONFIG_DIR, ServerNames.xpd)
config = XPDServerConfig(allow_no_value=True)
config.read(cfg_file)
server = XPDServer(config)
server.install_qt_kicker()
server.start()
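
# Typical invocation (illustrative; the packaged entry point is expected to be
# a console script that calls this function):
#
#   make_and_run()                              # ~/.config/acq/xpd_server.ini
#   make_and_run(cfg_file='/path/to/xpd_server.ini')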
class XPDRouter(RunRouter):
"""A router that contains the callbacks for the xpd data reduction."""
def __init__(self, config: XPDConfig):
factory = XPDFactory(config)
super(XPDRouter, self).__init__(
[factory],
handler_registry={"NPY_SEQ": NumpySeqHandler}
)
class XPDFactory:
"""The factory to generate callback for xpd data reduction."""
def __init__(self, config: XPDConfig):
self.config = config
self.analysis = AnalysisStream(config)
self.func = self.config.functionality
if self.func["do_calibration"]:
self.calibration = Calibration(config)
if self.func["dump_to_db"]:
self.analysis.subscribe(self.config.an_db.v1.insert)
if self.func["export_files"]:
self.analysis.subscribe(Exporter(config))
if self.func["visualize_data"]:
self.analysis.subscribe(Visualizer(config))
if self.func["send_messages"]:
self.analysis.subscribe(Publisher(**self.config.publisher_config))
def __call__(self, name: str, doc: dict) -> tp.Tuple[list, list]:
if name == "start":
if doc.get(self.config.dark_identifier):
# dark frame run
return [], []
elif doc.get(self.config.calib_identifier):
# calibration run
                if self.func["do_calibration"]:
return [self.calibration], []
else:
return [], []
else:
# light frame run
return [self.analysis], []
return [], []
| 35.535484 | 112 | 0.642157 | 641 | 5,508 | 5.340094 | 0.291732 | 0.01636 | 0.01636 | 0.016652 | 0.056091 | 0.02454 | 0.02454 | 0.02454 | 0 | 0 | 0 | 0.00073 | 0.253813 | 5,508 | 154 | 113 | 35.766234 | 0.832117 | 0.157771 | 0 | 0.154545 | 0 | 0.009091 | 0.098598 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0.018182 | 0.1 | 0.009091 | 0.309091 | 0.009091 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
59fadeeb36901e3065564a09daa8450d35a9d7e8 | 2,965 | py | Python | src/tests/snn/test_snn.py | ibm-research-tokyo/diffsnn | 9299fc5e8542c6fde33a287f81e7ae3682b2fd9d | [
"Apache-2.0"
] | 20 | 2021-06-01T02:42:43.000Z | 2022-02-14T07:08:34.000Z | src/tests/snn/test_snn.py | ibm-research-tokyo/diffsnn | 9299fc5e8542c6fde33a287f81e7ae3682b2fd9d | [
"Apache-2.0"
] | null | null | null | src/tests/snn/test_snn.py | ibm-research-tokyo/diffsnn | 9299fc5e8542c6fde33a287f81e7ae3682b2fd9d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
''' Title '''
__author__ = 'Hiroshi Kajino <KAJINO@jp.ibm.com>'
__copyright__ = 'Copyright IBM Corp. 2020, 2021'
from copy import deepcopy
import unittest
import torch
from diffsnn.pp.snn import FullyObsSigmoidSNN
from .base import (TestBase,
GenFullyObsSigmoidSNN,
GenFullyObsSigmoidSNNWithoutKernel,
GenFullyObsSigmoidSNNWithSmallKernel,
TrainPoisson,
TrainSNN)
# ---------------- main tests ----------------
class TestPoissonFit(TrainPoisson,
GenFullyObsSigmoidSNNWithoutKernel,
TestBase,
unittest.TestCase):
''' fit Poisson process to SNN w/o kernel weights
'''
def setUp(self):
self.n_neurons = 2
self.n_epochs = 100
self.sample_size = 100
self.length = 50
def check_fit(self):
print(' * true conditional_intensity = {}'\
.format(torch.sigmoid(self.gen_model.params['bias'])))
print(' * learned one\n\t', self.trainable_model)
self.assertTrue(torch.allclose(
self.trainable_model.params['intensity_list'],
torch.sigmoid(self.gen_model.params['bias']),
atol=1e-1))
class TestPoissonFitWithNegligibleKernel(GenFullyObsSigmoidSNNWithSmallKernel, TestPoissonFit):
''' fit Poisson process to SNN w/ negligible kernel weights
'''
class TestSNNFit(TrainSNN,
GenFullyObsSigmoidSNN,
TestBase,
unittest.TestCase):
''' fit SNN to SNN
'''
def setUp(self):
self.n_neurons = 2
self.n_epochs = 20
self.sample_size = 500
self.length = 50
def check_fit(self):
print(' * true snn\n', self.gen_model)
print(' * learned snn\n', self.trainable_model)
self.assertTrue(torch.allclose(self.gen_model.params['bias'],
self.trainable_model.params['bias'],
atol=2e-1))
class TestSNNBiasFit(TrainSNN,
GenFullyObsSigmoidSNN,
TestBase,
unittest.TestCase):
''' fit SNN bias to SNN
'''
def setUp(self):
self.n_neurons = 2
self.n_epochs = 10
self.sample_size = 500
self.length = 50
def preprocess(self):
self.trainable_model.params['kernel_weight'].data \
= deepcopy(self.gen_model.params['kernel_weight'].data)
self.trainable_model.params['kernel_weight'].requires_grad = False
def check_fit(self):
print(' * true snn\n', self.gen_model)
print(' * learned snn\n', self.trainable_model)
self.assertTrue(torch.allclose(self.gen_model.params['bias'],
self.trainable_model.params['bias'],
atol=2e-1))
| 31.542553 | 95 | 0.568971 | 293 | 2,965 | 5.617747 | 0.324232 | 0.066829 | 0.087485 | 0.054678 | 0.555893 | 0.53706 | 0.465371 | 0.352369 | 0.292831 | 0.26367 | 0 | 0.019841 | 0.320067 | 2,965 | 93 | 96 | 31.88172 | 0.796627 | 0.084317 | 0 | 0.546875 | 0 | 0 | 0.093587 | 0.00783 | 0 | 0 | 0 | 0 | 0.046875 | 1 | 0.109375 | false | 0 | 0.078125 | 0 | 0.25 | 0.09375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9401e5967de0c4feb25f57d65b0bb881a0b235ab | 12,940 | py | Python | tacker/sol_refactored/controller/vnflcm_view.py | h1r0mu/tacker | 8c69dda51fcfe215c4878a86b82018d2b96e5561 | [
"Apache-2.0"
] | 116 | 2015-10-18T02:57:08.000Z | 2022-03-15T04:09:18.000Z | tacker/sol_refactored/controller/vnflcm_view.py | h1r0mu/tacker | 8c69dda51fcfe215c4878a86b82018d2b96e5561 | [
"Apache-2.0"
] | 6 | 2016-11-07T22:15:54.000Z | 2021-05-09T06:13:08.000Z | tacker/sol_refactored/controller/vnflcm_view.py | h1r0mu/tacker | 8c69dda51fcfe215c4878a86b82018d2b96e5561 | [
"Apache-2.0"
] | 166 | 2015-10-20T15:31:52.000Z | 2021-11-12T08:39:49.000Z | # Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
import re
from dateutil import parser
from oslo_log import log as logging
from tacker.sol_refactored.common import exceptions as sol_ex
from tacker.sol_refactored.common import lcm_op_occ_utils as lcmocc_utils
from tacker.sol_refactored.common import subscription_utils as subsc_utils
from tacker.sol_refactored.common import vnf_instance_utils as inst_utils
from tacker.sol_refactored import objects
LOG = logging.getLogger(__name__)
class FilterExpr(object):
def __init__(self, op, attr, values):
self.op = op
self.attr = attr
self.values = values
def match_eq(self, val):
return val == self.values[0]
def match_neq(self, val):
return val != self.values[0]
def match_in(self, val):
return val in self.values
def match_nin(self, val):
return val not in self.values
def match_gt(self, val):
return val > self.values[0]
def match_gte(self, val):
return val >= self.values[0]
def match_lt(self, val):
return val < self.values[0]
def match_lte(self, val):
return val <= self.values[0]
def match_cont(self, val):
for v in self.values:
if v in val:
return True
return False
def match_ncont(self, val):
return not self.match_cont(val)
def match(self, val):
try:
for a in self.attr:
# TODO(toshii): handle "@key"
val = val[a]
except KeyError:
LOG.debug("Attr %s not found in %s", self.attr, val)
return False
LOG.debug("Key %s type %s", self.attr, type(val))
# If not str, assume type conversion is already done.
# Note: It is assumed that the type doesn't change between calls,
# which can be problematic with KeyValuePairs.
if isinstance(self.values[0], str):
if isinstance(val, datetime):
self.values[0] = parser.isoparse(self.values[0])
            elif isinstance(val, bool):
                # bool('false') would be True, so compare the string form instead
                self.values[0] = (self.values[0].lower() == 'true')
elif isinstance(val, int):
self.values = [int(v) for v in self.values]
elif isinstance(val, float):
self.values = [float(v) for v in self.values]
return getattr(self, "match_" + self.op)(val)
class AttributeSelector(object):
def __init__(self, default_exclude_list, all_fields=None, fields=None,
exclude_fields=None, exclude_default=None):
self.exclude_fields = []
self.fields = []
if all_fields is not None:
if fields is not None or exclude_fields is not None or \
exclude_default is not None:
raise sol_ex.InvalidAttributeSelector()
# Nothing to do
elif fields is not None:
if exclude_fields is not None:
raise sol_ex.InvalidAttributeSelector()
self.fields = fields.split(',')
if exclude_default is not None:
self.exclude_fields = [v for v in default_exclude_list
if v not in self.fields]
elif exclude_fields is not None:
if exclude_default is not None:
raise sol_ex.InvalidAttributeSelector()
self.exclude_fields = exclude_fields.split(',')
else:
self.exclude_fields = default_exclude_list
def filter(self, obj, odict):
deleted = {}
if self.exclude_fields:
excl_fields = self.exclude_fields
else:
if not self.fields:
# Implies all_fields
return odict
excl_fields = [k for k in odict.keys() if k not in self.fields]
for k in excl_fields:
klist = k.split('/')
if len(klist) > 1:
# TODO(toshii): check if this nested field is nullable
pass
else:
if not obj.fields[klist[0]].nullable:
continue
val = odict
deleted_ptr = deleted
try:
for i, k1 in enumerate(klist, start=1):
if i == len(klist):
deleted_ptr[k1] = val[k1]
del val[k1]
else:
val = val[k1]
if k1 not in deleted_ptr:
deleted_ptr[k1] = {}
deleted_ptr = deleted_ptr[k1]
except KeyError:
pass
if not self.fields:
return odict
# Readd partial dictionary content
for k in self.fields:
klist = k.split('/')
val = odict
deleted_ptr = deleted
try:
for i, k1 in enumerate(klist, start=1):
if i == len(klist):
val[k1] = deleted_ptr[k1]
else:
if k1 not in val:
val[k1] = {}
val = val[k1]
deleted_ptr = deleted_ptr[k1]
except KeyError:
LOG.debug("Key %s not found in %s or %s", k1, val, deleted_ptr)
return odict
class BaseViewBuilder(object):
value_regexp = r"([^',)]+|('[^']*')+)"
value_re = re.compile(value_regexp)
simpleFilterExpr_re = re.compile(r"\(([a-z]+),([^,]+)(," +
value_regexp + r")+\)")
tildeEscape_re = re.compile(r"~([1ab])")
opOne = ['eq', 'neq', 'gt', 'gte', 'lt', 'lte']
opMulti = ['in', 'nin', 'cont', 'ncont']
def __init__(self):
pass
def parse_attr(self, attr):
def tilde_unescape(string):
def repl(m):
if m.group(1) == '1':
return '/'
elif m.group(1) == 'a':
return ','
elif m.group(1) == 'b':
return '@'
s1 = self.tildeEscape_re.sub(repl, string)
return re.sub('~0', '~', s1)
attrs = attr.split('/')
# TODO(toshii): handle "@key"
return [tilde_unescape(a) for a in attrs]
def parse_values(self, values):
loc = 0
res = []
while loc < len(values):
if values[loc] != ",":
LOG.debug("comma expected, %s at loc %d", values, loc)
raise sol_ex.InvalidAttributeFilter(
sol_detail=("value parse error. comma expected, %s" %
values))
loc += 1
m = self.value_re.match(values[loc:])
if m is None:
LOG.debug("value parse error, %s at loc %d", values, loc)
raise sol_ex.InvalidAttributeFilter(
sol_detail="value parse error")
loc += m.end()
if m.group(0).startswith("'"):
res.append(re.sub("''", "'", m.group(0)[1:-1]))
else:
res.append(m.group(0))
return res
def parse_filter(self, filter):
"""Implement SOL013 5.2 Attribute-based filtering"""
loc = 0
res = []
while True:
m = self.simpleFilterExpr_re.match(filter[loc:])
if m is None:
LOG.debug("filter %s parse error at char %d", filter, loc)
raise sol_ex.InvalidAttributeFilter(
sol_detail="filter parse error")
op = m.group(1)
if op not in self.opOne and op not in self.opMulti:
raise sol_ex.InvalidAttributeFilter(
sol_detail=("Invalid op %s" % op))
values = self.parse_values(
filter[(loc + m.end(2)):(loc + m.end(3))])
if len(values) > 1 and op not in self.opMulti:
raise sol_ex.InvalidAttributeFilter(
sol_detail=("Only one value is allowed for op %s" % op))
res.append(FilterExpr(op, self.parse_attr(m.group(2)), values))
loc += m.end()
if loc == len(filter):
return res
if filter[loc] != ';':
LOG.debug("filter %s parse error at char %d "
"(semicolon expected)", filter, loc)
raise sol_ex.InvalidAttributeFilter(
sol_detail="filter parse error. semicolon expected.")
loc += 1
def parse_selector(self, req):
"""Implement SOL013 5.3 Attribute selectors"""
params = {}
for k in ['all_fields', 'fields', 'exclude_fields', 'exclude_default']:
v = req.get(k)
if v is not None:
params[k] = v
return AttributeSelector(self._EXCLUDE_DEFAULT, **params)
def match_filters(self, val, filters):
if filters is None:
return True
for f in filters:
if not f.match(val):
return False
return True
def detail_list(self, values, filters, selector):
return [self.detail(v, selector) for v in values
if self.match_filters(v, filters)]
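
# Illustrative only (identifiers below are examples, not part of the module):
# parse_filter() turns a SOL013 attribute-based filter expression such as
#
#   (eq,vnfdId,'dummy-vnfd');(in,instantiationState,INSTANTIATED,NOT_INSTANTIATED)
#
# into a list of FilterExpr objects, and match_filters() ANDs them together:
#
#   builder = InstanceViewBuilder(endpoint='http://127.0.0.1:9890')
#   filters = builder.parse_filter("(eq,instantiationState,INSTANTIATED)")
#   results = builder.detail_list(instances, filters, selector=None)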
class InstanceViewBuilder(BaseViewBuilder):
_EXCLUDE_DEFAULT = ['vnfConfigurableProperties',
'vimConnectionInfo',
'instantiatedVnfInfo',
'metadata',
'extensions']
def __init__(self, endpoint):
self.endpoint = endpoint
def parse_filter(self, filter):
return super().parse_filter(filter)
def detail(self, inst, selector=None):
# NOTE: _links is not saved in DB. create when it is necessary.
if not inst.obj_attr_is_set('_links'):
inst._links = inst_utils.make_inst_links(inst, self.endpoint)
resp = inst.to_dict()
# remove password from vim_connection_info
# see SOL003 4.4.1.6
for vim_info in resp.get('vimConnectionInfo', {}).values():
if ('accessInfo' in vim_info and
'password' in vim_info['accessInfo']):
vim_info['accessInfo'].pop('password')
if selector is not None:
resp = selector.filter(inst, resp)
return resp
def detail_list(self, insts, filters, selector):
return super().detail_list(insts, filters, selector)
class LcmOpOccViewBuilder(BaseViewBuilder):
_EXCLUDE_DEFAULT = ['operationParams',
'error',
'resourceChanges',
'changedInfo',
'changedExtConnectivity']
def __init__(self, endpoint):
self.endpoint = endpoint
def parse_filter(self, filter):
return super().parse_filter(filter)
def detail(self, lcmocc, selector=None):
# NOTE: _links is not saved in DB. create when it is necessary.
if not lcmocc.obj_attr_is_set('_links'):
lcmocc._links = lcmocc_utils.make_lcmocc_links(lcmocc,
self.endpoint)
resp = lcmocc.to_dict()
if selector is not None:
resp = selector.filter(lcmocc, resp)
return resp
def detail_list(self, lcmoccs, filters, selector):
return super().detail_list(lcmoccs, filters, selector)
class SubscriptionViewBuilder(BaseViewBuilder):
def __init__(self, endpoint):
self.endpoint = endpoint
def parse_filter(self, filter):
return super().parse_filter(filter)
def detail(self, subsc, selector=None):
# NOTE: _links is not saved in DB. create when it is necessary.
if not subsc.obj_attr_is_set('_links'):
self_href = subsc_utils.subsc_href(subsc.id, self.endpoint)
subsc._links = objects.LccnSubscriptionV2_Links()
subsc._links.self = objects.Link(href=self_href)
resp = subsc.to_dict()
# NOTE: authentication is not included in LccnSubscription
resp.pop('authentication', None)
if selector is not None:
resp = selector.filter(subsc, resp)
return resp
def detail_list(self, subscs, filters):
return super().detail_list(subscs, filters, None)
| 35.258856 | 79 | 0.55 | 1,522 | 12,940 | 4.551905 | 0.193167 | 0.030312 | 0.016888 | 0.018476 | 0.356813 | 0.318274 | 0.269342 | 0.239752 | 0.205398 | 0.149394 | 0 | 0.009431 | 0.352628 | 12,940 | 366 | 80 | 35.355191 | 0.817596 | 0.1051 | 0 | 0.29927 | 0 | 0 | 0.066025 | 0.004072 | 0 | 0 | 0 | 0.002732 | 0 | 1 | 0.127737 | false | 0.018248 | 0.032847 | 0.058394 | 0.346715 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
940225d04a3a801bd3feb67c187f2cbc8efcab81 | 2,059 | py | Python | project-metrics/metrics_service/apis/codecov.py | rsimha/amp-github-apps | 140652c6538e9fc3c3870b12f777c3cba14bf098 | [
"Apache-2.0"
] | 36 | 2019-02-07T03:43:54.000Z | 2022-03-04T12:55:02.000Z | project-metrics/metrics_service/apis/codecov.py | rsimha/amp-github-apps | 140652c6538e9fc3c3870b12f777c3cba14bf098 | [
"Apache-2.0"
] | 450 | 2018-11-21T22:36:39.000Z | 2022-01-26T18:40:49.000Z | project-metrics/metrics_service/apis/codecov.py | rsimha/amp-github-apps | 140652c6538e9fc3c3870b12f777c3cba14bf098 | [
"Apache-2.0"
] | 42 | 2018-11-21T22:31:11.000Z | 2022-03-08T06:46:20.000Z | """Module for fetching code coverage info from Codecov."""
from agithub import base as agithub_base
from flask_api import status
import logging
from typing import Any, Dict, Optional, Text
import env
class CodecovApiError(Exception):
"""Errors encountered while querying the Codecov API."""
def __init__(self, status_code: int, err_msg: Text):
"""Constructor.
Args:
status_code: HTTP status return code of the Codecov API response.
err_msg: error message provided by the API.
"""
super(CodecovApiError, self).__init__(
'Codecov API Exception (HTTP %d): %s' % (status_code, err_msg))
class CodecovApi(agithub_base.API):
"""Codecov API interface."""
def __init__(self, *args, **kwargs):
extra_headers = {
'Authorization': 'token %s' % env.get('CODECOV_API_ACCESS_TOKEN'),
'User-Agent': 'AMPProjectMetrics/1.0.0',
'Content-Type': 'application/json'
}
props = agithub_base.ConnectionProperties(
api_url='codecov.io',
url_prefix='/api/gh',
secure_http=True,
extra_headers=extra_headers)
self.setClient(agithub_base.Client(*args, **kwargs))
self.setConnectionProperties(props)
@property
def repo(self) -> agithub_base.IncompleteRequest:
"""Returns a partial Codecov request for the repository in env.yaml."""
return self[env.get('GITHUB_REPO')]
def get_absolute_coverage(self, commit_hash: Optional[Text] = None) -> float:
"""Fetch the absolute coverage at HEAD.
Args:
commit_hash: hash of commit to fetch coverage for (default HEAD)
Raises:
CodecovApiError: if the call to the Codecov API fails.
Returns:
Code coverage percentage in the range [0-100].
"""
endpoint = (
self.repo.commits[commit_hash]
if commit_hash else self.repo.branch.main)
status_code, response = endpoint.get(limit=1)
if status_code == status.HTTP_200_OK:
return float(response['commit']['totals']['c'])
raise CodecovApiError(status_code, response['error']['reason'])
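

# Hypothetical usage (assumes CODECOV_API_ACCESS_TOKEN and GITHUB_REPO are set
# in the environment configuration):
#
#   api = CodecovApi()
#   head_coverage = api.get_absolute_coverage()
#   commit_coverage = api.get_absolute_coverage('abc123')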
| 30.731343 | 79 | 0.684798 | 261 | 2,059 | 5.229885 | 0.452107 | 0.043956 | 0.028571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006687 | 0.201068 | 2,059 | 66 | 80 | 31.19697 | 0.8231 | 0.272948 | 0 | 0 | 0 | 0 | 0.135915 | 0.033099 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.147059 | 0 | 0.382353 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
940234fb7a7c68381fd595779e6eeb95949ac3b9 | 2,564 | py | Python | src/calibrate.py | modi712/Computer-Vision | a34d3d73f883beae812c50b879f4dc8ef679b3ac | [
"MIT"
] | null | null | null | src/calibrate.py | modi712/Computer-Vision | a34d3d73f883beae812c50b879f4dc8ef679b3ac | [
"MIT"
] | null | null | null | src/calibrate.py | modi712/Computer-Vision | a34d3d73f883beae812c50b879f4dc8ef679b3ac | [
"MIT"
] | null | null | null | # Calibrates camera using chessboard video
import numpy as np
import cv2
import glob
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*7,3), np.float32)
objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
#Input Image
images = glob.glob('./Chessboard/*.jpg')
# print(len(images))
# videos = glob.glob('./Chessboard/*.mp4')
inputt = './Chessboard/chess.mp4'
count=0
capture = cv2.VideoCapture(cv2.samples.findFileOrKeep(inputt))
if not capture.isOpened():
    print('Unable to open: ' + inputt)
    exit(0)
while True:
if count>50:
break
ret, frame = capture.read()
if frame is None:
break
# fname =frame
# for fname in images:
img = frame
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Find the chess board corners
ret, corners = cv2.findChessboardCorners(gray, (7,6),None)
print(ret)
# If found, add object points, image points (after refining them)
if ret == True:
count+=1
objpoints.append(objp)
corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
imgpoints.append(corners2)
# Draw and display the corners
img = cv2.drawChessboardCorners(img, (7,6), corners2,ret)
cv2.imshow('img',img)
cv2.waitKey(500)
#break
cv2.destroyAllWindows()
#Analysis
print(str(count)+' frames computed')
# print(objpoints)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
print("mtx: \n",mtx)
# print("ret: ",ret)
# print("rvecs: ",rvecs)
# print("tvecs: ",tvecs)
#----------Results------
"""
chess.mp4 -> 68
[[824.05762458 0. 381.10745975]
[ 0. 839.01299642 134.22842609]
[ 0. 0. 1. ]]
-> 51
[[671.80835447, 0. , 313.47576239],
[ 0. , 669.23376398, 287.95174802],
[ 0. , 0. , 1. ]]
ches2.mp4 -> 1
[[692.47904804 0. 439.49980877]
[ 0. 802.40991748 204.76436888]
[ 0. 0. 1. ]]
chess3.mp4 -> 0
chess4.mp4 -> 229/51
[[860.00199971 0. 27.8037219 ]
[ 0. 777.62721079 193.25869631]
[ 0. 0. 1. ]]
"""
| 29.136364 | 101 | 0.570203 | 330 | 2,564 | 4.412121 | 0.487879 | 0.010989 | 0.010302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.152725 | 0.277301 | 2,564 | 87 | 102 | 29.471264 | 0.633028 | 0.232449 | 0 | 0.054054 | 0 | 0 | 0.065495 | 0.017572 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.081081 | 0 | 0.081081 | 0.108108 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9404e9100f31206f2975c729bb0ee10df6676295 | 1,788 | py | Python | src/currencycloud/clients/vans.py | icebotariccl/currencycloud-python | 03bb0df2743e6669790dee6f2367f9e0500a4610 | [
"MIT"
] | null | null | null | src/currencycloud/clients/vans.py | icebotariccl/currencycloud-python | 03bb0df2743e6669790dee6f2367f9e0500a4610 | [
"MIT"
] | null | null | null | src/currencycloud/clients/vans.py | icebotariccl/currencycloud-python | 03bb0df2743e6669790dee6f2367f9e0500a4610 | [
"MIT"
] | null | null | null | '''This module provides a class for VANs calls to the CC API'''
from currencycloud.http import Http
from currencycloud.resources import PaginatedCollection, Van
import deprecation
class Vans(Http):
'''This class provides an interface to the VANs endpoints of the CC API'''
def find(self, **kwargs):
'''Search for VANs that meet a number of criteria and receive a paged response.'''
response = self.get('/v2/virtual_accounts', query=kwargs)
data = [Van(self, **fields) for fields in response['virtual_accounts']]
return PaginatedCollection(data, response['pagination'])
def first(self, **params):
params['per_page'] = 1
return self.find(**params)[0]
@deprecation.deprecated(deprecated_in="2.7.5", removed_in=None,
current_version=None,
details="Use the generic find function instead")
def retrieve_subaccounts(self, resource_id, **kwargs):
'''Get a list of VANs attached to a sub-account.'''
response = self.get('/v2/virtual_accounts/subaccounts/' + resource_id, query=kwargs)
data = [Van(self, **fields) for fields in response['virtual_accounts']]
return PaginatedCollection(data, response['pagination'])
@deprecation.deprecated(deprecated_in="2.7.5", removed_in=None,
current_version=None,
details="Use the generic find function instead")
def find_subaccounts(self, **kwargs):
'''Get a list of VANS for all sub-accounts.'''
response = self.get('/v2/virtual_accounts/subaccounts/find', query=kwargs)
data = [Van(self, **fields) for fields in response['virtual_accounts']]
return PaginatedCollection(data, response['pagination'])
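# Hedged usage sketch (added): how the helpers above are typically driven. How a
# Vans instance is obtained depends on the Http base class (not shown in this
# file), so the construction below is an assumption and the query parameters are
# illustrative.
#
#   vans = Vans(config)                  # hypothetical Http-style construction
#   for van in vans.find(per_page=25):   # PaginatedCollection of Van resources
#       print(van)
#   newest = vans.first()                # first() forces per_page=1 internally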
| 47.052632 | 92 | 0.65604 | 217 | 1,788 | 5.327189 | 0.345622 | 0.077855 | 0.038927 | 0.044118 | 0.608131 | 0.608131 | 0.545848 | 0.471453 | 0.471453 | 0.471453 | 0 | 0.007988 | 0.229866 | 1,788 | 37 | 93 | 48.324324 | 0.831518 | 0.162192 | 0 | 0.48 | 0 | 0 | 0.17663 | 0.047554 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16 | false | 0 | 0.12 | 0 | 0.48 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
94076e38767f1fdb24941a14891524bbde794118 | 5,703 | bzl | Python | tests/util/defs.bzl | guw/rules_pkg | 0541b09ca8b68c40e8868fd3c4a748e1bb5eafa3 | [
"Apache-2.0"
] | 62 | 2021-09-21T18:58:02.000Z | 2022-03-07T02:17:43.000Z | third_party/rules_pkg-0.7.0/tests/util/defs.bzl | Vertexwahn/FlatlandRT | 37d09fde38b25eff5f802200b43628efbd1e3198 | [
"Apache-2.0"
] | null | null | null | third_party/rules_pkg-0.7.0/tests/util/defs.bzl | Vertexwahn/FlatlandRT | 37d09fde38b25eff5f802200b43628efbd1e3198 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules to aid testing"""
load("//pkg/private:pkg_files.bzl", "add_label_list", "write_manifest")
load("@bazel_skylib//lib:unittest.bzl", "analysistest", "asserts")
load("@rules_python//python:defs.bzl", "py_binary")
def _directory_impl(ctx):
out_dir_file = ctx.actions.declare_directory(ctx.attr.outdir or ctx.attr.name)
args = ctx.actions.args()
args.add(out_dir_file.path)
for fn in ctx.attr.filenames:
args.add(fn)
args.add(ctx.attr.contents)
ctx.actions.run(
outputs = [out_dir_file],
inputs = [],
executable = ctx.executable._dir_creator,
arguments = [args],
)
return DefaultInfo(files = depset([out_dir_file]))
directory = rule(
doc = """Helper rule to create simple TreeArtifact structures
We would normally just use genrules for this, but their directory output
creation capabilities are "unsound".
""",
implementation = _directory_impl,
attrs = {
"filenames": attr.string_list(
doc = """Paths to create in the directory.
Paths containing directories will also have the intermediate directories created too.""",
),
"contents": attr.string(),
"outdir": attr.string(),
"_dir_creator": attr.label(
default = ":create_directory_with_contents",
executable = True,
cfg = "exec",
),
},
)
def _fake_artifact_impl(ctx):
out_file = ctx.actions.declare_file(ctx.attr.name)
content = ["echo " + rf.path for rf in ctx.files.runfiles]
ctx.actions.write(
output = out_file,
content = "\r\n".join(content),
is_executable = ctx.attr.executable,
)
return DefaultInfo(
files = depset([out_file] + ctx.files.files),
runfiles = ctx.runfiles(files = ctx.files.runfiles),
executable = out_file if ctx.attr.executable else None,
)
fake_artifact = rule(
doc = """Rule to create a fake artifact that depends on its srcs.
This rule creates a file that appears to depend on its srcs and passes along
other targets in DefaultInfo as files and/or runfiles. It creates a script that
echos all the file names. It is useful for building an object that is like a
cc_binary in complexity, but does not depend on a large toolchain.""",
implementation = _fake_artifact_impl,
attrs = {
"deps": attr.label_list(
doc = "Dependencies to trigger other rules, but are then discarded.",
allow_files = True,
),
"files": attr.label_list(
doc = "Deps which are passed in DefaultInfo as files.",
allow_files = True,
),
"runfiles": attr.label_list(
doc = "Deps which are passed in DefaultInfo as runfiles.",
allow_files = True,
),
"executable": attr.bool(
doc = "If True, the DefaultInfo will be marked as executable.",
default = False,
),
},
)
def _write_content_manifest_impl(ctx):
content_map = {} # content handled in the manifest
file_deps = [] # inputs we depend on
add_label_list(ctx, content_map, file_deps, ctx.attr.srcs)
write_manifest(ctx, ctx.outputs.out, content_map, use_short_path = ctx.attr.use_short_path)
_write_content_manifest = rule(
doc = """Helper rule to write the content manifest for a pkg_*.
This is intended only for testing the manifest creation features.""",
implementation = _write_content_manifest_impl,
attrs = {
"srcs": attr.label_list(
doc = """List of source inputs.""",
allow_files = True,
),
"out": attr.output(),
"use_short_path": attr.bool(
doc = """Use the rootless path in the manifest.
Useful to ensure that the platform-specific prefix (i.e. parts
including something like "x64_windows-fastbuild") isn't present in
paths in the manifest.
See also https://docs.bazel.build/versions/main/skylark/lib/File.html#path
""",
default = True,
),
},
)
def write_content_manifest(name, srcs):
_write_content_manifest(
name = name,
srcs = srcs,
use_short_path = True,
out = name + ".manifest",
)
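# Hedged usage sketch (added): invoking the macro above from a BUILD file; the
# package path and target names are illustrative assumptions.
#
#   load("//tests/util:defs.bzl", "write_content_manifest")
#
#   write_content_manifest(
#       name = "sample",
#       srcs = [":some_filegroup"],
#   )
#
# This produces "sample.manifest" with rootless (short) paths for every src.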
############################################################
# Test boilerplate
############################################################
def _generic_base_case_test_impl(ctx):
env = analysistest.begin(ctx)
# Nothing here intentionally, this is simply an attempt to verify successful
# analysis.
return analysistest.end(env)
generic_base_case_test = analysistest.make(
_generic_base_case_test_impl,
attrs = {},
)
# Generic negative test boilerplate
def _generic_negative_test_impl(ctx):
env = analysistest.begin(ctx)
asserts.expect_failure(env, ctx.attr.reason)
return analysistest.end(env)
generic_negative_test = analysistest.make(
_generic_negative_test_impl,
attrs = {
"reason": attr.string(
default = "",
),
},
expect_failure = True,
)
| 32.403409 | 95 | 0.639663 | 714 | 5,703 | 4.956583 | 0.344538 | 0.01978 | 0.028257 | 0.018084 | 0.103419 | 0.046906 | 0.046906 | 0.027691 | 0.027691 | 0.027691 | 0 | 0.002315 | 0.242504 | 5,703 | 175 | 96 | 32.588571 | 0.816898 | 0.137647 | 0 | 0.178295 | 0 | 0.007752 | 0.337872 | 0.029954 | 0 | 0 | 0 | 0 | 0.015504 | 1 | 0.046512 | false | 0.023256 | 0 | 0 | 0.077519 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
940780137a7561bcd2fdc5e84bbb4666938c7d61 | 4,799 | py | Python | systems/cluster_utils.py | cltl/LongTailIdentity | 525f6eb10ce6c8eba1d6f462a900fd0ebd0f97a7 | [
"Apache-2.0"
] | 1 | 2019-02-07T06:30:35.000Z | 2019-02-07T06:30:35.000Z | systems/cluster_utils.py | cltl/LongTailIdentity | 525f6eb10ce6c8eba1d6f462a900fd0ebd0f97a7 | [
"Apache-2.0"
] | null | null | null | systems/cluster_utils.py | cltl/LongTailIdentity | 525f6eb10ce6c8eba1d6f462a900fd0ebd0f97a7 | [
"Apache-2.0"
] | null | null | null | import json
from collections import defaultdict
import random
def transform_to_json(data):
a_json = {}
cluster_id=1
for pid, separate_ids in data.items():
for spid in separate_ids:
a_json[spid]=cluster_id
cluster_id+=1
return a_json
def create_keys_per_name(data):
"""
Get dictionary where keys are property combinations and values are local IDs.
Return data where the keys are names, whereas the values are dictionaries (properties, ids).
"""
keys_per_name={}
for key, ids in data.items():
name, *rest=key
key_tuple=tuple(key)
if name not in keys_per_name.keys():
keys_per_name[name]=defaultdict(set)
keys_per_name[name][key_tuple]=ids
return keys_per_name
def obtain_current_value_set(combination, merged):
# first check if this combination has already been merged
if combination in merged.keys():
new_combination=merged[combination]
#print('found merge value for', combination, new_combination)
else:
new_combination=combination
return new_combination
def merge_or_not(new_combination, new_combination2):
to_merge=False
new_key=[]
#print()
#print('Comparing', new_combination, new_combination2)
for key_pos in range(0,len(new_combination)): # if there is a clash, then set to_merge to False
if new_combination[key_pos]!=new_combination2[key_pos] and new_combination[key_pos] and new_combination2[key_pos]: # clash means two non-empty values are different
to_merge=False
break
elif new_combination[key_pos] and not new_combination2[key_pos]: # store the property value if there is no clash, whichever combination has it
new_key.append(new_combination[key_pos])
to_merge=True
elif new_combination2[key_pos] and not new_combination[key_pos]: # store the property value if there is no clash, whichever combination has it
new_key.append(new_combination2[key_pos])
to_merge=True
else: # they both already had the value, just pick one
new_key.append(new_combination[key_pos])
#print(to_merge, new_key)
return to_merge, new_key
def decide_merging(combinations):
"""
Given all local contexts for a name, decide which of them to merge. We do this by comparing property value lists.
"""
merged={}
iteration=0
while(True):
num_merged=0
#print(combinations)
for combination in combinations: # list of lists
for combination2 in combinations:
if combination>combination2:
new_combination=obtain_current_value_set(tuple(combination), merged)
new_combination2=obtain_current_value_set(tuple(combination2), merged)
# now decide on merging!
to_merge, new_key = merge_or_not(new_combination, new_combination2)
# if we merge these two, then assign each of them to the superior combination of property values
if to_merge:
num_merged+=1
merged[tuple(combination)]=tuple(new_key)
merged[tuple(combination2)]=tuple(new_key)
iteration+=1
#print(merged)
#print('Iteration %d, number of merges %d' % (iteration, num_merged))
if iteration>2:
print(iteration)
if num_merged==0:
break
#input('continue?')
return merged
def lookup_key(d, test_key):
"""
For debugging: Check whether a key exists among the IDs.
"""
for n, vals in d.items():
for v, ids in vals.items():
if test_key in ids:
print(test_key, n, len(vals))
return True
return False
def perform_merging(keys_per_name):
"""
Perform clustering between the local contexts for all names.
"""
new_data=defaultdict(set)
for name, prop_vals in keys_per_name.items(): # Iterate over all names
prop_combinations=list(prop_vals.keys())
#random.shuffle(prop_combinations)
merged_vals=decide_merging(prop_combinations)
for values, ids in prop_vals.items():
values_tuple=tuple(values)
if values_tuple in merged_vals.keys(): # if these values are to be merged, then get their new/extended set of properties
new_key=tuple([name] + list(merged_vals[values_tuple]))
new_data[new_key] |= ids
else:
new_data[values_tuple] |= ids
return new_data
def count_values(data):
s=0
for k,v in data.items():
s+=len(v)
return s
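# Hedged worked example (added): two local contexts for the name 'john' carry
# complementary, non-clashing property values, so decide_merging maps both keys
# to a single merged combination. The sample IDs are illustrative.
if __name__ == '__main__':
    sample = {
        ('john', '1970', ''): {'doc1#e1'},
        ('john', '', 'actor'): {'doc2#e5'},
    }
    per_name = create_keys_per_name(sample)
    # Expected: both keys map to ('john', '1970', 'actor')
    print(decide_merging(list(per_name['john'].keys())))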
| 36.633588 | 171 | 0.635757 | 629 | 4,799 | 4.648649 | 0.227345 | 0.071819 | 0.030096 | 0.04104 | 0.163817 | 0.100205 | 0.100205 | 0.056772 | 0.056772 | 0.056772 | 0 | 0.006733 | 0.288185 | 4,799 | 130 | 172 | 36.915385 | 0.849239 | 0.267972 | 0 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.034091 | 0 | 0.227273 | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9407ea96d7707d8b52c8d7cf4a197411ba311970 | 2,151 | py | Python | hypotonic/__init__.py | mscavnicky/hypotonic | 499ea821988c34ae5492979f4f8056c4a45b3b1f | [
"MIT"
] | 8 | 2019-01-01T13:50:00.000Z | 2020-09-06T16:15:51.000Z | hypotonic/__init__.py | mscavnicky/hypotonic | 499ea821988c34ae5492979f4f8056c4a45b3b1f | [
"MIT"
] | 1 | 2019-02-24T12:13:46.000Z | 2019-02-24T19:50:49.000Z | hypotonic/__init__.py | mscavnicky/hypotonic | 499ea821988c34ae5492979f4f8056c4a45b3b1f | [
"MIT"
] | null | null | null | import logging
import importlib
import asyncio
import aiohttp
logger = logging.getLogger('hypotonic')
class Hypotonic:
def __init__(self, url=None):
self.commands = []
self.results = []
self.errors = []
if url:
self.commands.append(('get', (url,), {}))
async def worker(self, i, session, queue):
logger.debug(f"Worker {i} starting.")
module = importlib.import_module('hypotonic.command')
while True:
commands, context, data = await queue.get()
command, args, kwargs = commands.pop()
try:
logger.debug(("Start", i, command, args, kwargs))
# Dynamically load the command function.
func = getattr(module, command)
async for context, data in func(session, context, data, *args, **kwargs):
if not commands:
self.results.append(data)
else:
logger.debug(("Queue", i, commands, context, data))
queue.put_nowait((commands.copy(), context, data))
logger.debug(("Stop", i, command, args, kwargs))
except Exception as error:
self.errors.append(((command, args, kwargs), context, error))
logger.debug(('Error', i, command, args, kwargs, error))
finally:
queue.task_done()
async def run(self):
session = aiohttp.ClientSession(raise_for_status=True)
queue = asyncio.Queue()
tasks = []
for i in range(4):
loop = asyncio.get_event_loop()
tasks.append(loop.create_task(self.worker(i, session, queue)))
queue.put_nowait((list(reversed(self.commands)), None, {}))
await queue.join()
for task in tasks:
task.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
await session.close()
return self.results, self.errors
def data(self):
"""Return all the scraped data as a list of dicts."""
loop = asyncio.new_event_loop()
try:
loop.run_until_complete(self.run())
return self.results, self.errors
finally:
loop.close()
def __getattr__(self, attr):
def apply(*args, **kwargs):
self.commands.append((attr, args, kwargs))
return self
return apply
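# Hedged usage sketch (added): attribute access queues commands that are resolved
# against hypotonic.command when data() runs, so scrapers read as chains. The
# command names and selectors below are assumptions about that module's contents.
#
#   results, errors = (
#       Hypotonic('http://books.toscrape.com/')
#           .find('.product_pod h3 a')
#           .set('title')
#           .data()
#   )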
| 26.8875 | 81 | 0.62901 | 263 | 2,151 | 5.060837 | 0.338403 | 0.060105 | 0.063862 | 0.047333 | 0.040571 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000611 | 0.239424 | 2,151 | 79 | 82 | 27.227848 | 0.812958 | 0.040446 | 0 | 0.101695 | 0 | 0 | 0.033042 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067797 | false | 0 | 0.084746 | 0 | 0.237288 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
940830c796f500a7ec43a9e9a73df7a17bb2c403 | 714 | py | Python | 03. Programacion orientada a objetos/04. clases y objetos/e1.py | Cidryl/python-desde-cero | fade09d13ab0ed0cbb4f45a49a4ad9e3980f3276 | [
"MIT"
] | null | null | null | 03. Programacion orientada a objetos/04. clases y objetos/e1.py | Cidryl/python-desde-cero | fade09d13ab0ed0cbb4f45a49a4ad9e3980f3276 | [
"MIT"
] | null | null | null | 03. Programacion orientada a objetos/04. clases y objetos/e1.py | Cidryl/python-desde-cero | fade09d13ab0ed0cbb4f45a49a4ad9e3980f3276 | [
"MIT"
] | null | null | null | class Alumno:
def declarar(self,nombre,dato):
self.nombre=nombre
self.puntuacion=dato
def visualizar(self):
print("Nombre:",self.nombre)
print("Puntuacion:",self.puntuacion)
def estadistica(self):
if self.puntuacion<=4:
print("insuficiente")
elif self.puntuacion>=5:
print("notable")
elif self.puntuacion>=8:
print("sobresaliente")
else:
print("Libre")
# bloque principal
alumno1=Alumno()
alumno1.declarar("diego",2)
alumno1.visualizar()
alumno1.estadistica()
alumno2=Alumno()
alumno2.declarar("ana",10)
alumno2.visualizar()
alumno2.estadistica() | 22.3125 | 45 | 0.592437 | 70 | 714 | 6.042857 | 0.414286 | 0.165485 | 0.085106 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02729 | 0.281513 | 714 | 32 | 46 | 22.3125 | 0.797271 | 0.022409 | 0 | 0 | 0 | 0 | 0.094595 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.166667 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9408773ad197cea530dbf06232c18e80bd339f4d | 5,487 | py | Python | sagas/corpus/searcher.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | 3 | 2020-01-11T13:55:38.000Z | 2020-08-25T22:34:15.000Z | sagas/corpus/searcher.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | null | null | null | sagas/corpus/searcher.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | 1 | 2021-01-01T05:21:44.000Z | 2021-01-01T05:21:44.000Z | from bert_serving.client import BertClient
import pandas as pd
import numpy as np
import json
import sagas.tracker_fn as tc
from sagas.conf.conf import cf
def search_in(text, lang):
with open(f'{cf.conf_dir}/stack/crawlers/langcrs/all_{lang}.json') as json_file:
sents=json.load(json_file)
return [sent for sent in sents if sent['text']==text]
def search_in_list(text, langs):
rs={}
for lang in langs:
rs[lang]=search_in(text, lang)
return rs
def sents_summary(sents, source):
from sagas.nlu.uni_remote import dep_parse
from sagas.nlu.uni_remote_viz import list_contrast
from sagas.conf.conf import cf
from sagas.nlu.utils import fix_sents
from sagas.nlu.uni_parser import get_chunks
sents=fix_sents(sents, source)
engine=cf.engine(source)
doc_jsonify, resp = dep_parse(sents, source, engine, ['predicts'])
types=[]
if doc_jsonify is None:
raise Exception(f'Cannot parse sentence for lang {source}')
if len(resp['predicts']) > 0:
rs=resp['predicts']
else:
rs = get_chunks(doc_jsonify)
for serial, r in enumerate(rs):
print(f"{serial}. {r['type']} -> {r['word']}")
types.append(f"{source}:{r['type']}")
list_contrast(rs, source)
return types
class CorpusSearcher(object):
def __init__(self, model_file='spacy-2.2/data/embedded_corpus.pkl'):
from os.path import expanduser
self.bc = BertClient()
self.model_file=expanduser(model_file)
def train(self, quotes, source_col='text'):
embeddings = self.bc.encode(quotes[source_col].to_list())
quotes['EMBEDDINGS'] = embeddings.tolist()
# Persist to pickle
quotes.to_pickle(self.model_file)
def train_corpus(self, data_file, source_col='text'):
# f'{cf.conf_dir}/stack/crawlers/langcrs/all_{lang}.json'
dfjson = pd.read_json(data_file)
self.train(dfjson, source_col=source_col)
def load_quotes_and_embeddings(self, file):
quotes = pd.read_pickle(file)
# change dtype in place for memory efficiency
quotes['EMBEDDINGS'] = quotes['EMBEDDINGS'].apply(
lambda arr: np.array(arr, dtype='float32')
)
quote_embeddings = np.stack(quotes.EMBEDDINGS.values)
# reduce memory footprint by dropping column
quotes.drop('EMBEDDINGS', axis='columns')
# normalize embeddings for cosine distance
embedding_sums = quote_embeddings.sum(axis=1)
normed_embeddings = quote_embeddings / embedding_sums[:, np.newaxis]
return quotes, normed_embeddings
def create_index(self, embeddings):
import faiss
"""
Create an index over the quote embeddings for fast similarity search.
"""
dim = embeddings.shape[1]
index = faiss.IndexFlatL2(dim)
index.add(embeddings)
return index
def search(self, text, cols, top_result=5):
text_embedding = self.bc.encode([text])
normalized_text_embedding = text_embedding / text_embedding.sum()
quotes, embeddings = self.load_quotes_and_embeddings(self.model_file)
index = self.create_index(embeddings)
_, idx = index.search(normalized_text_embedding, top_result)
# relevant_quotes = quotes.iloc[idx.flatten()].text.values
# relevant_chapters = quotes.iloc[idx.flatten()].chapter.values
rs=[]
for col in cols:
rs.append(quotes.iloc[idx.flatten()][col].values)
# relevant_chapters = quotes.iloc[idx.flatten()]['chapter'].values
return rs
@staticmethod
def parse_controls(results):
rs = []
for lang, v in results.items():
for sent in v:
rs.append((sent['translate'], lang, sent['translit']))
return rs
def run(self, text, langs=None, top_result=5, summary=False, verbose=True):
"""
$ python -m sagas.corpus.searcher run 'I read a letter.'
$ python -m sagas.corpus.searcher run 'I read a letter.' ja,id
$ python -m sagas.corpus.searcher run 'I read a letter.' ja,id,fa 2 True False
:param text:
:return:
"""
# First retrieve the English sentences most similar to the given text by embedding similarity
relevant_quotes, relevant_chapters = self.search(text, ['text', 'chapter'], top_result)
summary_info=[]
for q in range(top_result):
tc.emp('magenta', '>' + relevant_quotes[q])
tc.emp('green', relevant_chapters[q])
if langs is not None:
# The corpora are all aligned against English, so the matched English sentence can be used directly to look up the corresponding sentences in other languages
# search_in_list('I write a letter.', ['ja', 'fa', 'id'])
results=search_in_list(relevant_quotes[q], langs)
if verbose:
tc.emp('blue', json.dumps(results, indent=2, ensure_ascii=False))
if summary:
all_types = []
rs_c=CorpusSearcher.parse_controls(results)
for r in rs_c:
if r[2]!='':
tc.emp('red', f".. {r[2]}")
types=sents_summary(r[0], r[1])
all_types.extend(types)
summary_info.append((relevant_quotes[q], all_types))
tc.emp('cyan', '✁', '-' * 30)
for s in summary_info:
tc.info(s)
def end(self):
self.bc.close()
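# Hedged usage sketch (added): end-to-end flow over the methods defined above.
# The corpus path is an illustrative assumption, and a bert-serving server must
# already be running for BertClient to connect.
def demo_search():
    searcher = CorpusSearcher(model_file='~/embedded_corpus.pkl')
    searcher.train_corpus('all_en.json', source_col='text')  # hypothetical corpus file
    quotes, chapters = searcher.search('I read a letter.', ['text', 'chapter'], top_result=3)
    for quote, chapter in zip(quotes, chapters):
        print(chapter, '->', quote)
    searcher.end()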
if __name__ == '__main__':
import fire
fire.Fire(CorpusSearcher)
| 34.949045 | 95 | 0.612539 | 694 | 5,487 | 4.684438 | 0.29683 | 0.01661 | 0.014765 | 0.024608 | 0.144263 | 0.114734 | 0.099354 | 0.099354 | 0.099354 | 0.065518 | 0 | 0.004501 | 0.271186 | 5,487 | 156 | 96 | 35.173077 | 0.808202 | 0.131766 | 0 | 0.064815 | 0 | 0 | 0.074283 | 0.018679 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.12963 | 0 | 0.314815 | 0.009259 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9408ba6fbe5aa752679b16a7faeb0c1c96aadb6e | 882 | py | Python | Mobility Algorithms/Test/link_identifier_test.py | James-OHara/NCHRP-BSM-Traffic-Measures | d6842c9dc63de8c2d470482fbfd1ec91a9c2ae56 | [
"Apache-2.0"
] | null | null | null | Mobility Algorithms/Test/link_identifier_test.py | James-OHara/NCHRP-BSM-Traffic-Measures | d6842c9dc63de8c2d470482fbfd1ec91a9c2ae56 | [
"Apache-2.0"
] | null | null | null | Mobility Algorithms/Test/link_identifier_test.py | James-OHara/NCHRP-BSM-Traffic-Measures | d6842c9dc63de8c2d470482fbfd1ec91a9c2ae56 | [
"Apache-2.0"
] | null | null | null | import unittest
import numpy as np
from bsm_stream_vector import LinkIdentifier
"""Test LinkIndentifier class in bsm_stream_vector to ensure the BSMs are being assigned to the correct Measures Estimation link
"""
class LinkIdentifierTest(unittest.TestCase):
#This test only works if findLink in LinkIndentifier returns [0,7]
def setUp(self):
self.link_identifier = LinkIdentifier("i405links.csv")
def test_findLink(self):
"""Test that BSMs in bsm_sample.csv are assigned to the correct Measures Estimation link
"""
with open("bsm_sample.csv") as in_f:
is_header = True
for row in in_f:
if is_header:
is_header = False
continue
data = row.split(',')
point = np.array([float(data[14]),float(data[15])])
link = int(data[8])
self.assertTrue(link == self.link_identifier.findLink(point))
if __name__ == '__main__':
unittest.main() | 30.413793 | 129 | 0.727891 | 127 | 882 | 4.88189 | 0.511811 | 0.03871 | 0.048387 | 0.064516 | 0.135484 | 0.135484 | 0.135484 | 0 | 0 | 0 | 0 | 0.013699 | 0.172336 | 882 | 29 | 130 | 30.413793 | 0.835616 | 0.190476 | 0 | 0 | 0 | 0 | 0.061017 | 0 | 0 | 0 | 0 | 0 | 0.052632 | 1 | 0.105263 | false | 0 | 0.157895 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
940bf9f6bcf066af4e32b8cbf37d9f28635da2d5 | 8,350 | py | Python | TSP/model_free.py | machine-reasoning-ufrgs/graph-nn | 7489b7b3d6a4750245e3f506982d98b56a74b6bb | [
"MIT"
] | 1 | 2019-11-04T15:56:00.000Z | 2019-11-04T15:56:00.000Z | TSP/model_free.py | machine-reasoning-ufrgs/graph-nn | 7489b7b3d6a4750245e3f506982d98b56a74b6bb | [
"MIT"
] | null | null | null | TSP/model_free.py | machine-reasoning-ufrgs/graph-nn | 7489b7b3d6a4750245e3f506982d98b56a74b6bb | [
"MIT"
] | 2 | 2019-09-21T12:10:56.000Z | 2021-04-17T13:55:32.000Z |
import sys, os
import tensorflow as tf
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from graphnn_free import GraphNN
from mlp import Mlp
def build_network(d):
# Define hyperparameters
d = d
learning_rate = 2e-5
l2norm_scaling = 1e-10
global_norm_gradient_clipping_ratio = 0.65
# Define a placeholder for the answers to the decision problems
route_exists = tf.placeholder( tf.float32, shape = (None,), name = 'route_exists' )
# Define a placeholder for the cost of each route
route_costs = tf.placeholder( tf.float32, shape=(None,1), name='route_costs')
# Define a placeholder for the edges mask
edges_mask = tf.placeholder( tf.float32, shape = (None,), name = 'edges_mask' )
# Define placeholders for the list of number of vertices and edges per instance
n_vertices = tf.placeholder( tf.int32, shape = (None,), name = 'n_vertices')
n_edges = tf.placeholder( tf.int32, shape = (None,), name = 'edges')
#
EV_matrix = tf.placeholder( tf.float32, shape = (None,None), name = "EV" )
edge_weight = tf.placeholder( tf.float32, shape = (None,1), name = "edge_weight" )
target_cost = tf.placeholder( tf.float32, shape = (None,1), name = "target_cost" )
time_steps = tf.placeholder( tf.int32, shape = (), name = "time_steps" )
initial_embedding_mlp = Mlp(
layer_sizes = [ d for _ in range(3) ],
activations = [ tf.nn.relu for _ in range(3) ],
output_size = d,
name = 'E_init_MLP',
name_internal_layers = True,
kernel_initializer = tf.contrib.layers.xavier_initializer(),
bias_initializer = tf.zeros_initializer()
)
edge_initial_embeddings = initial_embedding_mlp(
tf.concat(
[ edge_weight, target_cost ],
axis = 1
)
)
vertex_initial_embeddings = tf.get_variable(
initializer = tf.random_normal( (1,d) ),
dtype = tf.float32, name='V_init'
)
total_n = tf.shape( EV_matrix )[1]
tiled_and_normalized_vertex_initial_embeddings = tf.tile(
tf.div(
vertex_initial_embeddings,
tf.sqrt( tf.cast( total_n, tf.float32 ) )
),
[ total_n, 1 ]
)
# Define GNN dictionary
GNN = {}
# Define Graph neural network
gnn = GraphNN(
{
# V is the set of vertex embeddings
'V': d,
# E is the set of edge embeddings
'E': d
},
{
# M is a E×V adjacency matrix connecting each edge to the vertices it is connected to
'EV': ('E','V')
},
{
# V_msg_E is a MLP which computes messages from vertex embeddings to edge embeddings
'V_msg_E': ('V','E'),
# E_msg_V is a MLP which computes messages from edge embeddings to vertex embeddings
'E_msg_V': ('E','V')
},
{
# V(t+1) ← Vu( EVᵀ × E_msg_V(E(t)) )
'V': [
{
'mat': 'EV',
'msg': 'E_msg_V',
'transpose?': True,
'var': 'E'
}
],
# E(t+1) ← Eu( EV × V_msg_E(V(t)), W, C )
'E': [
{
'mat': 'EV',
'msg': 'V_msg_E',
'var': 'V'
}
]
},
name='TSP'
)
# Populate GNN dictionary
GNN['gnn'] = gnn
GNN['route_exists'] = route_exists
GNN['route_costs'] = route_costs
GNN['edges_mask'] = edges_mask
GNN['n_vertices'] = n_vertices
GNN['n_edges'] = n_edges
GNN["EV"] = EV_matrix
GNN["W"] = edge_weight
GNN["C"] = target_cost
GNN["time_steps"] = time_steps
# Define E_vote, which will compute one logit for each edge
E_vote_MLP = Mlp(
layer_sizes = [ d for _ in range(3) ],
activations = [ tf.nn.relu for _ in range(3) ],
output_size = 1,
name = 'E_vote',
name_internal_layers = True,
kernel_initializer = tf.contrib.layers.xavier_initializer(),
bias_initializer = tf.zeros_initializer()
)
vote_bias = tf.get_variable(initializer=tf.zeros_initializer(), shape=(), dtype=tf.float32, name='vote_bias')
# Get the last embeddings
last_states = gnn(
{ "EV": EV_matrix },
{ "V": tiled_and_normalized_vertex_initial_embeddings, "E": edge_initial_embeddings },
time_steps = time_steps
)
GNN["last_states"] = last_states
E_n = last_states['E'].h
# Compute a vote for each embedding
#E_vote = tf.reshape(E_vote_MLP(tf.concat([E_n,route_costs], axis=1)), [-1])
E_vote = tf.reshape(E_vote_MLP(E_n), [-1])
E_prob = tf.sigmoid(E_vote)
# Compute the number of problems in the batch
num_problems = tf.shape(n_vertices)[0]
# n_edges_acc[i] is the number of edges in the batch up to the i-th instance
n_edges_acc = tf.map_fn(lambda i: tf.reduce_sum(tf.gather(n_edges, tf.range(0,i))), tf.range(0,num_problems))
# Compute decision predictions (one per problem)
_, pred_logits = tf.while_loop(
lambda i, predictions: tf.less(i, num_problems),
lambda i, predictions:
(
(i+1),
predictions.write(
i,
#tf.reduce_mean(tf.gather(E_vote, tf.range(n_edges_acc[i], n_edges_acc[i] + n_edges[i])))
tf.reduce_mean( E_vote[n_edges_acc[i]:n_edges_acc[i]+n_edges[i]] )
)
),
[0, tf.TensorArray(size=num_problems, dtype=tf.float32)]
)
pred_logits = pred_logits.stack() + vote_bias
GNN['predictions'] = tf.sigmoid(pred_logits)
# Count the number of edges that appear in the solution
pos_edges_n = tf.reduce_sum(edges_mask)
# Count the number of edges that do not appear in the solution
neg_edges_n = tf.reduce_sum(tf.subtract(tf.ones_like(edges_mask), edges_mask))
# Define edges loss
GNN['loss_edges'] = tf.losses.sigmoid_cross_entropy(
multi_class_labels = edges_mask,
logits = E_vote,
weights = tf.add(
tf.scalar_mul(
tf.divide(tf.add(pos_edges_n,neg_edges_n),pos_edges_n),
edges_mask),
tf.scalar_mul(
tf.divide(tf.add(pos_edges_n,neg_edges_n),neg_edges_n),
tf.subtract(tf.ones_like(edges_mask), edges_mask)
)
)
)
# Define decision loss
GNN['loss_decision'] = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=route_exists, logits=pred_logits))
# Compute true positives, false positives, true negatives, false negatives
GNN['true_pos'] = tf.reduce_sum(tf.multiply(route_exists, tf.cast(tf.equal(route_exists, tf.round(GNN['predictions'])), tf.float32)))
GNN['false_pos'] = tf.reduce_sum(tf.multiply(route_exists, tf.cast(tf.not_equal(route_exists, tf.round(GNN['predictions'])), tf.float32)))
GNN['true_neg'] = tf.reduce_sum(tf.multiply(1-route_exists, tf.cast(tf.equal(route_exists, tf.round(GNN['predictions'])), tf.float32)))
GNN['false_neg'] = tf.reduce_sum(tf.multiply(1-route_exists, tf.cast(tf.not_equal(route_exists, tf.round(GNN['predictions'])), tf.float32)))
# Define edges accuracy
GNN['acc_edges'] = tf.reduce_mean(tf.cast(tf.equal(edges_mask, tf.round(E_prob)), tf.float32))
# Define decision accuracy
GNN['acc_decision'] = tf.reduce_mean(tf.cast(tf.equal(route_exists, tf.round(GNN['predictions'])), tf.float32))
# Define optimizer
optimizer = tf.train.AdamOptimizer(name='Adam', learning_rate=learning_rate)
# Compute cost relative to L2 normalization
vars_cost = tf.add_n([ tf.nn.l2_loss(var) for var in tf.trainable_variables() ])
# Define gradients and train step
for loss_type in ['edges','decision']:
grads, _ = tf.clip_by_global_norm(tf.gradients(GNN['loss_' + loss_type] + tf.multiply(vars_cost, l2norm_scaling),tf.trainable_variables()),global_norm_gradient_clipping_ratio)
GNN['train_step_' + loss_type] = optimizer.apply_gradients(zip(grads, tf.trainable_variables()))
#end
# Return GNN dictionary
return GNN
#end
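# Hedged usage sketch (added): wiring the dictionary returned by build_network
# into a TF1 session. The feed values are dummies for a single instance with n
# vertices and m edges; the shapes shown are read off the placeholders above.
#
#   GNN = build_network(d=64)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       feed = {
#           GNN['EV']: EV,                    # (m, n) edge-vertex incidence matrix
#           GNN['W']: weights,                # (m, 1) edge weights
#           GNN['C']: costs,                  # (m, 1) target route cost per edge
#           GNN['route_costs']: route_costs,  # (batch, 1)
#           GNN['route_exists']: labels,      # (batch,)
#           GNN['edges_mask']: mask,          # (m,) 1.0 for edges on the route
#           GNN['n_vertices']: [n],
#           GNN['n_edges']: [m],
#           GNN['time_steps']: 10,
#       }
#       _, acc = sess.run([GNN['train_step_decision'], GNN['acc_decision']], feed)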
| 39.57346 | 183 | 0.600719 | 1,128 | 8,350 | 4.219858 | 0.198582 | 0.030252 | 0.027311 | 0.027731 | 0.389286 | 0.329622 | 0.295378 | 0.236765 | 0.214076 | 0.214076 | 0 | 0.012616 | 0.278563 | 8,350 | 210 | 184 | 39.761905 | 0.776726 | 0.186108 | 0 | 0.10596 | 0 | 0 | 0.066894 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.006623 | false | 0 | 0.02649 | 0 | 0.039735 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
940c0b72ebd459534046c762a3ab4ce951637c18 | 1,593 | py | Python | ixnetwork_restpy/pytest_tests/tests/test_async_operation.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 20 | 2019-05-07T01:59:14.000Z | 2022-02-11T05:24:47.000Z | ixnetwork_restpy/pytest_tests/tests/test_async_operation.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 60 | 2019-04-03T18:59:35.000Z | 2022-02-22T12:05:05.000Z | ixnetwork_restpy/pytest_tests/tests/test_async_operation.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 13 | 2019-05-20T10:48:31.000Z | 2021-10-06T07:45:44.000Z | """Tests to verify the async=True functionality of operations
"""
import pytest
from ixnetwork_restpy import TestPlatform, BadRequestError
def test_async_operation(ixnetwork):
# uncomment the following to see the full request and response
# from ixnetwork_restpy import TestPlatform
# ixnetwork.parent.parent.Trace = TestPlatform.TRACE_ALL
ixnetwork.parent.parent.Trace = TestPlatform.TRACE_INFO
# clear the configuration asynchronously
ixnetwork.info("new config executing async")
ixnetwork.NewConfig(async_operation=True)
ixnetwork.info("async code executing during new config")
response = ixnetwork.get_async_response()
ixnetwork.info("retrieved async response")
assert response is None
def test_async_operation_for_errors(ixnetwork):
# uncomment the following to see the full request and response
# from ixnetwork_restpy import TestPlatform
# ixnetwork.parent.parent.Trace = TestPlatform.TRACE_ALL
ixnetwork.parent.parent.Trace = TestPlatform.TRACE_INFO
# checking if async code returns proper errors or not
ixnetwork.info("traffic apply executing async")
ixnetwork.Traffic.Apply(async_operation=True)
ixnetwork.info("async code executing during traffic apply")
try:
ixnetwork.get_async_response()
except BadRequestError as e:
ixnetwork.info("retrieved async response error")
assert e.status_code == 400
assert "Error in L2/L3 Traffic Apply" in e.message
if __name__ == "__main__":
pytest.main(["-v", "-s", "--server", "localhost:11009:windows", __file__])
| 37.928571 | 78 | 0.750157 | 196 | 1,593 | 5.933673 | 0.382653 | 0.067068 | 0.072227 | 0.089424 | 0.513328 | 0.421324 | 0.421324 | 0.421324 | 0.421324 | 0.326741 | 0 | 0.00757 | 0.170747 | 1,593 | 41 | 79 | 38.853659 | 0.872824 | 0.295041 | 0 | 0.086957 | 0 | 0 | 0.233544 | 0.020739 | 0 | 0 | 0 | 0 | 0.130435 | 1 | 0.086957 | false | 0 | 0.086957 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
940ddb5e6655f6d1f74858cf98a8d51ff4dc825b | 1,452 | py | Python | kodistubs/_publish_docs.py | ogero/Deluge-Manager-XBMC | 10c4f2a93ac1fffba01209444ba5e597036b968b | [
"MIT"
] | null | null | null | kodistubs/_publish_docs.py | ogero/Deluge-Manager-XBMC | 10c4f2a93ac1fffba01209444ba5e597036b968b | [
"MIT"
] | null | null | null | kodistubs/_publish_docs.py | ogero/Deluge-Manager-XBMC | 10c4f2a93ac1fffba01209444ba5e597036b968b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# Created on: 24.02.2016
# Author: Roman Miroshnychenko aka Roman V.M. (romanvm@yandex.ua)
from __future__ import print_function
import os
from subprocess import call
gh_token = os.environ['GH_TOKEN']
repo_slug = os.environ['TRAVIS_REPO_SLUG']
gh_repo_url = 'https://{gh_token}@github.com/{repo_slug}.git'.format(gh_token=gh_token,
repo_slug=repo_slug)
devnull = open(os.devnull, 'w')
def execute(args, silent=False):
if silent:
stdout = stderr = devnull
else:
stdout = stderr = None
res = call(args, stdout=stdout, stderr=stderr)
if res:
raise RuntimeError('Call {call} returned error code {res}'.format(
call=str(args).replace(gh_token, '*****'),
res=res))
base_dir = os.path.dirname(os.path.abspath(__file__))
docs_dir = os.path.join(base_dir, 'docs')
html_dir = os.path.join(docs_dir, '_build', 'html')
os.chdir(docs_dir)
execute(['make', 'html'])
os.chdir(html_dir)
execute(['git', 'init'])
execute(['git', 'config', 'user.name', '"Roman Miroshnychenko"'])
execute(['git', 'config', 'user.email', '"romanvm@yandex.ua"'])
open('.nojekyll', 'w').close()
execute(['git', 'add', '--all', '.'])
execute(['git', 'commit', '-m' '"Updates docs"'])
execute(['git', 'push', '--force', '--quiet', gh_repo_url, 'HEAD:gh-pages'], silent=True)
print('Docs published to GitHub Pages.')
| 33.767442 | 89 | 0.631543 | 199 | 1,452 | 4.442211 | 0.467337 | 0.047511 | 0.030543 | 0.033937 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007563 | 0.180441 | 1,452 | 42 | 90 | 34.571429 | 0.735294 | 0.083333 | 0 | 0 | 0 | 0 | 0.249435 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.09375 | 0 | 0.125 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
940ea273ac1efd7b81d8def3cd0c08c7f2a030f1 | 12,861 | py | Python | self_play_train.py | DrVecctor/GameOfGo | ab8b0313372fe380b44dd7021c00895c971b3e54 | [
"MIT"
] | null | null | null | self_play_train.py | DrVecctor/GameOfGo | ab8b0313372fe380b44dd7021c00895c971b3e54 | [
"MIT"
] | null | null | null | self_play_train.py | DrVecctor/GameOfGo | ab8b0313372fe380b44dd7021c00895c971b3e54 | [
"MIT"
] | 1 | 2020-09-11T16:49:01.000Z | 2020-09-11T16:49:01.000Z | import warnings
warnings.filterwarnings('ignore')
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow.python.util.deprecation as deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
import json
import argparse
import multiprocessing
import random
import shutil
import time
import tempfile
from collections import namedtuple
import h5py
import numpy as np
from dlgo import kerasutil
from dlgo import scoring
from dlgo.experience import ExperienceCollector, combine_experience, load_experience
from dlgo.agent import Agent, decode_agent
from dlgo.encoder import Encoder
from dlgo.goboard import GameState, Player, Point
from dlgo.utils import print_board
def load_agent(num, work_dir, load_args):
file_path = os.path.join(work_dir,'agent_%08d.hdf5' % num)
with h5py.File(file_path, 'r') as h5file:
agent = decode_agent(h5file)
agent.set_num_rounds(load_args.nr)
agent.set_c(load_args.c)
agent.set_concent_param(load_args.cp)
agent.set_dirichlet_weight(load_args.dw)
return agent
class GameRecord(namedtuple('GameRecord', 'moves winner margin')):
pass
def simulate_game(black_player, white_player, board_size):
moves = []
game = GameState.new_game(board_size)
agents = {
Player.black: black_player,
Player.white: white_player,
}
while not game.is_over():
next_move = agents[game.next_player].select_move(game)
moves.append(next_move)
game = game.apply_move(next_move)
print_board(game.board)
game_result = scoring.compute_game_result(game)
print(game_result)
return GameRecord(
moves=moves,
winner=game_result.winner,
margin=game_result.winning_margin
)
def get_temp_file():
fd, fname = tempfile.mkstemp(prefix='dlgo-train')
os.close(fd)
return fname
def do_self_play(args):
work_dir, board_size, agent1_num, agent2_num, num_games, experience_filename, gpu_frac, load_args = args
kerasutil.set_gpu_memory_target(gpu_frac)
random.seed(int(time.time()) + os.getpid())
np.random.seed(int(time.time()) + os.getpid())
agent1 = load_agent(agent1_num, work_dir, load_args)
agent2 = load_agent(agent2_num, work_dir, load_args)
collector1 = ExperienceCollector()
collector2 = ExperienceCollector()
ag1b = []
ag2b = []
color1 = Player.black
for i in range(num_games):
print('Simulating game %d/%d...' % (i + 1, num_games))
collector1.begin_episode()
agent1.set_collector(collector1)
collector2.begin_episode()
agent2.set_collector(collector2)
if color1 == Player.black:
black_player, white_player = agent1, agent2
else:
white_player, black_player = agent1, agent2
game_record = simulate_game(black_player, white_player, board_size)
if game_record.winner == color1:
print('Agent 1 wins.')
collector1.complete_episode(reward=game_record.margin)
collector2.complete_episode(reward=-game_record.margin)
elif game_record.winner == color1.other:
print('Agent 2 wins.')
collector1.complete_episode(reward=-game_record.margin)
collector2.complete_episode(reward=game_record.margin)
else:
print('Agents play a draw.')
collector1.complete_episode(reward=0)
collector2.complete_episode(reward=0)
if game_record.winner==Player.black:
black_score = game_record.margin
elif game_record.winner==Player.white:
black_score = -game_record.margin
else:
black_score = 0
if color1 == Player.black:
ag1b.append(black_score)
else:
ag2b.append(black_score)
color1 = color1.other
experience = combine_experience([collector1,collector2])
print('Saving experience buffer to %s\n' % experience_filename)
with h5py.File(experience_filename, 'w') as experience_outf:
experience.serialize(experience_outf)
return (ag1b, ag2b)
def generate_experience(work_dir, lear_agent, ref_agent, experience_file, num_games, board_size, num_workers, load_args):
experience_files = []
workers = []
gpu_frac = 0.95 / float(num_workers)
games_per_worker = num_games // num_workers
for i in range(num_workers):
filename = get_temp_file()
print("filename for worker %d: %s" % (i,filename))
experience_files.append(filename)
pool = multiprocessing.Pool(num_workers)
worker_args = [
(
work_dir,
board_size,
lear_agent,
ref_agent,
games_per_worker,
experience_files[_],
gpu_frac,
load_args,
)
for _ in range(num_workers)
]
game_results = pool.map(do_self_play, worker_args)
# Merge experience buffers.
print('Merging experience buffers...')
first_filename = experience_files[0]
other_filenames = experience_files[1:]
with h5py.File(first_filename, 'r') as expf:
combined_buffer = load_experience(expf)
for filename in other_filenames:
with h5py.File(filename, 'r') as expf:
next_buffer = load_experience(expf)
combined_buffer = combine_experience([combined_buffer, next_buffer])
ag1b = []
ag2b = []
for ag1b_work, ag2b_work in game_results:
ag1b += ag1b_work
ag2b += ag2b_work
stats_data = []
split_work_dir = os.path.split(os.path.abspath(work_dir))
stats_path = os.path.join(split_work_dir[0],'stats.json')
if os.path.exists(stats_path):
with open(stats_path) as infile:
stats_data = json.load(infile)
stats_data.append({
'ag_b_dir': split_work_dir[1],
'ag_b_num': lear_agent,
'ag_w_dir': split_work_dir[1],
'ag_w_num': ref_agent,
'nr_b': load_args.nr,
'nr_w': load_args.nr,
'c_b': load_args.c,
'c_w': load_args.c,
'cp_b': load_args.cp,
'cp_w': load_args.cp,
'dw_b': load_args.dw,
'dw_w': load_args.dw,
'scores': ag1b})
stats_data.append({
'ag_b_dir': split_work_dir[1],
'ag_b_num': ref_agent,
'ag_w_dir': split_work_dir[1],
'ag_w_num': lear_agent,
'nr_b': load_args.nr,
'nr_w': load_args.nr,
'c_b': load_args.c,
'c_w': load_args.c,
'cp_b': load_args.cp,
'cp_w': load_args.cp,
'dw_b': load_args.dw,
'dw_w': load_args.dw,
'scores': ag2b})
with open(stats_path,'w') as outfile:
json.dump(stats_data,outfile,indent=0)
print('Saving into %s...' % experience_file)
with h5py.File(experience_file, 'w') as experience_outf:
combined_buffer.serialize(experience_outf)
temp_komi = sum(ag1b+ag2b)/len(ag1b+ag2b)
ag1b_komi = [x - temp_komi for x in ag1b]
ag2b_komi = [x - temp_komi for x in ag2b]
ag1_wins = 0
for res in ag1b_komi:
if res>0:
ag1_wins += 1
for res in ag2b_komi:
if res<0:
ag1_wins += 1
ag1_win_rate = ag1_wins/len(ag1b+ag2b)
print('win rate of learning agent %d against reference agent %d: %f' % (lear_agent,ref_agent,ag1_win_rate))
print('required win rate: %f' % load_args.evfrac)
if ag1_win_rate>load_args.evfrac:
new_ref = lear_agent
print('agent %d is the new reference' % lear_agent)
else:
new_ref = ref_agent
print('agent %d remains the reference' % ref_agent)
ref_path = os.path.join(work_dir,'references.json')
ref_data = []
if os.path.exists(ref_path):
with open(ref_path) as infile:
ref_data = json.load(infile)
ref_data.append({
'agent': new_ref,
'new': ag1_win_rate>load_args.evfrac,
'winrate': ag1_win_rate,
'threshold': load_args.evfrac,
'meanscore': temp_komi
})
with open(ref_path,'w') as outfile:
json.dump(ref_data,outfile,indent=0)
# Clean up.
for fname in experience_files:
os.unlink(fname)
pool.close()
pool.join()
def train_worker(work_dir,lear_agent,next_agent,experience_file,lr,mo,batch_size,policy_loss_weight,epochs,load_args):
learning_agent = load_agent(lear_agent, work_dir, load_args)
with h5py.File(experience_file, 'r') as expf:
exp_buffer = load_experience(expf)
history = learning_agent.train(exp_buffer, learning_rate=lr, momentum=mo, batch_size=batch_size, policy_loss_weight=policy_loss_weight, epochs=epochs)
with h5py.File(os.path.join(work_dir,'agent_%08d.hdf5' % next_agent), 'w') as updated_agent_outf:
learning_agent.serialize(updated_agent_outf)
ag_path = os.path.join(work_dir,'agents.json')
ag_data = []
if os.path.exists(ag_path):
with open(ag_path) as infile:
ag_data = json.load(infile)
ag_data.append({
'agent': next_agent,
'lr': lr,
'mo': mo,
'bs': batch_size,
'plw': policy_loss_weight,
'ep': epochs,
'history': history
})
with open(ag_path,'w') as outfile:
json.dump(ag_data,outfile,indent=0)
def train_on_experience(work_dir,lear_agent,next_agent,experience_file,lr,mo,batch_size,policy_loss_weight,epochs,load_args):
# Do the training in the background process. Otherwise some Keras
# stuff gets initialized in the parent, and later that forks, and
# that messes with the workers.
worker = multiprocessing.Process(
target=train_worker,
args=(
work_dir,
lear_agent,
next_agent,
experience_file,
lr,
mo,
batch_size,
policy_loss_weight,
epochs,
load_args
)
)
worker.start()
worker.join()
def parse_cmds(cmdfile, parser):
global args
with open(cmdfile) as f:
cmdline=f.readline()
args = parser.parse_args(cmdline.split())
def main():
cmdparser = argparse.ArgumentParser()
cmdparser.add_argument('--work-dir')
cmdargs = cmdparser.parse_args()
cmdfile = os.path.join(cmdargs.work_dir,'options')
parser = argparse.ArgumentParser()
parser.add_argument('--games-per-worker-and-color', type=int, default=32)
parser.add_argument('--num-workers', type=int, default=8)
parser.add_argument('--nr', type=int, default=300)
parser.add_argument('--c', type=float, default=2.0)
parser.add_argument('--cp', type=float, default=0.03)
parser.add_argument('--dw', type=float, default=0.3)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--mo', type=float, default=0.9)
parser.add_argument('--bs', type=int, default=2048)
parser.add_argument('--plw', type=float, default=0.2)
parser.add_argument('--ep', type=int, default=20)
parser.add_argument('--evfrac', type=float, default=0.55)
parser.add_argument('--stop', type=int, default=0)
parse_cmds(cmdfile, parser)
with h5py.File(os.path.join(cmdargs.work_dir,'agent_00000000.hdf5'), 'r') as h5file:
board_size = int(h5file['encoder'].attrs['board_size'])
print('read the following board size: %d' % board_size)
experience_file = os.path.join(cmdargs.work_dir, 'exp_temp.hdf5')
while args.stop==0:
parse_cmds(cmdfile, parser)
ag_path = os.path.join(cmdargs.work_dir,'agents.json')
if os.path.exists(ag_path):
with open(ag_path) as infile:
ag_dat = json.load(infile)
lear_agent = ag_dat[-1]['agent']
else:
lear_agent = 0
ref_path = os.path.join(cmdargs.work_dir,'references.json')
if os.path.exists(ref_path):
with open(ref_path) as infile:
ref_dat = json.load(infile)
ref_agent = ref_dat[-1]['agent']
else:
ref_agent = 0
generate_experience(
work_dir=cmdargs.work_dir,
lear_agent=lear_agent,
ref_agent=ref_agent,
experience_file=experience_file,
num_games=2*args.games_per_worker_and_color*args.num_workers,
board_size=board_size,
num_workers=args.num_workers,
load_args=args)
train_on_experience(
work_dir=cmdargs.work_dir,
lear_agent=lear_agent,
next_agent=lear_agent+2*args.games_per_worker_and_color*args.num_workers,
experience_file=experience_file,
lr=args.lr,
mo=args.mo,
batch_size=args.bs,
policy_loss_weight=args.plw,
epochs=args.ep,
load_args=args)
parse_cmds(cmdfile, parser)
if __name__ == '__main__':
main()
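# Hedged note (added): parse_cmds reads the first line of <work-dir>/options and
# feeds it to the argparse parser defined in main(), e.g. a line such as
#
#   --games-per-worker-and-color 16 --num-workers 4 --nr 200 --lr 0.005 --stop 0
#
# Editing that file to --stop 1 ends the self-play/training loop after the
# current iteration, since main() re-parses it on every pass.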
| 32.892583 | 154 | 0.643807 | 1,718 | 12,861 | 4.553551 | 0.16007 | 0.036815 | 0.02825 | 0.010738 | 0.288892 | 0.23188 | 0.205803 | 0.167455 | 0.149048 | 0.149048 | 0 | 0.016622 | 0.24687 | 12,861 | 390 | 155 | 32.976923 | 0.791039 | 0.015007 | 0 | 0.187311 | 0 | 0 | 0.069894 | 0.002211 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02719 | false | 0.003021 | 0.060423 | 0 | 0.102719 | 0.048338 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
940f45a74fbe5732957c0a584ead0ec8af62f5f8 | 1,618 | py | Python | mooc_ex/xbaoItemPrice.py | ds17/reptiles | 99418624ae4b7548bf4dc1ea834e8c75a47a0557 | [
"Apache-2.0"
] | null | null | null | mooc_ex/xbaoItemPrice.py | ds17/reptiles | 99418624ae4b7548bf4dc1ea834e8c75a47a0557 | [
"Apache-2.0"
] | null | null | null | mooc_ex/xbaoItemPrice.py | ds17/reptiles | 99418624ae4b7548bf4dc1ea834e8c75a47a0557 | [
"Apache-2.0"
] | 1 | 2021-02-20T13:17:42.000Z | 2021-02-20T13:17:42.000Z | #D:\Python\Python35\python.exe
# -*- coding:utf-8 -*-
'Targeted crawler for Taobao item prices'
import requests,csv,re,os,time
i = 1  # global counter variable
def getHTMLtext(url):
try:
r=requests.get(url)
r.encoding=r.apparent_encoding
return r.text
except:
print('Exception while fetching the page')
def parsePage(ilt,html):
# info=re.findall(r'raw_title":"(.*?)",.*?,"view_price":"(.*?)".*?"view_sales":"(.*?)人付款","comment_count":"(.*?)"',html) #
# for item in info:
# ilt.append(list(item))
# print(ilt)
global i
info=re.finditer(r'raw_title":"(.*?)",.*?,"view_price":"(.*?)".*?"view_sales":"(.*?)人付款","comment_count":"(.*?)"',html) #
for item in info:
ilt.append([i,item.group(1),item.group(2),item.group(3),item.group(4)])
i+=1
# view_price=re.findall(r'"view_price":"(.*?)"',html)
# view_sales=re.findall(r'"view_sales":"(.*?)"',html)
def write2csv(item,fpath):
with open(fpath,'a+',newline='\n') as csvfile:
writer=csv.writer(csvfile)
# for m in range(len(ilt)):
# for i in range(len(ilt[m])):
# writer.writerow(ilt[m][i])
writer.writerow(item)
csvfile.close()
def main():
kw = '收纳盒'  # search keyword: "storage box"
url='https://s.taobao.com/search?q='+kw
ilt = [[kw], ['No.', 'Title', 'Price', 'Buyers', 'Comments']]
page=1
csv_path=os.path.join(os.getcwd(),'xbaoItem.csv')
for i in range(page):
html=getHTMLtext(url+'&s='+str(44*i))
parsePage(ilt,html)
time.sleep(5)
print(i)
for item in ilt:
write2csv(item,csv_path)
ilt.clear()  # reduce memory usage
if __name__=='__main__':
main()
| 26.52459 | 128 | 0.549444 | 222 | 1,618 | 3.900901 | 0.432432 | 0.04157 | 0.034642 | 0.030023 | 0.157044 | 0.157044 | 0.157044 | 0.157044 | 0.157044 | 0.157044 | 0 | 0.011933 | 0.223115 | 1,618 | 60 | 129 | 26.966667 | 0.677009 | 0.277503 | 0 | 0 | 0 | 0 | 0.157082 | 0.079828 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108108 | false | 0 | 0.027027 | 0 | 0.162162 | 0.054054 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9411e487e97666c6453fbbd22292c525e1819e7a | 2,359 | py | Python | appCore/apps/replica/cms/views/template.py | jadedgamer/alifewellplayed.com | b7b3dee8d3b9526c7cfe77078570a29394ef7e76 | [
"MIT"
] | 4 | 2017-04-22T11:03:01.000Z | 2018-01-16T22:28:15.000Z | appCore/apps/replica/cms/views/template.py | alifewellplayed/alifewellplayed.com | b7b3dee8d3b9526c7cfe77078570a29394ef7e76 | [
"MIT"
] | 10 | 2017-04-06T19:54:42.000Z | 2017-11-07T06:53:10.000Z | appCore/apps/replica/cms/views/template.py | alifewellplayed/alifewellplayed.com | b7b3dee8d3b9526c7cfe77078570a29394ef7e76 | [
"MIT"
] | 1 | 2017-12-14T12:49:40.000Z | 2017-12-14T12:49:40.000Z | from django.conf import settings
from django.template import RequestContext
from django.shortcuts import render_to_response, render, get_object_or_404, redirect
from django.views.decorators.cache import cache_page
from django.views.generic.list import ListView
from django.contrib import messages
from replica import settings as ReplicaSettings
from replica.pulse.models import CodeBlock
from replica.cms.forms import CodeBlockModelForm
#Get list of all templates
class CodeBlockList(ListView):
paginate_by = ReplicaSettings.PAGINATE
template_name = 'replica/cms/template_List.html'
def get_queryset(self):
return CodeBlock.objects.all()
def get_context_data(self, **kwargs):
context = super(CodeBlockList, self).get_context_data(**kwargs)
instance = CodeBlock(user=self.request.user)
f = CodeBlockModelForm(instance=instance)
context.update({'title': 'Template List', 'is_list': True, 'form': f})
return context
def CodeBlockEdit(request, templateID=None):
if templateID:
template = get_object_or_404(CodeBlock, pk=templateID)
instance = template
edit = True
msg = 'Template updated.'
obj_title = "Editing template: {}".format(template.title)
else:
template = None
instance = CodeBlock(user=request.user)
edit = False
msg = 'New template created.'
obj_title = 'New Template'
if request.method == 'POST':
f = CodeBlockModelForm(request.POST or None, request.FILES, instance=instance)
if f.is_valid():
f.save()
messages.info(request, msg)
return redirect('ReplicaAdmin:TemplateEdit', templateID=instance.id)
else:
f = CodeBlockModelForm(instance=instance)
variables = {
'form': f,
'obj': template,
'content_type': 'Template',
'edit':edit,
'obj_title':obj_title,
}
template = 'replica/cms/template_Edit.html'
return render(request, template, variables)
def CodeBlockDelete(request, templateID):
t = get_object_or_404(CodeBlock, pk=templateID)
if request.method == 'POST':
t.delete()
return redirect('ReplicaAdmin:TemplateList')
template = 'replica/cms/shared/delete-confirm.html'
variables = {'obj': t, 'content_type': 'Template'}
return render(request, template, variables)
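# Hedged usage sketch (added): URL wiring consistent with the redirects above
# (route names TemplateEdit/TemplateList under a ReplicaAdmin namespace, which
# would be declared where this urlconf is included). Paths are illustrative.
#
#   from django.conf.urls import url
#
#   urlpatterns = [
#       url(r'^templates/$', CodeBlockList.as_view(), name='TemplateList'),
#       url(r'^templates/(?P<templateID>\d+)/$', CodeBlockEdit, name='TemplateEdit'),
#       url(r'^templates/(?P<templateID>\d+)/delete/$', CodeBlockDelete, name='TemplateDelete'),
#   ]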
| 36.859375 | 86 | 0.694786 | 272 | 2,359 | 5.919118 | 0.349265 | 0.037267 | 0.020497 | 0.026087 | 0.088199 | 0.043478 | 0.043478 | 0 | 0 | 0 | 0 | 0.004787 | 0.203052 | 2,359 | 63 | 87 | 37.444444 | 0.851596 | 0.010598 | 0 | 0.137931 | 0 | 0 | 0.135877 | 0.063438 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.155172 | 0.017241 | 0.37931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9411f20ddc93846bc9ba0efb924f787c448b3493 | 1,844 | py | Python | cogs.py | Paarf/BoxBot | da323ddf30e368bed077ef75f843eb6e43902bd3 | [
"MIT"
] | null | null | null | cogs.py | Paarf/BoxBot | da323ddf30e368bed077ef75f843eb6e43902bd3 | [
"MIT"
] | null | null | null | cogs.py | Paarf/BoxBot | da323ddf30e368bed077ef75f843eb6e43902bd3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import discord
from discord.ext import commands
class Commands:
def __init__(self, bot):
self.bot = bot
# you can use events here too
async def on_message(self, msg):
print(msg.content)
# or commands like this
@commands.command()
async def a(self, ctx):
await ctx.send("b")
@commands.command()
async def hello(self,ctx):
await ctx.send(f"hello {ctx.author.mention} <:301100937659809792:318009894579994624>")
@commands.command()
async def ping(self,ctx):
await ctx.send(f"{ctx.author.mention}")
@commands.command()
async def length(self,ctx):
await ctx.send(f"The message length is {len(ctx.message.content) - 29}")
@commands.command()
async def info(self,ctx, member : discord.Member = None):
if member is None:
member = ctx.message.author
embed = discord.Embed(title="User Info", colour=member.colour,description="General info about a Discord Account")
embed.set_image(url=member.avatar_url)
embed.set_thumbnail(url=member.avatar_url)
embed.set_author(name=str(member), url="https://discordapp.com", icon_url=member.avatar_url)
embed.set_footer(text=discord.Guild.name, icon_url=member.avatar_url)
embed.add_field(name="Nitro Account", value="TBD")
embed.add_field(name="Bot Account", value="test")
embed.add_field(name="Displayed Name", value=str(member))
embed.add_field(name="<:thonkang:325666093161250816>", value="these last two", inline=True)
embed.add_field(name="<:thonkang:325666093161250816>", value="are inline fields", inline=True)
await ctx.send(content="User Info", embed=embed)
def setup(bot):
bot.add_cog(Commands(bot))
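# Hedged usage sketch (added): loading this cog from a bot entry point; the
# extension name matches this file ('cogs'), and the prefix/token are assumptions.
#
#   bot = commands.Bot(command_prefix='!')
#   bot.load_extension('cogs')
#   bot.run('YOUR_TOKEN')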
| 34.148148 | 122 | 0.646421 | 241 | 1,844 | 4.858921 | 0.373444 | 0.040991 | 0.085397 | 0.098207 | 0.242528 | 0.226302 | 0.081981 | 0 | 0 | 0 | 0 | 0.052338 | 0.222885 | 1,844 | 53 | 123 | 34.792453 | 0.764829 | 0.038503 | 0 | 0.138889 | 0 | 0 | 0.205711 | 0.072844 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.055556 | 0 | 0.138889 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
941744eaeedb8147966109fc40c6847481dcda99 | 13,358 | py | Python | quantlib/test/test_hybridhestonhullwhite_process.py | yuyingfeng/pyql | ceb838581ad4db73a0208bc51bde2771bb534e5f | [
"BSD-3-Clause"
] | null | null | null | quantlib/test/test_hybridhestonhullwhite_process.py | yuyingfeng/pyql | ceb838581ad4db73a0208bc51bde2771bb534e5f | [
"BSD-3-Clause"
] | null | null | null | quantlib/test/test_hybridhestonhullwhite_process.py | yuyingfeng/pyql | ceb838581ad4db73a0208bc51bde2771bb534e5f | [
"BSD-3-Clause"
] | 2 | 2016-08-24T20:56:14.000Z | 2022-01-03T05:58:42.000Z | from __future__ import division
from __future__ import print_function

from .unittest_tools import unittest

import numpy as np

from quantlib.settings import Settings
from quantlib.instruments.option import (
    EuropeanExercise)
from quantlib.instruments.payoffs import PAYOFF_TO_STR
from quantlib.models.shortrate.onefactormodels.hullwhite import HullWhite
from quantlib.instruments.option import VanillaOption
from quantlib.time.api import (today, Years, Actual365Fixed,
                               Period, May, Date,
                               NullCalendar)
from quantlib.processes.api import (BlackScholesMertonProcess,
                                    HestonProcess,
                                    HullWhiteProcess)
from quantlib.models.equity.heston_model import (
    HestonModel)
from quantlib.termstructures.yields.api import ZeroCurve, FlatForward
from quantlib.termstructures.volatility.api import BlackConstantVol
from quantlib.pricingengines.api import (
    AnalyticEuropeanEngine,
    AnalyticBSMHullWhiteEngine,
    AnalyticHestonEngine,
    AnalyticHestonHullWhiteEngine,
    FdHestonHullWhiteVanillaEngine)
from quantlib.quotes import SimpleQuote
from quantlib.instruments.payoffs import (
    PlainVanillaPayoff, Put, Call)
from quantlib.methods.finitedifferences.solvers.fdmbackwardsolver import FdmSchemeDesc


def flat_rate(today, forward, daycounter):
    return FlatForward(
        reference_date=today,
        forward=SimpleQuote(forward),
        daycounter=daycounter
    )


class HybridHestonHullWhiteProcessTestCase(unittest.TestCase):

    def setUp(self):
        self.settings = Settings()
        self.calendar = NullCalendar()
        self.todays_date = Date(15, May, 1998)
        self.settlement_date = Date(17, May, 1998)
        self.settings.evaluation_date = self.todays_date

        # options parameters
        self.dividend_yield = 0.00
        self.risk_free_rate = 0.06
        self.volatility = 0.25
        self.spot = SimpleQuote(100)
        self.maturity = Date(17, May, 1999)
        self.daycounter = Actual365Fixed()
        self.tol = 1e-2

        # bootstrap the yield/dividend/vol curves
        dates = [self.settlement_date] + \
                [self.settlement_date + Period(i + 1, Years)
                 for i in range(40)]
        rates = [0.01] + \
                [0.01 + 0.0002 * np.exp(np.sin(i / 4.0)) for i in range(40)]
        divRates = [0.02] + \
                   [0.02 + 0.0001 * np.exp(np.sin(i / 5.0)) for i in range(40)]

        self.r_ts = ZeroCurve(dates, rates, self.daycounter)
        self.q_ts = ZeroCurve(dates, divRates, self.daycounter)
        self.vol_ts = BlackConstantVol(
            self.settlement_date,
            self.calendar,
            self.volatility,
            self.daycounter
        )
        self.black_scholes_merton_process = BlackScholesMertonProcess(
            self.spot,
            self.q_ts,
            self.r_ts,
            self.vol_ts
        )
        self.dates = dates

    def test_bsm_hw(self):
        print("Testing European option pricing for a BSM process" +
              " with one-factor Hull-White model...")

        dc = Actual365Fixed()
        todays_date = today()
        maturity_date = todays_date + Period(20, Years)
        settings = Settings()
        settings.evaluation_date = todays_date

        spot = SimpleQuote(100)
        q_ts = flat_rate(todays_date, 0.04, dc)
        r_ts = flat_rate(todays_date, 0.0525, dc)
        vol_ts = BlackConstantVol(todays_date, NullCalendar(), 0.25, dc)
        hullWhiteModel = HullWhite(r_ts, 0.00883, 0.00526)
        bsm_process = BlackScholesMertonProcess(spot, q_ts,
                                                r_ts, vol_ts)
        exercise = EuropeanExercise(maturity_date)

        fwd = spot.value * q_ts.discount(maturity_date) / \
            r_ts.discount(maturity_date)
        payoff = PlainVanillaPayoff(Call, fwd)
        option = VanillaOption(payoff, exercise)

        tol = 1e-8
        corr = [-0.75, -0.25, 0.0, 0.25, 0.75]
        expectedVol = [0.217064577, 0.243995801, 0.256402830,
                       0.268236596, 0.290461343]

        for c, v in zip(corr, expectedVol):
            bsm_hw_engine = AnalyticBSMHullWhiteEngine(c, bsm_process,
                                                       hullWhiteModel)
            option = VanillaOption(payoff, exercise)
            option.set_pricing_engine(bsm_hw_engine)
            npv = option.npv

            compVolTS = BlackConstantVol(todays_date, NullCalendar(),
                                         v, dc)
            bs_process = BlackScholesMertonProcess(spot, q_ts,
                                                   r_ts, compVolTS)
            bsEngine = AnalyticEuropeanEngine(bs_process)

            comp = VanillaOption(payoff, exercise)
            comp.set_pricing_engine(bsEngine)

            impliedVol = comp.implied_volatility(npv, bs_process,
                                                 1e-10, 500,
                                                 min_vol=0.1,
                                                 max_vol=0.4)

            if abs(impliedVol - v) > tol:
                print("Failed to reproduce implied volatility cor: %f" % c)
                print("calculated: %f" % impliedVol)
                print("expected  : %f" % v)

            if abs((comp.npv - npv) / npv) > tol:
                print("Failed to reproduce NPV")
                print("calculated: %f" % comp.npv)
                print("expected  : %f" % npv)

            self.assertAlmostEqual(impliedVol, v, delta=tol)
            self.assertAlmostEqual(comp.npv / npv, 1, delta=tol)

    def test_compare_bsm_bsmhw_hestonhw(self):
        dc = Actual365Fixed()
        todays_date = today()
        settings = Settings()
        settings.evaluation_date = todays_date
        tol = 1.e-2

        spot = SimpleQuote(100)
        dates = [todays_date + Period(i, Years) for i in range(40)]
        rates = [0.01 + 0.0002 * np.exp(np.sin(i / 4.0)) for i in range(40)]
        divRates = [0.02 + 0.0001 * np.exp(np.sin(i / 5.0)) for i in range(40)]

        s0 = SimpleQuote(100)
        r_ts = ZeroCurve(dates, rates, dc)
        q_ts = ZeroCurve(dates, divRates, dc)

        vol = SimpleQuote(0.25)
        vol_ts = BlackConstantVol(
            todays_date,
            NullCalendar(),
            vol.value, dc)

        bsm_process = BlackScholesMertonProcess(
            spot, q_ts, r_ts, vol_ts)

        payoff = PlainVanillaPayoff(Call, 100)
        exercise = EuropeanExercise(dates[1])

        option = VanillaOption(payoff, exercise)

        analytic_european_engine = AnalyticEuropeanEngine(bsm_process)

        option.set_pricing_engine(analytic_european_engine)
        npv_bsm = option.npv

        variance = vol.value * vol.value
        hestonProcess = HestonProcess(
            risk_free_rate_ts=r_ts,
            dividend_ts=q_ts,
            s0=s0,
            v0=variance,
            kappa=5.0,
            theta=variance,
            sigma=1e-4,
            rho=0.0)

        hestonModel = HestonModel(hestonProcess)

        hullWhiteModel = HullWhite(r_ts, a=0.01, sigma=0.01)

        bsmhwEngine = AnalyticBSMHullWhiteEngine(
            0.0, bsm_process, hullWhiteModel)

        hestonHwEngine = AnalyticHestonHullWhiteEngine(
            hestonModel, hullWhiteModel, 128)

        hestonEngine = AnalyticHestonEngine(hestonModel, 144)

        option.set_pricing_engine(hestonEngine)
        npv_heston = option.npv

        option.set_pricing_engine(bsmhwEngine)
        npv_bsmhw = option.npv

        option.set_pricing_engine(hestonHwEngine)
        npv_hestonhw = option.npv

        print("calculated with BSM: %f" % npv_bsm)
        print("BSM-HW: %f" % npv_bsmhw)
        print("Heston: %f" % npv_heston)
        print("Heston-HW: %f" % npv_hestonhw)

        self.assertAlmostEqual(npv_bsm, npv_bsmhw, delta=tol)
        self.assertAlmostEqual(npv_bsm, npv_hestonhw, delta=tol)

    def test_compare_BsmHW_HestonHW(self):
        """
        From Quantlib test suite
        """
        print("Comparing European option pricing for a BSM " +
              "process with one-factor Hull-White model...")

        dc = Actual365Fixed()
        todays_date = today()
        settings = Settings()
        settings.evaluation_date = todays_date
        tol = 1.e-2

        spot = SimpleQuote(100)
        dates = [todays_date + Period(i, Years) for i in range(40)]
        rates = [0.01 + 0.0002 * np.exp(np.sin(i / 4.0)) for i in range(40)]
        divRates = [0.02 + 0.0001 * np.exp(np.sin(i / 5.0)) for i in range(40)]

        s0 = SimpleQuote(100)
        r_ts = ZeroCurve(dates, rates, dc)
        q_ts = ZeroCurve(dates, divRates, dc)

        vol = SimpleQuote(0.25)
        vol_ts = BlackConstantVol(
            todays_date,
            NullCalendar(),
            vol.value, dc)

        bsm_process = BlackScholesMertonProcess(
            spot, q_ts, r_ts, vol_ts)

        variance = vol.value * vol.value
        hestonProcess = HestonProcess(
            risk_free_rate_ts=r_ts,
            dividend_ts=q_ts,
            s0=s0,
            v0=variance,
            kappa=5.0,
            theta=variance,
            sigma=1e-4,
            rho=0.0)

        hestonModel = HestonModel(hestonProcess)

        hullWhiteModel = HullWhite(r_ts, a=0.01, sigma=0.01)

        bsmhwEngine = AnalyticBSMHullWhiteEngine(
            0.0, bsm_process, hullWhiteModel)

        hestonHwEngine = AnalyticHestonHullWhiteEngine(
            hestonModel, hullWhiteModel, 128)

        tol = 1e-5
        strikes = [0.25, 0.5, 0.75, 0.8, 0.9,
                   1.0, 1.1, 1.2, 1.5, 2.0, 4.0]
        maturities = [1, 2, 3, 5, 10, 15, 20, 25, 30]
        types = [Put, Call]

        # `option_type` avoids shadowing the built-in `type`
        for option_type in types:
            for strike in strikes:
                for maturity in maturities:
                    maturity_date = todays_date + Period(maturity, Years)
                    exercise = EuropeanExercise(maturity_date)

                    fwd = strike * s0.value * \
                        q_ts.discount(maturity_date) / \
                        r_ts.discount(maturity_date)

                    payoff = PlainVanillaPayoff(option_type, fwd)
                    option = VanillaOption(payoff, exercise)

                    option.set_pricing_engine(bsmhwEngine)
                    calculated = option.npv

                    option.set_pricing_engine(hestonHwEngine)
                    expected = option.npv

                    if (np.abs(expected - calculated) > calculated * tol and
                            np.abs(expected - calculated) > tol):
                        cp = PAYOFF_TO_STR[option_type]
                        print("Failed to reproduce npv")
                        print("strike  : %f" % strike)
                        print("maturity: %d" % maturity)
                        print("type    : %s" % cp)

                    self.assertAlmostEqual(expected, calculated,
                                           delta=tol)

    @unittest.skip("skipping very long zanette test...")
    def test_zanette(self):
        """
        From paper by A. Zanette et al.
        """
        dc = Actual365Fixed()
        todays_date = today()
        settings = Settings()
        settings.evaluation_date = todays_date

        # constant yield and dividend curves
        dates = [todays_date + Period(i, Years) for i in range(3)]
        rates = [0.04 for i in range(3)]
        divRates = [0.03 for i in range(3)]
        r_ts = ZeroCurve(dates, rates, dc)
        q_ts = ZeroCurve(dates, divRates, dc)

        s0 = SimpleQuote(100)

        # Heston model
        v0 = .1
        kappa_v = 2
        theta_v = 0.1
        sigma_v = 0.3
        rho_sv = -0.5

        hestonProcess = HestonProcess(
            risk_free_rate_ts=r_ts,
            dividend_ts=q_ts,
            s0=s0,
            v0=v0,
            kappa=kappa_v,
            theta=theta_v,
            sigma=sigma_v,
            rho=rho_sv)

        hestonModel = HestonModel(hestonProcess)

        # Hull-White model
        kappa_r = 1
        sigma_r = .2
        hullWhiteProcess = HullWhiteProcess(r_ts, a=kappa_r, sigma=sigma_r)

        strike = 100
        maturity = 1
        option_type = Call  # renamed from `type` to avoid shadowing the built-in

        maturity_date = todays_date + Period(maturity, Years)
        exercise = EuropeanExercise(maturity_date)

        payoff = PlainVanillaPayoff(option_type, strike)
        option = VanillaOption(payoff, exercise)

        def price_cal(rho, tGrid):
            fd_hestonHwEngine = FdHestonHullWhiteVanillaEngine(
                hestonModel,
                hullWhiteProcess,
                rho,
                tGrid, 100, 40, 20, 0, True, FdmSchemeDesc.Hundsdorfer())
            option.set_pricing_engine(fd_hestonHwEngine)
            return option.npv

        calc_price = []
        for rho in [-0.5, 0, .5]:
            for tGrid in [50, 100, 150, 200]:
                tmp = price_cal(rho, tGrid)
                print("rho (S,r): %f Ns: %d Price: %f" %
                      (rho, tGrid, tmp))
                calc_price.append(tmp)

        expected_price = [11.38, ] * 4 + [12.79, ] * 4 + [14.06, ] * 4

        np.testing.assert_almost_equal(calc_price, expected_price, 2)
| 30.778802 | 86 | 0.564381 | 1,435 | 13,358 | 5.111498 | 0.174913 | 0.029993 | 0.009816 | 0.017996 | 0.451125 | 0.381731 | 0.357737 | 0.333197 | 0.3182 | 0.3182 | 0 | 0.048304 | 0.344438 | 13,358 | 433 | 87 | 30.849885 | 0.789311 | 0.012726 | 0 | 0.345638 | 0 | 0 | 0.03669 | 0 | 0 | 0 | 0 | 0 | 0.020134 | 1 | 0.02349 | false | 0 | 0.060403 | 0.003356 | 0.09396 | 0.060403 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9419dfff89fc658e4d5b9784d855175327b46249 | 13,470 | py | Python | microutil/napari_wrappers.py | Hekstra-Lab/microutil | ab3b7b51754bf90ef35d6eea1c7b35cece638f0e | [
"BSD-3-Clause"
] | 2 | 2021-07-06T05:31:51.000Z | 2021-10-05T13:29:59.000Z | microutil/napari_wrappers.py | Hekstra-Lab/microutil | ab3b7b51754bf90ef35d6eea1c7b35cece638f0e | [
"BSD-3-Clause"
] | 16 | 2021-02-08T22:36:42.000Z | 2021-11-01T22:36:02.000Z | microutil/napari_wrappers.py | Hekstra-Lab/microutil | ab3b7b51754bf90ef35d6eea1c7b35cece638f0e | [
"BSD-3-Clause"
] | null | null | null | __all__ = [
"manual_segmentation",
"correct_watershed",
"correct_decreasing_cell_frames",
]
import warnings
import numpy as np
import xarray as xr
from .array_utils import axis2int
from .segmentation import (
napari_points_to_peak_mask,
peak_mask_to_napari_points,
watershed_single_frame_preseeded,
)
from .track_utils import find_bad_frames
try:
import napari
except ImportError:
napari = None
warnings.warn(
"Could not import napari. The function *manual_segmentation* will fail if you call it",
stacklevel=3,
)
def scroll_time(viewer, time_axis=1):
def scroll_callback(layer, event):
modifiers = [key.name for key in event.modifiers]
if "Shift" in modifiers:
new = list(viewer.dims.current_step)
# get the max time
max_time = viewer.dims.range[time_axis][1]
# event.delta is (float, float) for horizontal and vertical scroll
# on linux shift-scroll gives vertical
# but on mac it gives horizontal. So just take the max and hope
# for the best
if max(event.delta) > 0:
if new[time_axis] < max_time:
new[time_axis] += 1
else:
if new[time_axis] > 0:
new[time_axis] -= 1
viewer.dims.current_step = new
viewer.mouse_wheel_callbacks.append(scroll_callback)
def apply_label_keybinds(labels):
@labels.bind_key("q")
def paint_mode(viewer): # noqa: F811
labels.mode = "erase"
@labels.bind_key("w")
def paint_mode(viewer): # noqa: F811
labels.mode = "fill"
@labels.bind_key("s")
def paint_mode(viewer): # noqa: F811
labels.selected_label = 0
labels.mode = "fill"
@labels.bind_key("e")
def paint_mode(viewer): # noqa: F811
labels.mode = "paint"
@labels.bind_key("r")
def paint_mode(viewer): # noqa: F811
labels.mode = "pick"
@labels.bind_key("t")
def new_cell(viewer): # noqa: F811
labels.selected_label = labels.data.max() + 1
def scroll_callback(layer, event):
if len(event.modifiers) == 0 and labels.mode in ["paint", "erase"]:
if event.delta[1] > 0:
labels.brush_size += 1
else:
labels.brush_size = max(labels.brush_size - 1, 1)
labels.mouse_wheel_callbacks.append(scroll_callback)
def apply_points_keybinds(points):
@points.bind_key('q')
def remove_selected(layer):
points.remove_selected()
@points.bind_key('w')
def add_mode(layer):
points.mode = 'add'
@points.bind_key('e')
def select_mode(layer):
points.mode = 'select'
def manual_segmentation(img, mask=None, time_axis='T'):
"""
Open up Napari set up for manual segmentation. Adds these custom keybindings:
q : erase
w : fill
e : paint
r : pick
t : create new label
s : fill with background
scroll : modify brush size when in paint mode
shift + Scroll : scrub through time points
Parameters
----------
img : image-like
Last to dims should be XY. You probably want this to be a BF image.
mask : array-like or None
If array-like it should be broadcastable to the same dims as *img*
time_axis : str or int or None, default: 'T'
Which axis to treat as the time axis for shift-scroll.
If None or a string when img is an xarray then the first axis will be used.
Returns
-------
mask :
The mask that was updated by user interactions
"""
if napari is None:
raise ImportError("You must install Napari in order to use this function.")
if mask is None:
# needs to be numpy as all other options do not seem to work
# see https://github.com/napari/napari/issues/2190
mask = np.zeros_like(img, dtype=np.int)
# if isinstance(img, np.ndarray):
# mask = np.zeros_like(img,dtype=np.int)
# elif isinstance(img, xr.DataArray):
# mask = nr.zeros_like(img, dtype=np.int)
# elif isinstance(img, da.Array):
# mask = np.zeros_like(img)
elif not isinstance(mask, np.ndarray):
print("casting mask to numpy array")
print("see https://github.com/napari/napari/issues/2190 for details")
mask = np.array(mask)
time_axis = axis2int(img, axis=time_axis, fallthrough=0)
# create the viewer and add the cells image
viewer = napari.view_image(img, name="cells")
# add the labels
labels = viewer.add_labels(mask, name="segmentation")
# Add more keybinds for better ergonomics
apply_label_keybinds(labels)
scroll_time(viewer, time_axis=time_axis)
napari.run()
if isinstance(img, xr.DataArray):
return xr.DataArray(labels.data, coords=img.coords, dims=img.dims)
else:
return labels.data
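
# Minimal usage sketch (hypothetical data, not part of the original file):
#
#     import numpy as np
#     img = np.random.rand(10, 512, 512)   # e.g. a (T, Y, X) brightfield stack
#     mask = manual_segmentation(img)      # edit labels in napari, then close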
def correct_watershed(ds):
    """
    Manually correct parts of an image with a bad watershed.

    This will modify the 'peak_mask' and 'labels' variables of the Dataset inplace.

    Keybindings:

    2 : toggle between mask and labels
    3 : toggle between controlling mask/labels or points
    4 : Toggle visibility of mask+labels on and off
    5 : Toggle whether painting paints through all time points
    Control-l : rerun current frame's watershed
    Control-Shift-l : rerun watershed for all the frames
    Shift + Scroll : scrub through time points

    Point Layer Keybindings:
    q : delete selected points
    w : Switch to `Add Points` mode
    e : Switch to `Select` mode

    Mask/Labels layer Keybindings:
    q : erase
    w : fill
    e : paint
    r : pick
    t : create new label
    s : fill with background
    Scroll : modify brush size when in paint mode

    Parameters
    ----------
    ds : (S, T, ... , Y, X) xarray dataset
    """
    viewer = napari.view_image(ds["images"].sel(C="BF"))
    labels = viewer.add_labels(ds["labels"].values, name="labels", visible=False)
    mask = viewer.add_labels(ds["mask"].values, name="mask", visible=True)
    points = viewer.add_points(peak_mask_to_napari_points(ds['peak_mask']), size=1)

    apply_label_keybinds(labels)
    apply_label_keybinds(mask)
    scroll_time(viewer, time_axis=1)
    apply_points_keybinds(points)

    through_time = False

    def setup_through_time(layer):
        old_paint = layer.paint
        # old_fill = layer.fill

        def paint_through_time(coord, new_label, refresh=True):
            if through_time:
                for i in range(labels.data.shape[1]):
                    c = list(coord)
                    c[1] = i
                    old_paint(c, new_label, refresh)
            else:
                old_paint(coord, new_label, refresh)

        # doesn't work because napari fill only allows filling the currently
        # viewed slice
        # https://github.com/napari/napari/blob/402771b0a331a62a891fd0a08c2f698424d51633/napari/layers/labels/labels.py#L809-L816
        # def fill_through_time(coord, new_label, refresh=True):
        #     if through_time:
        #         for i in range(labels.data.shape[1]):
        #             c = list(coord)
        #             c[1] = i
        #             old_fill(c, new_label, refresh)
        #     else:
        #         old_fill(coord, new_label, refresh)

        layer.paint = paint_through_time
        # layer.fill = fill_through_time

    setup_through_time(labels)
    setup_through_time(mask)

    def toggle_masks(*args):
        if mask.visible and labels.visible:
            # ugh - I guess set the labels to visible
            labels_and_points()
        elif mask.visible:
            labels_and_points()
        else:
            mask_and_points()
        if viewer.active_layer in [mask, labels]:
            set_correct_active_labels()

    def mask_and_points(*args):
        mask.visible = True
        labels.visible = False

    def labels_and_points(*args):
        mask.visible = False
        labels.visible = True

    layer_arr = np.array([labels, mask])

    def set_correct_active_labels():
        """If a labels layer is active make sure it is the correct one"""
        viewer.layers.unselect_all()
        new_layer = layer_arr[[labels.visible, mask.visible]][0]
        # using selected instead of active layer due to
        # https://github.com/napari/napari/issues/2390
        new_layer.selected = True

    def toggle_points_vs_labels(viewer):
        if viewer.active_layer == points:
            set_correct_active_labels()
        else:
            viewer.layers.unselect_all()
            points.selected = True

    def gogogo(viewer):
        labels_and_points()
        S, T = viewer.dims.current_step[:2]
        ds['peak_mask'][S, T] = napari_points_to_peak_mask(
            points.data, (ds.dims['Y'], ds.dims['X']), S, T
        )
        watershed_single_frame_preseeded(ds, S, T)
        labels.data = ds['labels'].values

    def gogogo_all(viewer):
        labels_and_points()
        for S in range(ds.dims['S']):
            for T in range(ds.dims['T']):
                ds['peak_mask'][S, T] = napari_points_to_peak_mask(
                    points.data, (ds.dims['Y'], ds.dims['X']), S, T
                )
                watershed_single_frame_preseeded(ds, S, T)
        labels.data = ds['labels'].values

    _lastmask = mask

    def toggle_bf_mask(viewer):
        nonlocal _lastmask
        if mask.visible or labels.visible:
            if mask.visible:
                _lastmask = mask
                mask.visible = False
            if labels.visible:
                _lastmask = labels
                labels.visible = False
        else:
            _lastmask.visible = True
            set_correct_active_labels()

    def toggle_through_time(*args):
        nonlocal through_time
        through_time = not through_time

    viewer.bind_key("2", toggle_masks)
    viewer.bind_key("3", toggle_points_vs_labels)
    viewer.bind_key("4", toggle_bf_mask)
    viewer.bind_key("5", toggle_through_time)
    viewer.bind_key("Control-l", gogogo)
    viewer.bind_key("Control-Shift-l", gogogo_all)


def correct_decreasing_cell_frames(ds, bad_frames=None, extra_labels=None):
    """
    Show only the pairs of frames for which the cell number is decreasing.

    This will modify *ds['labels']* in place when closed or when `Control-Shift-d` is pressed.

    Controls:
    Labels editing the same as always
    Control-Shift-d : check the values and change what is displayed to the problem frames.

    Parameters
    ----------
    ds : (S, T, ..., Y, X) Dataset
    bad_frames : list of tuple of int, optional
        If *None*, then `find_bad_frames` will be used.
    extra_labels : str or list of strings
        Other channels in the dataset to view. Will be added
        as a napari label layer so should probably be binary images.

    Returns
    -------
    viewer : Napari viewer object
    """
    if extra_labels is not None:
        if isinstance(extra_labels, str):
            extra_labels = [extra_labels]
    else:
        extra_labels = []

    def gen_data(bad_frames=None):
        if bad_frames is None:
            bad_frames = find_bad_frames(ds)
        s_idx = []
        t_idx = []
        for i in bad_frames:
            s_idx.extend([i[0], i[0]])
            t_idx.extend([i[1] - 1, i[1]])
        BF = (
            ds['images']
            .sel(C='BF')
            .values[:][tuple(s_idx), tuple(t_idx)]
            .reshape(len(t_idx) // 2, 2, *ds['labels'].shape[-2:])
        )
        indiv = (
            ds['labels']
            .values[:][tuple(s_idx), tuple(t_idx)]
            .reshape(len(t_idx) // 2, 2, *ds['labels'].shape[-2:])
        )
        other_layers = [
            (
                ds[other]
                .values[:][tuple(s_idx), tuple(t_idx)]
                .reshape(len(t_idx) // 2, 2, *ds[other].shape[-2:])
            )
            for other in extra_labels
        ]
        return BF, indiv, other_layers, s_idx, t_idx

    def reassign():
        """
        Because the reshape of the original values makes a copy rather than a view :(
        standard in place editing doesn't work.
        """
        ds['labels'].values[:][tuple(s_idx), tuple(t_idx)] = labels.data.reshape(
            len(t_idx), *labels.data.shape[-2:]
        )

    def check_all(*args):
        nonlocal t_idx, s_idx
        reassign()
        BF, indiv, other_layers, s_idx, t_idx = gen_data(None)
        image.data = BF
        for data, layer in zip(other_layers, others):
            layer.data = data
        labels.data = indiv

    BF, indiv, other_layers, s_idx, t_idx = gen_data(bad_frames)

    viewer = napari.Viewer()
    image = viewer.add_image(BF)
    labels = viewer.add_labels(indiv)
    others = [viewer.add_labels(other) for other in other_layers]
    viewer.layers.unselect_all()
    labels.selected = True
    apply_label_keybinds(labels)
    scroll_time(viewer, time_axis=1)
    viewer.bind_key('Control-Shift-d', check_all)

    def on_close(*args, **kwargs):
        reassign()

    # this on_close may not work in the future. See discussion on zulip
    # https://napari.zulipchat.com/#narrow/stream/212875-general/topic/on-close/near/230088585
    viewer.window._qt_window.destroyed.connect(on_close)
    return viewer
| 31.619718 | 129 | 0.607275 | 1,791 | 13,470 | 4.40536 | 0.18593 | 0.023701 | 0.011534 | 0.015209 | 0.279848 | 0.225349 | 0.1782 | 0.165273 | 0.117617 | 0.090494 | 0 | 0.013799 | 0.289829 | 13,470 | 425 | 130 | 31.694118 | 0.810997 | 0.2902 | 0 | 0.219917 | 0 | 0 | 0.055682 | 0.005546 | 0 | 0 | 0 | 0 | 0 | 1 | 0.13278 | false | 0 | 0.041494 | 0 | 0.190871 | 0.008299 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
941bf67ace09cea1fa8be060142fbc460a1ab4a9 | 1,433 | py | Python | pypy/module/posix/app_startfile.py | olliemath/pypy | 8b873bd0b8bf76075aba3d915c260789f26f5788 | [
"Apache-2.0",
"OpenSSL"
] | 1 | 2021-06-02T23:02:09.000Z | 2021-06-02T23:02:09.000Z | pypy/module/posix/app_startfile.py | olliemath/pypy | 8b873bd0b8bf76075aba3d915c260789f26f5788 | [
"Apache-2.0",
"OpenSSL"
] | 1 | 2021-03-30T18:08:41.000Z | 2021-03-30T18:08:41.000Z | pypy/module/posix/app_startfile.py | olliemath/pypy | 8b873bd0b8bf76075aba3d915c260789f26f5788 | [
"Apache-2.0",
"OpenSSL"
] | 1 | 2022-03-30T11:42:37.000Z | 2022-03-30T11:42:37.000Z | # NOT_RPYTHON
class CFFIWrapper(object):
    def __init__(self):
        import cffi
        ffi = cffi.FFI()
        ffi.cdef("""
        HINSTANCE ShellExecuteA(HWND, LPCSTR, LPCSTR, LPCSTR, LPCSTR, INT);
        HINSTANCE ShellExecuteW(HWND, LPCWSTR, LPCWSTR, LPCWSTR, LPCWSTR, INT);
        """)
        self.NULL = ffi.NULL
        self.cast = ffi.cast
        self.lib = ffi.dlopen("Shell32.dll")
        self.SW_SHOWNORMAL = 1
        self.getwinerror = ffi.getwinerror


_cffi_wrapper = None


def startfile(filepath, operation=None):
    global _cffi_wrapper
    if _cffi_wrapper is None:
        _cffi_wrapper = CFFIWrapper()
    w = _cffi_wrapper

    if operation is None:
        operation = w.NULL
    if isinstance(filepath, bytes):
        if isinstance(operation, str):
            operation = operation.encode("ascii")
        rc = w.lib.ShellExecuteA(w.NULL, operation, filepath,
                                 w.NULL, w.NULL, w.SW_SHOWNORMAL)
    elif isinstance(filepath, str):
        if isinstance(operation, bytes):
            operation = operation.decode("ascii")
        rc = w.lib.ShellExecuteW(w.NULL, operation, filepath,
                                 w.NULL, w.NULL, w.SW_SHOWNORMAL)
    else:
        raise TypeError("argument 1 must be str or bytes")
    rc = int(w.cast("uintptr_t", rc))
    if rc <= 32:
        code, msg = w.getwinerror()
        raise WindowsError(code, msg, filepath)
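
# Hypothetical usage on Windows (not part of the original module):
#
#     startfile("C:\\temp\\report.pdf")           # default operation
#     startfile("C:\\temp\\report.pdf", "print")  # explicit verb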
| 32.568182 | 79 | 0.597348 | 165 | 1,433 | 5.072727 | 0.357576 | 0.041816 | 0.028674 | 0.026284 | 0.107527 | 0.107527 | 0.107527 | 0.107527 | 0.107527 | 0.107527 | 0 | 0.005952 | 0.296581 | 1,433 | 43 | 80 | 33.325581 | 0.824405 | 0.007676 | 0 | 0.054054 | 0 | 0 | 0.159267 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0 | 0.027027 | 0 | 0.108108 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
941fa2d0f2957a837877bc52e0cf20be207d8edc | 2,147 | py | Python | Revoke.py | locobastos/IBM-Informix-Comparison-Script | bd624a73e24a1d994e5d3c266d49a7f12d972611 | [
"MIT"
] | null | null | null | Revoke.py | locobastos/IBM-Informix-Comparison-Script | bd624a73e24a1d994e5d3c266d49a7f12d972611 | [
"MIT"
] | null | null | null | Revoke.py | locobastos/IBM-Informix-Comparison-Script | bd624a73e24a1d994e5d3c266d49a7f12d972611 | [
"MIT"
] | null | null | null | # coding=utf-8
class Revoke:
    """
    A revoke is described by:
    - The database on which it exists
    - The privilege revoked
    - The user on which the revoke applies
    - The table on which the revoke applies
    - The owner of the table on which the revoke applies
    """

    def __init__(self, database, revoke_statement):
        """
        We create a new Revoke instance by giving it the database instance (useful to get the
        database name from the revoke statement) and the revoke statement.

        :param database: The database instance
        :param revoke_statement: The revoke statement
        """
        splitted_statement = revoke_statement.split()

        # Database instance
        self.database = database

        # Privilege revoked
        self.privilege_revoked = splitted_statement[1]

        # The user on which the revoke applies (trailing ';' stripped)
        self.user_revoked = splitted_statement[-1][:-1]

        if "." in splitted_statement[3]:
            # The table on which the revoke applies
            self.table_revoke = splitted_statement[3].split('.')[1]
            # The owner of the table on which the revoke applies
            self.table_owner_revoke = splitted_statement[3].split('.')[0]
        else:
            # The table on which the revoke applies
            self.table_revoke = splitted_statement[3]
            # The owner of the table on which the revoke applies
            self.table_owner_revoke = splitted_statement[3]

    def __eq__(self, other_revoke):
        """
        Two revoke instances are equal only if all of their fields are equal.

        :param other_revoke: The other revoke instance to compare with
        :return: True if the two revoke instances are equal
        """
        return self.privilege_revoked == other_revoke.privilege_revoked \
            and self.user_revoked == other_revoke.user_revoked \
            and self.table_revoke == other_revoke.table_revoke \
            and self.table_owner_revoke == other_revoke.table_owner_revoke \
            and self.database.database_name == other_revoke.database.database_name
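
# Worked example (hypothetical statement, not part of the original file):
# parsing 'revoke update on informix.customer from alice;' yields
#   privilege_revoked  = 'update'
#   table_owner_revoke = 'informix'
#   table_revoke       = 'customer'
#   user_revoked       = 'alice'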
| 37.017241 | 94 | 0.647415 | 269 | 2,147 | 4.996283 | 0.234201 | 0.073661 | 0.095238 | 0.083333 | 0.334077 | 0.286458 | 0.266369 | 0.243304 | 0.243304 | 0.243304 | 0 | 0.00719 | 0.287378 | 2,147 | 57 | 95 | 37.666667 | 0.871242 | 0.432231 | 0 | 0 | 0 | 0 | 0.002752 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
94216394fa225ea1e01514370a7ab54fc7850fd6 | 4,394 | py | Python | StateTracing/dataloader.py | junchenfeng/diagnosis_tracing | 4e26e2ad0c7abc547f22774b6c9c299999a152c3 | [
"MIT"
] | null | null | null | StateTracing/dataloader.py | junchenfeng/diagnosis_tracing | 4e26e2ad0c7abc547f22774b6c9c299999a152c3 | [
"MIT"
] | null | null | null | StateTracing/dataloader.py | junchenfeng/diagnosis_tracing | 4e26e2ad0c7abc547f22774b6c9c299999a152c3 | [
"MIT"
] | 1 | 2020-09-08T13:42:16.000Z | 2020-09-08T13:42:16.000Z | from torch import tensor
from numpy.random import choice, shuffle

max_len = 128


def random_cut(length):
    s = choice(length - max_len + 1)
    return s, s + max_len


def differeniate(statesA, statesB):
    return [[i, b] for i, (a, b) in enumerate(zip(map(int, statesA), map(int, statesB))) if b != a]
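
# Worked example (hypothetical states, not part of the original file):
# differeniate("0100", "0110") -> [[2, 1]]  (position 2 changed to value 1)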
def generate_targets(sequences):
    ciid = -1
    Xs, Ys = [], []
    for i, (iid, state) in enumerate(sequences):
        if iid != ciid:
            cstate = state  # update state at item boundaries
            ciid = iid
        else:
            cstate = sequences[i - 1][1]
        diff = differeniate(cstate, state)
        if len(diff) > 0:
            for d in diff:
                Xs.append(state)
                Ys.append(d)
    return Xs, Ys
def pad_sequence3(sequences, padding=0):
    """
    Every element in `sequences` is a 2-d list
    with shape [time_steps, dim];
    dim is fixed across elements.
    """
    d2s = [len(seq) for seq in sequences]
    d3 = len(sequences[0][0])
    result = []
    max_l = max(d2s)
    for seq, l in zip(sequences, d2s):
        result.append(seq + [[padding for _ in range(d3)] for i in range(max_l - l)])
    return result
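
# Worked example (hypothetical input, not part of the original file):
# pad_sequence3([[[1, 2]], [[3, 4], [5, 6]]])
#   -> [[[1, 2], [0, 0]], [[3, 4], [5, 6]]]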
def pad_sequence2(sequences, padding=0):
    lens = [len(seq) for seq in sequences]
    ml = max(lens)
    results = []
    for seq, l in zip(sequences, lens):
        results.append(seq + [padding for i in range(ml - l)])
    return results


def read_data(file_name):
    data = []
    add = data.append
    tmp = []
    with open(file_name, 'r', encoding='utf8') as f:
        while True:
            line = f.readline()
            if not line:
                break
            if line.strip() == "":
                if tmp != []:
                    add(tmp)
                    tmp = []
            else:
                item, state = line.strip().split()
                # item + 1 because the padding value is 0
                tmp.append([int(item) + 1, list(map(int, state))])
    if tmp != []:
        add(tmp)
    return data


def load_init():
    results = {}
    items = {}
    for line in open('./items.dat', 'r', encoding='utf8'):
        itm, id_ = line.strip().split(' ')
        items[itm] = id_
    for line in open('./init.dat', 'r', encoding='utf8'):
        itm, state = line.strip().split(' ')
        results[int(items[itm]) + 1] = [1 if e == '0' else 0 for e in state]
    return results


class DataLoader():
    def __init__(self, data, inits):
        self.data = data
        self.size = len(data)
        self.inits = inits

    def shuffle(self):
        shuffle(self.data)

    def samples(self, batch_size):
        cursor = 0
        self.shuffle()
        while cursor < self.size:
            data = self.data[cursor:cursor + batch_size]
            cursor += batch_size
            states, masks = [], []
            for d in data:
                if len(d) > max_len:
                    s, e = random_cut(len(d))
                    d = d[s:e]
                itms, sts = zip(*d)
                msk = [self.inits[i] for i in itms]
                states.append(list(sts))
                masks.append(msk)
            yield pad_sequence3(states), pad_sequence3(masks)


def check(Xs, Ys):
    for xs, ys in zip(Xs, Ys):
        for i in range(len(xs) - 1):
            x_ = [v for v in xs[i]]
            if sum(x_) == 0:
                break
            pos, val = ys[i]
            x_[pos] = val
            if x_ != xs[i + 1]:
                print(x_)
                print('------')
                print(xs[i + 1])
                print(' ')
                print(' ')
    print('ok')


if __name__ == '__main__':
    from numpy import array
    inits = load_init()
    if 'data' not in dir():
        data = read_data('../data/test.1.dat')
    dl = DataLoader(data, inits)
    for x, y in dl.samples(100):
        x = array(x)
        y = array(y)
        print(x.shape, y.shape)
        break

#    items, sequences = zip(*data[123])
#    x = data[15]
#    y = generate_targets(x)
#    items, states = zip(*x)
#
#    x = [[1, [4, 0, 0, 0]],
#         [1, [4, 1, 0, 0]],
#         [1, [4, 0, 0, 0]],
#         [1, [4, 2, 0, 0]]]
#
#    targets = generate_targets(x)
| 26.46988 | 92 | 0.466318 | 553 | 4,394 | 3.616637 | 0.242315 | 0.007 | 0.012 | 0.0165 | 0.07 | 0.051 | 0.007 | 0 | 0 | 0 | 0 | 0.024962 | 0.39827 | 4,394 | 165 | 93 | 26.630303 | 0.731467 | 0.087619 | 0 | 0.121739 | 0 | 0 | 0.020822 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095652 | false | 0 | 0.026087 | 0.008696 | 0.191304 | 0.06087 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9421e29a0bf88b5d1d33c777e246e5747fbbdf1c | 467 | py | Python | classification/classifier.py | mjbaucas/OneWordSpeechRecognition | 8b6ec3e59c77b574d0f6e29cd50aaa8106a0c106 | [
"CC-BY-4.0"
] | null | null | null | classification/classifier.py | mjbaucas/OneWordSpeechRecognition | 8b6ec3e59c77b574d0f6e29cd50aaa8106a0c106 | [
"CC-BY-4.0"
] | null | null | null | classification/classifier.py | mjbaucas/OneWordSpeechRecognition | 8b6ec3e59c77b574d0f6e29cd50aaa8106a0c106 | [
"CC-BY-4.0"
] | null | null | null | from keras.models import load_model
from numpy import array, asarray

from dataset import DatasetGenerator


def classify_sound(model, sound_file):
    model = load_model(model)
    LABELS = 'no yes'.split()
    dsGen = DatasetGenerator(label_set=LABELS)
    x = array(dsGen.process_wav_file(sound_file))
    x = asarray(x).reshape(-1, 177, 98, 1)
    # make a prediction and threshold it into a binary class
    y = model.predict(x)
    prediction = y[0]
    if prediction[0] > 0.99:
        return 1
    else:
        return 0
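
# Hypothetical usage (paths are illustrative, not part of the original file):
#
#     is_yes = classify_sound('model.h5', 'sample.wav')  # 1 for "yes", 0 otherwise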
| 22.238095 | 46 | 0.717345 | 71 | 467 | 4.605634 | 0.56338 | 0.055046 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03599 | 0.167024 | 467 | 20 | 47 | 23.35 | 0.804627 | 0.036403 | 0 | 0 | 0 | 0 | 0.013363 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.2 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9423171d16250d6171ddafca3f4c87c148dbefd2 | 489 | py | Python | 00_Original/34_Netzwerkkommunikation/E-Mail/Erstellen_komplexer_E-Mails/email_mit_anhang.py | felixdittrich92/Python3_book | cd0e2b55aa72c51927d347b70199fb9ed928e06f | [
"MIT"
] | null | null | null | 00_Original/34_Netzwerkkommunikation/E-Mail/Erstellen_komplexer_E-Mails/email_mit_anhang.py | felixdittrich92/Python3_book | cd0e2b55aa72c51927d347b70199fb9ed928e06f | [
"MIT"
] | null | null | null | 00_Original/34_Netzwerkkommunikation/E-Mail/Erstellen_komplexer_E-Mails/email_mit_anhang.py | felixdittrich92/Python3_book | cd0e2b55aa72c51927d347b70199fb9ed928e06f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from email.mime.multipart import MIMEMultipart
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
msg = MIMEMultipart()
msg["Subject"] = "Hallo Welt"
msg["From"] = "Donald Duck <don@ld.de>"
msg["To"] = "Onkel Dagobert <d@gobert.de>"
text = MIMEText("Dies ist meine selbst erstellte E-Mail.")
msg.attach(text)
with open("lena.png", "rb") as f:
    bild = MIMEImage(f.read())
msg.attach(bild)
print(msg.as_string())
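
# To actually send the message, something like the following would be used
# (a sketch with placeholder host and credentials, not part of the original):
#
#     import smtplib
#     with smtplib.SMTP("smtp.example.com", 587) as s:
#         s.starttls()
#         s.login("user", "password")
#         s.send_message(msg)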
| 22.227273 | 58 | 0.697342 | 75 | 489 | 4.533333 | 0.666667 | 0.079412 | 0.114706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002331 | 0.122699 | 489 | 21 | 59 | 23.285714 | 0.79021 | 0.08589 | 0 | 0 | 0 | 0 | 0.276404 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.214286 | 0 | 0.214286 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9427c1c57cf677a2859c3b47d370c6f3bd16a79d | 543 | py | Python | asyncqtpy/common.py | codelv/asyncqtpy | def6207aa44c7de794345816fff514103c2440bb | [
"BSD-2-Clause"
] | null | null | null | asyncqtpy/common.py | codelv/asyncqtpy | def6207aa44c7de794345816fff514103c2440bb | [
"BSD-2-Clause"
] | null | null | null | asyncqtpy/common.py | codelv/asyncqtpy | def6207aa44c7de794345816fff514103c2440bb | [
"BSD-2-Clause"
] | null | null | null | # © 2018 Gerard Marull-Paretas <gerard@teslabs.com>
# © 2014 Mark Harviston <mark.harviston@gmail.com>
# © 2014 Arve Knudsen <arve.knudsen@gmail.com>
# BSD License
"""Mostly irrelevant, but useful utilities common to UNIX and Windows."""
import logging
def with_logger(cls):
    """Class decorator to add a logger to a class."""
    module = cls.__module__
    assert module is not None
    cls_name = f"{module}.{cls.__qualname__}"
    logger = cls._logger = logging.getLogger(cls_name)
    logger.setLevel(logging.WARNING)
    return cls
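
# Minimal usage sketch (hypothetical class, not part of the original module):
#
#     @with_logger
#     class Worker:
#         def run(self):
#             self._logger.warning("something happened")
#
# The attached logger is named "<module>.Worker".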
| 30.166667 | 73 | 0.714549 | 78 | 543 | 4.858974 | 0.602564 | 0.021108 | 0.042216 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026726 | 0.173112 | 543 | 17 | 74 | 31.941176 | 0.81069 | 0.493554 | 0 | 0 | 0 | 0 | 0.103448 | 0.103448 | 0 | 0 | 0 | 0 | 0.125 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9427fe62f70bac3dffa9c51e9108759e4dbc47d2 | 13,665 | pyp | Python | plugins/Py-RoundedTube/Py-RoundedTube.pyp | tdapper/cinema4d_py_sdk | 32b1d2b63fe28510c83c66065394042900000e92 | [
"Apache-2.0"
] | 113 | 2015-12-11T11:00:49.000Z | 2022-03-27T01:33:12.000Z | plugins/Py-RoundedTube/Py-RoundedTube.pyp | tdapper/cinema4d_py_sdk | 32b1d2b63fe28510c83c66065394042900000e92 | [
"Apache-2.0"
] | 3 | 2016-04-04T12:45:28.000Z | 2019-04-12T08:33:38.000Z | plugins/Py-RoundedTube/Py-RoundedTube.pyp | tdapper/cinema4d_py_sdk | 32b1d2b63fe28510c83c66065394042900000e92 | [
"Apache-2.0"
] | 50 | 2015-11-16T11:13:02.000Z | 2022-03-26T07:05:25.000Z | """
RoundedTube
Copyright: MAXON Computer GmbH
Written for Cinema 4D R18

Modified Date: 05/12/2018
"""
import os
import math
import sys

import c4d
from c4d import plugins, utils, bitmaps, gui

# Be sure to use a unique ID obtained from www.plugincafe.com
PLUGIN_ID = 1025250


class RoundedTube(plugins.ObjectData):
    """RoundedTube Generator"""

    # Defines the number of handles that will be drawn; it is a constant.
    HANDLECOUNT = 5

    # Enables a few optimizations; take a look at the GetVirtualObjects method for more information.
    def __init__(self):
        self.SetOptimizeCache(True)

    # Helper method to set the local axis of the object.
    @staticmethod
    def SetAxis(op, axis):
        if axis == c4d.PRIM_AXIS_YP:
            return

        padr = op.GetAllPoints()
        if padr is None:
            return
        elif axis == c4d.PRIM_AXIS_XP:
            for i, p in enumerate(padr):
                op.SetPoint(i, c4d.Vector(p.y, -p.x, p.z))
        elif axis == c4d.PRIM_AXIS_XN:
            for i, p in enumerate(padr):
                op.SetPoint(i, c4d.Vector(-p.y, p.x, p.z))
        elif axis == c4d.PRIM_AXIS_YN:
            for i, p in enumerate(padr):
                op.SetPoint(i, c4d.Vector(-p.x, -p.y, p.z))
        elif axis == c4d.PRIM_AXIS_ZP:
            for i, p in enumerate(padr):
                op.SetPoint(i, c4d.Vector(p.x, -p.z, p.y))
        elif axis == c4d.PRIM_AXIS_ZN:
            for i, p in enumerate(padr):
                op.SetPoint(i, c4d.Vector(p.x, p.z, -p.y))

        op.Message(c4d.MSG_UPDATE)

    # Helper method to determine how a point should be swapped according to the local axis.
    @staticmethod
    def SwapPoint(p, axis):
        if axis == c4d.PRIM_AXIS_XP:
            return c4d.Vector(p.y, -p.x, p.z)
        elif axis == c4d.PRIM_AXIS_XN:
            return c4d.Vector(-p.y, p.x, p.z)
        elif axis == c4d.PRIM_AXIS_YN:
            return c4d.Vector(-p.x, -p.y, p.z)
        elif axis == c4d.PRIM_AXIS_ZP:
            return c4d.Vector(p.x, -p.z, p.y)
        elif axis == c4d.PRIM_AXIS_ZN:
            return c4d.Vector(p.x, p.z, -p.y)
        return p

    # Override method, called when the object is initialized, to set default values.
    def Init(self, op):
        self.InitAttr(op, float, [c4d.PY_TUBEOBJECT_RAD])
        self.InitAttr(op, float, [c4d.PY_TUBEOBJECT_IRADX])
        self.InitAttr(op, float, [c4d.PY_TUBEOBJECT_IRADY])
        self.InitAttr(op, float, [c4d.PY_TUBEOBJECT_SUB])
        self.InitAttr(op, int, [c4d.PY_TUBEOBJECT_ROUNDSUB])
        self.InitAttr(op, float, [c4d.PY_TUBEOBJECT_ROUNDRAD])
        self.InitAttr(op, int, [c4d.PY_TUBEOBJECT_SEG])
        self.InitAttr(op, int, [c4d.PRIM_AXIS])

        op[c4d.PY_TUBEOBJECT_RAD] = 200.0
        op[c4d.PY_TUBEOBJECT_IRADX] = 50.0
        op[c4d.PY_TUBEOBJECT_IRADY] = 50.0
        op[c4d.PY_TUBEOBJECT_SUB] = 1
        op[c4d.PY_TUBEOBJECT_ROUNDSUB] = 8
        op[c4d.PY_TUBEOBJECT_ROUNDRAD] = 10.0
        op[c4d.PY_TUBEOBJECT_SEG] = 36
        op[c4d.PRIM_AXIS] = c4d.PRIM_AXIS_YP
        return True

    # Override method, reacts to messages received for certain events.
    def Message(self, node, type, data):
        # MSG_DESCRIPTION_VALIDATE is called after each parameter change.
        # It allows checking the input values and correcting them if needed.
        if type == c4d.MSG_DESCRIPTION_VALIDATE:
            node[c4d.PY_TUBEOBJECT_IRADX] = c4d.utils.ClampValue(node[c4d.PY_TUBEOBJECT_IRADX], 0.0, node[c4d.PY_TUBEOBJECT_RAD])
            node[c4d.PY_TUBEOBJECT_ROUNDRAD] = c4d.utils.ClampValue(node[c4d.PY_TUBEOBJECT_ROUNDRAD], 0.0, node[c4d.PY_TUBEOBJECT_IRADX])
        # MSG_MENUPREPARE is called when the user presses the menu entry for this object.
        # It allows setting up the object; in this case it defines the Phong shading by
        # adding a Phong tag to the generator.
        elif type == c4d.MSG_MENUPREPARE:
            node.SetPhong(True, False, c4d.utils.DegToRad(40.0))
        return True

    # Override method, should return the number of handles.
    def GetHandleCount(self, op):
        return self.HANDLECOUNT

    # Override method, called to know the position of a handle.
    def GetHandle(self, op, i, info):
        rad = op[c4d.PY_TUBEOBJECT_RAD]
        if rad is None:
            rad = 200.0
        iradx = op[c4d.PY_TUBEOBJECT_IRADX]
        if iradx is None:
            iradx = 50.0
        irady = op[c4d.PY_TUBEOBJECT_IRADY]
        if irady is None:
            irady = 50.0
        rrad = op[c4d.PY_TUBEOBJECT_ROUNDRAD]
        if rrad is None:
            rrad = 10.0
        axis = op[c4d.PRIM_AXIS]
        if axis is None:
            return

        if i == 0:
            info.position = c4d.Vector(rad, 0.0, 0.0)
            info.direction = c4d.Vector(1.0, 0.0, 0.0)
        elif i == 1:
            info.position = c4d.Vector(rad + iradx, 0.0, 0.0)
            info.direction = c4d.Vector(1.0, 0.0, 0.0)
        elif i == 2:
            info.position = c4d.Vector(rad, irady, 0.0)
            info.direction = c4d.Vector(0.0, 1.0, 0.0)
        elif i == 3:
            info.position = c4d.Vector(rad + iradx, irady - rrad, 0.0)
            info.direction = c4d.Vector(0.0, -1.0, 0.0)
        elif i == 4:
            info.position = c4d.Vector(rad + iradx - rrad, irady, 0.0)
            info.direction = c4d.Vector(-1.0, 0.0, 0.0)

        info.position = RoundedTube.SwapPoint(info.position, axis)
        info.direction = RoundedTube.SwapPoint(info.direction, axis)
        info.type = c4d.HANDLECONSTRAINTTYPE_LINEAR

    # Override method, called when the user moves a handle. This is the place to set parameters.
    def SetHandle(self, op, i, p, info):
        data = op.GetDataInstance()
        if data is None:
            return

        tmp = c4d.HandleInfo()
        self.GetHandle(op, i, tmp)

        val = (p - tmp.position) * info.direction
        if i == 0:
            op[c4d.PY_TUBEOBJECT_RAD] = utils.FCut(op[c4d.PY_TUBEOBJECT_RAD] + val, op[c4d.PY_TUBEOBJECT_IRADX], sys.maxint)
        elif i == 1:
            op[c4d.PY_TUBEOBJECT_IRADX] = utils.FCut(op[c4d.PY_TUBEOBJECT_IRADX] + val, op[c4d.PY_TUBEOBJECT_ROUNDRAD], op[c4d.PY_TUBEOBJECT_RAD])
        elif i == 2:
            op[c4d.PY_TUBEOBJECT_IRADY] = utils.FCut(op[c4d.PY_TUBEOBJECT_IRADY] + val, op[c4d.PY_TUBEOBJECT_ROUNDRAD], sys.maxint)
        elif i == 3 or i == 4:
            op[c4d.PY_TUBEOBJECT_ROUNDRAD] = utils.FCut(op[c4d.PY_TUBEOBJECT_ROUNDRAD] + val, 0.0, min(op[c4d.PY_TUBEOBJECT_IRADX], op[c4d.PY_TUBEOBJECT_IRADY]))

    # Override method, draws additional things in the viewport (e.g. the handles).
    def Draw(self, op, drawpass, bd, bh):
        if drawpass != c4d.DRAWPASS_HANDLES:
            return c4d.DRAWRESULT_SKIP

        rad = op[c4d.PY_TUBEOBJECT_RAD]
        iradx = op[c4d.PY_TUBEOBJECT_IRADX]
        irady = op[c4d.PY_TUBEOBJECT_IRADY]
        axis = op[c4d.PRIM_AXIS]

        bd.SetPen(c4d.GetViewColor(c4d.VIEWCOLOR_ACTIVEPOINT))
        hitid = op.GetHighlightHandle(bd)
        bd.SetMatrix_Matrix(op, bh.GetMg())
        for i in xrange(self.HANDLECOUNT):
            if i == hitid:
                bd.SetPen(c4d.GetViewColor(c4d.VIEWCOLOR_SELECTION_PREVIEW))
            else:
                bd.SetPen(c4d.GetViewColor(c4d.VIEWCOLOR_ACTIVEPOINT))

            info = c4d.HandleInfo()
            self.GetHandle(op, i, info)
            bd.DrawHandle(info.position, c4d.DRAWHANDLE_BIG, 0)
            bd.SetPen(c4d.GetViewColor(c4d.VIEWCOLOR_ACTIVEPOINT))
            if i == 0:
                info2 = c4d.HandleInfo()
                self.GetHandle(op, 1, info2)
                bd.DrawLine(info.position, info2.position, 0)
                self.GetHandle(op, 2, info2)
                bd.DrawLine(info.position, info2.position, 0)
            elif i == 3:
                bd.DrawLine(info.position, RoundedTube.SwapPoint(c4d.Vector(rad + iradx, irady, 0.0), axis), 0)
            elif i == 4:
                bd.DrawLine(info.position, RoundedTube.SwapPoint(c4d.Vector(rad + iradx, irady, 0.0), axis), 0)

        return c4d.DRAWRESULT_OK

    # Helper method to generate a lathe over points.
    def GenerateLathe(self, cpadr, cpcnt, sub):
        pcnt = cpcnt * sub
        vcnt = cpcnt * sub

        op = c4d.PolygonObject(pcnt, vcnt)
        if op is None:
            return None

        uvadr = [0.0] * (cpcnt + 1)
        length = 0.0
        for i in xrange(cpcnt):
            uvadr[i] = length
            length += (cpadr[(i + 1) % cpcnt] - cpadr[i]).GetLength()

        if length > 0.0:
            length = 1.0 / length
        for i in xrange(cpcnt):
            uvadr[i] *= length
        uvadr[cpcnt] = 1.0

        vcnt = 0
        for i in xrange(sub):
            sn, cs = utils.SinCos(math.pi * 2 * float(i) / float(sub))
            v1 = float(i) / float(sub)
            v2 = float(i + 1) / float(sub)
            for j in xrange(cpcnt):
                a = cpcnt * i + j
                op.SetPoint(a, c4d.Vector(cpadr[j].x * cs, cpadr[j].y, cpadr[j].x * sn))
                if i < sub:
                    b = cpcnt * i + ((j + 1) % cpcnt)
                    c = cpcnt * ((i + 1) % sub) + ((j + 1) % cpcnt)
                    d = cpcnt * ((i + 1) % sub) + j
                    pol = c4d.CPolygon(a, b, c, d)
                    op.SetPolygon(vcnt, pol)
                    vcnt += 1

        op.Message(c4d.MSG_UPDATE)
        op.SetPhong(True, 1, utils.Rad(80.0))
        return op
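
    # Illustrative note (not part of the original plugin): for a profile of
    # cpcnt points swept over `sub` rotation steps, point (step i, profile j)
    # lands at index cpcnt*i + j, and each quad connects a profile point and
    # its neighbour to the matching pair on the next step, wrapping around
    # via the modulo terms above.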
    # Override method, should return the bounding box of the generated object.
    def GetDimension(self, op, mp, rad):
        rado = op[c4d.PY_TUBEOBJECT_RAD]
        if rado is None:
            return
        radx = op[c4d.PY_TUBEOBJECT_IRADX]
        if radx is None:
            return
        rady = op[c4d.PY_TUBEOBJECT_IRADY]
        if rady is None:
            return
        axis = op[c4d.PRIM_AXIS]
        if axis is None:
            return

        mp = 0.0
        if axis == c4d.PRIM_AXIS_XP or axis == c4d.PRIM_AXIS_XN:
            rad.x = rady
            rad.y = rado + radx
            rad.z = rado + radx
        elif axis == c4d.PRIM_AXIS_YP or axis == c4d.PRIM_AXIS_YN:
            rad.x = rado + radx
            rad.y = rady
            rad.z = rado + radx
        elif axis == c4d.PRIM_AXIS_ZP or axis == c4d.PRIM_AXIS_ZN:
            rad.x = rado + radx
            rad.y = rado + radx
            rad.z = rady

    # Override method, should generate and return the object.
    def GetVirtualObjects(self, op, hierarchyhelp):
        # The following lines are disabled because the optimize-cache flag is set,
        # so the cache check is done before this method is called.
        # dirty = op.CheckCache(hierarchyhelp) or op.IsDirty(c4d.DIRTY_DATA)
        # if dirty is False: return op.GetCache(hierarchyhelp)

        rad = op[c4d.PY_TUBEOBJECT_RAD]
        if rad is None:
            rad = 200.0
        iradx = op[c4d.PY_TUBEOBJECT_IRADX]
        if iradx is None:
            iradx = 50.0
        irady = op[c4d.PY_TUBEOBJECT_IRADY]
        if irady is None:
            irady = 50.0
        rrad = op[c4d.PY_TUBEOBJECT_ROUNDRAD]
        if rrad is None:
            rrad = 10.0
        num_sub = op[c4d.PY_TUBEOBJECT_SUB]
        if num_sub is None:
            num_sub = 1
        sub = utils.CalcLOD(num_sub, 1, 1, 1000)
        num_rsub = op[c4d.PY_TUBEOBJECT_ROUNDSUB]
        if num_rsub is None:
            num_rsub = 8
        rsub = utils.CalcLOD(num_rsub, 1, 1, 1000)
        num_seg = op[c4d.PY_TUBEOBJECT_SEG]
        if num_seg is None:
            num_seg = 36
        seg = utils.CalcLOD(num_seg, 1, 3, 1000)

        cpcnt = 4 * (sub + rsub)
        cpadr = [c4d.Vector()] * cpcnt

        for i in xrange(sub):
            cpadr[i] = c4d.Vector(rad - iradx, (1.0 - float(i) / sub * 2.0) * (irady - rrad), 0.0)
            cpadr[i + sub + rsub] = c4d.Vector(rad + (float(i) / sub * 2.0 - 1.0) * (iradx - rrad), -irady, 0.0)
            cpadr[i + 2 * (sub + rsub)] = c4d.Vector(rad + iradx, (float(i) / float(sub) * 2.0 - 1.0) * (irady - rrad), 0.0)
            cpadr[i + 3 * (sub + rsub)] = c4d.Vector(rad + (1.0 - float(i) / float(sub) * 2.0) * (iradx - rrad), irady, 0.0)

        pi05 = 1.570796326
        for i in xrange(rsub):
            sn, cs = utils.SinCos(float(i) / rsub * pi05)
            cpadr[i + sub] = c4d.Vector(rad - (iradx - rrad + cs * rrad), -(irady - rrad + sn * rrad), 0.0)
            cpadr[i + sub + (sub + rsub)] = c4d.Vector(rad + (iradx - rrad + sn * rrad), -(irady - rrad + cs * rrad), 0.0)
            cpadr[i + sub + 2 * (sub + rsub)] = c4d.Vector(rad + (iradx - rrad + cs * rrad), +(irady - rrad + sn * rrad), 0.0)
            cpadr[i + sub + 3 * (sub + rsub)] = c4d.Vector(rad - (iradx - rrad + sn * rrad), +(irady - rrad + cs * rrad), 0.0)

        ret = self.GenerateLathe(cpadr, cpcnt, seg)
        if ret is None:
            return None

        axis = op[c4d.PRIM_AXIS]
        if axis is None:
            return None

        RoundedTube.SetAxis(ret, axis)
        ret.SetName(op.GetName())
        return ret


# This code is called at startup; it registers the class RoundedTube as a plugin
# so it can be used later in Cinema 4D. It has to be done only once.
if __name__ == "__main__":
    # Get the current path of the file
    dir, file = os.path.split(__file__)

    # Load oroundedtube.tif from the res folder as a c4d BaseBitmap to be used as an icon.
    icon = bitmaps.BaseBitmap()
    icon.InitWith(os.path.join(dir, "res", "oroundedtube.tif"))

    # Register the class RoundedTube as an object plugin to be used later in Cinema 4D.
    plugins.RegisterObjectPlugin(id=PLUGIN_ID, str="Py-RoundedTube",
                                 g=RoundedTube,
                                 description="roundedtube", icon=icon,
                                 info=c4d.OBJECT_GENERATOR)
| 38.064067 | 195 | 0.589901 | 2,021 | 13,665 | 3.892133 | 0.158832 | 0.032418 | 0.097254 | 0.082126 | 0.52924 | 0.414315 | 0.302568 | 0.226799 | 0.201627 | 0.198195 | 0 | 0.040902 | 0.298646 | 13,665 | 358 | 196 | 38.170391 | 0.779841 | 0.143798 | 0 | 0.297189 | 0 | 0 | 0.004458 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048193 | false | 0.008032 | 0.02008 | 0.004016 | 0.124498 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
942a08b6947bb2c1cb07c5fce687f9f20cbf31ee | 1,548 | py | Python | scripts/models/checkpoint.py | daniele21/DL_soccer_prediction_v2 | 97bafe911fd8883d6679cf55fd0fff34db67ef06 | [
"MIT"
] | null | null | null | scripts/models/checkpoint.py | daniele21/DL_soccer_prediction_v2 | 97bafe911fd8883d6679cf55fd0fff34db67ef06 | [
"MIT"
] | null | null | null | scripts/models/checkpoint.py | daniele21/DL_soccer_prediction_v2 | 97bafe911fd8883d6679cf55fd0fff34db67ef06 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from scripts.utils.saving import save_soccer_model


def checkpoint(model, early_stopping=True):
    losses = model.losses
    patience_rate = model.es_patience

    if len(losses['train']) > 1 and len(losses['eval']) > 1 and early_stopping:
        last_train_loss = losses['train'][-2]
        curr_train_loss = losses['train'][-1]
        train_loss_diff = curr_train_loss - last_train_loss

        # try:
        #     last_five_eval_loss = losses['eval'][-6]
        #     last_three_eval_loss = losses['eval'][-4]
        # except:
        #     last_five_eval_loss = 10000
        #     last_three_eval_loss = 10000

        last_eval_loss = losses['eval'][-2]
        curr_eval_loss = losses['eval'][-1]
        eval_loss_diff = curr_eval_loss - last_eval_loss

        if (last_train_loss > curr_train_loss and
                last_eval_loss > curr_eval_loss):
            filepath = save_soccer_model(model)
            return filepath

        # else:
        #     if (train_loss_diff < last_train_loss * patience_rate and
        #             eval_loss_diff < last_eval_loss * patience_rate and
        #             curr_eval_loss - last_three_eval_loss < last_three_eval_loss * patience_rate and
        #             curr_eval_loss - last_five_eval_loss < last_five_eval_loss * patience_rate):
        #         filepath = save_soccer_model(model)
        #         return filepath
    else:
        filepath = save_soccer_model(model)
        return filepath

    return None
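
# Hypothetical shape of the expected `model.losses` mapping (illustrative only):
#
#     losses = {'train': [0.91, 0.74, 0.70], 'eval': [0.95, 0.80, 0.78]}
#
# Here both the latest train and eval losses decreased, so the model is saved.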
| 33.652174 | 102 | 0.605943 | 191 | 1,548 | 4.492147 | 0.219895 | 0.177156 | 0.06993 | 0.074592 | 0.305361 | 0.305361 | 0.247086 | 0.198135 | 0.090909 | 0 | 0 | 0.017691 | 0.306202 | 1,548 | 45 | 103 | 34.4 | 0.781192 | 0.352067 | 0 | 0.210526 | 0 | 0 | 0.027411 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.052632 | 0 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |