hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fa0ddd1a37b392e41cbb6638562c0f9ac2329f3f | 1,444 | py | Python | Tutortial2_calculator/list_break_input.py | Poorav06/PythonHunters | 589bc6bc568ae2df9edc15c9dcaaecd792f826e4 | [
"MIT"
] | null | null | null | Tutortial2_calculator/list_break_input.py | Poorav06/PythonHunters | 589bc6bc568ae2df9edc15c9dcaaecd792f826e4 | [
"MIT"
] | null | null | null | Tutortial2_calculator/list_break_input.py | Poorav06/PythonHunters | 589bc6bc568ae2df9edc15c9dcaaecd792f826e4 | [
"MIT"
] | null | null | null | # This code reads an input command or equation and breaks it up into separate variables using a list.
# Use a variable to take the input equation
equation = input("Please enter the equation: ")
# Let's check the type of the variable to see whether it can be used for calculation
print("type of the variable equation is ", type(equation))
# Why is the data type str?
# A variable assigned from an input command is always a string
# In this form the variable cannot be used for calculation
# How do we convert the string to a float?
#x =float(equation)
#print("type of the variable x is ",type(x))
# We will test this code by giving random inputs as a combination of numbers and strings
# test 1 : we will input 25.4
# test 2 : we will input 25.3 + 3
# test 1 will succeed
# test 2 will throw a ValueError at the float() conversion (line 14)
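# A minimal sketch of guarding that conversion with try/except (illustration only, not part of this tutorial's flow):
#try:
#    x = float(equation)
#except ValueError:
#    print("input is not a single number, so we split it instead")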
# How do we break the input string into the different values of the equation?
# How do we convert a string to a list?
# We will take a variable for the list and split the equation string into it
equation_lst = equation.split()
# Let's print and see
print(equation_lst)
print(type(equation_lst[0]))
# Will the list values be int/float or string? They will be strings, and that's why we have to convert them to float before doing any calculation.
n1 = float(equation_lst[0])
opt = equation_lst[1]
n2 = float(equation_lst[2])
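# Note: the operator stored in opt is captured but not used yet; this tutorial only demonstrates addition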
# Now let's add the numbers; result is used instead of sum to avoid shadowing the built-in sum()
result = n1 + n2
print("Sum of the numbers is", result) | 33.581395 | 147 | 0.734765 | 263 | 1,444 | 4.011407 | 0.365019 | 0.062559 | 0.025592 | 0.048341 | 0.041706 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018293 | 0.204986 | 1,444 | 43 | 148 | 33.581395 | 0.900697 | 0.74169 | 0 | 0 | 0 | 0 | 0.219373 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.4 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa113c5f6546a3cba1732a6d83a099482059458c | 513 | py | Python | base/urls.py | erengulum/Studybud | d0196314b07b47bedaa83733e1e594ffe4081f99 | [
"MIT"
] | 1 | 2022-03-28T07:38:32.000Z | 2022-03-28T07:38:32.000Z | base/urls.py | erengulum/Studybud | d0196314b07b47bedaa83733e1e594ffe4081f99 | [
"MIT"
] | null | null | null | base/urls.py | erengulum/Studybud | d0196314b07b47bedaa83733e1e594ffe4081f99 | [
"MIT"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path("",views.home,name="home"),
path("room/<str:pk>/",views.room,name="room"), #name is not necessary but it is important. Even though you can path, if the name stays same then there won't be any problem on other py files
path("create-room/",views.createRoom, name="create-room"),
path("update-room/<str:pk>",views.updateRoom, name="update-room"),
path("delete-room/<str:pk>",views.deleteRoom, name="delete-room"),
]
| 34.2 | 194 | 0.684211 | 79 | 513 | 4.443038 | 0.544304 | 0.059829 | 0.076923 | 0.119658 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.152047 | 513 | 14 | 195 | 36.642857 | 0.806897 | 0.274854 | 0 | 0 | 0 | 0 | 0.289189 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa1479a6a8614ed1d8dc97292266984ebbf908e4 | 1,857 | py | Python | app/main.py | ronanren/worldometers | 4c8e593716cf051265f324c0b07d2d87699f2b91 | [
"MIT"
] | 5 | 2020-09-24T22:14:02.000Z | 2021-05-11T22:01:26.000Z | app/main.py | ronanren/worldometers | 4c8e593716cf051265f324c0b07d2d87699f2b91 | [
"MIT"
] | 1 | 2020-09-25T15:38:26.000Z | 2020-09-25T15:44:19.000Z | app/main.py | ronanren/worldometers | 4c8e593716cf051265f324c0b07d2d87699f2b91 | [
"MIT"
] | 3 | 2020-09-25T15:35:41.000Z | 2021-05-11T22:01:31.000Z | from flask import Flask
from flask_restful import Api
import ast
from cachetools import cached, TTLCache
from apscheduler.schedulers.background import BackgroundScheduler
from app.scraping import fetch_data_coronavirus
app = Flask(__name__)
api = Api(app)
cacheCovid = TTLCache(maxsize=1024, ttl=30)
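# cache the parsed data for 30 seconds so the JSON file is not re-read and re-parsed on every request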
sched = BackgroundScheduler()
sched.add_job(fetch_data_coronavirus, 'interval', minutes=1, max_instances=2)
sched.start()
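# fetch_data_coronavirus runs every minute in the background, refreshing the scraped data file read by get_data_coronavirus below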
@app.route('/', methods=['GET'])
def home():
return "<h1>Unofficial Worldometers.info API</h1><p>This site is a API for get data from Worldometers.info</p>"
# CORONAVIRUS SECTION
@cached(cacheCovid)
def get_data_coronavirus():
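    # ast.literal_eval also accepts Python-style dict literals that json.loads would reject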
    with open('app/data/coronavirus.json', "r") as f:
        data = ast.literal_eval(f.read())
    return data
@app.route('/api/coronavirus/all/', methods=['GET'])
def api_coronavirus_all():
res = get_data_coronavirus()
return res
@app.route('/api/coronavirus/country/<country>', methods=['GET'])
def api_coronavirus_country(country):
result = {}
res = get_data_coronavirus()
i = 1
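    # index 0 holds the world totals (see api_coronavirus_world), so the country search starts at 1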
found = False
while (not found and i < len(res['data'])):
if (res['data'][i]['Country'].lower() == country.lower()):
found = True
else:
i += 1
if (found):
result['data'] = res['data'][i]
result['last_update'] = res['last_update']
return result
else:
return {"Error": "Country not found !"}
@app.route('/api/coronavirus/world/', methods=['GET'])
def api_coronavirus_world():
res = {}
data = get_data_coronavirus()
res['data'] = data['data'][0]
res['last_update'] = data['last_update']
return res
| 25.438356 | 115 | 0.676898 | 244 | 1,857 | 5.028689 | 0.352459 | 0.085575 | 0.04238 | 0.05379 | 0.066015 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009253 | 0.185245 | 1,857 | 72 | 116 | 25.791667 | 0.801718 | 0.010232 | 0 | 0.105263 | 0 | 0.017544 | 0.17756 | 0.067538 | 0 | 0 | 0 | 0 | 0 | 1 | 0.087719 | false | 0 | 0.22807 | 0.017544 | 0.421053 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa1589d76ae6f9221e4cf4a095022ee5a6de983a | 1,532 | py | Python | Inference/inference_module/inference_models.py | Clayrisee/BachelorsProject-Covid19Detection | 8f1d88b04418b2d53f5a53260981dbc2c95e6c82 | [
"MIT"
] | null | null | null | Inference/inference_module/inference_models.py | Clayrisee/BachelorsProject-Covid19Detection | 8f1d88b04418b2d53f5a53260981dbc2c95e6c82 | [
"MIT"
] | null | null | null | Inference/inference_module/inference_models.py | Clayrisee/BachelorsProject-Covid19Detection | 8f1d88b04418b2d53f5a53260981dbc2c95e6c82 | [
"MIT"
] | null | null | null | import torch
import timm
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
class InferenceModel(nn.Module):
def __init__(self, input_size, backbone, output_class):
super(InferenceModel, self).__init__()
self.input_size = input_size
self.network = timm.create_model(self._get_backbone_names(backbone=backbone),
pretrained=False, num_classes=output_class)
def _get_backbone_names(self, backbone:str):
backbone_dict = {
'vit': 'vit_small_patch32_224',
            'efficientnet_b0': 'efficientnet_b0',
'convnext': 'convnext_base'
}
return backbone_dict[backbone]
def forward(self, x):
"""
        Pass the batch input forward through the network (feature extractor and classifier).
        Args:
            x (torch.Tensor): Batch of input tensors.
"""
x = self.network(x)
return F.softmax(x, dim=1)
def preprocessing_img(self, img):
preprocess = transforms.Compose([
transforms.Resize(self.input_size),
transforms.ToTensor(),
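            # the standard ImageNet normalization statistics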
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
return preprocess(img)
@torch.no_grad()
def predict(self, input_img):
preprocess_img = torch.unsqueeze(self.preprocessing_img(input_img), dim=0)
# print(preprocess_img.shape)
return self(preprocess_img)
| 33.304348 | 107 | 0.624021 | 181 | 1,532 | 5.071823 | 0.464088 | 0.039216 | 0.042484 | 0.037037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029757 | 0.27611 | 1,532 | 45 | 108 | 34.044444 | 0.798016 | 0.117493 | 0 | 0 | 0 | 0 | 0.057165 | 0.016006 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15625 | false | 0 | 0.15625 | 0 | 0.46875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa1c3772e5c94aec58bcd1ec78d5878e695cc542 | 8,165 | py | Python | scripts/albert/classify_eval.py | ArthurRizar/dialog_state_tracking_bert | 8ee16a854d0ccaad44918c4ede16851acf11ceaa | [
"Apache-2.0"
] | null | null | null | scripts/albert/classify_eval.py | ArthurRizar/dialog_state_tracking_bert | 8ee16a854d0ccaad44918c4ede16851acf11ceaa | [
"Apache-2.0"
] | null | null | null | scripts/albert/classify_eval.py | ArthurRizar/dialog_state_tracking_bert | 8ee16a854d0ccaad44918c4ede16851acf11ceaa | [
"Apache-2.0"
] | null | null | null | #coding:utf-8
###################################################
# File Name: eval.py
# Author: Meng Zhao
# mail: @
# Created Time: Fri 23 Mar 2018 09:27:09 AM CST
#=============================================================
import os
import sys
import csv
import codecs
import numpy as np
import tensorflow as tf
sys.path.append('../')
from preprocess import tokenization
from preprocess import bert_data_utils
from preprocess import dataloader
from tensorflow.contrib import learn
#os.environ["CUDA_VISIBLE_DEVICES"] = ""  # uncomment to disable GPU use
flags = tf.flags
FLAGS = flags.FLAGS
MODEL_DIR = './output'
flags.DEFINE_boolean("do_lower_case", True, "Whether to lower case the input text")
flags.DEFINE_string("vocab_file", MODEL_DIR + '/vocab.txt', "vocab file")
flags.DEFINE_string("label_file", MODEL_DIR + '/labels.txt', "label file")
flags.DEFINE_string("label_map_file", MODEL_DIR + '/label_map', "label map file")
flags.DEFINE_string("model_dir", MODEL_DIR + '/checkpoints', "vocab file")
flags.DEFINE_string("bert_config_file", MODEL_DIR + '/bert_config.json', "config json file")
tf.flags.DEFINE_string("test_data_file", '../data/test.tsv', "Test data source.")
tf.flags.DEFINE_integer("batch_size", 32, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("max_sequence_length", 32, "max sequnce length")
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
def show_best_acc(preds, y_truth):
correct_predictions = float(sum(preds==y_truth))
print('Total number of test examples: {}'.format(len(y_truth)))
print('Accuracy: {:g}'.format(correct_predictions/float(len(y_truth))))
def show_topk_acc(all_topk, y_truth):
topk_corrects = 0.0
errs_id_x = []
errs_y = []
for idx, y in enumerate(y_truth):
if y in all_topk[idx]:
topk_corrects += 1.0
else:
pred_y = all_topk[idx][0]
errs_id_x.append(idx)
errs_y.append((y, pred_y))
print('Top k accuracy: {:g}'.format(topk_corrects/float(len(y_truth))))
def show_each_label_acc(idx2label, all_predictions, y_truth):
accs_map = {}
statistic_map = {}
for label_idx in idx2label:
label = idx2label[label_idx]
search_indices = [idx for idx, value in enumerate(y_truth) if value==label_idx]
cur_y = y_truth[search_indices]
cur_pred = all_predictions[search_indices]
cur_correct_pred = float(sum(cur_pred==cur_y))
if len(cur_y) != 0:
accs_map[label_idx] = cur_correct_pred / float(len(cur_y))
statistic_map[label_idx] = (int(cur_correct_pred), len(cur_y))
else:
accs_map[label_idx] = 0.0
statistic_map[label_idx] = (0, 0)
for label_idx in accs_map:
cur_acc = accs_map[label_idx]
corrects, total = statistic_map[label_idx]
print('label_idx: {}, label: {}, corrects: {}, total: {}, Accuracy: {:g} '.format(label_idx, idx2label[label_idx], corrects, total, cur_acc))
def write_predictions(raw_examples, features, all_predictions, all_topk, idx2label):
#get real label
y_truth = np.array([item.label_id for item in features])
#best acc
show_best_acc(all_predictions, y_truth)
#top k acc
show_topk_acc(all_topk, y_truth)
#each label acc
#show_each_label_acc(idx2label, all_predictions, y_truth)
#save the evaluation to a csv
all_topk_pred_label = []
for indices in all_topk:
labels = [idx2label[int(idx)] for idx in indices]
all_topk_pred_label.append(labels)
all_pred_label = [idx2label[int(idx)] for idx in all_predictions]
utf8_x_raw = [item.text_a for item in raw_examples]
utf8_y_raw = [idx2label[int(idx)] for idx in y_truth]
#predictions_human_readable = np.column_stack((np.array(utf8_x_raw), all_pred_label))
predictions_human_readable = np.column_stack((np.array(utf8_x_raw), utf8_y_raw))
predictions_human_readable = np.column_stack((predictions_human_readable, all_topk_pred_label))
    out_path = os.path.join(FLAGS.model_dir, '..', 'prediction.csv')
print('Saving evaluation to {0}'.format(out_path))
with open(out_path, 'w') as f:
        csv.writer(f, delimiter="\t").writerows(predictions_human_readable)
    diff_out_path = os.path.join(FLAGS.model_dir, '..', 'diff_prediction.csv')
print('Saving diff evaluation to {0}'.format(diff_out_path))
diff_predictions_human_readable = [item for item in predictions_human_readable if item[1] != item[2]]
with open(diff_out_path, 'w') as f:
csv.writer(f, delimiter="\t").writerows(diff_predictions_human_readable)
def get_feed_data(features):
feed_input_ids = [item.input_ids for item in features]
feed_input_mask = [item.input_mask for item in features]
feed_segment_ids = [item.segment_ids for item in features]
return feed_input_ids, feed_input_mask, feed_segment_ids
def eval():
tokenizer = tokenization.FullTokenizer(vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
label_map, idx2label = bert_data_utils.read_label_map_file(FLAGS.label_map_file)
features = bert_data_utils.file_based_convert_examples_to_features(FLAGS.test_data_file,
label_map,
FLAGS.max_sequence_length,
tokenizer)
print('\nEvaluating...\n')
#Evaluation
graph = tf.Graph()
with graph.as_default():
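        # restore the exported frozen inference graph from frozen_model.pb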
restore_graph_def = tf.GraphDef()
restore_graph_def.ParseFromString(open(FLAGS.model_dir+'/frozen_model.pb', 'rb').read())
tf.import_graph_def(restore_graph_def, name='')
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
#tensors we feed
input_ids = graph.get_operation_by_name('input_ids').outputs[0]
input_mask = graph.get_operation_by_name('input_mask').outputs[0]
token_type_ids = graph.get_operation_by_name('segment_ids').outputs[0]
is_training = graph.get_operation_by_name('is_training').outputs[0]
#tensors we want to evaluate
probs = graph.get_operation_by_name('loss/probs').outputs[0]
scores = graph.get_operation_by_name('loss/logits').outputs[0]
pred_labels = graph.get_operation_by_name('loss/pred_labels').outputs[0]
batches = dataloader.batch_iter(list(features), FLAGS.batch_size, 1, shuffle=False)
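            #single pass over the test set; shuffle=False keeps predictions aligned with the raw examples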
#collect the predictions here
all_predictions = []
all_topk = []
for batch in batches:
feed_input_ids, feed_input_mask, feed_segment_ids = get_feed_data(batch)
feed_dict = {input_ids: feed_input_ids,
input_mask: feed_input_mask,
token_type_ids: feed_segment_ids,
is_training: False,}
batch_probs, batch_scores, batch_pred_labels = sess.run([probs, scores, pred_labels],
feed_dict)
batch_pred_label = np.argmax(batch_probs, -1)
all_predictions = np.concatenate([all_predictions, batch_pred_label])
temp = np.argsort(-batch_scores, 1)
all_topk.extend(temp[:, :3].tolist()) #top 3
raw_examples = list(bert_data_utils.get_data_from_file(FLAGS.test_data_file))
#write predictions to file
write_predictions(raw_examples, features, all_predictions, all_topk, idx2label)
if __name__ == '__main__':
eval()
| 40.825 | 149 | 0.645315 | 1,088 | 8,165 | 4.516544 | 0.204963 | 0.018315 | 0.039072 | 0.027066 | 0.244404 | 0.196581 | 0.13431 | 0.124542 | 0.112332 | 0.063899 | 0 | 0.009791 | 0.236987 | 8,165 | 199 | 150 | 41.030151 | 0.778973 | 0.066013 | 0 | 0.015152 | 0 | 0 | 0.107493 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.083333 | 0 | 0.136364 | 0.05303 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa1cf87f9aad0af91c7941ef3c0694ecd2070abf | 28,189 | py | Python | caret_analyze/infra/lttng/ros2_tracing/data_model.py | atsushi421/CARET_analyze | 3ee1c8945522312b372f350e1ca68f86af2c997f | [
"Apache-2.0"
] | null | null | null | caret_analyze/infra/lttng/ros2_tracing/data_model.py | atsushi421/CARET_analyze | 3ee1c8945522312b372f350e1ca68f86af2c997f | [
"Apache-2.0"
] | 1 | 2022-02-21T07:48:39.000Z | 2022-02-21T07:48:39.000Z | caret_analyze/infra/lttng/ros2_tracing/data_model.py | atsushi421/CARET_analyze | 3ee1c8945522312b372f350e1ca68f86af2c997f | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Robert Bosch GmbH
# Copyright 2020-2021 Christophe Bedard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for ROS 2 data model."""
from caret_analyze.record.record_factory import RecordFactory, RecordsFactory
import pandas as pd
from tracetools_analysis.data_model import (DataModel,
DataModelIntermediateStorage)
class Ros2DataModel(DataModel):
"""
Container to model pre-processed ROS 2 data for analysis.
This aims to represent the data in a ROS 2-aware way.
"""
def __init__(self) -> None:
"""Create a Ros2DataModel."""
super().__init__()
# Objects (one-time events, usually when something is created)
self._contexts: DataModelIntermediateStorage = []
self._nodes: DataModelIntermediateStorage = []
self._publishers: DataModelIntermediateStorage = []
self._subscriptions: DataModelIntermediateStorage = []
self._subscription_objects: DataModelIntermediateStorage = []
self._services: DataModelIntermediateStorage = []
self._clients: DataModelIntermediateStorage = []
self._timers: DataModelIntermediateStorage = []
self._timer_node_links: DataModelIntermediateStorage = []
self._callback_objects: DataModelIntermediateStorage = []
self._callback_symbols: DataModelIntermediateStorage = []
self._lifecycle_state_machines: DataModelIntermediateStorage = []
self._executors: DataModelIntermediateStorage = []
self._executors_static: DataModelIntermediateStorage = []
self._callback_groups: DataModelIntermediateStorage = []
self._callback_groups_static: DataModelIntermediateStorage = []
self._callback_group_timer: DataModelIntermediateStorage = []
self._callback_group_subscription: DataModelIntermediateStorage = []
self._callback_group_service: DataModelIntermediateStorage = []
self._callback_group_client: DataModelIntermediateStorage = []
self._rmw_impl: DataModelIntermediateStorage = []
self._tilde_subscriptions: DataModelIntermediateStorage = []
self._tilde_publishers: DataModelIntermediateStorage = []
self._tilde_subscribe_added: DataModelIntermediateStorage = []
# Events (multiple instances, may not have a meaningful index)
# string argument
self._lifecycle_transitions: DataModelIntermediateStorage = []
# Events (multiple instances, may not have a meaningful index)
self.callback_start_instances = RecordsFactory.create_instance(
None, ['callback_start_timestamp', 'callback_object', 'is_intra_process']
)
self.callback_end_instances = RecordsFactory.create_instance(
None, ['callback_end_timestamp', 'callback_object']
)
self.dds_write_instances = RecordsFactory.create_instance(
None, ['tid', 'dds_write_timestamp', 'message']
)
self.dds_bind_addr_to_stamp = RecordsFactory.create_instance(
None, ['tid', 'dds_bind_addr_to_stamp_timestamp', 'addr', 'source_timestamp']
)
self.dds_bind_addr_to_addr = RecordsFactory.create_instance(
None, ['dds_bind_addr_to_addr_timestamp', 'addr_from', 'addr_to']
)
self.on_data_available_instances = RecordsFactory.create_instance(
None, ['on_data_available_timestamp', 'source_timestamp']
)
self.rclcpp_intra_publish_instances = RecordsFactory.create_instance(
None, ['tid', 'rclcpp_intra_publish_timestamp', 'publisher_handle',
'message', 'message_timestamp']
)
self.rclcpp_publish_instances = RecordsFactory.create_instance(
None, [
'tid', 'rclcpp_publish_timestamp', 'publisher_handle',
'message', 'message_timestamp'
]
)
self.rcl_publish_instances = RecordsFactory.create_instance(
None, ['tid', 'rcl_publish_timestamp', 'publisher_handle', 'message']
)
self.dispatch_subscription_callback_instances = RecordsFactory.create_instance(
None, ['dispatch_subscription_callback_timestamp', 'callback_object', 'message',
'source_timestamp', 'message_timestamp'])
self.dispatch_intra_process_subscription_callback_instances = \
RecordsFactory.create_instance(
None,
['dispatch_intra_process_subscription_callback_timestamp', 'callback_object',
'message', 'message_timestamp']
)
self.message_construct_instances = RecordsFactory.create_instance(
None, ['message_construct_timestamp', 'original_message', 'constructed_message']
)
self.tilde_subscribe = RecordsFactory.create_instance(
None, [
'tilde_subscribe_timestamp',
'subscription',
'tilde_message_id']
)
self.tilde_publish = RecordsFactory.create_instance(
None, [
'tilde_publish_timestamp',
'publisher',
'subscription_id',
'tilde_message_id']
)
self.sim_time = RecordsFactory.create_instance(
None, [
'system_time',
'sim_time']
)
self.timer_event = RecordsFactory.create_instance(
None, [
'time_event_stamp']
)
def add_context(self, context_handle, timestamp, pid, version) -> None:
record = {
'context_handle': context_handle,
'timestamp': timestamp,
'pid': pid,
'version': version, # Comment out to align with Dict[str: int64_t]
}
self._contexts.append(record)
def add_node(self, node_handle, timestamp, tid, rmw_handle, name, namespace) -> None:
record = {
'node_handle': node_handle,
'timestamp': timestamp,
'tid': tid,
'rmw_handle': rmw_handle,
'namespace': namespace,
'name': name,
}
self._nodes.append(record)
def add_publisher(self, handle, timestamp, node_handle, rmw_handle, topic_name, depth) -> None:
record = {
'publisher_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'topic_name': topic_name,
'depth': depth,
}
self._publishers.append(record)
def add_rcl_subscription(
self, handle, timestamp, node_handle, rmw_handle, topic_name, depth
) -> None:
record = {
'subscription_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'topic_name': topic_name,
'depth': depth,
}
self._subscriptions.append(record)
def add_rclcpp_subscription(
self, subscription_pointer, timestamp, subscription_handle
) -> None:
record = {
'subscription': subscription_pointer,
'timestamp': timestamp,
'subscription_handle': subscription_handle,
}
self._subscription_objects.append(record)
def add_service(self, handle, timestamp, node_handle, rmw_handle, service_name) -> None:
record = {
'service_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'service_name': service_name,
}
self._services.append(record)
def add_client(self, handle, timestamp, node_handle, rmw_handle, service_name) -> None:
record = {
'client_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'service_name': service_name,
}
self._clients.append(record)
def add_timer(self, handle, timestamp, period, tid) -> None:
record = {
'timer_handle': handle,
'timestamp': timestamp,
'period': period,
'tid': tid,
}
self._timers.append(record)
def add_tilde_subscribe_added(
self, subscription_id, node_name, topic_name, timestamp
) -> None:
record = {
'subscription_id': subscription_id,
'node_name': node_name,
'topic_name': topic_name,
'timestamp': timestamp
}
self._tilde_subscribe_added.append(record)
def add_timer_node_link(self, handle, timestamp, node_handle) -> None:
record = {
'timer_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
}
self._timer_node_links.append(record)
def add_callback_object(self, reference, timestamp, callback_object) -> None:
record = {
'reference': reference,
'timestamp': timestamp,
'callback_object': callback_object,
}
self._callback_objects.append(record)
def add_callback_symbol(self, callback_object, timestamp, symbol) -> None:
record = {
'callback_object': callback_object,
'timestamp': timestamp,
'symbol': symbol,
}
self._callback_symbols.append(record)
def add_lifecycle_state_machine(self, handle, node_handle) -> None:
record = {
'state_machine_handle': handle,
'node_handle': node_handle,
}
self._lifecycle_state_machines.append(record)
def add_lifecycle_state_transition(
self, state_machine_handle, start_label, goal_label, timestamp
) -> None:
record = {
'state_machine_handle': state_machine_handle,
'start_label': start_label,
'goal_label': goal_label,
'timestamp': timestamp,
}
self._lifecycle_transitions.append(record)
def add_tilde_subscription(
self, subscription, node_name, topic_name, timestamp
) -> None:
record = {
'subscription': subscription,
'node_name': node_name,
'topic_name': topic_name,
'timestamp': timestamp,
}
self._tilde_subscriptions.append(record)
def add_tilde_publisher(
self, publisher, node_name, topic_name, timestamp
) -> None:
record = {
'publisher': publisher,
'node_name': node_name,
'topic_name': topic_name,
'timestamp': timestamp,
}
self._tilde_publishers.append(record)
def add_callback_start_instance(
self, timestamp: int, callback: int, is_intra_process: bool
) -> None:
record = RecordFactory.create_instance(
{
'callback_start_timestamp': timestamp,
'callback_object': callback,
'is_intra_process': is_intra_process,
}
)
self.callback_start_instances.append(record)
def add_callback_end_instance(self, timestamp: int, callback: int) -> None:
record = RecordFactory.create_instance(
{'callback_end_timestamp': timestamp, 'callback_object': callback}
)
self.callback_end_instances.append(record)
def add_rclcpp_intra_publish_instance(
self,
tid: int,
timestamp: int,
publisher_handle: int,
message: int,
message_timestamp: int,
) -> None:
record = RecordFactory.create_instance(
{
'tid': tid,
'rclcpp_intra_publish_timestamp': timestamp,
'publisher_handle': publisher_handle,
'message': message,
'message_timestamp': message_timestamp,
}
)
self.rclcpp_intra_publish_instances.append(record)
def add_rclcpp_publish_instance(
self,
tid: int,
timestamp: int,
publisher_handle: int,
message: int,
message_timestamp: int,
) -> None:
record = RecordFactory.create_instance(
{
'tid': tid,
'rclcpp_publish_timestamp': timestamp,
'publisher_handle': publisher_handle,
'message': message,
'message_timestamp': message_timestamp,
}
)
self.rclcpp_publish_instances.append(record)
def add_rcl_publish_instance(
self,
tid: int,
timestamp: int,
publisher_handle: int,
message: int,
) -> None:
record = RecordFactory.create_instance(
{
'tid': tid,
'rcl_publish_timestamp': timestamp,
'publisher_handle': publisher_handle,
'message': message,
}
)
self.rcl_publish_instances.append(record)
def add_dds_write_instance(
self,
tid: int,
timestamp: int,
message: int,
) -> None:
record = RecordFactory.create_instance(
{
'tid': tid,
'dds_write_timestamp': timestamp,
'message': message,
}
)
self.dds_write_instances.append(record)
def add_dds_bind_addr_to_addr(
self,
timestamp: int,
addr_from: int,
addr_to: int,
) -> None:
record = RecordFactory.create_instance(
{
'dds_bind_addr_to_addr_timestamp': timestamp,
'addr_from': addr_from,
'addr_to': addr_to,
}
)
self.dds_bind_addr_to_addr.append(record)
def add_dds_bind_addr_to_stamp(
self,
tid: int,
timestamp: int,
addr: int,
source_timestamp: int,
) -> None:
record = RecordFactory.create_instance(
{
'tid': tid,
'dds_bind_addr_to_stamp_timestamp': timestamp,
'addr': addr,
'source_timestamp': source_timestamp,
}
)
self.dds_bind_addr_to_stamp.append(record)
def add_on_data_available_instance(
self,
timestamp: int,
source_timestamp: int,
) -> None:
record = RecordFactory.create_instance(
{
'on_data_available_timestamp': timestamp,
'source_timestamp': source_timestamp,
}
)
self.on_data_available_instances.append(record)
def add_message_construct_instance(
self, timestamp: int, original_message: int, constructed_message: int
) -> None:
record = RecordFactory.create_instance(
{
'message_construct_timestamp': timestamp,
'original_message': original_message,
'constructed_message': constructed_message,
}
)
self.message_construct_instances.append(record)
def add_dispatch_subscription_callback_instance(
self,
timestamp: int,
callback_object: int,
message: int,
source_timestamp: int,
message_timestamp: int,
) -> None:
record = RecordFactory.create_instance(
{
'dispatch_subscription_callback_timestamp': timestamp,
'callback_object': callback_object,
'message': message,
'source_timestamp': source_timestamp,
'message_timestamp': message_timestamp,
}
)
self.dispatch_subscription_callback_instances.append(record)
def add_sim_time(
self,
timestamp: int,
sim_time: int
) -> None:
record = RecordFactory.create_instance(
{
'system_time': timestamp,
'sim_time': sim_time
}
)
self.sim_time.append(record)
def add_rmw_implementation(self, rmw_impl: str):
self._rmw_impl.append({'rmw_impl': rmw_impl})
def add_dispatch_intra_process_subscription_callback_instance(
self,
timestamp: int,
callback_object: int,
message: int,
message_timestamp: int,
) -> None:
record = RecordFactory.create_instance(
{
'dispatch_intra_process_subscription_callback_timestamp': timestamp,
'callback_object': callback_object,
'message': message,
'message_timestamp': message_timestamp
}
)
self.dispatch_intra_process_subscription_callback_instances.append(
record)
def add_tilde_subscribe(
self,
timestamp: int,
subscription: int,
tilde_message_id: int,
) -> None:
record = RecordFactory.create_instance(
{
'tilde_subscribe_timestamp': timestamp,
'subscription': subscription,
'tilde_message_id': tilde_message_id
}
)
self.tilde_subscribe.append(record)
def add_tilde_publish(
self,
timestamp: int,
publisher: int,
subscription_id: int,
tilde_message_id: int,
) -> None:
record = RecordFactory.create_instance(
{
'tilde_publish_timestamp': timestamp,
'publisher': publisher,
'subscription_id': subscription_id,
'tilde_message_id': tilde_message_id,
}
)
self.tilde_publish.append(record)
def add_executor(
self,
executor_addr: int,
timestamp: int,
executor_type_name: str
) -> None:
record = {
'timestamp': timestamp,
'executor_addr': executor_addr,
'executor_type_name': executor_type_name,
}
self._executors.append(record)
def add_executor_static(
self,
executor_addr: int,
entities_collector_addr: int,
timestamp: int,
executor_type_name: str
) -> None:
record = {
'timestamp': timestamp,
'executor_addr': executor_addr,
'entities_collector_addr': entities_collector_addr,
'executor_type_name': executor_type_name,
}
self._executors_static.append(record)
def add_callback_group(
self,
executor_addr: int,
timestamp: int,
callback_group_addr: int,
group_type_name: str
) -> None:
record = {
'timestamp': timestamp,
'executor_addr': executor_addr,
'callback_group_addr': callback_group_addr,
'group_type_name': group_type_name
}
self._callback_groups.append(record)
def add_callback_group_static_executor(
self,
entities_collector_addr: int,
timestamp: int,
callback_group_addr: int,
group_type_name: str
) -> None:
record = {
'timestamp': timestamp,
'entities_collector_addr': entities_collector_addr,
'callback_group_addr': callback_group_addr,
'group_type_name': group_type_name
}
self._callback_groups_static.append(record)
def callback_group_add_timer(
self,
callback_group_addr: int,
timestamp: int,
timer_handle: int
) -> None:
record = {
'timestamp': timestamp,
'callback_group_addr': callback_group_addr,
'timer_handle': timer_handle,
}
self._callback_group_timer.append(record)
def callback_group_add_subscription(
self,
callback_group_addr: int,
timestamp: int,
subscription_handle: int
) -> None:
record = {
'timestamp': timestamp,
'callback_group_addr': callback_group_addr,
'subscription_handle': subscription_handle,
}
self._callback_group_subscription.append(record)
def callback_group_add_service(
self,
callback_group_addr: int,
timestamp: int,
service_handle: int
) -> None:
record = {
'timestamp': timestamp,
'callback_group_addr': callback_group_addr,
'service_handle': service_handle,
}
self._callback_group_service.append(record)
def callback_group_add_client(
self,
callback_group_addr: int,
timestamp: int,
client_handle: int
) -> None:
record = {
'timestamp': timestamp,
'callback_group_addr': callback_group_addr,
'client_handle': client_handle,
}
self._callback_group_client.append(record)
def _finalize(self) -> None:
self.contexts = pd.DataFrame.from_dict(self._contexts)
if self._contexts:
self.contexts.set_index('context_handle', inplace=True, drop=True)
self.nodes = pd.DataFrame.from_dict(self._nodes)
if self._nodes:
self.nodes.set_index('node_handle', inplace=True, drop=True)
self.publishers = pd.DataFrame.from_dict(self._publishers)
if self._publishers:
self.publishers.set_index(
'publisher_handle', inplace=True, drop=True)
self.subscriptions = pd.DataFrame.from_dict(self._subscriptions)
if self._subscriptions:
self.subscriptions.set_index(
'subscription_handle', inplace=True, drop=True)
self.subscription_objects = pd.DataFrame.from_dict(
self._subscription_objects)
if self._subscription_objects:
self.subscription_objects.set_index(
'subscription', inplace=True, drop=True)
self.services = pd.DataFrame.from_dict(self._services)
if self._services:
self.services.set_index('service_handle', inplace=True, drop=True)
self.clients = pd.DataFrame.from_dict(self._clients)
if self._clients:
self.clients.set_index('client_handle', inplace=True, drop=True)
self.timers = pd.DataFrame.from_dict(self._timers)
if self._timers:
self.timers.set_index('timer_handle', inplace=True, drop=True)
self.timer_node_links = pd.DataFrame.from_dict(self._timer_node_links)
if self._timer_node_links:
self.timer_node_links.set_index(
'timer_handle', inplace=True, drop=True)
self.callback_objects = pd.DataFrame.from_dict(self._callback_objects)
if self._callback_objects:
self.callback_objects.set_index(
'reference', inplace=True, drop=True)
self.callback_symbols = pd.DataFrame.from_dict(self._callback_symbols)
if self._callback_symbols:
self.callback_symbols.set_index(
'callback_object', inplace=True, drop=True)
self.lifecycle_state_machines = pd.DataFrame.from_dict(
self._lifecycle_state_machines)
if self._lifecycle_state_machines:
self.lifecycle_state_machines.set_index(
'state_machine_handle', inplace=True, drop=True
)
self.lifecycle_transitions = pd.DataFrame.from_dict(
self._lifecycle_transitions)
self.executors = pd.DataFrame.from_dict(self._executors)
if self._executors:
self.executors.set_index(
'executor_addr', inplace=True, drop=True
)
self.executors_static = pd.DataFrame.from_dict(self._executors_static)
if self._executors_static:
self.executors_static.set_index(
'executor_addr', inplace=True, drop=True
)
self.callback_groups = pd.DataFrame.from_dict(self._callback_groups)
if self._callback_groups:
self.callback_groups.set_index(
'callback_group_addr', inplace=True, drop=True
)
self.callback_groups_static = pd.DataFrame.from_dict(self._callback_groups_static)
if self._callback_groups_static:
self.callback_groups_static.set_index(
'callback_group_addr', inplace=True, drop=True
)
self.callback_group_timer = pd.DataFrame.from_dict(self._callback_group_timer)
if self._callback_group_timer:
self.callback_group_timer.set_index(
'callback_group_addr', inplace=True, drop=True
)
self.callback_group_subscription = pd.DataFrame.from_dict(
self._callback_group_subscription)
if self._callback_group_subscription:
self.callback_group_subscription.set_index(
'callback_group_addr', inplace=True, drop=True
)
self.callback_group_service = pd.DataFrame.from_dict(self._callback_group_service)
if self._callback_group_service:
self.callback_group_service.set_index(
'callback_group_addr', inplace=True, drop=True
)
self.callback_group_client = pd.DataFrame.from_dict(self._callback_group_client)
if self._callback_group_client:
self.callback_group_client.set_index(
'callback_group_addr', inplace=True, drop=True
)
self.tilde_subscriptions = pd.DataFrame.from_dict(self._tilde_subscriptions)
if self._tilde_subscriptions:
self.tilde_subscriptions.set_index(
'subscription', inplace=True, drop=True
)
self.tilde_publishers = pd.DataFrame.from_dict(self._tilde_publishers)
if self._tilde_publishers:
self.tilde_publishers.set_index(
'publisher', inplace=True, drop=True
)
self.tilde_subscribe_added = pd.DataFrame.from_dict(self._tilde_subscribe_added)
if self._tilde_subscribe_added:
self.tilde_subscribe_added.set_index(
'subscription_id', inplace=True, drop=True
)
self.rmw_impl = pd.DataFrame.from_dict(self._rmw_impl)
def print_data(self) -> None:
print('====================ROS 2 DATA MODEL===================')
print('Contexts:')
print(self.contexts.to_string())
print()
print('Nodes:')
print(self.nodes.to_string())
print()
print('Publishers:')
print(self.publishers.to_string())
print()
print('Subscriptions:')
print(self.subscriptions.to_string())
print()
print('Subscription objects:')
print(self.subscription_objects.to_string())
print()
print('Services:')
print(self.services.to_string())
print()
print('Clients:')
print(self.clients.to_string())
print()
print('Timers:')
print(self.timers.to_string())
print()
print('Timer-node links:')
print(self.timer_node_links.to_string())
print()
print('Callback objects:')
print(self.callback_objects.to_string())
print()
print('Callback symbols:')
print(self.callback_symbols.to_string())
print()
print('Callback instances:')
print(self.callback_instances.to_string())
print()
print('Lifecycle state machines:')
print(self.lifecycle_state_machines.to_string())
print()
print('Lifecycle transitions:')
print(self.lifecycle_transitions.to_string())
print('==================================================')
| 35.909554 | 99 | 0.605201 | 2,694 | 28,189 | 5.979955 | 0.078693 | 0.044693 | 0.036313 | 0.037989 | 0.568963 | 0.470577 | 0.336934 | 0.290627 | 0.2455 | 0.216884 | 0 | 0.001222 | 0.303558 | 28,189 | 784 | 100 | 35.955357 | 0.819377 | 0.035581 | 0 | 0.388652 | 0 | 0 | 0.126244 | 0.033274 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060993 | false | 0 | 0.004255 | 0 | 0.066667 | 0.062411 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa1d85254f9ec636a9176b1bc341ab629a7a8196 | 19,252 | py | Python | logstash/logstash.py | dexbiobot/SML-Cogs | e8d3d12e5bf1d760196006f86a6c16ed95e3c964 | [
"MIT"
] | 17 | 2017-05-30T13:21:18.000Z | 2022-03-27T13:08:17.000Z | logstash/logstash.py | dexbiobot/SML-Cogs | e8d3d12e5bf1d760196006f86a6c16ed95e3c964 | [
"MIT"
] | 16 | 2017-06-11T12:55:06.000Z | 2019-02-20T21:00:59.000Z | logstash/logstash.py | dexbiobot/SML-Cogs | e8d3d12e5bf1d760196006f86a6c16ed95e3c964 | [
"MIT"
] | 17 | 2017-05-03T16:09:46.000Z | 2020-05-13T21:19:37.000Z | """
The MIT License (MIT)
Copyright (c) 2017 SML
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import logging
import os
import re
import json
from datetime import timedelta
import logstash
from __main__ import send_cmd_help
from discord import Channel
from discord import ChannelType
from discord import Game
from discord import Member
from discord import Message
from discord import Role
from discord import Server
from discord import Status
from discord.ext import commands
from discord.ext.commands import Command
from discord.ext.commands import Context
from cogs.utils import checks
from cogs.utils.dataIO import dataIO
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
from elasticsearch_dsl.query import QueryString
from elasticsearch_dsl.query import Range
HOST = 'localhost'
PORT = 5959
INTERVAL = timedelta(hours=4).seconds
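# gauge-logging interval: 4 hours, expressed in seconds (14400)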
DB_PATH = os.path.join('data', 'logstash', 'logstash.db')
PATH = os.path.join('data', 'logstash')
JSON = os.path.join(PATH, 'settings.json')
EMOJI_P = re.compile('\<\:.+?\:\d+\>')
UEMOJI_P = re.compile(u'['
u'\U0001F300-\U0001F64F'
u'\U0001F680-\U0001F6FF'
u'\uD83C-\uDBFF\uDC00-\uDFFF'
u'\u2600-\u26FF\u2700-\u27BF]{1,2}',
re.UNICODE)
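# EMOJI_P matches Discord custom emoji (<:name:id>); UEMOJI_P matches common unicode emoji ranges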
class Logstash:
"""Send activity of Discord using Google Analytics."""
def __init__(self, bot):
"""Init."""
self.bot = bot
self.settings = dataIO.load_json(JSON)
self.extra = {}
self.task = bot.loop.create_task(self.loop_task())
self.handler = logstash.LogstashHandler(HOST, PORT, version=1)
self.logger = logging.getLogger('discord.logger')
self.logger.setLevel(logging.INFO)
self.logger.addHandler(self.handler)
self.logger.info('discord.logger: Logstash cog init')
logging.getLogger("red").addHandler(self.handler)
def __unload(self):
"""Unhook logger when unloaded.
Thanks Kowlin!
"""
self.logger.removeHandler(self.handler)
logging.getLogger("red").removeHandler(self.handler)
async def loop_task(self):
"""Loop task."""
# pass
# disabled to see if it is causing memory problems
await self.bot.wait_until_ready()
self.extra = {
'log_type': 'discord.logger',
'application': 'red',
'bot_id': self.bot.user.id,
'bot_name': self.bot.user.name
}
self.log_all_gauges()
await asyncio.sleep(INTERVAL)
if self is self.bot.get_cog('Logstash'):
self.task = self.bot.loop.create_task(self.loop_task())
@commands.group(pass_context=True, no_pm=True)
@checks.serverowner_or_permissions(manage_server=True)
async def logstash(self, ctx: Context):
"""Logstash command. Admin only."""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@logstash.command(name="all", pass_context=True)
async def logstash_all(self):
"""Send all stats."""
self.log_all_gauges()
await self.bot.say("Logged all.")
@logstash.command(name="log", pass_context=True)
async def logstash_log(self, ctx, key, *, json_str):
"""Log an arbitrary event with key an json input.
[p]logstash log key_name {"x": 1, "y": 2, "z": 3}
"""
try:
extra = json.loads(json_str)
except json.JSONDecodeError:
await self.bot.say("Invalid JSON entered.")
return
self.logger.info(key, extra=extra)
await self.bot.say("Logged.")
async def on_channel_create(self, channel: Channel):
"""Track channel creation."""
self.log_channel_create(channel)
async def on_channel_delete(self, channel: Channel):
"""Track channel deletion."""
self.log_channel_delete(channel)
async def on_command(self, command: Command, ctx: Context):
"""Track command usage."""
self.log_command(command, ctx)
async def on_message(self, message: Message):
"""Track on message."""
self.log_message(message)
# self.log_emojis(message)
async def on_message_delete(self, message: Message):
"""Track message deletion."""
self.log_message_delete(message)
async def on_message_edit(self, before: Message, after: Message):
"""Track message editing."""
self.log_message_edit(before, after)
async def on_member_join(self, member: Member):
"""Track members joining server."""
self.log_member_join(member)
async def on_member_update(self, before: Member, after: Member):
"""Called when a Member updates their profile.
Only track status after.
"""
self.log_member_update(before, after)
async def on_member_remove(self, member: Member):
"""Track members leaving server."""
self.log_member_remove(member)
async def on_ready(self):
"""Bot ready."""
self.log_all_gauges()
async def on_resume(self):
"""Bot resume."""
self.log_all_gauges()
def get_message_sca(self, message: Message):
"""Return server, channel and author from message."""
return message.server, message.channel, message.author
def get_server_params(self, server: Server):
"""Return extra fields for server."""
extra = {
'id': server.id,
'name': server.name,
}
return extra
def get_channel_params(self, channel: Channel):
"""Return extra fields for channel."""
extra = {
'id': channel.id,
'name': channel.name,
'server': self.get_server_params(channel.server),
'position': channel.position,
'is_default': channel.is_default,
'created_at': channel.created_at.isoformat(),
'type': {
'text': channel.type == ChannelType.text,
'voice': channel.type == ChannelType.voice,
'private': channel.type == ChannelType.private,
'group': channel.type == ChannelType.group
}
}
return extra
def get_server_channel_params(self, channel: Channel):
"""Return digested version of channel params"""
extra = {
'id': channel.id,
'name': channel.name,
'position': channel.position,
'is_default': channel.is_default,
'created_at': channel.created_at.isoformat(),
}
return extra
def get_member_params(self, member: Member):
"""Return data for member."""
extra = {
'name': member.display_name,
'username': member.name,
'display_name': member.display_name,
'id': member.id,
'bot': member.bot
}
if isinstance(member, Member):
extra.update({
'status': self.get_extra_status(member.status),
'game': self.get_game_params(member.game),
'top_role': self.get_role_params(member.top_role),
'joined_at': member.joined_at.isoformat()
})
if hasattr(member, 'server'):
extra['server'] = self.get_server_params(member.server)
        # a message sometimes references a user and has no roles info
if hasattr(member, 'roles'):
extra['roles'] = [self.get_role_params(r) for r in member.server.role_hierarchy if r in member.roles]
return extra
def get_role_params(self, role: Role):
"""Return data for role."""
if not role:
return {}
extra = {
'name': role.name,
'id': role.id
}
return extra
def get_extra_status(self, status: Status):
"""Return data for status."""
extra = {
'online': status == Status.online,
'offline': status == Status.offline,
'idle': status == Status.idle,
'dnd': status == Status.dnd,
'invisible': status == Status.invisible
}
return extra
def get_game_params(self, game: Game):
"""Return ata for game."""
if game is None:
return {}
extra = {
'name': game.name,
'url': game.url,
'type': game.type
}
return extra
def get_sca_params(self, message: Message):
"""Return extra fields from messages."""
server = message.server
channel = message.channel
author = message.author
extra = {}
if author is not None:
extra['author'] = self.get_member_params(author)
if channel is not None:
extra['channel'] = self.get_channel_params(channel)
if server is not None:
extra['server'] = self.get_server_params(server)
return extra
def get_mentions_extra(self, message: Message):
"""Return mentions in message."""
mentions = set(message.mentions.copy())
names = [m.display_name for m in mentions]
ids = [m.id for m in mentions]
return {
'mention_names': names,
'mention_ids': ids
}
def get_emojis_params(self, message: Message):
"""Return list of emojis used in messages."""
emojis = []
emojis.append(EMOJI_P.findall(message.content))
emojis.append(UEMOJI_P.findall(message.content))
return {
'emojis': emojis
}
def get_event_key(self, name: str):
"""Return event name used in logger."""
return "discord.logger.{}".format(name)
def log(self, key, extra=None):
"""Generic logging.
Used to allow other cogs to log with this cog.
"""
self.logger.info(key, extra=extra)
def log_command(self, command, ctx):
"""Log bot commands."""
pass
# extra = {
# 'name': command.name
# }
# extra.update(self.get_sca_params(ctx.message))
# self.log_discord_event("command", extra)
def log_emojis(self, message: Message):
"""Log emoji uses."""
emojis = []
emojis.append(EMOJI_P.findall(message.content))
emojis.append(UEMOJI_P.findall(message.content))
if not self.extra:
return
for emoji in emojis:
extra = self.extra.copy()
event_key = "message.emoji"
extra = {
'discord_event': event_key,
'emoji': emoji
}
self.logger.info(self.get_event_key(event_key), extra=extra)
def log_discord(self, key=None, is_event=False, is_gauge=False, extra=None):
"""Log Discord logs"""
if key is None:
return
if self.extra is None:
return
if extra is None:
extra = {}
extra.update(self.extra.copy())
if is_event:
extra['discord_event'] = key
if is_gauge:
extra['discord_gauge'] = key
self.logger.info(self.get_event_key(key), extra=extra)
def log_discord_event(self, key=None, extra=None):
"""Log Discord events."""
self.log_discord(key=key, is_event=True, extra=extra)
def log_discord_gauge(self, key=None, extra=None):
"""Log Discord events."""
self.log_discord(key=key, is_gauge=True, extra=extra)
def log_channel_create(self, channel: Channel):
"""Log channel creation."""
extra = {
'channel': self.get_channel_params(channel)
}
self.log_discord_event("channel.create", extra)
def log_channel_delete(self, channel: Channel):
"""Log channel deletion."""
extra = {
'channel': self.get_channel_params(channel)
}
self.log_discord_event("channel.delete", extra)
def log_member_join(self, member: Member):
"""Log member joining the server."""
extra = {
'member': self.get_member_params(member)
}
self.log_discord_event("member.join", extra)
def log_member_update(self, before: Member, after: Member):
"""Track member’s updated status."""
if set(before.roles) != set(after.roles):
extra = {
'member': self.get_member_params(after)
}
if len(before.roles) > len(after.roles):
roles_removed = set(before.roles) - set(after.roles)
extra['role_update'] = 'remove'
extra['roles_removed'] = [self.get_role_params(r) for r in roles_removed]
else:
roles_added = set(after.roles) - set(before.roles)
extra['role_update'] = 'add'
extra['roles_added'] = [self.get_role_params(r) for r in roles_added]
self.log_discord_event('member.update.roles', extra)
def log_member_remove(self, member: Member):
"""Log member leaving the server."""
extra = {
'member': self.get_member_params(member)
}
self.log_discord_event("member.remove", extra)
def log_message(self, message: Message):
"""Log message."""
extra = {'content': message.content}
extra.update(self.get_sca_params(message))
extra.update(self.get_mentions_extra(message))
self.log_discord_event('message', extra)
def log_message_delete(self, message: Message):
"""Log deleted message."""
extra = {'content': message.content}
extra.update(self.get_sca_params(message))
extra.update(self.get_mentions_extra(message))
self.log_discord_event('message.delete', extra)
def log_message_edit(self, before: Message, after: Message):
"""Log message editing."""
extra = {
'content_before': before.content,
'content_after': after.content
}
extra.update(self.get_sca_params(after))
extra.update(self.get_mentions_extra(after))
self.log_discord_event('message.edit', extra)
def log_all_gauges(self):
"""Log all gauge values."""
self.log_servers()
self.log_channels()
self.log_members()
self.log_voice()
self.log_players()
self.log_uptime()
self.log_server_roles()
self.log_server_channels()
def log_servers(self):
"""Log servers."""
if not self.extra:
return
event_key = 'servers'
extra = self.extra.copy()
servers = list(self.bot.servers)
extra.update({
'discord_gauge': event_key,
'server_count': len(servers)
})
servers_data = []
for server in servers:
servers_data.append(self.get_server_params(server))
extra['servers'] = servers_data
self.logger.info(self.get_event_key(event_key), extra=extra)
def log_channels(self):
"""Log channels."""
channels = list(self.bot.get_all_channels())
extra = {
'channel_count': len(channels)
}
self.log_discord_gauge('all_channels', extra=extra)
# individual channels
for channel in channels:
self.log_channel(channel)
def log_channel(self, channel: Channel):
"""Log one channel."""
extra = {'channel': self.get_channel_params(channel)}
self.log_discord_gauge('channel', extra=extra)
def log_members(self):
"""Log members."""
members = list(self.bot.get_all_members())
unique = set(m.id for m in members)
extra = {
'member_count': len(members),
'unique_member_count': len(unique)
}
self.log_discord_gauge('all_members', extra=extra)
for member in members:
self.log_member(member)
def log_member(self, member: Member):
"""Log member."""
extra = {'member': self.get_member_params(member)}
self.log_discord_gauge('member', extra=extra)
def log_voice(self):
"""Log voice channels."""
pass
def log_players(self):
"""Log VC players."""
pass
def log_uptime(self):
"""Log updtime."""
pass
def log_server_roles(self):
"""Log server roles."""
for server in self.bot.servers:
extra = {}
extra['server'] = self.get_server_params(server)
extra['roles'] = []
roles = server.role_hierarchy
# count number of members with a particular role
for index, role in enumerate(roles):
count = sum(1 for m in server.members if role in m.roles)
role_params = self.get_role_params(role)
role_params['count'] = count
role_params['hierarchy_index'] = index
extra['roles'].append(role_params)
self.log_discord_gauge('server.roles', extra)
def log_server_channels(self):
"""Log server channels."""
for server in self.bot.servers:
extra = {
'server': self.get_server_params(server),
'channels': {
'text': [],
'voice': []
}
}
channels = sorted(server.channels, key=lambda x: x.position)
for channel in channels:
channel_params = self.get_server_channel_params(channel)
if channel.type == ChannelType.text:
extra['channels']['text'].append(channel_params)
elif channel.type == ChannelType.voice:
extra['channels']['voice'].append(channel_params)
self.log_discord_gauge('server.channels', extra)
def check_folder():
"""Check folder."""
if not os.path.exists(PATH):
os.makedirs(PATH)
def check_file():
"""Check files."""
defaults = {}
if not dataIO.is_valid_json(JSON):
dataIO.save_json(JSON, defaults)
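# setup() below is the entry point the bot framework calls when loading this cog; the two checks above ensure the data folder and settings file exist first.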
def setup(bot):
"""Setup bot."""
check_folder()
check_file()
n = Logstash(bot)
bot.add_cog(n)
| 32.52027 | 117 | 0.59573 | 2,267 | 19,252 | 4.909131 | 0.158359 | 0.031449 | 0.017791 | 0.015365 | 0.310181 | 0.222572 | 0.17396 | 0.114386 | 0.112229 | 0.107018 | 0 | 0.004172 | 0.290256 | 19,252 | 591 | 118 | 32.575296 | 0.810304 | 0.127052 | 0 | 0.224747 | 0 | 0 | 0.072251 | 0.006261 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106061 | false | 0.017677 | 0.063131 | 0 | 0.219697 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa1d88767f9598326147db70a097f66a059b0283 | 7,731 | py | Python | feapder/utils/webdriver.py | Litt1eQ/feapder | e4dd0fd86526699f10e5f239fff4af8cc5e957a5 | [
"MIT"
] | 1 | 2021-04-20T11:29:36.000Z | 2021-04-20T11:29:36.000Z | feapder/utils/webdriver.py | jiyegui/feapder | 1d598ad115654aec972b063be06330761ed7a5b9 | [
"MIT"
] | null | null | null | feapder/utils/webdriver.py | jiyegui/feapder | 1d598ad115654aec972b063be06330761ed7a5b9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on 2021/3/18 4:59 PM
---------
@summary:
---------
@author: Boris
@email: boris_liu@foxmail.com
"""
import queue
import threading
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from feapder.utils.log import log
from feapder.utils.tools import Singleton
DEFAULT_USERAGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36"
class WebDriver(RemoteWebDriver):
CHROME = "CHROME"
PHANTOMJS = "PHANTOMJS"
def __init__(
self,
load_images=True,
user_agent=None,
proxy=None,
headless=False,
driver_type=PHANTOMJS,
timeout=16,
window_size=(1024, 800),
executable_path=None,
**kwargs
):
"""
WebDriver wrapper supporting Chrome and PhantomJS.
Args:
load_images: whether to load images
user_agent: a string, or a no-argument callable that returns the user agent
proxy: xxx.xxx.xxx.xxx:xxxx, or a no-argument callable that returns the proxy address
headless: whether to run in headless mode
driver_type: CHROME or PHANTOMJS
timeout: request (page-load) timeout in seconds
window_size: browser window size as (width, height)
executable_path: path to the browser binary; defaults to the system default
**kwargs:
"""
self._load_images = load_images
self._user_agent = user_agent or DEFAULT_USERAGENT
self._proxy = proxy
self._headless = headless
self._timeout = timeout
self._window_size = window_size
self._executable_path = executable_path
self.proxies = {}
self.user_agent = None
if driver_type == WebDriver.CHROME:
self.driver = self.chrome_driver()
elif driver_type == WebDriver.PHANTOMJS:
self.driver = self.phantomjs_driver()
else:
raise TypeError(
"driver_type must be one of CHROME or PHANTOMJS, but received {}".format(
driver_type
)
)
# driver.get(url) can block forever without raising, which hangs the program; setting a page-load timeout solves this.
self.driver.set_page_load_timeout(self._timeout)
# Also set the script execution timeout
self.driver.set_script_timeout(self._timeout)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_val:
log.error(exc_val)
self.quit()
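# Returning True suppresses any exception raised inside the with-block; it has already been logged above.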
return True
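# Example usage (a minimal sketch; assumes a matching browser driver binary is on PATH):
#   with WebDriver(driver_type=WebDriver.CHROME, headless=True) as browser:
#       browser.get("https://example.com")
#       html = browser.page_source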
def get_driver(self):
return self.driver
def chrome_driver(self):
chrome_options = webdriver.ChromeOptions()
# This step is important: enable developer mode so major sites cannot detect that Selenium is in use
chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
chrome_options.add_experimental_option("useAutomationExtension", False)
if self._proxy:
chrome_options.add_argument(
"--proxy-server={}".format(
self._proxy() if callable(self._proxy) else self._proxy
)
)
if self._user_agent:
chrome_options.add_argument(
"user-agent={}".format(
self._user_agent()
if callable(self._user_agent)
else self._user_agent
)
)
if not self._load_images:
chrome_options.add_experimental_option(
"prefs", {"profile.managed_default_content_settings.images": 2}
)
if self._headless:
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-gpu")
if self._window_size:
chrome_options.add_argument(
"--window-size={},{}".format(self._window_size[0], self._window_size[1])
)
if self._executable_path:
driver = webdriver.Chrome(
chrome_options=chrome_options, executable_path=self._executable_path
)
else:
driver = webdriver.Chrome(chrome_options=chrome_options)
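# Overwrite navigator.webdriver before any page script runs, so fingerprinting checks see it as undefined.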
driver.execute_cdp_cmd(
"Page.addScriptToEvaluateOnNewDocument",
{
"source": """
Object.defineProperty(navigator, 'webdriver', {
get: () => undefined
})
"""
},
)
return driver
def phantomjs_driver(self):
import warnings
warnings.filterwarnings("ignore")
service_args = []
dcap = DesiredCapabilities.PHANTOMJS
if self._proxy:
service_args.append(
"--proxy=%s" % (self._proxy() if callable(self._proxy) else self._proxy)
)
if self._user_agent:
dcap["phantomjs.page.settings.userAgent"] = (
self._user_agent() if callable(self._user_agent) else self._user_agent
)
if not self._load_images:
service_args.append("--load-images=no")
if self._executable_path:
driver = webdriver.PhantomJS(
service_args=service_args,
desired_capabilities=dcap,
executable_path=self._executable_path,
)
else:
driver = webdriver.PhantomJS(
service_args=service_args, desired_capabilities=dcap
)
if self._window_size:
driver.set_window_size(self._window_size[0], self._window_size[1])
del warnings
return driver
@property
def cookies(self):
cookies_json = {}
for cookie in self.driver.get_cookies():
cookies_json[cookie["name"]] = cookie["value"]
return cookies_json
@cookies.setter
def cookies(self, val: dict):
"""
Set cookies.
Args:
val: {"key":"value", "key2":"value2"}
Returns:
"""
for key, value in val.items():
self.driver.add_cookie({"name": key, "value": value})
def __getattr__(self, name):
if self.driver:
return getattr(self.driver, name)
else:
raise AttributeError
# def __del__(self):
# self.quit()
@Singleton
class WebDriverPool:
def __init__(self, pool_size=5, **kwargs):
self.queue = queue.Queue(maxsize=pool_size)
self.kwargs = kwargs
self.lock = threading.RLock()
self.driver_count = 0
@property
def is_full(self):
return self.driver_count >= self.queue.maxsize
def get(self, user_agent: str = None, proxy: str = None) -> WebDriver:
"""
Fetch a webdriver from the pool.
When a new webdriver instance has to be created, the user_agent and proxy arguments below are used.
Args:
user_agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36
proxy: xxx.xxx.xxx.xxx
Returns:
"""
if not self.is_full:
with self.lock:
if not self.is_full:
kwargs = self.kwargs.copy()
if user_agent:
kwargs["user_agent"] = user_agent
if proxy:
kwargs["proxy"] = proxy
driver = WebDriver(**kwargs)
self.queue.put(driver)
self.driver_count += 1
driver = self.queue.get()
return driver
def put(self, driver):
self.queue.put(driver)
def remove(self, driver):
driver.quit()
self.driver_count -= 1
def close(self):
while not self.queue.empty():
driver = self.queue.get()
driver.quit()
self.driver_count -= 1
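# Example usage (a sketch; WebDriverPool is wrapped in @Singleton, so constructor kwargs only take effect on first instantiation):
#   pool = WebDriverPool(pool_size=3, driver_type=WebDriver.CHROME, headless=True)
#   driver = pool.get()
#   try:
#       driver.get("https://example.com")
#   finally:
#       pool.put(driver)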
| 29.284091 | 145 | 0.567326 | 803 | 7,731 | 5.222914 | 0.267746 | 0.040773 | 0.034096 | 0.028612 | 0.268002 | 0.213162 | 0.191226 | 0.172389 | 0.137339 | 0.137339 | 0 | 0.017167 | 0.336955 | 7,731 | 263 | 146 | 29.395437 | 0.801014 | 0.113569 | 0 | 0.19209 | 0 | 0.00565 | 0.101585 | 0.025811 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084746 | false | 0 | 0.045198 | 0.016949 | 0.20339 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa1f99bd0a967bb0bb3aa9714a2c76e5ad34507f | 965 | py | Python | category.py | studenton/ncell-app-camp-ideas | 8e0a542d65b4cdad12fde9dae48794962867e2bb | [
"MIT"
] | 6 | 2018-10-06T03:38:23.000Z | 2019-05-10T08:02:25.000Z | category.py | studenton/ncell-app-camp-ideas | 8e0a542d65b4cdad12fde9dae48794962867e2bb | [
"MIT"
] | null | null | null | category.py | studenton/ncell-app-camp-ideas | 8e0a542d65b4cdad12fde9dae48794962867e2bb | [
"MIT"
] | 4 | 2018-10-06T03:38:26.000Z | 2021-10-02T15:50:59.000Z | def get_page(url):
try:
import urllib
return urllib.urlopen(url).read()
except Exception:
return ''
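# Note: get_page above is an unused urllib fallback; the scrape below uses requests instead.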
from bs4 import BeautifulSoup
# import lxml
import requests
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
with open("ideas.csv","r") as file:
data = file.read()
lines =data.splitlines()
source = requests.get("http://ncellappcamp.com/ideas")
soup = BeautifulSoup(source.content, "lxml")
category = soup.find_all('div', {'class': 'category-icon'})
categorylist = []
for item in category:
src = item.find('img')
image = src.get('src')
if "tourism" in image:
categorylist.append("Tourism")
elif "ic_game" in image:
categorylist.append("Gaming")
elif "ic_health" in image:
categorylist.append("Health")
elif "ic_utilities" in image:
categorylist.append("Utilities")
with open("new.csv","w") as newfile:
for i in range(727):
newfile.write(lines[i] + ',' + categorylist[i] + '\n')
| 26.805556 | 56 | 0.655959 | 126 | 965 | 4.984127 | 0.531746 | 0.044586 | 0.121019 | 0.159236 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006402 | 0.190674 | 965 | 35 | 57 | 27.571429 | 0.797695 | 0.011399 | 0 | 0 | 0 | 0 | 0.156513 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.125 | 0 | 0.21875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa20ef9e1083deafc50859142152b00fd02d1e68 | 11,544 | py | Python | pydpkg/dsc.py | memory/python-dpkg | b88fe8f2d454d3f2c56c9d92be8ffa8ed2c02212 | [
"Apache-2.0"
] | 4 | 2021-03-03T15:37:51.000Z | 2021-12-15T21:04:22.000Z | pydpkg/dsc.py | memory/python-dpkg | b88fe8f2d454d3f2c56c9d92be8ffa8ed2c02212 | [
"Apache-2.0"
] | 2 | 2020-11-20T13:12:14.000Z | 2021-12-04T21:08:36.000Z | pydpkg/dsc.py | memory/python-dpkg | b88fe8f2d454d3f2c56c9d92be8ffa8ed2c02212 | [
"Apache-2.0"
] | 5 | 2021-07-01T10:41:18.000Z | 2021-12-10T11:53:13.000Z | """ pydpkg.dsc.Dsc: a class to represent debian source description (dsc) files """
# stdlib imports
import hashlib
import logging
import os
from collections import defaultdict
from email import message_from_file, message_from_string
import pgpy
# pypi imports
import six
# local imports
from pydpkg.exceptions import (
DscMissingFileError,
DscBadChecksumsError,
)
from pydpkg.base import _Dbase
REQUIRED_HEADERS = ("package", "version", "architecture")
class Dsc(_Dbase):
"""Class allowing import and manipulation of a debian source
description (dsc) file."""
# pylint: disable=too-many-instance-attributes
def __init__(self, filename=None, logger=None):
self.filename = os.path.expanduser(filename)
self._dirname = os.path.dirname(self.filename)
self._log = logger or logging.getLogger(__name__)
self._message = None
self._source_files = None
self._sizes = None
self._message_str = None
self._checksums = None
self._corrected_checksums = None
self._pgp_message = None
def __repr__(self):
return repr(self.message_str)
def __str__(self):
return six.text_type(self.message_str)
def __getattr__(self, attr):
"""Overload getattr to treat message headers as object
attributes (so long as they do not conflict with an existing
attribute).
:param attr: string
:returns: string
:raises: AttributeError
"""
self._log.debug("grabbing attr: %s", attr)
if attr in self.__dict__:
return self.__dict__[attr]
# handle attributes with dashes :-(
munged = attr.replace("_", "-")
# beware: email.Message[nonexistent] returns None not KeyError
if munged in self.message:
return self.message[munged]
raise AttributeError(f"'Dsc' object has no attribute '{attr}'")
def get(self, item, ret=None):
"""Public wrapper for getitem"""
try:
return self.__getitem__(item)
except KeyError:
return ret
@property
def message(self):
"""Return an email.Message object containing the parsed dsc file"""
self._log.debug("accessing message property")
if self._message is None:
self._message = self._process_dsc_file()
return self._message
@property
def headers(self):
"""Return a dictionary of the message items"""
if self._message is None:
self._message = self._process_dsc_file()
return dict(self._message.items())
@property
def pgp_message(self):
"""Return a pgpy.PGPMessage object containing the signed dsc
message (or None if the message is unsigned)"""
if self._message is None:
self._message = self._process_dsc_file()
return self._pgp_message
@property
def source_files(self):
"""Return a list of source files found in the dsc file"""
if self._source_files is None:
self._source_files = self._process_source_files()
return [x[0] for x in self._source_files]
@property
def all_files_present(self):
"""Return true if all files listed in the dsc have been found"""
if self._source_files is None:
self._source_files = self._process_source_files()
return all(x[2] for x in self._source_files)
@property
def all_checksums_correct(self):
"""Return true if all checksums are correct"""
return not self.corrected_checksums
@property
def corrected_checksums(self):
"""Returns a dict of the CORRECT checksums in any case
where the ones provided by the dsc file are incorrect."""
if self._corrected_checksums is None:
self._corrected_checksums = self._validate_checksums()
return self._corrected_checksums
@property
def missing_files(self):
"""Return a list of all files from the dsc that we failed to find"""
if self._source_files is None:
self._source_files = self._process_source_files()
return [x[0] for x in self._source_files if x[2] is False]
@property
def sizes(self):
"""Return a list of source files found in the dsc file"""
if self._source_files is None:
self._source_files = self._process_source_files()
return {(x[0], x[1]) for x in self._source_files}
@property
def message_str(self):
"""Return the dsc message as a string
:returns: string
"""
if self._message_str is None:
self._message_str = self.message.as_string()
return self._message_str
@property
def checksums(self):
"""Return a dictionary of checksums for the source files found
in the dsc file, keyed first by hash type and then by filename."""
if self._checksums is None:
self._checksums = self._process_checksums()
return self._checksums
def validate(self):
"""Raise exceptions if files are missing or checksums are bad."""
if not self.all_files_present:
raise DscMissingFileError([x[0] for x in self._source_files if not x[2]])
if not self.all_checksums_correct:
raise DscBadChecksumsError(self.corrected_checksums)
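# Typical usage (a sketch; assumes the dsc file sits alongside its source files):
#   dsc = Dsc("package_1.0-1.dsc")
#   dsc.validate()  # raises DscMissingFileError / DscBadChecksumsError on problems
#   print(dsc.source_files)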
def _process_checksums(self):
"""Walk through the dsc message looking for any keys in the
format 'Checksum-hashtype'. Return a nested dictionary in
the form {hashtype: {filename: {digest}}}"""
self._log.debug("process_checksums()")
sums = {}
for key in self.message.keys():
if key.lower().startswith("checksums"):
hashtype = key.split("-")[1].lower()
# grrrrrr debian :( :( :(
elif key.lower() == "files":
hashtype = "md5"
else:
continue
sums[hashtype] = {}
source = self.message[key]
for line in source.split("\n"):
if line: # grrr py3--
digest, _, filename = line.strip().split(" ")
pathname = os.path.abspath(os.path.join(self._dirname, filename))
sums[hashtype][pathname] = digest
return sums
def _internalize_message(self, msg):
"""Ugh: the dsc message body may not include a Files or
Checksums-foo entry for _itself_, which makes for hilarious
misadventures up the chain. So, pfeh, we add it."""
self._log.debug("internalize_message()")
base = os.path.basename(self.filename)
size = os.path.getsize(self.filename)
for key, source in msg.items():
self._log.debug("processing key: %s", key)
if key.lower().startswith("checksums"):
hashtype = key.split("-")[1].lower()
elif key.lower() == "files":
hashtype = "md5"
else:
continue
found = []
for line in source.split("\n"):
if line: # grrr
found.append(line.strip().split(" "))
files = [x[2] for x in found]
print(f"Files: {files}")
if base not in files:
self._log.debug("dsc file not found in %s: %s", key, base)
self._log.debug("getting hasher for %s", hashtype)
hasher = getattr(hashlib, hashtype)()
self._log.debug("hashing file")
with open(self.filename, "rb") as fileobj:
# pylint: disable=cell-var-from-loop
for chunk in iter(lambda: fileobj.read(1024), b""):
hasher.update(chunk)
self._log.debug("completed hashing file")
self._log.debug("got %s digest: %s", hashtype, hasher.hexdigest())
newline = f"\n {hasher.hexdigest()} {size} {base}"
self._log.debug("new line: %s", newline)
msg.replace_header(key, msg[key] + newline)
return msg
def _process_dsc_file(self):
"""Extract the dsc message from a file: parse the dsc body
and return an email.Message object. Attempt to extract the
RFC822 message from an OpenPGP message if necessary."""
self._log.debug("process_dsc_file()")
if not (self.filename.endswith(".dsc") or self.filename.endswith(".dsc.asc")):
self._log.debug(
"File %s does not appear to be a dsc file; pressing "
"on but we may experience some turbulence and possibly "
"explode.",
self.filename,
)
try:
self._pgp_message = pgpy.PGPMessage.from_file(self.filename)
self._log.debug("Found pgp signed message")
except IOError as ex:
self._log.fatal('Could not read dsc file "%s": %s', self.filename, ex)
raise
except (ValueError, pgpy.errors.PGPError) as ex:
self._log.warning(
"dsc file %s is not signed or has a corrupt sig: %s", self.filename, ex
)
if self._pgp_message is not None:
msg = message_from_string(self._pgp_message.message)
else:
with open(self.filename, encoding="UTF-8") as fileobj:
msg = message_from_file(fileobj)
return self._internalize_message(msg)
def _process_source_files(self):
"""Walk through the list of lines in the 'Files' section of
the dsc message, and verify that the file exists in the same
location on our filesystem as the dsc file. Return a list
of tuples: the normalized pathname for the file, the
size of the file (as claimed by the dsc) and whether the file
is actually present in the filesystem locally.
Also extract the file size from the message lines to fill
out the size element of each returned tuple.
"""
self._log.debug("process_source_files()")
filenames = []
try:
files = self.message["Files"]
except KeyError:
self._log.fatal(
'DSC file "%s" does not have a Files section', self.filename
)
raise
for line in files.split("\n"):
if line:
_, size, filename = line.strip().split(" ")
pathname = os.path.abspath(os.path.join(self._dirname, filename))
filenames.append((pathname, int(size), os.path.isfile(pathname)))
return filenames
def _validate_checksums(self):
"""Iterate over the dict of asserted checksums from the
dsc file. Check each in turn. If any checksum is invalid,
append the correct checksum to a similarly structured dict
and return them all at the end."""
self._log.debug("validate_checksums()")
bad_hashes = defaultdict(lambda: defaultdict(None))
for hashtype, filenames in six.iteritems(self.checksums):
for filename, digest in six.iteritems(filenames):
hasher = getattr(hashlib, hashtype)()
with open(filename, "rb") as fileobj:
# pylint: disable=cell-var-from-loop
for chunk in iter(lambda: fileobj.read(128), b""):
hasher.update(chunk)
if hasher.hexdigest() != digest:
bad_hashes[hashtype][filename] = hasher.hexdigest()
return dict(bad_hashes)
| 39 | 87 | 0.599359 | 1,410 | 11,544 | 4.758156 | 0.214184 | 0.03935 | 0.028618 | 0.01416 | 0.222239 | 0.190639 | 0.186615 | 0.182442 | 0.165748 | 0.145178 | 0 | 0.003119 | 0.305613 | 11,544 | 295 | 88 | 39.132203 | 0.833832 | 0.223146 | 0 | 0.27451 | 0 | 0 | 0.083875 | 0.004988 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107843 | false | 0 | 0.044118 | 0.009804 | 0.264706 | 0.004902 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa21da575e25d333fe048209ef8ddd2a27f95baa | 66,050 | py | Python | hoggorm/pcr.py | olivertomic/hoggorm | afb2ab9a08c8111f1f82c324ecaf8a601f399eeb | [
"BSD-2-Clause"
] | 61 | 2018-01-03T20:20:26.000Z | 2022-03-30T18:57:48.000Z | hoggorm/pcr.py | andife/hoggorm | 71f72823d474d8916b61c9740e3f63afd669c2c2 | [
"BSD-2-Clause"
] | 35 | 2020-10-15T18:37:19.000Z | 2021-08-02T04:50:35.000Z | hoggorm/pcr.py | olivertomic/hoggorm | afb2ab9a08c8111f1f82c324ecaf8a601f399eeb | [
"BSD-2-Clause"
] | 29 | 2018-04-07T01:09:37.000Z | 2022-03-10T16:19:52.000Z | # -*- coding: utf-8 -*-
# Import necessary modules
import numpy as np
import numpy.linalg as npla
import hoggorm.statTools as st
import hoggorm.cross_val as cv
class nipalsPCR:
"""
This class carries out Principal Component Regression for two arrays using the NIPALS algorithm.
PARAMETERS
----------
arrX : numpy array
This is X in the PCR model. Number and order of objects (rows) must match those of ``arrY``.
arrY : numpy array
This is Y in the PCR model. Number and order of objects (rows) must match those of ``arrX``.
numComp : int, optional
An integer that defines how many components are to be computed. If not provided, the maximum possible number of components is used.
Xstand : boolean, optional
Defines whether variables in ``arrX`` are to be standardised/scaled or centered.
False : columns of ``arrX`` are mean centred (default)
``Xstand = False``
True : columns of ``arrX`` are mean centred and divided by their own standard deviation
``Xstand = True``
Ystand : boolean, optional
Defines whether variables in ``arrY`` are to be standardised/scaled or centered.
False : columns of ``arrY`` are mean centred (default)
``Ystand = False``
True : columns of ``arrY`` are mean centred and divided by their own standard deviation
``Ystand = True``
cvType : list, optional
The list defines cross validation settings when computing the PCA model. Note if `cvType` is not provided, cross validation will not be performed and as such cross validation results will not be available. Choose cross validation type from the following:
loo : leave one out / a.k.a. full cross validation (default)
``cvType = ["loo"]``
KFold : leave out one fold or segment
``cvType = ["KFold", numFolds]``
numFolds: int
Number of folds or segments
lolo : leave one label out
``cvType = ["lolo", labelsList]``
labelsList: list
Sequence of labels. Must be the same length as the number of rows in ``arrX`` and ``arrY``. Leaves out objects with the same label.
RETURNS
-------
class
A class that contains the PCR model and computational results
EXAMPLES
--------
First import the hoggorm package.
>>> import hoggorm as ho
Import your data into a numpy array.
>>> np.shape(my_X_data)
(14, 292)
>>> np.shape(my_Y_data)
(14, 5)
Examples of how to compute a PCR model using different settings for the input parameters.
>>> model = ho.nipalsPCR(arrX=my_X_data, arrY=my_Y_data, numComp=5)
>>> model = ho.nipalsPCR(arrX=my_X_data, arrY=my_Y_data)
>>> model = ho.nipalsPCR(arrX=my_X_data, arrY=my_Y_data, numComp=3, Ystand=True)
>>> model = ho.nipalsPCR(arrX=my_X_data, arrY=my_Y_data, Xstand=False, Ystand=True)
>>> model = ho.nipalsPCR(arrX=my_X_data, arrY=my_Y_data, cvType=["loo"])
>>> model = ho.nipalsPCR(arrX=my_X_data, arrY=my_Y_data, cvType=["KFold", 7])
>>> model = ho.nipalsPCR(arrX=my_X_data, arrY=my_Y_data, cvType=["lolo", [1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7]])
Examples of how to extract results from the PCR model.
>>> X_scores = model.X_scores()
>>> X_loadings = model.X_loadings()
>>> Y_loadings = model.Y_loadings()
>>> X_cumulativeCalibratedExplainedVariance_allVariables = model.X_cumCalExplVar_indVar()
>>> Y_cumulativeCalibratedExplainedVariance_total = model.Y_cumCalExplVar()
"""
def __init__(self,
arrX,
arrY,
numComp=None,
Xstand=False,
Ystand=False,
cvType=None):
"""
On initialisation check how arrX and arrY are to be pre-processed
(parameters Xstand and Ystand are either True or False). Then check
whether number of components chosen by user is OK.
"""
# ===============================================================================
# Check what is provided by user for PCA-part of PCR
# ===============================================================================
# Define X and Y within class such that the data can be accessed from
# all attributes in class.
self.arrX_input = arrX
self.arrY_input = arrY
# Check whether cvType is provided. If NOT, then no cross validation
# is carried out.
self.cvType = cvType
# Define maximum number of components to compute depending on whether
# cross validation was selected or not.
if isinstance(self.cvType, type(None)):
maxNumPC = min(np.shape(self.arrX_input))
else:
# Determine the number of components that are possible to compute based
# on size of data set (#rows, #cols), type of cross validation (i.e.
# size of CV segments)
numObj = np.shape(self.arrX_input)[0]
# Compute the sizes of training sets in CV
if self.cvType[0] == "loo":
cvComb = cv.LeaveOneOut(numObj)
elif self.cvType[0] == "KFold":
cvComb = cv.KFold(numObj, k=self.cvType[1])
elif self.cvType[0] == "lolo":
cvComb = cv.LeaveOneLabelOut(self.cvType[1])
else:
print("Requested form of cross validation is not available")
pass
# First divide into combinations of training and test sets. Collect
# sizes of training sets, since this also may limit the number of
# components that can be computed.
segSizes = []
for train_index, test_index in cvComb:
x_train, x_test = cv.split(train_index, test_index,
self.arrX_input)
y_train, y_test = cv.split(train_index, test_index,
self.arrY_input)
segSizes.append(numObj - sum(train_index))
# Compute the max number of components based on only object size
maxN = numObj - max(segSizes) - 1
# Choose whatever is smaller, number of variables or maxN
maxNumPC = min(np.shape(arrX)[1], maxN)
# Now set the number of components that is possible to compute.
if numComp is None:
self.numPC = maxNumPC
else:
if numComp > maxNumPC:
self.numPC = maxNumPC
else:
self.numPC = numComp
# Pre-process data according to user request.
# -------------------------------------------
# Check whether standardisation of X and Y is requested by user. If
# NOT, then X and y are centred by default.
self.Xstand = Xstand
self.Ystand = Ystand
# Standardise X if requested by user, otherwise center X.
if self.Xstand:
self.Xmeans = np.average(self.arrX_input, axis=0)
self.Xstd = np.std(self.arrX_input, axis=0, ddof=1)
self.arrX = (self.arrX_input - self.Xmeans) / self.Xstd
else:
self.Xmeans = np.average(self.arrX_input, axis=0)
self.arrX = self.arrX_input - self.Xmeans
# Standardise Y if requested by user, otherwise center Y.
if self.Ystand:
self.Ymeans = np.average(self.arrY_input, axis=0)
self.Ystd = np.std(self.arrY_input, axis=0, ddof=1)
self.arrY = (self.arrY_input - self.Ymeans) / self.Ystd
else:
self.Ymeans = np.average(self.arrY_input, axis=0)
self.arrY = self.arrY_input - self.Ymeans
# Before PLS2 NIPALS algorithm starts initiate and lists in which
# results will be stored.
self.X_scoresList = []
self.Y_scoresList = []
self.X_loadingsList = []
self.Y_loadingsList = []
self.X_loadingsWeightsList = []
self.coeffList = []
self.Y_residualsList = [self.arrY]
self.X_residualsList = [self.arrX]
# Collect residual matrices/arrays after each computed PC
self.resids = {}
self.X_residualsDict = {}
# Collect predicted matrices/array Xhat after each computed PC
self.calXhatDict_singPC = {}
# Collect explained variance in each PC
self.calExplainedVariancesDict = {}
self.X_calExplainedVariancesList = []
# ===============================================================================
# Here the NIPALS PCA algorithm on X starts
# ===============================================================================
threshold = 1.0e-8
X_new = self.arrX.copy()
# Compute number of principal components as specified by user
for j in range(self.numPC):
# Check if first column contains only zeros. If yes, then
# NIPALS will not converge and (npla.norm(num) will contain
# nan's). Rather put in other starting values.
if not np.any(X_new[:, 0]):
X_repl_nonCent = np.arange(np.shape(X_new)[0])
X_repl = X_repl_nonCent - np.mean(X_repl_nonCent)
t = X_repl.reshape(-1, 1)
else:
t = X_new[:, 0].reshape(-1, 1)
# Iterate until score vector converges according to threshold
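# A worked restatement of the loop below:
#   p = X't / ||X't||   (normalised loading)
#   t = X p             (updated score)
# repeated until the squared change in t falls below the threshold.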
while 1:
num = np.dot(np.transpose(X_new), t)
denom = npla.norm(num)
p = num / denom
t_new = np.dot(X_new, p)
diff = t - t_new
t = t_new.copy()
SS = np.sum(np.square(diff))
# Check whether sum of squares is smaller than threshold. Break
# out of loop if true and start computation of next PC.
if SS < threshold:
self.X_scoresList.append(t)
self.X_loadingsList.append(p)
break
# Peel off information explained by the actual component and continue with
# decomposition on the residuals (X_new = E).
X_old = X_new.copy()
Xhat_j = np.dot(t, np.transpose(p))
X_new = X_old - Xhat_j
# Store residuals E and Xhat in their dictionaries
self.X_residualsDict[j + 1] = X_new
self.calXhatDict_singPC[j + 1] = Xhat_j
if self.Xstand:
self.calXhatDict_singPC[j +
1] = (Xhat_j * self.Xstd) + self.Xmeans
else:
self.calXhatDict_singPC[j + 1] = Xhat_j + self.Xmeans
# Collect scores and loadings for the actual PC.
self.arrT = np.hstack(self.X_scoresList)
self.arrP = np.hstack(self.X_loadingsList)
# Compute Y loadings by using MLR (see Module 6, Equ. 6.8 ++)
term_1 = npla.inv(np.dot(np.transpose(self.arrT), self.arrT))
term_2 = np.dot(np.transpose(self.arrT), self.arrY)
self.arrQ = np.transpose(np.dot(term_1, term_2))
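# i.e. Q' = (T'T)^-1 T'Y: ordinary least squares regression of Y on the PCA scores T.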
# ==============================================================================
# From here computation of CALIBRATED explained variance starts
# ==============================================================================
# ========== COMPUTATIONS FOR X ==========
# ---------------------------------------------------------------------
# Create a list holding arrays of Xhat predicted calibration after each
# component. Xhat is computed with Xhat = T*P'
self.calXpredList = []
# Compute Xhat for 1 and more components (cumulatively).
for ind in range(1, self.numPC + 1):
part_arrT = self.arrT[:, 0:ind]
part_arrP = self.arrP[:, 0:ind]
predXcal = np.dot(part_arrT, np.transpose(part_arrP))
if self.Xstand:
Xhat = (predXcal * self.Xstd) + self.Xmeans
else:
Xhat = predXcal + self.Xmeans
self.calXpredList.append(Xhat)
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Collect all PRESSE for individual variables in a dictionary.
# Keys represent number of component.
self.PRESSEdict_indVar_X = {}
# Compute PRESS for calibration / estimation
PRESSE_0_indVar_X = np.sum(np.square(st.center(self.arrX_input)),
axis=0)
self.PRESSEdict_indVar_X[0] = PRESSE_0_indVar_X
# Compute PRESS for each Xhat for 1, 2, 3, etc number of components
# and compute explained variance
for ind, Xhat in enumerate(self.calXpredList):
diffX = self.arrX_input - Xhat
PRESSE_indVar_X = np.sum(np.square(diffX), axis=0)
self.PRESSEdict_indVar_X[ind + 1] = PRESSE_indVar_X
# Now store all PRESSE values into an array. Then compute MSEE and
# RMSEE.
self.PRESSEarr_indVar_X = np.array(
list(self.PRESSEdict_indVar_X.values()))
self.MSEEarr_indVar_X = self.PRESSEarr_indVar_X / np.shape(
self.arrX_input)[0]
self.RMSEEarr_indVar_X = np.sqrt(self.MSEEarr_indVar_X)
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Compute explained variance for each variable in X using the
# MSEP for each variable. Also collect PRESSE, MSEE, RMSEE in
# their respective dictionaries for each variable. Keys represent
# now variables and NOT components as above with
# self.PRESSEdict_indVar_X
self.cumCalExplVarXarr_indVar = np.zeros(
np.shape(self.MSEEarr_indVar_X))
MSEE_0_indVar_X = self.MSEEarr_indVar_X[0, :]
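# Explained variance per variable: 100 * (MSEE_0 - MSEE) / MSEE_0, the reduction relative to the zero-component model.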
for ind, MSEE_indVar_X in enumerate(self.MSEEarr_indVar_X):
explVar = (MSEE_0_indVar_X - MSEE_indVar_X) / MSEE_0_indVar_X * 100
self.cumCalExplVarXarr_indVar[ind] = explVar
self.PRESSE_indVar_X = {}
self.MSEE_indVar_X = {}
self.RMSEE_indVar_X = {}
self.cumCalExplVarX_indVar = {}
for ind in range(np.shape(self.PRESSEarr_indVar_X)[1]):
self.PRESSE_indVar_X[ind] = self.PRESSEarr_indVar_X[:, ind]
self.MSEE_indVar_X[ind] = self.MSEEarr_indVar_X[:, ind]
self.RMSEE_indVar_X[ind] = self.RMSEEarr_indVar_X[:, ind]
self.cumCalExplVarX_indVar[
ind] = self.cumCalExplVarXarr_indVar[:, ind]
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Collect total PRESSE across all variables in a dictionary. Also,
# compute total calibrated explained variance in Y.
self.PRESSE_total_dict_X = {}
self.PRESSE_total_list_X = np.sum(self.PRESSEarr_indVar_X, axis=1)
for ind, PRESSE_X in enumerate(self.PRESSE_total_list_X):
self.PRESSE_total_dict_X[ind] = PRESSE_X
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Collect total MSEE across all variables in a dictionary. Also,
# compute total validated explained variance in X.
self.MSEE_total_dict_X = {}
self.MSEE_total_list_X = (np.sum(self.MSEEarr_indVar_X, axis=1) /
np.shape(self.arrX_input)[1])
MSEE_0_X = self.MSEE_total_list_X[0]
# Compute total calibrated explained variance in X
self.XcumCalExplVarList = []
if not self.Xstand:
for ind, MSEE_X in enumerate(self.MSEE_total_list_X):
perc = (MSEE_0_X - MSEE_X) / MSEE_0_X * 100
self.MSEE_total_dict_X[ind] = MSEE_X
self.XcumCalExplVarList.append(perc)
else:
self.XcumCalExplVarArr = np.average(self.cumCalExplVarXarr_indVar,
axis=1)
self.XcumCalExplVarList = list(self.XcumCalExplVarArr)
# Construct list with total validated explained variance in X
self.XcalExplVarList = []
for ind, item in enumerate(self.XcumCalExplVarList):
if ind == len(self.XcumCalExplVarList) - 1:
break
explVarComp = (self.XcumCalExplVarList[ind + 1] -
self.XcumCalExplVarList[ind])
self.XcalExplVarList.append(explVarComp)
# Construct a dictionary that holds predicted X (Xhat) from calibration
# for each number of components.
self.calXpredDict = {}
for ind, item in enumerate(self.calXpredList):
self.calXpredDict[ind + 1] = item
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Compute total RMSEE and store values in a dictionary and list.
self.RMSEE_total_dict_X = {}
self.RMSEE_total_list_X = np.sqrt(self.MSEE_total_list_X)
for ind, RMSEE_X in enumerate(self.RMSEE_total_list_X):
self.RMSEE_total_dict_X[ind] = RMSEE_X
# ---------------------------------------------------------------------
# ========== COMPUTATIONS FOR Y ============
# ---------------------------------------------------------------------
# Create a list holding arrays of Yhat predicted calibration after each
# component. Yhat is computed with Yhat = T*Q'
self.calYpredList = []
for ind in range(1, self.numPC + 1):
x_scores = self.arrT[:, 0:ind]
y_loadings = self.arrQ[:, 0:ind]
# c_regrCoeff = self.arrC[0:ind,0:ind]
# Depending on whether Y was standardised or not compute Yhat
# accordingly.
if self.Ystand:
Yhat_stand = np.dot(x_scores, np.transpose(y_loadings))
Yhat = (Yhat_stand *
self.Ystd.reshape(1, -1)) + self.Ymeans.reshape(1, -1)
else:
Yhat = np.dot(x_scores,
np.transpose(y_loadings)) + self.Ymeans.reshape(
1, -1)
self.calYpredList.append(Yhat)
# Compute Y residuals and store in list
self.Y_residualsList.append(self.arrY - Yhat)
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Collect all PRESSE for individual variables in a dictionary.
# Keys represent number of component.
self.PRESSEdict_indVar = {}
# Compute PRESS for calibration / estimation
PRESSE_0_indVar = np.sum(np.square(st.center(self.arrY_input)), axis=0)
self.PRESSEdict_indVar[0] = PRESSE_0_indVar
# Compute PRESS for each Yhat for 1, 2, 3, etc number of components
# and compute explained variance
for ind, Yhat in enumerate(self.calYpredList):
diffY = self.arrY_input - Yhat
PRESSE_indVar = np.sum(np.square(diffY), axis=0)
self.PRESSEdict_indVar[ind + 1] = PRESSE_indVar
# Now store all PRESSE values into an array. Then compute MSEE and
# RMSEE.
self.PRESSEarr_indVar = np.array(list(self.PRESSEdict_indVar.values()))
self.MSEEarr_indVar = self.PRESSEarr_indVar / np.shape(
self.arrY_input)[0]
self.RMSEEarr_indVar = np.sqrt(self.MSEEarr_indVar)
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Compute explained variance for each variable in Y using the
# MSEP for each variable. Also collect PRESSE, MSEE, RMSEE in
# their respective dictionaries for each variable. Keys represent
# now variables and NOT components as above with
# self.PRESSEdict_indVar
self.cumCalExplVarYarr_indVar = np.zeros(np.shape(self.MSEEarr_indVar))
MSEE_0_indVar = self.MSEEarr_indVar[0, :]
for ind, MSEE_indVar in enumerate(self.MSEEarr_indVar):
explVar = (MSEE_0_indVar - MSEE_indVar) / MSEE_0_indVar * 100
self.cumCalExplVarYarr_indVar[ind] = explVar
self.PRESSE_indVar = {}
self.MSEE_indVar = {}
self.RMSEE_indVar = {}
self.cumCalExplVarY_indVar = {}
for ind in range(np.shape(self.PRESSEarr_indVar)[1]):
self.PRESSE_indVar[ind] = self.PRESSEarr_indVar[:, ind]
self.MSEE_indVar[ind] = self.MSEEarr_indVar[:, ind]
self.RMSEE_indVar[ind] = self.RMSEEarr_indVar[:, ind]
self.cumCalExplVarY_indVar[
ind] = self.cumCalExplVarYarr_indVar[:, ind]
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Collect total PRESSE across all variables in a dictionary. Also,
# compute total calibrated explained variance in Y.
self.PRESSE_total_dict = {}
self.PRESSE_total_list = np.sum(self.PRESSEarr_indVar, axis=1)
for ind, PRESSE in enumerate(self.PRESSE_total_list):
self.PRESSE_total_dict[ind] = PRESSE
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Collect total MSEE across all variables in a dictionary. Also,
# compute total calibrated explained variance in Y.
self.MSEE_total_dict = {}
self.MSEE_total_list = (np.sum(self.MSEEarr_indVar, axis=1) /
np.shape(self.arrY_input)[1])
MSEE_0 = self.MSEE_total_list[0]
# Compute total calibrated explained variance in Y
self.YcumCalExplVarList = []
if not self.Ystand:
for ind, MSEE in enumerate(self.MSEE_total_list):
perc = (MSEE_0 - MSEE) / MSEE_0 * 100
self.MSEE_total_dict[ind] = MSEE
self.YcumCalExplVarList.append(perc)
else:
self.YcumCalExplVarArr = np.average(self.cumCalExplVarYarr_indVar,
axis=1)
self.YcumCalExplVarList = list(self.YcumCalExplVarArr)
# Construct list with total validated explained variance in Y
self.YcalExplVarList = []
for ind, item in enumerate(self.YcumCalExplVarList):
if ind == len(self.YcumCalExplVarList) - 1:
break
explVarComp = (self.YcumCalExplVarList[ind + 1] -
self.YcumCalExplVarList[ind])
self.YcalExplVarList.append(explVarComp)
# Construct a dictionary that holds predicted Y (Yhat) from calibration
# for each number of components.
self.calYpredDict = {}
for ind, item in enumerate(self.calYpredList):
self.calYpredDict[ind + 1] = item
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# Compute total RMSEP and store values in a dictionary and list.
self.RMSEE_total_dict = {}
self.RMSEE_total_list = np.sqrt(self.MSEE_total_list)
for ind, RMSEE in enumerate(self.RMSEE_total_list):
self.RMSEE_total_dict[ind] = RMSEE
# ---------------------------------------------------------------------
# ==============================================================================
# From here cross validation procedure starts
# ==============================================================================
if self.cvType is not None:
numObj = np.shape(self.arrX)[0]
if self.cvType[0] == "loo":
print("loo")
cvComb = cv.LeaveOneOut(numObj)
elif self.cvType[0] == "KFold":
print("KFold")
cvComb = cv.KFold(numObj, k=self.cvType[1])
elif self.cvType[0] == "lolo":
print("lolo")
cvComb = cv.LeaveOneLabelOut(self.cvType[1])
else:
print("Requested form of cross validation is not available")
# Collect predicted y (i.e. yhat) for each CV segment in a
# dictionary according to nubmer of PC
self.valYpredDict = {}
for ind in range(1, self.numPC + 1):
self.valYpredDict[ind] = np.zeros(np.shape(self.arrY_input))
# Collect predicted x (i.e. xhat) for each CV segment in a
# dictionary according to number of PC
self.valXpredDict = {}
for ind in range(1, self.numPC + 1):
self.valXpredDict[ind] = np.zeros(np.shape(self.arrX_input))
# Collect train and test set in dictionaries for each component and put
# them in this list.
self.cvTrainAndTestDataList = []
# Collect: validation X scores T, validation X loadings P,
# validation Y scores U, validation Y loadings Q,
# validation X loading weights W and scores regression coefficients C
# in lists for each PC
self.val_arrTlist = []
self.val_arrPlist = []
self.val_arrQlist = []
# Collect train and test set in a dictionary for each component
self.cvTrainAndTestDataList = []
self.X_train_means_list = np.zeros(np.shape(self.arrX_input))
self.Y_train_means_list = np.zeros(np.shape(self.arrY_input))
# First divide into combinations of training and test sets
for train_index, test_index in cvComb:
X_train, X_test = cv.split(train_index, test_index,
self.arrX_input)
Y_train, Y_test = cv.split(train_index, test_index,
self.arrY_input)
subDict = {}
subDict["x train"] = X_train
subDict["x test"] = X_test
subDict["y train"] = Y_train
subDict["y test"] = Y_test
self.cvTrainAndTestDataList.append(subDict)
# -------------------------------------------------------------
# Center or standardise X according to users choice
if self.Xstand:
X_train_mean = np.average(X_train, axis=0).reshape(1, -1)
X_train_std = np.std(X_train, axis=0,
ddof=1).reshape(1, -1)
X_train_proc = (X_train - X_train_mean) / X_train_std
# Standardise X test using mean and STD from training set
X_test_proc = (X_test - X_train_mean) / X_train_std
else:
X_train_mean = np.average(X_train, axis=0).reshape(1, -1)
X_train_proc = X_train - X_train_mean
# Center X test using mean from training set
X_test_proc = X_test - X_train_mean
# -------------------------------------------------------------
self.X_train_means_list[test_index, ] = X_train_mean
# -------------------------------------------------------------
# Center or standardise Y according to users choice
if self.Ystand:
Y_train_mean = np.average(Y_train, axis=0)
Y_train_std = np.std(Y_train, axis=0, ddof=1)
Y_train_proc = (Y_train - Y_train_mean) / Y_train_std
else:
Y_train_mean = np.average(Y_train, axis=0)
Y_train_proc = Y_train - Y_train_mean
# -------------------------------------------------------------
self.Y_train_means_list[test_index, ] = Y_train_mean
# Here the NIPALS PCA algorithm starts
# ------------------------------------
threshold = 1.0e-8
X_new = X_train_proc.copy()
# Collect scores and loadings in lists that will be later converted
# to arrays.
scoresList = []
loadingsList = []
# Compute number of principal components as specified by user
for j in range(self.numPC):
# Check if first column contains only zeros. If yes, then
# NIPALS will not converge and (npla.norm(num) will contain
# nan's). Rather put in other starting values.
if not np.any(X_new[:, 0]):
X_repl_nonCent = np.arange(np.shape(X_new)[0])
X_repl = X_repl_nonCent - np.mean(X_repl_nonCent)
t = X_repl.reshape(-1, 1)
else:
t = X_new[:, 0].reshape(-1, 1)
# Iterate until score vector converges according to threshold
while 1:
num = np.dot(np.transpose(X_new), t)
denom = npla.norm(num)
p = num / denom
t_new = np.dot(X_new, p)
diff = t - t_new
t = t_new.copy()
SS = np.sum(np.square(diff))
# Check whether sum of squares is smaller than threshold. Break
# out of loop if true and start computation of next PC.
if SS < threshold:
scoresList.append(t)
loadingsList.append(p)
break
# Peel off information explained by actual component and continue with
# decomposition on the residuals (X_new = E).
X_old = X_new.copy()
Xhat_j = np.dot(t, np.transpose(p))
X_new = X_old - Xhat_j
# Collect X scores and X loadings for the actual PC.
valT = np.hstack(scoresList)
valP = np.hstack(loadingsList)
self.val_arrTlist.append(valT)
self.val_arrPlist.append(valP)
# Compute Y loadings
term_1 = npla.inv(np.dot(np.transpose(valT), valT))
term_2 = np.dot(np.transpose(valT), Y_train_proc)
valQ = np.transpose(np.dot(term_1, term_2))
self.val_arrQlist.append(valQ)
# Compute the scores for the left out object
projT = np.dot(X_test_proc, valP)
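# t_test = X_test_proc * P: the left-out objects projected onto loadings fitted on the training fold only.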
dims = np.shape(projT)[1]
# Construct validated predicted X first for one component,
# then two, three, etc
for ind in range(0, dims):
# part_projT = projT[:,0:ind+1].reshape(1,-1)
part_projT = projT[:, 0:ind + 1]
part_valP = valP[:, 0:ind + 1]
valPredX_proc = np.dot(part_projT, np.transpose(part_valP))
part_valQ = valQ[:, 0:ind + 1]
valPredY_proc = np.dot(part_projT, np.transpose(part_valQ))
# Depending on preprocessing re-process in same manner
# in order to get values that compare to original values.
if self.Xstand:
valPredX = (valPredX_proc * X_train_std) + X_train_mean
else:
valPredX = valPredX_proc + X_train_mean
self.valXpredDict[ind + 1][test_index, ] = valPredX
if self.Ystand:
valPredY = (valPredY_proc * Y_train_std) + Y_train_mean
else:
valPredY = valPredY_proc + Y_train_mean
self.valYpredDict[ind + 1][test_index, ] = valPredY
# Put all predictions into an array that corresponds to the
# original variable
self.valXpredList = []
valPreds = self.valXpredDict.values()
for preds in valPreds:
pc_arr = np.vstack(preds)
self.valXpredList.append(pc_arr)
# Put all predictions into an array that corresponds to the
# original variable
self.valYpredList = []
valPreds = self.valYpredDict.values()
for preds in valPreds:
pc_arr = np.vstack(preds)
self.valYpredList.append(pc_arr)
# ==============================================================================
# From here VALIDATED explained variance is computed
# ==============================================================================
# ========== Computations for X ==========
# -----------------------------------------------------------------
# Compute PRESSCV (PRediction Error Sum of Squares) for cross
# validation
self.valXpredList = self.valXpredDict.values()
# Collect all PRESS in a dictionary. Keys represent number of
# component.
self.PRESSCVdict_indVar_X = {}
# First compute PRESSCV for zero components
self.PRESSCV_0_indVar_X = np.sum(
np.square(self.arrX_input - self.X_train_means_list), axis=0)
self.PRESSCVdict_indVar_X[0] = self.PRESSCV_0_indVar_X
# Compute PRESSCV for each Xhat for 1, 2, 3, etc number of
# components and compute explained variance
for ind, Xhat in enumerate(self.valXpredList):
# diffX = self.arrX_input - Xhat
diffX = self.arrX_input - Xhat
PRESSCV_indVar_X = np.sum(np.square(diffX), axis=0)
self.PRESSCVdict_indVar_X[ind + 1] = PRESSCV_indVar_X
# Now store all PRESSCV values into an array. Then compute MSECV
# and RMSECV.
self.PRESSCVarr_indVar_X = np.array(
list(self.PRESSCVdict_indVar_X.values()))
self.MSECVarr_indVar_X = (self.PRESSCVarr_indVar_X /
np.shape(self.arrX_input)[0])
self.RMSECVarr_indVar_X = np.sqrt(self.MSECVarr_indVar_X)
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Compute explained variance for each variable in X using the
# MSEP for each variable. Also collect PRESS, MSECV, RMSECV in
# their respective dictionaries for each variable. Keys represent
# now variables and NOT components as above with
# self.PRESSdict_indVar
self.cumValExplVarXarr_indVar = np.zeros(
np.shape(self.MSECVarr_indVar_X))
MSECV_0_indVar_X = self.MSECVarr_indVar_X[0, :]
for ind, MSECV_indVar_X in enumerate(self.MSECVarr_indVar_X):
explVar = (MSECV_0_indVar_X -
MSECV_indVar_X) / MSECV_0_indVar_X * 100
self.cumValExplVarXarr_indVar[ind] = explVar
self.PRESSCV_indVar_X = {}
self.MSECV_indVar_X = {}
self.RMSECV_indVar_X = {}
self.cumValExplVarX_indVar = {}
for ind in range(np.shape(self.PRESSCVarr_indVar_X)[1]):
self.PRESSCV_indVar_X[ind] = self.PRESSCVarr_indVar_X[:, ind]
self.MSECV_indVar_X[ind] = self.MSECVarr_indVar_X[:, ind]
self.RMSECV_indVar_X[ind] = self.RMSECVarr_indVar_X[:, ind]
self.cumValExplVarX_indVar[
ind] = self.cumValExplVarXarr_indVar[:, ind]
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Collect total PRESSCV across all variables in a dictionary.
self.PRESSCV_total_dict_X = {}
self.PRESSCV_total_list_X = np.sum(self.PRESSCVarr_indVar_X,
axis=1)
for ind, PRESSCV_X in enumerate(self.PRESSCV_total_list_X):
self.PRESSCV_total_dict_X[ind] = PRESSCV_X
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Collect total MSECV across all variables in a dictionary. Also,
# compute total validated explained variance in X.
self.MSECV_total_dict_X = {}
self.MSECV_total_list_X = (np.sum(self.MSECVarr_indVar_X, axis=1) /
np.shape(self.arrX_input)[1])
MSECV_0_X = self.MSECV_total_list_X[0]
# Compute total validated explained variance in X
self.XcumValExplVarList = []
if not self.Xstand:
for ind, MSECV_X in enumerate(self.MSECV_total_list_X):
perc = (MSECV_0_X - MSECV_X) / MSECV_0_X * 100
self.MSECV_total_dict_X[ind] = MSECV_X
self.XcumValExplVarList.append(perc)
else:
self.XcumValExplVarArr = np.average(
self.cumValExplVarXarr_indVar, axis=1)
self.XcumValExplVarList = list(self.XcumValExplVarArr)
# Construct list with total validated explained variance in X in
# each component
self.XvalExplVarList = []
for ind, item in enumerate(self.XcumValExplVarList):
if ind == len(self.XcumValExplVarList) - 1:
break
explVarComp = (self.XcumValExplVarList[ind + 1] -
self.XcumValExplVarList[ind])
self.XvalExplVarList.append(explVarComp)
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Compute total RMSECV and store values in a dictionary and list.
self.RMSECV_total_dict_X = {}
self.RMSECV_total_list_X = np.sqrt(self.MSECV_total_list_X)
for ind, RMSECV_X in enumerate(self.RMSECV_total_list_X):
self.RMSECV_total_dict_X[ind] = RMSECV_X
# -----------------------------------------------------------------
# ========== Computations for Y ==========
# -----------------------------------------------------------------
# Compute PRESSCV (PRediction Error Sum of Squares) for cross
# validation
self.valYpredList = self.valYpredDict.values()
# Collect all PRESS in a dictionary. Keys represent number of
# component.
self.PRESSdict_indVar = {}
# First compute PRESSCV for zero components
self.PRESSCV_0_indVar = np.sum(np.square(self.arrY_input -
self.Y_train_means_list),
axis=0)
self.PRESSdict_indVar[0] = self.PRESSCV_0_indVar
# Compute PRESSCV for each Yhat for 1, 2, 3, etc number of components
# and compute explained variance
for ind, Yhat in enumerate(self.valYpredList):
diffY = self.arrY_input - Yhat
PRESSCV_indVar = np.sum(np.square(diffY), axis=0)
self.PRESSdict_indVar[ind + 1] = PRESSCV_indVar
# Now store all PRESSCV values into an array. Then compute MSECV and
# RMSECV.
self.PRESSCVarr_indVar = np.array(
list(self.PRESSdict_indVar.values()))
self.MSECVarr_indVar = self.PRESSCVarr_indVar / np.shape(
self.arrY_input)[0]
self.RMSECVarr_indVar = np.sqrt(self.MSECVarr_indVar)
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Compute explained variance for each variable in Y using the
# MSECV for each variable. Also collect PRESS, MSECV, RMSECV in
# their respective dictionaries for each variable. Keys represent
# now variables and NOT components as above with
# self.PRESSdict_indVar
self.cumValExplVarYarr_indVar = np.zeros(
np.shape(self.MSECVarr_indVar))
MSECV_0_indVar = self.MSECVarr_indVar[0, :]
for ind, MSECV_indVar in enumerate(self.MSECVarr_indVar):
explVar = (MSECV_0_indVar -
MSECV_indVar) / MSECV_0_indVar * 100
self.cumValExplVarYarr_indVar[ind] = explVar
self.PRESSCV_indVar = {}
self.MSECV_indVar = {}
self.RMSECV_indVar = {}
self.cumValExplVarY_indVar = {}
for ind in range(np.shape(self.PRESSCVarr_indVar)[1]):
self.PRESSCV_indVar[ind] = self.PRESSCVarr_indVar[:, ind]
self.MSECV_indVar[ind] = self.MSECVarr_indVar[:, ind]
self.RMSECV_indVar[ind] = self.RMSECVarr_indVar[:, ind]
self.cumValExplVarY_indVar[
ind] = self.cumValExplVarYarr_indVar[:, ind]
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Collect total PRESSCV across all variables in a dictionary.
self.PRESSCV_total_dict = {}
self.PRESSCV_total_list = np.sum(self.PRESSCVarr_indVar, axis=1)
for ind, PRESSCV in enumerate(self.PRESSCV_total_list):
self.PRESSCV_total_dict[ind] = PRESSCV
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Collect total MSECV across all variables in a dictionary. Also,
# compute total validated explained variance in Y.
self.MSECV_total_dict = {}
self.MSECV_total_list = (np.sum(self.MSECVarr_indVar, axis=1) /
np.shape(self.arrY_input)[1])
MSECV_0 = self.MSECV_total_list[0]
# Compute total validated explained variance in Y
self.YcumValExplVarList = []
if not self.Ystand:
for ind, MSECV in enumerate(self.MSECV_total_list):
perc = (MSECV_0 - MSECV) / MSECV_0 * 100
self.MSECV_total_dict[ind] = MSECV
self.YcumValExplVarList.append(perc)
else:
self.YcumValExplVarArr = np.average(
self.cumValExplVarYarr_indVar, axis=1)
self.YcumValExplVarList = list(self.YcumValExplVarArr)
# Construct list with total validated explained variance in Y in
# each component
self.YvalExplVarList = []
for ind, item in enumerate(self.YcumValExplVarList):
if ind == len(self.YcumValExplVarList) - 1:
break
explVarComp = (self.YcumValExplVarList[ind + 1] -
self.YcumValExplVarList[ind])
self.YvalExplVarList.append(explVarComp)
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Compute total RMSECV and store values in a dictionary and list.
self.RMSECV_total_dict = {}
self.RMSECV_total_list = np.sqrt(self.MSECV_total_list)
for ind, RMSECV in enumerate(self.RMSECV_total_list):
self.RMSECV_total_dict[ind] = RMSECV
# -----------------------------------------------------------------
def modelSettings(self):
"""
Returns a dictionary holding the settings under which NIPALS PCR was
run.
"""
# Collect settings under which PCA was run.
self.settings = {}
self.settings["numComp"] = self.numPC
self.settings["Xstand"] = self.Xstand
self.settings["arrX"] = self.arrX_input
self.settings["analysed arrX"] = self.arrX
self.settings["arrY"] = self.arrY_input
self.settings["analysed arrY"] = self.arrX
return self.settings
def X_means(self):
"""
Returns array holding column means of array X.
"""
return self.Xmeans.reshape(1, -1)
def X_scores(self):
"""
Returns array holding scores of array X. First column holds scores
for component 1, second column holds scores for component 2, etc.
"""
return self.arrT
def X_loadings(self):
"""
Returns array holding loadings of array X. Rows represent variables
and columns represent components. First column holds loadings for
        component 1, second column holds loadings for component 2, etc.
"""
return self.arrP
def X_corrLoadings(self):
"""
Returns array holding correlation loadings of array X. First column
holds correlation loadings for component 1, second column holds
correlation loadings for component 2, etc.
"""
# Creates empty matrix for correlation loadings
arr_corrLoadings = np.zeros(
(np.shape(self.arrT)[1], np.shape(self.arrP)[0]), float)
# Compute correlation loadings:
# For each component in score matrix
for PC in range(np.shape(self.arrT)[1]):
PCscores = self.arrT[:, PC]
# For each variable/attribute in original matrix (not meancentered)
for var in range(np.shape(self.arrX)[1]):
origVar = self.arrX[:, var]
corrs = np.corrcoef(PCscores, origVar)
arr_corrLoadings[PC, var] = corrs[0, 1]
self.arr_corrLoadings = np.transpose(arr_corrLoadings)
return self.arr_corrLoadings
def X_residuals(self):
"""
Returns a dictionary holding the residual arrays for array X after
each computed component. Dictionary key represents order of component.
"""
return self.X_residualsDict
def X_calExplVar(self):
"""
Returns a list holding the calibrated explained variance for
each component. First number in list is for component 1, second number
for component 2, etc.
"""
return self.XcalExplVarList
def X_cumCalExplVar_indVar(self):
"""
Returns an array holding the cumulative calibrated explained variance
for each variable in X after each component. First row represents zero
components, second row represents one component, third row represents
two components, etc. Columns represent variables.
"""
return self.cumCalExplVarXarr_indVar
def X_cumCalExplVar(self):
"""
Returns a list holding the cumulative calibrated explained variance
for array X after each component.
"""
return self.XcumCalExplVarList
def X_predCal(self):
"""
Returns a dictionary holding the predicted arrays Xhat from
calibration after each computed component. Dictionary key represents
order of component.
"""
return self.calXpredDict
def X_PRESSE_indVar(self):
"""
Returns array holding PRESSE for each individual variable in X
acquired through calibration after each computed component. First row
is PRESSE for zero components, second row for component 1, third row
for component 2, etc.
"""
return self.PRESSEarr_indVar_X
def X_PRESSE(self):
"""
Returns array holding PRESSE across all variables in X acquired
through calibration after each computed component. First row is PRESSE
for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.PRESSE_total_list_X
def X_MSEE_indVar(self):
"""
Returns an array holding MSEE for each variable in array X acquired
through calibration after each computed component. First row holds MSEE
for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.MSEEarr_indVar_X
def X_MSEE(self):
"""
Returns an array holding MSEE across all variables in X acquired
through calibration after each computed component. First row is MSEE
for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.MSEE_total_list_X
def X_RMSEE_indVar(self):
"""
Returns an array holding RMSEE for each variable in array X acquired
through calibration after each component. First row holds RMSEE
for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.RMSEEarr_indVar_X
def X_RMSEE(self):
"""
Returns an array holding RMSEE across all variables in X acquired
through calibration after each computed component. First row is RMSEE
for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.RMSEE_total_list_X
def X_valExplVar(self):
"""
Returns a list holding the validated explained variance for X after
each component. First number in list is for component 1, second number
for component 2, third number for component 3, etc.
"""
return self.XvalExplVarList
def X_cumValExplVar_indVar(self):
"""
Returns an array holding the cumulative validated explained variance
for each variable in X after each component. First row represents
zero components, second row represents component 1, third row for
        component 2, etc. Columns represent variables.
"""
return self.cumValExplVarXarr_indVar
def X_cumValExplVar(self):
"""
Returns a list holding the cumulative validated explained variance
for array X after each component. First number represents zero
components, second number represents component 1, etc.
"""
return self.XcumValExplVarList
def X_predVal(self):
"""
Returns dictionary holding arrays of predicted Xhat after each
component from validation. Dictionary key represents order of
component.
"""
return self.valXpredDict
def X_PRESSCV_indVar(self):
"""
Returns array holding PRESSCV for each individual variable in X
acquired through cross validation after each computed component. First
row is PRESSCV for zero components, second row for component 1, third
row for component 2, etc.
"""
return self.PRESSCVarr_indVar_X
def X_PRESSCV(self):
"""
Returns an array holding PRESSCV across all variables in X acquired
through cross validation after each computed component. First row is
PRESSCV for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.PRESSCV_total_list_X
def X_MSECV_indVar(self):
"""
        Returns an array holding MSECV for each variable in X acquired through
cross validation. First row is MSECV for zero components, second row
for component 1, etc.
"""
return self.MSECVarr_indVar_X
def X_MSECV(self):
"""
Returns an array holding MSECV across all variables in X acquired
through cross validation after each computed component. First row is
MSECV for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.MSECV_total_list_X
def X_RMSECV_indVar(self):
"""
        Returns an array holding RMSECV for each variable in X acquired
through cross validation after each computed component. First row is
RMSECV for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.RMSECVarr_indVar_X
def X_RMSECV(self):
"""
Returns an array holding RMSECV across all variables in X acquired
through cross validation after each computed component. First row is
RMSECV for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.RMSECV_total_list_X
def X_scores_predict(self, Xnew, numComp=None):
"""
        Returns array of X scores from new X data using the existing model.
Rows represent objects and columns represent components.
"""
if numComp is None:
numComp = self.numPC
assert numComp <= self.numPC, ValueError("Maximum numComp = " +
str(self.numPC))
assert numComp > -1, ValueError("numComp must be >= 0")
# First pre-process new X data accordingly
if self.Xstand:
            x_new = (Xnew - np.average(self.arrX_input, axis=0)) / np.std(
                self.arrX_input, ddof=1, axis=0)
else:
x_new = Xnew - np.average(self.arrX_input, axis=0)
# Compute the scores for new object
projT = np.dot(x_new, self.arrP[:, 0:numComp])
return projT
def Y_means(self):
"""
Returns array holding means of columns in array Y.
"""
return self.Ymeans.reshape(1, -1)
def Y_loadings(self):
"""
Returns an array holding loadings C of array Y. Rows represent
variables and columns represent components. First column for
component 1, second columns for component 2, etc.
"""
return self.arrQ
def Y_corrLoadings(self):
"""
        Returns array holding correlation loadings of array Y. First column
holds correlation loadings for component 1, second column holds
correlation loadings for component 2, etc.
"""
# Creates empty matrix for correlation loadings
arr_YcorrLoadings = np.zeros(
(np.shape(self.arrT)[1], np.shape(self.arrQ)[0]), float)
# Compute correlation loadings:
# For each component in score matrix
for PC in range(np.shape(self.arrT)[1]):
PCscores = self.arrT[:, PC]
# For each variable/attribute in original matrix (not meancentered)
for var in range(np.shape(self.arrY)[1]):
origVar = self.arrY[:, var]
corrs = np.corrcoef(PCscores, origVar)
arr_YcorrLoadings[PC, var] = corrs[0, 1]
self.arr_YcorrLoadings = np.transpose(arr_YcorrLoadings)
return self.arr_YcorrLoadings
def Y_residuals(self):
"""
Returns a dictionary holding residuals F of array Y after each
component. Dictionary key represents order of component.
"""
# Create empty dictionary that will hold residuals
Y_residualsDict = {}
# Fill dictionary with residuals arrays from residuals list
for ind, item in enumerate(self.Y_residualsList):
Y_residualsDict[ind] = item
return Y_residualsDict
def Y_calExplVar(self):
"""
Returns a list holding the calibrated explained variance for each
component. First number in list is for component 1, second number for
component 2, etc.
"""
return self.YcalExplVarList
def Y_cumCalExplVar_indVar(self):
"""
Returns an array holding the cumulative calibrated explained variance
for each variable in Y after each component. First row represents zero
components, second row represents one component, third row represents
two components, etc. Columns represent variables.
"""
return self.cumCalExplVarYarr_indVar
def Y_cumCalExplVar(self):
"""
Returns a list holding the cumulative calibrated explained variance
        for array Y after each component. First number represents zero
components, second number represents component 1, etc.
"""
return self.YcumCalExplVarList
def Y_predCal(self):
"""
Returns dictionary holding arrays of predicted Yhat after each
component from calibration. Dictionary key represents order of
components.
"""
return self.calYpredDict
def Y_PRESSE_indVar(self):
"""
Returns array holding PRESSE for each individual variable in Y
acquired through calibration after each component. First row is
PRESSE for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.PRESSEarr_indVar
def Y_PRESSE(self):
"""
Returns array holding PRESSE across all variables in Y acquired
through calibration after each computed component. First row is PRESSE
for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.PRESSE_total_list
def Y_MSEE_indVar(self):
"""
Returns an array holding MSEE for each variable in array Y acquired
through calibration after each computed component. First row holds MSEE
for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.MSEEarr_indVar
def Y_MSEE(self):
"""
Returns an array holding MSEE across all variables in Y acquired
through calibration after each computed component. First row is MSEE
for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.MSEE_total_list
def Y_RMSEE_indVar(self):
"""
Returns an array holding RMSEE for each variable in array Y acquired
through calibration after each component. First row holds RMSEE
for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.RMSEEarr_indVar
def Y_RMSEE(self):
"""
Returns an array holding RMSEE across all variables in Y acquired
through calibration after each computed component. First row is RMSEE
for zero components, second row for component 1, third row for
component 2, etc.
"""
return self.RMSEE_total_list
def Y_valExplVar(self):
"""
Returns a list holding the validated explained variance for Y after
each component. First number in list is for component 1, second number
for component 2, third number for component 3, etc.
"""
return self.YvalExplVarList
def Y_cumValExplVar_indVar(self):
"""
Returns an array holding the cumulative validated explained variance
for each variable in Y after each component. First row represents
zero components, second row represents component 1, third row for
        component 2, etc. Columns represent variables.
"""
return self.cumValExplVarYarr_indVar
def Y_cumValExplVar(self):
"""
Returns a list holding the cumulative validated explained variance
        for array Y after each component. First number represents zero
components, second number represents component 1, etc.
"""
return self.YcumValExplVarList
def Y_predVal(self):
"""
Returns dictionary holding arrays of predicted Yhat after each
component from validation. Dictionary key represents order of
component.
"""
return self.valYpredDict
def Y_PRESSCV_indVar(self):
"""
Returns an array holding PRESSCV of each variable in array Y acquired
through cross validation after each computed component. First row is
PRESSCV for zero components, second row component 1, third row for
component 2, etc.
"""
return self.PRESSCVarr_indVar
def Y_PRESSCV(self):
"""
Returns an array holding PRESSCV across all variables in Y acquired
through cross validation after each computed component. First row is
PRESSCV for zero components, second row component 1, third row for
component 2, etc.
"""
return self.PRESSCV_total_list
def Y_MSECV_indVar(self):
"""
Returns an array holding MSECV of each variable in array Y acquired
through cross validation after each computed component. First row is
MSECV for zero components, second row component 1, third row for
component 2, etc.
"""
return self.MSECVarr_indVar
def Y_MSECV(self):
"""
Returns an array holding MSECV across all variables in Y acquired
through cross validation after each computed component. First row is
MSECV for zero components, second row component 1, third row for
component 2, etc.
"""
return self.MSECV_total_list
def Y_RMSECV_indVar(self):
"""
Returns an array holding RMSECV for each variable in array Y acquired
through cross validation after each computed component. First row is
RMSECV for zero components, second row component 1, third row for
component 2, etc.
"""
return self.RMSECVarr_indVar
def Y_RMSECV(self):
"""
Returns an array holding RMSECV across all variables in Y acquired
through cross validation after each computed component. First row is
RMSECV for zero components, second row component 1, third row for
component 2, etc.
"""
return self.RMSECV_total_list
def regressionCoefficients(self, numComp=1):
"""
Returns regression coefficients from the fitted model using all
available samples and a chosen number of components.
"""
assert numComp <= self.numPC, ValueError("Maximum numComp = " +
str(self.numPC))
assert numComp > -1, ValueError("numComp must be >= 0")
# B = P*Q'
if self.Ystand:
return np.dot(self.arrP[:, 0:numComp],
np.transpose(self.arrQ[:, 0:numComp])) * np.std(
self.arrY_input, ddof=1, axis=0).reshape(1, -1)
else:
return np.dot(self.arrP[:, 0:numComp],
np.transpose(self.arrQ[:, 0:numComp]))
def Y_predict(self, Xnew, numComp=1):
"""
Return predicted Yhat from new measurements X.
"""
assert numComp <= self.numPC, ValueError("Maximum numComp = " +
str(self.numPC))
assert numComp > -1, ValueError("numComp must be >= 0")
        # Return the training means of Y if numComp == 0, shaped to one
        # row per sample in Xnew
        if numComp == 0:
            Yhat = np.zeros((np.shape(Xnew)[0],
                             np.shape(self.arrY_input)[1])) + np.average(
                                 self.arrY_input, axis=0)
else:
# First pre-process new X data accordingly
if self.Xstand:
x_new = (Xnew - np.average(self.arrX_input, axis=0)) / np.std(
self.arrX_input, ddof=1, axis=0)
else:
x_new = Xnew - np.average(self.arrX_input, axis=0)
# Compute the scores for new object
projT = np.dot(x_new, self.arrP[:, 0:numComp])
# Compute processed responses
y_pred_proc = np.dot(projT, np.transpose(self.arrQ[:, 0:numComp]))
# Compute predicted values back to original scale
if self.Ystand:
Yhat = (y_pred_proc *
np.std(self.arrY_input, ddof=1, axis=0).reshape(
1, -1)) + np.average(self.arrY_input, axis=0)
else:
Yhat = y_pred_proc + np.average(self.arrY_input, axis=0)
return Yhat
def cvTrainAndTestData(self):
"""
Returns a list consisting of dictionaries holding training and test
sets.
"""
return self.cvTrainAndTestDataList
def corrLoadingsEllipses(self):
"""
Returns coordinates for the ellipses that represent 50% and 100% expl.
variance in correlation loadings plot.
"""
# Create range for ellipses
t = np.arange(0.0, 2 * np.pi, 0.01)
        # Computing the outer circle (100 % expl. variance)
xcords100perc = np.cos(t)
ycords100perc = np.sin(t)
        # Computing the inner circle (50 % expl. variance, radius sqrt(0.5) ~= 0.707)
xcords50perc = 0.707 * np.cos(t)
ycords50perc = 0.707 * np.sin(t)
# Collect ellipse coordinates in dictionary
ellipses = {}
ellipses["x50perc"] = xcords50perc
ellipses["y50perc"] = ycords50perc
ellipses["x100perc"] = xcords100perc
ellipses["y100perc"] = ycords100perc
return ellipses
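# ---------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module; the
# constructor name and keyword arguments below are assumptions inferred
# from the accessor methods above). Given training arrays X (n x p) and
# Y (n x q):
#
#     model = nipalsPCR(arrX=X, arrY=Y, numComp=4, cvType=["loo"])
#     B = model.regressionCoefficients(numComp=2)   # (p x q) coefficients
#     Yhat = model.Y_predict(Xnew, numComp=2)       # predict new samples
#     print(model.Y_cumValExplVar())                # cross-validated expl. var.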
| 42.150606 | 262 | 0.551083 | 7,371 | 66,050 | 4.81495 | 0.075431 | 0.014398 | 0.017328 | 0.013525 | 0.661971 | 0.598659 | 0.534671 | 0.516244 | 0.481023 | 0.457215 | 0 | 0.009711 | 0.317108 | 66,050 | 1,566 | 263 | 42.177522 | 0.777142 | 0.4119 | 0 | 0.244118 | 0 | 0 | 0.009849 | 0 | 0 | 0 | 0 | 0 | 0.008824 | 1 | 0.082353 | false | 0.001471 | 0.005882 | 0 | 0.172059 | 0.007353 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa225497bd1f355b230ac427db158e76ca59367c | 6,052 | py | Python | xls/dslx/interpreter/parse_and_interpret.py | lromor/xls | dd1ab857519bb984d178cfdb7758252689dbd647 | [
"Apache-2.0"
] | null | null | null | xls/dslx/interpreter/parse_and_interpret.py | lromor/xls | dd1ab857519bb984d178cfdb7758252689dbd647 | [
"Apache-2.0"
] | null | null | null | xls/dslx/interpreter/parse_and_interpret.py | lromor/xls | dd1ab857519bb984d178cfdb7758252689dbd647 | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
#
# Copyright 2020 The XLS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers that parse-then-interpret some text with error handler."""
import io
import os
import sys
import time
from typing import Text, Optional, cast, Tuple
from xls.dslx import import_helpers
from xls.dslx import ir_converter
from xls.dslx import parser_helpers
from xls.dslx.interpreter import quickcheck_helpers
from xls.dslx.python import cpp_typecheck
from xls.dslx.python.cpp_deduce import TypeInferenceError
from xls.dslx.python.cpp_deduce import XlsTypeError
from xls.dslx.python.cpp_parser import CppParseError
from xls.dslx.python.cpp_parser import Parser
from xls.dslx.python.interpreter import FailureError
from xls.dslx.python.interpreter import Interpreter
from xls.dslx.python.scanner import ScanError
from xls.dslx.python.scanner import Scanner
from xls.dslx.span import PositionalError
def _matches(test_name: Text, test_filter: Optional[Text]) -> bool:
if test_filter is None:
return True
# TODO(leary): 2019-08-28 Implement wildcards.
return test_name == test_filter
def parse_and_test(program: Text,
name: Text,
*,
filename: Text,
additional_search_paths: Tuple[str, ...] = (),
raise_on_error: bool = True,
test_filter: Optional[Text] = None,
trace_all: bool = False,
compare_jit: bool = True,
seed: Optional[int] = None) -> bool:
"""Parses program and run all tests contained inside.
Args:
program: The program text to parse.
name: Name for the module.
filename: The filename from which "program" text originates.
additional_search_paths: Additional paths at which we search for imported
module files.
raise_on_error: When true, raises exceptions that happen in tests;
      otherwise, simply returns a boolean to the caller when all tests have run.
test_filter: Test filter specification (e.g. as passed from bazel test
environment).
trace_all: Whether or not to trace all expressions.
compare_jit: Whether or not to assert equality between interpreted and
JIT'd function return values.
seed: Seed for QuickCheck random input stimulus.
Returns:
Whether or not an error occurred during parsing/testing.
Raises:
ScanError, ParseError: In case of front-end errors.
TypeInferenceError, TypeError: In case of type errors.
EvaluateError: In case of a runtime failure.
"""
did_fail = False
test_name = None
type_info = None
importer = import_helpers.Importer(additional_search_paths)
ran = 0
try:
module = Parser(Scanner(filename, program), name).parse_module()
type_info = cpp_typecheck.check_module(module, importer.cache,
importer.additional_search_paths)
ir_package = (
ir_converter.convert_module_to_package(
module, type_info, importer.cache, traverse_tests=True)
if compare_jit else None)
interpreter = Interpreter(
module,
type_info,
importer.typecheck,
importer.additional_search_paths,
importer.cache,
trace_all=trace_all,
ir_package=ir_package)
for test_name in module.get_test_names():
if not _matches(test_name, test_filter):
continue
ran += 1
print('[ RUN UNITTEST ]', test_name, file=sys.stderr)
interpreter.run_test(test_name)
print('[ OK ]', test_name, file=sys.stderr)
if ir_package and module.get_quickchecks():
if seed is None:
# We want to guarantee non-determinism by default. See
# https://abseil.io/docs/cpp/guides/random#stability-of-generated-sequences
# for rationale.
seed = int(os.getpid() * time.time())
print(f'[ SEED: {seed} ]')
for quickcheck in module.get_quickchecks():
test_name = quickcheck.f.name.identifier
print('[ RUN QUICKCHECK ]', test_name, file=sys.stderr)
quickcheck_helpers.run_quickcheck(
interpreter, ir_package, quickcheck, seed=seed)
print('[ OK ]', test_name, file=sys.stderr)
except (PositionalError, FailureError, CppParseError, ScanError,
TypeInferenceError, XlsTypeError) as e:
did_fail = True
parser_helpers.pprint_positional_error(
e, output=cast(io.IOBase, sys.stderr))
if test_name:
print(
'[ FAILED ]',
test_name,
e.__class__.__name__,
file=sys.stderr)
if raise_on_error:
raise
finally:
if type_info is not None:
type_info.clear_type_info_refs_for_gc()
print('[==================]', ran, 'test(s) ran.', file=sys.stderr)
return did_fail
def parse_and_test_path(path: Text,
raise_on_error: bool = True,
test_filter: Optional[Text] = None,
trace_all: bool = False,
compare_jit: bool = True,
seed: Optional[int] = None) -> bool:
"""Wrapper around parse_and_test that reads the file contents at "path"."""
with open(path) as f:
text = f.read()
name = os.path.basename(path)
name, _ = os.path.splitext(name)
return parse_and_test(
text,
name,
filename=path,
raise_on_error=raise_on_error,
test_filter=test_filter,
trace_all=trace_all,
compare_jit=compare_jit,
seed=seed)
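# Illustrative invocation sketch (assumes the xls Python bindings are
# importable; "example.x" is a placeholder DSLX file, not part of the
# original module):
#
#     failed = parse_and_test_path("example.x", raise_on_error=False)
#     sys.exit(1 if failed else 0)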
| 34.982659 | 83 | 0.667383 | 783 | 6,052 | 5 | 0.311622 | 0.025032 | 0.039336 | 0.03908 | 0.148914 | 0.132822 | 0.100128 | 0.053129 | 0.053129 | 0.053129 | 0 | 0.004188 | 0.25033 | 6,052 | 172 | 84 | 35.186047 | 0.858717 | 0.307171 | 0 | 0.12963 | 0 | 0 | 0.035966 | 0 | 0 | 0 | 0 | 0.005814 | 0 | 1 | 0.027778 | false | 0 | 0.240741 | 0 | 0.305556 | 0.074074 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa225ef6807983deecbdbb6c9523db258952b1d0 | 416 | py | Python | Cura/Uranium/tests/benchmarks/Math/profile_rayintersection.py | TIAO-JI-FU/3d-printing-with-moveo-1 | 100ecfd1208fe1890f8bada946145d716b2298eb | [
"MIT"
] | null | null | null | Cura/Uranium/tests/benchmarks/Math/profile_rayintersection.py | TIAO-JI-FU/3d-printing-with-moveo-1 | 100ecfd1208fe1890f8bada946145d716b2298eb | [
"MIT"
] | null | null | null | Cura/Uranium/tests/benchmarks/Math/profile_rayintersection.py | TIAO-JI-FU/3d-printing-with-moveo-1 | 100ecfd1208fe1890f8bada946145d716b2298eb | [
"MIT"
] | null | null | null | # Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from UM.Math.AxisAlignedBox import AxisAlignedBox
from UM.Math.Ray import Ray
from UM.Math.Vector import Vector
# `profile` is injected at runtime by line_profiler's kernprof; running this
# file directly (without `kernprof -l`) would raise a NameError here.
@profile
def intersects(box, ray):
return box.intersectsRay(ray)
ray = Ray(Vector(10, 10, 10), Vector(-1, -1, -1))
box = AxisAlignedBox(10, 10, 10)
for i in range(100000):
intersects(box, ray)
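# Assumed profiling workflow (kernprof ships with the line_profiler
# package, which supplies the @profile decorator used above):
#
#     kernprof -l profile_rayintersection.py
#     python -m line_profiler profile_rayintersection.py.lprof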
| 24.470588 | 62 | 0.725962 | 67 | 416 | 4.507463 | 0.537313 | 0.05298 | 0.099338 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.074499 | 0.161058 | 416 | 16 | 63 | 26 | 0.790831 | 0.225962 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.3 | 0.1 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa2532020409c4d4839d8efbdaadb3f0c9d7fe08 | 763 | py | Python | one.py | NingAnMe/extraterrestrial-solar-radiation | 338197de3f2f5ea42417d6b773be736cae6bf4a8 | [
"MIT"
] | null | null | null | one.py | NingAnMe/extraterrestrial-solar-radiation | 338197de3f2f5ea42417d6b773be736cae6bf4a8 | [
"MIT"
] | null | null | null | one.py | NingAnMe/extraterrestrial-solar-radiation | 338197de3f2f5ea42417d6b773be736cae6bf4a8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020-07-15 21:31
# @Author : NingAnMe <ninganme@qq.com>
import os
app_path = r"dist\e\e.exe"
longitude = 120.1
latitude = 60.1
datetime_start = '201901010000'
datetime_end = '202012312359'
out_file = '{:.2f}_{:.2f}.csv'.format(longitude, latitude)
frequency = 'minute'
print('<<< longitude: {}'.format(longitude))
print('<<< latitude: {}'.format(latitude))
print('<<< datetime_start: {}'.format(datetime_start))
print('<<< datetime_end: {}'.format(datetime_end))
print('<<< out_file: {}'.format(out_file))
os.system("{} --mode one --longitude {} --latitude {} --datetime_start {} --datetime_end {} --outfile {} --frequency {}".format(
app_path, longitude, latitude, datetime_start, datetime_end, out_file, frequency))
print('>>> {}'.format(out_file))
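# The os.system call above is equivalent to running the executable
# directly (Windows path separators as in app_path):
#
#     dist\e\e.exe --mode one --longitude 120.1 --latitude 60.1
#         --datetime_start 201901010000 --datetime_end 202012312359
#         --outfile 120.10_60.10.csv --frequency minute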
| 31.791667 | 128 | 0.651376 | 95 | 763 | 5.073684 | 0.494737 | 0.136929 | 0.078838 | 0.074689 | 0.170124 | 0.170124 | 0 | 0 | 0 | 0 | 0 | 0.071006 | 0.114024 | 763 | 23 | 129 | 33.173913 | 0.642012 | 0.142857 | 0 | 0 | 0 | 0.0625 | 0.32 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0.375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa28936710bc96ed3037de69f0f9a3cdf289660f | 9,547 | py | Python | tickit/devices/eiger/eiger_adapters.py | dls-controls/tickit | 00bb013e69674bcfe4926f365ecb3c65c080abe8 | [
"Apache-2.0"
] | 4 | 2021-09-16T13:35:33.000Z | 2022-02-01T23:35:53.000Z | tickit/devices/eiger/eiger_adapters.py | dls-controls/tickit | 00bb013e69674bcfe4926f365ecb3c65c080abe8 | [
"Apache-2.0"
] | 46 | 2021-09-16T13:44:58.000Z | 2022-02-02T13:42:56.000Z | tickit/devices/eiger/eiger_adapters.py | dls-controls/tickit | 00bb013e69674bcfe4926f365ecb3c65c080abe8 | [
"Apache-2.0"
] | null | null | null | import json
import logging
from aiohttp import web
from apischema import serialize
from tickit.adapters.httpadapter import HTTPAdapter
from tickit.adapters.interpreters.endpoints.http_endpoint import HTTPEndpoint
from tickit.adapters.zmqadapter import ZeroMQAdapter
from tickit.devices.eiger.eiger import EigerDevice
from tickit.devices.eiger.eiger_schema import AccessMode, SequenceComplete, Value
from tickit.devices.eiger.eiger_status import State
from tickit.devices.eiger.filewriter.eiger_filewriter import EigerFileWriterAdapter
from tickit.devices.eiger.monitor.eiger_monitor import EigerMonitorAdapter
from tickit.devices.eiger.stream.eiger_stream import EigerStreamAdapter
DETECTOR_API = "detector/api/1.8.0"
LOGGER = logging.getLogger(__name__)
class EigerRESTAdapter(
HTTPAdapter, EigerStreamAdapter, EigerMonitorAdapter, EigerFileWriterAdapter
):
"""An Eiger adapter which parses the commands sent to the HTTP server."""
device: EigerDevice # type: ignore
@HTTPEndpoint.get(f"/{DETECTOR_API}" + "/config/{parameter_name}")
async def get_config(self, request: web.Request) -> web.Response:
"""A HTTP Endpoint for requesting configuration variables from the Eiger.
Args:
request (web.Request): The request object that takes the given parameter.
Returns:
web.Response: The response object returned given the result of the HTTP
request.
"""
param = request.match_info["parameter_name"]
if hasattr(self.device.settings, param):
attr = self.device.settings[param]
data = serialize(
Value(
attr["value"],
attr["metadata"]["value_type"].value,
access_mode=(
attr["metadata"]["access_mode"].value # type: ignore
if hasattr(attr["metadata"], "access_mode")
else AccessMode.READ_ONLY
),
)
)
else:
data = serialize(
Value("None", "string", access_mode=AccessMode.NONE) # type: ignore
)
return web.json_response(data)
@HTTPEndpoint.put(
f"/{DETECTOR_API}" + "/config/{parameter_name}", include_json=True
)
async def put_config(self, request: web.Request) -> web.Response:
"""A HTTP Endpoint for setting configuration variables for the Eiger.
Args:
request (web.Request): The request object that takes the given parameter
and value.
Returns:
web.Response: The response object returned given the result of the HTTP
request.
"""
param = request.match_info["parameter_name"]
response = json.loads(await request.json())
if self.device.get_state()["value"] != State.IDLE.value: # type: ignore
LOGGER.warning("Eiger not initialized or is currently running.")
return web.json_response(serialize(SequenceComplete(7)))
elif (
hasattr(self.device.settings, param)
and self.device.get_state()["value"] == State.IDLE.value # type: ignore
):
attr = response["value"]
LOGGER.debug(f"Changing to {attr} for {param}")
self.device.settings[param] = attr
LOGGER.debug("Set " + str(param) + " to " + str(attr))
return web.json_response(serialize(SequenceComplete(8)))
else:
LOGGER.debug("Eiger has no config variable: " + str(param))
return web.json_response(serialize(SequenceComplete(9)))
@HTTPEndpoint.get(f"/{DETECTOR_API}" + "/status/{status_param}")
async def get_status(self, request: web.Request) -> web.Response:
"""A HTTP Endpoint for requesting the status of the Eiger.
Args:
request (web.Request): The request object that takes the request method.
Returns:
web.Response: The response object returned given the result of the HTTP
request.
"""
param = request.match_info["status_param"]
if hasattr(self.device.status, param):
attr = self.device.status[param]
else:
attr = "None"
data = serialize({"value": attr})
return web.json_response(data)
@HTTPEndpoint.get(f"/{DETECTOR_API}" + "/status/board_000/{status_param}")
async def get_board_000_status(self, request: web.Request) -> web.Response:
"""A HTTP Endpoint for requesting the status of the Eiger.
Args:
request (web.Request): The request object that takes the request method.
Returns:
web.Response: The response object returned given the result of the HTTP
request.
"""
param = request.match_info["status_param"]
if hasattr(self.device.status, param):
attr = self.device.status[param]
else:
attr = "None"
data = serialize({"value": attr})
return web.json_response(data)
@HTTPEndpoint.get(f"/{DETECTOR_API}" + "/status/builder/{status_param}")
async def get_builder_status(self, request: web.Request) -> web.Response:
"""A HTTP Endpoint for requesting the status of the Eiger.
Args:
request (web.Request): The request object that takes the request method.
Returns:
web.Response: The response object returned given the result of the HTTP
request.
"""
param = request.match_info["status_param"]
if hasattr(self.device.status, param):
attr = self.device.status[param]
else:
attr = "None"
data = serialize({"value": attr})
return web.json_response(data)
@HTTPEndpoint.put(f"/{DETECTOR_API}" + "/command/initialize")
async def initialize_eiger(self, request: web.Request) -> web.Response:
"""A HTTP Endpoint for the 'initialize' command of the Eiger.
Args:
request (web.Request): The request object that takes the request method.
Returns:
web.Response: The response object returned given the result of the HTTP
request.
"""
await self.device.initialize()
LOGGER.debug("Initializing Eiger...")
return web.json_response(serialize(SequenceComplete(1)))
@HTTPEndpoint.put(f"/{DETECTOR_API}" + "/command/arm")
async def arm_eiger(self, request: web.Request) -> web.Response:
"""A HTTP Endpoint for the 'arm' command of the Eiger.
Args:
request (web.Request): The request object that takes the request method.
Returns:
web.Response: The response object returned given the result of the HTTP
request.
"""
await self.device.arm()
LOGGER.debug("Arming Eiger...")
return web.json_response(serialize(SequenceComplete(2)))
@HTTPEndpoint.put(f"/{DETECTOR_API}" + "/command/disarm")
async def disarm_eiger(self, request: web.Request) -> web.Response:
"""A HTTP Endpoint for the 'disarm' command of the Eiger.
Args:
request (web.Request): The request object that takes the request method.
Returns:
web.Response: The response object returned given the result of the HTTP
request.
"""
await self.device.disarm()
LOGGER.debug("Disarming Eiger...")
return web.json_response(serialize(SequenceComplete(3)))
@HTTPEndpoint.put(f"/{DETECTOR_API}" + "/command/trigger")
async def trigger_eiger(self, request: web.Request) -> web.Response:
"""A HTTP Endpoint for the 'trigger' command of the Eiger.
Args:
request (web.Request): The request object that takes the request method.
Returns:
web.Response: The response object returned given the result of the HTTP
request.
"""
trigger_message = await self.device.trigger()
self.device._set_state(State.IDLE)
LOGGER.debug(trigger_message)
return web.json_response(serialize(SequenceComplete(4)))
@HTTPEndpoint.put(f"/{DETECTOR_API}" + "/command/cancel")
async def cancel_eiger(self, request: web.Request) -> web.Response:
"""A HTTP Endpoint for the 'cancel' command of the Eiger.
Args:
request (web.Request): The request object that takes the request method.
Returns:
web.Response: The response object returned given the result of the HTTP
request.
"""
await self.device.cancel()
LOGGER.debug("Cancelling Eiger...")
return web.json_response(serialize(SequenceComplete(5)))
@HTTPEndpoint.put(f"/{DETECTOR_API}" + "/command/abort")
async def abort_eiger(self, request: web.Request) -> web.Response:
"""A HTTP Endpoint for the 'abort' command of the Eiger.
Args:
request (web.Request): The request object that takes the request method.
Returns:
web.Response: The response object returned given the result of the HTTP
request.
"""
await self.device.abort()
LOGGER.debug("Aborting Eiger...")
return web.json_response(serialize(SequenceComplete(6)))
class EigerZMQAdapter(ZeroMQAdapter):
"""An Eiger adapter which parses the data to send along a ZeroMQStream."""
device: EigerDevice
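# Illustrative interaction sketch (assumptions: a tickit simulation is
# running with this adapter's HTTP server on localhost:8080; the port is
# not specified in this module). The paths match the endpoints above:
#
#     curl -X PUT http://localhost:8080/detector/api/1.8.0/command/initialize
#     curl http://localhost:8080/detector/api/1.8.0/status/state
#
# Config PUTs carry a JSON body with a "value" field, as read by
# put_config above.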
| 35.623134 | 85 | 0.627213 | 1,086 | 9,547 | 5.441989 | 0.134438 | 0.055838 | 0.063283 | 0.046193 | 0.700508 | 0.657022 | 0.57242 | 0.529272 | 0.529272 | 0.529272 | 0 | 0.002596 | 0.273594 | 9,547 | 267 | 86 | 35.756554 | 0.849603 | 0.021158 | 0 | 0.264463 | 0 | 0 | 0.130774 | 0.021934 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.107438 | 0 | 0.247934 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa2a8e2a1ee32054320e60b95b385277a93d41ec | 588 | py | Python | utilities.py | ungood/reinforcement-learning | 1b68f01654ca07ce4c7cf5c7aa158e3b240ca6e9 | [
"MIT"
] | null | null | null | utilities.py | ungood/reinforcement-learning | 1b68f01654ca07ce4c7cf5c7aa158e3b240ca6e9 | [
"MIT"
] | 1 | 2019-10-18T16:46:38.000Z | 2019-10-18T16:46:38.000Z | utilities.py | ungood/reinforcement-learning | 1b68f01654ca07ce4c7cf5c7aa158e3b240ca6e9 | [
"MIT"
] | null | null | null | import base64
from tempfile import NamedTemporaryFile
from IPython.display import HTML, display
import matplotlib.pyplot as plt
IMG_TAG = """<img src="data:image/gif;base64,{0}" alt="some_text">"""
def anim_to_gif(anim, fps=10):
data="0"
with NamedTemporaryFile(suffix='.gif') as f:
anim.save(f.name, writer='imagemagick', fps=fps);
data = open(f.name, "rb").read()
data = str(base64.b64encode(data), 'utf-8')
return IMG_TAG.format(data)
def display_animation(anim, **kwords):
plt.close(anim._fig)
display(HTML(anim_to_gif(anim, **kwords)))
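if __name__ == "__main__":
    # Minimal demonstration sketch. Assumptions: ImageMagick is installed
    # (anim_to_gif uses matplotlib's 'imagemagick' writer) and this runs
    # inside IPython/Jupyter so the HTML output actually renders.
    import numpy as np
    from matplotlib import animation

    fig, ax = plt.subplots()
    line, = ax.plot([], [])
    ax.set_xlim(0, 2 * np.pi)
    ax.set_ylim(-1.1, 1.1)
    x = np.linspace(0, 2 * np.pi, 100)

    def update(frame):
        # Shift the sine wave a little on every frame
        line.set_data(x, np.sin(x + frame / 10))
        return (line,)

    anim = animation.FuncAnimation(fig, update, frames=30)
    display_animation(anim, fps=10)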
| 30.947368 | 69 | 0.683673 | 86 | 588 | 4.569767 | 0.546512 | 0.066158 | 0.045802 | 0.066158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026316 | 0.159864 | 588 | 18 | 70 | 32.666667 | 0.769231 | 0 | 0 | 0 | 0 | 0 | 0.129252 | 0.052721 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.266667 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa323a5eb0b14d8cc8ce7b8b5526e2133a53503e | 2,747 | py | Python | sknetwork/ranking/hits.py | HerrZYZ/scikit-network | fa2b684ee37c90679ed3d0a48426d3f4baceb70d | [
"BSD-3-Clause"
] | 457 | 2018-07-24T12:42:14.000Z | 2022-03-31T08:30:39.000Z | sknetwork/ranking/hits.py | HerrZYZ/scikit-network | fa2b684ee37c90679ed3d0a48426d3f4baceb70d | [
"BSD-3-Clause"
] | 281 | 2018-07-13T05:01:19.000Z | 2022-03-31T14:13:43.000Z | sknetwork/ranking/hits.py | HerrZYZ/scikit-network | fa2b684ee37c90679ed3d0a48426d3f4baceb70d | [
"BSD-3-Clause"
] | 58 | 2019-04-22T09:04:32.000Z | 2022-03-30T12:43:08.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Oct 07 2019
@author: Nathan de Lara <ndelara@enst.fr>
"""
from typing import Union
import numpy as np
from scipy import sparse
from sknetwork.linalg import SVDSolver, LanczosSVD
from sknetwork.ranking.base import BaseRanking
from sknetwork.utils.check import check_format
class HITS(BaseRanking):
"""Hub and authority scores of each node.
For bipartite graphs, the hub score is computed on rows and the authority score on columns.
Parameters
----------
solver : ``'lanczos'`` (default, Lanczos algorithm) or :class:`SVDSolver` (custom solver)
Which solver to use.
Attributes
----------
scores_ : np.ndarray
Hub score of each node.
scores_row_ : np.ndarray
Hub score of each row, for bipartite graphs.
scores_col_ : np.ndarray
Authority score of each column, for bipartite graphs.
Example
-------
>>> from sknetwork.ranking import HITS
>>> from sknetwork.data import star_wars
>>> hits = HITS()
>>> biadjacency = star_wars()
>>> scores = hits.fit_transform(biadjacency)
>>> np.round(scores, 2)
array([0.5 , 0.23, 0.69, 0.46])
References
----------
Kleinberg, J. M. (1999). Authoritative sources in a hyperlinked environment.
Journal of the ACM, 46(5), 604-632.
"""
def __init__(self, solver: Union[str, SVDSolver] = 'lanczos'):
super(HITS, self).__init__()
if type(solver) == str:
self.solver: SVDSolver = LanczosSVD()
else:
self.solver = solver
def fit(self, adjacency: Union[sparse.csr_matrix, np.ndarray]) -> 'HITS':
"""Compute HITS algorithm with a spectral method.
Parameters
----------
adjacency :
Adjacency or biadjacency matrix of the graph.
Returns
-------
self: :class:`HITS`
"""
adjacency = check_format(adjacency)
self.solver.fit(adjacency, 1)
hubs: np.ndarray = self.solver.singular_vectors_left_.reshape(-1)
authorities: np.ndarray = self.solver.singular_vectors_right_.reshape(-1)
h_pos, h_neg = (hubs > 0).sum(), (hubs < 0).sum()
a_pos, a_neg = (authorities > 0).sum(), (authorities < 0).sum()
if h_pos > h_neg:
hubs = np.clip(hubs, a_min=0., a_max=None)
else:
hubs = np.clip(-hubs, a_min=0., a_max=None)
if a_pos > a_neg:
authorities = np.clip(authorities, a_min=0., a_max=None)
else:
authorities = np.clip(-authorities, a_min=0., a_max=None)
self.scores_row_ = hubs
self.scores_col_ = authorities
self.scores_ = hubs
return self
| 28.915789 | 95 | 0.607572 | 347 | 2,747 | 4.677233 | 0.389049 | 0.033272 | 0.012323 | 0.014787 | 0.187924 | 0.158965 | 0.088725 | 0.083795 | 0.083795 | 0.083795 | 0 | 0.021815 | 0.265744 | 2,747 | 94 | 96 | 29.223404 | 0.782846 | 0.421187 | 0 | 0.09375 | 0 | 0 | 0.007874 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.1875 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa3272d7139bcfc8c5bb901630f1f41796b6b106 | 1,586 | py | Python | Chapter08/microservices/order/send_order.py | ariwells2001/Python-Programming-Blueprints | 23981ab304e65bcc24560393c75fd5ee85c96ce5 | [
"MIT"
] | 72 | 2017-12-19T09:19:40.000Z | 2021-11-08T13:13:34.000Z | Chapter08/microservices/order/send_order.py | ariwells2001/Python-Programming-Blueprints | 23981ab304e65bcc24560393c75fd5ee85c96ce5 | [
"MIT"
] | 20 | 2018-03-21T01:15:27.000Z | 2021-09-08T00:59:40.000Z | Chapter08/microservices/order/send_order.py | ariwells2001/Python-Programming-Blueprints | 23981ab304e65bcc24560393c75fd5ee85c96ce5 | [
"MIT"
] | 53 | 2017-12-19T09:19:42.000Z | 2022-03-06T02:21:10.000Z | import json
import sys
import argparse
from http import HTTPStatus
import requests
def setUpData(order_id):
data = {
"items": [
{
"name": "Prod 001",
"price_per_unit": 10,
"product_id": 1,
"quantity": 2
},
{
"name": "Prod 002",
"price_per_unit": 12,
"product_id": 2,
"quantity": 2
}
],
"order_customer": {
"customer_id": 14,
"email": "test@test.com",
"name": "Test User"
},
"order_id": order_id,
"status": 1,
"total": "190.00"
}
return data
def send_order(data):
token = '8d19ddce090211fffe22af6c06cdfd06ecb94f4e'
headers = {
'Authorization': f'Token {token}',
'Content-type': 'application/json'
}
response = requests.post(
'http://127.0.0.1:8000/api/order/add/',
headers=headers,
data=json.dumps(data))
if response.status_code == HTTPStatus.NO_CONTENT:
        print('Oops! Something went wrong!')
sys.exit(1)
    print('Request was successful')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Create a order for test')
parser.add_argument('--orderid',
dest='order_id',
required=True,
help='Specify the the order id')
args = parser.parse_args()
data = setUpData(args.order_id)
send_order(data)
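# Example invocation (assumes the order service is running on
# 127.0.0.1:8000 and the hard-coded token above is still valid):
#
#     python send_order.py --orderid 42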
| 21.726027 | 56 | 0.501892 | 156 | 1,586 | 4.929487 | 0.538462 | 0.054616 | 0.031209 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.052578 | 0.376419 | 1,586 | 72 | 57 | 22.027778 | 0.724975 | 0 | 0 | 0.036364 | 0 | 0 | 0.26797 | 0.025221 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036364 | false | 0 | 0.090909 | 0 | 0.145455 | 0.036364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa341fa2d09e2941731c44bb055c6ff66bda6b86 | 623 | py | Python | Leetcode/93. Restore IP Addresses/solution2.py | asanoviskhak/Outtalent | c500e8ad498f76d57eb87a9776a04af7bdda913d | [
"MIT"
] | 51 | 2020-07-12T21:27:47.000Z | 2022-02-11T19:25:36.000Z | Leetcode/93. Restore IP Addresses/solution2.py | CrazySquirrel/Outtalent | 8a10b23335d8e9f080e5c39715b38bcc2916ff00 | [
"MIT"
] | null | null | null | Leetcode/93. Restore IP Addresses/solution2.py | CrazySquirrel/Outtalent | 8a10b23335d8e9f080e5c39715b38bcc2916ff00 | [
"MIT"
] | 32 | 2020-07-27T13:54:24.000Z | 2021-12-25T18:12:50.000Z | class Solution:
def helper(self, s: str, d: int) -> List[str]:
if len(s) > 3 * d or len(s) < d: return []
result = []
for i in range(min(3, len(s))):
cur = s[:i + 1]
if (len(cur) >= 2 and cur[0] == '0') or int(cur) > 255: continue
if d > 1:
for postfix in self.helper(s[i + 1:], d - 1):
result.append([cur] + postfix)
elif cur == s:
result.append([cur])
return result
def restoreIpAddresses(self, s: str) -> List[str]:
return ['.'.join(ip) for ip in self.helper(s, 4)]
| 28.318182 | 76 | 0.455859 | 89 | 623 | 3.191011 | 0.393258 | 0.042254 | 0.056338 | 0.091549 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033766 | 0.382022 | 623 | 21 | 77 | 29.666667 | 0.703896 | 0 | 0 | 0 | 0 | 0 | 0.00321 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0 | 0.066667 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa34ebf209fff9be11542312246de0a33c4111ae | 1,174 | py | Python | python/build_chronologies.py | griffij/TimeDFaults | 612c9860df2b4eb7869d8a22293bc5e1215f6ea4 | [
"Apache-2.0"
] | null | null | null | python/build_chronologies.py | griffij/TimeDFaults | 612c9860df2b4eb7869d8a22293bc5e1215f6ea4 | [
"Apache-2.0"
] | null | null | null | python/build_chronologies.py | griffij/TimeDFaults | 612c9860df2b4eb7869d8a22293bc5e1215f6ea4 | [
"Apache-2.0"
] | null | null | null | """Construct chronologies for a given fault
"""
from QuakeRates.dataman.parse_params import parse_param_file, get_event_sets
#paramfiles = ['../params/Dunstan4event_VanDissen2007_simple.txt',
# '../params/Dunstan5event_VanDissen2007_simple.txt',
# '../params/Dunstan6event_VanDissen2007_simple.txt']
paramfiles = ['../params/Dunstan6eventOxcal_devonshire.csv',
'../params/Dunstan5eventOxcal_devonshire.csv',
'../params/Dunstan5eventOxcalv2_devonshire.csv',
'../params/Dunstan4eventOxcal_devonshire.csv']
#paramfiles = ['../params/Hyde3event_lugcreek.txt',
# '../params/Hyde4event_lugcreek.txt']
# Number of sample chronologies to generate
n_samples = 1000
names, event_sets, event_certainties, num_events, \
tect_regions, fault_styles = get_event_sets(paramfiles, ['all'], ['all'], 1)
print(names)
for i, event_set in enumerate(event_sets):
event_set.gen_chronologies(n_samples, observation_end=2020, min_separation=500)
chron_filename = '../data/chronologies/' + event_set.name + \
'_%i_chronologies.csv' % n_samples
event_set.write_chronology(chron_filename)
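# Each event set yields one CSV of sampled chronologies, following the
# chron_filename pattern above, e.g.
# ../data/chronologies/<event_set.name>_1000_chronologies.csv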
| 43.481481 | 83 | 0.717206 | 127 | 1,174 | 6.314961 | 0.535433 | 0.044888 | 0.082294 | 0.054863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034378 | 0.157581 | 1,174 | 26 | 84 | 45.153846 | 0.776542 | 0.321976 | 0 | 0 | 0 | 0 | 0.281888 | 0.248724 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.071429 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa3527cd0d3ddb121b35eca7794ee5b42c61c83a | 1,607 | py | Python | tutorial/09_Features/10_GRSD_descriptors.py | maguangyan/pclpy_tutorial | 9ce54d1f1a70cf379b5954ad4d8bed3210e06a4c | [
"BSD-3-Clause"
] | 17 | 2021-10-04T08:00:50.000Z | 2022-03-31T07:23:52.000Z | tutorial/09_Features/10_GRSD_descriptors.py | maguangyan/pclpy_tutorial | 9ce54d1f1a70cf379b5954ad4d8bed3210e06a4c | [
"BSD-3-Clause"
] | 3 | 2021-12-17T07:42:04.000Z | 2022-03-30T02:17:15.000Z | tutorial/09_Features/10_GRSD_descriptors.py | maguangyan/pclpy_tutorial | 9ce54d1f1a70cf379b5954ad4d8bed3210e06a4c | [
"BSD-3-Clause"
] | 2 | 2022-03-18T07:19:19.000Z | 2022-03-29T14:25:01.000Z | # -*- coding: utf-8 -*-
# @Time : DATE:2021/9/25
# @Author : yan
# @Email : 1792659158@qq.com
# @File : 10_GRSD_descriptors.py
from pclpy import pcl
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Load the point cloud
cloud = pcl.PointCloud.PointXYZ()
reader = pcl.io.PCDReader()
reader.read("../../data/min_cut_segmentation_tutorial.pcd", cloud)
    # Construct the normal estimation class
ne = pcl.features.NormalEstimation.PointXYZ_Normal()
ne.setInputCloud(cloud)
tree = pcl.search.KdTree.PointXYZ()
ne.setSearchMethod(tree)
cloud_normals = pcl.PointCloud.Normal()
ne.setRadiusSearch(0.3)
    # Compute the normals
ne.compute(cloud_normals)
    # Create the GRSD estimation class, and pass the input dataset to it
grsd = pcl.features.GRSDEstimation.PointXYZ_Normal_GRSDSignature21()
grsd.setInputCloud(cloud)
grsd.setRadiusSearch(0.1)
tree = pcl.search.KdTree.PointXYZ()
grsd.setSearchMethod(tree)
grsd.setInputNormals(cloud_normals)
descriptors = pcl.PointCloud.GRSDSignature21()
grsd.compute(descriptors)
plt.plot(descriptors.histogram[0])
plt.show()
viewer = pcl.visualization.PCLVisualizer("3D viewer")
viewer.setBackgroundColor(0, 0, 0)
rgb = pcl.visualization.PointCloudColorHandlerCustom.PointXYZ(cloud, 0.0, 255.0, 0.0)
viewer.addPointCloud(cloud, rgb, "sample cloud")
viewer.setPointCloudRenderingProperties(0, 1, "sample cloud")
viewer.addCoordinateSystem(1)
viewer.initCameraParameters()
while not viewer.wasStopped():
viewer.spinOnce(10) | 27.237288 | 89 | 0.712508 | 192 | 1,607 | 5.864583 | 0.520833 | 0.008881 | 0.023091 | 0.033748 | 0.047957 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034586 | 0.172371 | 1,607 | 59 | 90 | 27.237288 | 0.81203 | 0.125078 | 0 | 0.055556 | 0 | 0 | 0.060845 | 0.031496 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa3623b88ca53dfcb4d1d75bbcfd65d5f9eeb7f9 | 3,736 | py | Python | test/test_dc_trainer.py | martijnvanbeers/diagnnose | 02b536b53e82b5701490b42063b24c5fb348c2f7 | [
"MIT"
] | null | null | null | test/test_dc_trainer.py | martijnvanbeers/diagnnose | 02b536b53e82b5701490b42063b24c5fb348c2f7 | [
"MIT"
] | null | null | null | test/test_dc_trainer.py | martijnvanbeers/diagnnose | 02b536b53e82b5701490b42063b24c5fb348c2f7 | [
"MIT"
] | null | null | null | import os
import shutil
import unittest
from collections import Counter
from unittest.mock import MagicMock, patch
from diagnnose.classifiers.dc_trainer import DCTrainer
from diagnnose.corpus import import_corpus
from .test_utils import create_and_dump_dummy_activations
# GLOBALS
ACTIVATION_NAMES = [(0, "hx")]
ACTIVATIONS_NAME = "hx_l0"
NUM_TEST_SENTENCES = 5
ACTIVATIONS_DIR = "test/test_data"
class TestDCTrainer(unittest.TestCase):
""" Test functionalities of the DCTrainer class. """
@classmethod
def setUpClass(cls) -> None:
# Create directory if necessary
if not os.path.exists(ACTIVATIONS_DIR):
os.makedirs(ACTIVATIONS_DIR)
        # Create dummy data and have the reader read it
cls.labels = create_and_dump_dummy_activations(
num_sentences=NUM_TEST_SENTENCES,
activations_dim=10,
max_sen_len=7,
activations_dir=ACTIVATIONS_DIR,
activations_name=ACTIVATIONS_NAME,
num_classes=5,
)
corpus = import_corpus(f"{ACTIVATIONS_DIR}/corpus.tsv")
# Model without class weights
cls.model = DCTrainer(
ACTIVATIONS_DIR,
corpus,
ACTIVATION_NAMES,
activations_dir=ACTIVATIONS_DIR,
classifier_type="logreg_sklearn",
)
# Model with class weights
cls.weighed_model = DCTrainer(
ACTIVATIONS_DIR,
corpus,
ACTIVATION_NAMES,
activations_dir=ACTIVATIONS_DIR,
classifier_type="logreg_sklearn",
)
# Create split here s.t. we can later mock this exact function in DCTrainer.train
# This way we can use the same random data splits
# and make sure class weights are counted correctly,
# otherwise this variable would be inside the local scope of the function and inaccessible
cls.data_dict = cls.weighed_model.data_loader.create_data_split(
ACTIVATION_NAMES[0]
)
@classmethod
def tearDownClass(cls) -> None:
# Remove files after tests
if os.listdir(ACTIVATIONS_DIR):
shutil.rmtree(ACTIVATIONS_DIR)
@patch("diagnnose.activations.data_loader.DataLoader.create_data_split")
@patch("diagnnose.classifiers.dc_trainer.DCTrainer._save_results")
@patch("diagnnose.classifiers.dc_trainer.DCTrainer._save_classifier")
@patch("diagnnose.classifiers.dc_trainer.DCTrainer._eval")
@patch("diagnnose.classifiers.dc_trainer.DCTrainer._fit")
def test_class_weights(
self,
_mock_fit_data: MagicMock,
mock_eval_classifier: MagicMock,
_mock_save_classifier: MagicMock,
_mock_save_results: MagicMock,
create_data_split_mock: MagicMock,
) -> None:
create_data_split_mock.return_value = self.data_dict
# Confirm that class weights are not used if flag is not given
mock_eval_classifier.return_value = self.labels # Fake predictions
self.model.train()
self.assertIsNone(
self.model.classifier.class_weight,
"Class weights are given although flag is set to False",
)
# Confirm that class weights are calculated correctly if actually used
class_counts = Counter(self.data_dict["train_y"].numpy())
num_labels = sum(class_counts.values())
self.weighed_model.train(calc_class_weights=True)
self.assertTrue(
all(
[
class_counts[class_] / num_labels == weight
for class_, weight in self.weighed_model.classifier.class_weight.items()
]
),
"Class weights have wrong values.",
)
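# To run just this module (assumed to be invoked from the repository root
# so that the relative ACTIVATIONS_DIR path resolves):
#
#     python -m unittest test.test_dc_trainer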
| 35.580952 | 98 | 0.660867 | 425 | 3,736 | 5.56 | 0.355294 | 0.082945 | 0.046551 | 0.061363 | 0.210749 | 0.164198 | 0.127804 | 0.088024 | 0.088024 | 0.088024 | 0 | 0.002927 | 0.268469 | 3,736 | 104 | 99 | 35.923077 | 0.86169 | 0.164615 | 0 | 0.164557 | 0 | 0 | 0.142166 | 0.096712 | 0 | 0 | 0 | 0 | 0.025316 | 1 | 0.037975 | false | 0 | 0.113924 | 0 | 0.164557 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa381170ebb8e216de2a0d13381a12c1208da778 | 8,531 | py | Python | unidler.py | ministryofjustice/analytics-platform-unidler | 79463f19c415c62ee12fb7e9dc49910fd0e0c5c9 | [
"MIT"
] | null | null | null | unidler.py | ministryofjustice/analytics-platform-unidler | 79463f19c415c62ee12fb7e9dc49910fd0e0c5c9 | [
"MIT"
] | 5 | 2018-09-25T14:18:56.000Z | 2019-08-13T05:28:50.000Z | unidler.py | ministryofjustice/analytics-platform-unidler | 79463f19c415c62ee12fb7e9dc49910fd0e0c5c9 | [
"MIT"
] | 1 | 2021-04-11T06:43:03.000Z | 2021-04-11T06:43:03.000Z | import contextlib
from http import HTTPStatus
from http.server import HTTPServer, BaseHTTPRequestHandler
import logging
import os
import socket
from socketserver import ThreadingMixIn
import ssl
import sys
import kubernetes
from kubernetes import client, config
from kubernetes.client.models import (
V1beta1HTTPIngressPath,
V1beta1HTTPIngressRuleValue,
V1beta1IngressBackend,
V1beta1IngressRule,
)
IDLED = 'mojanalytics.xyz/idled'
IDLED_AT = 'mojanalytics.xyz/idled-at'
INGRESS_CLASS = 'kubernetes.io/ingress.class'
INGRESS_CLASS_NAME = os.environ.get('INGRESS_CLASS_NAME', 'istio')
UNIDLER = 'unidler'
UNIDLER_NAMESPACE = 'default'
logging.basicConfig(level=os.environ.get('LOG_LEVEL', 'DEBUG'))
app_log = logging.getLogger('unidler')
logging.getLogger('kubernetes').setLevel(logging.WARNING)
def run(host='0.0.0.0', port=8080):
try:
config.load_incluster_config()
except:
config.load_kube_config()
unidler = UnidlerServer((host, int(port)), RequestHandler)
app_log.info(f'Unidler listening on {host}:{port}')
unidler.serve_forever()
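# Illustrative startup sketch (the module's real entry point is not shown
# in this excerpt; the PORT environment variable is an assumption):
#
#     if __name__ == '__main__':
#         run(port=os.environ.get('PORT', 8080))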
class UnidlerServer(ThreadingMixIn, HTTPServer):
pass
class RequestHandler(BaseHTTPRequestHandler):
unidling = {}
def do_GET(self):
hostname = self.headers.get('Host', UNIDLER)
if hostname.startswith(UNIDLER):
app_log.debug('No hostname specified')
self.respond(HTTPStatus.NO_CONTENT, '')
return
username = hostname.split('.')[0]
log = logging.getLogger('unidler:{}'.format(username))
try:
if hostname in self.unidling:
log.debug('Internal state: unidling is in progress')
if self.unidling[hostname].is_done():
self.unidling[hostname].enable_ingress()
del self.unidling[hostname]
else:
log.debug('Unidling is not done yet')
elif is_idle(hostname):
log.debug('It is idle, so starting unidling')
self.unidling[hostname] = Unidling(hostname, log)
self.unidling[hostname].start()
else:
                log.error('Shouldn\'t happen - unidler received a request but the tool is not idled')
except (DeploymentNotFound, IngressNotFound) as not_found:
self.send_error(HTTPStatus.NOT_FOUND, str(not_found))
except Exception as error:
self.send_error(HTTPStatus.INTERNAL_SERVER_ERROR, str(error))
else:
self.respond(HTTPStatus.ACCEPTED, please_wait(hostname))
def respond(self, status, body):
self.send_response(status)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(str(body).encode('utf-8'))
class DeploymentNotFound(Exception):
pass
class IngressNotFound(Exception):
pass
class Unidling(object):
def __init__(self, hostname, log=app_log):
self.hostname = hostname
self.ingress = None
self.deployment = None
self.started = False
self.replicas = 0
self.enabled = False
self.log = log
def start(self):
if not self.started:
self.log.debug('Starting unidle')
self.started = True
self.ingress = ingress_for_host(self.hostname)
self.deployment = deployment_for_ingress(self.ingress)
restore_replicas(self.deployment, self.log)
unmark_idled(self.deployment, self.log)
# XXX writing changes triggers the asynchronous creation of
# pods, which can take a few seconds
write_deployment_changes(self.deployment, self.log)
else:
            self.log.error('Shouldn\'t happen - unidling started a second time')
def is_done(self):
if self.started:
self.deployment = deployment_for_ingress(self.ingress)
replicas = int(self.deployment.status.available_replicas or 0)
self.log.debug('Is done?\n "idled" label removed = {}\n replicas = {}'.format(
IDLED not in self.deployment.metadata.labels, replicas))
return (
IDLED not in self.deployment.metadata.labels and
replicas >= 1)
else:
            self.log.error('Shouldn\'t happen - is_done() called before unidling was started')
return False
def enable_ingress(self):
if not self.enabled:
self.enabled = True
self.log.debug('Enabling ingress')
ingress = unidler_ingress()
remove_host_rule(self.hostname, ingress, self.log)
write_ingress_changes(ingress, self.log)
# XXX do we need to wait here for the ingress controller to pick up the
# changes?
            # refresh the cached ingress before re-enabling it (the previous
            # local variable here shadowed self.ingress and was never used)
            self.ingress = ingress_for_host(self.hostname)
            enable_ingress(self.ingress)
write_ingress_changes(self.ingress, self.log)
else:
self.log.error('Shouldn\'t happen - Ingress enabling triggered for the second time')
def deployment_for_ingress(ingress):
try:
return client.AppsV1beta1Api().read_namespaced_deployment(
ingress.metadata.name,
ingress.metadata.namespace)
except kubernetes.client.rest.ApiException:
raise DeploymentNotFound(
ingress.metadata.name,
ingress.metadata.namespace)
def ingress_for_host(hostname):
# XXX assumes first ingress rule is the one we want
ingresses = client.ExtensionsV1beta1Api().list_ingress_for_all_namespaces()
ingress = next(
(
ingress
for ingress in ingresses.items
if (ingress.metadata.name != UNIDLER and
ingress.spec.rules[0].host == hostname)
),
None)
if ingress is None:
raise IngressNotFound(hostname)
return ingress
def is_idle(hostname, log=app_log):
deployment = deployment_for_ingress(ingress_for_host(hostname))
log.debug('Is idle? "idled" label = {}'.format(IDLED in deployment.metadata.labels))
return IDLED in deployment.metadata.labels
def restore_replicas(deployment, log=app_log):
annotation = deployment.metadata.annotations.get(IDLED_AT)
if annotation is not None:
replicas = annotation.split(',')[1]
else:
log.error('Deployment has no idled-at annotation - assuming 1 replica')
replicas = 1
log.debug(f'Restoring {replicas} replicas')
deployment.spec.replicas = int(replicas)
def unmark_idled(deployment, log=app_log):
log.debug('Removing idled annotation and label')
if IDLED in deployment.metadata.labels:
del deployment.metadata.labels[IDLED]
if IDLED_AT in deployment.metadata.annotations:
del deployment.metadata.annotations[IDLED_AT]
def write_deployment_changes(deployment, log=app_log):
log.debug(
f'Writing changes to deployment {deployment.metadata.name} '
f'in namespace {deployment.metadata.namespace}')
client.AppsV1beta1Api().replace_namespaced_deployment(
deployment.metadata.name,
deployment.metadata.namespace,
deployment)
def write_ingress_changes(ingress, log=app_log):
log.debug(
f'Writing changes to ingress {ingress.metadata.name} '
f'in namespace {ingress.metadata.namespace}')
client.ExtensionsV1beta1Api().patch_namespaced_ingress(
ingress.metadata.name,
ingress.metadata.namespace,
ingress)
def unidler_ingress():
return client.ExtensionsV1beta1Api().read_namespaced_ingress(
UNIDLER, UNIDLER_NAMESPACE)
def remove_host_rule(hostname, ingress, log=app_log):
log.debug(
f'Removing host rules for {hostname} '
f'from ingress {ingress.metadata.name} '
f'in namespace {ingress.metadata.namespace}')
num_rules_before = len(ingress.spec.rules)
ingress.spec.rules = list(
filter(
lambda rule: rule.host != hostname,
ingress.spec.rules))
log.debug('Rules removed: {}'.format(num_rules_before - len(ingress.spec.rules)))
def enable_ingress(ingress):
ingress.metadata.annotations[INGRESS_CLASS] = INGRESS_CLASS_NAME
def please_wait(hostname):
with open('please_wait.html') as f:
body = f.read()
return body.replace(
f"UNIDLER_REDIRECT_URL = ''",
f"UNIDLER_REDIRECT_URL = 'https://{hostname}'")
if __name__ == '__main__':
run(*sys.argv[1:])
| 32.071429 | 108 | 0.657016 | 977 | 8,531 | 5.612078 | 0.229273 | 0.020427 | 0.01149 | 0.011672 | 0.180011 | 0.144446 | 0.102134 | 0.048514 | 0.048514 | 0.022615 | 0 | 0.005599 | 0.246278 | 8,531 | 265 | 109 | 32.192453 | 0.847123 | 0.025906 | 0 | 0.125 | 0 | 0 | 0.124639 | 0.028059 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09 | false | 0.015 | 0.06 | 0.005 | 0.22 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa389d61ea0653e2eac01c0ca4962508df93ac5b | 32,692 | py | Python | opentamp/src/policy_hooks/vae/vae.py | Algorithmic-Alignment-Lab/openTAMP-legacy | 3b7c3be164cc968ad77a928286d6460cd70a670e | [
"MIT"
] | 2 | 2022-03-09T19:48:20.000Z | 2022-03-26T17:31:07.000Z | opentamp/src/policy_hooks/vae/vae.py | Algorithmic-Alignment-Lab/OpenTAMP | eecb950bd273da8cbed4394487630e8453f2c242 | [
"MIT"
] | null | null | null | opentamp/src/policy_hooks/vae/vae.py | Algorithmic-Alignment-Lab/OpenTAMP | eecb950bd273da8cbed4394487630e8453f2c242 | [
"MIT"
] | null | null | null | import copy
import json
import logging
import os
import sys
import tempfile
import time
import traceback
import h5py
import numpy as np
import tables
import tensorflow as tf
from opentamp.src.policy_hooks.vae.vae_networks import *
'''
Random things to remember:
- End with no-op task (since we go obs + task -> next_obs, we want last obs + task -> last obs for code simplicity)
- Or cut last timestep?
- Policy gets a reward for finding bad encode/decode paths?
- Constrain conditional encoding (i.e. latent output) against prior?
'''
LATENT_DIM = 16
ENCODER_CONFIG = {
'n_channels': [16, 32, 32],
'filter_sizes': [5, 5, 5],
'strides': [3, 3, 3],
'fc_dims': [LATENT_DIM] # [2 * 3 * 32]
# 'out_act': 'tanh',
}
DECODER_CONFIG = {
'conv_init_shape': [2, 3, 32],
'n_channels': [32, 16, 3],
'filter_sizes': [5, 5, 5],
'strides': [3, 3, 3],
'fc_dims': None,
'out_act': 'sigmoid',
}
LATENT_DYNAMICS_CONFIG = {
'fc_dims': [LATENT_DIM, LATENT_DIM],
}
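# Minimal hyperparameter sketch (keys inferred from VAE.__init__ below; the
# values are illustrative placeholders, not the settings used in experiments):
# hyperparams = {
#     'rollout_len': 22,        # internally T = rollout_len - 2
#     'task_dims': 8,
#     'weight_dir': '/tmp/vae_weights',
#     'batch_size': 64,
#     'train_iters': 100,
#     'load_data': True,        # open/create <weight_dir>/vae_buffer.hdf5
# }
# vae = VAE(hyperparams)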
class VAE(object):
def __init__(self, hyperparams):
self.config = hyperparams
tf.reset_default_graph()
tf.set_random_seed(self.config.get('random_seed', 1234))
self.tf_iter = 0
self.batch_size = self.config.get('batch_size', 64)
self.train_iters = self.config.get('train_iters', 100)
self.T = self.config['rollout_len'] - 2
self.rollout_len = self.config['rollout_len'] - 2
self.obs_dims = [80, 107, 3] # list(hyperparams['obs_dims'])
self.task_dim = hyperparams['task_dims']
# The following hyperparameters also describe where the weights are saved
self.weight_dir = hyperparams['weight_dir']
# if self.load_step < 0:
# is_rnn = 'rnn' if self.use_recurrent_dynamics else 'fc'
# overshoot = 'overshoot' if self.use_overshooting else 'onestep'
# self.ckpt_name = self.weight_dir+'/vae_{0}_{1}_{2}.ckpt'.format(self.train_mode, is_rnn, overshoot)
# else:
# self.ckpt_name = self.weight_dir+'/vae_{0}_{1}_{2}.ckpt'.format(self.train_mode, is_rnn, overshoot, load_step)
if hyperparams.get('load_data', True):
f_mode = 'a'
self.data_file = self.weight_dir+'/vae_buffer.hdf5'
self.data = h5py.File(self.data_file, f_mode)
try:
self.obs_data = self.data['obs_data']
self.task_data = self.data['task_data']
self.task_data = self.task_data[:, :, :self.task_dim]
self.task_dim = self.task_data.shape[-1]
            except Exception:  # buffer datasets missing or unreadable; create fresh ones below
obs_data = np.zeros([0, self.rollout_len]+list(self.obs_dims))
task_data = np.zeros((0, self.rollout_len, self.task_dim))
self.obs_data = self.data.create_dataset('obs_data', data=obs_data, maxshape=(None, None, None, None, None), dtype='uint8')
self.task_data = self.data.create_dataset('task_data', data=task_data, maxshape=(None, None, None), dtype='uint8')
# self.data.swmr_mode=True
elif hyperparams.get('data_read_only', False):
f_mode = 'r'
self.data_file = self.weight_dir+'/vae_buffer.hdf5'
self.data = h5py.File(self.data_file, f_mode, swmr=True)
self.obs_data = self.data['obs_data']
self.task_data = self.data['task_data']
# while not os.path.isfile(self.weight_dir+'/vae_buffer.hdf5'):
# time.sleep(1)
self.train_mode = hyperparams.get('train_mode', 'online')
assert self.train_mode in ['online', 'conditional', 'unconditional']
self.use_recurrent_dynamics = hyperparams.get('use_recurrent_dynamics', False)
self.use_overshooting = hyperparams.get('use_overshooting', False)
self.use_prior = hyperparams.get('use_prior', True)
self.load_step = hyperparams.get('load_step', 0)
# self.beta = hyperparams.get('beta', 10)
# self.beta_d = hyperparams.get('overshoot_beta', 1./self.T)
self.beta = 0.2 # hyperparams.get('beta', 0.5)
self.beta_d = hyperparams.get('overshoot_beta', 0.1)
self.data_limit = hyperparams.get('data_limit', None)
self.data_limit = self.data_limit if self.data_limit is not None else len(self.obs_data)
self.obs_data = self.obs_data[:self.data_limit]
self.task_data = self.task_data[:self.data_limit]
self.dist_constraint = hyperparams.get('dist_constraint', False)
self.ckpt_name = self.get_weight_file()
# self.data_file = self.weight_dir+'/vae_buffer.npz'
# try:
# data = np.load(self.data_file, mmap_mode='w+')
# except:
# pass
# self.obs_data = np.zeros((0, self.dT, self.dO))
# self.task_data = np.zeros((0, self.dT, self.dU))
self.max_buffer = hyperparams.get('max_buffer', 1e6)
        # NOTE: re-reads the flag under a second key name ('distance_constraint')
        # and overrides the 'dist_constraint' value assigned above
        self.dist_constraint = hyperparams.get('distance_constraint', False)
self.cur_lr = 1e-3
with tf.variable_scope('vae', reuse=False):
self.init_network()
self.init_solver()
self.scope = 'vae'
self.gpu_fraction = self.config['gpu_fraction'] if 'gpu_fraction' in self.config else 0.95
if 'allow_growth' in self.config and not self.config['allow_growth']:
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=self.gpu_fraction)
else:
gpu_options = tf.GPUOptions(allow_growth=True)
self.sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True))
init_op = tf.initialize_all_variables()
variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.scope)
self.saver = tf.train.Saver(variables)
if self.use_recurrent_dynamics:
zero_state = self.latent_dynamics.lstm_cell.zero_state(batch_size=1, dtype=tf.float32)
self.zero_state = tuple(self.sess.run(zero_state))
try:
self.saver.restore(self.sess, self.ckpt_name)
except Exception as e:
self.sess.run(init_op)
print(('\n\nCould not load previous weights for {0} from {1}\n\n'.format(self.scope, self.weight_dir)))
self.update_count = 0
self.n_updates = 0
self.update_size = self.config.get('update_size', 1)
def get_weight_file(self, addendum=None):
is_rnn = 'rnn' if self.use_recurrent_dynamics else 'fc'
overshoot = 'overshoot' if self.use_overshooting else 'onestep'
step = self.load_step
mode = self.train_mode
prior = 'prior' if self.use_prior else 'noprior'
beta = 'beta'+str(self.beta)
overshoot_beta = 'beta_d'+str(self.beta_d)
limit = self.data_limit if self.data_limit is not None else len(self.obs_data)
limit = str(limit)+'nsamples'
dist = 'distconstr' if self.dist_constraint else 'nodistconstr'
if addendum is None:
ext = "vae_{0}_{1}_{2}_{3}_{4}_{5}_{6}.ckpt".format(mode, is_rnn, overshoot, prior, beta, dist, limit)
else:
ext = "vae_{0}_{1}_{2}_{3}_{4}_{5}_{6}_{7}.ckpt".format(mode, is_rnn, overshoot, prior, beta, dist, limit, addendum)
file_name = self.weight_dir + ext
return file_name
def serialize_weights(self):
print('Serializing vae weights')
var_to_val = {}
variables = self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='vae')
for v in variables:
var_to_val[v.name] = self.sess.run(v).tolist()
return json.dumps(var_to_val)
def deserialize_weights(self, json_wts, save=True):
var_to_val = json.loads(json_wts)
# print 'Deserializing', scopes
variables = self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='vae')
for var in variables:
var.load(var_to_val[var.name], session=self.sess)
if save: self.store_scope_weights()
# print 'Weights for {0} successfully deserialized and stored.'.format(scopes)
# def update_weights(self, weight_dir=None):
# if weight_dir is None:
# weight_dir = self.weight_dir
# self.saver.restore(self.sess, weight_dir+'/vae_{0}.ckpt'.format(self.train_mode))
def store_scope_weights(self, weight_dir=None, addendum=None):
if weight_dir is None:
weight_dir = self.weight_dir
try:
variables = self.sess.graph.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='vae')
saver = tf.train.Saver(variables)
saver.save(self.sess, self.get_weight_file(addendum))
print(('Saved vae weights for', self.train_mode, 'in', self.weight_dir))
        except Exception:
            print('Saving vae weights failed; logging the error without crashing:')
traceback.print_exception(*sys.exc_info())
def store_weights(self, weight_dir=None):
self.store_scope_weights(weight_dir)
def store(self, obs, task_list):
print(('Storing data for', self.scope))
assert len(obs) == len(task_list)
# self.T = len(obs)
# self.obs_data = np.r_[self.obs_data, obs]
# self.task_data = np.r_[self.task_data, task_list]
# obs = obs[:self.T]
# task_list = task_list[:self.T]
obs = obs.reshape((1,)+obs.shape)
task_list = task_list.reshape((1,)+task_list.shape)
self.obs_data.resize((len(self.obs_data)+1,) + obs.shape[1:])
self.obs_data[-1] = obs.astype(np.uint8)
self.task_data.resize((len(self.task_data)+1,) + task_list.shape[1:])
self.task_data[-1] = task_list.astype(np.uint8)
# if len(self.obs_data) > self.max_buffer:
# self.obs_data = self.obs_data[-self.max_buffer:]
# self.task_data = self.task_data[-self.max_buffer:]
self.update_count += 1
if self.update_count > self.update_size and len(self.obs_data) > 10:
print('Updating vae')
# self.update()
self.n_updates += 1
self.update_count = 0
if not self.n_updates % 5:
self.save_buffers()
if self.n_updates > 10:
self.store_scope_weights()
self.n_updates = 0
return True
return False
def save_buffers(self):
# np.savez(self.data_file, task_data=self.task_data, obs_data=self.obs_data)
self.data.flush()
def init_network(self):
import tensorflow as tf
self.x_in = tf.compat.v1.placeholder(tf.float32, shape=[self.batch_size*self.T]+list(self.obs_dims))
self.latent_in = tf.compat.v1.placeholder(tf.float32, shape=[1, 1, LATENT_DIM])
self.task_in = tf.compat.v1.placeholder(tf.float32, shape=[self.batch_size*self.T]+[self.task_dim])
self.latent_task_in = tf.compat.v1.placeholder(tf.float32, shape=[1, 1, self.task_dim])
self.offset_in = tf.compat.v1.placeholder(tf.float32, shape=[self.batch_size*self.T]+list(self.obs_dims))
self.before_offset_in = tf.compat.v1.placeholder(tf.float32, shape=[self.batch_size*self.T]+list(self.obs_dims))
self.training = tf.compat.v1.placeholder(tf.bool)
if len(self.obs_dims) == 1:
pass
else:
pass
self.fc_in = None # tf.compat.v1.placeholder(tf.float32, shape=[None, self.task_dim])
self.offset_fc_in = None #tf.compat.v1.placeholder(tf.float32, shape=[None, self.task_dim])
self.far_offset_fc_in = None # tf.compat.v1.placeholder(tf.float32, shape=[None, self.task_dim])
# mask = tf.ones((self.batch_size, self.T))
# mask[:,-1] = 0
# self.far_offset_loss_mask = tf.constant(mask.reshape([self.batch_size*self.T]))
self.encoder = Encoder()
self.encode_mu, self.encode_logvar = self.encoder.get_net(self.x_in / 255., self.training, fc_in=self.fc_in, config=ENCODER_CONFIG)
self.encode_posterior = tf.distributions.Normal(self.encode_mu, tf.sqrt(tf.exp(self.encode_logvar)))
# self.offset_encode_mu, self.offset_encode_logvar = self.encoder.get_net(self.offset_in, self.training, fc_in=self.offset_fc_in, reuse=True, config=ENCODER_CONFIG)
# self.far_offset_encode_mu, self.far_offset_encode_logvar = self.encoder.get_net(self.far_offset_in, self.training, fc_in=self.far_offset_fc_in, reuse=True, config=ENCODER_CONFIG)
self.decoder_in = self.encode_mu + tf.sqrt(tf.exp(self.encode_logvar)) * tf.random_normal(tf.shape(self.encode_mu), 0, 1)
self.decoder = Decoder()
self.decode_mu, self.decode_logvar = self.decoder.get_net(self.decoder_in, self.training, config=DECODER_CONFIG)
self.decode_posterior = tf.distributions.Normal(self.decode_mu, tf.sqrt(tf.exp(self.decode_logvar)))
# self.sample_decode_mu, self.sample_decode_logvar = self.decoder.get_net(self.decoder_in, self.training, config=DECODER_CONFIG, reuse=reuse)
# self.sample_decode_posterior = tf.distributions.Normal(self.sample_decode_mu, tf.sqrt(tf.exp(self.sample_decode_logvar)))
if 'unconditional' not in self.train_mode:
if self.use_recurrent_dynamics:
self.latent_dynamics = RecurrentLatentDynamics()
in_shape = tf.shape(self.decoder_in)
z_in = tf.reshape(self.decoder_in, (self.batch_size, self.T, LATENT_DIM))
task_in = tf.reshape(self.task_in, (self.batch_size, self.T, self.task_dim))
mu, logvar, self.rnn_initial_state, self.rnn_final_state = self.latent_dynamics.get_net(z_in, task_in, self.T, self.training, config=LATENT_DYNAMICS_CONFIG)
self.conditional_encode_mu = tf.reshape(mu, in_shape)
self.conditional_encode_logvar = tf.reshape(logvar, in_shape)
self.conditional_encode_posterior = tf.distributions.Normal(self.conditional_encode_mu, tf.sqrt(tf.exp(self.conditional_encode_logvar)))
trans_mu, trans_logvar, self.trans_rnn_initial_state, self.trans_rnn_final_state = self.latent_dynamics.get_net(self.latent_in, self.latent_task_in, 1, self.training, config=LATENT_DYNAMICS_CONFIG, reuse=True)
self.latent_trans_mu = tf.reshape(trans_mu, [1, 1, LATENT_DIM])
self.latent_trans_logvar = tf.reshape(trans_logvar, [1, 1, LATENT_DIM])
self.latent_trans_posterior = tf.distributions.Normal(self.latent_trans_mu, tf.sqrt(tf.exp(self.latent_trans_logvar)))
else:
self.latent_dynamics = LatentDynamics()
self.conditional_encode_mu, self.conditional_encode_logvar = self.latent_dynamics.get_net(self.decoder_in, self.task_in, self.training, config=LATENT_DYNAMICS_CONFIG)
self.conditional_encode_posterior = tf.distributions.Normal(self.conditional_encode_mu, tf.sqrt(tf.exp(self.conditional_encode_logvar)))
self.latent_trans_mu, self.latent_trans_logvar = self.latent_dynamics.get_net(tf.reshape(self.latent_in, (1, LATENT_DIM)), tf.reshape(self.latent_task_in, (1, self.task_dim)), self.training, config=LATENT_DYNAMICS_CONFIG, reuse=True)
self.latent_trans_posterior = tf.distributions.Normal(self.latent_trans_mu, tf.sqrt(tf.exp(self.latent_trans_logvar)))
self.conditional_decoder_in = self.conditional_encode_mu + tf.sqrt(tf.exp(self.conditional_encode_logvar)) * tf.random_normal(tf.shape(self.conditional_encode_mu), 0, 1)
self.conditional_decode_mu, self.conditional_decode_logvar = self.decoder.get_net(self.conditional_decoder_in, self.training, config=DECODER_CONFIG, reuse=True)
self.conditional_decode_posterior = tf.distributions.Normal(self.conditional_decode_mu, tf.sqrt(tf.exp(self.conditional_decode_logvar)))
self.offset_encode_mu, self.offset_encode_logvar = self.encoder.get_net(self.offset_in / 255., self.training, fc_in=self.offset_fc_in, config=ENCODER_CONFIG, reuse=True)
self.offset_encode_posterior = tf.distributions.Normal(self.offset_encode_mu, tf.sqrt(tf.exp(self.offset_encode_logvar)))
if self.dist_constraint:
            self.before_offset_encode_mu, self.before_offset_encode_logvar = self.encoder.get_net(self.before_offset_in / 255., self.training, fc_in=self.fc_in, config=ENCODER_CONFIG, reuse=True)  # bugfix: was self.Encoder (AttributeError)
self.before_offset_encode_posterior = tf.distributions.Normal(self.before_offset_encode_mu, tf.sqrt(tf.exp(self.before_offset_encode_logvar)))
self.latent_prior = tf.distributions.Normal(tf.zeros_initializer()(tf.shape(self.encode_mu)), 1.)
self.fitted_prior = tf.distributions.Normal(tf.zeros_initializer()(LATENT_DIM), 1.)
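    # Latent overshooting (in the spirit of multi-step prediction regularisers
    # such as PlaNet's "latent overshooting"): from every timestep, roll the
    # learned dynamics forward up to d steps and collect each multi-step latent
    # prediction, so init_solver() can penalise its KL divergence from the
    # (stop-gradient) encoder posterior at the target timestep.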
def overshoot_latents(self, d=-1):
if d < 0:
d = self.T
if self.use_recurrent_dynamics:
latent_in = tf.reshape(self.decoder_in, [self.batch_size, self.T, LATENT_DIM])
task_in = tf.reshape(self.task_in, [self.batch_size, self.T, self.task_dim])
z_in = tf.concat([latent_in, task_in], axis=-1)
latent_mu = tf.reshape(self.conditional_encode_mu, [self.batch_size, self.T, LATENT_DIM])
latent_logvar= tf.reshape(self.conditional_encode_logvar, [self.batch_size, self.T, LATENT_DIM])
cell = self.latent_dynamics.lstm_cell
w = self.latent_dynamics.weights
b = self.latent_dynamics.bias
init_state = self.latent_dynamics.initial_state
last_state = self.latent_dynamics.last_state
zero_state = cell.zero_state(batch_size=self.batch_size, dtype=tf.float32)
outs = {i: [] for i in range(self.T)}
cur_state = zero_state
for i in range(self.T):
cur_out = z_in[:, i, :]
for j in range(i+1, np.minimum(self.T, i+d+1)):
cur_out, cur_state = cell(cur_out, cur_state)
if j == i+1:
next_state = cur_state
cur_out = tf.nn.bias_add(tf.matmul(cur_out, w), b)
outs[j].append(cur_out)
cur_out = tf.split(cur_out, 2, -1)[0]
cur_out = tf.concat([cur_out, task_in[:, j, :]], axis=-1)
cur_state = next_state
else:
latent_in = tf.reshape(self.decoder_in, [self.batch_size, self.T, LATENT_DIM])
task_in = tf.reshape(self.task_in, [self.batch_size, self.T, self.task_dim])
z_in = tf.concat([latent_in, task_in], axis=-1)
latent_mu = tf.reshape(self.conditional_encode_mu, [self.batch_size, self.T, LATENT_DIM])
latent_logvar= tf.reshape(self.conditional_encode_logvar, [self.batch_size, self.T, LATENT_DIM])
outs = {i: [] for i in range(self.T)}
for i in range(self.T):
cur_out = z_in[:, i, :]
for j in range(i+1, self.T):
cur_out = self.latent_dynamics.apply(cur_out)
outs[j].append(cur_out)
cur_out = tf.split(cur_out, 2, -1)[0]
cur_out = tf.concat([cur_out, task_in[:, j, :]], axis=-1)
return outs
def init_solver(self):
import tensorflow as tf
beta = self.beta
beta_d = self.beta_d
# self.decoder_loss = -tf.reduce_sum(tf.log(ecode_posterior.prob(self.x_in)+1e-6), axis=tuple(range(1, len(self.decode_mu.shape))))
self.decoder_loss = tf.reduce_sum(((self.x_in / 255.) - self.decode_mu)**2)#, axis=tuple(range(1, len(self.decode_mu.shape))))
self.loss = self.decoder_loss
if self.use_prior:
self.kl_loss = beta*tf.reduce_sum(tf.distributions.kl_divergence(self.encode_posterior, self.latent_prior))#, axis=tuple(range(1, len(self.encode_mu.shape))))
self.loss += self.kl_loss
# self.elbo = self.decoder_loss + beta * self.kl_loss
# self.loss = self.elbo
if 'unconditional' not in self.train_mode:
# self.conditional_decoder_loss = -tf.reduce_sum(tf.log(conditional_decode_posterior.prob(self.offset_in)+1e-6))#, axis=tuple(range(1, len(self.conditional_decode_mu.shape))))
self.conditional_decoder_loss = tf.reduce_sum((self.offset_in / 255. - self.conditional_decode_mu)**2)#, axis=tuple(range(1, len(self.conditional_decode_mu.shape))))
self.loss += self.conditional_decoder_loss
if self.use_prior:
self.conditional_kl_loss = beta*tf.reduce_sum(tf.distributions.kl_divergence(self.conditional_encode_posterior, self.latent_prior))#, axis=tuple(range(1, len(self.conditional_encode_mu.shape))))
self.loss += self.conditional_kl_loss
# self.conditional_elbo = self.conditional_decoder_loss + beta * self.conditional_kl_loss
self.conditional_prediction_loss = tf.reduce_sum(tf.distributions.kl_divergence(self.conditional_encode_posterior, self.offset_encode_posterior))#, axis=tuple(range(1, len(self.conditional_encode_mu.shape))))
self.loss += self.conditional_prediction_loss
if self.dist_constraint:
self.near_loss = 0.1*tf.reduce_sum(tf.distributions.kl_divergence(self.encode_posterior, self.offset_encode_posterior))#, axis=tuple(range(1, len(self.far_encode_mu.shape))))
self.dist_loss = -0.1*tf.reduce_sum(tf.distributions.kl_divergence(self.offset_encode_posterior, self.before_offset_encode_posterior))#, axis=tuple(range(1, len(self.far_encode_mu.shape))))
self.loss += self.dist_loss + self.near_loss
if self.use_overshooting:
outs = self.overshoot_latents(5)
for t in range(1, self.T):
true_mu, true_logvar = self.offset_encode_mu[t*self.batch_size:(t+1)*self.batch_size], self.offset_encode_logvar[t*self.batch_size:(t+1)*self.batch_size]
true_mu = tf.stop_gradient(true_mu)
true_logvar = tf.stop_gradient(true_logvar)
prior = tf.distributions.Normal(true_mu, tf.sqrt(tf.exp(true_logvar)))
for out in outs[t]:
mu, logvar = tf.split(out, 2, axis=-1)
posterior = tf.distributions.Normal(mu, tf.sqrt(tf.exp(logvar)))
self.loss += 1./(self.T) * beta_d * tf.reduce_sum(tf.distributions.kl_divergence(posterior, prior))#, axis=tuple(range(1, len(self.conditional_encode_mu.shape))))
# self.loss += 1./(self.T) * beta_d * tf.reduce_sum(tf.distributions.kl_divergence(posterior, self.latent_prior))#, axis=tuple(range(1, len(self.conditional_encode_mu.shape))))
self.loss = self.loss / (self.batch_size * self.T)
# if self.dist_constraint:
# offset_loss = tf.reduce_sum((self.encode_mu-self.offset_encode_mu)**2 axis=tuple(range(1, len(self.encode_mu.shape))))
# self.loss += offset_loss
# far_offset_loss = -tf.reduce_sum((self.encode_mu-self.far_offset_encode_mu)**2 axis=tuple(range(1, len(self.encode_mu.shape))))
# self.loss += self.far_offset_loss_mask * far_offset_loss
self.lr = tf.compat.v1.placeholder(tf.float32)
self.opt = tf.train.AdamOptimizer(self.lr)
# sess.run(tf.variables_initializer(self.opt.variables()))
train_op = self.opt.minimize(self.loss)
# opt_grad_vars = self.opt.compute_gradients(self.loss)
# clip_grad = [(tf.clip_by_norm(grad, 1), var) for grad, var in opt_grad_vars if grad is not None]
# train_op = self.opt.apply_gradients(clip_grad)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.train_op = tf.group([train_op, update_ops])
def update(self):
for step in range(self.train_iters):
# start_t = time.time()
ind = np.random.choice(list(range(len(self.obs_data)-self.batch_size)), 1)[0]
# print 'ind:', time.time() - start_t
obs_batch = self.obs_data[ind:ind+self.batch_size]
task_batch = self.task_data[ind:ind+self.batch_size]
# print 'data:', time.time() - start_t
obs = obs_batch[:, 1:self.T+1]
next_obs = obs_batch[:, 2:self.T+2]
before_obs = obs_batch[:, :self.T]
task_path = task_batch[:, :self.T]
# obs = np.concatenate([obs_batch[:, :self.T], np.zeros([self.batch_size, 1]+list(self.obs_dims))], axis=1)
# next_obs = np.concatenate([ obs_batch[:, 1:self.T+1], np.zeros([self.batch_size, 1]+list(self.obs_dims))], axis=1)
# far_obs = np.concatenate([obs_batch[:, 2:self.T+2], np.zeros([self.batch_size, 2]+list(self.obs_dims))], axis=1)
# task_path = np.concatenate([task_batch[:, :self.T], -1*np.ones([self.batch_size, 1, self.task_dim])], axis=1)
obs = obs.reshape([self.batch_size*self.T]+self.obs_dims)
next_obs = next_obs.reshape([self.batch_size*self.T]+self.obs_dims)
before_obs = before_obs.reshape([self.batch_size*self.T]+self.obs_dims)
task_path = task_path.reshape([self.batch_size*self.T, self.task_dim])
# inds = np.random.choice(range(len(self.obs_data)), self.batch_size)
# obs = []
# next_obs = []
# task_path = []
# for i in inds:
# print i
# next_obs_batch = np.array([self.obs_data[i] for i in inds])[0]
# next_task_batch = np.array([self.task_data[i] for i in inds])[0]
# obs1 = next_obs_batch[:self.T-1].reshape([self.T-1]+list(self.obs_dims))
# obs.append(np.concatenate([obs1, np.zeros([1]+list(self.obs_dims))], 0))
# obs2 = next_obs_batch[1:self.T].reshape([self.T-1]+list(self.obs_dims))
# next_obs.append(np.concatenate([np.zeros([1]+list(self.obs_dims)), obs2], 0))
# task = next_task_batch[:self.T-1].reshape([self.T-1, self.task_dim])
# task_path.append(np.concatenate([task, -1*np.ones([1, self.task_dim])], 0))
# print 'start:', time.time() - start_t
self.sess.run(self.train_op, feed_dict={self.x_in: obs,
self.offset_in: next_obs,
self.before_offset_in: before_obs,
self.task_in: task_path,
self.training: True,
self.lr: self.cur_lr,})
# print 'train:', time.time() - start_t
# print step
# inds = np.random.choice(range(len(self.task_data)), 1)#self.batch_size)
# next_obs_batch = np.array([self.obs_data[i] for i in inds])[0]
# next_task_batch = np.array([self.task_data[i] for i in inds])[0]
# obs1 = next_obs_batch[:self.T-1].reshape([-1]+list(self.obs_dims))
# obs2 = next_obs_batch[1:self.T].reshape([-1]+list(self.obs_dims))
# task = next_task_batch[:self.T-1].reshape([-1, self.task_dim])
# self.sess.run(self.train_op, feed_dict={self.x_in: obs1,
# self.offset_in: obs2,
# self.task_in: task,
# self.lr: self.cur_lr,
# self.training: True})
self.cur_lr *= 0.99999
self.load_step += self.train_iters
print(('Updated VAE', self.load_step))
def fit_prior(self):
latents = []
inds = np.random.choice(list(range(len(self.obs_data))), np.minimum(1000, len(self.obs_data)))
for i in range(len(inds)):
print(i)
batch = self.obs_data[inds[i]]
latents.extend(self.get_latents(batch))
self.prior_mean = np.mean(latents, axis=0)
self.prior_std = np.std(latents, axis=0)
self.fitted_prior = tf.distributions.Normal(self.prior_mean, self.prior_std)
def sample_prior(self):
return self.sess.run(self.fitted_prior.sample())
def check_loss(self):
ind = np.random.choice(list(range(len(self.obs_data)-self.batch_size)), 1)[0]
obs_batch = self.obs_data[ind:ind+self.batch_size]
task_batch = self.task_data[ind:ind+self.batch_size]
before_obs = obs_batch[:, :self.T]
obs = obs_batch[:, 1:self.T+1]
next_obs = obs_batch[:, 2:self.T+2]
task_path = task_batch[:, :self.T]
# obs = np.concatenate([obs_batch[:, :self.T-1], np.zeros([self.batch_size, 1]+list(self.obs_dims))], axis=1)
# next_obs = np.concatenate([np.zeros([self.batch_size, 1]+list(self.obs_dims)), obs_batch[:, 1:self.T]], axis=1)
# task_path = np.concatenate([task_batch[:, :self.T-1], -1*np.ones([self.batch_size, 1, self.task_dim])], axis=1)
        # bugfix: the reshapes previously reassigned the wrong arrays (and
        # referenced an undefined `far_obs`, which raised a NameError)
        before_obs = before_obs.reshape([self.batch_size*self.T]+self.obs_dims)
        obs = obs.reshape([self.batch_size*self.T]+self.obs_dims)
        next_obs = next_obs.reshape([self.batch_size*self.T]+self.obs_dims)
task_path = task_path.reshape([self.batch_size*self.T, self.task_dim])
# inds = np.random.choice(range(len(self.obs_data)), self.batch_size)
# obs = []
# next_obs = []
# task_path = []
# for i in inds:
# print i
# next_obs_batch = np.array([self.obs_data[i] for i in inds])[0]
# next_task_batch = np.array([self.task_data[i] for i in inds])[0]
# obs1 = next_obs_batch[:self.T-1].reshape([self.T-1]+list(self.obs_dims))
# obs.append(np.concatenate([obs1, np.zeros([1]+list(self.obs_dims))], 0))
# obs2 = next_obs_batch[1:self.T].reshape([self.T-1]+list(self.obs_dims))
# next_obs.append(np.concatenate([np.zeros([1]+list(self.obs_dims)), obs2], 0))
# task = next_task_batch[:self.T-1].reshape([1, self.task_dim])
# task_path.append(np.concatenate([task, -1*np.ones([1, self.task_dim])], 0))
return self.sess.run(self.loss, feed_dict={self.x_in: obs,
self.offset_in: next_obs,
self.before_offset_in: before_obs,
self.task_in: task_path,
self.training: True,
self.lr: self.cur_lr,})
def get_latents(self, obs):
if len(obs) < self.batch_size*self.T:
s = obs.shape
obs = np.r_[obs, np.zeros((self.batch_size*self.T-s[0], s[1], s[2], s[3]))]
return self.sess.run(self.encode_mu, feed_dict={self.x_in: obs, self.training: True})
def get_next_latents(self, z, task, h=None):
z = np.array(z)
task = np.array(task)
if self.use_recurrent_dynamics:
z = z.reshape((1, 1, LATENT_DIM))
task = task.reshape((1, 1, self.task_dim))
z, h = self.sess.run([self.latent_trans_mu, self.trans_rnn_final_state], feed_dict={self.latent_in: z, self.latent_task_in: task, self.trans_rnn_initial_state: h, self.training: True})
else:
z = self.sess.run(self.latent_trans_mu, feed_dict={self.latent_in: z, self.latent_task_in: task, self.training: True})
h = None
return z.reshape(LATENT_DIM), h
def next_latents_kl_pentalty(self, obs, task):
return self.sess.run(self.conditional_kl_loss, feed_dict={self.x_in: obs, self.task_in: task, self.training: True})
def decode_latent(self, latents):
if len(latents) < self.batch_size*self.T:
s = latents.shape
latents = np.r_[latents, np.zeros((self.batch_size*self.T-s[0], s[1]))]
return self.sess.run(self.decode_mu, feed_dict={self.decoder_in: latents, self.training: True})
def test_decode(self, i=10000, t=3):
o = self.obs_data[i, t].copy()
z = self.get_latents(np.array([o]))
d = self.decode_latent(np.array([z[0]]))
d[d < 0] = 0
d[d > 1] = 1
d = (255*d).astype(np.uint8)
if len(o) < self.batch_size*self.T:
s = o.shape
o = np.r_[[o], np.zeros((self.batch_size*self.T-1, s[0], s[1], s[2]))]
d2 = self.sess.run(self.decode_mu, feed_dict={self.x_in: o, self.training: True})
d2[d2 < 0] = 0
d2[d2 > 1] = 1
d2 = (255.*d2).astype(np.uint8)
return o, d, d2
| 51.892063 | 249 | 0.625413 | 4,669 | 32,692 | 4.143071 | 0.079674 | 0.019644 | 0.035618 | 0.029001 | 0.641956 | 0.559295 | 0.50822 | 0.44634 | 0.417132 | 0.392215 | 0 | 0.015993 | 0.242598 | 32,692 | 629 | 250 | 51.974563 | 0.765236 | 0.227456 | 0 | 0.240506 | 0 | 0.005063 | 0.037025 | 0.003948 | 0 | 0 | 0 | 0 | 0.005063 | 1 | 0.050633 | false | 0.005063 | 0.037975 | 0.005063 | 0.121519 | 0.022785 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa389f109591fd4a273de870b0746fd285d4e8c0 | 1,995 | py | Python | egs/cold/utils_cold.py | jvel07/ast | 600e7cf952ec59ac9cc1bb3170d3da7578e1f384 | [
"BSD-3-Clause"
] | null | null | null | egs/cold/utils_cold.py | jvel07/ast | 600e7cf952ec59ac9cc1bb3170d3da7578e1f384 | [
"BSD-3-Clause"
] | null | null | null | egs/cold/utils_cold.py | jvel07/ast | 600e7cf952ec59ac9cc1bb3170d3da7578e1f384 | [
"BSD-3-Clause"
] | null | null | null | import os
import csv
from os import walk
import pandas as pd
def create_csv_for_ast():
"""
Creates csv file required for the AST pipeline in the form of:
folder,filename,label
    e.g. cold,006B_feher.wav,1
"""
audio_folder = '/home/egasj/data/audio/cold2'
audio_list = os.listdir(audio_folder)
for_csv = []
df_labels = pd.read_csv("data/labels.csv", dtype=str)
labels = df_labels.label.values
label_map = {'1': 'cold', '0': 'healthy'}
for idx, row in df_labels.iterrows():
lbl_num = row['label']
lbl_cat = label_map[lbl_num]
        df_labels.loc[idx, 'folder'] = lbl_cat  # .loc avoids pandas chained assignment and creates the column if missing
df_labels.to_csv("data/cold_meta_2.csv", index=False)
# for class_id, filename in zip(labels, audio_list):
# for_csv.append([label_map[class_id], filename, class_id])
# for_csv.sort()
# with open("data/cold_meta.csv", "w+") as my_csv:
# csv_writer = csv.writer(my_csv, delimiter=',')
# csv_writer.writerow(['folder', 'filename', 'label'])
# csv_writer.writerows(for_csv)
def create_class_label_idx_csv():
"""
Creates csv file required for the AST pipeline in the form of:
index,mid,display_name
    e.g. 1,/m/21rwj01,cold
"""
audio_folder = '/media/jvel/data/audio/cold2'
audio_list = os.listdir(audio_folder)
for_csv = []
df_labels = pd.read_csv("data/labels.csv", dtype=str)
labels = df_labels.label.values
label_map = {'1': 'cold', '0': 'healthy'}
for class_id, filename in zip(labels, audio_list):
for_csv.append([class_id, '/m/21rwj' + str(class_id).zfill(2), label_map[class_id]])
# for_csv.append([class_label, '/m/21rwj' + str(class_label).zfill(2), folder])
with open("data/cold_class_label_indices.csv", "w+") as my_csv:
csvWriter = csv.writer(my_csv, delimiter=',')
csvWriter.writerow(['index', 'mid', 'display_name'])
csvWriter.writerows(for_csv)
# create_csv_for_ast()
# create_class_label_idx_csv() | 31.666667 | 92 | 0.647619 | 294 | 1,995 | 4.142857 | 0.27551 | 0.039409 | 0.036946 | 0.024631 | 0.477011 | 0.385057 | 0.385057 | 0.385057 | 0.385057 | 0.385057 | 0 | 0.013924 | 0.20802 | 1,995 | 63 | 93 | 31.666667 | 0.756962 | 0.336842 | 0 | 0.344828 | 0 | 0 | 0.163249 | 0.070189 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.137931 | 0 | 0.206897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa395d6ef886688eb3608066593248a6c2696e14 | 3,946 | py | Python | src/sql_app/database.py | Wedding-APIs-System/Backend-APi | 5a03be5f36ce8ca7e3abba2d64b63c55752697f3 | [
"MIT"
] | null | null | null | src/sql_app/database.py | Wedding-APIs-System/Backend-APi | 5a03be5f36ce8ca7e3abba2d64b63c55752697f3 | [
"MIT"
] | null | null | null | src/sql_app/database.py | Wedding-APIs-System/Backend-APi | 5a03be5f36ce8ca7e3abba2d64b63c55752697f3 | [
"MIT"
] | null | null | null | import os
import logging
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
logger = logging.getLogger()
def init_connection_engine():
# Database configuration
db_config = {
# [START cloud_sql_mysql_sqlalchemy_limit]
# Pool size is the maximum number of permanent connections to keep.
"pool_size": 5,
# Temporarily exceeds the set pool_size if no connections are available.
"max_overflow": 2,
# The total number of concurrent connections for your application will be
# a total of pool_size and max_overflow.
# [END cloud_sql_mysql_sqlalchemy_limit]
# [START cloud_sql_mysql_sqlalchemy_backoff]
# SQLAlchemy automatically uses delays between failed connection attempts,
# but provides no arguments for configuration.
# [END cloud_sql_mysql_sqlalchemy_backoff]
# [START cloud_sql_mysql_sqlalchemy_timeout]
# 'pool_timeout' is the maximum number of seconds to wait when retrieving a
# new connection from the pool. After the specified amount of time, an
# exception will be thrown.
"pool_timeout": 30, # 30 seconds
# [END cloud_sql_mysql_sqlalchemy_timeout]
# [START cloud_sql_mysql_sqlalchemy_lifetime]
# 'pool_recycle' is the maximum number of seconds a connection can persist.
# Connections that live longer than the specified amount of time will be
# reestablished
"pool_recycle": 1800, # 30 minutes
# [END cloud_sql_mysql_sqlalchemy_lifetime]
}
return init_tcp_connection_engine(db_config)
def init_tcp_connection_engine(db_config):
# [START cloud_sql_mysql_sqlalchemy_create_tcp]
# Remember - storing secrets in plaintext is potentially unsafe. Consider using
# something like https://cloud.google.com/secret-manager/docs/overview to help keep
# secrets secret.
# Google Cloud Sql credentials
    # os.environ (not os.getenv) so a KeyError is raised if the variable doesn't exist
db_user = os.environ["DB_USER"]
db_pass = os.environ["DB_PASS"]
db_name = os.environ["DB_NAME"]
db_host = os.environ["DB_HOST"]
# Extract host and port from db_host
host_args = db_host.split(":")
db_hostname, db_port = host_args[0], int(host_args[1])
pool = sqlalchemy.create_engine(
# Equivalent URL:
# mysql+pymysql://<db_user>:<db_pass>@<db_host>:<db_port>/<db_name>
sqlalchemy.engine.url.URL.create(
drivername="mysql+pymysql",
username=db_user, # e.g. "my-database-user"
password=db_pass, # e.g. "my-database-password"
host=db_hostname, # e.g. "127.0.0.1"
port=db_port, # e.g. 3306
database=db_name, # e.g. "my-database-name"
),
**db_config
)
# [END cloud_sql_mysql_sqlalchemy_create_tcp]
return pool
def create_connection():
'''
    Starts a db connection and creates the `guests` table if it doesn't already exist
'''
db = init_connection_engine()
stmt = "CREATE TABLE IF NOT EXISTS guests (user_id INTEGER PRIMARY KEY AUTO_INCREMENT, \
family_id INT NOT NULL, name VARCHAR(150) NOT NULL, \
phone_number VARCHAR(150) NOT NULL, attendance_confirmation BOOLEAN, \
allergies VARCHAR(250), additional_comments VARCHAR(255))"
try:
with db.connect() as conn:
conn.execute(stmt)
except Exception as e:
logger.exception(e)
def orm_connection():
'''
    Starts a db connection and returns a SQLAlchemy session factory bound to the pooled engine
'''
engine = init_connection_engine()
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
return SessionLocal, engine
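# Usage sketch (assumes DB_USER/DB_PASS/DB_NAME/DB_HOST are set, with DB_HOST
# in "host:port" form, e.g. "127.0.0.1:3306"; using the session as a context
# manager needs SQLAlchemy 1.4+):
# SessionLocal, engine = orm_connection()
# with SessionLocal() as session:
#     session.execute(sqlalchemy.text("SELECT 1"))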
# if __name__=='__main__':
# #create_connection()
# SessionLocal, engine = orm_connection()
| 34.920354 | 95 | 0.678662 | 506 | 3,946 | 5.06917 | 0.369565 | 0.034308 | 0.050682 | 0.089669 | 0.249513 | 0.127485 | 0.069396 | 0.041326 | 0.041326 | 0.041326 | 0 | 0.012024 | 0.241257 | 3,946 | 113 | 96 | 34.920354 | 0.844689 | 0.474404 | 0 | 0 | 0 | 0 | 0.043435 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081633 | false | 0.040816 | 0.122449 | 0 | 0.265306 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa3977f6f37ce0e007f8c173013076ee9e9974f1 | 3,350 | py | Python | toeicbert/bert_toeic.py | graykode/BERT-TOEIC | da97af28e91e843025c8cfeabddd99ed1c0dbcc8 | [
"MIT"
] | 108 | 2019-04-29T18:27:07.000Z | 2021-12-11T13:19:01.000Z | toeicbert/bert_toeic.py | graykode/BERT-TOEIC | da97af28e91e843025c8cfeabddd99ed1c0dbcc8 | [
"MIT"
] | 5 | 2019-05-09T20:18:33.000Z | 2020-06-15T13:40:12.000Z | toeicbert/bert_toeic.py | graykode/BERT-TOEIC | da97af28e91e843025c8cfeabddd99ed1c0dbcc8 | [
"MIT"
] | 23 | 2019-04-30T01:34:39.000Z | 2021-11-06T19:07:06.000Z | """
reference : https://github.com/huggingface/pytorch-pretrained-BERT/issues/80,
https://www.scribendi.ai/can-we-use-bert-as-a-language-model-to-assign-score-of-a-sentence/
code by Tae Hwan Jung(@graykode)
"""
import re
import json
import torch
from unidecode import unidecode
from argparse import ArgumentParser
from pytorch_pretrained_bert import BertTokenizer,BertForMaskedLM
def to_clean(text):
return unidecode(text.strip())
def show(question, candidates, predict_idx, answer=None):
print('=============================')
print('Question : %s\n' % question)
if answer != None:
print('Real Answer : %s\n' % answer)
print('1) %s 2) %s 3) %s 4) %s\n' %
(candidates[0], candidates[1], candidates[2], candidates[3]))
print("BERT's Answer => [%s]\n" % candidates[predict_idx])
def get_score(model, tokenizer, question_tensors, segment_tensors, masked_index, candidate):
candidate_tokens = tokenizer.tokenize(candidate) # warranty -> ['warrant', '##y']
candidate_ids = tokenizer.convert_tokens_to_ids(candidate_tokens)
    with torch.no_grad():  # inference only; avoids building an autograd graph
        predictions = model(question_tensors, segment_tensors)
predictions_candidates = predictions[0, masked_index, candidate_ids].mean()
return predictions_candidates.item()
def solve(row, bertmodel='bert-base-uncased'):
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
tokenizer = BertTokenizer.from_pretrained(bertmodel)
model = BertForMaskedLM.from_pretrained(bertmodel).to(device)
model.eval()
    question = re.sub(r'_+', ' [MASK] ', to_clean(row['question']))  # raw string avoids the invalid '\_' escape
question_tokens = tokenizer.tokenize(question)
masked_index = question_tokens.index('[MASK]')
# make segment which is divided with sentence A or B, but we set all '0' as sentence A
segment_ids = [0] * len(question_tokens)
segment_tensors = torch.tensor([segment_ids]).to(device)
# question tokens convert to ids and tensors
question_ids = tokenizer.convert_tokens_to_ids(question_tokens)
question_tensors = torch.tensor([question_ids]).to(device)
candidates = [to_clean(row['1']), to_clean(row['2']), to_clean(row['3']), to_clean(row['4'])]
predict_tensor = torch.tensor([get_score(model, tokenizer, question_tensors, segment_tensors,
masked_index, candidate) for candidate in candidates])
predict_idx = torch.argmax(predict_tensor).item()
if 'answer' in row:
show(row['question'], candidates, predict_idx, row['answer'])
else:
show(row['question'], candidates, predict_idx, None)
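# Expected JSON layout for --file (inferred from solve()/main(); a sketch, not
# an official schema - the "answer" key is optional):
# {
#   "1": {"question": "Please submit the report _ Friday.",
#         "1": "by", "2": "until", "3": "at", "4": "since", "answer": "by"}
# }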
def main():
parser = ArgumentParser()
parser.add_argument("-m", '--model' , type=str, required=False, default='bert-base-uncased',
choices=["bert-base-uncased", "bert-large-uncased",
"bert-base-cased", "bert-large-cased"])
parser.add_argument("-f", '--file', type=str, required=True)
args = parser.parse_args()
with open(args.file) as data_file:
file = json.load(data_file)
for (key, row) in file.items():
if 'question' in row and '1' in row and '2' in row and '3' in row and '4' in row:
solve(row, args.model)
else:
print('key of %s : No required options.' % key)
continue | 40.853659 | 107 | 0.661194 | 430 | 3,350 | 5.011628 | 0.313953 | 0.01949 | 0.046404 | 0.038979 | 0.126218 | 0.126218 | 0.065893 | 0.065893 | 0.065893 | 0.065893 | 0 | 0.007798 | 0.196119 | 3,350 | 82 | 108 | 40.853659 | 0.792425 | 0.111045 | 0 | 0.034483 | 0 | 0 | 0.113367 | 0.009814 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086207 | false | 0 | 0.103448 | 0.017241 | 0.224138 | 0.103448 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa3bd882152529b2762687cd8d9160289ff2238d | 7,965 | py | Python | online_gp/models/online_ski_regression.py | wjmaddox/online_gp | 3bff4c347263a9b8b1f0aa801a986f4aaa019a66 | [
"Apache-2.0"
] | 31 | 2021-03-05T00:51:34.000Z | 2022-02-07T09:52:20.000Z | online_gp/models/online_ski_regression.py | wjmaddox/online_gp | 3bff4c347263a9b8b1f0aa801a986f4aaa019a66 | [
"Apache-2.0"
] | 1 | 2021-11-24T07:18:28.000Z | 2021-11-24T12:07:20.000Z | online_gp/models/online_ski_regression.py | wjmaddox/online_gp | 3bff4c347263a9b8b1f0aa801a986f4aaa019a66 | [
"Apache-2.0"
] | 1 | 2021-05-19T19:12:36.000Z | 2021-05-19T19:12:36.000Z | import torch
import gpytorch
from gpytorch.mlls import ExactMarginalLogLikelihood
from online_gp.mlls.batched_woodbury_marginal_log_likelihood import BatchedWoodburyMarginalLogLikelihood
from online_gp.models.batched_fixed_noise_online_gp import FixedNoiseOnlineSKIGP
from gpytorch.kernels import RBFKernel, ScaleKernel
from online_gp.models.stems import Identity
from online_gp.mlls.streaming_partial_mll import sm_partial_mll
from online_gp.utils import regression
from torch.optim.lr_scheduler import CosineAnnealingLR
from online_gp.settings import detach_interp_coeff
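# Usage sketch (hypothetical shapes and constructor arguments; the stem maps
# raw inputs into the [-grid_bound, grid_bound] box covered by the SKI grid):
# stem = Identity(...)                            # imported above
# model = OnlineSKIRegression(stem, init_x, init_y, lr=1e-2,
#                             grid_size=32, grid_bound=1.0)
# model.fit(init_x, init_y, num_epochs=50)        # batch (re)training
# model.update(new_x, new_y)                      # streaming conditioning
# mean, var = model.predict(test_x)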
class OnlineSKIRegression(torch.nn.Module):
def __init__(self, stem, init_x, init_y, lr, grid_size, grid_bound, covar_module=None, **kwargs):
super().__init__()
self.stem = stem.to(init_x.device)
assert init_y.ndim == 2, "targets must have explicit output dimension"
if init_y.size(-1) == 1:
target_batch_shape = []
else:
target_batch_shape = torch.Size([init_y.size(-1)])
features = self.stem(init_x).detach()
noise_term = torch.ones_like(init_y)
grid_bound += 1e-1
self.gp = FixedNoiseOnlineSKIGP(
features,
init_y,
noise_term,
covar_module=covar_module,
grid_bounds=torch.tensor([[-grid_bound, grid_bound]] * stem.output_dim),
grid_size=[grid_size] * stem.output_dim,
learn_additional_noise=True
)
self.mll = BatchedWoodburyMarginalLogLikelihood(self.gp.likelihood, self.gp)
self.gp_optimizer = torch.optim.Adam(self.gp.parameters(), lr=lr)
self.stem_optimizer = torch.optim.Adam(self.stem.parameters(), lr=lr)
self._target_batch_shape = target_batch_shape
self.target_dim = init_y.size(-1)
self._raw_inputs = [init_x]
def forward(self, inputs):
inputs = inputs.view(-1, self.stem.input_dim)
features = self.stem(inputs)
return self.gp(features)
def _reshape_targets(self, targets):
targets = targets.view(-1, self.target_dim)
if targets.size(-1) == 1:
targets = targets.squeeze(-1)
else:
targets = targets.t()
return targets
def predict(self, inputs):
self.eval()
pred_dist = self(inputs)
pred_mean = pred_dist.mean.view(-1, self.target_dim)
pred_var = pred_dist.variance.view(-1, self.target_dim)
pred_var = pred_var + self.gp.likelihood.second_noise
return pred_mean, pred_var
def evaluate(self, inputs, targets):
inputs = inputs.view(-1, self.stem.input_dim)
targets = targets.view(-1, self.target_dim)
dataset = torch.utils.data.TensorDataset(inputs, targets)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1024)
# Don't use `torch.no_grad` here, caches will be used for training
self.eval()
rmse, nll = 0, 0
num_batches = len(dataloader)
for input_batch, target_batch in dataloader:
pred_mean, pred_var = self.predict(input_batch)
rmse += (pred_mean - target_batch).pow(2).mean().sqrt().item() / num_batches
diag_dist = torch.distributions.Normal(pred_mean, pred_var.sqrt())
nll += -diag_dist.log_prob(target_batch).mean().item() / num_batches
return rmse, nll
def fit(self, inputs, targets, num_epochs, test_dataset=None):
records = []
gp_lr_sched = CosineAnnealingLR(self.gp_optimizer, num_epochs, 1e-4)
stem_lr_sched = CosineAnnealingLR(self.stem_optimizer, num_epochs, 1e-4)
features = self._refresh_features(inputs, targets)
for epoch in range(num_epochs):
self.train()
self.mll.train()
self.stem_optimizer.zero_grad()
self.gp_optimizer.zero_grad()
train_dist = self.gp(features)
loss = -self.mll(train_dist, targets).sum()
loss.backward()
self.stem_optimizer.step()
self.gp_optimizer.step()
stem_lr_sched.step()
gp_lr_sched.step()
features = self._refresh_features(inputs, targets)
rmse = nll = float('NaN')
if test_dataset is not None:
test_x, test_y = test_dataset[:]
rmse, nll = self.evaluate(test_x, test_y)
records.append({'epoch': epoch + 1, 'train_loss': loss.item(),
'test_rmse': rmse, 'test_nll': nll,
'noise': self.gp.likelihood.second_noise_covar.noise.mean().item()})
with detach_interp_coeff(True):
self._refresh_features(inputs, targets)
self.eval()
return records
def update(self, inputs, targets, update_stem=True, update_gp=True):
inputs = inputs.view(-1, self.stem.input_dim)
targets = targets.view(-1, self.target_dim)
stem_loss = self._update_stem(inputs, targets) if update_stem else 0.
gp_loss = self._update_gp(inputs, targets) if update_gp else 0.
with torch.no_grad():
features = self.stem(inputs)
noise_term = torch.ones_like(targets)
self.gp.condition_on_observations(features, targets, noise_term, inplace=True)
self._raw_inputs = [torch.cat([*self._raw_inputs, inputs])]
self.stem.train()
if update_stem:
            self._get_features(inputs)  # side effect only: refresh BatchNorm running stats
self.eval()
return stem_loss, gp_loss
def _update_gp(self, inputs, targets):
self.gp_optimizer.zero_grad()
self.gp.train()
self.mll.train()
with gpytorch.settings.skip_logdet_forward(True):
features = self.stem(inputs)
train_dist = self.gp(features.detach())
loss = -self.mll(train_dist, targets).sum()
loss.backward()
self.gp_optimizer.step()
self.gp.zero_grad()
self.gp.eval()
return loss.item()
def _update_stem(self, inputs, targets):
self.stem_optimizer.zero_grad()
num_seen = self.gp.num_data
self.stem.eval() # we want deterministic features, so BatchNorm should be in eval mode
new_features = self.stem(inputs)
if new_features.requires_grad is False:
return 0
targets = targets.transpose(-1, -2).unsqueeze(-1)
loss = -sm_partial_mll(self.gp, new_features, targets, num_seen).sum()
loss.backward()
self.stem_optimizer.step()
return loss.item()
def _get_features(self, inputs):
# update batch norm stats
inputs = inputs.view(-1, self.stem.input_dim)
num_inputs = inputs.size(0)
num_seen = self._raw_inputs[0].size(0)
batch_size = 1024
batch_idxs = torch.randint(0, num_seen, (batch_size,))
input_batch = self._raw_inputs[0][batch_idxs]
input_batch = torch.cat([inputs, input_batch])
features = self.stem(input_batch)
return features[:num_inputs]
def _refresh_features(self, inputs, targets):
features = self.stem(inputs)
self.set_train_data(features, targets)
self.gp.zero_grad() # dump W-related caches
# self.gp.init_kernel_cache() # refresh W'y
return features
def set_train_data(self, inputs, targets):
noise = torch.ones_like(targets)
self.gp.set_train_data(inputs, targets, noise)
def set_lr(self, gp_lr, stem_lr=None, bn_mom=None):
stem_lr = gp_lr if stem_lr is None else stem_lr
self.gp_optimizer = torch.optim.Adam(self.gp.parameters(), lr=gp_lr)
self.stem_optimizer = torch.optim.Adam(self.stem.parameters(), lr=stem_lr)
if bn_mom is not None:
for m in self.stem.modules():
if isinstance(m, torch.nn.BatchNorm1d):
m.momentum = bn_mom
@property
def noise(self):
return self.gp.likelihood.noise | 40.227273 | 104 | 0.637288 | 1,027 | 7,965 | 4.703019 | 0.1889 | 0.034783 | 0.01677 | 0.022774 | 0.221532 | 0.157971 | 0.130642 | 0.113043 | 0.087371 | 0.087371 | 0 | 0.007958 | 0.258506 | 7,965 | 198 | 105 | 40.227273 | 0.809854 | 0.027621 | 0 | 0.22619 | 0 | 0 | 0.010725 | 0 | 0 | 0 | 0 | 0 | 0.005952 | 1 | 0.083333 | false | 0 | 0.065476 | 0.005952 | 0.22619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa3fa05f92e375e420e83c370cd6b301c00874c3 | 4,033 | py | Python | parasol/util/logging.py | snasiriany/parasol | 88b99704676fb1253b8bc6402665a3174a00072d | [
"MIT"
] | 66 | 2019-01-07T23:59:26.000Z | 2021-12-29T16:51:56.000Z | parasol/util/logging.py | snasiriany/parasol | 88b99704676fb1253b8bc6402665a3174a00072d | [
"MIT"
] | 8 | 2019-01-09T01:35:54.000Z | 2021-08-23T20:05:03.000Z | parasol/util/logging.py | snasiriany/parasol | 88b99704676fb1253b8bc6402665a3174a00072d | [
"MIT"
] | 21 | 2019-03-26T01:02:33.000Z | 2022-01-26T20:34:34.000Z | import os
from path import Path
import tensorflow as tf
import tempfile
import contextlib
import sys
from abc import ABCMeta, abstractmethod
gfile = tf.gfile
__all__ = ['tee_out']
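# NOTE: the minimal Tee defined next is shadowed by the fuller Tee class
# further below in this module; it is kept here but never used.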
class Tee:
def __init__(self, original, target):
self.original = original
self.target = target
def write(self, b):
self.original.write(b)
self.target.write(b)
@contextlib.contextmanager
def tee_out(out_dir):
out_dir = Path(out_dir)
stdout = tempfile.NamedTemporaryFile(delete=False)
old_stdout = sys.stdout
old_stderr = sys.stderr
stderr = tempfile.NamedTemporaryFile(delete=False)
try:
        with StdoutTee(stdout.name, buff=1), StderrTee(stderr.name, buff=1):  # __enter__ returns None, so "as" bindings would be unused
yield
except:
raise
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
with gfile.GFile(out_dir / 'stdout.log', 'w') as fp:
with gfile.GFile(stdout.name, 'r') as out:
fp.write(out.read())
with gfile.GFile(out_dir / 'stderr.log', 'w') as fp:
with gfile.GFile(stderr.name, 'r') as err:
fp.write(err.read())
os.remove(stdout.name)
os.remove(stderr.name)
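# Usage sketch: stdout/stderr inside the block are tee'd to local temp files
# and copied to <out_dir>/stdout.log and <out_dir>/stderr.log on exit (out_dir
# may be any tf.gfile-supported path, e.g. a GCS bucket):
# with tee_out('logs/run-1'):
#     print('hello')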
class Tee(object):
"""
duplicates streams to a file.
credits : http://stackoverflow.com/q/616645
"""
def __init__(self, filename, mode="a", buff=0, file_filters=None, stream_filters=None):
"""
writes both to stream and to file.
        file_filters is a list of callables that process a string just before
        it is written to the file.
        stream_filters is a list of callables that process a string just before
        it is written to the stream.
        both stream & file filters must return a string or None.
"""
self.filename = filename
self.mode = mode
self.buff = buff
self.file_filters = file_filters or []
self.stream_filters = stream_filters or []
self.stream = None
self.fp = None
@abstractmethod
def set_stream(self, stream):
"""
assigns "stream" to some global variable e.g. sys.stdout
"""
pass
@abstractmethod
def get_stream(self):
"""
returns the original stream e.g. sys.stdout
"""
pass
def write(self, message):
stream_message = message
for f in self.stream_filters:
stream_message = f(stream_message)
if stream_message is None:
break
file_message = message
for f in self.file_filters:
file_message = f(file_message)
if file_message is None:
break
if self.stream and stream_message is not None:
self.stream.write(stream_message)
if self.fp and file_message is not None:
self.fp.write(file_message)
def flush(self):
if self.stream:
self.stream.flush()
if self.fp:
self.fp.flush()
os.fsync(self.fp.fileno())
def __enter__(self):
self.stream = self.get_stream()
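        # note: on Python 3, buff=0 (the class default) is only valid for
        # binary mode; the text-mode open below needs buff >= 1 (tee_out
        # passes buff=1)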
self.fp = open(self.filename, self.mode, self.buff)
self.set_stream(self)
def __exit__(self, *args):
self.close()
def __del__(self):
self.close()
def read(self):
pass
def close(self):
        if self.stream is not None:
self.set_stream(self.stream)
self.stream = None
        if self.fp is not None:
self.fp.close()
self.fp = None
def isatty(self):
return self.stream.isatty()
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.filename)
__str__ = __repr__
__unicode__ = __repr__
class StdoutTee(Tee):
def set_stream(self, stream):
sys.stdout = stream
def get_stream(self):
return sys.stdout
class StderrTee(Tee):
def set_stream(self, stream):
sys.stderr = stream
def get_stream(self):
return sys.stderr
| 25.852564 | 95 | 0.595338 | 511 | 4,033 | 4.514677 | 0.232877 | 0.06502 | 0.041612 | 0.032943 | 0.210663 | 0.153446 | 0.13264 | 0.062419 | 0.062419 | 0.062419 | 0 | 0.003234 | 0.309943 | 4,033 | 155 | 96 | 26.019355 | 0.825728 | 0.115795 | 0 | 0.175926 | 0 | 0 | 0.011638 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.175926 | false | 0.027778 | 0.064815 | 0.037037 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa3fad46de697055a92d5f620e657beb35b10f4b | 14,674 | py | Python | qubiter/adv_applications/StairsCkt_writer.py | yourball/qubiter | 5ef0ea064fa8c9f125f7951a01fbb88504a054a5 | [
"Apache-2.0"
] | 3 | 2019-10-03T04:27:36.000Z | 2021-02-13T17:49:34.000Z | qubiter/adv_applications/StairsCkt_writer.py | yourball/qubiter | 5ef0ea064fa8c9f125f7951a01fbb88504a054a5 | [
"Apache-2.0"
] | null | null | null | qubiter/adv_applications/StairsCkt_writer.py | yourball/qubiter | 5ef0ea064fa8c9f125f7951a01fbb88504a054a5 | [
"Apache-2.0"
] | 2 | 2020-10-07T15:22:19.000Z | 2021-06-07T04:59:58.000Z | from qubiter.SEO_writer import *
import itertools as it
import pprint as pp
import collections as col
class StairsCkt_writer(SEO_writer):
"""
This class is a subclass of class SEO_writer and it writes a "Stairs
Circuit". For example, this is what the Picture file of a Stairs Circuit
looks like for num_bits = 3
U | |
O---U |
@---U |
O---O---U
O---@---U
@---O---U
@---@---U
Here, U is a general U(2) matrix with 4 parameters, all of which can be
made into placeholder variables. If each U is represented by a node and
the controls of each U represent its parents, then this quantum circuit
can be represented by a fully connected Quantum Bayesian Network (QB
net). (See my >10 year old blog called "Quantum Bayesian Networks" for
more info than you would ever want to know about QB nets).
This class can also be asked to construct a QB net that is **not** fully
connected, by limiting the number of controls for a given U to fewer
than all the ones to its left. For example, suppose that in the
num_bits=3 case, we restrict the parents of the U in the last step to
just one, instead of the 2 parents that it has in the fully connected
case. Then we get
U | |
O---U |
@---U |
O---+---U
@---+---U
or
U | |
O---U |
@---U |
| O---U
| @---U
The constructor of this class has as input an ordered dictionary called
gate_str_to_rads_list. This dictionary gives for each gate in the
quantum circuit, a gate string gate_str that specifies the gate.
gate_str_to_rads_list maps gate_str to a list of 4 floats (or
placeholder variables for those floats) for the 4 parameters of the U
matrix. For example, here are possible values for gate_str_to_rads_list
for the num_bits=3 fully connected qb net
with every rads_list item filled with the same constant .3
{'prior': [0.3, 0.3, 0.3, 0.3],
'2F': [0.3, 0.3, 0.3, 0.3],
'2T': [0.3, 0.3, 0.3, 0.3],
'2F1F': [0.3, 0.3, 0.3, 0.3],
'2F1T': [0.3, 0.3, 0.3, 0.3],
'2T1F': [0.3, 0.3, 0.3, 0.3],
'2T1T': [0.3, 0.3, 0.3, 0.3]}
with every rads_list item filled by a random number between 0 and 2pi
{'prior': [0.46731839721496604,
0.012285135138256131,
0.20001353832948487,
0.36694428209569985],
'2F': [4.1968011007222898,
5.1978252498063808,
4.8063090848060321,
4.2509081392354409],
'2T': [4.3359074640905213,
2.0749617893052315,
4.555666727197961,
5.3092010293653802],
'2F1F': [0.99177045463186475,
3.3344615340103325,
2.1441702948866386,
2.4603764283165521],
'2F1T': [4.0909522483111145,
2.0714182784661888,
5.4034187072431923,
6.0856723571386766],
'2T1F': [4.0000452017061194,
3.7193341571216658,
3.381322125034953,
5.4492142181489802],
'2T1T': [6.2597553541046853,
0.077807529496169509,
3.7389318319862217,
6.2233264819972307]}
with every rads_list item filled by a unique placeholder variable string
{'prior': ['#50', '#51', '#52', '#53'],
'2F': ['#500', '#501', '#502', '#503'],
'2T': ['#510', '#511', '#512', '#513'],
'2F1F': ['#5000', '#5001', '#5002', '#5003'],
'2F1T': ['#5010', '#5011', '#5012', '#5013'],
'2T1F': ['#5100', '#5101', '#5102', '#5103'],
'2T1T': ['#5110', '#5111', '#5112', '#5113']}
This is what gate_str_to_rads_list looks like in the num_bits=3 case,
when the last U has only one parent (qbit 2) instead of two parents (
qbits 1 and 2):
{'prior': ['#50', '#51', '#52', '#53'],
'2F': ['#500', '#501', '#502', '#503'],
'2T': ['#510', '#511', '#512', '#513'],
'2F1_': ['#5050', '#5051', '#5052', '#5053'],
'2T1_': ['#5150', '#5151', '#5152', '#5153']}
Note that all placeholder strings begin with '#5' to ensure that once
the hash character is removed, the remaining number doesn't start with
'0'. Note that characters '_' and '5' represent bits whose values are
unspecified.
Attributes
----------
gate_str_to_rads_list : OrderedDict[str, list[float|str]]
"""
def __init__(self, gate_str_to_rads_list, file_prefix, emb, **kwargs):
"""
Constructor
This constructor writes English and Picture files but it doesn't
close those files after writing them. You must do that yourself
using close_files().
Parameters
----------
gate_str_to_rads_list : dict[str, list[float|str]]
file_prefix : str
file prefix for English and Picture files written by this class
emb : CktEmbedder
kwargs : dict
key-word arguments of SEO_writer
Returns
-------
"""
SEO_writer.__init__(self, file_prefix, emb, **kwargs)
self.gate_str_to_rads_list = gate_str_to_rads_list
self.write()
@staticmethod
def get_gate_str_to_rads_list(num_bits, fill_type, rads_const=None,
u2_bit_to_higher_bits=None):
"""
This method returns a gate_str_to_rads_list constructed according to
the specs given by its arguments.
fill_type is a string in ['const', 'rand', '#int']. The three types of
fill_type have already been illustrated in the class docstring. If
the fill_type is 'const', then the method expects a float for
rads_const.
u2_bit_to_higher_bits is used to restrict the controls of each U.
For example, for num_bits=3,
u2_bit_to_higher_bits = {0: [1, 2], 1: [2], 2: []}
specifies a fully connected qb net, whereas
u2_bit_to_higher_bits = {0: [2], 1: [2], 2: []}
means qubit 0 has qubit 2 but not 1 as parent.
Parameters
----------
num_bits : int
fill_type : str
either 'const', 'rand' or '#int'
rads_const : float | None
u2_bit_to_higher_bits : dict[int, list[int]]
Returns
-------
OrderedDict[str, list[float]]
"""
# each "step" may have several gate_str of same length
const_list = [rads_const for k in range(4)]
gate_str_to_rads_list = col.OrderedDict()
if fill_type == 'const':
assert rads_const is not None
gate_str_to_rads_list['prior'] = const_list
elif fill_type == 'rand':
rand_list = list(np.random.random_sample((4,)))
gate_str_to_rads_list['prior'] = rand_list
elif fill_type == '#int':
# all placeholder variables will start
# with 5 to avoid starting with 0
gate_str_to_rads_list['prior'] = ['#50', '#51', '#52', '#53']
else:
assert False, 'unsupported fill type'
pair = ['F', 'T']
singlet = ['_']
for tup_len in range(1, num_bits):
u2_pos = num_bits - tup_len - 1
pa_range = range(u2_pos+1, num_bits)
parent_to_list = {k: pair for k in pa_range}
if u2_bit_to_higher_bits:
parent_to_list = {k: singlet for k in pa_range}
for pa_bit in u2_bit_to_higher_bits[u2_pos]:
assert u2_pos < pa_bit < num_bits
parent_to_list[pa_bit] = pair
list_of_lists = [parent_to_list[k] for k in reversed(pa_range)]
# print("mmmnnnnnn", list_of_lists)
for tuple_of_FTs in it.product(*list_of_lists):
s = ''
for k in range(tup_len):
s += str(num_bits - 1 - k) + tuple_of_FTs[k]
if fill_type == 'const':
gate_str_to_rads_list[s] = const_list
elif fill_type == 'rand':
rand_list = list(2*np.pi*np.random.random_sample((4,)))
gate_str_to_rads_list[s] = rand_list
elif fill_type == '#int':
hash_str = '#5'
for k in range(tup_len):
x = tuple_of_FTs[k]
if x == '_':
hash_str += '5'
elif x == 'F':
hash_str += '0'
elif x == 'T':
hash_str += '1'
gate_str_to_rads_list[s] =\
[hash_str + str(k) for k in range(4)]
else:
assert False
return gate_str_to_rads_list
@staticmethod
def get_all_var_nums(gate_str_to_rads_list):
"""
This method scans each rads_list of gate_str_to_rads_list for items
of the type '#x' where x is an int. Every int x is added to a list
all_var_nums which is returned.
Parameters
----------
gate_str_to_rads_list : OrderedDict[str, list[float|str]]
Returns
-------
list[int]
"""
all_var_nums = []
for rads_list in gate_str_to_rads_list.values():
for rads in rads_list:
if isinstance(rads, str) and rads[0] == '#':
all_var_nums.append(int(rads[1:]))
return all_var_nums
@staticmethod
def get_var_num_to_rads(gate_str_to_rads_list, fill_type,
rads_const=None):
"""
This method returns a dict var_num_to_rads obtained as follows. The
rads lists in gate_str_to_rads_list are scanned for items of the
type '#x' where x is an int. Then x is mapped to a float, either the
constant rads_const if fill_type is 'const', or a random number if
fill type is 'rand'.
Parameters
----------
gate_str_to_rads_list : OrderedDict[str, list[float|str]]
fill_type : str
rads_const : float | None
Returns
-------
dict[int, float]
"""
var_num_to_rads = {}
for rads_list in gate_str_to_rads_list.values():
for rads in rads_list:
if isinstance(rads, str) and rads[0] == '#':
if fill_type == 'const':
assert rads_const is not None
var_num_to_rads[int(rads[1:])] = rads_const
elif fill_type == 'rand':
var_num_to_rads[int(rads[1:])] =\
2*np.pi*np.random.random()
else:
assert False, 'unsupported fill type'
return var_num_to_rads
@staticmethod
def make_array_from_gate_str_to_rads_list(gate_str_to_rads_list):
"""
This method returns a numpy array which is constructed from
gate_str_to_rads_list by vertically stacking (with np.vstack()) its
rads_lists
Parameters
----------
gate_str_to_rads_list : dict[str, list[float|str]]
Returns
-------
np.ndarray
"""
return np.vstack([np.array(rads_list) for rads_list in
gate_str_to_rads_list.values()])
def get_u2_pos(self, gate_str):
"""
Given a well formed gate_str (one of the keys of
gate_str_to_rads_list), this method returns the bit position of the
U(2) matrix.
Parameters
----------
gate_str : str
Returns
-------
int
"""
num_bits = self.emb.num_bits_bef
if gate_str != 'prior':
u2_pos = num_bits - len(gate_str) // 2 - 1
else:
u2_pos = num_bits-1
return u2_pos
@staticmethod
def get_controls_from_gate_str(num_bits, gate_str):
"""
This method returns an object of class Controls, constructed from
info in the input `gate_str` (a well formed key of
gate_str_to_rads_list)
Parameters
----------
num_bits : int
gate_str : str
Returns
-------
Controls
"""
trols = Controls(num_bits)
if gate_str != 'prior':
for k in range(len(gate_str)//2):
trol_pos = int(gate_str[2 * k])
trol_kind = gate_str[2 * k + 1]
# allow for possibility that trol_kind = '_' (no trol)
if trol_kind == 'T':
trols.bit_pos_to_kind[trol_pos] = True
elif trol_kind == 'F':
trols.bit_pos_to_kind[trol_pos] = False
trols.refresh_lists()
return trols
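# For example, with num_bits=3 the gate_str '2T1F' puts a True control on
# qubit 2 and a False control on qubit 1, while '2F1_' leaves qubit 1
# unconstrained (no control).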
def write(self):
"""
This method writes English and Picture files for a Stairs Circuit.
Returns
-------
"""
num_bits = self.emb.num_bits_bef
for gate_str, rads_list in self.gate_str_to_rads_list.items():
trols = StairsCkt_writer.get_controls_from_gate_str(
num_bits, gate_str)
u2_pos = self.get_u2_pos(gate_str)
self.write_controlled_one_bit_gate(u2_pos, trols,
OneBitGates.u2, rads_list)
if __name__ == "__main__":
def main():
num_bits = 3
for fill_type in ['const', 'rand', '#int']:
di = StairsCkt_writer.get_gate_str_to_rads_list(
num_bits, fill_type, rads_const=.3)
pp.pprint(di)
u2_bit_to_higher_bits = {0: [2], 1: [2], 2: []}
di = StairsCkt_writer.get_gate_str_to_rads_list(
num_bits, "#int", u2_bit_to_higher_bits=u2_bit_to_higher_bits)
pp.pprint(di)
vn_to_r = StairsCkt_writer.get_var_num_to_rads(di,
fill_type='const',
rads_const=.3)
pp.pprint(vn_to_r)
arr = StairsCkt_writer.make_array_from_gate_str_to_rads_list(di)
print("arr=\n", arr)
num_bits = 4
gate_str_to_rads_list = StairsCkt_writer.get_gate_str_to_rads_list(
num_bits, '#int')
file_prefix = 'stairs_writer_test'
emb = CktEmbedder(num_bits, num_bits)
wr = StairsCkt_writer(gate_str_to_rads_list, file_prefix, emb)
wr.close_files()
wr.print_eng_file()
wr.print_pic_file()
main()
| 34.690307 | 78 | 0.54784 | 1,952 | 14,674 | 3.872439 | 0.190574 | 0.056489 | 0.050007 | 0.070512 | 0.364863 | 0.296997 | 0.219738 | 0.193147 | 0.157428 | 0.138114 | 0 | 0.087428 | 0.346804 | 14,674 | 422 | 79 | 34.772512 | 0.7012 | 0.462859 | 0 | 0.263889 | 0 | 0 | 0.028432 | 0 | 0 | 0 | 0 | 0 | 0.041667 | 1 | 0.0625 | false | 0 | 0.027778 | 0 | 0.138889 | 0.048611 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa400db121cb6c6f95ccff51526ffebbb7b28f58 | 906 | py | Python | app/config.py | dmitrypol/flask_template | 11a6da0d6a73dde315b304cf9bc55f3f24039db2 | [
"MIT"
] | null | null | null | app/config.py | dmitrypol/flask_template | 11a6da0d6a73dde315b304cf9bc55f3f24039db2 | [
"MIT"
] | null | null | null | app/config.py | dmitrypol/flask_template | 11a6da0d6a73dde315b304cf9bc55f3f24039db2 | [
"MIT"
] | null | null | null | ''' config settings '''
import os
from logging.config import dictConfig
APP_NAME = os.environ.get('app_name')
HOME_DIR = os.environ.get('home_dir')
LOGS_DIR = f'{HOME_DIR}logs/'
TMP_DIR = f'{HOME_DIR}tmp/'
APP_ENV = os.environ.get('APP_ENV', 'dev')
SECRET_KEY = 'foobar'
if APP_ENV == 'test':
pass
dictConfig({
'version': 1,
'formatters': {'default': {
'format': '{timestamp:%(asctime)s, level:%(levelname)s, module:%(module)s, lineno:%(lineno)d, %(message)s}',
}},
'handlers': {
'file': {
'level': 'INFO',
'class': 'logging.handlers.TimedRotatingFileHandler',
'formatter': 'default',
'filename': f'{LOGS_DIR}{APP_NAME}-{APP_ENV}.log',
'when': 'D',
'interval': 1,
'backupCount': 7
}
},
'root': {
'level': 'INFO',
'handlers': ['file']
}
})
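# Usage sketch (hypothetical; assumes a standard Flask app object):
# from flask import Flask
# app = Flask(__name__)
# app.config.from_object('app.config') # picks up SECRET_KEY etc.
# Importing this module already applies dictConfig, so logging is configured
# as a side effect of the import.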
| 25.166667 | 116 | 0.537528 | 101 | 906 | 4.673267 | 0.504951 | 0.059322 | 0.076271 | 0.063559 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004518 | 0.267108 | 906 | 35 | 117 | 25.885714 | 0.706325 | 0.016556 | 0 | 0.064516 | 0 | 0.032258 | 0.412231 | 0.110985 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.032258 | 0.064516 | 0 | 0.064516 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa40a010af57c5955822612ea1119571c07785d6 | 780 | py | Python | demography/models/census_estimate.py | The-Politico/politico-civic-demography | 080bb964b64b06db7fd04386530e893ceed1cf98 | [
"MIT"
] | null | null | null | demography/models/census_estimate.py | The-Politico/politico-civic-demography | 080bb964b64b06db7fd04386530e893ceed1cf98 | [
"MIT"
] | null | null | null | demography/models/census_estimate.py | The-Politico/politico-civic-demography | 080bb964b64b06db7fd04386530e893ceed1cf98 | [
"MIT"
] | null | null | null | from django.db import models
from geography.models import Division
class CensusEstimate(models.Model):
"""
Individual census series estimates.
"""
division = models.ForeignKey(
Division,
on_delete=models.CASCADE,
related_name='census_estimates'
)
variable = models.ForeignKey(
'CensusVariable',
on_delete=models.CASCADE,
related_name='estimates'
)
estimate = models.FloatField()
@property
def full_code(self):
return '{}_{}'.format(
self.variable.table.code,
self.variable.code
)
def __str__(self):
return '{} {}_{}'.format(
self.division.code,
self.variable.table.code,
self.variable.code
)
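# Usage sketch (hypothetical codes):
# est = CensusEstimate.objects.first()
# est.full_code # -> '<table code>_<variable code>', e.g. 'B01001_001'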
| 22.941176 | 39 | 0.588462 | 73 | 780 | 6.123288 | 0.452055 | 0.071588 | 0.107383 | 0.09396 | 0.308725 | 0.308725 | 0.165548 | 0 | 0 | 0 | 0 | 0 | 0.305128 | 780 | 33 | 40 | 23.636364 | 0.824723 | 0.044872 | 0 | 0.230769 | 0 | 0 | 0.071331 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.076923 | 0.076923 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa412488d2e830eb6fa26a4b4d7acb934e19908d | 12,753 | py | Python | PythonPlotting/AcquisitionScript.py | DCHartlen/WindEnergyProject | 5921cbe99e9c744014e41e6dc628c1f7aa2f3b6f | [
"MIT"
] | null | null | null | PythonPlotting/AcquisitionScript.py | DCHartlen/WindEnergyProject | 5921cbe99e9c744014e41e6dc628c1f7aa2f3b6f | [
"MIT"
] | null | null | null | PythonPlotting/AcquisitionScript.py | DCHartlen/WindEnergyProject | 5921cbe99e9c744014e41e6dc628c1f7aa2f3b6f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Python script which handles GUI interaction, data acquisition, filtering
and processing. Generally compiled to a single executable for simpler
distribution.
Created By: D.C. Hartlen, EIT
Created On: 16-JUN-2018
Modified By:
Modified On:
Requires: PlotDataGUI.py (contains all Qt objects and layout)
"""
import PlotDataGUI # This imports py script containing all GUI elements
from PyQt5 import QtGui, QtCore, QtWidgets
import pyqtgraph as pg
import sys
import numpy as np
import time
import serial
import serial.tools.list_ports
class PlottingApp(QtGui.QMainWindow, PlotDataGUI.Ui_MainWindow):
"""Define class which handles all GUI interaction"""
# Define class specific, shared variables
firstRunFlag = True
runningFlag = False
# Qt-specific timer to allow for periodic updating
updateTimer = QtCore.QTimer()
dataArray = np.zeros(1) # Input data array
currentMaxVolts = 0 # Tracks max voltage
stationaryBeforeScroll = 500 # Number of data points visible on screen
iTicker = 0 # Iteration counter. Used to fill array before scrolling starts
# Set a default com port (the last one). Adjustable via dialog box
availablePorts = serial.tools.list_ports.comports()
nPorts = len(availablePorts)
arduinoPort = availablePorts[-1].device
# Low pass filter coef. Adjustable via dialog box
beta = 0.150
#------------------------------------------------------------------------------
def __init__(self, parent=None):
"""Acquisition constructor. Used to define buttons and setup plot"""
# Change pyqtgraph to have white background
pg.setConfigOption('background', 'w')
# Call the parent class constructor (GUI layout comes from PlotDataGUI)
super(PlottingApp, self).__init__(parent)
# Setup the GUI
self.setupUi(self)
# Define information about the plot window specifically
self.mainPlotWindow.plotItem.showGrid(True, True, 0.7)
self.mainPlotWindow.setRange(xRange=[0, self.stationaryBeforeScroll], yRange=[0,0.8])
self.mainPlotWindow.setLabels(left = 'Voltage (V)',bottom = 'Time')
# Define a maximum voltage reached line
self.maxVoltsLine = pg.InfiniteLine(angle=0)
self.maxVoltsLine.setPen(color="FFA500", width=2)
self.mainPlotWindow.addItem(self.maxVoltsLine)
# Define the voltage curve to be plotted (in front of max voltage)
self.voltageCurve = self.mainPlotWindow.plot()
self.voltageCurve.setPen('b',width=2)
# Print to status bar
self.statusbar.showMessage('Ready to go!')
# Define action for when start recording is clicked
self.startPlotting.clicked.connect(self.InitializeRun)
# Define action for the stop recording button
self.stopPlotting.clicked.connect(self.StopTicker)
# Define action for the reset button.
self.resetPlots.clicked.connect(self.resetUI)
# Define a timer object to run method Update Ticker every timeout
self.updateTimer.timeout.connect(self.UpdatePlot)
# Initialize the max voltage
self.maxVoltsOut.insert("%0.3f" % self.currentMaxVolts)
# Define a message box to open when about/information in menu bar is selected
self.actionAbout.triggered.connect(self.AboutMessage)
# Define a message box for help
self.actionHelp.triggered.connect(self.HelpMessages)
# Define a dialog box to select the appropriate COM Port
self.actionSelectCOMPort.triggered.connect(self.dialogSelectPort)
# Define a dialog box to change filter parameters
self.actionSetFilterCoef.triggered.connect(self.dialogFilterParams)
#------------------------------------------------------------------------------
def InitializeRun(self):
"""Define write data to file"""
# Things to be completed during the first activation
if self.firstRunFlag == True:
# Check the comport for functional arduino
self.statusbar.showMessage("Checking COM port for arduino...")
# Connect to com port specified by user. Reads one piece of data to make sure
# it works. If not, will return error message without stopping program.
try:
self.arduinoInput = serial.Serial(self.arduinoPort, 9600, timeout=5)
# Read from serial port. This is encoded in bytes
dataIn = self.arduinoInput.readline()
# Decode the byte
dataIn = float(dataIn[0:len(dataIn)-2].decode("utf-8"))
except:
# If there is an exception, return without starting collection
self.statusbar.showMessage("Connection Failed. Check Port and Arduino.")
return()
self.statusbar.showMessage('Executing First Run Tasks') # In-situ debug
# Change the first run flag to false
self.firstRunFlag = False
self.runningFlag = True
# Disable start button
self.startPlotting.setEnabled(False)
# Start timer for updating the plot
self.updateTimer.start(5)
# flush serial inputs so far
self.arduinoInput.flushInput()
else:
# Debug message
return()
#------------------------------------------------------------------------------
def StopTicker(self):
""" Stops the ticker plot, closes output file."""
if self.runningFlag == True:
self.runningFlag = False
self.updateTimer.stop()
self.stopPlotting.setEnabled(False)
# close com port to arduino
self.arduinoInput.close()
self.statusbar.showMessage('Plotting Stopped. Awaiting Reset')
else:
return()
#------------------------------------------------------------------------------
def resetUI(self):
""" Reset the GUI for the next run """
if not self.runningFlag and not self.firstRunFlag:
# Enable both buttons
self.stopPlotting.setEnabled(True)
self.startPlotting.setEnabled(True)
# Reset flags
self.firstRunFlag = True
# Reset data array and max volt tracker to zero
self.dataArray = np.zeros(1)
self.currentMaxVolts = 0
self.iTicker = 0
# Reset dialog box and plot
self.maxVoltsOut.clear()
self.maxVoltsOut.insert("%0.3f" % self.currentMaxVolts)
self.maxVoltsLine.setValue(self.currentMaxVolts)
self.voltageCurve.setData(self.dataArray)
self.statusbar.showMessage("Ready to go!")
else:
self.statusbar.showMessage("Unable to reset at this time")
return()
#------------------------------------------------------------------------------
def UpdatePlot(self):
"""Updates ticker plot with data from serial port"""
if self.runningFlag == True:
# Read from serial port. This is encoded in bytes
dataIn = self.arduinoInput.readline()
# Decode the byte
dataIn = float(dataIn[0:len(dataIn)-2].decode("utf-8"))
# Filter input data
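# First-order IIR low-pass (exponential moving average):
# y[n] = (1 - beta) * y[n-1] + beta * x[n]; larger beta tracks the input faster.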
filteredIn = (1-self.beta)*self.dataArray[-1] + self.beta*dataIn
# If the number of data points is less than the size of the screen,
# accumulate data
if self.iTicker < self.stationaryBeforeScroll:
# append new data to existing data array
self.dataArray = np.append(self.dataArray,filteredIn)
self.iTicker += 1
# Start scrolling data otherwise.
else:
# slice data array such that all data is rolled back one index
self.dataArray[:-1] = self.dataArray[1:]
# Overwrite last value in array
self.dataArray[-1] = filteredIn
# Print the current input to the status bar
self.statusbar.showMessage("Running: V = %0.3f V" % filteredIn)
# Update the plot for animation
self.voltageCurve.setData(self.dataArray)
# if new voltage is large than maximum voltage, replace max and print to screen
if filteredIn > self.currentMaxVolts:
self.currentMaxVolts = filteredIn
self.maxVoltsOut.clear()
self.maxVoltsOut.insert("%0.3f" % self.currentMaxVolts)
self.maxVoltsLine.setValue(self.currentMaxVolts)
else:
self.statusbar.showMessage('Not Recording')
#------------------------------------------------------------------------------
def AboutMessage(self):
""" Method to create message box which displays "about" information"""
self.statusbar.showMessage('About Information selected')
msgbox = QtWidgets.QMessageBox()
msgbox.setWindowTitle('About')
msgbox.setText('Wind Energy Demonstration\n'\
'Data Collection and Plotting\n'\
'Developed by D.C. Hartlen, 2018\n'\
'Distributed under MIT License\n\n'\
'Qt used in GUI development.\n'\
'Qt distributed under GNU LGPL')
msgbox.exec()
#------------------------------------------------------------------------------
def HelpMessages(self):
""" Method to create message box which provides instructions"""
self.statusbar.showMessage('Help selected')
msgbox = QtWidgets.QMessageBox()
msgbox.setWindowTitle('Help')
msgbox.setText('Operation:\n'\
'1) System attempts to select available COM Port as this is\n'\
' typically where the arduino is located. To manually\n'\
' specify the COM port, go to file and select select \n'\
' "Select COM Port". Choose the appropriate port and OK.\n'\
'2) Press Start to start collecting and and plotting data.\n'\
'3) press Stop to stop plotting. Does not reset plotted data.\n'\
'4) Press Reset to clear plotted data and prepare for next run.')
msgbox.exec()
#------------------------------------------------------------------------------
def dialogSelectPort(self):
""" Method creates a dialog box for the selection of avaiable com ports"""
# Initialize an empty list of com ports
items = [None] * self.nPorts
# Populate list
for i in range(self.nPorts):
items[i] = self.availablePorts[i].device
# Create a dialog instance
selectedPort, okPressed = QtWidgets.QInputDialog.getItem(self,
"Select COM Port",
"Select COM Port:",
items,
0,
False)
# If an item from the list is selected and ok is pressed, return
# the comport name and exit the menu.
if okPressed and selectedPort:
self.statusbar.showMessage(selectedPort)
self.arduinoPort = selectedPort
#------------------------------------------------------------------------------
def dialogFilterParams(self):
""" Method creates a dialog box to set filter parameters """
# Create a dialog instance
newParam, okPressed = QtWidgets.QInputDialog.getDouble(self,
"Set Filter Parameters","Beta (0<beta<1):", self.beta, 0, 1, 3)
# If ok is pressed, store the new filter coefficient and
# exit the dialog.
if okPressed:
self.statusbar.showMessage("New filter coefficient: Beta = %0.3f" % newParam)
self.beta = newParam
#------------------------------------------------------------------------------
# This conditional launches the application when run as a script
if __name__=="__main__":
# Define that the app will draw from PyQt5
app = QtGui.QApplication(sys.argv)
# Set style of app to 'CleanLooks'
style = app.setStyle('CleanLooks')
# Set the layout and behavour of the app by linking it to class generated above
form = PlottingApp()
# Start the app
form.show()
form.update() #start with something
sys.exit(app.exec_())
# Print debug on exit | 42.368771 | 94 | 0.567631 | 1,329 | 12,753 | 5.431904 | 0.316027 | 0.02341 | 0.039895 | 0.009143 | 0.154176 | 0.133398 | 0.101815 | 0.086716 | 0.086716 | 0.086716 | 0 | 0.008604 | 0.298283 | 12,753 | 301 | 95 | 42.368771 | 0.798078 | 0.348153 | 0 | 0.180645 | 0 | 0 | 0.128535 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058065 | false | 0 | 0.051613 | 0 | 0.187097 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa419e59c2d627db6fce2ac5a99fc5030dcc69d2 | 393 | py | Python | Day 04/4.2.who_paying_bill.py | Dheer08/100-days-of-code | 05d0e5e6613f924ab083e13f28a7a0446bd34434 | [
"MIT"
] | null | null | null | Day 04/4.2.who_paying_bill.py | Dheer08/100-days-of-code | 05d0e5e6613f924ab083e13f28a7a0446bd34434 | [
"MIT"
] | null | null | null | Day 04/4.2.who_paying_bill.py | Dheer08/100-days-of-code | 05d0e5e6613f924ab083e13f28a7a0446bd34434 | [
"MIT"
] | null | null | null | import random
seed = int(input("Create a seed number : "))
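# Seeding the RNG makes the random picks below reproducible for a given seed.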
random.seed(seed)
names_string = input("Enter everybody's names, separated by commas : ")
names = names_string.split(",")
# print(names)
length = len(names)
num = random.randint(0,length-1)
print(f"{names[num]} is going to pay for the meal today")
# Another way
print(f"{random.choice(names)} is going to pay for the dinner today") | 23.117647 | 69 | 0.712468 | 63 | 393 | 4.412698 | 0.571429 | 0.071942 | 0.064748 | 0.086331 | 0.129496 | 0.129496 | 0 | 0 | 0 | 0 | 0 | 0.00597 | 0.147583 | 393 | 17 | 69 | 23.117647 | 0.823881 | 0.061069 | 0 | 0 | 0 | 0 | 0.474114 | 0.059946 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa4212a851ccccd0fca582de1bc1bb251ccc6cda | 1,972 | py | Python | utils/bots/TutorBot/cogs/TutorLoop.py | Space-Turtle0/Timmy-SchoolSimplified | 4dae17ed2fb155663e3751377216031b58c5707e | [
"MIT"
] | null | null | null | utils/bots/TutorBot/cogs/TutorLoop.py | Space-Turtle0/Timmy-SchoolSimplified | 4dae17ed2fb155663e3751377216031b58c5707e | [
"MIT"
] | null | null | null | utils/bots/TutorBot/cogs/TutorLoop.py | Space-Turtle0/Timmy-SchoolSimplified | 4dae17ed2fb155663e3751377216031b58c5707e | [
"MIT"
] | null | null | null | from datetime import datetime
import discord
import pytz
from core import database
from discord.ext import commands, tasks
class TutorBotLoop(commands.Cog):
def __init__(self, bot):
self.bot = bot
@tasks.loop(seconds=60.0)
async def voiceCheck(self):
now = datetime.utcnow()
for entry in database.TutorBot_Sessions:
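# Caution: timedelta.seconds is only the seconds component (0-86399) of the
# difference, not total_seconds(), so this window check ignores whole days.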
if (datetime.utcnow() - entry.Date).seconds > 300:
continue
else:
tutor = await self.bot.fetch_user(entry.TutorID)
student = await self.bot.fetch_user(entry.StudentID)
botch = await self.bot.fetch_user(862480236965003275)
embed = discord.Embed(title = "ALERT: You have a Tutor Session Soon!", description = "Please make sure you both communicate and set up a private voice channel!", color = discord.Color.green())
embed.add_field(name = "Tutor Session Details", value = f"**Tutor:** {tutor.name}\n**Student:** {student.name}\n**Session ID:** {entry.SessionID}\n**Time:** {entry.Time}")
try:
await tutor.send(embed = embed)
except:
await botch.send(f"Unable to send a reminder DM to you {tutor.mention}!", embed = embed)
try:
await student.send(embed = embed)
except:
print(f"Unable to Send a Reminder DM to: {student.id}")
#M = now.strftime("%p")
#Time = now.strftime("%-I:%-M")
#Date = now.strftime("%-m/%-d/%Y")
#(database.TutorBot_Sessions.Date == Date) & (database.TutorBot_Sessions.Time == Time) & (database.TutorBot_Sessions.AMorPM == M)
#query = database.TutorBot_Sessions.select().where((database.TutorBot_Sessions.Date == Date) & (database.TutorBot_Sessions.Time == Time))
#if query.exists():
# pass
def setup(bot):
bot.add_cog(TutorBotLoop(bot))
| 38.666667 | 208 | 0.589757 | 231 | 1,972 | 4.965368 | 0.424242 | 0.097646 | 0.146469 | 0.044464 | 0.218832 | 0.200523 | 0.155187 | 0.111595 | 0.111595 | 0.111595 | 0 | 0.017082 | 0.287525 | 1,972 | 50 | 209 | 39.44 | 0.799288 | 0.190162 | 0 | 0.133333 | 0 | 0.033333 | 0.213476 | 0.049748 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.166667 | 0 | 0.266667 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa45ffc6efe22e91100b0576fa5ed00b765b6dda | 699 | py | Python | main.py | keyfun/aws-rekognition-sample | 9982b658697c73878ed448bf80e69105f6cc2eb3 | [
"MIT"
] | null | null | null | main.py | keyfun/aws-rekognition-sample | 9982b658697c73878ed448bf80e69105f6cc2eb3 | [
"MIT"
] | 1 | 2021-06-01T23:59:36.000Z | 2021-06-01T23:59:36.000Z | main.py | keyfun/aws-rekognition-sample | 9982b658697c73878ed448bf80e69105f6cc2eb3 | [
"MIT"
] | null | null | null | import hashlib
from api.rekognition import Rekognition
from api.s3 import S3
collection_id = 'sample'
region = 'ap-northeast-1'
bucket = 'keyfun-' + region + '-rekognition-sample'
sample = 'samples/sample_001.jpg'
image_id = hashlib.md5(sample.encode()).hexdigest()
rek = Rekognition()
s3 = S3(region=region)
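# One-time setup pipeline (left commented out below): create the collection
# and bucket, upload the sample image, detect and index its faces, then
# search the collection by image.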
# rek.create_collection(collection_id=collection_id)
# s3.create_bucket(bucket)
# s3.upload_file(bucket, sample, image_id)
# rek.detect_faces(bucket=bucket, photo=image_id)
# rek.add_faces_to_collection(collection_id, bucket=bucket, photo=image_id)
# rek.search_faces_by_image_collection(collection_id, bucket, image_id)
rek.list_collections()
rek.describe_collection(collection_id)
| 26.884615 | 75 | 0.7897 | 99 | 699 | 5.323232 | 0.363636 | 0.136622 | 0.166983 | 0.083491 | 0.102467 | 0.102467 | 0 | 0 | 0 | 0 | 0 | 0.017296 | 0.090129 | 699 | 25 | 76 | 27.96 | 0.811321 | 0.440629 | 0 | 0 | 0 | 0 | 0.177546 | 0.057441 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa4609c9923ac356c0bb21a663481e4a652a11fd | 2,630 | py | Python | 2021/18/part_1.py | lvaughn/advent | ff3f727b8db1fd9b2a04aad5dcda9a6c8d1c271e | [
"CC0-1.0"
] | null | null | null | 2021/18/part_1.py | lvaughn/advent | ff3f727b8db1fd9b2a04aad5dcda9a6c8d1c271e | [
"CC0-1.0"
] | null | null | null | 2021/18/part_1.py | lvaughn/advent | ff3f727b8db1fd9b2a04aad5dcda9a6c8d1c271e | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
import sys
def add(a, b):
return ['['] + a + [','] + b + [']']
def reduce(num):
changed = True
while changed:
changed, num = do_explode(num)
if not changed:
changed, num = do_split(num)
return num
def do_explode(num):
# returns (changed, value)
depth = 0
for loc in range(len(num)):
if num[loc] == '[':
depth += 1
if depth > 4:
# Boom
left_part = list(num[:loc])
left = num[loc + 1]
right = num[loc + 3]
right_part = num[loc + 5:]
# Fix left
i = len(left_part) - 1
changed = False
while i >= 0 and not changed:
if type(left_part[i]) is int:
left_part[i] += left
changed = True
i -= 1
# Fix right
i = 0
changed = False
while i < len(right_part) and not changed:
if type(right_part[i]) is int:
right_part[i] += right
changed = True
i += 1
# return the value
return True, left_part + [0] + right_part
elif num[loc] == ']':
depth -= 1
return False, num
def do_split(num):
for loc in range(len(num)):
val = num[loc]
if type(val) is int and val >= 10:
return True, num[:loc] + ['[', val // 2, ',', val//2 + val % 2, ']'] + num[loc+1:]
return False, num
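# Snailfish magnitude: 3 * magnitude(left) + 2 * magnitude(right), with a
# plain int being its own magnitude (AoC 2021 day 18).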
def magnitude(num):
if type(num) != list:
return num
return magnitude(num[0]) * 3 + magnitude(num[1]) * 2
def nested_to_array(num):
if type(num) == list:
return ['['] + nested_to_array(num[0]) + [','] + nested_to_array(num[1]) + [']']
return [num]
def array_to_nested(ls):
return eval(''.join(str(a) for a in ls))
numbers = []
with open(sys.argv[1], "r") as infile:
for line in infile:
numbers.append(nested_to_array(eval(line)))
total = numbers[0]
for n in numbers[1:]:
total = reduce(add(total, n))
mag = magnitude(array_to_nested(total))
print(f"Part 1: {mag}")
max_mag = -1
for i in range(len(numbers)-1):
for j in range(i+1, len(numbers)):
s = magnitude(array_to_nested(reduce(add(numbers[i], numbers[j]))))
max_mag = max(max_mag, s)
s = magnitude(array_to_nested(reduce(add(numbers[j], numbers[i]))))
max_mag = max(max_mag, s)
print(f"Part 2: {max_mag}")
| 26.836735 | 94 | 0.484411 | 351 | 2,630 | 3.527066 | 0.216524 | 0.043619 | 0.042003 | 0.038772 | 0.219709 | 0.189015 | 0.063005 | 0.063005 | 0 | 0 | 0 | 0.021951 | 0.376426 | 2,630 | 97 | 95 | 27.113402 | 0.732927 | 0.03308 | 0 | 0.178082 | 0 | 0 | 0.016562 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09589 | false | 0 | 0.013699 | 0.027397 | 0.260274 | 0.027397 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa46a0ab41bf282ac37c5c4ab7b71da7274950af | 4,556 | py | Python | extract_code.py | jithin-mathew/Python-Autocomplete | be24bf029527773a410ad2b5e45ea9f9754feb43 | [
"MIT"
] | 1 | 2020-01-10T04:07:44.000Z | 2020-01-10T04:07:44.000Z | extract_code.py | MindaugasVaitkus2/python_autocomplete | 22eed500f4933420415fdb220454cee9ca7cc730 | [
"MIT"
] | null | null | null | extract_code.py | MindaugasVaitkus2/python_autocomplete | 22eed500f4933420415fdb220454cee9ca7cc730 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Parse all files and write to a single file
"""
import os
from pathlib import Path
from typing import List, NamedTuple
from lab.logger import Logger
from parser import tokenizer
from parser.tokenizer import encode, parse_string
COMMENT = '#'
MULTI_COMMENT = '"""'
_logger = Logger()
class _PythonFile(NamedTuple):
relative_path: str
project: str
path: Path
class _GetPythonFiles:
"""
Get list of python files and their paths inside `data/source` folder
"""
def __init__(self):
self.source_path = Path(os.getcwd()) / 'data' / 'source'
self.files: List[_PythonFile] = []
self.get_python_files(self.source_path)
def add_file(self, path: Path):
"""
Add a file to the list of files
"""
project = path.relative_to(self.source_path).parents
project = project[len(project) - 2]
relative_path = path.relative_to(self.source_path / project)
self.files.append(_PythonFile(relative_path=str(relative_path),
project=str(project),
path=path))
def get_python_files(self, path: Path):
"""
Recursively collect files
"""
for p in path.iterdir():
if p.is_dir():
self.get_python_files(p)
else:
if p.suffix == '.py':
self.add_file(p)
def _fix_indentation(parsed: List[tokenizer.ParsedToken]) -> List[tokenizer.ParsedToken]:
"""
Change indentation tokens. Remove `DEDENT` tokens and
add `INDENT` tokens to each line.
This is easier for prediction.
"""
res: List[tokenizer.ParsedToken] = []
indentation = 0
indented = False
for t in parsed:
if t.type == tokenizer.TokenType.indent:
indentation += 1
elif t.type == tokenizer.TokenType.dedent:
indentation -= 1
elif t.type in [tokenizer.TokenType.new_line,
tokenizer.TokenType.eof]:
indented = False
res.append(t)
else:
if not indented:
for _ in range(indentation):
res.append(tokenizer.ParsedToken(tokenizer.TokenType.indent, 0))
indented = True
res.append(t)
return res
def _remove_comments(parsed: List[tokenizer.ParsedToken]) -> List[tokenizer.ParsedToken]:
"""
Remove comment tokens
"""
res = []
for p in parsed:
if p.type == tokenizer.TokenType.comment:
continue
else:
res.append(p)
return res
def _remove_empty_lines(parsed: List[tokenizer.ParsedToken]) -> List[tokenizer.ParsedToken]:
"""
Remove empty lines
"""
tokens = [tokenizer.TokenType.new_line, tokenizer.TokenType.new_line]
res = []
for p in parsed:
tokens[:-1] = tokens[1:]
tokens[-1] = p.type
all_new_line = True
for t in tokens:
if t != tokenizer.TokenType.new_line:
all_new_line = False
if all_new_line:
continue
else:
res.append(p)
return res
def _read_file(path: Path) -> List[int]:
"""
Read and encode a file
"""
with open(str(path)) as f:
content = f.read()
parsed = parse_string(content)
parsed = _remove_comments(parsed)
parsed = _remove_empty_lines(parsed)
parsed = _fix_indentation(parsed)
serialized = encode(parsed)
# deserialized = tokenizer.deserialize(serialized)
# for i in range(len(serialized)):
# assert deserialized[i] == parsed[i]
#
# res = to_text(deserialized)
# print(res)
return serialized
def main():
source_files = _GetPythonFiles().files
_logger.info(files=len(source_files))
with open(str(Path(os.getcwd()) / 'data' / 'all.py'), 'w') as f:
with _logger.section("Parse", total_steps=len(source_files)):
for i, source in enumerate(source_files):
try:
serialized = _read_file(source.path)
except Exception as e:
print(source.path)
print(e)
continue
serialized = [str(t) for t in serialized]
f.write(f"{str(source.path)}\n")
f.write(" ".join(serialized) + "\n")
f.flush()
_logger.progress(i + 1)
if __name__ == '__main__':
main()
| 26.33526 | 92 | 0.571773 | 520 | 4,556 | 4.859615 | 0.248077 | 0.064108 | 0.066482 | 0.039573 | 0.180451 | 0.151959 | 0.095766 | 0.074397 | 0 | 0 | 0 | 0.002915 | 0.322212 | 4,556 | 172 | 93 | 26.488372 | 0.815415 | 0.117208 | 0 | 0.192308 | 0 | 0 | 0.016499 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.057692 | 0 | 0.221154 | 0.019231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa48815dd6feb0594b2d117c61d0600cad0b6e04 | 4,654 | py | Python | mod/archiver.py | Mtgxyz2/poyobot | 2b0a7d7dc57bd09a5dd91f863de7003abcef38bd | [
"BSD-2-Clause"
] | null | null | null | mod/archiver.py | Mtgxyz2/poyobot | 2b0a7d7dc57bd09a5dd91f863de7003abcef38bd | [
"BSD-2-Clause"
] | null | null | null | mod/archiver.py | Mtgxyz2/poyobot | 2b0a7d7dc57bd09a5dd91f863de7003abcef38bd | [
"BSD-2-Clause"
] | null | null | null | """This is a module that archives channel contents and uploads them as one/
multiple tar.xz files"""
from utils import Cog, is_mod, command
from discord.ext import commands
import os
import io
import hashlib
import datetime
import discord
from .tar import TARInstance
from . import queue
__author__ = "Dark Kirb"
__license__ = "BSD-2clause"
__website__ = "https://github.com/DarkKirb/poyobot/blob/master/mod/archiver.py"
__version__ = "1.0"
dependencies = ["tar", "queue"]
class Archiver(Cog):
@command()
@queue.queue_cmd
async def archiver(self, ctx, channel: discord.TextChannel = None,
include_images: bool = True, name: str=None):
"""Archives the current channel (but doesn't delete it)"""
if channel is None:
channel = ctx.message.channel
if name is None:
name = ""
else:
name += "-"
name += channel.name + "-" + ctx.message.created_at.isoformat()
if not include_images:
name += "-text-only"
if channel.guild.id != ctx.message.guild.id:
await ctx.send("You cannot archive a channel from another guild.")
return
if not await is_mod(ctx.message.guild, ctx.message.author,
channel):
await ctx.send("You need to be moderator to archive a channel!")
return
msg = await ctx.send("Archiving the channel. This might take a while")
number = 0
async with TARInstance(ctx, name) as tar:
tar.mkdir("imgs")
f = None
last_day = None
async def archive_message(message):
nonlocal f, last_day, number
if last_day is None or last_day != message.created_at.date():
if f is not None:
await f.flush()
await f.close()
last_day = message.created_at.date()
fname = os.path.join(last_day.isoformat() + ".log")
f = await tar[fname]
initial_str = (f"[{message.created_at.isoformat()}] " +
f"<{message.author.name}" +
f"#{message.author.discriminator}>")
padding_len = len(initial_str)
padding = ' ' * padding_len
firstline = message.content.split("\n")[0]
await f.write(f"{initial_str} {firstline}\n")
for line in message.content.split("\n")[1:]:
await f.write(f"{padding} {line}\n")
if include_images:
for attachment in message.attachments:
f2 = io.BytesIO()
await attachment.save(f2)
filehash = hashlib.sha256(f2.read()).hexdigest()
fsize = f2.tell()
f2.seek(0)
ext = attachment.filename.rpartition('.')[2]
await f.write(
f"{padding} Attachment " +
f"{attachment.filename} {attachment.url} ")
if fsize < 7*1024*1024:
fname = os.path.join("imgs", f"{filehash}.{ext}")
async with tar.open(fname, "wb") as f3:
await f3.write(f2.read())
await f.write(f"(saved as {fname})\n")
else:
await f.write(f"(too large to be saved 🙁)\n")
f2 = None # save memory
for embed in message.embeds:
await f.write(f"{padding} EMBED: {embed.to_dict()}\n")
number += 1
if not number % 250:
await msg.edit(content=f"Archived {number} messages…\n")
found = True
message_ts = datetime.datetime.fromtimestamp(1420070400)
while found:
found = False
async for message in channel.history(
limit=None,
reverse=True,
after=message_ts):
await archive_message(message)
found = True
message_ts = message.created_at
if f is not None:
await f.flush()
await f.close()
await msg.edit(content="Archived all messages. Packing…\n")
await msg.delete()
await ctx.send("Done.")
def setup(bot):
global cog
cog = Archiver(bot)
| 39.777778 | 79 | 0.497207 | 504 | 4,654 | 4.517857 | 0.35119 | 0.02635 | 0.028986 | 0.031621 | 0.078612 | 0.053579 | 0.029864 | 0.029864 | 0.029864 | 0.029864 | 0 | 0.015468 | 0.402664 | 4,654 | 116 | 80 | 40.12069 | 0.801079 | 0.022991 | 0 | 0.116505 | 0 | 0 | 0.139639 | 0.024314 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009709 | false | 0 | 0.087379 | 0 | 0.126214 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa4afc77e0789b9159c7a048c78462961b5777a0 | 3,136 | py | Python | src/utils.py | Lioscro/cs155-miniproject3 | b0e2d70c3ef55fe6159e06fc7d15a7ce15ac87e5 | [
"MIT"
] | null | null | null | src/utils.py | Lioscro/cs155-miniproject3 | b0e2d70c3ef55fe6159e06fc7d15a7ce15ac87e5 | [
"MIT"
] | null | null | null | src/utils.py | Lioscro/cs155-miniproject3 | b0e2d70c3ef55fe6159e06fc7d15a7ce15ac87e5 | [
"MIT"
] | null | null | null | import os
import pickle
import re
import nltk
# Paths to text files
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_DIR = os.path.join(ROOT_DIR, 'data')
MODELS_DIR = os.path.join(ROOT_DIR, 'models')
SHAKESPEARE_PATH = os.path.join(DATA_DIR, 'shakespeare.txt')
SPENSER_PATH = os.path.join(DATA_DIR, 'spenser.txt')
SYLLABLE_PATH = os.path.join(DATA_DIR, 'Syllable_dictionary.txt')
SHAKESPEARE_PARSER = re.compile(r'\s{19}[0-9]+\n(?P<sonnet>.+?)(?:\n\n\n|$)', re.DOTALL)
SPENSER_PARSER = re.compile(r'[IVXL]+\n\n(?P<sonnet>.+?)(?:\n\n|$)', re.DOTALL)
LINE_PARSER = re.compile(r'\s*(?P<line>.+)\s*')
def check_nltk_package(package):
try:
nltk.data.find(package)
except LookupError:
nltk.download(package)
def save_pickle(obj, path):
"""Pickle and save a Python object to the given path.
"""
with open(path, 'wb') as f:
pickle.dump(obj, f)
def load_pickle(path):
"""Load a pickled Python object from the given path.
"""
with open(path, 'rb') as f:
return pickle.load(f)
def load_shakespeare():
"""Load shakespeare.txt. Returns a list of lists.
The outer list contains sonnets, the inner list contains lines.
All leading and trailing spaces are removed.
This function does no preprocessing.
"""
with open(SHAKESPEARE_PATH, 'r') as f:
text = f.read()
return [
LINE_PARSER.findall(sonnet)
for sonnet in SHAKESPEARE_PARSER.findall(text)
]
def load_spenser():
"""Load shakespeare.txt. Returns a list of lists.
The outer list contains sonnets, the inner list contains lines.
All leading and trailing spaces are removed.
This function does no preprocessing.
"""
with open(SPENSER_PATH, 'r') as f:
text = f.read()
return [
LINE_PARSER.findall(sonnet)
for sonnet in SPENSER_PARSER.findall(text)
]
def syllable_dic():
"""Load Syllable_dictionary.txt. Returns a dictionary; keys are
words and values are syllables.
"""
f = open(SYLLABLE_PATH, 'r')
syllable_data = f.read()
f.close()
remove = ',.?!:;()'
syllable_data_split = syllable_data.split('\n')
syllable_data_split.pop()
keys = []
vals = []
for elem in syllable_data_split:
elem_split = elem.split(' ')
new_key = elem_split[0].lower()
for char in remove:
new_key = new_key.replace(char, '')
keys.append(new_key)
keys.append(new_key + "_e")
if len(elem_split) == 2:
vals.append(elem_split[1])
vals.append(elem_split[1])
elif len(elem_split) == 3:
if 'E' in elem_split[1]:
vals.append(elem_split[2])
vals.append(elem_split[1][1])
elif 'E' in elem_split[2]:
vals.append(elem_split[1])
vals.append(elem_split[2][1])
else:
vals.append(elem_split[2])
vals.append(elem_split[2])
else:
continue
dic = {keys[i]: int(vals[i]) for i in range(len(keys))}
return dic | 30.153846 | 88 | 0.611288 | 439 | 3,136 | 4.225513 | 0.264237 | 0.072776 | 0.060377 | 0.081941 | 0.469542 | 0.422102 | 0.340701 | 0.340701 | 0.334232 | 0.292183 | 0 | 0.008134 | 0.255102 | 3,136 | 104 | 89 | 30.153846 | 0.785959 | 0.196429 | 0 | 0.197183 | 0 | 0.014085 | 0.071778 | 0.040783 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084507 | false | 0 | 0.056338 | 0 | 0.197183 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa4d713b94f6a50481f5b2250ca7e92bece12d55 | 3,059 | py | Python | basic_algorithms/Q_learning.py | ChrisRanger/RL_study | 9e9233401cefa9d2ba44cc063b1c906812bdffe1 | [
"MIT"
] | null | null | null | basic_algorithms/Q_learning.py | ChrisRanger/RL_study | 9e9233401cefa9d2ba44cc063b1c906812bdffe1 | [
"MIT"
] | null | null | null | basic_algorithms/Q_learning.py | ChrisRanger/RL_study | 9e9233401cefa9d2ba44cc063b1c906812bdffe1 | [
"MIT"
] | null | null | null | import gym
import numpy as np
import time
class Qlearning_Agent(object):
def __init__(self, state_dim, act_dim, learning_rate=0.01, gamma=0.9, e_greed=0.1):
# size of action space
# learning rate
# discount factor
# probability for random action
# Q table
self.act_n = act_dim
self.lr = learning_rate
self.gamma = gamma
self.epsilon = e_greed
self.Q = np.zeros((state_dim, act_dim))
# epsilon-greedy to move
def action(self, state):
# greedy
if np.random.uniform(0, 1) < (1.0 - self.epsilon):
action = self.greedy(state)
# random to discover
else:
action = np.random.choice(self.act_n)
return action
# get ptimal action according to Q table and current state
def greedy(self, state):
Q_list = self.Q[state, :] # current state's Q value
maxQ = np.max(Q_list) # find max Q
action_list = np.where(Q_list == maxQ)[0] # when exists many actions for max Q
action = np.random.choice(action_list)
return action
# learning
def iterate(self, state, action, reward, state_next, done):
""" on-policy
state: current state_t
action: action_t
reward: reward for action_t
state_next: state_t+1
action_next: action_t+1
done: if episode is over
"""
current_Q = self.Q[state, action]
if done:
target_Q = reward
else:
# Q learning
target_Q = reward + self.gamma * np.max(self.Q[state_next, :])
self.Q[state, action] += self.lr * (target_Q - current_Q) # update Q table
def test(env, agent):
total_reward = 0
state = env.reset()
while True:
# greedy, no need to exploit
action = agent.greedy(state)
state_next, reward, done, _ = env.step(action)
total_reward += reward
state = state_next
#env.render()
if done:
break
return total_reward
if __name__ == '__main__':
env = gym.make("CliffWalking-v0") # 0 up, 1 right, 2 down, 3 left
agent = Qlearning_Agent(
state_dim=env.observation_space.n,
act_dim=env.action_space.n,
learning_rate=0.1,
gamma=0.9,
e_greed=0.1)
for episode in range(500):
total_steps = 0
total_reward = 0
state = env.reset()
while True:
action = agent.action(state) # 根据算法选择一个动作
state_next, reward, done, _ = env.step(action) # 与环境进行一个交互
# Q learning
agent.iterate(state, action, reward, state_next, done)
# update
state = state_next
total_reward += reward
total_steps += 1
if done:
break
print('Episode %s: steps = %s , reward = %.1f' % (episode, total_steps, total_reward))
#env.render()
test_reward = test(env, agent)
print('test reward = %.1f' % (test_reward)) | 30.59 | 94 | 0.567179 | 397 | 3,059 | 4.198992 | 0.274559 | 0.043191 | 0.023995 | 0.016797 | 0.133173 | 0.133173 | 0.097181 | 0.040792 | 0 | 0 | 0 | 0.016732 | 0.335731 | 3,059 | 100 | 95 | 30.59 | 0.803642 | 0.186009 | 0 | 0.328125 | 0 | 0 | 0.033179 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078125 | false | 0 | 0.046875 | 0 | 0.1875 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa4dec78a51a2a82255bdba515be608593cc536f | 2,611 | py | Python | data/templates/data/upload.mako.py | sumukh210991/Cyberweb | 297bd54c9e223d38818b802087055e397c403f1c | [
"Apache-2.0"
] | null | null | null | data/templates/data/upload.mako.py | sumukh210991/Cyberweb | 297bd54c9e223d38818b802087055e397c403f1c | [
"Apache-2.0"
] | null | null | null | data/templates/data/upload.mako.py | sumukh210991/Cyberweb | 297bd54c9e223d38818b802087055e397c403f1c | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
STOP_RENDERING = runtime.STOP_RENDERING
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1467227488.105134
_enable_loop = True
_template_filename = '/home/sumukh/Documents/thesis/Cyberweb/cyberweb/cyberweb/templates/data/upload.mako'
_template_uri = '/data/upload.mako'
_source_encoding = 'utf-8'
from webhelpers.html import escape
_exports = ['headtags', 'footer', 'header']
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, u'/layout.mako', _template_uri)
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
__M_writer = context.writer()
__M_writer(u'\n')
__M_writer(u'\n\n')
__M_writer(u'\n\n')
__M_writer(u'\n\n')
__M_writer(u'\n\n\n\n')
__M_writer(u'<form action="" enctype="multipart/form-data" method="post">\n<p>\nPlease specify a file to upload:<br>\n<input type="file" name="datafile" size="40">\n</p>\n<div>\n<input type="submit" value="Send">\n</div>\n</form>\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_headtags(context):
__M_caller = context.caller_stack._push_frame()
try:
__M_writer = context.writer()
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_footer(context):
__M_caller = context.caller_stack._push_frame()
try:
__M_writer = context.writer()
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_header(context):
__M_caller = context.caller_stack._push_frame()
try:
__M_writer = context.writer()
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"source_encoding": "utf-8", "line_map": {"64": 8, "33": 2, "34": 3, "35": 6, "36": 9, "37": 12, "38": 17, "74": 68, "44": 5, "48": 5, "54": 11, "68": 8, "58": 11, "28": 0}, "uri": "/data/upload.mako", "filename": "/home/sumukh/Documents/thesis/Cyberweb/cyberweb/cyberweb/templates/data/upload.mako"}
__M_END_METADATA
"""
| 33.050633 | 300 | 0.671773 | 351 | 2,611 | 4.584046 | 0.347578 | 0.056557 | 0.044748 | 0.044748 | 0.500932 | 0.416408 | 0.416408 | 0.416408 | 0.399006 | 0.375388 | 0 | 0.032649 | 0.178859 | 2,611 | 78 | 301 | 33.474359 | 0.717817 | 0.00766 | 0 | 0.580645 | 0 | 0.032258 | 0.170601 | 0.089978 | 0 | 0 | 0 | 0 | 0 | 1 | 0.112903 | false | 0.016129 | 0.032258 | 0 | 0.258065 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa4f0d1977f80c2d73ae349124e162d84a4a043d | 5,348 | py | Python | pascal.py | Alisa1114/Feature-Weighting-and-Boosting | b1251117d576fb7f6b79de70c2d0092519351e6a | [
"BSD-3-Clause"
] | 11 | 2021-02-28T12:54:51.000Z | 2021-12-14T08:18:51.000Z | pascal.py | Alisa1114/Feature-Weighting-and-Boosting | b1251117d576fb7f6b79de70c2d0092519351e6a | [
"BSD-3-Clause"
] | 2 | 2021-06-04T11:16:02.000Z | 2021-12-13T10:20:52.000Z | pascal.py | Alisa1114/Feature-Weighting-and-Boosting | b1251117d576fb7f6b79de70c2d0092519351e6a | [
"BSD-3-Clause"
] | 3 | 2021-07-01T13:53:02.000Z | 2021-11-18T06:39:43.000Z |
import torch.utils.data as data
import os
from PIL import Image
from utils import preprocess, get_cats, AvgPool2d
import numpy as np
import matplotlib.pyplot as plt
import torch
import random
random.seed(1991)
from random import choice
from torchvision import transforms
class VOCSegmentationRandom(data.Dataset):
CLASSES = [
'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'potted-plant', 'sheep', 'sofa', 'train',
'tv/monitor'
]
def __init__(self, root, train=True, transform=None, target_transform=None, download=False,
crop_size=512, group=0, num_folds=4, num_shots=1, batch_size=8, iteration=10000):
self.root = root
_voc_root = os.path.join(self.root, 'VOC2012')
_list_dir = os.path.join(_voc_root, 'list')
self.transform = transform
self.target_transform = target_transform
self.train = train
self.crop_size = crop_size
if group == 'all':
self.cats = range(1, 21)
else:
self.cats = [x + 1 for x in sorted(get_cats('train' if train else 'val', group, num_folds))]
self.cats_set = set(self.cats)
self.num_shots = num_shots
self.batch_size = batch_size
self.cat_dict = {c:i for i, c in enumerate(self.cats)}
if self.train:
_list_f = os.path.join(_list_dir, 'train_aug.txt')
else:
_list_f = os.path.join(_list_dir, 'val.txt')
if train:
data_file = 'data/data_{}_{}.pkl'.format(group, num_folds)
else:
data_file = 'data/pascal_val_{}_{}.pkl'.format(group, num_folds)
if not train:
file = 'data/val_pascal_{}_{}_5shot_new.pkl'.format(group, num_folds)
self.list_data = torch.load(file)
if not os.path.isfile(data_file):
self.images = {k: {} for k in self.cats}
self.list_images = {k: [] for k in self.cats}
with open(_list_f, 'r') as lines:
for line in lines:
img_id = line.split()[0][12:23].replace('\n', '')
print(img_id)
_image = _voc_root + line.split()[0]
_mask = _voc_root + line.split()[1]
labels = np.array(Image.open(_mask))
img_label = set(x for x in np.unique(labels).tolist() if x not in [255, 0] and (labels==x).sum() > 1000)
real_label = list(img_label & self.cats_set)
for label in real_label:
self.images[label][img_id] = (_image, _mask, real_label)
self.list_images[label].append(img_id)
torch.save((self.images, self.list_images), data_file)
print('finished')
else:
self.images, self.list_images = torch.load(data_file)
self.img_transform = transforms.Compose([
transforms.Resize((512, 512)),
transforms.ToTensor()
])
# self.extract_aff_lab_func = ExtractAffinityLabelInRadius(cropsize=crop_size//8, radius=5)
self.iteration = iteration
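# Each item is one few-shot episode: a query image/mask plus num_shots
# support image/mask pairs that share a single class label (picked at random
# during training, read from the precomputed list during validation).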
def __getitem__(self, index):
if self.train:
chosen_label = choice(self.cats)
image_id_q = choice(self.list_images[chosen_label])
image_id_s = choice(self.list_images[chosen_label])
else:
image_id_q, image_id_s, chosen_label = self.list_data[index]
img_q, target_q = self.get_img_info(chosen_label, image_id_q)
if self.num_shots == 1:
img_s, target_s = self.get_img_info(chosen_label, image_id_s)
return img_q, target_q, img_s, target_s, self.cat_dict[chosen_label]
else:
img_s, target_s = [], []
for image_id_ in image_id_s:
img_, target_ = self.get_img_info(chosen_label, image_id_)
img_s.append(img_)
target_s.append(target_)
img_q, img_s[0] = img_s[0], img_q
target_q, target_s[0] = target_s[0], target_q
return img_q, target_q, img_s, target_s, chosen_label
def get_img_info(self, chosen_label, image_id):
# image_id = choice(self.list_images[chosen_label])
image = self.images[chosen_label][image_id]
_img = Image.open(image[0]).convert('RGB')
_target = Image.open(image[1])
_img, _target = preprocess(_img, _target,
flip=False,
scale=None,
crop=(self.crop_size, self.crop_size))
if self.transform is not None:
_img = self.transform(_img)
if self.target_transform is not None:
_target = self.target_transform(_target)
target = torch.zeros_like(_target)
target[_target.int() == int(chosen_label)] = 1
return _img, target
def __len__(self):
if self.train:
return self.iteration * self.batch_size
else:
return len(self.list_data)
if __name__ == '__main__':
dataset = VOCSegmentationRandom('data/VOCdevkit', train=False)
    for sample in dataset:  # renamed from "data", which shadowed the torch.utils.data import
print() | 35.417219 | 124 | 0.577786 | 688 | 5,348 | 4.203488 | 0.242733 | 0.049447 | 0.033887 | 0.037344 | 0.174274 | 0.117911 | 0.107192 | 0.052559 | 0.019364 | 0 | 0 | 0.015447 | 0.310022 | 5,348 | 151 | 125 | 35.417219 | 0.768293 | 0.025991 | 0 | 0.087719 | 0 | 0 | 0.054563 | 0.011527 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035088 | false | 0 | 0.087719 | 0 | 0.184211 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa5066a2301a293878e0443ef2c993e963db12df | 6,833 | py | Python | assignment 7/matrix-hw7/hw7.py | dhruvgairola/linearAlgebra-coursera | 20109133b9e53a7a38cbd17d8ca1fa1316bbf0d3 | [
"MIT"
] | 6 | 2015-09-18T02:07:21.000Z | 2020-04-22T17:05:11.000Z | assignment 7/matrix-hw7/hw7.py | dhruvgairola/linearAlgebra-coursera | 20109133b9e53a7a38cbd17d8ca1fa1316bbf0d3 | [
"MIT"
] | null | null | null | assignment 7/matrix-hw7/hw7.py | dhruvgairola/linearAlgebra-coursera | 20109133b9e53a7a38cbd17d8ca1fa1316bbf0d3 | [
"MIT"
] | 10 | 2015-09-05T03:54:00.000Z | 2020-04-21T12:56:40.000Z | # version code 1049
# Please fill out this stencil and submit using the provided submission script.
from orthogonalization import orthogonalize,project_orthogonal
import orthonormalization
from mat import Mat,transpose
from vec import Vec
from vecutil import list2vec, zero_vec
from matutil import listlist2mat,mat2rowdict,mat2coldict
from QR import factor
from triangular import triangular_solve
from solver import solve
## Problem 1
def basis(vlist):
'''
Input:
- vlist: a list of Vecs
Output:
- a list of linearly independent Vecs with equal span to vlist
'''
return [v for v in orthogonalize(vlist) if square_norm(v) > 1E-20]
def square_norm(v):
return v * v
## Problem 2
def subset_basis(vlist):
'''
Input:
- vlist: a list of Vecs
Output:
- linearly independent subset of vlist with the same span as vlist
'''
return [vlist[k] for k, v in enumerate(orthogonalize(vlist)) if square_norm(v) > 1E-20]
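# A quick illustration (values chosen for clarity; uses the course's list2vec):
#   vlist = [list2vec(v) for v in [[1, 0], [2, 0], [0, 1]]]
#   basis(vlist) returns two orthogonalized Vecs spanning the same space, while
#   subset_basis(vlist) returns the original vlist[0] and vlist[2]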
## Problem 3
def orthogonal_vec2rep(Q, b):
'''
Input:
- Q: an orthogonal Mat
- b: Vec whose domain equals the column-label set of Q.
Output:
- The coordinate representation of b in terms of the rows of Q.
Example:
>>> Q = Mat(({0, 1}, {0, 1}), {(0, 1): 0, (1, 0): 0, (0, 0): 2, (1, 1): 2})
>>> b = Vec({0, 1},{0: 4, 1: 2})
>>> orthogonal_vec2rep(Q, b) == Vec({0, 1},{0: 8, 1: 4})
True
'''
return Q * b
## Problem 4
def orthogonal_change_of_basis(A, B, a):
'''
Input:
- A: an orthogonal Mat
- B: an orthogonal Mat whose column labels are the row labels of A
- a: the coordinate representation in terms of rows of A of some vector v
Output:
- the Vec b such that b is the coordinate representation of v in terms of columns of B
Example:
>>> A = Mat(({0, 1, 2}, {0, 1, 2}), {(0, 1): 0, (1, 2): 0, (0, 0): 1, (2, 0): 0, (1, 0): 0, (2, 2): 1, (0, 2): 0, (2, 1): 0, (1, 1): 1})
>>> B = Mat(({0, 1, 2}, {0, 1, 2}), {(0, 1): 0, (1, 2): 0, (0, 0): 2, (2, 0): 0, (1, 0): 0, (2, 2): 2, (0, 2): 0, (2, 1): 0, (1, 1): 2})
>>> a = Vec({0, 1, 2},{0: 4, 1: 1, 2: 3})
>>> orthogonal_change_of_basis(A, B, a) == Vec({0, 1, 2},{0: 8, 1: 2, 2: 6})
True
'''
return (a * A) * B
## Problem 5
def orthonormal_projection_orthogonal(W, b):
'''
Input:
- W: Mat whose rows are orthonormal
- b: Vec whose labels are equal to W's column labels
Output:
- The projection of b orthogonal to W's row space.
Example:
>>> W = Mat(({0, 1}, {0, 1, 2}), {(0, 1): 0, (1, 2): 0, (0, 0): 1, (1, 0): 0, (0, 2): 0, (1, 1): 1})
>>> b = Vec({0, 1, 2},{0: 3, 1: 1, 2: 4})
>>> orthonormal_projection_orthogonal(W, b) == Vec({0, 1, 2},{0: 0, 1: 0, 2: 4})
True
'''
return project_orthogonal(b,mat2rowdict(W).values())
## Problem 6
# Write your solution for this problem in orthonormalization.py.
## Problem 7
# Write your solution for this problem in orthonormalization.py.
## Problem 8
# Please give each solution as a Vec
least_squares_A1 = listlist2mat([[8, 1], [6, 2], [0, 6]])
least_squares_Q1 = listlist2mat([[.8,-0.099],[.6, 0.132],[0,0.986]])
least_squares_R1 = listlist2mat([[10,2],[0,6.08]])
least_squares_b1 = list2vec([10, 8, 6])
x_hat_1 = Vec({0, 1},{0: 1.0832236842105263, 1: 0.9838815789473685})
least_squares_A2 = listlist2mat([[3, 1], [4, 1], [5, 1]])
least_squares_Q2 = listlist2mat([[.424, .808],[.566, .115],[.707, -.577]])
least_squares_R2 = listlist2mat([[7.07, 1.7],[0,.346]])
least_squares_b2 = list2vec([10,13,15])
x_hat_2 = Vec({0, 1},{0: 2.5010988382075188, 1: 2.658959537572257})
## Problem 9
def QR_solve(A, b):
'''
Input:
- A: a Mat
- b: a Vec
Output:
- vector x that minimizes norm(b - A*x)
Example:
>>> domain = ({'a','b','c'},{'A','B'})
>>> A = Mat(domain,{('a','A'):-1, ('a','B'):2,('b','A'):5, ('b','B'):3,('c','A'):1,('c','B'):-2})
    >>> Q, R = factor(A)
>>> b = Vec(domain[0], {'a': 1, 'b': -1})
>>> x = QR_solve(A, b)
>>> result = A.transpose()*(b-A*x)
>>> result * result < 1E-10
True
'''
    Q, R = factor(A)
    c = transpose(Q) * b
    x_hat = solve(R, c)
    return x_hat
# from vecutil import *
# from orthonormalization import *
# import QR
# from mat import Mat,transpose
# from solver import solve
# from vec import Vec
# from math import sqrt
# import matutil
# from hw7 import *
# vlist =[list2vec(x) for x in [[2, 4, 3, 5, 0], [4, -2, -5, 4, 0], [-8, 14, 21, -2, 0], [-1, -4,-4, 0, 0], [-2, -18, -19, -6, 0], [5, -3, 1, -5, 2]]]
# print(basis(vlist))
# v = [2, 4, 3, 5, 0]
# v * v
# Q = Mat(({0, 1}, {0, 1}), {(0, 1): 0, (1, 0): 0, (0, 0): 2, (1, 1): 2})
# b = Vec({0, 1},{0: 4, 1: 2})
# orthogonal_vec2rep(Q, b) == Vec({0, 1},{0: 8, 1: 4})
# A = Mat(({0, 1, 2}, {0, 1, 2}), {(0, 1): 0, (1, 2): 0, (0, 0): 1, (2, 0): 0, (1, 0): 0, (2, 2): 1, (0, 2): 0, (2, 1): 0, (1, 1): 1})
# B = Mat(({0, 1, 2}, {0, 1, 2}), {(0, 1): 0, (1, 2): 0, (0, 0): 2, (2, 0): 0, (1, 0): 0, (2, 2): 2, (0, 2): 0, (2, 1): 0, (1, 1): 2})
# a = Vec({0, 1, 2},{0: 4, 1: 1, 2: 3})
# orthogonal_change_of_basis(A, B, a) == Vec({0, 1, 2},{0: 8, 1: 2, 2: 6})
# A2 = matutil.listlist2mat([[1/sqrt(2), 1/sqrt(2), 0], [1/sqrt(3), -1/sqrt(3), 1/sqrt(3)], [-1/sqrt(6), 1/sqrt(6), 2/sqrt(6)]])
# B2 = matutil.listlist2mat([[1/sqrt(2), 1/sqrt(2), 0], [1/sqrt(3), -1/sqrt(3), 1/sqrt(3)], [-1/sqrt(6), 1/sqrt(6), 2/sqrt(6)]])
# a2 = Vec({0, 1, 2}, {0: sqrt(2), 1: 1/sqrt(3), 2: 2/sqrt(6)})
# orthogonal_change_of_basis(A2, B2, a2)
# W = Mat(({0, 1}, {0, 1, 2}), {(0, 1): 0, (1, 2): 0, (0, 0): 1, (1, 0): 0, (0, 2): 0, (1, 1): 1})
# b = Vec({0, 1, 2},{0: 3, 1: 1, 2: 4})
# orthonormal_projection_orthogonal(W, b) == Vec({0, 1, 2},{0: 0, 1: 0, 2: 4})
# L = [list2vec(v) for v in [[4,3,1,2],[8,9,-5,-5],[10,1,-1,5]]]
# print(matutil.coldict2mat(L))
# Qlist, Rlist = aug_orthonormalize(L)
# print(matutil.coldict2mat(Qlist))
# print(matutil.coldict2mat(Rlist))
# # to solve prob 8, read lecture 8-8 early half
# B = list2vec([10,8,6])
# Q = matutil.listlist2mat([[0.8, -0.099],[0.6, 0.132],[0,0.986]])
# R = matutil.listlist2mat([[10,2],[0,6.08]])
# A = Q * R
# c = transpose(Q) * B
# x = solve(R, c)
# x
# B = list2vec([10,13,15])
# Q = matutil.listlist2mat([[.424, .808],[.566, .115],[.707, -.577]])
# R = matutil.listlist2mat([[7.07, 1.7],[0,.346]])
# A = Q * R
# c = transpose(Q) * B
# x = solve(R, c)
# x
# domain = ({'a','b','c'},{'A','B'})
# A = Mat(domain,{('a','A'):-1, ('a','B'):2,('b','A'):5, ('b','B'):3,('c','A'):1,('c','B'):-2})
# Q, R = QR.factor(A)
# b = Vec(domain[0], {'a': 1, 'b': -1})
# x = QR_solve(A, b)
# result = A.transpose()*(b-A*x)
# result * result < 1E-10 | 33.660099 | 150 | 0.528026 | 1,225 | 6,833 | 2.900408 | 0.137959 | 0.037715 | 0.023642 | 0.030397 | 0.435407 | 0.407543 | 0.374894 | 0.338869 | 0.319167 | 0.298339 | 0 | 0.13374 | 0.231816 | 6,833 | 203 | 151 | 33.660099 | 0.543151 | 0.710962 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.194444 | false | 0 | 0.25 | 0.027778 | 0.638889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa518462e0d5362162e3cdfe53f5d43249ab4064 | 963 | py | Python | modules/api/src/test/functional/vinyldns_context.py | nspadaccino/vinyldns | 1c2635a4414cfa5e8b28987f12a90ba8c6a09044 | [
"Apache-2.0"
] | null | null | null | modules/api/src/test/functional/vinyldns_context.py | nspadaccino/vinyldns | 1c2635a4414cfa5e8b28987f12a90ba8c6a09044 | [
"Apache-2.0"
] | null | null | null | modules/api/src/test/functional/vinyldns_context.py | nspadaccino/vinyldns | 1c2635a4414cfa5e8b28987f12a90ba8c6a09044 | [
"Apache-2.0"
] | null | null | null | class VinylDNSTestContext:
name_server_ip: str = None
resolver_ip: str = None
dns_zone_name: str = None
dns_key_name: str = None
dns_key: str = None
dns_key_algo: str = None
vinyldns_url: str = None
teardown: bool = False
enable_safety_check: bool = False
@staticmethod
def configure(name_server_ip: str, resolver_ip: str, zone: str, key_name: str, key: str, key_algo: str, url: str, teardown: bool, enable_safety_check: bool = False) -> None:
VinylDNSTestContext.name_server_ip = name_server_ip
VinylDNSTestContext.resolver_ip = resolver_ip
VinylDNSTestContext.dns_zone_name = zone
VinylDNSTestContext.dns_key_name = key_name
VinylDNSTestContext.dns_key = key
VinylDNSTestContext.dns_key_algo = key_algo
VinylDNSTestContext.vinyldns_url = url
VinylDNSTestContext.teardown = teardown
VinylDNSTestContext.enable_safety_check = enable_safety_check
| 41.869565 | 177 | 0.722741 | 119 | 963 | 5.512605 | 0.193277 | 0.074695 | 0.073171 | 0.059451 | 0.131098 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.213915 | 963 | 22 | 178 | 43.772727 | 0.866579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0 | 0 | 0.52381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa53a099a090c0bf220674ca97aced246d4ab29e | 68,314 | py | Python | evohome_rf/parsers.py | NotBobTheBuilder/evohome_rf | c3d9c3563d43fbe19a33c0493cde0864c1f4a23a | [
"MIT"
] | null | null | null | evohome_rf/parsers.py | NotBobTheBuilder/evohome_rf | c3d9c3563d43fbe19a33c0493cde0864c1f4a23a | [
"MIT"
] | null | null | null | evohome_rf/parsers.py | NotBobTheBuilder/evohome_rf | c3d9c3563d43fbe19a33c0493cde0864c1f4a23a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
"""Evohome RF - payload processors."""
import logging
import re
from datetime import datetime as dt
from datetime import timedelta as td
from typing import Optional, Union
from .command import Command
from .const import (
ATTR_DHW_VALVE,
ATTR_DHW_VALVE_HTG,
ATTR_HTG_CONTROL,
CODE_000C_DEVICE_TYPE,
CODE_0005_ZONE_TYPE,
CODE_0418_DEVICE_CLASS,
CODE_0418_FAULT_STATE,
CODE_0418_FAULT_TYPE,
CODE_SCHEMA,
CODES_SANS_DOMAIN_ID,
DOMAIN_TYPE_MAP,
MAY_USE_DOMAIN_ID,
MAY_USE_ZONE_IDX,
SYSTEM_MODE_MAP,
ZONE_MODE_MAP,
__dev_mode__,
)
from .devices import FanSwitch
from .exceptions import CorruptPacketError, CorruptPayloadError
from .helpers import dev_hex_to_id
from .helpers import dtm_from_hex as _dtm
from .helpers import dts_from_hex
from .opentherm import (
EN,
FLAG8,
FLAGS,
HB,
LB,
OPENTHERM_MESSAGES,
OPENTHERM_MSG_TYPE,
S8,
U8,
VAL,
VAR,
ot_msg_value,
parity,
)
from .ramses import RAMSES_CODES, RAMSES_DEVICES, RQ, RQ_MAY_HAVE_PAYLOAD
from .schema import MAX_ZONES
DEV_MODE = __dev_mode__
TEST_MODE = True
_LOGGER = logging.getLogger(__name__)
if DEV_MODE:
_LOGGER.setLevel(logging.DEBUG)
def _idx_new(seqx, msg) -> dict:
# TODO: To rationalise
assert len(seqx) == 2, seqx
if msg.code in CODES_SANS_DOMAIN_ID: # don't idx, even though some != "00"
return {}
if seqx in ("F8", "F9", "FA", "FB", "FC", "FD", "FE"):
return {"domain_id": seqx}
# finally:
assert seqx == "00", seqx
return {}
def _idx(seqx, msg) -> dict:
"""Check the index of a payload (usually a domain id or a zone idx).
Determine if a payload has an entity id, and return: {"id_name": seqx} or {}.
The challenge is that payloads starting with (e.g.):
- "00" are *often not* a zone idx, and
- "01", "02", etc. *may not* be a zone idx
Anything in the range F0-FF appears to be a domain id (no false +ve/-ves).
"""
if msg.code in ("1F09", "1FC9", "2E04"): # no idx, even though some != "00"
# 1F09: "FF" (I), "00" (RP), "F8" (W, after 1FC9)
# 1FC9: dict is currently encoded in a way that id/idx is not used
# 2E04: payload[:2] is system mode, would fail final assert
return {}
# TODO: 000C to a UFC should be ufh_ifx, not zone_idx
elif msg.code == "000C" and msg.src.type == "02":
assert int(seqx, 16) < 0x08, f"unknown ufh_idx: '{seqx}'"
if msg.raw_payload[4:6] == "7F":
return {
"ufh_idx": seqx,
"zone_id": None,
}
assert (
int(msg.raw_payload[4:6], 16) < msg._gwy.config[MAX_ZONES]
), f"unknown zone_idx: '{seqx}'"
return {
"ufh_idx": seqx,
"zone_id": msg.raw_payload[4:6],
}
elif msg.code == "000C":
assert (
int(seqx, 16) < 1 if msg.raw_payload[2:4] == "0D" else 2
), f"unknown _idx: '{seqx}'"
if msg.raw_payload[2:4] in ("0D", "0E"): # ("000D", "000E", "010E")
return {"domain_id": "FA"}
if msg.raw_payload[2:4] == "0F":
return {"domain_id": "FC"}
assert int(seqx, 16) < msg._gwy.config[MAX_ZONES], f"unknown zone_idx: '{seqx}'"
return {"zone_idx": seqx}
elif msg.code == "0418": # log_idx, but dict may include domain_id/zone_idx
assert int(seqx, 16) < 64, f"unknown log_idx: '{seqx}'"
return {"log_idx": seqx} # TODO: a 'null' RP also has log_idx == 0
elif msg.code == "10A0": # can be 2 DHW zones per system
assert seqx in ("00", "01"), f"unknown dhw_idx: '{seqx}'"
return {"dhw_idx": seqx}
elif msg.code == "22C9": # these are UFH-specific
assert int(seqx, 16) < 0x08, f"unknown ufh_idx: '{seqx}'"
return {"ufh_idx": seqx} # TODO: confirm is / is not zone_idx
elif msg.code in ("31D9", "31DA"): # ventilation
assert seqx in ("00", "01", "21"), f"unknown hvac_id: '{seqx}'"
return {} # {"hvac_id": seqx}
# 045 I --- 03:183434 --:------ 03:183434 1060 003 00FF00
if {"03", "12", "22"} & {msg.src.type} and msg.src.type == msg.devs[2].type:
# CM92x can do heating (other_idx = 00) and optionally cooling (other_idx = 01)
# msg.code in ("0008", "0009", "1030", "1060", "1100", "2309", "1030", "313F"):
if msg.code not in ("000A", "1030", "2309", "30C9"):
assert seqx == "00"
return {}
assert int(seqx, 16) < msg._gwy.config[MAX_ZONES]
return {"other_idx": seqx} # TODO: Should be parent_idx, but still a WIP
elif msg.code in ("0002", "2D49"): # non-evohome: hometronics
return {"other_idx": seqx}
elif msg.code == "0016": # WIP, not normally {"uses_zone_idx": True}
# if {"12", "22"} & {msg.src.type, msg.dst.type}:
assert int(seqx, 16) < msg._gwy.config[MAX_ZONES]
idx_name = "zone_idx" if msg.src.type in ("01", "02", "18") else "parent_idx"
return {idx_name: seqx}
elif msg.code in MAY_USE_DOMAIN_ID and seqx in DOMAIN_TYPE_MAP:
# no false +ve/-ves, although FF is not a true domain
return {"domain_id": seqx}
elif msg.code in MAY_USE_ZONE_IDX:
assert (
int(seqx, 16) < msg._gwy.config[MAX_ZONES]
), f"'{seqx}' exceeds max zone index"
if {"01", "02", "23"} & {msg.src.type, msg.dst.type}: # to/from a controller
if msg.src.type == "02" and msg.src == msg.dst:
idx_name = "ufh_idx"
elif msg.src.type in ("01", "02", "23", "18"):
idx_name = "zone_idx"
else:
idx_name = "parent_idx"
return {idx_name: seqx}
# 055 I 028 03:094242 --:------ 03:094242 30C9 003 010B22
elif msg.src.type == "03": # TODO: WIP
return {"parent_idx": seqx} # not zone_idx
elif msg.code in ("????"):
assert seqx == "FF" # only a few "FF"
return {}
assert seqx == "00", f"expecting seqx 00, got: {seqx}"
return {}
def parser_decorator(func):
"""Validate message payload (or meta-data), e.g payload length)."""
def check_verb_code_src(msg) -> None:
# STEP 1: Check verb/code pair against src device type
if msg.src.type not in RAMSES_DEVICES:
raise CorruptPacketError(f"Unknown src device type: {msg.src.id} (0x00)")
elif msg.code not in RAMSES_DEVICES[msg.src.type]:
if RAMSES_DEVICES[msg.src.type]:
raise CorruptPacketError(
f"Invalid code for {msg.src.id}: {msg.code} (0x01)"
)
elif msg.verb not in RAMSES_DEVICES[msg.src.type][msg.code]:
if RAMSES_DEVICES[msg.src.type][msg.code]:
raise CorruptPacketError(
f"Invalid verb/code for {msg.src.id}: {msg.verb}/{msg.code} (0x02)"
)
def check_verb_code_dst(msg) -> None:
# STEP 2: Check (expected) verb/code pair against dst device type
if msg.dst.type in ("--", "63"):
pass
elif msg.dst.type not in RAMSES_DEVICES:
raise CorruptPacketError(f"Unknown dst device type: {msg.dst.id} (0x10)")
elif msg.verb == " I":
pass
elif msg.code not in RAMSES_DEVICES[msg.dst.type]:
if RAMSES_DEVICES[msg.dst.type]:
raise CorruptPacketError(
f"Invalid code for {msg.dst.id}: {msg.code} (0x11)"
)
elif msg.verb == " W" and msg.code in ("0001",):
pass
elif msg.verb == "RQ" and msg.code in ("3EF0",) and msg.dst.type == "13":
# RQ --- 01:145038 13:237335 --:------ 3EF0 001 00 # 13: doesn't RP/3EF0
pass
# else: # TODO: this is a bit problematic
# verb = {"RQ": "RP", "RP": "RQ", " W": " I"}[msg.verb]
# if verb not in RAMSES_DEVICES[msg.dst.type][msg.code]:
# if RAMSES_DEVICES[msg.dst.type][msg.code]:
# raise CorruptPacketError(
# f"Invalid verb/code for {msg.dst.id}: {verb}/{msg.code} (0x12)
# )
def check_verb_code_payload(msg, payload) -> None:
# STEP 2: Check payload against verb/code pair
try:
regexp = RAMSES_CODES[msg.code][msg.verb]
if not re.compile(regexp).match(payload):
raise CorruptPayloadError(f"Expecting payload to match '{regexp}'")
except KeyError:
pass
def wrapper(*args, **kwargs) -> Optional[dict]:
"""Check the length of a payload."""
payload, msg = args[0], args[1]
# STEP 0: Check verb/code pair against src/dst device type & payload
if msg.code != "1FC9":
check_verb_code_src(msg)
check_verb_code_dst(msg)
        # STEP 3: These are exceptions to the following rules
if msg.src.type in ("08", "31"): # Honeywell Jasper HVAC
return func(*args, **kwargs)
check_verb_code_payload(msg, payload) # can't use msg.payload
# STEP 4: Next check W
# z_idx/d_id: 0001, 0008, 1FC9 (array)
# special: 1100 (00|FC)
# zone_idx: 0004, 000A, 2309/2349,
# none_idx: 1F09 (xx), 2E04 (xx), 313F (00)
# unknown: 01D0, 01E9
if msg.verb == " W": # TODO: WIP, need to check _idx()
if msg.code in ("0001",):
return {**_idx(payload[:2], msg), **func(*args, **kwargs)}
# 045 W --- 12:010740 01:145038 --:------ 2309 003 0401F4
if msg.code in ("2309", "2349") and msg.src.type in ("12", "22", "34"):
assert int(payload[:2], 16) < msg._gwy.config[MAX_ZONES]
return func(*args, **kwargs)
# TODO: these are WIP
if msg.code == "1F09":
assert payload[:2] == "F8"
return func(*args, **kwargs)
if msg.code in ("1FC9",):
return func(*args, **kwargs)
# assert payload[:2] in ("00", "FC") # ("1100", "2309", "2349")
return func(*args, **kwargs)
# STEP 5: Then check I, RP
if msg.verb != "RQ": # i.e. in (" I", "RP")
result = func(*args, **kwargs)
if isinstance(result, list):
return result
return {
**_idx(payload[:2], msg),
**result,
}
# STEP 6: Finally check RQ
try:
regexp = RAMSES_CODES[msg.code][RQ]
# assert (
# re.compile(regexp).match(payload)
# ), f"Expecting payload to match '{regexp}'"
except KeyError:
hint1 = " to support an RQ" if msg.code in RAMSES_CODES else ""
hint2 = (
" (OK to ignore)"
if "18" in (msg.src.type, msg.dst.type)
else " - please report to the github repo as an issue"
)
raise CorruptPacketError(f"Code {msg.code} not known{hint1}{hint2}")
else:
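            # NB: no src.type is ever "18a", so the first test below always
            # passes (it looks like a deliberately disabled exemption for "18")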
if msg.src.type != "18a" and not re.compile(regexp).match(payload):
hint2 = (
" (this is OK to ignore)"
if "18" in (msg.src.type, msg.dst.type)
else " - please report this as an issue"
)
raise CorruptPayloadError(f"Payload doesn't match '{regexp}'{hint2}")
result = _idx(payload[:2], msg)
if RAMSES_CODES[msg.code].get(RQ_MAY_HAVE_PAYLOAD):
result.update(func(*args, **kwargs))
return result
return wrapper
def _bool(value: str) -> Optional[bool]: # either 00 or C8
"""Return a boolean."""
assert value in ("00", "C8", "FF"), value
return {"00": False, "C8": True}.get(value)
def _date(value: str) -> Optional[str]: # YY-MM-DD
"""Return a date string in the format YY-MM-DD."""
assert len(value) == 8, value
if value == "FFFFFFFF":
return
return dt(
year=int(value[4:8], 16),
month=int(value[2:4], 16),
day=int(value[:2], 16) & 0b11111, # 1st 3 bits: DayOfWeek
).strftime("%Y-%m-%d")
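# e.g. _date("C10C07E4") -> "2020-12-01": day 0xC1 & 0b11111 == 1 (the top three
# bits of the day byte encode DayOfWeek), month 0x0C == 12, year 0x07E4 == 2020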
def _percent(value: str) -> Optional[float]: # a percentage 0-100% (0.0 to 1.0)
"""Return a percentage, 0-100% with resolution of 0.5%."""
assert len(value) == 2, value
if value in ("FE", "FF"): # TODO: diff b/w FE (seen with 3150) & FF
return
assert int(value, 16) <= 200, "max value should be C8"
return int(value, 16) / 200
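# e.g. _percent("64") -> 0.5 (0x64 == 100, of a maximum of 200, i.e. 0.5% steps)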
def _str(value: str) -> Optional[str]: # printable ASCII characters
"""Return a string of printable ASCII characters."""
_string = bytearray([x for x in bytearray.fromhex(value) if 31 < x < 127])
return _string.decode("ascii").strip() if _string else None
def _temp(value: str) -> Union[float, bool, None]:
"""Return a two's complement Temperature/Setpoint.
    Accepts a string of 4 hex digits (2 bytes).
"""
    assert len(value) == 4, f"{value} should be 2 bytes long"
if value == "31FF": # means: N/A (== 127.99, 2s complement)
return
if value == "7EFF": # possibly only for setpoints?
return False
if value == "7FFF": # also: FFFF?, means: N/A (== 327.67)
return
temp = int(value, 16)
return (temp if temp < 2 ** 15 else temp - 2 ** 16) / 100
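# e.g. _temp("0836") -> 21.02 (0x0836 == 2102), _temp("FF38") -> -2.0
# (0xFF38 == 65336, and 65336 - 2**16 == -200), _temp("7FFF") -> None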
def _flag8(byte, *args) -> list:
"""Split a byte (as a str) into a list of 8 bits (1/0)."""
ret = [0] * 8
byte = bytes.fromhex(byte)[0]
for i in range(0, 8):
ret[i] = byte & 1
byte = byte >> 1
return ret
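# e.g. _flag8("03") -> [1, 1, 0, 0, 0, 0, 0, 0] (least-significant bit first)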
@parser_decorator # rf_unknown
def parser_0001(payload, msg) -> Optional[dict]:
# When in test mode, a 12: will send a W every 6 seconds, *on?* the second:
# 12:39:56.099 061 W --- 12:010740 --:------ 12:010740 0001 005 0000000501
# 12:40:02.098 061 W --- 12:010740 --:------ 12:010740 0001 005 0000000501
# 12:40:08.099 058 W --- 12:010740 --:------ 12:010740 0001 005 0000000501
# sent by a THM every 5s when is signal strength test mode (0505, except 1st pkt)
# 13:48:38.518 080 W --- 12:010740 --:------ 12:010740 0001 005 0000000501
# 13:48:45.518 074 W --- 12:010740 --:------ 12:010740 0001 005 0000000505
# 13:48:50.518 077 W --- 12:010740 --:------ 12:010740 0001 005 0000000505
# sent by a CTL before a rf_check
# 15:12:47.769 053 W --- 01:145038 --:------ 01:145038 0001 005 FC00000505
# 15:12:47.869 053 RQ --- 01:145038 13:237335 --:------ 0016 002 00FF
# 15:12:47.880 053 RP --- 13:237335 01:145038 --:------ 0016 002 0017
# 12:30:18.083 047 W --- 01:145038 --:------ 01:145038 0001 005 0800000505
# 12:30:23.084 049 W --- 01:145038 --:------ 01:145038 0001 005 0800000505
# 15:03:33.187 054 W --- 01:145038 --:------ 01:145038 0001 005 FC00000505
# 15:03:38.188 063 W --- 01:145038 --:------ 01:145038 0001 005 FC00000505
# 15:03:43.188 064 W --- 01:145038 --:------ 01:145038 0001 005 FC00000505
# 15:13:19.757 053 W --- 01:145038 --:------ 01:145038 0001 005 FF00000505
# 15:13:24.758 054 W --- 01:145038 --:------ 01:145038 0001 005 FF00000505
# 15:13:29.758 068 W --- 01:145038 --:------ 01:145038 0001 005 FF00000505
# 15:13:34.759 063 W --- 01:145038 --:------ 01:145038 0001 005 FF00000505
# loopback (not Tx'd) by a HGI80 whenever its button is pressed
# 00:22:41.540 --- I --- --:------ --:------ --:------ 0001 005 00FFFF02FF
# 00:22:41.757 --- I --- --:------ --:------ --:------ 0001 005 00FFFF0200
# 00:22:43.320 --- I --- --:------ --:------ --:------ 0001 005 00FFFF02FF
# 00:22:43.415 --- I --- --:------ --:------ --:------ 0001 005 00FFFF0200
# From a CM927:
# W/--:/--:/12:/00-0000-0501 = Test transmit
# W/--:/--:/12:/00-0000-0505 = Field strength
assert msg.verb in (" I", " W"), msg.verb
assert msg.len == 5, msg.len
assert payload[:2] in ("FC", "FF") or (
int(payload[:2], 16) < msg._gwy.config[MAX_ZONES]
), payload[:2]
assert payload[2:6] in ("0000", "FFFF"), payload[2:6]
assert payload[6:8] in ("02", "05"), payload[6:8]
return {
**_idx(payload[:2], msg), # not fully understood
"unknown_0": payload[2:6],
"unknown_1": payload[6:8],
"unknown_2": payload[8:],
}
@parser_decorator # sensor_weather
def parser_0002(payload, msg) -> Optional[dict]:
# I --- 03:125829 --:------ 03:125829 0002 004 03020105 # seems to be faked
assert msg.len == 4
return {
**_idx(payload[:2], msg),
"temperature": _temp(payload[2:6]),
"_light_level": payload[6:], # light level
}
@parser_decorator # zone_name
def parser_0004(payload, msg) -> Optional[dict]:
# RQ payload is zz00; limited to 12 chars in evohome UI? if "7F"*20: not a zone
assert msg.len == 22, msg.len
assert payload[2:4] == "00", payload[2:4]
if payload[4:] == "7F" * 20:
return {**_idx(payload[:2], msg)}
result = {
**_idx(payload[:2], msg),
"name": _str(payload[4:]),
}
# TODO: remove me...
if TEST_MODE and msg.verb == " W":
cmd = Command.set_zone_name(msg.dst.id, payload[:2], result["name"])
assert cmd.payload == payload, _str(payload)
# TODO: remove me...
return result
@parser_decorator # system_zone (add/del a zone?)
def parser_0005(payload, msg) -> Optional[dict]:
# 047 I --- 34:064023 --:------ 34:064023 0005 012 000A0000 000F0000 00100000
# 045 I --- 01:145038 --:------ 01:145038 0005 004 00000100
    # RQ payload is xx00, controller won't respond to a xx
def _parser(seqx) -> dict:
assert len(seqx) in (8, 12) # 8 for evohome, 12 for Hometronics (16 zones)
assert seqx[:2] == payload[:2]
assert seqx[:2] == "00" # done in _idx
# assert payload[2:4] in CODE_0005_ZONE_TYPE, f"Unknown zone_type: {seqx[2:4]}"
max_zones = msg._gwy.config[MAX_ZONES]
return {
"zone_mask": (_flag8(seqx[4:6]) + _flag8(seqx[6:8]))[:max_zones],
"zone_type": CODE_0005_ZONE_TYPE.get(seqx[2:4], seqx[2:4]),
}
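    # e.g. the CTL packet above ("00000100") decodes to the zone_type for "00"
    # with only the first bit of the zone_mask set, i.e. only zone_idx 00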
if msg.verb == "RQ":
assert payload[:2] == "00", payload[:2]
return {
"zone_type": CODE_0005_ZONE_TYPE.get(payload[2:4], payload[2:4]),
}
assert msg.verb in (" I", "RP")
if msg.src.type == "34":
assert msg.len == 12, msg.len # or % 4?
return [_parser(payload[i : i + 8]) for i in range(0, len(payload), 8)]
assert msg.src.type in ("01", "02") # and "23"?
return _parser(payload)
@parser_decorator # schedule_sync (any changes?)
def parser_0006(payload, msg) -> Optional[dict]:
"""Return the total number of changes to the schedules, including the DHW schedule.
    An RQ is sent every ~60s by an RFG100; an increase will prompt it to send a
    run of RQ/0404s (it seems to assume that only the zones may have changed).
"""
# 16:10:34.288 053 RQ --- 30:071715 01:145038 --:------ 0006 001 00
# 16:10:34.291 053 RP --- 01:145038 30:071715 --:------ 0006 004 00050008
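    # e.g. the RP above ("00050008") decodes to {"change_counter": 8, "_header": "0005"}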
if msg.verb == "RQ":
assert payload == "00" # implies msg.len == 1 byte
return {}
assert msg.verb == "RP"
assert msg.len == 4 # should bs: 0005-nnnn
assert payload[:2] == "00" # otherwise: payload[2:] == "FFFFFF", invalid
if payload[2:] == "FFFFFF": # RP to an invalid RQ
return {}
assert payload[2:4] == "05"
return {
"change_counter": int(payload[4:], 16),
"_header": payload[:4],
}
@parser_decorator # relay_demand (domain/zone/device)
def parser_0008(payload, msg) -> Optional[dict]:
# https://www.domoticaforum.eu/viewtopic.php?f=7&t=5806&start=105#p73681
# e.g. Electric Heat Zone
    if msg.src.type == "31" and msg.len == 13: # Honeywell Jasper ?HVAC
return {
"ordinal": f"0x{payload[2:8]}",
"blob": payload[8:],
}
assert msg.len == 2, "expecting length 2"
if payload[:2] not in ("F9", "FA", "FC"):
assert int(payload[:2], 16) < msg._gwy.config[MAX_ZONES], payload[:2]
return {
**_idx(payload[:2], msg),
"relay_demand": _percent(payload[2:4]),
}
@parser_decorator # relay_failsafe
def parser_0009(payload, msg) -> Union[dict, list]:
"""The relay failsafe mode.
The failsafe mode defines the relay behaviour if the RF communication is lost (e.g.
when a room thermostat stops communicating due to discharged batteries):
enabled - if RF communication is lost, relay will be held in OFF position
disabled - if RF communication is lost, relay will cycle at 20% ON, 80% OFF
    This setting may need to be enabled to ensure frost protect mode.
"""
# TODO: can only be max one relay per domain/zone
# can get: 003 or 006, e.g.: FC01FF-F901FF or FC00FF-F900FF
# 095 I --- 23:100224 --:------ 23:100224 0009 003 0100FF # 2-zone ST9520C
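    # e.g. the ST9520C payload above ("0100FF") decodes, roughly, to zone_idx 01
    # with failsafe_enabled False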
def _parser(seqx) -> dict:
assert (
seqx[:2] in ("F9", "FC") or int(seqx[:2], 16) < msg._gwy.config[MAX_ZONES]
)
assert seqx[2:4] in ("00", "01"), seqx[2:4]
assert seqx[4:] in ("00", "FF"), seqx[4:]
return {
**_idx(seqx[:2], msg),
"failsafe_enabled": {"00": False, "01": True}.get(seqx[2:4]),
}
if msg.is_array:
assert msg.len >= 3 and msg.len % 3 == 0, msg.len # assuming not RQ
return [_parser(payload[i : i + 6]) for i in range(0, len(payload), 6)]
assert msg.len == 3, msg.len
return _parser(payload)
@parser_decorator # zone_config (zone/s)
def parser_000a(payload, msg) -> Union[dict, list, None]:
# 11:21:10.674 063 RQ --- 34:044203 01:158182 --:------ 000A 001 08
# 11:21:10.736 045 RP --- 01:158182 34:044203 --:------ 000A 006 081001F409C4
# 13:13:08.273 045 RQ --- 22:017139 01:140959 --:------ 000A 006 080001F40DAC
# 13:13:08.288 045 RP --- 01:140959 22:017139 --:------ 000A 006 081001F40DAC
def _parser(seqx) -> dict:
# if seqx[2:] == "007FFF7FFF": # (e.g. RP) a null zone
bitmap = int(seqx[2:4], 16)
return {
**_idx(seqx[:2], msg),
"min_temp": _temp(seqx[4:8]),
"max_temp": _temp(seqx[8:]),
"local_override": not bool(bitmap & 1),
"openwindow_function": not bool(bitmap & 2),
"multiroom_mode": not bool(bitmap & 16),
"_unknown_bitmap": f"0b{bitmap:08b}",
} # cannot determine zone_type from this information
if msg.verb == "RQ" and msg.len <= 2:
return _idx(payload[:2], msg)
if msg.is_array: # TODO: these msgs can require 2 pkts!
assert msg.len >= 6 and msg.len % 6 == 0, "expecting length mod 6"
return [_parser(payload[i : i + 12]) for i in range(0, len(payload), 12)]
assert msg.len == 6, "expecting length 6"
result = _parser(payload)
# TODO: remove me...
if TEST_MODE and msg.verb == " W":
KEYS = (
"min_temp",
"max_temp",
"local_override",
"openwindow_function",
"multiroom_mode",
)
cmd = Command.set_zone_config(
msg.dst.id, payload[:2], **{k: v for k, v in result.items() if k in KEYS}
)
assert cmd.payload == payload, cmd.payload
# TODO: remove me...
return result
@parser_decorator # zone_devices
def parser_000c(payload, msg) -> Optional[dict]:
# 045 I --- 34:092243 --:------ 34:092243 000C 018 000A7FFFFFFF 000F7FFFFFFF 00107FFFFFFF # noqa: E501
# 045 RP --- 01:145038 18:013393 --:------ 000C 006 00000010DAFD
# 045 RP --- 01:145038 18:013393 --:------ 000C 012 01000010DAF5 01000010DAFB
    # RQ payload is zz00; NOTE: parsing is aggregated here (one result dict for the whole array)
def _parser(seqx) -> dict:
assert len(seqx) == 12, len(seqx)
assert seqx[:2] == payload[:2], seqx[:2]
# assert seqx[2:4] in CODE_000C_DEVICE_TYPE, f"Unknown device_type: {seqx[2:4]}"
assert seqx[4:6] == "7F" or int(seqx[4:6], 16) < msg._gwy.config[MAX_ZONES]
return {dev_hex_to_id(seqx[6:12]): seqx[4:6]}
if msg.verb == "RQ":
assert msg.len == 2, msg.len
else:
assert msg.len >= 6 and msg.len % 6 == 0, msg.len # assuming not RQ
    device_class = CODE_000C_DEVICE_TYPE.get(payload[2:4], f"unknown_{payload[2:4]}")
if device_class == ATTR_DHW_VALVE and msg.raw_payload[:2] == "01":
device_class = ATTR_DHW_VALVE_HTG
if msg.verb == "RQ":
return {**_idx(payload[:2], msg), "device_class": device_class}
devices = [_parser(payload[i : i + 12]) for i in range(0, len(payload), 12)]
return {
# **_idx(payload[:2], msg),
"device_class": device_class,
"devices": [k for d in devices for k, v in d.items() if v != "7F"],
} # TODO: the assumption that all domain_id/zones_idx are the same is wrong
@parser_decorator # unknown, from STA
def parser_000e(payload, msg) -> Optional[dict]:
assert payload in ("000000", "000014") # rarely, from STA:xxxxxx
return {"unknown_0": payload}
@parser_decorator # rf_check
def parser_0016(payload, msg) -> Optional[dict]:
# TODO: does 0016 include parent_idx
# 09:05:33.178 046 RQ --- 22:060293 01:078710 --:------ 0016 002 0200
# 09:05:33.194 064 RP --- 01:078710 22:060293 --:------ 0016 002 021E
# 12:47:25.080 048 RQ --- 12:010740 01:145038 --:------ 0016 002 0800
# 12:47:25.094 045 RP --- 01:145038 12:010740 --:------ 0016 002 081E
assert msg.verb in ("RQ", "RP"), msg.verb
assert msg.len == 2, msg.len # for both RQ/RP, but RQ/00 will work
# assert payload[:2] == "00" # e.g. RQ/22:/0z00 (parent_zone), but RQ/07:/0000?
rf_value = int(payload[2:4], 16)
return {
"rf_strength": min(int(rf_value / 5) + 1, 5),
"rf_value": rf_value,
}
@parser_decorator # language (of device/system) # NOTE: refactored
def parser_0100(payload, msg) -> Optional[dict]:
if msg.len == 1:
assert msg.verb == "RQ"
return {}
return {
"language": _str(payload[2:6]),
"_unknown_0": payload[6:],
}
@parser_decorator # unknown, from a HR91 (when its buttons are pushed)
def parser_01d0(payload, msg) -> Optional[dict]:
# 23:57:28.869 045 W --- 04:000722 01:158182 --:------ 01D0 002 0003
# 23:57:28.931 045 I --- 01:158182 04:000722 --:------ 01D0 002 0003
# 23:57:31.581 048 W --- 04:000722 01:158182 --:------ 01E9 002 0003
# 23:57:31.643 045 I --- 01:158182 04:000722 --:------ 01E9 002 0000
# 23:57:31.749 050 W --- 04:000722 01:158182 --:------ 01D0 002 0000
# 23:57:31.811 045 I --- 01:158182 04:000722 --:------ 01D0 002 0000
assert msg.len == 2, msg.len
assert payload[2:] in ("00", "03"), payload[2:]
return {"unknown_0": payload[2:]}
@parser_decorator # unknown, from a HR91 (when its buttons are pushed)
def parser_01e9(payload, msg) -> Optional[dict]:
# 23:57:31.581348 048 W --- 04:000722 01:158182 --:------ 01E9 002 0003
# 23:57:31.643188 045 I --- 01:158182 04:000722 --:------ 01E9 002 0000
assert msg.len == 2, msg.len
assert payload[2:] in ("00", "03"), payload[2:]
return {"unknown_0": payload[2:]}
@parser_decorator # zone_schedule (fragment)
def parser_0404(payload, msg) -> Optional[dict]:
    # Retrieval of Zone schedule
# 18:02:53.700 057 RQ --- 30:185469 01:037519 --:------ 0404 007 00200008000100
# 18:02:53.764 052 RP --- 01:037519 30:185469 --:------ 0404 048 002000082901036...
# 18:02:55.606 054 RQ --- 30:185469 01:037519 --:------ 0404 007 00200008000203
# 18:02:55.652 053 RP --- 01:037519 30:185469 --:------ 0404 048 002000082902034D...
# 18:02:57.300 054 RQ --- 30:185469 01:037519 --:------ 0404 007 00200008000303
# 18:02:57.338 052 RP --- 01:037519 30:185469 --:------ 0404 038 002000081F0303C1...
    # Retrieval of DHW schedule
# 18:04:26.097 055 RQ --- 30:185469 01:037519 --:------ 0404 007 00230008000100
# 18:04:26.170 049 RP --- 01:037519 30:185469 --:------ 0404 048 0023000829010368...
# 18:04:30.097 054 RQ --- 30:185469 01:037519 --:------ 0404 007 00230008000203
# 18:04:30.144 047 RP --- 01:037519 30:185469 --:------ 0404 048 00230008290203ED...
# 18:04:34.997 056 RQ --- 30:185469 01:037519 --:------ 0404 007 00230008000303
# 18:04:35.019 047 RP --- 01:037519 30:185469 --:------ 0404 014 002300080703031F...
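    # e.g. the first RQ above ("00200008000100") requests fragment 1 of a zone
    # schedule (frag_total and frag_length are 0 because they are not yet known)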
def _header(seqx) -> dict:
assert seqx[2:4] in ("20", "23"), seqx[2:4] # Zones, DHW
assert seqx[4:8] == "0008", seqx[4:8]
return {
# **_idx(payload[:2], msg), # added by wrapper
"frag_index": int(seqx[10:12], 16),
"frag_total": int(seqx[12:], 16),
"frag_length": int(seqx[8:10], 16),
}
if msg.verb == "RQ":
assert msg.len == 7, msg.len
return _header(payload[:14])
assert msg.verb in ("RP", " I", " W"), msg.verb
return {
**_header(payload[:14]),
"fragment": payload[14:],
}
@parser_decorator # system_fault
def parser_0418(payload, msg) -> Optional[dict]:
"""In testing: 10 * 6 log entries in the UI, but 63 via RQs."""
# 045 RP --- 01:145038 18:013393 --:------ 0418 022 000000B00401010000008694A3CC7FFFFF70000ECC8A # noqa
# 045 RP --- 01:145038 18:013393 --:------ 0418 022 00C001B004010100000086949BCB7FFFFF70000ECC8A # noqa
# 045 RP --- 01:145038 18:013393 --:------ 0418 022 000000B0000000000000000000007FFFFF7000000000 # noqa
# 000 RP --- 01:037519 18:140805 --:------ 0418 022 004024B0060006000000CB94A112FFFFFF70007AD47D # noqa
if payload[2:] == CODE_SCHEMA["0418"]["null_rp"][2:]:
# a null log entry, or: is payload[38:] == "000000" sufficient?
return {}
#
assert msg.verb in (" I", "RP"), msg.verb
assert msg.len == 22, msg.len
assert payload[:2] == "00", payload[:2] # likely always 00
assert payload[2:4] in CODE_0418_FAULT_STATE, payload[2:4] # C0 don't appear in UI?
assert int(payload[4:6], 16) <= 63, payload[4:6] # TODO: upper limit is: 60? 63?
assert payload[8:10] in CODE_0418_FAULT_TYPE, payload[8:10]
assert int(payload[10:12], 16) < msg._gwy.config[MAX_ZONES] or (
payload[10:12] in ("F9", "FA", "FC") # "1C"?
), payload[10:12]
assert payload[12:14] in CODE_0418_DEVICE_CLASS, payload[12:14]
assert payload[28:30] in ("7F", "FF"), payload[28:30]
result = {
"log_idx": payload[4:6],
"timestamp": dts_from_hex(payload[18:30]),
"fault_state": CODE_0418_FAULT_STATE.get(payload[2:4], payload[2:4]),
"fault_type": CODE_0418_FAULT_TYPE.get(payload[8:10], payload[8:10]),
"device_class": CODE_0418_DEVICE_CLASS.get(payload[12:14], payload[12:14]),
} # TODO: stop using __idx()?
if payload[10:12] == "FC" and result["device_class"] == "actuator":
result["device_class"] = ATTR_HTG_CONTROL # aka Boiler relay
if payload[12:14] != "00": # Controller
key_name = (
"zone_id"
if int(payload[10:12], 16) < msg._gwy.config[MAX_ZONES]
else "domain_id"
) # TODO: don't use zone_idx (for now)
result.update({key_name: payload[10:12]})
if payload[38:] == "000002": # "00:000002 for Unknown?
result.update({"device_id": None})
elif payload[38:] not in ("000000", "000001"): # "00:000001 for Controller?
result.update({"device_id": dev_hex_to_id(payload[38:])})
assert payload[6:8] == "B0", payload[6:8] # unknown_1, ?priority
assert payload[14:18] == "0000", payload[14:18] # unknown_2
assert payload[30:38] == "FFFF7000", payload[30:38] # unknown_3
result.update(
{
"_unknown_1": payload[6:8],
"_unknown_2": payload[14:18],
"_unknown_3": payload[30:38],
}
)
# return {
# "log_idx": result["log_idx"],
# "log_entry": [v for k, v in result.items() if k != "log_idx"],
# }
return result
@parser_decorator # unknown, from STA
def parser_042f(payload, msg) -> Optional[dict]:
# 055 I --- 34:064023 --:------ 34:064023 042F 008 00000000230023F5
# 063 I --- 34:064023 --:------ 34:064023 042F 008 00000000240024F5
# 049 I --- 34:064023 --:------ 34:064023 042F 008 00000000250025F5
# 045 I --- 34:064023 --:------ 34:064023 042F 008 00000000260026F5
# 045 I --- 34:092243 --:------ 34:092243 042F 008 0000010021002201
# 000 I 34:011469 --:------ 34:011469 042F 008 00000100030004BC
assert msg.len in (8, 9), msg.len # non-evohome are 9
assert payload[:2] == "00", payload[:2]
return {
"counter_1": int(payload[2:6], 16),
"counter_2": int(payload[6:10], 16),
"counter_total": int(payload[10:14], 16),
"unknown_0": payload[14:],
}
@parser_decorator # unknown, from THM
def parser_0b04(payload, msg) -> Optional[dict]:
# 12:04:57.244 063 I --- --:------ --:------ 12:207082 0B04 002 00C8
# 12:04:58.235 063 I --- --:------ --:------ 12:207082 0B04 002 00C8
# 12:04:58.252 064 I --- --:------ --:------ 12:207082 0B04 002 00C8
# above every 24h
assert msg.len == 2, msg.len
assert payload[:2] == "00", payload[:2]
assert payload[2:] == "C8", payload[2:]
return {"_unknown_0": payload[2:]}
@parser_decorator # mixvalve_config (zone)
def parser_1030(payload, msg) -> Optional[dict]:
def _parser(seqx) -> dict:
assert seqx[2:4] == "01", seqx[2:4]
param_name = {
"C8": "max_flow_setpoint", # 55 (0-99) C
"C9": "min_flow_setpoint", # 15 (0-50) C
"CA": "valve_run_time", # 150 (0-240) sec, aka actuator_run_time
"CB": "pump_run_time", # 15 (0-99) sec
"CC": "_unknown_0", # ?boolean?
}[seqx[:2]]
return {param_name: int(seqx[4:], 16)}
assert msg.len == 1 + 5 * 3, msg.len
assert payload[30:] in ("00", "01"), payload[30:]
params = [_parser(payload[i : i + 6]) for i in range(2, len(payload), 6)]
result = {
**_idx(payload[:2], msg),
**{k: v for x in params for k, v in x.items()},
}
# TODO: remove me...
if TEST_MODE and msg.verb == " W":
KEYS = (
"max_flow_setpoint",
"min_flow_setpoint",
"valve_run_time",
"pump_run_time",
)
cmd = Command.set_mix_valve_params(
msg.dst.id, payload[:2], **{k: v for k, v in result.items() if k in KEYS}
)
assert cmd.payload == payload, cmd.payload
# TODO: remove me...
return result
@parser_decorator # device_battery (battery_state)
def parser_1060(payload, msg) -> Optional[dict]:
"""Return the battery state.
Some devices (04:) will also report battery level.
"""
# 06:48:23.948 049 I --- 12:010740 --:------ 12:010740 1060 003 00FF01
# 16:18:43.515 051 I --- 12:010740 --:------ 12:010740 1060 003 00FF00
# 16:14:44.180 054 I --- 04:056057 --:------ 04:056057 1060 003 002800
# 17:34:35.460 087 I --- 04:189076 --:------ 01:145038 1060 003 026401
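    # e.g. the 04: packet above ("002800") decodes to battery_level 0x28/200 ==
    # 0.2 (i.e. 20%), with battery_low True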
assert msg.len == 3, msg.len
assert payload[4:6] in ("00", "01")
return {
"battery_low": payload[4:] == "00",
"battery_level": _percent(payload[2:4]),
}
@parser_decorator # unknown (non-Evohome, e.g. ST9520C)
def parser_1090(payload, msg) -> dict:
# 14:08:05.176 095 RP --- 23:100224 22:219457 --:------ 1090 005 007FFF01F4
# 18:08:05.809 095 RP --- 23:100224 22:219457 --:------ 1090 005 007FFF01F4
# this is an educated guess
assert msg.len == 5, msg.len
assert int(payload[:2], 16) < 2, payload[:2]
return {
**_idx(payload[:2], msg),
"temp_0": _temp(payload[2:6]),
"temp_1": _temp(payload[6:10]),
}
@parser_decorator # dhw_params
def parser_10a0(payload, msg) -> Optional[dict]:
# RQ --- 01:136410 10:067219 --:------ 10A0 002 0000
# RQ --- 07:017494 01:078710 --:------ 10A0 006 00-1566-00-03E4
# RQ --- 07:045960 01:145038 --:------ 10A0 006 00-31FF-00-31FF (null)
# RQ --- 07:045960 01:145038 --:------ 10A0 006 00-1770-00-03E8
# RQ --- 07:045960 01:145038 --:------ 10A0 006 00-1374-00-03E4
# RQ --- 07:030741 01:102458 --:------ 10A0 006 00-181F-00-03E4
# RQ --- 07:036831 23:100224 --:------ 10A0 006 01-1566-00-03E4 (non-evohome)
# RQ --- 30:185469 01:037519 --:------ 0005 002 000E
# RP --- 01:037519 30:185469 --:------ 0005 004 000E0300 # two DHW valves
# RQ --- 30:185469 01:037519 --:------ 10A0 001 01 (01 )
if msg.verb == "RQ" and msg.len == 1:
# 045 RQ --- 07:045960 01:145038 --:------ 10A0 006 0013740003E4
# 037 RQ --- 18:013393 01:145038 --:------ 10A0 001 00
# 054 RP --- 01:145038 18:013393 --:------ 10A0 006 0013880003E8
return _idx(payload[:2], msg)
assert msg.len in (1, 3, 6), msg.len # OTB uses 3, evohome uses 6
assert payload[:2] in ("00", "01"), payload[:2] # can be two DHW valves/system
result = {}
if msg.len >= 2:
setpoint = _temp(payload[2:6]) # 255 for OTB? iff no DHW?
result = {"setpoint": None if setpoint == 255 else setpoint} # 30.0-85.0 C
if msg.len >= 4:
result["overrun"] = int(payload[6:8], 16) # 0-10 minutes
if msg.len >= 6:
result["differential"] = _temp(payload[8:12]) # 1.0-10.0 C
# TODO: remove me...
if TEST_MODE and msg.verb == " W":
KEYS = ("setpoint", "overrun", "differential")
cmd = Command.set_dhw_params(
msg.dst.id, **{k: v for k, v in result.items() if k in KEYS}
)
assert cmd.payload == payload, cmd.payload
# TODO: remove me...
return result
@parser_decorator # device_info
def parser_10e0(payload, msg) -> Optional[dict]:
assert msg.len in (19, 28, 30, 36, 38), msg.len # a non-evohome seen with 30
date_2 = _date(payload[20:28]) # could be 'FFFFFFFF'
date_1 = _date(payload[28:36]) # could be 'FFFFFFFF'
return { # TODO: add version?
"_unknown": payload[:20],
"date_2": date_2 if date_2 else "0000-00-00",
"date_1": date_1 if date_1 else "0000-00-00",
"description": _str(payload[36:]),
}
@parser_decorator # tpi_params (domain/zone/device)
def parser_1100(payload, msg) -> Optional[dict]:
    if msg.src.type == "08": # Honeywell Jasper ?HVAC
assert msg.len == 19, msg.len
return {
"ordinal": f"0x{payload[2:8]}",
"blob": payload[8:],
}
if msg.verb == "RQ":
assert msg.len == 2
return {} # No payload
assert msg.len in (5, 8), msg.len
assert payload[:2] in ("00", "FC"), payload[:2]
# 2020-09-23T19:25:04.767331 047 I --- 13:079800 --:------ 13:079800 1100 008 00170498007FFF01 # noqa
assert int(payload[2:4], 16) / 4 in range(1, 13), payload[2:4]
assert int(payload[4:6], 16) / 4 in range(1, 31), payload[4:6]
assert int(payload[6:8], 16) / 4 in range(0, 16), payload[6:8]
assert payload[8:10] in ("00", "FF"), payload[8:10]
# for TPI
# - cycle_rate: 6, (3, 6, 9, 12)
# - min_on_time: 1 (1-5)
# - min_off_time: 1 (1-?)
# for heatpump
# - cycle_rate: 1-9
# - min_on_time: 1, 5, 10,...30
# - min_off_time: 0, 5, 10, 15
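    # e.g. TPI bytes of 18/04/04 (hex) would decode to 6 cycles/hour, 1.0 min on,
    # 1.0 min off - all three fields are stored as value * 4 (illustrative values)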
def _parser(seqx) -> dict:
return {
**_idx(seqx[:2], msg),
"cycle_rate": int(int(payload[2:4], 16) / 4), # cycles/hour
"min_on_time": int(payload[4:6], 16) / 4, # min
"min_off_time": int(payload[6:8], 16) / 4, # min
"_unknown_0": payload[8:10], # always 00, FF?
}
result = _parser(payload)
if msg.len > 5:
assert payload[14:] == "01", payload[14:]
result.update(
{
"proportional_band_width": _temp(payload[10:14]), # 1.5 (1.5-3.0) C
"_unknown_1": payload[14:], # always 01?
}
)
# TODO: remove me...
if TEST_MODE and msg.verb == " W":
KEYS = ("cycle_rate", "min_on_time", "min_off_time", "proportional_band_width")
cmd = Command.set_tpi_params(
msg.dst.id, payload[:2], **{k: v for k, v in result.items() if k in KEYS}
)
assert cmd.payload == payload, cmd.payload
# TODO: remove me...
return result
@parser_decorator # dhw_temp
def parser_1260(payload, msg) -> Optional[dict]:
if msg.verb == "RQ" and msg.len <= 2:
return _idx(payload[:2], msg)
assert msg.len == 3, msg.len
assert payload[:2] == "00", payload[:2] # all DHW pkts have no domain
return {"temperature": _temp(payload[2:])}
@parser_decorator # outdoor_temp
def parser_1290(payload, msg) -> Optional[dict]:
# evohome responds to an RQ
assert msg.len == 3, msg.len
assert payload[:2] == "00", payload[:2] # no domain
return {"temperature": _temp(payload[2:])}
@parser_decorator # indoor_humidity (Nuaire RH sensor)
def parser_12a0(payload, msg) -> Optional[dict]:
# assert msg.len == 6 if type == ?? else 2, msg.len
assert payload[:2] == "00", payload[:2] # domain?
rh = int(payload[2:4], 16) / 100 if payload[2:4] != "EF" else None
if msg.len == 2:
return {"relative_humidity": rh}
assert msg.len == 6, f"pkt length is {msg.len}, expected 6"
return {
"relative_humidity": rh,
"temperature": _temp(payload[4:8]),
"dewpoint_temp": _temp(payload[8:12]),
}
@parser_decorator # window_state (of a device/zone)
def parser_12b0(payload, msg) -> Optional[dict]:
assert payload[2:] in ("0000", "C800", "FFFF"), payload[2:] # "FFFF" means N/A
# assert msg.len == 3, msg.len # implied
return {
**_idx(payload[:2], msg),
"window_open": _bool(payload[2:4]),
}
@parser_decorator # displayed_temp (on a TR87RF bound to a RFG100)
def parser_12c0(payload, msg) -> Optional[dict]:
assert payload[:2] == "00", f"expecting 00, not {payload[:2]}"
assert payload[4:] == "01", f"expecting 01, not {payload[4:]}"
temp = None if payload[2:4] == "80" else int(payload[2:4], 16) / 2
return {"temperature": temp}
@parser_decorator # system_sync
def parser_1f09(payload, msg) -> Optional[dict]:
# TODO: Try RQ/1F09/"F8-FF" (CTL will RP to a RQ/00)
assert msg.len == 3, "expecting length 3"
# assert payload[:2] in ("00", "F8", "FF") # W uses F8, non-Honeywell use 00
seconds = int(payload[2:6], 16) / 10
next_sync = msg.dtm + td(seconds=seconds)
return {
"remaining_seconds": seconds,
"_next_sync": dt.strftime(next_sync, "%H:%M:%S"),
}
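# e.g. a 1F09 payload of "FF0BB8" would give remaining_seconds 0x0BB8 / 10 ==
# 300.0 (an illustrative value)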
@parser_decorator # dhw_mode
def parser_1f41(payload, msg) -> Optional[dict]:
assert msg.len in (6, 12), msg.len
assert payload[:2] == "00", payload[:2] # all DHW pkts have no domain
# 053 RP --- 01:145038 18:013393 --:------ 1F41 006 00FF00FFFFFF # no stored DHW
assert payload[2:4] in ("00", "01", "FF"), payload[2:4]
assert payload[4:6] in ZONE_MODE_MAP, payload[4:6]
assert payload[6:12] == "FFFFFF", payload[6:12]
if payload[4:6] == "04":
assert msg.len == 12, msg.len
result = {
"active": {"00": False, "01": True, "FF": None}[payload[2:4]],
"mode": ZONE_MODE_MAP.get(payload[4:6]),
}
if payload[4:6] == "04": # temporary_override
result["until"] = _dtm(payload[12:24])
# TODO: remove me...
if TEST_MODE and msg.verb == " W":
KEYS = ("active", "mode", "until")
cmd = Command.set_dhw_mode(
msg.dst.id, **{k: v for k, v in result.items() if k in KEYS}
)
assert cmd.payload == payload, cmd.payload
# TODO: remove me...
return result
@parser_decorator # rf_bind
def parser_1fc9(payload, msg) -> Optional[dict]:
# 17:02:31.964172 064 I --- 07:045960 --:------ 07:045960 1FC9 012 0012601CB388001FC91CB388 # noqa: E501
# 17:02:31.980015 065 W --- 01:145038 07:045960 --:------ 1FC9 006 0010A006368E # noqa: E501
# 17:02:32.004055 064 I --- 07:045960 01:145038 --:------ 1FC9 006 0012601CB388 # noqa: E501
# 17:03:35.012706 053 I --- 01:145038 --:------ 01:145038 1FC9 018 FA000806368EFC3B0006368EFA1FC906368E # noqa: E501
# 17:03:35.658983 045 W --- 13:081807 01:145038 --:------ 1FC9 006 003EF0353F8F # noqa: E501
# 17:03:35.675856 053 I --- 01:145038 13:081807 --:------ 1FC9 006 00FFFF06368E # noqa: E501
# this is an array of codes
# 049 I --- 01:145038 --:------ 01:145038 1FC9 018 07-0008-06368E FC-3B00-06368E 07-1FC9-06368E # noqa: E501
# 047 I --- 01:145038 --:------ 01:145038 1FC9 018 FA-0008-06368E FC-3B00-06368E FA-1FC9-06368E # noqa: E501
# 065 I --- 01:145038 --:------ 01:145038 1FC9 024 FC-0008-06368E FC-3150-06368E FB-3150-06368E FC-1FC9-06368E # noqa: E501
# HW valve binding:
# 063 I --- 01:145038 --:------ 01:145038 1FC9 018 FA-0008-06368E FC-3B00-06368E FA-1FC9-06368E # noqa: E501
# CH valve binding:
# 071 I --- 01:145038 --:------ 01:145038 1FC9 018 F9-0008-06368E FC-3B00-06368E F9-1FC9-06368E # noqa: E501
# ZoneValve zone binding
# 045 W --- 13:106039 01:145038 --:------ 1FC9 012 00-3EF0-359E37 00-3B00-359E37
# DHW binding..
# 045 W --- 13:163733 01:145038 --:------ 1FC9 012 00-3EF0-367F95 00-3B00-367F95
# 049 I --- 01:145038 --:------ 01:145038 1FC9 018 F9-0008-06368E FC-3B00-06368E F9-1FC9-06368E # noqa
# the new (heatpump-aware) BDR91:
# 045 RP --- 13:035462 18:013393 --:------ 1FC9 018 00-3EF0-348A86 00-11F0-348A86 90-7FE1-DD6ABD # noqa
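    # each 6-byte element parses to [idx, code, device_id], e.g. FC-3B00-06368E
    # above becomes ["FC", "3B00", "01:145038"]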
def _parser(seqx) -> dict:
# print(dev_hex_to_id(seqx[6:]))
assert seqx[6:] == payload[6:12] # all with same controller
if seqx[:2] not in ("F9", "FA", "FB", "FC"): # or: not in DOMAIN_TYPE_MAP: ??
assert int(seqx[:2], 16) < msg._gwy.config[MAX_ZONES]
return [seqx[:2], seqx[2:6], dev_hex_to_id(seqx[6:])]
assert msg.len >= 6 and msg.len % 6 == 0, msg.len # assuming not RQ
assert msg.verb in (" I", " W", "RP"), msg.verb # devices will respond to a RQ!
assert msg.src.id == dev_hex_to_id(payload[6:12]), payload[6:12]
return [
_parser(payload[i : i + 12])
for i in range(0, len(payload), 12)
if payload[i : i + 2] != "90" # TODO: WIP
]
@parser_decorator # opentherm_sync, otb_sync
def parser_1fd4(payload, msg) -> Optional[dict]:
assert msg.verb == " I", msg.verb
assert msg.len == 3, msg.len
assert payload[:2] == "00", payload[:2]
return {"ticker": int(payload[2:], 16)}
@parser_decorator # now_next_setpoint (non-Evohome, e.g. Sundial programmer)
def parser_2249(payload, msg) -> Optional[dict]:
# see: https://github.com/jrosser/honeymon/blob/master/decoder.cpp#L357-L370
# 095 I --- 23:100224 --:------ 23:100224 2249 007 00-7EFF-7EFF-FFFF
# 095 I --- 23:100224 --:------ 23:100224 2249 007 00-7EFF-7EFF-FFFF
def _parser(seqx) -> dict:
minutes = int(seqx[10:], 16)
next_setpoint = msg.dtm + td(minutes=minutes)
return {
**_idx(seqx[:2], msg),
"setpoint_now": _temp(seqx[2:6]),
"setpoint_next": _temp(seqx[6:10]),
"minutes_remaining": minutes,
"_next_setpoint": dt.strftime(next_setpoint, "%H:%M:%S"),
}
# the ST9520C can support two heating zones, so: msg.len in (7, 14)?
if msg.is_array: # TODO: can these msgs require >1 pkts? - seems unlikely
assert msg.len >= 7 and msg.len % 7 == 0, msg.len
return [_parser(payload[i : i + 14]) for i in range(0, len(payload), 14)]
assert msg.len == 7, msg.len
return _parser(payload)
@parser_decorator # ufh_setpoint, TODO: max length = 24?
def parser_22c9(payload, msg) -> Optional[dict]:
def _parser(seqx) -> dict:
assert seqx[10:], seqx[10:]
return {
**_idx(seqx[:2], msg),
"temp_low": _temp(seqx[2:6]),
"temp_high": _temp(seqx[6:10]),
"_unknown_0": seqx[10:],
}
assert msg.len >= 6 and msg.len % 6 == 0, msg.len
return [_parser(payload[i : i + 12]) for i in range(0, len(payload), 12)]
@parser_decorator # message_22d0 - system switch?
def parser_22d0(payload, msg) -> Optional[dict]:
assert payload[:2] == "00", payload[:2] # has no domain?
assert payload[2:] == "000002", payload[2:]
return {"unknown": payload[2:]}
@parser_decorator # boiler_setpoint
def parser_22d9(payload, msg) -> Optional[dict]:
assert msg.len == 3, msg.len
assert payload[:2] == "00", payload[:2]
return {"boiler_setpoint": _temp(payload[2:6])}
@parser_decorator # switch_mode
def parser_22f1(payload, msg) -> Optional[dict]:
# 11:42:43.149 081 I 051 --:------ --:------ 49:086353 22F1 003 000304
# 11:42:49.587 071 I 052 --:------ --:------ 49:086353 22F1 003 000404
# 11:42:49.685 072 I 052 --:------ --:------ 49:086353 22F1 003 000404
# 11:42:49.784 072 I 052 --:------ --:------ 49:086353 22F1 003 000404
assert msg.len == 3, msg.len
assert payload[:2] == "00", payload[:2] # has no domain
assert payload[4:] in ("04", "0A"), payload[4:]
bitmap = int(payload[2:4], 16)
_bitmap = {"_bitmap": bitmap}
if bitmap in FanSwitch.FAN_MODES:
_action = {FanSwitch.FAN_MODE: FanSwitch.FAN_MODES[bitmap]}
elif bitmap in (9, 10):
_action = {FanSwitch.HEATER_MODE: FanSwitch.HEATER_MODES[bitmap]}
else:
_action = {}
return {
**_action,
**_bitmap,
"unknown_0": payload[4:],
}
@parser_decorator # switch_boost
def parser_22f3(payload, msg) -> Optional[dict]:
    # NOTE: boost timer for the high fan setting; 0A/14/1E -> 10/20/30 (minutes?)
assert msg.len == 3, msg.len
assert payload[:2] == "00", payload[:2] # has no domain
assert payload[2:4] == "00", payload[2:4]
assert payload[4:6] in ("0A", "14", "1E"), payload[4:6]
return {FanSwitch.BOOST_TIMER: int(payload[4:6], 16)}
@parser_decorator # setpoint (of device/zones)
def parser_2309(payload, msg) -> Union[dict, list, None]:
# 055 RQ --- 12:010740 13:163733 --:------ 2309 003 0007D0
# 046 RQ --- 12:010740 01:145038 --:------ 2309 003 03073A
def _parser(seqx) -> dict:
return {**_idx(seqx[:2], msg), "setpoint": _temp(seqx[2:])}
if msg.verb == "RQ" and msg.len <= 2:
return _idx(payload[:2], msg)
if msg.is_array:
assert msg.len >= 3 and msg.len % 3 == 0, "expecting length mod 3"
return [_parser(payload[i : i + 6]) for i in range(0, len(payload), 6)]
assert msg.len == 3, "expecting length 3"
result = _parser(payload)
# TODO: remove me...
if TEST_MODE and msg.verb == " W":
cmd = Command.set_zone_setpoint(msg.dst.id, payload[:2], result["setpoint"])
assert cmd.payload == payload, cmd.payload
# TODO: remove me...
return result
@parser_decorator # zone_mode
def parser_2349(payload, msg) -> Optional[dict]:
# RQ --- 34:225071 30:258557 --:------ 2349 001 00
# RP --- 30:258557 34:225071 --:------ 2349 013 007FFF00FFFFFFFFFFFFFFFFFF
# I --- 10:067219 --:------ 10:067219 2349 004 00000001
if msg.verb == "RQ":
assert msg.len in (1, 2, 7), "expecting len 1,2,7"
assert False
assert msg.verb in (" I", "RP", " W"), msg.verb
assert msg.len in (4, 7, 13), msg.len # has a dtm if mode == "04", OTB has 4
assert payload[6:8] in ZONE_MODE_MAP, f"unknown zone_mode: {payload[6:8]}"
result = {
"mode": ZONE_MODE_MAP.get(payload[6:8]),
"setpoint": _temp(payload[2:6]),
}
if msg.len >= 7:
# assert payload[8:14] == "FFFFFF", payload[8:14]
if payload[8:14] == "FF" * 3: # 03/FFFFFF OK if W?
assert payload[6:8] in ("00", "02", "04"), f"{payload[6:8]} (00)"
else:
assert payload[6:8] in ("03",), f"{payload[6:8]} (01)"
result["minutes_remaining"] = int(payload[8:14], 16)
if msg.len >= 13:
if payload[14:] == "FF" * 6:
assert payload[6:8] in ("00", "02"), f"{payload[6:8]} (02)"
result["until"] = None
else:
assert payload[6:8] not in ("00", "02"), f"{payload[6:8]} (03)"
result["until"] = _dtm(payload[14:26])
# TODO: remove me...
if False and TEST_MODE and msg.verb == " W":
KEYS = ("setpoint", "mode", "until")
cmd = Command.set_zone_mode(
msg.dst.id, payload[:2], **{k: v for k, v in result.items() if k in KEYS}
)
assert cmd.payload == payload, f"test payload: {cmd.payload}"
# TODO: remove me...
return {
**_idx(payload[:2], msg),
**result,
}
@parser_decorator # hometronics _state (of unknown)
def parser_2d49(payload, msg) -> dict:
assert (
payload[:2] in ("88", "FD") or int(payload[:2], 16) < msg._gwy.config[MAX_ZONES]
), payload[:2]
assert payload[2:] in ("0000", "C800"), payload[2:] # would "FFFF" mean N/A?
# assert msg.len == 3, msg.len # implied
return {
**_idx(payload[:2], msg),
"_state": _bool(payload[2:4]),
}
@parser_decorator # system_mode
def parser_2e04(payload, msg) -> Optional[dict]:
# if msg.verb == " W":
# RQ/2E04/FF
    # I --- 01:020766 --:------ 01:020766 2E04 016 FFFFFFFFFFFFFF0007FFFFFFFFFFFF04 # Manual # noqa: E501
    # I --- 01:020766 --:------ 01:020766 2E04 016 FFFFFFFFFFFFFF0000FFFFFFFFFFFF04 # Automatic/times # noqa: E501
if msg.len == 8: # evohome
assert payload[:2] in SYSTEM_MODE_MAP, payload[:2] # TODO: check AutoWithReset
elif msg.len == 16: # hometronics, lifestyle ID:
assert 0 <= int(payload[:2], 16) <= 15 or payload[:2] == "FF", payload[:2]
assert payload[16:18] in ("00", "07"), payload[16:18]
assert payload[30:32] == "04", payload[30:32]
# assert False
else:
# msg.len in (8, 16) # evohome 8, hometronics 16
assert False, f"Packet length is {msg.len} (expecting 8, 16)"
result = {
"system_mode": SYSTEM_MODE_MAP.get(payload[:2], payload[:2]),
"until": _dtm(payload[2:14]) if payload[14:16] != "00" else None,
} # TODO: double-check the final "00"
# TODO: remove me...
if TEST_MODE and msg.verb == " W":
KEYS = ("system_mode", "until")
cmd = Command.set_system_mode(
msg.dst.id, **{k: v for k, v in result.items() if k in KEYS}
)
assert cmd.payload == payload, cmd.payload
# TODO: remove me...
return result
@parser_decorator # temperature (of device, zone/s)
def parser_30c9(payload, msg) -> Optional[dict]:
def _parser(seqx) -> dict:
return {**_idx(seqx[:2], msg), "temperature": _temp(seqx[2:])}
if msg.is_array:
assert msg.len >= 3 and msg.len % 3 == 0, "length!" # assuming not RQ
return [_parser(payload[i : i + 6]) for i in range(0, len(payload), 6)]
assert msg.len == 3, f"length is {msg.len}, expecting 3"
return _parser(payload)
@parser_decorator # unknown, from STA, VCE
def parser_3120(payload, msg) -> Optional[dict]:
# sent by STAs every ~3:45:00, why?
assert payload[:10] == "0070B00000", payload[:10]
assert payload[12:] == "FF", payload[12:]
return {
"unknown_1": payload[10:12],
"unknown_0": payload[:10],
"unknown_2": payload[12:],
}
@parser_decorator # datetime
def parser_313f(payload, msg) -> Optional[dict]:
# 2020-03-28T03:59:21.315178 045 RP --- 01:158182 04:136513 --:------ 313F 009 00FC3500A41C0307E4 # noqa: E501
# 2020-03-29T04:58:30.486343 045 RP --- 01:158182 04:136485 --:------ 313F 009 00FC8400C51D0307E4 # noqa: E501
# 2020-05-31T11:37:50.351511 056 I --- --:------ --:------ 12:207082 313F 009 0038021ECB1F0507E4 # noqa: E501
# https://www.automatedhome.co.uk/vbulletin/showthread.php?5085-My-HGI80-equivalent-Domoticz-setup-without-HGI80&p=36422&viewfull=1#post36422
# every day at ~4am TRV/RQ->CTL/RP, approx 5-10secs apart (CTL respond at any time)
if msg.verb == "RQ":
assert payload == "00", payload # implies msg.len == 1 byte
return {}
assert msg.len == 9
assert payload[:2] == "00" # evohome is always "00FC"? OTB is always 00xx
if msg.src.type == "01":
assert payload[2:4] in ("F0", "FC"), payload[2:4]
elif msg.src.type in ("12", "22"):
assert payload[2:4] == "38", payload[2:4]
elif msg.src.type == "30":
assert payload[2:4] == "60", payload[2:4]
else:
assert False, payload[2:4]
result = {
"datetime": _dtm(payload[4:18]),
"is_dst": True if bool(int(payload[4:6], 16) & 0x80) else None,
"_unknown_0": payload[2:4],
}
# TODO: remove me...
if TEST_MODE and msg.verb == " W":
cmd = Command.set_system_time(msg.dst.id, result["datetime"])
payload = payload[:4] + "00" + payload[6:] # 00, 01, 02, 03?
assert cmd.payload == payload, cmd.payload
# TODO: remove me...
return result
@parser_decorator # heat_demand (of device, FC domain)
def parser_3150(payload, msg) -> Optional[dict]:
# event-driven, and periodically; FC domain is maximum of all zones
    # TODO: all have a valid domain; will UFC/CTL respond to an RQ for FC, for a zone?
# I --- 04:136513 --:------ 01:158182 3150 002 01CA < Often see CA
def _parser(seqx) -> dict:
# assert seqx[:2] == "FC" or (int(seqx[:2], 16) < MAX_ZONES) # <5, 8 for UFC
return {**_idx(seqx[:2], msg), "heat_demand": _percent(seqx[2:])}
if msg.src.type == "02" and msg.is_array: # TODO: hometronics only?
return [_parser(payload[i : i + 4]) for i in range(0, len(payload), 4)]
assert msg.len == 2, msg.len # msg.src.type in ("01","02","10","04")
return _parser(payload) # TODO: check UFC/FC is == CTL/FC
@parser_decorator # ???
def parser_31d9(payload, msg) -> Optional[dict]:
assert payload[2:4] in ("00", "06"), payload[2:4]
assert payload[4:6] == "FF" or int(payload[4:6], 16) <= 200, payload[4:6]
if msg.len == 3: # usu: I -->20: (no seq#)
return {
**_idx(payload[:2], msg),
FanSwitch.FAN_RATE: _percent(payload[4:6]), # NOTE: is 31DA/payload[38:40]
"unknown_0": payload[2:4],
}
assert msg.len == 17, msg.len # usu: I 30:-->30:, (or 20:) with a seq#!
assert payload[6:8] == "00", payload[6:8]
assert payload[8:32] in ("00" * 12, "20" * 12), payload[8:32]
return {
# **_idx(payload[:2], msg),
FanSwitch.FAN_RATE: _percent(payload[4:6]), # NOTE: is 31D9/payload[4:6]
"unknown_0": payload[2:4],
"unknown_2": payload[6:8],
"unknown_3": payload[8:32],
"unknown_4": payload[32:],
}
@parser_decorator # UFC HCE80 (Nuaire humidity)
def parser_31da(payload, msg) -> Optional[dict]:
assert msg.len == 29, msg.len # usu: I CTL-->CTL
assert payload[2:10] == "EF007FFF", payload[2:10]
assert payload[12:30] == "EF7FFF7FFF7FFF7FFF", payload[12:30]
assert payload[34:36] == "EF", payload[34:36]
assert payload[42:44] == "00", payload[42:44]
assert payload[46:48] in ("00", "EF"), payload[46:48]
assert payload[48:] in ("EF7FFF7FFF", "EF7FFFFFFF"), payload[48:]
rh = int(payload[10:12], 16) / 100 if payload[10:12] != "EF" else None # not /200!
return {
# **_idx(payload[:2], msg),
FanSwitch.FAN_RATE: _percent(payload[38:40]), # NOTE: is 31D9/payload[4:6]
"relative_humidity": rh,
FanSwitch.BOOST_TIMER: int(payload[44:46], 16),
"unknown_3": payload[36:38],
"unknown_1": payload[30:32],
"unknown_2": payload[32:34],
}
@parser_decorator # external ventilation
def parser_31e0(payload, msg) -> Optional[dict]:
    # seems active when humidity > 0.57-0.59
assert msg.len == 4, msg.len # usu: I VNT->GWY
assert payload[:4] == "0000", payload[:4] # domain?
assert payload[4:] in ("0000", "C800"), payload[4:]
return {
"active": _bool(payload[4:6]),
"_unknown_0": payload[:4],
"_unknown_1": payload[6:],
}
@parser_decorator # opentherm_msg
def parser_3220(payload, msg) -> Optional[dict]:
assert msg.len == 5 and payload[:2] == "00", "Invalid OpenTherm payload"
# these are OpenTherm-specific assertions
if msg.src.type != "18": # TODO: remove this workaround
assert int(payload[2:4], 16) // 0x80 == parity(
int(payload[2:], 16) & 0x7FFFFFFF
), "Invalid OpenTherm check bit"
ot_msg_type = (int(payload[2:4], 16) & 0x70) >> 4
assert int(payload[2:4], 16) & 0x0F == 0
ot_msg_id = int(payload[4:6], 16)
assert (
ot_msg_id in OPENTHERM_MESSAGES["messages"]
), f"Unknown OpenTherm msg id: {ot_msg_id} (0x{ot_msg_id:02X})"
message = OPENTHERM_MESSAGES["messages"].get(ot_msg_id)
msg_name = message.get(FLAGS, message.get(VAR)) # TODO: could still be a dict
result = {
"msg_id": f"0x{payload[4:6]}", # ot_msg_id,
"msg_name": msg_name,
"msg_type": OPENTHERM_MSG_TYPE[ot_msg_type],
}
if not message:
return {**result, "value_raw": payload[6:]}
if msg.verb == "RQ":
assert ot_msg_type < 0b011, f"Invalid OpenTherm msg type: 0b{ot_msg_type:03b}"
assert payload[6:] == "0000", payload[6:]
return {
**result,
"description": message[EN],
}
# TODO: Should be > 0b011, but >= 0b011 seems required?
assert ot_msg_type >= 0b011, f"Invalid OpenTherm msg type: 0b{ot_msg_type:03b}"
if ot_msg_type != 0b111 and isinstance(message.get(VAR), dict):
if isinstance(message[VAL], dict):
result["value_hb"] = ot_msg_value(
payload[6:8], message[VAL].get(HB, message[VAL])
)
result["value_lb"] = ot_msg_value(
payload[8:], message[VAL].get(LB, message[VAL])
)
else:
result["value_hb"] = ot_msg_value(payload[6:8], message[VAL])
result["value_lb"] = ot_msg_value(payload[8:], message[VAL])
elif ot_msg_type != 0b111:
if message[VAL] in (FLAG8, U8, S8):
result["value"] = ot_msg_value(payload[6:8], message[VAL])
else:
result["value"] = ot_msg_value(payload[6:], message[VAL])
return {
**result,
"description": message[EN],
}
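# Worked example for parser_3220 (hypothetical frame, assuming parity() returns
# 1 for an odd number of set bits): payload "00C0192A00" gives check bit
# 0xC0 // 0x80 == 1, msg_type (0xC0 & 0x70) >> 4 == 0b100 (Read-Ack), msg_id
# 0x19 == 25 (boiler water temperature), and an f8.8 value of 0x2A00 / 256 == 42.0.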
@parser_decorator # actuator_sync (aka sync_tpi: TPI cycle sync)
def parser_3b00(payload, msg) -> Optional[dict]:
# system timing master: the device that sends I/FCC8 pkt controls the heater relay
"""Decode a 3B00 packet (actuator_sync).
The heat relay regularly broadcasts a 3B00 at the end(?) of every TPI cycle, the
frequency of which is determined by the (TPI) cycle rate in 1100.
The CTL subsequently broadcasts a 3B00 (i.e. at the start of every TPI cycle).
The OTB does not send these packets, but the CTL sends a regular broadcast anyway
for the benefit of any zone actuators (e.g. zone valve zones).
"""
# 053 I --- 13:209679 --:------ 13:209679 3B00 002 00C8
# 045 I --- 01:158182 --:------ 01:158182 3B00 002 FCC8
# 052 I --- 13:209679 --:------ 13:209679 3B00 002 00C8
# 045 I --- 01:158182 --:------ 01:158182 3B00 002 FCC8
# 063 I --- 01:078710 --:------ 01:078710 3B00 002 FCC8
# 064 I --- 01:078710 --:------ 01:078710 3B00 002 FCC8
assert msg.len == 2, msg.len
assert payload[:2] in {"01": "FC", "13": "00", "23": "FC"}.get(msg.src.type, "00")
assert payload[2:] == "C8", payload[2:] # Could it be a percentage?
return {
**_idx(payload[:2], msg),
"actuator_sync": _bool(payload[2:]),
}
@parser_decorator # actuator_state
def parser_3ef0(payload, msg) -> dict:
# Some of this data thanks to @ReneKlootwijk
if msg.src.type in "08": # Honeywell Japser ?HVAC
assert msg.len == 20, msg.len
return {
"ordinal": f"0x{payload[2:8]}",
"blob": payload[8:],
}
assert payload[:2] == "00", f"byte 1: {payload[:2]}"
if 1 < msg.len <= 3:
assert payload[2:4] in ("00", "C8", "FF"), f"byte 1: {payload[2:4]}"
assert payload[4:6] == "FF", f"byte 2: {payload[4:6]}"
if msg.len > 3: # for all OTB
if payload[2:4] != "FF":
assert int(payload[2:4], 16) <= 100, f"byte 1: {payload[2:4]}"
assert payload[4:6] in ("10", "11"), f"byte 2: {payload[4:6]}"
        assert payload[8:12] in ("0000", "00FF"), f"byte 4: {payload[8:12]}" # "FFFF"?
if msg.len > 6: # <= 9: # for some OTB
assert payload[-2:] in ("00", "64"), f"byte x: {payload[-2:]}"
result = {
"actuator_enabled": bool(_percent(payload[2:4])),
"modulation_level": _percent(payload[2:4]), # TODO: rel_modulation_level
"_unknown_2": _flag8(payload[4:6]),
}
    if msg.len > 3: # for OTB (there's no reliable modulation_level <-> flame_state)
# assert payload[6:8] in (
# "00", "01", "02", "04", "08", "0A", "0C", "42",
# ), payload[6:8]
result.update(
{
"_unknown_3": _flag8(payload[6:8]),
"flame_active": bool(int(payload[6:8], 0x10) & 1 << 3),
"dhw_active": bool(int(payload[6:8], 0x10) & 1 << 2),
"ch_enabled": bool(int(payload[6:8], 0x10) & 1 << 1),
"_unknown_4": payload[8:10],
"_unknown_5": payload[10:12],
}
)
if msg.len > 6:
result.update(
{
"_unknown_6": _flag8(payload[12:14]),
"ch_active": bool(int(payload[12:14], 0x10) & 1 << 0),
"ch_setpoint": int(payload[14:16], 0x10),
"max_rel_modulation": int(payload[16:18], 0x10),
}
)
return result
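# Worked example for parser_3ef0 (hypothetical 3-byte frame): payload "00C8FF"
# takes the 1 < msg.len <= 3 branch; assuming _percent("C8") == 0xC8 / 200 == 1.0,
# the result is actuator_enabled=True, modulation_level=1.0. The OTB-specific
# fields are only populated when msg.len > 3.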
@parser_decorator # actuator_cycle
def parser_3ef1(payload, msg) -> dict:
if msg.src.type == "08": # Honeywell Japser ?HVAC
assert msg.len == 18, f"expecting len 18, got {msg.len}"
return {
"ordinal": f"0x{payload[2:8]}",
"blob": payload[8:],
}
if msg.src.type == "31" and msg.len == 12: # or (12, 20) Honeywell Japser ?HVAC
# assert msg.len == 12, f"expecting len 12, got {msg.len}"
return {
"ordinal": f"0x{payload[2:8]}",
"blob": payload[8:],
}
if msg.verb == "RQ":
assert msg.len == 1, f"expecting len 1, got: {msg.len}"
return {}
assert msg.verb == "RP", msg.verb
assert msg.len == 7, msg.len
assert payload[:2] == "00", payload[:2]
assert _percent(payload[10:12]) <= 1, payload[10:12]
# assert payload[12:] == "FF"
cycle_countdown = None if payload[2:6] == "7FFF" else int(payload[2:6], 16)
return {
**_idx(payload[:2], msg),
"actuator_enabled": bool(_percent(payload[10:12])),
"modulation_level": _percent(payload[10:12]),
"actuator_countdown": int(payload[6:10], 16),
"cycle_countdown": cycle_countdown, # not for OTB, == "7FFF"
"_unknown_0": payload[12:], # for OTB != "FF"
}
# @parser_decorator # faked puzzle pkt shouldn't be decorated
def parser_7fff(payload, msg) -> Optional[dict]:
LOOKUP = {"01": "evohome_rf", "02": "impersonating", "03": "message"}
if payload[:2] == "00":
return {
"datetime": dts_from_hex(payload[2:14]),
"message": _str(payload[16:]),
}
elif payload[:2] in LOOKUP:
return {LOOKUP[payload[:2]]: _str(payload[2:])}
elif payload[:2] == "7F":
return {
"datetime": dts_from_hex(payload[2:14]),
"counter": int(payload[16:20], 16),
"interval": int(payload[22:26], 16) / 100,
}
return {
"header": payload[:2],
"payload": payload[2:],
}
@parser_decorator
def parser_unknown(payload, msg) -> Optional[dict]:
# TODO: it may be useful to generically search payloads for hex_ids, commands, etc.
raise NotImplementedError
| 37.046638 | 145 | 0.570644 | 9,813 | 68,314 | 3.878732 | 0.114134 | 0.044349 | 0.019547 | 0.028322 | 0.416294 | 0.349561 | 0.279859 | 0.221927 | 0.175319 | 0.148967 | 0 | 0.165125 | 0.259244 | 68,314 | 1,843 | 146 | 37.066739 | 0.586989 | 0.32336 | 0 | 0.330695 | 0 | 0.00088 | 0.108832 | 0.001933 | 0 | 0 | 0.001713 | 0.003256 | 0.19613 | 1 | 0.07212 | false | 0.004398 | 0.013193 | 0.003518 | 0.211961 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa582fe1c4d06e71af1c000cbf96f3d850ec2e07 | 4,343 | py | Python | wechat_django/pay/admin/payapp.py | UltraVacuum/wechat-django | 0ea243067f5bc5e69cab6c4585d5e46d9399a583 | [
"MIT"
] | 166 | 2019-02-23T10:19:33.000Z | 2022-02-03T06:50:15.000Z | wechat_django/pay/admin/payapp.py | UltraVacuum/wechat-django | 0ea243067f5bc5e69cab6c4585d5e46d9399a583 | [
"MIT"
] | 19 | 2019-05-07T07:28:32.000Z | 2021-06-02T06:56:03.000Z | wechat_django/pay/admin/payapp.py | UltraVacuum/wechat-django | 0ea243067f5bc5e69cab6c4585d5e46d9399a583 | [
"MIT"
] | 54 | 2019-02-27T07:55:57.000Z | 2021-09-02T06:47:51.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from wechat_django.admin.base import has_wechat_permission
from wechat_django.admin.wechatapp import WeChatAppAdmin
from wechat_django.constants import AppType
from wechat_django.models import WeChatApp
from ..models import WeChatPay
class WeChatPayForm(forms.ModelForm):
clear_certs = forms.BooleanField(
label=_("clear certs"), initial=False, required=False,
help_text=_("Your mch_cert already uploaded"))
_mch_cert = forms.FileField(
label=_("mch_cert"), required=False, help_text=_("商户证书"))
_mch_key = forms.FileField(
label=_("mch_key"), required=False, help_text=_("商户证书私钥"))
class Meta(object):
model = WeChatPay
fields = (
"title", "name", "weight", "mch_id", "api_key", "sub_mch_id",
"mch_app_id", "_mch_cert", "_mch_key", "clear_certs")
widgets = dict(
api_key=forms.PasswordInput(render_value=True),
)
def __init__(self, *args, **kwargs):
super(WeChatPayForm, self).__init__(*args, **kwargs)
        # process the form fields
inst = self.instance
if inst.pk:
self._readonly_field("name")
self._readonly_field("mch_id")
if not inst.mch_app_id:
self._remove_field("sub_mch_id")
self._remove_field("mch_app_id")
if inst.pk and inst.mch_cert and inst.mch_key:
self._remove_field("_mch_cert")
self._remove_field("_mch_key")
else:
self._remove_field("clear_certs")
def _remove_field(self, field):
self.fields[field].widget = forms.widgets.HiddenInput()
self.fields[field].disabled = True
def _readonly_field(self, field):
self.fields[field].disabled = True
def clean__mch_cert(self):
file = self.cleaned_data.get("_mch_cert")
if file:
return file.read()
return None
def clean__mch_key(self):
file = self.cleaned_data.get("_mch_key")
if file:
return file.read()
return None
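    # Cross-field validation below: a merchant cert and its private key must
    # be uploaded together (or not at all); the error is attached to
    # "_mch_cert" so it surfaces next to the upload field in the admin.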
def clean(self):
rv = super(WeChatPayForm, self).clean()
mch_cert = rv.get("_mch_cert")
mch_key = rv.get("_mch_key")
if (mch_cert or mch_key) and not (mch_cert and mch_key):
self.add_error(
"_mch_cert", _("must upload both mch_cert and mch_key"))
return rv
def _post_clean(self):
super(WeChatPayForm, self)._post_clean()
        # process the certificates
if self.cleaned_data.get("clear_certs"):
self.instance.mch_cert = None
self.instance.mch_key = None
if self.cleaned_data.get("_mch_cert"):
self.instance.mch_cert = self.cleaned_data.pop("_mch_cert")
if self.cleaned_data.get("_mch_key"):
self.instance.mch_key = self.cleaned_data.pop("_mch_key")
class WeChatPayInline(admin.StackedInline):
form = WeChatPayForm
model = WeChatPay
def get_extra(self, request, obj=None):
return 0 if obj.pay else 1
admin.site.unregister(WeChatApp)
@admin.register(WeChatApp)
class WeChatAppWithPayAdmin(WeChatAppAdmin):
inlines = (WeChatPayInline,)
def get_inline_instances(self, request, obj=None):
rv = super(WeChatAppWithPayAdmin, self).get_inline_instances(request,
obj)
if not obj\
or not obj.type & (AppType.SERVICEAPP
| AppType.MINIPROGRAM
| AppType.PAYPARTNER)\
or not has_wechat_permission(request, obj, "pay", "manage"):
rv = tuple(filter(lambda o: not isinstance(o, WeChatPayInline),
rv))
return rv
def get_deleted_objects(self, objs, request):
from ..models import UnifiedOrder
deleted_objects, model_count, perms_needed, protected =\
super(WeChatAppWithPayAdmin, self).get_deleted_objects(objs, request)
ignored_models = (UnifiedOrder._meta.verbose_name,)
perms_needed = perms_needed.difference(ignored_models)
return deleted_objects, model_count, perms_needed, protected
| 35.024194 | 81 | 0.628598 | 515 | 4,343 | 4.998058 | 0.271845 | 0.048951 | 0.040793 | 0.034965 | 0.186092 | 0.149573 | 0.086247 | 0.029526 | 0.029526 | 0 | 0 | 0.000947 | 0.27032 | 4,343 | 123 | 82 | 35.308943 | 0.811297 | 0.007138 | 0 | 0.122449 | 0 | 0 | 0.07753 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102041 | false | 0.010204 | 0.102041 | 0.010204 | 0.387755 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa5d9ad056b4f5558d2afcdd7928ea6866daa9d5 | 2,514 | py | Python | resources/lib/default.py | Tenzer/plugin.video.nfl-teams | 66b9b1f113865095125be231e578ac1e491cca1e | [
"MIT"
] | 3 | 2017-09-29T13:21:59.000Z | 2020-06-01T03:49:52.000Z | resources/lib/default.py | Tenzer/plugin.video.nfl-teams | 66b9b1f113865095125be231e578ac1e491cca1e | [
"MIT"
] | 8 | 2015-08-17T19:28:54.000Z | 2018-07-28T16:00:41.000Z | resources/lib/default.py | Tenzer/plugin.video.nfl-teams | 66b9b1f113865095125be231e578ac1e491cca1e | [
"MIT"
] | 4 | 2017-09-15T08:36:47.000Z | 2019-03-02T20:50:56.000Z | from os import path
from resources.lib.menu import Menu
class Default(object):
"""Static object, which simply creates a list of teams."""
_teams = [
{"short": "cardinals", "long": "Arizona Cardinals"},
{"short": "falcons", "long": "Atlanta Falcons"},
{"short": "ravens", "long": "Baltimore Ravens"},
{"short": "bills", "long": "Buffalo Bills"},
{"short": "panthers", "long": "Carolina Panthers"},
{"short": "bears", "long": "Chicago Bears"},
{"short": "bengals", "long": "Cincinnati Bengals"},
{"short": "browns", "long": "Cleveland Browns"},
# {"short": "cowboys", "long": "Dallas Cowboys"}, # The website doesn't have video categories
{"short": "broncos", "long": "Denver Broncos"},
{"short": "lions", "long": "Detroit Lions"},
{"short": "packers", "long": "Green Bay Packers"},
{"short": "texans", "long": "Houston Texans"},
{"short": "colts", "long": "Indianapolis Colts"},
{"short": "jaguars", "long": "Jacksonville Jaguars"},
{"short": "chiefs", "long": "Kansas City Chiefs"},
{"short": "chargers", "long": "Los Angeles Chargers"},
{"short": "rams", "long": "Los Angeles Rams"},
{"short": "dolphins", "long": "Miami Dolphins"},
{"short": "vikings", "long": "Minnesota Vikings"},
# {"short": "patriots", "long": "New England Patriots"}, # The website doesn't have video categories
{"short": "saints", "long": "New Orleans Saints"},
{"short": "giants", "long": "New York Giants"},
{"short": "jets", "long": "New York Jets"},
{"short": "raiders", "long": "Oakland Raiders"},
{"short": "eagles", "long": "Philadelphia Eagles"},
{"short": "steelers", "long": "Pittsburgh Steelers"},
{"short": "fourtyniners", "long": "San Francisco 49ers"},
{"short": "seahawks", "long": "Seattle Seahawks"},
{"short": "buccaneers", "long": "Tampa Bay Buccaneers"},
{"short": "titans", "long": "Tennessee Titans"},
{"short": "redskins", "long": "Washington Redskins"}
]
def __init__(self):
with Menu(["none"]) as menu:
for team in self._teams:
menu.add_item({
"url_params": {"team": team["short"]},
"name": team["long"],
"folder": True,
"thumbnail": path.join("resources", "images", "{0}.png".format(team["short"]))
})
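# A minimal usage sketch (hypothetical; inside the add-on's entry point, where
# Menu writes directory items into the active Kodi listing):
#   Default()  # instantiating renders the full team list as folder items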
| 47.433962 | 109 | 0.531424 | 243 | 2,514 | 5.465021 | 0.481481 | 0.021084 | 0.02259 | 0.024096 | 0.060241 | 0.060241 | 0.060241 | 0.060241 | 0 | 0 | 0 | 0.001603 | 0.255768 | 2,514 | 52 | 110 | 48.346154 | 0.708177 | 0.097056 | 0 | 0 | 0 | 0 | 0.460858 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022727 | false | 0 | 0.045455 | 0 | 0.113636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa5eaab037bf4f2be765797f9f48ad9afeef59c9 | 6,091 | py | Python | utils/recreate_model.py | pkalluri/pcnn-pp | 1eb9c1c1bf9cf3d5396f9686e033faf8bc08a025 | [
"MIT"
] | null | null | null | utils/recreate_model.py | pkalluri/pcnn-pp | 1eb9c1c1bf9cf3d5396f9686e033faf8bc08a025 | [
"MIT"
] | null | null | null | utils/recreate_model.py | pkalluri/pcnn-pp | 1eb9c1c1bf9cf3d5396f9686e033faf8bc08a025 | [
"MIT"
] | null | null | null | """
Recreate a tensorflow model from input args
"""
import numpy as np
import tensorflow as tf
from pixel_cnn_pp import nn
from pixel_cnn_pp.model import model_spec
def recreate_model(args, batch_size_generator):
# fix random seed for reproducibility
rng = np.random.RandomState(args.seed)
tf.set_random_seed(args.seed)
# energy distance or maximum likelihood?
if args.energy_distance:
loss_fun = nn.energy_distance # todo: this is currently broken, because it does not take the same args as the following loss
else:
loss_fun = nn.discretized_mix_logistic_loss
# initialize data loaders for train/test splits
if args.data_set in ['imagenet', 'cifar','cifar_sorted']:
if args.data_set == 'imagenet' and args.class_conditional:
raise("We currently don't have labels for the small imagenet data set")
if args.data_set == 'cifar':
import data.cifar10_data as cifar10_data
DataLoader = cifar10_data.DataLoader
elif args.data_set == 'cifar_sorted':
import data.cifar10_sorted_data as cifar10_sorted_data
DataLoader = cifar10_sorted_data.DataLoader
elif args.data_set == 'imagenet':
import data.imagenet_data as imagenet_data
DataLoader = imagenet_data.DataLoader
train_data = DataLoader(args.data_dir, 'train', args.batch_size * args.nr_gpu, rng=rng, shuffle=True, return_labels=args.class_conditional)
obs_shape = train_data.get_observation_size() # e.g. a tuple (32,32,3)
elif args.data_set.startswith('cifar'):
# one class of cifar
import data.cifar10_class_data as cifar10_class_data
DataLoader = cifar10_class_data.DataLoader
which_class = int(args.data_set.split('cifar')[1])
train_data = DataLoader(args.data_dir, which_class, 'train', args.batch_size * args.nr_gpu, rng=rng, shuffle=True, return_labels=args.class_conditional)
test_data = DataLoader(args.data_dir, which_class, 'test', args.batch_size * args.nr_gpu, shuffle=False, return_labels=args.class_conditional)
obs_shape = train_data.get_observation_size() # e.g. a tuple (32,32,3)
else:
import data.npz_data as from_file_data
DataLoader = from_file_data.DataLoader
train_data = DataLoader(args.data_dir, args.data_set, 'train', args.batch_size * args.nr_gpu, rng=rng, shuffle=True, return_labels=args.class_conditional)
obs_shape = train_data.get_observation_size() # e.g. a tuple (32,32,3)
# data place holders
print("creating data place holders...")
x_init = tf.placeholder(tf.float32, shape=(batch_size_generator,) + obs_shape)
xs = [tf.placeholder(tf.float32, shape=(batch_size_generator, ) + obs_shape) for i in range(args.nr_gpu)]
# if the model is class-conditional we'll set up label placeholders + one-hot encodings 'h' to condition on
if args.class_conditional:
print("creating label placeholders...")
num_labels = train_data.get_num_labels()
y_init = tf.placeholder(tf.int32, shape=(batch_size_generator,))
h_init = tf.one_hot(y_init, num_labels)
y_sample = np.split(np.mod(np.arange(batch_size_generator*args.nr_gpu), num_labels), args.nr_gpu)
h_sample = [tf.one_hot(tf.Variable(y_sample[i], trainable=False), num_labels) for i in range(args.nr_gpu)]
ys = [tf.placeholder(tf.int32, shape=(batch_size_generator,)) for i in range(args.nr_gpu)]
hs = [tf.one_hot(ys[i], num_labels) for i in range(args.nr_gpu)]
else:
h_init = None
h_sample = [None] * args.nr_gpu
hs = h_sample
# create the model
print("creating model...")
model_opt = { 'nr_resnet': args.nr_resnet, 'nr_filters': args.nr_filters, 'nr_logistic_mix': args.nr_logistic_mix, 'resnet_nonlinearity': args.resnet_nonlinearity, 'energy_distance': args.energy_distance }
model = tf.make_template('model', model_spec)
# run once for data dependent initialization of parameters
print("running init_pass...")
init_pass = model(x_init, h_init, init=True, dropout_p=args.dropout_p, **model_opt)
# keep track of moving average
all_params = tf.trainable_variables()
ema = tf.train.ExponentialMovingAverage(decay=args.polyak_decay)
# # get loss gradients over multiple GPUs + sampling
grads = []
loss_gen = []
loss_gen_test = []
print("getting sample generation functions on gpu...")
new_x_gen = []
for i in range(args.nr_gpu):
with tf.device('/gpu:%d' % i):
# train
out = model(xs[i], hs[i], ema=None, dropout_p=args.dropout_p, **model_opt)
loss_gen.append(loss_fun(tf.stop_gradient(xs[i]), out, args.accumulator, args.entropy))
# gradients
grads.append(tf.gradients(loss_gen[i], all_params, colocate_gradients_with_ops=True))
# test
out = model(xs[i], hs[i], ema=ema, dropout_p=0., **model_opt)
loss_gen_test.append(loss_fun(xs[i], out, args.accumulator, args.entropy))
# sample
out = model(xs[i], h_sample[i], ema=ema, dropout_p=0, **model_opt)
if args.energy_distance:
new_x_gen.append(out[0])
else:
if args.sampling_variance is not None:
new_x_gen.append(nn.sample_from_narrow_discretized_mix_logistic(out, args.nr_logistic_mix, var=args.sampling_variance))
else:
new_x_gen.append(nn.sample_from_discretized_mix_logistic(out, args.nr_logistic_mix))
# add losses and gradients together and get training updates
with tf.device('/gpu:0'):
for i in range(1,args.nr_gpu):
loss_gen[0] += loss_gen[i]
loss_gen_test[0] += loss_gen_test[i]
for j in range(len(grads[0])):
grads[0][j] += grads[i][j]
# init & save
print("generating initializer and saver...")
initializer = tf.global_variables_initializer()
saver = tf.train.Saver()
return saver, obs_shape, new_x_gen, xs
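# A minimal driving sketch (hypothetical; `args` must carry the attributes
# referenced above, e.g. seed, data_set, data_dir, nr_gpu, batch_size):
#   saver, obs_shape, new_x_gen, xs = recreate_model(args, batch_size_generator=16)
#   with tf.Session() as sess:
#       saver.restore(sess, checkpoint_path)
#       # feed xs with observations and run new_x_gen to sample new pixels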
| 47.585938 | 209 | 0.675915 | 878 | 6,091 | 4.444191 | 0.226651 | 0.027678 | 0.029985 | 0.016914 | 0.330343 | 0.330343 | 0.30856 | 0.223988 | 0.145566 | 0.130702 | 0 | 0.010699 | 0.21737 | 6,091 | 127 | 210 | 47.96063 | 0.807846 | 0.1187 | 0 | 0.111111 | 0 | 0 | 0.077154 | 0 | 0 | 0 | 0 | 0.007874 | 0 | 1 | 0.011111 | false | 0.022222 | 0.1 | 0 | 0.122222 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa5f57bba7fc2204f87b9e210d3887c38b63b701 | 7,734 | py | Python | detectia/engine/box.py | kartik4949/detectia | e62165372b2abac63239d540dfa8c3ee0eafd75c | [
"Apache-2.0"
] | 3 | 2021-02-02T10:59:26.000Z | 2021-11-20T07:56:29.000Z | detectia/engine/box.py | kartik4949/detectia | e62165372b2abac63239d540dfa8c3ee0eafd75c | [
"Apache-2.0"
] | null | null | null | detectia/engine/box.py | kartik4949/detectia | e62165372b2abac63239d540dfa8c3ee0eafd75c | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Kartik Sharma. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" Box utility classes. """
import functools
from typing import Dict, Text, Tuple
import tensorflow as tf
from ..config import Config
from .utils import compute_iou_boxes
class BoxEncoder:
""" a BBox encoder class. """
def __init__(self, config: Config):
self.config = config
self.anchors = config.anchors
self.input_image_shape = config.input_image_shape
self.grids = config.grids
self.num_classes = config.num_classes
self.num_anchors = len(self.anchors) // len(self.grids)
self.num_scales = config.num_scales
@staticmethod
def _assign_grid(box, grid):
""" helper utility class. """
return tf.math.floor(box[:, 0] * grid), tf.math.floor(box[:, 1] * grid)
@staticmethod
def _best_anchors(boxes, anchors):
"""_best_anchors.
Args:
boxes : pseudo bounding boxes (N, 4).
anchors : pseudo anchor boxes (A, 4).
Returns:
Intersection over union matrix (N, A).
"""
def _compute_iou_along_boxes(box):
nonlocal anchors
_compute_iou = functools.partial(compute_iou_boxes, b2=box)
return tf.map_fn(_compute_iou, anchors)
return tf.map_fn(_compute_iou_along_boxes, boxes)
@tf.function
def compute_targets(self, boxes: tf.Tensor, class_ids: tf.Tensor) -> Dict:
"""compute_targets.
Computes targets for each scale level.
Args:
boxes : boxes relative to input image. (N, x1, y1, x2, y2).
class_ids : class_ids (N,)
Returns:
list of individual scale targets [(grid, grid, A, O, C)*num_scales].
"""
# assert num of anchors are compatible with num_scales.
anchor_ratio = self.num_anchors % self.num_scales
assert anchor_ratio == 0, "Each feature scale should have same num anchors."
# indices should be integer.
class_ids = tf.cast(class_ids, tf.int32)
# calculate centroid (cx,cy) and width,height of bboxes w.r.t image.
bb_xy = (boxes[:, 0:2] + boxes[:, 2:4]) / 2
bb_wh = boxes[:, 2:4] - boxes[:, 0:2]
# normalize i.e 0-1 range.
normalized_boxes_xy = bb_xy / self.input_image_shape
normalized_boxes_wh = bb_wh / self.input_image_shape
# one hot encoding classes.
one_hot_classes = tf.one_hot(class_ids, self.num_classes)
# final true normalize boxes with objectness and class i.e (x, y, w, h, o, c)
normalized_boxes = tf.concat(
[
normalized_boxes_xy,
normalized_boxes_wh,
tf.ones(shape=(tf.shape(class_ids)[0], 1)),
one_hot_classes,
],
axis=-1,
)
# convert (wh) to (0,0,w,h) points
pseudo_bboxes = tf.concat([tf.zeros(shape=tf.shape(bb_wh)), bb_wh], axis=-1)
pseudo_anchor_boxes = tf.concat(
[tf.zeros(shape=tf.shape(self.anchors)), self.anchors], axis=-1
)
# get the iou matrix for anchors.
_best_anchors = self._best_anchors(pseudo_bboxes, pseudo_anchor_boxes)
# top iou anchor id.
_best_anchors_ids = tf.argmax(_best_anchors, axis=-1)
_best_anchors_ids = tf.cast(_best_anchors_ids, tf.int32)
# targets i.e level 1, 2, 3, etc.
targets = {}
for i in range(self.num_scales):
# grid shape i.e (13, 26, 52, etc)
grid = self.grids[i]
# init zero array target for the level.
target_lvl = tf.zeros(
shape=(grid, grid, self.num_anchors, 5 + self.num_classes),
dtype=tf.float32,
)
# calculate anchors for the current scale_level i.e (1, 2, 3)
lower_bound = tf.cast(
tf.greater(i * self.num_anchors - 1, _best_anchors_ids), dtype=tf.int32
)
upper_bound = tf.cast(
tf.greater(_best_anchors_ids, i * self.num_anchors + self.num_anchors),
dtype=tf.int32,
)
anchors_level_ids = tf.math.logical_not(
tf.cast(lower_bound + upper_bound, tf.bool)
)
# get the best match boxes with anchors in the level.
best_boxes_lvl = tf.boolean_mask(normalized_boxes, anchors_level_ids)
best_boxes_lvl = tf.cast(best_boxes_lvl, tf.float32)
# anchors ids at the level.
best_anchors_ids_lvl = tf.boolean_mask(_best_anchors_ids, anchors_level_ids)
# get grid box step for the matched boxes .
gi, gj = self._assign_grid(best_boxes_lvl, grid)
# update the target level array at matched anchor id with best boxes.
idx = tf.stack(
[
tf.cast(gi, tf.int32),
tf.cast(gj, tf.int32),
tf.cast(best_anchors_ids_lvl, tf.int32),
]
)
idx = tf.transpose(idx)
target_lvl = tf.tensor_scatter_nd_update(
target_lvl, [idx % self.num_anchors], [best_boxes_lvl]
)
targets.update({f"scale_level_{i+1}": target_lvl})
return targets
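# A minimal encoding sketch (hypothetical Config values; boxes are absolute
# (x1, y1, x2, y2) pixel coordinates on the input image):
#   encoder = BoxEncoder(config)
#   boxes = tf.constant([[50.0, 60.0, 150.0, 200.0]])
#   targets = encoder.compute_targets(boxes, tf.constant([3]))
#   # targets["scale_level_1"].shape == (grid, grid, num_anchors, 5 + num_classes)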
class BoxDecoder:
""" BoxDecoder utility class. """
def __init__(self, config: Config):
self.config = config
self.input_shape = config.input_image_shape
@tf.function
def decode_model_features(self, features: tf.Tensor, anchors: tf.Tensor) -> Tuple:
"""decode_model_features.
Decodes feature ouputs from model.
Args:
features : model feaure ouput (B, Gi, Gj, A, (5 + C)).
anchors : anchors for the feature.
Returns:
True box_xy, box_wh confidence and class probs.
"""
grid_shape = tf.shape(features)[1:3]
anchors = tf.reshape(tf.constant(anchors), [1, 1, 1, len(anchors), 2])
# create grid tensor with relative cx, cy as values.
grid_x = tf.tile(
tf.reshape(tf.range(0, grid_shape[0]), shape=[grid_shape[0], 1, 1, 1]),
[1, grid_shape[0], 1, 1],
)
grid_y = tf.tile(
tf.reshape(tf.range(0, grid_shape[1]), shape=[1, grid_shape[1], 1, 1]),
[grid_shape[1], 1, 1, 1],
)
grid_cells = tf.cast(tf.concat([grid_x, grid_y], axis=-1), tf.float32)
# Yolov3 https://arxiv.org/abs/1804.02767
# bx = sigmoid(tx) + cx
# bh = e^ph * th
box_xy = tf.nn.sigmoid(features[..., :2]) + grid_cells
box_xy = box_xy / tf.cast(grid_shape[..., ::-1], features.dtype)
box_wh = tf.exp(features[..., 2:4]) * tf.cast(anchors, tf.float32)
# TODO reverse the input_shape.
box_wh = box_wh / tf.cast(self.input_shape, features.dtype)
# confidence and class probs
confidence = tf.nn.sigmoid(features[..., 4])
class_probs = tf.nn.sigmoid(features[..., 5:])
return box_xy, box_wh, confidence, class_probs
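# A minimal decoding sketch (hypothetical; `features` is one scale's model
# output of shape (B, Gi, Gj, A, 5 + C), `anchors_lvl` the anchors for it):
#   decoder = BoxDecoder(config)
#   box_xy, box_wh, confidence, class_probs = decoder.decode_model_features(
#       features, anchors_lvl)
#   # box_xy / box_wh are normalized; multiply by config.input_image_shape
#   # to recover absolute coordinates.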
| 36.140187 | 88 | 0.58663 | 1,025 | 7,734 | 4.236098 | 0.250732 | 0.020958 | 0.025795 | 0.018425 | 0.127591 | 0.065638 | 0.049286 | 0.036849 | 0.036849 | 0.02211 | 0 | 0.021719 | 0.297517 | 7,734 | 213 | 89 | 36.309859 | 0.777471 | 0.294285 | 0 | 0.074074 | 0 | 0 | 0.012454 | 0 | 0 | 0 | 0 | 0.004695 | 0.009259 | 1 | 0.064815 | false | 0 | 0.046296 | 0 | 0.175926 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa605d19c492468616ed6e121ab6fe0fe6f5305d | 975 | py | Python | homeassistant/components/waze_travel_time/__init__.py | tizzen33/core | 2a1884a1f7a07848b8b63afd29f59c81f1ffaf62 | [
"Apache-2.0"
] | 7 | 2019-08-15T13:36:58.000Z | 2020-03-18T10:46:29.000Z | homeassistant/components/waze_travel_time/__init__.py | tizzen33/core | 2a1884a1f7a07848b8b63afd29f59c81f1ffaf62 | [
"Apache-2.0"
] | 87 | 2020-07-15T13:43:35.000Z | 2022-03-23T07:43:10.000Z | homeassistant/components/waze_travel_time/__init__.py | jawilson/home-assistant | dfe193b2776f5ab915ea89ca2b88fca0b3a07f7b | [
"Apache-2.0"
] | 7 | 2018-10-04T10:12:45.000Z | 2021-12-29T20:55:40.000Z | """The waze_travel_time component."""
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_registry import (
async_entries_for_config_entry,
async_get,
)
PLATFORMS = ["sensor"]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Load the saved entities."""
if entry.unique_id is not None:
hass.config_entries.async_update_entry(entry, unique_id=None)
ent_reg = async_get(hass)
for entity in async_entries_for_config_entry(ent_reg, entry.entry_id):
ent_reg.async_update_entity(entity.entity_id, new_unique_id=entry.entry_id)
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Unload a config entry."""
return await hass.config_entries.async_unload_platforms(config_entry, PLATFORMS)
| 34.821429 | 87 | 0.766154 | 130 | 975 | 5.430769 | 0.338462 | 0.077904 | 0.072238 | 0.093484 | 0.073654 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148718 | 975 | 27 | 88 | 36.111111 | 0.850602 | 0.031795 | 0 | 0 | 0 | 0 | 0.006818 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.176471 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa628f7adbca49209a17bfc56df6f5eba24330d0 | 10,234 | py | Python | t2t_bert/utils/tensor2tensor/envs/gym_env_problem.py | yyht/bert | 480c909e0835a455606e829310ff949c9dd23549 | [
"Apache-2.0"
] | 34 | 2018-12-19T01:00:57.000Z | 2021-03-26T09:36:37.000Z | t2t_bert/utils/tensor2tensor/envs/gym_env_problem.py | yyht/bert | 480c909e0835a455606e829310ff949c9dd23549 | [
"Apache-2.0"
] | 11 | 2018-12-25T03:37:59.000Z | 2021-08-25T14:43:58.000Z | t2t_bert/utils/tensor2tensor/envs/gym_env_problem.py | yyht/bert | 480c909e0835a455606e829310ff949c9dd23549 | [
"Apache-2.0"
] | 9 | 2018-12-27T08:00:44.000Z | 2020-06-08T03:05:14.000Z | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for envs that store their history.
EnvProblem subclasses Problem and also implements the Gym interface (step,
reset, render, close, seed)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing.pool
import time
import gym
import numpy as np
from tensor2tensor.envs import env_problem
from tensor2tensor.envs import trajectory
import tensorflow as tf
class GymEnvProblem(env_problem.EnvProblem):
"""An EnvProblem implemented as a batch of gym envs.
This implementation should work well for cases where the env is not batched by
default ex: any gym env. In this case we create `batch_size` number of envs
and store them in a list. Any function then that interacts with the envs, like
reset, step or close goes over the env list to do the needful, ex: when reset
is called with specific indices we reset only those indices, etc.
The usage of this class will look like the following:
# 1. Creates and initializes the env_problem.
ep = env_problem.EnvProblem(...)
# 2. One needs to call reset() at the start, this resets all envs.
ep.reset()
# 3. Call step with actions for all envs, i.e. len(action) = batch_size
obs, rewards, dones, infos = ep.step(actions)
# 4. Figure out which envs got done and reset only those.
ep.reset(indices=env_problem_utils.done_indices(dones))
# 5. Go back to Step #3 to further interact with the env or just dump the
# generated data to disk by calling:
ep.generate_data(...)
# 6. If we now need to use this object again to play a few more iterations
# perhaps with a different batch size or maybe not recording the data, then
# we need to re-initialize environments and do some book-keeping, call:
ep.initialize_environments(batch_size)
# 7. Go back to Step #2, i.e. reset all envs.
NOTE: Look at `EnvProblemTest.test_interaction_with_env` and/or
`EnvProblemTest.test_generate_data`
NOTE: We rely heavily that the underlying environments expose a gym style
interface, i.e. in addition to reset(), step() and close() we have access to
the following properties: observation_space, action_space, reward_range.
"""
def __init__(self, base_env_name=None, env_wrapper_fn=None, reward_range=None,
**kwargs):
"""Initializes this class by creating the envs and managing trajectories.
Args:
base_env_name: (string) passed to `gym.make` to make the underlying
environment.
env_wrapper_fn: (callable(env): env) Applies gym wrappers to the base
environment.
reward_range: (tuple(number, number) or None) the first element is the
minimum reward and the second is the maximum reward, used to clip and
process the raw reward in `process_rewards`. If None, this is inferred
from the inner environments.
**kwargs: (dict) Arguments passed to the base class.
"""
# Name for the base environment, will be used in `gym.make` in
# the default implementation of `initialize_environments`.
self._base_env_name = base_env_name
# An env generates data when it is given actions by an agent which is either
# a policy or a human -- this is supposed to be the `id` of the agent.
#
# In practice, this is used only to store (and possibly retrieve) history
# to an appropriate directory.
self._agent_id = "default"
# We clip rewards to this range before processing them further, as described
# in `process_rewards`.
self._reward_range = reward_range
# Initialize the environment(s).
# This can either be a list of environments of len `batch_size` or this can
# be a Neural Network, in which case it will be fed input with first
# dimension = `batch_size`.
self._envs = None
self._pool = None
self._env_wrapper_fn = env_wrapper_fn
# Call the super's ctor. It will use some of the member fields, so we call
# it in the end.
super(GymEnvProblem, self).__init__(**kwargs)
@property
def base_env_name(self):
return self._base_env_name
def _verify_same_spaces(self):
"""Verifies that all the envs have the same observation and action space."""
# Pre-conditions: self._envs is initialized.
if self._envs is None:
raise ValueError("Environments not initialized.")
if not isinstance(self._envs, list):
tf.logging.warning("Not checking observation and action space "
"compatibility across envs, since there is just one.")
return
# NOTE: We compare string representations of observation_space and
# action_space because compositional classes like space.Tuple don't return
# true on object comparison.
if not all(
str(env.observation_space) == str(self.observation_space)
for env in self._envs):
err_str = ("All environments should have the same observation space, but "
"don't.")
tf.logging.error(err_str)
# Log all observation spaces.
for i, env in enumerate(self._envs):
tf.logging.error("Env[%d] has observation space [%s]", i,
env.observation_space)
raise ValueError(err_str)
if not all(
str(env.action_space) == str(self.action_space) for env in self._envs):
err_str = "All environments should have the same action space, but don't."
tf.logging.error(err_str)
# Log all action spaces.
for i, env in enumerate(self._envs):
tf.logging.error("Env[%d] has action space [%s]", i, env.action_space)
raise ValueError(err_str)
def initialize_environments(self, batch_size=1, parallelism=1, **kwargs):
"""Initializes the environments.
Args:
batch_size: (int) Number of `self.base_env_name` envs to initialize.
parallelism: (int) If this is greater than one then we run the envs in
parallel using multi-threading.
**kwargs: (dict) Kwargs to pass to gym.make.
"""
assert batch_size >= 1
self._envs = [
gym.make(self.base_env_name, **kwargs) for _ in range(batch_size)
]
self._parallelism = parallelism
self._pool = multiprocessing.pool.ThreadPool(self._parallelism)
if self._env_wrapper_fn is not None:
self._envs = list(map(self._env_wrapper_fn, self._envs))
self._verify_same_spaces()
# If self.reward_range is None, i.e. this means that we should take the
# reward range of the env.
if self.reward_range is None:
self._reward_range = self._envs[0].reward_range
# This data structure stores the history of each env.
#
# NOTE: Even if the env is a NN and can step in all batches concurrently, it
# is still valuable to store the trajectories separately.
self._trajectories = trajectory.BatchTrajectory(batch_size=batch_size)
def assert_common_preconditions(self):
# Asserts on the common pre-conditions of:
# - self._envs is initialized.
# - self._envs is a list.
assert self._envs
assert isinstance(self._envs, list)
@property
def observation_space(self):
return self._envs[0].observation_space
@property
def action_space(self):
return self._envs[0].action_space
@property
def reward_range(self):
return self._reward_range
def seed(self, seed=None):
if not self._envs:
tf.logging.info("`seed` called on non-existent envs, doing nothing.")
return None
if not isinstance(self._envs, list):
tf.logging.warning("`seed` called on non-list envs, doing nothing.")
return None
tf.logging.warning(
"Called `seed` on EnvProblem, calling seed on the underlying envs.")
for env in self._envs:
env.seed(seed)
return super(GymEnvProblem, self).seed(seed=seed)
def close(self):
if not self._envs:
tf.logging.info("`close` called on non-existent envs, doing nothing.")
return
if not isinstance(self._envs, list):
tf.logging.warning("`close` called on non-list envs, doing nothing.")
return
# Call close on all the envs one by one.
for env in self._envs:
env.close()
def _reset(self, indices):
"""Resets environments at indices shouldn't pre-process or record.
Args:
indices: list of indices of underlying envs to call reset on.
Returns:
np.ndarray of stacked observations from the reset-ed envs.
"""
# This returns a numpy array with first dimension `len(indices)` and the
# rest being the dimensionality of the observation.
return np.stack([self._envs[index].reset() for index in indices])
def _step(self, actions):
"""Takes a step in all environments, shouldn't pre-process or record.
Args:
actions: (np.ndarray) with first dimension equal to the batch size.
Returns:
a tuple of stacked raw observations, raw rewards, dones and infos.
"""
assert len(actions) == len(self._envs)
observations = [None] * self.batch_size
rewards = [None] * self.batch_size
dones = [None] * self.batch_size
infos = [{} for _ in range(self.batch_size)]
def apply_step(i):
t1 = time.time()
observations[i], rewards[i], dones[i], infos[i] = self._envs[i].step(
actions[i])
t2 = time.time()
infos[i]["__bare_env_run_time__"] = t2 - t1
if self._parallelism > 1:
self._pool.map(apply_step, range(self.batch_size))
else:
for i in range(self.batch_size):
apply_step(i)
# Convert each list (observations, rewards, ...) into np.array and return a
# tuple.
return tuple(map(np.stack, [observations, rewards, dones, infos]))
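# A minimal concrete subclass sketch (hypothetical class/env names; any
# remaining abstract members come from env_problem.EnvProblem):
#   class CartPoleEnvProblem(GymEnvProblem):
#     def __init__(self, **kwargs):
#       super(CartPoleEnvProblem, self).__init__(
#           base_env_name="CartPole-v0", reward_range=(-1, 1), **kwargs)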
| 35.411765 | 80 | 0.696697 | 1,506 | 10,234 | 4.613546 | 0.244356 | 0.031088 | 0.012666 | 0.010794 | 0.137737 | 0.119315 | 0.100317 | 0.084197 | 0.061744 | 0.043178 | 0 | 0.004008 | 0.219758 | 10,234 | 288 | 81 | 35.534722 | 0.866124 | 0.520031 | 0 | 0.212389 | 0 | 0 | 0.129025 | 0.004508 | 0 | 0 | 0 | 0 | 0.044248 | 1 | 0.115044 | false | 0 | 0.088496 | 0.035398 | 0.318584 | 0.00885 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa62b78dde6f0af318037e70c2093fdfa17a0acc | 287 | py | Python | Lessons/Sorting/Triangle/solution.py | matheuscordeiro/Codility | 617803c2c070592d8d67f747d616f9b6c14585b2 | [
"MIT"
] | null | null | null | Lessons/Sorting/Triangle/solution.py | matheuscordeiro/Codility | 617803c2c070592d8d67f747d616f9b6c14585b2 | [
"MIT"
] | null | null | null | Lessons/Sorting/Triangle/solution.py | matheuscordeiro/Codility | 617803c2c070592d8d67f747d616f9b6c14585b2 | [
"MIT"
] | null | null | null | #!/local/usr/bin/python3
def solution(A):
A = sorted(A)
size_A = len(A)
for i in range(size_A-2):
if (
A[i] + A[i+1] > A[i+2] and
A[i] + A[i+2] > A[i+1] and
A[i+1] + A[i+2] > A[i]
):
return 1
return 0
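if __name__ == "__main__":
    # Quick sanity checks (hypothetical inputs): 5-8-10 forms a triangle, so
    # the first call prints 1; no triple in the second list does, so 0.
    print(solution([10, 2, 5, 1, 8, 20]))  # 1
    print(solution([10, 50, 5, 1]))  # 0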
| 19.133333 | 38 | 0.390244 | 52 | 287 | 2.115385 | 0.384615 | 0.163636 | 0.081818 | 0.072727 | 0.172727 | 0.109091 | 0 | 0 | 0 | 0 | 0 | 0.060976 | 0.428571 | 287 | 14 | 39 | 20.5 | 0.609756 | 0.080139 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa648ba044d003f5130a5363467e278c4cc48748 | 2,684 | py | Python | src/matching_driver.py | river-li/DeepBinDiff | a5f6fa1a23743ca462a126d3636e8fc4099ac841 | [
"BSD-3-Clause"
] | null | null | null | src/matching_driver.py | river-li/DeepBinDiff | a5f6fa1a23743ca462a126d3636e8fc4099ac841 | [
"BSD-3-Clause"
] | null | null | null | src/matching_driver.py | river-li/DeepBinDiff | a5f6fa1a23743ca462a126d3636e8fc4099ac841 | [
"BSD-3-Clause"
] | null | null | null | import sys
import os
import numpy as np
# from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import utility
# import time
import preprocessing
if sys.platform != "win32":
embedding_file = "./vec_all"
func_embedding_file = "./func_vec_all"
node2addr_file = "./data/DeepBD/nodeIndexToCode"
func2addr_file = "./data/DeepBD/functionIndexToCode"
bin_edgelist_file = "./data/DeepBD/edgelist"
bin_features_file = "./data/DeepBD/features"
func_features_file = "./data/DeepBD/func.features"
ground_truth_file = "./data/DeepBD/addrMapping"
else:
embedding_file = ".\\vec_all"
func_embedding_file = ".\\func_vec_all"
node2addr_file = ".\\data\\DeepBD\\nodeIndexToCode"
func2addr_file = ".\\data\\DeepBD\\functionIndexToCode"
bin_edgelist_file = ".\\data\\DeepBD\\edgelist"
bin_features_file = ".\\data\\DeepBD\\features"
func_features_file = ".\\data\\DeepBD\\func.features"
ground_truth_file = ".\\data\\DeepBD\\addrMapping"
# Whether to use DeepWalk to create embeddings for each function.
# Set to False by default, which gives better results for now.
EBD_CALL_GRAPH = False
def pre_matching(bin1_name, bin2_name, toBeMergedBlocks=None):
    """Run TADW on the combined edgelist/features and match basic blocks between the two binaries."""
    if toBeMergedBlocks is None:
        toBeMergedBlocks = {}
# if sys.platform != "win32":
tadw_command = "python3 ./src/performTADW.py --method tadw --input " + bin_edgelist_file + " --graph-format edgelist --feature-file " + bin_features_file + " --output vec_all"
os.system(tadw_command)
ebd_dic, _ = utility.ebd_file_to_dic(embedding_file)
node_in_bin1, _node_in_bin2 = utility.readNodeInfo(node2addr_file)
bin1_mat = []
bin2_mat = []
node_map = {}
for idx, line in ebd_dic.items():
if idx < node_in_bin1:
bin1_mat.append(line)
node_map[str(idx)] = len(bin1_mat) - 1
else:
bin2_mat.append(line)
node_map[str(idx)] = len(bin2_mat) - 1
bin1_mat = np.array(bin1_mat)
bin2_mat = np.array(bin2_mat)
sim_result = utility.similarity_gpu(bin1_mat, bin2_mat)
print("Perform matching...")
matched_pairs, inserted, deleted = utility.matching(node_in_bin1, ebd_dic, sim_result, node_map, toBeMergedBlocks)
print("matched pairs: ")
print(matched_pairs)
# print("Inserted blocks: ")
# print(inserted)
# print("Deleted blocks: ")
# print(deleted)
# if __name__ == '__main__' :
# # here is cross-platform configurations.
# # actually I can do this in more elegant way, but it is enough for testing.
# # np.set_printoptions(threshold=np.inf, suppress=True) # set numpy options
# sys.exit(two_level_matching('yes_830_o1', 'yes_830_o3'))
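# A minimal invocation sketch (hypothetical binary names; assumes the TADW
# input files listed above already exist under ./data/DeepBD/):
#   pre_matching('bin1', 'bin2')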
| 31.952381 | 179 | 0.683681 | 343 | 2,684 | 5.06414 | 0.376093 | 0.055268 | 0.096718 | 0.050662 | 0.339666 | 0.339666 | 0.339666 | 0.339666 | 0.306275 | 0.306275 | 0 | 0.017593 | 0.195231 | 2,684 | 83 | 180 | 32.337349 | 0.786574 | 0.227645 | 0 | 0.043478 | 0 | 0 | 0.257546 | 0.16261 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021739 | false | 0 | 0.108696 | 0 | 0.130435 | 0.065217 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa64bca046d0e91feef2ef2745645f4d6bd68d95 | 8,639 | py | Python | statistic_analysis/evaluate_model.py | VishnuDuttSharma/deep-multirobot-task | eccf44ac5b15659da2c958d67e18e196956a779b | [
"MIT"
] | 3 | 2022-02-26T17:23:16.000Z | 2022-03-24T00:03:33.000Z | statistic_analysis/evaluate_model.py | VishnuDuttSharma/deep-multirobot-task | eccf44ac5b15659da2c958d67e18e196956a779b | [
"MIT"
] | null | null | null | statistic_analysis/evaluate_model.py | VishnuDuttSharma/deep-multirobot-task | eccf44ac5b15659da2c958d67e18e196956a779b | [
"MIT"
] | 2 | 2022-02-26T15:20:10.000Z | 2022-03-08T04:21:32.000Z | import sys
sys.path.append('/home/vishnuds/baxterB/multi_robot/deep-multirobot-task/')
import argparse
from utils.config import *
from constants import *
import torch
import random
import numpy as np
from agents import *
from graphs.models.coverageplanner import CoveragePlannerNet
import time
import pickle
import pandas as pd
def calculate_reward(grid, robot_pos, action_list, fov=FOV, get_mask=False):
"""
    Function to calculate the reward collected by all the robots for a given action vector.
    For this we first update the location of each robot, then create a mask which has 1s only around the new robot locations (a square of side (2*FOV+1) for each robot).
Parameters
----------
grid: 2D grid containing rewards
robot_pos: Current position for each robot on the grid (NUM_ROBOTx2 size vector)
action_list: List of action for each robot
Returns
-------
    total_reward: Total reward collected by the robots using action_list (the action vector),
        or the coverage mask itself if get_mask is True
"""
# Convert the integer actions to 2D vector of location differences using DIR_DICT dictionary
act = np.array([DIR_DICT[k] for k in action_list])
    # Calculate new locations for each robot
new_pos = robot_pos + act
    # Make sure that the new locations are within the grid
new_pos = new_pos.clip(min=0, max=GRID_SIZE-1)
# Initialize a mask of same shape as grid
mask = np.zeros(grid.shape, dtype=int)
# iterate over each robot position
for c_pos, n_pos in zip(robot_pos, new_pos):
# Set the values to 1 in the mask at each robot's fov
# also make sure that the indices do not go out of grid
# Calculate the bounding box ranges for the box generated by robot moving from the current location (c_pos) to new location (n_pos)
# This box has a padding of size FOV on each size
r_lim_lef = max(0, min(c_pos[0]-fov, n_pos[0]-fov))
c_lim_top = max(0, min(c_pos[1]-fov, n_pos[1]-fov))
r_lim_rgt = min(max(c_pos[0]+fov+1, n_pos[0]+fov+1), GRID_SIZE)
c_lim_bot = min(max(c_pos[1]+fov+1, n_pos[1]+fov+1), GRID_SIZE)
        # Set the locations within the mask (i.e. within the robot's vision as it moved) to 1
mask[r_lim_lef:r_lim_rgt, c_lim_top:c_lim_bot] = 1
if get_mask:
return mask
# Find total reward as number of 1s in the masked grid
total_reward = np.sum(grid * mask)
return total_reward
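# A small sanity sketch (hypothetical values; DIR_DICT, GRID_SIZE and FOV come
# from constants as imported above):
#   grid = np.ones((GRID_SIZE, GRID_SIZE), dtype=int)
#   robot_pos = np.array([[5, 5]])
#   calculate_reward(grid, robot_pos, [0])  # == number of cells in the swept FOV box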
# parse the path of the json config file
arg_parser = argparse.ArgumentParser(description="")
arg_parser.add_argument(
'config',
metavar='config_json_file',
default='None',
help='The Configuration file in json format')
arg_parser.add_argument('--mode', type=str, default='train')
arg_parser.add_argument('--log_time_trained', type=str, default='0')
arg_parser.add_argument('--timeid', type=str, default=None)
arg_parser.add_argument('--tgt_feat', type=int, default=20)
arg_parser.add_argument('--rbt_feat', type=int, default=10)
arg_parser.add_argument('--num_agents', type=int, default=10)
arg_parser.add_argument('--map_w', type=int, default=20)
arg_parser.add_argument('--map_density', type=int, default=1)
arg_parser.add_argument('--map_type', type=str, default='map')
arg_parser.add_argument('--trained_num_agents', type=int, default=10)
arg_parser.add_argument('--trained_map_w', type=int, default=20)
arg_parser.add_argument('--trained_map_density', type=int, default=1)
arg_parser.add_argument('--trained_map_type', type=str, default='map')
arg_parser.add_argument('--nGraphFilterTaps', type=int, default=0)
arg_parser.add_argument('--hiddenFeatures', type=int, default=0)
arg_parser.add_argument('--num_testset', type=int, default=4500)
arg_parser.add_argument('--test_epoch', type=int, default=0)
arg_parser.add_argument('--lastest_epoch', action='store_true', default=False)
arg_parser.add_argument('--best_epoch', action='store_true', default=False)
arg_parser.add_argument('--con_train', action='store_true', default=False)
arg_parser.add_argument('--test_general', action='store_true', default=False)
arg_parser.add_argument('--train_TL', action='store_true', default=False)
arg_parser.add_argument('--Use_infoMode', type=int, default=0)
arg_parser.add_argument('--log_anime', action='store_true', default=False)
arg_parser.add_argument('--rate_maxstep', type=int, default=2)
arg_parser.add_argument('--commR', type=int, default=6)
np.random.seed(1337)
random.seed(1337)
args = arg_parser.parse_args()
config = process_config(args)
# print('CONFIG:')
# print(config)
config['device'] = torch.device('cuda:0')
config.mode = 'test'
config.num_agents = 20
config.tgt_feat = 20
config.rbt_feat = 10
config.max_epoch = 500
config.learning_rate = 0.005
config.nGraphFilterTaps = 5
timeid = args.timeid
model = CoveragePlannerNet(config)
filename = f'{config.save_data}/experiments/dcpOE_map20x20_rho1_{config.num_agents}Agent/K{config.nGraphFilterTaps}_HS0/{timeid}/checkpoints/checkpoint_{config.max_epoch}.pth.tar'
# filename = '/home/vishnuds/baxterB/multi_robot/gnn_log_data/dummy_model/checkpoint_500.pth.tar'
print(f'loading model from: {filename}')
checkpoint = torch.load(filename, map_location='cuda:{}'.format(config.gpu_device))
model.load_state_dict(checkpoint['state_dict'])
model = model.to(config.device)
print(model)
for inf_d in [10, 20, 30, 40, 50]:
graph_arr = pickle.load(open(f'./robot{config.num_agents}/inf{inf_d}/grid_data.pkl', 'rb'))
robot_pos_arr = pickle.load(open(f'./robot{config.num_agents}/inf{inf_d}/robot_pos_data.pkl', 'rb'))
model_data_list = pickle.load(open(f'./robot{config.num_agents}/inf{inf_d}/model_data.pkl', 'rb'))
cg_time = pickle.load(open(f'./robot{config.num_agents}/inf{inf_d}/time_data.pkl', 'rb'))
cg_action = pickle.load(open(f'./robot{config.num_agents}/inf{inf_d}/action_data.pkl', 'rb'))
print('###')
print(f'Inf: {inf_d}')
print(graph_arr.shape, robot_pos_arr.shape)
pred_time_list = []
pred_action_list = []
numFeature = (config.tgt_feat + config.rbt_feat )
feat = model_data_list[0]
feat_reshaped = feat[:,:,:numFeature,:].reshape(feat.shape[0], feat.shape[1], numFeature*2)
batch_size = 1
for i in range(0,feat.shape[0],batch_size):
with torch.no_grad():
start_index = i
end_index = i + batch_size
start_time = time.time()
inputGPU = torch.FloatTensor(feat_reshaped[start_index:end_index]).to(config.device)
gsoGPU = torch.FloatTensor(model_data_list[1][start_index:end_index]).to(config.device)
# gsoGPU = gsoGPU.unsqueeze(0)
targetGPU = torch.LongTensor(model_data_list[2][start_index:end_index]).to(config.device)
# Should not transpose if flattening the batch
# batch_targetGPU = targetGPU.permute(1, 0, 2)
batch_targetGPU = targetGPU
# agent.optimizer.zero_grad()
# print('Data shapes: ', inputGPU.shape, gsoGPU.shape)
# model
model.addGSO(gsoGPU)
predict = model(inputGPU)
acts = predict.detach().cpu().numpy().argmax(axis=2)
pred_time_list.append(time.time() - start_time)
pred_action_list.append(acts)
# pred_list_long.append(np.array([p.detach().cpu().numpy() for p in predict]).transpose(1,0,2))
pred_action_list = np.concatenate(pred_action_list, axis=0)
rand_time_list = []
rand_action_list = []
for i in range(feat.shape[0]):
start_time = time.time()
acts = np.random.randint(0,5, (1,inf_d))
rand_time_list.append(time.time() - start_time)
rand_action_list.append(acts)
df = pd.DataFrame(index=1+np.arange(feat.shape[0]))
df['CG_time'] = cg_time
df['Rand_time'] = rand_time_list
df['GNN_time'] = pred_time_list
cg_rwd_list = []
rnd_rwd_list = []
pred_rwd_list = []
for i in range(feat.shape[0]):
grid = graph_arr[i]
robot_pos = robot_pos_arr[i]
rwd = calculate_reward(grid, robot_pos, cg_action[i], fov=FOV, get_mask=False)
cg_rwd_list.append(rwd)
rwd = calculate_reward(grid, robot_pos, rand_action_list[i][0], fov=FOV, get_mask=False)
rnd_rwd_list.append(rwd)
rwd = calculate_reward(grid, robot_pos, pred_action_list[i], fov=FOV, get_mask=False)
pred_rwd_list.append(rwd)
df['CG_reward'] = cg_rwd_list
df['Rand_reward'] = rnd_rwd_list
df['GNN_reward'] = pred_rwd_list
df.to_csv(f'./summary_train_{config.num_agents}_inf_{inf_d}.csv', index=False)
| 36.146444 | 179 | 0.697882 | 1,326 | 8,639 | 4.319005 | 0.220965 | 0.045574 | 0.056574 | 0.09429 | 0.301379 | 0.265235 | 0.240964 | 0.22455 | 0.165532 | 0.132705 | 0 | 0.016646 | 0.172474 | 8,639 | 238 | 180 | 36.298319 | 0.784445 | 0.196319 | 0 | 0.043478 | 0 | 0.007246 | 0.167226 | 0.081061 | 0 | 0 | 0 | 0 | 0 | 1 | 0.007246 | false | 0 | 0.094203 | 0 | 0.115942 | 0.036232 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa68d0a15021a2db7f128a9380b0ab10bd7b50b5 | 3,401 | py | Python | lldb/packages/Python/lldbsuite/test/lang/cpp/global_operators/TestCppGlobalOperators.py | bytesnake/Enzyme | 247606c279920d476645d2e319e574bf8be10fc9 | [
"Apache-2.0"
] | null | null | null | lldb/packages/Python/lldbsuite/test/lang/cpp/global_operators/TestCppGlobalOperators.py | bytesnake/Enzyme | 247606c279920d476645d2e319e574bf8be10fc9 | [
"Apache-2.0"
] | null | null | null | lldb/packages/Python/lldbsuite/test/lang/cpp/global_operators/TestCppGlobalOperators.py | bytesnake/Enzyme | 247606c279920d476645d2e319e574bf8be10fc9 | [
"Apache-2.0"
] | null | null | null | """
Test that global operators are found and evaluated.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestCppGlobalOperators(TestBase):
mydir = TestBase.compute_mydir(__file__)
def prepare_executable_and_get_frame(self):
self.build()
# Get main source file
src_file = "main.cpp"
src_file_spec = lldb.SBFileSpec(src_file)
self.assertTrue(src_file_spec.IsValid(), "Main source file")
# Get the path of the executable
exe_path = self.getBuildArtifact("a.out")
# Load the executable
target = self.dbg.CreateTarget(exe_path)
self.assertTrue(target.IsValid(), VALID_TARGET)
# Break on main function
main_breakpoint = target.BreakpointCreateBySourceRegex(
"// break here", src_file_spec)
self.assertTrue(
main_breakpoint.IsValid() and main_breakpoint.GetNumLocations() >= 1,
VALID_BREAKPOINT)
# Launch the process
args = None
env = None
process = target.LaunchSimple(
args, env, self.get_process_working_directory())
self.assertTrue(process.IsValid(), PROCESS_IS_VALID)
# Get the thread of the process
self.assertTrue(
process.GetState() == lldb.eStateStopped,
PROCESS_STOPPED)
thread = lldbutil.get_stopped_thread(
process, lldb.eStopReasonBreakpoint)
return thread.GetSelectedFrame()
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr21765")
def test_equals_operator(self):
frame = self.prepare_executable_and_get_frame()
test_result = frame.EvaluateExpression("operator==(s1, s2)")
self.assertTrue(
test_result.IsValid() and test_result.GetValue() == "false",
"operator==(s1, s2) = false")
test_result = frame.EvaluateExpression("operator==(s1, s3)")
self.assertTrue(
test_result.IsValid() and test_result.GetValue() == "true",
"operator==(s1, s3) = true")
test_result = frame.EvaluateExpression("operator==(s2, s3)")
self.assertTrue(
test_result.IsValid() and test_result.GetValue() == "false",
"operator==(s2, s3) = false")
def do_new_test(self, frame, expr, expected_value_name):
"""Evaluate a new expression, and check its result"""
expected_value = frame.FindValue(
expected_value_name, lldb.eValueTypeVariableGlobal)
self.assertTrue(expected_value.IsValid())
expected_value_addr = expected_value.AddressOf()
self.assertTrue(expected_value_addr.IsValid())
got = frame.EvaluateExpression(expr)
self.assertTrue(got.IsValid())
self.assertEqual(
got.GetValueAsUnsigned(),
expected_value_addr.GetValueAsUnsigned())
got_type = got.GetType()
self.assertTrue(got_type.IsPointerType())
self.assertEqual(got_type.GetPointeeType().GetName(), "Struct")
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr21765")
def test_operator_new(self):
frame = self.prepare_executable_and_get_frame()
self.do_new_test(frame, "new Struct()", "global_new_buf")
self.do_new_test(frame, "new(new_tag) Struct()", "tagged_new_buf")
| 35.427083 | 81 | 0.654219 | 366 | 3,401 | 5.860656 | 0.314208 | 0.078322 | 0.023776 | 0.032168 | 0.275524 | 0.25641 | 0.181818 | 0.181818 | 0.14359 | 0.086713 | 0 | 0.008867 | 0.237283 | 3,401 | 95 | 82 | 35.8 | 0.818042 | 0.071744 | 0 | 0.171875 | 0 | 0 | 0.095572 | 0 | 0 | 0 | 0 | 0 | 0.21875 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.171875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa6bf1ff4b0ad8a1103a8a372237b9c811319131 | 3,823 | py | Python | src/eddington/fit_result.py | AssafZohar/eddington | c67536c41a66a1f96d0aa85d5113b11b79759a7e | [
"Apache-2.0"
] | null | null | null | src/eddington/fit_result.py | AssafZohar/eddington | c67536c41a66a1f96d0aa85d5113b11b79759a7e | [
"Apache-2.0"
] | null | null | null | src/eddington/fit_result.py | AssafZohar/eddington | c67536c41a66a1f96d0aa85d5113b11b79759a7e | [
"Apache-2.0"
] | null | null | null | """Fitting result class that will be returned by the fitting algorithm."""
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import scipy.stats as stats
from eddington.print_util import to_precise_string
@dataclass(repr=False) # pylint: disable=too-many-instance-attributes
class FitResult:
"""
Dataclass that contains the relevant parameters returned by a fitting algorithm.
:param a0: The initial guess for the fit function parameters.
:param a: The result for the fitting parameters.
:param aerr: Estimated errors of a.
:param arerr: Estimated relative errors of a (equivalent to aerr/a).
:param acov: Covariance matrix of a.
:param degrees_of_freedom: Number of degrees of freedom of the fitting.
:param chi2: Optimization evaluation for the fit.
:param chi2_reduced: Reduced chi2 (chi2 divided by the degrees of freedom).
:param p_probability: P-probability (p-value) of the fitting, evaluated from
chi2 and the degrees of freedom.
"""
a0: np.ndarray # pylint: disable=invalid-name
a: np.ndarray # pylint: disable=invalid-name
aerr: np.ndarray
arerr: np.ndarray = field(init=False)
acov: np.ndarray
degrees_of_freedom: int
chi2: float
chi2_reduced: float = field(init=False)
p_probability: float = field(init=False)
precision: int = field(default=3)
__pretty_string: Optional[str] = field(default=None, init=False)
def __post_init__(self):
"""Post init methods."""
self.aerr = np.array(self.aerr)
self.acov = np.array(self.acov)
self.a0 = np.array(self.a0)
self.a = np.array(self.a)
self.arerr = np.abs(self.aerr / self.a) * 100
self.chi2_reduced = self.chi2 / self.degrees_of_freedom
self.p_probability = stats.chi2.sf(self.chi2, self.degrees_of_freedom)
def print_or_export(self, file_path=None):
"""
Write the result to a file or print it to console.
:param file_path: str or None. Path to write the result to. If None, prints
to console.
"""
if file_path is None:
print(self.pretty_string)
return
with open(file_path, mode="w") as output_file:
output_file.write(self.pretty_string)
@property
def pretty_string(self):
"""Pretty representation string."""
if self.__pretty_string is None:
self.__pretty_string = self.__build_pretty_string()
return self.__pretty_string
def __repr__(self):
"""Representation string."""
return self.pretty_string
def __build_pretty_string(self):
old_precision = np.get_printoptions()["precision"]
np.set_printoptions(precision=self.precision)
a_value_string = "\n".join(
[
self.__a_value_string(i, a, aerr, arerr)
for i, (a, aerr, arerr) in enumerate(zip(self.a, self.aerr, self.arerr))
]
)
repr_string = f"""Results:
========
Initial parameters' values:
\t{" ".join(str(i) for i in self.a0)}
Fitted parameters' values:
{a_value_string}
Fitted parameters covariance:
{self.acov}
Chi squared: {to_precise_string(self.chi2, self.precision)}
Degrees of freedom: {self.degrees_of_freedom}
Chi squared reduced: {to_precise_string(self.chi2_reduced, self.precision)}
P-probability: {to_precise_string(self.p_probability, self.precision)}
"""
np.set_printoptions(precision=old_precision)
return repr_string
def __a_value_string(self, i, a, aerr, arerr): # pylint: disable=invalid-name
a_string = to_precise_string(a, self.precision)
aerr_string = to_precise_string(aerr, self.precision)
arerr_string = to_precise_string(arerr, self.precision)
return f"\ta[{i}] = {a_string} \u00B1 {aerr_string} ({arerr_string}% error)"
| 36.409524 | 88 | 0.675647 | 518 | 3,823 | 4.793436 | 0.252896 | 0.048329 | 0.042288 | 0.028997 | 0.131293 | 0.074104 | 0 | 0 | 0 | 0 | 0 | 0.008073 | 0.222338 | 3,823 | 104 | 89 | 36.759615 | 0.827111 | 0.26079 | 0 | 0 | 0 | 0 | 0.185377 | 0.047637 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0 | 0.073529 | 0 | 0.411765 | 0.088235 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa6d3303ab07a6e2e5b2e47a98fbad8404aad9a5 | 3,807 | py | Python | utils/dataset.py | ixtiyoruz/segmentation_for_road_lines | 0b307e8722bd94b19aa65eee6ae62456186c87ce | [
"MIT"
] | 2 | 2020-05-26T06:58:35.000Z | 2021-02-26T07:17:31.000Z | utils/dataset.py | ixtiyoruz/segmentation_for_road_lines | 0b307e8722bd94b19aa65eee6ae62456186c87ce | [
"MIT"
] | null | null | null | utils/dataset.py | ixtiyoruz/segmentation_for_road_lines | 0b307e8722bd94b19aa65eee6ae62456186c87ce | [
"MIT"
] | null | null | null | """
author: ikhtiyor
date: 27.11.2019
this script is designed for multi-GPU training, though it should also work with a single GPU
original: https://github.com/mcdavid109/Multi-GPU-Training/blob/master/TrainingDemo.ipynb
"""
import tensorflow as tf
from threading import Thread
import cv2
import numpy as np
class Dataset():
def __init__(self,x_paths, y_paths,y_exist, batch_size, img_height, img_width, no_of_classes=5):
self.x_paths = x_paths
self.y_paths = y_paths
self.y_exist = y_exist
self.batch_size = batch_size
self.len_data = len(x_paths)
self._make_inputs()
self.idx = -1
self.num_threads = 2
self.num_batch = self.len_data // self.batch_size + 1
self.img_height = img_height
self.img_width = img_width
self.no_of_classes = no_of_classes
self.augmentators = []
# this is only for segmentation
self.layer_idx = np.arange(self.img_height).reshape(self.img_height, 1)
self.component_idx = np.tile(np.arange(self.img_width), (self.img_height, 1))
def make_augmentators(self, augment_fc):
self.augmentators.append(augment_fc)
def _make_inputs(self):
self.inputs = tf.placeholder(shape=[self.img_height,self.img_width,3],dtype=tf.float32,name='data_x')
self.labels = tf.placeholder(shape=[self.img_height, self.img_width, self.no_of_classes],dtype=tf.int32,name='data_y')
self.line_existance_labels = tf.placeholder(tf.float32, shape=[self.no_of_classes-1], name="data_existance_y")
self.queue = tf.FIFOQueue(shapes=[[self.img_height,self.img_width,3],[self.img_height, self.img_width, self.no_of_classes], [self.no_of_classes-1]],
dtypes=[tf.float32, tf.float32, tf.float32],
shared_name="fifoqueue",capacity=self.batch_size*2)
self.enqueue_op = self.queue.enqueue([self.inputs,self.labels, self.line_existance_labels])
self._queue_close = self.queue.close(cancel_pending_enqueues=True)
def next_batch(self):
batch_x , batch_y, batch_existance_y = self.queue.dequeue_many(self.batch_size)
return batch_x, batch_y, batch_existance_y
def close_queue(self, session):
session.run(self._queue_close)
def _pre_batch_queue(self,sess,coord):
while not coord.should_stop():
self.idx += 1
index = self.idx % self.len_data
# read the next img:
img = cv2.imread(self.x_paths[index], -1)
# read existance label as well
train_existance_label= self.y_exist[index]
# read the next label:
trainId_label = cv2.imread(self.y_paths[index], -1)
for augment_fc in self.augmentators:
img, trainId_label = augment_fc((img, trainId_label))
# convert the label to onehot:
onehot_label = np.zeros((self.img_height, self.img_width, self.no_of_classes), dtype=np.float32)
onehot_label[self.layer_idx, self.component_idx, np.int32(trainId_label)] = 1
sess.run(self.enqueue_op,feed_dict = {self.inputs : img,self.labels: onehot_label, self.line_existance_labels:train_existance_label})
def start_queue_threads(self,sess,coord):
queue_threads = [Thread(target=self._pre_batch_queue, args=(sess, coord))
for i in range(self.num_threads)]
for queue_thread in queue_threads:
coord.register_thread(queue_thread)
queue_thread.daemon = True
queue_thread.start()
return queue_threads
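# Hedged usage sketch (TF1-style session; the variable names below are illustrative):
# dataset = Dataset(x_paths, y_paths, y_exist, batch_size=8, img_height=256, img_width=512)
# batch_x, batch_y, batch_exist = dataset.next_batch()
# with tf.Session() as sess:
#     coord = tf.train.Coordinator()
#     threads = dataset.start_queue_threads(sess, coord)
#     ...  # run training ops that consume batch_x / batch_y / batch_exist
#     coord.request_stop()
#     dataset.close_queue(sess)
#     coord.join(threads, stop_grace_period_secs=5)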
| 40.5 | 156 | 0.638035 | 518 | 3,807 | 4.42471 | 0.264479 | 0.048866 | 0.051047 | 0.041885 | 0.151832 | 0.128709 | 0.118674 | 0.083333 | 0.083333 | 0.056719 | 0 | 0.016037 | 0.262937 | 3,807 | 94 | 157 | 40.5 | 0.800784 | 0.092987 | 0 | 0 | 0 | 0 | 0.010803 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.122807 | false | 0 | 0.070175 | 0 | 0.245614 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa6e3bc01a42670eb25d6faf12fc1c7eb5d3b785 | 6,374 | py | Python | logicmonitor_sdk/models/privilege.py | JeremyTangCD/lm-sdk-python | 2a15e055e5a3f72d2f2e4fb43bdbed203c5a9983 | [
"Apache-2.0"
] | null | null | null | logicmonitor_sdk/models/privilege.py | JeremyTangCD/lm-sdk-python | 2a15e055e5a3f72d2f2e4fb43bdbed203c5a9983 | [
"Apache-2.0"
] | null | null | null | logicmonitor_sdk/models/privilege.py | JeremyTangCD/lm-sdk-python | 2a15e055e5a3f72d2f2e4fb43bdbed203c5a9983 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Privilege(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'object_name': 'str',
'sub_operation': 'str',
'operation': 'str',
'object_id': 'str',
'object_type': 'str'
}
attribute_map = {
'object_name': 'objectName',
'sub_operation': 'subOperation',
'operation': 'operation',
'object_id': 'objectId',
'object_type': 'objectType'
}
def __init__(self, object_name=None, sub_operation=None, operation=None, object_id=None, object_type=None): # noqa: E501
"""Privilege - a model defined in Swagger""" # noqa: E501
self._object_name = None
self._sub_operation = None
self._operation = None
self._object_id = None
self._object_type = None
self.discriminator = None
if object_name is not None:
self.object_name = object_name
if sub_operation is not None:
self.sub_operation = sub_operation
self.operation = operation
self.object_id = object_id
self.object_type = object_type
@property
def object_name(self):
"""Gets the object_name of this Privilege. # noqa: E501
:return: The object_name of this Privilege. # noqa: E501
:rtype: str
"""
return self._object_name
@object_name.setter
def object_name(self, object_name):
"""Sets the object_name of this Privilege.
:param object_name: The object_name of this Privilege. # noqa: E501
:type: str
"""
self._object_name = object_name
@property
def sub_operation(self):
"""Gets the sub_operation of this Privilege. # noqa: E501
:return: The sub_operation of this Privilege. # noqa: E501
:rtype: str
"""
return self._sub_operation
@sub_operation.setter
def sub_operation(self, sub_operation):
"""Sets the sub_operation of this Privilege.
:param sub_operation: The sub_operation of this Privilege. # noqa: E501
:type: str
"""
self._sub_operation = sub_operation
@property
def operation(self):
"""Gets the operation of this Privilege. # noqa: E501
:return: The operation of this Privilege. # noqa: E501
:rtype: str
"""
return self._operation
@operation.setter
def operation(self, operation):
"""Sets the operation of this Privilege.
:param operation: The operation of this Privilege. # noqa: E501
:type: str
"""
if operation is None:
raise ValueError("Invalid value for `operation`, must not be `None`") # noqa: E501
self._operation = operation
@property
def object_id(self):
"""Gets the object_id of this Privilege. # noqa: E501
:return: The object_id of this Privilege. # noqa: E501
:rtype: str
"""
return self._object_id
@object_id.setter
def object_id(self, object_id):
"""Sets the object_id of this Privilege.
:param object_id: The object_id of this Privilege. # noqa: E501
:type: str
"""
if object_id is None:
raise ValueError("Invalid value for `object_id`, must not be `None`") # noqa: E501
self._object_id = object_id
@property
def object_type(self):
"""Gets the object_type of this Privilege. # noqa: E501
:return: The object_type of this Privilege. # noqa: E501
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""Sets the object_type of this Privilege.
:param object_type: The object_type of this Privilege. # noqa: E501
:type: str
"""
if object_type is None:
raise ValueError("Invalid value for `object_type`, must not be `None`") # noqa: E501
self._object_type = object_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Privilege, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Privilege):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 28.58296 | 304 | 0.590681 | 757 | 6,374 | 4.796565 | 0.187583 | 0.046268 | 0.082622 | 0.078491 | 0.412834 | 0.331314 | 0.300468 | 0.282016 | 0.108235 | 0.087304 | 0 | 0.016778 | 0.317383 | 6,374 | 222 | 305 | 28.711712 | 0.817743 | 0.324443 | 0 | 0.069307 | 0 | 0 | 0.09282 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.158416 | false | 0 | 0.029703 | 0 | 0.326733 | 0.019802 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa6f105c15e2c14c924c8bb7b892e931a1b7b022 | 248 | py | Python | control flow/ensure_has_divisible.py | gdc3000/advanced-python | 54d0cd1198d508cf687617af61869678c2b3c71b | [
"MIT"
] | null | null | null | control flow/ensure_has_divisible.py | gdc3000/advanced-python | 54d0cd1198d508cf687617af61869678c2b3c71b | [
"MIT"
] | null | null | null | control flow/ensure_has_divisible.py | gdc3000/advanced-python | 54d0cd1198d508cf687617af61869678c2b3c71b | [
"MIT"
] | null | null | null | items = [2, 36, 25, 9]
divisor = 12
for item in items:
if item % divisor == 0:
found = item
break
else: #no break
items.append(divisor)
found = divisor
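# Note: the `else` clause of a for-loop runs only when the loop finishes without
# hitting `break`, i.e. when no existing item was divisible by the divisor.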
print(f'{items} contain {found} which is a multiple of {divisor}') | 20.666667 | 66 | 0.608871 | 37 | 248 | 4.081081 | 0.675676 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.05 | 0.274194 | 248 | 12 | 66 | 20.666667 | 0.788889 | 0.032258 | 0 | 0 | 0 | 0 | 0.233333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fa711b437bb42bbb75e4cf45b9096758421c949e | 6,810 | py | Python | src/Draft.py | brycewilliams99/NFL-Draft-Simulation | 00b9b226e6b2a36f9b1e686d836fd8ee68e26f35 | [
"CC0-1.0"
] | null | null | null | src/Draft.py | brycewilliams99/NFL-Draft-Simulation | 00b9b226e6b2a36f9b1e686d836fd8ee68e26f35 | [
"CC0-1.0"
] | null | null | null | src/Draft.py | brycewilliams99/NFL-Draft-Simulation | 00b9b226e6b2a36f9b1e686d836fd8ee68e26f35 | [
"CC0-1.0"
] | null | null | null |
import nflgame
import random
from os import system, name
'''
The Draft class deals with the fantasy league distribution of players to the teams participating.
'''
class Draft:
'''
Methods:
player_pick(): Function that helps the user draft the players they want onto their team.
Returns:
user_team (dict): A dictionary that has the users players.
'''
def __init__(self):
'''
__init__ is used to initialize local variables.
'''
self.taken = []
self.pos_list = ['QB','RB','WR','TE','K','LB','DB']
self.user_team = {}
# Dictionary to store the user's players.
self.bot_team = {}
self.bot_team1 = {}
self.bot_team2 = {}
self.bot_team3 = {}
#qb_pos = 'QB'
#rb_pos = 'RB' These are just place holders used for reference, will be deleted
#wr_pos = 'WR' in the end.
#te_pos = 'TE'
#kicker_pos = 'K'
#def_pos = 'D/SP'
def player_pick(self, pos, taken, user_team):
'''
player_pick is a function that is used to let the user pick and choose who they want on their team.
Args:
pos (str): The first argument is a string that is pre-defined, so the user would not
see this variable until they are done with the draft.
taken (list): The second argument is a list that stores all of the players that have already
been drafted by other users.
user_team (dict): user_team is a dictionary that stores all of the players that the user has on
his/her team.
Returns:
dict: the function fills a dictionary that holds the user's picks, formatted as
{pos: pos_name}, e.g. {'QB': 'Tom Brady'}.
If the chosen player is already taken, an error message is shown and the user is re-prompted.
'''
player_list = []
games = nflgame.games(2019, 2, kind='REG')
players = nflgame.combine_game_stats(games)
meta = nflgame.players
print()
print(pos)
for player in players:
if player.player.position in [pos]:
print(meta[player.playerid].name)
player_name = meta[player.playerid].name
player_list.append(player_name)
print()
pos_name = input("Enter the " + pos + "'s name you would like to draft from above list (Case Sensitive): ")
print()
hit = False
while hit == False:
#Check to see if the player is already taken.
if pos_name in taken:
#If the player is already taken, then error message is given.
pos_name = input("He is taken! Enter another " + pos +"'s name you would like to draft (Case Sensitive): ")
elif pos_name not in player_list:
pos_name = input("That player is not a " + pos +" or was spelled wrong! Enter another " + pos +"'s name you would like to draft (Case Sensitive): ")
else:
#If player is available, it will print out that player was added.
print(pos_name + " was added to your team!")
#Player is then added to the taken list.
self.taken.append(pos_name)
#Player stored in Users team.
self.user_team[pos]=pos_name
hit = True
def bot_pick(self, pos, taken, bot_team):
'''
bot_pick is a function that is used to randomly generate bot teams for user to play against.
Args:
pos (str): The first argument is a list of all of the positions so they can be looped through.
taken (list): The second argument is a list that stores all of the players that have already
been drafted by other users.
bot_team (dict): bot_team is a dictionary that stores all of the players that the bot has on
its team.
Returns:
dict: the function fills a dictionary that holds a randomly generated team for the bot, formatted as
{pos: pos_name}, e.g. {'QB': 'Tom Brady'}.
'''
player_list = []
games = nflgame.games(2019, 2, kind='REG')
players = nflgame.combine_game_stats(games)
meta = nflgame.players
for player in players:
if player.player.position in [pos]:
player_name = meta[player.playerid].name
player_list.append(player_name)
hit = False
while hit == False:
pos_name = random.choice(player_list)
if pos_name in taken:
hit = False
else:
self.taken.append(pos_name)
self.bot_team[pos]=pos_name
hit = True
def draft_start(self):
'''
draft_start is a method that takes care of the draft simulation as a whole.
Args:
self: The self argument is just the data that is being called from the Draft class.
Returns:
The method returns the dictionaries for the user and the three bot teams that are participating.
'''
print("Welcome to the Draft!")
choice_draft_check = 0
choice_draft = input("Would you like to start the Draft? Y/N: ")
while choice_draft_check != 1:
if choice_draft == "Y" or choice_draft == "y":
choice_draft_check += 1
elif choice_draft == "N" or choice_draft == "n":
print("Restart Program")
return
else:
choice_draft = input("Invalid choice! Would you like to start the Draft? Y/N: ")
#User Team
for pos in self.pos_list:
self.player_pick(pos, self.taken, self.user_team)
print("Your team consists of: " + str(self.user_team))
print ()
#Bot 1 Team
self.bot_team1 = self.bot_team
for pos in self.pos_list:
self.bot_pick(pos,self.taken,self.bot_team)
print("Bot 1 team consists of: " + str(self.bot_team1))
print()
#Bot 2 Team
self.bot_team2 = self.bot_team
for pos in self.pos_list:
self.bot_pick(pos,self.taken,self.bot_team)
print("Bot 2 team consists of: " + str(self.bot_team2))
print()
#Bot 3 Team
self.bot_team3 = self.bot_team
for pos in self.pos_list:
self.bot_pick(pos,self.taken,self.bot_team)
print("Bot 3 team consists of: " + str(self.bot_team3))
print()
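# Hedged usage sketch:
# draft = Draft()
# draft.draft_start()  # runs the interactive draft, filling user_team and the three bot teams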
| 35.65445 | 164 | 0.56094 | 900 | 6,810 | 4.137778 | 0.215556 | 0.037594 | 0.023631 | 0.016112 | 0.494629 | 0.435822 | 0.392589 | 0.379699 | 0.335124 | 0.292696 | 0 | 0.00645 | 0.362555 | 6,810 | 190 | 165 | 35.842105 | 0.851417 | 0.363877 | 0 | 0.505747 | 0 | 0 | 0.138781 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045977 | false | 0 | 0.034483 | 0 | 0.103448 | 0.183908 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3afa5e3bc12dc3c00dfeba2bc72592cde7e21518 | 8,735 | py | Python | src/dijkstar/graph.py | wylee/Dijkstar | ee0615ea909ef1c6b79275efa95715e52e1d9d45 | [
"MIT"
] | 43 | 2018-04-03T07:31:59.000Z | 2022-03-12T20:12:25.000Z | src/dijkstar/graph.py | wylee/Dijkstar | ee0615ea909ef1c6b79275efa95715e52e1d9d45 | [
"MIT"
] | 16 | 2018-05-12T14:41:55.000Z | 2022-02-10T11:36:25.000Z | src/dijkstar/graph.py | wylee/Dijkstar | ee0615ea909ef1c6b79275efa95715e52e1d9d45 | [
"MIT"
] | 16 | 2017-11-25T11:00:26.000Z | 2021-09-27T20:27:34.000Z | import marshal
import os
from collections.abc import MutableMapping
from copy import copy
try:
import cPickle as pickle
except ImportError: # pragma: no cover
import pickle
class Graph(MutableMapping):
"""A very simple graph type.
Its structure looks like this::
{u: {v: e, ...}, ...} # Node v is adjacent to u via edge e
Edges can be of any type. Nodes have to be hashable since they're
used as dictionary keys. ``None`` should *not* be used as a node.
Graphs are *directed* by default. To create an undirected graph, use
the ``undirected`` flag:
>>> graph = Graph(undirected=True)
Note that all this does is automatically add the edge ``(v, u)``
when ``(u, v)`` is added. In addition, when a node is deleted, its
incoming edges will be removed as well.
"""
def __init__(self, data=None, undirected=False):
self._data = {}
self._undirected = undirected
if data is not None:
self.update(data)
def __getitem__(self, u):
return self.get_node(u)
def __setitem__(self, u, neighbors):
self.add_node(u, neighbors)
def __delitem__(self, u):
self.remove_node(u)
def __iter__(self):
return iter(self._data)
def __len__(self):
return self.node_count
def __eq__(self, other):
if isinstance(other, dict):
return self._data == other
return self._data == other._data
def __repr__(self):
return repr(self._data)
def get_data(self):
"""Return the underlying data dict."""
return self._data
def subgraph(self, nodes, disconnect=False):
"""Get a subgraph with the specified nodes.
If ``disconnect`` is specified, the nodes will be disconnected
from each other; this is useful when creating annex graphs.
"""
subgraph = self.__class__()
for u in nodes:
neighbors = self[u]
for v, edge in neighbors.items():
u, v, edge = copy(u), copy(v), copy(edge)
subgraph.add_edge(u, v, edge)
if disconnect:
for u in nodes:
neighbors = subgraph[u]
for v in nodes:
if v in neighbors:
del neighbors[v]
return subgraph
def add_edge(self, u, v, edge=None):
"""Add an ``edge`` from ``u`` to ``v``.
If the graph is undirected, the ``edge`` will be added from
``v`` to ``u`` also.
"""
data = self._data
undirected = self._undirected
if u in data:
neighbors = data[u]
neighbors[v] = edge
else:
data[u] = {v: edge}
if undirected:
if v in data:
neighbors = data[v]
neighbors[u] = edge
else:
data[v] = {u: edge}
elif v not in data:
data[v] = {}
return edge
def get_edge(self, u, v):
"""Get edge ``(u, v)``."""
return self._data[u][v]
def remove_edge(self, u, v):
"""Remove edge ``(u, v)``."""
data = self._data
del data[u][v]
if u in data[v]:
del data[v][u]
@property
def edge_count(self):
count = sum(len(neighbors) for neighbors in self._data.values())
if self._undirected:
assert count % 2 == 0
count = count // 2
return count
def add_node(self, u, neighbors=None):
"""Add node ``u`` and, optionally, its ``neighbors``.
Adds or updates the node ``u``. If ``u`` isn't already in the
graph, it will be created with the specified ``neighbors``. If
it is, it will be updated with the specified ``neighbors``.
Note that if ``u`` is already in the graph, only its existing
neighbors that are *also* specified in ``neighbors`` will be
affected; other neighbors will be left as is. To clear a node
completely, use ``del graph[u]``.
``neighbors``
An optional dict of neighbors like ``{v1: e1, v2: e2, ...}``.
"""
data = self._data
undirected = self._undirected
directed = not undirected
if neighbors is None:
neighbors = {}
if directed or u not in data:
# For a directed graph, add u if it's not present or replace
# it completely if is.
#
# For an undirected graph, add u if it's not present. If it
# is, add new neighbors and update existing neighbors, but
# leave other neighbors alone.
data[u] = {}
node_data = data[u]
for v, e in neighbors.items():
node_data[v] = e
if undirected:
if v not in data:
data[v] = {u: e}
else:
data[v][u] = e
elif v not in data:
data[v] = {}
return node_data
def get_node(self, u):
"""Get node ``u``."""
return self._data[u]
def remove_node(self, u):
"""Remove node ``u``.
In addition to removing the node itself from the underlying data
dict, which in turn removes its outgoing edges, this also
removes the node's incoming edges.
"""
data = self._data
undirected = self._undirected
neighbors = data[u]
if undirected:
for v in neighbors:
del data[v][u]
else:
# Remove edges from all other nodes to the removed node.
for neighbors in data.values():
if u in neighbors:
del neighbors[u]
del data[u]
@property
def node_count(self):
return len(self._data)
@classmethod
def _read(cls, reader, from_):
"""Read from path or open file using specified reader."""
if isinstance(from_, str):
with open(from_, "rb") as fp:
data = reader(fp)
else:
data = reader(from_)
return cls(data)
def _write(self, writer, to):
"""Write to path or open file using specified writer."""
if isinstance(to, str):
with open(to, "wb") as fp:
writer(self._data, fp)
else:
writer(self._data, to)
@classmethod
def guess_load(cls, from_, ext=None):
"""Read graph based on extension or attempt all loaders.
If a file name with an extension is passed *or* a file and an
extension are passed, load the graph from the file based on the
extension.
Otherwise, try to load the file using pickle, and if that fails,
with marshal.
"""
if not ext and isinstance(from_, str):
_, ext = os.path.splitext(from_)
if ext:
ext = ext.lstrip(".")
if ext == "pickle":
return cls.load(from_)
elif ext == "marshal":
return cls.unmarshal(from_)
try:
return Graph.load(from_)
except pickle.UnpicklingError:
from_.seek(0)
try:
# NOTE: We don't simply call Graph.unmarshal() here
# because errors raised by Graph._read() when it calls
# Graph(data) could be conflated with errors raised by
# marshal.load().
data = marshal.load(from_)
except (EOFError, ValueError, TypeError):
pass
else:
return cls(data)
raise ValueError(
"Could not guess how to load graph; Graph.guess_load() requires either a file with "
"a .pickle or .marshal extension, for the extension/type of the file to be specified, "
"or for the file to be loadable with Graph.load() or Graph.unmarshal()."
)
@classmethod
def load(cls, from_):
"""Read graph using pickle."""
return cls._read(pickle.load, from_)
def dump(self, to):
"""Write graph using pickle."""
self._write(pickle.dump, to)
@classmethod
def unmarshal(cls, from_):
"""Read graph using marshal.
Marshalling is quite a bit faster than pickling, but only the
following types are supported: booleans, integers, long
integers, floating point numbers, complex numbers, strings,
Unicode objects, tuples, lists, sets, frozensets, dictionaries,
and code objects.
"""
return cls._read(marshal.load, from_)
def marshal(self, to):
"""Write graph using marshal."""
self._write(marshal.dump, to)
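# Hedged usage sketch (edges can be any type; plain numbers work as weights):
# graph = Graph()
# graph.add_edge("a", "b", 7)
# graph.add_edge("b", "c", 3)
# graph.get_edge("a", "b")  # -> 7
# graph["a"]                # -> {"b": 7}
# graph.node_count          # -> 3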
| 29.710884 | 99 | 0.547911 | 1,103 | 8,735 | 4.247507 | 0.213962 | 0.029029 | 0.014941 | 0.006403 | 0.085592 | 0.059125 | 0.020918 | 0.020918 | 0 | 0 | 0 | 0.001419 | 0.35478 | 8,735 | 293 | 100 | 29.812287 | 0.829844 | 0.328105 | 0 | 0.234568 | 0 | 0 | 0.046678 | 0 | 0 | 0 | 0 | 0 | 0.006173 | 1 | 0.154321 | false | 0.006173 | 0.04321 | 0.030864 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3afacb281aacfb22424406c2e024ce4e5a5d1a91 | 4,032 | py | Python | mapgenerator.py | fakeseph/utmun2020 | eb3844805056e47ac1c996f8dfef941765d6f6f4 | [
"CC0-1.0"
] | null | null | null | mapgenerator.py | fakeseph/utmun2020 | eb3844805056e47ac1c996f8dfef941765d6f6f4 | [
"CC0-1.0"
] | null | null | null | mapgenerator.py | fakeseph/utmun2020 | eb3844805056e47ac1c996f8dfef941765d6f6f4 | [
"CC0-1.0"
] | null | null | null | from typing import List, TextIO
import math
import pygame
pygame.init()
white = (255,255,255)
black = (0,0,0)
width = 900
height = 750
gameDisplay = pygame.display.set_mode((width, height))
gameDisplay.fill(white)
######################################################
# helper functions
def latlongtopixel(lat, long):
x = (1150 * (180 + float(long)) / 360) % 1150 + (1150 / 2)
latRad = float(lat) * math.pi / 180
mercN = math.log(math.tan((math.pi / 4) + (latRad / 2)))
y = (1150 / 2) - (1150 * mercN / (2 * math.pi))
return [int(x - (1150 / 2)), int(y)]
#x = (width * (180 + int(long)) / 360) % width + (width / 2)
#latRad = int(lat) * math.pi / 180
#mercN = math.log(math.tan((math.pi / 4) + (latRad / 2)))
#y = (height / 2) - (width * mercN / (2 * math.pi))
# return [int(x - (width / 2)), int(y)]
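# Worked example (hedged): latlongtopixel(0, 0) — the equator/prime-meridian point —
# maps to (575, 575), the centre of the 1150-px Mercator band used above.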
def findgdp(country):
print('$' + str(countrytogdp[country]) + ' in 2011 US dollars')
# draw the african continent in grey
contpoints = []
contdata = open('continent.csv')
contdata.readline()
contline = contdata.readline()
while contline != '':
startoflat1 = contline.find(',')
endoflat1 = contline.find(',', startoflat1 + 1)
startoflong1 = endoflat1 + 1
latitude1 = float(contline[(startoflat1 + 1):endoflat1])
longitude1 = float(contline[startoflong1:(len(contline) - 1)])
c1 = latlongtopixel(latitude1, longitude1)
contpoints.append(c1)
contline = contdata.readline()
pygame.draw.polygon(gameDisplay, (112, 112, 112), tuple(contpoints))
######################################################
# function that takes all border coordinates in one file and adds to a coords_dict
coords_dict = {}
borderdata = open('africaborders.csv')
borderdata.readline()
dataline = borderdata.readline()
while dataline != '':
startoflat = dataline.find(',')
endoflat = dataline.find(',', startoflat + 1)
startoflong = endoflat + 1
latitude = dataline[(startoflat + 1):endoflat]
longitude = dataline[startoflong:(len(dataline) - 1)]
country_name = dataline[:startoflat]
if country_name not in coords_dict:
coords_dict[country_name] = []
coords_dict[country_name].append([latitude, longitude])
dataline = borderdata.readline()
# gathering gdp data
gdpdata = open('gdpdata.csv')
gdpdata.readline()
gdpstr = gdpdata.readline()
countrytogdp = {}
# drawing
list_of_countries = list(coords_dict.keys())
listofcoords = []
for name in list_of_countries:
country_coordinates = coords_dict[name]
for item in country_coordinates:
latpoint = float(item[0])
longpoint = float(item[1])
c = latlongtopixel(latpoint, longpoint)
listofcoords.append(c)
# function that reads gdp data and determines colour
gdp = float(gdpstr[9:(len(gdpstr) - 2)])
countrytogdp[name] = gdp
if 0 < gdp <= 500:
colour = (18, 82, 123)
elif 500 < gdp <= 1000:
colour = (25, 112, 167)
elif 1000 < gdp <= 1500:
colour = (44, 154, 224)
elif 1500 < gdp <= 2000:
colour = (89, 176, 230)
elif 2000 < gdp <= 2500:
colour = (111, 186, 234)
elif 2500 < gdp <= 3000:
colour = (133, 197, 237)
elif 3000 < gdp <= 3500:
colour = (155, 208, 241)
elif 3500 < gdp <= 4000:
colour = (178, 218, 244)
elif 4000 < gdp <= 4500:
colour = (200, 229, 247)
elif 4500 < gdp <= 5000:
colour = (222, 240, 250)
elif 5000 < gdp:
colour = (245, 250, 253)
pygame.draw.polygon(gameDisplay, colour, tuple(listofcoords))
pygame.draw.polygon(gameDisplay, black, tuple(listofcoords), 1)
# resetting variables
listofcoords = []
if gdpstr != '':
gdpstr = gdpdata.readline()
######################################################
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
pygame.display.update() | 26.352941 | 82 | 0.588294 | 468 | 4,032 | 5.029915 | 0.363248 | 0.029737 | 0.021665 | 0.035684 | 0.06627 | 0.06627 | 0.06627 | 0.047579 | 0.047579 | 0.047579 | 0 | 0.092472 | 0.235615 | 4,032 | 153 | 83 | 26.352941 | 0.671317 | 0.115823 | 0 | 0.084211 | 0 | 0 | 0.019168 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021053 | false | 0 | 0.031579 | 0 | 0.063158 | 0.010526 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3afc12d762baf67dcc94658852521e272b20fdb8 | 4,703 | py | Python | OtherTransformations/FileToVar/UpdateVariables.py | mintproject/ModelCatalog | 194a2bfcd0b43b3c2703493e67eef85cc36072f3 | [
"CC-BY-2.0"
] | null | null | null | OtherTransformations/FileToVar/UpdateVariables.py | mintproject/ModelCatalog | 194a2bfcd0b43b3c2703493e67eef85cc36072f3 | [
"CC-BY-2.0"
] | 46 | 2019-05-02T23:45:06.000Z | 2021-01-11T18:41:10.000Z | OtherTransformations/FileToVar/UpdateVariables.py | mintproject/ModelCatalog | 194a2bfcd0b43b3c2703493e67eef85cc36072f3 | [
"CC-BY-2.0"
] | 3 | 2019-05-10T09:14:05.000Z | 2019-07-26T22:37:34.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 31 12:14:19 2019
@author: Maria Stoica
@description: Script to update variable entries in 'VariablePresentation.csv'
"""
import pandas as pd
# file names:
rel_dir = '../'
rel_var_dir = rel_dir + 'Variables/'
PIHM_input = 'PIHM_input.csv'
PIHM_output = 'PIHM_output.csv'
FLDAS = 'FLDAS_Variables.csv'
ECON_input = 'ECON_input.csv'
ECON_output = 'ECON_output.csv'
#CYCLES_input = 'CYCLES_input.csv'
#CYCLES_output = 'CYCLES_output.csv'
variables_mc = 'VariablePresentation.csv'
variables_mc_backup = 'VariablePresentationBackup.csv'
# read data:
try:
pihm = pd.read_csv( rel_var_dir + PIHM_input, usecols = [ 'Short Name', 'Long Name', 'GSN'] )
pihm = pihm.append(pd.read_csv( rel_var_dir + PIHM_output, usecols = [ 'Short Name', 'Long Name', 'GSN'] )).fillna('')
except:
try:
pihm = pd.read_csv( rel_var_dir + PIHM_input, usecols = [ 'Short Name', 'Long Name', 'GSN'], encoding = 'iso-8859-1' )
pihm = pihm.append(pd.read_csv( rel_var_dir + PIHM_output, usecols = [ 'Short Name', 'Long Name', 'GSN'], encoding = 'iso-8859-1' )).fillna('')
except:
print('PIHM file unreadable.')
try:
fldas = pd.read_csv( rel_var_dir + FLDAS, usecols = [ 'Short Name', 'Long Name', 'GSN'] ).fillna('')
except:
try:
fldas = pd.read_csv( rel_var_dir + FLDAS, usecols = [ 'Short Name', 'Long Name', 'GSN'], encoding = 'iso-8859-1' ).fillna('')
except:
print('FLDAS file unreadable.')
try:
econ = pd.read_csv( rel_var_dir + ECON_input, usecols = [ 'Short Name', 'Long Name', 'GSN'] )
econ = econ.append(pd.read_csv( rel_var_dir + ECON_output, usecols = [ 'Short Name', 'Long Name', 'GSN'] )).fillna('')
except:
try:
econ = pd.read_csv( rel_var_dir + ECON_input, usecols = [ 'Short Name', 'Long Name', 'GSN'], encoding = 'iso-8859-1' )
econ = econ.append(pd.read_csv( rel_var_dir + ECON_output, usecols = [ 'Short Name', 'Long Name', 'GSN'], encoding = 'iso-8859-1' )).fillna('')
except:
print('ECON file unreadable.')
try:
var_pres = pd.read_csv( rel_dir + variables_mc ).fillna('')
except:
try:
var_pres = pd.read_csv( rel_dir + variables_mc, encoding = 'iso-8859-1' )
except:
print('VariablePresentation.csv file unreadable.')
var_pres = None
if var_pres is not None:
var_pres.to_csv(rel_dir + variables_mc_backup, index=False)
label_col = 'https://w3id.org/mint/modelCatalog#hasStandardVariable'
model_col = 'https://w3id.org/mint/modelCatalog#VariablePresentation'
shortname_col = 'https://w3id.org/mint/modelCatalog#hasShortName'
longname_col = 'https://w3id.org/mint/modelCatalog#hasLongName'
for i in var_pres.index:
model = var_pres.loc[i,model_col].split('_')[0]
if model.lower() == 'pihm':
sn = var_pres.loc[i,shortname_col]
ln = var_pres.loc[i,longname_col]
if (sn != '') or (ln != ''):
try:
pihm_var = []
if (sn != ''):
pihm_var = pihm.loc[pihm['Short Name']==sn,'GSN']
if len(pihm_var) == 0:
pihm_var = pihm.loc[pihm['Long Name']==ln,'GSN']
pihm_var = pihm_var.iloc[0]
label = var_pres.loc[i,label_col]
if label != pihm_var:
print('Changing variable label for PIHM ',sn,' from ',label,' to ',pihm_var,'.')
var_pres.loc[i,label_col] = pihm_var
except:
print('Warning! PIHM short name ',sn,' not found!')
if model.lower() == 'fldas':
sn = var_pres.loc[i,shortname_col]
if sn != '':
try:
fldas_var = fldas.loc[fldas['Short Name']==sn,'GSN'].iloc[0]
label = var_pres.loc[i,label_col]
if label != fldas_var:
print('Changing variable label for FLDAS ',sn,' from ',label,' to ',fldas_var,'.')
var_pres.loc[i,label_col] = fldas_var
except:
print('Warning! FLDAS short name ',sn,' not found!')
if model.lower() == 'econ':
sn = var_pres.loc[i,shortname_col]
if sn != '':
try:
econ_var = econ.loc[econ['Short Name']==sn,'GSN'].iloc[0]
label = var_pres.loc[i,label_col]
if label != econ_var:
print('Changing variable label for ECON ',sn,' from ',label,' to ',econ_var,'.')
var_pres.loc[i,label_col] = econ_var
except:
print('Warning! ECON short name ',sn,' not found!')
var_pres.to_csv( rel_dir + variables_mc, index = False) | 40.543103 | 151 | 0.590687 | 628 | 4,703 | 4.229299 | 0.157643 | 0.04744 | 0.040663 | 0.054217 | 0.587726 | 0.567018 | 0.484187 | 0.449925 | 0.404744 | 0.400226 | 0 | 0.015152 | 0.256219 | 4,703 | 116 | 152 | 40.543103 | 0.74414 | 0.057835 | 0 | 0.32967 | 0 | 0 | 0.234895 | 0.017651 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.010989 | 0 | 0.010989 | 0.10989 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3afc47c7026c899e17d1b9fe33bd54c5395be6c7 | 2,001 | py | Python | adv_patch_bench/dataloaders/mtsd.py | chawins/adv-patch-bench | 224c4a39f9322cd27312deffbf5e8c882bce3dd2 | [
"MIT"
] | 1 | 2021-09-05T05:23:29.000Z | 2021-09-05T05:23:29.000Z | adv_patch_bench/dataloaders/mtsd.py | chawins/adv-patch-bench | 224c4a39f9322cd27312deffbf5e8c882bce3dd2 | [
"MIT"
] | null | null | null | adv_patch_bench/dataloaders/mtsd.py | chawins/adv-patch-bench | 224c4a39f9322cd27312deffbf5e8c882bce3dd2 | [
"MIT"
] | null | null | null | import os
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from .eval_sampler import DistributedEvalSampler
def get_loader_sampler(root, transform, args, split):
"""dist eval introduces slight variance to results if
num samples != 0 mod (batch size * num gpus)
"""
dataset = datasets.ImageFolder(os.path.join(root, split), transform=transform)
sampler = None
if args.distributed:
if split == 'train':
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
else:
sampler = DistributedEvalSampler(dataset)
loader = torch.utils.data.DataLoader(
dataset, batch_size=args.batch_size, shuffle=(sampler is None),
num_workers=args.workers, pin_memory=True, sampler=sampler,
drop_last=(split == 'train'))
return loader, sampler
def load_mtsd(args):
input_size = MTSD['input_dim'][-1]
train_transform_list = [
transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0),
# transforms.RandomEqualize(p=1.0),
transforms.RandomResizedCrop(input_size, scale=(0.64, 1.0)),
transforms.RandomPerspective(distortion_scale=0.2, p=0.5),
transforms.ToTensor()
]
val_transform_list = [
# transforms.RandomEqualize(p=1.0),
transforms.Resize((input_size, input_size)),
# transforms.CenterCrop(input_size),
transforms.ToTensor()
]
train_transform = transforms.Compose(train_transform_list)
val_transform = transforms.Compose(val_transform_list)
train_loader, train_sampler = get_loader_sampler(args.data, train_transform, args, 'train')
val_loader, _ = get_loader_sampler(args.data, val_transform, args, 'val')
return train_loader, train_sampler, val_loader
MTSD = {
'normalize': {
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225],
},
'loader': load_mtsd,
'input_dim': (3, 128, 128)
}
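# Hedged usage sketch (``args`` must provide .data, .batch_size, .workers and .distributed):
# train_loader, train_sampler, val_loader = MTSD['loader'](args)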
| 31.761905 | 95 | 0.678661 | 243 | 2,001 | 5.419753 | 0.366255 | 0.034169 | 0.036446 | 0.039484 | 0.092635 | 0.056188 | 0 | 0 | 0 | 0 | 0 | 0.033291 | 0.204398 | 2,001 | 62 | 96 | 32.274194 | 0.79397 | 0.09995 | 0 | 0.046512 | 0 | 0 | 0.032511 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0.116279 | 0 | 0.209302 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3afc9e77b428e7de059cbfff154f06ed696effcb | 845 | py | Python | dz5/zad4.py | vedrankolka/APR | 4f8afefc74f3d0f67f5d2ec665c93a4b38fbdf2f | [
"Apache-2.0"
] | null | null | null | dz5/zad4.py | vedrankolka/APR | 4f8afefc74f3d0f67f5d2ec665c93a4b38fbdf2f | [
"Apache-2.0"
] | null | null | null | dz5/zad4.py | vedrankolka/APR | 4f8afefc74f3d0f67f5d2ec665c93a4b38fbdf2f | [
"Apache-2.0"
] | null | null | null | from main import *
r = lambda t: t
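# r(t) = t is the forcing term handed to each integrator; A, B, x0, step, start and
# end are expected to be provided by `main` through the star import above.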
integrate(EulerIntegrator(A, B, step=step, r=r), x0, start, end, title="Euler")
integrate(ReversedEulerIntegrator(A, B, step=step, r=r), x0, start, end, title="Reversed Euler")
integrate(TrapezeIntegrator(A, B, step=step, r=r), x0, start, end, title="Trapeze")
integrate(RungeKutta4Real(A, B, step=step, r=r), x0, start, end, title="Runge-Kutta 4 real")
trapezeIntegrator = TrapezeIntegrator(A, B, step=step, r=r)
eulerIntegrator = EulerIntegrator(A, B, step=step, r=r)
reverseEulerIntegrator = ReversedEulerIntegrator(A, B, step=step, r=r)
integrate(PECEIntegrator(eulerIntegrator, reverseEulerIntegrator, 2), x0, start, end,
title="PE(CE)^2 Euler-ReversedEuler")
integrate(PECEIntegrator(eulerIntegrator, trapezeIntegrator, 1), x0, start, end,
title="PECE Euler-Trapeze (aka Heune)")
| 46.944444 | 96 | 0.728994 | 115 | 845 | 5.356522 | 0.304348 | 0.022727 | 0.068182 | 0.113636 | 0.412338 | 0.412338 | 0.412338 | 0.175325 | 0.175325 | 0.175325 | 0 | 0.014845 | 0.123077 | 845 | 17 | 97 | 49.705882 | 0.816464 | 0 | 0 | 0 | 0 | 0 | 0.12071 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3afe96ca9f28d0d390f392aacf1496e69d9a2303 | 9,220 | py | Python | cats/data_modules/combine.py | AWehrhahn/CATS | 40b9f21ffccda8f70f9d1a9d7335102083847ce3 | [
"MIT"
] | 1 | 2022-02-02T16:14:02.000Z | 2022-02-02T16:14:02.000Z | cats/data_modules/combine.py | AWehrhahn/CATS | 40b9f21ffccda8f70f9d1a9d7335102083847ce3 | [
"MIT"
] | null | null | null | cats/data_modules/combine.py | AWehrhahn/CATS | 40b9f21ffccda8f70f9d1a9d7335102083847ce3 | [
"MIT"
] | null | null | null | import numpy as np
from astropy import units as u
import matplotlib.pyplot as plt
from copy import deepcopy
from scipy.optimize import curve_fit
from scipy.ndimage.filters import gaussian_filter1d
from scipy.interpolate import RegularGridInterpolator
from astropy.nddata import StdDevUncertainty
import astroplan
from tqdm import tqdm
from astropy.constants import c
from ..least_squares.least_squares import least_squares
from .datasource import DataSource
from ..spectrum import SpectrumArray
def detect_outliers(spectra: SpectrumArray):
flux = np.copy(spectra.flux)
for i in range(len(spectra)):
for j, k in zip(spectra.segments[:-1], spectra.segments[1:]):
flux[i, j:k] /= np.nanpercentile(flux[i, j:k], 95)
median = np.nanmedian(flux, axis=0)
flux = np.abs(flux - median)
mad = np.nanmedian(flux, axis=0)
mad *= 1.4826 # to scale to gaussian sigma
mask = flux > 5 * mad
mask |= np.isnan(spectra.flux)
return mask
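# The 1.4826 factor above rescales the median absolute deviation to an estimate of
# the Gaussian sigma, so the cut flags points deviating by more than ~5 sigma.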
def combine_observations(spectra: SpectrumArray):
# TODO: The telluric spectrum will change between observations
# and therefore influence the recovered stellar parameters
# Especially when we combine data from different transits!
# for i in range(spectra.shape[0]):
# plt.plot(spectra.wavelength[i], spectra.flux[i], "r")
# Shift to the same reference frame (telescope)
print("Shift observations to the telescope restframe")
spectra = spectra.shift("telescope", inplace=True)
# Arbitrarily choose the central grid as the common one
print("Combine all observations")
wavelength = spectra.wavelength[len(spectra) // 2]
spectra = spectra.resample(wavelength, inplace=True, method="linear")
# Detect outliers based on the median absolute deviation
mask = detect_outliers(spectra)
# TODO: other approach
# s(i) = f(i) (1 - w / dw * g) + f(i+1) w / dw * g
# g = sqrt((1 + beta) / (1 - beta)) - 1
rv = np.zeros(len(spectra))
for i in range(len(spectra)):
rv[i] = spectra.reference_frame.to_barycentric(spectra.datetime[i]).to_value(
"km/s"
)
rv -= np.mean(rv)
rv *= -1
rv /= c.to_value("km/s")
rv = np.sqrt((1 + rv) / (1 - rv)) - 1
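# g = sqrt((1 + beta) / (1 - beta)) - 1 is the relativistic Doppler stretch for
# beta = v/c; it is used below to interpolate flux between adjacent wavelength bins.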
wave = np.copy(wavelength.to_value("AA"))
for l, r in zip(spectra.segments[:-1], spectra.segments[1:]):
wave[l:r] /= np.gradient(wave[l:r])
g = wave[None, :] * rv[:, None]
# TODO: the tellurics are scaled by the airmass, which we should account for here, when creating the master stellar
# TODO: Could we fit a linear polynomial to each wavelength point? as a function of time/airmass?
# TODO: Then the spectrum would be constant, since there is no change, but for tellurics we would see a difference
# TODO: But there is a different offset for the tellurics, and the stellar features
yflux = spectra.flux.to_value(1)
flux = np.zeros(yflux.shape)
coeff = np.zeros((yflux.shape[1], 2))
airmass = spectra.airmass
mask = ~mask
for i in tqdm(range(spectra.flux.shape[1])):
coeff[i] = np.polyfit(airmass[mask[:, i]], yflux[:, i][mask[:, i]], 1)
flux[:, i] = np.polyval(coeff[i], airmass)
# fitfunc = lambda t0, t1, f: (t0[None, :] + t1[None, :] * airmass[:, None]) * (
# f + g * np.diff(f, append=f[-2])
# )
def plotfunc(airmass, t0, t1, f, fp, g):
tell = t0 + t1 * airmass[:, None]
tell = np.clip(tell, 0, 1)
stel = f + g * (fp - f) # np.diff(f, append=2 * f[-1] - f[-2])
obs = tell * stel
return obs
def fitfunc(param):
t0 = 1
n = param.size // 2
t1 = param[:n]
f = param[n:]
fp = np.roll(f, -1)
model = plotfunc(airmass, t0, t1, f, fp, g[:, l:r])
resid = model - yflux[:, l:r]
return resid.ravel()
def regularization(param):
n = param.size // 2
t1 = param[:n]
f = param[n:]
d1 = np.gradient(t1)
d2 = np.gradient(f)
reg = np.concatenate((d1, d2))
return reg ** 2
t0 = np.ones_like(coeff[:, 1])
t1 = coeff[:, 0] / coeff[:, 1]
t1[(t1 > 0.1) | (t1 < -2)] = -2
f = np.copy(coeff[:, 1])
for k in tqdm(range(1)):
for l, r in tqdm(
zip(spectra.segments[:-1], spectra.segments[1:]),
total=spectra.nseg,
leave=False,
):
n = r - l
# Smooth first guess
mu = gaussian_filter1d(t1[l:r], 1)
var = gaussian_filter1d((t1[l:r] - mu) ** 2, 11)
sig = np.sqrt(var) * 80 + 0.5
sig = np.nan_to_num(sig)
smax = int(np.ceil(np.nanmax(sig))) + 1
points = [t1[l:r]] + [gaussian_filter1d(t1[l:r], i) for i in range(1, smax)]
smooth = RegularGridInterpolator((np.arange(smax), np.arange(n)), points)(
(sig, np.arange(n))
)
t1[l:r] = smooth
# plt.plot(t1[l:r])
# plt.plot(f[l:r])
# plt.show()
# fold = np.copy(f[l:r])
# told = np.copy(t1[l:r])
# Bounds for the optimisation
bounds = np.zeros((2, 2 * n))
bounds[0, :n], bounds[0, n:] = -2, 0
bounds[1, :n], bounds[1, n:] = 0, 1
x0 = np.concatenate((t1[l:r], f[l:r]))
x0 = np.nan_to_num(x0)
x0 = np.clip(x0, bounds[0], bounds[1])
res = least_squares(
fitfunc,
x0,
method="trf",
bounds=bounds,
max_nfev=200,
tr_solver="lsmr",
tr_options={"atol": 1e-2, "btol": 1e-2},
jac_sparsity="auto",
regularization=regularization,
r_scale=0.2,
verbose=2,
diff_step=0.01,
)
t0[l:r] = 1
t1[l:r] = res.x[:n]
f[l:r] = res.x[n:]
# lower, upper = [-2, 0, 0], [0, 1, 1]
# for i in tqdm(range(l, r - 1), leave=False):
# x0 = [t1[i], f[i], f[i + 1]]
# x0 = np.nan_to_num(x0)
# x0 = np.clip(x0, lower, upper)
# res = least_squares(fitfunc, x0, method="trf", bounds=[lower, upper])
# t0[i] = 1
# t1[i], f[i] = res.x[0], res.x[1]
# t0[l:r] = gaussian_filter1d(t0[l:r], 0.5)
# t1[l:r] = gaussian_filter1d(t1[l:r], 0.5)
# f[l:r] = gaussian_filter1d(f[l:r], 0.5)
# total = 0
# for i in range(len(spectra)):
# total += np.sum(
# (plotfunc(airmass[i], t0, t1, f, np.roll(f, -1), g[i]) - yflux[i]) ** 2
# )
# print(total)
# TODO: t0 should be 1 in theory, however it is not in practice because ?
tell = t0 + t1 * airmass[:, None]
tell = np.clip(tell, 0, 1)
tell = tell << spectra.flux.unit
# i = 10
# plt.plot(wavelength, yflux[i], label="observation")
# plt.plot(
# wavelength,
# plotfunc(airmass[i], t0, t1, f, np.roll(f, -1), g[i]),
# label="combined",
# )
# plt.plot(wavelength, tell[i], label="telluric")
# plt.plot(wavelength, f, label="stellar")
# plt.legend()
# plt.show()
flux = f + g * (np.roll(f, -1, axis=0) - f)
# flux = np.tile(f, (len(spectra), 1))
flux = flux << spectra.flux.unit
wave = np.tile(wavelength, (len(spectra), 1)) << spectra.wavelength.unit
uncs = np.nanstd(flux, axis=0)
uncs = np.tile(uncs, (len(spectra), 1))
uncs = StdDevUncertainty(uncs, copy=False)
spec = SpectrumArray(
flux=flux,
spectral_axis=wave,
uncertainty=uncs,
segments=spectra.segments,
datetime=spectra.datetime,
star=spectra.star,
planet=spectra.planet,
observatory_location=spectra.observatory_location,
reference_frame="telescope",
)
tell = SpectrumArray(
flux=tell,
spectral_axis=wave,
uncertainty=uncs,
segments=spectra.segments,
datetime=spectra.datetime,
star=spectra.star,
planet=spectra.planet,
observatory_location=spectra.observatory_location,
reference_frame="telescope",
)
# print("Shift observations to the telescope restframe")
# spec = spec.shift("barycentric", inplace=True)
# spec = spec.shift("telescope", inplace=True)
spec = spec.resample(spectra.wavelength, method="linear", inplace=True)
tell = tell.resample(spectra.wavelength, method="linear", inplace=True)
return spec, tell
class CombineStellar(DataSource):
def __init__(self, spectra, mask, telluric, detector, stellar):
# combine
spectra = deepcopy(spectra)
self.combined, self.telluric = combine_observations(spectra)
def get(self, wrange, time):
idx = self.combined.datetime == time
idx = np.where(idx)[0][0]
spec = deepcopy(self.combined[idx])
return spec
def get_telluric(self, wrange, time):
idx = self.telluric.datetime == time
idx = np.where(idx)[0][0]
spec = deepcopy(self.telluric[idx])
return spec
| 33.772894 | 119 | 0.560412 | 1,249 | 9,220 | 4.097678 | 0.220977 | 0.010551 | 0.008597 | 0.010746 | 0.280774 | 0.248339 | 0.227823 | 0.19324 | 0.154162 | 0.154162 | 0 | 0.02928 | 0.296204 | 9,220 | 272 | 120 | 33.897059 | 0.759439 | 0.258894 | 0 | 0.189349 | 0 | 0 | 0.021119 | 0 | 0 | 0 | 0 | 0.003676 | 0 | 1 | 0.047337 | false | 0 | 0.08284 | 0 | 0.177515 | 0.011834 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d703c2bfe66aeef17d4a6d74279c43c3b503b54d | 4,024 | py | Python | scripts/atc_visualizations/dashboard.py | efeerdur/atomic-threat-coverage | 68ca9213d63a1fb68a9ea7473a8857d42ddce92f | [
"Apache-2.0"
] | 542 | 2019-08-13T08:38:06.000Z | 2022-03-26T23:03:39.000Z | scripts/atc_visualizations/dashboard.py | An0nYm0u5101/atomic-threat-coverage | b3a44165904cfcdae7d3422e8e93504f8223778d | [
"Apache-2.0"
] | 56 | 2019-08-14T07:24:34.000Z | 2021-10-24T00:50:56.000Z | scripts/atc_visualizations/dashboard.py | An0nYm0u5101/atomic-threat-coverage | b3a44165904cfcdae7d3422e8e93504f8223778d | [
"Apache-2.0"
] | 97 | 2019-08-15T07:18:46.000Z | 2022-03-24T03:58:45.000Z | #!/usr/bin/env python3
import scripts.atc_visualizations.base as base
import json
import uuid
import copy
class KibanaDashboardObject(base.BaseKibana):
"""Base Kibana DashboardObject"""
def __init__(self, title=None):
self.title = str()
self.description = str()
self.panelsJSON = list() # double escaping
self.optionsJSON = dict() # double escaping
self.timeRestore = bool()
self.kibanaSavedObjectMeta = dict()
self.version = 1
self.hits = 0
self._id = 1
if title:
self.title = title
self.optionsJSON = {'darkTheme': False}
self.kibanaSavedObjectMeta["searchSourceJSON"] = {
"query": {
"query": "",
"language": "lucene"
},
"filter": []
}
def validate(self):
# TODO: Write validate method
return True
def __call__(self):
if self.validate():
return self.__dict__
def __repr__(self):
return str(self.__call__())
def json_export_api(self, return_dict=False):
        _tmp = {}
        # Deep-copy the object state so mutating the export below does not
        # touch the live dashboard object.
        _tmp["attributes"] = copy.deepcopy(self.__dict__)
_tmp["type"] = "dashboard"
_tmp.pop("_id", None)
_tmp["attributes"]["panelsJSON"] = json.dumps(
_tmp["attributes"]["panelsJSON"]
)
_tmp["attributes"]["optionsJSON"] = json.dumps(
_tmp["attributes"]["optionsJSON"]
)
_tmp["attributes"]["kibanaSavedObjectMeta"]["searchSourceJSON"] =\
json.dumps(
_tmp["attributes"]["kibanaSavedObjectMeta"]["searchSourceJSON"]
)
_tmp["attributes"].pop("_id", None)
_tmp["id"] = str(uuid.uuid4())
if return_dict:
return _tmp
else:
return json.dumps(_tmp)
def json_export_gui(self, return_dict=False):
        _tmp = {}
        _tmp["_source"] = copy.deepcopy(self.__dict__)
_tmp["_type"] = "dashboard"
_tmp.pop("_id", None)
_tmp["_source"]["panelsJSON"] = json.dumps(
_tmp["_source"]["panelsJSON"]
)
_tmp["_source"]["optionsJSON"] = json.dumps(
_tmp["_source"]["optionsJSON"]
)
_tmp["_source"]["kibanaSavedObjectMeta"]["searchSourceJSON"] =\
json.dumps(
_tmp["_source"]["kibanaSavedObjectMeta"]["searchSourceJSON"]
)
_tmp["_source"].pop("_id", None)
_tmp["_id"] = str(uuid.uuid4())
if return_dict:
return _tmp
else:
return json.dumps(_tmp)
def add_visualization(self, vis, x=0, y=0, w=20, h=20):
if vis.get('type') == "search":
vis["uuid"] = vis.get('title').replace(" ", "_")
_vis = base.BasePanelsJson(vis_uuid=vis["uuid"], type="search")
        elif vis.get('type') in ["visualization", "visualisation"]:
            _vis = base.BasePanelsJson(vis_uuid=vis["uuid"])
        else:
            raise ValueError("Unsupported panel type: %s" % vis.get('type'))
_vis.gridData.x = x
_vis.gridData.y = y
_vis.gridData.w = w
_vis.gridData.h = h
_vis.gridData.i = str(self._id)
_vis.panelIndex = str(self._id)
self.panelsJSON.append(_vis)
self._id += 1
def add_saved_search(
self, saved_search_id=None, saved_search_name=None,
x=0, y=0, w=20, h=20):
if not saved_search_id and not saved_search_name:
raise Exception("What about providing id or name?")
if saved_search_name and not saved_search_id:
# Some logic to convert name to id
pass
        # `add_visualization` expects a dict describing the panel, so wrap
        # the saved-search id accordingly.
        vis = {"type": "search", "title": str(saved_search_id)}
        self.add_visualization(vis=vis, x=x, y=y, w=w, h=h)
def set_dark_theme(self):
self.optionsJSON.update({'darkTheme': True})
def set_query(self, query):
ssjs = self.kibanaSavedObjectMeta["searchSourceJSON"]
ssjs["query"]["query"] = str(query)
| 29.807407 | 79 | 0.560885 | 431 | 4,024 | 4.951276 | 0.25522 | 0.048735 | 0.044986 | 0.022493 | 0.251172 | 0.205248 | 0.205248 | 0.172446 | 0.172446 | 0.1612 | 0 | 0.006788 | 0.304423 | 4,024 | 134 | 80 | 30.029851 | 0.755627 | 0.035288 | 0 | 0.171429 | 0 | 0 | 0.1547 | 0.021694 | 0 | 0 | 0 | 0.007463 | 0 | 1 | 0.095238 | false | 0.009524 | 0.038095 | 0.019048 | 0.209524 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7048eae9eda66aff1a18f973b4e02185a8aed6c | 1,899 | py | Python | tests/test_allocation_sources.py | eriksf/atmosphere-cli | ed7a955d91ae1bda352cea2eadc199d2ef9533c7 | [
"BSD-3-Clause"
] | 7 | 2017-06-29T18:05:48.000Z | 2019-03-05T23:17:10.000Z | tests/test_allocation_sources.py | eriksf/atmosphere-cli | ed7a955d91ae1bda352cea2eadc199d2ef9533c7 | [
"BSD-3-Clause"
] | 5 | 2018-03-07T20:17:15.000Z | 2018-09-27T22:38:01.000Z | tests/test_allocation_sources.py | eriksf/atmosphere-cli | ed7a955d91ae1bda352cea2eadc199d2ef9533c7 | [
"BSD-3-Clause"
] | null | null | null | from .mock_server import get_free_port, start_mock_server
from atmosphere.api import AtmosphereAPI
from atmosphere.main import AtmosphereApp
from atmosphere.allocation_source import AllocationSourceList
class TestAllocationSources(object):
@classmethod
def setup_class(cls):
cls.mock_server_port = get_free_port()
cls.mock_users_base_url = 'http://localhost:{port}'.format(port=cls.mock_server_port)
cls.mock_users_bad_base_url = 'http://localhosty:{port}'.format(port=cls.mock_server_port)
start_mock_server(cls.mock_server_port)
def test_allocation_source_list_description(self):
app = AtmosphereApp()
allocation_source_list = AllocationSourceList(app, None)
assert allocation_source_list.get_description() == 'List allocation sources for a user.'
def test_getting_allocation_sources_when_response_is_not_ok(self):
api = AtmosphereAPI('token', base_url=self.mock_users_bad_base_url)
response = api.get_allocation_sources()
assert not response.ok
def test_getting_allocation_sources_when_response_is_ok(self):
api = AtmosphereAPI('token', base_url=self.mock_users_base_url)
response = api.get_allocation_sources()
assert response.ok
assert response.message['results'][0]['id'] == 1 and response.message['results'][0]['name'] == 'eriksf'
def test_getting_allocation_source_when_response_is_not_ok(self):
api = AtmosphereAPI('token', base_url=self.mock_users_bad_base_url)
response = api.get_allocation_source(1)
assert not response.ok
def test_getting_allocation_source_when_response_is_ok(self):
api = AtmosphereAPI('token', base_url=self.mock_users_base_url)
response = api.get_allocation_source(1)
assert response.ok
assert response.message['id'] == 1 and response.message['name'] == 'eriksf'
| 46.317073 | 111 | 0.741969 | 248 | 1,899 | 5.314516 | 0.229839 | 0.053111 | 0.039454 | 0.051593 | 0.580425 | 0.534143 | 0.487102 | 0.440061 | 0.308042 | 0.288316 | 0 | 0.003788 | 0.165877 | 1,899 | 40 | 112 | 47.475 | 0.828283 | 0 | 0 | 0.363636 | 0 | 0 | 0.073723 | 0 | 0 | 0 | 0 | 0 | 0.212121 | 1 | 0.181818 | false | 0 | 0.121212 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d704e89f11dac0ca44aa2cced87deb5cf4011d60 | 2,058 | py | Python | scripts/process_image.py | NielsTilch/torchxrayvision | 5a5a51feaf3d24e4b2c6a056528ea3e70db82758 | [
"Apache-2.0"
] | 1 | 2022-03-09T15:43:35.000Z | 2022-03-09T15:43:35.000Z | scripts/process_image.py | NielsTilch/torchxrayvision | 5a5a51feaf3d24e4b2c6a056528ea3e70db82758 | [
"Apache-2.0"
] | null | null | null | scripts/process_image.py | NielsTilch/torchxrayvision | 5a5a51feaf3d24e4b2c6a056528ea3e70db82758 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import os,sys
sys.path.insert(0,"..")
from glob import glob
import matplotlib.pyplot as plt
import numpy as np
import argparse
import skimage, skimage.io
import pprint
import torch
import torch.nn.functional as F
import torchvision, torchvision.transforms
import torchxrayvision as xrv
parser = argparse.ArgumentParser()
parser.add_argument('-f', type=str, default="", help='')
parser.add_argument('img_path', type=str)
parser.add_argument('-weights', type=str,default="densenet121-res224-all")
parser.add_argument('-feats', default=False, help='', action='store_true')
parser.add_argument('-cuda', default=False, help='', action='store_true')
parser.add_argument('-resize', default=False, help='', action='store_true')
cfg = parser.parse_args()
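# Example invocation (illustrative, using the flags defined above):
#   python process_image.py xray.jpg -weights densenet121-res224-all -resize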
img = skimage.io.imread(cfg.img_path)
img = xrv.datasets.normalize(img, 255)
# Check that images are 2D arrays
if len(img.shape) > 2:
img = img[:, :, 0]
if len(img.shape) < 2:
    raise ValueError("image dimension is lower than 2; a 2D image is required")
# Add color channel
img = img[None, :, :]
# the models will resize the input to the correct size so this is optional.
if cfg.resize:
transform = torchvision.transforms.Compose([xrv.datasets.XRayCenterCrop(),
xrv.datasets.XRayResizer(224)])
else:
transform = torchvision.transforms.Compose([xrv.datasets.XRayCenterCrop()])
img = transform(img)
model = xrv.models.get_model(cfg.weights)
output = {}
with torch.no_grad():
img = torch.from_numpy(img).unsqueeze(0)
if cfg.cuda:
img = img.cuda()
model = model.cuda()
if cfg.feats:
feats = model.features(img)
feats = F.relu(feats, inplace=True)
feats = F.adaptive_avg_pool2d(feats, (1, 1))
output["feats"] = list(feats.cpu().detach().numpy().reshape(-1))
preds = model(img).cpu()
output["preds"] = dict(zip(xrv.datasets.default_pathologies,preds[0].detach().numpy()))
if cfg.feats:
print(output)
else:
pprint.pprint(output)
| 26.727273 | 91 | 0.676871 | 281 | 2,058 | 4.893238 | 0.423488 | 0.039273 | 0.074182 | 0.048 | 0.202909 | 0.182545 | 0.16 | 0.069818 | 0.069818 | 0 | 0 | 0.014767 | 0.177357 | 2,058 | 76 | 92 | 27.078947 | 0.797401 | 0.076774 | 0 | 0.078431 | 0 | 0 | 0.073351 | 0.01161 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.215686 | 0 | 0.215686 | 0.078431 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d706af9c1e3186d4c7b7997463b83fda5eb62001 | 11,478 | py | Python | finper_app.py | smferro54/finper_kunin | 620ad52ccd944e18535495e632bae1dc7470d973 | [
"MIT"
] | 3 | 2021-08-11T19:00:04.000Z | 2022-01-02T00:31:59.000Z | finper_app.py | smferro54/finper_kunin | 620ad52ccd944e18535495e632bae1dc7470d973 | [
"MIT"
] | null | null | null | finper_app.py | smferro54/finper_kunin | 620ad52ccd944e18535495e632bae1dc7470d973 | [
"MIT"
] | 1 | 2021-10-17T14:11:35.000Z | 2021-10-17T14:11:35.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 3 11:24:17 2021
@author: Pipe San Martín
"""
import base64
import streamlit as st
from PIL import Image
import datetime
import extractor
import procesamiento
import visualizaciones
def download_csv(df, name='tus_transacciones'):
csv = df.to_csv(index=False)
base = base64.b64encode(csv.encode()).decode()
file = (f'<a href="data:file/csv;base64,{base}" download="%s.csv">Descarga tus datos aquí</a>' % (name))
return file
# SETUP ------------------------------------------------------------------------
favicon = Image.open("favicon.ico")
st.set_page_config(page_title='Finper', page_icon = favicon, layout = 'wide', initial_sidebar_state = 'auto')
# ROW 1 ------------------------------------------------------------------------
row1_spacer1, row1_1, row1_spacer2, row1_2, row1_spacer3 = st.beta_columns(
(.1, 2, 1.5, 1, .1)
)
Title_html = """
<style>
.title h1{
user-select: none;
font-size: 43px;
color: white;
background: repeating-linear-gradient(-45deg, red 0%, yellow 7.14%, rgb(0,255,0) 14.28%, rgb(0,255,255) 21.4%, cyan 28.56%, blue 35.7%, magenta 42.84%, red 50%);
background-size: 600vw 600vw;
-webkit-text-fill-color: transparent;
-webkit-background-clip: text;
animation: slide 10s linear infinite forwards;
}
@keyframes slide {
0%{
background-position-x: 0%;
}
100%{
background-position-x: 600vw;
}
}
</style>
<div class="title">
<h1>Finper</h1>
</div>
"""
with row1_1:
st.markdown(Title_html, unsafe_allow_html=True)
#row1_1.title('Finper')
st.markdown("_tu **vida financiera** en un **dashboard**_")
with row1_2:
st.write('')
st.markdown("Hecho con :heartbeat: por [Pipe](https://www.linkedin.com/in/pipesanmartin/)")
# ROW 2 ------------------------------------------------------------------------
with st.form("User_auth"):
row2_spacer1, row2_1, row2_spacer2, row2_2, row2_spacer3 = st.beta_columns(
(.1, 1.6, .1, 1.6, .1)
)
with row2_1:
fintoc_link = st.text_input('Ingresa tu fintoc link', value="", key='fintoc_link', type="password")
boton = st.form_submit_button("Traer mis productos financieros")
with row2_2:
apikey = st.text_input('y tu apikey', value="", key='apikey', type="password")
#with row2_3:
# fecha = st.date_input('¿desde qué fecha generamos el dashboard?', datetime.date(2019, 7, 6))
if boton:
    # Run some sanity checks on the credentials
if not apikey or not fintoc_link:
st.warning("Cueeck, ingresa tu apikey o fintoc link")
st.stop()
with st.spinner('Estamos ingresando..'):
try:
data, options = extractor.datos_cuenta(fintoc_link, apikey)
except Exception as e:
st.error("No se logró la autentificación")
st.warning(f"Revisa que la apikey: {apikey} y link: {fintoc_link}, sean correctos")
st.info("Abajo los detalles")
st.exception(e)
st.stop()
#st.session_state.fintoc_link = fintoc_link
#st.session_state.apikey = apikey
st.session_state.accounts_data = data
st.session_state.options = options
st.success('¡Ingresamos!')
# ROW 3 ------------------------------------------------------------------------
with st.form("User_account"):
row3_spacer1, row3_1, row3_spacer2, row3_2, row3_spacer3 = st.beta_columns((.1, 1.6, .1, 1.6, .1))
with row3_1:
if "options" not in st.session_state:
st.markdown("Obten tu link y apikey de [Fintoc](https://fintoc.com/)")
st.caption("Y luego haz click en 'Traer mis productos financieros'")
else:
option = st.radio('Escoge tu cuenta', st.session_state.options, key='option')
st.caption("Generalmente la opción que contenga '__cuenta__' es la que más usas 🐜")
#st.session_state.option = option
st.session_state.number_selected = st.session_state.option.split(",")[1].split()[-1]
st.session_state.nombre_cuenta = st.session_state.option.split(",")[0]
st.session_state.identificador = st.session_state.accounts_data[st.session_state.accounts_data['number'] == st.session_state.number_selected]['id'].values[0]
st.session_state.monto_actual = st.session_state.accounts_data[st.session_state.accounts_data['number'] == st.session_state.number_selected]['balance.current'].values[0]
st.session_state.currency = st.session_state.accounts_data[st.session_state.accounts_data['number'] == st.session_state.number_selected]['currency'].values[0]
st.session_state.rut = st.session_state.accounts_data[st.session_state.accounts_data['number'] == st.session_state.number_selected]['holder_id'].values[0]
st.session_state.nombre_prop = st.session_state.accounts_data[st.session_state.accounts_data['number'] == st.session_state.number_selected]['holder_name'].values[0]
json_data = {
"propietario": st.session_state.nombre_prop,
"rut": st.session_state.rut,
"nombre_cuenta": st.session_state.nombre_cuenta,
"N° cuenta": st.session_state.number_selected,
"currency": st.session_state.currency,
"disponible": str(st.session_state.monto_actual)
}
st.session_state.json = json_data
boton_2 = st.form_submit_button("Crear mi dashboard")
with row3_2:
if "option" not in st.session_state:
st.caption("🐜")
else:
fecha = st.date_input('¿desde qué fecha generamos el dashboard?', datetime.date(2020, 7, 6), key="date")
#st.session_state.date = fecha
if "json" not in st.session_state:
st.caption("")
else:
st.json(st.session_state.json)
if boton_2:
with st.spinner('Estamos recolectando tus datos..'):
try:
todo_movimientos = extractor.extraccion_movimientos(st.session_state.fintoc_link, st.session_state.apikey, st.session_state.identificador, st.session_state.date)
except Exception as e:
st.error("No los pudimos extraer")
st.error("Abajo los detalles")
st.exception(e)
st.stop()
with st.spinner("Ahora vamos a limpiar los datos"):
try:
df_tmp = procesamiento.crea_dataframe(todo_movimientos)
df = procesamiento.new_features(df_tmp)
except Exception as e:
st.error("No logramos limpiarlos")
st.error("Abajo los detalles")
st.exception(e)
st.stop()
with st.spinner("Finalmente los prepararemos para mostrartelos"):
try:
transactions_date_group, df_daily, df_monthly = procesamiento.transaction_group(df, st.session_state.monto_actual )
ingresos_df, ingresos_monthly = procesamiento.ingresos_group(df)
gastos_df, gastos_monthly = procesamiento.gastos_group(df)
dff, in_dff, out_dff = procesamiento.crea_dff_in_out(df)
in_dff_tmp, in_dff_tmp_cum = procesamiento.prepara_para_bar_race(in_dff)
out_dff_tmp, out_dff_tmp_cum = procesamiento.prepara_para_bar_race(out_dff)
sankey_data = procesamiento.prepara_para_sankey(ingresos_df, gastos_df)
last_moves = procesamiento.tabla_ultimos_movimientos(df)
st.session_state.readytoshow = True
except Exception as e:
st.error("No logramos prepararlos")
st.error("Abajo los detalles")
st.exception(e)
st.stop()
st.session_state.last_moves = last_moves
st.session_state.ingresos_monthly = ingresos_monthly
st.session_state.gastos_monthly = gastos_monthly
st.session_state.df_monthly = df_monthly
st.session_state.sankey_data = sankey_data
st.session_state.in_dff_tmp_cum = in_dff_tmp_cum
st.session_state.out_dff_tmp_cum = out_dff_tmp_cum
# ROW 4 ------------------------------------------------------------------------
if "readytoshow" not in st.session_state:
st.caption(" ")
else:
st.plotly_chart(visualizaciones.ingresos_gastos_timeseries(st.session_state.ingresos_monthly, st.session_state.gastos_monthly, st.session_state.df_monthly, st.session_state.currency, st.session_state.date), use_container_width=True)
st.plotly_chart(visualizaciones.origen_destino_dinero_sankey(st.session_state.sankey_data, st.session_state.date), use_container_width=True)
row4_1, row4_2 = st.beta_columns((.4, 3))
with row4_2:
st.subheader("Bar Chart Race de ingresos y gastos")
st.caption("Comparte estos videos con tu ehm..... mejor guardalos para ti")
with st.spinner("Haciendo los videos tipo 'bar chart race'"):
st.write(visualizaciones.race_chart_bar(st.session_state.in_dff_tmp_cum, "Mis ingresos en el tiempo"))
            st.write(visualizaciones.race_chart_bar(st.session_state.out_dff_tmp_cum, "Mis gastos en el tiempo"))
# Last ROW ------------------------------------------------------------------------
rowlast_spacer1, rowlast_1, rowlast_spacer2, rowlast_2, rowlast_spacer3 = st.beta_columns((.1, 1, .1, 2, .1))
with rowlast_1:
st.subheader("Datos de la cuenta")
st.text(f"Propietario: {st.session_state.json['propietario']}")
st.text(f"Rut: {st.session_state.json['rut']}")
st.text(f"Tipo: {st.session_state.json['nombre_cuenta']}")
st.text(f"N° cuenta: {st.session_state.json['N° cuenta']}")
st.text(f"Divisa: {st.session_state.json['currency']}")
st.text(f"Disponible: {st.session_state.json['disponible']}")
#st.plotly_chart(visualizaciones.indicador_dinero_disponible(st.session_state.monto_actual),use_container_width=True)
with rowlast_2:
st.subheader("Últimas transacciones")
st.plotly_chart(visualizaciones.plotly_table(st.session_state.last_moves),use_container_width=True) | 36.208202 | 237 | 0.558373 | 1,319 | 11,478 | 4.645944 | 0.247157 | 0.107213 | 0.166775 | 0.039491 | 0.388381 | 0.309073 | 0.274967 | 0.215894 | 0.168244 | 0.157148 | 0 | 0.023854 | 0.302405 | 11,478 | 317 | 238 | 36.208202 | 0.740477 | 0.084422 | 0 | 0.150289 | 0 | 0.017341 | 0.239013 | 0.038443 | 0 | 0 | 0 | 0 | 0 | 1 | 0.00578 | false | 0.011561 | 0.040462 | 0 | 0.052023 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d706c7b9bde3126c4f66c80764ba40a0b2eb3d37 | 752 | py | Python | ACM ICPC/String/Anagram/is_anagram.py | shreejitverma/GeeksforGeeks | d7bcb166369fffa9a031a258e925b6aff8d44e6c | [
"MIT"
] | 2 | 2022-02-18T05:14:28.000Z | 2022-03-08T07:00:08.000Z | ACM ICPC/String/Anagram/is_anagram.py | shivaniverma1/Competitive-Programming-1 | d7bcb166369fffa9a031a258e925b6aff8d44e6c | [
"MIT"
] | 6 | 2022-01-13T04:31:04.000Z | 2022-03-12T01:06:16.000Z | ACM ICPC/String/Anagram/is_anagram.py | shivaniverma1/Competitive-Programming-1 | d7bcb166369fffa9a031a258e925b6aff8d44e6c | [
"MIT"
] | 2 | 2022-02-14T19:53:53.000Z | 2022-02-18T05:14:30.000Z | def is_anagram(a: str, b: str):
count = [0 for _ in range(26)]
count2 = [0 for _ in range(26)]
a_length, b_length = len(a), len(b)
if a_length != b_length:
return False
for i in range(a_length):
if 'a' <= a[i] <= 'z':
count[ord(a[i]) - ord('a')] += 1
elif 'A' <= a[i] <= 'Z':
count2[ord(a[i]) - ord('A')] += 1
if 'a' <= b[i] <= 'z':
count[ord(b[i]) - ord('a')] -= 1
elif 'A' <= b[i] <= 'Z':
count2[ord(b[i]) - ord('A')] -= 1
if any(count) or any(count2):
return False
return True
if __name__ == '__main__':
a, b = 'Abc', 'cab'
if is_anagram(a, b):
print('Anagrams')
else:
print('Not Anagrams')
| 24.258065 | 45 | 0.448138 | 116 | 752 | 2.758621 | 0.293103 | 0.075 | 0.0625 | 0.075 | 0.25 | 0.15625 | 0 | 0 | 0 | 0 | 0 | 0.028455 | 0.345745 | 752 | 30 | 46 | 25.066667 | 0.621951 | 0 | 0 | 0.083333 | 0 | 0 | 0.06117 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0 | 0 | 0.166667 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d70cb983ab6aa60b1274954b1db16da87b68a781 | 5,112 | py | Python | fs_image/nspawn_in_subvol/non_booted.py | singhaditya28/fs_image | 3d122da48eab8b26e5add6754cc1f91296139c58 | [
"MIT"
] | null | null | null | fs_image/nspawn_in_subvol/non_booted.py | singhaditya28/fs_image | 3d122da48eab8b26e5add6754cc1f91296139c58 | [
"MIT"
] | null | null | null | fs_image/nspawn_in_subvol/non_booted.py | singhaditya28/fs_image | 3d122da48eab8b26e5add6754cc1f91296139c58 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
'''
Read the `run.py` docblock first. Then, review the docs for
`new_nspawn_opts` and `PopenArgs`, and invoke `popen_non_booted_nspawn`.
This file uses `systemd-nspawn --as-pid2` to run nspawn's internal "stub
init" as PID 1 of the container, and have that start `opts.cmd` as PID 2.
Security note: We use `--console=pipe`, which means that FDs that point at
your terminal may make it inside the container, allowing the guest to
synthesize keystrokes on the host.
'''
import functools
import subprocess
from contextlib import contextmanager
from typing import Iterable
from .args import _NspawnOpts, PopenArgs
from .cmd import maybe_popen_and_inject_fds, _NspawnSetup, _nspawn_setup
from .common import _nspawn_version, _PopenWrapper
def run_non_booted_nspawn(
opts: _NspawnOpts, popen_args: PopenArgs,
*, popen_wrappers: Iterable[_PopenWrapper] = (), # Doc on `_PopenWrapper`
) -> subprocess.CompletedProcess:
with functools.reduce(
(lambda x, f: f(x)), popen_wrappers, popen_non_booted_nspawn
)(opts, popen_args) as proc:
cmd_stdout, cmd_stderr = proc.communicate()
return subprocess.CompletedProcess(
args=proc.args,
returncode=proc.returncode,
stdout=cmd_stdout,
stderr=cmd_stderr,
)
@contextmanager
def popen_non_booted_nspawn(
opts: _NspawnOpts, popen_args: PopenArgs,
) -> Iterable[subprocess.Popen]:
with _nspawn_setup(opts, popen_args) as setup, \
_popen_non_booted_nspawn(setup) as proc:
yield proc
@contextmanager
def _popen_non_booted_nspawn(setup: _NspawnSetup) -> Iterable[subprocess.Popen]:
opts = setup.opts
    # Let's get the version locally right up front. If this fails, we'd
    # like to know early rather than later.
version = _nspawn_version()
cmd = [
*setup.nspawn_cmd,
# Add `--as-pid2` to run an nspawn-provided stub "init" process as
# PID 1 of the container, which starts our actual workload as PID 2.
# The command in `opts.cmd` is not (currently) meant to be an "init"
# process. And a PID 1 of a PID namespace must be a functioning
# "init" at least insofar as signal handling is concerned.
'--as-pid2',
f'--user={opts.user.pw_name}',
]
# This is last to let the user have final say over the environment.
cmd.extend(['--setenv=' + se for se in setup.cmd_env])
if version >= 242:
# This essentially reverts to pre-242 behavior, where the container
# has direct access to the caller's FDs 0/1/2, which may be the
# running host's TTY/PTY (or other privileged FDs). This is a
# SECURITY RISK in the sense that if the container is running
# untrusted code, it can now synthesize keystrokes on a host
# terminal and escape.
#
# We leave it like this for a few reasons:
# - We currently only build trusted code with the toolchain.
# Securing it against untrusted code is a big effort, covering
# more than just this specific vulnerability.
# - Being able to use container invocations in pipelines is
# very useful.
# - In the boot case, we `nsenter`, which is subject to the
# same attack. We don't have code to interpose a PTY there.
# - If we wanted to mitigate the risk, we could later do so:
# * Add an `--interactive` mode that interposes a PTY, for
# when the user wants that. Default to that when no command
# is given to the CLI, otherwise use `--non-interactive`.
# * In non-interactive mode, replace FDs 0/1/2 with
# something that interposes a pipe -- i.e. instead of
# `/dev/pts/0`, the container sees `[pipe]`, which
# is `splice`d to write to the original PTY.
# In fact, the mitigation really belongs in `systemd-nspawn`, we
# may yet propose it to upstream.
cmd.append('--console=pipe')
assert setup.popen_args.boot_console is None, setup # Should be unset
cmd_popen = functools.partial(
# NB: stdout is stderr if stdout is None, this is also our contract.
setup.subvol.popen_as_root,
check=setup.popen_args.check,
env=setup.nspawn_env, # `cmd_env` is set via `nspawn` args
stdin=setup.popen_args.stdin,
stdout=setup.popen_args.stdout,
stderr=setup.popen_args.stderr,
)
with maybe_popen_and_inject_fds(
(*cmd, '--', *opts.cmd),
opts,
cmd_popen,
set_listen_fds=True, # We must pass FDs through `systemd-nspawn`
) as proc:
# NB: While we could `return` here, the caller would then need to
# remember not to use the this `proc` as a context (since it's
# already entered). So instead, ensure use as a context manager.
yield proc
| 42.247934 | 80 | 0.66295 | 726 | 5,112 | 4.567493 | 0.389807 | 0.024427 | 0.027141 | 0.030157 | 0.087756 | 0.058203 | 0.028347 | 0.028347 | 0 | 0 | 0 | 0.005793 | 0.257042 | 5,112 | 120 | 81 | 42.6 | 0.867299 | 0.554969 | 0 | 0.107143 | 0 | 0 | 0.027003 | 0.011701 | 0 | 0 | 0 | 0 | 0.017857 | 1 | 0.053571 | false | 0 | 0.125 | 0 | 0.196429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d70f13571e81cf43b9b41933519bbe90bc4f8680 | 3,521 | py | Python | Informatics/Deep Learning/I2DL (IN2346) - TUM/Week_11/exercise_11/exercise_code/rnn/sentiment_dataset.py | MarcosSalib/Cocktail_MOOC | 46279c2ec642554537c639702ed8e540ea49afdf | [
"MIT"
] | null | null | null | Informatics/Deep Learning/I2DL (IN2346) - TUM/Week_11/exercise_11/exercise_code/rnn/sentiment_dataset.py | MarcosSalib/Cocktail_MOOC | 46279c2ec642554537c639702ed8e540ea49afdf | [
"MIT"
] | null | null | null | Informatics/Deep Learning/I2DL (IN2346) - TUM/Week_11/exercise_11/exercise_code/rnn/sentiment_dataset.py | MarcosSalib/Cocktail_MOOC | 46279c2ec642554537c639702ed8e540ea49afdf | [
"MIT"
] | null | null | null | import os
import re
import pickle
import random
import torch
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
from torch.nn.utils.rnn import pad_sequence
from ..util.download_utils import download_dataset
def download_data(data_dir='./data'):
url = 'https://vision.in.tum.de/webshare/g/i2dl/SentimentData.zip'
download_dataset(url, data_dir, 'SentimentData.zip')
return os.path.join(data_dir, 'SentimentData')
def tokenize(text):
return [s.lower() for s in re.split(r'\W+', text) if len(s) > 0]
def load_vocab(base_dir):
vocab_file = os.path.join(base_dir, 'vocab.pkl')
with open(vocab_file, 'rb') as f:
vocab = pickle.load(f)
return vocab
def load_sentiment_data(base_dir, vocab):
train_file = os.path.join(base_dir, 'train_data.pkl')
val_file = os.path.join(base_dir, 'val_data.pkl')
test_file = os.path.join(base_dir, 'test_data.pkl')
def load_data(file_name):
with open(file_name, 'rb') as f:
data = pickle.load(f)
unk = vocab['<unk>']
result = []
for text, label in data:
tokens = tokenize(text)
indices = [vocab.get(token, unk) for token in tokens]
result.append((text, tokens, indices, label))
return result
train_data = load_data(train_file)
val_data = load_data(val_file)
test_data = load_data(test_file)
return train_data, val_data, test_data
def create_dummy_data(base_dir, sample_size=3, max_len=20, min_len=5):
vocab = load_vocab(base_dir)
train_data, _, _ = load_sentiment_data(base_dir, vocab)
train_data1 = [
(text, label)
for text, tokens, _, label in train_data
if min_len <= len(tokens) <= max_len and label == 1
]
train_data0 = [
(text, label)
for text, tokens, _, label in train_data
if min_len <= len(tokens) <= max_len and label == 0
]
data = random.sample(train_data1, sample_size) + random.sample(train_data0, sample_size)
return data
class SentimentDataset(pl.LightningDataModule):
def __init__(self, data, batch_size=16):
"""
Inputs:
data: list of tuples (raw_text, tokens, token_indices, label)
"""
self.data = data
self.data.sort(key=lambda x: len(x[1]), reverse=True)
self.batch_size = batch_size
def __len__(self):
return len(self.data)
def __getitem__(self, i):
"""
Inputs:
i: an integer value to index data
Outputs:
data: A dictionary of {data, label}
"""
_, _, indices, label = self.data[i]
return {
'data': torch.tensor(indices).long(),
'label': torch.tensor(label).float()
}
    def train_dataloader(self):
        # This module wraps a single split, so serve `self.data`, padding
        # variable-length batches with the `collate` function below.
        return DataLoader(self.data, batch_size=self.batch_size, num_workers=4, collate_fn=collate)
    def val_dataloader(self):
        return DataLoader(self.data, batch_size=self.batch_size, num_workers=4, collate_fn=collate)
    def test_dataloader(self):
        return DataLoader(self.data, batch_size=self.batch_size, num_workers=4, collate_fn=collate)
def collate(batch):
"""
To be passed to DataLoader as the `collate_fn` argument
"""
assert isinstance(batch, list)
data = pad_sequence([b['data'] for b in batch])
lengths = torch.tensor([len(b['data']) for b in batch])
label = torch.stack([b['label'] for b in batch])
return {
'data': data,
'label': label,
'lengths': lengths
}
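# Example (illustrative) of loading a split and batching it:
#   base_dir = download_data()
#   vocab = load_vocab(base_dir)
#   train, val, test = load_sentiment_data(base_dir, vocab)
#   loader = DataLoader(SentimentDataset(train), batch_size=16, collate_fn=collate)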
| 29.588235 | 92 | 0.634763 | 491 | 3,521 | 4.338086 | 0.260692 | 0.029577 | 0.023474 | 0.026291 | 0.255399 | 0.207512 | 0.153052 | 0.121127 | 0.121127 | 0.121127 | 0 | 0.006795 | 0.247657 | 3,521 | 118 | 93 | 29.838983 | 0.797282 | 0.063618 | 0 | 0.073171 | 0 | 0 | 0.060019 | 0 | 0 | 0 | 0 | 0 | 0.012195 | 1 | 0.158537 | false | 0 | 0.109756 | 0.060976 | 0.426829 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d710533c57458b043ec36c62b421073ebf281eaf | 3,948 | py | Python | distribution_shift_framework/classification/experiment_lib_test.py | deepmind/distribution_shift_framework | f6946779128dd90ae820f5c36fb5dbfad58ad885 | [
"Apache-2.0"
] | 4 | 2022-03-17T08:44:12.000Z | 2022-03-22T11:55:57.000Z | distribution_shift_framework/classification/experiment_lib_test.py | deepmind/distribution_shift_framework | f6946779128dd90ae820f5c36fb5dbfad58ad885 | [
"Apache-2.0"
] | null | null | null | distribution_shift_framework/classification/experiment_lib_test.py | deepmind/distribution_shift_framework | f6946779128dd90ae820f5c36fb5dbfad58ad885 | [
"Apache-2.0"
] | 1 | 2022-03-17T10:51:29.000Z | 2022-03-17T10:51:29.000Z | #!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for distribution_shift_framework.classification.experiment_lib."""
from absl.testing import absltest
from absl.testing import flagsaver
from absl.testing import parameterized
from distribution_shift_framework.classification import config
from distribution_shift_framework.classification import experiment_lib
import jax
from jaxline import platform
_PREV_JAX_CONFIG = None
def setUpModule():
global _PREV_JAX_CONFIG
_PREV_JAX_CONFIG = jax.config.values.copy()
# Disable jax optimizations to speed up test.
jax.config.update('jax_disable_most_optimizations', True)
def tearDownModule():
# Set config to previous values.
jax.config.values.update(**_PREV_JAX_CONFIG)
class ExperimentLibTest(parameterized.TestCase):
@parameterized.parameters([
# Different algorithms.
dict(algorithm='CORAL', test_case='ood', model='resnet18',
dataset_name='dsprites', label='label_shape',
property_label='label_color', number_of_seeds=1),
dict(algorithm='DANN', test_case='ood', model='resnet18',
dataset_name='dsprites', label='label_shape',
property_label='label_color', number_of_seeds=1),
dict(algorithm='ERM', test_case='ood', model='resnet18',
dataset_name='dsprites', label='label_shape',
property_label='label_color', number_of_seeds=1),
dict(algorithm='IRM', test_case='ood', model='resnet18',
dataset_name='dsprites', label='label_shape',
property_label='label_color', number_of_seeds=1),
dict(algorithm='SagNet', test_case='ood', model='resnet18',
dataset_name='dsprites', label='label_shape',
property_label='label_color', number_of_seeds=1),
# Different datasets.
dict(algorithm='ERM', test_case='ood', model='resnet18',
dataset_name='small_norb', label='label_category',
property_label='label_azimuth', number_of_seeds=1),
dict(algorithm='ERM', test_case='ood', model='resnet18',
dataset_name='shapes3d', label='label_shape',
property_label='label_object_hue', number_of_seeds=1),
# Different test cases.
dict(algorithm='ERM', test_case='lowdata', model='resnet18',
dataset_name='shapes3d', label='label_shape',
property_label='label_object_hue', number_of_seeds=1),
dict(algorithm='ERM', test_case='correlated.lowdata', model='resnet18',
dataset_name='shapes3d', label='label_shape',
property_label='label_object_hue', number_of_seeds=1),
dict(algorithm='ERM', test_case='lowdata.noise', model='resnet18',
dataset_name='shapes3d', label='label_shape',
property_label='label_object_hue', number_of_seeds=1),
dict(algorithm='ERM', test_case='lowdata.fixeddata', model='resnet18',
dataset_name='shapes3d', label='label_shape',
property_label='label_object_hue', number_of_seeds=1),
])
def test_train(self, **kwargs):
kwargs['training_steps'] = 3
kwargs['use_fake_data'] = True
kwargs['batch_size'] = 8
options = ','.join([f'{k}={v}' for k, v in kwargs.items()])
cfg = config.get_config(options)
with flagsaver.flagsaver(config=cfg, jaxline_mode='train'):
platform.main(experiment_lib.Experiment, [])
if __name__ == '__main__':
absltest.main()
| 41.557895 | 77 | 0.706687 | 500 | 3,948 | 5.336 | 0.328 | 0.082459 | 0.082459 | 0.098951 | 0.488756 | 0.482009 | 0.435532 | 0.435532 | 0.435532 | 0.435532 | 0 | 0.014661 | 0.170719 | 3,948 | 94 | 78 | 42 | 0.800244 | 0.200861 | 0 | 0.383333 | 0 | 0 | 0.208626 | 0.009585 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.116667 | 0 | 0.183333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7138f6c9c4c78194a8eb1866945071de64afa47 | 4,302 | py | Python | connect/support/kafka_segments.py | dixonwhitmire/connect | 800d821c8f6d6abff6485b43727353b909ef4b76 | [
"Apache-2.0"
] | 33 | 2020-06-16T11:47:03.000Z | 2022-03-24T02:41:00.000Z | connect/support/kafka_segments.py | dixonwhitmire/connect | 800d821c8f6d6abff6485b43727353b909ef4b76 | [
"Apache-2.0"
] | 470 | 2020-06-12T01:18:43.000Z | 2022-02-20T23:08:00.000Z | connect/support/kafka_segments.py | dixonwhitmire/connect | 800d821c8f6d6abff6485b43727353b909ef4b76 | [
"Apache-2.0"
] | 30 | 2020-06-12T19:36:09.000Z | 2022-01-31T15:25:35.000Z | """
kafka_segments.py
Connect convenience functions to handle Kafka message segmentation
"""
import uuid
import math
import time
import logging
from connect.config import get_settings
logger = logging.getLogger(__name__)
settings = get_settings()
def segment_message(msg, chunk_size=settings.kafka_message_chunk_size):
"""
    Utility function to segment a large message into chunks that the producer
    can send to the broker. The function yields one chunk at a time, together
    with the metadata the Producer client can store in the message headers
    before sending the chunk off to the broker.
    This allows the creation of headers that uniquely identify each segment by
    its id (a uuid), the total segment count and a 1-based index.
Example usage of `segment_message`:
```
for segment, identifier, count, index in segment_message(msg, self.segment_size):
segment_headers = {
ID: identifier,
COUNT: count,
INDEX: index
}
future = loop.create_future()
final_headers = {**headers, **segment_headers}
self.producer.produce(topic_to_send, segment, key=key, callback=self._kafka_callback(loop, future), headers=final_headers)
futures.append(future)
```
The consumer checks for the presence of message headers (with unique identifiers) and can use the
combine_segments method to join the individual segments back into the larger message.
"""
if type(msg) == str:
msg_bytes = msg.encode("utf-8")
elif type(msg) == bytes:
msg_bytes = msg
else:
msg = "Msg can only be of type bytes or string"
logger.error(msg)
raise ValueError(msg)
msg_size = len(msg_bytes)
msg_segment_count = math.ceil(msg_size / chunk_size)
start = 0
counter = 1
identifier = str(uuid.uuid4()).encode("utf-8")
while start < msg_size:
end = start + chunk_size if start + chunk_size < msg_size else msg_size
msg_segment = msg_bytes[start:end]
start = end
yield (
msg_segment,
identifier,
str(msg_segment_count).encode("utf-8"),
str(counter).encode("utf-8"),
)
counter += 1
ID = "fragment.identifier"
COUNT = "fragment.count"
INDEX = "fragment.index"
_message_store = {}
def combine_segments(value, headers):
"""
    Util method to re-combine chunked messages that were produced by the
    segment_message util function above. See the example usage on
    segment_message above for how the custom {key: value} header dicts
    uniquely identify segments so that they can be recombined; an
    illustrative consumer-side sketch follows below.
    This function accesses and updates a common cache in `_message_store`.
    Unused segments in this cache are purged after a configurable eviction
    time.
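    Example usage of `combine_segments` (an illustrative sketch: `consumer`
    and `process` are assumed to be the caller's Kafka consumer and message
    handler, they are not defined in this module):
    ```
    msg = consumer.poll()
    headers = dict(msg.headers() or [])
    if ID in headers:
        message = combine_segments(msg.value(), headers)
        if message is not None:
            # All segments have arrived and were re-joined.
            process(message)
    else:
        # Not a segmented message; handle it directly.
        process(msg.value())
    ```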
"""
identifier = headers[ID].decode("utf-8")
count = int(headers[COUNT].decode("utf-8"))
index = int(headers[INDEX].decode("utf-8"))
message_segments = None
if identifier in _message_store:
message_segments = _message_store[identifier]
message_segments["last_accessed"] = time.time()
else:
message_segments = {
"bitset": [0 for _ in range(count)],
"segments": [None for _ in range(count)],
"last_accessed": time.time(),
}
_message_store[identifier] = message_segments
message_segments["segments"][index - 1] = value
message_segments["bitset"][index - 1] = 1
message = None
if message_segments["bitset"] == [1 for _ in range(count)]:
del _message_store[identifier]
message = b"".join(message_segments["segments"])
_purge_segments()
return message
def _purge_segments():
for identifier in list(_message_store.keys()):
last_accessed = _message_store[identifier]["last_accessed"]
current_time = time.time() - settings.kafka_segments_purge_timeout
# trace log
if last_accessed < current_time:
logger.trace(f"Purging message segments with identifier: {identifier}")
del _message_store[identifier]
| 34.97561 | 142 | 0.651093 | 534 | 4,302 | 5.073034 | 0.314607 | 0.055371 | 0.040605 | 0.032115 | 0.027316 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005375 | 0.264761 | 4,302 | 122 | 143 | 35.262295 | 0.851091 | 0.403998 | 0 | 0.060606 | 0 | 0 | 0.105177 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.075758 | 0 | 0.136364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7151f3c1abade39f68a83d8bc839f7b40b0a6d5 | 4,427 | py | Python | lookups/psi4.py | donerancl/sedre | ad87ea8db7508ffce0060e4aca3eb176fda9e329 | [
"MIT"
] | null | null | null | lookups/psi4.py | donerancl/sedre | ad87ea8db7508ffce0060e4aca3eb176fda9e329 | [
"MIT"
] | 1 | 2020-03-21T00:10:49.000Z | 2020-03-21T00:10:49.000Z | lookups/psi4.py | donerancl/sedre | ad87ea8db7508ffce0060e4aca3eb176fda9e329 | [
"MIT"
] | 1 | 2020-03-25T14:34:49.000Z | 2020-03-25T14:34:49.000Z | from .. import common
anynum = common.anynum
import copy
#Delimiters and regular expressions specific to psi4
lookup = {
'program': 'psi4',
'generic_delim': {
'end': '\*\*\* tstop\(\) called on',
'start': '\*\*\* tstart\(\) called on'
},
'exit_success': '\*\*\* Psi4 exiting successfully. Buy a developer a beer'
}
#TODO energies: ran enormous number of methods, find correct delims in there
#lookup energies, properties should be regexes that return the value asked for
#lookup section should contain regexes that do not return a value
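#Example (illustrative sketch): pulling one value out of an output file with
#these regexes, assuming `anynum` wraps the number in a capture group and
#`output_text` holds the contents of a psi4 output file:
#
#    import re
#    m = re.search(lookup['energy']['HF']['raw'], output_text)
#    if m:
#        hf_energy = float(m.group(1))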
lookup['energy'] = {
'HF': {
'raw': 'Final Energy:[ ]+' + anynum,
'Nuc': 'Nuclear Repulsion Energy =[ ]+' + anynum,
'1eE': 'One-Electron Energy =[ ]+' + anynum,
'2eE': 'Two-Electron Energy =[ ]+' + anynum
},
'DFT': {
'raw': 'Final Energy:[ ]+' + anynum,
'Nuc': 'Nuclear Repulsion Energy =[ ]+' + anynum,
'1eE': 'One-Electron Energy =[ ]+' + anynum,
'2eE': 'Two-Electron Energy =[ ]+' + anynum
},
'DFMP2': {
'raw': '(?<!SCS) Total Energy[ ]+=[ ]+' + anynum
},
'MP2': {
'raw': '(?<!SCS) Total Energy[ ]+=[ ]+' + anynum
},
'CCSD': {
'raw': 'Total CCSD energy[ ]+\(file100\)[ =]+' + anynum,
'corr': '(?<!Total) CCSD energy[ ]+\(file100\)[ =]+' + anynum,
'MP2': '\* MP2 total energy[ =]+' + anynum,
'dMP2': 'MP2 correlation energy[ ]+' + anynum,
'd(T)': '\(T\) energy[ =]+' + anynum,
'(T)': '\* CCSD\(T\) total energy[ =]+' + anynum
},
'DETCI' : {
'raw': 'DETCI Root 0 energy =\s+([-.0-9]+)',
'root': 'DETCI Root [0-9]+ energy =\s+([-.0-9]+)'
}
}
lookup['sections'] = {
'GEOM':{
'start': '==> Geometry <==',
'end': 'Nuclear'
},
'HF': {
'start': 'HF Reference',
'end': lookup['generic_delim']['end'],
'post': '==> Post-Iterations <=='
},
'DFT': {
'start': 'KS Reference',
'end': 'Computation Completed'
},
'opt': {
'start': '==> Convergence Check <==',
'end': 'OPTKING Finished Execution'
},
'freq': {
'start': '==> Harmonic Vibrational Analysis <==',
'end': 'Total G, Free enthalpy'
},
'MP2': {
'start': '2nd-Order Moller-Plesset Theory',
'end': lookup['generic_delim']['end']
},
'DFMP2': {
'start': '2nd-Order Density-Fitted Moller-Plesset Theory',
'end': lookup['generic_delim']['end']
},
'CCSD': {
'start': '\* CCENERGY \*',
'end': lookup['generic_delim']['end'] + '|' + lookup['exit_success']
},
'DETCI': {
'start': "D E T C I",
'end': lookup['generic_delim']['end'] + '|' + lookup['exit_success']
}
}
lookup['properties'] = {
'GEOM': {
'cart':'([A-Z]|[A-Z][A-Z])\s+([-.0-9]+)\s+([-.0-9]+)\s+([-.0-9]+)\s+'#([-.0-9]+)'
},
'HF': {
'diis': 'DIIS ([a-z]+).',
'mom': 'MOM ([a-z]+).',
'frac': 'Fractional occupation ([a-z]+).',
'guess': 'Guess Type is ([A-Za-z0-9]+)',
'basis': 'Basis Set: ([A-Za-z0-9]+)',
'alg': 'SCF Algorithm Type is ([A-Za-z]+)'
}, #,
#'post':
#{'orbital_energies':'[ ]+([1-9A-Z]+)[]+([-.0-9]+)'}},
'DFT': {
'diis': 'DIIS ([a-z]+).',
'mom': 'MOM ([a-z]+).',
'frac': 'Fractional occupation ([a-z]+).',
'guess': 'Guess Type is ([A-Za-z0-9]+)',
'basis': 'Basis Set: ([A-Za-z0-9]+)',
'alg': 'SCF Algorithm Type is ([A-Za-z]+)'
}, #,
#'post':
#{'orbital_energies':'[ ]+([1-9A-Z]+)[]+([-.0-9]+)'}}
'opt': {
'iteration':
'[ ]+([0-9]+)[ ]+([\\+-e.0-9]+)[ o\*]+([\\+-e.0-9]+)[ o\*]+([\\+-e.0-9]+)[ o\*]+([\\+-e.0-9]+)[ o\*]+([\\+-e.0-9]+)[ o\*]+',
'summary':
'[ ]+([0-9]+)[ ]+([-.0-9]+)[ ]+([-.0-9]+)[ ]+([-.0-9]+)[ ]+([-.0-9]+)[ ]+([-.0-9]+)',
'entry':
' ([A-Za-z]+)[ ]+([-.0-9]+)[ ]+([-.0-9]+)[ ]+([-.0-9]+)',
'success':
'Optimization is complete!'
},
'freq': {
'entry':
'Freq \[cm\^-1\][ ]+([-.0-9i]+)[ ]+([-.0-9i]+)[ ]+([-.0-9i]+)',
'zpve': 'Total ZPE, Electronic energy at 0 \[K\][ ]+([-.0-9]+)'
}
}
#add aliases
#lookup['energy']['HF'] = copy.deepcopy(lookup['energy']['SCF']['raw'])
#lookup['energy']['DFT'] = copy.deepcopy(lookup['energy']['SCF']['raw'])
| 32.792593 | 132 | 0.441157 | 479 | 4,427 | 4.05428 | 0.321503 | 0.025747 | 0.010814 | 0.014418 | 0.453141 | 0.38826 | 0.354274 | 0.354274 | 0.267765 | 0.261586 | 0 | 0.029012 | 0.268127 | 4,427 | 134 | 133 | 33.037313 | 0.57037 | 0.124464 | 0 | 0.40678 | 0 | 0.042373 | 0.553599 | 0.015536 | 0 | 0 | 0 | 0.007463 | 0 | 1 | 0 | false | 0 | 0.016949 | 0 | 0.016949 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d715c9c0d93075f2858b5107d648e914b27eedeb | 5,445 | py | Python | api.py | hensm/bnu_times | fe155ed85ce583858b0e405f3cfa76d8eccc0c3c | [
"MIT"
] | null | null | null | api.py | hensm/bnu_times | fe155ed85ce583858b0e405f3cfa76d8eccc0c3c | [
"MIT"
] | null | null | null | api.py | hensm/bnu_times | fe155ed85ce583858b0e405f3cfa76d8eccc0c3c | [
"MIT"
] | null | null | null | import re
import requests
import sys
from bs4 import BeautifulSoup
from dataclasses import dataclass, asdict
from datetime import datetime, timedelta
from enum import Enum
from typing import Dict, List, Union
TIMETABLE_URL = "https://mytimetable.bucks.ac.uk"
@dataclass
class TimetableEntry:
name: str
desc: str
type: str
module: str
time_start: datetime
time_end: datetime
weeks: str
room: str
staff: List[str]
@dataclass
class Timetable:
student_id: str
student_name: str
student_course: str
date_start: datetime.date
days: List[List[TimetableEntry]]
def get_form_fields(content: bytes):
"""
    Gets the field values of the first form found in the HTML content.
"""
soup = BeautifulSoup(content, "html.parser")
fields: Dict[str, Union[str, List[str]]] = {}
form = soup.find("form")
for input in form.find_all("input"):
if not (input.has_attr("name") and input.has_attr("value")):
continue
if input["type"] in ("text", "hidden", "submit"):
fields[input["name"]] = input["value"]
for select in soup.find_all("select"):
opts = select.find_all("option")
fields[select["name"]] = [
opt["value"] for opt in opts if opt.has_attr("selected")
]
return fields
session: requests.Session = None
session_res: requests.Response = None
def ensure_valid_session_state():
"""
Makes a request to the timetable site index for session
cookies/data. Follows a 302 redirect to the timetable index,
submits an ASP form to switch to the student timetables
section for subsequent requests.
"""
global session
global session_res
# TODO: Fix session expiry issues
session = requests.Session()
initial_res = session.get(TIMETABLE_URL)
# Get ASP form data from initial page and switch to student
# timetables section.
session_res = session.post(initial_res.url, data=dict(
get_form_fields(initial_res.content),
**{
"__EVENTTARGET": "LinkBtn_studentsbytext",
"tLinkType": "information"
}))
class Week(Enum):
Current = "t"
Next = "n"
def get_timetable(student_id: str, week: Week) -> Timetable:
"""
Makes a request for the timetable with a given student ID.
"""
re_student_id = re.compile(r"^\d{8}$")
if not re_student_id.match(student_id):
print("invalid id %s" % student_id, file=sys.stderr)
return None
ensure_valid_session_state()
# Make request for individual timetable
res_timetable = session.post(session_res.url, data=dict(
get_form_fields(session_res.content),
**{
"__EVENTTARGET": "tObjectInput",
"tObjectInput": student_id,
"lbWeeks": week.value,
"lbDays": "1-7",
"dlType": "TextSpreadsheet;swsurl;SWSNET Student TextSpreadsheet"
}))
# The list timetable provides a week-by-week view with 7
# HTML tables containing the events Mon-Sun, all with the
# same column info, so collect all the data into dataclasses
# for export.
timetable_soup = BeautifulSoup(res_timetable.content, "html.parser")
timetable_data_days = []
header_student_name = timetable_soup.select_one(".header-0-0-0")
header_student_course = timetable_soup.select_one(".header-0-0-2")
header_time = timetable_soup.select_one(".header-1-2-3")
if header_student_name is None or \
header_student_course is None or \
header_time is None:
return None
week_start, _ = map(
lambda x: datetime.strptime(x, "%d %b %Y"),
header_time.text.split("-"))
for day_index, sheet in enumerate(timetable_soup.select(".spreadsheet")):
time_day = week_start + timedelta(days=day_index)
def parse_event_time(time_string: str):
return datetime.combine(
time_day,
datetime.strptime(time_string, "%H:%M").time())
def split_strip(string: str, sep=","):
return [x.strip() for x in string.split(sep)]
sheet_data = []
for row in sheet.select("tr:not(.columnTitles)"):
# idx column description
# -------------------------------------------------------
# 0 name Session name
# 1 desc Short session description
# 2 type Type of session
# 3 module Module string
# 4 time_start DateTime string for start of session
# 5 time_end DateTime string for end of session
# 6 weeks List of applicable week ranges
# 7 room Room string
# 8 staff List of assigned staff
cols = [col.text for col in row.find_all("td")]
# Cleanup data
cols[4] = parse_event_time(cols[4])
cols[5] = parse_event_time(cols[5])
cols[6] = split_strip(cols[6])
cols[8] = split_strip(cols[8])
sheet_data.append(TimetableEntry(*cols))
timetable_data_days.append(sheet_data)
if not len(timetable_data_days):
return None
return Timetable(
student_id=student_id,
student_name=header_student_name.text,
student_course=header_student_course.text,
date_start=week_start.date(),
days=timetable_data_days)
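# Example (illustrative): fetch this week's timetable for an 8-digit
# student id and print a couple of fields.
#   timetable = get_timetable("12345678", Week.Current)
#   if timetable is not None:
#       print(timetable.student_name, timetable.date_start)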
| 29.754098 | 77 | 0.618733 | 677 | 5,445 | 4.812408 | 0.299852 | 0.027624 | 0.020872 | 0.020258 | 0.043585 | 0.034991 | 0.034991 | 0 | 0 | 0 | 0 | 0.008612 | 0.274931 | 5,445 | 182 | 78 | 29.917582 | 0.816616 | 0.202204 | 0 | 0.081818 | 0 | 0 | 0.095383 | 0.016874 | 0 | 0 | 0 | 0.005495 | 0 | 1 | 0.045455 | false | 0 | 0.072727 | 0.018182 | 0.354545 | 0.009091 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d716fe934fdaffec877ca7cad1dfcf924afe4662 | 323 | py | Python | Dataset/Leetcode/train/48/257.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/48/257.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/48/257.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | class Solution(object):
def XXX(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: None Do not return anything, modify matrix in-place instead.
"""
        n = len(matrix)
        # Transpose in place, then reverse each row: together this is a
        # 90-degree clockwise rotation using O(1) extra space. (Writing
        # matrix[j][n-1-i] = matrix[i][j] directly would overwrite entries
        # before they are read.)
        for i in range(n):
            for j in range(i + 1, n):
                matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]
        for row in matrix:
            row.reverse()
| 24.846154 | 76 | 0.501548 | 44 | 323 | 3.681818 | 0.613636 | 0.08642 | 0.098765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004854 | 0.362229 | 323 | 12 | 77 | 26.916667 | 0.781553 | 0.303406 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d71794340cb5e1f96a75cb4fa45af8b8c899ed28 | 1,570 | py | Python | manage.py | unicef/nomenklatura-1 | da24a6c0ef841fb604a401c14169b5cadb31de33 | [
"MIT"
] | 6 | 2020-09-04T15:16:52.000Z | 2022-03-15T05:44:52.000Z | manage.py | unicef/nomenklatura-1 | da24a6c0ef841fb604a401c14169b5cadb31de33 | [
"MIT"
] | null | null | null | manage.py | unicef/nomenklatura-1 | da24a6c0ef841fb604a401c14169b5cadb31de33 | [
"MIT"
] | null | null | null | import logging
from flask_assets import ManageAssets
from flask_script import Manager
from nomenklatura.assets import assets
from nomenklatura.core import db
# `Alias.all_unmatched` is not provided by sqlalchemy.sql.Alias; the ORM
# model is the one queried and deleted in `flush` below.
from nomenklatura.model import Alias, Dataset
from nomenklatura.views import app
manager = Manager(app)
manager.add_command('assets', ManageAssets(assets))
logger = logging.getLogger(__name__)
@manager.command
def createdb():
""" Make the database. """
logger.info('Create DB')
logger.info('Create Extensions')
db.engine.execute("CREATE EXTENSION IF NOT EXISTS hstore;")
db.engine.execute("CREATE EXTENSION IF NOT EXISTS fuzzystrmatch;")
db.create_all()
@manager.command
def flush(dataset):
logger.info('Flushing')
ds = Dataset.by_name(dataset)
for alias in Alias.all_unmatched(ds):
db.session.delete(alias)
db.session.commit()
@manager.option('-h', '--host', dest='host', default='127.0.0.1')
@manager.option('-p', '--port', dest='port', type=int, default=8000)
@manager.option('-w', '--workers', dest='workers', type=int, default=8)
def gunicorn(host, port, workers):
"""Start the Server with Gunicorn"""
from gunicorn.app.base import Application
class FlaskApplication(Application):
def init(self, parser, opts, args):
return {
'bind': '{0}:{1}'.format(host, port),
'workers': workers
}
def load(self):
return app
application = FlaskApplication()
return application.run()
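# Usage sketch (flask_script dispatches on the command names registered above;
# exact flags may vary with your flask_script version):
#   python manage.py createdb
#   python manage.py flush <dataset>
#   python manage.py gunicorn -h 0.0.0.0 -p 8000 -w 4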
if __name__ == '__main__':
manager.run()
| 26.610169 | 71 | 0.675796 | 193 | 1,570 | 5.404145 | 0.440415 | 0.061361 | 0.032598 | 0.040268 | 0.078619 | 0.078619 | 0.078619 | 0.078619 | 0 | 0 | 0 | 0.010252 | 0.192357 | 1,570 | 58 | 72 | 27.068966 | 0.812303 | 0.031847 | 0 | 0.047619 | 0 | 0 | 0.132626 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.119048 | false | 0 | 0.214286 | 0.047619 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d71a849e7f153bdd751e9f25aeb7f3c4b5af42c2 | 3,629 | py | Python | game.py | haresh03/the-mystery-island | da424761db5926433a7b61fb5ccb593da39fb4a5 | [
"Apache-2.0"
] | null | null | null | game.py | haresh03/the-mystery-island | da424761db5926433a7b61fb5ccb593da39fb4a5 | [
"Apache-2.0"
] | null | null | null | game.py | haresh03/the-mystery-island | da424761db5926433a7b61fb5ccb593da39fb4a5 | [
"Apache-2.0"
] | null | null | null | import time
import random
def print_slow(s):
print(s)
time.sleep(3)
def valid_input():
    while True:
        choice = input()
        if choice == '1':
            return choice
        elif choice == '2':
            return choice
        else:
            print(">> Enter either 1 or 2! <<")
def start():
print_slow("#DAY 1: You are stuck on some island in the vast"
" sea after your ship got sunk.")
print_slow(">> You are the only survivor alive!")
print_slow(">> You figured out ways to stay alive on this island\n")
print_slow("#DAY 19: You spend weeks, after which you saw"
" a ship far away from you!")
print_slow("==> What would you do?\n"
"1: Wave your hand at the ship\n"
"2: Swim to approach it\n[ Enter 1 or 2 ]")
choice = valid_input()
if choice == '1':
print_slow(">> Seems good. But this didn't work and"
" the ship sailed away!\n")
day45()
else:
print_slow(">> While swimming you found out"
" there are sharks out there. oops!")
game_over()
def day45():
print_slow("#Day 45: Roaming on the island you found a box.")
print_slow("==> What would you want to do?\n"
"1: Open it!\n"
"2: Throw it in the ocean.\n[ Enter 1 or 2 ]")
choice = valid_input()
if choice == '1':
box = random.choice(['Teleporter', 'Clothes', 'Gold', 'Silver',
'Camera', 'Lamp'])
print_slow(">> Well you found: " + box)
if box == 'Teleporter':
print_slow('>> You used it to go home')
end()
elif box == 'Lamp':
print_slow(">> You played with the lamp and found a magical genie,"
" who offered you only one wish")
print_slow("==> What would you wish for?\n"
"1: Wish to go home.\n"
"2: Wish for $100 billion dollars.\n[ Enter 1 or 2 ]")
choice = valid_input()
if choice == '1':
end()
else:
print_slow('>> Keep it safe with you :-)\n')
day78()
else:
print_slow('>> Keep it safe with you :-)\n')
day78()
else:
print_slow(">> Well doesn't matter. :-) \n")
day78()
def day78():
print_slow("#Day 78: You tried to go deep inside "
"the island and found a cave")
print_slow("==> What would you want to do?\n"
"1: Go inside!\n"
"2: Return back.\n[ Enter 1 or 2 ]")
choice = valid_input()
if choice == '1':
print_slow(">> There was a hungry lion inside, oops!")
game_over()
else:
print_slow(">> Well, good! :-)")
print_slow("#Day 99: You saw a helicopter asked it for help,"
" it passed you a rope to climb.")
end()
def end():
print_slow('>> You successfully escaped the island, congrats! <<')
game_over()
def game_over():
print_slow("** GAME OVER! **")
print_slow(">> Would you like to play again? <<\n1.YES\n2.NO")
choice = valid_input()
if choice == '1':
start()
else:
print_slow("*** Thanks for playing! ***")
print_slow("** WELCOME! **")
print_slow("The Mystery Island - A Text Based Adventure Game\n"
"By -- Haresh Gaikwad\n")
if __name__ == "__main__":
start()
| 31.556522 | 80 | 0.495453 | 461 | 3,629 | 3.800434 | 0.342733 | 0.143836 | 0.044521 | 0.047945 | 0.212329 | 0.188356 | 0.174087 | 0.174087 | 0.174087 | 0.174087 | 0 | 0.02305 | 0.378341 | 3,629 | 114 | 81 | 31.833333 | 0.753546 | 0 | 0 | 0.381443 | 0 | 0 | 0.433001 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072165 | false | 0.010309 | 0.020619 | 0 | 0.113402 | 0.309278 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d71df2929e2a2cc15c161c732c7af2e26a693aad | 1,798 | py | Python | exploration/scripts/nbconverted/latent_offset_PA1673like.py | greenelab/Pseudomonas_latent_spaces | 0d78dc927a246c49f631abeddc0b952add4c6d0c | [
"BSD-3-Clause"
] | null | null | null | exploration/scripts/nbconverted/latent_offset_PA1673like.py | greenelab/Pseudomonas_latent_spaces | 0d78dc927a246c49f631abeddc0b952add4c6d0c | [
"BSD-3-Clause"
] | 12 | 2018-07-02T19:35:31.000Z | 2019-03-09T00:24:09.000Z | exploration/scripts/nbconverted/latent_offset_PA1673like.py | greenelab/Pseudomonas_latent_spaces | 0d78dc927a246c49f631abeddc0b952add4c6d0c | [
"BSD-3-Clause"
] | 1 | 2018-06-25T14:21:51.000Z | 2018-06-25T14:21:51.000Z |
# coding: utf-8
# In[1]:
#-------------------------------------------------------------------------------------------------------------------------------
# By Alexandra Lee (July 2018)
#
# Take the average of the encoded gene expression for the two experimental conditions
# Take the difference of the averages -- this will be the offset for the latent space
#-------------------------------------------------------------------------------------------------------------------------------
import os
import pandas as pd
import numpy as np
randomState = 123
from numpy.random import seed
seed(randomState)
# In[2]:
# load arguments
lowest_file = os.path.join(os.path.dirname(os.getcwd()), "encoded", "PA1673_full_old", "train_lowest_2layer_10latent_encoded.txt")
highest_file = os.path.join(os.path.dirname(os.getcwd()), "encoded", "PA1673_full_old", "train_highest_2layer_10latent_encoded.txt")
# output files
out_file = os.path.join(os.path.dirname(os.getcwd()), "data", "PA1673_full_old", "train_offset_2layer_10latent.txt")
# In[3]:
# read in data
lowest_data = pd.read_table(lowest_file, header=0, sep='\t', index_col=0)
highest_data = pd.read_table(highest_file, header=0, sep='\t', index_col=0)
lowest_data.head(5)
# In[4]:
highest_data.head(5)
# In[5]:
# Average gene expression across samples in training set
train_lowest_mean = lowest_data.mean(axis=0)
train_highest_mean = highest_data.mean(axis=0)
train_lowest_mean
# In[6]:
train_highest_mean
# In[7]:
# Generate offset using average gene expression in original dataset
train_offset_latent = train_highest_mean - train_lowest_mean
train_offset_latent_df = pd.Series.to_frame(train_offset_latent).transpose()
train_offset_latent_df
# In[8]:
# output
train_offset_latent_df.to_csv(out_file, sep='\t')
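# Worked example with toy numbers (not from the dataset): if the mean encoded
# expression is [0.2, 0.5] for the lowest condition and [0.8, 0.9] for the
# highest, the latent-space offset is [0.8 - 0.2, 0.9 - 0.5] = [0.6, 0.4].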
| 22.197531 | 132 | 0.647942 | 248 | 1,798 | 4.455645 | 0.370968 | 0.032579 | 0.076923 | 0.038009 | 0.21629 | 0.18371 | 0.18371 | 0.18371 | 0.140271 | 0.108597 | 0 | 0.028125 | 0.110122 | 1,798 | 80 | 133 | 22.475 | 0.6625 | 0.38376 | 0 | 0 | 0 | 0 | 0.167897 | 0.104244 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.190476 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d71f1e376a02903ff521f93b0d0f04e95f4f878f | 16,401 | py | Python | drizzlepac/findobj.py | jhunkeler/drizzlepac | 09ca153b8e4f4e03dd155c61243d722e1c40caee | [
"BSD-3-Clause"
] | 2 | 2020-02-10T16:15:58.000Z | 2021-03-24T20:08:03.000Z | drizzlepac/findobj.py | jhunkeler/drizzlepac | 09ca153b8e4f4e03dd155c61243d722e1c40caee | [
"BSD-3-Clause"
] | null | null | null | drizzlepac/findobj.py | jhunkeler/drizzlepac | 09ca153b8e4f4e03dd155c61243d722e1c40caee | [
"BSD-3-Clause"
] | 1 | 2020-09-02T18:08:39.000Z | 2020-09-02T18:08:39.000Z | """
A suite of functions for finding sources in images.
:Authors: Warren Hack, Mihai Cara
:License: :doc:`LICENSE`
"""
import sys
import math
import numpy as np
from scipy import signal, ndimage
import stsci.imagestats as imagestats
from . import cdriz
__all__ = ['gaussian1', 'gausspars', 'gaussian', 'moments', 'errfunc',
'findstars', 'apply_nsigma_separation', 'xy_round',
'precompute_sharp_round', 'sharp_round', 'roundness', 'immoments',
'nmoment', 'centroid', 'cmoment', 'central_moments', 'covmat',
'help', 'getHelpAsString']
#def gaussian(amplitude, xcen, ycen, xsigma, ysigma):
#from numpy import *
FWHM2SIG = 2*np.sqrt(2*np.log(2))
#def gaussian1(height, x0, y0, fwhm, nsigma=1.5, ratio=1., theta=0.0):
def gaussian1(height, x0, y0, a, b, c):
"""
height - the amplitude of the gaussian
x0, y0, - center of the gaussian
a, b, c - ellipse parameters (coefficients in the quadratic form)
"""
return lambda x, y: height * np.exp(-0.5* (a*(x-x0)**2 + b*(x-x0)*(y-y0) + c*(y-y0)**2))
def gausspars(fwhm, nsigma=1.5, ratio=1, theta=0.):
"""
    fwhm - full width at half maximum of the observation
    nsigma - cut the gaussian at nsigma
    ratio - ratio of ysigma to xsigma (ysigma = ratio * xsigma)
    theta - position angle of the major axis, measured
            counter-clockwise from the x axis
Returns dimensions nx and ny of the elliptical kernel as well as the
ellipse parameters a, b, c, and f when defining an ellipse through the
quadratic form: a*(x-x0)^2+b(x-x0)*(y-y0)+c*(y-y0)^2 <= 2*f
"""
xsigma = fwhm / FWHM2SIG
ysigma = ratio * xsigma
f = nsigma**2/2.
theta = np.deg2rad(theta)
cost = np.cos(theta)
sint = np.sin(theta)
if ratio == 0: # 1D Gaussian
if theta == 0 or theta == 180:
a = 1/xsigma**2
b = 0.0
c = 0.0
elif theta == 90:
a = 0.0
b = 0.0
c = 1/xsigma**2
else:
print('Unable to construct 1D Gaussian with these parameters\n')
raise ValueError
nx = 2 * int(max(2, (xsigma*nsigma*np.abs(cost))))+1
ny = 2 * int(max(2, (xsigma*nsigma*np.abs(sint))))+1
else: #2D gaussian
xsigma2 = xsigma * xsigma
ysigma2 = ysigma * ysigma
a = cost**2/xsigma2 + sint**2/ysigma2
b = 2 * cost * sint *(1.0/xsigma2-1.0/ysigma2)
c = sint**2/xsigma2 + cost**2/ysigma2
d = b**2 - 4*a*c # discriminant
# nx = int(2*max(2, math.sqrt(-8*c*f/d)))+1
# ny = int(2*max(2, math.sqrt(-8*a*f/d)))+1
nx = 2 * int(2*max(1, nsigma*math.sqrt(-c/d)))+1
ny = 2 * int(2*max(1, nsigma*math.sqrt(-a/d)))+1
return nx, ny, a, b, c, f
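# Usage sketch (illustrative values): for a circular PSF with a 3-pixel FWHM,
# cut at 1.5 sigma,
#   nx, ny, a, b, c, f = gausspars(3.0, nsigma=1.5, ratio=1.0, theta=0.0)
# returns odd kernel dimensions nx, ny and the quadratic-form coefficients
# satisfying a*(x-x0)**2 + b*(x-x0)*(y-y0) + c*(y-y0)**2 <= 2*f.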
def gaussian(height, center_x, center_y, width_x, width_y):
#Returns a gaussian function with the given parameters
width_x = float(width_x)
width_y = float(width_y)
    return lambda x, y: height * np.exp(
        -(((center_x - x) / width_x) ** 2 + ((center_y - y) / width_y) ** 2) / 2)
def moments(data,cntr):
"""
Returns (height, x, y, width_x, width_y)
the gaussian parameters of a 2D distribution by calculating its
moments.
"""
total = data.sum()
#X, Y = np.indices(data.shape)
#x = (X*data).sum()/total
#y = (Y*data).sum()/total
x,y = cntr
xi = int(x)
yi = int(y)
if xi < 0 or xi >= data.shape[1] or yi < 0 or yi >= data.shape[0]:
raise ValueError
col = data[:, xi]
width_x = np.sqrt(abs(((np.arange(col.size)-y)**2*col).sum()/col.sum()))
row = data[yi, :]
width_y = np.sqrt(abs(((np.arange(row.size)-x)**2*row).sum()/row.sum()))
height = data.max()
return height, x, y, width_x, width_y
def errfunc(p, *args):
func = gaussian1(*p)
ret =np.ravel(func(*args[1:]) - args[0])
return ret
def findstars(jdata, fwhm, threshold, skymode,
peakmin=None, peakmax=None, fluxmin=None, fluxmax=None,
nsigma=1.5, ratio=1.0, theta=0.0,
use_sharp_round=False,mask=None,
sharplo=0.2,sharphi=1.0,roundlo=-1.0,roundhi=1.0):
# store input image size:
(img_ny, img_nx) = jdata.shape
# Define convolution inputs
nx, ny, a, b, c, f = gausspars(fwhm, nsigma=nsigma, ratio= ratio, theta=theta)
xc = nx//2
yc = ny//2
yin, xin = np.mgrid[0:ny, 0:nx]
kernel = gaussian1(1.0, xc, yc, a, b, c)(xin,yin)
# define size of extraction box for each source based on kernel size
grx = xc
gry = yc
# DAOFIND STYLE KERNEL "SHAPE"
rmat = np.sqrt((xin-xc)**2 + (yin-yc)**2)
rmatell = a*(xin-xc)**2 + b*(xin-xc)*(yin-yc) + c*(yin-yc)**2
xyrmask = np.where((rmatell <= 2*f) | (rmat <= 2.001),1,0).astype(np.int16)
# Previous *style* computation for kernel "shape":
#xyrmask = np.where(rmat <= max(grx,gry),1,0).astype(np.int16)
npts = xyrmask.sum()
rmask = kernel*xyrmask
denom = (rmask*rmask).sum() - rmask.sum()**2/npts
nkern = (rmask - (rmask.sum()/npts))/denom # normalize kernel to preserve
# fluxes for thresholds
nkern *= xyrmask
# initialize values used for getting source centers
relerr = 1./((rmask**2).sum() - (rmask.sum()**2/xyrmask.sum()))
xsigsq = (fwhm / FWHM2SIG)**2
ysigsq = (ratio**2) * xsigsq
# convolve image with gaussian kernel
convdata = signal.convolve2d(jdata, nkern, boundary='symm', mode='same').astype(np.float32)
# clip image to create regions around each source for segmentation
if mask is None:
tdata=np.where(convdata > threshold, convdata, 0)
else:
tdata=np.where((convdata > threshold) & mask, convdata, 0)
# segment image and find sources
s = ndimage.morphology.generate_binary_structure(2, 2)
ldata, nobj = ndimage.label(tdata, structure=s)
fobjects = ndimage.find_objects(ldata)
fluxes = []
fitind = []
if nobj < 2:
print('No objects found for this image. Please check value of "threshold".')
return fitind,fluxes
# determine center of each source, while removing spurious sources or
# applying limits defined by the user
ninit = 0
ninit2 = 0
s2m, s4m = precompute_sharp_round(nx, ny, xc, yc)
satur = False # Default assumption if use_sharp_round=False
sharp = None
round1 = None
round2 = None
    for ss in fobjects:
ssx = ss[1].stop - ss[1].start
ssy = ss[0].stop - ss[0].start
if ssx >= tdata.shape[1]-1 or ssy >= tdata.shape[0]-1:
continue
yr0 = ss[0].start - gry
yr1 = ss[0].stop + gry + 1
if yr0 <= 0 or yr1 >= img_ny: continue # ignore sources within ny//2 of edge
xr0 = ss[1].start - grx
xr1 = ss[1].stop + grx + 1
if xr0 <= 0 or xr1 >= img_nx: continue # ignore sources within nx//2 of edge
ssnew = (slice(yr0,yr1),slice(xr0,xr1))
region = tdata[ssnew]
cntr = centroid(region)
# Define region centered on max value in object (slice)
# This region will be bounds-checked to insure that it only accesses
# a valid section of the image (not off the edge)
maxpos = (int(cntr[1]+0.5)+ssnew[0].start,int(cntr[0]+0.5)+ssnew[1].start)
yr0 = maxpos[0] - gry
yr1 = maxpos[0] + gry + 1
if yr0 < 0 or yr1 > img_ny:
continue
xr0 = maxpos[1] - grx
xr1 = maxpos[1] + grx + 1
if xr0 < 0 or xr1 > img_nx:
continue
# Simple Centroid on the region from the input image
jregion = jdata[yr0:yr1,xr0:xr1]
src_flux = jregion.sum()
src_peak = jregion.max()
if (peakmax is not None and src_peak >= peakmax):
continue
if (peakmin is not None and src_peak <= peakmin):
continue
if fluxmin and src_flux <= fluxmin:
continue
if fluxmax and src_flux >= fluxmax:
continue
datamin = jregion.min()
datamax = jregion.max()
if use_sharp_round:
# Compute sharpness and first estimate of roundness:
dregion = convdata[yr0:yr1,xr0:xr1]
satur, round1, sharp = \
sharp_round(jregion, dregion, xyrmask, xc, yc,
s2m, s4m, nx, ny, datamin, datamax)
# Filter sources:
if sharp is None or (sharp < sharplo or sharp > sharphi):
continue
if round1 is None or (round1 < roundlo or round1 > roundhi):
continue
px, py, round2 = xy_round(jregion, grx, gry, skymode,
kernel, xsigsq, ysigsq, datamin, datamax)
# Filter sources:
if px is None:
continue
if use_sharp_round and not satur and \
(round2 is None or round2 < roundlo or round2 > roundhi):
continue
fitind.append((px + xr0, py + yr0, sharp, round1, round2))
# compute a source flux value
fluxes.append(src_flux)
fitindc, fluxesc = apply_nsigma_separation(fitind, fluxes, fwhm*nsigma / 2)
return fitindc, fluxesc
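# Usage sketch (hypothetical inputs): `image` is a 2D numpy array, and the
# detection threshold and sky mode would normally come from image statistics
# (e.g. stsci.imagestats):
#   positions, fluxes = findstars(image, fwhm=2.5, threshold=thresh,
#                                 skymode=sky, use_sharp_round=True)
# Each entry of `positions` is (x, y, sharp, round1, round2).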
def apply_nsigma_separation(fitind,fluxes,separation,niter=10):
"""
Remove sources which are within nsigma*fwhm/2 pixels of each other, leaving
only a single valid source in that region.
This algorithm only works for sources which end up sequentially next to each other
based on Y position and removes enough duplicates to make the final source list more
managable. It sorts the positions by Y value in order to group those at the
same positions as much as possible.
"""
for n in range(niter):
if len(fitind) < 1:
break
fitarr = np.array(fitind,np.float32)
fluxarr = np.array(fluxes,np.float32)
inpind = np.argsort(fitarr[:,1])
npind = fitarr[inpind]
fluxind = fluxarr[inpind]
fitind = npind.tolist()
fluxes = fluxind.tolist()
dx = npind[1:,0] - npind[:-1,0]
dy = npind[1:,1] - npind[:-1,1]
dr = np.sqrt(np.power(dx,2)+np.power(dy,2))
nsame = np.where(dr <= separation)[0]
if nsame.shape[0] > 0:
for ind in nsame[-1::-1]:
#continue # <- turn off filtering by source separation
del fitind[ind]
del fluxes[ind]
else:
break
return fitind,fluxes
def xy_round(data,x0,y0,skymode,ker2d,xsigsq,ysigsq,datamin=None,datamax=None):
""" Compute center of source
Original code from IRAF.noao.digiphot.daofind.apfind ap_xy_round()
"""
nyk,nxk = ker2d.shape
if datamin is None:
datamin = data.min()
if datamax is None:
datamax = data.max()
# call C function for speed now...
xy_val = cdriz.arrxyround(data,x0,y0,skymode,ker2d,xsigsq,ysigsq,datamin,datamax)
if xy_val is None:
x = None
y = None
round = None
else:
x = xy_val[0]
y = xy_val[1]
round = xy_val[2]
return x,y,round
def precompute_sharp_round(nxk, nyk, xc, yc):
"""
Pre-computes mask arrays to be used by the 'sharp_round' function
for roundness computations based on two- and four-fold symmetries.
"""
# Create arrays for the two- and four-fold symmetry computations:
s4m = np.ones((nyk,nxk),dtype=np.int16)
s4m[yc, xc] = 0
s2m = np.ones((nyk,nxk),dtype=np.int16)
s2m[yc, xc] = 0
    s2m[yc:nyk, 0:xc] = -1
    s2m[0:yc+1, xc+1:nxk] = -1
return s2m, s4m
def sharp_round(data, density, kskip, xc, yc, s2m, s4m, nxk, nyk,
datamin, datamax):
"""
sharp_round -- Compute first estimate of the roundness and sharpness of the
detected objects.
A Python translation of the AP_SHARP_ROUND IRAF/DAOFIND function.
"""
# Compute the first estimate of roundness:
sum2 = np.sum(s2m*density)
sum4 = np.sum(s4m*abs(density))
if sum2 == 0.0:
round = 0.0
elif sum4 <= 0.0: # eps?
round = None
else:
round = 2.0 * sum2 / sum4
# Eliminate the sharpness test if the central pixel is bad:
mid_data_pix = data[yc, xc]
mid_dens_pix = density[yc, xc]
if mid_data_pix > datamax:
return True, round, None
if mid_data_pix < datamin:
return False, round, None
########################
# Sharpness statistics:
satur = np.max(kskip*data) > datamax
# Exclude pixels (create a mask) outside the [datamin, datamax] range:
uskip = np.where((data >= datamin) & (data <= datamax), 1, 0)
# Update the mask with the "skipped" values from the convolution kernel:
uskip *= kskip
# Also, exclude central pixel:
uskip[yc, xc] = 0
npixels = np.sum(uskip)
if (npixels < 1 or mid_dens_pix <= 0.0):
return satur, round, None
sharp = (mid_data_pix - np.sum(uskip*data)/npixels) / mid_dens_pix
#sharp = (mid_data_pix - np.mean(uskip*data)) / mid_dens_pix
return satur, round, sharp
def roundness(im):
"""
from astropy.io import fits as pyfits
data=pyfits.getdata('j94f05bgq_flt.fits',ext=1)
star0=data[403:412,423:432]
star=data[396:432,3522:3558]
In [53]: findobj.roundness(star0)
Out[53]: 0.99401955054989544
In [54]: findobj.roundness(star)
Out[54]: 0.83091919980660645
"""
perimeter = im.shape[0]*2 +im.shape[1]*2 -4
area = im.size
return 4*np.pi*area/perimeter**2
def immoments(im, p,q):
x = list(range(im.shape[1]))
y = list(range(im.shape[0]))
#coord=np.array([x.flatten(),y.flatten()]).T
"""
moment = 0
momentx = 0
for i in x.flatten():
moment+=momentx
sumx=0
for j in y.flatten():
sumx+=i**0*j**0*star0[i,j]
"""
moment = np.sum([i**p*j**q*im[i,j] for j in x for i in y], dtype=np.float64)
return moment
#ss=[i**0*j**0*list(star0[i,j].flatten()) for i in list(x.flatten()) for j in list(y.flatten())]
def nmoment(im,p,q):
m = immoments(im,p,q)
nmoment = m/np.sum(im, dtype=np.float64)
return nmoment
def centroid(im):
"""
Computes the centroid of an image using the image moments:
centroid = {m10/m00, m01/m00}
These calls point to Python version of moments function
m00 = immoments(im,0,0)
m10 = immoments(im, 1,0)
m01 = immoments(im,0,1)
"""
# These calls point to Python version of moments function
m00 = cdriz.arrmoments(im,0,0)
m10 = cdriz.arrmoments(im, 1,0)
m01 = cdriz.arrmoments(im,0,1)
ycen = m10 / m00
xcen = m01 / m00
return xcen, ycen
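# Worked example (a sketch; assumes cdriz.arrmoments matches the pure-Python
# immoments above): for im = np.zeros((3, 3)) with im[1, 2] = 1.0, the moments
# are m00 = 1, m10 = 1 (row index) and m01 = 2 (column index), so
# centroid(im) returns (xcen, ycen) = (2.0, 1.0).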
def cmoment(im,p,q):
xcen,ycen = centroid(im)
#x,y=np.meshgrid(range(403,412),range(423,432))
x = list(range(im.shape[1]))
y = list(range(im.shape[0]))
mu = np.sum([(i-xcen)**p * (j-ycen)**q * im[i,j] for i in y for j in x],
dtype=np.float64)
return mu
def central_moments(im):
xcen,ycen = centroid(im)
mu00 = cmoment(im,p=0,q=0)
mu01 = 0.
mu10 = 0.
mu11 = immoments(im,1,1) - xcen * immoments(im,0,1)
mu20 = immoments(im,2,0) - xcen * immoments(im,1,0)
mu02 = immoments(im,0,2) - ycen*immoments(im,0,1)
mu21 = immoments(im,2,1) - 2*xcen*immoments(im,1,1) - ycen*immoments(im,2,0) + \
2*xcen**2*immoments(im,0,1)
mu12 = immoments(im,1,2) - 2*ycen*immoments(im,1,1) - xcen*immoments(im,0,2) + \
2*ycen**2*immoments(im,1,0)
mu30 = immoments(im,3,0) - 3*xcen*immoments(im,2,0) + 2*xcen**2*immoments(im,1,0)
mu03 = immoments(im,0,3) - 3*ycen*immoments(im,0,2) + 2*ycen**2*immoments(im,0,1)
cmoments = {'mu00': mu00,
'mu01': mu01,
'mu10': mu10,
'mu11': mu11,
'mu20': mu20,
'mu02': mu02,
'mu21': mu21,
'mu12': mu12,
'mu30': mu30,
'mu03': mu03
}
return cmoments
def covmat(im):
cmoments = central_moments(im)
nmu20 = cmoments['mu20'] / cmoments['mu00']
nmu02 = cmoments['mu02'] / cmoments['mu00']
nmu11 = cmoments['mu11'] / cmoments['mu00']
covmat = np.array([[nmu20, nmu11],[nmu11,nmu02]])
return covmat
| 31.41954 | 96 | 0.583806 | 2,413 | 16,401 | 3.927891 | 0.205553 | 0.029015 | 0.012661 | 0.006858 | 0.148238 | 0.104452 | 0.098755 | 0.085883 | 0.057607 | 0.044946 | 0 | 0.053515 | 0.28108 | 16,401 | 521 | 97 | 31.479846 | 0.750318 | 0.262972 | 0 | 0.110738 | 0 | 0 | 0.033603 | 0.003907 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057047 | false | 0.003356 | 0.020134 | 0 | 0.147651 | 0.006711 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d71fa41f6663e74d46cda250587cc5e796083b2c | 1,243 | py | Python | chpt17/stopwatch.py | maxmacdon/Automate | 2f405604ddfde55848798ebb2ad273f089bce466 | [
"MIT"
] | null | null | null | chpt17/stopwatch.py | maxmacdon/Automate | 2f405604ddfde55848798ebb2ad273f089bce466 | [
"MIT"
] | null | null | null | chpt17/stopwatch.py | maxmacdon/Automate | 2f405604ddfde55848798ebb2ad273f089bce466 | [
"MIT"
] | null | null | null | #! python3
import time
import pyperclip
def stopwatch():
""" records time spent per task, prints and stores in clipboard
Args:
None
Returns:
None
"""
# Display the program's instructions.
print('Press ENTER to begin. Afterwards, press ENTER to "click" the stopwatch. Press Ctrl-C to quit.')
input() # press Enter to begin
print('Started.')
clip = ''
startTime = time.time() # get the first lap's start time
lastTime = startTime
lapNum = 1
# Start tracking the lap times.
try:
while True:
input()
lapTime = round(time.time() - lastTime, 2)
totalTime = round(time.time() - startTime, 2)
info = 'Lap #%s: %s (%s)' % (str(lapNum).rjust(2), str(totalTime).center(7), str(lapTime).rjust(6))
clip += info + '\n'
print(info, end='')
lapNum += 1
lastTime = time.time() # reset the last lap time
except KeyboardInterrupt:
# Handle the Ctrl-C exception to keep its error message from displaying.
pyperclip.copy(clip)
print('\nDone.')
print('Results available in clipboard')
if __name__ == "__main__":
stopwatch() | 31.075 | 111 | 0.574417 | 147 | 1,243 | 4.802721 | 0.544218 | 0.045326 | 0.050992 | 0.048159 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009346 | 0.311344 | 1,243 | 40 | 112 | 31.075 | 0.815421 | 0.253419 | 0 | 0.076923 | 0 | 0.038462 | 0.18324 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.076923 | 0 | 0.115385 | 0.192308 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d721ffc6bed13962f5ffde3d59c65c7f1c217148 | 1,421 | py | Python | vision/cloud-client/detect/set_endpoint.py | lgruen/python-docs-samples | 9da3b7530ee233e369fbc39d0cec9829f2d7777b | [
"Apache-2.0"
] | 1 | 2020-08-12T22:54:55.000Z | 2020-08-12T22:54:55.000Z | vision/cloud-client/detect/set_endpoint.py | iuztemur/python-docs-samples | 09bc50d610741d693c4e99e03854822b37a647ba | [
"Apache-2.0"
] | null | null | null | vision/cloud-client/detect/set_endpoint.py | iuztemur/python-docs-samples | 09bc50d610741d693c4e99e03854822b37a647ba | [
"Apache-2.0"
] | 1 | 2021-02-10T11:08:58.000Z | 2021-02-10T11:08:58.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def set_endpoint():
"""Change your endpoint"""
# [START vision_set_endpoint]
from google.cloud import vision
client_options = {'api_endpoint': 'eu-vision.googleapis.com'}
client = vision.ImageAnnotatorClient(client_options=client_options)
# [END vision_set_endpoint]
image_source = vision.types.ImageSource(
image_uri='gs://cloud-samples-data/vision/text/screen.jpg')
image = vision.types.Image(source=image_source)
response = client.text_detection(image=image)
print('Texts:')
for text in response.text_annotations:
print('{}'.format(text.description))
vertices = ['({},{})'.format(vertex.x, vertex.y)
for vertex in text.bounding_poly.vertices]
print('bounds: {}\n'.format(','.join(vertices)))
if __name__ == '__main__':
set_endpoint()
| 33.046512 | 74 | 0.704433 | 188 | 1,421 | 5.196809 | 0.590426 | 0.061412 | 0.026612 | 0.032753 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006903 | 0.184377 | 1,421 | 42 | 75 | 33.833333 | 0.836066 | 0.438424 | 0 | 0 | 0 | 0 | 0.151671 | 0.089974 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.125 | 0.1875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d724e05fb14a8ae7c424d56f650d440ab9b343b0 | 3,490 | py | Python | utilityhelper/common/util/crc/crc-itu.py | leileigong/utility-helper | 0494658440432554d100109b6e3c2ba044cd560a | [
"MIT"
] | null | null | null | utilityhelper/common/util/crc/crc-itu.py | leileigong/utility-helper | 0494658440432554d100109b6e3c2ba044cd560a | [
"MIT"
] | null | null | null | utilityhelper/common/util/crc/crc-itu.py | leileigong/utility-helper | 0494658440432554d100109b6e3c2ba044cd560a | [
"MIT"
] | 1 | 2019-01-29T06:37:38.000Z | 2019-01-29T06:37:38.000Z | #coding:utf-8
from __future__ import (print_function, unicode_literals)
"""
Bit-level relationships between each bit of the 2-byte CRC-ITU register and
the bits of the data byte being folded in:
CRC15 => 3,7
CRC14 => 2,6
CRC13 => 1,5
CRC12 => 0,4
CRC11 => 3
CRC10 => 2, CRC15 => 2,3,7
CRC9  => 1, CRC14 => 1,2,6
CRC8  => 0, CRC13 => 0,1,5
CRC7  => CRC12 => 0,4
CRC6  => 3
CRC5  => 2
CRC4  => 1
CRC3  => 0, CRC15 => 0,3,7
CRC2  => CRC14 => 2,6
CRC1  => CRC13 => 1,5
CRC0  => CRC12 => 0,4
"""
def get_bytecrc_itu(ucChar, uslpwCrc):
tmpChar = ucChar ^ (uslpwCrc & 0x00FF)
tmpChar = (tmpChar ^ (tmpChar << 4))
tmp = tmpChar & 0x00FF
usNewlpwCrc = (uslpwCrc >> 8) ^ (tmp << 8) ^ (tmp << 3) ^ (tmp >> 4)
return usNewlpwCrc
def get_bytecrc_itu2(ucChar, uslpwCrc):
databit7 = (ucChar >> 7) & 0x0001
databit6 = (ucChar >> 6) & 0x0001
databit5 = (ucChar >> 5) & 0x0001
databit4 = (ucChar >> 4) & 0x0001
databit3 = (ucChar >> 3) & 0x0001
databit2 = (ucChar >> 2) & 0x0001
databit1 = (ucChar >> 1) & 0x0001
databit0 = (ucChar >> 0) & 0x0001
crc15 = (uslpwCrc >> 15) & 0x0001
crc14 = (uslpwCrc >> 14) & 0x0001
crc13 = (uslpwCrc >> 13) & 0x0001
crc12 = (uslpwCrc >> 12) & 0x0001
crc11 = (uslpwCrc >> 11) & 0x0001
crc10 = (uslpwCrc >> 10) & 0x0001
crc9 = (uslpwCrc >> 9) & 0x0001
crc8 = (uslpwCrc >> 8) & 0x0001
crc7 = (uslpwCrc >> 7) & 0x0001
crc6 = (uslpwCrc >> 6) & 0x0001
crc5 = (uslpwCrc >> 5) & 0x0001
crc4 = (uslpwCrc >> 4) & 0x0001
crc3 = (uslpwCrc >> 3) & 0x0001
crc2 = (uslpwCrc >> 2) & 0x0001
crc1 = (uslpwCrc >> 1) & 0x0001
crc0 = (uslpwCrc >> 0) & 0x0001
# CRC Byte2
newcrcbit15 = databit7 ^ crc7 ^ databit3 ^ crc3
newcrcbit14 = databit6 ^ crc6 ^ databit2 ^ crc2
newcrcbit13 = databit5 ^ crc5 ^ databit1 ^ crc1
newcrcbit12 = databit4 ^ crc4 ^ databit0 ^ crc0
newcrcbit11 = databit3 ^ crc3
newcrcbit10 = newcrcbit15 ^ databit2 ^ crc2
newcrcbit9 = newcrcbit14 ^ databit1 ^ crc1
newcrcbit8 = newcrcbit13 ^ databit0 ^ crc0
# CRC Byte1
newcrcbit7 = newcrcbit12 ^ crc15
newcrcbit6 = databit3 ^ crc3 ^ crc14
newcrcbit5 = databit2 ^ crc2 ^ crc13
newcrcbit4 = databit1 ^ crc1 ^ crc12
newcrcbit3 = newcrcbit15 ^ databit0 ^ crc0 ^ crc11
newcrcbit2 = newcrcbit14 ^ crc10
newcrcbit1 = newcrcbit13 ^ crc9
newcrcbit0 = newcrcbit12 ^ crc8
newcrc = (newcrcbit15 << 15) | (newcrcbit14 << 14) | (newcrcbit13 << 13) | (newcrcbit12 << 12) |\
(newcrcbit11 << 11) | (newcrcbit10 << 10) | (newcrcbit9 << 9) | (newcrcbit8 << 8) |\
(newcrcbit7 << 7) | (newcrcbit6 << 6) | (newcrcbit5 << 5) | (newcrcbit4 << 4) |\
(newcrcbit3 << 3) | (newcrcbit2 << 2) | (newcrcbit1 << 1) | (newcrcbit0 << 0)
return newcrc
def get_crc_itu(data):
initCrc = 0x6363
lpwCrc = initCrc
if isinstance(data, (list, str)):
for ch in data:
if isinstance(ch, int):
tmpdata = ch
elif isinstance(ch, str):
tmpdata = ord(ch)
lpwCrc = get_bytecrc_itu2(tmpdata, lpwCrc)
elif isinstance(data, int):
lpwCrc = get_bytecrc_itu2(data, lpwCrc)
return lpwCrc
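# Sanity check (an assumption, not verified here): the 0x6363 initial value is
# the reflected form of 0xC6C6, the init of CRC-16/ISO-IEC-14443-3-A ("CRC-A"),
# whose catalogue check value is 0xBF05; get_crc_itu("123456789") is therefore
# expected to return 0xBF05. Verify against a trusted reference before relying
# on this.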
if __name__ == "__main__":
data = [0x00, '\x00']
data = "\x12\x34"
crc = get_crc_itu(data)
    # bytearray(crc) would allocate `crc` zero bytes; show the CRC bytes instead
    print(hex(crc), crc.to_bytes(2, "big"))
| 31.160714 | 101 | 0.543266 | 374 | 3,490 | 5 | 0.328877 | 0.02139 | 0.009626 | 0.013904 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.175773 | 0.323496 | 3,490 | 111 | 102 | 31.441441 | 0.616264 | 0.009169 | 0 | 0 | 0 | 0 | 0.006757 | 0 | 0 | 0 | 0.056081 | 0 | 0 | 1 | 0.042254 | false | 0 | 0.014085 | 0 | 0.098592 | 0.028169 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d725f4f9f4d9f957979f6b314f2174ff7b482585 | 65,048 | py | Python | SimPEG/electromagnetics/static/utils/static_utils.py | WouterDls/simpeg | 6b8ef01e123d3bab24aa6a2364200f7114017d06 | [
"MIT"
] | 1 | 2021-10-21T04:22:36.000Z | 2021-10-21T04:22:36.000Z | SimPEG/electromagnetics/static/utils/static_utils.py | WouterDls/simpeg | 6b8ef01e123d3bab24aa6a2364200f7114017d06 | [
"MIT"
] | null | null | null | SimPEG/electromagnetics/static/utils/static_utils.py | WouterDls/simpeg | 6b8ef01e123d3bab24aa6a2364200f7114017d06 | [
"MIT"
] | 1 | 2021-12-29T00:06:07.000Z | 2021-12-29T00:06:07.000Z | import numpy as np
from scipy.interpolate import LinearNDInterpolator, interp1d, griddata
from scipy.spatial import cKDTree
from numpy import matlib
import discretize
from discretize import TensorMesh
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import ticker
import warnings
from ....data import Data
from .. import resistivity as dc
from ....utils import (
closestPoints,
mkvc,
surface2ind_topo,
model_builder,
define_plane_from_points,
)
from ....utils.io_utils import (
read_dcip2d_ubc,
write_dcip2d_ubc,
read_dcip3d_ubc,
write_dcip3d_ubc,
)
from ....utils.plot_utils import plot_1d_layer_model
from ....utils.code_utils import deprecate_method
try:
import plotly.graph_objects as grapho
has_plotly = True
except ImportError:
has_plotly = False
DATA_TYPES = {
"apparent resistivity": [
"apparent resistivity",
"appresistivity",
"apparentresistivity",
"apparent-resistivity",
"apparent_resistivity",
"appres",
],
"apparent conductivity": [
"apparent conductivity",
"appconductivity",
"apparentconductivity",
"apparent-conductivity",
"apparent_conductivity",
"appcon",
],
"apparent chargeability": [
"apparent chargeability",
"appchargeability",
"apparentchargeability",
"apparent-chargeability",
"apparent_chargeability",
],
"potential": ["potential", "potentials", "volt", "V", "voltages", "voltage"],
}
SPACE_TYPES = {
"half space": ["half space", "half-space", "half_space", "halfspace", "half"],
"whole space": ["whole space", "whole-space", "whole_space", "wholespace", "whole"],
}
#######################################################################
# SURVEY GEOMETRY
#######################################################################
def electrode_separations(survey_object, electrode_pair="all", **kwargs):
"""
Calculate horizontal separation between specific or all electrodes.
Input:
survey_object : SimPEG.electromagnetics.static.survey.Survey
A DC or IP survey object
electrode_pair : str or list of str
A string or list of strings from the following {'all', 'AB', 'MN', 'AM', 'AN', 'BM', 'BN}
Output:
list of np.ndarray
For each electrode pair specified, the electrode distance is returned
in a list.
"""
if "survey_type" in kwargs:
warnings.warn(
"The survey_type is no longer necessary to calculate electrode separations. "
"Feel free to remove it from the call. This option will be removed in SimPEG 0.16.0",
FutureWarning,
)
    if not isinstance(electrode_pair, list):
        if not isinstance(electrode_pair, str):
            raise TypeError(
                "electrode_pair must be either a string or a list of strings "
                "naming the electrode separations you would like to "
                "calculate, not {}".format(type(electrode_pair))
            )
        if electrode_pair.lower() == "all":
            electrode_pair = ["AB", "MN", "AM", "AN", "BM", "BN"]
        else:
            electrode_pair = [electrode_pair.upper()]
elecSepDict = {}
AB = []
MN = []
AM = []
AN = []
BM = []
BN = []
for src in survey_object.source_list:
# pole or dipole source
if isinstance(src.location, list):
a_loc = src.location[0]
b_loc = src.location[1]
else:
a_loc = src.location
b_loc = np.inf * np.ones_like(src.location)
for rx in src.receiver_list:
# pole or dipole receiver
if isinstance(rx.locations, list):
M = rx.locations[0]
N = rx.locations[1]
else:
M = rx.locations
N = -np.inf * np.ones_like(rx.locations)
n_rx = np.shape(M)[0]
A = matlib.repmat(a_loc, n_rx, 1)
B = matlib.repmat(b_loc, n_rx, 1)
# Compute distances
AB.append(np.sqrt(np.sum((A - B) ** 2.0, axis=1)))
MN.append(np.sqrt(np.sum((M - N) ** 2.0, axis=1)))
AM.append(np.sqrt(np.sum((A - M) ** 2.0, axis=1)))
AN.append(np.sqrt(np.sum((A - N) ** 2.0, axis=1)))
BM.append(np.sqrt(np.sum((B - M) ** 2.0, axis=1)))
BN.append(np.sqrt(np.sum((B - N) ** 2.0, axis=1)))
# Stack to vector and define in dictionary
if "AB" in electrode_pair:
if AB:
AB = np.hstack(AB)
elecSepDict["AB"] = AB
if "MN" in electrode_pair:
if MN:
MN = np.hstack(MN)
elecSepDict["MN"] = MN
if "AM" in electrode_pair:
if AM:
AM = np.hstack(AM)
elecSepDict["AM"] = AM
if "AN" in electrode_pair:
if AN:
AN = np.hstack(AN)
elecSepDict["AN"] = AN
if "BM" in electrode_pair:
if BM:
BM = np.hstack(BM)
elecSepDict["BM"] = BM
if "BN" in electrode_pair:
if BN:
BN = np.hstack(BN)
elecSepDict["BN"] = BN
return elecSepDict
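# A minimal usage sketch (hypothetical helper, not part of the original API):
# one dipole-dipole datum with 10 m electrode spacings along a 2D line.
def _example_electrode_separations():
    rx = dc.receivers.Dipole(
        np.r_[20.0, 0.0].reshape(1, 2), np.r_[30.0, 0.0].reshape(1, 2)
    )
    src = dc.sources.Dipole([rx], np.r_[0.0, 0.0], np.r_[10.0, 0.0])
    survey = dc.survey.Survey([src])
    # Expect AB == [10.0] and MN == [10.0] for this geometry
    return electrode_separations(survey, electrode_pair=["AB", "MN"])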
def pseudo_locations(survey, wenner_tolerance=0.1, **kwargs):
"""
Calculates the pseudo-sensitivity locations for 2D and 3D surveys.
Input:
survey : SimPEG.electromagnetics.static.resistivity.Survey
A DC or IP survey
wenner_tolerance : float
If the center location for a source and receiver pair are within wenner_tolerance,
we assume the datum was collected with a wenner configuration and the pseudo-location
is computed based on the AB electrode spacing.
Output:
tuple of numpy.ndarray of the form (midxy, midz)
        For 2D surveys, *midxy* is a vector containing the along-line positions.
        For 3D surveys, *midxy* is an (n, 2) numpy array containing the (x, y) positions.
        In either case, *midz* is a vector containing the pseudo-depth locations.
"""
if not isinstance(survey, dc.Survey):
raise TypeError(f"Input must be instance of {dc.Survey}, not {type(survey)}")
if len(kwargs) > 0:
warnings.warn(
"The keyword arguments of this function have been deprecated."
" All of the necessary information is now in the DC survey class",
DeprecationWarning,
)
# Pre-allocate
midpoints = []
ds = []
for ii, source in enumerate(survey.source_list):
src_loc = source.location
if isinstance(src_loc, list):
src_midpoint = (src_loc[0] + src_loc[1]) / 2
else:
src_midpoint = src_loc
src_midpoint = src_midpoint.reshape((1, len(src_midpoint)))
for receiver in source.receiver_list:
rx_locs = receiver.locations
if isinstance(rx_locs, list):
rx_midpoints = (rx_locs[0] + rx_locs[1]) / 2
else:
rx_midpoints = rx_locs
n_loc = rx_midpoints.shape[0]
# Midpoint locations
midpoints.append((np.tile(src_midpoint, (n_loc, 1)) + rx_midpoints) / 2)
# Vector path from source midpoint to receiver midpoints
ds.append((rx_midpoints - np.tile(src_midpoint, (n_loc, 1))))
midpoints = np.vstack(midpoints)
ds = np.vstack(ds)
pseudo_depth = np.zeros_like(midpoints)
# wenner-like electrode groups (are source and rx midpoints in same place)
is_wenner = np.sqrt(np.sum(ds[:, :-1] ** 2, axis=1)) < wenner_tolerance
# Pseudo depth is AB/2
if np.any(is_wenner):
temp = np.abs(electrode_separations(survey, ["AB"])["AB"]) / 2
pseudo_depth[is_wenner, -1] = temp[is_wenner]
# Takes into account topography.
if np.any(~is_wenner):
L = np.sqrt(np.sum(ds[~is_wenner, :] ** 2, axis=1)) / 2
dz = ds[~is_wenner, -1]
pseudo_depth[~is_wenner, 0] = (dz / 2) * (ds[~is_wenner, 0] / L)
if np.shape(ds)[1] > 2:
pseudo_depth[~is_wenner, 1] = (dz / 2) * (ds[~is_wenner, 1] / L)
pseudo_depth[~is_wenner, -1] = (
np.sqrt(np.sum(ds[~is_wenner, :-1] ** 2, axis=1)) / 2
)
return midpoints - pseudo_depth
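# Usage sketch: for a survey object built with this module,
#   locs = pseudo_locations(survey)
# returns an (nD, dim) array; locs[:, 0] (and locs[:, 1] in 3D) hold the
# midpoint positions and locs[:, -1] the pseudo-elevations.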
def geometric_factor(survey_object, space_type="half space", **kwargs):
"""
Calculate Geometric Factor. Assuming that data are normalized voltages
Input:
:param SimPEG.electromagnetics.static.resistivity.Survey dc_survey: DC survey object
:param str survey_type: Either 'dipole-dipole' | 'pole-dipole'
| 'dipole-pole' | 'pole-pole'
:param str space_type: Assuming whole-space or half-space
('whole-space' | 'half-space')
Output:
:return numpy.ndarray G: Geometric Factor
"""
if "survey_type" in kwargs:
warnings.warn(
"The survey_type is no longer necessary to calculate geometric factor. "
"Feel free to remove it from the call. This option will be removed in SimPEG 0.16.0",
FutureWarning,
)
# Set factor for whole-space or half-space assumption
if space_type.lower() in SPACE_TYPES["whole space"]:
spaceFact = 4.0
elif space_type.lower() in SPACE_TYPES["half space"]:
spaceFact = 2.0
else:
raise TypeError("'space_type must be 'whole space' | 'half space'")
elecSepDict = electrode_separations(
survey_object, electrode_pair=["AM", "BM", "AN", "BN"]
)
AM = elecSepDict["AM"]
BM = elecSepDict["BM"]
AN = elecSepDict["AN"]
BN = elecSepDict["BN"]
# Determine geometric factor G based on electrode separation distances.
# For case where source and/or receivers are pole, terms will be
# divided by infinity.
G = 1 / AM - 1 / BM - 1 / AN + 1 / BN
return G / (spaceFact * np.pi)
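# Worked example (illustrative spacings): for a half-space dipole-dipole datum
# with AM = 10 m, BM = 20 m, AN = 20 m and BN = 30 m,
#   G = (1/10 - 1/20 - 1/20 + 1/30) / (2 * pi) ~= 5.3e-3,
# and the apparent resistivity is the normalized voltage divided by G.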
def apparent_resistivity_from_voltage(
survey, volts, space_type="half space", eps=1e-10
):
"""
Calculate apparent resistivities from normalized voltages.
Input:
:param SimPEG.electromagnetics.static.resistivity.Survey: DC survey
:param numpy.ndarray volts: normalized voltage measurements [V/A]
:param String space_type: 'half_space' or 'whole_space'
:param float eps: Regularizer in case of a null geometric factor
Output:
:return rhoApp: apparent resistivity
"""
G = geometric_factor(survey, space_type=space_type)
# Calculate apparent resistivity
# absolute value is required because of the regularizer
rhoApp = np.abs(volts * (1.0 / (G + eps)))
return rhoApp
def convert_survey_3d_to_2d_lines(
survey, lineID, data_type="volt", output_indexing=False
):
"""
Convert a 3D survey into a list of local 2D surveys.
Here, the user provides a Survey whose geometry is defined
for use in a 3D simulation and a 1D numpy.array which defines the
line ID for each datum. The function returns a list of local
2D survey objects. The change of coordinates for electrodes is
[x, y, z] to [s, z], where s is the distance along the profile
line. For each line, s = 0 defines the A-electrode location
for the first source in the source list.
Input:
:param survey: DC survey class object
:param lineID: A numpy.array (nD,) containing the line ID for each datum
Output:
:param survey: List of 2D DC survey class object
:rtype: List of SimPEG.electromagnetics.static.resistivity.Survey
"""
# Find all unique line id
unique_lineID = np.unique(lineID)
# If you output indexing to keep track of possible sorting
k = np.arange(0, survey.nD)
out_indices_list = []
ab_locs_all = np.c_[survey.locations_a, survey.locations_b]
mn_locs_all = np.c_[survey.locations_m, survey.locations_n]
# For each unique lineID
survey_list = []
for ID in unique_lineID:
source_list = []
# Source locations for this line
lineID_index = np.where(lineID == ID)[0]
ab_locs, ab_index = np.unique(
ab_locs_all[lineID_index, :], axis=0, return_index=True
)
# Find s=0 location and heading for line
start_index = lineID_index[ab_index]
out_indices = []
kID = k[lineID_index] # data indices part of this line
r0 = mkvc(ab_locs_all[start_index[0], 0:2]) # (x0, y0) for the survey line
rN = mkvc(ab_locs_all[start_index[-1], 0:2]) # (x, y) for last electrode
uvec = (rN - r0) / np.sqrt(
np.sum((rN - r0) ** 2)
) # unit vector for line orientation
# Along line positions and elevation for electrodes on current line
# in terms of position elevation
a_locs_s = np.c_[
np.dot(ab_locs_all[lineID_index, 0:2] - r0[0], uvec),
ab_locs_all[lineID_index, 2],
]
b_locs_s = np.c_[
np.dot(ab_locs_all[lineID_index, 3:5] - r0[0], uvec),
ab_locs_all[lineID_index, -1],
]
m_locs_s = np.c_[
np.dot(mn_locs_all[lineID_index, 0:2] - r0[0], uvec),
mn_locs_all[lineID_index, 2],
]
n_locs_s = np.c_[
np.dot(mn_locs_all[lineID_index, 3:5] - r0[0], uvec),
mn_locs_all[lineID_index, -1],
]
# For each source in the line
for ii, ind in enumerate(ab_index):
# Get source location
src_loc_a = mkvc(a_locs_s[ind, :])
src_loc_b = mkvc(b_locs_s[ind, :])
# Get receiver locations
rx_index = np.where(
np.isclose(a_locs_s[:, 0], src_loc_a[0], atol=1e-3)
& np.isclose(b_locs_s[:, 0], src_loc_b[0], atol=1e-3)
)[0]
rx_loc_m = m_locs_s[rx_index, :]
rx_loc_n = n_locs_s[rx_index, :]
# Extract pole and dipole receivers
k_ii = kID[rx_index]
is_pole_rx = np.all(np.isclose(rx_loc_m, rx_loc_n, atol=1e-3), axis=1)
rx_list = []
if any(is_pole_rx):
rx_list += [
dc.receivers.Pole(rx_loc_m[is_pole_rx, :], data_type=data_type)
]
out_indices.append(k_ii[is_pole_rx])
if any(~is_pole_rx):
rx_list += [
dc.receivers.Dipole(
rx_loc_m[~is_pole_rx, :],
rx_loc_n[~is_pole_rx, :],
data_type=data_type,
)
]
out_indices.append(k_ii[~is_pole_rx])
# Define Pole or Dipole Sources
if np.all(np.isclose(src_loc_a, src_loc_b, atol=1e-3)):
source_list.append(dc.sources.Pole(rx_list, src_loc_a))
else:
source_list.append(dc.sources.Dipole(rx_list, src_loc_a, src_loc_b))
# Create a 2D survey and add to list
survey_list.append(dc.survey.Survey(source_list))
if output_indexing:
out_indices_list.append(np.hstack(out_indices))
if output_indexing:
return survey_list, out_indices_list
else:
return survey_list
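# Usage sketch (assumed inputs): `survey_3d` is a 3D survey and `line_id` is an
# (nD,) array labeling each datum's line:
#   surveys_2d, indices = convert_survey_3d_to_2d_lines(
#       survey_3d, line_id, data_type="volt", output_indexing=True
#   )
#   dobs_line0 = dobs[indices[0]]  # data sorted to match the first 2D survey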
#####################################################################
# PLOTTING
#####################################################################
def plot_pseudosection(
data,
dobs=None,
plot_type="contourf",
ax=None,
clim=None,
scale="linear",
pcolor_opts={},
contourf_opts={},
scatter_opts={},
mask_topography=False,
create_colorbar=True,
cbar_opts={},
cbar_label="",
cax=None,
data_locations=False,
data_type=None,
space_type="half space",
**kwargs,
):
"""
Plot 2D DC/IP data in pseudo-section.
This utility allows the user to image 2D DC/IP data in pseudosection as
either a scatter plot or as a filled contour plot.
Parameters
----------
data : SimPEG.electromagnetics.static.survey.Survey or SimPEG.data.Data
A DC or IP survey object defining a 2D survey line, or a Data object containing
that same type of survey object.
dobs : numpy.ndarray (ndata,) or None
A data vector containing volts, integrated chargeabilities, apparent
resistivities, apparent chargeabilities or data misfits.
plot_type: {'contourf', 'pcolor', or 'scatter'}
'scatter' creates a scatter plot, 'contourf' creates a filled contour plot, and
'pcolor' creates a linearly interpolated plot.
ax: mpl_toolkits.mplot3d.axes.Axes, optional
An axis for the plot
clim : list, optional
list containing the minimum and maximum value for the color range,
i.e. [vmin, vmax]
scale: {'linear', 'log'}
Plot on linear or log base 10 scale
pcolor_opts : dict, optional
Dictionary defining kwargs for pcolor plot if `plot_type=='pcolor'`
contourf_opts : dict, optional
Dictionary defining kwargs for filled contour plot if `plot_type=='contourf'`
scatter_opts : dict, optional
Dictionary defining kwargs for scatter plot if `plot_type=='scatter'`
mask_topography : bool
        This feature should be set to True when there is significant topography and the user
would like to mask interpolated locations in the filled contour plot that lie
above the surface topography.
create_colorbar : bool
If *True*, a colorbar is automatically generated. If *False*, it is not.
If multiple planes are being plotted, only set the first scatter plot
to *True*
cbar_opts : dict
Dictionary defining kwargs for the colorbar
cbar_label : str
A string stating the color bar label for the
data; e.g. 'S/m', '$\\Omega m$', '%'
cax : mpl_toolkits.mplot3d.axes.Axes, optional
An axis object for the colorbar
data_type: {None, "apparent_conductivity", "apparent_resistivity"}, optional
if dobs is None, this will transform the data vector in the `survey` parameter
when it is a SimPEG.data.Data object from voltage to the requested `data_type`.
This occurs when `dobs` is `None`.
space_type: {'half space', "whole space"}
space type to use for the transformation from voltage to `data_type`
if `dobs` is `None`.
Output:
mpl_toolkits.mplot3d.axes3d.Axes3D
The axis object that holds the plot
"""
if "pcolorOpts" in kwargs:
warnings.warn(
"The pcolorOpts keyword has been deprecated. Please use "
"pcolor_opts instead. This will be removed in version"
" 0.16.0 of SimPEG",
FutureWarning,
)
pcolor_opts = kwargs.pop("pcolorOpts")
if "data_location" in kwargs:
warnings.warn(
"The data_location keyword has been deprecated. Please use "
"data_locations instead. This will be removed in version"
" 0.16.0 of SimPEG",
FutureWarning,
)
data_locations = kwargs.pop("data_location")
if "contour_opts" in kwargs:
warnings.warn(
"The contour_opts keyword has been deprecated. Please use "
"contourf_opts instead. This will be removed in version"
" 0.16.0 of SimPEG",
FutureWarning,
)
contourf_opts = kwargs.pop("contour_opts")
removed_kwargs = ["dim", "y_values", "sameratio", "survey_type"]
for kwarg in removed_kwargs:
if kwarg in kwargs:
warnings.warn(
r"The {kwarg} keyword has been removed. This will become an error in "
"version 0.16.0 of SimPEG",
DerecationWarning,
)
kwargs.pop(kwarg)
if len(kwargs) > 0:
warnings.warn("plot_pseudosection unused kwargs: {list(kwargs.keys())}")
if plot_type.lower() not in ["pcolor", "contourf", "scatter"]:
raise ValueError(
"plot_type must be 'pcolor', 'contourf', or 'scatter'. The input value of "
f"{plot_type} is not recognized"
)
# Get plotting locations from survey geometry
try:
# this should work if "data" was a Data object
survey = data.survey
if dobs is None:
dobs = data.dobs
# Transform it to the type specified in data_type (assuming it was voltage)
if data_type in (
DATA_TYPES["apparent conductivity"] + DATA_TYPES["apparent resistivity"]
):
dobs = apparent_resistivity_from_voltage(
survey, dobs, space_type=space_type
)
if data_type in DATA_TYPES["apparent conductivity"]:
dobs = 1.0 / dobs
except AttributeError:
# Assume "data" was a DC survey
survey = data
if dobs is None:
raise ValueError(
"If the first argument is a DC survey, dobs must not be None"
)
try:
locations = pseudo_locations(survey)
except Exception:
raise TypeError(
"The first argument must be a resitivity.Survey, or a Data object with a "
"resistivity.Survey."
)
# Create an axis for the pseudosection if None
if ax is None:
fig = plt.figure(figsize=(10, 4))
ax = fig.add_axes([0.1, 0.1, 0.7, 0.8])
cax = fig.add_axes([0.85, 0.1, 0.03, 0.8])
if clim is None:
vmin = vmax = None
else:
vmin, vmax = clim
# Create default norms
if scale == "log":
norm = mpl.colors.LogNorm(vmin=vmin, vmax=vmax)
else:
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
x, z = locations[:, 0], locations[:, -1]
# Scatter plot
if plot_type == "scatter":
# grab a shallow copy
s_opts = scatter_opts.copy()
s = s_opts.pop("s", 40)
norm = s_opts.pop("norm", norm)
if isinstance(norm, mpl.colors.LogNorm):
dobs = np.abs(dobs)
data_plot = ax.scatter(x, z, s=s, c=dobs, norm=norm, **s_opts)
# Filled contour plot
elif plot_type == "contourf":
opts = contourf_opts.copy()
norm = opts.pop("norm", norm)
if isinstance(norm, mpl.colors.LogNorm):
dobs = np.abs(dobs)
if scale == "log":
try:
levels = opts.get("levels", "auto")
locator = ticker.MaxNLocator(levels)
levels = locator.tick_values(np.log10(dobs.min()), np.log10(dobs.max()))
levels = 10 ** levels
opts["levels"] = levels
except TypeError:
pass
data_plot = ax.tricontourf(x, z, dobs, norm=norm, **opts,)
if data_locations:
ax.plot(x, z, "k.", ms=1, alpha=0.4)
elif plot_type == "pcolor":
opts = pcolor_opts.copy()
norm = opts.pop("norm", norm)
if isinstance(norm, mpl.colors.LogNorm):
dobs = np.abs(dobs)
        data_plot = ax.tripcolor(
            x, z, dobs, shading="gouraud", norm=norm, **opts
        )
if data_locations:
ax.plot(x, z, "k.", ms=1, alpha=0.4)
# Use a filled polygon to mask everything above
# that has a pseudo-location above the positions
# for nearest electrode spacings
if mask_topography:
electrode_locations = np.unique(
np.r_[
survey.locations_a,
survey.locations_b,
survey.locations_m,
survey.locations_n,
],
axis=0,
)
zmax = np.max(electrode_locations[:, 1])
tree = cKDTree(locations)
_, nodeInds = tree.query(electrode_locations)
poly_locations = locations[nodeInds, :]
poly_locations = np.r_[
np.c_[np.min(poly_locations[:, 0]), zmax],
poly_locations,
np.c_[np.max(poly_locations[:, 0]), zmax],
]
ax.fill(
poly_locations[:, 0], poly_locations[:, 1], facecolor="w", linewidth=0.5
)
z_top = np.max(z)
z_bot = np.min(z)
ax.set_ylim(z_bot - 0.03 * (z_top - z_bot), z_top + 0.03 * (z_top - z_bot))
ax.set_xlabel("Line position (m)")
ax.set_ylabel("Pseudo-elevation (m)")
# Define colorbar
if create_colorbar:
cbar = plt.colorbar(
data_plot,
format="%.2e",
fraction=0.06,
orientation="vertical",
cax=cax,
ax=ax,
**cbar_opts,
)
vmin = np.nanmin(dobs)
vmax = np.nanmax(dobs)
if scale == "log":
ticks = np.logspace(np.log10(vmin), np.log10(vmax), 7)
else:
ticks = np.linspace(vmin, vmax, 7)
cbar.set_ticks(ticks)
cbar.ax.minorticks_off()
cbar.set_label(cbar_label, labelpad=10)
cbar.ax.tick_params()
return ax, data_plot
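# Usage sketch (hypothetical data object): plot apparent resistivity computed
# from measured voltages on a log color scale:
#   ax, im = plot_pseudosection(
#       dc_data, plot_type="contourf", scale="log",
#       data_type="apparent_resistivity", cbar_label="$\\Omega m$",
#   )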
if has_plotly:
def plot_3d_pseudosection(
survey,
dvec,
marker_size=4,
vlim=None,
scale="linear",
units="",
plane_points=None,
plane_distance=10.0,
cbar_opts=None,
marker_opts=None,
layout_opts=None,
):
"""
Plot 3D DC/IP data in pseudo-section as a scatter plot.
This utility allows the user to produce a scatter plot of 3D DC/IP data at
all pseudo-locations. If a plane is specified, the user may create a scatter
plot using points near that plane.
Input:
survey : SimPEG.electromagnetics.static.survey.Survey
A DC or IP survey object
dvec : numpy.ndarray
A data vector containing volts, integrated chargeabilities, apparent
resistivities or apparent chargeabilities.
marker_size : int
Sets the marker size for the points on the scatter plot
vlim : list
list containing the minimum and maximum value for the color range,
i.e. [vmin, vmax]
scale: str
Plot on linear or log base 10 scale {'linear','log'}
units : str
            A string in d3 format specifying the units of *dvec*
plane_points : list of numpy.ndarray
A list of length 3 which contains the three xyz locations required to
define a plane; i.e. [xyz1, xyz2, xyz3]. This functionality is used to
plot only data that lie near this plane. A list of [xyz1, xyz2, xyz3]
can be entered for multiple planes.
plane_distance : float or list of float
Distance tolerance for plotting data that are near the plane(s) defined by
**plane_points**. A list is used if the *plane_distance* is different
for each plane.
cbar_opts: dict
Dictionary containing colorbar properties formatted according to plotly.graph_objects.scatter3d.cbar
marker_opts : dict
Dictionary containing marker properties formatted according to plotly.graph_objects.scatter3d
layout_opts : dict
Dictionary defining figure layout properties, formatted according to plotly.Layout
Output:
fig:
A plotly figure
"""
locations = pseudo_locations(survey)
# Scaling
if scale == "log":
plot_vec = np.log10(dvec)
tick_format = ".2f"
tick_prefix = "10^"
hovertemplate = (
"x: %{x:.2f}<br>y: %{y:.2f}<br>z: %{z:.2f}<br>value: %{customdata:.3e} "
+ units
)
else:
plot_vec = dvec
tick_format = "g"
tick_prefix = None
hovertemplate = (
"x: %{x:.2f}<br>y: %{y:.2f}<br>z: %{z:.2f}<br>value: %{customdata:.6g} "
+ units
)
if vlim is None:
vlim = [np.min(plot_vec), np.max(plot_vec)]
elif scale == "log":
vlim = [np.log10(vlim[0]), np.log10(vlim[1])]
# Set colorbar properties. Start with default values and replace any
# keys that need to be updated.
cbar = {
"thickness": 20,
"title": units,
"tickprefix": tick_prefix,
"tickformat": tick_format,
}
if cbar_opts is not None:
cbar = {key: cbar_opts.get(key, cbar[key]) for key in cbar}
# Set marker properties. Start with default values and replace any
# keys that need to be updated.
marker = {
"size": 4,
"colorscale": "viridis",
"cmin": vlim[0],
"cmax": vlim[1],
"opacity": 0.8,
"colorbar": cbar,
}
if marker_opts is not None:
marker = {key: marker_opts.get(key, marker[key]) for key in marker}
# 3D scatter plot
        if plane_points is None:
marker["color"] = plot_vec
scatter_data = [
grapho.Scatter3d(
x=locations[:, 0],
y=locations[:, 1],
z=locations[:, 2],
customdata=dvec,
hovertemplate=hovertemplate,
name="",
mode="markers",
marker=marker,
)
]
else:
# Place in list if only one plane defined
if isinstance(plane_points[0], np.ndarray):
plane_points = [plane_points]
# Expand to list of only one plane distance for all planes
            if not isinstance(plane_distance, list):
plane_distance = len(plane_points) * [plane_distance]
# Pre-allocate index for points on plane(s)
k = np.zeros(len(plot_vec), dtype=bool)
for ii in range(0, len(plane_points)):
p1, p2, p3 = plane_points[ii]
a, b, c, d = define_plane_from_points(p1, p2, p3)
k = k | (
np.abs(
a * locations[:, 0]
+ b * locations[:, 1]
+ c * locations[:, 2]
+ d
)
/ np.sqrt(a ** 2 + b ** 2 + c ** 2)
< plane_distance[ii]
)
if np.all(k == 0):
raise IndexError(
"""No locations are within *plane_distance* of any plane(s)
defined by *plane_points*. Try increasing *plane_distance*."""
)
marker["color"] = plot_vec[k]
scatter_data = [
grapho.Scatter3d(
x=locations[k, 0],
y=locations[k, 1],
z=locations[k, 2],
customdata=dvec[k],
mode="markers",
marker=marker,
)
]
fig = grapho.Figure(data=scatter_data)
fig.update_layout(
scene=dict(
xaxis=dict(title="X[m]"),
yaxis=dict(title="Y[m]"),
zaxis=dict(title="Z[m]"),
),
scene_camera=dict(eye=dict(x=1.5, y=-1.5, z=1.5)),
)
if layout_opts is not None:
fig.update_layout(**layout_opts)
return fig
#########################################################################
# GENERATE SURVEYS
#########################################################################
def generate_survey_from_abmn_locations(
*,
locations_a=None,
locations_b=None,
locations_m=None,
locations_n=None,
data_type=None,
output_sorting=False,
):
"""
Use A, B, M and N electrode locations to construct a 2D or 3D DC/IP survey.
Parameters
----------
locations_a : numpy.array
An (n, dim) numpy array containing A electrode locations
locations_b : None or numpy.array
An (n, dim) numpy array containing B electrode locations. If None,
we assume all sources are Pole sources.
locations_m : numpy.array
An (n, dim) numpy array containing M electrode locations
locations_n : numpy.array
An (n, dim) numpy array containing N electrode locations. If None,
we assume all receivers are Pole receivers.
data_type : str
Must be one of {'volt', 'apparent_conductivity', 'apparent_resistivity', 'apparent_chargeability'}
output_sorting : bool
This option is used if the ABMN locations are sorted during the creation of the survey
and you would like to sort any data vectors associated with the electrode locations.
If False, the function will output a SimPEG.electromagnetic.static.survey.Survey object.
If True, the function will output a tuple containing the survey object and a numpy array
(n,) that will sort the data vector to match the order of the electrodes in the survey.
Returns
-------
survey
A SimPEG.electromagnetic.static.survey.Survey object
sort_index
A numpy array which defines any sorting that took place when creating the survey
"""
if locations_a is None:
raise TypeError("Locations for A electrodes must be provided.")
if locations_m is None:
raise TypeError("Locations for M electrodes must be provided.")
assert data_type.lower() in [
"volt",
"apparent_conductivity",
"apparent_resistivity",
"apparent_chargeability",
], "data_type must be one of 'volt', 'apparent_conductivity', 'apparent_resistivity', 'apparent_chargeability'"
if locations_b is None:
locations_b = locations_a
if locations_n is None:
locations_n = locations_m
    if not (
        locations_a.shape == locations_b.shape == locations_m.shape == locations_n.shape
    ):
raise ValueError(
"Arrays containing A, B, M and N electrode locations must be same shape."
)
# Set up keeping track of sorting of rows and unique sources
n_rows = np.shape(locations_a)[0]
k = np.arange(0, n_rows)
out_indices = []
unique_ab, ab_index = np.unique(
np.c_[locations_a, locations_b], axis=0, return_index=True
)
ab_index = np.sort(ab_index)
# Loop over all unique source locations
source_list = []
for ii, ind in enumerate(ab_index):
# Get source location
src_loc_a = mkvc(locations_a[ind, :])
src_loc_b = mkvc(locations_b[ind, :])
# Get receiver locations
rx_index = np.where(
(
(np.sqrt(np.sum((locations_a - src_loc_a) ** 2, axis=1)) < 1e-3)
& (np.sqrt(np.sum((locations_b - src_loc_b) ** 2, axis=1)) < 1e-3)
)
)[0]
rx_loc_m = locations_m[rx_index, :]
rx_loc_n = locations_n[rx_index, :]
# Extract pole and dipole receivers
k_ii = k[rx_index]
is_pole_rx = np.all(np.isclose(rx_loc_m, rx_loc_n, atol=1e-3), axis=1)
rx_list = []
if any(is_pole_rx):
rx_list += [dc.receivers.Pole(rx_loc_m[is_pole_rx, :], data_type=data_type)]
out_indices.append(k_ii[is_pole_rx])
if any(~is_pole_rx):
rx_list += [
dc.receivers.Dipole(
rx_loc_m[~is_pole_rx, :],
rx_loc_n[~is_pole_rx, :],
data_type=data_type,
)
]
out_indices.append(k_ii[~is_pole_rx])
# Define Pole or Dipole Sources
if np.all(np.isclose(src_loc_a, src_loc_b, atol=1e-3)):
source_list.append(dc.sources.Pole(rx_list, src_loc_a))
else:
source_list.append(dc.sources.Dipole(rx_list, src_loc_a, src_loc_b))
# Create outputs
out_indices = np.hstack(out_indices)
survey = dc.survey.Survey(source_list)
if any(k != out_indices):
warnings.warn(
"Ordering of ABMN locations changed when generating survey. "
"Associated data vectors will need sorting. Set output_sorting to "
"True for sorting indices."
)
if output_sorting:
return survey, out_indices
else:
return survey
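# A minimal usage sketch (hypothetical coordinates, not from the library docs:
# two dipole-dipole measurements sharing one AB source, given as (n, 3) arrays):
#
#     locations_a = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
#     locations_b = np.array([[10.0, 0.0, 0.0], [10.0, 0.0, 0.0]])
#     locations_m = np.array([[20.0, 0.0, 0.0], [30.0, 0.0, 0.0]])
#     locations_n = np.array([[30.0, 0.0, 0.0], [40.0, 0.0, 0.0]])
#     survey, sort_inds = generate_survey_from_abmn_locations(
#         locations_a=locations_a, locations_b=locations_b,
#         locations_m=locations_m, locations_n=locations_n,
#         data_type="volt", output_sorting=True,
#     )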
def generate_dcip_survey(endl, survey_type, a, b, n, dim=3, **kwargs):
"""
Load in endpoints and survey specifications to generate Tx, Rx location
stations.
Assumes flat topo for now...
Input:
:param numpy.ndarray endl: input endpoints [x1, y1, z1, x2, y2, z2]
    :param str survey_type: 'dipole-dipole' | 'pole-dipole' |
        'dipole-pole' | 'pole-pole' | 'gradient'
    :param int a: pole separation
:param int b: dipole separation
:param int n: number of rx dipoles per tx
Output:
:return SimPEG.electromagnetics.static.resistivity.Survey dc_survey: DC survey object
"""
if "d2flag" in kwargs:
warnings.warn(
"The d2flag is no longer necessary to construct a survey. "
"Feel free to remove it from the call. This option will be removed in SimPEG 0.16.0",
FutureWarning,
)
def xy_2_r(x1, x2, y1, y2):
r = np.sqrt(np.sum((x2 - x1) ** 2.0 + (y2 - y1) ** 2.0))
return r
# Evenly distribute electrodes and put on surface
    # Measure survey length and direction
dl_len = xy_2_r(endl[0, 0], endl[1, 0], endl[0, 1], endl[1, 1])
dl_x = (endl[1, 0] - endl[0, 0]) / dl_len
dl_y = (endl[1, 1] - endl[0, 1]) / dl_len
nstn = int(np.floor(dl_len / a))
# Compute discrete pole location along line
stn_x = endl[0, 0] + np.array(range(int(nstn))) * dl_x * a
stn_y = endl[0, 1] + np.array(range(int(nstn))) * dl_y * a
if dim == 2:
ztop = np.linspace(endl[0, 1], endl[0, 1], nstn)
# Create line of P1 locations
M = np.c_[stn_x, ztop]
# Create line of P2 locations
N = np.c_[stn_x + a * dl_x, ztop]
elif dim == 3:
stn_z = np.linspace(endl[0, 2], endl[0, 2], nstn)
# Create line of P1 locations
M = np.c_[stn_x, stn_y, stn_z]
# Create line of P2 locations
N = np.c_[stn_x + a * dl_x, stn_y + a * dl_y, stn_z]
# Build list of Tx-Rx locations depending on survey type
# Dipole-dipole: Moving tx with [a] spacing -> [AB a MN1 a MN2 ... a MNn]
# Pole-dipole: Moving pole on one end -> [A a MN1 a MN2 ... MNn a B]
SrcList = []
if survey_type != "gradient":
for ii in range(0, int(nstn) - 1):
if survey_type.lower() in ["dipole-dipole", "dipole-pole"]:
tx = np.c_[M[ii, :], N[ii, :]]
                # Current electrode separation
AB = xy_2_r(tx[0, 1], endl[1, 0], tx[1, 1], endl[1, 1])
elif survey_type.lower() in ["pole-dipole", "pole-pole"]:
tx = np.r_[M[ii, :]]
                # Current electrode separation
AB = xy_2_r(tx[0], endl[1, 0], tx[1], endl[1, 1])
else:
raise TypeError(
"survey_type must be 'dipole-dipole' | 'pole-dipole' | "
"'dipole-pole' | 'pole-pole' not {}".format(survey_type)
)
# Rx.append(np.c_[M[ii+1:indx, :], N[ii+1:indx, :]])
# Number of receivers to fit
nstn = int(np.min([np.floor((AB - b) / a), n]))
# Check if there is enough space, else break the loop
if nstn <= 0:
continue
# Compute discrete pole location along line
stn_x = N[ii, 0] + dl_x * b + np.array(range(int(nstn))) * dl_x * a
stn_y = N[ii, 1] + dl_y * b + np.array(range(int(nstn))) * dl_y * a
# Create receiver poles
if dim == 3:
stn_z = np.linspace(endl[0, 2], endl[0, 2], nstn)
# Create line of P1 locations
P1 = np.c_[stn_x, stn_y, stn_z]
# Create line of P2 locations
P2 = np.c_[stn_x + a * dl_x, stn_y + a * dl_y, stn_z]
if survey_type.lower() in ["dipole-dipole", "pole-dipole"]:
rxClass = dc.Rx.Dipole(P1, P2)
elif survey_type.lower() in ["dipole-pole", "pole-pole"]:
rxClass = dc.Rx.Pole(P1)
elif dim == 2:
ztop = np.linspace(endl[0, 1], endl[0, 1], nstn)
# Create line of P1 locations
P1 = np.c_[stn_x, np.ones(nstn).T * ztop]
# Create line of P2 locations
P2 = np.c_[stn_x + a * dl_x, np.ones(nstn).T * ztop]
if survey_type.lower() in ["dipole-dipole", "pole-dipole"]:
rxClass = dc.Rx.Dipole(P1, P2)
elif survey_type.lower() in ["dipole-pole", "pole-pole"]:
rxClass = dc.Rx.Pole(P1)
if survey_type.lower() in ["dipole-dipole", "dipole-pole"]:
srcClass = dc.Src.Dipole([rxClass], M[ii, :], N[ii, :])
elif survey_type.lower() in ["pole-dipole", "pole-pole"]:
srcClass = dc.Src.Pole([rxClass], M[ii, :])
SrcList.append(srcClass)
elif survey_type.lower() == "gradient":
        # A gradient survey takes the "b" parameter to define the limits of a
        # square survey grid. The pole separation within the receiver grid is
        # defined by the "a" parameter.
# Get the edge limit of survey area
min_x = endl[0, 0] + dl_x * b
min_y = endl[0, 1] + dl_y * b
max_x = endl[1, 0] - dl_x * b
max_y = endl[1, 1] - dl_y * b
# Define the size of the survey grid (square for now)
box_l = np.sqrt((min_x - max_x) ** 2.0 + (min_y - max_y) ** 2.0)
box_w = box_l / 2.0
nstn = int(np.floor(box_l / a))
# Compute discrete pole location along line
stn_x = min_x + np.array(range(int(nstn))) * dl_x * a
stn_y = min_y + np.array(range(int(nstn))) * dl_y * a
# Define number of cross lines
nlin = int(np.floor(box_w / a))
lind = range(-nlin, nlin + 1)
npoles = int(nstn * len(lind))
rx = np.zeros([npoles, 6])
        # Constant electrode elevation taken from the line's start point
        # (assumption: flat topography; the original reused *ztop* here, which
        # is undefined for dim == 3 and generally the wrong length for dim == 2)
        ztop = endl[0, 2] if dim == 3 else endl[0, 1]
        for ii in range(len(lind)):
            # Move the station location to the current survey line. This is a
            # move perpendicular to the line survey orientation, hence the
            # y, x switch
            lxx = stn_x - lind[ii] * a * dl_y
            lyy = stn_y + lind[ii] * a * dl_x
            M = np.c_[lxx, lyy, np.ones(nstn).T * ztop]
            N = np.c_[lxx + a * dl_x, lyy + a * dl_y, np.ones(nstn).T * ztop]
rx[(ii * nstn) : ((ii + 1) * nstn), :] = np.c_[M, N]
if dim == 3:
rxClass = dc.Rx.Dipole(rx[:, :3], rx[:, 3:])
elif dim == 2:
M = M[:, [0, 2]]
N = N[:, [0, 2]]
rxClass = dc.Rx.Dipole(rx[:, [0, 2]], rx[:, [3, 5]])
srcClass = dc.Src.Dipole([rxClass], (endl[0, :]), (endl[1, :]))
SrcList.append(srcClass)
survey_type = "dipole-dipole"
survey = dc.Survey(SrcList, survey_type=survey_type.lower())
return survey
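# A minimal usage sketch (hypothetical geometry: a 200 m east-west line at the
# origin, 10 m electrode spacing, and up to 8 receiver dipoles per source):
#
#     endl = np.array([[0.0, 0.0, 0.0], [200.0, 0.0, 0.0]])
#     survey = generate_dcip_survey(endl, "dipole-dipole", a=10, b=10, n=8, dim=3)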
def generate_dcip_sources_line(
survey_type,
data_type,
dimension_type,
end_points,
topo,
num_rx_per_src,
station_spacing,
):
"""
Generate the source list for a 2D or 3D DC/IP survey line.
This utility will create the list of DC/IP source objects for a single line of
2D or 3D data. The topography, orientation, spacing and number of receivers
can be specified by the user. This function can be used to define multiple lines
of DC/IP, which can be appended to create the sources for an entire survey.
Input:
:param str survey_type: 'dipole-dipole' | 'pole-dipole' |
'dipole-pole' | 'pole-pole'
:param str data_type: 'volt' | 'apparent_conductivity' |
'apparent_resistivity' | 'apparent_chargeability'
:param str dimension_type: '2D' or '3D'
:param np.array end_points: horizontal end points [x1, x2] or [x1, x2, y1, y2]
    :param topo: a constant float elevation, or an (N, 2) np.array for 2D or
        an (N, 3) np.array for 3D topography
    :param int num_rx_per_src: number of receivers per source
    :param float station_spacing: distance between stations
Output:
    :return list source_list: a list of DC/IP source objects for the line
"""
assert survey_type.lower() in [
"pole-pole",
"pole-dipole",
"dipole-pole",
"dipole-dipole",
], "survey_type must be one of 'pole-pole', 'pole-dipole', 'dipole-pole', 'dipole-dipole'"
assert data_type.lower() in [
"volt",
"apparent_conductivity",
"apparent_resistivity",
"apparent_chargeability",
], "data_type must be one of 'volt', 'apparent_conductivity', 'apparent_resistivity', 'apparent_chargeability'"
assert dimension_type.upper() in [
"2D",
"2.5D",
"3D",
], "dimension_type must be one of '2D' or '3D'"
def xy_2_r(x1, x2, y1, y2):
r = np.sqrt(np.sum((x2 - x1) ** 2.0 + (y2 - y1) ** 2.0))
return r
# Compute horizontal locations of sources and receivers
x1 = end_points[0]
x2 = end_points[1]
if dimension_type == "3D":
# Station locations
y1 = end_points[2]
y2 = end_points[3]
L = xy_2_r(x1, x2, y1, y2)
nstn = int(np.floor(L / station_spacing) + 1)
dl_x = (x2 - x1) / L
dl_y = (y2 - y1) / L
stn_x = x1 + np.array(range(int(nstn))) * dl_x * station_spacing
stn_y = y1 + np.array(range(int(nstn))) * dl_y * station_spacing
# Station xyz locations
P = np.c_[stn_x, stn_y]
if np.size(topo) == 1:
P = np.c_[P, topo * np.ones((nstn))]
else:
fun_interp = LinearNDInterpolator(topo[:, 0:2], topo[:, -1])
P = np.c_[P, fun_interp(P)]
else:
# Station locations
y1 = 0.0
y2 = 0.0
L = xy_2_r(x1, x2, y1, y2)
nstn = int(np.floor(L / station_spacing) + 1)
stn_x = x1 + np.array(range(int(nstn))) * station_spacing
# Station xyz locations
if np.size(topo) == 1:
P = np.c_[stn_x, topo * np.ones((nstn))]
else:
fun_interp = interp1d(topo[:, 0], topo[:, -1])
P = np.c_[stn_x, fun_interp(stn_x)]
# Build list of Tx-Rx locations depending on survey type
# Dipole-dipole: Moving tx with [a] spacing -> [AB a MN1 a MN2 ... a MNn]
# Pole-dipole: Moving pole on one end -> [A a MN1 a MN2 ... MNn a B]
source_list = []
if survey_type.lower() == "pole-pole":
rx_shift = 0
elif survey_type.lower() in ["pole-dipole", "dipole-pole"]:
rx_shift = 1
elif survey_type.lower() == "dipole-dipole":
rx_shift = 2
for ii in range(0, int(nstn - rx_shift)):
if dimension_type == "3D":
D = xy_2_r(stn_x[ii + rx_shift], x2, stn_y[ii + rx_shift], y2)
else:
D = xy_2_r(stn_x[ii + rx_shift], x2, y1, y2)
# Number of receivers to fit
nrec = int(np.min([np.floor(D / station_spacing), num_rx_per_src]))
# Check if there is enough space, else break the loop
if nrec <= 0:
continue
# Create receivers
if survey_type.lower() in ["dipole-pole", "pole-pole"]:
rxClass = dc.receivers.Pole(
P[ii + rx_shift + 1 : ii + rx_shift + nrec + 1, :], data_type=data_type
)
elif survey_type.lower() in ["dipole-dipole", "pole-dipole"]:
rxClass = dc.receivers.Dipole(
P[ii + rx_shift : ii + rx_shift + nrec, :],
P[ii + rx_shift + 1 : ii + rx_shift + nrec + 1, :],
data_type=data_type,
)
# Create sources
if survey_type.lower() in ["pole-dipole", "pole-pole"]:
srcClass = dc.sources.Pole([rxClass], P[ii, :])
elif survey_type.lower() in ["dipole-dipole", "dipole-pole"]:
srcClass = dc.sources.Dipole([rxClass], P[ii, :], P[ii + 1, :])
source_list.append(srcClass)
return source_list
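# A minimal usage sketch (hypothetical 2D line from x=0 to x=200 m with flat
# topography at z=0, 10 receivers per source and 10 m station spacing):
#
#     sources = generate_dcip_sources_line(
#         "dipole-dipole", "volt", "2D", [0.0, 200.0],
#         topo=0.0, num_rx_per_src=10, station_spacing=10.0,
#     )
#     survey = dc.Survey(sources)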
def xy_2_lineID(dc_survey):
"""
    Read a DC survey class and assign a line ID to each source.
    Assumes that the locations are listed in the order
    they were collected. May need to generalize for random
    point locations, but that would be more expensive.
    Input:
    :param dc_survey: DC survey object holding the station locations
    Output:
    :return lineID: vector of integers, one line ID per source
"""
# Compute unit vector between two points
nstn = dc_survey.nSrc
# Pre-allocate space
lineID = np.zeros(nstn)
linenum = 0
indx = 0
for ii in range(nstn):
if ii == 0:
A = dc_survey.source_list[ii].location[0]
B = dc_survey.source_list[ii].location[1]
xout = np.mean([A[0:2], B[0:2]], axis=0)
xy0 = A[:2]
xym = xout
# Deal with replicate pole location
if np.all(xy0 == xym):
xym[0] = xym[0] + 1e-3
continue
A = dc_survey.source_list[ii].location[0]
B = dc_survey.source_list[ii].location[1]
xin = np.mean([A[0:2], B[0:2]], axis=0)
vec1, r1 = r_unit(xout, xin) # Compute vector between neighbours
vec2, r2 = r_unit(xym, xin) # Compute vector between current stn and mid-point
vec3, r3 = r_unit(xy0, xin) # Compute vector between current stn and start line
vec4, r4 = r_unit(xym, xy0) # Compute vector between mid-point and start line
# Compute dot product
ang1 = np.abs(vec1.dot(vec2))
ang2 = np.abs(vec3.dot(vec4))
        # If the angles are smaller than 45 degrees, then the next point is on a new line
if ((ang1 < np.cos(np.pi / 4.0)) | (ang2 < np.cos(np.pi / 4.0))) & (
np.all(np.r_[r1, r2, r3, r4] > 0)
):
# Re-initiate start and mid-point location
xy0 = A[:2]
xym = xin
# Deal with replicate pole location
if np.all(xy0 == xym):
xym[0] = xym[0] + 1e-3
linenum += 1
indx = ii
else:
xym = np.mean([xy0, xin], axis=0)
lineID[ii] = linenum
xout = xin
return lineID
def r_unit(p1, p2):
"""
r_unit(x, y) : Function computes the unit vector
between two points with coordinates p1(x1, y1) and p2(x2, y2)
"""
assert len(p1) == len(p2), "locs must be the same shape."
dx = []
for ii in range(len(p1)):
dx.append((p2[ii] - p1[ii]))
# Compute length of vector
r = np.linalg.norm(np.asarray(dx))
if r != 0:
vec = dx / r
else:
vec = np.zeros(len(p1))
return vec, r
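# A quick worked example: r_unit([0, 0], [3, 4]) returns
# (array([0.6, 0.8]), 5.0) — the unit vector and the separation distance.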
def gettopoCC(mesh, actind, option="top"):
"""
Get topography from active indices of mesh.
"""
if mesh._meshType == "TENSOR":
if mesh.dim == 3:
mesh2D = discretize.TensorMesh([mesh.hx, mesh.hy], mesh.x0[:2])
zc = mesh.cell_centers[:, 2]
ACTIND = actind.reshape((mesh.vnC[0] * mesh.vnC[1], mesh.vnC[2]), order="F")
ZC = zc.reshape((mesh.vnC[0] * mesh.vnC[1], mesh.vnC[2]), order="F")
topoCC = np.zeros(ZC.shape[0])
for i in range(ZC.shape[0]):
ind = np.argmax(ZC[i, :][ACTIND[i, :]])
if option == "top":
dz = mesh.hz[ACTIND[i, :]][ind] * 0.5
elif option == "center":
dz = 0.0
else:
                    raise ValueError("option must be 'top' or 'center'")
topoCC[i] = ZC[i, :][ACTIND[i, :]].max() + dz
return mesh2D, topoCC
elif mesh.dim == 2:
mesh1D = discretize.TensorMesh([mesh.hx], [mesh.x0[0]])
yc = mesh.cell_centers[:, 1]
ACTIND = actind.reshape((mesh.vnC[0], mesh.vnC[1]), order="F")
YC = yc.reshape((mesh.vnC[0], mesh.vnC[1]), order="F")
topoCC = np.zeros(YC.shape[0])
for i in range(YC.shape[0]):
ind = np.argmax(YC[i, :][ACTIND[i, :]])
if option == "top":
dy = mesh.hy[ACTIND[i, :]][ind] * 0.5
elif option == "center":
dy = 0.0
else:
                    raise ValueError("option must be 'top' or 'center'")
topoCC[i] = YC[i, :][ACTIND[i, :]].max() + dy
return mesh1D, topoCC
elif mesh._meshType == "TREE":
inds = mesh.get_boundary_cells(actind, direction="zu")[0]
if option == "top":
dz = mesh.h_gridded[inds, -1] * 0.5
elif option == "center":
dz = 0.0
return mesh.cell_centers[inds, :-1], mesh.cell_centers[inds, -1] + dz
def drapeTopotoLoc(mesh, pts, actind=None, option="top", topo=None):
"""
    Drape locations onto the cell center directly below the topography
"""
if mesh.dim == 2:
# if shape is (*, 1) or (*, 2) just grab first column
if pts.ndim == 2 and pts.shape[1] in [1, 2]:
pts = pts[:, 0]
if pts.ndim > 1:
raise ValueError("pts should be 1d array")
elif mesh.dim == 3:
if pts.shape[1] not in [2, 3]:
raise ValueError("shape of pts should be (x, 3) or (x, 2)")
# just grab the xy locations in the first two columns
pts = pts[:, :2]
else:
raise NotImplementedError()
if actind is None:
actind = surface2ind_topo(mesh, topo)
if mesh._meshType == "TENSOR":
meshtemp, topoCC = gettopoCC(mesh, actind, option=option)
inds = closestPoints(meshtemp, pts)
topo = topoCC[inds]
out = np.c_[pts, topo]
elif mesh._meshType == "TREE":
if mesh.dim == 3:
uniqXYlocs, topoCC = gettopoCC(mesh, actind, option=option)
inds = closestPointsGrid(uniqXYlocs, pts)
out = np.c_[uniqXYlocs[inds, :], topoCC[inds]]
else:
uniqXlocs, topoCC = gettopoCC(mesh, actind, option=option)
inds = closestPointsGrid(uniqXlocs, pts, dim=1)
out = np.c_[uniqXlocs[inds], topoCC[inds]]
else:
raise NotImplementedError()
return out
def genTopography(mesh, zmin, zmax, seed=None, its=100, anisotropy=None):
if mesh.dim == 3:
mesh2D = discretize.TensorMesh([mesh.hx, mesh.hy], x0=[mesh.x0[0], mesh.x0[1]])
out = model_builder.randomModel(
mesh.vnC[:2], bounds=[zmin, zmax], its=its, seed=seed, anisotropy=anisotropy
)
return out, mesh2D
elif mesh.dim == 2:
mesh1D = discretize.TensorMesh([mesh.hx], x0=[mesh.x0[0]])
out = model_builder.randomModel(
mesh.vnC[:1], bounds=[zmin, zmax], its=its, seed=seed, anisotropy=anisotropy
)
return out, mesh1D
else:
raise Exception("Only works for 2D and 3D models")
def closestPointsGrid(grid, pts, dim=2):
"""Move a list of points to the closest points on a grid.
    :param numpy.ndarray grid: Grid of candidate points
    :param numpy.ndarray pts: Points to move
:rtype: numpy.ndarray
:return: nodeInds
"""
if dim == 1:
nodeInds = np.asarray(
[np.abs(pt - grid).argmin() for pt in pts.tolist()], dtype=int
)
else:
tree = cKDTree(grid)
_, nodeInds = tree.query(pts)
return nodeInds
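# A minimal usage sketch (hypothetical grid/points; returns indices into grid):
#
#     grid = np.array([[0.0, 0.0], [10.0, 0.0], [20.0, 0.0]])
#     pts = np.array([[9.0, 1.0], [18.5, -0.5]])
#     inds = closestPointsGrid(grid, pts)   # -> array([1, 2])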
def gen_3d_survey_from_2d_lines(
survey_type,
a,
b,
n_spacing,
n_lines=5,
line_length=200.0,
line_spacing=20.0,
x0=0,
y0=0,
z0=0,
src_offset_y=0.0,
dim=3,
is_IO=True,
):
"""
Generate 3D DC survey using gen_DCIPsurvey function.
Input:
:param str survey_type: 'dipole-dipole' | 'pole-dipole' |
'dipole-pole' | 'pole-pole' | 'gradient'
    :param int a: pole separation
:param int b: dipole separation
:param int n_spacing: number of rx dipoles per tx
Output:
:return SimPEG.dc.SurveyDC.Survey survey_3d: 3D DC survey object
"""
ylocs = np.arange(n_lines) * line_spacing + y0
survey_lists_2d = []
srcList = []
line_inds = []
for i, y in enumerate(ylocs):
# Generate DC survey object
xmin, xmax = x0, x0 + line_length
ymin, ymax = y, y
zmin, zmax = 0, 0
IO_2d = dc.IO()
endl = np.array([[xmin, ymin, zmin], [xmax, ymax, zmax]])
survey_2d = gen_DCIPsurvey(endl, survey_type, a, b, n_spacing, dim=3,)
srcList.append(survey_2d.source_list)
survey_2d = IO_2d.from_abmn_locations_to_survey(
survey_2d.locations_a[:, [0, 2]],
survey_2d.locations_b[:, [0, 2]],
survey_2d.locations_m[:, [0, 2]],
survey_2d.locations_n[:, [0, 2]],
survey_type,
dimension=2,
)
survey_lists_2d.append(survey_2d)
line_inds.append(np.ones(survey_2d.nD, dtype=int) * i)
line_inds = np.hstack(line_inds)
srcList = sum(srcList, [])
survey_3d = dc.Survey(srcList)
IO_3d = dc.IO()
survey_3d.locations_a[:, 1] += src_offset_y
survey_3d.locations_b[:, 1] += src_offset_y
survey_3d = IO_3d.from_abmn_locations_to_survey(
survey_3d.locations_a,
survey_3d.locations_b,
survey_3d.locations_m,
survey_3d.locations_n,
survey_type,
dimension=3,
line_inds=line_inds,
)
return IO_3d, survey_3d
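# A minimal usage sketch (hypothetical layout: five 200 m dipole-dipole lines
# spaced 20 m apart; relies on the deprecated gen_DCIPsurvey alias internally):
#
#     IO_3d, survey_3d = gen_3d_survey_from_2d_lines(
#         "dipole-dipole", a=10, b=10, n_spacing=8,
#         n_lines=5, line_length=200.0, line_spacing=20.0,
#     )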
############
# Deprecated
############
def plot_pseudoSection(
data,
ax=None,
survey_type="dipole-dipole",
data_type="appConductivity",
space_type="half-space",
clim=None,
scale="linear",
sameratio=True,
pcolorOpts={},
data_location=False,
dobs=None,
dim=2,
):
warnings.warn(
"The plot_pseudoSection method has been deprecated. Please use "
"plot_pseudosection instead. This will be removed in version"
" 0.16.0 of SimPEG",
FutureWarning,
)
return plot_pseudosection(
data,
ax=ax,
survey_type=survey_type,
data_type=data_type,
space_type=space_type,
clim=clim,
scale=scale,
pcolor_opts=pcolorOpts,
data_locations=data_location,
dobs=dobs,
)
def apparent_resistivity(
data_object,
survey_type=None,
space_type="half space",
dobs=None,
eps=1e-10,
**kwargs,
):
warnings.warn(
"The apparent_resistivity method has been deprecated. Please use "
"apparent_resistivity_from_voltage instead. This will be removed in version"
" 0.16.0 of SimPEG",
DeprecationWarning,
)
if survey_type is not None:
warnings.warn(
"Keyword argument 'survey_type' is no longer necessary. "
"Survey may now have a mix of pole and dipole sources and receivers. "
"This will be removed in version 0.16.0 of SimPEG",
FutureWarning,
)
if dobs is None:
dobs = data_object.dobs
return apparent_resistivity_from_voltage(
data_object.survey, dobs, space_type=space_type, eps=eps, **kwargs
)
source_receiver_midpoints = deprecate_method(
pseudo_locations, "source_receiver_midpoints", "0.16.0"
)
def plot_layer(rho, mesh, **kwargs):
warnings.warn(
"The plot_layer method has been deprecated. Please use "
"plot_1d_layer_model instead. This will be removed in version"
" 0.17.0 of SimPEG",
DeprecationWarning,
)
return plot_1d_layer_model(mesh.hx, rho, z0=mesh.origin[0], **kwargs)
def convertObs_DC3D_to_2D(survey, lineID, flag="local"):
warnings.warn(
"The convertObs_DC3D_to_2D method has been deprecated. Please use "
"convert_3d_survey_to_2d. This will be removed in version"
" 0.16.0 of SimPEG",
FutureWarning,
)
return convert_survey_3d_to_2d_lines(survey, lineID)
def getSrc_locs(survey):
warnings.warn(
"The getSrc_locs method has been deprecated. Source "
"locations are now computed as a method of the survey "
"class. Please use Survey.source_locations(). This method "
" will be removed in version 0.17.0 of SimPEG",
DeprecationWarning,
)
return survey.source_locations()
def writeUBC_DCobs(
fileName,
data,
dim,
format_type,
survey_type="dipole-dipole",
ip_type=0,
comment_lines="",
):
"""
Write UBC GIF DCIP 2D or 3D observation file
Input:
:param str fileName: including path where the file is written out
:param SimPEG.Data data: DC data object
:param int dim: either 2 | 3
:param str format_type: either 'surface' | 'general' | 'simple'
:param str survey_type: 'dipole-dipole' | 'pole-dipole' |
'dipole-pole' | 'pole-pole' | 'gradient'
Output:
:return: UBC2D-Data file
:rtype: file
"""
warnings.warn(
"The writeUBC_DCobs method has been deprecated. Please use "
"write_dcip2d_ubc or write_dcip3d_ubc instead. These are imported "
"from SimPEG.utils.io_utils. This function will be removed in version"
" 0.17.0 of SimPEG",
DeprecationWarning,
)
if dim == 2:
write_dcip2d_ubc(
fileName,
data,
"volt",
"dobs",
format_type=format_type,
comment_lines=comment_lines,
)
elif dim == 3:
write_dcip3d_ubc(
fileName,
data,
"volt",
"dobs",
format_type=format_type,
comment_lines=comment_lines,
)
def writeUBC_DClocs(
fileName,
dc_survey,
dim,
format_type,
survey_type="dipole-dipole",
ip_type=0,
comment_lines="",
):
"""
Write UBC GIF DCIP 2D or 3D locations file
Input:
:param str fileName: including path where the file is written out
:param SimPEG.electromagnetics.static.resistivity.Survey dc_survey: DC survey object
:param int dim: either 2 | 3
:param str survey_type: either 'SURFACE' | 'GENERAL'
Output:
:rtype: file
:return: UBC 2/3D-locations file
"""
warnings.warn(
"The writeUBC_DClocs method has been deprecated. Please use "
"write_dcip2d_ubc or write_dcip3d_ubc instead. These are imported "
"from SimPEG.utils.io_utils. This function will be removed in version"
" 0.17.0 of SimPEG",
DeprecationWarning,
)
data = Data(dc_survey)
if dim == 2:
write_dcip2d_ubc(
fileName,
data,
"volt",
"survey",
format_type=format_type,
comment_lines=comment_lines,
)
elif dim == 3:
write_dcip3d_ubc(
fileName,
data,
"volt",
"survey",
format_type=format_type,
comment_lines=comment_lines,
)
def readUBC_DC2Dpre(fileName):
"""
Read UBC GIF DCIP 2D observation file and generate arrays
for tx-rx location
Input:
:param string fileName: path to the UBC GIF 3D obs file
Output:
:return survey: 2D DC survey class object
:rtype: SimPEG.electromagnetics.static.resistivity.Survey
Created on Mon March 9th, 2016 << Doug's 70th Birthday !! >>
@author: dominiquef
"""
warnings.warn(
"The readUBC_DC2Dpre method has been deprecated. Please use "
"read_dcip2d_ubc instead. This is imported "
"from SimPEG.utils.io_utils. This function will be removed in version"
" 0.17.0 of SimPEG",
DeprecationWarning,
)
return read_dcip2d_ubc(fileName, "volt", "general")
def readUBC_DC3Dobs(fileName, data_type="volt"):
"""
Read UBC GIF DCIP 3D observation file and generate arrays
for tx-rx location
Input:
:param string fileName: path to the UBC GIF 3D obs file
Output:
    :return: SimPEG Data object holding the survey, observed data and
        uncertainties
"""
warnings.warn(
"The readUBC_DC3Dobs method has been deprecated. Please use "
"read_dcip3d_ubc instead. This is imported "
"from SimPEG.utils.io_utils. This function will be removed in version"
" 0.17.0 of SimPEG",
DeprecationWarning,
)
return read_dcip3d_ubc(fileName, data_type)
gen_DCIPsurvey = deprecate_method(
generate_dcip_survey, "gen_DCIPsurvey", removal_version="0.16.0"
)
def generate_dcip_survey_line(
survey_type, data_type, endl, topo, ds, dh, n, dim_flag="2.5D", sources_only=False
):
warnings.warn(
"The gen_dcip_survey_line method has been deprecated. Please use "
"generate_dcip_sources_line instead. This will be removed in version"
" 0.17.0 of SimPEG",
DeprecationWarning,
)
source_list = generate_dcip_sources_line(
survey_type, data_type, dim_flag, endl, topo, n, ds
)
if sources_only:
return source_list
else:
return dc.Survey(source_list, survey_type=survey_type.lower())
| 32.42672 | 115 | 0.571593 | 8,561 | 65,048 | 4.210022 | 0.099404 | 0.01526 | 0.009322 | 0.007075 | 0.407719 | 0.355863 | 0.301731 | 0.273098 | 0.239221 | 0.216164 | 0 | 0.020785 | 0.314368 | 65,048 | 2,005 | 116 | 32.442893 | 0.787349 | 0.253997 | 0 | 0.299919 | 0 | 0.00489 | 0.137511 | 0.012913 | 0 | 0 | 0 | 0 | 0.004075 | 1 | 0.023635 | false | 0.000815 | 0.017115 | 0 | 0.067645 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d725febf567e9015bab0f40154e4c04483835973 | 2,003 | py | Python | ArcPy/addCampos_idSeq_idworkspace.py | phporath/GIS-Tools | 5a1613dfcd516ae1194dd4f1d3981ed11aa0dfa7 | [
"MIT"
] | null | null | null | ArcPy/addCampos_idSeq_idworkspace.py | phporath/GIS-Tools | 5a1613dfcd516ae1194dd4f1d3981ed11aa0dfa7 | [
"MIT"
] | null | null | null | ArcPy/addCampos_idSeq_idworkspace.py | phporath/GIS-Tools | 5a1613dfcd516ae1194dd4f1d3981ed11aa0dfa7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import arcpy
import csv
arcpy.env.workspace = input('Enter the folder path where the Shapefiles are saved: ')  # set the path to the folder containing the layers
shapes = arcpy.ListFeatureClasses()
checker_fieldname = ''
nchecker_idseq = 0
nchecker_idw_kspace = 0
for shape in shapes:
fields = arcpy.ListFields(shape)
    for field in fields:
        checker_fieldname = field.name
        if checker_fieldname == 'idseq':
            nchecker_idseq += 1
        print(nchecker_idseq)
        if checker_fieldname == 'idw_kspace':
            nchecker_idw_kspace += 1
        print(nchecker_idw_kspace)
if nchecker_idseq == 0:
        arcpy.AddField_management(shape, 'idseq', 'SHORT', 4, '', '', '', 'NULLABLE', 'REQUIRED') # add one line like this for each new field to be created
else:
arcpy.DeleteField_management(shape, 'idseq')
arcpy.AddField_management(shape, 'idseq', 'SHORT', 4, '', '', '', 'NULLABLE', 'REQUIRED')
print(shape)
nchecker_idseq = 0
if nchecker_idw_kspace == 0:
        arcpy.AddField_management(shape, 'idw_kspace', 'SHORT', 4, '', '', '', 'NULLABLE', 'REQUIRED') # add one line like this for each new field to be created
else:
arcpy.DeleteField_management(shape, 'idw_kspace')
arcpy.AddField_management(shape, 'idw_kspace', 'SHORT', 4, '', '', '', 'NULLABLE', 'REQUIRED')
print(shape)
nchecker_idw_kspace = 0
arcpy.CalculateField_management(shape, 'idseq', 0, "PYTHON_9.3")
arcpy.CalculateField_management(shape, 'idw_kspace', 0, "PYTHON_9.3")
print('End of process')
| 37.792453 | 162 | 0.556665 | 201 | 2,003 | 5.348259 | 0.313433 | 0.100465 | 0.110698 | 0.104186 | 0.465116 | 0.385116 | 0.385116 | 0.351628 | 0.351628 | 0.269767 | 0 | 0.014329 | 0.337993 | 2,003 | 52 | 163 | 38.519231 | 0.79638 | 0.078382 | 0 | 0.368421 | 0 | 0 | 0.132537 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.052632 | 0 | 0.052632 | 0.131579 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d726d9f94ddf507be2b941cbf3b7373710ec83ec | 3,915 | py | Python | num6/num6_gui.py | Almas-Ali/Num6 | c6ddd4dfc8e7fc45edd677afd943f55d3598f4cc | [
"MIT"
] | 7 | 2021-04-18T14:55:13.000Z | 2021-09-24T15:54:15.000Z | num6/num6_gui.py | Almas-Ali/Num6 | c6ddd4dfc8e7fc45edd677afd943f55d3598f4cc | [
"MIT"
] | 2 | 2021-04-16T11:11:53.000Z | 2021-04-18T15:05:09.000Z | num6/num6_gui.py | Almas-Ali/Num6 | c6ddd4dfc8e7fc45edd677afd943f55d3598f4cc | [
"MIT"
] | 2 | 2021-04-16T12:33:56.000Z | 2021-04-18T15:42:47.000Z | # num6_gui.py
from tkinter import *
from tkinter.messagebox import *
import num6
__version__ = '0.3.2'
def exit_f():
    retv = askquestion('Confirmation', 'Are you sure you want to exit?')
    if retv == 'yes':
        root.destroy()
    else:
        return 'Exit permission denied!'
def num6_encrypt():
    # Text widgets need explicit indices; '1.0' to 'end-1c' drops the trailing newline.
    encrypted_value.delete('1.0', END)
    encrypted_value.insert(END, num6.encrypt(string=normal_value.get('1.0', 'end-1c')))
def num6_decrypt():
    normal_value.delete('1.0', END)
    normal_value.insert(END, num6.decrypt(string=encrypted_value.get('1.0', 'end-1c')))
def Clear():
    # Clear both text areas.
    normal_value.delete('1.0', END)
    encrypted_value.delete('1.0', END)
def Copy():
    # Copy the encrypted text to the clipboard.
    root.clipboard_clear()
    root.clipboard_append(encrypted_value.get('1.0', 'end-1c'))
root = Tk()
root.title(f'Num6 - {__version__}')
winsize_height = root.winfo_screenheight()
winsize_width = root.winfo_screenwidth()
# root.geometry(f'{winsize_height}x{winsize_width}')
root.geometry('750x550')
# root.resizable(0, 0)
root.protocol("WM_DELETE_WINDOW", exit_f)
def about():
showinfo('About', f'Num6 - version {__version__}')
def author():
showinfo('Author', 'Developer:\nMd. Almas Ali')
def open_file():  # avoid shadowing the built-in open()
    showinfo('Result', 'Opening file.')
mainmenu = Menu(root)
Filemenu = Menu(mainmenu, tearoff=0)
Filemenu.add_command(label='Open', command=open_file)
Filemenu.add_separator()
Filemenu.add_command(label='Exit', command=exit_f)
mainmenu.add_cascade(label='File', menu=Filemenu)
aboutmenu = Menu(mainmenu, tearoff=0)
aboutmenu.add_command(label='About', command=about)
mainmenu.add_cascade(label='About', menu=aboutmenu)
authormenu = Menu(mainmenu, tearoff=0)
authormenu.add_command(label='Author', command=author)
mainmenu.add_cascade(label='Author', menu=authormenu)
root.config(menu=mainmenu)
f1 = Frame(root, bg="lightblue", borderwidth=6, relief=SUNKEN)
f1.pack(side='top', fill='x')
f2 = Frame(root, bg='aqua')
f2.pack(side='bottom', fill="x")
f3 = Frame(root, bg="lightblue", borderwidth=6, relief=SUNKEN)
f3.pack()
f4 = Frame(root, bg="lightblue", borderwidth=6, relief=SUNKEN)
f4.pack()
Label(f1, text='Num6', fg='green', bg="lightblue",
font="Times 26 bold").pack(padx=8, pady=5)
Label(f2, text="© Copyright by Md. Almas Ali.",
fg='green', bg='aqua', font="Times 8 bold").pack()
encryption_value = StringVar()
decryption_value = StringVar()
scrollbar = Scrollbar(f3)
scrollbar.pack(side='right', fill='y')
scrollbar2 = Scrollbar(f3, orient='horizontal')
scrollbar2.pack(side='bottom', fill='x')
Label(f3, text='Normal value :', bg='lightblue').pack()
normal_value = Text(f3, bg='white', fg='black',
yscrollcommand=scrollbar.set, xscrollcommand=scrollbar2.set, height=10, width=700)
normal_value.pack(expand=True, fill='both')
scrollbar.config(command=normal_value.yview)
scrollbar2.config(command=normal_value.xview)
scrollbar = Scrollbar(f4)
scrollbar.pack(side='right', fill='y')
scrollbar2 = Scrollbar(f4, orient='horizontal')
scrollbar2.pack(side='bottom', fill='x')
Label(f4, text='Encryped value :', bg='lightblue').pack()
encrypted_value = Text(f4, bg='white', fg='black',
yscrollcommand=scrollbar.set, xscrollcommand=scrollbar2.set, height=10, width=700)
encrypted_value.pack(expand=True, fill='both')
scrollbar.config(command=encrypted_value.yview)
scrollbar2.config(command=encrypted_value.xview)
Button(f4, text='Clear', bg='darkgreen', fg='white', command=Clear).pack()
Button(f4, text='Copy', bg='darkgreen', fg='white', command=Copy).pack()
Button(f4, text='Encrypt', bg='darkgreen',
fg='white', command=num6_encrypt).pack()
#Text(f2, textvariable=encryption_value, bg='lightblue', fg='black', font='Times 8 bold').pack(pady=30)
#Button(f1, text='Encrypt', bg='darkgreen', fg='white', command=num6_encrypt).pack(pady=30)
#Label(f1, text='Encrypted value :', bg='lightblue').pack()
#Text(f2, textvariable=decryption_value, bg='lightblue', fg='black', font='Times 8 bold').pack(pady=30)
#Button(f1, text='Decrypt', bg='darkgreen', fg='white', command=num6_decrypt).pack(pady=30)
root.mainloop()
| 27 | 103 | 0.706769 | 535 | 3,915 | 5.078505 | 0.26729 | 0.036437 | 0.029444 | 0.033125 | 0.3629 | 0.306588 | 0.295915 | 0.295915 | 0.213471 | 0.140596 | 0 | 0.025529 | 0.11954 | 3,915 | 144 | 104 | 27.1875 | 0.762402 | 0.1341 | 0 | 0.095238 | 0 | 0 | 0.175096 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.035714 | 0.02381 | 0.178571 | 0.02381 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7277920c94cb48a30fa8fe50d5f101bd58e052f | 4,576 | py | Python | dist_forest_time_tile_full_84_features/modis_dataset.py | edisonguo/ml_geoglam | ba18926931f95965308af9059ca114edf298b2f8 | [
"Apache-2.0"
] | null | null | null | dist_forest_time_tile_full_84_features/modis_dataset.py | edisonguo/ml_geoglam | ba18926931f95965308af9059ca114edf298b2f8 | [
"Apache-2.0"
] | null | null | null | dist_forest_time_tile_full_84_features/modis_dataset.py | edisonguo/ml_geoglam | ba18926931f95965308af9059ca114edf298b2f8 | [
"Apache-2.0"
] | null | null | null | import glob
import os
import numpy as np
from osgeo import gdal
import netCDF4 as nc
import datetime
def input_mask(pq_mask_path):
#pq_ds = gdal.Open('HDF4_EOS:EOS_GRID:"{}":MOD_Grid_BRDF:BRDF_Albedo_Ancillary'.format(pq_mask_path))
pq_ds = gdal.Open('HDF4_EOS:EOS_GRID:"{}":MOD_Grid_BRDF:BRDF_Albedo_LandWaterType'.format(pq_mask_path))
pq_raw = pq_ds.GetRasterBand(1).ReadAsArray()
mask = 0b0000000000001111
#pq = pq_raw>>4 & mask
# in collection 6 I don't need to shift 4 places
pq = pq_raw & mask
snow_ds = gdal.Open('HDF4_EOS:EOS_GRID:"{}":MOD_Grid_BRDF:Snow_BRDF_Albedo'.format(pq_mask_path))
snow_pq = np.equal(snow_ds.ReadAsArray(), 0)
# True if pq is 1, 2 or 4
pq = np.logical_and(snow_pq, np.logical_or(np.logical_or(np.equal(pq, np.ones(1)), np.equal(pq, np.ones(1)*2)), np.equal(pq, np.ones(1)*4)))
#pq = np.logical_or(np.logical_or(np.equal(pq, np.ones(1)), np.equal(pq, np.ones(1)*2)), np.equal(pq, np.ones(1)*4))
return pq.reshape(2400*2400)
def input_stack(tile_path):
bands = [gdal.Open('HDF4_EOS:EOS_GRID:"{}":MOD_Grid_BRDF:Nadir_Reflectance_Band{}'.format(tile_path, b)) for b in range(1, 8)]
bands_stack = np.empty((2400*2400, 84), dtype=np.float32)
#add band
for b in range(0, len(bands)):
bands_stack[:, b] = np.nan_to_num( bands[b].GetRasterBand(1).ReadAsArray().reshape((2400*2400, ))*.0001 )
ii = 7
#add log(band)
bands_stack[:, ii:ii+7] = np.nan_to_num( np.log(bands_stack[:, :7]) )
#add band*log(band)
bands_stack[:, ii+7:ii+14] = np.nan_to_num( bands_stack[:, :7] * bands_stack[:, ii:ii+7] )
ii += 14
#add band*next_band
for b in range(7):
for b2 in range(b+1, 7):
bands_stack[:, ii] = np.nan_to_num( bands_stack[:, b] * bands_stack[:, b2] )
ii += 1
#add log(band)*log(next_band)
for b in range(7):
for b2 in range(b+1, 7):
bands_stack[:, ii] = np.nan_to_num( bands_stack[:, b+7] * bands_stack[:, b2+7] )
ii += 1
#add (next_band-band)/(next_band+band)
for b in range(7):
for b2 in range(b+1, 7):
bands_stack[:, ii] = np.nan_to_num( (bands_stack[:, b2]-bands_stack[:, b]) / (bands_stack[:, b2]+bands_stack[:, b]) )
ii += 1
#--------------------------------
# this bit new, by JPG
#bands_stack[:, -1] = np.ones((bands_stack.shape[0],), dtype=bands_stack.dtype)
#--------------------------------
return bands_stack
def get_masked_bands(arr, mask):
res_masks = mask & (np.sum(arr[:, :7] < 0, axis=1) == 0) & (np.sum(arr[:, :7] > 1, axis=1) == 0)
return arr[res_masks, :], res_masks
def get_x(h, v, year, month, day):
root_path = "/g/data2/u39/public/data/modis/lpdaac-tiles-c6"
tile_pattern = os.path.join(root_path, "MCD43A4.006/%d.%.2d.%.2d/MCD43A4.*.h%.2dv%.2d.006.*.hdf" % (year, month, day, h, v))
tile_path = glob.glob(tile_pattern)[0]
mask_pattern = os.path.join(root_path, "MCD43A2.006/%d.%.2d.%.2d/MCD43A2.*.h%.2dv%.2d.006.*.hdf" % (year, month, day, h, v))
mask_path = glob.glob(mask_pattern)[0]
mask = input_mask(mask_path)
arr = input_stack(tile_path)
masked_arr, arr_masks = get_masked_bands(arr, mask)
return masked_arr, arr_masks
def get_y_timestamps(h, v, year):
fc_path = '/g/data2/tc43/modis-fc/v310/tiles/8-day/cover/FC.v310.MCD43A4.h%.2dv%.2d.%d.006.nc' % (h, v, year)
timestamps = []
with nc.Dataset(fc_path, 'r', format='NETCDF4') as src:
ts = nc.num2date(src['time'][:], src['time'].units, src['time'].calendar)
        for i in range(ts.shape[0]):
timestamps.append(ts[i])
return timestamps
def get_y(h, v, year, month, day, mask=None):
fc_path = '/g/data2/tc43/modis-fc/v310/tiles/8-day/cover/FC.v310.MCD43A4.h%.2dv%.2d.%d.006.nc' % (h, v, year)
fc = None
bands = ["phot_veg", "nphot_veg", "bare_soil"]
with nc.Dataset(fc_path, 'r', format='NETCDF4') as src:
ts = nc.num2date(src['time'][:], src['time'].units, src['time'].calendar)
dt = datetime.datetime(year, month, day)
        for i in range(ts.shape[0]):
if dt == ts[i]:
for ib, bnd in enumerate(bands):
data = src[bnd][i, ...].reshape(-1)
if fc is None:
fc = np.empty((data.shape[0], len(bands)), dtype=data.dtype)
fc[:, ib] = data
if not mask is None:
fc = fc[mask, ...]
break
return fc
| 37.508197 | 144 | 0.588724 | 742 | 4,576 | 3.463612 | 0.202156 | 0.085603 | 0.021012 | 0.025681 | 0.478988 | 0.408949 | 0.363035 | 0.347471 | 0.347471 | 0.33463 | 0 | 0.055915 | 0.222247 | 4,576 | 121 | 145 | 37.818182 | 0.666198 | 0.130682 | 0 | 0.223684 | 0 | 0.052632 | 0.141883 | 0.125221 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078947 | false | 0 | 0.078947 | 0 | 0.236842 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d727c4e779d0c9ddbc00eb9ad5ef2d2424713029 | 6,902 | py | Python | nn_model.py | Alexzsh/chinese_short_text_classification | de16359b4c83cc18c0478c33e211cc3f85b8e36b | [
"MIT"
] | 1 | 2020-11-25T09:03:02.000Z | 2020-11-25T09:03:02.000Z | nn_model.py | Alexzsh/chinese_short_text_classification | de16359b4c83cc18c0478c33e211cc3f85b8e36b | [
"MIT"
] | null | null | null | nn_model.py | Alexzsh/chinese_short_text_classification | de16359b4c83cc18c0478c33e211cc3f85b8e36b | [
"MIT"
] | null | null | null | # coding: utf-8
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
class TNNConfig(object):
"""NN配置参数"""
def __init__(self,nn):
self.nn=nn
    embedding_dim = 300  # word embedding dimension
    seq_length = 40  # sequence length
    num_classes = 6  # number of classes
    num_filters = 256  # number of convolution filters
    kernel_size = 5  # convolution kernel size
    vocab_size = 5000  # vocabulary size
    filter_sizes=(2,3,4)
    hidden_dim = 128  # neurons in the fully-connected layer
    dropout_keep_prob = 0.5  # dropout keep probability
    learning_rate = 1e-3  # learning rate
    batch_size = 64  # training batch size
    num_epochs = 10  # total number of epochs
    print_per_batch = 100  # print results every this many batches
    save_per_batch = 10  # write to tensorboard every this many batches
    l2_reg_lambda=0.0
    num_layers = 2  # number of hidden layers
    rnn = 'gru'  # 'lstm' or 'gru'
class TextNN(object):
"""文本分类,CNN模型"""
def __init__(self, config):
self.config = config
        # Three input placeholders
self.input_x = tf.placeholder(tf.int32, [None, self.config.seq_length], name='input_x')
self.input_y = tf.placeholder(tf.float32, [None, self.config.num_classes], name='input_y')
self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
self.cnn() if config.nn=="cnn" else self.rnn()
def cnn(self):
"""CNN模型"""
# Keeping track of l2 regularization loss (optional)
l2_loss = tf.constant(0.0)
# Embedding layer
with tf.device('/cpu:0'), tf.name_scope("embedding"):
self.W = tf.Variable(
tf.random_uniform([self.config.vocab_size, self.config.embedding_dim], -1.0, 1.0),
name="W")
self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
# Create a convolution + maxpool layer for each filter size
pooled_outputs = []
for i, filter_size in enumerate(self.config.filter_sizes):
with tf.name_scope("conv-maxpool-%s" % filter_size):
# Convolution Layer
filter_shape = [filter_size, self.config.embedding_dim, 1, self.config.num_filters]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[self.config.num_filters]), name="b")
conv = tf.nn.conv2d(
self.embedded_chars_expanded,
W,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
# Apply nonlinearity
h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
# Maxpooling over the outputs
                # Returns a tensor like [<tf.Tensor 'conv-maxpool-3/pool:0' shape=(?, 1, 1, 128) dtype=float32>]
pooled = tf.nn.max_pool(
h,
ksize=[1, self.config.seq_length - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding='VALID',
name="pool")
pooled_outputs.append(pooled)
# Combine all the pooled features
num_filters_total = self.config.num_filters * len(self.config.filter_sizes)
self.h_pool = tf.concat(pooled_outputs, 3)
        # Flatten: -1 lets TensorFlow infer that dimension (total size / the other dim), unrolling the pooled features into one row per example
self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
# Add dropout
with tf.name_scope("dropout"):
self.h_drop = tf.nn.dropout(self.h_pool_flat, self.keep_prob)
        # Fully-connected layer: final (unnormalized) scores and predictions
with tf.name_scope("output"):
W = tf.get_variable(
"W",
shape=[num_filters_total, self.config.num_classes],
initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.constant(0.1, shape=[self.config.num_classes]), name="b")
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
self.predictions = tf.argmax(self.scores, 1, name="predictions")
# CalculateMean cross-entropy loss
with tf.name_scope("loss"):
losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
self.loss = tf.reduce_mean(losses) + self.config.l2_reg_lambda * l2_loss
self.optim = tf.train.AdamOptimizer(learning_rate=self.config.learning_rate).minimize(self.loss)
# Accuracy
with tf.name_scope("accuracy"):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.acc = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
def rnn(self):
"""rnn模型"""
def lstm_cell(): # lstm核
return tf.contrib.rnn.BasicLSTMCell(self.config.hidden_dim, state_is_tuple=True)
def gru_cell(): # gru核
return tf.contrib.rnn.GRUCell(self.config.hidden_dim)
def dropout(): # 为每一个rnn核后面加一个dropout层
if (self.config.rnn == 'lstm'):
cell = lstm_cell()
else:
cell = gru_cell()
return tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=self.keep_prob)
        # Word embedding lookup
with tf.device('/cpu:0'):
embedding = tf.get_variable('embedding', [self.config.vocab_size, self.config.embedding_dim])
embedding_inputs = tf.nn.embedding_lookup(embedding, self.input_x)
with tf.name_scope("rnn"):
            # Multi-layer RNN network
cells = [dropout() for _ in range(self.config.num_layers)]
rnn_cell = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)
_outputs, _ = tf.nn.dynamic_rnn(cell=rnn_cell, inputs=embedding_inputs, dtype=tf.float32)
            last = _outputs[:, -1, :]  # take the output at the last time step as the result
with tf.name_scope("score"):
            # Fully-connected layer, followed by dropout and ReLU activation
fc = tf.layers.dense(last, self.config.hidden_dim, name='fc1')
fc = tf.contrib.layers.dropout(fc, self.keep_prob)
fc = tf.nn.relu(fc)
            # Classifier
            self.logits = tf.layers.dense(fc, self.config.num_classes, name='fc2')
            self.y_pred_cls = tf.argmax(tf.nn.softmax(self.logits), 1)  # predicted class
with tf.name_scope("optimize"):
            # Loss function: cross-entropy
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.input_y)
self.loss = tf.reduce_mean(cross_entropy)
            # Optimizer
self.optim = tf.train.AdamOptimizer(learning_rate=self.config.learning_rate).minimize(self.loss)
with tf.name_scope("accuracy"):
            # Accuracy
correct_pred = tf.equal(tf.argmax(self.input_y, 1), self.y_pred_cls)
self.acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
def bigru_att():
#todo
pass
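# A minimal construction sketch (assumes TensorFlow 1.x, where tf.placeholder
# and tf.contrib are available; training/feeding code is not shown):
#
#     config = TNNConfig(nn="cnn")   # or nn="rnn"
#     model = TextNN(config)
#     # feed model.input_x, model.input_y and model.keep_prob, then run
#     # model.optim / model.loss / model.acc in a session to train.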
| 40.127907 | 108 | 0.592872 | 891 | 6,902 | 4.399551 | 0.278339 | 0.066327 | 0.028061 | 0.034439 | 0.243367 | 0.192857 | 0.163265 | 0.152551 | 0.101531 | 0.061224 | 0 | 0.021721 | 0.286294 | 6,902 | 171 | 109 | 40.362573 | 0.774056 | 0.108664 | 0 | 0.052632 | 0 | 0 | 0.034653 | 0 | 0 | 0 | 0 | 0.005848 | 0 | 1 | 0.070175 | false | 0.008772 | 0.017544 | 0.017544 | 0.280702 | 0.008772 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d727fadefe3f72c82fdb72c345357c380c9185c4 | 3,702 | py | Python | force_bdss/io/tests/test_workflow_writer.py | scottwedge/force-bdss | c3fdf6a190fa448dbaecedcb81aaba504563dee3 | [
"BSD-2-Clause"
] | 2 | 2019-08-19T16:02:40.000Z | 2020-10-01T11:38:00.000Z | force_bdss/io/tests/test_workflow_writer.py | scottwedge/force-bdss | c3fdf6a190fa448dbaecedcb81aaba504563dee3 | [
"BSD-2-Clause"
] | 339 | 2017-07-06T14:35:40.000Z | 2021-05-04T14:18:11.000Z | force_bdss/io/tests/test_workflow_writer.py | scottwedge/force-bdss | c3fdf6a190fa448dbaecedcb81aaba504563dee3 | [
"BSD-2-Clause"
] | 2 | 2019-03-05T14:17:25.000Z | 2020-06-26T01:48:07.000Z | # (C) Copyright 2010-2020 Enthought, Inc., Austin, TX
# All rights reserved.
import json
import unittest
import tempfile
from force_bdss.core.execution_layer import ExecutionLayer
from force_bdss.core.kpi_specification import KPISpecification
from force_bdss.io.workflow_reader import WorkflowReader
from force_bdss.tests.dummy_classes.factory_registry import (
DummyFactoryRegistry,
)
from force_bdss.io.workflow_writer import (
WorkflowWriter,
)
from force_bdss.core.workflow import Workflow
from force_bdss.core.input_slot_info import InputSlotInfo
class TestWorkflowWriter(unittest.TestCase):
def setUp(self):
self.registry = DummyFactoryRegistry()
self.mco_factory = self.registry.mco_factories[0]
self.mco_parameter_factory = self.mco_factory.parameter_factories[0]
self.data_source_factory = self.registry.data_source_factories[0]
def sample_workflow(self):
wf = Workflow()
wf.mco_model = self.mco_factory.create_model()
wf.mco_model.parameters = [self.mco_parameter_factory.create_model()]
wf.mco_model.kpis = [KPISpecification()]
wf.execution_layers = [
ExecutionLayer(
data_sources=[
self.data_source_factory.create_model(),
self.data_source_factory.create_model(),
]
),
ExecutionLayer(
data_sources=[self.data_source_factory.create_model()]
),
]
return wf
def test_write(self):
wfwriter = WorkflowWriter()
workflow = self.sample_workflow()
tmp_file = tempfile.NamedTemporaryFile()
filename = tmp_file.name
wfwriter.write(workflow, filename)
with open(filename) as f:
result = json.load(f)
self.assertIn("version", result)
self.assertIn("workflow", result)
self.assertIn("mco_model", result["workflow"])
self.assertIn("execution_layers", result["workflow"])
def test_write_and_read(self):
wfwriter = WorkflowWriter()
workflow = self.sample_workflow()
tmp_file = tempfile.NamedTemporaryFile()
filename = tmp_file.name
wfwriter.write(workflow, filename)
wfreader = WorkflowReader(self.registry)
wf_result = wfreader.read(filename)
self.assertEqual(
wf_result.mco_model.factory.id, workflow.mco_model.factory.id
)
self.assertEqual(len(wf_result.execution_layers), 2)
self.assertEqual(len(wf_result.execution_layers[0].data_sources), 2)
self.assertEqual(len(wf_result.execution_layers[1].data_sources), 1)
def test_get_workflow_data(self):
wfwriter = WorkflowWriter()
workflow = Workflow()
self.assertDictEqual(
wfwriter.get_workflow_data(workflow),
workflow.__getstate__(),
)
def test_write_and_read_empty_workflow(self):
workflow = Workflow()
wfwriter = WorkflowWriter()
tmp_file = tempfile.NamedTemporaryFile()
filename = tmp_file.name
wfwriter.write(workflow, filename)
wfreader = WorkflowReader(self.registry)
wf_result = wfreader.read(filename)
self.assertIsNone(wf_result.mco_model)
def test_traits_to_dict(self):
wf = self.sample_workflow()
exec_layer = wf.execution_layers[0]
exec_layer.data_sources[0].input_slot_info = [InputSlotInfo()]
datastore_list = exec_layer.__getstate__()
new_slotdata = datastore_list["data_sources"][0]["model_data"][
"input_slot_info"
]
self.assertNotIn("__traits_version__", new_slotdata)
| 33.963303 | 77 | 0.670448 | 404 | 3,702 | 5.85396 | 0.247525 | 0.026638 | 0.038478 | 0.028753 | 0.376321 | 0.340803 | 0.303594 | 0.286258 | 0.25074 | 0.202537 | 0 | 0.006738 | 0.23825 | 3,702 | 108 | 78 | 34.277778 | 0.831915 | 0.019719 | 0 | 0.306818 | 0 | 0 | 0.030621 | 0 | 0 | 0 | 0 | 0 | 0.125 | 1 | 0.079545 | false | 0 | 0.113636 | 0 | 0.215909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d728bc5cc637050e80fe9e969a199615b6644704 | 2,418 | py | Python | Utils/ActionSelection.py | DavidLSmyth/DroneCoordinatedSearch | 99173ef63c726049596fb79eda168b4fc3a550a8 | [
"MIT"
] | 1 | 2018-12-26T04:13:06.000Z | 2018-12-26T04:13:06.000Z | Utils/ActionSelection.py | DavidLSmyth/DroneCoordinatedSearch | 99173ef63c726049596fb79eda168b4fc3a550a8 | [
"MIT"
] | null | null | null | Utils/ActionSelection.py | DavidLSmyth/DroneCoordinatedSearch | 99173ef63c726049596fb79eda168b4fc3a550a8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 13 11:55:04 2018
@author: 13383861
"""
import random
from Utils.AgentObservation import AgentObservation
from Utils.ObservationSetManager import ObservationSetManager
from Utils.UE4Coord import UE4Coord
from Utils.UE4Grid import UE4Grid
from Utils.BeliefMap import create_belief_map, BeliefMap
def get_move_from_belief_map_epsilon_greedy(belief_map: BeliefMap, current_grid_loc: UE4Coord, epsilon: float, eff_radius = None) -> UE4Coord:
'''Epsilon greedy move selection based on neighbors in belief map'''
    #assume grid is regular, get all neighbors that are within max(lat_spacing, long_spacing)
    #assuming that lat_spacing < 2*lng_spacing and vice versa
if not eff_radius:
eff_radius = max(belief_map.get_grid().get_lat_spacing(), belief_map.get_grid().get_lng_spacing())
#a list of UE4Coord
neighbors = belief_map.get_grid().get_neighbors(current_grid_loc, eff_radius)
#don't move to new position if can't find any neighbors to move to
if not neighbors:
return current_grid_loc
#neighbors = list(filter(lambda grid_loc: grid_loc.get_dist_to_other(current_grid_loc) <= eff_radius and grid_loc!=current_grid_loc, bel_map.keys()))
if random.random() < epsilon:
#epsilon random
return_move = random.choice(neighbors)
    else:
        #otherwise choose the move that has the highest value
        #(fall back to the first neighbor so return_move is always bound)
        return_move = neighbors[0]
        max_move_value = 0
        for neighbor in neighbors:
            if belief_map.get_belief_map_component(neighbor).likelihood > max_move_value:
                max_move_value = belief_map.get_belief_map_component(neighbor).likelihood
                return_move = neighbor
# move = max(map(lambda neighbor: bel_map[neighbor].likelihood, neighbors))
return return_move
if __name__ == "__main__":
test_grid = UE4Grid(1, 1, UE4Coord(0,0), 6, 5)
obs1 = AgentObservation(UE4Coord(0,0),0.5, 1, 1234, 'agent2')
obs2 = AgentObservation(UE4Coord(0,0),0.7, 2, 1235, 'agent2')
obs3 = AgentObservation(UE4Coord(0,1),0.95, 3, 1237, 'agent2')
#(grid, agent_name, prior = {})
obs_man = ObservationSetManager("agent1")
obs_man.update_rav_obs_set('agent2', [obs1, obs2, obs3])
belief_map = obs_man.get_discrete_belief_map_from_observations(test_grid)
assert get_move_from_belief_map_epsilon_greedy(belief_map, UE4Coord(1,1), 0.0, 1.8) == UE4Coord(0,1) | 46.5 | 153 | 0.722498 | 345 | 2,418 | 4.791304 | 0.342029 | 0.08167 | 0.042347 | 0.029038 | 0.203872 | 0.108893 | 0.108893 | 0.108893 | 0.050817 | 0 | 0 | 0.045983 | 0.181555 | 2,418 | 52 | 154 | 46.5 | 0.789288 | 0.283706 | 0 | 0 | 0 | 0 | 0.022209 | 0 | 0 | 0 | 0 | 0 | 0.033333 | 1 | 0.033333 | false | 0 | 0.2 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d72b88c082f21e21154cc27967716ac7170b498f | 3,403 | py | Python | MSData/AutomateAllRows.py | ANURAGWARRING/HyasynthBioAllCode | 60afd8c7609f9cf9982cc8a4ee85e42c8f48cd05 | [
"MIT"
] | null | null | null | MSData/AutomateAllRows.py | ANURAGWARRING/HyasynthBioAllCode | 60afd8c7609f9cf9982cc8a4ee85e42c8f48cd05 | [
"MIT"
] | null | null | null | MSData/AutomateAllRows.py | ANURAGWARRING/HyasynthBioAllCode | 60afd8c7609f9cf9982cc8a4ee85e42c8f48cd05 | [
"MIT"
] | null | null | null | import openpyxl
import math
def ALLCal(ReadCol,WriteColAvg,WriteColSd):
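    """Compute per-sample averages and standard deviations for one data column.

    Reads replicate values from column ``ReadCol`` of the worksheet, groups
    them by sample, then writes dilution-scaled averages to ``WriteColAvg``
    and standard deviations to ``WriteColSd`` (column numbers are 1-based,
    as in openpyxl).
    """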
path = "Example for data processing (3).xlsx"
wb_obj = openpyxl.load_workbook(path)
sheet_obj = wb_obj.active
count = 0
NumOfSamples = sheet_obj.cell(row=1, column=5).value
IntNumOfSamples = int(NumOfSamples)
print("Total number of samples:", end='')
print(IntNumOfSamples)
SumOfSamples = []
AvgOfSamplesR = []
OrganSamples = []
NewOrganSamples = []
Sd = []
Dilution = sheet_obj.cell(row=3, column=5).value
NumberOfReplicates = sheet_obj.cell(row=2, column=5).value
IntNumberOfReplicates = int(NumberOfReplicates)
print("Total number of Replicates:", end='')
print(IntNumberOfReplicates)
for i in range(1, IntNumOfSamples + 1):
SumOfSamples.append(0)
print(SumOfSamples)
m_row = sheet_obj.max_row
for i in range(10, m_row + 1):
cell_obj = sheet_obj.cell(row=i, column=4)
count = count + 1
if cell_obj.value == None:
break
print("Total Occupied Rows:", end='')
print(count)
for j in range(0, count - 1):
cell_obj = sheet_obj.cell(row=10 + j, column=ReadCol)
OrganSamples.append(cell_obj.value)
print(NewOrganSamples)
for j in range(0, IntNumOfSamples):
for i in range(0, count - 1):
if ((IntNumOfSamples * i + j) < (count - 1)):
NewOrganSamples.append(OrganSamples[IntNumOfSamples * i + j])
for j in range(0, count - 1):
if (NewOrganSamples[j] == None):
NewOrganSamples[j] = 0
print("Total number of NewOrganSamples:", end='')
print(NewOrganSamples)
SplitNewOrganOfSamples = [NewOrganSamples[i:i + IntNumberOfReplicates] for i in
range(0, len(NewOrganSamples), IntNumberOfReplicates)]
print("Total number of SplitNewOrganOfSamples:", end='')
print(SplitNewOrganOfSamples)
for row in SplitNewOrganOfSamples:
NoneCount = 0
k = 0
SumOfSamples[k] = 0
for i in row:
if (i == 0):
NoneCount = NoneCount + 1
SumOfSamples[k] = i + SumOfSamples[k]
print("SumOfSamples:", end='')
print(SumOfSamples[k])
print("NoneCount:", end='')
print(NoneCount)
if ((IntNumberOfReplicates - NoneCount) == 0):
AvgOfSamplesR.append(0)
else:
AvgOfSamplesR.append((SumOfSamples[k]) / (IntNumberOfReplicates - NoneCount))
print(AvgOfSamplesR)
for i in range(0, len(AvgOfSamplesR)):
cell_obj = sheet_obj.cell(row=i + 10, column=WriteColAvg)
cell_obj.value = AvgOfSamplesR[i]*Dilution
    # Standard deviation
for i in range(0,len(SplitNewOrganOfSamples)):
print(SplitNewOrganOfSamples[i])
Sum1 = 0
for j in SplitNewOrganOfSamples[i]:
print(j)
Sum1 = (AvgOfSamplesR[i]-j)**2+Sum1
        Sd.append(math.sqrt(Sum1 / IntNumberOfReplicates))  # population SD over the replicates (was hard-coded /3)
print(Sd)
for i in range(0, len(AvgOfSamplesR)):
cell_obj = sheet_obj.cell(row=i + 10, column=WriteColSd)
        cell_obj.value = Sd[i] * Dilution  # write the SD (scaled like the averages), not the average again
wb_obj.save(path)
ALLCal(5,15,25)
ALLCal(6,16,26)
ALLCal(7,17,27)
ALLCal(8,18,28)
ALLCal(9,19,29)
ALLCal(10,20,30)
ALLCal(11,21,31)
ALLCal(12,22,32)
| 32.721154 | 90 | 0.60241 | 396 | 3,403 | 5.116162 | 0.237374 | 0.034551 | 0.023692 | 0.051826 | 0.19003 | 0.155972 | 0.098717 | 0.058243 | 0.058243 | 0.058243 | 0 | 0.037566 | 0.280341 | 3,403 | 103 | 91 | 33.038835 | 0.78971 | 0.005289 | 0 | 0.089888 | 0 | 0 | 0.06128 | 0.007012 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011236 | false | 0 | 0.022472 | 0 | 0.033708 | 0.224719 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d72be399f483f65bbbe646bff25b79d80c7925af | 7,717 | py | Python | bugsnag/client.py | ForroKulcs/bugsnag-python | 107c1add31a2202cc08ef944aa00ab96996b247a | [
"MIT"
] | null | null | null | bugsnag/client.py | ForroKulcs/bugsnag-python | 107c1add31a2202cc08ef944aa00ab96996b247a | [
"MIT"
] | null | null | null | bugsnag/client.py | ForroKulcs/bugsnag-python | 107c1add31a2202cc08ef944aa00ab96996b247a | [
"MIT"
] | null | null | null | import sys
import threading

from functools import wraps
from typing import Union, Tuple, Callable, Optional, List, Type

from bugsnag.configuration import Configuration, RequestConfiguration
from bugsnag.event import Event
from bugsnag.handlers import BugsnagHandler
from bugsnag.sessiontracker import SessionTracker

import bugsnag

__all__ = ('Client',)


class Client:
    """
    A Bugsnag monitoring and reporting client.

    >>> client = Client(api_key='...')  # doctest: +SKIP
    """

    def __init__(self, configuration: Optional[Configuration] = None,
                 install_sys_hook=True, **kwargs):
        self.configuration = configuration or Configuration()  # type: Configuration # noqa: E501
        self.session_tracker = SessionTracker(self.configuration)
        self.configuration.configure(**kwargs)

        if install_sys_hook:
            self.install_sys_hook()

    def capture(self,
                exceptions: Union[Tuple[Type, ...], Callable, None] = None,
                **options):
        """
        Run a block of code within the client's context.
        Any exception raised will be reported to Bugsnag.

        >>> with client.capture():  # doctest: +SKIP
        ...     raise Exception('an exception passed to bugsnag then reraised')

        The context can optionally include specific types to capture.

        >>> with client.capture((TypeError,)):  # doctest: +SKIP
        ...     raise Exception('an exception which does not get captured')

        Alternatively, functions can be decorated to capture any
        exceptions thrown during execution and reraised.

        >>> @client.capture  # doctest: +SKIP
        ... def foo():
        ...     raise Exception('an exception passed to bugsnag then reraised')

        The decoration can optionally include specific types to capture.

        >>> @client.capture((TypeError,))  # doctest: +SKIP
        ... def foo():
        ...     raise Exception('an exception which does not get captured')
        """
        if callable(exceptions):
            return ClientContext(self, (Exception,))(exceptions)
        return ClientContext(self, exceptions, **options)

    def notify(self, exception: BaseException, asynchronous=None, **options):
        """
        Notify Bugsnag of an exception.

        >>> client.notify(Exception('Example'))  # doctest: +SKIP
        """
        event = Event(exception, self.configuration,
                      RequestConfiguration.get_instance(), **options)
        self.deliver(event, asynchronous=asynchronous)

    def notify_exc_info(self, exc_type, exc_value, traceback,
                        asynchronous=None, **options):
        """
        Notify Bugsnag of an exception via exc_info.

        >>> client.notify_exc_info(*sys.exc_info())  # doctest: +SKIP
        """
        exception = exc_value
        options['traceback'] = traceback
        event = Event(exception, self.configuration,
                      RequestConfiguration.get_instance(), **options)
        self.deliver(event, asynchronous=asynchronous)

    def excepthook(self, exc_type, exc_value, traceback):
        if self.configuration.auto_notify:
            self.notify_exc_info(
                exc_type, exc_value, traceback,
                severity='error',
                unhandled=True,
                severity_reason={
                    'type': 'unhandledException'
                })

    def install_sys_hook(self):
        self.sys_excepthook = sys.excepthook

        def excepthook(*exc_info):
            self.excepthook(*exc_info)

            if self.sys_excepthook:
                self.sys_excepthook(*exc_info)

        sys.excepthook = excepthook
        sys.excepthook.bugsnag_client = self

        if hasattr(threading, 'excepthook'):
            self.threading_excepthook = threading.excepthook

            def threadhook(args):
                self.excepthook(args[0], args[1], args[2])

                if self.threading_excepthook:
                    self.threading_excepthook(args)

            threading.excepthook = threadhook
            threading.excepthook.bugsnag_client = self

    def uninstall_sys_hook(self):
        client = getattr(sys.excepthook, 'bugsnag_client', None)

        if client is self:
            sys.excepthook = self.sys_excepthook
            self.sys_excepthook = None

        if hasattr(threading, 'excepthook'):
            client = getattr(threading.excepthook, 'bugsnag_client', None)

            if client is self:
                threading.excepthook = self.threading_excepthook
                self.threading_excepthook = None

    def deliver(self, event: Event,
                asynchronous: Optional[bool] = None):
        """
        Deliver the exception event to Bugsnag.
        """
        if not self.should_deliver(event):
            return

        def run_middleware():
            initial_severity = event.severity
            initial_reason = event.severity_reason.copy()

            def send_payload():
                if asynchronous is None:
                    options = {}
                else:
                    options = {'asynchronous': asynchronous}

                if event.api_key is None:
                    bugsnag.logger.warning(
                        "No API key configured, couldn't notify")
                    return

                if initial_severity != event.severity:
                    event.severity_reason = {
                        'type': 'userCallbackSetSeverity'
                    }
                else:
                    event.severity_reason = initial_reason

                payload = event._payload()

                try:
                    self.configuration.delivery.deliver(self.configuration,
                                                        payload, options)
                except Exception as e:
                    bugsnag.logger.exception('Notifying Bugsnag failed %s', e)

                # Trigger session delivery
                self.session_tracker.send_sessions()

            self.configuration.middleware.run(event, send_payload)

        self.configuration.internal_middleware.run(event, run_middleware)

    def should_deliver(self, event: Event) -> bool:
        # Return early if we shouldn't notify for current release stage
        if not self.configuration.should_notify():
            return False

        # Return early if we should ignore exceptions of this type
        if self.configuration.should_ignore(event.exception):
            return False

        return True

    def log_handler(self, extra_fields: List[str] = None) -> BugsnagHandler:
        return BugsnagHandler(client=self, extra_fields=extra_fields)


class ClientContext:
    def __init__(self, client,
                 exception_types: Optional[Tuple[Type, ...]] = None,
                 **options):
        self.client = client
        self.options = options
        if 'severity' in options:
            options['severity_reason'] = dict(type='userContextSetSeverity')
        self.exception_types = exception_types or (Exception,)

    def __call__(self, function: Callable):
        @wraps(function)
        def decorate(*args, **kwargs):
            try:
                return function(*args, **kwargs)
            except self.exception_types as e:
                self.client.notify(e, source_func=function, **self.options)
                raise

        return decorate

    def __enter__(self):
        pass

    def __exit__(self, *exc_info):
        if any(exc_info):
            if any(isinstance(exc_info[1], e) for e in self.exception_types):
                self.client.notify_exc_info(*exc_info, **self.options)
        return False
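
# A minimal usage sketch of the API above ('my-api-key' is a placeholder and
# delivery assumes a reachable Bugsnag endpoint):
#
#     client = Client(api_key='my-api-key')
#
#     # Report a handled exception explicitly:
#     client.notify(Exception('something went wrong'))
#
#     # Or report and re-raise matching exceptions from a block:
#     with client.capture((ValueError,)):
#         int('not a number')  # reported, then the ValueError propagates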
| 33.995595 | 98 | 0.594661 | 759 | 7,717 | 5.902503 | 0.225296 | 0.04933 | 0.022768 | 0.022321 | 0.243304 | 0.21875 | 0.148884 | 0.130134 | 0.076786 | 0.076786 | 0 | 0.001328 | 0.316963 | 7,717 | 226 | 99 | 34.146018 | 0.848606 | 0.17455 | 0 | 0.156716 | 0 | 0 | 0.039065 | 0.007355 | 0 | 0 | 0 | 0 | 0 | 1 | 0.141791 | false | 0.007463 | 0.067164 | 0.007463 | 0.30597 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d732c40de10d58d09ce7d71edeec18d0329d1895 | 499 | py | Python | gyoiboard/urls.py | gyoisamurai/GyoiBoard | aebd29820a39d1d88b9e5874b56f923b1c88d170 | [
"MIT"
] | 3 | 2021-07-03T12:41:39.000Z | 2021-11-18T17:23:08.000Z | gyoiboard/urls.py | gyoisamurai/GyoiBoard | aebd29820a39d1d88b9e5874b56f923b1c88d170 | [
"MIT"
] | null | null | null | gyoiboard/urls.py | gyoisamurai/GyoiBoard | aebd29820a39d1d88b9e5874b56f923b1c88d170 | [
"MIT"
] | 3 | 2021-06-12T13:25:48.000Z | 2021-09-30T19:06:49.000Z | from django.contrib import admin
from django.urls import path
from django.conf.urls import include, url

urlpatterns = [
    # Administrator.
    path('admin/', admin.site.urls),
    # Applications.
    path('atd/', include('atd.urls')),
    path('gyoithon/', include('gyoithon.urls')),
    # Authentication.
    path('api-auth/', include('rest_framework.urls')),
    url(r'^rest-auth/', include('rest_auth.urls')),
    url(r'^rest-auth/registration/', include('rest_auth.registration.urls')),
]
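
# Note: django.conf.urls.url() is deprecated since Django 3.1 and removed in
# Django 4.0; on newer versions the two url() entries above would use
# django.urls.re_path() with the same regex arguments, e.g.:
#
#     re_path(r'^rest-auth/', include('rest_auth.urls')),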
| 26.263158 | 77 | 0.669339 | 61 | 499 | 5.42623 | 0.377049 | 0.096677 | 0.090634 | 0.072508 | 0.096677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.150301 | 499 | 18 | 78 | 27.722222 | 0.78066 | 0.088176 | 0 | 0 | 0 | 0 | 0.31929 | 0.113082 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.272727 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d739fb2d6c6c68935b4d4618363aa26d4fee994e | 2,447 | py | Python | project/base/base_model.py | mvshalamov/tornado_tpl | 62795706b6c1064748fda4547225644d9acee9ac | [
"MIT"
] | null | null | null | project/base/base_model.py | mvshalamov/tornado_tpl | 62795706b6c1064748fda4547225644d9acee9ac | [
"MIT"
] | null | null | null | project/base/base_model.py | mvshalamov/tornado_tpl | 62795706b6c1064748fda4547225644d9acee9ac | [
"MIT"
] | null | null | null | import inspect
from .exceptions import ModelException
from .descriptors import BaseDescriptor


class ModelMeta(type):
    def __new__(mcs, name, bases, dct):
        fields = {
            attr_name: {
                "name_table_column": val.column_name, "value": None
            } for (attr_name, val) in dct.items() if isinstance(val, BaseDescriptor)
        }
        dct['fields'] = fields
        return type.__new__(mcs, name, bases, dct)


class Model(metaclass=ModelMeta):
    @classmethod
    def init_by_data(cls, init_data):
        attrs = [attr for attr in dir(cls) if not inspect.ismethod(attr)]
        obj = cls()
        for key, value in init_data.items():
            key = key.replace('.', '_')
            if key not in attrs:
                raise ModelException('Attribute - %s, not found in model' % key)
            setattr(obj, key, value)
        obj.initial_by_values()
        return obj

    def initial_by_values(self):
        """
        Called whenever the model's attributes change.
        :return: None
        """
        for key in self.fields:
            self.fields[key]['value'] = getattr(self, key)

    def list_keys_and_values(self):
        """
        :return: list of (column name, value) pairs for every field
        """
        return [(v['name_table_column'] if v['name_table_column'] else k, v['value'])
                for k, v in self.fields.items()]

    async def save(self, db, table_name):
        self.initial_by_values()
        values = self.list_keys_and_values()
        sql_data = """
            INSERT INTO {table_name} (
                {columns_names}
            ) VALUES (
                {variables}
            );
        """.format(
            variables=','.join('%s' for v in values),
            table_name=table_name,
            columns_names=','.join(v[0] for v in values)
        )
        conn = await db.getconn()
        with db.manage(conn):
            sql_data = conn.mogrify(
                sql_data, [v[1] for v in values]
            )
            res = await conn.execute(
                sql_data
            )
        return res

    @classmethod
    async def all(cls, db, table_name):
        sql_data = """
            select * from {table_name};
        """.format(
            table_name=table_name,
        )
        conn = await db.getconn()
        with db.manage(conn):
            sql_data = conn.mogrify(sql_data)
            res = await conn.execute(
                sql_data
            )
        return res.fetchall()
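
# A minimal usage sketch (hypothetical: assumes a BaseDescriptor subclass that
# sets the column_name attribute read by ModelMeta; 'TextField', 'User', and
# the field names below are illustrative, not part of this module):
#
#     class User(Model):
#         name = TextField(column_name='user_name')
#
#     user = User.init_by_data({'name': 'alice'})
#     # inside a coroutine: await user.save(db, 'users')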
| 27.806818 | 121 | 0.532897 | 281 | 2,447 | 4.455516 | 0.309609 | 0.057508 | 0.035942 | 0.028754 | 0.178914 | 0.15016 | 0.15016 | 0.15016 | 0.094249 | 0.094249 | 0 | 0.001271 | 0.356763 | 2,447 | 87 | 122 | 28.126437 | 0.794155 | 0.025337 | 0 | 0.21875 | 0 | 0 | 0.129614 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.046875 | 0 | 0.21875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d73c7b939f6234eb8ab0cc3a2f7c857f34ea23cf | 2,040 | py | Python | tif_plot/tif_plot.py | maak-sdu/pyScattData | c42af0ceb625a4ac74158cfa442b92215bd570ff | [
"BSD-3-Clause"
] | null | null | null | tif_plot/tif_plot.py | maak-sdu/pyScattData | c42af0ceb625a4ac74158cfa442b92215bd570ff | [
"BSD-3-Clause"
] | 4 | 2022-01-27T12:57:15.000Z | 2022-03-03T10:53:48.000Z | tif_plot/example/tif_plot.py | maak-sdu/pyScattData | c42af0ceb625a4ac74158cfa442b92215bd570ff | [
"BSD-3-Clause"
] | 1 | 2022-01-26T09:55:16.000Z | 2022-01-26T09:55:16.000Z | import sys
from pathlib import Path
import matplotlib.pyplot as plt
import fabio

DPI = 600
# FIGSIZE = (6, 4)
CMAP = "viridis"


def tif_plot(tif_files, output_folders):
    for f in tif_files:
        tif = fabio.open(f).data
        fig, ax = plt.subplots(dpi=DPI)  # , figsize=FIGSIZE)
        im = ax.imshow(tif, cmap=CMAP, aspect="equal")
        ax.tick_params(axis="x",
                       top=True,
                       bottom=False,
                       labeltop=True,
                       labelbottom=False)
        ax.tick_params(axis="y",
                       left=True,
                       right=False,
                       labelleft=True,
                       labelright=False)
        cbar = fig.colorbar(im, ax=ax)
        cbar.formatter.set_powerlimits((0, 0))
        for e in output_folders:
            plt.savefig(f"{e}/{f.stem}.{e}", bbox_inches="tight")
        plt.close()

    return None


def main():
    tif_path = Path.cwd() / "tif"
    if not tif_path.exists():
        tif_path.mkdir()
        print(f"{80*'-'}\nA folder called '{tif_path.name}' has been created.\n"
              f"Please place your .tif files there and rerun the program."
              f"\n{80*'-'}")
        sys.exit()
    tif_files = list(tif_path.glob("*.tif"))
    if len(tif_files) == 0:
        print(f"{80*'-'}\nNo .tif files were found in the '{tif_path.name}' "
              f"folder.\nPlease place your .{tif_path.name} files there and "
              f"rerun the program.\n{80*'-'}")
        sys.exit()
    output_folders = ["png",
                      # "pdf",
                      # "svg",
                      ]
    for e in output_folders:
        if not (Path.cwd() / e).exists():
            (Path.cwd() / e).mkdir()
    print(f"{80*'-'}\nPlotting .tif files...")
    tif_plot(tif_files, output_folders)
    print(f"{80*'-'}\nDone plotting.\n{80*'-'}\nPlease see the "
          f"{output_folders} folder(s).\n{80*'-'}")

    return None


if __name__ == "__main__":
    main()
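
# Expected layout when run as a script: input images in ./tif/*.tif, plots
# written to ./png/ (plus ./pdf/ and ./svg/ if those entries are uncommented
# in output_folders); fabio.open(...).data returns the image as a NumPy array.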
# End of file.
| 29.142857 | 80 | 0.509314 | 256 | 2,040 | 3.933594 | 0.421875 | 0.063555 | 0.031778 | 0.029791 | 0.093347 | 0.055611 | 0 | 0 | 0 | 0 | 0 | 0.017897 | 0.342647 | 2,040 | 69 | 81 | 29.565217 | 0.733035 | 0.029412 | 0 | 0.113208 | 0 | 0 | 0.228977 | 0.013678 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037736 | false | 0 | 0.075472 | 0 | 0.150943 | 0.075472 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d73d96d006dd6295167a81a32ad34a88d09d7b56 | 1,820 | py | Python | tests/integration/src/case/conf_replacer.py | divenswu/proximabilin | 9eb7db8f215e4ac5f43023c725fbc4997c2ccaee | [
"Apache-2.0"
] | 103 | 2021-09-30T03:54:41.000Z | 2022-03-30T09:05:11.000Z | tests/integration/src/case/conf_replacer.py | divenswu/proximabilin | 9eb7db8f215e4ac5f43023c725fbc4997c2ccaee | [
"Apache-2.0"
] | 10 | 2021-11-02T02:31:12.000Z | 2022-03-24T07:56:21.000Z | tests/integration/src/case/conf_replacer.py | divenswu/proximabilin | 9eb7db8f215e4ac5f43023c725fbc4997c2ccaee | [
"Apache-2.0"
] | 21 | 2021-10-18T04:35:48.000Z | 2022-03-29T08:04:38.000Z | # Copyright 2021 Alibaba, Inc. and its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys, time, os, json, configparser
import shutil


class ConfReplacer:
    def __init__(self, conf_path):
        self.conf_path = conf_path
        self.global_conf = os.path.join(conf_path, 'global.conf')
        self.cf = configparser.ConfigParser()

    def init(self):
        arr = self.cf.read(self.global_conf)
        if len(arr) == 0:
            print(self.global_conf)
            return False
        self.items = self.cf.items("common")
        return True

    def replace(self):
        files = os.listdir(self.conf_path)
        for f in files:
            if f.endswith('.tpl'):
                src_name = os.path.join(self.conf_path, f)
                dst_name = src_name[0:-4]
                shutil.copyfile(src_name, dst_name)
                for item in self.items:
                    cmd = "sed -i 's#${%s}#%s#g' %s" % (item[0], item[1], dst_name)
                    ret = os.system(cmd)
                    # print(cmd)
                    if ret != 0:
                        print(cmd)
                        return False
        return True


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print('usage: ./conf_replacer.py conf_directory')
        sys.exit(-1)
    replacer = ConfReplacer(sys.argv[1])
    ret = replacer.init()
    if not ret:
        sys.exit(-1)
    if not replacer.replace():
        sys.exit(-1)
    sys.exit(0)
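
# A minimal usage sketch (hypothetical file names): given conf/global.conf
#
#     [common]
#     port = 8080
#
# and a template conf/server.conf.tpl containing 'listen ${port}', running
# './conf_replacer.py conf' copies the template to conf/server.conf and the
# sed command rewrites it to 'listen 8080'.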
| 30.847458 | 74 | 0.654396 | 272 | 1,820 | 4.272059 | 0.448529 | 0.051635 | 0.041308 | 0.027539 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014265 | 0.22967 | 1,820 | 58 | 75 | 31.37931 | 0.814551 | 0.337363 | 0 | 0.179487 | 0 | 0 | 0.078086 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.051282 | 0 | 0.25641 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d73e9784ceb4dda4adde1e781728709480d12cec | 2,442 | py | Python | src/summe_eval_human_summary.py | mayu-ot/rethinking-evs | 7a3b05e63ba531c89ac022e3a98bfd37d22d60a2 | [
"MIT"
] | 59 | 2019-04-27T02:36:58.000Z | 2022-03-31T06:14:08.000Z | src/summe_eval_human_summary.py | ledduy610/rethinking-evs | 022e76005ca87bd2d01cd6d05e4ca3356ada179d | [
"MIT"
] | 5 | 2019-06-27T08:23:14.000Z | 2021-09-16T11:36:30.000Z | src/summe_eval_human_summary.py | ledduy610/rethinking-evs | 022e76005ca87bd2d01cd6d05e4ca3356ada179d | [
"MIT"
] | 8 | 2019-05-24T06:52:43.000Z | 2022-01-29T04:17:49.000Z | from tools.summarizer import summarize
from tools.io import load_summe_mat
from tools.segmentation import get_segment_summe
from joblib import Parallel, delayed
import pandas as pd
from sklearn.metrics import f1_score
import numpy as np


def get_summe_gssummary():
    summe_data = load_summe_mat('data/raw/summe/GT/')

    gold_standard = []
    for item in summe_data:
        user_anno = item['user_anno']
        user_anno = user_anno.T
        user_anno = user_anno.astype(bool)  # boolean per-frame selection masks
        gold_standard.append(
            {
                'gs_summary': user_anno,
                'video': item['video']
            }
        )
    return gold_standard


def summe_human_score(metric='mean'):
    gs_summary = get_summe_gssummary()
    num_videos = len(gs_summary)

    acc_mean = 0
    acc_min = 0
    acc_max = 0

    ds = []
    for item in gs_summary:
        gs_sum = item['gs_summary']
        N = len(gs_sum)
        gs_results = np.zeros((N,))
        # Leave-one-out: score each human summary against the other N - 1.
        for i in range(N):
            res = [f1_score(gs_sum[x], gs_sum[i]) for x in range(N) if x != i]
            if metric == 'mean':
                gs_results[i] = np.mean(res)
            elif metric == 'max':
                gs_results[i] = np.max(res)
            else:
                raise RuntimeError('unknown metric: %s' % metric)

        worst_human = gs_results.min()
        avr_human = gs_results.mean()
        best_human = gs_results.max()

        acc_mean += avr_human
        acc_min += worst_human
        acc_max += best_human

        ds.append({'video': item['video'],
                   'metric': metric,
                   'worst_human': worst_human,
                   'avg_human': avr_human,
                   'best_human': best_human})

    return pd.DataFrame(ds)


def main():
    df_mean = summe_human_score('mean')
    print("""
    Scores by taking the 'average' over all reference summaries
    (Reproduction of human scores in the original SumMe paper)
    """)
    print(df_mean[['worst_human', 'avg_human', 'best_human']].mean(axis=0))

    df_max = summe_human_score('max')
    print("""
    Scores by taking the 'maximum' over all reference summaries
    """)
    print(df_max[['worst_human', 'avg_human', 'best_human']].mean(axis=0))

    df = pd.concat([df_mean, df_max])
    df.to_csv('data/processed/summe_human_eval.csv')
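
# Evaluation protocol implemented above: for each video, every one of the N
# human reference summaries is scored with f1_score against the remaining
# N - 1, aggregated per summary with 'mean' (the original SumMe protocol) or
# 'max', and then averaged across videos in main().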

if __name__ == '__main__':
    main()
| 27.75 | 78 | 0.568796 | 313 | 2,442 | 4.159744 | 0.309904 | 0.043011 | 0.043011 | 0.036866 | 0.092166 | 0.058372 | 0.058372 | 0.058372 | 0.058372 | 0.058372 | 0 | 0.004232 | 0.322686 | 2,442 | 88 | 79 | 27.75 | 0.78295 | 0 | 0 | 0.058824 | 0 | 0 | 0.173966 | 0.014327 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044118 | false | 0 | 0.102941 | 0 | 0.176471 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0