hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0c714b1ddddc46eec5e3ba7e352040969745d64a | 4,072 | py | Python | Applications/digit_visualization.py | kevin-linps/Handwritten-digits-recognition | 631145b7dee6c57e6288249cdc4d5d14daa3b789 | [
"MIT"
] | null | null | null | Applications/digit_visualization.py | kevin-linps/Handwritten-digits-recognition | 631145b7dee6c57e6288249cdc4d5d14daa3b789 | [
"MIT"
] | null | null | null | Applications/digit_visualization.py | kevin-linps/Handwritten-digits-recognition | 631145b7dee6c57e6288249cdc4d5d14daa3b789 | [
"MIT"
] | null | null | null | from sklearn.datasets import load_digits
from joblib import load
import numpy as np
import tensorflow as tf
from random import randint
from tkinter import *
from tkinter.ttk import *
class DigitVisualizer(object):
def __init__(self):
self.root = Tk()
        self.options = ["", "scikit-learn", "MNIST"]
self.database = StringVar(self.root)
self.database.set(self.options[0])
self.Fr = Frame(self.root)
self.L1 = Label(self.Fr, text="Data set:")
self.L2 = Label(self.Fr, text="Answer:", width=10)
self.L3 = Label(self.Fr, text="Model's Guess:", width=15)
self.Op = OptionMenu(self.Fr, self.database, *self.options)
self.B1 = Button(self.Fr, text="Choose an image", command=self.choose_image)
self.Ca = Canvas(self.root, width=450, height=450, bg="white")
self.pixels = []
self.setup()
def setup(self):
        # Construct the screen
self.root.resizable(width=False, height=False)
self.root.geometry("500x500")
self.root.title("Handwritten Digits Visualizer")
self.Op.config(width=10)
# Place the widgets on the screen
self.L1.pack(side=LEFT)
self.Op.pack(side=LEFT)
self.L2.pack(side=LEFT, padx=5)
self.L3.pack(side=LEFT, padx=5)
self.B1.pack(side=LEFT, padx=5)
self.Fr.pack(pady=3)
self.Ca.pack(pady=3)
self.root.mainloop()
def choose_image(self):
if self.database.get() == "scikit-learn":
            n = randint(0, 1796)  # 1,797 samples, valid indices 0-1796 (randint is inclusive)
digit = self.predict_digit(n)
self.L2.config(text = "Answer: " + str(sk_y[n]))
self.L3.config(text = "Model's Guess: " + str(digit))
self.paint_digit(n)
elif self.database.get() == "MNIST":
            n = randint(0, 69999)  # 70,000 samples, valid indices 0-69999
digit = self.predict_digit(n)
self.L2.config(text = "Answer: " + str(tf_y[n]))
self.L3.config(text = "Model's Guess: " + str(digit))
self.paint_digit(n)
def predict_digit(self, n):
if self.database.get() == "scikit-learn":
data = sk_x[n].flatten()
return sk_model.predict([data])[0]
elif self.database.get() == "MNIST":
data = tf_x[n].reshape(1, 28, 28, 1)/255
respond = tf_model.predict(data)
return np.argmax(respond)
def paint_digit(self, n):
for item in self.pixels:
self.Ca.delete(item)
self.pixels = []
if self.database.get() == "scikit-learn":
grid = sk_x[n]
L = 450/8
elif self.database.get() == "MNIST":
grid = tf_x[n]
L = 450/28
for h in range(len(grid)):
for w in range(len(grid[0])):
if grid[h][w] != 0:
colour = self.create_colour_code(grid[h][w])
rect = self.Ca.create_rectangle(w*L, h*L, (w+1)*L, (h+1)*L,
fill = colour, outline = colour)
self.pixels.append(rect)
def create_colour_code(self, number):
if self.database.get() == "scikit-learn":
number *= 16
H = hex(abs(255-number))[2:]
if (len(H) == 1):
H = "0" + H
return "#" + H * 3
if __name__ == "__main__":
# Load models externally so they are loaded when the screen is up
sk_model = load('svm_model.joblib')
    tf_model = tf.keras.models.load_model('CNN_model.h5')
# Load the digits database from scikit-learn
sk_digits = load_digits()
sk_x = sk_digits.data.reshape(1797, 8, 8).astype('int')
sk_y = sk_digits.target
# Load MNIST database using TensorFlow
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
tf_x = np.concatenate((X_train, X_test))
tf_y = np.concatenate((y_train, y_test))
DigitVisualizer() | 32.31746 | 85 | 0.547151 | 542 | 4,072 | 4.005535 | 0.282288 | 0.055274 | 0.048365 | 0.031322 | 0.204975 | 0.17181 | 0.091202 | 0.091202 | 0.091202 | 0.091202 | 0 | 0.030935 | 0.317289 | 4,072 | 126 | 86 | 32.31746 | 0.75 | 0.048379 | 0 | 0.166667 | 0 | 0 | 0.067557 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.077778 | 0 | 0.188889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c74bacb20ff5caf67e57505ff6e9b9042ab5e62 | 1,419 | py | Python | modules/nasa/nasa.py | BenjamooseCalto/SlasherBot | e43414db795df991ce0b3fa97eb71a6889a7bd1d | [
"MIT"
] | 1 | 2021-11-17T16:25:52.000Z | 2021-11-17T16:25:52.000Z | modules/nasa/nasa.py | BenjamooseCalto/SlasherBot | e43414db795df991ce0b3fa97eb71a6889a7bd1d | [
"MIT"
] | null | null | null | modules/nasa/nasa.py | BenjamooseCalto/SlasherBot | e43414db795df991ce0b3fa97eb71a6889a7bd1d | [
"MIT"
] | null | null | null | import requests
import json
import os
from dotenv import load_dotenv
from datetime import date
load_dotenv()
NASA_API_KEY = os.getenv("NASA_API_KEY")
DIR = os.path.dirname(__file__)
DATA_DIR = os.path.join(DIR, "data")
TODAY = date.today()
# astronomy picture of the day
class Apod:
def __init__(self):
self.nasa_url = "https://api.nasa.gov/planetary/apod"
self.data_file = os.path.join(DATA_DIR, "apod.json")
def __req(self):
apod = requests.get(f"{self.nasa_url}?api_key={NASA_API_KEY}").json()
with open(self.data_file, "w") as file:
json.dump(apod, file, indent=4)
self.__index(apod)
def __index(self, apod):
self.copyright = apod["copyright"] if "copyright" in apod else None
self.date = apod["date"]
self.explanation = apod["explanation"]
self.url = apod["hdurl"] if "hdurl" in apod else apod["url"]
self.media_type = apod["media_type"]
self.service_version = apod["service_version"]
self.title = apod["title"]
def get_apod(self):
with open(self.data_file, "r") as file:
data = json.load(file)
if data["date"] != TODAY:
self.__req()
return "retrieved new data"
else:
self.__index(data)
return "used existing data"
def main():
pass
if __name__ == "__main__":
pass
| 27.288462 | 77 | 0.606765 | 192 | 1,419 | 4.239583 | 0.34375 | 0.029484 | 0.036855 | 0.039312 | 0.04914 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000961 | 0.266385 | 1,419 | 51 | 78 | 27.823529 | 0.78098 | 0.019732 | 0 | 0.05 | 0 | 0 | 0.161267 | 0.027358 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0.05 | 0.125 | 0 | 0.325 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c74f5f4eb195fd1ee610295ead2e9842469804d | 582 | py | Python | src/arg_parser.py | apj-graham/TextCensor | cb278639e37ec8735feacdad430ba8ee3f8d835d | [
"MIT"
] | null | null | null | src/arg_parser.py | apj-graham/TextCensor | cb278639e37ec8735feacdad430ba8ee3f8d835d | [
"MIT"
] | null | null | null | src/arg_parser.py | apj-graham/TextCensor | cb278639e37ec8735feacdad430ba8ee3f8d835d | [
"MIT"
] | null | null | null | """ Command line arg parser"""
import argparse
def parse_arguments():
"""Process command line arguments using Python standard argument parser
Returns:
Parsed args: list
            List of arguments entered in the command line
"""
parser = argparse.ArgumentParser(description="Text Sensorer")
parser.add_argument(
"banned_words_file",
type=str,
help="Text file with a lost of banned words, one per line",
)
parser.add_argument("document", type=str, help="Text file with text to sensor")
return parser.parse_args()
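# Example invocation (sketch):
#   python arg_parser.py banned_words.txt document.txt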
| 29.1 | 83 | 0.67354 | 73 | 582 | 5.287671 | 0.589041 | 0.085492 | 0.088083 | 0.07772 | 0.119171 | 0.119171 | 0 | 0 | 0 | 0 | 0 | 0 | 0.238832 | 582 | 19 | 84 | 30.631579 | 0.871332 | 0.30756 | 0 | 0 | 0 | 0 | 0.315508 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c75b21afe74cd6ad541abc3a4cc3156f9fad434 | 1,646 | py | Python | climate.py | STRIDES-Codes/A-culturally-humble-and-affordable-digital-platform-to-empower-at-risk-families | 9174ab2a795c0bf4425af80afe7e63346519836c | [
"MIT"
] | 1 | 2021-06-24T17:26:11.000Z | 2021-06-24T17:26:11.000Z | climate.py | STRIDES-Codes/A-culturally-humble-and-affordable-digital-platform-to-empower-at-risk-families | 9174ab2a795c0bf4425af80afe7e63346519836c | [
"MIT"
] | 1 | 2021-06-22T18:53:32.000Z | 2021-06-22T18:53:32.000Z | climate.py | STRIDES-Codes/A-culturally-humble-and-affordable-digital-platform-to-empower-at-risk-families | 9174ab2a795c0bf4425af80afe7e63346519836c | [
"MIT"
] | 2 | 2021-06-22T18:11:22.000Z | 2021-06-22T18:55:34.000Z | import requests
import os
from datetime import datetime
#user_api = os.environ["70e13cd941f66d90de87185e335ed7d7"]
location = input("Enter the city name: ")
complete_api_link = "https://api.openweathermap.org/data/2.5/weather?q="+location+"&appid="+"70e13cd941f66d90de87185e335ed7d7"
api_link = requests.get(complete_api_link)
api_data = api_link.json()
#create variables to store and display data
temp_city = (((api_data['main']['temp']) - 273.15)*(9/5) +32)
weather_desc = api_data['weather'][0]['description']
hmdt = api_data['main']['humidity']
wind_spd = (api_data['wind']['speed']*2.237)  # API returns m/s by default; convert to mph
date_time = datetime.now().strftime("%d %b %Y | %I:%M:%S %p")
t_min = (((api_data['main']['temp_min']) - 273.15)*(9/5) +32)
t_max = (((api_data['main']['temp_max']) - 273.15)*(9/5) +32)
category = []
print ("-------------------------------------------------------------")
print ("Weather Stats for - {} || {}".format(location.upper(), date_time))
print ("-------------------------------------------------------------")
print ("Current temperature is: {:.2f} deg F".format(temp_city))
print ("Current weather desc :",weather_desc)
print ("Current Humidity :",hmdt, '%')
print ("Current wind speed :{:.2f} mph".format(wind_spd))
print ("Today's minimum temperature is {:.2f} deg F ".format(t_min))
print ("Today's maximum temperature is {:.2f} deg F ".format(t_max))
if temp_city > 75 and hmdt > 68:
category = 1
elif temp_city < 75 and hmdt < 68:
category = 2
elif temp_city > 75 and hmdt < 68:
category = 3
elif temp_city < 75 and hmdt > 68:
category = 4
else:
category = 5
print("Categorization #:",category)
| 34.291667 | 126 | 0.622722 | 227 | 1,646 | 4.374449 | 0.387665 | 0.049345 | 0.04431 | 0.052367 | 0.225579 | 0.198389 | 0.173212 | 0.093656 | 0 | 0 | 0 | 0.070728 | 0.132442 | 1,646 | 47 | 127 | 35.021277 | 0.62465 | 0.060146 | 0 | 0.057143 | 0 | 0 | 0.373947 | 0.099806 | 0.028571 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.085714 | 0 | 0.085714 | 0.285714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c7864b4d2e0901902c3b5d32f59abe8511bfc96 | 4,008 | py | Python | email_parser/model.py | KeepSafe/ks-email-parser | 10997e949fe07add28fed5644076174baf715f7a | [
"Apache-2.0"
] | 10 | 2015-09-05T09:04:23.000Z | 2021-09-14T03:13:19.000Z | email_parser/model.py | KeepSafe/ks-email-parser | 10997e949fe07add28fed5644076174baf715f7a | [
"Apache-2.0"
] | 34 | 2015-07-23T02:21:25.000Z | 2021-12-13T19:45:17.000Z | email_parser/model.py | KeepSafe/ks-email-parser | 10997e949fe07add28fed5644076174baf715f7a | [
"Apache-2.0"
] | 3 | 2015-09-05T09:04:25.000Z | 2018-10-16T21:03:43.000Z | from collections import namedtuple
from enum import Enum
import string
import json
class PlaceholderType(Enum):
attribute = 'attribute'
raw = 'raw'
text = 'text'
image = 'image'
bitmap = 'bitmap'
class EmailType(Enum):
marketing = 'marketing'
transactional = 'transactional'
Email = namedtuple('Email', ['name', 'locale', 'path'])
Template = namedtuple('Template', ['name', 'styles_names', 'styles', 'content', 'placeholders', 'type'])
class MetaPlaceholder:
def __init__(self, name, my_type=PlaceholderType.text, attributes=None):
self.name = name
self.type = my_type
self.attributes = attributes
def __getstate__(self):
state = dict(self.__dict__)
state['type'] = state['type'].value
return state
def __eq__(self, other):
try:
my_state = self.__getstate__()
other_state = other.__getstate__()
return my_state == other_state
except Exception:
return False
class Placeholder:
def __init__(self, name, content, is_global=False, p_type=PlaceholderType.text, variants=None, opt_attr=None):
self.name = name
self.is_global = is_global
self.type = p_type
self._content = content
self.variants = variants or {}
self._opt_attr = opt_attr
def __iter__(self):
attributes = dict(self.__dict__)
if self._opt_attr:
attributes.update(self._opt_attr)
for k, v in attributes.items():
            if k == 'type':  # compare strings by value, not identity
                yield k, self.type.value
            elif k == '_content':
                yield 'content', v
            elif k == '_opt_attr':
continue
else:
yield k, v
def __eq__(self, other):
return dict(self) == dict(other)
def get_content(self, variant=None):
if variant:
return self.variants.get(variant, self._content)
else:
return self._content
def pick_variant(self, variant):
self._content = self.get_content(variant)
self.variants = {}
return self
class ModelJsonEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, MetaPlaceholder):
as_dict = o.__getstate__()
return as_dict
return super().default(o)
class BitmapPlaceholder(Placeholder):
def __init__(self, name, bitmap_id, src, alt=None, is_global=False, variants=None, **opt_attr):
self.name = name
self.id = bitmap_id
self.src = src
self.alt = alt
self.is_global = is_global
self.type = PlaceholderType.bitmap
self.variants = variants or {}
self._opt_attr = opt_attr
def get_content(self, variant=None):
wrapper = string.Template("""<div class="bitmap-wrapper" style="$style">\n\t\t$img\n\t</div>""")
img = string.Template("""<img id="$id" src="$src"$optional style="max-width: 100%;$img_style"/>""")
mapping = dict(self._opt_attr)
optional = ""
div_style = "vertical-align: middle;text-align: center;"
constraints = ""
if self.alt:
optional += " alt=\"{}\"".format(self.alt)
for style_tag in ['max-width', 'max-height']:
if style_tag in mapping:
constraints += "{}: {};".format(style_tag, mapping[style_tag])
mapping.update({
'style': div_style + constraints,
'id': self.id,
'optional': optional,
'src': self.src,
'img_style': constraints
})
mapping['img'] = img.substitute(mapping)
final_wrapper = wrapper.substitute(mapping)
return final_wrapper
def set_attr(self, attr):
self._opt_attr = attr
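# Example (sketch): rendering a bitmap placeholder to its HTML snippet.
#   bmp = BitmapPlaceholder('logo', 'img-logo', 'cid:logo.png', alt='Logo')
#   html = bmp.get_content()  # '<div class="bitmap-wrapper" ...><img .../></div>'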
class MissingPatternParamError(Exception):
pass
class MissingSubjectError(Exception):
pass
class MissingTemplatePlaceholderError(Exception):
pass
class RenderingError(Exception):
pass
| 28.225352 | 114 | 0.598303 | 448 | 4,008 | 5.118304 | 0.236607 | 0.03358 | 0.028783 | 0.019625 | 0.123419 | 0.085041 | 0.061928 | 0.037505 | 0.037505 | 0.037505 | 0 | 0.00105 | 0.286926 | 4,008 | 141 | 115 | 28.425532 | 0.80126 | 0 | 0 | 0.171171 | 0 | 0.018018 | 0.098303 | 0.014222 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108108 | false | 0.036036 | 0.036036 | 0.009009 | 0.387387 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c7f8b7cb506a5896f0b6519e5c5e507dc48f95c | 289 | py | Python | ffmpeg_sample/test03_pydub_mp4_to_mp3.py | amaraimusi/python_sample | 7d5524b3d42ecfeff89fbfb8fe2c2c61a18143ac | [
"MIT"
] | null | null | null | ffmpeg_sample/test03_pydub_mp4_to_mp3.py | amaraimusi/python_sample | 7d5524b3d42ecfeff89fbfb8fe2c2c61a18143ac | [
"MIT"
] | null | null | null | ffmpeg_sample/test03_pydub_mp4_to_mp3.py | amaraimusi/python_sample | 7d5524b3d42ecfeff89fbfb8fe2c2c61a18143ac | [
"MIT"
] | null | null | null | print ('pydubでmp4からmp3を抽出: pydubでmp4を読み込み,mp3としてエクスポートする')
# pydubを使うにはffmpegが必要-
from pydub import AudioSegment
# Load the mp4 file
audio = AudioSegment.from_file("./test_data/MVI_0887.MP4", "mp4")
# Export as mp3
audio.export("./test_data/output03.mp3", format="mp3")
print('Success!')
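# Optional (sketch): export accepts extra kwargs such as bitrate, e.g.
#   audio.export("./test_data/output03.mp3", format="mp3", bitrate="192k")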
| 20.642857 | 65 | 0.771626 | 31 | 289 | 7.064516 | 0.709677 | 0.073059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.060606 | 0.086505 | 289 | 13 | 66 | 22.230769 | 0.768939 | 0.16609 | 0 | 0 | 0 | 0 | 0.466102 | 0.326271 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.4 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c7ff7c2f5ff5e4caa0296f3203ae31f6342578a | 502 | py | Python | app/views/__init__.py | healplease/dota_2_responses_bot | 866fd0f81fa462f6df7c71bcfbd6f5fd0eae654b | [
"MIT"
] | null | null | null | app/views/__init__.py | healplease/dota_2_responses_bot | 866fd0f81fa462f6df7c71bcfbd6f5fd0eae654b | [
"MIT"
] | null | null | null | app/views/__init__.py | healplease/dota_2_responses_bot | 866fd0f81fa462f6df7c71bcfbd6f5fd0eae654b | [
"MIT"
] | null | null | null | from flask import Blueprint, request, abort
import telebot
from app import bot
from config import Config
main_bp = Blueprint("bot", __name__)
@main_bp.route(f"/{Config.TELEGRAM_API_TOKEN}", methods=["POST"])
def webhook():
if request.headers.get('content-type') == 'application/json':
json_string = request.get_data(as_text=True)
update = telebot.types.Update.de_json(json_string)
bot.bot.process_new_updates([update])
return '', 200
else:
abort(403)
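# Telegram must be pointed at this endpoint once, e.g. (sketch; the host is a
# placeholder):
#   bot.bot.set_webhook(url=f"https://<your-host>/{Config.TELEGRAM_API_TOKEN}")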
| 27.888889 | 65 | 0.693227 | 68 | 502 | 4.897059 | 0.647059 | 0.036036 | 0.084084 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014599 | 0.181275 | 502 | 17 | 66 | 29.529412 | 0.79562 | 0 | 0 | 0 | 0 | 0 | 0.125498 | 0.055777 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.285714 | 0 | 0.428571 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c8109850360df8a5c2ea892504f91b7344fc373 | 4,865 | py | Python | nasdaqdatalink/util.py | edvn0/data-link-python | 1920b0d71f6068b14854493cfd1879bd67479581 | [
"MIT"
] | 1,178 | 2015-07-23T07:49:48.000Z | 2022-03-30T19:11:15.000Z | nasdaqdatalink/util.py | edvn0/data-link-python | 1920b0d71f6068b14854493cfd1879bd67479581 | [
"MIT"
] | 105 | 2015-07-31T18:43:51.000Z | 2022-02-27T18:00:22.000Z | nasdaqdatalink/util.py | edvn0/data-link-python | 1920b0d71f6068b14854493cfd1879bd67479581 | [
"MIT"
] | 330 | 2015-08-13T16:28:12.000Z | 2022-03-15T19:34:42.000Z | from inflection import parameterize
import dateutil.parser
import re
import pandas
from six import string_types
class Util(object):
@staticmethod
def constructed_path(path, params={}):
for key in list(params.copy().keys()):
modified_path = path.replace(":%s" % key, str(params[key]))
if modified_path != path:
params.pop(key, None)
path = modified_path
return path
# http://stackoverflow.com/questions/38987/how-can-i-merge-two-python-dictionaries-in-a-single-expression
@staticmethod
def merge_to_dicts(x, y):
z = x.copy()
z.update(y)
return z
@staticmethod
def methodize(string):
return parameterize(string.replace(r'.', '')).replace(r'-', '_')
@staticmethod
def merge_options(key, dic, **options):
updated = dic
# try to merge if options already has key, otherwise just assign
if key in options:
# respect values in options over hash
updated = Util.merge_to_dicts(dic, options[key])
options[key] = updated
return options
@staticmethod
def convert_to_dates(dic_or_list):
if isinstance(dic_or_list, dict):
for k, v in list(dic_or_list.items()):
dic_or_list[k] = Util.convert_to_dates(v)
elif isinstance(dic_or_list, list):
for idx, v in enumerate(dic_or_list):
dic_or_list[idx] = Util.convert_to_dates(v)
else:
return Util.convert_to_date(dic_or_list)
return dic_or_list
@staticmethod
def convert_to_date(value):
if isinstance(value, string_types) and re.search(r'^\d{4}-\d{2}-\d{2}$', value):
# convert to datetime.date
return dateutil.parser.parse(value).date()
elif isinstance(value, string_types) and re.search(r'^\d{4}-\d{2}-\d{2}T[\d:\.]+Z$', value):
# convert to datetime.datetime, default timezone is utc
return dateutil.parser.parse(value)
else:
return value
@staticmethod
def convert_options(request_type, **options):
if request_type == 'get':
return Util._convert_options_for_get_request(**options)
elif request_type == 'post':
return Util._convert_options_for_post_request(**options)
else:
raise Exception('Can only convert options for get or post requests')
@staticmethod
def _convert_options_for_get_request(**options):
new_options = dict()
if 'params' in options.keys():
for key, value in options['params'].items():
is_dict = False
if isinstance(value, (list, pandas.Series)):
key = key + '[]'
else:
if isinstance(value, dict) and value != {}:
new_value = dict()
is_dict = True
old_key = key
for k, v in value.items():
key = key + '.' + k
if isinstance(v, list):
key = key + '[]'
new_value[key] = v
key = old_key
if is_dict:
new_options.update(new_value)
else:
new_options[key] = value
return {'params': new_options}
@staticmethod
def _convert_options_for_post_request(**options):
new_options = dict()
if 'params' in options.keys():
for key, value in options['params'].items():
if isinstance(value, dict) and value != {}:
new_value = dict()
is_dict = True
old_key = key
for k, v in value.items():
key = key + '.' + k
new_value[key] = v
key = old_key
else:
is_dict = False
if is_dict:
new_options.update(new_value)
else:
new_options[key] = value
return {'json': new_options}
@staticmethod
def convert_to_columns_list(meta, type):
columns = []
for key in meta:
columns.extend([key[type]])
return columns
@staticmethod
def convert_column_names(meta):
if meta is None:
return []
# Dataset API call
if 'column_names' in meta.keys():
the_list = [Util.methodize(x) for x in meta['column_names']]
return list(the_list)
# Datatable API call
elif 'columns' in meta.keys():
return list([Util.methodize(x) for x in meta['columns']])
else:
return []
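# Example (sketch): constructed_path substitutes ':key' segments from params
# and removes the consumed keys:
#   Util.constructed_path('datasets/:code/data', {'code': 'WIKI'})
#   # -> 'datasets/WIKI/data'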
| 34.503546 | 109 | 0.529496 | 555 | 4,865 | 4.464865 | 0.214414 | 0.066586 | 0.032688 | 0.035109 | 0.390234 | 0.300242 | 0.255851 | 0.238902 | 0.216303 | 0.216303 | 0 | 0.003592 | 0.370606 | 4,865 | 140 | 110 | 34.75 | 0.805683 | 0.065159 | 0 | 0.452991 | 0 | 0 | 0.04141 | 0.006388 | 0 | 0 | 0 | 0 | 0 | 1 | 0.094017 | false | 0 | 0.042735 | 0.008547 | 0.299145 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c859ef2fdb30334826347aa98bfe2eb542cf971 | 1,283 | py | Python | dymos/examples/aircraft_steady_flight/range_rate_comp.py | kaushikponnapalli/dymos | 3fba91d0fc2c0e8460717b1bec80774676287739 | [
"Apache-2.0"
] | 104 | 2018-09-08T16:52:27.000Z | 2022-03-10T23:35:30.000Z | dymos/examples/aircraft_steady_flight/range_rate_comp.py | kaushikponnapalli/dymos | 3fba91d0fc2c0e8460717b1bec80774676287739 | [
"Apache-2.0"
] | 628 | 2018-06-27T20:32:59.000Z | 2022-03-31T19:24:32.000Z | dymos/examples/aircraft_steady_flight/range_rate_comp.py | kaushikponnapalli/dymos | 3fba91d0fc2c0e8460717b1bec80774676287739 | [
"Apache-2.0"
] | 46 | 2018-06-27T20:54:07.000Z | 2021-12-19T07:23:32.000Z | import numpy as np
import openmdao.api as om
class RangeRateComp(om.ExplicitComponent):
"""
Calculates range rate based on true airspeed and flight path angle.
"""
def initialize(self):
self.options.declare('num_nodes', types=int)
def setup(self):
nn = self.options['num_nodes']
self.add_input('TAS', val=np.ones(nn), desc='True airspeed', units='m/s')
self.add_input('gam', val=np.zeros(nn), desc='Flight path angle', units='rad')
self.add_output('dXdt:range', val=np.ones(nn), desc='Velocity along the ground (no wind)',
units='m/s')
# Setup partials
ar = np.arange(self.options['num_nodes'])
self.declare_partials(of='*', wrt='*', dependent=False)
self.declare_partials(of='dXdt:range', wrt='TAS', rows=ar, cols=ar)
self.declare_partials(of='dXdt:range', wrt='gam', rows=ar, cols=ar)
def compute(self, inputs, outputs):
TAS = inputs['TAS']
gam = inputs['gam']
outputs['dXdt:range'] = TAS*np.cos(gam)
def compute_partials(self, inputs, partials):
TAS = inputs['TAS']
gam = inputs['gam']
partials['dXdt:range', 'TAS'] = np.cos(gam)
partials['dXdt:range', 'gam'] = -TAS * np.sin(gam)
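    # dXdt:range = TAS * cos(gam) is the along-ground component of true
    # airspeed (no wind); the partials above are its exact derivatives:
    # d/dTAS = cos(gam) and d/dgam = -TAS * sin(gam).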
| 32.075 | 98 | 0.600935 | 174 | 1,283 | 4.373563 | 0.37931 | 0.070959 | 0.074901 | 0.082786 | 0.302234 | 0.202365 | 0.086728 | 0 | 0 | 0 | 0 | 0 | 0.234606 | 1,283 | 39 | 99 | 32.897436 | 0.774949 | 0.064692 | 0 | 0.166667 | 0 | 0 | 0.163007 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.083333 | 0 | 0.291667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c88a1e0585372d087cb7550c2e1a04e8a26f2ca | 1,408 | py | Python | detect_secrets/plugins/artifactory.py | flyingbarron/detect-secrets | 5f9887179794ce037d97c1b343623eb5937ce800 | [
"Apache-2.0"
] | null | null | null | detect_secrets/plugins/artifactory.py | flyingbarron/detect-secrets | 5f9887179794ce037d97c1b343623eb5937ce800 | [
"Apache-2.0"
] | null | null | null | detect_secrets/plugins/artifactory.py | flyingbarron/detect-secrets | 5f9887179794ce037d97c1b343623eb5937ce800 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
import re
import requests
from .base import RegexBasedDetector
from detect_secrets.core.constants import VerifiedResult
class ArtifactoryDetector(RegexBasedDetector):
"""Scans for Artifactory credentials."""
secret_type = 'Artifactory Credentials'
denylist = [
# artifactory tokens begin with AKC
re.compile(r'(?:(?<==|:|")|(?<=\s)|(?<=^))AKC[a-zA-Z0-9]{10,}'), # api token
# artifactory encrypted passwords begin with AP[A-Z]
re.compile(r'(?:(?<==|:|")|(?<=\s)|(?<=^))AP[\dABCDEF][a-zA-Z0-9]{8,}'), # password
]
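    # The leading (?:(?<==|:|")|(?<=\s)|(?<=^)) group only matches tokens that
    # are preceded by '=', ':', '"', whitespace, or the start of the line,
    # which cuts down on false positives inside longer identifiers.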
artifactory_url = 'na.artifactory.swg-devops.com/artifactory'
def verify(self, token, **kwargs):
try:
            if isinstance(token, bytes):
token = token.decode('UTF-8')
headers = {'X-JFrog-Art-API': token}
response = requests.get(
'https://%s/api/system/ping' % self.artifactory_url,
headers=headers,
)
if response.status_code == 200:
return VerifiedResult.VERIFIED_TRUE
elif response.status_code == 401 or response.status_code == 403:
return VerifiedResult.VERIFIED_FALSE
else:
return VerifiedResult.UNVERIFIED
except requests.exceptions.RequestException:
return VerifiedResult.UNVERIFIED
| 34.341463 | 94 | 0.598722 | 145 | 1,408 | 5.717241 | 0.565517 | 0.096502 | 0.065139 | 0.026538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016505 | 0.268466 | 1,408 | 40 | 95 | 35.2 | 0.78835 | 0.098722 | 0 | 0.068966 | 0 | 0 | 0.169841 | 0.115079 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.172414 | 0 | 0.482759 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c8c5dd8b8a7b4a84ff0b5f8ea3221747c7f6f98 | 13,822 | py | Python | implementations/cyclegan/cyclegan.py | Lmy0217/PyTorch-GAN | fa6317f8ecffddeff88caccfbe9d581b2b0c342c | [
"MIT"
] | null | null | null | implementations/cyclegan/cyclegan.py | Lmy0217/PyTorch-GAN | fa6317f8ecffddeff88caccfbe9d581b2b0c342c | [
"MIT"
] | null | null | null | implementations/cyclegan/cyclegan.py | Lmy0217/PyTorch-GAN | fa6317f8ecffddeff88caccfbe9d581b2b0c342c | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch.nn.functional as F
import torch
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0.0)
##############################
# RESNET
##############################
class ResidualBlock(nn.Module):
def __init__(self, in_features):
super(ResidualBlock, self).__init__()
conv_block = [ nn.ReflectionPad2d(1),
nn.Conv2d(in_features, in_features, 3),
nn.InstanceNorm2d(in_features),
nn.ReLU(inplace=True),
nn.ReflectionPad2d(1),
nn.Conv2d(in_features, in_features, 3),
nn.InstanceNorm2d(in_features) ]
self.conv_block = nn.Sequential(*conv_block)
def forward(self, x):
return x + self.conv_block(x)
class GeneratorResNet(nn.Module):
def __init__(self, in_channels=3, out_channels=3, res_blocks=9):
super(GeneratorResNet, self).__init__()
# Initial convolution block
model = [ nn.ReflectionPad2d(3),
nn.Conv2d(in_channels, 64, 7),
nn.InstanceNorm2d(64),
nn.ReLU(inplace=True) ]
# Downsampling
in_features = 64
out_features = in_features*2
for _ in range(2):
model += [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.InstanceNorm2d(out_features),
nn.ReLU(inplace=True) ]
in_features = out_features
out_features = in_features*2
# Residual blocks
for _ in range(res_blocks):
model += [ResidualBlock(in_features)]
# Upsampling
out_features = in_features//2
for _ in range(2):
model += [ nn.ConvTranspose2d(in_features, out_features, 3, stride=2, padding=1, output_padding=1),
nn.InstanceNorm2d(out_features),
nn.ReLU(inplace=True) ]
in_features = out_features
out_features = in_features//2
# Output layer
model += [ nn.ReflectionPad2d(3),
nn.Conv2d(64, out_channels, 7),
nn.Tanh() ]
self.model = nn.Sequential(*model)
def forward(self, x):
return self.model(x)
##############################
# Discriminator
##############################
class Discriminator(nn.Module):
def __init__(self, in_channels=3):
super(Discriminator, self).__init__()
def discriminator_block(in_filters, out_filters, normalize=True):
"""Returns downsampling layers of each discriminator block"""
layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]
if normalize:
layers.append(nn.InstanceNorm2d(out_filters))
layers.append(nn.LeakyReLU(0.2, inplace=True))
return layers
self.model = nn.Sequential(
*discriminator_block(in_channels, 64, normalize=False),
*discriminator_block(64, 128),
*discriminator_block(128, 256),
*discriminator_block(256, 512),
nn.ZeroPad2d((1, 0, 1, 0)),
nn.Conv2d(512, 1, 4, padding=1)
)
def forward(self, img):
return self.model(img)
import random
import time
import datetime
import sys
from torch.autograd import Variable
import torch
import numpy as np
from torchvision.utils import save_image
class ReplayBuffer():
def __init__(self, max_size=50):
assert (max_size > 0), 'Empty buffer or trying to create a black hole. Be careful.'
self.max_size = max_size
self.data = []
def push_and_pop(self, data):
to_return = []
for element in data.data:
element = torch.unsqueeze(element, 0)
if len(self.data) < self.max_size:
self.data.append(element)
to_return.append(element)
else:
if random.uniform(0,1) > 0.5:
i = random.randint(0, self.max_size-1)
to_return.append(self.data[i].clone())
self.data[i] = element
else:
to_return.append(element)
return Variable(torch.cat(to_return))
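# ReplayBuffer implements the image-history trick used in the CycleGAN paper:
# with probability 0.5 the discriminator sees a previously generated fake from
# the pool instead of the newest one, which stabilizes adversarial training.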
class LambdaLR():
def __init__(self, n_epochs, offset, decay_start_epoch):
assert ((n_epochs - decay_start_epoch) > 0), "Decay must start before the training session ends!"
self.n_epochs = n_epochs
self.offset = offset
self.decay_start_epoch = decay_start_epoch
def step(self, epoch):
return 1.0 - max(0, epoch + self.offset - self.decay_start_epoch)/(self.n_epochs - self.decay_start_epoch)
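# LambdaLR yields a constant learning-rate multiplier until decay_start_epoch,
# then decays it linearly so that it reaches zero at epoch n_epochs.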
import argparse
import os
import math
import itertools
import scipy.io
import torchvision.transforms as transforms
from torch.utils.data import DataLoader

# Local module providing forName(); not to be confused with torchvision.datasets.
import datasets
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
parser.add_argument('--batch_size', type=int, default=1, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')
parser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')
parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')
parser.add_argument('--decay_epoch', type=int, default=100, help='epoch from which to start lr decay')
parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
parser.add_argument('--img_height', type=int, default=64, help='size of image height')
parser.add_argument('--img_width', type=int, default=64, help='size of image width')
parser.add_argument('--channels', type=int, default=21, help='number of image channels')
parser.add_argument('--sample_interval', type=int, default=10, help='interval between sampling images from generators')
parser.add_argument('--n_residual_blocks', type=int, default=9, help='number of residual blocks in generator')
opt = parser.parse_args()
print(opt)
# Create sample and checkpoint directories
model = 'cyclegan_s3_%s_%s_%s_%s_%s_%s_%s' % (opt.n_epochs, opt.batch_size, opt.decay_epoch, opt.lr, opt.b1, opt.b2, opt.n_residual_blocks)
os.makedirs('results/' + model + '/predict', exist_ok=True)
os.makedirs('results/' + model + '/save', exist_ok=True)
# Losses
criterion_GAN = torch.nn.MSELoss()
criterion_cycle = torch.nn.L1Loss()
criterion_identity = torch.nn.L1Loss()
cuda = True if torch.cuda.is_available() else False
# Calculate output of image discriminator (PatchGAN)
patch = (1, opt.img_height // 2**4, opt.img_width // 2**4)
# Initialize generator and discriminator
G_AB = GeneratorResNet(in_channels=opt.channels, out_channels=opt.channels, res_blocks=opt.n_residual_blocks)
G_BA = GeneratorResNet(in_channels=opt.channels, out_channels=opt.channels, res_blocks=opt.n_residual_blocks)
D_A = Discriminator(in_channels=opt.channels)
D_B = Discriminator(in_channels=opt.channels)
if cuda:
G_AB = G_AB.cuda()
G_BA = G_BA.cuda()
D_A = D_A.cuda()
D_B = D_B.cuda()
criterion_GAN.cuda()
criterion_cycle.cuda()
criterion_identity.cuda()
# Initialize weights
G_AB.apply(weights_init_normal)
G_BA.apply(weights_init_normal)
D_A.apply(weights_init_normal)
D_B.apply(weights_init_normal)
# Loss weights
lambda_cyc = 10
lambda_id = 0.5 * lambda_cyc
# Optimizers
optimizer_G = torch.optim.Adam(itertools.chain(G_AB.parameters(), G_BA.parameters()),
lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D_A = torch.optim.Adam(D_A.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D_B = torch.optim.Adam(D_B.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
# Learning rate update schedulers
lr_scheduler_G = torch.optim.lr_scheduler.LambdaLR(optimizer_G, lr_lambda=LambdaLR(opt.n_epochs, 0, opt.decay_epoch).step)
lr_scheduler_D_A = torch.optim.lr_scheduler.LambdaLR(optimizer_D_A, lr_lambda=LambdaLR(opt.n_epochs, 0, opt.decay_epoch).step)
lr_scheduler_D_B = torch.optim.lr_scheduler.LambdaLR(optimizer_D_B, lr_lambda=LambdaLR(opt.n_epochs, 0, opt.decay_epoch).step)
Tensor = torch.cuda.FloatTensor if cuda else torch.Tensor
# Buffers of previously generated samples
fake_A_buffer = ReplayBuffer()
fake_B_buffer = ReplayBuffer()
# data loader
MI = datasets.forName('MI')
trainset = MI(data_type='train')
testset = MI(data_type='test', cfg=trainset.cfg, ms=trainset.ms, transform=trainset.transform, target_transform=trainset.target_transform)
dataloader = DataLoader(trainset, batch_size=opt.batch_size, shuffle=True, num_workers=opt.n_cpu)
val_dataloader = DataLoader(testset, batch_size=5, shuffle=False, num_workers=0)
# ----------
# Training
# ----------
prev_time = time.time()
for epoch in range(opt.n_epochs):
for i, (real_A, real_B, _) in enumerate(dataloader):
# Set model input
real_A = Variable(real_A.type(Tensor))
real_B = Variable(real_B.type(Tensor))
# Adversarial ground truths
valid = Variable(Tensor(np.ones((real_A.size(0), *patch))), requires_grad=False)
fake = Variable(Tensor(np.zeros((real_A.size(0), *patch))), requires_grad=False)
# ------------------
# Train Generators
# ------------------
optimizer_G.zero_grad()
# Identity loss
loss_id_A = criterion_identity(G_BA(real_A), real_A)
loss_id_B = criterion_identity(G_AB(real_B), real_B)
loss_identity = (loss_id_A + loss_id_B) / 2
# GAN loss
fake_B = G_AB(real_A)
loss_GAN_AB = criterion_GAN(D_B(fake_B), valid)
fake_A = G_BA(real_B)
loss_GAN_BA = criterion_GAN(D_A(fake_A), valid)
loss_GAN = (loss_GAN_AB + loss_GAN_BA) / 2
# Cycle loss
recov_A = G_BA(fake_B)
loss_cycle_A = criterion_cycle(recov_A, real_A)
recov_B = G_AB(fake_A)
loss_cycle_B = criterion_cycle(recov_B, real_B)
loss_cycle = (loss_cycle_A + loss_cycle_B) / 2
# Total loss
loss_G = loss_GAN + \
lambda_cyc * loss_cycle + \
lambda_id * loss_identity
loss_G.backward()
optimizer_G.step()
# -----------------------
# Train Discriminator A
# -----------------------
optimizer_D_A.zero_grad()
# Real loss
loss_real = criterion_GAN(D_A(real_A), valid)
# Fake loss (on batch of previously generated samples)
fake_A_ = fake_A_buffer.push_and_pop(fake_A)
loss_fake = criterion_GAN(D_A(fake_A_.detach()), fake)
# Total loss
loss_D_A = (loss_real + loss_fake) / 2
loss_D_A.backward()
optimizer_D_A.step()
# -----------------------
# Train Discriminator B
# -----------------------
optimizer_D_B.zero_grad()
# Real loss
loss_real = criterion_GAN(D_B(real_B), valid)
# Fake loss (on batch of previously generated samples)
fake_B_ = fake_B_buffer.push_and_pop(fake_B)
loss_fake = criterion_GAN(D_B(fake_B_.detach()), fake)
# Total loss
loss_D_B = (loss_real + loss_fake) / 2
loss_D_B.backward()
optimizer_D_B.step()
loss_D = (loss_D_A + loss_D_B) / 2
# --------------
# Log Progress
# --------------
# Determine approximate time left
batches_done = epoch * len(dataloader) + i
batches_left = opt.n_epochs * len(dataloader) - batches_done
time_left = datetime.timedelta(seconds=batches_left * (time.time() - prev_time))
prev_time = time.time()
# Print log
sys.stdout.write("\r[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f, adv: %f, cycle: %f, identity: %f] ETA: %s" %
(epoch, opt.n_epochs,
i, len(dataloader),
loss_D.item(), loss_G.item(),
loss_GAN.item(), loss_cycle.item(),
loss_identity.item(), time_left))
# Update learning rates
lr_scheduler_G.step()
lr_scheduler_D_A.step()
lr_scheduler_D_B.step()
for i, (real_A, real_B, _) in enumerate(val_dataloader):
real_A = Variable(real_A.type(Tensor))
fake_B = G_AB(real_A)
real_B = Variable(real_B.type(Tensor))
fake_A = G_BA(real_B)
img_sample = {'fake_A': np.array(fake_A.data), 'fake_B': np.array(fake_B.data), 'real_A': np.array(real_A.data), 'real_B': np.array(real_B.data)}
scipy.io.savemat('results/%s/predict/%s' % (model, epoch), img_sample)
break
if epoch % opt.sample_interval == 0:
# Save model checkpoints
torch.save(G_AB.state_dict(), 'results/%s/save/G_AB_%d.pth' % (model, epoch))
torch.save(G_BA.state_dict(), 'results/%s/save/G_BA_%d.pth' % (model, epoch))
torch.save(D_A.state_dict(), 'results/%s/save/D_A_%d.pth' % (model, epoch))
torch.save(D_B.state_dict(), 'results/%s/save/D_B_%d.pth' % (model, epoch))
| 36.66313 | 153 | 0.621835 | 1,860 | 13,822 | 4.368817 | 0.162903 | 0.004676 | 0.025105 | 0.008368 | 0.346173 | 0.307408 | 0.243539 | 0.199852 | 0.163672 | 0.142506 | 0 | 0.017136 | 0.244248 | 13,822 | 376 | 154 | 36.760638 | 0.76077 | 0.074808 | 0 | 0.193548 | 0 | 0.004032 | 0.076179 | 0.012604 | 0 | 0 | 0 | 0 | 0.008065 | 1 | 0.048387 | false | 0 | 0.112903 | 0.016129 | 0.205645 | 0.004032 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c8d204460f651f443693d3ba1cf2af74a7770b4 | 4,368 | py | Python | src/python/platforms/android/logger.py | mi-ac/clusterfuzz | 0b5c023eca9e3aac41faba17da8f341c0ca2ddc7 | [
"Apache-2.0"
] | 1 | 2021-12-20T14:48:42.000Z | 2021-12-20T14:48:42.000Z | src/python/platforms/android/logger.py | mi-ac/clusterfuzz | 0b5c023eca9e3aac41faba17da8f341c0ca2ddc7 | [
"Apache-2.0"
] | 1 | 2022-02-26T12:13:15.000Z | 2022-02-26T12:13:15.000Z | src/python/platforms/android/logger.py | henryzz0/clusterfuzz | 0427ed8328d6bd6e18540087793a41531bbaafea | [
"Apache-2.0"
] | 1 | 2021-11-06T06:22:00.000Z | 2021-11-06T06:22:00.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""System log manager related functions."""
import re
import time
from . import adb
from metrics import logs
def clear_log():
"""Clear log."""
adb.run_as_root()
adb.run_shell_command(['stop', 'logd'])
adb.run_shell_command(['start', 'logd'])
time.sleep(0.1)
adb.run_command(['logcat', '-c'])
def is_line_valid(line):
"""Returns true if we consider this line in logs."""
if re.match(r'^[-]+ beginning of', line):
return False
is_chromium_resource_load = 'NotifyBeforeURLRequest' in line
# Discard noisy debug and verbose output.
# http://developer.android.com/tools/debugging/debugging-log.html.
at_least_info_level = not (line.startswith('D/') or line.startswith('V/'))
return is_chromium_resource_load or at_least_info_level
def filter_log_output(output):
"""Filters log output. Removes debug info, etc and normalize output."""
if not output:
return ''
filtered_output = ''
last_process_tuple = (None, None)
for line in output.splitlines():
if not is_line_valid(line):
continue
# To parse frames like:
# E/v8 (18890): Error installing extension 'v8/LoadTimes'.
# {log_level}/{process_name}({process_id}): {message}
m_line = re.match(r'^[VDIWEFS]/(.+)\(\s*(\d+)\)[:](.*)$', line)
if not m_line:
logs.log_error('Failed to parse logcat line: %s' % line)
continue
process_name = m_line.group(1).strip()
process_id = int(m_line.group(2))
filtered_line = m_line.group(3).rstrip()[1:]
# Process Android crash stack frames and convert into sanitizer format.
m_crash_state = re.match(r'\s*#([0-9]+)\s+pc\s+([xX0-9a-fA-F]+)\s+(.+)',
filtered_line)
if m_crash_state:
frame_no = int(m_crash_state.group(1))
frame_address = m_crash_state.group(2)
frame_binary = m_crash_state.group(3).strip()
# Ignore invalid frames, helps to prevent errors
# while symbolizing.
if '<unknown>' in frame_binary:
continue
# Normalize frame address.
if not frame_address.startswith('0x'):
frame_address = '0x%s' % frame_address
      # Separate out the function argument.
frame_binary = (frame_binary.split(' '))[0]
# Normalize line into the same sanitizer tool format.
filtered_line = (' #%d %s (%s+%s)' % (frame_no, frame_address,
frame_binary, frame_address))
# Process Chrome crash stack frames and convert into sanitizer format.
# Stack frames don't have paranthesis around frame binary and address, so
# add it explicitly to allow symbolizer to catch it.
m_crash_state = re.match(
r'\s*#([0-9]+)\s+([xX0-9a-fA-F]+)\s+([^(]+\+[xX0-9a-fA-F]+)$',
filtered_line)
if m_crash_state:
frame_no = int(m_crash_state.group(1))
frame_address = m_crash_state.group(2)
frame_binary_and_address = m_crash_state.group(3).strip()
filtered_line = (' #%d %s (%s)' % (frame_no, frame_address,
frame_binary_and_address))
# Add process number if changed.
current_process_tuple = (process_name, process_id)
if current_process_tuple != last_process_tuple:
filtered_output += '--------- %s (%d):\n' % (process_name, process_id)
last_process_tuple = current_process_tuple
filtered_output += filtered_line + '\n'
return filtered_output
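# Example (sketch): a raw logcat line such as
#   E/v8      (18890): Error installing extension 'v8/LoadTimes'.
# becomes a "--------- v8 (18890):" process header followed by the message.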
def log_output(additional_flags=''):
"""Return log data without noise and some normalization."""
output = adb.run_command('logcat -d -v brief %s *:V' % additional_flags)
return filter_log_output(output)
def log_output_before_last_reboot():
"""Return log data from last reboot without noise and some normalization."""
return log_output(additional_flags='-L')
| 34.393701 | 78 | 0.667353 | 614 | 4,368 | 4.555375 | 0.350163 | 0.021452 | 0.039328 | 0.034322 | 0.197712 | 0.159814 | 0.13729 | 0.13729 | 0.082231 | 0.082231 | 0 | 0.011574 | 0.208791 | 4,368 | 126 | 79 | 34.666667 | 0.797743 | 0.354625 | 0 | 0.171875 | 0 | 0.03125 | 0.120971 | 0.057226 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078125 | false | 0 | 0.0625 | 0 | 0.234375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c8d356d491a155e613167dc98b233ac8c74f668 | 3,724 | py | Python | nst.py | Oppac/neural_style_transfer | 4cbdede7bfee76fc2113320138c4b43329d515ae | [
"MIT"
] | null | null | null | nst.py | Oppac/neural_style_transfer | 4cbdede7bfee76fc2113320138c4b43329d515ae | [
"MIT"
] | null | null | null | nst.py | Oppac/neural_style_transfer | 4cbdede7bfee76fc2113320138c4b43329d515ae | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import os
from PIL import Image
import torch
import torchvision
from torchvision import models, transforms
import nst_vgg
def show_img(processed_img, denorm=True, show=False, save=False, name="img"):
if denorm:
unloader = transforms.Compose([
transforms.Normalize(
                mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225],
std=[1/0.229, 1/0.224, 1/0.225]),
transforms.ToPILImage()
])
else:
unloader = transforms.ToPILImage()
img = unloader(processed_img.squeeze(0))
if save:
if not os.path.exists(r".\images"):
os.makedirs(r".\images")
img.save(f"images\{name}.jpg")
if show:
plt.imshow(img)
plt.show()
def gram_matrix(tensor):
_, channels, height, width = tensor.size()
tensor = tensor.view(channels, height * width)
gram = torch.mm(tensor, tensor.t())
return gram / (channels * height * width)
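# The (normalized) Gram matrix captures channel-wise feature correlations;
# matching Gram matrices of the style and output activations is the style
# loss of Gatys et al.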
if __name__ == "__main__":
content_img_path = "images\jade.jpg"
style_img_path = "images\starry_night.jpg"
output_name = "jade_output"
nb_epoch = 100
content_weights = 1
style_weights = 100
random_init = False
content_layers = ['conv4_2']
style_layers =['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
transform = transforms.Compose([
transforms.Resize((512, 512)),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)])
content_img = Image.open(content_img_path)
content_img = transform(content_img).unsqueeze(0).to(device)
style_img = Image.open(style_img_path)
style_img = transform(style_img).unsqueeze(0).to(device)
if random_init:
output_img = torch.randn_like(content_img).requires_grad_(True).to(device)
else:
output_img = content_img.clone().requires_grad_(True).to(device)
layers_used = content_layers + style_layers
model = nst_vgg.Vgg19Nst(features=layers_used).to(device).eval()
content_maps = model(content_img)
style_maps = model(style_img)
content_rep = {layer: content_maps[layer].squeeze(axis=0) for layer in content_layers}
style_grams = {layer: gram_matrix(style_maps[layer]) for layer in style_layers}
optimizer = torch.optim.LBFGS([output_img])
mse_loss_content = torch.nn.MSELoss(reduction='mean')
mse_loss_style = torch.nn.MSELoss(reduction='sum')
run = 0
while run <= nb_epoch:
def closure():
global run
optimizer.zero_grad()
content_loss = 0
style_loss = 0
output_rep = model(output_img)
for name in content_layers:
content_loss += mse_loss_content(content_rep[name], output_rep[name].squeeze(axis=0))
content_loss /= len(content_layers)
output_grams = {layer: gram_matrix(output_rep[layer]) for layer in style_layers}
for name in style_layers:
style_loss += mse_loss_style(style_grams[name], output_grams[name])
style_loss /= len(style_layers)
total_loss = (content_weights * content_loss) + (style_weights * style_loss)
total_loss.backward()
if run%5 == 0:
print(f" {run} | total_loss: {total_loss} | content_loss: {content_weights * content_loss} | style_loss: {style_weights * style_loss}")
run += 1
return total_loss
optimizer.step(closure)
output_img.data.clamp_(-1.5, 1.5)
show_img(output_img, save=True, name=output_name)
| 32.666667 | 151 | 0.635875 | 490 | 3,724 | 4.585714 | 0.271429 | 0.035603 | 0.025367 | 0.021362 | 0.136627 | 0.078327 | 0 | 0 | 0 | 0 | 0 | 0.03738 | 0.245704 | 3,724 | 113 | 152 | 32.955752 | 0.762549 | 0 | 0 | 0.044944 | 0 | 0.011236 | 0.073577 | 0.006176 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033708 | false | 0 | 0.078652 | 0 | 0.134831 | 0.011236 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c8e4f728661e9013312b8bf21ac0b32ea850f1b | 1,272 | py | Python | src/micromind/analysis/analysis.py | MicroMedIAn/MicroMind | 93e3e91ce4f210cf0a676a79edcdcf04fa6b4818 | [
"MIT"
] | 1 | 2021-01-21T13:56:45.000Z | 2021-01-21T13:56:45.000Z | src/micromind/analysis/analysis.py | MicroMedIAn/MicroMind | 93e3e91ce4f210cf0a676a79edcdcf04fa6b4818 | [
"MIT"
] | null | null | null | src/micromind/analysis/analysis.py | MicroMedIAn/MicroMind | 93e3e91ce4f210cf0a676a79edcdcf04fa6b4818 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Provides Generic Classes to make an image analysis.
"""
from abc import ABC, abstractmethod
import pandas as pd
class InputData(ABC):
def __init__(self, data):
self._content = data
@abstractmethod
def read(self):
pass
class Cohort(InputData):
def __init__(self, dataframe, workdir=None):
super().__init__(dataframe)
self.workdir = workdir
def read(self):
for _, row in self._content.iterrows():
filepath = row.path
name = row.id
if row.todo == 1 and filepath != 0:
if self.workdir:
filepath = str(self.workdir / filepath)
print(type(filepath))
yield (name, filepath)
class AnalysisCV(object):
    '''Run `procedure` on every (name, filepath) pair yielded by an InputData
    source and collect the per-item results as pandas DataFrames.
    '''
def __init__(self, procedure):
self.procedure = procedure
def run(self, input_data):
print('running analysis !!')
all_results = {}
for (name, filepath) in input_data.read():
result = self.procedure.run(filepath, name)
results_df = pd.DataFrame(result, columns=result[0].keys())
all_results[name] = results_df
results_df.to_csv(name + '.csv')
return all_results
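# Example usage (sketch; `my_procedure` is any object exposing run(filepath, name)):
#   analysis = AnalysisCV(procedure=my_procedure)
#   results = analysis.run(Cohort(cohort_df, workdir=workdir))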
| 24.461538 | 71 | 0.578616 | 143 | 1,272 | 4.951049 | 0.447552 | 0.029661 | 0.04661 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003436 | 0.313679 | 1,272 | 51 | 72 | 24.941176 | 0.80756 | 0.056604 | 0 | 0.060606 | 0 | 0 | 0.019475 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0.030303 | 0.060606 | 0 | 0.363636 | 0.060606 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c8ecb018183e918f2e576eac2f75548d94537a1 | 1,648 | py | Python | microraiden/test/fixtures/channel_manager.py | andrevmatos/microraiden | 2d51e78afaf3c0a8ddab87e59a5260c0064cdbdd | [
"MIT"
] | 417 | 2017-09-19T19:06:23.000Z | 2021-11-28T05:39:23.000Z | microraiden/test/fixtures/channel_manager.py | andrevmatos/microraiden | 2d51e78afaf3c0a8ddab87e59a5260c0064cdbdd | [
"MIT"
] | 259 | 2017-09-19T20:42:57.000Z | 2020-11-18T01:31:41.000Z | microraiden/test/fixtures/channel_manager.py | andrevmatos/microraiden | 2d51e78afaf3c0a8ddab87e59a5260c0064cdbdd | [
"MIT"
] | 126 | 2017-09-19T17:11:39.000Z | 2020-12-17T17:05:27.000Z | import pytest
import types
from microraiden.channel_manager import ChannelManager, Blockchain
def start_channel_manager(channel_manager, use_tester, mine_sync_event):
if use_tester:
mine_sync_event.clear()
# monkeypatch Blockchain::_update() to wait for an sync event
def update_patched(self: Blockchain):
Blockchain._update(self)
mine_sync_event.wait()
channel_manager.blockchain._update = types.MethodType(
update_patched, channel_manager.blockchain)
# it is pointless to do busy loop as the Blockchain blocks on sync
channel_manager.blockchain.poll_interval = 0
def stop_patched(self: ChannelManager):
mine_sync_event.set()
ChannelManager.stop(self)
self.stop = types.MethodType(ChannelManager.stop, self)
channel_manager.stop = types.MethodType(
stop_patched, channel_manager)
def fail(greenlet):
raise greenlet.exception
channel_manager.link_exception(fail)
channel_manager.start()
return channel_manager
@pytest.fixture
def channel_manager(
web3,
receiver_privkey,
channel_manager_contract,
token_contract,
use_tester,
mine_sync_event,
state_db_path,
patched_contract,
revert_chain
):
manager = ChannelManager(
web3,
channel_manager_contract,
token_contract,
receiver_privkey,
n_confirmations=5,
state_filename=state_db_path
)
start_channel_manager(manager, use_tester, mine_sync_event)
yield manager
manager.stop()
| 27.932203 | 74 | 0.678398 | 181 | 1,648 | 5.867403 | 0.353591 | 0.19774 | 0.073446 | 0.06403 | 0.161959 | 0.054614 | 0 | 0 | 0 | 0 | 0 | 0.003281 | 0.260316 | 1,648 | 58 | 75 | 28.413793 | 0.867925 | 0.075243 | 0 | 0.173913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.065217 | 0 | 0.195652 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c8ff5a294638c5829d3aece5a82745250ddd2be | 1,378 | py | Python | q26-50/q39.py | ljn1999/LeetCode-problems | a1ec54c45ef66530bdb58efae6ef2bb059626484 | [
"MIT"
] | 4 | 2020-07-10T06:56:36.000Z | 2020-07-21T02:39:05.000Z | q26-50/q39.py | ljn1999/LeetCode-problems | a1ec54c45ef66530bdb58efae6ef2bb059626484 | [
"MIT"
] | null | null | null | q26-50/q39.py | ljn1999/LeetCode-problems | a1ec54c45ef66530bdb58efae6ef2bb059626484 | [
"MIT"
] | null | null | null | # 2020.08.11
# Problem Statement:
# https://leetcode.com/problems/combination-sum/
# Made a mistake by making one_ans a global variable
# Got inspired by a post in Discussion section
# https://leetcode.com/problems/combination-sum/discuss/16554/Share-My-Python-Solution-beating-98.17
from typing import List
class Solution:
    @staticmethod
    def dfs(candidates, target, one_ans, answer):
# a combination is found, append to answer
if target == 0:
answer.append(one_ans)
return
# loop the list, fix one element and find the others recursively
for i in range(0, len(candidates)):
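            # candidates is sorted, so once one value exceeds target, all later ones do too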
if candidates[i] > target: return
if len(one_ans) > 0 and candidates[i] < one_ans[-1]:
# should search for nums larger or equal to the current largest one in one_ans
# search for next element
continue
else:
# fix one element, find the rest recursively
Solution.dfs(candidates, target-candidates[i], one_ans+[candidates[i]], answer)
def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
# sort the input list
candidates.sort()
# initialize answer to return
answer = []
# do the dfs
Solution.dfs(candidates, target, [], answer)
return answer
| 37.243243 | 100 | 0.607402 | 173 | 1,378 | 4.797688 | 0.473988 | 0.050602 | 0.068675 | 0.057831 | 0.091566 | 0.091566 | 0 | 0 | 0 | 0 | 0 | 0.021921 | 0.30479 | 1,378 | 37 | 101 | 37.243243 | 0.844468 | 0.419448 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c91230eda3c02d22e992e2852660ff98bfc74bb | 2,049 | py | Python | pointnet2/evaluate.py | signinit/Pointnet2_PyTorch | 868933c6610b4409a4d0d0cf4514a423a826337a | [
"Unlicense"
] | null | null | null | pointnet2/evaluate.py | signinit/Pointnet2_PyTorch | 868933c6610b4409a4d0d0cf4514a423a826337a | [
"Unlicense"
] | null | null | null | pointnet2/evaluate.py | signinit/Pointnet2_PyTorch | 868933c6610b4409a4d0d0cf4514a423a826337a | [
"Unlicense"
] | null | null | null | import os
import math
import numpy as np
import hydra
import omegaconf
import pytorch_lightning as pl
import torch
from pytorch_lightning.loggers import TensorBoardLogger
from pointnet2.models import PointNet2SemSegSSG
from pointnet2.models import PointNet2ClassificationSSG
from pointnet2.models import PointNet2LinearSSG
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def hydra_params_to_dotdict(hparams):
def _to_dot_dict(cfg):
res = {}
for k, v in cfg.items():
if isinstance(v, omegaconf.DictConfig):
res.update(
{k + "." + subk: subv for subk, subv in _to_dot_dict(v).items()}
)
elif isinstance(v, (str, int, float, bool)):
res[k] = v
return res
return _to_dot_dict(hparams)
@hydra.main("config/config.yaml")
def main(cfg):
device = torch.device("cuda")
all_points = np.loadtxt(cfg.input, delimiter=",")[:,:3]
batches = math.ceil(all_points.shape[0] / 4096)
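    # np.resize fills the final 4096-point batch by repeating points from the start of the cloud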
np_points = np.resize(all_points, (batches, 4096, 3))
points = torch.from_numpy(np_points).float().cuda()
if(cfg.task_model.name == "cls-ssg"):
model = PointNet2ClassificationSSG.load_from_checkpoint(cfg.weights)
if(cfg.task_model.name == "sem-ssg"):
model = PointNet2SemSegSSG.load_from_checkpoint(cfg.weights)
if(cfg.task_model.name == "lin-ssg"):
model = PointNet2LinearSSG.load_from_checkpoint(cfg.weights)
model.eval()
model.to(device)
results = model(points).detach().cpu()
if(cfg.task_model.name == "cls-ssg"):
classes = torch.argmax(results, dim=1).numpy()
print(classes[0])
if(cfg.task_model.name == "lin-ssg"):
print(results.numpy()[0,0])
if(cfg.task_model.name == "sem-ssg"):
classes = torch.argmax(results, dim=1).numpy()
np.savetxt("out.txt", np.concatenate([all_points, classes.reshape((-1,1))[:len(all_points)]], axis=1), delimiter=",", fmt="%.6f")
if __name__ == "__main__":
main() | 33.590164 | 137 | 0.65593 | 268 | 2,049 | 4.858209 | 0.358209 | 0.023041 | 0.041475 | 0.064516 | 0.228879 | 0.207373 | 0.205837 | 0.127496 | 0.070661 | 0.070661 | 0 | 0.017791 | 0.20449 | 2,049 | 61 | 138 | 33.590164 | 0.780982 | 0 | 0 | 0.156863 | 0 | 0 | 0.041951 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.215686 | 0 | 0.313725 | 0.039216 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c93fa2f88c274755575d33c437763be8ae2720b | 2,587 | py | Python | driver/user/test.py | walcarher/Demo2Achieve | 62c206e1ccf05f83e20bb1ed3a6518460de5eb9a | [
"MIT"
] | null | null | null | driver/user/test.py | walcarher/Demo2Achieve | 62c206e1ccf05f83e20bb1ed3a6518460de5eb9a | [
"MIT"
] | null | null | null | driver/user/test.py | walcarher/Demo2Achieve | 62c206e1ccf05f83e20bb1ed3a6518460de5eb9a | [
"MIT"
] | null | null | null | import sys
import torch
import math
import time
# Import the CHIMERA module for communication between the TX2 and the C10GX
import chimera_lib
def init_tensor(tensor):
i = 0
C = tensor.size(1)
H = tensor.size(2)
W = tensor.size(3)
for h in range(H):
for w in range(W):
for c in range(C):
tensor[0][c][h][w] = i
i += 1
if i == 256:
i = 0
return tensor
print("Int32 tensor transfer test (chunks of 2048 DWords)")
# FPGA communication function declaration
class fpga_comm(torch.nn.Module):
@staticmethod
def open():
if chimera_lib.open():
sys.exit("FPGA device could not be opened")
@staticmethod
def close():
chimera_lib.close()
@staticmethod
def quantize(input):
        output = chimera_lib.quantize(input)
return output
@staticmethod
def write(input):
chimera_lib.write(input)
@staticmethod
def read(input):
output = chimera_lib.read(input)
return output
# Function call
comm = fpga_comm()
# Open FPGA Device
comm.open()
# Start empty 32-bit Integer tensor
input = torch.zeros((1,8,16,16), dtype = torch.int32, device = "cuda")
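# 1x8x16x16 = 2048 elements, matching the 2048-DWord chunk size noted above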
# Start input tensor with an increasing sequence from 0 up to 255
# for C*H*W/256 times starting with dimension 1, then 2 and finally 3
input = init_tensor(input)
# Quantize and pack DWORDs in a single 32b Integer (4 DWORDs with 4 UInt8 per address)
# Tensor depth (channel) dimension is reduced by 4
print("Quantize tensor to Int8 and compress into a Int32 Tensor with C/4")
start = time.time()
quantized_input = chimera_lib.quantize(input)
elapsed = time.time() - start
print("Quantization elapsed time:", elapsed*1000, " ms")
# Write tensor to On-Chip memory on FPGA
print("Write Int32 tensor sequence with values from 0 to 255 for CHW/256 times")
start = time.time()
comm.write(quantized_input)
elapsed = time.time() - start
print("Write elapsed time:", elapsed*1000, " ms")
# Initialize a zero-filled output tensor with the dimensions and size to be read
output = torch.zeros((1,8,16,16), dtype = torch.int32, device = "cuda")
# Read tensor from On-Chip memory from FPGA as Integer32
print("Read Int32 tensor with a sequence from 0-255")
start = time.time()
output = comm.read(output)
elapsed = time.time() - start
print("Read elapsed time:", elapsed*1000, " ms")
# Closing device and freeing memory
#print(output)
comm.close() | 31.938272 | 87 | 0.644376 | 369 | 2,587 | 4.482385 | 0.319783 | 0.042322 | 0.023579 | 0.036276 | 0.195284 | 0.136638 | 0.100363 | 0.100363 | 0.049577 | 0.049577 | 0 | 0.044909 | 0.25976 | 2,587 | 81 | 88 | 31.938272 | 0.818799 | 0.253962 | 0 | 0.288136 | 0 | 0 | 0.18573 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101695 | false | 0 | 0.084746 | 0 | 0.254237 | 0.118644 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c954511893cf792b80b02a154e88c3cdb069232 | 3,065 | py | Python | trainmodel.py | gummideepak/wine-prediction-aws | f990edb94c4ec70b8525d0d0013b2d2a8d357d23 | [
"MIT"
] | null | null | null | trainmodel.py | gummideepak/wine-prediction-aws | f990edb94c4ec70b8525d0d0013b2d2a8d357d23 | [
"MIT"
] | null | null | null | trainmodel.py | gummideepak/wine-prediction-aws | f990edb94c4ec70b8525d0d0013b2d2a8d357d23 | [
"MIT"
] | null | null | null | from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('ml-wine').getOrCreate()
inputTrainDF = spark.read.option("delimiter", ";").csv('/data/train.csv', header = True, inferSchema = True)
inputTestDF = spark.read.option("delimiter", ";").csv('/data/test.csv', header = True, inferSchema = True)
from pyspark.mllib.linalg import Vectors
from pyspark.ml.feature import VectorAssembler
# select the columns to be used as the features (all except `quality`)
featureColumns = [c for c in inputTrainDF.columns if c != 'quality']
# create and configure the assembler
assembler = VectorAssembler(inputCols=featureColumns,
outputCol="features")
# transform the original data
#dataDF = assembler.transform(inputDF)
from pyspark.ml.evaluation import RegressionEvaluator
# create a regression evaluator with RMSE metrics
evaluator = RegressionEvaluator(
labelCol='quality', predictionCol="prediction", metricName="rmse")
# split the input data into training and test dataframes with 70% to 30% weights
#(trainingDF, testDF) = inputDF.randomSplit([0.7, 0.3])
from pyspark.ml import Pipeline
from pyspark.ml.regression import RandomForestRegressor
# define the random forest estimator
rf = RandomForestRegressor(featuresCol="features", labelCol="quality", numTrees=100, maxBins=128, maxDepth=20, \
minInstancesPerNode=5, seed=33)
rfPipeline = Pipeline(stages=[assembler, rf])
# train the random forest model
rfPipelineModel = rfPipeline.fit(inputTrainDF)
# rfTrainingPredictions = rfPipelineModel.transform(inputTrainDF)
# rfTestPredictions = rfPipelineModel.transform(inputTestDF)
# print("Random Forest RMSE on traning data = %g" % evaluator.evaluate(rfTrainingPredictions))
# print("Random Forest RMSE on test data = %g" % evaluator.evaluate(rfTestPredictions))
rfPipelineModel.write().overwrite().save('output/rf.model')
# load the random forest pipeline from disk
from pyspark.ml import PipelineModel
loadedModel = PipelineModel.load('output/rf.model')
# loadedPredictionsDF = loadedModel.transform(inputTestDF)
# # evaluate the model again to see if we get the same performance
# print("Loaded model RMSE = %g" % evaluator.evaluate(loadedPredictionsDF))
predictions = loadedModel.transform(inputTestDF)
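# Note: the regressor outputs continuous predictions, so the exact-match accuracy below
# will be pessimistic unless predictions are rounded to integer quality grades first.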
# labels_and_predictions = inputTestDF.map(lambda x: x.quality).zip(predictions)
# acc = labels_and_predictions.filter(lambda x: x[0] == x[1]).count() / float(inputTestDF.count())
# print("Model accuracy: %.3f%%" % (acc * 100))
# Evaluate the model.
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
print("Evaluating the model...")
evaluator = MulticlassClassificationEvaluator(labelCol="label", predictionCol="prediction", metricName="accuracy")
accuracy = evaluator.evaluate(predictions)
print("Accuracy = %g " % accuracy)
evaluator = MulticlassClassificationEvaluator(labelCol="label", predictionCol="prediction", metricName="f1")
f1 = evaluator.evaluate(predictions)
print("F1 = %g " % f1)
print("Model prediction finished... terminating.")
| 40.328947 | 114 | 0.761827 | 346 | 3,065 | 6.736994 | 0.419075 | 0.037752 | 0.033462 | 0.020592 | 0.170742 | 0.102102 | 0.075504 | 0 | 0 | 0 | 0 | 0.010797 | 0.123654 | 3,065 | 75 | 115 | 40.866667 | 0.857036 | 0.390538 | 0 | 0 | 0 | 0 | 0.142857 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.258065 | 0 | 0.258065 | 0.129032 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c962d2b2b972925f8f21862c876382f1d3aad02 | 5,962 | py | Python | translate_column.py | jmeppley/py-metagenomics | 0dbab073cb7e52c4826054e40eb802c9e0298e9a | [
"MIT"
] | 7 | 2015-05-14T09:36:36.000Z | 2022-03-30T14:32:21.000Z | translate_column.py | jmeppley/py-metagenomics | 0dbab073cb7e52c4826054e40eb802c9e0298e9a | [
"MIT"
] | 1 | 2015-07-14T11:47:25.000Z | 2015-07-17T01:45:26.000Z | translate_column.py | jmeppley/py-metagenomics | 0dbab073cb7e52c4826054e40eb802c9e0298e9a | [
"MIT"
] | 7 | 2015-07-25T22:29:29.000Z | 2022-03-01T21:26:14.000Z | #!/usr/bin/env python
"""
This script takes a tab delimited text table and creates a new column from
an existing one using an external translation table.
based on stats/columnMaker.py
author: John Eppley
"""
import sys
import re
import os.path
import traceback
import logging
import argparse
from edl.util import *
def main():
# set up CLI
description = """
This script takes a tab delimited text table and creates a new column
from an existing one using an external translation table.
"""
parser = argparse.ArgumentParser(description=description)
add_IO_arguments(parser)
parser.add_argument(
"-f",
"--fillMissing",
dest="fill",
metavar="FILL",
help="Put FILL in column when value not in map. If not used, "
"entire line is skipped. If set to 'KEY', value in key "
"column is used."),
parser.add_argument("-m", "--mapFile", dest="mapFile",
metavar="MAPFILE", help="Location of mapping table.")
parser.add_argument(
"-c",
"--column",
dest="col",
type=int,
default=1,
help="Column number (first column is 1)",
metavar="COLUMN")
parser.add_argument(
"-C",
"--newColumn",
dest="newcol",
type=int,
default=None,
help="Column number to insert new column after. Default is the "
"after the source column. 0=>make it the first column. "
"-1=>make it the last column.",
metavar="COLUMN")
parser.add_argument(
"-D",
"--deleteColumn",
dest="delcols",
default=[],
action='append',
metavar='COLUMN',
help="Delete this column (starting at 1, after new column "
"inserted). May be used multiple times for multiple columns")
# log level and help
add_universal_arguments(parser)
arguments = parser.parse_args()
setup_logging(arguments)
logging.info("Value map from: " + arguments.mapFile)
logging.debug("Fill: '%s'" % (arguments.fill))
translation = parseMapFile(arguments.mapFile)
for (inhandle, outhandle) in inputIterator(arguments):
# setup some counters
ncols = 0
total_lines = 0
skipped_lines = 0
lines_kept = 0
first_invalid_line = 0
invalid_line = None
# loop over lines
for i, line in enumerate(inhandle):
total_lines += 1
line = line.rstrip('\r\n')
if not line or line.startswith('#'):
skipped_lines += 1
continue
try:
cells = line.split('\t')
if ncols == 0:
# count columns and check requested column number
ncols = len(cells)
if arguments.col > ncols:
sys.exit("first line has fewer columns (%d) "
"than requested column number(%d)!" %
(ncols, arguments.col))
# get value from column
value = cells[arguments.col - 1]
if value in translation:
newCol = translation[value]
else:
if arguments.fill is not None:
if arguments.fill == 'KEY':
newCol = value
else:
newCol = arguments.fill
else:
logging.debug(
"skipping value not in translation: %s" %
(value))
skipped_lines += 1
continue
# insert new value
if arguments.newcol is None:
cells.insert(arguments.col, newCol)
elif arguments.newcol < 0 or arguments.newcol >= ncols:
cells.append(newCol)
else:
cells.insert(arguments.newcol, newCol)
# perform any requested column deletions
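                # (iterate right-to-left so earlier deletions do not shift later indices)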
for delcol in sorted(
[int(c) for c in arguments.delcols], reverse=True):
cells.pop(delcol - 1)
new_line = '\t'.join(cells)
# print >> outhandle, new_line
print(new_line, file=outhandle)
lines_kept += 1
except Exception:
                logging.warning(
                    "Unexpected error (%s): %s" %
                    (sys.exc_info()[0], sys.exc_info()[1]))
                logging.warning(traceback.format_tb(sys.exc_info()[2]))
                logging.warning('Skipping %s' % (line))
skipped_lines += 1
if not invalid_line:
first_invalid_line = i + 1
invalid_line = line
# set insertion column for logging
if arguments.newcol is None:
inserted = arguments.col + 1
elif arguments.newcol < 0 or arguments.newcol >= ncols:
inserted = ncols
else:
inserted = arguments.newcol + 1
valid_lines = total_lines - skipped_lines
message = "Processed: %s\nCreated column %d with mapfile %s \
applied to column %d" % (inhandle,
inserted,
arguments.mapFile,
arguments.col)
if valid_lines > 0:
message += '\nkept %d of %d lines.' % (lines_kept, total_lines)
else:
message += '\nNo valid lines found'
if skipped_lines > 0:
message += '\nSkipped %d lines' % (skipped_lines)
logging.info(message)
if invalid_line:
            logging.warning(
'Invalid lines found! EG line #%d: "%s"' %
(first_invalid_line, invalid_line))
if (__name__ == '__main__'):
main()
| 34.068571 | 77 | 0.513754 | 622 | 5,962 | 4.839228 | 0.307074 | 0.029236 | 0.028239 | 0.010631 | 0.13289 | 0.097674 | 0.097674 | 0.097674 | 0.069767 | 0.069767 | 0 | 0.007752 | 0.394163 | 5,962 | 174 | 78 | 34.264368 | 0.825581 | 0.076149 | 0 | 0.178571 | 0 | 0 | 0.179599 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.007143 | false | 0 | 0.05 | 0 | 0.057143 | 0.007143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c9711e4e49200b93147b341f9a990331b390404 | 1,501 | py | Python | dopamine/thesis/scratch/aim_stress.py | xqz-u/dopamine | d562750a58bcf681a6f8b590f4e4dfb263654b5e | [
"Apache-2.0"
] | null | null | null | dopamine/thesis/scratch/aim_stress.py | xqz-u/dopamine | d562750a58bcf681a6f8b590f4e4dfb263654b5e | [
"Apache-2.0"
] | null | null | null | dopamine/thesis/scratch/aim_stress.py | xqz-u/dopamine | d562750a58bcf681a6f8b590f4e4dfb263654b5e | [
"Apache-2.0"
] | null | null | null | import aim
import numpy as np
path = "/home/xqz-u/uni/thesis/dopamine/thesis/tests"
redundancy = 1
epochs = 5
min_steps = 1000
for redund in range(redundancy):
run = aim.Run(repo=path, experiment=f"redund_{redund}")
for epoch in range(epochs):
epoch_steps = 0
while epoch_steps <= min_steps:
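            # run randomly terminated episodes until this epoch has logged at least min_steps values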
step = 0
done = False
while not done:
run.track(
np.random.rand(),
name="random_val",
step=step,
epoch=epoch,
# context={"subset": "train"},
)
if step > min_steps // 2:
done = bool(np.random.randint(0, 2))
step += 1
print(f"{redund}-{epoch}: {step} , {epoch_steps}")
epoch_steps += step
res = aim.Repo(path)
df = []
# query = "metric.name == 'random_val' and run.experiment == 'pippone'"
query = "metric.name == 'random_val' and run.experiment == 'redund_0'"
for run in res.query_metrics(query).iter_runs():
el = run.dataframe()
if el is not None:
df.append(el)
# aim_logger = aim.Run(repo=path, experiment="pippo")
# aim_logger.track(2, "ciccio", step=2002, epoch=1)
# aim_logger.track(4, "ciccio", step=2002, epoch=2)
# repo = aim.Repo(path)
# query = "metric.name == 'ciccio'"
# df = []
# for run in repo.query_metrics(query).iter_runs():
# el = run.dataframe()
# if el is not None:
# df.append(el)
| 28.865385 | 71 | 0.547635 | 194 | 1,501 | 4.139175 | 0.345361 | 0.039851 | 0.048568 | 0.034869 | 0.313823 | 0.254047 | 0.254047 | 0.254047 | 0.154421 | 0.154421 | 0 | 0.024155 | 0.31046 | 1,501 | 51 | 72 | 29.431373 | 0.751691 | 0.289141 | 0 | 0 | 0 | 0 | 0.160342 | 0.041746 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c9866df395f86986d3f718f21a467aeff46fa3e | 46,535 | py | Python | PerceptualMetric/psrc/perceptree/data/predict.py | PolasekT/ICTree | d13ad603101805bcc288411504ecffd6f2e1f365 | [
"MIT"
] | 3 | 2021-12-09T22:37:03.000Z | 2022-02-16T13:40:44.000Z | PerceptualMetric/psrc/perceptree/data/predict.py | PolasekT/ICTree | d13ad603101805bcc288411504ecffd6f2e1f365 | [
"MIT"
] | null | null | null | PerceptualMetric/psrc/perceptree/data/predict.py | PolasekT/ICTree | d13ad603101805bcc288411504ecffd6f2e1f365 | [
"MIT"
] | 3 | 2021-12-09T22:37:08.000Z | 2022-02-03T14:38:39.000Z | # -*- coding: utf-8 -*-
"""
Score prediction processing system.
"""
import filecmp
import itertools
import json
import math
import os
import pathlib
import re
import secrets
import shutil
import subprocess
import sys
from typing import Dict, List, Optional, Tuple, Union
import gzip
import h5py
import numpy as np
import pandas as pd
import pickle as pk
from perceptree.common.cache import deep_copy_dict
from perceptree.common.cache import update_dict_recursively
from perceptree.common.configuration import Config
from perceptree.common.configuration import Configurable
from perceptree.common.logger import Logger
from perceptree.common.util import reshape_scalar
from perceptree.common.util import numpy_array_to_tuple_numpy
from perceptree.common.util import tuple_array_to_numpy
from perceptree.common.logger import LoadingBar
from perceptree.data.loader import BaseDataLoader
from perceptree.data.loader import DataLoader
from perceptree.data.loader import CustomDataLoader
from perceptree.data.treeio import TreeFile
from perceptree.data.treeio import TreeImage
from perceptree.data.featurizer import DataFeaturizer
from perceptree.model.base import BaseModel
class Prediction(Logger):
""" Wrapper around a single requested prediction. For details, see initialize_data(). """
def __init__(self, tree_id: int, view_ids: Optional[Union[int, List[int]]] = None,
tree_file: Optional[TreeFile] = None,
tree_views: Optional[Dict[int, Dict[str, TreeImage]]] = None,
tree_view_types: Optional[List[str]] = None,
data_loader: Optional[BaseDataLoader] = None,
complete_tree: bool = True,
load_expected_scores: bool = True,
use_dataset_tree: bool = True,
external_scores: Optional[dict] = None,
clean_copy: Optional["Prediction"] = None,
data_source: Optional[str] = "unknown"):
self._tree_id = None
self._view_ids = None
self._tree_file = None
self._tree_views = None
self._tree_view_types = None
self._complete_tree = None
self._score_prediction = None
self._score_expected = None
self._max_score = None
self._data_source = None
if clean_copy is not None:
self._initialize_copy(clean_copy)
else:
self.initialize_data(tree_id=tree_id, view_ids=view_ids,
tree_file=tree_file, tree_views=tree_views,
tree_view_types=tree_view_types,
complete_tree=complete_tree,
load_expected_scores=load_expected_scores,
external_scores=external_scores,
use_dataset_tree=use_dataset_tree,
data_loader=data_loader, data_source=data_source)
def _initialize_copy(self, clean_copy: "Prediction"):
""" Perform a shallow copy from given prediction and clear the score prediction. """
self._tree_id = clean_copy._tree_id
self._view_ids = clean_copy._view_ids
self._tree_file = clean_copy._tree_file
self._tree_views = clean_copy._tree_views
self._tree_view_types = clean_copy._tree_view_types
self._complete_tree = clean_copy._complete_tree
self._score_expected = clean_copy._score_expected
self._score_prediction = None
self._max_score = clean_copy._max_score
self._data_source = clean_copy._data_source
def initialize_data(self, tree_id: Union[int, Tuple[int, int]],
view_ids: Optional[Union[Union[int, Tuple[int, int]], List[Union[int, Tuple[int, int]]]]] = None,
tree_file: Optional[TreeFile] = None,
tree_views: Optional[Dict[int, Dict[str, TreeImage]]] = None,
tree_view_types: Optional[List[str]] = None,
complete_tree: bool = True,
load_expected_scores: bool = True,
use_dataset_tree: bool = True,
external_scores: Optional[dict] = None,
data_loader: Optional[BaseDataLoader] = None,
data_source: Optional[str] = "unknown"):
"""
Initialize data required for this prediction.
:param tree_id: Identifier of the tree, optionally with variant identifier.
        :param view_ids: Optional identifiers of the views; -1 selects all views.
            May also be given as a list of view ids, and each id may additionally
            carry a view variant.
:param tree_file: Optional pre-loaded tree file data.
:param tree_views: Optional pre-loaded views data indexed by view id
and view type respectively. If specified, view_id should be >= -1!
:param tree_view_types: Optional list of view types to load.
:param complete_tree: Does this prediction cover the tree as a whole?
:param load_expected_scores: Load ground-truth scores for this prediction?
:param use_dataset_tree: Allow use of dataset trees if any information is
missing?
:param external_scores: Optional dictionary mapping tree IDs to external
loaded scores.
:param data_loader: Optional data-loader used to get
TreeFile/TreeImage instances when they are not
specified.
:param data_source: Name of the source where the tree data came from.
"""
if tree_views is not None and view_ids is None:
raise RuntimeError("Non-negative view id must be specified when using tree_view!")
if isinstance(tree_id, int):
tree_id = (tree_id, 0)
if isinstance(view_ids, int):
view_ids = (view_ids, 0)
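        # A negative view id acts as a sentinel requesting every available view.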
all_views = isinstance(view_ids, tuple) and view_ids[0] < 0
if isinstance(view_ids, list):
view_ids = [
(view_id, 0) if isinstance(view_ids, int) else view_id
for view_id in view_ids
]
elif isinstance(view_ids, tuple):
view_ids = tuple_array_to_numpy([ view_ids ])
else:
view_ids = reshape_scalar([ ])
if tree_views is not None and len(tree_views) != len(view_ids) and not all_views:
raise RuntimeError("Number of view images and view ids must be equal!")
prepare_views = len(view_ids) and tree_views is None
data_loader_required = (tree_file is None) or prepare_views or load_expected_scores
if data_loader_required and data_loader is None:
raise RuntimeError("Data loader is necessary to load some required data, but it is unavailable!")
# Prepare views if requested and not specified.
if prepare_views:
view_catalogue = data_loader.full_view_catalogue
if tree_id not in view_catalogue.index:
raise RuntimeError(f"Tree ID \"{tree_id}\" is not present in view catalogue!")
if min(min(view_ids)) < 0:
# Get all possible views.
new_view_ids = [ ]
for view_id in view_ids:
if view_id[0] >= 0 and view_id[1] >= 0:
new_view_ids.append(view_id)
else:
view_slice = tuple((
view_id[0] if view_id[0] >= 0 else slice(None),
view_id[1] if view_id[1] >= 0 else slice(None)
))
new_view_ids += list(np.unique(numpy_array_to_tuple_numpy([
(idx[2], idx[3])
for idx in view_catalogue.loc[tree_id + view_slice].index
])))
view_ids = numpy_array_to_tuple_numpy(new_view_ids)
tree_views = { }
for view_id in view_ids:
# Recover all modalities for requested views.
single_view_catalogue = view_catalogue.loc[(tree_id[0], tree_id[1], view_id[0], view_id[1])]
tree_views[view_id] = {
view_type: view_data.data or TreeImage(
image_path=f"{data_loader.view_base_path}/{view_data.path}"
)
for view_type, view_data in single_view_catalogue.iterrows()
if tree_view_types is None or view_type in tree_view_types
}
# Prepare tree data if not specified.
if tree_file is None and use_dataset_tree:
tree_data = data_loader.tree_data
if tree_id not in tree_data:
raise RuntimeError(f"Tree ID \"{tree_id}\" is not present in tree data catalogue!")
tree_file = tree_data[tree_id]
# Prepare score data if requested.
if load_expected_scores:
score_expected = { }
for tree_view_id in [ (tree_id[0], tree_id[1], -1, 0) ] \
if complete_tree else \
[ (tree_id[0], tree_id[1], view_id[0], view_id[1]) for view_id in view_ids ]:
if external_scores is not None:
if tree_view_id not in external_scores:
raise RuntimeError(f"Tree ID/View ID \"{tree_view_id}\" is not present in external score data!")
score_expected[( tree_view_id[2], tree_view_id[3] )] = external_scores[tree_view_id]
elif use_dataset_tree:
if tree_view_id not in data_loader.full_scores_indexed.index:
raise RuntimeError(f"Tree ID/View ID \"{tree_view_id}\" is not present in tree score data!")
score_expected[( tree_view_id[2], tree_view_id[3] )] = \
data_loader.full_scores_indexed.loc[tree_view_id].to_dict()
else:
score_expected = None
        # Note: the computed dataset maximum is overridden by a fixed cap of 4.5.
        max_score = 4.5
self._tree_id = tree_id
self._view_ids = list(view_ids)
self._complete_tree = complete_tree
self._tree_file = tree_file
self._tree_views = tree_views
self._score_expected = score_expected
self._max_score = max_score
self._data_source = data_source
@property
    def tree_id(self) -> Tuple[int, int]:
""" Get tree identifier used by this prediction. """
return self._tree_id
@property
    def view_ids(self) -> List[Tuple[int, int]]:
""" Get view identifiers used by this prediction. """
return self._view_ids
@property
def data_source(self) -> str:
""" Get source of where the tree data came from. """
return self._data_source or "unknown"
@property
    def complete_tree(self) -> bool:
        """ Does this prediction cover the tree as a whole (True) or individual views (False). """
return self._complete_tree
@property
def tree_file(self) -> TreeFile:
""" Get pre-loaded tree file for the target tree. """
return self._tree_file
@property
    def tree_views(self) -> Optional[Dict[int, Dict[str, TreeImage]]]:
""" Get pre-loaded tree views for the target tree. """
return self._tree_views
@property
def score_expected(self) -> Optional[Dict[int, Dict[str, float]]]:
""" Get expected scores, if they were requested. """
return self._score_expected
@property
def score_prediction(self) -> Optional[Dict[int, Dict[str, float]]]:
""" Get score prediction for this tree or its views depending on complete_tree(). """
return self._score_prediction
@score_prediction.setter
def score_prediction(self, score: Union[List[float], Dict[int, float]]):
""" Set new predicted score for this tree. """
if self._complete_tree and len(score) != 1:
raise RuntimeError(f"Unable to set non-scalar scores for a complete tree ({len(score)})!")
if not self._complete_tree and self._score_expected and len(score) != len(self._score_expected):
raise RuntimeError(f"Unable to set non-list scores for a non-complete tree "
f"({len(self._score_expected)} vs {len(score)})!")
view_ids = [ ( -1, 0) ] if self._complete_tree else self.view_ids
score = {
view_id: score[view_idx]
for view_idx, view_id in enumerate(view_ids)
} if not isinstance(score, dict) else score
self._score_prediction = {
view_id: {
"jod": min(self._max_score, score[view_id]),
"jod_low": min(self._max_score, score[view_id]),
"jod_high": min(self._max_score, score[view_id]),
"jod_var": 0.0
}
for view_id in view_ids
}
def clean_copy(self) -> "Prediction":
""" Create a clean clone of this tree without score prediction. """
return Prediction(tree_id=-1, clean_copy=self)
class PredictionProcessor(Logger, Configurable):
"""
Input file loading and caching system.
"""
COMMAND_NAME = "Predict"
""" Name of this command, used for configuration. """
def __init__(self, config: Config):
super().__init__(config=config)
self._set_instance()
self._featurizer = self.get_instance(DataFeaturizer)
self._data_loader = self.get_instance(DataLoader)
self.__l.info("Initializing prediction processing system...")
@staticmethod
def _parse_prediction_request(prediction_spec: str, complete_tree: bool) -> dict:
""" Parse given prediction specification and return constructor arguments dictionary. """
if prediction_spec == "-1":
            # Perform prediction on all trees in the loaded set.
return {
"tree_id": -1,
"tree_id_variant": 0,
"loading_required": False,
"file_path": None,
"view_paths": None,
"complete_tree": complete_tree,
"name": prediction_spec,
}
elif prediction_spec.isdecimal():
# Perform a single prediction for given tree id.
return {
"tree_id": ( int(prediction_spec), 0 ),
"loading_required": False,
"file_path": None,
"view_paths": None,
"complete_tree": complete_tree,
"name": prediction_spec,
}
elif prediction_spec == "train" or prediction_spec == "valid" or prediction_spec == "test":
# Perform prediction on all trees in the split.
return {
"tree_id": ( prediction_spec, 0 ),
"loading_required": False,
"file_path": None,
"view_paths": None,
"complete_tree": complete_tree,
"name": prediction_spec,
}
else:
# Complex external data specification.
# tree:<PATH>,id:<ID>(;<VID>),view:<ID>(;<VID>):<TYPE>:<PATH>,...
spec_parts = prediction_spec.split(",")
result = {
"tree_id": None,
"loading_required": False,
"file_path": None,
"view_paths": { },
"complete_tree": complete_tree,
"name": prediction_spec,
}
for spec_part in spec_parts:
segments = spec_part.split(":")
if len(segments) < 1:
raise TypeError(f"Prediction specification \"{prediction_spec}\" contains invalid segments!")
if segments[0] == "id":
if ";" in segments[1]:
parts = segments[1].split(";")
segments[1] = ( int(parts[0]), int(parts[1]) )
else:
segments[1] = int(segments[1])
result["tree_id"] = segments[1]
elif segments[0] == "tree":
result["file_path"] = segments[1]
elif segments[0] == "view":
if ";" in segments[1]:
parts = segments[1].split(";")
segments[1] = ( int(parts[0]), int(parts[1]) )
else:
segments[1] = int(segments[1])
if len(segments) >= 4:
result["view_paths"] = update_dict_recursively(
result["view_paths"], {
segments[1]: {
segments[2]: segments[3]
}
},
create_keys=True
)
result["loading_required"] = True
elif len(segments) >= 3:
result["view_paths"] = update_dict_recursively(
result["view_paths"], {
segments[1]: {
segments[2]: None
}
},
create_keys=True
)
result["loading_required"] = False
else:
result["view_paths"] = update_dict_recursively(
result["view_paths"], {
segments[1]: { }
},
create_keys=True
)
result["loading_required"] = False
else:
raise TypeError(f"Unknown segment specifier \"{segments[0]}\" provided in prediction specification!")
#assert(result["tree_id"] is None)
return result
@classmethod
def register_options(cls, parser: Config.Parser):
""" Register configuration options for this class. """
parser.add_argument("--predict-tree",
action="append",
default=[], type=lambda x : PredictionProcessor._parse_prediction_request(x, True),
metavar=("<NUM>|<PRED_SPEC>|train|valid|test"),
dest=cls._add_config_parameter("predict_tree"),
help="Request prediction of tree scores. Use -1 to predict all "
"trees in the loaded set or tree ID for specific ID. Use "
"tree:<PATH>,view:<ID>:<TYPE>:<PATH>,... to load "
"external data. Specify train/valid/test to predict results "
"for training/validation/testing data in the currently loaded "
"data.")
parser.add_argument("--predict-tree-folder",
action="append",
default=[], type=str,
metavar=("INPUT_FOLDER"),
dest=cls._add_config_parameter("predict_tree_folder"),
help="Predict for all trees within given folder.")
parser.add_argument("--predict-views",
action="append",
default=[], type=lambda x : PredictionProcessor._parse_prediction_request(x, False),
metavar=("<NUM>|<PRED_SPEC>|train|valid|test"),
dest=cls._add_config_parameter("predict_views"),
help="Request prediction of view scores. Use -1 to predict all "
"trees in the loaded set or tree ID for specific ID. Use "
"tree:<PATH>,view:<ID>:<TYPE>:<PATH>,... to load "
"external data. Specify train/valid/test to predict results "
"for training/validation/testing data in the currently loaded "
"data.")
parser.add_argument("--predict-views-folder",
action="append",
default=[], type=str,
metavar=("INPUT_FOLDER"),
dest=cls._add_config_parameter("predict_views_folder"),
help="Predict for all trees within given folder.")
parser.add_argument("--predict-views-folder-unstructured",
action="append",
default=[], type=str,
metavar=("INPUT_FOLDER"),
dest=cls._add_config_parameter("predict_views_folder_unstructured"),
help="Predict for all png images in given folder.")
parser.add_argument("--predict-external",
action="append",
default=[], type=lambda x : PredictionProcessor._parse_prediction_request(x, False),
metavar=("<PRED_SPEC>"),
dest=cls._add_config_parameter("predict_external"),
help="Request prediction of view scores. Use -1 to predict all "
"trees in the loaded set or tree ID for specific ID. Use "
"tree:<PATH>,view:<ID>:<TYPE>:<PATH>,... to load "
"external data. Specify train/valid/test to predict results "
"for training/validation/testing data in the currently loaded "
"data.")
parser.add_argument("--predict-external-featurizer",
action="store",
default=None, type=str,
metavar=("<PATH/TO/TreeIOViewer>"),
dest=cls._add_config_parameter("predict_external_featurizer"),
help="Request prediction of scores for external tree file. Use "
"tree:<PATH>,view:<ID>:<TYPE>:<PATH>,... to load external "
"data and automatically perform rendering and feature "
"extraction as necessary. <TYPE> and <PATH> for views are "
"optional, if not specified they are generated automatically.")
parser.add_argument("--predict-external-workdir",
action="store",
default="./featurization_cache/", type=str,
metavar=("<PATH/TO/WORKDIR>"),
dest=cls._add_config_parameter("predict_external_workdir"),
help="Path to work directory used to automatically featurize "
"external tree files using --predict-external.")
parser.add_argument("--predict-external-scores",
action="store",
default=None, type=str,
metavar=("SCORES.CSV"),
dest=cls._add_config_parameter("predict_external_scores"),
help="Location of external scores repository.")
parser.add_argument("--export-predictions",
action="store",
default=None, type=str,
metavar=("PATH/TO/OUTPUT.PRE"),
dest=cls._add_config_parameter("export_predictions"),
help="Export prediction requests into an external storage.")
parser.add_argument("--import-predictions",
action="store",
default=None, type=str,
metavar=("PATH/TO/INPUT.PRE"),
dest=cls._add_config_parameter("import_predictions"),
help="Import prediction requests from an external storage.")
def _featurize_external_spec(self, external_spec: dict) -> dict:
"""
Perform featurization of given external prediction specification and return
record with filled tree_file and tree_views keys.
:param external_spec: Input specification, which will be left unmodified.
:return: Returns the filled specification.
"""
# Make local copy to keep the original intact.
spec = deep_copy_dict(external_spec)
# Use following paths.
cache_path = pathlib.Path(self.c.predict_external_workdir).absolute()
featurizer_path = pathlib.Path(self.c.predict_external_featurizer).absolute()
featurizer_cwd = featurizer_path.parent
# Deduce paths for the input file and its cached variant.
input_tree_path = pathlib.Path(spec["file_path"]).absolute()
input_tree_name = input_tree_path.with_suffix("").name
cache_input_tree_dir = cache_path / "input" / input_tree_name
cache_input_tree_path = cache_input_tree_dir / input_tree_path.name
cache_output_tree_dir = cache_path / "output"
cache_output_tree_path = cache_output_tree_dir / input_tree_name / input_tree_path.name
# Use cached version, if it already exists.
cache_ready = cache_input_tree_path.is_file() and cache_output_tree_path.is_file() and \
filecmp.cmp(input_tree_path, cache_input_tree_path)
if not cache_ready:
# We need to featurize the tree -> Create the working directory structure.
cache_input_tree_dir.mkdir(parents=True, exist_ok=True)
cache_output_tree_dir.mkdir(parents=True, exist_ok=True)
shutil.copyfile(input_tree_path, cache_input_tree_path)
# Execute the featurizer on the target directories.
exe_parameters = "-feature {input_path}:{output_path}:./data/genviews/genviews_start.tbat"
args = [ str(featurizer_path) ] + exe_parameters.format(
input_path=str(cache_input_tree_dir),
output_path=str(cache_output_tree_dir)
).split(" ")
self.__l.info(f"Running featurizer on \"{input_tree_path}\"...")
with subprocess.Popen(args, cwd=featurizer_cwd) as p:
p.wait()
self.__l.info("\tFeaturization complete!")
# Recover a list of views generated by the featurizer.
all_views = { }
for screen_path in cache_output_tree_path.parent.glob("*screen*.png"):
screen_name = str(screen_path.name)
matches = re.match(r".*_screen_(\d+)_(\w+).png", screen_name)
if not matches:
continue
view_id = int(matches.group(1))
view_type = matches.group(2)
all_views = update_dict_recursively(
all_views, { view_id: { view_type: str(screen_path) } },
create_keys=True
)
if len(spec["view_paths"]) == 0:
spec["complete_tree"] = True
spec["view_paths"] = { -1: { } }
else:
spec["complete_tree"] = False
view_paths = { }
for view_id, view_spec in spec["view_paths"].items():
if view_id < 0:
# add all views generated by the featurizer.
view_paths = update_dict_recursively(view_paths, all_views, create_keys=True)
else:
# Add only the single view or use pre-defined path.
if len(view_spec) == 0:
# Add single featurized view.
if view_id not in all_views:
self.__l.error(f"Requested view id {view_id} was not provided, nor created "
f"by featurizer, skipping!")
continue
view_paths = update_dict_recursively(
view_paths, { view_id: all_views[view_id] },
create_keys=True
)
else:
# Add fully specified view.
view_paths = update_dict_recursively(
view_paths, { view_id: view_spec },
create_keys=True
)
# Update the specification with featurized data.
spec["file_path"] = str(cache_output_tree_path)
spec["view_paths"] = view_paths
return spec
def _prepare_prediction_specs(self, tree_specs: List[dict], tree_folders: List[str],
view_specs: List[dict], view_folders: List[str],
image_folders: List[str], external_specs: List[dict]
) -> List[dict]:
"""
Prepare all prediction specifications for prediction.
:param tree_specs: List of requests for tree score prediction.
:param tree_folders: List of folder to get trees to predict from.
:param view_specs: List of requests for view score prediction.
:param view_folders: List of folder to get trees to predict from.
:param image_folders: List of folder to get unstructured images from.
:param external_specs: List of requests for external score predictions.
:return: Returns list of predictions.
"""
def match_tree(name: str, ext: str):
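            # Note: ext is interpolated into the regex unescaped, so its leading '.' matches any character.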
matches = re.match(r"(\d+)_.*" + ext, name)
if not matches:
matches = re.match(r"tree_(\d+)" + ext, name)
if not matches:
matches = re.match(r"tree(\d+)" + ext, name)
if not matches:
matches = re.match(r"(\d+)" + ext, name)
if not matches:
matches = re.match(r"tree(\d+)_branch_(\d+)" + ext, name)
return matches
def find_views(tree_path: Union[str, pathlib.Path]) -> Optional[Dict[int, pathlib.Path]]:
tree_path = pathlib.Path(tree_path)
view_path = tree_path.with_suffix(".png")
if view_path.exists():
return { ( 0, 0 ): view_path }
view_path = pathlib.Path(str(tree_path.with_suffix("")) + "_screen_0_base.png")
if view_path.exists():
idx = 0
result = { }
while view_path.exists():
result[( idx, 0 )] = view_path
idx += 1
view_path = pathlib.Path(str(tree_path.with_suffix("")) + f"_screen_{idx}_base.png")
return result
view_path = pathlib.Path(str(tree_path.with_suffix("")) + "_screen_0.png")
if view_path.exists():
idx = 0
result = { }
while view_path.exists():
result[( idx, 0 )] = view_path
idx += 1
view_path = pathlib.Path(str(tree_path.with_suffix("")) + f"_screen_{idx}.png")
return result
return None
tree_folder_specs = [ ]
for tree_folder in tree_folders:
for tree_path in pathlib.Path(tree_folder).glob("**/*.tree"):
matches = match_tree(name=tree_path.name, ext=".tree")
if not matches:
continue
tree_id = int(matches.group(1))
if len(matches.groups()) >= 2:
variant_id = int(matches.group(2))
else:
variant_id = 0
view_paths = find_views(tree_path)
if view_paths is None:
continue
tree_folder_specs.append({
"tree_id": ( tree_id, variant_id ),
"loading_required": True,
"file_path": str(tree_path),
"view_paths": {
view_idx: { "base": str(view_path) }
for view_idx, view_path in view_paths.items()
},
"complete_tree": True,
"name": tree_path.name,
})
tree_view_specs = [ ]
for tree_folder in view_folders:
for tree_path in pathlib.Path(tree_folder).glob("**/*.tree"):
matches = match_tree(name=tree_path.name, ext=".tree")
if not matches:
continue
tree_id = int(matches.group(1))
if len(matches.groups()) >= 2:
variant_id = int(matches.group(2))
else:
variant_id = 0
view_paths = find_views(tree_path)
if view_paths is None:
continue
tree_view_specs.append({
"tree_id": ( tree_id, variant_id ),
"loading_required": True,
"file_path": str(tree_path),
"view_paths": {
view_idx: { "base": str(view_path) }
for view_idx, view_path in view_paths.items()
},
"complete_tree": False,
"name": tree_path.name,
})
tree_image_specs = [ ]
for image_folder in image_folders:
for image_path in pathlib.Path(image_folder).glob("**/*.png"):
matches = match_tree(name=image_path.name, ext=".png")
if matches:
tree_id = int(matches.group(1))
else:
tree_id = None
tree_image_specs.append({
"tree_id": tree_id,
"loading_required": True,
"file_path": None,
"view_paths": { ( 0, 0 ): { "base": str(image_path) } },
"complete_tree": False,
"name": image_path.name,
})
prediction_specs = [ ]
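        # Externally loaded trees receive fresh ids above the current catalogue maximum.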
id_counter = self._data_loader.tree_catalogue.index.max() + 1
if math.isnan(id_counter):
id_counter = 0
all_tree_ids = self._data_loader.tree_ids
default_splits = self._featurizer.generate_default_splits()
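        # Map a tree id back to the data split (train/valid/test) it belongs to.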
def determine_split(search_id: Tuple[int, int], splits: dict, default_name: str) -> str:
for split_name, split_ids in splits.items():
if search_id in list(split_ids):
return split_name
return default_name
for spec in tree_specs + tree_folder_specs + view_specs + tree_view_specs + tree_image_specs:
if spec["tree_id"] is None:
spec["tree_id"] = id_counter
id_counter += 1
tree_id = spec["tree_id"]
if isinstance(tree_id, int):
# TODO - Add support for tree variants.
tree_id = ( tree_id, 0 )
if isinstance(tree_id[0], str):
# Add all trees from given split:
tree_ids = default_splits[ tree_id ]
elif tree_id[0] < 0:
# Convert -1 to all of the available trees:
if tree_id[1] < 0:
# Keep all variants.
tree_ids = all_tree_ids
else:
# Keep only specified variant.
tree_ids = [
id
for id in all_tree_ids
if id[1] == tree_id[1]
]
else:
# Just use the single tree ID provided.
tree_ids = [ tree_id ]
for tree_id in tree_ids:
tree_id_spec = spec.copy()
tree_id_spec["tree_id"] = tree_id
tree_id_spec["tree_file"] = None
tree_id_spec["tree_views"] = None
tree_id_spec["load_expected_scores"] = not spec["loading_required"]
#tree_id_spec["load_expected_scores"] = True
tree_id_spec["source"] = determine_split(
search_id=tree_id, splits=default_splits,
default_name="external"
) if not spec["loading_required"] else "external"
prediction_specs.append(tree_id_spec)
for spec in external_specs:
if spec["tree_id"] is None:
spec["tree_id"] = id_counter
id_counter += 1
tree_id = spec["tree_id"]
if isinstance(tree_id, int):
# TODO - Add support for tree variants.
tree_id = ( tree_id, 0 )
tree_id_spec = self._featurize_external_spec(external_spec=spec)
tree_id_spec["tree_id"] = tree_id
tree_id_spec["load_expected_scores"] = False
tree_id_spec["source"] = "featurized"
prediction_specs.append(tree_id_spec)
return prediction_specs
def _load_external_scores(self, external_scores_path: str) -> dict:
""" Load external scores from given path. """
score_df = pd.read_csv(external_scores_path, sep=";")
scores = { }
if "condition" in score_df.columns:
# External scores from matlab optimization script.
tree_re = re.compile(r"tree([0-9]+)")
for idx, score_row in score_df.iterrows():
tree_id = int(tree_re.match(score_row["condition"]).group(1))
score_record = {
"jod": float(score_row["jod"]),
"jod_low": float(score_row["jod_low"]),
"jod_high": float(score_row["jod_high"]),
"jod_var": float(score_row["var"]),
}
scores[( tree_id, 0, 0, 0 )] = score_record.copy()
scores[( tree_id, 0, -1, 0 )] = score_record.copy()
else:
raise RuntimeError(f"Unknown format of external scores in given file \"{external_scores_path}\"!")
# TODO - Re-scale the external scores to the same interval the dataset uses?
"""
min_score = np.min([ sc["jod"] for sc in scores.values() ])
max_score = np.max([ sc["jod"] for sc in scores.values() ])
gt_min_score = 1.0
gt_max_score = 4.0294
for idx in scores.keys():
scores[idx]["jod"] = (((scores[idx]["jod"] - min_score) / (max_score - min_score)) * \
(gt_max_score - gt_min_score)) + gt_min_score
"""
return scores
def _prepare_predictions(self, prediction_specs: List[dict],
external_scores_path: Optional[str]) -> List[Prediction]:
"""
Prepare list of requested predictions from given specs.
:param prediction_specs: List of specs for score predictions.
:param external_scores_path: Location of external scores repository.
:return: Returns list of requested predictions.
"""
# Load external scores if requested.
if external_scores_path is not None:
external_scores = self._load_external_scores(external_scores_path=external_scores_path)
else:
external_scores = None
# Resulting list of prediction specifications.
predictions = [ ]
# Caches for repeated loading of the same files.
tree_file_cache = { }
tree_view_cache = { }
load_node_data = self.config["data.load_node_data"]
self.__l.info(f"Preparing {len(prediction_specs)} predictions...")
prediction_mapping = [ ]
loading_process = LoadingBar("", max=len(prediction_specs))
for spec in prediction_specs:
tree_file = None
tree_views = { }
view_types = [ ]
if spec["loading_required"]:
file_path = spec["file_path"]
if file_path is not None:
if file_path not in tree_file_cache:
tree_file_cache[ file_path ] = TreeFile(
file_path=file_path,
load_node=load_node_data
)
tree_file = tree_file_cache[ file_path ]
view_paths = spec.get("view_paths", { })
for view_id, view_spec in view_paths.items():
if isinstance(view_id, int):
view_id = ( view_id, 0 )
if view_id[0] < 0:
view_types += list(view_spec.keys())
continue
for view_type, view_path in view_spec.items():
if view_path not in tree_view_cache:
tree_view_cache[ view_path ] = TreeImage(
image_path=view_path
)
tree_view = tree_view_cache[ view_path ]
tree_views = update_dict_recursively(
tree_views, {
view_id: {
view_type: tree_view
}
},
create_keys=True
)
else:
tree_file = spec["tree_file"]
tree_views = spec["tree_views"]
view_paths = spec.get("view_paths", { })
view_types = [
view_type
for view_id, view_spec in view_paths.items()
for view_type in view_spec.keys()
]
view_ids = list(tree_views.keys()) if (tree_views and not spec["complete_tree"]) else ( -1, 0 )
prediction_mapping.append({
"name": spec["name"],
"tree_id": spec["tree_id"],
"view_ids": view_ids,
})
predictions.append(Prediction(
tree_id=spec["tree_id"], view_ids=view_ids,
tree_file=tree_file, tree_views=tree_views or None,
tree_view_types=view_types or None,
data_loader=self._data_loader,
complete_tree=spec["complete_tree"],
load_expected_scores=spec["load_expected_scores"] or external_scores is not None,
use_dataset_tree=not spec["loading_required"],
external_scores=external_scores,
data_source=spec["source"]
))
loading_process.next(1)
loading_process.finish()
self.__l.info("Prediction Mapping: ")
for pred in prediction_mapping:
self.__l.info(f"\t\"{pred['name']}\" as \"{pred['tree_id']}, {pred['view_ids']}\"")
return predictions
def _import_predictions(self, input_path: str
) -> (List[Prediction], CustomDataLoader):
""" Import predictions from given input path. """
pre_compressed = gzip.open(input_path, "r")
pre_data = pk.load(pre_compressed)
return pre_data["predictions"], pre_data["dataset"]
def _export_predictions(self, predictions: List[Prediction],
predictions_dataset: CustomDataLoader,
output_path: str):
""" Export given predictions into the output path. """
output_path = pathlib.Path(output_path)
output_path.parent.mkdir(parents=True, exist_ok=True)
pre_data = {
"predictions": predictions,
"dataset": predictions_dataset
}
pre_compressed = gzip.open(output_path, "w")
pk.dump(pre_data, pre_compressed, protocol=pk.HIGHEST_PROTOCOL)
def process(self, models: Dict[str, BaseModel]) -> Dict[str, Tuple[BaseModel, List[Prediction]]]:
""" Perform prediction processing operations. """
self.__l.info("Starting prediction processing operations...")
if self.c.predict_external and self.c.predict_external_featurizer is None:
raise RuntimeError("Unable to predict external files, please provided featurizer path!")
if self.c.import_predictions is not None:
predictions, predictions_dataset = self._import_predictions(
input_path=self.c.import_predictions
)
else:
prediction_specs = self._prepare_prediction_specs(
tree_specs=self.c.predict_tree,
tree_folders=self.c.predict_tree_folder,
view_specs=self.c.predict_views,
view_folders=self.c.predict_views_folder,
image_folders=self.c.predict_views_folder_unstructured,
external_specs=self.c.predict_external
)
predictions = self._prepare_predictions(
prediction_specs=prediction_specs,
external_scores_path=self.c.predict_external_scores
)
predictions_dataset = CustomDataLoader(
data={ "predictions": predictions }
)
if self.c.export_predictions is not None:
self._export_predictions(
predictions=predictions,
predictions_dataset=predictions_dataset,
output_path=self.c.export_predictions
)
self.__l.info(f"Prepared {len(predictions)} prediction requests!")
self.__l.info(f"Prepared prediction dataset!")
models_predictions = { }
for model_name, model in models.items():
self.__l.info(f"Predicting using model {model_name}...")
model_predictions = [ ]
for idx, prediction in enumerate(predictions):
self.__l.info(f"Prediction {idx + 1}/{len(predictions)}")
model_prediction = prediction.clean_copy()
model.predict(prediction=model_prediction, data=predictions_dataset)
model_predictions.append(model_prediction)
self.__l.info(f"\t {idx + 1}/{len(predictions)} Done!")
models_predictions[model_name] = (model, model_predictions)
self.__l.info("\tPrediction processing finished!")
return models_predictions
| 43.983932 | 121 | 0.543763 | 5,077 | 46,535 | 4.723656 | 0.087059 | 0.025269 | 0.007089 | 0.007005 | 0.400092 | 0.307773 | 0.265908 | 0.231882 | 0.214244 | 0.20432 | 0 | 0.005121 | 0.366305 | 46,535 | 1,057 | 122 | 44.025544 | 0.808132 | 0.10014 | 0 | 0.311787 | 0 | 0 | 0.126675 | 0.021593 | 0 | 0 | 0 | 0.000946 | 0 | 1 | 0.032953 | false | 0 | 0.050697 | 0 | 0.120406 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c994363f99eee7490fd431fed675e714864a410 | 2,295 | py | Python | glavred_api.py | vikmary/ArticleHelperBot | aaac531ad1ff8d676416163e4b1c958e6a0bff64 | [
"Apache-2.0"
] | null | null | null | glavred_api.py | vikmary/ArticleHelperBot | aaac531ad1ff8d676416163e4b1c958e6a0bff64 | [
"Apache-2.0"
] | null | null | null | glavred_api.py | vikmary/ArticleHelperBot | aaac531ad1ff8d676416163e4b1c958e6a0bff64 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import math
import requests
class GlavRed:
WORDS = re.compile('([А-Яа-яA-Za-z0-9-]+([^А-Яа-яA-Za-z0-9-]+)?)')
def __init__(self):
self.sess = None
self.url = "https://glvrd.ru/api/v0/"
self.headers = \
{"Content-Type": "application/x-www-form-urlencoded"}
def __enter__(self):
self.sess = requests.Session()
return self
def __exit__(self, *args):
self.sess.close()
def get_score(self, texts, score_type):
texts_and_frams = []
for t in texts:
res = self.proofread(t, score_type)
fragments, hints = res['fragments'][0], res['hints']
for i in range(len(fragments)):
hint_id = fragments[i]['hint']
fragments[i]['hint'] = hints[hint_id]
t_f = {'text': t,
'fragments': fragments}
texts_and_frams.append(t_f)
return self._get_score(texts_and_frams)
def proofread(self, text, score_type):
assert score_type in ('red', 'blue')
resp = self.sess.post('{}@proofread/{}/'.format(self.url,
score_type),
headers=self.headers,
data={'chunks': text})
if resp.status_code != 200:
raise Exception("Status code `{}` received for chunks=`{}`"\
.format(resp.status_code, text))
return resp.json()
def _get_score(self, texts_and_frams):
n, r, t = 0, 0, 0
for t_f in texts_and_frams:
t += self.num_words(t_f['text'])
for f in t_f['fragments']:
if f['hint']['penalty']:
n += f['hint']['penalty']
r += f['hint']['weight'] / 100
if not t:
return 0.
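        # Score formula: floor(100 * (1 - r/t)^3) - n, clamped to [0, 100] below.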
score = math.floor(100. * math.pow(1 - r / t, 3)) - n
#print("n = {}, r = {}, t = {}".format(n, r, t))
score = min(max(score, 0.), 100.)
if score % 10 == 0:
return score / 10
else:
return round(score / 10, 1)
@classmethod
def num_words(cls, text):
return len(cls.WORDS.sub('.', text.strip()))
| 30.197368 | 72 | 0.488889 | 281 | 2,295 | 3.839858 | 0.370107 | 0.041705 | 0.060241 | 0.012975 | 0.055607 | 0.018536 | 0 | 0 | 0 | 0 | 0 | 0.023761 | 0.35817 | 2,295 | 75 | 73 | 30.6 | 0.708758 | 0.039216 | 0 | 0 | 0 | 0.017857 | 0.12 | 0.035 | 0 | 0 | 0 | 0 | 0.017857 | 1 | 0.125 | false | 0 | 0.053571 | 0.017857 | 0.339286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c99f5d5be16c305a88c9fd25acdc419446a3f26 | 2,003 | py | Python | topics/Tree/Sum_of_Root_To_Leaf_Binary_Numbers_1022/[Iterative_DFS_stack_with_tuple]_Sum_of_Root_To_Leaf_Binary_Numbers_1022.py | DmitryNaimark/leetcode-solutions-python | 16af5f3a9cb8469d82b14c8953847f0e93a92324 | [
"MIT"
] | 1 | 2019-10-31T11:06:23.000Z | 2019-10-31T11:06:23.000Z | topics/Tree/Sum_of_Root_To_Leaf_Binary_Numbers_1022/[Iterative_DFS_stack_with_tuple]_Sum_of_Root_To_Leaf_Binary_Numbers_1022.py | DmitryNaimark/leetcode-solutions-python | 16af5f3a9cb8469d82b14c8953847f0e93a92324 | [
"MIT"
] | null | null | null | topics/Tree/Sum_of_Root_To_Leaf_Binary_Numbers_1022/[Iterative_DFS_stack_with_tuple]_Sum_of_Root_To_Leaf_Binary_Numbers_1022.py | DmitryNaimark/leetcode-solutions-python | 16af5f3a9cb8469d82b14c8953847f0e93a92324 | [
"MIT"
] | null | null | null | # https://leetcode.com/problems/sum-of-root-to-leaf-binary-numbers/
# ---------------------------------------------------
from collections import deque
class TreeNode:
def __init__(self, val):
self.val = val
self.left = self.right = None
# Runtime Complexity: O(N)
# Space Complexity: O(max_depth), which is O(N) in the worst case (skewed tree) or O(log N) for a balanced tree.
class Solution:
def sumRootToLeaf(self, node: TreeNode) -> int:
if not node:
return 0
stack = deque()
stack.append((node, node.val))
total_sum = 0
while stack:
node, cur_sum = stack.pop()
if not node.left and not node.right:
total_sum += cur_sum
if node.left:
stack.append((node.left, cur_sum * 2 + node.left.val))
if node.right:
stack.append((node.right, cur_sum * 2 + node.right.val))
return total_sum
# ---------------------------------------------------
# Uses DN functions:
# ---------------------------------------------------
def createBinaryTreeFromArray(arr):
if arr is None or len(arr) == 0:
return None
root_node = TreeNode(arr[0])
q = deque()
q.append(root_node)
i = 1
while q and i < len(arr):
node = q.popleft()
if node:
if arr[i] is not None:
node.left = TreeNode(arr[i])
q.append(node.left)
i += 1
if i < len(arr) and arr[i] is not None:
node.right = TreeNode(arr[i])
q.append(node.right)
i += 1
return root_node
# ---------------------------------------------------
# Test Cases
# ---------------------------------------------------
solution = Solution()
print(solution.sumRootToLeaf(createBinaryTreeFromArray([1, 0, 1, 0, 1, 0, 1])))
print(solution.sumRootToLeaf(createBinaryTreeFromArray([1, None, 0])))
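# Expected output (hand-computed sanity check for the two trees above):
# [1, 0, 1, 0, 1, 0, 1] -> leaf paths 100, 101, 110, 111 -> 4 + 5 + 6 + 7 = 22
# [1, None, 0]          -> single path 10 -> 2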
| 27.067568 | 116 | 0.481777 | 230 | 2,003 | 4.126087 | 0.3 | 0.05058 | 0.047418 | 0.023182 | 0.200211 | 0.084299 | 0 | 0 | 0 | 0 | 0 | 0.012703 | 0.292561 | 2,003 | 73 | 117 | 27.438356 | 0.657022 | 0.264104 | 0 | 0.046512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069767 | false | 0 | 0.023256 | 0 | 0.232558 | 0.046512 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c9bd139a54c7ce6d1cd42e45754183bd0d46282 | 19,330 | py | Python | test_roam.py | jmurty/roam | e8f89619f7d809c17316891ce3259f1e5210a44c | [
"Apache-2.0"
] | 3 | 2019-06-03T13:49:21.000Z | 2021-11-24T06:39:37.000Z | test_roam.py | jmurty/roam | e8f89619f7d809c17316891ce3259f1e5210a44c | [
"Apache-2.0"
] | 1 | 2019-06-18T01:26:08.000Z | 2019-06-18T01:26:08.000Z | test_roam.py | jmurty/roam | e8f89619f7d809c17316891ce3259f1e5210a44c | [
"Apache-2.0"
] | null | null | null | import pytest
from roam import r, r_strict, MISSING, Roamer, RoamPathException
class DataTester:
""" Class to convert dict data to object with attributes """
def __init__(self, **kwargs):
self.kwargs = kwargs
for n, v in kwargs.items():
if n.startswith("_"):
n = n[1:]
setattr(self, n, v)
def __dir__(self):
return self.kwargs.keys()
@property
def as_dict(self):
return self.kwargs
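# Example (illustrative): DataTester exposes keyword arguments as attributes,
# stripping a single leading underscore so Python keywords such as 'from' can
# be modelled, e.g. getattr(DataTester(_from=1969, to=1974), 'from') == 1969.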
# Amended subset of GitHub user data for 'jmurty' from
# https://api.github.com/users/jmurty/repos
github_data = [
{
"name": "java-xmlbuilder",
"full_name": "jmurty/java-xmlbuilder",
"private": False,
"owner": {
"login": "jmurty",
"url": "https://api.github.com/users/jmurty",
"type": "User",
"fn": lambda message: message, # Added to test in-data callable
},
"description": "XML Builder is a utility that allows simple XML documents to be constructed using relatively sparse Java code",
"fork": False,
"url": "https://api.github.com/repos/jmurty/java-xmlbuilder",
"created_at": "2014-03-05T22:48:04Z",
"updated_at": "2019-04-29T14:03:24Z",
"pushed_at": "2017-09-01T13:08:26Z",
"homepage": None,
"size": 164,
"language": "Java",
"archived": False,
"disabled": False,
"open_issues_count": 0,
"license": {
"key": "apache-2.0",
"name": "Apache License 2.0",
"spdx_id": "Apache-2.0",
"url": "https://api.github.com/licenses/apache-2.0",
},
"forks": 15,
"open_issues": 0,
"watchers": 85,
"default_branch": "master",
},
{
"name": "xml4h",
"full_name": "jmurty/xml4h",
"private": False,
"owner": {
"login": "jmurty",
"url": "https://api.github.com/users/jmurty",
"type": "User",
},
"description": "XML for Humans in Python",
"fork": False,
"url": "https://api.github.com/repos/jmurty/xml4h",
"created_at": "2012-08-14T13:49:43Z",
"updated_at": "2018-09-21T16:15:44Z",
"pushed_at": "2015-07-13T15:07:28Z",
"homepage": "xml4h.readthedocs.org",
"size": 663,
"language": "Python",
"archived": False,
"disabled": False,
"open_issues_count": 6,
"license": {
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit",
},
"forks": 2,
"open_issues": 6,
"watchers": 37,
"default_branch": "master",
},
]
github_data0 = github_data[0]
# Select data from https://en.wikipedia.org/wiki/Monty_Python_filmography
python_filmography = [
DataTester(
title="Monty Python's Flying Circus",
type="tv",
years=DataTester(_from=1975, to=1975),
writers=[
DataTester(name="Monty Python", group=True),
DataTester(name="Neil Innes"),
DataTester(name="Douglas Adams"),
],
),
DataTester(
title="Monty Python and the Holy Grail",
type="movie",
years=DataTester(_from=1969, to=1974),
writers=[DataTester(name="Monty Python", group=True)],
),
]
class TestRoamer:
def test_missing_has_rich_falsey_behaviour(self):
assert not MISSING
assert len(MISSING) == 0
for _ in MISSING:
pytest.fail("Shouldn't be able to iterate over MISSING")
def test_getattr_traversal(self):
assert r(github_data0).license == github_data0["license"]
assert r(github_data0).license.name == github_data0["license"]["name"]
assert r(github_data0).license.name # Truthy
def test_attr_traversal_missing(self):
assert r(github_data0).x == MISSING
assert r(github_data0).license.x == MISSING
assert not r(github_data0).license.x # Falsey
assert not r(github_data0).license.x.y # Falsey
# Confirm the underlying item is the MISSING singleton
assert r(github_data0).license.x() is MISSING
assert r(github_data0).license.x() == MISSING
assert not r(github_data0).license.x() # Falsey
def test_getitem_traversal(self):
assert r(github_data0)["license"] == github_data0["license"]
assert r(github_data0)["license"]["name"] == github_data0["license"]["name"]
assert r(github_data0).license.name[0] == github_data0["license"]["name"][0]
assert r(github_data0).license.name[-1] == github_data0["license"]["name"][-1]
assert r(github_data0).license.name[-1] # Truthy
def test_getitem_traversal_missing(self):
assert r(github_data0)["x"] == MISSING
assert r(github_data0)["license"]["x"] == MISSING
assert r(github_data0)["license"]["name"]["x"] == MISSING
assert not r(github_data0)["license"]["name"]["x"] # Falsey
assert not r(github_data0)["license"]["name"]["x"]["y"] # Falsey
# Confirm the underlying item is the MISSING singleton
assert r(github_data0)["license"]["name"]["x"]() is MISSING
assert r(github_data0)["license"]["name"]["x"]() == MISSING
assert not r(github_data0)["license"]["name"]["x"]() # Falsey
def test_getattr_and_getitem_traversal(self):
assert r(github_data0).license["name"] == github_data0["license"]["name"]
assert r(github_data0)["license"].name == github_data0["license"]["name"]
assert r(github_data0)["license"].name[1] == github_data0["license"]["name"][1]
def test_getattr_and_getitem_traversal_missing(self):
assert r(github_data0).license["x"] == MISSING
assert r(github_data0)["license"].x == MISSING
# Confirm the underlying item is the MISSING singleton
assert r(github_data0).license["x"]() is MISSING
assert r(github_data0)["license"].x() is MISSING
def test_fail_fast(self):
with pytest.raises(RoamPathException) as ex:
r(github_data0, _raise=True).x
assert (
str(ex.value)
== "<RoamPathException: missing step 1 .x for path <dict>.x at <dict>"
" with keys ['name', 'full_name', 'private', 'owner', 'description', 'fork',"
" 'url', 'created_at', 'updated_at', 'pushed_at', 'homepage', 'size', 'language',"
" 'archived', 'disabled', 'open_issues_count', 'license', 'forks', 'open_issues',"
" 'watchers', 'default_branch']>"
)
with pytest.raises(RoamPathException) as ex:
r(github_data0, _raise=True).license["x"]
assert (
str(ex.value)
== "<RoamPathException: missing step 2 ['x'] for path <dict>.license['x'] at <dict>"
" with keys ['key', 'name', 'spdx_id', 'url']>"
)
with pytest.raises(RoamPathException) as ex:
r(github_data0, _raise=True)["license"].name.x
assert (
str(ex.value)
== "<RoamPathException: missing step 3 .x for path <dict>['license'].name.x at <str>>"
)
def test_slice_traversal(self):
assert r(github_data)[:] == github_data[:]
assert r(github_data)[1:] == github_data[1:]
def test_slice_traversal_missing(self):
assert len(github_data) == 2
assert r(github_data)[3] == MISSING
assert r(github_data)[3]() is MISSING
assert r(github_data)[3:] == []
assert r(github_data)[2:4] == github_data[2:4]
def test_call_returns_item(self):
assert r(github_data0).license.name() is github_data0["license"]["name"]
assert r(github_data0)["license"]["name"]() is github_data0["license"]["name"]
def test_call_returns_item_missing(self):
assert r(github_data0).x() == MISSING
assert r(github_data0)["x"]() is MISSING
assert r(github_data0).license.x() == MISSING
assert r(github_data0)["name"].x() is MISSING
def test_call_raise_on_item_missing(self):
with pytest.raises(RoamPathException) as ex:
r(github_data0).x(_raise=True)
assert (
str(ex.value)
== "<RoamPathException: missing step 1 .x for path <dict>.x at <dict>"
" with keys ['name', 'full_name', 'private', 'owner', 'description', 'fork',"
" 'url', 'created_at', 'updated_at', 'pushed_at', 'homepage', 'size', 'language',"
" 'archived', 'disabled', 'open_issues_count', 'license', 'forks', 'open_issues',"
" 'watchers', 'default_branch']>"
)
with pytest.raises(RoamPathException) as ex:
r(github_data0).license["name"].x(_raise=True)
assert (
str(ex.value)
== "<RoamPathException: missing step 3 .x for path <dict>.license['name'].x at <str>>"
)
with pytest.raises(RoamPathException) as ex:
r(github_data0, _raise=True).license["name"].x()
assert (
str(ex.value)
== "<RoamPathException: missing step 3 .x for path <dict>.license['name'].x at <str>>"
)
# Test standard exception raised if user tries to call uncallable data
with pytest.raises(TypeError) as ex:
r(github_data0, _raise=True).license(1, 2, 3)
assert str(ex.value) == "'dict' object is not callable"
with pytest.raises(TypeError) as ex:
r(github_data0, _raise=True).description(bad="argument")
assert str(ex.value) == "'str' object is not callable"
def test_call_delegates_to_and_returns_item(self):
# Delegate to methods on `dict` item
assert r(github_data0)["license"]["items"]() == github_data0["license"].items()
assert r(github_data0).license.keys() == github_data0["license"].keys()
assert list(r(github_data0).license.values()) == list(
github_data0["license"].values()
)
# Delegate to methods on `str` item
assert r(github_data0).license.url.split("/") == [
"https:",
"",
"api.github.com",
"licenses",
"apache-2.0",
]
assert type(r(github_data0).license.url.split("/")) is list
# Delegate to callable item within traversal
assert r(github_data0).owner.fn("Hi") == "Hi"
assert r(github_data0).owner.fn(999) == 999
def test_call_with_roam_option(self):
assert isinstance(r(github_data0).license.items(_roam=True), Roamer)
assert (
r(github_data0).license.items(_roam=True) == github_data0["license"].items()
)
def test_call_on_missing_with_roam_option(self):
assert isinstance(r(github_data0).x.items(_roam=True), Roamer)
assert r(github_data0).x.items(_roam=True) == MISSING
def test_call_with_invoke_option(self):
assert r(github_data0).owner.login(_invoke=len) == 6
def test_call_with_invoke_and_roam_options(self):
assert isinstance(r(github_data0).owner(_invoke=len, _roam=True), Roamer)
assert r(github_data0).owner(_invoke=len, _roam=True) == 4
def test_iterator_traversal(self):
for i, item_roamer in enumerate(r(github_data)):
assert isinstance(item_roamer, Roamer)
assert item_roamer.name == github_data[i]["name"]
assert [
owner_fn("Hello world!") for owner_fn in r(github_data)[:].owner.fn
] == ["Hello world!"]
assert [
owner.fn("Hello world!") for owner in r(github_data)[:].owner if owner.fn
] == ["Hello world!"]
assert [
writer.name()
for writer in r(python_filmography)[:].writers
if not writer.group
] == ["Neil Innes", "Douglas Adams"]
# Trying to iterate over a non-iterable
for _ in r(github_data0).size:
pytest.fail("Shouldn't be able to iterate over int")
for _ in r(github_data0).fork:
pytest.fail("Shouldn't be able to iterate over bool")
def test_iterator_traversal_missing(self):
for _ in r(github_data0).x:
pytest.fail("Shouldn't be able to iterate over MISSING")
for _ in r(github_data0).license.name.x:
pytest.fail("Shouldn't be able to iterate over MISSING")
with pytest.raises(RoamPathException) as ex:
r_strict(github_data0).license.name.x
assert (
str(ex.value)
== "<RoamPathException: missing step 3 .x for path <dict>.license.name.x at <str>>"
)
def test_nested_iterable_traversal(self):
assert r(github_data)[:]["owner"]["login"] == ("jmurty", "jmurty")
assert r(python_filmography)[:]["title"] == (
"Monty Python's Flying Circus",
"Monty Python and the Holy Grail",
)
assert r(python_filmography)[:]["writers"]["name"] == (
"Monty Python",
"Neil Innes",
"Douglas Adams",
"Monty Python",
)
assert r(python_filmography)[:].writers.name == (
"Monty Python",
"Neil Innes",
"Douglas Adams",
"Monty Python",
)
# Check slices work within multi-item processing
assert (
r(python_filmography)[:].writers.name[:]()
== r(python_filmography)[:].writers.name()
)
assert (
r(python_filmography)[:]["writers"]["name"][:]()
== r(python_filmography)[:]["writers"]["name"]()
)
assert r(python_filmography)[:].writers.name[1:-1] == (
"Neil Innes",
"Douglas Adams",
)
# Check integer lookups work within multi-item processing
assert r(python_filmography)[:]["writers"][1]["name"] == "Neil Innes"
assert r(python_filmography)[:].writers[1].name == "Neil Innes"
def test_nested_iterable_traversal_missing(self):
# Referencing missing attr/keys results in an empty tuple
assert r(python_filmography)[:].x == tuple()
assert r(python_filmography)[:]["x"] == tuple()
assert r(python_filmography)[:].title.x == tuple()
assert r(python_filmography)[:]["title"]["x"] == tuple()
# Referencing *sometimes* missing attr/keys results in a partial tuple
assert len(r(python_filmography)[:].writers) == 4
assert r(python_filmography)[:].writers.group == (True, True)
assert r(python_filmography)[:]["writers"]["group"] == (True, True)
# Lookup missing n-th item in a nested collection
assert r(python_filmography)[:].writers.group[2] == MISSING
with pytest.raises(RoamPathException) as ex:
r(python_filmography)[:].writers.group[2](_raise=True)
assert (
str(ex.value)
== "<RoamPathException: missing step 4 [2] for path <list>[:].writers.group[2] at <tuple> with length 2>"
)
def test_roamer_equality(self):
assert r(python_filmography)[:].writers == r(python_filmography)[:].writers
def test_roamer_len(self):
# Standard length lookup of list
assert len(r(python_filmography)) == 2
# Report 1 for a current item that doesn't support `len()`, instead of raising TypeError
assert len(r(python_filmography)[0]) == 1
with pytest.raises(TypeError):
len(python_filmography[0])
# Report zero from MISSING item
x = r(python_filmography).x
assert len(x) == 0
assert x == MISSING
def test_path_reporting(self):
assert (
str(r(github_data0).license.name)
== "<Roamer: <dict>.license.name => 'Apache License 2.0'>"
)
assert (
str(r(github_data0)["license"]["name"])
== "<Roamer: <dict>['license']['name'] => 'Apache License 2.0'>"
)
assert (
str(r(github_data0)["license"].name)
== "<Roamer: <dict>['license'].name => 'Apache License 2.0'>"
)
assert (
str(r(python_filmography)[:].writers[2]["as_dict"])
== "<Roamer: <list>[:].writers[2]['as_dict'] => {'name': 'Douglas Adams'}>"
)
assert (
str(r(python_filmography)[:].writers[2]["name"])
== "<Roamer: <list>[:].writers[2]['name'] => 'Douglas Adams'>"
)
assert (
str(r(python_filmography)[:].writers[3]["name"])
== "<Roamer: <list>[:].writers[3]['name'] => 'Monty Python'>"
)
assert (
str(r(python_filmography)[:].writers[2]["age"])
== "<Roamer: missing step 4 ['age'] for path <list>[:].writers[2]['age'] at <DataTester> with attrs [name] => <MISSING>>"
)
assert (
str(r(github_data0).license["x"])
== "<Roamer: missing step 2 ['x'] for path <dict>.license['x'] at <dict>"
" with keys ['key', 'name', 'spdx_id', 'url'] => <MISSING>>"
)
assert (
str(r(python_filmography)[0].writers.name)
== "<Roamer: missing step 3 .name for path <list>[0].writers.name at <list> => <MISSING>>"
)
def test_path_survives_roamer_reuse(self):
roamer = r(github_data)
assert (
str(roamer[0].license.name)
== "<Roamer: <list>[0].license.name => 'Apache License 2.0'>"
)
assert (
str(roamer[1]["license"].name)
== "<Roamer: <list>[1]['license'].name => 'MIT License'>"
)
with pytest.raises(RoamPathException) as ex:
roamer[1].license.x(_raise=True)
assert (
str(ex.value)
== "<RoamPathException: missing step 3 .x for path <list>[1].license.x at <dict> with keys ['key', 'name', 'spdx_id', 'url']>"
)
with pytest.raises(RoamPathException) as ex:
roamer[1].license["name"].x(_raise=True)
assert (
str(ex.value)
== "<RoamPathException: missing step 4 .x for path <list>[1].license['name'].x at <str>>"
)
def test_roamer_clone_via_init(self):
r_license = r(github_data)[0].license
assert r_license == {
"key": "apache-2.0",
"name": "Apache License 2.0",
"spdx_id": "Apache-2.0",
"url": "https://api.github.com/licenses/apache-2.0",
}
# Ensure clone is identical in all ways that matter
r_clone = r(r_license)
assert r_clone == r_license
assert r_clone._r_item_ == r_license._r_item_
assert r_clone._r_is_multi_item_ == r_license._r_is_multi_item_
assert r_clone._r_raise_ == r_license._r_raise_ is False
# Can override raise status in clone
r_clone = r(r_license, _raise=True)
assert r_clone._r_raise_ is True
with pytest.raises(RoamPathException) as ex:
r_clone.wrong
assert (
str(ex.value)
== "<RoamPathException: missing step 3 .wrong for path <list>[0].license.wrong"
" at <dict> with keys ['key', 'name', 'spdx_id', 'url']>"
)
| 35.796296 | 138 | 0.573047 | 2,304 | 19,330 | 4.652344 | 0.124132 | 0.085176 | 0.073887 | 0.077992 | 0.671984 | 0.613677 | 0.546786 | 0.505644 | 0.449576 | 0.398545 | 0 | 0.022232 | 0.276306 | 19,330 | 539 | 139 | 35.862709 | 0.744013 | 0.061614 | 0 | 0.279805 | 0 | 0.03163 | 0.256145 | 0.024416 | 0 | 0 | 0 | 0 | 0.284672 | 1 | 0.072993 | false | 0 | 0.004866 | 0.004866 | 0.087591 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c9e65fad55451aba8c3a7aea31c705cc85f2e0b | 1,109 | py | Python | book/src/ch05/src/decorator_parametrized_2.py | zangyuchen2008/Clean-Code-in-Python-Second-Edition | 0be2e41f6cf7322e12ec55d76135ff398df61b4a | [
"MIT"
] | 133 | 2016-07-22T15:16:16.000Z | 2022-03-29T22:39:40.000Z | book/src/ch05/src/decorator_parametrized_2.py | zangyuchen2008/Clean-Code-in-Python-Second-Edition | 0be2e41f6cf7322e12ec55d76135ff398df61b4a | [
"MIT"
] | 137 | 2021-01-05T11:21:04.000Z | 2022-03-31T11:10:11.000Z | book/src/ch05/src/decorator_parametrized_2.py | zangyuchen2008/Clean-Code-in-Python-Second-Edition | 0be2e41f6cf7322e12ec55d76135ff398df61b4a | [
"MIT"
] | 41 | 2020-12-29T04:46:14.000Z | 2022-03-20T22:36:17.000Z | """Clean Code in Python - Chapter 5: Decorators
Parametrized decorators using callable objects.
"""
from functools import wraps
from typing import Optional, Sequence, Type
from decorator_function_1 import ControlledException
from log import logger
_DEFAULT_RETRIES_LIMIT = 3
class WithRetry:
def __init__(
self,
retries_limit: int = _DEFAULT_RETRIES_LIMIT,
allowed_exceptions: Optional[Sequence[Type[Exception]]] = None,
) -> None:
self.retries_limit = retries_limit
self.allowed_exceptions = allowed_exceptions or (ControlledException,)
def __call__(self, operation):
@wraps(operation)
def wrapped(*args, **kwargs):
last_raised = None
for _ in range(self.retries_limit):
try:
return operation(*args, **kwargs)
except self.allowed_exceptions as e:
logger.warning(
"retrying %s due to %s", operation.__qualname__, e
)
last_raised = e
raise last_raised
return wrapped
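# Example usage (illustrative; `run_with_custom_retries_limit` and `task` are
# made-up names): the callable object is instantiated with its parameters and
# the resulting instance is applied as the decorator.
@WithRetry(retries_limit=5)
def run_with_custom_retries_limit(task):
    logger.info("running %s", task)
    return task.run()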
| 29.184211 | 78 | 0.616772 | 113 | 1,109 | 5.769912 | 0.530973 | 0.110429 | 0.07362 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003958 | 0.316501 | 1,109 | 37 | 79 | 29.972973 | 0.856201 | 0.083859 | 0 | 0 | 0 | 0 | 0.020813 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.148148 | 0 | 0.37037 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c9f57720a9347d49afb7fc2b6e4170c9532b4ee | 6,656 | py | Python | iidada/segment.py | veritas501/iidada | da26a8375ddf092d04b77a8cb6a33b24afc2b5c5 | [
"MIT"
] | 28 | 2022-01-11T12:43:25.000Z | 2022-01-31T09:30:57.000Z | iidada/segment.py | veritas501/iidada | da26a8375ddf092d04b77a8cb6a33b24afc2b5c5 | [
"MIT"
] | null | null | null | iidada/segment.py | veritas501/iidada | da26a8375ddf092d04b77a8cb6a33b24afc2b5c5 | [
"MIT"
] | null | null | null | # -*- coding: utf8 -*-
"""
Dump or load segment info
"""
import copy
import ida_auto
import ida_bytes
import ida_idp
import ida_segment
import ida_segregs
import iidada.log
log = iidada.log.get_logger("segment")
def get_segm_ori_name(seg):
name = ida_segment.get_segm_name(seg)
name_list = name.split(':')
if len(name_list) == 1:
return name
else:
return ':'.join(name_list[1:])
def get_segments_by_name(name):
segments = []
seg_cnt = ida_segment.get_segm_qty()
for i in range(seg_cnt):
seg = ida_segment.getnseg(i)
if get_segm_ori_name(seg) == name:
ida_segment.lock_segm(seg, True)
segments.append(seg)
return segments
def lock_all_segments():
seg_cnt = ida_segment.get_segm_qty()
for i in range(seg_cnt):
seg = ida_segment.getnseg(i)
if not ida_segment.is_segm_locked(seg):
ida_segment.lock_segm(seg, True)
def get_max_selector_id():
return max([
ida_segment.getn_selector(i)[1] for i in # type like: [True, 23, 0]
range(ida_segment.get_selector_qty())
])
def dump_sreg(seg):
first_sreg_id = ida_idp.ph_get_reg_first_sreg()
last_sreg_id = ida_idp.ph_get_reg_last_sreg()
sreg_width = ida_idp.ph_get_segreg_size()
sreg_data = dict()
for sreg_id in range(first_sreg_id, last_sreg_id + 1):
sreg_name = ida_idp.get_reg_name(sreg_id, sreg_width)
sreg_val = ida_segregs.get_sreg(seg.start_ea, sreg_id)
sreg_data[sreg_name] = sreg_val
return sreg_data
class IDASegInfo:
def __init__(self, **kwargs) -> None:
self.seg_name = None
self.seg_class = None
self.seg_start_ea = 0
self.seg_end_ea = 0
self.seg_align = 0
self.seg_perm = 0
self.seg_bits = 0
self.seg_comb = 0
self.seg_flags = 0
self.seg_para = 0
self.seg_type = 0
self.seg_sreg = None
self._seg_data = None
if val := kwargs.get('seg_name'):
self.seg_name = val
if val := kwargs.get('seg_class'):
self.seg_class = val
if val := kwargs.get('seg_start_ea'):
self.seg_start_ea = val
if val := kwargs.get('seg_end_ea'):
self.seg_end_ea = val
if val := kwargs.get('seg_align'):
self.seg_align = val
if val := kwargs.get('seg_perm'):
self.seg_perm = val
if val := kwargs.get('seg_bits'):
self.seg_bits = val
if val := kwargs.get('seg_comb'):
self.seg_comb = val
if val := kwargs.get('seg_flags'):
self.seg_flags = val
if val := kwargs.get('seg_para'):
self.seg_para = val
if val := kwargs.get('seg_type'):
self.seg_type = val
if val := kwargs.get('seg_sreg'):
self.seg_sreg = copy.copy(val)
@property
def seg_data(self):
return self._seg_data
@seg_data.setter
def seg_data(self, data: bytes):
self._seg_data = copy.copy(data)
def dump_segments():
seg_cnt = ida_segment.get_segm_qty()
if not seg_cnt:
print("[-] no segment found")
return None
lock_all_segments()
segment_infos = []
for i in range(seg_cnt):
seg = ida_segment.getnseg(i)
seg_name = ida_segment.get_segm_name(seg)
seg_class = ida_segment.get_segm_class(seg)
seg_start_ea = seg.start_ea
seg_end_ea = seg.end_ea
seg_align = seg.align
seg_perm = seg.perm
seg_bits = seg.bitness
seg_comb = seg.comb
seg_flags = seg.flags
seg_para = ida_segment.get_segm_para(seg)
seg_type = seg.type
seg_sreg = dump_sreg(seg)
seg_size = seg_end_ea - seg_start_ea
if seg_size > 0:
seg_first_bit_loaded = ida_bytes.is_loaded(seg_start_ea)
else:
seg_first_bit_loaded = False
if seg_first_bit_loaded:
seg_data = ida_bytes.get_bytes(seg_start_ea, seg_size)
else:
seg_data = None
ida_segment_info = IDASegInfo(
seg_name=seg_name,
seg_class=seg_class,
seg_start_ea=seg_start_ea,
seg_end_ea=seg_end_ea,
seg_align=seg_align,
seg_perm=seg_perm,
seg_bits=seg_bits,
seg_comb=seg_comb,
seg_flags=seg_flags,
seg_para=seg_para,
seg_type=seg_type,
seg_sreg=seg_sreg,
)
ida_segment_info.seg_data = seg_data
segment_infos.append(ida_segment_info)
return segment_infos
# noinspection PyPropertyAccess
def load_segments(segment_infos, root_filename):
ida_auto.auto_wait()
max_sel_id = get_max_selector_id()
next_sel_id = max_sel_id + 1
for segment_info in segment_infos:
log.debug('Try to add segment `{}` @ [{}, {}]'.format(
root_filename + ':' + segment_info.seg_name,
hex(segment_info.seg_start_ea),
hex(segment_info.seg_end_ea),
))
# recover selector, and give it a new id
sel_id = next_sel_id
next_sel_id += 1
ida_segment.set_selector(
sel_id, segment_info.seg_para)
ida_seg = ida_segment.segment_t()
ida_seg.start_ea = segment_info.seg_start_ea
ida_seg.end_ea = segment_info.seg_end_ea
ida_seg.sel = sel_id
ida_seg.align = segment_info.seg_align
ida_seg.perm = segment_info.seg_perm
ida_seg.comb = segment_info.seg_comb
ida_seg.bitness = segment_info.seg_bits
ida_seg.flags = segment_info.seg_flags
ida_seg.type = segment_info.seg_type
ans = ida_segment.add_segm_ex(
ida_seg,
root_filename + ':' + segment_info.seg_name,
segment_info.seg_class,
ida_segment.ADDSEG_NOSREG
)
if not ans:
log.error('Add segment `{}` @ [{}, {}] failed'.format(
root_filename + ':' + segment_info.seg_name,
hex(segment_info.seg_start_ea),
hex(segment_info.seg_end_ea),
))
return False
# recover sreg
for sreg_name, sreg_val in segment_info.seg_sreg.items():
ida_segregs.set_default_sreg_value(
ida_seg,
ida_idp.str2reg(sreg_name),
sreg_val
)
# recover segment bytes
if segment_info.seg_data:
ida_bytes.put_bytes(
segment_info.seg_start_ea,
segment_info.seg_data
)
return True
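# Minimal usage sketch (illustrative; the pickle path and target name are
# assumptions, and both calls must run inside an IDA Pro session):
# import pickle
# seg_infos = dump_segments()
# with open('segments.pkl', 'wb') as fp:
#     pickle.dump(seg_infos, fp)
# ... later, in another database ...
# with open('segments.pkl', 'rb') as fp:
#     load_segments(pickle.load(fp), 'target_binary')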
| 29.321586 | 76 | 0.599309 | 938 | 6,656 | 3.873134 | 0.13113 | 0.052023 | 0.084778 | 0.046243 | 0.33526 | 0.314066 | 0.220479 | 0.197358 | 0.158547 | 0.158547 | 0 | 0.004567 | 0.309195 | 6,656 | 226 | 77 | 29.451327 | 0.785559 | 0.026442 | 0 | 0.134409 | 0 | 0 | 0.031699 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053763 | false | 0 | 0.037634 | 0.010753 | 0.150538 | 0.005376 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ca05b4141ce52b954fafe094e39c727b4d1ade0 | 6,066 | py | Python | app.py | longclothe/python-docs-hello-world | 91e259454103a8536a7c6aadaf88368ddf6970a9 | [
"MIT"
] | null | null | null | app.py | longclothe/python-docs-hello-world | 91e259454103a8536a7c6aadaf88368ddf6970a9 | [
"MIT"
] | null | null | null | app.py | longclothe/python-docs-hello-world | 91e259454103a8536a7c6aadaf88368ddf6970a9 | [
"MIT"
] | 1 | 2021-06-21T05:58:36.000Z | 2021-06-21T05:58:36.000Z | # from flask import Flask
# app = Flask(__name__)
# @app.route("/")
# def hello():
# return "Hello, yangfan chen!"
# coding:utf-8
from flask import Flask,render_template,request,redirect,url_for
from numpy.core.numeric import NaN
from werkzeug.utils import escape, secure_filename
import os
import pandas as pd
app = Flask(__name__)
@app.route('/')
def first():
df = pd.read_csv('static/uploads/names.csv')
df.Picture = "<img src = \"static/uploads/"+df['Picture']+"\"height=\"100\" width = \"100\"/>"
df.insert(df.shape[1],"Change",["" for i in range(df.shape[0])])
for i in range(df.shape[0]):
df["Change"][i]="<a href=\"/change_grade_picture?name="+"\'"+df['Name'][i]+"\'"+"\"><button>Click it !</button></a>"
return "<font size=\"6\" color=\"red\">This is 1001889074 Yangfan Chen</font>"+"<br>"+df.to_html(escape=False)
# return render_template('index.html')
@app.route('/upload', methods=['POST', 'GET'])
def upload():
if request.method == 'POST':
f = request.files['select']
basepath = os.path.dirname(__file__)
upload_path = os.path.join(basepath,'static/uploads',secure_filename(f.filename))
f.save(upload_path)
return redirect(url_for('upload'))
names = str(os.listdir('static/uploads'))
return render_template('upload.html')+"UPLOADS:"+names
@app.route("/table")
def table():
data = pd.read_csv("static/uploads/names.csv")
return data.to_html()
@app.route('/search')
def search():
# key = request.args.get('wd')
# print(key)
return "<font size=\"6\" color=\"red\">This is 1001889074 Yangfan Chen</font>"+"<br>"+render_template('search.html')
@app.route('/sp')
def get_result():
key = request.args.get('wd')
data = pd.read_csv("static/uploads/names.csv")
# data = data.loc[data['State']==key]
data = data[data.Caption.str.contains(key, na=False)]
data.Picture = data['Picture']+"<img src = \"static/uploads/"+data['Picture']+"\"height=\"100\" width = \"100\"/>"
return data.to_html(escape=False)
# return get_html(key)
@app.route('/search_grade')
def search_grade():
# key = request.args.get('wd')
# print(key)
return "1001889074 Yangfan Chen"+"<br>"+render_template('between.html')
@app.route('/bp')
def get_grade():
low = float(request.args.get('low'))
high = float(request.args.get('high'))
data = pd.read_csv("static/uploads/names.csv")
data = data[(data['Grade'] > low) & (data['Grade'] < high)]
return data.to_html()
# @app.route('/auto_test_case')
# def auto_test_case():
# form = forms.SearchForm()
# return render_template('auto_test_case.html', cases=auto_test_case_objs,form=form)
@app.route('/change_grade_picture',methods=['POST', 'GET'])
def change_grade_picture():
name=request.args.get('name').strip('\'')
# print(name)
if request.method == 'POST':
new_points = request.form.get('points')
new_picture = request.form.get('pic_name')
df = pd.read_csv("static/uploads/names.csv")
df.loc[df['Name'] == name, 'Grade'] = new_points
df.loc[df['Name'] == name, 'Picture'] = new_picture
df.to_csv("static/uploads/names.csv",index=False)
# df = pd.read_csv("static/uploads/names.csv",index_col=False)
return redirect(url_for('first'))
return render_template('change_grade_picture.html')
@app.route('/search_picture')
def search_picture():
return "<font size=\"6\" color=\"red\">This is 1001889074 Yangfan Chen</font>"+"<br>"+render_template('search_picture.html')
@app.route('/picture_result')
def get_picture_result():
key = request.args.get('name')
data = pd.read_csv("static/uploads/names.csv")
data = data.loc[data['Name']==key]
# data.insert(data.shape[1],"Picture_Name",["" for i in range(data.shape[0])])
# for i in range(data.shape[0]):
# data["Picture_Name"][i]=data['Picture'][i]
data.Picture = data['Picture']+"<img src = \"static/uploads/"+data['Picture']+"\"height=\"100\" width = \"100\"/>"
# data.insert(data.shape[1],"Picture_Name",["" for i in range(data.shape[0])])
# for i in range(data.shape[0]):
# if data["Picture"][i]== NaN:
# data["Picture_Name"][i]=NaN
# else:
# data["Picture_Name"][i]=data['Picture'][i]
return "<font size=\"6\" color=\"red\">This is 1001889074 Yangfan Chen</font>"+"<br>"+data.to_html(escape=False)
@app.route('/search_room')
def search_room():
return "<font size=\"6\" color=\"red\">This is 1001889074 Yangfan Chen</font>"+"<br>"+render_template('search_room.html')
@app.route('/room_result')
def get_room_result():
key = request.args.get('room')
data = pd.read_csv("static/uploads/names.csv")
# data = data.loc[data['Room']==int(key)]
data.Picture = "<img src = \"static/uploads/"+data['Picture']+"\"height=\"100\" width = \"100\"/>"
data.insert(data.shape[1],"Change",["" for i in range(data.shape[0])])
for i in range(data.shape[0]):
data["Change"][i]="<a href=\"/change_room_number?name="+"\'"+data['Name'][i]+"\'"+"\"><button>Change Room Number !</button></a>"
data = data.loc[data['Room']==int(key)]
return "<font size=\"6\" color=\"red\">This is 1001889074 Yangfan Chen</font>"+"<br>"+data.to_html(escape=False)
@app.route('/change_room_number',methods=['POST', 'GET'])
def change_room_number():
name=request.args.get('name').strip('\'')
# print(name)
if request.method == 'POST':
new_number = request.form.get('new_number')
# new_picture = request.form.get('pic_name')
data = pd.read_csv("static/uploads/names.csv")
data.loc[data['Name'] == name, 'Room'] = new_number
data.to_csv("static/uploads/names.csv", index=False)
# df = pd.read_csv("static/uploads/names.csv",index_col=False)
return redirect(url_for('search_room'))
return render_template('change_room_number.html')
if __name__ == '__main__':
app.run(debug=True) | 32.967391 | 136 | 0.629904 | 849 | 6,066 | 4.363958 | 0.150766 | 0.063158 | 0.051822 | 0.068016 | 0.577598 | 0.509312 | 0.483941 | 0.430499 | 0.396761 | 0.378408 | 0 | 0.022244 | 0.162545 | 6,066 | 184 | 137 | 32.967391 | 0.707087 | 0.179031 | 0 | 0.173469 | 0 | 0.05102 | 0.292037 | 0.062449 | 0.020408 | 0 | 0 | 0 | 0 | 1 | 0.132653 | false | 0 | 0.05102 | 0.040816 | 0.346939 | 0.010204 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ca4302920331c1946e40857f0640d6f5c170ad2 | 3,596 | py | Python | DisasterResponse/flask_app/data/process_data.py | marcelokscunha/projects | 9c6ff8d9b21bb11d23ec65f6750357ec88d8b3cd | [
"MIT"
] | null | null | null | DisasterResponse/flask_app/data/process_data.py | marcelokscunha/projects | 9c6ff8d9b21bb11d23ec65f6750357ec88d8b3cd | [
"MIT"
] | 3 | 2020-04-02T22:50:51.000Z | 2021-08-23T20:40:45.000Z | DisasterResponse/flask_app/data/process_data.py | marcelokscunha/projects | 9c6ff8d9b21bb11d23ec65f6750357ec88d8b3cd | [
"MIT"
] | null | null | null | # import libraries
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
Function for loading the data from the messages csv file and the categories csv file.
Those csv files are joined and a Pandas dataframe is returned.
Parameters
----------
messages_filepath : string containing the path of the messages csv file
categories_filepath : string containing the path of the categories csv file
Returns
-------
df: Pandas DataFrame containing the joined data from both csv files. The join is performed on 'id'
"""
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = pd.merge(messages, categories, on='id')
return df
def clean_data(df):
"""
Function cleaning the Pandas dataframe containing messages and categories.
Parameters
----------
df: Pandas DataFrame containing raw data of messages and categories
Returns
-------
df: Cleaned Pandas DataFrame
"""
# create a dataframe with the 36 individual category columns
df = pd.concat([df.drop('categories', axis=1), df['categories'].str.split(pat=';',expand=True)], axis=1)
# select the first row of the categories dataframe
# took out the id columns
row = df.iloc[0,4:]
# use this row to extract a list of new column names for categories.
# one way is to apply a lambda function that takes everything
# up to the second to last character of each string with slicing
category_colnames = list(row.str.split(pat='-', expand=True)[0].values)
# rename the columns of `categories`
df.columns = list(df.columns[:4]) + category_colnames
for column in df.columns[4:]:
# set each value to be the last character of the string, after '-'
col_value = df[column].str.split(pat='-',expand=True)[1]
# convert column from string to numeric
df[column] = pd.to_numeric(col_value)
# drop duplicates
df.drop_duplicates(inplace=True)
return df
def save_data(df, database_filename):
"""
Function to save the data in the specified path.
Parameters
----------
df: the Pandas DataFrame to be saved
database_filename: string containing the path to save to data to
Returns
-------
None
"""
engine = create_engine(f'sqlite:///{database_filename}')
df.to_sql('disaster_data', engine, index=False, if_exists='replace')
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n'\
'\nExample: python process_data.py disaster_messages.csv disaster_categories.csv DisasterResponse.db')
if __name__ == '__main__':
main() | 31.269565 | 117 | 0.653782 | 463 | 3,596 | 4.971922 | 0.315335 | 0.041703 | 0.056473 | 0.059079 | 0.095135 | 0.067767 | 0.031277 | 0 | 0 | 0 | 0 | 0.004448 | 0.249722 | 3,596 | 115 | 118 | 31.269565 | 0.848777 | 0.377364 | 0 | 0.05 | 0 | 0 | 0.251566 | 0.035181 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.075 | 0 | 0.225 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ca5119ccc8cb45b020899bc15aa8667404ab7ef | 16,709 | py | Python | src/pathme/kegg/kegg_xml_parser.py | brucetony/PathMe | c7d758ff76f6787a4eb349b95f9c06bf1afb0754 | [
"Apache-2.0"
] | null | null | null | src/pathme/kegg/kegg_xml_parser.py | brucetony/PathMe | c7d758ff76f6787a4eb349b95f9c06bf1afb0754 | [
"Apache-2.0"
] | null | null | null | src/pathme/kegg/kegg_xml_parser.py | brucetony/PathMe | c7d758ff76f6787a4eb349b95f9c06bf1afb0754 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""This module contains functions to parse KGML files."""
import itertools as itt
import json
import logging
import os
from collections import defaultdict
from xml.etree.ElementTree import parse
import requests
from bio2bel_kegg.constants import API_KEGG_GET
from bio2bel_kegg.parsers import parse_description
from ..constants import (
    CHEBI, CHEBI_NAME, FLYBASE, FLYBASE_SYMBOL, HGNC, HGNC_SYMBOL, KEGG_CACHE, KEGG_ID,
    KEGG_TYPE, MGI, MGI_SYMBOL, PUBCHEM, RGD, RGD_SYMBOL, UNIPROT,
)
# NOTE (assumption): the MGI/RGD/FLYBASE names used below are taken to live in
# ..constants alongside the HGNC ones, and the `Network` OrientDB query helper is
# assumed to be importable from this project's graph-client module.
from ..wikipathways.utils import merge_two_dicts
logger = logging.getLogger(__name__)
def import_xml_etree(filename):
"""Return XML tree from KGML file.
:param str filename: path to KGML file
:returns: XML Tree
:rtype: xml.etree.ElementTree.ElementTree
"""
try:
tree = parse(filename)
except IOError as ioerr:
logger.warning('File error: %s', ioerr)
return None
return tree
"""KEGG Handling functions"""
def _post_process_api_query(node_meta_data, hgnc_manager, chebi_manager, orientdb_client=None):
"""Process API query.
:param dict[str,str] node_meta_data: JSON retrieved from the API
:param bio2bel_hgnc.Manager hgnc_manager: HGNC Manager
:param bio2bel_chebi.Manager chebi_manager: ChEBI Manager
:return: Standard identifiers for the protein/chemical
:rtype: dict[str,str]
"""
node_dict = {}
if 'DBLINKS' in node_meta_data:
for resource, identifier in node_meta_data['DBLINKS']:
if resource not in {HGNC, UNIPROT, CHEBI, PUBCHEM, MGI, RGD, FLYBASE}:
continue
# Get protein identifiers
if resource == HGNC:
hgnc_entry = hgnc_manager.get_gene_by_hgnc_id(identifier)
if not hgnc_entry:
continue
node_dict[HGNC] = identifier
node_dict[HGNC_SYMBOL] = hgnc_entry.symbol
elif resource == MGI:
client = orientdb_client if orientdb_client else None
network = Network(client=client)
data = network.query_class(class_name="mgi",
columns=['symbol'],
with_rid=False,
accession=identifier)
if not data:
continue
symbol = data[0]['symbol'] # Use the first element
node_dict[MGI] = identifier
node_dict[MGI_SYMBOL] = symbol
elif resource == RGD:
client = orientdb_client if orientdb_client else None
network = Network(client=client)
data = network.query_class(class_name="rgd",
columns=['symbol'],
with_rid=False,
gene_rgd_id=identifier)
if not data:
continue
symbol = data[0]['symbol'] # Use the first element
node_dict[RGD] = identifier
node_dict[RGD_SYMBOL] = symbol
elif resource == FLYBASE:
client = orientdb_client if orientdb_client else None
network = Network(client=client)
data = network.query_class(class_name="flybase",
columns=['symbol'],
with_rid=False,
flybase_id=identifier)
if not data:
continue
symbol = data[0]['symbol'] # Use the first element
node_dict[FLYBASE] = identifier
node_dict[FLYBASE_SYMBOL] = symbol
elif resource == UNIPROT:
hgnc_entry = hgnc_manager.get_gene_by_uniprot_id(identifier)
if not hgnc_entry:
continue
hgnc_entry = hgnc_entry[0] # Use the first element queried
hgnc_id = str(hgnc_entry.identifier)
node_dict[HGNC] = hgnc_id
if hgnc_entry.symbol:
node_dict[HGNC_SYMBOL] = hgnc_entry.symbol
else:
node_dict[UNIPROT] = identifier
# Get chemical identifiers
else:
node_dict[resource] = identifier
if resource == CHEBI:
# Split multiple identifiers and get their names
for chebi_id in identifier.split(' '):
chebi_entry = chebi_manager.get_chemical_by_chebi_id(chebi_id)
# If the id is found in the database stick the name
if chebi_entry:
node_dict[CHEBI_NAME] = chebi_entry.name
# Else use the default name by KEGG to ensure the name makes it into the graph
elif "ENTRY_NAME" in node_meta_data:
node_dict[CHEBI_NAME] = node_meta_data["ENTRY_NAME"]
return node_dict
def _process_kegg_api_get_entity(entity, entity_type, hgnc_manager, chebi_manager):
"""Send a given entity to the KEGG API and process the results.
:param str entity: A KEGG identifier
:param str entity_type: Entity type
:param bio2bel_hgnc.Manager hgnc_manager: HGNC Manager
:param bio2bel_chebi.Manager chebi_manager: ChEBI Manager
:return: JSON retrieved from the API
:rtype: dict[str,str]
"""
_entity_filepath = os.path.join(KEGG_CACHE, f'{entity}.json')
if os.path.exists(_entity_filepath):
with open(_entity_filepath) as f:
return json.load(f)
kegg_url = API_KEGG_GET.format(entity)
node_meta_data = parse_description(requests.get(kegg_url))
node_dict = _post_process_api_query(node_meta_data, hgnc_manager, chebi_manager)
node_dict[KEGG_ID] = entity
node_dict[KEGG_TYPE] = entity_type
with open(_entity_filepath, 'w') as f:
json.dump(node_dict, f)
return node_dict
def get_entity_nodes(tree, hgnc_manager, chebi_manager):
"""Find entry elements (KEGG pathway nodes) in XML.
:param xml.etree.ElementTree.ElementTree tree: XML tree
:param bio2bel_hgnc.Manager hgnc_manager: HGNC Manager
:param bio2bel_chebi.Manager chebi_manager: ChEBI Manager
:return: genes with corresponding metadata (entry_id: [kegg_id, HGNC, UniProt])
:return: compounds with corresponding metadata (entry_id: [compound_name, ChEBI])
:return: biological processes with corresponding metadata (entry_id: [kegg_id, map_name])
:return: orthologs with corresponding metadata (entry_id: [kegg_id, kegg_type])
:rtype: dict[str,list]
"""
entry_dict = defaultdict(list)
compound_dict = defaultdict(list)
map_dict = defaultdict(list)
ortholog_dict = defaultdict(list)
for entry in tree.findall("entry"):
entry_id = entry.get("id")
kegg_ids = entry.get("name")
kegg_type = entry.get("type")
if kegg_type.startswith('gene'):
for kegg_id in kegg_ids.split(' '):
# Query the API/Cache to fetch information about protein
node_info = _process_kegg_api_get_entity(kegg_id, kegg_type, hgnc_manager, chebi_manager)
entry_dict[entry_id].append(node_info)
elif kegg_type.startswith('compound'):
for compound_id in kegg_ids.split(' '):
compound_info = _process_kegg_api_get_entity(compound_id, kegg_type, hgnc_manager, chebi_manager)
if compound_info:
compound_dict[entry_id].append(compound_info)
elif kegg_type.startswith('map'):
map_info = {KEGG_ID: kegg_ids}
for graphics in entry.iter('graphics'):
map_name = graphics.get('name')
map_info['map_name'] = map_name
map_dict[entry_id].append(map_info)
elif kegg_type.startswith('ortholog'):
for ortholog_id in kegg_ids.split(' '):
ortholog_info = {
KEGG_ID: ortholog_id,
KEGG_TYPE: kegg_type,
}
ortholog_dict[entry_id].append(ortholog_info)
# TODO: other, enzyme
elif kegg_type.startswith('brite'):
pass
return entry_dict, compound_dict, map_dict, ortholog_dict
def get_complex_components(tree, genes_dict, flattened=False):
"""Get complex components to either construct complex or flatten relationships.
:param xml.etree.ElementTree.ElementTree tree: XML tree
:param dict genes_dict: dictionary of all genes in pathway
:param bool flattened: True to flatten all complex participants
:return: dictionary of complex IDs and component IDs (complex_id: [component_ids])
:return: flattened dictionary of complex IDs and component metadata (complex_ids: [metadata_dict])
:rtype: dict[str,list]
"""
# Get IDs of complex components to construct complexes of protein composites (i.e. similar proteins).
# or get dictionary of flattened lists of all proteins involved in complexes.
component_info = defaultdict(list)
complex_ids = defaultdict(list)
complexes = defaultdict(list)
all_components = []
flattened_complexes = defaultdict(list)
for entry in (tree.findall("entry")):
entry_id = entry.get("id")
for component in entry.iter("component"):
component_id = component.get("id")
# Get complex IDs and each of their component IDs
complex_ids[entry_id].append(component_id)
# Get the IDs of all components participating in complexes
if component_id not in all_components:
all_components.append(component_id)
# Get node info for each component
for k, v in genes_dict.items():
for (component_id, node_info) in itt.product(all_components, v):
if component_id == k:
component_info[component_id].append(node_info)
# Flatten lists of components in complexes
if flattened:
for k, v in complex_ids.items():
for comp_id in v:
for comp_k, comp_v in component_info.items():
if comp_id == comp_k:
complexes[k].append(comp_v)
for k, v in complexes.items():
for component in v:
for info in component:
flattened_complexes[k].append(info)
return complex_ids, flattened_complexes
def get_xml_types(tree):
"""Find entity and interaction types in KEGG XML.
:param xml.etree.ElementTree.ElementTree tree: XML tree
:return: count of all entity, relation and reaction types present in XML
:rtype: dict[str,int]
"""
entity_types_dict = defaultdict(int)
interaction_types_dict = defaultdict(int)
for entry in tree.findall('entry'):
entry_type = entry.get('type')
if entry_type.startswith('gene'):
gene_ids = entry.get('name')
entity_types_dict['gene'] += len(gene_ids.split(' '))
elif entry_type.startswith('ortholog'):
ortholog_ids = entry.get('name')
entity_types_dict['ortholog'] += len(ortholog_ids.split(' '))
elif entry_type.startswith('compound'):
entity_types_dict['compound entity'] += 1
else:
entity_types_dict[entry_type] += 1
for relation in tree.findall('relation'):
for subtype in relation.iter('subtype'):
relation_subtype = subtype.get('name')
interaction_types_dict[relation_subtype] += 1
for reaction in tree.findall('reaction'):
reaction_type = reaction.get('type')
interaction_types_dict[reaction_type] += 1
entity_types_dict['entities'] = sum(entity_types_dict.values())
interaction_types_dict['interactions'] = sum(interaction_types_dict.values())
return merge_two_dicts(entity_types_dict, interaction_types_dict)
"""Get all interactions in KEGG pathways"""
def get_all_relationships(tree):
"""Find all relationships between 2 entities.
:param xml.etree.ElementTree.ElementTree tree: XML tree
:return: relationships list [(relation_entry1, relation_entry2, relation_subtype)]
:rtype: list[tuple]
"""
relations_list = []
for relation in tree.findall("relation"):
subtype_list = []
relation_entry1 = relation.get("entry1")
relation_entry2 = relation.get("entry2")
relation_type = relation.get('type')
for subtype in relation.iter('subtype'):
relation_subtype = subtype.get("name")
relation_value = subtype.get("value")
subtype_list.append(relation_subtype)
# TODO: assume association ??
if not relation_subtype:
logger.warning("No relation type declared")
# Add protein-protein, protein-compound and transcription factor-target gene product interactions
if relation_type in {'PPrel', 'PCrel', 'GErel'}:
if relation_subtype == 'compound':
relations_list.append((relation_entry1, relation_entry2, 'binding/association'))
else:
# Check if multiple relation subtypes present
if len(subtype_list) == 1:
relations_list.append((relation_entry1, relation_entry2, relation_subtype))
else:
relations_list.append((relation_entry1, relation_entry2, subtype_list))
# Add enzyme-enzyme relations denoted as binding/association
elif relation_type.startswith('ECrel'):
relations_list.append((relation_entry1, relation_entry2, 'binding/association'))
relations_list.append((relation_entry1, relation_value, 'binding/association'))
relations_list.append((relation_value, relation_entry2, 'binding/association'))
# Add interactions between a protein and a protein in another biological process
elif relation_type.startswith('maplink'):
relations_list.append((relation_entry1, relation_entry2, 'binding/association'))
return relations_list
def get_all_reactions(tree, compounds_dict):
"""Get substrates and products with ChEBI or PubChem IDs participating in reactions.
:param xml.etree.ElementTree.ElementTree tree: XML tree
:param dict compounds_dict: dictionary of KEGG compound information
:return: dictionary with substrate ids (reaction_id: [substrate_ids])
:return: dictionary with product ids (reaction_id: [product_ids])
:rtype: dict[str,list]
"""
substrates_dict = defaultdict(list)
products_dict = defaultdict(list)
for reaction in tree.findall("reaction"):
reaction_id = reaction.get("id")
for k in compounds_dict:
for substrate in reaction.iter('substrate'):
substrate_id = substrate.get("id")
if substrate_id == k:
substrates_dict[reaction_id].append(substrate_id)
for product in reaction.iter('product'):
product_id = product.get("id")
if product_id == k:
products_dict[reaction_id].append(product_id)
return substrates_dict, products_dict
def get_reaction_pathway_edges(xml_tree, substrates_dict, products_dict):
"""Get reaction edges.
:param xml.etree.ElementTree.ElementTree xml_tree: xml tree
:param dict substrates_dict: dictionary with substrate info
:param dict products_dict: dictionary with product info
:return: dictionary of reaction elements (reaction_id: [(substrate_id, product_id, reaction_type)])
:rtype: dict[str,list]
"""
reactions_dict = defaultdict(list)
for reaction in xml_tree.findall("reaction"):
reaction_type = reaction.get("type")
reaction_id = reaction.get("id")
if substrates_dict[reaction_id]:
reaction_substrates = substrates_dict[reaction_id]
if products_dict[reaction_id]:
reaction_products = products_dict[reaction_id]
# Add edge from substrates to products with compound info
reactions_dict[reaction_id].append((reaction_substrates, reaction_products, reaction_type))
# If reaction is reversible, flip the reaction order and add a new edge
if reaction_type == "reversible":
reactions_dict[reaction_id].append((reaction_products, reaction_substrates, reaction_type))
return reactions_dict
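# Minimal end-to-end sketch (illustrative; the KGML path and the Bio2BEL
# manager instances are assumptions supplied by the caller):
# tree = import_xml_etree('hsa04010.xml')
# genes, compounds, maps, orthologs = get_entity_nodes(tree, hgnc_manager, chebi_manager)
# substrates, products = get_all_reactions(tree, compounds)
# reactions = get_reaction_pathway_edges(tree, substrates, products)
# relations = get_all_relationships(tree)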
| 36.803965 | 113 | 0.627686 | 1,936 | 16,709 | 5.191116 | 0.129649 | 0.016716 | 0.022687 | 0.020896 | 0.327662 | 0.28209 | 0.230647 | 0.16995 | 0.160796 | 0.125871 | 0 | 0.003125 | 0.29146 | 16,709 | 453 | 114 | 36.88521 | 0.845764 | 0.245018 | 0 | 0.180328 | 0 | 0 | 0.045889 | 0 | 0 | 0 | 0 | 0.004415 | 0 | 1 | 0.036885 | false | 0.004098 | 0.04918 | 0 | 0.131148 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ca5198bf1cbdbaa578a4aadc7393a936dcf6cc0 | 4,806 | py | Python | paraphraser/inference_frozen_graph.py | mahmoudeid789/paraphraser | 5426b6865601132bab5af932b66eb36304887bd1 | [
"MIT"
] | 371 | 2018-02-12T01:44:04.000Z | 2022-03-12T09:08:00.000Z | paraphraser/inference_frozen_graph.py | mahmoudeid789/paraphraser | 5426b6865601132bab5af932b66eb36304887bd1 | [
"MIT"
] | 28 | 2018-04-13T16:42:40.000Z | 2022-02-09T23:28:56.000Z | paraphraser/inference_frozen_graph.py | mahmoudeid789/paraphraser | 5426b6865601132bab5af932b66eb36304887bd1 | [
"MIT"
] | 104 | 2018-04-12T08:13:04.000Z | 2022-03-22T23:27:59.000Z | import tensorflow as tf
from load_sent_embeddings import load_sentence_embeddings
from preprocess_data import preprocess_batch
from six.moves import input, xrange
word_to_id, idx_to_word, embedding, start_id, end_id, unk_id = load_sentence_embeddings()
mask_id = 5800
with open('/media/sdb/models/paraphraser/frozen_model.pb', 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
predictions = tf.import_graph_def(
graph_def=graph_def,
return_elements=['predictions:0'],
name='')
print([op.name for op in graph.get_operations()])
seq_source_ids = graph.get_tensor_by_name('placeholders/source_ids:0')
seq_source_lengths = graph.get_tensor_by_name('placeholders/sequence_source_lengths:0')
decoder_technique = graph.get_tensor_by_name('placeholders/decoder_technique:0')
sampling_temperature = graph.get_tensor_by_name('placeholders/sampling_temperature:0')
keep_prob = graph.get_tensor_by_name('placeholders/keep_prob:0')
model = {
'seq_source_ids': seq_source_ids,
'seq_source_lengths': seq_source_lengths,
'predictions': predictions,
'decoder_technique': decoder_technique,
'sampling_temperature': sampling_temperature
}
sess = tf.Session()
def restore_model(checkpoint):
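# NOTE: relies on lstm_model (e.g. from lstm_model import lstm_model), which this
# script does not import; the module is assumed to be available in the repository.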
model = lstm_model(sess, 'infer', 300, embedding, start_id, end_id, mask_id)
saver = tf.train.Saver()
saver.restore(sess, checkpoint)
def translate(predictions, decoder, id_to_vocab, end_id):
""" Translate the vocabulary ids in `predictions` to actual words
that compose the paraphrase.
Args:
predictions : arrays of vocabulary ids
decoder : 0 for greedy, 1 for sample, 2 for beam
id_to_vocab : dict of vocabulary index to word
end_id : end token index
Returns:
str : the paraphrase
"""
if decoder == 2:
_, sentence_length, num_samples = predictions.shape
for i in xrange(num_samples):
sent_pred = []
for j in xrange(sentence_length):
sent_pred.append(predictions[0][j][i])
try:
end_index = sent_pred.index(end_id)
sent_pred = sent_pred[:end_index]
except Exception as e:
pass
return ' '.join([ id_to_vocab[pred] for pred in sent_pred ])
else:
for sent_pred in predictions:
if sent_pred[-1] == end_id:
sent_pred = sent_pred[0:-1]
return ' '.join([ id_to_vocab[pred] for pred in sent_pred ])
def infer(sess, model, decoder, source_sent, id_to_vocab, end_id, temp):
""" Perform inferencing. In other words, generate a paraphrase
for the source sentence.
Args:
sess : Tensorflow session.
model : dict of tensor to value
decoder : 0 for greedy, 1 for sampling
source_sent : source sentence to generate a paraphrase for
id_to_vocab : dict of vocabulary index to word
end_id : the end token
temp : the sampling temperature to use when `decoder` is 1
Returns:
str : for the generated paraphrase
"""
seq_source_words, seq_source_ids = preprocess_batch([ source_sent ])
seq_source_len = [ len(seq_source) for seq_source in seq_source_ids ]
feed_dict = {
model['seq_source_ids']: seq_source_ids,
model['seq_source_lengths']: seq_source_len,
model['decoder_technique']: decoder,
model['sampling_temperature']: temp
}
feeds = [
model['predictions']
#model['final_sequence_lengths']
]
predictions = sess.run(feeds, feed_dict)[0][0]
return translate(predictions, decoder, id_to_vocab, end_id)
def greedy_paraphrase(sentence):
"""Paraphrase using greedy sampler
Args:
sentence : The source sentence to be paraphrased.
Returns:
str : a candidate paraphrase of the `sentence`
"""
with tf.Session(graph=graph) as sess:
return infer(sess, model, 0, sentence, idx_to_word, end_id, 0.)
def sampler_paraphrase(sentence, sampling_temp=1.0):
"""Paraphrase by sampling a distribution
Args:
sentence (str): A sentence input that will be paraphrased by
sampling from distribution.
sampling_temp (int) : A number between 0 an 1
Returns:
str: a candidate paraphrase of the `sentence`
"""
with tf.Session(graph=graph) as sess:
return infer(sess, model, 1, sentence, idx_to_word, end_id, sampling_temp)
def main():
    while True:
        source_sentence = input("Source: ")
        #print("Paraph: {}".format(sampler_paraphrase(source_sentence)))
        print("Paraph: {}".format(greedy_paraphrase(source_sentence)))
if __name__ == '__main__':
main()
| 32.693878 | 91 | 0.668123 | 632 | 4,806 | 4.827532 | 0.234177 | 0.047198 | 0.027532 | 0.026221 | 0.28843 | 0.253687 | 0.159292 | 0.140282 | 0.113405 | 0.113405 | 0 | 0.009277 | 0.237412 | 4,806 | 146 | 92 | 32.917808 | 0.823192 | 0.264045 | 0 | 0.052632 | 0 | 0 | 0.124295 | 0.059033 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078947 | false | 0.013158 | 0.065789 | 0 | 0.210526 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ca5936a037a70e964984dceb7241bbcff6bd889 | 4,036 | py | Python | openml/config.py | parnurzeal/openml-python | 8ed0fe6bb17657622a72a7bd8af199745a0be72d | [
"BSD-3-Clause"
] | null | null | null | openml/config.py | parnurzeal/openml-python | 8ed0fe6bb17657622a72a7bd8af199745a0be72d | [
"BSD-3-Clause"
] | null | null | null | openml/config.py | parnurzeal/openml-python | 8ed0fe6bb17657622a72a7bd8af199745a0be72d | [
"BSD-3-Clause"
] | null | null | null | """
Stores module level information like the API key, cache directory and the server.
"""
import logging
import os
from six import StringIO
from six.moves import configparser
from six.moves.urllib_parse import urlparse
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='[%(levelname)s] [%(asctime)s:%(name)s] %(message)s',
    datefmt='%H:%M:%S')
# Default values!
_defaults = {
'apikey': None,
'server': "https://www.openml.org/api/v1/xml",
'verbosity': 0,
'cachedir': os.path.expanduser(os.path.join('~', '.openml', 'cache')),
'avoid_duplicate_runs': 'True',
'connection_n_retries': 2,
}
config_file = os.path.expanduser(os.path.join('~', '.openml', 'config'))
# Default values are actually added here in the _setup() function which is
# called at the end of this module
server = ""
apikey = ""
# The current cache directory (without the server name)
cache_directory = ""
# Number of retries if the connection breaks
connection_n_retries = 2
def _setup():
"""Setup openml package. Called on first import.
Reads the config file and sets up apikey, server, cache appropriately.
key and server can be set by the user simply using
openml.config.apikey = THEIRKEY
openml.config.server = SOMESERVER
We could also make it a property but that's less clear.
"""
global apikey
global server
global cache_directory
global avoid_duplicate_runs
global connection_n_retries
# read config file, create cache directory
try:
os.mkdir(os.path.expanduser(os.path.join('~', '.openml')))
except (IOError, OSError):
# TODO add debug information
pass
config = _parse_config()
apikey = config.get('FAKE_SECTION', 'apikey')
server = config.get('FAKE_SECTION', 'server')
cache_directory = os.path.expanduser(config.get('FAKE_SECTION', 'cachedir'))
avoid_duplicate_runs = config.getboolean('FAKE_SECTION', 'avoid_duplicate_runs')
    connection_n_retries = int(config.get('FAKE_SECTION', 'connection_n_retries'))
if connection_n_retries > 20:
raise ValueError(
'A higher number of retries than 20 is not allowed to keep the '
'server load reasonable'
)
def _parse_config():
"""Parse the config file, set up defaults.
"""
config = configparser.RawConfigParser(defaults=_defaults)
if not os.path.exists(config_file):
# Create an empty config file if there was none so far
fh = open(config_file, "w")
fh.close()
logger.info("Could not find a configuration file at %s. Going to "
"create an empty file there." % config_file)
try:
# Cheat the ConfigParser module by adding a fake section header
config_file_ = StringIO()
config_file_.write("[FAKE_SECTION]\n")
with open(config_file) as fh:
for line in fh:
config_file_.write(line)
config_file_.seek(0)
config.readfp(config_file_)
except OSError as e:
        logger.info("Error opening file %s: %s", config_file, e)
return config
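# Hedged illustration of the fake-section trick above (values are hypothetical):
# a config file whose raw contents are
#
#   apikey = myapikey
#   server = https://www.openml.org/api/v1/xml
#
# is parsed as if it were
#
#   [FAKE_SECTION]
#   apikey = myapikey
#   server = https://www.openml.org/api/v1/xml
#
# because RawConfigParser requires at least one section header.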
def get_cache_directory():
"""Get the current cache directory.
Returns
-------
cachedir : string
The current cache directory.
"""
url_suffix = urlparse(server).netloc
reversed_url_suffix = os.sep.join(url_suffix.split('.')[::-1])
if not cache_directory:
        _cachedir = _defaults['cachedir']
else:
_cachedir = cache_directory
_cachedir = os.path.join(_cachedir, reversed_url_suffix)
return _cachedir
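# For example, with the default server 'https://www.openml.org/api/v1/xml' the
# netloc 'www.openml.org' is reversed into 'org/openml/www' (joined with os.sep),
# so the cache lands in something like ~/.openml/cache/org/openml/www.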
def set_cache_directory(cachedir):
"""Set module-wide cache directory.
Sets the cache directory into which to download datasets, tasks etc.
Parameters
----------
cachedir : string
Path to use as cache directory.
See also
--------
get_cache_directory
"""
global cache_directory
cache_directory = cachedir
__all__ = [
'get_cache_directory', 'set_cache_directory'
]
_setup()
| 28.223776 | 84 | 0.665263 | 514 | 4,036 | 5.048638 | 0.361868 | 0.113295 | 0.041619 | 0.030829 | 0.036994 | 0.036994 | 0.036994 | 0 | 0 | 0 | 0 | 0.003213 | 0.22894 | 4,036 | 142 | 85 | 28.422535 | 0.830656 | 0.282706 | 0 | 0.051948 | 0 | 0 | 0.202883 | 0.007928 | 0 | 0 | 0 | 0.007042 | 0 | 1 | 0.051948 | false | 0.012987 | 0.064935 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ca6207be0e7d8e46672c8f766480de2edc15cd4 | 15,633 | py | Python | flask_signalbus/signalbus.py | epandurski/flask_signalbus | e495ab5762d3799dfc2e1a69b17ff067767eec83 | [
"MIT"
] | 6 | 2018-11-25T22:05:51.000Z | 2021-03-21T02:44:20.000Z | flask_signalbus/signalbus.py | epandurski/flask_signalbus | e495ab5762d3799dfc2e1a69b17ff067767eec83 | [
"MIT"
] | null | null | null | flask_signalbus/signalbus.py | epandurski/flask_signalbus | e495ab5762d3799dfc2e1a69b17ff067767eec83 | [
"MIT"
] | null | null | null | """
Adds to Flask-SQLAlchemy the capability to atomically send
messages (signals) over a message bus.
"""
import time
import logging
from sqlalchemy import event, inspect, and_, or_
from sqlalchemy.orm import mapper
from sqlalchemy.exc import DBAPIError
from flask_signalbus.utils import retry_on_deadlock, get_db_error_code, DEADLOCK_ERROR_CODES
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema, ModelConversionError
__all__ = ['SignalBus', 'SignalBusMixin']
_SIGNALS_TO_FLUSH_SESSION_INFO_KEY = 'flask_signalbus__signals_to_flush'
_FLUSH_SIGNALS_LIMIT = 50000
def _get_class_registry(base):
return base.registry._class_registry if hasattr(base, 'registry') else base._decl_class_registry
def _chunks(l, size):
"""Yield successive `size`-sized chunks from the list `l`."""
for i in range(0, len(l), size):
yield l[i:i + size]
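# For example (illustrative only):
#   list(_chunks([1, 2, 3, 4, 5], size=2))  ->  [[1, 2], [3, 4], [5]]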
def _raise_error_if_not_signal_model(model):
if not hasattr(model, 'send_signalbus_message'):
        raise RuntimeError(
            '{} can not be flushed because it does not have a'
            ' "send_signalbus_message" method.'.format(model.__name__)
        )
class SignalBusMixin(object):
"""A **mixin class** that can be used to extend
:class:`~flask_sqlalchemy.SQLAlchemy` to handle signals.
For example::
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_signalbus import SignalBusMixin
class CustomSQLAlchemy(SignalBusMixin, SQLAlchemy):
pass
app = Flask(__name__)
db = CustomSQLAlchemy(app)
db.signalbus.flush()
"""
def __init__(self, *args, **kwargs):
super(SignalBusMixin, self).__init__(*args, **kwargs)
event.listen(mapper, 'after_configured', _setup_schema(self.Model, self.session))
def init_app(self, app, *args, **kwargs):
super(SignalBusMixin, self).init_app(app, *args, **kwargs)
self.signalbus._init_app(app)
@property
def signalbus(self):
"""The associated `SignalBus` object."""
try:
signalbus = self.__signalbus
except AttributeError:
signalbus = self.__signalbus = SignalBus(self, init_app=False)
return signalbus
class SignalBus(object):
"""Instances of this class automatically send signal messages that
have been recorded in the SQL database, over a message
bus. Normally, the sending of the recorded messages (if there are
any) is done after each transaction commit, but it also can be
triggered explicitly by a command.
:param db: The `flask_sqlalchemy.SQLAlchemy` instance
For example::
from flask_sqlalchemy import SQLAlchemy
from flask_signalbus import SignalBus
app = Flask(__name__)
db = SQLAlchemy(app)
signalbus = SignalBus(db)
signalbus.flush()
"""
def __init__(self, db, init_app=True):
self.db = db
self.signal_session = self.db.create_scoped_session({'expire_on_commit': False})
self.logger = logging.getLogger(__name__)
self._autoflush = True
retry = retry_on_deadlock(self.signal_session, retries=11, max_wait=1.0)
self._flush_signals_with_retry = retry(self._flush_signals)
self._flushmany_signals_with_retry = retry(self._flushmany_signals)
event.listen(self.db.session, 'transient_to_pending', self._transient_to_pending_handler)
event.listen(self.db.session, 'after_commit', self._safe_after_commit_handler)
event.listen(self.db.session, 'after_rollback', self._after_rollback_handler)
event.listen(mapper, 'after_configured', _setup_schema(db.Model, self.db.session))
if init_app:
if db.app is None:
raise RuntimeError(
'No application found. The SQLAlchemy instance passed to'
' SignalBus() should be constructed with an application.'
)
self._init_app(db.app)
@property
def autoflush(self):
"""Setting this property to `False` instructs the `SignalBus` instance
to not automatically flush pending signals after each
transaction commit. Setting it back to `True` restores the
default behavior.
"""
return self._autoflush
@autoflush.setter
def autoflush(self, value):
self._autoflush = bool(value)
def get_signal_models(self):
"""Return all signal types in a list.
:rtype: list(`signal-model`)
"""
base = self.db.Model
return [
cls for cls in _get_class_registry(base).values() if (
isinstance(cls, type)
and issubclass(cls, base)
and hasattr(cls, 'send_signalbus_message')
)
]
def flush(self, models=None, wait=3.0):
"""Send pending signals over the message bus.
This method assumes that auto-flushing is enabled for the
given signal types, and therefore the number of pending
signals is not too big. Having multiple processes that run
this method in parallel is generally *not a good idea*.
:param models: If passed, flushes only signals of the specified types.
:type models: list(`signal-model`) or `None`
:param float wait: The number of seconds the method will wait
after obtaining the list of pending signals, to allow
auto-flushing senders to complete
:return: The total number of signals that have been sent
"""
sent_count = 0
try:
models_to_flush = self.get_signal_models() if models is None else models
pks_by_model = {}
for model in models_to_flush:
_raise_error_if_not_signal_model(model)
pks_by_model[model] = self._list_signal_pks(model)
self.signal_session.rollback()
time.sleep(wait)
for model in models_to_flush:
self.logger.info('Flushing %s.', model.__name__)
sent_count += self._flush_signals_with_retry(model, pk_values_set=set(pks_by_model[model]))
finally:
self.signal_session.remove()
return sent_count
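    # Hedged usage sketch (`db` is a SQLAlchemy instance with the mixin from
    # above; `MySignal` is a hypothetical signal model):
    #
    #   sent = db.signalbus.flush(models=[MySignal], wait=1.0)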
def flushmany(self, models=None):
"""Send a potentially huge number of pending signals over the message bus.
This method assumes that the number of pending signals might
be huge. Using `SignalBus.flushmany` when auto-flushing is
enabled for the given signal types is not recommended, because
it may result in multiple delivery of messages.
`SignalBus.flushmany` can be very useful when recovering from
long periods of disconnectedness from the message bus, or when
auto-flushing is disabled. If your database (and its
SQLAlchemy dialect) supports ``FOR UPDATE SKIP LOCKED``,
multiple processes will be able to run this method in
parallel, without stepping on each others' toes.
:param models: If passed, flushes only signals of the specified types.
:type models: list(`signal-model`) or `None`
:return: The total number of signals that have been sent
"""
return self._flush_models(flush_fn=self._flushmany_signals_with_retry, models=models)
def flushordered(self, models=None):
"""Send all pending messages in predictable order.
The order is defined by the ``signalbus_order_by`` attribute
of the model class. When auto-flushing is disabled for the
        given signal types, this method guarantees that messages will
be sent in the correct order. Having multiple processes that
run this method in parallel is generally *not a good idea*.
:param models: If passed, flushes only signals of the specified types.
:type models: list(`signal-model`) or `None`
:return: The total number of signals that have been sent
"""
return self._flush_models(flush_fn=self._flushordered_signals, models=models)
def _init_app(self, app):
from . import signalbus_cli
if not hasattr(app, 'extensions'):
app.extensions = {}
if app.extensions.get('signalbus') not in [None, self]:
raise RuntimeError('Can not attach more than one SignalBus to one application.')
app.extensions['signalbus'] = self
app.cli.add_command(signalbus_cli.signalbus)
@app.teardown_appcontext
def shutdown_signal_session(response_or_exc):
self.signal_session.remove()
return response_or_exc
def _transient_to_pending_handler(self, session, instance):
model = type(instance)
if hasattr(model, 'send_signalbus_message') and getattr(model, 'signalbus_autoflush', True):
signals_to_flush = session.info.setdefault(_SIGNALS_TO_FLUSH_SESSION_INFO_KEY, set())
signals_to_flush.add(instance)
def _after_commit_handler(self, session):
signals_to_flush = session.info.pop(_SIGNALS_TO_FLUSH_SESSION_INFO_KEY, set())
if self.autoflush and signals_to_flush:
signals = [self.signal_session.merge(s, load=False) for s in signals_to_flush]
for signal in signals:
try:
signal.send_signalbus_message()
except Exception:
self.logger.exception('Caught error while sending %s.', signal)
self.signal_session.rollback()
return
self.signal_session.delete(signal)
self.signal_session.commit()
self.signal_session.expire_all()
def _after_rollback_handler(self, session):
session.info.pop(_SIGNALS_TO_FLUSH_SESSION_INFO_KEY, None)
def _safe_after_commit_handler(self, session):
try:
return self._after_commit_handler(session)
except DBAPIError as e:
if get_db_error_code(e.orig) not in DEADLOCK_ERROR_CODES:
self.logger.exception('Caught database error during autoflush.')
self.signal_session.rollback()
def _compose_signal_query(self, model, pk_only=False, ordered=False, max_count=None):
m = inspect(model)
pk_attrs = [m.get_property_by_column(c).class_attribute for c in m.primary_key]
if pk_only:
query = self.signal_session.query(*pk_attrs)
else:
query = self.signal_session.query(model)
if ordered:
order_by_columns = getattr(model, 'signalbus_order_by', ())
if order_by_columns:
query = query.order_by(*order_by_columns)
if max_count is not None:
query = query.limit(max_count)
return query, pk_attrs
def _lock_signal_instances(self, model, pk_values_list, ordered=False):
query, pk_attrs = self._compose_signal_query(model, ordered=ordered)
clause = or_(*[
and_(*[attr == value for attr, value in zip(pk_attrs, pk_values)])
for pk_values in pk_values_list
])
return query.filter(clause).with_for_update().all()
def _list_signal_pks(self, model, ordered=False, max_count=None):
query, _ = self._compose_signal_query(model, pk_only=True, ordered=ordered, max_count=max_count)
return query.all()
def _get_signal_burst_count(self, model):
burst_count = int(getattr(model, 'signalbus_burst_count', 1))
assert burst_count > 0, '"signalbus_burst_count" must be positive'
return burst_count
def _send_and_delete_signal_instances(self, model, instances):
n = len(instances)
if n > 1 and hasattr(model, 'send_signalbus_messages'):
model.send_signalbus_messages(instances)
else:
for instance in instances:
instance.send_signalbus_message()
for instance in instances:
self.signal_session.delete(instance)
return n
def _flush_models(self, flush_fn, models):
sent_count = 0
try:
models_to_flush = self.get_signal_models() if models is None else models
for model in models_to_flush:
_raise_error_if_not_signal_model(model)
sent_count += flush_fn(model)
finally:
self.signal_session.remove()
return sent_count
def _flushordered_signals(self, model):
self.logger.info('Flushing %s in "flushordered" mode.', model.__name__)
sent_count = 0
while True:
n = self._flush_signals(model, ordered=True)
sent_count += n
if n < _FLUSH_SIGNALS_LIMIT:
return sent_count
def _flushmany_signals(self, model):
self.logger.info('Flushing %s in "flushmany" mode.', model.__name__)
sent_count = 0
burst_count = self._get_signal_burst_count(model)
query, _ = self._compose_signal_query(model, max_count=burst_count)
query = query.with_for_update(skip_locked=True)
while True:
signals = query.all()
sent_count += self._send_and_delete_signal_instances(model, signals)
self.signal_session.commit()
self.signal_session.expire_all()
if len(signals) < burst_count:
break
return sent_count
def _flush_signals(self, model, pk_values_set=None, ordered=False):
sent_count = 0
burst_count = self._get_signal_burst_count(model)
signal_pks = self._list_signal_pks(model, ordered=ordered, max_count=_FLUSH_SIGNALS_LIMIT)
self.signal_session.rollback()
if pk_values_set is not None:
signal_pks = [pk for pk in signal_pks if pk in pk_values_set]
for pk_values_chunk in _chunks(signal_pks, size=burst_count):
signals = self._lock_signal_instances(model, pk_values_chunk, ordered=ordered)
sent_count += self._send_and_delete_signal_instances(model, signals)
self.signal_session.commit()
self.signal_session.expire_all()
return sent_count
def _setup_schema(Base, session):
"""Create a function which adds `__marshmallow__` attribute to all signal models."""
def create_schema_class(m):
class Meta(object):
model = m
include_relationships = True
load_instance = True
if not hasattr(m, 'send_signalbus_message'):
            # Only non-signal models get the SQLAlchemy session attached;
            # signal models should not use it.
Meta.sqla_session = session
schema_class_name = '%sSchema' % m.__name__
return type(schema_class_name, (SQLAlchemyAutoSchema,), {'Meta': Meta})
def setup_schema_fn():
for model in _get_class_registry(Base).values():
if hasattr(model, '__tablename__'):
if model.__name__.endswith("Schema"):
raise ModelConversionError(
'Unexpected model name: "{}". '
'For safety, _setup_schema() can not be used when a '
'model class ends with "Schema".'.format(model.__name__)
)
schema_class = getattr(model, '__marshmallow__', None)
if schema_class is None:
schema_class = model.__marshmallow__ = create_schema_class(model)
schema_class_instance = getattr(model, '__marshmallow_schema__', None)
if schema_class_instance is None and hasattr(model, 'send_signalbus_message'):
setattr(model, '__marshmallow_schema__', schema_class())
return setup_schema_fn
| 39.477273 | 107 | 0.653873 | 1,928 | 15,633 | 5.025934 | 0.163382 | 0.028173 | 0.035088 | 0.013003 | 0.328483 | 0.241796 | 0.213003 | 0.183798 | 0.183798 | 0.140557 | 0 | 0.001746 | 0.267383 | 15,633 | 395 | 108 | 39.577215 | 0.84432 | 0.22171 | 0 | 0.213389 | 0 | 0 | 0.086908 | 0.023803 | 0 | 0 | 0 | 0 | 0.004184 | 1 | 0.129707 | false | 0.004184 | 0.033473 | 0.004184 | 0.276151 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ca88af8f397a00a876c1e91c0d10a1564ed42ec | 19,659 | py | Python | models/seq2seqattn.py | kaist-dmlab/COVID-EENet | ee18e61027278d3386b22a2efaf1c749aaf8ce20 | [
"Apache-2.0"
] | 4 | 2021-02-17T05:23:26.000Z | 2021-08-18T04:39:58.000Z | models/seq2seqattn.py | kaist-dmlab/Economic-Epidemiological | ee18e61027278d3386b22a2efaf1c749aaf8ce20 | [
"Apache-2.0"
] | 1 | 2021-09-02T08:29:08.000Z | 2021-09-02T08:29:08.000Z | models/seq2seqattn.py | kaist-dmlab/COVID-EENet | ee18e61027278d3386b22a2efaf1c749aaf8ce20 | [
"Apache-2.0"
] | 2 | 2021-08-18T04:40:00.000Z | 2021-08-19T03:30:18.000Z | import pandas as pd
import numpy as np
import torch
import torch.nn as nn
class DecoderAttention(nn.Module):
def __init__(self, config, hidden_size=40):
super(DecoderAttention, self).__init__()
self.hidden_size = hidden_size
self.directions = 2
self.linear = nn.Linear(self.hidden_size,
self.hidden_size, bias=False)
self.softmax = nn.Softmax(dim=-1)
def forward(self, enc_out, dec_out, attn_mask):
"""
@enc_out: (bs*massinf, src_len, hidden_size*2)
@dec_out: (bs*massinf, pred_len, hidden_size*2)
@attn_mask: (bs*massinf, src_len)
return context_vector: # (bs*massinf, pred_len, hidden_size*2)
"""
query = self.linear(dec_out)
        # |query| = (bs*massinf, pred_len, hidden_size)
weight = torch.bmm(query, enc_out.transpose(1, 2))
# |weight| = (bs*massinf, pred_len, src_len)
src_len, pred_len = enc_out.size(1), dec_out.size(1)
attn_mask = attn_mask.unsqueeze(1).repeat(1, pred_len, 1) # (bs*massinf,pred_len,src_len)
weight.masked_fill_(attn_mask, -float('inf'))
weight = self.softmax(weight) # (bs*massinf, pred_len, src_len)
context_vector = torch.bmm(weight, enc_out)
        # |context_vector| = (bs*massinf, pred_len, hidden_size)
return context_vector
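# Illustrative note (hypothetical numbers, not from the original code): with raw
# attention scores [2.0, 1.0, 3.0] and attn_mask [False, False, True], the masked
# scores become [2.0, 1.0, -inf], so the softmax yields roughly [0.73, 0.27, 0.00]
# and padded encoder timesteps never contribute to the context vector.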
class Seq2SeqATTN(nn.Module):
def __init__(self, config, mass_inf = 30, num_regions=25, hidden_size=40, rnn_layer=1, dropout_p=0.1):
super(Seq2SeqATTN, self).__init__()
self.config = config
self.region = config.r
self.numOfMassInfection = mass_inf
self.numOfIndustry = hidden_size
self.severity_emb = 6
self.hidden_size = hidden_size
self.lstm_syn = nn.LSTM(input_size=hidden_size+self.severity_emb+1,#41
hidden_size=hidden_size//2,
num_layers=1,
bidirectional=True,
batch_first=True)
self.linear_enc2dec_severity = nn.ModuleList([nn.Linear(in_features=self.hidden_size,
out_features=self.hidden_size) for i in range(2)])
self.severity_rnn = nn.LSTM(
input_size= self.severity_emb,
hidden_size=self.hidden_size,
num_layers=1,
dropout=dropout_p,
bidirectional=False,
batch_first=True,
)
input_size = 1 + self.severity_emb + self.hidden_size
transform_rnn_inp_size = input_size - self.severity_emb+ self.hidden_size
self.transform_rnn = nn.LSTM(
input_size= transform_rnn_inp_size,
hidden_size= self.hidden_size//2,
num_layers=1,
dropout=dropout_p,
bidirectional=True,
batch_first=True,
)
self.attn = DecoderAttention(config,hidden_size=hidden_size)
self.concat_linear = nn.Linear(self.hidden_size*2, self.hidden_size)
self.tanh = nn.Tanh()
# MSE loss for severity
self.severity_linear = nn.Linear(in_features= self.hidden_size, out_features= self.severity_emb)
self.linear_dnnfeatures = nn.Linear(self.config.dnnfeat_dim, self.hidden_size)
self.week_emb_linear = nn.Linear(self.hidden_size, 1)
self.linear_y_hat = nn.Linear(hidden_size,34)
self.last_linear = nn.Linear(config.p,config.p)
self.embedding = nn.Embedding(7, self.hidden_size)
self.loss = torch.nn.MSELoss(reduction="none")
def generate_attnmask(self, x, input_len):
"""
@x: (bs * #massinf, self.max_len or var_len, cat_feats) to get dtype
@input_len: (bs * #massinf)
return mask: (bs * #massinf, max_len of input_len)
"""
mask = []
input_len = input_len.to(torch.long).tolist()
max_length = max(input_len)
for l in input_len:
if max_length - l > 0:
# If the length is shorter than maximum length among samples,
# set last few values to be 1s to remove attention weight.
mask += [torch.cat([x.new_ones(1, l).zero_(),
x.new_ones(1, (max_length - l))
], dim=-1)]
else:
# If the length of the sample equals to maximum length among samples,
# set every value in mask to be 0.
mask += [x.new_ones(1, l).zero_()] # (l,)
mask = torch.cat(mask, dim=0).bool()
return mask.to(self.config.device) # (bs * #massinf, max_len of input_len)
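    # Hedged example (hypothetical lengths): for input_len = [3, 5] the mask is
    #   [[0, 0, 0, 1, 1],
    #    [0, 0, 0, 0, 0]]  (as booleans), i.e. padded positions are masked out.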
def int_lstm_dt(self, data_int, input_len):
# return output_int
data_int_shape = data_int.size() # (batch_size = #dates*#gus, 362=#dates, 30=#mass_inf_cases)
data_int = data_int.transpose(1,2).contiguous().view(data_int_shape[0]*data_int_shape[2], data_int_shape[1], 1)
# data_int: (bs*mass, T=362, 1)
input_len_int_syn = input_len.unsqueeze(-1).expand(input_len.size(0),data_int_shape[2]).contiguous().view(-1)
# (bs*mass)
if input_len[0] > 50:
input_len__50 = torch.tensor([50]*data_int.size(0))
packed_input_int = torch.nn.utils.rnn.pack_padded_sequence(data_int[:,int(input_len[-1])-50:int(input_len[-1]),:],
input_len__50.tolist(),
batch_first=True,
enforce_sorted=False
)
else:
packed_input_int = torch.nn.utils.rnn.pack_padded_sequence(data_int[:,:int(input_len[-1]),:],
input_len_int_syn.tolist(),
batch_first=True,
enforce_sorted=False
)
input_int_dec = data_int[:,-self.config.p:,:] # (bs*mass, pred_len, 1)
return packed_input_int, input_len_int_syn, input_int_dec
def ext_lstm_dt(self, final_week, input_len_int_syn, final_dnn) :
# return output_ext
final_week = self.embedding(final_week) # (bs, config.p, dim)
bs = final_week.size(0)
T = input_len_int_syn[-1].to(torch.long)
input_ext_dec = final_week.unsqueeze(1).expand(bs, 30, self.config.p, self.hidden_size)
input_ext_dec = input_ext_dec.reshape(-1, self.config.p, self.hidden_size) # (bs*mass, pred_len, self.hidden_size)
if T>50:
input_len__50_ext = torch.tensor([50]*(self.batch_sz*30))
final_week = torch.cat([final_week for i in range(T//self.config.p+1)],
axis=1)[:,-50:,:]
final_week = final_week.squeeze()
final_week = final_week.unsqueeze(1).expand(bs, 30, final_week.size(-2), self.hidden_size).contiguous()
final_week = final_week.view(-1,final_week.size(-2), self.hidden_size) # (bs*mass, len, self.hidden_size)
packed_input_ext = torch.nn.utils.rnn.pack_padded_sequence(final_week,
input_len__50_ext.tolist(),
batch_first=True,
enforce_sorted=False)
        else:
final_week = torch.cat([final_week for i in range(T//self.config.p+1)],
axis=1)[:,-T:,:]
final_week = final_week.squeeze()
final_week = final_week.unsqueeze(1).expand(bs, 30, final_week.size(-2), self.hidden_size).contiguous()
final_week = final_week.view(-1,final_week.size(-2), self.hidden_size) # (bs*mass, len, self.hidden_size)
packed_input_ext = torch.nn.utils.rnn.pack_padded_sequence(final_week,
input_len_int_syn.tolist(),
batch_first=True,
enforce_sorted=False)
return packed_input_ext, input_ext_dec
def syn_lstm(self, data_syn, final_dnn, input_len, packed_input_int, packed_input_ext, input_len_int_syn, ):
"""
@
"""
# return output_syn
cut_t = input_len[-1].to(torch.long)
T = input_len_int_syn[-1].to(torch.long)
bs = data_syn.size(0)
# get severity_dec
severity_dec = torch.cat((data_syn[:,[cut_t],:], data_syn[:,-self.config.p:,:]), dim=1)
severity_dec = severity_dec.view(bs, self.config.p+1, self.config.c, -1)
severity_dec = severity_dec.transpose(1,2).contiguous().view(bs*self.config.c, self.config.p+1, -1)
# severity_dec: (bs*#mass, self.config.p+1, dim)
if T>50:
data_syn = data_syn[:,cut_t-50:cut_t,:]
data_syn_shape = data_syn.size()
        else:
data_syn = data_syn[:,:cut_t,:]
data_syn_shape = data_syn.size()
data_syn = data_syn.view(data_syn_shape[0], data_syn_shape[1], self.config.c, -1)
severity = data_syn.transpose(1,2).contiguous().view(data_syn.size(0)*self.config.c, data_syn_shape[1], -1)
# severity: (bs*#mass, len, dim)
# pack input
elapsed, elapsed_len = torch.nn.utils.rnn.pad_packed_sequence(packed_input_int, batch_first=True)
weekday, weekday_len = torch.nn.utils.rnn.pad_packed_sequence(packed_input_ext, batch_first=True)
data_cat = torch.cat([severity, elapsed, weekday], axis=-1)
# data_cat: (bs*mass, T, 41)
if T>50:
input_len__50 = torch.tensor([50]*data_cat.size(0))
packed_input_syn = torch.nn.utils.rnn.pack_padded_sequence(data_cat,
input_len__50.tolist(),
batch_first=True,
enforce_sorted=False)
        else:
packed_input_syn = torch.nn.utils.rnn.pack_padded_sequence(data_cat,
input_len_int_syn.tolist(),
batch_first=True,
enforce_sorted=False)
# input packed_input & output packed_output
enc_output, enc_h = self.lstm_syn(packed_input_syn.to(torch.float32))
# unpack output
enc_output, enc_output_len = torch.nn.utils.rnn.pad_packed_sequence(enc_output, batch_first=True)
# enc_output: (bs * #massinf, len, hidden_size//2 * #num_directions=2)
# add dnn features
dnnfeat = self.linear_dnnfeatures(final_dnn.to(torch.float32)) #(bs, hidden)
dnnfeat = dnnfeat.unsqueeze(1).unsqueeze(1) #(bs*mass, 1, hidden)
# dnnfeat:(bs,1,1,hidden)
enc_output = enc_output.view(bs*self.numOfMassInfection, -1, self.hidden_size)
return enc_output, enc_output_len, enc_h, severity_dec
def decoder_lstm_attn(self, enc_output, enc_output_len, enc_h,covid_mask, final_dnn, decoder_inp):
"""
        @enc_output: (bs * #massinf, len, hidden_size//2 * #num_directions=2)
        @enc_output_len: (bs * #massinf)
        @enc_h = (h,c): (#layers=1 * #num_dir=2, bs*#massinf, hidden_size//2)
        @decoder_inp: Tuple(internal_dt: (bs*#mass, pred_len, 1),
                            external_dt: (bs*#mass, pred_len, hidden_size),
                            severity_dt: (bs*#mass, pred_len+1, 6))
"""
src_padding_mask = self.generate_attnmask(enc_output, enc_output_len) # (bs * #massinf, src_len)
d_list = []
severity_dec = decoder_inp[2]
severity_src = severity_dec[:,:-1,:]
severity_tgt = severity_dec[:,1:,:] # (bs*#massinf, pred_len, severity_emb)
bs_massinf = enc_h[0].size(1)
hidden_severity_rnn = [h_c.transpose(0,1).contiguous().view(bs_massinf,-1,self.hidden_size).transpose(0,1).contiguous()\
for h_c in enc_h] # (#layers, bs*massinf, hidden*2)
hidden_severity_rnn = [h_c.view(-1, self.hidden_size) for h_c in hidden_severity_rnn]# (#layers* bs*massinf, hidden*2)
hidden_severity_rnn = [self.linear_enc2dec_severity[i](hidden_severity_rnn[i]) for i in range(len(hidden_severity_rnn))]
hidden_severity_rnn = [h_c.view(-1,bs_massinf,self.hidden_size) for h_c in hidden_severity_rnn]
# hidden_severity_rnn: (#layers, bs*massinf, hidden)
if self.training: # Teacher forcing
hidden_severity, sev_h = self.severity_rnn(severity_src, hidden_severity_rnn)
# epidemiological_severity_dec: (bs*#massinf, #pred, dim=6)
# sev_h[0]: (#layers(=1) * #directions(=1), bs*#massinf, hidden)
        else:  # autoregressive decoding, one step at a time
hidden_severity = []
for t in range(self.config.p):
sev_inp_t = severity_src[:,[t],:] # (bs*#massinf, 1, dim=6)
sev_inp_t_out, hidden_severity_rnn = self.severity_rnn(sev_inp_t, hidden_severity_rnn)
# sev_inp_t_out: (bs*#massinf, 1, hidden_size)
hidden_severity.append(sev_inp_t_out)
assert len(hidden_severity) == self.config.p
hidden_severity = torch.cat(hidden_severity, dim=1) # (bs*#massinf, pred_len, hidden_size)
# computes mseloss: hidden_severity ~ epidemiological_severity_tgt
hidden_severity = hidden_severity.contiguous()
hidden_severity_tgt_hat = self.severity_linear(hidden_severity.view(-1, self.hidden_size))
hidden_severity_tgt_hat = hidden_severity_tgt_hat.view(-1, self.config.p, self.severity_emb)
        covid_mask = covid_mask.view(-1).to(torch.bool)  # (bs*#massinf,) boolean mask of active mass infections
severity_rmseloss = self.loss(hidden_severity_tgt_hat, severity_tgt)[covid_mask]
# severity_rmseloss: (none: bs * #active_massinf, pred_len, hidden_size=6)
severity_rmseloss = torch.sqrt(severity_rmseloss.mean())
covid_elapsed_dec, weekdays_dec = decoder_inp[0], decoder_inp[1]
transform_inp = torch.cat([weekdays_dec, covid_elapsed_dec, hidden_severity], dim = -1)
transform_inp = transform_inp.to(self.config.dtype)
dec_out, dec_h = self.transform_rnn(transform_inp, enc_h)
# (bs*#massinf, config.p, hidden_size * #directions=2)
# use attention
# enc_output: (bs*#massinf, src_len, hidden_size * #directions=2)
context_vector = self.attn(enc_output, dec_out, src_padding_mask) # (bs*massinf, pred_len, hidden_size)
dec_out = self.tanh(self.concat_linear(torch.cat([dec_out,context_vector], dim=-1)))
# dec_out: (bs*massinf, pred_len, hidden_size)
dnnfeat = self.linear_dnnfeatures(final_dnn.to(torch.float32)) #(bs,mass,hidden)
dnnfeat = dnnfeat.view(-1, self.hidden_size).unsqueeze(1) #(bs*mass,1,hidden)
week_emb = self.week_emb_linear(weekdays_dec).squeeze() # (bs*#mass, pred_len, 1)
week_emb = week_emb.unsqueeze(-1).expand(-1,-1, self.numOfIndustry) # (bs*#mass, pred_len, 34=ind)
dec_out = dec_out + dnnfeat + week_emb
# dec_out: (bs*massinf, pred_len, hidden_size)
dec_out = dec_out.contiguous().view(-1, self.hidden_size)
dec_out = self.tanh(self.linear_y_hat(dec_out))
y_hat = dec_out.view(-1, self.config.p, 34) # (bs*#massinf, pred_len, hidden_size=34)
return y_hat, severity_rmseloss
def compute_y_hat(self, y_hat, covid_mask, modeling_output=None):
# covid_mask: (bs, #massinf=30)
y_hat = y_hat.view(self.batch_sz, 30, self.config.p, -1) # (bs, #massinf, config.p, hid)
y_hat = y_hat * covid_mask.unsqueeze(-1).unsqueeze(-1)
        if modeling_output is not None:
            modeling_output["y_hat_bef_mean"] = y_hat.cpu()
y_hat = y_hat.sum(dim=1) # (bs, config.p, hid)
y_hat = y_hat / covid_mask.sum(dim=1).unsqueeze(-1).unsqueeze(-1)
        if modeling_output is not None:
            modeling_output["y_hat"] = y_hat.cpu()
return y_hat if modeling_output is None else (y_hat, modeling_output)
def forward(self, x, verbose=False, inspect=False):
if inspect:
modeling_output = {}
final_dnn, data_int, data_syn, final_week, final_mask, covid_mask, y_train = x
final_dnn = final_dnn.to(self.config.device, )
data_int = data_int.to(self.config.device, )
data_syn = data_syn.to(self.config.device, )
final_week = final_week.to(self.config.device, )
final_mask = final_mask.to(self.config.device, )
covid_mask = covid_mask.to(self.config.device, self.config.dtype)
y_train = y_train.to(self.config.device)
self.batch_sz = data_int.size(0)
input_len = final_mask.sum(axis=1)
# int_lstm ######################### : output_int
packed_input_int, input_len_int_syn, input_int_dec = self.int_lstm_dt(data_int, input_len, ) # return output_int_data
# ext_lstm ######################### : output_ext
packed_input_ext, input_ext_dec = self.ext_lstm_dt(final_week, input_len_int_syn, final_dnn,) # return output_ext_data
# syn_lstm ######################### : output_syn
enc_output, enc_output_len, enc_h, severity_dec = self.syn_lstm(data_syn, final_dnn, input_len,
packed_input_int, packed_input_ext,
input_len_int_syn, )
# return output_syn
# decoder with lstm_attn #########################
y_hat, severity_rmseloss = self.decoder_lstm_attn(enc_output, enc_output_len, enc_h, covid_mask, final_dnn,
decoder_inp=(input_int_dec, input_ext_dec, severity_dec))
# return y_hat (bs*#massinf, pred_len, hidden_size=34)
if inspect:
logits, modeling_output = self.compute_y_hat(y_hat, covid_mask, modeling_output) # return logits, which is y_hat
        else:
logits = self.compute_y_hat(y_hat, covid_mask,) # return logits = y_hat
logits = logits.transpose(1,2).contiguous() # (bs, ind, pred)
logits = self.last_linear(logits)
# compute MSELoss ######################### logits: (bs, config.p, hid)
return (self.loss(logits, y_train.to(torch.float32))+0.1*severity_rmseloss, logits, modeling_output) \
if inspect \
else (self.loss(logits, y_train.to(torch.float32)), logits, None)
| 50.537275 | 128 | 0.564576 | 2,487 | 19,659 | 4.154001 | 0.088862 | 0.060982 | 0.043365 | 0.021682 | 0.493466 | 0.400155 | 0.315071 | 0.275966 | 0.218565 | 0.193399 | 0 | 0.018746 | 0.321634 | 19,659 | 389 | 129 | 50.537275 | 0.755924 | 0.161504 | 0 | 0.247934 | 0 | 0 | 0.00162 | 0 | 0 | 0 | 0 | 0 | 0.004132 | 1 | 0.041322 | false | 0 | 0.016529 | 0 | 0.099174 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ca8b8ffb1a739e76493e9761c5a1fd761e67a02 | 4,447 | py | Python | src/api/views/api_v1/bp_doc.py | showthesunli/liuli | 19e04e8e1a99b3643fa2e91298cdbbbcea04610f | [
"Apache-2.0"
] | 139 | 2021-12-30T02:07:01.000Z | 2022-03-30T08:00:29.000Z | src/api/views/api_v1/bp_doc.py | showthesunli/liuli | 19e04e8e1a99b3643fa2e91298cdbbbcea04610f | [
"Apache-2.0"
] | 21 | 2021-12-29T15:02:23.000Z | 2022-03-24T10:30:57.000Z | src/api/views/api_v1/bp_doc.py | showthesunli/liuli | 19e04e8e1a99b3643fa2e91298cdbbbcea04610f | [
"Apache-2.0"
] | 24 | 2021-12-30T07:45:26.000Z | 2022-03-31T14:56:13.000Z | """
Created by howie.hu at 2022-05-23.
Description: Article-related APIs
Changelog: all notable changes to this file will be documented
"""
from urllib.parse import urljoin
from flask import Blueprint, current_app, request
from src.api.common import (
ResponseCode,
ResponseField,
ResponseReply,
UniResponse,
jwt_required,
response_handle,
)
from src.config import Config
from src.databases import MongodbBase
from src.utils import LOGGER, get_ip, ts_to_str_date
bp_doc = Blueprint("doc", __name__, url_prefix="/doc")
@bp_doc.route("/articles", methods=["POST"], strict_slashes=False)
@jwt_required()
def articles():
"""查询历史文章
{
"username": "liuli",
"doc_source": "",
"doc_source_name": ""
}
Returns:
Response: 响应类
"""
@bp_doc.route("/rss_list", methods=["POST"], strict_slashes=False)
@jwt_required()
def rss_list():
"""获取用户下所有rss链接地址
eg:
{
"username": "liuli",
"doc_source": "",
}
Returns:
Response: 响应类
"""
mongodb_base: MongodbBase = current_app.config["mongodb_base"]
app_logger: LOGGER = current_app.config["app_logger"]
app_config: Config = current_app.config["app_config"]
coll = mongodb_base.get_collection(coll_name="liuli_rss")
    # Fetch the base request data
post_data: dict = request.json
doc_source = post_data.get("doc_source", "")
filter_dict = {"doc_source": doc_source} if doc_source else {}
return_dict = {"_id": 0, "doc_source": 1, "doc_source_name": 1, "updated_at": 1}
domain: str = app_config.DOMAIN or f"http://{get_ip()}:{Config.HTTP_PORT}"
try:
cursor = coll.find(filter_dict, return_dict).sort("updated_at", 1)
rss_dict = []
for document in cursor:
updated_at = document["updated_at"]
doc_source = document["doc_source"]
doc_source_name = document["doc_source_name"]
rss_dict.append(
{
**document,
**{
"updated_at": ts_to_str_date(updated_at),
"rss_url": urljoin(
domain, f"rss/{doc_source}/{doc_source_name}"
),
},
}
)
result = {
ResponseField.DATA: rss_dict,
ResponseField.MESSAGE: ResponseReply.SUCCESS,
ResponseField.STATUS: ResponseCode.SUCCESS,
}
except Exception as e:
result = UniResponse.DB_ERR
err_info = f"query doc RSS failed! DB response info -> {e}"
app_logger.error(err_info)
return response_handle(request=request, dict_value=result)
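# Hedged request sketch (host, port, and any URL prefix before /doc depend on how
# the blueprint is registered; the JWT token is hypothetical):
#
#   curl -X POST http://127.0.0.1:8765/doc/rss_list \
#        -H "Authorization: Bearer <jwt_token>" \
#        -H "Content-Type: application/json" \
#        -d '{"username": "liuli", "doc_source": ""}'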
@bp_doc.route("/source_list", methods=["POST"], strict_slashes=False)
@jwt_required()
def source_list():
"""获取所有文档源统计信息
eg:
{
"username": "liuli"
}
Returns:
Response: 响应类
"""
mongodb_base: MongodbBase = current_app.config["mongodb_base"]
app_logger: LOGGER = current_app.config["app_logger"]
coll = mongodb_base.get_collection(coll_name="liuli_articles")
try:
doc_source_list = coll.distinct("doc_source")
doc_source_dict = {}
for doc_source in doc_source_list:
pipeline = [
{"$match": {"doc_source": doc_source}},
{"$group": {"_id": "$doc_source_name", "count": {"$sum": 1}}},
]
doc_source_dict[doc_source] = {
"doc_count": 0,
"doc_source_list": [],
"doc_source_details": [],
}
for item in coll.aggregate(pipeline):
doc_source_list: list = doc_source_dict[doc_source]["doc_source_list"]
doc_source_list.append(item["_id"])
doc_source_details: list = doc_source_dict[doc_source][
"doc_source_details"
]
doc_source_details.append(item)
doc_source_dict[doc_source]["doc_count"] += item["count"]
result = {
ResponseField.DATA: doc_source_dict,
ResponseField.MESSAGE: ResponseReply.SUCCESS,
ResponseField.STATUS: ResponseCode.SUCCESS,
}
except Exception as e:
result = UniResponse.DB_ERR
err_info = f"query doc source failed! DB response info -> {e}"
app_logger.error(err_info)
return response_handle(request=request, dict_value=result)
| 31.764286 | 86 | 0.59141 | 497 | 4,447 | 4.987928 | 0.257545 | 0.152481 | 0.048407 | 0.058088 | 0.461476 | 0.41186 | 0.41186 | 0.387656 | 0.308996 | 0.271077 | 0 | 0.004763 | 0.291882 | 4,447 | 139 | 87 | 31.992806 | 0.782471 | 0.091298 | 0 | 0.244681 | 0 | 0 | 0.143442 | 0.008693 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031915 | false | 0 | 0.06383 | 0 | 0.117021 | 0.021277 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cacae0ce7e51f0051a7eb3a0835913ba6201b21 | 2,050 | py | Python | user_history_adder.py | ReutersMedia/sqs-browser-events | 6be8de94fa65efb973a5bce87fee6243dea8d0b9 | [
"MIT"
] | 63 | 2017-03-31T01:30:04.000Z | 2021-05-05T11:46:14.000Z | user_history_adder.py | ReutersMedia/sqs-browser-events | 6be8de94fa65efb973a5bce87fee6243dea8d0b9 | [
"MIT"
] | 3 | 2017-06-02T18:40:43.000Z | 2017-09-05T00:50:24.000Z | user_history_adder.py | ReutersMedia/sqs-browser-events | 6be8de94fa65efb973a5bce87fee6243dea8d0b9 | [
"MIT"
] | 3 | 2017-04-14T15:47:26.000Z | 2020-07-13T08:34:36.000Z | from __future__ import print_function
import os
import sys
import time
import boto3
import json
current_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(current_path, './lib'))
import dynamo_sessions
import common
import logging
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
def lambda_handler(event, context):
# create list of user_id,msg
if 'user_msg_list' in event:
user_msg_list = event['user_msg_list']
msg_list = []
msg_ttl = int(time.time())+int(os.getenv('USER_MESSAGE_TTL'))
for user_id,msgs in user_msg_list:
for m in msgs:
m['is_read'] = 0
m['ttl'] = msg_ttl
msg_list.append( (user_id,m) )
dynamo_sessions.batch_add_user_history(msg_list,n_workers=50)
LOGGER.info("Added {0} entries to user history".format(len(msg_list)))
elif 'user_msg_read_receipts' in event:
user_id = int(event['user_id'])
msg_id_list = event['user_msg_read_receipts']
LOGGER.info("Setting {0} messages read for user_id {1}".format(len(msg_id_list),user_id))
receipted_msg_ids = dynamo_sessions.set_messages_read(user_id,msg_id_list)
if os.getenv('SEND_READ_RECEIPTS_VIA_SQS','1').lower().strip() in ('1','true','yes'):
# send msg-receited updates to any SQS queues for the user
LOGGER.info("Generating read-receipt message for user_id={0}".format(user_id))
m = {'userId':user_id,
'_type':'message-read-receipt',
'messages-receipted': receipted_msg_ids,
'_sqs_only': 1}
try:
c = boto3.client('lambda')
c.invoke(FunctionName=os.getenv('DISPATCHER_LAMBDA'),
Payload=json.dumps({'Records':[m]},cls=common.DecimalEncoder),
InvocationType='Event')
            except Exception:
LOGGER.exception("Unable to push read-receipt message for user_id={0}".format(user_id))
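# Hedged example events (all field values are hypothetical):
#
#   {"user_msg_list": [[42, [{"msg_id": "m1", "body": "hi"}]]]}
#       -> stamps each message with is_read=0 and a TTL, then batch-writes it
#          to the user's history.
#   {"user_id": 42, "user_msg_read_receipts": ["m1"]}
#       -> marks the messages read and, if enabled, pushes a read-receipt
#          message to the dispatcher Lambda.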
| 38.679245 | 103 | 0.631707 | 278 | 2,050 | 4.374101 | 0.366906 | 0.064145 | 0.036184 | 0.026316 | 0.090461 | 0.065789 | 0.065789 | 0.065789 | 0.065789 | 0.065789 | 0 | 0.008458 | 0.250244 | 2,050 | 52 | 104 | 39.423077 | 0.782694 | 0.040488 | 0 | 0 | 0 | 0 | 0.207739 | 0.035642 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0 | 0.209302 | 0 | 0.232558 | 0.023256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cb33b2b23a6bf974b1d45d9c630d6b35d086c90 | 846 | py | Python | modularise/bed_to_fasta.py | radhikasethi2011/btechproj | 209dda7268edc4f6a0f54192c1b3216d73812797 | [
"MIT"
] | null | null | null | modularise/bed_to_fasta.py | radhikasethi2011/btechproj | 209dda7268edc4f6a0f54192c1b3216d73812797 | [
"MIT"
] | null | null | null | modularise/bed_to_fasta.py | radhikasethi2011/btechproj | 209dda7268edc4f6a0f54192c1b3216d73812797 | [
"MIT"
] | null | null | null | from Bio import SeqIO
import pandas as pd
def fasta_to_dataframe(bound, name, file1):
    with open(file1) as fasta_file:  # will close the handle cleanly
        identifiers = []
        seqs = []
        for seq_record in SeqIO.parse(fasta_file, 'fasta'):  # (generator)
            identifiers.append(seq_record.id)
            seqs.append(str(seq_record.seq))
    data = {'EventId': identifiers, 'seq': seqs}
    df = pd.DataFrame(data)
    df['FoldID'] = 'A'
    df['start_index'] = '-5'
    df = df[["FoldID", "EventId", "start_index", "seq"]]  # ordering the columns
    df['seq'] = df['seq'].str.upper()
    df['Bound'] = bound
    df.to_csv(name + '.txt', index=None, sep='\t')
    return df
#df3=fasta_to_dataframe(1,"tad", '/content/tad.fasta')
#df3=fasta_to_dataframe(0,"left", '/content/left.fasta')
#df3=fasta_to_dataframe(0,"right", '/content/right.fasta')
| 32.538462 | 73 | 0.659574 | 125 | 846 | 4.336 | 0.464 | 0.051661 | 0.118081 | 0.105166 | 0.092251 | 0.092251 | 0 | 0 | 0 | 0 | 0 | 0.012676 | 0.160757 | 846 | 25 | 74 | 33.84 | 0.750704 | 0.262411 | 0 | 0 | 0 | 0 | 0.128039 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.157895 | 0 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cb3a73d43f2a5295472a4ba14c6539dcf5ac365 | 4,719 | py | Python | server/nmtwizard/task.py | jsenellart/nmt-wizard | f57edc0b2d60e941d5b7f6aa63f39f794bd6498e | [
"MIT"
] | null | null | null | server/nmtwizard/task.py | jsenellart/nmt-wizard | f57edc0b2d60e941d5b7f6aa63f39f794bd6498e | [
"MIT"
] | null | null | null | server/nmtwizard/task.py | jsenellart/nmt-wizard | f57edc0b2d60e941d5b7f6aa63f39f794bd6498e | [
"MIT"
] | 1 | 2018-03-09T16:58:56.000Z | 2018-03-09T16:58:56.000Z | import time
import json
def set_status(redis, keyt, status):
"""Sets the status and save the time of change."""
redis.hset(keyt, "status", status)
redis.hset(keyt, status + "_time", int(time.time()))
def exists(redis, task_id):
"""Checks if a task exist."""
return redis.exists("task:" + task_id)
def create(redis, task_id, resource, service, content, files):
"""Creates a new task and enables it."""
keyt = "task:" + task_id
redis.hset(keyt, "resource", resource)
redis.hset(keyt, "service", service)
redis.hset(keyt, "content", json.dumps(content))
set_status(redis, keyt, "queued")
for k in files:
redis.hset("files:" + task_id, k, files[k])
enable(redis, task_id)
queue(redis, task_id)
def terminate(redis, task_id, phase):
"""Requests task termination (assume it is locked)."""
if phase is None:
phase = "aborted"
keyt = "task:" + task_id
if redis.hget(keyt, "status") in ("terminating", "stopped"):
return
redis.hset(keyt, "message", phase)
set_status(redis, keyt, "terminating")
queue(redis, task_id)
def queue(redis, task_id, delay=0):
"""Queues the task in the work queue with a delay."""
if delay == 0:
redis.lpush('work', task_id)
redis.delete('queue:'+task_id)
else:
redis.set('queue:'+task_id, delay)
redis.expire('queue:'+task_id, int(delay))
def unqueue(redis):
"""Pop a task from the work queue."""
return redis.rpop('work')
def enable(redis, task_id):
"""Marks a task as enabled."""
redis.sadd("active", task_id)
def disable(redis, task_id):
"""Marks a task as disabled."""
redis.srem("active", task_id)
def list_active(redis):
"""Returns all active tasks (i.e. non stopped)."""
return redis.smembers("active")
def file_list(redis, task_id):
"""Returns the list of files attached to a task"""
keyf = "files:" + task_id
return redis.hkeys(keyf)
def info(redis, task_id, fields):
"""Gets information on a task."""
keyt = "task:" + task_id
field = None
if not isinstance(fields, list):
field = fields
fields = [field]
if not fields:
# only if we want all information - add a lock on the resource
with redis.acquire_lock(keyt):
fields = redis.hkeys(keyt)
fields.append("ttl")
            r = info(redis, task_id, fields)
r['files'] = file_list(redis, task_id)
return r
r = {}
for f in fields:
if f != "ttl":
r[f] = redis.hget(keyt, f)
else:
r[f] = redis.ttl("beat:" + task_id)
if field:
return r[field]
r["current_time"] = int(time.time())
return r
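# Hedged usage sketch (the task id and field names are illustrative):
#
#   info(redis, "abc123", "status")    -> just the status value
#   info(redis, "abc123", ["status"])  -> {"status": ..., "current_time": ...}
#   info(redis, "abc123", [])          -> the full task hash plus "ttl" and "files"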
def delete(redis, task_id):
"""Delete a given task."""
keyt = "task:" + task_id
status = redis.hget(keyt, "status")
if status is None:
        return (False, "task does not exist")
if status != "stopped":
return (False, "status is not stopped")
with redis.acquire_lock(keyt):
redis.delete(keyt)
redis.delete("queue:" + task_id)
redis.delete("files:" + task_id)
return True
# TODO: create an iterator that returns task_id directly
def scan_iter(redis, pattern):
return redis.scan_iter('task:' + pattern)
def id(task_key):
return task_key[5:]
def beat(redis, task_id, duration, container_id):
"""Sends an update event to the task and add an expiration time
(set duration to 0 to disable expiration). The task must be running.
"""
keyt = "task:" + task_id
with redis.acquire_lock(keyt):
        # a beat can only be sent in running mode; if the task has stopped in
        # the meantime, or in development mode, there is no need to raise an alert
if redis.hget(keyt, "status") != "running":
return
if duration is not None:
if duration == 0:
redis.delete("beat:" + task_id)
else:
redis.set("beat:" + task_id, duration)
redis.expire("beat:" + task_id, duration)
queue = redis.get("queue:" + task_id)
# renew ttl of queue
if queue is not None:
redis.expire("queue:" + task_id, int(queue))
redis.hset(keyt, "updated_time", int(time.time()))
if container_id is not None:
redis.hset(keyt, "container_id", container_id)
def set_file(redis, task_id, content, filename):
keyf = "files:" + task_id
redis.hset(keyf, filename, content)
def get_file(redis, task_id, filename):
keyf = "files:" + task_id
return redis.hget(keyf, filename)
def get_log(redis, task_id):
return get_file(redis, task_id, "log")
| 32.102041 | 88 | 0.605213 | 658 | 4,719 | 4.237082 | 0.221885 | 0.094692 | 0.074964 | 0.025108 | 0.199785 | 0.053085 | 0.016499 | 0 | 0 | 0 | 0 | 0.001436 | 0.26192 | 4,719 | 146 | 89 | 32.321918 | 0.799024 | 0.171858 | 0 | 0.185185 | 0 | 0 | 0.09193 | 0 | 0 | 0 | 0 | 0.006849 | 0 | 1 | 0.166667 | false | 0 | 0.018519 | 0.027778 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cb878606220ef5504466fef622222dd6d7c2131 | 1,846 | py | Python | src/magic_config/manager.py | mogaiskii/magic_config | c638ffebf5174600ef9632f923da9a35820ade3f | [
"MIT"
] | null | null | null | src/magic_config/manager.py | mogaiskii/magic_config | c638ffebf5174600ef9632f923da9a35820ade3f | [
"MIT"
] | null | null | null | src/magic_config/manager.py | mogaiskii/magic_config | c638ffebf5174600ef9632f923da9a35820ade3f | [
"MIT"
] | null | null | null | from typing import Dict, Any, Optional
import abc
from magic_config.interfaces import AbstractSettingField, AbstractLoader, AbstractSettingsManager
from magic_config.utils import NULL
_FIELDS_REGISTRY = '_fields_registry'
class BaseSettingsManagerMeta(abc.ABCMeta):
"""
    metaclass for settings managers. collects declared fields into a registry.
"""
    def __new__(mcls, name, bases, attrs: Dict[str, Any]):
        fields = []
        # iterate over a separate variable so the class `name` argument
        # is not shadowed by attribute names
        for attr_name, field in attrs.items():
            if isinstance(field, AbstractSettingField):
                fields.append(field)
attrs[_FIELDS_REGISTRY] = fields
cls = super().__new__(mcls, name, bases, attrs)
cls._immutable = True
return cls
def __setattr__(self, key, value):
if getattr(self, '_immutable', False):
pass
else:
super().__setattr__(key, value)
class BaseSettingsManager(AbstractSettingsManager, metaclass=BaseSettingsManagerMeta):
"""
    abstract base class for every settings manager
"""
def __init__(self, loaders=NULL):
self._loaders = []
if self.Meta.loaders:
self._loaders.extend(self.Meta.loaders)
if loaders is not NULL:
self._loaders.extend(loaders)
assert len(self._loaders) > 0, 'At least one loader is required'
assert all([isinstance(loader, AbstractLoader) for loader in self._loaders]), \
'Loaders should be subclass of AbstractLoader'
def get_loaders(self):
return self._loaders
def get_main_loader(self) -> Optional[AbstractLoader]:
for loader in self._loaders:
if not loader.is_readonly():
return loader
return None
def get_fields(self):
return getattr(self, _FIELDS_REGISTRY, [])
class Meta:
loaders = []
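# Hedged usage sketch (BoolField and EnvLoader are hypothetical
# AbstractSettingField / AbstractLoader implementations, not defined here):
#
#   class AppSettings(BaseSettingsManager):
#       debug = BoolField(default=False)
#
#       class Meta:
#           loaders = [EnvLoader()]
#
#   settings = AppSettings()
#   settings.get_fields()  # -> [the `debug` field object]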
| 27.147059 | 97 | 0.64572 | 199 | 1,846 | 5.768844 | 0.407035 | 0.076655 | 0.026132 | 0.027875 | 0.099303 | 0.062718 | 0 | 0 | 0 | 0 | 0 | 0.000739 | 0.266522 | 1,846 | 67 | 98 | 27.552239 | 0.84712 | 0.061213 | 0 | 0 | 0 | 0 | 0.059342 | 0 | 0 | 0 | 0 | 0 | 0.04878 | 1 | 0.146341 | false | 0.02439 | 0.097561 | 0.04878 | 0.439024 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cb9a3835c3f096ec821f3c5b0f2ed7f43bc40e0 | 13,662 | py | Python | tinyms/model/bert/bert_for_finetune.py | zjuter06060126/tinyms | 106fe7eeaa7865ace9a29da946084a101cecb93f | [
"Apache-2.0"
] | 129 | 2021-01-26T01:36:53.000Z | 2022-03-29T13:05:49.000Z | tinyms/model/bert/bert_for_finetune.py | zjuter06060126/tinyms | 106fe7eeaa7865ace9a29da946084a101cecb93f | [
"Apache-2.0"
] | 57 | 2021-02-02T07:15:42.000Z | 2022-03-22T09:56:37.000Z | tinyms/model/bert/bert_for_finetune.py | zjuter06060126/tinyms | 106fe7eeaa7865ace9a29da946084a101cecb93f | [
"Apache-2.0"
] | 62 | 2021-01-26T03:09:41.000Z | 2022-03-16T09:05:30.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
'''
Bert for finetune script.
'''
import tinyms as ts
from tinyms import Tensor
from tinyms import Parameter
from tinyms import context
from tinyms import layers
from tinyms import primitives as P
from tinyms.initializers import initializer
from .bert_for_pretraining import clip_grad
from .finetune_eval_model import BertCLSModel, BertNERModel, BertSquadModel
GRADIENT_CLIP_TYPE = 1
GRADIENT_CLIP_VALUE = 1.0
grad_scale = P.MultitypeFuncGraph("grad_scale")
reciprocal = P.Reciprocal()
@grad_scale.register("Tensor", "Tensor")
def tensor_grad_scale(scale, grad):
return grad * reciprocal(scale)
_grad_overflow = P.MultitypeFuncGraph("_grad_overflow")
grad_overflow = P.FloatStatus()
@_grad_overflow.register("Tensor")
def _tensor_grad_overflow(grad):
return grad_overflow(grad)
class CrossEntropyCalculation(layers.Layer):
"""
Cross Entropy loss
"""
def __init__(self, is_training=True):
super(CrossEntropyCalculation, self).__init__()
self.onehot = P.OneHot()
self.on_value = Tensor(1.0, ts.float32)
self.off_value = Tensor(0.0, ts.float32)
self.reduce_sum = P.ReduceSum()
self.reduce_mean = P.ReduceMean()
self.reshape = P.Reshape()
self.last_idx = (-1,)
self.neg = P.Neg()
self.cast = P.Cast()
self.is_training = is_training
def construct(self, logits, label_ids, num_labels):
if self.is_training:
label_ids = self.reshape(label_ids, self.last_idx)
one_hot_labels = self.onehot(label_ids, num_labels, self.on_value, self.off_value)
per_example_loss = self.neg(self.reduce_sum(one_hot_labels * logits, self.last_idx))
loss = self.reduce_mean(per_example_loss, self.last_idx)
return_value = self.cast(loss, ts.float32)
else:
return_value = logits * 1.0
return return_value
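# Illustrative note: with one-hot targets y and `logits` assumed to be
# log-probabilities (log-softmax applied upstream), the loss above computes
# mean(-sum_i y_i * log p_i), i.e. standard cross entropy. For example, with
# log p = [-0.1, -2.4] and label 0, the per-example loss is 0.1.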
class BertFinetuneLayer(layers.Layer):
"""
    Specifically defined for finetuning where only four input tensors are needed.
"""
def __init__(self, network, optimizer, scale_update_layer=None):
super(BertFinetuneLayer, self).__init__(auto_prefix=False)
self.network = network
self.network.set_grad()
self.weights = optimizer.parameters
self.optimizer = optimizer
self.optimizer.global_step = Parameter(initializer(0., [1,]), name='global_step')
self.grad = P.GradOperation(get_by_list=True,
sens_param=True)
self.allreduce = P.AllReduce()
self.grad_reducer = None
self.cast = P.Cast()
self.gpu_target = False
if context.get_context("device_target") == "GPU":
self.gpu_target = True
self.float_status = P.FloatStatus()
self.addn = P.AddN()
self.reshape = P.Reshape()
else:
self.alloc_status = P.NPUAllocFloatStatus()
self.get_status = P.NPUGetFloatStatus()
self.clear_before_grad = P.NPUClearFloatStatus()
self.reduce_sum = P.ReduceSum(keep_dims=False)
self.depend_parameter_use = P.Depend()
self.base = Tensor(1, ts.float32)
self.less_equal = P.LessEqual()
self.hyper_map = P.HyperMap()
self.loss_scale = None
self.loss_scaling_manager = scale_update_layer
if scale_update_layer:
self.loss_scale = Parameter(Tensor(scale_update_layer.get_loss_scale(), dtype=ts.float32),
name="loss_scale")
def construct(self,
input_ids,
input_mask,
token_type_id,
label_ids,
sens=None):
"""Bert Finetune"""
weights = self.weights
init = False
loss = self.network(input_ids,
input_mask,
token_type_id,
label_ids)
if sens is None:
scaling_sens = self.loss_scale
else:
scaling_sens = sens
if not self.gpu_target:
init = self.alloc_status()
clear_before_grad = self.clear_before_grad(init)
loss = P.depend(loss, init)
self.depend_parameter_use(clear_before_grad, scaling_sens)
grads = self.grad(self.network, weights)(input_ids,
input_mask,
token_type_id,
label_ids,
self.cast(scaling_sens,
ts.float32))
grads = self.hyper_map(P.partial(grad_scale, scaling_sens), grads)
grads = self.hyper_map(P.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)
if not self.gpu_target:
flag = self.get_status(init)
flag_sum = self.reduce_sum(init, (0,))
grads = P.depend(grads, flag)
flag_sum = P.depend(flag_sum, flag)
else:
flag_sum = self.hyper_map(P.partial(_grad_overflow), grads)
flag_sum = self.addn(flag_sum)
flag_sum = self.reshape(flag_sum, ())
cond = self.less_equal(self.base, flag_sum)
overflow = cond
if sens is None:
overflow = self.loss_scaling_manager(self.loss_scale, cond)
if overflow:
succ = False
else:
succ = self.optimizer(grads)
ret = (loss, cond)
return P.depend(ret, succ)
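# Sketch of the loss-scaling round trip implemented above: the backward pass is
# seeded with `scaling_sens`, so raw gradients arrive multiplied by the scale;
# `grad_scale` divides them back out (grad * 1/scale) before clipping, and any
# inf/nan picked up by the float-status ops drives `cond` high, which lets the
# scale manager shrink the loss scale and skips the optimizer step (succ=False).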
class BertSquadLayer(layers.Layer):
"""
Specifically defined for SQuAD finetuning, where seven input tensors are needed.
"""
def __init__(self, network, optimizer, scale_update_layer=None):
super(BertSquadLayer, self).__init__(auto_prefix=False)
self.network = network
self.network.set_grad()
self.weights = optimizer.parameters
self.optimizer = optimizer
self.grad = P.GradOperation(get_by_list=True, sens_param=True)
self.allreduce = P.AllReduce()
self.grad_reducer = None
self.cast = P.Cast()
self.alloc_status = P.NPUAllocFloatStatus()
self.get_status = P.NPUGetFloatStatus()
self.clear_before_grad = P.NPUClearFloatStatus()
self.reduce_sum = P.ReduceSum(keep_dims=False)
self.depend_parameter_use = P.Depend()
self.base = Tensor(1, ts.float32)
self.less_equal = P.LessEqual()
self.hyper_map = P.HyperMap()
self.loss_scale = None
self.loss_scaling_manager = scale_update_layer
if scale_update_layer:
self.loss_scale = Parameter(Tensor(scale_update_layer.get_loss_scale(), dtype=ts.float32),
name="loss_scale")
def construct(self,
input_ids,
input_mask,
token_type_id,
start_position,
end_position,
unique_id,
is_impossible,
sens=None):
"""BertSquad"""
weights = self.weights
init = self.alloc_status()
loss = self.network(input_ids,
input_mask,
token_type_id,
start_position,
end_position,
unique_id,
is_impossible)
if sens is None:
scaling_sens = self.loss_scale
else:
scaling_sens = sens
grads = self.grad(self.network, weights)(input_ids,
input_mask,
token_type_id,
start_position,
end_position,
unique_id,
is_impossible,
self.cast(scaling_sens,
ts.float32))
clear_before_grad = self.clear_before_grad(init)
loss = P.depend(loss, init)
self.depend_parameter_use(clear_before_grad, scaling_sens)
grads = self.hyper_map(P.partial(grad_scale, scaling_sens), grads)
grads = self.hyper_map(P.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads)
flag = self.get_status(init)
flag_sum = self.reduce_sum(init, (0,))
cond = self.less_equal(self.base, flag_sum)
P.depend(grads, flag)
P.depend(flag, flag_sum)
overflow = cond
if sens is None:
overflow = self.loss_scaling_manager(self.loss_scale, cond)
if overflow:
succ = False
else:
succ = self.optimizer(grads)
ret = (loss, cond)
return P.depend(ret, succ)
class BertCLS(layers.Layer):
"""
Train interface for classification finetuning task.
"""
def __init__(self, config, is_training, num_labels=2, dropout_prob=0.0, use_one_hot_embeddings=False,
assessment_method=""):
super(BertCLS, self).__init__()
self.bert = BertCLSModel(config, is_training, num_labels, dropout_prob, use_one_hot_embeddings,
assessment_method)
self.loss = CrossEntropyCalculation(is_training)
self.num_labels = num_labels
self.assessment_method = assessment_method
self.is_training = is_training
def construct(self, input_ids, input_mask, token_type_id, label_ids):
logits = self.bert(input_ids, input_mask, token_type_id)
if self.assessment_method == "spearman_correlation":
if self.is_training:
loss = self.loss(logits, label_ids)
else:
loss = logits
else:
loss = self.loss(logits, label_ids, self.num_labels)
return loss
class BertNER(layers.Layer):
"""
Train interface for sequence labeling finetuning task.
"""
def __init__(self, config, batch_size, is_training, num_labels=11, use_crf=False,
tag_to_index=None, dropout_prob=0.0, use_one_hot_embeddings=False):
super(BertNER, self).__init__()
self.bert = BertNERModel(config, is_training, num_labels, use_crf, dropout_prob, use_one_hot_embeddings)
if use_crf:
if not tag_to_index:
raise ValueError("A tag-to-index mapping dict must be provided when use_crf is True.")
from src.CRF import CRF
self.loss = CRF(tag_to_index, batch_size, config.seq_length, is_training)
else:
self.loss = CrossEntropyCalculation(is_training)
self.num_labels = num_labels
self.use_crf = use_crf
def construct(self, input_ids, input_mask, token_type_id, label_ids):
logits = self.bert(input_ids, input_mask, token_type_id)
if self.use_crf:
loss = self.loss(logits, label_ids)
else:
loss = self.loss(logits, label_ids, self.num_labels)
return loss
class BertSquad(layers.Layer):
'''
Train interface for SQuAD finetuning task.
'''
def __init__(self, config, is_training, num_labels=2, dropout_prob=0.0, use_one_hot_embeddings=False):
super(BertSquad, self).__init__()
self.bert = BertSquadModel(config, is_training, num_labels, dropout_prob, use_one_hot_embeddings)
self.loss = CrossEntropyCalculation(is_training)
self.num_labels = num_labels
self.seq_length = config.seq_length
self.is_training = is_training
self.total_num = Parameter(Tensor([0], ts.float32), name='total_num')
self.start_num = Parameter(Tensor([0], ts.float32), name='start_num')
self.end_num = Parameter(Tensor([0], ts.float32), name='end_num')
self.sum = P.ReduceSum()
self.equal = P.Equal()
self.argmax = P.ArgMaxWithValue(axis=1)
self.squeeze = P.Squeeze(axis=-1)
def construct(self, input_ids, input_mask, token_type_id, start_position, end_position, unique_id, is_impossible):
"""interface for SQuAD finetuning task"""
logits = self.bert(input_ids, input_mask, token_type_id)
if self.is_training:
unstacked_logits_0 = self.squeeze(logits[:, :, 0:1])
unstacked_logits_1 = self.squeeze(logits[:, :, 1:2])
start_loss = self.loss(unstacked_logits_0, start_position, self.seq_length)
end_loss = self.loss(unstacked_logits_1, end_position, self.seq_length)
total_loss = (start_loss + end_loss) / 2.0
else:
start_logits = self.squeeze(logits[:, :, 0:1])
end_logits = self.squeeze(logits[:, :, 1:2])
total_loss = (unique_id, start_logits, end_logits)
return total_loss
| 40.660714 | 118 | 0.596765 | 1,597 | 13,662 | 4.835942 | 0.155291 | 0.022789 | 0.020199 | 0.026415 | 0.599637 | 0.551599 | 0.533342 | 0.520912 | 0.497475 | 0.495403 | 0 | 0.008167 | 0.309911 | 13,662 | 335 | 119 | 40.78209 | 0.810989 | 0.076856 | 0 | 0.608209 | 0 | 0 | 0.015377 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052239 | false | 0 | 0.037313 | 0.007463 | 0.141791 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cb9eed671220db868e1b0ea5c210e7d65314913 | 5,698 | py | Python | vaultier/nodes/api.py | dz0ny/Vaultier | e23d86c7576f4785b4e369242d7b5f7125e4d8c6 | [
"BSD-3-Clause"
] | 30 | 2015-07-13T11:11:23.000Z | 2021-01-25T14:21:18.000Z | vaultier/nodes/api.py | corpusops/vaultier | 3baef4346add0b3bdff322257467f74b2a0c856c | [
"BSD-3-Clause"
] | null | null | null | vaultier/nodes/api.py | corpusops/vaultier | 3baef4346add0b3bdff322257467f74b2a0c856c | [
"BSD-3-Clause"
] | 31 | 2015-08-10T12:10:16.000Z | 2020-09-18T09:43:28.000Z | from rest_framework.mixins import ListModelMixin, RetrieveModelMixin, \
CreateModelMixin
from .models import Node, Policy
from nodes.business.permissions import PolicyPermission
from nodes.serializers import PolicySerializer
from .serializers import NodeSerializer, NodeBlobSerializer
from .business.permissions import NodePermission
from rest_framework import mixins, status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.generics import GenericAPIView
from vaultier.business.exceptions import CustomAPIException
from vaultier.business.mixins import FullUpdateMixin, UpdateModelMixin
from vaultier.business.viewsets import RestfulGenericViewSet
from django.http.response import Http404
from accounts.models import Member
from django.db import transaction
class NodeViewSet(RestfulGenericViewSet,
mixins.CreateModelMixin,
mixins.DestroyModelMixin,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
FullUpdateMixin):
queryset = Node.objects.all()
serializer_class = NodeSerializer
permission_classes = (IsAuthenticated, NodePermission)
@transaction.atomic
def create(self, request, *args, **kwargs):
return super(NodeViewSet, self).create(request, *args, **kwargs)
def initial(self, request, *args, **kwargs):
"""
Find parent if any
"""
parent_id = self.request.QUERY_PARAMS.get('parent')
if parent_id and parent_id.isdigit():
try:
node = Node.objects.get(id=parent_id)
except Node.DoesNotExist:
raise Http404("Parent node was not found.")
else:
self.kwargs['parent'] = node
return super(NodeViewSet, self).initial(request, *args, **kwargs)
def get_queryset(self):
"""
Change queryset when parent URL param provided
"""
if self.action != "list":
return Node.objects.all()
parent = self.kwargs.get('parent')
policy = Policy.objects.filter(
principal__in=Member.objects.filter(user=self.request.user), mask=Policy.mask.read)
if not parent:
return Node.objects.filter(
level=0, _policies__in=policy).prefetch_related('_policies')
return Node.objects.filter(
parent=parent, _policies__in=policy).prefetch_related('_policies')
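# Example request shapes handled above (hypothetical ids; the URL prefix
# depends on the router configuration, which is not shown here):
# GET /nodes/            -> root nodes (level == 0) the caller may read
# GET /nodes/?parent=42  -> children of node 42 the caller may read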
def pre_save(self, obj):
"""
In action 'create' assign creator
"""
if self.action == "create":
obj.created_by = self.request.user
class NodeDataView(GenericAPIView,
mixins.RetrieveModelMixin,
UpdateModelMixin):
queryset = Node.objects.all()
serializer_class = NodeBlobSerializer
permission_classes = (IsAuthenticated, NodePermission)
def get(self, request, pk):
"""
Added get method
"""
return self.retrieve(request, pk)
def put(self, request, pk):
"""
Added put method
"""
response = self.update(request, pk)
# if success, clear data from response
if response.status_code == status.HTTP_200_OK:
response.data = None
return response
class NodePathView(GenericAPIView):
queryset = Node.objects.all()
serializer_class = NodeSerializer
permission_classes = (IsAuthenticated, NodePermission)
def get(self, request, pk):
"""
Return ordered list of path to all Node parents
"""
node = self.get_object()
nodes = node.get_ancestors(ascending=False)
serializer = self.get_serializer(nodes, many=True)
return Response(serializer.data)
class PolicyViewSet(CreateModelMixin, ListModelMixin, UpdateModelMixin,
RetrieveModelMixin, RestfulGenericViewSet):
model = Policy
serializer_class = PolicySerializer
permission_classes = (IsAuthenticated, NodePermission, PolicyPermission)
def get_queryset(self):
if 'node' in self.kwargs:
return Policy.objects.filter(
subject=self.kwargs['node'],
role__isnull=False, mask__isnull=False)
return Policy.objects.filter(
subject__in=self.kwargs['parent_node'].get_ancestors(
ascending=False, include_self=False),
role__isnull=False, mask__isnull=False)
def initial(self, request, *args, **kwargs):
"""
Find parent if any
"""
node_id = self.request.QUERY_PARAMS.get('node')
parent_id = self.request.QUERY_PARAMS.get('parent_node')
if node_id and node_id.isdigit():
try:
node = Node.objects.get(id=node_id)
except Node.DoesNotExist:
raise Http404("Parent node was not found.")
else:
self.kwargs['node'] = node
elif parent_id and parent_id.isdigit():
try:
node = Node.objects.get(id=parent_id)
except Node.DoesNotExist:
raise Http404("Parent node was not found.")
else:
self.kwargs['parent_node'] = node
else:
detail = "node or node_parent query parameter is missing"
raise CustomAPIException(status_code=400, detail=detail)
return super(PolicyViewSet, self).initial(request, *args, **kwargs)
def pre_save(self, obj):
if self.action in ['create', 'update', 'partial_update']:
obj.subject = self.kwargs.get('node') or \
self.kwargs.get('parent_node')
| 35.836478 | 95 | 0.640225 | 588 | 5,698 | 6.085034 | 0.239796 | 0.033818 | 0.028508 | 0.051425 | 0.347121 | 0.303801 | 0.230017 | 0.230017 | 0.199273 | 0.175797 | 0 | 0.004574 | 0.270972 | 5,698 | 158 | 96 | 36.063291 | 0.856765 | 0.041769 | 0 | 0.342105 | 0 | 0 | 0.049075 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.087719 | false | 0 | 0.140351 | 0.008772 | 0.464912 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cba9d7e266b18a5b6059b60b206b384e90ecfa6 | 697 | py | Python | wicarproject/migrations/versions/8fa25b1b2241_add_filename_column_to_bookingimage_.py | todhm/wicarproject | 5a3ea7b70ba6649af75d9e9bb49683eb6f94b570 | [
"MIT"
] | 1 | 2018-04-20T04:58:50.000Z | 2018-04-20T04:58:50.000Z | wicarproject/migrations/versions/8fa25b1b2241_add_filename_column_to_bookingimage_.py | todhm/wicarproject | 5a3ea7b70ba6649af75d9e9bb49683eb6f94b570 | [
"MIT"
] | 7 | 2021-02-08T20:24:49.000Z | 2022-03-11T23:26:33.000Z | wicarproject/migrations/versions/8fa25b1b2241_add_filename_column_to_bookingimage_.py | todhm/wicarproject | 5a3ea7b70ba6649af75d9e9bb49683eb6f94b570 | [
"MIT"
] | null | null | null | """add filename column to bookingImage Model
Revision ID: 8fa25b1b2241
Revises: 540c302becc1
Create Date: 2018-06-28 21:01:49.930179
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8fa25b1b2241'
down_revision = '540c302becc1'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('booking_image', sa.Column('filename', sa.String(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('booking_image', 'filename')
# ### end Alembic commands ###
| 24.034483 | 85 | 0.704448 | 86 | 697 | 5.627907 | 0.593023 | 0.055785 | 0.086777 | 0.095041 | 0.181818 | 0.181818 | 0.181818 | 0.181818 | 0 | 0 | 0 | 0.086356 | 0.169297 | 697 | 28 | 86 | 24.892857 | 0.749568 | 0.463415 | 0 | 0 | 0 | 0 | 0.195846 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cbec476a1cebf35c3a61f319adccc43c922fbf8 | 22,725 | py | Python | odoo/base-addons/stock_landed_costs/tests/test_stock_landed_costs_purchase.py | LucasBorges-Santos/docker-odoo | 53987bbd61f6119669b5f801ee2ad54695084a21 | [
"MIT"
] | null | null | null | odoo/base-addons/stock_landed_costs/tests/test_stock_landed_costs_purchase.py | LucasBorges-Santos/docker-odoo | 53987bbd61f6119669b5f801ee2ad54695084a21 | [
"MIT"
] | null | null | null | odoo/base-addons/stock_landed_costs/tests/test_stock_landed_costs_purchase.py | LucasBorges-Santos/docker-odoo | 53987bbd61f6119669b5f801ee2ad54695084a21 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import unittest
from odoo.addons.stock_landed_costs.tests.common import TestStockLandedCostsCommon
from odoo.addons.stock_landed_costs.tests.test_stockvaluationlayer import TestStockValuationLC
from odoo.tests import Form, tagged
@tagged('post_install', '-at_install')
class TestLandedCosts(TestStockLandedCostsCommon):
def setUp(self):
super(TestLandedCosts, self).setUp()
# Create picking incoming shipment
self.picking_in = self.Picking.create({
'partner_id': self.supplier_id,
'picking_type_id': self.picking_type_in_id,
'location_id': self.supplier_location_id,
'location_dest_id': self.stock_location_id})
self.Move.create({
'name': self.product_refrigerator.name,
'product_id': self.product_refrigerator.id,
'product_uom_qty': 5,
'product_uom': self.product_refrigerator.uom_id.id,
'picking_id': self.picking_in.id,
'location_id': self.supplier_location_id,
'location_dest_id': self.stock_location_id})
self.Move.create({
'name': self.product_oven.name,
'product_id': self.product_oven.id,
'product_uom_qty': 10,
'product_uom': self.product_oven.uom_id.id,
'picking_id': self.picking_in.id,
'location_id': self.supplier_location_id,
'location_dest_id': self.stock_location_id})
# Create picking outgoing shipment
self.picking_out = self.Picking.create({
'partner_id': self.customer_id,
'picking_type_id': self.picking_type_out_id,
'location_id': self.stock_location_id,
'location_dest_id': self.customer_location_id})
self.Move.create({
'name': self.product_refrigerator.name,
'product_id': self.product_refrigerator.id,
'product_uom_qty': 2,
'product_uom': self.product_refrigerator.uom_id.id,
'picking_id': self.picking_out.id,
'location_id': self.stock_location_id,
'location_dest_id': self.customer_location_id})
def test_00_landed_costs_on_incoming_shipment(self):
chart_of_accounts = self.env.company.chart_template_id
generic_coa = self.env.ref('l10n_generic_coa.configurable_chart_template')
if chart_of_accounts != generic_coa:
raise unittest.SkipTest('Skip this test as it works only with %s (%s loaded)' % (generic_coa.name, chart_of_accounts.name))
""" Test landed cost on incoming shipment """
#
# (A) Purchase product
# Services Quantity Weight Volume
# -----------------------------------------------------
# 1. Refrigerator 5 10 1
# 2. Oven 10 20 1.5
# (B) Add some costs on purchase
# Services Amount Split Method
# -------------------------------------------
# 1.labour 10 By Equal
# 2.brokerage 150 By Quantity
# 3.transportation 250 By Weight
# 4.packaging 20 By Volume
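# Expected split per product, matching valid_vals below:
# equal: 10 / 2 products = 5 each
# by quantity (total 15): 150 * 5/15 = 50 (fridge), 150 * 10/15 = 100 (oven)
# by weight (total 5*10 + 10*20 = 250): 250 * 50/250 = 50, 250 * 200/250 = 200
# by volume (total 5*1 + 10*1.5 = 20): 20 * 5/20 = 5, 20 * 15/20 = 15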
# Process incoming shipment
income_ship = self._process_incoming_shipment()
# Create landed costs
stock_landed_cost = self._create_landed_costs({
'equal_price_unit': 10,
'quantity_price_unit': 150,
'weight_price_unit': 250,
'volume_price_unit': 20}, income_ship)
# Compute landed costs
stock_landed_cost.compute_landed_cost()
valid_vals = {
'equal': 5.0,
'by_quantity_refrigerator': 50.0,
'by_quantity_oven': 100.0,
'by_weight_refrigerator': 50.0,
'by_weight_oven': 200,
'by_volume_refrigerator': 5.0,
'by_volume_oven': 15.0}
# Check valuation adjustment line recognized or not
self._validate_additional_landed_cost_lines(stock_landed_cost, valid_vals)
# Validate the landed cost.
stock_landed_cost.button_validate()
self.assertTrue(stock_landed_cost.account_move_id, 'Landed costs should be available account move lines')
account_entry = self.env['account.move.line'].read_group(
[('move_id', '=', stock_landed_cost.account_move_id.id)], ['debit', 'credit', 'move_id'], ['move_id'])[0]
self.assertEqual(account_entry['debit'], account_entry['credit'], 'Debit and credit are not equal')
self.assertEqual(account_entry['debit'], 430.0, 'Wrong Account Entry')
def test_00_landed_costs_on_incoming_shipment_without_real_time(self):
chart_of_accounts = self.env.company.chart_template_id
generic_coa = self.env.ref('l10n_generic_coa.configurable_chart_template')
if chart_of_accounts != generic_coa:
raise unittest.SkipTest('Skip this test as it works only with %s (%s loaded)' % (generic_coa.name, chart_of_accounts.name))
""" Test landed cost on incoming shipment """
#
# (A) Purchase product
# Services Quantity Weight Volume
# -----------------------------------------------------
# 1. Refrigerator 5 10 1
# 2. Oven 10 20 1.5
# (B) Add some costs on purchase
# Services Amount Split Method
# -------------------------------------------
# 1.labour 10 By Equal
# 2.brokerage 150 By Quantity
# 3.transportation 250 By Weight
# 4.packaging 20 By Volume
self.product_refrigerator.write({"categ_id": self.categ_manual_periodic.id})
self.product_oven.write({"categ_id": self.categ_manual_periodic.id})
# Process incoming shipment
income_ship = self._process_incoming_shipment()
# Create landed costs
stock_landed_cost = self._create_landed_costs({
'equal_price_unit': 10,
'quantity_price_unit': 150,
'weight_price_unit': 250,
'volume_price_unit': 20}, income_ship)
# Compute landed costs
stock_landed_cost.compute_landed_cost()
valid_vals = {
'equal': 5.0,
'by_quantity_refrigerator': 50.0,
'by_quantity_oven': 100.0,
'by_weight_refrigerator': 50.0,
'by_weight_oven': 200,
'by_volume_refrigerator': 5.0,
'by_volume_oven': 15.0}
# Check valuation adjustment line recognized or not
self._validate_additional_landed_cost_lines(stock_landed_cost, valid_vals)
# Validate the landed cost.
stock_landed_cost.button_validate()
self.assertFalse(stock_landed_cost.account_move_id)
def test_01_negative_landed_costs_on_incoming_shipment(self):
chart_of_accounts = self.env.company.chart_template_id
generic_coa = self.env.ref('l10n_generic_coa.configurable_chart_template')
if chart_of_accounts != generic_coa:
raise unittest.SkipTest('Skip this test as it works only with %s (%s loaded)' % (generic_coa.name, chart_of_accounts.name))
""" Test negative landed cost on incoming shipment """
#
# (A) Purchase Product
# Services Quantity Weight Volume
# -----------------------------------------------------
# 1. Refrigerator 5 10 1
# 2. Oven 10 20 1.5
# (B) Sale refrigerator's part of the quantity
# (C) Add some costs on purchase
# Services Amount Split Method
# -------------------------------------------
# 1.labour 10 By Equal
# 2.brokerage 150 By Quantity
# 3.transportation 250 By Weight
# 4.packaging 20 By Volume
# (D) Decrease cost that already added on purchase
# (apply negative entry)
# Services Amount Split Method
# -------------------------------------------
# 1.labour -5 By Equal
# 2.brokerage -50 By Quantity
# 3.transportation -50 By Weight
# 4.packaging -5 By Volume
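# Expected negative split per product (rounded to 2 decimals), matching
# valid_vals below:
# equal: -5 / 2 products = -2.5 each
# by quantity: -50 * 5/15 = -16.67 (fridge), -50 * 10/15 = -33.33 (oven)
# by weight: -50 * 50/250 = -10.0 (fridge), -50 * 200/250 = -40.0 (oven)
# by volume: -5 * 5/20 = -1.25 (fridge), -5 * 15/20 = -3.75 (oven)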
# Process incoming shipment
income_ship = self._process_incoming_shipment()
# Refrigerator outgoing shipment.
self._process_outgoing_shipment()
# Apply landed cost for incoming shipment.
stock_landed_cost = self._create_landed_costs({
'equal_price_unit': 10,
'quantity_price_unit': 150,
'weight_price_unit': 250,
'volume_price_unit': 20}, income_ship)
# Compute landed costs
stock_landed_cost.compute_landed_cost()
valid_vals = {
'equal': 5.0,
'by_quantity_refrigerator': 50.0,
'by_quantity_oven': 100.0,
'by_weight_refrigerator': 50.0,
'by_weight_oven': 200.0,
'by_volume_refrigerator': 5.0,
'by_volume_oven': 15.0}
# Check valuation adjustment line recognized or not
self._validate_additional_landed_cost_lines(stock_landed_cost, valid_vals)
# Validate the landed cost.
stock_landed_cost.button_validate()
self.assertTrue(stock_landed_cost.account_move_id, 'Landed costs should be available account move lines')
# Create negative landed cost for previously incoming shipment.
stock_negative_landed_cost = self._create_landed_costs({
'equal_price_unit': -5,
'quantity_price_unit': -50,
'weight_price_unit': -50,
'volume_price_unit': -5}, income_ship)
# Compute negative landed costs
stock_negative_landed_cost.compute_landed_cost()
valid_vals = {
'equal': -2.5,
'by_quantity_refrigerator': -16.67,
'by_quantity_oven': -33.33,
'by_weight_refrigerator': -10.00,
'by_weight_oven': -40.00,
'by_volume_refrigerator': -1.25,
'by_volume_oven': -3.75}
# Check valuation adjustment line recognized or not
self._validate_additional_landed_cost_lines(stock_negative_landed_cost, valid_vals)
# Validate the landed cost.
stock_negative_landed_cost.button_validate()
self.assertEqual(stock_negative_landed_cost.state, 'done', 'Negative landed costs should be in done state')
self.assertTrue(stock_negative_landed_cost.account_move_id, 'Landed costs should be available account move lines')
account_entry = self.env['account.move.line'].read_group(
[('move_id', '=', stock_negative_landed_cost.account_move_id.id)], ['debit', 'credit', 'move_id'], ['move_id'])[0]
self.assertEqual(account_entry['debit'], account_entry['credit'], 'Debit and credit are not equal')
move_lines = [
{'name': 'split by volume - Microwave Oven', 'debit': 3.75, 'credit': 0.0},
{'name': 'split by volume - Microwave Oven', 'debit': 0.0, 'credit': 3.75},
{'name': 'split by weight - Microwave Oven', 'debit': 40.0, 'credit': 0.0},
{'name': 'split by weight - Microwave Oven', 'debit': 0.0, 'credit': 40.0},
{'name': 'split by quantity - Microwave Oven', 'debit': 33.33, 'credit': 0.0},
{'name': 'split by quantity - Microwave Oven', 'debit': 0.0, 'credit': 33.33},
{'name': 'equal split - Microwave Oven', 'debit': 2.5, 'credit': 0.0},
{'name': 'equal split - Microwave Oven', 'debit': 0.0, 'credit': 2.5},
{'name': 'split by volume - Refrigerator: 2.0 already out', 'debit': 0.5, 'credit': 0.0},
{'name': 'split by volume - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 0.5},
{'name': 'split by weight - Refrigerator: 2.0 already out', 'debit': 4.0, 'credit': 0.0},
{'name': 'split by weight - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 4.0},
{'name': 'split by weight - Refrigerator', 'debit': 0.0, 'credit': 10.0},
{'name': 'split by weight - Refrigerator', 'debit': 10.0, 'credit': 0.0},
{'name': 'split by volume - Refrigerator', 'debit': 0.0, 'credit': 1.25},
{'name': 'split by volume - Refrigerator', 'debit': 1.25, 'credit': 0.0},
{'name': 'split by quantity - Refrigerator: 2.0 already out', 'debit': 6.67, 'credit': 0.0},
{'name': 'split by quantity - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 6.67},
{'name': 'split by quantity - Refrigerator', 'debit': 16.67, 'credit': 0.0},
{'name': 'split by quantity - Refrigerator', 'debit': 0.0, 'credit': 16.67},
{'name': 'equal split - Refrigerator: 2.0 already out', 'debit': 1.0, 'credit': 0.0},
{'name': 'equal split - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 1.0},
{'name': 'equal split - Refrigerator', 'debit': 2.5, 'credit': 0.0},
{'name': 'equal split - Refrigerator', 'debit': 0.0, 'credit': 2.5}
]
if stock_negative_landed_cost.account_move_id.company_id.anglo_saxon_accounting:
move_lines += [
{'name': 'split by volume - Refrigerator: 2.0 already out', 'debit': 0.5, 'credit': 0.0},
{'name': 'split by volume - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 0.5},
{'name': 'split by weight - Refrigerator: 2.0 already out', 'debit': 4.0, 'credit': 0.0},
{'name': 'split by weight - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 4.0},
{'name': 'split by quantity - Refrigerator: 2.0 already out', 'debit': 6.67, 'credit': 0.0},
{'name': 'split by quantity - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 6.67},
{'name': 'equal split - Refrigerator: 2.0 already out', 'debit': 1.0, 'credit': 0.0},
{'name': 'equal split - Refrigerator: 2.0 already out', 'debit': 0.0, 'credit': 1.0},
]
self.assertRecordValues(
sorted(stock_negative_landed_cost.account_move_id.line_ids, key=lambda d: (d['name'], d['debit'])),
sorted(move_lines, key=lambda d: (d['name'], d['debit'])),
)
def _process_incoming_shipment(self):
""" Two product incoming shipment. """
# Confirm incoming shipment.
self.picking_in.action_confirm()
# Transfer incoming shipment
res_dict = self.picking_in.button_validate()
wizard = self.env[(res_dict.get('res_model'))].browse(res_dict.get('res_id'))
wizard.process()
return self.picking_in
def _process_outgoing_shipment(self):
""" One product Outgoing shipment. """
# Confirm outgoing shipment.
self.picking_out.action_confirm()
# Product assign to outgoing shipments
self.picking_out.action_assign()
# Transfer picking.
res_dict = self.picking_out.button_validate()
wizard = self.env[(res_dict.get('res_model'))].browse(res_dict.get('res_id'))
wizard.process()
def _create_landed_costs(self, value, picking_in):
return self.LandedCost.create(dict(
picking_ids=[(6, 0, [picking_in.id])],
account_journal_id=self.expenses_journal.id,
cost_lines=[
(0, 0, {
'name': 'equal split',
'split_method': 'equal',
'price_unit': value['equal_price_unit'],
'product_id': self.landed_cost.id}),
(0, 0, {
'name': 'split by quantity',
'split_method': 'by_quantity',
'price_unit': value['quantity_price_unit'],
'product_id': self.brokerage_quantity.id}),
(0, 0, {
'name': 'split by weight',
'split_method': 'by_weight',
'price_unit': value['weight_price_unit'],
'product_id': self.transportation_weight.id}),
(0, 0, {
'name': 'split by volume',
'split_method': 'by_volume',
'price_unit': value['volume_price_unit'],
'product_id': self.packaging_volume.id})
],
))
def _validate_additional_landed_cost_lines(self, stock_landed_cost, valid_vals):
for valuation in stock_landed_cost.valuation_adjustment_lines:
add_cost = valuation.additional_landed_cost
split_method = valuation.cost_line_id.split_method
product = valuation.move_id.product_id
if split_method == 'equal':
self.assertEqual(add_cost, valid_vals['equal'], self._error_message(valid_vals['equal'], add_cost))
elif split_method == 'by_quantity' and product == self.product_refrigerator:
self.assertEqual(add_cost, valid_vals['by_quantity_refrigerator'], self._error_message(valid_vals['by_quantity_refrigerator'], add_cost))
elif split_method == 'by_quantity' and product == self.product_oven:
self.assertEqual(add_cost, valid_vals['by_quantity_oven'], self._error_message(valid_vals['by_quantity_oven'], add_cost))
elif split_method == 'by_weight' and product == self.product_refrigerator:
self.assertEqual(add_cost, valid_vals['by_weight_refrigerator'], self._error_message(valid_vals['by_weight_refrigerator'], add_cost))
elif split_method == 'by_weight' and product == self.product_oven:
self.assertEqual(add_cost, valid_vals['by_weight_oven'], self._error_message(valid_vals['by_weight_oven'], add_cost))
elif split_method == 'by_volume' and product == self.product_refrigerator:
self.assertEqual(add_cost, valid_vals['by_volume_refrigerator'], self._error_message(valid_vals['by_volume_refrigerator'], add_cost))
elif split_method == 'by_volume' and product == self.product_oven:
self.assertEqual(add_cost, valid_vals['by_volume_oven'], self._error_message(valid_vals['by_volume_oven'], add_cost))
def _error_message(self, actual_cost, computed_cost):
return 'Additional Landed Cost should be %s instead of %s' % (actual_cost, computed_cost)
@tagged('post_install', '-at_install')
class TestLandedCostsWithPurchaseAndInv(TestStockValuationLC):
def test_invoice_after_lc(self):
self.env.company.anglo_saxon_accounting = True
self.product1.product_tmpl_id.categ_id.property_cost_method = 'fifo'
self.product1.product_tmpl_id.categ_id.property_valuation = 'real_time'
self.product1.product_tmpl_id.invoice_policy = 'delivery'
self.price_diff_account = self.env['account.account'].create({
'name': 'price diff account',
'code': 'price diff account',
'user_type_id': self.env.ref('account.data_account_type_current_assets').id,
})
self.product1.property_account_creditor_price_difference = self.price_diff_account
# Create PO
po_form = Form(self.env['purchase.order'])
po_form.partner_id = self.env['res.partner'].create({'name': 'vendor'})
with po_form.order_line.new() as po_line:
po_line.product_id = self.product1
po_line.product_qty = 1
po_line.price_unit = 455.0
order = po_form.save()
order.button_confirm()
# Receive the goods
receipt = order.picking_ids[0]
receipt.move_lines.quantity_done = 1
receipt.button_validate()
# Check SVL and AML
svl = self.env['stock.valuation.layer'].search([('stock_move_id', '=', receipt.move_lines.id)])
self.assertAlmostEqual(svl.value, 455)
aml = self.env['account.move.line'].search([('account_id', '=', self.stock_valuation_account.id)])
self.assertAlmostEqual(aml.debit, 455)
# Create and validate LC
lc = self.env['stock.landed.cost'].create(dict(
picking_ids=[(6, 0, [receipt.id])],
account_journal_id=self.stock_journal.id,
cost_lines=[
(0, 0, {
'name': 'equal split',
'split_method': 'equal',
'price_unit': 99,
'product_id': self.productlc1.id,
}),
],
))
lc.compute_landed_cost()
lc.button_validate()
# Check LC, SVL and AML
self.assertAlmostEqual(lc.valuation_adjustment_lines.final_cost, 554)
svl = self.env['stock.valuation.layer'].search([('stock_move_id', '=', receipt.move_lines.id)], order='id desc', limit=1)
self.assertAlmostEqual(svl.value, 99)
aml = self.env['account.move.line'].search([('account_id', '=', self.stock_valuation_account.id)], order='id desc', limit=1)
self.assertAlmostEqual(aml.debit, 99)
# Create an invoice with the same price
move_form = Form(self.env['account.move'].with_context(default_type='in_invoice'))
move_form.partner_id = order.partner_id
move_form.purchase_id = order
move = move_form.save()
move.post()
# Check nothing was posted in the price difference account
price_diff_aml = self.env['account.move.line'].search([('account_id','=', self.price_diff_account.id), ('move_id', '=', move.id)])
self.assertEqual(len(price_diff_aml), 0, "No line should have been generated in the price difference account.")
| 53.470588 | 153 | 0.575622 | 2,602 | 22,725 | 4.764796 | 0.101845 | 0.040329 | 0.023955 | 0.01839 | 0.72552 | 0.683255 | 0.640023 | 0.578077 | 0.53799 | 0.522746 | 0 | 0.028697 | 0.29923 | 22,725 | 424 | 154 | 53.596698 | 0.749827 | 0.149219 | 0 | 0.431973 | 0 | 0 | 0.24856 | 0.032576 | 0 | 0 | 0 | 0 | 0.07483 | 1 | 0.034014 | false | 0 | 0.013605 | 0.006803 | 0.064626 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cc06bdeadf998bf4b2909eaece7a24a89fae2cf | 928 | py | Python | OpenCV Drawing/polygon_at_random_points_in_image.py | kethan1/OpenCV-Python | 727eaa6399e2cc7be6e4189da500015f18948ea5 | [
"MIT"
] | null | null | null | OpenCV Drawing/polygon_at_random_points_in_image.py | kethan1/OpenCV-Python | 727eaa6399e2cc7be6e4189da500015f18948ea5 | [
"MIT"
] | null | null | null | OpenCV Drawing/polygon_at_random_points_in_image.py | kethan1/OpenCV-Python | 727eaa6399e2cc7be6e4189da500015f18948ea5 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import random
image = cv2.imread("../Images/image_with_colors.png")
point1 = [
random.randint(1, image.shape[1]),
random.randint(1, image.shape[0]),
]
number_of_random_points = random.randint(2, 5) # Random number of points
points = [
np.array([
np.array([point1]),
*[
np.array([[
random.randint(1, image.shape[1]),
random.randint(1, image.shape[0])
]])
for _ in range(number_of_random_points)
],
np.array([point1])
], dtype=np.int32)
]
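# `points` is a list holding one (N, 1, 2) int32 array, the contour layout
# that cv2.polylines/cv2.fillPoly expect; `point1` is appended again at the
# end so the outline closes back on itself.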
if random.randint(0, 1): # Either fills the shape or just draws the outline
image = cv2.polylines(
image,
points,
True,
(0, 0, 255),
2
)
else:
image = cv2.fillPoly(
image,
points,
(0, 0, 255),
)
cv2.imshow('Test', image)
cv2.waitKey()
cv2.destroyAllWindows()
| 21.090909 | 76 | 0.550647 | 115 | 928 | 4.365217 | 0.391304 | 0.155378 | 0.111554 | 0.151394 | 0.199203 | 0.199203 | 0.199203 | 0.199203 | 0.199203 | 0.199203 | 0 | 0.054773 | 0.311422 | 928 | 43 | 77 | 21.581395 | 0.730829 | 0.077586 | 0 | 0.205128 | 0 | 0 | 0.041032 | 0.036342 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cc29243df807eae54e6754681e5cc9f414c7841 | 6,453 | py | Python | pokemonlib.py | Jarwien/PGoTrader | 588e7f271b0e5dbd567560b956137d06564ad042 | [
"MIT"
] | 13 | 2019-02-13T21:42:13.000Z | 2021-08-20T12:18:46.000Z | pokemonlib.py | acocalypso/PGoTrader | 9b970c684db5f47ff6b2f832e7ca40b7157bc153 | [
"MIT"
] | 4 | 2021-03-19T00:23:21.000Z | 2022-03-11T23:46:09.000Z | pokemonlib.py | acocalypso/PGoTrader | 9b970c684db5f47ff6b2f832e7ca40b7157bc153 | [
"MIT"
] | 6 | 2019-05-02T05:39:48.000Z | 2021-12-25T10:07:33.000Z | from io import BytesIO
from PIL import Image
import asyncio
import logging
import subprocess
import re
from colorlog import ColoredFormatter
logger = logging.getLogger('PokemonGo')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = ColoredFormatter(" %(log_color)s%(levelname)-8s%(reset)s | %(log_color)s%(message)s%(reset)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
RE_CLIPBOARD_TEXT = re.compile(r"^./ClipboardReceiver\(\s*\d+\): Clipboard text: (.+)$")
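# Example logcat line this pattern matches (hypothetical values):
# "D/ClipboardReceiver( 1234): Clipboard text: 4* 96% IV"
# group(1) would then be "4* 96% IV".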
class CalcyIVError(Exception):
# logger.error('CalcyIV did not find any combinations.')
pass
class RedBarError(Exception):
# logger.error('The red bar is covering the pokémon CP.')
pass
class PhoneNotConnectedError(Exception):
# logger.error('Your phone does not appear to be connected. Try \'adb devices\' and see if it is listed there :)')
pass
class LogcatNotRunningError(Exception):
# logger.error('For some reason, I can\'t run the logcat on your phone! :( Try to run \'adb logcat\' and see if something happens. Message the developers as well!')
pass
class PokemonGo(object):
def __init__(self):
self.device_id = None
self.calcy_pid = None
self.use_fallback_screenshots = False
async def screencap(self):
if not self.use_fallback_screenshots:
return_code, stdout, stderr = await self.run(["adb", "-s", await self.get_device(), "exec-out", "screencap", "-p"])
try:
return Image.open(BytesIO(stdout))
except (OSError, IOError):
logger.debug("Screenshot failed, using fallback method")
# self.use_fallback_screenshots = True
return_code, stdout, stderr = await self.run(["adb", "-s", await self.get_device(), "shell", "screencap", "-p", "/sdcard/screen.png"])
return_code, stdout, stderr = await self.run(["adb", "-s", await self.get_device(), "pull", "/sdcard/screen.png", "."])
image = Image.open("screen.png")
return image
async def set_device(self, device_id=None):
self.device_id = device_id
async def get_device(self):
if self.device_id:
return self.device_id
devices = await self.get_devices()
if devices == []:
raise PhoneNotConnectedError
self.device_id = devices[0]
return self.device_id
async def run(self, args):
logger.info("Running %s", args)
p = subprocess.Popen([str(arg) for arg in args], stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
logger.debug("Return code %d", p.returncode)
return (p.returncode, stdout, stderr)
async def get_devices(self):
code, stdout, stderr = await self.run(["adb", "devices"])
devices = []
for line in stdout.decode('utf-8').splitlines()[1:-1]:
device_id, name = line.split('\t')
devices.append(device_id)
return devices
async def start_logcat(self):
# return_code, stdout, stderr = await self.run(["adb", "-s", await self.get_device(), "shell", "pidof", "-s", "tesmath.calcy"])
# logger.info("Running pidof calcy got code %d: %s", return_code, stdout)
# self.calcy_pid = stdout.decode('utf-8').strip()
# cmd = ["adb", "-s", await self.get_device(), "logcat", "-T", "1", "-v", "brief", "--pid", self.calcy_pid]
cmd = ["adb", "-s", await self.get_device(), "logcat", "-T", "1", "-v", "brief"]
logger.info("Starting logcat %s", cmd)
self.logcat_task = await asyncio.create_subprocess_exec(
*cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
await self.logcat_task.stdout.readline() # Read and discard the one line as -T 0 doesn't work
async def seek_to_end(self):
# Seek to the end of the file
while True:
try:
task = await asyncio.wait_for(self.logcat_task.stdout.readline(), 0.2)
except asyncio.TimeoutError:
break
async def read_logcat(self):
if self.logcat_task.returncode is not None:
logger.error("Logcat process is not running")
logger.error("stdout %s", await self.logcat_task.stdout.read())
logger.error("stderr %s", await self.logcat_task.stderr.read())
raise LogcatNotRunningError()
line = await self.logcat_task.stdout.readline()
line = line.decode('utf-8', errors='ignore').rstrip()
#while line.split()[2].decode('utf-8') != self.calcy_pid:
# line = await self.logcat_task.stdout.readline()
#logger.debug("Received logcat line: %s", line)
return line
async def get_clipboard(self):
await self.send_intent("clipper.get")
while True:
line = await self.read_logcat()
match = RE_CLIPBOARD_TEXT.match(line)
if match:
logger.info("RE_CLIPBOARD_TEXT matched.")
return match.group(1)
async def send_intent(self, intent, package=None, extra_values=[]):
cmd = "am broadcast -a {}".format(intent)
if package:
cmd = cmd + " -n {}".format(package)
for key, value in extra_values:
if isinstance(value, bool):
cmd = cmd + " --ez {} {}".format(key, "true" if value else "false")
elif '--user' in key:
cmd = cmd + " --user {}".format(value)
else:
cmd = cmd + " -e {} '{}'".format(key, value)
logger.info("Sending intent: " + cmd)
await self.run(["adb", "-s", await self.get_device(), "shell", cmd])
async def tap(self, x, y):
await self.run(["adb", "-s", await self.get_device(), "shell", "input", "tap", x, y])
async def key(self, key):
await self.run(["adb", "-s", await self.get_device(), "shell", "input", "keyevent", key])
async def text(self, text):
await self.run(["adb", "-s", await self.get_device(), "shell", "input", "text", text])
async def swipe(self, x1, y1, x2, y2, duration=None):
args = [
"adb",
"-s",
await self.get_device(),
"shell",
"input",
"swipe",
x1,
y1,
x2,
y2
]
if duration:
args.append(duration)
await self.run(args)
| 38.640719 | 168 | 0.592128 | 804 | 6,453 | 4.661692 | 0.263682 | 0.069637 | 0.034685 | 0.038154 | 0.204642 | 0.174226 | 0.165422 | 0.137407 | 0.128869 | 0.128869 | 0 | 0.004855 | 0.265923 | 6,453 | 166 | 169 | 38.873494 | 0.786363 | 0.156051 | 0 | 0.077519 | 0 | 0.007752 | 0.116001 | 0.018597 | 0 | 0 | 0 | 0 | 0 | 1 | 0.007752 | false | 0.031008 | 0.054264 | 0 | 0.162791 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cc39514b838f2b9f021711aaad48885a39ff3ea | 1,351 | py | Python | side_scroller/pause_screen.py | pecjas/Sidescroller-PyGame | dfcaf4ff95a1733714eaaeb00dc00cd876ab1468 | [
"MIT"
] | null | null | null | side_scroller/pause_screen.py | pecjas/Sidescroller-PyGame | dfcaf4ff95a1733714eaaeb00dc00cd876ab1468 | [
"MIT"
] | null | null | null | side_scroller/pause_screen.py | pecjas/Sidescroller-PyGame | dfcaf4ff95a1733714eaaeb00dc00cd876ab1468 | [
"MIT"
] | null | null | null | import pygame
from side_scroller.settings import GameSettings, Fonts
from side_scroller.constants import WHITE, BLACK, TINT_ALPHA_PAUSE
from side_scroller.player import Player
from side_scroller.score import Score
class PauseScreen():
def __init__(self):
self.pause_text1 = Fonts.pause_font.render("Paused", True, WHITE)
self.pause_text2 = Fonts.pause_font.render("Press Enter to continue", True, WHITE)
self.previous_screen = None
def tint_screen(self, screen: pygame.surface, tint_color):
display_tint = pygame.Surface(screen.get_size())
display_tint.fill(tint_color)
display_tint.set_alpha(TINT_ALPHA_PAUSE)
screen.blit(display_tint, (0, 0))
def display(self, screen: pygame.surface):
self.previous_screen = screen.copy()
self.tint_screen(screen, BLACK)
screen.blit(self.pause_text1,
self.pause_text1.get_rect(
center=(int(GameSettings.width/2), int(GameSettings.height/4))
))
screen.blit(self.pause_text2,
self.pause_text2.get_rect(
center=(int(GameSettings.width/2), int(GameSettings.height/2))))
pygame.display.update()
def undisplay(self, screen: pygame.surface):
screen.blit(self.previous_screen, (0, 0))
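# Usage sketch (hypothetical game loop, outside this class):
# pause = PauseScreen()
# pause.display(screen)    # snapshots the frame, dims it, draws the text
# ...                      # wait for the player to press Enter
# pause.undisplay(screen)  # restores the snapshot taken by display()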
| 34.641026 | 90 | 0.663212 | 167 | 1,351 | 5.155689 | 0.317365 | 0.062718 | 0.074332 | 0.080139 | 0.127758 | 0.127758 | 0.127758 | 0.127758 | 0.127758 | 0.127758 | 0 | 0.013553 | 0.235381 | 1,351 | 38 | 91 | 35.552632 | 0.819942 | 0 | 0 | 0 | 0 | 0 | 0.021466 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.178571 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cc5184033c05782157d9686b2680d2335be90c9 | 479 | py | Python | untar-packages.py | shipp02/linux-from-scratch | f1ae6a9b38e78331d4590cf2061d323830373b19 | [
"Apache-2.0"
] | 1 | 2020-06-17T13:10:11.000Z | 2020-06-17T13:10:11.000Z | untar-packages.py | shipp02/linux-from-scratch | f1ae6a9b38e78331d4590cf2061d323830373b19 | [
"Apache-2.0"
] | null | null | null | untar-packages.py | shipp02/linux-from-scratch | f1ae6a9b38e78331d4590cf2061d323830373b19 | [
"Apache-2.0"
] | null | null | null | import tarfile as tar
def get_name(url):
for i in range(0, len(url), 1):
if url[len(url) - i - 1] == '/':
return url[len(url)-i:len(url)]
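# e.g. get_name("https://example.org/pkg-1.0.tar.xz") -> "pkg-1.0.tar.xz"
# (hypothetical URL; the function falls through and returns None when the
# string contains no '/')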
url_file = open('urls.txt', mode='r')
lines = url_file.readlines()
lines = [x.strip() for x in lines]
url_file.close()
if __name__=='__main__':
for line in lines:
name = get_name(line)
print(name)
tfile = tar.open(name, mode='r:*')
tfile.extractall()
tfile.close()
| 19.958333 | 43 | 0.565762 | 73 | 479 | 3.534247 | 0.452055 | 0.093023 | 0.054264 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008547 | 0.267223 | 479 | 23 | 44 | 20.826087 | 0.726496 | 0 | 0 | 0 | 0 | 0 | 0.043841 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.1875 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cc852c1c8a95413d2202e47b00580324f2fa4f8 | 442 | py | Python | scatterPlot/scatterPlot.py | AppAnalysis-BGSU/AMD_VirusTotal | 47955466da2e5c5e2b33ec0cb26ae63a50a7075e | [
"MIT"
] | null | null | null | scatterPlot/scatterPlot.py | AppAnalysis-BGSU/AMD_VirusTotal | 47955466da2e5c5e2b33ec0cb26ae63a50a7075e | [
"MIT"
] | null | null | null | scatterPlot/scatterPlot.py | AppAnalysis-BGSU/AMD_VirusTotal | 47955466da2e5c5e2b33ec0cb26ae63a50a7075e | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
group = ("time<20 seconds")
time=[1,2,5,10,100,100,100,100,200,300,1]
for i in range(1,100):
time.append(i)
for j in range(1,10):
time.append(50)
# Create plot
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, facecolor="1.0")  # 'axisbg' was removed in Matplotlib 2.x; 'facecolor' replaces it
for x in time:
ax.scatter(x, 0, alpha=0.4, c="red", edgecolors='none', s=5)
plt.title('Execution time of LockScreen Plugin')
plt.legend(loc=2)
plt.show() | 19.217391 | 61 | 0.669683 | 88 | 442 | 3.352273 | 0.579545 | 0.061017 | 0.061017 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.119681 | 0.149321 | 442 | 23 | 62 | 19.217391 | 0.664894 | 0.024887 | 0 | 0 | 0 | 0 | 0.139535 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.133333 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cc92ea55afee8a2dc23b6dd2938d306410a7817 | 1,190 | py | Python | class5/ex2c.py | nkbyrne/pyplus | 2fd31eb41c697259f641fd90a371d2cd9ed4a673 | [
"Apache-2.0"
] | null | null | null | class5/ex2c.py | nkbyrne/pyplus | 2fd31eb41c697259f641fd90a371d2cd9ed4a673 | [
"Apache-2.0"
] | null | null | null | class5/ex2c.py | nkbyrne/pyplus | 2fd31eb41c697259f641fd90a371d2cd9ed4a673 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from __future__ import unicode_literals, print_function
from jinja2 import FileSystemLoader, StrictUndefined
from jinja2.environment import Environment
from netmiko import ConnectHandler
import my_devices
env = Environment(undefined=StrictUndefined)
env.loader = FileSystemLoader([".", "./templates/"])
template_file = "ex2b.j2"
template = env.get_template(template_file)
nxos1_config = {
"interface": "Ethernet2/1",
"ip_address": "10.1.100.1",
"netmask": "24",
"local_as": "22",
"peer_ip": "10.1.100.2",
"remote_as": "22",
}
nxos2_config = {
"interface": "Ethernet2/1",
"ip_address": "10.1.100.2",
"netmask": "24",
"local_as": "22",
"peer_ip": "10.1.100.1",
"remote_as": "22",
}
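# ex2b.j2 itself is not shown here; given the fields above it is presumably
# shaped roughly like this (an assumed sketch, not the actual template):
# interface {{ interface }}
#   ip address {{ ip_address }}/{{ netmask }}
# router bgp {{ local_as }}
#   neighbor {{ peer_ip }} remote-as {{ remote_as }}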
nxos1 = my_devices.nxos1
nxos2 = my_devices.nxos2
for device in nxos1, nxos2:
net_connect = ConnectHandler(**device)
if device == nxos1:
print(net_connect.find_prompt())
config = template.render(nxos1_config)
net_connect.send_config_set(config)
elif device == nxos2:
print(net_connect.find_prompt())
config = template.render(nxos2_config)
net_connect.send_config_set(config)
| 27.045455 | 55 | 0.680672 | 151 | 1,190 | 5.13245 | 0.377483 | 0.064516 | 0.030968 | 0.064516 | 0.387097 | 0.387097 | 0.387097 | 0.296774 | 0.180645 | 0.077419 | 0 | 0.06135 | 0.178151 | 1,190 | 43 | 56 | 27.674419 | 0.731084 | 0.016807 | 0 | 0.324324 | 0 | 0 | 0.165954 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.135135 | 0 | 0.135135 | 0.081081 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cca63ed944173cd0b1eb620be9eed569fbf4d02 | 36,968 | py | Python | wandb/run_manager.py | rguerrettaz/client | 06a8759ad9c3c407e815cecbd789c3a2d44e4a2b | [
"MIT"
] | null | null | null | wandb/run_manager.py | rguerrettaz/client | 06a8759ad9c3c407e815cecbd789c3a2d44e4a2b | [
"MIT"
] | null | null | null | wandb/run_manager.py | rguerrettaz/client | 06a8759ad9c3c407e815cecbd789c3a2d44e4a2b | [
"MIT"
] | null | null | null | import errno
import json
import logging
import os
import psutil
import re
import signal
import socket
import stat
import subprocess
import sys
import time
from tempfile import NamedTemporaryFile
import threading
import yaml
import numbers
import inspect
import click
from shortuuid import ShortUUID
import six
from six.moves import queue
import requests
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import webbrowser
import wandb
import wandb.api
from .api import BinaryFilePolicy, CRDedupeFilePolicy, DefaultFilePolicy, OverwriteFilePolicy
from wandb import env
from wandb import Error
from wandb import io_wrap
from wandb import jsonlfile
from wandb import file_pusher
from wandb import meta
from wandb.core import START_TIME
import wandb.rwlock
from wandb import sparkline
from wandb import stats
from wandb import streaming_log
from wandb import util
from wandb import wandb_config as config
from wandb import wandb_run
from wandb import wandb_socket
logger = logging.getLogger(__name__)
OUTPUT_FNAME = 'output.log'
class LaunchError(Error):
"""Raised when there's an error starting up."""
class FileTailer(object):
def __init__(self, path, on_read_fn, binary=False, seek_end=False):
self._path = path
mode = 'r'
if binary:
mode = 'rb'
self._file = open(path, mode)
if seek_end:
self._file.seek(0, 2) # seek to 0 bytes from end (2 means end)
self._on_read_fn = on_read_fn
self.running = True
self._thread = threading.Thread(target=self._thread_body)
self._thread.start()
def _thread_body(self):
while self.running:
where = self._file.tell()
data = self._file.read(1024)
if not data:
time.sleep(1)
# required to get python2 working (Issue #50)
self._file.seek(where)
else:
self._on_read_fn(data)
def stop(self):
self._file.seek(0)
self.running = False
self._thread.join()
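# Minimal usage sketch (hypothetical path): stream bytes appended to a log
# file into a callback, then shut the tail thread down.
# tailer = FileTailer('/tmp/train.log', sys.stdout.write, seek_end=True)
# ...
# tailer.stop()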
class FileEventHandler(object):
def __init__(self, file_path, save_name, api):
self.file_path = file_path
self.save_name = save_name
self._api = api
def on_created(self):
pass
def on_modified(self):
pass
def finish(self):
pass
class FileEventHandlerOverwrite(FileEventHandler):
def __init__(self, file_path, save_name, api, file_pusher, *args, **kwargs):
super(FileEventHandlerOverwrite, self).__init__(
file_path, save_name, api, *args, **kwargs)
self._file_pusher = file_pusher
def on_created(self):
self.on_modified()
def on_modified(self):
# Tell file_pusher to copy the file, we want to allow the user to modify the
# original while this one is uploading (modifying while uploading seems to
# cause a hang somewhere in the google upload code, until the server times out)
self._file_pusher.file_changed(
self.save_name, self.file_path, copy=True)
class FileEventHandlerOverwriteDeferred(FileEventHandler):
def __init__(self, file_path, save_name, api, file_pusher, *args, **kwargs):
super(FileEventHandlerOverwriteDeferred, self).__init__(
file_path, save_name, api, *args, **kwargs)
self._file_pusher = file_pusher
def finish(self):
self._file_pusher.file_changed(self.save_name, self.file_path)
class FileEventHandlerConfig(FileEventHandler):
"""Set the config instead of uploading the file"""
RATE_LIMIT_SECONDS = 30
def __init__(self, file_path, save_name, api, file_pusher, run, *args, **kwargs):
self._api = api
super(FileEventHandlerConfig, self).__init__(
file_path, save_name, api, *args, **kwargs)
self._last_sent = time.time() - self.RATE_LIMIT_SECONDS
self._file_pusher = file_pusher
self._run = run
self._thread = None
def on_created(self):
self._eventually_update()
def on_modified(self):
self._eventually_update()
def _eventually_update(self):
if self._thread:
# assume the existing thread will catch this update
return
if time.time() - self._last_sent >= self.RATE_LIMIT_SECONDS:
self._update()
else:
self._thread = threading.Timer(
self.RATE_LIMIT_SECONDS, self._thread_update)
self._thread.start()
def _thread_update(self):
try:
self._update()
finally:
self._thread = None
def _update(self):
try:
config_dict = yaml.load(open(self.file_path))
except yaml.parser.ParserError:
wandb.termlog(
"Unable to parse config file; probably being modified by user process?")
return
# TODO(adrian): ensure the file content will exactly match Bucket.config
# ie. push the file content as a string
self._api.upsert_run(id=self._run.storage_id, config=config_dict)
self._file_pusher.file_changed(
self.save_name, self.file_path, copy=True)
self._last_sent = time.time()
def finish(self):
if self._thread:
self._thread.join()
self._thread = None
self._update()
class FileEventHandlerSummary(FileEventHandler):
"""Read the file and add to the file push api"""
def __init__(self, file_path, save_name, api, file_pusher, run, *args, **kwargs):
super(FileEventHandlerSummary, self).__init__(
file_path, save_name, api, *args, **kwargs)
self._api = api
self._file_pusher = file_pusher
def on_created(self):
self.on_modified()
def on_modified(self):
self._api.get_file_stream_api().push(self.save_name, open(self.file_path).read())
def finish(self):
self._file_pusher.file_changed(self.save_name, self.file_path)
class FileEventHandlerTextStream(FileEventHandler):
def __init__(self, *args, **kwargs):
self._seek_end = kwargs.pop('seek_end', None)
super(FileEventHandlerTextStream, self).__init__(*args, **kwargs)
self._tailer = None
def on_created(self):
if self._tailer:
logger.error(
'Streaming file created twice in same run: %s', self.file_path)
return
self._setup()
def on_modified(self):
if self._tailer:
return
self._setup()
def _setup(self):
fsapi = self._api.get_file_stream_api()
pusher = streaming_log.TextStreamPusher(fsapi, self.save_name)
def on_read(data):
pusher.write_string(data)
self._tailer = FileTailer(
self.file_path, on_read, seek_end=self._seek_end)
def finish(self):
if self._tailer:
self._tailer.stop()
self._tailer = None
class FileEventHandlerBinaryStream(FileEventHandler):
def __init__(self, *args, **kwargs):
super(FileEventHandlerBinaryStream, self).__init__(*args, **kwargs)
self._tailer = None
def on_created(self):
if self._tailer:
logger.error(
'Streaming file created twice in same run: %s', self.file_path)
return
self._setup()
def on_modified(self):
if self._tailer:
return
self._setup()
def _setup(self):
fsapi = self._api.get_file_stream_api()
def on_read(data):
fsapi.push(self.save_name, data)
self._tailer = FileTailer(self.file_path, on_read, binary=True)
class WriteSerializingFile(object):
"""Wrapper for a file object that serializes writes.
"""
def __init__(self, f):
self.lock = threading.Lock()
self.f = f
def write(self, *args, **kargs):
self.lock.acquire()
try:
self.f.write(*args, **kargs)
self.f.flush()
finally:
self.lock.release()
class Process(object):
"""Represents a running process with an interface that
mimics Popen's.
Only works on Unix-y systems.
TODO(adrian): probably rewrite using psutil.Process
"""
def __init__(self, pid):
self.returncode = None
self.pid = pid
def poll(self):
if self.returncode is None:
try:
os.kill(self.pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
# we have no way of getting the real return code, so just set it to 0
self.returncode = 0
elif err.errno == errno.EPERM:
# EPERM clearly means there's a process to deny access to
pass
else:
# According to "man 2 kill" possible error values are
# (EINVAL, EPERM, ESRCH)
raise
return self.returncode
def wait(self):
while self.poll() is None:
time.sleep(1)
def interrupt(self):
os.kill(self.pid, signal.SIGINT)
def terminate(self):
os.kill(self.pid, signal.SIGTERM)
def kill(self):
os.kill(self.pid, signal.SIGKILL)
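# Usage sketch: wrap an already-running pid (hypothetical value) with a
# Popen-like interface.
# p = Process(12345)
# p.poll()      # None while the process is alive, 0 once it is gone
# p.terminate()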
class RunManager(object):
"""Manages a run's process, wraps its I/O, and synchronizes its files.
"""
def __init__(self, api, run, project=None, tags=[], cloud=True, job_type="train", output=True, port=None):
self._api = api
self._run = run
self._cloud = cloud
self._port = port
self._project = project if project else api.settings("project")
self._tags = tags
self._watch_dir = self._run.dir
self._config = run.config
self.job_type = job_type
self.url = self._run.get_url(api)
# We lock this when the backend is down so Watchdog will keep track of all
# the file events that happen. Then, when the backend comes back up, we unlock
# it so all the outstanding events will get handled properly. Watchdog's queue
# only keeps at most one event per file.
# Counterintuitively, we use the "reader" locking to guard writes to the W&B
# backend, and the "writer" locking to indicate that the backend is down. That
# way, users of the W&B API won't block each other, but can all be
# blocked by grabbing a "writer" lock.
self._file_event_lock = wandb.rwlock.RWLock()
# It starts acquired. We release it when we want to allow the events to happen.
# (ie. after the Run is successfully created)
self._file_event_lock.writer_enters()
self._event_handlers = {}
self._handler = PatternMatchingEventHandler()
self._handler.on_created = self._on_file_created
self._handler.on_modified = self._on_file_modified
self._handler._patterns = [
os.path.join(self._watch_dir, os.path.normpath('*'))]
# Ignore hidden files/folders and output.log because we stream it specially
self._handler._ignore_patterns = [
'*/.*',
'*.tmp',
os.path.join(self._run.dir, OUTPUT_FNAME)
]
self._observer = Observer()
self._observer.schedule(self._handler, self._watch_dir, recursive=True)
self._stats = stats.Stats()
# Calling .start() on _meta and _system_stats will spin a thread that reports system stats every 30 seconds
self._system_stats = stats.SystemStats(run, api)
self._meta = meta.Meta(api, self._run.dir)
self._meta.data["jobType"] = job_type
if self._run.program:
self._meta.data["program"] = self._run.program
self._file_pusher = file_pusher.FilePusher(self._push_function)
self._socket = wandb_socket.Client(self._port)
logger.debug("Initialized sync for %s/%s", self._project, self._run.id)
if self._cloud:
self._observer.start()
self._api.save_patches(self._watch_dir)
if output:
wandb.termlog("Syncing %s" % self.url)
wandb.termlog("Run `wandb off` to turn off syncing.")
wandb.termlog("Local directory: %s" % os.path.relpath(run.dir))
self._api.get_file_stream_api().set_file_policy(
OUTPUT_FNAME, CRDedupeFilePolicy())
""" FILE SYNCING / UPLOADING STUFF """
# TODO: limit / throttle the number of adds / pushes
def _on_file_created(self, event):
logger.info('file/dir created: %s', event.src_path)
if os.path.isdir(event.src_path):
return None
save_name = os.path.relpath(event.src_path, self._watch_dir)
self._file_event_lock.await_readable()
self._get_handler(event.src_path, save_name).on_created()
def _on_file_modified(self, event):
logger.info('file/dir modified: %s', event.src_path)
if os.path.isdir(event.src_path):
return None
save_name = os.path.relpath(event.src_path, self._watch_dir)
self._file_event_lock.await_readable()
self._get_handler(event.src_path, save_name).on_modified()
def _get_handler(self, file_path, save_name):
if os.path.split(save_name)[0] != "media" and save_name not in [
'wandb-history.jsonl', 'wandb-events.jsonl', 'wandb-summary.json']:
# Don't show stats on media files
self._stats.update_file(file_path)
if save_name not in self._event_handlers:
if save_name == 'wandb-history.jsonl':
self._event_handlers['wandb-history.jsonl'] = FileEventHandlerTextStream(
file_path, 'wandb-history.jsonl', self._api)
elif save_name == 'wandb-events.jsonl':
self._event_handlers['wandb-events.jsonl'] = FileEventHandlerTextStream(
file_path, 'wandb-events.jsonl', self._api)
# Don't try to stream tensorboard files for now.
# elif 'tfevents' in save_name:
# # TODO: This is hard-coded, but we want to give users control
# # over streaming files (or detect them).
# self._api.get_file_stream_api().set_file_policy(save_name,
# BinaryFilePolicy())
# self._event_handlers[save_name] = FileEventHandlerBinaryStream(
# file_path, save_name, self._api)
# Overwrite handler (non-deferred) has a bug, wherein if the file is truncated
# during upload, the request to Google hangs (at least, this is my working
# theory). So for now we defer uploading everything til the end of the run.
# TODO: send wandb-summary during run. One option is to copy to a temporary
# file before uploading.
elif save_name == config.FNAME:
self._event_handlers[save_name] = FileEventHandlerConfig(
file_path, save_name, self._api, self._file_pusher, self._run)
elif save_name == 'wandb-summary.json':
# Load the summary into the syncer process for meta etc to work
self._run.summary.load()
self._api.get_file_stream_api().set_file_policy(save_name, OverwriteFilePolicy())
self._event_handlers[save_name] = FileEventHandlerSummary(
file_path, save_name, self._api, self._file_pusher, self._run)
elif save_name.startswith('media/'):
# Save media files immediately
self._event_handlers[save_name] = FileEventHandlerOverwrite(
file_path, save_name, self._api, self._file_pusher)
else:
self._event_handlers[save_name] = FileEventHandlerOverwriteDeferred(
file_path, save_name, self._api, self._file_pusher)
return self._event_handlers[save_name]
def _finish_handlers(self):
# TODO: there was a case where _event_handlers was getting modified in the loop.
for handler in list(self._event_handlers.values()):
handler.finish()
def _push_function(self, save_name, path):
with open(path, 'rb') as f:
self._api.push(self._project, {save_name: f}, run=self._run.id,
progress=lambda _, total: self._stats.update_progress(path, total))
""" RUN MANAGEMENT STUFF """
def mirror_stdout_stderr(self):
"""Simple STDOUT and STDERR mirroring used by _init_jupyter"""
# TODO: Ideally we could start collecting logs without pushing
fs_api = self._api.get_file_stream_api()
io_wrap.SimpleTee(sys.stdout, streaming_log.TextStreamPusher(
fs_api, OUTPUT_FNAME, prepend_timestamp=True))
io_wrap.SimpleTee(sys.stderr, streaming_log.TextStreamPusher(
fs_api, OUTPUT_FNAME, prepend_timestamp=True, line_prepend='ERROR'))
def _get_stdout_stderr_streams(self):
"""Sets up STDOUT and STDERR streams. Only call this once."""
if six.PY2 or "buffer" not in dir(sys.stdout):
stdout = sys.stdout
stderr = sys.stderr
else: # we write binary so grab the raw I/O objects in python 3
try:
stdout = sys.stdout.buffer.raw
stderr = sys.stderr.buffer.raw
except AttributeError:
# The testing environment and potentially others may have tampered with their
# I/O, so we fall back to the raw stdout / stderr buffers
stdout = sys.stdout.buffer
stderr = sys.stderr.buffer
output_log_path = os.path.join(self._run.dir, OUTPUT_FNAME)
self._output_log = WriteSerializingFile(open(output_log_path, 'wb'))
stdout_streams = [stdout, self._output_log]
stderr_streams = [stderr, self._output_log]
if self._cloud:
# Tee stdout/stderr into our TextOutputStream, which will push lines to the cloud.
fs_api = self._api.get_file_stream_api()
self._stdout_stream = streaming_log.TextStreamPusher(
fs_api, OUTPUT_FNAME, prepend_timestamp=True,
lock_function=self._file_event_lock.reader_enters)
self._stderr_stream = streaming_log.TextStreamPusher(
fs_api, OUTPUT_FNAME, line_prepend='ERROR',
prepend_timestamp=True,
lock_function=self._file_event_lock.reader_enters)
stdout_streams.append(self._stdout_stream)
stderr_streams.append(self._stderr_stream)
return stdout_streams, stderr_streams
def _close_stdout_stderr_streams(self, exitcode):
self._output_log.f.close()
self._output_log = None
# Close output-capturing stuff. This also flushes anything left in the buffers.
if self._stdout_tee.tee_file is not None:
# we don't have tee_file's in headless mode
self._stdout_tee.tee_file.close()
# TODO(adrian): we should close these even in headless mode
# but in python 2 the read thread doesn't stop on its own
# for some reason
self._stdout_tee.close_join()
if self._stderr_tee.tee_file is not None:
self._stderr_tee.tee_file.close()
self._stderr_tee.close_join()
if self._cloud:
# not set in dry run mode
self._stdout_stream.close()
self._stderr_stream.close()
self._api.get_file_stream_api().finish(exitcode)
# Ensures we get a new file stream thread
self._api._file_stream_api = None
def _setup_resume(self, resume_status):
# write the tail of the history file
try:
history_tail = json.loads(resume_status['historyTail'])
jsonlfile.write_jsonl_file(os.path.join(self._run.dir, wandb_run.HISTORY_FNAME),
history_tail)
except ValueError:
print("warning: couldn't load recent history")
# write the tail of the events file
try:
events_tail = json.loads(resume_status['eventsTail'])
jsonlfile.write_jsonl_file(os.path.join(self._run.dir, wandb_run.EVENTS_FNAME),
events_tail)
except ValueError:
print("warning: couldn't load recent events")
# Note: these calls need to happen after writing the files above. Because the access
# to self._run.events below triggers events to initialize, but we need the previous
# events to be written before that happens.
# output.log
self._api.get_file_stream_api().set_file_policy(
OUTPUT_FNAME, CRDedupeFilePolicy(resume_status['logLineCount']))
# history
self._api.get_file_stream_api().set_file_policy(
wandb_run.HISTORY_FNAME, DefaultFilePolicy(
start_chunk_id=resume_status['historyLineCount']))
self._event_handlers[wandb_run.HISTORY_FNAME] = FileEventHandlerTextStream(
self._run.history.fname, wandb_run.HISTORY_FNAME, self._api, seek_end=True)
# events
self._api.get_file_stream_api().set_file_policy(
wandb_run.EVENTS_FNAME, DefaultFilePolicy(
start_chunk_id=resume_status['eventsLineCount']))
self._event_handlers[wandb_run.EVENTS_FNAME] = FileEventHandlerTextStream(
self._run.events.fname, wandb_run.EVENTS_FNAME, self._api, seek_end=True)
def init_run(self, env=None):
self._system_stats.start()
self._meta.start()
self._api.get_file_stream_api().start()
if self._cloud:
storage_id = None
if self._run.resume != 'never':
resume_status = self._api.run_resume_status(project=self._api.settings("project"),
entity=self._api.settings(
"entity"),
name=self._run.id)
if resume_status is None and self._run.resume == 'must':
raise LaunchError(
"resume='must' but run (%s) doesn't exist" % self._run.id)
if resume_status:
print('Resuming run: %s' % self._run.get_url(self._api))
self._setup_resume(resume_status)
storage_id = resume_status['id']
if not self._upsert_run(False, storage_id, env):
self._upsert_run_thread = threading.Thread(
target=self._upsert_run, args=(True, storage_id, env))
self._upsert_run_thread.daemon = True
self._upsert_run_thread.start()
def shutdown(self, exitcode=0):
"""Stops system stats, streaming handlers, and uploads files without output, used by wandb.monitor"""
self._system_stats.shutdown()
self._meta.shutdown()
self._finish_handlers()
self._file_pusher.shutdown()
self._api.get_file_stream_api().finish(exitcode)
# Ensures we get a new file stream thread
self._api._file_stream_api = None
def _upsert_run(self, retry, storage_id, env):
"""Upsert the Run (ie. for the first time with all its attributes)
Arguments:
retry: (bool) Whether to retry if the connection fails (ie. if the backend is down).
False is useful so we can start running the user process even when the W&B backend
is down, and let syncing finish later.
Returns:
True if the upsert succeeded, False if it failed because the backend is down.
Throws:
LaunchError on other failures
"""
if retry:
num_retries = None
else:
num_retries = 0 # no retries because we want to let the user process run even if the backend is down
try:
upsert_result = self._run.save(
id=storage_id, num_retries=num_retries, job_type=self.job_type, api=self._api)
except wandb.api.CommError as e:
# TODO: Get rid of str contains check
if self._run.resume == 'never' and 'exists' in str(e):
raise LaunchError(
"resume='never' but run (%s) exists" % self._run.id)
else:
if isinstance(e.exc, (requests.exceptions.HTTPError,
requests.exceptions.Timeout,
requests.exceptions.ConnectionError)):
wandb.termerror(
'Failed to connect to W&B. Retrying in the background.')
return False
raise LaunchError(
'Launch exception: {}, see {} for details. To disable wandb set WANDB_MODE=dryrun'.format(e, util.get_log_file_path()))
self._run.set_environment(environment=env)
# unblock file syncing and console streaming, which need the Run to have a .storage_id
self._file_event_lock.writer_leaves()
return True
def run_user_process(self, program, args, env):
"""Launch a user process, capture its output, and sync its files to the backend.
This returns after the process has ended and syncing is done.
Captures ctrl-c's, signals, etc.
"""
stdout_streams, stderr_streams = self._get_stdout_stderr_streams()
if sys.platform == "win32":
# PTYs don't work in windows so we use pipes.
self._stdout_tee = io_wrap.Tee.pipe(*stdout_streams)
self._stderr_tee = io_wrap.Tee.pipe(*stderr_streams)
# Seems like the following actually isn't necessary on Windows
# TODO(adrian): we may need to do the following if we use pipes instead of PTYs
# because Python on Unix doesn't like writing UTF-8 to files
# tell child python interpreters we accept utf-8
# env['PYTHONIOENCODING'] = 'UTF-8'
else:
self._stdout_tee = io_wrap.Tee.pty(*stdout_streams)
self._stderr_tee = io_wrap.Tee.pty(*stderr_streams)
self._stdout_stream.write_string(" ".join(psutil.Process(
os.getpid()).cmdline()) + "\n\n")
command = [program] + list(args)
runner = util.find_runner(program)
if runner:
command = runner + command
command = ' '.join(six.moves.shlex_quote(arg) for arg in command)
try:
self.proc = subprocess.Popen(
command,
env=env,
stdout=self._stdout_tee.tee_file,
stderr=self._stderr_tee.tee_file,
shell=True,
)
except (OSError, IOError):
raise Exception('Could not find program: %s' % command)
self._sync_etc()
def wrap_existing_process(self, pid, stdout_read_fd, stderr_read_fd, port=None):
"""Do syncing, etc. for an already-running process.
This returns after the process has ended and syncing is done.
Captures ctrl-c's, signals, etc.
"""
stdout_read_file = os.fdopen(stdout_read_fd, 'rb')
stderr_read_file = os.fdopen(stderr_read_fd, 'rb')
stdout_streams, stderr_streams = self._get_stdout_stderr_streams()
self._stdout_tee = io_wrap.Tee(stdout_read_file, *stdout_streams)
self._stderr_tee = io_wrap.Tee(stderr_read_file, *stderr_streams)
self.proc = Process(pid)
try:
self.init_run()
except LaunchError as e:
wandb.termerror(str(e))
self._socket.launch_error()
return
# Signal the main process that we're all hooked up
self._socket.ready()
self._sync_etc(headless=True)
def _sync_etc(self, headless=False):
# Ignore SIGQUIT (ctrl-\). The child process will handle it, and we'll
# exit when the child process does.
#
# We disable these signals after running the process so the child doesn't
# inherit this behaviour.
try:
signal.signal(signal.SIGQUIT, signal.SIG_IGN)
except AttributeError: # SIGQUIT doesn't exist on windows
pass
if self._api.update_available:
wandb.termlog(
"An update is available! To upgrade, please run:\n $ pip install wandb --upgrade")
# Add a space before user output
wandb.termlog()
if env.get_show_run():
webbrowser.open_new_tab(self._run.get_url(self._api))
exitcode = None
try:
while True:
res = bytearray()
try:
res = self._socket.recv(2)
except socket.timeout:
pass
if len(res) == 2 and res[0] == 2:
exitcode = res[1]
break
elif len(res) > 0:
wandb.termerror(
"Invalid message received from child process: %s" % str(res))
break
else:
exitcode = self.proc.poll()
if exitcode is not None:
break
time.sleep(1)
except KeyboardInterrupt:
exitcode = 255
if headless:
wandb.termlog('Ctrl-c pressed.')
else:
wandb.termlog(
'Ctrl-c pressed; waiting for program to end. Press ctrl-c again to kill it.')
try:
while self.proc.poll() is None:
time.sleep(0.1)
except KeyboardInterrupt:
pass
if self.proc.poll() is None:
wandb.termlog('Program still alive. Killing it.')
try:
self.proc.kill()
except OSError:
pass
"""TODO(adrian): garbage that appears in the logs sometimes
Exception ignored in: <bound method Popen.__del__ of <subprocess.Popen object at 0x111adce48>>
Traceback (most recent call last):
File "/Users/adrian/.pyenv/versions/3.6.0/Python.framework/Versions/3.6/lib/python3.6/subprocess.py", line 760, in __del__
AttributeError: 'NoneType' object has no attribute 'warn'
"""
if exitcode is None:
exitcode = 254
wandb.termlog(
'Killing program failed; syncing files anyway. Press ctrl-c to abort syncing.')
else:
if exitcode == 0:
wandb.termlog('Program ended.')
else:
wandb.termlog(
'Program failed with code %d. Press ctrl-c to abort syncing.' % exitcode)
#termlog('job (%s) Process exited with code: %s' % (program, exitcode))
self._meta.data["exitcode"] = exitcode
if exitcode == 0:
self._meta.data["state"] = "finished"
elif exitcode == 255:
self._meta.data["state"] = "killed"
else:
self._meta.data["state"] = "failed"
self._meta.shutdown()
self._system_stats.shutdown()
if exitcode != 0 and time.time() - START_TIME < 30:
wandb.termlog("Process crashed early, not syncing files")
sys.exit(exitcode)
# TODO: these can be slow to complete
self._close_stdout_stderr_streams(exitcode)
# If we're not syncing to the cloud, we're done
if not self._cloud:
sys.exit(exitcode)
# Show run summary/history
self._run.summary.load()
summary = self._run.summary._summary
if len(summary):
wandb.termlog('Run summary:')
max_len = max([len(k) for k in summary.keys()])
format_str = ' {:>%s} {}' % max_len
for k, v in summary.items():
# arrays etc. might be too large. for now we just don't print them
if isinstance(v, six.string_types):
if len(v) >= 20:
v = v[:20] + '...'
wandb.termlog(format_str.format(k, v))
elif isinstance(v, numbers.Number):
wandb.termlog(format_str.format(k, v))
self._run.history.load()
history_keys = self._run.history.keys()
if len(history_keys):
wandb.termlog('Run history:')
max_len = max([len(k) for k in history_keys])
for key in history_keys:
vals = util.downsample(self._run.history.column(key), 40)
if any((not isinstance(v, numbers.Number) for v in vals)):
continue
line = sparkline.sparkify(vals)
format_str = u' {:>%s} {}' % max_len
wandb.termlog(format_str.format(key, line))
if self._run.has_examples:
wandb.termlog('Saved %s examples' % self._run.examples.count())
wandb.termlog('Waiting for final file modifications.')
# This is a heuristic delay to catch files that were written just before
# the end of the script.
# TODO: ensure we catch all saved files.
# TODO(adrian): do we need this?
time.sleep(2)
try:
# avoid hanging if we crashed before the observer was started
if self._observer.is_alive():
self._observer.stop()
self._observer.join()
# TODO: py2 TypeError: PyCObject_AsVoidPtr called with null pointer
except TypeError:
pass
# TODO: py3 SystemError: <built-in function stop> returned a result with an error set
except SystemError:
pass
self._finish_handlers()
self._file_pusher.finish()
wandb.termlog('Syncing files in %s:' %
os.path.relpath(self._watch_dir))
for file_path in self._stats.files():
wandb.termlog(' %s' % os.path.relpath(file_path, self._watch_dir))
step = 0
spinner_states = ['-', '\\', '|', '/']
stop = False
self._stats.update_all_files()
while True:
if not self._file_pusher.is_alive():
stop = True
summary = self._stats.summary()
line = (' %(completed_files)s of %(total_files)s files,'
' %(uploaded_bytes).03f of %(total_bytes).03f bytes uploaded\r' % summary)
line = spinner_states[step % 4] + line
step += 1
wandb.termlog(line, newline=False)
if stop:
break
time.sleep(0.25)
#print('FP: ', self._file_pusher._pending, self._file_pusher._jobs)
# clear progress line.
wandb.termlog(' ' * 79)
# Check md5s of uploaded files against what's on the file system.
# TODO: We're currently using the list of uploaded files as our source
# of truth, but really we should use the files on the filesystem
# (ie if we missed a file this wouldn't catch it).
# This polls the server, because there is a delay between when the file
# is done uploading, and when the datastore gets updated with new
# metadata via pubsub.
wandb.termlog('Verifying uploaded files... ', newline=False)
error = False
mismatched = None
for delay_base in range(4):
mismatched = []
download_urls = self._api.download_urls(
self._project, run=self._run.id)
for fname, info in download_urls.items():
if fname == 'wandb-history.h5' or fname == OUTPUT_FNAME:
continue
local_path = os.path.join(self._watch_dir, fname)
local_md5 = util.md5_file(local_path)
if local_md5 != info['md5']:
mismatched.append((local_path, local_md5, info['md5']))
if not mismatched:
break
wandb.termlog(' Retrying after %ss' % (delay_base**2))
time.sleep(delay_base ** 2)
if mismatched:
print('')
error = True
for local_path, local_md5, remote_md5 in mismatched:
wandb.termerror(
'%s (%s) did not match uploaded file (%s) md5' % (
local_path, local_md5, remote_md5))
else:
print('verified!')
if error:
wandb.termerror('Sync failed %s' % self.url)
else:
wandb.termlog('Synced %s' % self.url)
sys.exit(exitcode)
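# The verification loop above retries with quadratic backoff (delay_base ** 2
# seconds) to give the backend's metadata store time to catch up after the
# upload finishes. A generic sketch of the same pattern (illustrative, with a
# hypothetical check() callable):
#
#     import time
#     def verify_with_backoff(check, attempts=4):
#         for delay_base in range(attempts):
#             if check():
#                 return True
#             time.sleep(delay_base ** 2)
#         return False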
0ccba2a706c1c9ecf30452eed9588e448ebaa984 | 1,948 | py | Python | prediction/bimod/BACs_and_TERF/cefprox_ccs_comparison.py | dylanhross/dmccs | 8b403a90b6cb7edd9d7abc172462e9d9b62b5dd3 | ["MIT"] | 3 | 2021-05-17T20:19:41.000Z | 2022-02-01T21:43:30.000Z | prediction/bimod/BACs_and_TERF/cefprox_ccs_comparison.py | dylanhross/dmccs | 8b403a90b6cb7edd9d7abc172462e9d9b62b5dd3 | ["MIT"] | null | null | null | prediction/bimod/BACs_and_TERF/cefprox_ccs_comparison.py | dylanhross/dmccs | 8b403a90b6cb7edd9d7abc172462e9d9b62b5dd3 | ["MIT"] | null | null | null
#!/Library/Frameworks/Python.framework/Versions/3.8/bin/python3
"""
"""
from matplotlib import pyplot as plt
import sys
fset = sys.argv[1]
# set up plot fonts
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Helvetica', 'Arial']
rcParams['font.size'] = 11
fig = plt.figure(figsize=(3, 4))
ax = fig.add_subplot(111)
# common settings for the same style across plots
bs = {
'linewidth': 1.1, 'align': 'center', 'width': 0.8, 'capstyle': 'round', 'capsize': 5,
'error_kw': {
'elinewidth': 1.1, 'ecolor': 'k'
}
}
y = {
'CUST': [2.210526517599249985e+02, 2.259224234838216319e+02],
'MQN': [2.134326154759856990e+02, 2.134326154759856990e+02],
'MD3D': [2.235046941620169036e+02, 2.179685099344615935e+02],
'COMB': [1.938115028340808408e+02, 1.934436461179425635e+02]
}
c = 'k'
ax.bar([1], [229.5], edgecolor=c, color=c, fill=False, hatch='///', **bs)
ax.bar([1], [229.5], edgecolor=c, ecolor=c, fill=False, **bs)
c = 'b'
ax.bar([2], [235.9], edgecolor=c, color=c, fill=False, hatch='///', **bs)
ax.bar([2], [235.9], edgecolor=c, ecolor=c, fill=False, **bs)
c = 'k'
ax.bar([3], [y[fset][0]], edgecolor=c, color=c, **bs)
ax.bar([3], [y[fset][0]], edgecolor=c, ecolor=c, fill=False, **bs)
c = 'b'
ax.bar([4], [y[fset][1]], edgecolor=c, color=c, **bs)
ax.bar([4], [y[fset][1]], ecolor=c, edgecolor=c, fill=False, **bs)
ax.set_xticks([1, 2, 3, 4])
ax.set_xticklabels(['prot. A (exp.)', 'prot. B (exp.)', 'prot. A (pred.)', 'prot. B (pred.)'], fontstyle='italic', fontsize=10, rotation='vertical')
ax.set_ylim([190, 240])
for d in ['top', 'right']:
ax.spines[d].set_visible(False)
ax.set_ylabel(r'CCS ($\AA^2$)')
plt.tight_layout()
# save figure
png = 'cefprox_{}_modeling_ccscomp.png'.format(fset)
plt.savefig(png, dpi=400, bbox_inches='tight')
#plt.show()
plt.close()
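# Usage sketch (illustrative): pass one of the feature sets defined in the
# `y` dict above, e.g.
#     python cefprox_ccs_comparison.py MD3D
# which writes cefprox_MD3D_modeling_ccscomp.png to the working directory.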
0cccc6e5828689cc5f573bd0d16b3283aee92177 | 441 | py | Python | codewars/7kyu/doha22/jumping_number/jumping_number.py | doha22/Training_one | 0cd7cf86c7da0f6175834146296b763d1841766b | ["MIT"] | null | null | null | codewars/7kyu/doha22/jumping_number/jumping_number.py | doha22/Training_one | 0cd7cf86c7da0f6175834146296b763d1841766b | ["MIT"] | 2 | 2019-01-22T10:53:42.000Z | 2019-01-31T08:02:48.000Z | codewars/7kyu/doha22/jumping_number/jumping_number.py | doha22/Training_one | 0cd7cf86c7da0f6175834146296b763d1841766b | ["MIT"] | 13 | 2019-01-22T10:37:42.000Z | 2019-01-25T13:30:43.000Z
def jumping_number(number):
    x = [int(d) for d in str(number)]
    if len(x) == 1:
        return 'Jumping!!'
    for i in range(len(x) - 1):
        if abs(x[i + 1] - x[i]) != 1:
            return 'Not!!'
    return 'Jumping!!'
def jumping_number2(number):
    arr = list(map(int, str(number)))
    return ('Not!!', 'Jumping!!')[all(map(lambda a, b: abs(a - b) == 1, arr, arr[1:]))]
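# Quick self-check with illustrative inputs (not part of the original kata
# solution): both implementations should agree.
if __name__ == '__main__':
    assert jumping_number(4343456) == 'Jumping!!'
    assert jumping_number(89098) == 'Not!!'
    assert jumping_number2(4343456) == 'Jumping!!'
    assert jumping_number2(89098) == 'Not!!'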
0cce58faef4cd8ad122e2fc2181ceb5f69345869 | 901 | py | Python | tests/test_canny.py | urosisakovic/images | 2cc166447eabae9b025683f8bb8eeca88188fa9e | ["MIT"] | null | null | null | tests/test_canny.py | urosisakovic/images | 2cc166447eabae9b025683f8bb8eeca88188fa9e | ["MIT"] | 2 | 2022-01-13T01:50:58.000Z | 2022-03-12T00:04:41.000Z | tests/test_canny.py | urosisakovic/images | 2cc166447eabae9b025683f8bb8eeca88188fa9e | ["MIT"] | null | null | null
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import imagelib.filter as filt
import imagelib.utility as util
if __name__ == '__main__':
LOAD_IMG_PATH = '/home/uros/Desktop/workspace/ana.png'
SOBEL_IMG_PATH = '/home/uros/Desktop/workspace/sobel_edges.jpg'
MAIN_EDGES_PATH = '/home/uros/Desktop/workspace/main_edges.jpg'
CANNY_PATH = '/home/uros/Desktop/workspace/canny.jpg'
DBG_PATH = '/home/uros/Desktop/workspace/dbg.jpg'
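# The steps below follow the classic Canny sequence: Sobel gradients, then
# non-maximum suppression to thin the edges, then dual thresholding with the
# `high`/`low` cutoffs defined further down. (SOBEL_IMG_PATH and DBG_PATH are
# declared but unused in this particular test.)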
img = util.imopen(LOAD_IMG_PATH)
img = util.rgb2gray(img)
edge_img, edge_dir = filt.sobel_edge_det(img, (3, 3))
main_edge_img = filt.non_max_supression(edge_img, edge_dir)
util.imwrite(MAIN_EDGES_PATH, main_edge_img)
high = 10
low = 0
canny_edges_image = filt.dual_threshold(main_edge_img, high, low)
util.imwrite(CANNY_PATH, canny_edges_image)
0cd2b82fc38feef5c1b1f361ac647db18ec16e26 | 5,484 | py | Python | tokio/connectors/globuslogs.py | NERSC/pytokio | 22244718cf82567c50620cbe0e635dfc990de36b | ["BSD-3-Clause-LBNL"] | 22 | 2017-11-14T01:30:48.000Z | 2022-01-01T21:51:00.000Z | tokio/connectors/globuslogs.py | glennklockwood/pytokio | 22244718cf82567c50620cbe0e635dfc990de36b | ["BSD-3-Clause-LBNL"] | 39 | 2017-12-20T01:42:19.000Z | 2020-05-28T21:17:26.000Z | tokio/connectors/globuslogs.py | glennklockwood/pytokio | 22244718cf82567c50620cbe0e635dfc990de36b | ["BSD-3-Clause-LBNL"] | 5 | 2018-02-06T19:39:19.000Z | 2019-07-10T01:20:26.000Z
"""Provides an interface for Globus and GridFTP transfer logs
Globus logs are ASCII files that generally look like::
DATE=20190809091437.927804 HOST=dtn11.nersc.gov PROG=globus-gridftp-server NL.EVNT=FTP_INFO START=20190809091437.884224 USER=glock FILE=/home/g/glock/results0.tar.gz BUFFER=235104 BLOCK=262144 NBYTES=35616 VOLUME=/ STREAMS=4 STRIPES=1 DEST=[0.0.0.0] TYPE=RETR CODE=226
DATE=20190809091438.022479 HOST=dtn11.nersc.gov PROG=globus-gridftp-server NL.EVNT=FTP_INFO START=20190809091437.963894 USER=glock FILE=/home/g/glock/results1.tar.gz BUFFER=235104 BLOCK=262144 NBYTES=35616 VOLUME=/ STREAMS=4 STRIPES=1 DEST=[0.0.0.0] TYPE=RETR CODE=226
DATE=20190809091438.370175 HOST=dtn11.nersc.gov PROG=globus-gridftp-server NL.EVNT=FTP_INFO START=20190809091438.314961 USER=glock FILE=/home/g/glock/results2.tar.gz BUFFER=235104 BLOCK=262144 NBYTES=35616 VOLUME=/ STREAMS=4 STRIPES=1 DEST=[0.0.0.0] TYPE=RETR CODE=226
The keys and values are pretty well demarcated, with the only hiccup being
around file names that contain spaces.
"""
import re
import time
import datetime
from tokio.common import to_epoch
from tokio.connectors.common import SubprocessOutputList
PEELER_REX = re.compile(r"^([A-Z.]+)=(.*?)\s+([A-Z.]+=.*)$")  # keys may contain '.', e.g. NL.EVNT
class GlobusLog(SubprocessOutputList):
"""Interface into a Globus transfer log
Parses a Globus transfer log which looks like::
DATE=20190809091437.927804 HOST=dtn11.nersc.gov PROG=globus-gridftp-server NL.EVNT=FTP_INFO START=20190809091437.884224 USER=glock FILE=/home/g/glock/results0.tar.gz BUFFER=235104 BLOCK=262144 NBYTES=35616 VOLUME=/ STREAMS=4 STRIPES=1 DEST=[0.0.0.0] TYPE=RETR CODE=226
DATE=20190809091438.022479 HOST=dtn11.nersc.gov PROG=globus-gridftp-server NL.EVNT=FTP_INFO START=20190809091437.963894 USER=glock FILE=/home/g/glock/results1.tar.gz BUFFER=235104 BLOCK=262144 NBYTES=35616 VOLUME=/ STREAMS=4 STRIPES=1 DEST=[0.0.0.0] TYPE=RETR CODE=226
DATE=20190809091438.370175 HOST=dtn11.nersc.gov PROG=globus-gridftp-server NL.EVNT=FTP_INFO START=20190809091438.314961 USER=glock FILE=/home/g/glock/results2.tar.gz BUFFER=235104 BLOCK=262144 NBYTES=35616 VOLUME=/ STREAMS=4 STRIPES=1 DEST=[0.0.0.0] TYPE=RETR CODE=226
and represents the data in a list-like form::
[
{
"BLOCK": 262144,
"BUFFER": 87040,
"CODE": "226",
"DATE": 1565445916.0,
"DEST": [
"198.125.208.14"
],
"FILE": "/home/g/glock/results_08_F...",
"HOST": "dtn11.nersc.gov",
"NBYTES": 6341890048,
"NL.EVNT": "FTP_INFO",
"PROG": "globus-gridftp-server",
"START": 1565445895.0,
"STREAMS": 1,
"STRIPES": 1,
"TYPE": "STOR",
"USER": "glock",
"VOLUME": "/"
},
...
]
where each list item is a dictionary encoding a single transfer log line.
The keys are exactly as they appear in the log file itself, and it is the
responsibility of downstream analysis code to attribute meaning to each
key.
"""
def __init__(self, *args, **kwargs):
super(GlobusLog, self).__init__(*args, **kwargs)
self.load()
@classmethod
def from_str(cls, input_str):
"""Instantiates from a string
"""
return cls(from_string=input_str)
@classmethod
def from_file(cls, cache_file):
"""Instantiates from a cache file
"""
return cls(cache_file=cache_file)
def load_str(self, input_str):
"""Parses text from a Globus FTP log
Iterates through a multi-line string and converts each line into a
dictionary of key-value pairs.
Args:
input_str (str): Multi-line string containing a single Globus log
transfer record on each line.
"""
for line in input_str.splitlines():
rec = {}
remainder = line
while remainder:
# we use a regex here because file paths may contain both spaces and =
match = PEELER_REX.match(remainder)
if not match:
key, value = remainder.split('=', 1)
remainder = ""
else:
key = match.group(1)
value = match.group(2)
remainder = match.group(3)
rec[key] = value
# recast keys
for key, transform in RECAST_KEYS.items():
if key in rec:
rec[key] = transform(rec[key])
if rec:
self.append(rec)
def _listify_ips(ip_str):
"""Breaks a string encoding a list of destinations into a list of
destinations
Args:
ip_str (str): A list of destination hosts encoded as a string
Returns:
list: A list of destination host strings
"""
if ip_str.startswith('['):
return [x.strip() for x in ip_str.lstrip('[').rstrip(']').split(',')]
return [ip_str]
RECAST_KEYS = {
"DATE": lambda x: to_epoch(datetime.datetime.strptime(x, "%Y%m%d%H%M%S.%f"), float),
"START": lambda x: to_epoch(datetime.datetime.strptime(x, "%Y%m%d%H%M%S.%f"), float),
"BUFFER": int,
"BLOCK": int,
"NBYTES": int,
"STREAMS": int,
"STRIPES": int,
"DEST": _listify_ips,
}
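# Minimal usage sketch (illustrative; assumes the SubprocessOutputList base
# class routes the from_string keyword through to load_str, as elsewhere in
# pytokio):
#
#     SAMPLE = ("DATE=20190809091437.927804 HOST=dtn11.nersc.gov "
#               "PROG=globus-gridftp-server NL.EVNT=FTP_INFO "
#               "START=20190809091437.884224 USER=glock "
#               "FILE=/home/g/glock/results0.tar.gz BUFFER=235104 BLOCK=262144 "
#               "NBYTES=35616 VOLUME=/ STREAMS=4 STRIPES=1 DEST=[0.0.0.0] "
#               "TYPE=RETR CODE=226")
#     log = GlobusLog.from_str(SAMPLE)
#     print(log[0]["NBYTES"], log[0]["DEST"])  # 35616 ['0.0.0.0']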
0cd54f5905313c10ff19ee6681217229f5fe4ea9 | 1,350 | py | Python | smart_manager/tests/migrations/0001_initial.py | wesleykendall/django-smart-manager | a2a7c030e4300824a57c243e4009f7fc7a56a3b4 | ["MIT"] | 9 | 2016-02-20T14:20:04.000Z | 2020-07-19T18:26:17.000Z | smart_manager/tests/migrations/0001_initial.py | wesleykendall/django-smart-manager | a2a7c030e4300824a57c243e4009f7fc7a56a3b4 | ["MIT"] | 2 | 2015-04-01T20:10:51.000Z | 2016-02-25T16:24:15.000Z | smart_manager/tests/migrations/0001_initial.py | wesleykendall/django-smart-manager | a2a7c030e4300824a57c243e4009f7fc7a56a3b4 | ["MIT"] | 6 | 2015-04-01T19:46:19.000Z | 2020-10-14T16:40:00.000Z
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
import smart_manager.models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='CantCascadeModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
),
migrations.CreateModel(
name='RelModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
),
migrations.CreateModel(
name='UpsertModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('char_field', models.CharField(max_length=128)),
('int_field', models.IntegerField()),
],
bases=(models.Model, smart_manager.models.SmartModelMixin),
),
migrations.AddField(
model_name='cantcascademodel',
name='rel_model',
field=models.ForeignKey(to='tests.RelModel', on_delete=django.db.models.deletion.PROTECT),
),
]
0cd72ab5cf19dddbfa1d864888a9ee4f0886e659 | 5,555 | py | Python | model.py | BIJIRAVI/WBC_Segmentaion | 5fa72b97bb2667b7f83ab53d3b13bcc8aabdb764 | ["Apache-2.0"] | 13 | 2017-07-21T06:09:46.000Z | 2022-02-24T08:46:22.000Z | model.py | BIJIRAVI/WBC_Segmentaion | 5fa72b97bb2667b7f83ab53d3b13bcc8aabdb764 | ["Apache-2.0"] | 1 | 2021-07-19T08:28:49.000Z | 2021-07-19T08:28:49.000Z | model.py | BIJIRAVI/WBC_Segmentaion | 5fa72b97bb2667b7f83ab53d3b13bcc8aabdb764 | ["Apache-2.0"] | 13 | 2017-09-26T03:14:23.000Z | 2020-12-30T07:22:54.000Z
from keras.models import Model
from keras.layers import Input
from keras.layers import Conv2D
from keras.layers import MaxPool2D
from keras.layers import UpSampling2D
from keras.layers import Dropout
from keras.layers import concatenate
from all_params import IMG_ROWS, IMG_COLS
def get_model(input_shape=(IMG_ROWS, IMG_COLS, 1), train=True):
layers = {}
layers['inputs'] = Input(shape=input_shape, name='inputs')
layers['conv1_1'] = Conv2D(32, (3, 3), padding='same', activation='relu', name='conv1_1')(layers['inputs'])
layers['conv1_2'] = Conv2D(32, (3, 3), padding='same', activation='relu', name='conv1_2')(layers['conv1_1'])
layers['pool_1'] = MaxPool2D(pool_size=(2, 2), name='pool_1')(layers['conv1_2'])
if train == True:
layers['dropout_1'] = Dropout(0.25, name='dropout_1')(layers['pool_1'])
layers['conv2_1'] = Conv2D(64, (3, 3), padding='same', activation='relu', name='conv2_1')(layers['dropout_1'])
else:
layers['conv2_1'] = Conv2D(64, (3, 3), padding='same', activation='relu', name='conv2_1')(layers['pool_1'])
layers['conv2_2'] = Conv2D(64, (3, 3), padding='same', activation='relu', name='conv2_2')(layers['conv2_1'])
layers['pool_2'] = MaxPool2D(pool_size=(2, 2), name='pool_2')(layers['conv2_2'])
if train == True:
layers['dropout_2'] = Dropout(0.25, name='dropout_2')(layers['pool_2'])
layers['conv3_1'] = Conv2D(128, (3, 3), padding='same', activation='relu', name='conv3_1')(layers['dropout_2'])
else:
layers['conv3_1'] = Conv2D(128, (3, 3), padding='same', activation='relu', name='conv3_1')(layers['pool_2'])
layers['conv3_2'] = Conv2D(128, (3, 3), padding='same', activation='relu', name='conv3_2')(layers['conv3_1'])
layers['pool_3'] = MaxPool2D(pool_size=(2, 2), name='pool_3')(layers['conv3_2'])
if train == True:
layers['dropout_3'] = Dropout(0.25, name='dropout_3')(layers['pool_3'])
layers['conv4_1'] = Conv2D(256, (3, 3), padding='same', activation='relu', name='conv4_1')(layers['dropout_3'])
else:
layers['conv4_1'] = Conv2D(256, (3, 3), padding='same', activation='relu', name='conv4_1')(layers['pool_3'])
layers['conv4_2'] = Conv2D(256, (3, 3), padding='same', activation='relu', name='conv4_2')(layers['conv4_1'])
layers['pool_4'] = MaxPool2D(pool_size=(2, 2), name='pool_4')(layers['conv4_2'])
if train == True:
layers['dropout_4'] = Dropout(0.25, name='dropout_4')(layers['pool_4'])
layers['conv5_1'] = Conv2D(512, (3, 3), padding='same', activation='relu', name='conv5_1')(layers['dropout_4'])
else:
layers['conv5_1'] = Conv2D(512, (3, 3), padding='same', activation='relu', name='conv5_1')(layers['pool_4'])
layers['conv5_2'] = Conv2D(512, (3, 3), padding='same', activation='relu', name='conv5_2')(layers['conv5_1'])
layers['upsample_1'] = UpSampling2D(size=(2, 2), name='upsample_1')(layers['conv5_2'])
layers['concat_1'] = concatenate([layers['upsample_1'], layers['conv4_2']], name='concat_1')
layers['conv6_1'] = Conv2D(256, (3, 3), padding='same', activation='relu', name='conv6_1')(layers['concat_1'])
layers['conv6_2'] = Conv2D(256, (3, 3), padding='same', activation='relu', name='conv6_2')(layers['conv6_1'])
if train == True:
layers['dropout_6'] = Dropout(0.25, name='dropout_6')(layers['conv6_2'])
layers['upsample_2'] = UpSampling2D(size=(2, 2), name='upsample_2')(layers['dropout_6'])
else:
layers['upsample_2'] = UpSampling2D(size=(2, 2), name='upsample_2')(layers['conv6_2'])
layers['concat_2'] = concatenate([layers['upsample_2'], layers['conv3_2']], name='concat_2')
layers['conv7_1'] = Conv2D(128, (3, 3), padding='same', activation='relu', name='conv7_1')(layers['concat_2'])
layers['conv7_2'] = Conv2D(128, (3, 3), padding='same', activation='relu', name='conv7_2')(layers['conv7_1'])
if train == True:
layers['dropout_7'] = Dropout(0.25, name='dropout_7')(layers['conv7_2'])
layers['upsample_3'] = UpSampling2D(size=(2, 2), name='upsample_3')(layers['dropout_7'])
else:
layers['upsample_3'] = UpSampling2D(size=(2, 2), name='upsample_3')(layers['conv7_2'])
layers['concat_3'] = concatenate([layers['upsample_3'], layers['conv2_2']], name='concat_3')
layers['conv8_1'] = Conv2D(64, (3, 3), padding='same', activation='relu', name='conv8_1')(layers['concat_3'])
layers['conv8_2'] = Conv2D(64, (3, 3), padding='same', activation='relu', name='conv8_2')(layers['conv8_1'])
if train == True:
layers['dropout_8'] = Dropout(0.25, name='dropout_8')(layers['conv8_2'])
layers['upsample_4'] = UpSampling2D(size=(2, 2), name='upsample_4')(layers['dropout_8'])
else:
layers['upsample_4'] = UpSampling2D(size=(2, 2), name='upsample_4')(layers['conv8_2'])
layers['concat_4'] = concatenate([layers['upsample_4'], layers['conv1_2']], name='concat_4')
layers['conv9_1'] = Conv2D(32, (3, 3), padding='same', activation='relu', name='conv9_1')(layers['concat_4'])
layers['conv9_2'] = Conv2D(32, (3, 3), padding='same', activation='relu', name='conv9_2')(layers['conv9_1'])
if train == True:
layers['dropout_9'] = Dropout(0.25, name='dropout_9')(layers['conv9_2'])
layers['outputs'] = Conv2D(1, (1, 1), activation='sigmoid', name='outputs')(layers['dropout_9'])
else:
layers['outputs'] = Conv2D(1, (1, 1), activation='sigmoid', name='outputs')(layers['conv9_2'])
model = Model(inputs=layers['inputs'], outputs=layers['outputs'])
return model
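# Usage sketch (illustrative): get_model() builds a U-Net-style
# encoder/decoder with skip connections; dropout layers are inserted only
# when train=True.
#
#     model = get_model(train=True)
#     model.compile(optimizer='adam', loss='binary_crossentropy')
#     model.summary()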
0cd91aa577d43008bec3c93456090358adedc5a3 | 1,459 | py | Python | Chapter06/B13346_06_05-classify.py | shahidnawazkhan/geog786course | e2d425875d183af3f0d5d54bb7c01033f5b2926f | ["MIT"] | 97 | 2019-06-21T21:59:23.000Z | 2022-03-30T17:00:46.000Z | Chapter06/B13346_06_05-classify.py | Fall-in-love-with-Kikyo/Learning-Geospatial-Analysis-with-Python-Third-Edition | 5f4961837d762cffbf67338e0237313ea89dbb48 | ["MIT"] | 3 | 2021-10-19T02:31:15.000Z | 2022-01-18T04:45:07.000Z | Chapter06/B13346_06_05-classify.py | Fall-in-love-with-Kikyo/Learning-Geospatial-Analysis-with-Python-Third-Edition | 5f4961837d762cffbf67338e0237313ea89dbb48 | ["MIT"] | 61 | 2019-04-07T22:53:30.000Z | 2022-03-28T03:01:11.000Z
"""Classify a remotely sensed image"""
# https://github.com/GeospatialPython/Learn/raw/master/thermal.zip
from gdal import gdal_array
# Input file name (thermal image)
src = "thermal.tif"
# Output file name
tgt = "classified.jpg"
# Load the image into numpy using gdal
srcArr = gdal_array.LoadFile(src)
# Split the histogram into 20 bins as our classes
classes = gdal_array.numpy.histogram(srcArr, bins=20)[1]
# Color look-up table (LUT) - must be len(classes)+1.
# Specified as R, G, B tuples
lut = [[255, 0, 0], [191, 48, 48], [166, 0, 0], [255, 64, 64], [255, 115, 115],
[255, 116, 0], [191, 113, 48], [255, 178, 115], [0, 153, 153],
[29, 115, 115], [0, 99, 99], [166, 75, 0], [0, 204, 0], [51, 204, 204],
[255, 150, 64], [92, 204, 204], [38, 153, 38], [0, 133, 0],
[57, 230, 57], [103, 230, 103], [184, 138, 0]]
# Starting value for classification
start = 1
# Set up the RGB color JPEG output image
rgb = gdal_array.numpy.zeros((3, srcArr.shape[0],
srcArr.shape[1], ), gdal_array.numpy.float32)
# Process all classes and assign colors
for i in range(len(classes)):
mask = gdal_array.numpy.logical_and(start <= srcArr, srcArr <= classes[i])
for j in range(len(lut[i])):
rgb[j] = gdal_array.numpy.choose(mask, (rgb[j], lut[i][j]))
start = classes[i]+1
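# Note: numpy.choose(mask, (a, b)) selects from b where mask is True and from
# a where it is False, so each LUT color is painted only onto the pixels that
# fall inside the current histogram bin.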
# Save the image
output = gdal_array.SaveArray(rgb.astype(gdal_array.numpy.uint8), tgt, format="JPEG")
output = None
0cd9cd6ec412874e5e2619a9caeeef6dd15b85fc | 1,308 | py | Python | src/mechanrich/constants.py | zli9/Mechanism-enrichment-using-NeuroMMSig | 3c8fc01fd68f3d37d728acfb5b819b5ebb1a9b49 | ["MIT"] | null | null | null | src/mechanrich/constants.py | zli9/Mechanism-enrichment-using-NeuroMMSig | 3c8fc01fd68f3d37d728acfb5b819b5ebb1a9b49 | ["MIT"] | null | null | null | src/mechanrich/constants.py | zli9/Mechanism-enrichment-using-NeuroMMSig | 3c8fc01fd68f3d37d728acfb5b819b5ebb1a9b49 | ["MIT"] | 1 | 2022-02-18T08:22:07.000Z | 2022-02-18T08:22:07.000Z
import os
from typing import Optional
from src.mechanrich.utils import make_fake_pathway
# parameters
P_THRED: float = 0.01
FC_THRED: float = 0.5
IS_FAKE = False
# file paths
WORKING_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR: str = os.path.abspath(os.path.join(WORKING_DIR, "../.."))
GEO_FILE: str = os.path.join(ROOT_DIR, "data/GSE164191.top.table.tsv")
MAPPING_FILE: str = os.path.join(ROOT_DIR, "data/gene_interaction_map.tsv")
if IS_FAKE: # fake pathway for testing
num_of_fake_edges = 20
gene_set = ["SMAD3", "SMAD4", "TGFBR2", "SPTBN1", "PML", "TGFB1", "DAB2"]
PATHWAY_FILE = os.path.join(ROOT_DIR, "tests/test_data/fake_pathway.txt")
make_fake_pathway(k=num_of_fake_edges, gene_set=gene_set, output_path=PATHWAY_FILE)
else: # real pathway
PATHWAY_FILE: str = os.path.join(ROOT_DIR, "data/TGF-beta_receptor_pathway.txt")
# mapping of dataframe column names
# keys are column names in the input file; values are the standard column names used in this project
# Do NOT change the dictionary values!
GEO_FILE_COLS: dict = {
"Gene.symbol": "gene_symbol",
"logFC": "log_fold_change",
"adj.P.Val": "p_value",
}
PATHWAY_FILE_COLS: dict = {0: "source", 1: "interaction", 2: "target"}
MAPPING_FILE_COLS: dict = {"source": "source", "target": "target", "relation": "relation"}
0cdc0e35d32215246f442987a06cfa486feff2ad | 1,299 | py | Python | examples/alchemy_vision_v1.py | mcwilkes/python-sdk | bc545472ad1d00f77e916773e3a949160e4f48c3 | ["Apache-2.0"] | null | null | null | examples/alchemy_vision_v1.py | mcwilkes/python-sdk | bc545472ad1d00f77e916773e3a949160e4f48c3 | ["Apache-2.0"] | null | null | null | examples/alchemy_vision_v1.py | mcwilkes/python-sdk | bc545472ad1d00f77e916773e3a949160e4f48c3 | ["Apache-2.0"] | null | null | null
import json
from os.path import join, dirname
from watson_developer_cloud import AlchemyVisionV1 as AlchemyVision
alchemy_vision = AlchemyVision(api_key='YOUR API KEY')
# Face recognition
with open(join(dirname(__file__), '../resources/face.jpg'), 'rb') as image_file:
print(json.dumps(alchemy_vision.recognize_faces(image_file, knowledge_graph=True), indent=2))
face_url = 'https://upload.wikimedia.org/wikipedia/commons/9/9d/Barack_Obama.jpg'
print(json.dumps(alchemy_vision.recognize_faces(image_url=face_url, knowledge_graph=True), indent=2))
# Image tagging
with open(join(dirname(__file__), '../resources/test.jpg'), 'rb') as image_file:
print(json.dumps(alchemy_vision.get_image_keywords(image_file, knowledge_graph=True,
force_show_all=True), indent=2))
print(json.dumps(alchemy_vision.get_image_keywords(
image_url='https://upload.wikimedia.org/wikipedia/commons/8/81/Morris-Chair-Ironwood.jpg'), indent=2))
# Image link extraction
print(json.dumps(alchemy_vision.get_image_links(url='http://www.ibm.com/smarterplanet/us/en/ibmwatson/'), indent=2))
with open(join(dirname(__file__), '../resources/example.html'), 'r') as webpage:
print(json.dumps(alchemy_vision.get_image_links(html=webpage.read()), indent=2))
0cdcae32c27f41dd0f06e3e4da00d21f792d1a11 | 19,625 | py | Python | DeepDream3D/ModelDefinition/modelSVR_DD.py | QuantumPlumber/PoorMansDeepSDF | a8f2c78fee26155f2fd5337ffb16363a44da4f68 | ["MIT"] | null | null | null | DeepDream3D/ModelDefinition/modelSVR_DD.py | QuantumPlumber/PoorMansDeepSDF | a8f2c78fee26155f2fd5337ffb16363a44da4f68 | ["MIT"] | null | null | null | DeepDream3D/ModelDefinition/modelSVR_DD.py | QuantumPlumber/PoorMansDeepSDF | a8f2c78fee26155f2fd5337ffb16363a44da4f68 | ["MIT"] | null | null | null
import os
import time
import math
import random
import numpy as np
import h5py
import matplotlib.pyplot as plt
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.autograd import Variable
from pytorch3d.io import save_ply, save_obj, load_objs_as_meshes, load_obj, load_ply
from pytorch3d.structures import Meshes
from pytorch3d.renderer import (
look_at_view_transform,
FoVPerspectiveCameras,
PointLights,
DirectionalLights,
Materials,
RasterizationSettings,
MeshRenderer,
MeshRasterizer,
SoftPhongShader,
TexturesUV,
Textures,
TexturesVertex
)
import cv2
import mcubes
from typing import List
from ..preprocessing.utils import shapenet_cam_params
from .ShapeNetRendering import ShapeNetRendering
from .utils import *
from .modelSVR import IM_SVR
class IM_SVR_DD(IM_SVR):
def __init__(self, config):
super().__init__(config)
self.shapenet_cam_params = shapenet_cam_params
def load_data(self, config):
'''
Overrides base class method in order to only load data required for deep dreaming.
:param config:
:return:
'''
# get config values
z_base = int(config.interpol_z1)
z_target = int(config.interpol_z2)
self.crop_edge = self.view_size - self.crop_size
data_hdf5_name = self.data_dir + '/' + self.dataset_load + '.hdf5'
if os.path.exists(data_hdf5_name):
data_dict = h5py.File(data_hdf5_name, 'r')
offset_x = int(self.crop_edge / 2)
offset_y = int(self.crop_edge / 2)
# reshape to NCHW
# get the shape of the first two cropped pictures
cropped_shape = np.reshape(
data_dict['pixels'][0:2, :, offset_y:offset_y + self.crop_size, offset_x:offset_x + self.crop_size],
[-1, self.view_num, 1, self.crop_size, self.crop_size]).shape
self.data_pixels = np.empty(shape=cropped_shape)
# now grab only the data that is needed. This must be done iteratively or hdf5 can throw and error
# (selection indices must be of increasing order only)
for ind, z in enumerate([z_base, z_target]):
self.data_pixels[ind, ...] = np.reshape(
data_dict['pixels'][z, :, offset_y:offset_y + self.crop_size, offset_x:offset_x + self.crop_size],
[self.view_num, 1, self.crop_size, self.crop_size])
else:
print("error: cannot load " + data_hdf5_name)
exit(0)
def get_activation(self, output_list):
'''
A wrapper function to establish the forward hook
:param output_list: single-element list that receives the hooked layer's output
:return: the hook function to register
'''
def hook(model, input, output):
output_list[0] = output
return hook
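# Forward hooks like the one above are the standard PyTorch mechanism for
# capturing intermediate activations. Standalone sketch (illustrative, not
# part of this class):
#
#     captured = [None]
#     layer = torch.nn.Linear(4, 2)
#     handle = layer.register_forward_hook(
#         lambda module, inputs, output: captured.__setitem__(0, output))
#     layer(torch.rand(1, 4))   # captured[0] now holds the layer output
#     handle.remove()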
def get_zvec(self, z_num):
if z_num < len(self.data_pixels):
batch_view = self.data_pixels[z_num:z_num + 1, self.test_idx].astype(np.float32) / 255.0
batch_view = torch.from_numpy(batch_view)
batch_view = batch_view.to(self.device)
z_vec_, _ = self.im_network(batch_view, None, None, is_training=False)
z_vec = z_vec_.detach().cpu().numpy()
return (z_vec)
else:
print("z_num not a valid number")
def interpolate_z(self, config):
'''
A method to create the meshes from latent z vectors linearly interpolated between two vectors.
:param config:
:return:
'''
# TODO: uncomment load data
super().load_data(config=config)
# TODO: load previous checkpoint
self.load_checkpoint()
z1 = int(config.interpol_z1)
z2 = int(config.interpol_z2)
interpol_steps = int(config.interpol_steps)
result_base_directory = config.interpol_directory
self.result_dir_name = 'interpol_' + str(z1) + '_' + str(z2)
self.result_dir = result_base_directory + '/' + self.result_dir_name
print(self.result_dir)
# Create output directory
if not os.path.isdir(self.result_dir):
os.mkdir(self.result_dir)
print('creating directory ' + self.result_dir)
# get the z vectors via forward pass through encoder
z1_vec = self.get_zvec(z1)
print(z1_vec)
z2_vec = self.get_zvec(z2)
print(z2_vec)
# compute linear interpolation between vectors
fraction = np.linspace(0, 1, interpol_steps)
interpolated_z = np.multiply.outer(np.ones_like(fraction), z1_vec) + np.multiply.outer(fraction,
z2_vec - z1_vec)
interpolated_z = interpolated_z.astype(np.float64)
self.out_filenames = []
for z_index in np.arange(interpol_steps):
self.out_filenames.append(self.result_dir + "/" + "out_{:.2f}.ply".format(fraction[z_index]))
for z_index in np.arange(interpol_steps):
start_time = time.time()
model_z = interpolated_z[z_index:z_index + 1].astype(np.float64)
# print('current latent vector:')
# print(model_z.shape)
model_z = torch.from_numpy(model_z).float()
model_z = model_z.to(self.device)
self.im_network.eval()
model_float = self.z2voxel(model_z)
vertices, triangles = mcubes.marching_cubes(model_float, self.sampling_threshold)
vertices = (vertices.astype(np.float32) - 0.5) / self.real_size - 0.5
# vertices = self.optimize_mesh(vertices,model_z)
write_ply_triangle(self.result_dir + "/" + "out_{:.2f}.ply".format(fraction[z_index]), vertices, triangles)
end_time = time.time() - start_time
print("computed interpolation {} in {} seconds".format(z_index, end_time))
def create_saved_images(self, images, name):
num_images = int(images.shape[0])
cols = 3
rows = -int(-num_images // cols)
# keep a reference for plotting; images are already grayscale in [0, 1]
rescale_images = images
print(images.max())
print(images.min())
fig, axs = plt.subplots(nrows=rows,
ncols=cols,
sharex='all',
sharey='all',
figsize=(cols * 2, rows * 2),
gridspec_kw={'wspace': 0, 'hspace': 0}
)
for ax, im in zip(axs.flatten(), range(num_images)):
ax.imshow(rescale_images[im, 0, :, :], cmap='gray', vmin=0, vmax=1)
ax.axis('off')
plt.savefig(self.result_dir + '/' + name)
# output shape as ply
def create_model_mesh(self, batch_view, num, config):
# TODO: uncomment load checkpoint
# load previous checkpoint
self.load_checkpoint()
self.im_network.eval()
model_z, _ = self.im_network(batch_view, None, None, is_training=False)
model_float = self.z2voxel(model_z)
print('model_float shape')
print(model_float.shape)
# This transform nescessary to accomodate coordinate transform induced in marching cubes
model_float = np.flip(np.transpose(model_float, (2, 1, 0)), 0)
vertices, triangles = mcubes.marching_cubes(model_float, self.sampling_threshold)
vertices = (vertices.astype(np.float32) - 0.5) / self.real_size - 0.5
# vertices = self.optimize_mesh(vertices,model_z)
full_path = self.result_dir + "/" + str(num) + "_vox.ply"
write_ply_triangle(full_path, vertices, triangles)
print("created .ply for image {}".format(num))
return full_path
def cv2_image_transform(self, img):
'''
Basic image transform used as input to IM_SVR
:param img:
:return:
'''
'''
imgo = img[:, :, :3] * 255
imgo = cv2.cvtColor(imgo, cv2.COLOR_BGR2GRAY)
imga = (img[:, :, 3])
img_out = imgo * imga + 255.0 * (1 - imga)
img_out = np.round(img_out).astype(np.uint8)
'''
img[:, :, :3] = img[:, :, :3] * 255
img_out = cv2.cvtColor(img[:, :, :], cv2.COLOR_BGRA2GRAY) / 255
# img_out = np.round(img_out).astype(np.uint8)
# print(img_out.shape)
img_out = cv2.resize(img_out, dsize=(128, 128))
img_out = img_out[np.newaxis, :, :].astype(np.float32)
return img_out
def annealing_view(self, ply_path):
# param_num = self.test_idx
param_num = 7
# get image transform
R, T = look_at_view_transform(
dist=shapenet_cam_params["distance"][param_num] * 3,
elev=shapenet_cam_params["elevation"][param_num],
azim=shapenet_cam_params["azimuth"][param_num])
cameras = FoVPerspectiveCameras(device=self.device,
R=R,
T=T,
fov=shapenet_cam_params["field_of_view"][param_num]
)
raster_settings = RasterizationSettings(
image_size=128,
blur_radius=0.0,
faces_per_pixel=1,
)
lights = PointLights(device=self.device, location=[[0.0, 0.0, -3.0]])
renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=cameras,
raster_settings=raster_settings
),
shader=SoftPhongShader(
device=self.device,
cameras=cameras,
lights=lights
)
)
verts = []
faces = []
verts_rgb = []
titles = []
vert, face = load_ply(ply_path)
verts.append(vert.to(self.device))
faces.append(face.to(self.device))
verts_rgb.append(torch.ones_like(vert).to(self.device))
textures = Textures(verts_rgb=verts_rgb)
interpol_mesh = Meshes(verts, faces, textures)
image = renderer(interpol_mesh).cpu().numpy()
print(image.shape)
reformatted_image = self.cv2_image_transform(image[0])
print(reformatted_image.min())
out = torch.from_numpy(reformatted_image).unsqueeze(0).type(torch.float32).to(self.device)
# print(out)
return out
def annealing_view_pytorch3d(self, ply_paths: List[str]):
verts = []
faces = []
verts_rgb = []
for ply_path in ply_paths:
vert, face = load_ply(ply_path)
verts.append(vert.to(self.device))
faces.append(face.to(self.device))
verts_rgb.append(torch.ones_like(vert).to(self.device))
# verts_rgb.append(torch.rand(size=vert.size()).to(self.device))
textures = Textures(verts_rgb=verts_rgb)
interpol_mesh = Meshes(verts, faces, textures)
# print(interpol_mesh.isempty())
# print(interpol_mesh.num_verts_per_mesh())
image = self.shapenet_render.render(model_ids=[0],
meshes=interpol_mesh,
device=self.device
).cpu().numpy()
# print(image.shape)
reformatted_image = self.cv2_image_transform(image[0])
out = torch.from_numpy(reformatted_image).unsqueeze(0).type(torch.float32).to(self.device)
return out
def latent_gradient(self, base_batch_view, target_batch_view, step, config):
style_activation = self.style_activation.clone()
# zero gradients
self.im_network.zero_grad()
# re-register forward hook on each forward pass.
# self.target_layer.register_forward_hook(self.get_activation(self.target_activation))
z_vec_, _ = self.im_network(base_batch_view, None, None, is_training=False)
base_activation = self.target_activation[0]
# compute best feature maps
features, width, height = style_activation.shape
style_activation = style_activation.view(features, -1)
comp_base_activation = base_activation.squeeze().view(features, -1)
# Matrix of best matching feature maps.
A = torch.matmul(torch.transpose(comp_base_activation, 0, 1), style_activation)
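        # (editor's note) A has shape (H*W, H*W): A[i, j] is the channel-wise dot
        # product between base location i and style location j, so argmax over
        # dim 1 picks, for each base location, its best-matching style location.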
# A = comp_base_activation.T.dot(style_activation)
loss = comp_base_activation[:, torch.argmax(A, 1)].view(features, width, height).detach()
# run the graph in reverse
base_activation.backward(loss.unsqueeze(0))
return base_batch_view.grad
def deep_dream(self, config):
        super().load_data(config)
        # load previous checkpoint
        self.load_checkpoint()
# get config values
z_base = int(config.interpol_z1)
base_im_num = int(config.z1_im_view)
z_target = int(config.interpol_z2)
        target_im_num = int(config.z1_im_view)  # note: mirrors z1_im_view; a separate config.z2_im_view may have been intended for the target view
# instantiate camera rendering class
self.shapenet_render = ShapeNetRendering(model_nums=[z_base, z_target],
R2N2_dir=config.R2N2_dir,
model_views=[[base_im_num], [target_im_num]],
splitfile=config.splitfile
)
# set the dreaming rate and boundary size
self.dream_rate = config.dream_rate
annealing_step = config.annealing_rate
# Set up forward hook to pull values
self.layer_num = config.layer_num
# list index includes as zero entry the generator module itself.
# 2 layers up front should not be used
num_model_layers = len(list(self.im_network.img_encoder.named_children())) - 2
if self.layer_num < 2 or self.layer_num >= num_model_layers:
            print('Layer number out of range: select a layer number from 2 to {}'.format(num_model_layers - 1))
exit(0)
# Get target layer
# self.target_layer = list(list(self.im_network.img_encoder.children())[self.layer_num].children())[-1]
self.target_layer = list(self.im_network.img_encoder.children())[self.layer_num]
self.target_activation = [None]
# register forward hook
self.target_layer.register_forward_hook(self.get_activation(self.target_activation))
interpol_steps = int(config.interpol_steps)
result_base_directory = config.interpol_directory
result_dir_name = 'DeepDream_SVR' + str(z_base) + '_' + str(z_target) + '_layer_' + str(self.layer_num)
self.result_dir = result_base_directory + '/' + result_dir_name
# Create output directory
# TODO: re-create directory
if not os.path.isdir(self.result_dir):
os.mkdir(self.result_dir)
print('creating directory ' + self.result_dir)
# store images
num_images = interpol_steps // annealing_step
annealing_images = np.empty(shape=(num_images + 2, 1, 128, 128))
deepdream_images = np.empty(shape=(num_images + 2, 1, 128, 128))
# TODO: remove dummy data
# batch_view = np.random.random(size=(1, 1, 128, 128))
batch_view = self.data_pixels[z_base:z_base + 1, base_im_num, ...].astype(np.float32) / 255.0
base_batch_view_ = torch.from_numpy(batch_view).type(torch.float32).to(self.device)
base_batch_view = torch.autograd.Variable(base_batch_view_, requires_grad=True)
deepdream_images[0, ...] = batch_view[0, ...]
        # save the base mesh
        self.create_model_mesh(base_batch_view, 'base', config)
# TODO: remove dummy data
# batch_view = np.random.random(size=(1, 1, 128, 128))
batch_view = self.data_pixels[z_target:z_target + 1, target_im_num, ...].astype(np.float32) / 255.0
target_batch_view = torch.from_numpy(batch_view).type(torch.float32).to(self.device)
deepdream_images[1, ...] = batch_view[0, ...]
        # save the target mesh
        self.create_model_mesh(target_batch_view, 'target', config)
# get target activation
z_vec_, _ = self.im_network(target_batch_view, None, None, is_training=False)
self.style_activation = self.target_activation[0].data.clone().detach().squeeze()
for step in range(interpol_steps):
start_time = time.perf_counter()
            # mask selecting the pixels to update (note: 1.99e5 is far above the
            # [0, 1] image range, so this currently selects every pixel)
            mask = base_batch_view < 1.99e5
grad = self.latent_gradient(base_batch_view, target_batch_view, step, config)
grad = grad[mask]
# print(grad.shape)
# mask low value fluctuations, one standard deviation below mean
grad_mean = grad.mean()
# print(grad_mean)
grad_var = torch.pow(torch.mean(torch.pow(grad - grad_mean, 2)), .5)
# print(grad_var)
# grad[grad < grad_mean - grad_var] = 0
grad_step = grad * self.dream_rate / torch.abs(grad_mean)
# grad_step = self.dream_rate * (grad - grad_mean) / grad_var
# print(grad_step.shape)
# print(torch.max(grad_step))
# clamp output to min,max input values.
# base_batch_view.data = torch.clamp(base_batch_view.data - grad_step, min=0., max=1.)
with torch.no_grad():
base_batch_view.data[mask] += grad_step
base_batch_view.clamp_(min=0, max=1)
print(base_batch_view.shape)
# apply a mask to remove border artifacts
border = 8
            # left border (first columns)
            base_batch_view.data[..., :, 0:border] = 1
            # right border (last columns)
            base_batch_view[..., :, -border:] = 1
            # top border
            base_batch_view[..., 0:border, :] = 1
            # bottom border
            base_batch_view[..., -border:, :] = 1
# print(torch.max(grad))
# Make sure gradients flow on the update
# base_batch_view.requires_grad = True
# create ply models
            if step % annealing_step == 0:
if step != 0:
                    # save model
ply_path = self.create_model_mesh(base_batch_view, step, config)
# save image
deepdream_images[step // annealing_step + 1, ...] = base_batch_view.clone().detach().cpu().numpy()[
0, ...]
# get a new annealing model image
with torch.no_grad():
# base_batch_view.data = self.annealing_view(ply_path=ply_path)
base_batch_view.data = self.annealing_view_pytorch3d(ply_paths=[ply_path])
# save image
annealing_images[step // annealing_step + 1, ...] = base_batch_view.clone().detach().cpu().numpy()[
0, ...]
end_time = time.perf_counter()
print('Completed dream {} in {} seconds'.format(step, end_time - start_time))
self.create_model_mesh(base_batch_view, step, config)
self.create_saved_images(deepdream_images, 'deepdream_images')
self.create_saved_images(annealing_images, 'annealing_images')
print('Done Dreaming..')
| 36.682243 | 119 | 0.593834 | 2,383 | 19,625 | 4.649182 | 0.179606 | 0.037368 | 0.029335 | 0.009207 | 0.36691 | 0.336673 | 0.295965 | 0.266992 | 0.241358 | 0.220959 | 0 | 0.017857 | 0.303745 | 19,625 | 534 | 120 | 36.750936 | 0.79296 | 0.162497 | 0 | 0.205387 | 0 | 0 | 0.029137 | 0 | 0 | 0 | 0 | 0.003745 | 0 | 1 | 0.043771 | false | 0 | 0.077441 | 0 | 0.148148 | 0.063973 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cde3dad65e9db81f3c2934afb198bd383f5133f | 1,421 | py | Python | main/urls.py | ajra7/Pensive | cffd14267aec21e70c99e16b55c961107605f5db | [
"Apache-2.0"
] | null | null | null | main/urls.py | ajra7/Pensive | cffd14267aec21e70c99e16b55c961107605f5db | [
"Apache-2.0"
] | null | null | null | main/urls.py | ajra7/Pensive | cffd14267aec21e70c99e16b55c961107605f5db | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from . import views
app_name = 'main'
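# With app_name set, these routes are reversible through the "main" namespace,
# e.g. (illustrative): reverse('main:details', kwargs={'question_id': 42}) or
# {% url 'main:details' question.id %} in a template. Note that the two
# identical '<int:tab>/' patterns below resolve incoming URLs to the first
# match only, although both names remain usable for reversing.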
urlpatterns = [
path('', views.index, name='index'),
path('<int:tab>/', views.index, name='tags_tab'),
path('<int:tab>/', views.index, name='users_tab'),
path('sort/<str:sort_by>/', views.index, name='sort'),
path('<int:tab>/<slug:tag_slug>/', views.index, name='tags'),
path('<int:tab>/<slug:tag_slug>/<str:tag_sort_by>/', views.index, name='tags_sort'),
path('questions/<int:question_id>/', views.details, name='details'),
path('search/', views.search, name='search'),
path('add_question/', views.add_question, name='add_question'),
path('add_question/adding', views.adding_question, name="adding_question"),
path('signup/', views.signup, name='signup'),
path('login/', views.loginUser, name='login'),
path('logout/', views.logoutUser, name='logout'),
path('feedback/', views.feedback, name='feedback'),
path('send_feedback/', views.send_feedback, name='send_feedback'),
path('<int:question_id>/add_answer/adding/', views.adding_answer, name='adding'),
path('<int:question_id>/upvote', views.upvote, name='upvote'),
path('<int:question_id>/<int:answer_id>/upscore', views.upscore, name='upscore'),
path('<int:user_id>/dashboard', views.dashboard, name='dashboard'),
path('<int:user_id>/edit_profile', views.edit_profile, name='edit_profile'),
] | 52.62963 | 89 | 0.660802 | 188 | 1,421 | 4.835106 | 0.218085 | 0.069307 | 0.092409 | 0.059406 | 0.143014 | 0.09901 | 0 | 0 | 0 | 0 | 0 | 0 | 0.128079 | 1,421 | 27 | 90 | 52.62963 | 0.733656 | 0 | 0 | 0 | 0 | 0 | 0.379656 | 0.17765 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.08 | 0 | 0.08 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cdef96786f7fd029589fe5ad4352085dd012138 | 5,706 | py | Python | secretsanta.py | jradis/secretsanta | 4f294d4c2bd9900569f810fdfed204eae864d9e4 | [
"Apache-2.0"
] | null | null | null | secretsanta.py | jradis/secretsanta | 4f294d4c2bd9900569f810fdfed204eae864d9e4 | [
"Apache-2.0"
] | null | null | null | secretsanta.py | jradis/secretsanta | 4f294d4c2bd9900569f810fdfed204eae864d9e4 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
from twilio.rest import Client
import random
import giphy_client
from giphy_client.rest import ApiException
import time
########################
# SET CONSTANT VARIABLES
########################
santa_message = '''{0}, you have the pleasure of participating in this years friends' gift exchange! Santa has picked you to give a gift to {1}. Date of the Christmas party is TBD. Just make sure you don\'t fuck it up... Oh, and Merry Christmas!!! Ho Ho HO!!!'''
elf_message_1 = '''{0}, you have been chosen to be head elf for a gift exchange. Lucky You. Someone Trusts and/or loves you... Or has nobody else to turn to... lol... Anyways, here is a list of each person, their number and who they are assigned to give a gift. It\'s likely you wont be contacted but in the case that you are it is probably because someone fucked up and forgot who they have. Thanks for being loved!!! Oh, and Merry Christmas!!!'''
elf_message_2 = '''Anyways, here is their info and who has who, just in case:'''
TESTING = False  # When True, the random seed is fixed to 7 and results are printed for verification; when False, seed 13 is used and text messages are actually sent.
########################
##############################
# LOAD CONFIGURATION VARIABLES
##############################
# SET RANDOM SEED
if TESTING:
random.seed(7)
else:
random.seed(13)
# GET API INFO AND KEYS
config_info = pd.read_csv('api_config.csv')
ACCOUNT = config_info.loc[config_info['key'] == 'ACCOUNT']['value'].values[0] # Twilio Account
AUTH = config_info.loc[config_info['key'] == 'AUTH']['value'].values[0] # Twilio API Key
FROM = config_info.loc[config_info['key'] == 'FROM']['value'].values[0] # Twilio Phone Number
GIPHY = config_info.loc[config_info['key'] == 'GIPHY']['value'].values[0] # GIPHY API Key
# Configure Twilio Client
client = Client(ACCOUNT, AUTH)
##############################
##################
# HELPER FUNCTIONS
##################
def add_christmas_gify():
return '{0}'.format(get_random_santa_gif())
def get_random_santa_gif(api_key=GIPHY, tag='christmas', rating='PG-13', fmt='json'):
api_instance = giphy_client.DefaultApi()
api_key = api_key
tag = tag
rating = rating
fmt = fmt
try:
# Random Sticker Endpoint
api_response = api_instance.gifs_random_get(api_key, tag=tag, rating=rating, fmt=fmt)
return api_response.to_dict()['data']['image_original_url']
except ApiException as e:
print("Exception when calling DefaultApi->stickers_random_get: %s\n" % e)
return None
def send_sms(body, test, TO, client=client, FROM=FROM, media=None):
if test:
print('MSG:', body)
print('Number:', TO)
print('Media:', media)
else:
client.messages.create(
to=TO,
from_=FROM,
body=body,
media_url=media)
time.sleep(10) # Adding to try and avoid getting marked as spam by carrier
##################
#############
# PICK SANTAS
#############
# Parse persons info
people_info = pd.read_csv('santas.csv', dtype={'number': 'str'})
santas_info = people_info.loc[people_info['type'] == 'Santa'][['name', 'number', 'relationship']]
## To-do Split relationships directly from csv. Auto-detect if relationships exist.
relationships = santas_info[~santas_info['relationship'].isnull()].set_index('name').to_dict()['relationship']
santas_info = santas_info[['name', 'number']].set_index('name').to_dict('index')
elf_info = people_info.loc[people_info['type'] != 'Santa'][['name', 'number']]
santas = list(santas_info.keys())
options = list(santas_info.keys())
random.shuffle(santas)
random.shuffle(options)
# Elegantly making it so you don't ever have to reshuffle.
# Pick a random couple: one partner becomes the first santa, the other the second-to-last.
coupled = random.choice(list(relationships.keys()))
# Set one member of the couple to be the very first of the santas
santas.insert(0, santas.pop(santas.index(coupled)))
# Move the other member of the relationship to be the second to last.
santas.insert(-1, santas.pop(santas.index(relationships[coupled])))
# Move the other member of the relationship to be the very first position of the options
options.insert(0, options.pop(options.index(relationships[coupled])))
# If the last santa is also in a relationship, move them and their partner to the
# front of the options list so the final assignments cannot pair partners together.
if santas[-1] in relationships.keys():
options.insert(0, options.pop(options.index(santas[-1])))
options.insert(0, options.pop(options.index(relationships[options[0]])))
pairs = {}
for i, santa in enumerate(santas):
if i == 0:
gives_to = santas[-1]
options.remove(santas[-1])
pairs[santa] = gives_to
else:
bad_match = [santa]
if santa in relationships.keys():
bad_match.append(relationships[santa])
if options[0] not in bad_match:
gives_to = options[0]
elif options[1] not in bad_match:
gives_to = options[1]
else:
gives_to = options[2]
options.remove(gives_to)
pairs[santa] = gives_to
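# Sanity check (editor's sketch): with the ordering tricks above, every santa
# should give to exactly one distinct person who is neither themselves nor
# their own partner; cheap to verify before any messages go out.
assert sorted(pairs.keys()) == sorted(pairs.values()), "everyone must give and receive exactly once"
for giver, receiver in pairs.items():
    assert receiver != giver, "a santa was assigned to themselves"
    assert relationships.get(giver) != receiver, "a santa was assigned to their own partner"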
#############
###############
# SEND MESSAGES
###############
for pair in pairs:
santas_info[pair]['gives to'] = pairs[pair]
to_num = santas_info[pair]['number']
msg = santa_message.format(pair, pairs[pair])
send_sms(msg, TO=to_num, test=TESTING, media=add_christmas_gify())
send_sms(elf_message_1.format(elf_info.name.values[0]), TO=elf_info.number.values[0], test=TESTING, media=add_christmas_gify())
time.sleep(60) # Adding to try and avoid getting marked as spam by carrier
send_sms(elf_message_2 + '\n\n' + str(santas_info), TO=elf_info.number.values[0], test=TESTING)
#############
| 34.373494 | 448 | 0.657203 | 822 | 5,706 | 4.447689 | 0.278589 | 0.027352 | 0.014223 | 0.020788 | 0.223195 | 0.20651 | 0.163567 | 0.13895 | 0.077681 | 0.077681 | 0 | 0.008716 | 0.175605 | 5,706 | 165 | 449 | 34.581818 | 0.768495 | 0.176831 | 0 | 0.068182 | 0 | 0.022727 | 0.238738 | 0.007318 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034091 | false | 0 | 0.068182 | 0.011364 | 0.136364 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cdf781ddc7b208f53906322a38fce519aeae677 | 2,353 | py | Python | scoreboard/auth/appengine.py | outdex/ctfscoreboard | ab662e26544a49ba2a80cc0ca48924001e14d9d2 | [
"Apache-2.0"
] | null | null | null | scoreboard/auth/appengine.py | outdex/ctfscoreboard | ab662e26544a49ba2a80cc0ca48924001e14d9d2 | [
"Apache-2.0"
] | null | null | null | scoreboard/auth/appengine.py | outdex/ctfscoreboard | ab662e26544a49ba2a80cc0ca48924001e14d9d2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Appengine based login support."""
import flask
from google.appengine.api import users
from scoreboard import controllers
from scoreboard import errors
from scoreboard import main
from scoreboard import models
from scoreboard import utils
app = main.get_app()
def login_user(_):
"""Login based on GAE Auth."""
gae_user = users.get_current_user()
if not gae_user:
return None
user = models.User.get_by_email(gae_user.email())
if user and flask.request:
user.last_login_ip = flask.request.remote_addr
models.db.session.commit()
return user
def get_login_uri():
return users.create_login_url('/gae_login')
def get_register_uri():
if not users.get_current_user():
return users.create_login_url('/register')
return '/register'
def logout():
pass
def register(flask_request):
gae_user = users.get_current_user()
if not gae_user:
raise errors.LoginError(
'Cannot register if not logged into AppEngine.')
data = flask_request.get_json()
user = controllers.register_user(
gae_user.email(), data['nick'], '',
data.get('team_id'), data.get('team_name'), data.get('team_code'))
if users.is_current_user_admin():
user.promote()
return user
@app.route('/gae_login')
def gae_login_handler():
user = login_user(None)
gae_user = users.get_current_user()
if gae_user and not user:
app.logger.info('No user found for user %s' % gae_user.email())
return flask.redirect('/register')
elif not user:
app.logger.error('No user found and not logged in.')
return flask.redirect(get_register_uri())
utils.session_for_user(user)
return flask.redirect('/')
| 27.682353 | 78 | 0.699108 | 336 | 2,353 | 4.741071 | 0.386905 | 0.039548 | 0.062775 | 0.047709 | 0.096673 | 0.065286 | 0.065286 | 0.047709 | 0.047709 | 0.047709 | 0 | 0.004269 | 0.20357 | 2,353 | 84 | 79 | 28.011905 | 0.845784 | 0.265618 | 0 | 0.142857 | 0 | 0 | 0.104985 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.122449 | false | 0.020408 | 0.142857 | 0.020408 | 0.44898 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ce16593265e57098f5d89e5c31fab07bdfee79a | 845 | py | Python | build/plugins/credits.py | jochenater/catboost | de2786fbc633b0d6ea6a23b3862496c6151b95c2 | [
"Apache-2.0"
] | null | null | null | build/plugins/credits.py | jochenater/catboost | de2786fbc633b0d6ea6a23b3862496c6151b95c2 | [
"Apache-2.0"
] | null | null | null | build/plugins/credits.py | jochenater/catboost | de2786fbc633b0d6ea6a23b3862496c6151b95c2 | [
"Apache-2.0"
] | 1 | 2022-02-23T13:35:26.000Z | 2022-02-23T13:35:26.000Z | from _common import rootrel_arc_src
def oncredits_disclaimer(unit, *args):
if unit.get('WITH_CREDITS'):
unit.message(["warn", "CREDITS WARNING: {}".format(' '.join(args))])
def oncheck_contrib_credits(unit, *args):
module_path = rootrel_arc_src(unit.path(), unit)
excepts = set()
if 'EXCEPT' in args:
args = list(args)
except_pos = args.index('EXCEPT')
excepts = set(args[except_pos + 1:])
args = args[:except_pos]
for arg in args:
if module_path.startswith(arg) and not unit.get('CREDITS_TEXTS_FILE') and not unit.get('NO_CREDITS_TEXTS_FILE'):
for ex in excepts:
if module_path.startswith(ex):
break
else:
unit.message(["error", "License texts not found. See https://st.yandex-team.ru/DTCC-324"])
| 36.73913 | 120 | 0.614201 | 112 | 845 | 4.455357 | 0.482143 | 0.042084 | 0.078156 | 0.088176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006359 | 0.255621 | 845 | 22 | 121 | 38.409091 | 0.786963 | 0 | 0 | 0 | 0 | 0 | 0.183432 | 0.024852 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.052632 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ce183f3a39da7c442ad52197d849097719cd1c8 | 1,357 | py | Python | gammapy/utils/tests/test_table.py | watsonjj/gammapy | 8d2498c8f63f73d1fbe4ba81ab02d9e72552df67 | [
"BSD-3-Clause"
] | null | null | null | gammapy/utils/tests/test_table.py | watsonjj/gammapy | 8d2498c8f63f73d1fbe4ba81ab02d9e72552df67 | [
"BSD-3-Clause"
] | null | null | null | gammapy/utils/tests/test_table.py | watsonjj/gammapy | 8d2498c8f63f73d1fbe4ba81ab02d9e72552df67 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from collections import OrderedDict
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.table import Table, Column
from ..table import table_standardise_units_copy, table_row_to_dict, table_from_row_data
def test_table_standardise_units():
table = Table(
[
Column([1], "a", unit="ph cm-2 s-1"),
Column([1], "b", unit="ct cm-2 s-1"),
Column([1], "c", unit="cm-2 s-1"),
Column([1], "d"),
]
)
table = table_standardise_units_copy(table)
assert table["a"].unit == "cm-2 s-1"
assert table["b"].unit == "cm-2 s-1"
assert table["c"].unit == "cm-2 s-1"
assert table["d"].unit is None
@pytest.fixture()
def table():
return Table(
[Column([1, 2], "a"), Column([1, 2] * u.m, "b"), Column(["x", "yy"], "c")]
)
def test_table_row_to_dict(table):
actual = table_row_to_dict(table[1])
expected = OrderedDict([("a", 2), ("b", 2 * u.m), ("c", "yy")])
assert actual == expected
def test_table_from_row_data():
rows = [dict(a=1, b=1 * u.m, c="x"), dict(a=2, b=2 * u.km, c="yy")]
table = table_from_row_data(rows)
assert isinstance(table, Table)
assert table["b"].unit == "m"
assert_allclose(table["b"].data, [1, 2000])
| 28.87234 | 88 | 0.602063 | 215 | 1,357 | 3.655814 | 0.260465 | 0.053435 | 0.030534 | 0.038168 | 0.335878 | 0.129771 | 0.076336 | 0 | 0 | 0 | 0 | 0.031073 | 0.217391 | 1,357 | 46 | 89 | 29.5 | 0.70904 | 0.044952 | 0 | 0 | 0 | 0 | 0.061051 | 0 | 0 | 0 | 0 | 0 | 0.257143 | 1 | 0.114286 | false | 0 | 0.171429 | 0.028571 | 0.314286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ce21b3dba4729f325fbfa0f098d4f303867da95 | 11,597 | py | Python | dowhy/causal_identifiers/id_identifier.py | EgorKraevTransferwise/dowhy | 8b2cf6a722572a7e0d6a1f7fef22e743dbd3b13a | [
"MIT"
] | 2,904 | 2019-05-07T08:09:33.000Z | 2022-03-31T18:28:41.000Z | dowhy/causal_identifiers/id_identifier.py | arnabkdey/dowhy | 46614846b2785809870203529072ff03fb0f1572 | [
"MIT"
] | 238 | 2019-05-11T02:57:22.000Z | 2022-03-31T23:47:18.000Z | dowhy/causal_identifiers/id_identifier.py | arnabkdey/dowhy | 46614846b2785809870203529072ff03fb0f1572 | [
"MIT"
] | 527 | 2019-05-08T16:23:45.000Z | 2022-03-30T21:02:41.000Z | import numpy as np
import pandas as pd
import networkx as nx
from dowhy.utils.ordered_set import OrderedSet
from dowhy.utils.graph_operations import find_c_components, induced_graph, find_ancestor
from dowhy.causal_identifier import CausalIdentifier
from dowhy.utils.api import parse_state
class IDExpression:
"""
Class for storing a causal estimand, as a result of the identification step using the ID algorithm.
    The object stores a list of estimators (self._product) whose product must be obtained and a list of variables (self._sum) over which the product must be marginalized.
"""
def __init__(self):
self._product = []
self._sum = []
def add_product(self, element):
'''
Add an estimator to the list of product.
:param element: Estimator to append to the product list.
'''
self._product.append(element)
def add_sum(self, element):
'''
Add variables to the list.
:param element: Set of variables to append to the list self._sum.
'''
for el in element:
self._sum.append(el)
def get_val(self, return_type):
"""
Get either the list of estimators (for product) or list of variables (for the marginalization).
:param return_type: "prod" to return the list of estimators or "sum" to return the list of variables.
"""
if return_type=="prod":
return self._product
elif return_type=="sum":
return self._sum
else:
raise Exception("Provide correct return type.")
def _print_estimator(self, prefix, estimator=None, start=False):
'''
Print the IDExpression object.
'''
if estimator is None:
return None
string = ""
if isinstance(estimator, IDExpression):
            s = len(estimator.get_val(return_type="sum")) > 0
if s:
sum_vars = "{" + ",".join(estimator.get_val(return_type="sum")) + "}"
string += prefix + "Sum over " + sum_vars + ":\n"
prefix += "\t"
for expression in estimator.get_val(return_type='prod'):
add_string = self._print_estimator(prefix, expression)
if add_string is None:
return None
else:
string += add_string
else:
outcome_vars = list(estimator['outcome_vars'])
condition_vars = list(estimator['condition_vars'])
string += prefix + "Predictor: P(" + ",".join(outcome_vars)
if len(condition_vars)>0:
string += "|" + ",".join(condition_vars)
string += ")\n"
if start:
string = string[:-1]
return string
def __str__(self):
string = self._print_estimator(prefix="", estimator=self, start=True)
if string is None:
return "The graph is not identifiable."
else:
return string
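# Illustrative use of IDExpression (editor's sketch, not part of the public API
# contract): encoding the estimand P(y) = sum_w P(y, w) would look like
#   expr = IDExpression()
#   expr.add_product({'outcome_vars': OrderedSet(['Y', 'W']),
#                     'condition_vars': OrderedSet()})
#   expr.add_sum(['W'])
#   str(expr)  # -> "Sum over {W}:\n\tPredictor: P(Y,W)"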
class IDIdentifier(CausalIdentifier):
def __init__(self, graph, estimand_type,
method_name = "default",
proceed_when_unidentifiable=None):
'''
Class to perform identification using the ID algorithm.
:param self: instance of the IDIdentifier class.
:param estimand_type: Type of estimand ("nonparametric-ate", "nonparametric-nde" or "nonparametric-nie").
:param method_name: Identification method ("id-algorithm" in this case).
:param proceed_when_unidentifiable: If True, proceed with identification even in the presence of unobserved/missing variables.
'''
super().__init__(graph, estimand_type, method_name, proceed_when_unidentifiable)
if self.estimand_type != CausalIdentifier.NONPARAMETRIC_ATE:
raise Exception("The estimand type should be 'non-parametric ate' for the ID method type.")
self._treatment_names = OrderedSet(parse_state(graph.treatment_name))
self._outcome_names = OrderedSet(parse_state(graph.outcome_name))
self._adjacency_matrix = graph.get_adjacency_matrix()
try:
self._tsort_node_names = OrderedSet(list(nx.topological_sort(graph._graph))) # topological sorting of graph nodes
except:
raise Exception("The graph must be a directed acyclic graph (DAG).")
self._node_names = OrderedSet(graph._graph.nodes)
def identify_effect(self, treatment_names=None, outcome_names=None, adjacency_matrix=None, node_names=None):
'''
Implementation of the ID algorithm.
Link - https://ftp.cs.ucla.edu/pub/stat_ser/shpitser-thesis.pdf
The pseudo code has been provided on Pg 40.
:param self: instance of the IDIdentifier class.
:param treatment_names: OrderedSet comprising names of treatment variables.
:param outcome_names:OrderedSet comprising names of outcome variables.
:param adjacency_matrix: Graph adjacency matrix.
:param node_names: OrderedSet comprising names of all nodes in the graph.
:returns: target estimand, an instance of the IDExpression class.
'''
if adjacency_matrix is None:
adjacency_matrix = self._adjacency_matrix
if treatment_names is None:
treatment_names = self._treatment_names
if outcome_names is None:
outcome_names = self._outcome_names
if node_names is None:
node_names = self._node_names
node2idx, idx2node = self._idx_node_mapping(node_names)
# Estimators list for returning after identification
estimators = IDExpression()
# Line 1
# If no action has been taken, the effect on Y is just the marginal of the observational distribution P(v) on Y.
if len(treatment_names) == 0:
identifier = IDExpression()
estimator = {}
estimator['outcome_vars'] = node_names
estimator['condition_vars'] = OrderedSet()
identifier.add_product(estimator)
identifier.add_sum(node_names.difference(outcome_names))
estimators.add_product(identifier)
return estimators
# Line 2
# If we are interested in the effect on Y, it is sufficient to restrict our attention on the parts of the model ancestral to Y.
ancestors = find_ancestor(outcome_names, node_names, adjacency_matrix, node2idx, idx2node)
if len(node_names.difference(ancestors)) != 0: # If there are elements which are not the ancestor of the outcome variables
# Modify list of valid nodes
treatment_names = treatment_names.intersection(ancestors)
node_names = node_names.intersection(ancestors)
adjacency_matrix = induced_graph(node_set=node_names, adjacency_matrix=adjacency_matrix, node2idx=node2idx)
return self.identify_effect(treatment_names=treatment_names, outcome_names=outcome_names, adjacency_matrix=adjacency_matrix, node_names=node_names)
# Line 3 - forces an action on any node where such an action would have no effect on Y – assuming we already acted on X.
# Modify adjacency matrix to obtain that corresponding to do(X)
adjacency_matrix_do_x = adjacency_matrix.copy()
for x in treatment_names:
x_idx = node2idx[x]
for i in range(len(node_names)):
adjacency_matrix_do_x[i, x_idx] = 0
ancestors = find_ancestor(outcome_names, node_names, adjacency_matrix_do_x, node2idx, idx2node)
W = node_names.difference(treatment_names).difference(ancestors)
if len(W) != 0:
return self.identify_effect(treatment_names = treatment_names.union(W), outcome_names=outcome_names, adjacency_matrix=adjacency_matrix, node_names=node_names)
# Line 4 - Decomposes the problem into a set of smaller problems using the key property of C-component factorization of causal models.
# If the entire graph is a single C-component already, further problem decomposition is impossible, and we must provide base cases.
# Modify adjacency matrix to remove treatment variables
node_names_minus_x = node_names.difference(treatment_names)
node2idx_minus_x, idx2node_minus_x = self._idx_node_mapping(node_names_minus_x)
adjacency_matrix_minus_x = induced_graph(node_set=node_names_minus_x, adjacency_matrix=adjacency_matrix, node2idx=node2idx)
c_components = find_c_components(adjacency_matrix=adjacency_matrix_minus_x, node_set=node_names_minus_x, idx2node=idx2node_minus_x)
if len(c_components)>1:
identifier = IDExpression()
sum_over_set = node_names.difference(outcome_names.union(treatment_names))
for component in c_components:
expressions = self.identify_effect(treatment_names=node_names.difference(component), outcome_names=OrderedSet(list(component)), adjacency_matrix=adjacency_matrix, node_names=node_names)
for expression in expressions.get_val(return_type="prod"):
identifier.add_product(expression)
identifier.add_sum(sum_over_set)
estimators.add_product(identifier)
return estimators
# Line 5 - The algorithms fails due to the presence of a hedge - the graph G, and a subgraph S that does not contain any X nodes.
S = c_components[0]
c_components_G = find_c_components(adjacency_matrix=adjacency_matrix, node_set=node_names, idx2node=idx2node)
if len(c_components_G)==1 and c_components_G[0] == node_names:
return None
# Line 6 - If there are no bidirected arcs from X to the other nodes in the current subproblem under consideration, then we can replace acting on X by conditioning, and thus solve the subproblem.
if S in c_components_G:
sum_over_set = S.difference(outcome_names)
prev_nodes = []
for node in self._tsort_node_names:
if node in S:
identifier = IDExpression()
estimator = {}
estimator['outcome_vars'] = OrderedSet([node])
estimator['condition_vars'] = OrderedSet(prev_nodes)
identifier.add_product(estimator)
identifier.add_sum(sum_over_set)
estimators.add_product(identifier)
prev_nodes.append(node)
return estimators
# Line 7 - This is the most complicated case in the algorithm. Explain in the second last paragraph on Pg 41 of the link provided in the docstring above.
for component in c_components_G:
C = S.difference(component)
if C.is_empty() is None:
return self.identify_effect(treatment_names=treatment_names.intersection(component), outcome_names=outcome_names, adjacency_matrix=induced_graph(node_set=component, adjacency_matrix=adjacency_matrix,node2idx=node2idx), node_names=node_names)
def _idx_node_mapping(self, node_names):
'''
Obtain the node name to index and index to node name mappings.
:param node_names: Name of all nodes in the graph.
:return: node to index and index to node mappings.
'''
node2idx = {}
idx2node = {}
for i, node in enumerate(node_names.get_all()):
node2idx[node] = i
idx2node[i] = node
return node2idx, idx2node | 49.348936 | 257 | 0.65905 | 1,428 | 11,597 | 5.140756 | 0.204482 | 0.047814 | 0.015257 | 0.032693 | 0.296826 | 0.210326 | 0.123144 | 0.091268 | 0.051492 | 0.035962 | 0 | 0.005541 | 0.268604 | 11,597 | 235 | 258 | 49.348936 | 0.859821 | 0.276796 | 0 | 0.162162 | 0 | 0 | 0.039886 | 0 | 0.013514 | 0 | 0 | 0 | 0 | 1 | 0.060811 | false | 0 | 0.047297 | 0 | 0.222973 | 0.02027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
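# For example (illustrative), node_names = OrderedSet(['X', 'Z', 'Y']) yields
# node2idx == {'X': 0, 'Z': 1, 'Y': 2} and idx2node == {0: 'X', 1: 'Z', 2: 'Y'},
# assuming OrderedSet.get_all() preserves insertion order.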
0ce2d96988e94cb88fd27ed04ed46fcd58193fb1 | 8,638 | py | Python | pybireport/report.py | joseaccruz/pybireport | 8406ad1a89a4df52e7267fef0c32f220c534caf6 | [
"MIT"
] | null | null | null | pybireport/report.py | joseaccruz/pybireport | 8406ad1a89a4df52e7267fef0c32f220c534caf6 | [
"MIT"
] | null | null | null | pybireport/report.py | joseaccruz/pybireport | 8406ad1a89a4df52e7267fef0c32f220c534caf6 | [
"MIT"
] | null | null | null | # [TBD] Allow the redefinition of a style in a Report / Page level as well as Viz level.
import xlsxwriter
from pybireport.styles import Style, DefaultStyleSheet
class Report:
def __init__(self, fname):
self._fname = fname
self._pages = []
self._ss = DefaultStyleSheet()
def add(self, page):
# TBD: check if a page w/ the same name exists
self._pages.append(page)
return page
    def style_sheet(self, ss):
        self._ss = ss
        return self
def generate(self):
# Create a workbook and add a worksheet.
wb = xlsxwriter.Workbook(self._fname)
for page in self._pages:
page.generate(wb, self._ss)
wb.close()
class Page:
def __init__(self, name):
self._name = name
self._vizs = []
def add(self, viz):
self._vizs.append(viz)
return viz
def generate(self, wb, ss):
ws = wb.add_worksheet(self._name)
for viz in self._vizs:
viz.reset()
for (i, viz) in enumerate(self._vizs):
viz.generate(wb, ws, ss)
class Viz:
PLACE_ABSOLUTE = 1
PLACE_BELLOW = 2
PLACE_ABOVE = 3
PLACE_LEFT = 4
PLACE_RIGHT = 5
def __init__(self):
# how to place the Viz
self._placement = Viz.PLACE_ABSOLUTE
self._pcol, self._prow = (1, 1)
# reference Viz
self._ref = None
self._spacer_rows = 0
self._spacer_cols = 0
self._generated = False
self._tl_col, self._tl_row = (1, 1)
self._br_col, self._br_row = (1, 1)
# format & style
self._style = {}
def format(self, component, value):
if component not in self._style.keys():
            # [TBD] make this a graceful error exception
print("Error - Component '%s' not found. Choose one of %s" % (component, self._style.keys()))
self._style[component].format(value)
return self
def reset(self):
self._generated = False
def place_at(self, pcol, prow):
self._placement = Viz.PLACE_ABSOLUTE
self._pcol = pcol
self._prow = prow
return self
def place_bellow(self, viz, rows=1, align="left"):
self._placement = Viz.PLACE_BELLOW
self._ref = viz
self._spacer_rows = rows
return self
def place_left(self, viz, cols=1, align="top"):
self._placement = Viz.PLACE_LEFT
self._ref = viz
self._spacer_cols = cols
return self
    def _generate(self, wb, ws, ss):
        # subclasses must implement their own rendering
        raise NotImplementedError("Viz is abstract; use a concrete subclass such as Text or Table")
def generate(self, wb, ws, ss):
if not self._generated:
self._generated = True
# compute the relative positioning
if self._placement != Viz.PLACE_ABSOLUTE:
# generate the reference viz (if not already)
self._ref.generate(wb, ws, ss)
(ul_col, ul_row, br_col, br_row) = self._ref.get_coords()
if self._placement == Viz.PLACE_BELLOW:
# TBD: honor the align parameter to compute the _pcol
self._pcol = ul_col
self._prow = br_row + 1 + self._spacer_rows
elif self._placement == Viz.PLACE_LEFT:
# TBD: honor the align parameter to compute the _pcol
self._pcol = br_col + 1 + self._spacer_cols
self._prow = br_row
# generate it's own
print("Viz: Create the viz on (%d, %d)" % (self._pcol, self._prow))
self._generate(wb, ws, ss)
else:
print("Done")
def get_coords(self):
return self._tl_col, self._tl_row, self._br_col, self._br_row
class Text(Viz):
def __init__(self, text):
super().__init__()
self._text = text
# format & style
self._style["text"] = Style("text")
# merge info
self._merge_rows = 1
self._merge_cols = 1
def merge_cols(self, cols=1):
self._merge_cols = cols
return self
def merge_rows(self, rows=1):
self._merge_rows = rows
return self
def format(self, value):
return super().format("text", value)
def _generate(self, wb, ws, ss):
# prepare the format
self._fmt_text = wb.add_format(ss.get(self._style["text"]))
# write the text
if self._merge_cols > 1 or self._merge_rows > 1:
ws.merge_range(self._prow, self._pcol, self._prow + self._merge_rows - 1, self._pcol + self._merge_cols - 1, self._text, self._fmt_text)
else:
ws.write_string(self._prow, self._pcol, self._text, self._fmt_text)
# compute the occupied area
self._tl_col, self._tl_row = self._pcol, self._prow
self._br_col, self._br_row = self._pcol + self._merge_cols, self._prow + self._merge_rows
class Table(Viz):
def __init__(self, data):
super().__init__()
self._data = data
self._title = ""
self._description = ""
self._legend = ""
# default parameters
self._merge_title = True
self._zebra = False
# format & style
self._style = {
"title": Style("table_title"),
"description": Style("table_description"),
"legend": Style("table_legend"),
"header": Style("table_header"),
"row": Style("table_row"),
"row_odd": Style("table_row_odd"),
"row_even": Style("table_row_even") }
# [TBD] allow a specific format for each column (inherit from row, row_odd, row_even)
def title(self, title, style={}):
self._title = title
self.format("title", style)
return self
def description(self, description, style={}):
self._description = description
self.format("description", style)
return self
def legend(self, legend, style={}):
self._legend = legend
self.format("legend", style)
return self
def zebra(self, on):
self._zebra = on
return self
def _generate(self, wb, ws, ss):
# [TBD] add description and legend
# setup all formats
self._fmt = {}
for k, v in self._style.items():
self._fmt[k] = wb.add_format(ss.get(v))
# start cell
(r, c) = self._prow, self._pcol
# write the title
if self._merge_title:
ws.merge_range(r, c, r, c + len(self._data.columns) - 1, self._title, self._fmt["title"])
else:
ws.write_string(r, c, self._title, self._fmt["title"])
# [TBD] this spacer should be configured in the future
r += 2
# write the header
for (i, col) in enumerate(self._data.columns):
# [TBD] allow a specific format for each header column
ws.write_string(r, c + i, col, self._fmt["header"])
r += 1
# write the data
for (i, values) in enumerate(self._data.values):
if self._zebra:
if i % 2 == 0:
fmt_cell = self._fmt["row_odd"]
else:
fmt_cell = self._fmt["row_even"]
else:
fmt_cell = self._fmt["row"]
for (j, value) in enumerate(values):
# Convert the date string into a datetime object.
# date = datetime.strptime(date_str, "%Y-%m-%d")
# [TBD] use a class parameter "col_type"
ws.write_string(r + i, c + j, str(value), fmt_cell)
#worksheet.write_datetime(row, col + 1, date, date_format )
#worksheet.write_number (row, col + 2, cost, money_format)
#row += 1
# compute the occupied area
self._tl_col, self._tl_row = self._pcol, self._prow
self._br_col, self._br_row = self._pcol + len(self._data.columns), self._prow + len(self._data) + 3
class Form(Viz):
# [TBD] finish form
def __init__(self, title, data):
super().__init__()
self._title = title
        self._data = data

    def _generate(self, wb, ws, ss):
print("Generate Label")
print("\t'%s'" % self._title)
for (k, v) in self._data.items():
print("\t%s: %s" % (k, str(v)))
# compute the occupied area
self._tl_col, self._tl_row = self._pcol, self._prow
self._br_col, self._br_row = self._pcol + 2, self._prow + len(self._data.keys()) + 2
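if __name__ == "__main__":
    # Minimal usage sketch (editor's addition): assumes pandas is installed and
    # that DefaultStyleSheet defines every style name referenced by Text/Table.
    import pandas as pd

    df = pd.DataFrame({"name": ["a", "b"], "value": [1, 2]})
    report = Report("report.xlsx")
    page = report.add(Page("Summary"))
    title = page.add(Text("Monthly summary").merge_cols(2))
    page.add(Table(df).title("Values").zebra(True).place_bellow(title, rows=1))
    report.generate()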
| 28.697674 | 148 | 0.556726 | 1,103 | 8,638 | 4.097915 | 0.165005 | 0.028319 | 0.028761 | 0.032522 | 0.295354 | 0.15531 | 0.125664 | 0.09115 | 0.09115 | 0.076106 | 0 | 0.006252 | 0.333411 | 8,638 | 300 | 149 | 28.793333 | 0.778743 | 0.142973 | 0 | 0.175824 | 0 | 0 | 0.047004 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.159341 | false | 0 | 0.010989 | 0.010989 | 0.313187 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ce53d671c1956c4a288626ae49019933af0452e | 32,926 | py | Python | ! vkbot/main.py | vladimir05112007/vk_bot | 2268b30790262c845a04c8687cc43439cd72c2ab | [
"MIT"
] | null | null | null | ! vkbot/main.py | vladimir05112007/vk_bot | 2268b30790262c845a04c8687cc43439cd72c2ab | [
"MIT"
] | null | null | null | ! vkbot/main.py | vladimir05112007/vk_bot | 2268b30790262c845a04c8687cc43439cd72c2ab | [
"MIT"
] | null | null | null | from sqlite3.dbapi2 import Cursor
from prettytable import PrettyTable
import vk_api
from vk_api.longpoll import VkLongPoll, VkEventType
from vk_api.keyboard import VkKeyboard, VkKeyboardColor
import random
import sqlite3
from game_math import RandomNumber
import psycopg2
import threading
import datetime
from vkcoinapi import *
import pikches
coin = VKCoin(key='sT_uy6[Py*jtHQU6QVFJ9SxAc=_OnqIgPK=UjE392y!p,Fxh7p', merchantId=545851228)
coin.setShopName('7B SHOP')
DATABASE_URL = 'postgres://eauprxzosofunb:922ae816b5a8cc2558170460098f6c961d89d7b656ed33c7d665e9e1e4c7108e@ec2-52-205-145-201.compute-1.amazonaws.com:5432/dfrmm2t89jd2ag'
API_VERSION = '5.126'
ranked = 0
ranks_points = [300, 500, 1000, 1200, 1500, 2000, 2200, 2500, 3000, 3200, 3500, 4000, 5000, 6000, 10000]
ranks_names = ["БЕЗ РАНГА", "Железо 3", "Железо 2", "Железо 1", "Бронза 3", "Бронза 2", "Бронза 1", "Серебро 3",
"Серебро 2", "Серебро 1", "Платина 3", "Платина 2", "Платина 1", "Алмаз", "Титан", "Непобедимый"]
col_coins = 1
col_abs = 1
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
cursor = conn.cursor()
id_send = False
send_sendr = 0
pr = ""
sendr = ""
table = ""
senders = []
senders_2 = []
resh = []
otvets = []
ban_list = []
wait_resh = []
num = 0
ob_send = False
def update_bases(id):
if id in senders:
senders.remove(id)
if id in senders_2:
senders_2.remove(id)
def update_bases_game(id):
if id in resh:
num = resh.index(id)
resh.remove(id)
otvets.pop(num)
num = 0
# Create the database tables
try:
cursor.execute("""CREATE TABLE USERS (ID INT, COINS INT, BONUS INT)""")
except:
print("Database users already created")
try:
cursor.execute("""CREATE TABLE SENDS (ID INT, MST_T TEXT, MSG TEXT)""")
except:
print("Database sends already created")
conn.commit()
from rank_manager import *
def write_msg(user_id, message):
rand_id = random.getrandbits(64)
vk.method('messages.send', {'user_id': user_id, 'message': message, 'random_id': rand_id})
def write_msg_pik(user_id, message, attach):
rand_id = random.getrandbits(64)
vk.method('messages.send', {'user_id': user_id, 'message': message, 'random_id': rand_id, 'attachment': attach})
def write_msg_kb(user_id, message, keyboard):
rand_id = random.getrandbits(64)
vk.method('messages.send',
{'user_id': user_id, 'message': message, 'random_id': rand_id, 'keyboard': keyboard.get_keyboard()})
def game_event(event):
ob_send = False
initis(event.user_id)
col = 0
max = 0
closh = ""
update_bases_game(event.user_id)
id_send = False
update_bases(event.user_id)
rn = RandomNumber()
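    # Difficulty tiers (summary of the branches below): <300 rank points ->
    # 3 operands up to 10; <2000 -> 3 operands up to 100; <4000 -> 4 operands
    # up to 100; otherwise 4 operands up to 1000.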
if get_points(event.user_id) < 300:
col = 3
max = 10
closh = "легко"
elif get_points(event.user_id) < 2000:
col = 3
max = 100
closh = "средне"
elif get_points(event.user_id) < 4000:
col = 4
max = 100
closh = "трудно"
else:
col = 4
max = 1000
closh = "очень трудно"
prim = rn.generate(1, max, col)
write_msg(event.user_id,
f"Игра \"Примеры\". Твоя задача - решить пример. \nСложность: {closh}. Ответ округляй до целого в меньшую сторону. \nПример: {prim}")
resh.insert(len(resh), event.user_id)
otvets.insert(len(otvets), eval(prim))
def get_payment():
pass
token = "secret"
vk = vk_api.VkApi(token=token, api_version=API_VERSION)
vk_conn = vk.get_api()
longpoll = VkLongPoll(vk)
kb_start = VkKeyboard(one_time=True, inline=False)
kb_start.add_button(color=VkKeyboardColor.POSITIVE, label="Меню", payload={"type": "0x002_menu"})
kb_menu = VkKeyboard(one_time=False, inline=False)
kb_menu.add_button(color=VkKeyboardColor.PRIMARY, label="Меню")
kb_menu.add_line()
kb_menu.add_button(color=VkKeyboardColor.POSITIVE, label="Профиль")
kb_menu.add_line()
kb_menu.add_button(color=VkKeyboardColor.NEGATIVE, label="Играть")
kb_menu.add_line()
kb_menu.add_button(color=VkKeyboardColor.POSITIVE, label="Магазин")
kb_menu.add_button(color=VkKeyboardColor.SECONDARY, label="Подать заявку")
kb_admin = VkKeyboard(one_time=False, inline=True)
kb_admin.add_button(color=VkKeyboardColor.NEGATIVE, label="Данные")
kb_admin.add_line()
kb_admin.add_button(color=VkKeyboardColor.NEGATIVE, label="Заявки")
kb_admin.add_line()
kb_admin.add_button(color=VkKeyboardColor.NEGATIVE, label="Объявление")
kb_pik = VkKeyboard(one_time=False, inline=True)
kb_pik.add_button(color=VkKeyboardColor.POSITIVE, label="Мои пикчи")
kb_sender = VkKeyboard(one_time=False, inline=True)
kb_sender.add_button(color=VkKeyboardColor.PRIMARY, label="Алгебра и геометрия")
kb_sender.add_line()
kb_sender.add_button(color=VkKeyboardColor.NEGATIVE, label="Литра и русский")
kb_sender.add_line()
kb_sender.add_button(color=VkKeyboardColor.POSITIVE, label="Биология")
kb_sender.add_line()
kb_sender.add_button(color=VkKeyboardColor.PRIMARY, label="Улучшения бота")
kb_shop = VkKeyboard(one_time=False, inline=True)
kb_shop.add_button(color=VkKeyboardColor.NEGATIVE, label="Пикча Алека!")
kb_shop.add_line()
kb_shop.add_button(color=VkKeyboardColor.NEGATIVE, label="Пикча для богатых")
kb_shop.add_line()
kb_shop.add_button(color=VkKeyboardColor.POSITIVE, label="Рандом Пикча!")
kb_shop.add_line()
kb_shop.add_button(color=VkKeyboardColor.POSITIVE, label="Взлом рандома пикч!")
kb_shop.add_line()
kb_shop.add_button(color=VkKeyboardColor.PRIMARY, label="МЕГА РАНДОМ ПИКЧ!")
def initis(id):
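    # Ensure this user has a row in each of USERS, RANKS and PIK, inserting
    # defaults (0 coins, 0 rank points, inventory mask "1111111111") on first
    # contact; the mask semantics live in rank_manager (assumed), and a short
    # mask is padded back to 10 characters below.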
user_get = vk_conn.users.get(user_ids=id)[0]
cursor.execute(f"""SELECT COINS from USERS where id={id}""")
if not cursor.fetchall():
full_name = user_get['first_name'] + ' ' + user_get['last_name']
print(full_name)
cursor.execute(f"""INSERT INTO USERS (ID, COINS, BONUS) VALUES ({id}, 0, 0)""")
conn.commit()
cursor.execute(f"""SELECT COINS from USERS where id={id}""")
print(cursor.fetchone())
cursor.execute(f"""SELECT POINTS from RANKS where id={id}""")
if not cursor.fetchall():
full_name = user_get['first_name'] + ' ' + user_get['last_name']
print(full_name)
cursor.execute(f"""INSERT INTO ranks VALUES ({id}, 0)""")
conn.commit()
cursor.execute(f"""SELECT points FROM ranks WHERE id={id}""")
print(cursor.fetchone())
cursor.execute(f"""SELECT INV from PIK where ID={id}""")
if not cursor.fetchall():
full_name = user_get['first_name'] + ' ' + user_get['last_name']
print(full_name)
data_pik = "1111111111"
cursor.execute(f"""INSERT INTO PIK VALUES ({id}, {str(data_pik)})""")
conn.commit()
cursor.execute(f"""SELECT INV FROM PIK WHERE id={event.user_id}""")
print(cursor.fetchone())
cursor.execute(f"""SELECT INV from PIK where ID={id}""")
inv = str(cursor.fetchone()[0])
print(inv)
set = len(inv)
if set < 10:
get = 10 - set
print("GET: "+str(get))
get_inv = inv
data_r = get_inv + '1' * get
cursor.execute(f"""UPDATE PIK set INV={data_r} where ID={id}""")
conn.commit()
while True:
for event in longpoll.listen():
        # If a new message arrived
if event.type == VkEventType.MESSAGE_NEW:
if event.to_me:
req_msg = event.text.lower()
req_msg_up = event.text
try:
initis(event.user_id)
if event.user_id in ban_list:
write_msg(event.user_id,
"Вы получили бан! Теперь вы не можете пользоваться ботом!\nДля разбана обращайтесь к администраторам!")
elif req_msg == "начать":
update_bases_game(event.user_id)
update_bases(event.user_id)
ob_send = False
id_send = False
send_sendr = 0
write_msg_kb(event.user_id,
"Привет! Это бот нашей группы (обновленный), и теперь вы сможете не просто подавать заявки, но и получать и накапливать монеты, повышать ранги и другое...",
kb_start)
user_get = vk_conn.users.get(user_ids=event.user_id)[0]
cursor.execute("""SELECT coins FROM users WHERE id={event.user_id}""")
if not cursor.fetchall():
full_name = user_get['first_name'] + ' ' + user_get['last_name']
print(full_name)
cursor.execute(f"""INSERT INTO users VALUES ({event.user_id}, 0, 0)""")
conn.commit()
cursor.execute(f"""SELECT coins FROM users WHERE id={event.user_id}""")
print(cursor.fetchone())
cursor.execute(f"""SELECT points FROM ranks WHERE id={event.user_id}""")
if not cursor.fetchall():
full_name = user_get['first_name'] + ' ' + user_get['last_name']
print(full_name)
cursor.execute(f"""INSERT INTO ranks VALUES ({event.user_id}, 0)""")
conn.commit()
cursor.execute(f"""SELECT points FROM ranks WHERE id={event.user_id}""")
print(cursor.fetchone())
elif req_msg == "меню":
ob_send = False
initis(event.user_id)
update_bases_game(event.user_id)
update_bases(event.user_id)
id_send = False
send_sendr = 0
write_msg_kb(event.user_id, "Меню. Выбери кнопку на панели под клавиатурой.", kb_menu)
if event.user_id == 502085595 or event.user_id == 545851228 or event.user_id == 13122641:
write_msg_kb(event.user_id,
"Тссс, я тут услышал что ты админ, так что пользуйся кнопкой админов:",
kb_admin)
elif req_msg == "заявки":
ob_send = False
initis(event.user_id)
update_bases_game(event.user_id)
update_bases(event.user_id)
table = ""
id_send = False
send_sendr = 0
if event.user_id == 502085595 or event.user_id == 545851228 or event.user_id == 13122641:
t = PrettyTable(["ID пользователя", "Тема", "Текст заявки"])
write_msg(event.user_id, r"Таблица заявок 📃, поданных учениками.")
cursor.execute("""SELECT * FROM sends""")
data_s = cursor.fetchall()
for row in data_s:
t.add_row([row[0], row[1], row[2]])
table += str(row[0]) + ": " + str(row[1]) + ", " + str(row[2]) + "\n\n"
print(t)
write_msg(event.user_id, table)
else:
write_msg(event.user_id, "Это место только для админов, тебе туда нельзя!")
elif req_msg == "данные":
ob_send = False
initis(event.user_id)
update_bases_game(event.user_id)
update_bases(event.user_id)
id_send = False
send_sendr = 0
if event.user_id == 502085595 or event.user_id == 545851228 or event.user_id == 13122641:
write_msg(event.user_id, "Введите ID пользователя, чтобы узнать о нем информацию.")
id_send = True
else:
write_msg(event.user_id, "Это место только для админов, тебе туда нельзя!")
elif req_msg == "объявление":
initis(event.user_id)
update_bases_game(event.user_id)
update_bases(event.user_id)
id_send = False
ob_send = False
send_sendr = 0
if event.user_id == 545851228:
write_msg(event.user_id,
"Введите текст объявления, который будет отправлен всем зарегистрированным в боте.")
ob_send = True
elif event.user_id == 502085595:
write_msg(event.user_id,
"Прости, Леша, но во имя безопастности и защиты от спама, тебе тоже сюда нельзя(")
else:
write_msg(event.user_id, "Это место только для админов, тебе туда нельзя!")
elif req_msg == "подать заявку":
ob_send = False
initis(event.user_id)
update_bases_game(event.user_id)
id_send = False
write_msg_kb(event.user_id,
r"Выберите предмет для заявки. Сейчас поддерживаются такие предметы:",
kb_sender)
senders.insert(len(senders), event.user_id)
elif req_msg == "играть":
ob_send = False
initis(event.user_id)
col = 0
max = 0
closh = ""
update_bases_game(event.user_id)
id_send = False
update_bases(event.user_id)
rn = RandomNumber()
if get_points(event.user_id) < 300:
col = 3
max = 10
closh = "легко"
elif get_points(event.user_id) < 2000:
col = 3
max = 100
closh = "средне"
elif get_points(event.user_id) < 4000:
col = 4
max = 100
closh = "трудно"
else:
col = 4
max = 1000
closh = "очень трудно"
prim = rn.generate(1, max, col)
write_msg(event.user_id,
f"Игра \"Примеры\". Твоя задача - решить пример. \nСложность: {closh}. Ответ округляй до целого в меньшую сторону. \nПример: {prim}")
resh.insert(len(resh), event.user_id)
otvets.insert(len(otvets), eval(prim))
elif req_msg == "профиль":
ob_send = False
initis(event.user_id)
update_bases_game(event.user_id)
update_bases(event.user_id)
id_send = False
send_sendr = 0
ranking, ranked = get_rank(event.user_id)
points = get_points(event.user_id)
if points >= 10000:
ranked_more: str = "∞"
else:
ranked_more = str(ranks_points[ranked])
if ranking == "Непобедимый" and not have_pik(event.user_id, 8):
write_msg_pik(event.user_id, "Ты получил максимальный ранг в игре! Лови пикчу, которую можно получить только за это!\n\nСпасибо за ранг Непобедимый, большой вклад...", pikches.not_win)
add_pik(event.user_id, 8)
cursor.execute(f"""SELECT coins FROM users WHERE id={event.user_id}""")
write_msg_kb(event.user_id,
f"Профиль:\n1. Монеты: {cursor.fetchone()[0]} 💰\n2. Твой ранг: {ranking} 🌟\n3. Всего очков ранга: {points}/{ranked_more}.", kb_pik)
# elif req_msg == "бонус":
# ob_send = False
# initis(event.user_id)
# update_bases_game(event.user_id)
# update_bases(event.user_id)
# id_send = False
# send_sendr = 0
# write_msg(event.user_id,
# r"Бонус для beta тестировщиков или датамайнеров (везунчиков, которые написали боту во время теста) - 100 💰")
# cursor.execute(f"""SELECT bonus FROM users WHERE id={event.user_id}""")
# if cursor.fetchone()[0] == 0:
# cursor.execute(f"""UPDATE users SET bonus = 1 WHERE id={event.user_id}""")
# write_msg(event.user_id, r"Ты тоже получил бонус! - 100 💰")
# cursor.execute(f"""SELECT coins FROM users WHERE id={event.user_id}""")
# cursor.execute(
# f"""UPDATE users SET coins = {int(cursor.fetchone()[0]) + 100} WHERE id={event.user_id}""")
# conn.commit()
# else:
# write_msg(event.user_id, r"Что, захотел еще деньжат? Нее, бонус можно получить только раз!")
elif req_msg == "магазин":
write_msg_kb(event.user_id, "Магазин.\n\nЗдесь ты можешь купить рандомную пикчу из групп 7 параллель, 7б и Квазар.\n\nВ честь недавних событий ты можешь поддержать Алека, потратив 100 монет и получив его пикчу!\n\nДоступные пикчи для покупки:\n1. Алек (100 монет)\n2. Рандомная пикча! Может выпасть любая пикча из всех что есть. Шанс секретной: 1/1000 (100 монет)\n3. Взлом рандома пикч! Может выпасть любая пикча из всех что есть. Шанс секретной пикчи: 1/100 (10000 монет)\n4. Мега рандом пикча! ШАНС ВЫПАДЕНИЯ СЕКРЕТНОЙ ПИКЧИ 1/2 (100 000 монет)\n5. Пикча для богатых (10000 монет)", kb_shop)
elif req_msg == "мои пикчи":
text = "Твои пикчи:\n\n"
i = 0
i_2 = 0
for i in range(7):
if have_pik(event.user_id, i):
text = text + str(i+1) + ". " + pikches.pik_data[i] + "\n"
else:
text = text + str(i+1) + ". ????????" + "\n"
text = text + "\nСекретные пикчи либо супер редкие пикчи:\n\n"
for i_2 in range(3):
if have_pik(event.user_id, i_2 + len(pikches.pik_data)):
text = text + str(i_2+1) + ". " + pikches.pik_data_secret[i_2] + "\n"
else:
text = text + str(i_2+1) + ". ????????" + "\n"
text = text + "\nПродолжай собирать пикчи! Если ты собрал все, не радуйся, скоро я добавлю новые!"
write_msg(event.user_id, text)
elif req_msg == "пикча алека!":
if get_coins(event.user_id) >= 100:
write_msg_pik(event.user_id,
"Спасибо! Лови пикчу!", pikches.alek)
pik_id = 0
add_pik(event.user_id, pik_id)
add_coins(event.user_id, -100)
else:
write_msg(event.user_id, "Не хватает монет)")
elif req_msg == "пикча для богатых":
if get_coins(event.user_id) >= 10000:
write_msg_pik(event.user_id,
"Лови пикчу! На расстрел!", pikches.capit)
pik_id = 3
add_pik(event.user_id, pik_id)
add_coins(event.user_id, -10000)
else:
write_msg(event.user_id, "Не хватает монет")
elif req_msg == "рандом пикча!":
if get_coins(event.user_id) >= 100:
msg, pik, pik_id = get_random_pik(1000)
write_msg_pik(event.user_id, msg, pik)
add_pik(event.user_id, pik_id)
add_coins(event.user_id, -100)
else:
write_msg(event.user_id, "Не хватает монет.")
elif req_msg == "мега рандом пикч!":
if get_coins(event.user_id) >= 100000:
msg, pik, pik_id = get_random_pik(2)
write_msg_pik(event.user_id, msg, pik)
add_pik(event.user_id, pik_id)
add_coins(event.user_id, -100000)
else:
write_msg(event.user_id, "Не хватает монет. А ты че думаЛ. на такой рандом копить и копить надо!")
elif req_msg == "взлом рандома пикч!":
if get_coins(event.user_id) >= 10000:
msg, pik, pik_id = get_random_pik(100)
write_msg_pik(event.user_id, msg, pik)
add_pik(event.user_id, pik_id)
add_coins(event.user_id, -10000)
else:
write_msg(event.user_id,
"Не хватает монет. А ты че думаЛ. на такой рандом копить и копить надо!")
elif id_send:
ob_send = False
update_bases_game(event.user_id)
update_bases(event.user_id)
id_send = False
id_get = req_msg
# NOTE: id_get is raw user input; a parameterized query would be safer here
cursor.execute(f"""SELECT * FROM users WHERE id={id_get}""")
data = cursor.fetchall()
print(data)
try:
user_get = vk_conn.users.get(user_ids=id_get)[0]
full_name = user_get['first_name'] + ' ' + user_get['last_name']
except:
write_msg(event.user_id,
"Неверное ID. ВКонтакте не имеет пользователя с таким ID\n\nЕсли вы считаете, что это ошибка, то покажите код ошибки автору бота.\n[ERROR] 0x606")
break
try:
print(data[0])
for row in data:
write_msg(event.user_id,
f"Имя пользователя: {full_name}\nЗарегистрирован в системе.\nКоличество монет: {row[1]}\nСтатус бонуса:{row[2]}")
# id_get is the raw message string, so compare against the admin IDs as strings
if id_get in ("502085595", "545851228", "13122641"):
write_msg(event.user_id, "Является администратором")
except:
write_msg(event.user_id,
f"Имя пользователя: {full_name}\nНе зарегистрирован в системе.\n\nЕсли вы считаете, что это ошибка, то покажите код ошибки автору бота.\n[ERROR] 0x707")
elif ob_send:
update_bases_game(event.user_id)
update_bases(event.user_id)
ob_send = False
ob_get = req_msg_up
cursor.execute("""SELECT id FROM users""")
data = cursor.fetchall()
print(data)
try:
for x in data:
# fetchall() returns rows as tuples, so unpack the id column
if x[0] != 13122641:
write_msg(x[0], ob_get)
except:
write_msg(event.user_id,
"Неверное ID. ВКонтакте не имеет пользователя с таким ID\n\nЕсли вы считаете, что это ошибка, то покажите код ошибки автору бота.\n[ERROR] 0x606")
break
elif event.user_id in resh:
ob_send = False
num = resh.index(event.user_id)
resh.remove(event.user_id)
otv = int(otvets[num])
otvets.pop(num)
if req_msg == str(otv):
write_msg(event.user_id,
f"Молодец, ответ правильный! Ты получаешь 10 очков ранга и в придачу {col_coins * col_abs} 💰!")
add_coins(event.user_id, col_coins * col_abs)
add_points(event.user_id, 10)
else:
write_msg(event.user_id, "Неверный ответ... Ты теряешь 5 очков ранга. Попробуй позже.")
add_points(event.user_id, -5)
# pass the callable and its argument separately and start the thread;
# the original called game_event(event) inline and never ran a thread
th = threading.Thread(target=game_event, args=(event,))
th.start()
elif event.user_id in senders:
ob_send = False
update_bases_game(event.user_id)
id_send = False
cursor.execute(f"SELECT * FROM SENDS WHERE ID={event.user_id}")
if not cursor.fetchall():
pr = req_msg_up
send_sendr = 2
senders.remove(event.user_id)
senders_2.append(event.user_id)
write_msg(event.user_id, r"Введите текст заявки 📃:")
else:
write_msg(event.user_id,
"Нельзя подавать больше одной заявки (защита от спама). Как твою заявку рассмотрят, ты сможешь подать следующую.")
senders.remove(event.user_id)
elif event.user_id in senders_2:
ob_send = False
update_bases_game(event.user_id)
id_send = False
senders_2.remove(event.user_id)
sendr = req_msg_up
write_msg(event.user_id, r"Заявка принята 📃")
# NOTE: user-supplied text interpolated into SQL breaks on quotes and is
# injection-prone; a parameterized INSERT would be safer here
cursor.execute(
f"""INSERT INTO sends VALUES ({event.user_id}, '{str(pr)}', '{str(sendr)}')""")
conn.commit()
print(event.user_id, ":", pr, ":", sendr)
send_sendr = 0
sendr = ""
pr = ""
else:
initis(event.user_id)
parse = req_msg.split(' ')
if parse[0] == "!лихорадка":
if event.user_id in (502085595, 545851228, 13122641):
col_abs = int(parse[1])
write_msg(event.user_id, f"Коофицент лихорадки настроен на значение {col_abs}")
elif parse[0] == "!rollback" and event.user_id == 545851228:
write_msg(event.user_id, "Соединение с бд зарыто.")
cursor.execute("ROLLBACK")
elif parse[0] == "!ранг":
if event.user_id in (502085595, 545851228, 13122641):
add_points(int(parse[1]), int(parse[2]))
write_msg(event.user_id,
f"Пользователю с айди {parse[1]} добавлено {parse[2]} очков ранга.")
elif parse[0] == "!монеты":
if event.user_id in (502085595, 545851228, 13122641):
add_coins(int(parse[1]), int(parse[2]))
write_msg(event.user_id, f"Пользователю с айди {parse[1]} добавлено {parse[2]} монет.")
elif parse[0] == "!рассмотреть":
if event.user_id in (502085595, 545851228, 13122641):
cursor.execute(f"""DELETE from SENDS where ID={int(parse[1])};""")
conn.commit()
write_msg(event.user_id,
f"""Заявка от пользователя с ID {parse[1]} рассмотрена и удалена из бд.\nПользователь уведомлен о рассмотрении.""")
write_msg(int(parse[1]),
f"""Ваша заявка была рассмотрена и удалена из баз данных.""")
elif parse[0] == "!бан":
if event.user_id in (502085595, 545851228, 13122641):
try:
# avoid shadowing the builtin id() and drop the command + target tokens
ban_id = int(parse[1])
parse = parse[2:]
if ban_id == 545851228:
write_msg(event.user_id,
"Владимир Константинов является автором и полным обладателем бота, его нельзя банить!")
else:
write_msg(event.user_id, f"Пользователь с айди {ban_id} был забанен.")
ban_list.append(ban_id)
write_msg(ban_id,
f"""Вы были забанены! Сообщение от администраторов: {" ".join(parse)}""")
except:
write_msg(event.user_id, "Ошибка бана!")
elif parse[0] == "!разбан":
try:
ban_list.remove(int(parse[1]))
write_msg(event.user_id, "Пользователь разбанен")
write_msg(int(parse[1]), "Вы разбанены!")
except:
write_msg(event.user_id, "Ошибка разбана!")
elif parse[0] == "!секрет":
if parse[1] == "смерть_с_небес":
write_msg_pik(event.user_id,
"Вау, ты открыл одну из серетных пикч!\nШанс ее выпадения из рандом пикч 1/81!\n\nЛови пикчу!\nСупер редкая пикча!\n\nУдар с небес!!!",
pikches.vita)
if parse[1] == "история":
write_msg_pik(event.user_id,
"Вау, ты открыл одну из серетных пикч!\nШанс ее выпадения из рандом пикч 1/81!\n\nЛови пикчу!\nСупер редкая пикча!\n\nИстория с ПалМихом!!!",
pikches.hist)
add_pik(event.user_id, 9)
else:
write_msg(event.user_id,
"Не понятен запрос!\n\nЕсли вы считаете, что это ошибка, то покажите код ошибки автору бота.\n[ERROR] 0x001")
except Exception as err:
cursor.execute("ROLLBACK")
print(err)
print(senders)
print(senders_2)
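# --- Editor's note (illustrative sketch, not part of the original bot) ---
# The three paid-random branches above ("рандом пикча!", "мега рандом пикч!",
# "взлом рандома пикч!") repeat the same check/charge/deliver sequence, and
# the queries built with f-strings are open to SQL injection. A hypothetical
# consolidation could look like this (helper names and the sqlite3-style "?"
# placeholder are assumptions, not the author's code):

def buy_random_pik(user_id, price, secret_chance):
    """Charge `price` coins and deliver one random picture to `user_id`."""
    if get_coins(user_id) < price:
        write_msg(user_id, "Не хватает монет.")
        return
    msg, pik, pik_id = get_random_pik(secret_chance)
    write_msg_pik(user_id, msg, pik)
    add_pik(user_id, pik_id)
    add_coins(user_id, -price)

def get_user_row(user_id):
    """Parameterized lookup instead of f-string interpolation (assumed driver)."""
    cursor.execute("SELECT * FROM users WHERE id=?", (user_id,))
    return cursor.fetchall()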
| 52.513557 | 619 | 0.482051 | 3,522 | 32,926 | 4.324248 | 0.166667 | 0.07367 | 0.128562 | 0.045765 | 0.593894 | 0.552068 | 0.495666 | 0.461064 | 0.445174 | 0.418844 | 0 | 0.042214 | 0.424437 | 32,926 | 626 | 620 | 52.597444 | 0.761332 | 0.027668 | 0 | 0.497288 | 0 | 0.028933 | 0.208321 | 0.0088 | 0 | 0 | 0.000638 | 0 | 0 | 1 | 0.014467 | false | 0.001808 | 0.025316 | 0 | 0.039783 | 0.039783 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ce6a54147c2b061dfa129297800133ae6318cfa | 13,793 | py | Python | usaspending_api/awards/models/award.py | gaybro8777/usaspending-api | fe9d730acd632401bbbefa168e3d86d59560314b | [
"CC0-1.0"
] | null | null | null | usaspending_api/awards/models/award.py | gaybro8777/usaspending-api | fe9d730acd632401bbbefa168e3d86d59560314b | [
"CC0-1.0"
] | null | null | null | usaspending_api/awards/models/award.py | gaybro8777/usaspending-api | fe9d730acd632401bbbefa168e3d86d59560314b | [
"CC0-1.0"
] | null | null | null | from django.db import models
from django.db.models import Q
from usaspending_api.common.models import DataSourceTrackedModel
class AwardManager(models.Manager):
def get_queryset(self):
"""
A generated award will have these set to null, but will also receive no
transactions. Thus, these will remain null. This finds those awards and
throws them out. As soon as one of those awards gets a transaction
(i.e. it is no longer empty), these will be updated via update_from_transaction
and the award will no longer match these criteria
"""
q_kwargs = {"latest_transaction__isnull": True, "date_signed__isnull": True, "total_obligation__isnull": True}
return super(AwardManager, self).get_queryset().filter(~Q(**q_kwargs))
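# Illustrative usage (assumed): Award.nonempty excludes "empty" generated
# awards, while the default manager returns everything.
#   Award.objects.count()   # all awards, including empty generated ones
#   Award.nonempty.count()  # only awards that have received a transaction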
class Award(DataSourceTrackedModel):
"""
Model that provides a high-level award that individual transaction
data can be mapped to. Transactions (i.e., contract and financial assistance
data from the old USAspending site and D1/D2 files from the broker) represent
specific actions against an award, though the award records themselves aren't
assigned on the incoming data. To rectify that and to make the transactional
data easier for people to understand, we create Award objects and map
transactions to them.
Much of the award record data (for example, awarding_agency, funding_agency,
type) is automatically populated from info in the award's child transactions.
These updates happen in our USAspending and data act broker load processes:
see etl/award_helpers.py for details.
"""
id = models.BigAutoField(primary_key=True)
type = models.TextField(
db_index=True,
verbose_name="Award Type",
null=True,
help_text="The mechanism used to distribute funding. The federal government can distribute "
"funding in several forms. These award types include contracts, grants, loans, "
"and direct payments.",
)
type_description = models.TextField(
verbose_name="Award Type Description",
blank=True,
null=True,
help_text="The plain text description of the type of the award",
)
category = models.TextField(
db_index=True, verbose_name="Category", null=True, help_text="A field that generalizes the award's type."
)
piid = models.TextField(
db_index=True,
blank=True,
null=True,
help_text="Procurement Instrument Identifier - A unique identifier assigned to a federal "
"contract, purchase order, basic ordering agreement, basic agreement, and "
"blanket purchase agreement. It is used to track the contract, and any "
"modifications or transactions related to it. After October 2017, it is "
"between 13 and 17 digits, both letters and numbers.",
)
fpds_agency_id = models.TextField(blank=True, null=True)
fpds_parent_agency_id = models.TextField(blank=True, null=True)
fain = models.TextField(
db_index=True,
blank=True,
null=True,
help_text="An identification code assigned to each financial assistance award tracking "
"purposes. The FAIN is tied to that award (and all future modifications to that "
"award) throughout the award's life. Each FAIN is assigned by an agency. Within "
"an agency, FAIN are unique: each new award must be issued a new FAIN. FAIN "
"stands for Federal Award Identification Number, though the digits are letters, "
"not numbers.",
)
uri = models.TextField(db_index=True, blank=True, null=True, help_text="The uri of the award")
total_obligation = models.DecimalField(
max_digits=23,
db_index=True,
decimal_places=2,
null=True,
verbose_name="Total Obligated",
help_text="The amount of money the government is obligated to pay for the award",
)
total_subsidy_cost = models.DecimalField(
max_digits=23,
decimal_places=2,
null=True,
blank=True,
help_text="The total of the original_loan_subsidy_cost from associated transactions",
)
total_loan_value = models.DecimalField(
max_digits=23,
decimal_places=2,
null=True,
blank=True,
help_text="The total of the face_value_loan_guarantee from associated transactions",
)
awarding_agency = models.ForeignKey(
"references.Agency", related_name="+", null=True, help_text="The awarding agency for the award", db_index=True
)
funding_agency = models.ForeignKey(
"references.Agency", related_name="+", null=True, help_text="The funding agency for the award", db_index=True
)
date_signed = models.DateField(
null=True, db_index=False, verbose_name="Award Date", help_text="The date the award was signed"
)
recipient = models.ForeignKey(
"references.LegalEntity", null=True, help_text="The recipient of the award", db_index=True
)
description = models.TextField(null=True, verbose_name="Award Description", help_text="A description of the award")
period_of_performance_start_date = models.DateField(
null=True, db_index=True, verbose_name="Start Date", help_text="The start date for the period of performance"
)
period_of_performance_current_end_date = models.DateField(
null=True,
db_index=True,
verbose_name="End Date",
help_text="The current, not original, period of performance end date",
)
place_of_performance = models.ForeignKey(
"references.Location",
null=True,
help_text="The principal place of business, where the majority of the "
"work is performed. For example, in a manufacturing contract, "
"this would be the main plant where items are produced.",
db_index=True,
)
base_and_all_options_value = models.DecimalField(
max_digits=23,
db_index=False,
decimal_places=2,
blank=True,
null=True,
verbose_name="Base and All Options Value",
help_text="The sum of the base_and_all_options_value from associated transactions",
)
base_exercised_options_val = models.DecimalField(
max_digits=23,
decimal_places=2,
blank=True,
null=True,
verbose_name="Combined Base and Exercised Options",
help_text="The sum of the base_exercised_options_val from associated transactions",
)
last_modified_date = models.DateField(blank=True, null=True, help_text="The date this award was last modified")
certified_date = models.DateField(blank=True, null=True, help_text="The date this record was certified")
create_date = models.DateTimeField(
auto_now_add=True, blank=True, null=True, help_text="The date this record was created in the API"
)
update_date = models.DateTimeField(
auto_now=True, null=True, help_text="The last time this record was updated in the API"
)
latest_transaction = models.ForeignKey(
"awards.TransactionNormalized",
related_name="latest_for_award",
null=True,
help_text="The latest transaction by action_date and mod associated with this award",
)
earliest_transaction = models.ForeignKey(
"awards.TransactionNormalized",
related_name="earliest_for_award",
null=True,
help_text="The earliest transaction by action_date and mod associated with this award",
)
parent_award_piid = models.TextField(
db_index=True, null=True, verbose_name="Parent Award Piid", help_text="The piid of the Award's parent Award"
)
# As part of DEV-2504, generated_unique_award_id now contains the
# unique_award_key value from Broker rather than being generated during
# the nightly pipeline. It serves the exact same purpose, but renaming
# the column would have been significantly more disruptive and has been
# saved for a future improvement.
generated_unique_award_id = models.TextField(
blank=False, null=False, default="NONE", verbose_name="Generated Unique Award ID"
)
is_fpds = models.BooleanField(blank=False, null=False, default=False, verbose_name="Is FPDS")
transaction_unique_id = models.TextField(
blank=False, null=False, default="NONE", verbose_name="Transaction Unique ID"
)
total_funding_amount = models.DecimalField(
max_digits=23,
decimal_places=2,
blank=True,
null=True,
help_text="A summation of this award's transactions' funding amount",
)
non_federal_funding_amount = models.DecimalField(
max_digits=23,
decimal_places=2,
null=True,
blank=True,
help_text="A summation of this award's transactions' non-federal funding amount",
)
fiscal_year = models.IntegerField(blank=True, null=True, help_text="Fiscal Year calculated based on Action Date")
# Subaward aggregates
total_subaward_amount = models.DecimalField(max_digits=23, decimal_places=2, null=True)
subaward_count = models.IntegerField(default=0)
officer_1_name = models.TextField(null=True, blank=True, help_text="Executive Compensation Officer 1 Name")
officer_1_amount = models.DecimalField(
max_digits=23, decimal_places=2, blank=True, null=True, help_text="Executive Compensation Officer 1 Amount"
)
officer_2_name = models.TextField(null=True, blank=True, help_text="Executive Compensation Officer 2 Name")
officer_2_amount = models.DecimalField(
max_digits=23, decimal_places=2, blank=True, null=True, help_text="Executive Compensation Officer 2 Amount"
)
officer_3_name = models.TextField(null=True, blank=True, help_text="Executive Compensation Officer 3 Name")
officer_3_amount = models.DecimalField(
max_digits=23, decimal_places=2, blank=True, null=True, help_text="Executive Compensation Officer 3 Amount"
)
officer_4_name = models.TextField(null=True, blank=True, help_text="Executive Compensation Officer 4 Name")
officer_4_amount = models.DecimalField(
max_digits=23, decimal_places=2, blank=True, null=True, help_text="Executive Compensation Officer 4 Amount"
)
officer_5_name = models.TextField(null=True, blank=True, help_text="Executive Compensation Officer 5 Name")
officer_5_amount = models.DecimalField(
max_digits=23, decimal_places=2, blank=True, null=True, help_text="Executive Compensation Officer 5 Amount"
)
objects = models.Manager()
nonempty = AwardManager()
def __str__(self):
return "%s piid: %s fain: %s uri: %s" % (self.type_description, self.piid, self.fain, self.uri)
@staticmethod
def get_or_create_summary_award(
awarding_agency=None,
piid=None,
fain=None,
uri=None,
parent_award_piid=None,
save=True,
record_type=None,
generated_unique_award_id=None,
):
"""
Given a set of award identifiers and awarding agency information,
find a corresponding Award record. If we can't find one, create it.
Returns:
created: a list of new awards created (or that need to be created if using cache) used to enable bulk insert
summary_award: the summary award that the calling process can map to
"""
try:
# Contract data uses piid as transaction ID. Financial assistance data depends on the record_type and
# uses either uri (record_type=1) or fain (record_type=2 or 3).
lookup_value = (piid, "piid")
if record_type:
if str(record_type) in ("2", "3"):
lookup_value = (fain, "fain")
else:
lookup_value = (uri, "uri")
if generated_unique_award_id:
# Use the generated unique ID if available
lookup_kwargs = {"generated_unique_award_id": generated_unique_award_id}
else:
# Use the lookup_value if the generated unique ID is not available
lookup_kwargs = {"awarding_agency": awarding_agency, lookup_value[1]: lookup_value[0]}
# Look for an existing award record
summary_award = Award.objects.filter(Q(**lookup_kwargs)).first()
if summary_award:
return [], summary_award
# Now create the award record for this award transaction
create_kwargs = {
"awarding_agency": awarding_agency,
"parent_award_piid": parent_award_piid,
lookup_value[1]: lookup_value[0],
}
if generated_unique_award_id:
create_kwargs["generated_unique_award_id"] = generated_unique_award_id
if generated_unique_award_id.startswith("CONT_"):
create_kwargs["is_fpds"] = True
summary_award = Award(**create_kwargs)
if save:
summary_award.save()
return [summary_award], summary_award
# Do not use bare except
except ValueError:
raise ValueError(
"Unable to find or create an award with the provided information: piid={}, fain={}, uri={}, "
"parent_award_piid={}, awarding_agency={}, generated_unique_award_id={}".format(
piid, fain, uri, parent_award_piid, awarding_agency, generated_unique_award_id
)
)
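# Illustrative call (values are hypothetical): for assistance records with
# record_type 2 or 3 the lookup key is fain; generated_unique_award_id, when
# present, takes precedence over the (awarding_agency, piid/fain/uri) pair.
#   created, award = Award.get_or_create_summary_award(
#       awarding_agency=agency, fain="12ABC34567", record_type=2)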
class Meta:
db_table = "awards"
indexes = [
models.Index(fields=["-update_date"], name="awards_update_date_desc_idx"),
models.Index(fields=["generated_unique_award_id"], name="award_unique_id"),
]
| 45.222951 | 120 | 0.677228 | 1,774 | 13,793 | 5.085682 | 0.209696 | 0.037242 | 0.041233 | 0.040789 | 0.39581 | 0.34959 | 0.330747 | 0.278652 | 0.269009 | 0.252051 | 0 | 0.007954 | 0.243457 | 13,793 | 304 | 121 | 45.371711 | 0.856636 | 0.153049 | 0 | 0.207469 | 0 | 0 | 0.31018 | 0.033209 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012448 | false | 0 | 0.012448 | 0.004149 | 0.257261 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cecbd213c788f59eea4e1b8e8c27d05b369fa1e | 10,957 | py | Python | tardis/plasma/properties/nlte.py | wkerzendorf/tardis | c9e35423738f3bf6741aa5e89d4e1f3b45033708 | [
"BSD-3-Clause"
] | 1 | 2016-03-24T13:14:25.000Z | 2016-03-24T13:14:25.000Z | tardis/plasma/properties/nlte.py | wkerzendorf/tardis | c9e35423738f3bf6741aa5e89d4e1f3b45033708 | [
"BSD-3-Clause"
] | 6 | 2015-03-16T10:31:40.000Z | 2019-02-21T17:56:55.000Z | tardis/plasma/properties/nlte.py | wkerzendorf/tardis | c9e35423738f3bf6741aa5e89d4e1f3b45033708 | [
"BSD-3-Clause"
] | 5 | 2015-03-17T18:56:20.000Z | 2019-02-12T12:53:15.000Z | import logging
import os
import numpy as np
import pandas as pd
from tardis.plasma.properties.base import (PreviousIterationProperty,
ProcessingPlasmaProperty)
from tardis.plasma.properties import PhiSahaNebular, PhiSahaLTE
__all__ = ['PreviousElectronDensities', 'PreviousBetaSobolev',
'HeliumNLTE', 'HeliumNumericalNLTE']
logger = logging.getLogger(__name__)
class PreviousElectronDensities(PreviousIterationProperty):
outputs = ('previous_electron_densities',)
def set_initial_value(self, kwargs):
initial_value = np.ones(len(kwargs['abundance'].columns))*1000000.0
self._set_initial_value(initial_value)
class PreviousBetaSobolev(PreviousIterationProperty):
outputs = ('previous_beta_sobolev',)
def set_initial_value(self, kwargs):
try:
lines = len(kwargs['atomic_data'].lines)
except AttributeError:
lines = len(kwargs['atomic_data']._lines)
initial_value = np.ones((lines,
len(kwargs['abundance'].columns)))
self._set_initial_value(initial_value)
class HeliumNLTE(ProcessingPlasmaProperty):
outputs = ('helium_population',)
def calculate(self, level_boltzmann_factor, electron_densities,
ionization_data, beta_rad, g, g_electron, w, t_rad, t_electrons,
delta, zeta_data, number_density, partition_function):
helium_population = level_boltzmann_factor.ix[2].copy()
# He I excited states
he_one_population = self.calculate_helium_one(g_electron, beta_rad,
partition_function, ionization_data, level_boltzmann_factor,
electron_densities, g, w, t_rad, t_electrons)
helium_population.ix[0].update(he_one_population)
#He I metastable states
helium_population.ix[0].ix[1] *= (1 / w)
helium_population.ix[0].ix[2] *= (1 / w)
#He I ground state
helium_population.ix[0].ix[0] = 0.0
#He II excited states
he_two_population = level_boltzmann_factor.ix[2,1].mul(
(g.ix[2,1].ix[0]**(-1)))
helium_population.ix[1].update(he_two_population)
#He II ground state
helium_population.ix[1].ix[0] = 1.0
#He III states
helium_population.ix[2].ix[0] = self.calculate_helium_three(t_rad, w,
zeta_data, t_electrons, delta, g_electron, beta_rad,
partition_function, ionization_data, electron_densities)
unnormalised = helium_population.sum()
normalised = helium_population.mul(number_density.ix[2] / unnormalised)
helium_population.update(normalised)
return helium_population
@staticmethod
def calculate_helium_one(g_electron, beta_rad, partition_function,
ionization_data, level_boltzmann_factor, electron_densities, g,
w, t_rad, t_electron):
(partition_function_index, ionization_data_index, partition_function,
ionization_data) = HeliumNLTE.filter_with_helium_index(2, 1,
partition_function, ionization_data)
phis = (1 / PhiSahaLTE.calculate(g_electron, beta_rad,
partition_function, ionization_data)) * electron_densities * \
(1.0/g.ix[2,1,0]) * (1/w) * (t_rad/t_electron)**(0.5)
return level_boltzmann_factor.ix[2].ix[0].mul(
pd.DataFrame(phis.ix[2].ix[1].values)[0].transpose())
@staticmethod
def calculate_helium_three(t_rad, w, zeta_data, t_electrons, delta,
g_electron, beta_rad, partition_function, ionization_data,
electron_densities):
(partition_function_index, ionization_data_index, partition_function,
ionization_data) = HeliumNLTE.filter_with_helium_index(2, 2,
partition_function, ionization_data)
zeta_data = pd.DataFrame(zeta_data.ix[2].ix[2].values,
columns=ionization_data_index, index=zeta_data.columns).transpose()
delta = pd.DataFrame(delta.ix[2].ix[2].values,
columns=ionization_data_index, index=delta.columns).transpose()
phis = PhiSahaNebular.calculate(t_rad, w,
zeta_data, t_electrons, delta, g_electron,
beta_rad, partition_function, ionization_data)
return (phis * (partition_function.ix[2].ix[1] /
partition_function.ix[2].ix[2]) * (1 /
electron_densities)).ix[2].ix[2]
@staticmethod
def filter_with_helium_index(atomic_number, ion_number, partition_function,
ionization_data):
partition_function_index = pd.MultiIndex.from_tuples([(atomic_number,
ion_number-1), (atomic_number, ion_number)],
names=['atomic_number', 'ion_number'])
ionization_data_index = pd.MultiIndex.from_tuples([(atomic_number,
ion_number)],
names=['atomic_number', 'ion_number'])
partition_function = pd.DataFrame(
partition_function.ix[atomic_number].ix[
ion_number-1:ion_number].values,
index=partition_function_index, columns=partition_function.columns)
ionization_data = pd.DataFrame(
ionization_data.ix[atomic_number].ix[ion_number][
'ionization_energy'], index=ionization_data_index,
columns=['ionization_energy'])
return partition_function_index, ionization_data_index,\
partition_function, ionization_data
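# Illustrative result (indices only): filter_with_helium_index(2, 1, ...)
# reindexes the partition-function frame on [(2, 0), (2, 1)] and the
# ionization data on [(2, 1)], i.e. the neutral/singly-ionized helium pair
# needed by the Saha factor above.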
class HeliumNumericalNLTE(ProcessingPlasmaProperty):
outputs = ('helium_population',)
'''
IMPORTANT: This particular property requires a specific numerical NLTE
solver and a specific atomic dataset (neither of which are distributed
with Tardis) to work.
'''
def calculate(self, ion_number_density, electron_densities, t_electrons, w,
lines, j_blues, levels, level_boltzmann_factor, t_rad,
zeta_data, g_electron, delta, partition_function, ionization_data,
beta_rad, g):
logger.info('Performing numerical NLTE He calculations.')
if len(j_blues) == 0:
return None
heating_rate_data = np.loadtxt(
self.plasma_parent.heating_rate_data_file, unpack=True)
#Outputting data required by SH module
for zone, _ in enumerate(electron_densities):
with open('He_NLTE_Files/shellconditions_{}.txt'.format(zone),
'w') as output_file:
# file.write() takes strings only, so format one value per line
output_file.write('{}\n'.format(ion_number_density.ix[2].sum()[zone]))
output_file.write('{}\n'.format(electron_densities[zone]))
output_file.write('{}\n'.format(t_electrons[zone]))
output_file.write('{}\n'.format(heating_rate_data[zone]))
output_file.write('{}\n'.format(w[zone]))
output_file.write('{}\n'.format(self.plasma_parent.time_explosion))
output_file.write('{}\n'.format(t_rad[zone]))
output_file.write('{}\n'.format(self.plasma_parent.v_inner[zone]))
output_file.write('{}\n'.format(self.plasma_parent.v_outer[zone]))
for zone, _ in enumerate(electron_densities):
with open('He_NLTE_Files/abundances_{}.txt'.format(zone), 'w') as \
output_file:
for element in range(1, 31):
try:
number_density = ion_number_density[zone].ix[
element].sum()
except (KeyError, IndexError):
number_density = 0.0
output_file.write('{}\n'.format(number_density))
helium_lines = lines[lines['atomic_number']==2]
helium_lines = helium_lines[helium_lines['ion_number']==0]
for zone, _ in enumerate(electron_densities):
with open('He_NLTE_Files/discradfield_{}.txt'.format(zone), 'w') \
as output_file:
j_blues = pd.DataFrame(j_blues, index=lines.index)
helium_j_blues = j_blues[zone].ix[helium_lines.index]
for value in helium_lines.index:
if helium_lines.level_number_lower.ix[value] < 35:
# file.write() takes a single string; use the helium_j_blues
# slice computed above for the radiation-field value
output_file.write('{} {} {}\n'.format(
int(helium_lines.level_number_lower.ix[value] + 1),
int(helium_lines.level_number_upper.ix[value] + 1),
helium_j_blues.ix[value]))
#Running numerical simulations
for zone, _ in enumerate(electron_densities):
# filenames must match the "<name>_<zone>.txt" pattern written above
os.rename('He_NLTE_Files/abundances_{}.txt'.format(zone),
'He_NLTE_Files/abundances_current.txt')
os.rename('He_NLTE_Files/shellconditions_{}.txt'.format(zone),
'He_NLTE_Files/shellconditions_current.txt')
os.rename('He_NLTE_Files/discradfield_{}.txt'.format(zone),
'He_NLTE_Files/discradfield_current.txt')
os.system("nlte-solver-module/bin/nlte_solvertest >/dev/null")
os.rename('He_NLTE_Files/abundances_current.txt',
'He_NLTE_Files/abundances_{}.txt'.format(zone))
os.rename('He_NLTE_Files/shellconditions_current.txt',
'He_NLTE_Files/shellconditions_{}.txt'.format(zone))
os.rename('He_NLTE_Files/discradfield_current.txt',
'He_NLTE_Files/discradfield_{}.txt'.format(zone))
os.rename('debug_occs.dat', 'He_NLTE_Files/occs_{}.txt'.format(zone))
#Reading in populations from files
helium_population = level_boltzmann_factor.ix[2].copy()
for zone, _ in enumerate(electron_densities):
# level populations come from the solver's occupation-number output,
# not from the radiation-field (discradfield) input file
with open('He_NLTE_Files/occs_{}.txt'.format(zone), 'r') as \
read_file:
for level in range(0, 35):
level_population = read_file.readline()
level_population = float(level_population)
helium_population[zone].ix[0][level] = level_population
helium_population[zone].ix[1].ix[0] = float(
read_file.readline())
#Performing He LTE level populations (upper two energy levels,
#He II excited states, He III)
he_one_population = HeliumNLTE.calculate_helium_one(g_electron,
beta_rad, partition_function, ionization_data,
level_boltzmann_factor, electron_densities, g, w, t_rad,
t_electrons)
helium_population.ix[0].ix[35].update(he_one_population.ix[35])
helium_population.ix[0].ix[36].update(he_one_population.ix[36])
he_two_population = level_boltzmann_factor.ix[2].ix[1].ix[1:].mul(
(g.ix[2,1,0]**(-1)) * helium_population.ix[1, 0])
helium_population.ix[1].ix[1:].update(he_two_population)
helium_population.ix[2].ix[0] = HeliumNLTE.calculate_helium_three(
t_rad, w, zeta_data, t_electrons, delta, g_electron, beta_rad,
partition_function, ionization_data, electron_densities)
unnormalised = helium_population.sum()
normalised = helium_population.mul(ion_number_density.ix[2].sum()
/ unnormalised)
helium_population.update(normalised)
return helium_population | 50.031963 | 80 | 0.64598 | 1,289 | 10,957 | 5.19007 | 0.14585 | 0.062182 | 0.027952 | 0.069507 | 0.576682 | 0.507324 | 0.403886 | 0.330194 | 0.265919 | 0.240658 | 0 | 0.013301 | 0.252076 | 10,957 | 219 | 81 | 50.031963 | 0.803051 | 0.027197 | 0 | 0.183784 | 0 | 0 | 0.09435 | 0.066055 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037838 | false | 0 | 0.032432 | 0 | 0.145946 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cf109436cb36774ac8ff63c31066a80fc2c5bfc | 3,474 | py | Python | scripts/outgoing/wxc_iemstage.py | trentford/iem | 7264d24f2d79a3cd69251a09758e6531233a732f | [
"MIT"
] | 1 | 2019-10-07T17:01:24.000Z | 2019-10-07T17:01:24.000Z | scripts/outgoing/wxc_iemstage.py | trentford/iem | 7264d24f2d79a3cd69251a09758e6531233a732f | [
"MIT"
] | null | null | null | scripts/outgoing/wxc_iemstage.py | trentford/iem | 7264d24f2d79a3cd69251a09758e6531233a732f | [
"MIT"
] | null | null | null | """Produce a WXC formatted file with stage information included!"""
import os
import sys
import subprocess
import datetime
import tempfile
import psycopg2.extras
from pyiem.util import get_dbconn
def main():
"""Go Main Go"""
pgconn = get_dbconn('iem', user='nobody')
icursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
state = sys.argv[1]
fn = tempfile.mktemp()
ldmname = "wxc_iemstage_%s.txt" % (state.lower(),)
fh = open(fn, 'w')
fh.write("""Weather Central 001d0300 Surface Data TimeStamp=%s
15
5 Station
64 Stage Location Name
2 Day
4 Hour
7 Lat
7 Lon
10 Current Stage
10 Sig Stage Low
10 Sig Stage Action
10 Sig Stage Bankfull
10 Sig Stage Flood
10 Sig Stage Moderate
10 Sig Stage Major
10 Sig Stage Record
10 Sig Stage Text
""" % (datetime.datetime.now().strftime("%Y.%m.%d.%H%M"), ))
def compute_text(row):
""" Generate text of what this current stage is """
stage = row['value']
for s in ['Record', 'Major', 'Moderate', 'Flood', 'Bankfull',
'Action']:
if (row['ss_'+s.lower()] != 'M' and
float(row['ss_'+s.lower()]) < stage):
return s
return 'Normal'
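# e.g. (hypothetical row) value=12.4 with ss_flood=10 and ss_moderate=14
# returns 'Flood': the first significant stage the reading exceeds wins.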
icursor.execute("""
SELECT c.value, c.source, ST_x(geom) as lon, ST_y(geom) as lat, name,
station, valid,
case when sigstage_low is null then 'M'
else sigstage_low::text end as ss_low,
case when sigstage_action is null then 'M'
else sigstage_action::text end as ss_action,
case when sigstage_bankfull is null then 'M'
else sigstage_bankfull::text end as ss_bankfull,
case when sigstage_flood is null then 'M'
else sigstage_flood::text end as ss_flood,
case when sigstage_moderate is null then 'M'
else sigstage_moderate::text end as ss_moderate,
case when sigstage_major is null then 'M'
else sigstage_major::text end as ss_major,
case when sigstage_record is null then 'M'
else sigstage_record::text end as ss_record,
case when physical_code = 'HG' then 1 else 0 end as rank
from current_shef c JOIN stations s on (c.station = s.id) WHERE
s.network in ('%s_DCP') and c.valid > now() - '4 hours'::interval
and c.physical_code in ('HG','HP', 'HT') and c.duration = 'I'
and c.extremum = 'Z' ORDER by rank DESC
""" % (state,))
used = []
for row in icursor:
nwsli = row['station']
if row['source'] in ['R2', 'R3', 'R4', 'R5', 'R6', 'R7', 'R8', 'R9']:
continue
if nwsli in used:
continue
used.append(nwsli)
fh.write(("%5s %-64.64s %02i %s %-7.2f %-7.2f %-10.2f %-10.10s "
"%-10.10s %-10.10s %-10.10s %-10.10s %-10.10s %-10.10s "
"%-10.10s\n"
) % (row['station'], row['name'], row['valid'].day,
row['valid'].strftime("%H%M"),
row['lat'], row['lon'], row['value'],
row['ss_low'], row['ss_action'], row['ss_bankfull'],
row['ss_flood'], row['ss_moderate'], row['ss_major'],
row['ss_record'], compute_text(row)))
fh.close()
pqstr = "data c 000000000000 wxc/%s bogus text" % (ldmname,)
cmd = "/home/ldm/bin/pqinsert -p '%s' %s" % (pqstr, fn)
subprocess.call(cmd, shell=True)
os.remove(fn)
if __name__ == '__main__':
main()
| 33.728155 | 77 | 0.58175 | 498 | 3,474 | 3.955823 | 0.343373 | 0.022843 | 0.040609 | 0.039086 | 0.10203 | 0.10203 | 0.020305 | 0.020305 | 0.020305 | 0.020305 | 0 | 0.042434 | 0.280944 | 3,474 | 102 | 78 | 34.058824 | 0.746197 | 0.033679 | 0 | 0.022989 | 0 | 0.045977 | 0.558982 | 0.032934 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022989 | false | 0 | 0.08046 | 0 | 0.126437 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cf3d89920307a3429938fc36adb3816d2e6b7df | 1,079 | py | Python | Source/sprites/window.py | LucXyMan/starseeker | b5c3365514c982734da7d95621e6b85af550ce82 | [
"BSD-3-Clause"
] | null | null | null | Source/sprites/window.py | LucXyMan/starseeker | b5c3365514c982734da7d95621e6b85af550ce82 | [
"BSD-3-Clause"
] | null | null | null | Source/sprites/window.py | LucXyMan/starseeker | b5c3365514c982734da7d95621e6b85af550ce82 | [
"BSD-3-Clause"
] | 1 | 2019-11-27T18:00:00.000Z | 2019-11-27T18:00:00.000Z | #!/usr/bin/env python2.7
# -*- coding:UTF-8 -*-2
u"""window.py
Copyright (c) 2019 Yukio Kuro
This software is released under BSD license.
ゲームウィンドウモジュール。
"""
import pygame as _pygame
class Window(_pygame.sprite.DirtySprite):
u"""ゲームウィンドウ。
"""
def __init__(self, pos, image, groups=None):
u"""コンストラクタ。
"""
import decorator as __decorator
super(Window, self).__init__(
(self.group, self.draw_group) if groups is None else groups)
self.image = image
self.image.set_colorkey(_pygame.Color("0x000000"))
self.rect = self.image.get_rect()
self.rect.topleft = pos
self._decoration = 0b1111
self._is_light = False
__decorator.set_decorator(self)
@property
def decoration(self):
u"""デコレータ状態取得。
"""
return self._decoration
@property
def is_light(self):
u"""発光判定。
"""
return self._is_light
@is_light.setter
def is_light(self, value):
u"""発光設定。
"""
self._is_light = bool(value)
| 22.957447 | 72 | 0.595922 | 130 | 1,079 | 4.715385 | 0.5 | 0.068516 | 0.053834 | 0.045677 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02584 | 0.282669 | 1,079 | 46 | 73 | 23.456522 | 0.76615 | 0.21316 | 0 | 0.071429 | 0 | 0 | 0.009804 | 0 | 0 | 0 | 0.009804 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.071429 | 0 | 0.321429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cf45fdcf28d4770369f4d8a1e3723d5b1b248b4 | 2,384 | py | Python | custom_components/enedis/__init__.py | Cyr-ius/hass-enedis | ca095e5c6d4f16205adf13a028344fa6da263e12 | [
"MIT"
] | null | null | null | custom_components/enedis/__init__.py | Cyr-ius/hass-enedis | ca095e5c6d4f16205adf13a028344fa6da263e12 | [
"MIT"
] | null | null | null | custom_components/enedis/__init__.py | Cyr-ius/hass-enedis | ca095e5c6d4f16205adf13a028344fa6da263e12 | [
"MIT"
] | null | null | null | """The Enedis integration."""
from __future__ import annotations
import logging
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_TOKEN
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from homeassistant.helpers.typing import ConfigType
from .const import CONF_PDL, COORDINATOR, DOMAIN, PLATFORMS, UNDO_LISTENER
from .enediscoordinator import EnedisDataUpdateCoordinator
from .enedisgateway import EnedisGateway
CONFIG_SCHEMA = vol.Schema({vol.Optional(DOMAIN): {}}, extra=vol.ALLOW_EXTRA)
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the Enedis integration."""
return True
async def async_setup_entry(hass, config_entry):
"""Set up Enedis as config entry."""
hass.data.setdefault(DOMAIN, {})
pdl = config_entry.data.get(CONF_PDL)
token = config_entry.data.get(CONF_TOKEN)
session = async_create_clientsession(hass)
enedis = EnedisGateway(pdl=pdl, token=token, session=session)
coordinator = EnedisDataUpdateCoordinator(hass, config_entry, enedis)
await coordinator.async_config_entry_first_refresh()
if coordinator.data is None:
return False
undo_listener = config_entry.add_update_listener(_async_update_listener)
hass.data[DOMAIN][config_entry.entry_id] = {
COORDINATOR: coordinator,
CONF_PDL: pdl,
UNDO_LISTENER: undo_listener,
}
hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)
async def async_reload_history(call) -> None:
await coordinator.async_load_datas_history(call)
hass.services.async_register(
DOMAIN, "reload_history", async_reload_history, schema=vol.Schema({})
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(
config_entry, PLATFORMS
)
if unload_ok:
hass.data[DOMAIN][config_entry.entry_id][UNDO_LISTENER]()
hass.data[DOMAIN].pop(config_entry.entry_id)
return True
async def _async_update_listener(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Handle options update."""
await hass.config_entries.async_reload(entry.entry_id)
| 30.961039 | 82 | 0.756711 | 291 | 2,384 | 5.931271 | 0.271478 | 0.089224 | 0.037659 | 0.031286 | 0.133835 | 0.03708 | 0.03708 | 0 | 0 | 0 | 0 | 0 | 0.156879 | 2,384 | 76 | 83 | 31.368421 | 0.858706 | 0.009648 | 0 | 0.0625 | 0 | 0 | 0.006286 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.229167 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cf79babdf5bb56bd514b86c1c627848e5f8e6c6 | 4,079 | py | Python | pywikibot/data/wikistats.py | Partlo/RoboCade | 89c49b3f793b96aeb9e75672fd150872eb52aa11 | [
"MIT"
] | null | null | null | pywikibot/data/wikistats.py | Partlo/RoboCade | 89c49b3f793b96aeb9e75672fd150872eb52aa11 | [
"MIT"
] | null | null | null | pywikibot/data/wikistats.py | Partlo/RoboCade | 89c49b3f793b96aeb9e75672fd150872eb52aa11 | [
"MIT"
] | null | null | null | """Objects representing WikiStats API."""
#
# (C) Pywikibot team, 2014-2020
#
# Distributed under the terms of the MIT license.
#
from csv import DictReader
from io import StringIO
from typing import Optional
import pywikibot
from pywikibot.comms import http
from pywikibot.tools import remove_last_args
class WikiStats:
"""
Light wrapper around WikiStats data, caching responses and data.
The methods accept a Pywikibot family name as the WikiStats table name,
mapping the names before calling the WikiStats API.
"""
FAMILY_MAPPING = {
'wikipedia': 'wikipedias',
'wikiquote': 'wikiquotes',
'wikisource': 'wikisources',
'wiktionary': 'wiktionaries',
}
MISC_SITES_TABLE = 'mediawikis'
WMF_MULTILANG_TABLES = {
'wikipedias', 'wiktionaries', 'wikisources', 'wikinews',
'wikibooks', 'wikiquotes', 'wikivoyage', 'wikiversity',
}
OTHER_MULTILANG_TABLES = {
'uncyclomedia',
'rodovid',
'wikifur',
'wikitravel',
'scoutwiki',
'opensuse',
'metapedias',
'lxde',
'pardus',
'gentoo',
}
OTHER_TABLES = {
# Farms
'wikia',
'wikkii',
'wikisite',
'editthis',
'orain',
'shoutwiki',
'referata',
# Single purpose/manager sets
'wmspecials',
'gamepedias',
'w3cwikis',
'neoseeker',
'sourceforge',
}
ALL_TABLES = ({MISC_SITES_TABLE} | WMF_MULTILANG_TABLES
| OTHER_MULTILANG_TABLES | OTHER_TABLES)
ALL_KEYS = set(FAMILY_MAPPING.keys()) | ALL_TABLES
def __init__(self, url='https://wikistats.wmcloud.org/') -> None:
"""Initializer."""
self.url = url
self._data = {}
@remove_last_args(['format'])
def get(self, table: str) -> list:
"""Get a list of a table of data.
:param table: table of data to fetch
"""
if table in self._data:
return self._data[table]
if table not in self.ALL_KEYS:
pywikibot.warning('WikiStats unknown table ' + table)
table = self.FAMILY_MAPPING.get(table, table)
path = '/api.php?action=dump&table={table}&format=csv'
url = self.url + path
r = http.fetch(url.format(table=table))
f = StringIO(r.text)
reader = DictReader(f)
data = list(reader)
self._data[table] = data
return data
@remove_last_args(['format'])
def get_dict(self, table: str) -> dict:
"""Get dictionary of a table of data.
:param table: table of data to fetch
"""
return {data['prefix']: data for data in self.get(table)}
def sorted(self, table: str, key: str,
reverse: Optional[bool] = None) -> list:
"""
Reverse numerical sort of data.
:param table: name of table of data
:param key: data table key
:param reverse: If set to True the sorting order is reversed.
If None the sorting order for numeric keys are reversed whereas
alphanumeric keys are sorted in normal way.
:return: The sorted table
"""
table = self.get(table)
# take the first entry to determine the sorting key
first_entry = table[0]
if first_entry[key].isdigit():
sort_key = lambda d: int(d[key]) # noqa: E731
reverse = reverse if reverse is not None else True
else:
sort_key = lambda d: d[key] # noqa: E731
reverse = reverse if reverse is not None else False
return sorted(table, key=sort_key, reverse=reverse)
def languages_by_size(self, table: str):
"""Return ordered list of languages by size from WikiStats."""
# This assumes they appear in order of size in the WikiStats dump.
return [d['prefix'] for d in self.get(table)]
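# Illustrative usage (table contents depend on the live WikiStats dump,
# and 'good' is an example column name for the article count):
#   ws = WikiStats()
#   rows = ws.sorted('wikipedia', 'good')      # numeric key -> descending
#   langs = ws.languages_by_size('wikipedia')  # prefixes ordered by size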
| 29.345324 | 76 | 0.571218 | 457 | 4,079 | 5.004376 | 0.371991 | 0.03498 | 0.024049 | 0.020988 | 0.104941 | 0.104941 | 0.104941 | 0.078706 | 0.078706 | 0.078706 | 0 | 0.005842 | 0.328512 | 4,079 | 138 | 77 | 29.557971 | 0.829135 | 0.241971 | 0 | 0.024691 | 0 | 0 | 0.168817 | 0.016129 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061728 | false | 0 | 0.074074 | 0 | 0.296296 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cf890707bfc5bdbaf1a365889f176c31b933fc3 | 698 | py | Python | Fundamentals/Exercises/Regular_Expressions_Lab/1_match_full_name.py | tankishev/Python | 60e511fc901f136b88c681f77f209fe2f8c46447 | [
"MIT"
] | 2 | 2022-03-04T11:39:03.000Z | 2022-03-13T07:13:23.000Z | Fundamentals/Exercises/Regular_Expressions_Lab/1_match_full_name.py | tankishev/Python | 60e511fc901f136b88c681f77f209fe2f8c46447 | [
"MIT"
] | null | null | null | Fundamentals/Exercises/Regular_Expressions_Lab/1_match_full_name.py | tankishev/Python | 60e511fc901f136b88c681f77f209fe2f8c46447 | [
"MIT"
] | null | null | null | # Write a program to match full names from a sequence of characters and print them on the console.
# Writing the Regular Expression
# First, write a regular expression to match a valid full name, according to these conditions:
#
# • A valid full name has the following characteristics:
# o It consists of two words.
# o Each word starts with a capital letter.
# o After the first letter, it only contains lowercase letters.
# o Each of the two words should be at least two letters long.
# o A single space separates the two words.
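# Example: for the input "john smith Ivan Ivanov PeteR Parker" only
# "Ivan Ivanov" satisfies all conditions and is matched.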
import re
input_string = input()
re_pattern = r'\b[A-Z][a-z]+ [A-Z][a-z]+\b'
matches = re.findall(re_pattern, input_string)
output = ' '.join(matches)
print(output) | 36.736842 | 98 | 0.744986 | 121 | 698 | 4.272727 | 0.553719 | 0.015474 | 0.017408 | 0.023211 | 0.015474 | 0.015474 | 0 | 0 | 0 | 0 | 0 | 0 | 0.173352 | 698 | 19 | 99 | 36.736842 | 0.894281 | 0.732092 | 0 | 0 | 0 | 0.166667 | 0.158192 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cf8dc8e9ccce5aec4e23336b20ed321418c1e92 | 5,115 | py | Python | probe/views.py | krstnschwpwr/speedcontrol | 2de5900a6a74038ea1f36739d26a6fd815732c2f | [
"MIT"
] | 2 | 2017-01-24T11:44:54.000Z | 2017-03-01T20:13:11.000Z | probe/views.py | krstnschwpwr/speedcontrol | 2de5900a6a74038ea1f36739d26a6fd815732c2f | [
"MIT"
] | null | null | null | probe/views.py | krstnschwpwr/speedcontrol | 2de5900a6a74038ea1f36739d26a6fd815732c2f | [
"MIT"
] | 1 | 2020-04-20T21:02:57.000Z | 2020-04-20T21:02:57.000Z | from django.template.response import TemplateResponse
from django.views.generic.base import TemplateView
from rest_framework import generics
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from .forms import SettingsForm
from .models import Speed, AverageQuality, Settings
from .serializers import SpeedSerializer, PrtgSpeedSerializer, SearchSerializer, DictSerializer, QualitySerializer, \
SettingsSerializer, ErrorMsg, PrtgErrorMsg
from .renderers import PrtgRenderer
from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect
from probe.lib.helpers import is_first_run, is_not_first_run
class QualityList(generics.ListAPIView):
renderer_classes = [JSONRenderer]
queryset = AverageQuality.objects.all().order_by('-id')
serializer_class = QualitySerializer
class QualityDetail(generics.RetrieveAPIView):
renderer_classes = [JSONRenderer]
def retrieve(self, request, *args, **kwargs):
if not is_first_run():
try:
queryset = AverageQuality.objects.latest()
serializer = QualitySerializer(queryset, many=False)
except AverageQuality.DoesNotExist:
# .latest() raises rather than returning None on an empty table
serializer = ErrorMsg('', many=False)
else:
serializer = ErrorMsg('', many=False)
return Response(serializer.data)
class SearchView(generics.ListAPIView):
renderer_classes = [JSONRenderer]
def list(self, request, *args, **kwargs):
q = request.GET.get('query', None)
if q:
queryset = Speed.objects.all().filter(id__contains=q) | Speed.objects.all().filter(server__contains=q)
else:
queryset = Speed.objects.all()
serializer = SearchSerializer(queryset, many=True)
return Response({'results': serializer.data})
class SpeedList(generics.ListAPIView):
renderer_classes = [JSONRenderer]
queryset = Speed.objects.all()
serializer_class = SpeedSerializer
class SpeedDetail(generics.RetrieveAPIView):
renderer_classes = [JSONRenderer, PrtgRenderer]
def retrieve(self, request, *args, **kwargs):
id = kwargs.get('id', None)
format = request.GET.get('format', None)
if id:
queryset = Speed.objects.get(id=id)
serializer = SpeedSerializer(queryset, many=False)
else:
try:
queryset = Speed.objects.latest()
if format == 'prtg':
serializer = PrtgSpeedSerializer(queryset, many=False)
else:
serializer = SpeedSerializer(queryset, many=False)
except Speed.DoesNotExist:
if format == 'prtg':
serializer = PrtgErrorMsg('', many=False)
else:
serializer = ErrorMsg('', many=False)
return Response(serializer.data)
class OverviewView(TemplateView):
def get(self, request):
if is_first_run():
return redirect('/start')
return TemplateResponse(request, 'overview.html')
class RecordView(generics.RetrieveAPIView):
renderer_classes = [JSONRenderer]
def retrieve(self, request, *args, **kwargs):
self.queryset = Settings.objects.last()
if self.queryset is None:
serializer = ErrorMsg(self.queryset, many=False)
else:
serializer = SettingsSerializer(self.queryset, many=False)
return Response(serializer.data)
class StartView(TemplateView):
template_name = 'start.html'
class StatusView(TemplateView):
model = Speed
def dispatch(self, request, *args, **kwargs):
if is_first_run():
return redirect('/start')
return TemplateResponse(request, 'status.html', context=self.get_context_data())
def get_context_data(self, **kwargs):
context = super(StatusView, self).get_context_data(**kwargs)
context['exp'] = Settings.objects.latest()
return context
def settings(request):
if request.method == 'POST':
form = SettingsForm(request.POST)
if form.is_valid():
expected_upload = request.POST.get('expected_upload', '')
expected_download = request.POST.get('expected_download', '')
prtg_url = request.POST.get('prtg_url', '')
prtg_token = request.POST.get('prtg_token', '')
if Settings.objects.first():
ps = Settings.objects.first()
ps.expected_upload = expected_upload
ps.expected_download = expected_download
ps.prtg_url = prtg_url
ps.prtg_token = prtg_token
ps.save()
else:
prov_object = Settings(expected_upload=expected_upload, expected_download=expected_download, prtg_url=prtg_url,
prtg_token=prtg_token)
prov_object.save()
return HttpResponseRedirect('/')
else:
form = SettingsForm()
return render(request, 'settings.html', {
'form': form,
})
| 34.328859 | 127 | 0.640274 | 508 | 5,115 | 6.326772 | 0.230315 | 0.028002 | 0.050404 | 0.03267 | 0.290292 | 0.190417 | 0.146858 | 0.13379 | 0.13379 | 0.13379 | 0 | 0.000264 | 0.26002 | 5,115 | 148 | 128 | 34.560811 | 0.848877 | 0.006256 | 0 | 0.286957 | 0 | 0 | 0.029915 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069565 | false | 0 | 0.104348 | 0 | 0.452174 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cff1a39b0af8dd37952a48444efb65be1a075bc | 2,926 | py | Python | serving_patterns/src/app/apps/app_flask.py | shibuiwilliam/ml-system-in-action | 0aa9d6bc4a4346236b9c971ec90afad04bcf5cca | [
"MIT"
] | 10 | 2020-08-30T03:19:10.000Z | 2021-08-08T17:38:06.000Z | serving_patterns/src/app/apps/app_flask.py | shibuiwilliam/ml-system-in-action | 0aa9d6bc4a4346236b9c971ec90afad04bcf5cca | [
"MIT"
] | null | null | null | serving_patterns/src/app/apps/app_flask.py | shibuiwilliam/ml-system-in-action | 0aa9d6bc4a4346236b9c971ec90afad04bcf5cca | [
"MIT"
] | 6 | 2020-08-30T03:19:13.000Z | 2021-11-26T23:32:42.000Z | from flask import Flask, jsonify, request
import numpy as np
from PIL import Image
from typing import List
import logging
import io
import base64
from src.app.ml.active_predictor import Data, DataConverter, active_predictor
from src.helper import get_job_id, get_image_data
from src.middleware.profiler import do_cprofile
from src.configurations import PlatformConfigurations
from src.app.configurations import APIConfigurations
logger = logging.getLogger(__name__)
logger.info(f"starts {APIConfigurations.title}:{APIConfigurations.version}")
logger.info(f"platform: {PlatformConfigurations.platform}")
app = Flask(
import_name=APIConfigurations.title,
)
@do_cprofile
def predict_image(image_data: Image.Image) -> List:
output_np = active_predictor.predict(image_data)
reshaped_output_nps = DataConverter.reshape_output(output_np)
prediction = reshaped_output_nps.tolist()
return prediction
@app.route("/health", methods=["GET"])
def health():
return jsonify({"health": "ok"})
@app.route("/predict", methods=["GET", "POST"])
def predict():
_data = Data()
if request.method == "GET":
data = _data.test_data
image_data = get_image_data(data)
prediction = predict_image(image_data)
return jsonify({"prediction": prediction})
elif request.method == "POST":
input_data = request.get_json()
raw_data = input_data["image_data"]
decoded = base64.b64decode(str(raw_data))
io_bytes = io.BytesIO(decoded)
data = Image.open(io_bytes)
image_data = get_image_data(data)
prediction = predict_image(image_data)
job_id = data["job_id"] if "job_id" in input_data.keys() else get_job_id()
return jsonify({"prediction": prediction, "job_id": job_id})
@app.route("/labels", methods=["GET"])
def labels():
_data = Data()
labels = _data.labels
return jsonify({"labels": labels})
@app.route("/predict/label", methods=["GET", "POST"])
def predict_label():
_data = Data()
labels = _data.labels
if request.method == "GET":
data = _data.test_data
image_data = get_image_data(data)
prediction = predict_image(image_data)
argmax = int(np.argmax(np.array(prediction)[0]))
return jsonify({labels[argmax]: prediction[0][argmax]})
elif request.method == "POST":
input_data = request.get_json()
raw_data = input_data["image_data"]
decoded = base64.b64decode(str(raw_data))
io_bytes = io.BytesIO(decoded)
data = Image.open(io_bytes)
image_data = get_image_data(data)
prediction = predict_image(image_data)
argmax = int(np.argmax(np.array(prediction)[0]))
job_id = data["job_id"] if "job_id" in input_data.keys() else get_job_id()
return jsonify({labels[argmax]: prediction[0][argmax], "job_id": job_id})
if __name__ == "__main__":
app.run(debug=True)
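# Illustrative client call (host/port assumed; the payload field name
# "image_data" matches the handlers above):
#   curl -X POST http://localhost:5000/predict \
#        -H 'Content-Type: application/json' \
#        -d '{"image_data": "<base64-encoded image bytes>"}'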
| 31.462366 | 82 | 0.689337 | 378 | 2,926 | 5.087302 | 0.216931 | 0.079563 | 0.031201 | 0.054602 | 0.4883 | 0.438378 | 0.438378 | 0.401456 | 0.401456 | 0.401456 | 0 | 0.005885 | 0.186945 | 2,926 | 92 | 83 | 31.804348 | 0.802438 | 0 | 0 | 0.452055 | 0 | 0 | 0.092618 | 0.029392 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068493 | false | 0 | 0.178082 | 0.013699 | 0.342466 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0cff1d4cdbc092857eb74fd1827ddec8f53c0e0e | 2,370 | py | Python | images/ip-gateway/entrypoint.py | ERIGrid2/charts | 37606d2fdc2e2b8e4f00a687e5168691e410dd8d | [
"Apache-2.0"
] | 1 | 2021-02-18T12:34:10.000Z | 2021-02-18T12:34:10.000Z | images/ip-gateway/entrypoint.py | ERIGrid2/charts | 37606d2fdc2e2b8e4f00a687e5168691e410dd8d | [
"Apache-2.0"
] | null | null | null | images/ip-gateway/entrypoint.py | ERIGrid2/charts | 37606d2fdc2e2b8e4f00a687e5168691e410dd8d | [
"Apache-2.0"
] | null | null | null |
import subprocess
import os
import json
import shlex
class Network:
def __init__(self, **args):
for key, val in args.items():
setattr(self, key, val)
self.ip = self.ips[0]
@classmethod
def load(cls):
""" Loads multus network status annotation
NETWORK_STATUS = [
{
"name": "cbr0",
"interface": "eth0",
"ips": [
"10.42.2.36"
],
"mac": "02:67:84:3a:47:8e",
"default": True,
"dns": {}
},
{
"name": "kube-system/local-bridge",
"interface": "net1",
"ips": [
"172.23.157.190"
],
"mac": "e2:39:e4:f3:f0:d7",
"dns": {}
}
]
"""
# os.environ values are strings, so the fallback must be a JSON string too,
# and each status entry is a dict that maps onto the keyword constructor
nets = json.loads(os.environ.get('NETWORK_STATUS', '[]'))
return [cls(**net) for net in nets]
def tc(*args):
return subprocess.check_call(['tc'] + list(args))
networks = Network.load()
internal = networks[0]
external = networks[1]
# Internal -> External
## DNAT
tc('qdisc', 'add', 'dev', internal.interface, 'ingress', 'handle', 'ffff')
tc('filter', 'add', 'dev', internal.interface, 'parent', 'ffff:', 'protocol', 'ip', 'prio', '10', 'u32', 'match', 'ip', 'dst', f'{internal.ip}/32', 'action', 'nat', 'ingress', f'{internal.ip}/32', external.ip)
## SNAT
tc('qdisc', 'add', 'dev', internal.interface, 'root', 'handle', '10:', 'htb')
tc('filter', 'add', 'dev', internal.interface, 'parent', '10:', 'protocol', 'ip', 'prio', '10', 'u32', 'match', 'ip', 'src', f'{external.ip}/32', 'action', 'nat', 'egress', f'{external.ip}/32', internal.ip)
# External -> Internal
## DNAT
tc('qdisc', 'add', 'dev', external.interface, 'ingress', 'handle', 'ffff')
tc('filter', 'add', 'dev', external.interface, 'parent', 'ffff:', 'protocol', 'ip', 'prio', '10', 'u32', 'match', 'ip', 'dst', f'{external.ip}/32', 'action', 'nat', 'ingress', f'{external.ip}/32', internal.ip)
## SNAT
tc('qdisc', 'add', 'dev', external.interface, 'root', 'handle', '10:', 'htb')
tc('filter', 'add', 'dev', external.interface, 'parent', '10:', 'protocol', 'ip', 'prio', '10', 'u32', 'match', 'ip', 'src', f'{external.ip}/32', 'action', 'nat', 'egress', f'{external.ip}/32', internal.ip)
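# The rules above implement a stateless 1:1 NAT between the pod's two Multus
# interfaces using tc's nat action. A loosely equivalent iptables sketch
# (illustrative only; these commands are not executed by this image):
#   iptables -t nat -A PREROUTING  -d $EXTERNAL_IP -j DNAT --to-destination $INTERNAL_IP
#   iptables -t nat -A POSTROUTING -s $INTERNAL_IP -j SNAT --to-source $EXTERNAL_IP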
| 33.380282 | 209 | 0.505063 | 274 | 2,370 | 4.343066 | 0.346715 | 0.040336 | 0.055462 | 0.065546 | 0.54958 | 0.542857 | 0.403361 | 0.376471 | 0.309244 | 0.245378 | 0 | 0.046794 | 0.26962 | 2,370 | 70 | 210 | 33.857143 | 0.64067 | 0.218143 | 0 | 0 | 0 | 0 | 0.319123 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.153846 | 0.038462 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
490039a07eeeaa351662f97dfab65ba9ad40969a | 1,964 | py | Python | src/django_clickhouse/serializers.py | sdil/django-clickhouse | 5a8e2ec7a4203b4d455f27a8ae624690e30ec7b4 | [
"MIT"
] | 74 | 2018-11-30T04:13:26.000Z | 2022-03-15T23:11:48.000Z | src/django_clickhouse/serializers.py | sdil/django-clickhouse | 5a8e2ec7a4203b4d455f27a8ae624690e30ec7b4 | [
"MIT"
] | 24 | 2019-02-03T09:07:05.000Z | 2022-01-04T10:28:03.000Z | src/django_clickhouse/serializers.py | sdil/django-clickhouse | 5a8e2ec7a4203b4d455f27a8ae624690e30ec7b4 | [
"MIT"
] | 28 | 2018-12-11T15:14:17.000Z | 2022-03-16T00:20:45.000Z | from typing import NamedTuple, Optional, Iterable, Type
import pytz
from django.db.models import Model as DjangoModel
from .utils import model_to_dict
class Django2ClickHouseModelSerializer:
def __init__(self, model_cls: Type['ClickHouseModel'], fields: Optional[Iterable[str]] = None, # noqa: F821
exclude_fields: Optional[Iterable[str]] = None, writable: bool = False,
defaults: Optional[dict] = None) -> None:
"""
Initializes serializer
:param model_cls: ClickHouseModel subclass to serialize to
:param fields: Optional. A list of fields to add into result tuple
:param exclude_fields: Fields to exclude from result tuple
:param writable: If the fields parameter is not set directly,
this flag determines whether only writable or all fields should be taken from model_cls
:param defaults: A dictionary of field: value which are taken as default values for model_cls instances
:return: None
"""
self._model_cls = model_cls
if fields is not None:
self.serialize_fields = fields
else:
self.serialize_fields = model_cls.fields(writable=writable).keys()
self.exclude_serialize_fields = exclude_fields
self._result_class = self._model_cls.get_tuple_class(defaults=defaults)
self._fields = self._model_cls.fields(writable=False)
def _get_serialize_kwargs(self, obj: DjangoModel) -> dict:
data = model_to_dict(obj, fields=self.serialize_fields, exclude_fields=self.exclude_serialize_fields)
# Remove None values, they should be initialized as defaults
result = {
key: self._fields[key].to_python(value, pytz.utc)
for key, value in data.items() if value is not None
}
return result
def serialize(self, obj: DjangoModel) -> NamedTuple:
return self._result_class(**self._get_serialize_kwargs(obj))
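# A minimal usage sketch (hypothetical names: MyClickHouseModel stands for a
# ClickHouseModel subclass and `instance` for a saved Django object; neither
# is defined in this module):
#
#   serializer = Django2ClickHouseModelSerializer(MyClickHouseModel,
#                                                 exclude_fields=('id',))
#   row = serializer.serialize(instance)  # named tuple ready for bulk insert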
| 42.695652 | 112 | 0.686864 | 246 | 1,964 | 5.296748 | 0.345528 | 0.055257 | 0.036838 | 0.038373 | 0.09363 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002681 | 0.240326 | 1,964 | 45 | 113 | 43.644444 | 0.870643 | 0.274949 | 0 | 0 | 0 | 0 | 0.011169 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.16 | 0.04 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
490097c273efe7c92e946981318d9e5943573430 | 1,022 | py | Python | examples/real_traffic/udp_echo_server.py | lunarss/ns.py | 9298c850290fb2ee98b047dfc757f4687763c2f8 | [
"Apache-2.0"
] | null | null | null | examples/real_traffic/udp_echo_server.py | lunarss/ns.py | 9298c850290fb2ee98b047dfc757f4687763c2f8 | [
"Apache-2.0"
] | null | null | null | examples/real_traffic/udp_echo_server.py | lunarss/ns.py | 9298c850290fb2ee98b047dfc757f4687763c2f8 | [
"Apache-2.0"
] | null | null | null | """ A simple UDP echo server. """
import argparse
import socketserver
class MyUDPHandler(socketserver.BaseRequestHandler):
"""
This class works in a similar way to the TCP handler class, except that
self.request consists of a pair of data and client socket, and since
there is no connection the client address must be given explicitly
when sending data back via sendto().
"""
def handle(self):
data = self.request[0].strip()
socket = self.request[1]
print("{} wrote: ".format(self.client_address))
print(data)
socket.sendto(data, self.client_address)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("listen_port",
help="The port this process will listen on.",
type=int)
args = parser.parse_args()
with socketserver.UDPServer(("localhost", int(args.listen_port)),
MyUDPHandler) as server:
server.serve_forever()
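# A minimal client sketch for exercising this server (assuming it was started
# as e.g. `python udp_echo_server.py 9999`; run this from another process):
#
#   import socket
#   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   sock.sendto(b"hello", ("localhost", 9999))
#   data, _ = sock.recvfrom(1024)  # b"hello", echoed back by handle()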
| 32.967742 | 75 | 0.636986 | 122 | 1,022 | 5.213115 | 0.631148 | 0.051887 | 0.053459 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002677 | 0.26908 | 1,022 | 30 | 76 | 34.066667 | 0.848728 | 0.264188 | 0 | 0 | 0 | 0 | 0.104603 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.111111 | 0 | 0.222222 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4904010488fa10b4da5c3c4d5d64a0a4d3a16f57 | 4,219 | py | Python | unsup_spatial_pred/analyze/live_visualizer.py | alaflaquiere/unsupervised-spatial-predictor | 3c8aa02dc20782d31d1df791dd5e92dce275aec2 | [
"MIT"
] | null | null | null | unsup_spatial_pred/analyze/live_visualizer.py | alaflaquiere/unsupervised-spatial-predictor | 3c8aa02dc20782d31d1df791dd5e92dce275aec2 | [
"MIT"
] | null | null | null | unsup_spatial_pred/analyze/live_visualizer.py | alaflaquiere/unsupervised-spatial-predictor | 3c8aa02dc20782d31d1df791dd5e92dce275aec2 | [
"MIT"
] | null | null | null | import os
import sys
import numpy as np
import platform
import subprocess
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from _pickle import UnpicklingError
plt.ion()
def save_embedding(h, state, w, path):
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
dic = {"h_grid": h,
"state_grid": state,
"W": w}
np.save(path, dic, allow_pickle=True)
def start_display_server(path):
root = os.path.realpath(__file__)
if platform.system() == 'Windows':
command = "python {} {}".format(root, path)
proc = subprocess.Popen(command)
elif platform.system() == 'Linux':
command = "exec python3 {} {}".format(root, path)
proc = subprocess.Popen([command], shell=True)
else:
proc = None
return proc
def center_and_scale(x):
if x.ndim == 1:
x = x.reshape(1, -1)
center = np.mean(x, axis=0)
scale = 0.5 * np.max(np.max(x, axis=0) - np.min(x, axis=0))
return (x - center) / scale
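# Worked example (illustrative): for x = np.array([[0., 0.], [2., 4.]]) the
# column-wise center is [1., 2.] and the scale is 0.5 * max(2., 4.) = 2.,
# so center_and_scale(x) returns [[-0.5, -1.], [0.5, 1.]].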
def set_axes_equal(ax):
"""Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
"""
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5*max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
class LiveEmbeddingVisualizer:
def __init__(self, path):
self.path = path
self.save_name = os.path.join(os.path.dirname(self.path),
"embedding.png")
self._run()
def _display(self, h, state, w, plot_quiver=False):
if not plt.fignum_exists(0):
self.fig = plt.figure(num=0, figsize=(6, 6))
self.ax = plt.subplot(111, projection='3d')
self.ax.cla()
h = center_and_scale(h)
self.ax.plot(h[:, 0],
h[:, 1],
h[:, 2],
'r.')
self.ax.plot(state[:, 0],
state[:, 1],
state[:, 2],
'k.',
alpha=1)
if plot_quiver:
self.ax.quiver(np.zeros(w.shape[0]),
np.zeros(w.shape[0]),
np.zeros(w.shape[0]),
w[:, 0],
w[:, 1],
w[:, 2],
color="b",
alpha=0.5)
self.ax.set_xlabel('$h_1$')
self.ax.set_ylabel('$h_2$')
self.ax.set_zlabel('$h_3$')
set_axes_equal(self.ax) # make the axis scales equal
plt.show(block=False)
self.fig.savefig(self.save_name)
def _run(self):
moddate = 0
while True:
try:
stamp = os.stat(self.path).st_mtime
if stamp != moddate:
moddate = stamp
try:
dic = np.load(self.path, allow_pickle=True)[()] # array of 0 dimensions
self._display(dic["h_grid"],
dic["state_grid"],
dic["W"])
except (OSError, UnpicklingError):
pass
except FileNotFoundError:
pass
finally:
plt.pause(0.2)
if __name__ == '__main__':
d = LiveEmbeddingVisualizer(sys.argv[1])
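# Producer-side usage sketch (hypothetical path and arrays; h, state and w are
# the embedding, ground-truth state and weight arrays written by the trainer):
#
#   save_embedding(h, state, w, "run/embedding.npy")
#   proc = start_display_server("run/embedding.npy")  # re-launches this file
#   ...                                               # keep overwriting the file
#   proc.kill()                                       # stop the live viewer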
| 33.220472 | 96 | 0.52785 | 550 | 4,219 | 3.881818 | 0.325455 | 0.025293 | 0.044965 | 0.018267 | 0.076815 | 0.057143 | 0.057143 | 0.019672 | 0.019672 | 0.019672 | 0 | 0.021083 | 0.34795 | 4,219 | 126 | 97 | 33.484127 | 0.754998 | 0.103342 | 0 | 0.057692 | 0 | 0 | 0.031708 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067308 | false | 0.019231 | 0.076923 | 0 | 0.173077 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
49043c91758b7199d063670616826656f7e8b485 | 1,159 | py | Python | paddle/trainer/tests/simple_sparse_neural_network_dp.py | limeng357/Paddle | dbd25805c88c48998eb9dc0f4b2ca1fd46326482 | [
"ECL-2.0",
"Apache-2.0"
] | 9 | 2017-12-04T02:58:01.000Z | 2020-12-03T14:46:30.000Z | paddle/trainer/tests/simple_sparse_neural_network_dp.py | limeng357/Paddle | dbd25805c88c48998eb9dc0f4b2ca1fd46326482 | [
"ECL-2.0",
"Apache-2.0"
] | 7 | 2017-12-05T20:29:08.000Z | 2018-10-15T08:57:40.000Z | paddle/trainer/tests/simple_sparse_neural_network_dp.py | limeng357/Paddle | dbd25805c88c48998eb9dc0f4b2ca1fd46326482 | [
"ECL-2.0",
"Apache-2.0"
] | 6 | 2018-03-19T22:38:46.000Z | 2019-11-01T22:28:27.000Z | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer.PyDataProvider2 import provider, integer_sequence, integer_value
import random
def init_hook(settings, is_train, **kwargs):
settings.is_train = is_train
@provider(
input_types={'word_ids': integer_value(8191),
'label': integer_value(10)},
min_pool_size=0,
init_hook=init_hook)
def process(settings, filename):
if settings.is_train:
data_size = 2**10
else:
data_size = 2**5
for _ in xrange(data_size):
yield random.randint(0, 8190), random.randint(0, 9)
| 32.194444 | 84 | 0.721311 | 169 | 1,159 | 4.83432 | 0.615385 | 0.073439 | 0.05508 | 0.039168 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029915 | 0.192407 | 1,159 | 35 | 85 | 33.114286 | 0.842949 | 0.502157 | 0 | 0 | 0 | 0 | 0.023173 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
49049bb0d3f8c6acd5e760dcc5b10c26a2f9565e | 53,909 | py | Python | odoo-14.0/addons/hr_expense/models/hr_expense.py | Yomy1996/P1 | 59e24cdd5f7f82005fe15bd7a7ff54dd5364dd29 | [
"CC-BY-3.0"
] | null | null | null | odoo-14.0/addons/hr_expense/models/hr_expense.py | Yomy1996/P1 | 59e24cdd5f7f82005fe15bd7a7ff54dd5364dd29 | [
"CC-BY-3.0"
] | null | null | null | odoo-14.0/addons/hr_expense/models/hr_expense.py | Yomy1996/P1 | 59e24cdd5f7f82005fe15bd7a7ff54dd5364dd29 | [
"CC-BY-3.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import re
from odoo import api, fields, models, _
from odoo.exceptions import UserError, ValidationError
from odoo.tools import email_split, float_is_zero
class HrExpense(models.Model):
_name = "hr.expense"
_inherit = ['mail.thread', 'mail.activity.mixin']
_description = "Expense"
_order = "date desc, id desc"
_check_company_auto = True
@api.model
def _default_employee_id(self):
employee = self.env.user.employee_id
if not employee and not self.env.user.has_group('hr_expense.group_hr_expense_team_approver'):
raise ValidationError(_('The current user has no related employee. Please, create one.'))
return employee
@api.model
def _default_product_uom_id(self):
return self.env['uom.uom'].search([], limit=1, order='id')
@api.model
def _default_account_id(self):
return self.env['ir.property']._get('property_account_expense_categ_id', 'product.category')
@api.model
def _get_employee_id_domain(self):
res = [('id', '=', 0)] # Nothing accepted by domain, by default
if self.user_has_groups('hr_expense.group_hr_expense_user') or self.user_has_groups('account.group_account_user'):
res = "['|', ('company_id', '=', False), ('company_id', '=', company_id)]" # Then, domain accepts everything
elif self.user_has_groups('hr_expense.group_hr_expense_team_approver') and self.env.user.employee_ids:
user = self.env.user
employee = self.env.user.employee_id
res = [
'|', '|', '|',
('department_id.manager_id', '=', employee.id),
('parent_id', '=', employee.id),
('id', '=', employee.id),
('expense_manager_id', '=', user.id),
'|', ('company_id', '=', False), ('company_id', '=', employee.company_id.id),
]
elif self.env.user.employee_id:
employee = self.env.user.employee_id
res = [('id', '=', employee.id), '|', ('company_id', '=', False), ('company_id', '=', employee.company_id.id)]
return res
name = fields.Char('Description', compute='_compute_from_product_id_company_id', store=True, required=True, copy=True,
states={'draft': [('readonly', False)], 'reported': [('readonly', False)], 'refused': [('readonly', False)]})
date = fields.Date(readonly=True, states={'draft': [('readonly', False)], 'reported': [('readonly', False)], 'refused': [('readonly', False)]}, default=fields.Date.context_today, string="Expense Date")
accounting_date = fields.Date(string="Accounting Date", related='sheet_id.accounting_date', store=True, groups='account.group_account_invoice,account.group_account_readonly')
employee_id = fields.Many2one('hr.employee', compute='_compute_employee_id', string="Employee",
store=True, required=True, readonly=False, tracking=True,
states={'approved': [('readonly', True)], 'done': [('readonly', True)]},
default=_default_employee_id, domain=lambda self: self._get_employee_id_domain(), check_company=True)
# product_id not required to allow create an expense without product via mail alias, but should be required on the view.
product_id = fields.Many2one('product.product', string='Product', readonly=True, tracking=True, states={'draft': [('readonly', False)], 'reported': [('readonly', False)], 'refused': [('readonly', False)]}, domain="[('can_be_expensed', '=', True), '|', ('company_id', '=', False), ('company_id', '=', company_id)]", ondelete='restrict')
product_uom_id = fields.Many2one('uom.uom', string='Unit of Measure', compute='_compute_from_product_id_company_id',
store=True, states={'draft': [('readonly', False)], 'refused': [('readonly', False)]},
default=_default_product_uom_id, domain="[('category_id', '=', product_uom_category_id)]")
product_uom_category_id = fields.Many2one(related='product_id.uom_id.category_id', readonly=True)
unit_amount = fields.Float("Unit Price", compute='_compute_from_product_id_company_id', store=True, required=True, copy=True,
states={'draft': [('readonly', False)], 'reported': [('readonly', False)], 'refused': [('readonly', False)]}, digits='Product Price')
quantity = fields.Float(required=True, readonly=True, states={'draft': [('readonly', False)], 'reported': [('readonly', False)], 'refused': [('readonly', False)]}, digits='Product Unit of Measure', default=1)
tax_ids = fields.Many2many('account.tax', 'expense_tax', 'expense_id', 'tax_id',
compute='_compute_from_product_id_company_id', store=True, readonly=False,
domain="[('company_id', '=', company_id), ('type_tax_use', '=', 'purchase')]", string='Taxes')
untaxed_amount = fields.Float("Subtotal", store=True, compute='_compute_amount', digits='Account')
total_amount = fields.Monetary("Total", compute='_compute_amount', store=True, currency_field='currency_id', tracking=True)
amount_residual = fields.Monetary(string='Amount Due', compute='_compute_amount_residual')
company_currency_id = fields.Many2one('res.currency', string="Report Company Currency", related='sheet_id.currency_id', store=True, readonly=False)
total_amount_company = fields.Monetary("Total (Company Currency)", compute='_compute_total_amount_company', store=True, currency_field='company_currency_id')
company_id = fields.Many2one('res.company', string='Company', required=True, readonly=True, states={'draft': [('readonly', False)], 'refused': [('readonly', False)]}, default=lambda self: self.env.company)
# TODO make required in master (sgv)
currency_id = fields.Many2one('res.currency', string='Currency', readonly=True, states={'draft': [('readonly', False)], 'refused': [('readonly', False)]}, default=lambda self: self.env.company.currency_id)
analytic_account_id = fields.Many2one('account.analytic.account', string='Analytic Account', check_company=True)
analytic_tag_ids = fields.Many2many('account.analytic.tag', string='Analytic Tags', states={'post': [('readonly', True)], 'done': [('readonly', True)]}, domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]")
account_id = fields.Many2one('account.account', compute='_compute_from_product_id_company_id', store=True, readonly=False, string='Account',
default=_default_account_id, domain="[('internal_type', '=', 'other'), ('company_id', '=', company_id)]", help="An expense account is expected")
description = fields.Text('Notes...', readonly=True, states={'draft': [('readonly', False)], 'reported': [('readonly', False)], 'refused': [('readonly', False)]})
payment_mode = fields.Selection([
("own_account", "Employee (to reimburse)"),
("company_account", "Company")
], default='own_account', tracking=True, states={'done': [('readonly', True)], 'approved': [('readonly', True)], 'reported': [('readonly', True)]}, string="Paid By")
attachment_number = fields.Integer('Number of Attachments', compute='_compute_attachment_number')
state = fields.Selection([
('draft', 'To Submit'),
('reported', 'Submitted'),
('approved', 'Approved'),
('done', 'Paid'),
('refused', 'Refused')
], compute='_compute_state', string='Status', copy=False, index=True, readonly=True, store=True, default='draft', help="Status of the expense.")
sheet_id = fields.Many2one('hr.expense.sheet', string="Expense Report", domain="[('employee_id', '=', employee_id), ('company_id', '=', company_id)]", readonly=True, copy=False)
reference = fields.Char("Bill Reference")
is_refused = fields.Boolean("Explicitly Refused by manager or accountant", readonly=True, copy=False)
is_editable = fields.Boolean("Is Editable By Current User", compute='_compute_is_editable')
is_ref_editable = fields.Boolean("Reference Is Editable By Current User", compute='_compute_is_ref_editable')
sample = fields.Boolean()
@api.depends('sheet_id', 'sheet_id.account_move_id', 'sheet_id.state')
def _compute_state(self):
for expense in self:
if not expense.sheet_id or expense.sheet_id.state == 'draft':
expense.state = "draft"
elif expense.sheet_id.state == "cancel":
expense.state = "refused"
elif expense.sheet_id.state == "approve" or expense.sheet_id.state == "post":
expense.state = "approved"
elif not expense.sheet_id.account_move_id:
expense.state = "reported"
else:
expense.state = "done"
@api.depends('quantity', 'unit_amount', 'tax_ids', 'currency_id')
def _compute_amount(self):
for expense in self:
expense.untaxed_amount = expense.unit_amount * expense.quantity
taxes = expense.tax_ids.compute_all(expense.unit_amount, expense.currency_id, expense.quantity, expense.product_id, expense.employee_id.user_id.partner_id)
expense.total_amount = taxes.get('total_included')
@api.depends("sheet_id.account_move_id.line_ids")
def _compute_amount_residual(self):
for expense in self:
if not expense.sheet_id:
expense.amount_residual = expense.total_amount
continue
if not expense.currency_id or expense.currency_id == expense.company_id.currency_id:
residual_field = 'amount_residual'
else:
residual_field = 'amount_residual_currency'
payment_term_lines = expense.sheet_id.account_move_id.line_ids \
.filtered(lambda line: line.expense_id == self and line.account_internal_type in ('receivable', 'payable'))
expense.amount_residual = -sum(payment_term_lines.mapped(residual_field))
@api.depends('date', 'total_amount', 'company_currency_id')
def _compute_total_amount_company(self):
for expense in self:
amount = 0
if expense.company_currency_id:
date_expense = expense.date
amount = expense.currency_id._convert(
expense.total_amount, expense.company_currency_id,
expense.company_id, date_expense or fields.Date.today())
expense.total_amount_company = amount
def _compute_attachment_number(self):
attachment_data = self.env['ir.attachment'].read_group([('res_model', '=', 'hr.expense'), ('res_id', 'in', self.ids)], ['res_id'], ['res_id'])
attachment = dict((data['res_id'], data['res_id_count']) for data in attachment_data)
for expense in self:
expense.attachment_number = attachment.get(expense.id, 0)
@api.depends('employee_id')
def _compute_is_editable(self):
is_account_manager = self.env.user.has_group('account.group_account_user') or self.env.user.has_group('account.group_account_manager')
for expense in self:
if expense.state == 'draft' or expense.sheet_id.state in ['draft', 'submit']:
expense.is_editable = True
elif expense.sheet_id.state == 'approve':
expense.is_editable = is_account_manager
else:
expense.is_editable = False
@api.depends('employee_id')
def _compute_is_ref_editable(self):
is_account_manager = self.env.user.has_group('account.group_account_user') or self.env.user.has_group('account.group_account_manager')
for expense in self:
if expense.state == 'draft' or expense.sheet_id.state in ['draft', 'submit']:
expense.is_ref_editable = True
else:
expense.is_ref_editable = is_account_manager
@api.depends('product_id', 'company_id')
def _compute_from_product_id_company_id(self):
for expense in self.filtered('product_id'):
expense = expense.with_company(expense.company_id)
expense.name = expense.name or expense.product_id.display_name
if not expense.attachment_number or (expense.attachment_number and not expense.unit_amount):
expense.unit_amount = expense.product_id.price_compute('standard_price')[expense.product_id.id]
expense.product_uom_id = expense.product_id.uom_id
expense.tax_ids = expense.product_id.supplier_taxes_id.filtered(lambda tax: tax.company_id == expense.company_id) # taxes only from the same company
account = expense.product_id.product_tmpl_id._get_product_accounts()['expense']
if account:
expense.account_id = account
@api.depends('company_id')
def _compute_employee_id(self):
if not self.env.context.get('default_employee_id'):
for expense in self:
expense.employee_id = self.env.user.with_company(expense.company_id).employee_id
@api.onchange('product_id', 'date', 'account_id')
def _onchange_product_id_date_account_id(self):
rec = self.env['account.analytic.default'].sudo().account_get(
product_id=self.product_id.id,
account_id=self.account_id.id,
company_id=self.company_id.id,
date=self.date
)
self.analytic_account_id = self.analytic_account_id or rec.analytic_id.id
self.analytic_tag_ids = self.analytic_tag_ids or rec.analytic_tag_ids.ids
@api.constrains('product_id', 'product_uom_id')
def _check_product_uom_category(self):
if self.product_id and self.product_uom_id.category_id != self.product_id.uom_id.category_id:
raise UserError(_('Selected Unit of Measure does not belong to the same category as the product Unit of Measure.'))
def create_expense_from_attachments(self, attachment_ids=None, view_type='tree'):
''' Create the expenses from files.
:return: An action redirecting to hr.expense tree/form view.
'''
if attachment_ids is None:
attachment_ids = []
attachments = self.env['ir.attachment'].browse(attachment_ids)
if not attachments:
raise UserError(_("No attachment was provided"))
expenses = self.env['hr.expense']
if any(attachment.res_id or attachment.res_model != 'hr.expense' for attachment in attachments):
raise UserError(_("Invalid attachments!"))
product = self.env['product.product'].search([('can_be_expensed', '=', True)])
if product:
product = product.filtered(lambda p: p.default_code == "EXP_GEN") or product[0]
else:
raise UserError(_("You need to have at least one product that can be expensed in your database to proceed!"))
for attachment in attachments:
expense = self.env['hr.expense'].create({
'name': attachment.name.split('.')[0],
'unit_amount': 0,
'product_id': product.id
})
expense.message_post(body=_('Uploaded Attachment'))
attachment.write({
'res_model': 'hr.expense',
'res_id': expense.id,
})
attachment.register_as_main_attachment()
expenses += expense
if len(expenses) == 1:
return {
'name': _('Generated Expense'),
'view_mode': 'form',
'res_model': 'hr.expense',
'type': 'ir.actions.act_window',
'views': [[False, 'form']],
'res_id': expenses[0].id,
}
return {
'name': _('Generated Expenses'),
'domain': [('id', 'in', expenses.ids)],
'res_model': 'hr.expense',
'type': 'ir.actions.act_window',
'views': [[False, view_type], [False, "form"]],
}
# ----------------------------------------
# ORM Overrides
# ----------------------------------------
def unlink(self):
for expense in self:
if expense.state in ['done', 'approved']:
raise UserError(_('You cannot delete a posted or approved expense.'))
return super(HrExpense, self).unlink()
def write(self, vals):
if 'tax_ids' in vals or 'analytic_account_id' in vals or 'account_id' in vals:
if any(not expense.is_editable for expense in self):
raise UserError(_('You are not authorized to edit this expense report.'))
if 'reference' in vals:
if any(not expense.is_ref_editable for expense in self):
raise UserError(_('You are not authorized to edit the reference of this expense report.'))
return super(HrExpense, self).write(vals)
@api.model
def get_empty_list_help(self, help_message):
return super(HrExpense, self).get_empty_list_help(help_message + self._get_empty_list_mail_alias())
@api.model
def _get_empty_list_mail_alias(self):
use_mailgateway = self.env['ir.config_parameter'].sudo().get_param('hr_expense.use_mailgateway')
alias_record = use_mailgateway and self.env.ref('hr_expense.mail_alias_expense') or False
if alias_record and alias_record.alias_domain and alias_record.alias_name:
return """
<p>
Or send your receipts at <a href="mailto:%(email)s?subject=Lunch%%20with%%20customer%%3A%%20%%2412.32">%(email)s</a>.
</p>""" % {'email': '%s@%s' % (alias_record.alias_name, alias_record.alias_domain)}
return ""
# ----------------------------------------
# Actions
# ----------------------------------------
def action_view_sheet(self):
self.ensure_one()
return {
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'hr.expense.sheet',
'target': 'current',
'res_id': self.sheet_id.id
}
def _create_sheet_from_expenses(self):
if any(expense.state != 'draft' or expense.sheet_id for expense in self):
raise UserError(_("You cannot report twice the same line!"))
if len(self.mapped('employee_id')) != 1:
raise UserError(_("You cannot report expenses for different employees in the same report."))
if any(not expense.product_id for expense in self):
raise UserError(_("You can not create report without product."))
todo = self.filtered(lambda x: x.payment_mode=='own_account') or self.filtered(lambda x: x.payment_mode=='company_account')
sheet = self.env['hr.expense.sheet'].create({
'company_id': self.company_id.id,
'employee_id': self[0].employee_id.id,
'name': todo[0].name if len(todo) == 1 else '',
'expense_line_ids': [(6, 0, todo.ids)]
})
return sheet
def action_submit_expenses(self):
sheet = self._create_sheet_from_expenses()
sheet.action_submit_sheet()
return {
'name': _('New Expense Report'),
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'hr.expense.sheet',
'target': 'current',
'res_id': sheet.id,
}
def action_get_attachment_view(self):
self.ensure_one()
res = self.env['ir.actions.act_window']._for_xml_id('base.action_attachment')
res['domain'] = [('res_model', '=', 'hr.expense'), ('res_id', 'in', self.ids)]
res['context'] = {'default_res_model': 'hr.expense', 'default_res_id': self.id}
return res
# ----------------------------------------
# Business
# ----------------------------------------
def _prepare_move_values(self):
"""
This function prepares move values related to an expense
"""
self.ensure_one()
journal = self.sheet_id.bank_journal_id if self.payment_mode == 'company_account' else self.sheet_id.journal_id
account_date = self.sheet_id.accounting_date or self.date
move_values = {
'journal_id': journal.id,
'company_id': self.sheet_id.company_id.id,
'date': account_date,
'ref': self.sheet_id.name,
# force the name to the default value, to avoid an eventual 'default_name' in the context
# setting it to '', which would cause no number to be given to the account.move when posted.
'name': '/',
}
return move_values
def _get_account_move_by_sheet(self):
""" Return a mapping between the expense sheet of current expense and its account move
:returns dict where key is a sheet id, and value is an account move record
"""
move_grouped_by_sheet = {}
for expense in self:
# create the move that will contain the accounting entries
if expense.sheet_id.id not in move_grouped_by_sheet:
move_vals = expense._prepare_move_values()
move = self.env['account.move'].with_context(default_journal_id=move_vals['journal_id']).create(move_vals)
move_grouped_by_sheet[expense.sheet_id.id] = move
else:
move = move_grouped_by_sheet[expense.sheet_id.id]
return move_grouped_by_sheet
def _get_expense_account_source(self):
self.ensure_one()
if self.account_id:
account = self.account_id
elif self.product_id:
account = self.product_id.product_tmpl_id.with_company(self.company_id)._get_product_accounts()['expense']
if not account:
raise UserError(
_("No Expense account found for the product %s (or for its category), please configure one.") % (self.product_id.name))
else:
account = self.env['ir.property'].with_company(self.company_id)._get('property_account_expense_categ_id', 'product.category')
if not account:
raise UserError(_('Please configure Default Expense account for Product expense: `property_account_expense_categ_id`.'))
return account
def _get_expense_account_destination(self):
self.ensure_one()
if not self.employee_id.sudo().address_home_id:
raise UserError(_("No Home Address found for the employee %s, please configure one.") % (self.employee_id.name))
partner = self.employee_id.sudo().address_home_id.with_company(self.company_id)
account_dest = partner.property_account_payable_id.id or partner.parent_id.property_account_payable_id.id
return account_dest
def _get_account_move_line_values(self):
move_line_values_by_expense = {}
for expense in self:
move_line_name = expense.employee_id.name + ': ' + expense.name.split('\n')[0][:64]
account_src = expense._get_expense_account_source()
account_dst = expense._get_expense_account_destination()
account_date = expense.sheet_id.accounting_date or expense.date or fields.Date.context_today(expense)
company_currency = expense.company_id.currency_id
move_line_values = []
taxes = expense.tax_ids.with_context(round=True).compute_all(expense.unit_amount, expense.currency_id, expense.quantity, expense.product_id)
total_amount = 0.0
total_amount_currency = 0.0
partner_id = expense.employee_id.sudo().address_home_id.commercial_partner_id.id
# source move line
balance = expense.currency_id._convert(taxes['total_excluded'], company_currency, expense.company_id, account_date)
amount_currency = taxes['total_excluded']
move_line_src = {
'name': move_line_name,
'quantity': expense.quantity or 1,
'debit': balance if balance > 0 else 0,
'credit': -balance if balance < 0 else 0,
'amount_currency': amount_currency,
'account_id': account_src.id,
'product_id': expense.product_id.id,
'product_uom_id': expense.product_uom_id.id,
'analytic_account_id': expense.analytic_account_id.id,
'analytic_tag_ids': [(6, 0, expense.analytic_tag_ids.ids)],
'expense_id': expense.id,
'partner_id': partner_id,
'tax_ids': [(6, 0, expense.tax_ids.ids)],
'tax_tag_ids': [(6, 0, taxes['base_tags'])],
'currency_id': expense.currency_id.id,
}
move_line_values.append(move_line_src)
total_amount -= balance
total_amount_currency -= move_line_src['amount_currency']
# taxes move lines
for tax in taxes['taxes']:
balance = expense.currency_id._convert(tax['amount'], company_currency, expense.company_id, account_date)
amount_currency = tax['amount']
if tax['tax_repartition_line_id']:
rep_ln = self.env['account.tax.repartition.line'].browse(tax['tax_repartition_line_id'])
base_amount = self.env['account.move']._get_base_amount_to_display(tax['base'], rep_ln)
else:
base_amount = None
move_line_tax_values = {
'name': tax['name'],
'quantity': 1,
'debit': balance if balance > 0 else 0,
'credit': -balance if balance < 0 else 0,
'amount_currency': amount_currency,
'account_id': tax['account_id'] or move_line_src['account_id'],
'tax_repartition_line_id': tax['tax_repartition_line_id'],
'tax_tag_ids': tax['tag_ids'],
'tax_base_amount': base_amount,
'expense_id': expense.id,
'partner_id': partner_id,
'currency_id': expense.currency_id.id,
'analytic_account_id': expense.analytic_account_id.id if tax['analytic'] else False,
'analytic_tag_ids': [(6, 0, expense.analytic_tag_ids.ids)] if tax['analytic'] else False,
}
total_amount -= balance
total_amount_currency -= move_line_tax_values['amount_currency']
move_line_values.append(move_line_tax_values)
# destination move line
move_line_dst = {
'name': move_line_name,
'debit': total_amount > 0 and total_amount,
'credit': total_amount < 0 and -total_amount,
'account_id': account_dst,
'date_maturity': account_date,
'amount_currency': total_amount_currency,
'currency_id': expense.currency_id.id,
'expense_id': expense.id,
'partner_id': partner_id,
}
move_line_values.append(move_line_dst)
move_line_values_by_expense[expense.id] = move_line_values
return move_line_values_by_expense
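# Illustrative note (an added comment, not in the original): every source and
# tax line above decrements total_amount / total_amount_currency, so the
# destination (payable) line carries the exact opposite balance and each
# expense's set of move lines sums to zero, as a journal entry must.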
def action_move_create(self):
'''
main function that is called when trying to create the accounting entries related to an expense
'''
move_group_by_sheet = self._get_account_move_by_sheet()
move_line_values_by_expense = self._get_account_move_line_values()
for expense in self:
# get the account move of the related sheet
move = move_group_by_sheet[expense.sheet_id.id]
# get move line values
move_line_values = move_line_values_by_expense.get(expense.id)
# link move lines to move, and move to expense sheet
move.write({'line_ids': [(0, 0, line) for line in move_line_values]})
expense.sheet_id.write({'account_move_id': move.id})
if expense.payment_mode == 'company_account':
expense.sheet_id.paid_expense_sheets()
# post the moves
for expense in self:
if not expense.payment_mode == 'company_account':
for move in move_group_by_sheet[expense.sheet_id.id]:
if move.state != 'posted':
move._post()
return move_group_by_sheet
def refuse_expense(self, reason):
self.write({'is_refused': True})
self.sheet_id.write({'state': 'cancel'})
self.sheet_id.message_post_with_view('hr_expense.hr_expense_template_refuse_reason',
values={'reason': reason, 'is_sheet': False, 'name': self.name})
@api.model
def get_expense_dashboard(self):
expense_state = {
'draft': {
'description': _('to report'),
'amount': 0.0,
'currency': self.env.company.currency_id.id,
},
'reported': {
'description': _('under validation'),
'amount': 0.0,
'currency': self.env.company.currency_id.id,
},
'approved': {
'description': _('to be reimbursed'),
'amount': 0.0,
'currency': self.env.company.currency_id.id,
}
}
if not self.env.user.employee_ids:
return expense_state
target_currency = self.env.company.currency_id
expenses = self.read_group(
[
('employee_id', 'in', self.env.user.employee_ids.ids),
('payment_mode', '=', 'own_account'),
('state', 'in', ['draft', 'reported', 'approved'])
], ['total_amount', 'currency_id', 'state'], ['state', 'currency_id'], lazy=False)
for expense in expenses:
state = expense['state']
currency = self.env['res.currency'].browse(expense['currency_id'][0]) if expense['currency_id'] else target_currency
amount = currency._convert(
expense['total_amount'], target_currency, self.env.company, fields.Date.today())
expense_state[state]['amount'] += amount
return expense_state
# ----------------------------------------
# Mail Thread
# ----------------------------------------
@api.model
def message_new(self, msg_dict, custom_values=None):
email_address = email_split(msg_dict.get('email_from', False))[0]
employee = self.env['hr.employee'].search([
'|',
('work_email', 'ilike', email_address),
('user_id.email', 'ilike', email_address)
], limit=1)
expense_description = msg_dict.get('subject', '')
if employee.user_id:
company = employee.user_id.company_id
currencies = company.currency_id | employee.user_id.company_ids.mapped('currency_id')
else:
company = employee.company_id
currencies = company.currency_id
if not company: # ultimate fallback, since company_id is required on expense
company = self.env.company
# The expenses alias is the same for all companies, we need to set the proper context
# To select the product account
self = self.with_company(company)
product, price, currency_id, expense_description = self._parse_expense_subject(expense_description, currencies)
vals = {
'employee_id': employee.id,
'name': expense_description,
'unit_amount': price,
'product_id': product.id if product else None,
'product_uom_id': product.uom_id.id,
'tax_ids': [(4, tax.id, False) for tax in product.supplier_taxes_id],
'quantity': 1,
'company_id': company.id,
'currency_id': currency_id.id
}
account = product.product_tmpl_id._get_product_accounts()['expense']
if account:
vals['account_id'] = account.id
expense = super(HrExpense, self).message_new(msg_dict, dict(custom_values or {}, **vals))
self._send_expense_success_mail(msg_dict, expense)
return expense
@api.model
def _parse_product(self, expense_description):
"""
Parse the subject to find the product.
Product code should be the first word of expense_description
Return product.product and updated description
"""
product_code = expense_description.split(' ')[0]
product = self.env['product.product'].search([('can_be_expensed', '=', True), ('default_code', '=ilike', product_code)], limit=1)
if product:
expense_description = expense_description.replace(product_code, '', 1)
return product, expense_description
@api.model
def _parse_price(self, expense_description, currencies):
""" Return price, currency and updated description """
symbols, symbols_pattern, float_pattern = [], '', '[+-]?(\d+[.,]?\d*)'
price = 0.0
for currency in currencies:
symbols.append(re.escape(currency.symbol))
symbols.append(re.escape(currency.name))
symbols_pattern = '|'.join(symbols)
price_pattern = "((%s)?\s?%s\s?(%s)?)" % (symbols_pattern, float_pattern, symbols_pattern)
matches = re.findall(price_pattern, expense_description)
if matches:
match = max(matches, key=lambda match: len([group for group in match if group]))  # get the longest match. e.g. "2 chairs 120$" -> the price is 120$, not 2
full_str = match[0]
currency_str = match[1] or match[3]
price = match[2].replace(',', '.')
if currency_str:
currency = currencies.filtered(lambda c: currency_str in [c.symbol, c.name])[0]
currency = currency or currencies[0]
expense_description = expense_description.replace(full_str, ' ') # remove price from description
expense_description = re.sub(' +', ' ', expense_description.strip())
price = float(price)
return price, currency, expense_description
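# Worked example (illustrative): with currencies containing only USD (symbol
# "$"), the description "TICKET $1205.91 Travel by Air" matches price_pattern
# on "$1205.91": match[1] == "$" resolves the currency, match[2] == "1205.91"
# becomes the price, and the method returns
# (1205.91, USD, "TICKET Travel by Air").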
@api.model
def _parse_expense_subject(self, expense_description, currencies):
""" Fetch product, price and currency info from mail subject.
Product can be identified based on product name or product code.
It can be passed between [] or it can be placed at start.
When parsing, only consider currencies passed as parameter.
This will fetch currency in symbol($) or ISO name (USD).
Some valid examples:
Travel by Air [TICKET] USD 1205.91
TICKET $1205.91 Travel by Air
Extra expenses 29.10EUR [EXTRA]
"""
product, expense_description = self._parse_product(expense_description)
price, currency_id, expense_description = self._parse_price(expense_description, currencies)
return product, price, currency_id, expense_description
# TODO: Make api.multi
def _send_expense_success_mail(self, msg_dict, expense):
mail_template_id = 'hr_expense.hr_expense_template_register' if expense.employee_id.user_id else 'hr_expense.hr_expense_template_register_no_user'
expense_template = self.env.ref(mail_template_id)
rendered_body = expense_template._render({'expense': expense}, engine='ir.qweb')
body = self.env['mail.render.mixin']._replace_local_links(rendered_body)
# TDE TODO: seems louche, check to use notify
if expense.employee_id.user_id.partner_id:
expense.message_post(
partner_ids=expense.employee_id.user_id.partner_id.ids,
subject='Re: %s' % msg_dict.get('subject', ''),
body=body,
subtype_id=self.env.ref('mail.mt_note').id,
email_layout_xmlid='mail.mail_notification_light',
)
else:
self.env['mail.mail'].sudo().create({
'email_from': self.env.user.email_formatted,
'author_id': self.env.user.partner_id.id,
'body_html': body,
'subject': 'Re: %s' % msg_dict.get('subject', ''),
'email_to': msg_dict.get('email_from', False),
'auto_delete': True,
'references': msg_dict.get('message_id'),
}).send()
class HrExpenseSheet(models.Model):
"""
Here are the rights associated with the expense flow
Action Group Restriction
=================================================================================
Submit Employee Only his own
Officer If he is expense manager of the employee, manager of the employee
or the employee is in the department managed by the officer
Manager Always
Approve Officer Not his own and he is expense manager of the employee, manager of the employee
or the employee is in the department managed by the officer
Manager Always
Post Anybody State = approve and journal_id defined
Done Anybody State = approve and journal_id defined
Cancel Officer Not his own and he is expense manager of the employee, manager of the employee
or the employee is in the department managed by the officer
Manager Always
=================================================================================
"""
_name = "hr.expense.sheet"
_inherit = ['mail.thread', 'mail.activity.mixin']
_description = "Expense Report"
_order = "accounting_date desc, id desc"
_check_company_auto = True
@api.model
def _default_employee_id(self):
return self.env.user.employee_id
@api.model
def _default_journal_id(self):
""" The journal is determining the company of the accounting entries generated from expense. We need to force journal company and expense sheet company to be the same. """
default_company_id = self.default_get(['company_id'])['company_id']
journal = self.env['account.journal'].search([('type', '=', 'purchase'), ('company_id', '=', default_company_id)], limit=1)
return journal.id
@api.model
def _default_bank_journal_id(self):
default_company_id = self.default_get(['company_id'])['company_id']
return self.env['account.journal'].search([('type', 'in', ['cash', 'bank']), ('company_id', '=', default_company_id)], limit=1)
name = fields.Char('Expense Report Summary', required=True, tracking=True)
expense_line_ids = fields.One2many('hr.expense', 'sheet_id', string='Expense Lines', copy=False)
state = fields.Selection([
('draft', 'Draft'),
('submit', 'Submitted'),
('approve', 'Approved'),
('post', 'Posted'),
('done', 'Paid'),
('cancel', 'Refused')
], string='Status', index=True, readonly=True, tracking=True, copy=False, default='draft', required=True, help='Expense Report State')
employee_id = fields.Many2one('hr.employee', string="Employee", required=True, readonly=True, tracking=True, states={'draft': [('readonly', False)]}, default=_default_employee_id, check_company=True, domain= lambda self: self.env['hr.expense']._get_employee_id_domain())
address_id = fields.Many2one('res.partner', compute='_compute_from_employee_id', store=True, readonly=False, copy=True, string="Employee Home Address", check_company=True)
payment_mode = fields.Selection(related='expense_line_ids.payment_mode', default='own_account', readonly=True, string="Paid By", tracking=True)
user_id = fields.Many2one('res.users', 'Manager', compute='_compute_from_employee_id', store=True, readonly=True, copy=False, states={'draft': [('readonly', False)]}, tracking=True, domain=lambda self: [('groups_id', 'in', self.env.ref('hr_expense.group_hr_expense_team_approver').id)])
total_amount = fields.Monetary('Total Amount', currency_field='currency_id', compute='_compute_amount', store=True, tracking=True)
amount_residual = fields.Monetary(
string="Amount Due", store=True,
currency_field='currency_id',
compute='_compute_amount_residual')
company_id = fields.Many2one('res.company', string='Company', required=True, readonly=True, states={'draft': [('readonly', False)]}, default=lambda self: self.env.company)
currency_id = fields.Many2one('res.currency', string='Currency', readonly=True, states={'draft': [('readonly', False)]}, default=lambda self: self.env.company.currency_id)
attachment_number = fields.Integer(compute='_compute_attachment_number', string='Number of Attachments')
journal_id = fields.Many2one('account.journal', string='Expense Journal', states={'done': [('readonly', True)], 'post': [('readonly', True)]}, check_company=True, domain="[('type', '=', 'purchase'), ('company_id', '=', company_id)]",
default=_default_journal_id, help="The journal used when the expense is done.")
bank_journal_id = fields.Many2one('account.journal', string='Bank Journal', states={'done': [('readonly', True)], 'post': [('readonly', True)]}, check_company=True, domain="[('type', 'in', ['cash', 'bank']), ('company_id', '=', company_id)]",
default=_default_bank_journal_id, help="The payment method used when the expense is paid by the company.")
accounting_date = fields.Date("Accounting Date")
account_move_id = fields.Many2one('account.move', string='Journal Entry', ondelete='restrict', copy=False, readonly=True)
department_id = fields.Many2one('hr.department', compute='_compute_from_employee_id', store=True, readonly=False, copy=False, string='Department', states={'post': [('readonly', True)], 'done': [('readonly', True)]})
is_multiple_currency = fields.Boolean("Handle lines with different currencies", compute='_compute_is_multiple_currency')
can_reset = fields.Boolean('Can Reset', compute='_compute_can_reset')
_sql_constraints = [
('journal_id_required_posted', "CHECK((state IN ('post', 'done') AND journal_id IS NOT NULL) OR (state NOT IN ('post', 'done')))", 'The journal must be set on posted expense'),
]
@api.depends('expense_line_ids.total_amount_company')
def _compute_amount(self):
for sheet in self:
sheet.total_amount = sum(sheet.expense_line_ids.mapped('total_amount_company'))
@api.depends(
'currency_id',
'account_move_id.line_ids.amount_residual',
'account_move_id.line_ids.amount_residual_currency',
'account_move_id.line_ids.account_internal_type',)
def _compute_amount_residual(self):
for sheet in self:
if sheet.currency_id == sheet.company_id.currency_id:
residual_field = 'amount_residual'
else:
residual_field = 'amount_residual_currency'
payment_term_lines = sheet.account_move_id.line_ids\
.filtered(lambda line: line.account_internal_type in ('receivable', 'payable'))
sheet.amount_residual = -sum(payment_term_lines.mapped(residual_field))
def _compute_attachment_number(self):
for sheet in self:
sheet.attachment_number = sum(sheet.expense_line_ids.mapped('attachment_number'))
@api.depends('expense_line_ids.currency_id')
def _compute_is_multiple_currency(self):
for sheet in self:
sheet.is_multiple_currency = len(sheet.expense_line_ids.mapped('currency_id')) > 1
def _compute_can_reset(self):
is_expense_user = self.user_has_groups('hr_expense.group_hr_expense_team_approver')
for sheet in self:
sheet.can_reset = is_expense_user if is_expense_user else sheet.employee_id.user_id == self.env.user
@api.depends('employee_id')
def _compute_from_employee_id(self):
for sheet in self:
sheet.address_id = sheet.employee_id.sudo().address_home_id
sheet.department_id = sheet.employee_id.department_id
sheet.user_id = sheet.employee_id.expense_manager_id or sheet.employee_id.parent_id.user_id
@api.constrains('expense_line_ids')
def _check_payment_mode(self):
for sheet in self:
expense_lines = sheet.mapped('expense_line_ids')
if expense_lines and any(expense.payment_mode != expense_lines[0].payment_mode for expense in expense_lines):
raise ValidationError(_("Expenses must be paid by the same entity (Company or employee)."))
@api.constrains('expense_line_ids', 'employee_id')
def _check_employee(self):
for sheet in self:
employee_ids = sheet.expense_line_ids.mapped('employee_id')
if len(employee_ids) > 1 or (len(employee_ids) == 1 and employee_ids != sheet.employee_id):
raise ValidationError(_('You cannot add expenses of another employee.'))
@api.constrains('expense_line_ids', 'company_id')
def _check_expense_lines_company(self):
for sheet in self:
if any(expense.company_id != sheet.company_id for expense in sheet.expense_line_ids):
raise ValidationError(_('An expense report must contain only lines from the same company.'))
@api.model
def create(self, vals):
sheet = super(HrExpenseSheet, self.with_context(mail_create_nosubscribe=True, mail_auto_subscribe_no_notify=True)).create(vals)
sheet.activity_update()
return sheet
def unlink(self):
for expense in self:
if expense.state in ['post', 'done']:
raise UserError(_('You cannot delete a posted or paid expense.'))
super(HrExpenseSheet, self).unlink()
# --------------------------------------------
# Mail Thread
# --------------------------------------------
def _track_subtype(self, init_values):
self.ensure_one()
if 'state' in init_values and self.state == 'approve':
return self.env.ref('hr_expense.mt_expense_approved')
elif 'state' in init_values and self.state == 'cancel':
return self.env.ref('hr_expense.mt_expense_refused')
elif 'state' in init_values and self.state == 'done':
return self.env.ref('hr_expense.mt_expense_paid')
return super(HrExpenseSheet, self)._track_subtype(init_values)
def _message_auto_subscribe_followers(self, updated_values, subtype_ids):
res = super(HrExpenseSheet, self)._message_auto_subscribe_followers(updated_values, subtype_ids)
if updated_values.get('employee_id'):
employee = self.env['hr.employee'].browse(updated_values['employee_id'])
if employee.user_id:
res.append((employee.user_id.partner_id.id, subtype_ids, False))
return res
# --------------------------------------------
# Actions
# --------------------------------------------
def action_sheet_move_create(self):
samples = self.mapped('expense_line_ids.sample')
if samples.count(True):
if samples.count(False):
raise UserError(_("You can't mix sample expenses and regular ones"))
self.write({'state': 'post'})
return
if any(sheet.state != 'approve' for sheet in self):
raise UserError(_("You can only generate accounting entry for approved expense(s)."))
if any(not sheet.journal_id for sheet in self):
raise UserError(_("Expenses must have an expense journal specified to generate accounting entries."))
expense_line_ids = self.mapped('expense_line_ids')\
.filtered(lambda r: not float_is_zero(r.total_amount, precision_rounding=(r.currency_id or self.env.company.currency_id).rounding))
res = expense_line_ids.action_move_create()
for sheet in self.filtered(lambda s: not s.accounting_date):
sheet.accounting_date = sheet.account_move_id.date
to_post = self.filtered(lambda sheet: sheet.payment_mode == 'own_account' and sheet.expense_line_ids)
to_post.write({'state': 'post'})
(self - to_post).write({'state': 'done'})
self.activity_update()
return res
def action_get_attachment_view(self):
res = self.env['ir.actions.act_window']._for_xml_id('base.action_attachment')
res['domain'] = [('res_model', '=', 'hr.expense'), ('res_id', 'in', self.expense_line_ids.ids)]
res['context'] = {
'default_res_model': 'hr.expense.sheet',
'default_res_id': self.id,
'create': False,
'edit': False,
}
return res
# --------------------------------------------
# Business
# --------------------------------------------
def set_to_paid(self):
self.write({'state': 'done'})
def action_submit_sheet(self):
self.write({'state': 'submit'})
self.activity_update()
def approve_expense_sheets(self):
if not self.user_has_groups('hr_expense.group_hr_expense_team_approver'):
raise UserError(_("Only Managers and HR Officers can approve expenses"))
elif not self.user_has_groups('hr_expense.group_hr_expense_manager'):
current_managers = self.employee_id.expense_manager_id | self.employee_id.parent_id.user_id | self.employee_id.department_id.manager_id.user_id
if self.employee_id.user_id == self.env.user:
raise UserError(_("You cannot approve your own expenses"))
if self.env.user not in current_managers and not self.user_has_groups('hr_expense.group_hr_expense_user') and self.employee_id.expense_manager_id != self.env.user:
raise UserError(_("You can only approve your department expenses"))
responsible_id = self.user_id.id or self.env.user.id
notification = {
'type': 'ir.actions.client',
'tag': 'display_notification',
'params': {
'title': _('There are no expense reports to approve.'),
'type': 'warning',
'sticky': False,  # with sticky=False the notification disappears after a few seconds
},
}
sheet_to_approve = self.filtered(lambda s: s.state in ['submit', 'draft'])
if sheet_to_approve:
notification['params'].update({
'title': _('The expense reports were successfully approved.'),
'type': 'success',
'next': {'type': 'ir.actions.act_window_close'},
})
sheet_to_approve.write({'state': 'approve', 'user_id': responsible_id})
self.activity_update()
return notification
def paid_expense_sheets(self):
self.write({'state': 'done'})
def refuse_sheet(self, reason):
if not self.user_has_groups('hr_expense.group_hr_expense_team_approver'):
raise UserError(_("Only Managers and HR Officers can approve expenses"))
elif not self.user_has_groups('hr_expense.group_hr_expense_manager'):
current_managers = self.employee_id.expense_manager_id | self.employee_id.parent_id.user_id | self.employee_id.department_id.manager_id.user_id
if self.employee_id.user_id == self.env.user:
raise UserError(_("You cannot refuse your own expenses"))
if self.env.user not in current_managers and not self.user_has_groups('hr_expense.group_hr_expense_user') and self.employee_id.expense_manager_id != self.env.user:
raise UserError(_("You can only refuse your department expenses"))
self.write({'state': 'cancel'})
for sheet in self:
sheet.message_post_with_view('hr_expense.hr_expense_template_refuse_reason', values={'reason': reason, 'is_sheet': True, 'name': sheet.name})
self.activity_update()
def reset_expense_sheets(self):
if not self.can_reset:
raise UserError(_("Only HR Officers or the concerned employee can reset to draft."))
self.mapped('expense_line_ids').write({'is_refused': False})
self.write({'state': 'draft'})
self.activity_update()
return True
def _get_responsible_for_approval(self):
if self.user_id:
return self.user_id
elif self.employee_id.parent_id.user_id:
return self.employee_id.parent_id.user_id
elif self.employee_id.department_id.manager_id.user_id:
return self.employee_id.department_id.manager_id.user_id
return self.env['res.users']
def activity_update(self):
for expense_report in self.filtered(lambda hol: hol.state == 'submit'):
self.activity_schedule(
'hr_expense.mail_act_expense_approval',
user_id=expense_report.sudo()._get_responsible_for_approval().id or self.env.user.id)
self.filtered(lambda hol: hol.state == 'approve').activity_feedback(['hr_expense.mail_act_expense_approval'])
self.filtered(lambda hol: hol.state in ('draft', 'cancel')).activity_unlink(['hr_expense.mail_act_expense_approval'])
def action_register_payment(self):
''' Open the account.payment.register wizard to pay the selected journal entries.
:return: An action opening the account.payment.register wizard.
'''
return {
'name': _('Register Payment'),
'res_model': 'account.payment.register',
'view_mode': 'form',
'context': {
'active_model': 'account.move',
'active_ids': self.account_move_id.ids,
},
'target': 'new',
'type': 'ir.actions.act_window',
}
| 52.491723 | 339 | 0.626352 | 6,477 | 53,909 | 4.950903 | 0.082291 | 0.020208 | 0.008919 | 0.00948 | 0.435588 | 0.349799 | 0.280475 | 0.23638 | 0.196495 | 0.177784 | 0 | 0.003251 | 0.24111 | 53,909 | 1,026 | 340 | 52.542885 | 0.780572 | 0.081248 | 0 | 0.224969 | 0 | 0.002472 | 0.222739 | 0.052505 | 0 | 0 | 0 | 0.001949 | 0 | 1 | 0.07911 | false | 0 | 0.004944 | 0.004944 | 0.217553 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4905a8914ef8f75cc9df814c45efb0a1cfda4b12 | 24,617 | py | Python | cogs/commands.py | kdrsrt/mecha | a32047fe6bdc776cad73d95a716c7bfa8f0516ed | [
"MIT"
] | null | null | null | cogs/commands.py | kdrsrt/mecha | a32047fe6bdc776cad73d95a716c7bfa8f0516ed | [
"MIT"
] | null | null | null | cogs/commands.py | kdrsrt/mecha | a32047fe6bdc776cad73d95a716c7bfa8f0516ed | [
"MIT"
] | null | null | null |
from math import log
from os import stat
from re import T
from discord.enums import _is_descriptor
from discord.ext.commands import Cog, command
from discord.ext import commands, tasks
from discord import Embed, channel
from threading import Thread
import time, discord, json, sys
import texttable
from cogs.rpi import RPi
class Commands(Cog):
def __init__(self, bot, bot_name, console):
self.bot = bot
self.bot_name = str(bot_name).lower()
if sys.platform == "linux":
self.rpi_os = True
self.logDir = "/home/pi/asbot/Logs" #!
self.commandLogDir = f'{self.logDir}/commands.json'
self.adminLogDir = f'{self.logDir}/admin.json'
else:
self.rpi_os = False
self.logDir = "C:\Dev\Github\src\mecha\cogs\Logs"
self.commandLogDir = f'{self.logDir}\commands.json'
self.adminLogDir = f'{self.logDir}\\admin.json'
self.console = console
self.botResponds = {} # * {guildId:{userMessage:botRespond}, ...}
self.botResponds2 = {} # * {guildId:{userMessage:botRespond}, ...}
self.filterSettings = {}
self.logs = {}
self.doorStatusNotificationChannelId = None
self.doorStatus = "open"
self.rpi = RPi()
@Cog.listener()
async def on_ready(self):
with open(self.commandLogDir) as logs_file:
self.logs = json.load(logs_file)
await self.initializeLog("public")
for guild in self.bot.guilds:
await self.initializeLog(int(guild.id))
await self.initializeDoorLog(699224778824745003) # TODO 743711488220594217 699224778824745003
await self.console.print_console(level=2, number="0001", logText=f'Commands Class has been started.')
if self.rpi_os:
self.door_check.start()
await self.console.print_console(level=2, number="0002", logText=f'Door check has been started.')
async def initializeLog(self, guildId):
if guildId != "public":
try:
self.bot.get_guild(int(guildId))
except Exception as Err:
await self.console.print_console(level=4, number="1000", logText=f'{Err} raised at Commands/initializeLog.')
print(f'Errorx1000: {Err}')
return
try:
self.botResponds.update({int(guildId):dict((self.logs[str(guildId)])["botResponds"])})
self.botResponds2.update({int(guildId):dict((self.logs[str(guildId)])["botResponds2"])})
await self.console.print_console(level=2, number="1001", logText=f"Bot responds for guild {guildId} have been initialized.")
except Exception as Err:
await self.console.print_console(level=3, number="1002", logText=f'There is no info about guild {guildId} - Commands/initializeLog.')
print(f'Errorx1002: {Err}')
pass # TODO Add "guildId" keyword to the JSON file.
else:
try:
self.botResponds.update({str(guildId):dict((self.logs[str(guildId)])["botResponds"])})
self.botResponds2.update({str(guildId):dict((self.logs[str(guildId)])["botResponds2"])})
await self.console.print_console(level=2, number="1003", logText="Public bot responds have been initialized.")
except Exception as Err:
await self.console.print_console(level=4, number="1004", logText=f'{Err} raised at Commands/initializeLog.')
print(f'Errorx1004: {Err}')
pass # TODO Add "public" keyword to the JSON file.
async def updateLog(self, guildId, keyword1, keyword2=None, value2=None):
with open(self.commandLogDir) as logs_file:
self.logs = json.load(logs_file)
if not str(guildId) in self.logs.keys():
self.logs.update({str(guildId):{keyword1:value2}})
elif keyword2 is None:
(self.logs[str(guildId)]).update({keyword1:value2})
else:
((self.logs[str(guildId)])[str(keyword1)]).update({keyword2:value2})
logs_file = open(self.commandLogDir, 'w')
json.dump(self.logs, logs_file)
await self.console.print_console(level=2, number="1005", logText=f"Bot responds for guild {guildId} have been updated.")
logs_file.close()
async def initializeDoorLog(self, guildId):
if self.bot.get_guild(id=int(guildId)):
self.doorStatusNotificationChannelId = (self.logs["doorStatusNotificationChannels"])[str(guildId)]
@tasks.loop(seconds=2)
async def door_check(self):
guild = self.bot.get_guild(id=699224778824745003) # TODO
doorStatusNotificationChannel = discord.utils.get(guild.text_channels, id=int(self.doorStatusNotificationChannelId))
trespassing, result = self.rpi.mech_door()
if self.doorStatus == "open":
if trespassing:
member = result[0]
await doorStatusNotificationChannel.send(f'**{member}** tarafından topluluk odası kapısı açıldı.')
self.rpi.open_door()
elif trespassing is False:
cardid = result
await doorStatusNotificationChannel.send(f'Kart numarası **{cardid}** olan birisi topluluk odası kapısını açmaya çalıştı.')
else:
if trespassing:
member = result[0]
await doorStatusNotificationChannel.send(f'**{member}** tarafından topluluk odası kapısı açılmaya çalışıldı, kapı kilitli olduğu için kapı açılmadı.')
else:
cardid = result
await doorStatusNotificationChannel.send(f'Kart numarası **{cardid}** olan birisi topluluk odası kapısını açmaya çalıştı.')
@door_check.before_loop
async def before_check(self):
# waits until the bot is ready, then it starts the loop.
await self.bot.wait_until_ready()
@Cog.listener()
async def on_message(self, ctx):
guild = ctx.guild
# * Filter conditions
if ctx.author.bot:
return
if int(guild.id) in self.filterSettings.keys():
if int(ctx.channel.id) in (self.filterSettings[int(guild.id)]).keys():
if ((self.filterSettings[int(guild.id)])[int(ctx.channel.id)])['text'] and ctx.content != '':
try:
await ctx.delete()
except Exception as Err:
await self.console.print_console(level=4, number="1006", logText=f'{Err} raised at Commands/on_message.')
print(f'Errorx1006: {Err}')
elif ((self.filterSettings[int(guild.id)])[int(ctx.channel.id)])['file'] and len(ctx.attachments):
try:
await ctx.delete()
except Exception as Err:
await self.console.print_console(level=4, number="1007", logText=f'{Err} raised at Commands/on_message.')
print(f'Errorx1007: {Err}')
# * Bot responds
data_general = self.botResponds["public"]
if str(ctx.content).lower() in data_general.keys():
await ctx.channel.send(f'**{data_general[str(ctx.content).lower()]}**')
return
elif str(ctx.content) in data_general.keys():
await ctx.channel.send(f'**{data_general[str(ctx.content)]}**')
return
if int(guild.id) in self.botResponds.keys():
data = self.botResponds[int(guild.id)]
if str(ctx.content).lower() in data.keys():
await ctx.channel.send(f'**{data[str(ctx.content).lower()]}**')
elif str(ctx.content) in data.keys():
await ctx.channel.send(f'**{data[str(ctx.content)]}**')
elif int(guild.id) in self.botResponds2.keys():
for cnt in (self.botResponds2)[int(guild.id)].keys():
if cnt in str(ctx.content):
await ctx.channel.send(f'**{(self.botResponds2[int(guild.id)])[str(cnt)]}**')
elif cnt in str(ctx.content).lower():
await ctx.channel.send(f'**{(self.botResponds2[int(guild.id)])[str(cnt)]}**')
# * Mentioned in a message
message = ctx
if self.bot.user.mentioned_in(message):
if message.channel.type != discord.ChannelType.private:
await self.console.print_console(level=0, number="1009", logText=str(message.content))
@command()
@commands.has_permissions(manage_channels=True)
async def listmembers(self, ctx):
members = self.rpi.list_members()
message_list = []
member_list = [["Durum", "Pozisyon", "İsim", "Soyisim", "Kart Numarası"]]
n = 0
for member in members:
if n == 10:
message_list.append(member_list)
member_list = [["Durum", "Pozisyon", "İsim", "Soyisim", "Kart Numarası"]]
n = 0
member_list.append([member["status"], member["position"], member["name"], member["surname"], member["cardid"]])
n+=1
message_list.append(member_list)
tableObj = texttable.Texttable()
for msg in message_list:
# Set columns
tableObj.set_cols_align(["c", "c", "c", "c", "c"])
# Set datatype of each column
tableObj.set_cols_dtype(["t", "t", "t", "t", "i"])
# Adjust columns
tableObj.set_cols_valign(["m", "m", "m", "m", "m"])
# Insert rows
tableObj.add_rows(msg)
await ctx.send(f'```{tableObj.draw()}```')
tableObj.reset()
@command()
@commands.has_permissions(manage_channels=True)
async def addmember(self, ctx, status, position, name, surname, cardid):
self.rpi.add_member(status, position, name, surname, cardid)
await ctx.send("Kişi veri tabanına eklendi.")
@command()
@commands.has_permissions(manage_channels=True)
async def removemember(self, ctx, name=None, surname=None):
if surname is None:
result = self.rpi.remove_member(name)
if result == 1:
await ctx.send("Kişi veri tabanından silindi.")
elif result == 2:
await ctx.send("Aynı kriterlere uygun birden fazla kişi bulunuyor.")
else:
await ctx.send("Kriterlere uygun kişi bulunamadı.")
else:
self.rpi.remove_member(name, surname)
await ctx.send("Kişi veri tabanından silindi.")
@command()
@commands.has_permissions(manage_channels=True)
async def doorlock(self, ctx, status="close"):
if str(status).lower() not in ("open", "close"):
await ctx.send("Invalid keyword!")
else:
self.doorStatus = status
if str(status).lower() == "open":
await ctx.send("Kapı kilidi açıldı!")
elif str(status).lower() == "close":
await ctx.send("Kapı kilitlendi!")
await ctx.message.delete()
@command()
@commands.has_permissions(manage_channels=True)
async def adduser(self, ctx, userId, channelId=None):
guild = ctx.guild
if channelId is None:
channel = ctx.channel
else:
try:
channel = discord.utils.get(guild.text_channels, id=int(channelId))
except Exception as Err:
await self.console.print_console(level=4, number="1008", logText=f'{Err} raised at Commands/adduser.')
print(f'Errorx1008: {Err}')
await ctx.send("The channel cannot be found!")
return
try:
user = discord.utils.get(guild.members, id=int(userId))
except:
await ctx.send("The user cannot be found!")
return
try:
overwrite = discord.PermissionOverwrite()
overwrite.view_channel = True
overwrite.send_messages = True
overwrite.read_messages = True
overwrite.read_message_history = True
await channel.set_permissions(user, overwrite=overwrite)
await ctx.send(f'{user.name} is added to {channel.name}.')
except Exception as Err:
await self.console.print_console(level=4, number="1009", logText=f'{Err} raised at Commands/adduser.')
print(f'Errorx1009: {Err}')
await ctx.send("The user cannot be added!")
@command()
@commands.has_permissions(manage_channels=True)
async def removeuser(self, ctx, userId, channelId=None):
guild = ctx.guild
if channelId is None:
channel = ctx.channel
else:
try:
channel = discord.utils.get(guild.text_channels, id=int(channelId))
except Exception as Err:
await self.console.print_console(level=4, number="1010", logText=f'{Err} raised at Commands/removeuser.')
print(f'Errorx1010: {Err}')
await ctx.send("The channel cannot be found!")
return
try:
user = discord.utils.get(guild.members, id=int(userId))
except:
await ctx.send("The user cannot be found!")
return
if user not in channel.members:
await ctx.send("The user is not in this channel.")
return
try:
await channel.set_permissions(user, overwrite=None)
await ctx.send(f'{user.name} is removed from {channel.name}.')
except Exception as Err:
#! await self.console.print_console(level=4, number="1011", logText=f'{Err} raised at Commands/removeuser.')
print(f'Errorx1011: {Err}')
await ctx.send("The user cannot be removed!")
@command()
@commands.has_permissions(manage_webhooks=True)
async def clearwebhooks(self, ctx, channelId=None):
try:
channelWebhooks = await ctx.channel.webhooks()
for w in channelWebhooks:
await w.delete()
await ctx.message.delete()
await ctx.send("Kanalda bulunan webhooklar silindi.")
except Exception as Err:
await self.console.print_console(level=4, number="1012", logText=f'{Err} raised at Commands/clearwebhooks.')
print(f'Errorx1012: {Err}')
await ctx.send("İstek gerçekleştirilemedi!")
@command()
@commands.has_permissions(manage_channels=True)
async def get(self, ctx, userId, channelId=None):
guild = ctx.guild
if not channelId:
if ctx.author.voice:
voiceChannel = ctx.author.voice.channel
else:
return
else:
try:
voiceChannel = discord.utils.get(guild.voice_channels, id=int(channelId))
except Exception as Err:
await self.console.print_console(level=4, number="1013", logText=f'{Err} raised at Commands/get.')
print(f'Errorx1013: {Err}')
await ctx.send("Couldn't get the user!")
return
try:
targetUser = guild.get_member(int(userId))
except Exception as Err:
await self.console.print_console(level=4, number="1014", logText=f'{Err} raised at Commands/get.')
print(f'Errorx1014: {Err}')
return
try:
await targetUser.move_to(voiceChannel)
except Exception as Err:
await self.console.print_console(level=4, number="1015", logText=f'{Err} raised at Commands/get.')
print(f'Errorx1015: {Err}')
@command()
@commands.has_permissions(manage_channels=True)
async def filter(self, ctx, on_off=None, filterType='text', channelId=None):
guild = ctx.guild
if not str(on_off).lower() in ['on', 'off']:
return
if not channelId:
chnl = ctx.channel
else:
try:
chnl = discord.utils.get(guild.text_channels, id=int(channelId))
except Exception as Err:
await ctx.send('İstek gerçekleştirilemedi, tekrar deneyin!')
await self.console.print_console(level=4, number="1016", logText=f'{Err} raised at Commands/filter.')
print(f'Errorx1016: {Err}')
return
on_off = str(on_off).lower() == 'on'
otherFilterType = 'text' if str(filterType).lower() == 'file' else 'file'
if int(guild.id) in self.filterSettings.keys():
if int(chnl.id) in (self.filterSettings[int(guild.id)]).keys():
((self.filterSettings[int(guild.id)])[int(chnl.id)])[filterType] = on_off
else:
(self.filterSettings[int(guild.id)]).update({int(chnl.id):{str(filterType):on_off, str(otherFilterType):False}})
else:
self.filterSettings.update({int(guild.id):{int(chnl.id):{str(filterType):on_off, str(otherFilterType):False}}})
await self.updateLog(int(guild.id), "filterSettings", value2=self.filterSettings[int(guild.id)])
def arrange_time(self, text):
text = str(text).split(' ')
d = (text[0].split('-'))[::-1]
t = (text[1].split(':'))
t[-1] = str(round(float(t[-1])))
d = f'{d[0]}.{d[1]}.{d[2]}'
t = f'{t[0]}:{t[1]}:{t[2]}'
return f'{t} {d}'
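# Illustrative example, assuming the usual str(datetime) input format:
#     arrange_time('2021-03-05 14:02:33.498') -> '14:02:33 05.03.2021'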
@command()
@commands.has_permissions(manage_messages=True)
async def getlog(self, ctx, channelId=None):
guild = ctx.guild
if channelId is None:
targetChannel = ctx.channel
else:
try:
targetChannel = discord.utils.get(guild.text_channels, id=int(channelId))
except Exception as Err:
await self.console.print_console(level=4, number="1017", logText=f'{Err} raised at Commands/getlog.')
print(f'Errorx1017: {Err}')
await ctx.send('Kanal bulunamadı, tekrar deneyiniz!')
return
await ctx.message.delete()
messages = await targetChannel.history(limit=None, oldest_first=True).flatten()
log_content = f'###\tThis is the log file of the channel named {targetChannel.name} in {guild.name}.\n###\tThe log is taken by {ctx.author}.\n{time.strftime("###%tDate: %d.%m.%Y %n###%tTime: %H:%M:%S")} \n###\tThe first message time of the log is {self.arrange_time(messages[0].created_at)}.\n###\tThe last message time of the log is {self.arrange_time(messages[-1].created_at)}.\n\n'
for msg in messages:
msg_info = f'{msg.author} - {self.arrange_time(msg.created_at)} -- {msg.content}\n'
log_content += msg_info
print(log_content)
@command(name='sad')
async def sad(self, ctx):
await ctx.message.delete()
await ctx.send(":frowning:")
@command()
async def getcommands(self, ctx):
for item in self.bot.commands:
await ctx.send(item.name)
@command()
async def react(self, ctx, messageId, *, emoji):
if int(ctx.author.id) == 565956579300737026:
try:
msg = await ctx.fetch_message(id=int(messageId))
await ctx.message.delete()
await msg.add_reaction(emoji)
except:
for guild in self.bot.guilds:
for channel in guild.text_channels:
try:
msg = await channel.fetch_message(id=int(messageId))
await ctx.message.delete()
await msg.add_reaction(emoji)
return
except:
pass
@command()
async def unreact(self, ctx, messageId, *, emoji):
guild = ctx.guild
if int(ctx.author.id) == 565956579300737026:
try:
msg = await ctx.fetch_message(id=int(messageId))
await ctx.message.delete()
await msg.remove_reaction(emoji, guild.me)
except:
for guild in self.bot.guilds:
for channel in guild.text_channels:
try:
msg = await channel.fetch_message(id=int(messageId))
await ctx.message.delete()
await msg.remove_reaction(emoji, guild.me)
return
except:
pass
@command()
async def message(self, ctx, *, msg):
if int(ctx.author.id) == 565956579300737026:
await ctx.message.delete()
await ctx.send(msg)
@command()
async def msg(self, ctx, channelId, *, content):
thisFile = 0
if ctx.message.attachments:
attch = ctx.message.attachments[0]
thisFile = await attch.to_file()
channel = self.bot.get_channel(int(channelId))
if thisFile:
await channel.send(content, file=thisFile)
else:
await channel.send(content)
await ctx.message.delete()
@command()
async def msgedit(self, ctx, messageId, *, content):
try:
guild = ctx.guild
for channel in guild.text_channels:
try:
msg = await channel.fetch_message(int(messageId))
await msg.edit(content=content)
await ctx.message.delete()
return
except:
pass
except:
pass
@command(name='clear')
@commands.has_permissions(manage_messages=True)
async def clear(self, ctx, number):
number = int(number) + 1
counter = 0
async for x in ctx.channel.history(limit=number):
if counter < number:
await x.delete()
counter += 1
if (counter-1) > 1:
msg = f':white_check_mark: I have deleted {(counter-1)} messages.'
elif (counter-1) == 1:
msg = f':white_check_mark: I have deleted {(counter-1)} message.'
else:
msg = f':white_check_mark: I cannot delete any message.'
bot_msg = await ctx.send(msg)
await asyncio.sleep(1.2)
await bot_msg.delete()
@command(name='embed')
@commands.has_permissions(manage_messages=True)
async def embedMessage(self, ctx, *, text_msg):
embed = Embed(description=text_msg, color=ctx.author.color)
await ctx.send(embed=embed)
await ctx.message.delete()
@command()
async def testConsole(self, ctx, level=2):
await ctx.message.delete()
level = int(level)
if 0 < level < 5:
pass
#! await self.console.print_console(level=level)
@command()
async def testCmd(self, ctx):
await ctx.message.delete()
await ctx.send(self.doorStatusNotificationChannelId)
#print(self.filterSettings)
#print(self.botResponds)
#print(self.commandServerLog)
#await ctx.send(ctx.author.roles)
@command()
async def testCmd1(self, ctx, emj):
guild = ctx.guild
await ctx.message.add_reaction(str(emj))
for emjj in guild.emojis:
if emj == str(emjj):
break
await ctx.send(emjj.id)
"""
@command(name='seslisil')
@commands.has_permissions(manage_messages=True)
async def seslisil(self, ctx):
guild = ctx.guild
for channel in guild.voice_channels:
if "grup" in (str(channel.name)).lower():
await channel.delete()
await ctx.send("Sesli kanallar silindi.")
@command(name='yazilisil')
@commands.has_permissions(manage_messages=True)
async def yazilisil(self, ctx):
guild = ctx.guild
for channel in guild.text_channels:
if "grup" in (str(channel.name)).lower():
await channel.delete()
await ctx.send("Yazılı kanallar silindi.")
"""
| 44.354955 | 393 | 0.565585 | 2,779 | 24,617 | 4.949262 | 0.139619 | 0.034317 | 0.028792 | 0.03359 | 0.549149 | 0.505526 | 0.473244 | 0.442417 | 0.391086 | 0.299331 | 0 | 0.020884 | 0.315311 | 24,617 | 554 | 394 | 44.435018 | 0.794898 | 0.027136 | 0 | 0.425263 | 0 | 0.004211 | 0.147975 | 0.03045 | 0 | 0 | 0 | 0.001805 | 0 | 1 | 0.004211 | false | 0.023158 | 0.023158 | 0 | 0.069474 | 0.075789 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
490710a1c10e360b4ab332bdb07ba7c980dbf5ed | 1,246 | py | Python | citrix_hypervisor/tests/test_metadata.py | tdimnet/integrations-core | a78133a3b71a1b8377fa214d121a98647031ab06 | [
"BSD-3-Clause"
] | 663 | 2016-08-23T05:23:45.000Z | 2022-03-29T00:37:23.000Z | citrix_hypervisor/tests/test_metadata.py | tdimnet/integrations-core | a78133a3b71a1b8377fa214d121a98647031ab06 | [
"BSD-3-Clause"
] | 6,642 | 2016-06-09T16:29:20.000Z | 2022-03-31T22:24:09.000Z | citrix_hypervisor/tests/test_metadata.py | tdimnet/integrations-core | a78133a3b71a1b8377fa214d121a98647031ab06 | [
"BSD-3-Clause"
] | 1,222 | 2017-01-27T15:51:38.000Z | 2022-03-31T18:17:51.000Z | # (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import json
import os
import mock
import pytest
from datadog_checks.citrix_hypervisor import CitrixHypervisorCheck
from . import common
@pytest.mark.usefixtures('mock_responses')
def test_collect_metadata(datadog_agent, instance):
check = CitrixHypervisorCheck('citrix_hypervisor', {}, [instance])
check.check_id = 'test:123'
version_metadata = {
'version.scheme': 'semver',
'version.major': '8',
'version.minor': '2',
'version.patch': '0',
'version.raw': '8.2.0',
}
with open(os.path.join(common.HERE, 'fixtures', 'standalone', 'version.json'), 'rb') as f:
content = json.load(f)
xenserver = common.mocked_xenserver('master')
xenserver.session.get_this_host.return_value = {'Status': 'Success', 'Value': 'hostref'}
xenserver.host.get_software_version.return_value = content
with mock.patch('six.moves.xmlrpc_client.Server', return_value=xenserver):
check.check(None)
datadog_agent.assert_metadata('test:123', version_metadata)
datadog_agent.assert_metadata_count(len(version_metadata))
| 32.789474 | 96 | 0.684591 | 149 | 1,246 | 5.557047 | 0.563758 | 0.043478 | 0.048309 | 0.05314 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016815 | 0.188604 | 1,246 | 37 | 97 | 33.675676 | 0.802176 | 0.086677 | 0 | 0 | 0 | 0 | 0.19224 | 0.026455 | 0 | 0 | 0 | 0 | 0.076923 | 1 | 0.038462 | false | 0 | 0.230769 | 0 | 0.269231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b213b8c923ba11970c21634a12acda42294786c | 7,527 | py | Python | clinvar-variant-types/clinvar-variant-types.py | tskir/eva-cttv-pipeline | b1f78a174d4570232512a15f381bbf4a97b473bb | [
"Apache-2.0"
] | null | null | null | clinvar-variant-types/clinvar-variant-types.py | tskir/eva-cttv-pipeline | b1f78a174d4570232512a15f381bbf4a97b473bb | [
"Apache-2.0"
] | null | null | null | clinvar-variant-types/clinvar-variant-types.py | tskir/eva-cttv-pipeline | b1f78a174d4570232512a15f381bbf4a97b473bb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import argparse
from collections import Counter
import gzip
import re
import sys
import xml.etree.ElementTree as ElementTree
SIG_STARS = {
'practice guideline': 4,
'reviewed by expert panel': 3,
'criteria provided, multiple submitters, no conflicts': 2,
'criteria provided, conflicting interpretations': 1,
'criteria provided, single submitter': 1,
}
parser = argparse.ArgumentParser()
parser.add_argument('--clinvar-xml', required=True)
args = parser.parse_args()
def add_transitions(transitions_counter, transition_chain):
"""Increments the count of a particular flow in Sankey diagram."""
for transition_from, transition_to in zip(transition_chain, transition_chain[1:]):
transitions_counter[(transition_from, transition_to)] += 1
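# Illustrative example: add_transitions(counter, ('RCV', 'MeasureSet', 'Variant'))
# increments both counter[('RCV', 'MeasureSet')] and counter[('MeasureSet', 'Variant')],
# i.e. one unit of flow along the whole chain.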
def find_attribute(rcv, xpath, attribute_name):
"""Find an attribute in the RCV record which can have either zero or one occurrence. Return a textual representation
of the attribute, including special representations for the case of zero or multiple, constructed using the
attribute_name parameter."""
attributes = rcv.findall(xpath)
if len(attributes) == 0:
return '{} missing'.format(attribute_name)
elif len(attributes) == 1:
return attributes[0].text
else:
return '{} multiple'.format(attribute_name)
def review_status_stars(review_status):
black_stars = SIG_STARS.get(review_status, 0)
white_stars = 4 - black_stars
return '★' * black_stars + '☆' * white_stars
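# Illustrative examples: 'reviewed by expert panel' -> '★★★☆';
# any status missing from SIG_STARS maps to zero black stars, i.e. '☆☆☆☆'.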
# The dicts store transition counts for the Sankey diagrams. Keys are (from, to), values are transition counts.
# Sankey diagrams can be visualised with SankeyMatic (see http://www.sankeymatic.com/build/).
variant_type_transitions, clin_sig_transitions, review_status_transitions, inheritance_mode_transitions,\
allele_origin_transitions = Counter(), Counter(), Counter(), Counter(), Counter()
all_clinical_significance_levels = set()
# ClinVar XML have the following top-level structure:
# <ReleaseSet>
# <ClinVarSet>...</ClinVarSet>
# <ClinVarSet>...</ClinVarSet>
# ...
# </ReleaseSet>
# To not load everything into memory, we use iterparse, wait for a complete ClinVarSet element, and then remove it from
# the tree because we don't need it anymore.
elements_processed = 0
for event, elem in ElementTree.iterparse(gzip.open(args.clinvar_xml)):
# Wait until we have built a complete ClinVarSet element; skip all other elements.
if elem.tag != 'ClinVarSet':
continue
# Go to a ReferenceClinVarAssertion element. This corresponds to a single RCV record, the main unit of ClinVar.
# There should only be one such record per ClinVarSet.
rcv_records = elem.findall('ReferenceClinVarAssertion')
assert len(rcv_records) == 1, 'Found multiple RCV records per ClinVarSet'
rcv = rcv_records[0]
rcv_id = 'RCV{:09}'.format(int(rcv.attrib['ID']))
# RCV can contain either a MeasureSet, or a GenotypeSet. It must not contain both.
measure_sets = rcv.findall('MeasureSet')
genotype_sets = rcv.findall('GenotypeSet')
if len(measure_sets) == 1 and len(genotype_sets) == 0:
# Most common case. RCV directly contains one measure set.
measure_set = measure_sets[0]
measure_set_type = measure_set.attrib['Type']
add_transitions(variant_type_transitions, ('RCV', 'MeasureSet', measure_set_type))
if measure_set_type == 'Variant':
# Most common case, accounting for >99.95% of all ClinVar records. Here, we go into detail on various
# attribute distributions.
# Variant type
measures = measure_set.findall('Measure')
assert len(measures) == 1, 'MeasureSet of type Variant must contain exactly one Measure'
add_transitions(variant_type_transitions, (measure_set_type, measures[0].attrib['Type']))
# Clinical significance
clinical_significance = find_attribute(
rcv, 'ClinicalSignificance/Description', 'ClinicalSignificance')
all_clinical_significance_levels.add(clinical_significance)
significance_type = 'Complex' if re.search('[,/]', clinical_significance) else 'Simple'
add_transitions(clin_sig_transitions, (
'Variant',
significance_type,
clinical_significance,
))
# Review status
review_status = find_attribute(
rcv, 'ClinicalSignificance/ReviewStatus', 'ReviewStatus')
add_transitions(review_status_transitions, (
'Variant',
review_status_stars(review_status),
review_status,
))
# Mode of inheritance
mode_of_inheritance_xpath = 'AttributeSet/Attribute[@Type="ModeOfInheritance"]'
mode_of_inheritance = find_attribute(rcv, mode_of_inheritance_xpath, 'ModeOfInheritance')
if mode_of_inheritance.endswith('multiple'):
# Having multiple ModeOfInheritance is rare. Log them for further investigation
all_modes = '|'.join(sorted(mode.text for mode in rcv.findall(mode_of_inheritance_xpath)))
print(f'Multiple ModeOfInheritance\t{rcv_id}\t{all_modes}')
add_transitions(inheritance_mode_transitions, (
'Variant',
mode_of_inheritance if mode_of_inheritance.endswith('missing') else 'ModeOfInheritance present',
))
if not mode_of_inheritance.endswith('missing'):
add_transitions(inheritance_mode_transitions, (
'ModeOfInheritance present', mode_of_inheritance
))
elif len(measure_sets) == 0 and len(genotype_sets) == 1:
# RCV directly contains one genotype set.
genotype_set = genotype_sets[0]
add_transitions(variant_type_transitions, ('RCV', 'GenotypeSet', genotype_set.attrib['Type']))
else:
raise AssertionError('RCV must contain either exactly one measure set, or exactly one genotype set')
allele_origins = {origin.text for origin in rcv.findall('ObservedIn/Sample/Origin')}
if len(allele_origins) == 0:
add_transitions(allele_origin_transitions, ('RCV', 'No allele origin'))
else:
allele_origins_count = 'Single allele origin' if len(allele_origins) == 1 else 'Multiple allele origins'
allele_origins_text = ','.join(sorted(allele_origins))
add_transitions(allele_origin_transitions, ('RCV', allele_origins_count, allele_origins_text))
# Remove the processed element from the tree to save memory
elem.clear()
# Track the number of already processed elements
elements_processed += 1
if elements_processed % 10000 == 0:
print('Processed {} elements'.format(elements_processed), file=sys.stderr)
# Output the code for Sankey diagram. Transitions are sorted in decreasing number of counts, so that the most frequent
# cases are on top.
for transitions_counter in (variant_type_transitions, clin_sig_transitions, review_status_transitions,
inheritance_mode_transitions, allele_origin_transitions):
print()
for (transition_from, transition_to), count in sorted(transitions_counter.items(), key=lambda x: -x[1]):
print('{transition_from} [{count}] {transition_to}'.format(**locals()))
print('\n\nAll clinical significance levels:')
for clin_sig in sorted(all_clinical_significance_levels):
print(clin_sig)
| 45.343373 | 120 | 0.695231 | 889 | 7,527 | 5.695163 | 0.292463 | 0.026072 | 0.033577 | 0.029232 | 0.149121 | 0.075449 | 0.044243 | 0.044243 | 0.044243 | 0.044243 | 0 | 0.006759 | 0.213764 | 7,527 | 165 | 121 | 45.618182 | 0.848429 | 0.234622 | 0 | 0.114286 | 0 | 0 | 0.178478 | 0.035521 | 0 | 0 | 0 | 0 | 0.038095 | 1 | 0.028571 | false | 0 | 0.057143 | 0 | 0.12381 | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b234e632f18a852be723bff31996127a7db163f | 3,965 | py | Python | ithkuil/morphology/words/formative.py | fizyk20/ithkuil | b77db331ac829b75a3a3782461cb1202c135b272 | [
"MIT"
] | 20 | 2016-03-01T04:21:21.000Z | 2021-04-28T06:52:30.000Z | ithkuil/morphology/words/formative.py | fizyk20/ithkuil | b77db331ac829b75a3a3782461cb1202c135b272 | [
"MIT"
] | 5 | 2016-01-07T16:22:25.000Z | 2019-12-26T01:49:16.000Z | ithkuil/morphology/words/formative.py | fizyk20/ithkuil | b77db331ac829b75a3a3782461cb1202c135b272 | [
"MIT"
] | 2 | 2015-06-14T16:13:26.000Z | 2017-04-30T15:43:50.000Z | from .word import Word
from ithkuil.morphology.database import ithWordType, Session
from ..exceptions import AnalysisException
class Formative(Word):
wordType = Session().query(ithWordType).filter(ithWordType.name == 'Formative').first()
categories = [
'Root',
'Stem and Pattern',
'Designation',
'Incorporated root',
'Stem and Pattern (inc)',
'Designation (inc)',
'Perspective (inc)',
'Configuration (inc)',
'Case (inc)',
'Format',
'Relation',
'Function',
'Case',
'Essence',
'Extension',
'Perspective',
'Affiliation',
'Configuration',
'Context',
'Aspect',
'Mood',
'Phase',
'Sanction',
'Illocution',
'Version',
'Valence',
'Bias'
]
def analyze(self):
vc = self.slots['Vc']
vcparts = [x for x in vc.split('’') if x]
if len(vcparts) > 1:
self.slots['Vc'] = vcparts[0] + '’V'
vr = vcparts[1]
if 'Vr' in self.slots and self.slots['Vr'] != 'a' and vr != 'a':
raise AnalysisException('Duplicate Vr: in slots IV and VII!')
else:
self.slots['Vr'] = vr
def fillResult(self, add, suffix):
if 'Cx' in self.slots:
add('Cv')
add('Vl')
add('Cg')
add('Cs')
add('Vr')
if 'Cx' in self.slots:
add('Cx')
add('Vp')
else:
add('Cv')
add('Vl')
add('Cr')
add('Vc')
add('Ci+Vi')
add('Ca')
if 'VxC' in self.slots:
for suf in self.slots['VxC']:
suffix(suf)
add('Vf')
add('Cb')
add('[tone]')
add('[stress]')
def abbreviatedDescription(self):
desc = []
def values(slot):
if slot == 'Cx' or slot == 'Cr':
return self.slots[slot]
vals = self.slots_values(slot)
codes = map(lambda x: x.code, vals)
return '/'.join(codes)
def add(slot):
if slot not in self.slots:
return
vals = values(slot)
if slot == 'Cb' and 'Cb+' in self.slots:
vals += '+' if self.slots['Cb+'] else ''
desc.append(vals)
def suffix(suf):
deg = self.atom(self.morpheme('VxC', suf['degree'])).values[0].code
suf = self.atom(self.morpheme('VxC', suf['type'])).values[0].code
desc.append('%s_%s' % (suf, deg))
self.fillResult(add, suffix)
return '-'.join(desc)
def fullDescription(self):
desc = { 'type': 'Formative', 'categories': self.categories }
def values(slot):
if slot == 'Cx':
return { 'Incorporated root': self.slots[slot] }
elif slot == 'Cr':
return { 'Root': self.slots[slot] }
vals = self.slots_values(slot)
result = { x.category.name: {'code': x.code, 'name': x.name} for x in vals }
if slot == 'Vp':
result = { k + ' (inc)': v for k, v in result.items()}
return result
def add(slot):
if slot not in self.slots:
return
vals = values(slot)
if slot == 'Cb' and self.slots.get('Cb+'):
vals['Bias']['name'] += '+'
vals['Bias']['code'] += '+'
desc.update(vals)
def suffix(suf):
if 'suffixes' not in desc:
desc['suffixes'] = []
deg = self.atom(self.morpheme('VxC', suf['degree'])).values[0].name
suf = self.atom(self.morpheme('VxC', suf['type'])).values[0]
desc['suffixes'].append({'code': suf.code, 'name': suf.name, 'degree': deg})
self.fillResult(add, suffix)
return desc
| 28.941606 | 91 | 0.466583 | 426 | 3,965 | 4.335681 | 0.265258 | 0.092583 | 0.047645 | 0.034651 | 0.283162 | 0.270709 | 0.193828 | 0.193828 | 0.154846 | 0.154846 | 0 | 0.002832 | 0.376545 | 3,965 | 136 | 92 | 29.154412 | 0.744337 | 0 | 0 | 0.205128 | 0 | 0 | 0.141307 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08547 | false | 0 | 0.025641 | 0 | 0.213675 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b2525186bd1174470d63733d983f318bfdb6868 | 13,034 | py | Python | cmorize/cmor_var.py | duncanwp/cmorize | 77839f19326d3a54f3e461b56f78a765214678d1 | [
"MIT"
] | 1 | 2019-07-24T12:45:41.000Z | 2019-07-24T12:45:41.000Z | cmorize/cmor_var.py | duncanwp/cmorize | 77839f19326d3a54f3e461b56f78a765214678d1 | [
"MIT"
] | null | null | null | cmorize/cmor_var.py | duncanwp/cmorize | 77839f19326d3a54f3e461b56f78a765214678d1 | [
"MIT"
] | 1 | 2018-04-26T08:03:35.000Z | 2018-04-26T08:03:35.000Z | """
A module for writing Iris cubes to CMOR / AeroCom compatible files
Adapted from script by Zak Kipling
(c) Duncan watson-parris 2017
"""
import cis
import iris
import numpy as np
from utils import get_time_delta
def get_time_freq(time_coord, pdrmip_format):
"""
Take a time coordinate and return a string describing it's frequency
:param iris.Coords.Coord time_coord:
:param bool pdrmip_format: Is the string format required a PDRMIP one?
:return:
"""
time_delta = get_time_delta(time_coord)
# The above call will either return a length one timestep - or raise a ValueError (which we allow to bubble up)
if time_delta.microseconds:
raise ValueError("Non-integer number of seconds (+%d us) between timesteps" % time_delta.microseconds)
elif time_delta.seconds % 3600:
raise ValueError("Non-integer number of hours (+%d s) between timesteps" % time_delta.seconds)
elif time_delta.days == 30:
time_freq = "Amon" if pdrmip_format else "monthly"
elif time_delta.seconds == 3600:
time_freq = "hourly"
elif time_delta.seconds:
time_freq = "%dhourly" % (time_delta.seconds / 3600)
elif time_delta.days == 1:
time_freq = 'day' if pdrmip_format else "daily"
elif time_delta.days > 1:
time_freq = "%ddaily" % time_delta.days
else:
raise ValueError("Repeating time steps")
return time_freq
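# Illustrative examples, assuming a regularly spaced time coordinate:
# a 30-day step yields 'Amon' (PDRMIP) or 'monthly' (AeroCom), a 1-day step
# yields 'day' or 'daily', and a 10800-second step yields '3hourly' in both formats.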
def select_vars(cmor_vars, var_names):
# This slightly backward method ensures that an error is thrown if the var isn't in the list of cmor_vars
cmor_var_names = [v.cmor_var_name for v in cmor_vars]
return [cmor_vars[cmor_var_names.index(v)] for v in var_names]
def get_daily_cubes(c):
import iris.coord_categorisation
iris.coord_categorisation.add_day_of_month(c, 'time')
iris.coord_categorisation.add_month_number(c, 'time')
# Create the aggregation group-by instance.
groupby = iris.analysis._Groupby([c.coord('day_of_month'), c.coord('month_number')])
dimension_to_groupby = c.coord_dims(c.coord('day_of_month'))[0]
cube_slice = [slice(None, None)] * len(c.shape)
for groupby_slice in groupby.group():
# for day in c.slices_over('day_of_month'):
# Put the groupby slice in the right place
cube_slice[dimension_to_groupby] = groupby_slice
day = c[tuple(cube_slice)]
yield day
def get_monthly_cubes(c):
import iris.coord_categorisation
iris.coord_categorisation.add_year(c, 'time')
iris.coord_categorisation.add_month_number(c, 'time')
# Create the aggregation group-by instance.
groupby = iris.analysis._Groupby([c.coord('month_number'), c.coord('year')])
dimension_to_groupby = c.coord_dims(c.coord('month_number'))[0]
cube_slice = [slice(None, None)] * len(c.shape)
for groupby_slice in groupby.group():
# Put the groupby slice in the right place
cube_slice[dimension_to_groupby] = groupby_slice
month = c[tuple(cube_slice)]
yield month
def output_monthly_cubes(cube, filename_template='out_{}'):
for c in get_monthly_cubes(cube):
# Just take the first value since they're all in the same month...
date = c.coord('time').units.num2date(c.coord('time').points[0])
date_fmt = date.strftime('%Y%m')
out = filename_template.format(date_fmt)
print("Saving to {}...".format(out))
iris.save(c, out)
class cmor_var:
def __init__(self, cmor_var_name, load, stream='', long_name=None, standard_name=None,
units=None, vertical_coord_type=None, scaling=1.0, comment=None, product=None):
"""
A class for representing a single CMOR-valid variable. The attributes (standard_name, long_name, etc.) are only
needed if they aren't present in the model output (or are different, e.g. in the case of units).
:param str cmor_var_name: The cmor-approved variable name
:param callable or str load: Either CIS style variable name(s) or a callable function returning a single cube
:param str stream: The filestream in which to find the above variable(s)
The following are optional attributes
:param str long_name: Variable long-name
:param str standard_name: CF compliant standard name
:param cf_units.Unit units: The required output units (which will be converted to)
:param str vertical_coord_type: The vertical coordinate type (Surface, Column, etc). An educated guess will be
made if not present
:param float scaling: A constant scaling to apply
:param str comment: Any comment attributes to add
:param str product: CIS plugin name for reading the variable
"""
self.product = product
self.comment = comment
self.scaling = scaling
self.vertical_coord_type = vertical_coord_type
self.units = units
self.standard_name = standard_name
self.long_name = long_name
self.stream = stream
self.load = load
self.cmor_var_name = cmor_var_name
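# A hedged construction sketch (the variable, stream, load name and units below
# are illustrative, not taken from any real model configuration):
#
#     od550aer = cmor_var('od550aer', load='atmosphere_optical_thickness_due_to_aerosol',
#                         stream='pm', units='1', vertical_coord_type='Column')
#     cube = od550aer.load_var('run01.nc')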
def load_var(self, infile, product=None):
# Take the specific variable product over the general one if there is one
product = self.product or product
if callable(self.load):
cube = self.load(self.stream_file(infile), product=product)
else:
cube = cis.read_data(self.stream_file(infile), self.load, product)
return cube
def stream_file(self, infile):
from utils import filename_suffix
return filename_suffix(infile, self.stream)
def write_var(self, cube, outfile, experiment_info=None, contact_info=None,
output_monthly=False):
"""
Write the variable to a single file
:param iris.cube.Cube cube: The data cube to be output
:param str outfile: The output file to write to
:param str experiment_info: e.g. "hindcast experiment (1980-2008); ACCMIP-MACCity emissions; nudged to ERAIA.";
:param str contact_info: e.g. "Nick Schutgens (schutgens@physics.ox.ac.uk)";
"""
from iris.std_names import STD_NAMES
from cf_units import Unit
print("Process %s: '%s' / %s" % (self.cmor_var_name, self.long_name, self.standard_name))
# Do scaling and conversions
cube *= self.scaling
if self.units is not None:
if isinstance(cube.units, Unit) and (not cube.units.is_unknown()) and (cube.units != '1'):
cube.convert_units(self.units)
else:
print("WARNING: Setting units to '{}'without conversion".format(self.units))
cube.units = self.units
# Update attributes
cube.var_name = self.cmor_var_name
if self.long_name is not None:
cube.long_name = self.long_name
if self.standard_name is not None:
if self.standard_name not in STD_NAMES:
STD_NAMES[self.standard_name] = {"canonical_units": cube.units}
cube.standard_name = self.standard_name
if self.comment is not None:
cube.attributes['comment'] = self.comment
if experiment_info is not None:
cube.attributes['info_exp'] = experiment_info
if contact_info is not None:
cube.attributes['info_contact'] = contact_info
if output_monthly:
output_monthly_cubes(cube, outfile.replace("{{}}", "{}"))
else:
iris.save(cube, outfile)
def get_output_file(self, cube=None, time=None, outbase=None, daily=False, monthly=False, three_hourly=False,
pdrmip_format=False, output_monthly=False):
"""
Construct the output filename for the variable
:param iris.cube.Cube cube: The data cube to be output
:param str time: The time period to use for variables that don't have a unique one
:param str outbase: The output filename base: aerocom3_<!ModelName>_<!ExperimentName>
:param bool daily: Assume daily output
:param bool monthly: Assume monthly output
:param bool three_hourly: Assume three_hourly output
:param bool pdrmip_format: Construct filename in the pdrmip way?
"""
import os.path
# Figure out the vertical coordinate type
if self.vertical_coord_type is not None:
vert_coord = self.vertical_coord_type
else:
vert_coord = cube.coords(axis="Z", dim_coords=True)
if len(vert_coord) == 0:
vert_coord = cube.coords(axis="Z")
print("vert_coord: %s" % (", ".join(c.name() for c in vert_coord)))
if len(vert_coord) == 0:
if cube.standard_name and cube.standard_name.startswith("atmosphere_boundary_layer_"):
vert_coord = "Surface"
elif cube.standard_name and cube.standard_name.startswith("atmosphere_"):
vert_coord = "Column"
elif cube.standard_name and cube.standard_name.startswith("surface_"):
vert_coord = "Surface"
else:
raise ValueError("Unknown vertical coordinate type for %s" % self.cmor_var_name)
elif len(vert_coord) == 1:
vert_coord, = vert_coord
if vert_coord.name() in ["model_level_number",
"atmosphere_hybrid_height_coordinate",
"atmosphere_hybrid_sigma_pressure_coordinate",
"hybrid level at layer midpoints"]:
vert_coord = "ModelLevel"
elif vert_coord.standard_name and vert_coord.standard_name == 'air_pressure':
vert_coord = "PressureLevel"
else:
raise ValueError(
"Unknown vertical coordinate type (%s) for %s" % (vert_coord.name(), self.cmor_var_name))
else:
raise ValueError("Multiple vertical coordinates (%s) for %s" % (
", ".join(c.name() for c in vert_coord), self.cmor_var_name))
print("vert_coord guessed: %s" % vert_coord)
if time and (daily or monthly or three_hourly):
time_period = time
if daily:
time_freq = 'day' if pdrmip_format else "daily"
if monthly:
time_freq = "Amon" if pdrmip_format else "monthly"
if three_hourly:
time_freq = '3hourly'
else:
# Figure out the time period and frequency
# TODO This doesn't actually shift the time reference, but Nick's script converts to 1850-01-01,00:00:00
time_coord = cube.coords('time')
print("time_coord: %s " % (", ".join(c.name() for c in time_coord)))
if len(time_coord) == 1:
time_coord, = time_coord
time_period = np.unique([("%04d%02d%02d" % (d.year, d.month, d.day) if daily
else "%04d%02d" % (d.year, d.month) if monthly else "%04d" % d.year)
for d in time_coord.units.num2date(time_coord.points)])
if len(time_period) == 0:
time_period = time if time is not None else "9999"
elif len(time_period) > 1:
if time is not None:
time_period = time
else:
raise ValueError("Multiple time periods (%s-%s) for %s" % (
min(time_period), max(time_period), self.cmor_var_name))
else:
time_period, = time_period
time_freq = get_time_freq(time_coord, pdrmip_format)
elif len(time_coord) == 0:
time_period = time if time else "9999"
time_freq = 'fx' if pdrmip_format else 'timeinvariant'
else:
raise ValueError("Multiple time coordinates (%s) for %s" % (", ".join(time_coord), self.cmor_var_name))
#print("time_period: %s" % time_period)
#print("time_coord: %s" % time_freq)
if pdrmip_format:
output_template = "{var}_{freq}_{model_exp}_{period}.nc"
elif output_monthly:
# Create an extra placeholder for the specific month, escaped so the first format call leaves it intact
output_template = "{model_exp}_{var}_{vert}_{{}}_{freq}.nc"
else:
output_template = "{model_exp}_{var}_{vert}_{period}_{freq}.nc"
output_path, model_exp = os.path.split(outbase)
outfile = os.path.join(output_path, output_template.format(model_exp=model_exp, var=self.cmor_var_name,
vert=vert_coord, period=time_period,
freq=time_freq))
return outfile
| 46.716846 | 119 | 0.618229 | 1,700 | 13,034 | 4.550588 | 0.192353 | 0.026758 | 0.018485 | 0.01939 | 0.295372 | 0.238108 | 0.204628 | 0.170243 | 0.139736 | 0.112203 | 0 | 0.008561 | 0.292006 | 13,034 | 278 | 120 | 46.884892 | 0.829757 | 0.235154 | 0 | 0.178378 | 0 | 0 | 0.111848 | 0.022991 | 0 | 0 | 0 | 0.003597 | 0 | 1 | 0.054054 | false | 0 | 0.054054 | 0 | 0.140541 | 0.032432 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b286e35d688b950f23fdbcc3aeed4741b7fec7f | 1,381 | py | Python | hostmanager/tomato/lib/newcmd/tcpserver.py | dswd/ToMaTo | 355fd3a8c7f95dc72c62383b3edfa8f6c0396bf4 | [
"BSD-4-Clause-UC"
] | 2 | 2016-11-10T06:12:05.000Z | 2016-11-10T06:12:10.000Z | hostmanager/tomato/lib/newcmd/tcpserver.py | dswd/ToMaTo | 355fd3a8c7f95dc72c62383b3edfa8f6c0396bf4 | [
"BSD-4-Clause-UC"
] | 2 | 2015-01-19T16:00:24.000Z | 2015-01-20T11:33:56.000Z | hostmanager/tomato/lib/newcmd/tcpserver.py | dswd/ToMaTo | 355fd3a8c7f95dc72c62383b3edfa8f6c0396bf4 | [
"BSD-4-Clause-UC"
] | 1 | 2016-11-10T06:12:15.000Z | 2016-11-10T06:12:15.000Z | from . import Error, netstat, SUPPORT_CHECK_PERIOD
from util import spawnDaemon, params, proc, wait, cmd, cache
class TcpserverError(Error):
CODE_UNKNOWN="tcpserver.unknown"
CODE_UNSUPPORTED="tcpserver.unsupported"
CODE_PORT_USED="tcpserver.port_used"
CODE_STILL_RUNNING="tcpserver.still_running"
@cache.cached(timeout=SUPPORT_CHECK_PERIOD)
def _check():
TcpserverError.check(cmd.exists("tcpserver"), TcpserverError.CODE_UNSUPPORTED, "Binary tcpserver does not exist")
return True
def _public(method):
def call(*args, **kwargs):
_check()
return method(*args, **kwargs)
call.__name__ = method.__name__
call.__doc__ = method.__doc__
return call
######################
### Public methods ###
######################
def checkSupport():
return _check()
@_public
def start(port, command):
port = params.convert(port, convert=int, gte=1, lt=2**16)
command = params.convert(command, convert=list)
netstat.checkPortFree(port, tcp=True, ipv4=True)
pid = spawnDaemon(["tcpserver", "-qHRl", "0", "0", str(port)] + command)
try:
wait.waitFor(lambda :netstat.isPortUsedBy(port, pid), failCond=lambda :not proc.isAlive(pid))
return pid
except wait.WaitError:
proc.autoKill(pid, group=True)
raise
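# A minimal usage sketch (the port and command below are illustrative):
#
#     pid = start(8080, ["cat"])   # echo each TCP connection back via `cat`
#     ...
#     stop(pid)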
@_public
def stop(pid):
proc.autoKill(pid, group=True)
TcpserverError.check(not proc.isAlive(pid), TcpserverError.CODE_STILL_RUNNING, "Failed to stop tcpserver") | 30.021739 | 114 | 0.732078 | 176 | 1,381 | 5.534091 | 0.420455 | 0.036961 | 0.036961 | 0.034908 | 0.049281 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005686 | 0.108617 | 1,381 | 46 | 115 | 30.021739 | 0.78554 | 0.010138 | 0 | 0.111111 | 0 | 0 | 0.121581 | 0.033435 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.055556 | 0.027778 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b297a0e675e6ac79f6a293ab66265114ba2620d | 4,860 | py | Python | p3_collab-compet/maddpg_agent.py | Bhuvans/Udacity_DeepRL_ND | cb6280aea21322e711c40c1521b96cb940cfa1f5 | [
"MIT"
] | null | null | null | p3_collab-compet/maddpg_agent.py | Bhuvans/Udacity_DeepRL_ND | cb6280aea21322e711c40c1521b96cb940cfa1f5 | [
"MIT"
] | null | null | null | p3_collab-compet/maddpg_agent.py | Bhuvans/Udacity_DeepRL_ND | cb6280aea21322e711c40c1521b96cb940cfa1f5 | [
"MIT"
] | null | null | null | import copy
import torch
from model import Actor, Critic
from torch.optim import Adam
import torch.nn.functional as F
import numpy as np
WEIGHT_DECAY = 0.
ACTOR_LR = 1e-4
CRITIC_LR = 5e-3
GAMMA = 0.995
TAU = 0.001
SEED = 0
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class OUNoise:
""" Ornstein-Uhlenbeck process. Adds noise to the deterministic action from the actor network. Contributes to exploration. Any other reason you would want such a noise added to the action ? """
def __init__(self, size, mu=0., theta=0.15, sigma=0.2):
self.mu = mu * np.ones(size)
self.theta = theta
self.sigma = sigma
self.reset()
def reset(self):
self.state = copy.copy(self.mu)
def sample(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.array([np.random.randn() for i in range(len(x))])
self.state = x + dx
return self.state
def decode(size, agent_id, arr):
list_indices = torch.tensor([idx for idx in range(agent_id*size, agent_id*size+size)]).to(device)
return arr.index_select(1, list_indices)
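# Illustrative example: with a per-agent size of 24 and agent_id=1,
# decode(24, 1, states) selects columns 24..47 of the stacked tensor,
# i.e. the second agent's slice of the joint observation.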
class Agent:
""" Interacts with and learns from the environment. """
def __init__(self, agent_id, num_agents, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
self.agent_id = agent_id
self.actor_local = Actor(state_size, action_size, SEED).to(device)
self.actor_target = Actor(state_size, action_size, SEED).to(device)
self.actor_optimizer = Adam(self.actor_local.parameters(), lr = ACTOR_LR)
self.critic_local = Critic(state_size*num_agents, action_size*num_agents, SEED).to(device)
self.critic_target = Critic(state_size*num_agents, action_size*num_agents, SEED).to(device)
self.critic_optimizer = Adam(self.critic_local.parameters(), lr = CRITIC_LR, weight_decay = WEIGHT_DECAY)
self.noise = OUNoise(action_size)
self.t_step = 0
def act(self, state, add_noise=True):
state = torch.from_numpy(state).float().to(device)
self.actor_local.eval()
with torch.no_grad():
action = self.actor_local(state.unsqueeze(0)).cpu().data.numpy()
self.actor_local.train()
if add_noise:
action += self.noise.sample()
return np.clip(action, -1, 1)
def reset(self):
self.noise.reset()
def learn(self, experiences, other_agents, gamma=GAMMA):
states, actions, rewards, next_states, dones = experiences
# update critic
own_states = decode(self.state_size, self.agent_id, states)
other_states = decode(self.state_size, 1-self.agent_id, states)
own_actions = decode(self.action_size, self.agent_id, actions)
other_actions = decode(self.action_size, 1-self.agent_id, actions)
own_next_states = decode(self.state_size, self.agent_id, next_states)
other_next_states = decode(self.state_size, 1-self.agent_id, next_states)
all_states = torch.cat((own_states, other_states), 1)
all_actions = torch.cat((own_actions, other_actions), 1)
all_next_states = torch.cat((own_next_states, other_next_states), 1)
own_next_actions = self.actor_target(own_next_states)
other_next_actions = other_agents[0].actor_target(other_next_states)
actions_next = torch.cat((own_next_actions, other_next_actions), 1)
Q_targets_next = self.critic_target(all_next_states, actions_next)
r = torch.unsqueeze(rewards[:, self.agent_id], 1)
do = torch.unsqueeze(dones[:, self.agent_id], 1)
Q_targets = r + ((gamma*Q_targets_next)*(1-do))
Q_expected = self.critic_local(all_states, all_actions)
critic_loss = F.mse_loss(Q_expected, Q_targets)
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
actions_pred_self = self.actor_local(own_states)
actions_pred_other = other_actions # self.actor_local(other_states).detach()
actions_pred = torch.cat((actions_pred_self, actions_pred_other), 1)
actor_loss = -self.critic_local(all_states, actions_pred).mean()
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
self.soft_update(self.critic_local, self.critic_target, TAU)
self.soft_update(self.actor_local, self.actor_target, TAU)
def soft_update(self, local_model, target_model, tau):
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
| 41.896552 | 197 | 0.66358 | 681 | 4,860 | 4.478708 | 0.209985 | 0.032131 | 0.036066 | 0.019672 | 0.195082 | 0.122623 | 0.12 | 0.12 | 0.096393 | 0.072131 | 0 | 0.010672 | 0.228807 | 4,860 | 116 | 198 | 41.896552 | 0.803095 | 0.059671 | 0 | 0.022727 | 0 | 0 | 0.001975 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102273 | false | 0 | 0.068182 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b299eae64eba561da96f012fa6592a8648e7f1c | 1,600 | py | Python | src/graph_transpiler/webdnn/backend/webgl/kernels/depth2space.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | 1 | 2021-04-09T15:55:35.000Z | 2021-04-09T15:55:35.000Z | src/graph_transpiler/webdnn/backend/webgl/kernels/depth2space.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | null | null | null | src/graph_transpiler/webdnn/backend/webgl/kernels/depth2space.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | null | null | null | from typing import List
from webdnn.backend.webgl.attributes.channel_mode import ChannelMode, ChannelModeEnum
from webdnn.backend.webgl.generator import WebGLDescriptorGenerator
from webdnn.backend.webgl.kernel import Kernel
from webdnn.backend.webgl.kernel_code import KernelCode
from webdnn.backend.webgl.kernels.util import texel_fetch, get_output_position, change_order
from webdnn.graph.axis import Axis
from webdnn.graph.operators.depth2space import Depth2Space
from webdnn.graph.order import OrderNHWC
@WebGLDescriptorGenerator.register_handler(Depth2Space)
def depth2space(op: Depth2Space) -> List[Kernel]:
x = op.inputs["x"]
y = op.outputs["y"]
r = op.parameters['r']
C2 = y.shape_dict[Axis.C]
assert x.order.check_same_axes(OrderNHWC)
assert y.order.check_same_axes(OrderNHWC)
assert ChannelMode.get(x) == ChannelModeEnum.R
assert ChannelMode.get(y) == ChannelModeEnum.R
code = KernelCode(["""
void main() {
ivec4 variable_position_y = """, change_order(get_output_position(y), y.order, OrderNHWC), f""";
int n = variable_position_y.x;
int h2 = variable_position_y.y;
int w2 = variable_position_y.z;
int c2 = variable_position_y.w;
int h1 = h2 / {r};
int w1 = w2 / {r};
int c1 = c2 + (w2-w1*{r})*{C2} + (h2-h1*{r})*{C2}*{r};
gl_FragColor.r = """, texel_fetch(x, change_order("vec4(n, h1, w1, c1)", OrderNHWC, x.order)), """.r;
}
"""], name=op.__class__.__name__)
source = code.generate()
return [Kernel(
source,
code.name,
code.samplers,
code.uniforms,
y
)]
| 32.653061 | 105 | 0.698125 | 219 | 1,600 | 4.936073 | 0.333333 | 0.074006 | 0.078631 | 0.101758 | 0.112858 | 0.061055 | 0 | 0 | 0 | 0 | 0 | 0.019637 | 0.1725 | 1,600 | 48 | 106 | 33.333333 | 0.796828 | 0 | 0 | 0 | 0 | 0.025 | 0.218125 | 0.068125 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.025 | false | 0 | 0.225 | 0 | 0.275 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b2a64f430c0f74642dd4bde99ac074c24f35bef | 4,337 | py | Python | com/radityalabs/Python/preprocessing/pre1.py | radityagumay/BenchmarkSentimentAnalysis_2 | e509a4e749271da701ce9da1d9f16cdd78dcdf40 | [
"Apache-2.0"
] | 3 | 2017-06-08T01:17:55.000Z | 2019-06-02T10:52:36.000Z | com/radityalabs/Python/preprocessing/pre1.py | radityagumay/BenchmarkSentimentAnalysis_2 | e509a4e749271da701ce9da1d9f16cdd78dcdf40 | [
"Apache-2.0"
] | null | null | null | com/radityalabs/Python/preprocessing/pre1.py | radityagumay/BenchmarkSentimentAnalysis_2 | e509a4e749271da701ce9da1d9f16cdd78dcdf40 | [
"Apache-2.0"
] | null | null | null | # http://stevenloria.com/how-to-build-a-text-classification-system-with-python-and-textblob/
# http://tech.thejoestory.com/2015/01/python-textblob-sentiment-analysis.html
from textblob.classifiers import NaiveBayesClassifier
from textblob.sentiments import NaiveBayesAnalyzer
from sklearn.externals import joblib
from textblob import TextBlob
import _pickle as cPickle
import os
import pymysql
import asyncio
# variables
path = os.path.expanduser("~/Python/SamplePython3/com/radityalabs/")
items_total = 39227
items_train_count = 23536
items_test_count = 15691
def connection():
conn = pymysql.connect(
host='127.0.0.1',
user='root', passwd='',
db='sentiment_analysis')
cursor = conn.cursor()
return cursor, conn
def query(cursor):
return cursor.execute("SELECT reviewBody, label FROM sentiment_analysis.review_label_benchmark_with_polarity where length(reviewBody) > 30 and (label = 'pos' or label = 'neg') limit 0, 39227")
def close_connection(cursor, conn):
cursor.close()
conn.close()
def end_word_extractor(document):
tokens = document.split()
first_word, last_word = tokens[0], tokens[-1]
feats = {}
feats["first({0})".format(first_word)] = True
feats["last({0})".format(last_word)] = False
return feats
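# Illustrative example:
#     end_word_extractor("This app is never good enough")
#     -> {'first(This)': True, 'last(enough)': False}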
def sentence():
return "This app is never good enough"
def train_and_test(train, test):
save_train(train)
save_test(test)
cl = NaiveBayesClassifier(train, feature_extractor=end_word_extractor)
blob = TextBlob(sentence(), classifier=cl)
print(sentence() + " label : ", blob.classify())
print("polarity", blob.sentiment.polarity) # polarity and subjectivity
print("subjectivity", blob.sentiment.subjectivity)
# calculate the positive and negative class probabilities
sentiment = TextBlob(sentence(), classifier=cl, analyzer=NaiveBayesAnalyzer())
print("positive", sentiment.sentiment.p_pos)
print("negative", sentiment.sentiment.p_neg)
print("Accuracy: {0}".format(cl.accuracy(test)))
def save_train(train):
with open(path + "/Python/bimbingan_data/twitter_train_23536_1.pickle", "wb") as handle:
cPickle.dump(train, handle)
print("saving train data's is done")
def save_test(test):
with open(path + "/Python/bimbingan_data/twitter_test_15691_1.pickle", "wb") as handle:
cPickle.dump(test, handle)
print("saving test data's is done")
def run_me():
cursor, conn = connection()
query(cursor)
datas = []
for data in cursor:
datas.append((data[0], data[1]))
train = datas[:23536]
test = datas[23536:]
train_and_test(train, test)
close_connection(cursor, conn)
if __name__ == '__main__':
run_me()
# train
# @asyncio.coroutine
# def load_train():
# train = []
# for item in cursor:
# train.append((item[0], item[1]))
# return train
# train = asyncio.get_event_loop().run_until_complete(load_train())
# print(train)
#
# # test
# cursor.execute("SELECT reviewBody, label FROM google_play_crawler.authors17 where length(reviewBody) > 30 and (label = 'pos' or label = 'neg') limit 0, 2317")
# @asyncio.coroutine
# def load_test():
# test = []
# for item in cursor:
# test.append((item[0], item[1]))
# return test
# test = asyncio.get_event_loop().run_until_complete(load_test())
# print(test)
#
# @asyncio.coroutine
# def save_train():
# with open(path + "/Python/bimbingan_data/train_twitter_corpus_34636_1.pickle", "wb") as handle:
# cPickle.dump(train, handle)
# print("Saving train is done")
#
# @asyncio.coroutine
# def save_test():
# with open(path + "/Python/bimbingan_data/train_twitter_corpus_2317_1.pickle", "wb") as handle:
# cPickle.dump(test, handle)
# print("Saving test is done")
#
# asyncio.get_event_loop().run_until_complete(save_train())
# asyncio.get_event_loop().run_until_complete(save_test())
#
# cursor.close()
# connection.close()
#
# def end_word_extractor(document):
# tokens = document.split()
# first_word, last_word = tokens[0], tokens[-1]
# feats = {}
# feats["first({0})".format(first_word)] = True
# feats["last({0})".format(last_word)] = False
# return feats
#
# cl = NaiveBayesClassifier(train, feature_extractor=end_word_extractor)
# joblib.dump(cl, path + '/Python/bimbingan_data/sklearn-joblib-train-twitter-1.pkl') | 32.856061 | 196 | 0.694489 | 566 | 4,337 | 5.150177 | 0.259717 | 0.017153 | 0.03259 | 0.039451 | 0.444597 | 0.420583 | 0.379417 | 0.34717 | 0.249743 | 0.216124 | 0 | 0.026003 | 0.166475 | 4,337 | 132 | 197 | 32.856061 | 0.78036 | 0.4095 | 0 | 0 | 0 | 0.015385 | 0.203194 | 0.077844 | 0 | 0 | 0 | 0 | 0 | 1 | 0.138462 | false | 0.015385 | 0.123077 | 0.030769 | 0.323077 | 0.123077 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b2bab7140532e0dbb277efecde6822b98a6b8f8 | 2,472 | py | Python | universal_landmark_detection/model/networks/globalNet.py | ICT-MIRACLE-lab/YOLO_Universal_Anatomical_Landmark_Detection | 465a3d6afcdb23fdec609efe336beebdc9ed61f4 | [
"MIT"
] | 14 | 2021-07-06T01:54:41.000Z | 2021-12-07T01:31:01.000Z | universal_landmark_detection/model/networks/globalNet.py | xtyawesome/YOLO_Universal_Anatomical_Landmark_Detection | 465a3d6afcdb23fdec609efe336beebdc9ed61f4 | [
"MIT"
] | 3 | 2022-01-01T08:41:29.000Z | 2022-03-29T18:23:40.000Z | universal_landmark_detection/model/networks/globalNet.py | xtyawesome/YOLO_Universal_Anatomical_Landmark_Detection | 465a3d6afcdb23fdec609efe336beebdc9ed61f4 | [
"MIT"
] | 5 | 2021-09-07T12:15:06.000Z | 2021-11-30T12:36:37.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
class myConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1):
super(myConv2d, self).__init__()
padding = (kernel_size-1)//2
self.conv = nn.Conv2d(in_channels, out_channels,
kernel_size=kernel_size, padding=padding)
def forward(self, x):
return self.conv(x)
class dilatedConv(nn.Module):
''' stride == 1 '''
def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1):
super(dilatedConv, self).__init__()
        # effective filter size: f = (kernel_size - 1) * dilation + 1
        # output width: (width - f + 2 * padding) / stride + 1
padding = (kernel_size-1) * dilation // 2
self.conv = nn.Conv2d(in_channels, out_channels,
kernel_size, dilation=dilation, padding=padding)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
return self.relu(self.bn(self.conv(x)))
class GlobalNet(nn.Module):
def __init__(self, in_channels, out_channels, scale_factor=0.25, kernel_size=3, dilations=None):
super(GlobalNet, self).__init__()
self.scale_factor = scale_factor
if not isinstance(in_channels, list):
in_channels = [in_channels]
if not isinstance(out_channels, list):
out_channels = [out_channels]
mid_channels = 128
if dilations is None:
dilations = [1, 2, 5]
for i, n_chan in enumerate(in_channels):
setattr(self, 'in{i}'.format(i=i),
myConv2d(n_chan, mid_channels, 3))
for i, n_chan in enumerate(out_channels):
setattr(self, 'out{i}'.format(i=i),
myConv2d(mid_channels, n_chan, 1))
convs = [dilatedConv(mid_channels, mid_channels,
kernel_size, dilation) for dilation in dilations]
convs = nn.Sequential(*convs)
setattr(self, 'convs{}'.format(i), convs)
def forward(self, x, task_idx=0):
size = x.size()[2:]
sf = self.scale_factor
x = F.interpolate(x, scale_factor=sf)
x = getattr(self, 'in{}'.format(task_idx))(x)
x = getattr(self, 'convs{}'.format(task_idx))(x)
x = getattr(self, 'out{}'.format(task_idx))(x)
x = F.interpolate(x, size=size)
return {'output': torch.sigmoid(x)}
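# Minimal usage sketch (added; assumes a single task with 1 input channel and
# 19 output heatmap channels, and an input size divisible by 1/scale_factor):
if __name__ == '__main__':
    net = GlobalNet(in_channels=1, out_channels=19)
    x = torch.rand(2, 1, 128, 128)
    out = net(x, task_idx=0)['output']  # shape (2, 19, 128, 128), values in (0, 1)
    print(out.shape)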
| 38.030769 | 100 | 0.594256 | 320 | 2,472 | 4.365625 | 0.215625 | 0.07874 | 0.081603 | 0.075161 | 0.299928 | 0.264853 | 0.200429 | 0.163207 | 0.163207 | 0.068719 | 0 | 0.017978 | 0.279935 | 2,472 | 64 | 101 | 38.625 | 0.766854 | 0.038026 | 0 | 0.078431 | 0 | 0 | 0.016878 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.058824 | 0.039216 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b2deec39ba465ca1e637c5b0ddec244e546a9a8 | 8,419 | py | Python | learning_journal/tests.py | nosrac77/pyramid-learning-journal | 2e158ab01a7f4b6b978ac0d3f0695df73570dd2f | [
"MIT"
] | null | null | null | learning_journal/tests.py | nosrac77/pyramid-learning-journal | 2e158ab01a7f4b6b978ac0d3f0695df73570dd2f | [
"MIT"
] | 3 | 2019-12-26T16:38:52.000Z | 2021-06-01T21:52:31.000Z | learning_journal/tests.py | nosrac77/pyramid-learning-journal | 2e158ab01a7f4b6b978ac0d3f0695df73570dd2f | [
"MIT"
] | null | null | null | """Functions that test server functions."""
import pytest
from pyramid import testing
# import transaction
from learning_journal.models import (
Entry,
# get_tm_session
)
from learning_journal.models.meta import Base
@pytest.fixture
def configuration(request):
"""Set up a Configurator instance.
This Configurator instance sets up a pointer to the location of the
database.
It also includes the models from your app's model package.
Finally it tears everything down, including the in-memory SQLite database.
This configuration will persist for the entire duration of your PyTest run.
"""
config = testing.setUp(settings={
'sqlalchemy.url': 'postgres://postgres:Skrillexfan7@localhost:5432/test_db'
})
config.include("learning_journal.models")
def teardown():
testing.tearDown()
request.addfinalizer(teardown)
return config
@pytest.fixture
def db_session(configuration, request):
"""Create a session for interacting with the test database.
This uses the dbsession_factory on the configurator instance to create a
new database session. It binds that session to the available engine
and returns a new session for every call of the dummy_request object.
"""
SessionFactory = configuration.registry["dbsession_factory"]
session = SessionFactory()
engine = session.bind
Base.metadata.create_all(engine)
def teardown():
session.transaction.rollback()
Base.metadata.drop_all(engine)
request.addfinalizer(teardown)
return session
@pytest.fixture
def dummy_request(db_session):
"""Instantiate a fake HTTP Request, complete with a database session.
This is a function-level fixture, so every new request will have a
new database session.
"""
return testing.DummyRequest(dbsession=db_session)
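# Example (sketch): any test that accepts ``dummy_request`` receives a fresh
# DummyRequest whose ``dbsession`` is bound to the test database, e.g.
#
#   def test_example(dummy_request):
#       assert dummy_request.dbsession.query(Entry).count() == 0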
@pytest.fixture
def testapp():
"""Create an instance of our app for testing."""
from webtest import TestApp
from pyramid.config import Configurator
def main():
""" This function returns a Pyramid WSGI application.
"""
config = Configurator()
config.include('pyramid_jinja2')
config.include('.routes')
config.scan()
return config.make_wsgi_app()
app = main()
return TestApp(app)
def test_list_view_returns_html(dummy_request):
"""Function to test if list_view returns proper list of dictionaries."""
from learning_journal.views.default import list_view
response = list_view(dummy_request)
assert isinstance(response, dict)
def test_create_view_returns_title(dummy_request):
"""Update view response has file content."""
from learning_journal.views.default import create_view
request = dummy_request
response = create_view(request)
assert response['title'] == 'Create'
def test_update_view_returns_title(dummy_request):
"""Update view response has file content."""
from learning_journal.views.default import update_view
dummy_request.matchdict['id'] = 1
request = dummy_request
response = update_view(request)
assert response['title'] == 'Update'
def test_response_is_instance_of_dict(dummy_request):
"""Function that tests database gets populated with model object."""
from learning_journal.views.default import list_view
new_entry = Entry(
title='Learning Journal Fun Times',
body='Today I learned all of the things',
creation_date='November 2nd, 2017 7:47pm'
)
dummy_request.dbsession.add(new_entry)
dummy_request.dbsession.commit()
response = list_view(dummy_request)
assert isinstance(response, dict)
def test_model_gets_added_to_test_database(db_session):
    """Adding an Entry to the session increases the row count."""
assert len(db_session.query(Entry).all()) == 0
model = Entry(
title='Learning Journal Fun Times',
body='Today I learned all of the things',
creation_date='November 2nd, 2017 7:47pm'
)
db_session.add(model)
assert len(db_session.query(Entry).all()) == 1
def test_list_view_returns_correct_size_of_test_database(dummy_request):
"""Home view response matches database count."""
from learning_journal.views.default import list_view
response = list_view(dummy_request)
query = dummy_request.dbsession.query(Entry)
assert len(response['entries']) == query.count()
def test_list_view_returns_empty_when_test_database_is_empty(dummy_request):
"""List view returns nothing when there is no data."""
from learning_journal.views.default import list_view
response = list_view(dummy_request)
assert len(response['entries']) == 0
def test_detail_view_return_Entry_instance_and_values(dummy_request):
"""Update view response has file content."""
from learning_journal.views.default import detail_view
new_entry = Entry(
title='Test',
creation_date='01/23/45',
body='Test should pass!'
)
dummy_request.dbsession.add(new_entry)
dummy_request.matchdict['id'] = 1
request = dummy_request
response = detail_view(request)
assert str(response['post']) == '<Entry: {}>.format(self.title)'
assert response['post'].title == 'Test'
assert response['post'].creation_date == '01/23/45'
assert response['post'].body == 'Test should pass!'
def test_detail_view_return_Entry_instance_and_vals_of_correct_model_id(dummy_request):
"""Update view response has file content."""
from learning_journal.views.default import detail_view
first_entry = Entry(
title='Test',
creation_date='01/23/45',
body='Test should pass!'
)
second_entry = Entry(
title='Test 2',
creation_date='99/99/99',
body='This entry is different!'
)
entries = [first_entry, second_entry]
dummy_request.dbsession.add_all(entries)
dummy_request.matchdict['id'] = 2
request = dummy_request
response = detail_view(request)
assert len(dummy_request.dbsession.query(Entry).all()) == 2
assert str(response['post']) == '<Entry: {}>.format(self.title)'
assert response['post'].title == 'Test 2'
assert response['post'].creation_date == '99/99/99'
assert response['post'].body == 'This entry is different!'
def test_list_view_return_Entry_instance_and_only_two_values(dummy_request):
"""Update view response has file content."""
from learning_journal.views.default import list_view
new_entry = Entry(
title='Test',
creation_date='01/23/45'
)
dummy_request.dbsession.add(new_entry)
request = dummy_request
response = list_view(request)
assert 'body' not in response['entries']
assert str(response['entries']) == '[<Entry: {}>.format(self.title)]'
assert response['entries'][0].title == 'Test'
assert response['entries'][0].creation_date == '01/23/45'
def test_create_view_post_empty_is_empty_dict(dummy_request):
"""POST requests without data should return an empty dictionary."""
from learning_journal.views.default import create_view
dummy_request.method = "POST"
response = create_view(dummy_request)
assert response == {}
def test_update_view_replaces_existing_Entry_and_doesnt_alter_list_len(dummy_request):
"""Update view response has file content."""
from learning_journal.views.default import update_view, detail_view
assert len(dummy_request.dbsession.query(Entry).all()) == 0
first_entry = Entry(
title='Test',
creation_date='01/23/45',
body='Test should pass!'
)
dummy_request.dbsession.add(first_entry)
dummy_request.matchdict['id'] = 1
request = dummy_request
prev_entry = detail_view(request)
assert len(dummy_request.dbsession.query(Entry).all()) == 1
assert prev_entry['post'].title == 'Test'
assert prev_entry['post'].id == 1
dummy_request.method = 'POST'
update_entry = {
"title": 'Updated',
"creation_date": '99/99/99',
"body": 'This entry should replace the other one!'
}
dummy_request.POST = update_entry
update_view(dummy_request)
response = dummy_request.dbsession.query(Entry).get(1)
assert len(dummy_request.dbsession.query(Entry).all()) == 1
assert str(response) == '<Entry: {}>.format(self.title)'
assert response.title == 'Updated'
assert response.creation_date == '99/99/99'
assert response.body == 'This entry should replace the other one!'
| 34.363265 | 87 | 0.706616 | 1,087 | 8,419 | 5.275989 | 0.188592 | 0.092066 | 0.043069 | 0.046033 | 0.491543 | 0.435571 | 0.418134 | 0.396164 | 0.327812 | 0.300785 | 0 | 0.014501 | 0.189096 | 8,419 | 244 | 88 | 34.504098 | 0.825546 | 0.171398 | 0 | 0.364706 | 0 | 0 | 0.131128 | 0.02444 | 0 | 0 | 0 | 0 | 0.182353 | 1 | 0.111765 | false | 0.023529 | 0.1 | 0 | 0.241176 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b2df1a8f85d4db15e3b2d21e28748dcd104e396 | 4,679 | py | Python | vehicle.py | syeda27/MonoRARP | 71415d9fc71bc636ac1f5de1a90f033b4e519538 | [
"MIT"
] | 2 | 2020-07-22T07:05:01.000Z | 2021-11-27T13:28:03.000Z | vehicle.py | syeda27/MonoRARP | 71415d9fc71bc636ac1f5de1a90f033b4e519538 | [
"MIT"
] | null | null | null | vehicle.py | syeda27/MonoRARP | 71415d9fc71bc636ac1f5de1a90f033b4e519538 | [
"MIT"
] | null | null | null | import driver_models
import time
"""
A vehicle class encapsulating state features and driver models.
Everything is relative to the ego vehicle: the ego vehicle is assumed to sit
at position (0, 0), so rel_x == x.
"""
class Vehicle:
veh_id = "I do not exist muahahahah"
rel_x = 0 # lateral distance to ego car
rel_y = 0 # longitudinal distance to ego car
rel_vx = 0 # relative lateral velocity
rel_vy = 0 # relative longitudinal velocity
rel_ax = 0 # the above 2 but for acceleration
rel_ay = 0
lateral_distance = 0 # to track lane change progress
longitudinal_model = None # will be IDM, but need parameters
lateral_model = None # will be MOBIL, but need parameters
# this state dict is for this car
def __init__(self,
veh_id,
state_dict,
des_v=120,
hdwy_t=1.5,
min_gap=2.0,
accel=0.3,
deccel=3.0,
p=0.2,
b_safe=3,
a_thr=0.2,
delta_b=0):
self.veh_id = veh_id
self.set_values(state_dict)
self.longitudinal_model = driver_models.IDM_Model(
des_v, hdwy_t, min_gap, accel, deccel)
self.lateral_model = driver_models.MOBIL_Model(
p, b_safe, a_thr, delta_b)
def set_values(self, state_dict):
if "distance_x" in state_dict:
self.rel_x = state_dict['distance_x']
if "distance_y" in state_dict:
self.rel_y = state_dict['distance_y']
if "speed_x" in state_dict:
self.rel_vx = state_dict['speed_x']
if "speed_y" in state_dict:
self.rel_vy = state_dict['speed_y']
if "accel_x" in state_dict:
self.rel_ax = state_dict['accel_x']
if "accel_y" in state_dict:
self.rel_ay = state_dict['accel_y']
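    # Illustrative state_dict (sketch; keys match the checks in set_values):
    #   {"distance_x": 1.2, "distance_y": 25.0,
    #    "speed_x": 0.0, "speed_y": -1.5,
    #    "accel_x": 0.0, "accel_y": 0.1}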
def get_lateral_accel(self, fore_vehicle, scene, step=0.2):
if self.lateral_distance > 0: # in a lane change maneuver
if self.lateral_distance >= scene.lane_width_m:
# lane change is done, stop changing lanes
self.lateral_distance = 0
return -(self.rel_vx + scene.ego_vel[0]) / step
else:
# still changing lanes
return 0
# see if we want to change lanes
# check move right
back_vehicle_right = scene.get_back_vehicle_right(scene.scene, self)
backs_fore_vehicle_right = scene.get_fore_vehicle(scene.scene, back_vehicle_right)
change_lanes = self.lateral_model.propagate(self, fore_vehicle,
back_vehicle_right, backs_fore_vehicle_right,
scene.ego_vel[1], step)
if change_lanes > 0:
return change_lanes
# check move left
back_vehicle_left = scene.get_back_vehicle_left(\
scene.scene, self)
backs_fore_vehicle_left = scene.get_fore_vehicle(\
scene.scene, back_vehicle_left)
change_lanes = self.lateral_model.propagate(self, fore_vehicle,
back_vehicle_left, backs_fore_vehicle_left,
scene.ego_vel[1], step)
return change_lanes
"""
wrapper around both longitudinal and lateral accel
for lateral accel, we are accelerating without regard to physical
limitations, in order to move lanes in 1 step.
"""
def get_action(self, scene, step=0.2, profile=False):
if profile:
start = time.time()
fore_vehicle = scene.get_fore_vehicle(scene.scene, self)
if profile:
get_fore_vehicle_time = time.time() - start
lateral_accel = self.get_lateral_accel(fore_vehicle, scene, step)
if profile:
get_lat_accel_time = time.time() - (start + get_fore_vehicle_time)
gap_y = fore_vehicle.rel_y - self.rel_y
if gap_y <= 0:
return (0,0) # don't react
vy = self.rel_vy + scene.ego_vel[1]
longitudinal_accel = self.longitudinal_model.propagate(\
vy, # own speed, absolute
gap_y, # fore gap
self.rel_vy - fore_vehicle.rel_vy) # positive when approaching
if profile:
prop_time = time.time() - (start + get_fore_vehicle_time + get_lat_accel_time)
print("GetFore 1 vehicle took: {}".format(get_fore_vehicle_time))
print("GetLatAceel 1 vehicle took: {}".format(get_lat_accel_time))
print("PropLongA 1 vehicle took: {}".format(prop_time))
return (lateral_accel, longitudinal_accel)
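# Usage sketch (added; ``scene`` is a hypothetical object exposing the
# interface used above: scene.scene, scene.ego_vel, scene.lane_width_m and the
# get_fore/back_vehicle_* lookups):
#
#   veh = Vehicle("veh0", {"distance_y": 30.0, "speed_y": -0.5})
#   lateral_a, longitudinal_a = veh.get_action(scene, step=0.2)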
| 39.991453 | 90 | 0.601197 | 625 | 4,679 | 4.224 | 0.2224 | 0.075 | 0.03447 | 0.034091 | 0.256061 | 0.180682 | 0.103788 | 0.103788 | 0.04697 | 0.04697 | 0 | 0.014493 | 0.32165 | 4,679 | 116 | 91 | 40.336207 | 0.817265 | 0.106647 | 0 | 0.11236 | 0 | 0 | 0.053891 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044944 | false | 0 | 0.022472 | 0 | 0.258427 | 0.033708 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b31c207b505a1cf37fe780c0701b0448002e991 | 3,263 | py | Python | src/misc/elo_classifier.py | Brunopaes/python-sandbox | c51f716785bf11789b13f9bb24bfd76108eaec08 | [
"MIT"
] | 5 | 2020-08-04T00:13:06.000Z | 2022-03-18T09:18:29.000Z | src/misc/elo_classifier.py | Brunopaes/python-sandbox | c51f716785bf11789b13f9bb24bfd76108eaec08 | [
"MIT"
] | null | null | null | src/misc/elo_classifier.py | Brunopaes/python-sandbox | c51f716785bf11789b13f9bb24bfd76108eaec08 | [
"MIT"
] | 2 | 2020-06-26T13:42:17.000Z | 2020-08-04T00:13:10.000Z | from tkinter.filedialog import askopenfilename
from itertools import permutations
from tkinter import Tk
import datetime
import pandas
import random
import math
import csv
import os
class TopSomething:
def __init__(self):
self.file = self.choose_file()
self.dict = {}
self.keys = []
self.matches = []
self.file_name = 'elo-rating_{}.csv'.format(datetime.date.today())
self.k = 30
@staticmethod
def choose_file(path='./', file_type=(('All files', '*.*'), )):
root = Tk()
root.withdraw()
filename = askopenfilename(initialdir=path,
title='Select file',
filetypes=file_type)
root.destroy()
return filename
def open_file(self):
with open(self.file, newline='') as csv_file:
reader = csv.reader(csv_file, delimiter=',')
for row in reader:
self.dict.update({row[0]: [1000.0]})
def generating_matches(self):
self.matches = list(permutations(self.keys, 2))
        random.shuffle(self.matches)  # a single shuffle is sufficient to randomize the order
def running_matches(self):
for match in self.matches:
self.winner(match)
def winner(self, competitors):
phrase = 'Which one is your favorite?\n'\
' 1 - {}\n'\
' 2 - {}\n\n'.format(competitors[0], competitors[1])
while True:
try:
qst = int(input(phrase))
except ValueError:
                print('Invalid option')
continue
if qst in [1, 2]:
break
            print('Invalid option!')
        self.calculating_elo(qst, competitors)
@staticmethod
def calculating_win_probability(rating_1, rating_2):
        # Standard Elo expected score of the player rated rating_1
        # (the original version dropped the /400 divisor and inverted the sign).
        return 1.0 / (1 + math.pow(10, (rating_2 - rating_1) / 400.0))
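    # Worked example (sketch): with ra = rb = 1000 both expectancies are 0.5,
    # so after one game with k = 30 the winner moves to 1015.0 and the loser
    # to 985.0.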
def updating_elo(self, ra, rb, winner_name, loser_name):
self.dict[winner_name] = [ra]
self.dict[loser_name] = [rb]
def calculating_elo(self, d, competitors):
if d == 1:
winner_name = competitors[0]
loser_name = competitors[1]
else:
winner_name = competitors[1]
loser_name = competitors[0]
ra = self.dict.get(winner_name)[0]
rb = self.dict.get(loser_name)[0]
pa = self.calculating_win_probability(ra, rb)
pb = self.calculating_win_probability(rb, ra)
        # Winner scores 1 and loser scores 0 in the Elo update; the winner and
        # loser were already selected above, so no branching on d is needed.
        ra = ra + self.k * (1 - pa)
        rb = rb + self.k * (0 - pb)
self.updating_elo(ra, rb, winner_name, loser_name)
def formatting_output(self):
df = pandas.DataFrame(self.dict).T.sort_values(0, ascending=False)
df.columns = ['ELO Rating']
df.to_csv(self.file_name)
        os.startfile(self.file_name)  # note: os.startfile is Windows-only
def __call__(self, *args, **kwargs):
self.open_file()
self.keys = list(self.dict.keys())
self.generating_matches()
self.running_matches()
self.formatting_output()
if __name__ == '__main__':
TopSomething().__call__()
| 27.888889 | 80 | 0.554704 | 391 | 3,263 | 4.460358 | 0.314578 | 0.036697 | 0.020642 | 0.021789 | 0.053899 | 0.053899 | 0.027523 | 0.027523 | 0.027523 | 0.027523 | 0 | 0.021769 | 0.324242 | 3,263 | 116 | 81 | 28.12931 | 0.769161 | 0 | 0 | 0.11236 | 0 | 0 | 0.045051 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.123596 | false | 0 | 0.101124 | 0.011236 | 0.258427 | 0.022472 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b31d2ae67a2555166c6d20e8ec407aebb930728 | 1,542 | py | Python | custom_components/loxone/const.py | JoDehli/PyLoxone_beta | 346f3b1c895dfd384c1203da2624be71a5fd339a | [
"Apache-2.0"
] | null | null | null | custom_components/loxone/const.py | JoDehli/PyLoxone_beta | 346f3b1c895dfd384c1203da2624be71a5fd339a | [
"Apache-2.0"
] | null | null | null | custom_components/loxone/const.py | JoDehli/PyLoxone_beta | 346f3b1c895dfd384c1203da2624be71a5fd339a | [
"Apache-2.0"
] | null | null | null | """
Loxone constants
For more details about this component, please refer to the documentation at
https://github.com/JoDehli/PyLoxone
"""
# Loxone constants
LOXONE_PLATFORMS = [
"sensor",
"switch",
"cover",
"light",
"climate",
"alarm_control_panel",
]
DOMAIN = "loxone"
SENDDOMAIN = "loxone_send"
SECUREDSENDDOMAIN = "loxone_send_secured"
EVENT = "loxone_event"
DEFAULT = ""
DEFAULT_PORT = 8080
DEFAULT_IP = ""
ATTR_UUID = "uuid"
ATTR_UUID = "uuid"
ATTR_VALUE = "value"
ATTR_CODE = "code"
ATTR_COMMAND = "command"
ATTR_AREA_CREATE = "create_areas"
DOMAIN_DEVICES = "devices"
CONF_ACTIONID = "uuidAction"
CONF_SCENE_GEN = "generate_scenes"
CONF_SCENE_GEN_DELAY = "generate_scenes_delay"
CONF_LIGHTCONTROLLER_SUBCONTROLS_GEN = "generate_lightcontroller_subcontrols"
DEFAULT_FORCE_UPDATE = False
SUPPORT_SET_POSITION = 4
SUPPORT_STOP = 8
SUPPORT_OPEN_TILT = 16
SUPPORT_CLOSE_TILT = 32
SUPPORT_STOP_TILT = 64
SUPPORT_SET_TILT_POSITION = 128
DEFAULT_DELAY_SCENE = 3
CONF_HVAC_AUTO_MODE = "hvac_auto_mode"
STATE_ON = "on"
STATE_OFF = "off"
cfmt = """\
( # start of capture group 1
% # literal "%"
(?: # first option
(?:[-+0 #]{0,5}) # optional flags
(?:\d+|\*)? # width
(?:\.(?:\d+|\*))? # precision
(?:h|l|ll|w|I|I32|I64)? # size
[cCdiouxXeEfgGaAnpsSZ] # type
) | # OR
%%)
"""
# End of loxone constants
| 22.676471 | 77 | 0.614786 | 172 | 1,542 | 5.19186 | 0.627907 | 0.050392 | 0.026876 | 0.035834 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021164 | 0.264591 | 1,542 | 67 | 78 | 23.014925 | 0.766314 | 0.110895 | 0 | 0.040816 | 0 | 0 | 0.494861 | 0.07489 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b3281fcb8e929e42ec3296270fd092d795c1d55 | 1,249 | py | Python | presamples/array.py | tngTUDOR/presamples | beb7ec1720b28bb45f45b4250661c03e94366fbf | [
"BSD-3-Clause"
] | 9 | 2020-03-19T09:52:57.000Z | 2021-08-07T15:30:13.000Z | presamples/array.py | tngTUDOR/presamples | beb7ec1720b28bb45f45b4250661c03e94366fbf | [
"BSD-3-Clause"
] | 18 | 2019-06-12T15:59:51.000Z | 2021-08-30T07:56:38.000Z | presamples/array.py | tngTUDOR/presamples | beb7ec1720b28bb45f45b4250661c03e94366fbf | [
"BSD-3-Clause"
] | 10 | 2019-06-06T14:27:26.000Z | 2022-02-19T14:02:18.000Z | import numpy as np
class RegularPresamplesArrays:
"""A wrapper around a list of memory-mapped Numpy arrays with heterogeneous shapes.
This class provides a simple way to consistently multiple arrays with the same number of columns.
Input arguments:
* ``filepaths``: An iterable of Numpy array filepaths.
"""
def __init__(self, filepaths):
self.count = 0
self.data = [
np.load(str(fp), mmap_mode='r')
for fp in filepaths
]
self.start_indices = np.cumsum([0] + [array.shape[0] for array in self.data])
def sample(self, index):
"""Draw a new sample from the pre-sampled arrays"""
result = np.hstack([arr[:, index] for arr in self.data])
self.count += 1
return result
def translate_row(self, row):
"""Translate row index from concatenated array to (array list index, row modulo)"""
if row < 0:
raise ValueError("Row index must be >= 0")
if row >= self.start_indices[-1]:
raise ValueError("Row index too large")
if row == 0:
return (0, 0)
i = np.searchsorted(self.start_indices, row, side='right') - 1
return (i, row - self.start_indices[i])
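# Usage sketch (hypothetical file names; every .npy array must share the same
# number of columns):
#
#   arrays = RegularPresamplesArrays(["a.npy", "b.npy"])
#   col = arrays.sample(0)       # column 0 of every array, concatenated
#   arrays.translate_row(0)      # -> (0, 0)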
| 32.868421 | 101 | 0.606085 | 168 | 1,249 | 4.446429 | 0.470238 | 0.048193 | 0.085676 | 0.06158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012373 | 0.288231 | 1,249 | 37 | 102 | 33.756757 | 0.827897 | 0.301841 | 0 | 0 | 0 | 0 | 0.056086 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.045455 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b3361752f448cc2dec14bc08919cc2ba4742b20 | 652 | py | Python | source/machine/adc.py | charkster/u2if | ceab7e038435fc212641ecc35683fd05523dd5cd | [
"BSD-3-Clause"
] | 79 | 2021-04-18T14:03:24.000Z | 2022-03-27T06:03:44.000Z | source/machine/adc.py | charkster/u2if | ceab7e038435fc212641ecc35683fd05523dd5cd | [
"BSD-3-Clause"
] | 10 | 2021-04-21T20:28:42.000Z | 2022-03-19T21:47:15.000Z | source/machine/adc.py | charkster/u2if | ceab7e038435fc212641ecc35683fd05523dd5cd | [
"BSD-3-Clause"
] | 13 | 2021-04-18T14:32:25.000Z | 2022-03-26T01:22:48.000Z | from .u2if import Device
from . import u2if_const as report_const
class ADC:
def __init__(self, pin_id):
self.pin_id = pin_id
self._device = Device()
self._init()
def _init(self):
res = self._device.send_report(bytes([report_const.ADC_INIT_PIN, self.pin_id]))
if res[1] != report_const.OK:
raise RuntimeError("ADC init error.")
def value(self):
res = self._device.send_report(bytes([report_const.ADC_GET_VALUE, self.pin_id]))
if res[1] != report_const.OK:
raise RuntimeError("ADC read error.")
return int.from_bytes(res[3:3+2], byteorder='little')
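# Usage sketch (added; assumes a board running the u2if firmware and an
# ADC-capable pin id):
#
#   adc = ADC(26)       # GP26 / ADC0 on the Raspberry Pi Pico
#   print(adc.value())  # raw reading, two bytes, little-endian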
| 31.047619 | 88 | 0.638037 | 94 | 652 | 4.138298 | 0.340426 | 0.141388 | 0.092545 | 0.087404 | 0.48329 | 0.48329 | 0.48329 | 0.48329 | 0.48329 | 0.48329 | 0 | 0.014141 | 0.240798 | 652 | 20 | 89 | 32.6 | 0.771717 | 0 | 0 | 0.125 | 0 | 0 | 0.055215 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.125 | 0 | 0.4375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b3601b3e064d66668032210e3f4785a740ab41b | 4,722 | py | Python | ETCetera/Abstractions/Abstraction.py | ggleizer/ETCetera | 8fa9f3c82fd1944507a0c02d52a236244821f3ca | [
"MIT"
] | null | null | null | ETCetera/Abstractions/Abstraction.py | ggleizer/ETCetera | 8fa9f3c82fd1944507a0c02d52a236244821f3ca | [
"MIT"
] | null | null | null | ETCetera/Abstractions/Abstraction.py | ggleizer/ETCetera | 8fa9f3c82fd1944507a0c02d52a236244821f3ca | [
"MIT"
] | 1 | 2022-03-11T11:15:20.000Z | 2022-03-11T11:15:20.000Z | import pickle, json
import os
from abc import ABCMeta, abstractmethod
from functools import cached_property
from config import save_path
from ETCetera.Systems.Automata import TimedAutomaton, Automaton
class Abstraction(metaclass=ABCMeta):
""" Abstract class """
# plant: Plant
# controller: Controller
# ETC: ETC
def __init__(self, *args, **kwargs):
pass
# @abstractmethod
# def generate_regions(self) -> None:
# """
# Constructs a partitioning of the state space of the given ETC,
# depending on the specific algorithm that implements this class.
# """
# raise NotImplementedError
#
# @cached_property
# def regions(self):
# return self.generate_regions()
#
# # @abstractmethod
# # def generate_transitions(self):
#
# @abstractmethod
# def create_abstraction(self, *args, **kwargs) -> None:
# """
# Creates the complete traffic abstraction. If the regions are not
# constructed yet, the method self.construction_regions() must be called
# beforehand.
# """
# raise NotImplementedError
#
# @abstractmethod
# def refine(self) -> None:
# """
# Refines the abstraction is some way. Details will depend on the algorithm
# implementing this class.
# This must be called last in any overwriting methods.
# """
#
# # Invalidate the cached timed automaton
# #Abstraction.timed_automaton.fget.cache_clear()
# # if 'timed_automaton' in self.__dict__:
# # del self.__dict__['timed_automaton']
# pass
""" Methods to create a automata """
@abstractmethod
def _create_automaton(self) -> Automaton:
raise NotImplementedError
@cached_property
def automaton(self):
        return self._create_automaton()
""" Methods to create a timed automata """
@abstractmethod
def _create_timed_automaton(self) -> TimedAutomaton:
raise NotImplementedError
@cached_property
def timed_automaton(self) -> TimedAutomaton:
"""
Creates a Timed Automaton
"""
return self._create_timed_automaton()
""" Exportation """
def export(self, file_name: str = None, export_type: str = 'pickle'):
export_type = export_type.lower()
if file_name is None:
file_name = self.__class__.__name__
if export_type in ['pickle', 'bytes', 'byte_stream']:
self._export_pickle(file_name)
        elif export_type in ['json']:
            self._export_json(file_name)
        elif export_type in ['txt', 'text']:
            self._export_txt(file_name)
def _export_pickle(self, file_name: str):
if not file_name.endswith('.pickle'):
file_name += '.pickle'
with open(os.path.join(save_path, file_name), 'wb') as f:
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
def _export_json(self, file_name:str):
if not file_name.endswith('.json'):
file_name += '.json'
with open(os.path.join(save_path, file_name), 'w') as f:
json.dump(self.__repr__(), f)
def _export_txt(self, file_name:str):
if not file_name.endswith('.txt'):
            file_name += '.txt'
with open(os.path.join(save_path, file_name), 'w') as f:
f.write(str(self.__repr__()))
@classmethod
def from_bytestream_file(cls, file_name) -> 'Abstraction':
file_name = cls._check_file(file_name)
if file_name is None:
raise FileNotFoundError
with open(file_name, 'rb') as f:
obj = pickle.load(f)
return obj
@abstractmethod
def __repr__(self):
raise NotImplementedError
# @classmethod
# def from_json(cls, file_name):
# # Check if file exists.
# # If not check if it is because the save path is not specified
# file_name = cls._check_file(file_name)
# if file_name is None:
# return None
#
# with open(file_name, 'r') as f:
# obj = json.load(f)
#
# # This feels a bit like cheating....
# temp = cls(**obj)
# temp.__dict__.update(obj)
# return temp
@staticmethod
def _check_file(file_name):
""" Check if file exists. If not check if it is because the save path is not specified.
If it exists return the file_name, else None"""
if not os.path.isfile(file_name):
file_retry = os.path.join(save_path, file_name)
if not os.path.isfile(file_retry):
print("Please specify a valid file.")
return None
else:
file_name = file_retry
return file_name
| 30.464516 | 95 | 0.606734 | 552 | 4,722 | 4.958333 | 0.259058 | 0.096456 | 0.017537 | 0.021922 | 0.271465 | 0.198758 | 0.183413 | 0.173913 | 0.173913 | 0.122031 | 0 | 0 | 0.295214 | 4,722 | 154 | 96 | 30.662338 | 0.822416 | 0.351334 | 0 | 0.246154 | 0 | 0 | 0.038488 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.184615 | false | 0.015385 | 0.092308 | 0.015385 | 0.369231 | 0.015385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |