seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
73819292265 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import enum
import logging
from logging import StreamHandler
handler = StreamHandler()  # single shared console handler, attached to the logger by init()
logger = logging.getLogger(__name__)  # module-level logger used by the d/i/w/e helpers below
class Level(enum.Enum):
    """Log severity levels, mirroring the stdlib ``logging`` constants."""
    FATAL = logging.FATAL
    ERROR = logging.ERROR
    WARN = logging.WARN  # alias of logging.WARNING (value 30)
    INFO = logging.INFO
    DEBUG = logging.DEBUG
class ColoredFormatter(logging.Formatter):
    """Formatter that colors the part of the format string enclosed by the
    ``COLOR_START``/``COLOR_END`` placeholders with an ANSI escape sequence
    chosen from the record's level.
    """

    RESET_SEQ = "\033[0m"
    COLOR_SEQ = "\033[1;%dm"
    COLOR_START = "COLOR_START"
    COLOR_END = "COLOR_END"
    BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
    COLORS = {
        'ERROR': RED,
        'WARNING': YELLOW,
        'INFO': WHITE,
        'DEBUG': CYAN,
    }
    def __init__(self, fmt):
        """
        :param str fmt: Format string containing the COLOR_START / COLOR_END
                        placeholders.
        """
        logging.Formatter.__init__(self, fmt)
        # COLOR_END always becomes the ANSI reset sequence; COLOR_START is
        # substituted per-record in format() because it depends on the level.
        self.fmt = fmt.replace(ColoredFormatter.COLOR_END, ColoredFormatter.RESET_SEQ)
    def format(self, record):
        """
        Output colored log
        :param logging.LogRecord record:
        :return: Format result.
        :rtype: str
        """
        levelname = record.levelname
        if levelname in ColoredFormatter.COLORS:
            cs = ColoredFormatter.COLOR_SEQ % (30 + ColoredFormatter.COLORS[levelname])
        else:
            # Bug fix: levels without a color entry (e.g. CRITICAL) previously
            # left the literal COLOR_START token in the output and reused the
            # color set by the previously formatted record.
            cs = ''
        # Always refresh the style's format string so every record is
        # rendered with the color (or lack of one) of its own level.
        self._style._fmt = self.fmt.replace(ColoredFormatter.COLOR_START, cs)
        return logging.Formatter.format(self, record)
def init():
    """Initialize the log module: default both the handler and the logger to
    INFO, attach the console handler, disable propagation to ancestor
    loggers, and install the colored message-only formatter.
    """
    for target in (handler, logger):
        target.setLevel(logging.INFO)
    logger.addHandler(handler)
    logger.propagate = False
    handler.setFormatter(ColoredFormatter("COLOR_START%(message)sCOLOR_END"))
def set_level(level: Level):
    """Set the logging level on both the module logger and its handler.

    Switching to DEBUG also installs a more verbose formatter that shows a
    timestamp and the padded level name.
    :param Level level: Log level
    """
    value = level.value
    logger.setLevel(value)
    handler.setLevel(value)
    if level is not Level.DEBUG:
        return
    fmt = ColoredFormatter("COLOR_START%(asctime)s %(levelname)-7sCOLOR_END %(message)s")
    fmt.default_time_format = '%H:%M:%S'
    fmt.default_msec_format = '%s.%03d'
    handler.setFormatter(fmt)
def d(msg):
    """
    Debug log
    :param str | bytes msg: Message string.
    """
    if isinstance(msg, str):
        logger.debug(msg)
    elif isinstance(msg, bytes):
        # Decode bytes for the record; keep invalid UTF-8 visible (escaped)
        # instead of raising UnicodeDecodeError inside a logging helper.
        logger.debug(msg.decode("utf-8", errors="backslashreplace"))
    else:
        # Bug fix: any other type was previously dropped silently; log its
        # repr so debug messages are never lost.
        logger.debug(repr(msg))
def i(msg):
    """Emit *msg* at INFO level.

    :param str msg: Message string.
    """
    logger.log(logging.INFO, msg)
def w(msg):
    """Emit *msg* at WARNING level.

    :param str msg: Message string.
    """
    logger.log(logging.WARNING, msg)
def e(msg):
    """Emit *msg* at ERROR level.

    :param str msg: Message string.
    """
    logger.log(logging.ERROR, msg)
| ujiro99/auto_logger | logger/log.py | log.py | py | 2,655 | python | en | code | 0 | github-code | 36 |
40735068929 | import numpy as np
from plotfit import plotLineReg, plotPoints
def approxD2(f, x_0, h):
    """Central-difference estimate of the second derivative of f at x_0
    with step size h: (f(x_0+h) - 2 f(x_0) + f(x_0-h)) / h^2.
    """
    forward = f(x_0+h)
    center = f(x_0)
    backward = f(x_0-h)
    return (forward - 2*center + backward)/(h*h)
def approxD2iter(f, x_0, h, tol, maxiters):
    """Iteratively refine approxD2 by halving h until two successive
    approximations differ by at most tol, or maxiters is reached.

    :param f: function whose second derivative is approximated
    :param x_0: evaluation point
    :param h: initial step size (halved each iteration)
    :param tol: convergence tolerance on successive approximations
    :param maxiters: hard iteration cap
    :return: list of [h, approximation] pairs, one per iteration
    """
    iters = 0
    err = 10*tol  # force at least one iteration
    a = x_0       # previous approximation (seeded with x_0; only used for the first err)
    appxList = []
    while(err>tol and iters<maxiters):
        b = approxD2(f, x_0, h)
        err = np.abs(a-b)
        iters += 1
        # Bug fix: record the step size that actually produced this
        # approximation; previously h was halved *before* being appended,
        # so every [h, approx] pair was misaligned by one halving.
        appxList.append([h, b])
        h *= 0.5
        a = b
    return appxList
def testf(x):
return ((x-np.pi/2)*(np.tan(x)**2))/(x**2+65)
if __name__=='__main__':
    # Exercise the iterative second-derivative approximation on testf.
    approxList = approxD2iter(testf, np.pi/4, .125, .0001, 200)
    print("h, approx")
    xlist = []
    ylist = []
    for h_val, approx in approxList:
        print([h_val, approx])
        xlist.append(h_val)
        # Bug fix: plot the approximation on the y axis; previously the h
        # value was appended twice, so y merely duplicated x.
        ylist.append(approx)
    plotPoints(xlist, ylist)
| DryToaster/ComputationalMath | src/derive.py | derive.py | py | 852 | python | en | code | 0 | github-code | 36 |
15476854005 | from __future__ import division, print_function, absolute_import
import os
import pytest
import hypothesis
from hypothesis.errors import InvalidArgument
from hypothesis.database import ExampleDatabase
from hypothesis._settings import settings, Verbosity
def test_has_docstrings():
    """Each settings attribute should expose its own documentation."""
    assert settings.verbosity.__doc__
# Captured once, before any profile juggling, so tests can compare against it.
original_default = settings.get_profile('default').max_examples
def setup_function(fn):
    """Pytest hook: load a fresh copy of the default profile before each test."""
    settings.load_profile('default')
    settings.register_profile('test_settings', settings())
    settings.load_profile('test_settings')
def test_cannot_set_non_settings():
    """Assigning an attribute that is not a registered setting must fail loudly."""
    s = settings()
    with pytest.raises(AttributeError):
        # The misspelt name is deliberate: it is not a real setting.
        s.databas_file = u'some_file'
def test_settings_uses_defaults():
    """A fresh settings object inherits values from the current default profile."""
    s = settings()
    assert s.max_examples == settings.default.max_examples
def test_raises_attribute_error():
    """Reading an unknown setting raises AttributeError."""
    with pytest.raises(AttributeError):
        settings().kittens
def test_respects_none_database():
    """Passing database=None explicitly disables the example database."""
    assert settings(database=None).database is None
def test_settings_can_be_used_as_context_manager_to_change_defaults():
    """Entering a settings object swaps the defaults; exiting restores them."""
    with settings(max_examples=12):
        assert settings.default.max_examples == 12
    assert settings.default.max_examples == original_default
def test_can_repeatedly_push_the_same_thing():
    """The default-settings stack supports re-entrant nesting of the same objects."""
    s = settings(max_examples=12)
    t = settings(max_examples=17)
    assert settings().max_examples == original_default
    with s:
        assert settings().max_examples == 12
        with t:
            assert settings().max_examples == 17
            with s:
                assert settings().max_examples == 12
                with t:
                    assert settings().max_examples == 17
                # Each exit must restore the value pushed by the matching enter.
                assert settings().max_examples == 12
            assert settings().max_examples == 17
        assert settings().max_examples == 12
    assert settings().max_examples == original_default
def test_cannot_create_settings_with_invalid_options():
    """Unknown keyword arguments to settings() are rejected."""
    with pytest.raises(InvalidArgument):
        settings(a_setting_with_limited_options=u'spoon')
def test_can_set_verbosity():
    """Every Verbosity member is accepted by the verbosity setting."""
    settings(verbosity=Verbosity.quiet)
    settings(verbosity=Verbosity.normal)
    settings(verbosity=Verbosity.verbose)
def test_can_not_set_verbosity_to_non_verbosity():
    """verbosity only accepts Verbosity members, not arbitrary values."""
    with pytest.raises(InvalidArgument):
        settings(verbosity='kittens')
@pytest.mark.parametrize('db', [None, ExampleDatabase()])
def test_inherits_an_empty_database(db):
    """An explicitly supplied database (even None) is used verbatim, also inside the profile stack."""
    assert settings.default.database is not None
    s = settings(database=db)
    assert s.database is db
    with s:
        t = settings()
        assert t.database is db
@pytest.mark.parametrize('db', [None, ExampleDatabase()])
def test_can_assign_database(db):
    """The database option may be assigned at construction time, including None."""
    x = settings(database=db)
    assert x.database is db
def test_load_profile():
    """register_profile/load_profile switch the defaults; values a profile
    does not specify fall back to the built-in defaults."""
    settings.load_profile('default')
    assert settings.default.max_examples == 200
    assert settings.default.max_shrinks == 500
    assert settings.default.min_satisfying_examples == 5
    settings.register_profile(
        'test',
        settings(
            max_examples=10,
            max_shrinks=5
        )
    )
    settings.load_profile('test')
    assert settings.default.max_examples == 10
    assert settings.default.max_shrinks == 5
    # Not set by the 'test' profile, so it keeps the default value.
    assert settings.default.min_satisfying_examples == 5
    settings.load_profile('default')
    assert settings.default.max_examples == 200
    assert settings.default.max_shrinks == 500
    assert settings.default.min_satisfying_examples == 5
def test_loading_profile_keeps_expected_behaviour():
    """A loaded profile behaves like a normal default: context managers still nest over it."""
    settings.register_profile('ci', settings(max_examples=10000))
    settings.load_profile('ci')
    assert settings().max_examples == 10000
    with settings(max_examples=5):
        assert settings().max_examples == 5
    assert settings().max_examples == 10000
def test_load_non_existent_profile():
    """Fetching a profile name that was never registered raises InvalidArgument."""
    with pytest.raises(hypothesis.errors.InvalidArgument):
        settings.get_profile('nonsense')
@pytest.mark.skipif(
    os.getenv('HYPOTHESIS_PROFILE') not in (None, 'default'),
    reason='Defaults have been overridden')
def test_runs_tests_with_defaults_from_conftest():
    """Sanity-check the defaults injected by this test suite's conftest."""
    assert settings.default.strict
    assert settings.default.timeout == -1
| LyleH/hypothesis-python_1 | tests/cover/test_settings.py | test_settings.py | py | 4,182 | python | en | code | 1 | github-code | 36 |
16172861853 | """
Сравнивает два списка.
Возвращает в виде списка результаты сравнения элементов:
1 - если l1[n] и l2[n] - буквы одного регистра;
0 - если l1[n] и l2[n] - буквы разных регистров;
-1 - если l1[n] или l2[n] - не буквы
"""
def same(l1, l2):
    """Compare two character sequences position by position.

    Element n of the result is:
      1  -- l1[n] and l2[n] are letters of the same case;
      0  -- l1[n] and l2[n] are letters, but not of the same case;
      -1 -- l1[n] or l2[n] is not a letter.
    """
    result = []
    for n, ch in enumerate(l1):
        other = l2[n]
        if not (ch.isalpha() and other.isalpha()):
            result.append(-1)
        elif (ch.isupper() and other.isupper()) or (ch.islower() and other.islower()):
            result.append(1)
        else:
            result.append(0)
    return result
l1 = 'QedhgrtRt'  # sample input: mixed-case letters
l2 = 'JKLl;l9kj'  # sample input: letters plus non-letter characters
print(same(l1, l2))  # expected: [1, 0, 0, 1, -1, 1, -1, 0, 1]
| genievy/codewars | same_case.py | same_case.py | py | 743 | python | ru | code | 0 | github-code | 36 |
10140994218 | from django.urls import reverse
def reverse_querystring(view, urlconf=None, args=None, kwargs=None, current_app=None, query_kwargs=None):
    """Custom reverse to handle query strings.
    Usage:
        reverse_querystring('app.views.my_view', kwargs={'pk': 123}, query_kwargs={'search': 'Bob'})
        for multivalue query string
        reverse_querystring('app.views.my_view', kwargs={'pk': 123}, query_kwargs={'search': ['Bob', 'Jack']})
    """
    from urllib.parse import urlencode  # stdlib; local import keeps module import order untouched

    base_url = reverse(view, urlconf=urlconf, args=args, kwargs=kwargs, current_app=current_app)
    if query_kwargs:
        # Bug fix: values are now percent-encoded, so spaces, '&', '=' and
        # non-ASCII characters can no longer corrupt the query string.
        # doseq=True expands list/tuple values into repeated parameters
        # (?search=Bob&search=Jack), matching the previous multivalue output.
        return "%s?%s" % (base_url, urlencode(query_kwargs, doseq=True))
    return base_url
| chiemerieezechukwu/django-api | core/utils/reverse_with_query_string.py | reverse_with_query_string.py | py | 930 | python | en | code | 0 | github-code | 36 |
N = int(input())  # number of working days
arr = [list(map(int, input().split())) for _ in range(N)]  # arr[i] = [duration T_i, pay P_i]
Max = 0  # best total pay found so far; updated inside dfs via `global`
def dfs(day,total):
    """Explore take/skip choices from `day`, carrying accumulated pay in `total`."""
    global Max
    if day == N:
        Max = max(Max, total)
        return
    # Take today's consultation only if it finishes within the N days.
    if day+arr[day][0]<=N:
        dfs(day+arr[day][0],total+arr[day][1])
    # Skip today.
    dfs(day+1,total)
# NOTE(review): dfs(0, 0) alone already explores every schedule through the
# skip branch; this loop repeats overlapping subtrees but does not change Max.
for i in range(N):
    dfs(i,0)
print(Max) | dwkim8155/Algorithm | Algorithm/DFS/[BOJ] 14501 Sliver3 퇴사.py | [BOJ] 14501 Sliver3 퇴사.py | py | 333 | python | en | code | 1 | github-code | 36 |
37059041423 | """Function which calculates how positive a website's content is. Scores usually range between -10 and +10"""
import requests
from bs4 import BeautifulSoup as bs
from afinn import Afinn
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
def sentiment_analyze(url):
    """Calculate how positive a website's text content is.

    Fetches the page, splits the body into text fragments, scores each
    fragment with both AFINN and VADER, and returns a dict with the average
    score, the most positive/negative fragments and the raw per-fragment
    data. On failure returns a dict with success=False and a message.
    """
    # add https if not in there at start
    # NOTE(review): only a missing scheme is handled; an explicit "http://"
    # URL would be mangled into "https://http://..." -- confirm intent.
    if url[0:8] != "https://":
        url = "https://" + url
    try:
        my_session = requests.session()
        # First request only to harvest cookies for the real request below.
        for_cookies = requests.get(url).cookies
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0"
        }
        response = my_session.get(url, headers=headers, cookies=for_cookies)
        req_text = response.text
        # checking request html output
        # with open("testing.html", "w") as file:
        # file.write(req_text)
        # remove unicode characters
        # NOTE(review): ascii-encode-then-unicode_escape-decode drops all
        # non-ASCII text and reinterprets backslash escapes -- verify this is
        # the intended cleanup.
        decoded_text = req_text.encode("ascii", "ignore").decode("unicode_escape")
        # get the individual text pieces inside the web page as separate list elements
        soup_li = bs(decoded_text, "lxml").body.getText(separator="||").split("||")
        # list which will hold the pieces of text together with their scores
        text_li = []
        # Initialise the 2 sentiment analysis libraries used
        afinn = Afinn()
        analyzer = SentimentIntensityAnalyzer()
        # sum of non-0 scores
        sum_text = 0
        # count of non-0 scores
        count_text = 0
        # max/min text scores holders
        max_score = 0
        max_text = ""
        min_score = 0
        min_text = ""
        for text in soup_li:
            # only look at pieces of text with 5+ sentences
            # NOTE(review): this actually counts words, not sentences.
            if len(text.split()) >= 5:
                afinn_score = afinn.score(text) # usually from -5 to +5
                vader_score = analyzer.polarity_scores(text)[
                    "compound"
                ] # from -1 to +1
                combined_score = 0
                if afinn_score != 0 or vader_score != 0:
                    count_text += 1
                    # When one scorer is silent, trust the other; otherwise
                    # rescale both to a comparable range and average.
                    if afinn_score == 0:
                        combined_score = vader_score
                    elif vader_score == 0:
                        combined_score = afinn_score
                    else:
                        combined_score = (afinn_score * 2 + vader_score * 10) / 2
                    # Only the sign contributes to the average (+/-10 per fragment).
                    sum_text += 10 if combined_score > 0 else -10
                    if combined_score > max_score:
                        max_score = combined_score
                        max_text = text
                    elif combined_score < min_score:
                        min_score = combined_score
                        min_text = text
                text_li.append(
                    {
                        "text": text,
                        "combined_score": combined_score,
                        "vader_score": vader_score,
                        "afinn_score": afinn_score,
                    }
                )
        if count_text == 0:
            return {
                "success": False,
                "message": "Unable to calculate any scores.",
                "raw_data": text_li,
            }
        else:
            return {
                "success": True,
                # NOTE(review): with +/-10 per fragment this average lies in
                # [-20, 20], not the [-10, 10] the module docstring mentions.
                "avg_score": round(sum_text / count_text * 2),
                "max_score": max_score,
                "max_text": max_text,
                "min_score": min_score,
                "min_text": min_text,
                "raw_data": text_li,
            }
    # catch errors in requests.get statement
    except requests.exceptions.ConnectionError as error:
        return {
            "success": False,
            "message": f"An error occurred when trying to access the '{url}' URL. Error message: '{error}'",
        }
except Exception as error:
return {
"success": False,
"message": f"Something went wrong when processing the '{url}' URL.Error message: '{error}'",
} | mihailthebuilder/news-sentiment | sentiment.py | sentiment.py | py | 4,072 | python | en | code | 0 | github-code | 36 |
20254710953 | print("BMI calculator:-")
# Read measurements; float() raises ValueError on non-numeric input.
height = float(input("What is your height in m : "))
weight = float(input("What is your weight in kg : "))
# BMI = weight (kg) / height (m) squared, rounded for display.
bmi = round(weight / height**2, 2)
# NOTE(review): 18 is used as the underweight cut-off here; the WHO boundary
# is 18.5. The trailing else (below, untouched) mislabels BMI > 35 as bad
# input rather than severe obesity.
if bmi <= 18:
    print("you are under weight", bmi)  # typo fix: was "under waeight"
elif bmi <= 24:
    print("you are normal weight", bmi)
elif bmi <= 30:
    print("You are over weight", bmi)
elif bmi <= 35:
    print("You are obese", bmi)  # typo fix: was "obssess weight"
else:
print("You dont enter the correct data") | Mr-Pankuu/snoopy | Day_3/bmiindex.py | bmiindex.py | py | 462 | python | en | code | 0 | github-code | 36 |
71683730983 | import argparse
import datetime
import os
import socket
import sys
from time import sleep
# espa-processing imports
import config
import parameters
import processor
import settings
import sensor
import utilities
from api_interface import APIServer
from logging_tools import (EspaLogging, get_base_logger, get_stdout_handler,
get_stderr_handler, archive_log_files)
base_logger = get_base_logger()
def work(cfg, params, developer_sleep_mode=False):
    """
    Take the environment configuration, order parameters and initiate order processing.
    Note: Much of this code was taken from the ondemand_mapper.py script in espa-processing.
    Args:
        cfg (dict): Configuration params given by config.config() and by the worker environment
        params (dict): JSON response from the API for a single granule or scene
        developer_sleep_mode (bool): when True, skip the minimum-duration sleep (developer use)
    Returns:
        bool: True when the product was processed and marked complete, False on failure
    """
    # This will be the Mesos node hostname
    processing_location = socket.gethostname()
    # Use the base_logger initially, if an exception occurs before the processing logger is configured
    # the base_logger will handle log it
    logger = base_logger
    if not parameters.test_for_parameter(params, 'options'):
        raise ValueError('Error missing JSON [options] record')
    start_time = datetime.datetime.now()
    # Initialize so that we don't sleep
    dont_sleep = True
    # Note that the API response "scene" value is what we use for product_id
    # NOTE(review): if the unpack below raises, order_id/product_id are
    # unbound and the except handler itself will raise NameError.
    try:
        (order_id, product_id, product_type, options) = \
            (params['orderid'], params['scene'], params['product_type'],
             params['options'])
        if product_id != 'plot':
            # Developer mode is always false unless you are a developer
            # so sleeping will always occur for non-plotting requests
            # Override with the developer mode
            dont_sleep = developer_sleep_mode
        # Fix the orderid in-case it contains any single quotes
        # The processors can not handle single quotes in the email
        # portion due to usage in command lines.
        params['orderid'] = order_id.replace("'", '')
        # product_id is not part of the API response - we add it here
        if not parameters.test_for_parameter(params, 'product_id'):
            params['product_id'] = product_id
        # Figure out if debug level logging was requested
        debug = False
        if parameters.test_for_parameter(options, 'debug'):
            debug = options['debug']
        # Configure and get the logger for this order request
        EspaLogging.configure(settings.PROCESSING_LOGGER, order=order_id,
                              product=product_id, debug=debug)
        # Replace the base_logger with the processing_logger
        logger = EspaLogging.get_logger(settings.PROCESSING_LOGGER)
        # add our stdout/stderr log streams
        logger.addHandler(get_stdout_handler())
        logger.addHandler(get_stderr_handler())
        logger.info('Processing {}:{}'.format(order_id, product_id))
        logger.info('Attempting connection to {0}'.format(cfg['espa_api']))
        # will throw an exception on init if unable to get a 200 response
        server = APIServer(cfg['espa_api'])
        # will throw an exception if does not receive a 200 response
        # NOTE(review): the returned status value is never used afterwards.
        status = server.update_status(product_id, order_id, processing_location, 'processing')
        if product_id != 'plot':
            # Make sure we can process the sensor
            tmp_info = sensor.info(product_id)
            del tmp_info
            # Make sure we have a valid output format
            if not parameters.test_for_parameter(options, 'output_format'):
                logger.warning('[output_format] parameter missing defaulting to envi')
                options['output_format'] = 'envi'
            if (options['output_format'] not in parameters.VALID_OUTPUT_FORMATS):
                raise ValueError('Invalid Output format {}'.format(options['output_format']))
        # ----------------------------------------------------------------
        # NOTE: The first thing the product processor does during
        # initialization is validate the input parameters.
        # ----------------------------------------------------------------
        destination_product_file = 'ERROR'
        destination_cksum_file = 'ERROR'
        pp = None
        try:
            # All processors are implemented in the processor module
            pp = processor.get_instance(cfg, params)
            (destination_product_file, destination_cksum_file) = pp.process()
        finally:
            # Free disk space to be nice to the whole system.
            if pp is not None:
                pp.remove_product_directory()
        # Sleep the number of seconds for minimum request duration
        sleep(utilities.get_sleep_duration(cfg, start_time, dont_sleep))
        log_items = archive_log_files(order_id, product_id)
        for item in log_items:
            utilities.change_ownership(item, cfg.get('espa_user'), cfg.get('espa_group'))
        # Everything was successful so mark the scene complete
        server.mark_scene_complete(product_id, order_id,
                                   processing_location,
                                   destination_product_file,
                                   destination_cksum_file,
                                   '') # sets log_file_contents to empty string ''
        return True
    except Exception as e:
        # First log the exception
        logger.exception('Exception encountered in processing.main.work:\nexception: {}'.format(e))
        try:
            # Sleep the number of seconds for minimum request duration
            logger.debug('Attempting to archive log files for order_id: {}\nproduct_id: {}'.format(order_id, product_id))
            sleep(utilities.get_sleep_duration(cfg, start_time, dont_sleep))
            log_items = archive_log_files(order_id, product_id)
            for item in log_items:
                utilities.change_ownership(item, cfg.get('espa_user'), cfg.get('espa_group'))
        except Exception as e2:
            # Archiving is best-effort; log and continue to error reporting.
            logger.exception('Problem archiving log files. error: {}'.format(e2))
        try:
            logger.debug('Attempting to set product error, order_id: {}\nproduct_id: {}'.format(order_id, product_id))
            logged_contents = EspaLogging.read_logger_file(settings.PROCESSING_LOGGER)
            error_log = "Processing Log: {}\n\nException: {}".format(logged_contents, e)
            server.set_scene_error(product_id, order_id, processing_location, error_log)
        except Exception as e3:
            # If even the error report fails, propagate so the framework sees it;
            # in that case the `return False` below is never reached.
            logger.exception('Unable to reach ESPA API and set product error for order_id: {}\nproduct_id: {}\nerror: {}'.format(order_id, product_id, e3))
            raise e3
        return False
def main(data=None):
    """Worker entry point: configure the environment, then process each
    order/scene record with work().

    :param data: optional list of order dicts; when falsy, it is parsed from
                 the command line (JSON) instead.
    """
    try:
        # retrieve a dict containing processing environment configuration values
        cfg = config.config()
        sleep_for = cfg.get('init_sleep_seconds')
        base_logger.info('Holding for {} seconds'.format(sleep_for))
        sleep(sleep_for)
        # export values for the container environment
        config.export_environment_variables(cfg)
        # create the .netrc file
        utilities.build_netrc()
        base_logger.debug('OS ENV - {0}'.format(['{0}: {1}'.format(var, val) for var, val in os.environ.items()]))
        base_logger.info('configured parameters - {0}'.format(['{0}: {1}'.format(var, val) for var, val in cfg.items()]))
        if not data:
            parser = argparse.ArgumentParser()
            # NOTE(review): the two adjacent help strings concatenate without
            # a space ("...APIcontaining...").
            parser.add_argument(dest="data", action="store", metavar="JSON",
                                type=utilities.convert_json,
                                help="response from the API"
                                     "containing order information")
            args = parser.parse_args()
            data = args.data
        base_logger.info('order data - {0}'.format(data))
        for d in data:
            result = work(cfg, d)
            base_logger.info('processing.work executed for data {} successfully? {}'.format(d, result))
        # Exit normally (SystemExit is not caught by the except below)
        sys.exit(0)
    except Exception as e:
        msg = 'ESPA Worker error, problem executing main.main\nError: {}'.format(e)
        base_logger.exception(msg)
        # Exit with 1 so Container and Task know there was a problem and report to the framework appropriately
        sys.exit(msg)
if __name__ == '__main__':
    main()
| djzelenak/espa-worker | processing/main.py | main.py | py | 8,666 | python | en | code | 0 | github-code | 36 |
40657010510 | import tensorflow as tf
import numpy as np
import scipy
import time
import math
import argparse
import random
import sys
import os
import matplotlib.pyplot as plt
from termcolor import colored, cprint
# from Kuhn_Munkres import KM
# from BN16 import BatchNormalizationF16
from time import gmtime, strftime
from external.structural_losses.tf_approxmatch import approx_match, match_cost
from external.sampling.tf_sampling import farthest_point_sample, prob_sample
default_dtype = tf.float32  # compute dtype used throughout the graph layers below
SN = False  # global toggle: use spectral normalization in fc_as_conv_SN and the conv layers
def batch_norm(inputs, decay, is_train, name):
    """Normalization stub: intentionally a no-op that returns ``inputs``.

    Several normalization schemes (batch / layer / instance / group norm)
    were experimented with here and are all currently disabled.  The
    signature is kept so call sites need no change if normalization is
    re-enabled later.

    :param inputs: tensor, returned unchanged
    :param decay: moving-average decay, currently unused
    :param is_train: training flag, currently unused
    :param name: scope name, currently unused
    """
    # NOTE(review): the previous body overwrote the incoming ``decay`` with
    # 0.965 and carried a large block of unreachable experiment code after
    # the early return; both removed for clarity, behavior is identical.
    return inputs
def spectral_norm(w, iteration=1):
    """Spectrally normalize weight ``w``: divide it by an estimate of its
    largest singular value, obtained with ``iteration`` steps of power
    iteration. A persistent non-trainable variable ``u`` carries the singular
    vector estimate across calls.
    """
    w_shape = w.shape.as_list()
    w = tf.reshape(w, [-1, w_shape[-1]])
    u = tf.get_variable("u", [1, w_shape[-1]], initializer=tf.random_normal_initializer(), trainable=False)
    u_hat = u
    v_hat = None
    for i in range(iteration):
        """
        power iteration
        Usually iteration = 1 will be enough
        """
        v_ = tf.matmul(u_hat, tf.transpose(w))
        v_hat = tf.nn.l2_normalize(v_)
        u_ = tf.matmul(v_hat, w)
        u_hat = tf.nn.l2_normalize(u_)
    # Treat the power-iteration estimates as constants for gradient purposes.
    u_hat = tf.stop_gradient(u_hat)
    v_hat = tf.stop_gradient(v_hat)
    # sigma approximates the largest singular value of w.
    sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
    # Updating u is a side effect of computing the normalized weight.
    with tf.control_dependencies([u.assign(u_hat)]):
        w_norm = w / sigma
    w_norm = tf.reshape(w_norm, w_shape)
    return w_norm
def fc_as_conv_SN(inputs, outDim, act = None, bias = True, name = 'fc'):
    """Fully-connected layer over the last axis of an N-d tensor, with
    optional spectral normalization of the weight (controlled by the global
    ``SN`` flag).

    :param inputs: tensor [..., C_in]; flattened to 2-D for the matmul
    :param outDim: output channel count
    :param act: optional activation applied to the result
    :param bias: whether to add a bias term
    :param name: variable scope name
    :return: tensor [..., outDim]
    """
    input_shape = inputs.shape.as_list()
    inputs = tf.reshape(inputs, [-1, input_shape[-1]])
    with tf.variable_scope(name):
        w = tf.get_variable('W', shape = [input_shape[-1], outDim], dtype = default_dtype)
        if SN:
            x = tf.matmul(inputs, spectral_norm(w))
        else:
            x = tf.matmul(inputs, w)
        if bias == True:
            b = tf.get_variable('b', shape = [outDim], dtype = default_dtype)
            x = tf.nn.bias_add(x, b)
        # Restore the original leading dimensions with the new channel count.
        x_shape = input_shape
        x_shape[-1] = outDim
        x = tf.reshape(x, x_shape)
        if act is not None:
            x = act(x)
        return x
# Laplacian is always FP32
def Laplacian(bs, N, k, kNNIdx, name = 'kNNG_laplacian'):
    """Build the graph Laplacian L = D - A of a kNN graph.

    The directed kNN edges are symmetrized (OR with the transpose) so L is a
    real symmetric matrix; intermediate work is done in fp16 and the result
    is cast back to ``default_dtype``.

    :param bs: batch size
    :param N: number of nodes
    :param k: neighbors per node
    :param kNNIdx: [bs, N, k, 2] index tensor ([batch, end]) as produced by bip_kNNG_gen
    :return: (L, 0, 0) -- the two zeros are unused placeholder return slots
    """
    with tf.variable_scope(name):
        Ns = tf.broadcast_to(tf.reshape(tf.range(N), [1, N, 1]), [bs, N, k])
        _ustack = tf.unstack(kNNIdx, axis = -1) # list of [bs, N, k]
        kNNIdx_withN = tf.stack([_ustack[0], Ns, _ustack[1]], axis = -1) # [bs, N, k, 3], containing [#batch, #start, #end] with #start #end in [0, N)
        # Translate a directed graph to undirected graph by removing the direction of edges, in order to obtain a real symmtric matrix L.
        A = tf.scatter_nd(kNNIdx_withN, tf.constant(True, shape = [bs, N, k]), [bs, N, N], name = 'A')
        # print(A.shape)
        A_T = tf.transpose(A, [0, 2, 1])
        A_undirected = tf.math.logical_or(A, A_T)
        A = tf.cast(A_undirected, tf.float16, name = 'A_undirected') # [bs, N, N]
        # print(A.shape)
        # D = tf.matrix_set_diag(tf.zeros([bs, N, N], tf.float32), tf.reduce_sum(A, axis = -1)) # [bs, N] -> [bs, N, N]
        # print(D.shape)
        # Diagonal gets degree; "- 1" compensates for the self-loop counted in A.
        L = tf.matrix_set_diag(-A, tf.reduce_sum(A, axis = -1) - 1) # We have self-loops
        # print(L.shape)
        # Normalizations for the laplacian?
        return tf.cast(L, default_dtype), 0, 0
# Inputs: [bs, N, C]
# Builds edges X -> Y
def bip_kNNG_gen(Xs, Ys, k, pos_range, name = 'kNNG_gen'):
    """Build a bipartite k-nearest-neighbor graph with edges X -> Y.

    The first ``pos_range`` channels of each point are treated as
    coordinates; pairwise distances are computed in fp16.

    :param Xs: [bs, Nx, Cx] query point set
    :param Ys: [bs, Ny, Cy] reference point set
    :param k: neighbors per X point (clamped to Ny)
    :param pos_range: leading channel count interpreted as coordinates
    :return: (posX, posY, kNNIdx [bs, Nx, k, 2], kNNEdg [bs, Nx, k, pos_range])
    """
    with tf.variable_scope(name):
        bs = Xs.shape[0]
        Nx = Xs.shape[1]
        Ny = Ys.shape[1]
        Cx = Xs.shape[2]
        Cy = Ys.shape[2]
        k = min(Ny, k)
        print("bip-kNNG-gen: %4d -> %4d, kernel = %3d" % (Nx, Ny, k))
        posX = Xs[:, :, :pos_range]
        posY = Ys[:, :, :pos_range]
        drow = tf.cast(tf.reshape(posX, [bs, Nx, 1, pos_range]), tf.float16) # duplicate for row
        dcol = tf.cast(tf.reshape(posY, [bs, 1, Ny, pos_range]), tf.float16) # duplicate for column
        local_pos = drow - dcol #[bs, Nx, Ny, 3]
        # minusdist = -tf.sqrt(tf.reduce_sum(tf.square(local_pos), axis = 3))
        # minusdist = -tf.sqrt(tf.add_n(tf.unstack(tf.square(local_pos), axis = 3))) # Will this be faster?
        # Negate distances so top_k selects the k *nearest* neighbors.
        minusdist = -tf.norm(local_pos, ord = 'euclidean', axis = -1)
        _kNNEdg, _TopKIdx = tf.nn.top_k(minusdist, k)
        TopKIdx = _TopKIdx[:, :, :] # No self-loops? (Separated branch for self-conv)
        # TopKIdx = _TopKIdx # Have self-loops?
        kNNEdg = -_kNNEdg[:, :, :] # Better methods?
        kNNEdg = tf.stop_gradient(kNNEdg) # Don't flow gradients here to avoid nans generated for unselected edges
        kNNEdg = tf.cast(tf.reshape(kNNEdg, [bs, Nx, k, 1]), default_dtype)
        # Build NxKxC Neighboor tensor
        # Create indices
        batches = tf.broadcast_to(tf.reshape(tf.range(bs), [bs, 1, 1]), [bs, Nx, k])
        kNNIdx = tf.stack([batches, TopKIdx], axis = -1)
        Ns = tf.broadcast_to(tf.reshape(tf.range(Nx), [1, Nx, 1]), [bs, Nx, k])
        gather_lpos_indices = tf.stack([batches, Ns, TopKIdx], axis = -1)
        # [x, y, z], 1st order moment
        neighbor_pos = tf.cast(tf.gather_nd(local_pos, gather_lpos_indices), default_dtype) # [bs, Nx, k, 3]
        # [xx, xy, xz, yx, yy, yz, zx, zy, zz], 2nd order moment
        # neighbor_pos_rs = tf.reshape(neighbor_pos, [bs, Nx, k, 3, 1])
        # neighbor_quadratic = tf.reshape(tf.multiply(neighbor_pos_rs, tf.transpose(neighbor_pos_rs, perm = [0, 1, 2, 4, 3])), [bs, Nx, k, 9])
        # NOTE(review): the scalar-distance kNNEdg computed above is discarded
        # here; only the relative positions are kept as edge features.
        kNNEdg = tf.concat([neighbor_pos], axis = -1) # [bs, Nx, k, eC]
        # kNNEdg = tf.concat([kNNEdg, neighbor_pos, neighbor_quadratic], axis = -1) # [bs, Nx, k, eC]
        return posX, posY, kNNIdx, kNNEdg
def kNNG_gen(inputs, k, pos_range, name = 'kNNG_gen'):
    """Build a self-kNN graph (edges from the point set to itself).

    Thin wrapper around bip_kNNG_gen with Xs == Ys; the duplicate position
    tensor of the bipartite result is discarded.
    """
    result = bip_kNNG_gen(inputs, inputs, k, pos_range, name)
    pos, knn_idx, knn_edg = result[0], result[2], result[3]
    return pos, knn_idx, knn_edg
# Inputs: [bs, Nx, Cx] [bs, Ny, Cy]
# kNNIdx: [bs, Nx, k]
# kNNEdg: [bs, Nx, k, eC]
# Edges are X -> Y
def bip_kNNGConvLayer_concatMLP(Xs, Ys, kNNIdx, kNNEdg, act, channels, no_act_final = False, W_init = tf.truncated_normal_initializer(stddev=0.1), b_init = tf.constant_initializer(value=0.0), name = 'kNNGConvNaive'):
    """Graph conv over a bipartite kNN graph: each edge feature [v; u; edg]
    and each edge geometry pass through 1x1 convs, are concatenated and
    mixed, summed over the k neighbors, then combined with the node's own
    features ("U" stage).

    :return: (result [bs, Nx, channels], list of the layer's variables)
    """
    with tf.variable_scope(name):
        bs = Xs.shape[0]
        Nx = Xs.shape[1]
        Ny = Ys.shape[1]
        Cx = Xs.shape[2]
        Cy = Ys.shape[2]
        k = kNNIdx.shape[2]
        eC = kNNEdg.shape[3]
        neighbors = tf.gather_nd(Ys, kNNIdx)
        # neighbors: Edge u-v = [u;v;edg]
        neighbors = tf.concat([neighbors, tf.broadcast_to(tf.reshape(Xs, [bs, Nx, 1, Cx]), [bs, Nx, k, Cx]), kNNEdg], axis = -1) # [bs, Nx, Cx+Cy+eC]
        # Reshape to conv
        rs_neighbors = tf.reshape(neighbors, [bs, Nx*k, Cx+Cy+eC])
        rs_knnedg = tf.reshape(kNNEdg, [bs, Nx*k, eC])
        # rs_neighbors = tf.concat([rs_neighbors, rs_knnedg], -1) # embed edge data in it
        ### Do the convolution ###
        # TODO: MLP instead of 1x fc?
        # Collect neightbors ("M" stage)
        W_neighbor = tf.get_variable('W_neighbor', dtype = default_dtype, shape = [1, Cx+Cy+eC, channels], initializer = W_init, trainable=True)
        b_neighbor = tf.get_variable('b_neighbor', dtype = default_dtype, shape = [channels], initializer = b_init, trainable=True)
        resnbr = tf.nn.conv1d(rs_neighbors, W_neighbor, 1, padding = 'SAME')
        resnbr = tf.nn.bias_add(resnbr, b_neighbor)
        resnbr = act(resnbr)
        # resnbr = tf.reshape(resnbr, [bs, Nx, k, channels])
        # Collect edge masks
        W_edges = tf.get_variable("W_edges", dtype = default_dtype, shape = [1, eC, channels], initializer = W_init, trainable=True)
        b_edges = tf.get_variable("b_edges", dtype = default_dtype, shape = [channels], initializer = b_init, trainable=True)
        resedg = tf.nn.conv1d(rs_knnedg, W_edges, 1, padding = 'SAME')
        resedg = tf.nn.bias_add(resedg, b_edges)
        resedg = act(resedg)
        # resedg = tf.nn.softmax(resedg, axis = -1)
        # resedg = tf.reshape(resedg, [bs, Nx, k, channels])
        # resnbr = tf.multiply(resnbr, resedg)
        resnbr = tf.concat([resnbr, resedg], axis = -1)
        W_nb2 = tf.get_variable('W_neighbor2', dtype = default_dtype, shape = [1, channels*2, channels], initializer = W_init, trainable=True)
        b_nb2 = tf.get_variable('b_neighbor2', dtype = default_dtype, shape = [channels], initializer = b_init, trainable=True)
        resnbr = tf.nn.conv1d(resnbr, W_nb2, 1, padding = 'SAME')
        resnbr = tf.nn.bias_add(resnbr, b_nb2)
        resnbr = act(resnbr)
        resnbr = tf.reshape(resnbr, [bs, Nx, k, channels])
        # Aggregate messages over the k neighbors.
        resnbr = tf.reduce_sum(resnbr, axis = 2) # combine_method?
        # Combine the aggregated neighborhood with the node's own features.
        W_self = tf.get_variable('W_self', dtype = default_dtype, shape = [1, Cx + channels, channels], initializer = W_init, trainable=True)
        b_self = tf.get_variable('b', dtype = default_dtype, shape = [channels], initializer = b_init, trainable=True)
        res = tf.nn.conv1d(tf.concat([Xs, resnbr], axis = -1), W_self, 1, padding = 'SAME')
        res = tf.nn.bias_add(res, b_self)
        if not no_act_final:
            res = act(res)
        return res, [W_neighbor, b_neighbor, W_edges, b_edges, W_self, b_self] # [bs, Nx, channels]
def bip_kNNGConvLayer_concat(Xs, Ys, kNNIdx, kNNEdg, act, channels, W_init = tf.truncated_normal_initializer(stddev=0.1), b_init = tf.constant_initializer(value=0.0), name = 'kNNGConvNaive'):
    """Simplest graph conv variant: a single 1x1 conv over the concatenated
    edge feature [v; u; edg], summed over the k neighbors.

    :return: (result [bs, Nx, channels], list of the layer's variables)
    """
    with tf.variable_scope(name):
        bs = Xs.shape[0]
        Nx = Xs.shape[1]
        Ny = Ys.shape[1]
        Cx = Xs.shape[2]
        Cy = Ys.shape[2]
        k = kNNIdx.shape[2]
        eC = kNNEdg.shape[3]
        neighbors = tf.gather_nd(Ys, kNNIdx)
        # neighbors: Edge u-v = [u;v;edg]
        neighbors = tf.concat([neighbors, tf.broadcast_to(tf.reshape(Xs, [bs, Nx, 1, Cx]), [bs, Nx, k, Cx]), kNNEdg], axis = -1) # [bs, Nx, k, Cx+Cy+eC]
        ### Do the convolution ###
        # Collect neightbors ("M" stage)
        W_neighbor = tf.get_variable('W_neighbor', dtype = default_dtype, shape = [1, 1, Cx+Cy+eC, channels], initializer = W_init, trainable=True)
        b_neighbor = tf.get_variable('b_neighbor', dtype = default_dtype, shape = [channels], initializer = b_init, trainable=True)
        res = tf.nn.conv2d(neighbors, W_neighbor, [1, 1, 1, 1], padding = 'SAME')
        # res = tf.reduce_max(res, axis = 2) # combine_method?
        res = tf.reduce_sum(res, axis = 2) # combine_method?
        # res = tf.add_n(tf.unstack(res, axis = 2)) # combine_method? # nearly the same performance
        res = tf.nn.bias_add(res, b_neighbor)
        if act:
            res = act(res)
        return res, [W_neighbor, b_neighbor] # [bs, Nx, channels]
def bip_kNNGConvLayer_feature(Xs, Ys, kNNIdx, kNNEdg, act, channels, is_train, W_init = tf.truncated_normal_initializer(stddev=0.1), b_init = tf.constant_initializer(value=0.0), name = 'kNNGConvNaive'):
    """Continuous-kernel graph conv: an MLP maps each edge's geometry to a
    per-edge kernel of shape [channels, fCh]; neighbor features are projected
    to the same factorized space and contracted against the kernel, then
    averaged over the k neighbors.

    :return: (result [bs, Nx, channels], [bias variable])
    """
    # NOTE(review): global_is_train is declared but never used in this body.
    global global_is_train
    with tf.variable_scope(name):
        bs = Xs.shape[0]
        Nx = Xs.shape[1]
        Ny = Ys.shape[1]
        Cx = Xs.shape[2]
        Cy = Ys.shape[2]
        k = kNNIdx.shape[2]
        eC = kNNEdg.shape[3]
        neighbors = tf.gather_nd(Ys, kNNIdx)
        # neighbors: Edge u-v = [u;v;edg]
        # neighbors = tf.concat([neighbors, kNNEdg], axis = -1) # [bs, Nx, k, Cx+Cy+eC]
        # fCh: kernel factorization width; wider for larger channel counts.
        fCh = 6
        if channels > 32:
            fCh = 12
        ### Do the convolution ###
        mlp = [channels]
        n = kNNEdg
        for i in range(len(mlp)):
            if SN:
                n = fc_as_conv_SN(n, mlp[i], tf.nn.elu, name = 'kernel/mlp%d' % i)
            else:
                n = tf.contrib.layers.conv2d(n, mlp[i], [1, 1], padding = 'SAME', activation_fn = tf.nn.elu, scope = 'kernel/mlp%d' % i, weights_initializer = W_init)
        n = batch_norm(n, 0.999, is_train, 'kernel/bn')
        if SN:
            n = fc_as_conv_SN(n, channels * fCh, tf.nn.tanh, name = 'kernel/mlp_out')
        else:
            n = tf.contrib.layers.conv2d(n, channels * fCh, [1, 1], padding = 'SAME', activation_fn = tf.nn.tanh, scope = 'kernel/mlp_out', weights_initializer = W_init)
        # cW: per-edge continuous kernel weights.
        cW = tf.reshape(n, [bs, Nx, k, channels, fCh])
        # Batch matmul won't work for more than 65535 matrices ???
        # n = tf.matmul(n, tf.reshape(neighbors, [bs, Nx, k, Cy, 1]))
        # Fallback solution
        if SN:
            n = fc_as_conv_SN(neighbors, channels * fCh, None, name = 'feature/feature_combine')
        else:
            n = tf.contrib.layers.conv2d(neighbors, channels * fCh, [1, 1], padding = 'SAME', activation_fn = None, scope = 'feature/feature_combine', weights_initializer = W_init)
        n = batch_norm(n, 0.999, is_train, 'feature/bn')
        # MatMul
        n = tf.reshape(n, [bs, Nx, k, channels, fCh])
        n = tf.reduce_sum(tf.multiply(cW, n), axis = -1)
        print(n.shape)
        print("Graph cConv: [%3d x %2d] = %4d" % (channels, fCh, channels * fCh))
        # n = tf.reshape(n, [bs, Nx, k, channels])
        b = tf.get_variable('b_out', dtype = default_dtype, shape = [channels], initializer = b_init, trainable = True)
        # Mean aggregation over the k neighbors, then bias and activation.
        n = tf.reduce_mean(n, axis = 2)
        n = tf.nn.bias_add(n, b)
        if act is not None:
            n = act(n)
        return n, [b] # [bs, Nx, channels]
def bip_kNNGConvLayer_edgeMask(Xs, Ys, kNNIdx, kNNEdg, act, channels, no_act_final = False, W_init = tf.truncated_normal_initializer(stddev=0.1), b_init = tf.constant_initializer(value=0.0), name = 'kNNGConvNaive'):
    """
    Bipartite kNN-graph convolution with a sigmoid edge gate.

    Each neighbor message [sender; receiver; edge] is projected to
    `channels`, scaled by a per-edge sigmoid gate computed from the edge
    features alone, and summed over the k neighbors of every Xs node.

    :return: (features [bs, Nx, channels], list of the four variables)
    """
    with tf.variable_scope(name):
        bs = Xs.shape[0]
        Nx = Xs.shape[1]
        Ny = Ys.shape[1]
        Cx = Xs.shape[2]
        Cy = Ys.shape[2]
        k = kNNIdx.shape[2]
        eC = kNNEdg.shape[3]
        # Per-edge gate in (0, 1), computed from edge features only.
        W_edge = tf.get_variable('W_edge', dtype = default_dtype, shape = [1, 1, eC, channels], initializer = W_init, trainable = True)
        b_edge = tf.get_variable('b_edge', dtype = default_dtype, shape = [channels], initializer = b_init, trainable = True)
        edge_gate = tf.nn.sigmoid(tf.nn.bias_add(tf.nn.conv2d(kNNEdg, W_edge, [1, 1, 1, 1], padding = 'SAME'), b_edge))
        # Build the message tensor: sender, receiver (broadcast), edge.
        sender = tf.gather_nd(Ys, kNNIdx)
        receiver = tf.broadcast_to(tf.reshape(Xs, [bs, Nx, 1, Cx]), [bs, Nx, k, Cx])
        message = tf.concat([sender, receiver, kNNEdg], axis = -1) # [bs, Nx, k, Cy+Cx+eC]
        ### Do the convolution ###
        W_neighbor = tf.get_variable('W_neighbor', dtype = default_dtype, shape = [1, 1, Cx+Cy+eC, channels], initializer = W_init, trainable=True)
        b_neighbor = tf.get_variable('b_neighbor', dtype = default_dtype, shape = [channels], initializer = b_init, trainable=True)
        out = tf.nn.conv2d(message, W_neighbor, [1, 1, 1, 1], padding = 'SAME')
        # Gate each neighbor message, then combine by summation.
        out = tf.reduce_sum(tf.multiply(out, edge_gate), axis = 2)
        out = tf.nn.bias_add(out, b_neighbor)
        if act:
            out = act(out)
        return out, [W_edge, b_edge, W_neighbor, b_neighbor]
# Inputs: [bs, N, C]
# Pos: [bs, N, 3]
def kNNGPooling_farthest(inputs, pos, k):
    """
    Farthest-point-sampling pooling: keep k well-spread points.

    Returns (pooled pos [bs, k, 3], pooled features [bs, k, C], dummy
    score 0.0, empty variable list, dummy freq-loss 0.0) so the tuple
    shape matches the learnable pooling variants.
    """
    bs = pos.shape[0]
    N = pos.shape[1]
    k = min(N, k)
    sampled = farthest_point_sample(k, pos) # [bs, k] indices into N
    # Pair every sampled index with its batch id for gather_nd.
    batch_ids = tf.broadcast_to(tf.reshape(tf.range(bs), [bs, 1]), [bs, k])
    pick = tf.stack([batch_ids, sampled], axis = -1)
    feat = tf.gather_nd(inputs, pick) # [bs, k, C]
    where = tf.gather_nd(pos, pick)   # [bs, k, 3]
    return where, feat, 0.0, [], 0.0
# Inputs: [bs, N, C]
# Pos: [bs, N, 3]
def kNNGPooling_rand(inputs, pos, bs, N, k, laplacian, masking = True, channels = 1, W_init = tf.truncated_normal_initializer(stddev=0.1), name = 'kNNGPool'):
    """
    Random pooling: keep k points chosen uniformly at random per batch.

    `laplacian`, `masking`, `channels` and `W_init` are accepted only for
    signature compatibility with the learnable pooling variants.

    Returns (pooled pos, pooled features, random scores [bs, N], empty
    variable list, dummy freq-loss 0.0).
    """
    with tf.variable_scope(name):
        k = min(N, k)
        # Uniform random priority per point; top_k then selects a random subset.
        scores = tf.random.uniform([bs, N])
        val, idx = tf.nn.top_k(scores, k) # [bs, k]
        batch_ids = tf.broadcast_to(tf.reshape(tf.range(bs), [bs, 1]), [bs, k])
        pick = tf.stack([batch_ids, idx], axis = -1)
        feat = tf.gather_nd(inputs, pick) # [bs, k, C]
        where = tf.gather_nd(pos, pick)   # [bs, k, 3]
        # Score-based feature masking is intentionally disabled for the
        # random variant (scores carry no information).
        return where, feat, scores, [], 0.0
# Inputs: [bs, N, C]
# Pos: [bs, N, 3]
def kNNGPooling_GUnet(inputs, pos, k, masking = True, channels = 1, W_init = tf.truncated_normal_initializer(stddev=0.1), name = 'kNNGPool'):
    """
    Graph-U-Net style top-k pooling.

    Projects features onto `channels` learnable directions, normalizes by
    each direction's norm, averages to one score per point, and keeps the
    k highest-scoring points. When `masking`, kept features are scaled by
    tanh(score) so the selection stays differentiable w.r.t. W.

    :return: (pooled pos [bs, k, 3], pooled features [bs, k, C], [W])
    """
    with tf.variable_scope(name):
        bs = inputs.shape[0]
        N = inputs.shape[1]
        C = inputs.shape[2]
        k = min(N, k)
        W = tf.get_variable('W', dtype = default_dtype, shape = [1, C, channels], initializer=W_init, trainable=True)
        norm = tf.sqrt(tf.reduce_sum(tf.square(W), axis = 1, keepdims = True)) # [1, 1, channels]
        y = tf.nn.conv1d(inputs, W, 1, padding = 'SAME') # [bs, N, channels] (projection scores)
        y = tf.multiply(y, 1.0 / norm)
        y = tf.reduce_mean(y, axis = -1) # [bs, N]
        val, idx = tf.nn.top_k(y, k) # [bs, k]
        # Pick them
        batches = tf.broadcast_to(tf.reshape(tf.range(bs), [bs, 1]), [bs, k])
        gather_idx = tf.stack([batches, idx], axis = -1)
        pool_features = tf.gather_nd(inputs, gather_idx) # [bs, k, C]
        pool_position = tf.gather_nd(pos, gather_idx) # [bs, k, 3]
        if masking == True:
            pool_features = tf.multiply(pool_features, tf.reshape(tf.nn.tanh(val), [bs, k, 1]))
        return pool_position, pool_features, [W]
# Inputs: [bs, N, C]
# Pos: [bs, N, 3]
def kNNGPooling_CAHQ(inputs, pos, k, kNNIdx, kNNEdg, laplacian, is_train, masking = True, channels = 1, W_init = tf.truncated_normal_initializer(stddev=0.1), name = 'kNNGPool', stopGradient = False, act = tf.nn.relu, b_init = None):
    """
    Learnable top-k pooling with graph-smoothed importance scores.

    A per-point importance in (-1, 1) from a 1x1 conv is refined by
    `layers` residual message-passing rounds that mix each point's score
    with its neighbors' (conditioned on edge length). The k highest
    scoring points are kept. Also returns a "frequency" loss
    -||L y|| / ||y|| (negated so minimizing it favors high-frequency
    score fields w.r.t. the graph Laplacian).

    :return: (pooled pos [bs, k, 3], pooled features [bs, k, C],
        scores y [bs, N], empty variable list, freq loss per batch)
    """
    with tf.variable_scope(name):
        bs = inputs.shape[0]
        N = inputs.shape[1]
        C = inputs.shape[2]
        k = min(N, k)
        if stopGradient == True:
            inputs = tf.stop_gradient(inputs)
        # Edge length as a scalar edge feature.
        kNNEdg_dist = tf.norm(kNNEdg, axis = -1, keepdims = True)
        imp = tf.contrib.layers.conv1d(inputs, 1, 1, padding = 'SAME', activation_fn = tf.nn.tanh, scope = 'importance')
        layers = 4
        f = imp
        for i in range(layers):
            tmp = f # residual shortcut
            # Gather neighbor scores and condition on edge length.
            f = tf.gather_nd(f, kNNIdx)
            f = tf.concat([f, kNNEdg_dist], axis = -1) # [bs, N, k, 2]
            f = tf.contrib.layers.conv2d(f, 16, 1, padding = 'SAME', scope = 'mlp%d/h1' % i)
            f = tf.contrib.layers.conv2d(f, 1, 1, padding = 'SAME', scope = 'mlp%d/h2' % i) # [bs, N, k, 1]
            f = tf.reduce_mean(f, axis = 2) # [bs, N, 1]
            # Per-point read-out MLP.
            f = tf.contrib.layers.conv1d(f, 16, 1, padding = 'SAME', scope = 'mlp%d/ro/h1' % i)
            f = tf.contrib.layers.conv1d(f, 1, 1, padding = 'SAME', scope = 'mlp%d/ro/h2' % i)
            f = batch_norm(f, 0.999, is_train, 'mlp%d/bn' % i)
            f = f + tmp
        y = tf.reshape(f, [bs, N])
        # Freq Loss (Rayleigh-quotient-like ratio, computed in float32)
        print(laplacian.shape)
        norm_Ly = tf.sqrt(tf.reduce_sum(tf.cast(tf.square(tf.matmul(laplacian, tf.reshape(y, [bs, N, 1]), name = 'L_y')), tf.float32), axis = [1, 2]) + 1e-3)
        norm_y = tf.sqrt(tf.reduce_sum(tf.cast(tf.square(y), tf.float32), axis = 1) + 1e-3)
        freq_loss = norm_Ly / (norm_y + 1e-3) # Maximize this
        freq_loss = 0 - freq_loss # Minimize negate
        # freq_loss = 0
        val, idx = tf.nn.top_k(y, k) # [bs, k]
        # Pick them
        batches = tf.broadcast_to(tf.reshape(tf.range(bs), [bs, 1]), [bs, k])
        gather_idx = tf.stack([batches, idx], axis = -1)
        pool_features = tf.gather_nd(inputs, gather_idx) # [bs, k, C]
        pool_position = tf.gather_nd(pos, gather_idx) # [bs, k, 3]
        if masking == True:
            # pool_features = tf.multiply(pool_features, tf.reshape(tf.nn.tanh(val), [bs, k, 1]))
            pool_features = tf.multiply(pool_features, tf.reshape(val, [bs, k, 1]))
        return pool_position, pool_features, y, [], tf.cast(freq_loss, default_dtype)
# Inputs: [bs, N, C]
# Pos: [bs, N, 3]
def kNNGPooling_HighFreqLoss_GUnet(inputs, pos, k, laplacian, masking = True, channels = 1, W_init = tf.truncated_normal_initializer(stddev=0.1), name = 'kNNGPool', stopGradient = False, act = tf.nn.relu, b_init = None):
    """
    Graph-U-Net style top-k pooling with a high-frequency score loss.

    Scores come from a small MLP over features augmented with two
    learnable sinusoidal position encodings. The k highest scoring points
    are kept; when `masking`, kept features are scaled by tanh(score) so
    gradients reach the scorer. Also returns -||L y|| / ||y|| as in
    kNNGPooling_CAHQ.

    :return: (pooled pos, pooled features, scores y [bs, N], [W],
        freq loss per batch)
    """
    with tf.variable_scope(name):
        bs = inputs.shape[0]
        N = inputs.shape[1]
        C = inputs.shape[2]
        k = min(N, k)
        if stopGradient == True:
            inputs = tf.stop_gradient(inputs)
        # Fuse freq features: sin(pos * f) at two learnable frequencies.
        fuse_freq = tf.get_variable('freq', dtype = default_dtype, shape = [2], trainable = True, initializer = tf.ones_initializer)
        fuse_1 = tf.math.sin(pos * fuse_freq[0])
        fuse_2 = tf.math.sin(pos * fuse_freq[1])
        tf.summary.scalar('Fuse_freq', fuse_freq[0])
        inputs = tf.concat([inputs, fuse_1, fuse_2], axis = -1)
        # NOTE(review): W is built from the pre-concat channel count C and is
        # only created/returned — the projection that used it is commented out.
        W = tf.get_variable('W', dtype = default_dtype, shape = [1, C, channels], initializer=W_init, trainable=True)
        norm = tf.sqrt(tf.reduce_sum(tf.square(W), axis = 1, keepdims = True)) # [1, 1, channels]
        # y = tf.nn.conv1d(inputs, W, 1, padding = 'SAME') # [bs, N, channels]
        # y = tf.multiply(y, 1.0 / (norm + 1e-3))
        # y = tf.reduce_mean(y, axis = -1) # [bs, N]
        # Score MLP: hidden layer(s) then a tanh-bounded scalar per point.
        mlp = [C*2]
        y = inputs
        for l in range(len(mlp)):
            y, _ = Conv1dWrapper(y, mlp[l], 1, 1, 'SAME', act, W_init = W_init, b_init = b_init, name = 'fc%d' % l)
        y, _ = Conv1dWrapper(y, 1, 1, 1, 'SAME', tf.nn.tanh, W_init = W_init, b_init = b_init, name = 'fcOut')
        y = tf.reshape(y, [bs, N])
        # Freq Loss (computed in float32)
        print(laplacian.shape)
        norm_Ly = tf.sqrt(tf.reduce_sum(tf.cast(tf.square(tf.matmul(laplacian, tf.reshape(y, [bs, N, 1]), name = 'L_y')), tf.float32), axis = [1, 2]) + 1e-3)
        norm_y = tf.sqrt(tf.reduce_sum(tf.cast(tf.square(y), tf.float32), axis = 1) + 1e-3)
        freq_loss = norm_Ly / (norm_y + 1e-3) # Maximize this
        freq_loss = 0 - freq_loss # Minimize negate
        # freq_loss = 0
        val, idx = tf.nn.top_k(y, k) # [bs, k]
        # Pick them
        batches = tf.broadcast_to(tf.reshape(tf.range(bs), [bs, 1]), [bs, k])
        gather_idx = tf.stack([batches, idx], axis = -1)
        pool_features = tf.gather_nd(inputs, gather_idx) # [bs, k, C]
        pool_position = tf.gather_nd(pos, gather_idx) # [bs, k, 3]
        if masking == True:
            pool_features = tf.multiply(pool_features, tf.reshape(tf.nn.tanh(val), [bs, k, 1]))
            # pool_features = tf.multiply(pool_features, tf.reshape(val, [bs, k, 1]))
        return pool_position, pool_features, y, [W], tf.cast(freq_loss, default_dtype)
def Conv1dWrapper(inputs, filters, kernel_size, stride, padding, act, W_init, b_init, bias = True, name = 'conv'):
    """
    1-D convolution helper returning (output, [created variables]).

    When the global spectral-norm switch `SN` is on, delegates to
    fc_as_conv_SN (which manages its own variables, hence the empty
    list); on that path `kernel_size`, `stride`, `padding`, the
    initializers and `bias` are ignored.
    """
    if SN:
        return fc_as_conv_SN(inputs, filters, act, name = name), []
    with tf.variable_scope(name):
        N = inputs.shape[1]
        C = inputs.shape[2]
        tracked = []
        W = tf.get_variable('W', dtype = default_dtype, shape = [kernel_size, C, filters], initializer=W_init, trainable=True)
        tracked.append(W)
        out = tf.nn.conv1d(inputs, W, stride, padding = padding)
        if bias == True:
            b = tf.get_variable('b', dtype = default_dtype, shape = [filters], initializer=b_init, trainable=True)
            out = tf.nn.bias_add(out, b)
            tracked.append(b)
        if act is not None:
            out = act(out)
        return out, tracked
def kNNGPosition_refine(input_position, input_feature, refine_maxLength, act, hidden = 128, W_init = tf.truncated_normal_initializer(stddev=0.1), b_init = tf.constant_initializer(value=0.0), name = 'kNNGPosRefine'):
    """
    Predict a bounded per-point displacement from features and apply it.

    The raw offset is rescaled so its length becomes
    tanh(||offset||) * refine_maxLength (a soft cap): points can never
    move farther than refine_maxLength. `act` and `hidden` are unused on
    the active path (the hidden layer is disabled).

    :return: (refined positions [bs, N, pC], [conv variable list])
    """
    with tf.variable_scope(name):
        bs = input_position.shape[0]
        N = input_position.shape[1]
        C = input_feature.shape[2]
        pC = input_position.shape[2]
        assert N == input_feature.shape[1] and bs == input_feature.shape[0]
        offset, v = Conv1dWrapper(input_feature, pC, 1, 1, 'SAME', None, W_init, b_init, True, 'refine') # [bs, N, pC]
        raw_len = tf.norm(offset, axis = -1, keepdims = True) # [bs, N, 1]
        capped_len = tf.nn.tanh(raw_len) * refine_maxLength
        # The +1 in the denominator avoids division by zero.
        offset = offset / (raw_len + 1) * capped_len
        return tf.add(input_position, offset), [v]
def bip_kNNGConvBN_wrapper(Xs, Ys, kNNIdx, kNNEdg, batch_size, gridMaxSize, particle_hidden_dim, act, decay = 0.999, is_train = True, name = 'gconv', W_init = tf.truncated_normal_initializer(stddev=0.1), b_init = tf.constant_initializer(value=0.0)):
    """
    Bipartite graph conv -> batch norm -> activation.

    Runs bip_kNNGConvLayer_feature with no internal activation, applies
    batch_norm with the given decay, then `act`.

    :return: (features, variables returned by the conv layer)
    """
    with tf.variable_scope(name):
        n, v = bip_kNNGConvLayer_feature(Xs, Ys, kNNIdx, kNNEdg, act = None, channels = particle_hidden_dim, is_train = is_train, W_init = W_init, b_init = b_init, name = 'gc')
        # Alternative conv layers, kept for reference:
        # n, v = bip_kNNGConvLayer_edgeMask(Xs, Ys, kNNIdx, kNNEdg, act = None, channels = particle_hidden_dim, W_init = W_init, b_init = b_init, name = 'gc')
        # n, v = bip_kNNGConvLayer_concat(Xs, Ys, kNNIdx, kNNEdg, act = None, channels = particle_hidden_dim, W_init = W_init, b_init = b_init, name = 'gc')
        # n, v = bip_kNNGConvLayer_concatMLP(Xs, Ys, kNNIdx, kNNEdg, act = act, no_act_final = True, channels = particle_hidden_dim, W_init = W_init, b_init = b_init, name = 'gc')
        # Disabled experiment: conv followed by an MLP head.
        if False:
        # if True:
            ch = particle_hidden_dim
            mlp = [ch * 2, ch * 2, ch]
            vs = []
            n, v = bip_kNNGConvLayer_concat(Xs, Ys, kNNIdx, kNNEdg, act = None, channels = mlp[0], W_init = W_init, b_init = b_init, name = 'gconv')
            vs.append(v)
            for i in range(1, len(mlp)):
                n = batch_norm(n, decay, is_train, name = 'bn%d' % (i-1))
                if act:
                    n = act(n)
                n, v = Conv1dWrapper(n, mlp[i], 1, 1, 'SAME', None, W_init, b_init, True, 'fc%d' % i)
        n = batch_norm(n, decay, is_train, name = 'bn')
        if act:
            n = act(n)
        return n, v
def kNNGConvBN_wrapper(inputs, kNNIdx, kNNEdg, batch_size, gridMaxSize, particle_hidden_dim, act, decay = 0.999, is_train = True, name = 'gconv', W_init = tf.truncated_normal_initializer(stddev=0.1), b_init = tf.constant_initializer(value=0.0)):
    """
    Self-graph convolution: the bipartite conv+BN wrapper applied with
    senders == receivers (same point set on both sides).
    """
    return bip_kNNGConvBN_wrapper(
        inputs, inputs, kNNIdx, kNNEdg, batch_size, gridMaxSize,
        particle_hidden_dim, act, decay, is_train, name, W_init, b_init)
# TODO: position re-fine layer
class model_particles:
def __init__(self, gridMaxSize, latent_dim, batch_size, optimizer, outDim):
    """
    Configure the particle model and create its input placeholders.

    :param gridMaxSize: particles per sample (graph size)
    :param latent_dim: dimensionality of the particle latent space
    :param batch_size: static batch size (baked into placeholder shapes)
    :param optimizer: optimizer instance used when building train ops
    :param outDim: per-particle output channels; inputs carry outDim + 1
        channels (see the placeholder comment below)
    """
    # Size of each grid
    self.gridMaxSize = gridMaxSize
    self.particle_latent_dim = latent_dim
    self.particle_hidden_dim = 64      # base width of the graph-conv layers
    self.cluster_feature_dim = 128     # per-cluster feature size from the encoder
    self.cluster_count = 128           # clusters left after encoder pooling
    # self.latent_dim = latent_dim
    self.combine_method = tf.reduce_sum
    self.loss_func = tf.abs
    self.resSize = 1
    self.batch_size = batch_size
    self.knn_k = 16                    # neighbors per node in the kNN graphs
    self.useVector = False             # True: collapse to one global latent vector
    self.doSim = True
    self.doLoop = True
    self.loops = 30
    self.normalize = 1.0
    self.outDim = outDim
    # self.act = (lambda x: 0.8518565165255 * tf.exp(-2 * tf.pow(x, 2)) - 1) # normalization constant c = (sqrt(2)*pi^(3/2)) / 3, 0.8518565165255 = c * sqrt(5).
    self.act = tf.nn.elu
    self.convact = tf.nn.elu
    # self.act = tf.nn.relu
    self.encoder_arch = 'plain' # plain, plain_noNorm, plain_shallow, attractor, attractor_attention, attractor_affine
    self.decoder_arch = 'plain' # plain, advanced_score, distribution_weight, distribution_conditional, attractor
    self.wdev=0.1 # stddev for the random-normal weight initializers
    # self.initial_grid_size = 6.0 # TODO: make this larger? (Done in dataLoad)
    # self.total_world_size = 96.0
    self.loss_metric = 'chamfer' # or 'earthmover'
    # Static-shape input placeholders; per the comment below the last
    # channel appears to be a constant-1 flag — confirm against dataLoad.
    self.ph_X = tf.placeholder(default_dtype, [self.batch_size, self.gridMaxSize, outDim + 1]) # x y z vx vy vz 1
    self.ph_Y = tf.placeholder(default_dtype, [self.batch_size, self.gridMaxSize, outDim + 1])
    self.ph_L = tf.placeholder(default_dtype, [self.batch_size, self.gridMaxSize, outDim + 1]) # Loop simulation (under latent space) ground truth
    self.ph_card = tf.placeholder(default_dtype, [self.batch_size]) # card
    self.ph_max_length = tf.placeholder('int32', [2])
    self.optimizer = optimizer
# 1 of a batch goes in this function at once.
def particleEncoder(self, input_particle, output_dim, is_train = False, reuse = False, returnPool = False):
    """
    Encode a particle cloud into cluster positions and features.

    Hierarchical graph encoder: `blocks` levels of farthest-point
    pooling + bipartite feature collection + kNN graph convolutions and
    residual blocks, finished by a 1x1 conv to cluster_feature_dim
    channels. When self.useVector, everything is further pooled into a
    single global feature at the origin.

    :param input_particle: [bs, gridMaxSize, outDim + 1]; xyz in the
        first 3 channels, channels from self.outDim onward are the
        per-point features fed to the convs.
    :param output_dim: unused here; kept for interface symmetry.
    :param is_train: batch-norm mode flag.
    :param reuse: variable-scope reuse flag.
    :param returnPool: also return the per-level pooled positions.
    :return: (cluster pos, cluster features, var list[, pool_pos],
        freq loss, pool eval funcs)
    """
    w_init = tf.random_normal_initializer(stddev=self.wdev)
    w_init = tf.contrib.layers.xavier_initializer(dtype = default_dtype) # overrides the line above
    b_init = tf.constant_initializer(value=0.0)
    g_init = tf.random_normal_initializer(1., 0.02)
    with tf.variable_scope("ParticleEncoder", reuse = reuse) as vs:
        # We are going to use a way deeper model than before. Please refer to model_particlesTest_backup.py for original codes.
        # I hate *** code.
        # Historical architecture configs are kept below for reference;
        # only the last uncommented group ("ShapeNet_regular_featureSqz")
        # is active.
        # blocks = 5
        # particles_count = [2560, 1280, 640, 320, self.cluster_count]
        # conv_count = [2, 2, 4, 1, 1]
        # res_count = [0, 0, 0, 2, 4]
        # kernel_size = [int(self.knn_k / 1.5), int(self.knn_k / 1.2), self.knn_k, self.knn_k, self.knn_k]
        # hd = self.particle_hidden_dim
        # channels = [int(hd / 3.2), hd // 2, hd, int(hd * 1.5), hd * 2]
        # LJP 2560
        # blocks = 4
        # particles_count = [self.gridMaxSize, 1280, 512, self.cluster_count]
        # conv_count = [3, 2, 2, 2]
        # res_count = [0, 0, 1, 1]
        # kernel_size = [self.knn_k, self.knn_k, self.knn_k, self.knn_k]
        # hd = self.particle_hidden_dim
        # channels = [hd // 3, hd // 2, hd, hd * 2]
        # LJP shallow
        # blocks = 3
        particles_count = [self.gridMaxSize, 768, self.cluster_count]
        # conv_count = [2, 3, 2]
        # res_count = [0, 0, 1]
        kernel_size = [self.knn_k, self.knn_k, self.knn_k]
        # hd = self.particle_hidden_dim
        # channels = [hd // 2, hd, hd * 2]
        # Test
        # blocks = 2
        # particles_count = [self.gridMaxSize, self.cluster_count]
        # conv_count = [3, 2]
        # res_count = [0, 0]
        # kernel_size = [self.knn_k, self.knn_k]
        # hd = self.particle_hidden_dim
        # channels = [hd, hd]
        # LJP Deep
        # blocks = 5
        # particles_count = [self.gridMaxSize, 1024, 512, 256, self.cluster_count]
        # conv_count = [4, 2, 2, 0, 0]
        # res_count = [0, 0, 1, 1, 2]
        # kernel_size = [6, 8, 12, self.knn_k, self.knn_k]
        # hd = self.particle_hidden_dim
        # channels = [16, 32, hd, hd*2, hd*4]
        # ShapeNet_NEWconvConcat and Fluid_NEWconvConcat
        # blocks = 5
        # particles_count = [self.gridMaxSize, 1920, 768, 256, self.cluster_count]
        # conv_count = [2, 3, 2, 2, 2]
        # res_count = [0, 0, 1, 2, 2]
        # kernel_size = [6, 8, 12, self.knn_k, self.knn_k]
        # hd = self.particle_hidden_dim
        # channels = [16, 32, hd, hd*2, hd*4]
        # ShapeNet_shallow_uniform_NEWconvConcat
        # blocks = 3
        # particles_count = [self.gridMaxSize, 1920, self.cluster_count]
        # conv_count = [2, 3, 2]
        # res_count = [0, 0, 1]
        # kernel_size = [self.knn_k, self.knn_k, self.knn_k]
        # hd = self.particle_hidden_dim
        # channels = [hd // 2, hd, hd*2]
        # ShapeNet_shallow_feature
        # blocks = 3
        # particles_count = [self.gridMaxSize, 1920, self.cluster_count]
        # conv_count = [1, 2, 0]
        # res_count = [0, 0, 2]
        # kernel_size = [self.knn_k, self.knn_k, self.knn_k]
        # bik = [0, 32, 64]
        # hd = self.particle_hidden_dim
        # channels = [hd // 2, hd, hd * 2]
        # ShapeNet_regular_featureSqz (ACTIVE config; overrides the lists above)
        blocks = 5
        particles_count = [self.gridMaxSize, 1920, 768, max(256, self.cluster_count * 2), self.cluster_count]
        conv_count = [1, 2, 2, 0, 0]
        res_count = [0, 0, 0, 1, 2]
        kernel_size = [self.knn_k, self.knn_k, self.knn_k, self.knn_k, min(self.knn_k, particles_count[4])]
        bik = [0, 32, 32, 48, 64]
        hd = self.particle_hidden_dim
        channels = [hd // 2, 2 * hd // 3, hd, 3 * hd // 2, max(self.particle_latent_dim, hd * 2)]
        res_count[4] = 6
        # ShapeNet_SingleVector
        # blocks = 5
        # particles_count = [self.gridMaxSize, 1920, 768, max(256, self.cluster_count * 2), self.cluster_count]
        # conv_count = [1, 2, 2, 0, 1]
        # res_count = [0, 0, 0, 1, 0]
        # kernel_size = [self.knn_k, self.knn_k, self.knn_k, self.knn_k, min(self.knn_k, particles_count[4])]
        # bik = [0, 32, 32, 48, 256]
        # hd = self.particle_hidden_dim
        # channels = [hd // 2, 2 * hd // 3, hd, 3 * hd // 2, max(self.particle_latent_dim, hd * 2)]
        # bik = [0, 4, 32]
        # channels = [hd // 8, hd // 6, hd // 4]
        # ShapeNet_shallow_uniform_convConcatSimpleMLP
        # blocks = 3
        # particles_count = [self.gridMaxSize, 1920, self.cluster_count]
        # conv_count = [1, 1, 1]
        # res_count = [0, 0, 1]
        # kernel_size = [self.knn_k, self.knn_k, self.knn_k]
        # hd = self.particle_hidden_dim
        # channels = [hd // 2, hd, hd*2]
        # ShapeNet_deepshallow_uniform_convConcatSimpleMLP
        # blocks = 7
        # particles_count = [self.gridMaxSize, 2560, 1280, 512, 256, 128, self.cluster_count]
        # conv_count = [2, 1, 1, 1, 0, 0, 0]
        # res_count = [0, 0, 0, 0, 1, 1, 2]
        # kernel_size = [self.knn_k for i in range(7)]
        # hd = self.particle_hidden_dim
        # channels = [hd // 2, hd // 2, hd, hd, hd, hd * 2, hd * 2]
        # ShapeNet_deep_uniform_edgeMask
        # blocks = 5
        # particles_count = [self.gridMaxSize, 1920, 768, 256, self.cluster_count]
        # conv_count = [4, 2, 0, 0, 0]
        # res_count = [0, 0, 1, 2, 2]
        # kernel_size = [self.knn_k // 2, self.knn_k, self.knn_k, self.knn_k, self.knn_k]
        # hd = self.particle_hidden_dim
        # channels = [hd // 2, int(hd / 1.4), hd, int(hd * 1.5), hd * 2]
        self.pool_count = blocks - 1
        self.pCount = particles_count
        # Fall back to kernel_size when the active config defines no bik
        # (bipartite kernel sizes for the post-pool feature collection).
        try:
            bik
        except NameError:
            bik = kernel_size
        gPos = input_particle[:, :, :3]
        n = input_particle[:, :, self.outDim:] # Ignore velocity
        var_list = []
        pool_pos = []
        pool_eval_func = []
        freq_loss = 0
        for i in range(blocks):
            if i > 0:
                # Pooling
                prev_n = n
                prev_pos = gPos
                # gPos, n, eval_func, v, fl = kNNGPooling_HighFreqLoss_GUnet(n, gPos, particles_count[i], MatL, W_init = w_init, name = 'gpool%d' % i, stopGradient = True)
                # gPos, n, eval_func, v, fl = kNNGPooling_CAHQ(n, gPos, particles_count[i], gIdx, gEdg, MatL, masking = True, W_init = w_init, name = 'gpool%d' % i, stopGradient = True)
                #gPos, n, eval_func, v, fl = kNNGPooling_rand(n, gPos, self.batch_size, particles_count[i-1], particles_count[i], MatL, masking = True, W_init = w_init, name = 'gpool%d' % i)
                gPos, n, eval_func, v, fl = kNNGPooling_farthest(n, gPos, particles_count[i])
                # Single point
                # if i == 4:
                # gPos = tf.zeros_like(gPos)
                var_list.append(v)
                # pool_eval_func.append(tf.concat([prev_pos, tf.reshape(eval_func, [self.batch_size, particles_count[i-1], 1])], axis = -1))
                pool_pos.append(gPos)
                freq_loss = freq_loss + fl
                # Collect features after pool
                _, _, bpIdx, bpEdg = bip_kNNG_gen(gPos, prev_pos, bik[i], 3, name = 'gpool%d/ggen' % i)
                n, _ = bip_kNNGConvBN_wrapper(n, prev_n, bpIdx, bpEdg, self.batch_size, particles_count[i], channels[i], self.act, is_train = is_train, W_init = w_init, b_init = b_init, name = 'gpool%d/gconv' % i)
            # Self-graph over the (possibly pooled) point set.
            gPos, gIdx, gEdg = kNNG_gen(gPos, kernel_size[i], 3, name = 'ggen%d' % i)
            # MatL, MatA, MatD = Laplacian(self.batch_size, particles_count[i], kernel_size[i], gIdx, name = 'gLaplacian%d' % i)
            # Plain conv stack for this level.
            for c in range(conv_count[i]):
                n, v = kNNGConvBN_wrapper(n, gIdx, gEdg, self.batch_size, particles_count[i], channels[i], self.act, 0.999, is_train = is_train, W_init = w_init, b_init = b_init, name = 'g%d/gconv%d' % (i, c))
                var_list.append(v)
            # Residual conv blocks for this level.
            tmp = n
            for r in range(res_count[i]):
                nn, v = kNNGConvBN_wrapper(n, gIdx, gEdg, self.batch_size, particles_count[i], channels[i], self.act, 0.999, is_train = is_train, W_init = w_init, b_init = b_init, name = 'g%d/res%d/conv1' % (i, r))
                var_list.append(v)
                nn, v = kNNGConvBN_wrapper(nn, gIdx, gEdg, self.batch_size, particles_count[i], channels[i], self.act, 0.999, is_train = is_train, W_init = w_init, b_init = b_init, name = 'g%d/res%d/conv2' % (i, r))
                var_list.append(v)
                n = n + nn
            # Long skip over the whole residual stack when it is deep.
            if res_count[i] > 1:
                n = n + tmp
        # tf.summary.histogram('Pooled_clusters_pos', gPos)
        # Squeeze to the cluster feature dimension.
        n, v = Conv1dWrapper(n, self.cluster_feature_dim, 1, 1, 'SAME', None, w_init, b_init, True, 'convOut')
        var_list.append(v)
        if self.useVector == True:
            # Pool everything into a single global feature at the origin.
            zeroPos = tf.zeros([self.batch_size, 1, 3])
            _, _, bpIdx, bpEdg = bip_kNNG_gen(zeroPos, gPos, particles_count[blocks - 1], 3, name = 'globalPool/bipgen')
            n, _ = bip_kNNGConvBN_wrapper(tf.zeros_like(zeroPos), n, bpIdx, bpEdg, self.batch_size, 1, 512, self.act, is_train = is_train, W_init = w_init, b_init = b_init, name = 'globalPool/gconv')
            n = fc_as_conv_SN(n, 512, name = 'globalPool/fc')
            gPos = zeroPos
        if returnPool == True:
            return gPos, n, var_list, pool_pos, freq_loss, pool_eval_func
        return gPos, n, var_list, freq_loss, pool_eval_func
def particleDecoder(self, cluster_pos, local_feature, groundTruth_card, output_dim, is_train = False, reuse = False):
w_init = tf.random_normal_initializer(stddev=self.wdev)
w_init_fold = tf.random_normal_initializer(stddev= 1.0*self.wdev)
w_init_pref = tf.random_normal_initializer(stddev=0.03*self.wdev)
w_init = tf.contrib.layers.xavier_initializer(dtype = default_dtype)
w_init_fold = w_init
w_init_pref = w_init
b_init = tf.constant_initializer(value=0.0)
g_init = tf.random_normal_initializer(1., 0.02)
with tf.variable_scope("ParticleDecoder", reuse = reuse) as vs:
CC = local_feature.shape[2]
if False: # Original approach
global_latent, v = Conv1dWrapper(local_feature, self.particle_latent_dim, 1, 1, 'SAME', None, w_init, b_init, True, 'convGlobal')
global_latent = self.combine_method(global_latent, axis = 1)
# Folding stage
fold_particles_count = self.gridMaxSize - self.cluster_count
# net_input = InputLayer(input_latent, name = 'input')
# FIXME: no card in this model
# generate random noise
pos_range = 3
# use gaussian for fluid
# z = tf.random.normal([self.batch_size, fold_particles_count, self.particle_latent_dim * 2], dtype = default_dtype)
# but uniform should be way better
# z = tf.random.uniform([self.batch_size, fold_particles_count, self.particle_latent_dim * 2], minval = -1., maxval = 1., dtype = default_dtype)
z = tf.random.uniform([self.batch_size, fold_particles_count, 3], minval = -1., maxval = 1., dtype = default_dtype)
# conditional generative network (FOLD Stage)
latents = \
tf.broadcast_to\
(\
tf.reshape(global_latent, [self.batch_size, 1, self.particle_latent_dim]),\
[self.batch_size, fold_particles_count, self.particle_latent_dim]\
)
pos = z
c = tf.concat([pos, latents], axis = -1)
global_fold = 3
for i in range(global_fold):
c, v = Conv1dWrapper(c, self.particle_hidden_dim, 1, 1, 'SAME', None, w_init_fold, b_init, True, 'fold/fc%d' % i)
c = batch_norm(c, 0.999, is_train, name = 'fold/fc%d/bn' % i)
c = self.act(c)
alter_particles, v = Conv1dWrapper(c, pos_range, 1, 1, 'SAME', None, w_init, b_init, True, 'fold/fc_out')
fold_before_prefine = tf.concat([alter_particles, cluster_pos], axis = 1)
# tf.summary.histogram('Particles_AfterFolding', alter_particles)
# Graph pos-refinement stage
# Obtain features for alter particles
# Create the graph
posAlter, posRefer, gp_idx, gp_edg = bip_kNNG_gen(alter_particles, cluster_pos, self.knn_k - 6, 3, name = 'bi_ggen_pre')
# Create a empty feature (0.0)
n = tf.reduce_mean(tf.zeros_like(alter_particles), axis = -1, keepdims = True)
# Do the convolution
convSteps = 3
varsGConv = []
for i in range(convSteps):
n, v = bip_kNNGConvBN_wrapper(n, local_feature, gp_idx, gp_edg, self.batch_size, fold_particles_count, self.particle_hidden_dim // 2, self.act, is_train = is_train, name = 'gconv%d_pre' % i, W_init = w_init)
varsGConv.append(v)
fold_particle_features = n
# Reduce clusters' features
# clusters: [bs, N_clusters, cluster_feature_dim]
n, vars3 = Conv1dWrapper(local_feature, self.particle_hidden_dim // 2, 1, 1, 'SAME', self.act, w_init, b_init, True, 'conv1')
ref_particle_features = n
# Combine them to a single graph
pos = tf.concat([posRefer, posAlter], axis = 1) # [bs, N, 3]
n = tf.concat([ref_particle_features, fold_particle_features], axis = 1) # [bs, N, phd]
# Position Refinement
# refine_loops = 0
refine_loops = 2
refine_res_blocks = 2
vars_loop = []
for r in range(refine_loops):
_, gr_idx, gr_edg = kNNG_gen(pos, self.knn_k, 3, name = 'grefine%d/ggen' % r)
tmp = n
for i in range(refine_res_blocks):
# Pos-refinement
# pos, v = kNNGPosition_refine(pos, n, self.act, W_init = w_init_pref, b_init = b_init, name = 'gloop%d/pos_refine' % i)
# vars_loop.append(v)
# Graph generation
# _, gl_idx, gl_edg = kNNG_gen(pos, self.knn_k, 3, name = 'gloop%d/ggen' % i)
# Convolution
nn, v = kNNGConvBN_wrapper(n, gr_idx, gr_edg, self.batch_size, self.gridMaxSize, self.particle_hidden_dim // 2, self.act, is_train = is_train, name = 'gr%d/gloop%d/gconv1' % (r, i), W_init = w_init, b_init = b_init)
vars_loop.append(v)
nn, v = kNNGConvBN_wrapper(nn, gr_idx, gr_edg, self.batch_size, self.gridMaxSize, self.particle_hidden_dim // 2, self.act, is_train = is_train, name = 'gr%d/gloop%d/gconv2' % (r, i), W_init = w_init, b_init = b_init)
vars_loop.append(v)
n = n + nn
n = n + tmp
pos, v = kNNGPosition_refine(pos, n, self.act, W_init = w_init_pref, b_init = b_init, name = 'gr%d/grefine/refine' % r)
vars_loop.append(v)
final_particles = pos
if output_dim > pos_range:
n, _ = Conv1dWrapper(n, output_dim - pos_range, 1, 1, 'SAME', None, w_init, b_init, True, 'finalConv')
final_particles = tf.concat([pos, n], -1)
return 0, [final_particles, fold_before_prefine], 0
else: # New approach, local generation, fold-refine blocks
hd = self.particle_hidden_dim
ld = self.particle_latent_dim
_k = self.knn_k
# Single decoding stage
coarse_pos, coarse_fea, coarse_cnt = cluster_pos, local_feature, self.cluster_count
blocks = 1
pcnt = [self.gridMaxSize] # particle count
generator = [6] # Generator depth
refine = [0] # refine steps (each refine step = 1x res block (2x gconv))
refine_res = [1]
refine_maxLength = [0.6]
hdim = [self.particle_hidden_dim // 3]
fdim = [self.particle_latent_dim] # dim of features used for folding
gen_hdim = [self.particle_latent_dim]
knnk = [self.knn_k // 2]
# Multiple stacks
# coarse_pos, coarse_fea, coarse_cnt = cluster_pos, local_feature, self.cluster_count
# blocks = 2
# pcnt = [768, self.gridMaxSize] # particle count
# generator = [4, 3] # Generator depth
# refine = [2, 1] # refine steps (each refine step = 1x res block (2x gconv))
# hdim = [self.particle_hidden_dim, self.particle_hidden_dim // 3]
# fdim = [self.particle_latent_dim, self.particle_latent_dim] # dim of features used for folding
# gen_hdim = [self.particle_latent_dim, self.particle_latent_dim]
# knnk = [self.knn_k, self.knn_k // 2]
# [fullFC_regular, fullGen_regular] Setup for full generator - fully-connected
# coarse_pos, coarse_fea, coarse_cnt = cluster_pos, local_feature, self.cluster_count
# blocks = 3
# pcnt = [256, 1280, self.gridMaxSize] # particle count
# generator = [4, 4, 4] # Generator depth
# refine = [2, 1, 1] # Regular setup
# refine_maxLength = [2.0, 1.0, 0.5]
# refine = [1, 0, 0] # variant / refine steps (each refine step = 1x res block (2x gconv))
# refine_res = [1, 1, 1]
# hdim = [hd * 2, hd, hd // 3]
# fdim = [ld, ld, ld // 2] # dim of features used for folding
# gen_hdim = [ld, ld, ld]
# knnk = [_k, _k, _k // 2]
# [fullGen_shallow]
# coarse_pos, coarse_fea, coarse_cnt = cluster_pos, local_feature, self.cluster_count
# blocks = 2
# pcnt = [1280, self.gridMaxSize] # particle count
# generator = [6, 3] # Generator depth
# refine = [0, 0] # refine steps (each refine step = 1x res block (2x gconv))
# refine_res = [1, 1]
# hdim = [self.particle_hidden_dim, self.particle_hidden_dim // 3]
# fdim = [self.particle_latent_dim, self.particle_latent_dim] # dim of features used for folding
# gen_hdim = [self.particle_latent_dim, self.particle_latent_dim]
# knnk = [self.knn_k, self.knn_k // 2]
if self.useVector == True:
blocks += 1
pcnt = [coarse_cnt] + pcnt
coarse_cnt = 1
generator = [4] + generator
refine = [0] + refine
refine_res = [1] + refine_res
refine_maxLength = [1.0] + refine_maxLength
hdim = [self.particle_hidden_dim] + hdim
fdim = [512] + fdim
gen_hdim = [512] + gen_hdim
knnk = [self.knn_k] + knnk
pos_range = 3
gen_only = []
regularizer = 0.0
for bi in range(blocks):
with tf.variable_scope('gr%d' % bi):
if True: # Fully-connected generator (Non-distribution-based) & Full generators (pcnt[bi] instead of pcnt[bi] - coarse_cnt
# Check for good setups
assert pcnt[bi] % coarse_cnt == 0
n_per_cluster = pcnt[bi] // coarse_cnt
if False: # fc
n = coarse_fea
for gi in range(generator[bi]):
with tf.variable_scope('gen%d' % gi):
n, v = Conv1dWrapper(n, fdim[bi], 1, 1, 'SAME', None, w_init, b_init, True, 'fc')
n = batch_norm(n, 0.999, is_train, name = 'norm')
n = self.act(n)
n, v = Conv1dWrapper(n, pos_range * n_per_cluster, 1, 1, 'SAME', None, w_init, b_init, True, 'gen_out')
n = tf.reshape(n, [self.batch_size, coarse_cnt, n_per_cluster, pos_range])
# Back to world space
n = n + tf.reshape(coarse_pos, [self.batch_size, coarse_cnt, 1, pos_range])
ap = tf.reshape(n, [self.batch_size, pcnt[bi], pos_range])
else: # generator
z = tf.random.uniform([self.batch_size, coarse_cnt, n_per_cluster, fdim[bi]], minval = -0.5, maxval = 0.5, dtype = default_dtype)
fuse_fea, v = Conv1dWrapper(coarse_fea, fdim[bi], 1, 1, 'SAME', None, w_init_fold, b_init, True, 'feaFuse')
z = tf.concat([z, tf.broadcast_to(tf.reshape(fuse_fea, [self.batch_size, coarse_cnt, 1, fdim[bi]]), [self.batch_size, coarse_cnt, n_per_cluster, fdim[bi]])], axis = -1)
n = tf.reshape(z, [self.batch_size, pcnt[bi], fdim[bi] * 2])
for gi in range(generator[bi]):
with tf.variable_scope('gen%d' % gi):
n, v = Conv1dWrapper(n, fdim[bi], 1, 1, 'SAME', None, w_init, b_init, True, 'fc')
n = batch_norm(n, 0.999, is_train, name = 'norm')
n = self.act(n)
n, v = Conv1dWrapper(n, pos_range, 1, 1, 'SAME', None, w_init, b_init, True, 'gen_out')
n = tf.reshape(n, [self.batch_size, coarse_cnt, n_per_cluster, pos_range])
# Back to world space
n = n + tf.reshape(coarse_pos, [self.batch_size, coarse_cnt, 1, pos_range])
ap = tf.reshape(n, [self.batch_size, pcnt[bi], pos_range])
# General operations for full generators
gen_only.append(ap)
# Empty feature
# n = tf.zeros([self.batch_size, pcnt[bi], 1], dtype = default_dtype)
# Outputs of this stage
pos = ap
# n = n
else:
# Folding stage
fold_particles_count = pcnt[bi] - coarse_cnt
# Mixture
mix = tf.random.uniform([self.batch_size, fold_particles_count, 1], maxval = coarse_cnt, dtype = tf.int32)
# Coarse graph: [bs, coarse_cnt, coarse_hdim]
bs_idx = tf.broadcast_to(tf.reshape(tf.range(self.batch_size), [self.batch_size, 1, 1]), [self.batch_size, fold_particles_count, 1])
gather_idx = tf.concat([bs_idx, mix], axis = -1)
origin_pos = tf.gather_nd(coarse_pos, gather_idx)
origin_fea = tf.gather_nd(coarse_fea, gather_idx)
z = tf.random.uniform([self.batch_size, fold_particles_count, fdim[bi] * 2], minval = -1., maxval = 1., dtype = default_dtype)
if False: # Fuse feature to every layer, maybe stupid...?
for gi in range(generator[bi]):
with tf.variable_scope('gen%d' % gi):
fuse_fea, v = Conv1dWrapper(origin_fea, fdim[bi], 1, 1, 'SAME', self.act, w_init_fold, b_init, True, 'feaFuse')
z = tf.concat([z, fuse_fea], axis = -1)
z, v = Conv1dWrapper(z, fdim[bi], 1, 1, 'SAME', None, w_init_fold, b_init, True, 'fc')
z = batch_norm(z, 0.999, is_train, name = 'bn')
z = self.act(z)
elif False: # Regular small generator
fuse_fea, v = Conv1dWrapper(origin_fea, fdim[bi], 1, 1, 'SAME', self.act, w_init_fold, b_init, True, 'feaFuse')
z = tf.concat([z, fuse_fea], axis = -1)
for gi in range(generator[bi]):
with tf.variable_scope('gen%d' % gi):
z, v = Conv1dWrapper(z, 2 * fdim[bi], 1, 1, 'SAME', None, w_init_fold, b_init, True, 'fc')
z = batch_norm(z, 0.999, is_train, name = 'bn')
z = self.act(z)
z, v = Conv1dWrapper(z, pos_range, 1, 1, 'SAME', None, w_init, b_init, True, 'gen/fc_out')
else: # Advanced conditioned small generator
with tf.variable_scope('weight_gen'):
l, v = Conv1dWrapper(origin_fea, gen_hdim[bi], 1, 1, 'SAME', self.act, w_init_fold, b_init, True, 'mlp1')
l = batch_norm(l, 0.999, is_train, name = 'mlp1/bn')
l, v = Conv1dWrapper(l, gen_hdim[bi], 1, 1, 'SAME', self.act, w_init_fold, b_init, True, 'mlp2')
l = batch_norm(l, 0.999, is_train, name = 'mlp2/bn')
w, v = Conv1dWrapper(l, pos_range * fdim[bi], 1, 1, 'SAME', None, w_init_fold, b_init, True, 'mlp_weights_out')
b, v = Conv1dWrapper(l, pos_range, 1, 1, 'SAME', None, w_init_fold, b_init, True, 'mlp_bias_out')
t, v = Conv1dWrapper(l, pos_range * pos_range, 1, 1, 'SAME', None, w_init_fold, b_init, True, 'mlp_transform_out')
w = tf.reshape(w, [self.batch_size, fold_particles_count, fdim[bi], pos_range])
w = tf.nn.softmax(w, axis = 2)
t = tf.reshape(t, [self.batch_size, fold_particles_count, pos_range, pos_range])
# Entropy loss
entropy = tf.reduce_mean(-tf.reduce_sum(w * tf.log(w + 1e-4), axis = 2)) # We want minimize entropy of W
tf.summary.scalar('entropy', entropy)
regularizer += entropy * 0.1
# Ortho of t?
z = tf.random.uniform([self.batch_size, fold_particles_count, fdim[bi]], minval = -0.5, maxval = 0.5, dtype = default_dtype)
for gi in range(generator[bi]):
with tf.variable_scope('gen%d' % gi):
z, v = Conv1dWrapper(z, fdim[bi], 1, 1, 'SAME', None, w_init_fold, b_init, True, 'fc')
z = batch_norm(z, 0.999, is_train, name = 'bn')
z = self.act(z)
z, v = Conv1dWrapper(z, fdim[bi], 1, 1, 'SAME', None, w_init_fold, b_init, True, 'fc_final')
# Collect features
z = tf.multiply(w, tf.reshape(z, [self.batch_size, fold_particles_count, fdim[bi], 1]))
z = tf.reduce_sum(z, axis = 2)
z = z + b
# Linear transformation
z = tf.multiply(t, tf.reshape(z, [self.batch_size, fold_particles_count, pos_range, 1]))
z = tf.reduce_sum(z, axis = 2)
# ap, v = Conv1dWrapper(z, pos_range, 1, 1, 'SAME', None, w_init, b_init, True, 'gen/fc_out')
ap = z
ap = ap + origin_pos # ap is alter_particles
gen_only.append(tf.concat([ap, coarse_pos], axis = 1))
# Position refinement stage
# Bipartite graph
posAlter, posRefer, gp_idx, gp_edg = bip_kNNG_gen(ap, coarse_pos, knnk[bi], pos_range, name = 'bi_ggen_pre')
# Empty feature
n = tf.zeros([self.batch_size, fold_particles_count, 1], dtype = default_dtype)
n, v = bip_kNNGConvBN_wrapper(n, coarse_fea, gp_idx, gp_edg, self.batch_size, fold_particles_count, hdim[bi], self.act, is_train = is_train, name = 'bip/conv', W_init = w_init)
gen_features = n
# Existing features
n, v = Conv1dWrapper(coarse_fea, hdim[bi], 1, 1, 'SAME', self.act, w_init, b_init, True, 'pre/conv')
ref_features = n
# Combine to get graph
pos = tf.concat([posRefer, posAlter], axis = 1)
n = tf.concat([ref_features, gen_features], axis = 1)
### General part for full and partial generators
# Position Refinement
# get feature
# Bipartite graph
posAlter, posRefer, gp_idx, gp_edg = bip_kNNG_gen(pos, coarse_pos, knnk[bi], pos_range, name = 'bi_ggen_gRefine')
# Empty feature
n = tf.zeros([self.batch_size, pcnt[bi], 1], dtype = default_dtype)
n, v = bip_kNNGConvBN_wrapper(n, coarse_fea, gp_idx, gp_edg, self.batch_size, pcnt[bi], hdim[bi], self.act, is_train = is_train, name = 'gRefine/bip/conv', W_init = w_init)
# refine_loops = 0
refine_res_blocks = refine_res[bi]
vars_loop = []
for r in range(refine[bi]):
_, gr_idx, gr_edg = kNNG_gen(pos, knnk[bi], 3, name = 'grefine%d/ggen' % r)
tmp = n
for i in range(refine_res_blocks):
# Convolution
nn, v = kNNGConvBN_wrapper(n, gr_idx, gr_edg, self.batch_size, pcnt[bi], hdim[bi], self.act, is_train = is_train, name = 'gr%d/gloop%d/gconv1' % (r, i), W_init = w_init, b_init = b_init)
vars_loop.append(v)
nn, v = kNNGConvBN_wrapper(nn, gr_idx, gr_edg, self.batch_size, pcnt[bi], hdim[bi], self.act, is_train = is_train, name = 'gr%d/gloop%d/gconv2' % (r, i), W_init = w_init, b_init = b_init)
vars_loop.append(v)
n = n + nn
n = n + tmp
pos, v = kNNGPosition_refine(pos, n, refine_maxLength[bi], self.act, W_init = w_init_pref, b_init = b_init, name = 'gr%d/grefine/refine' % r)
vars_loop.append(v)
# get feature
# Bipartite graph
posAlter, posRefer, gp_idx, gp_edg = bip_kNNG_gen(pos, coarse_pos, knnk[bi], pos_range, name = 'bi_ggen_featureEx')
# Empty feature
n = tf.zeros([self.batch_size, pcnt[bi], 1], dtype = default_dtype)
n, v = bip_kNNGConvBN_wrapper(n, coarse_fea, gp_idx, gp_edg, self.batch_size, pcnt[bi], hdim[bi], self.act, is_train = is_train, name = 'featureEx/bip/conv', W_init = w_init)
_, gidx, gedg = kNNG_gen(pos, knnk[bi], 3, name = 'featureEx/ggen')
n, v = kNNGConvBN_wrapper(n, gidx, gedg, self.batch_size, pcnt[bi], hdim[bi], self.act, is_train = is_train, name = 'featureEx/gconv1', W_init = w_init, b_init = b_init)
n, v = kNNGConvBN_wrapper(n, gidx, gedg, self.batch_size, pcnt[bi], hdim[bi], self.act, is_train = is_train, name = 'featureEx/gconv2', W_init = w_init, b_init = b_init)
coarse_pos = pos
coarse_fea = n
coarse_cnt = pcnt[bi]
final_particles = coarse_pos
n = coarse_fea
if output_dim > pos_range:
n, _ = Conv1dWrapper(n, output_dim - pos_range, 1, 1, 'SAME', None, w_init, b_init, True, 'finalConv')
final_particles = tf.concat([pos, n], -1)
regularizer = regularizer / blocks
return 0, [final_particles, gen_only[0]], 0, regularizer
def simulator(self, pos, particles, name = 'Simluator', is_train = True, reuse = False):
    """Advance latent particle positions by one simulated step.

    pos: [bs, N, 3] cluster positions; particles: [bs, N, C] features.
    Returns (refined pos, the *unchanged* input features, collected vars).
    NOTE(review): the default scope name is misspelled ('Simluator'), but
    every visible caller passes an explicit name, so it never takes effect.
    """
    w_init = tf.random_normal_initializer(stddev=self.wdev)
    # smaller init for the position refiner
    w_init_pref = tf.random_normal_initializer(stddev=0.03*self.wdev)
    b_init = tf.constant_initializer(value=0.0)
    with tf.variable_scope(name, reuse = reuse) as vs:
        # kNN graph over the current positions
        _, gIdx, gEdg = kNNG_gen(pos, self.knn_k, 3, name = 'simulator/ggen')
        layers = 1
        n = particles
        Np = particles.shape[1]
        C = particles.shape[2]
        var_list = []
        nn = n
        for i in range(layers):
            nn, v = kNNGConvBN_wrapper(nn, gIdx, gEdg, self.batch_size, Np, C, self.act, is_train = is_train, name = 'simulator/gconv%d' % i, W_init = w_init, b_init = b_init)
            var_list.append(v)
        # residual connection around the graph-conv stack
        n = n + nn
        # refine positions from the residual features
        pos, v = kNNGPosition_refine(pos, n, self.act, W_init = w_init_pref, b_init = b_init, name = 'simulator/grefine')
        var_list.append(v)
        return pos, particles, var_list
def simulator_old(self, pos, particles, name = 'Simulator', is_train = True, reuse = False):
    """Older simulator variant: identical to simulator() except the graph
    convolution is built with bnact = None (no activation after batch norm).
    """
    w_init = tf.random_normal_initializer(stddev=self.wdev)
    w_init_pref = tf.random_normal_initializer(stddev=0.03*self.wdev)
    b_init = tf.constant_initializer(value=0.0)
    with tf.variable_scope(name, reuse = reuse) as vs:
        _, gIdx, gEdg = kNNG_gen(pos, self.knn_k, 3, name = 'simulator/ggen')
        layers = 1
        n = particles
        Np = particles.shape[1]
        C = particles.shape[2]
        var_list = []
        nn = n
        for i in range(layers):
            nn, v = kNNGConvBN_wrapper(nn, gIdx, gEdg, self.batch_size, Np, C, self.act, is_train = is_train, name = 'simulator/gconv%d' % i, W_init = w_init, b_init = b_init, bnact = None)
            # n, v = kNNGConvBN_wrapper(n, gIdx, gEdg, self.batch_size, Np, C, self.act, is_train = is_train, name = 'simulator/gconv%d' % i, W_init = w_init, b_init = b_init)
            var_list.append(v)
        # residual connection
        n = n + nn
        pos, v = kNNGPosition_refine(pos, n, self.act, W_init = w_init_pref, b_init = b_init, name = 'simulator/grefine')
        var_list.append(v)
        return pos, particles, var_list
def generate_match_canonical(self, card):
    """Build a canonical validity mask and gather-match per batch entry.

    card: [bs] per-sample particle counts.
    Returns (mask, match): mask is [bs, gridMaxSize, 3] float32 with ones on
    the first card[b] rows; match is [bs, gridMaxSize, 2] int32 listing the
    masked (valid) slot indices first, then the remaining ones.
    """
    batch_size = card.shape[0]
    mask = np.zeros((batch_size, self.gridMaxSize, 3), dtype = 'f')
    for b in range(batch_size):
        mask[b, :int(card[b]), :] = 1
        # Uncomment for randomly choosing
        # np.random.shuffle(mask[b, :])
    match = np.zeros((batch_size, self.gridMaxSize, 2), dtype = np.int32)
    for b in range(batch_size):
        slot = 0
        # two passes: valid entries first, then the leftovers
        # (0.2 is an arbitrary threshold in (0, 1) against the 0/1 mask)
        for keep in (True, False):
            for i in range(self.gridMaxSize):
                if (mask[b, i, 0] > 0.2) == keep:
                    match[b, slot, :] = [b, i]
                    slot += 1
    return mask, match
def generate_canonical_mask(self, card):
    """Return a [bs, gridMaxSize, 3] float32 mask with ones on the first card[b] rows."""
    batch_size = card.shape[0]
    mask = np.zeros((batch_size, self.gridMaxSize, 3), dtype = 'f')
    for b, count in enumerate(card):
        mask[b, :int(count), :] = 1
        # Uncomment for randomly choosing
        # np.random.shuffle(mask[b, :])
    return mask
def generate_score_label(self, src, cards):
    """Per-slot score labels: 1/cards[b] where src[b, p] < cards[b], else 0."""
    result = np.zeros((self.batch_size, self.gridMaxSize), dtype = 'f')
    for b in range(self.batch_size):
        valid = src[b, :self.gridMaxSize] < cards[b]
        result[b, valid] = 1 / cards[b]
    return result
def generate_KM_match(self, src):
    """Invert a KM assignment: result[b, src[b, p]] = [b, p].

    KM match order reversed (ph_Y -> output => output -> ph_Y).
    """
    result = np.zeros((self.batch_size, self.gridMaxSize, 2), dtype = np.int32)
    for b in range(self.batch_size):
        for p, tgt in enumerate(src[b, :self.gridMaxSize]):
            result[b, tgt] = np.asarray([b, p])
    return result
def no_return_assign(self, ref, value):
    # Build an assign op purely for its graph side effect and return a
    # plain 0 so the call can sit inside arithmetic expressions.
    # NOTE(review): the created assign op is neither returned nor added to
    # any collection, so nothing here guarantees it is ever run — confirm
    # callers rely on TF1 auto-dependencies or discard it intentionally.
    tf.assign(ref, value)
    return 0
def chamfer_metric(self, particles, groundtruth, pos_range, loss_func, EMD = False):
    """Chamfer (or approximate-EMD) distance between two point sets.

    particles / groundtruth: [bs, N, C] tensors. Only channels [0:pos_range]
    are used for the nearest-neighbour search; in the chamfer branch the
    loss itself is taken over all channels.
    """
    if EMD == True:
        # Earth-Mover's style: soft matching weights come from approx_match
        bs = groundtruth.shape[0]
        Np = particles.shape[1]
        Ng = groundtruth.shape[1]
        match = approx_match(groundtruth, particles) # [bs, Np, Ng]
        row_predicted = tf.reshape(  particles[:, :, 0:pos_range], [bs, Np, 1, pos_range])
        col_groundtru = tf.reshape(groundtruth[:, :, 0:pos_range], [bs, 1, Ng, pos_range])
        # pairwise euclidean distances, weighted by the match matrix
        distance = tf.sqrt(tf.add_n(tf.unstack(tf.square(row_predicted - col_groundtru), axis = -1)))
        distance = distance * match
        distance_loss = tf.reduce_mean(tf.reduce_sum(distance, axis = -1))
    else:
        # test - shuffle the groundtruth and calculate the loss
        # rec_particles = tf.stack(list(map(lambda x: tf.random.shuffle(x), tf.unstack(self.ph_X[:, :, 0:6]))))
        # rec_particles = tf.random.uniform([self.batch_size, self.gridMaxSize, 3], minval = -1.0, maxval = 1.0)
        bs = groundtruth.shape[0]
        Np = particles.shape[1]
        Ng = groundtruth.shape[1]
        assert groundtruth.shape[2] == particles.shape[2]
        # NOTE: current using position (0:3) only here for searching nearest point.
        row_predicted = tf.reshape(  particles[:, :, 0:pos_range], [bs, Np, 1, pos_range])
        col_groundtru = tf.reshape(groundtruth[:, :, 0:pos_range], [bs, 1, Ng, pos_range])
        # distance = tf.norm(row_predicted - col_groundtru, ord = 'euclidean', axis = -1)
        distance = tf.sqrt(tf.add_n(tf.unstack(tf.square(row_predicted - col_groundtru), axis = -1)))
        # nearest prediction per ground-truth point, and vice versa
        rearrange_predicted_N = tf.argmin(distance, axis = 1, output_type = tf.int32)
        rearrange_groundtru_N = tf.argmin(distance, axis = 2, output_type = tf.int32)
        # prepend batch subscripts so gather_nd can index [batch, point]
        batch_subscriptG = tf.broadcast_to(tf.reshape(tf.range(bs), [bs, 1]), [bs, Ng])
        batch_subscriptP = tf.broadcast_to(tf.reshape(tf.range(bs), [bs, 1]), [bs, Np])
        rearrange_predicted = tf.stack([batch_subscriptG, rearrange_predicted_N], axis = 2)
        rearrange_groundtru = tf.stack([batch_subscriptP, rearrange_groundtru_N], axis = 2)
        nearest_predicted = tf.gather_nd( particles[:, :, :], rearrange_predicted)
        nearest_groundtru = tf.gather_nd(groundtruth[:, :, :], rearrange_groundtru)
        if loss_func == tf.abs:
            # L1: mean absolute error over both matching directions
            distance_loss =\
                tf.reduce_mean(loss_func(tf.cast( particles, tf.float32) - tf.cast(nearest_groundtru, tf.float32))) +\
                tf.reduce_mean(loss_func(tf.cast(nearest_predicted, tf.float32) - tf.cast(groundtruth , tf.float32)))
        else:
            # otherwise: per-point euclidean norm of loss_func residuals, both directions
            distance_loss =\
                tf.reduce_mean(tf.sqrt(tf.reduce_sum(loss_func(tf.cast( particles, tf.float32) - tf.cast(nearest_groundtru, tf.float32)), axis = -1))) +\
                tf.reduce_mean(tf.sqrt(tf.reduce_sum(loss_func(tf.cast(nearest_predicted, tf.float32) - tf.cast( groundtruth, tf.float32)), axis = -1)))
    return tf.cast(distance_loss, default_dtype)
# pos [bs, N, pRange]
# imp [bs, N]
# FIXME: this is not good ...
def pool_coverage(self, pos, importance, evaluation, k, ctrlRange = 0.01, falloff = 2.0, smin = 64.0, eplison = 1e-5):
    """(Unfinished) soft coverage score for pooled particle selection.

    NOTE(review): this function currently ends without a return statement
    (implicitly returns None) and never uses `evaluation` or `smin` — it
    looks abandoned mid-implementation (see the FIXME above).
    """
    bs = pos.shape[0]
    N = pos.shape[1]
    pr = pos.shape[2]
    # inverse of the control range; the "/ 1.0" is a no-op kept from tuning
    rx = 1.0 / (ctrlRange / 1.0)
    # pairwise euclidean distances between all particles
    row = tf.reshape(pos, [bs, N, 1, pr])
    col = tf.reshape(pos, [bs, 1, N, pr])
    dist = tf.sqrt(tf.add_n(tf.unstack(tf.square(row - col), axis = -1)))
    # soft coverage that falls off with distance
    subCover = tf.pow(tf.nn.tanh(1.0 / (rx * dist + eplison)), falloff)
    # normalize importance, fake k-max
    importance = k * importance / tf.reduce_sum(importance, axis = -1, keepdims = True)
    importance = tf.pow(importance, 4.0)
def custom_dtype_getter(self, getter, name, shape=None, dtype=default_dtype, *args, **kwargs):
    """Variable getter for mixed precision: keep fp32 master variables and
    hand back an fp16 cast when a float16 variable is requested."""
    if dtype is not tf.float16:
        return getter(name, shape, dtype, *args, **kwargs)
    master = getter(name, shape, tf.float32, *args, **kwargs)
    return tf.cast(master, dtype=dtype, name=name + '_cast')
def build_network(self, is_train, reuse, loopSim = True, includeSim = True):
    """Build the autoencoder (+ optional simulator) training graph.

    Returns (rec_X, normalized ground-truth slice, reconstruct_loss,
    simulation_loss, hqpool_loss, pool_align_loss, particle_net_vars,
    raw_error).
    NOTE(review): when includeSim is True, `rec_X` is never assigned (only
    rec_YX / rec_Y / rec_L / rec_LX are), so the final return would raise
    NameError — the code appears to be exercised with includeSim == False.
    Likewise pool_align_loss is only bound on the 'chamfer' loss path.
    """
    # scale raw inputs into the network's working range
    normalized_X = self.ph_X / self.normalize
    normalized_Y = self.ph_Y / self.normalize
    normalized_L = self.ph_L / self.normalize
    # tf.summary.histogram('groundTruth_pos', self.ph_X[:, :, 0:3])
    # tf.summary.histogram('groundTruth_vel', self.ph_X[:, :, 3:6])
    # Mixed FP16 & FP32
    with tf.variable_scope('net', custom_getter = self.custom_dtype_getter):
        # with tf.variable_scope('net'):
        # Go through the particle AE
        # tf.summary.histogram('GroundTruth', normalized_X[:, :, 0:3])
        var_list = []
        floss = 0
        regularizer = 0
        # Enc(X)
        posX, feaX, _v, _floss, eX = self.particleEncoder(normalized_X, self.particle_latent_dim, is_train = is_train, reuse = reuse)
        var_list.append(_v)
        floss += _floss
        encX = tf.concat([posX, feaX], -1)
        if includeSim == True:
            # Enc(Y) — shares encoder weights (reuse = True)
            posY, feaY, _v, _floss, eY = self.particleEncoder(normalized_Y, self.particle_latent_dim, is_train = is_train, reuse = True)
            var_list.append(_v)
            floss += _floss
            encY = tf.concat([posY, feaY], -1)
            # tf.summary.histogram('Encoded Y', encY)
            if loopSim == True:
                # Enc(L)
                posL, feaL, _v, _floss, _ = self.particleEncoder(normalized_L, self.particle_latent_dim, is_train = is_train, reuse = True)
                var_list.append(_v)
                floss += _floss
                encL = tf.concat([posL, feaL], -1)
        # tf.summary.histogram('Clusters X', posX)
        outDim = self.outDim
        if includeSim == True:
            # Sim: X -> Y
            sim_posY, sim_feaY, _v = self.simulator(posX, feaX, 'Simulator', is_train, reuse)
            var_list.append(_v)
            # SimInv: Y -> X
            sim_posYX, sim_feaYX, _v = self.simulator(posY, feaY, 'Simulator_Inv', is_train, reuse)
            var_list.append(_v)
            simY = tf.concat([ sim_posY, sim_feaY], -1)
            simYX = tf.concat([sim_posYX, sim_feaYX], -1)
            # tf.summary.histogram('simulated Y', simY)
            # Decoders
            # _, [rec_YX, _], _ = self.particleDecoder(sim_posYX, sim_feaYX, self.ph_card, 6, True, reuse)
            # _, [ rec_Y, _], _ = self.particleDecoder( sim_posY, sim_feaY, self.ph_card, 6, True, True)
            _, [rec_YX, fold_X], _, r = self.particleDecoder( posX, feaX, self.ph_card, outDim, True, reuse)
            regularizer += r * 0.5
            _, [ rec_Y, _], _, r = self.particleDecoder( posY, feaY, self.ph_card, outDim, True, True)
            regularizer += r * 0.5
            # tf.summary.histogram('Reconstructed X (from SInv(Y))', rec_YX[:, :, 0:3])
            if loopSim == True:
                # SimInv: L -> X (applied self.loops times)
                sim_posLX, sim_feaLX = posL, feaL
                for i in range(self.loops):
                    sim_posLX, sim_feaLX, _v = self.simulator(sim_posLX, sim_feaLX, 'Simulator_Inv', is_train, True)
                # Sim: X -> L
                sim_posL, sim_feaL = posX, feaX
                for i in range(self.loops):
                    sim_posL, sim_feaL, _v = self.simulator(sim_posL, sim_feaL, 'Simulator', is_train, True)
                simL = tf.concat([ sim_posL, sim_feaL], -1)
                simLX = tf.concat([sim_posLX, sim_feaLX], -1)
                _, [rec_LX, _], _, r = self.particleDecoder(sim_posLX, sim_feaLX, self.ph_card, outDim, True, True)
                regularizer += r * 0.5
                # _, [ rec_L, _], _ = self.particleDecoder( sim_posL, sim_feaL, self.ph_card, outDim, True, True)
                _, [ rec_L, _], _, r = self.particleDecoder( posL, feaL, self.ph_card, outDim, True, True)
                regularizer += r * 0.5
                regularizer *= 0.5
        else:
            # Autoencoder-only path: decode X back from its own latent
            _, [rec_X, fold_X], _, r = self.particleDecoder(posX, feaX, self.ph_card, outDim, True, reuse)
            regularizer += r
        # tf.summary.histogram('Reconstructed X (from X)', rec_X[:, :, 0:3])
        reconstruct_loss = 0.0
        simulation_loss = 0.0
        # EMD
        if self.loss_metric == 'earthmover':
            raise NotImplementedError
        if self.loss_metric == 'chamfer':
            # Do some stop_gradient?
            # reconstruct_loss += self.chamfer_metric(rec_YX, normalized_X[:, :, 0:6], 3, self.loss_func)
            # reconstruct_loss += self.chamfer_metric(rec_LX, normalized_X[:, :, 0:6], 3, self.loss_func)
            # reconstruct_loss += self.chamfer_metric(rec_Y , normalized_Y[:, :, 0:6], 3, self.loss_func)
            # reconstruct_loss += self.chamfer_metric(rec_L , normalized_L[:, :, 0:6], 3, self.loss_func)
            pool_align_loss = 0
            if includeSim == True:
                if loopSim == True:
                    # reconstruction terms for every decoded view
                    reconstruct_loss += self.chamfer_metric(rec_YX, normalized_X[:, :, 0:outDim], 3, self.loss_func)
                    reconstruct_loss += self.chamfer_metric(rec_LX, normalized_X[:, :, 0:outDim], 3, self.loss_func)
                    reconstruct_loss += self.chamfer_metric(rec_Y , normalized_Y[:, :, 0:outDim], 3, self.loss_func)
                    reconstruct_loss += self.chamfer_metric(rec_L , normalized_L[:, :, 0:outDim], 3, self.loss_func)
                    # latent-space simulation consistency terms
                    simulation_loss += self.chamfer_metric(simY , encY, 3, self.loss_func)
                    simulation_loss += self.chamfer_metric(simL , encL, 3, self.loss_func)
                    simulation_loss += self.chamfer_metric(simYX, encX, 3, self.loss_func)
                    simulation_loss += self.chamfer_metric(simLX, encX, 3, self.loss_func)
                    reconstruct_loss *= 10.0
                    simulation_loss *= 10.0
                else:
                    reconstruct_loss += self.chamfer_metric(rec_YX, normalized_X[:, :, 0:outDim], 3, self.loss_func)
                    reconstruct_loss += self.chamfer_metric(rec_Y , normalized_Y[:, :, 0:outDim], 3, self.loss_func)
                    simulation_loss += self.chamfer_metric(simY , encY, 3, self.loss_func)
                    simulation_loss += self.chamfer_metric(simYX, encX, 3, self.loss_func)
                    reconstruct_loss *= 20.0
                    simulation_loss *= 20.0
                # align per-level pooled encodings of X and Y
                for ei in range(len(eX)):
                    pool_align_loss += self.chamfer_metric(eX[ei], eY[ei], 3, self.loss_func)
                pool_align_loss *= 10.0 / len(eX)
            else:
                reconstruct_loss += self.chamfer_metric(rec_X, normalized_X[:, :, 0:outDim], 3, self.loss_func, EMD = True)
                # reconstruct_loss += self.chamfer_metric(rec_X[:, self.cluster_count:, 0:outDim], normalized_X[:, :, 0:outDim], 3, self.loss_func, EMD = True)
                reconstruct_loss *= 40.0
        raw_error = 0.0
        # raw_error = self.chamfer_metric(rec_X, normalized_X[:, :, 0:outDim], 3, tf.abs) * 40.0
        # reconstruct_loss += self.chamfer_metric(fold_X[:, :, 0:3], normalized_X[:, :, 0:3], 3, self.loss_func) * 40.0
        # reconstruct_loss *= 0.5
        # pooling quality + decoder regularization; halved once per extra active loss group
        hqpool_loss = 0.01 * tf.reduce_mean(floss) + regularizer
        # hqpool_loss = 0.0
        if includeSim == True:
            hqpool_loss *= 0.5
        if includeSim == True and loopSim == True:
            hqpool_loss *= 0.5
        particle_net_vars =\
        []
        # tl.layers.get_variables_with_name('ParticleEncoder', True, True) +\
        # tl.layers.get_variables_with_name('ParticleDecoder', True, True)
        # rec_L = rec_L * self.normalize
        # rec_LX = rec_LX * self.normalize
        # rec_X = rec_X * self.normalize
        # rec_YX = rec_YX * self.normalize
        # rec_Y = rec_Y * self.normalize
        return rec_X, normalized_X[:, :, 0:outDim], reconstruct_loss, simulation_loss, hqpool_loss, pool_align_loss, particle_net_vars, raw_error
# Only encodes X
def build_predict_Enc(self, normalized_X, is_train = False, reuse = False):
    """Inference graph: encode an (already normalized) X only.

    Returns (posX, feaX, per-particle pool positions, encoder evaluations).
    """
    # Mixed FP16 & FP32
    with tf.variable_scope('net', custom_getter = self.custom_dtype_getter):
        # with tf.variable_scope('net'):
        # Go through the particle AE
        # tf.summary.histogram('GroundTruth', normalized_X[:, :, 0:3])
        var_list = []
        floss = 0
        # Enc(X) — returnPool = True asks the encoder for the extra pPos output
        posX, feaX, _v, pPos, _floss, evals = self.particleEncoder(normalized_X, self.particle_latent_dim, is_train = is_train, reuse = reuse, returnPool = True)
        var_list.append(_v)
        floss += _floss
        return posX, feaX, pPos, evals
# Only simulates posX & feaX for a single step
def build_predict_Sim(self, pos, fea, is_train = False, reuse = False):
    """Inference graph: advance latent (pos, fea) by one simulator step."""
    with tf.variable_scope('net', custom_getter = self.custom_dtype_getter):
        sim_posY, sim_feaY, _v = self.simulator(pos, fea, 'Simulator', is_train, reuse)
        return sim_posY, sim_feaY
# Decodes Y
def build_predict_Dec(self, pos, fea, gt, is_train = False, reuse = False, outDim = 6):
    """Inference graph: decode latent (pos, fea) back to particles.

    gt is the (normalized) ground truth used for the EMD-style chamfer loss.
    Returns (denormalized reconstruction, denormalized fold output, loss).
    Removed a dead no-op assignment (`rec = rec`) from the original.
    """
    with tf.variable_scope('net', custom_getter = self.custom_dtype_getter):
        _, [rec, rec_f], _, _ = self.particleDecoder(pos, fea, self.ph_card, outDim, is_train, reuse)
        # EMD = True; same 40.0 scaling as the training-time loss
        reconstruct_loss = self.chamfer_metric(rec, gt, 3, self.loss_func, True) * 40.0
        return rec * self.normalize, rec_f * self.normalize, reconstruct_loss
def build_model(self):
    """Assemble train/validation graphs, combined losses, and the clipped-gradient train op."""
    # Train & Validation
    _, _, self.train_particleRecLoss, self.train_particleSimLoss, self.train_HQPLoss, self.train_PALoss,\
    self.particle_vars, self.train_error =\
        self.build_network(True, False, self.doLoop, self.doSim)
    self.val_rec, self.val_gt,\
    self.val_particleRecLoss, self.val_particleSimLoss, _, _, _, self.val_error =\
        self.build_network(False, True, self.doLoop, self.doSim)
    # self.train_particleLoss = self.train_particleCardLoss
    # self.val_particleLoss = self.val_particleCardLoss
    # training optimizes the sum of all terms; validation tracks reconstruction only
    self.train_particleLoss = self.train_particleRecLoss + self.train_particleSimLoss + self.train_HQPLoss + self.train_PALoss
    self.val_particleLoss = self.val_particleRecLoss
    # self.train_particleLoss = self.train_particleCardLoss + 100 * self.train_particleRawLoss
    # self.val_particleLoss = self.val_particleRawLoss
    # self.val_particleLoss = self.val_particleCardLoss + 100 * self.val_particleRawLoss
    # run batch-norm moving-average updates before each optimizer step
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        gvs = self.optimizer.compute_gradients(self.train_particleLoss)
        # element-wise gradient clipping to [-1, 1]; None gradients are passed through
        capped_gvs = [(tf.clip_by_value(grad, -1., 1.) if grad is not None else None, var) for grad, var in gvs]
        # self.train_op = self.optimizer.minimize(self.train_particleLoss)
        self.train_op = self.optimizer.apply_gradients(capped_gvs)
        # self.train_op = self.optimizer.minimize(self.train_particleLoss, var_list = self.particle_vars)
        # self.train_op = tf.constant(0, shape=[10, 10])
| betairylia/NNParticles | model_graph.py | model_graph.py | py | 87,989 | python | en | code | 0 | github-code | 36 |
15593091597 | # den här funktionen ger dig möjlighet att välja vilken biom du vill spela spelet i
def biomer():
try:
print("""
<------------->
1.Mountain,N
2.Desert,S
3.Jungle,E
4.Plains,W
<------------->
""")
choosebiom=int(input("\nChoose a diraction(number): "))
if choosebiom == 1:
biom = "Mountains"
elif choosebiom == 2:
biom = "Desert"
elif choosebiom == 3:
biom = "Jungle"
elif choosebiom == 4:
biom = "Plains"
else:
print("""
<-------------------------------------->
Your stupid shithead write 1, 2, 3 or 4!
<-------------------------------------->
""")
print(f"""
<-------------------------->
You went towards the {biom}!
<-------------------------->
""")
return biom
except ValueError:
print("Try a number please\n")
biomer()
| xXwilson2005Xx/python-projekt | biom.py | biom.py | py | 1,032 | python | en | code | 0 | github-code | 36 |
def longestCommonPrefix(strs):
    """Return the longest common prefix shared by all strings in strs.

    Improvements over the original: an empty list returns '' instead of
    raising IndexError, and scanning stops at the first mismatch instead of
    re-checking every remaining position of the first string.
    """
    if not strs:
        return ''
    prefix = strs[0]
    for s in strs[1:]:
        # shrink the candidate until it is a prefix of s
        while not s.startswith(prefix):
            prefix = prefix[:-1]
            if not prefix:
                return ''
    return prefix
# demo run: prints the common prefix of the sample list ("flo")
strs = ["flower","flow","flood"]
print(longestCommonPrefix(strs))
| Exile404/LeetCode | LEETCODE_Longest-common-prefix.py | LEETCODE_Longest-common-prefix.py | py | 415 | python | en | code | 2 | github-code | 36 |
30647725108 | #! /usr/bin/python2
# coding=utf-8
import socket
# Minimal TCP client: connect to the local echo server, send one message,
# print the reply, and close. This script targets Python 2 (see shebang):
# under Python 3, send() would require bytes (mess.encode()).
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
hostName = clientSocket.getsockname()
serverIP = "127.0.0.1"
port = 8888
clientSocket.connect((serverIP, port))
mess = "hello"
clientSocket.send(mess)
buffer = clientSocket.recv(1024)
print(buffer)
clientSocket.close()
73123502184 | from udb.controller.tests import WebCase
class TestApp(WebCase):
    def test_index(self):
        """The index page redirects to the dashboard."""
        # Given the application is started
        # When making a query to the index page
        self.getPage('/')
        # Then the client is redirected (303) to the dashboard page
        expected_location = self.baseurl + "/dashboard/"
        self.assertStatus(303)
        self.assertHeaderItemValue("Location", expected_location)
| ikus060/udb | src/udb/controller/tests/test_index_page.py | test_index_page.py | py | 354 | python | en | code | 0 | github-code | 36 |
6164265087 | from random import Random, choice
import math
MAX_LEVELS = 4
HERIDITARY_WEIGHT = 0
MUTATED_WEIGHT = 1
random_negative = Random().randint(-1,1)
ran = Random()
def shouldMutate(random_num, mutate_range):
    """Return True when random_num lies strictly between the range bounds."""
    low, high = mutate_range
    return low < random_num and random_num < high
def genRandomBiotGene():
    """Build a fully random biot genome (body traits plus per-arm sub-genes)."""
    num_arms = ran.randint(2,6)
    gene = {
        'num_arms': num_arms,
        'branchfactor': (2*math.pi/ran.randint(2, 14)),
        'size': ran.randint(2, 10),
        'armGenes': genRandomArmGenes(num_arms),
    }
    # always place arms equally far apart around the body
    for arm in gene['armGenes']:
        gene['armGenes'][arm]['angle'] = (arm + 1) * math.pi * 1/(gene['num_arms'] / 2)
    return gene
def genRandomArmGenes(num_arms):
    """Create num_arms random arm genes; angles are filled in by the caller."""
    armGenes = {}
    for arm in range(num_arms):
        gene = {
            'num_levels': ran.randint(2,MAX_LEVELS),
            'angle': None,
            'segGenes': {},
            'arm_id': arm,
        }
        gene['segGenes'] = genRandomSegmentGenes(gene['num_levels'])
        armGenes[arm] = gene
    return armGenes
def genRandomSegmentGenes(num_levels):
    """Create num_levels random segment genes for a single arm."""
    segGenes = {}
    for seg in range(num_levels):
        gene = {}
        # field order is preserved so the random-number stream matches
        gene['branchfactor'] = _choose_value(None, [(2*math.pi/ran.randint(2, 14))])
        gene['color'] = _get_color((0,255,0))
        gene['length'] = _choose_value(None, [ran.randint(5, 20)])
        gene['energy'] = 0
        gene['seg_id'] = seg
        gene['movecounter'] = _choose_value(None, [ran.randint(3, 50)])
        # energy depends on the rolled color (green = 10, anything else = 100)
        gene['energy'] = mutate_energy(gene)
        segGenes[seg] = gene
    return segGenes
def _choose_value(parentValue, mutatedValueList):
    """Pick randomly from weighted copies of the parent value and mutated candidates.

    NOTE(review): HERIDITARY_WEIGHT is 0 at module level, so the parent value
    currently never wins; a falsy parentValue (0, None) is never included
    either because of the truthiness test below.
    """
    pool = []
    if parentValue:
        pool.extend([parentValue] * HERIDITARY_WEIGHT)
    if mutatedValueList:
        pool.extend(mutatedValueList * MUTATED_WEIGHT)
    return choice(pool)
def mutate(parentGene):
    """With low probability, return a mutated copy of parentGene; otherwise return it unchanged.

    The mutation roll hits when a 1..100000 draw falls strictly inside
    (1, 5000), i.e. roughly a 5% chance per call.
    """
    if shouldMutate(ran.randint(1, 100000), (1,5000)):
        newGene = {}
        for key, value in parentGene.iteritems():
            if key == 'armGenes':
                continue
            # dispatch to the matching mutate_<key> helper; a direct name
            # lookup replaces the original eval(), which evaluated a built
            # string and is an arbitrary-code-evaluation hazard
            newGene[key] = globals()['mutate_%s' % key](parentGene)
        newGene['armGenes'] = mutate_armGenes(parentGene, newGene['num_arms'])
        return newGene
    else:
        return parentGene
def mutate_num_arms(parentGene):
    """Nudge the arm count by -1/0/+1, clamped to the range [2, 6]."""
    parent_arms = parentGene['num_arms']
    new_arms = max(2, min(6, parent_arms + ran.randint(-1,1)))
    return _choose_value(parent_arms, [new_arms])
def mutate_branchfactor(parentGene):
    """Shift the branch factor by 0.1 in a direction fixed at import time.

    NOTE(review): random_negative is a module-level value rolled once at
    import, so every call in a run drifts the same way — confirm intended.
    """
    parent_branchfactor = parentGene['branchfactor']
    return _choose_value(parent_branchfactor, [parent_branchfactor + 0.1 * random_negative])
def mutate_size(parentGene):
    # Size is currently pinned to a constant; the parent gene is ignored.
    # NOTE(review): genRandomBiotGene rolls size in 2..10, so a single
    # mutation jumps it to 30 — confirm this placeholder is intentional.
    return 30
def mutate_armGenes(parentGene, num_arms):
    """Mutate every inherited arm gene and reconcile the set with num_arms.

    Fixes an off-by-one in the original: the number of missing arms was
    computed as num_arms - max(keys) (one too many, partially masked by a
    "> 1" guard), which regenerated the last *existing* arm from scratch
    whenever a single arm was added. Arms beyond num_arms are now also
    dropped so the angle layout matches the mutated arm count.
    """
    newGene = {}
    for arm, armGene in parentGene['armGenes'].iteritems():
        newGene[arm] = {}
        newGene[arm]['arm_id'] = arm
        newGene[arm]['angle'] = None
        newGene[arm]['num_levels'] = mutate_num_levels(armGene)
        newGene[arm]['segGenes'] = mutate_segGenes(armGene['segGenes'], newGene[arm]['num_levels'])
    # if we have more arms now, add fresh random genes for the new slots only
    missing_arms = num_arms - len(newGene)
    if missing_arms > 0:
        for arm in range(num_arms)[-1*missing_arms:]:
            newGene[arm] = genRandomArmGenes(1)[0]
            newGene[arm]['arm_id'] = arm  # genRandomArmGenes(1) labels it 0
    # if we have fewer arms now, drop the surplus
    for arm in list(newGene.keys()):
        if arm >= num_arms:
            del newGene[arm]
    # always place arms equally far apart
    for arm, armGene in newGene.iteritems():
        newGene[arm]['angle'] = (arm + 1) * math.pi * 1/(num_arms / 2)
    return newGene
def mutate_num_levels(armGene):
    """Nudge the arm's level count by -1/0/+1, clamped to [2, MAX_LEVELS]."""
    parent_levels = armGene['num_levels']
    newlevel = max(2, min(MAX_LEVELS, parent_levels + ran.randint(-1,1)))
    return _choose_value(parent_levels, [newlevel])
def mutate_segGenes(segGenes, num_levels):
    """Mutate every inherited segment gene and reconcile the set with num_levels.

    Fixes the same off-by-one as mutate_armGenes: missing levels were counted
    as num_levels - max(keys), regenerating the last existing level whenever
    one was added. Also removes a dead local random_negative roll (it shadowed
    the module-level value that the mutate_* helpers actually read) and trims
    levels beyond num_levels.
    """
    newGene = {}
    for seg, segGene in segGenes.iteritems():
        newGene[seg] = {}
        newGene[seg]['color'] = mutate_color(segGene)
        # NOTE(review): energy is derived from the *parent's* color, not the
        # freshly mutated one above — confirm that is intended
        newGene[seg]['energy'] = mutate_energy(segGene)
        newGene[seg]['length'] = mutate_length(segGene)
        newGene[seg]['branchfactor'] = mutate_branchfactor(segGene)
        newGene[seg]['movecounter'] = mutate_movecounter(segGene['movecounter'])
        newGene[seg]['seg_id'] = seg
    # if we have more levels now, add fresh random genes for the new slots only
    missing_levels = num_levels - len(newGene)
    if missing_levels > 0:
        for level in range(num_levels)[-1*missing_levels:]:
            newGene[level] = genRandomSegmentGenes(1)[0]
            newGene[level]['seg_id'] = level  # genRandomSegmentGenes(1) labels it 0
    # if we have fewer levels now, drop the surplus
    for level in list(newGene.keys()):
        if level >= num_levels:
            del newGene[level]
    return newGene
def mutate_movecounter(movecount):
    """Shift the move counter by 10 in the import-time-fixed direction."""
    return _choose_value(movecount, [movecount + random_negative * 10])
def mutate_color(segGene):
    """Re-roll the segment color, heavily weighted toward the parent's color."""
    return _get_color(segGene['color'])
def _get_color(parentColor):
if parentColor:
parentColor = [parentColor]
else:
parentColor = []
red = [(255,0,0)]
green = [(0,255,0)]
blue = [(0,0,255)]
cyan = [(0,255,255)]
biglist = parentColor * 100 + red * 2 + green * 1 + blue * 1 + cyan * 1
return choice(biglist)
def mutate_energy(segGene):
    """Green segments carry 10 energy; segments of any other color carry 100."""
    return 10 if segGene['color'] == (0,255,0) else 100
def mutate_length(segGene):
    """Shift the segment length by 1 in the import-time-fixed direction."""
    parentLen = segGene['length']
    return _choose_value(parentLen, [parentLen + random_negative])
#
# import pprint
# # pprint.pprint(genRandomBiotGene())
# pprint.pprint(mutate(genRandomBiotGene()))
| elstupido/primlife | src/biots/genes.py | genes.py | py | 5,842 | python | en | code | 4 | github-code | 36 |
74928715303 | # Derived from https://github.com/greenelab/deep-review/blob/75f2dd8c61099a17235a4b8de0567b2364901e4d/build/randomize-authors.py
# by Daniel Himmelstein under the CC0 1.0 license
# https://github.com/greenelab/deep-review#license
import argparse
import pathlib
import sys
import yaml
from manubot.util import read_serialized_data
MISSING_CONTRIBUTIONS = ["**MISSING**"]
def parse_args():
parser = argparse.ArgumentParser(
description="Select authors for an individual manuscript from metadata.authors "
"or update author metadata for the merged manuscript. Overwrites metadata.yaml."
)
parser.add_argument(
"--keyword", required=True, help="keyword indicating the individual manuscript "
"(e.g. 'pathogenesis') or 'merged' to update author metadata for the merged manuscript"
)
parser.add_argument(
"--path", default="content/metadata.yaml", help="path to metadata.yaml"
)
args = parser.parse_args()
return args
def dump_yaml(obj, path):
path = pathlib.Path(path)
sys.stderr.write(f"Dumping YAML to {path}\n")
with path.open("w", encoding="utf-8") as write_file:
yaml.dump(
obj,
write_file,
# default_flow_style=False,
explicit_start=True,
explicit_end=True,
width=float("inf"),
sort_keys=False,
allow_unicode=True,
)
write_file.write("\n")
def generate_consortium_members(authors):
"""
Generate the list of consortium members from the authors
"""
# Consortium members are all authors who are not consortia
# Sort members by the last token of their name
consortium_members = [author["name"] for author in authors if "consortium" not in author or not author["consortium"]]
return sorted(consortium_members, key=lambda name: name.split()[-1])
def update_merged(path):
"""
Update author contributions for the merged manuscript by taking the union
of all contributions on individual manuscripts. Overwrites existing
contributions for the author that are not associated with an individual
manuscript. Builds the list of consortium members.
"""
metadata = read_serialized_data(path)
authors = metadata.get("authors", [])
metadata["consortiummembers"] = generate_consortium_members(authors)
# Set contributions to the union of all manuscript-specific contributions
# Use general contributions if there are no manuscript-specific contributions
for author in authors:
contributions = set()
if "manuscripts" in author:
for manuscript in author["manuscripts"].keys():
if manuscript.lower() == "contributions":
raise IndentationError(f"Contributions for {author['name']} should be "
"indented under a specific manuscript\n")
# A list of the author's contributions for each individual manuscript
individual_contributions = author["manuscripts"][manuscript].get("contributions", MISSING_CONTRIBUTIONS)
contributions.update(individual_contributions)
elif "contributions" in author:
contributions.update(author["contributions"])
else:
contributions.update(MISSING_CONTRIBUTIONS)
if MISSING_CONTRIBUTIONS[0] in contributions:
sys.stderr.write(f"Missing contributions for {author['name']}\n")
author["contributions"] = sorted(contributions)
# Check whether code of conduct has been approved
if "code of conduct" not in author or "confirmed" not in author["code of conduct"] or not author["code of conduct"]["confirmed"]:
sys.stderr.write(f"{author['name']} has not approved the code of conduct\n")
sys.stderr.write(f"Updating contributions for {len(authors)} authors for merged manuscript\n")
metadata["authors"] = authors
dump_yaml(metadata, path)
def update_individual(path, keyword):
    """
    Build the author list for one individual manuscript.

    Keeps only authors whose ``manuscripts`` mapping contains *keyword*,
    orders them by the manuscript-specific ``order`` value (default -1, which
    floats authors carrying the keyword but no explicit order to the front;
    name breaks ties), replaces each author's contributions with the
    manuscript-specific ones, rebuilds the consortium-member list, and writes
    the metadata back to *path*.
    """
    metadata = read_serialized_data(path)
    all_authors = metadata.get("authors", [])
    metadata["consortiummembers"] = generate_consortium_members(all_authors)
    selected = [
        author for author in all_authors
        if "manuscripts" in author and keyword in author["manuscripts"]
    ]

    def sort_key(author):
        # (order, name): missing order defaults to -1 -> front of the list.
        return author["manuscripts"][keyword].get("order", -1), author["name"]

    selected.sort(key=sort_key)
    for author in selected:
        # The author's contributions recorded for this specific manuscript.
        contributions = author["manuscripts"][keyword].get("contributions", MISSING_CONTRIBUTIONS)
        if contributions == MISSING_CONTRIBUTIONS:
            sys.stderr.write(f"Missing {keyword} contributions for {author['name']}\n")
        author["contributions"] = sorted(contributions)
    sys.stderr.write(f"Found {len(selected)} authors for {keyword} manuscript\n")
    metadata["authors"] = selected
    dump_yaml(metadata, path)
# Command-line entry point: the keyword "merged" rebuilds the union of all
# contributions; any other keyword selects the authors of that individual
# manuscript.
if __name__ == "__main__":
    args = parse_args()
    if args.keyword.lower() == "merged":
        update_merged(args.path)
    else:
        update_individual(args.path, args.keyword)
| greenelab/covid19-review | build/update-author-metadata.py | update-author-metadata.py | py | 5,812 | python | en | code | 117 | github-code | 36 |
228949403 | eps = 1e-4
n_generations = N_generations_max
# Genetic-algorithm main loop: evolve the population one generation at a time.
# NOTE(review): all helpers (selection_tournament, crossover_pop,
# mutation_v2_pop, metric, interpolate_SR) and the data (SR_all, metric_all,
# depth, y, muy_k, p0, ...) come from the surrounding notebook scope — this
# cell is not self-contained.
for i in range(N_generations_max-1):
    # print(i)
    SR_i = SR_all[i]
    metric_i = metric_all[i]
    # Early stop: every individual has the same score, population has converged.
    if metric_i.min() == metric_i.max():
        n_generations = i+1
        break
    # Tournament selection (size 2) on the negated metric, i.e. lower metric wins.
    selected_inds = selection_tournament(-metric_i, N_population, 2, elitism=True)
    SR_i = [SR_i[ind] for ind in selected_inds]
    # Crossover, score the offspring, mutate, then score again for the next generation.
    SR_ip = crossover_pop(SR_i, -metric_i[selected_inds], crossover_operator, k=5)
    metric_ip = np.array([metric(SR_i_j , interpolate_SR, [depth, y], muy_k+p0, metric_type=metric_type) for SR_i_j in SR_ip])
    SR_ip = mutation_v2_pop(SR_ip, -metric_ip, xrange=[0,1], yrange=[0,3], eta=20)
    metric_ip = np.array([metric(SR_i_j , interpolate_SR, [depth, y], muy_k+p0, metric_type=metric_type) for SR_i_j in SR_ip])
    metric_all[i+1] = metric_ip
    SR_all += [SR_ip]
print(n_generations)
# metric_all = metric_all[:n_generations]
# Truncate the history to the generations actually produced.
SR_all = SR_all[:n_generations]
| hhnam96/AstroGeo | ipython_cell_input.py | ipython_cell_input.py | py | 932 | python | en | code | 0 | github-code | 36 |
323629396 | """Added paid to order model
Revision ID: eb502f9a5410
Revises: 82df20a186ef
Create Date: 2020-04-19 17:05:05.312230
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = 'eb502f9a5410'
down_revision = '82df20a186ef'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: add the nullable boolean ``paid`` column to ``order``."""
    paid_column = sa.Column('paid', sa.Boolean(), nullable=True)
    op.add_column('order', paid_column)
def downgrade():
    """Revert this revision: drop the ``paid`` column from ``order``."""
    op.drop_column('order', 'paid')
| Dsthdragon/kizito_bookstore | migrations/versions/eb502f9a5410_added_paid_to_order_model.py | eb502f9a5410_added_paid_to_order_model.py | py | 682 | python | en | code | 0 | github-code | 36 |
30974355669 | import importlib
import shutil
import uuid
from typing import Dict, List, Tuple, Union
from torch import Tensor, nn
from torchdistill.common import module_util
from pathlib import Path
import torch
import time
import gc
import os
from logging import FileHandler, Formatter
from torchdistill.common.file_util import check_if_exists
from torchdistill.common.main_util import is_main_process, load_ckpt
from torchdistill.losses.util import register_func2extract_org_output
from torchdistill.models.official import get_image_classification_model
from torchdistill.models.registry import get_model
from torchdistill.common.constant import def_logger, LOGGING_FORMAT
import numpy as np
from torchinfo import summary
logger = def_logger.getChild(__name__)
def make_dirs(dir_path):
    """Create *dir_path* and any missing parent directories (idempotent)."""
    os.makedirs(dir_path, exist_ok=True)
def make_parent_dirs(file_path):
    """Ensure the directory that will contain *file_path* exists."""
    parent = Path(file_path).parent
    parent.mkdir(parents=True, exist_ok=True)
def setup_log_file(log_file_path, mode='w'):
    """Attach a FileHandler writing to *log_file_path* to torchdistill's root logger.

    Missing parent directories are created first.  *mode* is the usual file
    open mode ('w' truncates, 'a' appends).
    """
    make_parent_dirs(log_file_path)
    fh = FileHandler(filename=log_file_path, mode=mode)
    fh.setFormatter(Formatter(LOGGING_FORMAT))
    def_logger.addHandler(fh)
def _summary_params(module, device):
    """Parameter count of *module* per torchinfo.summary; 0 for a missing module."""
    if module is None:
        return 0
    return summary(module, col_names=["num_params"], verbose=0, mode="eval",
                   device=device).total_params


def calc_compression_module_sizes(bnet_injected_model: nn.Module,
                                  device: str,
                                  input_size: Tuple[int, int, int, int] = (1, 3, 224, 224),
                                  log_model_summary: bool = True) -> Tuple[str, int, Dict[str, int]]:
    """
    Calc params and sizes of the individual components of the compression module.

    Returns (summary string, #params of the full model, dict with the
    encoder-side parameter breakdown: main network, hyper network, context
    module, and total encoder params).

    Refactored: the nine near-identical ``hasattr``/``summary`` call sites are
    collapsed into :func:`_summary_params`, which now passes ``device`` and
    ``mode="eval"`` uniformly (the original omitted them for some submodules;
    the reported parameter counts are unaffected).
    """
    assert hasattr(bnet_injected_model, 'compression_module'), "Model has no compression module"
    model_summary = summary(bnet_injected_model, input_size=input_size,
                            col_names=['input_size', 'output_size', 'mult_adds', 'num_params'],
                            depth=5,
                            device=device,
                            verbose=0,
                            mode="eval")
    model_params = model_summary.total_params
    if log_model_summary:
        logger.info(f"Bottleneck Injected model params:\n{model_summary}")
    comp = bnet_injected_model.compression_module
    # compression module core (analysis/synthesis transforms)
    p_analysis = _summary_params(comp.g_a, device)
    p_synthesis = _summary_params(comp.g_s, device)
    # compression modules with side information (optional hyper networks)
    p_hyper_analysis = _summary_params(getattr(comp, "h_a", None), device)
    p_hyper_synthesis = _summary_params(getattr(comp, "h_s", None), device)
    p_hyper_analysis_2 = _summary_params(getattr(comp, "h_a_2", None), device)
    p_hyper_synthesis_2 = _summary_params(getattr(comp, "h_s_2", None), device)
    # compression modules with context models (optional)
    p_context_prediction = _summary_params(getattr(comp, "context_prediction", None), device)
    p_entropy_parameters = _summary_params(getattr(comp, "entropy_parameters", None), device)
    # entropy estimation
    params_eb = _summary_params(comp.entropy_bottleneck, device)
    params_comp_module = summary(comp, col_names=["num_params"], verbose=0).total_params
    summary_str = f"""
    Compression Module Summary:
    Params Analysis: {p_analysis:,}
    Params Synthesis: {p_synthesis:,}
    Params Hyper Analysis: {p_hyper_analysis:,}
    Params Hyper Synthesis: {p_hyper_synthesis:,}
    Params Hyper Analysis 2: {p_hyper_analysis_2:,}
    Params Hyper Synthesis 2: {p_hyper_synthesis_2:,}
    Params Context Prediction: {p_context_prediction:,}
    Params Entropy Parameters: {p_entropy_parameters :,}
    Params Entropy Bottleneck: {params_eb:,}
    Total Params Compression Module: {params_comp_module:,}
    Which makes up {params_comp_module / model_params * 100:.2f}% of the total model params
    """
    enc_params_main = p_analysis
    enc_params_hyper = p_hyper_analysis + p_hyper_synthesis + p_hyper_analysis_2 + p_hyper_synthesis_2
    enc_params_context_module = p_entropy_parameters + p_context_prediction
    total_encoder = enc_params_main + enc_params_hyper + enc_params_context_module
    return summary_str, model_params, {"Main Network": enc_params_main,
                                       "Hyper Network": enc_params_hyper,
                                       "Context Module": enc_params_context_module,
                                       "Total Encoder Params": total_encoder}
def calc_compression_module_overhead(bnet_injected_model: nn.Module,
                                     base_model: nn.Module,
                                     device: str,
                                     input_size: Tuple[int, int, int, int] = (1, 3, 224, 224),
                                     log_model_summary: bool = True) -> Tuple[str, int, Dict[str, int]]:
    """Summarize the parameter overhead of the bottleneck-injected model over *base_model*.

    Returns (summary string including the overhead percentage, total #params
    of the injected model, encoder-side parameter breakdown dict).
    The return annotation was corrected from ``Tuple[str, int, int]``: the
    third item is the dict produced by calc_compression_module_sizes.
    """
    model_summary = summary(base_model, input_size=input_size,
                            col_names=['input_size', 'output_size', 'mult_adds', 'num_params'],
                            depth=3,
                            device=device,
                            verbose=0,
                            mode="eval")
    if log_model_summary:
        logger.info(f"Base model params:\n{model_summary}")
    # in case teacher model is a mock model (0 params would divide by zero below)
    teacher_params = model_summary.total_params or 1
    summary_str, model_params, enc_params = calc_compression_module_sizes(bnet_injected_model,
                                                                          device,
                                                                          input_size,
                                                                          log_model_summary)
    summary_str = f"""{summary_str}
    Incurring a total overhead of {(model_params - teacher_params) / teacher_params * 100:.2f}% in parameters w.r.t the original classification model
    """
    return summary_str, model_params, enc_params
def freeze_module_params(modules: Union[List[nn.Module], nn.Module]):
    """
    Disable gradient tracking on every parameter of the given module(s).

    :param modules: a single module or a list of modules.

    Bug fixes vs. the original:
    * ``isinstance(list, modules)`` had its arguments swapped and raised
      ``TypeError`` on every call.
    * ``@torch.inference_mode`` was applied without parentheses; on torch
      versions lacking paren-less decorator support the call returned a
      wrapper instead of executing.  Toggling ``requires_grad`` needs no
      grad-mode context, so the decorator is dropped entirely.
    """
    if not isinstance(modules, list):
        modules = [modules]
    for module in modules:
        for param in module.parameters():
            param.requires_grad = False
def unfreeze_module_params(modules: Union[List[nn.Module], nn.Module]):
    """
    Re-enable gradient tracking on every parameter of the given module(s).

    :param modules: a single module or a list of modules.

    Bug fixes vs. the original:
    * ``isinstance(list, modules)`` had its arguments swapped (``TypeError``).
    * The body set ``param.requires_grad = False`` — i.e. it froze instead of
      unfreezing; it now sets ``True``.
    * The paren-less ``@torch.inference_mode`` decorator is dropped (see
      ``freeze_module_params``); no grad-mode context is needed here.
    """
    if not isinstance(modules, list):
        modules = [modules]
    for module in modules:
        for param in module.parameters():
            param.requires_grad = True
def chmod_r(path: str, mode: int):
    """Apply *mode* to *path* and to everything below it; no-op if *path* is missing."""
    if not os.path.exists(path):
        return
    os.chmod(path, mode)
    for root, dirs, files in os.walk(path):
        for entry in dirs + files:
            os.chmod(os.path.join(root, entry), mode)
def mkdir(folder: str):
    """Create *folder* (including parents) unless something already exists at that path."""
    if os.path.exists(folder):
        return
    os.makedirs(folder, exist_ok=True)
def prepare_log_file(test_only, log_file_path, config_path, start_epoch, overwrite=False):
    """Derive the log-file location from the config path and wire up file logging.

    Only the main (rank-0) process touches the filesystem.  Evaluation runs
    get an ``_eval`` suffix.  A fresh run (``start_epoch == 0`` or
    *overwrite*) gets a uniquified file opened in 'w' mode; a resumed run
    appends to the existing log.
    """
    eval_file = "_eval" if test_only else ""
    if is_main_process():
        if log_file_path:
            # Explicit log dir: name the file after the config's stem.
            log_file_path = f"{os.path.join(log_file_path, Path(config_path).stem)}{eval_file}.log"
        else:
            # Default: mirror the config path under a 'logs' tree.
            log_file_path = f"{config_path.replace('config', 'logs', 1)}{eval_file}.log"
        if start_epoch == 0 or overwrite:
            # Fresh run: avoid clobbering earlier logs by picking a unique name.
            log_file_path = uniquify(log_file_path)
            mode = 'w'
        else:
            mode = 'a'
        setup_log_file(os.path.expanduser(log_file_path), mode=mode)
def rm_rf(path: str):
    """
    Recursively delete a file or directory tree; silently ignores missing or
    empty paths.  Permissions are loosened first on a best-effort basis.
    """
    if not path or not os.path.exists(path):
        return
    try:
        chmod_r(path, 0o777)
    except PermissionError:
        pass
    if os.path.isdir(path):
        shutil.rmtree(path)
    else:
        # Regular files, but also sockets/FIFOs and other non-directory entries.
        os.remove(path)
def to_token_tensor(t: Tensor):
    """Convert a (B, C, H, W) feature map into (B, H*W, C) token layout; 3-D input passes through."""
    if t.dim() == 3:
        return t
    return t.flatten(2).transpose(1, 2)
def to_img_tensor(t: Tensor, resolution):
    """Reshape (B, H*W, C) tokens back into a (B, C, H, W) image tensor; 4-D input passes through."""
    if t.dim() == 4:
        print("Tensor already in img shape")
        return t
    batch, _, _channels = t.shape
    height, width = resolution
    return t.transpose(1, 2).view(batch, -1, height, width)
class AverageMeter:
    """Tracks the most recent value alongside a running sum, count and average."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def check_if_module_exits(module, module_path):
    """Return True if the dotted *module_path* resolves to an attribute chain on *module*."""
    names = module_path.split('.')
    current = module
    for name in names[:-1]:
        if not hasattr(current, name):
            return False
        current = getattr(current, name)
    return hasattr(current, names[-1])
def extract_entropy_bottleneck_module(model):
    """Return the submodule that owns the entropy bottleneck, or None.

    Unwraps DDP/DataParallel first, then probes the two attribute layouts used
    across the project ('compression_module' vs 'compression_model').
    Note: the *parent* compression module is returned, not the bottleneck itself.
    """
    model_wo_ddp = model.module if module_util.check_if_wrapped(model) else model
    entropy_bottleneck_module = None
    if check_if_module_exits(model_wo_ddp, "compression_module.entropy_bottleneck"):
        entropy_bottleneck_module = module_util.get_module(model_wo_ddp, "compression_module")
    elif check_if_module_exits(model_wo_ddp, 'compression_model.entropy_bottleneck'):
        entropy_bottleneck_module = module_util.get_module(model_wo_ddp, "compression_model")
    return entropy_bottleneck_module
def compute_bitrate(likelihoods, input_size):
    """Turn a likelihood tensor into (bits-per-pixel, total bits) for a (B, C, H, W) input."""
    batch, _, height, width = input_size
    probs = likelihoods.detach().cpu()
    total_bits = -probs.log2().sum()
    bpp = total_bits / (batch * height * width)
    return bpp, total_bits
def start_timer():
    """Start the global wall-clock timer; pairs with end_timer(). Requires CUDA.

    The GC pass and cache flush reduce allocator noise, and the synchronize
    ensures pending GPU work is not billed to the timed region.
    """
    global start_time
    gc.collect()
    torch.cuda.empty_cache()
    # NOTE(review): reset_max_memory_allocated() is deprecated in newer torch
    # in favour of reset_peak_memory_stats() — confirm the installed version.
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.synchronize()
    start_time = time.time()
def end_timer():
    """Return seconds elapsed since the last start_timer() call (CUDA-synchronized).

    Relies on the module-level ``start_time`` global set by start_timer().
    """
    torch.cuda.synchronize()
    end_time = time.time()
    return end_time - start_time
def load_model(model_config, device, distributed, skip_ckpt=False,
               load_stage1_ckpt=False,
               apply_quantization=False,
               load_orig=False):
    """Build a model from *model_config* and optionally restore its weights.

    Tries the torchvision-style classification factory first and falls back
    to the torchdistill model registry (optionally via a torch.hub repo).
    Checkpoint selection: 'ckpt_orig' when *load_orig*, 'ckpt_stage1' when
    *load_stage1_ckpt*, otherwise 'ckpt'.  Quantization hooks are applied
    before weights are loaded.  Returns the model moved to *device*.
    """
    model = get_image_classification_model(model_config, distributed)
    if model is None:
        repo_or_dir = model_config.get('repo_or_dir', None)
        model = get_model(model_config['name'], repo_or_dir, **model_config['params'])
    if apply_quantization:
        model.prepare_quantization()
        model.apply_quantization()
    if not skip_ckpt:
        if load_orig:
            ckpt_file_path = os.path.expanduser(model_config.get('ckpt_orig'))
        else:
            ckpt_file_path = os.path.expanduser(model_config.get('ckpt_stage1') if load_stage1_ckpt else model_config['ckpt'])
        load_ckpt(ckpt_file_path, model=model, strict=True)
    else:
        logger.info('Skipping loading from checkpoint...')
    return model.to(device)
def get_no_stages(train_config):
    """Count config keys mentioning 'stage' (one per training stage)."""
    return sum(1 for key in train_config if "stage" in key)
def uniquify(path):
    """Return *path* unchanged, or "<stem> (N)<ext>" for the first N that is free."""
    stem, ext = os.path.splitext(path)
    candidate = path
    n = 1
    while os.path.exists(candidate):
        candidate = f"{stem} ({n}){ext}"
        n += 1
    return candidate
def compute_psnr(recon_images, ground_truths):
    """PSNR (in dB) between reconstructions and targets, assuming values in [0, 1].

    NOTE: ground truths are expected to be batched like the reconstructions.
    """
    with torch.no_grad():
        mse = (recon_images - ground_truths).pow(2).mean()
        return 10. * torch.log10(1. / mse)
def short_uid() -> str:
    """Return the first 8 hexadecimal characters of a fresh UUID4."""
    return str(uuid.uuid4())[:8]
def append_to_filename(filename: str, appendix: str, sep='_'):
    """Insert `<sep><appendix>` between the stem and the suffix of *filename*."""
    p = Path(filename)
    new_name = f"{p.stem}{sep}{appendix}{p.suffix}"
    return os.path.join(os.path.dirname(filename), new_name)
def calc_head_size(model,
                   encoder_paths=('compression_module.g_a',
                                  'compression_module.h_s',
                                  'compression_module.h_a')):
    """
    Size of the encoder-side "head" of *model*, as reported by
    analyze_model_size.  NOTE(review): the original docstring said "kB", but
    analyze_model_size accumulates bits — confirm which unit callers expect.
    """
    breakdown = analyze_model_size(model, encoder_paths=encoder_paths,
                                   additional_rest_paths=('compression_module.g_s', 'backbone'))
    return breakdown['encoder']
def analyze_model_size(model, encoder_paths=None, additional_rest_paths=None, ignores_dtype_error=True):
    """
    Break the model's state-dict size (in bits) into encoder vs. rest buckets.

    Modified version from SC2bench.

    :param model: module whose ``state_dict`` is inspected.
    :param encoder_paths: key prefixes counted towards the encoder size.
    :param additional_rest_paths: exact keys forced into the rest bucket.
    :param ignores_dtype_error: print-and-skip unexpected dtypes instead of
        raising ``TypeError``.
    :return: dict with ``model``, ``encoder`` and ``rest`` sizes in bits.

    Bug fix: calling with the default ``None`` path arguments used to raise
    ``TypeError`` from ``set(None)``; ``None`` now means "no paths".
    """
    model_size = 0
    encoder_size = 0
    rest_size = 0
    encoder_path_set = set(encoder_paths or ())
    additional_rest_path_set = set(additional_rest_paths or ())
    # todo: exclude buffers
    for k, v in model.state_dict().items():
        if v is None:
            # "fake params" of eagerly quantized modules
            assert 'model_fp32' in k
            continue
        dim = v.dim()
        param_count = 1 if dim == 0 else np.prod(v.size())
        v_dtype = v.dtype
        if v_dtype in (torch.int64, torch.float64):
            num_bits = 64
        elif v_dtype in (torch.int32, torch.float32):
            num_bits = 32
        elif v_dtype in (torch.int16, torch.float16, torch.bfloat16):
            num_bits = 16
        elif v_dtype in (torch.int8, torch.uint8, torch.qint8, torch.quint8):
            num_bits = 8
        elif v_dtype == torch.bool:
            # NOTE(review): torch stores bool tensors one byte per element;
            # the 2-bit accounting is kept from the original — confirm intent.
            num_bits = 2
        else:
            error_message = f'For {k}, dtype `{v_dtype}` is not expected'
            if ignores_dtype_error:
                print(error_message)
                continue
            else:
                raise TypeError(error_message)
        param_size = num_bits * param_count
        model_size += param_size
        match_flag = False
        # NOTE(review): a key matching an encoder prefix but absent from
        # additional_rest_paths is counted in BOTH encoder and rest; this
        # mirrors the original control flow — confirm the double counting
        # is intentional before changing it.
        for encoder_path in encoder_path_set:
            if k.startswith(encoder_path):
                encoder_size += param_size
            if k in additional_rest_path_set:
                rest_size += param_size
                match_flag = True
                break
        if not match_flag:
            rest_size += param_size
    return {'model': model_size, 'encoder': encoder_size, 'rest': rest_size}
class GradScaleMockWrapper:
    """Uniform interface over an optional AMP grad scaler.

    With a real scaler, every call is forwarded to it; with a falsy value
    (e.g. ``None``) the wrapper degrades to an unscaled loss and a plain
    ``optimizer.step()``.
    """

    def __init__(self, scaler):
        self.scaler = scaler

    def scale(self, loss):
        """Return the (possibly) scaled loss."""
        return self.scaler.scale(loss) if self.scaler else loss

    def step(self, optim):
        """Advance *optim*, through the scaler when one is present."""
        if self.scaler:
            self.scaler.step(optim)
        else:
            optim.step()

    def update(self):
        """Refresh the scaler's scale factor; no-op without a scaler."""
        if self.scaler:
            self.scaler.update()
def get_module(module_path):
    """
    Import and return the module named by *module_path*.
    """
    return importlib.import_module(module_path)
@torch.inference_mode()
def load_ckpt_inf(ckpt_file_path, model=None, optimizer=None, lr_scheduler=None, strict=True):
    """Load a checkpoint from disk or URL and restore model/optimizer/scheduler state.

    Each component is restored from its named entry ('model', 'optimizer',
    'lr_scheduler') when present.  When exactly one component is passed and
    the checkpoint has no named entries, the whole checkpoint is treated as
    that component's state dict.

    :return: (best_value, config, args) stored in the checkpoint, or
        (None, None, None) when no checkpoint could be located.
    """
    if check_if_exists(ckpt_file_path):
        ckpt = torch.load(ckpt_file_path, map_location='cpu')
    elif isinstance(ckpt_file_path, str) and \
            (ckpt_file_path.startswith('https://') or ckpt_file_path.startswith('http://')):
        ckpt = torch.hub.load_state_dict_from_url(ckpt_file_path, map_location='cpu', progress=True)
    else:
        logger.info('ckpt file is not found at `{}`'.format(ckpt_file_path))
        return None, None, None
    if model is not None:
        if 'model' in ckpt:
            logger.info('Loading model parameters')
            # NOTE(review): both branches below are identical; the
            # `strict is None` case looks like it was meant to call
            # load_state_dict without the strict kwarg — confirm and simplify.
            if strict is None:
                model.load_state_dict(ckpt['model'], strict=strict)
            else:
                model.load_state_dict(ckpt['model'], strict=strict)
        elif optimizer is None and lr_scheduler is None:
            logger.info('Loading model parameters only')
            model.load_state_dict(ckpt, strict=strict)
        else:
            logger.info('No model parameters found')
    if optimizer is not None:
        if 'optimizer' in ckpt:
            logger.info('Loading optimizer parameters')
            optimizer.load_state_dict(ckpt['optimizer'])
        elif model is None and lr_scheduler is None:
            logger.info('Loading optimizer parameters only')
            optimizer.load_state_dict(ckpt)
        else:
            logger.info('No optimizer parameters found')
    if lr_scheduler is not None:
        if 'lr_scheduler' in ckpt:
            logger.info('Loading scheduler parameters')
            lr_scheduler.load_state_dict(ckpt['lr_scheduler'])
        elif model is None and optimizer is None:
            logger.info('Loading scheduler parameters only')
            lr_scheduler.load_state_dict(ckpt)
        else:
            logger.info('No scheduler parameters found')
    return ckpt.get('best_value', 0.0), ckpt.get('config', None), ckpt.get('args', None)
@register_func2extract_org_output
def extract_org_loss_map(org_criterion, student_outputs, teacher_outputs, targets, uses_teacher_output, **kwargs):
    """Compute the original-task loss(es), keyed by output index.

    Returns an empty dict when no criterion is configured.  Multi-output
    students (e.g. models with auxiliary classifiers) yield one entry per
    sub-output, unless the criterion also consumes the teacher output, in
    which case the whole output collection is scored once.
    """
    if org_criterion is None:
        return dict()
    if isinstance(student_outputs, (list, tuple)):
        if uses_teacher_output:
            return {0: org_criterion(student_outputs, teacher_outputs, targets)}
        return {i: org_criterion(sub, targets) for i, sub in enumerate(student_outputs)}
    if uses_teacher_output:
        return {0: org_criterion(student_outputs, teacher_outputs, targets)}
    return {0: org_criterion(student_outputs, targets)}
def normalize_range(t: Tensor, new_min: float = 0.0, new_max: float = 1.0) -> Tensor:
    """Linearly rescale *t* so its min/max map onto [new_min, new_max]."""
    lo = torch.min(t)
    hi = torch.max(t)
    unit = (t - lo) / (hi - lo)
    return unit * (new_max - new_min) + new_min
| rezafuru/FrankenSplit | misc/util.py | util.py | py | 20,533 | python | en | code | 9 | github-code | 36 |
35910502981 | import os, discord, random, asyncio, json, time
from discord.ext import commands
class games(commands.Cog):
    """Cog bundling small game commands: a coin flip and two dungeon-game prototypes."""
    def __init__(self,bot):
        # NOTE(review): this attribute shares its name with the `coin_toss`
        # command method below; discord.py registers commands at class
        # definition time so the commands still work, but the collision is
        # fragile — consider renaming one of them.
        self.coin_toss=0
        self.bot=bot
        self.counter = 0
    @commands.command(name="FLIP",aliases=['FLIP`'])
    async def coin_toss(self,ctx,choice):
        """Flip a coin: the caller bets 'H' or 'T' and gets embeds showing the toss and outcome."""
        #choice must be either H or T
        # 0 represents heads, 1 represents tails.
        self.coin_toss=random.randint(0,1)
        if choice == 'H' or choice == 'T':
            if choice== 'H':
                choice_text='Heads'
                if self.coin_toss==0:
                    won_text='And Won!'
                else:
                    won_text='You Lost!'
            else:
                choice_text='Tails'
                if self.coin_toss==1:
                    won_text='And Won!'
                else:
                    won_text='You Lost!'
            # Three embeds: an animated "tossing" placeholder plus one result
            # embed per outcome; only the matching result embed is sent.
            coin_embed=discord.Embed(title="The Coin Is Being Tossed",description="You're Betting On "+choice_text ,colour=discord.Colour.gold())
            coin_embed.set_author(name=ctx.author.name,icon_url=ctx.author.avatar_url)
            coin_embed.set_image(url="https://cdn.dribbble.com/users/1493264/screenshots/5573460/coin-flip-dribbble.gif")
            coin_embed.set_thumbnail(url="https://i.imgur.com/YTg8cjS.png")
            heads_embed=discord.Embed(title="The Coin Has Been Tossed",description="You Got Heads! "+won_text ,colour=discord.Colour.gold())
            heads_embed.set_author(name=ctx.author.name,icon_url=ctx.author.avatar_url)
            heads_embed.set_image(url="https://rollthedice.online/assets/images/upload/dice/dado-cara-cruz/cara_moneda.png")
            tails_embed=discord.Embed(title="The Coin Has Been Tossed",description="You Got Tails! "+won_text ,colour=discord.Colour.gold())
            tails_embed.set_author(name=ctx.author.name,icon_url=ctx.author.avatar_url)
            tails_embed.set_image(url="https://rollthedice.online/assets/images/upload/dice/dado-cara-cruz/cruz_moneda.png")
            # Show the animation for 7 seconds, then replace it with the result.
            loading=await ctx.send(embed=coin_embed)
            await asyncio.sleep(7)
            await loading.delete()
            if self.coin_toss==0:
                await ctx.send(embed=heads_embed)
            else:
                await ctx.send(embed=tails_embed)
            # Console-side win/loss log for the bot operator.
            if choice == "H" and self.coin_toss==0 or choice == "T" and self.coin_toss==1:
                print("COIN: PLAYER WON")
            else:
                print("COIN: PLAYER LOST")
        else:
            await ctx.send('``INCORRECT FORMAT: PLEASE DO `FLIP` H OR `FLIP` T ``')
    @commands.group()
    async def cool(self,ctx):
        """Command group; replies "No" when invoked without a subcommand."""
        if ctx.invoked_subcommand is None:
            await ctx.send("No")
    @cool.command(name='bot',aliases=['TESTB'])
    async def _bot(self,ctx):
        """Subcommand of `cool`: confirms that the bot is cool."""
        await ctx.send('Yes, the bot is cool.')
    @commands.group(name="DUNGEON",aliases=['DNG`'],invoke_without_command=True)
    @commands.has_role('Owner')
    async def dungeon_game(self,ctx,choice=None):
        """Owner-only prototype: shows the dungeon-game intro embed and marks a run as started."""
        self.dungeon_current=ctx.author.id
        self.is_playing=True
        dungeon_embed=discord.Embed(title="THE DUNGEON",description="To start the game do `DNG` START",colour=discord.Colour.dark_red())
        dungeon_embed.set_author(name=ctx.author.name,icon_url=ctx.author.avatar_url)
        dungeon_embed.set_image(url="https://cdn.discordapp.com/attachments/732597615199387820/732603048664236082/Untitled13_20200714182238.png")
        dungeon_embed.set_thumbnail(url="https://i.imgur.com/YTg8cjS.png")
        dungeon_embed.add_field(name="Description:",value='"The Dungeon" is a Dungeon crawler game where your goal is to roam the dungeon to try and find loot,and finally make a swift exit,careful of the skeleton mobs that may end up catching you ending your run! find the exit as fast as possible with the maximum amount of loot.')
        startup=await ctx.send(embed=dungeon_embed)
    @commands.command(name="DUNGEONT",aliases=['DT`'])
    @commands.has_role('Owner')
    async def dungeon_test(self,ctx,choice=None):
        """Owner-only test harness: waits for up to 10 'hello'/'hi' messages and logs which arrived."""
        print(choice)
        i=0
        if choice ==None:
            await ctx.send("hello")
        def check(m):
            # Only accept messages whose content is exactly 'hello' or 'hi'.
            return m.content == ('hello') or m.content == ('hi')
        # NOTE(review): the loop variable `i` is reassigned by `for` each
        # iteration, so the trailing `i+=1` has no effect — confirm intent.
        for i in range(10):
            msg = await self.bot.wait_for('message', check=check)
            if msg.content== ('hello'):
                print('y')
            elif msg.content==('hi'):
                print('n')
            i+=1
def setup(client):
    """Entry point used by discord.py to register this cog on the bot."""
    cog = games(client)
    client.add_cog(cog)
| yassir56069/TSOE | cogs/games.py | games.py | py | 4,574 | python | en | code | 0 | github-code | 36 |
72166330345 | # Importing necessary libraries
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
# from sklearn.metrics import mean_squared_error, r2_score
# Dataset of house areas and prices loaded from an Excel file; the first
# column is the feature (area) and the last column is the target (price).
mona = "data.xlsx"
data = pd.read_excel(mona)
# Create a DataFrame from the dataset
#df = pd.DataFrame(data)
# Split the data into features (X) and target (y)
X = data.iloc[:, 0:1]
y = data.iloc[:, -1]
# Split the data into training and testing sets (80/20, fixed seed)
X_train, X_test, y_train, y_test = train_test_split(X.values, y, test_size=0.2, random_state=42)
# Create and train the linear regression model
model = LinearRegression()
model.fit(X_train, y_train)
# Make predictions on the test set (evaluation currently disabled)
# y_pred = model.predict(X_test)
# Evaluate the model
# mse = mean_squared_error(y_test, y_pred)
# r2 = r2_score(y_test, y_pred)
# Prompt until the user enters a parseable number for the area
while True:
    try:
        user_input_area = float(input("Enter the area in sq. ft for price prediction: "))
        break
    except ValueError:
        print("Invalid input! Please enter a valid number.")
# Predict the price for the user input area (model expects a 2-D array)
predicted_price = model.predict([[user_input_area]])
price = predicted_price[0]
print(f"Predicted Price for {user_input_area} sq.ft is Rs {round(price,2)}")
| preyar/RealEstatePricePrediction | Prediction.py | Prediction.py | py | 1,385 | python | en | code | 0 | github-code | 36 |
884569807 | #driving 2 (add new usa)
# Driving-licence age check: Taiwan requires 18+, the USA 16+.
# Bug fixes vs. the original: (1) under-age users in the 'usa' branch got no
# feedback at all; they now get the same not-allowed message as the Taiwan
# branch. (2) the unknown-country message is now attached to the country
# check, so any country other than taiwan/usa gets it.
country = input('你的国家是? ')
if country == 'taiwan' :
    age = input('请输入年龄? ')
    age = int(age)
    if age >= 18 :
        print('你已经',age,'岁了,可以考驾照')
    else :
        print('you`re not allowed to get liance!')
elif country == 'usa' :
    age = input('请输入年龄? ')
    age = int(age)
    if age >= 16 :
        print('你已经',age,'岁了,可以考驾照')
    else :
        # Reuses the existing message for consistency ("liance" typo kept).
        print('you`re not allowed to get liance!')
else :
    print(' 其他不知道!只知道taiwan and usa :> ')
| hugoming620/driving | driving2.py | driving2.py | py | 475 | python | zh | code | 0 | github-code | 36 |
30176732849 | import re
with open("250-najbolj-znanih-filmov.html") as dat:
vsebina = dat.read()
vzorec = re.compile(
r'<a href="/title/tt'
r'(?P<id>\d+)'
r'/\?ref_=adv_li_tt">(?P<naslov>.+?)</a>\s*'
r'<span class="lister-item-year text-muted unbold">'
r'(\([IVXLCDM]+\) )?' # če je več filmov v istem letu, dobijo rimske številke
r'\((?P<leto>\d+)\)' # vzorec za leto
r'</span>'
)
for i, ujemanje in enumerate(vzorec.finditer(vsebina), 1):
print(i, ujemanje.groupdict())
| matijapretnar/programiranje-1 | 01-regularni-izrazi/predavanja/preberi_filme.py | preberi_filme.py | py | 502 | python | sl | code | 6 | github-code | 36 |
4799644228 | from cadastro import Cadastro, Login
from email_senha import EmailSenha
import json, random, string
login = False
cadastro = False
opcao = input("1. login\n2. cadastrar ")
if opcao == "1":
login = Login().autenticacao()
tentativas = 0
while login == "senha incorreta":
print("senha incorreta")
login = Login().autenticacao()
tentativas += 1
if tentativas == 2:
codigo = "".join(random.choices(string.ascii_uppercase + string.digits, k=6))
dados = Login().retorna_email()
retorno_email = EmailSenha(dados[0]).envia(codigo)
print(retorno_email)
if "não existe" in retorno_email:
pass
else:
autenticacao = input("digite o codigo enviado no seu email: ")
if autenticacao == codigo:
senha = input("digite sua nova senha: ")
confirmacao = input("confirme sua nova senha: ")
if senha == confirmacao:
Cadastro(dados[0], senha, dados[1], dados[2], dados[3], True).armazena()
else:
for i in range(2):
senha = input("digite sua nova senha: ")
confirmacao = input("confirme sua nova senha: ")
if senha == confirmacao:
Cadastro(dados[0], senha, dados[1], dados[2], dados[3], True).armazena()
break
else:
i = 0
while autenticacao != codigo:
i += 1
autenticacao = input("digite o codigo enviado no seu email: ")
if i == 4:
break
break
if login == "logado":
print("_"*50, "\n")
print("1. consultar\n2. atualizar")
print("_"*50, "\n")
option = input("digite a opção: ")
if option == "1":
with open("db.json", "r", encoding="utf8") as db:
data = json.load(db)
print("_"*50, "\n")
print("1. consultar base\n2. consultar usuario")
print("_"*50, "\n")
option = input("digite a opção: ")
if option == "1":
for id in data:
print(id, data[id]["nickname"])
elif option == "2":
user = input("qual usuário você deseja consultar? ")
for id in data:
for user_db in data[id]:
if user in data[id][user_db]:
print(user)
# print(data)
#to do: atualizar()
elif opcao == "2":
Cadastro().armazena() | Bonbeck/Cadastro | main.py | main.py | py | 2,800 | python | pt | code | 0 | github-code | 36 |
def moveZeroes(self, nums):
    """
    Move all zeros in *nums* to the end in-place, preserving the relative
    order of the non-zero elements.

    :type nums: List[int]
    :rtype: None Do not return anything, modify nums in-place instead.

    Bug fix: the original did ``nums[i] = nums[pter_non_0]``, copying the
    (possibly zero) value at the write pointer *backwards* over the element
    being scanned, corrupting the list (e.g. [0, 1] became [0, 0]).  The
    correct move is to swap the scanned non-zero element into the write
    position.
    """
    write = 0  # index where the next non-zero element belongs
    for read, value in enumerate(nums):
        if value != 0:
            nums[write], nums[read] = nums[read], nums[write]
            write += 1
| Elmanou89/leetcode_daily_challenge | week1/movezeroes.py | movezeroes.py | py | 327 | python | en | code | 0 | github-code | 36 |
12631299379 | import cv2
import os
# 비디오 파일 경로 설정
video_path = "차체 영상 1차.mp4"
# 저장할 이미지 파일을 저장할 폴더 경로 설정
output_folder = "video"
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# 비디오 캡쳐 객체 생성
cap = cv2.VideoCapture(video_path)
# 캡쳐가 올바르게 열렸는지 확인
if not cap.isOpened():
print("Error: Could not open video.")
exit()
# 프레임 캡쳐 설정
frame_count = 0
while True:
# 프레임 캡쳐
ret, frame = cap.read()
# 비디오의 끝에 도달하면 종료
if not ret:
break
# 이미지 파일 경로 설정
image_path = os.path.join(output_folder, f"frame_{frame_count:04d}.jpg")
# 이미지 파일로 저장
cv2.imwrite(image_path, frame)
frame_count += 1
# 사용한 리소스 해제
cap.release()
cv2.destroyAllWindows()
print(f"{frame_count} frames captured and saved to '{output_folder}'")
| TAEM1N2/kudos12_2023 | data_setting/python_practice/video_cap.py | video_cap.py | py | 971 | python | ko | code | 1 | github-code | 36 |
20052331496 | __author__ = 'matsrichter'
import numpy as np
import AccountV2 as acc
import time
class Risk_Manager:
    """Gatekeeper between a raw trading signal and the account.

    Vetoes trades while stop-losses are cooling down, when the signal's
    magnitude is below the risk-aversion threshold, or when the drawdown
    limit is breached — the last case also liquidates everything and flips
    the fatal ``isBroke`` flag, after which no trade is ever approved again
    by the surrounding system.
    """

    def __init__(self, account, risk_aversion_factor, max_draw_down):
        assert(isinstance(account, acc.Account))
        self.account = account
        self.cooldown = 40000
        self.tstep = 0
        # Fatal failure state; True once the drawdown limit has been exceeded.
        self.isBroke = False
        self.risk_aversion = risk_aversion_factor
        self.max_draw_down = max_draw_down
        self.max_profit = 0

    def eval_risk(self, decision):
        """Return True iff the raw *decision* may be executed on this tick.

        :param decision: raw (signed) decision strength from the learner.
        """
        self.tstep += 1
        # Close any triggered stop-losses first; a hit starts a cooldown.
        stoploss_hit = self.account.check_stoploss()
        if stoploss_hit:
            self.set_cooldown()
            return False
        # No trading while a cooldown is active.
        if self.on_cooldown():
            return False
        # The signal must clear the aversion threshold in magnitude.
        if np.fabs(decision) < self.risk_aversion:
            return False
        drawdown = self.account.maxVal - self.account.total_account_value()
        if drawdown > self.max_draw_down:
            # Drawdown limit breached: liquidate and shut down permanently.
            self.isBroke = True
            self.account.sell_all()
            return False
        return True

    def set_cooldown(self, length=100):
        """Pause trading for *length* ticks, unless a cooldown is already running.

        Stop-losses keep being enforced during the pause.
        """
        if self.tstep > self.cooldown:
            self.cooldown = self.tstep + length

    def sell_all(self):
        """Liquidate every open position in the account."""
        self.account.sell_all()

    def on_cooldown(self):
        """Return True while trading is paused."""
        return self.tstep < self.cooldown
| MLRichter/AutoBuffett | Layer2/Risk_Manager.py | Risk_Manager.py | py | 1,976 | python | en | code | 8 | github-code | 36 |
20227289100 | #!/usr/bin/python
import sys, os
import sets
from Bio import SeqIO
def make_location_set(l):
return sets.Set([n for n in xrange(l.nofuzzy_start, l.nofuzzy_end)])
for rec in SeqIO.parse(sys.stdin, "genbank"):
new_features = []
for feature in rec.features:
add = 1
if feature.type == 'CDS':
if '*' in feature.qualifiers['translation'][0]:
location_set = make_location_set(feature.location)
for f2 in rec.features:
if f2.type == 'CDS' and f2 != feature:
ret = location_set.intersection(make_location_set(f2.location))
if ret:
add = 0
if add:
new_features.append(feature)
rec.features = new_features
SeqIO.write([rec], sys.stdout, "genbank")
| nickloman/xbase | annotation/remove_overlaps_with_frameshifts.py | remove_overlaps_with_frameshifts.py | py | 843 | python | en | code | 6 | github-code | 36 |
23410005730 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds hardware-description fields to the
    `caja` (cash register) model and refreshes the default dates on `timbrado`."""
    dependencies = [
        ('bar', '0238_auto_20160817_1352'),
    ]
    operations = [
        migrations.AddField(
            model_name='caja',
            name='marca',
            field=models.CharField(default=b'PC Standard', help_text=b'Ingrese la marca de la Caja.', max_length=50, verbose_name=b'Marca'),
        ),
        migrations.AddField(
            model_name='caja',
            name='modelo_fabricacion',
            field=models.CharField(default=b'PC Standard Proc. Intel - 4 GBs de RAM', help_text=b'Ingrese el modelo de fabricacion de la Caja.', max_length=100, verbose_name=b'Modelo de Fabricacion'),
        ),
        migrations.AddField(
            model_name='caja',
            name='numero_serie',
            field=models.CharField(default=b'1234567890', help_text=b'Ingrese el numero de serie de la Caja.', max_length=20, verbose_name=b'Numero de Serie'),
        ),
        # NOTE(review): these defaults are the concrete timestamps captured when
        # makemigrations ran (2016-08-17), not dynamic "now" values.
        migrations.AlterField(
            model_name='timbrado',
            name='fecha_autorizacion_timbrado',
            field=models.DateField(default=datetime.datetime(2016, 8, 17, 14, 8, 50, 877000), help_text=b'Ingrese la Fecha de Autorizacion del Timbrado', verbose_name=b'Fecha de Autorizacion del Timbrado'),
        ),
        migrations.AlterField(
            model_name='timbrado',
            name='fecha_limite_vigencia_timbrado',
            field=models.DateField(default=datetime.datetime(2017, 8, 17, 14, 8, 50, 877000), help_text=b'Ingrese la Fecha Limite de Vigencia del Timbrado', verbose_name=b'Fecha Limite de Vigencia del Timbrado'),
        ),
    ]
| pmmrpy/SIGB | bar/migrations/0239_auto_20160817_1408.py | 0239_auto_20160817_1408.py | py | 1,747 | python | es | code | 0 | github-code | 36 |
74957373542 | from __future__ import print_function, division
import os
import torch
import torchtext
import itertools
from loss.loss import NLLLoss
class Evaluator(object):
    """Evaluates a seq2seq model on a dataset, accumulating loss,
    character/sentence accuracy, and an F1 score over non-'0' targets."""
    def __init__(self, loss=None, batch_size=64):
        """
        :param loss: loss accumulator to use; defaults to a fresh NLLLoss.
            BUG FIX: the original default was ``loss=NLLLoss()``, which is
            evaluated once at import time, so every Evaluator instance shared
            (and reset) the same mutable loss object.
        :param batch_size: evaluation batch size
        """
        self.loss = NLLLoss() if loss is None else loss
        self.batch_size = batch_size
    def evaluate(self, model, data):
        """Run `model` over `data` without gradients.

        :returns: (loss, character_accuracy, sentence_accuracy, f1_score)
        """
        model.eval()
        loss = self.loss
        loss.reset()
        match = 0                 # correctly predicted non-padding tokens
        total = 0                 # all non-padding tokens
        match_sentence = 0        # sentences predicted entirely correctly
        total_lengths = 0         # number of sentences seen
        condition_positive = 0    # targets that are neither '0' nor <eos>
        prediction_positive = 0   # predictions that are neither '0' nor <eos>
        true_positive = 0
        check_sentence = True
        #device = None if torch.cuda.is_available() else -1
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        batch_iterator = torchtext.data.BucketIterator(
            dataset=data, batch_size=self.batch_size,
            sort=True, sort_key=lambda x: len(x.src),
            device=device, train=False)
        # Resolve the special-token ids once, outside the batch loop.
        tgt_vocab = data.fields['tgt'].vocab
        pad = tgt_vocab.stoi[data.fields['tgt'].pad_token]
        eos = tgt_vocab.stoi['<eos>']
        zero = tgt_vocab.stoi['0']
        unk = tgt_vocab.stoi[data.fields['tgt'].unk_token]
        with torch.no_grad():
            for batch in batch_iterator:
                input_variables, input_lengths = getattr(batch, 'src')
                input_part, _ = getattr(batch, 'srcp')
                target_variables = getattr(batch, 'tgt')
                target_part = getattr(batch, 'tgtp')
                decoder_outputs, decoder_hidden, other = model(input_variables, input_lengths.tolist(),
                                                   input_part, target_variables, target_part)
                correct_list = []
                # Evaluation: walk the decoder output one time step at a time.
                seqlist = other['sequence']
                for step, step_output in enumerate(decoder_outputs):
                    target = target_variables[:, step + 1]
                    loss.eval_batch(step_output.view(target_variables.size(0), -1), target)
                    predict = seqlist[step].view(-1)
                    non_padding = target.ne(pad)
                    correct = predict.eq(target).masked_select(non_padding).sum().item()
                    correct_list.append(predict.eq(target).masked_select(non_padding).tolist())
                    CP = target.ne(zero).eq(target.ne(eos)).masked_select(non_padding)
                    PP = predict.ne(zero).eq(predict.ne(eos)).masked_select(non_padding)
                    c_mask = target.ne(pad).eq(target.ne(eos)).eq(target.ne(unk)).eq(target.ne(zero))
                    TP = target.masked_select(c_mask).eq(predict.masked_select(c_mask))
                    match += correct
                    total += non_padding.sum().item()
                    condition_positive += CP.sum().item()
                    prediction_positive += PP.sum().item()
                    true_positive += TP.sum().item()
                # A sentence counts as matched only if none of its (non-padding)
                # positions were wrong; zip_longest transposes step-major lists
                # into sentence-major tuples.
                q = list(itertools.zip_longest(*correct_list))
                for i in q:
                    check_sentence = False
                    for j in i:
                        if(j == 0):
                            check_sentence = True
                    if(check_sentence == False):
                        match_sentence += 1
                total_lengths += len(input_lengths)
        if total == 0:
            character_accuracy = 0
            sentence_accuracy = 0
        else:
            character_accuracy = match / total
            sentence_accuracy = match_sentence / total_lengths
        if condition_positive == 0:
            recall = 0
        else:
            recall = true_positive / condition_positive
        if prediction_positive == 0:
            precision = 0
        else:
            precision = true_positive / prediction_positive
        if precision == 0 and recall == 0:
            f1_score = 0
        else:
            f1_score = 2.0 * ((precision * recall) / (precision + recall))
        return loss.get_loss(), character_accuracy, sentence_accuracy, f1_score
| hopemini/activity-clustering-multimodal-ml | autoencoder/seq2seq/evaluator/evaluator.py | evaluator.py | py | 4,024 | python | en | code | 3 | github-code | 36 |
21546325152 | #This python script reads a CSV formatted table of methylation sites
#and attaches, depending on the coordinate, 1.5 kb flanking regions
#numbers listed
import csv # module to read CSV files
import re # module to search for regular expressions in files; not in use now but will set up for sophisticated search and grabs
import argparse # module to handle or "parse" arguments
import numpy
def split_head_body(g_file, head_a, body_a):
    """Split FASTA-formatted lines into headers and sequences (appended in place).

    :param g_file: iterable of lines (e.g. from ``readlines()``)
    :param head_a: list that receives header names ('>' and newlines stripped)
    :param body_a: list that receives the concatenated sequence per header
    """
    h_ct = -1  # index of the current record; -1 until the first header is seen
    for line in g_file:
        # str.startswith is the idiomatic (and cheaper) prefix test that the
        # original implemented via re.match('>', ...).
        if line.startswith('>'):
            h_ct += 1
            head_a.append(line.replace("\n", "").replace(">", ""))
            body_a.append("")  # start an empty sequence for this record
        else:
            # Sequence line: strip the newline and append to the current record.
            body_a[h_ct] = body_a[h_ct] + line.replace("\n", "")
#print (headers)
#We will set up the following command line arguments
#-f --csvfile : path to CSV input file
#-s --size    : size of the flanking region on each side of a site
#-d --dnafile : path to FASTA formatted genome sequence file
#-o --output  : path to FASTA output file
optParse = argparse.ArgumentParser()  # create an argument parsing object
optParse.add_argument("-f", "--csvfile", help="path to CSV input file")
optParse.add_argument("-s", "--size", help="size of flanking region")
optParse.add_argument("-d", "--dnafile", help="path to genome sequence file")
optParse.add_argument("-o", "--output", help="path to CSV output file")
argstr = optParse.parse_args()  # store the arguments given by the user
#Now, get to work and open the files (no error handling for now)
csv_f = open(argstr.csvfile, 'r')   # CSV of methylation sites, read-only
out_f = open(argstr.output, 'w')    # FASTA output, write-only
genome_f = open(argstr.dnafile, 'r')
genome = genome_f.readlines()
# Split the genome FASTA into parallel header/sequence lists.
head_a = []
body_a = []
split_head_body(genome, head_a, body_a)
flank_size = int(argstr.size)
#Read the CSV file line by line; column 1 is the genome/contig ID and
#column 2 the coordinate of the methylation site.
for line in csv_f:
    inp_field = line.split(',')
    # BUG FIX: the original tested `inp_field != "Name"` (a list compared to
    # a string, which is always True) and parsed the coordinate *before* the
    # check, so a header row crashed the script with ValueError.  Compare the
    # first column and parse the coordinate only for real data rows.
    if inp_field[0] != '' and inp_field[0] != "Name":
        coord = int(inp_field[1])
        contig = body_a[head_a.index(inp_field[0])]
        # Clamp the flanking window to the contig boundaries.
        if (coord - flank_size) < 1:
            coord_s = 0
        else:
            coord_s = coord - flank_size - 1
        if coord + flank_size > len(contig):
            coord_e = len(contig)
        else:
            coord_e = coord + flank_size
        seq = contig[coord_s:coord_e]
        out_f.write(">" + inp_field[0] + ":" + str(coord) + "\n" + seq + "\n\n")
csv_f.close()
out_f.close()
genome_f.close()
quit()
| lanl/DNA_methylation_analysis | meth_site_flanking_seq.py | meth_site_flanking_seq.py | py | 2,848 | python | en | code | 0 | github-code | 36 |
20294716631 | import socket
def main():
    """Simple UDP echo client: read lines from stdin and exchange them with
    the server at 127.0.0.1:5000 until the user types EXIT."""
    host = '127.0.0.1'
    port = 5001            # local port this client binds to
    server = (host, 5000)  # (address, port) the server listens on
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.bind((host, port))
    message = input('->')
    while message != 'EXIT':
        s.sendto(bytes(message, encoding='utf-8'), server)
        data, addr = s.recvfrom(1024)  # blocks until the server replies
        data = data.decode('utf-8')
        print('Received from server:' + data)
        message = input('->')
    s.close()
if __name__ == '__main__':
    main()
| ijkilchenko/_socket_programming | udp_client.py | udp_client.py | py | 495 | python | en | code | 0 | github-code | 36 |
14570907737 | #importar librerias
from ast import parse
from wsgiref import headers
import requests #peticion al servidor
import lxml.html as html
import pandas as pd
from tqdm.auto import tqdm #barras de progreso
from lxml.etree import ParseError
from lxml.etree import ParserError
import csv
from fake_useragent import UserAgent
ua = UserAgent()
#initialize variables & XPath expressions
url_padre = f'https://www.metacritic.com/browse/games/release-date/available/ps5/metascore?page=0' #listing page, starting at page 0
url_root = 'https://www.metacritic.com'
header = {'User-Agent': ua.random}  # random User-Agent to look like a browser
link_title = '//td[@class="clamp-summary-wrap"]/a/@href'  # XPath: link to each game page
genre = '//li[@class="summary_detail product_genre"]/span[@class="data"]/text()'  # XPath: genre labels
s = requests.Session() #a session reuses connections, so requests are faster
r = s.get(url_padre, headers=header) #sending headers avoids a 400 error
home = r.content.decode('utf-8')
parser = html.fromstring(home)
titles_url = parser.xpath(link_title)
titles_url = [url_root+x for x in titles_url]  # make the relative links absolute
genres_list = []    # one genre list per scraped game
links_offline = []  # URLs that failed to load or parse
def get_genres():
    """Scrape the genre list for every game page in `titles_url`.

    Appends one de-duplicated genre list per game to the module-level
    `genres_list`; failing URLs are collected in `links_offline`.
    """
    for page_url in tqdm(titles_url):
        request_headers = {'User-Agent': ua.random}  # rotating UA avoids blocking / HTTP 400
        try:
            response = s.get(page_url, headers=request_headers)
            # BUG FIX: the original tested `r == ''`, which is never true for
            # a requests.Response, and then fell through with `pass`, parsing
            # the failed page anyway.  Check the HTTP status and skip instead.
            if response.status_code != 200:
                print(f'Status Code: {response.status_code} - {page_url}')
                links_offline.append(page_url)
                continue
            page_html = response.content.decode('utf-8')
            tree = html.fromstring(page_html)
            genres = list(set(tree.xpath(genre)))  # de-duplicate genre strings
            genres_list.append(genres)
        except (ParserError, ParseError, IndexError) as error:
            print(f'\n{error} {response.status_code}')
            links_offline.append(page_url)
            continue
    return genres_list
# Build a DataFrame with one row per game (columns = genre slots) and dump it.
df=pd.DataFrame(get_genres())
print(df)
print(links_offline)
df.to_csv('metacritic_game_ranking_test.csv', index=False) | joseorozco84/scraper | genres.py | genres.py | py | 1,910 | python | en | code | 0 | github-code | 36 |
138291184 | #!/usr/bin/env python3
from collections import deque
class BinaryTreeNode:
    """A single binary-tree node: a payload plus left/right child links."""
    def __init__(self, value):
        self.value = value
        self.left = self.right = None  # children are attached by the caller
def create_tree():
    """Build the fixed sample tree used by the demo code below (node 6 is
    intentionally absent, matching the original)."""
    nodes = {k: BinaryTreeNode(k) for k in (1, 2, 3, 4, 5, 7, 8, 9)}
    root = nodes[1]
    root.left, root.right = nodes[2], nodes[3]
    nodes[2].left, nodes[2].right = nodes[4], nodes[5]
    nodes[3].right = nodes[7]
    nodes[5].left = nodes[8]
    nodes[8].right = nodes[9]
    return root
def in_order_traversal(node):
    """Print the subtree's values in left-root-right order, comma separated,
    on one line (iterative version of the recursive original)."""
    stack = []
    current = node
    while stack or current is not None:
        # Descend as far left as possible, remembering the path.
        while current is not None:
            stack.append(current)
            current = current.left
        current = stack.pop()
        print('{0:}, '.format(current.value), end="")
        current = current.right
def is_tree_balanced(node):
    """Return True if the tree rooted at `node` is height-balanced.

    A tree is balanced when, at *every* node, the heights of the left and
    right subtrees differ by at most one.

    BUG FIX: the original compared heights only at the root and crashed with
    AttributeError on leaves (it called height() on None children); this
    version checks every node and accepts empty trees and leaves.
    """
    def _check(n):
        # Returns (height, balanced) for the subtree rooted at n;
        # an empty subtree has height -1 and is trivially balanced.
        if n is None:
            return -1, True
        lh, lb = _check(n.left)
        rh, rb = _check(n.right)
        return max(lh, rh) + 1, lb and rb and abs(lh - rh) <= 1
    return _check(node)[1]
def height(node):
    """Return the height of the tree rooted at `node`.

    Height counts edges on the longest root-to-leaf path: a leaf has
    height 0 and an empty tree has height -1.

    BUG FIX: the original raised AttributeError when passed None; treating
    the empty tree as height -1 both fixes that and collapses the original
    four-branch bookkeeping into one expression with identical results for
    every non-empty tree.
    """
    if node is None:
        return -1
    return 1 + max(height(node.left), height(node.right))
def bfs(node):
    """Print the tree's values in breadth-first (level) order.

    BUG FIX: the original guard read ``if node is not None: return``, which
    exited immediately for every real tree (and then crashed on None when it
    reached ``q.append(node)``); the condition is inverted here so an empty
    input returns silently and a real root is traversed.
    """
    if node is None:
        return
    q = deque([node])
    while q:
        current = q.popleft()
        print(current.value, end=", ")
        if current.left is not None:
            q.append(current.left)
        if current.right is not None:
            q.append(current.right)
if __name__ == '__main__':
    # Demo: print the sample tree in-order, report whether it is balanced,
    # then print it again in breadth-first order.
    in_order_traversal(create_tree())
    print("\n", end="")
    print(is_tree_balanced(create_tree()))
    bfs(create_tree())
| banginji/algorithms | misc/treebalance.py | treebalance.py | py | 1,635 | python | en | code | 0 | github-code | 36 |
32088085449 | from app.models import db, Portfolio, environment, SCHEMA
def seed_portfolio():
    """Seed demo user 1's portfolio with five fixed stock positions."""
    tsla = Portfolio(
        symbol="TSLA", user_id=1, num_shares=10, average_price=187.85
    )
    nvda= Portfolio(
        symbol="NVDA", user_id=1, num_shares=15, average_price=162.60
    )
    aapl = Portfolio(
        symbol="AAPL", user_id=1, num_shares=12, average_price=148.90
    )
    lcid = Portfolio(
        symbol="LCID", user_id=1, num_shares=30, average_price=11.50
    )
    amzn = Portfolio(
        symbol="AMZN", user_id=1, num_shares=25, average_price=97.60
    )
    # Stage all rows, then commit once.
    db.session.add(tsla)
    db.session.add(nvda)
    db.session.add(aapl)
    db.session.add(lcid)
    db.session.add(amzn)
    db.session.commit()
# Uses a raw SQL query to TRUNCATE or DELETE the users table. SQLAlchemy doesn't
# have a built in function to do this. With postgres in production TRUNCATE
# removes all the data from the table, and RESET IDENTITY resets the auto
# incrementing primary key, CASCADE deletes any dependent entities. With
# sqlite3 in development you need to instead use DELETE to remove all data and
# it will reset the primary keys for you as well.
def undo_portfolio():
    """Remove all seeded portfolio rows: TRUNCATE (with identity reset) on
    Postgres in production, plain DELETE on sqlite in development."""
    if environment == "production":
        db.session.execute(f"TRUNCATE table {SCHEMA}.portfolio RESTART IDENTITY CASCADE;")
    else:
        db.session.execute("DELETE FROM portfolio")
    db.session.commit()
| Dylluu/Ravenhood | app/seeds/portfolio.py | portfolio.py | py | 1,466 | python | en | code | 5 | github-code | 36 |
406588995 | # Run by typing python3 main.py
# Import basics
import re
import os
import pickle
# Import stuff for our web server
from flask import Flask, flash, request, redirect, url_for, render_template
from flask import send_from_directory
from flask import jsonify
from utils import get_base_url, allowed_file, and_syntax
# Import stuff for text pre-processing and models
import numpy as np
import nltk
nltk.download('punkt')
import torch
from aitextgen import aitextgen
from gensim.models import Word2Vec
# Load up the models into memory
ai = aitextgen(to_gpu=False, model_folder="models/trained_model_gpt2")
rf_model = pickle.load(open('models/random_forest_model_avg.pkl', 'rb'))
w2v_model = Word2Vec.load('models/w2v.bin')
# Regexes used by clean_text(): strip non-word chars, then anything that is
# not a lowercase letter, digit, or whitespace.
NON_ALPHANUM = re.compile(r'[\W]')
# BUG FIX: the original character class was [^a-z0-1\s], which stripped the
# digits 2-9; the range "0-9" is clearly what was intended.
NON_ASCII = re.compile(r'[^a-z0-9\s]')
# Setup the webserver
# Port may need to be changed if there are multiple flask servers running on same server
#port = 12346
#base_url = get_base_url(port)
#app = Flask(__name__, static_url_path=base_url+'static')
# Deployment code - uncomment the following line of code when ready for production
app = Flask(__name__)
def sent_vectorizer(sent):
    """Takes in a sentence and returns the average word2vec embedding of all words
    in this sentence that are in the vocab of the model.

    Inputs:
    -------
    sent (str):
        a string of a sentence to embed

    Output:
    -------
    avgembedding (np.ndarray):
        A 100-dimension numpy vector with the average Word2Vec embedding of
        the words of ``sent`` that appear in the model's vocabulary, or the
        zero vector if no word is in the vocabulary.
    """
    sent_vec = np.zeros(100)
    numw = 0
    for w in nltk.word_tokenize(sent):
        if w in w2v_model.wv.index_to_key:
            sent_vec = np.add(sent_vec, w2v_model.wv[w])
            numw += 1
    # BUG FIX: the original divided unconditionally, so a sentence with no
    # in-vocabulary words triggered a divide-by-zero and returned NaN/inf;
    # fall back to the zero vector instead.
    if numw == 0:
        return sent_vec
    return sent_vec / numw
def clean_text(text):
    """Normalize *text* for the classifier.

    Arguments:
    ----------
    text (str):
        Raw input text.

    Returns:
    --------
    str:
        Lower-cased text with punctuation replaced by spaces and any
        remaining disallowed characters removed.
    """
    no_punctuation = NON_ALPHANUM.sub(r' ', text.lower())
    return NON_ASCII.sub(r'', no_punctuation)
# Landing page (GET).
@app.route('/')
#@app.route(base_url)
def home():
    return render_template('Home.html', generated=None)
# Submitting the landing-page form redirects to the results page.
@app.route('/', methods=['POST'])
#@app.route(base_url, methods=['POST'])
def home_post():
    return redirect(url_for('results'))
# Static "meet the team" page.
@app.route('/team')
#@app.route(base_url + '/team')
def team():
    return render_template('Team.html', generated=None)
# Prompt form page; actual generation happens via /generate_text (AJAX).
@app.route('/results')
#@app.route(base_url + '/results')
def results():
    return render_template('Try-our-product.html', generated=None)
@app.route('/generate_text', methods=["POST"])
#@app.route(base_url + '/generate_text', methods=["POST"])
def generate_text():
    """
    View function that will return json response for generated text.

    Expects a form field 'prompt'; returns JSON with the generated lines
    and, per line, an HTML snippet showing the classifier's verdict.
    """
    prompt = request.form['prompt']
    if prompt is not None:
        prompt = str(prompt).strip()
    # Sample two completions from the fine-tuned GPT-2 model.
    generated = ai.generate(
        n=2,
        batch_size=4,
        prompt=prompt,
        max_length=20,
        temperature=1.0,
        top_p=0.9,
        return_as_list=True
    )
    # Score each line with the random-forest model over its averaged
    # word2vec embedding; '1' means approve, '-1' means reject.
    opinions = []
    for line in generated:
        cleaned_line = clean_text(line)
        embedding = sent_vectorizer(cleaned_line).reshape(-1, 100)
        opinion = rf_model.predict(embedding).item()
        if opinion == '1':
            opinions.append('<br><i> ( Meemaw <span style=\"color: #008000\">approves</span> this message! )</i>')
        elif opinion == '-1':
            opinions.append("<br><i> ( Meemaw <span style=\"color: #E53F2E\">doesn't approve</span> this message! )</i>")
    data = {'generated_ls': generated, 'opinions': opinions}
    return jsonify(data)
if __name__ == "__main__":
    '''
    coding center code
    '''
    # IMPORTANT: change the cocalcx.ai-camp.org to the site where you are editing this file.
    website_url = 'cocalc2.ai-camp.org'
    # NOTE(review): `base_url` and `port` are commented out near the top of
    # this file, so running this script directly raises NameError here --
    # confirm the intended entry point (deployment runs Flask externally).
    print(f"Try to open\n\n    https://{website_url}" + base_url + '\n\n')
    app.run(host = '0.0.0.0', port=port, debug=True)
    import sys; sys.exit(0)
    '''
    scaffold code
    '''
    # Only for debugging while developing
    # app.run(port=80, debug=True)
| aashishyadavally/pick-up-line-generator | app/main.py | main.py | py | 4,425 | python | en | code | 0 | github-code | 36 |
# Baekjoon problem 1969 "DNA": find the consensus string that minimizes the
# total Hamming distance to N given DNA strings (ties broken alphabetically).
import sys
sys.stdin = open('DNA.txt', 'r')  # read the test input from a file
N, M = map(int, input().split())  # N: number of DNA strings, M: string length
arr = [list(input()) for _ in range(N)]  # the strings as an N x M character grid
print(arr)
result = ''  # consensus string built column by column
ham_d = 0    # accumulated Hamming distance
for j in range(M):  # fix one column at a time
    cnt = [0, 0, 0, 0]  # counts of A, C, G, T in this column
    for i in range(N):  # tally each base down the column
        if arr[i][j] == 'A':
            cnt[0] += 1
        elif arr[i][j] == 'C':
            cnt[1] += 1
        elif arr[i][j] == 'G':
            cnt[2] += 1
        elif arr[i][j] == 'T':
            cnt[3] += 1
    max_v = 0
    for i in range(len(cnt)):  # find the highest count
        if max_v < cnt[i]:
            max_v = cnt[i]
    # On ties the alphabetically first base must win, so the branches are
    # tested in A, C, G, T order; the mismatching counts add to the distance.
    if max_v == cnt[0]:
        result += 'A'
        ham_d += cnt[1]+cnt[2]+cnt[3]
    elif max_v == cnt[1]:
        result += 'C'
        ham_d += cnt[0]+cnt[2]+cnt[3]
    elif max_v == cnt[2]:
        result += 'G'
        ham_d += cnt[0]+cnt[1]+cnt[3]
    elif max_v == cnt[3]:
        result += 'T'
        ham_d += cnt[0]+cnt[1]+cnt[2]
print(result)
print(ham_d)
'''
문제 접근 방식 : 문자열을 이중 배열로 입력을 받아 세로열마다 글자마다의 개수를 세서 최대 값을 구하고
최대 값에 해당하는 문자와 문자와 다른 수 만큼을 출력
어려웠던점:
설명이 필요한점: 생각나는 대로 해서 코드 간소화?
''' | eomsteve/algo_study | johoh0/2nd_week/1969_DNA.py | 1969_DNA.py | py | 1,569 | python | ko | code | 0 | github-code | 36 |
20968448747 | # 2. **********maria**********Создайте список, в который попадают числа,
# описывающие возрастающую последовательность.
# Порядок элементов менять нельзя
from random import choices
# функция ****choices****
# возвращает !!!!список!!! элементов длины k ,
# выбранных из последовательности (список- any_list или кортеж -any_tuple ....или range(1, num*2)....)
# с перестановкой элементов.
# Другими словами, функция используется,
# когда требуется выбрать несколько k случайных элементов
# из заданной последовательности,
# элементы не сохраняют первоначальный порядок.
def any_list(num):
    """Return `num` random values (drawn with replacement) from 1..2*num-1."""
    population = range(1, num * 2)
    return choices(population, k=num)  # k = how many random elements to draw
def seq(some_list):
    """Collect, for every start position, the greedy increasing run obtained
    by scanning right from it; only runs longer than one element are kept.
    The original element order is never changed."""
    runs = []
    for start, current_max in enumerate(some_list):
        run = [current_max]
        for value in some_list[start + 1:]:
            if value > current_max:
                current_max = value
                run.append(value)
        if len(run) > 1:  # single-element runs are not interesting
            runs.append(run)
    return runs
# Demo: build a random list and show the increasing runs.
my_list = any_list(10)
print(my_list)
# print(seq(my_list))
exit()
# NOTE(review): everything below exit() is dead code -- a hard-coded
# alternative implementation of the same run-collection idea, kept for
# reference only.
# **********nikolai****************************************
num_list = [1, 5, 2, 3, 4, 6, 1, 7]
print(num_list, end=' => ')
min_num = num_list[0]
for i in range(len(num_list)):
    order_list = []
    order_list.append(num_list[i])
    min_num = num_list[i]
    for j in range(i,len(num_list)-1):
        if num_list[j] > min_num:
            min_num = num_list[j]
            order_list.append(num_list[j])
    if len(order_list) > 1:
        print(order_list, end=' ')
| Nadzeya25/Python_GB | seminar5_Python/class_work5/task5_2.py | task5_2.py | py | 2,355 | python | ru | code | 0 | github-code | 36 |
26297648004 | from itertools import product
def create_dice(sides):
    """Return the face values 1..sides of a single die as a list."""
    return list(range(1, sides + 1))
# Read the dice configuration from the user (prompts are in Swedish:
# "enter the number of dice" / "enter the number of sides per die").
number_of_dices = int(input("Mata in antalet tärningar:\n"))
number_of_sides = int(input("Mata in antalet sidor för tärningarna:\n"))
highest_sum = number_of_sides * number_of_dices
lowest_sum = number_of_dices
dice = create_dice(number_of_sides)  # NOTE(review): built but never used below
d = {}
# One histogram bucket per achievable sum.
for i in range(lowest_sum, highest_sum+1):
    d[i] = ""
# Enumerate every possible roll (cartesian product over all dice).
res = list(product(range(1, number_of_sides + 1), repeat = number_of_dices))
# Add one '*' to the bucket matching each roll's sum.
for item in res:
    for key in d:
        if key == sum(item):
            d[key] += "*"
            break
print("Resultat:")
for i in d:
    print("{:<4}{}".format(i, d[i]))
| hadi-ansari/TDP002 | gamla_tentor_tdp002/2018/uppgift2.py | uppgift2.py | py | 730 | python | en | code | 0 | github-code | 36 |
34986315817 | from flask import Flask, render_template, request, url_for, flash, redirect
import sqlite3
from werkzeug.exceptions import abort
from flask_socketio import SocketIO
from engineio.payload import Payload
Payload.max_decode_packets = 50
def get_db_connection():
    """Open the blog's SQLite database and return a connection whose rows
    support dict-style access (sqlite3.Row)."""
    connection = sqlite3.connect('database.db')
    connection.row_factory = sqlite3.Row
    return connection
def get_post(post_id):
    """Fetch a single post row by id; abort the request with 404 if missing."""
    conn = get_db_connection()
    post = conn.execute('SELECT * FROM posts WHERE id = ?', (post_id,)).fetchone()
    conn.close()
    if post is None:
        abort(404)
    return post
app = Flask(__name__)
app.config['SECRET_KEY'] = 'your secret key'
sio = SocketIO(app)
@app.route("/")
def index():
    """Home page: list all posts."""
    conn = get_db_connection()
    posts = conn.execute('SELECT * FROM posts').fetchall()
    conn.close()
    return render_template('index.html', posts=posts)
# Static page for the collaborative drawing demo.
@app.route("/drawing")
def drawing():
    return render_template('drawing.html')
# Static page for the websocket chat demo.
@app.route("/chat")
def chat():
    return render_template('chat.html')
# Static page for the mouse-tracking demo.
@app.route("/tracking")
def tracking():
    return render_template('tracking.html')
@app.route('/<int:post_id>')
def post(post_id):
    """Detail page for one post (404 if it does not exist)."""
    post = get_post(post_id)
    return render_template('post.html', post=post)
@app.route('/create', methods=('GET', 'POST'))
def create():
    """Show the new-post form (GET) or insert the post and redirect (POST)."""
    if request.method == 'POST':
        title = request.form['title']
        content = request.form['content']
        if not title:
            flash('Title is required!')
        else:
            conn = get_db_connection()
            # Parameterized query: safe against SQL injection.
            conn.execute('INSERT INTO posts (title, content) VALUES (?, ?)', (title, content))
            conn.commit()
            conn.close()
            return redirect(url_for('index'))
    return render_template('create.html')
@app.route('/<int:id>/edit', methods=('GET', 'POST'))
def edit(id):
    """Edit an existing post; re-render the form when the title is missing."""
    post = get_post(id)
    if request.method == 'POST':
        title = request.form['title']
        content = request.form['content']
        if not title:
            flash('Title is required!')
        else:
            conn = get_db_connection()
            conn.execute('UPDATE posts SET title = ?, content = ?'
                ' WHERE id = ?',
                (title, content, id))
            conn.commit()
            conn.close()
            flash('updated!', 'success')
            return redirect(url_for('post', post_id=post['id']))
    return render_template('edit.html', post=post)
@app.route('/<int:id>/delete', methods=('POST',))
def delete(id):
    """Delete a post, flash a confirmation, and return to the index."""
    post = get_post(id)
    conn = get_db_connection()
    conn.execute('DELETE FROM posts WHERE id = ?', (id,))
    conn.commit()
    conn.close()
    flash('"{}" was successfully deleted!'.format(post['title']), 'danger')
    return redirect(url_for('index'))
@app.route("/chart")
def chartindex():
    """Aggregate post and comment counts per day and render them as a chart."""
    conn = get_db_connection()
    postRows = conn.execute('SELECT COUNT(*) AS PostCount, strftime("%d-%m-%Y", created) AS PostDate FROM posts group by strftime("%d-%m-%Y", created)').fetchall()
    commentRows = conn.execute('SELECT COUNT(*) AS CommentCount, strftime("%d-%m-%Y", created) AS CommentDate FROM comments group by strftime("%d-%m-%Y", created)').fetchall()
    conn.close()
    # sqlite3.Row objects are not JSON/template friendly; convert to dicts.
    posts = [dict(row) for row in postRows]
    comments = [dict(row) for row in commentRows]
    jsonData = {"posts": posts, "comments": comments}
    return render_template('chart.html', json=jsonData)
def messageReceived(methods=['GET', 'POST']):
    # Ack callback invoked by Socket.IO after 'my response' is delivered.
    print('message was received!!!')
@sio.on('my event')
def handle_my_custom_event(json, methods=['GET', 'POST']):
    """Echo chat events back to all connected clients as 'my response'."""
    print('received my event: ' + str(json))
    sio.emit('my response', json, callback=messageReceived)
# Last known mouse position per session id.
# NOTE(review): entries are never removed on disconnect, so this grows
# unboundedly -- confirm whether a disconnect handler should prune it.
clients = {}
@sio.on('mouse_position')
def handle_mouse_position(data):
    """Record a client's mouse position and broadcast all known positions."""
    print('received mouse position: ' + str(data) + ' sid:' + request.sid)
    clients[request.sid] = data
    sio.emit('all_coords', clients)
if __name__ == '__main__':
sio.run(app, debug=True)
| JeroenMX/LearningPython | main.py | main.py | py | 3,952 | python | en | code | 0 | github-code | 36 |
24193842042 | import pandas as pd
from pathlib import Path
from enum import Enum
pd.options.mode.chained_assignment = None # default='warn'
class Location(Enum):
    """Which side of a matchup a team is on."""
    home = 1      # team playing at its own park
    visiting = 2  # team playing away
#not used atm
def get_last_occurrence(team_id, location):
    """Return the row index of the last game in the full game log where
    `team_id` appeared at the given `location` (home or visiting)."""
    data_folder = Path("../")
    all_game_file = data_folder / "_mlb_remerged_all.csv"
    df_game_logs = pd.read_csv(all_game_file, index_col=0)
    if location == Location.home:
        last_occurrence = df_game_logs.where(df_game_logs['Home team'] == team_id).last_valid_index()
        print('home')
    else:
        last_occurrence = df_game_logs.where(df_game_logs['Visiting team'] == team_id).last_valid_index()
        print('away')
    return last_occurrence
#not used atm
def get_all_teams():
    """Print the unique team IDs found in the game log and how many there are."""
    data_folder = Path("../")
    all_game_file = data_folder / "_mlb_remerged_all.csv"
    df_teams = pd.read_csv(all_game_file)
    df_unique_id = df_teams['teamID'].unique()
    print(df_unique_id)
    print(df_unique_id.size)
def get_team_data(home_id, visit_id):
    """Build and print one feature row combining the home team's "Home*"
    columns from its most recent home game with the visiting team's
    "Visiting*" columns from its most recent away game."""
    data_folder = Path("../")
    all_game_file = data_folder / "_mlb_remerged_all.csv"
    df_all_games = pd.read_csv(all_game_file)
    print("Number of Columns: " + str(len(df_all_games.columns)-2))
    df_columns = df_all_games.columns.values.tolist()
    home_team_columns = [i for i in df_columns if "Home" in i]
    print("Number of Home Columns: " + str(len(home_team_columns)))
    visiting_team_columns = [i for i in df_columns if "Visiting" in i]
    print("Number of Visiting Columns: " + str(len(visiting_team_columns)))
    last_occurrence_home = df_all_games.where(df_all_games['Home team'] == home_id).last_valid_index()
    #home_team_data = df_all_games.iloc[[get_last_occurrence(home_id, Location.home)]]
    home_team_data = df_all_games.iloc[[last_occurrence_home]]
    home_team_to_home_column = home_team_data[home_team_columns]
    last_occurrence_away = df_all_games.where(df_all_games['Visiting team'] == visit_id).last_valid_index()
    #visiting_team_data = df_all_games.iloc[[get_last_occurrence(visit_id, Location.visiting)]]
    visiting_team_data = df_all_games.iloc[[last_occurrence_away]]
    visiting_team_to_visiting_column = visiting_team_data[visiting_team_columns]
    # Align the row indices so the two one-row frames concatenate side by side.
    df_merged_data = pd.concat([home_team_to_home_column,
                                visiting_team_to_visiting_column.set_index(home_team_to_home_column.index)], axis=1)
    print(df_merged_data)
get_team_data('SEA', 'DET')
#get_all_teams()
| timucini/MLB-DeepLearning-Project | Verworfen/TeamIdMerge2.py | TeamIdMerge2.py | py | 2,468 | python | en | code | 1 | github-code | 36 |
12466948381 | import smbus #dit is een library om met de i2c bus te communiceren
import time
#op de pi instaleren: i2c-tools, python-smbus en zeker ook de i2c enablen in de raspi-config
I2C_ADDR = 0x27 # I2C bus address of the display (found with `i2cdetect -y 1`)
LCD_WIDTH = 20 # width of the display in characters
LCD_CHR = 1 # mode flag: send character data
LCD_CMD = 0 # mode flag: send a command
LCD_LINE1 = 0x80 # DDRAM address of line 1
LCD_LINE2 = 0xC0 # DDRAM address of line 2
LCD_LINE3 = 0x94 # DDRAM address of line 3
LCD_LINE4 = 0xD4 # DDRAM address of line 4
LCD_BACKLIGHT = 0x08 # off = 0x00, on = 0x08
ENABLE = 0b00000100 # enable bit, pulsed to latch each nibble
# timing constants (seconds)
E_PULSE = 0.0005
E_DELAY = 0.0005
bus = smbus.SMBus(1) # open the Raspberry Pi's I2C bus 1
def lcd_init():
    # Initialization sequence for a (presumably HD44780-compatible -- TODO
    # confirm display datasheet) character LCD driven over an I2C backpack.
    lcd_byte(0x33, LCD_CMD)
    lcd_byte(0x32, LCD_CMD)
    lcd_byte(0x06, LCD_CMD)
    lcd_byte(0x0C, LCD_CMD)
    lcd_byte(0x28, LCD_CMD)
    lcd_byte(0x01, LCD_CMD)  # clear display
    time.sleep(E_DELAY)      # give the controller time to settle
def lcd_byte(bits, mode):
    # Send one byte to the display as two 4-bit nibbles.
    # mode: LCD_CHR for character data, LCD_CMD for a command.
    bits_high = mode | (bits & 0xF0) | LCD_BACKLIGHT       # high nibble first
    bits_low = mode | ((bits<<4) & 0xF0) | LCD_BACKLIGHT   # then low nibble
    bus.write_byte(I2C_ADDR, bits_high)
    lcd_toggle_enable(bits_high)  # latch the high nibble
    bus.write_byte(I2C_ADDR, bits_low)
    lcd_toggle_enable(bits_low)   # latch the low nibble
def lcd_toggle_enable(bits):
    # Pulse the enable bit high then low so the display latches the nibble.
    time.sleep(E_DELAY)
    bus.write_byte(I2C_ADDR, (bits | ENABLE))
    time.sleep(E_PULSE)
    bus.write_byte(I2C_ADDR, (bits & ~ENABLE))
    time.sleep(E_DELAY)
def lcd_string(message, line):
    # Write `message` to the line at DDRAM address `line`, space-padded to
    # the full display width so leftover characters are overwritten.
    message = message.ljust(LCD_WIDTH, " ")
    lcd_byte(line, LCD_CMD)
    for i in range(LCD_WIDTH):
        lcd_byte(ord(message[i]), LCD_CHR)
def write(lijn1,lijn2,lijn3,lijn4):
    """Re-initialize the display and write one value to each of its 4 lines."""
    lcd_init()
    lcd_string('{0}'.format(lijn1), LCD_LINE1)
    lcd_string('{0}'.format(lijn2), LCD_LINE2)
    lcd_string("{0}".format(lijn3), LCD_LINE3)
    lcd_string("{0}".format(lijn4), LCD_LINE4)
| VanbecelaereVincent/Speedometer | LCD.py | LCD.py | py | 1,807 | python | nl | code | 2 | github-code | 36 |
5216604834 | from dataclasses import dataclass
from struct import Struct
from .bytes import Bytes
KEY_HASH = Struct("<HH")
ENCRYPTED_MESSAGE = Struct("<BIIII16s16s")
@dataclass
class SignedMessage:
    """The cryptographic message portion of Session Offer.

    Wire layout, as read/written below (little-endian): flags u8,
    key_slot u8, key_mask u8, challenge length u8, challenge bytes, echo u32.
    """
    flags: int        # protocol flags byte
    key_slot: int     # key slot selector (semantics defined by the protocol)
    key_mask: int     # mask byte applied alongside the slot
    challenge: bytes  # length-prefixed challenge blob; see properties below
    echo: int         # u32 value echoed back by the peer
    @classmethod
    def read(cls, buf: Bytes) -> "SignedMessage":
        """Parse a SignedMessage from the start of `buf`."""
        buf.seek(0)
        flags = buf.u8()
        key_slot = buf.u8()
        key_mask = buf.u8()
        challenge_len = buf.u8()
        challenge = buf.read(challenge_len)
        echo = buf.u32()
        return cls(flags, key_slot, key_mask, challenge, echo)
    def write(self, buf: Bytes) -> int:
        """Serialize into `buf` (truncating it) and return the bytes written."""
        buf.seek(0)
        written = 0
        written += buf.write_u8(self.flags)
        written += buf.write_u8(self.key_slot)
        written += buf.write_u8(self.key_mask)
        written += buf.write_u8(len(self.challenge))
        written += buf.write(self.challenge)
        written += buf.write_u32(self.echo)
        buf.truncate()
        return written
    @property
    def hash_region(self) -> tuple[int, int]:
        # First two little-endian u16s of the challenge (KEY_HASH = "<HH").
        return KEY_HASH.unpack_from(self.challenge)
    @property
    def challenge_type(self) -> int:
        # Byte 4 of the challenge (right after the two u16s).
        return self.challenge[4]
    @property
    def challenge_buf(self) -> bytes:
        # Remainder of the challenge after its 5-byte header.
        return self.challenge[5:]
@dataclass
class EncryptedMessage:
    """The cryptographic message portion of Session Accept.

    Serialized via ENCRYPTED_MESSAGE = "<BIIII16s16s" (little-endian):
    one u8, four u32s, then two fixed 16-byte fields.
    """
    flags: int             # protocol flags (u8)
    key_hash: int          # u32 key hash
    challenge_answer: int  # u32 answer to the offer's challenge
    echo: int              # echoed u32 value
    timestamp: int         # u32 timestamp
    key: bytes             # 16 bytes
    nonce: bytes           # 16 bytes
    @classmethod
    def read(cls, buf: Bytes) -> "EncryptedMessage":
        """Parse an EncryptedMessage from the start of `buf`."""
        buf.seek(0)
        args = buf.read_struct(ENCRYPTED_MESSAGE)
        return cls(*args)
    def write(self, buf: Bytes) -> int:
        """Serialize into `buf` (truncating it) and return the bytes written."""
        buf.seek(0)
        written = buf.write_struct(
            ENCRYPTED_MESSAGE,
            self.flags,
            self.key_hash,
            self.challenge_answer,
            self.echo,
            self.timestamp,
            self.key,
            self.nonce,
        )
        buf.truncate()
        return written
| vbe0201/wizproxy | wizproxy/proto/handshake.py | handshake.py | py | 2,130 | python | en | code | 2 | github-code | 36 |
30461364600 | # village_id, name, x, y, idx, pts, b
import pdb
import utils
import numpy as np
if __name__ == "__main__":
    # Aggregate per-"continent" (10x10 grid of map regions) statistics for a
    # Tribal Wars-style world and plot three heat maps.
    files = utils.getLastFiles()
    villages = utils.read_villages(files["villages"])
    coords = villages['coords']
    pts = villages["points"]
    v_barb = (villages['player'] == 0)    # barbarian villages (no owner)
    v_player = (villages['player'] != 0)  # player-owned villages
    WORLD_POINTS = np.zeros([10, 10], dtype=int)   # mean points per continent
    WORLD_PLAYERS = np.zeros([10, 10], dtype=int)  # active villages per continent
    WORLD_BARB = np.zeros([10, 10], dtype=int)     # barbarian villages per continent
    for k in range(100):
        xlim, ylim = utils.contLimits(k)
        # BUG FIX: x/y were originally assigned only inside the player branch,
        # so a continent with no player villages wrote its barbarian count to
        # a stale (or, for k=0, undefined) cell.  Compute the cell up front.
        x, y = k % 10, k // 10
        validx = (coords[:, 0] >= xlim[0]) & (coords[:, 0] < xlim[1])
        validy = (coords[:, 1] >= ylim[0]) & (coords[:, 1] < ylim[1])
        valid = validx & validy & v_player
        valid_pts = pts[valid.nonzero()[0]]
        if len(valid_pts) > 0:
            WORLD_POINTS[y, x] = int(np.mean(valid_pts))
            WORLD_PLAYERS[y, x] = int(len(valid_pts))
        valid = validx & validy & v_barb
        valid_pts = pts[valid.nonzero()[0]]
        if len(valid_pts) > 0:
            WORLD_BARB[y, x] = int(len(valid_pts))
    utils.plotMat(WORLD_POINTS, "Média de pontos", False)
    utils.plotMat(WORLD_PLAYERS, "Número de Aldeias Ativas", False)
    utils.plotMat(WORLD_BARB, "Número de Bárbaras", False)
| felipecadar/tw-scripts | plot_world.py | plot_world.py | py | 1,374 | python | en | code | 1 | github-code | 36 |
35512006282 | import trimesh
import numpy as np
from sklearn.neighbors import KDTree
from trimesh.proximity import ProximityQuery
def transform_mesh(mesh, trans_name, trans_params):
    """Apply a named transformation to `mesh` and return the result.

    :param mesh: trimesh.Trimesh to transform
    :param trans_name: one of 'preprocess', 'refine', 'rotate'
    :param trans_params: dict of keyword arguments forwarded to the handler
    :raises ValueError: if `trans_name` is not a known transformation
    """
    if trans_name == 'preprocess':
        mesh = preprocess_mesh(mesh, **trans_params)
    elif trans_name == 'refine':
        mesh = refine_mesh(mesh, **trans_params)
    elif trans_name == 'rotate':
        mesh = rotate_mesh(mesh, **trans_params)
    else:
        # BUG FIX: the original used the %d specifier with a *string* name,
        # so raising the ValueError itself raised a TypeError instead.
        raise ValueError("transformation type %s is not defined" % trans_name)
    return mesh
def preprocess_mesh(mesh,
                    mesh_out=None,
                    merge_vertex=True,
                    with_scaling_to_unit_box=True,
                    scaler=2):
    """
    process the mesh to ensure it is watertight and fits a unit cube [-1,1]^3

    :param mesh: trimesh.Trimesh to normalize (modified in place)
    :param mesh_out: optional path; when given, the result is exported as OBJ
    :param merge_vertex: merge duplicate vertices before the watertight check
    :param with_scaling_to_unit_box: center the mesh and scale its longest
        bounding-box edge to `scaler` (2 -> cube [-1, 1]^3)
    :param scaler: target edge length of the bounding cube
    :raises ValueError: if the mesh is not watertight
    """
    if merge_vertex:
        mesh.merge_vertices(merge_tex=True, merge_norm=True)
    if not mesh.is_watertight:
        raise ValueError('mesh is not watertight')
    if with_scaling_to_unit_box:
        # Translate the bounding-box center to the origin, then scale so the
        # largest extent equals `scaler`.
        s1 = mesh.bounding_box.centroid
        s2 = scaler / np.max(mesh.bounding_box.extents)
        new_vertices = mesh.vertices - s1
        mesh.vertices = new_vertices * s2
    if mesh_out is not None:
        with open(mesh_out, 'w') as fid:
            mesh.export(fid, file_type='obj')
    return mesh
def refine_mesh(mesh,
                mesh_out=None,
                mesh_refine_size=0.1,
                show=False):
    """
    Generate a refined surface mesh and optionally export it as OBJ.

    :param mesh: input trimesh mesh.
    :param mesh_out: optional output path for the refined mesh (OBJ format).
    :param mesh_refine_size: target element size passed to the refiner.
    :param show: forwarded to the refiner (presumably a visualization flag).

    NOTE(review): ``refine_surface_mesh`` is not defined or imported anywhere
    in this module's visible code — confirm it is provided elsewhere,
    otherwise this raises NameError at call time.
    """
    refined_mesh = refine_surface_mesh(mesh, mesh_size=mesh_refine_size, show=show)
    if mesh_out is not None:
        with open(mesh_out, 'w') as fid:
            refined_mesh.export(fid, file_type='obj')
    return refined_mesh
def rotate_mesh(mesh,
                matrix=None):
    """Apply a rigid transform to the mesh in place; a random rotation is
    drawn when no matrix is supplied. Returns the (mutated) mesh."""
    rotation = trimesh.transformations.random_rotation_matrix() if matrix is None else matrix
    mesh.apply_transform(rotation)
    return mesh
def get_volume_points_randomly(n_points, scaler=2):
    """Sample ``n_points`` uniform random points in the cube
    [-scaler/2, scaler/2)^3; returns an array of shape (n_points, 3)."""
    return (np.random.random((n_points, 3)) - 0.5) * scaler
def get_rasterized_points(voxel_resolution, scaler=2):
    """Regular grid of ``voxel_resolution**3`` points covering the cube
    [-scaler/2, scaler/2]^3, in row-major ('ij') order, as float32
    with shape (voxel_resolution**3, 3)."""
    half_width = scaler / 2
    axis = np.linspace(-half_width, half_width, voxel_resolution)
    # 'ij' indexing is equivalent to the original meshgrid('xy') + swapaxes.
    grid = np.meshgrid(axis, axis, axis, indexing='ij')
    return np.stack(grid).reshape(3, -1).T.astype(np.float32)
def get_sdf(mesh, points):
    """Signed distances from ``points`` to ``mesh``, with the sign flipped
    relative to trimesh's ``signed_distance`` convention."""
    signed = ProximityQuery(mesh).signed_distance(points)
    return -signed
# def remove_volume_edges(node_attr, edge_idx):
# on_surface_idx = np.where(node_attr[:, -1] == 1)[0]
# mask = np.isin(edge_idx, on_surface_idx).any(axis=0)
# new_edge_idx = edge_idx[:, mask]
# return new_edge_idx
def ball_query(x1, x2, radius=0.1, min_n_edges=3, max_n_edges=50):
    """Connect each point of ``x1`` to nearby points of ``x2``.

    For every row of ``x1``, its ``k = min(len(x2), max_n_edges)`` nearest
    neighbours in ``x2`` are found; neighbours within ``radius`` are kept,
    and the closest ``min_n_edges`` are always kept regardless of distance.
    Returns an array of shape (2, n_edges): row 0 indexes into ``x1``,
    row 1 indexes into ``x2``.
    """
    k = min(len(x2), max_n_edges)
    tree = KDTree(x2)
    # dist/idx have shape (len(x1), k), sorted by increasing distance.
    dist, idx = tree.query(x1, k=k)
    s1, s2 = idx.shape
    idx = np.stack((np.tile(np.arange(s1), (s2, 1)).T, idx), axis=2).reshape(-1, 2) # get list of pairs
    indicator = dist < radius
    indicator[:, :min_n_edges] = 1 # set the minimum number of edges
    indicator = indicator.reshape(-1)
    idx = idx[indicator] # set the radius of proximity
    edges = idx.T
    return edges
def get_edges_with_ball_query(x, radius=0.1, min_n_edges=3, max_n_edges=50, n_features_to_consider=3,
                              with_volume_edges=True):
    """Build graph edges over node features ``x`` via ball queries.

    Only the first ``n_features_to_consider`` columns of ``x`` (the spatial
    coordinates) are used for neighbour search. When ``with_volume_edges``
    is False, volume-volume edges are skipped: surface nodes connect to all
    nodes, volume nodes connect only to surface nodes.

    NOTE(review): the last column of ``x`` is treated as a surface indicator
    (1 == on surface). The ``edges2[0] + len(surface_points)`` offset assumes
    surface nodes occupy the first rows of ``x`` — confirm against the code
    that assembles ``x``.
    """
    points = x[:, :n_features_to_consider]
    if with_volume_edges:
        edges = ball_query(points, points, radius=radius, min_n_edges=min_n_edges, max_n_edges=max_n_edges)
    else:
        sdf_indicator = x[:, -1]
        surface_points = points[sdf_indicator == 1]
        volume_points = points[sdf_indicator != 1]
        # surface -> everything, then volume -> surface only.
        edges1 = ball_query(surface_points, points, radius=radius, min_n_edges=min_n_edges, max_n_edges=max_n_edges)
        edges2 = ball_query(volume_points, surface_points, radius=radius, min_n_edges=min_n_edges, max_n_edges=max_n_edges)
        edges2[0] = edges2[0] + len(surface_points)
        edges = np.concatenate((edges1, edges2), axis=1)
    return edges
def add_reversed_edges(edges):
    """Append the (dst, src) reversal of every edge; (2, E) -> (2, 2E)."""
    return np.concatenate([edges, np.flipud(edges)], axis=1)
def add_self_edges(edges):
    """Append one self-loop (i, i) for every node id in [0, edges.max()]."""
    n_nodes = edges.max() + 1
    loops = np.tile(np.arange(n_nodes), (2, 1))
    return np.concatenate([edges, loops], axis=1)
def compute_edge_features(x, edge_index):
    """Edge features: source-node features minus destination-node features."""
    src, dst = edge_index
    return x[src, :] - x[dst, :]
def get_mesh_edges(mesh):
    # Transposed to shape (2, n_edges) to match the edge_index convention
    # used by the other edge helpers in this module.
    return mesh.edges.T
| amaleki2/graph_sdf | src/data_utils.py | data_utils.py | py | 4,749 | python | en | code | 2 | github-code | 36 |
368888743 | from datetime import datetime
import re
import string
import pandas as pd
import time
from sympy import li
class Model_Trace_Analysis:
def __init__(self):
timestr = time.strftime("%Y%m%d_%H%M%S")
self.txt_path = './analysis/klm_bei_record/typing_log_'+str(timestr)+'.txt'
self.result_path = './analysis/klm_bei_record/human_factor_analysis_'+str(timestr)+'.xlsx'
self.f = open(self.txt_path, 'a+')
print("trace analysis initialisation")
def set_trace(self, boolTrace):
if boolTrace == True:
self.f = open(self.txt_path, 'a')
self.f.write("----------------Start a new log----------------\n")
self.f.close()
print("trace on")
else:
self.f = open(self.txt_path, 'a')
self.f.write("----------------End the log----------------\n")
self.f.close()
print("trace off")
return boolTrace
""" trace typing below """
def record_pressed_button(self, caption, wordPred, senPred, currentSen):
currentTime = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
self.f = open(self.txt_path, 'a')
self.f.write(currentTime + ' >> ' + caption + ', word pred: '+ '|'.join(wordPred).lower() + ', sentence pred: '+ '|'.join(senPred).lower()+ ', current sentence: '+ currentSen +'\n')
self.f.close()
""" trace typing above """
# Start trace analysis
""" Extract data from a line below """
def _extract_info_from_line(self, line):
dateTime = re.search(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3}', line)
# time = re.search(r'\d{2}:\d{2}:\d{2}.d{3}', line)
dateTimeObj = datetime.strptime(dateTime.group(0), '%Y-%m-%d %H:%M:%S.%f')
keyType = line[line.find('>> ')+3 : line[line.find('>> ')+3:].find(': ')+line.find('>> ')+3]
keyValue = line[line.find(keyType)+len(keyType)+2 : line.find(', word pred: ')]
if keyValue != 'Speak' and keyValue != 'Space' and keyValue != 'Clear All':
keyValue.lower()
wordPred = line[line.find(', word pred: ')+len(', word pred: ') : line.find(', sentence pred: ')].lower()
senPred = line[line.find(', sentence pred: ')+len(', sentence pred: ') : line.find(', current sentence: ')].lower()
currentSentence = line[line.find(', current sentence: ')+len(', current sentence: ') : line.find('\n')].lower()
wordPredList = wordPred.split('|')
senPredList = senPred.split('|')
logDict = { 'timeTag': dateTimeObj,
'keyType': keyType,
'keyValue': keyValue,
'wordPred': wordPredList,
'sentencePred': senPredList,
'wordPredRoundIndex': 0,
'sentencePredRoundIndex': 0,
'currentSentence': currentSentence}
return logDict
""" Extract data from a line above """
""" Run trace analyse below """
def run_trace_analyse(self, traceLogFile, T_interrupt_threshold):
# print("In model_trace_analyse using file: " + traceLogFile)
with open(traceLogFile) as f:
lines = f.readlines()
boolLogData = False
logDictList = []
for line in lines:
if 'Start a new log' in line:
boolLogData = True
continue
if 'End the log' in line:
boolLogData = False
break
if boolLogData:
logDictList.append(self._extract_info_from_line(line))
print("log dictionary list:")
print(logDictList)
# seprate by sentences
oneSentence = []
# result summary
humanFactorsDistList = []
sentenceNum = 0
for line in logDictList:
if line['keyValue'] != 'Speak':
oneSentence.append(line)
else:
oneSentence.append(line)
# Start analysis this sentence
KS_all_temp, sentenceLengthInWord, sentenceLengthInChar, T_all_temp, KS_error_correction_temp, T_error_correction_temp, KS_irra_extra_temp, T_irra_extra_temp, T_interrupt_temp = self._cal_human_factors(oneSentence, T_interrupt_threshold)
humanFactorsDist = {
'sentenceNum': sentenceNum,
'sentence': oneSentence,
'KS_all': KS_all_temp,
'sentenceLengthInWord': sentenceLengthInWord,
'sentenceLengthInChar': sentenceLengthInChar,
'T_all': T_all_temp,
'KS_error_correction': KS_error_correction_temp,
'T_error_correction': T_error_correction_temp,
'KS_irra_extra': KS_irra_extra_temp,
'T_irra_extra': T_irra_extra_temp,
'T_interrupt': T_interrupt_temp,
'entry_rate': sentenceLengthInWord/(T_all_temp/60.0),
'KS_saving_rate': (sentenceLengthInChar-KS_all_temp)/sentenceLengthInChar,
'ETRI': 1 - T_error_correction_temp/(T_all_temp-T_interrupt_temp),
'EI': KS_error_correction_temp/(KS_all_temp-KS_irra_extra_temp),
'RI': 1 - KS_irra_extra_temp/(KS_all_temp-KS_error_correction_temp),
'II_KS': 1 - (KS_error_correction_temp+KS_irra_extra_temp)/KS_all_temp,
'II_T': 1 - (T_error_correction_temp+T_irra_extra_temp+T_interrupt_temp)/T_all_temp
}
humanFactorsDistList.append(humanFactorsDist)
print('Sentence ' + str(sentenceNum) + ' human factor: ')
print('ETRI = '+str(humanFactorsDist['ETRI']))
print('EI = '+str(humanFactorsDist['EI']))
print('RI = '+str(humanFactorsDist['RI']))
print('II_KS = '+str(humanFactorsDist['II_KS']))
print('II_T = '+str(humanFactorsDist['II_T']))
oneSentence = []
sentenceNum += 1
# Overall human performance
KS_all = 0
T_all = 0.0
KS_error_correction = 0
T_error_correction = 0.0
KS_irra_extra = 0
T_irra_extra = 0.0
T_interrupt = 0.0
for hf in humanFactorsDistList:
KS_all += hf['KS_all']
T_all += hf['T_all']
KS_error_correction += hf['KS_error_correction']
T_error_correction += hf['T_error_correction']
KS_irra_extra += hf['KS_irra_extra']
T_irra_extra += hf['T_irra_extra']
T_interrupt += hf['T_interrupt']
ETRI = 1 - T_error_correction/(T_all-T_interrupt),
EI = KS_error_correction/(KS_all-KS_irra_extra),
RI = 1 - KS_irra_extra/(KS_all-KS_error_correction),
II_KS = 1 - (KS_error_correction+KS_irra_extra)/KS_all,
II_T = 1 - (T_error_correction+T_irra_extra+T_interrupt)/T_all
print('Overall human factors: ')
print('ETRI = '+str(ETRI))
print('EI = '+str(EI))
print('RI = '+str(RI))
print('II_KS = '+str(II_KS))
print('II_T = '+str(II_T))
df = pd.DataFrame.from_dict(humanFactorsDistList)
df.to_excel(self.result_path)
""" Run trace analyse above """
def _add_to_pred_pool(self, index, timeTag, predList):
dict = {}
dict['round'] = index
dict['timeTag'] = timeTag
dict['prediction'] = predList
return dict
    def _cal_human_factors(self, logDictList, T_interrupt_threshold):
        """Compute keystroke (KS_*) and time (T_*) human-factor measures for
        one sentence's worth of log entries.

        :param list logDictList: ordered entry dicts (see
            ``_extract_info_from_line``) for one sentence, ending at 'Speak'.
        :param float T_interrupt_threshold: inter-key gap (seconds) above
            which idle time counts as an interruption.
        :return: ``(KS_all, sentenceLengthInWord, sentenceLengthInChar, T_all,
            KS_error_correction, T_error_correction, KS_irra_extra,
            T_irra_extra, T_interrupt)``.

        NOTE(review): when ``logDictList`` is empty, most of the names printed
        and returned below are never assigned (NameError). Callers only pass
        non-empty sentences — confirm before reusing this elsewhere.
        """
        KS_current = 0
        sentenceLengthInWord = 0
        sentenceLengthInChar = 0
        if logDictList:
            # Total sentence time: span from first to last logged key event.
            T_all = (logDictList[-1]['timeTag'] - logDictList[0]['timeTag']).total_seconds()
            # Irrational/extra keystrokes & time: the target word/sentence was
            # already offered by a prediction but the user kept typing.
            KS_irra_extra = 0
            T_irra_extra = 0.0
            word_irra_extra_buffer = []
            # Error-correction keystrokes & time: deletions plus the retyping
            # needed to get back to the pre-error state.
            KS_error_correction = 0
            T_error_correction = 0.0
            boolDeletionCombo = False
            N_deletion_combo = 0
            errorSentence = ""
            lastSentence = ""
            T_interrupt = 0.0
            lastLogDict = {}
            currentWordPredFistShowInRound = {}
            currentSentencePredFistIrrationalAction = {}
            currentWord = ""
            currentSentence = ""
            # Prediction "pools": one entry per typing round, accumulated
            # until the word/sentence they predict is completed.
            currentWordPredListDictList = []
            currentSentencePredListDictList = []
            currentWordPredRoundIndex = 0
            currentSentencePredRoundIndex = 0
            boolCheckWordPredIrrational = False
            boolCheckSentencePredIrrational = False
            boolFirstWord = True
            for logDict in logDictList:
                KS_current += 1
                boolFinishAWord = False
                boolFinishASentence = False
                currentSentence = logDict['currentSentence']
                if logDict.get('keyValue') == 'Space' or logDict.get('keyValue') == ',' or logDict.get('keyValue') == '.' or logDict.get('keyType') == 'word' or logDict.get('keyType') == 'sentence':
                    # indicate the end of a word
                    boolFinishAWord = True
                    boolFinishASentence = False
                if logDict.get('keyValue') == 'Speak':
                    # indicate the end of a sentence
                    boolFinishAWord = True
                    boolFinishASentence = True
                """Form a sentence below """
                if logDict.get('keyType') == 'key':
                    # Delete a letter
                    if logDict.get('keyValue') == '<-':
                        print("<-, Error sentence: "+logDict['currentSentence'])
                        if N_deletion_combo == 0:
                            errorSentence = lastSentence
                        N_deletion_combo += 1
                        if currentWord:
                            currentWord = currentWord[:-1]
                    # Typing a word
                    elif boolFinishAWord == False:
                        if N_deletion_combo != 0:
                            boolDeletionCombo = True
                        currentWord = currentWord + logDict.get('keyValue')
                        # Extend wordPred and sentencePred list
                        if boolFirstWord == True:
                            boolFirstWord = False
                        else:
                            currentWordPredRoundIndex += 1
                            currentSentencePredRoundIndex += 1
                        currentWordPredListDictList.append(self._add_to_pred_pool(index=currentWordPredRoundIndex, timeTag=logDict.get('timeTag'), predList=logDict.get('wordPred')))
                        currentSentencePredListDictList.append(self._add_to_pred_pool(index=currentSentencePredRoundIndex, timeTag=logDict.get('timeTag'), predList=logDict.get('sentencePred')))
                        # Rebuild the human-readable prediction-pool strings
                        # for the trace output below.
                        currentWordPredPool = ''
                        currentSentencePredPool = ''
                        currentWordPredPoolDictList = []
                        currentSentencePoolDictList = []
                        for currentWordPred in currentWordPredListDictList:
                            currentWordPredPool = currentWordPredPool + str(currentWordPred['round']) +': '+ '||'.join(currentWordPred['prediction']) + '; '
                            tempDict = {
                                'round': currentWordPred['round'],
                                'prediction': currentWordPred['prediction']
                            }
                            currentWordPredPoolDictList.append(tempDict)
                        for currentSentencePred in currentSentencePredListDictList:
                            currentSentencePredPool = currentSentencePredPool + str(currentSentencePred['round']) + ': ' + '||'.join(currentSentencePred['prediction'])+ '; '
                            tempDict = {
                                'round': currentSentencePred['round'],
                                'prediction': currentSentencePred['prediction']
                            }
                            currentSentencePoolDictList.append(tempDict)
                        print('Typing a word, currentWord: '+currentWord)
                        print('    current word prediction: '+'|'.join(logDict.get('wordPred')))
                        print('    current sentence prediction: '+'|'.join(logDict.get('sentencePred')))
                        print('    current word prediction pool: '+currentWordPredPool)
                        print('    current sentence prediction pool: '+currentSentencePredPool)
                        print('    current sentence: '+currentSentence)
                        # currentSentence = logDict['currentSentence']
                # A deletion run just ended: attribute the deleted keystrokes
                # and the time spent back to error correction.
                if boolDeletionCombo == True:
                    # Find when errorStartSentence first shows in log
                    errorStart = 0
                    if N_deletion_combo > 1:
                        errorStartSentence = errorSentence[:-(N_deletion_combo-1)]
                    else:
                        errorStartSentence = errorSentence
                    for tempLog in logDictList[:KS_current]:
                        errorStart += 1
                        if errorStartSentence == tempLog['currentSentence']:
                            break
                    # Calculate deletion and error time and KS
                    if errorStart != 0:
                        # current sentence is shown before, account error KS and time
                        KS_error_correction += KS_current - errorStart
                        T_error_correction += (logDict['timeTag'] - logDictList[errorStart-1]['timeTag']).total_seconds()
                    else:
                        # current sentence is not shown before, only add correction KS and time (select a pred word but delete part of it)
                        KS_error_correction += N_deletion_combo
                        T_error_correction += (logDict['timeTag'] - logDictList[KS_current-N_deletion_combo]['timeTag']).total_seconds()
                    boolDeletionCombo = False
                    N_deletion_combo = 0
                    errorSentence = ""
                if boolFinishAWord == True and boolFinishASentence == False and logDict.get('keyType') != 'sentence':
                    # A word is finished, but the sentence is not finished
                    if logDict.get('keyType') == 'word':
                        # Use word prediction to finish the word
                        currentWord = logDict.get('keyValue').lower()
                    # Check word rationality
                    currentWord = currentWord.strip().lower()
                    currentWordFinishInRound = len(currentWordPredListDictList) # Finishes in the next round that is not recorded in this list, therefore we use the maximum round in the list plus one
                    currentWordFinishTime = logDict['timeTag']
                    currentWordPredFirstIrrationalAction = {}
                    for recordByRound in currentWordPredListDictList:
                        # Record the first miss of the predicted word
                        if currentWord in recordByRound['prediction'] and len(currentWord)>1:
                            if boolCheckWordPredIrrational == True:
                                currentWordPredFirstIrrationalAction = {
                                    'round': recordByRound['round'],
                                    'timeTag': recordByRound['timeTag']}
                                KS_irra_extra = KS_irra_extra + currentWordFinishInRound - currentWordPredFirstIrrationalAction['round']
                                T_irra_extra = T_irra_extra + (currentWordFinishTime-currentWordPredFirstIrrationalAction['timeTag']).total_seconds()
                                temp_irra_extra_dict = {
                                    'round': KS_current, # in sentence level
                                    'KS_irra_extra': KS_irra_extra,
                                    'T_irra_extra': T_irra_extra
                                }
                                word_irra_extra_buffer.append(temp_irra_extra_dict)
                                print("-> Current KS_irra_extra = "+str(KS_irra_extra))
                                print("-> Current T_irra_extra = "+str(T_irra_extra))
                            break
                        boolCheckWordPredIrrational = True
                    boolCheckWordPredIrrational = False
                    # Renew wordPred list
                    currentWordPredRoundIndex = 0
                    currentWordPredListDictList = []
                    currentWordPredListDictList.append(self._add_to_pred_pool(index=currentWordPredRoundIndex, timeTag=logDict.get('timeTag'), predList=logDict.get('wordPred')))
                    # Extend sentencePred list
                    currentSentencePredRoundIndex += 1
                    currentSentencePredListDictList.append(self._add_to_pred_pool(index=currentSentencePredRoundIndex, timeTag=logDict.get('timeTag'), predList=logDict.get('sentencePred')))
                    print('A word is finished, currentWord: '+currentWord+', currentSentence: '+currentSentence+'*')
                    # print trace
                    currentWordPredPool = ''
                    currentSentencePredPool = ''
                    currentWordPredPoolDictList = []
                    currentSentencePredPoolDictList = []
                    for currentWordPred in currentWordPredListDictList:
                        currentWordPredPool = currentWordPredPool + str(currentWordPred['round']) +': '+ '||'.join(currentWordPred['prediction']) + '; '
                        tempDict = {
                            'round': currentWordPred['round'],
                            'prediction': currentWordPred['prediction']
                        }
                        currentWordPredPoolDictList.append(tempDict)
                    for currentSentencePred in currentSentencePredListDictList:
                        currentSentencePredPool = currentSentencePredPool + str(currentSentencePred['round']) + ': ' + '||'.join(currentSentencePred['prediction'])+ '; '
                        tempDict = {
                            'round': currentSentencePred['round'],
                            'prediction': currentSentencePred['prediction']
                        }
                        currentSentencePredPoolDictList.append(tempDict)
                    print('    current word prediction: '+'|'.join(logDict.get('wordPred')))
                    print('    current sentence prediction: '+'|'.join(logDict.get('sentencePred')))
                    print('    current word prediction pool: '+currentWordPredPool)
                    print('    current sentence prediction pool: '+currentSentencePredPool)
                    print('    current sentence: '+currentSentence)
                    currentWord = ''
                    # currentWordPredFistShowInRound = None
                    currentWordPredFistShowInRound = {}
                if boolFinishAWord == True and boolFinishASentence == False and logDict.get('keyType') == 'sentence':
                    # A word is finished, and a sentence prediction is selected
                    currentWord = ''
                    # Check sentence rationality
                    currentSentenceFinishInRound = len(currentSentencePredListDictList) # Finishes in the next round that is not recorded in this list, therefore we use the maximum round in the list plus one
                    currentSentenceFinishTime = logDict['timeTag']
                    for recordByRound in currentSentencePredListDictList:
                        if boolCheckSentencePredIrrational == True:
                            currentSentencePredFistIrrationalAction = {
                                'round': recordByRound['round'],
                                'timeTag': recordByRound['timeTag']}
                            # Check if the sentence irrational action is after any word finishment actions
                            boolIrrationalInSentenceLevel = True
                            if word_irra_extra_buffer:
                                for buffer in reversed(word_irra_extra_buffer):
                                    if buffer['round'] < currentSentencePredFistIrrationalAction['round']:
                                        boolIrrationalInSentenceLevel = False
                            if boolIrrationalInSentenceLevel == False:
                                KS_irra_extra = buffer['KS_irra_extra'] + currentSentenceFinishInRound - currentSentencePredFistIrrationalAction['round']
                                T_irra_extra = buffer['T_irra_extra'] + (currentSentenceFinishTime-currentSentencePredFistIrrationalAction['timeTag']).total_seconds()
                            else:
                                KS_irra_extra = currentSentenceFinishInRound - currentSentencePredFistIrrationalAction['round']
                                T_irra_extra = (currentSentenceFinishTime-currentSentencePredFistIrrationalAction['timeTag']).total_seconds()
                            print("-> Current KS_irra_extra = "+str(KS_irra_extra))
                            print("-> Current T_irra_extra = "+str(T_irra_extra))
                            break
                        if currentSentence.strip() in recordByRound['prediction']:
                            boolCheckSentencePredIrrational = True
                    boolCheckSentencePredIrrational = False
                    # Renew the wordPred and sentencePred list
                    currentWordPredRoundIndex += 1
                    currentSentencePredRoundIndex += 1
                    currentWordPredListDictList.append(self._add_to_pred_pool(index=currentWordPredRoundIndex, timeTag=logDict.get('timeTag'), predList=logDict.get('wordPred')))
                    currentSentencePredListDictList.append(self._add_to_pred_pool(index=currentSentencePredRoundIndex, timeTag=logDict.get('timeTag'), predList=logDict.get('sentencePred')))
                    # print
                    currentWordPredPool = ''
                    currentSentencePredPool = ''
                    currentWordPredPoolDictList = []
                    currentSentencePredPoolDictList = []
                    for currentWordPred in currentWordPredListDictList:
                        currentWordPredPool = currentWordPredPool + str(currentWordPred['round']) +': '+ '||'.join(currentWordPred['prediction']) + '; '
                        tempDict = {
                            'round': currentWordPred['round'],
                            'prediction': currentWordPred['prediction']
                        }
                        currentWordPredPoolDictList.append(tempDict)
                    for currentSentencePred in currentSentencePredListDictList:
                        currentSentencePredPool = currentSentencePredPool + str(currentSentencePred['round']) + ': ' + '||'.join(currentSentencePred['prediction'])+ '; '
                        tempDict = {
                            'round': currentSentencePred['round'],
                            'prediction': currentSentencePred['prediction']
                        }
                        currentSentencePredPoolDictList.append(tempDict)
                    print('Select a sentence prediction: '+currentSentence)
                    print('    current word prediction: '+'|'.join(logDict.get('wordPred')))
                    print('    current sentence prediction: '+'|'.join(logDict.get('sentencePred')))
                    print('    current word prediction pool: ' + currentWordPredPool)
                    print('    current sentence prediction pool: ' + currentSentencePredPool)
                    print('    current sentence: '+currentSentence)
                    currentWordPredListDictList = []
                    currentSentencePredListDictList = []
                    currentWordPredRoundIndex = 1
                    currentSentencePredRoundIndex = 1
                    currentWordPredFistShowInRound = {}
                    currentSentencePredFistIrrationalAction = {}
                if boolFinishAWord == True and boolFinishASentence == True:
                    # A sentence is finished
                    # Set wordPred and sentencePred to []
                    currentWordPredListDictList = []
                    currentSentencePredListDictList = []
                    currentWordPredRoundIndex = 0
                    currentSentencePredRoundIndex = 0
                    sentenceLengthInWord = lastSentence.count(' ') + 1
                    sentenceLengthInChar = len(lastSentence)
                    print('A sentence is finished, currentSentence: '+lastSentence+'*')
                lastSentence = currentSentence
                """ Form a sentence above """
                """ Calculate interruption time below """
                # Assume interruption does not happen in irrational and erronous actions
                if lastLogDict:
                    timeDifference = (logDict['timeTag'] - lastLogDict['timeTag']).total_seconds()
                    if timeDifference > T_interrupt_threshold:
                        T_interrupt += timeDifference
                """ Calculate interruption time above """
                lastLogDict = logDict
        print('KS_all = '+str(KS_current))
        print('T_all = '+str(T_all))
        print('KS_error_correction = '+str(KS_error_correction))
        print('T_error_correction = '+str(T_error_correction))
        print('KS_irra_extra = '+str(KS_irra_extra))
        print('T_irra_extra = '+str(T_irra_extra))
        print('T_interrupt = '+str(T_interrupt))
        return KS_current, sentenceLengthInWord, sentenceLengthInChar, T_all, KS_error_correction, T_error_correction, KS_irra_extra, T_irra_extra, T_interrupt
| TuringFallAsleep/Tinkerable-AAC-Keyboard | develop/model_trace_analysis.py | model_trace_analysis.py | py | 25,662 | python | en | code | 0 | github-code | 36 |
73013290345 | def main():
#Entrada
x = int(input())
y = int(input())
somador = 0
#processamento
if x > y :
for i in range(y, x+1):
if i%13 !=0:
somador += i
else:
for i in range(x, y+1):
if i%13 !=0:
somador += i
print(somador)
if __name__ == '__main__':
main() | DarknessRdg/URI | iniciante/1132.py | 1132.py | py | 277 | python | en | code | 2 | github-code | 36 |
# cports build template for zxing-cpp, a multi-format 1D/2D barcode library.
pkgname = "zxing-cpp"
pkgver = "2.1.0"
pkgrel = 0
build_style = "cmake"
configure_args = [
    # Build the unit tests so the check phase can run them, but skip the
    # example programs and the blackbox tests.
    "-DBUILD_UNIT_TESTS=ON",
    "-DBUILD_EXAMPLES=OFF",
    "-DBUILD_BLACKBOX_TESTS=OFF",
    "-DBUILD_DEPENDENCIES=LOCAL",
]
hostmakedepends = ["cmake", "ninja", "pkgconf"]
checkdepends = ["gtest-devel"]
pkgdesc = "Multi-format 1D/2D barcode library"
maintainer = "q66 <q66@chimera-linux.org>"
license = "Apache-2.0"
url = "https://github.com/nu-book/zxing-cpp"
source = f"{url}/archive/refs/tags/v{pkgver}.tar.gz"
sha256 = "6d54e403592ec7a143791c6526c1baafddf4c0897bb49b1af72b70a0f0c4a3fe"
@subpackage("zxing-cpp-devel")
def _devel(self):
    # Standard -devel split (headers, pkg-config files, static libs);
    # `subpackage` and `default_devel` are injected by the cports framework.
    return self.default_devel()
| chimera-linux/cports | contrib/zxing-cpp/template.py | template.py | py | 667 | python | en | code | 119 | github-code | 36 |
29014504899 | import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from penn_treebank_reader import *
from dataset import DatasetReader
from dataset import make_batch_iterator
def get_offset_cache(length):
    """Map chart level -> offset of that level's first cell in the flattened
    lower-triangular chart for a sequence of `length` tokens."""
    total_cells = length * (1 + length) // 2
    offsets = {}
    for level in range(length):
        remaining = length - level
        cells_from_level = remaining * (1 + remaining) // 2
        offsets[level] = total_cells - cells_from_level
    return offsets


class ChartUtils(object):
    """Memoized converters between (level, pos) chart coordinates and flat
    chart indices, keyed by sentence length."""

    def __init__(self):
        super(ChartUtils, self).__init__()
        self.offset_cache = {}

    def to_offset(self, length, level):
        """Flat index of the first cell at `level`, caching per length."""
        if length not in self.offset_cache:
            self.offset_cache[length] = get_offset_cache(length)
        return self.offset_cache[length][level]

    def to_idx(self, length, level, pos):
        """Flat index of chart cell (level, pos)."""
        return self.to_offset(length, level) + pos


chart_utils = ChartUtils()
class ModelContainer(nn.Module):
    """Bundle the embedder, chart model, and loss into a single nn.Module so
    one optimizer call can collect all of their parameters."""
    def __init__(self, embed, model, loss_func):
        super(ModelContainer, self).__init__()
        self.embed = embed
        self.model = model
        self.loss_func = loss_func
class ConstituentLoss(nn.Module):
    """Cross-entropy loss over constituent labels predicted from chart cells."""

    def __init__(self, vocab_size):
        super(ConstituentLoss, self).__init__()
        self.hidden_size = 100
        self.predict = nn.Linear(self.hidden_size, vocab_size)

    def forward(self, chart, label_batch):
        """Gather the labeled chart cells, classify them, and return the
        mean cross-entropy against the gold labels."""
        cells = chart[label_batch['batch_index'], label_batch['idx_index']]
        logits = self.predict(cells)
        return nn.CrossEntropyLoss()(logits, label_batch['label_index'])
class SequenceEncoder(nn.Module):
    """Token embedding, optionally followed by one self-attention pass."""

    def __init__(self, vocab_size, self_attention=False):
        super(SequenceEncoder, self).__init__()
        self.self_attention = self_attention
        self.hidden_size = 100
        self.embed = nn.Embedding(vocab_size, self.hidden_size)
        if self.self_attention:
            self.atten_q = nn.Linear(self.hidden_size, self.hidden_size)

    def run_attention(self, h):
        # NOTE: raw dot-product scores — no softmax normalization or scaling.
        q, k, v = h, h, h
        scores = torch.matmul(self.atten_q(q), k.transpose(1, 2))
        return torch.sum(scores.unsqueeze(3) * v.unsqueeze(2), 2)

    def forward(self, x):
        h = self.embed(x)
        return self.run_attention(h) if self.self_attention else h
class ChartEncoder(nn.Module):
    """CKY-style chart encoder: builds a flat chart of span representations
    bottom-up, combining every split of each span with a learned composition
    function and soft attention over splits."""
    def __init__(self):
        super(ChartEncoder, self).__init__()
        self.hidden_size = 100
        # Composes a (left, right) pair of child representations.
        self.compose = nn.Sequential(
            nn.Linear(2*self.hidden_size, self.hidden_size),
            nn.ReLU(),
            nn.Linear(self.hidden_size, self.hidden_size))
        self.score = nn.Linear(2*self.hidden_size, 1) # TODO: Use dot product instead.

    def step(self, level):
        """Fill all chart cells at `level` (spans of level+1 tokens) from the
        already-computed lower levels."""
        N = level # number of constituent pairs.
        L = self.length - level # number of elements being computed.
        l_index, r_index = [], []
        ref_idx, ref_pos = [], []
        for idx in range(N):
            # Split each span into a left child of size idx+1 and a right
            # child covering the remainder.
            left_level = idx
            right_level = level - idx - 1
            left_offset = chart_utils.to_offset(self.length, left_level)
            right_offset = chart_utils.to_offset(self.length, right_level + 1) - L
            left_index = torch.arange(left_offset, left_offset+L)
            right_index = torch.arange(right_offset, right_offset+L)
            l_index.append(left_index)
            r_index.append(right_index)
            ref_idx.append(torch.LongTensor([idx]*L))
            ref_pos.append(torch.arange(L))
        l_index = torch.cat(l_index)
        r_index = torch.cat(r_index)
        ref_idx = torch.cat(ref_idx)
        ref_pos = torch.cat(ref_pos)
        l = self.chart.index_select(index=l_index, dim=1)
        r = self.chart.index_select(index=r_index, dim=1)
        state = torch.cat([l, r], 2)
        h_raw = self.compose(state)
        s_raw = self.score(state) # TODO: Should incorporate score from children.
        # Soft attention over the N possible splits of each span.
        s = torch.softmax(s_raw.view(self.batch_size, L, N, 1), dim=2)
        hbar = torch.sum(s * h_raw.view(self.batch_size, L, N, self.hidden_size), 2)
        # sbar = torch.sum(s * s_raw.view(self.batch_size, L, N, 1), 2)
        offset = chart_utils.to_offset(self.length, level)
        self.chart[:, offset:offset+L] = hbar

    def build_chart(self, x):
        """Allocate the flat chart, seed level 0 with the token embeddings,
        then fill each higher level in turn."""
        chart_size = self.length * (self.length + 1) // 2
        chart = torch.FloatTensor(self.batch_size, chart_size, self.hidden_size).fill_(0)
        chart[:, :self.length] = x
        self.chart = chart
        for level in range(1, self.length):
            self.step(level)

    def init_batch(self, x):
        # x: (batch, length, hidden) token representations.
        self.batch_size = x.shape[0]
        self.length = x.shape[1]

    def forward(self, x):
        # Side effect: populates self.chart; the forward return is unused.
        self.init_batch(x)
        self.build_chart(x)
        return None
class BatchManager(object):
    """Convert raw batch dicts into the tensors the chart model consumes."""

    def prepare_batch(self, batch_map):
        """Sentences (already padded to a common length) -> LongTensor."""
        return torch.LongTensor(batch_map['sentences'])

    def prepare_labels(self, batch_map):
        """Flatten the gold spans into parallel index tensors over chart cells."""
        length = len(batch_map['sentences'][0])
        for s in batch_map['sentences']:
            assert len(s) == length, 'Does not support variable length batches.'
        batch_index, idx_index, label_index = [], [], []
        for i, spans in enumerate(batch_map['labels']):
            for pos, size, label in spans:
                # A span of `size` tokens lives at chart level size - 1.
                batch_index.append(i)
                idx_index.append(chart_utils.to_idx(length, size - 1, pos))
                label_index.append(label)
        return {
            'batch_index': torch.LongTensor(batch_index),
            'idx_index': torch.LongTensor(idx_index),
            'label_index': torch.LongTensor(label_index),
        }
def main(options):
    """Train the chart encoder on the configured treebank file, printing the
    per-batch loss."""
    tr_reader = JSONLReader(options.tr_file)
    tr_dataset = DatasetReader(tr_reader, config={'max_len': options.tr_max_len}).build()
    batch_iterator = make_batch_iterator(None, tr_dataset)
    # Embedder, chart encoder, and span-label loss all share hidden_size=100.
    embed = SequenceEncoder(self_attention=options.self_attention, vocab_size=len(tr_dataset['metadata']['word2idx']))
    model = ChartEncoder()
    loss_func = ConstituentLoss(vocab_size=len(tr_dataset['metadata']['label2idx']))
    # The container exists so one optimizer sees every parameter.
    container = ModelContainer(embed, model, loss_func)
    params = container.parameters()
    optimizer = optim.Adam(params, lr=0.002, betas=(0.9, 0.999), eps=1e-8)
    print('# of sentences = {}'.format(len(tr_dataset['sentences'])))
    print('vocab size = {}'.format(len(tr_dataset['metadata']['word2idx'])))
    print('# of classes = {}'.format(len(tr_dataset['metadata']['label2idx'])))
    print(tr_dataset['metadata']['label2idx'])
    for epoch in range(options.max_epochs):
        for batch_map in batch_iterator.get_iterator():
            seq = BatchManager().prepare_batch(batch_map)
            seqh = embed(seq)
            # ChartEncoder.forward returns None; it fills model.chart instead.
            _ = model(seqh)
            label_batch = BatchManager().prepare_labels(batch_map)
            loss = loss_func(model.chart, label_batch)
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(params, 5.0)
            optimizer.step()
            print(loss.item())
# CLI entry point: parse hyperparameters and launch training.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    # Training corpus (JSONL) and the maximum sentence length to keep.
    parser.add_argument('--tr_file', default=os.path.expanduser('~/data/ptb/valid.jsonl'), type=str)
    parser.add_argument('--tr_max_len', default=10, type=int)
    parser.add_argument('--self_attention', action='store_true')
    parser.add_argument('--max_epochs', default=1000, type=int)
    options = parser.parse_args()
    main(options)
| mrdrozdov/chart-parser | train.py | train.py | py | 7,733 | python | en | code | 0 | github-code | 36 |
25452837300 | """Test whether the app increased LDs
by comparing a 7-session total against a baseline week.
"""
import os
import numpy as np
import pandas as pd
import pingouin as pg
from scipy.stats import sem
import utils
#### Choose export paths.
basename = "app_effect"
export_dir = os.path.join(utils.Config.data_directory, "results")
export_fname_data = os.path.join(export_dir, f"{basename}-data.csv")
export_fname_stats = os.path.join(export_dir, f"{basename}-stats.csv")
export_fname_timedesc = os.path.join(export_dir, f"{basename}-timedesc.csv")


################################# Load and wrangle data.

df = utils.load_data("merged")

# There might be a few dreams without a lucidity rating.
df = df.dropna(subset=["lucidSelfRating"])

# Convert boolean lucid success column to integer (1s/0s) for later math.
df["lucidSelfRating"] = df["lucidSelfRating"].astype(int)

# Shouldn't be more than 7 sessions but just to be sure.
df = df[df["sessionID"].isin([1, 2, 3, 4, 5, 6, 7])]

# Most sessions have just one trial, but some need to be aggregated into a single score.
# Sum the number of LDs for each session.
session_df = df.groupby(["subjectID", "sessionID"], as_index=False
    )["lucidSelfRating"].agg("sum")
# Reduce number of LDs to simple yes/no (1/0) lucidity. (doesn't change much, only a few have >1)
session_df["lucidSelfRating"] = session_df["lucidSelfRating"].ge(1).astype(int)

# Pivot out to a table that has sessions as columns
table = session_df.pivot(columns="sessionID", values="lucidSelfRating", index="subjectID")

# Reduce to subjects with all 7 sessions
table = table[table.notna().all(axis=1)]

# Sum across all sessions to get cumulative total amount of LDs per participant per day.
cumtable = table.cumsum(axis=1)

# Get the baseline scores for each participant and merge with session data.
baseline = df[["subjectID", "LDF"]].drop_duplicates("subjectID")
data = cumtable.merge(baseline, on="subjectID")
data = data.rename(columns={7: "app", "LDF": "baseline"})


####### Get number of days between first and 7th app use, for final sample.

final_subs = data["subjectID"].unique()
subset = df[df["subjectID"].isin(final_subs)]
subset = subset[subset["sessionID"].isin([1, 7])]
subset = subset[~subset.duplicated(subset=["subjectID", "sessionID"], keep="first")]
subset = subset[["subjectID", "sessionID", "timeStart"]].reset_index(drop=True)
subset["timeStart"] = pd.to_datetime(subset["timeStart"])
subset = subset.pivot(index="subjectID", columns="sessionID", values="timeStart")
timediff = subset[7] - subset[1]
timediff_desc = timediff.describe()
timediff_desc.to_csv(export_fname_timedesc, index=True, header=False)


####### Run statistics: paired Wilcoxon, baseline week vs 7-session app total.

a = data["baseline"].values
b = data["app"].values
stats = pg.wilcoxon(a, b).rename_axis("test")
stats.loc["Wilcoxon", "mean-n"] = len(a)  # same n for baseline and app (paired)
# BUG FIX: the original assigned "mean-app" twice with np.mean(b); the first
# assignment was presumably meant to record the baseline mean — confirm the
# column name against downstream consumers of the stats file.
stats.loc["Wilcoxon", "mean-baseline"] = np.mean(a)
stats.loc["Wilcoxon", "mean-app"] = np.mean(b)


################## Export session-level data and stats.

data.to_csv(export_fname_data, index=False, na_rep="NA")
stats.to_csv(export_fname_stats, index=True, float_format="%.4f")
| remrama/lucidapp | analyze-app_effect.py | analyze-app_effect.py | py | 3,437 | python | en | code | 0 | github-code | 36 |
39893499902 | from django.urls import path
from . import views
# app_name = ''
urlpatterns = [
    # GET: list all reviews, POST: create a review
    path('', views.review_list),
    # GET: retrieve one review, DELETE: delete it, PUT: update it
    path('<int:review_pk>/', views.review_update_delete),
    # GET: list all comments of a review (handled by comment_create)
    path('<int:review_pk>/comments/', views.comment_create),
    # GET: retrieve one comment, PUT: update it, DELETE: delete it
    path('comments/<int:comment_pk>/', views.comment_update_delete),
    ######################################################
    # Like a review
    path('<int:review_pk>/likes/', views.review_likes),
]
| chomyoenggeun/linkedmovie | server/community/urls.py | urls.py | py | 707 | python | ko | code | 0 | github-code | 36 |
13098646528 |
from io import BytesIO
from pathlib import Path
import random
from flask import Blueprint, Flask
from flask.wrappers import Response
from loft.config import Config, DebugConfig
from loft.util.id_map import IdMap
from loft.web.blueprints.api import api
rand = random.Random()
rand.seed(24242424)
def client(config: Config, api: Blueprint):
    """Build a Flask app from *config*, mount the *api* blueprint, and return its test client."""
    app = Flask(__name__)
    app.config.from_object(config)
    app.register_blueprint(api)
    return app.test_client()
def test_post():
    """Uploading a file via POST /api/files stores it under DOWNLOADS_FOLDER."""
    config = DebugConfig()
    i = IdMap()
    with client(config, api(i)) as c:
        dest = config.DOWNLOADS_FOLDER / 'test.txt'
        assert not dest.exists()
        response: Response = c.post('/api/files', data={
            'upload': (BytesIO('lorum ipsum dolor sit amet'.encode('utf8')), 'test.txt')
        })
        assert response.status_code == 200
        assert dest.exists()
        with open(dest, 'r') as f:
            assert f.read() == 'lorum ipsum dolor sit amet'
        dest.unlink()  # clean up so the test is repeatable
def test_post_duplicate_filename():
    """Uploading a name that already exists must be deduplicated (test.txt -> test_1.txt)."""
    config = DebugConfig()
    i = IdMap()
    with client(config, api(i)) as c:
        # Pre-create the conflicting file so the upload must be renamed.
        with (config.DOWNLOADS_FOLDER / 'test.txt').open('w') as f:
            f.write('hello')
        dest = config.DOWNLOADS_FOLDER / 'test_1.txt'
        assert not dest.exists()
        response: Response = c.post('/api/files', data={
            'upload': (BytesIO('lorum ipsum dolor sit amet'.encode('utf8')), 'test.txt')
        })
        assert response.status_code == 200
        assert dest.exists()
        with open(dest, 'r') as f:
            assert f.read() == 'lorum ipsum dolor sit amet'
        dest.unlink()  # clean up so the test is repeatable
def test_post_empty_filename():
    """An upload with an empty filename is stored under the fallback name 'Untitled'."""
    config = DebugConfig()
    i = IdMap()
    with client(config, api(i)) as c:
        dest = config.DOWNLOADS_FOLDER / 'Untitled'
        assert not dest.exists()
        response: Response = c.post('/api/files', data={
            'upload': (BytesIO('lorum ipsum dolor sit amet'.encode('utf8')), '')
        })
        assert response.status_code == 200
        assert dest.exists()
        with open(dest, 'r') as f:
            assert f.read() == 'lorum ipsum dolor sit amet'
        dest.unlink()  # clean up so the test is repeatable
def test_list():
    """GET /api/files lists registered files as (id, basename) pairs in insertion order."""
    config = DebugConfig()
    i = IdMap()
    i.add(Path('parent/foo.ext'))
    i.add(Path('parent/bar.ext2'))
    with client(config, api(i)) as c:
        response = c.get('/api/files')
        data = response.get_json()
        assert 'available' in data
        assert len(data['available']) == 2
        # Only the basename is exposed, never the parent directory.
        assert data['available'][0][0] == 0
        assert data['available'][0][1] == 'foo.ext'
        assert data['available'][1][0] == 1
        assert data['available'][1][1] == 'bar.ext2'
def test_list_empty():
    """GET /api/files with no registered files returns an empty 'available' list."""
    config = DebugConfig()
    i = IdMap()
    with client(config, api(i)) as c:
        response = c.get('/api/files')
        data = response.get_json()
        assert 'available' in data
        assert len(data['available']) == 0
def test_get():
    """GET /api/files/<id> serves back exactly the bytes of the registered file."""
    config = DebugConfig()
    i = IdMap()
    # Write some deterministic (seeded rand) content to a real file, register it.
    with open(config.DOCUMENTS_FOLDER / 'a.txt', 'w+') as f:
        path = Path(f.name)
        for _ in range(1, 10):
            f.write(str(rand.uniform(0, 1000)))
        assert i.add(path) == 0
        with client(config, api(i)) as c:
            l_r: Response = c.get('/api/files')
            l_data = l_r.get_json()
            assert 'available' in l_data
            assert len(l_data['available']) == 1
            assert l_data['available'][0][0] == 0
            assert l_data['available'][0][1] == path.name
            response: Response = c.get('/api/files/0')
            assert response.status_code == 200
            # Rewind and compare the download against what was written.
            f.seek(0)
            assert response.get_data(as_text=True) == f.read()
def test_get_empty():
    """GET /api/files/<id> for an unknown id returns 404."""
    config = DebugConfig()
    i = IdMap()
    with client(config, api(i)) as c:
        response: Response = c.get('/api/files/0')
        assert response.status_code == 404
| ucsb-cs148-s21/t7-local-network-file-transfer | test/web/blueprints/test_api.py | test_api.py | py | 4,074 | python | en | code | 3 | github-code | 36 |
13933428778 | import pytest
from uceasy.ioutils import load_csv, dump_config_file
@pytest.fixture
def config_example():
    """Return (config dict, expected INI text) for a section with key:value pairs."""
    config = {
        "adapters": {
            "i7": "AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC*ATCTCGTATGCCGTCTTCTGCTTG",
            "i5": "AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGTAGATCTCGGTGGTCGCCGTATCATT",
        }
    }
    # NB: the expected text is intentionally flush-left -- it is file content.
    expected = """[adapters]
i7:AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC*ATCTCGTATGCCGTCTTCTGCTTG
i5:AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGTAGATCTCGGTGGTCGCCGTATCATT
"""
    return (config, expected)
@pytest.fixture
def config_novalue():
    """Return (config dict, expected INI text) where keys have no values (None)."""
    config = {"samples": {"sample1": None, "sample2": None}}
    expected = """[samples]
sample1
sample2
"""
    return (config, expected)
def test_load_csv_returns_a_list(context):
    """load_csv must return a list of rows."""
    csv = load_csv(context["csv_file"])
    assert isinstance(csv, list)
def test_config_file_is_created(context, config_example):
    """dump_config_file writes exactly the expected INI text to disk."""
    dump_config_file(context["output"] + "test.conf", config_example[0])
    with open(context["output"] + "test.conf", "r") as fl:
        assert fl.read() == config_example[1]
| uceasy/uceasy | tests/test_ioutils.py | test_ioutils.py | py | 1,048 | python | en | code | 8 | github-code | 36 |
30526669570 | # augmentations for 2D and 2.5D
import random
import numpy as np
import SimpleITK as sitk
from src.utils.itk_tools import rotate_translate_scale2d
# Now only support list or tuple
class Compose(object):
    """Chain augmentations: each callable receives (nda, nda_type) and returns the new nda."""

    def __init__(self, augmentations):
        self._augmentations = augmentations

    def __call__(self, nda, nda_type):
        for transform in self._augmentations:
            nda = transform(nda, nda_type)
        return nda
# flip axis is array axis
# flip axis is array axis
class NPRandomFlip(object):
    """Randomly flip every array in a (nda, nda_type) tuple along the configured axes."""
    def __init__(self, axis=0, do_probability=0.5):
        """
        :param axis: array axis (or axes) eligible for flipping.
        :param do_probability: chance that the flip is applied at all.
        """
        assert isinstance(axis, (int, tuple, list)), "Axis value type must be int, tuple or list."
        self._axis = axis
        self._p = do_probability
    def __call__(self, nda, nda_type):
        if random.random() < self._p:
            # set flip params
            if isinstance(self._axis, int):
                flip_axis = self._axis
                # 3-D arrays carry a leading channel/slice dim, so spatial axes shift by one.
                flip_axis_dim3 = self._axis + 1
            elif isinstance(self._axis, (tuple, list)):
                # Pick a random non-empty subset of the candidate axes.
                flip_axis = random.sample(self._axis, random.randint(1, len(self._axis)))
                flip_axis_dim3 = tuple([i + 1 for i in flip_axis])
            else:
                # Unreachable: the assert in __init__ rejects other types.
                flip_axis = None
                flip_axis_dim3 = None
            # initialize out
            out = []
            # do flip
            for a, t in zip(nda, nda_type):
                if a.ndim == 3:
                    out.append(np.copy(np.flip(a, flip_axis_dim3)))
                else:
                    out.append(np.copy(np.flip(a, flip_axis)))
            out = tuple(out)
            return out
        else:
            return nda
# scale, translate, rotation
# scale, translate, rotation
class ITKRandomRotateTranslateScale(object):
    """Apply one random 2-D rotate/translate/scale (via SimpleITK) to every array in a tuple."""
    def __init__(self, theta=0, tx=0, ty=0, scale=0, do_probability=0.5):
        """
        :param theta: max rotation in degrees (converted to radians here).
        :param tx, ty: max translation along x / y.
        :param scale: max relative scale deviation from 1.0.
        :param do_probability: chance that the transform is applied at all.
        """
        self._theta = theta * np.pi / 180.0
        self._tx = tx
        self._ty = ty
        self._scale = scale
        self._p = do_probability

    def __call__(self, nda, nda_type):
        if random.random() < self._p:
            # One shared random [scale, theta, tx, ty] so all arrays stay aligned.
            transform_params = [(np.random.rand() * 2 - 1) * self._scale + 1,
                                (np.random.rand() * 2 - 1) * self._theta,
                                (np.random.rand() * 2 - 1) * self._tx,
                                (np.random.rand() * 2 - 1) * self._ty,
                                ]
            # initialize out
            out = []
            # Images get linear interpolation and their min as fill; labels get
            # nearest-neighbour and 0 so class ids are preserved.
            for a, t in zip(nda, nda_type):
                interpolator = "Linear" if t == "image" else "NearestNeighbor"
                default_v = float(np.amin(a)) if t == "image" else 0
                if a.ndim == 3:
                    # FIX: was `for i in a.shape[0]` which raises TypeError
                    # ('int' object is not iterable); iterate slice indices instead.
                    tmp = []
                    for i in range(a.shape[0]):
                        tmp.append(sitk.GetArrayFromImage(rotate_translate_scale2d(
                            sitk.GetImageFromArray(a[i], isVector=False), transform_params, interpolator, default_v)))
                    out.append(np.stack(tmp, axis=0))
                else:
                    out.append(sitk.GetArrayFromImage(
                        rotate_translate_scale2d(
                            sitk.GetImageFromArray(a, isVector=False), transform_params, interpolator, default_v)))
            out = tuple(out)
            return out
        else:
            return nda
| eugeneyuan/test_rep | src/data/aug2d.py | aug2d.py | py | 3,312 | python | en | code | 0 | github-code | 36 |
12133536334 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 2 14:51:00 2017
@author: trevario
Automatically plot TCSPC data using matplotlib.pyplot
"""
import glob, os
import numpy as np
import matplotlib.pyplot as plt
import csv
#%matplotlib inline
# Interactive batch plotter: for every .dat file in the chosen TCSPC directory,
# read the data (and its column headers), render a styled matplotlib figure,
# and save it next to the data as <name>.dat.png.
print("what directory are the files in?")
name = input()
os.chdir('/home/trevario/Documents/Labnotebook/TCSPC/' + str(name))
spt = glob.glob("*.dat")
for i in range(len(spt)):
    # Two header lines hold the axis labels; numeric data starts after them.
    data = np.genfromtxt(str(spt[i]),skip_header=2)
    #this size is in inches
    plt.figure(figsize=(5, 3), dpi=240)
    #get those labels right
    sps = spt[i]
    plt.title(str(sps[:-4]), fontsize=16)
    # Re-open the file to pull the x-axis label from the first column's header row.
    with open(str(sps), encoding="latin_1") as bunch:
        spamreader = csv.reader(bunch, delimiter='\t')
        col_1 = list(zip(*spamreader))[0]
        xaxis = col_1[1]
        plt.xlabel(str(xaxis), fontsize=12)
    with open(str(sps), encoding="latin_1") as bunch:
        spamreader = csv.reader(bunch, delimiter='\t')
        col_2 = list(zip(*spamreader))[1]
        yaxis = col_2[1]
        plt.ylabel(str(yaxis), fontsize=12)
    #change the plotted data if you want
    #plt.ylim(0,15000)
    #TCSPC data does not need to be inverted, ignore this line
    #plt.gca().invert_xaxis()
    #tell it you wanna change the axes
    ax = plt.subplot(1,1,1)
    #get rid of top and right axes
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    #get rid of the ticks on the top and right axis
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')
    #set size of font for data labels
    ax.tick_params(axis='both', which='major', labelsize=10)
    #set thickness of axes
    ax.spines['left'].set_linewidth(2)
    ax.spines['bottom'].set_linewidth(2)
    #make room for the labels
    plt.tight_layout()
    plt.gcf().subplots_adjust(left=0.15)
    #show the plot
    plt.plot(data[:,0],data[:,1], color = "blue", linewidth=2.0)
    #save the plot
    plt.savefig(str(spt[i]) + ".png", dpi=240)
    plt.show()
| trevhull/dataplot | tcspc.py | tcspc.py | py | 2,065 | python | en | code | 0 | github-code | 36 |
2952639119 | import visa
import time
from time import sleep
# Poll a Rigol DM3058E multimeter over VISA every 500 ms for 24 hours
# (172,800 readings) and append each DC-current sample to iLog.csv.
rm = visa.ResourceManager()
print('Connected VISA resources:')
print(rm.list_resources())
dmm = rm.open_resource('USB0::0x1AB1::0x09C4::DM3R192701216::INSTR')
print('Instrument ID (IDN:) = ', dmm.query('*IDN?'))
#print("Volts DC = ", dmm.query(":MEASure:VOLTage:DC?"))
print("DC Current = ", dmm.query(":MEASure:CURRent:DC?"))
f = open('iLog.csv','w')
fStr = "Time, DC Current, Raw\n"
f.write(fStr)
print("Poll rate = 500mS. Will run for 24 hours collecting 172,800 readings")
print("output file = iLog.csv\n\n")
print(" Seconds Count ", "DC Current", "Raw Meter Response", sep="\t|\t")
print("----------------------------------------------------------------------------------")
for x in range(0, 172800):
    rawStr = dmm.query(":MEASure:CURRent:DC?")
    iStr = rawStr
    rawStr = rawStr.replace ("\n", "")
    iStr = iStr.replace("\n", "")
    # Strip the meter's block-data prefix so the remainder parses as a float.
    # NOTE(review): "#9000000015" looks device/length specific -- TODO confirm
    # it is constant across readings on this instrument.
    iStr = iStr.replace("#9000000015", "")
    iFlt = float(iStr)
    now = time.time()
    print(now, iFlt ,rawStr, sep="\t|\t")
    fStr = str(now) + "," + str(iFlt) + "," + rawStr + "\n"
    f.write(fStr)
    sleep(.5)
f.close()
| JohnRucker/Rigol-DM3058E | test.py | test.py | py | 1,135 | python | en | code | 1 | github-code | 36 |
14203206189 | import math
from flask import render_template, request, redirect, url_for, session, jsonify
from saleapp import app, login
import utils
import cloudinary.uploader
from flask_login import login_user, logout_user, login_required
from saleapp.admin import *
from saleapp.models import UserRole
@app.route("/")
def index():
    """Home page: paginated product list, optionally filtered by keyword/category."""
    kw = request.args.get('keyword')
    cate_id = request.args.get('category_id')
    page = request.args.get('page', 1)
    products = utils.read_products(kw=kw, cate_id=cate_id, page=int(page))
    counter = utils.count_products()
    # Page count derives from the total product count and configured page size.
    return render_template("index.html",
                           products=products,
                           pages=math.ceil(counter/app.config['PAGE_SIZE']))
@app.route('/register', methods=['get', 'post'])
def user_register():
    """Registration form; on POST validates the password confirmation, optionally
    uploads the avatar to Cloudinary, creates the user, then redirects to login."""
    err_msg = ""
    if request.method.__eq__('POST'):
        name = request.form.get('name')
        username = request.form.get('username')
        password = request.form.get('password')
        email = request.form.get('email')
        confirm = request.form.get('confirm')
        avatar_path = None
        try:
            if password.strip().__eq__(confirm.strip()):
                avatar = request.files.get('avatar')
                if avatar:
                    # Store the avatar remotely; keep only its public URL.
                    res = cloudinary.uploader.upload(avatar)
                    avatar_path = res['secure_url']
                utils.add_user(name=name, username=username,
                               password=password, email=email,
                               avatar=avatar_path)
                return redirect(url_for('user_signin'))
            else:
                err_msg = 'Mat khau KHONG khop!!!'
        except Exception as ex:
            err_msg = 'He thong dang co loi: ' + str(ex)
    return render_template('register.html', err_msg=err_msg)
@app.route('/user-login', methods=['get', 'post'])
def user_signin():
    """Login form; on success logs the user in and honours the ?next= redirect."""
    err_msg = ''
    if request.method.__eq__('POST'):
        username = request.form.get('username')
        password = request.form.get('password')
        user = utils.check_login(username=username, password=password)
        if user:
            login_user(user=user)
            # 'next' holds the endpoint name the user was originally heading to.
            return redirect(url_for(request.args.get('next', 'index')))
        else:
            err_msg = 'Username hoac password KHONG chinh xac!!!'
    return render_template('login.html', err_msg=err_msg)
@app.route('/user-logout')
def user_signout():
    """Log the current user out and send them back to the login page."""
    logout_user()
    return redirect(url_for('user_signin'))
@app.route('/admin-login', methods=['post'])
def signin_admin():
    """Authenticate an admin account and redirect to the /admin backend."""
    username = request.form['username']
    password = request.form['password']
    user = utils.check_login(username=username,
                             password=password)
    # NOTE(review): when check_login fails this view falls through and returns
    # None, which Flask treats as an error -- presumably a redirect back to the
    # admin login page is intended. TODO confirm.
    if user:
        login_user(user=user)
        return redirect('/admin')
@app.context_processor
def common_response():
    """Inject categories and current cart totals into every template context."""
    return {
        'categories': utils.read_categories(),
        'cart_stats': utils.cart_stats(session.get('cart'))
    }
@login.user_loader
def user_load(user_id):
    """Flask-Login callback: resolve a session user id to a user object."""
    return utils.get_user_by_id(user_id=user_id)
@app.route("/products")
def product_list():
    """Product listing filtered by category, keyword and/or a price range."""
    cate_id = request.args.get('category_id')
    kw = request.args.get('keyword')
    from_price = request.args.get('from_price')
    to_price = request.args.get('to_price')
    products = utils.read_products(cate_id=cate_id, kw=kw,
                                   from_price=from_price, to_price=to_price)
    return render_template('product_list.html', products=products)
@app.route('/cart')
def cart():
    """Render the shopping-cart page with the current session cart's totals."""
    return render_template('cart.html',
                           cart_stats=utils.cart_stats(session.get('cart')))
@app.route('/api/add-to-cart', methods=['post'])
def add_to_cart():
    """JSON API: add one unit of a product to the session cart and return the new totals."""
    data = request.json
    # Cart keys are product ids as strings (session data is JSON-serialized).
    id = str(data.get('id'))
    name = data.get('name')
    price = data.get('price')
    cart = session.get('cart')
    if not cart:
        cart = {}
    if id in cart:
        # Already present: just bump the quantity.
        cart[id]['quantity'] = cart[id]['quantity'] + 1
    else:
        cart[id] = {
            'id': id,
            'name': name,
            'price': price,
            'quantity': 1
        }
    session['cart'] = cart
    return jsonify(utils.cart_stats(session.get('cart')))
@app.route('/api/update-cart', methods=['put'])
def update_cart():
    """JSON API: set the quantity of one cart item; 200 with new totals or 404 with an error."""
    id = str(request.json.get('id'))
    quantity = request.json.get('quantity')
    cart = session.get('cart')
    err_msg = ''
    if cart:
        if id in cart:
            cart[id]['quantity'] = quantity
            session['cart'] = cart
            return jsonify({
                'code': 200,
                'data': utils.cart_stats(cart)
            })
        else:
            err_msg = 'Khong co san pham tuong ung de cap nhat!'
    else:
        err_msg = 'Chua co gio hang!'
    # Falls through here only on failure (no cart / unknown product).
    return jsonify({
        'code': 404,
        'err_msg': err_msg
    })
@app.route('/api/delete-cart/<product_id>', methods=['delete'])
def delete_cart(product_id):
    """JSON API: remove one product from the session cart; 200 with new totals or 404."""
    cart = session.get('cart')
    err_msg = ''
    if cart:
        if product_id in cart:
            del cart[product_id]
            session['cart'] = cart
            return jsonify({
                'code': 200,
                'data': utils.cart_stats(cart)
            })
        else:
            err_msg = 'Khong co san pham tuong ung de cap nhat!'
    else:
        err_msg = 'Chua co gio hang!'
    # Falls through here only on failure (no cart / unknown product).
    return jsonify({
        'code': 404,
        'err_msg': err_msg
    })
@app.route('/api/pay', methods=['post'])
@login_required
def pay():
    """JSON API: turn the session cart into a receipt and clear the cart.

    Returns {'code': 200} on success, {'code': 404} if persisting fails.
    """
    try:
        utils.add_receipt(session.get('cart'))
        del session['cart']
    except Exception:
        # Narrowed from a bare ``except:`` which also swallowed
        # SystemExit/KeyboardInterrupt; the 404 failure response is unchanged.
        return jsonify({'code': 404})
    return jsonify({'code': 200})
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run(debug=True)
27228057071 | # Major League Baseball Statistical Analysis for Batters
import csv
def main():
    """Load the White Sox 2022 batting CSV and print the first data row (after the header).

    Returns None in all cases; prints an error message if the file is unreadable.
    """
    batters = []
    path = "/Users/jasonbarba/Projects/MLB_Statistics_Analysis/WhiteSox_Batting_2022.csv"
    try:
        with open(path, 'r') as file:
            for batter in csv.reader(file):
                batters.append(batter)
    except OSError:
        # Narrowed from a bare ``except:`` so programming errors are not
        # silently reported as "Could not open file".
        print("Could not open file")
        return None
    # Guard against an empty/header-only file instead of raising IndexError.
    if len(batters) > 1:
        print(batters[1])
    else:
        print("No data rows found")
if __name__ == "__main__":
main() | jasonbarba19/MLB_Statistics_Analysis | batter_analyze.py | batter_analyze.py | py | 422 | python | en | code | 0 | github-code | 36 |
1479958272 | from datetime import date
import numpy as np
import pandas as pd
from pandas.tseries.holiday import USFederalHolidayCalendar as calendar
from app.features_extractors.numerical import make_harmonic_features
def number_of_days_until_true(boolean_values: pd.Series, today: date) -> pd.Series:
    """Days from *today* to the next True value in the date-indexed series (NaN if empty)."""
    if boolean_values.empty:
        return np.NaN
    next_true = boolean_values[today:].idxmax()
    return (next_true - today).days
def number_of_days_after_true(boolean_values: pd.Series, today: date):
    """Days elapsed since the most recent True value at or before *today* (NaN if empty)."""
    if boolean_values.empty:
        return np.NaN
    last_true = boolean_values[:today].iloc[::-1].idxmax()
    return (today - last_true).days
def extract_calendar_features(data: pd.DataFrame) -> pd.DataFrame:
    """Build cyclic calendar features (day/month/quarter/season/weekday) plus
    US-federal-holiday distances for each row's created_at date, and merge them
    back onto the input's index."""
    # One feature row per distinct calendar day present in the data.
    df = pd.DataFrame(index=data.created_at.dt.normalize().unique())
    df['day_cos'], df['day_sin'] = make_harmonic_features(df.index.day, df.index.days_in_month)
    df['month_cos'], df['month_sin'] = make_harmonic_features(df.index.month, 12)
    df['quarter_cos'], df['quarter_sin'] = make_harmonic_features(df.index.quarter, 4)
    seasons = (df.index.month % 12 + 3) // 3 - 1
    df['season_cos'], df['season_sin'] = make_harmonic_features(seasons, 4)
    df['year'] = df.index.year
    # NOTE(review): this feeds the *year* into a 365-period harmonic; it looks
    # like df.index.dayofyear was intended. Left as-is because changing it would
    # alter the model's features -- TODO confirm with the author.
    df['dayofyear_cos'], df['dayofyear_sin'] = make_harmonic_features(df.index.year, 365)
    df['dayofweek_cos'], df['dayofweek_sin'] = make_harmonic_features(df.index.dayofweek, 7)
    df['is_weekend'] = df.index.dayofweek >= 5
    # Pad the holiday calendar by 4 months on each side so the distance
    # features are defined at the edges of the data range.
    dates_with_margin = pd.date_range(
        pd.to_datetime(df.index.min()) - pd.DateOffset(months=4),
        pd.to_datetime(df.index.max()) + pd.DateOffset(months=4))
    holidays = calendar().holidays(
        start=dates_with_margin.min(),
        end=dates_with_margin.max())
    is_holiday = pd.Series(pd.Series(dates_with_margin).isin(holidays).values, index=dates_with_margin)
    df['days_until_holidays'] = pd.Series(df.index)\
        .apply(lambda today: number_of_days_until_true(is_holiday, today)).values
    df['days_after_holidays'] = pd.Series(df.index)\
        .apply(lambda today: number_of_days_after_true(is_holiday, today)).values
    df['is_holiday'] = df.index.isin(holidays)
    # Map the per-day features back onto the original row index.
    return pd.DataFrame({'normalized_date': data.created_at.dt.normalize()}, index=data.index)\
        .merge(df.fillna(0), left_on='normalized_date', right_index=True)\
        .drop(columns=['normalized_date'])
| ahmediqtakehomes/TakeHomes | reformated_takehomes_old/doordash_1/example_submission/app/features_extractors/calendar.py | calendar.py | py | 2,283 | python | en | code | 1 | github-code | 36 |
27601063298 | import threading
# def do_this(what):
# whoami(what)
# def whoami(what):
# print("Thread %s says: %s" % (threading.current_thread(), what))
# if __name__ == '__main__':
# whoami("I'm the main program")
# for n in range(4):
# p = threading.Thread(target=do_this, args=("I'm function %s" % n,))
# p.start()
class Worker(threading.Thread):
    """Demo thread: print a greeting with its own thread name 1000 times."""
    def run(self):
        for _ in range(1000):
            print('hello from', threading.current_thread().name)
# create 10 threads
for _ in range(10):
    thread = Worker()
    # start the created thread
    thread.start()
| AlexNavidu/BillLob | test_treading/2threading.py | 2threading.py | py | 637 | python | en | code | 0 | github-code | 36 |
class GameStats():
    """Track the statistics for Alien Invasion."""
    def __init__(self, ai_settings):
        """Initialize the statistics."""
        self.ai_settings = ai_settings
        self.reset_stats()
        # Start Alien Invasion in an inactive state
        self.game_active = False
        # All-time high score (never reset between games)
        self.high_score = 0
        self.level = 1
    def reset_stats(self):
        """Initialize the statistics that can change during the game."""
        self.ships_left = self.ai_settings.ship_limit
        self.score = 0
| jGarciaGz/PythonCrashCourse_Proyecto1 | game_stats.py | game_stats.py | py | 568 | python | es | code | 0 | github-code | 36 |
16586177664 | # -*- coding: UTF-8 -*-
from flask import render_template, flash, redirect
from sqlalchemy.orm import *
from sqlalchemy import *
from flask.ext.sqlalchemy import SQLAlchemy
from flask import Flask
from flask import *
from forms import lyb
#from flask.ext.bootstrap import Bootstrap
app = Flask(__name__)
app.config.from_object('config')
app.config['SQLALCHEMY_DATABASE_URI']='mysql://dj:123456@localhost/wjh'
#bootstrap = Bootstrap(app)
db = SQLAlchemy(app)
class User(db.Model):
    """Guestbook post: a unique title plus free-form text."""
    id = db.Column(db.Integer, primary_key=True)
    # NOTE: 'titel' (sic) is the column name used throughout this module.
    titel = db.Column(db.String(300), unique=True)
    text = db.Column(db.Text)
    def __init__(self, titel,text):
        self.text = text
        self.titel=titel
db.create_all()
@app.route('/add', methods=['GET', 'POST'])
def register():
    """Create a new post from the form; on success flash and redirect to the index."""
    form = lyb(request.form)
    if request.method == 'POST' and form.validate():
        user=User(form.title.data,form.text.data)
        db.session.add(user)
        db.session.commit()
        flash('Thanks for registering')
        return redirect(url_for('index'))
    return render_template('text.html', form=form)
@app.route('/index')
def index():
    """List all posts, newest first."""
    p = User.query.order_by(User.id.desc()).all()
    return render_template('index.html',p=p)
@app.route('/post/<int:id>', methods=['GET', 'POST'])
def post(id):
    """Show a single post, 404 if the id is unknown."""
    p =User.query.get_or_404(id)
    # NOTE(review): 'index.html_1.html' is an odd template name -- presumably
    # 'index_1.html' was intended; verify against the templates folder.
    return render_template('index.html_1.html', p=p)
@app.route('/edit/<int:id>', methods=['GET', 'POST'])
def edit(id):
    """Edit an existing post; GET pre-fills the form, POST saves and redirects."""
    post = User.query.get_or_404(id)
    form = lyb(request.form)
    if request.method == 'POST' and form.validate():
        post.titel=form.title.data
        post.text=form.text.data
        db.session.add(post)
        db.session.commit()
        return redirect(url_for('index'))
    # GET (or invalid POST): pre-populate the form with the stored values.
    form.title.data=post.titel
    form.text.data=post.text
    return render_template('text_1.html', form=form,post=post)
@app.route('/delete/<int:id>', methods=['GET', 'POST'])
def delete(id):
    """Delete a post (404 if unknown) and return to the index."""
    post = User.query.get_or_404(id)
    db.session.delete(post)
    db.session.commit()
    return redirect(url_for('index'))
# Run the dev server when executed directly.
# FIX: this guard was duplicated verbatim; the second copy was dead code
# (app.run blocks) and has been removed.
if __name__ == '__main__':
    app.run('0.0.0.0',debug=True)
| wjh1234/python-scripts | flasker/app/views.py | views.py | py | 2,095 | python | en | code | 0 | github-code | 36 |
class Solution(object):
    def flipAndInvertImage(self, image):
        """
        Flip each row horizontally, then invert every bit.

        :type image: List[List[int]]
        :rtype: List[List[int]]
        """
        # 1 - bit performs the inversion (0 -> 1, 1 -> 0) on the reversed row.
        return [[1 - pixel for pixel in reversed(row)] for row in image]
| yichenfromhyrule/LeetCode | #832_FlippingAnImage.py | #832_FlippingAnImage.py | py | 362 | python | en | code | 0 | github-code | 36 |
31479728563 | import pygame
from constants import WHITE, SIZE_WALL, YELLOW, MARGIN
class Food:
    """A food pellet drawn as an ellipse on the maze grid."""
    def __init__(self, row, col, width, height, color):
        """
        :param row, col: grid cell coordinates.
        :param width, height: pellet size in pixels.
        :param color: fill color; YELLOW pellets are centered inside their cell.
        """
        self.image = pygame.Surface([width, height])
        # WHITE doubles as the transparent colorkey for the surface.
        self.image.fill(WHITE)
        self.image.set_colorkey(WHITE)
        pygame.draw.ellipse(self.image, color, [0, 0, width, height])
        self.row = row
        self.col = col
        self.rect = self.image.get_rect()
        self.rect.top = row * SIZE_WALL + MARGIN["TOP"]
        self.rect.left = col * SIZE_WALL + MARGIN["LEFT"]
        if color == YELLOW:
            # Center small (yellow) pellets within their wall-sized cell.
            self.rect.top += SIZE_WALL // 2 - height // 2
            self.rect.left += SIZE_WALL // 2 - width // 2
    def draw(self, screen):
        """Blit the pellet at its current rect position."""
        screen.blit(self.image, (self.rect.left, self.rect.top))
    def getRC(self):
        """Return the pellet's [row, col] grid position."""
        return [self.row, self.col]
| nxhawk/PacMan_AI | Source/Object/Food.py | Food.py | py | 860 | python | en | code | 0 | github-code | 36 |
30490661295 | #!/usr/bin/env python3
import os
import sys
import logging
import json
import requests
import datetime
import pyteamcity
import http.server
import validators
from urllib.parse import urlparse
config = {}
tc = None
logger = None
def initializeLogger():
    """Create and return the 'teamcity_connector' logger with an INFO-level,
    timestamp-formatted stream handler attached."""
    log = logging.getLogger('teamcity_connector')
    log.setLevel(logging.INFO)
    stream = logging.StreamHandler()
    stream.setLevel(logging.INFO)
    stream.setFormatter(
        logging.Formatter('%(asctime)s | %(name)s | %(levelname)s | %(message)s'))
    log.addHandler(stream)
    return log
class TCWebHookHandler(http.server.BaseHTTPRequestHandler):
    """HTTP handler for TeamCity webhooks: forwards each build event to Hygieia."""
    def do_POST(self):
        """Parse the webhook JSON body, process the build, and relay the
        downstream status code and text back to TeamCity."""
        contentLen = int(self.headers['Content-Length'])
        postBody=self.rfile.read(contentLen)
        postBody = postBody.decode("utf-8")
        postBody=json.loads(postBody)
        buildId=postBody['build']['buildId']
        result=processBuild(buildId)
        self.send_response(result['status_code'])
        self.end_headers()
        self.wfile.write(result['text'].encode("utf-8"))
        return
def getTeamcityConnection(user,password,url):
    """Connect to TeamCity and verify the connection.

    Returns the TeamCity client on success, or False when the server is
    unreachable (logged as an error).
    """
    url_parsed = urlparse(url)
    tc = pyteamcity.TeamCity(user,password,url_parsed.hostname,url_parsed.port)
    try:
        # A cheap round-trip to prove the credentials/host actually work.
        tc.get_server_info()
    except Exception as e:
        logger.error("can not connect to TeamCity: %s" % e)
        result = False
    else:
        result = tc
    return result
def dateTimeToTimestamp(s):
    """Convert a TeamCity timestamp string (e.g. '20200101T000000+0000')
    to epoch milliseconds, returned as an integer-formatted string."""
    parsed = datetime.datetime.strptime(s, "%Y%m%dT%H%M%S%z")
    millis = parsed.timestamp() * 1000
    return "%.0f" % millis
def processBuild(buildId):
    """Fetch a TeamCity build, map it to Hygieia's build schema, POST it to
    the Hygieia API, and return {'status_code', 'text'} from that request.

    NOTE(review): if the first tc.get_build_by_build_id call fails, the error
    is only logged and `build` stays unbound, so the code below raises
    NameError -- TODO confirm intended behavior.
    """
    try:
        build = tc.get_build_by_build_id(buildId)
        logger.debug("buildId: %s" % buildId)
        logger.debug("build: %s" % build)
    except Exception as e:
        logger.error("can not get build: %s" % e)
    try:
        buildStatistic = tc.get_build_statistics_by_build_id(buildId)
    except Exception as e:
        logger.error("can not get build statistic: %s" % e)
    try:
        changes = tc.get_changes_by_build_id(buildId)
    except Exception:
        logger.info("changes are empty for build id: %s" % buildId)
        changesEmpty = True
    else:
        changesEmpty = False
    data={}
    data['buildStatus'] = build['status']
    data['buildUrl'] = build['webUrl']
    buildStatisticProperties = buildStatistic['property']
    for buildStatisticProperty in buildStatisticProperties:
        if 'BuildDurationNetTime' == buildStatisticProperty['name']:
            data['duration'] = int(buildStatisticProperty['value'])
    data['startTime'] = dateTimeToTimestamp(build['startDate'])
    if 'finishDate' in build:
        data['endTime'] = dateTimeToTimestamp(build['finishDate'])
    # FIXME: what is instanceUrl ? set to N/A
    data['instanceUrl'] = "N/A"
    try:
        data['jobName'] = build['buildType']['projectName']
    except Exception as e:
        logger.warn("can not get project name from build type, set to N/A")
        data['jobName'] = "N/A"
    # FIXME: what is jobURL? set to webUrl
    data['jobUrl'] = build['webUrl']
    try:
        data['log'] = changes['comment']
    except Exception as e:
        data['log'] = ""
    data['niceName'] = build['buildType']['id']
    data['number'] = build['id']
    if build['triggered']['type'] == "user":
        data['startedBy'] = build['triggered']['user']['username']
    elif build['triggered']['type'] == "vcs":
        data['startedBy'] = "started by VCS trigger"
    data['sourceChangeSet'] = []
    sourceChangeSet = {}
    if changesEmpty == False:
        for changeIterator in build['lastChanges']['change']:
            try:
                change=tc.get_change_by_change_id(changeIterator['id'])
            except Exception as e:
                logger.error("can not get change with id %s" % changeIterator['id'])
            else:
                sourceChangeSet['scmRevisionNumber'] = change['version']
                sourceChangeSet['scmCommitLog'] = change['comment']
                # NOTE(review): on success the `else` branch below overwrites
                # the user.name just read with change['username'], so user.name
                # is never kept -- the except/else bodies look swapped. TODO confirm.
                try:
                    sourceChangeSet['scmAuthor'] = change['user']['name']
                except Exception as e:
                    sourceChangeSet['scmAuthor'] = ''
                    logger.info("user.name is not found for change %s, set to username" % changeIterator['id'])
                else:
                    sourceChangeSet['scmAuthor'] = change['username']
                if sourceChangeSet['scmAuthor'] == '' and build['triggered']['type'] == "vcs":
                    sourceChangeSet['scmAuthor'] = "started by VCS trigger"
                elif sourceChangeSet['scmAuthor'] == '' and build['triggered']['type'] == "user":
                    sourceChangeSet['scmAuthor'] = build['triggered']['user']['username']
                else:
                    logger.error("can not get \"triggered by\" value for buildId %s" % buildId)
                sourceChangeSet['scmCommitTimestamp'] = dateTimeToTimestamp(change['date'])
                sourceChangeSet['numberOfChanges'] = 1
                data['sourceChangeSet'].append(sourceChangeSet)
    dataJson=json.dumps(data)
    logger.debug("dataJson: %s" % dataJson)
    headers = {'Accept': 'application/json','Content-type':'application/json'}
    url=config['HYGIEIA_API_URL'] + "/build"
    request=requests.post(url, data = dataJson, headers = headers)
    logger.debug("request: %s" % request)
    logger.debug("build ID: %s" % build['id'])
    result={}
    result['status_code']=request.status_code
    result['text']=request.text
    logger.debug("result: %s" % result)
    return result
def checkEnvironmentVariables(config):
    """Populate *config* from environment variables.

    Returns True when the required URLs (HYGIEIA_API_URL, TEAMCITY_URL) are
    present and valid; credentials are optional and default to "".
    NOTE(review): "environmanet" typo in the two error messages below is a
    runtime string and left untouched here.
    """
    result = True
    config["HOST"] = "0.0.0.0"
    config['PORT'] = 80
    if "HYGIEIA_API_URL" in os.environ and validators.url(os.getenv("HYGIEIA_API_URL")):
        config['HYGIEIA_API_URL'] = os.getenv("HYGIEIA_API_URL")
    else:
        logger.error("HYGIEIA_API_URL environmanet variable is not set")
        result = False
    if "TEAMCITY_URL" in os.environ and validators.url(os.getenv("TEAMCITY_URL")):
        config['TEAMCITY_URL'] = os.getenv("TEAMCITY_URL")
    else:
        logger.error("TEAMCITY_URL environmanet variable is not set")
        result=False
    if "TEAMCITY_USER" in os.environ:
        config['TEAMCITY_USER'] = os.getenv("TEAMCITY_USER")
    else:
        logger.info("TEAMCITY_USER environment variable is not set, trying with empty")
        config['TEAMCITY_USER'] = ""
    if "TEAMCITY_PASSWORD" in os.environ:
        config['TEAMCITY_PASSWORD'] = os.getenv("TEAMCITY_PASSWORD")
    else:
        logger.info("TEAMCITY_PASSWORD environment variable is not set, trying with empty")
        config['TEAMCITY_PASSWORD'] = ""
    return result
# Entry point: build the logger, validate configuration, connect to TeamCity,
# then serve the webhook endpoint until interrupted (Ctrl-C closes cleanly).
if __name__ == '__main__':
    logger = initializeLogger()
    if checkEnvironmentVariables(config) == True:
        tc = getTeamcityConnection(config['TEAMCITY_USER'], config['TEAMCITY_PASSWORD'], config['TEAMCITY_URL'])
        if tc != False:
            httpd = http.server.HTTPServer((config['HOST'], config['PORT']), TCWebHookHandler)
            try:
                httpd.serve_forever()
            except KeyboardInterrupt:
                pass
            httpd.server_close()
| mrlov/hygieia_teamcity_collector | main.py | main.py | py | 6,631 | python | en | code | 1 | github-code | 36 |
34058973132 | from sklearn.preprocessing import Normalizer
import numpy as np
from utils import extract_face_roi_single
from Database import addNewStudent
import pickle
import os
from bson.binary import Binary
import tensorflow as tf
os.environ['CUDA_VISIBLE_DEVICES']='-1'
Normaliser = Normalizer(norm='l2')
global graph
frozen_graph="facenet_optimized.pb"
with tf.gfile.GFile(frozen_graph, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def,
input_map=None,
return_elements=None,
name="")
sess= tf.Session(graph=graph)
def get_embedding(face):
    """Run one face image through the frozen FaceNet graph.

    Standardizes the pixels (zero mean, unit std), adds a batch dimension,
    and returns the first (only) embedding row from the output tensor.
    # assumes `face` is a single HxWxC image array -- TODO confirm upstream shape
    """
    with graph.as_default():
        face=face.astype('float32')
        mean,std=face.mean(),face.std()
        face=(face-mean)/std
        face=np.expand_dims(face,axis=0)
        #embedding=model.predict(face)
        # Named tensors from the frozen graph: model output and input placeholder.
        y_pred = graph.get_tensor_by_name("Bottleneck_BatchNorm/cond/Merge:0")
        x= graph.get_tensor_by_name("input_1:0")
        feed_dict = {x: face}
        embedding=sess.run(y_pred,feed_dict)
    return embedding[0]
def get_block_embeddings(path):
    """Compute an L2-normalized 128-d embedding for every image file in *path*.

    Returns a list of 1-D arrays of length 128, one per file.
    """
    embeddingsArr=[]
    for filename in os.listdir(path):
        img=extract_face_roi_single(os.path.join(path,filename))
        img=get_embedding(img)
        # Normalizer works row-wise, so reshape to 2-D, normalize, then flatten back.
        img=np.reshape(img,(-1,2))
        img=Normaliser.transform(img)
        img=np.reshape(img,(128,))
        embeddingsArr.append(img)
    return embeddingsArr
def get_single_embedding(rollno,img,filename):
    """Embed one student's face image and store it (pickled as BSON Binary)
    in the database via addNewStudent."""
    img=extract_face_roi_single(img)
    img=get_embedding(img)
    # Same reshape/normalize/flatten dance as get_block_embeddings.
    img=np.reshape(img,(-1,2))
    img=Normaliser.transform(img)
    img=np.reshape(img,(128,))
    img=list(img)
    # Serialize for MongoDB storage; subtype 128 marks user-defined binary data.
    img= Binary(pickle.dumps(img, protocol=2), subtype=128 )
    addNewStudent(rollno,filename,img)
image_path='StudentImages/'
save_path='Embeddings/'
def prepare_data():
    """Embed every image under image_path and pickle the list to Embeddings/Embeddings.pickle."""
    embeddingArr=get_block_embeddings(image_path)
    with open(os.path.join(save_path,'Embeddings.pickle'),'wb') as f:
        pickle.dump((embeddingArr),f)
| VikasOjha666/Attendance_API_optimized | prepare_embeddings.py | prepare_embeddings.py | py | 2,065 | python | en | code | 0 | github-code | 36 |
12482854412 | #!/usr/bin/env python
# coding: utf-8
# !rm -r inference
# !pip install -r requirements.txt
import json
import sys, os
import requests
import datetime
import numpy as np
import pickle
import time
import random
import zstandard as zstd
import tarfile
import pandas as pd
import boto3
import botocore
from botocore.client import Config
import pygrib
import pydap
import xarray as xr
import h5py
from collections import defaultdict
from joblib import Parallel, delayed, parallel_backend
import subprocess
import cv2
# Credentials for rda.ucar.edu and the inference date window, read from
# simple key=value text files.
secure = dict([e.split('=') for e in open('secure.txt', 'r').read().split('\n')])
infer = dict([e.split('=') for e in open('infer.txt', 'r').read().split('\n')])
# infer = {k: v.split(',') for k, v in infer.items()}
# Competition metadata: 'tg' selects the trace-gas (NO2) track inputs.
dataset = 'tg'
labels = pd.read_csv('data_{}/train_labels.csv'.format(dataset))
grid = pd.concat( (
    pd.read_csv('data_tg/grid_metadata.csv'),
) ).drop_duplicates().reset_index(drop = True)
submission = pd.read_csv('data_{}/submission_format.csv'.format(dataset))
files = pd.read_csv('data_{}/{}_satellite_metadata{}.csv'.format(
    dataset, *(('pm25', '') if dataset == 'pm'
               else ('no2', '_0AF3h09'))))
# Attach each grid cell's city to the label/submission rows and parse times.
labels['location'] = grid.set_index('grid_id')['location'].reindex(labels.grid_id).values
labels['datetime'] = pd.to_datetime(labels.datetime)
submission['location'] = grid.set_index('grid_id').location.reindex(submission.grid_id).values
files.time_end = pd.to_datetime(files.time_end)
# Per-city (lon_min, lon_max), (lat_min, lat_max) anchors on the GFS 0.25-degree
# grid; longitudes are in the 0-360 convention (hence 360-118.25 for LA).
cities = {
    'Taipei': ( (121.5, 121.5), (25.0, 25) ),
    'Delhi': ( (77.0, 77.25), (28.75, 28.5) ),
    'LA': ((360-118.25, 360-117.75), (34.0, 34.0) )
}
feats = [
# (6, 'Maximum/Composite radar reflectivity:dB (instant):regular_ll:atmosphere:level 0', ),
# (7, 'Visibility:m (instant):regular_ll:surface:level 0', ),
(11, 'Wind speed (gust):m s**-1 (instant):regular_ll:surface:level 0', ),
(402, 'Surface pressure:Pa (instant):regular_ll:surface:level 0'),
# (404, 'Temperature:K (instant):regular_ll:surface:level 0'),
# (405, 'Soil Temperature:K (instant):regular_ll:depthBelowLandLayer:levels 0.0-0.1 m'),
(406, 'Volumetric soil moisture content:Proportion (instant):regular_ll:depthBelowLandLayer:levels 0.0-0.1 m'),
(415, '2 metre temperature:K (instant):regular_ll:heightAboveGround:level 2 m'),
(416, '2 metre specific humidity:kg kg**-1 (instant):regular_ll:heightAboveGround:level 2 m'),
# (417, '2 metre dewpoint temperature:K (instant):regular_ll:heightAboveGround:level 2 m:'),#fcst time 0 hrs:from 202001010000
(418, '2 metre relative humidity:% (instant):regular_ll:heightAboveGround:level 2 m:'), #fcst time 0 hrs:from 202001010000
(419, 'Apparent temperature:K (instant):regular_ll:heightAboveGround:level 2 m:'),#fcst time 0 hrs:from 202001010000
(420, '10 metre U wind component:m s**-1 (instant):regular_ll:heightAboveGround:level 10 m:'),#fcst time 0 hrs:from 202001010000
(421, '10 metre V wind component:m s**-1 (instant):regular_ll:heightAboveGround:level 10 m:'),#fcst time 0 hrs:from 202001010000
# (435, 'Precipitable water:kg m**-2 (instant):regular_ll:atmosphereSingleLayer:level 0 considered as a single layer'),#:fcst time 0 hrs:from 202001010000
(436, 'Cloud water:kg m**-2 (instant):regular_ll:atmosphereSingleLayer:level 0 considered as a single layer:'),#fcst time 0 hrs:from 202001010000
(437, 'Relative humidity:% (instant):regular_ll:atmosphereSingleLayer:level 0 considered as a single layer:'),#fcst time 0 hrs:from 202001010000
(438, 'Total ozone:DU (instant):regular_ll:atmosphereSingleLayer:level 0 considered as a single layer:'),#fcst time 0 hrs:from 202001010000
# (424, 'Precipitation rate:kg m**-2 s**-1 (instant):regular_ll:surface:level 0'),
# (484, 'Temperature:K (instant):regular_ll:pressureFromGroundLayer', ),
# (485, 'Relative humidity:% (instant):regular_ll:pressureFromGroundLayer:levels 3000-0 Pa', ),
# (486, 'Specific humidity:kg kg**-1 (instant):regular_ll:pressureFromGroundLayer:levels 3000-0 Pa', ),
# (487, 'U component of wind:m s**-1 (instant):regular_ll:pressureFromGroundLayer:levels 3000-0 Pa', ),
# (488, 'V component of wind:m s**-1 (instant):regular_ll:pressureFromGroundLayer:levels 3000-0 Pa', ),
# (520, 'Pressure reduced to MSL:Pa (instant):regular_ll:meanSea:level 0:', ),
]
# City-centre (lon, lat) pairs keyed by short location code, used for the
# IFS subsetting below; note these longitudes are signed (-180..180).
cities2 = {
    'tpe': ( 121.5, 25 ),
    'dl': ( 77.0, 28.5 ),
    'la': (-118.25, 34.0 )
}
coords = {'la': [('3A3IE', -117.9114, 34.1494),
('3S31A', -117.9563, 33.8142),
('7II4T', -118.0461, 34.0006),
('8BOQH', -118.4504, 34.0379),
('A2FBI', -117.4173, 34.0006),
('A5WJI', -117.9563, 33.9261),
('B5FKJ', -117.5071, 34.1123),
('C8HH7', -116.519, 33.8516),
('DHO4M', -118.3605, 34.1866),
('DJN0F', -117.6419, 34.1123),
('E5P9N', -117.5071, 34.0006),
('FRITQ', -118.1809, 33.8516),
('H96P6', -118.5402, 34.1866),
('HUZ29', -117.2825, 34.1123),
('I677K', -117.5071, 34.0751),
('IUON3', -117.7317, 34.0751),
('JNUQF', -118.2258, 33.8142),
('PG3MI', -118.2258, 34.0751),
('QH45V', -118.4504, 33.9634),
('QJHW4', -118.5402, 34.3722),
('QWDU8', -118.1359, 34.1494),
('VBLD0', -118.2258, 33.8888),
('VDUTN', -117.9114, 33.8142),
('WT52R', -116.8783, 33.9261),
('X5DKW', -117.597, 34.0379),
('Z0VWC', -118.1809, 33.7769),
('ZP1FZ', -117.8665, 34.1494),
('ZZ8JF', -117.3275, 33.6648)],
'tpe': [('1X116', 121.5033, 24.998),
('90BZ1', 121.5482, 25.0387),
('9Q6TA', 121.5482, 25.0794),
('KW43U', 121.5931, 25.0387),
('VR4WG', 121.5033, 25.0794),
('XJF9O', 121.5033, 25.0387),
('XNLVD', 121.5033, 25.1201)],
'dl': [('1Z2W7', 77.2821, 28.5664),
('6EIL6', 77.0575, 28.5664),
('7334C', 77.1024, 28.5664),
('78V83', 76.9227, 28.5664),
('7F1D1', 77.1024, 28.6058),
('8KNI6', 77.2821, 28.4874),
('90S79', 77.1922, 28.6452),
('A7UCQ', 77.2372, 28.6058),
('AZJ0Z', 77.2372, 28.724),
('C7PGV', 77.1922, 28.5269),
('CPR0W', 77.2821, 28.6846),
('D72OT', 77.1473, 28.724),
('D7S1G', 77.327, 28.6846),
('E2AUK', 77.0126, 28.6058),
('GAC6R', 77.1024, 28.7634),
('GJLB2', 77.1024, 28.4874),
('GVQXS', 77.1922, 28.6846),
('HANW9', 77.1922, 28.5664),
('HM74A', 77.1024, 28.6846),
('IUMEZ', 77.2372, 28.6452),
('KZ9W9', 77.1473, 28.6452),
('NE7BV', 77.1024, 28.8421),
('P8JA5', 77.2372, 28.5664),
('PJNW1', 77.1922, 28.724),
('PW0JT', 76.9227, 28.6846),
('S77YN', 77.0575, 28.724),
('SZLMT', 77.1473, 28.6846),
('UC74Z', 77.2821, 28.5269),
('VXNN3', 77.1473, 28.8028),
('VYH7U', 77.0575, 28.7634),
('WZNCR', 77.1473, 28.5664),
('YHOPV', 77.2821, 28.6452),
('ZF3ZW', 77.0575, 28.6846)]}
def cleanDict(d):
    """Recursively convert nested ``defaultdict`` objects into plain dicts.

    Needed before pickling results built with ``defaultdict(lambda: ...)``,
    whose lambda default factories are not picklable. Non-defaultdict
    values (including plain dicts) are returned unchanged.
    """
    if isinstance(d, defaultdict):
        return {key: cleanDict(value) for key, value in d.items()}
    return d
def processGFS(file, d):
    """Extract per-city windows of selected GFS fields from one GRIB2 file.

    For every message matching a name in `feats`, cuts a (box + d-cell halo)
    window around each city in `cities` and records (message name, city,
    (lat range, lon range), halo window values, inner-box mean).
    `d` is the halo width in grid cells.
    """
    p = pygrib.open(file)
    lat, lon = p[1].latlons()
    spots = {}
    for city, ( (lonmin, lonmax) , (latmin, latmax) ) in cities.items():
        # argmax over row/column match counts finds the grid index whose
        # latitude/longitude equals the city anchor (regular lat/lon grid).
        xmin = np.argmax( (lat == latmin).sum(axis = 1) )#[0]
        xmax = np.argmax( (lat == latmax).sum(axis = 1) )#[0]
        ymin = np.argmax( (lon == lonmin).sum(axis = 0) )#[0]
        ymax = np.argmax( (lon == lonmax).sum(axis = 0) )#[0]
        spots[city] = ((xmin, xmax), (ymin, ymax))
    data = []
    for e in p:
        # Match by substring of the GRIB message string representation.
        if any(z in str(e) for i, z in feats):
            arr = e.values
            assert arr.shape == lat.shape
            for spot, ((xmin, xmax), (ymin, ymax)) in spots.items():
                data.append( (str(e),
                              spot,
                              ((lat[xmin - d :xmax + 1 + d, ymin - d :ymax + 1 + d].min(),
                                lat[xmin - d :xmax + 1 + d, ymin - d :ymax + 1 + d].max()),
                               (lon[xmin - d :xmax + 1 + d, ymin - d :ymax + 1 + d].min(),
                                lon[xmin - d :xmax + 1 + d, ymin - d :ymax + 1 + d].max())),
                              arr[xmin - d :xmax + 1 + d, ymin - d :ymax + 1 + d].astype(np.float32),
                              arr[xmin:xmax + 1, ymin:ymax + 1].mean() ) );
    # if len(data) == 1: print(data)
    # print(data); return data
    return data
    # break;
def pullGFS(files):
    """Download GFS GRIB2 files from rda.ucar.edu, extract city windows,
    and write zstd-compressed pickles to inference/gfs-5/.

    Each file is retried up to 10 times; returns a list of per-file stats.
    NOTE(review): the login loop has no break on success, so it posts the
    login up to 10 times; and if every attempt raises, `ret` is unbound
    and the download loop below fails with NameError — confirm intent.
    """
    results = []
    for i in range(10):
        try:
            pswd = secure['password']
            values = {'email' : secure['username'], 'passwd' : pswd, 'action' : 'login'}
            login_url = 'https://rda.ucar.edu/cgi-bin/login'
            ret = requests.post(login_url, data=values)
            if ret.status_code != 200:
                print('Bad Authentication'); time.sleep(i); continue;
        except Exception as e:
            print(e)
            time.sleep(i)
    print('Downloading {} gfs files'.format(len(files)))
    # print(filelist); return;
    dspath = 'https://rda.ucar.edu/data/ds084.1/'
    save_dir = '/tmp/'
    zc = zstd.ZstdCompressor(level = 9)
    for file in files:
        start = time.time()
        for i in range(10):
            try:
                filename = dspath + file
                outfile = save_dir + os.path.basename(filename)
                print('Downloading', file)
                # Stream to disk in 1 MiB chunks to bound memory use.
                with requests.get(filename, cookies = ret.cookies,
                                  allow_redirects = True, stream=True) as r:
                    r.raise_for_status()
                    with open(outfile, 'wb') as f:
                        for chunk in r.iter_content(chunk_size=1024*1024):
                            f.write(chunk)
                s = os.path.getsize(outfile);
                # Halo width 5 cells around each city box.
                data = processGFS(outfile, 5)
                os.remove(outfile)
                pkl = pickle.dumps(data)
                compr = zc.compress(pkl)
                os.makedirs('inference/gfs-5/', exist_ok = True)
                with open('inference/gfs-5/{}'.format(os.path.basename(filename)), 'wb') as f:
                    f.write(compr)
                results.append({
                    # 'statusCode': 200,
                    'file': os.path.basename(filename),
                    'body': s/1e6, #os.path.getsize(outfile), #json.dumps('Hello from Lambda!'),
                    'outlen': len(pkl),#len(pickle.dumps(data)),
                    'outlen-compr': len(compr),#zc.compress(pickle.dumps(data))),
                    'elaspsed_time': round(time.time() - start, 1)
                    # 'data': json.dumps(data),
                }); break;
            except Exception as e:
                print(e)
                time.sleep(i)
                try: os.remove(outfile)
                except: pass;
    return results
# ERA5/IFS surface variable tags (GRIB table_parameter_shortname) used to
# build ds113.1 file names in listIFSFiles.
ifs_tags = ['128_057_uvb',
            '128_134_sp',
            '128_136_tcw',
            '128_137_tcwv',
            '128_146_sshf',
            '128_147_slhf',
            '128_164_tcc',
            '128_165_10u',
            '128_166_10v',
            '128_167_2t',
            '128_168_2d',
            '128_169_ssrd',
            '128_175_strd',
            '128_176_ssr',
            '128_177_str',
            '128_189_sund',
            '128_206_tco3',
            '128_228_tp',
            '128_243_fal',
            '128_244_fsr',
            '128_245_flsr',
            '228_246_100u',
            '228_247_100v']
def processIFS(file):
    """Aggregate one IFS netCDF variable around every grid cell of each city.

    Cuts a +/-8 deg lat, +/-10 deg lon box around each city centre, then for
    each of the 4 daily time steps computes the mean of the field inside
    circular zones of several radii around every grid cell.
    Returns {time: {grid_id: {"<field>_mean<r>": value}}} as plain dicts.
    NOTE(review): the two `assert ... if ... else ...` lines are conditional
    expressions, so for 'oper.fc' files they assert a truthy constant and
    never actually check anything — confirm intended semantics.
    """
    dataset = xr.open_dataset(file)
    vars = list(dataset.variables)
    assert len(vars) == 5 if 'oper.an' in file else 6 if 'oper.fc' in file else -1;
    # assert vars[-4:] == ['latitude', 'longitude', 'time', 'utc_date']
    field = vars[0]
    name = dataset.variables[field].attrs['long_name']
    # print(name)
    clean_name = name.lower().replace(' ', '_').replace('-', '_')
    # print(clean_name)
    sat_data = defaultdict(lambda: defaultdict(dict))
    for location, (clon, clat) in cities2.items():
        # Latitude axis is descending, hence min=north (clat+8), max=south.
        minimum_latitude = clat + 8
        minimum_longitude = (clon - 10 ) % 360
        maximum_latitude = clat - 8
        maximum_longitude = (clon + 10) % 360
        data = dataset[field].loc[{
            'latitude':slice(minimum_latitude,maximum_latitude),
            'longitude':slice(minimum_longitude,maximum_longitude)}]
        # print(data.shape)
        a = data
        v = a.values
        # Broadcast the 1-D coordinate vectors to full 2-D lat/lon grids.
        lat = np.tile( np.stack([a['latitude']], axis = 1), ( 1, v.shape[-1]))
        lon = np.tile( np.stack([a['longitude']], axis = 0), ( v.shape[-2], 1))
        assert v.shape == (4, 227, 285) if 'oper.an' in file else (2, 2, 227, 285) if 'oper.fc' in file else None
        if 'oper.an' in file:
            times = a.time.values.astype('datetime64[s]')
            assert len(times) == 4
            assert v.shape[0] == len(times)
        elif 'oper.fc' in file:
            # Forecast files: 2 initial times x 2 lead hours, keyed by
            # (start_time, timedelta) tuples and flattened to 4 steps.
            start_times = np.repeat(a.forecast_initial_time.values.astype('datetime64[s]'), 2)
            deltas = np.tile([np.timedelta64(int(h), 'h') for h in a.forecast_hour.values], 2)
            times = list(zip(start_times, deltas))
            # print(times)
            v = v.reshape(4, v.shape[-2], v.shape[-1])
        # print(times); print(deltas)
        assert v.shape[1:] == lat.shape
        assert v.shape[1:] == lon.shape
        zones = {}# defaultdict(dict)
        for tidx, t in enumerate(times):
            for grid_id, plon, plat in coords[location]:
                for r in [ 0.05, 0.1, 0.2, 0.5, 1, 2, 5]:
                    # Cache the boolean disc mask per (cell, radius).
                    if (grid_id, r) not in zones:
                        zones[(grid_id, r)] = (lat - plat) ** 2 + (lon - plon%360) ** 2 < r ** 2
                    zone = zones[(grid_id, r)]
                    # ct = len(v[tidx][zone])#.count()
                    sat_data[t][grid_id][clean_name + '_mean{}'.format(r)] = v[tidx][zone].mean() #if ct > 3 else np.nan
    # for k, v in sat_data.items():
    #     print(k, len(v))
    #     print(v['1X116'])
    def clean(d):
        # Convert nested defaultdicts to plain dicts (picklable).
        if isinstance(d, defaultdict):
            d = {k: clean(v) for k, v in d.items()}
        return d
    return clean(sat_data)
def pullIFS(files):
    """Download IFS netCDF files from rda.ucar.edu ds113.1, aggregate them
    with processIFS, and write zstd-compressed pickles to inference/ifs/.

    Mirrors pullGFS; returns a list of per-file stats.
    NOTE(review): same login-loop caveats as pullGFS — no break on success,
    and `ret` is unbound if all 10 attempts fail.
    """
    results = []
    for i in range(10):
        try:
            pswd = secure['password']
            values = {'email' : secure['username'], 'passwd' : pswd, 'action' : 'login'}
            login_url = 'https://rda.ucar.edu/cgi-bin/login'
            ret = requests.post(login_url, data=values)
            if ret.status_code != 200:
                print('Bad Authentication'); time.sleep(i); continue;
        except Exception as e:
            print(e)
            time.sleep(i)
    save_dir = '/tmp/'
    dspath = 'https://rda.ucar.edu/data/ds113.1/'
    print('Downloading {} ifs files'.format(len(files)))
    zc = zstd.ZstdCompressor(level = 9)
    for file in files:
        start = time.time()
        for i in range(10):
            try:
                filename = dspath + file
                outfile = save_dir + os.path.basename(filename)
                print('Downloading', file)
                # Stream to disk in 1 MiB chunks to bound memory use.
                with requests.get(filename, cookies = ret.cookies,
                                  allow_redirects = True, stream=True) as r:
                    r.raise_for_status()
                    with open(outfile, 'wb') as f:
                        for chunk in r.iter_content(chunk_size=1024*1024):
                            f.write(chunk)
                s = os.path.getsize(outfile);
                data = processIFS(outfile)
                os.remove(outfile)
                pkl = pickle.dumps(data)
                compr = zc.compress(pkl)
                os.makedirs('inference/ifs/', exist_ok = True)
                with open('inference/ifs/{}'.format(os.path.basename(filename)), 'wb') as f:
                    f.write(compr)
                results.append({
                    # 'statusCode': 200,
                    'file': os.path.basename(filename),
                    'body': s/1e6, #os.path.getsize(outfile), #json.dumps('Hello from Lambda!'),
                    'outlen': len(pkl),#len(pickle.dumps(data)),
                    'outlen-compr': len(compr),#zc.compress(pickle.dumps(data))),
                    'elaspsed_time': round(time.time() - start, 1)
                }); break;
            except Exception as e:
                print(e)
                time.sleep(i)
                try: os.remove(outfile)
                except: pass
    return results
# TROPOMI L2 NO2 product fields aggregated in processTropomi.
tropomi_fields = ['nitrogendioxide_tropospheric_column',
                  'nitrogendioxide_tropospheric_column_precision',
                  'air_mass_factor_troposphere',
                  'air_mass_factor_total']
def loadFileS3(row):
    """Download one satellite granule from its public S3 URL to /tmp.

    `row` is a metadata record with granule_id, us_url, cksum and
    granule_size; the downloaded file is verified against the recorded
    POSIX `cksum` checksum and size before its path is returned.
    """
    # Anonymous (unsigned) access: the bucket is public.
    my_config = Config(signature_version = botocore.UNSIGNED)
    s3a = boto3.client('s3', config = my_config)
    filename, url, cksum, sz = [row[k] for k in ['granule_id', 'us_url', 'cksum', 'granule_size']]
    print(filename, url, cksum, sz)
    file = '/tmp/' + filename
    # Split s3://bucket/key... into bucket and key.
    bucket = url.split('//')[-1].split('/')[0]
    key = '/'.join(url.split('//')[-1].split('/')[1:])
    s = s3a.download_file(bucket, key, file)
    assert ( subprocess.check_output(['cksum',file])
            .decode('utf-8').split(' ')[:2] == [str(cksum), str(sz)])
    return file
def processTropomi(hdf, location, fine = True):
    """Aggregate TROPOMI NO2 fields around each grid cell of `location`.

    Parameters
    ----------
    hdf : h5py.File
        Open TROPOMI granule; fields are read from its 'PRODUCT' group.
    location : str
        Key into the module-level `coords` mapping of grid cells.
    fine : bool, optional
        Use the finer radius ladder and no minimum-pixel-count threshold
        for the mean.

    Returns
    -------
    defaultdict
        sat_data[grid_id]['<field>_{mean,stdev,count}<r>'] per field and
        radius r (in degrees).
    """
    zones = {}; # boolean disc masks cached per (grid_id, radius)
    sat_data = defaultdict(lambda: defaultdict(dict))
    hp = hdf['PRODUCT']
    # First (and only) scanline block of the granule.
    lat = hp['latitude'][:][0]#.values
    lon = hp['longitude'][:][0]#.values
    for field in tropomi_fields:
        v = hp[field][:][0]
        # Mask fill values (stored as the array extremes) and clip negatives.
        data = np.ma.masked_array(v, (v == v.max() ) | (v == v.min())).clip(0, None)
        assert data.shape == lat.shape
        for grid_id, plon, plat in coords[location]:
            for r in ([ 0.07, 0.1, 0.14, 0.2, 0.3, 0.5, 1, 2] if fine else [ 0.1, 0.25, 0.5, 1, 2, ]):
                if (grid_id, r) not in zones:
                    zones[(grid_id, r)] = (lat - plat) ** 2 + (lon - plon) ** 2 < r ** 2
                zone = zones[(grid_id, r)]
                ct = data[zone].count()
                # BUG FIX: this used to test the truthy string literal 'fine'
                # instead of the `fine` parameter, so the minimum-count
                # threshold for the mean was always 0 even in coarse mode.
                m = data[zone].mean() if ct > (0 if fine else 3) else np.nan
                s = data[zone].std() if ct >= 3 else np.nan
                sat_data[grid_id][field + '_mean{}'.format(r)] = m
                sat_data[grid_id][field + '_stdev{}'.format(r)] = s
                sat_data[grid_id][field + '_count{}'.format(r)] = ct
    return sat_data
def pullTropomi(row, fine = True):
    """Download one TROPOMI granule, aggregate it per grid cell, and write
    a zstd-compressed pickle to inference/tropomi-fine/.

    `row` is a satellite-metadata record (dict); returns a stats dict.
    """
    results = []
    start = time.time()
    assert row['product'].startswith('tropomi')
    file = loadFileS3(row)
    hdf = h5py.File(file, 'r')
    s = os.path.getsize(file);
    sat_data = processTropomi(hdf, row['location'], fine)
    # Attach the aggregated data to a copy of the metadata row; nested
    # defaultdicts must be converted to plain dicts before pickling.
    output = row.copy()
    output['d1'] = cleanDict(sat_data)
    s3 = boto3.client('s3')
    zc = zstd.ZstdCompressor(level = 15)
    pkl = pickle.dumps(output)
    compr = zc.compress(pkl)
    filename = file.split('/')[-1]
    os.makedirs('inference/tropomi-fine/', exist_ok = True)
    with open('inference/tropomi-fine/{}'.format(filename), 'wb') as f:
        f.write(compr)
    try: os.remove(file)
    except Exception as e: print(e); pass
    return {
        # 'statusCode': 200,
        'file': os.path.basename(filename),
        'body': s/1e6, #os.path.getsize(outfile), #json.dumps('Hello from Lambda!'),
        'outlen': len(pkl),#len(pickle.dumps(data)),
        'outlen-compr': len(compr),#zc.compress(pickle.dumps(data))),
        'elaspsed_time': round(time.time() - start, 1)
    };
def loadAssim(field, location, year, month, min_day, max_day):
    """Open the GEOS-CF assimilation OPeNDAP dataset and slice `field`.

    Selects the [min_day, max_day] span of the given month and a box
    extending 3 degrees beyond the extreme grid-cell coordinates of
    `location`. Returns the (lazy) xarray DataArray slice.
    """
    url = ('https://opendap.nccs.nasa.gov/dods/gmao/geos-cf/assim/'
           'aqc_tavg_1hr_g1440x721_v1')
    ds = xr.open_dataset(url)
    t0 = np.datetime64('{}-{:02d}-{:02d} 00:00:00'.format(year, month, min_day))
    t1 = np.datetime64('{}-{:02d}-{:02d} 23:59:00'.format(year, month, max_day))
    # coords[location] rows are (grid_id, lon, lat).
    lats = [cell[-1] for cell in coords[location]]
    lons = [cell[-2] for cell in coords[location]]
    selector = {
        'time': slice(t0, t1),
        'lat': slice(min(lats) - 3, max(lats) + 3),
        'lon': slice(min(lons) - 3, max(lons) + 3),
    }
    return ds[field].loc[selector]
def processAssim(a, location, field):
    """Aggregate one hourly GEOS-CF slice around each grid cell of `location`.

    Upsamples the field (and its lat/lon grids) 5x with bilinear resize so
    the small disc masks contain enough samples, then records the mean per
    (grid cell, radius). Returns {'location', 'time_end', 'd1': nested dict};
    'd1' is empty when the slice contains fill values (1.0e15).
    """
    t = a.time.values.astype('datetime64[s]')
    sat_data = defaultdict(dict)
    v = a.values[0]
    # Fill value present anywhere -> skip this time step entirely.
    if (v == 1.0e15).sum() > 0:
        return {'location': location, 'time_end': t, 'd1': cleanDict(sat_data)}
    # Broadcast 1-D coordinates to 2-D grids matching v.
    lat = np.tile( np.stack([a['lat']], axis = 1), ( 1, v.shape[1]))
    lon = np.tile( np.stack([a['lon']], axis = 0), ( v.shape[0], 1))
    lat = cv2.resize(lat, None, fx = 5, fy = 5)
    lon = cv2.resize(lon, None, fx = 5, fy = 5)
    v2 = cv2.resize(v, None, fx = 5, fy = 5)
    zones = {}
    for grid_id, plon, plat in coords[location]:
        for r in [ 0.1, 0.25, 0.5, 1, 2, ]:
            # Cache the boolean disc mask per (cell, radius).
            if (grid_id, r) not in zones:
                z = (lat - plat) ** 2 + (lon - plon) ** 2 < r ** 2
                zones[(grid_id, r)] = z#, z.sum())
            zone = zones[(grid_id, r)]
            m = v2[zone].mean()#, 1#zone.sum()
            sat_data[grid_id][field + '_mean{}'.format(r)] = m #data[zone].mean()# if ct > 3 else np.nan
    return {'location': location, 'time_end': t, 'd1': cleanDict(sat_data)}
def pullAssim(year, month, min_day, max_day):
    """Pull and aggregate GEOS-CF assimilation fields for one month span.

    For each pollutant field and city: loads the OPeNDAP slice, processes
    every hourly step in a thread pool, and writes a zstd-compressed pickle
    per (field, city, month) to inference/assim/. Retries up to 10 times.
    """
    for field in ['no2', 'so2', 'co', 'o3', 'pm25_rh35_gcc']:
        for location in coords.keys():
            start = time.time()
            for i in range(10):
                try:
                    data = loadAssim(field, location, year, month, min_day, max_day)
                    print('{}-{:02d} {} {} {}'.format(
                        year, month, field, location, len(data)))
                    # assert len(data) == 24
                    # Threading backend: the work is I/O-bound OPeNDAP reads.
                    with parallel_backend('threading'):
                        r = Parallel(os.cpu_count())(
                            delayed(processAssim)(a, location, field) for a in data)
                    zc = zstd.ZstdCompressor(level = 9)
                    out = pickle.dumps(r)
                    compr = zc.compress(out)
                    filename = '{}_{}_{}_{:02}.pkl'.format(
                        field, location, year, month, )
                    os.makedirs('inference/assim/', exist_ok = True)
                    with open('inference/assim/{}'.format(filename), 'wb') as f:
                        f.write(compr)
                    print({
                        # 'statusCode': 200,
                        'file': filename.split('.')[0],
                        # 'body': s/1e6, #os.path.getsize(outfile), #json.dumps('Hello from Lambda!'),
                        'outlen': len(out),#len(pickle.dumps(data)),
                        'outlen-compr': len(compr),#zc.compress(pickle.dumps(data))),
                        'elaspsed_time': round(time.time() - start, 1)
                    }); break;
                except Exception as e:
                    print(e); time.sleep(i)
def listAssimDates(dates):
    """Collapse a list of datetimes into per-month day ranges.

    Returns one tuple ``(year, month, min_day, max_day)`` per distinct
    (year, month) appearing in `dates`, suitable for ``pullAssim(*t)``.

    Bug fix: the original updated the maximum with ``max(prior[0], t.day)``
    — i.e. against the stored *minimum* — which silently lost the true
    maximum whenever dates arrived out of order (e.g. days 9, 2, 5 gave
    (2, 5) instead of (2, 9)).
    """
    months = {}
    for t in dates:
        k = (t.year, t.month)
        if k in months:
            lo, hi = months[k]
            months[k] = (min(lo, t.day), max(hi, t.day))
        else:
            months[k] = (t.day, t.day)
    return [(*k, *v) for k, v in months.items()]
# Inference window from infer.txt ('start'/'end' as comma-separated Y,M,D...),
# padded with 10 lead days before and 1 after for lagged features.
start = datetime.datetime(*[int(i) for i in infer['start'].split(',')])
end = datetime.datetime(*[int(i) for i in infer['end'].split(',')])
dt = start - datetime.timedelta(days = 10)
dates = []
while dt <= end + datetime.timedelta(days = 1):
    dates.append(dt);
    dt += datetime.timedelta(days = 1)
print(len(dates))
print(dates[0]); print(dates[-1])
def listGFSFiles(dates):
    """Build rda.ucar.edu ds084.1 relative paths for the analysis-step
    (f000) GFS 0.25-degree GRIB2 files at the four synoptic hours of
    every date in `dates`."""
    forecast_hour = 0
    paths = []
    for day in dates:
        stamp = day.strftime('%Y%m%d')
        for cycle in (0, 6, 12, 18):
            paths.append('{}/{}/gfs.0p25.{}{:02d}.f{:03d}.grib2'.format(
                stamp[:4], stamp, stamp, cycle, forecast_hour))
    return paths
def listIFSFiles(dates):
    """Build rda.ucar.edu ds113.1 relative paths for the IFS surface
    forecast netCDF files — one per variable tag in `ifs_tags` — for
    every date in `dates`."""
    domain = 'ec.oper.fc.sfc'
    template = '{d}/{ym}/{d}.{tag}.regn1280sc.{ymd}.nc'
    return [
        template.format(d=domain,
                        ym=t.strftime('%Y%m'),
                        tag=tag,
                        ymd=t.strftime('%Y%m%d'))
        for t in dates
        for tag in ifs_tags
    ]
def listTropomiRows(dates):
    """Select tropomi granule metadata rows whose end time falls inside
    [min(dates), max(dates)+1 day], as a list of plain dicts.

    Reads the module-level `files` DataFrame; tz info is stripped so the
    comparison against naive datetimes is valid.
    """
    tropomi_rows = [e.to_dict() for idx, e in
                    files[files['product'].str.startswith('tropomi')
                          & (files.time_end.dt.tz_localize(None) >= min(dates) )
                          & (files.time_end.dt.tz_localize(None)
                             <= max(dates) + datetime.timedelta(days = 1) )
                         ].iterrows()]
    return tropomi_rows
# %%time
# Driver: fan each download list out over worker processes via striding
# (list[i::N_THREADS]) so every worker gets an interleaved share.
N_THREADS = min(10, os.cpu_count() )
Parallel(N_THREADS)(delayed(pullIFS)(
    listIFSFiles(dates)[i::N_THREADS])
    for i in range(N_THREADS))
# %%time
N_THREADS = min(4, os.cpu_count() )
Parallel(N_THREADS)(delayed(pullGFS)(
    listGFSFiles(dates)[:][i::N_THREADS])
    for i in range(N_THREADS))
# %%time
N_THREADS = min(5, os.cpu_count())
Parallel(N_THREADS)(delayed(pullTropomi)(row)
    for row in listTropomiRows(dates))
# %%time
# Assimilation pulls are sequential (each already threads internally).
[pullAssim(*d) for d in listAssimDates(dates)]
# Full-history run (2018-2021): archive each inference subfolder to cache/.
if start.year <= 2018 and end.year >= 2021:
    os.makedirs('cache', exist_ok = True)
    for path in os.listdir('inference'):
        with tarfile.open('cache/{}.tar'.format(path), 'w') as f:
            for file in os.listdir('inference/{}'.format(path)):
                f.add('inference/{}/{}'.format(path, file),
                      arcname = file)
# !jupyter nbconvert --no-prompt --to script 'RunFeatures.ipynb'
| drivendataorg/nasa-airathon | no2/1st Place/RunFeatures.py | RunFeatures.py | py | 25,953 | python | en | code | 12 | github-code | 36 |
# Processing (Python mode) sketch settings.
W = H = 480           # canvas size in pixels
SIZE = 180.0          # outer triangle circumdiameter
IN_SIZE = 60.0        # punched-out inner triangle circumdiameter
FPS = 20.0            # frames per second
D = 2.0               # loop duration in seconds
N_FRAMES = D * FPS    # frames per animation cycle
N_SAMPLES = 4.0       # temporal samples per frame when motion-blurring
WEIGHT = 3
MINE_COLOR = color(231, 231, 222)   # foreground triangles
BLUR_COLOR = color(0, 0, 0)         # shadow pass (drawn first, then blurred)
BG_COLOR = color(188, 184, 172)     # background
RECORD = False        # True: render motion-blurred frames to png/
def polar2cart(r, theta):
    """Convert polar coordinates (radius, angle in radians) to (x, y)."""
    x = r * cos(theta)
    y = r * sin(theta)
    return x, y
def make_triangle(radius):
    """Vertices of an equilateral triangle with the given circumradius,
    rotated PI/6 from the x-axis (flat edge at the bottom)."""
    vertices = []
    for k in range(3):
        vertices.append(polar2cart(radius, k * 2 * PI / 3 + PI / 6))
    return vertices
def easing(x):
    """Cubic ease-in: slow start, fast finish on [0, 1]."""
    return x * x * x
def draw_(t):
    """Render one animation frame at normalized time t in [0, 1).

    Two passes: i=0 draws everything in BLUR_COLOR and blurs it (drop
    shadow), i=1 redraws crisply in MINE_COLOR on top. Each pass draws a
    growing central triangle with three punched-out holes, then three
    satellite triangles flying outward, each with a growing hole.
    """
    background(BG_COLOR)
    for i in range(2):
        pushMatrix()
        noStroke()
        fill([BLUR_COLOR, MINE_COLOR][i])
        translate(width / 2, height / 2)
        # Rotate by a sixth of a turn over the cycle so the loop tiles.
        rotate(PI/3 * t)
        triangle = make_triangle(SIZE / 2 + SIZE / 2 * easing(t))
        beginShape()
        for x, y in triangle:
            vertex(x, y)
        for j in range(3):
            # Fixed-size holes drifting outward inside the central triangle.
            beginContour()
            triangle = make_triangle(IN_SIZE / 2)
            xc, yc = polar2cart(SIZE / 2 * easing(t), j * 2*PI/3 + PI/6)
            triangle.reverse()  # contour winding must oppose the outer shape
            for x, y in triangle:
                vertex(xc + x, yc + y)
            endContour()
        endShape(CLOSE)
        rotate(PI/3)
        for j in range(3):
            # Satellite triangles flying out, each with a growing hole.
            fill([BLUR_COLOR, MINE_COLOR][i])
            triangle = make_triangle(SIZE / 2 + SIZE / 2 * easing(t))
            xc, yc = polar2cart(SIZE / 2 + 2 * SIZE * easing(t), j * 2*PI/3 + PI/6)
            beginShape()
            for x, y in triangle:
                vertex(xc + x, yc + y)
            triangle = make_triangle(IN_SIZE / 2 + IN_SIZE / 2 * easing(t))
            triangle.reverse()
            beginContour()
            for x, y in triangle:
                vertex(xc + x, yc + y)
            endContour()
            endShape(CLOSE)
        popMatrix()
        if i == 0: filter(BLUR, 4)
def setup():
    """Processing entry point: set canvas size and frame rate."""
    size(W, H)
    frameRate(FPS)
def draw():
    """Processing per-frame callback.

    Live mode: draw the frame at the current cycle position. Record mode:
    average N_SAMPLES sub-frame renders per pixel (manual motion blur) and
    save one PNG per frame until a full cycle has been written.
    NOTE(review): range(N_SAMPLES) with float N_SAMPLES only works under
    Processing's Jython/py2 runtime, not CPython 3 — confirm environment.
    """
    if not RECORD:
        t = (frameCount % N_FRAMES) / N_FRAMES
        draw_(t)
    else:
        # Per-pixel RGB accumulators for the temporal average.
        result = [[0, 0, 0] for i in range(W * H)]
        for sample in range(N_SAMPLES):
            t = ((frameCount - 1 + sample / N_SAMPLES) % N_FRAMES) / N_FRAMES
            draw_(t)
            loadPixels()
            for i, pix in enumerate(pixels):
                result[i][0] += red(pix)
                result[i][1] += green(pix)
                result[i][2] += blue(pix)
        loadPixels()
        for i, rgb in enumerate(result):
            pixels[i] = color(rgb[0] / N_SAMPLES, rgb[1] / N_SAMPLES, rgb[2] / N_SAMPLES)
        updatePixels()
        if frameCount <= N_FRAMES:
            saveFrame('png/####.png')
        else:
            exit()
| letsgetcooking/Sketches | processing/2015/triangle.py | triangle.py | py | 2,592 | python | en | code | 0 | github-code | 36 |
__all__ = [
  "Quantizer"
]
from multiprocessing import Pool
import numpy as np
from ..base import Pipe
from ..config import read_config
from ..funcs import parse_marker
# JPEG2000 defaults (resolution levels, QCD marker, dequantization offset,
# reversible-path precision) and pool sizing, all from the package config.
config = read_config()
D = config.get("jpeg2000", "D")
QCD = config.get("jpeg2000", "QCD")
delta_vb = config.get("jpeg2000", "delta_vb")
reserve_bits = config.get("jpeg2000", "reserve_bits")
min_task_number = config.get("accelerate", "codec_min_task_number")
max_pool_size = config.get("accelerate", "codec_max_pool_size")
class Quantizer(Pipe):
  """
  Quantizer.

  Quantify or dequantify wavelet coefficients. The irreversible (lossy)
  path uses per-resolution step sizes delta_b derived from the QCD marker
  (epsilon_b / mu_b exponents); the reversible (lossless) path only
  rescales coefficients by 10 ** reserve_bits so they round-trip through
  integer storage.
  """

  def __init__(self,
               name="Quantizer",
               mode="quantify",
               irreversible=False,
               accelerated=False,
               D=D,
               QCD=QCD,
               delta_vb=delta_vb,
               reserve_bits=reserve_bits):
    """
    Init and set attributes of a quantizer.

    Explicit Attributes
    -------------------
    name: str, optional
      Name of the quantizer.
    mode: str, optional
      Mode of quantizer, must in ["quantify", "dequantify"].
    irreversible: bool, optional
      Whether the transform is lossy or lossless.
    accelerated: bool, optional
      Whether the process would be accelerated by subprocess pool.
    D: int, optional
      Number of resolution layers.
    QCD: str, optional
      Quantization default used to specify epsilon_b and mu_b of subband with lowest resolution.
    delta_vb: float, optional
      Used in dequantization, ranges from 0 to 1.
    reserve_bits: int, optional
      Number of decimal digits preserved by the reversible scaling path.

    Implicit Attributes
    -------------------
    epsilon_b: int
      Epsilon used to determine the quantization step of subband with lowest resolution, ranges from 0 to 2^5.
    mu_b: int
      Mu used to determine the quantization step of subband with lowest resolution, ranges from 0 to 2^11.
    min_task_number: int
      Minimum task number to start a pool.
    max_pool_size: int
      Maximum size of pool.
    """
    super().__init__()

    self.name = name
    self.mode = mode
    self.irreversible = irreversible
    self.accelerated = accelerated
    self.D = D
    self.QCD = QCD
    self.delta_vb = delta_vb
    self.reserve_bits = reserve_bits

    self.epsilon_b, self.mu_b = parse_marker(self.QCD)
    self.min_task_number = min_task_number
    self.max_pool_size = max_pool_size

  def _fetch_param(self, params, key, default):
    """
    Read one keyword parameter, logging whether it was given.

    Returns params[key] when present, otherwise `default`; the log line
    matches the historical message format either way.
    """
    try:
      value = params[key]
      self.logs[-1] += self.formatter.message("\"{}\" is specified as {}.".format(key, value))
      return value
    except KeyError:
      self.logs[-1] += self.formatter.warning("\"{}\" is not specified, now set to {}.".format(key, default))
      return default

  def _map(self, func, inputs):
    """
    Apply `func` to each argument tuple, via a process pool when
    acceleration is enabled, otherwise sequentially.
    """
    if self.accelerated:
      self.logs[-1] += self.formatter.message("Using multiprocess pool to accelerate {}.".format(self.mode))
      with Pool(min(self.task_number, self.max_pool_size)) as p:
        return p.starmap(func, inputs)
    return [func(*args) for args in inputs]

  def recv(self, X, **params):
    """
    Receive a list of tiles and quantify or dequantify them per `mode`.

    Recognized params: irreversible, QCD, D, reserve_bits, and (in
    dequantify mode) delta_vb; unspecified ones keep their current values.
    Raises AttributeError for an invalid mode.

    Bug fix: in dequantify mode the delta_vb parameter used to be written
    to self.D, clobbering the resolution-level count. A stray debug print
    of the step sizes was also removed.
    """
    self.logs.append("")
    self.logs[-1] += self.formatter.message("Receiving data.")
    self.received_ = X
    self.accelerate(**params)

    self.irreversible = self._fetch_param(params, "irreversible", self.irreversible)
    self.QCD = self._fetch_param(params, "QCD", self.QCD)
    self.D = self._fetch_param(params, "D", self.D)
    self.reserve_bits = self._fetch_param(params, "reserve_bits", self.reserve_bits)

    self.epsilon_b, self.mu_b = parse_marker(self.QCD)
    # Step size per resolution level, cf. JPEG2000 (ISO/IEC 15444-1) Annex E.
    delta_bs = [2 ** -(self.epsilon_b + i - self.D) * (1 + self.mu_b / (2 ** 11))
                for i in range(self.D)]

    if self.mode == "quantify":
      if self.irreversible:
        X = self._map(_quantize, [[x, delta_bs] for x in X])
      else:
        X = self._map(_scale, [[x, self.reserve_bits, False] for x in X])
    elif self.mode == "dequantify":
      self.delta_vb = self._fetch_param(params, "delta_vb", self.delta_vb)
      if self.irreversible:
        X = self._map(_dequantize, [[x, delta_bs, self.delta_vb] for x in X])
      else:
        X = self._map(_scale, [[x, self.reserve_bits, True] for x in X])
    else:
      msg = "Invalid attribute %s for quantizer %s. Quantizer.mode should be set to \"quantify\" or \"dequantify\"." % (self.mode, self)
      self.logs[-1] += self.formatter.error(msg)
      raise AttributeError(msg)

    self.sended_ = X
    return self
def _quantize(tile, delta_bs):
quantified_tile = [np.array(tile[0] / delta_bs[0], dtype=np.int64)]
for subbands, delta_b in zip(tile[1:], delta_bs):
quantified_tile.append(tuple([np.array(subband / delta_b, dtype=np.int64) for subband in subbands]))
return quantified_tile
def _dequantize(coeffs, delta_bs, delta_vb):
dequantified_tile = [(coeffs[0] + delta_vb) * delta_bs[0]]
for subbands, delta_b in zip(coeffs[1:], delta_bs):
dequantified_tile.append(tuple([(subband + delta_vb) * delta_b for subband in subbands]))
return dequantified_tile
def _scale(tile, reserve_bits, compress):
new_tile = []
if compress:
for subbands in tile:
if isinstance(subbands, tuple):
new_tile.append(tuple([np.array(subband / (10 ** reserve_bits), dtype=np.float64) for subband in subbands]))
else:
new_tile.append(np.array(subbands / (10 ** reserve_bits), dtype=np.float64))
else:
for subbands in tile:
if isinstance(subbands, tuple):
new_tile.append(tuple([np.array(subband * (10 ** reserve_bits), dtype=np.int64) for subband in subbands]))
else:
new_tile.append(np.array(subbands * (10 ** reserve_bits), dtype=np.int64))
return new_tile
| yetiansh/fpeg | fpeg/utils/quantify.py | quantify.py | py | 6,831 | python | en | code | 1 | github-code | 36 |
15138053890 | import sys
import numpy as np
import matplotlib.pyplot as plt
import cuqipy_fenics
import dolfin as dl
import warnings
import cuqi
from cuqi.problem import BayesianProblem
from cuqi.model import PDEModel
from cuqi.distribution import Gaussian
from cuqi.geometry import Geometry
import dolfin as dl
from .pde import SteadyStateLinearFEniCSPDE
from .geometry import FEniCSContinuous, FEniCSMappedGeometry,\
MaternKLExpansion
from .utilities import to_dolfin_expression, _import_ufl
ufl = _import_ufl()
__all__ = ['FEniCSDiffusion1D', 'FEniCSPoisson2D']
class FEniCSDiffusion1D(BayesianProblem):
    """
    1D Diffusion PDE problem using FEniCS. The problem has Dirichlet boundary conditions.
    Parameters
    ------------
    dim : int, Default 100
        Dimension of the 1D problem
    endpoint : float, Default 1
        Endpoint of the 1D grid (starts at 0).
    exactSolution : ndarray, CUQIarray, Default None
        Exact solution used to generate data.
        If None a default exact solution is chosen.
    SNR : float, Default 100
        Signal-to-noise ratio.
    mapping: str or callable
        mapping to parametrize the Bayesian parameters. If provided as string, it can take one of the values: ['exponential']
    Attributes
    ----------
    data : ndarray
        Generated (noisy) data
    model : cuqi.model.Model
        The forward model
    prior : cuqi.distribution.Distribution
        Distribution of the prior (Default = None)
    likelihood : cuqi.likelihood.Likelihood
        Likelihood function.
    exactSolution : ndarray
        Exact solution (ground truth)
    exactData : ndarray
        Noise free data
    infoString : str
        String with information about the problem, noise etc.
    Methods
    ----------
    MAP()
        Compute MAP estimate of posterior.
        NB: Requires prior to be defined.
    sample_posterior(Ns)
        Sample Ns samples of the posterior.
        NB: Requires prior to be defined.
    """
    def __init__(self, dim = 100, endpoint = 1, exactSolution = None, SNR = 100, observation_operator = None, mapping = None, left_bc=0, right_bc=1):
        # Create FEniCSPDE
        # Marker for Dirichlet boundaries: every boundary point of the interval.
        def u_boundary(x, on_boundary):
            return on_boundary
        # Weak form of the steady-state diffusion equation:
        # m is the (possibly mapped) diffusivity, u the state, p the test function.
        def form(m,u,p):
            return m*ufl.inner(ufl.grad(u), ufl.grad(p))*ufl.dx
        mesh = dl.IntervalMesh(dim, 0,endpoint)
        solution_function_space = dl.FunctionSpace(mesh, 'Lagrange', 1)
        parameter_function_space = dl.FunctionSpace(mesh, 'Lagrange', 1)
        # Expression evaluating to left_bc at x=0 and right_bc at x=endpoint
        # (eps-tolerant comparisons select the two interval endpoints).
        dirichlet_bc_expression = dl.Expression("left_bc*(x[0]<eps)+right_bc*(x[0]>endpoint-eps)", eps=dl.DOLFIN_EPS, endpoint=endpoint, left_bc=left_bc, right_bc=right_bc, degree=1)
        dirichlet_bc = dl.DirichletBC(solution_function_space, dirichlet_bc_expression, u_boundary)
        # Homogeneous BC for the adjoint problem (needed for gradients).
        adjoint_dirichlet_bc = dl.DirichletBC(
            solution_function_space, dl.Constant(0), u_boundary)
        PDE = cuqipy_fenics.pde.SteadyStateLinearFEniCSPDE(
            form, mesh, solution_function_space, parameter_function_space, dirichlet_bc, adjoint_dirichlet_bc, observation_operator=observation_operator)
        # Create PDE model
        domain_geometry = cuqipy_fenics.geometry.FEniCSContinuous(parameter_function_space)
        if mapping is not None:
            # Optional reparameterization of the unknown (e.g. log-diffusivity).
            if mapping == 'exponential':
                mapping = lambda x : ufl.exp(x)
            elif callable(mapping):
                mapping = mapping
            else:
                raise ValueError('mapping can be a callable or one of the expected keywords.')
            domain_geometry = cuqipy_fenics.geometry.FEniCSMappedGeometry(geometry=domain_geometry, map = mapping)
        range_geometry = cuqipy_fenics.geometry.FEniCSContinuous(solution_function_space)
        model = cuqi.model.PDEModel(PDE,range_geometry,domain_geometry)
        # Create prior: zero-mean GMRF over the parameter degrees of freedom.
        pr_mean = np.zeros(domain_geometry.par_dim)
        x = cuqi.distribution.GMRF(pr_mean,25,1,'zero')
        # Set up exact solution
        if exactSolution is None:
            # Default ground truth: a draw from the prior.
            exactSolution = x.sample()
        elif exactSolution == 'smooth_step':
            # Piecewise field: constant 0.8 on the left half, Gaussian decay on the right.
            N = dim + 1
            fun = lambda grid: 0.8*np.exp( -( (grid -endpoint/2.0 )**2 ) / 0.02)
            grid = np.linspace(0,endpoint,N)
            exactSolution = np.ones(N)*.8
            exactSolution[np.where(grid > endpoint/2.0)
                          ] = fun(grid[np.where(grid > endpoint/2.0)])
            exactSolution = cuqi.array.CUQIarray(
                exactSolution, geometry=domain_geometry)
        # Generate exact data
        b_exact = model.forward(domain_geometry.par2fun(exactSolution),is_par=False)
        # Add noise to data
        # Reference: Adding noise with a desired signal-to-noise ratio
        # https://sites.ualberta.ca/~msacchi/SNR_Def.pdf
        noise = np.random.normal(0, 1, b_exact.shape)
        # alpha scales unit Gaussian noise so that ||b||/||alpha*noise|| = sqrt(SNR).
        alpha = np.linalg.norm(b_exact)/(np.sqrt(SNR)*np.linalg.norm(noise))
        data = cuqi.array.CUQIarray(
            b_exact + alpha*noise, geometry=range_geometry)
        # Create likelihood
        # NOTE(review): the covariance uses alpha (a std-dev-like scale) rather
        # than alpha**2 — confirm whether this is intended.
        y = cuqi.distribution.Gaussian(
            mean=model(x), cov=alpha*np.eye(range_geometry.par_dim))
        # Initialize FEniCSDiffusion1D as BayesianProblem problem
        super().__init__(y, x, y=data)
        # Store exact values and information
        self.exactSolution = exactSolution
        self.exactData = b_exact
        self.infoString = f"Noise type: Additive i.i.d. noise with mean zero and signal to noise ratio: {SNR}"
class FEniCSPoisson2D(BayesianProblem):
    """
    2D Diffusion PDE-based Bayesian inverse problem that uses FEniCS.
    The problem is set up on a unit square ([0,1]x[0,1]) mesh with either Dirichlet
    or Neumann boundary conditions on each boundary. The unknown parameter
    is the (possibly heterogeneous) diffusion coefficient (e.g. conductivity)
    field. The unknown parameter (e.g. conductivity) and the PDE solution
    (e.g. the potential) are approximated in the first order Lagrange FEM space.
    Parameters
    -----------
    dim : tuple, Default (32,32)
        | Number of the 2D mesh vertices on the x and y directions, respectively.
    bc_types : list of str, Default ['Dirichlet', 'Dirichlet', 'Dirichlet', 'Dirichlet']
        | Boundary conditions on each boundary. The accepted values are:
        | 'Dirichlet': Dirichlet boundary condition.
        | 'Neumann': Neumann boundary condition.
        | The list should be ordered as follows: [left, bottom, right, top]
    bc_values : list of entries, each is a float or a callable , Default [0, 0, 0, 0]
        | Boundary condition values on each boundary. The accepted values are:
        | A float: a constant value.
        | A callable : a callable that takes a point coordinate vector `x` in the physical domain as input and return the boundary condition value at that point, e.g., `lambda x: np.sin(x[0])+np.cos(x[1])`.
        | The list should be ordered as follows: [left, bottom, right, top]
    exactSolution : ndarray, CUQIarray, or callable , Default None
        | Exact solution to the Bayesian inverse problem used to generate data, the diffusivity coefficient field in this case. When passed as a callable, it should take a point coordinate vector `x` in the physical domain as input and return the exact solution value at that point, e.g. `lambda x: np.sin(x[0])+np.cos(x[1])`. If None, a default exact solution is chosen. The default exact solution is a prior sample if the `field_type` is 'KL' and a smooth function if the `field_type` is None.
    source_term : float, callable, or dolfin.Expression, Default 1
        | Source term in the PDE. The accepted values are:
        | A float: a constant value.
        | A callable: a callable that takes a point coordinate vector `x` in the physical domain as input and returns the source term value at that point, e.g. `lambda x: np.sin(x[0])+np.cos(x[1])`.
        | A dolfin.Expression: a dolfin.Expression object that defines the source term.
    noise_level : float, default 0.01
        Noise level relative to the exact data (the ratio of the L2 norm of the noise to the L2 norm of the exact data). By default, the noise level is 1% (=0.01).
    field_type : str, Default None
        | Field type of the forward model domain. The accepted values are:
        | "KL": a :class:`MaternKLExpansion` geometry object will be created and set as a domain geometry.
        | None: a :class:`FEniCSContinuous` geometry object will be created and set as a domain geometry.
    field_params : dict, Default None
        | A dictionary of keyword arguments that the underlying geometry accepts. (Passed to the underlying geometry when field type is "KL" or None). For example, for "KL" field type, the dictionary can be `{"length_scale": 0.1, "num_terms": 32}`. If None is passed as field_type, this argument is ignored.
    mapping : str or callable , Default None
        | mapping to parametrize the Bayesian parameters. If None, no mapping is applied. If provided as callable, it should take a FEniCS function (of the unknown parameter) as input and return a FEniCS form, e.g. `lambda m: ufl.exp(m)`.
        | If provided as string, it can take one of the values:
        | 'exponential' : Parameterization in which the unknown parameter becomes the log of the diffusion coefficient.
    prior : cuqi.distribution.Distribution, Default Gaussian
        | Distribution of the prior. Needs to be i.i.d standard Gaussian if field_type is "KL". The prior name property, i.e., `prior.name` is expected to be "x".
    """

    def __init__(self, dim=None, bc_types=None, bc_values=None,
                 exactSolution=None, source_term=None, noise_level=None,
                 field_type=None, field_params=None, mapping=None, prior=None):
        # Create the mesh
        if dim is None:
            dim = (32, 32)
        mesh = dl.UnitSquareMesh(dim[0], dim[1])

        # Create the function space (first-order Lagrange for both the
        # parameter and the solution).
        V = dl.FunctionSpace(mesh, 'Lagrange', 1)

        # Set up boundary conditions
        if bc_types is None:
            bc_types = ['Dirichlet', 'Dirichlet', 'Dirichlet', 'Dirichlet']
        elif len(bc_types) != 4:
            raise ValueError(
                "The length of bc_types list should be 4. The list should be ordered as follows: [left, bottom, right, top]")
        elif all(bc_type.lower() in ['neumann'] for bc_type in bc_types):
            # A pure-Neumann problem is singular (solution defined only up to
            # a constant), so at least one Dirichlet boundary is required.
            raise ValueError(
                "All boundary conditions cannot be Neumann. At least one boundary condition should be Dirichlet.")
        if bc_values is None:
            bc_values = [0, 0, 0, 0]
        elif len(bc_values) != 4:
            raise ValueError(
                "The length of bc_values list should be 4. The list should be ordered as follows: [left, bottom, right, top]")
        bc_values = [to_dolfin_expression(bc_value) for bc_value in bc_values]
        subdomains = self._create_boundaries_subdomains()

        # Set up Neumann and Dirichlet boundary conditions
        dirichlet_bcs = self._set_up_dirichlet_bcs(
            V, bc_types, bc_values, subdomains)
        neumann_bcs = self._set_up_neumann_bcs(
            V, bc_types, bc_values, subdomains)

        # Set up the adjoint boundary conditions used for solving
        # the Poisson adjoint problem, which is needed for computing
        # the adjoint-based gradient.
        adjoint_dirichlet_bcs = self._set_up_adjoint_dirichlet_bcs(
            V, bc_types, subdomains)

        # Set up the source term
        if source_term is None:
            source_term = dl.Constant(1)
        else:
            source_term = to_dolfin_expression(source_term)

        # Set up the variational problem form
        if mapping is None:
            def parameter_form(m): return m
        elif callable(mapping):
            parameter_form = mapping
        elif mapping.lower() == 'exponential':
            def parameter_form(m): return ufl.exp(m)
        else:
            raise ValueError('mapping should be a callable, None or a string.')

        # Weak form: diffusion term - source term - Neumann boundary terms.
        def form(m, u, p):
            return parameter_form(m)*ufl.inner(ufl.grad(u), ufl.grad(p))*ufl.dx\
                - source_term*p*ufl.dx\
                - neumann_bcs(m, p)

        # Create the CUQI PDE object
        PDE = SteadyStateLinearFEniCSPDE(
            form,
            mesh,
            parameter_function_space=V,
            solution_function_space=V,
            dirichlet_bcs=dirichlet_bcs,
            adjoint_dirichlet_bcs=adjoint_dirichlet_bcs)

        # Create the domain geometry
        G_FEM = FEniCSContinuous(V)
        if field_params is None:
            field_params = {}
        if field_type is None:
            G_domain = G_FEM
        elif field_type == 'KL':
            if field_params == {}:
                field_params = {'length_scale': 0.1, 'num_terms': 32}
            G_domain = MaternKLExpansion(G_FEM, **field_params)
        else:
            raise ValueError('Unknown field type.')

        # Create the range geometry
        G_range = FEniCSContinuous(V)

        # Create the forward model
        A = PDEModel(PDE, domain_geometry=G_domain, range_geometry=G_range)

        # Create the prior
        if prior is None:
            prior = Gaussian(np.zeros(A.domain_dim), 1,
                             geometry=G_domain, name='x')
        A = A(prior)  # Set parameter name of model to match prior

        # Set up the exact solution
        if exactSolution is None and field_type == "KL":
            # Fixed seed so the default KL ground truth is reproducible.
            rng = np.random.RandomState(15)
            exactSolution = rng.randn(G_domain.par_dim)
        elif exactSolution is None:
            def exactSolution(x): return 1.5 + 0.5 * \
                np.sin(2*np.pi*x[0])*np.sin(2*np.pi*x[1])
        if isinstance(exactSolution, np.ndarray):
            exactSolution = cuqi.array.CUQIarray(
                exactSolution,
                is_par=True,
                geometry=G_domain)
        elif callable(exactSolution):
            exactSolution_expr = to_dolfin_expression(exactSolution)
            exactSolution_func = dl.interpolate(exactSolution_expr, V)
            exactSolution = cuqi.array.CUQIarray(
                exactSolution_func,
                is_par=False,
                geometry=G_domain)
        else:
            raise ValueError(
                'exactSolution should be a numpy array, a function or None.')

        # Create the exact data
        exact_data = A(exactSolution)
        if not isinstance(exact_data, cuqi.array.CUQIarray):
            exact_data = cuqi.array.CUQIarray(
                exact_data, is_par=True, geometry=G_range)

        # Create the data distribution and the noisy data
        noise = np.random.randn(len(exact_data))
        if noise_level is None:
            noise_level = 0.01
        # Scale unit Gaussian noise so ||noise||/||exact_data|| == noise_level.
        noise_std = noise_level * \
            np.linalg.norm(exact_data)/np.linalg.norm(noise)
        noise = noise_std*noise
        data = exact_data + noise
        data_dist = Gaussian(mean=A(prior), cov=noise_std**2, geometry=G_range,
                             name='y')

        # Create the Bayesian problem
        super().__init__(data_dist, prior)
        super().set_data(y=data)

        # Store exact values and information
        self.exactSolution = exactSolution
        self.exactData = exact_data
        self.infoString = f"Noise type: Additive i.i.d. noise with standard deviation: {noise_std} and relative noise standard deviation: {noise_level}."

    def _create_boundaries_subdomains(self):
        """
        Create subdomains for the boundary conditions, in the order
        [left, bottom, right, top] of the unit square.
        """
        class Left(dl.SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary and x[0] < dl.DOLFIN_EPS

        class Bottom(dl.SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary and x[1] < dl.DOLFIN_EPS

        class Right(dl.SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary and x[0] > 1.0 - dl.DOLFIN_EPS

        class Top(dl.SubDomain):
            def inside(self, x, on_boundary):
                return on_boundary and x[1] > 1.0 - dl.DOLFIN_EPS

        return [Left(), Bottom(), Right(), Top()]

    def _set_up_dirichlet_bcs(self, V, bc_types, bc_values, subdomains):
        """
        Set up Dirichlet boundary conditions for the Poisson PDE problem defined
        on the unit square mesh, where V is the function space.
        """
        dirichlet_bcs = []
        for i, bc in enumerate(bc_types):
            if bc.lower() == 'dirichlet':
                dirichlet_bcs.append(dl.DirichletBC(
                    V, bc_values[i], subdomains[i]))
        return dirichlet_bcs

    def _set_up_adjoint_dirichlet_bcs(self, V, bc_types, subdomains):
        """
        Set up Dirichlet boundary conditions for the adjoint Poisson PDE problem
        defined on the unit square mesh, where V is the function space. The
        adjoint problem is needed for the computation of the adjoint-based
        gradient. The adjoint Dirichlet boundary conditions are set to zero in
        this case.
        """
        adjoint_dirichlet_bcs = []
        for i, bc in enumerate(bc_types):
            if bc.lower() == 'dirichlet':
                adjoint_dirichlet_bcs.append(dl.DirichletBC(
                    V, dl.Constant(0), subdomains[i]))
        return adjoint_dirichlet_bcs

    def _set_up_neumann_bcs(self, V, bc_types, bc_values, subdomains):
        """
        Set up Neumann boundary conditions for the Poisson PDE problem defined
        on the unit square mesh, where V is the function space. Returns a
        callable (m, p) -> UFL form for the Neumann boundary contribution.
        """
        boundary_markers = dl.MeshFunction(
            'size_t', V.mesh(), V.mesh().topology().dim()-1)
        boundary_markers.set_all(0)
        for i, subdomain in enumerate(subdomains):
            subdomain.mark(boundary_markers, i+1)
        ds = dl.Measure('ds', domain=V.mesh(), subdomain_data=boundary_markers)
        neumann_bcs = []
        for i, bc_type in enumerate(bc_types):
            if bc_type.lower() == 'neumann':
                # FIX: bind the current boundary's value and marker as default
                # arguments. The previous `lambda m, p: bc_values[i]*p*ds(i+1)`
                # captured `i` by reference (late binding), so when multiple
                # Neumann boundaries were present every lambda used the final
                # loop value of `i` — i.e. all Neumann terms were applied with
                # the last boundary's value and marker.
                neumann_bcs.append(
                    lambda m, p, value=bc_values[i], marker=i + 1:
                        value*p*ds(marker))
        if neumann_bcs == []:
            # No Neumann boundaries: contribute an identically-zero form.
            return lambda m, p: dl.Constant(0)*p*dl.ds
        else:
            return lambda m, p: sum([nbc(m, p) for nbc in neumann_bcs])
| CUQI-DTU/CUQIpy-FEniCS | cuqipy_fenics/testproblem.py | testproblem.py | py | 18,480 | python | en | code | 4 | github-code | 36 |
29178775126 | from django.http import HttpResponse
from django.core.paginator import Paginator
from django.shortcuts import render
from .operations_c import Company_O, Publication
from django.http import QueryDict
from home.operation_home import Home_O
def Add_Publication(request):
    """Show the publication-creation form; on POST, also create the publication.

    Checkbox fields are absent from POST data when unchecked, so they are
    normalised to 0/1 integers before being handed to the data layer.
    """
    if request.method == 'POST':
        form_data = request.POST.copy()
        form_data['availability_travel'] = int('availability_travel' in request.POST)
        form_data['change_residence'] = int('change_residence' in request.POST)
        Publication().Create_Publication(form_data, request.session['pk_user'])
    context = {
        'area': Company_O().Get_Area(),
        'city': Company_O().Get_City(),
        'Type_Contract': Company_O().Type_Contract(),
        'Workday': Company_O().Workday(),
        'Workplace': Company_O().Workplace(),
        'Minimum_Studiess': Company_O().Minimum_Studiess(),
        'languages': Company_O().languages(),
        'Languages_Levels': Company_O().Languages_Levels(),
    }
    return render(request, 'company/add_publication.html', context)
def All_List_Application_Company(request):
    """List the logged-in company's publications, paginated four per page."""
    publications = Publication().All_List_Application_Company(
        request.session['pk_user'])
    paginator = Paginator(publications, 4)
    page_obj = paginator.get_page(request.GET.get('page'))
    return render(request, 'company/list_publications.html',
                  {'page_obj': page_obj})
def Edit_Publication(request, pk):
    """Render the edit form for publication *pk*, pre-filled with its data.

    Fix: removed a stray debug ``print(data)`` left over from development.
    """
    data = Home_O().Get_Publication(pk)
    context = {
        'data': data,
        'area': Company_O().Get_Area(),
        'city': Company_O().Get_City(),
        'Type_Contract': Company_O().Type_Contract(),
        'Workday': Company_O().Workday(),
        'Workplace': Company_O().Workplace(),
        'Minimum_Studiess': Company_O().Minimum_Studiess(),
        'languages': Company_O().languages(),
        'Languages_Levels': Company_O().Languages_Levels(),
    }
    return render(request, 'company/edit_publication.html', context)
| cdavid58/empleo | company/views.py | views.py | py | 1,810 | python | en | code | 0 | github-code | 36 |
17872592313 | #
# author: Paul Galatic
#
# This program is JUST for drawing a rounded rectangle.
#
import pdb
from PIL import Image, ImageDraw
from extern import *
def sub_rectangle(draw, xy, corner_radius=25, fill=(255, 255, 255)):
    """Draw a filled rounded rectangle onto *draw*.

    The shape is composed of two overlapping axis-aligned rectangles (one
    inset vertically, one inset horizontally) plus four quarter-circle pie
    slices, one per corner.

    Source: https://stackoverflow.com/questions/7787375/python-imaging-library-pil-drawing-rounded-rectangle-with-gradient
    """
    (x0, y0), (x1, y1) = xy
    diameter = corner_radius * 2
    # Rectangle spanning the full width, inset vertically by the radius.
    draw.rectangle(
        [(x0, y0 + corner_radius), (x1, y1 - corner_radius)],
        fill=fill,
    )
    # Rectangle spanning the full height, inset horizontally by the radius.
    draw.rectangle(
        [(x0 + corner_radius, y0), (x1 - corner_radius, y1)],
        fill=fill,
    )
    # Corner arcs: top-left, bottom-right, bottom-left, top-right.
    draw.pieslice([(x0, y0), (x0 + diameter, y0 + diameter)],
                  180,
                  270,
                  fill=fill,
                  )
    draw.pieslice([(x1 - diameter, y1 - diameter), (x1, y1)],
                  0,
                  90,
                  fill=fill,
                  )
    draw.pieslice([(x0, y1 - diameter), (x0 + diameter, y1)],
                  90,
                  180,
                  fill=fill,
                  )
    draw.pieslice([(x1 - diameter, y0), (x1, y0 + diameter)],
                  270,
                  360,
                  fill=fill,
                  )
def rectangle(draw, size, fill=WHITE, border=None):
    """Draw a rounded rectangle of *size* onto *draw*, optionally with a border.

    WHITE / BLANK / BORDER are constants from the star-import of ``extern``.

    NOTE(review): ``img`` is created here but never drawn on — all drawing goes
    to the caller-supplied ``draw`` — so the returned image stays fully
    transparent. Confirm whether any caller relies on the return value.
    """
    width, height = size
    img = Image.new('RGBA', size, color=BLANK)
    if border:
        # Outer rounded rectangle in the border colour...
        outdims = ((0, 0), (width, height))
        sub_rectangle(draw, outdims, fill=border)
        # ...then the fill is inset by BORDER pixels on every side.
        indims = ((BORDER, BORDER), (width - BORDER, height - BORDER))
    else:
        indims = ((0, 0), (width, height))
    sub_rectangle(draw, indims, fill=fill)
    return img
9587970567 | from plugin import plugin
from colorama import Fore
@plugin("hex")
def binary(jarvis, s):
    """
    Convert an integer (taken from *s*, or prompted for when *s* is empty)
    to uppercase hexadecimal and print it via jarvis.
    """
    if s == "":
        s = jarvis.input("What's your number? ")
    try:
        value = int(s)
    except ValueError:
        jarvis.say("That's not a number!", Fore.RED)
        return
    # hex() yields e.g. '0x1a' or '-0x1a'; strip the prefix, keep the sign.
    digits = hex(value).upper()
    if value < 0:
        jarvis.say("-" + digits[3:], Fore.YELLOW)
    else:
        jarvis.say(digits[2:], Fore.YELLOW)
| sukeesh/Jarvis | jarviscli/plugins/hex.py | hex.py | py | 503 | python | en | code | 2,765 | github-code | 36 |
15542188724 | import unittest
from unittest.mock import patch, mock_open, MagicMock
from external_sort.file_merger import FileMerger
class StubHandle:
    """Minimal stand-in for a file handle: yields preset lines, then ''."""

    def __init__(self, data):
        self.data = data
        self.position = 0

    def readline(self):
        """Return the next preset line, or '' once exhausted (file-like EOF)."""
        if self.position < len(self.data):
            next_line = self.data[self.position]
            self.position += 1
            return next_line
        return ''
# NOTE(review): module-level fixture; every test below builds its own handles,
# so this list appears unused — confirm before removing.
handles = [
    StubHandle(data=['1\n', '2\n', '3\n', '4\n', '5']),
    StubHandle(data=['a\n', 'b\n', 'c\n', 'd\n', 'e\n', 'f\n', 'h']),
]
class FileMergerTests(unittest.TestCase):
    """Unit tests for FileMerger._merge_to_buffer."""

    def test_should_publish_incomplete_buffers(self):
        """A final, partially filled buffer must still be emitted."""
        merger = FileMerger(None, max_files=2, buffer_size=3)
        sources = [
            StubHandle(data=['1\n', '2\n']),
            StubHandle(data=['a\n', 'b\n']),
        ]
        emitted = list(merger._merge_to_buffer(sources, 0, 0))
        self.assertEqual(2, len(emitted))
        self.assertEqual(3, len(emitted[0]))
        self.assertEqual(1, len(emitted[1]))

    def test_should_strip_new_line_separators(self):
        """Merged lines must have their trailing newline removed."""
        merger = FileMerger(None, max_files=2, buffer_size=3)
        sources = [
            StubHandle(data=['1\n', '2\n']),
            StubHandle(data=['a\n', 'b\n']),
        ]
        emitted = list(merger._merge_to_buffer(sources, 0, 0))
        self.assertEqual(['1', '2', 'a'], emitted[0])

    def test_should_output_lines_in_correct_order(self):
        """Lines from all handles must come out merged in sorted order."""
        merger = FileMerger(None, max_files=2, buffer_size=5)
        sources = [
            StubHandle(data=['a\n', 'b\n', 'c']),
            StubHandle(data=['a\n', 'b\n']),
        ]
        emitted = list(merger._merge_to_buffer(sources, 0, 0))
        self.assertEqual(['a', 'a', 'b', 'b', 'c'], emitted[0])
| xelibrion/sort-large-files | tests/test_file_merger.py | test_file_merger.py | py | 1,715 | python | en | code | 0 | github-code | 36 |
28890243109 | import torch
import math
from torch import nn
from torch.nn import functional as F
from data import utils as du
from model import ipa_pytorch
from model import frame_gemnet
from openfold.np import residue_constants
import functools as fn
Tensor = torch.Tensor
def get_index_embedding(indices, embed_size, max_len=2056):
    """Sinusoidal positional embeddings for prespecified integer offsets.

    Args:
        indices: offsets of size [..., N_edges] of type integer
        embed_size: dimension of the embeddings to create
        max_len: maximum length.

    Returns:
        positional embedding of shape [..., embed_size]
    """
    # Frequency indices 0 .. embed_size//2 - 1, on the same device as input.
    freq_idx = torch.arange(embed_size // 2, device=indices.device)
    # [..., embed_size//2] angles: idx * pi / max_len^(2k/embed_size).
    angles = indices[..., None] * math.pi / (
        max_len ** (2 * freq_idx[None] / embed_size))
    # First half sine, second half cosine.
    return torch.cat([torch.sin(angles), torch.cos(angles)], axis=-1)
def get_timestep_embedding(timesteps, embedding_dim, max_positions=10000):
    """Transformer-style sinusoidal embeddings for diffusion timesteps.

    Code adapted from
    https://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/nn.py

    Args:
        timesteps: 1-D tensor of timesteps (fractions of max_positions).
        embedding_dim: size of the embedding produced for each timestep.
        max_positions: scale applied to the fractional timesteps.

    Returns:
        Tensor of shape [len(timesteps), embedding_dim].
    """
    assert len(timesteps.shape) == 1
    scaled = timesteps * max_positions
    half_dim = embedding_dim // 2
    log_step = math.log(max_positions) / (half_dim - 1)
    # Geometric frequency ladder: exp(-k * log_step) for k = 0..half_dim-1.
    freqs = torch.exp(
        -log_step * torch.arange(half_dim, dtype=torch.float32,
                                 device=timesteps.device))
    angles = scaled.float()[:, None] * freqs[None, :]
    emb = torch.cat([torch.sin(angles), torch.cos(angles)], dim=1)
    if embedding_dim % 2 == 1:
        # Odd target dimension: zero-pad the final column.
        emb = F.pad(emb, (0, 1), mode='constant')
    assert emb.shape == (timesteps.shape[0], embedding_dim)
    return emb
class Embedder(nn.Module):
    """Builds initial per-residue (node) and residue-pair (edge) embeddings
    from timestep, sequence-index, amino-acid type and distogram features.

    Which features are included is controlled by ``model_conf.embed``.
    """
    def __init__(self, model_conf):
        super(Embedder, self).__init__()
        self._model_conf = model_conf
        self._embed_conf = model_conf.embed
        # Time step embedding
        index_embed_size = self._embed_conf.index_embed_size
        # Running tallies of the node/edge MLP input widths; each optional
        # feature below adds its width to these.
        node_embed_dims = index_embed_size
        edge_in = index_embed_size * 2
        # Sequence index embedding
        if self._embed_conf.use_res_idx_encoding:
            node_embed_dims += index_embed_size
            edge_in += index_embed_size
        if self._embed_conf.embed_aatype:
            aatype_embed_size = self._embed_conf.aatype_embed_size
            # Input is a one-hot over restypes plus the unknown/padding type.
            self.aatype_embedder = nn.Sequential(
                nn.Linear(residue_constants.restype_num+1, aatype_embed_size),
                nn.ReLU(),
                nn.Linear(aatype_embed_size, aatype_embed_size),
                nn.LayerNorm(aatype_embed_size),
            )
            node_embed_dims += aatype_embed_size
            # Edges receive the aatype embedding of both endpoint residues.
            edge_in += aatype_embed_size * 2
        else:
            aatype_embed_size = 0
        node_embed_size = self._model_conf.node_embed_size
        self.node_embedder = nn.Sequential(
            nn.Linear(node_embed_dims, node_embed_size),
            nn.ReLU(),
            nn.Linear(node_embed_size, node_embed_size),
            nn.ReLU(),
            nn.Linear(node_embed_size, node_embed_size),
            nn.LayerNorm(node_embed_size),
        )
        if self._embed_conf.embed_distogram:
            edge_in += self._embed_conf.num_bins
        edge_embed_size = self._model_conf.edge_embed_size
        self.edge_embedder = nn.Sequential(
            nn.Linear(edge_in, edge_embed_size),
            nn.ReLU(),
            nn.Linear(edge_embed_size, edge_embed_size),
            nn.LayerNorm(edge_embed_size),
        )
        # Partially-applied helpers so forward() only passes the raw values.
        self.timestep_embedder = fn.partial(
            get_timestep_embedding,
            embedding_dim=self._embed_conf.index_embed_size
        )
        self.index_embedder = fn.partial(
            get_index_embedding,
            embed_size=self._embed_conf.index_embed_size
        )
    def forward(
            self,
            *,
            seq_idx,
            t,
            aatype,
            fixed_mask,
            ca_pos
        ):
        """Embeds a set of inputs

        Args:
            seq_idx: [..., N] Positional sequence index for each residue.
            t: Sampled t in [0, 1].
            aatype: [..., N, restype_num+1] amino-acid type features.
            fixed_mask: mask of fixed (motif) residues.
            ca_pos: C-alpha positions used for the distogram features.

        Returns:
            node_embed: [B, N, D_node]
            edge_embed: [B, N, N, D_edge]
        """
        num_batch, num_res = seq_idx.shape
        init_node_embed = []
        # Embed timestep.
        t_embed = torch.tile(
            self.timestep_embedder(t)[:, None, :], (1, num_res, 1))
        # Set time step to epsilon=1e-5 for fixed residues.
        fixed_t_embed = self.timestep_embedder(torch.ones_like(t)*1e-5)
        fixed_t_embed = torch.tile(fixed_t_embed[:, None, :], (1, num_res, 1))
        fixed_mask = fixed_mask[..., None]
        # Per-residue timestep embedding: fixed residues get the epsilon one.
        prot_t_embed = (
            t_embed * (1 - fixed_mask)
            + fixed_t_embed * fixed_mask
        )
        init_node_embed.append(prot_t_embed)
        # Embed 1D sequence features.
        if self._embed_conf.use_res_idx_encoding:
            init_node_embed.append(self.index_embedder(seq_idx))
        if self._embed_conf.embed_aatype:
            aatype_embed = self.aatype_embedder(aatype)
            init_node_embed.append(aatype_embed)
        node_embed = self.node_embedder(
            torch.cat(init_node_embed, dim=-1).float())
        # Embed 2D sequence features.
        # Relative sequence offset i-j for every residue pair, flattened to
        # [B, N*N] so the sinusoidal index embedder can be applied directly.
        edge_attr = seq_idx[:, :, None] - seq_idx[:, None, :]
        edge_attr = edge_attr.reshape([num_batch, num_res**2])
        edge_embed = self.index_embedder(edge_attr)
        # Pairwise timestep features: concatenation of both endpoints' embeddings.
        cross_t_embed = torch.cat([
            torch.tile(prot_t_embed[:, :, None, :], (1, 1, num_res, 1)),
            torch.tile(prot_t_embed[:, None, :, :], (1, num_res, 1, 1)),
        ], dim=-1).float().reshape([num_batch, num_res**2, -1])
        pair_feats = [
            edge_embed,
            cross_t_embed,
        ]
        if self._embed_conf.embed_aatype:
            cross_aatype = torch.cat([
                torch.tile(aatype_embed[:, :, None, :], (1, 1, num_res, 1)),
                torch.tile(aatype_embed[:, None, :, :], (1, num_res, 1, 1)),
            ], dim=-1).float()
            pair_feats.append(cross_aatype.reshape(
                [num_batch, num_res**2, -1]))
        if self._embed_conf.embed_distogram:
            # Binned pairwise C-alpha distances.
            dgram = du.calc_distogram(
                ca_pos,
                self._embed_conf.min_bin,
                self._embed_conf.max_bin,
                self._embed_conf.num_bins,
            )
            pair_feats.append(dgram.reshape([num_batch, num_res**2, -1]))
        edge_embed = torch.cat(pair_feats, dim=-1).float()
        edge_embed = self.edge_embedder(edge_embed)
        edge_embed = edge_embed.reshape(
            [num_batch, num_res, num_res, -1])
        return node_embed, edge_embed
class ReverseDiffusion(nn.Module):
    """Score network for reverse SE(3) diffusion.

    Composes an :class:`Embedder` with an IPA-based trunk and predicts the
    translation/rotation scores, psi angles and (optionally) amino-acid types.
    """
    def __init__(self, model_conf):
        super(ReverseDiffusion, self).__init__()
        self._model_conf = model_conf
        self.embedding_layer = Embedder(model_conf)
        # Only the IPA trunk is currently supported.
        if self._model_conf.network_type == 'ipa':
            self.score_model = ipa_pytorch.IpaScore(model_conf)
        else:
            raise ValueError(
                f'Unrecognized network {self._model_conf.network_type}')
    def _apply_mask(self, aatype_diff, aatype_0, diff_mask):
        # Blend: diffused value where diff_mask==1, otherwise the original.
        return diff_mask * aatype_diff + (1 - diff_mask) * aatype_0
    def _calc_trans_0(self, trans_score, trans_t, beta_t):
        # Closed-form estimate of x_0 from x_t and the translation score
        # (VP-SDE relation). NOTE(review): not called within this class —
        # presumably used by sampling code elsewhere; confirm before removing.
        beta_t = beta_t[..., None, None]
        cond_var = 1 - torch.exp(-beta_t)
        return (trans_score * cond_var + trans_t) / torch.exp(-1/2*beta_t)
    def forward(self, input_feats):
        """forward computes the reverse diffusion conditionals p(X^t|X^{t+1})
        for each item in the batch

        Args:
            input_feats: dict of batched input tensors (masks, rigids,
                amino-acid types, timesteps and score scalings).

        Returns:
            model_out: dictionary of model outputs.
        """
        # Frames as [batch, res, 7] tensors.
        bb_mask = input_feats['res_mask'].type(torch.float32)  # [B, N]
        fixed_mask = input_feats['fixed_mask'].type(torch.float32)
        edge_mask = bb_mask[..., None] * bb_mask[..., None, :]
        # Padding needs to be unknown aatypes.
        pad_aatype = torch.eye(residue_constants.restype_num + 1)[-1][None]
        aatype_t = (
            input_feats['aatype_t'] * bb_mask[..., None]
            + pad_aatype[:, None, :].to(bb_mask.device) * (1 - bb_mask[..., None])
        ).type(torch.float32)
        # Initial embeddings of positonal and relative indices.
        init_node_embed, init_edge_embed = self.embedding_layer(
            seq_idx=input_feats['seq_idx'],
            t=input_feats['t'],
            aatype=aatype_t,
            fixed_mask=fixed_mask,
            ca_pos=input_feats['rigids_t'][..., 4:],
        )
        # Zero out embeddings of padded residues / residue pairs.
        edge_embed = init_edge_embed * edge_mask[..., None]
        node_embed = init_node_embed * bb_mask[..., None]
        # Run main network
        model_out = self.score_model(node_embed, edge_embed, input_feats)
        # Rescale score predictions by the standard deviations or variances.
        trans_score = model_out['trans_score'] * input_feats['trans_score_scaling'][:, None, None]
        rot_score = model_out['rot_score'] * input_feats['rot_score_scaling'][:, None, None]
        # Logits are of shape [..., 20] where 20 is the number of aatypes.
        if self._model_conf.aatype_prediction:
            aatype_logits = model_out['aatype']
            # Probs are of shape [..., 21] where 21 is the vocab size.
            # Last token is padding that we set to 0.
            aatype_probs = torch.nn.functional.softmax(aatype_logits, dim=-1)
        else:
            # No prediction head: pass the input aatypes through unchanged.
            aatype_logits = input_feats['aatype_t'][..., :-1]
            aatype_probs = input_feats['aatype_t'][..., :-1]
        aatype_probs = torch.cat([
            aatype_probs,
            torch.zeros(aatype_probs.shape[:-1] + (1,)).to(
                aatype_probs.device)
        ], dim=-1)
        # Fixed (motif) residues keep their ground-truth aatype.
        aatype_probs = self._apply_mask(
            aatype_probs, input_feats['aatype_0'], 1 - fixed_mask[..., None])
        pred_out = {
            'psi': model_out['psi'],
            'rot_score': rot_score,
            'trans_score': trans_score,
            'aatype_logits': aatype_logits,
            'aatype_probs': aatype_probs,
        }
        if self._model_conf.direct_prediction:
            raise ValueError('Make compatible with masking')
            pred_out['final_rigids'] = model_out['final_rigids']
            pred_out['rigids_update'] = model_out['rigids_update']
        if self._model_conf.dgram_prediction:
            pred_out['dgram'] = model_out['dgram']
        return pred_out
| blt2114/twisted_diffusion_sampler | protein_exp/model/reverse_se3_diffusion.py | reverse_se3_diffusion.py | py | 10,866 | python | en | code | 11 | github-code | 36 |
import matplotlib.pyplot as plt
# Simulated navigation path data: list of (x, y) waypoints.
path = [(0, 0), (1, 1), (2, 3), (3, 4), (4, 2)]
# Initialize the plot.
fig, ax = plt.subplots()
ax.set_xlim(-1, 5)
ax.set_ylim(-1, 5)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_title('Navigation Path')
# Draw the navigation path.
x = [point[0] for point in path]
y = [point[1] for point in path]
ax.plot(x, y, 'b-')
# Initialize the user position marker.
user_position = (0, 0)
user_position_plot, = ax.plot(user_position[0], user_position[1], 'ro')  # red dot marking the user position
# Update the user position marker.
def update_user_position(new_pos):
    """Move the red user marker to *new_pos* and redraw the figure."""
    # NOTE(review): recent matplotlib versions expect sequences in set_data,
    # e.g. set_data([x], [y]) — confirm against the matplotlib version in use.
    user_position_plot.set_data(new_pos[0], new_pos[1])  # update the red dot's coordinates
    plt.draw()  # re-render the figure
# Simulate updates of the user position along the path.
import time
for i in range(1, len(path)):
    new_position = path[i]
    update_user_position(new_position)
    plt.pause(1)  # pause for one second (also processes GUI events)
    time.sleep(1)  # wait one more second
| haiboCode233/KivyPlusAR | testcode.py | testcode.py | py | 897 | python | zh | code | 0 | github-code | 36 |
def merge_sort(src):
    """Return a sorted copy of *src*; the input list is not modified.

    :type src: list | None
    :rtype: list | None -- None only when *src* is None.

    Fixes: ``merge_sort([])`` now returns ``[]`` (previously ``None``).
    """
    if src is None:
        return None
    return __helper(src, 0, len(src))


def __helper(src, lo, hi):
    """Recursively sort src[lo:hi] and return the sorted sub-list."""
    if lo + 1 >= hi:
        # Zero- or one-element slice is already sorted.
        return src[lo:hi]
    # Floor division is required: '/' yields a float under Python 3 and
    # floats are not valid slice indices.
    mid = (lo + hi) // 2
    left = __helper(src, lo, mid)
    right = __helper(src, mid, hi)
    # Standard two-pointer merge of the sorted halves (stable: ties favor
    # the right half, preserving the original comparison direction).
    merged = []
    l_p = r_p = 0
    while l_p < len(left) and r_p < len(right):
        if left[l_p] < right[r_p]:
            merged.append(left[l_p])
            l_p += 1
        else:
            merged.append(right[r_p])
            r_p += 1
    # At most one of these extends with a non-empty remainder.
    merged.extend(left[l_p:])
    merged.extend(right[r_p:])
    return merged
11263851417 | import sqlite3
from flask import g
from app.app import app
from .model import Objective, User
DATABASE = "data.db"
def create_tables():
    """Create the ``users`` and ``objectives`` tables if they do not exist.

    Idempotent (uses IF NOT EXISTS), so it is safe to call at every startup.
    """
    with app.app_context():
        db = get_db()
        cursor = db.cursor()
        # users: one row per account, keyed by an externally supplied id.
        cursor.execute('''CREATE TABLE IF NOT EXISTS users (
                    user_id text primary key,
                    name text,
                    email text
                    )'''
                    )
        # objectives: financial goals, each owned by a user (FK users.user_id).
        cursor.execute('''CREATE TABLE IF NOT EXISTS objectives (
                    id integer primary key,
                    user_id text,
                    name text,
                    initial_date text,
                    final_date text,
                    initial_investment text,
                    recurring_investment text,
                    goal_value text,
                    foreign KEY(user_id) REFERENCES users(user_id)
                    )'''
                    )
        db.commit()
        db.close()
def add_objective(objective: Objective):
    """Insert *objective* as a new row (auto-assigned id) for its user.

    Fix: values are now bound as SQL parameters instead of being interpolated
    into the statement with an f-string, closing an SQL-injection hole.
    """
    with app.app_context():
        db = get_db()
        cursor = db.cursor()
        cursor.execute(
            "INSERT INTO objectives VALUES (null, ?, ?, ?, ?, ?, ?, ?)",
            (
                objective.user_id,
                objective.name,
                objective.initial_date,
                objective.final_date,
                objective.initial_investment,
                objective.recurring_investment,
                objective.goal_value,
            ),
        )
        db.commit()
        db.close()
def update_objective(objective: Objective):
    """Update every editable field of the objective row matching objective.id.

    Fix: values are now bound as SQL parameters instead of being interpolated
    into the statement with an f-string, closing an SQL-injection hole.
    """
    with app.app_context():
        db = get_db()
        cursor = db.cursor()
        cursor.execute(
            """UPDATE objectives SET
                name = ?,
                initial_date = ?,
                final_date = ?,
                initial_investment = ?,
                recurring_investment = ?,
                goal_value = ?
                WHERE id = ?""",
            (
                objective.name,
                objective.initial_date,
                objective.final_date,
                objective.initial_investment,
                objective.recurring_investment,
                objective.goal_value,
                objective.id,
            ),
        )
        db.commit()
        db.close()
def add_user(users: User):
    """Insert a new user row.

    Fix: values are now bound as SQL parameters instead of being interpolated
    into the statement with an f-string, closing an SQL-injection hole.
    """
    with app.app_context():
        db = get_db()
        cursor = db.cursor()
        cursor.execute(
            "INSERT INTO users VALUES (?, ?, ?)",
            (users.user_id, users.name, users.email),
        )
        db.commit()
        db.close()
def update_user(user: User):
    """Update name and email of the given user; True if a row was changed.

    Fixes: the WHERE clause previously referenced a non-existent ``id``
    column (the users table's primary key is ``user_id``), so the update
    never matched any row; values are also now bound as SQL parameters
    instead of f-string interpolation, closing an SQL-injection hole.
    """
    with app.app_context():
        db = get_db()
        cursor = db.cursor()
        cursor.execute(
            "UPDATE users SET name = ?, email = ? WHERE user_id = ?",
            (user.name, user.email, user.user_id),
        )
        db.commit()
        db.close()
        return cursor.rowcount > 0
def get_user(user_id):
    """Return the User with the given primary key.

    :raises FileNotFoundError: if no user with *user_id* exists.

    Fix: the id is now bound as an SQL parameter instead of f-string
    interpolation, closing an SQL-injection hole.
    """
    with app.app_context():
        db = get_db()
        cursor = db.cursor()
        cursor.execute("SELECT * FROM users WHERE user_id = ?", (user_id,))
        user = cursor.fetchone()
        db.close()
        if user is None:
            raise FileNotFoundError
        return User(user[0], user[1], user[2])
def get_all_users():
    """Return a lazy iterator of User objects, one per row in ``users``."""
    with app.app_context():
        db = get_db()
        cursor = db.cursor()
        cursor.execute(f"""SELECT * FROM users""")
        rows = cursor.fetchall()
        db.close()
        return map(lambda row: User(row[0], row[1], row[2]), rows)
def delete_user(user_id):
    """Delete the user row matching user_id.

    Parameterized to prevent SQL injection via user_id.
    """
    with app.app_context():
        db = get_db()
        cursor = db.cursor()
        cursor.execute("DELETE FROM users WHERE user_id = ?", (user_id,))
        db.commit()
        db.close()
def get_db():
    """Return the SQLite connection cached on flask.g, creating it on first use."""
    if getattr(g, '_database', None) is None:
        g._database = sqlite3.connect(DATABASE)
    return g._database
@app.teardown_appcontext
def close_connection(exception):
    """Flask teardown hook: close the SQLite connection cached on g, if any."""
    db = getattr(g, '_database', None)
    if db is not None:
        db.close()
| brunotsantos1997/robson-api | app/data/database.py | database.py | py | 4,413 | python | en | code | 1 | github-code | 36 |
72244550505 | import json
# Load the mapping from inscriptions.json
with open("inscriptions.json", "r") as json_file:
    data = json.load(json_file)

# Create a mapping from the number in "Goosinals #<n>" to the inscription "id"
mapping = {}
for entry in data:
    # meta.name looks like "Goosinals #123"; take the part after '#'
    number = int(entry["meta"]["name"].split("#")[1].strip())
    mapping[number] = entry["id"]

# Process output.txt and replace numbers with ids
with open("output.txt", "r") as file:
    lines = file.readlines()

with open("mapped_output.txt", "w") as file:
    for line in lines:
        number = int(line.strip())  # Assuming each line has only a number
        # NOTE(review): raises KeyError if a number is absent from the JSON.
        file.write(mapping[number] + "\n")
| jokie88/goosinal_mosaic | map_goosinalnumber_to_hash.py | map_goosinalnumber_to_hash.py | py | 617 | python | en | code | 1 | github-code | 36 |
74160148903 | '''
Pay attention to the situation when the characters are all 'S' or 'O', but in a wrong sequence just like the test example
'''
import sys
def marsExploration(s):
    """Return how many characters of s differ from the repeated "SOS" signal.

    Only complete three-character groups are compared; a trailing partial
    group (when len(s) is not a multiple of 3) is ignored, matching the
    original behaviour. The original's ``if end > length: break`` guard was
    unreachable (the loop runs exactly len(s)//3 times) and has been removed.

    :param s: received signal string
    :return: number of altered characters
    """
    pattern = "SOS"
    # Compare each character of every complete triple against the pattern.
    return sum(
        1
        for i in range(len(s) // 3 * 3)
        if s[i] != pattern[i % 3]
    )
# Demo run against a sample received signal.
s = 'SOSOOSOSOSOSOSSOSOSOSOSOSOS'
result = marsExploration(s)
print(result)
40243581716 | from django.conf import settings
from django.db import models
import logging
import requests
log = logging.getLogger('genoome.twentythree.models')
class Token23(models.Model):
    """Stores the 23andMe OAuth2 access/refresh token pair for one user."""
    # One token row per user (unique FK acts like a one-to-one link).
    # NOTE(review): a OneToOneField would express this intent directly, and
    # modern Django requires on_delete; left as-is for schema stability.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, unique=True)
    access_token = models.TextField()   # OAuth2 bearer token sent on API calls
    refresh_token = models.TextField()  # used by refresh() to obtain a new pair
    scope = models.TextField()          # OAuth2 scopes granted for this token
    created_at = models.DateTimeField(auto_now_add=True)
    modified_at = models.DateTimeField(auto_now=True)

    class ClientError(Exception):
        """Raised on any non-200 response from the 23andMe API."""
        pass

    def _api_get(self, url):
        """GET an API path with the stored bearer token and return parsed JSON.

        :param url: API path starting with '/', appended to the API host.
        :raises Token23.ClientError: on any non-200 response.
        """
        headers = {
            'Authorization': 'Bearer %s' % self.access_token,
        }
        url = "https://api.23andme.com%s" % (url,)
        response = requests.get(url, headers=headers)
        if response.status_code != 200:
            # https://api.23andme.com/docs/errors/
            log.warning('23andme error response: %s\nurl:%s', response.text, url)
            raise self.ClientError
        return response.json()

    @classmethod
    def get_by_code(klass, user, code):
        """Exchange an OAuth2 authorization code for tokens and persist them.

        Creates the user's Token23 row, or overwrites the stored tokens if
        one already exists. Raises ClientError when the token endpoint does
        not return 200.
        """
        # https://api.23andme.com/docs/authentication/
        # curl https://api.23andme.com/token/
        #     -d client_id=xxx \
        #     -d client_secret=yyy \
        #     -d grant_type=authorization_code \
        #     -d code=zzz \
        #     -d "redirect_uri=https://localhost:5000/receive_code/"
        #     -d "scope=basic%20rs3094315"
        post_data = {
            'client_id': settings.CLIENT_ID23,
            'client_secret': settings.CLIENT_SECRET23,
            'scope': 'basic genomes',
            'code': code,
            'grant_type': 'authorization_code',
            'redirect_uri': settings.COMEBACK_URL23,
        }
        response = requests.post('https://api.23andme.com/token/', data=post_data, timeout=30.00, verify=True)
        if response.status_code != 200:
            log.error('Problem fetching token %s %s', response.status_code, response.text)
            raise klass.ClientError
        data = response.json()
        initial = {
            'access_token': data['access_token'],
            'refresh_token': data['refresh_token'],
            'scope': data['scope'],
        }
        instance, created = klass.objects.get_or_create(user=user, defaults=initial)
        if not created:
            # Row already existed: replace the stored tokens with the new ones.
            log.warning('Updating initial token for %s', user)
            for key in initial:
                setattr(instance, key, initial[key])
            instance.save()
        log.debug('Token for %s ready!', user)
        return instance

    def refresh(self):
        """Use the refresh token to obtain and store a new token pair.

        :raises Token23.ClientError: when the token endpoint does not return 200.
        """
        post_data = {
            'client_id': settings.CLIENT_ID23,
            'client_secret': settings.CLIENT_SECRET23,
            'scope': self.scope,
            'refresh_token': self.refresh_token,
            'grant_type': 'refresh_token',
            'redirect_uri': settings.COMEBACK_URL23,
        }
        response = requests.post('https://api.23andme.com/token/', data=post_data, timeout=30.00, verify=True)
        if response.status_code != 200:
            log.error('Problem refreshing token %s %s', response.status_code, response.text)
            raise self.ClientError
        data = response.json()
        self.access_token = data['access_token']
        self.refresh_token = data['refresh_token']
        self.scope = data['scope']
        self.save()

    def get_genome(self, profile_id23):
        """Return the raw genome data string for one profile.

        Requires the 'genomes' OAuth2 scope.
        """
        # GET /1/genomes/profile_id/?unfiltered=...
        # curl https://api.23andme.com/1/genomes/c44.../ -H "..."
        # https://api.23andme.com/res/txt/snps.b4e00fe1db50.data
        # scope required: genomes
        data = self._api_get('/1/genomes/%s/' % (profile_id23,))
        return data['genome']

    def get_profiles(self):
        """Return the list of genotype profiles for the user.

        Requires the 'basic' OAuth2 scope.
        """
        # GET /1/user/
        # # JSON response:
        #{
        #    "id": "a42e94634e3f7683",
        #    "profiles": [
        #        {
        #            "genotyped": true,
        #            "id": "c4480ba411939067"
        #        }, ...
        #    ]
        #}
        # scope required: basic
        data = self._api_get('/1/user/')
        return data['profiles']
class CeleryTask23(models.Model):
    """Tracks the background fetch/parse pipeline for one user's 23andMe genome."""
    STATUS_CHOICES = (
        ('new', 'new'),
        ('fetching', 'fetching genome'),
        ('parsing', 'parsing genome'),
        ('error', 'error'),
    )
    # One pipeline row per user (unique FK).
    user = models.ForeignKey(settings.AUTH_USER_MODEL, unique=True)
    chosen_profile = models.TextField()            # 23andMe profile id selected by the user
    fetch_task_id = models.TextField()             # Celery task id of the genome download
    analyze_order = models.ForeignKey('disease.AnalyzeDataOrder', null=True)
    process_task_id = models.TextField(null=True)  # Celery task id of the parsing step
    status = models.TextField(choices=STATUS_CHOICES, default='new')
| jiivan/genoomy | genoome/twentythree/models.py | models.py | py | 4,655 | python | en | code | 0 | github-code | 36 |
11359117771 | import sys
sys.stdin = open('알파벳.txt')
def dfs(x, y, cnt):
    """Depth-first search that never revisits a letter already on the path.

    cnt is the number of moves taken so far (0 at the start cell); the best
    value seen is kept in the global max_c.
    NOTE(review): max_c therefore counts moves, i.e. visited cells minus one —
    if the intended answer is the number of cells, this is off by one; verify
    against the problem statement.
    """
    global max_c
    if cnt > max_c:
        max_c = cnt
    check.append(data[x][y])  # letter at (x, y) joins the current path
    for k in range(4):
        nx = x + dx[k]
        ny = y + dy[k]
        if nx < 0 or nx >= R or ny < 0 or ny >= C: continue
        if data[nx][ny] in check: continue  # letter already used on this path
        cnt += 1
        dfs(nx, ny, cnt)
        cnt -= 1
    check.pop()  # backtrack: free the letter for other paths
R, C = map(int, input().split())
data = [list(map(str, input())) for _ in range(R)]
visited = [[0]*C for _ in range(R)]  # NOTE(review): never read — dfs() deduplicates via `check` instead
max_c = -9876654332  # best move count found so far (effectively -inf)
dx = [0, 1, 0, -1]  # four orthogonal directions
dy = [1, 0, -1, 0]
for i in range(C):
    visited = [[0] * C for _ in range(R)]
    check = []  # letters on the current DFS path
    # Start a search from each cell of the top row.
    dfs(0, i, 0)
print(max_c)
40243457981 | import torch.nn as nn
import torch
import torchvision
import cv2
import time
import numpy as np
import os
# Ordered class names for the detector; a box's integer class id indexes this tuple.
YOEO_CLASSES = (
    "shark",
    "coral",
    "fish",
    "turtle",
    "manta ray",
)
def preproc(img, input_size, swap=(2, 0, 1)):
    """Letterbox-resize img to input_size on a gray (114) canvas.

    Returns the float32, axis-swapped (default CHW) image together with the
    scale factor that was applied.
    """
    target_h, target_w = input_size[0], input_size[1]
    if img.ndim == 3:
        canvas = np.ones((target_h, target_w, 3), dtype=np.uint8) * 114
    else:
        canvas = np.ones(input_size, dtype=np.uint8) * 114

    # Uniform scale that fits the image inside the target canvas.
    scale = min(target_h / img.shape[0], target_w / img.shape[1])
    new_w = int(img.shape[1] * scale)
    new_h = int(img.shape[0] * scale)
    resized = cv2.resize(
        img,
        (new_w, new_h),
        interpolation=cv2.INTER_LINEAR,
    ).astype(np.uint8)
    canvas[:new_h, :new_w] = resized

    canvas = canvas.transpose(swap)
    canvas = np.ascontiguousarray(canvas, dtype=np.float32)
    return canvas, scale
class ValTransform:
    """Validation-time preprocessing callable.

    Letterboxes the input image to ``input_size`` via preproc(). In legacy
    mode the tensor is additionally channel-flipped and ImageNet-normalized.
    Returns the image tensor together with a dummy (1, 5) zero label array.

    Arguments:
        swap ((int,int,int)): final order of channels
        legacy (bool): apply the legacy flip + normalization pipeline
    """

    def __init__(self, swap=(2, 0, 1), legacy=False):
        self.swap = swap
        self.legacy = legacy

    # assume input is cv2 img for now
    def __call__(self, img, res, input_size):
        tensor, _ = preproc(img, input_size, self.swap)
        if self.legacy:
            # Channel flip plus ImageNet mean/std normalization.
            tensor = tensor[::-1, :, :].copy()
            tensor /= 255.0
            tensor -= np.array([0.485, 0.456, 0.406]).reshape(3, 1, 1)
            tensor /= np.array([0.229, 0.224, 0.225]).reshape(3, 1, 1)
        return tensor, np.zeros((1, 5))
def vis(img, boxes, scores, cls_ids, conf=0.5, class_names=None):
    """Draw detection boxes and class:score labels onto img (in place) and return it."""
    for i in range(len(boxes)):
        box = boxes[i]
        cls_id = int(cls_ids[i])
        score = scores[i]
        if score < conf:
            continue  # skip low-confidence detections
        x0 = int(box[0])
        y0 = int(box[1])
        x1 = int(box[2])
        y1 = int(box[3])
        # Per-class color from the shared palette, scaled to 0-255.
        color = (_COLORS[cls_id] * 255).astype(np.uint8).tolist()
        text = "{}:{:.1f}%".format(class_names[cls_id], score * 100)
        # Black text on bright box colors, white on dark ones.
        txt_color = (0, 0, 0) if np.mean(_COLORS[cls_id]) > 0.5 else (255, 255, 255)
        font = cv2.FONT_HERSHEY_SIMPLEX

        txt_size = cv2.getTextSize(text, font, 0.4, 1)[0]
        cv2.rectangle(img, (x0, y0), (x1, y1), color, 2)

        # Filled (darkened) background behind the label for readability.
        txt_bk_color = (_COLORS[cls_id] * 255 * 0.7).astype(np.uint8).tolist()
        cv2.rectangle(
            img,
            (x0, y0 + 1),
            (x0 + txt_size[0] + 1, y0 + int(1.5 * txt_size[1])),
            txt_bk_color,
            -1,
        )
        cv2.putText(
            img, text, (x0, y0 + txt_size[1]), font, 0.4, txt_color, thickness=1
        )

    return img
_COLORS = (
np.array(
[
0.000,
0.000,
1.000,
0.000,
0.447,
0.880,
0.929,
0.694,
0.125,
0.466,
0.674,
0.188,
0.494,
0.184,
0.556,
0.301,
0.745,
0.933,
0.635,
0.078,
0.184,
0.300,
0.300,
0.300,
0.600,
0.600,
0.600,
1.000,
0.000,
0.000,
1.000,
0.500,
0.000,
0.749,
0.749,
0.000,
0.000,
1.000,
0.000,
0.850,
0.325,
0.098,
0.667,
0.000,
1.000,
0.333,
0.333,
0.000,
0.333,
0.667,
0.000,
0.333,
1.000,
0.000,
0.667,
0.333,
0.000,
0.667,
0.667,
0.000,
0.667,
1.000,
0.000,
1.000,
0.333,
0.000,
1.000,
0.667,
0.000,
1.000,
1.000,
0.000,
0.000,
0.333,
0.500,
0.000,
0.667,
0.500,
0.000,
1.000,
0.500,
0.333,
0.000,
0.500,
0.333,
0.333,
0.500,
0.333,
0.667,
0.500,
0.333,
1.000,
0.500,
0.667,
0.000,
0.500,
0.667,
0.333,
0.500,
0.667,
0.667,
0.500,
0.667,
1.000,
0.500,
1.000,
0.000,
0.500,
1.000,
0.333,
0.500,
1.000,
0.667,
0.500,
1.000,
1.000,
0.500,
0.000,
0.333,
1.000,
0.000,
0.667,
1.000,
0.000,
1.000,
1.000,
0.333,
0.000,
1.000,
0.333,
0.333,
1.000,
0.333,
0.667,
1.000,
0.333,
1.000,
1.000,
0.667,
0.000,
1.000,
0.667,
0.333,
1.000,
0.667,
0.667,
1.000,
0.667,
1.000,
1.000,
1.000,
0.000,
1.000,
1.000,
0.333,
1.000,
1.000,
0.667,
1.000,
0.333,
0.000,
0.000,
0.500,
0.000,
0.000,
0.667,
0.000,
0.000,
0.833,
0.000,
0.000,
1.000,
0.000,
0.000,
0.000,
0.167,
0.000,
0.000,
0.333,
0.000,
0.000,
0.500,
0.000,
0.000,
0.667,
0.000,
0.000,
0.833,
0.000,
0.000,
1.000,
0.000,
0.000,
0.000,
0.167,
0.000,
0.000,
0.333,
0.000,
0.000,
0.500,
0.000,
0.000,
0.667,
0.000,
0.000,
0.833,
0.000,
0.000,
1.000,
0.000,
0.000,
0.000,
0.143,
0.143,
0.143,
0.286,
0.286,
0.286,
0.429,
0.429,
0.429,
0.571,
0.571,
0.571,
0.714,
0.714,
0.714,
0.857,
0.857,
0.857,
0.000,
0.447,
0.741,
0.314,
0.717,
0.741,
0.50,
0.5,
0,
]
)
.astype(np.float32)
.reshape(-1, 3)
)
def postprocess(
    prediction, num_classes, conf_thre=0.7, nms_thre=0.45, class_agnostic=False
):
    """Convert raw YOLO outputs into NMS-filtered detections per image.

    prediction is indexed as (batch, n_boxes, 5 + num_classes) with boxes
    encoded as (cx, cy, w, h) followed by objectness and class scores.
    Returns a list with one entry per image: a tensor of rows
    (x1, y1, x2, y2, obj_conf, class_conf, class_pred), or None when no
    detection survives the confidence threshold. NOTE: mutates prediction
    in place (box coordinates are rewritten to corner form).
    """
    # Convert center-size boxes to corner coordinates.
    box_corner = prediction.new(prediction.shape)
    box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2
    box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2
    box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2
    box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2
    prediction[:, :, :4] = box_corner[:, :, :4]

    output = [None for _ in range(len(prediction))]
    for i, image_pred in enumerate(prediction):

        # If none are remaining => process next image
        if not image_pred.size(0):
            continue
        # Get score and class with highest confidence
        class_conf, class_pred = torch.max(
            image_pred[:, 5 : 5 + num_classes], 1, keepdim=True
        )

        # Keep boxes whose objectness * best-class score clears the threshold.
        conf_mask = (image_pred[:, 4] * class_conf.squeeze() >= conf_thre).squeeze()
        # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
        detections = torch.cat((image_pred[:, :5], class_conf, class_pred.float()), 1)
        detections = detections[conf_mask]
        if not detections.size(0):
            continue

        # class_agnostic: suppress overlaps regardless of predicted class.
        if class_agnostic:
            nms_out_index = torchvision.ops.nms(
                detections[:, :4],
                detections[:, 4] * detections[:, 5],
                nms_thre,
            )
        else:
            nms_out_index = torchvision.ops.batched_nms(
                detections[:, :4],
                detections[:, 4] * detections[:, 5],
                detections[:, 6],
                nms_thre,
            )

        detections = detections[nms_out_index]
        if output[i] is None:
            output[i] = detections
        else:
            output[i] = torch.cat((output[i], detections))

    return output
class Predictor(object):
    """Single-image inference helper: preprocessing, forward pass, NMS, drawing."""

    def __init__(
        self,
        model,
        num_classes,
        confthre,
        nmsthre,
        test_size,
        cls_names=YOEO_CLASSES,
    ):
        self.model = model              # callable returning raw YOLO predictions
        self.cls_names = cls_names      # index -> human-readable class name
        self.num_classes = num_classes
        self.confthre = confthre        # confidence threshold used by postprocess()
        self.nmsthre = nmsthre          # IoU threshold for NMS
        self.test_size = test_size      # (height, width) expected by the network
        self.preproc = ValTransform(legacy=False)

    def inference(self, img):
        """Run the model on one image (path or array); return (outputs, img_info)."""
        img_info = {"id": 0}
        if isinstance(img, str):
            img_info["file_name"] = os.path.basename(img)
            img = cv2.imread(img)
        else:
            img_info["file_name"] = None

        height, width = img.shape[:2]
        img_info["height"] = height
        img_info["width"] = width
        img_info["raw_img"] = img

        # Letterbox scale factor; needed in visual() to map boxes back.
        ratio = min(self.test_size[0] / img.shape[0], self.test_size[1] / img.shape[1])
        img_info["ratio"] = ratio

        img, _ = self.preproc(img, None, self.test_size)
        img = torch.from_numpy(img).unsqueeze(0)  # add a batch dimension
        img = img.float()
        # print(img.shape)

        with torch.no_grad():
            t0 = time.time()  # NOTE(review): assigned but never reported
            outputs = self.model(img)
            # if self.decoder is not None:
            #     outputs = self.decoder(outputs, dtype=outputs.type())
            outputs = postprocess(
                outputs,
                self.num_classes,
                self.confthre,
                self.nmsthre,
                class_agnostic=True,
            )
        return outputs, img_info

    def visual(self, output, img_info, cls_conf=0.35):
        """Draw detections onto the raw image; return (image, (bboxes, cls, scores))."""
        ratio = img_info["ratio"]
        img = img_info["raw_img"]
        if output is None:
            # No detections survived postprocess(): return empty tensors.
            return img, (torch.empty(0), torch.empty(0), torch.empty(0))
        output = output.cpu()

        bboxes = output[:, 0:4]

        # preprocessing: resize
        bboxes /= ratio  # undo the letterbox scaling back to original pixels

        cls = output[:, 6]
        scores = output[:, 4] * output[:, 5]  # objectness * class confidence

        vis_res = vis(img, bboxes, scores, cls, cls_conf, self.cls_names)
        return vis_res, (bboxes, cls, scores)
def video_predict(
    video_file,
    out_path,
    model,
    num_classes,
    confthre,
    nmsthre,
    test_size,
    YOEO_CLASSES,
    ifps,
    verbose=False,
):
    """Run detection over a video and write an annotated copy to out_path.

    Only every (fps / ifps)-th frame is run through the network; all other
    frames are written through unchanged. Returns
    (per-inferred-frame (bboxes, cls, scores) tuples, original (w, h), fps,
    total frame count).
    NOTE(review): the sampling test ``index % (fps / ifps) == 0`` uses a
    float divisor, so sampling can be uneven unless ifps divides fps evenly —
    verify for the frame rates in use.
    """
    predictor = Predictor(
        model, num_classes, confthre, nmsthre, test_size, YOEO_CLASSES
    )
    cap = cv2.VideoCapture(video_file)
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)  # float
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float
    origi_shape = (width, height)
    fps = round(cap.get(cv2.CAP_PROP_FPS))
    num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    if verbose:
        print(num_frames, "frames detected!")
    bbox_class_score = []  # one (bboxes, cls, scores) tuple per inferred frame
    vid_writer = cv2.VideoWriter(
        out_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (int(width), int(height))
    )
    index = 0
    start_time = time.time()

    while True:
        success, img = cap.read()
        index += 1
        if success:
            if index % (fps / ifps) == 0:  # inference optimization
                frame_start_time = time.time()
                outputs, img_info = predictor.inference(img.copy())
                result_frame, results = predictor.visual(
                    outputs[0], img_info, predictor.confthre
                )
                vid_writer.write(result_frame)
                ch = cv2.waitKey(1)
                # Allow aborting via ESC / q / Q.
                if ch == 27 or ch == ord("q") or ch == ord("Q"):
                    break
                if verbose:
                    print(
                        "--- Frame inferred in %0.2f seconds ---"
                        % (time.time() - frame_start_time)
                    )
                bbox_class_score.append(results)
            else:
                vid_writer.write(img)  # passthrough frame, no inference
        if index > num_frames:
            break
    print("--- Completed in %0.2f seconds ---" % (time.time() - start_time))
    return (
        bbox_class_score,
        origi_shape,
        fps,
        num_frames,
    )
| teyang-lau/you-only-edit-once | src/utils/yolox_process.py | yolox_process.py | py | 13,535 | python | en | code | 6 | github-code | 36 |
22204331425 | from ..service.client_service import ClientService
from flask_restx import Resource, Namespace, fields
from flask import jsonify, request
# Shared service instance used by both resources below.
client_service = ClientService()

api = Namespace('Cliente', 'Operações relacionadas aos clientes da loja')

# Request body schema for create/update operations.
clients_fields = api.model('Cliente', {
    'name': fields.String,
    'cpf': fields.String
})
@api.route("/<int:id>")
class Client(Resource):
def get(self, id):
try:
client = client_service.get_client(id)
return jsonify({"data": client.serialize()})
except Exception as e:
return jsonify({'data': 'Cliente não disponível, {}'.format(str(e))})
@api.doc(body=clients_fields)
def put(self, id):
json = request.get_json(force=True)
try:
name = json['name']
cpf = json['cpf']
status = client_service.update_client(id, name, cpf)
if status:
return jsonify({'data': 'Cliente atualizado'})
else:
return jsonify({'data': 'Cliente não pôde ser atualizado'})
except:
return jsonify({'data': 'Cliente não pôde ser atualizado, campo necessário não foi enviado.'})
def delete(self, id):
status = client_service.delete_client(id)
if status:
return jsonify({'data': 'Cliente deletado'})
else:
return jsonify({'data': 'Cliente não pôde ser deletado'})
@api.route("")
class ClientList(Resource):
def get(self):
clients = client_service.get_clients()
return jsonify({'data': clients})
@api.doc(body=clients_fields)
def post(self):
try:
json = request.get_json(force=True)
name = json['name']
cpf = json['cpf']
client_service.insert_client(str(name), str(cpf))
return jsonify({'data': 'Cliente inserido com sucesso'})
except Exception as e:
print(str(e))
return jsonify({'data': 'Cliente não pôde ser inserido, {}'.format(str(e))}) | anaplb3/loja-api | app/main/controller/client_controller.py | client_controller.py | py | 2,076 | python | pt | code | 0 | github-code | 36 |
43906527977 | """
Process command line arguments and/or load configuration file
mostly used by the test scripts
"""
import argparse
import sys
import os.path
from typing import Union
import yaml
def do_args():
    """Parse command line arguments for the pyspectrumscale tools.

    Returns:
        argparse.Namespace with all option values. Options not supplied
        default to None so callers can distinguish "not given" from a value.
    """

    def _str2bool(value):
        # argparse passes the raw string to type(); bool('False') is True,
        # so common boolean spellings are parsed explicitly. The original
        # used type=bool, which made "--verify_ssl false" come out True.
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ('true', 't', 'yes', 'y', '1'):
            return True
        if lowered in ('false', 'f', 'no', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError(
            "expected a boolean value, got %r" % value
        )

    def _bool_or_str(value):
        # --verify_method accepts either a boolean or a string. The original
        # used typing.Union[bool, str] as the argparse type, which is not
        # callable and raised TypeError whenever the option was supplied.
        try:
            return _str2bool(value)
        except argparse.ArgumentTypeError:
            return value

    # Parse command line arguments and modify config
    parser = argparse.ArgumentParser(
        prog='pyspectrumscale.py',
        description='Python Spectrum Scale Management API tools'
    )
    # Command line arguments
    parser.add_argument(
        "-v",
        "--verbose",
        dest='verbose',
        help="Increase output to stderr and stdout",
        action="store_true"
    )
    parser.add_argument(
        "-q",
        "--quiet",
        dest='quiet',
        help="Reduce output to stderr and stdout",
        action="store_true"
    )
    parser.add_argument(
        "-d",
        "--dry_run",
        dest='dryrun',
        help="Do a dry run, no changes written to Spectrum Scale or GPFS",
        action="store_true"
    )
    parser.add_argument(
        "-f",
        "--file",
        default='pyspectrumsscale.conf.yaml',
        dest='file',
        help="Specify a configuration file, default is pyspectrumsscale.conf.yaml",
    )
    parser.add_argument(
        "--filesystem",
        default=None,
        nargs='+',
        dest='filesystem',
        help="Specify a scale filesystem",
    )
    parser.add_argument(
        "--fileset",
        default=None,
        nargs='+',
        dest='fileset',
        help="Specify a scale fileset, requires a filesystem",
    )
    parser.add_argument(
        "--path",
        default=None,
        dest='path',
        help="Specify a scale filesystem, requires a filesystem",
    )
    parser.add_argument(
        "--parent",
        default=None,
        dest='parent',
        help="Specify a scale fileset parent",
    )
    parser.add_argument(
        "--comment",
        default=None,
        dest='comment',
        help="Specify a scale fileset comment",
    )
    parser.add_argument(
        '-s',
        '--server',
        default=None,
        type=str,
        dest='server',
        help="Hostname of Spectrum Scale Management server"
    )
    parser.add_argument(
        '-u',
        '--user',
        default=None,
        type=str,
        dest='user',
        help="The username used to connect to the Spectrum Scale Management server"
    )
    parser.add_argument(
        '-p',
        '--password',
        default=None,
        type=str,
        dest='password',
        help="The password used to connect to the Spectrum Scale Management server"
    )
    parser.add_argument(
        '--port',
        default=None,
        type=str,
        dest='port',
        # Help text fixed: the original repeated the --password description.
        help="The port used to connect to the Spectrum Scale Management server"
    )
    parser.add_argument(
        '--version',
        default=None,
        type=str,
        dest='version',
        help="The Spectrum Scale Management server API version"
    )
    parser.add_argument(
        '--verify_ssl',
        default=None,
        type=_str2bool,
        dest='verify_ssl',
        help=(
            "If true the SSL certificate of the"
            " Spectrum Scale Management server will be verified"
        )
    )
    parser.add_argument(
        '--verify_warnings',
        default=None,
        type=_str2bool,
        dest='verify_warnings',
        help=(
            "If false warnings about the SSL state of "
            "the Spectrum Scale Management server will be silenced"
        )
    )
    parser.add_argument(
        '--verify_method',
        default=None,
        type=_bool_or_str,
        dest='verify_method',
        help=(
            "The method used to validate the SSL state of "
            "the Spectrum Scale Management server"
        )
    )
    # Positional commands
    parser.add_argument(
        dest='command',
        help='Command help',
        default=None,
        nargs='?',
        type=str,
        choices=[
            'dumpconfig',
            'connectiontest'
        ]
    )

    return parser.parse_args()
# Create the CONFIG to be imported elsewhere
# Set defaults
CONFIG = {
    'scaleserver': {
        'host': 'scaleserver.example.org',
        'user': 'username',
        'password': None,
        'port': 443,
        'version': 'v2',
        'verify_ssl': True,
        'verify_method': True,
        'verify_warnings': True
    },
}

ARGS = do_args()

# Override configuration defaults with values from the config file
if os.path.isfile(ARGS.file):
    with open(ARGS.file, 'r') as configfile:
        # yaml.safe_load instead of yaml.load: config files need no arbitrary
        # Python objects, and yaml.load without an explicit Loader is unsafe
        # (and a TypeError on PyYAML >= 6).
        loaded = yaml.safe_load(configfile)
        if loaded:  # an empty config file parses to None
            CONFIG.update(loaded)

# Override configuration loaded from file with command line arguments
if ARGS.server:
    CONFIG['scaleserver']['host'] = ARGS.server
if ARGS.user:
    CONFIG['scaleserver']['user'] = ARGS.user
if ARGS.password:
    CONFIG['scaleserver']['password'] = ARGS.password
if ARGS.port:
    CONFIG['scaleserver']['port'] = ARGS.port
if ARGS.version:
    CONFIG['scaleserver']['version'] = ARGS.version
# These options can legitimately carry False, so test against None rather
# than truthiness.
if ARGS.verify_method is not None:
    CONFIG['scaleserver']['verify_method'] = ARGS.verify_method
if ARGS.verify_ssl is not None:
    CONFIG['scaleserver']['verify_ssl'] = ARGS.verify_ssl
if ARGS.verify_warnings is not None:
    CONFIG['scaleserver']['verify_warnings'] = ARGS.verify_warnings

# If there's no config file, write one with the defaults and exit so the
# user can review it before the first real run.
if not os.path.isfile(ARGS.file):
    print(
        "The configuration file %s was missing,"
        " wrote default configuration to file" %
        ARGS.file
    )
    with open(ARGS.file, 'w') as configfile:
        yaml.dump(CONFIG, configfile, default_flow_style=False)
    sys.exit(0)

# Set state from command line
CONFIG['command'] = ARGS.command
CONFIG['dryrun'] = ARGS.dryrun
CONFIG['filesystem'] = ARGS.filesystem
CONFIG['fileset'] = ARGS.fileset
CONFIG['path'] = ARGS.path
CONFIG['parent'] = ARGS.parent
CONFIG['comment'] = ARGS.comment
| Aethylred/pyspectrumscale | pyspectrumscale/configuration/__init__.py | __init__.py | py | 6,045 | python | en | code | 0 | github-code | 36 |
26089552298 | import numpy as np
import math
import scipy.signal as juan
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif']=['KaiTi']  # CJK-capable font; alternatives: SimHei, SimSun, Microsoft YaHei
plt.rcParams['axes.unicode_minus'] = False  # these two rcParams let matplotlib render Chinese labels correctly
n1 = np.arange(0,32,1)
dom = [True if (i>=8 and i<=23) else False for i in n1]  # window mask for samples 8..23 (list comprehension)
s=0*(n1<=7)+(0.7**n1)*dom+0*(n1>=24)  # the clean signal: 0.7**n inside the window, zero elsewhere
noise=np.random.normal(0, 0.004, len(n1))
x=s+noise  # noisy observation (NOTE(review): 0.004 is np.random.normal's std dev, though the original comment called it the variance)
h1=(0.5**(15-n1))*(n1<=15)  # all three filters share length 16 so results are comparable
h2=(0.9**(15-n1))*(n1<=15)
h3=0*(n1<=7)+(0.7**(31-n1))*dom+0*(n1>=24)  # time-reversed copy of the signal: the matched filter
def convolve(h):  # convolve the noisy input with the energy-normalized filter
    y=juan.convolve(x,h/(math.sqrt(sum(h**2))),mode='full')
    return y
y1=convolve(h1)
y2=convolve(h2)
y3=convolve(h3)
fig1,(ax1,ax2)=plt.subplots(2,1)
ax1.stem(s,use_line_collection='True',label='原始信号')
ax2.stem(x,use_line_collection='True',label='加噪信号')
fig2,(ax3,ax4,ax5)=plt.subplots(3,1)
ax3.stem(y1,use_line_collection='True',label='h1滤波')
ax4.stem(y2,use_line_collection='True',label='h2滤波')
ax5.stem(y3,use_line_collection='True',label='匹配滤波')
ax1.legend()  # draw the legends
ax2.legend(loc="upper right")
ax3.legend()
ax4.legend()
ax5.legend()
plt.show()
| Mr-Da-Yang/Python_learning | 2019vacational_project/matplotlib/xinhaojiance_02.py | xinhaojiance_02.py | py | 1,391 | python | en | code | 0 | github-code | 36 |
73445915623 | import IPython
from IPython.utils.path import get_ipython_dir
from IPython.html.utils import url_path_join as ujoin
from IPython.html.base.handlers import IPythonHandler, json_errors
from tornado import web
import json
# Handler for the /new page. Will render page using the
# wizard.html page
class New_PageHandler(IPythonHandler):
    """Render the create distributed project interface """
    @web.authenticated
    def get(self):
        # Render the wizard template; base_url keeps links valid behind a URL prefix.
        self.write(self.render_template('wizard.html',
            base_url = self.base_url,
            page_title="New Distributed Project"
            )
        )
# Handler for the /master page. Will render page using the
# master.html page
class Master_PageHandler(IPythonHandler):
    """Render the create distributed project interface """
    @web.authenticated
    def get(self):
        # Render the management template; base_url keeps links valid behind a URL prefix.
        self.write(self.render_template('master.html',
            base_url = self.base_url,
            page_title="Manage Distributed Projects"
            )
        )
| Feldman-Michael/masterthesis | home/ipython/.local/share/jupyter/nbextensions/ma/server/services/ipy_html_distproject.py | ipy_html_distproject.py | py | 996 | python | en | code | 1 | github-code | 36 |
14997087223 | import typing
from typing import Any, Callable, List, Tuple, Union
import IPython.display as display
import cv2
import numpy as np
import os, sys
from PIL import Image
from .abc_interpreter import Interpreter
from ..data_processor.readers import preprocess_image, read_image, restore_image, preprocess_inputs
from ..data_processor.visualizer import visualize_heatmap
class ScoreCAMInterpreter(Interpreter):
    """
    Score CAM Interpreter.

    More details regarding the Score CAM method can be found in the original paper:
    https://arxiv.org/abs/1910.01279
    """

    def __init__(self,
                 paddle_model,
                 trained_model_path,
                 use_cuda=True,
                 model_input_shape=[3, 224, 224]) -> None:
        """
        Initialize the GradCAMInterpreter.

        Args:
            paddle_model (callable): A user-defined function that gives access to model predictions.
                    It takes the following arguments:
                    - data: Data inputs.
                    and outputs predictions. See the example at the end of ``interpret()``.
            trained_model_path (str): The pretrained model directory.
            use_cuda (bool, optional): Whether or not to use cuda. Default: True
            model_input_shape (list, optional): The input shape of the model. Default: [3, 224, 224]
        """
        Interpreter.__init__(self)
        self.paddle_model = paddle_model
        self.trained_model_path = trained_model_path
        self.use_cuda = use_cuda
        self.model_input_shape = model_input_shape
        # The Paddle executor is built lazily on the first interpret() call.
        self.paddle_prepared = False

    def interpret(self,
                  inputs,
                  target_layer_name,
                  labels=None,
                  visual=True,
                  save_path=None):
        """
        Main function of the interpreter.

        Args:
            inputs (str or list of strs or numpy.ndarray): The input image filepath or a list of filepaths or numpy array of read images.
            target_layer_name (str): The target layer to calculate gradients.
            labels (list or tuple or numpy.ndarray, optional): The target labels to analyze. The number of labels should be equal to the number of images. If None, the most likely label for each image will be used. Default: None
            visual (bool, optional): Whether or not to visualize the processed image. Default: True
            save_path (str or list of strs or None, optional): The filepath(s) to save the processed image(s). If None, the image will not be saved. Default: None

        :return: interpretations/heatmap for each image
        :rtype: numpy.ndarray
        """
        imgs, data, save_path = preprocess_inputs(inputs, save_path,
                                                  self.model_input_shape)

        b, c, h, w = data.shape
        self.target_layer_name = target_layer_name

        if not self.paddle_prepared:
            self._paddle_prepare()

        # Default to each image's most probable class when no labels given.
        if labels is None:
            _, probs = self.predict_fn(data)
            labels = np.argmax(probs, axis=1)
        bsz = len(imgs)
        labels = np.array(labels).reshape((bsz, 1))

        feature_map, _ = self.predict_fn(data)
        interpretations = np.zeros((b, h, w))

        # Score-CAM: each activation channel is upsampled to the input size,
        # min-max normalized, used to mask the input, and weighted by the
        # model's score for the target label on that masked input.
        for i in range(feature_map.shape[1]):
            feature_channel = feature_map[:, i, :, :]
            feature_channel = np.concatenate([
                np.expand_dims(cv2.resize(f, (h, w)), 0)
                for f in feature_channel
            ])
            norm_feature_channel = np.array(
                [(f - f.min()) / (f.max() - f.min())
                 for f in feature_channel]).reshape((b, 1, h, w))
            _, probs = self.predict_fn(data * norm_feature_channel)
            scores = [p[labels[i]] for i, p in enumerate(probs)]
            interpretations += feature_channel * np.array(scores).reshape((
                b, ) + (1, ) * (interpretations.ndim - 1))

        # Keep the positive evidence only, then min-max normalize.
        interpretations = np.maximum(interpretations, 0)
        interpretations_min, interpretations_max = interpretations.min(
        ), interpretations.max()

        if interpretations_min == interpretations_max:
            # Degenerate constant map: nothing to visualize.
            return None

        interpretations = (interpretations - interpretations_min) / (
            interpretations_max - interpretations_min)

        interpretations = np.array([(interp - interp.min()) /
                                    (interp.max() - interp.min())
                                    for interp in interpretations])

        for i in range(b):
            visualize_heatmap(interpretations[i], imgs[i], visual,
                              save_path[i])

        return interpretations

    def _paddle_prepare(self, predict_fn=None):
        """Build the Paddle executor once and cache predict_fn on self."""
        if predict_fn is None:
            import paddle.fluid as fluid
            startup_prog = fluid.Program()
            main_program = fluid.Program()
            with fluid.program_guard(main_program, startup_prog):
                with fluid.unique_name.guard():
                    data_op = fluid.data(
                        name='data',
                        shape=[None] + self.model_input_shape,
                        dtype='float32')
                    probs = self.paddle_model(data_op)
                    if isinstance(probs, tuple):
                        probs = probs[0]
                    # Locate the target layer's output variable by name.
                    trainable_vars = list(main_program.list_vars())
                    for v in trainable_vars:
                        if v.name == self.target_layer_name:
                            conv = v
                    main_program = main_program.clone(for_test=True)

            if self.use_cuda:
                gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))
                place = fluid.CUDAPlace(gpu_id)
            else:
                place = fluid.CPUPlace()
            exe = fluid.Executor(place)

            fluid.io.load_persistables(exe, self.trained_model_path,
                                       main_program)

            def predict_fn(data):
                # Fetch both the target layer's feature map and the probabilities.
                feature_map, probs_out = exe.run(main_program,
                                                 feed={'data': data},
                                                 fetch_list=[conv, probs])
                return feature_map, probs_out

        self.predict_fn = predict_fn
        self.paddle_prepared = True
| LoganCome/FedMedical | utils/InterpretDL/interpretdl/interpreter/score_cam.py | score_cam.py | py | 7,112 | python | en | code | 44 | github-code | 36 |
32350610780 | import sublime_plugin
from ..package_creator import PackageCreator
class CreatePackageCommand(sublime_plugin.WindowCommand, PackageCreator):
    """
    Command to create a regular .sublime-package file
    """

    def run(self):
        """Entry point invoked by Sublime Text; shows the package quick panel."""
        self.show_panel()

    def on_done(self, picked):
        """
        Quick panel user selection handler - processes the user package
        selection and create the package file

        :param picked:
            An integer of the 0-based package name index from the presented
            list. -1 means the user cancelled.
        """
        if picked == -1:
            return  # user dismissed the panel
        package_name = self.packages[picked]
        package_destination = self.get_package_destination()

        if self.manager.create_package(package_name, package_destination):
            # Reveal the generated .sublime-package in its destination folder.
            self.window.run_command('open_dir', {"dir":
                package_destination, "file": package_name +
                '.sublime-package'})
| Iristyle/ChocolateyPackages | EthanBrown.SublimeText2.UtilPackages/tools/PackageCache/Package Control/package_control/commands/create_package_command.py | create_package_command.py | py | 961 | python | en | code | 24 | github-code | 36 |
1000678117 | __author__ = 'SOROOSH'
import loadimpact
import settings
__all__ = ['ConfigurationGenerator', 'ConfigurationUploader']
class ConfigurationGenerator(object):
    """Builds a Load Impact test-configuration dict from parsed JMeter (.jmx) info."""

    def __init__(self, jmx_info):
        self.jmx_info = jmx_info

    def generate_configuration(self, scenario):
        """Return the Load Impact test-config payload for the given scenario.

        NOTE(review): duration = ramp_time / 60 is a float in Python 3 —
        confirm the API accepts non-integer durations and that ramp_time
        is expressed in seconds.
        """
        domain = self.jmx_info.domain
        # The API wants an absolute URL; default to plain HTTP.
        if not domain.startswith("http://"):
            domain = 'http://' + domain
        duration = self.jmx_info.ramp_time / 60
        config = {'name': self.jmx_info.test_name,
                  'url': domain,
                  'config': {
                      'load_schedule': [
                          {'users': self.jmx_info.num_of_threads, 'duration': duration}],
                      'tracks': [{
                          'clips': [{
                              'user_scenario_id': scenario.id, 'percent': 100
                          }],
                          'loadzone': settings.TIME_ZONE
                      }],
                      'user_type': settings.USER_TYPE
                  }
                  }
        return config
class ConfigurationUploader(object):
    """Uploads generated test configurations to Load Impact."""
    # Shared API client, created once at import time from the configured token.
    client = loadimpact.ApiTokenClient(api_token=settings.loadimpact_api_token)

    @staticmethod
    def upload(data):
        """Create a Load Impact test config from `data` and return the API object."""
        config = ConfigurationUploader.client.create_test_config(data)
        return config
| s-soroosh/loadimpact-jmx-importer | jmx_importer/configuration.py | configuration.py | py | 1,394 | python | en | code | 0 | github-code | 36 |
23109102237 | import requests
from datetime import datetime
from bs4 import BeautifulSoup

url = 'https://www.naver.com/'
html = requests.get(url).text
soup = BeautifulSoup(html, 'html.parser')

# Realtime search keywords scraped as-is; selector copied from dev tools.
# Originally the per-item selector ended with li:nth-child(1) — using plain
# `li` selects every list item instead of only the first.
names = soup.select('#PM_ID_ct > div.header > div.section_navbar > div.area_hotkeyword.PM_CL_realtimeKeyword_base > div.ah_roll.PM_CL_realtimeKeyword_rolling_base > div > ul > li > a > span.ah_k')
print(names)

# Print each keyword's text, stripping the surrounding tags.
print(f'{datetime.now()} 기준 실시간 검색어') # f-string formatting (Python 3.6+)
for name in names:
    print(name.text)

## Python 3.0+ -> str.format() based formatting
## Python 3.6+ -> f-string formatting
# format function example:
#'{1} {0}'.format('one','two')
# f-string example:
# a, b = 'one', 'two'
# f'{a}, {b}'
29100418117 |
"""
SORU 1:
Bir a matrisinin tüm elemanları sıfır olan
sütunlarının sayısını bulup bu bilgiyi geri döndüren Python
fonksiyonunu yazınız.
"""
import numpy as np
def sifiribul(matris):
    """Count the all-zero columns of *matris* and return that count.

    Prints the matrix and a summary message as a side effect.  The count is
    also returned (the original only printed it, although the module
    docstring promises a return value).
    """
    print(matris)
    arr = np.asarray(matris)
    # A column is all-zero exactly when no element in it is truthy.
    sutun = int(np.count_nonzero(~arr.any(axis=0)))
    if sutun != 0:
        print("Sıfır olan", sutun, " adet sutun vardır")
    else:
        print("Tum sutunları sıfır olan eleman bulunmamaktadir")
    return sutun
# Demo: count the all-zero columns of a random 5x5 matrix of 0/1 values.
matris = np.random.randint(2,size=(5,5))
sifiribul(matris)
993256569 | import asyncio
import serial_asyncio
import threading
from functools import partial
class AsyncSerialConnection(object):
    """Opens the ZiGate serial port on an asyncio loop and wires it to *device*.

    The serial connection is created thread-safely on *loop* (which may run in
    another thread); once it is established, the resulting transport/protocol
    pair is bound to the device via a done-callback.
    """

    def __init__(self, loop, device, port='/dev/ttyUSB0'):
        coro = serial_asyncio.create_serial_connection(loop, ZiGateProtocol, port, baudrate=115200)
        futur = asyncio.run_coroutine_threadsafe(coro, loop) # Requires python 3.5.1
        futur.add_done_callback(partial(self.bind_transport_to_device, device))

    @staticmethod
    def bind_transport_to_device(device, protocol_refs):
        """
        Bind device and protocol / transport once they are ready
        Update the device status @ start

        :param device: pyzigate device; gains a ``send_to_transport`` writer.
        :param protocol_refs: completed future holding (transport, protocol).
        """
        transport = protocol_refs.result()[0]
        protocol = protocol_refs.result()[1]
        protocol.device = device
        device.send_to_transport = transport.write
class ZiGateProtocol(asyncio.Protocol):
    """Minimal serial protocol: forwards received bytes to the bound device."""

    def __init__(self):
        super().__init__()
        self.transport = None  # set in connection_made

    def connection_made(self, transport):
        self.transport = transport

    def data_received(self, data):
        """Hand raw serial bytes to the device; never let an error kill the loop."""
        try:
            self.device.read_data(data)
        except Exception:
            # Bug fix: the original referenced an undefined name ZGT_LOG here,
            # which raised a NameError inside the handler.  Log instead.
            import logging
            logging.getLogger('zigate').debug('ERROR', exc_info=True)

    def connection_lost(self, exc):
        pass
def start_loop(loop):
    """Thread entry point: run *loop* until stopped, then close it."""
    loop.run_forever()
    loop.close()
if __name__ == "__main__":
    import logging
    from pyzigate.interface import ZiGate

    # Setup logging on screen, debug mode
    l = logging.getLogger('zigate')
    l.setLevel(logging.DEBUG)
    l.addHandler(logging.StreamHandler())

    # Asyncio based connection
    zigate = ZiGate()
    loop = asyncio.get_event_loop()
    connection = AsyncSerialConnection(loop, zigate)

    # Adding loop in a thread for testing purposes (i.e non blocking ipython console)
    # not needed when full program is run within the event loop
    t = threading.Thread(target=start_loop, args=(loop,))
    t.start()

    # Example command: message type 0x0010 (get ZiGate version).
    zigate.send_data('0010')
| elric91/ZiGate | examples/async_serial.py | async_serial.py | py | 1,929 | python | en | code | 18 | github-code | 36 |
9815950754 | import discord
import asyncio
import time
import sys
import os
import random
import aiohttp
# CLI args: [1] user token, [2] server (guild) id, [3] token index label,
# [4] 'True' to route the connection through a random proxy from proxies.txt.
useproxies = sys.argv[4]
if useproxies == 'True':
    proxy_list = open("proxies.txt").read().splitlines()
    proxy = random.choice(proxy_list)
    con = aiohttp.ProxyConnector(proxy="http://"+proxy)
    client = discord.Client(connector=con)
else:
    client = discord.Client()
token = sys.argv[1]
SERVER = sys.argv[2]
tokenno = sys.argv[3]
@client.event
async def on_ready(): #the prints are commented out so it is silent and we can return to the menu. I also plan to do this for the main attacks, but i need a way to view the attacks first so we can stop them :/
    """Once logged in, delete this account's messages in every text channel
    of SERVER that it can post in, then disconnect."""
    #print ("Token " + str(tokenno) + " logged in!")
    for channel in client.get_server(SERVER).channels:
        # Only text channels hold messages.
        if channel.type != discord.ChannelType.text:
            continue
        myperms = channel.permissions_for(client.get_server(SERVER).get_member(client.user.id))
        if not myperms.send_messages:
            continue
        # Three passes over the channel log to catch stragglers.
        # NOTE(review): the outer loop variable is immediately shadowed by the
        # inner `async for x`; harmless, but worth renaming.
        for x in range(3):
            async for x in client.logs_from(channel):
                channame = channel.name
                if x.author.id == str(client.user.id):
                    await client.delete_message(x)
                    #print ("Token " + str(tokenno) + ": Cleaned " + channame)
    await client.close()
# bot=False: log in as a user account ("selfbot") rather than a bot account.
try:
    client.run(token, bot=False)
except Exception as c:
    print (c)
| X-Nozi/NoziandNiggarr24Toolbox | spammer/cleanup.py | cleanup.py | py | 1,446 | python | en | code | 0 | github-code | 36 |
75129886504 | __all__ = ["Echo"]
from textwrap import dedent
from typing import Any, Dict
from ..imagecrawler import BaseImageCrawler, Image, ImageCollection, ImageCrawlerConfig, ImageCrawlerInfo
class Echo(BaseImageCrawler):
    """Dummy crawler that always "finds" the one image it was configured with."""

    def __init__(self, *, image_uri: str) -> None:
        super().__init__(image_uri=image_uri)

    @classmethod
    def info(cls) -> ImageCrawlerInfo:
        """Describe this crawler and its configuration options."""
        return ImageCrawlerInfo(
            description='"Finds" the same image ... again ... and again.',
            long_description=dedent('''
                Not an actual crawler.
                More like an Parrot that is trained to repeat what you tell it to say.
                ''').strip(),
            config={
                'image_uri': 'the URI of the image to "find"',
            },
            # does not have an icon
        )

    @classmethod
    def check_config(cls, config: Dict[str, Any]) -> ImageCrawlerConfig:
        """Validate *config* and return the normalized crawler config.

        :raises KeyError: if ``image_uri`` is missing.
        :raises TypeError: if ``image_uri`` is not a string.
        :raises ValueError: if ``image_uri`` is empty.
        """
        image_uri = config['image_uri']
        # isinstance() instead of an exact type() check: accept str subclasses.
        if not isinstance(image_uri, str):
            raise TypeError(f'image_uri {image_uri!r} is not str')
        if not image_uri:
            raise ValueError(f'image_uri {image_uri!r} is empty')
        return ImageCrawlerConfig(
            image_uri=image_uri,
        )

    def is_exhausted(self) -> bool:
        # is generic -> never exhausts
        return False

    def _reset(self) -> None:  # pragma: no cover
        pass

    def _crawl(self) -> ImageCollection:
        """Return a one-element collection containing the configured image."""
        images = ImageCollection()
        image_uri = self.get_config()["image_uri"]
        images.add(
            Image(
                uri=image_uri,
                source=image_uri,
                is_generic=True,
                this_is_a_dummy=True,
            )
        )
        return images
| k4cg/nichtparasoup | python-package/src/nichtparasoup/imagecrawlers/echo.py | echo.py | py | 1,743 | python | en | code | 40 | github-code | 36 |
1119124359 | from typing import Iterable, Callable
import SearchSpace
from BenchmarkProblems.CombinatorialProblem import CombinatorialProblem
from Version_E.Feature import Feature
from Version_E.InterestingAlgorithms.Miner import FeatureSelector
from Version_E.MeasurableCriterion.CriterionUtilities import Balance, Extreme, All
from Version_E.MeasurableCriterion.Explainability import Explainability
from Version_E.MeasurableCriterion.ForSampling import Completeness, ExpectedFitness
from Version_E.MeasurableCriterion.GoodFitness import HighFitness, ConsistentFitness
from Version_E.MeasurableCriterion.MeasurableCriterion import MeasurableCriterion
from Version_E.PrecomputedFeatureInformation import PrecomputedFeatureInformation
from Version_E.PrecomputedPopulationInformation import PrecomputedPopulationInformation
from Version_E.Testing import Miners
def get_reference_features_for_regurgitation_sampling(problem: CombinatorialProblem,
                                                      termination_predicate: Callable,
                                                      ppi: PrecomputedPopulationInformation,
                                                      reference_miner_parameters: dict,
                                                      amount_to_return: int,
                                                      importance_of_explainability: float) -> list[Feature]:
    """Mine the reference features that will guide regurgitation sampling.

    The miner optimizes a weighted balance between explainability and fitness
    quality (high + consistent); *importance_of_explainability* sets the
    trade-off between the two.
    """
    fitness_quality = Balance([HighFitness(), ConsistentFitness()], weights=[1, 1])
    criterion = Balance([Explainability(problem), fitness_quality],
                        weights=[importance_of_explainability,
                                 1 - importance_of_explainability])
    feature_selector = FeatureSelector(ppi, criterion)
    reference_miner = Miners.decode_miner(reference_miner_parameters,
                                          selector=feature_selector,
                                          termination_predicate=termination_predicate)
    return reference_miner.get_meaningful_features(amount_to_return)
def regurgitation_sample(reference_features: Iterable[Feature],
                         termination_predicate: Callable,
                         original_ppi: PrecomputedPopulationInformation,
                         sampling_miner_parameters: dict,
                         amount_to_return: int) -> list[SearchSpace.Candidate]:
    """Sample candidates by re-mining features that resemble *reference_features*.

    Mined features are converted to candidates; features that cannot be
    expressed as complete candidates are dropped.
    """
    reference_pfi = PrecomputedFeatureInformation(original_ppi, reference_features)
    fitness_quality = Balance([HighFitness(), ConsistentFitness()])
    # Weight completeness twice as heavily as expected fitness.
    criterion = Balance([Completeness(),
                         ExpectedFitness(criterion=fitness_quality,
                                         pfi=reference_pfi)],
                        weights=[2, 1])
    miner = Miners.decode_miner(sampling_miner_parameters,
                                selector=FeatureSelector(original_ppi, criterion),
                                termination_predicate=termination_predicate)
    candidates = []
    for feature in miner.get_meaningful_features(amount_to_return):
        if feature.is_convertible_to_candidate():
            candidates.append(feature.to_candidate())
    return candidates
| Giancarlo-Catalano/Featurer | Version_E/Sampling/RegurgitationSampler.py | RegurgitationSampler.py | py | 3,398 | python | en | code | 0 | github-code | 36 |
72225854504 | #!/usr/bin/python3
"""
Main 'BaseModel' class that defines all common
attributes/methods for other classes
"""
import uuid
import models
from datetime import datetime
class BaseModel:
    """Common base for all models: id, timestamps, (de)serialization."""

    def __init__(self, *args, **kwargs):
        """Build an instance from a serialized dict (**kwargs) or from scratch."""
        if kwargs:
            time_format = "%Y-%m-%dT%H:%M:%S.%f"
            for attr, raw in kwargs.items():
                if attr == "__class__":
                    continue
                if attr in ("created_at", "updated_at"):
                    setattr(self, attr, datetime.strptime(raw, time_format))
                else:
                    setattr(self, attr, raw)
        else:
            self.id = str(uuid.uuid4())
            self.created_at = datetime.now()
            self.updated_at = datetime.now()
            models.storage.new(self)

    def __str__(self):
        """Human-readable representation: [Class] (id) {attributes}."""
        return "[{}] ({}) {}".format(type(self).__name__, self.id, self.__dict__)

    def save(self):
        """Bump updated_at and persist through the storage engine."""
        self.updated_at = datetime.now()
        models.storage.save()

    def to_dict(self):
        """Serialize to a plain dict with ISO timestamps and the class name."""
        serialized = dict(self.__dict__)
        serialized["__class__"] = type(self).__name__
        serialized["created_at"] = self.created_at.isoformat()
        serialized["updated_at"] = self.updated_at.isoformat()
        return serialized
| DevPacho/holbertonschool-AirBnB_clone | models/base_model.py | base_model.py | py | 1,607 | python | en | code | 0 | github-code | 36 |
39910309614 | #step1:
import random

# Deck vocabulary and blackjack value of each rank.  Ace is valued 11 here;
# Hand.adjust_for_ace later downgrades it to 1 when the hand would bust.
suits =('Hearts', 'Diamonds', 'Spades', 'Clubs')
ranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten',
 'Jack', 'Queen', 'King', 'Ace')
values = {'Two':2, 'Three':3, 'Four':4, 'Five':5, 'Six':6, 'Seven':7, 'Eight':8, 'Nine':9,
 'Ten':10, 'Jack':10,
 'Queen':10, 'King':10, 'Ace':11}
#step2: tạo lá bài
class Card:
    """A single playing card with a suit, a rank, and its blackjack value."""

    def __init__(self, suit, rank):
        self.suit = suit
        self.rank = rank
        self.value = values[rank]

    def __str__(self) -> str:
        return f"{self.rank} of {self.suit}"
#test class card:
# Mycard = Card("Hearts","Ten")
# print(Mycard)
#step3: tạo bộ bài 52 lá , tạo function trộn và lấy 1 lá khỏi bộ bài
class Deck:
    """A standard 52-card deck."""

    def __init__(self):
        # One Card for every (suit, rank) combination, in declaration order.
        self.deck = [Card(suit, rank) for suit in suits for rank in ranks]

    def __str__(self):
        listing = ''.join('\n' + str(card) for card in self.deck)
        return 'The deck has:' + listing

    def shuffle(self):
        """Shuffle the deck in place."""
        random.shuffle(self.deck)

    def deal(self):
        """Remove and return the last card of the deck."""
        return self.deck.pop()
#test class deck
# my_deck = Deck()
# my_deck.shuffle()
# print(my_deck)
# print(my_deck.deal())
#step4: khởi tạo người chơi, function thêm bài , giá trị , theo dõi các con át, function ace=1/11
class Hand:
    """The cards held by one participant, with a running blackjack total."""

    def __init__(self) -> None:
        self.cards = []   # Card objects currently in the hand
        self.value = 0    # running blackjack total
        self.aces = 0     # number of aces still counted as 11

    def add_card(self, card):
        """Append *card*, update the total, and track aces."""
        self.cards.append(card)
        self.value += values[card.rank]
        if card.rank == 'Ace':
            self.aces += 1

    def adjust_for_ace(self):
        """Downgrade aces from 11 to 1 until the total is 21 or less."""
        while self.value > 21 and self.aces:
            self.value -= 10
            self.aces -= 1
#test:
# test_deck=Deck()
# test_deck.shuffle()
# test_player =Hand()
# test_player.add_card(test_deck.deal())
# test_player.add_card(test_deck.deal())
# print(test_player.value)
# for card in test_player.cards:
# print(card)
#Step 5: Create a Chips Class, tạo 1 phần tiền mặc đinh, ăn, mắt tiền cược
class Chips:
    """Tracks the player's bankroll and the current bet."""

    def __init__(self):
        self.total = 100  # starting bankroll (could instead be user-supplied)
        self.bet = 0

    def win_bet(self):
        """Credit the current bet to the total."""
        self.total += self.bet

    def lose_bet(self):
        """Debit the current bet from the total."""
        self.total -= self.bet
# #def __init__(self,total=100):
# self.total = total
# self.bet = 0
# cho tổng vào init để có thể ghi đè thay vì tự cho nó thay đổi
#Step 6: Write a function for taking bets, hàm nhập tiền cược, kế thừa hàm chips
def take_bet(chips):
    """Prompt until the player enters a valid integer bet within their chips."""
    while True:
        try:
            chips.bet = int(input('How many chips would you like to bet? '))
        except ValueError:
            print('Sorry, a bet must be an integer!')
        else:
            if chips.bet > chips.total:
                print("Sorry, your bet can't exceed",chips.total)
            else:
                break
#Step 7: Write a function for taking hits, tạp hàm lấy thêm bài
def hit(deck,hand):
    """Deal one card from *deck* into *hand* and re-value any aces."""
    hand.add_card(deck.deal())
    hand.adjust_for_ace()
#Step 8: Write a function prompting the Player to Hit or Stand
def hit_or_stand(deck,hand): # player decision prompt
    """Ask the player to hit ('h') or stand ('s').

    Hitting deals one card into *hand*; standing ends the player's turn by
    clearing the global *playing* flag used by the main game loop.
    """
    global playing # to control an upcoming while loop
    while True:
        x = input("Would you like to Hit or Stand? Enter 'h' or 's' ")
        if x[0].lower() == 'h':
            hit(deck,hand) # hit() function defined above
        elif x[0].lower() == 's':
            print("Player stands. Dealer is playing.")
            playing = False
        else:
            print("Sorry, please try again.")
            continue
        break
#Step 9: Write functions to display cards
def show_some(player,dealer):
    """Show the player's full hand but hide the dealer's first card."""
    print("\nDealer's Hand:")
    print(" <card hidden>")
    print('',dealer.cards[1])
    print("\nPlayer's Hand:", *player.cards, sep='\n ')

def show_all(player,dealer):
    """Reveal both hands and their totals at the end of the round."""
    print("\nDealer's Hand:", *dealer.cards, sep='\n ')
    print("Dealer's Hand =",dealer.value)
    print("\nPlayer's Hand:", *player.cards, sep='\n ')
    print("Player's Hand =",player.value)
# *: unpack the collection into print(); sep='\n ' puts each card on its own line.
# Comma-separated arguments let print() format the objects itself; using '+'
# would require calling __str__() explicitly on each card.
#Step 10: Write functions to handle end of game scenarios
# End-of-round handlers: report the outcome and settle the bet.
def player_busts(player,dealer,chips):
    print("Player busts!")
    chips.lose_bet()

def player_wins(player,dealer,chips):
    print("Player wins!")
    chips.win_bet()

def dealer_busts(player,dealer,chips):
    print("Dealer busts!")
    chips.win_bet()

def dealer_wins(player,dealer,chips):
    print("Dealer wins!")
    chips.lose_bet()

def push(player,dealer):
    # Tie: no chips change hands.
    print("Dealer and Player tie! It's a push.")
playing = True  # cleared by hit_or_stand() when the player stands

while True:
    # Print an opening statement
    print('Welcome to BlackJack! Get as close to 21 as you can without going over!\n\
    Dealer hits until she reaches 17. Aces count as 1 or 11.')
    # Create & shuffle the deck, deal two cards to each player
    deck = Deck()
    deck.shuffle()
    player_hand = Hand()
    player_hand.add_card(deck.deal())
    player_hand.add_card(deck.deal())
    dealer_hand = Hand()
    dealer_hand.add_card(deck.deal())
    dealer_hand.add_card(deck.deal())
    # Set up the Player's chips
    player_chips = Chips() # remember the default value is 100
    # Prompt the Player for their bet
    take_bet(player_chips)
    # Show cards (but keep one dealer card hidden)
    show_some(player_hand,dealer_hand)
    while playing: # recall this variable from our hit_or_stand function
        # Prompt for Player to Hit or Stand
        hit_or_stand(deck,player_hand)
        # Show cards (but keep one dealer card hidden)
        show_some(player_hand,dealer_hand)
        # If player's hand exceeds 21, run player_busts() and break out of loop
        if player_hand.value > 21:
            player_busts(player_hand,dealer_hand,player_chips)
            break
    # If Player hasn't busted, play Dealer's hand until Dealer reaches 17
    if player_hand.value <= 21:
        while dealer_hand.value < 17:
            hit(deck,dealer_hand)
        # Show all cards
        # NOTE(review): the comment above has no matching show_all() call;
        # the dealer's final hand is never displayed.
        if dealer_hand.value > 21:
            dealer_busts(player_hand,dealer_hand,player_chips)
        # Run different winning scenarios
        elif dealer_hand.value > player_hand.value:
            dealer_wins(player_hand,dealer_hand,player_chips)
        elif dealer_hand.value < player_hand.value:
            player_wins(player_hand,dealer_hand,player_chips)
        else:
            push(player_hand,dealer_hand)
    # Inform Player of their chips total
    print("\nPlayer's winnings stand at",player_chips.total)
    # Ask to play again
    new_game = input("Would you like to play another hand? Enter 'y' or 'n' ")
    if new_game[0].lower()=='y':
        playing=True
        continue
    else:
        print("Thanks for playing!")
        break
| ngoNhi123t/python_udemy | project2_udemy.py | project2_udemy.py | py | 7,514 | python | en | code | 0 | github-code | 36 |
35667300076 | import tensorflow as tf
from tensorflow import keras
tf.compat.v1.enable_eager_execution()
tf.executing_eagerly()
class BaseModel(tf.keras.Model):
    """Keras model with a standard custom training step: forward pass,
    compiled loss, gradient descent.  The architecture is supplied by the
    caller; the string below preserves a reference three-layer MLP that was
    disabled by its author."""
    '''
    def __init__(self, n_features):
        super(BaseModel, self).__init__()
        self.inputs_layer = keras.layers.Input(shape=n_features)
        self.layer1 = keras.layers.Dense(100, activation=tf.nn.relu, kernel_regularizer=keras.regularizers.l2(0.03))
        self.layer2 = keras.layers.Dense(100, activation=tf.nn.relu, kernel_regularizer=keras.regularizers.l2(0.03))
        self.layer3 = keras.layers.Dense(100, activation=tf.nn.relu, kernel_regularizer=keras.regularizers.l2(0.03))
        self.outputs_layer = keras.layers.Dense(2, activation=tf.nn.softmax)

    def call(self, inputs, training=False):
        x = self.inputs_layer(inputs)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.outputs_layer(x)
        return x
    '''
    def train_step(self, data):
        """One optimization step over a (X, y) batch; returns metric results."""
        X, y = data
        # NOTE(review): persistent=True looks unnecessary -- only one gradient
        # is taken from this tape before it is deleted.
        with tf.GradientTape(persistent=True) as t:
            y_pred = self(X, training=True)
            loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)
        grads = t.gradient(loss, self.trainable_variables)
        del t
        self.optimizer.apply_gradients(zip(grads, self.trainable_variables))
        self.compiled_metrics.update_state(y, y_pred)
        return {m.name: m.result() for m in self.metrics}
class AdversarialModel(tf.keras.Model):
    """Model trained with an additional input-gradient ("explanation")
    penalty: the performance loss is augmented with signed, scaled L1 norms
    of its gradient w.r.t. selected input features."""

    def __init__(self, *args, **kwargs):
        super(AdversarialModel, self).__init__(*args, **kwargs)
        # Accumulator for the explanation penalty.
        self.explanation_loss = tf.Variable(0.)

    def train_step(self, data):
        # targets: (feature_index, sign) pairs -- a positive sign penalizes
        # reliance on that feature, a negative sign rewards it.
        alpha, targets = 0.1, [[1, 1], [15, -1]]
        X, y = data
        with tf.GradientTape(persistent=True) as t:
            t.watch(X)
            y_pred = self(X, training=True)
            performance_loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)
            explanation_loss_entire = t.gradient(performance_loss, X)
            # NOTE(review): tf.zeros_like() returns a NEW tensor and does not
            # reset self.explanation_loss -- the penalty therefore accumulates
            # across steps; self.explanation_loss.assign(0.) was likely meant.
            tf.zeros_like(self.explanation_loss)
            #print(explanation_loss_entire)
            for target in targets:
                explanation_loss_r = explanation_loss_entire[:, target[0]]
                explanation_loss_r = tf.norm(explanation_loss_r, 1)
                explanation_loss_r = explanation_loss_r * target[1]
                self.explanation_loss.assign_add(explanation_loss_r)
                #explanation_loss += explanation_loss_r
            #print("explanation_loss: "+str(tf.norm(self.explanation_loss, 2)))
            total_loss = performance_loss + (alpha * self.explanation_loss / X.shape[0])
        grads = t.gradient(total_loss, self.trainable_variables)
        del t
        self.optimizer.apply_gradients(zip(grads, self.trainable_variables))
        self.compiled_metrics.update_state(y, y_pred)
        return {m.name: m.result() for m in self.metrics}
3829740430 | #! @PYSHEBANG@
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import sys
import os
import getopt
import time
import socket
try:
import ntp.packet
import ntp.util
import ntp.agentx_packet
ax = ntp.agentx_packet
from ntp.agentx import PacketControl
except ImportError as e:
sys.stderr.write(
"ntpsnmpd: can't find Python NTP library.\n")
sys.stderr.write("%s\n" % e)
sys.exit(1)
# TODO This is either necessary, or a different workaround is.
ntp.util.deunicode_units()
logfp = sys.stderr
nofork = False
debug = 0
defaultTimeout = 30

# Shorthand logger that honors the module-level debug setting.
log = (lambda msg, msgdbg: ntp.util.dolog(logfp, msg, debug, msgdbg))

ntpRootOID = (1, 3, 6, 1, 2, 1, 197)  # mib-2 . 197, aka: NTPv4-MIB
snmpTrapOID = (1, 3, 6, 1, 6, 3, 1, 1, 4, 1, 0)
snmpSysUptime = (1, 3, 6, 1, 2, 1, 1, 3, 0)

DEFHOST = "localhost"  # default ntpd host to query
DEFLOG = "ntpsnmpd.log"
class DataSource(ntp.agentx.MIBControl):
    def __init__(self, hostname=DEFHOST, settingsFile=None, notifySpin=0.1):
        """Build the NTPv4-MIB tree and open a control session to *hostname*.

        Registers a read (and, where writable, write) callback for every
        implemented OID under the NTP root, then initializes the cache and the
        notification state (flags, timers, counters) and loads persisted
        settings from *settingsFile*.
        """
        # This is defined as a dict tree because it is simpler, and avoids
        # certain edge cases
        # OIDs are relative from ntp root
        ntp.agentx.MIBControl.__init__(self, mibRoot=ntpRootOID)
        # MIB node init
        # block 0
        self.addNode((0,))  # ntpEntNotifications
        self.addNode((0, 1))  # ntpEntNotifModeChange
        self.addNode((0, 2))  # ntpEntNotifStratumChange
        self.addNode((0, 3))  # ntpEntNotifSyspeerChange
        self.addNode((0, 4))  # ntpEntNotifAddAssociation
        self.addNode((0, 5))  # ntpEntNotifRemoveAsociation
        self.addNode((0, 6))  # ntpEntNotifConfigChanged
        self.addNode((0, 7))  # ntpEntNotifLeapSecondAnnounced
        self.addNode((0, 8))  # ntpEntNotifHeartbeat
        # block 1
        # block 1, 1
        self.addNode((1, 1, 1, 0),  # ntpNetSoftwareName utf8str
                     (lambda oid: self.cbr_systemInfo(oid, "name")))
        self.addNode((1, 1, 2, 0),  # ntpEntSoftwareVersion utf8str
                     (lambda oid: self.cbr_systemInfo(oid, "version")))
        self.addNode((1, 1, 3, 0),  # ntpEntSoftwareVendor utf8str
                     (lambda oid: self.cbr_systemInfo(oid, "vendor")))
        self.addNode((1, 1, 4, 0),  # ntpEntSystemType utf8str
                     (lambda oid: self.cbr_systemInfo(oid, "system")))
        self.addNode((1, 1, 5, 0),  # ntpEntTimeResolution uint32
                     self.cbr_timeResolution)
        self.addNode((1, 1, 6, 0),  # ntpEntTimePrecision int32
                     self.cbr_timePrecision)
        self.addNode((1, 1, 7, 0),  # ntpEntTimeDistance DisplayString
                     self.cbr_timeDistance)
        # block 1, 2
        self.addNode((1, 2, 1, 0),  # ntpEntStatusCurrentMode INTEGER {...}
                     self.cbr_statusCurrentMode)
        self.addNode((1, 2, 2, 0),  # ntpEntStatusStratum NtpStratum
                     self.cbr_statusStratum)
        self.addNode((1, 2, 3, 0),  # ntpEntStatusActiveRefSourceId uint32
                     self.cbr_statusActiveRefSourceID)
        self.addNode((1, 2, 4, 0),  # ntpEntStatusActiveRefSourceName utf8str
                     self.cbr_statusActiveRefSourceName)
        self.addNode((1, 2, 5, 0),  # ntpEntStatusActiveOffset DisplayString
                     self.cbr_statusActiveOffset)
        self.addNode((1, 2, 6, 0),  # ntpEntStatusNumberOfRefSources unit32
                     self.cbr_statusNumRefSources)
        self.addNode((1, 2, 7, 0),  # ntpEntStatusDispersion DisplayString
                     self.cbr_statusDispersion)
        self.addNode((1, 2, 8, 0),  # ntpEntStatusEntityUptime TimeTicks
                     self.cbr_statusEntityUptime)
        self.addNode((1, 2, 9, 0),  # ntpEntStatusDateTime NtpDateTime
                     self.cbr_statusDateTime)
        self.addNode((1, 2, 10, 0),  # ntpEntStatusLeapSecond NtpDateTime
                     self.cbr_statusLeapSecond)
        self.addNode((1, 2, 11, 0),  # ntpEntStatusLeapSecondDirection int32
                     self.cbr_statusLeapSecDirection)
        self.addNode((1, 2, 12, 0),  # ntpEntStatusInPkts Counter32
                     self.cbr_statusInPkts)
        self.addNode((1, 2, 13, 0),  # ntpEntStatusOutPkts Counter32
                     self.cbr_statusOutPkts)
        self.addNode((1, 2, 14, 0),  # ntpEntStatusBadVersion Counter32
                     self.cbr_statusBadVersion)
        self.addNode((1, 2, 15, 0),  # ntpEntStatusProtocolError Counter32
                     self.cbr_statusProtocolError)
        self.addNode((1, 2, 16, 0),  # ntpEntStatusNotifications Counter32
                     self.cbr_statusNotifications)
        self.addNode((1, 2, 17, 1, 1))  # ntpEntStatPktMode INTEGER {...}
        self.addNode((1, 2, 17, 1, 2))  # ntpEntStatPktSent Counter32
        self.addNode((1, 2, 17, 1, 3))  # ntpEntStatPktRecived Counter32
        # block 1, 3
        self.addNode((1, 3, 1, 1, 1),  # ntpAssocId uint32 (1..99999)
                     dynamic=self.sub_assocID)
        self.addNode((1, 3, 1, 1, 2),  # ntpAssocName utf8str
                     dynamic=self.sub_assocName)
        self.addNode((1, 3, 1, 1, 3),  # ntpAssocRefId DisplayString
                     dynamic=self.sub_assocRefID)
        self.addNode((1, 3, 1, 1, 4),  # ntpAssocAddressType InetAddressType
                     dynamic=self.sub_assocAddrType)
        self.addNode((1, 3, 1, 1, 5),  # ntpAssocAddress InetAddress SIZE
                     dynamic=self.sub_assocAddr)
        self.addNode((1, 3, 1, 1, 6),  # ntpAssocOffset DisplayString
                     dynamic=self.sub_assocOffset)
        self.addNode((1, 3, 1, 1, 7),  # ntpAssocStratum NtpStratum
                     dynamic=self.sub_assocStratum)
        self.addNode((1, 3, 1, 1, 8),  # ntpAssocStatusJitter DisplayString
                     dynamic=self.sub_assocJitter)
        self.addNode((1, 3, 1, 1, 9),  # ntpAssocStatusDelay DisplayString
                     dynamic=self.sub_assocDelay)
        self.addNode((1, 3, 1, 1, 10),  # ntpAssocStatusDispersion DisplayStr
                     dynamic=self.sub_assocDispersion)
        self.addNode((1, 3, 2, 1, 1),  # ntpAssocStatInPkts Counter32
                     dynamic=self.sub_assocStatInPkts)
        self.addNode((1, 3, 2, 1, 2),  # ntpAssocStatOutPkts Counter32
                     dynamic=self.sub_assocStatOutPkts)
        self.addNode((1, 3, 2, 1, 3),  # ntpAssocStatProtocolError Counter32
                     dynamic=self.sub_assocStatProtoErr)
        # block 1, 4
        self.addNode((1, 4, 1, 0),  # ntpEntHeartbeatInterval unit32
                     self.cbr_entHeartbeatInterval,
                     self.cbw_entHeartbeatInterval)
        self.addNode((1, 4, 2, 0),  # ntpEntNotifBits BITS {...}
                     self.cbr_entNotifBits,
                     self.cbw_entNotifBits)
        # block 1, 5
        self.addNode((1, 5, 1, 0),  # ntpEntNotifMessage utf8str
                     self.cbr_entNotifMessage)
        # block 2  # all compliance statements
        # print(repr(self.oidTree))
        # print(self.oidTree[1]["subids"][1][1][0])
        self.session = ntp.packet.ControlSession()
        self.hostname = hostname if hostname else DEFHOST
        self.session.openhost(self.hostname)
        self.settingsFilename = settingsFile
        # Cache so we don't hammer ntpd, default 1 second timeout
        # Timeout default pulled from a hat: we don't want it to last for
        # long, just not flood ntpd with duplicatte requests during a walk.
        self.cache = ntp.util.Cache(1)
        self.oldValues = {}  # Used by notifications to detect changes
        # spinGap so we don't spam ntpd with requests during notify checks
        self.notifySpinTime = notifySpin
        self.lastNotifyCheck = 0
        self.lastHeartbeat = 0  # Timestamp used for heartbeat notifications
        self.heartbeatInterval = 0  # should save to disk
        self.sentNotifications = 0
        # Notify bits, they control whether the daemon sends notifications.
        # these are saved to disk
        self.notifyModeChange = False  # 1
        self.notifyStratumChange = False  # 2
        self.notifySyspeerChange = False  # 3
        self.notifyAddAssociation = False  # 4
        self.notifyRMAssociation = False  # 5
        self.notifyConfigChange = False  # 6 [This is not implemented]
        self.notifyLeapSecondAnnounced = False  # 7
        self.notifyHeartbeat = False  # 8
        self.misc_loadDynamicSettings()
# =============================================================
# Data read callbacks start here
# comment divider lines represent not yet implemented callbacks
# =============================================================
# Blank: notification OIDs
    def cbr_systemInfo(self, oid, category=None):
        """ntpEntSoftwareName/Version/Vendor/SystemType: one OCTET_STR varbind
        selected by *category* ("name" | "version" | "vendor" | "system")."""
        if category == "name":  # The product name of the running NTP
            data = "NTPsec"
        elif category == "version":  # version string
            data = ntp.util.stdversion()
        elif category == "vendor":  # vendor/author name
            data = "Internet Civil Engineering Institute"
        elif category == "system":  # system / hardware info
            # Extract sysname, release, machine from os.uname() tuple
            uname = os.uname()
            data = " ".join([uname[0], uname[2], uname[4]])
        vb = ax.Varbind(ax.VALUE_OCTET_STR, oid, data)
        return vb

    def cbr_timeResolution(self, oid):
        """ntpEntTimeResolution: clock resolution as fractions of a second."""
        # Uinteger32
        # Arrives in fractional milliseconds
        fuzz = self.safeReadvar(0, ["fuzz"])
        if fuzz is None:
            return None
        fuzz = fuzz["fuzz"]
        # We want to emit fractions of seconds
        # Yes we are flooring instead of rounding: don't want to emit a
        # resolution value higher than ntpd actually produces.
        if fuzz != 0:
            fuzz = int(1 / fuzz)
        else:
            fuzz = 0
        return ax.Varbind(ax.VALUE_GAUGE32, oid, fuzz)

    def cbr_timePrecision(self, oid):
        """ntpEntTimePrecision: ntpd's reported clock precision (log2 sec)."""
        return self.readCallbackSkeletonSimple(oid, "precision",
                                               ax.VALUE_INTEGER)

    def cbr_timeDistance(self, oid):
        """ntpEntTimeDistance: root distance, formatted with units."""
        # Displaystring
        data = self.safeReadvar(0, ["rootdist"], raw=True)
        if data is None:
            return None
        data = ntp.util.unitifyvar(data["rootdist"][1], "rootdist",
                                   width=None, unitSpace=True)
        return ax.Varbind(ax.VALUE_OCTET_STR, oid, data)
# Blank: ntpEntStatus
    def cbr_statusCurrentMode(self, oid):
        """ntpEntStatusCurrentMode: current operating mode of the entity."""
        mode = self.misc_getMode()
        return ax.Varbind(ax.VALUE_INTEGER, oid, mode)

    def cbr_statusStratum(self, oid):
        """ntpEntStatusStratum: the daemon's current stratum."""
        # NTPstratum
        return self.readCallbackSkeletonSimple(oid, "stratum",
                                               ax.VALUE_GAUGE32)

    def cbr_statusActiveRefSourceID(self, oid):
        """ntpEntStatusActiveRefSourceId: association ID of the system peer."""
        # range of uint32
        syspeer = self.misc_getSyspeerID()
        return ax.Varbind(ax.VALUE_GAUGE32, oid, syspeer)

    def cbr_statusActiveRefSourceName(self, oid):
        """ntpEntStatusActiveRefSourceName: canonical DNS name of the peer."""
        # utf8
        data = self.safeReadvar(0, ["peeradr"])
        if data is None:
            return None
        data = ntp.util.canonicalize_dns(data["peeradr"])
        return ax.Varbind(ax.VALUE_OCTET_STR, oid, data)

    def cbr_statusActiveOffset(self, oid):
        """ntpEntStatusActiveOffset: current clock offset, with units."""
        # DisplayString
        data = self.safeReadvar(0, ["koffset"], raw=True)
        if data is None:
            return None
        data = ntp.util.unitifyvar(data["koffset"][1], "koffset",
                                   width=None, unitSpace=True)
        return ax.Varbind(ax.VALUE_OCTET_STR, oid, data)

    def cbr_statusNumRefSources(self, oid):
        """ntpEntStatusNumberOfRefSources: count of current associations."""
        # range of uint32
        try:
            data = self.session.readstat()
            return ax.Varbind(ax.VALUE_GAUGE32, oid, len(data))
        except ntp.packet.ControlException:
            return None

    def cbr_statusDispersion(self, oid):
        """ntpEntStatusDispersion: root dispersion as a display string."""
        # DisplayString
        data = self.safeReadvar(0, ["rootdisp"], raw=True)
        if data is None:
            return None
        return ax.Varbind(ax.VALUE_OCTET_STR, oid, data["rootdisp"][1])
    def cbr_statusEntityUptime(self, oid):
        """ntpEntStatusEntityUptime: time since ntpd stats were (re)set, in
        hundredths of a second (deliberately diverging from the RFC text --
        see the discussion below)."""
        # TimeTicks
        # What the spec claims:
        # The uptime of the NTP entity, (i.e., the time since ntpd was
        # (re-)initialized not sysUptime!). The time is represented in
        # hundreds of seconds since Jan 1, 1970 (00:00:00.000) UTC.
        #
        # First problem: TimeTicks represents hundred*ths* of seconds, could
        # easily be a typo.
        # Second problem: snmpwalk will happily give you a display of
        # how long a period of time a value is, such as uptime since start.
        # That is the opposite of what the spec claims.
        #
        # I am abandoning the spec, and going with what makes a lick of sense
        uptime = self.safeReadvar(0, ["ss_reset"])
        if uptime is None:
            return None
        uptime = uptime["ss_reset"] * 100
        return ax.Varbind(ax.VALUE_TIME_TICKS, oid, uptime)

    def cbr_statusDateTime(self, oid):
        """ntpEntStatusDateTime: reference time as an NtpDateTime octet string."""
        # NtpDateTime
        data = self.safeReadvar(0, ["reftime"])
        if data is None:
            return None
        txt = data["reftime"]
        value = ntp.util.deformatNTPTime(txt)
        return ax.Varbind(ax.VALUE_OCTET_STR, oid, value)

    def cbr_statusLeapSecond(self, oid):  # I am not confident in this yet
        """ntpEntStatusLeapSecond: scheduled leap second (23:59:59 of the
        current day when a leap is pending, zero otherwise)."""
        # NtpDateTime
        DAY = 86400
        fmt = "%.8x%.8x"
        data = self.safeReadvar(0, ["reftime"])
        hasleap = self.safeReadvar(0, ["leap"])
        if (data is None) or (hasleap is None):
            return None
        data = data["reftime"]
        hasleap = hasleap["leap"]
        if hasleap in (1, 2):
            seconds = int(data.split(".")[0], 0)
            days = seconds // DAY
            scheduled = (days * DAY) + (DAY - 1)  # 23:59:59 of $CURRENT_DAY
            formatted = fmt % (scheduled, 0)
        else:
            formatted = fmt % (0, 0)
        value = ntp.util.hexstr2octets(formatted)
        return ax.Varbind(ax.VALUE_OCTET_STR, oid, value)

    def cbr_statusLeapSecDirection(self, oid):
        """ntpEntStatusLeapSecondDirection: +1 forward, -1 backward, 0 none."""
        # range of int32
        leap = self.safeReadvar(0, ["leap"])
        if leap is None:
            return None
        leap = leap["leap"]
        if leap == 1:
            pass  # leap 1 == forward
        elif leap == 2:
            leap = -1  # leap 2 == backward
        else:
            leap = 0  # leap 0 or 3 == no change
        return ax.Varbind(ax.VALUE_INTEGER, oid, leap)
    def cbr_statusInPkts(self, oid):
        """ntpEntStatusInPkts: total packets received by ntpd."""
        return self.readCallbackSkeletonSimple(oid, "io_received",
                                               ax.VALUE_COUNTER32)

    def cbr_statusOutPkts(self, oid):
        """ntpEntStatusOutPkts: total packets sent by ntpd."""
        return self.readCallbackSkeletonSimple(oid, "io_sent",
                                               ax.VALUE_COUNTER32)

    def cbr_statusBadVersion(self, oid):
        """ntpEntStatusBadVersion: packets rejected for an old NTP version."""
        return self.readCallbackSkeletonSimple(oid, "ss_oldver",
                                               ax.VALUE_COUNTER32)
def cbr_statusProtocolError(self, oid):
data = self.safeReadvar(0, ["ss_badformat", "ss_badauth"])
if data is None:
return None
protoerr = 0
for key in data.keys():
protoerr += data[key]
return ax.Varbind(ax.VALUE_COUNTER32, oid, protoerr)
    def cbr_statusNotifications(self, oid):
        """ntpEntStatusNotifications: notifications sent since startup."""
        return ax.Varbind(ax.VALUE_COUNTER32, oid, self.sentNotifications)
##############################
# == Dynamics ==
# assocID
# assocName
# assocRefID
# assocAddrType
# assocAddr
# assocOffset
# assocStratum
# assocJitter
# assocDelay
# assocDispersion
# assocInPackets
# assocOutPackets
# assocProtocolErrors
#########################
    def cbr_entHeartbeatInterval(self, oid):
        """ntpEntHeartbeatInterval: seconds between heartbeat notifications."""
        # uint32
        return ax.Varbind(ax.VALUE_GAUGE32, oid, self.heartbeatInterval)

    def cbr_entNotifBits(self, oid):
        """ntpEntNotifBits: which notification types are currently enabled,
        packed as an SNMP BITS value (bit 0 is reserved/unused)."""
        # BITS
        data = ax.bools2Bits((False,  # notUsed(0)
                              self.notifyModeChange,
                              self.notifyStratumChange,
                              self.notifySyspeerChange,
                              self.notifyAddAssociation,
                              self.notifyRMAssociation,
                              self.notifyConfigChange,
                              self.notifyLeapSecondAnnounced,
                              self.notifyHeartbeat))
        return ax.Varbind(ax.VALUE_OCTET_STR, oid, data)

    ##########################

    def cbr_entNotifMessage(self, oid):
        """ntpEntNotifMessage: text of the most recent notification event."""
        # utf8str
        return ax.Varbind(ax.VALUE_OCTET_STR, oid, "no event")
#########################
# =====================================
# Data write callbacks
# Returns an error value (or noError)
# Must check that the value is correct for the bind, this does not mean
# the type: the master agent handles that
# Actions: test, undo, commit, cleanup
# =====================================
def cbw_entHeartbeatInterval(self, action, varbind, oldData=None):
if action == "test":
return ax.ERR_NOERROR
elif action == "commit":
self.heartbeatInterval = varbind.payload
self.misc_storeDynamicSettings()
return ax.ERR_NOERROR
elif action == "undo":
self.heartbeatInterval = oldData
self.misc_storeDynamicSettings()
return ax.ERR_NOERROR
elif action == "cleanup":
pass
    def cbw_entNotifBits(self, action, varbind, oldData=None):
        """Write callback for entNotifBits: sets the notification flags.

        NOTE(review): cbr_entNotifBits emits a leading reserved notUsed(0)
        bit before the eight flags, but here bits2Bools(payload, 8) feeds
        the first decoded bit into notifyModeChange -- confirm bits2Bools'
        second argument accounts for the reserved bit.
        """
        if action == "test":
            return ax.ERR_NOERROR
        elif action == "commit":
            (self.notifyModeChange,
             self.notifyStratumChange,
             self.notifySyspeerChange,
             self.notifyAddAssociation,
             self.notifyRMAssociation,
             self.notifyConfigChange,
             self.notifyLeapSecondAnnounced,
             self.notifyHeartbeat) = ax.bits2Bools(varbind.payload, 8)
            self.misc_storeDynamicSettings()
            return ax.ERR_NOERROR
        elif action == "undo":
            # Restore the flags from the snapshot taken before "commit"
            (self.notifyModeChange,
             self.notifyStratumChange,
             self.notifySyspeerChange,
             self.notifyAddAssociation,
             self.notifyRMAssociation,
             self.notifyConfigChange,
             self.notifyLeapSecondAnnounced,
             self.notifyHeartbeat) = ax.bits2Bools(oldData, 8)
            self.misc_storeDynamicSettings()
            return ax.ERR_NOERROR
        elif action == "cleanup":
            pass
# ========================================================================
# Dynamic tree generator callbacks
#
# The structure of these callbacks is somewhat complicated because they
# share code that is potentially finicky.
#
# The dynamicCallbackSkeleton() method handles the construction of the
# MIB tree, and the placement of the handler() within it. It also provides
# some useful data to the handler() via the readCallback() layer.
# ========================================================================
# Packet Mode Table
# These are left as stubs for now. Information is lacking on where the
# data should come from.
    def sub_statPktMode(self):
        """Stub: packet-mode statistics (no known data source yet)."""
        pass
    def sub_statPktSent(self):
        """Stub: packets-sent statistics (no known data source yet)."""
        pass
    def sub_statPktRecv(self):
        """Stub: packets-received statistics (no known data source yet)."""
        pass
# Association Table
def sub_assocID(self):
def handler(oid, associd):
return ax.Varbind(ax.VALUE_GAUGE32, oid, associd)
return self.dynamicCallbackSkeleton(handler)
    def sub_assocName(self):
        """Table column: association source address (raw text form)."""
        return self.dynamicCallbackPeerdata("srcadr", True,
                                            ax.VALUE_OCTET_STR)
    def sub_assocRefID(self):
        """Table column: association reference ID."""
        def handler(oid, associd):
            pdata = self.misc_getPeerData()
            if pdata is None:
                return None
            # elaborate code in util.py indicates this may not be stable
            try:
                refid = pdata[associd]["refid"][1]
            except IndexError:
                refid = ""
            return ax.Varbind(ax.VALUE_OCTET_STR, oid, refid)
        return self.dynamicCallbackSkeleton(handler)
def sub_assocAddrType(self):
def handler(oid, associd):
pdata = self.misc_getPeerData()
if pdata is None:
return None
srcadr = pdata[associd]["srcadr"][1]
try:
socklen = len(socket.getaddrinfo(srcadr, None)[0][-1])
except socket.gaierror:
socklen = None
if socklen == 2: # ipv4
addrtype = 1
elif socklen == 4: # ipv6
addrtype = 2
else:
# there is also ipv4z and ipv6z..... don't know how to
# detect those yet. Or if I even need to.
addrtype = 0 # is this ok? or should it return a NULL?
return ax.Varbind(ax.VALUE_INTEGER, oid, addrtype)
return self.dynamicCallbackSkeleton(handler)
def sub_assocAddr(self):
def handler(oid, associd):
pdata = self.misc_getPeerData()
if pdata is None:
return None
srcadr = pdata[associd]["srcadr"][1]
# WARNING: I am only guessing that this is correct
# Discover what type of address we have
try:
sockinfo = socket.getaddrinfo(srcadr, None)[0][-1]
addr = sockinfo[0]
ipv6 = True if len(sockinfo) == 4 else False
except socket.gaierror:
addr = None # how to handle?
ipv6 = None
# Convert address string to octets
srcadr = []
if not ipv6:
pieces = addr.split(".")
for piece in pieces:
try:
srcadr.append(int(piece)) # feed it a list of ints
except ValueError:
# Have gotten piece == "" before. Skip over that.
# Still try to return data because it is potential
# debugging information.
continue
elif ipv6:
pieces = addr.split(":")
for piece in pieces:
srcadr.append(ntp.util.hexstr2octets(piece))
srcadr = "".join(srcadr) # feed it an octet string
# The octet string encoder can handle either chars or 0-255
# ints. We use both of those options.
return ax.Varbind(ax.VALUE_OCTET_STR, oid, srcadr)
return self.dynamicCallbackSkeleton(handler)
    def sub_assocOffset(self):
        """Table column: peer offset, unitified for display."""
        def handler(oid, associd):
            pdata = self.misc_getPeerData()
            if pdata is None:
                return None
            offset = pdata[associd]["offset"][1]
            offset = ntp.util.unitifyvar(offset, "offset", width=None,
                                         unitSpace=True)
            return ax.Varbind(ax.VALUE_OCTET_STR, oid, offset)
        return self.dynamicCallbackSkeleton(handler)
    def sub_assocStratum(self):
        """Table column: peer stratum (numeric form)."""
        return self.dynamicCallbackPeerdata("stratum", False,
                                            ax.VALUE_GAUGE32)
    def sub_assocJitter(self):
        """Table column: peer jitter (raw text form)."""
        return self.dynamicCallbackPeerdata("jitter", True,
                                            ax.VALUE_OCTET_STR)
    def sub_assocDelay(self):
        """Table column: peer delay (raw text form)."""
        return self.dynamicCallbackPeerdata("delay", True,
                                            ax.VALUE_OCTET_STR)
    def sub_assocDispersion(self):
        """Table column: peer root dispersion (raw text form)."""
        return self.dynamicCallbackPeerdata("rootdisp", True,
                                            ax.VALUE_OCTET_STR)
    def sub_assocStatInPkts(self):
        """Table column: packets received from this association."""
        def handler(oid, associd):
            inpkts = self.safeReadvar(associd, ["received"])
            if inpkts is None:
                return None
            inpkts = inpkts["received"]
            return ax.Varbind(ax.VALUE_COUNTER32, oid, inpkts)
        return self.dynamicCallbackSkeleton(handler)
    def sub_assocStatOutPkts(self):
        """Table column: packets sent to this association."""
        def handler(oid, associd):
            outpkts = self.safeReadvar(associd, ["sent"])
            if outpkts is None:
                return None
            outpkts = outpkts["sent"]
            return ax.Varbind(ax.VALUE_COUNTER32, oid, outpkts)
        return self.dynamicCallbackSkeleton(handler)
def sub_assocStatProtoErr(self):
def handler(oid, associd):
pvars = self.safeReadvar(associd, ["badauth", "bogusorg",
"seldisp", "selbroken"])
if pvars is None:
return None
protoerr = 0
for key in pvars.keys():
protoerr += pvars[key]
return ax.Varbind(ax.VALUE_COUNTER32, oid, protoerr)
return self.dynamicCallbackSkeleton(handler)
# =====================================
# Notification handlers
# =====================================
    def checkNotifications(self, control):
        """Poll for and emit any enabled notifications.

        Called from the packet loop; rate-limited by notifySpinTime so
        ntpd is not hammered with readvar requests every iteration.
        """
        currentTime = time.time()
        if (currentTime - self.lastNotifyCheck) < self.notifySpinTime:
            return
        self.lastNotifyCheck = currentTime
        if self.notifyModeChange:
            self.doNotifyModeChange(control)
        if self.notifyStratumChange:
            self.doNotifyStratumChange(control)
        if self.notifySyspeerChange:
            self.doNotifySyspeerChange(control)
        # Both add and remove have to look at the same data, don't want them
        # stepping on each other. Therefore the functions are combined.
        if self.notifyAddAssociation and self.notifyRMAssociation:
            self.doNotifyChangeAssociation(control, "both")
        elif self.notifyAddAssociation:
            self.doNotifyChangeAssociation(control, "add")
        elif self.notifyRMAssociation:
            self.doNotifyChangeAssociation(control, "rm")
        if self.notifyConfigChange:
            self.doNotifyConfigChange(control)
        if self.notifyLeapSecondAnnounced:
            self.doNotifyLeapSecondAnnounced(control)
        if self.notifyHeartbeat:
            self.doNotifyHeartbeat(control)
def doNotifyModeChange(self, control):
oldMode = self.oldValues.get("mode")
newMode = self.misc_getMode() # connection failure handled by method
if oldMode is None:
self.oldValues["mode"] = newMode
return
elif oldMode != newMode:
self.oldValues["mode"] = newMode
vl = [ax.Varbind(ax.VALUE_OID, snmpTrapOID,
ax.OID(ntpRootOID + (0, 1))),
ax.Varbind(ax.VALUE_INTEGER, ntpRootOID + (1, 2, 1),
newMode)]
control.sendNotify(vl)
self.sentNotifications += 1
    def doNotifyStratumChange(self, control):
        """Send a trap when the daemon's stratum changes."""
        oldStratum = self.oldValues.get("stratum")
        newStratum = self.safeReadvar(0, ["stratum"])
        if newStratum is None:
            return  # couldn't read
        newStratum = newStratum["stratum"]
        if oldStratum is None:
            # First observation: record a baseline, send nothing.
            self.oldValues["stratum"] = newStratum
            return
        elif oldStratum != newStratum:
            self.oldValues["stratum"] = newStratum
            datetime = self.safeReadvar(0, ["reftime"])
            if datetime is None:
                datetime = ""
            else:
                datetime = ntp.util.deformatNTPTime(datetime["reftime"])
            vl = [ax.Varbind(ax.VALUE_OID, snmpTrapOID,
                             ax.OID(ntpRootOID + (0, 2))),
                  ax.Varbind(ax.VALUE_OCTET_STR, ntpRootOID + (1, 2, 9),
                             datetime),
                  ax.Varbind(ax.VALUE_GAUGE32, ntpRootOID + (1, 2, 2),
                             newStratum),
                  ax.Varbind(ax.VALUE_OCTET_STR, ntpRootOID + (1, 5, 1),
                             "Stratum changed")]  # Uh... what goes here?
            control.sendNotify(vl)
            self.sentNotifications += 1
    def doNotifySyspeerChange(self, control):
        """Send a trap when the system peer changes."""
        oldSyspeer = self.oldValues.get("syspeer")
        newSyspeer = self.safeReadvar(0, ["peeradr"])
        if newSyspeer is None:
            return  # couldn't read
        newSyspeer = newSyspeer["peeradr"]
        if oldSyspeer is None:
            # First observation: record a baseline, send nothing.
            self.oldValues["syspeer"] = newSyspeer
            return
        elif oldSyspeer != newSyspeer:
            self.oldValues["syspeer"] = newSyspeer
            datetime = self.safeReadvar(0, ["reftime"])
            if datetime is None:
                datetime = ""
            else:
                datetime = ntp.util.deformatNTPTime(datetime["reftime"])
            syspeer = self.misc_getSyspeerID()
            vl = [ax.Varbind(ax.VALUE_OID, snmpTrapOID,
                             ax.OID(ntpRootOID + (0, 3))),
                  ax.Varbind(ax.VALUE_OCTET_STR, ntpRootOID + (1, 2, 9),
                             datetime),
                  ax.Varbind(ax.VALUE_GAUGE32, ntpRootOID + (1, 2, 3),
                             syspeer),
                  ax.Varbind(ax.VALUE_OCTET_STR, ntpRootOID + (1, 5, 1),
                             "SysPeer changed")]  # Uh... what goes here?
            control.sendNotify(vl)
            self.sentNotifications += 1
def doNotifyChangeAssociation(self, control, which):
# Add and remove are combined because they use the same data source
# and it would be easy to have them stepping on each other.
changes = self.misc_getAssocListChanges()
if changes is None:
return
datetime = self.safeReadvar(0, ["reftime"])
if datetime is None:
datetime = ""
else:
datetime = ntp.util.deformatNTPTime(datetime["reftime"])
adds, rms = changes
if which in ("add", "both"):
for name in adds:
vl = [ax.Varbind(ax.VALUE_OID, snmpTrapOID,
ax.OID(ntpRootOID + (0, 4))), # Add
ax.Varbind(ax.VALUE_OCTET_STR, ntpRootOID + (1, 2, 9),
datetime),
ax.Varbind(ax.VALUE_OCTET_STR,
ntpRootOID + (1, 3, 1, 1, 2),
name),
ax.Varbind(ax.VALUE_OCTET_STR, ntpRootOID + (1, 5, 1),
"Association added")]
control.sendNotify(vl)
self.sentNotifications += 1
if which in ("rm", "both"):
for name in rms:
vl = [ax.Varbind(ax.VALUE_OID, snmpTrapOID,
ax.OID(ntpRootOID + (0, 5))), # Remove
ax.Varbind(ax.VALUE_OCTET_STR, ntpRootOID + (1, 2, 9),
datetime),
ax.Varbind(ax.VALUE_OCTET_STR,
ntpRootOID + (1, 3, 1, 1, 2),
name),
ax.Varbind(ax.VALUE_OCTET_STR, ntpRootOID + (1, 5, 1),
"Association removed")]
control.sendNotify(vl)
self.sentNotifications += 1
    def doNotifyConfigChange(self, control):
        """Configuration-change trap: deliberately unimplemented.

        This left unimplemented because the MIB wants something we can't
        and/or shouldn't provide.
        """
        pass
    def doNotifyLeapSecondAnnounced(self, control):
        """Send a trap when a leap second announcement first appears."""
        oldLeap = self.oldValues.get("leap")
        newLeap = self.safeReadvar(0, ["leap"])
        if newLeap is None:
            return
        newLeap = newLeap["leap"]
        if oldLeap is None:
            # First observation: record a baseline, send nothing.
            self.oldValues["leap"] = newLeap
            return
        if oldLeap != newLeap:
            self.oldValues["leap"] = newLeap
            if (oldLeap in (0, 3)) and (newLeap in (1, 2)):
                # changed noleap or unsync to a leap announcement
                datetime = self.safeReadvar(0, ["reftime"])
                if datetime is None:
                    datetime = ""
                else:
                    datetime = ntp.util.deformatNTPTime(datetime["reftime"])
                vl = [ax.Varbind(ax.VALUE_OID, snmpTrapOID,
                                 ax.OID(ntpRootOID + (0, 7))),
                      ax.Varbind(ax.VALUE_OCTET_STR, ntpRootOID + (1, 2, 9),
                                 datetime),
                      ax.Varbind(ax.VALUE_OCTET_STR, ntpRootOID + (1, 5, 1),
                                 "Leap second announced")]
                control.sendNotify(vl)
                self.sentNotifications += 1
    def doNotifyHeartbeat(self, control):  # TODO: check if ntpd running?
        """Send the periodic heartbeat trap.

        An interval of 0 means fire once and disable the flag; otherwise
        fire whenever heartbeatInterval seconds have elapsed since the
        last heartbeat (monotonic clock).
        """
        # NOTE(review): the interval varbind uses ntpRootOID + (0, 1, 4, 1)
        # while the other scalar varbinds in this class use (1, ...)
        # prefixes -- confirm this OID against the MIB.
        vl = [ax.Varbind(ax.VALUE_OID, snmpTrapOID,
                         ax.OID(ntpRootOID + (0, 8))),
              ax.Varbind(ax.VALUE_GAUGE32, ntpRootOID + (0, 1, 4, 1),
                         self.heartbeatInterval)]
        if self.heartbeatInterval == 0:  # interval == 0 means send once
            self.notifyHeartbeat = False
            control.sendNotify(vl)
            self.sentNotifications += 1
        else:
            current = ntp.util.monoclock()
            if (current - self.lastHeartbeat) > self.heartbeatInterval:
                self.lastHeartbeat = current
                control.sendNotify(vl)
                self.sentNotifications += 1
# =====================================
# Misc data helpers (not part of the MIB proper)
# =====================================
def misc_loadDynamicSettings(self):
if self.settingsFilename is None:
return
def boolify(d, k):
return True if d[k][0][1] == "True" else False
optionList = ("notify-mode-change", "notify-stratum-change",
"notify-syspeer-change", "notify-add-association",
"notify-rm-association", "notify-leap-announced",
"notify-heartbeat", "heartbeat-interval")
settings = loadSettings(self.settingsFilename, optionList)
if settings is None:
return
for key in settings.keys():
if key == "notify-mode-change":
self.notifyModeChange = boolify(settings, key)
elif key == "notify-stratum-change":
self.notifyStratumChange = boolify(settings, key)
elif key == "notify-syspeer-change":
self.notifySyspeerChange = boolify(settings, key)
elif key == "notify-add-association":
self.notifyAddAssociation = boolify(settings, key)
elif key == "notify-rm-association":
self.notifyRMAssociation = boolify(settings, key)
elif key == "notify-leap-announced":
self.notifyLeapSecondAnnounced = boolify(settings, key)
elif key == "notify-heartbeat":
self.notifyHeartbeat = boolify(settings, key)
elif key == "heartbeat-interval":
self.heartbeatInterval = settings[key][0][1]
def misc_storeDynamicSettings(self):
if self.settingsFilename is None:
return
settings = {}
settings["notify-mode-change"] = str(self.notifyModeChange)
settings["notify-stratum-change"] = str(self.notifyStratumChange)
settings["notify-syspeer-change"] = str(self.notifySyspeerChange)
settings["notify-add-association"] = str(self.notifyAddAssociation)
settings["notify-rm-association"] = str(self.notifyRMAssociation)
settings["notify-leap-announced"] = str(self.notifyLeapSecondAnnounced)
settings["notify-heartbeat"] = str(self.notifyHeartbeat)
settings["heartbeat-interval"] = str(self.heartbeatInterval)
storeSettings(self.settingsFilename, settings)
def misc_getAssocListChanges(self):
# We need to keep the names, because those won't be available
# after they have been removed.
oldAssoc = self.oldValues.get("assoc")
newAssoc = {}
# Yes, these are cached, for a very short time
pdata = self.misc_getPeerData()
if pdata is None:
return
ids = self.misc_getPeerIDs()
if ids is None:
return
for associd in ids:
addr = pdata[associd]["srcadr"][1]
name = ntp.util.canonicalize_dns(addr)
newAssoc[associd] = name
if oldAssoc is None:
self.oldValues["assoc"] = newAssoc
return
elif oldAssoc != newAssoc:
oldIDs = oldAssoc.keys()
newIDs = newAssoc.keys()
adds = []
rms = []
for associd in oldIDs + newIDs:
if associd not in newIDs: # removed
rms.append(oldAssoc[associd])
if associd not in oldIDs: # added
adds.append(newAssoc[associd])
return (adds, rms)
return
    def misc_getMode(self):  # FIXME: not fully implemented
        """Derive the current operating mode value for the MIB.

        1 == not running, 2 == not yet synced, 3 == no reference
        configured (not yet detectable), 4 == local clock, 5 == local
        refclock, 6 == remote NTP, 99 == unknown.
        """
        try:
            # Don't care about the data, this is a ploy to get the rstatus
            self.session.readvar(0, ["stratum"])
        except ntp.packet.ControlException as e:
            if e.message == ntp.packet.SERR_SOCKET:
                # Can't connect, ntpd probably not running
                return 1
            else:
                raise e
        rstatus = self.session.rstatus  # a ploy to get the system status
        source = ntp.control.CTL_SYS_SOURCE(rstatus)
        if source == ntp.control.CTL_SST_TS_UNSPEC:
            mode = 2  # Not yet synced
        elif False:
            # Placeholder: no way to detect this state yet
            mode = 3  # No reference configured
        elif source == ntp.control.CTL_SST_TS_LOCAL:
            mode = 4  # Distributing local clock (low accuracy)
        elif source in (ntp.control.CTL_SST_TS_ATOM,
                        ntp.control.CTL_SST_TS_LF,
                        ntp.control.CTL_SST_TS_HF,
                        ntp.control.CTL_SST_TS_UHF):
            # I am not sure if I should be including the radios in this
            mode = 5  # Synced to local refclock
        elif source == ntp.control.CTL_SST_TS_NTP:
            # Should this include "other"? That covers things like chrony...
            mode = 6  # Sync to remote NTP
        else:
            mode = 99  # Unknown
        return mode
def misc_getSyspeerID(self):
peers = self.misc_getPeerData()
syspeer = 0
for associd in peers.keys():
rstatus = peers[associd]["peerstatus"]
if (ntp.control.CTL_PEER_STATVAL(rstatus) & 0x7) == \
ntp.control.CTL_PST_SEL_SYSPEER:
syspeer = associd
break
return syspeer
def safeReadvar(self, associd, variables=None, raw=False):
# Use this when we want to catch packet errors, but don't care
# about what they are
try:
return self.session.readvar(associd, varlist=variables, raw=raw)
except ntp.packet.ControlException:
return None
def dynamicCallbackPeerdata(self, variable, raw, valueType):
rawindex = 1 if raw else 0
def handler(oid, associd):
pdata = self.misc_getPeerData()
if pdata is None:
return None
value = pdata[associd][variable][rawindex]
return ax.Varbind(valueType, oid, value)
return self.dynamicCallbackSkeleton(handler)
def dynamicCallbackSkeleton(self, handler):
# Build a dynamic MIB tree, installing the provided handler in it
def readCallback(oid):
# This function assumes that it is a leaf node and that the
# last number in the OID is the index.
index = oid.subids[-1] # if called properly this works (Ha!)
index -= 1 # SNMP reserves index 0, effectively 1-based lists
associd = self.misc_getPeerIDs()[index]
return handler(oid, associd)
subs = {}
associds = self.misc_getPeerIDs() # need the peer count
for i in range(len(associds)):
subs[i+1] = {"reader": readCallback}
return subs
def readCallbackSkeletonSimple(self, oid, varname, dataType):
# Used for entries that just need a simple variable retrevial
# but do not need any processing.
data = self.safeReadvar(0, [varname])
if data is None:
return None
else:
return ax.Varbind(dataType, oid, data[varname])
def misc_getPeerIDs(self):
peerids = self.cache.get("peerids")
if peerids is None:
try:
peerids = [x.associd for x in self.session.readstat()]
except ntp.packet.ControlException:
peerids = []
peerids.sort()
self.cache.set("peerids", peerids)
return peerids
def misc_getPeerData(self):
peerdata = self.cache.get("peerdata")
if peerdata is None:
associds = self.misc_getPeerIDs()
peerdata = {}
for aid in associds:
try:
pdata = self.safeReadvar(aid, raw=True)
pdata["peerstatus"] = self.session.rstatus
except IOError:
continue
peerdata[aid] = pdata
self.cache.set("peerdata", peerdata)
return peerdata
def connect(address):
    """Connect to the AgentX master agent and return the socket.

    address is either a Unix-domain socket path (str) or a (host, port)
    pair.  Exits the process on connection failure.
    """
    try:
        if type(address) is str:
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.connect(address)
        else:
            host, port = address[0], address[1]
            af, _, _, _, _ = socket.getaddrinfo(host, port)[0]
            sock = socket.socket(af, socket.SOCK_STREAM)
            sock.connect((host, port))
    except socket.error as msg:
        log("Connection to %s failure: %s" % (repr(address), repr(msg)), 1)
        sys.exit(1)
    log("connected to master agent at " + repr(address), 3)
    return sock
def mainloop(snmpSocket, reconnectionAddr, host=None):
    """Run the AgentX packet loop, reconnecting to the master on loss.

    snmpSocket: connected socket to the AgentX master agent
    reconnectionAddr: address to reconnect to if the master drops us
    host: the NTP daemon to monitor (handed to DataSource)
    """
    log("initing loop", 3)
    dbase = DataSource(host, "/var/ntpsntpd/notify.conf")
    while True:  # Loop reconnection attempts
        control = PacketControl(snmpSocket, dbase, logfp=logfp, debug=debug)
        control.loopCallback = dbase.checkNotifications
        control.initNewSession()
        if not control.mainloop(True):  # disconnected
            snmpSocket.close()
            snmpSocket = connect(reconnectionAddr)
            log("disconnected from master, attempting reconnect", 2)
        else:  # Something else happened
            break
def daemonize(runfunc, *runArgs):
    """Fork into the background, detach stdio, then call runfunc(*runArgs)."""
    pid = os.fork()
    if pid < 0:
        # NOTE(review): os.fork() raises OSError rather than returning a
        # negative pid, so this branch is believed unreachable -- confirm.
        log("Forking error " + str(pid), 1)
        sys.exit(pid)
    elif pid > 0:  # We are the parent
        log("Daemonization success, child pid: " + str(pid), 3)
        sys.exit(0)
    # We must be the child
    os.umask(0)
    sid = os.setsid()
    # chdir should be here, change to what? root?
    global logfp
    if logfp == sys.stderr:
        logfp = None  # stderr is about to close; stop logging through it
    sys.stdin.close()
    sys.stdin = None
    sys.stdout.close()
    sys.stdout = None
    sys.stderr.close()
    sys.stderr = None
    runfunc(*runArgs)
def loadSettings(filename, optionList):
    """Parse a config file, returning {option: token list} for known options.

    Returns None when the file does not exist.  Each value is the parsed
    token list for that line, minus the leading option name.
    """
    log("Loading config file: %s" % filename, 3)
    if not os.path.isfile(filename):
        return None
    with open(filename) as f:
        lines = ntp.util.parseConf(f.read())
    options = {}
    for line in lines:
        _, token = line[0]  # (isQuoted, text) pair; quoting irrelevant here
        if token in optionList:
            options[token] = line[1:]
    return options
def storeSettings(filename, settings):
    """Write a {key: value} settings dict to filename, one pair per line.

    Creates the containing directory first if it does not already exist.
    """
    dirname = os.path.dirname(filename)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    lines = ["%s %s\n" % (key, settings[key]) for key in settings.keys()]
    with open(filename, "w") as f:
        f.write("".join(lines))
# Command-line help text, printed for -h/--help and on option errors.
usage = """
USAGE: ntpsnmpd [-n] [ntp host]
Flg Arg Option-Name Description
-n no no-fork Do not fork and daemonize.
-x Adr master-addr Specify address for connecting to the master agent
- default /var/agentx/master
-d no debug-level Increase output debug message level
- may appear multiple times
-l Str logfile Logs debug messages to the provided filename
-D Int set-debug-level Set the output debug message level
- may appear multiple times
-h no help Print a usage message.
-V no version Output version information and exit
"""
if __name__ == "__main__":
    bin_ver = "ntpsec-@NTPSEC_VERSION_EXTENDED@"
    ntp.util.stdversioncheck(bin_ver)
    # Parse the command line first so a bad option fails fast.
    try:
        (options, arguments) = getopt.getopt(
            sys.argv[1:],
            "nx:dD:Vhl:c:",
            ["no-fork", "master-address=", "debug-level", "set-debug-level=",
             "version", "help", "logfile=", "configfile="])
    except getopt.GetoptError as e:
        sys.stderr.write("%s\n" % e)
        sys.stderr.write(usage)
        raise SystemExit(1)
    masterAddr = "/var/agentx/master"
    logfile = DEFLOG
    hostname = DEFHOST
    # Check for non-default config-file
    conffile = "/etc/ntpsnmpd.conf"
    for (switch, val) in options:
        if switch in ("-c", "--configfile"):
            conffile = val
            break
    # Load configuration file; command-line switches override it below.
    conf = loadSettings(conffile,
                        ("master-addr", "logfile", "loglevel", "ntp-addr"))
    if conf is not None:
        for key in conf.keys():
            if key == "master-addr":  # Address of the SNMP master daemon
                val = conf[key][0][1]
                if ":" in val:
                    host, port = val.split(":")
                    port = int(port)
                    masterAddr = (host, port)
                else:
                    masterAddr = val
            elif key == "logfile":
                logfile = conf[key][0][1]
            elif key == "ntp-addr":  # Address of the NTP daemon
                hostname = conf[key][0][1]
            elif key == "loglevel":
                errmsg = "Error: loglevel parameter '%s' not a number\n"
                # Cast like the -D switch does; a bare assignment left
                # debug as a string and made errmsg dead code.
                debug = ntp.util.safeargcast(conf[key][0][1], int,
                                             errmsg, usage)
    fileLogging = False
    for (switch, val) in options:
        if switch in ("-n", "--no-fork"):
            nofork = True
        elif switch in ("-x", "--master-addr"):
            if ":" in val:
                host, port = val.split(":")
                port = int(port)
                masterAddr = (host, port)
            else:
                masterAddr = val
        elif switch in ("-d", "--debug-level"):
            debug += 1
        elif switch in ("-D", "--set-debug-level"):
            errmsg = "Error: -D parameter '%s' not a number\n"
            debug = ntp.util.safeargcast(val, int, errmsg, usage)
        elif switch in ("-V", "--version"):
            print("ntpsnmpd %s" % ntp.util.stdversion())
            raise SystemExit(0)
        elif switch in ("-h", "--help"):
            print(usage)
            raise SystemExit(0)
        elif switch in ("-l", "--logfile"):
            logfile = val
            fileLogging = True
    if not nofork:
        # A daemonized process loses its terminal; force file logging.
        fileLogging = True
    if fileLogging:
        if logfp != sys.stderr:
            logfp.close()
        logfp = open(logfile, "a", 1)  # 1 => line buffered
    hostname = arguments[0] if arguments else DEFHOST
    # Connect here so it can always report a connection error
    sock = connect(masterAddr)
    # mainloop() takes (socket, reconnection address, ntpd host); it was
    # previously called without masterAddr, which made it reconnect to
    # the NTP host and monitor the default daemon instead.
    if nofork:
        mainloop(sock, masterAddr, hostname)
    else:
        daemonize(mainloop, sock, masterAddr, hostname)
| ntpsec/ntpsec | ntpclients/ntpsnmpd.py | ntpsnmpd.py | py | 47,973 | python | en | code | 225 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.