hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cf296e88d03c596024b49c000c2c21fe1354248f | 3,991 | py | Python | main.py | prjavidi/C- | 76e7c7720a921e48726ad652cfc0f1000f9a2b3e | [
"MIT"
] | null | null | null | main.py | prjavidi/C- | 76e7c7720a921e48726ad652cfc0f1000f9a2b3e | [
"MIT"
] | null | null | null | main.py | prjavidi/C- | 76e7c7720a921e48726ad652cfc0f1000f9a2b3e | [
"MIT"
] | null | null | null | '''change the below arguments to check different tasks'''
# Number of MNIST rows to load for training / testing.
TRAINSIZE = 5000
TESTSIZE = 500
'''To check TASK 3 put Normalize=1 otherwise 0'''
# NOTE(review): "Nomalize" is a typo for "Normalize" but is referenced below, so it is kept.
# 1 -> scale pixels into [0, 1]; 0 -> binarize pixels against `threshold`.
Nomalize = 1
learningRate = 0.01
# Pixel cutoff used only by the binarization branch (Nomalize == 0).
threshold = 85
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
@np.vectorize
def sigmoid(x):
return 1 / (1 + np.e ** -x)
def normalize(data):
    """Scale pixel values from [0, 255] into [0, 1].

    The array is modified in place (matching the original element-by-element
    loop) and also returned for convenience.

    Args:
        data: numpy float array of pixel intensities in [0, 255].

    Returns:
        The same array object, divided by 255.
    """
    # Vectorized in-place division replaces the Python-level per-row loop;
    # call sites pass float arrays created with np.asfarray, so `/=` is safe.
    data /= 255
    return data
'''Change the below numbers to pick how many samples you need'''
# Load the raw MNIST CSVs; column 0 is the label, columns 1..784 are pixels.
trainData = np.loadtxt("mnist_train.csv", delimiter=",", max_rows=TRAINSIZE)
testData = np.loadtxt("mnist_test.csv", delimiter=",", max_rows=TESTSIZE)
print(trainData.shape)
print(testData.shape)
# Step 0: Normalization to have 0 and 1
# NOTE(review): np.asfarray was removed in NumPy 2.0; use
# np.asarray(..., dtype=float) if upgrading past that.
trainImg = np.asfarray(trainData[:, 1:])
testImg = np.asfarray(testData[:, 1:])
# to normalize dataset with binary function
if Nomalize == 0:
    # Binarize each pixel to 0/1 against the global threshold.
    trainImg[trainImg < threshold] = 0
    trainImg[trainImg >= threshold] = 1
    testImg[testImg < threshold] = 0
    testImg[testImg >= threshold] = 1
else:
    # to normalize dataset in range [0,1]
    trainImg = normalize(trainImg)
    testImg = normalize(testImg)
train_labels = np.asfarray(trainData[:, :1])
test_labels = np.asfarray(testData[:, :1])
no_of_different_labels = 10
lr = np.arange(10)
# One-hot encode labels by broadcasting the (N, 1) labels against the (10,) digits.
# Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin float is the documented replacement and is numerically identical.
train_labels_one_hot = (lr == train_labels).astype(float)
test_labels_one_hot = (lr == test_labels).astype(float)
# Step 1: Initialize parameters and weights
inputNodes = 784
outputNodes = 10
epoch = 1
# w keeps the bias in column 0 and the 784 pixel weights in columns 1..784.
w = np.zeros((outputNodes, inputNodes + 1))
w[:, :] = 0.1
# Step 2: Apply input x from training set
MSE = []
while epoch < 50:
    mse = []
    for idx in range(len(trainImg)):
        x = trainImg[idx]
        d = train_labels_one_hot[idx]
        V = np.dot(w[:, 1:], x) + w[:, 0]
        Y = np.zeros(outputNodes)
        # step 4: applying activation function
        for i in range(outputNodes):
            if Nomalize == 0:
                # Step activation for the binarized variant.
                if V[i] >= 0:
                    Y[i] = 1
                else:
                    Y[i] = 0
            else:
                Y[i] = sigmoid(V[i])
        # Delta rule: adjust weights and bias by learningRate * error.
        e = d - Y
        # e= np.array([e])
        w[:, 1:] += (learningRate * (e[:,None] * x[None,:]))
        w[:, 0] += learningRate * e
        # print("MSE: ", float(MSE))
        mse.append(np.sum((d - Y) ** 2))
    MSE.append(np.sum(mse) / 2)
    epoch += 1
    if MSE[-1] < 0.001:
        break
# print("epoch: ", epoch,", MSE:", MSE)
fig, ax = plt.subplots()
numberArrayTestIncorrect = np.zeros(10)
numberArrayTest = np.zeros(10)
ax.plot(MSE)
ax.set(xlabel='Iteration', ylabel='MSE', title='Learning curve for learning rate=' + str(learningRate))
ax.grid()
plt.show()
# testing process:
correct = []
incorrect = []
for idx in range(len(testImg)):
    x = testImg[idx][np.newaxis]
    x = x.T
    checkIdx = int(test_labels[idx][0])
    d = test_labels_one_hot[idx]
    V = np.dot(w[:, 1:], x) + w[:, 0][np.newaxis].T
    Y = np.zeros(outputNodes)
    # Hard threshold at test time regardless of the training activation used.
    for i in range(outputNodes):
        if V[i] >= 0:
            Y[i] = 1
        else:
            Y[i] = 0
    if np.array_equal(d, Y):
        correct.append(1)
        numberArrayTest[checkIdx] += 1
    else:
        incorrect.append(1)
        numberArrayTestIncorrect[checkIdx] += 1
print("Correct=", sum(correct), ", incorrect: ", sum(incorrect), ", accuracy: ", sum(correct)/TESTSIZE)
print(numberArrayTest)
print(numberArrayTestIncorrect)
# Grouped bar chart: correct vs incorrect counts per digit class.
N = 10
fig, ax = plt.subplots()
ind = np.arange(N)  # the x locations for the groups
width = 0.35  # the width of the bars: can also be len(x) sequence
p1 = ax.bar(ind, numberArrayTest, width, bottom=0, yerr=(0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
p2 = ax.bar(ind + width, numberArrayTestIncorrect, width, bottom=0, yerr=(0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
ax.set_title('Correct VS incorrect identification')
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(('0', '1', '2', '3', '4', '5', '6', '7', '8', '9'))
ax.legend((p1[0], p2[0]), ('Correct', 'Incorrect'))
ax.autoscale_view()
plt.show()
| 27.14966 | 104 | 0.606865 | 576 | 3,991 | 4.154514 | 0.300347 | 0.015044 | 0.020059 | 0.023402 | 0.088592 | 0.075219 | 0.055997 | 0.055997 | 0.055997 | 0.055997 | 0 | 0.041476 | 0.232774 | 3,991 | 146 | 105 | 27.335616 | 0.740039 | 0.117013 | 0 | 0.198113 | 0 | 0 | 0.050088 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018868 | false | 0 | 0.028302 | 0.009434 | 0.066038 | 0.04717 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf29a3a3726068b1f04b6f5636cece5035884b63 | 465 | py | Python | stackoverflow.py | kenenbek/MultiAgent | 3276d192416503bb1705a3a190649c8bcf3dd630 | [
"MIT"
] | null | null | null | stackoverflow.py | kenenbek/MultiAgent | 3276d192416503bb1705a3a190649c8bcf3dd630 | [
"MIT"
] | null | null | null | stackoverflow.py | kenenbek/MultiAgent | 3276d192416503bb1705a3a190649c8bcf3dd630 | [
"MIT"
] | null | null | null | from scipy.stats import chi2
import numpy as np
from matplotlib import pyplot as plt
from scipy import optimize
import pickle
objects = []
# Read every pickled object that was appended to the file, until EOF.
with open("priceZZZ", "rb") as openfile:
    while True:
        try:
            objects.append(pickle.load(openfile))
        except EOFError:
            # pickle.load raises EOFError once the stream is exhausted.
            break
# Plot the first series of each unpickled record against an evenly spaced x axis.
for i in range(len(objects)):
    plt.plot(np.linspace(0, len(objects[i][0]), len(objects[i][0])), objects[i][0], label=i)
plt.legend()
plt.show() | 27.352941 | 93 | 0.636559 | 67 | 465 | 4.41791 | 0.567164 | 0.101351 | 0.091216 | 0.081081 | 0.084459 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014085 | 0.236559 | 465 | 17 | 94 | 27.352941 | 0.819718 | 0 | 0 | 0 | 0 | 0 | 0.022222 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.3125 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
cf2bb8c6d785b39a5517d20fc3e9d6b495f276b6 | 1,080 | py | Python | 11_python-data-science-toolbox-(part-2)/2-list-comprehensions-and-generators/10_changing-the-output-in-generator-expressions.py | mohd-faizy/DataScience-With-Python | 13ebb10cf9083343056d5b782957241de1d595f9 | [
"MIT"
] | 5 | 2021-02-03T14:36:58.000Z | 2022-01-01T10:29:26.000Z | 11_python-data-science-toolbox-(part-2)/2-list-comprehensions-and-generators/10_changing-the-output-in-generator-expressions.py | mohd-faizy/DataScience-With-Python | 13ebb10cf9083343056d5b782957241de1d595f9 | [
"MIT"
] | null | null | null | 11_python-data-science-toolbox-(part-2)/2-list-comprehensions-and-generators/10_changing-the-output-in-generator-expressions.py | mohd-faizy/DataScience-With-Python | 13ebb10cf9083343056d5b782957241de1d595f9 | [
"MIT"
] | 3 | 2021-02-08T00:31:16.000Z | 2022-03-17T13:52:32.000Z | '''
10 - Changing the output in generator expressions
Great! At this point, you already know how to write a basic generator expression.
In this exercise, you will push this idea a little further by adding to the output
expression of a generator expression. Because generator expressions and list
comprehensions are so alike in syntax, this should be a familiar task for you!
You are given a list of strings lannister and, using a generator expression, create
a generator object that you will iterate over to print its values.
Instructions:
- Write a generator expression that will generate the lengths of each string in
lannister. Use person as the iterator variable. Assign the result to lengths.
- Supply the correct iterable in the for loop for printing the values in the generator
object.
'''
# Create a list of strings: lannister
lannister = ['cersei', 'jaime', 'tywin', 'tyrion', 'joffrey']
# Create a generator object: lengths
# (a generator expression is lazy: each length is computed on demand)
lengths = (len(person) for person in lannister)
# Iterate over and print the values in lengths
for value in lengths:
    print(value)
| 36 | 86 | 0.773148 | 167 | 1,080 | 5 | 0.467066 | 0.05988 | 0.071856 | 0.033533 | 0.05509 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00225 | 0.176852 | 1,080 | 29 | 87 | 37.241379 | 0.937008 | 0.847222 | 0 | 0 | 0 | 0 | 0.187097 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
cf2c4d8068a5e81799ce759db7c058c410706010 | 6,269 | py | Python | polyaxon/scheduler/spawners/tensorboard_spawner.py | elyase/polyaxon | 1c19f059a010a6889e2b7ea340715b2bcfa382a0 | [
"MIT"
] | null | null | null | polyaxon/scheduler/spawners/tensorboard_spawner.py | elyase/polyaxon | 1c19f059a010a6889e2b7ea340715b2bcfa382a0 | [
"MIT"
] | null | null | null | polyaxon/scheduler/spawners/tensorboard_spawner.py | elyase/polyaxon | 1c19f059a010a6889e2b7ea340715b2bcfa382a0 | [
"MIT"
] | null | null | null | import json
import random
from django.conf import settings
from polyaxon_k8s.exceptions import PolyaxonK8SError
from scheduler.spawners.project_job_spawner import ProjectJobSpawner
from scheduler.spawners.templates import constants, ingresses, services
from scheduler.spawners.templates.pod_environment import (
get_affinity,
get_node_selector,
get_tolerations
)
from scheduler.spawners.templates.project_jobs import deployments
from scheduler.spawners.templates.volumes import (
get_pod_outputs_volume,
get_pod_refs_outputs_volumes
)
class TensorboardSpawner(ProjectJobSpawner):
    """Creates and tears down the Kubernetes resources for a Tensorboard job.

    A Tensorboard is deployed as: a Deployment running the tensorboard
    container, a Service exposing it, and (when ingress is enabled) an
    Ingress routing /tensorboard/<project path> to that service.
    """

    TENSORBOARD_JOB_NAME = 'tensorboard'
    # Tensorboard's default listening port; used as-is when no ingress is configured.
    PORT = 6006

    def get_tensorboard_url(self):
        """Return the URL of the service exposing this Tensorboard."""
        return self._get_service_url(self.TENSORBOARD_JOB_NAME)

    def request_tensorboard_port(self):
        """Pick the externally exposed port for the Tensorboard service.

        Without an ingress the fixed default PORT is returned. With an
        ingress, a random port is drawn from settings.TENSORBOARD_PORT_RANGE,
        retrying until it does not collide with a port already used by
        another Tensorboard service.
        """
        if not self._use_ingress():
            return self.PORT
        labels = 'app={},role={}'.format(settings.APP_LABELS_TENSORBOARD,
                                         settings.ROLE_LABELS_DASHBOARD)
        # Ports already taken by running Tensorboard services.
        ports = [service.spec.ports[0].port for service in self.list_services(labels)]
        port = random.randint(*settings.TENSORBOARD_PORT_RANGE)
        while port in ports:
            port = random.randint(*settings.TENSORBOARD_PORT_RANGE)
        return port

    def start_tensorboard(self,
                          image,
                          outputs_path,
                          persistence_outputs,
                          outputs_refs_jobs=None,
                          outputs_refs_experiments=None,
                          resources=None,
                          node_selector=None,
                          affinity=None,
                          tolerations=None):
        """Create (or update) the deployment, service and optional ingress.

        Returns:
            dict with keys 'deployment' and 'service' holding the created
            Kubernetes objects serialized via to_dict().
        """
        ports = [self.request_tensorboard_port()]
        target_ports = [self.PORT]
        # Mount the job's own outputs volume plus the outputs of any
        # referenced jobs/experiments so tensorboard can read their logs.
        volumes, volume_mounts = get_pod_outputs_volume(persistence_outputs)
        refs_volumes, refs_volume_mounts = get_pod_refs_outputs_volumes(
            outputs_refs=outputs_refs_jobs,
            persistence_outputs=persistence_outputs)
        volumes += refs_volumes
        volume_mounts += refs_volume_mounts
        refs_volumes, refs_volume_mounts = get_pod_refs_outputs_volumes(
            outputs_refs=outputs_refs_experiments,
            persistence_outputs=persistence_outputs)
        volumes += refs_volumes
        volume_mounts += refs_volume_mounts
        # Fall back to the experiment-wide scheduling defaults when the
        # caller does not provide explicit placement constraints.
        node_selector = get_node_selector(
            node_selector=node_selector,
            default_node_selector=settings.NODE_SELECTOR_EXPERIMENTS)
        affinity = get_affinity(
            affinity=affinity,
            default_affinity=settings.AFFINITY_EXPERIMENTS)
        tolerations = get_tolerations(
            tolerations=tolerations,
            default_tolerations=settings.TOLERATIONS_EXPERIMENTS)
        deployment = deployments.get_deployment(
            namespace=self.namespace,
            app=settings.APP_LABELS_TENSORBOARD,
            name=self.TENSORBOARD_JOB_NAME,
            project_name=self.project_name,
            project_uuid=self.project_uuid,
            job_name=self.job_name,
            job_uuid=self.job_uuid,
            volume_mounts=volume_mounts,
            volumes=volumes,
            image=image,
            command=["/bin/sh", "-c"],
            args=["tensorboard --logdir={} --port={}".format(outputs_path, self.PORT)],
            ports=target_ports,
            container_name=settings.CONTAINER_NAME_PLUGIN_JOB,
            resources=resources,
            node_selector=node_selector,
            affinity=affinity,
            tolerations=tolerations,
            role=settings.ROLE_LABELS_DASHBOARD,
            type=settings.TYPE_LABELS_RUNNER)
        deployment_name = constants.JOB_NAME.format(name=self.TENSORBOARD_JOB_NAME,
                                                    job_uuid=self.job_uuid)
        deployment_labels = deployments.get_labels(app=settings.APP_LABELS_TENSORBOARD,
                                                   project_name=self.project_name,
                                                   project_uuid=self.project_uuid,
                                                   job_name=self.job_name,
                                                   job_uuid=self.job_uuid,
                                                   role=settings.ROLE_LABELS_DASHBOARD,
                                                   type=settings.TYPE_LABELS_RUNNER)
        dep_resp, _ = self.create_or_update_deployment(name=deployment_name, data=deployment)
        service = services.get_service(
            namespace=self.namespace,
            name=deployment_name,
            labels=deployment_labels,
            ports=ports,
            target_ports=target_ports,
            service_type=self._get_service_type())
        service_resp, _ = self.create_or_update_service(name=deployment_name, data=service)
        results = {'deployment': dep_resp.to_dict(), 'service': service_resp.to_dict()}
        if self._use_ingress():
            annotations = json.loads(settings.K8S_INGRESS_ANNOTATIONS)
            # Route /tensorboard/<project path> (dots become slashes) to the service.
            paths = [{
                'path': '/tensorboard/{}'.format(self.project_name.replace('.', '/')),
                'backend': {
                    'serviceName': deployment_name,
                    'servicePort': ports[0]
                }
            }]
            ingress = ingresses.get_ingress(namespace=self.namespace,
                                            name=deployment_name,
                                            labels=deployment_labels,
                                            annotations=annotations,
                                            paths=paths)
            self.create_or_update_ingress(name=deployment_name, data=ingress)
        return results

    def stop_tensorboard(self):
        """Delete the deployment, service and (if used) ingress.

        Returns:
            True on success, False if any Kubernetes call failed.
        """
        deployment_name = constants.JOB_NAME.format(name=self.TENSORBOARD_JOB_NAME,
                                                    job_uuid=self.job_uuid)
        try:
            self.delete_deployment(name=deployment_name)
            self.delete_service(name=deployment_name)
            if self._use_ingress():
                self.delete_ingress(name=deployment_name)
            return True
        except PolyaxonK8SError:
            return False
| 43.534722 | 93 | 0.595948 | 591 | 6,269 | 5.978003 | 0.184433 | 0.051514 | 0.040759 | 0.033965 | 0.320974 | 0.276819 | 0.276819 | 0.251344 | 0.251344 | 0.216247 | 0 | 0.0024 | 0.335301 | 6,269 | 143 | 94 | 43.839161 | 0.845452 | 0 | 0 | 0.307692 | 0 | 0 | 0.021375 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030769 | false | 0 | 0.069231 | 0.007692 | 0.169231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf2cdf5265503bfa5f46413c8c8ff1d4149197dd | 4,651 | py | Python | iot/rooms/__init__.py | joh90/iot | 4a571be7e0760445dd2d5be858ecb4372b5d59b4 | [
"MIT"
] | 6 | 2018-11-06T02:07:21.000Z | 2021-12-15T07:56:14.000Z | iot/rooms/__init__.py | joh90/iot | 4a571be7e0760445dd2d5be858ecb4372b5d59b4 | [
"MIT"
] | 7 | 2019-06-17T15:50:22.000Z | 2021-03-14T19:24:16.000Z | iot/rooms/__init__.py | joh90/iot | 4a571be7e0760445dd2d5be858ecb4372b5d59b4 | [
"MIT"
] | 1 | 2020-05-26T09:32:56.000Z | 2020-05-26T09:32:56.000Z | import logging
from iot.constants import ROOM_LIST_MESSAGE
from iot.utils import return_mac
from iot.devices import DeviceType
from iot.devices.broadlink import (
BroadlinkDeviceFactory,
BroadlinkDeviceTypes
)
from iot.devices.errors import (
DeviceTypeNotFound, BrandNotFound,
SendCommandError
)
from iot.devices.factory import DeviceFactory
logger = logging.getLogger(__name__)
d_factory = DeviceFactory()
bl_d_factory = BroadlinkDeviceFactory()
# We assume one RM3 RM per room for now
# Supports multiple Broadlink devices
# eg. Smart Plug, Multi Plugs
class Room:
    """A physical room: one (optional) RM IR blaster plus the IR-controlled
    devices it drives, and any number of native Broadlink devices.
    """

    __slots__ = (
        "name",
        "rm",
        "DEVICES",
        "BL_DEVICES",
        "last_action"
    )

    def __init__(self, name, rm):
        # rm: the room's broadlink RM device used to transmit IR codes
        # (may be None; all rm accesses below are guarded).
        self.name = name
        self.rm = rm
        self.DEVICES = {}  # device id -> IR device built by DeviceFactory
        self.BL_DEVICES = {}  # device id -> native Broadlink device
        self.last_action = None

    def room_info(self):
        """Return a dict summary of the room and its RM blaster."""
        return {
            "name": self.name,
            "rm_host": self.rm.host[0] if self.rm else None,
            "rm_mac": return_mac(self.rm.mac) if self.rm else None,
            "type": self.rm.type if self.rm else None,
            "devices": self.DEVICES
        }

    def format_room_devices(self):
        """Format each IR device as one markdown line for the room listing."""
        room_devices = [
            "*{}* | Type: {}".format(d.id, DeviceType(d.device_type).name) \
            for d in self.DEVICES.values()
        ]
        return room_devices

    def format_room_bl_devices(self):
        """Format each Broadlink device as one markdown line for the room listing."""
        room_bl_devices = [
            "*{}* | Type: {} | IP: {} | Mac: {}".format(
                d.id, d.device_type, d.ip, d.mac_address) \
            for d in self.BL_DEVICES.values()
        ]
        return room_bl_devices

    def room_list_info(self):
        """Build the full ROOM_LIST_MESSAGE text for this room."""
        info = self.room_info()
        room_devices = self.format_room_devices()
        room_broadlink_devices = self.format_room_bl_devices()
        return ROOM_LIST_MESSAGE.format(
            info["name"],
            "Type: {}, IP: {}, Mac: {}".format(
                info["type"], info["rm_host"], info["rm_mac"]),
            "\n".join(room_devices),
            "\n".join(room_broadlink_devices)
        )

    def populate_devices(self, devices):
        """Create IR devices from config dicts and register them on the room.

        Unknown device types are skipped silently; unknown brands are skipped
        with an error log. Returns the list of devices actually created.
        """
        populated = []
        for d in devices:
            if d["id"] not in self.DEVICES:
                try:
                    dev = d_factory.create_device(
                        d["type"], self, d["id"], d["brand"], d["model"]
                    )
                    self.add_device(dev)
                    populated.append(dev)
                except DeviceTypeNotFound:
                    continue
                except BrandNotFound:
                    logger.error(
                        "Room: %s, Unable to populate device %s, " \
                        "Brand %s not found for Device Type %s",
                        self.name, d["id"], d["brand"], d["type"]
                    )
                    continue
        return populated

    def add_device(self, device):
        """Register an IR device on this room, keyed by its id."""
        self.DEVICES[device.id] = device

    def get_device(self, device_id):
        # Not implemented yet.
        pass

    def populate_broadlink_devices(self, devices):
        """Match configured Broadlink devices against those discovered on the
        network (via iot_server) and register each match on the room and on
        the server's global device registry.
        """
        # Imported here (not at module level) to avoid a circular import.
        from iot.server import iot_server
        for d in devices:
            if d["id"] not in self.BL_DEVICES:
                bl_device = iot_server.find_broadlink_device(
                    d["mac_address"], d["broadlink_type"].upper()
                )
                if bl_device is None:
                    logger.error(
                        "Room: %s, Unable to populate Broadlink device %s, " \
                        "Broadlink device %s not found with Device Type %s",
                        self.name, d["id"], d["mac_address"], d["broadlink_type"]
                    )
                    continue
                try:
                    dev = bl_d_factory.create_device(
                        d["broadlink_type"], self, d["id"], bl_device
                    )
                    self.add_broadlink_devices(dev.id, dev)
                    iot_server.devices[dev.id] = dev
                except DeviceTypeNotFound:
                    continue

    def add_broadlink_devices(self, id, bl_device):
        """Register a Broadlink device on this room, keyed by id."""
        self.BL_DEVICES[id] = bl_device

    def convert_to_bytearray(self, data):
        """Join a sequence of hex-string chunks into an IR payload bytearray."""
        return bytearray.fromhex("".join(data))

    def send(self, data):
        # Check device type
        # Only RMMINI blasters are supported for sending IR data right now.
        if self.rm and self.rm.type == "RMMINI":
            self.send_rm_data(data)

    def send_rm_data(self, data):
        """Transmit an IR code through the RM, wrapping any failure in SendCommandError."""
        try:
            self.rm.send_data(
                self.convert_to_bytearray(data)
            )
        except Exception as e:
            raise SendCommandError("{}: {}".format(e.__class__, e))
| 29.436709 | 81 | 0.529779 | 517 | 4,651 | 4.562863 | 0.195358 | 0.025435 | 0.023739 | 0.015261 | 0.135227 | 0.090293 | 0.069521 | 0.042391 | 0.022891 | 0.022891 | 0 | 0.000678 | 0.365513 | 4,651 | 157 | 82 | 29.624204 | 0.798712 | 0.025586 | 0 | 0.106557 | 0 | 0 | 0.098962 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106557 | false | 0.008197 | 0.065574 | 0.016393 | 0.237705 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf2d6649ae78a91eff025de10e3d668a7dec13c5 | 2,919 | py | Python | start.py | mutageneral/fossdiscord | 54111e6e6ff8ee64f54241a11b9da52db4776223 | [
"MIT"
] | null | null | null | start.py | mutageneral/fossdiscord | 54111e6e6ff8ee64f54241a11b9da52db4776223 | [
"MIT"
] | null | null | null | start.py | mutageneral/fossdiscord | 54111e6e6ff8ee64f54241a11b9da52db4776223 | [
"MIT"
] | null | null | null | import os, ctypes, sys, subprocess, config, globalconfig, shutil
from git import Repo
from shutil import copyfile
commands = ["--help", "--updatebot", "--start", "--credits"]
def startbot():
    """Launch bot.py as a detached subprocess, then exit this launcher."""
    print("Attempting to start the bot...")
    print("REMEMBER: YOU MUST RUN THE COMMAND '" + config.prefix + "shutdownbot' TO SHUTDOWN THE BOT!!!!")
    dir_path = os.getcwd()
    # Popen (not run) so the launcher can exit while the bot keeps running.
    subprocess.Popen(['python', dir_path + '/bot.py'])
    sys.exit()
def botupdate():
    """Update the FreeDiscord installation in place (Linux only).

    Clones the upstream repository into /tmp/freeupdate, replaces the local
    cogs/ directory and the main source files with the fresh copies, then
    removes the temporary clone. Windows and macOS are not supported yet.
    """
    if sys.platform == "linux" or sys.platform == "linux2":
        try:
            os.mkdir('/tmp/freeupdate')
        except OSError:
            # A stale clone from a previous (failed) run may be non-empty, so
            # use shutil.rmtree — os.rmdir only removes empty directories.
            shutil.rmtree('/tmp/freeupdate')
            os.mkdir('/tmp/freeupdate')
        HTTPS_REMOTE_URL = globalconfig.github_login_url
        DEST_NAME = '/tmp/freeupdate'
        Repo.clone_from(HTTPS_REMOTE_URL, DEST_NAME)
        dir_path = os.getcwd()
        shutil.rmtree(dir_path + "/cogs/")
        src = '/tmp/freeupdate/cogs'
        dest = dir_path + "/cogs"
        shutil.copytree(src, dest)
        copyfile('/tmp/freeupdate/bot.py', dir_path + '/bot.py')
        copyfile('/tmp/freeupdate/setup.py', dir_path + '/setup.py')
        copyfile('/tmp/freeupdate/README.md', dir_path + '/README.md')
        copyfile('/tmp/freeupdate/globalconfig.py', dir_path + '/globalconfig.py')
        shutil.rmtree('/tmp/freeupdate')
        print("Done! Restart the bot to apply the changes!")
        # Bug fix: the original called print(title=..., description=...),
        # which raises TypeError (print accepts only sep/end/file/flush).
        print("Updated! FreeDiscord updated! No error reported. Check your console to confirm this.")
    elif sys.platform == "win32":
        print("'updatebot' is not yet available for Windows.")
    elif sys.platform == "darwin":
        print("'updatebot' is not yet available for macOS.")
# Entry-point logic: dispatch on the first CLI argument.
try:
    booloutput = bool(sys.argv[1])
except:
    # No argument supplied (IndexError) -> just start the bot.
    startbot()
# NOTE(review): this loop re-evaluates the same membership test once per
# known command; a single `if sys.argv[1] not in commands:` would suffice.
for commandList in commands:
    if sys.argv[1] not in commands:
        sys.exit(sys.argv[1] + " is not a command. To get a command list, run 'python3 start.py --help'.")
if "--help" in sys.argv[1]:
    try:
        # Probe for an optional help sub-topic argument.
        bool(sys.argv[2])
    except:
        # No sub-topic: print the general command list and exit.
        sys.exit("FreeDiscord Start Script\nCommand List:\n\t--help - This message\n\t--start (or no argument) - Starts this FreeDiscord instance.\n\t--credits - Shows the credits of FreeDiscord.\n\t--updatebot - Updates this FreeDiscord instance.")
    if sys.argv[2] == "gui":
        sys.exit("FreeDiscord Start Script\npython3 start.py --start\nStarts the bot.")
    elif sys.argv[2] == "help":
        sys.exit("FreeDiscord Start Script\npython3 start.py --help\nShows the command list.")
    elif sys.argv[2] == "crash":
        sys.exit("FreeDiscord Start Script\npython3 start.py --updatebot\nUpdates the FreeDiscord instance.")
    elif sys.argv[2] == "credits":
        sys.exit("redev's CrashDash\npython3 start.py --credits\nShows the credits of FreeDiscord.")
if "--updatebot" in sys.argv[1]:
    botupdate()
if "--start" in sys.argv[1]:
    startbot()
| 42.304348 | 249 | 0.64063 | 385 | 2,919 | 4.807792 | 0.322078 | 0.041599 | 0.025932 | 0.049703 | 0.123717 | 0.10805 | 0.10805 | 0.071313 | 0 | 0 | 0 | 0.008243 | 0.210346 | 2,919 | 68 | 250 | 42.926471 | 0.794794 | 0.005139 | 0 | 0.183333 | 0 | 0.016667 | 0.443679 | 0.052015 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.05 | 0 | 0.083333 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf2ebd0be605b85c733e5e7a385de095a11ecc48 | 932 | py | Python | QTM/MixQC/1.0.0/plt.py | binggu56/qmd | e2628710de15f8a8b9a1280fcf92f9e87559414c | [
"MIT"
] | null | null | null | QTM/MixQC/1.0.0/plt.py | binggu56/qmd | e2628710de15f8a8b9a1280fcf92f9e87559414c | [
"MIT"
] | null | null | null | QTM/MixQC/1.0.0/plt.py | binggu56/qmd | e2628710de15f8a8b9a1280fcf92f9e87559414c | [
"MIT"
] | null | null | null | ##!/usr/bin/python
import numpy as np
import pylab as pl
#with open("traj.dat") as f:
# data = f.read()
#
# data = data.split('\n')
#
# x = [row.split(' ')[0] for row in data]
# y = [row.split(' ')[1] for row in data]
#
# fig = plt.figure()
#
# ax1 = fig.add_subplot(111)
#
# ax1.set_title("Plot title...")
# ax1.set_xlabel('your x label..')
# ax1.set_ylabel('your y label...')
#
# ax1.plot(x,y, c='r', label='the data')
#
# leg = ax1.legend()
#fig = plt.figure()
font = {'family' : 'Times New Roman',
# 'weight' : 'bold',
'size' : 20}
pl.rc('font', **font)
data = np.genfromtxt(fname='xoutput')
#data = np.loadtxt('traj.dat')
for x in range(1,20):
pl.plot(data[:,0],data[:,x],'k-',linewidth=1)
#plt.figure(1)
#plt.plot(x,y1,'-')
#plt.plot(x,y2,'g-')
#pl.ylim(0,1)
pl.xlabel('Time [a.u.]')
pl.ylabel('Positions')
#pl.title('')
pl.savefig('traj.pdf')
pl.show()
| 19.416667 | 49 | 0.549356 | 150 | 932 | 3.386667 | 0.48 | 0.05315 | 0.031496 | 0.047244 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030831 | 0.199571 | 932 | 47 | 50 | 19.829787 | 0.650134 | 0.590129 | 0 | 0 | 0 | 0 | 0.1875 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf2ee0d6951dff87d2cc119417466bb9ccb36246 | 2,753 | py | Python | generator/generator.py | zbelateche/ee272_cgra | 4cf2e3cf4a4bdf585d87a9209a5bf252666bc6a2 | [
"BSD-3-Clause"
] | 1 | 2020-07-23T02:57:12.000Z | 2020-07-23T02:57:12.000Z | generator/generator.py | zbelateche/ee272_cgra | 4cf2e3cf4a4bdf585d87a9209a5bf252666bc6a2 | [
"BSD-3-Clause"
] | null | null | null | generator/generator.py | zbelateche/ee272_cgra | 4cf2e3cf4a4bdf585d87a9209a5bf252666bc6a2 | [
"BSD-3-Clause"
] | 1 | 2021-04-27T23:13:43.000Z | 2021-04-27T23:13:43.000Z | from abc import ABC, abstractmethod
from ordered_set import OrderedSet
import magma
from common.collections import DotDict
from generator.port_reference import PortReference, PortReferenceBase
import warnings
class Generator(ABC):
    """Base class for hardware generators.

    A generator owns a set of named ports and a list of wires (port pairs);
    circuit() elaborates the generator and its children into a magma
    circuit class.
    """

    def __init__(self):
        self.ports = DotDict()  # port name -> PortReference
        self.wires = []  # list of (port, port) tuples in canonical order

    @abstractmethod
    def name(self):
        """Return the name of the generated magma circuit."""
        pass

    def add_port(self, name, T):
        """Register a new port of type T; duplicate names are rejected."""
        if name in self.ports:
            raise ValueError(f"{name} is already a port")
        self.ports[name] = PortReference(self, name, T)

    def add_ports(self, **kwargs):
        """Register several ports at once, given as name=Type keyword pairs."""
        for name, T in kwargs.items():
            self.add_port(name, T)

    def wire(self, port0, port1):
        """Connect two ports; a duplicate connection is skipped with a warning."""
        assert isinstance(port0, PortReferenceBase)
        assert isinstance(port1, PortReferenceBase)
        # Canonical ordering makes (a, b) and (b, a) compare equal below.
        connection = self.__sort_ports(port0, port1)
        if connection not in self.wires:
            self.wires.append(connection)
        else:
            warnings.warn(f"skipping duplicate connection: "
                          f"{port0.qualified_name()}, "
                          f"{port1.qualified_name()}")

    def remove_wire(self, port0, port1):
        """Disconnect two ports if they are currently wired; no-op otherwise."""
        assert isinstance(port0, PortReferenceBase)
        assert isinstance(port1, PortReferenceBase)
        connection = self.__sort_ports(port0, port1)
        if connection in self.wires:
            self.wires.remove(connection)

    def decl(self):
        """Return the magma IO declaration: [name0, type0, name1, type1, ...]."""
        io = []
        for name, port in self.ports.items():
            io += [name, port.base_type()]
        return io

    def children(self):
        """Return the set of other generators this one is wired to."""
        children = OrderedSet()
        for ports in self.wires:
            for port in ports:
                if port.owner() == self:
                    continue
                children.add(port.owner())
        return children

    def circuit(self):
        """Elaborate this generator (and, recursively, its children) into a
        magma circuit class.
        """
        children = self.children()
        circuits = {}
        for child in children:
            circuits[child] = child.circuit()

        class _Circ(magma.Circuit):
            name = self.name()
            IO = self.decl()

            # NOTE(review): magma's Circuit metaclass invokes `definition`
            # with the circuit IO; the @classmethod-with-io signature is
            # magma convention — confirm against the magma version in use.
            @classmethod
            def definition(io):
                instances = {}
                for child in children:
                    instances[child] = circuits[child]()
                # The generator's own ports resolve against the circuit IO.
                instances[self] = io
                for port0, port1 in self.wires:
                    inst0 = instances[port0.owner()]
                    inst1 = instances[port1.owner()]
                    wire0 = port0.get_port(inst0)
                    wire1 = port1.get_port(inst1)
                    magma.wire(wire0, wire1)

        return _Circ

    def __sort_ports(self, port0, port1):
        # Order the pair deterministically (by object identity) so wires can
        # be compared regardless of argument order.
        if id(port0) < id(port1):
            return (port0, port1)
        else:
            return (port1, port0)
| 30.588889 | 69 | 0.55721 | 289 | 2,753 | 5.217993 | 0.256055 | 0.041777 | 0.029178 | 0.023873 | 0.210875 | 0.18435 | 0.18435 | 0.18435 | 0.18435 | 0.18435 | 0 | 0.020101 | 0.349437 | 2,753 | 89 | 70 | 30.932584 | 0.821887 | 0 | 0 | 0.133333 | 0 | 0 | 0.03814 | 0.017799 | 0 | 0 | 0 | 0 | 0.053333 | 1 | 0.146667 | false | 0.013333 | 0.08 | 0 | 0.32 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf308bf1eb0d73c66e892cc4b6703edb92094ed6 | 709 | py | Python | oms_cms/backend/info_block/models.py | Hamel007/oms_cms | a120b27932fe1bd89f2c621c181b80b19caba0e0 | [
"BSD-3-Clause"
] | null | null | null | oms_cms/backend/info_block/models.py | Hamel007/oms_cms | a120b27932fe1bd89f2c621c181b80b19caba0e0 | [
"BSD-3-Clause"
] | null | null | null | oms_cms/backend/info_block/models.py | Hamel007/oms_cms | a120b27932fe1bd89f2c621c181b80b19caba0e0 | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from oms_gallery.models import Gallery
from oms_cms.backend.languages.models import AbstractLang
class InfoBlock(AbstractLang):
    """Info block model: a titled content block with an optional gallery slider."""
    # Verbose names are Russian UI labels; they are runtime strings and kept as-is.
    title = models.CharField("Заголовок", max_length=100)
    sub_title = models.CharField("Под заголовок", max_length=100, blank=True, null=True)
    description = models.TextField("Описание", max_length=1000, blank=True)
    # Optional link to a Gallery used as an image slider; deleting the
    # gallery cascades to this block.
    slider = models.ForeignKey(
        Gallery,
        verbose_name="Слайдер",
        on_delete=models.CASCADE,
        blank=True,
        null=True)

    class Meta:
        verbose_name = "Инфо блок"
        verbose_name_plural = "Инфо блок"

    def __str__(self):
        # Human-readable representation (used e.g. in the Django admin).
        return self.title
| 28.36 | 88 | 0.686883 | 86 | 709 | 5.488372 | 0.534884 | 0.057203 | 0.084746 | 0.088983 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017953 | 0.214386 | 709 | 24 | 89 | 29.541667 | 0.829443 | 0.023977 | 0 | 0 | 0 | 0 | 0.080175 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.166667 | 0.055556 | 0.611111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
cf314a62fbc67887598a3f07228dd471a1ffe7af | 4,724 | py | Python | modules/SampleGenerator/SampleGenerator.py | ediril/BCI | f211ba70d6d75a9badff6872f86416b065f6192b | [
"BSD-2-Clause"
] | 6 | 2016-12-30T03:43:49.000Z | 2020-04-19T16:04:37.000Z | modules/SampleGenerator/SampleGenerator.py | hongweimao/BCI | 49b7e8137bd5f9d18e3efdbd94a112cde5d16c4c | [
"BSD-2-Clause"
] | 1 | 2022-03-08T09:16:10.000Z | 2022-03-08T09:16:10.000Z | modules/SampleGenerator/SampleGenerator.py | ediril/BCI | f211ba70d6d75a9badff6872f86416b065f6192b | [
"BSD-2-Clause"
] | 2 | 2015-06-16T02:46:03.000Z | 2018-12-20T20:07:59.000Z | #!/usr/bin/python
import time
import sys
import platform
from ConfigParser import SafeConfigParser
from PyDragonfly import Dragonfly_Module, CMessage, copy_to_msg, copy_from_msg, MT_EXIT
from argparse import ArgumentParser
from dragonfly_utils import respond_to_ping
import Dragonfly_config as rc
class SampleGenerator(object):
    """Dragonfly module that emits SAMPLE_GENERATED messages.

    Runs either at a fixed frequency (free-run mode, when no trigger
    message types are configured) or in response to configured trigger
    messages.  Note: this file uses Python 2 print-statement syntax.
    """
    def __init__(self, config_file, server):
        # serial_no seeds the sample counter; each emitted sample increments it.
        self.serial_no = 2
        self.freq = 50  # Hz, default free-run rate (may be overridden by config)
        self.load_config(config_file)
        self.setup_dragonfly(server)
        self.run()

    def load_config(self, config_file):
        """Read trigger message types and, if none, the free-run frequency."""
        self.config = SafeConfigParser()
        self.config.read(config_file)
        triggers = self.config.get('main','triggers').split()
        # Resolve trigger names like 'FOO' to the rc.MT_FOO constants.
        self.triggers = [eval('rc.MT_%s' % (x)) for x in triggers]
        if not triggers:
            freq = self.config.get('main','frequency')
            if freq != '':
                self.freq = self.config.getfloat('main','frequency')
            print "Freq: %.2f" % (self.freq)

    def setup_dragonfly(self, server):
        """Connect to the Dragonfly message manager and subscribe to inputs."""
        self.mod = Dragonfly_Module(rc.MID_SAMPLE_GENERATOR, 0)
        self.mod.ConnectToMMM(server)
        self.mod.Subscribe(MT_EXIT)
        self.mod.Subscribe(rc.MT_PING)
        self.mod.Subscribe(rc.MT_SPM_SPIKECOUNT)
        for trigger in self.triggers:
            self.mod.Subscribe(trigger)
        self.mod.SendModuleReady()
        print "Connected to Dragonfly at", server
        if platform.system() == "Windows":
            # On Windows, the best timer is time.clock()
            self.default_timer = time.clock
        else:
            # On most other platforms the best timer is time.time()
            self.default_timer = time.time

    def run(self):
        """Main loop: dispatch incoming messages or free-run at self.freq."""
        self.delta_time_calc = self.default_timer()
        while True:
            msg = CMessage()
            rcv = self.mod.ReadMessage(msg, 0.001)
            if rcv == 1:
                hdr = msg.GetHeader()
                msg_type = hdr.msg_type
                dest_mod_id = hdr.dest_mod_id
                if msg_type == MT_EXIT:
                    # Exit if addressed to everyone (0) or to this module.
                    if (dest_mod_id == 0) or (dest_mod_id == self.mod.GetModuleID()):
                        print 'Received MT_EXIT, disconnecting...'
                        self.mod.SendSignal(rc.MT_EXIT_ACK)
                        self.mod.DisconnectFromMMM()
                        break;
                elif msg_type == rc.MT_PING:
                    respond_to_ping(self.mod, msg, 'SampleGenerator')
                elif (msg_type == rc.MT_SPM_SPIKECOUNT):
                    msg_src_mod_id = hdr.src_mod_id
                    # Another producer is already emitting spike counts:
                    # abort to avoid duplicate sample streams.
                    if msg_src_mod_id == rc.MID_SPM_MOD:
                        print "\n\n ** Detected SPM_SPIKECOUNT messages coming from SPM_MOD!  Quitting..\n\n";
                        sys.exit(0);
                else:
                    if len(self.triggers) > 0:
                        self.process_msg(msg)
            else:
                # No message received: in free-run mode (no triggers), pace
                # ourselves to emit one sample per period.
                if len(self.triggers) == 0:
                    period = (1. / self.freq)
                    time_now = self.default_timer()
                    delta_time = period - (time_now - self.delta_time_calc)
                    if delta_time > 0:
                        time.sleep(delta_time)
                    # Advance by a whole period (not to 'now') so the average
                    # rate stays at self.freq even if a cycle overruns.
                    self.delta_time_calc = self.delta_time_calc + period
                    self.send_sample_generated()

    def process_msg(self, msg):
        """Emit a sample when a configured trigger message arrives."""
        msg_type = msg.GetHeader().msg_type
        if msg_type in self.triggers:
            time_now = self.default_timer()
            delta_time = time_now - self.delta_time_calc
            self.delta_time_calc = time_now
            self.send_sample_generated()

    def send_sample_generated(self):
        """Build and send one SAMPLE_GENERATED message; '|' marks progress."""
        sg = rc.MDF_SAMPLE_GENERATED()
        self.serial_no += 1
        sg.sample_header.SerialNo = self.serial_no
        sg.sample_header.Flags = 0
        sg.sample_header.DeltaTime = (1. / self.freq)
        sg.source_timestamp = self.default_timer()
        sg_msg = CMessage(rc.MT_SAMPLE_GENERATED)
        copy_to_msg(sg, sg_msg)
        self.mod.SendMessage(sg_msg)
        sys.stdout.write('|')
        sys.stdout.flush()
if __name__ == "__main__":
    # Parse the config file path and an optional message-manager IP, then
    # start the generator (its constructor blocks inside run()).
    parser = ArgumentParser(description = 'Send SAMPLE_GENERATED messages' \
        ' under a range of conditions')
    parser.add_argument(type=str, dest='config')
    parser.add_argument(type=str, dest='mm_ip', nargs='?', default='')
    args = parser.parse_args()
    print("Using config file=%s, MM IP=%s" % (args.config, args.mm_ip))
    itm = SampleGenerator(args.config, args.mm_ip)
| 41.078261 | 109 | 0.577053 | 576 | 4,724 | 4.494792 | 0.263889 | 0.035149 | 0.035149 | 0.045964 | 0.21321 | 0.069139 | 0.026265 | 0.026265 | 0 | 0 | 0 | 0.005919 | 0.320491 | 4,724 | 114 | 110 | 41.438596 | 0.800623 | 0.049534 | 0 | 0.07 | 0 | 0 | 0.071859 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.08 | null | null | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
cf33c0f359af61ed23f396ff759a9bbdc5a2e5ec | 7,118 | py | Python | app/gws/web/wrappers.py | ewie/gbd-websuite | 6f2814c7bb64d11cb5a0deec712df751718fb3e1 | [
"Apache-2.0"
] | null | null | null | app/gws/web/wrappers.py | ewie/gbd-websuite | 6f2814c7bb64d11cb5a0deec712df751718fb3e1 | [
"Apache-2.0"
] | null | null | null | app/gws/web/wrappers.py | ewie/gbd-websuite | 6f2814c7bb64d11cb5a0deec712df751718fb3e1 | [
"Apache-2.0"
] | null | null | null | import os
import gzip
import io
import werkzeug.utils
import werkzeug.wrappers
import werkzeug.wsgi
from werkzeug.utils import cached_property
import gws
import gws.tools.date
import gws.tools.json2
import gws.tools.net
import gws.tools.vendor.umsgpack as umsgpack
import gws.web.error
import gws.types as t
# Wire-format identifiers for structured request/response bodies.
_JSON = 1
_MSGPACK = 2

# Maps each struct type id to its MIME type (used for content negotiation).
_struct_mime = {
    _JSON: 'application/json',
    _MSGPACK: 'application/msgpack',
}
#:export IResponse
class BaseResponse(t.IResponse):
    """Thin wrapper around a werkzeug Response implementing t.IResponse."""

    def __init__(self, **kwargs):
        # Adopt an existing werkzeug response if one is supplied via 'wz',
        # otherwise construct a fresh one from the keyword arguments.
        try:
            self._wz = kwargs['wz']
        except KeyError:
            self._wz = werkzeug.wrappers.Response(**kwargs)

    def __call__(self, environ, start_response):
        # WSGI entry point: delegate to the wrapped response.
        return self._wz(environ, start_response)

    def set_cookie(self, key, **kwargs):
        self._wz.set_cookie(key, **kwargs)

    def delete_cookie(self, key, **kwargs):
        self._wz.delete_cookie(key, **kwargs)

    def add_header(self, key, value):
        self._wz.headers.add(key, value)
#:export IBaseRequest
class BaseRequest(t.IBaseRequest):
    """Wraps a werkzeug Request and adds struct (JSON/msgpack) negotiation,
    case-insensitive params, and site-aware URL/response helpers."""

    def __init__(self, root: t.IRootObject, environ: dict, site: t.IWebSite):
        self._wz = werkzeug.wrappers.Request(environ)
        # this is also set in nginx (see server/ini), but we need this for unzipping (see data() below)
        self._wz.max_content_length = root.var('server.web.maxRequestLength') * 1024 * 1024

        self.params = {}
        self._lower_params = {}

        self.root: t.IRootObject = root
        self.site: t.IWebSite = site
        self.method: str = self._wz.method

    def init(self):
        """Parse request params (struct body, query string or path segments)."""
        self.params = self._parse_params() or {}
        # Keep a lowercased copy so param lookups are case-insensitive.
        self._lower_params = {k.lower(): v for k, v in self.params.items()}

    @property
    def environ(self) -> dict:
        return self._wz.environ

    @cached_property
    def input_struct_type(self) -> int:
        """Struct type of the request body (_JSON/_MSGPACK), or 0 if none."""
        if self.method == 'POST':
            ct = self.header('content-type', '').lower()
            if ct.startswith(_struct_mime[_JSON]):
                return _JSON
            if ct.startswith(_struct_mime[_MSGPACK]):
                return _MSGPACK
        return 0

    @cached_property
    def output_struct_type(self) -> int:
        """Struct type the client accepts; falls back to the input type."""
        h = self.header('accept', '').lower()
        if _struct_mime[_MSGPACK] in h:
            return _MSGPACK
        if _struct_mime[_JSON] in h:
            return _JSON
        return self.input_struct_type

    @property
    def data(self) -> t.Optional[bytes]:
        """Raw POST body, gunzipped if the client sent it gzip-encoded."""
        if self.method != 'POST':
            return None

        data = self._wz.get_data(as_text=False, parse_form_data=False)

        if self.root.application.developer_option('request.log_all'):
            gws.write_file_b(f'{gws.VAR_DIR}/debug_request_{gws.tools.date.timestamp_msec()}', data)

        if self.header('content-encoding') == 'gzip':
            # Cap decompressed size to guard against zip bombs.
            with gzip.GzipFile(fileobj=io.BytesIO(data)) as fp:
                return fp.read(self._wz.max_content_length)

        return data

    @property
    def text(self) -> t.Optional[str]:
        """POST body decoded with the request charset; 400 on bad encoding."""
        if self.method != 'POST':
            return None

        charset = self.header('charset', 'utf-8')
        try:
            return self.data.decode(encoding=charset, errors='strict')
        except UnicodeDecodeError as e:
            gws.log.error('post data decoding error')
            raise gws.web.error.BadRequest() from e

    @property
    def is_secure(self) -> bool:
        return self._wz.is_secure

    def env(self, key: str, default: str = None) -> str:
        """WSGI environ value for key, or default."""
        return self._wz.environ.get(key, default)

    def param(self, key: str, default: str = None) -> str:
        """Request parameter by case-insensitive key, or default."""
        return self._lower_params.get(key.lower(), default)

    def has_param(self, key: str) -> bool:
        return key.lower() in self._lower_params

    def header(self, key: str, default: str = None) -> str:
        return self._wz.headers.get(key, default)

    def cookie(self, key: str, default: str = None) -> str:
        return self._wz.cookies.get(key, default)

    def url_for(self, url: t.Url) -> t.Url:
        """Resolve a URL via the current site's routing."""
        u = self.site.url_for(self, url)
        return u

    def response(self, content: str, mimetype: str, status: int = 200) -> t.IResponse:
        """Build a plain response with the given content and mime type."""
        return BaseResponse(
            response=content,
            mimetype=mimetype,
            status=status
        )

    def redirect_response(self, location, status=302):
        return werkzeug.utils.redirect(location, status)

    def file_response(self, path: str, mimetype: str, status: int = 200, attachment_name: str = None) -> t.IResponse:
        """Stream a file from disk, optionally as a download attachment."""
        headers = {
            'Content-Length': os.path.getsize(path)
        }
        if attachment_name:
            headers['Content-Disposition'] = f'attachment; filename="{attachment_name}"'
        # wrap_file + direct_passthrough lets the WSGI server stream the
        # file without buffering it in Python.
        fp = werkzeug.wsgi.wrap_file(self.environ, open(path, 'rb'))
        return BaseResponse(
            response=fp,
            mimetype=mimetype,
            status=status,
            headers=headers,
            direct_passthrough=True
        )

    def struct_response(self, data: t.Response, status: int = 200) -> t.IResponse:
        """Serialize data in the client's preferred struct format (JSON default)."""
        typ = self.output_struct_type or _JSON
        return self.response(self._encode_struct(data, typ), _struct_mime[typ], status)

    def error_response(self, err) -> t.IResponse:
        # err is expected to be a werkzeug HTTP exception (has get_response).
        return BaseResponse(wz=err.get_response(self._wz.environ))

    def _parse_params(self):
        """Extract params from the struct body, query string, or URL path."""
        if self.input_struct_type:
            return self._decode_struct(self.input_struct_type)

        args = {k: v for k, v in self._wz.args.items()}
        path = self._wz.path

        # the server only understands requests to /_/...
        # the params can be given as query string or encoded in the path
        # like _/cmd/command/layer/la/x/12/y/34 etc
        if path == gws.SERVER_ENDPOINT:
            return args

        if path.startswith(gws.SERVER_ENDPOINT + '/'):
            # Path segments after the endpoint come in key/value pairs.
            p = path.split('/')
            for n in range(3, len(p), 2):
                args[p[n - 1]] = p[n]
            return args

        gws.log.error(f'invalid request path: {path!r}')
        raise gws.web.error.NotFound()

    def _encode_struct(self, data, typ):
        """Serialize data to JSON or msgpack bytes/text."""
        if typ == _JSON:
            return gws.tools.json2.to_string(data, pretty=True)
        if typ == _MSGPACK:
            return umsgpack.dumps(data, default=gws.as_dict)
        raise ValueError('invalid struct type')

    def _decode_struct(self, typ):
        """Parse the request body as JSON or msgpack; 400 on malformed input."""
        if typ == _JSON:
            try:
                s = self.data.decode(encoding='utf-8', errors='strict')
                return gws.tools.json2.from_string(s)
            except (UnicodeDecodeError, gws.tools.json2.Error):
                gws.log.error('malformed json request')
                raise gws.web.error.BadRequest()

        if typ == _MSGPACK:
            try:
                return umsgpack.loads(self.data)
            except (TypeError, umsgpack.UnpackException):
                gws.log.error('malformed msgpack request')
                raise gws.web.error.BadRequest()

        gws.log.error('invalid struct type')
        raise gws.web.error.BadRequest()
| 31.635556 | 117 | 0.609722 | 906 | 7,118 | 4.634658 | 0.232892 | 0.027149 | 0.015718 | 0.019052 | 0.136699 | 0.094784 | 0.036675 | 0.036675 | 0.036675 | 0.027864 | 0 | 0.006958 | 0.27311 | 7,118 | 224 | 118 | 31.776786 | 0.8046 | 0.045659 | 0 | 0.180723 | 0 | 0 | 0.064397 | 0.017094 | 0 | 0 | 0 | 0 | 0 | 1 | 0.162651 | false | 0.006024 | 0.084337 | 0.066265 | 0.451807 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf3478f4de02de9ef45febd053640f7c5386da10 | 778 | py | Python | game_shop/forms.py | ziyic/UoA_PGT_CS551Q_shopping | e0ccf867871f2ecc014a5e6fff95cba4b8342393 | [
"BSD-3-Clause"
] | null | null | null | game_shop/forms.py | ziyic/UoA_PGT_CS551Q_shopping | e0ccf867871f2ecc014a5e6fff95cba4b8342393 | [
"BSD-3-Clause"
] | null | null | null | game_shop/forms.py | ziyic/UoA_PGT_CS551Q_shopping | e0ccf867871f2ecc014a5e6fff95cba4b8342393 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author : Ziyi Cao
# @Time : 2021/4/26
# @Function:
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Game
class SignUpForm(UserCreationForm):
    """Registration form extending Django's UserCreationForm with name,
    email and address fields (field declaration order sets render order)."""
    username = forms.CharField(max_length=30)
    first_name = forms.CharField(max_length=30)
    last_name = forms.CharField(max_length=30)
    email = forms.EmailField(max_length=50)
    # NOTE(review): 'address' is a plain form field, not a column on the
    # built-in User model — presumably saved elsewhere; confirm in the view.
    address = forms.CharField()

    class Meta:
        model = User
        fields = ('username', 'email', 'password1', 'password2',
                  'address', 'first_name', 'last_name',)
class GameForm(forms.ModelForm):
    """Model form exposing a Game's name and price."""
    class Meta:
        model = Game
        fields = ('name', 'price',)
| 25.933333 | 64 | 0.660668 | 94 | 778 | 5.382979 | 0.5 | 0.110672 | 0.100791 | 0.136364 | 0.164032 | 0.114625 | 0 | 0 | 0 | 0 | 0 | 0.030945 | 0.210797 | 778 | 29 | 65 | 26.827586 | 0.79316 | 0.122108 | 0 | 0.111111 | 0 | 0 | 0.097345 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.055556 | 0.222222 | 0 | 0.722222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 2 |
cf34a3f0197c3f6dc8a1f65c74ae293fb179d4ac | 3,299 | py | Python | mozinor/example/toto_stack_model_script.py | Jwuthri/Mozinor | 5a2cd4f0447a96425d899a8e063668741a091a8b | [
"MIT"
] | 3 | 2017-08-17T21:32:05.000Z | 2018-07-30T11:30:09.000Z | mozinor/example/toto_stack_model_script.py | Jwuthri/Mozinor | 5a2cd4f0447a96425d899a8e063668741a091a8b | [
"MIT"
] | null | null | null | mozinor/example/toto_stack_model_script.py | Jwuthri/Mozinor | 5a2cd4f0447a96425d899a8e063668741a091a8b | [
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on July 2017
@author: JulienWuthrich
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_absolute_error, accuracy_score, r2_score
from sklearn.model_selection import train_test_split
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier, GradientBoostingClassifier
from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import ElasticNetCV, LassoLarsCV, RidgeCV
from sklearn.naive_bayes import BernoulliNB, GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import KNeighborsRegressor
from xgboost import XGBRegressor, XGBClassifier
from vecstack import stacking
# Read the csv file
# Read the csv file
data = pd.read_csv("toto.csv")

# Task type flag: False selects the classification metric and scoring below.
regression = False
if regression:
    metric = r2_score
else:
    metric = accuracy_score

# Split dependent and independent variables
y = data[["predict"]]
X = data.drop("predict", axis=1)

# Split into training and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)

# Apply some feature engineering (degree=1 keeps the features, adds a bias column)
poly_reg = PolynomialFeatures(degree=1)

# Transform into numpy objects
# NOTE(review): DataFrame.ix was removed in pandas 1.0 — on modern pandas
# use .iloc[:, 0] instead.
x_train = poly_reg.fit_transform(X_train)
x_test = poly_reg.fit_transform(X_test)
y_test = np.array(y_test.ix[:,0])
y_train = np.array(y_train.ix[:,0])

# Define the level-1 (base) models for stacking
lmodels = [ExtraTreesClassifier(bootstrap=False, class_weight=None, criterion='entropy',
           max_depth=None, max_features=0.6, max_leaf_nodes=None,
           min_impurity_split=1e-07, min_samples_leaf=1,
           min_samples_split=4, min_weight_fraction_leaf=0.0,
           n_estimators=100, n_jobs=1, oob_score=False, random_state=None,
           verbose=0, warm_start=False), XGBClassifier(base_score=0.5, colsample_bylevel=1, colsample_bytree=1,
           gamma=0, learning_rate=0.5, max_delta_step=0, max_depth=8,
           min_child_weight=6, missing=None, n_estimators=50, nthread=-1,
           objective='multi:softprob', reg_alpha=0, reg_lambda=1,
           scale_pos_weight=1, seed=0, silent=True, subsample=0.9), KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski',
           metric_params=None, n_jobs=1, n_neighbors=17, p=2,
           weights='distance')]

# Build the stack level 1: out-of-fold predictions become level-2 features
S_train, S_test = stacking(
    lmodels, x_train, y_train, x_test,
    regression=regression, metric=metric,
    n_folds=3, shuffle=True, random_state=0, verbose=1
)

# Build the level-2 (meta) model
model = DecisionTreeClassifier(class_weight=None, criterion='entropy', max_depth=10,
        max_features=None, max_leaf_nodes=None,
        min_impurity_split=1e-07, min_samples_leaf=2,
        min_samples_split=5, min_weight_fraction_leaf=0.0,
        presort=False, random_state=None, splitter='best')

# Fit the meta model on the level-1 predictions
model.fit(S_train, y_train)

# Predict
y_pred = model.predict(S_test)

# Scoring
if regression:
    print('Score on test set:', mean_absolute_error(y_test, y_pred))
else:
    print('Score on test set:', accuracy_score(y_test, y_pred))
print(metric(y_test, y_pred))
| 35.095745 | 136 | 0.76114 | 469 | 3,299 | 5.130064 | 0.381663 | 0.054863 | 0.012469 | 0.012469 | 0.148795 | 0.093101 | 0.073982 | 0.041563 | 0.041563 | 0.041563 | 0 | 0.023734 | 0.144286 | 3,299 | 93 | 137 | 35.473118 | 0.828551 | 0.090634 | 0 | 0.067797 | 0 | 0 | 0.037261 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.271186 | 0 | 0.271186 | 0.050847 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf36336bd222b8046304d99fe89eeed7d9b73ede | 4,330 | py | Python | Detection and Tracking/main.py | Jay-Nehra/Object-Detection | f91085ecf709d21bf7ffd3b2e370fc36ae5e88f2 | [
"BSD-3-Clause"
] | 1 | 2021-01-23T09:11:59.000Z | 2021-01-23T09:11:59.000Z | Detection and Tracking/main.py | Jay-Nehra/Object-Detection | f91085ecf709d21bf7ffd3b2e370fc36ae5e88f2 | [
"BSD-3-Clause"
] | null | null | null | Detection and Tracking/main.py | Jay-Nehra/Object-Detection | f91085ecf709d21bf7ffd3b2e370fc36ae5e88f2 | [
"BSD-3-Clause"
] | null | null | null | """
this program takes in a checkerboard image from a camera and calibrates the
image to remove camera radial and tangential distortion.
"""
import cv2
import YOLO as odYOLO # object detection using YOLO
import HOG as odHOG # object detection using an svm and HOG features
import data
import numpy as np
""" Uncomment below if adding project 4 - advanced lane detection """
#from driveline import Lane
#from camera import CameraImage
#from lane import lane_pipeline

# Detector selection: True = YOLO (darkflow), False = SVM + HOG classifier.
use_yolo = False
def adjust_channel_gamma(channel, gamma=1.):
    """Apply gamma correction to a single 2D image channel.

    channel : 2D uint8 source channel
    gamma   : brightness correction factor, gamma < 1 => darker image
    returns : gamma corrected channel
    """
    # Precompute a 256-entry lookup table mapping each intensity [0, 255]
    # through the power-law curve.
    # http://www.pyimagesearch.com/2015/10/05/opencv-gamma-correction/
    exponent = 1.0 / np.absolute(gamma)
    lut = np.array(
        [((level / 255.0) ** exponent) * 255 for level in np.arange(0, 256)]
    ).astype("uint8")
    # cv2.LUT remaps every pixel through the table in a single C-level pass.
    return cv2.LUT(channel, lut)
def adjust_image_gamma(img, gamma=1.):
    """Adjust the brightness of a BGR image via gamma correction.

    img   : source BGR image
    gamma : brightness correction factor, gamma < 1 => darker image
    returns : gamma corrected image
    """
    # Convert to HSV so only the V (brightness) channel is modified.
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    hsv[:, :, 2] = adjust_channel_gamma(hsv[:, :, 2], gamma=gamma)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
# Define the codec and create VideoWriter object
if data.isVideo:
    # setup video recording when using a video
    fourcc = cv2.VideoWriter_fourcc(* 'WMV2')
    filename = 'output_images/YOLO_projectvideo.wmv'
    out = cv2.VideoWriter(filename, fourcc, 20.0, (1280, 720))

# initialise the video capture (works for both image files and videos)
cam = cv2.VideoCapture(data.img_add)

# setup which object detection method to use: YOLO or SVM & HOG
if use_yolo is True:
    # define the yolo classifier
    # this calls the python wrapper implemented by darkflow
    # https://github.com/thtrieu/darkflow
    # this is an implementation of the yolo object detection method outlined in papers
    # You Only Look Once: Unified, Real-Time Object Detection, arXiv:1506.02640 [cs.CV],
    # YOLO9000: Better, Faster, Stronger, arXiv:1612.08242 [cs.CV]
    yolo = odYOLO.yolo(model="cfg/tiny-yolo-voc.cfg", chkpt="bin/tiny-yolo-voc.weights", threshold=0.12)
else:
    # define a SVM and HOG classifier
    car_object = odHOG.object(spatial_size=(12,12), hist_bins=34, pix_per_cell=13, hog_channel='ALL', cspace='HLS')
    # location of the training data for the SVM
    car_object.train_svm("data/vehicles_smallset/", "data/non-vehicles_smallset/")

while(1):
    # continually loop if the input is a video until it ends or the user presses 'q'
    # if an image, execute once and wait till the user presses a key
    if data.isVideo:
        ret, image = cam.read()
        if ret == False:
            break
    else:
        # read in the image to the program
        image = cv2.imread(data.img_add, -1)

    """ object detection """
    if use_yolo is True:
        # YOLO classifier: brighten the frame before detection
        gamma_img = adjust_image_gamma(image.copy(), 2)
        objs = yolo.find_object(gamma_img)  # find the objects
        image = yolo.draw_box(image, objs, show_label=True)  # add the detected objects to the window
    else:
        h, w = image.shape[:2]
        # SVM and HOG classifier: search only the lower half of the frame
        gamma_img = adjust_image_gamma(image.copy(), 2)
        obj_pos = car_object.locate_objects(gamma_img, h // 2, h-80, 0, w, scale=2, show_obj=False,
                                            show_boxes=False, heat_thresh=6, show_heat=False)
        image = car_object.draw_labeled_bboxes(image, obj_pos, color=(0, 0, 255), thick=6)

    cv2.imshow('final', image)

    # wait for a user key interrupt then close all windows
    if data.isVideo:
        out.write(image)  # save image to video
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        # save the new image, then wait for any key before exiting
        cv2.imwrite('output_images/objects_' + data.image, image)
        cv2.waitKey(0)
        break

if data.isVideo:
    out.release()
cam.release()
cv2.destroyAllWindows()
| 37.982456 | 115 | 0.671132 | 624 | 4,330 | 4.575321 | 0.399038 | 0.031524 | 0.018214 | 0.011208 | 0.1331 | 0.10718 | 0.10718 | 0.10718 | 0.082662 | 0.051839 | 0 | 0.034555 | 0.231409 | 4,330 | 113 | 116 | 38.318584 | 0.823317 | 0.419861 | 0 | 0.272727 | 0 | 0 | 0.073418 | 0.064557 | 0 | 0 | 0.001688 | 0 | 0 | 1 | 0.036364 | false | 0 | 0.090909 | 0 | 0.163636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf372286c3b00f6d57b36a97cb015d54cb8dfc38 | 28,542 | py | Python | IndoorPositionEstimator/cflib/drone_quaternion.py | capriele/Crazyflie-Indoor-Position-Logger-Controller | 6f7a44984553d85a66a29c169a2f7c758a2aaac7 | [
"Apache-2.0"
] | 6 | 2017-04-23T15:47:57.000Z | 2020-03-15T17:52:15.000Z | IndoorPositionEstimator/cflib/drone_quaternion.py | capriele/Crazyflie-Indoor-Position-Logger-Controller | 6f7a44984553d85a66a29c169a2f7c758a2aaac7 | [
"Apache-2.0"
] | null | null | null | IndoorPositionEstimator/cflib/drone_quaternion.py | capriele/Crazyflie-Indoor-Position-Logger-Controller | 6f7a44984553d85a66a29c169a2f7c758a2aaac7 | [
"Apache-2.0"
] | null | null | null | """
Quadcopter Model + LQR Control + BackStepping Control
"""
#
# Author: Alberto Petrucci (petrucci.alberto@gmail.com) 2017
#
#__author__ = "Alberto Petrucci"
#__copyright__ = "Copyright 2017, Alberto Petrucci"
#__credits__ = ["Alberto Petrucci"]
#__license__ = "Apache"
#__version__ = "1.0.0"
#__maintainer__ = "Alberto Petrucci"
#__email__ = "petrucci.alberto@gmail.com"
#__status__ = "Production"
from __future__ import division
from numpy import *
from math import *
from control import *
class Quadcopter:
def __init__(self, dt):
    """Initialize the quadcopter model and its LQR controller.

    dt : controller sample time in seconds.
    Builds the physical parameters, the linearized model around hover,
    the LQR weight matrices, and the initial state.
    """
    ## Environment parameters
    self.g = 9.81
    self.airFriction = 0
    self.dt = dt
    self.t = 0

    ## Drone parameters
    self.m = 27/1000  # drone mass (27 g, expressed in kg)
    self.d = (65.0538/1000)*sin(pi/4)  # distance from the center to the motors
    self.c = 0.1  # propeller inertia coefficient
    self.alpha = 1
    self.Ix = self.m * self.d * self.d
    self.Iy = self.m * self.d * self.d
    self.Iz = 2 * self.m * self.d * self.d

    # Changing these weights gives higher or lower priority to each state
    # in the LQR cost (later assignments override the earlier ones).
    self.beta1 = 0.3
    self.beta2 = 0.3
    self.beta3x = 0.2#1.0
    self.beta3y = 0.2#1.0
    self.beta3z = 0.2#0.5
    self.beta3x = 5.0#5.0
    self.beta3y = 5.0#5.0
    self.beta3z = 1.0#1.0
    self.beta4 = 0.2
    self.beta = 500
    #self.beta = 3000
    self.thrustGain = 1
    #self.thrustGain = 1.34
    #self.thrustGain = 1.37
    self.Tf = dt
    # Inertia matrix (diagonal) and its inverse.
    self.Mat_J = matrix([
        [self.m*self.d*self.d, 0, 0],
        [0, self.m*self.d*self.d, 0],
        [0, 0, 2*self.m*self.d*self.d]
    ])
    self.Mat_Jinv = self.Mat_J.I
    # Mixer: maps the four motor forces to [total thrust, tau_x, tau_y, tau_z].
    self.Mat_T = matrix([
        [1, 1, 1, 1],
        [-self.d, -self.d, self.d, self.d],
        [self.d, -self.d, -self.d, self.d],
        [self.c, -self.c, self.c, -self.c]
    ])
    self.Mat_Tinv = self.Mat_T.I

    ## Linearized model (13 states: quaternion, body rates, position, velocity)
    self.A = matrix([
        [0, 0, 0, 0, 0, 0, -0.5*sqrt(1-self.alpha*self.alpha), 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0.5*self.alpha, -0.5*sqrt(1-self.alpha*self.alpha), 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0.5*sqrt(1-self.alpha*self.alpha), 0.5*self.alpha, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0.5*self.alpha, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
        [0, 2*self.g*sqrt(1-self.alpha*self.alpha), 2*self.g*self.alpha, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, -2*self.g*self.alpha, 2*self.g*sqrt(1-self.alpha*self.alpha), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    ])
    self.B = matrix([
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [1/self.m, 0, 0, 0],
    ])
    self.C = eye(13)
    self.D = zeros((13, 4))

    ## MOTOR SATURATION
    self.fmotmax = 0.5886/4  # max force generated by each motor

    # Hover trim point: level attitude, unit height, thrust balancing gravity.
    self.q_bar = matrix([
        [self.alpha],
        [0],
        [0],
        [sqrt(1 - self.alpha*self.alpha)]
    ])
    self.omega_bar = zeros((3, 1))
    self.p_bar = matrix([
        [0],
        [0],
        [1]
    ])
    self.v_bar = zeros((3, 1))
    self.ftot_bar = self.m * self.g
    self.tau_bar = matrix([
        [0],
        [0],
        [0]
    ])
    self.x_bar = vstack((self.q_bar, self.omega_bar, self.p_bar, self.v_bar))
    self.u_bar = vstack((self.ftot_bar, self.tau_bar))
    self.u = matrix([
        [0],
        [0],
        [0],
        [0]
    ])
    # Full-state LQR weight matrix (13x13, diagonal).
    self.Qm = matrix([
        [self.beta1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, self.beta1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, self.beta1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, self.beta1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, self.beta2, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, self.beta2, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, self.beta2, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, self.beta3x, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, self.beta3y, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, self.beta3z, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, self.beta4, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, self.beta4, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, self.beta4],
    ])
    self.R = self.beta * eye(4)

    ## LQR (reduced 12-state model with numeric coefficients)
    self.Amm = matrix([
        [0, 0, 0, 0.5, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0.5, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0.5, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0],
        [0, 19.62, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [-19.62, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    ])
    self.Bmm = matrix([
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 8.7393e+03, 0, 0],
        [0, 0, 8.7393e+03, 0],
        [0, 0, 0, 4.3696e+03],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [37.0370, 0, 0, 0]
    ])
    self.Cmm = matrix([
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
    ])
    self.Qmm = matrix([
        [self.beta1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, self.beta1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, self.beta1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, self.beta2, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, self.beta2, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, self.beta2, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, self.beta3x, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, self.beta3y, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, self.beta3z, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, self.beta4, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, self.beta4, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, self.beta4]
    ])
    # Ut drops the first (redundant quaternion) state when applying the
    # 12-state gain to the 13-state vector.
    self.Ut = matrix([
        [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
    ])
    # Solve the continuous-time LQR problem (from the 'control' package).
    [self.Km, self.Pm, self.em] = lqr(self.Amm, self.Bmm, self.Qmm, self.R)
    self.K_LQR = self.Km*self.Ut
    '''
    # stampo guadagni lqr per c
    for k in range(0, 4):
        string = ""
        for i in range(0, 13):
            string += str(self.K_LQR.item((k, i)))+", "
        # rimuovo gli ultimi due caratteri
        string = string[:-2]
        print "{"+string+"},"
    '''
    # State: quaternion, body rates, position, velocity
    self.q = matrix([
        [self.alpha], [0], [0], [sqrt(1-self.alpha*self.alpha)]
    ])
    self.omega = matrix([
        [0], [0], [0]
    ])
    self.p = matrix([
        [0], [0], [0]
    ])
    self.v = matrix([
        [0], [0], [0]
    ])
    self.x = vstack((
        self.q,
        self.omega,
        self.p,
        self.v
    ))
    self.setPoint = self.x

    # Observer variables (state reconstruction)
    self.x_hat = self.x
    # measured variables (quaternion + position)
    self.y = matrix([
        [0], [0], [0], [0], [0], [0], [0]
    ])

    # Backstepping controller reference: one row per controlled variable,
    # columns hold [value, first derivative, second derivative].
    self.backsteppingSetPoint = matrix([
        # Roll
        [0, 0, 0],
        # Pitch
        [0, 0, 0],
        # Yaw
        [0, 0, 0],
        # X
        [0, 0, 0],
        # Y
        [0, 0, 0],
        # Z
        [0, 0, 0],
    ])
def setSetPoint(self, q0, q1, q2, q3, omegax, omegay, omegaz, px, py, pz, vx, vy, vz):
    """Store the 13-element LQR reference state as a column vector.

    Layout: quaternion (q0..q3), body rates (omega*), position (p*),
    velocity (v*).
    """
    self.setPoint = matrix([[component] for component in (
        q0, q1, q2, q3,
        omegax, omegay, omegaz,
        px, py, pz,
        vx, vy, vz,
    )])
def setBacksteppingSetPoint(self, xd):
    """Store the backstepping reference trajectory.

    xd : 6x3 matrix, one row per controlled variable (roll, pitch, yaw,
    x, y, z), columns holding the reference and its first two derivatives.
    """
    self.backsteppingSetPoint = xd
def setState(self, q0, q1, q2, q3, omegax, omegay, omegaz, px, py, pz, vx, vy, vz):
    """Update the quadcopter state from measurements.

    The quaternion is renormalized; angular rates arrive in deg/s and are
    stored in rad/s; position and velocity are stored as-is.
    """
    quat = matrix([[q0], [q1], [q2], [q3]])
    self.q = quat / linalg.norm(quat)

    to_rad = pi / 180.0
    self.omega = matrix([
        [omegax * to_rad], [omegay * to_rad], [omegaz * to_rad]
    ])

    self.p = matrix([[px], [py], [pz]])
    self.v = matrix([[vx], [vy], [vz]])

    # NOTE(review): an observer-based reconstruction of the rate/velocity
    # states existed here but is disabled; all thirteen states are taken
    # directly from the measurements.
    self.x = vstack((
        self.q,
        self.omega,
        self.p,
        self.v
    ))
def update(self):
    """One LQR step: compute the input, saturate per-motor forces, re-map.

    The state-feedback input is converted to individual motor forces,
    each force is clamped into [0, fmotmax], and the saturated forces
    are mapped back to the input vector self.u.
    """
    # State feedback around the trim input u_bar.
    self.u = self.u_bar - self.K_LQR * (self.x - self.setPoint)
    # Individual motor forces f1..f4.
    forces = self.Mat_Tinv * self.u
    # Clamp each force to its physical range (upper bound first, then floor).
    for idx in range(4):
        forces[idx, 0] = max(min(forces[idx, 0], self.fmotmax), 0)
    # Input actually applied after saturation.
    self.u = self.Mat_T * forces
    # self.predict(self.u)
def backstepping2(self):
    """Backstepping position/attitude controller; writes self.u.

    Steps, in the order of the code below:
      1. thrust u1 from the Z tracking error (gravity feed-forward included);
      2. virtual accelerations Ux/Uy from the X/Y tracking errors;
      3. a desired quaternion qd built from Ux, Uy and the yaw reference;
      4. the quaternion tracking error qe = conj(q) * qd;
      5. torques u2..u4 from closed-form (presumably symbolically derived)
         polynomial expressions.
    The result stored is self.u = [|u1|, u2, u3, u4]^T.  All gains
    (c2..c13, c22..c44) are hard-coded below.
    """
    # Current State (quaternion components)
    x1 = self.q[0, 0]  # q0
    x2 = self.q[1, 0]  # q1
    x3 = self.q[2, 0]  # q2
    x4 = self.q[3, 0]  # q3
    # Angular Speeds
    x5 = self.omega[0, 0]  # wx
    x6 = self.omega[1, 0]  # wy
    x7 = self.omega[2, 0]  # wz
    # Positions
    x8 = self.p[0, 0]  # x
    x9 = self.p[1, 0]  # y
    x10 = self.p[2, 0]  # z
    # Speeds
    x11 = self.v[0, 0]  # vx
    x12 = self.v[1, 0]  # vy
    x13 = self.v[2, 0]  # vz
    # Reference plus its 1st and 2nd derivatives (rows: roll, pitch, yaw,
    # x, y, z -- see setBacksteppingSetPoint).
    xd = self.backsteppingSetPoint
    # Debug output: desired vs. current position (Python 2 print statement).
    print matrix([
        [xd[3, 0], xd[4, 0], xd[5, 0]],
        [x8, x9, x10],
    ])
    # Z: thrust from the altitude tracking error.
    c10 = 8
    c13 = 3
    e10 = xd[5, 0] - x10
    e13 = x13 - xd[5, 1] - c10 * e10
    # The denominator equals R[2,2] of the rotation matrix for a unit
    # quaternion (cf. quaternion2RotationMatrix) -- zero when the body z
    # axis is horizontal; no guard here.
    u1 = self.m * (self.g + e10 + xd[5, 2] - c13 * e13 + c10 * (xd[5, 1] - x13)) / (x1*x1 - x2*x2 - x3*x3 + x4*x4)
    if u1 != 0:
        # X: virtual acceleration command (scaled by thrust).
        c8 = 8#8
        c11 = 4#4
        e8 = xd[3, 0] - x8
        e11 = x11 - xd[3, 1] - c8 * e8
        Ux = self.m * (e8 + xd[3, 2] - c11 * e11 + c8 * (xd[3, 1] - x11)) / (2*u1)
        # Y: virtual acceleration command (scaled by thrust).
        c9 = 8#8
        c12 = 4#4
        e9 = xd[4, 0] - x9
        e12 = x12 - xd[4, 1] - c9 * e9
        Uy = self.m * (e9 + xd[4, 2] - c12 * e12 + c9 * (xd[4, 1] - x12)) / (2*u1)
    else:
        Ux = 0
        Uy = 0
    # Desired Quaternion from Ux/Uy and the yaw reference xd[2, 0].
    # NOTE(review): divides by x1 (q0) with no guard -- singular when q0 == 0.
    qd = matrix([
        [1],
        [-(Uy-x3*x4)/x1],
        [(Ux-x2*x4)/x1],
        [xd[2, 0]],
    ])
    qd = qd / linalg.norm(qd)
    # Compute quaternion error: qe = conj(q) * qd.
    q = matrix([
        [x1],
        [-x2],
        [-x3],
        [-x4]
    ])
    qe = self.quaternionProduct(q, qd)
    # Pure quaternion holding the negated body rates.
    w = matrix([
        [0],
        [-x5],
        [-x6],
        [-x7]
    ])
    norm_w = linalg.norm(w)
    # NOTE(review): the rate quaternion is normalized before the product;
    # we is then used as a feed-forward term (xd2d..xd4d) below -- verify
    # this normalization is intended.
    if norm_w != 0:
        w = w / norm_w
    we = self.quaternionProduct(w, qe)
    # Per-axis backstepping errors: e* on a quaternion-error component,
    # e** on its rate (quaternion-kinematics terms), with gains c*/c**.
    c4 = 20
    c44 = 10
    e4 = qe[3, 0]
    e44 = 0.5 * (-x3 * x5 + x2 * x6 + x1 * x7) - c4 * e4
    xd4d = we[3, 0]
    c3 = 60
    c33 = 60
    e3 = qe[2, 0]
    e33 = 0.5 * (x4 * x5 + x1 * x6 - x2 * x7) - c3 * e3
    xd3d = we[2, 0]
    c2 = 60
    c22 = 60
    e2 = qe[1, 0]
    e22 = 0.5 * (x1 * x5 - x4 * x6 + x3 * x7) - c2 * e2
    xd2d = we[1, 0]
    # Cached powers of the state components used by the torque formulas.
    x1_2 = x1 * x1
    x2_2 = x2 * x2
    x3_2 = x3 * x3
    x4_2 = x4 * x4
    x5_2 = x5 * x5
    x6_2 = x6 * x6
    x7_2 = x7 * x7
    x1_3 = x1_2 * x1
    x2_3 = x2_2 * x2
    x3_3 = x3_2 * x3
    x4_3 = x4_2 * x4
    div = x1 * (x1_2 + x2_2 + x3_2 + x4_2)
    mult = self.s * self.d * self.m
    u2 = 0
    u3 = 0
    u4 = 0
    # Closed-form torque polynomials (too long to verify by hand); the
    # torques stay zero when the common denominator vanishes (q0 == 0).
    if div != 0:
        u4 = (mult * (x4_3 * x6_2 - x4_3 * x5_2 + x4_3 * x7_2 + 4 * e4 * x1_2 + 4 * e4 * x4_2 - 2 * c4 * x1_3 * x7 + 4 * c4 * x1_2 * xd4d + 4 * c4 * x4_2 * xd4d - 2 * x1_3 * x5 * x6 - x1_2 * x4 * x5_2 + x1_2 * x4 * x6_2 + x2_2 * x4 * x5_2 + x1_2 * x4 * x7_2 + x2_2 * x4 * x6_2 + x3_2 * x4 * x5_2 + x2_2 * x4 * x7_2 + x3_2 * x4 * x6_2 + x3_2 * x4 * x7_2 + 4 * e2 * x1 * x3 - 4 * e3 * x1 * x2 + 4 * e2 * x2 * x4 + 4 * e3 * x3 * x4 - 4 * c44 * e44 * x1_2 - 4 * c44 * e44 * x4_2 - 4 * c22 * e22 * x1 * x3 - 4 * c22 * e22 * x2 * x4 + 4 * c33 * e33 * x1 * x2 - 4 * c33 * e33 * x3 * x4 + 4 * c2 * x1 * x3 * xd2d - 4 * c3 * x1 * x2 * xd3d + 4 * c2 * x2 * x4 * xd2d + 4 * c3 * x3 * x4 * xd3d - 2 * c2 * x1_2 * x3 * x5 + 2 * c3 * x1_2 * x2 * x6 - 2 * c2 * x1 * x3_2 * x7 - 2 * c3 * x1 * x2_2 * x7 - 2 * c4 * x1_2 * x2 * x6 + 2 * c4 * x1_2 * x3 * x5 + 2 * c2 * x2 * x4_2 * x6 - 2 * c3 * x3 * x4_2 * x5 - 2 * c4 * x1 * x4_2 * x7 - 2 * c4 * x2 * x4_2 * x6 + 2 * c4 * x3 * x4_2 * x5 + 2 * x1_2 * x2 * x5 * x7 - 2 * x1 * x4_2 * x5 * x6 + 2 * x2 * x4_2 * x5 * x7 - 2 * c2 * x1 * x2 * x4 * x5 + 2 * c3 * x1 * x2 * x4 * x5 + 2 * c2 * x1 * x3 * x4 * x6 - 2 * c3 * x1 * x3 * x4 * x6 - 2 * c2 * x2 * x3 * x4 * x7 + 2 * c3 * x2 * x3 * x4 * x7)) / div
        u3 = (mult * (x3_3 * x5_2 + x3_3 * x6_2 + x3_3 * x7_2 + 4 * e3 * x1_2 + 4 * e3 * x3_2 - 2 * c3 * x1_3 * x6 + 4 * c3 * x1_2 * xd3d + 4 * c3 * x3_2 * xd3d - 2 * x1_3 * x5 * x7 + x1_2 * x3 * x5_2 + x1_2 * x3 * x6_2 + x2_2 * x3 * x5_2 + x1_2 * x3 * x7_2 + x2_2 * x3 * x6_2 - x3 * x4_2 * x5_2 + x2_2 * x3 * x7_2 + x3 * x4_2 * x6_2 + x3 * x4_2 * x7_2 - 4 * e2 * x1 * x4 + 4 * e2 * x2 * x3 + 4 * e4 * x1 * x2 + 4 * e4 * x3 * x4 - 4 * c33 * e33 * x1_2 - 4 * c33 * e33 * x3_2 + 4 * c22 * e22 * x1 * x4 - 4 * c22 * e22 * x2 * x3 - 4 * c44 * e44 * x1 * x2 - 4 * c44 * e44 * x3 * x4 - 4 * c2 * x1 * x4 * xd2d + 4 * c2 * x2 * x3 * xd2d + 4 * c4 * x1 * x2 * xd4d + 4 * c4 * x3 * x4 * xd4d + 2 * c2 * x1_2 * x4 * x5 - 2 * c2 * x1 * x4_2 * x6 - 2 * c3 * x1 * x3_2 * x6 + 2 * c3 * x1_2 * x2 * x7 - 2 * c3 * x1_2 * x4 * x5 - 2 * c4 * x1 * x2_2 * x6 - 2 * c2 * x2 * x3_2 * x7 - 2 * c4 * x1_2 * x2 * x7 + 2 * c3 * x2 * x3_2 * x7 - 2 * c3 * x3_2 * x4 * x5 + 2 * c4 * x3_2 * x4 * x5 - 2 * x1 * x2 * x4 * x5_2 - 2 * x1_2 * x2 * x5 * x6 - 2 * x1 * x3_2 * x5 * x7 - 2 * x1 * x4_2 * x5 * x7 - 2 * c2 * x1 * x2 * x3 * x5 + 2 * c4 * x1 * x2 * x3 * x5 + 2 * c2 * x1 * x3 * x4 * x7 + 2 * c2 * x2 * x3 * x4 * x6 - 2 * c4 * x1 * x3 * x4 * x7 - 2 * c4 * x2 * x3 * x4 * x6 - 2 * x1 * x3 * x4 * x5 * x6 + 2 * x2 * x3 * x4 * x5 * x7)) / (2 * div)
        u2 = (mult * (x2_3 * x5_2 + x2_3 * x6_2 + x2_3 * x7_2 + 4 * e2 * x1_2 + 4 * e2 * x2_2 - 2 * c2 * x1_3 * x5 + 4 * c2 * x1_2 * xd2d + 4 * c2 * x2_2 * xd2d + 2 * x1_3 * x6 * x7 + x1_2 * x2 * x5_2 + x1_2 * x2 * x6_2 + x2 * x3_2 * x5_2 + x1_2 * x2 * x7_2 + x2 * x3_2 * x6_2 - x2 * x4_2 * x5_2 + x2 * x3_2 * x7_2 + x2 * x4_2 * x6_2 + x2 * x4_2 * x7_2 + 4 * e3 * x1 * x4 + 4 * e3 * x2 * x3 - 4 * e4 * x1 * x3 + 4 * e4 * x2 * x4 - 4 * c22 * e22 * x1_2 - 4 * c22 * e22 * x2_2 - 4 * c33 * e33 * x1 * x4 - 4 * c33 * e33 * x2 * x3 + 4 * c44 * e44 * x1 * x3 - 4 * c44 * e44 * x2 * x4 + 4 * c3 * x1 * x4 * xd3d + 4 * c3 * x2 * x3 * xd3d - 4 * c4 * x1 * x3 * xd4d + 4 * c4 * x2 * x4 * xd4d - 2 * c2 * x1 * x2_2 * x5 - 2 * c2 * x1_2 * x3 * x7 + 2 * c2 * x1_2 * x4 * x6 - 2 * c3 * x1 * x4_2 * x5 - 2 * c4 * x1 * x3_2 * x5 - 2 * c2 * x2_2 * x3 * x7 + 2 * c2 * x2_2 * x4 * x6 - 2 * c3 * x1_2 * x4 * x6 + 2 * c3 * x2_2 * x3 * x7 + 2 * c4 * x1_2 * x3 * x7 - 2 * c4 * x2_2 * x4 * x6 + 2 * x1 * x3 * x4 * x5_2 + 2 * x1_2 * x3 * x5 * x6 + 2 * x1 * x2_2 * x6 * x7 + 2 * x1 * x3_2 * x6 * x7 + 2 * x1 * x4_2 * x6 * x7 + 2 * x2_2 * x4 * x5 * x7 - 2 * c3 * x1 * x2 * x3 * x6 + 2 * c4 * x1 * x2 * x3 * x6 + 2 * c3 * x1 * x2 * x4 * x7 - 2 * c3 * x2 * x3 * x4 * x5 - 2 * c4 * x1 * x2 * x4 * x7 + 2 * c4 * x2 * x3 * x4 * x5 - 2 * x1 * x2 * x3 * x5 * x7 - 2 * x1 * x2 * x4 * x5 * x6)) / (2 * div)
    # Thrust is applied with absolute value; torques as computed above.
    self.u = matrix([
        [abs(u1)],
        [u2],
        [u3],
        [u4]
    ])
def update_observer(self):
    """Advance the state estimate self.x_hat by one time step self.dt.

    Integrates x_hat_dot = observer_function(x_hat) with a three-stage
    Runge-Kutta scheme; the plain Euler update is kept below, commented
    out, for reference.
    """
    # Eulero: self.x_hat = self.x_hat + self.observer_function(self.x_hat) * self.dt
    dt = self.dt
    slope1 = self.observer_function(self.x_hat)
    slope2 = self.observer_function(self.x_hat + slope1 * dt)
    midpoint = self.x_hat + (slope1 + slope2) * dt / 4
    slope3 = self.observer_function(midpoint)
    self.x_hat = self.x_hat + (slope1 + slope2 + 4 * slope3) * (dt / 6)
def observer_function(self, x_hat):
    """Observer (state-estimator) dynamics.

    Returns x_hat_dot = F(x_hat) + G(x_hat)*u + Qinv*K*(y - H(x_hat)):
    a model prediction plus an output-injection correction driven by the
    measurement residual (self.y holds quaternion + position, see setState).

    :param x_hat: 13x1 estimated state [q0..q3, wx..wz, px..pz, vx..vz]
    :return: 13x1 time derivative of the estimate
    """
    # Unpack the estimate: quaternion ...
    x1 = x_hat[0, 0]
    x2 = x_hat[1, 0]
    x3 = x_hat[2, 0]
    x4 = x_hat[3, 0]
    # ... body rates ...
    x5 = x_hat[4, 0]
    x6 = x_hat[5, 0]
    x7 = x_hat[6, 0]
    # ... position ...
    x8 = x_hat[7, 0]
    x9 = x_hat[8, 0]
    x10 = x_hat[9, 0]
    # ... linear velocity.
    x11 = x_hat[10, 0]
    x12 = x_hat[11, 0]
    x13 = x_hat[12, 0]
    # State (drift) function: quaternion kinematics, rate coupling,
    # position derivative = velocity, gravity on vz.
    # NOTE(review): the angular-acceleration rows (-x6*x7, x5*x7, 0) look
    # like a simplified / unit-inertia gyroscopic model -- confirm vs Mat_J.
    F = matrix([
        [-(x2 * x5 + x3 * x6 + x4 * x7) / 2],
        [(x1 * x5 - x4 * x6 + x3 * x7) / 2],
        [(x4 * x5 + x1 * x6 - x2 * x7) / 2],
        [(-x3 * x5 + x2 * x6 + x1 * x7) / 2],
        [-x6 * x7],
        [x5 * x7],
        [0],
        [x11],
        [x12],
        [x13],
        [0],
        [0],
        [-self.g]
    ])
    # Input function: torques (columns 1-3) drive the rate estimates via
    # 1/(m*d^2); thrust (column 0) drives velocity through the rotation.
    G = matrix([
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 1/(self.m*self.d*self.d), 0, 0],
        [0, 0, 1/(self.m*self.d*self.d), 0],
        [0, 0, 0, 1/(2*self.m*self.d*self.d)],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [(2*x2*x4+2*x1*x3)/self.m, 0, 0, 0],
        [(2*x3*x4-2*x1*x2)/self.m, 0, 0, 0],
        [(x1*x1-x2*x2-x3*x3+x4*x4)/self.m, 0, 0, 0],
    ])
    # Measurement function: quaternion and position are measured directly.
    H = matrix([
        [x1],
        [x2],
        [x3],
        [x4],
        [x8],
        [x9],
        [x10]
    ])
    # Inverse of Q (output-injection shaping matrix).
    # NOTE(review): rows 3-4 and 8-13 appear permuted with respect to the
    # state ordering, and the literal constants 18 are unexplained --
    # verify against the derivation this matrix was generated from.
    Qinv = matrix([
        [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0,1,0,0,0,0,0, 0, 0, 0, 0, 0, 0],
        [0,0,0,1,0,0,0, 0, 0, 0, 0, 0, 0],
        [0,0,0,0,0,1,0, 0, 0, 0, 0, 0, 0],
        [-(x1*x1*x5 + x2*x2*x5 - x1*x3*x7 + x1*x4*x6 + x2*x3*x6 + x2*x4*x7)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), (x1*x3*x6 + x1*x4*x7 + x2*x3*x7 - x2*x4*x6)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), (2*(x1*x1 + x2*x2))/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), -(x7*x1*x1 + x3*x5*x1 + x7*x2*x2 - x4*x5*x2)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), (2*(x1*x4 + x2*x3))/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), (18*x1*x3 - 18*x2*x4 + x1*x1*x6 + x2*x2*x6 - x1*x4*x5 - x2*x3*x5)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), -(2*(x1*x3 - x2*x4))/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), 0, 0, 0, 0, 0, 0],
        [-(x1*x1*x6 + x3*x3*x6 + x1*x2*x7 - x1*x4*x5 + x2*x3*x5 + x3*x4*x7)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), (x7*x1*x1 - x2*x6*x1 + x7*x3*x3 - x4*x6*x3)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), -(2*(x1*x4 - x2*x3))/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), (x1*x2*x5 + x1*x4*x7 - x2*x3*x7 + x3*x4*x5)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), (2*(x1*x1 + x3*x3))/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), -(18*x1*x2 + 18*x3*x4 + x1*x1*x5 + x3*x3*x5 + x1*x4*x6 - x2*x3*x6)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), (2*(x1*x2 + x3*x4))/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), 0, 0, 0, 0, 0, 0],
        [-(x1*x1*x7 + x4*x4*x7 - x1*x2*x6 + x1*x3*x5 + x2*x4*x5 + x3*x4*x6)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), -(x6*x1*x1 + x2*x7*x1 + x6*x4*x4 - x3*x7*x4)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), (2*(x1*x3 + x2*x4))/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), (x5*x1*x1 - x3*x7*x1 + x5*x4*x4 - x2*x7*x4)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), -(2*(x1*x2 - x3*x4))/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), -(18*x1*x1 + 18*x4*x4 - x1*x2*x5 - x1*x3*x6 - x2*x4*x6 + x3*x4*x5)/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), (2*(x1*x1 + x4*x4))/(x1*x1*x1 + x1*x2*x2 + x1*x3*x3 + x1*x4*x4), 0, 0, 0, 0, 0, 0],
        [0,0,0,0,0,0,0, 1, 0, 0, 0, 0, 0],
        [0,0,0,0,0,0,0, 0, 0, 1, 0, 0, 0],
        [0,0,0,0,0,0,0, 0, 0, 0, 0, 1, 0],
        [0,0,0,0,0,0,0, 0, 1, 0, 0, 0, 0],
        [0,0,0,0,0,0,0, 0, 0, 0, 1, 0, 0],
        [0,0,0,0,0,0,0, 0, 0, 0, 0, 0, 1]
    ])
    # Convergence gains (hand-tuned); some channels use much larger
    # injection gains (10000) than the directly measured ones (100).
    K = matrix([
        [100, 0, 0, 0, 0, 0, 0],
        [0, 100, 0, 0, 0, 0, 0],
        [0, 500, 0, 0, 0, 0, 0],
        [0, 0, 100, 0, 0, 0, 0],
        [0, 0, 500, 0, 0, 0, 0],
        [0, 0, 0, 100, 0, 0, 0],
        [0, 0, 0, 500, 0, 0, 0],
        [0, 0, 0, 0, 100, 0, 0],
        [0, 0, 0, 0, 10000, 0, 0],
        [0, 0, 0, 0, 0, 100, 0],
        [0, 0, 0, 0, 0, 10000, 0],
        [0, 0, 0, 0, 0, 0, 100],
        [0, 0, 0, 0, 0, 0, 10000]
    ])
    # Predicted state derivative with the measurement correction term.
    x_hat_dot = F + G*self.u + Qinv*K*(self.y - H)
    return x_hat_dot
def predict(self, u):
    """Propagate the internal model one Euler step of self.dt under input u.

    :param u: 4x1 input; u[0] is the body-z thrust, u[1..3] the torques.
    Updates self.q (re-normalized), self.omega, self.p, self.v in this
    order and rebuilds self.x from them.
    """
    # Body-frame thrust vector.
    thrust_body = matrix([
        [0],
        [0],
        [u[0, 0]]
    ])
    # Disturbance torque / force terms, currently switched off (scaled by 0).
    torque_dist = 0 * matrix([
        [0.1],
        [-0.1],
        [0.2]
    ])
    force_dist = 0 * matrix([
        [1],
        [1],
        [1]
    ])
    q0, q1, q2, q3 = (self.q[i, 0] for i in range(4))
    # Quaternion kinematics: q_dot = 0.5 * Q(q) * omega.
    kinematic = matrix([
        [-q1, -q2, -q3],
        [q0, -q3, q2],
        [q3, q0, -q1],
        [-q2, q1, q0]
    ])
    self.q = self.q + 0.5 * kinematic * self.omega * self.dt
    self.q = self.q / linalg.norm(self.q)
    torques = matrix([
        [u[1, 0]],
        [u[2, 0]],
        [u[3, 0]]
    ])
    # Euler's rotation equation: J*w_dot = tau - w x (J*w) (+ disturbance).
    omega_dot = self.Mat_Jinv * (torques - self.VectorialProduct(self.omega) * self.Mat_J * self.omega) + self.Mat_Jinv * torque_dist
    self.omega = self.omega + omega_dot * self.dt
    self.p = self.p + self.v * self.dt
    # NOTE: the rotation matrix is built from the *updated* quaternion.
    rotation = self.quaternion2RotationMatrix()
    gravity = matrix([
        [0],
        [0],
        [self.g]
    ])
    # Translational dynamics with quadratic air drag.
    v_dot = (1 / self.m) * (rotation * thrust_body + force_dist) - gravity - self.airFriction * linalg.norm(self.v) * self.v
    self.v = self.v + v_dot * self.dt
    self.x = vstack((
        self.q,
        self.omega,
        self.p,
        self.v
    ))
def getMotorInput(self):
    """Map the current input vector self.u to the four motor commands.

    Scales the thrust channel to the 16-bit motor range, converts the
    roll/pitch torques to per-motor contributions, clamps each torque
    channel to +/-65536, and mixes into (m1, m2, m3, m4).

    Fix: operate on a copy of self.u -- the original code wrote through
    the alias `u = self.u` and silently corrupted the controller state
    on every call.

    :return: tuple (m1, m2, m3, m4) of motor command values
    """
    scaleFactor = self.thrustGain * 65535.0 / (self.fmotmax * 4)
    u = self.u.copy()  # do not mutate the controller's input in place
    u[0, 0] = u[0, 0] * scaleFactor
    u[1, 0] = (u[1, 0] / 2.0) / self.d
    u[2, 0] = (u[2, 0] / 2.0) / self.d
    # NOTE(review): the yaw channel is forced to zero here (0/self.c);
    # presumably yaw actuation is deliberately disabled -- confirm.
    u[3, 0] = 0 / self.c
    percentual = 1
    # Clamp the three torque channels to +/-65536 * percentual.
    limit = 65536 * percentual
    for ch in (1, 2, 3):
        if u[ch, 0] < -limit:
            u[ch, 0] = -limit
        elif u[ch, 0] > limit:
            u[ch, 0] = limit
    # Mixer: distribute thrust and torques over the four motors.
    m1 = u[0, 0] - u[1, 0] + u[2, 0] + u[3, 0]
    m2 = u[0, 0] - u[1, 0] - u[2, 0] - u[3, 0]
    m3 = u[0, 0] + u[1, 0] - u[2, 0] + u[3, 0]
    m4 = u[0, 0] + u[1, 0] + u[2, 0] - u[3, 0]
    return m1, m2, m3, m4
def quaternionProduct(self, q, p):
    """
    Compute the Hamilton product q*p of two 4x1 quaternions (w, x, y, z).
    :param self:
    :param q: left operand, 4x1 matrix
    :param p: right operand, 4x1 matrix
    :return: 4x1 matrix holding q*p
    """
    w, x, y, z = q[0, 0], q[1, 0], q[2, 0], q[3, 0]
    left_matrix = matrix([
        [w, -x, -y, -z],
        [x, w, -z, y],
        [y, z, w, -x],
        [z, -y, x, w]
    ])
    return left_matrix * p
def quaternion2RotationMatrix(self):
    """
    Build the 3x3 rotation matrix corresponding to the state quaternion
    self.q (used by predict to rotate the body-frame thrust vector).
    :return: 3x3 rotation matrix
    """
    w, x, y, z = (self.q[i, 0] for i in range(4))
    return matrix([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (w * y + x * z)],
        [2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y), 2 * (w * x + y * z), 1 - 2 * (x * x + y * y)]
    ])
def VectorialProduct(self, v):
    """
    Build the 3x3 skew-symmetric matrix M of a 3-element column vector,
    so that M * w equals the cross product v x w.
    :param v: 3x1 matrix
    :return: M, 3x3 skew-symmetric matrix
    """
    vx, vy, vz = v[0, 0], v[1, 0], v[2, 0]
    return matrix([
        [0, -vz, vy],
        [vz, 0, -vx],
        [-vy, vx, 0]
    ])
def quaternion2RPY(self):
    """Return (roll, pitch, yaw) in radians extracted from self.q.

    The asin argument for pitch is clamped to [-1, 1] to guard against
    round-off pushing it outside the valid domain.  (A degree conversion
    used to live here but was disabled.)
    """
    w, x, y, z = (self.q[i, 0] for i in range(4))
    sin_pitch = 2 * (w * y - x * z)
    sin_pitch = max(-1, min(1, sin_pitch))
    yaw = atan2(2 * (x * y + w * z), w * w + x * x - y * y - z * z)
    pitch = asin(sin_pitch)
    roll = atan2(2 * (y * z + w * x), w * w - x * x - y * y + z * z)
    return roll, pitch, yaw
| 37.654354 | 1,374 | 0.387044 | 5,041 | 28,542 | 2.134497 | 0.072208 | 0.224164 | 0.292472 | 0.34684 | 0.491171 | 0.400093 | 0.35539 | 0.327509 | 0.311338 | 0.303903 | 0 | 0.231357 | 0.412725 | 28,542 | 757 | 1,375 | 37.704095 | 0.410572 | 0.051748 | 0 | 0.287695 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.006932 | null | null | 0.001733 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
cf37f380f3a304d0ac99d99b4a587e12239fe76f | 766 | py | Python | alipay/aop/api/response/AlipayInsSceneInsserviceprodSerinfoSyncResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/response/AlipayInsSceneInsserviceprodSerinfoSyncResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/response/AlipayInsSceneInsserviceprodSerinfoSyncResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayInsSceneInsserviceprodSerinfoSyncResponse(AlipayResponse):
def __init__(self):
super(AlipayInsSceneInsserviceprodSerinfoSyncResponse, self).__init__()
self._ser_biz_no = None
@property
def ser_biz_no(self):
return self._ser_biz_no
@ser_biz_no.setter
def ser_biz_no(self, value):
self._ser_biz_no = value
def parse_response_content(self, response_content):
response = super(AlipayInsSceneInsserviceprodSerinfoSyncResponse, self).parse_response_content(response_content)
if 'ser_biz_no' in response:
self.ser_biz_no = response['ser_biz_no']
| 29.461538 | 120 | 0.733681 | 88 | 766 | 5.988636 | 0.375 | 0.102467 | 0.136622 | 0.091082 | 0.056926 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001595 | 0.181462 | 766 | 25 | 121 | 30.64 | 0.838915 | 0.05483 | 0 | 0 | 0 | 0 | 0.027701 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.125 | 0.0625 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
cf38a491c875d1bd0ae06532a675a01ccb64787d | 426 | py | Python | tests/integration/test_status.py | pnw/env-tracker | ab7a539afa329b529b6e10e55ca23cc214e0fd49 | [
"MIT"
] | null | null | null | tests/integration/test_status.py | pnw/env-tracker | ab7a539afa329b529b6e10e55ca23cc214e0fd49 | [
"MIT"
] | null | null | null | tests/integration/test_status.py | pnw/env-tracker | ab7a539afa329b529b6e10e55ca23cc214e0fd49 | [
"MIT"
] | null | null | null | from tests.helpers import BaseTestCase
class TestStatusCommand(BaseTestCase):
def test_can_status(self):
"""
Default use case where user invokes `et status` with minimal parameters
"""
self.fail('Not Implemented')
def test_does_not_work_outside_of_a_linked_project(self):
"""
The users cwd must be inside of a project
"""
self.fail('Not Implemented')
| 26.625 | 79 | 0.65493 | 52 | 426 | 5.173077 | 0.730769 | 0.052045 | 0.081784 | 0.163569 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.265258 | 426 | 15 | 80 | 28.4 | 0.859425 | 0.265258 | 0 | 0.333333 | 0 | 0 | 0.112782 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.166667 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
cf39abdd7b9db220323875a0a137611f84fce21d | 1,646 | py | Python | functions/07.py | luan-gomes/python-basic-exercises | 213844b421b27ab3e9c09be24d4efb37cc6fce08 | [
"MIT"
] | null | null | null | functions/07.py | luan-gomes/python-basic-exercises | 213844b421b27ab3e9c09be24d4efb37cc6fce08 | [
"MIT"
] | null | null | null | functions/07.py | luan-gomes/python-basic-exercises | 213844b421b27ab3e9c09be24d4efb37cc6fce08 | [
"MIT"
] | null | null | null | """
1) Faça um programa que use a função valorPagamento para determinar o
valor a ser pago por uma prestação de uma conta.
2) O programa deverá solicitar ao usuário o valor da prestação e o número
de dias em atraso e passar estes valores para a função valorPagamento,
que calculará o valor a ser pago e devolverá este valor ao programa que
a chamou. O programa deverá então exibir o valor a ser pago na tela.
3) Após a execução, o programa deverá voltar a pedir outro valor de
prestação e assim continuar até que seja informado um valor igual a zero
para a prestação. Neste momento o programa deverá ser encerrado, exibindo
o relatório do dia, que conterá a quantidade e o valor total de prestações
pagas no dia.
4)O cálculo do valor a ser pago é feito da seguinte forma. Para pagamentos
sem atraso, cobrar o valor da prestação. Quando houver atraso, cobrar 3%
de multa, mais 0,1% de juros por dia de atraso.
"""
def valorPagamento(valorPrestacao, diasAtraso):
if diasAtraso == 0:
return valorPrestacao
else:
multa = 0.03 * valorPrestacao
jurosAoDia = (0.001*diasAtraso) * valorPrestacao
valorAPagar = valorPrestacao + multa + jurosAoDia
return valorAPagar
montanteDoDia = 0
quantidade = 0
while True:
prestacao = float(input("Informe o valor da prestação: "))
dias = int(input("Informe quantos dias de atraso: "))
if prestacao == 0:
print("-"*5+" RELATÓRIO DO DIA "+"-"*5)
print(f"Quantidade de contas pagas: {quantidade}")
print(f"Montante total: {montanteDoDia}")
break
else:
valor = valorPagamento(prestacao, dias)
print(f"Valor a ser pago: {valor}")
quantidade += 1
montanteDoDia += valor
| 35.021277 | 74 | 0.744228 | 253 | 1,646 | 4.841897 | 0.426877 | 0.034286 | 0.036735 | 0.053061 | 0.034286 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015683 | 0.186513 | 1,646 | 46 | 75 | 35.782609 | 0.899178 | 0.556501 | 0 | 0.086957 | 0 | 0 | 0.246537 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0 | 0 | 0.130435 | 0.173913 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf3a73df976f6a84385fb7762c36292debe844b3 | 1,814 | py | Python | common/login.py | zhaopiandehuiyiforsang/python_test | 7a6ef77afd3b436f798ca68c77b9ac8669e00094 | [
"MIT"
] | null | null | null | common/login.py | zhaopiandehuiyiforsang/python_test | 7a6ef77afd3b436f798ca68c77b9ac8669e00094 | [
"MIT"
] | null | null | null | common/login.py | zhaopiandehuiyiforsang/python_test | 7a6ef77afd3b436f798ca68c77b9ac8669e00094 | [
"MIT"
] | null | null | null | # -*- conding:utf-8 -*-
from init_env import BASE_DIR
from common.HttpUtils import HttpUtils
from common.env_config import ServerCC
from common.DateUtils import currentTimeMillis, DateTime
import json
import os
token_json_path = BASE_DIR + '/resources/token.json'
"""
获取接口调用凭证token工具
"""
URL_AUTH = 'https://rasdev9.zhixueyun.com/oauth/api/v1/auth'
def login(url=URL_AUTH, data=None):
if data is None:
return None
r = HttpUtils()
result = r.post(url, data=data)
if result.status_code != 200:
print('获取token失败')
os._exit(0)
token_file = open(token_json_path, 'w')
jsonObj = json.loads(result.text)
expires_in = jsonObj['expires_in']
# 过期时间
out_of_time = currentTimeMillis()+expires_in
jsonObj['out_of_time'] = out_of_time
jsonObj['expires_time'] = DateTime(out_of_time)
jsonObj['create_time'] = DateTime()
jsonStr = json.dumps(jsonObj)
token_file.write(jsonStr)
token_file.close()
r.logJson(jsonStr)
return jsonStr
def getToken(url=URL_AUTH, data=None, content=None):
if content == '' or content == None:
token_file = open(token_json_path, 'r')
content = token_file.read()
token_file.close()
if content == '' or content == None:
content = login(url, data)
return getToken(url, content)
jsonObj = json.loads(content)
access_token = jsonObj['access_token']
token_type = jsonObj['token_type']
out_of_time = jsonObj['out_of_time']
if out_of_time < currentTimeMillis()+5:
content = login(url, data)
return getToken(url, content)
token = token_type+'__'+access_token
return token
if __name__ == "__main__":
server = ServerCC()
URL_AUTH = server.getEnv(ServerCC.DEV)[1]
# print(getToken(''))
login(URL_AUTH)
| 25.914286 | 60 | 0.669239 | 238 | 1,814 | 4.857143 | 0.331933 | 0.030277 | 0.054498 | 0.041522 | 0.188581 | 0.119377 | 0.074394 | 0.074394 | 0 | 0 | 0 | 0.006276 | 0.209482 | 1,814 | 69 | 61 | 26.289855 | 0.799861 | 0.025358 | 0 | 0.163265 | 0 | 0 | 0.095348 | 0.012062 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040816 | false | 0 | 0.122449 | 0 | 0.265306 | 0.020408 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf3ae41287d546e236788642c821e46e2316896c | 2,269 | py | Python | practice/src/design_pattern/Interpreter.py | t10471/python | 75056454bfb49197eb44f6b4d6a1b0a0b4b408ec | [
"MIT"
] | null | null | null | practice/src/design_pattern/Interpreter.py | t10471/python | 75056454bfb49197eb44f6b4d6a1b0a0b4b408ec | [
"MIT"
] | null | null | null | practice/src/design_pattern/Interpreter.py | t10471/python | 75056454bfb49197eb44f6b4d6a1b0a0b4b408ec | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
import os
#compsiteとcommandをあわせたような形
#ContextがhandlerでCommandが処理
class JobCommand(object):
def execute(self, context):
if context.getCurrentCommand() != 'begin':
raise Exception('illegal command ' + str(context.getCurrentCommand()))
command_list = CommandListCommand()
command_list.execute(context.next())
class CommandListCommand(object):
def execute(self, context):
while (True):
current_command = context.getCurrentCommand()
if current_command is None:
raise Exception('"end" not found ')
elif current_command == 'end':
break
else:
command = CommandCommand()
command.execute(context)
context.next()
class CommandCommand(object):
def execute(self, context):
current_command = context.getCurrentCommand()
if current_command == 'diskspace':
free_size = 100000000.0
max_size = 210000000.0
ratio = free_size / max_size * 100
print( 'Disk Free : %dMB (%.2f%%)' % (free_size / 1024 / 1024, ratio))
elif current_command == 'date':
print datetime.datetime.today().strftime("%Y/%m/%d")
elif current_command == 'line':
print '--------------------'
else:
raise Exception('invalid command [' + str(current_command) + ']')
class Context(object):
def __init__(self, command):
self.commands = []
self.current_index = 0
self.max_index = 0
self.commands = command.strip().split()
print self.commands
self.max_index = len(self.commands)
def next(self):
self.current_index += 1
print self.current_index
return self
def getCurrentCommand(self):
if self.current_index > len(self.commands):
return None
return self.commands[self.current_index].strip()
def execute(command):
job = JobCommand()
try:
job.execute(Context(command))
except Exception, e:
print e.args
if __name__ == '__main__':
command = 'begin date line diskspace end'
if command != '':
execute(command)
| 26.383721 | 82 | 0.588365 | 227 | 2,269 | 5.731278 | 0.330396 | 0.086088 | 0.061491 | 0.046118 | 0.188317 | 0.083013 | 0.083013 | 0 | 0 | 0 | 0 | 0.022528 | 0.295725 | 2,269 | 85 | 83 | 26.694118 | 0.791615 | 0.031732 | 0 | 0.116667 | 0 | 0 | 0.075274 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.033333 | null | null | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
cf3bbae06f3088b31cf43074001976c60e15c3b8 | 262 | py | Python | wbb/utils/filter_groups.py | Imran95942/userbotisl | 1614af1d1ba904dfd5e28dfd5b3e21d5e24bb55c | [
"MIT"
] | 1 | 2021-11-17T13:25:25.000Z | 2021-11-17T13:25:25.000Z | wbb/utils/filter_groups.py | Imran95942/userbotisl | 1614af1d1ba904dfd5e28dfd5b3e21d5e24bb55c | [
"MIT"
] | null | null | null | wbb/utils/filter_groups.py | Imran95942/userbotisl | 1614af1d1ba904dfd5e28dfd5b3e21d5e24bb55c | [
"MIT"
] | null | null | null | chat_filters_group = 1
chatbot_group = 2
karma_positive_group = 3
karma_negative_group = 4
regex_group = 5
welcome_captcha_group = 6
antiflood_group = 7
blacklist_filters_group = 8
taglog_group = 9
chat_watcher_group = 10
flood_group = 11
autocorrect_group = 12
| 20.153846 | 27 | 0.816794 | 42 | 262 | 4.666667 | 0.666667 | 0.122449 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.066372 | 0.137405 | 262 | 12 | 28 | 21.833333 | 0.800885 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf3bfcccde630a28147bd6f4bf35f454312666f8 | 179 | py | Python | tests/model.py | gunyarakun/cached-image-optimizer | 80e4c9501bcde1a82e8aeb24c32563d97841dafa | [
"MIT"
] | 2 | 2021-04-06T06:07:35.000Z | 2021-04-16T08:42:13.000Z | tests/model.py | gunyarakun/cached-image-optimizer | 80e4c9501bcde1a82e8aeb24c32563d97841dafa | [
"MIT"
] | null | null | null | tests/model.py | gunyarakun/cached-image-optimizer | 80e4c9501bcde1a82e8aeb24c32563d97841dafa | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from typing import Dict
@dataclass(frozen=True)
class FixtureURLContent:
content: bytes = b""
FixtureURLS = Dict[str, FixtureURLContent]
| 16.272727 | 42 | 0.776536 | 20 | 179 | 6.95 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.150838 | 179 | 10 | 43 | 17.9 | 0.914474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 2 |
cf3e620c460aed9e0fba7d56f5f6161f6fb1dbd6 | 3,162 | py | Python | my_pilz_sandbox/scripts/pause.py | ct2034/my_pilz_sandbox | 40400c6469918f56d384580d41f61b2cca3b49c9 | [
"BSD-3-Clause"
] | null | null | null | my_pilz_sandbox/scripts/pause.py | ct2034/my_pilz_sandbox | 40400c6469918f56d384580d41f61b2cca3b49c9 | [
"BSD-3-Clause"
] | null | null | null | my_pilz_sandbox/scripts/pause.py | ct2034/my_pilz_sandbox | 40400c6469918f56d384580d41f61b2cca3b49c9 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from geometry_msgs.msg import Pose, Point, PoseArray, Quaternion
import math
import numpy as np
from pilz_robot_programming import *
import random
import rospy
import time
__REQUIRED_API_VERSION__ = "1" # API version
SLOW_VEL_SCALE = .1
ACC_SCALE = .1
GRIPPER_POSE_CLOSED = 0.001
GRIPPER_POSE_OPEN = 0.029
class MoveThread(threading.Thread):
def __init__(self, robot, cmd):
threading.Thread.__init__(self)
self._robot = robot
self._cmd = cmd
self.exception_thrown = False
def run(self):
rospy.logdebug("Start motion...")
try:
self._robot.move(self._cmd)
except RobotMoveFailed:
rospy.loginfo("Caught expected exception.")
self.exception_thrown = True
# trying to pause a seq command
def pausing_a_sequence(r):
r.move(Ptp(goal=Pose(position=Point(0.0, 0.0, .9), orientation=Quaternion(0,0,0,1)),
vel_scale=SLOW_VEL_SCALE,
acc_scale=ACC_SCALE))
r.move(Ptp(goal=Pose(position=Point(0.0, 0.0, .9), orientation=Quaternion(0,0,0,1)),
vel_scale=SLOW_VEL_SCALE,
acc_scale=ACC_SCALE))
print("prepared.")
seq = Sequence()
seq.append(Ptp(goal=Pose(position=Point(0.0, 0, .9), orientation=Quaternion(0,0,0,1)),
vel_scale=SLOW_VEL_SCALE,
acc_scale=ACC_SCALE))
seq.append(Ptp(goal=Pose(position=Point(0.2, 0, .9), orientation=Quaternion(0,0,0,1)),
vel_scale=SLOW_VEL_SCALE,
acc_scale=ACC_SCALE),
blend_radius=0.099)
seq.append(Ptp(goal=Pose(position=Point(0.2, 0.2, .9), orientation=Quaternion(0,0,0,1)),
vel_scale=SLOW_VEL_SCALE,
acc_scale=ACC_SCALE),
blend_radius=0.099)
seq.append(Ptp(goal=Pose(position=Point(0, 0.2, .9), orientation=Quaternion(0,0,0,1)),
vel_scale=SLOW_VEL_SCALE,
acc_scale=ACC_SCALE))
move_thread = MoveThread(r, seq)
move_thread.start()
for i in range(10):
rospy.sleep(1)
try:
r.pause()
except Exception as e:
rospy.loginfo(e)
rospy.sleep(.2)
r.resume()
move_thread.join()
# trying to pause a ptp command
def pausing_a_ptp(r):
r.move(Ptp(goal=Pose(position=Point(-0.2, 0.0, .9), orientation=Quaternion(0,0,0,1)),
vel_scale=SLOW_VEL_SCALE,
acc_scale=ACC_SCALE))
print("prepared.")
ptp = Ptp(goal=Pose(position=Point(0.2, 0, .9), orientation=Quaternion(0,0,0,1)),
vel_scale=SLOW_VEL_SCALE,
acc_scale=ACC_SCALE)
move_thread = MoveThread(r, ptp)
move_thread.start()
for i in range(10):
rospy.sleep(1)
r.pause()
rospy.sleep(.2)
r.resume()
move_thread.join()
if __name__ == "__main__":
# init a rosnode
rospy.init_node('robot_program_node')
# initialisation
r = Robot(__REQUIRED_API_VERSION__) # instance of the robot
# start the main program
pausing_a_sequence(r)
# pausing_a_ptp(r)
| 30.403846 | 92 | 0.606262 | 441 | 3,162 | 4.113379 | 0.226757 | 0.028666 | 0.114664 | 0.083793 | 0.556229 | 0.556229 | 0.556229 | 0.556229 | 0.520397 | 0.484565 | 0 | 0.041286 | 0.272296 | 3,162 | 103 | 93 | 30.699029 | 0.747066 | 0.058191 | 0 | 0.468354 | 0 | 0 | 0.028966 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050633 | false | 0 | 0.088608 | 0 | 0.151899 | 0.025316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf3f13905a5ccf5bc9884a2805ccfdf8e0e29624 | 822 | py | Python | feed-runner.py | quandram/podcatcher | b1d14b10b3e1afd1947e09ddf2006dac37c6fae7 | [
"MIT"
] | null | null | null | feed-runner.py | quandram/podcatcher | b1d14b10b3e1afd1947e09ddf2006dac37c6fae7 | [
"MIT"
] | null | null | null | feed-runner.py | quandram/podcatcher | b1d14b10b3e1afd1947e09ddf2006dac37c6fae7 | [
"MIT"
] | null | null | null | import configparser
import os
from podcatcher import podcatcher
import configKeys
def update_last_processed_date(config, configSection, lastDownloadedDate):
config.set(configSection, configKeys.LAST_DOWNLOADED_DATE, lastDownloadedDate.strftime("%Y-%m-%d %H:%M:%S %Z"))
with open(os.path.join(os.path.dirname(__file__), "config.ini"), "w") as configFile:
config.write(configFile)
def main():
    """Run the podcatcher for every configured feed section and record progress.

    Reads config.ini next to this script; every section except the settings
    section is treated as a feed, fetched, and its last-downloaded date saved.
    """
    config = configparser.ConfigParser()
    config.read(os.path.join(os.path.dirname(__file__), "config.ini"))
    for section in config.sections():
        if section == configKeys.SETTINGS_NAME:
            continue
        catcher = podcatcher(config[configKeys.SETTINGS_NAME], section, config[section])
        update_last_processed_date(config, section, catcher.get_new_pods())
# Script entry point.
if __name__ == "__main__":
    main()
| 37.363636 | 161 | 0.744526 | 98 | 822 | 5.959184 | 0.459184 | 0.041096 | 0.065068 | 0.078767 | 0.267123 | 0.267123 | 0.123288 | 0.123288 | 0.123288 | 0 | 0 | 0 | 0.131387 | 822 | 21 | 162 | 39.142857 | 0.817927 | 0 | 0 | 0 | 0 | 0 | 0.059611 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf3ffee88a76c631b85e7a5469a248333708be1a | 34 | py | Python | src/superfit/mainwindow/__init__.py | awacha/superfit | a95d346c4b38f61173c7434eb7389e2cf1ccae9c | [
"BSD-3-Clause"
] | null | null | null | src/superfit/mainwindow/__init__.py | awacha/superfit | a95d346c4b38f61173c7434eb7389e2cf1ccae9c | [
"BSD-3-Clause"
] | null | null | null | src/superfit/mainwindow/__init__.py | awacha/superfit | a95d346c4b38f61173c7434eb7389e2cf1ccae9c | [
"BSD-3-Clause"
] | null | null | null | from .mainwindow import MainWindow | 34 | 34 | 0.882353 | 4 | 34 | 7.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.088235 | 34 | 1 | 34 | 34 | 0.967742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
cf431e72726e4b11c54c98c1b966e61f78dddfff | 7,755 | py | Python | source/code/tag_utilities.py | awslabs/tag-tamer | bfd164c36b5e3ba8e01aba54d973ce372e982b09 | [
"MIT",
"MIT-0"
] | 15 | 2021-06-27T23:42:37.000Z | 2021-09-24T19:40:00.000Z | source/code/tag_utilities.py | awslabs/tag-tamer | bfd164c36b5e3ba8e01aba54d973ce372e982b09 | [
"MIT",
"MIT-0"
] | 7 | 2021-07-05T06:56:46.000Z | 2021-08-06T00:59:36.000Z | source/code/tag_utilities.py | awslabs/tag-tamer | bfd164c36b5e3ba8e01aba54d973ce372e982b09 | [
"MIT",
"MIT-0"
] | 5 | 2021-06-23T17:59:01.000Z | 2021-10-20T14:22:44.000Z | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
Tag Tamer utility functions to evaluate resource tags
"""
import logging
# Module-level logger, named after this module so records are attributable.
log = logging.getLogger(__name__)
def tag_filter_matcher(
    conjunction=None,
    tag_key1_state=None,
    tag_value1_state=None,
    tag_key2_state=None,
    tag_value2_state=None,
    resource_inventory=None,
    filter_tags=None,
    tag_dict=None,
    resource_name=None,
    resource_arn=None,
):
    """Updates the passed resource_inventory dictionary with ARN & name of all resources matching the
    user-selected filter tag keys & values.  User-selected filter tag keys & tag key:value combinations
    are AND'ed or OR'ed based on value of conjunction.

    The four *_state booleans describe which of tag_key1/tag_value1/tag_key2/tag_value2
    the user actually supplied; together they select a handler from a truth table.
    Invalid, unknown or unhandled combinations clear resource_inventory.
    """

    # --- handlers shared by both truth tables --------------------------------

    def _intersection_union_invalid(tag_dict, resource_name, resource_arn):
        # Invalid filter combination: discard all previously matched resources.
        resource_inventory.clear()

    def _intersection_union_fftt(tag_dict, resource_name, resource_arn):
        # Only key2:value2 supplied -> exact match on the second tag pair.
        if tag_dict.get(filter_tags.get("tag_key2")) == filter_tags.get("tag_value2"):
            resource_inventory[resource_arn] = resource_name

    def _intersection_union_ttff(tag_dict, resource_name, resource_arn):
        # Only key1:value1 supplied -> exact match on the first tag pair.
        if tag_dict.get(filter_tags.get("tag_key1")) == filter_tags.get("tag_value1"):
            resource_inventory[resource_arn] = resource_name

    # --- AND (intersection) handlers -----------------------------------------

    def _intersection_tfff(tag_dict, resource_name, resource_arn):
        # Key1 present, any value.
        if filter_tags.get("tag_key1") in tag_dict:
            resource_inventory[resource_arn] = resource_name

    def _intersection_fftf(tag_dict, resource_name, resource_arn):
        # Key2 present, any value.
        if filter_tags.get("tag_key2") in tag_dict:
            resource_inventory[resource_arn] = resource_name

    def _intersection_tftf(tag_dict, resource_name, resource_arn):
        # Both keys present, any values.
        if (
            filter_tags.get("tag_key1") in tag_dict
            and filter_tags.get("tag_key2") in tag_dict
        ):
            resource_inventory[resource_arn] = resource_name

    def _intersection_tftt(tag_dict, resource_name, resource_arn):
        # Both keys present AND key2 carries the requested value.
        if (
            filter_tags.get("tag_key1") in tag_dict
            and filter_tags.get("tag_key2") in tag_dict
        ):
            if tag_dict.get(filter_tags.get("tag_key2")) == filter_tags.get(
                "tag_value2"
            ):
                resource_inventory[resource_arn] = resource_name

    def _intersection_tttf(tag_dict, resource_name, resource_arn):
        # Both keys present AND key1 carries the requested value.
        if (
            filter_tags.get("tag_key1") in tag_dict
            and filter_tags.get("tag_key2") in tag_dict
        ):
            if tag_dict.get(filter_tags.get("tag_key1")) == filter_tags.get(
                "tag_value1"
            ):
                resource_inventory[resource_arn] = resource_name

    def _intersection_tttt(tag_dict, resource_name, resource_arn):
        # Both key:value pairs must match exactly.
        if tag_dict.get(filter_tags.get("tag_key1")) == filter_tags.get(
            "tag_value1"
        ) and tag_dict.get(filter_tags.get("tag_key2")) == filter_tags.get(
            "tag_value2"
        ):
            resource_inventory[resource_arn] = resource_name

    def _intersection_ffff(tag_dict, resource_name, resource_arn):
        # No filters supplied: every resource matches.
        resource_inventory[resource_arn] = resource_name

    # --- OR (union) handlers --------------------------------------------------

    def _union_tfff_tftf_fftf(tag_dict, resource_name, resource_arn):
        # Either key present, any value.
        if (
            filter_tags.get("tag_key1") in tag_dict
            or filter_tags.get("tag_key2") in tag_dict
        ):
            resource_inventory[resource_arn] = resource_name

    def _union_tttf(tag_dict, resource_name, resource_arn):
        # If key1 is present its value must match exactly (key2 then ignored);
        # otherwise key2's mere presence suffices.
        if filter_tags.get("tag_key1") in tag_dict:
            if tag_dict[filter_tags.get("tag_key1")] == filter_tags.get("tag_value1"):
                resource_inventory[resource_arn] = resource_name
        elif filter_tags.get("tag_key2") in tag_dict:
            resource_inventory[resource_arn] = resource_name

    def _union_tftt(tag_dict, resource_name, resource_arn):
        # If key2 is present its value must match exactly (key1 then ignored);
        # otherwise key1's mere presence suffices.
        if filter_tags.get("tag_key2") in tag_dict:
            if tag_dict[filter_tags.get("tag_key2")] == filter_tags.get("tag_value2"):
                resource_inventory[resource_arn] = resource_name
        elif filter_tags.get("tag_key1") in tag_dict:
            resource_inventory[resource_arn] = resource_name

    def _union_tttt(tag_dict, resource_name, resource_arn):
        # Either key:value pair matching exactly is sufficient.
        if tag_dict.get(filter_tags.get("tag_key1")) == filter_tags.get(
            "tag_value1"
        ) or tag_dict.get(filter_tags.get("tag_key2")) == filter_tags.get("tag_value2"):
            resource_inventory[resource_arn] = resource_name

    def _union_ffff(tag_dict, resource_name, resource_arn):
        # No filters supplied: every resource matches.
        resource_inventory[resource_arn] = resource_name

    # "AND" Truth table check for tag_key1, tag_value1, tag_key2, tag_value2
    intersection_combos = {
        (False, False, False, True): _intersection_union_invalid,
        (False, True, False, False): _intersection_union_invalid,
        (False, True, False, True): _intersection_union_invalid,
        (True, False, False, True): _intersection_union_invalid,
        (True, True, False, True): _intersection_union_invalid,
        (False, True, True, False): _intersection_union_invalid,
        (False, False, True, False): _intersection_fftf,
        (False, False, True, True): _intersection_union_fftt,
        (True, False, False, False): _intersection_tfff,
        (True, True, False, False): _intersection_union_ttff,
        (True, False, True, False): _intersection_tftf,
        (True, False, True, True): _intersection_tftt,
        (True, True, True, False): _intersection_tttf,
        (True, True, True, True): _intersection_tttt,
        (False, False, False, False): _intersection_ffff,
    }

    # "OR" Truth table check for tag_key1, tag_value1, tag_key2, tag_value2
    union_combos = {
        (False, False, False, True): _intersection_union_invalid,
        (False, True, False, False): _intersection_union_invalid,
        (False, True, False, True): _intersection_union_invalid,
        (False, True, True, True): _intersection_union_invalid,
        (True, True, False, True): _intersection_union_invalid,
        (False, False, True, False): _union_tfff_tftf_fftf,
        (False, False, True, True): _intersection_union_fftt,
        (True, False, False, False): _union_tfff_tftf_fftf,
        (True, False, True, False): _union_tfff_tftf_fftf,
        (True, False, True, True): _union_tftt,
        (True, True, False, False): _intersection_union_ttff,
        (True, True, True, False): _union_tttf,
        (True, True, True, True): _union_tttt,
        (False, False, False, False): _union_ffff,
    }

    combo = (tag_key1_state, tag_value1_state, tag_key2_state, tag_value2_state)
    # FIX: dispatch with .get() and the invalid handler as default.  The
    # original indexed the dicts directly, which raised KeyError for the
    # combinations missing from the tables (AND: (F,T,T,T); OR: (F,T,T,F)
    # and (T,F,F,T)); such combinations are now treated as invalid filters.
    if conjunction == "AND":
        intersection_combos.get(combo, _intersection_union_invalid)(
            tag_dict,
            resource_name,
            resource_arn,
        )
    elif conjunction == "OR":
        union_combos.get(combo, _intersection_union_invalid)(
            tag_dict,
            resource_name,
            resource_arn,
        )
    else:
        _intersection_union_invalid(tag_dict, resource_name, resource_arn)
def get_tag_filter_key_value_states(filter_tags=None):
    """Return which of the four filter fields were actually supplied.

    Returns a 4-tuple of bools (key1, value1, key2, value2): True when the
    corresponding entry in filter_tags is present and truthy (non-empty).

    NOTE(review): the default filter_tags=None would raise AttributeError on
    .get(); callers are assumed to always pass a dict -- signature kept as-is.
    """
    # bool(...) replaces the redundant `True if x else False` pattern.
    tag_key1_state = bool(filter_tags.get("tag_key1"))
    tag_value1_state = bool(filter_tags.get("tag_value1"))
    tag_key2_state = bool(filter_tags.get("tag_key2"))
    tag_value2_state = bool(filter_tags.get("tag_value2"))
    return tag_key1_state, tag_value1_state, tag_key2_state, tag_value2_state
| 40.602094 | 102 | 0.660477 | 970 | 7,755 | 4.878351 | 0.095876 | 0.063609 | 0.104396 | 0.128487 | 0.792265 | 0.75951 | 0.754861 | 0.717244 | 0.678994 | 0.651733 | 0 | 0.011465 | 0.246422 | 7,755 | 190 | 103 | 40.815789 | 0.798255 | 0.076725 | 0 | 0.490196 | 0 | 0 | 0.04677 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.006536 | 0 | 0.124183 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 |
cf44da1421ffcad816c602ccc4edb40367643818 | 199 | py | Python | prof_school/__init__.py | mohamedmelsayed/erp-school | 6da9bc4c4634e3b362be18f55300aacf147c32a3 | [
"MIT"
] | null | null | null | prof_school/__init__.py | mohamedmelsayed/erp-school | 6da9bc4c4634e3b362be18f55300aacf147c32a3 | [
"MIT"
] | null | null | null | prof_school/__init__.py | mohamedmelsayed/erp-school | 6da9bc4c4634e3b362be18f55300aacf147c32a3 | [
"MIT"
] | null | null | null | from .models import stage
from .models import level
from .models import class_name
from .models import student
from .models import parent
from .models import study_year
from .models import enrollment | 28.428571 | 30 | 0.829146 | 30 | 199 | 5.433333 | 0.4 | 0.429448 | 0.687117 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.135678 | 199 | 7 | 31 | 28.428571 | 0.947674 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
cf458acadf833d83661ceef97551521840b2249b | 730 | py | Python | brewerslab-orig-commander/metroui/ajaxRecalculate.py | allena29/brewerslabng | f47e671971436b7af806b54f6019c5b185d7d194 | [
"Apache-2.0"
] | 1 | 2020-04-12T10:08:10.000Z | 2020-04-12T10:08:10.000Z | brewerslab-orig-commander/metroui/ajaxRecalculate.py | allena29/brewerslabng | f47e671971436b7af806b54f6019c5b185d7d194 | [
"Apache-2.0"
] | 2 | 2021-12-13T20:09:45.000Z | 2022-03-08T21:09:57.000Z | brewerslab-orig-commander/metroui/ajaxRecalculate.py | allena29/brewerslabng | f47e671971436b7af806b54f6019c5b185d7d194 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import re
import sys
import cgi
import _mysql
import mysql.connector
from thememetro import *
from cloudNG import *
# CGI endpoint (Python 2): recalculate a recipe server-side and emit a tiny
# XML acknowledgement document.
con=mysql.connector.connect(user='brewerslab',password='beer',database="brewerslab")
form=cgi.FieldStorage()
theme=webTheme()
theme.bgcolor="#ffffff"
# NOTE(review): indentation was lost in this copy of the file; the lines below
# are assumed to all sit inside the `theme.localUser` guard -- confirm against
# the original source before relying on this structure.
if theme.localUser:
    # CGI response header for an XML payload.
    sys.stdout.write("Content-Type:text/xml\n\n")
    grid={}  # NOTE(review): appears unused within this excerpt
    # Second, lower-level DB handle; `db` also appears unused in this excerpt.
    db=_mysql.connect(host="localhost",user="brewerslab",passwd='beer',db="brewerslab")
    print "<xml><junk>"
    bc=brewerslabCloudApi()
    # Earlier recalculation entry points, kept disabled:
    #bc.calculateRecipe("test@example.com", form['recipe'].value)
    #bc.compile("test@example.com", form['recipe'].value,None)
    bc.calculateRecipeWrapper("test@example.com",form['recipe'].value)
    print "</junk><complete>1</complete></xml>"
| 28.076923 | 84 | 0.746575 | 98 | 730 | 5.540816 | 0.55102 | 0.060773 | 0.077348 | 0.099448 | 0.160221 | 0.160221 | 0 | 0 | 0 | 0 | 0 | 0.001471 | 0.068493 | 730 | 25 | 85 | 29.2 | 0.797059 | 0.182192 | 0 | 0 | 0 | 0 | 0.26431 | 0.10101 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.105263 | 0.368421 | null | null | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 3 |
cf4635f6758052b35e57afc944ef8877bef8bb73 | 3,624 | py | Python | PostDiffMixture/le_experiments/epsilonGreedyPolicy.py | SIGKDDanon/SIGKDD2021DeAnonV2 | 76f0373ec42ab55feefed3f4ce4bf4d532b51dd2 | [
"Apache-2.0"
] | null | null | null | PostDiffMixture/le_experiments/epsilonGreedyPolicy.py | SIGKDDanon/SIGKDD2021DeAnonV2 | 76f0373ec42ab55feefed3f4ce4bf4d532b51dd2 | [
"Apache-2.0"
] | null | null | null | PostDiffMixture/le_experiments/epsilonGreedyPolicy.py | SIGKDDanon/SIGKDD2021DeAnonV2 | 76f0373ec42ab55feefed3f4ce4bf4d532b51dd2 | [
"Apache-2.0"
] | null | null | null | import sys
import csv
import random
import math
import numpy
def constant_policy(num_actions):
    """Trivial default policy: always select action 1 (1-based), regardless of num_actions."""
    return 1
def getEpsilonGreedyAction(eps, num_actions, default_policy):
    """Epsilon-greedy action selection around `default_policy`.

    With probability `eps` an action index is drawn uniformly from
    [1, num_actions]; the rest of the time the default policy's choice is
    used.  Returns a tuple (action, probability) where `probability` is the
    overall chance of selecting the returned action under this scheme.
    """
    greedy_action = default_policy(num_actions)
    # Total selection probability of the greedy action (exploit mass plus the
    # chance of drawing it during exploration), and of any other action.
    greedy_prob = 1 - eps + (eps / num_actions)
    explore_prob = eps / num_actions

    if random.random() < eps:
        # Explore: 1-based uniform action index.
        action = random.randint(1, num_actions)
        prob = greedy_prob if action == greedy_action else explore_prob
    else:
        # Exploit: follow the default policy.
        action = greedy_action
        prob = greedy_prob

    return (action, prob)
def calculateEpsilonGreedyPolicy(source, dest, eps=0.1):
    """
    Calculate epsilon greedy on the source dataset.
    :params source: The input source dataset (e.g. simulated_data_files_input.csv).
    :param dest: The output destination dataset.
    :param eps: Epsilon parameter.
    """
    numActions = 3
    numMooclets = 3
    with open(source, newline='') as inf, open(dest, 'w', newline='') as outf:
        reader = csv.DictReader(inf)
        # Carry over the first three input columns, then append the chosen
        # conditions and the sampled rewards per MOOClet.
        out_columns = reader.fieldnames[0:3]
        out_columns.extend(['MOOClet1', 'MOOClet2', 'MOOClet3'])
        out_columns.extend(['RewardMOOClet1', 'RewardMOOClet2', 'RewardMOOClet3'])
        writer = csv.DictWriter(outf, fieldnames=out_columns)
        writer.writeheader()

        for sampleNumber, row in enumerate(reader, start=1):
            # User variables.
            ageQuartile = int(row['agequartilesUSER'])
            if ageQuartile == -1:
                # Use 0 instead of -1 for age quartiles.
                ageQuartile = 0
            nDaysAct = int(row['ndaysactUSER'])

            # Choose an epsilon-greedy action for each MOOClet (the selection
            # probability is discarded here).
            actions = [
                getEpsilonGreedyAction(eps, numActions, constant_policy)[0]
                for _ in range(numMooclets)
            ]

            # Read the reward signal corresponding to each chosen action.
            rewards = [
                int(row['MOOClet{}{}{}'.format(m + 1, chr(ord('A') + m), actions[m])])
                for m in range(numMooclets)
            ]

            # Write out some of the inputs, the chosen versions and the samples.
            writer.writerow({'SampleNumber' : sampleNumber, 'agequartilesUSER': ageQuartile, 'ndaysactUSER' : nDaysAct,
                             'MOOClet1' : actions[0], 'MOOClet2' : actions[1], 'MOOClet3' : actions[2],
                             'RewardMOOClet1' : rewards[0], 'RewardMOOClet2' : rewards[1], 'RewardMOOClet3' : rewards[2]})
def main():
    """Entry point: run epsilon-greedy over a dataset.

    Usage: script.py [source_csv dest_csv eps]; with no arguments a default
    simulated dataset and eps=0.1 are used.
    """
    if len(sys.argv) == 4:
        # FIX: argv values are strings -- the original passed sys.argv[3]
        # straight through as eps, which made `random.random() < eps` compare
        # float to str and raise TypeError inside getEpsilonGreedyAction.
        calculateEpsilonGreedyPolicy(sys.argv[1], sys.argv[2], eps=float(sys.argv[3]))
    else:
        calculateEpsilonGreedyPolicy('simulated_data_files_input.csv', 'testEpsilonGreedy_simData.csv', eps=0.1)
# Script entry point.
if __name__ == "__main__":
    main()
| 34.846154 | 122 | 0.625828 | 402 | 3,624 | 5.519901 | 0.345771 | 0.054078 | 0.036052 | 0.019829 | 0.100946 | 0.029743 | 0.029743 | 0.029743 | 0 | 0 | 0 | 0.017932 | 0.276766 | 3,624 | 103 | 123 | 35.184466 | 0.82831 | 0.095751 | 0 | 0.116667 | 0 | 0 | 0.10292 | 0.021533 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.083333 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
cf47aca5fbdc5c963454eb2445883327bc3c473e | 267 | py | Python | libp2p/protocol_muxer/exceptions.py | lithp/py-libp2p | f38899e26edabe59b291e466143d1c696c44de8d | [
"Apache-2.0",
"MIT"
] | null | null | null | libp2p/protocol_muxer/exceptions.py | lithp/py-libp2p | f38899e26edabe59b291e466143d1c696c44de8d | [
"Apache-2.0",
"MIT"
] | null | null | null | libp2p/protocol_muxer/exceptions.py | lithp/py-libp2p | f38899e26edabe59b291e466143d1c696c44de8d | [
"Apache-2.0",
"MIT"
] | null | null | null | from libp2p.exceptions import BaseLibp2pError
class MultiselectError(BaseLibp2pError):
    """Raised when an error occurs in the multiselect (protocol negotiation) process."""
class MultiselectClientError(BaseLibp2pError):
    """Raised when an error occurs on the client side of protocol selection."""
| 26.7 | 67 | 0.790262 | 28 | 267 | 7.535714 | 0.642857 | 0.199052 | 0.236967 | 0.255924 | 0.379147 | 0.379147 | 0.379147 | 0 | 0 | 0 | 0 | 0.017391 | 0.138577 | 267 | 9 | 68 | 29.666667 | 0.9 | 0.404494 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
cf47b256b9183a754f0c9560868b735c8181e6d5 | 9,250 | py | Python | cli/train.py | breid1313/nlp_hw3_text_fcn_pytorch | a4234e90d37e94a3043d9715c90bac7543f4b0ae | [
"Apache-2.0"
] | null | null | null | cli/train.py | breid1313/nlp_hw3_text_fcn_pytorch | a4234e90d37e94a3043d9715c90bac7543f4b0ae | [
"Apache-2.0"
] | null | null | null | cli/train.py | breid1313/nlp_hw3_text_fcn_pytorch | a4234e90d37e94a3043d9715c90bac7543f4b0ae | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Vladislav Lialin and Skillfactory LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Train a neural network classifier."""
import argparse
import logging
import os
import sys
import torch
import torch.nn.functional as F
import datasets
import toml
import wandb
from tqdm.auto import tqdm
from nn_classifier import utils, data_utils
from nn_classifier.modelling import FcnBinaryClassifier
# Configure root logging once at import time: timestamped, level-tagged
# records written to stdout at INFO level.
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=logging.INFO,
    stream=sys.stdout,
)
# Logger named after this script's file name.
logger = logging.getLogger(os.path.basename(__file__))
def parse_args(args=None):
    """Build the CLI parser and parse `args` (defaults to sys.argv)."""

    def _parse_bool(value):
        # wandb sweeps pass flag values as strings, so action='store_true'
        # cannot be used; accept "true" case-insensitively.
        return value.lower() == 'true'

    parser = argparse.ArgumentParser()
    # fmt: off
    # preprocessing
    parser.add_argument("--max_vocab_size", default=50_000, type=int,
                        help="maximum size of the vocabulary")

    # model
    parser.add_argument("--hidden_size", default=32, type=int,
                        help="size of the intermediate layer in the network")
    parser.add_argument("--use_batch_norm", default=False, type=_parse_bool)
    parser.add_argument("--dropout", default=0.5, type=float)
    parser.add_argument("--weight_decay", default=0, type=float,
                        help="L2 regularization parameter.")
    parser.add_argument("--lr", default=1e-3, type=float,
                        help="Learning rate")

    # training
    parser.add_argument("--batch_size", default=64, type=int,
                        help="number of examples in a single batch")
    parser.add_argument("--max_epochs", default=5, type=int,
                        help="number of passes through the dataset during training")
    parser.add_argument("--early_stopping", default=1, type=int,
                        help="Stop training if the model does not improve the results after this many epochs")

    # misc
    parser.add_argument("--device", default=None, type=str,
                        help="device to train on, use GPU if available by default")
    parser.add_argument("--output_dir", default=None, type=str,
                        help="a directory to save the model and config, do not save the model by default")
    parser.add_argument("--wandb_project", default="nlp_module_3_assignment",
                        help="wandb project name to log metrics to")
    # fmt: on
    return parser.parse_args(args)
def main(args):
    """Train tokenizer, model and save them to a directory

    args should __only__ be used in this function or passed to a hyperparameter logger.
    Never propagate args further into your code - it causes complicated and tightly connected interfaces
    that are easy to modify, but impossible to read and use outside the main file.
    """
    if args.output_dir is not None and os.path.exists(args.output_dir):
        raise ValueError(f"output_dir {args.output_dir} already exists")

    # Initialize wandb as soon as possible to log all stdout to the cloud
    wandb.init(config=args)

    # Fall back to GPU when available, CPU otherwise.
    device = args.device
    if not device:
        device = "cuda" if torch.cuda.is_available() else "cpu"

    _device_description = "CPU" if device == "cpu" else "GPU"
    logger.info(f"Using {_device_description} for training")

    # Create dataset objects
    logger.info("Loading dataset")
    text_dataset = datasets.load_dataset("imdb")
    train_texts = text_dataset["train"]["text"]
    train_labels = text_dataset["train"]["label"]

    tokenizer = utils.make_whitespace_tokenizer(
        train_texts, max_vocab_size=args.max_vocab_size
    )

    train_dataset = data_utils.CountDataset(
        train_texts,
        tokenizer=tokenizer,
        labels=train_labels,
    )
    test_dataset = data_utils.CountDataset(
        text_dataset["test"]["text"], tokenizer, text_dataset["test"]["label"]
    )

    # It is very important to shuffle the training set
    dataloader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True
    )
    test_dataloader = torch.utils.data.DataLoader(
        test_dataset, batch_size=args.batch_size, shuffle=False
    )

    # Create model and optimizer
    input_size = tokenizer.get_vocab_size()
    model = FcnBinaryClassifier(
        input_size=input_size,
        hidden_size=args.hidden_size,
        dropout_prob=args.dropout,
        use_batch_norm=args.use_batch_norm,
    )
    model = model.to(device)
    wandb.watch(model)

    optimizer = torch.optim.AdamW(
        model.parameters(), lr=args.lr, weight_decay=args.weight_decay
    )

    # Initialize current best accuracy as 0 for early stopping
    best_acc = 0
    epochs_without_improvement = (
        0  # training stops when this is larger than args.early_stopping
    )

    # if args.output_dir is specified, create it and save args as a toml file
    # toml is a more flexible, readable and error-prone alternative to yaml and json
    if args.output_dir is not None:
        os.makedirs(args.output_dir)
        with open(os.path.join(args.output_dir, "args.toml"), "w") as f:
            toml.dump(vars(args), f)
        tokenizer.save(os.path.join(args.output_dir, "tokenizer.json"))

    logger.info("Starting training")

    for _ in tqdm(range(args.max_epochs), desc="Epochs"):
        for x, y in dataloader:
            x = x.to(device)
            y = y.to(device)
            probs = model(x)
            # NOTE(review): binary_cross_entropy requires float targets --
            # assumes CountDataset already yields float labels; confirm.
            loss = F.binary_cross_entropy(probs, y)
            # FIX: gradients must be zeroed BEFORE backpropagation.  The
            # original order (backward -> zero_grad -> step) wiped the freshly
            # computed gradients, so step() applied all-zero updates and the
            # model never learned.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            wandb.log(
                {
                    "train_acc": utils.accuracy(probs, y),
                    "train_loss": loss,
                }
            )

        # FIX: evaluate on the held-out test set.  The original passed the
        # *training* dataloader here, so "test_acc" actually measured train
        # accuracy and test_dataloader was never used.
        test_acc = utils.evaluate_model(model, test_dataloader, device=device)
        wandb.log({"test_acc": test_acc})

        # Checkpoint on improvement; count stagnant epochs for early stopping.
        if test_acc >= best_acc:
            if args.output_dir:
                torch.save(
                    model.state_dict(),
                    os.path.join(args.output_dir, "model_checkpoint.pt"),
                )
            best_acc = test_acc
            epochs_without_improvement = 0
        else:
            epochs_without_improvement += 1
            if epochs_without_improvement > args.early_stopping:
                logger.info(
                    f"Stopping training early. {epochs_without_improvement} have passed without improvement, which has crossed the threshold of {args.early_stopping}"
                )
                break

    # Log the best accuracy as a summary so that wandb would use it instead of the final value
    wandb.run.summary["test_acc"] = best_acc
    logger.info("Training is finished!")
# Script entry point: parse CLI arguments and run the full training pipeline.
if __name__ == "__main__":
    args = parse_args()
    main(args)
| 36.27451 | 162 | 0.647027 | 1,231 | 9,250 | 4.734362 | 0.300569 | 0.023164 | 0.035003 | 0.010295 | 0.103638 | 0.054736 | 0.033974 | 0.013384 | 0 | 0 | 0 | 0.007746 | 0.260324 | 9,250 | 254 | 163 | 36.417323 | 0.844051 | 0.339676 | 0 | 0 | 0 | 0.007463 | 0.185037 | 0.015461 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014925 | false | 0.014925 | 0.089552 | 0 | 0.11194 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf483c36d559d50ef56df32e2b8c8288a4ddb79b | 7,436 | py | Python | src/profile.py | SimonPerche/PersonalitiesWars | 495803a5be5e9fde572c3f39086d8a3510c75f58 | [
"MIT"
] | null | null | null | src/profile.py | SimonPerche/PersonalitiesWars | 495803a5be5e9fde572c3f39086d8a3510c75f58 | [
"MIT"
] | null | null | null | src/profile.py | SimonPerche/PersonalitiesWars | 495803a5be5e9fde572c3f39086d8a3510c75f58 | [
"MIT"
] | 1 | 2022-03-08T22:07:50.000Z | 2022-03-08T22:07:50.000Z | from datetime import datetime, timedelta
import asyncio
import math
from collections import defaultdict
import discord
from discord.ext import commands, pages
from discord.commands import slash_command, Option
from database import DatabaseDeck, DatabasePersonality
from roll import min_until_next_claim
import utils
class Profile(commands.Cog):
    """Discord cog with profile-related slash commands: profile display, deck
    listing, profile personality selection and roll/claim timers.

    Persistent state lives in the DatabaseDeck / DatabasePersonality
    singletons; this cog only reads/writes through them and formats replies.
    """

    def __init__(self, bot):
        """Initialize the cog with the bot."""
        self.bot = bot

    #### Commands ####

    @slash_command(aliases=['pr'], description='Show the user profile or yours if no user given.',
                   guild_ids=utils.get_authorized_guild_ids())
    async def profile(self, ctx, member: Option(discord.Member, required=False, default=None)):
        # Default to the command author when no member is given.
        profile_owner = member or ctx.author

        # Thumbnail defaults to the owner's avatar; it may be replaced below
        # by the image of their configured profile personality.
        id_perso_profile = DatabaseDeck.get().get_id_perso_profile(ctx.guild.id, profile_owner.id)
        image = profile_owner.avatar.url if profile_owner.avatar else None

        if id_perso_profile:
            current_image = DatabaseDeck.get().get_perso_current_image(ctx.guild.id, id_perso_profile)
            perso = DatabasePersonality.get().get_perso_information(id_perso_profile)

            # Show profile's perso only if user owns the personality (might not be the case with trade, give, discard)
            owner = DatabaseDeck.get().perso_belongs_to(ctx.guild.id, perso['id'])
            if owner and owner == profile_owner.id and current_image:
                image = current_image

        ids_deck = DatabaseDeck.get().get_user_deck(ctx.guild.id, profile_owner.id)

        # Count how many owned personalities belong to each group.
        groups_count = defaultdict(int)  # Default value of 0
        personalities = DatabasePersonality.get().get_multiple_perso_information(ids_deck)
        if personalities:
            for perso in personalities:
                groups_count[perso["group"]] += 1

        # Keep only the 10 most popular groups
        groups = sorted(groups_count.items(), key=lambda item: item[1], reverse=True)[:10]

        # Badges: a badge is owned when the user holds every personality in it.
        owned_badges = []
        badges = DatabaseDeck.get().get_all_badges_with_perso(ctx.guild.id)
        for badge_name in badges:
            if all(id_perso in ids_deck for id_perso in badges[badge_name]):
                owned_badges.append(badge_name)

        badges_embed_msg = 'You don\'t own any badge...'
        if owned_badges:
            badges_embed_msg = '\n'.join(owned_badges)

        embed = discord.Embed(
            title=f'Profile of {profile_owner.name if profile_owner.nick is None else profile_owner.nick}', type='rich')
        embed.description = f'You own {len(ids_deck)} personalit{"ies" if len(ids_deck) > 1 else "y"}!'
        embed.add_field(name='Badges', value=badges_embed_msg)
        if groups:
            embed.add_field(name='Most owned groups',
                            value='\n'.join([f'*{group[0].capitalize()}* ({group[1]})' for group in groups]))
        if image:
            embed.set_thumbnail(url=image)

        await ctx.respond(embed=embed)

    @slash_command(description='Show the user deck or yours if no user given.',
                   guild_ids=utils.get_authorized_guild_ids())
    async def deck(self, ctx, member: Option(discord.Member, required=False, default=None)):
        deck_owner = member or ctx.author
        ids_deck = DatabaseDeck.get().get_user_deck(ctx.guild.id, deck_owner.id)

        # One "**name** *group*" line per owned personality, sorted alphabetically.
        persos_text = []
        personalities = DatabasePersonality.get().get_multiple_perso_information(ids_deck)
        if personalities:
            for perso in personalities:
                persos_text.append(f'**{perso["name"]}** *{perso["group"]}*')
        persos_text.sort()

        # Paginate the deck, 20 personalities per embed page.
        nb_per_page = 20
        persos_pages = []
        for i in range(0, len(persos_text), nb_per_page):
            embed = discord.Embed(title=deck_owner.name if deck_owner.nick is None else deck_owner.nick,
                                  description='\n'.join([perso for perso in persos_text[i:i + nb_per_page]]))
            if deck_owner.avatar:
                embed.set_thumbnail(url=deck_owner.avatar.url)
            persos_pages.append(embed)

        paginator = pages.Paginator(pages=persos_pages, show_disabled=True, show_indicator=True)
        await paginator.send(ctx)

    @slash_command(description='Set the profile displayed personality.\n'
                               'You can leave name blank to remove the current personality.',
                   guild_ids=utils.get_authorized_guild_ids())
    async def set_perso_profile(self, ctx, name: Option(str, 'Pick a name or write yours',
                                                        autocomplete=utils.deck_name_searcher),
                                group: Option(str, 'Pick a group or write yours',
                                              autocomplete=utils.personalities_group_searcher, required=False,
                                              default=None)):
        # NOTE(review): `name` carries no required=False/default here, so
        # whether it can actually arrive as None depends on the discord
        # library's Option defaults -- confirm this removal path is reachable.
        if name is None:
            DatabaseDeck.get().set_id_perso_profile(ctx.guild.id, ctx.author.id, None)
            await ctx.respond('I removed your profile\'s personality.')
            return

        name = name.strip()
        if group:
            group = group.strip()

        # Look up the personality, disambiguated by group when one is given.
        if group:
            id_perso = DatabasePersonality.get().get_perso_group_id(name, group)
        else:
            id_perso = DatabasePersonality.get().get_perso_id(name)

        if not id_perso:
            await ctx.respond(f'Personality **{name}**{" from *" + group + "* " if group else ""} not found.')
            return

        # Only the owner of a personality may display it on their profile.
        owner = DatabaseDeck.get().perso_belongs_to(ctx.guild.id, id_perso)
        if not owner or owner != ctx.author.id:
            await ctx.respond(f'You don\'t own **{name}**{" from *" + group + "* " if group else ""}...')
            return None

        DatabaseDeck.get().set_id_perso_profile(ctx.guild.id, ctx.author.id, id_perso)
        await ctx.respond(f'Set your perso profile to {name} {group if group else ""}')

    @slash_command(description='Show time before next rolls and claim reset.',
                   guild_ids=utils.get_authorized_guild_ids())
    async def time(self, ctx):
        # Minutes until the user may claim again (0 = ready now).
        next_claim = min_until_next_claim(ctx.guild.id, ctx.author.id)

        username = ctx.author.name if ctx.author.nick is None else ctx.author.nick
        msg = f'{username}, you '
        if next_claim == 0:
            msg += 'can claim right now!'
        else:
            # NOTE(review): this divmod result is never used (and the local
            # name shadows this method) -- left untouched; candidate for
            # removal.
            time = divmod(next_claim, 60)
            msg += f'can\'t claim yet. ' \
                   f'Ready **<t:{int((datetime.now() + timedelta(minutes=next_claim)).timestamp())}:R>**.'

        user_nb_rolls = DatabaseDeck.get().get_nb_rolls(ctx.guild.id, ctx.author.id)
        max_rolls = DatabaseDeck.get().get_rolls_per_hour(ctx.guild.id)
        last_roll = DatabaseDeck.get().get_last_roll(ctx.guild.id, ctx.author.id)

        if not last_roll:
            user_nb_rolls = 0
        else:
            # Roll counters reset at the top of every hour; stored timestamp
            # format is '%Y-%m-%d %H:%M:%S'.
            last_roll = datetime.strptime(last_roll, '%Y-%m-%d %H:%M:%S')
            now = datetime.now()
            # If a new hour began
            if now.date() != last_roll.date() or (now.date() == last_roll.date() and now.hour != last_roll.hour):
                user_nb_rolls = 0

        msg += f'\nYou have **{max_rolls - user_nb_rolls}** rolls left.\n' \
               f'Next rolls reset **<t:{int((datetime.now().replace(minute=0) + timedelta(hours=1)).timestamp())}:R>**.'

        await ctx.respond(msg)
| 44.261905 | 120 | 0.620764 | 966 | 7,436 | 4.593168 | 0.200828 | 0.023665 | 0.029299 | 0.01465 | 0.302457 | 0.249042 | 0.18526 | 0.18526 | 0.18526 | 0.146495 | 0 | 0.003676 | 0.268424 | 7,436 | 167 | 121 | 44.526946 | 0.811949 | 0.030662 | 0 | 0.153226 | 0 | 0.040323 | 0.145202 | 0.025313 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008065 | false | 0 | 0.080645 | 0 | 0.120968 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf49e6d7e31e1c1a3165ef1af9d24717e6080a4b | 11,063 | py | Python | migrations/versions/95805663f7bd_.py | Anioko/CMS | b6465faf2a5d7333f494526bcddf8083d6807aee | [
"MIT"
] | null | null | null | migrations/versions/95805663f7bd_.py | Anioko/CMS | b6465faf2a5d7333f494526bcddf8083d6807aee | [
"MIT"
] | 1 | 2021-06-02T01:40:15.000Z | 2021-06-02T01:40:15.000Z | migrations/versions/95805663f7bd_.py | Anioko/CMS | b6465faf2a5d7333f494526bcddf8083d6807aee | [
"MIT"
] | null | null | null | """empty message
Revision ID: 95805663f7bd
Revises:
Create Date: 2020-05-30 12:10:57.896357
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '95805663f7bd'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial CMS schema.

    Auto-generated by Alembic. Statement order matters: base tables come
    first, and every table with a foreign key is created after the table
    it references.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('blogcategories',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=64), nullable=True),
    sa.Column('slug', sa.String(length=255), nullable=True),
    sa.Column('description', sa.String(length=512), nullable=True),
    sa.Column('created_on', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_blogcategories_slug'), 'blogcategories', ['slug'], unique=True)
    op.create_table('blogpoststatus',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=128), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('editableHTML',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('editor_name', sa.String(length=100), nullable=True),
    sa.Column('value', sa.Text(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('editor_name')
    )
    op.create_table('files',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=32), nullable=True),
    sa.Column('path', sa.String(length=255), nullable=True),
    sa.Column('uploaded_date', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    op.create_table('images',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('image_filename', sa.String(), nullable=True),
    sa.Column('image_url', sa.String(), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('menus',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=32), nullable=True),
    sa.Column('created_on', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    op.create_table('photogalleries',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=128), nullable=True),
    sa.Column('slug', sa.String(length=255), nullable=True),
    sa.Column('description', sa.Text(), nullable=True),
    sa.Column('created_on', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_photogalleries_slug'), 'photogalleries', ['slug'], unique=True)
    op.create_table('roles',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=64), nullable=True),
    sa.Column('index', sa.String(length=64), nullable=True),
    sa.Column('default', sa.Boolean(), nullable=True),
    sa.Column('permissions', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    op.create_index(op.f('ix_roles_default'), 'roles', ['default'], unique=False)
    op.create_table('sitesettings',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=128), nullable=True),
    sa.Column('value', sa.String(length=4000), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    # The tables below carry foreign keys into the base tables above.
    op.create_table('menuitems',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=32), nullable=True),
    sa.Column('slug', sa.String(length=256), nullable=True),
    sa.Column('weight', sa.Integer(), nullable=True),
    sa.Column('menu_id', sa.Integer(), nullable=True),
    sa.Column('created_on', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['menu_id'], ['menus.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('photogalleryitems',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=128), nullable=True),
    sa.Column('description', sa.String(length=512), nullable=True),
    sa.Column('file_id', sa.Integer(), nullable=True),
    sa.Column('photogallery_id', sa.Integer(), nullable=True),
    sa.Column('created_on', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['file_id'], ['files.id'], ),
    sa.ForeignKeyConstraint(['photogallery_id'], ['photogalleries.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('confirmed', sa.Boolean(), nullable=True),
    sa.Column('first_name', sa.String(length=64), nullable=True),
    sa.Column('last_name', sa.String(length=64), nullable=True),
    sa.Column('email', sa.String(length=64), nullable=True),
    sa.Column('password_hash', sa.String(length=128), nullable=True),
    sa.Column('role_id', sa.Integer(), nullable=True),
    sa.Column('created_on', sa.DateTime(), nullable=True),
    sa.Column('username', sa.String(length=64), nullable=True),
    sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
    op.create_index(op.f('ix_users_first_name'), 'users', ['first_name'], unique=False)
    op.create_index(op.f('ix_users_last_name'), 'users', ['last_name'], unique=False)
    op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
    op.create_table('blogposts',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=128), nullable=True),
    sa.Column('slug', sa.String(length=255), nullable=True),
    sa.Column('content', sa.Text(), nullable=True),
    sa.Column('blogcategory_id', sa.Integer(), nullable=True),
    sa.Column('blogpoststatus_id', sa.Integer(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('created_on', sa.DateTime(), nullable=True),
    sa.Column('published_on', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['blogcategory_id'], ['blogcategories.id'], ),
    sa.ForeignKeyConstraint(['blogpoststatus_id'], ['blogpoststatus.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_blogposts_slug'), 'blogposts', ['slug'], unique=True)
    op.create_table('opportunities',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('title', sa.String(), nullable=True),
    sa.Column('summary', sa.String(), nullable=True),
    sa.Column('city', sa.String(), nullable=True),
    sa.Column('state', sa.String(), nullable=True),
    sa.Column('country', sa.String(), nullable=True),
    sa.Column('opportunity_type', sa.String(), nullable=True),
    sa.Column('available_now', sa.String(), nullable=True),
    sa.Column('location_type', sa.String(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='cascade'),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('pages',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=128), nullable=True),
    sa.Column('slug', sa.String(length=255), nullable=True),
    sa.Column('content', sa.Text(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('menu_id', sa.Integer(), nullable=True),
    sa.Column('created_on', sa.DateTime(), nullable=True),
    sa.Column('published_on', sa.DateTime(), nullable=True),
    sa.Column('is_homepage', sa.Boolean(), nullable=True),
    sa.ForeignKeyConstraint(['menu_id'], ['menus.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_pages_slug'), 'pages', ['slug'], unique=True)
    op.create_table('schools',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('name', sa.String(), nullable=True),
    sa.Column('description', sa.String(), nullable=True),
    sa.Column('grading', sa.String(), nullable=True),
    sa.Column('start_date', sa.DateTime(), nullable=False),
    sa.Column('end_date', sa.DateTime(), nullable=False),
    sa.Column('currently', sa.String(), nullable=True),
    sa.Column('city', sa.String(), nullable=True),
    sa.Column('state', sa.String(), nullable=True),
    sa.Column('country', sa.String(), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('workplaces',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.Column('name', sa.String(), nullable=True),
    sa.Column('description', sa.String(), nullable=True),
    sa.Column('role', sa.String(), nullable=True),
    sa.Column('role_description', sa.String(), nullable=True),
    sa.Column('start_date', sa.DateTime(), nullable=False),
    sa.Column('end_date', sa.DateTime(), nullable=False),
    sa.Column('currently', sa.String(), nullable=True),
    sa.Column('city', sa.String(), nullable=True),
    sa.Column('state', sa.String(), nullable=True),
    sa.Column('country', sa.String(), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the entire schema, reversing :func:`upgrade`.

    Auto-generated by Alembic. Tables are dropped in reverse dependency
    order (and each index before its table) so no foreign key ever points
    at a missing table.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('workplaces')
    op.drop_table('schools')
    op.drop_index(op.f('ix_pages_slug'), table_name='pages')
    op.drop_table('pages')
    op.drop_table('opportunities')
    op.drop_index(op.f('ix_blogposts_slug'), table_name='blogposts')
    op.drop_table('blogposts')
    op.drop_index(op.f('ix_users_username'), table_name='users')
    op.drop_index(op.f('ix_users_last_name'), table_name='users')
    op.drop_index(op.f('ix_users_first_name'), table_name='users')
    op.drop_index(op.f('ix_users_email'), table_name='users')
    op.drop_table('users')
    op.drop_table('photogalleryitems')
    op.drop_table('menuitems')
    op.drop_table('sitesettings')
    op.drop_index(op.f('ix_roles_default'), table_name='roles')
    op.drop_table('roles')
    op.drop_index(op.f('ix_photogalleries_slug'), table_name='photogalleries')
    op.drop_table('photogalleries')
    op.drop_table('menus')
    op.drop_table('images')
    op.drop_table('files')
    op.drop_table('editableHTML')
    op.drop_table('blogpoststatus')
    op.drop_index(op.f('ix_blogcategories_slug'), table_name='blogcategories')
    op.drop_table('blogcategories')
    # ### end Alembic commands ###
| 45.714876 | 92 | 0.669891 | 1,440 | 11,063 | 5.032639 | 0.091667 | 0.123637 | 0.175797 | 0.204222 | 0.796881 | 0.78667 | 0.705533 | 0.652408 | 0.611287 | 0.571271 | 0 | 0.011454 | 0.131881 | 11,063 | 241 | 93 | 45.904564 | 0.743128 | 0.025581 | 0 | 0.475336 | 0 | 0 | 0.182165 | 0.008191 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008969 | false | 0.004484 | 0.008969 | 0 | 0.017937 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
cf4a7aaf148ceec8298f56012e99d6d50054187d | 957 | py | Python | daseg/slack.py | pzelasko/daseg | 5e3aaf6e81a44a5eb42226bd376c92c7d1879261 | [
"Apache-2.0"
] | 4 | 2021-07-12T00:46:32.000Z | 2022-02-28T07:02:27.000Z | daseg/slack.py | pzelasko/daseg | 5e3aaf6e81a44a5eb42226bd376c92c7d1879261 | [
"Apache-2.0"
] | 2 | 2021-12-09T12:34:24.000Z | 2022-02-14T20:37:01.000Z | daseg/slack.py | pzelasko/daseg | 5e3aaf6e81a44a5eb42226bd376c92c7d1879261 | [
"Apache-2.0"
] | null | null | null | import logging
import os
import requests
def slack_notify(msg: str):
    """Post *msg* to Slack, if a target is configured.

    The POST endpoint is read from the ``SLACK_API_TOKEN`` environment
    variable (despite the name, the value is used directly as the URL).
    Does nothing when the variable is unset; delivery failures are logged
    and never raised.
    """
    token = os.environ.get('SLACK_API_TOKEN')
    if token is None:
        return
    try:
        requests.post(token, json={'text': msg})
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate; a failed notification is non-fatal.
        logging.warning('Unable to send notification to Slack!')
def print_and_slack(msg: str, *args, **kwargs):
    """Print *msg* locally (forwarding extra args to ``print``) and also
    send it to Slack via :func:`slack_notify`."""
    print(msg, *args, **kwargs)
    slack_notify(msg)
class SlackNotifier:
    """Buffers message lines and pushes them to Slack in a single batch.

    Can be used as a context manager: the buffered message is pushed
    automatically on exit.
    """

    def __init__(self, name: str):
        self.name = name
        # The notifier's name is always the first line of every pushed message.
        self.msgs = [name]

    def write(self, msg: str):
        """Buffer *msg* for the next push; returns ``self`` for chaining."""
        self.msgs.append(msg)
        return self

    def write_and_print(self, msg: str, *args, **kwargs):
        """Print *msg* immediately and also buffer it for the next push."""
        print(msg, *args, **kwargs)
        return self.write(msg)

    def push(self):
        """Send all buffered lines as one Slack message and reset the buffer."""
        slack_notify('\n'.join(self.msgs))
        self.msgs = [self.name]

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.push()
cf4a971868a5db584bf5e20d4c62c91c74f32e96 | 271 | py | Python | aids/strings/is_palindrome.py | ueg1990/aids | bb543c6f53983d59edbc6a522ca10d64efd9c42e | [
"MIT"
] | null | null | null | aids/strings/is_palindrome.py | ueg1990/aids | bb543c6f53983d59edbc6a522ca10d64efd9c42e | [
"MIT"
] | null | null | null | aids/strings/is_palindrome.py | ueg1990/aids | bb543c6f53983d59edbc6a522ca10d64efd9c42e | [
"MIT"
] | null | null | null | '''
In this module, we determine if a given string is a palindrome
'''
def is_palindrome(string):
    '''
    Return True if given string is a palindrome.

    Compares the string against its reverse. This replaces the original
    recursive version, which copied an O(n) slice at every level (quadratic
    overall) and could hit Python's recursion limit on long inputs.
    '''
    return string == string[::-1]
| 16.9375 | 62 | 0.678967 | 43 | 271 | 4.232558 | 0.44186 | 0.120879 | 0.142857 | 0.153846 | 0.263736 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022936 | 0.195572 | 271 | 15 | 63 | 18.066667 | 0.811927 | 0.391144 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf4a98bc5a1223ce3366c7b5c9b02fbe3d60be2f | 2,386 | py | Python | jageocoder/address.py | ny-a/jageocoder | 6c31cd7d81aa496b2fbcd3300ac1ccc9cf00fca3 | [
"MIT"
] | 12 | 2021-03-09T01:22:50.000Z | 2022-03-23T04:18:24.000Z | jageocoder/address.py | ny-a/jageocoder | 6c31cd7d81aa496b2fbcd3300ac1ccc9cf00fca3 | [
"MIT"
] | 3 | 2021-12-14T06:34:52.000Z | 2022-02-18T13:11:59.000Z | jageocoder/address.py | ny-a/jageocoder | 6c31cd7d81aa496b2fbcd3300ac1ccc9cf00fca3 | [
"MIT"
] | 3 | 2021-12-14T06:36:59.000Z | 2022-02-16T00:48:51.000Z | from logging import getLogger
from jageocoder.exceptions import AddressLevelError
logger = getLogger(__name__)
class AddressLevel(object):
    """
    Address Levels

    1 = Prefecture (都道府県)
    2 = County / subprefecture / promotion bureau (郡・支庁・振興局)
    3 = City, town, village, and special ward of Tokyo (市町村および特別区)
    4 = Ward of a designated city (政令市の区)
    5 = Oaza (大字)
    6 = Aza (字)
    7 = Lot number, or block where the residence-indication system
        is in effect (地番または住居表示実施地域の街区)
    8 = Branch number, or residence number where the residence-indication
        system is in effect (枝番または住居表示実施地域の住居番号)
    """

    # Constants
    UNDEFINED = -1
    PREF = 1
    COUNTY = 2
    CITY = 3
    WARD = 4
    OAZA = 5
    AZA = 6
    BLOCK = 7
    BLD = 8

    @classmethod
    def guess(cls, name, parent, trigger):
        """
        Guess the level of the address element.

        The checks below are order-sensitive: each rule assumes that the
        earlier ones did not match.

        Parameters
        ----------
        name : str
            The name of the address element
        parent : AddressNode
            The parent node of the target.
        trigger : dict
            properties of the new address node who triggered
            adding the address element.

            name : str. name. ("2丁目")
            x : float. X coordinate or longitude. (139.69175)
            y : float. Y coordinate or latitude. (35.689472)
            level : int. Address level (1: pref, 3: city, 5: oaza, ...)
            note : str. Note.

        Raises
        ------
        AddressLevelError
            If none of the heuristics below matches.
        """
        lastchar = name[-1]

        # id == -1 marks the root node, so a direct child is a prefecture.
        if parent.id == -1:
            return cls.PREF

        # Under a prefecture, names ending with 郡 / 支庁 / 振興局 are counties.
        if parent.level == cls.PREF and \
                (lastchar == '郡' or name.endswith(('支庁', '振興局',))):
            return cls.COUNTY

        # Names ending with 市/町/村: a municipality when the parent is above
        # city level; otherwise one level below the parent (city or oaza).
        if lastchar in '市町村':
            if parent.level < cls.CITY:
                return cls.CITY

            if parent.level in (cls.CITY, cls.OAZA,):
                return parent.level + 1

        if lastchar == '区':
            # A ward of a designated city -- except Tokyo's special wards,
            # which are ranked as cities.
            if parent.level == cls.CITY:
                return cls.WARD

            if parent.name == '東京都':
                return cls.CITY

        # Fall through by depth: below a municipality comes an oaza, below
        # an oaza an aza, below an aza a block.
        if parent.level < cls.OAZA:
            return cls.OAZA

        if parent.level == cls.OAZA:
            return cls.AZA

        if parent.level == cls.AZA:
            if trigger['level'] <= cls.BLOCK:
                # If the Aza-name is over-segmented, Aza-level address elements
                # may appear in series.
                # ex: 北海道,帯広市,稲田町南,九線,西,19番地
                return cls.AZA

            return cls.BLOCK

        raise AddressLevelError(
            ('Cannot estimate the level of the address element. '
             'name={}, parent={}, trigger={}'.format(
                 name, parent, trigger)))
| 25.115789 | 79 | 0.508382 | 273 | 2,386 | 4.435897 | 0.388278 | 0.059455 | 0.075145 | 0.079273 | 0.165153 | 0.165153 | 0.095789 | 0 | 0 | 0 | 0 | 0.029046 | 0.393965 | 2,386 | 94 | 80 | 25.382979 | 0.807054 | 0.305951 | 0 | 0.093023 | 0 | 0 | 0.066667 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0 | 0.046512 | 0 | 0.534884 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
cf4c769e8b0574ab598dafe57a75ab7656e052d7 | 2,090 | py | Python | src/sympais/infer/utils.py | ethanluoyc/sympais | 68bc696434c86edb8457a3c74473c810b2c5c8f2 | [
"MIT"
] | 5 | 2021-06-04T23:24:41.000Z | 2021-12-13T21:39:57.000Z | src/sympais/infer/utils.py | ethanluoyc/sympais | 68bc696434c86edb8457a3c74473c810b2c5c8f2 | [
"MIT"
] | 24 | 2021-07-12T02:08:34.000Z | 2021-12-20T02:14:54.000Z | src/sympais/infer/utils.py | ethanluoyc/sympais | 68bc696434c86edb8457a3c74473c810b2c5c8f2 | [
"MIT"
] | 1 | 2021-07-31T10:34:19.000Z | 2021-07-31T10:34:19.000Z | import jax
from jax import lax
from jax import tree_util
import jax.numpy as np
def is_list_like(x):
    """Return True when ``x`` is a positional sequence (a list or a tuple)."""
    return isinstance(x, list) or isinstance(x, tuple)
def call_fn(fn, args):
    """Apply ``fn`` to ``args``, splatting them when given as a list/tuple."""
    if isinstance(args, (list, tuple)):
        return fn(*args)
    return fn(args)
def call_fn_value_and_grad(fn, args):
    """Evaluate ``fn`` at ``args`` and return ``(output, gradient)``.

    The gradient is taken w.r.t. ``args`` via a VJP with an all-ones
    cotangent, i.e. the gradient of ``sum(output)``.
    """

    def _apply(inner_args):
        if isinstance(inner_args, (list, tuple)):
            return fn(*inner_args)
        return fn(inner_args)

    output, pullback = jax.vjp(_apply, args)
    gradient = pullback(np.ones_like(output))[0]
    return output, gradient
def choose(is_accepted, proposed_state, state):
    """Per-entry select between ``proposed_state`` and ``state``.

    For every leaf of the state pytrees, entries where ``is_accepted`` is
    truthy are taken from ``proposed_state`` and the rest from ``state``.
    ``is_accepted`` is broadcast by appending singleton axes until it
    matches each leaf's rank (presumably one flag per chain -- the leading
    axis; verify against callers).
    """
    def _choose(is_accepted, proposed_state, state):
        def _expand_is_accepted_like(x):
            # Reshape is_accepted so trailing singleton axes let it
            # broadcast against x in np.where below.
            if x.shape is not None and is_accepted.shape is not None:
                expand_shape = list(is_accepted.shape) + [1] * (
                    len(x.shape) - len(is_accepted.shape))
            else:
                expand_shape = is_accepted.shape + (1,) * (x.ndim - is_accepted.ndim)
            return np.reshape(is_accepted, expand_shape)

        if is_list_like(proposed_state):
            # Leaves that are themselves lists/tuples (e.g. namedtuple-like
            # states) are recombined element-wise, preserving their type.
            assert is_list_like(state)
            return type(proposed_state)(*[
                np.where(_expand_is_accepted_like(p), p, s)
                for p, s in zip(proposed_state, state)
            ])
        else:
            return np.where(_expand_is_accepted_like(proposed_state), proposed_state, state)

    # NOTE(review): ``tree_util.tree_multimap`` was removed in newer JAX
    # releases in favour of ``tree_util.tree_map`` -- confirm the pinned
    # JAX version before upgrading.
    return tree_util.tree_multimap(lambda p, s: _choose(is_accepted, p, s),
                                   proposed_state, state)
def trace(state, fn, num_steps, trace_fn=None):
    """Run ``fn`` for ``num_steps`` transitions under ``lax.scan``.

    ``fn`` maps a state to ``(next_state, aux)``. ``trace_fn(next_state, aux)``
    decides what is recorded each step (the auxiliary output by default).
    Returns ``(final_state, stacked_trace)``.
    """
    if trace_fn is None:
        trace_fn = lambda _state, extra: extra

    def _step(carry, _):
        next_carry, aux = fn(carry)
        return next_carry, trace_fn(next_carry, aux)

    final_state, traced = lax.scan(_step, state, xs=None, length=num_steps)
    return final_state, traced
def split_rng_as(rng, structure):
    """Split ``rng`` into one PRNG key per leaf of ``structure``.

    A single-leaf structure reuses ``rng`` unchanged (no split); otherwise
    ``rng`` is split once per leaf. The keys are returned in a pytree with
    the same shape as ``structure``.
    """
    leaves, treedef = tree_util.tree_flatten(structure)
    if len(leaves) == 1:
        keys = (rng,)
    else:
        keys = jax.random.split(rng, len(leaves))
    return tree_util.tree_unflatten(treedef, keys)
def block_until_ready(state):
    """Block until every array leaf of ``state`` has finished computing.

    JAX dispatches work asynchronously; call this before timing code so the
    measurement includes the actual device computation. Returns ``state``
    leaf-for-leaf so the call can be chained (the original version returned
    a tree of ``None`` for non-array leaves because the mapped function had
    no return value).
    """

    def _wait(s):
        if isinstance(s, np.ndarray):
            s.block_until_ready()
        return s  # preserve every leaf in the returned tree

    # jax.tree_util.tree_map instead of the ``jax.tree_map`` alias, which
    # was removed in recent JAX releases.
    return jax.tree_util.tree_map(_wait, state)
| 24.880952 | 86 | 0.679904 | 324 | 2,090 | 4.117284 | 0.243827 | 0.089955 | 0.037481 | 0.047976 | 0.167166 | 0.167166 | 0.126687 | 0.126687 | 0.068966 | 0.068966 | 0 | 0.002424 | 0.210526 | 2,090 | 83 | 87 | 25.180723 | 0.806061 | 0 | 0 | 0.157895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017544 | 1 | 0.210526 | false | 0 | 0.070175 | 0.017544 | 0.526316 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 2 |
cf4c7bd09808208650fcf7902fc5aadd3ebc4e2a | 918 | py | Python | __init__.py | nuki111/env_explore | b5dfa05fbcfb0126e246e4ef4eb5a392a8615cf0 | [
"MIT"
] | null | null | null | __init__.py | nuki111/env_explore | b5dfa05fbcfb0126e246e4ef4eb5a392a8615cf0 | [
"MIT"
] | null | null | null | __init__.py | nuki111/env_explore | b5dfa05fbcfb0126e246e4ef4eb5a392a8615cf0 | [
"MIT"
] | null | null | null | '''
env_explore is a library for quick and easy exploration of python objects
=========================================================================
**env_explore** combines pandas and ipywidgets to extract and process data
from almost any given python object into pandas DataFrame and generate a
clickable widget representation with which users can interact.
'''
__author__ = 'Oscar Nuki'
from .utils.backend import (getmain, envtodict, envtopandas, envtohtmltable,
getattrsafe, maineval, EnvObj, EnvDict, EnvDf)
from .utils.frontend import (usename, hboxes, vboxes, arrange, ishtml,
showobj, runperiodic, runperiodicfactory,
Printed, HTMLCode, LoadingButton, ClearButton,
PausePlayButton)
from .processing import EnvHandler
from .interface import (WidgetCell, WidgetDf, WidgetEnv, AutoWidgetEnv) | 43.714286 | 77 | 0.643791 | 88 | 918 | 6.647727 | 0.818182 | 0.034188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.223312 | 918 | 21 | 78 | 43.714286 | 0.820477 | 0.389978 | 0 | 0 | 0 | 0 | 0.018083 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.444444 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
cf4dc6fb0422c61d631abfb411ae82187b6217d2 | 3,433 | py | Python | Chapter08/python/ab-env/lib/python3.8/site-packages/numpy-1.16.4-py3.8-macosx-10.16-x86_64.egg/numpy/core/_dtype_ctypes.py | PacktPublishing/Supercharge-Your-Applications-with-GraalVM | bfb068e445f0325be9c7d526b6e07324dff9d1d2 | [
"MIT"
] | 9 | 2021-06-27T07:22:14.000Z | 2022-02-25T18:05:01.000Z | Chapter08/python/ab-env/lib/python3.8/site-packages/numpy-1.16.4-py3.8-macosx-10.16-x86_64.egg/numpy/core/_dtype_ctypes.py | PacktPublishing/Supercharge-Your-Applications-with-GraalVM | bfb068e445f0325be9c7d526b6e07324dff9d1d2 | [
"MIT"
] | null | null | null | Chapter08/python/ab-env/lib/python3.8/site-packages/numpy-1.16.4-py3.8-macosx-10.16-x86_64.egg/numpy/core/_dtype_ctypes.py | PacktPublishing/Supercharge-Your-Applications-with-GraalVM | bfb068e445f0325be9c7d526b6e07324dff9d1d2 | [
"MIT"
] | 8 | 2021-05-28T15:45:12.000Z | 2022-02-01T10:21:37.000Z | """
Conversion from ctypes to dtype.
In an ideal world, we could achieve this through the PEP3118 buffer protocol,
something like::
def dtype_from_ctypes_type(t):
# needed to ensure that the shape of `t` is within memoryview.format
class DummyStruct(ctypes.Structure):
_fields_ = [('a', t)]
# empty to avoid memory allocation
ctype_0 = (DummyStruct * 0)()
mv = memoryview(ctype_0)
# convert the struct, and slice back out the field
return _dtype_from_pep3118(mv.format)['a']
Unfortunately, this fails because:
* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782)
* PEP3118 cannot represent unions, but both numpy and ctypes can
* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780)
"""
import _ctypes
import ctypes

import numpy as np
def _from_ctypes_array(t):
    """Convert a ctypes array type into an equivalent numpy subarray dtype."""
    element_dtype = dtype_from_ctypes_type(t._type_)
    return np.dtype((element_dtype, (t._length_,)))
def _from_ctypes_structure(t):
    """Convert a ctypes Structure type into an equivalent structured dtype.

    Raises TypeError for bitfields (3-tuple _fields_ entries), which have
    no dtype counterpart.
    """
    for item in t._fields_:
        if len(item) > 2:
            raise TypeError(
                "ctypes bitfields have no dtype equivalent")

    if hasattr(t, "_pack_"):
        # Packed struct: recompute each field's offset by hand, capping the
        # alignment of every field at the pack value, and pass explicit
        # offsets/itemsize to np.dtype.
        formats = []
        offsets = []
        names = []
        current_offset = 0
        for fname, ftyp in t._fields_:
            names.append(fname)
            formats.append(dtype_from_ctypes_type(ftyp))
            # Each type has a default offset, this is platform dependent for some types.
            effective_pack = min(t._pack_, ctypes.alignment(ftyp))
            # Round the running offset up to the effective alignment.
            current_offset = ((current_offset + effective_pack - 1) // effective_pack) * effective_pack
            offsets.append(current_offset)
            current_offset += ctypes.sizeof(ftyp)

        return np.dtype(dict(
            formats=formats,
            offsets=offsets,
            names=names,
            itemsize=ctypes.sizeof(t)))
    else:
        # Unpacked struct: let numpy lay out the fields natively.
        fields = []
        for fname, ftyp in t._fields_:
            fields.append((fname, dtype_from_ctypes_type(ftyp)))

        # by default, ctypes structs are aligned
        return np.dtype(fields, align=True)
def _from_ctypes_scalar(t):
"""
Return the dtype type with endianness included if it's the case
"""
if getattr(t, '__ctype_be__', None) is t:
return np.dtype('>' + t._type_)
elif getattr(t, '__ctype_le__', None) is t:
return np.dtype('<' + t._type_)
else:
return np.dtype(t._type_)
def _from_ctypes_union(t):
    """Convert a ctypes Union into a structured dtype whose fields all
    overlap at offset 0."""
    names = []
    formats = []
    offsets = []
    for field_name, field_type in t._fields_:
        names.append(field_name)
        formats.append(dtype_from_ctypes_type(field_type))
        offsets.append(0)  # every union member starts at the beginning
    return np.dtype(dict(
        names=names,
        formats=formats,
        offsets=offsets,
        itemsize=ctypes.sizeof(t)))
def dtype_from_ctypes_type(t):
    """
    Construct a dtype object from a ctypes type
    """
    # Dispatch on the ctypes base class; pointers are the one case with no
    # dtype counterpart at all.
    if issubclass(t, _ctypes.Array):
        return _from_ctypes_array(t)
    if issubclass(t, _ctypes._Pointer):
        raise TypeError("ctypes pointers have no dtype equivalent")
    if issubclass(t, _ctypes.Structure):
        return _from_ctypes_structure(t)
    if issubclass(t, _ctypes.Union):
        return _from_ctypes_union(t)
    if isinstance(getattr(t, '_type_', None), str):
        return _from_ctypes_scalar(t)
    raise NotImplementedError(
        "Unknown ctypes type {}".format(t.__name__))
| 30.380531 | 103 | 0.633848 | 435 | 3,433 | 4.770115 | 0.31954 | 0.072289 | 0.043855 | 0.05494 | 0.250602 | 0.2 | 0.167711 | 0.167711 | 0.143614 | 0.143614 | 0 | 0.015538 | 0.268861 | 3,433 | 112 | 104 | 30.651786 | 0.811155 | 0.30032 | 0 | 0.4 | 0 | 0 | 0.059695 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.030769 | 0.015385 | 0.276923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf500d8b74ed4e30cef6a56fa9722244906f9406 | 2,202 | py | Python | tests/test_micromagnetic_zeeman.py | computationalmodelling/fidimag | 07a275c897a44ad1e0d7e8ef563f10345fdc2a6e | [
"BSD-2-Clause"
] | 53 | 2016-02-27T09:40:21.000Z | 2022-01-19T21:37:44.000Z | tests/test_micromagnetic_zeeman.py | computationalmodelling/fidimag | 07a275c897a44ad1e0d7e8ef563f10345fdc2a6e | [
"BSD-2-Clause"
] | 132 | 2016-02-26T13:18:58.000Z | 2021-12-01T21:52:42.000Z | tests/test_micromagnetic_zeeman.py | computationalmodelling/fidimag | 07a275c897a44ad1e0d7e8ef563f10345fdc2a6e | [
"BSD-2-Clause"
] | 32 | 2016-02-26T13:21:40.000Z | 2022-03-08T08:54:51.000Z | from fidimag.micro import Zeeman
from fidimag.common import CuboidMesh
from fidimag.micro import Sim
import numpy as np
def varying_field(pos):
    """Applied field that varies linearly with position: (1.2*x, 2.3*y, 0)."""
    x, y = pos[0], pos[1]
    return (1.2 * x, 2.3 * y, 0)
def test_H0_is_indexable_or_callable():
    """
    Test that an exception is raised if H0 is not indexable, and that an
    exception is not raised if H0 is indexable.
    """
    # Accepted field specifications: a sequence, an ndarray or a callable.
    for good_spec in ([0., 0., 1.], np.array([0., 0., 1.]), lambda x: x + 0.1):
        Zeeman(good_spec)

    # Bare scalars must be rejected with a ValueError.
    for bad_spec in [5., -7]:
        try:
            Zeeman(bad_spec)
        except ValueError:
            pass
        else:
            raise Exception("Zeeman argument \"{}\" was expected to raise an "
                            "exception, but did not!."
                            .format(bad_spec))
def test_zeeman():
    """Check a spatially varying Zeeman field on a 5x2x1 mesh."""
    mesh = CuboidMesh(nx=5, ny=2, nz=1)
    simulation = Sim(mesh)
    simulation.set_m((1, 0, 0))
    interaction = Zeeman(varying_field)
    simulation.add(interaction)
    computed = interaction.compute_field()
    # Entries 6 and 7 are the x and y components of the field at the cell
    # whose centre is (2 + 0.5, 0.5, 0.5).
    assert computed[6] == 1.2 * (2 + 0.5)
    assert computed[7] == 2.3 * 0.5
def test_zeeman_energy():
    """Compare the Zeeman energy of a uniform state against -mu0*Ms*H*V."""
    mu0 = 4 * np.pi * 1e-7

    # A system of 8 cells ( not using nm units)
    mesh = CuboidMesh(dx=2, dy=2, dz=2,
                      nx=2, ny=2, nz=2)
    sim = Sim(mesh)
    Ms = 1e5
    sim.set_Ms(Ms)
    sim.set_m((0, 0, 1))

    H = 0.1 / mu0
    zeeman = Zeeman((0, 0, H))
    sim.add(zeeman)
    zeeman.compute_field()

    zf = sim.get_interaction('Zeeman')

    #                        ->   ->
    # Expected energy: Int ( -mu0 M * H ) dV
    # Since we have 8 cells with the same M, we just sum their contrib
    exp_energy = 8 * (-mu0 * H * Ms * mesh.dx * mesh.dy * mesh.dz)

    assert np.abs(zf.compute_energy() - exp_energy) < 1e-10
# Allow running this test module directly as a script, outside of pytest.
if __name__ == "__main__":
    test_zeeman()
    test_H0_is_indexable_or_callable()
    test_zeeman_energy()
| 25.604651 | 78 | 0.560854 | 319 | 2,202 | 3.761755 | 0.401254 | 0.008333 | 0.0325 | 0.036667 | 0.11 | 0.11 | 0.11 | 0 | 0 | 0 | 0 | 0.044325 | 0.323797 | 2,202 | 85 | 79 | 25.905882 | 0.761585 | 0.232062 | 0 | 0.156863 | 0 | 0 | 0.048707 | 0 | 0 | 0 | 0 | 0 | 0.058824 | 1 | 0.078431 | false | 0.019608 | 0.078431 | 0.019608 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf50585745c7b40989b43625db650caccd9e042a | 13,058 | py | Python | rule_learner_both_classes.py | mgbarsky/classification_rules | 699969b87bd7a9080a7e937025fd26398c11a60d | [
"MIT"
] | null | null | null | rule_learner_both_classes.py | mgbarsky/classification_rules | 699969b87bd7a9080a7e937025fd26398c11a60d | [
"MIT"
] | null | null | null | rule_learner_both_classes.py | mgbarsky/classification_rules | 699969b87bd7a9080a7e937025fd26398c11a60d | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
class Rule:
    """A conjunctive classification rule: IF cond1 AND cond2 ... THEN class.

    Filter strings are meant to be ``eval``-ed against a pandas frame named
    ``current_data`` (with ``columns`` and ``class_label`` in scope).
    """

    def __init__(self, class_label):
        self.conditions = []  # list of conditions
        self.class_label = class_label  # rule class

    def add_condition(self, condition):
        """Append one more conjunct to the rule."""
        self.conditions.append(condition)

    def set_params(self, accuracy, coverage):
        """Record the rule's quality statistics."""
        self.accuracy = accuracy
        self.coverage = coverage

    def to_filter(self):
        """Filter string over the conditions plus the class-label test."""
        fragments = [cond.to_filter() + " & " for cond in self.conditions]
        return "".join(fragments) + "(current_data[columns[-1]] == class_label)"

    def to_filter_no_class(self):
        """Filter string over the conditions only (terminated by 'True')."""
        fragments = [cond.to_filter() + " & " for cond in self.conditions]
        return "".join(fragments) + "True"

    def __repr__(self):
        return "If {} then {}. Coverage:{}, accuracy: {}".format(self.conditions, self.class_label,
                                                                 self.coverage, self.accuracy)
class Condition:
    """A single attribute test used as one conjunct of a Rule.

    Categorical (``true_false is None``): attribute == value.
    Numeric: attribute >= value when ``true_false`` is True, and
    attribute < value when it is False.
    """

    def __init__(self, attribute, value, true_false=None):
        self.attribute = attribute
        self.value = value
        # None -> categorical equality; True -> '>='; False -> '<'
        self.true_false = true_false

    def to_filter(self):
        """Render the condition as a pandas-``eval``-able filter fragment.

        (A dead ``if self is None`` guard from the original was removed:
        ``self`` is never None for a normally invoked bound method.)
        """
        if self.true_false is None:
            return '(current_data["' + self.attribute + '"]' + "==" + '"' + self.value + '")'
        if self.true_false:
            return '(current_data["' + self.attribute + '"]' + ">=" + str(self.value) + ")"
        return '(current_data["' + self.attribute + '"]' + "<" + str(self.value) + ")"

    def __repr__(self):
        if self.true_false is None:
            return "{}={}".format(self.attribute, self.value)
        if self.true_false:
            return "{}>={}".format(self.attribute, self.value)
        return "{}<{}".format(self.attribute, self.value)
def filter_for_list(condition_list):
    """Join the filter strings of all conditions with '&' (plus a trailing True)."""
    clauses = [cond.to_filter() for cond in condition_list]
    clauses.append("True")
    return " & ".join(clauses)
def get_best_condition(columns, current_data, prev_conditions, class_labels, min_coverage=30, prev_best_accuracy=0):
    """Find the single condition that, added to prev_conditions, yields the
    highest-accuracy rule over any class label.

    columns            -- column names; the class label is the last column
    current_data       -- pandas DataFrame; this local MUST keep the name
                          `current_data` because the generated filter
                          strings reference it via eval()
    prev_conditions    -- conditions already in the rule (their attributes
                          are excluded from the search)
    class_labels       -- candidate class values
    min_coverage       -- minimum rows a candidate condition must cover
    prev_best_accuracy -- accuracy floor a candidate must beat

    Returns (class_label, Condition) for the best candidate, or None when
    nothing beats prev_best_accuracy with sufficient coverage.
    Ties on accuracy are broken by larger coverage.
    """
    used_attributes = [x.attribute for x in prev_conditions]
    best_accuracy = prev_best_accuracy
    best_coverage = None
    best_col = None
    best_val = None
    best_true_false = None
    best_class_label = None
    for class_label in class_labels:
        # we iterate over all attributes except the class - which is in the last column
        for col in columns[:-1]:
            # we do not use the same column in one rule
            if col in used_attributes:
                continue
            # Extract unique values from the column
            unique_vals = current_data[col].unique().tolist()
            # Consider each unique value in turn
            # The treatment is different for numeric and categorical attributes
            # NOTE(review): numpy integer scalars (np.int64) are NOT
            # instances of `int` on Python 3, so integer columns may fall
            # into the categorical branch -- verify with the pandas/numpy
            # versions in use.
            for val in unique_vals:
                if isinstance(val, int) or isinstance(val, float):
                    # Here we construct 2 conditions:
                    # if actual value >= val or if actual value < val
                    # First if actual value >= val
                    # construct new set of conditions by adding a new condition
                    new_conditions = prev_conditions.copy()
                    current_cond = Condition(col, val, True)
                    new_conditions.append(current_cond)
                    # create a filtering condition
                    # (the name `filter` shadows the builtin -- kept as-is)
                    filter = filter_for_list(new_conditions)
                    # total covered by current condition
                    total_covered = len(current_data[eval(filter)])
                    if total_covered >= min_coverage:
                        # total with this condition and a given class
                        total_correct = len(current_data[(current_data[columns[-1]] == class_label) & eval(filter)])
                        acc = total_correct/total_covered
                        if acc > best_accuracy or (acc == best_accuracy and
                                                   (best_coverage is None or total_covered > best_coverage)):
                            best_accuracy = acc
                            best_coverage = total_covered
                            best_col = col
                            best_val = val
                            best_true_false = True
                            best_class_label = class_label
                    # now repeat the same for the case - if actual value < val
                    # construct new set of conditions by adding a new condition
                    new_conditions = prev_conditions.copy()
                    current_cond = Condition(col, val, False)
                    new_conditions.append(current_cond)
                    # create a filtering condition
                    filter = filter_for_list(new_conditions)
                    # total covered by current condition
                    total_covered = len(current_data[eval(filter)])
                    if total_covered >= min_coverage:
                        # total with this condition and a given class
                        total_correct = len(current_data[(current_data[columns[-1]] == class_label) & eval(filter)])
                        acc = total_correct / total_covered
                        if acc > best_accuracy or (acc == best_accuracy and
                                                   (best_coverage is None or total_covered > best_coverage)):
                            best_accuracy = acc
                            best_coverage = total_covered
                            best_col = col
                            best_val = val
                            best_true_false = False
                            best_class_label = class_label
                else: # categorical attribute
                    # For categorical attributes - this is just single condition if actual value == val
                    new_conditions = prev_conditions.copy()
                    current_cond = Condition(col, val)
                    new_conditions.append(current_cond)
                    # create a filtering condition
                    filter = filter_for_list(new_conditions)
                    # total covered by current condition
                    total_covered = len(current_data[eval(filter)])
                    if total_covered >= min_coverage:
                        # total with this condition and a given class
                        total_correct = len(current_data[(current_data[columns[-1]] == class_label) & eval(filter)])
                        acc = total_correct / total_covered
                        if acc > best_accuracy or (acc == best_accuracy and
                                                   (best_coverage is None or total_covered > best_coverage)):
                            best_accuracy = acc
                            best_coverage = total_covered
                            best_col = col
                            best_val = val
                            best_true_false = None
                            best_class_label = class_label
    # No candidate beat the accuracy floor with enough coverage.
    if best_col is None:
        return None
    return (best_class_label, Condition(best_col,best_val, best_true_false))
def learn_one_rule(columns, current_data, class_labels,
                   min_coverage=30):
    """Greedily learn one PRISM-style rule on current_data.

    columns      -- column names; the class label is the last column
    current_data -- pandas DataFrame of rows still uncovered; this local
                    MUST keep the name `current_data` because the rule
                    filter strings reference it via eval()
    class_labels -- class values to consider
    min_coverage -- minimum number of rows a rule must cover

    Returns the learned Rule (accuracy/coverage set), or None when no
    single condition reaches min_coverage.
    """
    result = get_best_condition(columns, current_data, [], class_labels, min_coverage)
    if result is None:
        return None
    class_label, best_condition = result
    # Start with a rule holding the single best condition.
    current_rule = Rule(class_label)
    current_rule.add_condition(best_condition)
    filter_expr = current_rule.to_filter_no_class()
    total_covered = len(current_data[eval(filter_expr)])
    # Check coverage BEFORE dividing, so an empty selection cannot raise
    # ZeroDivisionError (the original computed accuracy first).
    if total_covered < min_coverage:
        return None
    total_correct = len(current_data[(current_data[columns[-1]] == class_label) & eval(filter_expr)])
    current_accuracy = total_correct / total_covered
    current_rule.set_params(current_accuracy, total_covered)
    if current_accuracy == 1.0:
        return current_rule
    # Repeatedly try to improve the rule's accuracy as long as coverage
    # remains sufficient.
    while True:
        result = get_best_condition(columns, current_data, current_rule.conditions,
                                    class_labels, min_coverage, current_accuracy)
        if result is None:
            return current_rule
        class_label, best_condition = result
        new_rule = Rule(class_label)
        for cond in current_rule.conditions:
            new_rule.add_condition(cond)
        new_rule.add_condition(best_condition)
        filter_expr = new_rule.to_filter_no_class()
        total_covered = len(current_data[eval(filter_expr)])
        if total_covered < min_coverage:
            return current_rule  # extra condition drops coverage: keep previous rule
        total_correct = len(current_data[(current_data[columns[-1]] == class_label) & eval(filter_expr)])
        new_accuracy = total_correct / total_covered
        new_rule.set_params(new_accuracy, total_covered)
        if new_accuracy == 1:
            return new_rule
        current_rule = new_rule
        # Bug fix: raise the accuracy floor so the next candidate must beat
        # the CURRENT rule. The original never updated current_accuracy, so
        # later iterations could accept conditions that lowered accuracy.
        current_accuracy = new_accuracy
def learn_rules(columns, data, classes=None,
                min_coverage=30, min_accuracy=0.6):
    """Learn a list of rules covering `data` (PRISM-style sequential covering).

    columns      -- column names; class label in the last column
    data         -- pandas DataFrame of labeled examples
    classes      -- class labels of interest; defaults to the unique values
                    of the last column
    min_coverage -- minimum rows per rule
    min_accuracy -- stop once the best learnable rule falls below this

    Returns the list of accepted Rule objects, in discovery order.
    """
    # List of final rules
    rules = []
    # If list of classes of interest is not provided - it is extracted from the last column of data
    if classes is not None:
        class_labels = classes
    else:
        class_labels = data[columns[-1]].unique().tolist()
    # Work on a copy; covered rows are dropped as rules are accepted.
    # NOTE: this local MUST be named `current_data` -- the rule filter
    # strings reference that name via eval().
    current_data = data.copy()
    # This follows the logic of the original PRISM algorithm
    # It processes each class in turn. Because for high accuracy
    # the rules generated are disjoint with respect to class label
    # this is not a problem when we are just interested in rules themselves - not classification
    # For classification the order in which the rules are discovered matters, and we should
    # process all classes at the same time, as shown in the lecture examples
    done = False
    while len(current_data) >= min_coverage and not done:
        # Learn a rule with a single condition
        rule = learn_one_rule(columns, current_data, class_labels, min_coverage)
        # The best rule does not pass the coverage threshold - we are done with this class
        if rule is None:
            break
        # If we get the rule with coverage above threshold
        # We check if it passes accuracy threshold
        if rule.accuracy >= min_accuracy:
            rules.append(rule)
            # remove rows covered by this rule
            # we have to remove the rows where all of the conditions hold
            # create a filtering condition
            # (the name `filter` shadows the builtin -- kept as-is)
            filter = rule.to_filter_no_class()
            current_data = current_data.drop(current_data[eval(filter)].index)
        else:
            done = True
    return rules
if __name__ == "__main__":
    # Demo: learn rules on the Titanic dataset (expects titanic.csv in cwd).
    data_file = "titanic.csv"
    data = pd.read_csv(data_file)
    # take a subset of attributes
    data = data[['Pclass', 'Sex', 'Age', 'Survived']]
    # drop all columns and rows with missing values
    data = data.dropna(how="any")
    print("Total rows", len(data))
    column_list = data.columns.to_numpy().tolist()
    print("Columns:", column_list)
    # we can set different accuracy thresholds
    # here we can reorder class labels - to first learn the rules with class label "survived".
    rules = learn_rules(column_list, data, [1, 0], 30, 0.6)
    from operator import attrgetter
    # sort rules by accuracy descending (coverage breaks ties)
    rules.sort(key=attrgetter('accuracy', 'coverage'), reverse=True)
    # show the ten strongest rules
    for rule in rules[:10]:
        print(rule)
'''
Total rows 714
Columns: ['Pclass', 'Sex', 'Age', 'Survived']
If [Pclass<2, Sex=female, Age>=26.0] then 1. Coverage:38, accuracy: 1.0
If [Age<25.0, Pclass<3, Sex=female] then 1. Coverage:48, accuracy: 0.9791666666666666
If [Sex=male, Pclass>=3, Age>=33.0] then 0. Coverage:59, accuracy: 0.9491525423728814
If [Sex=male, Pclass>=2, Age>=32.5] then 0. Coverage:31, accuracy: 0.9354838709677419
If [Sex=male, Age>=54.0, Pclass>=1] then 0. Coverage:37, accuracy: 0.8918918918918919
If [Sex=male, Pclass>=2, Age<29.0] then 0. Coverage:52, accuracy: 0.8653846153846154
If [Sex=male, Age<25.0, Pclass>=1] then 0. Coverage:33, accuracy: 0.8484848484848485
If [Sex=male, Pclass>=3, Age<25.0] then 0. Coverage:118, accuracy: 0.847457627118644
If [Age<6.0, Pclass>=1] then 1. Coverage:31, accuracy: 0.8387096774193549
If [Age>=48.0, Pclass<3] then 1. Coverage:39, accuracy: 0.8205128205128205''' | 38.519174 | 116 | 0.593736 | 1,555 | 13,058 | 4.796141 | 0.151768 | 0.044248 | 0.020649 | 0.015286 | 0.469697 | 0.412443 | 0.373022 | 0.342853 | 0.33159 | 0.323277 | 0 | 0.029144 | 0.327309 | 13,058 | 339 | 117 | 38.519174 | 0.8199 | 0.189998 | 0 | 0.458763 | 0 | 0 | 0.02616 | 0.002699 | 0.015464 | 0 | 0 | 0 | 0 | 1 | 0.06701 | false | 0 | 0.015464 | 0.005155 | 0.190722 | 0.015464 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf51ab45924c97d384d57fc81c4e9f5c32da4311 | 23 | py | Python | samtranslator/__init__.py | bhumikapaharia/serverless-application-model | 4161fdd59f1ec449877a64796401ca074ae7be02 | [
"Apache-2.0"
] | 4 | 2021-12-18T06:44:57.000Z | 2021-12-28T09:52:53.000Z | samtranslator/__init__.py | bhumikapaharia/serverless-application-model | 4161fdd59f1ec449877a64796401ca074ae7be02 | [
"Apache-2.0"
] | 1 | 2021-04-13T17:54:21.000Z | 2021-04-13T17:54:21.000Z | samtranslator/__init__.py | chrisoverzero/serverless-application-model | f297cfb7bb68c75b3a75da49c9488e62bad16347 | [
"Apache-2.0"
] | null | null | null | __version__ = "1.35.0"
| 11.5 | 22 | 0.652174 | 4 | 23 | 2.75 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.2 | 0.130435 | 23 | 1 | 23 | 23 | 0.35 | 0 | 0 | 0 | 0 | 0 | 0.26087 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 |
cf5251ba997fd509524b5ed305550da937b3de70 | 5,314 | py | Python | packager/rpm/build.py | csdms/packagebuilder | a72f1d264d9219acfb422864fbcd57dfd6cfd51b | [
"MIT"
] | null | null | null | packager/rpm/build.py | csdms/packagebuilder | a72f1d264d9219acfb422864fbcd57dfd6cfd51b | [
"MIT"
] | null | null | null | packager/rpm/build.py | csdms/packagebuilder | a72f1d264d9219acfb422864fbcd57dfd6cfd51b | [
"MIT"
] | null | null | null | #! /usr/bin/env python
#
# Builds binary and source RPMs for a CSDMS model or tool.
#
# Create the executable script `build_rpm` with:
# $ cd path/to/packagebuilder
# $ sudo python setup.py install
#
# Examples:
# $ build_rpm --help
# $ build_rpm --version
# $ build_rpm hydrotrend
# $ build_rpm babel --tag 1.4.0
# $ build_rpm cem --tag 0.2 --quiet
# $ build_rpm hydrotrend --local $HOME/rpm_models
# $ build_rpm babel --prefix /usr/local/csdms
#
# Mark Piper (mark.piper@colorado.edu)
import sys, os, shutil
from subprocess import call
import glob
import shlex
from packager.core.module import Module
from packager.core.flavor import debian_check
class BuildRPM(object):
    '''
    Uses `rpmbuild` to build a CSDMS model or tool into an RPM.

    NOTE(review): instantiating this class runs the entire build as a side
    effect -- it fetches the module, stages files under ~/rpmbuild, invokes
    `rpmbuild -ba`, and cleans up.
    '''
    def __init__(self, name, version, local_dir, prefix, quiet):
        # name      -- model/tool name
        # version   -- tag to build, or None for head
        # local_dir -- optional local path to the module files
        # prefix    -- RPM install prefix; defaults to /usr/local
        # quiet     -- when True, pass --quiet to rpmbuild
        self.is_debian = debian_check()
        self.is_quiet = " --quiet " if quiet else " "
        self.install_prefix = "/usr/local" if prefix is None else prefix
        # Get the model or tool and its spec file.
        self.module = Module(name, version, local_dir)
        self.spec_file = os.path.join(self.module.location, \
            self.module.name + ".spec")
        # Set up the local rpmbuild directory.
        self.rpmbuild = os.path.join(os.getenv("HOME"), "rpmbuild", "")
        self.prep_directory()
        # Download the module's source code and make a tarball.
        self.tarball = self.module.get_source()
        # Copy module files to the rpmbuild directory.
        self.prep_files()
        # Build the binary and source RPMs.
        self.build()
        self.cleanup()
        print("Success!")
    def prep_directory(self):
        '''
        Prepares the RPM build directory `~/rpmbuild`. Sets up member
        variables for paths in the build directory.

        WARNING: any existing ~/rpmbuild tree is deleted first.
        '''
        print("Setting up rpmbuild directory structure.")
        # Start from a clean slate: remove any pre-existing build tree.
        if os.path.isdir(self.rpmbuild):
            shutil.rmtree(self.rpmbuild)
        subdirectories = ["BUILD","BUILDROOT","RPMS","SOURCES","SPECS","SRPMS"]
        for dname in subdirectories:
            os.makedirs(os.path.join(self.rpmbuild, dname))
        self.sources_dir = os.path.join(self.rpmbuild, "SOURCES", "")
        self.specs_dir = os.path.join(self.rpmbuild, "SPECS", "")
    def prep_files(self):
        '''
        Copies source tarball, spec file, patches (if any) and scripts
        (if any) for the build process. Patches must use the extension
        ".patch", scripts must use the extension ".sh" or ".py".
        '''
        print("Copying module files.")
        shutil.copy(self.spec_file, self.specs_dir)
        shutil.copy(self.tarball, self.sources_dir)
        for patch in glob.glob(os.path.join(self.module.location, "*.patch")):
            shutil.copy(patch, self.sources_dir)
        for script in glob.glob(os.path.join(self.module.location, "*.sh")):
            shutil.copy(script, self.sources_dir)
        for script in glob.glob(os.path.join(self.module.location, "*.py")):
            shutil.copy(script, self.sources_dir)
    def build(self):
        '''
        Builds binary and source RPMS for the module.
        Exits the process with status 2 when rpmbuild fails.
        '''
        print("Building RPMs.")
        # Assemble the rpmbuild command; _prefix/_version (and, off Debian,
        # _buildrequires) are passed through as macros to the spec file.
        cmd = "rpmbuild -ba" + self.is_quiet \
            + os.path.join(self.specs_dir, os.path.basename(self.spec_file)) \
            + " --define '_prefix " + self.install_prefix + "'" \
            + " --define '_version " + self.module.version + "'"
        if not self.is_debian:
            cmd += " --define '_buildrequires " + self.module.dependencies + "'"
        print(cmd)
        ret = call(shlex.split(cmd))
        if ret != 0:
            print("Error in building module RPM.")
            sys.exit(2) # can't build RPM
    def cleanup(self):
        '''
        Deletes the directory used to store the downloaded archives from
        the rpm_models and rpm_tools repos.
        '''
        self.module.cleanup()
#-----------------------------------------------------------------------------
def main():
    """Parse command-line arguments and hand them to a BuildRPM instance."""
    import argparse
    from packager import __version__
    # This tool only runs on Linux hosts.
    if not sys.platform.startswith('linux'):
        print("Error: this OS is not supported.")
        sys.exit(1)  # not Linux
    parser = argparse.ArgumentParser(
        description="Builds a CSDMS model or tool into an RPM.")
    parser.add_argument("module_name",
                        help="the name of the model or tool to build")
    # Optional flags, registered in the same order as before so that
    # --help output is unchanged.
    option_specs = [
        (["--local"], {"help": "use LOCAL path to the module files"}),
        (["--prefix"], {"help": "use PREFIX as install path for RPM [/usr/local]"}),
        (["--tag"], {"help": "build TAG version of the module [head]"}),
        (["--quiet"], {"action": "store_true",
                       "help": "provide less detailed output [verbose]"}),
        (["--version"], {"action": "version",
                         "version": "build_rpm " + __version__}),
    ]
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)
    opts = parser.parse_args()
    BuildRPM(opts.module_name, opts.tag, opts.local, opts.prefix, opts.quiet)
if __name__ == "__main__":
    main()
| 36.902778 | 80 | 0.59936 | 661 | 5,314 | 4.711044 | 0.279879 | 0.021195 | 0.028902 | 0.035967 | 0.147078 | 0.125883 | 0.06808 | 0.06808 | 0.051381 | 0.039178 | 0 | 0.002057 | 0.26816 | 5,314 | 143 | 81 | 37.160839 | 0.798663 | 0.258939 | 0 | 0.025316 | 0 | 0 | 0.171946 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075949 | false | 0 | 0.101266 | 0 | 0.189873 | 0.088608 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf528e1ce597b280628a646ef42b416b3143745b | 1,094 | py | Python | setup.py | dwhall/sx127x_ahsm | 71605ddb218636cb86f628441c2f1aee904bd271 | [
"MIT"
] | 1 | 2019-09-07T08:59:41.000Z | 2019-09-07T08:59:41.000Z | setup.py | dwhall/sx127x_ahsm | 71605ddb218636cb86f628441c2f1aee904bd271 | [
"MIT"
] | 1 | 2020-06-15T14:25:28.000Z | 2020-06-15T22:55:40.000Z | setup.py | dwhall/sx127x_ahsm | 71605ddb218636cb86f628441c2f1aee904bd271 | [
"MIT"
] | 1 | 2020-06-14T16:35:47.000Z | 2020-06-14T16:35:47.000Z | import setuptools
# Read the long description for PyPI from the project README.
# NOTE(review): no encoding is given; consider encoding="utf-8".
with open("README.md", "r") as fh:
    long_description = fh.read()
# Package metadata for the sx127x_ahsm driver distribution.
setuptools.setup(
    name="sx127x_ahsm",
    version="0.1.0",
    author="Dean Hall",
    author_email="dwhall256@gmail.com",
    description="A driver for the Semtech SX127X radio data modem.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/dwhall/sx127x_ahsm",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "License :: OSI Approved :: MIT License",
        # This project is deprected
        "Development Status :: 7 - Inactive",
        # This project is designed to run on a Raspberry Pi
        # with a SX127X LoRa radio attached via the SPI bus
        "Operating System :: POSIX :: Linux",
        "Topic :: System :: Hardware :: Hardware Drivers",
        "Topic :: Communications :: Ham Radio",
    ],
)
| 33.151515 | 68 | 0.632541 | 128 | 1,094 | 5.328125 | 0.65625 | 0.087977 | 0.146628 | 0.152493 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03253 | 0.241316 | 1,094 | 32 | 69 | 34.1875 | 0.789157 | 0.11426 | 0 | 0 | 0 | 0 | 0.507772 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.04 | 0 | 0.04 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf54217590cfa476b93f2e6e6579db91c814fd52 | 431 | py | Python | buildPersonNet.py | danpelis/CPE322 | 86aa3b77dd710d31c0248957146949ca99b81e0b | [
"MIT"
] | null | null | null | buildPersonNet.py | danpelis/CPE322 | 86aa3b77dd710d31c0248957146949ca99b81e0b | [
"MIT"
] | null | null | null | buildPersonNet.py | danpelis/CPE322 | 86aa3b77dd710d31c0248957146949ca99b81e0b | [
"MIT"
] | null | null | null | PIPELINE_CONFIG_PATH={C:\Users\Dan\Projects\D6\ssd_mobilenet_v1_person.config}
# NOTE(review): despite the .py extension this is a shell command snippet
# (variable assignments plus a call to the TensorFlow Object Detection API
# training entry point), not Python source.
# NOTE(review): the brace-wrapped Windows paths ({C:\Users\...}) are literal
# in shell -- presumably placeholders to replace with real paths; verify
# before running.
MODEL_DIR={C:\Users\Dan\Projects\D6\personNet}
NUM_TRAIN_STEPS=50000
SAMPLE_1_OF_N_EVAL_EXAMPLES=1
# Launch training/evaluation with the variables defined above.
python object_detection/model_main.py \
--pipeline_config_path=${PIPELINE_CONFIG_PATH} \
--model_dir=${MODEL_DIR} \
--num_train_steps=${NUM_TRAIN_STEPS} \
--sample_1_of_n_eval_examples=$SAMPLE_1_OF_N_EVAL_EXAMPLES \
--alsologtostderr | 43.1 | 78 | 0.798144 | 67 | 431 | 4.61194 | 0.462687 | 0.135922 | 0.174757 | 0.097087 | 0.33657 | 0.213592 | 0 | 0 | 0 | 0 | 0 | 0.030303 | 0.081207 | 431 | 10 | 79 | 43.1 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
cf545cb8f22abd776b690122d22917eb5c3778ef | 5,756 | py | Python | Preprocessing/reversegeo.py | salathegroup/Semester_Project | 2de38eef4ae6b3c350f8b742021ff098ecb376c4 | [
"MIT"
] | null | null | null | Preprocessing/reversegeo.py | salathegroup/Semester_Project | 2de38eef4ae6b3c350f8b742021ff098ecb376c4 | [
"MIT"
] | 1 | 2018-02-20T15:25:22.000Z | 2018-02-20T15:25:22.000Z | Preprocessing/reversegeo.py | salathegroup/Semester_Project | 2de38eef4ae6b3c350f8b742021ff098ecb376c4 | [
"MIT"
] | 2 | 2017-11-07T09:12:11.000Z | 2019-04-12T16:07:40.000Z | import reverse_geocoder as rg
import csv
import glob
import multiprocessing as mp
import multiprocessing.pool
import os
import re
# Lookup table mapping the `admin1` region name returned by reverse_geocoder
# (US states, Canadian provinces, plus a few Mexican states / other regions)
# to the string id used in the output. Consumed by gzworker via
# mx_ca_us_state_abbrev.get(...).
mx_ca_us_state_abbrev = {
    'Alabama': '1',
    'Alaska': '2',
    'Arizona': '3',
    'Arkansas': '4',
    'California': '5',
    'Colorado': '6',
    'Connecticut': '7',
    'Delaware': '8',
    'Florida': '9',
    'Georgia': '10',
    'Hawaii': '11',
    'Idaho': '12',
    'Illinois': '13',
    'Indiana': '14',
    'Iowa': '15',
    'Kansas': '16',
    'Kentucky': '17',
    'Louisiana': '18',
    'Maine': '19',
    'Maryland': '20',
    'Massachusetts': '21',
    'Michigan': '22',
    'Minnesota': '23',
    'Mississippi': '24',
    'Missouri': '25',
    'Montana': '26',
    'Nebraska': '27',
    'Nevada': '28',
    'New Hampshire': '29',
    'New Jersey': '30',
    'New Mexico': '31',
    'New York': '32',
    'North Carolina': '33',
    'North Dakota': '34',
    'Ohio': '35',
    'Oklahoma': '36',
    'Oregon': '37',
    'Pennsylvania': '38',
    'Rhode Island': '39',
    'South Carolina': '40',
    'South Dakota': '41',
    'Tennessee': '42',
    'Texas': '43',
    'Utah': '44',
    'Vermont': '45',
    'Virginia': '46',
    'Washington': '47',
    'West Virginia': '48',
    'Wisconsin': '49',
    'Wyoming': '50',
    'Ontario': '51',
    'Quebec': '52',
    'Nova Scotia': '53',
    'New Brunswick': '54',
    'Manitoba': '55',
    'British Columbia': '56',
    'Prince Edward': '57',
    'Saskatchewan': '58',
    'Alberta': '59',
    'Newfoundland and Labrador': '60',
    'Washington, D.C.': '61',
    'Chihuahua': '62',
    'Baja California': '63',
    'Freeport': '64',
    'Nuevo Leon': '65',
}
# coordinates = (30.5029812,-84.2449241)
#
# results = rg.search(coordinates) # default mode = 2
#
# print(results)
NUM_OF_PROCESSES = 4
def ensure_output_paths_exist(directory=None):
    """Create the output directory (and parents) if it does not exist.

    directory -- target path; defaults to the module-level OUTPUT_DIRECTORY
        when omitted, keeping the original no-argument call working.

    The original used `os.mkdir` inside a bare `except: pass`, which both
    referenced `os` without importing it and silently hid real errors
    (e.g. permission problems). `exist_ok=True` makes the call idempotent
    while still surfacing genuine failures.
    """
    if directory is None:
        directory = OUTPUT_DIRECTORY
    os.makedirs(directory, exist_ok=True)
##############################################################################
############### Run through all folders ######################################
##############################################################################
def run_all(path):
    """Dispatch every .csv file found directly under *path* to gzworker
    using a thread pool."""
    csv_files = glob.glob(path + "/*.csv")
    # If NUM_OF_PROCESSES is falsy, fall back to the machine's CPU count.
    worker_count = NUM_OF_PROCESSES or mp.cpu_count()
    pool = multiprocessing.pool.ThreadPool(worker_count)
    pool.map(gzworker, csv_files, chunksize=1)
    pool.close()
##############################################################################
###################### Worker Function #######################################
##############################################################################
# def gzworker(fullpath):
# """Worker opens one .gz file"""
# print('Processing {}'.format(fullpath))
# tweet_buffer = []
# try:
# with open(fullpath, 'r+') as f:
# reader = csv.reader(f)
# #TODO: location = ???
# location = blob
# out_lines = [row + [lstName[i]] for i, row in enumerate(reader)]
# # f.seek(0) # set file position to the beginning of the file
# csv.writer(f, delimiter=',').writerows(out_lines)
#
#
# with csv.open(str(fullpath), 'rb') as infile:
# decoded = io.TextIOWrapper(infile, encoding='utf8')
# for _line in decoded:
# if _line.strip() != "":
# json_data = _line.split('|', 1)[1][:-1]
#
# result = tweet_select(json.loads(json_data))
# if result:
# tweet_buffer.append(result)
#
# except:
# print("Error in {}".format(fullpath))
# pass
#
# #Write to OUTPUT_DIRECTORY (if _buffer has contents)
# if tweet_buffer != None:
# print("going to save")
# OUTPUT_PATH = "%s/%s.csv" % (OUTPUT_DIRECTORY, fullpath[5:-3])
#
# with open(OUTPUT_PATH, "w", errors='ignore') as csvfile:
# writer = csv.writer(csvfile)
# for row in tweet_buffer:
# writer.writerow(row)
#
# print('Finished {}'.format(fullpath))
def gzworker(fullpath):
    """Open one .csv file and reverse-geocode each row.

    Assumes column 3 of every row holds a "[lon, lat]"-style string --
    TODO confirm against the upstream export format. Each coordinate pair
    is looked up with reverse_geocoder and the matching state/province id
    is printed; nothing is written back to the file.
    """
    print('Processing {}'.format(fullpath))
    # try:
    with open(fullpath, 'r+') as f:
        reader = csv.reader(f)
        for row in reader:
            # Strip the brackets/spaces from the "[lon, lat]" field.
            geoloc = row[3]
            geoloc = geoloc.split(',')
            lon = geoloc[0].replace('[', '')
            lat = geoloc[1].replace(']', '').replace(' ', '')
            # print('Longitude: {} \nLatitude: {}'.format(lon, lat))
            # m_obj = re.search(r"(\d+)", geoloc)
            # print(m_obj)
            # reverse_geocoder expects (lat, lon) ordering.
            coordinates = (lat,lon)
            results = rg.search(coordinates) # default mode = 2
            print(results)
            # Map the admin1 region name to its numeric-string id
            # (None when the region is not in the table).
            state_num = mx_ca_us_state_abbrev.get(results[0].get('admin1'))
            print(state_num)
            # state_num = us_state_abbrev.results['admin1']
            # print(state_num)
            # [('lat', '29.23329'), ('lon', '-98.79641'), ('name', 'Lytle'), ('admin1', 'Texas'), ('admin2', 'Atascosa County'), ('cc', 'US')]
    # except:
    #     print("Error in {}".format(fullpath))
    #     pass
    print('Finished {}'.format(fullpath))
#TODO: Get .csv file loaded
#TODO: extract long-lat from tweet
#TODO: invert long-lat
#TODO: use reverse_geocoder to get the information
#TODO: save the information on the same line in the same .csv file
| 30.455026 | 130 | 0.509034 | 620 | 5,756 | 4.63871 | 0.508065 | 0.029207 | 0.013561 | 0.00765 | 0.098748 | 0.086926 | 0.086926 | 0.061892 | 0.061892 | 0.027121 | 0 | 0.040131 | 0.255386 | 5,756 | 188 | 131 | 30.617021 | 0.630891 | 0.430334 | 0 | 0 | 0 | 0 | 0.274928 | 0 | 0 | 0 | 0 | 0.005319 | 0 | 1 | 0.030303 | false | 0.010101 | 0.060606 | 0 | 0.090909 | 0.040404 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf546ea7ea4bd4fa252d37fbbd6a124b7399d0c3 | 5,784 | py | Python | DIR.py | az7jh2/My-Raystation-Scripts | 3454378239320c2944fd96de8cb86be8824b5210 | [
"MIT"
] | 1 | 2021-05-29T22:48:49.000Z | 2021-05-29T22:48:49.000Z | DIR.py | az7jh2/My-Raystation-Scripts | 3454378239320c2944fd96de8cb86be8824b5210 | [
"MIT"
] | null | null | null | DIR.py | az7jh2/My-Raystation-Scripts | 3454378239320c2944fd96de8cb86be8824b5210 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from connect import *
# Homogeneity index (HI)
def GetHomogenietyIndex(total_dose, Prescription, RoiName):
    """Return the homogeneity index (D2% - D98%) / Prescription for a ROI.

    total_dose   -- dose object exposing GetDoseAtRelativeVolumes (e.g.
                    Patient.TreatmentDelivery.TreatmentCourse.TotalDose)
    Prescription -- prescription dose value
    RoiName      -- name of the ROI to evaluate
    """
    # GetDoseAtRelativeVolumes returns the DVH doses at the requested
    # relative volumes (here D2% and D98%) as an array of floats.
    dvh_doses = total_dose.GetDoseAtRelativeVolumes(RoiName = RoiName, RelativeVolumes = [0.02, 0.98])
    return (dvh_doses[0] - dvh_doses[1]) / Prescription
#-----------------------------------------------------------------------------------------------------------#
# Conformation index (CI)
def GetConformationIndex(total_dose, DoseValue, RoiName, externalname):
    """CI = (absolute external volume receiving >= DoseValue) / target volume.

    total_dose   -- dose object with GetRelativeVolumeAtDoseValues and
                    GetDoseGridRoi
    DoseValue    -- dose threshold
    RoiName      -- target ROI name
    externalname -- name of the external (body) contour
    """
    rel_ext = total_dose.GetRelativeVolumeAtDoseValues(RoiName = externalname, DoseValues = [DoseValue])
    # GetDoseGridRoi gives the ROI's dose-grid representation, whose
    # RoiVolumeDistribution.TotalVolume is the ROI's absolute volume.
    target_grid = total_dose.GetDoseGridRoi(RoiName = RoiName)
    external_grid = total_dose.GetDoseGridRoi(RoiName = externalname)
    target_volume = target_grid.RoiVolumeDistribution.TotalVolume
    # Use element [0] to get a scalar rather than array arithmetic.
    dose_volume = rel_ext[0] * external_grid.RoiVolumeDistribution.TotalVolume
    return dose_volume / target_volume
#----------------------------------------------------------------------------------------------------#
# Absolute ROI volume covered by a given dose:
# take the relative volume at that dose from the DVH and multiply by the
# ROI's total absolute volume.
def GetAbsoluteDoseVolume(total_dose, DoseValue,RoiName):
    """Return the absolute volume of RoiName receiving at least DoseValue."""
    rel = total_dose.GetRelativeVolumeAtDoseValues(RoiName = RoiName, DoseValues = [DoseValue])
    grid = total_dose.GetDoseGridRoi(RoiName = RoiName)
    return grid.RoiVolumeDistribution.TotalVolume * rel[0]
#-------------------------------------------------------------------------------------------------------------#
# Absolute volume of a ROI
def GetAbsoluteVolume(total_dose,RoiName):
    """Return the ROI's total absolute volume from its dose-grid representation."""
    return total_dose.GetDoseGridRoi(RoiName = RoiName).RoiVolumeDistribution.TotalVolume
#------------------------------------------------------------------------------------------------------#
# Conformation number (CN)
def GetConformationNumber(total_dose, DoseValue, RoiName, externalname):
    """CN = (target volume at dose)^2 / (body volume at dose * target volume)."""
    rel_target = total_dose.GetRelativeVolumeAtDoseValues(RoiName = RoiName, DoseValues = [DoseValue])
    rel_body = total_dose.GetRelativeVolumeAtDoseValues(RoiName = externalname, DoseValues = [DoseValue])
    target_grid = total_dose.GetDoseGridRoi(RoiName = RoiName)
    external_grid = total_dose.GetDoseGridRoi(RoiName = externalname)
    target_volume = target_grid.RoiVolumeDistribution.TotalVolume
    # Convert the relative DVH volumes to absolute volumes.
    target_dose_volume = rel_target[0] * target_volume
    body_dose_volume = rel_body[0] * external_grid.RoiVolumeDistribution.TotalVolume
    return (target_dose_volume * target_dose_volume) / (body_dose_volume * target_volume)
#--------------------------------------------------------------------------------------------------------------#
# COnformal INdex (COIN)
def GetCOnformalINdex(total_dose,DoseValue,TargetName,OrgansName, externalname):
    """COIN: the target's conformation number multiplied, for every organ at
    risk, by the fraction of that organ spared from DoseValue.

    total_dose   -- dose object (see GetConformationNumber)
    DoseValue    -- dose threshold
    TargetName   -- target ROI name
    OrgansName   -- iterable of organ-at-risk ROI names (may be empty)
    externalname -- external (body) contour name
    """
    # The original used the Python-2-only builtin `reduce`, which raised a
    # TypeError on an empty organ list. An explicit product loop works on
    # both Python 2 and 3 and returns CN * 1.0 when no organs are given.
    spared_product = 1.0
    for organ in OrgansName:
        spared_product *= 1 - GetAbsoluteDoseVolume(total_dose, DoseValue, organ) / GetAbsoluteVolume(total_dose, organ)
    return GetConformationNumber(total_dose, DoseValue, TargetName, externalname) * spared_product
#------------------------------------------------------------------------------------------------------------#
def main(dosename, targets, organs, prescription):
    """Print volume/HI/CI/CN/COIN statistics for each target ROI.

    dosename     -- name of a treatment plan or evaluation dose
    targets      -- list of target ROI names
    organs       -- list of organ-at-risk ROI names (for COIN)
    prescription -- prescription dose value

    NOTE(review): this function uses Python 2 `print` statements and the
    RayStation `get_current` API.
    """
    # get the current patient
    patient = get_current('Patient')
    planlist = []
    doselist = []
    # find the external (body) contour ROI
    # NOTE(review): the bare except swallows the original error before
    # re-raising a generic Exception.
    try:
        external_roi = next(r for r in patient.PatientModel.RegionsOfInterest if r.Type == 'External')
    except:
        raise Exception('No external ROI defined')
    externalname = external_roi.Name
    for plan in patient.TreatmentPlans:
        planlist.append(plan.Name)
    # collect evaluation doses; evaluation doses cannot be accessed by name,
    # so remember their names and index into the collection later
    for ev in patient.TreatmentDelivery.FractionEvaluations[0].DoseOnExaminations[0].DoseEvaluations:
        doselist.append(ev.Name)
    # fetch the dose: first try plans, then evaluation doses
    # NOTE(review): if dosename matches neither list, total_dose is unbound
    # and the loop below raises NameError.
    if dosename in planlist:
        total_dose = patient.TreatmentPlans[dosename].TreatmentCourse.TotalDose
    elif dosename in doselist:
        total_dose = patient.TreatmentDelivery.FractionEvaluations[0].DoseOnExaminations[0].DoseEvaluations[doselist.index(dosename)]
    for i in range(len(targets)):
        print 'the Absolute Volume of '+targets[i]+' is:'+str(round(GetAbsoluteVolume(total_dose,targets[i]),3))
        print 'the Homogeniety Index of '+targets[i]+' is:'+str(round(GetHomogenietyIndex(total_dose, prescription, targets[i]),3))
        print 'the Conformation Index of '+targets[i]+' is:'+str(round(GetConformationIndex(total_dose, prescription, targets[i], externalname),3))
        print 'the Conformation Number of '+targets[i]+' is:'+str(round(GetConformationNumber(total_dose, prescription, targets[i], externalname),3))
        print 'the COnformal INdex of '+targets[i]+' is:'+str(round(GetCOnformalINdex(total_dose,prescription,targets[i],organs, externalname),3))
#--------------------------------------------------------------------------------------------------------------#
if __name__=='__main__':
main(dosename = 'P1-33', targets = ['PGTV'], organs = ['Parotid gland L','Parotid gland R', 'Brain-sterm'], prescription = 7200) | 67.255814 | 149 | 0.681017 | 470 | 5,784 | 8.297872 | 0.331915 | 0.062308 | 0.027692 | 0.046154 | 0.408974 | 0.368462 | 0.30641 | 0.255641 | 0.147692 | 0.122051 | 0 | 0.005796 | 0.105118 | 5,784 | 86 | 150 | 67.255814 | 0.747682 | 0.276452 | 0 | 0.241379 | 0 | 0 | 0.057859 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.017241 | null | null | 0.086207 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
cf54c232a75d4a7341295831e0d07ef22dddb9f7 | 12,143 | py | Python | Trainer.py | Gorilla-Lab-SCUT/OrthDNNs | 7391b1751334c485feea212a80abc4dc8430dc1e | [
"BSD-3-Clause"
] | 4 | 2021-07-15T07:34:30.000Z | 2022-03-30T08:23:46.000Z | Trainer.py | Gorilla-Lab-SCUT/OrthDNNs | 7391b1751334c485feea212a80abc4dc8430dc1e | [
"BSD-3-Clause"
] | 1 | 2020-02-11T10:55:46.000Z | 2020-02-11T10:55:46.000Z | Trainer.py | Yuxin-Wen/OrthDNNs | 7391b1751334c485feea212a80abc4dc8430dc1e | [
"BSD-3-Clause"
] | 1 | 2021-11-23T03:31:09.000Z | 2021-11-23T03:31:09.000Z | from __future__ import division
import time
import numpy as np
import math
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import torchvision
from Utility import Average_meter
from Utility import Training_aux
#from Utility import progress_bar
class Trainer(object):
    """Package the data loaders, model, loss and optimisation method.

    The model is trained and validated here; learning-rate scheduling,
    checkpointing and the SVB/BBN weight-regularisation steps are also
    performed by this class.  (Previously this documentation was split
    across three consecutive string statements, of which only the first
    was an actual docstring — the rest were discarded expressions.)
    """
    def __init__(self, train_loader, val_loader, model, criterion,
                 optimizer, nEpoch, lr_base = 0.1, lr_end = 0.001, lr_decay_method = 'exp',
                 is_soft_regu=False, is_SRIP=False, soft_lambda = 1e-4,
                 svb_flag = False, iter_svb_flag=False, svb_factor = 0.5,
                 bbn_flag = False, bbn_factor = 0.2, bbn_type = 'rel',
                 fsave = './Save', print_freq = 10, is_evaluate = False, dataset = 'CIFAR10'):
        """Store the training configuration.

        Parameters
        ----------
        train_loader, val_loader : iterable
            Batched ``(image, target)`` loaders for training / validation.
        model : nn.Module
            The network to train (expected to live on the GPU).
        criterion : callable
            Loss function; when ``is_soft_regu`` or ``is_SRIP`` is set it is
            called with the model and ``soft_lambda`` as extra arguments.
        optimizer : torch.optim.Optimizer
        nEpoch : int
            Total number of training epochs.
        lr_base, lr_end : float
            Initial and final learning rates for the 'exp' schedule.
        lr_decay_method : str
            'exp' (log-spaced decay) or 'noDecay' (constant ``lr_base``).
        is_soft_regu, is_SRIP : bool
            Enable soft orthogonality / SRIP regularisation in the loss.
        soft_lambda : float
            Weight of the soft regularisation term.
        svb_flag, iter_svb_flag, svb_factor
            Singular-value bounding options (see fcConvWeightReguViaSVB);
            ``iter_svb_flag`` applies the bounding after every batch.
        bbn_flag, bbn_factor, bbn_type
            Batch-norm bounding options ('abs', 'rel' or 'bbn').
        fsave : str
            Directory used by Training_aux for logs and checkpoints.
        print_freq : int
            Log every ``print_freq`` batches.
        is_evaluate : bool
            Evaluation-only mode: validation results are not logged to file.
        dataset : str
            Accepted but unused; kept for interface compatibility.
        """
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.model = model
        self.criterion = criterion
        self.optimizer = optimizer
        self.nEpoch = nEpoch
        self.lr_base = lr_base
        self.lr_end = lr_end
        self.lr_decay_method = lr_decay_method
        self.is_soft_regu = is_soft_regu
        self.is_SRIP = is_SRIP
        self.soft_lambda = soft_lambda
        self.svb_flag = svb_flag
        self.iter_svb_flag = iter_svb_flag
        self.svb_factor = svb_factor
        self.bbn_flag = bbn_flag
        self.bbn_factor = bbn_factor
        self.bbn_type = bbn_type
        self.training_aux = Training_aux(fsave)
        self.is_evaluate = is_evaluate
        self.print_freq = print_freq
        # Best top-1 accuracy seen so far; updated by validate().
        self.best_prec1 = 0
    def train(self, epoch):
        """Train for one epoch on the training set.

        Runs a standard supervised loop: forward pass, loss (optionally with
        soft/SRIP regularisation terms), backward pass and optimiser step.
        Batch statistics are accumulated in Average_meter objects and the
        epoch summary is appended to the log file via Training_aux.
        """
        batch_time = Average_meter()
        data_time = Average_meter()
        losses = Average_meter()
        top1 = Average_meter()
        top5 = Average_meter()
        # switch to train mode
        self.model.train()
        begin = time.time()
        for i, (image, target) in enumerate(self.train_loader):
            batch_size= image.size(0)
            # measure data loading time
            data_time.update(time.time() - begin)
            image = image.cuda()
            # NOTE(review): Variable is a no-op wrapper on modern PyTorch;
            # kept for compatibility with the original code.
            input_var = Variable(image)
            target = target.cuda()
            target_var = Variable(target)
            output = self.model(input_var)
            if self.is_soft_regu or self.is_SRIP:
                # regularised criteria also take the model and the
                # regularisation weight as extra arguments
                loss = self.criterion(output, target_var, self.model, self.soft_lambda)
            else:
                loss = self.criterion(output, target_var)
            # measure accuracy and record loss
            prec1, prec5 = self.training_aux.accuracy(output.data, target, topk=(1, 5))
            losses.update(loss.data.item(), batch_size)
            top1.update(prec1.item(), batch_size)
            top5.update(prec5.item(), batch_size)
            # compute gradient and do SGD step
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            # measure elapsed time (includes data loading for this batch)
            batch_time.update(time.time() - begin)
            if i % self.print_freq == 0:
                #progress_bar(i, len(self.train_loader), 'Loss: {loss.avg:.4f} | Prec@1 {top1.avg:.3f} | Prec@5 {top5.avg:.3f}'.format(loss=losses, top1=top1, top5=top5))
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.avg:.3f}\t'
                      'Data {data_time.avg:.3f}\t'
                      'Loss {loss.avg:.4f}\t'
                      'Prec@1 {top1.avg:.3f}\t'
                      'Prec@5 {top5.avg:.3f}'.format(
                          epoch, i, len(self.train_loader), batch_time=batch_time,
                          data_time=data_time, loss=losses, top1=top1, top5=top5))
                begin = time.time()
            # Iterative singular-value bounding: applied after every batch
            # except the very last batch of the very last epoch.
            if (self.iter_svb_flag) and epoch != (self.nEpoch -1) and i != (self.train_loader.__len__() -1):
                self.fcConvWeightReguViaSVB()
        self.training_aux.write_err_to_file(epoch = epoch, top1 = top1, top5 = top5, trn_loss = losses, mode = 'train')
        return
def validate(self, epoch, img_size=320):
"""Perform validation on the validation set"""
batch_time = Average_meter()
losses = Average_meter()
top1 = Average_meter()
top5 = Average_meter()
self.model.eval()
begin = time.time()
with torch.no_grad():
for i, (raw_img, raw_label) in enumerate(self.val_loader):
raw_label = raw_label.cuda()
raw_img = raw_img.cuda()
input_var = Variable(raw_img)
target_var = Variable(raw_label)
# compute output
output = self.model(input_var)
# measure accuracy and record loss
criterion = nn.CrossEntropyLoss()
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = self.training_aux.accuracy(output.data, raw_label, topk=(1, 5))
top1.update(prec1.item(), raw_img.size(0))
top5.update(prec5.item(), raw_img.size(0))
losses.update(loss.data.item(), raw_img.size(0))
# measure elapsed time
batch_time.update(time.time() - begin)
if i % self.print_freq == 0:
#progress_bar(i, len(self.train_loader), 'Loss: {loss.avg:.4f} | Prec@1 {top1.avg:.3f} | Prec@5 {top5.avg:.3f}'.format(loss=losses, top1=top1, top5=top5))
print('Test: [{0}/{1}]\t'
'Time {batch_time.avg:.3f}\t'
'Loss {loss.avg:.4f}\t'
'{top1.avg:.3f}\t'
'{top5.avg:.3f}'.format(
i, len(self.val_loader), batch_time=batch_time,
loss=losses, top1=top1, top5=top5))
begin = time.time()
print(' * Loss {loss.avg:.4f} Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(loss=losses, top1=top1, top5=top5))
self.is_best = top1.avg > self.best_prec1
self.best_prec1 = max(top1.avg, self.best_prec1)
if self.is_evaluate:
return top1.avg
else:
self.training_aux.write_err_to_file(epoch = epoch, top1 = top1, top5 = top5, mode = 'val')
return top1.avg
def adjust_learning_rate(self, epoch, warm_up_epoch = 0,scheduler=None):
"""Sets the learning rate to the initial LR decayed by 10 after 0.5 and 0.75 epochs"""
if self.lr_decay_method == 'exp':
lr = self.lr_base
if epoch < warm_up_epoch:
lr = 0.001 + (self.lr_base - 0.001) * epoch / warm_up_epoch
if epoch >= warm_up_epoch:
lr_series = torch.logspace(math.log(self.lr_base, 10), math.log(self.lr_end, 10), int(self.nEpoch/2))
lr = lr_series[int(math.floor((epoch-warm_up_epoch)/2))]
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
elif self.lr_decay_method == 'noDecay':
lr = self.lr_base
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
print('lr:{0}'.format(self.optimizer.param_groups[-1]['lr']))
return
    def save_checkpoint(self, epoch, save_flag = 'learning', filename = False):
        """Serialise model/optimizer state through Training_aux.

        save_flag selects which model/optimizer pair is saved:
        'learning' -> self.model / self.optimizer;
        'standard' -> self.standard_model / self.standard_optimizer.
        NOTE(review): standard_model/standard_optimizer are never assigned
        in __init__, so the 'standard' branch raises AttributeError unless
        a caller sets them first — confirm against the rest of the project.
        filename: optional checkpoint file name; defaults to
        'checkpoint.pth.tar' when falsy.
        """
        if save_flag == 'standard':
            model = self.standard_model
            optimizer = self.standard_optimizer
        elif save_flag == 'learning':
            model = self.model
            optimizer = self.optimizer
        else:
            raise Exception('save_flag should be one of standard or learning')
        # best_prec1/is_best come from validate(); they let Training_aux
        # keep a separate copy of the best-performing checkpoint.
        state = {
            'epoch': epoch,
            'state_dict': model.state_dict(),
            'best_prec1': self.best_prec1,
            'optimizer' : optimizer.state_dict(),
        }
        fname = filename or 'checkpoint' + '.pth.tar'
        self.training_aux.save_checkpoint(state = state, is_best = self.is_best, filename=fname)
        return
    def fcConvWeightReguViaSVB(self):
        """Apply singular-value bounding (SVB) and batch-norm bounding (BBN).

        SVB (svb_flag): for every Conv2d/Linear layer, compute the SVD of
        the (flattened) weight matrix and clip each singular value into
        [1/(1+svb_factor), 1+svb_factor], then write the re-composed matrix
        back into the layer in place.
        BBN (bbn_flag): clip BatchNorm2d scale parameters, either in
        absolute terms ('abs'), relative to their mean ('rel'), or relative
        to mean-normalised running std ('bbn').
        """
        for m in self.model.modules():
            #svb
            if self.svb_flag == True:
                if isinstance(m,nn.Conv2d):
                    # (out_channels, in*kh*kw) matrix, transposed and cloned
                    # so the SVD does not alias the live weights.
                    tmpbatchM = m.weight.data.view(m.weight.data.size(0), -1).t().clone()
                    try:
                        tmpU, tmpS, tmpV = torch.svd(tmpbatchM)
                    except:
                        # NOTE(review): bare except — any failure (not just
                        # SVD non-convergence) falls through to the numpy
                        # path.  The boolean-mask indexing below flattens
                        # the matrix before re-viewing it, which silently
                        # assumes no NaNs were actually removed; and
                        # np.linalg.svd returns Vh (not V), so tmpV here
                        # has different semantics than torch.svd's V —
                        # confirm before relying on this fallback.
                        tmpbatchM = tmpbatchM[np.logical_not(np.isnan(tmpbatchM))]
                        tmpbatchM = tmpbatchM.view(m.weight.data.size(0), -1).t()
                        tmpU, tmpS, tmpV = np.linalg.svd(tmpbatchM.cpu().numpy())
                        tmpU = torch.from_numpy(tmpU).cuda()
                        tmpS = torch.from_numpy(tmpS).cuda()
                        tmpV = torch.from_numpy(tmpV).cuda()
                    # Clip singular values into the allowed band.
                    for idx in range(0, tmpS.size(0)):
                        if tmpS[idx] > (1+self.svb_factor):
                            tmpS[idx] = 1+self.svb_factor
                        elif tmpS[idx] < 1/(1+self.svb_factor):
                            tmpS[idx] = 1/(1+self.svb_factor)
                    # Re-compose U·diag(S)·V^T and copy back in place.
                    tmpbatchM = torch.mm(torch.mm(tmpU, torch.diag(tmpS.cuda())), tmpV.t()).t().contiguous()
                    m.weight.data.copy_(tmpbatchM.view_as(m.weight.data))
                elif isinstance(m, nn.Linear):
                    # Same clipping for fully-connected weights (no numpy
                    # fallback here).
                    tmpbatchM = m.weight.data.t().clone()
                    tmpU, tmpS, tmpV = torch.svd(tmpbatchM)
                    for idx in range(0, tmpS.size(0)):
                        if tmpS[idx] > (1+self.svb_factor):
                            tmpS[idx] = 1+self.svb_factor
                        elif tmpS[idx] < 1/(1+self.svb_factor):
                            tmpS[idx] = 1/(1+self.svb_factor)
                    tmpbatchM = torch.mm(torch.mm(tmpU, torch.diag(tmpS.cuda())), tmpV.t()).t().contiguous()
                    m.weight.data.copy_(tmpbatchM.view_as(m.weight.data))
            # bbn
            if self.bbn_flag == True:
                if isinstance(m, nn.BatchNorm2d):
                    # tmpbatchM aliases the live gamma vector; the element
                    # assignments below therefore mutate it in place.
                    tmpbatchM = m.weight.data
                    if self.bbn_type == 'abs':
                        # Absolute clipping of each gamma.
                        for idx in range(0, tmpbatchM.size(0)):
                            if tmpbatchM[idx] > (1+self.bbn_factor):
                                tmpbatchM[idx] = (1+self.bbn_factor)
                            elif tmpbatchM[idx] < 1/(1+self.bbn_factor):
                                tmpbatchM[idx] = 1/(1+self.bbn_factor)
                    elif self.bbn_type == 'rel':
                        # Clip relative to the mean gamma of this layer.
                        mean = torch.mean(tmpbatchM)
                        relVec = torch.div(tmpbatchM, mean)
                        for idx in range(0, tmpbatchM.size(0)):
                            if relVec[idx] > (1+self.bbn_factor):
                                tmpbatchM[idx] = mean * (1+self.bbn_factor)
                            elif relVec[idx] < 1/(1+self.bbn_factor):
                                tmpbatchM[idx] = mean/(1+self.bbn_factor)
                    elif self.bbn_type == 'bbn':
                        # Clip gamma normalised by the running std of the
                        # batch-norm statistics.
                        running_var = m.running_var
                        eps = m.eps
                        running_std = torch.sqrt(torch.add(running_var, eps))
                        mean = torch.mean(tmpbatchM/running_std)
                        for idx in range(0, tmpbatchM.size(0)):
                            if tmpbatchM[idx]/(running_std[idx]*mean) > 1+self.bbn_factor:
                                tmpbatchM[idx] = running_std[idx] * mean * (1+self.bbn_factor)
                            elif tmpbatchM[idx]/(running_std[idx]*mean) < 1/(1+self.bbn_factor):
                                tmpbatchM[idx] = running_std[idx] * mean / (1+self.bbn_factor)
                    # Redundant when tmpbatchM aliases m.weight.data, but
                    # kept for safety/clarity.
                    m.weight.data.copy_(tmpbatchM)
| 43.679856 | 174 | 0.534464 | 1,472 | 12,143 | 4.238451 | 0.158288 | 0.01683 | 0.027088 | 0.026927 | 0.431319 | 0.374098 | 0.336432 | 0.317038 | 0.298125 | 0.285623 | 0 | 0.024778 | 0.35189 | 12,143 | 277 | 175 | 43.837545 | 0.76798 | 0.066376 | 0 | 0.253456 | 0 | 0.004608 | 0.043106 | 0.005801 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02765 | false | 0 | 0.064516 | 0 | 0.119816 | 0.036866 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf555654bbc3d88a367ec4273df655fffb2396cc | 952 | py | Python | src/utils/login_to_spotify.py | SecondThundeR/spotichecker | 05787bae85cb0d9c5832939c72bad526eb419705 | [
"MIT"
] | null | null | null | src/utils/login_to_spotify.py | SecondThundeR/spotichecker | 05787bae85cb0d9c5832939c72bad526eb419705 | [
"MIT"
] | null | null | null | src/utils/login_to_spotify.py | SecondThundeR/spotichecker | 05787bae85cb0d9c5832939c72bad526eb419705 | [
"MIT"
] | null | null | null | """Utils for logging to Spotify.
This module contains functions for connecting to Spotify API.
This file can also be imported as a module and contains the following functions:
* login_to_spotify - connect to Spotify and return OAuth object
"""
import spotipy
from spotipy.oauth2 import SpotifyOAuth
SCOPES = "user-library-read, playlist-read-private, playlist-read-collaborative"
def login_to_spotify(credentials: dict) -> spotipy.Spotify:
    """Trigger Spotify authentication and return an authenticated client.

    Note: the previous annotation/docstring claimed this returns a
    ``SpotifyOAuth`` object, but the function actually builds and returns
    a ``spotipy.Spotify`` client whose auth manager is a ``SpotifyOAuth``.

    Args:
        credentials (dict): Credentials data (CLIENT_ID and CLIENT_SECRET).

    Returns:
        spotipy.Spotify: Authenticated Spotify client.
    """
    sp = spotipy.Spotify(
        auth_manager=SpotifyOAuth(
            client_id=credentials["CLIENT_ID"],
            client_secret=credentials["CLIENT_SECRET"],
            # Local redirect used to complete the OAuth flow in a browser.
            redirect_uri="http://localhost:8080",
            scope=SCOPES,
        )
    )
    return sp
| 27.2 | 80 | 0.698529 | 111 | 952 | 5.882883 | 0.540541 | 0.068913 | 0.042879 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008086 | 0.220588 | 952 | 34 | 81 | 28 | 0.871968 | 0.465336 | 0 | 0 | 0 | 0 | 0.235294 | 0.102941 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf556fe0579840dc64ac6b121230f3d881ae21c9 | 17,516 | py | Python | prostate_cancer_nomograms/statistical_analysis/nomograms_performance_evaluation/decision_curve_analysis/__init__.py | MaxenceLarose/ProstateCancerNomograms | 4ff15dccd1f2dbde58d3a21a2e680e909e2e408a | [
"Apache-2.0"
] | 1 | 2021-10-04T18:03:10.000Z | 2021-10-04T18:03:10.000Z | prostate_cancer_nomograms/statistical_analysis/nomograms_performance_evaluation/decision_curve_analysis/__init__.py | MaxenceLarose/ProstateCancerNomograms | 4ff15dccd1f2dbde58d3a21a2e680e909e2e408a | [
"Apache-2.0"
] | null | null | null | prostate_cancer_nomograms/statistical_analysis/nomograms_performance_evaluation/decision_curve_analysis/__init__.py | MaxenceLarose/ProstateCancerNomograms | 4ff15dccd1f2dbde58d3a21a2e680e909e2e408a | [
"Apache-2.0"
] | null | null | null | import pandas as pd
from .algo import *
from .validate import *
from .validate import DCAError
__all__ = ['DecisionCurveAnalysis'] # only public member should be the class
class DecisionCurveAnalysis:
    """DecisionCurveAnalysis(...)

    DecisionCurveAnalysis(algorithm='dca', **kwargs)

    Create an object of class DecisionCurveAnalysis for generating
    and plotting "net benefit" and "interventions avoided" curves

    Parameters
    ----------
    algorithm : str
        the type of analysis to run
        valid values are 'dca' (decision curve) or 'stdca' (survival time decision curve)
    **kwargs : object
        keyword arguments that are used in the analysis

    Attributes
    ----------
    data : pd.DataFrame
        The data set to analyze, with observations in each row, and
        outcomes/predictors in the columns
    outcome : str
        The column in `data` to use as the outcome for the analysis
        All observations in this column must be coded 0/1
    predictors : list(str)
        The column(s) in `data` to use as predictors during the analysis
        All observations, 'x', in this column must be in the range 0 <= x <= 1

    Methods
    -------
    run : runs the analysis
    smooth_results : use local regression (LOWESS) to smooth the
        results of the analysis, using the specified fraction
    plot_net_benefit : plot the net-benefit curves
    plot_interv_avoid : plot the interventions-avoided curves
    """
    # NOTE(review): both dicts below are *class* attributes.  __init__
    # assigns into them in place (self._common_args[kw] = ...), so their
    # values are shared across every instance of this class — confirm
    # whether multiple concurrent analyses are ever intended.
    #universal parameters for dca
    _common_args = {'data' : None,
                    'outcome' : None,
                    'predictors' : None,
                    'thresh_lo' : 0.01,
                    'thresh_hi' : 0.99,
                    'thresh_step' : 0.01,
                    'probabilities' : None,
                    'harms' : None,
                    'intervention_per' : 100}
    #stdca-specific attributes
    _stdca_args = {'tt_outcome' : None,
                   'time_point' : None,
                   'cmp_risk' : False}
    def __init__(self, algorithm='dca', **kwargs):
        """Initializes the DecisionCurveAnalysis object

        Arguments for the analysis may be passed in as keywords upon object
        initialization.  Keywords are first assigned naively into the
        argument dictionaries, then every argument is validated (data,
        outcome, predictors, threshold bounds, probabilities, harms).

        Parameters
        ----------
        algorithm : str
            the algorithm to use, valid options are 'dca' or 'stdca'
        **kwargs :
            keyword arguments to populate instance attributes that will be used in analysis

        Raises
        ------
        ValueError
            if user doesn't specify a valid algorithm; valid values are 'dca' or 'stdca'
            if the user specifies an invalid keyword
        """
        if algorithm not in ['dca', 'stdca']:
            raise ValueError("did not specify a valid algorithm, only 'dca' and 'stdca' are valid")
        self.algorithm = algorithm
        #set args based on keywords passed in
        #this naively assigns values passed in -- validation occurs afterwords
        for kw in kwargs:
            if kw in self._common_args:
                self._common_args[kw] = kwargs[kw] #assign
                continue
            elif kw in self._stdca_args:
                self._stdca_args[kw] = kwargs[kw]
            else:
                raise ValueError("{kw} is not a valid decision_curve_analysis keyword"
                                 .format(kw=repr(kw)))
        #do validation on all args, make sure we still have a valid analysis
        self.data = data_validate(self.data)
        self.outcome = outcome_validate(self.data, self.outcome)
        self.predictors = predictors_validate(self.predictors, self.data)
        #validate bounds
        new_bounds = []
        curr_bounds = [self._common_args['thresh_lo'], self._common_args['thresh_hi'],
                       self._common_args['thresh_step']]
        for i, bound in enumerate(['lower', 'upper', 'step']):
            new_bounds.append(threshold_validate(bound, self.threshold_bound(bound),
                                                 curr_bounds))
        self.set_threshold_bounds(new_bounds[0], new_bounds[1], new_bounds[2])
        #validate predictor-reliant probs/harms
        self.probabilities = probabilities_validate(self.probabilities,
                                                    self.predictors)
        self.harms = harms_validate(self.harms, self.predictors)
        #validate the data in each predictor column
        self.data = validate_data_predictors(self.data, self.outcome, self.predictors,
                                             self.probabilities)
def _args_dict(self):
"""Forms the arguments to pass to the analysis algorithm
Returns
-------
dict(str, object)
A dictionary that can be unpacked and passed to the algorithm for the
analysis
"""
if self.algorithm == 'dca':
return self._common_args
else:
from collections import Counter
return dict(Counter(self._common_args) + Counter(self._stdca_args))
    def _algo(self):
        """The algorithm to use for this analysis

        Returns the `dca` or `stdca` function (imported from .algo) based
        on the `algorithm` selected at construction time.
        """
        return dca if self.algorithm == 'dca' else stdca
    def run(self, return_results=False):
        """Performs the analysis

        Parameters
        ----------
        return_results : bool
            if `True`, the function returns the results as a tuple;
            if `False` (default), the results are stored on the instance
            attribute `results` instead.

        Returns
        -------
        tuple(pd.DataFrame, pd.DataFrame)
            Returns net_benefit, interventions_avoided if `return_results=True`
        """
        # Dispatch to dca/stdca with the assembled keyword arguments.
        nb, ia = self._algo()(**(self._args_dict()))
        if return_results:
            return nb, ia
        else:
            self.results = {'net benefit' : nb, 'interventions avoided' : ia}

    def smooth_results(self, lowess_frac, return_results=False):
        """Smooths the results using a LOWESS smoother

        Must be called after `run`, since it reads `self.results`.

        Parameters
        ----------
        lowess_frac : float
            the fraction of the endog value to use when smoothing
        return_results : bool
            if `True`, the smoothed frames are returned as a tuple;
            if `False` (default), they are concatenated onto the stored
            `results` frames as additional columns.

        Returns
        -------
        tuple(pd.DataFrame, pd.DataFrame)
            smoothed predictor dataFrames for results if `return_results=True`
        """
        # NOTE(review): imported lazily from the external `dcapy` package —
        # confirm this dependency is available where smoothing is used.
        from dcapy.calc import lowess_smooth_results
        _nb = _ia = None
        for predictor in self.predictors:
            nb, ia = lowess_smooth_results(predictor, self.results['net benefit'],
                                           self.results['interventions avoided'],
                                           lowess_frac)
            #concatenate results, one smoothed column per predictor
            _nb = pd.concat([_nb, nb], axis=1)
            _ia = pd.concat([_ia, ia], axis=1)
        if return_results:
            return _nb, _ia
        else:
            self.results['net benefit'] = pd.concat(
                [self.results['net benefit'], _nb], axis=1)
            self.results['interventions avoided'] = pd.concat(
                [self.results['interventions avoided'], _ia], axis=1)
def plot_net_benefit(self, custom_axes=None, make_legend=True):
"""Plots the net benefit from the analysis
Parameters
----------
custom_axes : list(float)
a length-4 list of dimensions for the plot, `[x_min, x_max, y_min, y_max]`
make_legend : bool
whether to include a legend in the plot
Returns
-------
matplotlib.rc_context
"""
try:
import matplotlib.pyplot as plt
except ImportError as e:
e.args += ("plotting the analysis requires matplotlib")
raise
try:
net_benefit = getattr(self, 'results')['net benefit']
except AttributeError:
raise DCAError("must run analysis before plotting!")
plt.plot(net_benefit)
plt.ylabel("Net Benefit")
plt.xlabel("Threshold Probability")
#prettify the graph
if custom_axes:
plt.axis(custom_axes)
else: #use default
plt.axis([0, self.threshold_bound('upper')*100,
-0.05, 0.20])
def plot_interventions_avoided(self, custom_axes=None, make_legend=True):
"""Plots the interventions avoided per `interventions_per` patients
Notes
-----
Generated plots are 'interventions avoided per `intervention_per` patients' vs. threshold
Parameters
----------
custom_axes : list(float)
a length-4 list of dimensions for the plot, `[x_min, x_max, y_min, y_max]`
make_legend : bool
whether to include a legend in the plot
Returns
-------
matplotlib.rc_context
context manager for working with the newly-created plot
"""
try:
import matplotlib.pyplot as plt
except ImportError as e:
e.args += ("plotting the analysis requires matplotlib")
raise
try:
interv_avoid = getattr(self, 'results')['interventions avoided']
except AttributeError:
raise DCAError("must run analysis before plotting!")
iaplot = plt.plot(interv_avoid)
#TODO: graph prettying/customization
return iaplot
    @property
    def data(self):
        """The data set to analyze

        Returns
        -------
        pd.DataFrame
        """
        return self._common_args['data']

    @data.setter
    def data(self, value):
        """Set the data for the analysis

        Parameters
        ----------
        value : pd.DataFrame
            the data to analyze
        """
        value = data_validate(value)  # validate before storing
        self._common_args['data'] = value

    @property
    def outcome(self):
        """The column in `data` used as the outcome for the analysis

        Returns
        -------
        str
        """
        return self._common_args['outcome']

    @outcome.setter
    def outcome(self, value):
        """Sets the column in the dataset to use as the outcome for the analysis

        Parameters
        ----------
        value : str
            the name of the column in `data` to set as `outcome`
        """
        value = outcome_validate(self.data, value)  # validate against data
        self._common_args['outcome'] = value

    @property
    def predictors(self):
        """The predictors to use

        Returns
        -------
        list(str)
            A list of all predictors for the analysis
        """
        return self._common_args['predictors']

    @predictors.setter
    def predictors(self, value):
        """Sets the predictors to use for the analysis

        Parameters
        ----------
        value : list(str)
            the list of predictors to use
        """
        value = predictors_validate(value, self.data)
        self._common_args['predictors'] = value
def threshold_bound(self, bound):
"""Gets the specified threshold boundary
Parameters
----------
bound : str
the boundary to get; valid values are "lower", "upper", or "step"
Returns
-------
float
the current value of that boundary
"""
mapping = {'lower' : 'thresh_lo',
'upper' : 'thresh_hi',
'step' : 'thresh_step'}
try:
return self._common_args[mapping[bound]]
except KeyError:
raise ValueError("did not specify a valid boundary")
    def set_threshold_bounds(self, lower, upper, step=None):
        """Sets the threshold boundaries (thresh_*) for the analysis

        Notes
        -----
        Passing `None` for any of the parameters will skip that parameter
        The analysis will be run over all steps, x, lower <= x <= upper

        Parameters
        ----------
        lower : float
            the lower boundary
        upper : float
            the upper boundary
        step : float
            the increment between calculations
        """
        # When no step is given, validate the other bounds against the
        # currently configured step.
        _step = step if step else self._common_args['thresh_step']
        bounds_to_test = [lower, upper, _step]
        if lower is not None:
            lower = threshold_validate('lower', lower, bounds_to_test)
            self._common_args['thresh_lo'] = lower
        if upper is not None:
            upper = threshold_validate('upper', upper, bounds_to_test)
            self._common_args['thresh_hi'] = upper
        if step is not None:
            step = threshold_validate('step', step, bounds_to_test)
            self._common_args['thresh_step'] = step
    @property
    def probabilities(self):
        """The list of probability values for each predictor

        Returns
        -------
        list(bool)
            the probability list
        """
        return self._common_args['probabilities']

    @probabilities.setter
    def probabilities(self, value):
        """Sets the probabilities list for the analysis

        Notes
        -----
        The length of the parameter `value` must match that of the predictors

        Parameters
        ----------
        value : list(bool)
            a list of probabilities to assign, one for each predictor
        """
        value = probabilities_validate(value, self.predictors)
        self._common_args['probabilities'] = value
def set_probability_for_predictor(self, predictor, probability):
"""Sets the probability value for the given predictor
Parameters
----------
predictor : str
the predictor to set the probability value for
probability : bool
the probability value
"""
try: # make sure we're setting a valid predictor
ind = self._common_args['predictors'].index(predictor)
except ValueError as e:
e.args += ("did not specify a valid predictor")
raise
self._common_args['probabilities'][ind] = probability
    @property
    def harms(self):
        """The list of harm values for the predictors

        Returns
        -------
        list(float)
        """
        return self._common_args['harms']

    @harms.setter
    def harms(self, value):
        """Sets the list of harm values to be used

        Notes
        -----
        The length of the parameter `value` must match that of the predictors

        Parameters
        ----------
        value : list(float)
            a list of floats to assign, one for each predictor
        """
        value = harms_validate(value, self.predictors)  # validate
        self._common_args['harms'] = value
def set_harm_for_predictor(self, predictor, harm):
"""Sets the harm value for the given predictor
Parameters
----------
predictor : str
the predictor to set the harm value for
harm : float
the harm value (must be between 0 and 1)
"""
try: # make sure specifying a valid predictor
ind = self._common_args['harm'].index(predictor)
except ValueError as e:
e.args += ("did not specify a valid predictor")
raise
self._common_args['harm'][ind] = harm
@property
def intervention_per(self):
"""The number of patients per intervention
Returns
-------
int
"""
return self._common_args['intervention_per']
@intervention_per.setter
def intervention_per(self, value):
"""Sets the value of the number of patients to assume per intervention
Parameters
----------
value : int
"""
self._common_args['intervention_per'] = value
@property
def time_to_outcome(self):
"""The column in the data used to specify the time taken to reach the outcome
Returns
-------
str
"""
return self._common_args['tt_outcome']
@time_to_outcome.setter
def time_to_outcome(self, value):
"""Sets the column to use as the `tt_outcome` for the analysis
Parameters
----------
value : str
"""
if value in data.columns:
self._stdca_args['tt_outcome'] = value
else:
raise ValueError("time to outcome must be a valid column in the data set")
@property
def time_point(self):
"""The time point of interest
Returns
-------
float
"""
return self._stdca_args['time_point']
@time_point.setter
def time_point(self, value):
"""Sets the time point of interest
Parameters
----------
value : float
"""
self._stdca_args['time_point'] = value
@property
def competing_risk(self):
"""Run competing risk analysis
Returns
-------
bool
"""
return self._stdca_args['cmp_risk']
@competing_risk.setter
def competing_risk(self, value):
"""Sets whether to run a competing risk analysis
Parameters
----------
value : bool
"""
if not isinstance(value, bool):
raise TypeError("competing risk must be a boolean value")
self._stdca_args['cmp_risk'] = value | 32.13945 | 99 | 0.565711 | 1,925 | 17,516 | 5.02026 | 0.152727 | 0.031043 | 0.042012 | 0.018626 | 0.292529 | 0.224855 | 0.212024 | 0.168874 | 0.162459 | 0.141349 | 0 | 0.003206 | 0.341117 | 17,516 | 545 | 100 | 32.13945 | 0.834156 | 0.375885 | 0 | 0.205882 | 0 | 0 | 0.12688 | 0.004829 | 0 | 0 | 0 | 0.007339 | 0 | 1 | 0.142157 | false | 0 | 0.04902 | 0 | 0.284314 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf565b37008bf14878731348b0d414b055945931 | 1,493 | py | Python | pyxmp/xmp.py | jeslyvarghese/pyxmp | 94e9f97574230f04b47fbcc7ed2caaa26e125ec4 | [
"MIT"
] | null | null | null | pyxmp/xmp.py | jeslyvarghese/pyxmp | 94e9f97574230f04b47fbcc7ed2caaa26e125ec4 | [
"MIT"
] | null | null | null | pyxmp/xmp.py | jeslyvarghese/pyxmp | 94e9f97574230f04b47fbcc7ed2caaa26e125ec4 | [
"MIT"
] | null | null | null | import xml.etree.ElementTree as ET
from .__keysearch import keysearch
from .__attribute import Attribute
class XMP(object):
    """Parse the XMP metadata packet embedded in a file.

    The raw bytes between b'<x:xmpmeta' and b'</x:xmpmeta>' are parsed
    with ElementTree, and the attributes of the first nested element are
    exposed as per-namespace `Attribute` objects, e.g. ``xmp.dc.title``.
    """
    def __init__(self, filepath, **namespaces):
        # namespaces: mapping of prefix -> namespace URL, e.g.
        # XMP(path, dc='http://purl.org/dc/elements/1.1/').
        self.filepath = filepath
        with open(self.filepath, 'rb') as f:
            data = f.read()
        xmp_start = data.find(b'<x:xmpmeta')
        xmp_end = data.find(b'</x:xmpmeta')
        self.__namespaces = namespaces
        # +12 = len(b'</x:xmpmeta') + 1, i.e. include the closing '>'.
        # NOTE(review): if the file has no XMP packet, find() returns -1
        # and this slice is garbage; the ParseError handler below then
        # leaves the attribute dict empty — confirm that is the intent.
        self.__xmp_string = data[xmp_start:xmp_end+12]
        try:
            self.__root = ET.fromstring(self.__xmp_string)
            # root[0][0]: presumably the rdf:Description element carrying
            # the metadata attributes — TODO confirm with real XMP samples.
            self.__rdf_el = self.__root[0][0]
            self.__attrib_dict = self.__rdf_el.attrib
        except ET.ParseError:
            self.__attrib_dict = {}
        self.__namespaced_dict = {}
        self.__update_namespaced_dict()
        self.__create_namespace_attributes()

    def __update_namespaced_dict(self):
        # Rewrite '{url}tag' attribute keys as 'prefix:tag' for each known
        # namespace; keys with unrecognised namespaces are kept unchanged.
        for k, v in self.__attrib_dict.items():
            nk = k
            for ns, url in self.__namespaces.items():
                nk = k.replace('{'+ url +'}', ns+':')
                if k != nk:
                    break  # first matching namespace wins
            self.__namespaced_dict[nk] = v

    def __create_namespace_attributes(self):
        # For each prefix create an Attribute holder (e.g. self.dc) and
        # attach the matching values: 'dc:title' becomes self.dc.title.
        for k in self.__namespaces.keys():
            setattr(self, k, Attribute())
            obj = getattr(self, k)
            for key in keysearch(self.__namespaced_dict, k):
                attr_name = key.replace(k+':', '')
                setattr(obj, attr_name, self.__namespaced_dict[key])
| 37.325 | 68 | 0.578701 | 176 | 1,493 | 4.494318 | 0.363636 | 0.106195 | 0.091024 | 0.025284 | 0.042984 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00388 | 0.309444 | 1,493 | 39 | 69 | 38.282051 | 0.763337 | 0 | 0 | 0 | 0 | 0 | 0.018084 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.194444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf56be3b91ad405a29cca6a62ba0311c0c51443c | 1,424 | py | Python | archieves/checkdata.py | Donsuno/conda | d5c8fb2cc3f724c109b7343cc0bdb93a5afa12ba | [
"BSD-3-Clause"
] | null | null | null | archieves/checkdata.py | Donsuno/conda | d5c8fb2cc3f724c109b7343cc0bdb93a5afa12ba | [
"BSD-3-Clause"
] | null | null | null | archieves/checkdata.py | Donsuno/conda | d5c8fb2cc3f724c109b7343cc0bdb93a5afa12ba | [
"BSD-3-Clause"
] | null | null | null | from ipywidgets import widgets,interact, interact_manual
import numpy as np
import pandas as pd
from IPython.display import display,clear_output
from numpy import arange, sin, pi
import plotly.figure_factory as ff
import re
import matplotlib.pyplot as plt
from IPython.display import Image
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode()
# NOTE(review): the next line is an IPython magic, not valid Python — this
# file is a notebook export and cannot be imported as a plain module as-is.
%matplotlib inline
def checkdata(b):
    """Button callback: reload the story spreadsheet and display a Gantt
    chart of the initial plan.

    NOTE(review): `button0` and `story` are not defined in this chunk
    (they presumably come from other notebook cells — verify), and the
    bare-name expressions plus the code after `return` below are no-ops /
    unreachable.
    """
    clear_output()
    display(button0)
    print('Initial Data Condition:')
    checkdata = pd.read_excel('story_'+ story.value+'/story'+ story.value+'.xlsx', sheet_name='sample')
    checkdata
    def gantt_fig(checkdata):
        # Build a Gantt chart of vessel (MV) arrival/departure windows.
        data3 = []
        for row in checkdata.itertuples():
            data3.append(dict(Task=str(row.MV), Start=str(row.Arrival_Date),
                              Finish=str(row.Departure_Date), Resource='Initial Plan'))
            # data3.append(dict(Task=str(row.MV), Start=str(row.FC_Start_Date_change),
            #                   Finish=str(row.FC_End_Date_change), Resource='Resource2'))
        fig = ff.create_gantt(data3, index_col='Resource', title='Gantt Chart', show_colorbar = True, group_tasks = True , height=500, width=1300 )
        fig['layout'].update(legend=dict(traceorder='reversed'))
        return fig
    iplot(gantt_fig(checkdata))
    button0
    checkdata
    return button0, display(checkdata),checkdata
    # button0, display(checkdata),checkdata=checkdata(b)
cf58512d21f3bb124c3398a2b735d1fa164545ea | 812 | py | Python | accounts/migrations/0002_auto_20180531_1023.py | USKPA-dev/uskpa | 45481ba59a55f2c202723d11dde9e6b457f9b71d | [
"CC0-1.0"
] | 2 | 2018-06-07T13:06:15.000Z | 2022-02-19T07:51:40.000Z | accounts/migrations/0002_auto_20180531_1023.py | USKPA-dev/uskpa | 45481ba59a55f2c202723d11dde9e6b457f9b71d | [
"CC0-1.0"
] | 164 | 2018-04-11T15:11:54.000Z | 2021-09-07T23:58:59.000Z | accounts/migrations/0002_auto_20180531_1023.py | USKPA-dev/uskpa | 45481ba59a55f2c202723d11dde9e6b457f9b71d | [
"CC0-1.0"
] | 3 | 2018-04-24T18:36:57.000Z | 2018-06-08T21:12:34.000Z | # Generated by Django 2.0.5 on 2018-05-31 10:23
from django.db import migrations
class Migration(migrations.Migration):
    # Removes the HistoryUser / HistoricalHistoryUser / HistoricalProfile
    # models: foreign-key fields are dropped first so the subsequent
    # DeleteModel operations have no dangling references.

    dependencies = [
        ('accounts', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='historicalhistoryuser',
            name='history_user',
        ),
        migrations.RemoveField(
            model_name='historicalprofile',
            name='history_user',
        ),
        migrations.RemoveField(
            model_name='historicalprofile',
            name='user',
        ),
        migrations.DeleteModel(
            name='HistoryUser',
        ),
        migrations.DeleteModel(
            name='HistoricalHistoryUser',
        ),
        migrations.DeleteModel(
            name='HistoricalProfile',
        ),
    ]
| 23.2 | 47 | 0.552956 | 61 | 812 | 7.262295 | 0.52459 | 0.142212 | 0.176072 | 0.20316 | 0.288939 | 0.288939 | 0.288939 | 0.288939 | 0.288939 | 0 | 0 | 0.035581 | 0.342365 | 812 | 34 | 48 | 23.882353 | 0.794007 | 0.055419 | 0 | 0.571429 | 1 | 0 | 0.198693 | 0.054902 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.035714 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
cf58a225d1a16173cd170707ce55c8de870dc56f | 568 | py | Python | sparse/utils.py | ContinuumIO/sparse | 10da2d31f0228f192b3064ab253bc828b3cf1a50 | [
"BSD-3-Clause"
] | 2 | 2017-09-17T21:22:21.000Z | 2019-08-26T02:28:10.000Z | sparse/utils.py | ContinuumIO/sparse | 10da2d31f0228f192b3064ab253bc828b3cf1a50 | [
"BSD-3-Clause"
] | null | null | null | sparse/utils.py | ContinuumIO/sparse | 10da2d31f0228f192b3064ab253bc828b3cf1a50 | [
"BSD-3-Clause"
] | 4 | 2019-03-21T05:38:06.000Z | 2021-02-23T06:26:48.000Z | import numpy as np
from .core import COO
def assert_eq(x, y):
    """Assert that two array-like objects are equivalent.

    Checks shape and dtype equality, validates lexicographic sorting for COO
    operands that claim to be sorted, then compares values densely with
    ``np.allclose``.
    """
    assert x.shape == y.shape
    assert x.dtype == y.dtype
    # A COO object that claims to be sorted must actually be lexsorted.
    for operand in (x, y):
        if isinstance(operand, COO) and operand.sorted:
            assert is_lexsorted(operand)
    # Densify sparse operands before the numeric comparison.
    dense_x = x.todense() if hasattr(x, 'todense') else x
    dense_y = y.todense() if hasattr(y, 'todense') else y
    assert np.allclose(dense_x, dense_y)
def is_lexsorted(x):
    """Return True if `x`'s linearized coordinates are strictly increasing.

    A zero-dimensional (empty-shape) object is trivially considered sorted.
    """
    if not x.shape:
        return True
    return (np.diff(x.linear_loc()) > 0).all()
| 19.586207 | 61 | 0.549296 | 86 | 568 | 3.569767 | 0.383721 | 0.107492 | 0.091205 | 0.149837 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002597 | 0.322183 | 568 | 28 | 62 | 20.285714 | 0.794805 | 0 | 0 | 0.090909 | 0 | 0 | 0.024648 | 0 | 0 | 0 | 0 | 0 | 0.272727 | 1 | 0.090909 | false | 0 | 0.090909 | 0.045455 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf58f44a787c70c43d9a1a1e3d53a92ccd902710 | 7,019 | py | Python | fusion_platform/translations.py | d-cat-support/fusion-platform-python-sdk | 6f98a60f33a962f6a10861da15affbc28bf4a17a | [
"MIT"
] | null | null | null | fusion_platform/translations.py | d-cat-support/fusion-platform-python-sdk | 6f98a60f33a962f6a10861da15affbc28bf4a17a | [
"MIT"
] | null | null | null | fusion_platform/translations.py | d-cat-support/fusion-platform-python-sdk | 6f98a60f33a962f6a10861da15affbc28bf4a17a | [
"MIT"
] | null | null | null | """
Compiled translations.
author: Matthew Casey
© [Digital Content Analysis Technology Ltd](https://www.d-cat.co.uk)
"""
# Do not modify this file manually as it is built automatically by the localisations.py script.
import i18n
# @formatter:off
# Session / login errors.
i18n.add_translation('session.request_failed', 'API request failed: %{message}', 'en')
i18n.add_translation('session.login_failed', 'Login failed', 'en')
i18n.add_translation('session.missing_password', 'Password must be specified', 'en')
i18n.add_translation('session.missing_email_user_id', 'Either an email address or a user id must be specified', 'en')
# General SDK banner strings.
i18n.add_translation('fusion_platform.support', 'Support: support@d-cat.co.uk', 'en')
i18n.add_translation('fusion_platform.version_date', 'Date: %{version_date}', 'en')
i18n.add_translation('fusion_platform.version', 'Version: %{version}', 'en')
i18n.add_translation('fusion_platform.sdk', 'Fusion Platform(r) SDK', 'en')
# Data file upload/download messages.
i18n.add_translation('models.data_file.failed_download_url', 'Failed to get URL from download file response', 'en')
i18n.add_translation('models.data_file.no_download', 'No download is in progress', 'en')
i18n.add_translation('models.data_file.download_already_in_progress', 'Cannot download file as the download is already in progress', 'en')
i18n.add_translation('models.data_file.organisation_id.description', 'The owning organisation.', 'en')
i18n.add_translation('models.data_file.organisation_id.title', 'Organisation', 'en')
# Data object creation messages.
i18n.add_translation('models.data.no_create', 'No create is in progress', 'en')
i18n.add_translation('models.data.failed_add_missing_file', 'Failed to add file as the file does not exist: %{file}', 'en')
i18n.add_translation('models.data.failed_add_file_not_unique', 'Failed to add file as the id is not unique', 'en')
i18n.add_translation('models.data.failed_add_file_url', 'Failed to get URL from add file response', 'en')
i18n.add_translation('models.data.failed_add_file_id', 'Failed to get id from add file response', 'en')
i18n.add_translation('models.process_execution.execution_failed', 'Execution has failed', 'en')
# Schema field validation messages.
i18n.add_translation('models.fields.uuid.invalid_uuid', 'Not a valid utf-8 string', 'en')
i18n.add_translation('models.fields.url.invalid_url', 'Not a valid URL', 'en')
i18n.add_translation('models.fields.tuple.invalid', 'Not a valid tuple', 'en')
i18n.add_translation('models.fields.timedelta.invalid', 'Not a valid period of time', 'en')
i18n.add_translation('models.fields.string.invalid_utf8', 'Not a valid utf-8 string', 'en')
i18n.add_translation('models.fields.string.invalid', 'Not a valid string', 'en')
i18n.add_translation('models.fields.relativedelta.invalid', 'Not a valid relative period of time', 'en')
i18n.add_translation('models.fields.nested.type', 'Invalid type', 'en')
i18n.add_translation('models.fields.list.invalid', 'Not a valid list', 'en')
i18n.add_translation('models.fields.ip.invalid_ip', 'Not a valid IP address', 'en')
i18n.add_translation('models.fields.integer.too_large', 'Integer too large', 'en')
i18n.add_translation('models.fields.integer.invalid', 'Not a valid integer', 'en')
i18n.add_translation('models.fields.float.special', 'Special numeric values (nan or infinity) are not permitted.', 'en')
i18n.add_translation('models.fields.float.too_large', 'Float too large', 'en')
i18n.add_translation('models.fields.float.invalid', 'Not a valid float', 'en')
i18n.add_translation('models.fields.email.invalid', 'Not a valid email address', 'en')
i18n.add_translation('models.fields.dict.invalid', 'Not a valid dictionary', 'en')
i18n.add_translation('models.fields.decimal.special', 'Special numeric values (nan or infinity) are not permitted', 'en')
i18n.add_translation('models.fields.decimal.too_large', 'Decimal too large', 'en')
i18n.add_translation('models.fields.decimal.invalid', 'Not a valid decimal', 'en')
i18n.add_translation('models.fields.datetime.format', '\'{input}\' cannot be formatted as a {obj_type}', 'en')
i18n.add_translation('models.fields.datetime.invalid_awareness', 'Not a valid {awareness} {obj_type}', 'en')
i18n.add_translation('models.fields.datetime.invalid', 'Not a valid {obj_type}', 'en')
i18n.add_translation('models.fields.boolean.invalid', 'Not a valid boolean', 'en')
# Base model persistence/validation messages.
i18n.add_translation('models.model.update_empty_body', 'Update cannot be requested as there are no attributes to be used (read-only attributes have been removed)', 'en')
i18n.add_translation('models.model.create_empty_body', 'Create cannot be requested as there are no attributes to be used (read-only attributes have been removed)', 'en')
i18n.add_translation('models.model.failed_model_validation', 'Failed to validate model: %{message}', 'en')
i18n.add_translation('models.model.failed_model_new', 'Failed to get model template from response', 'en')
i18n.add_translation('models.model.failed_model_send_and_load', 'Failed to request and load model', 'en')
i18n.add_translation('models.model.no_such_keys', 'No such keys %{keys}', 'en')
i18n.add_translation('models.model.readonly_property', 'Property %{property} is read-only and cannot be set', 'en')
i18n.add_translation('models.model.not_persisted', 'Model is not persisted in the Fusion Platform(r)', 'en')
i18n.add_translation('models.model.already_persisted', 'Model is already persisted in the Fusion Platform(r)', 'en')
# Process configuration/execution messages.
i18n.add_translation('models.process.execution_should_have_started', 'Process execution should have started by now', 'en')
i18n.add_translation('models.process.not_executable', 'Process is not executable', 'en')
i18n.add_translation('models.process.wrong_file_type', 'File type of supplied data object (%{actual}) does not match the file type for the input (%{expected})', 'en')
i18n.add_translation('models.process.data_not_ready', 'Data object is not ready to be used in a process', 'en')
i18n.add_translation('models.process.option_wrong_type', 'Option value should be of type %{type}', 'en')
i18n.add_translation('models.process.cannot_find_option', 'No such option', 'en')
i18n.add_translation('models.process.cannot_find_input', 'No such input', 'en')
i18n.add_translation('models.process.option_not_specified', 'Option name or object must be provided to set option', 'en')
i18n.add_translation('models.process.data_not_specified', 'Data object must be provided to set input', 'en')
i18n.add_translation('models.process.input_not_specified', 'Input number or object must be provided to set input', 'en')
i18n.add_translation('models.process.no_change_executing', 'Process cannot be modified as it is currently executing', 'en')
i18n.add_translation('models.process.option.constrained_values.description', 'The constrained values for the option.', 'en')
i18n.add_translation('models.process.option.constrained_values.title', 'Constrained Values', 'en')
i18n.add_translation('models.process.option.constrained_names.description', 'The constrained value names for the option.', 'en')
i18n.add_translation('models.process.option.constrained_names.title', 'Constrained Names', 'en')
# @formatter:on
| 85.597561 | 169 | 0.774327 | 1,043 | 7,019 | 5.055609 | 0.173538 | 0.088944 | 0.228712 | 0.250332 | 0.646691 | 0.627157 | 0.524559 | 0.390669 | 0.296416 | 0.183197 | 0 | 0.021574 | 0.082063 | 7,019 | 81 | 170 | 86.654321 | 0.796834 | 0.034763 | 0 | 0 | 0 | 0.044118 | 0.668884 | 0.306726 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.014706 | 0.014706 | 0 | 0.014706 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 4 |
cf5b37ee1fc82e3da020ac4e175a1718c4b48d19 | 115 | py | Python | env.py | olukotun-sandbox/name-button | 8205dc783dd72765d44378b0b6ca354352d21ad5 | [
"MIT"
] | null | null | null | env.py | olukotun-sandbox/name-button | 8205dc783dd72765d44378b0b6ca354352d21ad5 | [
"MIT"
] | null | null | null | env.py | olukotun-sandbox/name-button | 8205dc783dd72765d44378b0b6ca354352d21ad5 | [
"MIT"
] | null | null | null | import os
# Use .get() so a missing HOME variable prints `None` instead of crashing
# the debug script with a KeyError.
print('this is home:', os.environ.get('HOME'))
print('this is circle branch:', os.environ['CIRCLE_BRANCH']) | 23 | 60 | 0.704348 | 18 | 115 | 4.444444 | 0.5 | 0.225 | 0.275 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.104348 | 115 | 5 | 60 | 23 | 0.776699 | 0 | 0 | 0 | 0 | 0 | 0.448276 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0.666667 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 7 |
cf63128ae3837cdf01a72550d0f6236a6665d83c | 35 | py | Python | scripts/tcutils/tests/cores_ut.py | rombie/contrail-test | a68c71d6f282142501a7e2e889bbb232fdd82dc3 | [
"Apache-2.0"
] | 5 | 2020-09-29T00:36:57.000Z | 2022-02-16T06:51:32.000Z | tcutils/tests/cores_ut.py | vkolli/contrail-test-perf | db04b8924a2c330baabe3059788b149d957a7d67 | [
"Apache-2.0"
] | 27 | 2019-11-02T02:18:34.000Z | 2022-02-24T18:49:08.000Z | tcutils/tests/cores_ut.py | vkolli/contrail-test-perf | db04b8924a2c330baabe3059788b149d957a7d67 | [
"Apache-2.0"
] | 20 | 2019-11-28T16:02:25.000Z | 2022-01-06T05:56:58.000Z | """Unittests for cores module.
"""
| 11.666667 | 30 | 0.657143 | 4 | 35 | 5.75 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 35 | 2 | 31 | 17.5 | 0.766667 | 0.771429 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 4 |
cf63b316fcbc7d486530689ffda1e935bef34ddd | 7,937 | py | Python | netunnel/server/peer.py | kobimic/netunnel | fe7f627b01deb67e0d7bd7ae949a42db43738785 | [
"Apache-2.0"
] | 18 | 2021-01-20T16:30:47.000Z | 2022-03-08T03:43:11.000Z | netunnel/server/peer.py | kobimic/netunnel | fe7f627b01deb67e0d7bd7ae949a42db43738785 | [
"Apache-2.0"
] | null | null | null | netunnel/server/peer.py | kobimic/netunnel | fe7f627b01deb67e0d7bd7ae949a42db43738785 | [
"Apache-2.0"
] | 4 | 2021-01-24T17:52:26.000Z | 2021-08-08T14:48:03.000Z | from typing import Dict, List
from .static_tunnel import StaticTunnel
from .schemas import StaticTunnelSchema
from ..client import NETunnelClient
from ..common.utils import get_logger
from ..common.exceptions import NETunnelServerNotFound, NETunnelServerError, NETunnelResponseError, NETunnelAuthError
from ..common.auth import NETunnelClientAuth
import asyncio
import aiohttp
class Peer:
    """A remote NETunnel server that this server keeps static tunnels to."""

    def __init__(self, id, name, target_netunnel_url, auth, proxy_url=None, proxy_username=None, proxy_password=None, ssl=None, logger=None):
        """
        Peer is a remote NETunnelServer.
        :param id: unique id for this peer
        :param name: name of the peer
        :param target_netunnel_url: url to the remote netunnel server
        :param proxy_url: url to an http proxy to set when making http requests
        :param proxy_username: username for the proxy
        :param proxy_password: password for the proxy
        :param auth: Instance of subclass of netunnel.common.auth.NETunnelClientAuth that will be used to authenticate the peer
        :param ssl: SSLContext object. False to skip validation, None for default SSL check.
        :param logger: logging.Logger object for logging
        """
        self._id = id
        self.name = name
        self._target_netunnel_url = target_netunnel_url
        self._auth: NETunnelClientAuth = auth
        self._logger = logger or get_logger(f'Peer `{self.name}`')
        self._ssl = ssl
        self._proxy_url = proxy_url
        self._proxy_username = proxy_username
        self._proxy_password = proxy_password
        # mapping from static tunnel id to StaticTunnel object belong to this peer
        self._static_tunnels: Dict[int, StaticTunnel] = {}
        # Used to prevent id duplications when creating new static tunnels
        self._creating_static_tunnel_lock = asyncio.Lock()

    @property
    def id(self) -> int:
        # Unique identifier of this peer.
        return self._id

    @property
    def target_netunnel_url(self) -> str:
        # URL of the remote netunnel server.
        return self._target_netunnel_url

    @property
    def auth(self):
        # Authentication object used when connecting to the peer.
        return self._auth

    @property
    def auth_data(self):
        # Serializable representation of the auth object (for persistence).
        return self._auth.dump_object()

    @property
    def static_tunnels(self) -> List[StaticTunnel]:
        """
        Return a list of the static tunnels to this peer. Used by the nested field of PeerSchema.
        Returns a fresh list, so callers may delete tunnels while iterating it.
        """
        return list(self._static_tunnels.values())

    async def update_settings(self, new_url, new_auth=None):
        """
        Set new settings for either target_netunnel_url, auth or both.
        Restart the static tunnels of this peer so they will use the new settings
        """
        if new_url:
            self._target_netunnel_url = new_url
        if new_auth:
            self._auth = new_auth
        # Recreate each static tunnel so it picks up the new url/auth.
        # `self.static_tunnels` is a copy, so deleting while iterating is safe.
        for static_tunnel in self.static_tunnels:
            static_tunnel_settings = StaticTunnelSchema().dump3(static_tunnel)
            await self.delete_static_tunnel(static_tunnel.id)
            await self.add_static_tunnel(**static_tunnel_settings)

    def _generate_static_tunnel_id(self) -> int:
        """
        Generates an unused static tunnel id
        """
        # ids grow monotonically from 1; max()+1 can never collide with a
        # live tunnel.
        if self._static_tunnels:
            return max(self._static_tunnels.keys()) + 1
        return 1

    def _new_client(self):
        """
        Return a NETunnelClient to the peer
        """
        return NETunnelClient(server_url=self._target_netunnel_url, proxy_url=self._proxy_url,
                              proxy_username=self._proxy_username, proxy_password=self._proxy_password,
                              auth_client=self._auth, ssl=self._ssl, logger=self._logger)

    async def verify_connectivity(self):
        """
        Make sure there is a connection to the peer by querying its version.
        Raises an exception if peer is not connected
        """
        try:
            async with self._new_client() as client:
                await client.get_remote_version()
        except NETunnelAuthError as err:
            # Keep the full traceback at debug level; surface a concise error.
            self._logger.debug('The following exception raised when trying to connect to the peer:', exc_info=err)
            raise NETunnelAuthError(f'Failed to authenticate with peer `{self.name}`')
        except aiohttp.ClientError as err:
            self._logger.debug('The following exception raised when trying to connect to the peer:', exc_info=err)
            raise NETunnelServerError(f'Failed to connect with peer `{self.name}`')
        return True

    async def set_new_proxy(self, proxy_url, proxy_username, proxy_password):
        """
        Set a new http proxy to use when communicating with this peer
        """
        self._proxy_url = proxy_url
        self._proxy_username = proxy_username
        self._proxy_password = proxy_password
        # Propagate the new proxy settings to every existing static tunnel.
        for static_tunnel in self.static_tunnels:
            await static_tunnel.set_new_proxy(proxy_url, proxy_username, proxy_password)

    async def add_static_tunnel(self, tunnel_remote_address, tunnel_remote_port, tunnel_local_port, tunnel_local_address, id=None, verify_connectivity=True):
        """
        Creates a new static tunnel for this peer and start it.
        Return the generated static tunnel
        :param tunnel_remote_address: Remote address used as the exit address of the tunnel
        :param tunnel_remote_port: Remote port used as the exit port of the tunnel
        :param tunnel_local_address: Local address used as the entrance address of the tunnel
        :param tunnel_local_port: Local port used as the entrance port of the tunnel
        :param id: Optional id to set this tunnel. Used when tunnel is initialized from the config
        :param verify_connectivity: Whether to verify connectivity before adding the tunnel
        """
        if verify_connectivity:
            await self.verify_connectivity()
        # Lock so concurrent calls cannot generate the same tunnel id.
        async with self._creating_static_tunnel_lock:
            # Set static tunnel id
            # NOTE(review): `id or ...` falls back to a generated id for any
            # falsy value, so an explicit id of 0 would be ignored — confirm
            # ids are always >= 1.
            static_tunnel_id = id or self._generate_static_tunnel_id()
            if id in self._static_tunnels:
                raise RuntimeError(f'ID `{id}` for static tunnel on peer `{self.name}` is already in use')
            # Create and start the new static tunnel
            static_tunnel = StaticTunnel(id=static_tunnel_id, tunnel_local_port=tunnel_local_port,
                                         tunnel_local_address=tunnel_local_address, tunnel_remote_port=tunnel_remote_port,
                                         tunnel_remote_address=tunnel_remote_address, target_netunnel_url=self._target_netunnel_url,
                                         auth_client=self._auth, proxy_url=self._proxy_url, proxy_username=self._proxy_username,
                                         proxy_password=self._proxy_password, ssl=self._ssl, logger=self._logger)
            self._logger.info('Creating static tunnel `%s` to peer `%s`', static_tunnel.get_tunnel_display_name(), self.name)
            await static_tunnel.start()
            await static_tunnel.wait_online()
            self._static_tunnels[static_tunnel_id] = static_tunnel
            return static_tunnel

    async def delete_static_tunnels(self):
        """
        Stop and remove all static tunnels
        """
        while self._static_tunnels:
            _, static_tunnel = self._static_tunnels.popitem()
            await static_tunnel.stop()

    async def delete_static_tunnel(self, id):
        """
        Remove static tunnel from this peer by id
        """
        if id not in self._static_tunnels:
            raise NETunnelServerNotFound(f'No static tunnel by id `{id}` on `{self.name}`')
        static_tunnel = self._static_tunnels.pop(id)
        await static_tunnel.stop()

    def get_static_tunnel(self, id):
        """
        Return a static tunnel by ID
        """
        if id not in self._static_tunnels:
            raise NETunnelServerNotFound(f'No static tunnel by id `{id}` on `{self.name}`')
        return self._static_tunnels[id]
| 45.354286 | 157 | 0.666877 | 1,006 | 7,937 | 5.017893 | 0.173956 | 0.099842 | 0.047147 | 0.0208 | 0.292393 | 0.203645 | 0.162044 | 0.133914 | 0.133914 | 0.133914 | 0 | 0.000515 | 0.266473 | 7,937 | 174 | 158 | 45.614943 | 0.866541 | 0.121834 | 0 | 0.207921 | 0 | 0 | 0.077955 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089109 | false | 0.069307 | 0.089109 | 0.039604 | 0.29703 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
cf6473217e7645ed213ed7c309d9dc071c16091a | 129 | py | Python | dl/initializers/initializer_base.py | nuka137/DeepLearningFramework | 613881e46b48c2206b9424a49106455cb2336d2e | [
"MIT"
] | 10 | 2020-06-28T05:50:41.000Z | 2022-01-30T01:31:43.000Z | dl/initializers/initializer_base.py | nuka137/DeepLearningFramework | 613881e46b48c2206b9424a49106455cb2336d2e | [
"MIT"
] | null | null | null | dl/initializers/initializer_base.py | nuka137/DeepLearningFramework | 613881e46b48c2206b9424a49106455cb2336d2e | [
"MIT"
] | 1 | 2020-07-26T12:36:32.000Z | 2020-07-26T12:36:32.000Z | class InitializerBase:
    def __init__(self):
        # No state to initialize; defined so subclasses have an explicit
        # base constructor to call.
        pass
def init(self, shape):
raise NotImprementedError()
| 16.125 | 35 | 0.612403 | 12 | 129 | 6.25 | 0.75 | 0.186667 | 0.293333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.310078 | 129 | 7 | 36 | 18.428571 | 0.842697 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.4 | false | 0.2 | 0 | 0 | 0.6 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 5 |
cf6581484116a18845484669a17d5f8076cfe782 | 2,612 | py | Python | baseline/xray.py | RoliKhanna/Anchor-Free | e3d599b7cbdc988ad7720c1e8324cabe87917d59 | [
"MIT"
] | null | null | null | baseline/xray.py | RoliKhanna/Anchor-Free | e3d599b7cbdc988ad7720c1e8324cabe87917d59 | [
"MIT"
] | null | null | null | baseline/xray.py | RoliKhanna/Anchor-Free | e3d599b7cbdc988ad7720c1e8324cabe87917d59 | [
"MIT"
] | 1 | 2019-11-25T22:08:19.000Z | 2019-11-25T22:08:19.000Z |
from nltk.corpus import reuters
import sys
import numpy as np
from scipy import optimize
# Loading data here
# NOTE: requires the NLTK Reuters corpus to be available locally
# (nltk.download('reuters')); the whole corpus is materialized in memory here.
train_documents, train_categories = zip(*[(reuters.raw(i), reuters.categories(i)) for i in reuters.fileids() if i.startswith('training/')])
test_documents, test_categories = zip(*[(reuters.raw(i), reuters.categories(i)) for i in reuters.fileids() if i.startswith('test/')])
def col2norm(X):
    """Return the squared Euclidean norm of each column of X."""
    squared = np.abs(X) ** 2
    return squared.sum(axis=0)
def xray(X, r):
    """Greedily select `r` columns of X (X-ray algorithm for separable NMF).

    Returns the list of selected column indices; the residual R is updated
    after every accepted column via a nonnegative least-squares fit.
    """
    cols = []
    R = np.copy(X)  # residual of X w.r.t. the currently selected columns
    while len(cols) < r:
        # NOTE(review): `i` is never used afterwards — dead code, presumably
        # left over from another X-ray selection variant.
        i = np.argmax(col2norm(X))
        # Loop until we choose a column that has not been selected.
        while True:
            # NOTE(review): `p` is also unused; the random-projection X-ray
            # variant would use it, this implementation does not.
            p = np.random.random((X.shape[0], 1))
            # Score each column by alignment of the residual with it,
            # normalized by the column's squared norm.
            scores = col2norm(np.dot(R.T, X)) / col2norm(X)
            scores[cols] = -1  # IMPORTANT
            best_col = np.argmax(scores)
            if best_col in cols:
                # Re-try
                continue
            else:
                cols.append(best_col)
                H, rel_res = NNLSFrob(X, cols)
                R = X - np.dot(X[:, cols], H)
                break
    return cols
def GP_cols(data, r):
votes = {}
for row in data:
min_ind = np.argmin(row)
max_ind = np.argmax(row)
for ind in [min_ind, max_ind]:
if ind not in votes:
votes[ind] = 1
else:
votes[ind] += 1
votes = sorted(votes.items(), key=lambda x: x[1], reverse=True)
return [x[0] for x in votes][0:r]
def NNLSFrob(X, cols):
    """Solve min_H ||X - X[:, cols] @ H||_F subject to H >= 0.

    :param X: data matrix of shape (m, n)
    :param cols: list of selected column indices
    :return: (H, rel_res) where H has shape (len(cols), n) and rel_res is the
        residual Frobenius norm relative to ||X||_F
    """
    ncols = X.shape[1]
    H = np.zeros((len(cols), ncols))
    # Fixed: `xrange` is Python 2 only and raises NameError on Python 3.
    for i in range(ncols):
        sol, res = optimize.nnls(X[:, cols], X[:, i])
        H[:, i] = sol
    rel_res = np.linalg.norm(X - np.dot(X[:, cols], H), 'fro')
    rel_res /= np.linalg.norm(X, 'fro')
    return H, rel_res
def ComputeNMF(data, colnorms, r):
    """Compute a rank-`r` separable NMF of `data` via X-ray column selection.

    :param data: data matrix
    :param colnorms: kept for interface compatibility; not used by the
        current implementation
    :param r: number of columns to select
    :return: (cols, H, rel_res) — selected column indices, nonnegative
        coefficient matrix, and the relative Frobenius residual
    """
    data = np.copy(data)
    # Removed dead code: the original also computed pinv(diag(colnorms)) and
    # a full SVD of `data`, but neither result was used anywhere; the SVD in
    # particular is expensive for large matrices.
    cols = xray(data, r)
    H, rel_res = NNLSFrob(data, cols)
    return cols, H, rel_res
def ParseMatrix(matpath):
    """Read a whitespace-delimited matrix file.

    The first token of each line is a row label and is skipped; the rest are
    parsed as floats. Returns the values as a NumPy array.
    """
    rows = []
    with open(matpath, 'r') as handle:
        for line in handle:
            tokens = line.split()
            rows.append([float(tok) for tok in tokens[1:]])
    return np.array(rows)
def ParseColnorms(colpath):
    """Read one column norm per line.

    The norm is the last whitespace-separated token on each line; all other
    tokens (e.g. a label) are ignored. Returns a list of floats.
    """
    with open(colpath, 'r') as handle:
        return [float(line.split()[-1]) for line in handle]
# NOTE(review): ParseMatrix/ParseColnorms expect *file paths*, but
# train_documents/train_categories are tuples of raw Reuters strings —
# these calls look like placeholders and will fail as written; confirm the
# intended input files before running.
data = ParseMatrix(train_documents)
colnorms = ParseColnorms(train_categories)
# Target rank (number of selected columns) for the separable NMF.
r = 4
cols, H, rel_res = ComputeNMF(data, colnorms, r)
cols.sort()
print("Final result: ", rel_res)
| 28.086022 | 139 | 0.568147 | 387 | 2,612 | 3.775194 | 0.315245 | 0.032854 | 0.023956 | 0.031485 | 0.144422 | 0.144422 | 0.102669 | 0.102669 | 0.102669 | 0.102669 | 0 | 0.009677 | 0.287902 | 2,612 | 92 | 140 | 28.391304 | 0.775806 | 0.035222 | 0 | 0.027397 | 0 | 0 | 0.01432 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09589 | false | 0 | 0.054795 | 0.013699 | 0.246575 | 0.013699 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf664ab43e12cf24ecd3e41b3708349ac277b2fd | 2,487 | py | Python | models/deepset.py | sgvdan/OCTransformer | 4bc6861406ea75afd23bdf1608a088dcba99ff14 | [
"Apache-2.0"
] | null | null | null | models/deepset.py | sgvdan/OCTransformer | 4bc6861406ea75afd23bdf1608a088dcba99ff14 | [
"Apache-2.0"
] | null | null | null | models/deepset.py | sgvdan/OCTransformer | 4bc6861406ea75afd23bdf1608a088dcba99ff14 | [
"Apache-2.0"
] | null | null | null | import torch
from torch import nn
# Obtained from: https://github.com/manzilzaheer/DeepSets/blob/master/PointClouds/classifier.py#L58
class PermEqui1_mean(nn.Module):
    """Permutation-equivariant linear layer (mean-subtraction variant).

    Subtracts the per-set mean along dim 1 before applying a shared linear
    map, so permuting the set elements permutes the outputs identically.
    """

    def __init__(self, in_dim, out_dim):
        super().__init__()
        # Attribute is named `Gamma` so existing state dicts stay compatible.
        self.Gamma = nn.Linear(in_dim, out_dim)

    def forward(self, x):
        centered = x - x.mean(1, keepdim=True)
        return self.Gamma(centered)
class DeepSet(nn.Module):
    """Deep Sets classifier over a set of image slices.

    Each slice is embedded by `backbone`, spatially pooled (concatenated
    max+avg pool), passed through a permutation-equivariant network (phi),
    averaged over the set dimension, and classified by an MLP (ro).
    """

    def __init__(self, backbone, x_dim, d_dim, num_classes):
        """
        :param backbone:
        :param x_dim: backbone's output dim
        :param d_dim: the intermediate dim
        :param num_classes: number of classes to classify for
        """
        super().__init__()
        self.backbone = backbone
        # Fixed: the original read `self.phi = self.phi = nn.Sequential(...)`,
        # a harmless but confusing duplicated assignment.
        self.phi = nn.Sequential(
            PermEqui1_mean(x_dim, d_dim),
            nn.ELU(inplace=True),
            PermEqui1_mean(d_dim, d_dim),
            nn.ELU(inplace=True),
            PermEqui1_mean(d_dim, d_dim),
            nn.ELU(inplace=True),
        )
        self.ro = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(d_dim, d_dim),
            nn.ELU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(d_dim, num_classes),
        )

    # Taken from SliverNet
    def nonadaptiveconcatpool2d(self, x, k):
        """Concatenate max- and avg-pooled `x`, with kernel and stride `k`."""
        # concatenating average and max pool, with kernel and stride the same
        ap = torch.nn.functional.avg_pool2d(x, kernel_size=k, stride=k)
        mp = torch.nn.functional.max_pool2d(x, kernel_size=k, stride=k)
        return torch.cat([mp, ap], 1)

    def forward(self, x):
        """Classify a batch of slice sets.

        :param x: tensor of shape (batch, slices, channels, height, width)
        :return: logits of shape (batch, num_classes)
        """
        batch_size, slices_num, channels, height, width = x.shape
        # Flatten the set dimension so the backbone sees a plain image batch.
        x = x.view(batch_size * slices_num, channels, height, width)
        if x.shape[0] > 100:  # Cuda & ResNet are having trouble with long vectors, so split
            split = torch.split(x, 100)
            temp_features = []
            for chunk in split:
                temp_features.append(self.backbone(chunk))
            features = torch.cat(temp_features)
        else:
            features = self.backbone(x)  # B x M x h x w - B=batch size, M=#slices_per_volume, h=height, w=width
        # Pool each feature map down to a single (max, avg) vector.
        kernel_size = (features.shape[-2], features.shape[-1])
        features = self.nonadaptiveconcatpool2d(features, kernel_size).view(batch_size, slices_num, -1)
        phi_output = self.phi(features)
        # Mean over the set dimension makes the model permutation invariant.
        sum_output = phi_output.mean(1)
        ro_output = self.ro(sum_output)
        return ro_output
| 34.068493 | 112 | 0.602734 | 339 | 2,487 | 4.235988 | 0.327434 | 0.027855 | 0.024373 | 0.02507 | 0.245822 | 0.201253 | 0.201253 | 0.114903 | 0.100975 | 0.067549 | 0 | 0.015194 | 0.285485 | 2,487 | 72 | 113 | 34.541667 | 0.792909 | 0.184158 | 0 | 0.24 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.04 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf6665e0703b869005a49a58e097cb3fc9a32910 | 20,965 | py | Python | dft_workflow/job_analysis/collect_collate_dft_data/test_models_on_diff_oer_sets/test_models_on_diff_oer_sets.py | raulf2012/PROJ_IrOx_OER | 56883d6f5b62e67703fe40899e2e68b3f5de143b | [
"MIT"
] | 1 | 2022-03-21T04:43:47.000Z | 2022-03-21T04:43:47.000Z | dft_workflow/job_analysis/collect_collate_dft_data/test_models_on_diff_oer_sets/test_models_on_diff_oer_sets.py | raulf2012/PROJ_IrOx_OER | 56883d6f5b62e67703fe40899e2e68b3f5de143b | [
"MIT"
] | null | null | null | dft_workflow/job_analysis/collect_collate_dft_data/test_models_on_diff_oer_sets/test_models_on_diff_oer_sets.py | raulf2012/PROJ_IrOx_OER | 56883d6f5b62e67703fe40899e2e68b3f5de143b | [
"MIT"
] | 1 | 2021-02-13T12:55:02.000Z | 2021-02-13T12:55:02.000Z | # ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python [conda env:PROJ_irox_oer] *
# language: python
# name: conda-env-PROJ_irox_oer-py
# ---
# # Test ML models on different OER set picking heuristics
# ---
# ### Import Modules
# +
import os
print(os.getcwd())
import sys

import time; ti = time.time()  # start time, used to report script duration

import pickle

import numpy as np
import pandas as pd

# #########################################################
# Project-local helpers.
from methods import (
    get_df_features_targets,
    )

# +
from methods_models import run_gp_workflow

# Make the model-building helpers importable from the project tree.
sys.path.insert(0,
    os.path.join(
        os.environ["PROJ_irox_oer"],
        "workflow/model_building"))
from methods_model_building import (
    simplify_df_features_targets,
    run_kfold_cv_wf,
    process_feature_targets_df,
    process_pca_analysis,
    pca_analysis,
    run_regression_wf,
    )
# -
from methods import isnotebook
# Pick notebook- vs terminal-friendly behavior (progress bars, verbosity,
# inline plotting) depending on where the script is executed.
isnotebook_i = isnotebook()
if isnotebook_i:
    from tqdm.notebook import tqdm
    verbose = True
    show_plot = True
else:
    from tqdm import tqdm
    verbose = False
    show_plot = False
# ### Script Inputs
# +
num_pca_i = 8
gp_settings = {
"noise": 0.02542,
}
# Length scale parameter
sigma_l_default = 1.8 # Length scale parameter
sigma_f_default = 0.2337970892240513 # Scaling parameter.
kdict = [
{
'type': 'gaussian',
'dimension': 'single',
'width': sigma_l_default,
'scaling': sigma_f_default,
'scaling_bounds': ((0.0001, 10.),),
},
]
# -
cols_to_keep = [
# ('features', 'oh', 'O_magmom'),
# ('features', 'oh', 'Ir_magmom'),
# ('features', 'oh', 'active_o_metal_dist'),
# ('features', 'oh', 'angle_O_Ir_surf_norm'),
# ('features', 'oh', 'ir_o_mean'),
# ('features', 'oh', 'ir_o_std'),
# ('features', 'oh', 'octa_vol'),
('features', 'o', 'O_magmom'),
('features', 'o', 'Ir_magmom'),
('features', 'o', 'active_o_metal_dist'),
# ('features', 'o', 'angle_O_Ir_surf_norm'),
('features', 'o', 'ir_o_mean'),
('features', 'o', 'ir_o_std'),
('features', 'o', 'octa_vol'),
# ('features', 'o', 'Ir*O_bader'),
('features', 'o', 'Ir_bader'),
# ('features', 'o', 'O_bader'),
('features', 'o', 'p_band_center'),
# ('features', 'o', 'Ir*O_bader/ir_o_mean'),
('features', 'dH_bulk', ''),
('features', 'volume_pa', ''),
('features', 'bulk_oxid_state', ''),
('features', 'effective_ox_state', ''),
# ('features_pre_dft', 'active_o_metal_dist__pre', ''),
# ('features_pre_dft', 'ir_o_mean__pre', ''),
# ('features_pre_dft', 'ir_o_std__pre', ''),
# ('features_pre_dft', 'octa_vol__pre', ''),
# #####################################################
# TARGETS #############################################
# ('targets', 'e_o', ''),
# ('targets', 'e_oh', ''),
# ('targets', 'g_o_m_oh', ''),
# ('targets', 'e_o_m_oh', ''),
# ('targets', 'g_o', ''),
('targets', 'g_oh', ''),
]
# ### Reading Data
df_features_targets = get_df_features_targets()
df_m = df_features_targets
# +
root_dir = os.path.join(
os.environ["PROJ_irox_oer"],
"dft_workflow/job_analysis/collect_collate_dft_data",
)
# #########################################################
path_i = os.path.join(root_dir,
"out_data/df_ads__from_oh.pickle",)
with open(path_i, "rb") as fle:
df_ads__from_oh = pickle.load(fle)
# #########################################################
path_i = os.path.join(root_dir,
"out_data/df_ads__low_e.pickle",)
with open(path_i, "rb") as fle:
df_ads__low_e = pickle.load(fle)
# #########################################################
path_i = os.path.join(root_dir,
"out_data/df_ads__magmom.pickle",)
with open(path_i, "rb") as fle:
df_ads__magmom = pickle.load(fle)
# #########################################################
path_i = os.path.join(root_dir,
"out_data/df_ads__mine.pickle",)
with open(path_i, "rb") as fle:
df_ads__mine = pickle.load(fle)
# #########################################################
path_i = os.path.join(root_dir,
"out_data/df_ads__mine_2.pickle",)
with open(path_i, "rb") as fle:
df_ads__mine_2 = pickle.load(fle)
# -
# ### Set index on OER set dataframes
# +
df_ads__from_oh = df_ads__from_oh.set_index(
["compenv", "slab_id", "active_site", ],
drop=False)
df_ads__low_e = df_ads__low_e.set_index(
["compenv", "slab_id", "active_site", ],
drop=False)
df_ads__magmom = df_ads__magmom.set_index(
["compenv", "slab_id", "active_site", ],
drop=False)
df_ads__mine = df_ads__mine.set_index(
["compenv", "slab_id", "active_site", ],
drop=False)
df_ads__mine_2 = df_ads__mine_2.set_index(
["compenv", "slab_id", "active_site", ],
drop=False)
# +
df_m_wo_y = df_m.drop(
columns=[
("targets", "g_o", "", ),
("targets", "g_oh", "", ),
],
)
df_m_wo_y.iloc[0:2]
# -
# ## `from_oh`
# + jupyter={"source_hidden": true}
# #########################################################
df_ads__from_oh_y = df_ads__from_oh[["g_o", "g_oh", ]]
new_cols = []
for col_i in df_ads__from_oh_y.columns:
new_col_i = ("targets", col_i, "", )
new_cols.append(new_col_i)
idx = pd.MultiIndex.from_tuples(new_cols)
df_ads__from_oh_y.columns = idx
# #########################################################
df_m__from_oh = pd.concat([
df_m_wo_y,
df_ads__from_oh_y,
], axis=1)
df_m__from_oh = df_m__from_oh.reindex(
columns=list(df_m__from_oh.columns.levels[0]),
level=0)
# #########################################################
df_m__from_oh_2 = df_m__from_oh[
cols_to_keep
]
# + jupyter={"source_hidden": true}
adsorbates = ["o", "oh", "ooh", ]
new_cols = []
for col_i in df_m__from_oh_2.columns:
# print(col_i)
new_col_i = None
if col_i[0] == "targets":
new_col_i = ("targets", col_i[1], )
elif col_i[0] == "features" and col_i[1] in adsorbates:
new_col_i = ("features", col_i[2], )
elif col_i[0] == "features" and col_i[2] == "":
new_col_i = ("features", col_i[1], )
else:
print("Woops")
new_cols.append(new_col_i)
idx = pd.MultiIndex.from_tuples(new_cols)
df_m__from_oh_2.columns = idx
df_m__from_oh_2 = df_m__from_oh_2.dropna(how="any")
# -
df_m__from_oh_2.shape
# + jupyter={"source_hidden": true}
cols_to_use = df_m__from_oh_2["features"].columns.tolist()
out_dict = run_kfold_cv_wf(
df_features_targets=df_m__from_oh_2,
cols_to_use=cols_to_use,
run_pca=True,
num_pca_comp=num_pca_i,
k_fold_partition_size=30,
model_workflow=run_gp_workflow,
model_settings=dict(
gp_settings=gp_settings,
kdict=kdict,
),
)
# #####################################################
df_target_pred = out_dict["df_target_pred"]
MAE = out_dict["MAE"]
R2 = out_dict["R2"]
PCA = out_dict["pca"]
regression_model_list = out_dict["regression_model_list"]
df_target_pred_on_train = out_dict["df_target_pred_on_train"]
MAE_pred_on_train = out_dict["MAE_pred_on_train"]
RM_2 = out_dict["RM_2"]
# #####################################################
if verbose:
print(
"MAE: ",
np.round(MAE, 5),
" eV",
sep="")
print(
"R2: ",
np.round(R2, 5),
sep="")
print(
"MAE (predicting on train set): ",
np.round(MAE_pred_on_train, 5),
sep="")
# -
# ## `low_e`
# + jupyter={"source_hidden": true}
# #########################################################
df_ads__low_e_y = df_ads__low_e[["g_o", "g_oh", ]]
new_cols = []
for col_i in df_ads__low_e_y.columns:
new_col_i = ("targets", col_i, "", )
new_cols.append(new_col_i)
idx = pd.MultiIndex.from_tuples(new_cols)
df_ads__low_e_y.columns = idx
# #########################################################
df_m__low_e = pd.concat([
df_m_wo_y,
df_ads__low_e_y,
], axis=1)
df_m__low_e = df_m__low_e.reindex(
columns=list(df_m__low_e.columns.levels[0]),
level=0)
# #########################################################
df_m__low_e_2 = df_m__low_e[
cols_to_keep
]
# + jupyter={"source_hidden": true}
adsorbates = ["o", "oh", "ooh", ]
new_cols = []
for col_i in df_m__low_e_2.columns:
# print(col_i)
new_col_i = None
if col_i[0] == "targets":
new_col_i = ("targets", col_i[1], )
elif col_i[0] == "features" and col_i[1] in adsorbates:
new_col_i = ("features", col_i[2], )
elif col_i[0] == "features" and col_i[2] == "":
new_col_i = ("features", col_i[1], )
else:
print("Woops")
new_cols.append(new_col_i)
idx = pd.MultiIndex.from_tuples(new_cols)
df_m__low_e_2.columns = idx
df_m__low_e_2 = df_m__low_e_2.dropna(how="any")
# + jupyter={"source_hidden": true}
cols_to_use = df_m__low_e_2["features"].columns.tolist()
out_dict = run_kfold_cv_wf(
df_features_targets=df_m__low_e_2,
cols_to_use=cols_to_use,
run_pca=True,
num_pca_comp=num_pca_i,
k_fold_partition_size=30,
model_workflow=run_gp_workflow,
model_settings=dict(
gp_settings=gp_settings,
kdict=kdict,
),
)
# #####################################################
df_target_pred = out_dict["df_target_pred"]
MAE = out_dict["MAE"]
R2 = out_dict["R2"]
PCA = out_dict["pca"]
regression_model_list = out_dict["regression_model_list"]
df_target_pred_on_train = out_dict["df_target_pred_on_train"]
MAE_pred_on_train = out_dict["MAE_pred_on_train"]
RM_2 = out_dict["RM_2"]
# #####################################################
if verbose:
print(
"MAE: ",
np.round(MAE, 5),
" eV",
sep="")
print(
"R2: ",
np.round(R2, 5),
sep="")
print(
"MAE (predicting on train set): ",
np.round(MAE_pred_on_train, 5),
sep="")
# -
# ## `magmom`
# + jupyter={"source_hidden": true}
# #########################################################
df_ads__magmom_y = df_ads__magmom[["g_o", "g_oh", ]]
new_cols = []
for col_i in df_ads__magmom_y.columns:
new_col_i = ("targets", col_i, "", )
new_cols.append(new_col_i)
idx = pd.MultiIndex.from_tuples(new_cols)
df_ads__magmom_y.columns = idx
# #########################################################
df_m__magmom = pd.concat([
df_m_wo_y,
df_ads__magmom_y,
], axis=1)
df_m__magmom = df_m__magmom.reindex(
columns=list(df_m__magmom.columns.levels[0]),
level=0)
# #########################################################
df_m__magmom_2 = df_m__magmom[
cols_to_keep
]
# + jupyter={"source_hidden": true}
adsorbates = ["o", "oh", "ooh", ]
new_cols = []
for col_i in df_m__magmom_2.columns:
# print(col_i)
new_col_i = None
if col_i[0] == "targets":
new_col_i = ("targets", col_i[1], )
elif col_i[0] == "features" and col_i[1] in adsorbates:
new_col_i = ("features", col_i[2], )
elif col_i[0] == "features" and col_i[2] == "":
new_col_i = ("features", col_i[1], )
else:
print("Woops")
new_cols.append(new_col_i)
idx = pd.MultiIndex.from_tuples(new_cols)
df_m__magmom_2.columns = idx
df_m__magmom_2 = df_m__magmom_2.dropna(how="any")
# + jupyter={"source_hidden": true}
cols_to_use = df_m__magmom_2["features"].columns.tolist()
out_dict = run_kfold_cv_wf(
df_features_targets=df_m__magmom_2,
cols_to_use=cols_to_use,
run_pca=True,
num_pca_comp=num_pca_i,
k_fold_partition_size=30,
model_workflow=run_gp_workflow,
model_settings=dict(
gp_settings=gp_settings,
kdict=kdict,
),
)
# #####################################################
df_target_pred = out_dict["df_target_pred"]
MAE = out_dict["MAE"]
R2 = out_dict["R2"]
PCA = out_dict["pca"]
regression_model_list = out_dict["regression_model_list"]
df_target_pred_on_train = out_dict["df_target_pred_on_train"]
MAE_pred_on_train = out_dict["MAE_pred_on_train"]
RM_2 = out_dict["RM_2"]
# #####################################################
if verbose:
print(
"MAE: ",
np.round(MAE, 5),
" eV",
sep="")
print(
"R2: ",
np.round(R2, 5),
sep="")
print(
"MAE (predicting on train set): ",
np.round(MAE_pred_on_train, 5),
sep="")
# -
# ## `mine`
# + jupyter={"source_hidden": true}
# #########################################################
df_ads__mine_y = df_ads__mine[["g_o", "g_oh", ]]
new_cols = []
for col_i in df_ads__mine_y.columns:
new_col_i = ("targets", col_i, "", )
new_cols.append(new_col_i)
idx = pd.MultiIndex.from_tuples(new_cols)
df_ads__mine_y.columns = idx
# #########################################################
df_m__mine = pd.concat([
df_m_wo_y,
df_ads__mine_y,
], axis=1)
df_m__mine = df_m__mine.reindex(
columns=list(df_m__mine.columns.levels[0]),
level=0)
# #########################################################
df_m__mine_2 = df_m__mine[
cols_to_keep
]
# + jupyter={"source_hidden": true}
adsorbates = ["o", "oh", "ooh", ]
new_cols = []
for col_i in df_m__mine_2.columns:
# print(col_i)
new_col_i = None
if col_i[0] == "targets":
new_col_i = ("targets", col_i[1], )
elif col_i[0] == "features" and col_i[1] in adsorbates:
new_col_i = ("features", col_i[2], )
elif col_i[0] == "features" and col_i[2] == "":
new_col_i = ("features", col_i[1], )
else:
print("Woops")
new_cols.append(new_col_i)
idx = pd.MultiIndex.from_tuples(new_cols)
df_m__mine_2.columns = idx
df_m__mine_2 = df_m__mine_2.dropna(how="any")
# + jupyter={"source_hidden": true}
cols_to_use = df_m__mine_2["features"].columns.tolist()
out_dict = run_kfold_cv_wf(
df_features_targets=df_m__mine_2,
cols_to_use=cols_to_use,
run_pca=True,
num_pca_comp=num_pca_i,
k_fold_partition_size=30,
model_workflow=run_gp_workflow,
model_settings=dict(
gp_settings=gp_settings,
kdict=kdict,
),
)
# #####################################################
df_target_pred = out_dict["df_target_pred"]
MAE = out_dict["MAE"]
R2 = out_dict["R2"]
PCA = out_dict["pca"]
regression_model_list = out_dict["regression_model_list"]
df_target_pred_on_train = out_dict["df_target_pred_on_train"]
MAE_pred_on_train = out_dict["MAE_pred_on_train"]
RM_2 = out_dict["RM_2"]
# #####################################################
if verbose:
print(
"MAE: ",
np.round(MAE, 5),
" eV",
sep="")
print(
"R2: ",
np.round(R2, 5),
sep="")
print(
"MAE (predicting on train set): ",
np.round(MAE_pred_on_train, 5),
sep="")
# -
# ## `mine_2`
# + jupyter={"source_hidden": true}
# #########################################################
df_ads__mine_2_y = df_ads__mine_2[["g_o", "g_oh", ]]
new_cols = []
for col_i in df_ads__mine_2_y.columns:
new_col_i = ("targets", col_i, "", )
new_cols.append(new_col_i)
idx = pd.MultiIndex.from_tuples(new_cols)
df_ads__mine_2_y.columns = idx
# #########################################################
df_m__mine_2 = pd.concat([
df_m_wo_y,
df_ads__mine_2_y,
], axis=1)
df_m__mine_2 = df_m__mine_2.reindex(
columns=list(df_m__mine_2.columns.levels[0]),
level=0)
# #########################################################
df_m__mine_2_2 = df_m__mine_2[
cols_to_keep
]
# + jupyter={"source_hidden": true}
adsorbates = ["o", "oh", "ooh", ]
new_cols = []
for col_i in df_m__mine_2_2.columns:
# print(col_i)
new_col_i = None
if col_i[0] == "targets":
new_col_i = ("targets", col_i[1], )
elif col_i[0] == "features" and col_i[1] in adsorbates:
new_col_i = ("features", col_i[2], )
elif col_i[0] == "features" and col_i[2] == "":
new_col_i = ("features", col_i[1], )
else:
print("Woops")
new_cols.append(new_col_i)
idx = pd.MultiIndex.from_tuples(new_cols)
df_m__mine_2_2.columns = idx
df_m__mine_2_2 = df_m__mine_2_2.dropna(how="any")
# -
df_m__mine_2_2.shape
# + jupyter={"source_hidden": true}
cols_to_use = df_m__mine_2_2["features"].columns.tolist()
out_dict = run_kfold_cv_wf(
df_features_targets=df_m__mine_2_2,
cols_to_use=cols_to_use,
run_pca=True,
num_pca_comp=num_pca_i,
k_fold_partition_size=30,
model_workflow=run_gp_workflow,
model_settings=dict(
gp_settings=gp_settings,
kdict=kdict,
),
)
# #####################################################
df_target_pred = out_dict["df_target_pred"]
MAE = out_dict["MAE"]
R2 = out_dict["R2"]
PCA = out_dict["pca"]
regression_model_list = out_dict["regression_model_list"]
df_target_pred_on_train = out_dict["df_target_pred_on_train"]
MAE_pred_on_train = out_dict["MAE_pred_on_train"]
RM_2 = out_dict["RM_2"]
# #####################################################
if verbose:
print(
"MAE: ",
np.round(MAE, 5),
" eV",
sep="")
print(
"R2: ",
np.round(R2, 5),
sep="")
print(
"MAE (predicting on train set): ",
np.round(MAE_pred_on_train, 5),
sep="")
# -
assert False
# + active=""
#
#
#
#
#
#
# -
# ### Predicting on *OH results
# + active=""
# # FROM OH
# MAE: 0.18735 eV
# R2: 0.70906
# MAE (predicting on train set): 0.14474
#
# # LOW E
# MAE: 0.19039 eV
# R2: 0.7025
# MAE (predicting on train set): 0.10353
#
# # MAGMOM
# MAE: 0.19125 eV
# R2: 0.72463
# MAE (predicting on train set): 0.08905
#
# # MINE
# MAE: 0.18998 eV
# R2: 0.70478
# MAE (predicting on train set): 0.08904
#
# # MINE_2
# MAE: 0.18941 eV
# R2: 0.70577
# MAE (predicting on train set): 0.14718
# -
# ### Predicting on *O results
# + active=""
# # FROM OH
# MAE: 0.19534 eV
# R2: 0.78813
# MAE (predicting on train set): 0.15341
#
# # LOW E
# MAE: 0.18201 eV
# R2: 0.82162
# MAE (predicting on train set): 0.13367
#
# # MAGMOM
# MAE: 0.21635 eV
# R2: 0.7337
# MAE (predicting on train set): 0.17447
#
# # MINE
# MAE: 0.18226 eV
# R2: 0.81959
# MAE (predicting on train set): 0.13481
# + active=""
#
#
#
# + jupyter={"source_hidden": true}
# os.environ[""],
# + jupyter={"source_hidden": true}
# # #########################################################
# # Pickling data ###########################################
# directory = os.path.join(
# root_dir, "out_data")
# if not os.path.exists(directory): os.makedirs(directory)
# with open(os.path.join(directory, "df_ads__magmom.pickle"), "wb") as fle:
# pickle.dump(df_ads__magmom, fle)
# # #########################################################
# + jupyter={"source_hidden": true}
# df_ads.pickle
# df_dict.pickle
# + jupyter={"source_hidden": true}
# df_ads__from_oh.pickle
# df_ads__low_e.pickle
# df_ads__magmom.pickle
# + jupyter={"source_hidden": true}
# df_m__from_oh.sort_
# df_m__from_oh =
# df_m__from_oh.reindex(columns=["data", "features", ], level=0)
# df_m__from_oh.reindex(columns=["targets", ], level=0)
# ["targets", ]
# + jupyter={"source_hidden": true}
# list(df_m__from_oh.columns.levels[0])
# + jupyter={"source_hidden": true}
# df_m["targets"]
# df_m.columns.tolist()
# + jupyter={"source_hidden": true}
# for i in new_cols:
# print(i)
# + jupyter={"source_hidden": true}
# assert False
# + jupyter={"source_hidden": true}
# df_j = df_m__from_oh_2
# +
# for name_i, row_i in df_ads__magmom.iterrows():
# # name_i
# # #####################################################
# job_id_o_i = row_i.job_id_o
# job_id_oh_i = row_i.job_id_oh
# job_id_bare_i = row_i.job_id_bare
# # #####################################################
# # #####################################################
# row_mine_i = df_ads__mine.loc[name_i]
# # #####################################################
# job_id_o_i_2 = row_mine_i.job_id_o
# job_id_oh_i_2 = row_mine_i.job_id_oh
# job_id_bare_i_2 = row_mine_i.job_id_bare
# # #####################################################
# if not job_id_o_i == job_id_o_i_2:
# print("IJI")
# if not job_id_oh_i == job_id_oh_i_2:
# print("IJI")
# if not job_id_bare_i == job_id_bare_i_2:
# print("IJI")
# +
# job_id_
# + active=""
# # FROM_OH
# MAE: 0.18735 eV
# R2: 0.70906
# MAE (predicting on train set): 0.14474
#
# # LOW_E
# MAE: 0.19039 eV
# R2: 0.7025
# MAE (predicting on train set): 0.10353
#
# # MAGMOM
# MAE: 0.19125 eV
# R2: 0.72463
# MAE (predicting on train set): 0.08905
# + active=""
# # FROM OH
# MAE: 0.19001 eV
# R2: 0.71487
# MAE (predicting on train set): 0.13976
#
# # LOW E
# MAE: 0.1893 eV
# R2: 0.70264
# MAE (predicting on train set): 0.11304
#
# # MAGMOM
# MAE: 0.1932 eV
# R2: 0.70798
# MAE (predicting on train set): 0.1057
| 23.582677 | 75 | 0.562127 | 2,976 | 20,965 | 3.523185 | 0.089382 | 0.036242 | 0.023367 | 0.05484 | 0.765474 | 0.718264 | 0.640153 | 0.618121 | 0.58226 | 0.563567 | 0 | 0.029481 | 0.189411 | 20,965 | 888 | 76 | 23.609234 | 0.587501 | 0.225423 | 0 | 0.634361 | 0 | 0 | 0.129252 | 0.032258 | 0 | 0 | 0 | 0 | 0.002203 | 1 | 0 | false | 0 | 0.026432 | 0 | 0.026432 | 0.046256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
cf677d8bfffcaf593d5e10ff7108b260a1cb5b41 | 2,478 | py | Python | pandoc-wrapfig.py | nsheff/pandoc-wrapfig | d4523cf43ebab47024d7efde27d7ccddfd983d2f | [
"MIT"
] | null | null | null | pandoc-wrapfig.py | nsheff/pandoc-wrapfig | d4523cf43ebab47024d7efde27d7ccddfd983d2f | [
"MIT"
] | null | null | null | pandoc-wrapfig.py | nsheff/pandoc-wrapfig | d4523cf43ebab47024d7efde27d7ccddfd983d2f | [
"MIT"
] | 1 | 2020-08-11T18:35:53.000Z | 2020-08-11T18:35:53.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Pandoc filter to allow variable wrapping of LaTeX/pdf documents
through the wrapfig package.
Simply add a " {?}" tag to the end of the caption for the figure, where
? is an integer specifying the width of the wrap in inches. 0 will
cause the width of the figure to be used.
"""
from pandocfilters import toJSONFilter, Image, RawInline, stringify, Div, RawBlock
import re, sys
FLAG_PAT = re.compile('.*\{(\d+\.?\d?)\}')
def html(x):
    """Wrap the string *x* in a pandoc raw HTML block."""
    return RawBlock('html', x)
def wrapfig(key, val, fmt, meta):
    """Pandoc filter action: wrap figures via the LaTeX wrapfig package.

    A caption ending in a " {W}" tag requests a text wrap W inches wide.
    For latex output the tag is stripped and the image is emitted inside
    a wrapfigure environment; for markdown/html the caption (tag and
    all) is echoed in a <span class='caption'>; for any other format the
    image is returned with the tag stripped.
    """
    if key == 'Latex':
        sys.stderr.write(key)
    if key == 'Image':
        attrs, caption, target = val
        if fmt == 'markdown' or fmt == 'html':
            return [Image(attrs, caption, target)] + \
                [RawInline(fmt, "<span class='caption'>")] + \
                caption + [RawInline(fmt, "</span>")]
        if FLAG_PAT.match(stringify(caption)):
            # Width tag present: read the width from the final inline
            # and strip the tag (plus the Space before it).
            size = FLAG_PAT.match(caption[-1]['c']).group(1)
            stripped_caption = caption[:-2]
            if fmt == 'latex':
                latex_begin = (r'\setlength{\intextsep}{2pt}'
                               r'\setlength{\columnsep}{8pt}'
                               r'\begin{wrapfigure}{R}{' + size + 'in}')
                if len(stripped_caption) > 0:
                    # Raw string fixes the invalid '\c' escape of the
                    # previous "'}\caption{'" literal (same value).
                    latex_fig = (r'\centering\includegraphics{' + target[0]
                                 + r'}\caption{')
                    latex_end = r'}\vspace{-5pt}\end{wrapfigure}'
                    return ([RawInline(fmt, latex_begin + latex_fig)]
                            + stripped_caption
                            + [RawInline(fmt, latex_end)])
                else:
                    # No caption text beyond the tag: skip \caption{}
                    latex_fig = (r'\centering\includegraphics{' + target[0]
                                 + '}')
                    latex_end = r'\end{wrapfigure}'
                    return ([RawInline(fmt, latex_begin + latex_fig)]
                            + [RawInline(fmt, latex_end)])
            else:
                return Image(attrs, stripped_caption, target)
# Run as a pandoc JSON filter: read the AST on stdin, apply wrapfig to
# every node, write the transformed AST to stdout.
if __name__ == '__main__':
    toJSONFilter(wrapfig)
    sys.stdout.flush()  # Should fix issue #1 (pipe error)
| 38.71875 | 124 | 0.536723 | 280 | 2,478 | 4.657143 | 0.421429 | 0.055215 | 0.052147 | 0.019939 | 0.173313 | 0.136503 | 0.136503 | 0.075153 | 0.075153 | 0 | 0 | 0.007652 | 0.314366 | 2,478 | 63 | 125 | 39.333333 | 0.759859 | 0.274415 | 0 | 0.171429 | 0 | 0 | 0.155318 | 0.090039 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.057143 | 0.028571 | 0.257143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf68743af20103a597b92c1707121c418cb28844 | 34 | py | Python | myscript.py | kRituraj/learnGIt | dad92da290d1aab0713d99af722e86140507e9ab | [
"MIT"
] | null | null | null | myscript.py | kRituraj/learnGIt | dad92da290d1aab0713d99af722e86140507e9ab | [
"MIT"
] | null | null | null | myscript.py | kRituraj/learnGIt | dad92da290d1aab0713d99af722e86140507e9ab | [
"MIT"
] | null | null | null | print("My name is Rituraj Khare")
| 17 | 33 | 0.735294 | 6 | 34 | 4.166667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.147059 | 34 | 1 | 34 | 34 | 0.862069 | 0 | 0 | 0 | 0 | 0 | 0.705882 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
cf697a286088c58c3db9ead0e8a7c5dfcff5c956 | 3,999 | py | Python | las2vola.py | moloned/volumetric_accelerator_toolkit | 8f5cf226a7d788e4dd4215c181db49d9568c6240 | [
"Apache-2.0"
] | 6 | 2019-02-11T14:32:23.000Z | 2021-12-07T09:49:41.000Z | las2vola.py | moloned/volumetric_accelerator_toolkit | 8f5cf226a7d788e4dd4215c181db49d9568c6240 | [
"Apache-2.0"
] | null | null | null | las2vola.py | moloned/volumetric_accelerator_toolkit | 8f5cf226a7d788e4dd4215c181db49d9568c6240 | [
"Apache-2.0"
] | 2 | 2018-10-11T17:29:37.000Z | 2021-09-08T12:01:40.000Z | #!/usr/bin/env python3
"""
Las2vola: Converts Las files into VOLA format.
The ISPRS las format is the standard for LIDAR devices and stores information
on the points obtained. This parser uses the las information
for the nbit per voxel representation. The data stored is: color, height,
number of returns, intensity and classification
@author Jonathan Byrne & Anton Shmatov
@copyright 2018 Intel Ltd (see LICENSE file).
"""
from __future__ import print_function
import glob
import os
import numpy as np
import binutils as bu
from laspy import file as lasfile
from laspy.util import LaspyException
from volatree import VolaTree
def main():
    """Read each input las/laz file, build the VOLA tree, write a binary.

    Input paths come from the bu.parser_args command line: a directory
    (all *.las / *.laz inside it) or a glob pattern.  Existing output
    files are skipped.
    """
    start_time = bu.timer()
    parser = bu.parser_args("*.las / *.laz")
    args = parser.parse_args()

    # Parse directories or filenames, whichever you want!
    if os.path.isdir(args.input):
        filenames = glob.glob(os.path.join(args.input, '*.laz'))
        filenames.extend(glob.glob(os.path.join(args.input, '*.las')))
    else:
        filenames = glob.glob(args.input)
    print("processing: ", ' '.join(filenames))

    for filename in filenames:
        # Dense trees get a distinct extension
        outfilename = bu.sub(filename, "dvol" if args.dense else "vol")
        if os.path.isfile(outfilename):
            print("File already exists!")
            continue

        print("converting", filename, "to", outfilename)
        bbox, points, pointsdata = parse_las(filename, args.nbits)

        # work out how many 8-wide chunks are required for the metadata
        if args.nbits:
            print("nbits set, adding metadata to occupancy grid")
            nbits = (len(pointsdata[0]) + 7) // 8  # ceiling division
        else:
            print("Only occupancy data being set! Use -n flag to add metadata")
            nbits = 0

        if len(points) > 0:
            volatree = VolaTree(args.depth, bbox, args.crs,
                                args.dense, nbits)
            volatree.cubify(points, pointsdata)
            volatree.writebin(outfilename)
            bu.print_ratio(filename, outfilename)
        else:
            print("The las file is empty!")
    bu.timer(start_time)
def _color_channel(pointfile, channel, npoints):
    """Return one color channel from the las file, or a zero list when
    the point format does not carry that channel."""
    try:
        return getattr(pointfile, channel)
    except LaspyException:
        return [0] * npoints


def parse_las(filename, nbits):
    """Read las format point data and return bbox, points and metadata.

    Returns (bbox, points, pointsdata): bbox is [[min x,y,z],
    [max x,y,z]]; points is an (N, 3) array.  pointsdata holds color
    plus height / number-of-returns / intensity / classification,
    normalized to 0-255, and is only returned when nbits is truthy;
    otherwise it is None.  An empty file yields ([], [], None).
    """
    pointfile = lasfile.File(filename, mode='r')
    header = pointfile.header
    maxheight = header.max[2]

    # get all points, one row per point
    points = np.array((pointfile.x, pointfile.y, pointfile.z)).transpose()
    pointsdata = np.zeros((len(pointfile), 7), dtype=int)

    if nbits > 0:  # if want to set other data, find in matrices
        red = _color_channel(pointfile, 'red', len(points))
        green = _color_channel(pointfile, 'green', len(points))
        blue = _color_channel(pointfile, 'blue', len(points))

        # 16-bit las color -> 8-bit
        coldata = np.int64(np.array([red, green, blue]).transpose() / 256)
        scaleddata = np.array(
            [pointfile.get_z(), pointfile.get_num_returns(),
             pointfile.intensity, pointfile.raw_classification],
            dtype='int64').transpose()
        # Normalization bounds per column: height, returns, intensity,
        # classification.  (Renamed from min/max: don't shadow builtins.)
        min_vals = np.array([0, 1, 0, 0])
        max_vals = np.array([maxheight, 7, 1000, 31])
        normdata = np.int64(
            bu.normalize_np(scaleddata, min_vals, max_vals) * 255)
        # if all three colours are 0, set to 200
        coldata[(coldata[:, 0] == 0) & (coldata[:, 1] == 0) &
                (coldata[:, 2] == 0)] = 200
        pointsdata = np.concatenate([coldata, normdata], axis=1)

    if len(points) == 0:
        return [], [], None

    bbox = [points.min(axis=0).tolist(), points.max(axis=0).tolist()]
    if nbits:
        return bbox, points, pointsdata
    else:
        return bbox, points, None
# Script entry point: convert the files named on the command line.
if __name__ == '__main__':
    main()
| 32.778689 | 118 | 0.607652 | 499 | 3,999 | 4.819639 | 0.390782 | 0.018711 | 0.012474 | 0.011642 | 0.022453 | 0.022453 | 0.022453 | 0 | 0 | 0 | 0 | 0.020083 | 0.277819 | 3,999 | 121 | 119 | 33.049587 | 0.812673 | 0.186797 | 0 | 0.142857 | 0 | 0 | 0.065965 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0.095238 | 0 | 0.154762 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf6a926cdf026b6807d2fbef9356b946cbf88279 | 2,871 | py | Python | pipeline/test_users.py | streamsets/datacollector-tests-external | 6f255b5e7496deeef333b57a5e9df4911ba3ef00 | [
"Apache-2.0"
] | 1 | 2020-04-14T03:01:51.000Z | 2020-04-14T03:01:51.000Z | pipeline/test_users.py | streamsets/test | 1ead70179ee92a4acd9cfaa33c56a5a9e233bf3d | [
"Apache-2.0"
] | 1 | 2019-04-24T11:06:38.000Z | 2019-04-24T11:06:38.000Z | pipeline/test_users.py | anubandhan/datacollector-tests | 301c024c66d68353735256b262b681dd05ba16cc | [
"Apache-2.0"
] | 2 | 2019-05-24T06:34:37.000Z | 2020-03-30T11:48:18.000Z | # Copyright 2017 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pytest
from streamsets.testframework import sdc
logger = logging.getLogger(__name__)
@pytest.fixture(scope='module')
def sdc_common_hook():
def hook(data_collector):
data_collector.add_user('jarcec', roles=['admin'], groups=['jarcec', 'employee'])
data_collector.add_user('dima', roles=['admin'], groups=['dima', 'employee'])
data_collector.add_user('bryan', roles=['manager', 'creator'], groups=['bryan', 'contractor'])
data_collector.add_user('arvind', roles=['guest'], groups=['arvind', 'guests'])
return hook
@pytest.fixture(scope='module')
def pipeline(sdc_executor):
builder = sdc_executor.get_pipeline_builder()
dev_data_generator = builder.add_stage('Dev Data Generator')
trash = builder.add_stage('Trash')
dev_data_generator >> trash
pipeline = builder.build()
sdc_executor.set_user('admin')
sdc_executor.add_pipeline(pipeline)
yield pipeline
# Validate "current" user switching and getting the proper groups and roles.
def test_current_user(sdc_executor):
sdc_executor.set_user('admin')
user = sdc_executor.current_user
assert user.name == 'admin'
sdc_executor.set_user('jarcec')
user = sdc_executor.current_user
assert user.name == 'jarcec'
assert user.groups == ['all', 'jarcec', 'employee']
assert user.roles == ['admin']
# Ensure that the operations are indeed executed by the current user.
def test_pipeline_history(sdc_executor, pipeline):
sdc_executor.set_user('jarcec')
sdc_executor.start_pipeline(pipeline)
sdc_executor.set_user('dima')
sdc_executor.stop_pipeline(pipeline)
history = sdc_executor.get_pipeline_history(pipeline)
# History is in descending order.
entry = history.entries[0]
assert entry['user'] == 'dima'
assert entry['status'] == 'STOPPED'
entry = history.entries[1]
assert entry['user'] == 'dima'
assert entry['status'] == 'STOPPING'
entry = history.entries[2]
assert entry['user'] == 'jarcec'
assert entry['status'] == 'RUNNING'
entry = history.entries[3]
assert entry['user'] == 'jarcec'
assert entry['status'] == 'STARTING'
entry = history.entries[4]
assert entry['user'] == 'admin'
assert entry['status'] == 'EDITED'
| 30.542553 | 102 | 0.703239 | 370 | 2,871 | 5.310811 | 0.37027 | 0.083969 | 0.035623 | 0.045802 | 0.23715 | 0.116031 | 0.116031 | 0.040712 | 0 | 0 | 0 | 0.005469 | 0.172065 | 2,871 | 93 | 103 | 30.870968 | 0.821203 | 0.253222 | 0 | 0.230769 | 0 | 0 | 0.144805 | 0 | 0 | 0 | 0 | 0 | 0.269231 | 1 | 0.096154 | false | 0 | 0.057692 | 0 | 0.173077 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf6af0cf676fc11ed879ddf07c27b61f75d1ae0d | 1,107 | py | Python | email_client/email_send.py | geeksLabTech/email-client | 0f533f7b33c38d74aec8663ccc6d8116e0a2489d | [
"MIT"
] | 1 | 2021-09-06T16:43:37.000Z | 2021-09-06T16:43:37.000Z | email_client/email_send.py | geeksLabTech/email-client | 0f533f7b33c38d74aec8663ccc6d8116e0a2489d | [
"MIT"
] | null | null | null | email_client/email_send.py | geeksLabTech/email-client | 0f533f7b33c38d74aec8663ccc6d8116e0a2489d | [
"MIT"
] | 2 | 2020-09-13T02:25:50.000Z | 2021-01-06T17:25:38.000Z | import smtplib
from tools.errors import LoginException
from tools.read_config import read_config
def send_mail(sender:str, pwd:str, to:str, subject:str, text:str):
# Read the email config file
config = read_config('./config/config_email.json')
# create connection with the smtp server
smtpserver = smtplib.SMTP_SSL(host=config['smtp_host'], port=config['smtp_port'])
# send enhaced HELO to the server to identify with the server
smtpserver.ehlo()
# login in the server with the credentials given
try:
smtpserver.login(sender, pwd)
except LoginException:
raise LoginException
else:
# create the email
msg = 'Subject:'+subject+'\n\n'+text
# send the email
smtpserver.sendmail(sender, to, msg)
# close connection
smtpserver.close()
| 42.576923 | 91 | 0.515808 | 109 | 1,107 | 5.165138 | 0.422018 | 0.053286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.417344 | 1,107 | 25 | 92 | 44.28 | 0.872868 | 0.200542 | 0 | 0 | 0 | 0 | 0.063854 | 0.029647 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.2 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf6ccb75aed895f75e03cfa7e1750d857352705e | 1,511 | py | Python | test/test_scores.py | gigantenbein/UNet-Zoo | d157c22ef8041ed743aa7bbcf377f0f8ad85e755 | [
"Apache-2.0"
] | 20 | 2020-02-16T07:20:23.000Z | 2022-03-14T04:11:02.000Z | test/test_scores.py | suyanzhou626/UNet-Zoo | 76d23952d90a45a01da1cc2926b4d3a24a1adb75 | [
"Apache-2.0"
] | 6 | 2021-06-08T21:03:07.000Z | 2022-03-17T13:28:33.000Z | test/test_scores.py | suyanzhou626/UNet-Zoo | 76d23952d90a45a01da1cc2926b4d3a24a1adb75 | [
"Apache-2.0"
] | 5 | 2020-03-20T02:04:49.000Z | 2021-10-20T17:37:52.000Z | """Testing scoring functions"""
import pytest
import os
from importlib.machinery import SourceFileLoader
import utils
import shutil
import torch
import math
import matplotlib.pyplot as plt
import torchvision
@pytest.fixture
def lidc_data():
    """Load the LIDC data loader described by a hard-coded experiment config.

    NOTE(review): the config path below is machine-specific; consider reading
    it from an environment variable.
    """
    config_file = '/Users/marcgantenbein/PycharmProjects/UNet-Zoo/models/experiments/phiseg_rev_7_5_12.py'
    # BUG FIX: str.rstrip('.py') strips trailing characters from the SET
    # {'.', 'p', 'y'}, so any module name ending in 'p' or 'y' was mangled
    # (e.g. 'happy.py' -> 'ha').  splitext removes exactly the extension.
    config_module = os.path.splitext(os.path.basename(config_file))[0]

    print('Running with local configuration')
    import config.local_config as sys_config

    # Import the experiment config as a module and build its data loader.
    exp_config = SourceFileLoader(config_module, config_file).load_module()
    data = exp_config.data_loader(sys_config=sys_config, exp_config=exp_config)
    return data
def test_ncc(lidc_data):
    """variance_ncc_dist of a label arrangement against itself must be 1."""
    idx = 99
    labels_hwc = lidc_data.test.labels[idx, ...]
    image = lidc_data.test.images[idx, ...]
    patch = torch.tensor(image, dtype=torch.float32).to('cpu')

    assert labels_hwc.shape == (128, 128, 4)

    # HWC -> CHW: move the 4 annotator masks to the leading axis.
    masks = torch.tensor(labels_hwc, dtype=torch.float32).to('cpu')  # HWC
    masks = masks.transpose(0, 2).transpose(1, 2)
    assert masks.shape == (4, 128, 128)

    stacked = masks.unsqueeze(dim=1)
    one_hot = utils.convert_batch_to_onehot(stacked, nlabels=2)
    ncc = utils.variance_ncc_dist(one_hot, one_hot)
    assert math.isclose(ncc[0], 1.0)
def test_ged(lidc_data):
    # Placeholder: no assertions for the generalised energy distance yet.
    pass
def test_dice(lidc_data):
    # Placeholder: no assertions for the Dice score yet.
    pass
| 25.183333 | 106 | 0.743878 | 228 | 1,511 | 4.635965 | 0.429825 | 0.045412 | 0.028382 | 0.070956 | 0.172185 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02648 | 0.150232 | 1,511 | 59 | 107 | 25.610169 | 0.796729 | 0.019854 | 0 | 0.111111 | 0 | 0 | 0.086839 | 0.058345 | 0 | 0 | 0 | 0 | 0.083333 | 1 | 0.111111 | false | 0.055556 | 0.305556 | 0 | 0.444444 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 2 |
cf704edeb093695bcc194edf614b3bb53790af9a | 1,074 | py | Python | flask_app/services/twitter_service.py | JenBanks8585/twitoff_Banks | 06f18c1daf5745a2d0890d8d04b87d5282b176d8 | [
"MIT"
] | null | null | null | flask_app/services/twitter_service.py | JenBanks8585/twitoff_Banks | 06f18c1daf5745a2d0890d8d04b87d5282b176d8 | [
"MIT"
] | 4 | 2021-06-08T21:50:24.000Z | 2022-03-12T00:42:59.000Z | flask_app/services/twitter_service.py | JenBanks8585/twitoff_Banks | 06f18c1daf5745a2d0890d8d04b87d5282b176d8 | [
"MIT"
] | null | null | null |
import tweepy
import os
from dotenv import load_dotenv

load_dotenv()

# Twitter credentials are read from the environment (.env via python-dotenv).
TWITTER_API_KEY = os.getenv("TWITTER_API_KEY")
TWITTER_API_SECRET = os.getenv("TWITTER_API_SECRET")
TWITTER_ACCESS_TOKEN = os.getenv("TWITTER_ACCESS_TOKEN")
TWITTER_ACCESS_TOKEN_SECRET = os.getenv("TWITTER_ACCESS_TOKEN_SECRET")

# Authenticate and build the module-level API client used by the app.
auth = tweepy.OAuthHandler(TWITTER_API_KEY, TWITTER_API_SECRET)
auth.set_access_token(TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET)
print(type(auth))
api = tweepy.API(auth)
print(type(api))

if __name__ == "__main__":
    # Smoke test: fetch a known user and dump some fields.
    print("_______________")
    print("User")
    user = api.get_user("elonmusk")
    print(type(user))
    print(user.screen_name)
    print(user.id)
    print(user.verified)

    print("_______________")
    print("Statuses")
    statuses = api.user_timeline(screen_name="elonmusk", tweet_mode="extended", count=150, exclude_replies=True, include_rts=False)
    for status in statuses:
        # BUG FIX: with tweet_mode="extended" tweepy exposes the untruncated
        # body as `full_text`; `.text` is not populated on extended statuses.
        print(status.full_text)
| 26.195122 | 140 | 0.733706 | 141 | 1,074 | 5.042553 | 0.319149 | 0.108298 | 0.151899 | 0.101266 | 0.35865 | 0.295359 | 0.213783 | 0 | 0 | 0 | 0 | 0.005488 | 0.151769 | 1,074 | 41 | 141 | 26.195122 | 0.774973 | 0.090317 | 0 | 0.076923 | 0 | 0 | 0.158111 | 0.027721 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.115385 | 0 | 0.115385 | 0.423077 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
cf70a281c3c891880251c2d76efe8ac3eb44248a | 1,860 | py | Python | spongeauth/api/tests/test_delete_user.py | felixoi/SpongeAuth | d44ee52d0b35b2e1909c7bf6bad29aa7b4835b26 | [
"MIT"
] | 10 | 2016-11-18T12:37:24.000Z | 2022-03-04T09:25:25.000Z | spongeauth/api/tests/test_delete_user.py | felixoi/SpongeAuth | d44ee52d0b35b2e1909c7bf6bad29aa7b4835b26 | [
"MIT"
] | 794 | 2016-11-19T18:34:37.000Z | 2022-03-31T16:49:11.000Z | spongeauth/api/tests/test_delete_user.py | PowerNukkit/OreAuth | 96a2926c9601fce6fac471bdb997077f07e8bf9a | [
"MIT"
] | 11 | 2016-11-26T22:30:17.000Z | 2022-03-16T17:20:14.000Z | import urllib.parse
import django.shortcuts
import pytest
import faker
import accounts.tests.factories
import api.models
@pytest.fixture
def fake():
    # Provides a faker.Faker instance for generating random test data.
    return faker.Faker()
def _make_path(data):
    """Return the users-list API URL with `data` encoded as its query string."""
    base = django.shortcuts.reverse("api:users-list")
    query = urllib.parse.urlencode(data)
    return "{}?{}".format(base, query)
@pytest.mark.django_db
def test_invalid_api_key(client, fake):
    """A DELETE with an unknown API key must be rejected with 403."""
    assert not api.models.APIKey.objects.exists()
    response = client.delete(_make_path({"apiKey": "foobar", "username": fake.user_name()}))
    assert response.status_code == 403
@pytest.mark.django_db
def test_works(client):
    """Happy path: a valid key soft-deletes an existing, active user."""
    api.models.APIKey.objects.create(key="foobar")
    assert not accounts.models.User.objects.exists()
    user = accounts.tests.factories.UserFactory.create()
    assert user.deleted_at is None
    assert user.is_active

    response = client.delete(_make_path({"apiKey": "foobar", "username": user.username}))
    assert response.status_code == 200

    # The row is soft-deleted (flagged) rather than removed from the database.
    user = accounts.models.User.objects.get(id=user.id)
    assert user.deleted_at is not None
    assert not user.is_active

    # The response body echoes the deleted user's public fields.
    payload = response.json()
    assert payload["id"] == user.id
    assert payload["username"] == user.username
    assert payload["email"] == user.email
    assert "avatar_url" in payload
@pytest.mark.django_db
def test_not_existing(client, fake):
    """A valid key with an unknown username yields 404."""
    api.models.APIKey.objects.create(key="foobar")
    response = client.delete(_make_path({"apiKey": "foobar", "username": fake.user_name()}))
    assert response.status_code == 404
@pytest.mark.django_db
def test_deleted(client, fake):
    """Deleting an already soft-deleted user also yields 404."""
    api.models.APIKey.objects.create(key="foobar")
    user = accounts.tests.factories.UserFactory.create(
        deleted_at=fake.date_time_this_century(), is_active=False)
    response = client.delete(_make_path({"apiKey": "foobar", "username": user.username}))
    assert response.status_code == 404
| 26.956522 | 113 | 0.716129 | 255 | 1,860 | 5.078431 | 0.27451 | 0.034749 | 0.049421 | 0.055598 | 0.52278 | 0.485714 | 0.380695 | 0.307336 | 0.307336 | 0.234749 | 0 | 0.007561 | 0.146774 | 1,860 | 68 | 114 | 27.352941 | 0.808444 | 0.015591 | 0 | 0.295455 | 0 | 0 | 0.077681 | 0 | 0 | 0 | 0 | 0 | 0.318182 | 1 | 0.136364 | false | 0 | 0.136364 | 0.045455 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf70d57cf63af1b7800f864d1cbbd1296009fe92 | 2,091 | py | Python | tests/rw_all.py | clayne/retrowrite | 117dad525114bca695317e14affffd4e3de13cce | [
"MIT"
] | 478 | 2019-06-19T09:33:50.000Z | 2022-03-25T09:34:24.000Z | tests/rw_all.py | clayne/retrowrite | 117dad525114bca695317e14affffd4e3de13cce | [
"MIT"
] | 30 | 2019-07-12T09:38:43.000Z | 2022-03-28T04:53:31.000Z | tests/rw_all.py | clayne/retrowrite | 117dad525114bca695317e14affffd4e3de13cce | [
"MIT"
] | 62 | 2019-06-25T16:41:04.000Z | 2022-02-22T15:47:35.000Z | import argparse
import json
import subprocess
import os
from multiprocessing import Pool
def do_test(cmd):
    """Run one rewrite command through the shell; report failure, never raise."""
    print("[!] Running on {}".format(cmd))
    exit_code = subprocess.call(cmd, shell=True)
    if exit_code != 0:
        print("[x] Failed {}".format(cmd))
def do_tests(tests, filter, args, outdir):
    """Rewrite every test binary selected by `filter`.

    tests: list of dicts with at least "path" and "name" keys.
    filter: predicate deciding which test entries to process.
    args: parsed CLI namespace (uses .ddbg, .asan, .parallel).
    outdir: directory receiving the rewritten output files.
    """
    # Delta debugging and parallel execution are mutually exclusive.
    assert not (args.ddbg and args.parallel)
    pool = Pool()  # created unconditionally; only used when --parallel is set
    for test in tests:
        if not filter(test):
            continue
        path = test["path"]
        binp = os.path.join(path, test["name"])
        outp = os.path.join(outdir, test["name"] + ".s")
        if args.ddbg:
            # NOTE(review): the ddbg branch reuses the "_asan" suffix from the
            # asan branch — looks like a copy/paste remnant; confirm intended.
            outp = os.path.join(outdir, test["name"] + "_asan")
            cmd = "python -m debug.ddbg {} {}".format(binp, outp)
        elif args.asan:
            outp = os.path.join(outdir, test["name"] + "_asan")
            cmd = "retrowrite --asan {} {}".format(binp, outp)
        else:
            cmd = "python -m librw.rw {} {}".format(binp, outp)
        if args.parallel:
            pool.apply_async(do_test, args=(cmd, ))
        else:
            do_test(cmd)
    # Wait for any outstanding parallel jobs before returning.
    pool.close()
    pool.join()
if __name__ == "__main__":
    argp = argparse.ArgumentParser()
    argp.add_argument("test_file", type=str, help="JSON file containing tests")
    argp.add_argument(
        "--targets", type=str,
        help="Only test build target, comma separated string of names")
    argp.add_argument(
        "--asan", action='store_true', help="Instrument with asan")
    argp.add_argument(
        "--ddbg", action='store_true', help="Do delta debugging")
    argp.add_argument(
        "--parallel", action='store_true', help="Do multiple tests in parallel")
    args = argp.parse_args()

    # Default: accept every test; restrict to the named targets when given.
    if args.targets:
        wanted = set(args.targets.split(","))
        test_filter = lambda entry: entry["name"] in wanted
    else:
        test_filter = lambda entry: True

    args.testfile = os.path.abspath(args.test_file)
    outdir = os.path.dirname(args.test_file)
    with open(args.test_file) as tfd:
        do_tests(json.load(tfd), test_filter, args, outdir)
| 27.155844 | 79 | 0.583931 | 261 | 2,091 | 4.563218 | 0.35249 | 0.030227 | 0.062972 | 0.035264 | 0.117548 | 0.082284 | 0.082284 | 0.058774 | 0.058774 | 0 | 0 | 0 | 0.275466 | 2,091 | 76 | 80 | 27.513158 | 0.786139 | 0 | 0 | 0.180328 | 0 | 0 | 0.175036 | 0 | 0 | 0 | 0 | 0 | 0.016393 | 1 | 0.032787 | false | 0 | 0.081967 | 0 | 0.114754 | 0.032787 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf71a671f7a019cfe847b9abcb8b86b99ffb82ad | 1,552 | py | Python | codeChallenge/Exercise1.py | jocardozo/Rooftop-Challenge | 0fe2ea9823f38a25911a760f286b0d55eb26e553 | [
"MIT"
] | null | null | null | codeChallenge/Exercise1.py | jocardozo/Rooftop-Challenge | 0fe2ea9823f38a25911a760f286b0d55eb26e553 | [
"MIT"
] | null | null | null | codeChallenge/Exercise1.py | jocardozo/Rooftop-Challenge | 0fe2ea9823f38a25911a760f286b0d55eb26e553 | [
"MIT"
] | null | null | null | def makeFigure(size):
figure = [[0] *(size) for j in range(size)] #creamos la matriz de 0 en el tamaño pedido
x = 0
y = 0
figure[0][0] =1
'''Funciones auxiliares para el recorrido de la serpiente
'''
def moverEste(figure,x,y,pasos):
for i in range(pasos):
y = y + 1
x = x
figure[x][y] =1
return(x,y)
def moverSur(figure,x,y,pasos):
for i in range(pasos):
x = x + 1
y = y
figure[x][y] =1
return(x,y)
def moverOeste(figure,x,y,pasos):
for i in range(pasos):
y = y - 1
x = x
figure[x][y] =1
return(x,y)
def moverNorte(figure,x,y,pasos):
for i in range(pasos):
y = y
x = x - 1
figure[x][y] =1
return(x,y)
x,y = moverEste(figure,x,y,size-1) #Esta por fuera del patron, asi que 'definimos' como movimiento por defecto
d = "s"
'''Recorrido de la serpiente
'''
for i in range(1,size,1):
if (d == "s"):
x,y = moverSur(figure,x,y,size-i)
d = "o"
continue
if (d == "o"):
x,y = moverOeste(figure,x,y,size-i+1)
d = "n"
continue
if (d == "n"):
x,y = moverNorte(figure,x,y,size-i)
d = "e"
continue
if (d == "e"):
x,y = moverEste(figure,x,y,size-i+1)
d = "s"
continue
return(figure) | 24.25 | 118 | 0.439433 | 221 | 1,552 | 3.085973 | 0.226244 | 0.064516 | 0.152493 | 0.080645 | 0.435484 | 0.435484 | 0.394428 | 0.27566 | 0.246334 | 0.203812 | 0 | 0.022447 | 0.425902 | 1,552 | 64 | 119 | 24.25 | 0.742985 | 0 | 0 | 0.44898 | 0 | 0 | 0.006706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
cf71e91499d6deed3463b430dfcb4800d8deebe8 | 521 | py | Python | app/database.py | dorneanu/flask-app-template | ea238742f354937a19cd72a32418307dd4a7af1a | [
"MIT"
] | null | null | null | app/database.py | dorneanu/flask-app-template | ea238742f354937a19cd72a32418307dd4a7af1a | [
"MIT"
] | null | null | null | app/database.py | dorneanu/flask-app-template | ea238742f354937a19cd72a32418307dd4a7af1a | [
"MIT"
] | null | null | null | from flask import current_app
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# Init engine and db_session
# NOTE(review): the raw-SQLAlchemy engine/session/Base setup below appears to
# have been superseded by the Flask-SQLAlchemy extension; kept as reference.
#engine = create_engine(current_app.config['SQLALCHEMY_DATABASE_URI'], convert_unicode=True)
#db_session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine))
#Base = declarative_base()
#Base.query = db_session.query_property()

# Shared Flask-SQLAlchemy database handle for the application.
db = SQLAlchemy()
| 37.214286 | 92 | 0.834933 | 68 | 521 | 6.161765 | 0.441176 | 0.100239 | 0.119332 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.088292 | 521 | 13 | 93 | 40.076923 | 0.882105 | 0.520154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.833333 | 0 | 0.833333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 2 |
cf73010efaaefc559ce2e5d857ca0b89c2eb9c35 | 2,753 | py | Python | tests/conftest.py | Nonse/monkeys | 93681edf18126cc49858992f80df25a7cff931e8 | [
"MIT"
] | null | null | null | tests/conftest.py | Nonse/monkeys | 93681edf18126cc49858992f80df25a7cff931e8 | [
"MIT"
] | null | null | null | tests/conftest.py | Nonse/monkeys | 93681edf18126cc49858992f80df25a7cff931e8 | [
"MIT"
] | null | null | null | import os
import pytest
import random
import config
from monkeygod import create_app, models
from monkeygod.models import db as _db
# Connection string for the throwaway test database used by the fixtures below.
TEST_DATABASE_URI = 'postgresql://postgres:postgres@localhost/test_monkeydb'

# Adapted from http://goo.gl/KXDq2p
@pytest.fixture(scope='session')
def app(request):
    """Session-wide test `Flask` application."""
    # Point the shared config module at the test database and disable CSRF.
    config.TESTING = True
    config.SQLALCHEMY_DATABASE_URI = TEST_DATABASE_URI
    config.CSRF_ENABLED = False
    config.WTF_CSRF_ENABLED = False

    flask_app = create_app(config)

    # Establish an application context before running the tests;
    # popped again when the session finishes.
    ctx = flask_app.app_context()
    ctx.push()
    request.addfinalizer(ctx.pop)

    return flask_app
@pytest.fixture(scope='session')
def db(app, request):
    """Session-wide test database."""
    _db.app = app
    _db.create_all()
    # Drop the whole schema once the test session is over.
    request.addfinalizer(_db.drop_all)
    return _db
@pytest.fixture(scope='function')
def session(db, request):
    """Creates a new database session for a test."""
    connection = db.engine.connect()
    transaction = connection.begin()

    scoped = db.create_scoped_session(dict(bind=connection, binds={}))
    db.session = scoped

    def teardown():
        # Undo everything the test did, then release the connection.
        transaction.rollback()
        connection.close()
        scoped.remove()

    request.addfinalizer(teardown)
    return scoped
@pytest.fixture(scope='function')
def testdata(session, request):
    """Seed 20 monkeys with random ages; deleted again on teardown."""
    monkeys = [
        models.Monkey(
            name='monkey{}'.format(idx + 1),
            age=random.randint(0, 20),
            email='monkey{}@example.com'.format(idx + 1),
        )
        for idx in range(20)
    ]
    session.add_all(monkeys)
    session.commit()

    def teardown():
        for monkey in monkeys:
            session.delete(monkey)
        session.commit()

    request.addfinalizer(teardown)
@pytest.fixture(scope='function')
def testdata_with_friends(session, testdata, request):
    """Wire random friendships (roughly 1 in 6 'best') between the monkeys."""
    monkeys = models.Monkey.query.all()
    for monkey in monkeys:
        # Sample size is drawn first, then the sample itself.
        friends = random.sample(monkeys, random.randint(0, 20))
        for friend in friends:
            is_best = random.randint(0, 5) == 0
            if is_best:
                monkey.add_best_friend(friend)
            else:
                monkey.add_friend(friend)
    session.add_all(monkeys)
    session.commit()
@pytest.fixture(scope='function')
def testdata_with_many_friends(session, testdata, request):
    """Befriend every monkey with all 20 seeded monkeys (shuffled order)."""
    monkeys = models.Monkey.query.all()
    for monkey in monkeys:
        for friend in random.sample(monkeys, 20):
            monkey.add_friend(friend)
    session.add_all(monkeys)
    session.commit()
| 24.149123 | 76 | 0.65129 | 322 | 2,753 | 5.462733 | 0.313665 | 0.044343 | 0.061399 | 0.059125 | 0.361001 | 0.261512 | 0.221717 | 0.175099 | 0.175099 | 0.175099 | 0 | 0.007615 | 0.236833 | 2,753 | 113 | 77 | 24.362832 | 0.829605 | 0.073738 | 0 | 0.375 | 0 | 0 | 0.050533 | 0.021319 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.075 | 0 | 0.2375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf73290c5bcbebb20fd5e98add009b993c971061 | 8,610 | py | Python | src/classifier.py | WattSocialBot/ijcnlp2017-customer-feedback | 2dccdcfaf26df832343dbb76b1e31a094c578c0e | [
"MIT"
] | 17 | 2017-10-27T20:48:38.000Z | 2020-03-16T15:05:47.000Z | src/classifier.py | WattSocialBot/ijcnlp2017-customer-feedback | 2dccdcfaf26df832343dbb76b1e31a094c578c0e | [
"MIT"
] | null | null | null | src/classifier.py | WattSocialBot/ijcnlp2017-customer-feedback | 2dccdcfaf26df832343dbb76b1e31a094c578c0e | [
"MIT"
] | 3 | 2017-10-28T15:34:26.000Z | 2020-03-09T13:56:40.000Z | __author__ = "bplank"
import argparse
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, f1_score
from sklearn.preprocessing import LabelEncoder, StandardScaler, MinMaxScaler
import numpy as np
import random
import seaborn as sn
import matplotlib.pyplot as plt
import pandas as pd
import os
from myutils import ItemSelector, DateStats, MeanEmbedding
# Fix both RNG seeds so shuffling/initialisation are reproducible across runs.
seed=103
random.seed(seed)
np.random.seed(seed)
# parse command line options
parser = argparse.ArgumentParser(description="""Simple SVM classifier using various kinds of features (cf. Plank, 2017)""")
parser.add_argument("train", help="train model on a file")
parser.add_argument("test", help="test model on a file")
parser.add_argument("--lang", help="language", default="en")
parser.add_argument("--output", help="output predictions", required=False,action="store_true")
parser.add_argument("--C", help="parameter C for regularization (higher: regularize less)", required=False, default=10, type=float)
# NOTE(review): --num-components is parsed but never referenced below — confirm
# whether it is dead or consumed elsewhere.
parser.add_argument("--num-components", help="svd components", default=40, type=int)
parser.add_argument("--print-confusion-matrix", help="show confusion matrix", action="store_true", default=False)
parser.add_argument("--features", help="feature set", choices=("words","chars","words+chars","embeds", "chars+embeds", "all","all+pos", "chars+embeds+pos"), default="chars+embeds")
args = parser.parse_args()
## read input data
print("load data..")
# using pandas dataframe; both CSVs must provide 'texts' and 'labels' columns
df_train = pd.read_csv(args.train)
df_dev = pd.read_csv(args.test)
X_train, y_train = df_train['texts'], df_train['labels']
X_dev, y_dev = df_dev['texts'], df_dev['labels']
# Encoder is fitted on train only, so dev labels must be a subset of train's.
labEnc = LabelEncoder()
y_train = labEnc.fit_transform(y_train)
y_dev = labEnc.transform(y_dev)
print("#train instances: {} #dev: {}".format(len(X_train),len(X_dev)))
print("Labels:", labEnc.classes_)
print("vectorize data..")
#algo = LogisticRegression(solver='lbfgs', C=args.C)
algo = LinearSVC(C=args.C)
# tfidf was slightly better than countvectorizer
# Binary tf-idf over char 3-10-grams, word uni/bigrams, and POS uni/tri-grams.
vectorizerChars = TfidfVectorizer(analyzer='char', ngram_range=(3, 10), binary=True)
vectorizerWords = TfidfVectorizer(ngram_range=(1,2), analyzer='word', binary=True)
vectorizerPos = TfidfVectorizer(ngram_range=(1,3), analyzer='word', binary=True)
# For multi-language settings (lang contains '+') the embedding branch reads
# the 'textsPrefix' column instead of 'texts'.
if "+" in args.lang:
    embSelector = ItemSelector(key='textsPrefix')
else:
    embSelector = ItemSelector(key='texts')
# ---- Feature construction ----------------------------------------------------
# Each feature set is a FeatureUnion of named sub-pipelines.  The original code
# spelled out every combination by hand (8 near-identical if/elif branches);
# build each branch once and compose them per --features instead.  Branch names
# and ordering match the original unions exactly.

def _tfidf_branch(name, key, vectorizer):
    # One sparse tf-idf branch reading dataframe column `key`.
    return (name, Pipeline([
        ('selector', ItemSelector(key=key)),
        ('tfidf', vectorizer),
    ]))

def _embeds_branch():
    # Dense mean-embedding branch, min-max scaled.
    return ('embeds', Pipeline([
        ('selector', embSelector),
        ('mean_emb', MeanEmbedding(args.lang)),
        ('scaler', MinMaxScaler()),
    ]))

def _make_branch(name):
    # Map a branch name to a freshly constructed sub-pipeline.
    if name == 'words':
        return _tfidf_branch('words', 'texts', vectorizerWords)
    if name == 'chars':
        return _tfidf_branch('chars', 'texts', vectorizerChars)
    if name == 'pos':
        return _tfidf_branch('pos', 'pos', vectorizerPos)
    return _embeds_branch()

# Which branches, in which order, each --features choice activates.
_FEATURE_SETS = {
    'words': ('words',),
    'chars': ('chars',),
    'words+chars': ('words', 'chars'),
    'embeds': ('embeds',),
    'chars+embeds': ('chars', 'embeds'),  # the all-in-1 model
    'all': ('words', 'chars', 'embeds'),
    'all+pos': ('words', 'chars', 'pos', 'embeds'),
    'chars+embeds+pos': ('chars', 'pos', 'embeds'),
}

features = FeatureUnion([_make_branch(name) for name in _FEATURE_SETS[args.features]])
classifier = Pipeline([
        ('features', features),
        ('clf', algo)])

print("train model..")
# Hard-coded developer switches: set tune=1 for a C grid search,
# debug=1 to dump weight statistics after training.
tune=0
debug=0

if tune:
    from sklearn.model_selection import GridSearchCV
    param_grid = {'clf__C': [0.01, 0.02, 0.5, 0.1, 0.5, 1, 2, 5, 10, 100, 1000]}
    grid_search = GridSearchCV(classifier, param_grid, cv=5)
    grid_search.fit(X_train, y_train)
    y_predicted_dev = grid_search.predict(X_dev)
    y_predicted_train = grid_search.predict(X_train)
    print("dev: ", accuracy_score(y_dev, y_predicted_dev))
    print("train: ", accuracy_score(y_train, y_predicted_train))
    print("best:", grid_search.best_params_)
    print("best score:", grid_search.best_score_)
else:
    # NOTE(review): this branch rebinds y_train/y_dev to the RAW label strings
    # (undoing the LabelEncoder transform above) and fits on the full
    # dataframes so the ItemSelector branches can pick their columns.
    y_train = df_train['labels']
    y_dev = df_dev['labels']
    classifier.fit(df_train, y_train)
    y_predicted_dev = classifier.predict(df_dev)
    y_predicted_train = classifier.predict(df_train)

if debug:
    from scipy import stats
    # access weight vectors
    for weights in classifier.named_steps['clf'].coef_:
        print(weights.shape)
        print(stats.describe(weights))
if args.output:
    # Write one "<sentence_id>\t<original text>\t<prediction>" line per dev
    # instance.  Rewritten with a context manager so the file is closed even
    # if a write fails (previously the handle leaked on error).
    out_path = "predictions2/" + os.path.basename(args.test) + "." + os.path.basename(args.train) + "pred.out"
    sentence_ids = df_dev['sentence_ids'].values
    org_dev = df_dev['original_texts'].values
    with open(out_path, "w") as out_file:
        for i, y_pred in enumerate(y_predicted_dev):
            sent_id = sentence_ids[i]
            text = org_dev[i]
            out_file.write("{}\t{}\t{}\n".format(sent_id, text, y_pred))
### Final evaluation: accuracy on train and dev, then per-class report and
### weighted F1 on the dev set.
accuracy_dev = accuracy_score(y_dev, y_predicted_dev)
accuracy_train = accuracy_score(y_train, y_predicted_train)
print("Classifier accuracy train: {0:.2f}".format(accuracy_train*100))
print("===== dev set ====")
print("Classifier: {0:.2f}".format(accuracy_dev*100))

mat = confusion_matrix(y_dev, y_predicted_dev)
if args.print_confusion_matrix:
    # Transposed so rows are predictions, columns are true labels.
    sn.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False,
               xticklabels=labEnc.classes_, yticklabels=labEnc.classes_)
    plt.xlabel('true label')
    plt.ylabel('predicted label')
    plt.show()
print(classification_report(y_dev, y_predicted_dev, target_names=labEnc.classes_, digits=3))

f1_dev = f1_score(y_dev, y_predicted_dev, average="weighted")
print("weighted f1: {0:.1f}".format(f1_dev*100))
## end
| 30.316901 | 180 | 0.589315 | 901 | 8,610 | 5.477248 | 0.256382 | 0.055117 | 0.068085 | 0.07538 | 0.339412 | 0.324823 | 0.310638 | 0.298886 | 0.285512 | 0.26768 | 0 | 0.010678 | 0.260395 | 8,610 | 283 | 181 | 30.424028 | 0.76429 | 0.03856 | 0 | 0.446078 | 0 | 0 | 0.15339 | 0.002906 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.073529 | 0 | 0.073529 | 0.088235 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf7374d195b400da99176ae7ebdc84ce6102d8fa | 1,329 | py | Python | roles/system/files/boot-setup.py | JesperNaarttijarvi/minotaur-centos-install | df5b9ebdd1ccc717d53c06ef0060c84d72bf8e5e | [
"MIT"
] | null | null | null | roles/system/files/boot-setup.py | JesperNaarttijarvi/minotaur-centos-install | df5b9ebdd1ccc717d53c06ef0060c84d72bf8e5e | [
"MIT"
] | null | null | null | roles/system/files/boot-setup.py | JesperNaarttijarvi/minotaur-centos-install | df5b9ebdd1ccc717d53c06ef0060c84d72bf8e5e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
n_devices = len(os.popen("lspci |grep 'VGA compatible controller: NVIDIA Corporation'").read().rstrip().split("\n"))
if n_devices == 0:
print "fatal: no Nvidia devices found"
sys.exit(1)
with open("/etc/miner-startup.sh","w") as f:
f.write("#!/bin/bash\n")
f.write("sleep 10\n")
f.write("export DISPLAY=:0\n")
f.write("xhost +si:localuser:miner\n")
for i in range(0, n_devices):
f.write("nvidia-settings -a '[gpu:%d]/GPUPowerMizerMode=1'\n" % (i))
f.write("nvidia-smi --id=%d --persistence-mode=1\n" % (i))
f.write("sleep 5\n")
f.write("/usr/bin/sudo -u miner /usr/bin/screen -dmS ex /home/miner/excavator.sh\n")
f.write("sleep 5\n")
f.write("mkdir /var/run/minotaur\n")
f.write("chown miner:miner /var/run/minotaur\n")
f.write("mkdir /var/run/gpustatd\n")
f.write("chown miner:miner /var/run/gpustatd\n")
f.write("mkdir /var/run/excavataur\n")
f.write("chown miner:miner /var/run/excavataur\n")
f.write("/usr/bin/sudo -u miner /usr/bin/screen -dmS exv /home/miner/excavataur.sh\n")
f.write("/usr/bin/sudo -u miner /usr/bin/screen -dmS fan /home/miner/gpustatd.sh\n")
f.write("#/usr/bin/sudo -u miner /usr/bin/screen -dmS min /home/miner/minotaur.sh\n")
f.write("/usr/bin/sudo -u miner /usr/bin/screen -dmS gs /home/miner/gs.sh\n")
| 37.971429 | 116 | 0.665914 | 241 | 1,329 | 3.659751 | 0.327801 | 0.129252 | 0.119048 | 0.056689 | 0.478458 | 0.464853 | 0.352608 | 0.31746 | 0.222222 | 0.222222 | 0 | 0.008554 | 0.120391 | 1,329 | 34 | 117 | 39.088235 | 0.745937 | 0.015049 | 0 | 0.074074 | 0 | 0.185185 | 0.644495 | 0.184251 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.074074 | null | null | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
cf73e7f195ff23cb66846fa6c6da7d28660538de | 20,029 | py | Python | scripts/parser/oldslavdep.py | npedrazzini/jPTDPEarlySlavic | de9d3fa720fb86acadafc923d85473ae3371903f | [
"MIT"
] | 6 | 2021-08-20T20:00:31.000Z | 2022-01-03T15:43:50.000Z | scripts/parser/oldslavdep.py | npedrazzini/jPTDPEarlySlavic | de9d3fa720fb86acadafc923d85473ae3371903f | [
"MIT"
] | 1 | 2021-07-30T13:07:36.000Z | 2021-07-30T13:07:36.000Z | scripts/parser/oldslavdep.py | npedrazzini/jPTDPEarlySlavic | de9d3fa720fb86acadafc923d85473ae3371903f | [
"MIT"
] | 1 | 2021-01-23T20:00:25.000Z | 2021-01-23T20:00:25.000Z | # coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import str
from io import open
from dynet import *
import dynet
from utils import read_conll, read_conll_predict, write_conll, load_embeddings_file
from operator import itemgetter
import utils, time, random, decoder
import numpy as np
from mnnl import FFSequencePredictor, Layer, RNNSequencePredictor, BiRNNSequencePredictor
class OldSlavDep:
    def __init__(self, vocab, pos, rels, w2i, c2i, options):
        """Build the joint POS-tagger / dependency-parser network.

        vocab: word collection (len used for the lookup table; stored as
               self.wordsCount and later passed to read_conll_predict).
        pos:   iterable of POS tags.
        rels:  iterable of dependency relation labels.
        w2i:   word -> index map (indices shifted by 3 for special symbols).
        c2i:   character -> index map.
        options: namespace of hyper-parameters (dims, flags, paths).
        """
        self.model = ParameterCollection()
        random.seed(1)
        self.trainer = RMSPropTrainer(self.model)
        #if options.learning_rate is not None: #Uncomment if model is used to train new parser or update OldSlavNet
        # self.trainer = RMSPropTrainer(self.model, options.learning_rate)
        #print("RMSPropTrainer initial learning rate:", options.learning_rate)

        # Supported nonlinearities; 'tanh3' is tanh of the elementwise cube.
        self.activations = {'tanh': tanh,
                            'sigmoid': logistic,
                            'relu': rectify,
                            'tanh3': (lambda x: tanh(cwise_multiply(cwise_multiply(x, x), x)))
                            }
        self.activation = self.activations[options.activation]
        self.blstmFlag = options.blstmFlag
        self.labelsFlag = options.labelsFlag
        self.costaugFlag = options.costaugFlag
        self.bibiFlag = options.bibiFlag

        # Dimensions: LSTM state, word/char/POS embeddings, extra LSTM layers.
        self.ldims = options.lstm_dims #because it is a bi-lstm (NP)
        self.wdims = options.wembedding_dims
        self.cdims = options.cembedding_dims
        self.layers = options.lstm_layers
        self.wordsCount = vocab
        # Word indices are shifted by 3 to leave room for special symbols.
        self.vocab = {word: ind + 3 for word, ind in w2i.items()}
        self.pos = {word: ind for ind, word in enumerate(pos)}
        self.id2pos = {ind: word for ind, word in enumerate(pos)}
        self.c2i = c2i
        self.rels = {word: ind for ind, word in enumerate(rels)}
        self.irels = rels
        self.pdims = options.pembedding_dims

        self.vocab['*PAD*'] = 1
        self.vocab['*INITIAL*'] = 2

        # Embedding lookup tables for words, characters and POS tags.
        self.wlookup = self.model.add_lookup_parameters((len(vocab) + 3, self.wdims))
        self.clookup = self.model.add_lookup_parameters((len(c2i), self.cdims))
        self.plookup = self.model.add_lookup_parameters((len(pos), self.pdims))

        if options.external_embedding is not None:
            # Initialise word rows from pre-trained vectors where available.
            ext_embeddings, ext_emb_dim = load_embeddings_file(options.external_embedding, lower=True)
            assert (ext_emb_dim == self.wdims)
            print("Initializing word embeddings by pre-trained vectors")
            count = 0
            for word in self.vocab:
                # NOTE(review): str(word, "utf-8") decodes bytes; relies on the
                # python-future `str` — confirm under a pure Python 3 run.
                _word = str(word, "utf-8")
                if _word in ext_embeddings:
                    count += 1
                    self.wlookup.init_row(self.vocab[word], ext_embeddings[_word])
            print(("Vocab size: %d; #words having pretrained vectors: %d" % (len(self.vocab), count)))

        # Forward/backward LSTMs for the POS-tagging component (two stacks).
        self.pos_builders = [VanillaLSTMBuilder(1, self.wdims + self.cdims * 2, self.ldims, self.model),
                             VanillaLSTMBuilder(1, self.wdims + self.cdims * 2, self.ldims, self.model)]
        self.pos_bbuilders = [VanillaLSTMBuilder(1, self.ldims * 2, self.ldims, self.model),
                              VanillaLSTMBuilder(1, self.ldims * 2, self.ldims, self.model)]

        # Parser LSTMs: stacked bi-LSTM, multi-layer LSTM, or simple RNN.
        if self.bibiFlag:
            self.builders = [VanillaLSTMBuilder(1, self.wdims + self.cdims * 2 + self.pdims, self.ldims, self.model),
                             VanillaLSTMBuilder(1, self.wdims + self.cdims * 2 + self.pdims, self.ldims, self.model)]
            self.bbuilders = [VanillaLSTMBuilder(1, self.ldims * 2, self.ldims, self.model),
                              VanillaLSTMBuilder(1, self.ldims * 2, self.ldims, self.model)]
        elif self.layers > 0:
            self.builders = [VanillaLSTMBuilder(self.layers, self.wdims + self.cdims * 2 + self.pdims, self.ldims, self.model),
                             VanillaLSTMBuilder(self.layers, self.wdims + self.cdims * 2 + self.pdims, self.ldims, self.model)]
        else:
            self.builders = [SimpleRNNBuilder(1, self.wdims + self.cdims * 2, self.ldims, self.model),
                             SimpleRNNBuilder(1, self.wdims + self.cdims * 2, self.ldims, self.model)]

        # Softmax layer predicting a POS tag from the bi-LSTM state.
        self.ffSeqPredictor = FFSequencePredictor(Layer(self.model, self.ldims * 2, len(self.pos), softmax))

        self.hidden_units = options.hidden_units

        # Arc-scoring MLP parameters (input is 4 concatenated 2*ldims states).
        self.hidBias = self.model.add_parameters((self.ldims * 8))
        self.hidLayer = self.model.add_parameters((self.hidden_units, self.ldims * 8))
        self.hid2Bias = self.model.add_parameters((self.hidden_units))

        self.outLayer = self.model.add_parameters((1, self.hidden_units if self.hidden_units > 0 else self.ldims * 8))

        if self.labelsFlag:
            # Relation-labelling MLP parameters, mirroring the arc scorer.
            self.rhidBias = self.model.add_parameters((self.ldims * 8))
            self.rhidLayer = self.model.add_parameters((self.hidden_units, self.ldims * 8))
            self.rhid2Bias = self.model.add_parameters((self.hidden_units))
            self.routLayer = self.model.add_parameters(
                (len(self.irels), self.hidden_units if self.hidden_units > 0 else self.ldims * 8))
            self.routBias = self.model.add_parameters((len(self.irels)))
            self.ffRelPredictor = FFSequencePredictor(
                Layer(self.model, self.hidden_units if self.hidden_units > 0 else self.ldims * 8, len(self.irels),
                      softmax))

        # Character-level RNN producing per-character states for each word.
        self.char_rnn = RNNSequencePredictor(LSTMBuilder(1, self.cdims, self.cdims, self.model))
def __getExpr(self, sentence, i, j):
if sentence[i].headfov is None:
sentence[i].headfov = concatenate([sentence[i].lstms[0], sentence[i].lstms[1]])
if sentence[j].modfov is None:
sentence[j].modfov = concatenate([sentence[j].lstms[0], sentence[j].lstms[1]])
_inputVector = concatenate(
[sentence[i].headfov, sentence[j].modfov, dynet.abs(sentence[i].headfov - sentence[j].modfov),
dynet.cmult(sentence[i].headfov, sentence[j].modfov)])
if self.hidden_units > 0:
output = self.outLayer.expr() * self.activation(
self.hid2Bias.expr() + self.hidLayer.expr() * self.activation(
_inputVector + self.hidBias.expr()))
else:
output = self.outLayer.expr() * self.activation(_inputVector + self.hidBias.expr())
return output
def __evaluate(self, sentence):
exprs = [[self.__getExpr(sentence, i, j) for j in range(len(sentence))] for i in range(len(sentence))]
scores = np.array([[output.scalar_value() for output in exprsRow] for exprsRow in exprs])
return scores, exprs
def pick_neg_log(self, pred, gold):
return -dynet.log(dynet.pick(pred, gold))
def __getRelVector(self, sentence, i, j):
if sentence[i].rheadfov is None:
sentence[i].rheadfov = concatenate([sentence[i].lstms[0], sentence[i].lstms[1]])
if sentence[j].rmodfov is None:
sentence[j].rmodfov = concatenate([sentence[j].lstms[0], sentence[j].lstms[1]])
_outputVector = concatenate(
[sentence[i].rheadfov, sentence[j].rmodfov, abs(sentence[i].rheadfov - sentence[j].rmodfov),
cmult(sentence[i].rheadfov, sentence[j].rmodfov)])
if self.hidden_units > 0:
return self.rhid2Bias.expr() + self.rhidLayer.expr() * self.activation(
_outputVector + self.rhidBias.expr())
else:
return _outputVector
def Save(self, filename):
self.model.save(filename)
def Load(self, filename):
self.model.populate(filename)
def Predict(self, conll_path):
with open(conll_path) as conllFP:
for iSentence, sentence in enumerate(read_conll_predict(conllFP, self.c2i, self.wordsCount)):
conll_sentence = [entry for entry in sentence if isinstance(entry, utils.ConllEntry)]
for entry in conll_sentence:
wordvec = self.wlookup[int(self.vocab.get(entry.norm, 0))] if self.wdims > 0 else None
last_state = self.char_rnn.predict_sequence([self.clookup[c] for c in entry.idChars])[-1]
rev_last_state = self.char_rnn.predict_sequence([self.clookup[c] for c in reversed(entry.idChars)])[
-1]
entry.vec = concatenate([_f for _f in [wordvec, last_state, rev_last_state] if _f])
entry.pos_lstms = [entry.vec, entry.vec]
entry.headfov = None
entry.modfov = None
entry.rheadfov = None
entry.rmodfov = None
#Predicted pos tags
lstm_forward = self.pos_builders[0].initial_state()
lstm_backward = self.pos_builders[1].initial_state()
for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):
lstm_forward = lstm_forward.add_input(entry.vec)
lstm_backward = lstm_backward.add_input(rentry.vec)
entry.pos_lstms[1] = lstm_forward.output()
rentry.pos_lstms[0] = lstm_backward.output()
for entry in conll_sentence:
entry.pos_vec = concatenate(entry.pos_lstms)
blstm_forward = self.pos_bbuilders[0].initial_state()
blstm_backward = self.pos_bbuilders[1].initial_state()
for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):
blstm_forward = blstm_forward.add_input(entry.pos_vec)
blstm_backward = blstm_backward.add_input(rentry.pos_vec)
entry.pos_lstms[1] = blstm_forward.output()
rentry.pos_lstms[0] = blstm_backward.output()
concat_layer = [concatenate(entry.pos_lstms) for entry in conll_sentence]
outputFFlayer = self.ffSeqPredictor.predict_sequence(concat_layer)
predicted_pos_indices = [np.argmax(o.value()) for o in outputFFlayer]
predicted_postags = [self.id2pos[idx] for idx in predicted_pos_indices]
# Add predicted pos tags for parsing prediction
for entry, posid in zip(conll_sentence, predicted_pos_indices):
entry.vec = concatenate([entry.vec, self.plookup[posid]])
entry.lstms = [entry.vec, entry.vec]
if self.blstmFlag:
lstm_forward = self.builders[0].initial_state()
lstm_backward = self.builders[1].initial_state()
for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):
lstm_forward = lstm_forward.add_input(entry.vec)
lstm_backward = lstm_backward.add_input(rentry.vec)
entry.lstms[1] = lstm_forward.output()
rentry.lstms[0] = lstm_backward.output()
if self.bibiFlag:
for entry in conll_sentence:
entry.vec = concatenate(entry.lstms)
blstm_forward = self.bbuilders[0].initial_state()
blstm_backward = self.bbuilders[1].initial_state()
for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):
blstm_forward = blstm_forward.add_input(entry.vec)
blstm_backward = blstm_backward.add_input(rentry.vec)
entry.lstms[1] = blstm_forward.output()
rentry.lstms[0] = blstm_backward.output()
scores, exprs = self.__evaluate(conll_sentence)
heads = decoder.parse_proj(scores)
# Multiple roots: heading to the previous "rooted" one
rootCount = 0
rootWid = -1
for index, head in enumerate(heads):
if head == 0:
rootCount += 1
if rootCount == 1:
rootWid = index
if rootCount > 1:
heads[index] = rootWid
rootWid = index
for entry, head, pos in zip(conll_sentence, heads, predicted_postags):
entry.pred_parent_id = head
entry.pred_relation = '_'
entry.pred_pos = pos
dump = False
if self.labelsFlag:
concat_layer = [self.__getRelVector(conll_sentence, head, modifier + 1) for modifier, head in
enumerate(heads[1:])]
outputFFlayer = self.ffRelPredictor.predict_sequence(concat_layer)
predicted_rel_indices = [np.argmax(o.value()) for o in outputFFlayer]
predicted_rels = [self.irels[idx] for idx in predicted_rel_indices]
for modifier, head in enumerate(heads[1:]):
conll_sentence[modifier + 1].pred_relation = predicted_rels[modifier]
renew_cg()
if not dump:
yield sentence
def Train(self, conll_path):
eloss = 0.0
mloss = 0.0
eerrors = 0
etotal = 0
start = time.time()
with open(conll_path) as conllFP:
shuffledData = list(read_conll(conllFP, self.c2i))
random.shuffle(shuffledData)
errs = []
lerrs = []
posErrs = []
for iSentence, sentence in enumerate(shuffledData):
if iSentence % 500 == 0 and iSentence != 0:
print("Processing sentence number: %d" % iSentence, ", Loss: %.4f" % (
eloss / etotal), ", Time: %.2f" % (time.time() - start))
start = time.time()
eerrors = 0
eloss = 0.0
etotal = 0
conll_sentence = [entry for entry in sentence if isinstance(entry, utils.ConllEntry)]
for entry in conll_sentence:
c = float(self.wordsCount.get(entry.norm, 0))
dropFlag = (random.random() < (c / (0.25 + c)))
wordvec = self.wlookup[
int(self.vocab.get(entry.norm, 0)) if dropFlag else 0] if self.wdims > 0 else None
last_state = self.char_rnn.predict_sequence([self.clookup[c] for c in entry.idChars])[-1]
rev_last_state = self.char_rnn.predict_sequence([self.clookup[c] for c in reversed(entry.idChars)])[
-1]
entry.vec = dynet.dropout(concatenate([_f for _f in [wordvec, last_state, rev_last_state] if _f]), 0.33)
entry.pos_lstms = [entry.vec, entry.vec]
entry.headfov = None
entry.modfov = None
entry.rheadfov = None
entry.rmodfov = None
#POS tagging loss
lstm_forward = self.pos_builders[0].initial_state()
lstm_backward = self.pos_builders[1].initial_state()
for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):
lstm_forward = lstm_forward.add_input(entry.vec)
lstm_backward = lstm_backward.add_input(rentry.vec)
entry.pos_lstms[1] = lstm_forward.output()
rentry.pos_lstms[0] = lstm_backward.output()
for entry in conll_sentence:
entry.pos_vec = concatenate(entry.pos_lstms)
blstm_forward = self.pos_bbuilders[0].initial_state()
blstm_backward = self.pos_bbuilders[1].initial_state()
for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):
blstm_forward = blstm_forward.add_input(entry.pos_vec)
blstm_backward = blstm_backward.add_input(rentry.pos_vec)
entry.pos_lstms[1] = blstm_forward.output()
rentry.pos_lstms[0] = blstm_backward.output()
concat_layer = [dynet.dropout(concatenate(entry.pos_lstms), 0.33) for entry in conll_sentence]
outputFFlayer = self.ffSeqPredictor.predict_sequence(concat_layer)
posIDs = [self.pos.get(entry.pos) for entry in conll_sentence]
for pred, gold in zip(outputFFlayer, posIDs):
posErrs.append(self.pick_neg_log(pred, gold))
# Add predicted pos tags
for entry, poses in zip(conll_sentence, outputFFlayer):
entry.vec = concatenate([entry.vec, dynet.dropout(self.plookup[np.argmax(poses.value())], 0.33)])
entry.lstms = [entry.vec, entry.vec]
#Parsing losses
if self.blstmFlag:
lstm_forward = self.builders[0].initial_state()
lstm_backward = self.builders[1].initial_state()
for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):
lstm_forward = lstm_forward.add_input(entry.vec)
lstm_backward = lstm_backward.add_input(rentry.vec)
entry.lstms[1] = lstm_forward.output()
rentry.lstms[0] = lstm_backward.output()
if self.bibiFlag:
for entry in conll_sentence:
entry.vec = concatenate(entry.lstms)
blstm_forward = self.bbuilders[0].initial_state()
blstm_backward = self.bbuilders[1].initial_state()
for entry, rentry in zip(conll_sentence, reversed(conll_sentence)):
blstm_forward = blstm_forward.add_input(entry.vec)
blstm_backward = blstm_backward.add_input(rentry.vec)
entry.lstms[1] = blstm_forward.output()
rentry.lstms[0] = blstm_backward.output()
scores, exprs = self.__evaluate(conll_sentence)
gold = [entry.parent_id for entry in conll_sentence]
heads = decoder.parse_proj(scores, gold if self.costaugFlag else None)
if self.labelsFlag:
concat_layer = [dynet.dropout(self.__getRelVector(conll_sentence, head, modifier + 1), 0.33) for
modifier, head in enumerate(gold[1:])]
outputFFlayer = self.ffRelPredictor.predict_sequence(concat_layer)
relIDs = [self.rels[conll_sentence[modifier + 1].relation] for modifier, _ in enumerate(gold[1:])]
for pred, goldid in zip(outputFFlayer, relIDs):
lerrs.append(self.pick_neg_log(pred, goldid))
e = sum(1 for h, g in zip(heads[1:], gold[1:]) if h != g)
eerrors += e
if e > 0:
loss = [(exprs[h][i] - exprs[g][i]) for i, (h, g) in enumerate(zip(heads, gold)) if h != g] # * (1.0/e)
eloss += (e)
mloss += (e)
errs.extend(loss)
etotal += len(conll_sentence)
if iSentence % 1 == 0:
if len(errs) > 0 or len(lerrs) > 0 or len(posErrs) > 0:
eerrs = (esum(errs + lerrs + posErrs))
eerrs.scalar_value()
eerrs.backward()
self.trainer.update()
errs = []
lerrs = []
posErrs = []
renew_cg()
print("Loss: %.4f" % (mloss / iSentence))
| 48.379227 | 127 | 0.567277 | 2,230 | 20,029 | 4.943946 | 0.133184 | 0.044807 | 0.017687 | 0.019592 | 0.606168 | 0.56 | 0.521088 | 0.474376 | 0.443356 | 0.434467 | 0 | 0.012726 | 0.333017 | 20,029 | 413 | 128 | 48.496368 | 0.812561 | 0.023017 | 0 | 0.405751 | 0 | 0 | 0.010587 | 0 | 0 | 0 | 0 | 0 | 0.003195 | 1 | 0.028754 | false | 0 | 0.031949 | 0.003195 | 0.079872 | 0.015974 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf74741b8ea29334e97b4fd26bf8a8d8ea156e23 | 18,806 | py | Python | tests/data/ec2_offer.py | andrewmcgilvray/awspricing | fd37598dbdb08545db03c99492ce01f7290ab6f5 | [
"Apache-2.0"
] | null | null | null | tests/data/ec2_offer.py | andrewmcgilvray/awspricing | fd37598dbdb08545db03c99492ce01f7290ab6f5 | [
"Apache-2.0"
] | null | null | null | tests/data/ec2_offer.py | andrewmcgilvray/awspricing | fd37598dbdb08545db03c99492ce01f7290ab6f5 | [
"Apache-2.0"
] | null | null | null |
BASIC_EC2_OFFER_SKU = '4C7N4APU9GEUZ6H6'
BASIC_EC2_OFFER_MODIFIED_FORMAT = {
'offerCode': 'AmazonEC2',
'version': '20161213014831',
'products': {
'4C7N4APU9GEUZ6H6' : {
'sku' : '4C7N4APU9GEUZ6H6',
'productFamily' : 'Compute Instance',
'attributes' : {
'servicecode' : 'AmazonEC2',
'location' : 'US East (N. Virginia)',
'locationType' : 'AWS Region',
'instanceType' : 'c4.large',
'currentGeneration' : 'Yes',
'instanceFamily' : 'Compute optimized',
'vcpu' : '2',
'physicalProcessor' : 'Intel Xeon E5-2666 v3 (Haswell)',
'clockSpeed' : '2.9 GHz',
'memory' : '3.75 GiB',
'storage' : 'EBS only',
'networkPerformance' : 'Moderate',
'processorArchitecture' : '64-bit',
'tenancy' : 'Shared',
'operatingSystem' : 'Linux',
'licenseModel' : 'No License required',
'usagetype' : 'BoxUsage:c4.large',
'operation' : 'RunInstances',
'dedicatedEbsThroughput' : '500 Mbps',
'enhancedNetworkingSupported' : 'Yes',
'preInstalledSw' : 'NA',
'processorFeatures' : 'Intel AVX; Intel AVX2; Intel Turbo'
}
},
'BNSJSY9CBT29VNPD':{
'sku': 'BNSJSY9CBT29VNPD',
'attributes': {
'servicecode': 'AWSDataTransfer',
'transferType': 'Inter Region Peering Data Transfer Inbound',
'fromLocation': 'External',
'fromLocationType': 'AWS Region',
'toLocation': 'US East (Ohio)',
'toLocationType': 'AWS Region',
'usagetype': 'USE2-AWS-In-Bytes',
'operation': '',
'servicename': 'AWS Data Transfer'
}
},
},
'terms': {
'OnDemand': {
'4C7N4APU9GEUZ6H6' : {
'4C7N4APU9GEUZ6H6.JRTCKXETXF' : {
'offerTermCode' : 'JRTCKXETXF',
'sku' : '4C7N4APU9GEUZ6H6',
'effectiveDate' : '2016-12-01T00:00:00Z',
'priceDimensions' : {
'4C7N4APU9GEUZ6H6.JRTCKXETXF.6YS6EN2CT7' : {
'rateCode' : '4C7N4APU9GEUZ6H6.JRTCKXETXF.6YS6EN2CT7',
'description' : '$0.1 per On Demand Linux c4.large Instance Hour',
'beginRange' : '0',
'endRange' : 'Inf',
'unit' : 'Hrs',
'pricePerUnit' : {
'USD' : '0.1000000000'
},
'appliesTo' : [ ]
}
},
'termAttributes' : { }
}
},
},
'Reserved': {
"4C7N4APU9GEUZ6H6" : {
"4C7N4APU9GEUZ6H6.HU7G6KETJZ" : {
"offerTermCode" : "HU7G6KETJZ",
"sku" : "4C7N4APU9GEUZ6H6",
"effectiveDate" : "2016-11-30T23:59:59Z",
"priceDimensions" : {
"4C7N4APU9GEUZ6H6.HU7G6KETJZ.6YS6EN2CT7" : {
"rateCode" : "4C7N4APU9GEUZ6H6.HU7G6KETJZ.6YS6EN2CT7",
"description" : "Linux/UNIX (Amazon VPC), c4.large instance-hours used this month",
"beginRange" : "0",
"endRange" : "Inf",
"unit" : "Hrs",
"pricePerUnit" : {
"USD" : "0.0300000000"
},
"appliesTo" : [ ]
},
"4C7N4APU9GEUZ6H6.HU7G6KETJZ.2TG2D8R56U" : {
"rateCode" : "4C7N4APU9GEUZ6H6.HU7G6KETJZ.2TG2D8R56U",
"description" : "Upfront Fee",
"unit" : "Quantity",
"pricePerUnit" : {
"USD" : "263"
},
"appliesTo" : [ ]
}
},
"termAttributes" : {
"LeaseContractLength" : "1yr",
"OfferingClass" : "standard",
"PurchaseOption" : "Partial Upfront"
}
},
"4C7N4APU9GEUZ6H6.38NPMPTW36" : {
"offerTermCode" : "38NPMPTW36",
"sku" : "4C7N4APU9GEUZ6H6",
"effectiveDate" : "2016-11-30T23:59:59Z",
"priceDimensions" : {
"4C7N4APU9GEUZ6H6.38NPMPTW36.2TG2D8R56U" : {
"rateCode" : "4C7N4APU9GEUZ6H6.38NPMPTW36.2TG2D8R56U",
"description" : "Upfront Fee",
"unit" : "Quantity",
"pricePerUnit" : {
"USD" : "539"
},
"appliesTo" : [ ]
},
"4C7N4APU9GEUZ6H6.38NPMPTW36.6YS6EN2CT7" : {
"rateCode" : "4C7N4APU9GEUZ6H6.38NPMPTW36.6YS6EN2CT7",
"description" : "Linux/UNIX (Amazon VPC), c4.large instance-hours used this month",
"beginRange" : "0",
"endRange" : "Inf",
"unit" : "Hrs",
"pricePerUnit" : {
"USD" : "0.0210000000"
},
"appliesTo" : [ ]
}
},
"termAttributes" : {
"LeaseContractLength" : "3yr",
"OfferingClass" : "standard",
"PurchaseOption" : "Partial Upfront"
}
},
"4C7N4APU9GEUZ6H6.R5XV2EPZQZ" : {
"offerTermCode" : "R5XV2EPZQZ",
"sku" : "4C7N4APU9GEUZ6H6",
"effectiveDate" : "2016-11-30T23:59:59Z",
"priceDimensions" : {
"4C7N4APU9GEUZ6H6.R5XV2EPZQZ.2TG2D8R56U" : {
"rateCode" : "4C7N4APU9GEUZ6H6.R5XV2EPZQZ.2TG2D8R56U",
"description" : "Upfront Fee",
"unit" : "Quantity",
"pricePerUnit" : {
"USD" : "710"
},
"appliesTo" : [ ]
},
"4C7N4APU9GEUZ6H6.R5XV2EPZQZ.6YS6EN2CT7" : {
"rateCode" : "4C7N4APU9GEUZ6H6.R5XV2EPZQZ.6YS6EN2CT7",
"description" : "Linux/UNIX (Amazon VPC), c4.large instance-hours used this month",
"beginRange" : "0",
"endRange" : "Inf",
"unit" : "Hrs",
"pricePerUnit" : {
"USD" : "0.0270000000"
},
"appliesTo" : [ ]
}
},
"termAttributes" : {
"LeaseContractLength" : "3yr",
"OfferingClass" : "convertible",
"PurchaseOption" : "Partial Upfront"
}
},
"4C7N4APU9GEUZ6H6.4NA7Y494T4" : {
"offerTermCode" : "4NA7Y494T4",
"sku" : "4C7N4APU9GEUZ6H6",
"effectiveDate" : "2017-04-30T23:59:59Z",
"priceDimensions" : {
"4C7N4APU9GEUZ6H6.4NA7Y494T4.6YS6EN2CT7" : {
"rateCode" : "4C7N4APU9GEUZ6H6.4NA7Y494T4.6YS6EN2CT7",
"description" : "Linux/UNIX (Amazon VPC), c4.large reserved instance applied",
"beginRange" : "0",
"endRange" : "Inf",
"unit" : "Hrs",
"pricePerUnit" : {
"USD" : "0.0630000000"
},
"appliesTo" : [ ]
}
},
"termAttributes" : {
"LeaseContractLength" : "1yr",
"OfferingClass" : "standard",
"PurchaseOption" : "No Upfront"
}
},
},
}
}
}
# Includes one variation of the c4.xlarge product and just Partial Upfront RIs.
BASIC_EC2_OFFER_DATA = {
'offerCode': 'AmazonEC2',
'version': '20161213014831',
'products': {
'4C7N4APU9GEUZ6H6' : {
'sku' : '4C7N4APU9GEUZ6H6',
'productFamily' : 'Compute Instance',
'attributes' : {
'servicecode' : 'AmazonEC2',
'location' : 'US East (N. Virginia)',
'locationType' : 'AWS Region',
'instanceType' : 'c4.large',
'currentGeneration' : 'Yes',
'instanceFamily' : 'Compute optimized',
'vcpu' : '2',
'physicalProcessor' : 'Intel Xeon E5-2666 v3 (Haswell)',
'clockSpeed' : '2.9 GHz',
'memory' : '3.75 GiB',
'storage' : 'EBS only',
'networkPerformance' : 'Moderate',
'processorArchitecture' : '64-bit',
'tenancy' : 'Shared',
'operatingSystem' : 'Linux',
'licenseModel' : 'No License required',
'usagetype' : 'BoxUsage:c4.large',
'operation' : 'RunInstances',
'dedicatedEbsThroughput' : '500 Mbps',
'enhancedNetworkingSupported' : 'Yes',
'preInstalledSw' : 'NA',
'processorFeatures' : 'Intel AVX; Intel AVX2; Intel Turbo'
}
},
},
'terms': {
'OnDemand': {
'4C7N4APU9GEUZ6H6' : {
'4C7N4APU9GEUZ6H6.JRTCKXETXF' : {
'offerTermCode' : 'JRTCKXETXF',
'sku' : '4C7N4APU9GEUZ6H6',
'effectiveDate' : '2016-12-01T00:00:00Z',
'priceDimensions' : {
'4C7N4APU9GEUZ6H6.JRTCKXETXF.6YS6EN2CT7' : {
'rateCode' : '4C7N4APU9GEUZ6H6.JRTCKXETXF.6YS6EN2CT7',
'description' : '$0.1 per On Demand Linux c4.large Instance Hour',
'beginRange' : '0',
'endRange' : 'Inf',
'unit' : 'Hrs',
'pricePerUnit' : {
'USD' : '0.1000000000'
},
'appliesTo' : [ ]
}
},
'termAttributes' : { }
}
},
},
'Reserved': {
"4C7N4APU9GEUZ6H6" : {
"4C7N4APU9GEUZ6H6.HU7G6KETJZ" : {
"offerTermCode" : "HU7G6KETJZ",
"sku" : "4C7N4APU9GEUZ6H6",
"effectiveDate" : "2016-11-30T23:59:59Z",
"priceDimensions" : {
"4C7N4APU9GEUZ6H6.HU7G6KETJZ.6YS6EN2CT7" : {
"rateCode" : "4C7N4APU9GEUZ6H6.HU7G6KETJZ.6YS6EN2CT7",
"description" : "Linux/UNIX (Amazon VPC), c4.large instance-hours used this month",
"beginRange" : "0",
"endRange" : "Inf",
"unit" : "Hrs",
"pricePerUnit" : {
"USD" : "0.0300000000"
},
"appliesTo" : [ ]
},
"4C7N4APU9GEUZ6H6.HU7G6KETJZ.2TG2D8R56U" : {
"rateCode" : "4C7N4APU9GEUZ6H6.HU7G6KETJZ.2TG2D8R56U",
"description" : "Upfront Fee",
"unit" : "Quantity",
"pricePerUnit" : {
"USD" : "263"
},
"appliesTo" : [ ]
}
},
"termAttributes" : {
"LeaseContractLength" : "1yr",
"OfferingClass" : "standard",
"PurchaseOption" : "Partial Upfront"
}
},
"4C7N4APU9GEUZ6H6.38NPMPTW36" : {
"offerTermCode" : "38NPMPTW36",
"sku" : "4C7N4APU9GEUZ6H6",
"effectiveDate" : "2016-11-30T23:59:59Z",
"priceDimensions" : {
"4C7N4APU9GEUZ6H6.38NPMPTW36.2TG2D8R56U" : {
"rateCode" : "4C7N4APU9GEUZ6H6.38NPMPTW36.2TG2D8R56U",
"description" : "Upfront Fee",
"unit" : "Quantity",
"pricePerUnit" : {
"USD" : "539"
},
"appliesTo" : [ ]
},
"4C7N4APU9GEUZ6H6.38NPMPTW36.6YS6EN2CT7" : {
"rateCode" : "4C7N4APU9GEUZ6H6.38NPMPTW36.6YS6EN2CT7",
"description" : "Linux/UNIX (Amazon VPC), c4.large instance-hours used this month",
"beginRange" : "0",
"endRange" : "Inf",
"unit" : "Hrs",
"pricePerUnit" : {
"USD" : "0.0210000000"
},
"appliesTo" : [ ]
}
},
"termAttributes" : {
"LeaseContractLength" : "3yr",
"OfferingClass" : "standard",
"PurchaseOption" : "Partial Upfront"
}
},
"4C7N4APU9GEUZ6H6.R5XV2EPZQZ" : {
"offerTermCode" : "R5XV2EPZQZ",
"sku" : "4C7N4APU9GEUZ6H6",
"effectiveDate" : "2016-11-30T23:59:59Z",
"priceDimensions" : {
"4C7N4APU9GEUZ6H6.R5XV2EPZQZ.2TG2D8R56U" : {
"rateCode" : "4C7N4APU9GEUZ6H6.R5XV2EPZQZ.2TG2D8R56U",
"description" : "Upfront Fee",
"unit" : "Quantity",
"pricePerUnit" : {
"USD" : "710"
},
"appliesTo" : [ ]
},
"4C7N4APU9GEUZ6H6.R5XV2EPZQZ.6YS6EN2CT7" : {
"rateCode" : "4C7N4APU9GEUZ6H6.R5XV2EPZQZ.6YS6EN2CT7",
"description" : "Linux/UNIX (Amazon VPC), c4.large instance-hours used this month",
"beginRange" : "0",
"endRange" : "Inf",
"unit" : "Hrs",
"pricePerUnit" : {
"USD" : "0.0270000000"
},
"appliesTo" : [ ]
}
},
"termAttributes" : {
"LeaseContractLength" : "3yr",
"OfferingClass" : "convertible",
"PurchaseOption" : "Partial Upfront"
}
},
"4C7N4APU9GEUZ6H6.4NA7Y494T4" : {
"offerTermCode" : "4NA7Y494T4",
"sku" : "4C7N4APU9GEUZ6H6",
"effectiveDate" : "2017-04-30T23:59:59Z",
"priceDimensions" : {
"4C7N4APU9GEUZ6H6.4NA7Y494T4.6YS6EN2CT7" : {
"rateCode" : "4C7N4APU9GEUZ6H6.4NA7Y494T4.6YS6EN2CT7",
"description" : "Linux/UNIX (Amazon VPC), c4.large reserved instance applied",
"beginRange" : "0",
"endRange" : "Inf",
"unit" : "Hrs",
"pricePerUnit" : {
"USD" : "0.0630000000"
},
"appliesTo" : [ ]
}
},
"termAttributes" : {
"LeaseContractLength" : "1yr",
"OfferingClass" : "standard",
"PurchaseOption" : "No Upfront"
}
},
},
}
}
}
BARE_METAL_EC2_SKU = 'SBVNSX4BKU246KVM'
BARE_METAL_EC2_OFFER = {
'offerCode': 'AmazonEC2',
'version': '20161213014831',
'products': {
"SBVNSX4BKU246KVM": {
"productFamily": "Compute Instance (bare metal)",
"sku": "SBVNSX4BKU246KVM",
"attributes": {
"servicename": "Amazon Elastic Compute Cloud",
"preInstalledSw": "SQL Ent",
"normalizationSizeFactor": "128",
"ecu": "208",
"capacitystatus": "Used",
"operation": "RunInstances:0102",
"physicalProcessor": "Intel Xeon E5-2686 v4 (Broadwell)",
"vcpu": "72",
"instanceFamily": "Storage optimized",
"currentGeneration": "Yes",
"instanceType": "i3.metal",
"locationType": "AWS Region",
"location": "EU (Ireland)",
"servicecode": "AmazonEC2",
"memory": "512 GiB",
"storage": "8 x 1900 NVMe SSD",
"networkPerformance": "25 Gigabit",
"processorArchitecture": "64-bit",
"tenancy": "Shared",
"operatingSystem": "Windows",
"licenseModel": "No License required",
"usagetype": "EU-BoxUsage:i3.metal"
},
}
}
}
| 44.458629 | 111 | 0.37823 | 917 | 18,806 | 7.739368 | 0.221374 | 0.054107 | 0.045089 | 0.030999 | 0.869522 | 0.857545 | 0.849937 | 0.849937 | 0.849937 | 0.849937 | 0 | 0.118661 | 0.509306 | 18,806 | 422 | 112 | 44.563981 | 0.650412 | 0.004094 | 0 | 0.695652 | 0 | 0 | 0.3909 | 0.089181 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
cf7897f04a99a685cf752ce25bde96a1bd963ec7 | 183 | py | Python | dist/micropy-cli/frozen/uasyncio/funcs.py | kevindawson/Pico-Stub | 6f9112779d4d81f821a3af273a450b9329ccdbab | [
"Apache-2.0"
] | 19 | 2021-01-25T23:56:09.000Z | 2022-02-21T13:55:16.000Z | dist/micropy-cli/frozen/uasyncio/funcs.py | kevindawson/Pico-Stub | 6f9112779d4d81f821a3af273a450b9329ccdbab | [
"Apache-2.0"
] | 18 | 2021-02-06T09:03:09.000Z | 2021-10-04T16:36:35.000Z | dist/micropy-cli/frozen/uasyncio/funcs.py | kevindawson/Pico-Stub | 6f9112779d4d81f821a3af273a450b9329ccdbab | [
"Apache-2.0"
] | 6 | 2021-01-26T08:41:47.000Z | 2021-04-27T11:33:33.000Z | from typing import Any
def wait_for_ms(aw: Any, timeout: int) -> Any: ...
# 0: return wait_for(aw,timeout,core.sleep_ms)
# ? 0: return wait_for(aw, timeout, core.sleep_ms)
| 30.5 | 54 | 0.672131 | 31 | 183 | 3.774194 | 0.483871 | 0.179487 | 0.188034 | 0.239316 | 0.581197 | 0.581197 | 0.581197 | 0.581197 | 0.581197 | 0 | 0 | 0.013423 | 0.185792 | 183 | 5 | 55 | 36.6 | 0.771812 | 0.508197 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0.5 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
cf7bf89fc30751bcda78ce1d1f53a0da0361b74d | 1,509 | py | Python | dashdaemon/keys.py | rGunti/CarPi-DashDaemon | b8b340d35125b6f7fe5bb9647760d37301b07cac | [
"MIT"
] | null | null | null | dashdaemon/keys.py | rGunti/CarPi-DashDaemon | b8b340d35125b6f7fe5bb9647760d37301b07cac | [
"MIT"
] | null | null | null | dashdaemon/keys.py | rGunti/CarPi-DashDaemon | b8b340d35125b6f7fe5bb9647760d37301b07cac | [
"MIT"
] | null | null | null | """
CARPI DASH DAEMON
(C) 2018, Raphael "rGunti" Guntersweiler
Licensed under MIT
"""
from redisdatabus.bus import TypedBusListener as Types
import gpsdaemon.keys as gpskeys
import obddaemon.keys as obdkeys
SETTINGS_KEY_BASE = 'carpi.settings.'
DASH_KEY_BASE = 'carpi.dashboard.'
def _build_key(type, key_base, name):
return "{}{}{}".format(type if type else "", key_base, name)
CONFIG_KEYS = {
'engine_vol': _build_key(Types.TYPE_PREFIX_INT, SETTINGS_KEY_BASE, 'car.enginevolume'),
'vol_efficency': _build_key(Types.TYPE_PREFIX_INT, SETTINGS_KEY_BASE, 'car.efficency'),
'fuel_density': _build_key(Types.TYPE_PREFIX_INT, SETTINGS_KEY_BASE, 'car.fueldensity')
}
CONFIG_DEFAULT_VALUES = {
CONFIG_KEYS['engine_vol']: 1000,
CONFIG_KEYS['vol_efficency']: 85,
CONFIG_KEYS['fuel_density']: 745
}
LIVE_INPUT_DATA_KEYS = {
'car_rpm': obdkeys.KEY_RPM,
'car_map': obdkeys.KEY_INTAKE_PRESSURE,
'car_tmp': obdkeys.KEY_INTAKE_TEMP,
'car_spd': obdkeys.KEY_SPEED,
'gps_spd': gpskeys.KEY_SPEED,
'gps_acc_lng': gpskeys.KEY_EPX,
'gps_acc_lat': gpskeys.KEY_EPY,
'gps_acc_spd': gpskeys.KEY_EPS
}
LIVE_OUTPUT_DATA_KEYS = {
'speed': _build_key(Types.TYPE_PREFIX_INT, DASH_KEY_BASE, 'speed'),
'fuel_usage': _build_key(Types.TYPE_PREFIX_FLOAT, DASH_KEY_BASE, 'fuelusage'),
'fuel_efficiency': _build_key(Types.TYPE_PREFIX_FLOAT, DASH_KEY_BASE, 'fuelefficiency'),
'fuel_fail_flag': _build_key(Types.TYPE_PREFIX_BOOL, DASH_KEY_BASE, 'fuelfailflag')
}
| 30.795918 | 92 | 0.743539 | 214 | 1,509 | 4.808411 | 0.369159 | 0.07483 | 0.088435 | 0.115646 | 0.251701 | 0.229349 | 0.204082 | 0.204082 | 0.204082 | 0.12828 | 0 | 0.009909 | 0.13055 | 1,509 | 48 | 93 | 31.4375 | 0.77439 | 0.051027 | 0 | 0 | 0 | 0 | 0.212781 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.090909 | 0.030303 | 0.151515 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf7e16d1f4e90c037eb66831eeffade73df69683 | 261 | py | Python | imdb_movie_review_sentiment_prediction/training_and_evaluation.py | slaily/deep-learning-bits | cb9ce7ec539efbdfcaa023d141466f919bd31b71 | [
"MIT"
] | null | null | null | imdb_movie_review_sentiment_prediction/training_and_evaluation.py | slaily/deep-learning-bits | cb9ce7ec539efbdfcaa023d141466f919bd31b71 | [
"MIT"
] | null | null | null | imdb_movie_review_sentiment_prediction/training_and_evaluation.py | slaily/deep-learning-bits | cb9ce7ec539efbdfcaa023d141466f919bd31b71 | [
"MIT"
] | null | null | null | model.compile(
optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['acc']
)
history = model.fit(
x_train,
y_train,
epochs=10,
batch_size=32,
validation_data=(x_val, y_val)
)
model.save_weights('pre_trained_glove_model.h5')
| 18.642857 | 48 | 0.678161 | 35 | 261 | 4.742857 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023474 | 0.183908 | 261 | 13 | 49 | 20.076923 | 0.755869 | 0 | 0 | 0 | 0 | 0 | 0.210728 | 0.099617 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf7e800c7c1a59352899d5b0f4d9c283b3e91edb | 1,010 | py | Python | launches.py | zweed4u/launchesEND | fd016478c6f757e323009611d6b83ea42fbf8116 | [
"MIT"
] | 5 | 2017-12-05T04:00:22.000Z | 2020-12-16T20:44:46.000Z | launches.py | zweed4u/launchesEND | fd016478c6f757e323009611d6b83ea42fbf8116 | [
"MIT"
] | null | null | null | launches.py | zweed4u/launchesEND | fd016478c6f757e323009611d6b83ea42fbf8116 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#Hmmm... http://www.endclothing.com/media/us_sitemap.xml
import urllib2, zlib, json
url='https://launches.endclothing.com/api/products'
req = urllib2.Request(url)
req.add_header(':host','launches.endclothing.com');req.add_header(':method','GET');req.add_header(':path','/api/products');req.add_header(':scheme','https');req.add_header(':version','HTTP/1.1');req.add_header('accept','application/json, text/plain, */*');req.add_header('accept-encoding','gzip,deflate');req.add_header('accept-language','en-US,en;q=0.8');req.add_header('cache-control','max-age=0');req.add_header('cookie','__/');req.add_header('user-agent','Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/37.0.2062.120 Chrome/37.0.2062.120 Safari/537.36');
resp = urllib2.urlopen(req).read()
resp = zlib.decompress(bytes(bytearray(resp)),15+32)
data = json.loads(resp)
for product in data:
for attrib in product.keys():
print str(attrib)+' :: '+ str(product[attrib])
print '\n'
| 67.333333 | 598 | 0.723762 | 162 | 1,010 | 4.419753 | 0.549383 | 0.092179 | 0.184358 | 0.075419 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.052356 | 0.054455 | 1,010 | 14 | 599 | 72.142857 | 0.697382 | 0.074257 | 0 | 0 | 0 | 0.090909 | 0.436227 | 0.049303 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.090909 | null | null | 0.181818 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
cf815298accec6c14c7afef44e976c7b6069c135 | 73 | py | Python | scalabel/tools/__init__.py | cwlroda/scalabel | 296b7f3050ec0d02b4578d9d1f174ffd22aee3fb | [
"Apache-2.0"
] | 279 | 2019-11-18T01:48:39.000Z | 2022-03-30T00:16:43.000Z | scalabel/tools/__init__.py | cwlroda/scalabel | 296b7f3050ec0d02b4578d9d1f174ffd22aee3fb | [
"Apache-2.0"
] | 141 | 2019-11-20T02:36:11.000Z | 2022-03-29T15:17:46.000Z | scalabel/tools/__init__.py | cwlroda/scalabel | 296b7f3050ec0d02b4578d9d1f174ffd22aee3fb | [
"Apache-2.0"
] | 85 | 2019-11-18T06:10:12.000Z | 2022-03-27T12:32:55.000Z | """Tools for using scalabel."""
from . import edit_labels, prepare_data
| 18.25 | 39 | 0.739726 | 10 | 73 | 5.2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.136986 | 73 | 3 | 40 | 24.333333 | 0.825397 | 0.342466 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 4 |
cf831543b480d5861c0d351648dc6dd8a55ea5de | 460 | py | Python | python/controls/choicegroup/choicegroup_with_change_event.py | pglet/pglet-samples | ab47e797a4daccfa4779daa3d1fd1cc27d92e7f9 | [
"MIT"
] | null | null | null | python/controls/choicegroup/choicegroup_with_change_event.py | pglet/pglet-samples | ab47e797a4daccfa4779daa3d1fd1cc27d92e7f9 | [
"MIT"
] | null | null | null | python/controls/choicegroup/choicegroup_with_change_event.py | pglet/pglet-samples | ab47e797a4daccfa4779daa3d1fd1cc27d92e7f9 | [
"MIT"
] | null | null | null | import pglet
from pglet import ChoiceGroup, choicegroup, Text
with pglet.page("choicegroup-with-change-event") as page:
def choicegroup_changed(e):
t.value = f"ChoiceGroup value changed to {cg.value}"
t.update()
cg = ChoiceGroup(label='Select color', on_change=choicegroup_changed, options=[
choicegroup.Option('Red'),
choicegroup.Option('Green'),
choicegroup.Option('Blue')
])
t = Text()
page.add(cg, t)
input() | 24.210526 | 81 | 0.680435 | 58 | 460 | 5.344828 | 0.517241 | 0.164516 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.184783 | 460 | 19 | 82 | 24.210526 | 0.826667 | 0 | 0 | 0 | 0 | 0 | 0.199566 | 0.062907 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf84fe1671965d8bf607c4db0b1fce05cc370700 | 910 | py | Python | raspberrypi/sound1.py | Shadowsith/python | b8878c822e55528e663de16bd1029d330862c8dc | [
"MIT"
] | null | null | null | raspberrypi/sound1.py | Shadowsith/python | b8878c822e55528e663de16bd1029d330862c8dc | [
"MIT"
] | null | null | null | raspberrypi/sound1.py | Shadowsith/python | b8878c822e55528e663de16bd1029d330862c8dc | [
"MIT"
] | 1 | 2020-05-19T11:32:25.000Z | 2020-05-19T11:32:25.000Z | #!/usr/bin/python
# Double-clap detector ("Doppelklatschen"): polls a GPIO input and, on a
# second clap within 500 ms, clears a flag row in a MySQL database.
import time
gpioPort = 40  # board pin wired to the sensor's digital output (presumably a sound sensor — confirm wiring)
import RPi.GPIO as GPIO
import mysql.connector
# MySQL statement ("MySQL Verbindung"): reset the 'bewegung' (motion) flag.
statement = "UPDATE Flags SET wert=0 WHERE name='bewegung';"
# Use the physical board pin numbering ("GPIO Layout verwenden").
GPIO.setmode(GPIO.BOARD)
GPIO.setup(gpioPort, GPIO.IN)
lastSound = 0  # millisecond timestamp of the last detected clap (0 = none yet)
def mysqlConnect(statement):
    """Open a connection to the local MySQL database, execute *statement*,
    commit, and always release the cursor and connection.

    The original version leaked the connection (and cursor) whenever
    connect/execute raised; try/finally guarantees cleanup now.

    Parameters:
        statement - a single SQL statement string to execute.
    """
    # NOTE(review): credentials are hard-coded; consider moving them to a
    # config file outside version control.
    cnx = mysql.connector.connect(user='pi', password='raspberry', host='localhost', database='EIT11C')
    try:
        cursor = cnx.cursor()
        try:
            cursor.execute(statement)
            cnx.commit()
        finally:
            cursor.close()
    finally:
        cnx.close()
# Poll the sensor forever; a HIGH level is treated as a clap.
while 1:
 if GPIO.input(gpioPort) == GPIO.HIGH:
  # First clap, or more than 500 ms since the previous one:
  # (re)start the double-clap window.
  if lastSound == 0 or (lastSound + 500) < int(round(time.time()*1000)):
   lastSound = int(round(time.time()*1000))
   time.sleep(0.1)
   # NOTE(review): "Klatchen1" looks like a typo of "Klatschen1" — confirm intended output.
   print("Klatchen1")
  else:
   # Second clap within 500 ms: report it, reset the window, and
   # clear the database flag.
   print("Klatschen2")
   lastSound = 0
   time.sleep(0.1)
   mysqlConnect(statement)
| 23.947368 | 103 | 0.631868 | 110 | 910 | 5.227273 | 0.554545 | 0.052174 | 0.041739 | 0.055652 | 0.069565 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03741 | 0.236264 | 910 | 37 | 104 | 24.594595 | 0.789928 | 0.074725 | 0 | 0.153846 | 0 | 0 | 0.108592 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0.038462 | 0.115385 | 0 | 0.153846 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf85325b7b5d658e0a68da64304ce7b4f2588e9a | 7,466 | py | Python | apted/all_possible_mappings_ted.py | JoaoFelipe/apted | 828b3e3f4c053f7d35f0b55b0d5597e8041719ac | [
"MIT"
] | 52 | 2017-11-14T06:45:45.000Z | 2022-03-01T01:14:45.000Z | apted/all_possible_mappings_ted.py | JoaoFelipe/apted | 828b3e3f4c053f7d35f0b55b0d5597e8041719ac | [
"MIT"
] | 7 | 2018-11-21T17:21:14.000Z | 2021-09-04T09:23:53.000Z | apted/all_possible_mappings_ted.py | JoaoFelipe/apted | 828b3e3f4c053f7d35f0b55b0d5597e8041719ac | [
"MIT"
] | 7 | 2017-12-17T16:49:45.000Z | 2020-07-16T18:49:44.000Z | #
# The MIT License
#
# Copyright 2017 Joao Felipe Pimentel
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""Implements an exponential algorithm for the tree edit distance. It
computes all possible TED mappings between two trees and calculated their
minimal cost."""
from __future__ import (absolute_import, division)
from copy import copy
from .config import Config
from .node_indexer import NodeIndexer
class AllPossibleMappingsTED(object):
    """Implements an exponential algorithm for the tree edit distance. It
    computes all possible TED mappings between two trees and calculated their
    minimal cost."""
    def __init__(self, tree1, tree2, config=None):
        self.config = config or Config()
        """Config object that specifies how to calculate the edit distance"""
        self.it1 = NodeIndexer(tree1, 0, self.config)
        """Stores the indexes of the first input tree"""
        self.it2 = NodeIndexer(tree2, 1, self.config)
        """Stores the indexes of the second input tree"""
    def compute_edit_distance(self):
        """Computes the tree edit distance between two trees by trying all
        possible TED mappings. It uses the specified cost model."""
        # Keep only the 1-1 mappings that satisfy the TED conditions, then
        # take the cheapest one.
        mappings = [
            mapping for mapping in self.generate_all_one_to_one_mappins()
            if self.is_ted_mapping(mapping)
        ]
        return self.get_min_cost(mappings)
    def generate_all_one_to_one_mappins(self):
        """Generate all possible 1-1 mappings.
        These mappings do not conform to TED conditions (sibling-order and
        ancestor-descendant).
        A mapping is a list of pairs (arrays) of preorder IDs (identifying
        nodes).
        return set of all 1-1 mappings

        (Note: the method name's "mappins" typo is kept because callers use it.)
        """
        # Seed: the empty mapping, represented as every node1 deleted
        # (node1, None) and every node2 inserted (None, node2).
        mappings = [
            [(node1, None) for node1 in self.it1.pre_ltr_info] +
            [(None, node2) for node2 in self.it2.pre_ltr_info]
        ]
        # For each node in the source tree
        for node1 in self.it1.pre_ltr_info:
            # Duplicate all mappings and store in mappings_copy.
            # mappings_copy is frozen for this node1, so each existing mapping
            # is extended by at most one (node1, node2) pair.
            mappings_copy = [
                copy(x) for x in mappings
            ]
            # For each node in the destination tree
            for node2 in self.it2.pre_ltr_info:
                # For each mapping (produced for all n1 values smaller than
                # current n1)
                for mapping in mappings_copy:
                    # Produce new mappings with the pair (n1, n2) by adding this
                    # pair to all mappings where it is valid to add
                    element_add = True
                    # Verify if (n1, n2) can be added to mapping m.
                    # All elements in m are checked with (n1, n2) for possible
                    # violation
                    # One-to-one condition: n2 must not already be mapped.
                    # (Pairs with a None component are pending delete/insert
                    # placeholders, not real matches, so they are skipped.)
                    for ele1, ele2 in mapping:
                        # n2 is not in any of the previously mapped pairs
                        if ele1 and ele2 and ele2 is node2:
                            element_add = False
                            break
                    # New mappings must be produced by duplicating a previous
                    # mapping and extending it by (n1, n2); the corresponding
                    # delete/insert placeholders are removed.
                    if element_add:
                        m_copy = copy(mapping)
                        m_copy.append((node1, node2))
                        m_copy.remove((node1, None))
                        m_copy.remove((None, node2))
                        mappings.append(m_copy)
        return mappings
    def is_ted_mapping(self, mapping):
        """Test if a 1-1 mapping is a TED mapping"""
        # pylint: disable=no-self-use, invalid-name
        # Validate each pair of pairs of mapped nodes in the mapping
        for node_a1, node_a2 in mapping:
            # Use only pairs of mapped nodes for validation.
            if node_a1 is None or node_a2 is None:
                continue
            for node_b1, node_b2 in mapping:
                # Use only pairs of mapped nodes for validation.
                if node_b1 is None or node_b2 is None:
                    continue
                # If any of the conditions below doesn't hold, discard m.
                # Validate ancestor-descendant condition: a precedes b in BOTH
                # left-to-right and right-to-left preorder iff a is an
                # ancestor of b; that relation must agree in both trees.
                n1 = (
                    node_a1.pre_ltr < node_b1.pre_ltr and
                    node_a1.pre_rtl < node_b1.pre_rtl
                )
                n2 = (
                    node_a2.pre_ltr < node_b2.pre_ltr and
                    node_a2.pre_rtl < node_b2.pre_rtl
                )
                if (n1 and not n2) or (not n1 and n2):
                    # Discard the mapping.
                    # If this condition doesn't hold, the next condition
                    # doesn't have to be verified any more and any other
                    # pair doesn't have to be verified any more.
                    return False
                # Validate sibling-order condition: a to the left of b
                # (earlier in LTR preorder, later in RTL preorder) must agree
                # in both trees.
                n1 = (
                    node_a1.pre_ltr < node_b1.pre_ltr and
                    node_a1.pre_rtl > node_b1.pre_rtl
                )
                n2 = (
                    node_a2.pre_ltr < node_b2.pre_ltr and
                    node_a2.pre_rtl > node_b2.pre_rtl
                )
                if (n1 and not n2) or (not n1 and n2):
                    # Discard the mapping.
                    return False
        return True
    def get_min_cost(self, mappings):
        """Given list of all TED mappings, calculate the cost of the
        minimal-cost mapping."""
        insert, delete = self.config.insert, self.config.delete
        rename = self.config.rename
        # Initialize min_cost to the upper bound
        min_cost = float('inf')
        # verify cost of each mapping
        for mapping in mappings:
            m_cost = 0
            # Sum up edit costs for all elements in the mapping m.
            # (node1, node2) -> rename; (node1, None) -> delete;
            # (None, node2) -> insert.
            for node1, node2 in mapping:
                if node1 and node2:
                    m_cost += rename(node1.node, node2.node)
                elif node1:
                    m_cost += delete(node1.node)
                else:
                    m_cost += insert(node2.node)
                # Break as soon as the current min_cost is exceeded.
                # Only for early loop break.
                if m_cost > min_cost:
                    break
            # Store the minimal cost - compare m_cost and min_cost
            min_cost = min(min_cost, m_cost)
        return min_cost
| 42.420455 | 80 | 0.583311 | 964 | 7,466 | 4.409751 | 0.274896 | 0.016937 | 0.00941 | 0.012232 | 0.234533 | 0.228652 | 0.21642 | 0.201835 | 0.175488 | 0.175488 | 0 | 0.019895 | 0.360434 | 7,466 | 175 | 81 | 42.662857 | 0.870366 | 0.425931 | 0 | 0.216867 | 0 | 0 | 0.000757 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060241 | false | 0 | 0.048193 | 0 | 0.192771 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cf89cd77b7a7a86eb1c509ae0d28c2801e9db09a | 9,359 | py | Python | util/dynamic_signal_lights.py | ashwxn/Intelligent-Traffic-Management-System-Using-ML-YOLO | cc111d9895efc19f052656f7d140c6895458a819 | [
"CC0-1.0"
] | 1 | 2021-03-11T06:58:31.000Z | 2021-03-11T06:58:31.000Z | util/dynamic_signal_lights.py | ashwxn/Intelligent-Traffic-Management-System-Using-ML-YOLO | cc111d9895efc19f052656f7d140c6895458a819 | [
"CC0-1.0"
] | null | null | null | util/dynamic_signal_lights.py | ashwxn/Intelligent-Traffic-Management-System-Using-ML-YOLO | cc111d9895efc19f052656f7d140c6895458a819 | [
"CC0-1.0"
] | null | null | null | import time
import emoji
def switch_signal(denser_lane,seconds):
    """Animate a four-lane traffic light in the terminal.

    Opens lane *denser_lane* (1-4): prints the grid with that lane's green
    light on and every other lane red, counts down *seconds* with one dot
    per second, then prints the all-red "closed" grid.

    Parameters:
        denser_lane - lane number to open (1, 2, 3, or 4)
        seconds     - how long the lane stays open

    NOTE(review): '\\033[99m' is not a standard SGR color code — confirm the
    intended color; the four branches differ only in which lane is green.
    """
    print('\033[1m' + '\n\033[99m' +
          "OPENING LANE-{}: ".format(str(denser_lane))+ '\033[0m' )
    print("----------------------------------------------------------------------------------")
    if denser_lane==1:
        # Lane 1 open: white top / green bottom in column 1, red elsewhere.
        print(
            "Lane 1            Lane 2          Lane 3            Lane 4"
        )
        time.sleep(1)
        print(
            "    "+ emoji.emojize(":white_circle:") + "             "+emoji.emojize(":red_circle:")+ "             "+emoji.emojize(":red_circle:")+ "              "+emoji.emojize(":red_circle:")+
            "\n    " + emoji.emojize(":white_circle:") + "             "+emoji.emojize(":white_circle:")+ "             "+emoji.emojize(":white_circle:")+ "              "+emoji.emojize(":white_circle:")+
            "\n    " + emoji.emojize(":green_circle:") + "             "+emoji.emojize(":white_circle:")+ "             "+emoji.emojize(":white_circle:")+ "              "+emoji.emojize(":white_circle:") +
            "\n")
        print('\033[0m' + '\n\033[99m' +
              "LANE-{} is now OPEN and will CLOSE after {} seconds ".format(str(denser_lane),str(seconds))+ '\033[0m' ,end="")
        # Countdown: one dot per second. (mins/secs from divmod are unused.)
        while seconds:
            mins, secs = divmod(seconds, 60)
            print('\033[99m'+".", end="")
            time.sleep(1)
            seconds -= 1
        print()
        print('\033[1m' + '\n\033[99m' +
              "CLOSING LANE-{}: ".format(str(denser_lane))+ '\033[0m' )
        print("----------------------------------------------------------------------------------")
        time.sleep(1)
        print()
        # All lanes red again.
        print(
            "Lane 1            Lane 2          Lane 3            Lane 4"
        )
        print(
            "    "+ emoji.emojize(":red_circle:") + "             "+emoji.emojize(":red_circle:")+ "             "+emoji.emojize(":red_circle:")+ "              "+emoji.emojize(":red_circle:")+
            "\n    " + emoji.emojize(":white_circle:") + "             "+emoji.emojize(":white_circle:")+ "             "+emoji.emojize(":white_circle:")+ "              "+emoji.emojize(":white_circle:")+
            "\n    " + emoji.emojize(":white_circle:") + "             "+emoji.emojize(":white_circle:")+ "             "+emoji.emojize(":white_circle:")+ "              "+emoji.emojize(":white_circle:") +
            "\n")
    elif denser_lane==2:
        # Lane 2 open: same animation with the green light under lane 2.
        print(
            "Lane 1            Lane 2          Lane 3            Lane 4"
        )
        time.sleep(1)
        print(
            "    "+ emoji.emojize(":red_circle:") + "             "+emoji.emojize(":white_circle:")+ "             "+emoji.emojize(":red_circle:")+ "              "+emoji.emojize(":red_circle:")+
            "\n    " + emoji.emojize(":white_circle:") + "             "+emoji.emojize(":white_circle:")+ "             "+emoji.emojize(":white_circle:")+ "              "+emoji.emojize(":white_circle:")+
            "\n    " + emoji.emojize(":white_circle:") + "             "+emoji.emojize(":green_circle:")+ "             "+emoji.emojize(":white_circle:")+ "              "+emoji.emojize(":white_circle:") +
            "\n")
        print('\033[0m' + '\n\033[99m' +
              "LANE-{} is now OPEN and will CLOSE after {} seconds ".format(str(denser_lane),str(seconds))+ '\033[0m' ,end="")
        # Countdown: one dot per second. (mins/secs from divmod are unused.)
        while seconds:
            mins, secs = divmod(seconds, 60)
            print('\033[99m'+".", end="")
            time.sleep(1)
            seconds -= 1
        print()
        print('\033[1m' + '\n\033[99m' +
              "CLOSING LANE-{}: ".format(str(denser_lane))+ '\033[0m' )
        print("----------------------------------------------------------------------------------")
        time.sleep(1)
        print()
        # All lanes red again.
        print(
            "Lane 1            Lane 2          Lane 3            Lane 4"
        )
        print(
            "    "+ emoji.emojize(":red_circle:") + "             "+emoji.emojize(":red_circle:")+ "             "+emoji.emojize(":red_circle:")+ "              "+emoji.emojize(":red_circle:")+
            "\n    " + emoji.emojize(":white_circle:") + "             "+emoji.emojize(":white_circle:")+ "             "+emoji.emojize(":white_circle:")+ "              "+emoji.emojize(":white_circle:")+
            "\n    " + emoji.emojize(":white_circle:") + "             "+emoji.emojize(":white_circle:")+ "             "+emoji.emojize(":white_circle:")+ "              "+emoji.emojize(":white_circle:") +
            "\n")
    elif denser_lane==3:
        # Lane 3 open: same animation with the green light under lane 3.
        print(
            "Lane 1            Lane 2          Lane 3            Lane 4"
        )
        time.sleep(1)
        print(
            "    "+ emoji.emojize(":red_circle:") + "             "+emoji.emojize(":red_circle:")+ "             "+emoji.emojize(":white_circle:")+ "              "+emoji.emojize(":red_circle:")+
            "\n    " + emoji.emojize(":white_circle:") + "             "+emoji.emojize(":white_circle:")+ "             "+emoji.emojize(":white_circle:")+ "              "+emoji.emojize(":white_circle:")+
            "\n    " + emoji.emojize(":white_circle:") + "             "+emoji.emojize(":white_circle:")+ "             "+emoji.emojize(":green_circle:")+ "              "+emoji.emojize(":white_circle:") +
            "\n")
        print('\033[0m' + '\n\033[99m' +
              "LANE-{} is now OPEN and will CLOSE after {} seconds ".format(str(denser_lane),str(seconds))+ '\033[0m' ,end="")
        # Countdown: one dot per second. (mins/secs from divmod are unused.)
        while seconds:
            mins, secs = divmod(seconds, 60)
            print('\033[99m'+".", end="")
            time.sleep(1)
            seconds -= 1
        print()
        print('\033[1m' + '\n\033[99m' +
              "CLOSING LANE-{}: ".format(str(denser_lane))+ '\033[0m' )
        print("----------------------------------------------------------------------------------")
        time.sleep(1)
        print()
        # All lanes red again.
        print(
            "Lane 1            Lane 2          Lane 3            Lane 4"
        )
        print(
            "    "+ emoji.emojize(":red_circle:") + "             "+emoji.emojize(":red_circle:")+ "             "+emoji.emojize(":red_circle:")+ "              "+emoji.emojize(":red_circle:")+
            "\n    " + emoji.emojize(":white_circle:") + "             "+emoji.emojize(":white_circle:")+ "             "+emoji.emojize(":white_circle:")+ "              "+emoji.emojize(":white_circle:")+
            "\n    " + emoji.emojize(":white_circle:") + "             "+emoji.emojize(":white_circle:")+ "             "+emoji.emojize(":white_circle:")+ "              "+emoji.emojize(":white_circle:") +
            "\n")
    elif denser_lane==4:
        # Lane 4 open: same animation with the green light under lane 4.
        print(
            "Lane 1            Lane 2          Lane 3            Lane 4"
        )
        time.sleep(1)
        print(
            "    "+ emoji.emojize(":red_circle:") + "             "+emoji.emojize(":red_circle:")+ "             "+emoji.emojize(":red_circle:")+ "              "+emoji.emojize(":white_circle:")+
            "\n    " + emoji.emojize(":white_circle:") + "             "+emoji.emojize(":white_circle:")+ "             "+emoji.emojize(":white_circle:")+ "              "+emoji.emojize(":white_circle:")+
            "\n    " + emoji.emojize(":white_circle:") + "             "+emoji.emojize(":white_circle:")+ "             "+emoji.emojize(":white_circle:")+ "              "+emoji.emojize(":green_circle:") +
            "\n")
        print('\033[0m' + '\n\033[99m' +
              "LANE-{} is now OPEN and will CLOSE after {} seconds ".format(str(denser_lane),str(seconds))+ '\033[0m' ,end="")
        # Countdown: one dot per second. (mins/secs from divmod are unused.)
        while seconds:
            mins, secs = divmod(seconds, 60)
            print('\033[99m'+".", end="")
            time.sleep(1)
            seconds -= 1
        print()
        print('\033[1m' + '\n\033[99m' +
              "CLOSING LANE-{}: ".format(str(denser_lane))+ '\033[0m' )
        print("----------------------------------------------------------------------------------")
        time.sleep(1)
        print()
        # All lanes red again.
        print(
            "Lane 1            Lane 2          Lane 3            Lane 4"
        )
        print(
            "    "+ emoji.emojize(":red_circle:") + "             "+emoji.emojize(":red_circle:")+ "             "+emoji.emojize(":red_circle:")+ "              "+emoji.emojize(":red_circle:")+
            "\n    " + emoji.emojize(":white_circle:") + "             "+emoji.emojize(":white_circle:")+ "             "+emoji.emojize(":white_circle:")+ "              "+emoji.emojize(":white_circle:")+
            "\n    " + emoji.emojize(":white_circle:") + "             "+emoji.emojize(":white_circle:")+ "             "+emoji.emojize(":white_circle:")+ "              "+emoji.emojize(":white_circle:") +
            "\n")
    # NOTE(review): the '\033[0m' reset is concatenated INSIDE the format
    # argument (after the lane number) rather than after the sentence —
    # probably meant to follow the closing text; confirm before changing.
    print('\033[0m' + '\n\033[99m' +
          "LANE-{} is now CLOSED ".format(str(denser_lane)+ '\033[0m' ))
| 69.843284 | 221 | 0.398761 | 797 | 9,359 | 4.542033 | 0.056462 | 0.318232 | 0.358011 | 0.40663 | 0.977348 | 0.977348 | 0.966022 | 0.966022 | 0.956906 | 0.956906 | 0 | 0.036313 | 0.382092 | 9,359 | 134 | 222 | 69.843284 | 0.589659 | 0 | 0 | 0.810606 | 0 | 0 | 0.465064 | 0.043803 | 0 | 0 | 0 | 0 | 0 | 1 | 0.007576 | false | 0 | 0.015152 | 0 | 0.022727 | 0.325758 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 |
cf8a7c68901bef8af36175c6396dc707d25c27e2 | 4,429 | py | Python | Antics/AI/AIPlayer.py | sundercode/AI-Homework | 423f703685852313bc127338f9cf6b4e862b898e | [
"MIT"
] | null | null | null | Antics/AI/AIPlayer.py | sundercode/AI-Homework | 423f703685852313bc127338f9cf6b4e862b898e | [
"MIT"
] | null | null | null | Antics/AI/AIPlayer.py | sundercode/AI-Homework | 423f703685852313bc127338f9cf6b4e862b898e | [
"MIT"
] | null | null | null | import random
import sys
sys.path.append("..") #so other modules can be found in parent dir
from Player import *
from Constants import *
from Construction import CONSTR_STATS
from Ant import UNIT_STATS
from Move import Move
from GameState import *
from AIPlayerUtils import *
##
#AIPlayer
#Description: The responsbility of this class is to interact with the game by
#deciding a valid move based on a given game state. This class has methods that
#will be implemented by students in Dr. Nuxoll's AI course.
#
#Variables:
# playerId - The id of the player.
##
class AIPlayer(Player):
    """A player that chooses random (but legal) placements, moves and attacks.

    The responsibility of this class is to interact with the game by deciding
    a valid move based on a given game state.

    Variables:
        playerId - The id of the player.
    """

    def __init__(self, inputPlayerId):
        """Creates a new random Player.

        Parameters:
            inputPlayerId - The id to give the new player (int)
        """
        super(AIPlayer, self).__init__(inputPlayerId, "Random")

    def _randomPlacements(self, currentState, numToPlace, yMin, yMax):
        """Pick numToPlace distinct empty coordinates with y in [yMin, yMax].

        Helper that replaces the two near-identical loops the original
        getPlacement duplicated for SETUP_PHASE_1 and SETUP_PHASE_2.
        Any x in [0, 9] may be chosen; a square is accepted only if it holds
        no construction and was not already picked during this call.
        """
        moves = []
        for _ in range(numToPlace):
            move = None
            while move is None:
                x = random.randint(0, 9)
                y = random.randint(yMin, yMax)
                if currentState.board[x][y].constr == None and (x, y) not in moves:
                    move = (x, y)
                    # NOTE(review): the original contained the no-op expression
                    # "currentState.board[x][y].constr == True" (a comparison,
                    # not an assignment), so the board was never actually
                    # marked; duplicates are prevented by "not in moves".
            moves.append(move)
        return moves

    def getPlacement(self, currentState):
        """Called during the setup phase for each Construction to be placed:
        1 anthill, 1 tunnel and 9 grass on the player's side, then 2 food on
        the enemy's side.

        Parameters:
            currentState - the state of the game at this point in time.

        Return: the list of coordinates where the constructions go.
        """
        if currentState.phase == SETUP_PHASE_1:    # stuff on my side
            return self._randomPlacements(currentState, 11, 0, 3)
        elif currentState.phase == SETUP_PHASE_2:  # stuff on foe's side
            return self._randomPlacements(currentState, 2, 6, 9)
        else:
            return [(0, 0)]

    def getMove(self, currentState):
        """Gets the next move from the Player: a uniformly random legal move,
        re-drawn while it is a BUILD move and the player already has 3+ ants.

        Parameters:
            currentState - The state of the current game waiting for the
                           player's move (GameState)

        Return: The Move to be made
        """
        moves = listAllLegalMoves(currentState)
        selectedMove = moves[random.randint(0, len(moves) - 1)]
        # don't do a build move if there are already 3+ ants
        numAnts = len(currentState.inventories[currentState.whoseTurn].ants)
        while selectedMove.moveType == BUILD and numAnts >= 3:
            selectedMove = moves[random.randint(0, len(moves) - 1)]
        return selectedMove

    def getAttack(self, currentState, attackingAnt, enemyLocations):
        """Gets the attack to be made from the Player.

        Parameters:
            currentState  - A clone of the current state (GameState)
            attackingAnt  - The ant currently making the attack (Ant)
            enemyLocations - The Locations of the Enemies that can be
                             attacked (Location[])
        """
        # Attack a random enemy.
        return enemyLocations[random.randint(0, len(enemyLocations) - 1)]
| 37.533898 | 105 | 0.589298 | 551 | 4,429 | 4.704174 | 0.303085 | 0.006173 | 0.032407 | 0.029321 | 0.337191 | 0.283179 | 0.283179 | 0.261574 | 0.23071 | 0.23071 | 0 | 0.010183 | 0.334839 | 4,429 | 117 | 106 | 37.854701 | 0.869654 | 0.413186 | 0 | 0.423077 | 0 | 0 | 0.003163 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.173077 | 0.019231 | 0.365385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d83daa61f951ded7d9855286838edef9a66c37b5 | 170 | py | Python | AIZU_ONLINE_JUDGE/0007.py | vox256/Codes | c408ef0fbc25af46dacef93b3496985feb98dd5c | [
"MIT"
] | null | null | null | AIZU_ONLINE_JUDGE/0007.py | vox256/Codes | c408ef0fbc25af46dacef93b3496985feb98dd5c | [
"MIT"
] | null | null | null | AIZU_ONLINE_JUDGE/0007.py | vox256/Codes | c408ef0fbc25af46dacef93b3496985feb98dd5c | [
"MIT"
] | null | null | null | n = int(input())
debt = 100000
for i in range (n):
debt *= 1.05
if debt % 1000 != 0:
debt -= debt % 1000
debt += 1000
print (int(debt)) | 17 | 28 | 0.476471 | 25 | 170 | 3.24 | 0.6 | 0.296296 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.207547 | 0.376471 | 170 | 10 | 29 | 17 | 0.556604 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.125 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
d8415d3e67ce2c47d7251854165bcf91208abf86 | 22,718 | py | Python | pysnptools/util/mapreduce1/runner/hpc.py | fastlmm/PySnpTools | ce2ecaa5548e82b64c8ed6a205dbf419701b66b6 | [
"Apache-2.0"
] | 13 | 2019-12-23T06:51:08.000Z | 2022-01-07T18:14:55.000Z | pysnptools/util/mapreduce1/runner/hpc.py | fastlmm/PySnpTools | ce2ecaa5548e82b64c8ed6a205dbf419701b66b6 | [
"Apache-2.0"
] | 3 | 2020-07-30T16:07:43.000Z | 2021-07-14T09:00:42.000Z | pysnptools/util/mapreduce1/runner/hpc.py | fastlmm/PySnpTools | ce2ecaa5548e82b64c8ed6a205dbf419701b66b6 | [
"Apache-2.0"
] | 3 | 2020-05-22T09:46:16.000Z | 2021-01-26T13:27:36.000Z |
from pysnptools.util.mapreduce1.runner import *
import os
import subprocess, sys, os.path
import multiprocessing
import pysnptools.util as pstutil
import pdb
import logging
try:
import dill as pickle
except:
logging.warning("Can't import dill, so won't be able to clusterize lambda expressions. If you try, you'll get this error 'Can't pickle <type 'function'>: attribute lookup __builtin__.function failed'")
import cPickle as pickle
class HPC(Runner):
'''
Old code to run on a Microsoft Widows HPC Cluster. Not currently supported.
'''
#!!LATER make it (and Hadoop) work from root directories -- or give a clear error message
def __init__(self, taskcount, clustername, fileshare, priority="Normal", unit="core", mkl_num_threads=None, runtime="infinite", remote_python_parent=None,
update_remote_python_parent=False, min=None, max=None, excluded_nodes=[], template=None, nodegroups=None, skipinputcopy=False, node_local=True,clean_up=True,preemptable=True,FailOnTaskFailure=False,logging_handler=logging.StreamHandler(sys.stdout)):
logger = logging.getLogger()
if not logger.handlers:
logger.setLevel(logging.INFO)
for h in list(logger.handlers):
logger.removeHandler(h)
logger.addHandler(logging_handler)
if logger.level == logging.NOTSET:
logger.setLevel(logging.INFO)
self.taskcount = taskcount
self.clustername = clustername
self.fileshare = fileshare
self.priority = priority
self.runtime = runtime
self.unit = unit
self.excluded_nodes = excluded_nodes
self.min = min
self.max = max
self.remote_python_parent = remote_python_parent
self.update_remote_python_parent = update_remote_python_parent
self.CheckUnitAndMKLNumThreads(mkl_num_threads, unit)
self.skipinputcopy=skipinputcopy
self.template = template
self.nodegroups = nodegroups
self.node_local = node_local
self.clean_up = clean_up
self.preemptable = preemptable
self.FailOnTaskFailure = FailOnTaskFailure
    def run(self, distributable):
        """Run *distributable* on the cluster and return its unpickled result.

        Copies inputs and the local PYTHONPATH source to the file share,
        writes a .bat driver, submits the job, waits for completion, copies
        outputs back, and loads 'result.p' produced by the reduce task.
        Raises if PYTHONPATH is unset locally or no result file appears.
        """
        # Check that the local machine has python path set
        localpythonpath = os.environ.get("PYTHONPATH")#!!should it be able to work without pythonpath being set (e.g. if there was just one file)? Also, is None really the return or is it an exception.
        if localpythonpath is None: raise Exception("Expect local machine to have 'pythonpath' set")
        remotepythoninstall = self.check_remote_pythoninstall()
        # remotewd: shared working dir; run_dir_*: per-run subdir; nodelocalwd: node-local dir.
        remotewd, run_dir_abs, run_dir_rel, nodelocalwd = self.create_run_dir()
        pstutil.create_directory_if_necessary(os.path.join(remotewd, distributable.tempdirectory), isfile=False) #create temp directory now so that cluster tasks won't try to create it many times at once
        result_remote = os.path.join(run_dir_abs,"result.p")
        self.copy_python_settings(run_dir_abs)
        inputOutputCopier = HPCCopier(remotewd,skipinput=self.skipinputcopy) #Create the object that copies input and output files to where they are needed
        inputOutputCopier.input(distributable) # copy of the input files to where they are needed (i.e. the cluster)
        remotepythonpath = self.FindOrCreateRemotePythonPath(localpythonpath, run_dir_abs)
        batfilename_rel = self.create_bat_file(distributable, remotepythoninstall, remotepythonpath, remotewd, run_dir_abs, run_dir_rel, result_remote, nodelocalwd, distributable)
        # Blocks until the HPC job finishes (polled inside submit_to_cluster).
        self.submit_to_cluster(batfilename_rel, distributable, remotewd, run_dir_abs, run_dir_rel, nodelocalwd)
        inputOutputCopier.output(distributable) # copy the output file from where they were created (i.e. the cluster) to the local computer
        assert os.path.exists(result_remote), "The HPC job produced no result (and, thus, likely failed)"
        with open(result_remote, mode='rb') as f:
            result = pickle.load(f)
        #logging.info('Done: HPC runner is running a distributable. Returns {0}'.format(result))
        return result
def CheckUnitAndMKLNumThreads(self, mkl_num_threads, unit):
if unit.lower() == "core":
if mkl_num_threads is not None and mkl_num_threads!=1 : raise Exception("When 'unit' is 'core', mkl_num_threads must be unspecified or 1")
self.mkl_num_threads = 1
elif unit.lower() == "socket":
if mkl_num_threads is None : raise Exception("When 'unit' is 'socket', mkl_num_threads must be specified")
self.mkl_num_threads = mkl_num_threads
elif unit.lower() == "node":
self.mkl_num_threads = mkl_num_threads
else :
raise Exception("Expect 'unit' to be 'core', 'socket', or 'node'")
def copy_python_settings(self, run_dir_abs):
#localuserprofile = os.environ.get("USERPROFILE")
user_python_settings=".continuum"
python_settings=os.path.join(self.fileshare,user_python_settings)
if os.path.exists(python_settings):
import shutil
remote_user_python_settings=os.path.join(run_dir_abs,user_python_settings)
shutil.copytree(python_settings,remote_user_python_settings)
def FindOrCreateRemotePythonPath(self, localpythonpath, run_dir_abs):
if self.remote_python_parent is None:
remotepythonpath = self.CopySource(localpythonpath, run_dir_abs)
else:
pstutil.create_directory_if_necessary(self.remote_python_parent,isfile=False)
list = []
for rel in os.listdir(self.remote_python_parent):
list.append(os.path.join(self.remote_python_parent,rel))
remotepythonpath = ";".join(list)
if self.update_remote_python_parent:
remotepythonpath = self.CopySource(localpythonpath, run_dir_abs)
return remotepythonpath
def numString(self):
if self.min is None and self.max is None:
return " -Num{0} *-*".format(self.unit.capitalize())
if self.min is None:
return " -Num{0} {1}".format(self.unit.capitalize(), self.max)
if self.max is None:
return " -Num{0} {1}-*".format(self.unit.capitalize(), self.min)
return " -Num{0} {1}-{2}".format(self.unit.capitalize(), self.min, self.max)
    def submit_to_cluster(self, batfilename_rel, distributable, remotewd, run_dir_abs, run_dir_rel, nodelocalwd):
        """Generate a PowerShell script that creates, populates and submits
        the HPC job, run it, and poll until the job finishes.

        The job contains a parametric map task per chunk plus a reduce task
        (and, when self.node_local is set, node-prep/release tasks).  Blocks
        until the job reaches Finished/Failed/Canceled.
        """
        # Per-run stdout/stderr directories for the cluster tasks.
        stdout_dir_rel = os.path.join(run_dir_rel,"stdout")
        stdout_dir_abs = os.path.join(run_dir_abs,"stdout")
        pstutil.create_directory_if_necessary(stdout_dir_abs, isfile=False)
        stderr_dir_rel = os.path.join(run_dir_rel,"stderr")
        stderr_dir_abs = os.path.join(run_dir_abs,"stderr")
        pstutil.create_directory_if_necessary(stderr_dir_abs, isfile=False)
        # Optional Set-HpcJob line excluding the configured nodes.
        if len(self.excluded_nodes) > 0:
            excluded_nodes = "Set-HpcJob -Id $r.Id -addExcludedNodes {0}".format(", ".join(self.excluded_nodes))
        else:
            excluded_nodes = ""
        #create the Powershell file
        psfilename_rel = os.path.join(run_dir_rel,"dist.ps1")
        psfilename_abs = os.path.join(run_dir_abs,"dist.ps1")
        pstutil.create_directory_if_necessary(psfilename_abs, isfile=True)
        with open(psfilename_abs, "w") as psfile:
            # NOTE: the template is a raw string written verbatim to dist.ps1;
            # {10}/{11} stand for literal '{'/'}' braces in PowerShell.
            psfile.write(r"""Add-PsSnapin Microsoft.HPC
        Set-Content Env:CCP_SCHEDULER {0}
        $r = New-HpcJob -Name "{7}" -Priority {8}{12}{14}{16} -RunTime {15} -FailOnTaskFailure {23} #-Preemptable {22}
        $r.Id
        if ({20})
        {10}
           $from = "{4}"
           $to = "{17}"
           Add-HpcTask -Name NodePrep -JobId $r.Id -Type NodePrep -CommandLine "${{from}}\{18}" -StdOut "${{from}}\{2}\nodeprep.txt" -StdErr "${{from}}\{3}\nodeprep.txt" -WorkDir .
           Add-HpcTask -Name Parametric -JobId $r.Id -Parametric -Start 0 -End {1} -CommandLine "${{from}}\{6} * {5}" -StdOut "${{from}}\{2}\*.txt" -StdErr "${{from}}\{3}\*.txt" -WorkDir $to
           Add-HpcTask -Name Reduce -JobId $r.Id -Depend Parametric -CommandLine "${{from}}\{6} {5} {5}" -StdOut "${{from}}\{2}\reduce.txt" -StdErr "${{from}}\{3}\reduce.txt" -WorkDir $to
           {21}Add-HpcTask -Name NodeRelease -JobId $r.Id -Type NodeRelease -CommandLine "${{from}}\{19}" -StdOut "${{from}}\{2}\noderelease.txt" -StdErr "${{from}}\{3}\noderelease.txt" -WorkDir .
        {11}
        else
        {10}
           Add-HpcTask -Name Parametric -JobId $r.Id -Parametric -Start 0 -End {1} -CommandLine "{6} * {5}" -StdOut "{2}\*.txt" -StdErr "{3}\*.txt" -WorkDir {4}
           Add-HpcTask -Name Reduce -JobId $r.Id -Depend Parametric -CommandLine "{6} {5} {5}" -StdOut "{2}\reduce.txt" -StdErr "{3}\reduce.txt" -WorkDir {4}
        {11}
        {13}
        Submit-HpcJob -Id $r.Id
        $j = Get-HpcJob -Id $r.Id
        $i = $r.id
        $s = 10
        while(($j.State -ne "Finished") -and ($j.State -ne "Failed") -and ($j.State -ne "Canceled"))
        {10}
            $x = $j.State
            Write-Host "${10}x{11}. Job# ${10}i{11} sleeping for ${10}s{11}"
            Start-Sleep -s $s
            if ($s -ge 60)
                {10}
                $s = 60
                {11}
            else
                {10}
                $s = $s * 1.1
                {11}
            $j.Refresh()
        {11}
        """                 .format(
                    self.clustername,                   #0
                    self.taskcount-1,                   #1
                    stdout_dir_rel,                     #2
                    stderr_dir_rel,                     #3
                    remotewd,                           #4 fileshare wd
                    self.taskcount,                     #5
                    batfilename_rel,                    #6
                    self.maxlen(str(distributable),50), #7
                    self.priority,                      #8
                    self.unit,                          #9 -- not used anymore,. Instead #12 sets unit
                    "{",                                #10
                    "}",                                #11
                    self.numString(),                   #12
                    excluded_nodes,                     #13
                    ' -templateName "{0}"'.format(self.template) if self.template is not None else "", #14
                    self.runtime,                       #15 RuntimeSeconds
                    ' -NodeGroups "{0}"'.format(self.nodegroups) if self.nodegroups is not None else "", #16
                    nodelocalwd,                        #17 the node-local wd
                    batfilename_rel[0:-8]+"nodeprep.bat",    #18
                    batfilename_rel[0:-8]+"noderelease.bat", #19
                    1 if self.node_local else 0,        #20
                    "",                                 #21 always run release task
                    self.preemptable,                   #22
                    '$true' if self.FailOnTaskFailure else '$false',                  #23
                    ))
        assert batfilename_rel[-8:] == "dist.bat", "real assert"
        # Run the generated script synchronously; it polls until the job ends.
        import subprocess
        proc = subprocess.Popen(["powershell.exe", "-ExecutionPolicy", "Unrestricted", psfilename_abs], cwd=os.getcwd())
        if not 0 == proc.wait(): raise Exception("Running powershell cluster submit script results in non-zero return code")
#move to utils?
@staticmethod
def maxlen(s,max):
'''
Truncate cluster job name if longer than max.
'''
if len(s) <= max:
return s
else:
#return s[0:max-1]
return s[-max:] #JL: I prefer the end of the name rather than the start
def create_distributablep(self, distributable, run_dir_abs, run_dir_rel):
distributablep_filename_rel = os.path.join(run_dir_rel, "distributable.p")
distributablep_filename_abs = os.path.join(run_dir_abs, "distributable.p")
with open(distributablep_filename_abs, mode='wb') as f:
pickle.dump(distributable, f, pickle.HIGHEST_PROTOCOL)
return distributablep_filename_rel, distributablep_filename_abs
@staticmethod
def FindDirectoriesToExclude(localpythonpathdir):
logging.info("Looking in '{0}' for directories to skip".format(localpythonpathdir))
xd_string = " /XD $TF /XD .git"
for root, dir, files in os.walk(localpythonpathdir):
for file in files:
if file.lower() == ".ignoretgzchange":
xd_string += " /XD {0}".format(root)
return xd_string
def CopySource(self,localpythonpath, run_dir_abs):
if self.update_remote_python_parent:
remote_python_parent = self.remote_python_parent
else:
remote_python_parent = run_dir_abs + os.path.sep + "pythonpath"
pstutil.create_directory_if_necessary(remote_python_parent, isfile=False)
remotepythonpath_list = []
for i, localpythonpathdir in enumerate(localpythonpath.split(';')):
remotepythonpathdir = os.path.join(remote_python_parent, str(i))
remotepythonpath_list.append(remotepythonpathdir)
xd_string = HPC.FindDirectoriesToExclude(localpythonpathdir)
xcopycommand = 'robocopy /s {0} {1}{2}'.format(localpythonpathdir,remotepythonpathdir,xd_string)
logging.info(xcopycommand)
os.system(xcopycommand)
remotepythonpath = ";".join(remotepythonpath_list)
return remotepythonpath
    def create_bat_file(self, distributable, remotepythoninstall, remotepythonpath, remotewd, run_dir_abs, run_dir_rel, result_remote, nodelocalwd, create_bat_file):
        '''
        Write the "dist.bat" launcher for one cluster task.

        The bat file sets PATH/PYTHONPATH and per-run matplotlib/IPython
        config directories, then runs distributable.py with a LocalInParts
        command string. When node-local execution is enabled, companion
        nodeprep.bat / noderelease.bat scripts are also written that stage
        inputs to the node-local directory and copy outputs back.

        Returns the run-relative path of dist.bat.

        NOTE(review): this code is Python-2 only (str.encode("string-escape")).
        The 'result_remote' and trailing 'create_bat_file' parameters are
        unused in this body — presumably legacy; confirm before removing.
        '''
        # Verify the remote python install has the expected layout and build
        # the PATH entries (install root plus its Scripts directory).
        path_share_list = [r"",r"Scripts"]
        remotepath_list = []
        for path_share in path_share_list:
            path_share_abs = os.path.join(remotepythoninstall,path_share)
            if not os.path.isdir(path_share_abs): raise Exception("Expect path directory at '{0}'".format(path_share_abs))
            remotepath_list.append(path_share_abs)
        remotepath = ";".join(remotepath_list)
        # Pickle the distributable into the run directory.
        distributablep_filename_rel, distributablep_filename_abs = self.create_distributablep(distributable, run_dir_abs, run_dir_rel)
        # Locate distributable.py on the REMOTE python path (the local copy is
        # only used to get the file name).
        distributable_py_file = os.path.join(os.path.dirname(__file__),"..","distributable.py")
        if not os.path.exists(distributable_py_file): raise Exception("Expect file at " + distributable_py_file + ", but it doesn't exist.")
        localfilepath, file = os.path.split(distributable_py_file)
        for remote_path_part in remotepythonpath.split(';'):
            remoteexe = os.path.join(remote_path_part,"fastlmm","util",file)
            if os.path.exists(remoteexe):
                break #not continue
            remoteexe = None
        assert remoteexe is not None, "Could not find '{0}' on remote python path. Is fastlmm on your local python path?".format(file)
        #run_dir_rel + os.path.sep + "pythonpath" + os.path.sep + os.path.splitdrive(localfilepath)[1]
        #result_remote2 = result_remote.encode("string-escape")
        # Build the command line; %1 is the task index passed by the scheduler.
        # Doubled quotes ("") survive the batch-file quoting rules.
        command_string = remoteexe + r""" "{0}" """.format(distributablep_filename_abs) + r""" "LocalInParts(%1,{0},mkl_num_threads={1},result_file=""{2}"",run_dir=""{3}"") " """.format(
            self.taskcount,
            self.mkl_num_threads,
            "result.p",
            run_dir_abs.encode("string-escape"))
        batfilename_rel = os.path.join(run_dir_rel,"dist.bat")
        batfilename_abs = os.path.join(run_dir_abs,"dist.bat")
        pstutil.create_directory_if_necessary(batfilename_abs, isfile=True)
        # Per-run matplotlib and IPython config dirs so concurrent tasks do
        # not fight over the user profile.
        matplotlibfilename_rel = os.path.join(run_dir_rel,".matplotlib")
        matplotlibfilename_abs = os.path.join(run_dir_abs,".matplotlib")
        pstutil.create_directory_if_necessary(matplotlibfilename_abs, isfile=False)
        pstutil.create_directory_if_necessary(matplotlibfilename_abs + "/tex.cache", isfile=False)
        ipythondir_rel = os.path.join(run_dir_rel,".ipython")
        ipythondir_abs = os.path.join(run_dir_abs,".ipython")
        pstutil.create_directory_if_necessary(ipythondir_abs, isfile=False)
        with open(batfilename_abs, "w") as batfile:
            batfile.write("set path={0};%path%\n".format(remotepath))
            batfile.write("set PYTHONPATH={0}\n".format(remotepythonpath))
            batfile.write("set USERPROFILE={0}\n".format(run_dir_abs))
            batfile.write("set MPLCONFIGDIR={0}\n".format(matplotlibfilename_abs))
            batfile.write("set IPYTHONDIR={0}\n".format(ipythondir_abs))
            batfile.write("python {0}\n".format(command_string))
        if (self.node_local):
            # In the bat scripts %f% is the fileshare wd and %t% the
            # node-local wd; HPCCopierNodeLocal appends the per-file
            # staging/copy-back commands between the headers and the footer.
            with open( os.path.join(run_dir_abs,"nodeprep.bat"), "w") as prepfile:
                prepfile.write(r"""set f="{0}"{1}""".format(remotewd,'\n'))
                prepfile.write(r"""set t="{0}"{1}""".format(nodelocalwd,'\n'))
                prepfile.write("if not exist %t% mkdir %t%\n")
                with open( os.path.join(run_dir_abs,"noderelease.bat"), "w") as releasefile:
                    releasefile.write(r"""set f="{0}"{1}""".format(remotewd,'\n'))
                    releasefile.write(r"""set t="{0}"{1}""".format(nodelocalwd,'\n'))
                    inputOutputCopier = HPCCopierNodeLocal(prepfile,releasefile,self.clean_up) #Create the object that copies input and output files to where they are needed
                    inputOutputCopier.input(distributable) # copy of the input files to where they are needed (i.e. to the cluster)
                    inputOutputCopier.output(distributable) # copy of the output files to where they are needed (i.e. off the cluster)
                    releasefile.write("rmdir /s %t%\n")
                    releasefile.write("exit /b 0\n")
        return batfilename_rel
def check_remote_pythoninstall(self):
remotepythoninstall = r"\\GCR\Scratch\RR1\escience\pythonInstallD" #!!! don't hardwire this
if not os.path.isdir(remotepythoninstall): raise Exception("Expect Python and related directories at '{0}'".format(remotepythoninstall))
return remotepythoninstall
    def create_run_dir(self):
        '''
        Create a date-stamped run directory under the remote working dir.

        The remote wd mirrors the local cwd, re-rooted under the fileshare
        and the current user name; the node-local wd mirrors the same layout
        under a fixed d:\scratch\escience root on the compute node.

        Returns (remotewd, run_dir_abs, run_dir_rel, nodelocalwd).
        '''
        username = os.environ["USERNAME"]
        localwd = os.getcwd()
        #!!make an option to specify the full remote WD. Also what is the "\\\\" case for?
        if localwd.startswith("\\\\"):
            # localwd is itself a UNC path: drop the leading \\host\share
            # pieces (first four '\'-separated parts) before re-rooting.
            remotewd = self.fileshare + os.path.sep + username +os.path.sep + "\\".join(localwd.split('\\')[4:])
            # NOTE(review): "d:\scratch\escience" relies on \s and \e being
            # invalid (hence literal) escapes; a raw string would be safer.
            nodelocalwd = "d:\scratch\escience" + os.path.sep + username +os.path.sep + "\\".join(localwd.split('\\')[4:]) #!!!const
        else:
            remotewd = self.fileshare + os.path.sep + username + os.path.splitdrive(localwd)[1] #using '+' because 'os.path.join' isn't work with shares
            nodelocalwd = "d:\scratch\escience" + os.path.sep + username + os.path.splitdrive(localwd)[1] #!!! const
        import datetime
        now = datetime.datetime.now()  # NOTE(review): unused; _datestamp supplies the timestamp
        # Unique run dir: date stamp plus a random suffix to avoid collisions.
        run_dir_rel = os.path.join("runs",pstutil._datestamp(appendrandom=True))
        run_dir_abs = os.path.join(remotewd,run_dir_rel)
        pstutil.create_directory_if_necessary(run_dir_abs,isfile=False)
        return remotewd, run_dir_abs, run_dir_rel, nodelocalwd
class HPCCopier(object): #Implements ICopier
    '''
    Copies a distributable's input files up to the cluster working directory
    and its output files back down, shelling out to Windows xcopy.
    '''
    def __init__(self, remotewd, skipinput=False):
        self.remotewd = remotewd
        self.skipinput = skipinput

    def input(self, item):
        '''Copy *item* (a path) to the remote wd, or recurse via copyinputs.'''
        if self.skipinput:
            return
        if isinstance(item, str):
            itemnorm = os.path.normpath(item)
            remote_file_name = os.path.join(self.remotewd, itemnorm)
            remote_dir_name, _ = os.path.split(remote_file_name)
            pstutil.create_directory_if_necessary(remote_file_name)
            xcopycommand = "xcopy /d /e /s /c /h /y {0} {1}".format(itemnorm, remote_dir_name)
            logging.info(xcopycommand)
            rc = os.system(xcopycommand)
            print("rc=" + str(rc))
            # A failed upload is fatal: the job cannot run without its inputs.
            if rc != 0:
                raise Exception("xcopy cmd failed with return value={0}, from cmd {1}".format(rc,xcopycommand))
        elif hasattr(item, "copyinputs"):
            item.copyinputs(self)
        # else -- do nothing

    def output(self, item):
        '''Copy *item* back from the remote wd, or recurse via copyoutputs.'''
        if isinstance(item, str):
            itemnorm = os.path.normpath(item)
            pstutil.create_directory_if_necessary(itemnorm)
            remote_file_name = os.path.join(self.remotewd, itemnorm)
            # Copy to the local DIR (not file) so xcopy won't ask 'file or dir?'
            local_dir_name, _ = os.path.split(itemnorm)
            assert os.path.exists(remote_file_name), "Don't see expected file '{0}'. Did the HPC job fail?".format(remote_file_name)
            xcopycommand = "xcopy /d /c /y {0} {1}".format(remote_file_name, local_dir_name)
            logging.info(xcopycommand)
            rc = os.system(xcopycommand)
            # A failed download is only logged: best-effort retrieval.
            if rc != 0:
                logging.info("xcopy cmd failed with return value={0}, from cmd {1}".format(rc,xcopycommand))
        elif hasattr(item, "copyoutputs"):
            item.copyoutputs(self)
        # else -- do nothing
class HPCCopierNodeLocal(object): #Implements ICopier
    '''
    Emits xcopy/del commands into the node prep and release batch scripts:
    inputs are staged from the fileshare (%f%) to the node-local directory
    (%t%) before a task runs, and outputs are copied back afterwards.
    '''
    def __init__(self, fileprep, filerelease, clean_up):
        self.fileprep = fileprep        # open handle for nodeprep.bat
        self.filerelease = filerelease  # open handle for noderelease.bat
        self.clean_up = clean_up        # if True, delete staged copies on release

    def input(self, item):
        '''Stage one input path to the node, or recurse via copyinputs.'''
        if not isinstance(item, str):
            if hasattr(item, "copyinputs"):
                item.copyinputs(self)
            return
        itemnorm = os.path.normpath(item)
        dirname = os.path.dirname(itemnorm)
        self.fileprep.write("if not exist %t%\{0} mkdir %t%\{0}\n".format(dirname))
        self.fileprep.write("xcopy /d /e /s /c /h /y %f%\{0} %t%\{1}\\\n".format(itemnorm, dirname))
        if self.clean_up:
            self.filerelease.write("del %t%\{0}\n".format(itemnorm))

    def output(self, item):
        '''Copy one output path back to the share, or recurse via copyoutputs.'''
        if not isinstance(item, str):
            if hasattr(item, "copyoutputs"):
                item.copyoutputs(self)
            return
        itemnorm = os.path.normpath(item)
        dirname = os.path.dirname(itemnorm)
        self.filerelease.write("xcopy /d /e /s /c /h /y %t%\{0} %f%\{1}\\\n".format(itemnorm, dirname))
        if self.clean_up:
            self.filerelease.write("del %t%\{0}\n".format(itemnorm))
| 52.832558 | 265 | 0.61832 | 2,752 | 22,718 | 4.950218 | 0.165334 | 0.026426 | 0.02048 | 0.017177 | 0.340527 | 0.285547 | 0.256478 | 0.201864 | 0.173163 | 0.147618 | 0 | 0.013471 | 0.264812 | 22,718 | 429 | 266 | 52.955711 | 0.802179 | 0.087904 | 0 | 0.204611 | 0 | 0.034582 | 0.20704 | 0.016001 | 0 | 0 | 0 | 0 | 0.011527 | 1 | 0.057637 | false | 0 | 0.037464 | 0 | 0.146974 | 0.002882 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d8424bf36382d1072f3fbfbb2e4fabd3526822c8 | 496 | py | Python | tests/formatters_test.py | MiraGeoscience/mirageoscience-apps | 8c445ec8f2391349aa4cac6c705426301b3c31ca | [
"MIT"
] | 1 | 2022-02-18T16:28:22.000Z | 2022-02-18T16:28:22.000Z | tests/formatters_test.py | nwilliams-kobold/geoapps | eb972321316a33628d8ae04613cc403a27d942ee | [
"MIT"
] | null | null | null | tests/formatters_test.py | nwilliams-kobold/geoapps | eb972321316a33628d8ae04613cc403a27d942ee | [
"MIT"
] | null | null | null | # Copyright (c) 2022 Mira Geoscience Ltd.
#
# This file is part of geoapps.
#
# geoapps is distributed under the terms and conditions of the MIT License
# (see LICENSE file at the root of this source code package).
import pytest
from geoapps.utils.formatters import string_name
def test_string_name():
    """Every listed special character in the input must become '_'."""
    special_chars = "!@#$%^&*().,"
    raw_value = "H!e(l@l#o.W$o%r^l&d*"
    expected = "H_e_l_l_o_W_o_r_l_d_"
    result = string_name(raw_value, characters=special_chars)
    assert result == expected, "string_name validator failed"
| 23.619048 | 75 | 0.681452 | 80 | 496 | 4.0375 | 0.6 | 0.123839 | 0.018576 | 0.024768 | 0.06192 | 0.06192 | 0.06192 | 0.06192 | 0.06192 | 0.06192 | 0 | 0.010076 | 0.199597 | 496 | 20 | 76 | 24.8 | 0.803526 | 0.413306 | 0 | 0 | 0 | 0 | 0.282686 | 0 | 0 | 0 | 0 | 0 | 0.125 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d843b63a70d99dba02ce0c7f86e18727de78351a | 300 | py | Python | solutions/python3/841.py | sm2774us/amazon_interview_prep_2021 | f580080e4a6b712b0b295bb429bf676eb15668de | [
"MIT"
] | 42 | 2020-08-02T07:03:49.000Z | 2022-03-26T07:50:15.000Z | solutions/python3/841.py | ajayv13/leetcode | de02576a9503be6054816b7444ccadcc0c31c59d | [
"MIT"
] | null | null | null | solutions/python3/841.py | ajayv13/leetcode | de02576a9503be6054816b7444ccadcc0c31c59d | [
"MIT"
] | 40 | 2020-02-08T02:50:24.000Z | 2022-03-26T15:38:10.000Z | class Solution:
def canVisitAllRooms(self, rooms):
pool, stack = set(range(len(rooms))), [0]
while stack:
pool.discard(stack[-1])
for nex in rooms[stack.pop()]:
if nex in pool:
stack.append(nex)
return not pool | 33.333333 | 49 | 0.51 | 35 | 300 | 4.371429 | 0.657143 | 0.117647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010753 | 0.38 | 300 | 9 | 50 | 33.333333 | 0.811828 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d84405eb47cc619295c6637f42822db41956a203 | 10,213 | py | Python | 15_observation_fixed_direction.py | kuntzer/SALSA-public | 79fd601d3999ac977bbc97be010b2c4ef81e4c35 | [
"BSD-3-Clause"
] | 1 | 2021-07-30T09:59:41.000Z | 2021-07-30T09:59:41.000Z | 15_observation_fixed_direction.py | kuntzer/SALSA-public | 79fd601d3999ac977bbc97be010b2c4ef81e4c35 | [
"BSD-3-Clause"
] | null | null | null | 15_observation_fixed_direction.py | kuntzer/SALSA-public | 79fd601d3999ac977bbc97be010b2c4ef81e4c35 | [
"BSD-3-Clause"
] | 1 | 2021-07-30T10:38:54.000Z | 2021-07-30T10:38:54.000Z | ''' 15-observation_fixed_direction
===============================================
AIM: Similar to 14-<...>.py, but for only one traget.
INPUT: files: - <orbit_id>_misc/orbits.dat
- <orbit_id>_flux/flux_*.dat
variables: see section PARAMETERS (below)
OUTPUT: in <orbit_id>_figures/ : (see below for file name definition)
CMD: python 15-observation_fixed_direction
ISSUES: ! DOES NOT WORK !
REQUIRES:- standard python libraries, specific libraries in resources/ (+ SciPy)
- BaseMap --> http://matplotlib.org/basemap/
- Structure of the root folder:
* <orbit_id>_flux/ --> flux files
* <orbit_id>_figures/ --> figures
* <orbit_id>_misc/ --> storages of data
* all_figures/ --> comparison figures
REMARKS: <none>
'''
###########################################################################
### INCLUDES
import numpy as np
import pylab as plt
import matplotlib.cm as cm
import time
from resources.routines import *
from resources.TimeStepping import *
from resources.targets import *
import parameters as param
import resources.constants as const
import resources.figures as figures
import time
from matplotlib import dates
from matplotlib.ticker import MaxNLocator, MultipleLocator, FormatStrFormatter
###########################################################################
### PARAMETERS
# Name of object of interest OI (must match an entry of the catalogue):
OI = 'BD-082823'
# Identifier of the orbit; apogee/perigee give its altitude in km
orbit_id = 701
apogee=700
perigee=700
# First minute of the analysis window (day 30)
minute_ini = 30.*1440.
# Last minute of the analysis window (day 50)
minute_end = 50.*1440.
# Include the South Atlantic Anomaly exclusion table ?
SAA = False
# Show plots on screen ?
show = True
# Save the picture to disk ?
save = False
# Fancy (publication-style) plots ?
fancy = True
# Take the stray light flux into account when deciding visibility ?
straylight = False
# Minimum observable time [min] for a target to count in the plots
threshold_obs_time = 50
# Time [min] needed to acquire a target (added to the threshold below)
t_acquisition = 6
# Catalogue name (in resources/)
catalogue = 'cheops_target_list_v0.1.dat'
# Maximum magnitude that can be seen by CHEOPS, only for cosmetics purposes
CHEOPS_mag_max = 12.5
# File name for the list of orbit file
orbits_file = 'orbits.dat'
# Factor in the SL post treatment correction ?
SL_post_treat = True
# Factor in mirror efficiency for the equivalent star magnitude ?
mirror_correction = True
#####################################################################################################################
# CONSTANTS AND PHYSICAL PARAMETERS
period = altitude2period(apogee,perigee)
###########################################################################
### INITIALISATION
file_flux = 'flux_'
# widen the threshold by adding the acquisition time:
threshold_obs_time += t_acquisition
# Formatted folders definitions
folder_flux, folder_figures, folder_misc = init_folders(orbit_id)
## Prepare the RA/DEC grid (cell centres, resx x resy over the full sky)
n_alpha = param.resx
n_delta = param.resy
ra_i = 0
ra_f = 2.*np.pi
dec_i = -np.pi/2.
dec_f = np.pi/2.
ra_step = (ra_f-ra_i)/n_alpha
dec_step = (dec_f-dec_i)/n_delta
# NOTE(review): np.float is removed in modern NumPy; use float instead.
iterable = (ra_i + ra_step/2+ i*ra_step for i in range(n_alpha))
ras = np.fromiter(iterable, np.float)
iterable = (dec_i + dec_step/2+ i*dec_step for i in range(n_delta))
decs = np.fromiter(iterable, np.float)
ra_grid, dec_grid = np.meshgrid(ras, decs)
if SAA:
    # Keep only the SAA entries inside the analysed time window.
    SAA_data = np.loadtxt('resources/SAA_table_%d.dat' % orbit_id, delimiter=',')
    SAA_data = SAA_data[SAA_data[:,0]>= minute_ini]
    SAA_data = SAA_data[SAA_data[:,0]<= minute_end]
computed_orbits = np.loadtxt(folder_misc+orbits_file)[:,0]
############################################################################
### Load catalogue and pick the target of interest
name_cat, ra_cat, dec_cat, mag_cat = load_catalogue(catalogue)
index_ra_cat = np.zeros(np.shape(ra_cat))
index_dec_cat= np.zeros(np.shape(ra_cat))
# Linear scan for the OI entry; ii keeps its catalogue index.
ii = 0
for name in name_cat:
    if name == OI:
        break
    ii += 1
print 'Target is >>>', name_cat[ii]
name_cat= name_cat[ii]
ra=ra_cat[ii]
dec=dec_cat[ii]
mag=mag_cat[ii]
# Snap the target onto the nearest grid cell (catalogue angles are in deg).
id_ra = find_nearest(ras, ra/const.RAD)
id_dec = find_nearest(decs, dec/const.RAD)
# NOTE(review): 'name' is the loop variable left over from the scan above;
# it equals name_cat[ii] only when the target was actually found — confirm.
obj = target_list(name, ra/const.RAD, id_ra, dec/const.RAD, id_dec, mag, int(period+3))
# Apply the flux correction (SL post-treatment removal and the mirror efficiency)
corr_fact = 1.0
if mirror_correction: corr_fact /= param.mirror_efficiency
if SL_post_treat: corr_fact *= (1.0 - param.SL_post_treat_reduction)
############################################################################
### Start the anaylsis
start = time.time()
# Prepare the arrays
visibility = np.zeros(np.shape(ra_grid))
#observations = np.zeros(len(name_cat)*)
workspace = np.zeros(np.shape(ra_grid))
#data = np.zeros(np.shape(ra_grid))
# Load the reference times
orbits = np.loadtxt(folder_misc+orbits_file,dtype='i4')
minutes_orbit_iditude = np.loadtxt('resources/minute_table_%d.dat' % orbit_id, delimiter=',',dtype='Int32')
# Set variables for printing the advance
numberofminutes = minute_end+1 - minute_ini
lo = fast_minute2orbit(minutes_orbit_iditude,minute_end, orbit_id)
fo = fast_minute2orbit(minutes_orbit_iditude,minute_ini, orbit_id)
lp = -1
junk, junk, at_ini, junk = fast_orbit2times(minutes_orbit_iditude, fo, orbit_id)
# Start from the last orbit for which flux files were actually computed.
first_computed = computed_orbits[computed_orbits<=fo][-1]
first_minute = minute_ini
last_minute = minute_end
if not fo == first_computed:
    junk, junk, minute_ini, junk = fast_orbit2times(minutes_orbit_iditude, first_computed, orbit_id)
#	print '1st referenced orbit: %d\twanted orbit: %d' % (first_computed, fo)
# NOTE(review): if minute_ini is still a float here, range() raises a
# TypeError on Python 2.7 — consistent with the header's "DOES NOT WORK".
try:
    for minute in range(minute_ini,int(minute_end)+1+int(period)):
        minute = int(minute)
        if SAA and fast_SAA(SAA_data, minute): SAA_at_minute = True
        else: SAA_at_minute = False
        orbit_current = fast_minute2orbit(minutes_orbit_iditude, minute, orbit_id)
        if orbit_current > lp:
            # New orbit: update the progress line and the orbit start time.
            lp = orbit_current
            message = "Analysing orbit %d on %d...\t" % (lp,lo)
            sys.stdout.write( '\r'*len(message) )
            sys.stdout.write(message)
            sys.stdout.flush()
            junk, len_orbit, atc_ini, junk = fast_orbit2times(minutes_orbit_iditude, orbit_current, orbit_id)
        try:
            ra, dec, S_sl = load_flux_file(minute, file_flux, folder=folder_flux)
            load = True
            minute_to_load = minute-atc_ini#+shift
        except IOError:
            # if there is nothing then well, do nothing ie we copy the past values
            # in which orbit are we ?
            # get the previous orbit computed and copy the stray light data of this orbit :
            #orbit_previous = orbits[orbits[:,0] < orbit_current][-1,0]
            #minute_replacement = minute - atc_ini + shift #+ at_ini
            minute_to_load = minute-atc_ini
            if SAA_at_minute:
                obj.current_visibility = 0
            else:
                obj.current_visibility = obj.visible_save[minute_to_load]
            load = False
        # populate the visbility matrix
        # for ii in range(0, targets[0].CountObjects()):
        if load:
            ra_ = obj.ra
            dec_ = obj.dec
            # Find the stray-light grid cell matching the target position.
            a = np.where(np.abs(ra_-ra)<ra_step/2)[0]
            b = np.where(np.abs(dec_-dec)<dec_step/2)[0]
            INT = np.intersect1d(a,b)
            # Invisible if the target is off-grid this minute, or the stray
            # light (after correction) exceeds the target's maximum flux.
            if np.shape(INT)[0] == 0 or (straylight and S_sl[INT]*corr_fact > obj.maximum_flux()):
                obj.visible_save[minute_to_load] = 0
                obj.current_visibility = 0
                continue
            else:
                obj.visible_save[minute_to_load] = 1
            if SAA_at_minute: obj.current_visibility = 0
            else: obj.current_visibility = 1
        if minute == minute_ini:
            obj.workspace=obj.current_visibility
            continue
        obj.Next(minute,threshold_obs_time)
except KeyboardInterrupt: print hilite('\nWARNING! USER STOPPED LOADING AT MINUTE %d' % minute,False,False)
# Flush the last minute into the target's visibility bookkeeping.
obj.Next(minute,threshold_obs_time)
print
############################################################################
end = time.time()
elapsed_time = round((end-start)/60.,2)
sys.stdout.write( '\r'*len(message) )
sys.stdout.flush()
print "Time needed: %2.2f min" % elapsed_time
### Plot a few things
if fancy: figures.set_fancy()
### Plot time line
figures.set_fancy()
# Restore the requested window (minute_ini may have been rewound above).
minute_ini = first_minute
minute_end = last_minute
fig = plt.figure()
ax = plt.subplot(111)
ii = 0
#ax.yaxis.set_major_locator(MultipleLocator(1))
plt.grid(True)
# Minute stamps at which the target becomes visible / invisible.
visi = obj.Visibility()
invi = obj.Invisibility()
dist = 0
##for v, i in zip(visi, invi):
##	print v, i, i-v, v-dist
##	dist = i
timestamps = np.zeros(lo+1-fo)
obs_time = np.zeros(lo+1-fo)
# Accumulate, per orbit, the available observation time in minutes.
for orbit in range(fo, lo+1):
    ii = orbit-fo
    junk, junk, a, e = fast_orbit2times(minutes_orbit_iditude, orbit, orbit_id)
    timestamps[ii] = a
    visi_c = visi[(visi <= e) & (visi >= a)]
    next_inv = invi[(visi <= e) & (visi >= a)]
    invi_c = invi[(invi <= e) & (invi >= a)]
    if np.shape(visi_c)[0] == 2:
        print np.shape(visi_c)[0]
        exit()
    if np.shape(next_inv)[0] == 2:
        print np.shape(visi_c)[0]
        exit()
    if np.shape(visi_c)[0] > 0 and next_inv[0] > e:
        # Visibility runs past the end of this orbit: clamp at 'e'.
        obs_time[ii] += e - visi_c + 1
    elif np.shape(visi_c)[0] > 0:
        print orbit
        obs_time[ii] += next_inv - visi_c
    #2@	current_in = invi[(invi >= a) & (invi <= e)]
    #2@	current_vi = visi[(visi >= a) & (visi <= e)]
    #2@shape_in = np.shape(current_in)[0]
    #2@shape_vi = np.shape(current_vi)[0]
    #2@if shape_in == 2 :
    #2@	obs_time[ii] += current_in[0]-a
    #2@	np.delete(current_in, 0)
    #2@	shape_in = np.shape(current_in)[0]
    #2@if shape_in == 1 and shape_vi == 1:
    #2@	obs_time[ii] += current_in[0] - current_vi[0]
    #2@elif shape_in == 1 and shape_vi == 0:
    #2@	obs_time[ii] += current_in[0] - a
    #2@elif shape_in == 0 and shape_vi == 1:
    #2@	obs_time[ii] += e - current_vi[0]
    # NOTE(review): current_in / current_vi only exist in the commented-out
    # #2@ block above, so this branch raises NameError if ever entered.
    if obs_time[ii] < 0:
        print a,e
        print current_in
        print current_vi
        exit()
#print timestamps
#print obs_time
plt.plot (timestamps, obs_time, lw=2)
plt.ylabel('Available Obs. Time per Orbit [min]')
# convert epoch to matplotlib float format
# NOTE(review): this first 'labels' assignment is immediately overwritten.
labels = timestamps * 60. + const.timestamp_2018_01_01
labels = np.linspace(minute_ini, minute_end+1, 12) * 60. + const.timestamp_2018_01_01
plt.xlim([minute_ini, minute_end+1])
#plt.xlim([minute_ini, minute_end+1])
#ax.xaxis.set_major_locator(MultipleLocator((minute_end-minute_ini+1)/11))
# to human readable date (Python-2 map returns a list here)
pre = map(time.gmtime, labels)
labels = map(figures.format_second, pre)
ax.set_xticklabels(labels)
fig.autofmt_xdate()
if save:
    # Report the threshold without the acquisition overhead in the filename.
    threshold_obs_time -= t_acquisition
    if SAA: note = '_SAA'
    else: note = ''
    fname = '%svisibility_%s_obs_%d_o_%d_to_%d%s' % (folder_figures, OI, threshold_obs_time,fo,lo, note)
    figures.savefig(fname,fig,fancy)
if show: plt.show()
| 27.380697 | 117 | 0.675218 | 1,570 | 10,213 | 4.18535 | 0.227389 | 0.01811 | 0.023132 | 0.010653 | 0.231624 | 0.204687 | 0.113073 | 0.06757 | 0.042307 | 0.027697 | 0 | 0.020298 | 0.146186 | 10,213 | 372 | 118 | 27.454301 | 0.733257 | 0.224812 | 0 | 0.132275 | 0 | 0 | 0.04618 | 0.01795 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.068783 | null | null | 0.05291 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d8455d466c10af2e80eabb1b98ebf27274580915 | 5,992 | py | Python | midonet/neutron/services/l2gateway/plugin.py | NeCTAR-RC/networking-midonet | 7a69af3eab25f57e77738fd8398b6f4854346fd9 | [
"Apache-2.0"
] | null | null | null | midonet/neutron/services/l2gateway/plugin.py | NeCTAR-RC/networking-midonet | 7a69af3eab25f57e77738fd8398b6f4854346fd9 | [
"Apache-2.0"
] | null | null | null | midonet/neutron/services/l2gateway/plugin.py | NeCTAR-RC/networking-midonet | 7a69af3eab25f57e77738fd8398b6f4854346fd9 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2015 Midokura SARL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api import validators
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from oslo_utils import excutils
from networking_l2gw import extensions as l2gateway_ext
from networking_l2gw.services.l2gateway.common import l2gw_validators
from networking_l2gw.services.l2gateway import plugin as l2gw_plugin
from neutron.api import extensions as neutron_extensions
from midonet.neutron.common import constants as mido_const
from midonet.neutron.db import l2gateway_midonet as l2gw_db
from midonet.neutron.services.l2gateway.common import l2gw_midonet_validators
LOG = logging.getLogger(__name__)
class MidonetL2GatewayPlugin(l2gw_plugin.L2GatewayPlugin,
                             l2gw_db.MidonetL2GatewayMixin):
    """Implementation of the Neutron l2 gateway Service Plugin.

    This class manages the workflow of Midonet l2 Gateway request/response.
    The base plugin methods are overridden because the MidoNet driver requires
    specific ordering of events. For creation, the Neutron data must be
    created first, with the resource UUID generated. Also, for both creation
    and deletion, by invoking the Neutron DB methods first, all the
    validations, such as 'check_admin()' are executed prior to attempting to
    modify the MidoNet data, preventing potential data inconsistency.
    """

    def __init__(self):
        # Dynamically change the validators so that they are applicable to
        # the MidoNet implementation of L2GW.
        # REVISIT(yamamoto): These validator modifications should not
        # have been here in the first place. We should either put them
        # in upstream or remove them.
        l2gw_validators.validate_gwdevice_list = (l2gw_midonet_validators.
                                                  validate_gwdevice_list)
        # Re-register the 'l2gwdevice_list' attribute validator so API-level
        # validation also uses the MidoNet-specific version.
        val_type = validators._to_validation_type('l2gwdevice_list')
        validators.validators.pop(val_type, None)
        validators.add_validator(
            val_type,
            l2gw_midonet_validators.validate_gwdevice_list)
        # MidoNet does not validate the segmentation id in network mappings.
        l2gw_validators.validate_network_mapping_list = (
            l2gw_midonet_validators.
            validate_network_mapping_list_without_seg_id_validation)
        neutron_extensions.append_api_extensions_path(l2gateway_ext.__path__)
        super(MidonetL2GatewayPlugin, self).__init__()

    def add_port_mac(self, context, port_dict):
        """Delegate add_port_mac straight to the MidoNet driver."""
        # This function is not implemented now in MidoNet plugin.
        # We block this function in plugin level to prevent from loading
        # l2gw driver in upstream.
        self._get_driver_for_provider(mido_const.MIDONET_L2GW_PROVIDER
                                      ).add_port_mac(context, port_dict)

    def delete_port_mac(self, context, port):
        """Delegate delete_port_mac straight to the MidoNet driver."""
        # This function is not implemented now in MidoNet plugin.
        # We block this function in plugin level to prevent from loading
        # l2gw driver in upstream.
        self._get_driver_for_provider(mido_const.MIDONET_L2GW_PROVIDER
                                      ).delete_port_mac(context, port)

    def create_l2_gateway(self, context, l2_gateway):
        """Validate and create an L2 gateway in the Neutron DB."""
        # Gateway Device Management Service must be enabled
        # when Midonet L2 Gateway is used.
        self._check_and_get_gw_dev_service()
        self.validate_l2_gateway_for_create(context, l2_gateway)
        return l2gw_db.MidonetL2GatewayMixin.create_l2_gateway(
            self, context, l2_gateway)

    @log_helpers.log_method_call
    def create_l2_gateway_connection(self, context, l2_gateway_connection):
        """Create a gateway connection in the DB first, then in MidoNet.

        If the MidoNet driver fails, the DB record is rolled back and the
        original exception is re-raised.
        """
        self.validate_l2_gateway_connection_for_create(
            context, l2_gateway_connection)
        l2_gw_conn = (l2gw_db.MidonetL2GatewayMixin.
                      create_l2_gateway_connection(
                          self, context, l2_gateway_connection))
        # Copy over the ID so that the MidoNet driver knows about it. ID is
        # necessary for MidoNet to process its translation.
        gw_connection = l2_gateway_connection[self.connection_resource]
        gw_connection["id"] = l2_gw_conn["id"]
        try:
            self._get_driver_for_provider(mido_const.MIDONET_L2GW_PROVIDER
                                          ).create_l2_gateway_connection(
                context, l2_gateway_connection)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                LOG.error("Failed to create a l2 gateway connection "
                          "%(gw_conn_id)s in Midonet:%(err)s",
                          {"gw_conn_id": l2_gw_conn["id"], "err": ex})
                # Best-effort rollback of the DB record created above.
                try:
                    l2gw_db.MidonetL2GatewayMixin.delete_l2_gateway_connection(
                        self, context, l2_gw_conn["id"])
                except Exception:
                    LOG.exception("Failed to delete a l2 gateway conn %s",
                                  l2_gw_conn["id"])
        return l2_gw_conn

    @log_helpers.log_method_call
    def delete_l2_gateway_connection(self, context, l2_gateway_connection):
        """Delete a gateway connection from the DB, then from MidoNet.

        DB deletion goes first so admin/ownership checks run before any
        MidoNet state is touched.
        """
        l2gw_db.MidonetL2GatewayMixin.delete_l2_gateway_connection(
            self, context, l2_gateway_connection)
        self._get_driver_for_provider(mido_const.MIDONET_L2GW_PROVIDER
                                      ).delete_l2_gateway_connection(
            context, l2_gateway_connection)
| 47.555556 | 79 | 0.691255 | 739 | 5,992 | 5.331529 | 0.301759 | 0.061675 | 0.08198 | 0.046701 | 0.374112 | 0.284264 | 0.236548 | 0.195939 | 0.195939 | 0.148731 | 0 | 0.018157 | 0.255507 | 5,992 | 125 | 80 | 47.936 | 0.865053 | 0.315921 | 0 | 0.185714 | 0 | 0 | 0.036954 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.157143 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d8465626d2247b15a81fc86df366ab13b3a32e07 | 1,706 | py | Python | QueroInternetWeb/main/migrations/0008_auto_20190507_2146.py | quero-internet/quero-internet-web | 95f1763ecb587dcb6d09c0cd3c15c29f837ced90 | [
"MIT"
] | null | null | null | QueroInternetWeb/main/migrations/0008_auto_20190507_2146.py | quero-internet/quero-internet-web | 95f1763ecb587dcb6d09c0cd3c15c29f837ced90 | [
"MIT"
] | 2 | 2019-08-06T01:04:37.000Z | 2019-08-27T00:26:32.000Z | QueroInternetWeb/main/migrations/0008_auto_20190507_2146.py | quero-internet/quero-internet-web | 95f1763ecb587dcb6d09c0cd3c15c29f837ced90 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.7 on 2019-05-08 01:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django schema migration. Do not reorder the operations:
    # the AddField entries for 'resposta' depend on the CreateModel above.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('main', '0007_auto_20190424_2141'),
    ]

    operations = [
        # New 'Resposta' model: a reply with optional setup/monthly prices.
        migrations.CreateModel(
            name='Resposta',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('resposta', models.CharField(max_length=300)),
                ('valor_implantacao', models.DecimalField(decimal_places=2, max_digits=10, null=True, verbose_name='Valor de implantação')),
                ('valor_mensalidade', models.DecimalField(decimal_places=2, max_digits=10, null=True, verbose_name='Valor de mensalidade')),
            ],
            options={
                'verbose_name': 'Resposta',
                'verbose_name_plural': 'Respostas',
            },
        ),
        # Make Solicitacao.observacoes optional.
        migrations.AlterField(
            model_name='solicitacao',
            name='observacoes',
            field=models.CharField(blank=True, max_length=300, null=True, verbose_name='Observações'),
        ),
        # Link each Resposta to its Solicitacao and authoring user;
        # PROTECT prevents deleting referenced rows.
        migrations.AddField(
            model_name='resposta',
            name='solicitacao',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='main.Solicitacao'),
        ),
        migrations.AddField(
            model_name='resposta',
            name='usuario',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 37.911111 | 140 | 0.616647 | 175 | 1,706 | 5.84 | 0.44 | 0.064579 | 0.041096 | 0.064579 | 0.328767 | 0.328767 | 0.252446 | 0.252446 | 0.252446 | 0.252446 | 0 | 0.034208 | 0.263189 | 1,706 | 44 | 141 | 38.772727 | 0.778839 | 0.026377 | 0 | 0.210526 | 1 | 0 | 0.151899 | 0.013864 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.078947 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d846ca90573fc0df20d1b67f785499c31a7ee515 | 409 | py | Python | instructionsWW.py | felixboehm/chatBot | 3c3cc9a9a283f9048b6f40dfcf1ac324ad2eecb8 | [
"Apache-2.0"
] | null | null | null | instructionsWW.py | felixboehm/chatBot | 3c3cc9a9a283f9048b6f40dfcf1ac324ad2eecb8 | [
"Apache-2.0"
] | null | null | null | instructionsWW.py | felixboehm/chatBot | 3c3cc9a9a283f9048b6f40dfcf1ac324ad2eecb8 | [
"Apache-2.0"
] | null | null | null | def showHelp(bot, message):
helpText = """** HowTo Play "Werwolf” **
Commands:
`join` trete dem Spiel bei
`join @player` lade dich und einen anderen Spieler ein
`go` starte das Spiel
`bite @player` Werwölfe töten ihr Opfer
`hang @player` Dorfbewohner hängen den Verdächtigen
`restart` stopt das Spiel und löscht alle Teilnehmer"""
bot.sendMessage(message['rid'], helpText) | 37.181818 | 59 | 0.687042 | 52 | 409 | 5.403846 | 0.807692 | 0.05694 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.212714 | 409 | 11 | 60 | 37.181818 | 0.872671 | 0 | 0 | 0 | 0 | 0 | 0.770732 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d848f8dd8085e1bf86cb047117735a5685ffbd13 | 1,781 | py | Python | setup.py | mcrowson/wunderpy2 | a3a959d1a3569ccb0869adba10e671978609a697 | [
"MIT"
] | null | null | null | setup.py | mcrowson/wunderpy2 | a3a959d1a3569ccb0869adba10e671978609a697 | [
"MIT"
] | null | null | null | setup.py | mcrowson/wunderpy2 | a3a959d1a3569ccb0869adba10e671978609a697 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from codecs import open
import os.path
import sys
script_dir = os.path.abspath(os.path.dirname(__file__))  # NOTE(review): not referenced in the visible code — read() below uses cwd-relative paths
def read(*paths):
    """Join *paths* into a single file path and return that file's text."""
    target = os.path.join(*paths)
    with open(target, 'r') as source:
        return source.read()
# argparse entered the standard library in Python 2.7; older interpreters
# would need the PyPI backport, so it is added conditionally just in case
# pre-2.7 support is ever wanted.
_needs_argparse_backport = sys.hexversion < 0x02070000
install_requires = ['requests', 'six'] + (['argparse'] if _needs_argparse_backport else [])
# Package metadata for PyPI. NOTE(review): long_description is assembled via
# read() with cwd-relative paths, so this expects to run from the repo root
# where README.rst / HISTORY.rst / AUTHORS.rst live.
setup(
    name='wunderpy2',
    version='0.1.4',
    description='A Python library for the Wunderlist 2 REST API',
    # Idea credit of https://hynek.me/articles/sharing-your-labor-of-love-pypi-quick-and-dirty/
    long_description=(read('README.rst') + '\n\n' +
                      read('HISTORY.rst') + '\n\n' +
                      read('AUTHORS.rst')),
    url='https://github.com/mieubrisse/wunderpy2',
    author='mieubrisse',
    author_email='mieubrisse@gmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    keywords='wunderpy wunderpy2 wunderlist api cli',
    # Ship every discovered package except docs, tests and contrib material.
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    # Computed above: ['requests', 'six'], plus 'argparse' on pre-2.7 interpreters.
    install_requires=install_requires,
)
| 36.346939 | 95 | 0.632229 | 218 | 1,781 | 5.105505 | 0.573395 | 0.085355 | 0.112309 | 0.093441 | 0.048518 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021708 | 0.224031 | 1,781 | 48 | 96 | 37.104167 | 0.783647 | 0.137563 | 0 | 0 | 0 | 0 | 0.442408 | 0 | 0 | 0 | 0.006545 | 0 | 0 | 1 | 0.025 | false | 0 | 0.1 | 0 | 0.15 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d8491385a7cb1fe2a3fcabf28f8d930e00a5e6f3 | 612 | py | Python | mpos/web/manager.py | cackharot/ngen-milk-pos | 4814bdbc6bddf02530ff10e1ec842fb316b0fa91 | [
"Apache-2.0"
] | null | null | null | mpos/web/manager.py | cackharot/ngen-milk-pos | 4814bdbc6bddf02530ff10e1ec842fb316b0fa91 | [
"Apache-2.0"
] | null | null | null | mpos/web/manager.py | cackharot/ngen-milk-pos | 4814bdbc6bddf02530ff10e1ec842fb316b0fa91 | [
"Apache-2.0"
] | 1 | 2019-04-24T06:11:47.000Z | 2019-04-24T06:11:47.000Z | # Set the path
import os
import sys
from flask_script import Manager, Server
# Make the parent directory importable so that `from web import app` resolves.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from web import app
manager = Manager(app)
# "run": development server — debugger and reloader enabled, listens on 0.0.0.0:4000.
manager.add_command("run", Server(
    use_debugger=True,
    use_reloader=True,
    host='0.0.0.0',
    #processes=3,
    threaded=True,
    port=4000)
)
# "prod": debugger and reloader disabled, bound to 127.0.0.1:80.
# (Original comment here was a copy-paste of the dev one and was wrong.)
manager.add_command("prod", Server(
    use_debugger=False,
    use_reloader=False,
    host='127.0.0.1',
    port=80)
)
if __name__ == "__main__":
    manager.run() | 19.741935 | 79 | 0.691176 | 92 | 612 | 4.391304 | 0.48913 | 0.019802 | 0.069307 | 0.079208 | 0.252475 | 0.252475 | 0.252475 | 0.252475 | 0.252475 | 0.252475 | 0 | 0.03373 | 0.176471 | 612 | 31 | 80 | 19.741935 | 0.767857 | 0.173203 | 0 | 0 | 0 | 0 | 0.065737 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.190476 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d849ad31053906c063fe54eb88c77659c721172b | 288 | py | Python | polish_case_trainer/word/word_bag.py | davidhelbig/casetrainer-api | e420070960996302e8cf4ee370f4cf844222ed98 | [
"MIT"
] | 5 | 2018-01-30T22:10:40.000Z | 2020-09-22T10:43:57.000Z | polish_case_trainer/word/word_bag.py | davidhelbig/casetrainer-api | e420070960996302e8cf4ee370f4cf844222ed98 | [
"MIT"
] | 3 | 2017-05-02T21:42:10.000Z | 2019-07-19T09:41:07.000Z | polish_case_trainer/word/word_bag.py | davidhelbig/casetrainer-api | e420070960996302e8cf4ee370f4cf844222ed98 | [
"MIT"
] | 4 | 2017-05-01T22:44:57.000Z | 2020-09-21T23:34:01.000Z | import random
class WordBag:
    """A bag of words from which a random entry can be drawn."""

    def __init__(self, word_list):
        """Validate and store *word_list*.

        Raises:
            TypeError: if *word_list* is not a ``list``.
        """
        if isinstance(word_list, list):
            self.word_list = word_list
        else:
            raise TypeError("word_list must be a list object")

    def get_word_from_bag(self):
        """Return one word chosen uniformly at random from the bag."""
        return random.choice(self.word_list)
| 22.153846 | 62 | 0.670139 | 41 | 288 | 4.390244 | 0.560976 | 0.266667 | 0.2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.253472 | 288 | 12 | 63 | 24 | 0.837209 | 0 | 0 | 0 | 0 | 0 | 0.107639 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.125 | 0.125 | 0.625 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 5 |
d84b963aacb5fb2dab3e77cf74727cfedec95c03 | 323 | py | Python | setup.py | khsk/Python-App-Capture | a0b893765558f144399ec31f1f11fb0b30025cc7 | [
"MIT"
] | null | null | null | setup.py | khsk/Python-App-Capture | a0b893765558f144399ec31f1f11fb0b30025cc7 | [
"MIT"
] | null | null | null | setup.py | khsk/Python-App-Capture | a0b893765558f144399ec31f1f11fb0b30025cc7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 03 15:54:20 2017
@author: y-takeuchi
"""
from cx_Freeze import setup, Executable
# Freeze capture.py as a windowed Windows executable ('Win32Gui' suppresses
# the console window).
exe = Executable(script = 'capture.py', base = 'Win32Gui')
# cx_Freeze build metadata for the frozen app.
setup(name = 'AppCapture',
      version = '0.1',
      description = 'Save Screen',
      executables = [exe]) | 17.944444 | 60 | 0.585139 | 39 | 323 | 4.820513 | 0.923077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.07113 | 0.260062 | 323 | 18 | 61 | 17.944444 | 0.715481 | 0.244582 | 0 | 0 | 0 | 0 | 0.190909 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |