max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
code/read_data.py | ribes96/TFG | 0 | 6615151 | #!/usr/bin/python3
# This script just trains a RandomForestClassifier with a handwriten digits
# dataset with about 7500 instances, and then tests them with a testing dataset
import numpy
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from custom_classes.ribes_DecisionTreeClassifier_method2 import ribes_DecisionTreeClassifier_method2
from custom_classes.ribes_RandomForestClassifier_method1 import ribes_RandomForestClassifier_method1
from custom_classes.ribes_RandomForestClassifier_method2 import ribes_RandomForestClassifier_method2
########################################################
## General Reading of the data
########################################################
def get_data():
train_data_path = "/home/hobber/git/TFG/code/data/pendigits.tra"
test_data_path = "/home/hobber/git/TFG/code/data/pendigits.tes"
n_features = 16
train_raw_data = open(train_data_path, 'rt')
test_raw_data = open(test_data_path, 'rt')
train_all_data = numpy.loadtxt(train_raw_data, delimiter=",")
test_all_data = numpy.loadtxt(test_raw_data, delimiter=",")
train_data, train_predictions = numpy.split(train_all_data, indices_or_sections = [n_features], axis = 1)
test_data, test_predictions = numpy.split(test_all_data, indices_or_sections = [n_features], axis = 1)
return train_data, train_predictions, test_data, test_predictions
###################################################################
# clf = DecisionTreeClassifier()
# clf.fit(train_data, train_predictions.ravel())
# sc_train = clf.score(train_data, train_predictions.ravel())
# sc_test = clf.score(test_data, test_predictions.ravel())
# print()
# print("Results of DecisionTreeClassifier")
# print("---------------------------------")
# print("Train score:", sc_train)
# print("Test score:", sc_test)
#
# clf_ribes = ribes_DecisionTreeClassifier_method2()
# clf_ribes.fit(train_data, train_predictions.ravel())
# sc_ribes_train = clf_ribes.score(train_data, train_predictions.ravel())
# sc_ribes_test = clf_ribes.score(test_data, test_predictions.ravel())
# print()
# print("Results of DecisionTreeClassifier")
# print("---------------------------------")
# print("Train score:", sc_ribes_train)
# print("Test score:", sc_ribes_test)
# clf = RandomForestClassifier(n_estimators = 5)
# print("Training with", len(train_predictions), "instances")
# clf.fit(train_data, train_predictions.ravel())
# print("Testing with", len(test_data), "instances")
# result = clf.predict(test_data)
# correct_prediction = test_predictions.ravel() == result.ravel()
# number_correct = numpy.count_nonzero(correct_prediction)
# print("Number of correct predictions:", number_correct)
# # print("Percentage of correct predictions:", percent, "%")
# score = clf.score(test_data, test_predictions.ravel())
# print("Score:", score)
# print("Error:", 1 - score)
#
# print("Empiezo con el custom")
# print("-----------------------------------------")
# c_clf = ribes_DecisionTreeClassifier_method2(n_RFF = None)
# o_clf = DecisionTreeClassifier()
# c_clf.fit(train_data, train_predictions.ravel())
# o_clf.fit(train_data, train_predictions.ravel())
# pr = c_clf.score(train_data, train_predictions.ravel())
# print(pr)
# c_clf = ribes_RandomForestClassifier_method2(n_estimators=500, n_RFF = 500)
# # c_clf = ribes_DecisionTreeClassifier_method2(n_RFF = 4)
# # o_clf = RandomForestClassifier()
# c_clf.fit(train_data, train_predictions.ravel())
# # o_clf.fit(train_data, train_predictions.ravel())
# pr = c_clf.score(train_data, train_predictions.ravel())
# # pr = c_clf.predict_proba(train_data)
# print(pr)
| #!/usr/bin/python3
# This script just trains a RandomForestClassifier with a handwriten digits
# dataset with about 7500 instances, and then tests them with a testing dataset
import numpy
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from custom_classes.ribes_DecisionTreeClassifier_method2 import ribes_DecisionTreeClassifier_method2
from custom_classes.ribes_RandomForestClassifier_method1 import ribes_RandomForestClassifier_method1
from custom_classes.ribes_RandomForestClassifier_method2 import ribes_RandomForestClassifier_method2
########################################################
## General Reading of the data
########################################################
def get_data():
train_data_path = "/home/hobber/git/TFG/code/data/pendigits.tra"
test_data_path = "/home/hobber/git/TFG/code/data/pendigits.tes"
n_features = 16
train_raw_data = open(train_data_path, 'rt')
test_raw_data = open(test_data_path, 'rt')
train_all_data = numpy.loadtxt(train_raw_data, delimiter=",")
test_all_data = numpy.loadtxt(test_raw_data, delimiter=",")
train_data, train_predictions = numpy.split(train_all_data, indices_or_sections = [n_features], axis = 1)
test_data, test_predictions = numpy.split(test_all_data, indices_or_sections = [n_features], axis = 1)
return train_data, train_predictions, test_data, test_predictions
###################################################################
# clf = DecisionTreeClassifier()
# clf.fit(train_data, train_predictions.ravel())
# sc_train = clf.score(train_data, train_predictions.ravel())
# sc_test = clf.score(test_data, test_predictions.ravel())
# print()
# print("Results of DecisionTreeClassifier")
# print("---------------------------------")
# print("Train score:", sc_train)
# print("Test score:", sc_test)
#
# clf_ribes = ribes_DecisionTreeClassifier_method2()
# clf_ribes.fit(train_data, train_predictions.ravel())
# sc_ribes_train = clf_ribes.score(train_data, train_predictions.ravel())
# sc_ribes_test = clf_ribes.score(test_data, test_predictions.ravel())
# print()
# print("Results of DecisionTreeClassifier")
# print("---------------------------------")
# print("Train score:", sc_ribes_train)
# print("Test score:", sc_ribes_test)
# clf = RandomForestClassifier(n_estimators = 5)
# print("Training with", len(train_predictions), "instances")
# clf.fit(train_data, train_predictions.ravel())
# print("Testing with", len(test_data), "instances")
# result = clf.predict(test_data)
# correct_prediction = test_predictions.ravel() == result.ravel()
# number_correct = numpy.count_nonzero(correct_prediction)
# print("Number of correct predictions:", number_correct)
# # print("Percentage of correct predictions:", percent, "%")
# score = clf.score(test_data, test_predictions.ravel())
# print("Score:", score)
# print("Error:", 1 - score)
#
# print("Empiezo con el custom")
# print("-----------------------------------------")
# c_clf = ribes_DecisionTreeClassifier_method2(n_RFF = None)
# o_clf = DecisionTreeClassifier()
# c_clf.fit(train_data, train_predictions.ravel())
# o_clf.fit(train_data, train_predictions.ravel())
# pr = c_clf.score(train_data, train_predictions.ravel())
# print(pr)
# c_clf = ribes_RandomForestClassifier_method2(n_estimators=500, n_RFF = 500)
# # c_clf = ribes_DecisionTreeClassifier_method2(n_RFF = 4)
# # o_clf = RandomForestClassifier()
# c_clf.fit(train_data, train_predictions.ravel())
# # o_clf.fit(train_data, train_predictions.ravel())
# pr = c_clf.score(train_data, train_predictions.ravel())
# # pr = c_clf.predict_proba(train_data)
# print(pr)
| en | 0.412254 | #!/usr/bin/python3 # This script just trains a RandomForestClassifier with a handwriten digits # dataset with about 7500 instances, and then tests them with a testing dataset ######################################################## ## General Reading of the data ######################################################## ################################################################### # clf = DecisionTreeClassifier() # clf.fit(train_data, train_predictions.ravel()) # sc_train = clf.score(train_data, train_predictions.ravel()) # sc_test = clf.score(test_data, test_predictions.ravel()) # print() # print("Results of DecisionTreeClassifier") # print("---------------------------------") # print("Train score:", sc_train) # print("Test score:", sc_test) # # clf_ribes = ribes_DecisionTreeClassifier_method2() # clf_ribes.fit(train_data, train_predictions.ravel()) # sc_ribes_train = clf_ribes.score(train_data, train_predictions.ravel()) # sc_ribes_test = clf_ribes.score(test_data, test_predictions.ravel()) # print() # print("Results of DecisionTreeClassifier") # print("---------------------------------") # print("Train score:", sc_ribes_train) # print("Test score:", sc_ribes_test) # clf = RandomForestClassifier(n_estimators = 5) # print("Training with", len(train_predictions), "instances") # clf.fit(train_data, train_predictions.ravel()) # print("Testing with", len(test_data), "instances") # result = clf.predict(test_data) # correct_prediction = test_predictions.ravel() == result.ravel() # number_correct = numpy.count_nonzero(correct_prediction) # print("Number of correct predictions:", number_correct) # # print("Percentage of correct predictions:", percent, "%") # score = clf.score(test_data, test_predictions.ravel()) # print("Score:", score) # print("Error:", 1 - score) # # print("Empiezo con el custom") # print("-----------------------------------------") # c_clf = ribes_DecisionTreeClassifier_method2(n_RFF = None) # o_clf = DecisionTreeClassifier() # 
c_clf.fit(train_data, train_predictions.ravel()) # o_clf.fit(train_data, train_predictions.ravel()) # pr = c_clf.score(train_data, train_predictions.ravel()) # print(pr) # c_clf = ribes_RandomForestClassifier_method2(n_estimators=500, n_RFF = 500) # # c_clf = ribes_DecisionTreeClassifier_method2(n_RFF = 4) # # o_clf = RandomForestClassifier() # c_clf.fit(train_data, train_predictions.ravel()) # # o_clf.fit(train_data, train_predictions.ravel()) # pr = c_clf.score(train_data, train_predictions.ravel()) # # pr = c_clf.predict_proba(train_data) # print(pr) | 2.929718 | 3 |
practice.py | Joshua-Elms/CSCI-B365 | 0 | 6615152 | <filename>practice.py<gh_stars>0
import matplotlib.pyplot as plt
# import pandas as pd
# import sys
# sys.path.insert(0, "/Users/joshuaelms/Desktop/github_repos/CSCI-B365_repo/CSCI-B365/Miscellaneous")
# import homemade_stats as hstat
# t_points = [(0,0), (2,2), (4,4)]
# df = pd.DataFrame(t_points, columns=("X", "Y"))
# print(df.describe())
# fig, ax = plt.subplots()
# ax.plot(t_points, "ro", linewidth=0)
# print(hstat.std(df["X"]))
# print(hstat.std(df["Y"]))
# plt.show()
def get_column_major(arr):
cm_arr = []
for c in range(len(arr[0])):
new_col = []
for row in arr:
new_col.append(row[c])
cm_arr.append(new_col)
return cm_arr
def multiply_matrices(arr1, arr2):
arr_out = []
cm_arr2 = get_column_major(arr2)
for r, row_arr1 in enumerate(arr1):
new_row = []
for col_arr2 in cm_arr2:
dp = 0
for i in range(len(col_arr2)):
dp += row_arr1[i]*col_arr2[i]
new_row.append(dp)
arr_out.append(new_row)
return arr_out
# for n in range(9):
# if n == 10:
# print("- "*10)
# print(q5(n))
arr1 = [[2, -1], [4, 1], [5, -3]]
arr2 = [[3, 1], [-2, -1]]
for row in multiply_matrices(arr1, arr2):
print(row)
# for row in get_column_major(arr2):
# print(row)
| <filename>practice.py<gh_stars>0
import matplotlib.pyplot as plt
# import pandas as pd
# import sys
# sys.path.insert(0, "/Users/joshuaelms/Desktop/github_repos/CSCI-B365_repo/CSCI-B365/Miscellaneous")
# import homemade_stats as hstat
# t_points = [(0,0), (2,2), (4,4)]
# df = pd.DataFrame(t_points, columns=("X", "Y"))
# print(df.describe())
# fig, ax = plt.subplots()
# ax.plot(t_points, "ro", linewidth=0)
# print(hstat.std(df["X"]))
# print(hstat.std(df["Y"]))
# plt.show()
def get_column_major(arr):
cm_arr = []
for c in range(len(arr[0])):
new_col = []
for row in arr:
new_col.append(row[c])
cm_arr.append(new_col)
return cm_arr
def multiply_matrices(arr1, arr2):
arr_out = []
cm_arr2 = get_column_major(arr2)
for r, row_arr1 in enumerate(arr1):
new_row = []
for col_arr2 in cm_arr2:
dp = 0
for i in range(len(col_arr2)):
dp += row_arr1[i]*col_arr2[i]
new_row.append(dp)
arr_out.append(new_row)
return arr_out
# for n in range(9):
# if n == 10:
# print("- "*10)
# print(q5(n))
arr1 = [[2, -1], [4, 1], [5, -3]]
arr2 = [[3, 1], [-2, -1]]
for row in multiply_matrices(arr1, arr2):
print(row)
# for row in get_column_major(arr2):
# print(row)
| en | 0.457639 | # import pandas as pd # import sys # sys.path.insert(0, "/Users/joshuaelms/Desktop/github_repos/CSCI-B365_repo/CSCI-B365/Miscellaneous") # import homemade_stats as hstat # t_points = [(0,0), (2,2), (4,4)] # df = pd.DataFrame(t_points, columns=("X", "Y")) # print(df.describe()) # fig, ax = plt.subplots() # ax.plot(t_points, "ro", linewidth=0) # print(hstat.std(df["X"])) # print(hstat.std(df["Y"])) # plt.show() # for n in range(9): # if n == 10: # print("- "*10) # print(q5(n)) # for row in get_column_major(arr2): # print(row) | 2.963206 | 3 |
Timelapse.py | worryag/Timelapse | 0 | 6615153 | <gh_stars>0
# Copyright (c) 2018 <NAME>
# The PostProcessingPlugin is released under the terms of the AGPLv3 or higher.
# Pause at layers and park XY.
import re
from ..Script import Script
class Timelapse(Script):
def __init__(self):
super().__init__()
def getSettingDataString(self):
return """{
"name": "Time lapse",
"key": "Timelapse",
"metadata": {},
"version": 2,
"settings":
{
"head_park_x":
{
"label": "Park Print Head X",
"description": "What X location does the head move to when pausing.",
"unit": "mm",
"type": "float",
"default_value": 0
},
"head_park_y":
{
"label": "Park Print Head Y",
"description": "What Y location does the head move to when pausing.",
"unit": "mm",
"type": "float",
"default_value": 215
},
"pause_timelapse":
{
"label": "Pause at layers",
"description": "Pause at layers for number of seconds.",
"type": "float",
"default_value": 1
}
}
}"""
def execute(self, data):
parkX = self.getSettingValueByKey("head_park_x")
parkY = self.getSettingValueByKey("head_park_y")
PauseTimeLapse = self.getSettingValueByKey("pause_timelapse")
prepend_gcode=";Start script: Timelapse.py\n"
prepend_gcode+="G0 Y%.1f F9000\n"%(parkY)
prepend_gcode+="G0 X%.1f F9000\n"%(parkX)
prepend_gcode+="G4 S%.1f \n"%(PauseTimeLapse)
prepend_gcode+=";End script: Timelapse.py\n"
pattern = re.compile(r';MESH:.*STL')
for layer_number, layer in enumerate(data):
data[layer_number]=re.sub(pattern,prepend_gcode,layer,flags=re.IGNORECASE)
return data
| # Copyright (c) 2018 <NAME>
# The PostProcessingPlugin is released under the terms of the AGPLv3 or higher.
# Pause at layers and park XY.
import re
from ..Script import Script
class Timelapse(Script):
def __init__(self):
super().__init__()
def getSettingDataString(self):
return """{
"name": "Time lapse",
"key": "Timelapse",
"metadata": {},
"version": 2,
"settings":
{
"head_park_x":
{
"label": "Park Print Head X",
"description": "What X location does the head move to when pausing.",
"unit": "mm",
"type": "float",
"default_value": 0
},
"head_park_y":
{
"label": "Park Print Head Y",
"description": "What Y location does the head move to when pausing.",
"unit": "mm",
"type": "float",
"default_value": 215
},
"pause_timelapse":
{
"label": "Pause at layers",
"description": "Pause at layers for number of seconds.",
"type": "float",
"default_value": 1
}
}
}"""
def execute(self, data):
parkX = self.getSettingValueByKey("head_park_x")
parkY = self.getSettingValueByKey("head_park_y")
PauseTimeLapse = self.getSettingValueByKey("pause_timelapse")
prepend_gcode=";Start script: Timelapse.py\n"
prepend_gcode+="G0 Y%.1f F9000\n"%(parkY)
prepend_gcode+="G0 X%.1f F9000\n"%(parkX)
prepend_gcode+="G4 S%.1f \n"%(PauseTimeLapse)
prepend_gcode+=";End script: Timelapse.py\n"
pattern = re.compile(r';MESH:.*STL')
for layer_number, layer in enumerate(data):
data[layer_number]=re.sub(pattern,prepend_gcode,layer,flags=re.IGNORECASE)
return data | en | 0.496538 | # Copyright (c) 2018 <NAME> # The PostProcessingPlugin is released under the terms of the AGPLv3 or higher. # Pause at layers and park XY. { "name": "Time lapse", "key": "Timelapse", "metadata": {}, "version": 2, "settings": { "head_park_x": { "label": "Park Print Head X", "description": "What X location does the head move to when pausing.", "unit": "mm", "type": "float", "default_value": 0 }, "head_park_y": { "label": "Park Print Head Y", "description": "What Y location does the head move to when pausing.", "unit": "mm", "type": "float", "default_value": 215 }, "pause_timelapse": { "label": "Pause at layers", "description": "Pause at layers for number of seconds.", "type": "float", "default_value": 1 } } } | 2.349024 | 2 |
src/medicineinventory/models.py | vandana0608/Pharmacy-Managament | 0 | 6615154 | <reponame>vandana0608/Pharmacy-Managament<gh_stars>0
from django.db import models
from django.urls import reverse
from supplier.models import supplier
from medicine_group.models import medicine_group
class medicineinventory(models.Model) :
medicine_id = models.CharField(max_length=10, default='MID',primary_key=True)
medicine_name = models.CharField(max_length=35, default='xxx')
medicine_groups = models.ForeignKey(medicine_group,on_delete=models.CASCADE)
quantity_on_hand = models.PositiveIntegerField(default = '0')
reorder_level = models.PositiveIntegerField(default = '1')
reorder_quantity = models.PositiveIntegerField(default = '1')
medicine_price = models.DecimalField(max_digits=10000,decimal_places=2, default='00000.00')
supplier_id = models.ForeignKey(supplier,on_delete=models.CASCADE)
def __str__(self):
return str(self.medicine_name)
return str(self.medicine_groups.medicine_groups)
return str(self.supplier_id.supplier_id)
def get_absolute_url(self):
return reverse('medicineinventory_edit', kwargs={'pk': self.pk})
# Create your models here.
| from django.db import models
from django.urls import reverse
from supplier.models import supplier
from medicine_group.models import medicine_group
class medicineinventory(models.Model) :
medicine_id = models.CharField(max_length=10, default='MID',primary_key=True)
medicine_name = models.CharField(max_length=35, default='xxx')
medicine_groups = models.ForeignKey(medicine_group,on_delete=models.CASCADE)
quantity_on_hand = models.PositiveIntegerField(default = '0')
reorder_level = models.PositiveIntegerField(default = '1')
reorder_quantity = models.PositiveIntegerField(default = '1')
medicine_price = models.DecimalField(max_digits=10000,decimal_places=2, default='00000.00')
supplier_id = models.ForeignKey(supplier,on_delete=models.CASCADE)
def __str__(self):
return str(self.medicine_name)
return str(self.medicine_groups.medicine_groups)
return str(self.supplier_id.supplier_id)
def get_absolute_url(self):
return reverse('medicineinventory_edit', kwargs={'pk': self.pk})
# Create your models here. | en | 0.963489 | # Create your models here. | 2.170287 | 2 |
inventory.py | buttahtoast/k8sdeploy | 1 | 6615155 | <filename>inventory.py
#!/usr/bin/env python3
"""Returns a ansible inventory with groups defined in ENV("STAGES")"""
import json
import os
import sys
## -- Default Variables
HOST="localhost"
DEFAULT={
"_meta": {
"hostvars": {
str(HOST): {
"ansible_connection": "local"
}
}
},
"all": {
"children": [
]
}
}
## -- Check Evnironment Stages
if os.getenv("STAGES"):
for stage in os.getenv("STAGES").split(","):
# Add children
DEFAULT["all"]["children"].append(stage.strip())
# Add group
DEFAULT.update({str(stage.strip()): { str("hosts"): [str(HOST)]}})
else:
print("'STAGES' not defined in your Environment")
sys.exit(1)
print(json.dumps(DEFAULT))
| <filename>inventory.py
#!/usr/bin/env python3
"""Returns a ansible inventory with groups defined in ENV("STAGES")"""
import json
import os
import sys
## -- Default Variables
HOST="localhost"
DEFAULT={
"_meta": {
"hostvars": {
str(HOST): {
"ansible_connection": "local"
}
}
},
"all": {
"children": [
]
}
}
## -- Check Evnironment Stages
if os.getenv("STAGES"):
for stage in os.getenv("STAGES").split(","):
# Add children
DEFAULT["all"]["children"].append(stage.strip())
# Add group
DEFAULT.update({str(stage.strip()): { str("hosts"): [str(HOST)]}})
else:
print("'STAGES' not defined in your Environment")
sys.exit(1)
print(json.dumps(DEFAULT))
| en | 0.516092 | #!/usr/bin/env python3 Returns a ansible inventory with groups defined in ENV("STAGES") ## -- Default Variables ## -- Check Evnironment Stages # Add children # Add group | 2.722466 | 3 |
src/apps/list/views/__init__.py | avibn/todovib | 8 | 6615156 | from .view_list import ViewList
from .update_list import UpdateListView
from .delete_list import DeleteListView
from .add_item import AddItemView
from .update_item import UpdateItemView
from .delete_item import DeleteItemView
from .complete_item import CompleteItemView
| from .view_list import ViewList
from .update_list import UpdateListView
from .delete_list import DeleteListView
from .add_item import AddItemView
from .update_item import UpdateItemView
from .delete_item import DeleteItemView
from .complete_item import CompleteItemView
| none | 1 | 1.124661 | 1 | |
CursoIntensivoPython/curso-intensivo-python-master/capitulo_04/exercicios/cubos.py | SweydAbdul/estudos-python | 0 | 6615157 | cubos = []
for n in range(1, 11):
cubos.append(n ** 3)
for n in cubos:
print(n)
| cubos = []
for n in range(1, 11):
cubos.append(n ** 3)
for n in cubos:
print(n)
| none | 1 | 3.371255 | 3 | |
src/blueberrypy/tests/test_email.py | wyuenho/blueberrypy | 1 | 6615158 | <reponame>wyuenho/blueberrypy<filename>src/blueberrypy/tests/test_email.py
import unittest
import warnings
from email.header import decode_header
from lazr.smtptest.controller import QueueController
from blueberrypy import email
from blueberrypy.email import Mailer
class MailerTest(unittest.TestCase):
def setUp(self):
self.controller = QueueController("localhost", 9025)
self.controller.start()
def tearDown(self):
self.controller.stop()
def test_send_email(self):
mailer = Mailer("localhost", 9025)
body = "This is the bloody test body"
mailer.send_email("<EMAIL>", "<EMAIL>", "test subject", body)
message = list(self.controller)[0]
(from_str, from_cs) = decode_header(message["From"])[0]
(to_str, to_cs) = decode_header(message["To"])[0]
(subject_str, subject_cs) = decode_header(message["Subject"])[0]
self.assertEqual("<EMAIL>", from_str)
self.assertEqual("<EMAIL>", to_str)
self.assertEqual("test subject", subject_str)
self.assertEqual(body, unicode(message.get_payload(decode=True),
message.get_content_charset()))
def test_send_html_email(self):
mailer = Mailer("localhost", 9025)
text = u"This is the bloody test body"
html = u"<p>This is the bloody test body</p>"
mailer.send_html_email("<EMAIL>", "<EMAIL>", "test subject", text, html)
message = list(self.controller)[0]
(from_str, from_cs) = decode_header(message["From"])[0]
(to_str, to_cs) = decode_header(message["To"])[0]
(subject_str, subject_cs) = decode_header(message["Subject"])[0]
self.assertEqual("<EMAIL>", from_str)
self.assertEqual("<EMAIL>", to_str)
self.assertEqual("test subject", subject_str)
self.assertEqual(text, unicode(message.get_payload(0).get_payload(decode=True),
message.get_payload(0).get_content_charset()))
self.assertEqual("text/plain", message.get_payload(0).get_content_type())
self.assertEqual(html, unicode(message.get_payload(1).get_payload(decode=True),
message.get_payload(1).get_content_charset()))
self.assertEqual("text/html", message.get_payload(1).get_content_type())
class EmailModuleFuncTest(unittest.TestCase):
def setUp(self):
self.controller = QueueController("localhost", 9025)
self.controller.start()
def tearDown(self):
self.controller.stop()
def test_warnings(self):
email._mailer = None
with warnings.catch_warnings(record=True):
warnings.simplefilter("error")
self.assertRaises(UserWarning, email.send_email, "<EMAIL>", "<EMAIL>",
"test subject", "test body")
self.assertRaises(UserWarning, email.send_html_email, "<EMAIL>",
"<EMAIL>", "test subject", "plain body", "<p>html body</p>")
def test_send_email(self):
email.configure({"host": "localhost",
"port": 9025})
body = "This is the bloody test body"
email.send_email("<EMAIL>", "<EMAIL>", "test subject", body)
message = list(self.controller)[0]
(from_str, from_cs) = decode_header(message["From"])[0]
(to_str, to_cs) = decode_header(message["To"])[0]
(subject_str, subject_cs) = decode_header(message["Subject"])[0]
self.assertEqual("<EMAIL>", from_str)
self.assertEqual("<EMAIL>", to_str)
self.assertEqual("test subject", subject_str)
self.assertEqual(body, unicode(message.get_payload(decode=True),
message.get_content_charset()))
def test_send_html_email(self):
email.configure({"host": "localhost",
"port": 9025})
text = u"This is the bloody test body"
html = u"<p>This is the bloody test body</p>"
email.send_html_email("<EMAIL>", "<EMAIL>", "test subject", text, html)
message = list(self.controller)[0]
(from_str, from_cs) = decode_header(message["From"])[0]
(to_str, to_cs) = decode_header(message["To"])[0]
(subject_str, subject_cs) = decode_header(message["Subject"])[0]
self.assertEqual("<EMAIL>", from_str)
self.assertEqual("<EMAIL>", to_str)
self.assertEqual("test subject", subject_str)
self.assertEqual(text, unicode(message.get_payload(0).get_payload(decode=True),
message.get_payload(0).get_content_charset()))
self.assertEqual("text/plain", message.get_payload(0).get_content_type())
self.assertEqual(html, unicode(message.get_payload(1).get_payload(decode=True),
message.get_payload(1).get_content_charset()))
self.assertEqual("text/html", message.get_payload(1).get_content_type())
| import unittest
import warnings
from email.header import decode_header
from lazr.smtptest.controller import QueueController
from blueberrypy import email
from blueberrypy.email import Mailer
class MailerTest(unittest.TestCase):
def setUp(self):
self.controller = QueueController("localhost", 9025)
self.controller.start()
def tearDown(self):
self.controller.stop()
def test_send_email(self):
mailer = Mailer("localhost", 9025)
body = "This is the bloody test body"
mailer.send_email("<EMAIL>", "<EMAIL>", "test subject", body)
message = list(self.controller)[0]
(from_str, from_cs) = decode_header(message["From"])[0]
(to_str, to_cs) = decode_header(message["To"])[0]
(subject_str, subject_cs) = decode_header(message["Subject"])[0]
self.assertEqual("<EMAIL>", from_str)
self.assertEqual("<EMAIL>", to_str)
self.assertEqual("test subject", subject_str)
self.assertEqual(body, unicode(message.get_payload(decode=True),
message.get_content_charset()))
def test_send_html_email(self):
mailer = Mailer("localhost", 9025)
text = u"This is the bloody test body"
html = u"<p>This is the bloody test body</p>"
mailer.send_html_email("<EMAIL>", "<EMAIL>", "test subject", text, html)
message = list(self.controller)[0]
(from_str, from_cs) = decode_header(message["From"])[0]
(to_str, to_cs) = decode_header(message["To"])[0]
(subject_str, subject_cs) = decode_header(message["Subject"])[0]
self.assertEqual("<EMAIL>", from_str)
self.assertEqual("<EMAIL>", to_str)
self.assertEqual("test subject", subject_str)
self.assertEqual(text, unicode(message.get_payload(0).get_payload(decode=True),
message.get_payload(0).get_content_charset()))
self.assertEqual("text/plain", message.get_payload(0).get_content_type())
self.assertEqual(html, unicode(message.get_payload(1).get_payload(decode=True),
message.get_payload(1).get_content_charset()))
self.assertEqual("text/html", message.get_payload(1).get_content_type())
class EmailModuleFuncTest(unittest.TestCase):
def setUp(self):
self.controller = QueueController("localhost", 9025)
self.controller.start()
def tearDown(self):
self.controller.stop()
def test_warnings(self):
email._mailer = None
with warnings.catch_warnings(record=True):
warnings.simplefilter("error")
self.assertRaises(UserWarning, email.send_email, "<EMAIL>", "<EMAIL>",
"test subject", "test body")
self.assertRaises(UserWarning, email.send_html_email, "<EMAIL>",
"<EMAIL>", "test subject", "plain body", "<p>html body</p>")
def test_send_email(self):
email.configure({"host": "localhost",
"port": 9025})
body = "This is the bloody test body"
email.send_email("<EMAIL>", "<EMAIL>", "test subject", body)
message = list(self.controller)[0]
(from_str, from_cs) = decode_header(message["From"])[0]
(to_str, to_cs) = decode_header(message["To"])[0]
(subject_str, subject_cs) = decode_header(message["Subject"])[0]
self.assertEqual("<EMAIL>", from_str)
self.assertEqual("<EMAIL>", to_str)
self.assertEqual("test subject", subject_str)
self.assertEqual(body, unicode(message.get_payload(decode=True),
message.get_content_charset()))
def test_send_html_email(self):
email.configure({"host": "localhost",
"port": 9025})
text = u"This is the bloody test body"
html = u"<p>This is the bloody test body</p>"
email.send_html_email("<EMAIL>", "<EMAIL>", "test subject", text, html)
message = list(self.controller)[0]
(from_str, from_cs) = decode_header(message["From"])[0]
(to_str, to_cs) = decode_header(message["To"])[0]
(subject_str, subject_cs) = decode_header(message["Subject"])[0]
self.assertEqual("<EMAIL>", from_str)
self.assertEqual("<EMAIL>", to_str)
self.assertEqual("test subject", subject_str)
self.assertEqual(text, unicode(message.get_payload(0).get_payload(decode=True),
message.get_payload(0).get_content_charset()))
self.assertEqual("text/plain", message.get_payload(0).get_content_type())
self.assertEqual(html, unicode(message.get_payload(1).get_payload(decode=True),
message.get_payload(1).get_content_charset()))
self.assertEqual("text/html", message.get_payload(1).get_content_type()) | none | 1 | 2.763197 | 3 | |
djstripe/models/sigma.py | jamezpolley/dj-stripe | 0 | 6615159 | # TODO: class ScheduledQuery
| # TODO: class ScheduledQuery
| en | 0.287746 | # TODO: class ScheduledQuery | 1.002466 | 1 |
implementations/WGAN/utils.py | STomoya/animeface | 61 | 6615160 |
import torch
import torch.optim as optim
from torchvision.utils import save_image
from .model import Generator, Discriminator, weights_init_normal
from dataset import AnimeFace
from utils import Status, save_args, add_args
from nnutils import get_device, sample_nnoise
def train(
epochs, n_critic, clip_value,
dataset, latent_dim,
G, optimizer_G,
D, optimizer_D,
device, save_interval
):
status = Status(len(dataset) * epochs)
for epoch in range(epochs):
for index, image in enumerate(dataset, 1):
real = image.to(device)
z = sample_nnoise((real.size(0), latent_dim), device)
# generate image
fake = G(z)
# D(real)
real_prob = D(real)
# D(G(z))
fake_prob = D(fake.detach())
# discriminator loss
D_loss = - real_prob.mean() + fake_prob.mean()
# optimize
optimizer_D.zero_grad()
D_loss.backward()
optimizer_D.step()
# clip weights
for param in D.parameters():
param.data.clamp_(-clip_value, clip_value)
G_loss = torch.tensor([0.])
if index % n_critic == 0:
# D(G(z))
fake_prob = D(fake)
# train to fool D
G_loss = - fake_prob.mean()
# optimize
optimizer_G.zero_grad()
G_loss.backward()
optimizer_G.step()
if status.batches_done % save_interval == 0:
save_image(
fake[:25], f'implementations/WGAN/result/{status.batches_done}.png',
nrow=5, normalize=True)
status.update(
g=G_loss.item(), d=D_loss.item())
status.plot_loss()
def main(parser):
parser = add_args(parser,
dict(
epochs = [150, 'epochs to train'],
latent_dim = [200, 'dimension of input latent'],
lr = [0.00005, 'learning rate'],
n_critic = [5, 'update G only n_critic step'],
clip_value = [0.01, 'clip weight value to [-clip_value,clip_value]']))
args = parser.parse_args()
save_args(args)
device = get_device(not args.disable_gpu)
dataset = AnimeFace.asloader(
args.batch_size, (args.image_size, args.min_year),
pin_memory=not args.disable_gpu)
G = Generator(latent_dim=args.latent_dim)
D = Discriminator()
G.apply(weights_init_normal)
D.apply(weights_init_normal)
G.to(device)
D.to(device)
optimizer_G = optim.RMSprop(G.parameters(), lr=args.lr)
optimizer_D = optim.RMSprop(D.parameters(), lr=args.lr)
train(
args.epochs, args.n_critic, args.clip_value,
dataset, args.latent_dim,
G, optimizer_G, D, optimizer_D,
device, args.save)
|
import torch
import torch.optim as optim
from torchvision.utils import save_image
from .model import Generator, Discriminator, weights_init_normal
from dataset import AnimeFace
from utils import Status, save_args, add_args
from nnutils import get_device, sample_nnoise
def train(
epochs, n_critic, clip_value,
dataset, latent_dim,
G, optimizer_G,
D, optimizer_D,
device, save_interval
):
status = Status(len(dataset) * epochs)
for epoch in range(epochs):
for index, image in enumerate(dataset, 1):
real = image.to(device)
z = sample_nnoise((real.size(0), latent_dim), device)
# generate image
fake = G(z)
# D(real)
real_prob = D(real)
# D(G(z))
fake_prob = D(fake.detach())
# discriminator loss
D_loss = - real_prob.mean() + fake_prob.mean()
# optimize
optimizer_D.zero_grad()
D_loss.backward()
optimizer_D.step()
# clip weights
for param in D.parameters():
param.data.clamp_(-clip_value, clip_value)
G_loss = torch.tensor([0.])
if index % n_critic == 0:
# D(G(z))
fake_prob = D(fake)
# train to fool D
G_loss = - fake_prob.mean()
# optimize
optimizer_G.zero_grad()
G_loss.backward()
optimizer_G.step()
if status.batches_done % save_interval == 0:
save_image(
fake[:25], f'implementations/WGAN/result/{status.batches_done}.png',
nrow=5, normalize=True)
status.update(
g=G_loss.item(), d=D_loss.item())
status.plot_loss()
def main(parser):
parser = add_args(parser,
dict(
epochs = [150, 'epochs to train'],
latent_dim = [200, 'dimension of input latent'],
lr = [0.00005, 'learning rate'],
n_critic = [5, 'update G only n_critic step'],
clip_value = [0.01, 'clip weight value to [-clip_value,clip_value]']))
args = parser.parse_args()
save_args(args)
device = get_device(not args.disable_gpu)
dataset = AnimeFace.asloader(
args.batch_size, (args.image_size, args.min_year),
pin_memory=not args.disable_gpu)
G = Generator(latent_dim=args.latent_dim)
D = Discriminator()
G.apply(weights_init_normal)
D.apply(weights_init_normal)
G.to(device)
D.to(device)
optimizer_G = optim.RMSprop(G.parameters(), lr=args.lr)
optimizer_D = optim.RMSprop(D.parameters(), lr=args.lr)
train(
args.epochs, args.n_critic, args.clip_value,
dataset, args.latent_dim,
G, optimizer_G, D, optimizer_D,
device, args.save)
| en | 0.685286 | # generate image # D(real) # D(G(z)) # discriminator loss # optimize # clip weights # D(G(z)) # train to fool D # optimize | 2.076434 | 2 |
Quantum_Shooting_Game/Single_Player.py | CodieKev/Quantum_Games-Damage-based-multiplayer-games- | 1 | 6615161 | <gh_stars>1-10
"""
Created on Wed Dec 18 02:14:18 2019
@author: codie
"""
from qiskit import QuantumRegister, ClassicalRegister
from qiskit import QuantumCircuit, execute, Aer
import numpy as np
def Result(a,b,d):
qc = QuantumCircuit()
q = QuantumRegister(7, 'q')
c = ClassicalRegister(1, 'c')
qc.add_register(q)
qc.add_register(c)
print("a,b,d",a,b,d)
qc.u3(a*np.pi, 0, 0, q[0])
qc.u3(b*2, 0, 0, q[1])
qc.u3(d*2, 0, 0, q[2])
qc.cx(q[1], q[3])
qc.cx(q[2], q[3])
qc.ccx(q[2], q[1], q[3])
qc.x(q[4])
qc.cswap(q[0], q[4], q[3])
qc.measure(q[3], c[0])
backend = Aer.get_backend('qasm_simulator')
job = execute(qc, backend=backend)
job_result = job.result()
data = job_result.get_counts(qc).get("1", "")
print(job_result.get_counts(qc))
return data
LB = 0
b = 0
d = 0
while LB==0:
A = B = C = D = F = 0
A = int(input("Use Knife(0 for No and 1 for YES):-"))
if A == 0:
B = int(input("Use Gun(0 for No and 1 for YES):-"))
if B!= 0:
C = (1/(15-2*int(input("Type of bullet(Dammage ranked from 1,4):-"))))
b=np.arcsin((((np.sin(b))**2)+C)**0.5)
elif B ==0:
D = int(input("Use Granade(0 for No and 1 for YES):-"))
if D!= 0:
F = 1/(2 + int(input("Distance(0,2):-")))
if F != 2:
d=np.arcsin((((np.sin(d))**2)+F)**0.5)
print(d)
print("a,b,d",A,b,d)
data = Result(A,b,d)
print("Dammage_Delt =",data/8.2,"%")
if data > 820:
print("You_Got_Killed")
LB = 1 | """
Created on Wed Dec 18 02:14:18 2019
@author: codie
"""
from qiskit import QuantumRegister, ClassicalRegister
from qiskit import QuantumCircuit, execute, Aer
import numpy as np
def Result(a,b,d):
qc = QuantumCircuit()
q = QuantumRegister(7, 'q')
c = ClassicalRegister(1, 'c')
qc.add_register(q)
qc.add_register(c)
print("a,b,d",a,b,d)
qc.u3(a*np.pi, 0, 0, q[0])
qc.u3(b*2, 0, 0, q[1])
qc.u3(d*2, 0, 0, q[2])
qc.cx(q[1], q[3])
qc.cx(q[2], q[3])
qc.ccx(q[2], q[1], q[3])
qc.x(q[4])
qc.cswap(q[0], q[4], q[3])
qc.measure(q[3], c[0])
backend = Aer.get_backend('qasm_simulator')
job = execute(qc, backend=backend)
job_result = job.result()
data = job_result.get_counts(qc).get("1", "")
print(job_result.get_counts(qc))
return data
LB = 0
b = 0
d = 0
while LB==0:
A = B = C = D = F = 0
A = int(input("Use Knife(0 for No and 1 for YES):-"))
if A == 0:
B = int(input("Use Gun(0 for No and 1 for YES):-"))
if B!= 0:
C = (1/(15-2*int(input("Type of bullet(Dammage ranked from 1,4):-"))))
b=np.arcsin((((np.sin(b))**2)+C)**0.5)
elif B ==0:
D = int(input("Use Granade(0 for No and 1 for YES):-"))
if D!= 0:
F = 1/(2 + int(input("Distance(0,2):-")))
if F != 2:
d=np.arcsin((((np.sin(d))**2)+F)**0.5)
print(d)
print("a,b,d",A,b,d)
data = Result(A,b,d)
print("Dammage_Delt =",data/8.2,"%")
if data > 820:
print("You_Got_Killed")
LB = 1 | en | 0.871184 | Created on Wed Dec 18 02:14:18 2019
@author: codie | 2.205616 | 2 |
src/intranet3/intranet3/asyncfetchers/github.py | tmodrzynski/intranet-open | 0 | 6615162 | <reponame>tmodrzynski/intranet-open
# coding: utf-8
import json
import re
from dateutil.parser import parse
from intranet3.helpers import serialize_url
from intranet3.log import INFO_LOG, EXCEPTION_LOG
from .base import BaseFetcher, BasicAuthMixin, FetcherBadDataError
from .bug import BaseBugProducer, BaseScrumProducer
from .request import RPC
LOG = INFO_LOG(__name__)
EXCEPTION = EXCEPTION_LOG(__name__)
class GithubScrumProducer(BaseScrumProducer):
def get_points(self, bug, tracker, login_mapping, parsed_data):
digit_labels = [int(label) for label in bug.labels if label.isdigit()]
return digit_labels[0] if digit_labels else 0
class GithubBugProducer(BaseBugProducer):
SCRUM_PRODUCER_CLASS = GithubScrumProducer
def parse(self, tracker, login_mapping, raw_data):
d = raw_data
result = dict(
id=str(d['number']),
github_id=d['id'],
desc=d['title'],
reporter=d['user']['login'],
owner=d['assignee']['login'] if d['assignee'] else None,
status=d['state'],
url=d['html_url'],
opendate=parse(d.get('created_at', '')),
changeddate=parse(d.get('updated_at', '')),
labels=[label['name'] for label in d['labels']],
)
return result
def get_project_name(self, tracker, login_mapping, parsed_data):
m = re.match('(.*?)github.com/(.*?)/(.*?)($|/.*)', parsed_data['url'])
return m and m.group(2) or ''
def get_component_name(self, tracker, login_mapping, parsed_data):
m = re.match('(.*?)github.com/(.*?)/(.*?)($|/.*)', parsed_data['url'])
return m and m.group(3) or ''
class GithubFetcher(BasicAuthMixin, BaseFetcher):
BUG_PRODUCER_CLASS = GithubBugProducer
#klucz do mapowania nazwa_milestonea -> numer milestonea
MILESTONES_KEY = 'milestones_map'
MILESTONES_TIMEOUT = 60*3
def __init__(self, *args, **kwargs):
super(GithubFetcher, self).__init__(*args, **kwargs)
def fetch_milestones(self, url):
url = str(url)
rpc = self.get_rpc()
rpc.url = url
rpc.start()
response = rpc.get_result()
return self.parse_milestones(response.content)
def parse_milestones(self, data):
milestone_map = {}
json_data = json.loads(data)
for milestone in json_data:
milestone_map[milestone['title']] = str(milestone['number'])
return milestone_map
def fetch_scrum(self, sprint_name, project_id=None, component_id=None):
base_url = '%srepos/%s/%s/' % (
self.tracker.url,
project_id,
component_id,
)
milestones_url = ''.join((base_url, 'milestones'))
issues_url = ''.join((base_url, 'issues?'))
milestones = self.fetch_milestones(
milestones_url,
)
if sprint_name not in milestones:
raise FetcherBadDataError('There is no %s milestone' % sprint_name)
opened_bugs_url = serialize_url(
issues_url,
**dict(
milestone=milestones.get(sprint_name),
state='open'
)
)
closed_bugs_url = serialize_url(
issues_url,
**dict(
milestone=milestones.get(sprint_name),
state='closed'
)
)
self.consume(RPC(url=opened_bugs_url))
self.consume(RPC(url=closed_bugs_url))
@staticmethod
def common_url_params():
return dict(
state='open',
format='json'
)
@staticmethod
def single_user_params():
return dict(
filter='assigned'
)
@staticmethod
def all_users_params():
return dict(
filter='all'
)
def fetch_user_tickets(self, resolved=False):
if resolved:
return
params = self.common_url_params()
params.update(self.single_user_params())
url = serialize_url(self.tracker.url + 'issues?', **params)
self.consume(RPC(url=url))
def fetch_all_tickets(self, resolved=False):
if resolved:
return
params = self.common_url_params()
params.update(self.all_users_params())
url = serialize_url(self.tracker.url + 'issues?', **params)
self.consume(RPC(url=url))
def fetch_bugs_for_query(self, ticket_ids=None, project_selector=None,
component_selector=None, version=None,
resolved=False):
if resolved:
return
super(GithubFetcher, self).fetch_bugs_for_query(
ticket_ids,
project_selector,
component_selector,
version,
resolved,
)
params = self.common_url_params()
if ticket_ids:
self._wanted_ticket_ids = ticket_ids
if project_selector and component_selector:
uri = self.tracker.url + "repos/%s/%s/issues?" % (
project_selector,
component_selector[0],
)
url = serialize_url(uri, **params)
self.consume(RPC(url=url))
def parse(self, data):
json_data = json.loads(data)
return json_data
| # coding: utf-8
import json
import re
from dateutil.parser import parse
from intranet3.helpers import serialize_url
from intranet3.log import INFO_LOG, EXCEPTION_LOG
from .base import BaseFetcher, BasicAuthMixin, FetcherBadDataError
from .bug import BaseBugProducer, BaseScrumProducer
from .request import RPC
LOG = INFO_LOG(__name__)
EXCEPTION = EXCEPTION_LOG(__name__)
class GithubScrumProducer(BaseScrumProducer):
def get_points(self, bug, tracker, login_mapping, parsed_data):
digit_labels = [int(label) for label in bug.labels if label.isdigit()]
return digit_labels[0] if digit_labels else 0
class GithubBugProducer(BaseBugProducer):
SCRUM_PRODUCER_CLASS = GithubScrumProducer
def parse(self, tracker, login_mapping, raw_data):
d = raw_data
result = dict(
id=str(d['number']),
github_id=d['id'],
desc=d['title'],
reporter=d['user']['login'],
owner=d['assignee']['login'] if d['assignee'] else None,
status=d['state'],
url=d['html_url'],
opendate=parse(d.get('created_at', '')),
changeddate=parse(d.get('updated_at', '')),
labels=[label['name'] for label in d['labels']],
)
return result
def get_project_name(self, tracker, login_mapping, parsed_data):
m = re.match('(.*?)github.com/(.*?)/(.*?)($|/.*)', parsed_data['url'])
return m and m.group(2) or ''
def get_component_name(self, tracker, login_mapping, parsed_data):
m = re.match('(.*?)github.com/(.*?)/(.*?)($|/.*)', parsed_data['url'])
return m and m.group(3) or ''
class GithubFetcher(BasicAuthMixin, BaseFetcher):
BUG_PRODUCER_CLASS = GithubBugProducer
#klucz do mapowania nazwa_milestonea -> numer milestonea
MILESTONES_KEY = 'milestones_map'
MILESTONES_TIMEOUT = 60*3
def __init__(self, *args, **kwargs):
super(GithubFetcher, self).__init__(*args, **kwargs)
def fetch_milestones(self, url):
url = str(url)
rpc = self.get_rpc()
rpc.url = url
rpc.start()
response = rpc.get_result()
return self.parse_milestones(response.content)
def parse_milestones(self, data):
milestone_map = {}
json_data = json.loads(data)
for milestone in json_data:
milestone_map[milestone['title']] = str(milestone['number'])
return milestone_map
def fetch_scrum(self, sprint_name, project_id=None, component_id=None):
base_url = '%srepos/%s/%s/' % (
self.tracker.url,
project_id,
component_id,
)
milestones_url = ''.join((base_url, 'milestones'))
issues_url = ''.join((base_url, 'issues?'))
milestones = self.fetch_milestones(
milestones_url,
)
if sprint_name not in milestones:
raise FetcherBadDataError('There is no %s milestone' % sprint_name)
opened_bugs_url = serialize_url(
issues_url,
**dict(
milestone=milestones.get(sprint_name),
state='open'
)
)
closed_bugs_url = serialize_url(
issues_url,
**dict(
milestone=milestones.get(sprint_name),
state='closed'
)
)
self.consume(RPC(url=opened_bugs_url))
self.consume(RPC(url=closed_bugs_url))
@staticmethod
def common_url_params():
return dict(
state='open',
format='json'
)
@staticmethod
def single_user_params():
return dict(
filter='assigned'
)
@staticmethod
def all_users_params():
return dict(
filter='all'
)
def fetch_user_tickets(self, resolved=False):
if resolved:
return
params = self.common_url_params()
params.update(self.single_user_params())
url = serialize_url(self.tracker.url + 'issues?', **params)
self.consume(RPC(url=url))
def fetch_all_tickets(self, resolved=False):
if resolved:
return
params = self.common_url_params()
params.update(self.all_users_params())
url = serialize_url(self.tracker.url + 'issues?', **params)
self.consume(RPC(url=url))
def fetch_bugs_for_query(self, ticket_ids=None, project_selector=None,
component_selector=None, version=None,
resolved=False):
if resolved:
return
super(GithubFetcher, self).fetch_bugs_for_query(
ticket_ids,
project_selector,
component_selector,
version,
resolved,
)
params = self.common_url_params()
if ticket_ids:
self._wanted_ticket_ids = ticket_ids
if project_selector and component_selector:
uri = self.tracker.url + "repos/%s/%s/issues?" % (
project_selector,
component_selector[0],
)
url = serialize_url(uri, **params)
self.consume(RPC(url=url))
def parse(self, data):
json_data = json.loads(data)
return json_data | pl | 0.70315 | # coding: utf-8 #klucz do mapowania nazwa_milestonea -> numer milestonea | 2.207452 | 2 |
graph/edge.py | je-c/Visualising-Graph-Traversals | 0 | 6615163 | """
Edge Module
============
Represents the edge in the undirected graph.
Contains the two connected vertices u and w.
Usage:
* Not to be run as the main class.
* Used as a link between vertices.
Example:
u = Vertex(x1, y1)
v = Vertex(x2, y2)
# Undirected, so Edge(v, u) == Edge(u, v)
e = Edge(v, u)
"""
class Edge:
"""
Edge Class
----------
Represents the edge between two vertices
Attributes:
* u (Vertex): The vertex connected.
* v (Vertex): The vertex connected.
"""
def __init__(self, u, v):
"""
Initialises the edge with two vertices
* :param u (Vertex): Vertex U connected with this edge.
* :param v (Vertex): Vertex V connected with this edge.
"""
self.u = u
self.v = v
def __eq__(self, other):
"""
Overrides the base equality so we can check that
two edges are equal to each other.
* :param other: The other object we are comparing
:return: Bool if equal
"""
# If it's the same class, then it should have the same vertices.
if isinstance(other, Edge):
return (other.u == self.v or other.u == self.u) \
and (other.v == self.u or other.v == self.v)
# If it's not the same class, it's not equal
return False
def __repr__(self):
"""
Defines the string representation of the edge.
"""
return "<{}-{}>".format(self.u, self.v)
def __hash__(self):
"""
Makes the class hashable
"""
return hash(repr(self))
| """
Edge Module
============
Represents the edge in the undirected graph.
Contains the two connected vertices u and w.
Usage:
* Not to be run as the main class.
* Used as a link between vertices.
Example:
u = Vertex(x1, y1)
v = Vertex(x2, y2)
# Undirected, so Edge(v, u) == Edge(u, v)
e = Edge(v, u)
"""
class Edge:
"""
Edge Class
----------
Represents the edge between two vertices
Attributes:
* u (Vertex): The vertex connected.
* v (Vertex): The vertex connected.
"""
def __init__(self, u, v):
"""
Initialises the edge with two vertices
* :param u (Vertex): Vertex U connected with this edge.
* :param v (Vertex): Vertex V connected with this edge.
"""
self.u = u
self.v = v
def __eq__(self, other):
"""
Overrides the base equality so we can check that
two edges are equal to each other.
* :param other: The other object we are comparing
:return: Bool if equal
"""
# If it's the same class, then it should have the same vertices.
if isinstance(other, Edge):
return (other.u == self.v or other.u == self.u) \
and (other.v == self.u or other.v == self.v)
# If it's not the same class, it's not equal
return False
def __repr__(self):
"""
Defines the string representation of the edge.
"""
return "<{}-{}>".format(self.u, self.v)
def __hash__(self):
"""
Makes the class hashable
"""
return hash(repr(self))
| en | 0.818304 | Edge Module ============ Represents the edge in the undirected graph. Contains the two connected vertices u and w. Usage: * Not to be run as the main class. * Used as a link between vertices. Example: u = Vertex(x1, y1) v = Vertex(x2, y2) # Undirected, so Edge(v, u) == Edge(u, v) e = Edge(v, u) Edge Class ---------- Represents the edge between two vertices Attributes: * u (Vertex): The vertex connected. * v (Vertex): The vertex connected. Initialises the edge with two vertices * :param u (Vertex): Vertex U connected with this edge. * :param v (Vertex): Vertex V connected with this edge. Overrides the base equality so we can check that two edges are equal to each other. * :param other: The other object we are comparing :return: Bool if equal # If it's the same class, then it should have the same vertices. # If it's not the same class, it's not equal Defines the string representation of the edge. Makes the class hashable | 4.37121 | 4 |
qutebrowser/.config/qutebrowser/conpig/adblock.py | yusrilip/dotfiles-1 | 30 | 6615164 | from qutebrowser.api import interceptor
# Youtube
def filterYoutube(info: interceptor.Request):
"""Block the given request if necessary."""
# and url.path() == "/get_video_info"
url = info.request_url
if "youtube.com" in url.host() and "adformat=" in url.query():
info.block()
# if "googlevideo.com" in url.host() and "expire=" in url.query():
# info.block()
interceptor.register(filterYoutube)
| from qutebrowser.api import interceptor
# Youtube
def filterYoutube(info: interceptor.Request):
"""Block the given request if necessary."""
# and url.path() == "/get_video_info"
url = info.request_url
if "youtube.com" in url.host() and "adformat=" in url.query():
info.block()
# if "googlevideo.com" in url.host() and "expire=" in url.query():
# info.block()
interceptor.register(filterYoutube)
| en | 0.500415 | # Youtube Block the given request if necessary. # and url.path() == "/get_video_info" # if "googlevideo.com" in url.host() and "expire=" in url.query(): # info.block() | 2.693684 | 3 |
project/lib/corpus.py | pbgnz/automatic-language-identification | 1 | 6615165 | import os
import re
import string
import unidecode
class Corpus:
"""
Corpus is used for handling text files.
"""
def __init__(self, file_path):
file = open(file_path, encoding="utf8")
self.corpus = file.readlines()
file.close()
_, tail = os.path.split(file_path)
self.name = tail
@staticmethod
def clean(corpus):
"""
Separates each line of the corpus by words and removes punctuation.
>>> clean('first10TestSentences.txt')
[['What', 'will', 'the', 'Japanese', 'economy', 'be', 'like', 'next', 'year'],
['She', 'asked', 'him', 'if', 'he', 'was', 'a', 'student', 'at', 'this', 'school'],
['Im', 'OK'], ['Birds', 'build', 'nests'], ['I', 'hate', 'AI'], ['Loiseau', 'vole'],
['Woody', 'Allen', 'parle'], ['Estce', 'que', 'larbitre', 'est', 'la'],
['Cette', 'phrase', 'est', 'en', 'anglais'], ['Jaime', 'lIA']]
:param string corpus: String path of the corpus text file
:return list: List of lists.
"""
# separate the corpus by words
words_split = []
for line in corpus:
unaccented = unidecode.unidecode(line)
words_split.append(unaccented.split())
# Remove punctuation
regex = re.compile('[%s]' % re.escape(string.punctuation))
no_punctuation = []
for review in words_split:
new_review = []
for token in review:
new_token = regex.sub(u'', token)
if not new_token == u'':
new_review.append(new_token)
no_punctuation.append(new_review)
# remove empty lists
return [x for x in no_punctuation if x != []]
@staticmethod
def tokenize(cleaned_text):
"""
Flattens a list of lists and splits all the words into characters.
>>> tokenize([['the', 'Japanese', 'economy'],['next','year']])
['t', 'h', 'e', 'j', 'a', 'p', 'a', 'n', 'e', 's', 'e', 'e', 'c',
'o', 'n', 'o', 'm', 'y', 'n', 'e', 'x', 't', 'y', 'e', 'a', 'r']
:param list cleaned_text: List of lists.
:return list: List of characters.
"""
# flatten the list
flattened = [val for sublist in cleaned_text for val in sublist]
# Separate by character
tokens = []
for word in flattened:
l = list(word)
# only add ascii letters
for c in l:
if c in string.ascii_uppercase:
tokens.extend(c.lower())
elif c in string.ascii_lowercase:
tokens.extend(c)
return tokens
def clean_and_tokenize(self):
return Corpus.tokenize(Corpus.clean(self.corpus))
def sanitize(self):
return Corpus.clean(self.corpus)
| import os
import re
import string
import unidecode
class Corpus:
"""
Corpus is used for handling text files.
"""
def __init__(self, file_path):
file = open(file_path, encoding="utf8")
self.corpus = file.readlines()
file.close()
_, tail = os.path.split(file_path)
self.name = tail
@staticmethod
def clean(corpus):
"""
Separates each line of the corpus by words and removes punctuation.
>>> clean('first10TestSentences.txt')
[['What', 'will', 'the', 'Japanese', 'economy', 'be', 'like', 'next', 'year'],
['She', 'asked', 'him', 'if', 'he', 'was', 'a', 'student', 'at', 'this', 'school'],
['Im', 'OK'], ['Birds', 'build', 'nests'], ['I', 'hate', 'AI'], ['Loiseau', 'vole'],
['Woody', 'Allen', 'parle'], ['Estce', 'que', 'larbitre', 'est', 'la'],
['Cette', 'phrase', 'est', 'en', 'anglais'], ['Jaime', 'lIA']]
:param string corpus: String path of the corpus text file
:return list: List of lists.
"""
# separate the corpus by words
words_split = []
for line in corpus:
unaccented = unidecode.unidecode(line)
words_split.append(unaccented.split())
# Remove punctuation
regex = re.compile('[%s]' % re.escape(string.punctuation))
no_punctuation = []
for review in words_split:
new_review = []
for token in review:
new_token = regex.sub(u'', token)
if not new_token == u'':
new_review.append(new_token)
no_punctuation.append(new_review)
# remove empty lists
return [x for x in no_punctuation if x != []]
@staticmethod
def tokenize(cleaned_text):
"""
Flattens a list of lists and splits all the words into characters.
>>> tokenize([['the', 'Japanese', 'economy'],['next','year']])
['t', 'h', 'e', 'j', 'a', 'p', 'a', 'n', 'e', 's', 'e', 'e', 'c',
'o', 'n', 'o', 'm', 'y', 'n', 'e', 'x', 't', 'y', 'e', 'a', 'r']
:param list cleaned_text: List of lists.
:return list: List of characters.
"""
# flatten the list
flattened = [val for sublist in cleaned_text for val in sublist]
# Separate by character
tokens = []
for word in flattened:
l = list(word)
# only add ascii letters
for c in l:
if c in string.ascii_uppercase:
tokens.extend(c.lower())
elif c in string.ascii_lowercase:
tokens.extend(c)
return tokens
def clean_and_tokenize(self):
return Corpus.tokenize(Corpus.clean(self.corpus))
def sanitize(self):
return Corpus.clean(self.corpus)
| en | 0.238127 | Corpus is used for handling text files. Separates each line of the corpus by words and removes punctuation. >>> clean('first10TestSentences.txt') [['What', 'will', 'the', 'Japanese', 'economy', 'be', 'like', 'next', 'year'], ['She', 'asked', 'him', 'if', 'he', 'was', 'a', 'student', 'at', 'this', 'school'], ['Im', 'OK'], ['Birds', 'build', 'nests'], ['I', 'hate', 'AI'], ['Loiseau', 'vole'], ['Woody', 'Allen', 'parle'], ['Estce', 'que', 'larbitre', 'est', 'la'], ['Cette', 'phrase', 'est', 'en', 'anglais'], ['Jaime', 'lIA']] :param string corpus: String path of the corpus text file :return list: List of lists. # separate the corpus by words # Remove punctuation # remove empty lists Flattens a list of lists and splits all the words into characters. >>> tokenize([['the', 'Japanese', 'economy'],['next','year']]) ['t', 'h', 'e', 'j', 'a', 'p', 'a', 'n', 'e', 's', 'e', 'e', 'c', 'o', 'n', 'o', 'm', 'y', 'n', 'e', 'x', 't', 'y', 'e', 'a', 'r'] :param list cleaned_text: List of lists. :return list: List of characters. # flatten the list # Separate by character # only add ascii letters | 3.458428 | 3 |
1b2b/learn.py | AIandSocialGoodLab/FeatureDeceptionGame | 0 | 6615166 | from nn import NN
import numpy as np
import torch
class LearnNN:
def __init__(self, seed, n, K, K2, K3, target, numData):
self.n = n
self.K = K
self.K2 = K2
self.K3 = K3
self.seed = seed
self.numData = numData
self.Ntest = 500000
self.lr = 1e-1
self.nepoch = 20
self.nstep = 10
self.lb = -1
self.ub = 1
np.random.seed(self.seed)
torch.manual_seed(self.seed)
self.target = target
self.learn_model = NN(K, K2, K3, True)
self.history = np.zeros((self.numData.size, 4))
self.xTest = torch.randn(self.Ntest, self.n, self.K)
testScore = self.target.forward(self.xTest)
testProb = torch.nn.functional.softmax(testScore, dim=1)
self.yTest = torch.squeeze(torch.multinomial(testProb, 1))
def learn(self):
for datasizei in range(self.numData.size):
self.Ntrain = self.numData[datasizei]
self.batch_size = int(self.Ntrain / self.nepoch)
self.learn_model = NN(self.K, self.K2, self.K3, True)
self.learnGD(datasizei)
np.savetxt("history"+str(self.n)+"_"+str(self.K)+"_"+str(self.K2)+"_"+str(self.K3)+"_"+str(self.seed)+".csv", self.history, delimiter=',')
def learnGD(self, sizei):
xTrain = torch.randn(self.Ntrain, self.n, self.K)
trueScore = self.target.forward(xTrain)
trueProb = torch.nn.functional.softmax(trueScore, dim=1)
yTrain = torch.squeeze(torch.multinomial(trueProb, 1))
criterion = torch.nn.CrossEntropyLoss(reduction='sum')
criterionTest = torch.nn.L1Loss(reduction='mean')
optimizer = torch.optim.RMSprop(self.learn_model.parameters(), lr=self.lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=6, gamma=0.1)
for epoch in range(self.nepoch):
scheduler.step()
for t in range(self.nstep):
rand_ix = np.random.randint(0, self.Ntrain, (self.batch_size,))
batch_x = xTrain[rand_ix]
batch_y = yTrain[rand_ix]
y_pred = self.learn_model(batch_x)
loss = criterion(y_pred, batch_y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
with torch.no_grad():
y_pred_test = self.learn_model(self.xTest)
loss_test = criterion(y_pred_test, self.yTest)
y_pred_validation = self.target(self.xTest)
loss_validation = criterion(y_pred_validation, self.yTest)
optimizer.zero_grad()
y_pred_test_prob = torch.nn.functional.softmax(y_pred_test, dim=1)
y_pred_validation_prob = torch.nn.functional.softmax(y_pred_validation, dim=1)
TV_loss = criterionTest(y_pred_test_prob, y_pred_validation_prob)*self.n/2
L1distance = 0
numParam = 0
for i in range(len(list(self.learn_model.parameters()))):
numParam += np.array(list(self.learn_model.parameters())[i].flatten().data).size
w = np.array(list(self.learn_model.parameters())[i].data)
target_w = np.array(list(self.target.parameters())[i].data)
L1distance += np.sum(np.abs(w - target_w))
L1distance = L1distance / numParam
self.history[sizei,:] = np.array([self.Ntrain, loss_test.item()/self.Ntest - loss_validation.item()/self.Ntest, TV_loss.item(), L1distance])
return self.learn_model
| from nn import NN
import numpy as np
import torch
class LearnNN:
def __init__(self, seed, n, K, K2, K3, target, numData):
self.n = n
self.K = K
self.K2 = K2
self.K3 = K3
self.seed = seed
self.numData = numData
self.Ntest = 500000
self.lr = 1e-1
self.nepoch = 20
self.nstep = 10
self.lb = -1
self.ub = 1
np.random.seed(self.seed)
torch.manual_seed(self.seed)
self.target = target
self.learn_model = NN(K, K2, K3, True)
self.history = np.zeros((self.numData.size, 4))
self.xTest = torch.randn(self.Ntest, self.n, self.K)
testScore = self.target.forward(self.xTest)
testProb = torch.nn.functional.softmax(testScore, dim=1)
self.yTest = torch.squeeze(torch.multinomial(testProb, 1))
def learn(self):
for datasizei in range(self.numData.size):
self.Ntrain = self.numData[datasizei]
self.batch_size = int(self.Ntrain / self.nepoch)
self.learn_model = NN(self.K, self.K2, self.K3, True)
self.learnGD(datasizei)
np.savetxt("history"+str(self.n)+"_"+str(self.K)+"_"+str(self.K2)+"_"+str(self.K3)+"_"+str(self.seed)+".csv", self.history, delimiter=',')
def learnGD(self, sizei):
xTrain = torch.randn(self.Ntrain, self.n, self.K)
trueScore = self.target.forward(xTrain)
trueProb = torch.nn.functional.softmax(trueScore, dim=1)
yTrain = torch.squeeze(torch.multinomial(trueProb, 1))
criterion = torch.nn.CrossEntropyLoss(reduction='sum')
criterionTest = torch.nn.L1Loss(reduction='mean')
optimizer = torch.optim.RMSprop(self.learn_model.parameters(), lr=self.lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=6, gamma=0.1)
for epoch in range(self.nepoch):
scheduler.step()
for t in range(self.nstep):
rand_ix = np.random.randint(0, self.Ntrain, (self.batch_size,))
batch_x = xTrain[rand_ix]
batch_y = yTrain[rand_ix]
y_pred = self.learn_model(batch_x)
loss = criterion(y_pred, batch_y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
with torch.no_grad():
y_pred_test = self.learn_model(self.xTest)
loss_test = criterion(y_pred_test, self.yTest)
y_pred_validation = self.target(self.xTest)
loss_validation = criterion(y_pred_validation, self.yTest)
optimizer.zero_grad()
y_pred_test_prob = torch.nn.functional.softmax(y_pred_test, dim=1)
y_pred_validation_prob = torch.nn.functional.softmax(y_pred_validation, dim=1)
TV_loss = criterionTest(y_pred_test_prob, y_pred_validation_prob)*self.n/2
L1distance = 0
numParam = 0
for i in range(len(list(self.learn_model.parameters()))):
numParam += np.array(list(self.learn_model.parameters())[i].flatten().data).size
w = np.array(list(self.learn_model.parameters())[i].data)
target_w = np.array(list(self.target.parameters())[i].data)
L1distance += np.sum(np.abs(w - target_w))
L1distance = L1distance / numParam
self.history[sizei,:] = np.array([self.Ntrain, loss_test.item()/self.Ntest - loss_validation.item()/self.Ntest, TV_loss.item(), L1distance])
return self.learn_model
| none | 1 | 2.721579 | 3 | |
src/GamePrep.py | AndrewMDelgado/UTA_ChessBot | 0 | 6615167 | from Board import Board
from Pawn import Pawn
from Rook import Rook
from King import King
from Bishop import Bishop
from Knight import Knight
from Queen import Queen
from Coordinate import Coordinate as C
from InputParser import InputParser
import re
import os
# Side identifiers used throughout the module: WHITE is True and BLACK is
# False, so a side can be flipped cheaply with `not side`.
WHITE = True
BLACK = False
class GamePrep:
    """Saves/loads game-state files and parses command-line startup tags.

    File format (written by saveGame, read by loadGame): 8 lines of 8
    characters encoding the board rank by rank, followed by one history
    line "<side> <last move><captured piece>".
    """

    def loadGame(self, filename):
        """Reconstruct a Board from a file previously written by saveGame."""
        board = Board()
        board.pieces.clear()
        # NOTE(review): the parsers look unused, but InputParser may attach
        # itself to the board on construction -- left in place.
        parserWhite = InputParser(board, WHITE)
        parserBlack = InputParser(board, BLACK)
        with open(filename, "r") as gameFile:
            lines = [line for line in gameFile]
        histStr = lines[8]
        currentSide = ('W' == histStr[0])
        board.currentSide = currentSide
        # Character -> piece class.  Digits encode pawns/rooks/kings whose
        # move count matters; see getPieceNewRep in saveGame.
        pieceDict = dict.fromkeys(['p', '0', '1', '6', '7'], Pawn)
        # BUG FIX: knights were keyed on 'k', which the King entry below
        # overwrote immediately (knights could never be loaded); saveGame
        # writes knights as their stringRep ('N'/'n'), so key them on 'n'.
        pieceDict.update(dict.fromkeys(['n'], Knight))
        pieceDict.update(dict.fromkeys(['b'], Bishop))
        pieceDict.update(dict.fromkeys(['r', '2', '3', '8', '9'], Rook))
        pieceDict.update(dict.fromkeys(['q'], Queen))
        pieceDict.update(dict.fromkeys(['k', '4', '5', 'm'], King))
        for i in range(8):
            for j, p in enumerate(lines[i]):
                if p == '-':
                    continue
                coords = C(j, i)
                movesMade = 0
                if p.isalpha():
                    side = p.isupper()  # WHITE if True, else BLACK
                    if p.lower() == 'm':  # twice-moved king on either side
                        movesMade = 2
                    # movesMade += 1 + int(p.isupper())  # movesMade = 1 or 2 based on p's case
                    # if p.lower() in "ab" and ((side == WHITE and coords[1] == 1)
                    #         or (side == BLACK and coords[1] == 6)):  # proves p is a pawn that hasn't moved
                    #     movesMade = 0
                else:
                    numP = int(p)
                    side = (numP % 2 == 0)  # WHITE if True, else BLACK
                    movesMade = 1 + int(numP > 5)
                # BUG FIX: extend() would try to iterate the piece object
                # itself; a single new piece must be append()ed.
                board.pieces.append(pieceDict[p.lower()](board, side, coords, movesMade))
        # TODO: Process last move to enter into board.history
        return board

    def saveGame(self, board, filename):
        """Serialize *board* to *filename*; return the absolute file path."""
        def replaceChar(s, i, r):
            # Return s with the character at index i replaced by r.
            return s[:i] + r + s[i + 1:]

        def getPieceNewRep(piece):
            # One-character encoding.  Pawns/rooks/kings that have moved are
            # encoded as digits 0-9; a twice-moved king becomes 'M'/'m'.
            importantMovers = "PpRrKk"
            p = piece.stringRep
            if piece.side == BLACK:
                p = p.lower()
            mover = piece.movesMade > 0
            traveler = piece.movesMade > 1
            if mover and p in importantMovers:
                pIdx = importantMovers.index(p)
                if traveler:
                    pIdx += 6
                if pIdx == 10:
                    pIdx = 'M'  # Monarch
                if pIdx == 11:
                    pIdx = 'm'
                p = str(pIdx)
            return p

        with open(filename, "w") as gameFile:
            filepath = os.path.abspath(gameFile.name)
            pieceStr = "----------------------------------------------------------------"
            for piece in board.pieces:
                p = getPieceNewRep(piece)
                pos = piece.position[0] + (8 * piece.position[1])
                pieceStr = replaceChar(pieceStr, pos, p)
            # Write the top rank first (the board string is built bottom-up).
            for i in range(8):
                fin = 64 - (i * 8)
                init = fin - 8
                gameFile.write(pieceStr[init:fin] + '\n')
            # NOTE(review): the side letter written is the opposite of
            # currentSide, while loadGame reads it back as currentSide
            # directly -- confirm which convention is intended before
            # relying on save/load round-trips.
            if board.currentSide == WHITE:
                histStr = "B "
            else:
                histStr = "W "
            dashes = "-----"
            takenStr = "-"
            if board.history:
                lastMove = board.getLastMove()
                lastMoveStr = str(lastMove)
                if lastMove.passant:
                    lastMoveStr = lastMoveStr[:4] + 'P'
                elif lastMove.promotion:
                    lastMoveStr = lastMoveStr[:4] + lastMoveStr[5]
                idx = len(lastMoveStr)
                histStr += lastMoveStr + dashes[idx:]
                pieceTaken = lastMove.pieceToCapture
                if pieceTaken:
                    takenStr = getPieceNewRep(pieceTaken)
            else:
                histStr += dashes
            histStr += takenStr
            gameFile.write(histStr)
        return filepath

    def processTags(self, argv):
        """Parse startup tags; return (showBoard, twoPlayer, readBoard,
        quickStart, variantGame, customGame)."""
        showBoard = "--hb" not in argv
        twoPlayer = "--two" in argv
        readBoard = "--rb" in argv
        if "--help" in argv:
            print("Tags:")
            # (typo fix in help text: "afer" -> "after")
            print("--hb Hide board: Opt out of automatically displaying the board after each move.")
            print("--two Two-player: Play game with two human players rather than the AI.")
            print("--rb Read board: Player input solely from moves on the physical board.")
            print("--vg Variant game: Test move functionality with preset board states,"
                  " or load a saved game state (load games not yet implemented).\n")
            if len(argv) == 2:  # --help is the only tag
                exit(0)
        qsRegEx = re.compile(r'--qs(w|W|b|B)[1-3]$')
        quickStart = None
        for arg in argv:
            if qsRegEx.match(arg):
                # e.g. "--qsb2": side letter at index 4, level digit at 5.
                quickStart = [WHITE, 1]
                if arg[4] == 'b' or arg[4] == 'B':
                    quickStart[0] = BLACK
                quickStart[1] = int(arg[5])
        variantGame = ""
        customGame = None
        if "--vg" in argv:
            variantGame = input(
                "Variant game? [mate, castle, passant, promotion, custom]: ").lower()
            # if variantGame == 'custom':
            #     filename = input("Enter custom game filename: ")
            #     customGame = self.loadGame(filename)
        return showBoard, twoPlayer, readBoard, quickStart, variantGame, customGame
| from Board import Board
from Pawn import Pawn
from Rook import Rook
from King import King
from Bishop import Bishop
from Knight import Knight
from Queen import Queen
from Coordinate import Coordinate as C
from InputParser import InputParser
import re
import os
WHITE = True
BLACK = False
class GamePrep:
def loadGame(self, filename):
board = Board()
board.pieces.clear()
parserWhite = InputParser(board, WHITE)
parserBlack = InputParser(board, BLACK)
gameFile = open(filename, "r")
lines = []
for line in gameFile:
lines.append(line)
gameFile.close()
histStr = lines[8]
currentSide = ('W' == histStr[0])
board.currentSide = currentSide
pieceDict = dict.fromkeys(['p', '0', '1', '6', '7'], Pawn)
pieceDict.update(dict.fromkeys(['k'], Knight))
pieceDict.update(dict.fromkeys(['b'], Bishop))
pieceDict.update(dict.fromkeys(['r', '2', '3', '8', '9'], Rook))
pieceDict.update(dict.fromkeys(['q'], Queen))
pieceDict.update(dict.fromkeys(['k', '4', '5', 'm'], King))
for i in range(8):
for j, p in enumerate(lines[i]):
if p == '-':
continue
coords = C(j, i)
movesMade = 0
if p.isalpha():
side = p.isupper() #WHITE if True, else BLACK
if p.lower() == 'm': #king on either side
movesMade = 2
#movesMade += 1 + int(p.isupper()) #movesMade = 1 or 2 based on p's case
#if p.lower() in "ab" and ((side == WHITE and coords[1] == 1) \
# or (side == BLACK and coords[1] == 6)): #proves p is a pawn that hasn't moved
# movesMade = 0
else:
numP = int(p)
side = (numP % 2 == 0) #WHITE if True, else BLACK
movesMade = 1 + int(numP > 5)
board.pieces.extend(pieceDict[p.lower()](board, side, coords, movesMade))
#TODO: Process last move to enter into board.history
return board
def saveGame(self, board, filename):
def replaceChar(s, i, r):
return s[:i]+r+s[i+1:]
def getPieceNewRep(piece):
importantMovers = "PpRrKk"
p = piece.stringRep
if piece.side == BLACK:
p = p.lower()
mover = piece.movesMade > 0
traveler = piece.movesMade > 1
if mover and p in importantMovers:
pIdx = importantMovers.index(p)
if traveler:
pIdx += 6
if pIdx == 10:
pIdx = 'M' #Monarch
if pIdx == 11:
pIdx = 'm'
p = str(pIdx)
return p
gameFile = open(filename, "w")
filepath = os.path.abspath(gameFile.name)
pieceStr = "----------------------------------------------------------------"
for piece in board.pieces:
p = getPieceNewRep(piece)
pos = piece.position[0] + (8 * piece.position[1])
pieceStr = replaceChar(pieceStr, pos, p)
for i in range(8):
fin = 64 - (i * 8)
init = fin - 8
gameFile.write(pieceStr[init:fin] + '\n')
if board.currentSide == WHITE:
histStr = "B "
else:
histStr = "W "
dashes = "-----"
takenStr = "-"
if board.history:
lastMove = board.getLastMove()
lastMoveStr = str(lastMove)
if lastMove.passant:
lastMoveStr = lastMoveStr[:4] + 'P'
elif lastMove.promotion:
lastMoveStr = lastMoveStr[:4] + lastMoveStr[5]
idx = len(lastMoveStr)
histStr += lastMoveStr + dashes[idx:]
pieceTaken = lastMove.pieceToCapture
if pieceTaken:
takenStr = getPieceNewRep(pieceTaken)
else:
histStr += dashes
histStr += takenStr
gameFile.write(histStr)
gameFile.close()
return filepath
def processTags(self, argv):
showBoard = not "--hb" in argv
twoPlayer = "--two" in argv
readBoard = "--rb" in argv
if "--help" in argv:
print("Tags:")
print("--hb Hide board: Opt out of automatically displaying the board afer each move.")
print("--two Two-player: Play game with two human players rather than the AI.")
print("--rb Read board: Player input solely from moves on the physical board.")
print("--vg Variant game: Test move functionality with preset board states," \
" or load a saved game state (load games not yet implemented).\n")
if len(argv) == 2: #--help is the only tag
exit(0)
qsRegEx = re.compile('--qs(w|W|b|B)[1-3]$')
quickStart = None
for arg in argv:
if qsRegEx.match(arg):
quickStart = [WHITE, 1]
if arg[4] == 'b' or arg[4] == 'B':
quickStart[0] = BLACK
quickStart[1] = int(arg[5])
variantGame = ""
customGame = None
if "--vg" in argv:
variantGame = input(
"Variant game? [mate, castle, passant, promotion, custom]: ").lower()
#if variantGame == 'custom':
# filename = input("Enter custom game filename: ")
# customGame = self.loadGame(filename)
return showBoard, twoPlayer, readBoard, quickStart, variantGame, customGame
| en | 0.630642 | #WHITE if True, else BLACK #king on either side #movesMade += 1 + int(p.isupper()) #movesMade = 1 or 2 based on p's case #if p.lower() in "ab" and ((side == WHITE and coords[1] == 1) \ # or (side == BLACK and coords[1] == 6)): #proves p is a pawn that hasn't moved # movesMade = 0 #WHITE if True, else BLACK #TODO: Process last move to enter into board.history #Monarch #--help is the only tag #if variantGame == 'custom': # filename = input("Enter custom game filename: ") # customGame = self.loadGame(filename) | 2.997164 | 3 |
logger.py | XLEric/dpcs | 0 | 6615168 | #-*-coding:utf-8-*-
import logging
from logging.handlers import TimedRotatingFileHandler
import os
import sys
import multiprocessing as mp
def get_logger(proc_name="log_DpEngine"):
    """Return the root logger configured to write to ./log/<proc_name>.log.

    The file handler rotates at midnight and keeps 30 backups; a second
    handler mirrors everything to stdout.  Level is forced to INFO.
    """
    # create log file
    log_file = './log/' + proc_name + '.log'
    if not os.path.exists('./log'):
        os.mkdir('./log')
    if not os.path.exists(log_file):
        open(log_file, "a+").close()
    # logger (note: this configures the *root* logger, so every module's
    # logging propagates here)
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)  # logging.DEBUG
    # BUG FIX: guard against re-adding handlers on repeated calls, which
    # used to duplicate every log line once per call.
    if logger.handlers:
        return logger
    # fhandler
    handler = TimedRotatingFileHandler(log_file, when='midnight', interval=1, backupCount=30)
    strfmt = "[%(asctime)s] %(filename)s[line:%(lineno)d] %(levelname)s %(message)s"
    # format
    formatter = logging.Formatter(strfmt)
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    std_handler = logging.StreamHandler(sys.stdout)
    std_handler.setFormatter(formatter)
    logger.addHandler(std_handler)
    return logger
logger = get_logger()
| #-*-coding:utf-8-*-
import logging
from logging.handlers import TimedRotatingFileHandler
import os
import sys
import multiprocessing as mp
def get_logger(proc_name="log_DpEngine"):
# create log file
log_file = './log/' + proc_name + '.log'
if not os.path.exists('./log'):
os.mkdir('./log')
if not os.path.exists(log_file):
open(log_file, "a+").close()
# logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)# logging.DEBUG
# fhandler
handler = TimedRotatingFileHandler(log_file, when='midnight', interval=1, backupCount=30)
strfmt = "[%(asctime)s] %(filename)s[line:%(lineno)d] %(levelname)s %(message)s"
# format
formatter = logging.Formatter(strfmt)
handler.setFormatter(formatter)
logger.addHandler(handler)
std_handler = logging.StreamHandler(sys.stdout)
std_handler.setFormatter(formatter)
logger.addHandler(std_handler)
return logger
logger = get_logger()
| en | 0.423737 | #-*-coding:utf-8-*- # create log file # logger # logging.DEBUG # fhandler # format | 2.411666 | 2 |
SM.py | Mxhmovd/Swipe-Mailer | 1 | 6615169 | <reponame>Mxhmovd/Swipe-Mailer<filename>SM.py
#!/usr/bin/python
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import sys, time
from getpass import getpass
eServices = ["smtp.gmail.com",
"smtp.mail.yahoo.com",
"smtp-mail.outlook.com"]
b, r, w, y, p, g, bld, z = '\33[94m', '\033[91m', '\33[97m', '\33[93m', '\033[1;35m', '\033[1;32m', '\033[1m', '\033[0m'
animation = (g +'——►'+ g, '»—————►')
for i in range(15):
sys.stdout.write('\b\b\b')
sys.stdout.write(animation[i % len(animation)])
sys.stdout.flush()
time.sleep(0.04)
print (' 100%')
time.sleep(1)
sys.stdout.write('\x1b[1A')
sys.stdout.write('\x1b[2K')
sys.stdout.write(g + (" " * 76) + """
________ _____ ______ ___ ___________
__ ___/__ ____(_)____________ ___ |/ /_____ ___(_)__ /____________
_____ \__ | /| / /_ /___ __ \ _ \ __ /|_/ /_ __ `/_ /__ /_ _ \_ ___/
____/ /__ |/ |/ /_ / __ /_/ / __/ _ / / / / /_/ /_ / _ / / __/ /
/____/ ____/|__/ /_/ _ .___/\___/ /_/ /_/ \__,_/ /_/ /_/ \___//_/
/_/
""" + z + b +
'\n' + '{}Email Marketing sender ({}Swipe Mailer{}){}'.format(y, r, y, b).center(108) +
'Made With <3 by: {0}<NAME> ({1}(Mxhmovd){2}) '.format(y, r, y).center(101) +
'Version: {}0.1{} \n'.format(y, z).center(97))
sys.stdout.write("%-6s \n" % (""))
# --- Interactive credential collection and SMTP endpoint selection ---
username = input("{}E-mail: ".format(bld))
# NOTE(review): "<PASSWORD>" is dataset-anonymisation damage; given the
# `from getpass import getpass` above, the original line was almost
# certainly `password = getpass('Password:')`.  Left byte-for-byte as found.
password = <PASSWORD>pass('Password:')
# Choose the SMTP host from the address domain (eServices defined above).
domain = username.split('@')[-1]
if domain == "gmail.com":
    host, port = eServices[0], 587
elif domain == "yahoo.com":
    host, port = eServices[1], 587
else:
    an = input ("Are you using Microsoft outlook? (y/n)")
    if an.lower()=="y":
        host, port = eServices[2], 587
    else:
        print(bld +r +domain + " is NOT supported yet")
        exit()
# Greet the user with only the ASCII letters of the address's local part.
a=[]
for i in list(username.split('@')[0]):
    if ord(i)>=97 and ord(i)<=122 or ord(i)>=65 and ord(i)<=90:
        a.append(i)
print ("Hello, " +"".join(a)+r +" \u2665"+ r+" Enjoy!")
time.sleep(2)
# ANSI escapes: move the cursor up one line, then clear that line.
sys.stdout.write('\x1b[1A')
sys.stdout.write('\x1b[2K')
class emailHandler():
    """Collects recipient addresses and sends the composed message via SMTP.

    Relies on module-level globals set by the interactive script:
    ``host``/``port`` (SMTP endpoint), ``username``/``password``
    (credentials), ``msgSubject``/``wholeMessage`` (message content) and
    the colour codes ``r``/``bld``.
    """

    def __init__(self):
        # BUG FIX: the recipient list used to be a *class* attribute, so
        # every instance shared (and kept accumulating into) one list.
        self.userEmail = []

    def addUser(self, email):
        """Add one recipient address to the send list."""
        self.userEmail.append(email)

    def getUser(self):
        """Return the list of recipient addresses collected so far."""
        return self.userEmail

    def connect(self):
        """Open a TLS SMTP connection and log in.

        Prints an error and exits the program if the login is rejected.
        """
        connection = smtplib.SMTP(host, port)
        connection.ehlo()
        connection.starttls()
        try:
            connection.login(username, password)
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        except Exception:
            print(r+bld+'Wrong E-mail/password')
            exit()
        return connection

    def testConnect(self):
        """Return True if an SMTP connection can be established."""
        try:
            self.connect()
        except smtplib.SMTPException:
            return False
        return True

    def sendEmail(self, choice):
        """Send the composed message to every collected recipient.

        choice == 1 sends as HTML, anything else as plain text.
        Returns True on success, False as soon as one send fails.
        """
        # PERF/BUG FIX: open a single logged-in connection and reuse it;
        # the original opened a fresh connection per recipient and never
        # closed the per-message ones.
        connection = self.connect()
        try:
            for recipient in self.userEmail:
                try:
                    message = MIMEMultipart("alternative")
                    message['Subject'] = msgSubject
                    if choice == 1:
                        content = MIMEText(wholeMessage, 'html')
                    else:
                        content = MIMEText(wholeMessage, 'plain')
                    message.attach(content)
                    connection.sendmail(username, recipient, message.as_string())
                except smtplib.SMTPException:
                    return False
        finally:
            connection.quit()
        return True
def exit():
    """Announce shutdown, pause briefly, then terminate with status 0.

    Note: deliberately shadows the ``exit`` builtin for this script.
    """
    farewell = "Exiting ..."
    print(farewell)
    time.sleep(0.8)
    sys.exit(0)
# --- Verify the SMTP connection before composing anything ---
obj = emailHandler()
if obj.connect():
    print(y+bld+'\nConnection Successfully established !'+z)
else:
    print(r+bld+'Error connecting !'+z)
    exit()
# --- Compose the message (subject, format, body from file or stdin) ---
print(w+bld+"\n******** MESSAGE ********"+z)
msgSubject = input( g+"[+]"+g + w+bld+"Message subject: "+z )
print(g+"[+]"+g+ w+bld+"Format of the message? "+z)
print ("1.HTML 2.Plain")
formatChoice = int(input())
print ("1.Load File 2.Write it here")
operationChoice = int(input())
if operationChoice == 1:
    # Load the message body from a file on disk.
    try:
        direc = input(bld+"Path of the message. For ex. /home/Messages/message.txt/html : "+z)
        f2=open(direc, "r")
        wholeMessage = f2.read()
        f2.close()
    except IOError:
        print(r+bld+'File NOT found')
        exit()
elif operationChoice == 2:
    # Read the body line-by-line from stdin until EOF (Ctrl+D / Ctrl+Z).
    print(bld+"Enter/Paste your message. Ctrl+D or Ctrl+Z to save it."+z)
    contents = []
    while True:
        try:
            line = input()
        except EOFError:
            break
        contents.append(line)
    wholeMessage = "\n".join(contents)
    # Sanity-check: the declared format must match the presence of <html>.
    if "<html>" not in wholeMessage:
        if formatChoice == 1:
            print(r+bld+'You choosed "HTML", but wrote in "Plain"')
            exit()
    if "<html>" in wholeMessage:
        if formatChoice == 2:
            print(r+bld+'You choosed "Plain", but wrote in "HTML"')
            exit()
else:
    print(r+bld+'Choose 1 or 2')
    exit()
# --- Collect recipient addresses (from a file or interactively) ---
print(bld+"\n******** E-mail Address ********")
print ("1.Load File 2.Write it here")
operationChoice2 = int(input())
if operationChoice2 == 1:
    try:
        direc2 = input("Path of the E-mail list. For ex. /home/E-mail/emailList.txt : ")
        with open(direc2) as f:
            for email in f:
                obj.addUser(email)
    except IOError:
        print(bld+r+'File NOT found')
        exit()
elif operationChoice2 == 2:
    print("How many Addresses you want to add?")
    addressChoice=int(input())
    for i in range(addressChoice):
        email=input("Email no.{0}: ".format(i+1))
        obj.addUser(email)
else:
    print(bld+r+'Choose 1 or 2')
    exit()
# --- Send and report the result ---
if obj.sendEmail(formatChoice):
    print(bld+g+'E-mail Successfully sent !')
else:
    print(bld+r+'Error sending the message !')
    exit()
#MVX | #!/usr/bin/python
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import sys, time
from getpass import getpass
eServices = ["smtp.gmail.com",
"smtp.mail.yahoo.com",
"smtp-mail.outlook.com"]
b, r, w, y, p, g, bld, z = '\33[94m', '\033[91m', '\33[97m', '\33[93m', '\033[1;35m', '\033[1;32m', '\033[1m', '\033[0m'
animation = (g +'——►'+ g, '»—————►')
for i in range(15):
sys.stdout.write('\b\b\b')
sys.stdout.write(animation[i % len(animation)])
sys.stdout.flush()
time.sleep(0.04)
print (' 100%')
time.sleep(1)
sys.stdout.write('\x1b[1A')
sys.stdout.write('\x1b[2K')
sys.stdout.write(g + (" " * 76) + """
________ _____ ______ ___ ___________
__ ___/__ ____(_)____________ ___ |/ /_____ ___(_)__ /____________
_____ \__ | /| / /_ /___ __ \ _ \ __ /|_/ /_ __ `/_ /__ /_ _ \_ ___/
____/ /__ |/ |/ /_ / __ /_/ / __/ _ / / / / /_/ /_ / _ / / __/ /
/____/ ____/|__/ /_/ _ .___/\___/ /_/ /_/ \__,_/ /_/ /_/ \___//_/
/_/
""" + z + b +
'\n' + '{}Email Marketing sender ({}Swipe Mailer{}){}'.format(y, r, y, b).center(108) +
'Made With <3 by: {0}<NAME> ({1}(Mxhmovd){2}) '.format(y, r, y).center(101) +
'Version: {}0.1{} \n'.format(y, z).center(97))
sys.stdout.write("%-6s \n" % (""))
username = input("{}E-mail: ".format(bld))
password = <PASSWORD>pass('Password:')
domain = username.split('@')[-1]
if domain == "gmail.com":
host, port = eServices[0], 587
elif domain == "yahoo.com":
host, port = eServices[1], 587
else:
an = input ("Are you using Microsoft outlook? (y/n)")
if an.lower()=="y":
host, port = eServices[2], 587
else:
print(bld +r +domain + " is NOT supported yet")
exit()
a=[]
for i in list(username.split('@')[0]):
if ord(i)>=97 and ord(i)<=122 or ord(i)>=65 and ord(i)<=90:
a.append(i)
print ("Hello, " +"".join(a)+r +" \u2665"+ r+" Enjoy!")
time.sleep(2)
sys.stdout.write('\x1b[1A')
sys.stdout.write('\x1b[2K')
class emailHandler():
userEmail = []
def addUser(self, email):
self.userEmail.append(email)
def getUser(self):
return self.userEmail
def connect(self):
connect = smtplib.SMTP(host, port)
connect.ehlo()
connect.starttls()
try:
connect.login(username, password)
except:
print(r+bld+'Wrong E-mail/password')
exit()
return connect
def testConnect(self):
try:
self.connect()
except smtplib.SMTPException:
return False
return True
def sendEmail(self, choice):
for i in range(len(self.userEmail)):
try:
Message = MIMEMultipart("alternative")
Message['Subject'] = msgSubject
if choice==1:
content = MIMEText(wholeMessage, 'html')
else:
content = MIMEText(wholeMessage, 'plain')
Message.attach(content)
self.connect().sendmail(username, self.userEmail[i], Message.as_string())
except smtplib.SMTPException:
return False
self.connect().quit()
return True
def exit():
print ("Exiting ...")
time.sleep(0.8)
sys.exit(0)
obj = emailHandler()
if obj.connect():
print(y+bld+'\nConnection Successfully established !'+z)
else:
print(r+bld+'Error connecting !'+z)
exit()
print(w+bld+"\n******** MESSAGE ********"+z)
msgSubject = input( g+"[+]"+g + w+bld+"Message subject: "+z )
print(g+"[+]"+g+ w+bld+"Format of the message? "+z)
print ("1.HTML 2.Plain")
formatChoice = int(input())
print ("1.Load File 2.Write it here")
operationChoice = int(input())
if operationChoice == 1:
try:
direc = input(bld+"Path of the message. For ex. /home/Messages/message.txt/html : "+z)
f2=open(direc, "r")
wholeMessage = f2.read()
f2.close()
except IOError:
print(r+bld+'File NOT found')
exit()
elif operationChoice == 2:
print(bld+"Enter/Paste your message. Ctrl+D or Ctrl+Z to save it."+z)
contents = []
while True:
try:
line = input()
except EOFError:
break
contents.append(line)
wholeMessage = "\n".join(contents)
if "<html>" not in wholeMessage:
if formatChoice == 1:
print(r+bld+'You choosed "HTML", but wrote in "Plain"')
exit()
if "<html>" in wholeMessage:
if formatChoice == 2:
print(r+bld+'You choosed "Plain", but wrote in "HTML"')
exit()
else:
print(r+bld+'Choose 1 or 2')
exit()
print(bld+"\n******** E-mail Address ********")
print ("1.Load File 2.Write it here")
operationChoice2 = int(input())
if operationChoice2 == 1:
try:
direc2 = input("Path of the E-mail list. For ex. /home/E-mail/emailList.txt : ")
with open(direc2) as f:
for email in f:
obj.addUser(email)
except IOError:
print(bld+r+'File NOT found')
exit()
elif operationChoice2 == 2:
print("How many Addresses you want to add?")
addressChoice=int(input())
for i in range(addressChoice):
email=input("Email no.{0}: ".format(i+1))
obj.addUser(email)
else:
print(bld+r+'Choose 1 or 2')
exit()
if obj.sendEmail(formatChoice):
print(bld+g+'E-mail Successfully sent !')
else:
print(bld+r+'Error sending the message !')
exit()
#MVX | fa | 0.185943 | #!/usr/bin/python ________ _____ ______ ___ ___________ __ ___/__ ____(_)____________ ___ |/ /_____ ___(_)__ /____________ _____ \__ | /| / /_ /___ __ \ _ \ __ /|_/ /_ __ `/_ /__ /_ _ \_ ___/ ____/ /__ |/ |/ /_ / __ /_/ / __/ _ / / / / /_/ /_ / _ / / __/ / /____/ ____/|__/ /_/ _ .___/\___/ /_/ /_/ \__,_/ /_/ /_/ \___//_/ /_/ #MVX | 2.359336 | 2 |
Core/utils.py | sushi-chaaaan/sakamata-alpha-pycord | 3 | 6615170 | <filename>Core/utils.py
import os
from datetime import datetime, timedelta, timezone
import discord
from discord import ApplicationContext, Option
from discord.commands import slash_command
from discord.ext import commands
from dotenv import load_dotenv
load_dotenv()
guild_id = int(os.environ["GUILD_ID"])
mod_role = int(os.environ["MOD_ROLE"])
admin_role = int(os.environ["ADMIN_ROLE"])
server_member_role = int(os.environ["SERVER_MEMBER_ROLE"])
stop_role = int(os.environ["STOP_ROLE"])
vc_stop_role = int(os.environ["VC_STOP_ROLE"])
utc = timezone.utc
jst = timezone(timedelta(hours=9), "Asia/Tokyo")
stop_list = [stop_role, vc_stop_role]
class Utils_Command(commands.Cog):
    """Utility commands cog: user lookup, liveness checks, channel audit.

    Note: the Japanese method docstrings below are surfaced by py-cord as
    the user-facing slash-command descriptions, so they are left unchanged.
    """
    def __init__(self, bot):
        # The bot instance; needed e.g. for latency in /ping.
        self.bot = bot
    @slash_command(name="user", guild_ids=[guild_id], default_permission=False)
    async def _newuser(
        self,
        ctx: ApplicationContext,
        member: Option(discord.Member, "対象のIDや名前を入力してください。"),
    ):
        """ユーザー情報を取得できます。"""
        # guild = ctx.guild
        # member = guild.get_member(int(id))
        # Values below are formatted for display.
        await ctx.defer()
        # Timestamps are converted to JST (module-level `jst`) for display.
        member_created: datetime = member.created_at.astimezone(jst)
        created = member_created.strftime("%Y/%m/%d %H:%M:%S")
        member_joined: datetime = member.joined_at.astimezone(jst)
        joined = member_joined.strftime("%Y/%m/%d %H:%M:%S")
        desc = f"対象ユーザー:{member.mention}\nユーザー名:{member}\nID:`{member.id}`\nBot:{member.bot}"
        # Roles sorted from highest to lowest position.
        roles = sorted(
            [role for role in member.roles],
            key=lambda role: role.position,
            reverse=True,
        )
        send_roles = "\n".join([role.mention for role in roles])
        # Prefer the display avatar; fall back to the default avatar URL.
        avatars = [member.avatar, member.display_avatar]
        if member.default_avatar in avatars:
            avatar_url = member.default_avatar.url
        else:
            avatar_url = member.display_avatar.replace(
                size=1024, static_format="webp"
            ).url
        desc = desc + f"\n[Avatar url]({avatar_url})"
        # Collect any active moderation measures (timeout / stop roles).
        deal = []
        if member.communication_disabled_until:
            until_jst: datetime = member.communication_disabled_until.astimezone(jst)
            until = until_jst.strftime("%Y/%m/%d %H:%M:%S")
            deal.append(f"Timeout: {until} に解除")
        stops = "\n".join([role.name for role in member.roles if role.id in stop_list])
        if stops:
            deal.append(stops)
        if not deal:
            send_deal = "なし"
        else:
            send_deal = "\n".join(deal)
        embed = discord.Embed(
            title="ユーザー情報照会結果",
            description=desc,
            color=3983615,
        )
        embed.set_thumbnail(url=avatar_url)
        embed.add_field(
            name="アカウント作成日時",
            value=created,
        )
        embed.add_field(
            name="サーバー参加日時",
            value=joined,
        )
        embed.add_field(name=f"所持ロール({len(roles)})", value=send_roles, inline=False)
        embed.add_field(
            name="実行中措置",
            value=send_deal,
        )
        await ctx.respond(embed=embed)
        return
    @commands.command()
    @commands.has_role(mod_role)
    async def test(self, ctx: commands.Context):
        """生存確認用"""
        # Prefix-command liveness check, restricted to moderators.
        await ctx.send("hello")
        return
    @commands.command(name="private")
    @commands.has_role(admin_role)
    async def _private(self, ctx):
        # Admin-only audit: print whether the server-member role can view
        # each categorized channel (output goes to the bot's console).
        role = ctx.guild.get_role(server_member_role)
        channels = sorted(
            [channel for channel in ctx.guild.channels if channel.category],
            key=lambda channel: channel.position,
        )
        for channel in channels:
            result = channel.permissions_for(role).view_channel
            print(channel.name, result)
        return
    @slash_command(guild_ids=[guild_id], name="ping")
    async def _ping(self, ctx: ApplicationContext):
        """生存確認用"""
        # Report websocket latency in milliseconds.
        raw_ping = self.bot.latency
        ping = round(raw_ping * 1000)
        await ctx.respond(f"Pong!\nPing is {ping}ms")
        return
def setup(bot):
    # py-cord extension entry point: called by bot.load_extension() to
    # register this cog on the bot.
    return bot.add_cog(Utils_Command(bot))
| <filename>Core/utils.py
import os
from datetime import datetime, timedelta, timezone
import discord
from discord import ApplicationContext, Option
from discord.commands import slash_command
from discord.ext import commands
from dotenv import load_dotenv
load_dotenv()
guild_id = int(os.environ["GUILD_ID"])
mod_role = int(os.environ["MOD_ROLE"])
admin_role = int(os.environ["ADMIN_ROLE"])
server_member_role = int(os.environ["SERVER_MEMBER_ROLE"])
stop_role = int(os.environ["STOP_ROLE"])
vc_stop_role = int(os.environ["VC_STOP_ROLE"])
utc = timezone.utc
jst = timezone(timedelta(hours=9), "Asia/Tokyo")
stop_list = [stop_role, vc_stop_role]
class Utils_Command(commands.Cog):
def __init__(self, bot):
self.bot = bot
@slash_command(name="user", guild_ids=[guild_id], default_permission=False)
async def _newuser(
self,
ctx: ApplicationContext,
member: Option(discord.Member, "対象のIDや名前を入力してください。"),
):
"""ユーザー情報を取得できます。"""
# guild = ctx.guild
# member = guild.get_member(int(id))
# この先表示する用
await ctx.defer()
member_created: datetime = member.created_at.astimezone(jst)
created = member_created.strftime("%Y/%m/%d %H:%M:%S")
member_joined: datetime = member.joined_at.astimezone(jst)
joined = member_joined.strftime("%Y/%m/%d %H:%M:%S")
desc = f"対象ユーザー:{member.mention}\nユーザー名:{member}\nID:`{member.id}`\nBot:{member.bot}"
roles = sorted(
[role for role in member.roles],
key=lambda role: role.position,
reverse=True,
)
send_roles = "\n".join([role.mention for role in roles])
avatars = [member.avatar, member.display_avatar]
if member.default_avatar in avatars:
avatar_url = member.default_avatar.url
else:
avatar_url = member.display_avatar.replace(
size=1024, static_format="webp"
).url
desc = desc + f"\n[Avatar url]({avatar_url})"
deal = []
if member.communication_disabled_until:
until_jst: datetime = member.communication_disabled_until.astimezone(jst)
until = until_jst.strftime("%Y/%m/%d %H:%M:%S")
deal.append(f"Timeout: {until} に解除")
stops = "\n".join([role.name for role in member.roles if role.id in stop_list])
if stops:
deal.append(stops)
if not deal:
send_deal = "なし"
else:
send_deal = "\n".join(deal)
embed = discord.Embed(
title="ユーザー情報照会結果",
description=desc,
color=3983615,
)
embed.set_thumbnail(url=avatar_url)
embed.add_field(
name="アカウント作成日時",
value=created,
)
embed.add_field(
name="サーバー参加日時",
value=joined,
)
embed.add_field(name=f"所持ロール({len(roles)})", value=send_roles, inline=False)
embed.add_field(
name="実行中措置",
value=send_deal,
)
await ctx.respond(embed=embed)
return
@commands.command()
@commands.has_role(mod_role)
async def test(self, ctx: commands.Context):
"""生存確認用"""
await ctx.send("hello")
return
@commands.command(name="private")
@commands.has_role(admin_role)
async def _private(self, ctx):
role = ctx.guild.get_role(server_member_role)
channels = sorted(
[channel for channel in ctx.guild.channels if channel.category],
key=lambda channel: channel.position,
)
for channel in channels:
result = channel.permissions_for(role).view_channel
print(channel.name, result)
return
@slash_command(guild_ids=[guild_id], name="ping")
async def _ping(self, ctx: ApplicationContext):
"""生存確認用"""
raw_ping = self.bot.latency
ping = round(raw_ping * 1000)
await ctx.respond(f"Pong!\nPing is {ping}ms")
return
def setup(bot):
return bot.add_cog(Utils_Command(bot))
| ja | 0.978817 | ユーザー情報を取得できます。 # guild = ctx.guild # member = guild.get_member(int(id)) # この先表示する用 生存確認用 生存確認用 | 2.438025 | 2 |
bot/migrations/0003_auto_20220110_1522.py | poohc300/auto-trade-system | 0 | 6615171 | # Generated by Django 3.2.8 on 2022-01-10 06:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make Bot.coin_balance/market/volume
    nullable and give Bot.is_bid a default of False."""

    dependencies = [
        ('bot', '0002_auto_20220110_1456'),
    ]
    operations = [
        migrations.AlterField(
            model_name='bot',
            name='coin_balance',
            field=models.FloatField(max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='bot',
            name='is_bid',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='bot',
            name='market',
            field=models.CharField(max_length=200, null=True),
        ),
        migrations.AlterField(
            model_name='bot',
            name='volume',
            field=models.FloatField(max_length=100, null=True),
        ),
    ]
| # Generated by Django 3.2.8 on 2022-01-10 06:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bot', '0002_auto_20220110_1456'),
]
operations = [
migrations.AlterField(
model_name='bot',
name='coin_balance',
field=models.FloatField(max_length=100, null=True),
),
migrations.AlterField(
model_name='bot',
name='is_bid',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='bot',
name='market',
field=models.CharField(max_length=200, null=True),
),
migrations.AlterField(
model_name='bot',
name='volume',
field=models.FloatField(max_length=100, null=True),
),
]
| en | 0.861398 | # Generated by Django 3.2.8 on 2022-01-10 06:22 | 1.618459 | 2 |
tests/testcase.py | alexdlaird/air-quality-bot-e2e | 0 | 6615172 | import os
import time
import unittest
from twilio.rest import Client
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, <NAME>"
__version__ = "0.1.1"
class TestCase(unittest.TestCase):
    """Base class for Air Quality Bot end-to-end tests.

    Wraps a Twilio REST client and the polling loop that waits for the
    bot's SMS reply.
    """
    def setUp(self):
        # Credentials come from the environment; os.environ.get returns
        # None when unset, which the Client rejects at request time.
        self.client = Client(os.environ.get("TWILIO_ACCOUNT_SID"), os.environ.get("TWILIO_AUTH_TOKEN"))
    def await_reply_message(self, now, text, retries=0):
        # Poll Twilio (recursively, with a growing delay) for the newest
        # message from the bot created after `now`; give up after 10 tries.
        # NOTE(review): `message.date_created < now` assumes `now` is
        # timezone-aware and comparable with Twilio timestamps -- confirm
        # at the call site.
        if retries >= 10:
            raise TimeoutError(
                "A response from the Air Quality Bot for \"{}\" was not seen after {} retries.".format(text, retries))
        # First attempt waits 2s; attempt k waits 3*k seconds.
        time.sleep(retries * 3 if retries > 0 else 2)
        latest_message = None
        for message in self.client.messages.list(from_=os.environ.get("AIR_QUALITY_BOT_PHONE_NUMBER"),
                                                 to=os.environ.get("TWILIO_E2E_FROM_PHONE_NUMBER"),
                                                 date_sent=now.date()):
            if message.direction != "inbound" or message.date_created < now:
                continue
            latest_message = message
        if latest_message is None:
            latest_message = self.await_reply_message(now, text, retries + 1)
        return latest_message
| import os
import time
import unittest
from twilio.rest import Client
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, <NAME>"
__version__ = "0.1.1"
class TestCase(unittest.TestCase):
def setUp(self):
self.client = Client(os.environ.get("TWILIO_ACCOUNT_SID"), os.environ.get("TWILIO_AUTH_TOKEN"))
def await_reply_message(self, now, text, retries=0):
if retries >= 10:
raise TimeoutError(
"A response from the Air Quality Bot for \"{}\" was not seen after {} retries.".format(text, retries))
time.sleep(retries * 3 if retries > 0 else 2)
latest_message = None
for message in self.client.messages.list(from_=os.environ.get("AIR_QUALITY_BOT_PHONE_NUMBER"),
to=os.environ.get("TWILIO_E2E_FROM_PHONE_NUMBER"),
date_sent=now.date()):
if message.direction != "inbound" or message.date_created < now:
continue
latest_message = message
if latest_message is None:
latest_message = self.await_reply_message(now, text, retries + 1)
return latest_message
| none | 1 | 2.794829 | 3 | |
2_svd/read.py | Roboramv2/Image-compression | 3 | 6615173 | import pickle
import os
import numpy as np
from cv2 import cv2
name = 'bear'
filebase = os.listdir('.')
files = []
for x in filebase:
if x.startswith(name):
if x.split('.')[-1]=='pkl':
files.append(x)
for fil in files:
with open(fil, 'rb') as f:
[U, s, V] = pickle.load(f)
mid = np.dot(np.diag(s[:]),V[:,:])
reconst_matrix = np.dot(U[:, :],mid)
shap = (reconst_matrix.shape)
newshap = (shap[0], int(shap[1]/3), 3)
image = reconst_matrix.reshape(newshap)
image = image/255
imshape = (newshap[0], newshap[1])
# if newshap[0]>500:
# rat = newshap[1]/newshap[0]
# w = int(500*rat)
# imshape = (w, 500)
# image = cv2.resize(image, imshape)
cv2.imshow('out', image)
cv2.waitKey(0)
| import pickle
import os
import numpy as np
from cv2 import cv2
name = 'bear'
filebase = os.listdir('.')
files = []
for x in filebase:
if x.startswith(name):
if x.split('.')[-1]=='pkl':
files.append(x)
for fil in files:
with open(fil, 'rb') as f:
[U, s, V] = pickle.load(f)
mid = np.dot(np.diag(s[:]),V[:,:])
reconst_matrix = np.dot(U[:, :],mid)
shap = (reconst_matrix.shape)
newshap = (shap[0], int(shap[1]/3), 3)
image = reconst_matrix.reshape(newshap)
image = image/255
imshape = (newshap[0], newshap[1])
# if newshap[0]>500:
# rat = newshap[1]/newshap[0]
# w = int(500*rat)
# imshape = (w, 500)
# image = cv2.resize(image, imshape)
cv2.imshow('out', image)
cv2.waitKey(0)
| en | 0.592535 | # if newshap[0]>500: # rat = newshap[1]/newshap[0] # w = int(500*rat) # imshape = (w, 500) # image = cv2.resize(image, imshape) | 2.340516 | 2 |
imgwpipe/hydro_daily.py | matenow/imgwpipe | 0 | 6615174 | import os
import zipfile as zp
import pandas as pd
import numpy as np
import core
import requests
class Labels:
    """Column layouts for the IMGW daily-hydrology CSV pipeline."""
    # Column names assigned to the raw, headerless CSV (order is
    # load-bearing: it must match the file's column order).
    init_cols = [
        'station_id', 'station_name', 'riv_or_lake', 'hydroy', 'hydrom', 'day',
        'lvl', 'flow', 'temp', 'month']
    # Column order of the transformed output frame (see transform()).
    trans_cols = [
        'date', 'year', 'month', 'day', 'hydroy', 'hydrom', 'station_id', 'station_name',
        'riv_or_lake', 'riv_or_lake_id', 'lvl', 'flow', 'temp']
def transform(trans_df):
    """Clean one raw daily-hydrology frame into the Labels.trans_cols layout.

    Splits the river/lake column into name and id, forward-fills month/day,
    derives the calendar year from the hydrological year (Nov/Dec belong to
    the previous calendar year), builds a datetime column and replaces the
    IMGW sentinel values (9999 / 99999.999 / 99.9) with NaN.
    """
    # More robust than reset_index().drop('index', axis=1) and equivalent
    # for the default unnamed index.
    trans_df = trans_df.reset_index(drop=True)
    # The id is whatever remains after stripping leading letters/spaces/'('
    # and a trailing ')'; note str.lstrip strips a *set* of characters.
    lstrip = 'AĄBCĆDEĘFGHIJKLŁMNŃOÓPQRSŚTUVWXYZŹŻaąbcćdeęfghijklłmnńoópqrsśtuvwxyzźż( '
    # (cleanup: the full-frame .copy() the original made here was
    # unnecessary -- .map already returns a new Series, and it is computed
    # before riv_or_lake is mutated below.)
    rivlakeid = trans_df['riv_or_lake'].map(lambda x: x.lstrip(lstrip).rstrip(')'))
    trans_df['riv_or_lake'] = trans_df['riv_or_lake'].map(lambda x: x.rstrip(' ()1234567890 '))
    trans_df['riv_or_lake_id'] = rivlakeid
    trans_df['month'] = trans_df['month'].fillna(method='ffill').astype(int)
    trans_df['day'] = trans_df['day'].fillna(method='ffill').astype(int)
    trans_df['year'] = trans_df['hydroy']
    # The hydrological year starts in November: Nov/Dec rows belong to the
    # previous calendar year.
    trans_df.loc[(trans_df['month'] == 11) | (trans_df['month'] == 12), 'year'] = trans_df['year'].astype(int) - 1
    trans_df['date'] = pd.to_datetime(trans_df[['year', 'month', 'day']])
    trans_df = trans_df[Labels.trans_cols]
    # IMGW sentinel values marking missing level/flow/temperature.
    trans_df.loc[trans_df['lvl'] == 9999, 'lvl'] = np.nan
    trans_df.loc[trans_df['flow'] == 99999.999, 'flow'] = np.nan
    trans_df.loc[trans_df['temp'] == 99.9, 'temp'] = np.nan
    return trans_df
def getframe(year: int, month: int, stationid=None, station=None):
core.makedir(dirname='temp')
zipname = f'codz_{year}_{core.strnumb(month)}.zip'
csvname = f'codz_{year}_{core.strnumb(month)}.csv'
url = f'https://danepubliczne.imgw.pl/data/dane_pomiarowo_obserwacyjne/dane_hydrologiczne/dobowe/{year}/{zipname}'
r = requests.get(url)
with open(f'temp/{zipname}', 'wb') as file:
file.write(r.content)
with zp.ZipFile(f'temp/{zipname}', 'r') as zip_ref:
zip_ref.extractall(path='temp')
df = pd.read_csv(f'temp/{csvname}', encoding='windows-1250', header=None)
df.columns = Labels.init_cols
if stationid is not None:
df = df.loc[df['station_id'] == int(stationid)]
elif station is not None:
df = df.loc[df['station_name'] == station]
os.remove(f'temp/{zipname}')
os.remove(f'temp/{csvname}')
return df
def getyear(year: int, stationid=None, station=None, save=False):
err(stationid, station)
if not isinstance(year, int):
raise Exception('year argument must be an integer')
elif year not in range(1951, 2021):
raise Exception('year argument not in available range (1951, 2020)')
else:
year_df = pd.DataFrame([], columns=Labels.init_cols)
for month in range(1, 12+1):
df = getframe(year, month, stationid, station)
year_df = pd.concat([year_df, df], ignore_index=True)
year_df = transform(year_df)
if save:
core.makedir('Saved')
if stationid is not None:
year_df.to_csv(f'Saved/hydro_daily_{year}_{stationid}.csv', index=False, encoding='utf-8')
elif station is not None:
year_df.to_csv(f'Saved/hydro_daily_{year}_{station}.csv', index=False, encoding='utf-8')
elif stationid is None or station is None:
year_df.to_csv(f'Saved/hydro_daily_{year}_all.csv', index=False, encoding='utf-8')
return year_df.reset_index().drop('index', axis=1)
def getrange(first_year: int, last_year: int, stationid=None, station=None, save=False):
err(stationid, station)
if not isinstance(first_year, int) or not isinstance(last_year, int):
raise Exception('first_year and last_year arguments must be integers')
elif first_year not in range(1951, 2021) or last_year not in range(1951, 2021):
raise Exception('year argument out of available range (1951-2020)')
else:
range_df = pd.DataFrame([], columns=Labels.trans_cols)
for year in range(first_year, last_year + 1):
for month in range(1, 12+1):
df = getframe(year, month, stationid, station)
range_df = pd.concat([range_df, df], ignore_index=True)
range_df = transform(range_df)
if save:
core.makedir('Saved')
if stationid is not None:
range_df.to_csv(f'Saved/hydro_daily_range_{first_year}-{last_year}_{stationid}.csv', index=False, encoding='utf-8')
elif station is not None:
range_df.to_csv(f'Saved/hydro_daily_range_{first_year}-{last_year}_{station}.csv', index=False, encoding='utf-8')
elif stationid is None or station is None:
range_df.to_csv(f'Saved/hydro_daily_range_{first_year}-{last_year}_all.csv', index=False, encoding='utf-8')
return range_df.reset_index().drop('index', axis=1)
def getmonth(year: int, month: int, stationid=None, station=None, save=False):
err(stationid, station)
if not isinstance(year, int) or not isinstance(month, int):
raise Exception('year and month arguments must be integers')
elif month not in range(1, 13):
raise Exception('month argument not in range (1-12)')
elif year not in range(1951, 2021):
raise Exception('year argument not in available range (1951-2020)')
else:
month_df = getframe(year, month, stationid, station)
if month_df.empty:
raise Exception('there is no station with chosen name or id ')
else:
month_df.columns = Labels.init_cols
month_df = transform(month_df)
if save:
core.makedir('Saved')
if stationid is not None:
month_df.to_csv(f'Saved/hydro_daily_{year}_{core.strnumb(month)}_{stationid}.csv', index=False, encoding='utf-8')
elif station is not None:
month_df.to_csv(f'Saved/hydro_daily_{year}_{core.strnumb(month)}_{station}.csv', index=False, encoding='utf-8')
elif stationid is None or station is None:
month_df.to_csv(f'Saved/hydro_daily_{year}_{core.strnumb(month)}_all.csv', index=False, encoding='utf-8')
return month_df
def err(stationid, station):
if not isinstance(stationid, int) and stationid is not None:
raise Exception('stationid argument must be an integer')
elif not isinstance(station, str) and station is not None:
raise Exception('station argument must be a string')
def metadata(stationid: int, data: str) -> list:
if stationid is None:
raise Exception('missing stationid argument')
elif not isinstance(stationid, int) and stationid is not None:
raise Exception('stationid argument must be an integer')
meta = pd.read_csv('metadata/hydro_stations.csv', encoding='utf-8')
if meta.loc[meta['id'] == stationid].empty:
raise Exception('station with chosen id does not exist')
if data == 'coords':
xcoord = meta.loc[meta['id'] == stationid]['X'].unique()[0]
ycoord = meta.loc[meta['id'] == stationid]['Y'].unique()[0]
return [xcoord, ycoord]
elif data == 'riv_or_lake':
rivlake = meta.loc[meta['id'] == stationid]['riv_or_lake'].unique()[0]
rivlakeid = meta.loc[meta['id'] == stationid]['riv_or_lake_id'].unique()[0]
return [rivlake, rivlakeid]
elif data == 'station_name':
station_name = meta.loc[meta['id'] == stationid]['name'].unique()[0]
return [station_name]
else:
raise Exception('unknown data argument')
def stations(year: int, month=None) -> list:
if not isinstance(year, int):
raise Exception('year argument must be an integer')
elif not isinstance(month, int) and month is not None:
raise Exception('month argument must be an integer')
elif month not in range(1, 13) and month is not None:
raise Exception('month argument not in range (1-12)')
elif month is not None:
stations_names = getmonth(year, month)['station_name'].sort_values()
stations_ids = getmonth(year, month)['station_id'].sort_values()
else:
stations_names = getyear(year)['station_name'].sort_values()
stations_ids = getyear(year)['station_id'].sort_values()
stations_list = list()
for x, y in zip(stations_names, stations_ids):
stations_list.append(y)
return list(set(stations_list))
| import os
import zipfile as zp
import pandas as pd
import numpy as np
import core
import requests
class Labels:
init_cols = [
'station_id', 'station_name', 'riv_or_lake', 'hydroy', 'hydrom', 'day',
'lvl', 'flow', 'temp', 'month']
trans_cols = [
'date', 'year', 'month', 'day', 'hydroy', 'hydrom', 'station_id', 'station_name',
'riv_or_lake', 'riv_or_lake_id', 'lvl', 'flow', 'temp']
def transform(trans_df):
trans_df = trans_df.reset_index().drop('index', axis=1)
dfc = trans_df.copy()
lstrip = 'AĄBCĆDEĘFGHIJKLŁMNŃOÓPQRSŚTUVWXYZŹŻaąbcćdeęfghijklłmnńoópqrsśtuvwxyzźż( '
rivlakeid = dfc['riv_or_lake'].map(lambda x: x.lstrip(lstrip).rstrip(')'))
trans_df['riv_or_lake'] = trans_df['riv_or_lake'].map(lambda x: x.rstrip(' ()1234567890 '))
trans_df['riv_or_lake_id'] = rivlakeid
trans_df['month'] = trans_df['month'].fillna(method='ffill').astype(int)
trans_df['day'] = trans_df['day'].fillna(method='ffill').astype(int)
trans_df['year'] = trans_df['hydroy']
trans_df.loc[(trans_df['month'] == 11) | (trans_df['month'] == 12), 'year'] = trans_df['year'].astype(int) - 1
trans_df['date'] = pd.to_datetime(trans_df[['year', 'month', 'day']])
trans_df = trans_df[Labels.trans_cols]
trans_df.loc[trans_df['lvl'] == 9999, 'lvl'] = np.nan
trans_df.loc[trans_df['flow'] == 99999.999, 'flow'] = np.nan
trans_df.loc[trans_df['temp'] == 99.9, 'temp'] = np.nan
return trans_df
def getframe(year: int, month: int, stationid=None, station=None):
core.makedir(dirname='temp')
zipname = f'codz_{year}_{core.strnumb(month)}.zip'
csvname = f'codz_{year}_{core.strnumb(month)}.csv'
url = f'https://danepubliczne.imgw.pl/data/dane_pomiarowo_obserwacyjne/dane_hydrologiczne/dobowe/{year}/{zipname}'
r = requests.get(url)
with open(f'temp/{zipname}', 'wb') as file:
file.write(r.content)
with zp.ZipFile(f'temp/{zipname}', 'r') as zip_ref:
zip_ref.extractall(path='temp')
df = pd.read_csv(f'temp/{csvname}', encoding='windows-1250', header=None)
df.columns = Labels.init_cols
if stationid is not None:
df = df.loc[df['station_id'] == int(stationid)]
elif station is not None:
df = df.loc[df['station_name'] == station]
os.remove(f'temp/{zipname}')
os.remove(f'temp/{csvname}')
return df
def getyear(year: int, stationid=None, station=None, save=False):
err(stationid, station)
if not isinstance(year, int):
raise Exception('year argument must be an integer')
elif year not in range(1951, 2021):
raise Exception('year argument not in available range (1951, 2020)')
else:
year_df = pd.DataFrame([], columns=Labels.init_cols)
for month in range(1, 12+1):
df = getframe(year, month, stationid, station)
year_df = pd.concat([year_df, df], ignore_index=True)
year_df = transform(year_df)
if save:
core.makedir('Saved')
if stationid is not None:
year_df.to_csv(f'Saved/hydro_daily_{year}_{stationid}.csv', index=False, encoding='utf-8')
elif station is not None:
year_df.to_csv(f'Saved/hydro_daily_{year}_{station}.csv', index=False, encoding='utf-8')
elif stationid is None or station is None:
year_df.to_csv(f'Saved/hydro_daily_{year}_all.csv', index=False, encoding='utf-8')
return year_df.reset_index().drop('index', axis=1)
def getrange(first_year: int, last_year: int, stationid=None, station=None, save=False):
err(stationid, station)
if not isinstance(first_year, int) or not isinstance(last_year, int):
raise Exception('first_year and last_year arguments must be integers')
elif first_year not in range(1951, 2021) or last_year not in range(1951, 2021):
raise Exception('year argument out of available range (1951-2020)')
else:
range_df = pd.DataFrame([], columns=Labels.trans_cols)
for year in range(first_year, last_year + 1):
for month in range(1, 12+1):
df = getframe(year, month, stationid, station)
range_df = pd.concat([range_df, df], ignore_index=True)
range_df = transform(range_df)
if save:
core.makedir('Saved')
if stationid is not None:
range_df.to_csv(f'Saved/hydro_daily_range_{first_year}-{last_year}_{stationid}.csv', index=False, encoding='utf-8')
elif station is not None:
range_df.to_csv(f'Saved/hydro_daily_range_{first_year}-{last_year}_{station}.csv', index=False, encoding='utf-8')
elif stationid is None or station is None:
range_df.to_csv(f'Saved/hydro_daily_range_{first_year}-{last_year}_all.csv', index=False, encoding='utf-8')
return range_df.reset_index().drop('index', axis=1)
def getmonth(year: int, month: int, stationid=None, station=None, save=False):
err(stationid, station)
if not isinstance(year, int) or not isinstance(month, int):
raise Exception('year and month arguments must be integers')
elif month not in range(1, 13):
raise Exception('month argument not in range (1-12)')
elif year not in range(1951, 2021):
raise Exception('year argument not in available range (1951-2020)')
else:
month_df = getframe(year, month, stationid, station)
if month_df.empty:
raise Exception('there is no station with chosen name or id ')
else:
month_df.columns = Labels.init_cols
month_df = transform(month_df)
if save:
core.makedir('Saved')
if stationid is not None:
month_df.to_csv(f'Saved/hydro_daily_{year}_{core.strnumb(month)}_{stationid}.csv', index=False, encoding='utf-8')
elif station is not None:
month_df.to_csv(f'Saved/hydro_daily_{year}_{core.strnumb(month)}_{station}.csv', index=False, encoding='utf-8')
elif stationid is None or station is None:
month_df.to_csv(f'Saved/hydro_daily_{year}_{core.strnumb(month)}_all.csv', index=False, encoding='utf-8')
return month_df
def err(stationid, station):
if not isinstance(stationid, int) and stationid is not None:
raise Exception('stationid argument must be an integer')
elif not isinstance(station, str) and station is not None:
raise Exception('station argument must be a string')
def metadata(stationid: int, data: str) -> list:
if stationid is None:
raise Exception('missing stationid argument')
elif not isinstance(stationid, int) and stationid is not None:
raise Exception('stationid argument must be an integer')
meta = pd.read_csv('metadata/hydro_stations.csv', encoding='utf-8')
if meta.loc[meta['id'] == stationid].empty:
raise Exception('station with chosen id does not exist')
if data == 'coords':
xcoord = meta.loc[meta['id'] == stationid]['X'].unique()[0]
ycoord = meta.loc[meta['id'] == stationid]['Y'].unique()[0]
return [xcoord, ycoord]
elif data == 'riv_or_lake':
rivlake = meta.loc[meta['id'] == stationid]['riv_or_lake'].unique()[0]
rivlakeid = meta.loc[meta['id'] == stationid]['riv_or_lake_id'].unique()[0]
return [rivlake, rivlakeid]
elif data == 'station_name':
station_name = meta.loc[meta['id'] == stationid]['name'].unique()[0]
return [station_name]
else:
raise Exception('unknown data argument')
def stations(year: int, month=None) -> list:
if not isinstance(year, int):
raise Exception('year argument must be an integer')
elif not isinstance(month, int) and month is not None:
raise Exception('month argument must be an integer')
elif month not in range(1, 13) and month is not None:
raise Exception('month argument not in range (1-12)')
elif month is not None:
stations_names = getmonth(year, month)['station_name'].sort_values()
stations_ids = getmonth(year, month)['station_id'].sort_values()
else:
stations_names = getyear(year)['station_name'].sort_values()
stations_ids = getyear(year)['station_id'].sort_values()
stations_list = list()
for x, y in zip(stations_names, stations_ids):
stations_list.append(y)
return list(set(stations_list))
| none | 1 | 2.61875 | 3 | |
database/models/user.py | thanhtuan1198/togo | 0 | 6615175 | from sqlalchemy import Integer, String, Column
from sqlalchemy.orm import relationship
from sqlalchemy.orm.collections import InstrumentedList
from .base import BaseEntityModel
class User(BaseEntityModel):
__tablename__ = "user"
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(250))
ability = Column(Integer)
assignments: InstrumentedList = relationship("Assignment", lazy="select", cascade="all, delete-orphan")
def __repr__(self):
return f"{self.name}"
| from sqlalchemy import Integer, String, Column
from sqlalchemy.orm import relationship
from sqlalchemy.orm.collections import InstrumentedList
from .base import BaseEntityModel
class User(BaseEntityModel):
__tablename__ = "user"
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(250))
ability = Column(Integer)
assignments: InstrumentedList = relationship("Assignment", lazy="select", cascade="all, delete-orphan")
def __repr__(self):
return f"{self.name}"
| none | 1 | 2.60582 | 3 | |
car_and_LP/tensor_rt_test/test_trt.py | n8886919/YOLO | 52 | 6615176 | from utils import *
from YOLO import *
args = yolo_Parser()
yolo = YOLO(args)
sym, arg_params, aux_params = mx.model.load_checkpoint('export/YOLO_export', 0)
batch_shape = (1, 3, 320, 512)
input = nd.zeros(batch_shape).as_in_context(yolo.ctx[0])
# -------------------------------------------------------------------------
'''
print('Warming up YOLO.net')
for i in range(0, 10):
y_gen = yolo.net(input)
y_gen[0][0].wait_to_read()
print('Starting YOLO.net timed run')
start = time.time()
for i in range(0, 1000):
y_gen = yolo.net(input)
y_gen[0][0].wait_to_read()
print(time.time() - start)
'''
# Execute with MXNet
os.environ['MXNET_USE_TENSORRT'] = '0'
# -------------------------------------------------------------------------
executor = sym.simple_bind(
ctx=mx.gpu(1),
data=batch_shape,
grad_req='null',
force_rebind=True)
executor.copy_params_from(arg_params, aux_params)
out = executor.forward(is_train=False, data=input)
batch_out = out[:5]
print(batch_out)
'''
# -------------------------------------------------------------------------
# Warmup
print('Warming up MXNet')
for i in range(0, 10):
y_gen = executor.forward(is_train=False, data=input)
y_gen[0].wait_to_read()
# Timing
print('Starting MXNet timed run')
start = time.time()
for i in range(0, 1000):
y_gen = executor.forward(is_train=True, data=input)
y_gen[0].wait_to_read()
print(time.time() - start)
# Execute with TensorRT
print('Building TensorRT engine')
os.environ['MXNET_USE_TENSORRT'] = '1'
# -------------------------------------------------------------------------
arg_params.update(aux_params)
all_params = dict([(k, v.as_in_context(mx.gpu(0))) for k, v in arg_params.items()])
executor = mx.contrib.tensorrt.tensorrt_bind(
sym,
all_params=all_params,
ctx=mx.gpu(1),
data=batch_shape,
grad_req='null',
force_rebind=True)
# -------------------------------------------------------------------------
#Warmup
print('Warming up TensorRT')
for i in range(0, 10):
y_gen = executor.forward(is_train=False, data=input)
y_gen[0].wait_to_read()
a = y_gen[0].asnumpy()
print(a)
# Timing
print('Starting TensorRT timed run')
start = time.time()
for i in range(0, 1000):
y_gen = executor.forward(is_train=False, data=input)
y_gen[0].wait_to_read()
print(time.time() - start)
'''
| from utils import *
from YOLO import *
args = yolo_Parser()
yolo = YOLO(args)
sym, arg_params, aux_params = mx.model.load_checkpoint('export/YOLO_export', 0)
batch_shape = (1, 3, 320, 512)
input = nd.zeros(batch_shape).as_in_context(yolo.ctx[0])
# -------------------------------------------------------------------------
'''
print('Warming up YOLO.net')
for i in range(0, 10):
y_gen = yolo.net(input)
y_gen[0][0].wait_to_read()
print('Starting YOLO.net timed run')
start = time.time()
for i in range(0, 1000):
y_gen = yolo.net(input)
y_gen[0][0].wait_to_read()
print(time.time() - start)
'''
# Execute with MXNet
os.environ['MXNET_USE_TENSORRT'] = '0'
# -------------------------------------------------------------------------
executor = sym.simple_bind(
ctx=mx.gpu(1),
data=batch_shape,
grad_req='null',
force_rebind=True)
executor.copy_params_from(arg_params, aux_params)
out = executor.forward(is_train=False, data=input)
batch_out = out[:5]
print(batch_out)
'''
# -------------------------------------------------------------------------
# Warmup
print('Warming up MXNet')
for i in range(0, 10):
y_gen = executor.forward(is_train=False, data=input)
y_gen[0].wait_to_read()
# Timing
print('Starting MXNet timed run')
start = time.time()
for i in range(0, 1000):
y_gen = executor.forward(is_train=True, data=input)
y_gen[0].wait_to_read()
print(time.time() - start)
# Execute with TensorRT
print('Building TensorRT engine')
os.environ['MXNET_USE_TENSORRT'] = '1'
# -------------------------------------------------------------------------
arg_params.update(aux_params)
all_params = dict([(k, v.as_in_context(mx.gpu(0))) for k, v in arg_params.items()])
executor = mx.contrib.tensorrt.tensorrt_bind(
sym,
all_params=all_params,
ctx=mx.gpu(1),
data=batch_shape,
grad_req='null',
force_rebind=True)
# -------------------------------------------------------------------------
#Warmup
print('Warming up TensorRT')
for i in range(0, 10):
y_gen = executor.forward(is_train=False, data=input)
y_gen[0].wait_to_read()
a = y_gen[0].asnumpy()
print(a)
# Timing
print('Starting TensorRT timed run')
start = time.time()
for i in range(0, 1000):
y_gen = executor.forward(is_train=False, data=input)
y_gen[0].wait_to_read()
print(time.time() - start)
'''
| en | 0.369665 | # ------------------------------------------------------------------------- print('Warming up YOLO.net') for i in range(0, 10): y_gen = yolo.net(input) y_gen[0][0].wait_to_read() print('Starting YOLO.net timed run') start = time.time() for i in range(0, 1000): y_gen = yolo.net(input) y_gen[0][0].wait_to_read() print(time.time() - start) # Execute with MXNet # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # Warmup print('Warming up MXNet') for i in range(0, 10): y_gen = executor.forward(is_train=False, data=input) y_gen[0].wait_to_read() # Timing print('Starting MXNet timed run') start = time.time() for i in range(0, 1000): y_gen = executor.forward(is_train=True, data=input) y_gen[0].wait_to_read() print(time.time() - start) # Execute with TensorRT print('Building TensorRT engine') os.environ['MXNET_USE_TENSORRT'] = '1' # ------------------------------------------------------------------------- arg_params.update(aux_params) all_params = dict([(k, v.as_in_context(mx.gpu(0))) for k, v in arg_params.items()]) executor = mx.contrib.tensorrt.tensorrt_bind( sym, all_params=all_params, ctx=mx.gpu(1), data=batch_shape, grad_req='null', force_rebind=True) # ------------------------------------------------------------------------- #Warmup print('Warming up TensorRT') for i in range(0, 10): y_gen = executor.forward(is_train=False, data=input) y_gen[0].wait_to_read() a = y_gen[0].asnumpy() print(a) # Timing print('Starting TensorRT timed run') start = time.time() for i in range(0, 1000): y_gen = executor.forward(is_train=False, data=input) y_gen[0].wait_to_read() print(time.time() - start) | 2.345637 | 2 |
plotting/plot_voltage_input.py | MylesKelly/MCTrans | 2 | 6615177 | <reponame>MylesKelly/MCTrans<filename>plotting/plot_voltage_input.py
#!/usr/bin/python3
from netCDF4 import Dataset
import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import sys
filename = ""
if len(sys.argv) == 2:
filename = sys.argv[1]
else:
print("Usage: ./plot_time_traces.py <netcdf filename>")
sys.exit()
nc_root = Dataset(filename, "r", format="NETCDF4")
volt_var = nc_root.variables["Voltage"]
t_var = nc_root.variables["Time"]
plt.figure(figsize=(10,6),dpi=210)
plt.title("Imposed Voltage" )
plt.xlabel("t (seconds)")
plt.ylabel("Voltage (V)" )
plt.plot(t_var[:],volt_var[:],label='V',linewidth=1)
plt.legend()
plt.show()
| #!/usr/bin/python3
from netCDF4 import Dataset
import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import sys
filename = ""
if len(sys.argv) == 2:
filename = sys.argv[1]
else:
print("Usage: ./plot_time_traces.py <netcdf filename>")
sys.exit()
nc_root = Dataset(filename, "r", format="NETCDF4")
volt_var = nc_root.variables["Voltage"]
t_var = nc_root.variables["Time"]
plt.figure(figsize=(10,6),dpi=210)
plt.title("Imposed Voltage" )
plt.xlabel("t (seconds)")
plt.ylabel("Voltage (V)" )
plt.plot(t_var[:],volt_var[:],label='V',linewidth=1)
plt.legend()
plt.show() | fr | 0.386793 | #!/usr/bin/python3 | 2.767066 | 3 |
Python/041.py | jaimeliew1/Project_Euler_Solutions | 0 | 6615178 | # -*- coding: utf-8 -*-
"""
Solution to Project Euler problem X
Author: <NAME>
https://github.com/jaimeliew1/Project_Euler_Solutions
"""
from EulerFunctions import is_prime
import itertools
def run():
for i in range(10, 2, -1):
panlist = []
digits = list(x for x in range(1, i))
perms = list(itertools.permutations(digits))
for p in perms:
temp = int(''.join(map(str, p)))
if all([temp%2, temp%3]):
panlist.append(temp)
panlist = list(reversed(sorted(panlist)))
for num in panlist:
if is_prime(num):
return num
return -1
if __name__ == "__main__":
print(run())
| # -*- coding: utf-8 -*-
"""
Solution to Project Euler problem X
Author: <NAME>
https://github.com/jaimeliew1/Project_Euler_Solutions
"""
from EulerFunctions import is_prime
import itertools
def run():
for i in range(10, 2, -1):
panlist = []
digits = list(x for x in range(1, i))
perms = list(itertools.permutations(digits))
for p in perms:
temp = int(''.join(map(str, p)))
if all([temp%2, temp%3]):
panlist.append(temp)
panlist = list(reversed(sorted(panlist)))
for num in panlist:
if is_prime(num):
return num
return -1
if __name__ == "__main__":
print(run())
| en | 0.767317 | # -*- coding: utf-8 -*- Solution to Project Euler problem X Author: <NAME> https://github.com/jaimeliew1/Project_Euler_Solutions | 3.501175 | 4 |
Python/NetworkX/max_flow.py | Gjacquenot/training-material | 115 | 6615179 | <reponame>Gjacquenot/training-material
#!/usr/bin/env python
from argparse import ArgumentParser
import networkx as nx
import random
def create_graph(nr_nodes_per_layer=3):
alpha = 3
beta = 0.5
G = nx.DiGraph()
source = 0
sink = 2*nr_nodes_per_layer + 1
fmt_str = 'capacity {0} -> {1}: {2:.3f}'
# from source to first layter
for i in range(1, nr_nodes_per_layer + 1):
capacity = random.gammavariate(alpha, beta)
G.add_edge(source, i, capacity=capacity)
print(fmt_str.format(source, i, capacity))
# from layter 1 to layer 2
for i in range(1, nr_nodes_per_layer + 1):
j = i + nr_nodes_per_layer
capacity = random.gammavariate(alpha, beta)
G.add_edge(i, j, capacity=capacity)
print(fmt_str.format(i, j, capacity))
# rom layer 2 to sink
for i in range(nr_nodes_per_layer + 1, 2*nr_nodes_per_layer + 1):
capacity = random.gammavariate(alpha, beta)
G.add_edge(i, sink, capacity=capacity)
print(fmt_str.format(i, sink, capacity))
return G, source, sink
def print_flow_dict(G, flow_dict):
for edge in G.edges_iter():
i, j = edge
print('flow {0} -> {1}: {2:.3f}'.format(i, j, flow_dict[i][j]))
if __name__ == '__main__':
arg_parser = ArgumentParser(description='experiment with maximum flow '
'algorithm')
arg_parser.add_argument('--n', type=int, help='number of nodes/layer')
options = arg_parser.parse_args()
G, source, sink = create_graph(options.n)
flow_value, flow_dict = nx.maximum_flow(G, source, sink)
print('value = {0:.3f}'.format(flow_value))
print_flow_dict(G, flow_dict)
| #!/usr/bin/env python
from argparse import ArgumentParser
import networkx as nx
import random
def create_graph(nr_nodes_per_layer=3):
alpha = 3
beta = 0.5
G = nx.DiGraph()
source = 0
sink = 2*nr_nodes_per_layer + 1
fmt_str = 'capacity {0} -> {1}: {2:.3f}'
# from source to first layter
for i in range(1, nr_nodes_per_layer + 1):
capacity = random.gammavariate(alpha, beta)
G.add_edge(source, i, capacity=capacity)
print(fmt_str.format(source, i, capacity))
# from layter 1 to layer 2
for i in range(1, nr_nodes_per_layer + 1):
j = i + nr_nodes_per_layer
capacity = random.gammavariate(alpha, beta)
G.add_edge(i, j, capacity=capacity)
print(fmt_str.format(i, j, capacity))
# rom layer 2 to sink
for i in range(nr_nodes_per_layer + 1, 2*nr_nodes_per_layer + 1):
capacity = random.gammavariate(alpha, beta)
G.add_edge(i, sink, capacity=capacity)
print(fmt_str.format(i, sink, capacity))
return G, source, sink
def print_flow_dict(G, flow_dict):
for edge in G.edges_iter():
i, j = edge
print('flow {0} -> {1}: {2:.3f}'.format(i, j, flow_dict[i][j]))
if __name__ == '__main__':
arg_parser = ArgumentParser(description='experiment with maximum flow '
'algorithm')
arg_parser.add_argument('--n', type=int, help='number of nodes/layer')
options = arg_parser.parse_args()
G, source, sink = create_graph(options.n)
flow_value, flow_dict = nx.maximum_flow(G, source, sink)
print('value = {0:.3f}'.format(flow_value))
print_flow_dict(G, flow_dict) | en | 0.596418 | #!/usr/bin/env python # from source to first layter # from layter 1 to layer 2 # rom layer 2 to sink | 3.044953 | 3 |
scripts/unittest/script/09-find_blobs.py | jibonaronno/OpenMV-openmv | 1 | 6615180 | def unittest(data_path, temp_path):
import image
thresholds = [(0, 100, 56, 95, 41, 74), # generic_red_thresholds
(0, 100, -128, -22, -128, 99), # generic_green_thresholds
(0, 100, -128, 98, -128, -16)] # generic_blue_thresholds
# Load image
img = image.Image("unittest/data/blobs.ppm", copy_to_fb=True)
blobs = img.find_blobs(thresholds, pixels_threshold=2000, area_threshold=200)
return blobs[0][0:-3] == (122, 41, 96, 81, 6228, 168, 82) and\
blobs[1][0:-3] == (44, 40, 77, 89, 5113, 80, 84) and\
blobs[2][0:-3] == (210, 40, 71, 82, 3890, 249, 76)
| def unittest(data_path, temp_path):
import image
thresholds = [(0, 100, 56, 95, 41, 74), # generic_red_thresholds
(0, 100, -128, -22, -128, 99), # generic_green_thresholds
(0, 100, -128, 98, -128, -16)] # generic_blue_thresholds
# Load image
img = image.Image("unittest/data/blobs.ppm", copy_to_fb=True)
blobs = img.find_blobs(thresholds, pixels_threshold=2000, area_threshold=200)
return blobs[0][0:-3] == (122, 41, 96, 81, 6228, 168, 82) and\
blobs[1][0:-3] == (44, 40, 77, 89, 5113, 80, 84) and\
blobs[2][0:-3] == (210, 40, 71, 82, 3890, 249, 76)
| en | 0.580305 | # generic_red_thresholds # generic_green_thresholds # generic_blue_thresholds # Load image | 2.825973 | 3 |
Space_Invader/Space_Ship.py | TheotimeQ/Python | 0 | 6615181 | #Importation Librairies
from tkinter import NW,PhotoImage,Canvas
from PIL import Image, ImageTk
from tkinter.filedialog import askopenfilename
# Mes fonctions
from Affichage import *
from Laser import *
#Class SpaceShip
#Class qui stock les données du SpaceShip (Joueur)
class SpaceShip:
def __init__(self,Class_Screen):
self.x = Class_Screen.Windows_Largeur/2
self.y = Class_Screen.Windows_Hauteur*0.86
self.Largeur = 60
self.Hauteur = 80
self.xSpeed = 3
self.image = Image.open("IMG\Ship.png")
self.img_copy= self.image.copy()
self.image = self.img_copy.resize((self.Largeur, self.Hauteur))
self.image_Ship = ImageTk.PhotoImage(self.image)
self.Laser_Liste = []
self.Max_Laser = 5
#Place le Vaisseau du joueur
def Place_SpaceShip(self,Class_Screen):
self.Ship = Class_Screen.Fond.create_image(0,0, anchor = NW, image= self.image_Ship)
Class_Screen.Fond.coords(self.Ship, self.x, self.y)
def Moove_SpaceShip(self,Class_Screen):
Class_Screen.Fond.coords(self.Ship, self.x, self.y)
def Clear_Laser(self,Jeux):
while(len(self.Laser_Liste) != 0 ):
for Laser in self.Laser_Liste :
Jeux.Fond.delete(Laser.Laser)
self.Laser_Liste.remove(Laser)
def Create_Laser(self,Class_Screen):
if( len(self.Laser_Liste) < self.Max_Laser ):
self.Laser_Liste.append(Laser(Class_Screen,self.x + self.Largeur/2 ,self.y - 3))
def Moove_Laser(self,Jeux):
for Laser in self.Laser_Liste :
Laser.y -= Laser.ySpeed_Player
Jeux.Screen.Fond.coords(Laser.Laser, Laser.x ,Laser.y , Laser.x + Laser.Largeur , Laser.y + Laser.Hauteur)
if(Laser.y < -5):
Jeux.Fond.delete(Laser.Laser)
self.Laser_Liste.remove(Laser)
| #Importation Librairies
from tkinter import NW,PhotoImage,Canvas
from PIL import Image, ImageTk
from tkinter.filedialog import askopenfilename
# Mes fonctions
from Affichage import *
from Laser import *
#Class SpaceShip
#Class qui stock les données du SpaceShip (Joueur)
class SpaceShip:
def __init__(self,Class_Screen):
self.x = Class_Screen.Windows_Largeur/2
self.y = Class_Screen.Windows_Hauteur*0.86
self.Largeur = 60
self.Hauteur = 80
self.xSpeed = 3
self.image = Image.open("IMG\Ship.png")
self.img_copy= self.image.copy()
self.image = self.img_copy.resize((self.Largeur, self.Hauteur))
self.image_Ship = ImageTk.PhotoImage(self.image)
self.Laser_Liste = []
self.Max_Laser = 5
#Place le Vaisseau du joueur
def Place_SpaceShip(self,Class_Screen):
self.Ship = Class_Screen.Fond.create_image(0,0, anchor = NW, image= self.image_Ship)
Class_Screen.Fond.coords(self.Ship, self.x, self.y)
def Moove_SpaceShip(self,Class_Screen):
Class_Screen.Fond.coords(self.Ship, self.x, self.y)
def Clear_Laser(self,Jeux):
while(len(self.Laser_Liste) != 0 ):
for Laser in self.Laser_Liste :
Jeux.Fond.delete(Laser.Laser)
self.Laser_Liste.remove(Laser)
def Create_Laser(self,Class_Screen):
if( len(self.Laser_Liste) < self.Max_Laser ):
self.Laser_Liste.append(Laser(Class_Screen,self.x + self.Largeur/2 ,self.y - 3))
def Moove_Laser(self,Jeux):
for Laser in self.Laser_Liste :
Laser.y -= Laser.ySpeed_Player
Jeux.Screen.Fond.coords(Laser.Laser, Laser.x ,Laser.y , Laser.x + Laser.Largeur , Laser.y + Laser.Hauteur)
if(Laser.y < -5):
Jeux.Fond.delete(Laser.Laser)
self.Laser_Liste.remove(Laser)
| fr | 0.956487 | #Importation Librairies # Mes fonctions #Class SpaceShip #Class qui stock les données du SpaceShip (Joueur) #Place le Vaisseau du joueur | 3.088431 | 3 |
perception/vehicle_spawner.py | jostl/masters-thesis | 3 | 6615182 | <filename>perception/vehicle_spawner.py<gh_stars>1-10
#!/usr/bin/env python
# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""Spawn NPCs into the simulation"""
import math
import random
from typing import List
import glob
import os
import sys
from typing import Dict
import carla
class VehicleSpawner(object):
    """Spawns and tears down NPC vehicles and pedestrians in a CARLA world."""

    def __init__(self, client: carla.Client, world: carla.World, safe_mode=True):
        """Cache world data (spawn points, blueprint filters) and init bookkeeping.

        safe_mode restricts vehicle blueprints and colors to a curated set
        (see spawn_nearby and _bad_colors).
        """
        self.client = client
        self.world = world
        self.spawn_points = self.world.get_map().get_spawn_points()
        self.blueprints = self.world.get_blueprint_library().filter("vehicle.*")
        self.blueprintsWalkers = world.get_blueprint_library().filter("walker.pedestrian.*")
        # Actor ids of everything we spawn, used later by destroy_vehicles().
        self.vehicles_list: List[int] = []
        self.walkers_list = []
        self.all_id = []
        self.all_actors = []
        self.safe_mode = safe_mode
        # Vehicle colors to avoid when safe_mode is on (presumably colors
        # that are hard for downstream perception -- TODO confirm rationale).
        self._bad_colors = [
            "255,255,255", "183,187,162", "237,237,237",
            "134,134,134", "243,243,243", "127,130,135",
            "109,109,109", "181,181,181", "140,140,140",
            "181,178,124", "171,255,0", "251,241,176",
            "158,149,129", "233,216,168", "233,216,168",
            "108,109,126", "193,193,193", "227,227,227",
            "151,150,125", "206,206,206", "255,222,218",
            "211,211,211", "191,191,191"
        ] if safe_mode else []
    def init_traffic_manager(self):
        """Configure the CARLA traffic manager on port 8000.

        Vehicles keep a 2 m gap to the leader, drive 25% below the speed
        limit, use hybrid physics, and run in synchronous mode.
        """
        traffic_manager = self.client.get_trafficmanager(8000)
        traffic_manager.set_global_distance_to_leading_vehicle(2.0)
        traffic_manager.global_percentage_speed_difference(25.0)
        traffic_manager.set_hybrid_physics_mode(True)
        traffic_manager.set_synchronous_mode(True)
def spawn_nearby(self, hero_spawn_point_index, number_of_vehicles_min, number_of_vehicles_max,
number_of_walkers_min, number_of_walkers_max, radius):
number_of_vehicles = random.randint(number_of_vehicles_min, number_of_vehicles_max)
number_of_walkers = random.randint(number_of_walkers_min, number_of_walkers_max)
print(f"Attempting to spawn {number_of_vehicles} vehicles, {number_of_walkers} walkers")
valid_spawn_points = self.get_valid_spawn_points(hero_spawn_point_index, radius)
if self.safe_mode:
self.blueprints = [x for x in self.blueprints if int(x.get_attribute('number_of_wheels')) == 4]
self.blueprints = [x for x in self.blueprints if not x.id.endswith('isetta')]
self.blueprints = [x for x in self.blueprints if not x.id.endswith('carlacola')]
self.blueprints = [x for x in self.blueprints if not x.id.endswith('cybertruck')]
self.blueprints = [x for x in self.blueprints if not x.id.endswith('t2')]
self.blueprints = [x for x in self.blueprints if not x.id.endswith('coupe')]
number_of_spawn_points = len(valid_spawn_points)
if number_of_spawn_points > number_of_vehicles:
random.shuffle(valid_spawn_points)
elif number_of_vehicles > number_of_spawn_points:
msg = 'requested %d vehicles, but could only find %d spawn points'
number_of_vehicles = number_of_spawn_points
# @todo cannot import these directly.
SpawnActor = carla.command.SpawnActor
SetAutopilot = carla.command.SetAutopilot
FutureActor = carla.command.FutureActor
batch = []
for n, transform in enumerate(valid_spawn_points):
if n >= number_of_vehicles:
break
blueprint = random.choice(self.blueprints)
if blueprint.has_attribute('color'):
color = random.choice(blueprint.get_attribute('color').recommended_values)
while color in self._bad_colors:
color = random.choice(blueprint.get_attribute('color').recommended_values)
blueprint.set_attribute('color', color)
if blueprint.has_attribute('driver_id'):
driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values)
blueprint.set_attribute('driver_id', driver_id)
blueprint.set_attribute('role_name', 'autopilot')
batch.append(SpawnActor(blueprint, transform).then(SetAutopilot(FutureActor, True)))
for response in self.client.apply_batch_sync(batch, True):
if response.error:
print(f"Vehicle spawn error: {response.error}")
else:
self.vehicles_list.append(response.actor_id)
# -------------
# Spawn Walkers
# -------------
# some settings
percentagePedestriansRunning = 0.0 # how many pedestrians will run
percentagePedestriansCrossing = 0.0 # how many pedestrians will walk through the road
# 1. take all the random locations to spawn
spawn_points = []
for i in range(number_of_walkers):
spawn_point = carla.Transform()
loc = self.world.get_random_location_from_navigation()
if (loc != None):
spawn_point.location = loc
spawn_points.append(spawn_point)
# 2. we spawn the walker object
batch = []
walker_speed = []
for spawn_point in spawn_points:
walker_bp = random.choice(self.blueprintsWalkers)
# set as not invincible
if walker_bp.has_attribute('is_invincible'):
walker_bp.set_attribute('is_invincible', 'false')
# set the max speed
if walker_bp.has_attribute('speed'):
if (random.random() > percentagePedestriansRunning):
# walking
walker_speed.append(walker_bp.get_attribute('speed').recommended_values[1])
else:
# running
walker_speed.append(walker_bp.get_attribute('speed').recommended_values[2])
else:
print("Walker has no speed")
walker_speed.append(0.0)
batch.append(SpawnActor(walker_bp, spawn_point))
results = self.client.apply_batch_sync(batch, True)
walker_speed2 = []
for i in range(len(results)):
if results[i].error:
print(results[i].error)
else:
self.walkers_list.append({"id": results[i].actor_id})
walker_speed2.append(walker_speed[i])
walker_speed = walker_speed2
# 3. we spawn the walker controller
batch = []
walker_controller_bp = self.world.get_blueprint_library().find('controller.ai.walker')
for i in range(len(self.walkers_list)):
batch.append(SpawnActor(walker_controller_bp, carla.Transform(), self.walkers_list[i]["id"]))
results = self.client.apply_batch_sync(batch, True)
for i in range(len(results)):
if results[i].error:
print(results[i].error)
else:
self.walkers_list[i]["con"] = results[i].actor_id
# 4. we put altogether the walkers and controllers id to get the objects from their id
for i in range(len(self.walkers_list)):
self.all_id.append(self.walkers_list[i]["con"])
self.all_id.append(self.walkers_list[i]["id"])
self.all_actors = self.world.get_actors(self.all_id)
# tick to ensure client receives the last transform of the walkers we have just created
self.world.tick()
# 5. initialize each controller and set target to walk to (list is [controler, actor, controller, actor ...])
# set how many pedestrians can cross the road
self.world.set_pedestrians_cross_factor(percentagePedestriansCrossing)
for i in range(0, len(self.all_id), 2):
# start walker
self.all_actors[i].start()
# set walk to random point
self.all_actors[i].go_to_location(self.world.get_random_location_from_navigation())
# max speed
self.all_actors[i].set_max_speed(float(walker_speed[int(i / 2)]))
print(f'Spawned {len(self.vehicles_list):d} vehicles and {len(self.walkers_list):d} walkers,')
def get_valid_spawn_points(self, hero_spawn_point_index, radius):
hero_spawn_point = self.spawn_points[hero_spawn_point_index]
hero_x = hero_spawn_point.location.x
hero_y = hero_spawn_point.location.y
valid_spawn_points = []
for spawn_point in self.spawn_points:
# Distance between spaw points
loc = hero_spawn_point.location
dx = spawn_point.location.x - loc.x
dy = spawn_point.location.y - loc.y
distance = math.sqrt(dx * dx + dy * dy)
min_distance = 10
if spawn_point == hero_spawn_point or distance < min_distance:
continue
if radius != 0:
x = spawn_point.location.x
y = spawn_point.location.y
yaw = spawn_point.rotation.yaw
angle_diff = hero_spawn_point.rotation.yaw - yaw
angle_diff = abs((angle_diff + 180) % 360 - 180)
if abs(hero_x - x) <= radius and abs(hero_y - y) <= radius and angle_diff < 50:
valid_spawn_points.append(spawn_point)
else:
valid_spawn_points.append(spawn_point)
return valid_spawn_points
    def destroy_vehicles(self):
        """Destroy every spawned vehicle and walker and reset the bookkeeping lists."""
        print(f'Destroying {len(self.vehicles_list):d} vehicles.\n')
        self.client.apply_batch_sync([carla.command.DestroyActor(x) for x in self.vehicles_list], True)
        self.vehicles_list.clear()
        # stop walker controllers (list is [controller, actor, controller, actor ...])
        for i in range(0, len(self.all_id), 2):
            self.all_actors[i].stop()
        print('\ndestroying %d walkers' % len(self.walkers_list))
        # Destroying an id batch covers both controllers and walker actors.
        self.client.apply_batch_sync([carla.command.DestroyActor(x) for x in self.all_id], True)
        self.walkers_list = []
        self.all_id = []
        self.all_actors = []
| <filename>perception/vehicle_spawner.py<gh_stars>1-10
#!/usr/bin/env python
# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""Spawn NPCs into the simulation"""
import math
import random
from typing import List
import glob
import os
import sys
from typing import Dict
import carla
class VehicleSpawner(object):
def __init__(self, client: carla.Client, world: carla.World, safe_mode=True):
self.client = client
self.world = world
self.spawn_points = self.world.get_map().get_spawn_points()
self.blueprints = self.world.get_blueprint_library().filter("vehicle.*")
self.blueprintsWalkers = world.get_blueprint_library().filter("walker.pedestrian.*")
self.vehicles_list: List[int] = []
self.walkers_list = []
self.all_id = []
self.all_actors = []
self.safe_mode = safe_mode
self._bad_colors = [
"255,255,255", "183,187,162", "237,237,237",
"134,134,134", "243,243,243", "127,130,135",
"109,109,109", "181,181,181", "140,140,140",
"181,178,124", "171,255,0", "251,241,176",
"158,149,129", "233,216,168", "233,216,168",
"108,109,126", "193,193,193", "227,227,227",
"151,150,125", "206,206,206", "255,222,218",
"211,211,211", "191,191,191"
] if safe_mode else []
def init_traffic_manager(self):
traffic_manager = self.client.get_trafficmanager(8000)
traffic_manager.set_global_distance_to_leading_vehicle(2.0)
traffic_manager.global_percentage_speed_difference(25.0)
traffic_manager.set_hybrid_physics_mode(True)
traffic_manager.set_synchronous_mode(True)
def spawn_nearby(self, hero_spawn_point_index, number_of_vehicles_min, number_of_vehicles_max,
number_of_walkers_min, number_of_walkers_max, radius):
number_of_vehicles = random.randint(number_of_vehicles_min, number_of_vehicles_max)
number_of_walkers = random.randint(number_of_walkers_min, number_of_walkers_max)
print(f"Attempting to spawn {number_of_vehicles} vehicles, {number_of_walkers} walkers")
valid_spawn_points = self.get_valid_spawn_points(hero_spawn_point_index, radius)
if self.safe_mode:
self.blueprints = [x for x in self.blueprints if int(x.get_attribute('number_of_wheels')) == 4]
self.blueprints = [x for x in self.blueprints if not x.id.endswith('isetta')]
self.blueprints = [x for x in self.blueprints if not x.id.endswith('carlacola')]
self.blueprints = [x for x in self.blueprints if not x.id.endswith('cybertruck')]
self.blueprints = [x for x in self.blueprints if not x.id.endswith('t2')]
self.blueprints = [x for x in self.blueprints if not x.id.endswith('coupe')]
number_of_spawn_points = len(valid_spawn_points)
if number_of_spawn_points > number_of_vehicles:
random.shuffle(valid_spawn_points)
elif number_of_vehicles > number_of_spawn_points:
msg = 'requested %d vehicles, but could only find %d spawn points'
number_of_vehicles = number_of_spawn_points
# @todo cannot import these directly.
SpawnActor = carla.command.SpawnActor
SetAutopilot = carla.command.SetAutopilot
FutureActor = carla.command.FutureActor
batch = []
for n, transform in enumerate(valid_spawn_points):
if n >= number_of_vehicles:
break
blueprint = random.choice(self.blueprints)
if blueprint.has_attribute('color'):
color = random.choice(blueprint.get_attribute('color').recommended_values)
while color in self._bad_colors:
color = random.choice(blueprint.get_attribute('color').recommended_values)
blueprint.set_attribute('color', color)
if blueprint.has_attribute('driver_id'):
driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values)
blueprint.set_attribute('driver_id', driver_id)
blueprint.set_attribute('role_name', 'autopilot')
batch.append(SpawnActor(blueprint, transform).then(SetAutopilot(FutureActor, True)))
for response in self.client.apply_batch_sync(batch, True):
if response.error:
print(f"Vehicle spawn error: {response.error}")
else:
self.vehicles_list.append(response.actor_id)
# -------------
# Spawn Walkers
# -------------
# some settings
percentagePedestriansRunning = 0.0 # how many pedestrians will run
percentagePedestriansCrossing = 0.0 # how many pedestrians will walk through the road
# 1. take all the random locations to spawn
spawn_points = []
for i in range(number_of_walkers):
spawn_point = carla.Transform()
loc = self.world.get_random_location_from_navigation()
if (loc != None):
spawn_point.location = loc
spawn_points.append(spawn_point)
# 2. we spawn the walker object
batch = []
walker_speed = []
for spawn_point in spawn_points:
walker_bp = random.choice(self.blueprintsWalkers)
# set as not invincible
if walker_bp.has_attribute('is_invincible'):
walker_bp.set_attribute('is_invincible', 'false')
# set the max speed
if walker_bp.has_attribute('speed'):
if (random.random() > percentagePedestriansRunning):
# walking
walker_speed.append(walker_bp.get_attribute('speed').recommended_values[1])
else:
# running
walker_speed.append(walker_bp.get_attribute('speed').recommended_values[2])
else:
print("Walker has no speed")
walker_speed.append(0.0)
batch.append(SpawnActor(walker_bp, spawn_point))
results = self.client.apply_batch_sync(batch, True)
walker_speed2 = []
for i in range(len(results)):
if results[i].error:
print(results[i].error)
else:
self.walkers_list.append({"id": results[i].actor_id})
walker_speed2.append(walker_speed[i])
walker_speed = walker_speed2
# 3. we spawn the walker controller
batch = []
walker_controller_bp = self.world.get_blueprint_library().find('controller.ai.walker')
for i in range(len(self.walkers_list)):
batch.append(SpawnActor(walker_controller_bp, carla.Transform(), self.walkers_list[i]["id"]))
results = self.client.apply_batch_sync(batch, True)
for i in range(len(results)):
if results[i].error:
print(results[i].error)
else:
self.walkers_list[i]["con"] = results[i].actor_id
# 4. we put altogether the walkers and controllers id to get the objects from their id
for i in range(len(self.walkers_list)):
self.all_id.append(self.walkers_list[i]["con"])
self.all_id.append(self.walkers_list[i]["id"])
self.all_actors = self.world.get_actors(self.all_id)
# tick to ensure client receives the last transform of the walkers we have just created
self.world.tick()
# 5. initialize each controller and set target to walk to (list is [controler, actor, controller, actor ...])
# set how many pedestrians can cross the road
self.world.set_pedestrians_cross_factor(percentagePedestriansCrossing)
for i in range(0, len(self.all_id), 2):
# start walker
self.all_actors[i].start()
# set walk to random point
self.all_actors[i].go_to_location(self.world.get_random_location_from_navigation())
# max speed
self.all_actors[i].set_max_speed(float(walker_speed[int(i / 2)]))
print(f'Spawned {len(self.vehicles_list):d} vehicles and {len(self.walkers_list):d} walkers,')
def get_valid_spawn_points(self, hero_spawn_point_index, radius):
hero_spawn_point = self.spawn_points[hero_spawn_point_index]
hero_x = hero_spawn_point.location.x
hero_y = hero_spawn_point.location.y
valid_spawn_points = []
for spawn_point in self.spawn_points:
# Distance between spaw points
loc = hero_spawn_point.location
dx = spawn_point.location.x - loc.x
dy = spawn_point.location.y - loc.y
distance = math.sqrt(dx * dx + dy * dy)
min_distance = 10
if spawn_point == hero_spawn_point or distance < min_distance:
continue
if radius != 0:
x = spawn_point.location.x
y = spawn_point.location.y
yaw = spawn_point.rotation.yaw
angle_diff = hero_spawn_point.rotation.yaw - yaw
angle_diff = abs((angle_diff + 180) % 360 - 180)
if abs(hero_x - x) <= radius and abs(hero_y - y) <= radius and angle_diff < 50:
valid_spawn_points.append(spawn_point)
else:
valid_spawn_points.append(spawn_point)
return valid_spawn_points
def destroy_vehicles(self):
print(f'Destroying {len(self.vehicles_list):d} vehicles.\n')
self.client.apply_batch_sync([carla.command.DestroyActor(x) for x in self.vehicles_list], True)
self.vehicles_list.clear()
# stop walker controllers (list is [controller, actor, controller, actor ...])
for i in range(0, len(self.all_id), 2):
self.all_actors[i].stop()
print('\ndestroying %d walkers' % len(self.walkers_list))
self.client.apply_batch_sync([carla.command.DestroyActor(x) for x in self.all_id], True)
self.walkers_list = []
self.all_id = []
self.all_actors = []
| en | 0.86141 | #!/usr/bin/env python # Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de # Barcelona (UAB). # # This work is licensed under the terms of the MIT license. # For a copy, see <https://opensource.org/licenses/MIT>. Spawn NPCs into the simulation # @todo cannot import these directly. # ------------- # Spawn Walkers # ------------- # some settings # how many pedestrians will run # how many pedestrians will walk through the road # 1. take all the random locations to spawn # 2. we spawn the walker object # set as not invincible # set the max speed # walking # running # 3. we spawn the walker controller # 4. we put altogether the walkers and controllers id to get the objects from their id # tick to ensure client receives the last transform of the walkers we have just created # 5. initialize each controller and set target to walk to (list is [controler, actor, controller, actor ...]) # set how many pedestrians can cross the road # start walker # set walk to random point # max speed # Distance between spaw points # stop walker controllers (list is [controller, actor, controller, actor ...]) | 2.373191 | 2 |
mcipy/print_project_pipeline.py | brandjon/continuous-integration | 0 | 6615183 | #!/usr/bin/env python3
#
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import yaml
from config import CLOUD_PROJECT, DOWNSTREAM_PROJECTS
from steps import runner_step, create_docker_step, create_step
from update_last_green_commit import get_last_green_commit
from utils import fetch_bazelcipy_command
from utils import python_binary
def is_pull_request():
    """Return True when Buildkite set a pull-request source repository."""
    return bool(os.getenv("BUILDKITE_PULL_REQUEST_REPO", ""))
def main(
    configs,
    project_name,
    http_config,
    file_config,
    git_repository,
    monitor_flaky_tests,
    use_but,
    incompatible_flags,
):
    """Print the Buildkite pipeline YAML for one project to stdout.

    configs must contain a "platforms" mapping (one runner step is emitted
    per platform) and may contain a "buildifier" entry.  use_but and
    incompatible_flags are downstream-testing modes that pin the project
    to its last green commit.  Raises Exception when no platforms are
    configured.
    """
    platform_configs = configs.get("platforms", None)
    if not platform_configs:
        raise Exception("{0} pipeline configuration is empty.".format(project_name))
    pipeline_steps = []
    if configs.get("buildifier"):
        pipeline_steps.append(
            create_docker_step("Buildifier", image=f"gcr.io/{CLOUD_PROJECT}/buildifier")
        )
    # In Bazel Downstream Project pipelines, git_repository and project_name must be specified,
    # and we should test the project at the last green commit.
    git_commit = None
    if (use_but or incompatible_flags) and git_repository and project_name:
        git_commit = get_last_green_commit(
            git_repository, DOWNSTREAM_PROJECTS[project_name]["pipeline_slug"]
        )
    # One runner step per target platform.
    for platform in platform_configs:
        step = runner_step(
            platform,
            project_name,
            http_config,
            file_config,
            git_repository,
            git_commit,
            monitor_flaky_tests,
            use_but,
            incompatible_flags,
        )
        pipeline_steps.append(step)
    pipeline_slug = os.getenv("BUILDKITE_PIPELINE_SLUG")
    all_downstream_pipeline_slugs = []
    for _, config in DOWNSTREAM_PROJECTS.items():
        all_downstream_pipeline_slugs.append(config["pipeline_slug"])
    # We don't need to update last green commit in the following cases:
    #   1. This job is a github pull request
    #   2. This job uses a custom built Bazel binary (In Bazel Downstream Projects pipeline)
    #   3. This job doesn't run on master branch (Could be a custom build launched manually)
    #   4. We don't intend to run the same job in downstream with Bazel@HEAD (eg. google-bazel-presubmit)
    #   5. We are testing incompatible flags
    if not (
        is_pull_request()
        or use_but
        or os.getenv("BUILDKITE_BRANCH") != "master"
        or pipeline_slug not in all_downstream_pipeline_slugs
        or incompatible_flags
    ):
        # "wait" makes the update step run only after all builds succeed.
        pipeline_steps.append("wait")
        # If all builds succeed, update the last green commit of this project
        pipeline_steps.append(
            create_step(
                label="Try Update Last Green Commit",
                commands=[
                    fetch_bazelcipy_command(),
                    python_binary() + " bazelci.py try_update_last_green_commit",
                ],
            )
        )
    # The emitted YAML is consumed from stdout (buildkite-agent pipeline upload).
    print(yaml.dump({"steps": pipeline_steps}))
| #!/usr/bin/env python3
#
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import yaml
from config import CLOUD_PROJECT, DOWNSTREAM_PROJECTS
from steps import runner_step, create_docker_step, create_step
from update_last_green_commit import get_last_green_commit
from utils import fetch_bazelcipy_command
from utils import python_binary
def is_pull_request():
third_party_repo = os.getenv("BUILDKITE_PULL_REQUEST_REPO", "")
return len(third_party_repo) > 0
def main(
configs,
project_name,
http_config,
file_config,
git_repository,
monitor_flaky_tests,
use_but,
incompatible_flags,
):
platform_configs = configs.get("platforms", None)
if not platform_configs:
raise Exception("{0} pipeline configuration is empty.".format(project_name))
pipeline_steps = []
if configs.get("buildifier"):
pipeline_steps.append(
create_docker_step("Buildifier", image=f"gcr.io/{CLOUD_PROJECT}/buildifier")
)
# In Bazel Downstream Project pipelines, git_repository and project_name must be specified,
# and we should test the project at the last green commit.
git_commit = None
if (use_but or incompatible_flags) and git_repository and project_name:
git_commit = get_last_green_commit(
git_repository, DOWNSTREAM_PROJECTS[project_name]["pipeline_slug"]
)
for platform in platform_configs:
step = runner_step(
platform,
project_name,
http_config,
file_config,
git_repository,
git_commit,
monitor_flaky_tests,
use_but,
incompatible_flags,
)
pipeline_steps.append(step)
pipeline_slug = os.getenv("BUILDKITE_PIPELINE_SLUG")
all_downstream_pipeline_slugs = []
for _, config in DOWNSTREAM_PROJECTS.items():
all_downstream_pipeline_slugs.append(config["pipeline_slug"])
# We don't need to update last green commit in the following cases:
# 1. This job is a github pull request
# 2. This job uses a custom built Bazel binary (In Bazel Downstream Projects pipeline)
# 3. This job doesn't run on master branch (Could be a custom build launched manually)
# 4. We don't intend to run the same job in downstream with Bazel@HEAD (eg. google-bazel-presubmit)
# 5. We are testing incompatible flags
if not (
is_pull_request()
or use_but
or os.getenv("BUILDKITE_BRANCH") != "master"
or pipeline_slug not in all_downstream_pipeline_slugs
or incompatible_flags
):
pipeline_steps.append("wait")
# If all builds succeed, update the last green commit of this project
pipeline_steps.append(
create_step(
label="Try Update Last Green Commit",
commands=[
fetch_bazelcipy_command(),
python_binary() + " bazelci.py try_update_last_green_commit",
],
)
)
print(yaml.dump({"steps": pipeline_steps}))
| en | 0.849605 | #!/usr/bin/env python3 # # Copyright 2019 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # In Bazel Downstream Project pipelines, git_repository and project_name must be specified, # and we should test the project at the last green commit. # We don't need to update last green commit in the following cases: # 1. This job is a github pull request # 2. This job uses a custom built Bazel binary (In Bazel Downstream Projects pipeline) # 3. This job doesn't run on master branch (Could be a custom build launched manually) # 4. We don't intend to run the same job in downstream with Bazel@HEAD (eg. google-bazel-presubmit) # 5. We are testing incompatible flags # If all builds succeed, update the last green commit of this project | 1.900381 | 2 |
django_auto_filter/default_settings.py | weijia/django-auto-filter | 0 | 6615184 | <gh_stars>0
# Settings fragment: executed in the context of a base Django settings module,
# so INSTALLED_APPS, TEMPLATE_CONTEXT_PROCESSORS, MIDDLEWARE_CLASSES and
# TEMPLATES are presumably defined there already -- TODO confirm how this
# file is included (exec/import-star).
# import os
#
INSTALLED_APPS += (
    'django.contrib.staticfiles',
    'tagging',
    'tagging_app',
    'bootstrapform',
    'django_tables2',
    'django_tables2_reports',
    'webmanager',
    'django_auto_filter',
    'dynamic_preferences',
    'ajax_select',
)
#
# STATIC_URL = '/static/'
TEMPLATE_CONTEXT_PROCESSORS += (
    'django.core.context_processors.static',
    'django.core.context_processors.request',
)
# Not working
# REST_FRAMEWORK = {
#     'DEFAULT_AUTHENTICATION_CLASSES': (
#         'rest_framework.authentication.BasicAuthentication',
#         'rest_framework.authentication.SessionAuthentication',
#     )
# }
MIDDLEWARE_CLASSES += (
    'reversion.middleware.RevisionMiddleware',
)
TEMPLATES[0]['OPTIONS']['context_processors'].extend([
    # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
    # list if you haven't customized them:
    'django.contrib.auth.context_processors.auth',
    'django.template.context_processors.debug',
    'django.template.context_processors.i18n',
    'django.template.context_processors.media',
    'django.template.context_processors.static',
    'django.template.context_processors.tz',
    'django.contrib.messages.context_processors.messages',
    'django.core.context_processors.request',
])
# STATIC_ROOT = os.path.join('D:\\work\\codes\\new_base\\', 'static')
| # import os
#
INSTALLED_APPS += (
'django.contrib.staticfiles',
'tagging',
'tagging_app',
'bootstrapform',
'django_tables2',
'django_tables2_reports',
'webmanager',
'django_auto_filter',
'dynamic_preferences',
'ajax_select',
)
#
# STATIC_URL = '/static/'
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.static',
'django.core.context_processors.request',
)
# Not working
# REST_FRAMEWORK = {
# 'DEFAULT_AUTHENTICATION_CLASSES': (
# 'rest_framework.authentication.BasicAuthentication',
# 'rest_framework.authentication.SessionAuthentication',
# )
# }
MIDDLEWARE_CLASSES += (
'reversion.middleware.RevisionMiddleware',
)
TEMPLATES[0]['OPTIONS']['context_processors'].extend([
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
])
# STATIC_ROOT = os.path.join('D:\\work\\codes\\new_base\\', 'static') | en | 0.435043 | # import os # # # STATIC_URL = '/static/' # Not working # REST_FRAMEWORK = { # 'DEFAULT_AUTHENTICATION_CLASSES': ( # 'rest_framework.authentication.BasicAuthentication', # 'rest_framework.authentication.SessionAuthentication', # ) # } # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this # list if you haven't customized them: # STATIC_ROOT = os.path.join('D:\\work\\codes\\new_base\\', 'static') | 1.808358 | 2 |
cj/bookmaker/pdf.py | james-garfield/Captain-Japan | 0 | 6615185 | <reponame>james-garfield/Captain-Japan<filename>cj/bookmaker/pdf.py<gh_stars>0
# Fix: the original did "from bcrypt import os", which only works because
# bcrypt happens to import the stdlib os module; import os directly and
# drop the accidental third-party dependency.
import json
import os
import subprocess

from cj.bookmaker import BookMaker, Book
from cj.utils.naming import get_file_name

# The path to the pdf_maker executable
PDF_MAKER_LOCATION = "extensions/pdf/pdf_maker.exe"
class PdfMaker(BookMaker):
    """BookMaker backend that renders chapters to a PDF.

    Rendering is delegated to the external pdf_maker executable, which
    consumes a JSON description of the book written to "pdf.json".
    """

    def __init__(self, chapters: list[str], book: Book, description: str = None) -> None:
        super().__init__(chapters, book, description=description)

    def setup(self, language, cover=None):
        # ! PDF Maker does not support language or cover so we just pass this method.
        pass

    def format_chapters(self) -> list:
        """
        Format the chapters to either match a Manga or a Novel.
        """
        if self.book.is_manga:
            # Manga: one {"title", "pages"} dict per chapter.  The page list
            # is left empty here -- presumably pdf_maker fills it from the
            # chapter title; TODO confirm.
            chapters = []
            for chapter in self.chapters:
                chapters.append({
                    "title": get_file_name(chapter),
                    "pages": [
                    ]
                })
            return chapters
        else:
            # Novels are passed through as plain chapter strings.
            return self.chapters

    def make(self):
        """Write the book description to pdf.json, run pdf_maker, then save."""
        # TODO The PDF_Maker executable has changed logic, to accept a JSON file as input.
        # Write to a Json file with the pdf data
        pdf_json = {
            "title": self.book.title,
            "author": "Captain Japan",  # TODO: Add author
            "isManga": self.book.is_manga,
            "output": self.path,
            "chapters": self.format_chapters(),
        }
        with open("pdf.json", "w") as f:
            f.write(json.dumps(pdf_json))
        # Run the PDF_Maker executable
        subprocess.run([PDF_MAKER_LOCATION, "pdf.json"])
        # delete the json file
        os.remove("pdf.json")
        # Save to the database.
        self.save()
if __name__ == "__main__":
pass | from bcrypt import os
from cj.bookmaker import BookMaker, Book
import subprocess
import json
from cj.utils.naming import get_file_name
# The path to the pdf_maker executable
PDF_MAKER_LOCATION = "extensions/pdf/pdf_maker.exe"
class PdfMaker(BookMaker):
def __init__(self, chapters: list[str], book: Book, description: str = None) -> None:
super().__init__(chapters, book, description=description)
def setup(self, language, cover=None):
# ! PDF Maker does not support language or cover so we just pass this method.
pass
def format_chapters(self) -> list:
"""
Format the chapters to either match a Manga or a Novel.
"""
if self.book.is_manga:
chapters = []
for chapter in self.chapters:
chapters.append({
"title": get_file_name(chapter),
"pages": [
]
})
return chapters
else:
return self.chapters
def make(self):
# TODO The PDF_Maker executable has changed logic, to accept a JSON file as input.
# Write to a Json file with the pdf data
pdf_json = {
"title": self.book.title,
"author": "Captain Japan", # TODO: Add author
"isManga": self.book.is_manga,
"output": self.path,
"chapters": self.format_chapters(),
}
with open("pdf.json", "w") as f:
f.write(json.dumps(pdf_json))
# Run the PDF_Maker executable
subprocess.run([PDF_MAKER_LOCATION, "pdf.json"])
# delete the json file
os.remove("pdf.json")
# Save to the database.
self.save()
if __name__ == "__main__":
pass | en | 0.864464 | # The path to the pdf_maker executable # ! PDF Maker does not support language or cover so we just pass this method. Format the chapters to either match a Manga or a Novel. # TODO The PDF_Maker executable has changed logic, to accept a JSON file as input. # Write to a Json file with the pdf data # TODO: Add author # Run the PDF_Maker executable # delete the json file # Save to the database. | 2.66838 | 3 |
backend/flask-api/migrations/versions/0724f5537934_.py | lucasbibianot/inova-cnj-time16 | 0 | 6615186 | """Correção de default value para tb_fluxo.id_fluxo_movimento
Revision ID: 0724f5537934
Revises: dd4a73113bc6
Create Date: 2020-10-21 14:36:03.999160
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0724f5537934'       # this migration's id
down_revision = 'dd4a73113bc6'  # parent migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Set the sequence-backed default for sanjus.tb_fluxo.id_fluxo_movimento."""
    op.execute("alter TABLE sanjus.tb_fluxo alter column id_fluxo_movimento set default nextval('sanjus.tb_fluxo_id_fluxo_seq')")
def downgrade():
    """Intentionally a no-op: the column default is not reverted."""
    pass
| """Correção de default value para tb_fluxo.id_fluxo_movimento
Revision ID: 0724f5537934
Revises: dd4a73113bc6
Create Date: 2020-10-21 14:36:03.999160
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0724f5537934'
down_revision = 'dd4a73113bc6'
branch_labels = None
depends_on = None
def upgrade():
op.execute("alter TABLE sanjus.tb_fluxo alter column id_fluxo_movimento set default nextval('sanjus.tb_fluxo_id_fluxo_seq')")
def downgrade():
pass
| pt | 0.262309 | Correção de default value para tb_fluxo.id_fluxo_movimento Revision ID: 0724f5537934 Revises: dd4a73113bc6 Create Date: 2020-10-21 14:36:03.999160 # revision identifiers, used by Alembic. | 1.185754 | 1 |
debug.py | RivollierG/othello | 0 | 6615187 | <reponame>RivollierG/othello
from utils.board import Board
from utils.player import Player
from utils.ia import Ia
# Initialization: set up one AI and one human player plus an empty board.
joueur1 = Ia("noir")       # AI player, plays black
joueur2 = Player("blanc")  # human player, plays white
plateau = Board()
joueur1.is_actif = True    # black (the AI) moves first

# Quick sanity check that joueur1 really is an Ia instance.
print(type(joueur1))
if isinstance(joueur1, Ia):
    print("ça marche !!!")

# Scratch code for inspecting per-square weights, kept for debugging:
# plateau.calcul_poids(joueur1)
# for i in range(8):
#     for j in range(8):
#         print(i, j, plateau.table[i][j].poids)
| from utils.board import Board
from utils.player import Player
from utils.ia import Ia
# Initialisation
joueur1 = Ia("noir")
joueur2 = Player("blanc")
plateau = Board()
joueur1.is_actif = True
print(type(joueur1))
if isinstance(joueur1, Ia):
print("ça marche !!!")
# plateau.calcul_poids(joueur1)
# for i in range(8):
# for j in range(8):
# print(i, j, plateau.table[i][j].poids) | fr | 0.339734 | # Initialisation # plateau.calcul_poids(joueur1) # for i in range(8): # for j in range(8): # print(i, j, plateau.table[i][j].poids) | 2.447266 | 2 |
contrib/i18n_lint.py | Khan/khan-linter | 24 | 6615188 | <gh_stars>10-100
# TODO(colin): fix these lint errors (http://pep8.readthedocs.io/en/release-1.7.x/intro.html#error-codes)
# pep8-disable:E124,E127,E129,E271
"""Linters that warn about common problems with i18n markup."""
from __future__ import absolute_import
import logging
import os
import re
import subprocess
import sys
import tokenize
from google.appengine.tools import appcfg
from shared import ka_root
from shared.testutil import lintutil
from third_party import i18nize_templates
from third_party.babel.messages import jslexer
import intl.data
import intl.english_only
import intl.locale
import kake.make
import modules_util
# This catches all the jinja2 function calls: it matches {{.*(.*}}
# TODO(csilvers): handle " and ' so we can have }}'s inside them.
_J2_FUNCTION_RE = re.compile(r'{{(}?[^}])*\((}?[^}])*}}')

# This catch i18n._() (or the obsolete $._()) being used inside of
# jinja2.  We should use {{ _js("..") }} when marking up text inside a
# <script/> tag in jinja2 instead.
_BAD_JS_MARKUP = re.compile(r'(?:i18n|\$)\._\(([^\)])')

# This captures a _() call when the input is a jinja2 function call.
# The string is in group(1) and any |-modifiers are in group(2).
# Any keyword arguments to _() are in group(3).
# group(1) matches one single- or double-quoted string, honoring
# backslash escapes.
# TODO(csilvers): modify group(3) so nested parens or parens in "..."
# don't trip it up.  Nested parens is hardest, so I didn't bother.
_GETTEXT_RE = re.compile(r'\b(?:_|gettext)\('
                         r'("(?:\\.|[^"\\])*"|\'(?:\\.|[^\'\\])*\')'
                         r'([^,)]*)([^)]*)',
                         re.DOTALL)

# This captures a bad i18n_do_not_translate() call that is not a literal
# string when the input is a jinja2 function call.  The args to the
# i18n_do_not_translate() function are in group(1) and may contain any number
# of non-nested function calls.  As this is just used for reporting and there
# is not a good way to do nested parens this is good enough.
_BAD_DO_NOT_TRANSLATE_RE = re.compile(r'\bi18n_do_not_translate\s*\(\s*('
                                      r'(?![\'"])'
                                      r'(?:[^\(\)]*(?:\([^\)]*\))*)*)\)',
                                      re.DOTALL)

# This captures a ngettext() call when the input is a jinja2 function call.
# The two string are in group(1) and group(3), and their |-modifiers (if
# any) are in group(2) and group(4).  ngettext keyword args are in group(5).
_NGETTEXT_RE = re.compile(r'\bngettext\('
                          r'("(?:\\.|[^"\\])*"|\'(?:\\.|[^\'\\])*\')'
                          r'([^,)]*)'
                          r'\s*,\s*'
                          r'("(?:\\.|[^"\\])*"|\'(?:\\.|[^\'\\])*\')'
                          r'([^,)]*)([^)]*)',
                          re.DOTALL)

# This captures string arguments in the kwargs (e.g. _("...", foo="...")).
# The string is in group(1) and any |-modifiers are in group(2).
_KWARG_RE = re.compile(r',\s*\w+\s*=\s*'
                       r'("(?:\\.|[^"\\])*"|\'(?:\\.|[^\'\\])*\')'
                       r'([^,)]*)')
# These are the characters that require us to add |safe, because
# otherwise they'd be html-escaped.
_NEED_SAFE_RE = re.compile(r'[<>&]')
def _needs_safe(string, post_string):
"""True if 'string' has html chars in it, and post_string lacks |safe."""
# String includes the leading and trailing "'s, which aren't part
# of the string proper.
assert string[0] in '"\'' and string[-1] in '"\'', string
return _NEED_SAFE_RE.search(string[1:-1]) and '|safe' not in post_string
def lint_missing_safe_in_jinja2(files_to_lint):
    """Find instances where we translate html but don't mark it |safe.

    We html-escape the output of {{ _("...") }} in html templates, which
    is safe but can cause problems when the text is {{ _("<b>hi</b>") }}.
    In that case, the user needs to do {{ _("<b>hi</b>")|safe }}.

    We detect instances where the user does {{ _("...<..") }} in jinja2
    templates but lacks the |safe afterwards.  Unless that line is marked
    with {{# @Nolint #}}, we flag it as a lint error.

    Returns:
       List of triples: (filename, lineno, error message)
    """
    # Limit files-to-lint to html and txt files under templates/.
    files = lintutil.filter(files_to_lint,
                            prefix='templates/', suffix=('.html', '.txt'))
    for filename in files:
        contents = lintutil.file_contents(filename)
        for fn_match in _J2_FUNCTION_RE.finditer(contents):
            # Make sure there's no @Nolint anywhere around this function.
            newline = contents.find('\n', fn_match.end())
            newline = newline if newline > -1 else len(contents)
            if '@Nolint' in contents[fn_match.start():newline]:
                continue

            # Check the string argument, and any string kwargs, of each
            # _()/gettext() call inside this {{ ... }} expression.
            for m in _GETTEXT_RE.finditer(fn_match.group(0)):
                if _needs_safe(m.group(1), m.group(2)):
                    linenum = 1 + contents.count('\n', 0,
                                                 fn_match.start() + m.start(1))
                    # BUGFIX: the two message fragments previously joined
                    # as "escapingthe"; a space was missing here.
                    yield (filename, linenum,
                           'Replace %s with _("..."|safe) to avoid escaping '
                           'the tag/attr inside the _().' % m.group(1))
                for km in _KWARG_RE.finditer(m.group(3)):
                    if _needs_safe(km.group(1), km.group(2)):
                        linenum = 1 + contents.count('\n', 0,
                                                     fn_match.start() +
                                                     m.start() + km.start(1))
                        yield (filename, linenum,
                               'Replace %s with _(..., foo="..."|safe) to '
                               'avoid escaping the tag/attr inside foo.'
                               % km.group(1))

            # Likewise for both string arguments, and kwargs, of ngettext().
            for m in _NGETTEXT_RE.finditer(fn_match.group(0)):
                if _needs_safe(m.group(1), m.group(2)):
                    linenum = 1 + contents.count('\n', 0,
                                                 fn_match.start() + m.start(1))
                    yield (filename, linenum,
                           'Replace %s with ngettext("..."|safe, ...) to '
                           'avoid escaping the tag/attr inside the _().'
                           % m.group(1))
                if _needs_safe(m.group(3), m.group(4)):
                    linenum = 1 + contents.count('\n', 0,
                                                 fn_match.start() + m.start(3))
                    yield (filename, linenum,
                           'Replace %s with ngettext(..., "..."|safe) to '
                           'avoid escaping the tag/attr inside the _().'
                           % m.group(3))
                for km in _KWARG_RE.finditer(m.group(5)):
                    if _needs_safe(km.group(1), km.group(2)):
                        linenum = 1 + contents.count('\n', 0,
                                                     fn_match.start() +
                                                     m.start() + km.start(1))
                        yield (filename, linenum,
                               'Replace %s with ngettext(..., foo="..."|safe)'
                               ' to avoid escaping the tag/attr inside foo.'
                               % km.group(1))
def lint_no_wrong_i18n_markup_in_jinja2(files_to_lint):
    """Find where we mark js within html with i18n._ instead of {{ _js()

    Inside a <script> tag in a jinja2 template, text must be marked up
    with {{ _js("...") }}; i18n._("...") / $._("...") there is wrong.

    Returns:
       List of triples: (filename, lineno, error message)
    """
    # Only html templates can embed <script> blocks; skip .txt templates.
    files = lintutil.filter(files_to_lint, prefix='templates/', suffix='.html')
    for filename in files:
        contents = lintutil.file_contents(filename)
        for fn_match in _BAD_JS_MARKUP.finditer(contents):
            # Make sure there's no @Nolint anywhere around this function
            # (from the match through end-of-line).
            newline = contents.find('\n', fn_match.end())
            newline = newline if newline > -1 else len(contents)
            if '@Nolint' in contents[fn_match.start():newline]:
                continue
            # NOTE(review): the inner finditer re-scans the outer match's
            # own text with the *same* regex, so it just re-finds that one
            # match.  This mirrors the two-level structure of
            # lint_missing_safe_in_jinja2 (where the outer loop uses
            # _J2_FUNCTION_RE) and looks like copy-paste residue -- confirm
            # before simplifying to a single loop.
            for m in _BAD_JS_MARKUP.finditer(fn_match.group(0)):
                linenum = 1 + contents.count('\n', 0,
                                             fn_match.start() + m.start(1))
                yield (filename, linenum,
                       'Do {{ _js("%s") }} instead of %s inside <script> '
                       'tags within jinja2.' % (m.group(1), m.group(0)))
def lint_non_literal_i18n_do_not_translate(files_to_lint):
    """Flag i18n_do_not_translate() calls whose argument is not a literal.

    i18n_do_not_translate marks its argument as "safe" (jinja2 will not
    autoescape it), so allowing anything other than a string literal
    inside it would open a potential XSS vector.  Lines marked with
    {{# @Nolint #}} are exempt.

    Returns:
       List of triples: (filename, lineno, error message)
    """
    # Only jinja2 templates (html/txt under templates/) are relevant.
    candidates = lintutil.filter(files_to_lint,
                                 prefix='templates/', suffix=('.html', '.txt'))
    for fname in candidates:
        text = lintutil.file_contents(fname)
        for call in _J2_FUNCTION_RE.finditer(text):
            # Honor an @Nolint marker anywhere from the call to end-of-line.
            eol = text.find('\n', call.end())
            if eol == -1:
                eol = len(text)
            if '@Nolint' in text[call.start():eol]:
                continue
            for bad in _BAD_DO_NOT_TRANSLATE_RE.finditer(call.group(0)):
                arg_pos = call.start() + bad.start(1)
                lineno = 1 + text.count('\n', 0, arg_pos)
                yield (fname, lineno,
                       '%s contains something that is not just a '
                       'literal string. Only literal strings can be '
                       'inside i18n_do_not_translate.' % bad.group(1))
# State machine when lexing uses of i18n._ in source code.
# Keys are what token we're currently looking at, value is what tokens
# should come next.  Tokens can either be a type (tokenize.XXX) or a
# literal value.  Key of None indicates when to start parsing; value
# of None terminates a successful parse.  Newlines and comments are
# always elided when looking at tokens.

# Function names that begin a gettext-like call we want to check.
_START_TOKENS = ('_', 'gettext', 'ngettext', 'cached_gettext',
                 'cached_ngettext', 'mark_for_translation')

_GETTEXT_STATE_MACHINE = {
    None: _START_TOKENS,
    '_': ('(',),
    'gettext': ('(',),
    'ngettext': ('(',),
    'cached_gettext': ('(',),
    'cached_ngettext': ('(',),
    'mark_for_translation': ('(',),
    # After the open paren we require a literal string, possibly
    # '+'-concatenated across several string tokens.
    '(': (tokenize.STRING,),
    # comma introduces the keyword args for the gettext call.
    tokenize.STRING: ('+', ',', ')', tokenize.STRING),
    '+': (tokenize.STRING,),
    # Once we are at keywords, the function call is ok.
    # TODO(csilvers): for ngettext we want to stop after the second comma.
    ',': None,
    ')': None
}
def lint_non_literal_i18n_in_python(files_to_lint):
    """Complain about uses of i18n._() on something other than a string.

    i18n._(variable) is dangerous -- you don't know if the variable
    has been translated or not.  Sometimes it's ok, but usually it's a
    mistake, and a better solution is to pass in translated_variable
    instead.  (The OK cases can be marked with @Nolint.)

    Yields (filename, linenum, error message) triples, one per
    gettext-like call whose argument is not a literal string.
    """
    # Walk every python token, advancing _GETTEXT_STATE_MACHINE; a
    # position where no transition is possible while inside a gettext
    # call means the call's argument was not a literal string.
    current_state = None       # not currently in a gettext context
    gettext_linenum = None     # linenum of current gettext call
    has_nolint = False         # any line of the i18n._ may have @nolint

    # Only need to lint python files, but not test files.
    files = lintutil.filter(files_to_lint, suffix='.py',
                            exclude_substrings=['_test.py'])
    for (fname, ttype, token, (linenum, _), _, line) in (
            lintutil.python_tokens(files)):
        # Don't lint *definitions* of these methods.
        if line.strip().startswith('def '):
            continue
        # The state machine may transition on either the type
        # of the token, or its literal value.
        if token in _GETTEXT_STATE_MACHINE[current_state]:
            current_state = token
            gettext_linenum = gettext_linenum or linenum
            has_nolint = has_nolint or '@Nolint' in line
        elif ttype in _GETTEXT_STATE_MACHINE[current_state]:
            current_state = ttype
            gettext_linenum = gettext_linenum or linenum
            has_nolint = has_nolint or '@Nolint' in line
        elif current_state is None:
            # We weren't in gettext before, and we're still not.
            pass
        else:
            # We're in gettext, but can't transition: we're bad.
            # Give ourselves *one more* chance for nolint.
            has_nolint = has_nolint or '@Nolint' in line
            if current_state in _START_TOKENS:
                # BUT: If we last saw _ or ngettext and can't
                # transition because we're not a '(', that
                # means it's not a function call, so we can
                # ignore it.  e.g. '(f, _) = file_and_line()'.
                pass
            elif not has_nolint:
                yield (fname, gettext_linenum,
                       'gettext-like calls should only have '
                       'literal strings as their arguments')
            current_state = None
            gettext_linenum = None
            has_nolint = False

        # If we've happily ended a gettext call, clear the state.
        if _GETTEXT_STATE_MACHINE[current_state] is None:
            current_state = None
            gettext_linenum = None
            has_nolint = False
def lint_non_literal_i18n_in_javascript(files_to_lint):
    """Complain about uses of i18n._() on something other than a string.

    i18n._(variable) is dangerous -- you don't know if the variable
    has been translated or not.

    Yields (filename, linenum, error message) triples.  Lines covered by
    a @Nolint marker (anywhere in the call's line span) are skipped.
    """
    files_to_lint = lintutil.filter(
        files_to_lint, suffix=('.js', '.jsx'),
        exclude_substrings=('/i18n.js', '/i18n_test.js'))

    # This regexp pattern captures a string, possibly concatenated with +'s.
    # (Handles ", ', and ` quoting with backslash escapes.)
    js_str = (r'(?:' +
              r'"(?:\\.|[^"])*"|' +
              r"'(?:\\.|[^'])*'|" +
              r'`(?:\\.|[^`])*`' +
              ')')
    js_concat_str = '\s*%s(?:\s*\+\s*%s)*\s*' % (js_str, js_str)
    # Every i18n._/ngettext call site, vs. only the well-formed ones
    # (literal-string args); a call site missing from the second dict
    # is a violation.
    gettext_occurrences = re.compile(r'\b(i18n._|i18n.ngettext)\(')
    valid_gettext_occurrences = re.compile(
        r'\bi18n._\(%(str)s[,)]|\bi18n.ngettext\(%(str)s,\s*%(str)s[,)]'
        % {'str': js_concat_str})

    for f in files_to_lint:
        contents = lintutil.file_contents(f)
        all_occurrences = {
            m.start(): m for m in gettext_occurrences.finditer(contents)}
        valid_occurrences = {
            m.start(): m for m in valid_gettext_occurrences.finditer(contents)}
        for (startpos, m) in all_occurrences.iteritems():
            i18n_fn = m.group(1)
            msg = None          # set to non-None if there's a problem.
            if startpos not in valid_occurrences:
                msg = ('%s must have string literals as arguments, '
                       'with no variables or templates' % i18n_fn)
            else:
                # Then we're ok with this!  *Unless* it's a template string
                # with $(...) inside it, then we're not ok.
                m2 = valid_occurrences[startpos]
                if m2.group().count(r'${') > m2.group().count(r'\${'):
                    # Note: '%' binds tighter than '+', so only the second
                    # literal (which holds the %s) is %-formatted; the
                    # leading 'You must use %' is prepended verbatim.
                    msg = ('You must use %' + '(...)s with template strings '
                           'inside %s, not ${...}' % i18n_fn)
            if msg:
                start_lineno = 1 + contents.count('\n', 0, startpos)
                # Doing a real regexp to find the end of this function call
                # is tough, we just do something simple and pray.
                end_paren = contents.find(')', startpos)
                if end_paren == -1:
                    end_paren = len(contents)
                end_lineno = 1 + contents.count('\n', 0, end_paren)
                if any(lintutil.has_nolint(f, lineno)
                       for lineno in xrange(start_lineno, end_lineno + 1)):
                    continue
                yield (f, start_lineno, msg)
def lint_templates_are_translated(files_to_lint):
    """Verify that nltext in the input templates are marked for translation.

    All natural-language text in jinja2 and handlebars files should be
    marked for translation, using {{ _("...") }} or {{#_}}...{{/_}}.
    i18nize_templates.py is a tool that can do this for you automatically.
    We run this tool in 'check' mode to verify that every input file
    is already marked up appropriately.

    Since i18nize_templates isn't perfect (it thinks you need to
    translate text like 'NAME' or 'x' when used on a 'close'
    button), you can use nolint-like functionality to tell this linter
    it's ok if some text is not marked up to be translated.  Unlike
    other tests though, we do not use the @Nolint directive for this,
    but instead wrap the relevant text in
       {{ i18n_do_not_translate(...) }}
    or
       {{#i18nDoNotTranslate}}...{{/i18nDoNotTranslate}}

    Yields (filename, linenum, error message) triples.
    """
    # Add some ka-specific function we know do not have nltext arguments.
    i18nize_templates.mark_function_args_lack_nltext(
        'js_css_packages.package',
        'js_css_packages.script',
        'handlebars_template',
        'youtube.player_embed',
        'log.date.strftime',
        'emails.tracking_image_url',
        'templatetags.to_canonical_url',
        'render_react',
    )

    for f in files_to_lint:
        abs_f = f
        f = ka_root.relpath(f)
        # Exclude files that we don't need to translate: we don't care
        # if those files are 'properly' marked up or not.
        if intl.english_only.should_not_translate_file(f):
            continue

        if (f.startswith('templates' + os.sep) and
                (f.endswith('.html') or f.endswith('.txt'))):
            # jinja2 template
            parser = i18nize_templates.get_parser_for_file(f)
            correction = 'wrap the text in {{ i18n_do_not_translate() }}'
        elif f.endswith('.handlebars'):
            # handlebars template
            parser = i18nize_templates.get_parser_for_file(f)
            correction = ('wrap the text in {{#i18nDoNotTranslate}}...'
                          '{{/i18nDoNotTranslate}}')
        else:
            continue

        file_contents = lintutil.file_contents(abs_f)
        try:
            parsed_output = parser.parse(
                file_contents.decode('utf-8')).encode('utf-8')
        # BUGFIX: was the Python-2-only 'except X, why:' form, which is a
        # hard syntax error under Python 3; 'as' works on 2.6+ and 3.x.
        except i18nize_templates.HTMLParser.HTMLParseError as why:
            m = re.search(r'at line (\d+)', str(why))
            linenum = int(m.group(1)) if m else 1
            yield (abs_f, linenum,
                   '"i18nize_templates.py %s" fails: %s' % (f, why))
            continue

        # Any line the i18nizer would rewrite is a line missing markup.
        orig_lines = file_contents.splitlines()
        parsed_lines = parsed_output.splitlines()
        for i in xrange(len(orig_lines)):
            if orig_lines[i] != parsed_lines[i]:
                yield (abs_f, i + 1,
                       'Missing _(); run tools/i18nize_templates.py or %s '
                       '(expecting "%s")'
                       % (correction, parsed_lines[i].strip()))
def _lint_js_content(filename, content):
    """Verify that nltext in the js content is marked for translation.

    All natural-language text in js files should be marked for
    translation using i18n._ or i18n.ngettext.  It is very hard though
    to figure out if a string in js should be translated or not.  So we
    check for strings that we know should be translated.  For now this
    just checks to make sure that string arguments inside a function
    called Text() or React.Dom.* are marked for translation.

    filename should be an absolute path.

    Returns:
       List of triples: (filename, lineno, error message)
    """
    # Lexer-walk state, updated token by token below.
    line_number = None                      # line of the collected string
    last_argument = None                    # last string-literal argument seen
    is_first_argument_within_func = False   # just entered a call's arg list?
    concatenate_next_argument = False       # saw '+': glue next string on
    call_stack = []                         # dotted names of open calls
    last_maybe_func_name = None             # dotted name being accumulated
    concatenate_next_name = None            # saw '.': extend the dotted name

    if ".jsx" in filename:
        correct_wrappers = ("<$_> .. </$_> or "
                            "<$i18nDoNotTranslate> .. "
                            "</$i18nDoNotTranslate>")
    else:
        correct_wrappers = "i18n._(..) or i18n.i18nDoNotTranslate(..)"

    def func_string_args_should_be_translated(func_name):
        """Return true if string args should be translated.

        eg. React.DOM.strong({style:{color:"red"}}, "Test")
        The first argument is an object, but the second is an untranslated
        string that should be wrapped in i18n._()
        """
        if not func_name:
            return False
        # Old versions of react have React.creatElement().  Newer ones
        # hae _react2.default.createElement().
        if ((func_name == "React.createElement" or
             'react' in func_name and 'createElement' in func_name) and
                not is_first_argument_within_func):
            # The first arg within CreateElement can be a string like "div"
            # all others must be translated
            return True
        elif func_name.startswith("React.DOM.") or func_name == "Text":
            return True
        return False

    for token in jslexer.tokenize(content):
        if token.type == 'operator' and token.value in ["(", "{", "["]:
            # Opening a call (or object/array literal): record which
            # function we believe is being called.
            call_stack.append(last_maybe_func_name)
            is_first_argument_within_func = True
        elif (token.type == 'string' and
              call_stack and
              func_string_args_should_be_translated(call_stack[-1])):
            # Collect any string that is an immediate child of Text - there
            # should not be any, it should be wrapped in i18n._
            new_value = jslexer.unquote_string(token.value.decode('utf-8'))
            line_number = token.lineno
            if concatenate_next_argument:
                last_argument = (last_argument or '') + new_value
                concatenate_next_argument = False
            else:
                last_argument = new_value
        elif token.type == 'operator' and token.value == '+':
            concatenate_next_argument = True
        elif token.type == 'operator':
            # Any other operator ends the current argument; if we had
            # collected an unwrapped string, report it now.
            last_func = call_stack[-1] if call_stack else None
            if (func_string_args_should_be_translated(last_func) and
                    last_argument and
                    not intl.english_only.should_not_translate_string(
                        last_argument) and
                    token.value in [")", "}", "]", ",", ":"]):
                yield (filename, line_number,
                       "The string '%s' inside of a %s() is not translated. "
                       "Please wrap it in %s or add the file to "
                       "intl/english_only.py" % (
                           last_argument.encode("utf-8"),
                           last_func.encode("utf-8"),
                           correct_wrappers))
            is_first_argument_within_func = False
            last_argument = None
            if token.value in [")", "}", "]"] and call_stack:
                call_stack.pop()

        # Keep track of last full func name eg. React.DOM.div
        # NOTE(review): this nesting was reconstructed from a
        # whitespace-mangled source; the else/elif pairing below is the
        # only parse consistent with the statement order -- confirm
        # against upstream before refactoring.
        if token.type == 'name':
            # This could also be variable, keyword, or something else, but
            # we will keep it around just in case it is followed by a (
            if last_maybe_func_name and concatenate_next_name:
                last_maybe_func_name += ".%s" % token.value
                concatenate_next_name = True
            else:
                last_maybe_func_name = token.value
        elif token.type == 'operator' and token.value == '.':
            concatenate_next_name = True
        else:
            concatenate_next_name = False
            last_maybe_func_name = None
def lint_js_files_are_translated(files_to_lint):
    """Verify that nltext in the js files are marked for translation.

    See docstring of: _lint_js_content

    Returns:
       List of triples: (filename, lineno, error message)
    """
    # jsx files are linted via their compiled js, so build those first.
    jsx_targets = [('genfiles/compiled_jsx/en/%s.js' % ka_root.relpath(f), {})
                   for f in files_to_lint if f.endswith('.jsx')]
    kake.make.build_many(jsx_targets)

    for abspath in lintutil.filter(files_to_lint, suffix=('.js', '.jsx')):
        relpath = ka_root.relpath(abspath)
        # Files we never translate don't need to be marked up.
        if intl.english_only.should_not_translate_file(relpath):
            continue
        if relpath.endswith(".jsx"):
            # Lint the compiled-to-js version of the jsx file.
            abspath = "%s/genfiles/compiled_jsx/en/%s.js" % (ka_root.root,
                                                             relpath)
            relpath = ka_root.relpath(abspath)
        contents = lintutil.file_contents(abspath)
        for error in _lint_js_content(abspath, contents):
            yield error
def lint_have_needed_babel_locales(files_to_lint):
    """Make sure we have all the locales we need, in third_party/babel.

    third_party/babel/localedata comes with 664 languages, which is
    great for coverage but bad for deploy time.
    So to speed things up, I added to app.yaml's skip_files all
    language files that aren't used by either a locale in all_ka_locales or
    a YouTube locale.

    This lint check makes sure that when we update those lists (or update the
    babel subrepo), we upload any localedata languages that we need to.
    """
    # Only worth running when one of the inputs that could change the
    # needed-locales set is in the lint batch.
    if (ka_root.join('intl', 'i18n.py') not in files_to_lint and
        not any(f.startswith(intl.data.INTL_VIDEO_PLAYLISTS_DIR)
                for f in files_to_lint) and
        ka_root.join('third_party', 'babel-khansrc') not in files_to_lint):
        return

    config = modules_util.module_yaml('default')
    # Take only the rules for third_party/babel/localedata, and strip
    # off that prefix since we're starting the FileIterator in the
    # localedata directory rather than ka-root.
    # Note this depends on the babel rules starting with ^ and ending with $.
    localedata_root = 'third_party/babel/localedata'
    prefix = re.escape(r'(?:.*/webapp/)?')
    babel_regexps = [s for s in re.findall(r'\^(?:%s)?([^$]*)\$' % prefix,
                                           config['skip_files'].regex.pattern)
                     if s.startswith(localedata_root + '/')]
    skip_files = [s.replace('%s/' % localedata_root, '^')
                  for s in babel_regexps]
    skip_re = re.compile('|'.join('(?:%s)' % p for p in skip_files))

    # Silence appcfg's chatty logging while we walk the localedata dir.
    orig_level = logging.getLogger().level
    try:
        logging.getLogger().setLevel(logging.ERROR)
        localedata_files = appcfg.FileIterator(ka_root.join(localedata_root),
                                               skip_re, config['runtime'])
        localedata_files = list(localedata_files)
    finally:
        logging.getLogger().setLevel(orig_level)

    # Remove the '.dat' extension.
    all_locales_for_babel = frozenset(os.path.splitext(f)[0]
                                      for f in localedata_files)

    needed_locales = intl.data.all_ka_locales(include_english=True)
    babel_locales = set([b for b in [intl.locale.ka_locale_to_babel(l)
                                     for l in needed_locales] if b])
    for babel_locale in babel_locales:
        # We need to check zh_Hans_CN.dat exists, but also zh_Hans.dat, etc.
        for prefix in intl.locale.locale_prefixes(babel_locale):
            # We need to convert from KA-style - to babel-style _.
            prefix = prefix.replace('-', '_')
            if prefix not in all_locales_for_babel:
                yield ('skip_files.yaml', 1,
                       "We need babel locale info for %s but it's been added"
                       " to skip-files (need to whitelist it)." % prefix)
def lint_not_using_gettext_at_import_time(files_to_lint):
    """Make sure we don't use i18n._/etc in a static context.

    If you have a global variable such as '_FOO = i18n._("bar")', at
    the top of some .py file, it won't work the way you intend because
    i18n._() needs to be called while handling a request in order to
    know what language to translate to.  (Instead, you'd need to do
       _FOO = lambda: i18n._("bar")
    or some such.)

    This tests for this by mocking i18n._ et al., and then importing
    everything (but running nothing).  Any i18n._ calls that happen
    during this import are problematic!  We have to spawn a new
    python process to make sure we do the importing properly (and
    without messing with the currently running python environment!)
    """
    candidate_files_to_lint = lintutil.filter(files_to_lint, suffix='.py')
    files_to_lint = []
    for filename in candidate_files_to_lint:
        contents = lintutil.file_contents(filename)
        # Check that it's plausible this file uses i18n._ or similar.
        # This also avoids importing random third-party files that may
        # have nasty side-effects at import time (all our code is too
        # well-written to do that!)
        if 'import intl' in contents or 'from intl' in contents:
            files_to_lint.append(filename)

    # The sub-process script below stubs intl.request.ka_locale with
    # add_lint_error(), so any gettext call made while importing a
    # linted module prints a GETTEXT ERROR line that we parse back out.
    program = """\
import os   # @Nolint(linter can't tell this is in a string!)
import sys  # @Nolint(linter can't tell this is in a string!)
import traceback
import intl.request   # @Nolint(seems unused to our linter but it's used)

_ROOT = "%s"

def add_lint_error(f):
    # We assume code in 'intl' doesn't make this mistake, and thus
    # the first stack-frame before we get into 'intl' is the
    # offending code.  ctx == '<string>' means the error occurred in
    # this pseudo-script.
    for (ctx, lineno, fn, line) in reversed(traceback.extract_stack()):
        if os.path.isabs(ctx):
            ctx = os.path.relpath(ctx, _ROOT)
        if ctx != '<string>' and not ctx.startswith('intl/'):
            if ctx == f:
                print 'GETTEXT ERROR {} {}'.format(ctx, lineno)
            break
    return 'en'   # a fake value for intl.request.ka_locale
""" % ka_root.root

    if not files_to_lint:
        return

    for filename in files_to_lint:
        modulename = ka_root.relpath(filename)
        modulename = os.path.splitext(modulename)[0]    # nix .py
        modulename = modulename.replace('/', '.')
        # Force a re-import.
        program += 'sys.modules.pop("%s", None)\n' % modulename
        program += ('intl.request.ka_locale = lambda: add_lint_error("%s")\n'
                    % ka_root.relpath(filename))
        program += 'import %s\n' % modulename

    # Run the generated script in a fresh interpreter so the imports
    # (and our monkey-patching) can't pollute this process.
    p = subprocess.Popen(
        ['env', 'PYTHONPATH=%s' % ':'.join(sys.path),
         sys.executable, '-c', program],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    p.wait()
    lint_output = p.stdout.read()
    for line in lint_output.splitlines():
        if line.startswith('GETTEXT ERROR '):
            line = line[len('GETTEXT ERROR '):]
            (filename, linenum) = line.rsplit(' ', 1)
            yield (ka_root.join(filename), int(linenum),
                   'Trying to translate at import-time, but '
                   'translation only works at runtime! '
                   'Use intl.i18n.mark_for_translation() instead.')
# Lazily-initialized cache of the production skip_files regexp.
_SKIP_FILES_RE = None


def _does_not_upload(f):
    """True if f, relative to ka-root, is not uploaded to appengine."""
    global _SKIP_FILES_RE
    if not _SKIP_FILES_RE:
        # First call: load and cache the skip_files pattern from app.yaml.
        config = modules_util.module_yaml('default', for_production=True)
        _SKIP_FILES_RE = config['skip_files'].regex
    assert not os.path.isabs(f), f   # otherwise the loop would never end
    # A file is skipped if it -- or any ancestor directory -- matches.
    candidate = f
    while candidate:
        if _SKIP_FILES_RE.match(candidate):
            return True
        candidate = os.path.dirname(candidate)
    return False
def lint_strftime(files_to_lint):
    """Complain if you use strftime() instead of i18n.format_date()."""
    _BAD_REGEXPS = (
        # Javascript
        r'toDateString\(\)',
        # Jinja2 and python.  These are all the modifiers that depend
        # on the current locale (e.g. %B).
        r'strftime\([\'\"][^\'\"]*%[aAbBcDhpPrxX+]',
        # These are modifiers that are numbers, but used in contexts that
        # indicate they're probably US-specific, e.g. '%d,', which means
        # the current day-of-month followed by a comma, or day before
        # month.
        r'strftime\([\'\"][^\'\"]*(?:%d,|%d.%m)',
    )
    bad_re = re.compile('(?:%s)' % ')|(?:'.join(_BAD_REGEXPS))

    for fname in files_to_lint:
        rel = ka_root.relpath(fname)
        # Ignore third_party code.  Normally third_party code wouldn't wind
        # up being linted in the first place because all of third_party is in
        # webapp's lint_blacklist.txt, but for code that lives in third_party
        # that has its own lint_blacklist.txt (e.g. live-editor), webapp's
        # lint blacklist.txt gets overridden.
        if rel.startswith('third_party'):
            continue
        if intl.english_only.should_not_translate_file(rel):
            continue
        # Ignore python files we're not uploading to appengine.  (We
        # can't use this rule with all files since js and html files
        # aren't uploaded directly, but we still want to lint them.)
        if fname.endswith('.py') and _does_not_upload(ka_root.relpath(fname)):
            continue
        lineno = lintutil.line_number(fname, bad_re, default=None)
        if lineno is not None:
            yield (fname, lineno,
                   'Using U.S.-specific date formatting. '
                   'Use intl.i18n.format_date() and friends instead.')
| # TODO(colin): fix these lint errors (http://pep8.readthedocs.io/en/release-1.7.x/intro.html#error-codes)
# pep8-disable:E124,E127,E129,E271
"""Linters that warn about common problems with i18n markup."""
from __future__ import absolute_import
import logging
import os
import re
import subprocess
import sys
import tokenize
from google.appengine.tools import appcfg
from shared import ka_root
from shared.testutil import lintutil
from third_party import i18nize_templates
from third_party.babel.messages import jslexer
import intl.data
import intl.english_only
import intl.locale
import kake.make
import modules_util
# This catches all the jinja2 function calls: it matches {{.*(.*}}
# TODO(csilvers): handle " and ' so we can have }}'s inside them.
_J2_FUNCTION_RE = re.compile(r'{{(}?[^}])*\((}?[^}])*}}')
# This catch i18n._() (or the obsolete $._()) being used inside of
# jinja2. We should use {{ _js("..") }} when marking up text inside a
# <script/> tag in jinja2 instead.
_BAD_JS_MARKUP = re.compile(r'(?:i18n|\$)\._\(([^\)])')
# This captures a _() call when the input is a jinja2 function call.
# The string is in group(1) and any |-modifiers are in group(2).
# Any keyword arguments to _() are in group(3).
# TODO(csilvers): modify group(3) so nested parens or parens in "..."
# don't trip it up. Nested parens is hardest, so I didn't bother.
_GETTEXT_RE = re.compile(r'\b(?:_|gettext)\('
r'("(?:\\.|[^"\\])*"|\'(?:\\.|[^\'\\])*\')'
r'([^,)]*)([^)]*)',
re.DOTALL)
# This captures a bad i18n_do_not_translate() call that is not a literal string
# when the input is a jinja2 function call. The args to the
# i18n_do_not_translate() function are in group(1) and may contain any number
# of non-nested function calls. As this is just used for reporting and there
# is not a good way to do nested parens this is good enough.
_BAD_DO_NOT_TRANSLATE_RE = re.compile(r'\bi18n_do_not_translate\s*\(\s*('
r'(?![\'"])'
r'(?:[^\(\)]*(?:\([^\)]*\))*)*)\)',
re.DOTALL)
# This captures a ngettext() call when the input is a jinja2 function call.
# The two string are in group(1) and group(3), and their |-modifiers (if
# any) are in group(2) and group(4). ngettext keyword args are in group(5).
_NGETTEXT_RE = re.compile(r'\bngettext\('
r'("(?:\\.|[^"\\])*"|\'(?:\\.|[^\'\\])*\')'
r'([^,)]*)'
r'\s*,\s*'
r'("(?:\\.|[^"\\])*"|\'(?:\\.|[^\'\\])*\')'
r'([^,)]*)([^)]*)',
re.DOTALL)
# This captures string arguments in the kwargs.
# The string is in group(1) and any |-modifiers are in group(2).
_KWARG_RE = re.compile(r',\s*\w+\s*=\s*'
r'("(?:\\.|[^"\\])*"|\'(?:\\.|[^\'\\])*\')'
r'([^,)]*)')
# These are the characters that require us to add |safe, because
# otherwise they'd be html-escaped.
_NEED_SAFE_RE = re.compile(r'[<>&]')
def _needs_safe(string, post_string):
"""True if 'string' has html chars in it, and post_string lacks |safe."""
# String includes the leading and trailing "'s, which aren't part
# of the string proper.
assert string[0] in '"\'' and string[-1] in '"\'', string
return _NEED_SAFE_RE.search(string[1:-1]) and '|safe' not in post_string
def lint_missing_safe_in_jinja2(files_to_lint):
    """Find instances where we translate html but don't mark it |safe.

    We html-escape the output of {{ _("...") }} in html templates, which
    is safe but can cause problems when the text is {{ _("<b>hi</b>") }}.
    In that case, the user needs to do {{ _("<b>hi</b>")|safe }}.

    We detect instances where the user does {{ _("...<..") }} in jinja2
    templates but lacks the |safe afterwards.  Unless that line is marked
    with {{# @Nolint #}}, we flag it as a lint error.

    We check the string arguments of _()/gettext-style calls, both of the
    string arguments of ngettext(), and any string-valued keyword args
    passed to either.

    Returns:
       List of triples: (filename, lineno, error message)
    """
    # Limit files-to-lint to html and txt files under templates/.
    files = lintutil.filter(files_to_lint,
                            prefix='templates/', suffix=('.html', '.txt'))
    for filename in files:
        contents = lintutil.file_contents(filename)
        for fn_match in _J2_FUNCTION_RE.finditer(contents):
            # Make sure there's no @Nolint anywhere around this function.
            newline = contents.find('\n', fn_match.end())
            newline = newline if newline > -1 else len(contents)
            if '@Nolint' in contents[fn_match.start():newline]:
                continue
            # _()-style calls: group(1) is the string, group(2) its
            # |-modifiers, group(3) the keyword args (if any).
            for m in _GETTEXT_RE.finditer(fn_match.group(0)):
                if _needs_safe(m.group(1), m.group(2)):
                    linenum = 1 + contents.count('\n', 0,
                                                 fn_match.start() + m.start(1))
                    # BUGFIX: the two message fragments used to lack a
                    # separating space ("escapingthe tag/attr").
                    yield (filename, linenum,
                           'Replace %s with _("..."|safe) to avoid escaping '
                           'the tag/attr inside the _().' % m.group(1))
                for km in _KWARG_RE.finditer(m.group(3)):
                    if _needs_safe(km.group(1), km.group(2)):
                        linenum = 1 + contents.count('\n', 0,
                                                     fn_match.start() +
                                                     m.start() + km.start(1))
                        yield (filename, linenum,
                               'Replace %s with _(..., foo="..."|safe) to '
                               'avoid escaping the tag/attr inside foo.'
                               % km.group(1))
            # ngettext() calls: both the singular (group 1/2) and plural
            # (group 3/4) strings need checking, plus kwargs (group 5).
            for m in _NGETTEXT_RE.finditer(fn_match.group(0)):
                if _needs_safe(m.group(1), m.group(2)):
                    linenum = 1 + contents.count('\n', 0,
                                                 fn_match.start() + m.start(1))
                    yield (filename, linenum,
                           'Replace %s with ngettext("..."|safe, ...) to '
                           'avoid escaping the tag/attr inside the _().'
                           % m.group(1))
                if _needs_safe(m.group(3), m.group(4)):
                    linenum = 1 + contents.count('\n', 0,
                                                 fn_match.start() + m.start(3))
                    yield (filename, linenum,
                           'Replace %s with ngettext(..., "..."|safe) to '
                           'avoid escaping the tag/attr inside the _().'
                           % m.group(3))
                for km in _KWARG_RE.finditer(m.group(5)):
                    if _needs_safe(km.group(1), km.group(2)):
                        linenum = 1 + contents.count('\n', 0,
                                                     fn_match.start() +
                                                     m.start() + km.start(1))
                        yield (filename, linenum,
                               'Replace %s with ngettext(..., foo="..."|safe)'
                               ' to avoid escaping the tag/attr inside foo.'
                               % km.group(1))
def lint_no_wrong_i18n_markup_in_jinja2(files_to_lint):
    """Find where we mark js within html with i18n._ instead of {{ _js().

    Inside a <script> tag in a jinja2 template, text must be marked for
    translation with {{ _js("...") }} rather than javascript-style
    i18n._("...").  Matches on a line containing @Nolint are skipped.

    Returns:
       List of triples: (filename, lineno, error message)
    """
    files = lintutil.filter(files_to_lint, prefix='templates/', suffix='.html')
    for filename in files:
        contents = lintutil.file_contents(filename)
        # NOTE(review): both this loop and the inner one below scan with
        # _BAD_JS_MARKUP, so the inner finditer just re-matches the outer
        # match's own text.  Presumably the outer scan was meant to find the
        # enclosing <script>/jinja2 region -- confirm against _BAD_JS_MARKUP's
        # definition (not visible in this chunk).
        for fn_match in _BAD_JS_MARKUP.finditer(contents):
            # Make sure there's no @Nolint anywhere around this function.
            newline = contents.find('\n', fn_match.end())
            newline = newline if newline > -1 else len(contents)
            if '@Nolint' in contents[fn_match.start():newline]:
                continue
            for m in _BAD_JS_MARKUP.finditer(fn_match.group(0)):
                linenum = 1 + contents.count('\n', 0,
                                             fn_match.start() + m.start(1))
                yield (filename, linenum,
                       'Do {{ _js("%s") }} instead of %s inside <script> '
                       'tags within jinja2.' % (m.group(1), m.group(0)))
def lint_non_literal_i18n_do_not_translate(files_to_lint):
    """Find where we mark html as not needing translation but its non-literal.

    We require anything we mark with i18n_do_not_translate to be only a
    string literal.  i18n_do_not_translate marks the string as "safe"
    (that is, jinja2 won't autoescape it), so we need to be extra careful
    we don't create a potential XSS attack vector by marking some variable
    as safe.

    Unless the line is marked with {{# @Nolint #}}, we flag it as a lint
    error.

    Returns:
       List of triples: (filename, lineno, error message)
    """
    # Only jinja2 templates -- html and txt files under templates/ --
    # are relevant here.
    candidates = lintutil.filter(files_to_lint,
                                 prefix='templates/', suffix=('.html', '.txt'))
    for filename in candidates:
        contents = lintutil.file_contents(filename)
        for fn_match in _J2_FUNCTION_RE.finditer(contents):
            # Honor @Nolint anywhere between the start of the jinja2
            # expression and the end of its (last) line.
            eol = contents.find('\n', fn_match.end())
            if eol == -1:
                eol = len(contents)
            if '@Nolint' in contents[fn_match.start():eol]:
                continue
            for bad in _BAD_DO_NOT_TRANSLATE_RE.finditer(fn_match.group(0)):
                # Convert the match offset into a 1-based line number.
                offset = fn_match.start() + bad.start(1)
                linenum = contents.count('\n', 0, offset) + 1
                yield (filename, linenum,
                       '%s contains something that is not just a '
                       'literal string. Only literal strings can be '
                       'inside i18n_do_not_translate.' % bad.group(1))
# State machine when lexing uses of i18n._ in source code.
# Keys are what token we're currently looking at, value is what tokens
# should come next.  Tokens can either be a type (tokenize.XXX) or a
# literal value.  Key of None indicates when to start parsing; value
# of None terminates a successful parse.  Newlines and comments are
# always elided when looking at tokens.
# The accepted shape is therefore:  <start-token> '(' STRING ('+' STRING)*
# followed by ',' (keyword args) or ')' (end of call).
_START_TOKENS = ('_', 'gettext', 'ngettext', 'cached_gettext',
                 'cached_ngettext', 'mark_for_translation')
_GETTEXT_STATE_MACHINE = {
    None: _START_TOKENS,
    '_': ('(',),
    'gettext': ('(',),
    'ngettext': ('(',),
    'cached_gettext': ('(',),
    'cached_ngettext': ('(',),
    'mark_for_translation': ('(',),
    '(': (tokenize.STRING,),
    # comma introduces the keyword args for the gettext call.
    tokenize.STRING: ('+', ',', ')', tokenize.STRING),
    '+': (tokenize.STRING,),
    # Once we are at keywords, the function call is ok.
    # TODO(csilvers): for ngettext we want to stop after the second comma.
    ',': None,
    ')': None
}
def lint_non_literal_i18n_in_python(files_to_lint):
    """Complain about uses of i18n._() on something other than a string.

    i18n._(variable) is dangerous -- you don't know if the variable
    has been translated or not.  Sometimes it's ok, but usually it's a
    mistake, and a better solution is to pass in translated_variable
    instead.  (The OK cases can be marked with @Nolint.)

    Yields:
       Triples: (filename, lineno, error message)
    """
    current_state = None       # not currently in a gettext context
    gettext_linenum = None     # linenum of current gettext call
    has_nolint = False         # any line of the i18n._ may have @nolint
    # Only need to lint python files, but not test files.
    files = lintutil.filter(files_to_lint, suffix='.py',
                            exclude_substrings=['_test.py'])
    for (fname, ttype, token, (linenum, _), _, line) in (
            lintutil.python_tokens(files)):
        # Don't lint *definitions* of these methods.
        if line.strip().startswith('def '):
            continue
        # The state machine may transition on either the type
        # of the token, or its literal value.
        if token in _GETTEXT_STATE_MACHINE[current_state]:
            current_state = token
            gettext_linenum = gettext_linenum or linenum
            has_nolint = has_nolint or '@Nolint' in line
        elif ttype in _GETTEXT_STATE_MACHINE[current_state]:
            current_state = ttype
            gettext_linenum = gettext_linenum or linenum
            has_nolint = has_nolint or '@Nolint' in line
        elif current_state is None:
            # We weren't in gettext before, and we're still not.
            pass
        else:
            # We're in gettext, but can't transition: we're bad.
            # Give ourselves *one more* chance for nolint.
            has_nolint = has_nolint or '@Nolint' in line
            if current_state in _START_TOKENS:
                # BUT: If we last saw _ or ngettext and can't
                # transition because we're not a '(', that
                # means it's not a function call, so we can
                # ignore it.  e.g. '(f, _) = file_and_line()'.
                pass
            elif not has_nolint:
                yield (fname, gettext_linenum,
                       'gettext-like calls should only have '
                       'literal strings as their arguments')
            # Reset: we are out of any gettext context now.
            current_state = None
            gettext_linenum = None
            has_nolint = False
        # If we've happily ended a gettext call, clear the state.
        # (',' and ')' map to None in the state machine, meaning "done".)
        if _GETTEXT_STATE_MACHINE[current_state] is None:
            current_state = None
            gettext_linenum = None
            has_nolint = False
def lint_non_literal_i18n_in_javascript(files_to_lint):
    """Complain about uses of i18n._() on something other than a string.

    i18n._(variable) is dangerous -- you don't know if the variable
    has been translated or not.

    Yields:
       Triples: (filename, lineno, error message)
    """
    files_to_lint = lintutil.filter(
        files_to_lint, suffix=('.js', '.jsx'),
        exclude_substrings=('/i18n.js', '/i18n_test.js'))
    # This regexp pattern captures a string, possibly concatenated with +'s.
    # It handles ", ', and ` (template-string) quoting.
    js_str = (r'(?:' +
              r'"(?:\\.|[^"])*"|' +
              r"'(?:\\.|[^'])*'|" +
              r'`(?:\\.|[^`])*`' +
              ')')
    js_concat_str = '\s*%s(?:\s*\+\s*%s)*\s*' % (js_str, js_str)
    gettext_occurrences = re.compile(r'\b(i18n._|i18n.ngettext)\(')
    # A "valid" occurrence is one whose argument(s) are string literals
    # (possibly concatenated), immediately followed by ',' or ')'.
    valid_gettext_occurrences = re.compile(
        r'\bi18n._\(%(str)s[,)]|\bi18n.ngettext\(%(str)s,\s*%(str)s[,)]'
        % {'str': js_concat_str})
    for f in files_to_lint:
        contents = lintutil.file_contents(f)
        # Keyed by match start-offset so the two scans can be correlated.
        all_occurrences = {
            m.start(): m for m in gettext_occurrences.finditer(contents)}
        valid_occurrences = {
            m.start(): m for m in valid_gettext_occurrences.finditer(contents)}
        for (startpos, m) in all_occurrences.iteritems():
            i18n_fn = m.group(1)
            msg = None    # set to non-None if there's a problem.
            if startpos not in valid_occurrences:
                msg = ('%s must have string literals as arguments, '
                       'with no variables or templates' % i18n_fn)
            else:
                # Then we're ok with this!  *Unless* it's a template string
                # with $(...) inside it, then we're not ok.
                # (An unescaped ${ interpolates, which defeats translation.)
                m2 = valid_occurrences[startpos]
                if m2.group().count(r'${') > m2.group().count(r'\${'):
                    msg = ('You must use %' + '(...)s with template strings '
                           'inside %s, not ${...}' % i18n_fn)
            if msg:
                start_lineno = 1 + contents.count('\n', 0, startpos)
                # Doing a real regexp to find the end of this function call
                # is tough, we just do something simple and pray.
                end_paren = contents.find(')', startpos)
                if end_paren == -1:
                    end_paren = len(contents)
                end_lineno = 1 + contents.count('\n', 0, end_paren)
                # Skip if any line the call spans carries @Nolint.
                if any(lintutil.has_nolint(f, lineno)
                       for lineno in xrange(start_lineno, end_lineno + 1)):
                    continue
                yield (f, start_lineno, msg)
def lint_templates_are_translated(files_to_lint):
    """Verify that nltext in the input templates are marked for translation.

    All natural-language text in jinja2 and handlebars files should be
    marked for translation, using {{ _("...") }} or {{#_}}...{{/_}}.
    i18nize_templates.py is a tool that can do this for you automatically.
    We run this tool in 'check' mode to verify that every input file
    is already marked up appropriately.

    Since i18nize_templates isn't perfect (it thinks you need to
    translate text like '<NAME>' or 'x' when used on a 'close'
    button), you can use nolint-like functionality to tell this linter
    it's ok if some text is not marked up to be translated.  Unlike
    other tests though, we do not use the @Nolint directive for this,
    but instead wrap the relevant text in
       {{ i18n_do_not_translate(...) }}
    or
       {{#i18nDoNotTranslate}}...{{/i18nDoNotTranslate}}

    Yields:
       Triples: (filename, lineno, error message)
    """
    # Add some ka-specific function we know do not have nltext arguments.
    i18nize_templates.mark_function_args_lack_nltext(
        'js_css_packages.package',
        'js_css_packages.script',
        'handlebars_template',
        'youtube.player_embed',
        'log.date.strftime',
        'emails.tracking_image_url',
        'templatetags.to_canonical_url',
        'render_react',
    )
    for f in files_to_lint:
        abs_f = f
        f = ka_root.relpath(f)
        # Exclude files that we don't need to translate: we don't care
        # if those files are 'properly' marked up or not.
        if intl.english_only.should_not_translate_file(f):
            continue
        if (f.startswith('templates' + os.sep) and
            (f.endswith('.html') or f.endswith('.txt'))):
            # jinja2 template
            parser = i18nize_templates.get_parser_for_file(f)
            correction = 'wrap the text in {{ i18n_do_not_translate() }}'
        elif f.endswith('.handlebars'):
            # handlebars template
            parser = i18nize_templates.get_parser_for_file(f)
            correction = ('wrap the text in {{#i18nDoNotTranslate}}...'
                          '{{/i18nDoNotTranslate}}')
        else:
            continue
        file_contents = lintutil.file_contents(abs_f)
        try:
            # Parsing re-emits the file with translation markup inserted;
            # any line that changed is a line missing markup.
            parsed_output = parser.parse(
                file_contents.decode('utf-8')).encode('utf-8')
        except i18nize_templates.HTMLParser.HTMLParseError, why:
            m = re.search(r'at line (\d+)', str(why))
            linenum = int(m.group(1)) if m else 1
            yield (abs_f, linenum,
                   '"i18nize_templates.py %s" fails: %s' % (f, why))
            continue
        orig_lines = file_contents.splitlines()
        parsed_lines = parsed_output.splitlines()
        # NOTE(review): this assumes parsed_output has at least as many
        # lines as the input; if the parser ever emits fewer lines this
        # raises IndexError -- confirm i18nize_templates preserves line
        # count.
        for i in xrange(len(orig_lines)):
            if orig_lines[i] != parsed_lines[i]:
                yield (abs_f, i + 1,
                       'Missing _(); run tools/i18nize_templates.py or %s '
                       '(expecting "%s")'
                       % (correction, parsed_lines[i].strip()))
def _lint_js_content(filename, content):
    """Verify that nltext in the js content is marked for translation.

    All natural-language text in js files should be marked for
    translation using i18n._ or i18n.ngettext.  It is very hard though
    to figure out if a string in js should be translated or not.  So we
    check for strings that we know should be translated.  For now this
    just checks to make sure that string arguments inside a function
    called Text() or React.Dom.* are marked for translation.

    filename should be an absolute path.

    Returns:
       List of triples: (filename, lineno, error message)
    """
    # Lexer state, maintained across tokens:
    line_number = None                      # lineno of last collected string
    last_argument = None                    # last bare string arg seen
    is_first_argument_within_func = False   # are we before the first comma?
    concatenate_next_argument = False       # last token was '+'
    call_stack = []                         # func name per open (/{/[
    last_maybe_func_name = None             # dotted name being accumulated
    concatenate_next_name = None            # last token was '.'
    if ".jsx" in filename:
        correct_wrappers = ("<$_> .. </$_> or "
                            "<$i18nDoNotTranslate> .. "
                            "</$i18nDoNotTranslate>")
    else:
        correct_wrappers = "i18n._(..) or i18n.i18nDoNotTranslate(..)"
    def func_string_args_should_be_translated(func_name):
        """Return true if string args should be translated.

        eg. React.DOM.strong({style:{color:"red"}}, "Test")
        The first argument is an object, but the second is an untranslated
        string that should be wrapped in i18n._()
        """
        if not func_name:
            return False
        # Old versions of react have React.creatElement().  Newer ones
        # hae _react2.default.createElement().
        if ((func_name == "React.createElement" or
             'react' in func_name and 'createElement' in func_name) and
                not is_first_argument_within_func):
            # The first arg within CreateElement can be a string like "div"
            # all others must be translated
            return True
        elif func_name.startswith("React.DOM.") or func_name == "Text":
            return True
        return False
    for token in jslexer.tokenize(content):
        if token.type == 'operator' and token.value in ["(", "{", "["]:
            # Opening bracket: remember which function (if any) we entered.
            call_stack.append(last_maybe_func_name)
            is_first_argument_within_func = True
        elif (token.type == 'string' and
              call_stack and
              func_string_args_should_be_translated(call_stack[-1])):
            # Collect any string that is an immediate child of Text - there
            # should not be any, it should be wrapped in i18n._
            new_value = jslexer.unquote_string(token.value.decode('utf-8'))
            line_number = token.lineno
            if concatenate_next_argument:
                last_argument = (last_argument or '') + new_value
                concatenate_next_argument = False
            else:
                last_argument = new_value
        elif token.type == 'operator' and token.value == '+':
            concatenate_next_argument = True
        elif token.type == 'operator':
            # Any other operator terminates the current argument; if we
            # had collected an untranslated string, report it now.
            last_func = call_stack[-1] if call_stack else None
            if (func_string_args_should_be_translated(last_func) and
                    last_argument and
                    not intl.english_only.should_not_translate_string(
                        last_argument) and
                    token.value in [")", "}", "]", ",", ":"]
                    ):
                yield (filename, line_number,
                       "The string '%s' inside of a %s() is not translated. "
                       "Please wrap it in %s or add the file to "
                       "intl/english_only.py" % (
                           last_argument.encode("utf-8"),
                           last_func.encode("utf-8"),
                           correct_wrappers))
            is_first_argument_within_func = False
            last_argument = None
            if token.value in [")", "}", "]"] and call_stack:
                call_stack.pop()
        # Keep track of last full func name eg. React.DOM.div
        if token.type == 'name':
            # This could also be variable, keyword, or something else, but
            # we will keep it around just in case it is followed by a (
            if last_maybe_func_name and concatenate_next_name:
                last_maybe_func_name += ".%s" % token.value
                # NOTE(review): leaving the flag True here means two
                # adjacent name tokens would also be joined with a '.';
                # presumably harmless for real js, but confirm intent.
                concatenate_next_name = True
            else:
                last_maybe_func_name = token.value
        elif token.type == 'operator' and token.value == '.':
            concatenate_next_name = True
        else:
            concatenate_next_name = False
            last_maybe_func_name = None
def lint_js_files_are_translated(files_to_lint):
    """Verify that nltext in the js files are marked for translation.

    See docstring of: _lint_js_content

    Returns:
       List of triples: (filename, lineno, error message)
    """
    # jsx files must be compiled to js before we can lint them, so kick
    # off all of those builds up front; we then lint the compiled output.
    jsx_targets = []
    for f in files_to_lint:
        if f.endswith('.jsx'):
            jsx_targets.append(
                ('genfiles/compiled_jsx/en/%s.js' % ka_root.relpath(f), {}))
    kake.make.build_many(jsx_targets)
    for abs_f in lintutil.filter(files_to_lint, suffix=('.js', '.jsx')):
        rel_f = ka_root.relpath(abs_f)
        # Exclude files that we don't need to translate: we don't care
        # if those files are 'properly' marked up or not.
        if intl.english_only.should_not_translate_file(rel_f):
            continue
        if rel_f.endswith(".jsx"):
            # Lint the compiled js, not the jsx source itself.
            abs_f = "%s/genfiles/compiled_jsx/en/%s.js" % (ka_root.root, rel_f)
            rel_f = ka_root.relpath(abs_f)
        for error in _lint_js_content(abs_f, lintutil.file_contents(abs_f)):
            yield error
def lint_have_needed_babel_locales(files_to_lint):
    """Make sure we have all the locales we need, in third_party/babel.

    third_party/babel/localedata comes with 664 languages, which is
    great for coverage but bad for deploy time.

    So to speed things up, I added to app.yaml's skip_files all
    language files that aren't used by either a locale in all_ka_locales or
    a YouTube locale.

    This lint check makes sure that when we update those lists (or update the
    babel subrepo), we upload any localedata languages that we need to.
    """
    # Only run if one of the files that could change the needed-locale
    # set is being linted.
    if (ka_root.join('intl', 'i18n.py') not in files_to_lint and
            not any(f.startswith(intl.data.INTL_VIDEO_PLAYLISTS_DIR)
                    for f in files_to_lint) and
            ka_root.join('third_party', 'babel-khansrc') not in files_to_lint):
        return
    config = modules_util.module_yaml('default')
    # Take only the rules for third_party/babel/localedata, and strip
    # off that prefix since we're starting the FileIterator in the
    # localedata directory rather than ka-root.
    # Note this depends on the babel rules starting with ^ and ending with $.
    localedata_root = 'third_party/babel/localedata'
    # NOTE(review): re.escape()-ing a string that is itself a regex looks
    # odd -- presumably the skip_files rules contain this text literally
    # (escaped), so we must escape it to match; confirm against app.yaml.
    prefix = re.escape(r'(?:.*/webapp/)?')
    babel_regexps = [s for s in re.findall(r'\^(?:%s)?([^$]*)\$' % prefix,
                                           config['skip_files'].regex.pattern)
                     if s.startswith(localedata_root + '/')]
    skip_files = [s.replace('%s/' % localedata_root, '^')
                  for s in babel_regexps]
    skip_re = re.compile('|'.join('(?:%s)' % p for p in skip_files))
    # FileIterator is chatty; silence logging for the duration.
    orig_level = logging.getLogger().level
    try:
        logging.getLogger().setLevel(logging.ERROR)
        localedata_files = appcfg.FileIterator(ka_root.join(localedata_root),
                                               skip_re, config['runtime'])
        localedata_files = list(localedata_files)
    finally:
        logging.getLogger().setLevel(orig_level)
    # Remove the '.dat' extension.
    all_locales_for_babel = frozenset(os.path.splitext(f)[0]
                                      for f in localedata_files)
    needed_locales = intl.data.all_ka_locales(include_english=True)
    babel_locales = set([b for b in [intl.locale.ka_locale_to_babel(l)
                                     for l in needed_locales] if b])
    for babel_locale in babel_locales:
        # We need to check zh_Hans_CN.dat exists, but also zh_Hans.dat, etc.
        for prefix in intl.locale.locale_prefixes(babel_locale):
            # We need to convert from KA-style - to babel-style _.
            prefix = prefix.replace('-', '_')
            if prefix not in all_locales_for_babel:
                yield ('skip_files.yaml', 1,
                       "We need babel locale info for %s but it's been added"
                       " to skip-files (need to whitelist it)." % prefix)
def lint_not_using_gettext_at_import_time(files_to_lint):
    """Make sure we don't use i18n._/etc in a static context.

    If you have a global variable such as '_FOO = i18n._("bar")', at
    the top of some .py file, it won't work the way you intend because
    i18n._() needs to be called while handling a request in order to
    know what language to translate to.  (Instead, you'd need to do
       _FOO = lambda: i18n._("bar")
    or some such.)

    This tests for this by mocking i18n._ et al., and then importing
    everything (but running nothing).  Any i18n._ calls that happen
    during this import are problematic!  We have to spawn a new
    python process to make sure we do the importing properly (and
    without messing with the currently running python environment!)
    """
    candidate_files_to_lint = lintutil.filter(files_to_lint, suffix='.py')
    files_to_lint = []
    for filename in candidate_files_to_lint:
        contents = lintutil.file_contents(filename)
        # Check that it's plausible this file uses i18n._ or similar.
        # This also avoids importing random third-party files that may
        # have nasty side-effects at import time (all our code is too
        # well-written to do that!)
        if 'import intl' in contents or 'from intl' in contents:
            files_to_lint.append(filename)
    # The helper script below hooks intl.request.ka_locale: any module
    # that calls i18n._ at import time ends up calling ka_locale, which
    # we replace with add_lint_error().  Errors are reported on stdout
    # as 'GETTEXT ERROR <file> <line>' lines that we parse afterwards.
    program = """\
import os # @Nolint(linter can't tell this is in a string!)
import sys # @Nolint(linter can't tell this is in a string!)
import traceback
import intl.request # @Nolint(seems unused to our linter but it's used)
_ROOT = "%s"
def add_lint_error(f):
    # We assume code in 'intl' doesn't make this mistake, and thus
    # the first stack-frame before we get into 'intl' is the
    # offending code. ctx == '<string>' means the error occurred in
    # this pseudo-script.
    for (ctx, lineno, fn, line) in reversed(traceback.extract_stack()):
        if os.path.isabs(ctx):
            ctx = os.path.relpath(ctx, _ROOT)
        if ctx != '<string>' and not ctx.startswith('intl/'):
            if ctx == f:
                print 'GETTEXT ERROR {} {}'.format(ctx, lineno)
            break
    return 'en' # a fake value for intl.request.ka_locale
""" % ka_root.root
    if not files_to_lint:
        return
    for filename in files_to_lint:
        modulename = ka_root.relpath(filename)
        modulename = os.path.splitext(modulename)[0]    # nix .py
        modulename = modulename.replace('/', '.')
        # Force a re-import.
        program += 'sys.modules.pop("%s", None)\n' % modulename
        program += ('intl.request.ka_locale = lambda: add_lint_error("%s")\n'
                    % ka_root.relpath(filename))
        program += 'import %s\n' % modulename
    # Run the generated script in a fresh interpreter with our sys.path.
    p = subprocess.Popen(
        ['env', 'PYTHONPATH=%s' % ':'.join(sys.path),
         sys.executable, '-c', program],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    p.wait()
    lint_output = p.stdout.read()
    for line in lint_output.splitlines():
        if line.startswith('GETTEXT ERROR '):
            line = line[len('GETTEXT ERROR '):]
            (filename, linenum) = line.rsplit(' ', 1)
            yield (ka_root.join(filename), int(linenum),
                   'Trying to translate at import-time, but '
                   'translation only works at runtime! '
                   'Use intl.i18n.mark_for_translation() instead.')
# Lazily-compiled regex built from app.yaml's skip_files; cached here so
# we only parse the module yaml once per process.
_SKIP_FILES_RE = None
def _does_not_upload(f):
    """True if f, relative to ka-root, is not uploaded to appengine."""
    global _SKIP_FILES_RE
    if not _SKIP_FILES_RE:
        config = modules_util.module_yaml('default', for_production=True)
        _SKIP_FILES_RE = config['skip_files'].regex
    assert not os.path.isabs(f), f   # or the while will never terminate
    # Walk up the directory tree: skipping a parent directory skips f too.
    path = f
    while path:
        if _SKIP_FILES_RE.match(path):
            return True
        path = os.path.dirname(path)
    return False
def lint_strftime(files_to_lint):
    """Complain if you use strftime() instead of i18n.format_date().

    strftime() (and javascript's toDateString()) format dates using
    U.S. conventions and English names, which is wrong on localized
    pages; the i18n helpers pick the right format for the locale.

    Yields:
       Triples: (filename, lineno, error message)
    """
    _BAD_REGEXPS = (
        # Javascript
        r'toDateString\(\)',
        # Jinja2 and python.  These are all the modifiers that depend
        # on the current locale (e.g. %B).
        r'strftime\([\'\"][^\'\"]*%[aAbBcDhpPrxX+]',
        # These are modifiers that are numbers, but used in contexts that
        # indicate they're probably US-specific, e.g. '%d,', which means
        # the current day-of-month followed by a comma, or day before
        # month.
        r'strftime\([\'\"][^\'\"]*(?:%d,|%d.%m)',
    )
    bad_re = re.compile('|'.join('(?:%s)' % b for b in _BAD_REGEXPS))
    for f in files_to_lint:
        relpath = ka_root.relpath(f)
        # Ignore third_party code.  Normally third_party code wouldn't wind up
        # being linted in the first place because all of third_party is in
        # webapp's lint_blacklist.txt, but for code that lives in third_party
        # that has its own lint_blacklist.txt (e.g. live-editor), webapp's lint
        # blacklist.txt gets overridden.
        if relpath.startswith('third_party'):
            continue
        if intl.english_only.should_not_translate_file(relpath):
            continue
        # Ignore python files we're not uploading to appengine.  (We
        # can't use this rule with all files since js and html files
        # aren't uploaded directly, but we still want to lint them.)
        # (Reuse the relpath computed above rather than recomputing it.)
        if f.endswith('.py') and _does_not_upload(relpath):
            continue
        badline = lintutil.line_number(f, bad_re, default=None)
        if badline is not None:
            yield (f, badline,
                   'Using U.S.-specific date formatting. '
                   'Use intl.i18n.format_date() and friends instead.')
We html-escape the output of {{ _("...") }} in html templates, which is safe but can cause problems when the text is {{ _("<b>hi</b>") }}. In that case, the user needs to do {{ _("<b>hi</b>")|safe }}. We detect instances where the user does {{ _("...<..") }} in jinja2 templates but lacks the |safe afterwards. Unless that line is marked with {{# @Nolint #}}, we flag it as a lint error. Returns: List of triples: (filename, lineno, error message) # Limit files-to-lint to html and txt files under templates/. # Make sure there's no @Nolint anywhere around this function. Find where we mark js within html with i18n._ instead of {{ _js() Returns: List of triples: (filename, lineno, error message) # Make sure there's no @Nolint anywhere around this function. Find where we mark html as not needing translation but its non-literal We require anything we mark with i18n_do_not_translate to be only a string literal. i18n_do_not_translate marks the string as "safe" (that is jinja2 won't autoescape it) so we need to be extra careful we don't create a potential XSS attack vector if we end up marking some variable as safe. Unless that line is marked with {{# @Nolint #}}, we flag it as a lint error. Returns: List of triples: (filename, lineno, error message) # Limit files-to-lint to files under templates/ that are html or txt. # Make sure there's no @Nolint anywhere around this function. # State machine when lexing uses of i18n._ in source code. # Keys are what token we're currently looking at, value is what tokens # should come next. Tokens can either be a type (tokenize.XXX) or a # literal value. Key of None indicates when to start parsing; value # of None terminates a successful parse. Newlines and comments are # always elided when looking at tokens. # comma introduces the keyword args for the gettext call. # Once we are at keywords, the function call is ok. # TODO(csilvers): for ngettext we want to stop after the second comma. 
Complain about uses of i18n._() on something other than a string. i18n._(variable) is dangerous -- you don't know if the variable has been translated or not. Sometimes it's ok, but usually it's a mistake, and a better solution is to pass in translated_variable instead. (The OK cases can be marked with @Nolint.) # not currently in a gettext context # linenum of current gettext call # any line of the i18n._ may have @nolint # Only need to lint python files, but not test files. # Don't lint *definitions* of these methods. # The state machine may transition on either the type # of the token, or its literal value. # We weren't in gettext before, and we're still not. # We're in gettext, but can't transition: we're bad. # Give ourselves *one more* chance for nolint. # BUT: If we last saw _ or ngettext and can't # transition because we're not a '(', that # means it's not a function call, so we can # ignore it. e.g. '(f, _) = file_and_line()'. # If we've happily ended a gettext call, clear the state. Complain about uses of i18n._() on something other than a string. i18n._(variable) is dangerous -- you don't know if the variable has been translated or not. # This regexp pattern captures a string, possibly concatenated with +'s. # set to non-None if there's a problem. # Then we're ok with this! *Unless* it's a template string # with $(...) inside it, then we're not ok. # Doing a real regexp to find the end of this function call # is tough, we just do something simple and pray. Verify that nltext in the input templates are marked for translation. All natural-language text in jinja2 and handlebars files should be marked for translation, using {{ _("...") }} or {{#_}}...{{/_}}. i18nize_templates.py is a tool that can do this for you automatically. We run this tool in 'check' mode to verify that every input file is already marked up appropriately. 
Since i18nize_templates isn't perfect (it thinks you need to translate text like '<NAME>' or 'x' when used on a 'close' button), you can use nolint-like functionality to tell this linter it's ok if some text is not marked up to be translated. Unlike other tests though, we do not use the @Nolint directive for this, but instead wrap the relevant text in {{ i18n_do_not_translate(...) }} or {{#i18nDoNotTranslate}}...{{/i18nDoNotTranslate}} # Add some ka-specific function we know do not have nltext arguments. # Exclude files that we don't need to translate: we don't care # if those files are 'properly' marked up or not. # jinja2 template # handlebars template #i18nDoNotTranslate}}...' Verify that nltext in the js content is marked for translation. All natural-language text in js files should be marked for translation using i18n._ or i18n.ngettext. It is very hard though to figure out if a string in js should be translated or not. So we check for strings that we know should be translated. For now this just checks to make sure that string arguments inside a function called Text() or React.Dom.* are marked for translation. filename should be an absolute path. Returns: List of triples: (filename, lineno, error message) Return true if string args should be translated. eg. React.DOM.strong({style:{color:"red"}}, "Test") The first argument is an object, but the second is an untranslated string that should be wrapped in i18n._() # Old versions of react have React.creatElement(). Newer ones # hae _react2.default.createElement(). # The first arg within CreateElement can be a string like "div" # all others must be translated # Collect any string that is an immediate child of Text - there # should not be any, it should be wrapped in i18n._ # Keep track of last full func name eg. React.DOM.div # This could also be variable, keyword, or something else, but # we will keep it around just in case it is followed by a ( Verify that nltext in the js files are marked for translation. 
See docstring of: _lint_js_content Returns: List of triples: (filename, lineno, error message) # Make sure jsx files are compiled first, then we will lint the resulting # js. # Exclude files that we don't need to translate: we don't care # if those files are 'properly' marked up or not. Make sure we have all the locales we need, in third_party/babel. third_party/babel/localedata comes with 664 languages, which is great for coverage but bad for deploy time. So to speed things up, I added to app.yaml's skip_files all language files that aren't used by either a locale in all_ka_locales or a YouTube locale. This lint check makes sure that when we update those lists (or update the babel subrepo), we upload any localedata languages that we need to. # Take only the rules for third_party/babel/localedata, and strip # off that prefix since we're starting the FileIterator in the # localedata directory rather than ka-root. # Note this depends on the babel rules starting with ^ and ending with $. # Remove the '.dat' extension. # We need to check zh_Hans_CN.dat exists, but also zh_Hans.dat, etc. # We need to convert from KA-style - to babel-style _. Make sure we don't use i18n._/etc in a static context. If you have a global variable such as '_FOO = i18n._("bar")', at the top of some .py file, it won't work the way you intend because i18n._() needs to be called while handling a request in order to know what language to translate to. (Instead, you'd need to do _FOO = lambda: i18n._("bar") or some such.) This tests for this by mocking i18n._ et al., and then importing everything (but running nothing). Any i18n._ calls that happen during this import are problematic! We have to spawn a new python process to make sure we do the importing properly (and without messing with the currently running python environment!) # Check that it's plausible this file uses i18n._ or similar. 
# This also avoids importing random third-party files that may # have nasty side-effects at import time (all our code is too # well-written to do that!) \ import os # @Nolint(linter can't tell this is in a string!) import sys # @Nolint(linter can't tell this is in a string!) import traceback import intl.request # @Nolint(seems unused to our linter but it's used) _ROOT = "%s" def add_lint_error(f): # We assume code in 'intl' doesn't make this mistake, and thus # the first stack-frame before we get into 'intl' is the # offending code. ctx == '<string>' means the error occurred in # this pseudo-script. for (ctx, lineno, fn, line) in reversed(traceback.extract_stack()): if os.path.isabs(ctx): ctx = os.path.relpath(ctx, _ROOT) if ctx != '<string>' and not ctx.startswith('intl/'): if ctx == f: print 'GETTEXT ERROR {} {}'.format(ctx, lineno) break return 'en' # a fake value for intl.request.ka_locale # nix .py # Force a re-import. True if f, relative to ka-root, is not uploaded to appengine. # or the while will never terminate # see if we skip this whole directory Complain if you use strftime() instead of i18n.format_date(). # Javascript # Jinja2 and python. These are all the modifiers that depend # on the current locale (e.g. %B). # These are modifiers that are numbers, but used in contexts that # indicate they're probably US-specific, e.g. '%d,', which means # the current day-of-month followed by a comma, or day before # month. # Ignore third_party code. Normally third_party code wouldn't wind up # being linted in the first place because all of third_party is in # webapp's lint_blacklist.txt, but for code that lives in third_party # that has its own lint_blacklist.txt (e.g. live-editor), webapp's lint # blacklist.txt gets overridden. # Ignore python files we're not uploading to appengine. (We # can't use this rule with all files since js and html files # aren't uploaded directly, but we still want to lint them.) | 2.033638 | 2 |
pdbutil/pdbutil.py | ShintaroMinami/pdbutil | 0 | 6615189 | """
The MIT License (MIT)
Copyright (c) 2020 <NAME>.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sys
import numpy as np
class ProteinBackbone:
    """
    Simple class for handling protein backbone structure.

    Attributes
    ----------
    naa : int
        Number of residues.
    coord : numpy float matrix (naa, 8, 3)
        3D coordinates of the backbone atoms (N,CA,C,O,CB,H,1HA,2HA).
    exists : numpy bool matrix (naa, 8)
        Existence of the coordinates.
    resname : list of str (naa)
        Residue name.
    iaa2org : list of str (naa)
        Original chain ID and residue number.
    org2iaa : dict
        Convert original chain ID and residue number to system residue number.
    dihedral : numpy float matrix (naa, 3)
        Dihedral angles (phi, psi, omega) in degrees.
    distmat : numpy float matrix (naa, naa)
        Distance matrix.
    chainbreak : list of bool (naa)
        Existence of chainbreak.
    seglist : list of tuples (start_aa, end_aa)
        List of continuous segments.
    """
    def __init__(self, length=0, file=None, copyfrom=None, extractfrom=None, calc_dihedral=True, check_chainbreak=True):
        """
        Parameters
        ----------
        length : int
            Number of residues for an empty backbone (used only when no
            other source argument is given).
        file : str
            Path to the PDB file.
        copyfrom : ProteinBackbone
            Original instance to be copied.
        extractfrom : tuple (ProteinBackbone, int, int)
            (original, start, goal): extract residues start..goal inclusive.
        calc_dihedral : bool
            With calculating dihedral angles (file branch only).
        check_chainbreak : bool
            With checking chainbreak (file branch only).
        """
        self.atom2id = {'N':0, 'CA':1, 'C':2, 'O':3, 'CB':4, 'H':5, '1HA':6, '2HA':7}
        self.id2atom = ['N', 'CA', 'C', 'O', 'CB', 'H', '1HA', '2HA']
        # Ideal backbone geometry: angles/dihedrals in radians, lengths in angstroms.
        self.param = {'angle_N_CA_C':np.deg2rad(111.2), 'angle_CA_C_N':np.deg2rad(116.2),
                      'angle_C_N_CA':np.deg2rad(121.7),
                      'angle_N_CA_CB':np.deg2rad(110.6), 'angle_CB_CA_C':np.deg2rad(110.6),
                      'angle_C_N_H':np.deg2rad(123.0), 'angle_N_C_O':np.deg2rad(122.7),
                      'angle_N_CA_1HA':np.deg2rad(109.5), 'angle_N_CA_2HA':np.deg2rad(109.5),
                      'dhdrl_C_N_CA_CB':np.deg2rad(-121.4), 'dhdrl_N_C_CA_CB':np.deg2rad(121.4),
                      'dhdrl_CA_C_N_H':np.deg2rad(0.0), 'dhdrl_CA_N_C_O':np.deg2rad(0.0),
                      'dhdrl_C_N_CA_1HA':np.deg2rad(121.4), 'dhdrl_C_N_CA_2HA':np.deg2rad(-121.4),
                      'length_CN':1.33, 'length_NCA':1.46, 'length_CAC':1.52,
                      'length_CC':1.54, 'length_CO':1.24, 'length_NH':1.01, 'length_CH':1.09}
        self.chainbreak = []
        self.seglist = []
        if file is not None:
            self.file = file
            self.readpdb(self.file)
            if calc_dihedral == True: self.calc_dihedral()
            if check_chainbreak == True: self.seglist = self.check_chainbreak()
        elif copyfrom is not None:
            self.naa = copyfrom.naa
            self.coord = copyfrom.coord.copy()
            self.exists = copyfrom.exists.copy()
            self.resname = copyfrom.resname.copy()
            self.iaa2org = copyfrom.iaa2org.copy()
            self.dihedral = copyfrom.dihedral.copy()
            self.chainbreak = copyfrom.chainbreak.copy()
            self.seglist = copyfrom.seglist.copy()
        elif extractfrom is not None:
            original, start, goal = extractfrom
            self.naa = goal - start + 1
            self.coord = original.coord[start:goal+1].copy()
            self.exists = original.exists[start:goal+1].copy()
            self.resname = original.resname[start:goal+1].copy()
            self.iaa2org = original.iaa2org[start:goal+1].copy()
            self.dihedral = original.dihedral[start:goal+1].copy()
            self.seglist = self.check_chainbreak()
        elif length >= 0:
            self.naa = length
            # np.float / np.bool were removed from NumPy (>=1.24); use builtins.
            self.coord = np.zeros((self.naa, len(self.atom2id), 3), dtype=float)
            self.exists = np.ones((self.naa, len(self.atom2id)), dtype=bool)
            # Only N, CA, C, O are assumed present on a fresh backbone.
            self.exists[:,self.atom2id['CB']] = False
            self.exists[:,self.atom2id['H']] = False
            self.exists[:,self.atom2id['1HA']] = False
            self.exists[:,self.atom2id['2HA']] = False
            self.resname = ['NON']*self.naa
            self.iaa2org = ['A0000']*self.naa
            self.dihedral = np.zeros((self.naa, 3), dtype=float)
    def __getitem__(self, ids):
        # Index directly into the coordinate array: self[iaa] -> (8, 3) matrix.
        return self.coord[ids]
    def __setitem__(self, ids, val):
        self.coord[ids] = val
    def __len__(self):
        # Length is the number of residues.
        return self.naa
    ## delete residues ##
    def delete(self, position, length):
        """Delete `length` residues starting at residue index `position`,
        then recompute dihedrals and chain-break segments."""
        naa_org = self.naa
        coord_org = self.coord
        exists_org = self.exists
        resname_org = self.resname
        iaa2org_org = self.iaa2org
        self.naa = self.naa - length
        self.coord = np.zeros((self.naa, len(self.atom2id), 3), dtype=float)
        self.exists = np.zeros((self.naa, len(self.atom2id)), dtype=bool)
        self.resname = ['NAN']*self.naa
        self.iaa2org = ['A0000']*self.naa
        iaa_new = 0
        for iaa in range(naa_org):
            # Skip the residues in the deleted window.
            if position <= iaa < position+length: continue
            self.coord[iaa_new] = coord_org[iaa]
            self.exists[iaa_new] = exists_org[iaa]
            self.resname[iaa_new] = resname_org[iaa]
            self.iaa2org[iaa_new] = iaa2org_org[iaa]
            iaa_new += 1
        self.calc_dihedral()
        self.seglist = self.check_chainbreak()
    ## insert blank residues ##
    def insert_blank(self, position, length, chain='A', resname='INS', calc_dihedral=True, check_chainbreak=True):
        """Insert `length` blank residues (zero coordinates) before residue
        index `position`.

        NOTE(review): with position == naa the gap is never opened (the loop
        below only triggers when an existing index equals `position`) --
        confirm appending at the end is not an intended use case.
        """
        naa_org = self.naa
        coord_org = self.coord
        exists_org = self.exists
        resname_org = self.resname
        iaa2org_org = self.iaa2org
        self.naa = self.naa + length
        self.coord = np.zeros((self.naa, len(self.atom2id), 3), dtype=float)
        self.exists = np.ones((self.naa, len(self.atom2id)), dtype=bool)
        self.exists[:,self.atom2id['CB']] = False
        self.exists[:,self.atom2id['H']] = False
        self.exists[:,self.atom2id['1HA']] = False
        self.exists[:,self.atom2id['2HA']] = False
        self.resname = [resname]*self.naa
        self.iaa2org = [chain+'0000']*self.naa
        iaa_new = 0
        for iaa in range(naa_org):
            if iaa == position:
                # Leave `length` blank slots before copying this residue.
                for i in range(length):
                    iaa_new += 1
            self.coord[iaa_new] = coord_org[iaa]
            self.exists[iaa_new] = exists_org[iaa]
            self.resname[iaa_new] = resname_org[iaa]
            self.iaa2org[iaa_new] = iaa2org_org[iaa]
            iaa_new += 1
        if calc_dihedral==True: self.calc_dihedral()
        if check_chainbreak==True: self.seglist = self.check_chainbreak()
    ## insert fragment ##
    def insert(self, position, insertion):
        """Insert another ProteinBackbone `insertion` before residue index
        `position`, copying its coordinates and annotations."""
        length = len(insertion)
        self.insert_blank(position, length, calc_dihedral=False, check_chainbreak=False)
        self.coord[position:position+length] = insertion.coord
        self.exists[position:position+length] = insertion.exists
        self.resname[position:position+length] = insertion.resname
        self.iaa2org[position:position+length] = insertion.iaa2org
        self.calc_dihedral()
        self.seglist = []
        self.chainbreak = []
        self.seglist = self.check_chainbreak()
    ## add virtual O atoms ##
    def addO(self, force=False):
        """Place ideal-geometry backbone O atoms where missing (or on every
        residue when force=True). The last residue is skipped: it has no
        following N/CA to anchor the geometry."""
        for iaa in range(len(self.coord)-1):
            if ((self.exists[iaa][self.atom2id['O']] == True) and (force==False)): continue
            co = _zmat2xyz(self.param['length_CO'],
                           self.param['angle_N_C_O'],
                           self.param['dhdrl_CA_N_C_O'],
                           self.coord[iaa+1][self.atom2id['CA']],
                           self.coord[iaa+1][self.atom2id['N']],
                           self.coord[iaa][self.atom2id['C']])
            self.coord[iaa][self.atom2id['O']][0] = co[0]
            self.coord[iaa][self.atom2id['O']][1] = co[1]
            self.coord[iaa][self.atom2id['O']][2] = co[2]
            self.exists[iaa][self.atom2id['O']] = True
    ## add virtual CB atoms ##
    def addCB(self, force=False):
        """Place ideal-geometry CB atoms where missing (or on every residue
        when force=True); the position is averaged from two constructions
        (anchored on N and on C) for symmetry."""
        for iaa in range(len(self.coord)):
            if ((self.exists[iaa][self.atom2id['CB']] == True) and (force==False)): continue
            cb1 = _zmat2xyz(self.param['length_CC'],
                            self.param['angle_N_CA_CB'],
                            self.param['dhdrl_C_N_CA_CB'],
                            self.coord[iaa][self.atom2id['C']],
                            self.coord[iaa][self.atom2id['N']],
                            self.coord[iaa][self.atom2id['CA']])
            cb2 = _zmat2xyz(self.param['length_CC'],
                            self.param['angle_CB_CA_C'],
                            self.param['dhdrl_N_C_CA_CB'],
                            self.coord[iaa][self.atom2id['N']],
                            self.coord[iaa][self.atom2id['C']],
                            self.coord[iaa][self.atom2id['CA']])
            cb = (cb1 + cb2)/2.0
            self.coord[iaa][self.atom2id['CB']][0] = cb[0]
            self.coord[iaa][self.atom2id['CB']][1] = cb[1]
            self.coord[iaa][self.atom2id['CB']][2] = cb[2]
            self.exists[iaa][self.atom2id['CB']] = True
    ## add virtual H atoms ##
    def addH(self, force=False):
        """Place ideal-geometry amide H atoms where missing (or on every
        residue when force=True). The first residue is skipped: it has no
        preceding C/CA to anchor the geometry."""
        for iaa in range(1,len(self.coord)):
            if ((self.exists[iaa][self.atom2id['H']] == True) and (force==False)): continue
            nh = _zmat2xyz(self.param['length_NH'],
                           self.param['angle_C_N_H'],
                           self.param['dhdrl_CA_C_N_H'],
                           self.coord[iaa-1][self.atom2id['CA']],
                           self.coord[iaa-1][self.atom2id['C']],
                           self.coord[iaa][self.atom2id['N']])
            self.coord[iaa][self.atom2id['H']][0] = nh[0]
            self.coord[iaa][self.atom2id['H']][1] = nh[1]
            self.coord[iaa][self.atom2id['H']][2] = nh[2]
            self.exists[iaa][self.atom2id['H']] = True
    def addHA(self, force=False):
        """Place ideal-geometry 1HA/2HA atoms where missing (or on every
        residue when force=True)."""
        for iaa in range(len(self.coord)):
            if ((self.exists[iaa][self.atom2id['1HA']] == False) or (force==True)):
                ha1 = _zmat2xyz(self.param['length_CH'],
                                self.param['angle_N_CA_1HA'],
                                self.param['dhdrl_C_N_CA_1HA'],
                                self.coord[iaa][self.atom2id['C']],
                                self.coord[iaa][self.atom2id['N']],
                                self.coord[iaa][self.atom2id['CA']])
                self.coord[iaa][self.atom2id['1HA']][0] = ha1[0]
                self.coord[iaa][self.atom2id['1HA']][1] = ha1[1]
                self.coord[iaa][self.atom2id['1HA']][2] = ha1[2]
                self.exists[iaa][self.atom2id['1HA']] = True
            if ((self.exists[iaa][self.atom2id['2HA']] == False) or (force==True)):
                ha2 = _zmat2xyz(self.param['length_CH'],
                                self.param['angle_N_CA_2HA'],
                                self.param['dhdrl_C_N_CA_2HA'],
                                self.coord[iaa][self.atom2id['C']],
                                self.coord[iaa][self.atom2id['N']],
                                self.coord[iaa][self.atom2id['CA']])
                self.coord[iaa][self.atom2id['2HA']][0] = ha2[0]
                self.coord[iaa][self.atom2id['2HA']][1] = ha2[1]
                self.coord[iaa][self.atom2id['2HA']][2] = ha2[2]
                self.exists[iaa][self.atom2id['2HA']] = True
    ## check chain break ##
    def check_chainbreak(self):
        """Detect chain breaks from unphysical C-N / N-CA / CA-C bond lengths.

        Rebuilds self.chainbreak and self.seglist, a list of inclusive
        (start, end) index tuples of continuous segments, which is also
        returned.

        NOTE(review): assumes naa > 0; the final append below reads the loop
        variable and would raise NameError on an empty backbone -- confirm.
        """
        self.chainbreak = [False] * self.naa
        self.seglist = []
        ini = 0
        for iaa in range(self.naa):
            # Peptide-bond length C(i-1)-N(i); the first residue has no
            # previous C, so use the ideal value (never flagged).
            cn = np.sqrt(((self.coord[iaa-1][self.atom2id['C']] - self.coord[iaa][self.atom2id['N']])**2).sum()) if iaa!=0 else self.param['length_CN']
            nca = np.sqrt(((self.coord[iaa][self.atom2id['N']] - self.coord[iaa][self.atom2id['CA']])**2).sum())
            cac = np.sqrt(((self.coord[iaa][self.atom2id['CA']] - self.coord[iaa][self.atom2id['C']])**2).sum())
            (break_cn, break_nca, break_cac) = (False, False, False)
            # A bond is broken when its length deviates from ideal by >25%.
            if cn < self.param['length_CN']/1.25 or self.param['length_CN']*1.25 < cn: (self.chainbreak[iaa], break_cn) = (True, True)
            if nca < self.param['length_NCA']/1.25 or self.param['length_NCA']*1.25 < nca: (self.chainbreak[iaa], break_nca) = (True, True)
            if cac < self.param['length_CAC']/1.25 or self.param['length_CAC']*1.25 < cac: (self.chainbreak[iaa], break_cac) = (True, True)
            if self.chainbreak[iaa] == True:
                if ini < iaa:
                    self.seglist.append((ini, iaa-1))
                # Only the inter-residue C-N bond broken: this residue starts
                # a new segment. Broken internal bonds exclude it entirely.
                ini = iaa if (break_cn, break_nca, break_cac) == (True, False, False) else iaa+1
        if ini < iaa: self.seglist.append((ini, iaa))
        return self.seglist
    ## calc dihedral angle ##
    def calc_dihedral(self):
        """Recompute (phi, psi, omega) in degrees for every residue; angles
        whose neighbouring atoms are missing stay 0."""
        self.dihedral = np.zeros((self.naa, 3), dtype=float)
        for iaa in range(self.naa):
            # phi: C(i-1)-N(i)-CA(i)-C(i)
            if (iaa > 0) and (self.exists[iaa-1][self.atom2id['C']] == True):
                self.dihedral[iaa][0] = _xyz2dihedral(self.coord[iaa-1][self.atom2id['C']],
                                                      self.coord[iaa][self.atom2id['N']],
                                                      self.coord[iaa][self.atom2id['CA']],
                                                      self.coord[iaa][self.atom2id['C']])
            # psi: N(i)-CA(i)-C(i)-N(i+1)
            if (iaa < self.naa-1) and (self.exists[iaa+1][self.atom2id['N']] == True):
                self.dihedral[iaa][1] = _xyz2dihedral(self.coord[iaa][self.atom2id['N']],
                                                      self.coord[iaa][self.atom2id['CA']],
                                                      self.coord[iaa][self.atom2id['C']],
                                                      self.coord[iaa+1][self.atom2id['N']])
            # omega: CA(i)-C(i)-N(i+1)-CA(i+1)
            if (iaa < self.naa-1) and (self.exists[iaa+1][self.atom2id['CA']] == True):
                self.dihedral[iaa][2] = _xyz2dihedral(self.coord[iaa][self.atom2id['CA']],
                                                      self.coord[iaa][self.atom2id['C']],
                                                      self.coord[iaa+1][self.atom2id['N']],
                                                      self.coord[iaa+1][self.atom2id['CA']])
    ## distance matrix ##
    def calc_distmat(self, atomtype='CA'):
        """Compute the all-vs-all residue distance matrix (naa, naa) for the
        given atom type and store it in self.distmat."""
        points = self.coord[:,self.atom2id[atomtype],:]
        self.distmat = np.sqrt( np.sum((points[np.newaxis,:,:] - points[:,np.newaxis,:])**2, axis=2) )
    ## get nearest N residues ##
    def get_nearestN(self, N, atomtype='CA', distmat=True, rm_self=True):
        """Return per residue the indices of its N nearest residues, sorted
        by distance. When rm_self, the residue itself is excluded; when
        distmat, the distance matrix is (re)computed first."""
        if distmat:
            self.calc_distmat(atomtype=atomtype)
        if rm_self:
            # Self distance is 0 and always in the top-N; fetch one extra.
            N = N+1
        args_topN_unsorted = np.argpartition(self.distmat, N)[:,:N]
        # np.int was removed from NumPy; use the builtin int dtype.
        args_topN_sorted = np.empty((self.distmat.shape[0], N), dtype=int)
        for i in range(self.distmat.shape[0]):
            vals = self.distmat[i][args_topN_unsorted[i]]
            indices = np.argsort(vals)
            args_topN_sorted[i] = args_topN_unsorted[i][indices]
        if rm_self:
            # Drop the closest hit (the residue itself).
            args_topN_sorted = args_topN_sorted[:,1:]
        return args_topN_sorted
    ## print pdb format ##
    def printpdb(self, file=sys.stdout, chain=None, start=None, region=None):
        """Write the backbone as PDB ATOM records to `file`.

        chain/start override the stored chain ID and residue numbering;
        region=(first, last) restricts output to that inclusive range.

        NOTE(review): once `chain` is derived from the first residue it is
        reused for every later residue -- confirm multi-chain output is not
        expected here.
        """
        icount = 0
        if region is not None:
            outrange = range(region[0], region[1]+1)
        else:
            outrange = range(len(self.coord))
        for iaa in outrange:
            if chain is None:
                chain = self.iaa2org[iaa][0:1]
            if start is None:
                resnum = int(self.iaa2org[iaa][1:5])
            else:
                resnum = int(start) + iaa - outrange[0]
            for iatom in range(len(self.id2atom)):
                # Skip atoms that were never read or constructed.
                if(self.exists[iaa][iatom] == False): continue
                icount += 1
                file.write("ATOM%7d  %-3s %3s %s%4d    %8.3f%8.3f%8.3f  %4.2f%6.2f\n"
                           % (icount, self.id2atom[iatom], self.resname[iaa],
                              chain, resnum,
                              self.coord[iaa][iatom][0],
                              self.coord[iaa][iatom][1],
                              self.coord[iaa][iatom][2],
                              1.0, 100.0))
    ## read pdb file ##
    def readpdb(self, file):
        """Read backbone atoms from a PDB file: a first pass over CA records
        determines the residue count/order, a second pass fills coordinates
        for every atom type known to atom2id."""
        with open(file, "r") as fh:
            lines = fh.read().splitlines()
        # first pass: count residues (one per CA record) and map original ids
        self.naa = 0
        self.org2iaa = {}
        for l in lines:
            (header, atomtype, resname, chain, iaa_org) = (l[0:4], l[12:16].strip(), l[17:20], l[21:22], l[22:27])
            if not ((header == "ATOM") and (atomtype == 'CA')) : continue
            self.org2iaa[(chain+iaa_org)] = self.naa
            self.naa += 1
        # second pass: read ATOM lines
        self.coord = np.zeros((self.naa, len(self.atom2id), 3), dtype=float)
        self.exists = np.zeros((self.naa, len(self.atom2id)), dtype=bool)
        self.resname = ['NAN']*self.naa
        self.iaa2org = ['A0000 ']*self.naa
        for l in lines:
            (header, atomtype, resname, chain, iaa_org) = (l[0:4], l[12:16].strip(), l[17:20], l[21:22], l[22:27])
            if not (header == "ATOM"): continue
            if atomtype not in self.atom2id: continue
            org = (chain+iaa_org)
            iaa = self.org2iaa.get(org)
            # Skip residues that have no CA record (not indexed above).
            if iaa is None: continue
            id_atom = self.atom2id[atomtype]
            # np.float(...) was removed from NumPy; use the builtin float.
            coord = [float(l[30:38]), float(l[38:46]), float(l[46:54])]
            self.coord[iaa][id_atom][0] = coord[0]
            self.coord[iaa][id_atom][1] = coord[1]
            self.coord[iaa][id_atom][2] = coord[2]
            self.exists[iaa][id_atom] = True
            self.resname[iaa] = resname
            self.iaa2org[iaa] = org
        return
#### Functions ####
def _zmat2xyz(bond, angle, dihedral, one, two, three):
    """Place a new atom from internal (Z-matrix) coordinates.

    The new atom sits at distance `bond` from `three`, making bond angle
    `angle` (two-three-new) and dihedral `dihedral` (one-two-three-new);
    angles are in radians.  Returns a length-4 homogeneous vector whose
    first three components are the xyz position.
    """
    # position of the new atom in the local frame of the three anchor atoms
    # (np.float was removed from NumPy; use the builtin float dtype)
    oldvec = np.ones(4, dtype=float)
    oldvec[0] = bond * np.sin(angle) * np.sin(dihedral)
    oldvec[1] = bond * np.sin(angle) * np.cos(dihedral)
    oldvec[2] = bond * np.cos(angle)
    # transform from the local frame into the global frame
    mat = _viewat(three, two, one)
    newvec = np.dot(mat, oldvec)
    # return
    return newvec
def _viewat(p1, p2, p3):
# vector #
p12 = p2 - p1
p13 = p3 - p1
# normalize #
z = p12 / np.linalg.norm(p12)
# crossproduct #
x = np.cross(p13, p12)
x /= np.linalg.norm(x)
y = np.cross(z, x)
y /= np.linalg.norm(y)
# transpation matrix
mat = np.zeros((4, 4), dtype=np.float)
for i in range(3):
mat[i][0] = x[i]
mat[i][1] = y[i]
mat[i][2] = z[i]
mat[i][3] = p1[i]
mat[3][3] = 1.0
# return
return mat
def _xyz2dihedral(p1, p2, p3, p4):
# small val #
eps = 0.0000001
# bond vector
v1 = p2 - p1
v2 = p3 - p2
v3 = p4 - p3
# perpendicular vector #
perp123 = np.cross(v1, v2)
perp234 = np.cross(v2, v3)
perp123 /= np.linalg.norm(perp123)
perp234 /= np.linalg.norm(perp234)
# scalar product #
scp = np.dot(perp123, perp234)
scp = scp - eps if scp > 1 else scp
scp = scp + eps if scp < -1 else scp
# absolute angle #
angle = np.rad2deg( np.arccos(scp) )
# return #
return angle if np.dot(v1, perp234) > 0 else -angle
| """
The MIT License (MIT)
Copyright (c) 2020 <NAME>.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sys
import numpy as np
class ProteinBackbone:
    """
    Simple class for handling protein backbone structure.

    Attributes
    ----------
    naa : int
        Number of residues.
    coord : numpy float matrix (naa, 8, 3)
        3D coordinates of the backbone atoms (N,CA,C,O,CB,H,1HA,2HA).
    exists : numpy bool matrix (naa, 8)
        Existence of the coordinates.
    resname : list of str (naa)
        Residue name.
    iaa2org : list of str (naa)
        Original chain ID and residue number.
    org2iaa : dict
        Convert original chain ID and residue number to system residue number.
    dihedral : numpy float matrix (naa, 3)
        Dihedral angles (phi, psi, omega) in degrees.
    distmat : numpy float matrix (naa, naa)
        Distance matrix.
    chainbreak : list of bool (naa)
        Existence of chainbreak.
    seglist : list of tuples (start_aa, end_aa)
        List of continuous segments.
    """
    def __init__(self, length=0, file=None, copyfrom=None, extractfrom=None, calc_dihedral=True, check_chainbreak=True):
        """
        Parameters
        ----------
        length : int
            Number of residues for an empty backbone (used only when no
            other source argument is given).
        file : str
            Path to the PDB file.
        copyfrom : ProteinBackbone
            Original instance to be copied.
        extractfrom : tuple (ProteinBackbone, int, int)
            (original, start, goal): extract residues start..goal inclusive.
        calc_dihedral : bool
            With calculating dihedral angles (file branch only).
        check_chainbreak : bool
            With checking chainbreak (file branch only).
        """
        self.atom2id = {'N':0, 'CA':1, 'C':2, 'O':3, 'CB':4, 'H':5, '1HA':6, '2HA':7}
        self.id2atom = ['N', 'CA', 'C', 'O', 'CB', 'H', '1HA', '2HA']
        # Ideal backbone geometry: angles/dihedrals in radians, lengths in angstroms.
        self.param = {'angle_N_CA_C':np.deg2rad(111.2), 'angle_CA_C_N':np.deg2rad(116.2),
                      'angle_C_N_CA':np.deg2rad(121.7),
                      'angle_N_CA_CB':np.deg2rad(110.6), 'angle_CB_CA_C':np.deg2rad(110.6),
                      'angle_C_N_H':np.deg2rad(123.0), 'angle_N_C_O':np.deg2rad(122.7),
                      'angle_N_CA_1HA':np.deg2rad(109.5), 'angle_N_CA_2HA':np.deg2rad(109.5),
                      'dhdrl_C_N_CA_CB':np.deg2rad(-121.4), 'dhdrl_N_C_CA_CB':np.deg2rad(121.4),
                      'dhdrl_CA_C_N_H':np.deg2rad(0.0), 'dhdrl_CA_N_C_O':np.deg2rad(0.0),
                      'dhdrl_C_N_CA_1HA':np.deg2rad(121.4), 'dhdrl_C_N_CA_2HA':np.deg2rad(-121.4),
                      'length_CN':1.33, 'length_NCA':1.46, 'length_CAC':1.52,
                      'length_CC':1.54, 'length_CO':1.24, 'length_NH':1.01, 'length_CH':1.09}
        self.chainbreak = []
        self.seglist = []
        if file is not None:
            self.file = file
            self.readpdb(self.file)
            if calc_dihedral == True: self.calc_dihedral()
            if check_chainbreak == True: self.seglist = self.check_chainbreak()
        elif copyfrom is not None:
            self.naa = copyfrom.naa
            self.coord = copyfrom.coord.copy()
            self.exists = copyfrom.exists.copy()
            self.resname = copyfrom.resname.copy()
            self.iaa2org = copyfrom.iaa2org.copy()
            self.dihedral = copyfrom.dihedral.copy()
            self.chainbreak = copyfrom.chainbreak.copy()
            self.seglist = copyfrom.seglist.copy()
        elif extractfrom is not None:
            original, start, goal = extractfrom
            self.naa = goal - start + 1
            self.coord = original.coord[start:goal+1].copy()
            self.exists = original.exists[start:goal+1].copy()
            self.resname = original.resname[start:goal+1].copy()
            self.iaa2org = original.iaa2org[start:goal+1].copy()
            self.dihedral = original.dihedral[start:goal+1].copy()
            self.seglist = self.check_chainbreak()
        elif length >= 0:
            self.naa = length
            # np.float / np.bool were removed from NumPy (>=1.24); use builtins.
            self.coord = np.zeros((self.naa, len(self.atom2id), 3), dtype=float)
            self.exists = np.ones((self.naa, len(self.atom2id)), dtype=bool)
            # Only N, CA, C, O are assumed present on a fresh backbone.
            self.exists[:,self.atom2id['CB']] = False
            self.exists[:,self.atom2id['H']] = False
            self.exists[:,self.atom2id['1HA']] = False
            self.exists[:,self.atom2id['2HA']] = False
            self.resname = ['NON']*self.naa
            self.iaa2org = ['A0000']*self.naa
            self.dihedral = np.zeros((self.naa, 3), dtype=float)
    def __getitem__(self, ids):
        # Index directly into the coordinate array: self[iaa] -> (8, 3) matrix.
        return self.coord[ids]
    def __setitem__(self, ids, val):
        self.coord[ids] = val
    def __len__(self):
        # Length is the number of residues.
        return self.naa
    ## delete residues ##
    def delete(self, position, length):
        """Delete `length` residues starting at residue index `position`,
        then recompute dihedrals and chain-break segments."""
        naa_org = self.naa
        coord_org = self.coord
        exists_org = self.exists
        resname_org = self.resname
        iaa2org_org = self.iaa2org
        self.naa = self.naa - length
        self.coord = np.zeros((self.naa, len(self.atom2id), 3), dtype=float)
        self.exists = np.zeros((self.naa, len(self.atom2id)), dtype=bool)
        self.resname = ['NAN']*self.naa
        self.iaa2org = ['A0000']*self.naa
        iaa_new = 0
        for iaa in range(naa_org):
            # Skip the residues in the deleted window.
            if position <= iaa < position+length: continue
            self.coord[iaa_new] = coord_org[iaa]
            self.exists[iaa_new] = exists_org[iaa]
            self.resname[iaa_new] = resname_org[iaa]
            self.iaa2org[iaa_new] = iaa2org_org[iaa]
            iaa_new += 1
        self.calc_dihedral()
        self.seglist = self.check_chainbreak()
    ## insert blank residues ##
    def insert_blank(self, position, length, chain='A', resname='INS', calc_dihedral=True, check_chainbreak=True):
        """Insert `length` blank residues (zero coordinates) before residue
        index `position`.

        NOTE(review): with position == naa the gap is never opened (the loop
        below only triggers when an existing index equals `position`) --
        confirm appending at the end is not an intended use case.
        """
        naa_org = self.naa
        coord_org = self.coord
        exists_org = self.exists
        resname_org = self.resname
        iaa2org_org = self.iaa2org
        self.naa = self.naa + length
        self.coord = np.zeros((self.naa, len(self.atom2id), 3), dtype=float)
        self.exists = np.ones((self.naa, len(self.atom2id)), dtype=bool)
        self.exists[:,self.atom2id['CB']] = False
        self.exists[:,self.atom2id['H']] = False
        self.exists[:,self.atom2id['1HA']] = False
        self.exists[:,self.atom2id['2HA']] = False
        self.resname = [resname]*self.naa
        self.iaa2org = [chain+'0000']*self.naa
        iaa_new = 0
        for iaa in range(naa_org):
            if iaa == position:
                # Leave `length` blank slots before copying this residue.
                for i in range(length):
                    iaa_new += 1
            self.coord[iaa_new] = coord_org[iaa]
            self.exists[iaa_new] = exists_org[iaa]
            self.resname[iaa_new] = resname_org[iaa]
            self.iaa2org[iaa_new] = iaa2org_org[iaa]
            iaa_new += 1
        if calc_dihedral==True: self.calc_dihedral()
        if check_chainbreak==True: self.seglist = self.check_chainbreak()
    ## insert fragment ##
    def insert(self, position, insertion):
        """Insert another ProteinBackbone `insertion` before residue index
        `position`, copying its coordinates and annotations."""
        length = len(insertion)
        self.insert_blank(position, length, calc_dihedral=False, check_chainbreak=False)
        self.coord[position:position+length] = insertion.coord
        self.exists[position:position+length] = insertion.exists
        self.resname[position:position+length] = insertion.resname
        self.iaa2org[position:position+length] = insertion.iaa2org
        self.calc_dihedral()
        self.seglist = []
        self.chainbreak = []
        self.seglist = self.check_chainbreak()
    ## add virtual O atoms ##
    def addO(self, force=False):
        """Place ideal-geometry backbone O atoms where missing (or on every
        residue when force=True). The last residue is skipped: it has no
        following N/CA to anchor the geometry."""
        for iaa in range(len(self.coord)-1):
            if ((self.exists[iaa][self.atom2id['O']] == True) and (force==False)): continue
            co = _zmat2xyz(self.param['length_CO'],
                           self.param['angle_N_C_O'],
                           self.param['dhdrl_CA_N_C_O'],
                           self.coord[iaa+1][self.atom2id['CA']],
                           self.coord[iaa+1][self.atom2id['N']],
                           self.coord[iaa][self.atom2id['C']])
            self.coord[iaa][self.atom2id['O']][0] = co[0]
            self.coord[iaa][self.atom2id['O']][1] = co[1]
            self.coord[iaa][self.atom2id['O']][2] = co[2]
            self.exists[iaa][self.atom2id['O']] = True
    ## add virtual CB atoms ##
    def addCB(self, force=False):
        """Place ideal-geometry CB atoms where missing (or on every residue
        when force=True); the position is averaged from two constructions
        (anchored on N and on C) for symmetry."""
        for iaa in range(len(self.coord)):
            if ((self.exists[iaa][self.atom2id['CB']] == True) and (force==False)): continue
            cb1 = _zmat2xyz(self.param['length_CC'],
                            self.param['angle_N_CA_CB'],
                            self.param['dhdrl_C_N_CA_CB'],
                            self.coord[iaa][self.atom2id['C']],
                            self.coord[iaa][self.atom2id['N']],
                            self.coord[iaa][self.atom2id['CA']])
            cb2 = _zmat2xyz(self.param['length_CC'],
                            self.param['angle_CB_CA_C'],
                            self.param['dhdrl_N_C_CA_CB'],
                            self.coord[iaa][self.atom2id['N']],
                            self.coord[iaa][self.atom2id['C']],
                            self.coord[iaa][self.atom2id['CA']])
            cb = (cb1 + cb2)/2.0
            self.coord[iaa][self.atom2id['CB']][0] = cb[0]
            self.coord[iaa][self.atom2id['CB']][1] = cb[1]
            self.coord[iaa][self.atom2id['CB']][2] = cb[2]
            self.exists[iaa][self.atom2id['CB']] = True
    ## add virtual H atoms ##
    def addH(self, force=False):
        """Place ideal-geometry amide H atoms where missing (or on every
        residue when force=True). The first residue is skipped: it has no
        preceding C/CA to anchor the geometry."""
        for iaa in range(1,len(self.coord)):
            if ((self.exists[iaa][self.atom2id['H']] == True) and (force==False)): continue
            nh = _zmat2xyz(self.param['length_NH'],
                           self.param['angle_C_N_H'],
                           self.param['dhdrl_CA_C_N_H'],
                           self.coord[iaa-1][self.atom2id['CA']],
                           self.coord[iaa-1][self.atom2id['C']],
                           self.coord[iaa][self.atom2id['N']])
            self.coord[iaa][self.atom2id['H']][0] = nh[0]
            self.coord[iaa][self.atom2id['H']][1] = nh[1]
            self.coord[iaa][self.atom2id['H']][2] = nh[2]
            self.exists[iaa][self.atom2id['H']] = True
    def addHA(self, force=False):
        """Place ideal-geometry 1HA/2HA atoms where missing (or on every
        residue when force=True)."""
        for iaa in range(len(self.coord)):
            if ((self.exists[iaa][self.atom2id['1HA']] == False) or (force==True)):
                ha1 = _zmat2xyz(self.param['length_CH'],
                                self.param['angle_N_CA_1HA'],
                                self.param['dhdrl_C_N_CA_1HA'],
                                self.coord[iaa][self.atom2id['C']],
                                self.coord[iaa][self.atom2id['N']],
                                self.coord[iaa][self.atom2id['CA']])
                self.coord[iaa][self.atom2id['1HA']][0] = ha1[0]
                self.coord[iaa][self.atom2id['1HA']][1] = ha1[1]
                self.coord[iaa][self.atom2id['1HA']][2] = ha1[2]
                self.exists[iaa][self.atom2id['1HA']] = True
            if ((self.exists[iaa][self.atom2id['2HA']] == False) or (force==True)):
                ha2 = _zmat2xyz(self.param['length_CH'],
                                self.param['angle_N_CA_2HA'],
                                self.param['dhdrl_C_N_CA_2HA'],
                                self.coord[iaa][self.atom2id['C']],
                                self.coord[iaa][self.atom2id['N']],
                                self.coord[iaa][self.atom2id['CA']])
                self.coord[iaa][self.atom2id['2HA']][0] = ha2[0]
                self.coord[iaa][self.atom2id['2HA']][1] = ha2[1]
                self.coord[iaa][self.atom2id['2HA']][2] = ha2[2]
                self.exists[iaa][self.atom2id['2HA']] = True
    ## check chain break ##
    def check_chainbreak(self):
        """Detect chain breaks from unphysical C-N / N-CA / CA-C bond lengths.

        Rebuilds self.chainbreak and self.seglist, a list of inclusive
        (start, end) index tuples of continuous segments, which is also
        returned.

        NOTE(review): assumes naa > 0; the final append below reads the loop
        variable and would raise NameError on an empty backbone -- confirm.
        """
        self.chainbreak = [False] * self.naa
        self.seglist = []
        ini = 0
        for iaa in range(self.naa):
            # Peptide-bond length C(i-1)-N(i); the first residue has no
            # previous C, so use the ideal value (never flagged).
            cn = np.sqrt(((self.coord[iaa-1][self.atom2id['C']] - self.coord[iaa][self.atom2id['N']])**2).sum()) if iaa!=0 else self.param['length_CN']
            nca = np.sqrt(((self.coord[iaa][self.atom2id['N']] - self.coord[iaa][self.atom2id['CA']])**2).sum())
            cac = np.sqrt(((self.coord[iaa][self.atom2id['CA']] - self.coord[iaa][self.atom2id['C']])**2).sum())
            (break_cn, break_nca, break_cac) = (False, False, False)
            # A bond is broken when its length deviates from ideal by >25%.
            if cn < self.param['length_CN']/1.25 or self.param['length_CN']*1.25 < cn: (self.chainbreak[iaa], break_cn) = (True, True)
            if nca < self.param['length_NCA']/1.25 or self.param['length_NCA']*1.25 < nca: (self.chainbreak[iaa], break_nca) = (True, True)
            if cac < self.param['length_CAC']/1.25 or self.param['length_CAC']*1.25 < cac: (self.chainbreak[iaa], break_cac) = (True, True)
            if self.chainbreak[iaa] == True:
                if ini < iaa:
                    self.seglist.append((ini, iaa-1))
                # Only the inter-residue C-N bond broken: this residue starts
                # a new segment. Broken internal bonds exclude it entirely.
                ini = iaa if (break_cn, break_nca, break_cac) == (True, False, False) else iaa+1
        if ini < iaa: self.seglist.append((ini, iaa))
        return self.seglist
    ## calc dihedral angle ##
    def calc_dihedral(self):
        """Recompute (phi, psi, omega) in degrees for every residue; angles
        whose neighbouring atoms are missing stay 0."""
        self.dihedral = np.zeros((self.naa, 3), dtype=float)
        for iaa in range(self.naa):
            # phi: C(i-1)-N(i)-CA(i)-C(i)
            if (iaa > 0) and (self.exists[iaa-1][self.atom2id['C']] == True):
                self.dihedral[iaa][0] = _xyz2dihedral(self.coord[iaa-1][self.atom2id['C']],
                                                      self.coord[iaa][self.atom2id['N']],
                                                      self.coord[iaa][self.atom2id['CA']],
                                                      self.coord[iaa][self.atom2id['C']])
            # psi: N(i)-CA(i)-C(i)-N(i+1)
            if (iaa < self.naa-1) and (self.exists[iaa+1][self.atom2id['N']] == True):
                self.dihedral[iaa][1] = _xyz2dihedral(self.coord[iaa][self.atom2id['N']],
                                                      self.coord[iaa][self.atom2id['CA']],
                                                      self.coord[iaa][self.atom2id['C']],
                                                      self.coord[iaa+1][self.atom2id['N']])
            # omega: CA(i)-C(i)-N(i+1)-CA(i+1)
            if (iaa < self.naa-1) and (self.exists[iaa+1][self.atom2id['CA']] == True):
                self.dihedral[iaa][2] = _xyz2dihedral(self.coord[iaa][self.atom2id['CA']],
                                                      self.coord[iaa][self.atom2id['C']],
                                                      self.coord[iaa+1][self.atom2id['N']],
                                                      self.coord[iaa+1][self.atom2id['CA']])
    ## distance matrix ##
    def calc_distmat(self, atomtype='CA'):
        """Compute the all-vs-all residue distance matrix (naa, naa) for the
        given atom type and store it in self.distmat."""
        points = self.coord[:,self.atom2id[atomtype],:]
        self.distmat = np.sqrt( np.sum((points[np.newaxis,:,:] - points[:,np.newaxis,:])**2, axis=2) )
    ## get nearest N residues ##
    def get_nearestN(self, N, atomtype='CA', distmat=True, rm_self=True):
        """Return per residue the indices of its N nearest residues, sorted
        by distance. When rm_self, the residue itself is excluded; when
        distmat, the distance matrix is (re)computed first."""
        if distmat:
            self.calc_distmat(atomtype=atomtype)
        if rm_self:
            # Self distance is 0 and always in the top-N; fetch one extra.
            N = N+1
        args_topN_unsorted = np.argpartition(self.distmat, N)[:,:N]
        # np.int was removed from NumPy; use the builtin int dtype.
        args_topN_sorted = np.empty((self.distmat.shape[0], N), dtype=int)
        for i in range(self.distmat.shape[0]):
            vals = self.distmat[i][args_topN_unsorted[i]]
            indices = np.argsort(vals)
            args_topN_sorted[i] = args_topN_unsorted[i][indices]
        if rm_self:
            # Drop the closest hit (the residue itself).
            args_topN_sorted = args_topN_sorted[:,1:]
        return args_topN_sorted
    ## print pdb format ##
    def printpdb(self, file=sys.stdout, chain=None, start=None, region=None):
        """Write the backbone as PDB ATOM records to `file`.

        chain/start override the stored chain ID and residue numbering;
        region=(first, last) restricts output to that inclusive range.

        NOTE(review): once `chain` is derived from the first residue it is
        reused for every later residue -- confirm multi-chain output is not
        expected here.
        """
        icount = 0
        if region is not None:
            outrange = range(region[0], region[1]+1)
        else:
            outrange = range(len(self.coord))
        for iaa in outrange:
            if chain is None:
                chain = self.iaa2org[iaa][0:1]
            if start is None:
                resnum = int(self.iaa2org[iaa][1:5])
            else:
                resnum = int(start) + iaa - outrange[0]
            for iatom in range(len(self.id2atom)):
                # Skip atoms that were never read or constructed.
                if(self.exists[iaa][iatom] == False): continue
                icount += 1
                file.write("ATOM%7d  %-3s %3s %s%4d    %8.3f%8.3f%8.3f  %4.2f%6.2f\n"
                           % (icount, self.id2atom[iatom], self.resname[iaa],
                              chain, resnum,
                              self.coord[iaa][iatom][0],
                              self.coord[iaa][iatom][1],
                              self.coord[iaa][iatom][2],
                              1.0, 100.0))
    ## read pdb file ##
    def readpdb(self, file):
        """Read backbone atoms from a PDB file: a first pass over CA records
        determines the residue count/order, a second pass fills coordinates
        for every atom type known to atom2id."""
        with open(file, "r") as fh:
            lines = fh.read().splitlines()
        # first pass: count residues (one per CA record) and map original ids
        self.naa = 0
        self.org2iaa = {}
        for l in lines:
            (header, atomtype, resname, chain, iaa_org) = (l[0:4], l[12:16].strip(), l[17:20], l[21:22], l[22:27])
            if not ((header == "ATOM") and (atomtype == 'CA')) : continue
            self.org2iaa[(chain+iaa_org)] = self.naa
            self.naa += 1
        # second pass: read ATOM lines
        self.coord = np.zeros((self.naa, len(self.atom2id), 3), dtype=float)
        self.exists = np.zeros((self.naa, len(self.atom2id)), dtype=bool)
        self.resname = ['NAN']*self.naa
        self.iaa2org = ['A0000 ']*self.naa
        for l in lines:
            (header, atomtype, resname, chain, iaa_org) = (l[0:4], l[12:16].strip(), l[17:20], l[21:22], l[22:27])
            if not (header == "ATOM"): continue
            if atomtype not in self.atom2id: continue
            org = (chain+iaa_org)
            iaa = self.org2iaa.get(org)
            # Skip residues that have no CA record (not indexed above).
            if iaa is None: continue
            id_atom = self.atom2id[atomtype]
            # np.float(...) was removed from NumPy; use the builtin float.
            coord = [float(l[30:38]), float(l[38:46]), float(l[46:54])]
            self.coord[iaa][id_atom][0] = coord[0]
            self.coord[iaa][id_atom][1] = coord[1]
            self.coord[iaa][id_atom][2] = coord[2]
            self.exists[iaa][id_atom] = True
            self.resname[iaa] = resname
            self.iaa2org[iaa] = org
        return
#### Functions ####
def _zmat2xyz(bond, angle, dihedral, one, two, three):
    """Convert one internal (Z-matrix) coordinate to Cartesian coordinates.

    bond, angle, dihedral are taken relative to the three reference
    positions one/two/three (angle and dihedral in radians).  Returns a
    length-4 homogeneous vector whose first three entries are the new
    atom position.
    """
    # local position in the frame spanned by the reference atoms.
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    oldvec = np.ones(4, dtype=float)
    oldvec[0] = bond * np.sin(angle) * np.sin(dihedral)
    oldvec[1] = bond * np.sin(angle) * np.cos(dihedral)
    oldvec[2] = bond * np.cos(angle)
    # transform the local vector into the global frame
    mat = _viewat(three, two, one)
    newvec = np.dot(mat, oldvec)
    return newvec
def _viewat(p1, p2, p3):
# vector #
p12 = p2 - p1
p13 = p3 - p1
# normalize #
z = p12 / np.linalg.norm(p12)
# crossproduct #
x = np.cross(p13, p12)
x /= np.linalg.norm(x)
y = np.cross(z, x)
y /= np.linalg.norm(y)
# transpation matrix
mat = np.zeros((4, 4), dtype=np.float)
for i in range(3):
mat[i][0] = x[i]
mat[i][1] = y[i]
mat[i][2] = z[i]
mat[i][3] = p1[i]
mat[3][3] = 1.0
# return
return mat
def _xyz2dihedral(p1, p2, p3, p4):
# small val #
eps = 0.0000001
# bond vector
v1 = p2 - p1
v2 = p3 - p2
v3 = p4 - p3
# perpendicular vector #
perp123 = np.cross(v1, v2)
perp234 = np.cross(v2, v3)
perp123 /= np.linalg.norm(perp123)
perp234 /= np.linalg.norm(perp234)
# scalar product #
scp = np.dot(perp123, perp234)
scp = scp - eps if scp > 1 else scp
scp = scp + eps if scp < -1 else scp
# absolute angle #
angle = np.rad2deg( np.arccos(scp) )
# return #
return angle if np.dot(v1, perp234) > 0 else -angle
| en | 0.560323 | The MIT License (MIT) Copyright (c) 2020 <NAME>. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Simple class for handling protein backbone structure. Attributes ---------- naa : int Number of residues. coord : numpy float matrix (naa, 6, 3) 3D Coordinates of 6 backbone atoms (N,CA,C,O,CB,H). exists : numpy bool matrix (naa, 6) Existence of the coodinates. resname : numpy str vector (naa) Residue name. iaa2org : numpy str vector (naa) Original chain ID and residue number. org2iaa : dict Convert original chain ID and residue number to system residue number. dihedral : numpy float matrix (naa, 3) Dihedral angles (phi, psi, omega). distmat : numpy float matrix (naa, naa) Distance matrix. chainbreak : numpy bool vector (naa) Existence of chainbreak seglist : list of tuples (start_aa, end_aa) List of continuous segments Parameters ---------- file : str Path to the PDB file. copyfrom : instance of this class (ProteinBackbone). Original instance to be copied. length : int Number of residues. 
calc_dihedral : bool with calculating dihedral angles check_chainbreak : bool with checking chainbreak ## delete residues ## ## insert blank residues ## ## insert fragment ## ## add virtual O atoms ## ## add virtual CB atoms ## ## add vitual H atoms ## ## check chain break ## ## calc dihedral angle ## ## distance matrix ## ## get nearest N residues ## ## print pdb format ## ## read pdb file ## # exists protein length # read ATOM lines #### Functions #### # return # vector # # normalize # # crossproduct # # transpation matrix # return # small val # # bond vector # perpendicular vector # # scalar product # # absolute angle # # return # | 1.93391 | 2 |
STK08-temp-sensor.py | joachimlindborg/onion_omega | 0 | 6615190 | # import modules and classes
import time
from temperatureSensor import TemperatureSensor
import oneWire
# setup onewire and polling interval
oneWireGpio = 19 # set the sensor GPIO
pollingInterval = 1 # seconds
def __main__():
    """Poll a 1-Wire temperature sensor forever, printing one reading per interval."""
    # check if 1-Wire is setup in the kernel
    if not oneWire.setupOneWire(str(oneWireGpio)):
        print "Kernel module could not be inserted. Please reboot and try again."
        return -1
    # get the address of the temperature sensor
    # it should be the only device connected in this experiment
    sensorAddress = oneWire.scanOneAddress()
    # instantiate the temperature sensor object
    sensor = TemperatureSensor("oneWire", { "address": sensorAddress, "gpio": oneWireGpio })
    if not sensor.ready:
        print "Sensor was not set up correctly. Please make sure that your sensor is firmly connected to the GPIO specified above and try again."
        return -1
    # infinite loop - runs main program code continuously
    while 1:
        # check and print the temperature (the " C" suffix marks Celsius)
        value = sensor.readValue()
        print "T = " + str(value) + " C"
        time.sleep(pollingInterval)
if __name__ == '__main__':
    __main__()
| # import modules and classes
import time
from temperatureSensor import TemperatureSensor
import oneWire
# setup onewire and polling interval
oneWireGpio = 19 # set the sensor GPIO
pollingInterval = 1 # seconds
def __main__():
    """Poll a 1-Wire temperature sensor forever, printing one reading per interval."""
    # check if 1-Wire is setup in the kernel
    if not oneWire.setupOneWire(str(oneWireGpio)):
        print "Kernel module could not be inserted. Please reboot and try again."
        return -1
    # get the address of the temperature sensor
    # it should be the only device connected in this experiment
    sensorAddress = oneWire.scanOneAddress()
    # instantiate the temperature sensor object
    sensor = TemperatureSensor("oneWire", { "address": sensorAddress, "gpio": oneWireGpio })
    if not sensor.ready:
        print "Sensor was not set up correctly. Please make sure that your sensor is firmly connected to the GPIO specified above and try again."
        return -1
    # infinite loop - runs main program code continuously
    while 1:
        # check and print the temperature (the " C" suffix marks Celsius)
        value = sensor.readValue()
        print "T = " + str(value) + " C"
        time.sleep(pollingInterval)
if __name__ == '__main__':
    __main__()
| en | 0.807351 | # import modules and classes # setup onewire and polling interval # set the sensor GPIO # seconds # check if 1-Wire is setup in the kernel # get the address of the temperature sensor # it should be the only device connected in this experiment # instantiate the temperature sensor object # infinite loop - runs main program code continuously # check and print the temperature | 3.46507 | 3 |
jintesting.py | bored117/CorePro | 0 | 6615191 | import sys, os
import unittest
import datetime
from corepro.connection import Connection
| import sys, os
import unittest
import datetime
from corepro.connection import Connection
| none | 1 | 1.080704 | 1 | |
learning/03.python-test-and-debug/11.logging.py | dinotumu/code.py | 0 | 6615192 | <reponame>dinotumu/code.py
#!/usr/bin/python3
"""
Module Docstring
"""
__author__ = "<NAME>"
__version__ = "0.1.0"
__license__ = "MIT"
# imports
# init variables
def main() -> None:
    """ Main entry point of the app """
    pass  # TODO: implement application logic
if __name__ == "__main__":
    """ This is executed when run from the command line """
    main()
"""
Module Docstring
"""
__author__ = "<NAME>"
__version__ = "0.1.0"
__license__ = "MIT"
# imports
# init variables
def main() -> None:
    """ Main entry point of the app """
    pass  # TODO: implement application logic
if __name__ == "__main__":
    """ This is executed when run from the command line """
    main()
blaster/constants.py | abhinavabcd/blaster | 4 | 6615193 | '''
Created on 06-Jul-2018
@author: abhinav
'''
# response/status type codes used by callers of this module
TYPE_OK = 1
TYPE_ERROR = -1
TYPE_NEED_LOGIN = -2
| '''
Created on 06-Jul-2018
@author: abhinav
'''
# response/status type codes used by callers of this module
TYPE_OK = 1
TYPE_ERROR = -1
TYPE_NEED_LOGIN = -2
| en | 0.655732 | Created on 06-Jul-2018 @author: abhinav | 1.062847 | 1 |
gifminterp.py | akx/gifminterp | 0 | 6615194 | import argparse
import tempfile
import subprocess
import shutil
import os
import sys
import glob
from multiprocessing.dummy import Pool as ThreadPool
def main():
    """Loop an animated GIF, motion-interpolate it with ffmpeg's minterpolate
    filter, and encode the result to the requested output video file."""
    ap = argparse.ArgumentParser()
    ap.add_argument("--input", required=True)
    ap.add_argument("--output", required=True)
    ap.add_argument("--loop", type=int, default=50)
    ap.add_argument("--input-fps", type=int, default=24)
    ap.add_argument("--minterpolate-fps", type=int, default=60)
    ap.add_argument("--output-fps", type=int, default=120)
    ap.add_argument("--background", default="white")
    args = ap.parse_args()
    # explode the GIF into individual coalesced PNG frames
    tempdir = tempfile.mkdtemp(prefix="barrot-")
    input_pat = os.path.join(tempdir, "f_%08d.png")
    subprocess.check_call(["magick", "convert", args.input, "-coalesce", input_pat])
    frames = glob.glob(os.path.join(tempdir, "f_*.png"))
    # flatten transparency against the chosen background color, in parallel;
    # os.cpu_count() matches ThreadPool's default worker count and avoids
    # reading the private Pool._processes attribute
    workers = os.cpu_count() or 1
    with ThreadPool(workers) as p:
        print(f"flattening {len(frames)} frames using {workers} processes...")

        def flatten_frame(frame):
            subprocess.check_call(
                ["magick", "mogrify", "-background", args.background, "-flatten", frame]
            )

        list(p.imap_unordered(flatten_frame, frames))
    # plain string: no placeholders, so the f-prefix was unnecessary (F541)
    print(
        "running ffmpeg pipeline (it's normal for this to take a while to output anything)..."
    )
    # stage 1: loop the frames and motion-interpolate to the intermediate fps,
    # streaming PNG frames to stdout
    minterp_processor = subprocess.Popen(
        [
            "ffmpeg",
            "-hide_banner",
            "-loglevel",
            "panic",
            "-stream_loop",
            str(args.loop),
            "-framerate",
            str(args.input_fps),
            "-i",
            input_pat,
            "-vf",
            f"minterpolate=fps={args.minterpolate_fps}",
            "-c:v",
            "png",
            "-f",
            "image2pipe",
            "-",
        ],
        stdout=subprocess.PIPE,
    )
    # stage 2: read the interpolated frames on stdin and encode the output
    output_processor = subprocess.Popen(
        [
            "ffmpeg",
            "-hide_banner",
            "-y",
            "-f",
            "image2pipe",
            "-framerate",
            str(args.output_fps),
            "-i",
            "-",
            "-tune",
            "animation",
            "-preset",
            "medium",
            "-crf",
            "24",
            "-pix_fmt",
            "yuv420p",
            args.output,
        ],
        stdin=minterp_processor.stdout,
    )
    # close our copy of the pipe so stage 2 sees EOF when stage 1 exits
    minterp_processor.stdout.close()
    output_processor.wait()
    minterp_processor.kill()
    minterp_processor.wait()  # reap the child so it does not linger as a zombie
    print(f"done! cleaning up {tempdir}")
    shutil.rmtree(tempdir)
    sys.exit(output_processor.returncode)


if __name__ == "__main__":
    main()
| import argparse
import tempfile
import subprocess
import shutil
import os
import sys
import glob
from multiprocessing.dummy import Pool as ThreadPool
def main():
    """Loop an animated GIF, motion-interpolate it with ffmpeg's minterpolate
    filter, and encode the result to the requested output video file."""
    ap = argparse.ArgumentParser()
    ap.add_argument("--input", required=True)
    ap.add_argument("--output", required=True)
    ap.add_argument("--loop", type=int, default=50)
    ap.add_argument("--input-fps", type=int, default=24)
    ap.add_argument("--minterpolate-fps", type=int, default=60)
    ap.add_argument("--output-fps", type=int, default=120)
    ap.add_argument("--background", default="white")
    args = ap.parse_args()
    # explode the GIF into individual coalesced PNG frames
    tempdir = tempfile.mkdtemp(prefix="barrot-")
    input_pat = os.path.join(tempdir, "f_%08d.png")
    subprocess.check_call(["magick", "convert", args.input, "-coalesce", input_pat])
    frames = glob.glob(os.path.join(tempdir, "f_*.png"))
    # flatten transparency against the chosen background color, in parallel;
    # os.cpu_count() matches ThreadPool's default worker count and avoids
    # reading the private Pool._processes attribute
    workers = os.cpu_count() or 1
    with ThreadPool(workers) as p:
        print(f"flattening {len(frames)} frames using {workers} processes...")

        def flatten_frame(frame):
            subprocess.check_call(
                ["magick", "mogrify", "-background", args.background, "-flatten", frame]
            )

        list(p.imap_unordered(flatten_frame, frames))
    # plain string: no placeholders, so the f-prefix was unnecessary (F541)
    print(
        "running ffmpeg pipeline (it's normal for this to take a while to output anything)..."
    )
    # stage 1: loop the frames and motion-interpolate to the intermediate fps,
    # streaming PNG frames to stdout
    minterp_processor = subprocess.Popen(
        [
            "ffmpeg",
            "-hide_banner",
            "-loglevel",
            "panic",
            "-stream_loop",
            str(args.loop),
            "-framerate",
            str(args.input_fps),
            "-i",
            input_pat,
            "-vf",
            f"minterpolate=fps={args.minterpolate_fps}",
            "-c:v",
            "png",
            "-f",
            "image2pipe",
            "-",
        ],
        stdout=subprocess.PIPE,
    )
    # stage 2: read the interpolated frames on stdin and encode the output
    output_processor = subprocess.Popen(
        [
            "ffmpeg",
            "-hide_banner",
            "-y",
            "-f",
            "image2pipe",
            "-framerate",
            str(args.output_fps),
            "-i",
            "-",
            "-tune",
            "animation",
            "-preset",
            "medium",
            "-crf",
            "24",
            "-pix_fmt",
            "yuv420p",
            args.output,
        ],
        stdin=minterp_processor.stdout,
    )
    # close our copy of the pipe so stage 2 sees EOF when stage 1 exits
    minterp_processor.stdout.close()
    output_processor.wait()
    minterp_processor.kill()
    minterp_processor.wait()  # reap the child so it does not linger as a zombie
    print(f"done! cleaning up {tempdir}")
    shutil.rmtree(tempdir)
    sys.exit(output_processor.returncode)


if __name__ == "__main__":
    main()
| none | 1 | 2.706784 | 3 | |
digit-recognizer/run_functional.py | Jim-Lin/kaggle-competition | 0 | 6615195 | <gh_stars>0
import numpy as np
import pandas as pd
import keras
from keras.utils import to_categorical
from keras.models import Model
from keras.layers import Input, Dense

# Load training data; labels are one-hot encoded for the softmax output.
df = pd.read_csv('train.csv')
# DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the
# documented replacement and returns the same ndarray.
X = df.drop(['label'], axis=1).to_numpy()
y = to_categorical(df['label'])
X_test = pd.read_csv('test.csv').to_numpy()

# Simple 3-hidden-layer MLP built with the Keras functional API.
inputs = Input(shape=(X.shape[1],))
x = Dense(25, activation='relu')(inputs)
x = Dense(25, activation='relu')(x)
x = Dense(25, activation='relu')(x)
predictions = Dense(10, activation='softmax')(x)

model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X, y, validation_split=0.3, epochs=10)

# Predicted class = argmax over the 10 softmax outputs; write Kaggle submission.
predict = np.argmax(model.predict(X_test), axis=1)
result = pd.DataFrame({'ImageId': np.arange(1, predict.shape[0]+1), 'Label': predict})
result.to_csv('submission.csv', index=False)
import numpy as np
import pandas as pd
import keras
from keras.utils import to_categorical
from keras.models import Model
from keras.layers import Input, Dense

# Load training data; labels are one-hot encoded for the softmax output.
df = pd.read_csv('train.csv')
# DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the
# documented replacement and returns the same ndarray.
X = df.drop(['label'], axis=1).to_numpy()
y = to_categorical(df['label'])
X_test = pd.read_csv('test.csv').to_numpy()

# Simple 3-hidden-layer MLP built with the Keras functional API.
inputs = Input(shape=(X.shape[1],))
x = Dense(25, activation='relu')(inputs)
x = Dense(25, activation='relu')(x)
x = Dense(25, activation='relu')(x)
predictions = Dense(10, activation='softmax')(x)

model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X, y, validation_split=0.3, epochs=10)

# Predicted class = argmax over the 10 softmax outputs; write Kaggle submission.
predict = np.argmax(model.predict(X_test), axis=1)
result = pd.DataFrame({'ImageId': np.arange(1, predict.shape[0]+1), 'Label': predict})
result.to_csv('submission.csv', index=False)
SheldonSite/Laboratoria/apps.py | marcciszek/Sheldon | 0 | 6615196 | from django.apps import AppConfig
class LaboratoriaConfig(AppConfig):
    """Django application configuration for the Laboratoria app."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'Laboratoria'
| from django.apps import AppConfig
class LaboratoriaConfig(AppConfig):
    """Django application configuration for the Laboratoria app."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'Laboratoria'
| none | 1 | 1.206654 | 1 | |
koku/reporting/migrations/0202_azure_partables.py | bsquizz/koku | 0 | 6615197 | # Generated by Django 3.1.13 on 2021-11-04 01:04
import django.contrib.postgres.fields
import django.db.models.deletion
from django.db import migrations
from django.db import models
from koku.database import set_pg_extended_mode
from koku.database import unset_pg_extended_mode
class Migration(migrations.Migration):
dependencies = [("api", "0050_exchangerates"), ("reporting", "0201_ocp_partables")]
operations = [
migrations.RunPython(code=set_pg_extended_mode, reverse_code=unset_pg_extended_mode),
migrations.CreateModel(
name="AzureStorageSummaryP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("service_name", models.TextField()),
("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit_of_measure", models.TextField(null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_azure_storage_summary_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_azure_storage_summary_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.CreateModel(
name="AzureNetworkSummaryP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("service_name", models.TextField()),
("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit_of_measure", models.TextField(null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_azure_network_summary_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_azure_network_summary_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.CreateModel(
name="AzureDatabaseSummaryP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("service_name", models.TextField()),
("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit_of_measure", models.TextField(null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_azure_database_summary_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_azure_database_summary_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.CreateModel(
name="AzureCostSummaryP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_azure_cost_summary_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_azure_cost_summary_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.CreateModel(
name="AzureCostSummaryByServiceP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("service_name", models.TextField()),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_azure_cost_summary_by_service_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_azure_cost_summary_by_service_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.CreateModel(
name="AzureCostSummaryByLocationP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("resource_location", models.TextField(null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_azure_cost_summary_by_location_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_azure_cost_summary_by_location_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.CreateModel(
name="AzureCostSummaryByAccountP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_azure_cost_summary_by_account_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_azure_cost_summary_by_account_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.CreateModel(
name="AzureComputeSummaryP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("instance_type", models.TextField(null=True)),
(
"instance_ids",
django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), null=True, size=None),
),
("instance_count", models.IntegerField(null=True)),
("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit_of_measure", models.TextField(null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_azure_compute_summary_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_azure_compute_summary_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.AddIndex(
model_name="azurestoragesummaryp",
index=models.Index(fields=["usage_start"], name="azurestorsumm_usage_start"),
),
migrations.AddIndex(
model_name="azurestoragesummaryp",
index=models.Index(fields=["service_name"], name="azurestorsumm_svc_name"),
),
migrations.AddIndex(
model_name="azurenetworksummaryp",
index=models.Index(fields=["usage_start"], name="azurenetsumm_usage_start"),
),
migrations.AddIndex(
model_name="azurenetworksummaryp",
index=models.Index(fields=["service_name"], name="azurenetsumm_svc_name"),
),
migrations.AddIndex(
model_name="azuredatabasesummaryp",
index=models.Index(fields=["usage_start"], name="azuredbsumm_usage_start"),
),
migrations.AddIndex(
model_name="azuredatabasesummaryp",
index=models.Index(fields=["service_name"], name="azuredbsumm_svc_name"),
),
migrations.AddIndex(
model_name="azurecostsummaryp",
index=models.Index(fields=["usage_start"], name="azurecostsumm_usage_start"),
),
migrations.AddIndex(
model_name="azurecostsummarybyservicep",
index=models.Index(fields=["usage_start"], name="azurecostsumm_svc_usage_start"),
),
migrations.AddIndex(
model_name="azurecostsummarybyservicep",
index=models.Index(fields=["service_name"], name="azurecostsumm_svc_svc_name"),
),
migrations.AddIndex(
model_name="azurecostsummarybylocationp",
index=models.Index(fields=["usage_start"], name="azurecostsumm_loc_usage_start"),
),
migrations.AddIndex(
model_name="azurecostsummarybylocationp",
index=models.Index(fields=["resource_location"], name="azurecostsumm_loc_res_loc"),
),
migrations.AddIndex(
model_name="azurecostsummarybyaccountp",
index=models.Index(fields=["usage_start"], name="azurecostsumm_acc_usage_start"),
),
migrations.AddIndex(
model_name="azurecostsummarybyaccountp",
index=models.Index(fields=["subscription_guid"], name="azurecostsumm_acc_sub_guid"),
),
migrations.AddIndex(
model_name="azurecomputesummaryp",
index=models.Index(fields=["usage_start"], name="azurecompsumm_usage_start"),
),
migrations.AddIndex(
model_name="azurecomputesummaryp",
index=models.Index(fields=["instance_type"], name="azurecompsumm_insttyp"),
),
migrations.RunPython(code=unset_pg_extended_mode, reverse_code=set_pg_extended_mode),
]
| # Generated by Django 3.1.13 on 2021-11-04 01:04
import django.contrib.postgres.fields
import django.db.models.deletion
from django.db import migrations
from django.db import models
from koku.database import set_pg_extended_mode
from koku.database import unset_pg_extended_mode
class Migration(migrations.Migration):
dependencies = [("api", "0050_exchangerates"), ("reporting", "0201_ocp_partables")]
operations = [
migrations.RunPython(code=set_pg_extended_mode, reverse_code=unset_pg_extended_mode),
migrations.CreateModel(
name="AzureStorageSummaryP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("service_name", models.TextField()),
("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit_of_measure", models.TextField(null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_azure_storage_summary_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_azure_storage_summary_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.CreateModel(
name="AzureNetworkSummaryP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("service_name", models.TextField()),
("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit_of_measure", models.TextField(null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_azure_network_summary_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_azure_network_summary_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.CreateModel(
name="AzureDatabaseSummaryP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("service_name", models.TextField()),
("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit_of_measure", models.TextField(null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_azure_database_summary_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_azure_database_summary_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.CreateModel(
name="AzureCostSummaryP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_azure_cost_summary_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_azure_cost_summary_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.CreateModel(
name="AzureCostSummaryByServiceP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("service_name", models.TextField()),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_azure_cost_summary_by_service_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_azure_cost_summary_by_service_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.CreateModel(
name="AzureCostSummaryByLocationP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("resource_location", models.TextField(null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_azure_cost_summary_by_location_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_azure_cost_summary_by_location_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.CreateModel(
name="AzureCostSummaryByAccountP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_azure_cost_summary_by_account_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_azure_cost_summary_by_account_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.CreateModel(
name="AzureComputeSummaryP",
fields=[
("id", models.UUIDField(primary_key=True, serialize=False)),
("usage_start", models.DateField()),
("usage_end", models.DateField()),
("subscription_guid", models.TextField()),
("instance_type", models.TextField(null=True)),
(
"instance_ids",
django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), null=True, size=None),
),
("instance_count", models.IntegerField(null=True)),
("usage_quantity", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("unit_of_measure", models.TextField(null=True)),
("pretax_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("markup_cost", models.DecimalField(decimal_places=9, max_digits=24, null=True)),
("currency", models.TextField(null=True)),
(
"source_uuid",
models.ForeignKey(
db_column="source_uuid",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="api.provider",
),
),
],
options={"db_table": "reporting_azure_compute_summary_p"},
),
migrations.RunSQL(
sql="ALTER TABLE reporting_azure_compute_summary_p ALTER COLUMN id SET DEFAULT uuid_generate_v4()",
reverse_sql="select 1",
),
migrations.AddIndex(
model_name="azurestoragesummaryp",
index=models.Index(fields=["usage_start"], name="azurestorsumm_usage_start"),
),
migrations.AddIndex(
model_name="azurestoragesummaryp",
index=models.Index(fields=["service_name"], name="azurestorsumm_svc_name"),
),
migrations.AddIndex(
model_name="azurenetworksummaryp",
index=models.Index(fields=["usage_start"], name="azurenetsumm_usage_start"),
),
migrations.AddIndex(
model_name="azurenetworksummaryp",
index=models.Index(fields=["service_name"], name="azurenetsumm_svc_name"),
),
migrations.AddIndex(
model_name="azuredatabasesummaryp",
index=models.Index(fields=["usage_start"], name="azuredbsumm_usage_start"),
),
migrations.AddIndex(
model_name="azuredatabasesummaryp",
index=models.Index(fields=["service_name"], name="azuredbsumm_svc_name"),
),
migrations.AddIndex(
model_name="azurecostsummaryp",
index=models.Index(fields=["usage_start"], name="azurecostsumm_usage_start"),
),
migrations.AddIndex(
model_name="azurecostsummarybyservicep",
index=models.Index(fields=["usage_start"], name="azurecostsumm_svc_usage_start"),
),
migrations.AddIndex(
model_name="azurecostsummarybyservicep",
index=models.Index(fields=["service_name"], name="azurecostsumm_svc_svc_name"),
),
migrations.AddIndex(
model_name="azurecostsummarybylocationp",
index=models.Index(fields=["usage_start"], name="azurecostsumm_loc_usage_start"),
),
migrations.AddIndex(
model_name="azurecostsummarybylocationp",
index=models.Index(fields=["resource_location"], name="azurecostsumm_loc_res_loc"),
),
migrations.AddIndex(
model_name="azurecostsummarybyaccountp",
index=models.Index(fields=["usage_start"], name="azurecostsumm_acc_usage_start"),
),
migrations.AddIndex(
model_name="azurecostsummarybyaccountp",
index=models.Index(fields=["subscription_guid"], name="azurecostsumm_acc_sub_guid"),
),
migrations.AddIndex(
model_name="azurecomputesummaryp",
index=models.Index(fields=["usage_start"], name="azurecompsumm_usage_start"),
),
migrations.AddIndex(
model_name="azurecomputesummaryp",
index=models.Index(fields=["instance_type"], name="azurecompsumm_insttyp"),
),
migrations.RunPython(code=unset_pg_extended_mode, reverse_code=set_pg_extended_mode),
]
| en | 0.828521 | # Generated by Django 3.1.13 on 2021-11-04 01:04 | 1.555876 | 2 |
tests/test_constants.py | uktrade/directory-cms-client | 0 | 6615198 | <reponame>uktrade/directory-cms-client
import pytest
def test_raises_error():
with pytest.raises(ImportError):
from directory_cms_client import constants # NOQA
| import pytest
def test_raises_error():
with pytest.raises(ImportError):
from directory_cms_client import constants # NOQA | none | 1 | 1.470297 | 1 | |
analysis/energy.py | MorrisHuang-skipper/Serial-MD | 0 | 6615199 | <reponame>MorrisHuang-skipper/Serial-MD
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from pylab import cm
import math
mpl.rcParams['font.family'] = 'STIXGeneral'
plt.rcParams['xtick.labelsize'] = 16
plt.rcParams['ytick.labelsize'] = 16
plt.rcParams['font.size'] = 16
plt.rcParams['figure.figsize'] = [5.6, 4]
plt.rcParams['axes.titlesize'] = 16
plt.rcParams['axes.labelsize'] = 16
plt.rcParams['lines.linewidth'] = 2
plt.rcParams['lines.markersize'] = 6
plt.rcParams['legend.fontsize'] = 13
plt.rcParams['mathtext.fontset'] = 'stix'
plt.rcParams['axes.linewidth'] = 1
colors = cm.get_cmap('Set1', 5)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# ax2 = fig.add_subplot(3, 1, 2)
# ax3 = fig.add_subplot(3, 1, 3)
ax.xaxis.set_tick_params(which='major', size=5, width=1,
direction='in', top='on')
ax.xaxis.set_tick_params(which='minor', size=3, width=1,
direction='in', top='on')
ax.yaxis.set_tick_params(which='major', size=5, width=1,
direction='in', right='on')
ax.yaxis.set_tick_params(which='minor', size=3, width=1,
direction='in', right='on')
# ax.xaxis.set_major_locator(mpl.ticker.MultipleLocator(20))
# ax.xaxis.set_minor_locator(mpl.ticker.MultipleLocator(5))
# ax.yaxis.set_major_locator(mpl.ticker.MultipleLocator(.05))
# ax.yaxis.set_minor_locator(mpl.ticker.MultipleLocator(.01))
e = 1.60217e-19
epsilon = 8.854187e-12
Me = 9.10938356e-31
Mi = 1836 * Me
timestep = [r'$150\times 10^{-18}$', r'$100\times 10^{-18}$', r'$50 \times 10^{-18}$', r'$30 \times 10^{-18}$', '$0.1$', '$.2$']
for i in range(1, 5):
fname = 'conv'+str(i)
t, Ki, Ke, K, U, T, coll, accum, px, py, pz = np.loadtxt('../data/'+fname+'/info.dat', unpack=True)
E = K + U
# ax.plot(t, Ki+Ke, label=r'$E$')
# ax.plot(t, U, label=r'$U$')
# ax.plot(t, E, label=r'$dt=$'+timestep[i], color=colors(i))
# ax.plot(t, px)
# ax.plot(t, py)
# ax.plot(t, pz)
ax.plot(t*1e15, abs((E-E[0])/E[0])/coll, label=r'$dt=$'+timestep[i-1], color=colors(i))
# ax.plot(t, abs((E-E[0])/E[0]), label=r'$dt=$'+timestep[i], color=colors(i))
# ax2.plot(t, coll, '--', color=colors(i))
# ax3.plot(t, T, color=colors(i))
# ax2.plot(t, accum, '--', color=colors(i))
ax.set_yscale('log')
ax.set_xlabel(r'$Time \ [fs]$')
ax.set_ylabel(r'$\dfrac{\Delta E_{loss}}{collision} \ [\varepsilon/coll.]$')
ax.set_title('Mic3 ($R=1\AA, \ NP=1000, \, r_{cut}=3w_{pic}$)')
ax.legend()
plt.tight_layout()
plt.savefig('../figures/conv.eps')
plt.savefig('../figures/conv.png', dpi=1200)
plt.show()
| import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from pylab import cm
import math
mpl.rcParams['font.family'] = 'STIXGeneral'
plt.rcParams['xtick.labelsize'] = 16
plt.rcParams['ytick.labelsize'] = 16
plt.rcParams['font.size'] = 16
plt.rcParams['figure.figsize'] = [5.6, 4]
plt.rcParams['axes.titlesize'] = 16
plt.rcParams['axes.labelsize'] = 16
plt.rcParams['lines.linewidth'] = 2
plt.rcParams['lines.markersize'] = 6
plt.rcParams['legend.fontsize'] = 13
plt.rcParams['mathtext.fontset'] = 'stix'
plt.rcParams['axes.linewidth'] = 1
colors = cm.get_cmap('Set1', 5)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# ax2 = fig.add_subplot(3, 1, 2)
# ax3 = fig.add_subplot(3, 1, 3)
ax.xaxis.set_tick_params(which='major', size=5, width=1,
direction='in', top='on')
ax.xaxis.set_tick_params(which='minor', size=3, width=1,
direction='in', top='on')
ax.yaxis.set_tick_params(which='major', size=5, width=1,
direction='in', right='on')
ax.yaxis.set_tick_params(which='minor', size=3, width=1,
direction='in', right='on')
# ax.xaxis.set_major_locator(mpl.ticker.MultipleLocator(20))
# ax.xaxis.set_minor_locator(mpl.ticker.MultipleLocator(5))
# ax.yaxis.set_major_locator(mpl.ticker.MultipleLocator(.05))
# ax.yaxis.set_minor_locator(mpl.ticker.MultipleLocator(.01))
e = 1.60217e-19
epsilon = 8.854187e-12
Me = 9.10938356e-31
Mi = 1836 * Me
timestep = [r'$150\times 10^{-18}$', r'$100\times 10^{-18}$', r'$50 \times 10^{-18}$', r'$30 \times 10^{-18}$', '$0.1$', '$.2$']
for i in range(1, 5):
fname = 'conv'+str(i)
t, Ki, Ke, K, U, T, coll, accum, px, py, pz = np.loadtxt('../data/'+fname+'/info.dat', unpack=True)
E = K + U
# ax.plot(t, Ki+Ke, label=r'$E$')
# ax.plot(t, U, label=r'$U$')
# ax.plot(t, E, label=r'$dt=$'+timestep[i], color=colors(i))
# ax.plot(t, px)
# ax.plot(t, py)
# ax.plot(t, pz)
ax.plot(t*1e15, abs((E-E[0])/E[0])/coll, label=r'$dt=$'+timestep[i-1], color=colors(i))
# ax.plot(t, abs((E-E[0])/E[0]), label=r'$dt=$'+timestep[i], color=colors(i))
# ax2.plot(t, coll, '--', color=colors(i))
# ax3.plot(t, T, color=colors(i))
# ax2.plot(t, accum, '--', color=colors(i))
ax.set_yscale('log')
ax.set_xlabel(r'$Time \ [fs]$')
ax.set_ylabel(r'$\dfrac{\Delta E_{loss}}{collision} \ [\varepsilon/coll.]$')
ax.set_title('Mic3 ($R=1\AA, \ NP=1000, \, r_{cut}=3w_{pic}$)')
ax.legend()
plt.tight_layout()
plt.savefig('../figures/conv.eps')
plt.savefig('../figures/conv.png', dpi=1200)
plt.show() | en | 0.072769 | # ax2 = fig.add_subplot(3, 1, 2) # ax3 = fig.add_subplot(3, 1, 3) # ax.xaxis.set_major_locator(mpl.ticker.MultipleLocator(20)) # ax.xaxis.set_minor_locator(mpl.ticker.MultipleLocator(5)) # ax.yaxis.set_major_locator(mpl.ticker.MultipleLocator(.05)) # ax.yaxis.set_minor_locator(mpl.ticker.MultipleLocator(.01)) # ax.plot(t, Ki+Ke, label=r'$E$') # ax.plot(t, U, label=r'$U$') # ax.plot(t, E, label=r'$dt=$'+timestep[i], color=colors(i)) # ax.plot(t, px) # ax.plot(t, py) # ax.plot(t, pz) # ax.plot(t, abs((E-E[0])/E[0]), label=r'$dt=$'+timestep[i], color=colors(i)) # ax2.plot(t, coll, '--', color=colors(i)) # ax3.plot(t, T, color=colors(i)) # ax2.plot(t, accum, '--', color=colors(i)) | 2.288928 | 2 |
test/test_file_client.py | MainRo/cyclotron-std | 1 | 6615200 | <filename>test/test_file_client.py
import functools
from unittest import TestCase
import rx
import rx.operators as ops
from rx.subject import Subject
import cyclotron_std.io.file as file
class FileClientTestCase(TestCase):
def setUp(self):
self.actual = {}
def create_actual(self):
return {
'next': [],
'error': None,
'completed': False
}
def on_next(self, key, i):
if not key in self.actual:
self.actual[key] = self.create_actual()
self.actual[key]['next'].append(i)
def on_error(self, key, e):
if not key in self.actual:
self.actual[key] = self.create_actual()
self.actual[key]['error'] = e
def on_completed(self, key):
if not key in self.actual:
self.actual[key] = self.create_actual()
self.actual[key]['completed'] = True
def test_read_data(self):
driver_response = Subject()
read_request = rx.just(file.Read(id=1, path='/foo.txt', size=-1, mode='r'))
driver_request, read_response = read_request.pipe(file.read(driver_response))
driver_request.subscribe(
on_next=functools.partial(self.on_next, 'driver_request'),
on_error=functools.partial(self.on_error, 'driver_request'),
on_completed=functools.partial(self.on_completed, 'driver_request'))
read_response.pipe(ops.flat_map(lambda i: i.data)).subscribe(
on_next=functools.partial(self.on_next, 'response'),
on_error=functools.partial(self.on_error, 'response'),
on_completed=functools.partial(self.on_completed, 'response'))
self.assertEqual(
file.Context(
id=read_request,
observable=read_request
),
self.actual['driver_request']['next'][0])
result = file.Context(
id=read_request,
observable=rx.just(file.ReadResponse(id=1, path='/foo.txt', data=rx.just(b'bar')))
)
driver_response.on_next(result)
self.assertIs(
b'bar',
self.actual['response']['next'][0])
| <filename>test/test_file_client.py
import functools
from unittest import TestCase
import rx
import rx.operators as ops
from rx.subject import Subject
import cyclotron_std.io.file as file
class FileClientTestCase(TestCase):
def setUp(self):
self.actual = {}
def create_actual(self):
return {
'next': [],
'error': None,
'completed': False
}
def on_next(self, key, i):
if not key in self.actual:
self.actual[key] = self.create_actual()
self.actual[key]['next'].append(i)
def on_error(self, key, e):
if not key in self.actual:
self.actual[key] = self.create_actual()
self.actual[key]['error'] = e
def on_completed(self, key):
if not key in self.actual:
self.actual[key] = self.create_actual()
self.actual[key]['completed'] = True
def test_read_data(self):
driver_response = Subject()
read_request = rx.just(file.Read(id=1, path='/foo.txt', size=-1, mode='r'))
driver_request, read_response = read_request.pipe(file.read(driver_response))
driver_request.subscribe(
on_next=functools.partial(self.on_next, 'driver_request'),
on_error=functools.partial(self.on_error, 'driver_request'),
on_completed=functools.partial(self.on_completed, 'driver_request'))
read_response.pipe(ops.flat_map(lambda i: i.data)).subscribe(
on_next=functools.partial(self.on_next, 'response'),
on_error=functools.partial(self.on_error, 'response'),
on_completed=functools.partial(self.on_completed, 'response'))
self.assertEqual(
file.Context(
id=read_request,
observable=read_request
),
self.actual['driver_request']['next'][0])
result = file.Context(
id=read_request,
observable=rx.just(file.ReadResponse(id=1, path='/foo.txt', data=rx.just(b'bar')))
)
driver_response.on_next(result)
self.assertIs(
b'bar',
self.actual['response']['next'][0])
| none | 1 | 2.497981 | 2 | |
baekjoon/10699.py | phillip5094/algorithm-practice | 0 | 6615201 | <reponame>phillip5094/algorithm-practice
import datetime
print(str(datetime.datetime.now())[:10]) | import datetime
print(str(datetime.datetime.now())[:10]) | none | 1 | 2.366861 | 2 | |
students/K33421/Khlestunova_Stefania/LR3/worriors/worriors_app/migrations/0002_rename_worrior_warrior.py | Stefania-K/ITMO_ICT_WebDevelopment_2021-2022 | 0 | 6615202 | <reponame>Stefania-K/ITMO_ICT_WebDevelopment_2021-2022<gh_stars>0
# Generated by Django 3.2.9 on 2022-01-15 15:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('worriors_app', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='Worrior',
new_name='Warrior',
),
]
| # Generated by Django 3.2.9 on 2022-01-15 15:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('worriors_app', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='Worrior',
new_name='Warrior',
),
] | en | 0.880612 | # Generated by Django 3.2.9 on 2022-01-15 15:39 | 1.643551 | 2 |
src/feedback/admin.py | flokli/bornhack-website | 7 | 6615203 | <reponame>flokli/bornhack-website
from django.contrib import admin
from .models import Feedback
@admin.register(Feedback)
class FeedbackAdmin(admin.ModelAdmin):
list_display = ("user", "camp", "feedback")
| from django.contrib import admin
from .models import Feedback
@admin.register(Feedback)
class FeedbackAdmin(admin.ModelAdmin):
list_display = ("user", "camp", "feedback") | none | 1 | 1.512089 | 2 | |
app/http/exception_handlers/http.py | Speccy-Rom/My-web-service-architecture | 0 | 6615204 | <reponame>Speccy-Rom/My-web-service-architecture
from typing import Callable
from starlette.requests import Request
from starlette.responses import JSONResponse
from app.src.exception import APIException
def http_exception_factory(status_code: int) -> Callable:
def http_exception(_: Request, exception: APIException) -> JSONResponse:
return JSONResponse(status_code=status_code, content={"message": exception.message})
return http_exception
| from typing import Callable
from starlette.requests import Request
from starlette.responses import JSONResponse
from app.src.exception import APIException
def http_exception_factory(status_code: int) -> Callable:
def http_exception(_: Request, exception: APIException) -> JSONResponse:
return JSONResponse(status_code=status_code, content={"message": exception.message})
return http_exception | none | 1 | 2.334605 | 2 | |
main.py | Freshewok101/KaprekarNumbers | 0 | 6615205 | def listToInt(array):
integer = 0
for e, i in enumerate(reversed(array)):
integer += (int(i) * (10 ** e))
return integer
# gets the number from the user
userInput = int(input("Input a number: "))
# squares the number
squareNumber = userInput**2
print(f"The number squared is: {squareNumber}")
# splits the number into individual integers
listNumber = [i for i in str(squareNumber)]
numberLength = len(listNumber)
splitNums = []
if numberLength == 2:
splitNums.append(listNumber[0])
splitNums.append(listNumber[1])
else:
splitNums.append(listNumber[0:(numberLength // 2)])
splitNums.append(listNumber[(numberLength // 2):])
result = 0
for i in splitNums:
result += listToInt(i)
print(f"{listToInt(splitNums[0])} added to {listToInt(splitNums[1])} is [{result}]")
if userInput == result:
print("This is a kaprekar number")
else:
print("This is not a kaprekar number")
| def listToInt(array):
integer = 0
for e, i in enumerate(reversed(array)):
integer += (int(i) * (10 ** e))
return integer
# gets the number from the user
userInput = int(input("Input a number: "))
# squares the number
squareNumber = userInput**2
print(f"The number squared is: {squareNumber}")
# splits the number into individual integers
listNumber = [i for i in str(squareNumber)]
numberLength = len(listNumber)
splitNums = []
if numberLength == 2:
splitNums.append(listNumber[0])
splitNums.append(listNumber[1])
else:
splitNums.append(listNumber[0:(numberLength // 2)])
splitNums.append(listNumber[(numberLength // 2):])
result = 0
for i in splitNums:
result += listToInt(i)
print(f"{listToInt(splitNums[0])} added to {listToInt(splitNums[1])} is [{result}]")
if userInput == result:
print("This is a kaprekar number")
else:
print("This is not a kaprekar number")
| en | 0.818694 | # gets the number from the user # squares the number # splits the number into individual integers | 3.901871 | 4 |
core/models.py | akshatvg/Narrative | 10 | 6615206 | <reponame>akshatvg/Narrative
from django.db import models
import uuid
class TemplateDivision(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4)
division_name = models.CharField(max_length=100)
class TemplateDatabase(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4)
division = models.ForeignKey(TemplateDivision, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
image = models.CharField(max_length=1000)
| from django.db import models
import uuid
class TemplateDivision(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4)
division_name = models.CharField(max_length=100)
class TemplateDatabase(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4)
division = models.ForeignKey(TemplateDivision, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
image = models.CharField(max_length=1000) | none | 1 | 2.117584 | 2 | |
theory/test.py | johnterickson/Mesh | 1,494 | 6615207 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 25 14:34:04 2016
@author: devd
"""
from __future__ import division
import logging
import math
from choose import nCr
import numpy as np
from scipy.misc import comb
import createRandomString as c
import meshers
import time
import random
import functools
import json
import pickle
import os
from mesh_util import occupancySort, formatStrings, fast_q
from createRandomString import createIndependentRandomStrings
#logging.getLogger('').handlers = []
#logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
#logging.debug('This is a log message.')
#logging.info('test')
#logging.warning('double test')
#
#strings = createIndependentRandomStrings(4,10,numOnes = 2)
#new_strings = []
#for string in strings:
# new_strings.append((string, long(string, base=2)))
#print new_strings
#print "\n \n \n"
##occupancySort(strings)
#new_strings.sort(key = lambda x: x[0].count("1"))
#print new_strings
strings = createIndependentRandomStrings(256, 10000, numOnes = 5)
strings = formatStrings(strings)
occs = [x[2] for x in strings]
print np.mean(occs)
print np.std(occs)
def faster_q(length, occ1, occ2):
numerator = 1
for i in range(length-occ1, length-occ1-occ2, -1):
numerator *= i
denominator = 1
for i in range(length, length-occ2, -1):
denominator *= i
return float(numerator)/float(denominator)
length = 128
start = time.time()
for occ1 in range(0,50):
for occ2 in range(0,50):
result1 = fast_q(length, occ1, occ2)
t1 = time.time() - start
start = time.time()
for occ1 in range(0,50):
for occ2 in range(0,50):
result2 = faster_q(length, occ1, occ2)
t2 = time.time()-start
print 'fast_q got {} in {} ms'.format(result1, t1)
print 'faster_q got {} in {} ms'.format(result2, t2) | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 25 14:34:04 2016
@author: devd
"""
from __future__ import division
import logging
import math
from choose import nCr
import numpy as np
from scipy.misc import comb
import createRandomString as c
import meshers
import time
import random
import functools
import json
import pickle
import os
from mesh_util import occupancySort, formatStrings, fast_q
from createRandomString import createIndependentRandomStrings
#logging.getLogger('').handlers = []
#logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
#logging.debug('This is a log message.')
#logging.info('test')
#logging.warning('double test')
#
#strings = createIndependentRandomStrings(4,10,numOnes = 2)
#new_strings = []
#for string in strings:
# new_strings.append((string, long(string, base=2)))
#print new_strings
#print "\n \n \n"
##occupancySort(strings)
#new_strings.sort(key = lambda x: x[0].count("1"))
#print new_strings
strings = createIndependentRandomStrings(256, 10000, numOnes = 5)
strings = formatStrings(strings)
occs = [x[2] for x in strings]
print np.mean(occs)
print np.std(occs)
def faster_q(length, occ1, occ2):
numerator = 1
for i in range(length-occ1, length-occ1-occ2, -1):
numerator *= i
denominator = 1
for i in range(length, length-occ2, -1):
denominator *= i
return float(numerator)/float(denominator)
length = 128
start = time.time()
for occ1 in range(0,50):
for occ2 in range(0,50):
result1 = fast_q(length, occ1, occ2)
t1 = time.time() - start
start = time.time()
for occ1 in range(0,50):
for occ2 in range(0,50):
result2 = faster_q(length, occ1, occ2)
t2 = time.time()-start
print 'fast_q got {} in {} ms'.format(result1, t1)
print 'faster_q got {} in {} ms'.format(result2, t2) | en | 0.352446 | # -*- coding: utf-8 -*- Created on Mon Apr 25 14:34:04 2016 @author: devd #logging.getLogger('').handlers = [] #logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s') #logging.debug('This is a log message.') #logging.info('test') #logging.warning('double test') # #strings = createIndependentRandomStrings(4,10,numOnes = 2) #new_strings = [] #for string in strings: # new_strings.append((string, long(string, base=2))) #print new_strings #print "\n \n \n" ##occupancySort(strings) #new_strings.sort(key = lambda x: x[0].count("1")) #print new_strings | 1.969754 | 2 |
source_code/DVPred/scripts/f2_no_head_vcf.py | WCH-IRD/DVPred | 0 | 6615208 | <gh_stars>0
import os
# def no_head(vcffile,non_repeat_region_vcf):
def no_head(vcffile):
# res1 = os.system("grep -v '#' "+vcffile+"> ./ML/tmp/no_head_1.vcf")
# res2 = os.system("grep -v '#' "+non_repeat_region_vcf+" | awk -F '\t' '{print $NF}' > ./ML/tmp/no_head_2.vcf")
# res3 = os.system('paste ./ML/tmp/no_head_1.vcf ./ML/tmp/no_head_2.vcf > ./ML/tmp/no_head.vcf')
# os.system('rm ./ML/tmp/no_head_1.vcf ./ML/tmp/no_head_2.vcf')
no_head_path=''
res1 = os.system("grep -v '#' "+vcffile+"> ./ML/tmp/no_head.vcf")
# if res1==0 and res2==0 and res3==0:
if res1==0 :
no_head_path = './ML/tmp/no_head.vcf'
return no_head_path | import os
# def no_head(vcffile,non_repeat_region_vcf):
def no_head(vcffile):
# res1 = os.system("grep -v '#' "+vcffile+"> ./ML/tmp/no_head_1.vcf")
# res2 = os.system("grep -v '#' "+non_repeat_region_vcf+" | awk -F '\t' '{print $NF}' > ./ML/tmp/no_head_2.vcf")
# res3 = os.system('paste ./ML/tmp/no_head_1.vcf ./ML/tmp/no_head_2.vcf > ./ML/tmp/no_head.vcf')
# os.system('rm ./ML/tmp/no_head_1.vcf ./ML/tmp/no_head_2.vcf')
no_head_path=''
res1 = os.system("grep -v '#' "+vcffile+"> ./ML/tmp/no_head.vcf")
# if res1==0 and res2==0 and res3==0:
if res1==0 :
no_head_path = './ML/tmp/no_head.vcf'
return no_head_path | en | 0.218178 | # def no_head(vcffile,non_repeat_region_vcf): # res1 = os.system("grep -v '#' "+vcffile+"> ./ML/tmp/no_head_1.vcf") # res2 = os.system("grep -v '#' "+non_repeat_region_vcf+" | awk -F '\t' '{print $NF}' > ./ML/tmp/no_head_2.vcf") # res3 = os.system('paste ./ML/tmp/no_head_1.vcf ./ML/tmp/no_head_2.vcf > ./ML/tmp/no_head.vcf') # os.system('rm ./ML/tmp/no_head_1.vcf ./ML/tmp/no_head_2.vcf') # if res1==0 and res2==0 and res3==0: | 2.291479 | 2 |
Development Sripts/Thresholding.py | MonicaSelvaraj/Drosophila-germ-plasm | 0 | 6615209 | import matplotlib.pyplot as plt
import numpy as np
import math
import csv
import sys
#np.set_printoptions(threshold=sys.maxsize) #Print out full arrays/data
'''
Reads and stores the intensity matrix for one z-slice
Cuts off first row and first column
'''
def GetSliceIntensities(path):
with open(path, 'r') as csv_file:
matrix = []
csv_reader = csv.reader (csv_file)
for row in csv_reader:
matrix.append(row)
I = np.array(matrix)
I = I[1:,1:]
return I
'''
Creates, displays and closes a 2D plot
'''
def TwoDimPlot(x,y):
plt.plot(x,y)
plt.show()
plt.close()
'''
L - number of possible intensity values
I - matrix of intensity values
Counts number of pixels of each intensity value
'''
def hist(L, I):
h = np.zeros(L) #1D array that stores the number of pixels with each intensity value
size = I.shape
for i in range (0, size[0]): #For every row
for j in range (0, size[1]): #For each column value in the row
index = int(I[i][j])
h[index] = h[index] + 1
return h
'''
h - original histogram values
n - number of pixels
'''
def normHist(h,n,L):
nh = np.zeros(L)
for i in range (0, len(h)):#For every element in h
nh[i] = h[i]/n
return nh
def eqHist(L, nh,I):
#Cumulative sum
c = np.zeros(L)
c[0] = nh[0]
for i in range(1, L):
c[i] = c[i-1]+ nh[i]
#Transforming original image intensities to improve contrast
size = I.shape
for i in range (0, size[0]): #For every row
for j in range (0, size[1]): #For each column value in the row
newI = math.floor((L-1)* c[int(I[i][j])])
I[i][j] = newI
return I
'''
Dividing the total data range (maximum-minimum) pixel value into 256 separate bins with equal widths.
These bins are then used to sort pixels that fall within a certain range into the appropriate bin.
(Source: https://petebankhead.gitbooks.io/imagej-intro/content/chapters/thresholding/thresholding.html)
'''
#def histBins():
#return
#Main
I = GetSliceIntensities('Input/Slice1.csv') #I is the matrix of intensities
n = I.shape[0]*I.shape[1] #n is the number of pixels in the z-slice
#Note:To access an element in I use I[row][col]; The first element is I[0][0]
#GRAY-LEVEL HISTOGRAM
#Note: The z-slices have 16 bit resolution. The grayscale intensity values range from 0-65535
L = 65535
h = hist(L, I)
print(h)
bins = np.arange(0,L,1)
TwoDimPlot(bins,h)
#NORMALIZED HISTOGRAM (or) Probability density function
nh = normHist(h,n,L)
print(nh)
TwoDimPlot(bins,nh)
#EQUALIZED HISTOGRAM
equalizedI = eqHist(L, nh, I)
eh = hist(L, equalizedI)
print(eh)
TwoDimPlot(bins,eh)
#Checking if binning is useful
plt.hist(I, bins = 256)
plt.title("Histogram with 256 bins")
plt.show()
#Otsu's algorithm - Find a threshold t that minimizes the within class variance (weighted sum of )
#Compute histogram
| import matplotlib.pyplot as plt
import numpy as np
import math
import csv
import sys
#np.set_printoptions(threshold=sys.maxsize) #Print out full arrays/data
'''
Reads and stores the intensity matrix for one z-slice
Cuts off first row and first column
'''
def GetSliceIntensities(path):
with open(path, 'r') as csv_file:
matrix = []
csv_reader = csv.reader (csv_file)
for row in csv_reader:
matrix.append(row)
I = np.array(matrix)
I = I[1:,1:]
return I
'''
Creates, displays and closes a 2D plot
'''
def TwoDimPlot(x,y):
plt.plot(x,y)
plt.show()
plt.close()
'''
L - number of possible intensity values
I - matrix of intensity values
Counts number of pixels of each intensity value
'''
def hist(L, I):
h = np.zeros(L) #1D array that stores the number of pixels with each intensity value
size = I.shape
for i in range (0, size[0]): #For every row
for j in range (0, size[1]): #For each column value in the row
index = int(I[i][j])
h[index] = h[index] + 1
return h
'''
h - original histogram values
n - number of pixels
'''
def normHist(h,n,L):
nh = np.zeros(L)
for i in range (0, len(h)):#For every element in h
nh[i] = h[i]/n
return nh
def eqHist(L, nh,I):
#Cumulative sum
c = np.zeros(L)
c[0] = nh[0]
for i in range(1, L):
c[i] = c[i-1]+ nh[i]
#Transforming original image intensities to improve contrast
size = I.shape
for i in range (0, size[0]): #For every row
for j in range (0, size[1]): #For each column value in the row
newI = math.floor((L-1)* c[int(I[i][j])])
I[i][j] = newI
return I
'''
Dividing the total data range (maximum-minimum) pixel value into 256 separate bins with equal widths.
These bins are then used to sort pixels that fall within a certain range into the appropriate bin.
(Source: https://petebankhead.gitbooks.io/imagej-intro/content/chapters/thresholding/thresholding.html)
'''
#def histBins():
#return
#Main
I = GetSliceIntensities('Input/Slice1.csv') #I is the matrix of intensities
n = I.shape[0]*I.shape[1] #n is the number of pixels in the z-slice
#Note:To access an element in I use I[row][col]; The first element is I[0][0]
#GRAY-LEVEL HISTOGRAM
#Note: The z-slices have 16 bit resolution. The grayscale intensity values range from 0-65535
L = 65535
h = hist(L, I)
print(h)
bins = np.arange(0,L,1)
TwoDimPlot(bins,h)
#NORMALIZED HISTOGRAM (or) Probability density function
nh = normHist(h,n,L)
print(nh)
TwoDimPlot(bins,nh)
#EQUALIZED HISTOGRAM
equalizedI = eqHist(L, nh, I)
eh = hist(L, equalizedI)
print(eh)
TwoDimPlot(bins,eh)
#Checking if binning is useful
plt.hist(I, bins = 256)
plt.title("Histogram with 256 bins")
plt.show()
#Otsu's algorithm - Find a threshold t that minimizes the within class variance (weighted sum of )
#Compute histogram
| en | 0.714692 | #np.set_printoptions(threshold=sys.maxsize) #Print out full arrays/data Reads and stores the intensity matrix for one z-slice Cuts off first row and first column Creates, displays and closes a 2D plot L - number of possible intensity values I - matrix of intensity values Counts number of pixels of each intensity value #1D array that stores the number of pixels with each intensity value #For every row #For each column value in the row h - original histogram values n - number of pixels #For every element in h #Cumulative sum #Transforming original image intensities to improve contrast #For every row #For each column value in the row Dividing the total data range (maximum-minimum) pixel value into 256 separate bins with equal widths. These bins are then used to sort pixels that fall within a certain range into the appropriate bin. (Source: https://petebankhead.gitbooks.io/imagej-intro/content/chapters/thresholding/thresholding.html) #def histBins(): #return #Main #I is the matrix of intensities #n is the number of pixels in the z-slice #Note:To access an element in I use I[row][col]; The first element is I[0][0] #GRAY-LEVEL HISTOGRAM #Note: The z-slices have 16 bit resolution. The grayscale intensity values range from 0-65535 #NORMALIZED HISTOGRAM (or) Probability density function #EQUALIZED HISTOGRAM #Checking if binning is useful #Otsu's algorithm - Find a threshold t that minimizes the within class variance (weighted sum of ) #Compute histogram | 2.928526 | 3 |
src/testsklearn.py | endofsamsara/CMPS242 | 0 | 6615210 | <filename>src/testsklearn.py
from sklearn import datasets,svm
from SVR import SVR
X, y = datasets.load_svmlight_file('../data/train_vectors.txt')
clf = svm.SVR()
clf.fit(X[:4000],y[:4000])
yp = clf.predict(X[4000:])
rmse = SVR().jduge(yp,y[4000:])
print rmse | <filename>src/testsklearn.py
from sklearn import datasets,svm
from SVR import SVR
X, y = datasets.load_svmlight_file('../data/train_vectors.txt')
clf = svm.SVR()
clf.fit(X[:4000],y[:4000])
yp = clf.predict(X[4000:])
rmse = SVR().jduge(yp,y[4000:])
print rmse | none | 1 | 2.528193 | 3 | |
python-sdk/nuscenes/eval/tracking/tracking_vis.py | nmll/nuscenes-tools | 15 | 6615211 |
# nuScenes dev-kit.
# Code written by <NAME>, <NAME> and <NAME>, 2019.
import trk_render as render
import argparse
import json
import os
import time
from typing import Tuple, List, Dict, Any, Callable, Tuple
import tqdm
import sklearn
import numpy as np
import unittest
try:
import pandas
except ModuleNotFoundError:
raise unittest.SkipTest('Skipping test as pandas was not found!')
from nuscenes.nuscenes import NuScenes
from nuscenes.eval.common.config import config_factory
from nuscenes.eval.common.loaders import load_prediction, load_gt, add_center_dist, filter_eval_boxes
from nuscenes.eval.tracking.algo import TrackingEvaluation
from nuscenes.eval.tracking.constants import AVG_METRIC_MAP, MOT_METRIC_MAP, LEGACY_METRICS
from nuscenes.eval.tracking.data_classes import TrackingMetrics, TrackingMetricDataList, TrackingConfig, TrackingBox, \
TrackingMetricData
from nuscenes.eval.tracking.loaders import create_tracks
#from nuscenes.eval.tracking.render import TrackingRenderer, recall_metric_curve, summary_plot
from trk_render import TrackingRenderer, recall_metric_curve, summary_plot
from nuscenes.eval.tracking.utils import print_final_metrics
from nuscenes.eval.tracking.mot import MOTAccumulatorCustom
from nuscenes.utils.data_classes import LidarPointCloud#读取点云
save_path = "./visresult/cp_vis_zs" # output directory (path and name prefix) for the rendered visualization results
datset_path = "/share/nuscenes/v1.0-trainval/" # root path of the nuScenes dataset on disk
#result_file = "/share/OpenPCDet/output/cfgs/nuscenes_models/cbgs_pp_multihead/pp_neiborcbam_iouglogred/eval/epoch_20/val/default/final_result/data/trk_results/trk_results_nusc.json"
#result_file = "/share/lycode/work_dirs/cp_imgdefroi_reid/cp_imgdefroi_reid_mgpu1/cp_ve_epo20_GT/trk_results/trk_results_nusc.json"
#result_file = "/share/lycode/nuscenes-devkit/python-sdk/nuscenes/eval/tracking/trk_results/tracking_result_filter.json"
result_file = "/share/lycode/CenterPoint/work_dirs/voxel/trk/tracking_result.json" # tracking result JSON (nuScenes submission format) to visualize
#result_file = "/share/OpenPCDet/output/cfgs/nuscenes_models/cbgs_second_multihead/sec_neiborcbamreid3_marepre/eval/epoch_20/val/default/final_result/data/trk_results/trk_results_nusc.json" # result file
#result_file = "/share/nuscenes/OpenPCDet/output/cfgs/nuscenes_models/pointrcnn_2/default/eval/eval_with_train/epoch_20/val/final_result/data/results_nusc.json" # result file
#result_file = "/share/OpenPCDet/output/cfgs/nuscenes_models/cbgs_second_multihead/default/eval/epoch_6229/val/default/final_result/data/\
#trk_results/trk_results_nusc.json"
#info_path = datset_path + "/nuscenes_infos_10sweeps_val.pkl"
render_classes = ["car"] # classes to render; full set: ['car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone']
frame_id_thr = 30 # unused (kept for compatibility)
ifplotgt = False # presumably: also draw ground-truth boxes — TODO confirm against the renderer
ifpltsco = False # whether to draw the tracking/detection score next to each box
ifdetection = False # not implemented yet
outscen_class = False # whether to output the per-class FP/FN breakdown; test a single class at a time — only 'car' and 'pedestrian' are supported
ifplthis = False # whether to plot the historical trajectory points of each track
scene_id_thr = 150 # 150 scenes in total, ~40 frames each; only the first scene_id_thr scenes are rendered
lidar_name = 'LIDAR_TOP' # nuScenes sensor channel used for the point cloud
cam_name = 'CAM_FRONT' # nuScenes camera channel used for image rendering
class TrackingEval:
    """
    Visualization-oriented variant of the official nuScenes tracking evaluation code.
    Ground-truth and predicted tracks are associated per frame with a single fixed
    score threshold and (optionally) rendered to disk for FP/FN/IDS analysis.

    Results are written to the provided output_dir.

    Overview of the methods:
    - __init__: Loads GT annotations and predictions stored in JSON format and filters the boxes.
    - evaluate: Performs the per-frame association and renders the boxes.
    - main: Runs the evaluation and dumps the metric data to disk.

    We assume that:
    - Every sample_token is given in the results, although there may be no predictions for that sample.

    Please see https://www.nuscenes.org/tracking for more details.
    """

    def __init__(self,
                 config: TrackingConfig,
                 result_path: str,
                 eval_set: str,
                 output_dir: str,
                 nusc_version: str,
                 nusc_dataroot: str,
                 verbose: bool = True,
                 render_classes: List[str] = None):
        """
        Initialize a TrackingEval object.
        :param config: A TrackingConfig object.
        :param result_path: Path of the nuScenes JSON result file.
        :param eval_set: The dataset split to evaluate on, e.g. train, val or test.
        :param output_dir: Folder to save plots and results to.
        :param nusc_version: The version of the NuScenes dataset.
        :param nusc_dataroot: Path of the nuScenes dataset on disk.
        :param verbose: Whether to print to stdout.
        :param render_classes: Classes to render to disk or None.
        """
        self.cfg = config
        self.result_path = result_path
        self.eval_set = eval_set
        self.output_dir = output_dir
        self.verbose = verbose
        self.render_classes = render_classes

        # Check result file exists.
        assert os.path.exists(result_path), 'Error: The result file does not exist!'

        # Make dirs.
        self.plot_dir = os.path.join(self.output_dir, 'plots')
        if not os.path.isdir(self.output_dir):
            os.makedirs(self.output_dir)
        if not os.path.isdir(self.plot_dir):
            os.makedirs(self.plot_dir)

        # Initialize NuScenes object.
        # Unlike the official code it IS kept on self, because evaluate() needs it
        # to look up samples and sensor records when rendering frames.
        nusc = NuScenes(version=nusc_version, verbose=verbose, dataroot=nusc_dataroot)
        self.nusc = nusc

        # Load data.
        if verbose:
            print('Initializing nuScenes tracking evaluation')
        pred_boxes, self.meta = load_prediction(self.result_path, self.cfg.max_boxes_per_sample, TrackingBox,
                                                verbose=verbose)
        gt_boxes = load_gt(nusc, self.eval_set, TrackingBox, verbose=verbose)

        assert set(pred_boxes.sample_tokens) == set(gt_boxes.sample_tokens), \
            "Samples in split don't match samples in predicted tracks."

        # Add center distances.
        pred_boxes = add_center_dist(nusc, pred_boxes)
        gt_boxes = add_center_dist(nusc, gt_boxes)

        # Filter boxes (distance, points per box, etc.).
        if verbose:
            print('Filtering tracks')
        pred_boxes = filter_eval_boxes(nusc, pred_boxes, self.cfg.class_range, verbose=verbose)
        if verbose:
            print('Filtering ground truth tracks')
        gt_boxes = filter_eval_boxes(nusc, gt_boxes, self.cfg.class_range, verbose=verbose)

        self.sample_tokens = gt_boxes.sample_tokens  # len() is 6019 on the full val split

        # Convert boxes to tracks format.
        self.tracks_gt = create_tracks(gt_boxes, nusc, self.eval_set, gt=True)
        self.tracks_pred = create_tracks(pred_boxes, nusc, self.eval_set, gt=False)

    def evaluate(self) -> Tuple[TrackingMetrics, TrackingMetricDataList]:
        """
        Performs the actual evaluation.
        :return: A tuple of high-level and the raw metric data.

        NOTE(review): this stripped-down variant only runs the association and
        rendering pass; the returned metric containers are created but are not
        populated with computed metrics.
        """
        metrics = TrackingMetrics(self.cfg)

        # -----------------------------------
        # Step 1: Accumulate metric data for all classes and distance thresholds.
        # -----------------------------------
        if self.verbose:
            print('Accumulating metric data...')
        metric_data_list = TrackingMetricDataList()

        def init_outscen():
            """Reset the class-level FP/FN/IDS statistics stored on TrackingRenderer.

            Bin layouts differ between 'car' and pedestrian-like classes
            (absolute-speed ranges and lidar point-count ranges).
            """
            print('init outscen for FP/FN....')
            outscen = render_classes[0]  # 'car' or 'ped'
            TrackingRenderer.outscen = outscen
            TrackingRenderer.his_track = []
            TrackingRenderer.his_trackid = []
            # False-positive distance bins ('-1' = no valid distance).
            TrackingRenderer.fp_disrange = {'<15m':0, '15-30m':0, '30-45m':0, '45-54m':0, '-1': 0}
            if outscen == 'car':
                TrackingRenderer.fp_verange = {'0-0.1m/s':0, '0.1-2.5m/s':0, '2.5-5m/s':0, '5-10m/s':0, '>10m/s': 0 }  # car absolute speed
                TrackingRenderer.fp_ptsnumrange = {'0-5nums':0, '5-10nums':0, '10-50nums':0, '50-200nums':0, '>200nums': 0 }  # car lidar point count
                # Only samples continuously tracked from the previous frame; heading
                # and velocity jumps are each merged with the 'other' category.
                TrackingRenderer.trk_ratio = {'angvar>30':0, '30>angvar>20':0, '20>angvar>10':0, '10>angvar':0, 'vevari>1.5':0, '1.0<vevari<1.5':0, '0.5<vevari<1.0':0, 'vevari<0.5':0}
            else:
                TrackingRenderer.fp_verange = {'0-0.1m/s':0, '0.1-1.0m/s':0, '1.0-1.5m/s':0, '1.5-2m/s':0, '>2m/s': 0 }  # pedestrian absolute speed
                TrackingRenderer.fp_ptsnumrange = {'0-5nums':0, '5-10nums':0, '10-20nums':0, '20-30nums':0, '>30nums': 0 }  # pedestrian lidar point count
                TrackingRenderer.trk_ratio = {'angvar>30':0, '30>angvar>20':0, '20>angvar>10':0, '10>angvar':0, 'vevari>0.5':0, '0.3<vevari<0.5':0, '0.1<vevari<0.3':0, 'vevari<0.1':0}
            TrackingRenderer.fpscorrange = {'0-0.1':0, '0.2-0.4':0, '0.4-0.6':0,'0.6-1.0':0}
            # Camera visibility: fraction of the annotation visible across all 6 images
            # (bins 0-40%, 40-60%, 60-80% and 80-100%).
            TrackingRenderer.vis_ratio = {'0-0.4':0, '0.4-0.6':0, '0.6-0.8':0, '0.8-1.0':0}
            TrackingRenderer.fn_disrange = {'<15m':0, '15-30m':0, '30-45m':0, '45-54m':0, '-1':0}
            if outscen == 'car':
                TrackingRenderer.fn_verange = {'0-0.1m/s':0, '0.1-2.5m/s':0, '2.5-5m/s':0, '5-10m/s':0, '>10m/s': 0 }  # car absolute speed
                # Car lidar point count; sampling reference ratios: 0.21 0.23 0.26 0.2 0.1.
                TrackingRenderer.fn_ptsnumrange = {'0-5nums':0, '5-10nums':0, '10-50nums':0, '50-200nums':0, '>200nums': 0, '-1':0 }
                TrackingRenderer.gt_ratio = {'angvar>30':0, '30>angvar>20':0, '20>angvar>10':0, '10>angvar':0, 'firfn_trk':0, 'nonfirfn_trk':0, 'vevari>1.5':0, '1.0<vevari<1.5':0, '0.5<vevari<1.0':0, 'vevari<0.5':0}
            else:
                TrackingRenderer.fn_verange = {'0-0.1m/s':0, '0.1-1.0m/s':0, '1.0-1.5m/s':0, '1.5-2m/s':0, '>2m/s': 0 }  # pedestrian absolute speed
                TrackingRenderer.fn_ptsnumrange = {'0-5nums':0, '5-10nums':0, '10-20nums':0, '20-30nums':0, '>30nums': 0, '-1':0 }  # pedestrian lidar point count
                TrackingRenderer.gt_ratio = {'angvar>30':0, '30>angvar>20':0, '20>angvar>10':0, '10>angvar':0, 'firfn_trk':0, 'nonfirfn_trk':0, 'vevari>0.5':0, '0.3<vevari<0.5':0, '0.1<vevari<0.3':0, 'vevari<0.1':0}
            if outscen == 'car':
                TrackingRenderer.ids_verange = {'0-0.1m/s':0, '0.1-2.5m/s':0, '2.5-5m/s':0, '5-10m/s':0, '>10m/s': 0 }  # car absolute speed
                # Car: uniform acceleration ~2.778 m/s^2 over 3*0.5 s gives ~1.5 (possibly a bit small).
                TrackingRenderer.mutave_thr = [1.5, 1.0, 0.5]
                TrackingRenderer.ids_factratio = {'delay_trk':0, 'del_oth_trk':0, 'reappear':0, 'reapother':0, 'reapdeltrk':0 ,'vevari>1.5':0, '1.0<vevari<1.5':0, '0.5<vevari<1.0':0, 'vevari<0.5':0}
            else:
                TrackingRenderer.ids_verange = {'0-0.1m/s':0, '0.1-1.0m/s':0, '1.0-1.5m/s':0, '1.5-2m/s':0, '>2m/s': 0 }  # pedestrian absolute speed
                TrackingRenderer.mutave_thr = [0.5, 0.3, 0.1]  # pedestrian
                TrackingRenderer.ids_factratio = {'delay_trk':0, 'del_oth_trk':0, 'reappear':0, 'reapother':0, 'reapdeltrk':0,'vevari>0.5':0, '0.3<vevari<0.5':0, '0.1<vevari<0.3':0, 'vevari<0.1':0 }
            if outscen == 'car':
                TrackingRenderer.gt_ptsnumrange = {'0-5nums':0, '5-10nums':0, '10-50nums':0, '50-200nums':0, '>200nums': 0 }  # car lidar point count
            else:
                TrackingRenderer.gt_ptsnumrange = {'0-5nums':0, '5-10nums':0, '10-20nums':0, '20-30nums':0, '>30nums': 0 }  # pedestrian lidar point count
            TrackingRenderer.fault_datas = 0

        def accumulate_class(curr_class_name):
            """Associate GT and predicted tracks of one class frame by frame
            (single score threshold) and render the configured scenes."""
            if self.verbose:
                print('Computing metrics for class %s...\n' % curr_class_name)
            init_outscen()

            # Count GT boxes of this class to detect missing classes.
            gt_box_count = 0
            gt_track_ids = set()
            for scene_tracks_gt in self.tracks_gt.values():
                for frame_gt in scene_tracks_gt.values():
                    for box in frame_gt:
                        if box.tracking_name == curr_class_name:
                            gt_box_count += 1
                            gt_track_ids.add(box.tracking_id)
            if gt_box_count == 0:
                # Class absent from GT; the official code would return nan metrics here.
                print("gtboxcount=0")

            # Single fixed score threshold. The official code instead sweeps
            # hypothetical recall thresholds (10%, 20%, ...).
            thresholds = np.array([0.1])
            if self.verbose:
                print('Computed thresholds\n')

            for t, threshold in enumerate(thresholds):
                # If the recall threshold is not achieved, the official code assigns
                # the worst possible value in AMOTA and AMOTP.
                if np.isnan(threshold):
                    continue
                # Do not compute the same threshold twice. This becomes relevant when
                # a user submits many boxes with the exact same score.
                if threshold in thresholds[:t]:
                    continue

                accs = []
                scores = []  # Scores of the TPs; used to determine recall thresholds.
                # Go through all frames and associate ground truth and tracker results.
                tracks_gt = self.tracks_gt
                scene_num_id = 0
                renderer = None  # Bug fix: keep defined even if there are no scenes.
                for scene_id in tqdm.tqdm(list(tracks_gt.keys()), disable=not self.verbose, leave=False):  # per scene
                    # Initialize accumulator and frame_id for this scene.
                    acc = MOTAccumulatorCustom()
                    frame_id = 0  # Frame ids must be unique across all scenes.

                    # Retrieve GT and preds.
                    scene_tracks_gt = tracks_gt[scene_id]
                    scene_tracks_pred = self.tracks_pred[scene_id]

                    # Create a renderer for the first scene_id_thr scenes of a rendered class.
                    if curr_class_name in self.render_classes and threshold is not None and scene_num_id < scene_id_thr:
                        save_path = os.path.join(self.output_dir, 'render', str(scene_id), curr_class_name)
                        os.makedirs(save_path, exist_ok=True)
                        renderer = TrackingRenderer(save_path)
                    else:
                        renderer = None

                    for timestamp in scene_tracks_gt.keys():  # each frame of the scene
                        # Select only the current class.
                        frame_gt = scene_tracks_gt[timestamp]
                        frame_pred = scene_tracks_pred[timestamp]
                        frame_gt = [f for f in frame_gt if f.tracking_name == curr_class_name]
                        frame_pred = [f for f in frame_pred if f.tracking_name == curr_class_name]

                        # Threshold boxes by score. Note that the scores were previously
                        # averaged over the whole track.
                        if threshold is not None:
                            frame_pred = [f for f in frame_pred if f.tracking_score >= threshold]

                        # Abort if there are neither GT nor pred boxes.
                        gt_ids = [gg.tracking_id for gg in frame_gt]
                        pred_ids = [tt.tracking_id for tt in frame_pred]
                        if len(gt_ids) == 0 and len(pred_ids) == 0:
                            continue

                        # Calculate distances. The distance function is hard-coded to
                        # achieve significant speedups via vectorization.
                        assert self.cfg.dist_fcn_callable.__name__ == 'center_distance'
                        if len(frame_gt) == 0 or len(frame_pred) == 0:
                            distances = np.ones((0, 0))
                        else:
                            gt_boxes = np.array([b.translation[:2] for b in frame_gt])
                            pred_boxes = np.array([b.translation[:2] for b in frame_pred])
                            distances = sklearn.metrics.pairwise.euclidean_distances(gt_boxes, pred_boxes)

                        # Distances that are larger than the threshold won't be associated.
                        assert len(distances) == 0 or not np.all(np.isnan(distances))
                        distances[distances >= self.cfg.dist_th_tp] = np.nan

                        # Accumulate results. We cannot use the timestamp as frameid
                        # because motmetrics assumes it is an integer.
                        acc.update(gt_ids, pred_ids, distances, frameid=frame_id)

                        # Store scores of matches, used to determine recall thresholds.
                        if threshold is not None:
                            events = acc.events.loc[frame_id]
                            matches = events[events.Type == 'MATCH']
                            match_ids = matches.HId.values
                            match_scores = [tt.tracking_score for tt in frame_pred if tt.tracking_id in match_ids]
                            scores.extend(match_scores)
                        else:
                            events = None

                        # Render the boxes in this frame.
                        if curr_class_name in self.render_classes and threshold is not None and scene_num_id < scene_id_thr:
                            # Load the lidar point data of this frame, following
                            # https://github.com/nutonomy/nuscenes-devkit/blob/master/python-sdk/nuscenes/scripts/export_kitti.py
                            try:
                                frame0 = frame_pred[0]
                            except IndexError:  # Bug fix: was a bare except.
                                frame0 = scene_tracks_gt[timestamp][0]
                            # Any box of this frame identifies the sample.
                            sample = self.nusc.get('sample', frame0.sample_token)
                            lidar_token = sample['data'][lidar_name]

                            # Retrieve sensor records. Note that the lidar filename
                            # carries the lidar timestamp, not the camera's.
                            sd_record_lid = self.nusc.get('sample_data', lidar_token)
                            cs_record = self.nusc.get('calibrated_sensor', sd_record_lid["calibrated_sensor_token"])
                            pose_record = self.nusc.get('ego_pose', sd_record_lid["ego_pose_token"])

                            filename_lid_full = sd_record_lid['filename']
                            src_lid_path = os.path.join(datset_path, filename_lid_full)
                            points = LidarPointCloud.from_file(src_lid_path)
                            # Only this specific scene is rendered; scene_id_thr must cover it.
                            if scene_id == "4efbf4c0b77f467385fc2e19da45c989":
                                renderer.render(events, timestamp, frame_gt, frame_pred, points, pose_record, cs_record, ifplotgt, threshold, ifpltsco, outscen_class, nusc= self.nusc, ifplthis=ifplthis)

                        # Increment the frame_id, unless there are no boxes
                        # (equivalent to what motmetrics does).
                        frame_id += 1
                    scene_num_id += 1
                    accs.append(acc)
                print("visually have done!")
                if outscen_class and renderer is not None:
                    print('trk_ratio:',TrackingRenderer.trk_ratio,'\n', 'fp_disrange:',TrackingRenderer.fp_disrange, '\n', 'fp_verange:', TrackingRenderer.fp_verange, '\n', 'fp_ptsnumrange:',TrackingRenderer.fp_ptsnumrange, '\n', \
                          'fpscorrange', TrackingRenderer.fpscorrange, '\n', 'gt_ratio', TrackingRenderer.gt_ratio, '\n', 'vis_ratio', TrackingRenderer.vis_ratio, '\n', 'fn_disrange', TrackingRenderer.fn_disrange, '\n',\
                          'fn_verange', TrackingRenderer.fn_verange, '\n', 'fn_ptsnumrange', TrackingRenderer.fn_ptsnumrange, '\n', 'ids_verange', TrackingRenderer.ids_verange, '\n', 'ids_factratio', TrackingRenderer.ids_factratio, '\n',\
                          'gt_ptsnumrange', TrackingRenderer.gt_ptsnumrange, 'at least fault_datas', TrackingRenderer.fault_datas )

        for class_name in self.cfg.class_names:
            accumulate_class(class_name)

        # Bug fix: the signature promises a tuple and main() unpacks the result,
        # but previously nothing was returned (-> TypeError in main()).
        return metrics, metric_data_list

    def main(self, render_curves: bool = True) -> Dict[str, Any]:
        """
        Main function that loads the evaluation code, visualizes samples, runs the
        evaluation and renders stat plots.
        :param render_curves: Whether to render PR and TP curves to disk.
        :return: The serialized TrackingMetrics computed during evaluation.
        """
        # Run evaluation.
        metrics, metric_data_list = self.evaluate()

        # Dump the metric data, meta and metrics to disk.
        if self.verbose:
            print('Saving metrics to: %s' % self.output_dir)
        metrics_summary = metrics.serialize()
        metrics_summary['meta'] = self.meta.copy()
        with open(os.path.join(self.output_dir, 'metrics_summary.json'), 'w') as f:
            json.dump(metrics_summary, f, indent=2)
        with open(os.path.join(self.output_dir, 'metrics_details.json'), 'w') as f:
            json.dump(metric_data_list.serialize(), f, indent=2)

        # Print metrics to stdout.
        if self.verbose:
            print_final_metrics(metrics)

        # Render curves.
        # Bug fix guard: this stripped-down class does not define render(), so the
        # previous unconditional self.render(...) call raised AttributeError.
        if render_curves:
            render_fn = getattr(self, 'render', None)
            if render_fn is not None:
                render_fn(metric_data_list)
            else:
                print('Warning: render() is not implemented in this variant; skipping curve rendering.')

        return metrics_summary
if __name__ == "__main__":
    # Command-line entry point: parse settings and run the tracking evaluation.
    parser = argparse.ArgumentParser(description='Evaluate nuScenes tracking results.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Fix: `default="%s" % (result_file)` was a redundant format of a string.
    parser.add_argument('--result_path', type=str, default=result_file, help='The submission as a JSON file.')
    parser.add_argument('--output_dir', type=str, default=save_path,
                        help='Folder to store result metrics, graphs and example visualizations.')
    parser.add_argument('--eval_set', type=str, default='val',
                        help='Which dataset split to evaluate on, train, val or test.')
    parser.add_argument('--dataroot', type=str, default=datset_path,
                        help='Default nuScenes data directory.')
    parser.add_argument('--version', type=str, default='v1.0-trainval',
                        help='Which version of the nuScenes dataset to evaluate on, e.g. v1.0-trainval.')
    parser.add_argument('--config_path', type=str, default='',
                        help='Path to the configuration file.'
                             'If no path given, the NIPS 2019 configuration will be used.')
    parser.add_argument('--render_curves', type=int, default=1,
                        help='Whether to render statistic curves to disk.')
    parser.add_argument('--verbose', type=int, default=1,
                        help='Whether to print to stdout.')
    parser.add_argument('--render_classes', type=str, default=render_classes, nargs='+',
                        help='For which classes we render tracking results to disk.')
    args = parser.parse_args()

    # Normalise argument values (expand '~', coerce int flags to bool).
    result_path_ = os.path.expanduser(args.result_path)
    output_dir_ = os.path.expanduser(args.output_dir)
    eval_set_ = args.eval_set
    dataroot_ = args.dataroot
    version_ = args.version
    config_path = args.config_path
    render_curves_ = bool(args.render_curves)
    verbose_ = bool(args.verbose)
    render_classes_ = args.render_classes

    # Use the official NIPS 2019 tracking config unless an explicit one is given.
    if config_path == '':
        cfg_ = config_factory('tracking_nips_2019')
    else:
        with open(config_path, 'r') as _f:
            cfg_ = TrackingConfig.deserialize(json.load(_f))

    nusc_eval = TrackingEval(config=cfg_, result_path=result_path_, eval_set=eval_set_, output_dir=output_dir_,
                             nusc_version=version_, nusc_dataroot=dataroot_, verbose=verbose_,
                             render_classes=render_classes_)
    nusc_eval.main(render_curves=render_curves_)
# ---------------------------------------------------------------------------
# NOTE: a second, duplicated copy of this module follows below
# (file-concatenation artifact; a stray '|' separator was removed here).
# ---------------------------------------------------------------------------
# nuScenes dev-kit.
# Code written by <NAME>, <NAME> and <NAME>, 2019.
import trk_render as render
import argparse
import json
import os
import time
from typing import Tuple, List, Dict, Any, Callable, Tuple
import tqdm
import sklearn
import numpy as np
import unittest
try:
import pandas
except ModuleNotFoundError:
raise unittest.SkipTest('Skipping test as pandas was not found!')
from nuscenes.nuscenes import NuScenes
from nuscenes.eval.common.config import config_factory
from nuscenes.eval.common.loaders import load_prediction, load_gt, add_center_dist, filter_eval_boxes
from nuscenes.eval.tracking.algo import TrackingEvaluation
from nuscenes.eval.tracking.constants import AVG_METRIC_MAP, MOT_METRIC_MAP, LEGACY_METRICS
from nuscenes.eval.tracking.data_classes import TrackingMetrics, TrackingMetricDataList, TrackingConfig, TrackingBox, \
TrackingMetricData
from nuscenes.eval.tracking.loaders import create_tracks
#from nuscenes.eval.tracking.render import TrackingRenderer, recall_metric_curve, summary_plot
from trk_render import TrackingRenderer, recall_metric_curve, summary_plot
from nuscenes.eval.tracking.utils import print_final_metrics
from nuscenes.eval.tracking.mot import MOTAccumulatorCustom
from nuscenes.utils.data_classes import LidarPointCloud#读取点云
save_path = "./visresult/cp_vis_zs"  # output directory for visualisation results
datset_path = "/share/nuscenes/v1.0-trainval/"  # nuScenes dataset root (name kept as-is, incl. typo)
# Alternative tracking-result files from earlier runs (uncomment to switch):
#result_file = "/share/OpenPCDet/output/cfgs/nuscenes_models/cbgs_pp_multihead/pp_neiborcbam_iouglogred/eval/epoch_20/val/default/final_result/data/trk_results/trk_results_nusc.json"
#result_file = "/share/lycode/work_dirs/cp_imgdefroi_reid/cp_imgdefroi_reid_mgpu1/cp_ve_epo20_GT/trk_results/trk_results_nusc.json"
#result_file = "/share/lycode/nuscenes-devkit/python-sdk/nuscenes/eval/tracking/trk_results/tracking_result_filter.json"
# Tracking submission (nuScenes JSON format) that will be evaluated/visualised.
result_file = "/share/lycode/CenterPoint/work_dirs/voxel/trk/tracking_result.json"
#result_file = "/share/OpenPCDet/output/cfgs/nuscenes_models/cbgs_second_multihead/sec_neiborcbamreid3_marepre/eval/epoch_20/val/default/final_result/data/trk_results/trk_results_nusc.json"  # result file
#result_file = "/share/nuscenes/OpenPCDet/output/cfgs/nuscenes_models/pointrcnn_2/default/eval/eval_with_train/epoch_20/val/final_result/data/results_nusc.json"  # result file
#result_file = "/share/OpenPCDet/output/cfgs/nuscenes_models/cbgs_second_multihead/default/eval/epoch_6229/val/default/final_result/data/\
#trk_results/trk_results_nusc.json"
#info_path = datset_path + "/nuscenes_infos_10sweeps_val.pkl"
# Classes to render. Full nuScenes class list:
# ['car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier',
#  'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone']
render_classes = ["car"]
frame_id_thr = 30  # unused
ifplotgt = False  # whether to also draw ground-truth boxes
ifpltsco = False  # whether to draw tracking scores next to boxes
ifdetection = False  # not implemented
outscen_class = False  # whether to output the FP/FN breakdown; single-class runs only ('car' or 'pedestrian')
ifplthis = False  # whether to draw historical trajectory points
scene_id_thr = 150  # 150 scenes total (~40 frames each); only the first scene_id_thr scenes are drawn
lidar_name = 'LIDAR_TOP'  # sensor channel the point clouds are read from
cam_name = 'CAM_FRONT'  # camera channel (currently unused)
class TrackingEval:
"""
This is the official nuScenes tracking evaluation code.
Results are written to the provided output_dir.
Here is an overview of the functions in this method:
- init: Loads GT annotations and predictions stored in JSON format and filters the boxes.
- run: Performs evaluation and dumps the metric data to disk.
- render: Renders various plots and dumps to disk.
We assume that:
- Every sample_token is given in the results, although there may be not predictions for that sample.
Please see https://www.nuscenes.org/tracking for more details.
"""
def __init__(self,
             config: TrackingConfig,
             result_path: str,
             eval_set: str,
             output_dir: str,
             nusc_version: str,
             nusc_dataroot: str,
             verbose: bool = True,
             render_classes: List[str] = None):
    """
    Initialize a TrackingEval object.
    :param config: A TrackingConfig object.
    :param result_path: Path of the nuScenes JSON result file.
    :param eval_set: The dataset split to evaluate on, e.g. train, val or test.
    :param output_dir: Folder to save plots and results to.
    :param nusc_version: The version of the NuScenes dataset.
    :param nusc_dataroot: Path of the nuScenes dataset on disk.
    :param verbose: Whether to print to stdout.
    :param render_classes: Classes to render to disk or None.
    """
    self.cfg = config
    self.result_path = result_path
    self.eval_set = eval_set
    self.output_dir = output_dir
    self.verbose = verbose
    self.render_classes = render_classes
    # Check result file exists.
    assert os.path.exists(result_path), 'Error: The result file does not exist!'
    # Make dirs.
    self.plot_dir = os.path.join(self.output_dir, 'plots')
    if not os.path.isdir(self.output_dir):
        os.makedirs(self.output_dir)
    if not os.path.isdir(self.plot_dir):
        os.makedirs(self.plot_dir)
    # Initialize NuScenes object.
    # (The comment below is inherited from the official code; here the object IS
    # kept on self because evaluate() needs it to look up samples when rendering.)
    # We do not store it in self to let garbage collection take care of it and save memory.
    nusc = NuScenes(version=nusc_version, verbose=verbose, dataroot=nusc_dataroot)
    self.nusc = nusc
    # Load data.
    if verbose:
        print('Initializing nuScenes tracking evaluation')
    pred_boxes, self.meta = load_prediction(self.result_path, self.cfg.max_boxes_per_sample, TrackingBox,
                                            verbose=verbose)
    gt_boxes = load_gt(nusc, self.eval_set, TrackingBox, verbose=verbose)
    assert set(pred_boxes.sample_tokens) == set(gt_boxes.sample_tokens), \
        "Samples in split don't match samples in predicted tracks."
    # Add center distances.
    pred_boxes = add_center_dist(nusc, pred_boxes)
    gt_boxes = add_center_dist(nusc, gt_boxes)
    # Filter boxes (distance, points per box, etc.).
    if verbose:
        print('Filtering tracks')
    pred_boxes = filter_eval_boxes(nusc, pred_boxes, self.cfg.class_range, verbose=verbose)
    if verbose:
        print('Filtering ground truth tracks')
    gt_boxes = filter_eval_boxes(nusc, gt_boxes, self.cfg.class_range, verbose=verbose)
    self.sample_tokens = gt_boxes.sample_tokens  # len() is 6019 on the full val split
    # Convert boxes to tracks format.
    self.tracks_gt = create_tracks(gt_boxes, nusc, self.eval_set, gt=True)
    self.tracks_pred = create_tracks(pred_boxes, nusc, self.eval_set, gt=False)
def evaluate(self) -> Tuple[TrackingMetrics, TrackingMetricDataList]:
"""
Performs the actual evaluation.
:return: A tuple of high-level and the raw metric data.
"""
start_time = time.time()
metrics = TrackingMetrics(self.cfg)
# -----------------------------------
# Step 1: Accumulate metric data for all classes and distance thresholds.
# -----------------------------------
if self.verbose:
print('Accumulating metric data...')
metric_data_list = TrackingMetricDataList()
def init_outscen():
print('init outscen for FP/FN....')
outscen = render_classes[0] # 'car' 'ped'
TrackingRenderer.outscen = outscen
TrackingRenderer.his_track = []
TrackingRenderer.his_trackid = []
TrackingRenderer.fp_disrange = {'<15m':0, '15-30m':0, '30-45m':0, '45-54m':0, '-1': 0}
if outscen == 'car':
TrackingRenderer.fp_verange = {'0-0.1m/s':0, '0.1-2.5m/s':0, '2.5-5m/s':0, '5-10m/s':0, '>10m/s': 0 } #car 绝对速度
TrackingRenderer.fp_ptsnumrange = {'0-5nums':0, '5-10nums':0, '10-50nums':0, '50-200nums':0, '>200nums': 0 } #car lidar点云数
TrackingRenderer.trk_ratio = {'angvar>30':0, '30>angvar>20':0, '20>angvar>10':0, '10>angvar':0, 'vevari>1.5':0, '1.0<vevari<1.5':0, '0.5<vevari<1.0':0, 'vevari<0.5':0} #仅包含与上一帧持续追踪的样本,角度和速度突变分别与other相并
else:
TrackingRenderer.fp_verange = {'0-0.1m/s':0, '0.1-1.0m/s':0, '1.0-1.5m/s':0, '1.5-2m/s':0, '>2m/s': 0 } #ped 绝对速度
TrackingRenderer.fp_ptsnumrange = {'0-5nums':0, '5-10nums':0, '10-20nums':0, '20-30nums':0, '>30nums': 0 } #ped lidar点云数
TrackingRenderer.trk_ratio = {'angvar>30':0, '30>angvar>20':0, '20>angvar>10':0, '10>angvar':0, 'vevari>0.5':0, '0.3<vevari<0.5':0, '0.1<vevari<0.3':0, 'vevari<0.1':0} #仅包含与上一帧持续追踪的样本,角度和速度突变分别与other相并
TrackingRenderer.fpscorrange = {'0-0.1':0, '0.2-0.4':0, '0.4-0.6':0,'0.6-1.0':0}
TrackingRenderer.vis_ratio = {'0-0.4':0, '0.4-0.6':0, '0.6-0.8':0, '0.8-1.0':0} #相机视角 0-40%, 40-60%, 60-80% and 80-100% The visibility of an instance is the fraction of annotation visible in all 6 images.
TrackingRenderer.fn_disrange = {'<15m':0, '15-30m':0, '30-45m':0, '45-54m':0, '-1':0}
if outscen == 'car':
TrackingRenderer.fn_verange = {'0-0.1m/s':0, '0.1-2.5m/s':0, '2.5-5m/s':0, '5-10m/s':0, '>10m/s': 0 } #car 绝对速度
TrackingRenderer.fn_ptsnumrange = {'0-5nums':0, '5-10nums':0, '10-50nums':0, '50-200nums':0, '>200nums': 0, '-1':0 } #car lidar点云数 抽样参考比例:0.21 0.23 0.26,0.2,0.1
TrackingRenderer.gt_ratio = {'angvar>30':0, '30>angvar>20':0, '20>angvar>10':0, '10>angvar':0, 'firfn_trk':0, 'nonfirfn_trk':0, 'vevari>1.5':0, '1.0<vevari<1.5':0, '0.5<vevari<1.0':0, 'vevari<0.5':0} #仅包含与上一帧持续追踪的样本,角度和速度突变分别与other相并
else:
TrackingRenderer.fn_verange = {'0-0.1m/s':0, '0.1-1.0m/s':0, '1.0-1.5m/s':0, '1.5-2m/s':0, '>2m/s': 0 } #ped 绝对速度
TrackingRenderer.fn_ptsnumrange = {'0-5nums':0, '5-10nums':0, '10-20nums':0, '20-30nums':0, '>30nums': 0, '-1':0 } #ped lidar点云数
TrackingRenderer.gt_ratio = {'angvar>30':0, '30>angvar>20':0, '20>angvar>10':0, '10>angvar':0, 'firfn_trk':0, 'nonfirfn_trk':0, 'vevari>0.5':0, '0.3<vevari<0.5':0, '0.1<vevari<0.3':0, 'vevari<0.1':0} #仅包含与上一帧持续追踪的样本,角度和速度突变分别与other相并
if outscen == 'car':
TrackingRenderer.ids_verange = {'0-0.1m/s':0, '0.1-2.5m/s':0, '2.5-5m/s':0, '5-10m/s':0, '>10m/s': 0 } #car 绝对速度
TrackingRenderer.mutave_thr = [1.5, 1.0, 0.5] # car均匀加速度为2.778m/s^2 3*0.5s=1.5 #可能有点小
TrackingRenderer.ids_factratio = {'delay_trk':0, 'del_oth_trk':0, 'reappear':0, 'reapother':0, 'reapdeltrk':0 ,'vevari>1.5':0, '1.0<vevari<1.5':0, '0.5<vevari<1.0':0, 'vevari<0.5':0}
else:
TrackingRenderer.ids_verange = {'0-0.1m/s':0, '0.1-1.0m/s':0, '1.0-1.5m/s':0, '1.5-2m/s':0, '>2m/s': 0 } #ped 绝对速度
TrackingRenderer.mutave_thr = [0.5, 0.3, 0.1] # ped
TrackingRenderer.ids_factratio = {'delay_trk':0, 'del_oth_trk':0, 'reappear':0, 'reapother':0, 'reapdeltrk':0,'vevari>0.5':0, '0.3<vevari<0.5':0, '0.1<vevari<0.3':0, 'vevari<0.1':0 }
if outscen == 'car':
TrackingRenderer.gt_ptsnumrange = {'0-5nums':0, '5-10nums':0, '10-50nums':0, '50-200nums':0, '>200nums': 0 } #car lidar点云数
else:
TrackingRenderer.gt_ptsnumrange = {'0-5nums':0, '5-10nums':0, '10-20nums':0, '20-30nums':0, '>30nums': 0 } #ped lidar点云数
TrackingRenderer.fault_datas = 0
def accumulate_class(curr_class_name):
# curr_ev = TrackingEvaluation(self.tracks_gt, self.tracks_pred, curr_class_name, self.cfg.dist_fcn_callable,\
# self.cfg.dist_th_tp, self.cfg.min_recall,\
# num_thresholds=TrackingMetricData.nelem,\
# metric_worst=self.cfg.metric_worst,\
# verbose=self.verbose,\
# output_dir=self.output_dir,\
# render_classes=self.render_classes)
#curr_md = curr_ev.accumulate()
"""
Compute metrics for all recall thresholds of the current class.
:return: TrackingMetricData instance which holds the metrics for each threshold.
"""
# Init.
if self.verbose:
print('Computing metrics for class %s...\n' % curr_class_name)
accumulators = []
thresh_metrics = []
init_outscen()
#md = TrackingMetricData()
# Skip missing classes.
gt_box_count = 0
gt_track_ids = set()
for scene_tracks_gt in self.tracks_gt.values():
for frame_gt in scene_tracks_gt.values():
for box in frame_gt:
if box.tracking_name == curr_class_name:
gt_box_count += 1
gt_track_ids.add(box.tracking_id)
if gt_box_count == 0:
print("gtboxcount=0")
# Do not add any metric. The average metrics will then be nan.
#return md
# Register mot metrics.
#mh = create_motmetrics()
# Get thresholds.
# Note: The recall values are the hypothetical recall (10%, 20%, ..).
# The actual recall may vary as there is no way to compute it without trying all thresholds.
thresholds = np.array([0.1]) #, recalls = self.compute_thresholds(gt_box_count)
#md.confidence = thresholds
#md.recall_hypo = recalls
if self.verbose:
print('Computed thresholds\n')
for t, threshold in enumerate(thresholds):
# If recall threshold is not achieved, we assign the worst possible value in AMOTA and AMOTP.
if np.isnan(threshold):
continue
# Do not compute the same threshold twice.
# This becomes relevant when a user submits many boxes with the exact same score.
if threshold in thresholds[:t]:
continue
"""
Accumulate metrics for a particular recall threshold of the current class.
The scores are only computed if threshold is set to None. This is used to infer the recall thresholds.
:param threshold: score threshold used to determine positives and negatives.
:return: (The MOTAccumulator that stores all the hits/misses/etc, Scores for each TP).
"""
accs = []
scores = [] # The scores of the TPs. These are used to determine the recall thresholds initially.
# Go through all frames and associate ground truth and tracker results.
# Groundtruth and tracker contain lists for every single frame containing lists detections.
tracks_gt = self.tracks_gt
scene_num_id = 0
sum_fp = 0
sum_fn = 0
for scene_id in tqdm.tqdm(list(tracks_gt.keys()), disable=not self.verbose, leave=False):#按场景
# Initialize accumulator and frame_id for this scene
acc = MOTAccumulatorCustom()
frame_id = 0 # Frame ids must be unique across all scenes
# Retrieve GT and preds.
scene_tracks_gt = tracks_gt[scene_id]
scene_tracks_pred = self.tracks_pred[scene_id]
# if len(tracks_gt) == 151:
# tracks_gt.pop('0')
# Visualize the boxes in this frame.
if curr_class_name in self.render_classes and threshold is not None and scene_num_id < scene_id_thr:
save_path = os.path.join(self.output_dir, 'render', str(scene_id), curr_class_name)
os.makedirs(save_path, exist_ok=True)
renderer = TrackingRenderer(save_path)
else:
renderer = None
for timestamp in scene_tracks_gt.keys(): #每个场景分别每帧
# Select only the current class.
frame_gt = scene_tracks_gt[timestamp]
frame_pred = scene_tracks_pred[timestamp]
frame_gt = [f for f in frame_gt if f.tracking_name == curr_class_name]
frame_pred = [f for f in frame_pred if f.tracking_name == curr_class_name]
# Threshold boxes by score. Note that the scores were previously averaged over the whole track.
if threshold is not None:
frame_pred = [f for f in frame_pred if f.tracking_score >= threshold]
# Abort if there are neither GT nor pred boxes.
gt_ids = [gg.tracking_id for gg in frame_gt]
pred_ids = [tt.tracking_id for tt in frame_pred]
if len(gt_ids) == 0 and len(pred_ids) == 0:
continue
# Calculate distances.
# Note that the distance function is hard-coded to achieve significant speedups via vectorization.
assert self.cfg.dist_fcn_callable.__name__ == 'center_distance'
if len(frame_gt) == 0 or len(frame_pred) == 0:
distances = np.ones((0, 0))
else:
gt_boxes = np.array([b.translation[:2] for b in frame_gt])
pred_boxes = np.array([b.translation[:2] for b in frame_pred])
distances = sklearn.metrics.pairwise.euclidean_distances(gt_boxes, pred_boxes)
# Distances that are larger than the threshold won't be associated.
assert len(distances) == 0 or not np.all(np.isnan(distances))
distances[distances >= self.cfg.dist_th_tp] = np.nan
# Accumulate results.
# Note that we cannot use timestamp as frameid as motmetrics assumes it's an integer.
acc.update(gt_ids, pred_ids, distances, frameid=frame_id)
# Store scores of matches, which are used to determine recall thresholds.
if threshold is not None:
events = acc.events.loc[frame_id]
matches = events[events.Type == 'MATCH']
match_ids = matches.HId.values
match_scores = [tt.tracking_score for tt in frame_pred if tt.tracking_id in match_ids]
scores.extend(match_scores)
else:
events = None
# Render the boxes in this frame.
if curr_class_name in self.render_classes and threshold is not None and scene_num_id < scene_id_thr:
# load lidar points data按每帧加载
#https://github.com/nutonomy/nuscenes-devkit/blob/master/python-sdk/nuscenes/scripts/export_kitti.py
try:
frame0 = frame_pred[0]
except:
frame0 = scene_tracks_gt[timestamp][0]
sample = self.nusc.get('sample', frame0.sample_token) #frame_pred是该帧所有的物体
#sample_annotation_tokens = sample['anns'] #标注
#cam_front_token = sample['data'][cam_name]#某点位的图像
lidar_token = sample['data'][lidar_name]
# Retrieve sensor records.
#sd_record_cam = self.nusc.get('sample_data', cam_front_token)
sd_record_lid = self.nusc.get('sample_data', lidar_token)
cs_record = self.nusc.get('calibrated_sensor', sd_record_lid["calibrated_sensor_token"])
pose_record = self.nusc.get('ego_pose', sd_record_lid["ego_pose_token"])
#cs_record_cam = self.nusc.get('calibrated_sensor', sd_record_cam['calibrated_sensor_token'])
#cs_record_lid = self.nusc.get('calibrated_sensor', sd_record_lid['calibrated_sensor_token'])
# Retrieve the token from the lidar.
# Note that this may be confusing as the filename of the camera will include the timestamp of the lidar,
# not the camera.
#filename_cam_full = sd_record_cam['filename']
filename_lid_full = sd_record_lid['filename']
src_lid_path = os.path.join(datset_path, filename_lid_full)
points = LidarPointCloud.from_file(src_lid_path)
if scene_id == "4efbf4c0b77f467385fc2e19da45c989": #or lidar_token == "16be583c31a2403caa6c158bb55ae616":#选择特定帧 上面要设成150个场景
renderer.render(events, timestamp, frame_gt, frame_pred, points, pose_record, cs_record, ifplotgt, threshold, ifpltsco, outscen_class, nusc= self.nusc, ifplthis=ifplthis)
# Increment the frame_id, unless there are no boxes (equivalent to what motmetrics does).
frame_id += 1
scene_num_id += 1
accs.append(acc)
print("visually have done!")
if outscen_class and renderer is not None:
print('trk_ratio:',TrackingRenderer.trk_ratio,'\n', 'fp_disrange:',TrackingRenderer.fp_disrange, '\n', 'fp_verange:', TrackingRenderer.fp_verange, '\n', 'fp_ptsnumrange:',TrackingRenderer.fp_ptsnumrange, '\n', \
'fpscorrange', TrackingRenderer.fpscorrange, '\n', 'gt_ratio', TrackingRenderer.gt_ratio, '\n', 'vis_ratio', TrackingRenderer.vis_ratio, '\n', 'fn_disrange', TrackingRenderer.fn_disrange, '\n',\
'fn_verange', TrackingRenderer.fn_verange, '\n', 'fn_ptsnumrange', TrackingRenderer.fn_ptsnumrange, '\n', 'ids_verange', TrackingRenderer.ids_verange, '\n', 'ids_factratio', TrackingRenderer.ids_factratio, '\n',\
'gt_ptsnumrange', TrackingRenderer.gt_ptsnumrange, 'at least fault_datas', TrackingRenderer.fault_datas )
# Accumulate track data.
#acc, _ = self.accumulate_threshold(threshold)
#accumulators.append(acc)
# # Compute metrics for current threshold.
# thresh_name = self.name_gen(threshold)
# thresh_summary = mh.compute(acc, metrics=MOT_METRIC_MAP.keys(), name=thresh_name)
# thresh_metrics.append(thresh_summary)
# # Print metrics to stdout.
# if self.verbose:
# print_threshold_metrics(thresh_summary.to_dict())
for class_name in self.cfg.class_names:
accumulate_class(class_name)
def main(self, render_curves: bool = True) -> Dict[str, Any]:
    """Run the full evaluation pipeline and persist its outputs.

    Computes the tracking metrics, writes the metric summary and the
    per-threshold metric details as JSON files into ``self.output_dir``,
    optionally prints the final metrics to stdout, and optionally renders
    the statistic curves to disk.

    :param render_curves: Whether to render PR and TP curves to disk.
    :return: The serialized TrackingMetrics computed during evaluation.
    """
    # Compute the high-level metrics plus the raw per-class metric data.
    metrics, metric_data_list = self.evaluate()

    if self.verbose:
        print('Saving metrics to: %s' % self.output_dir)

    # Serialize the metrics and attach the submission meta information.
    metrics_summary = metrics.serialize()
    metrics_summary['meta'] = self.meta.copy()

    # Persist the summary, then the detailed per-threshold data.
    with open(os.path.join(self.output_dir, 'metrics_summary.json'), 'w') as summary_file:
        json.dump(metrics_summary, summary_file, indent=2)
    with open(os.path.join(self.output_dir, 'metrics_details.json'), 'w') as details_file:
        json.dump(metric_data_list.serialize(), details_file, indent=2)

    # Optionally report to stdout.
    if self.verbose:
        print_final_metrics(metrics)

    # Optionally render the curves.
    if render_curves:
        self.render(metric_data_list)

    return metrics_summary
if __name__ == "__main__":
    # Command-line settings. The defaults fall back to the module-level
    # paths/values (result_file, save_path, datset_path, render_classes)
    # configured near the top of this file.
    parser = argparse.ArgumentParser(description='Evaluate nuScenes tracking results.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # NOTE: result_file is already a string path, so it is passed directly
    # instead of going through a redundant "%s" % (...) conversion.
    parser.add_argument('--result_path', type=str, default=result_file,
                        help='The submission as a JSON file.')
    parser.add_argument('--output_dir', type=str, default=save_path,
                        help='Folder to store result metrics, graphs and example visualizations.')
    parser.add_argument('--eval_set', type=str, default='val',
                        help='Which dataset split to evaluate on, train, val or test.')
    parser.add_argument('--dataroot', type=str, default=datset_path,
                        help='Default nuScenes data directory.')
    parser.add_argument('--version', type=str, default='v1.0-trainval',
                        help='Which version of the nuScenes dataset to evaluate on, e.g. v1.0-trainval.')
    parser.add_argument('--config_path', type=str, default='',
                        help='Path to the configuration file.'
                             'If no path given, the NIPS 2019 configuration will be used.')
    parser.add_argument('--render_curves', type=int, default=1,
                        help='Whether to render statistic curves to disk.')
    parser.add_argument('--verbose', type=int, default=1,
                        help='Whether to print to stdout.')
    parser.add_argument('--render_classes', type=str, default=render_classes, nargs='+',
                        help='For which classes we render tracking results to disk.')
    args = parser.parse_args()

    # Normalize the parsed arguments (the trailing underscore marks locals,
    # matching the existing convention; config_path is renamed accordingly).
    result_path_ = os.path.expanduser(args.result_path)
    output_dir_ = os.path.expanduser(args.output_dir)
    eval_set_ = args.eval_set
    dataroot_ = args.dataroot
    version_ = args.version
    config_path_ = args.config_path
    render_curves_ = bool(args.render_curves)
    verbose_ = bool(args.verbose)
    render_classes_ = args.render_classes

    # Load the evaluation configuration: either the built-in NIPS 2019
    # configuration or a user-supplied JSON file.
    if config_path_ == '':
        cfg_ = config_factory('tracking_nips_2019')
    else:
        with open(config_path_, 'r') as _f:
            cfg_ = TrackingConfig.deserialize(json.load(_f))

    # Run the evaluation; all outputs are written under output_dir_.
    nusc_eval = TrackingEval(config=cfg_, result_path=result_path_, eval_set=eval_set_, output_dir=output_dir_,
                             nusc_version=version_, nusc_dataroot=dataroot_, verbose=verbose_,
                             render_classes=render_classes_)
    nusc_eval.main(render_curves=render_curves_)
| en | 0.632552 | # nuScenes dev-kit. # Code written by <NAME>, <NAME> and <NAME>, 2019. #from nuscenes.eval.tracking.render import TrackingRenderer, recall_metric_curve, summary_plot #读取点云 #保存路径及名称 #数据集路径 #result_file = "/share/OpenPCDet/output/cfgs/nuscenes_models/cbgs_pp_multihead/pp_neiborcbam_iouglogred/eval/epoch_20/val/default/final_result/data/trk_results/trk_results_nusc.json" #result_file = "/share/lycode/work_dirs/cp_imgdefroi_reid/cp_imgdefroi_reid_mgpu1/cp_ve_epo20_GT/trk_results/trk_results_nusc.json" #result_file = "/share/lycode/nuscenes-devkit/python-sdk/nuscenes/eval/tracking/trk_results/tracking_result_filter.json" #result_file = "/share/OpenPCDet/output/cfgs/nuscenes_models/cbgs_second_multihead/sec_neiborcbamreid3_marepre/eval/epoch_20/val/default/final_result/data/trk_results/trk_results_nusc.json" #结果文件 #result_file = "/share/nuscenes/OpenPCDet/output/cfgs/nuscenes_models/pointrcnn_2/default/eval/eval_with_train/epoch_20/val/final_result/data/results_nusc.json" #结果文件 #result_file = "/share/OpenPCDet/output/cfgs/nuscenes_models/cbgs_second_multihead/default/eval/epoch_6229/val/default/final_result/data/\ #trk_results/trk_results_nusc.json" #info_path = datset_path + "/nuscenes_infos_10sweeps_val.pkl" #class_names = ['car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier','motorcycle', 'bicycle', 'pedestrian', 'traffic_cone'] #no use #是否画score和分数 #wei实现 #是否输出分类FP/FN情况 单个类进行测试 仅可car 和 pedestrian #是否画历史轨迹坐标点 #共150个场景,每个场景约40帧 仅画前 scene_id_thr 个场景 This is the official nuScenes tracking evaluation code. Results are written to the provided output_dir. Here is an overview of the functions in this method: - init: Loads GT annotations and predictions stored in JSON format and filters the boxes. - run: Performs evaluation and dumps the metric data to disk. - render: Renders various plots and dumps to disk. We assume that: - Every sample_token is given in the results, although there may be not predictions for that sample. 
Please see https://www.nuscenes.org/tracking for more details. Initialize a TrackingEval object. :param config: A TrackingConfig object. :param result_path: Path of the nuScenes JSON result file. :param eval_set: The dataset split to evaluate on, e.g. train, val or test. :param output_dir: Folder to save plots and results to. :param nusc_version: The version of the NuScenes dataset. :param nusc_dataroot: Path of the nuScenes dataset on disk. :param verbose: Whether to print to stdout. :param render_classes: Classes to render to disk or None. # Check result file exists. # Make dirs. # Initialize NuScenes object. # We do not store it in self to let garbage collection take care of it and save memory. # Load data. # Add center distances. # Filter boxes (distance, points per box, etc.). #len():6019 # Convert boxes to tracks format. Performs the actual evaluation. :return: A tuple of high-level and the raw metric data. # ----------------------------------- # Step 1: Accumulate metric data for all classes and distance thresholds. # ----------------------------------- # 'car' 'ped' #car 绝对速度 #car lidar点云数 #仅包含与上一帧持续追踪的样本,角度和速度突变分别与other相并 #ped 绝对速度 #ped lidar点云数 #仅包含与上一帧持续追踪的样本,角度和速度突变分别与other相并 #相机视角 0-40%, 40-60%, 60-80% and 80-100% The visibility of an instance is the fraction of annotation visible in all 6 images. 
#car 绝对速度 #car lidar点云数 抽样参考比例:0.21 0.23 0.26,0.2,0.1 #仅包含与上一帧持续追踪的样本,角度和速度突变分别与other相并 #ped 绝对速度 #ped lidar点云数 #仅包含与上一帧持续追踪的样本,角度和速度突变分别与other相并 #car 绝对速度 # car均匀加速度为2.778m/s^2 3*0.5s=1.5 #可能有点小 #ped 绝对速度 # ped #car lidar点云数 #ped lidar点云数 # curr_ev = TrackingEvaluation(self.tracks_gt, self.tracks_pred, curr_class_name, self.cfg.dist_fcn_callable,\ # self.cfg.dist_th_tp, self.cfg.min_recall,\ # num_thresholds=TrackingMetricData.nelem,\ # metric_worst=self.cfg.metric_worst,\ # verbose=self.verbose,\ # output_dir=self.output_dir,\ # render_classes=self.render_classes) #curr_md = curr_ev.accumulate() Compute metrics for all recall thresholds of the current class. :return: TrackingMetricData instance which holds the metrics for each threshold. # Init. #md = TrackingMetricData() # Skip missing classes. # Do not add any metric. The average metrics will then be nan. #return md # Register mot metrics. #mh = create_motmetrics() # Get thresholds. # Note: The recall values are the hypothetical recall (10%, 20%, ..). # The actual recall may vary as there is no way to compute it without trying all thresholds. #, recalls = self.compute_thresholds(gt_box_count) #md.confidence = thresholds #md.recall_hypo = recalls # If recall threshold is not achieved, we assign the worst possible value in AMOTA and AMOTP. # Do not compute the same threshold twice. # This becomes relevant when a user submits many boxes with the exact same score. Accumulate metrics for a particular recall threshold of the current class. The scores are only computed if threshold is set to None. This is used to infer the recall thresholds. :param threshold: score threshold used to determine positives and negatives. :return: (The MOTAccumulator that stores all the hits/misses/etc, Scores for each TP). # The scores of the TPs. These are used to determine the recall thresholds initially. # Go through all frames and associate ground truth and tracker results. 
# Groundtruth and tracker contain lists for every single frame containing lists detections. #按场景 # Initialize accumulator and frame_id for this scene # Frame ids must be unique across all scenes # Retrieve GT and preds. # if len(tracks_gt) == 151: # tracks_gt.pop('0') # Visualize the boxes in this frame. #每个场景分别每帧 # Select only the current class. # Threshold boxes by score. Note that the scores were previously averaged over the whole track. # Abort if there are neither GT nor pred boxes. # Calculate distances. # Note that the distance function is hard-coded to achieve significant speedups via vectorization. # Distances that are larger than the threshold won't be associated. # Accumulate results. # Note that we cannot use timestamp as frameid as motmetrics assumes it's an integer. # Store scores of matches, which are used to determine recall thresholds. # Render the boxes in this frame. # load lidar points data按每帧加载 #https://github.com/nutonomy/nuscenes-devkit/blob/master/python-sdk/nuscenes/scripts/export_kitti.py #frame_pred是该帧所有的物体 #sample_annotation_tokens = sample['anns'] #标注 #cam_front_token = sample['data'][cam_name]#某点位的图像 # Retrieve sensor records. #sd_record_cam = self.nusc.get('sample_data', cam_front_token) #cs_record_cam = self.nusc.get('calibrated_sensor', sd_record_cam['calibrated_sensor_token']) #cs_record_lid = self.nusc.get('calibrated_sensor', sd_record_lid['calibrated_sensor_token']) # Retrieve the token from the lidar. # Note that this may be confusing as the filename of the camera will include the timestamp of the lidar, # not the camera. #filename_cam_full = sd_record_cam['filename'] #or lidar_token == "16be583c31a2403caa6c158bb55ae616":#选择特定帧 上面要设成150个场景 # Increment the frame_id, unless there are no boxes (equivalent to what motmetrics does). # Accumulate track data. #acc, _ = self.accumulate_threshold(threshold) #accumulators.append(acc) # # Compute metrics for current threshold. 
# thresh_name = self.name_gen(threshold) # thresh_summary = mh.compute(acc, metrics=MOT_METRIC_MAP.keys(), name=thresh_name) # thresh_metrics.append(thresh_summary) # # Print metrics to stdout. # if self.verbose: # print_threshold_metrics(thresh_summary.to_dict()) Main function that loads the evaluation code, visualizes samples, runs the evaluation and renders stat plots. :param render_curves: Whether to render PR and TP curves to disk. :return: The serialized TrackingMetrics computed during evaluation. # Run evaluation. # Dump the metric data, meta and metrics to disk. # Print metrics to stdout. # Render curves. # Settings. | 1.936044 | 2 |
test/test_isomorphism/test_isomorphism.py | ArkasDev/modri-subdue | 0 | 6615212 | import pickle
import experiment_scripts.compute_components
from experiment_scripts.evaluation import get_position_sorted_list
from experiment_scripts.evaluation import plot_graphs
from termcolor import colored
def test_isomorphism_load_pickle():
print("test_isomorphism_load_pickle")
correct = pickle.load(open("correct_graph_networkx.p", "rb"))
pattern = experiment_scripts.compute_components.convert_node_link_graph_to_nx_graph("correct_graph.json")
score_1 = get_position_sorted_list(correct, [pattern])
plot_graphs([correct], "correct_graph_networkx")
plot_graphs([pattern], "correct_graph")
if score_1 == -1:
print(colored("Error. Output: " + str(score_1), "red"))
else:
print(colored("Passed. Output: " + str(score_1), "green"))
print("--------------------------------------------")
def test_isomorphism_node_link_graphs():
print("test_isomorphism_node_link_graphs")
correct = experiment_scripts.compute_components.convert_node_link_graph_to_nx_graph("correct_graph_isomorphic.json")
pattern = experiment_scripts.compute_components.convert_node_link_graph_to_nx_graph("correct_graph.json")
score_1 = get_position_sorted_list(correct, [pattern])
plot_graphs([correct], "correct_graph_isomorphic")
if score_1 == -1:
print(colored("Error. Output: " + str(score_1), "red"))
else:
print(colored("Passed. Output: " + str(score_1), "green"))
print("--------------------------------------------")
if __name__ == "__main__":
test_isomorphism_load_pickle()
test_isomorphism_node_link_graphs()
| import pickle
import experiment_scripts.compute_components
from experiment_scripts.evaluation import get_position_sorted_list
from experiment_scripts.evaluation import plot_graphs
from termcolor import colored
def test_isomorphism_load_pickle():
print("test_isomorphism_load_pickle")
correct = pickle.load(open("correct_graph_networkx.p", "rb"))
pattern = experiment_scripts.compute_components.convert_node_link_graph_to_nx_graph("correct_graph.json")
score_1 = get_position_sorted_list(correct, [pattern])
plot_graphs([correct], "correct_graph_networkx")
plot_graphs([pattern], "correct_graph")
if score_1 == -1:
print(colored("Error. Output: " + str(score_1), "red"))
else:
print(colored("Passed. Output: " + str(score_1), "green"))
print("--------------------------------------------")
def test_isomorphism_node_link_graphs():
print("test_isomorphism_node_link_graphs")
correct = experiment_scripts.compute_components.convert_node_link_graph_to_nx_graph("correct_graph_isomorphic.json")
pattern = experiment_scripts.compute_components.convert_node_link_graph_to_nx_graph("correct_graph.json")
score_1 = get_position_sorted_list(correct, [pattern])
plot_graphs([correct], "correct_graph_isomorphic")
if score_1 == -1:
print(colored("Error. Output: " + str(score_1), "red"))
else:
print(colored("Passed. Output: " + str(score_1), "green"))
print("--------------------------------------------")
if __name__ == "__main__":
test_isomorphism_load_pickle()
test_isomorphism_node_link_graphs()
| none | 1 | 2.632054 | 3 | |
app.py | ummatias/detector-expressao-facial | 17 | 6615213 | import cv2 as cv
import tensorflow as tf
from tensorflow import keras
from models.Fac_Model import Fac_Model
from data_preprocess import load_data, webcam_img_process
import numpy as np
model = keras.models.load_model('./Fac_Model')
cam = cv.VideoCapture(0)
face_cascade = cv.CascadeClassifier('./util/haarcascade_frontalface_alt2.xml')
labels_map = { 0: 'Angry', 1: 'Disgust', 2: 'Fear', 3: 'Happy', 4: 'Sad', 5: 'Surprise', 6: 'Neutral' }
while 1:
ret, img = cam.read()
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
face = face_cascade.detectMultiScale(gray, 1.2, 4)
for x, y, w, h in face:
cv.rectangle(img, (x,y),(x+w,y+h),(255, 239, 0), 2)
gray_face = gray[y:y+h, x:x+w]
proc_img = webcam_img_process(gray_face)
predict = model.predict(proc_img)
label = labels_map[np.argmax(predict[0])]
cv.putText(img, label, (x - 20,y + h + 50), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 239, 0), 3, cv.LINE_AA)
cv.imshow('CAM', img)
if cv.waitKey(1) & 0xff == 27: break
cv.destroyAllWindows()
| import cv2 as cv
import tensorflow as tf
from tensorflow import keras
from models.Fac_Model import Fac_Model
from data_preprocess import load_data, webcam_img_process
import numpy as np
model = keras.models.load_model('./Fac_Model')
cam = cv.VideoCapture(0)
face_cascade = cv.CascadeClassifier('./util/haarcascade_frontalface_alt2.xml')
labels_map = { 0: 'Angry', 1: 'Disgust', 2: 'Fear', 3: 'Happy', 4: 'Sad', 5: 'Surprise', 6: 'Neutral' }
while 1:
ret, img = cam.read()
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
face = face_cascade.detectMultiScale(gray, 1.2, 4)
for x, y, w, h in face:
cv.rectangle(img, (x,y),(x+w,y+h),(255, 239, 0), 2)
gray_face = gray[y:y+h, x:x+w]
proc_img = webcam_img_process(gray_face)
predict = model.predict(proc_img)
label = labels_map[np.argmax(predict[0])]
cv.putText(img, label, (x - 20,y + h + 50), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 239, 0), 3, cv.LINE_AA)
cv.imshow('CAM', img)
if cv.waitKey(1) & 0xff == 27: break
cv.destroyAllWindows()
| none | 1 | 2.631264 | 3 | |
scrapers/__init__.py | spyker77/skillhunter | 1 | 6615214 | """Scrapers app is responsible for the functionality of the scrapers.
The main functionality includes:
- keeping the list of skills to identify,
- keeping the job titles to parse on schedule,
- collecting the data about vacancies from job sites,
- saving user's queries from the search field,
- producing the search result for the requested job.
"""
| """Scrapers app is responsible for the functionality of the scrapers.
The main functionality includes:
- keeping the list of skills to identify,
- keeping the job titles to parse on schedule,
- collecting the data about vacancies from job sites,
- saving user's queries from the search field,
- producing the search result for the requested job.
"""
| en | 0.896105 | Scrapers app is responsible for the functionality of the scrapers. The main functionality includes: - keeping the list of skills to identify, - keeping the job titles to parse on schedule, - collecting the data about vacancies from job sites, - saving user's queries from the search field, - producing the search result for the requested job. | 2.295046 | 2 |
src/dbspy/gui/analyze/sw/__init__.py | ZhengKeli/PositronSpector | 1 | 6615215 | <filename>src/dbspy/gui/analyze/sw/__init__.py
from ._sw import Controller
| <filename>src/dbspy/gui/analyze/sw/__init__.py
from ._sw import Controller
| none | 1 | 1.054668 | 1 | |
uni_ticket/migrations/0105_auto_20200520_1059.py | biotech2021/uniTicket | 15 | 6615216 | # Generated by Django 3.0.6 on 2020-05-20 08:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('organizational_area', '0028_auto_20200505_1149'),
('uni_ticket', '0104_organizationalstructurewsarchipro'),
]
operations = [
migrations.AlterField(
model_name='organizationalstructurewsarchipro',
name='organizational_structure',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='organizational_area.OrganizationalStructure'),
),
]
| # Generated by Django 3.0.6 on 2020-05-20 08:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('organizational_area', '0028_auto_20200505_1149'),
('uni_ticket', '0104_organizationalstructurewsarchipro'),
]
operations = [
migrations.AlterField(
model_name='organizationalstructurewsarchipro',
name='organizational_structure',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='organizational_area.OrganizationalStructure'),
),
]
| en | 0.786308 | # Generated by Django 3.0.6 on 2020-05-20 08:59 | 1.286646 | 1 |
giraphics/utilities/mathtext.py | tghira16/Giraphics | 1 | 6615217 | <reponame>tghira16/Giraphics<gh_stars>1-10
import os
def math_to_svg(expression, outfile):
command = r"/usr/local/lib/node_modules/mathjax-node-cli/bin/tex2svg '%s' > %s" % (expression, outfile)
os.system(command)
# math_to_svg('e^{i\pi} = -1', os.getcwd()+'/ste.svg')
| import os
def math_to_svg(expression, outfile):
command = r"/usr/local/lib/node_modules/mathjax-node-cli/bin/tex2svg '%s' > %s" % (expression, outfile)
os.system(command)
# math_to_svg('e^{i\pi} = -1', os.getcwd()+'/ste.svg') | fa | 0.122871 | # math_to_svg('e^{i\pi} = -1', os.getcwd()+'/ste.svg') | 2.252517 | 2 |
tests/checks/test_substitutions.py | MarcinOrlowski/prop-tool | 1 | 6615218 | """
# trans-tool
# The translation files checker and syncing tool.
#
# Copyright ©2021 <NAME> <mail [@] <EMAIL>>
# https://github.com/MarcinOrlowski/trans-tool/
#
"""
from typing import Dict, Union, List
from transtool.checks.substitutions import Substitutions
from transtool.decorators.overrides import overrides
from transtool.prop.file import PropFile
from transtool.prop.items import Comment, Translation
from tests.checks.checks_test_case import ChecksTestCase
class SubstitutionsTests(ChecksTestCase):
@overrides(ChecksTestCase)
def get_checker(self, config: Union[Dict, None] = None) -> Substitutions:
return Substitutions(config)
# #################################################################################################
def _get_valid_strings(self) -> List[str]:
return [
'Foo. Bar. All is fin5e!',
'Foo!',
]
def _get_faulty_strings(self) -> List[str]:
return [
'Triple dots...',
'Ough!!!',
]
def test_translation_no_faults(self) -> None:
for test in self._get_valid_strings():
self.check_single_file(Translation('key', test))
def test_empty_translation(self) -> None:
self.check(PropFile(self.config))
# #################################################################################################
def test_comment_no_faults(self) -> None:
for test in self._get_valid_strings():
self.check_single_file(Comment(test))
def test_comment_with_faults(self) -> None:
faults = self._get_faulty_strings()
for fault in faults:
# We should see no issues if comment scanning is disabled.
self.checker.config['comments'] = False
self.check_single_file(Comment(fault))
# And some warnings when comment scanning in enabled.
self.checker.config['comments'] = True
self.check_single_file(Comment(fault), exp_warnings = 1)
# #################################################################################################
def test_fail_with_error_flag(self) -> None:
"""
Ensures FLAG_FAIL_WITH_ERROR flag aborts scanning and returns error while
FLAG_DEFAULT yields warning.
"""
cfg = {
'regexp': r'([\.]{3})',
'replace': '…',
}
self.checker.config['map'] = [cfg]
cfg['flag'] = Substitutions.FLAG_DEFAULT
self.check_single_file(Translation('key', 'Triple dots...'), exp_warnings = 1)
cfg['flag'] = Substitutions.FLAG_FAIL_WITH_ERROR
self.check_single_file(Translation('key', 'Triple dots...'), exp_errors = 1)
| """
# trans-tool
# The translation files checker and syncing tool.
#
# Copyright ©2021 <NAME> <mail [@] <EMAIL>>
# https://github.com/MarcinOrlowski/trans-tool/
#
"""
from typing import Dict, Union, List
from transtool.checks.substitutions import Substitutions
from transtool.decorators.overrides import overrides
from transtool.prop.file import PropFile
from transtool.prop.items import Comment, Translation
from tests.checks.checks_test_case import ChecksTestCase
class SubstitutionsTests(ChecksTestCase):
@overrides(ChecksTestCase)
def get_checker(self, config: Union[Dict, None] = None) -> Substitutions:
return Substitutions(config)
# #################################################################################################
def _get_valid_strings(self) -> List[str]:
return [
'Foo. Bar. All is fin5e!',
'Foo!',
]
def _get_faulty_strings(self) -> List[str]:
return [
'Triple dots...',
'Ough!!!',
]
def test_translation_no_faults(self) -> None:
for test in self._get_valid_strings():
self.check_single_file(Translation('key', test))
def test_empty_translation(self) -> None:
self.check(PropFile(self.config))
# #################################################################################################
def test_comment_no_faults(self) -> None:
for test in self._get_valid_strings():
self.check_single_file(Comment(test))
def test_comment_with_faults(self) -> None:
faults = self._get_faulty_strings()
for fault in faults:
# We should see no issues if comment scanning is disabled.
self.checker.config['comments'] = False
self.check_single_file(Comment(fault))
# And some warnings when comment scanning in enabled.
self.checker.config['comments'] = True
self.check_single_file(Comment(fault), exp_warnings = 1)
# #################################################################################################
def test_fail_with_error_flag(self) -> None:
"""
Ensures FLAG_FAIL_WITH_ERROR flag aborts scanning and returns error while
FLAG_DEFAULT yields warning.
"""
cfg = {
'regexp': r'([\.]{3})',
'replace': '…',
}
self.checker.config['map'] = [cfg]
cfg['flag'] = Substitutions.FLAG_DEFAULT
self.check_single_file(Translation('key', 'Triple dots...'), exp_warnings = 1)
cfg['flag'] = Substitutions.FLAG_FAIL_WITH_ERROR
self.check_single_file(Translation('key', 'Triple dots...'), exp_errors = 1)
| de | 0.506767 | # trans-tool # The translation files checker and syncing tool. # # Copyright ©2021 <NAME> <mail [@] <EMAIL>> # https://github.com/MarcinOrlowski/trans-tool/ # # ################################################################################################# # ################################################################################################# # We should see no issues if comment scanning is disabled. # And some warnings when comment scanning in enabled. # ################################################################################################# Ensures FLAG_FAIL_WITH_ERROR flag aborts scanning and returns error while FLAG_DEFAULT yields warning. | 2.310116 | 2 |
fintopy/prices.py | mbonix/fintopy | 0 | 6615219 | """Accessor to `pandas.Series` for historical series of financial prices.
"""
import numpy as np
import pandas as pd
@pd.api.extensions.register_series_accessor('prices')
class PricesSeriesAccessor:
"""Accessor for historical series of financial prices.
Examples:
>>> s.prices.set_frequency()
>>> s.prices.rebase()
>>> s.prices.log_returns()
>>> s.prices.pct_returns()
>>> s.prices.abs_return()
>>> s.prices.annualized_return()
>>> s.prices.cagr()
>>> s.prices.drawdown()
>>> s.prices.max_drawdown()
"""
def __init__(self, series: pd.Series) -> None:
self._validate(series)
self._series = series.sort_index()
@staticmethod
def _validate(series: pd.Series) -> None:
# Validates the price series.
if series.index.inferred_type != 'datetime64':
raise TypeError('The series index must be a DateTimeIndex.')
if not series.index.is_unique:
raise ValueError('The series index cannot have duplicates.')
if not (series > 0).all():
raise ValueError('The series cannot have negative prices.')
def set_frequency(self, freq: str = 'B', method: str = 'pad') -> pd.Series:
"""Modifies / sets the frequency of the series.
Args:
freq: The frequency of the new series. Typical values could be 'B' (Business Day), 'BW' (alias for 'W-FRI', Business Week), 'BM' (Business Month), 'BQ' (Business Quarter), 'BY' (Business Year), Defaults to 'B' (Business Day).
method: Method fo filling the holes in the reindexed series. Can assume values `None` (fills with NaN), 'pad'/'ffill' (fills with previous value), 'backfill'/'bfill' (fills with next value). Defaults to 'pad'.
Returns:
pd.Series: A series with a modified frequency.
"""
if freq == 'BW':
freq = 'W-FRI'
return self._series.asfreq(freq, method)
def rebase(self, base: int = 100) -> pd.Series:
"""Rebases the series.
Args:
base: The base for the new series. Defaults to 100.
Returns:
pd.Series: The rebased series.
"""
return self._series.divide(self._series.iloc[0]).multiply(base)
def log_returns(self, period: int = 1, dropna: bool = False) -> pd.Series:
"""Calculates logarithmic returns.
Args:
period: The calculation period. Defaults to 1.
dropna: If True, NAs are dropped. Defaults to False.
Returns:
pd.Series: The series of returns.
"""
if dropna:
return self._series.apply(np.log).diff(period).dropna()
else:
return self._series.apply(np.log).diff(period)
def pct_returns(self, period: int = 1, dropna: bool = False) -> pd.Series:
"""Calculates percentage returns.
Args:
period: The calculation period. Defaults to 1.
dropna: If True, NAs are dropped. Defaults to False.
Returns:
pd.Series: The series of returns.
"""
if dropna:
return self._series.pct_change(period).dropna()
else:
return self._series.pct_change(period)
def abs_return(self) -> float:
"""Calculates the absolute return over the series.
Returns:
float: The absolute return over the series.
"""
return self._series.iat[-1] / self._series.iat[0] - 1
def annualized_return(self, base: int = 365) -> float:
"""Calculates an annualized return over the series.
Args:
base: Number of periods in a year. Defaults to 365.
Returns:
float: The annualized return over the series.
"""
return self.abs_return() * base /\
(self._series.index[-1] - self._series.index[0]).days
def cagr(self, base: int = 365) -> float:
"""Calculates a compounded annual growth rate (CAGR).
Args:
base: Number of periods in a year. Defaults to 365.
Returns:
float: The compounded annual growth rate.
"""
return (1 + self.abs_return()) ** (base / (self._series.index[-1] -\
self._series.index[0]).days) - 1
def drawdown(self, negative: bool = False) -> pd.Series:
"""Calculates the drawdown of the series.
Args:
negative: If True, returns a negative series. Defaults to False.
Returns:
pd.Series: The drawdown series.
"""
if negative:
return self._series.cummax().sub(self._series).\
div(self._series.cummax()).mul(-1)
else:
return self._series.cummax().sub(self._series).\
div(self._series.cummax())
def max_drawdown(self, negative=False) -> float:
"""Calculates the max drawdown.
Args:
negative: If True, uses a negative series. Defaults to False.
Returns:
float: The max drawdown.
"""
if negative:
return self.drawdown(negative=True).min()
else:
return self.drawdown().max()
| """Accessor to `pandas.Series` for historical series of financial prices.
"""
import numpy as np
import pandas as pd
@pd.api.extensions.register_series_accessor('prices')
class PricesSeriesAccessor:
"""Accessor for historical series of financial prices.
Examples:
>>> s.prices.set_frequency()
>>> s.prices.rebase()
>>> s.prices.log_returns()
>>> s.prices.pct_returns()
>>> s.prices.abs_return()
>>> s.prices.annualized_return()
>>> s.prices.cagr()
>>> s.prices.drawdown()
>>> s.prices.max_drawdown()
"""
def __init__(self, series: pd.Series) -> None:
self._validate(series)
self._series = series.sort_index()
@staticmethod
def _validate(series: pd.Series) -> None:
# Validates the price series.
if series.index.inferred_type != 'datetime64':
raise TypeError('The series index must be a DateTimeIndex.')
if not series.index.is_unique:
raise ValueError('The series index cannot have duplicates.')
if not (series > 0).all():
raise ValueError('The series cannot have negative prices.')
def set_frequency(self, freq: str = 'B', method: str = 'pad') -> pd.Series:
"""Modifies / sets the frequency of the series.
Args:
freq: The frequency of the new series. Typical values could be 'B' (Business Day), 'BW' (alias for 'W-FRI', Business Week), 'BM' (Business Month), 'BQ' (Business Quarter), 'BY' (Business Year), Defaults to 'B' (Business Day).
method: Method fo filling the holes in the reindexed series. Can assume values `None` (fills with NaN), 'pad'/'ffill' (fills with previous value), 'backfill'/'bfill' (fills with next value). Defaults to 'pad'.
Returns:
pd.Series: A series with a modified frequency.
"""
if freq == 'BW':
freq = 'W-FRI'
return self._series.asfreq(freq, method)
def rebase(self, base: int = 100) -> pd.Series:
"""Rebases the series.
Args:
base: The base for the new series. Defaults to 100.
Returns:
pd.Series: The rebased series.
"""
return self._series.divide(self._series.iloc[0]).multiply(base)
def log_returns(self, period: int = 1, dropna: bool = False) -> pd.Series:
"""Calculates logarithmic returns.
Args:
period: The calculation period. Defaults to 1.
dropna: If True, NAs are dropped. Defaults to False.
Returns:
pd.Series: The series of returns.
"""
if dropna:
return self._series.apply(np.log).diff(period).dropna()
else:
return self._series.apply(np.log).diff(period)
def pct_returns(self, period: int = 1, dropna: bool = False) -> pd.Series:
"""Calculates percentage returns.
Args:
period: The calculation period. Defaults to 1.
dropna: If True, NAs are dropped. Defaults to False.
Returns:
pd.Series: The series of returns.
"""
if dropna:
return self._series.pct_change(period).dropna()
else:
return self._series.pct_change(period)
def abs_return(self) -> float:
"""Calculates the absolute return over the series.
Returns:
float: The absolute return over the series.
"""
return self._series.iat[-1] / self._series.iat[0] - 1
def annualized_return(self, base: int = 365) -> float:
"""Calculates an annualized return over the series.
Args:
base: Number of periods in a year. Defaults to 365.
Returns:
float: The annualized return over the series.
"""
return self.abs_return() * base /\
(self._series.index[-1] - self._series.index[0]).days
def cagr(self, base: int = 365) -> float:
"""Calculates a compounded annual growth rate (CAGR).
Args:
base: Number of periods in a year. Defaults to 365.
Returns:
float: The compounded annual growth rate.
"""
return (1 + self.abs_return()) ** (base / (self._series.index[-1] -\
self._series.index[0]).days) - 1
def drawdown(self, negative: bool = False) -> pd.Series:
"""Calculates the drawdown of the series.
Args:
negative: If True, returns a negative series. Defaults to False.
Returns:
pd.Series: The drawdown series.
"""
if negative:
return self._series.cummax().sub(self._series).\
div(self._series.cummax()).mul(-1)
else:
return self._series.cummax().sub(self._series).\
div(self._series.cummax())
def max_drawdown(self, negative=False) -> float:
"""Calculates the max drawdown.
Args:
negative: If True, uses a negative series. Defaults to False.
Returns:
float: The max drawdown.
"""
if negative:
return self.drawdown(negative=True).min()
else:
return self.drawdown().max()
| en | 0.705973 | Accessor to `pandas.Series` for historical series of financial prices. Accessor for historical series of financial prices. Examples: >>> s.prices.set_frequency() >>> s.prices.rebase() >>> s.prices.log_returns() >>> s.prices.pct_returns() >>> s.prices.abs_return() >>> s.prices.annualized_return() >>> s.prices.cagr() >>> s.prices.drawdown() >>> s.prices.max_drawdown() # Validates the price series. Modifies / sets the frequency of the series. Args: freq: The frequency of the new series. Typical values could be 'B' (Business Day), 'BW' (alias for 'W-FRI', Business Week), 'BM' (Business Month), 'BQ' (Business Quarter), 'BY' (Business Year), Defaults to 'B' (Business Day). method: Method fo filling the holes in the reindexed series. Can assume values `None` (fills with NaN), 'pad'/'ffill' (fills with previous value), 'backfill'/'bfill' (fills with next value). Defaults to 'pad'. Returns: pd.Series: A series with a modified frequency. Rebases the series. Args: base: The base for the new series. Defaults to 100. Returns: pd.Series: The rebased series. Calculates logarithmic returns. Args: period: The calculation period. Defaults to 1. dropna: If True, NAs are dropped. Defaults to False. Returns: pd.Series: The series of returns. Calculates percentage returns. Args: period: The calculation period. Defaults to 1. dropna: If True, NAs are dropped. Defaults to False. Returns: pd.Series: The series of returns. Calculates the absolute return over the series. Returns: float: The absolute return over the series. Calculates an annualized return over the series. Args: base: Number of periods in a year. Defaults to 365. Returns: float: The annualized return over the series. Calculates a compounded annual growth rate (CAGR). Args: base: Number of periods in a year. Defaults to 365. Returns: float: The compounded annual growth rate. Calculates the drawdown of the series. Args: negative: If True, returns a negative series. Defaults to False. 
Returns: pd.Series: The drawdown series. Calculates the max drawdown. Args: negative: If True, uses a negative series. Defaults to False. Returns: float: The max drawdown. | 3.307087 | 3 |
web/forms/forms.py | mavidser/tardis-web-interface | 0 | 6615220 | #TODO: Added a property for label names in the YAML File, to make the web-app more clear.
from wtforms import validators, fields
from wtforms_tornado import Form
import yaml
import json
from collections import OrderedDict
mandatory = [validators.DataRequired()]
#------------------------------------------------------------------------------
def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
class OrderedLoader(Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
return yaml.load(stream, OrderedLoader)
config_data = ordered_load(file('config.yml', 'r'))
field_type = {
'quantity': fields.TextField,
'quantity_range_sampled': fields.TextField,
'float': fields.TextField,
'int': fields.TextField,
'string': fields.TextField,
'list': fields.TextField,
'bool': fields.BooleanField,
'container-declaration': fields.SelectField
}
association_dict = {}
#------------------------------------------------------------------------------
def createField(schema,parent='',name=None):
description = {}
validation = []
if 'help' in schema:
description['help_text'] = schema['help']
if 'default' in schema and schema['default']!='None':
description['default'] = schema['default']
else:
description['default'] = ''
if name in association_dict and parent==association_dict[name]['parent']:
try:
description['association']=association_dict[name]['association']
except:
pass
if parent:
description['parent']=parent
if 'mandatory' in schema and schema['mandatory']:
validation = [validators.DataRequired()]
# if 'file' in schema and schema['file']:
# form_field = fields.FileField
if 'allowed_value' in schema:
form_field = fields.SelectField
choices = [(i,i) for i in schema['allowed_value']]
return form_field(choices=choices,validators=validation,description=description)
else:
form_field = field_type[schema['property_type']]
if schema['property_type']=='container-declaration':
choices=[(i,i) for i in schema['containers']]
children = [i for i in schema if (i[0]=='_' or i[0]=='+')]
for i in children:
for j in schema[i]:
if j in association_dict:
association_dict[j]['association'] = association_dict[j]['association']+" "+i[1:]
else:
association_dict[j] = {}
if parent:
association_dict[j]['parent'] = parent
association_dict[j]['association'] = i[1:] + " " +parent+'__'+ name
else:
association_dict[j]['parent'] = parent
association_dict[j]['association'] = i[1:] + " " +name
return form_field(choices=choices,validators=validation,description=description)
return form_field(validators=validation,description=description)
def populate_fields(yml_field,single_item,parent=''):
def run(cls):
global association_dict
if single_item:
setattr(cls, single_item, createField(yml_field))
else:
for i in yml_field:
try:
association = association_dict[i]['association']
except:
association = ''
if i=='property_type' and yml_field[i]=='container-property':
pass
elif 'property_type' in yml_field[i]:
if 'property_type' in yml_field[i] and yml_field[i]['property_type']=='container-property':
if association:
setattr(cls, i, fields.BooleanField(description={'association':association}))
else:
setattr(cls, i, fields.BooleanField())
populate_fields(yml_field[i],single_item,parent=i)(cls)
association_dict = {}
else:
if parent:
setattr(cls, parent+'__'+i, createField(yml_field[i],parent=parent,name=i))
else:
setattr(cls, i, createField(yml_field[i],parent=parent,name=i))
else:
if parent:
setattr(cls, i, fields.BooleanField(description={'association':association,'parent':parent}))
else:
setattr(cls, i, fields.BooleanField(description={'association':association}))
populate_fields(yml_field[i],single_item,parent=i)(cls)
return cls
return run
#------------------------------------------------------------------------------
@populate_fields(config_data['supernova'],single_item=False)
class SupernoveForm(Form):
pass
@populate_fields(config_data['atom_data'],single_item='atom_data')
class AtomForm(Form):
pass
@populate_fields(config_data['plasma'],single_item=False)
class PlasmaForm(Form):
pass
@populate_fields(config_data['model']['abundances'],single_item=False)
class AbundanceForm(Form):
uniform_abundances = fields.TextAreaField(description={'help_text':'Insert Uniform abundances of all the shells, in the format: C: 0.01 O: 0.01 etc...'})
@populate_fields(config_data['model']['structure'],single_item=False)
class StructureForm(Form):
pass
@populate_fields(config_data['montecarlo'],single_item=False)
class MonteCarloForm(Form):
pass
@populate_fields(config_data['spectrum'],single_item='spectrum')
class SpectrumForm(Form):
pass
#------------------------------------------------------------------------------
class TardisForm(Form):
supernova = fields.FormField(SupernoveForm)
atom_data = fields.FormField(AtomForm)
plasma = fields.FormField(PlasmaForm)
structure_model = fields.FormField(StructureForm)
abundances_model = fields.FormField(AbundanceForm)
montecarlo = fields.FormField(MonteCarloForm)
spectrum = fields.FormField(SpectrumForm)
| #TODO: Added a property for label names in the YAML File, to make the web-app more clear.
from wtforms import validators, fields
from wtforms_tornado import Form
import yaml
import json
from collections import OrderedDict
mandatory = [validators.DataRequired()]
#------------------------------------------------------------------------------
def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
class OrderedLoader(Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
return yaml.load(stream, OrderedLoader)
config_data = ordered_load(file('config.yml', 'r'))
field_type = {
'quantity': fields.TextField,
'quantity_range_sampled': fields.TextField,
'float': fields.TextField,
'int': fields.TextField,
'string': fields.TextField,
'list': fields.TextField,
'bool': fields.BooleanField,
'container-declaration': fields.SelectField
}
association_dict = {}
#------------------------------------------------------------------------------
def createField(schema,parent='',name=None):
description = {}
validation = []
if 'help' in schema:
description['help_text'] = schema['help']
if 'default' in schema and schema['default']!='None':
description['default'] = schema['default']
else:
description['default'] = ''
if name in association_dict and parent==association_dict[name]['parent']:
try:
description['association']=association_dict[name]['association']
except:
pass
if parent:
description['parent']=parent
if 'mandatory' in schema and schema['mandatory']:
validation = [validators.DataRequired()]
# if 'file' in schema and schema['file']:
# form_field = fields.FileField
if 'allowed_value' in schema:
form_field = fields.SelectField
choices = [(i,i) for i in schema['allowed_value']]
return form_field(choices=choices,validators=validation,description=description)
else:
form_field = field_type[schema['property_type']]
if schema['property_type']=='container-declaration':
choices=[(i,i) for i in schema['containers']]
children = [i for i in schema if (i[0]=='_' or i[0]=='+')]
for i in children:
for j in schema[i]:
if j in association_dict:
association_dict[j]['association'] = association_dict[j]['association']+" "+i[1:]
else:
association_dict[j] = {}
if parent:
association_dict[j]['parent'] = parent
association_dict[j]['association'] = i[1:] + " " +parent+'__'+ name
else:
association_dict[j]['parent'] = parent
association_dict[j]['association'] = i[1:] + " " +name
return form_field(choices=choices,validators=validation,description=description)
return form_field(validators=validation,description=description)
def populate_fields(yml_field,single_item,parent=''):
def run(cls):
global association_dict
if single_item:
setattr(cls, single_item, createField(yml_field))
else:
for i in yml_field:
try:
association = association_dict[i]['association']
except:
association = ''
if i=='property_type' and yml_field[i]=='container-property':
pass
elif 'property_type' in yml_field[i]:
if 'property_type' in yml_field[i] and yml_field[i]['property_type']=='container-property':
if association:
setattr(cls, i, fields.BooleanField(description={'association':association}))
else:
setattr(cls, i, fields.BooleanField())
populate_fields(yml_field[i],single_item,parent=i)(cls)
association_dict = {}
else:
if parent:
setattr(cls, parent+'__'+i, createField(yml_field[i],parent=parent,name=i))
else:
setattr(cls, i, createField(yml_field[i],parent=parent,name=i))
else:
if parent:
setattr(cls, i, fields.BooleanField(description={'association':association,'parent':parent}))
else:
setattr(cls, i, fields.BooleanField(description={'association':association}))
populate_fields(yml_field[i],single_item,parent=i)(cls)
return cls
return run
#------------------------------------------------------------------------------
@populate_fields(config_data['supernova'],single_item=False)
class SupernoveForm(Form):
pass
@populate_fields(config_data['atom_data'],single_item='atom_data')
class AtomForm(Form):
pass
@populate_fields(config_data['plasma'],single_item=False)
class PlasmaForm(Form):
pass
@populate_fields(config_data['model']['abundances'],single_item=False)
class AbundanceForm(Form):
uniform_abundances = fields.TextAreaField(description={'help_text':'Insert Uniform abundances of all the shells, in the format: C: 0.01 O: 0.01 etc...'})
@populate_fields(config_data['model']['structure'],single_item=False)
class StructureForm(Form):
pass
@populate_fields(config_data['montecarlo'],single_item=False)
class MonteCarloForm(Form):
pass
@populate_fields(config_data['spectrum'],single_item='spectrum')
class SpectrumForm(Form):
pass
#------------------------------------------------------------------------------
class TardisForm(Form):
supernova = fields.FormField(SupernoveForm)
atom_data = fields.FormField(AtomForm)
plasma = fields.FormField(PlasmaForm)
structure_model = fields.FormField(StructureForm)
abundances_model = fields.FormField(AbundanceForm)
montecarlo = fields.FormField(MonteCarloForm)
spectrum = fields.FormField(SpectrumForm)
| en | 0.160965 | #TODO: Added a property for label names in the YAML File, to make the web-app more clear. #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ # if 'file' in schema and schema['file']: # form_field = fields.FileField #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ | 2.044945 | 2 |
cadnano/strand/modscmd.py | mctrinh/cadnano2.5 | 1 | 6615221 | <reponame>mctrinh/cadnano2.5
from cadnano.proxies.cnproxy import UndoCommand
class AddModsCommand(UndoCommand):
def __init__(self, document, strand, idx, mod_id):
super(AddModsCommand, self).__init__()
self._strand = strand
self._id_num = strand.idNum()
self._idx = idx
self._mod_id = mod_id
self.document = document
# end def
def redo(self):
strand = self._strand
mid = self._mod_id
part = strand.part()
idx = self._idx
part.addModStrandInstance(strand, idx, mid)
strand.strandModsAddedSignal.emit(strand, self.document, mid, idx)
# end def
def undo(self):
strand = self._strand
mid = self._mod_id
part = strand.part()
idx = self._idx
part.removeModStrandInstance(strand, idx, mid)
strand.strandModsRemovedSignal.emit(strand, self.document, mid, idx)
# end def
# end class
class RemoveModsCommand(UndoCommand):
def __init__(self, document, strand, idx, mod_id):
super(RemoveModsCommand, self).__init__()
self._strand = strand
self._id_num = strand.idNum()
self._idx = idx
self._mod_id = mod_id
self.document = document
# end def
def redo(self):
strand = self._strand
strand.isStaple()
mid = self._mod_id
part = strand.part()
idx = self._idx
part.removeModStrandInstance(strand, idx, mid)
strand.strandModsRemovedSignal.emit(strand, self.document, mid, idx)
# end def
def undo(self):
strand = self._strand
strand.isStaple()
mid = self._mod_id
part = strand.part()
idx = self._idx
part.addModStrandInstance(strand, idx, mid)
strand.strandModsAddedSignal.emit(strand, self.document, mid, idx)
# end def
# end class
| from cadnano.proxies.cnproxy import UndoCommand
class AddModsCommand(UndoCommand):
def __init__(self, document, strand, idx, mod_id):
super(AddModsCommand, self).__init__()
self._strand = strand
self._id_num = strand.idNum()
self._idx = idx
self._mod_id = mod_id
self.document = document
# end def
def redo(self):
strand = self._strand
mid = self._mod_id
part = strand.part()
idx = self._idx
part.addModStrandInstance(strand, idx, mid)
strand.strandModsAddedSignal.emit(strand, self.document, mid, idx)
# end def
def undo(self):
strand = self._strand
mid = self._mod_id
part = strand.part()
idx = self._idx
part.removeModStrandInstance(strand, idx, mid)
strand.strandModsRemovedSignal.emit(strand, self.document, mid, idx)
# end def
# end class
class RemoveModsCommand(UndoCommand):
def __init__(self, document, strand, idx, mod_id):
super(RemoveModsCommand, self).__init__()
self._strand = strand
self._id_num = strand.idNum()
self._idx = idx
self._mod_id = mod_id
self.document = document
# end def
def redo(self):
strand = self._strand
strand.isStaple()
mid = self._mod_id
part = strand.part()
idx = self._idx
part.removeModStrandInstance(strand, idx, mid)
strand.strandModsRemovedSignal.emit(strand, self.document, mid, idx)
# end def
def undo(self):
strand = self._strand
strand.isStaple()
mid = self._mod_id
part = strand.part()
idx = self._idx
part.addModStrandInstance(strand, idx, mid)
strand.strandModsAddedSignal.emit(strand, self.document, mid, idx)
# end def
# end class | en | 0.421146 | # end def # end def # end def # end class # end def # end def # end def # end class | 2.219426 | 2 |
SourceCode/BarelangFC-Map-Visualization.py | ekorudiawan/BarelangFC-Map-Visualization | 0 | 6615222 | <gh_stars>0
from flask import Flask, render_template, Response, request
from scipy.spatial import distance
from numpy.random import uniform, normal
import time
import socket
import sys
import numpy as np
import scipy.stats
import math
import cv2
import webbrowser
import os
R1_IP = "127.0.0.1"
R2_IP = "127.0.0.1"
R3_IP = "127.0.0.1"
R4_IP = "127.0.0.1"
R5_IP = "127.0.0.1"
R1_IN_PORT = 8000
R2_IN_PORT = 8000
R3_IN_PORT = 8000
R4_IN_PORT = 8000
R5_IN_PORT = 8000
# Configuration in Cm
fieldLength = 900
fieldWidth = 600
mapImage = np.zeros((800,1100,3), np.uint8)
# Variabel posisi robot
robot1Position = np.zeros((3))
robot2Position = np.zeros((3))
robot3Position = np.zeros((3))
robot4Position = np.zeros((3))
robot5Position = np.zeros((3))
ballRobot1Position = np.zeros((2))
ballRobot2Position = np.zeros((2))
ballRobot3Position = np.zeros((2))
ballRobot4Position = np.zeros((2))
ballRobot5Position = np.zeros((2))
ballMeanPosition = np.zeros((2))
deltaTime = 1
robot1Color = (0,127,127)
robot2Color = (0,127,255)
robot3Color = (0,255,127)
robot4Color = (0,255,255)
robot5Color = (255,0,127)
ballColor = (0,0,255)
app = Flask(__name__)
# http://mattrichardson.com/Raspberry-Pi-Flask/
@app.route('/')
def index():
"""Video streaming home page."""
return render_template('index.html')
@app.route('/video_feed')
def video_feed():
"""Video streaming route. Put this in the src attribute of an img tag."""
return Response(main(),
mimetype='multipart/x-mixed-replace; boundary=frame')
def worldCoorToImageCoor(x, y):
x = x + 100
y = 800 - (y + 100)
return x, y
def main():
simulationMode = True
if simulationMode == False:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((R1_IP, R1_IN_PORT))
sock.bind((R2_IP, R2_IN_PORT))
sock.bind((R3_IP, R3_IN_PORT))
sock.bind((R4_IP, R4_IN_PORT))
sock.bind((R5_IP, R5_IN_PORT))
except socket.error:
print 'Failed to create socket'
sys.exit()
# Timing value
nowTime = 0
lastTime = 0
loop = 0
while True:
nowTime = time.clock()
timer = nowTime - lastTime
halfDeltaTime = deltaTime / 2.00
# Update every 0.5 * deltatime
if timer > halfDeltaTime:
lastTime = nowTime
loop += 1
print 'Runtime : {} s'.format(deltaTime*loop)
mapFromFile = False
if mapFromFile == True:
# image tidak clear
mapImage[:] = cv2.imread('mapImage.jpg')
else:
mapImage[:] = (0, 255, 0)
cv2.rectangle(mapImage, (100,100), (1000,700), (255,255,255), 3) # Garis Luar
cv2.rectangle(mapImage, (40,530), (100,270), (255,0,0), 10) # Garis Luar Gawang Kiri
cv2.rectangle(mapImage, (1000,530), (1060,270), (0,0,255), 10) # Garis Luar Gawang Kiri
cv2.rectangle(mapImage, (100,650), (200,150), (255,255,255), 3) # Garis Luar Gawang Kiri
cv2.rectangle(mapImage, (900,650), (1000,150), (255,255,255), 3) # Garis Luar Gawang Kiri
cv2.line(mapImage, (550,100), (550,700), (255,255,255), 3) # Garis Tengah
cv2.circle(mapImage, (550,400), 75, (255,255,255), 3) # Lingkaran Tengah
cv2.circle(mapImage, (310,400), 3, (255,255,255), 5)
cv2.circle(mapImage, (790,400), 3, (255,255,255), 5)
textLine = "(0,0)"
x, y = worldCoorToImageCoor(0,0)
cv2.putText(mapImage, textLine, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 127), 1, cv2.LINE_AA)
textLine = "(0,600)"
x, y = worldCoorToImageCoor(0,600)
cv2.putText(mapImage, textLine, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 127), 1, cv2.LINE_AA)
textLine = "(900,600)"
x, y = worldCoorToImageCoor(900,600)
cv2.putText(mapImage, textLine, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 127), 1, cv2.LINE_AA)
textLine = "(900,0)"
x, y = worldCoorToImageCoor(900,0)
cv2.putText(mapImage, textLine, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 127), 1, cv2.LINE_AA)
# cv2.imwrite("mapImage.jpg", mapImage)
# break
if simulationMode == True:
robot1Position[0] = uniform(0, fieldLength)
robot1Position[1] = uniform(0, fieldWidth)
robot1Position[2] = uniform(0, 360)
robot2Position[0] = uniform(0, fieldLength)
robot2Position[1] = uniform(0, fieldWidth)
robot2Position[2] = uniform(0, 360)
robot3Position[0] = uniform(0, fieldLength)
robot3Position[1] = uniform(0, fieldWidth)
robot3Position[2] = uniform(0, 360)
robot4Position[0] = uniform(0, fieldLength)
robot4Position[1] = uniform(0, fieldWidth)
robot4Position[2] = uniform(0, 360)
robot5Position[0] = uniform(0, fieldLength)
robot5Position[1] = uniform(0, fieldWidth)
robot5Position[2] = uniform(0, 360)
ballMeanPosition[0] = uniform(0, fieldLength)
ballMeanPosition[1] = uniform(0, fieldWidth)
drawRobot = True
if drawRobot == True:
x, y = worldCoorToImageCoor(int(robot1Position[0]), int(robot1Position[1]))
cv2.circle(mapImage,(x, y), 20, robot1Color, -1)
textLine = "R1"
cv2.putText(mapImage, textLine, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1, cv2.LINE_AA)
x, y = worldCoorToImageCoor(int(robot2Position[0]), int(robot2Position[1]))
cv2.circle(mapImage,(x, y), 20, robot2Color, -1)
textLine = "R2"
cv2.putText(mapImage, textLine, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1, cv2.LINE_AA)
x, y = worldCoorToImageCoor(int(robot3Position[0]), int(robot3Position[1]))
cv2.circle(mapImage,(x, y), 20, robot3Color, -1)
textLine = "R3"
cv2.putText(mapImage, textLine, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1, cv2.LINE_AA)
x, y = worldCoorToImageCoor(int(robot4Position[0]), int(robot4Position[1]))
cv2.circle(mapImage,(x, y), 20, robot4Color, -1)
textLine = "R4"
cv2.putText(mapImage, textLine, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1, cv2.LINE_AA)
x, y = worldCoorToImageCoor(int(robot5Position[0]), int(robot5Position[1]))
cv2.circle(mapImage,(x, y), 20, robot5Color, -1)
textLine = "R5"
cv2.putText(mapImage, textLine, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1, cv2.LINE_AA)
drawBall = True
if drawBall == True:
x, y = worldCoorToImageCoor(int(ballMeanPosition[0]), int(ballMeanPosition[1]))
cv2.circle(mapImage,(x, y), 15, ballColor, -1)
textLine = "B"
cv2.putText(mapImage, textLine, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1, cv2.LINE_AA)
textLine = "R1 Position : ({}, {}, {})".format(int(robot1Position[0]), int(robot1Position[1]), int(robot1Position[2]))
cv2.putText(mapImage, textLine, (10,20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 127), 1, cv2.LINE_AA)
textLine = "R2 Position : ({}, {}, {})".format(int(robot2Position[0]), int(robot2Position[1]), int(robot2Position[2]))
cv2.putText(mapImage, textLine, (10,40), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 127), 1, cv2.LINE_AA)
textLine = "R3 Position : ({}, {}, {})".format(int(robot3Position[0]), int(robot3Position[1]), int(robot3Position[2]))
cv2.putText(mapImage, textLine, (300,20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 127), 1, cv2.LINE_AA)
textLine = "R4 Position : ({}, {}, {})".format(int(robot4Position[0]), int(robot4Position[1]), int(robot4Position[2]))
cv2.putText(mapImage, textLine, (300,40), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 127), 1, cv2.LINE_AA)
textLine = "R5 Position : ({}, {}, {})".format(int(robot5Position[0]), int(robot5Position[1]), int(robot5Position[2]))
cv2.putText(mapImage, textLine, (590,20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 127), 1, cv2.LINE_AA)
textLine = "Ball Position : ({}, {})".format(int(ballMeanPosition[0]), int(ballMeanPosition[1]))
cv2.putText(mapImage, textLine, (590,40), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 127), 1, cv2.LINE_AA)
# Enable GUI Streaming
showGUI = True
if showGUI:
cv2.imshow("Barelang FC - Map Visualization", mapImage)
# Enable URL Streaming
streamUrl = False
if streamUrl == True:
smallMapImage = cv2.resize(mapImage, None, fx=0.5, fy=0.5, interpolation = cv2.INTER_CUBIC)
cv2.imwrite('stream.jpg', smallMapImage)
yield (b'--frame\r\n'b'Content-Type: image/jpeg\r\n\r\n' + open('stream.jpg', 'rb').read() + b'\r\n')
if showGUI:
k = cv2.waitKey(1)
if k == ord('x'):
cv2.destroyAllWindows()
break
if __name__ == "__main__":
url = "http://0.0.0.0:9999"
if (os.name == "nt"):
chromedir= 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'
webbrowser.get(chromedir).open(url)
else:
webbrowser.get(using='firefox').open_new_tab(url)
app.run(host='0.0.0.0', port=9999, debug=False, threaded=False)
| from flask import Flask, render_template, Response, request
from scipy.spatial import distance
from numpy.random import uniform, normal
import time
import socket
import sys
import numpy as np
import scipy.stats
import math
import cv2
import webbrowser
import os
R1_IP = "127.0.0.1"
R2_IP = "127.0.0.1"
R3_IP = "127.0.0.1"
R4_IP = "127.0.0.1"
R5_IP = "127.0.0.1"
R1_IN_PORT = 8000
R2_IN_PORT = 8000
R3_IN_PORT = 8000
R4_IN_PORT = 8000
R5_IN_PORT = 8000
# Configuration in Cm
fieldLength = 900
fieldWidth = 600
mapImage = np.zeros((800,1100,3), np.uint8)
# Variabel posisi robot
robot1Position = np.zeros((3))
robot2Position = np.zeros((3))
robot3Position = np.zeros((3))
robot4Position = np.zeros((3))
robot5Position = np.zeros((3))
ballRobot1Position = np.zeros((2))
ballRobot2Position = np.zeros((2))
ballRobot3Position = np.zeros((2))
ballRobot4Position = np.zeros((2))
ballRobot5Position = np.zeros((2))
ballMeanPosition = np.zeros((2))
deltaTime = 1
robot1Color = (0,127,127)
robot2Color = (0,127,255)
robot3Color = (0,255,127)
robot4Color = (0,255,255)
robot5Color = (255,0,127)
ballColor = (0,0,255)
app = Flask(__name__)
# http://mattrichardson.com/Raspberry-Pi-Flask/
@app.route('/')
def index():
"""Video streaming home page."""
return render_template('index.html')
@app.route('/video_feed')
def video_feed():
"""Video streaming route. Put this in the src attribute of an img tag."""
return Response(main(),
mimetype='multipart/x-mixed-replace; boundary=frame')
def worldCoorToImageCoor(x, y):
x = x + 100
y = 800 - (y + 100)
return x, y
def main():
simulationMode = True
if simulationMode == False:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((R1_IP, R1_IN_PORT))
sock.bind((R2_IP, R2_IN_PORT))
sock.bind((R3_IP, R3_IN_PORT))
sock.bind((R4_IP, R4_IN_PORT))
sock.bind((R5_IP, R5_IN_PORT))
except socket.error:
print 'Failed to create socket'
sys.exit()
# Timing value
nowTime = 0
lastTime = 0
loop = 0
while True:
nowTime = time.clock()
timer = nowTime - lastTime
halfDeltaTime = deltaTime / 2.00
# Update every 0.5 * deltatime
if timer > halfDeltaTime:
lastTime = nowTime
loop += 1
print 'Runtime : {} s'.format(deltaTime*loop)
mapFromFile = False
if mapFromFile == True:
# image tidak clear
mapImage[:] = cv2.imread('mapImage.jpg')
else:
mapImage[:] = (0, 255, 0)
cv2.rectangle(mapImage, (100,100), (1000,700), (255,255,255), 3) # Garis Luar
cv2.rectangle(mapImage, (40,530), (100,270), (255,0,0), 10) # Garis Luar Gawang Kiri
cv2.rectangle(mapImage, (1000,530), (1060,270), (0,0,255), 10) # Garis Luar Gawang Kiri
cv2.rectangle(mapImage, (100,650), (200,150), (255,255,255), 3) # Garis Luar Gawang Kiri
cv2.rectangle(mapImage, (900,650), (1000,150), (255,255,255), 3) # Garis Luar Gawang Kiri
cv2.line(mapImage, (550,100), (550,700), (255,255,255), 3) # Garis Tengah
cv2.circle(mapImage, (550,400), 75, (255,255,255), 3) # Lingkaran Tengah
cv2.circle(mapImage, (310,400), 3, (255,255,255), 5)
cv2.circle(mapImage, (790,400), 3, (255,255,255), 5)
textLine = "(0,0)"
x, y = worldCoorToImageCoor(0,0)
cv2.putText(mapImage, textLine, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 127), 1, cv2.LINE_AA)
textLine = "(0,600)"
x, y = worldCoorToImageCoor(0,600)
cv2.putText(mapImage, textLine, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 127), 1, cv2.LINE_AA)
textLine = "(900,600)"
x, y = worldCoorToImageCoor(900,600)
cv2.putText(mapImage, textLine, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 127), 1, cv2.LINE_AA)
textLine = "(900,0)"
x, y = worldCoorToImageCoor(900,0)
cv2.putText(mapImage, textLine, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 127), 1, cv2.LINE_AA)
# cv2.imwrite("mapImage.jpg", mapImage)
# break
if simulationMode == True:
robot1Position[0] = uniform(0, fieldLength)
robot1Position[1] = uniform(0, fieldWidth)
robot1Position[2] = uniform(0, 360)
robot2Position[0] = uniform(0, fieldLength)
robot2Position[1] = uniform(0, fieldWidth)
robot2Position[2] = uniform(0, 360)
robot3Position[0] = uniform(0, fieldLength)
robot3Position[1] = uniform(0, fieldWidth)
robot3Position[2] = uniform(0, 360)
robot4Position[0] = uniform(0, fieldLength)
robot4Position[1] = uniform(0, fieldWidth)
robot4Position[2] = uniform(0, 360)
robot5Position[0] = uniform(0, fieldLength)
robot5Position[1] = uniform(0, fieldWidth)
robot5Position[2] = uniform(0, 360)
ballMeanPosition[0] = uniform(0, fieldLength)
ballMeanPosition[1] = uniform(0, fieldWidth)
drawRobot = True
if drawRobot == True:
x, y = worldCoorToImageCoor(int(robot1Position[0]), int(robot1Position[1]))
cv2.circle(mapImage,(x, y), 20, robot1Color, -1)
textLine = "R1"
cv2.putText(mapImage, textLine, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1, cv2.LINE_AA)
x, y = worldCoorToImageCoor(int(robot2Position[0]), int(robot2Position[1]))
cv2.circle(mapImage,(x, y), 20, robot2Color, -1)
textLine = "R2"
cv2.putText(mapImage, textLine, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1, cv2.LINE_AA)
x, y = worldCoorToImageCoor(int(robot3Position[0]), int(robot3Position[1]))
cv2.circle(mapImage,(x, y), 20, robot3Color, -1)
textLine = "R3"
cv2.putText(mapImage, textLine, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1, cv2.LINE_AA)
x, y = worldCoorToImageCoor(int(robot4Position[0]), int(robot4Position[1]))
cv2.circle(mapImage,(x, y), 20, robot4Color, -1)
textLine = "R4"
cv2.putText(mapImage, textLine, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1, cv2.LINE_AA)
x, y = worldCoorToImageCoor(int(robot5Position[0]), int(robot5Position[1]))
cv2.circle(mapImage,(x, y), 20, robot5Color, -1)
textLine = "R5"
cv2.putText(mapImage, textLine, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1, cv2.LINE_AA)
drawBall = True
if drawBall == True:
x, y = worldCoorToImageCoor(int(ballMeanPosition[0]), int(ballMeanPosition[1]))
cv2.circle(mapImage,(x, y), 15, ballColor, -1)
textLine = "B"
cv2.putText(mapImage, textLine, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0), 1, cv2.LINE_AA)
textLine = "R1 Position : ({}, {}, {})".format(int(robot1Position[0]), int(robot1Position[1]), int(robot1Position[2]))
cv2.putText(mapImage, textLine, (10,20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 127), 1, cv2.LINE_AA)
textLine = "R2 Position : ({}, {}, {})".format(int(robot2Position[0]), int(robot2Position[1]), int(robot2Position[2]))
cv2.putText(mapImage, textLine, (10,40), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 127), 1, cv2.LINE_AA)
textLine = "R3 Position : ({}, {}, {})".format(int(robot3Position[0]), int(robot3Position[1]), int(robot3Position[2]))
cv2.putText(mapImage, textLine, (300,20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 127), 1, cv2.LINE_AA)
textLine = "R4 Position : ({}, {}, {})".format(int(robot4Position[0]), int(robot4Position[1]), int(robot4Position[2]))
cv2.putText(mapImage, textLine, (300,40), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 127), 1, cv2.LINE_AA)
textLine = "R5 Position : ({}, {}, {})".format(int(robot5Position[0]), int(robot5Position[1]), int(robot5Position[2]))
cv2.putText(mapImage, textLine, (590,20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 127), 1, cv2.LINE_AA)
textLine = "Ball Position : ({}, {})".format(int(ballMeanPosition[0]), int(ballMeanPosition[1]))
cv2.putText(mapImage, textLine, (590,40), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 127), 1, cv2.LINE_AA)
# Enable GUI Streaming
showGUI = True
if showGUI:
cv2.imshow("Barelang FC - Map Visualization", mapImage)
# Enable URL Streaming
streamUrl = False
if streamUrl == True:
smallMapImage = cv2.resize(mapImage, None, fx=0.5, fy=0.5, interpolation = cv2.INTER_CUBIC)
cv2.imwrite('stream.jpg', smallMapImage)
yield (b'--frame\r\n'b'Content-Type: image/jpeg\r\n\r\n' + open('stream.jpg', 'rb').read() + b'\r\n')
if showGUI:
k = cv2.waitKey(1)
if k == ord('x'):
cv2.destroyAllWindows()
break
if __name__ == "__main__":
url = "http://0.0.0.0:9999"
if (os.name == "nt"):
chromedir= 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'
webbrowser.get(chromedir).open(url)
else:
webbrowser.get(using='firefox').open_new_tab(url)
app.run(host='0.0.0.0', port=9999, debug=False, threaded=False) | en | 0.256337 | # Configuration in Cm # Variabel posisi robot # http://mattrichardson.com/Raspberry-Pi-Flask/ Video streaming home page. Video streaming route. Put this in the src attribute of an img tag. # Timing value # Update every 0.5 * deltatime # image tidak clear # Garis Luar # Garis Luar Gawang Kiri # Garis Luar Gawang Kiri # Garis Luar Gawang Kiri # Garis Luar Gawang Kiri # Garis Tengah # Lingkaran Tengah # cv2.imwrite("mapImage.jpg", mapImage) # break # Enable GUI Streaming # Enable URL Streaming | 2.308898 | 2 |
workshop2/ecommerce/core/migrations/0001_initial.py | joelibaceta/backend-codigo-10 | 1 | 6615223 | <reponame>joelibaceta/backend-codigo-10
# Generated by Django 3.2.9 on 2021-11-06 14:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('subtitle', models.CharField(max_length=150)),
('main_picture', models.ImageField(upload_to='assets/categories/')),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('price', models.FloatField()),
('main_picture', models.ImageField(upload_to='assets/products/')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to='core.category')),
],
),
]
| # Generated by Django 3.2.9 on 2021-11-06 14:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('subtitle', models.CharField(max_length=150)),
('main_picture', models.ImageField(upload_to='assets/categories/')),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('price', models.FloatField()),
('main_picture', models.ImageField(upload_to='assets/products/')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to='core.category')),
],
),
] | en | 0.886913 | # Generated by Django 3.2.9 on 2021-11-06 14:33 | 1.803223 | 2 |
hands_on_machine_learning_with_scikit_learn_&_tensorflow/6_decision_trees/exercises/8.py | HuangStomach/machine-learning | 0 | 6615224 | <reponame>HuangStomach/machine-learning
# Exercise: grow a "forest" of 1,000 small decision trees, each trained on a
# different random subset of 100 instances, then combine their predictions by
# majority vote (a hand-rolled random forest).
from sklearn.datasets import make_moons
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import ShuffleSplit
from sklearn.metrics import accuracy_score
from sklearn.base import clone
from scipy.stats import mode
import numpy as np
# Two interleaving half-moons, 10k noisy points; fixed seeds for reproducibility.
X, y = make_moons(n_samples=10000, noise=0.4, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
n_trees = 1000
n_instances = 100
mini_sets = []
# Each split keeps n_instances training points; the (unused) test side of the
# split holds the remainder.
rs = ShuffleSplit(n_splits=n_trees, test_size=len(X_train) - n_instances, random_state=42)
for mini_train_index, mini_test_index in rs.split(X_train):
    X_mini_train = X_train[mini_train_index]
    y_mini_train = y_train[mini_train_index]
    mini_sets.append((X_mini_train, y_mini_train))
tree_clf = DecisionTreeClassifier(max_leaf_nodes=17, random_state=42)
forest = [clone(tree_clf) for _ in range(n_trees)]
accuracy_scores = []
# Train each clone on its own mini training set; score it on the shared test set.
for tree, (X_mini_train, y_mini_train) in zip(forest, mini_sets):
    tree.fit(X_mini_train, y_mini_train)
    y_pred = tree.predict(X_test)
    accuracy_scores.append(accuracy_score(y_test, y_pred))
# NOTE(review): the mean single-tree accuracy below is computed but its result
# is discarded -- probably meant to be printed.
np.mean(accuracy_scores)
# Majority vote: collect every tree's predictions, then take the per-column
# mode as the ensemble prediction.
Y_pred = np.empty([n_trees, len(X_test)], dtype=np.uint8)
for tree_index, tree in enumerate(forest):
    Y_pred[tree_index] = tree.predict(X_test)
y_pred_majority_votes, n_votes = mode(Y_pred, axis=0)
print(accuracy_score(y_test, y_pred_majority_votes.reshape([-1])))
| from sklearn.datasets import make_moons
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import ShuffleSplit
from sklearn.metrics import accuracy_score
from sklearn.base import clone
from scipy.stats import mode
import numpy as np
X, y = make_moons(n_samples=10000, noise=0.4, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
n_trees = 1000
n_instances = 100
mini_sets = []
rs = ShuffleSplit(n_splits=n_trees, test_size=len(X_train) - n_instances, random_state=42)
for mini_train_index, mini_test_index in rs.split(X_train):
X_mini_train = X_train[mini_train_index]
y_mini_train = y_train[mini_train_index]
mini_sets.append((X_mini_train, y_mini_train))
tree_clf = DecisionTreeClassifier(max_leaf_nodes=17, random_state=42)
forest = [clone(tree_clf) for _ in range(n_trees)]
accuracy_scores = []
for tree, (X_mini_train, y_mini_train) in zip(forest, mini_sets):
tree.fit(X_mini_train, y_mini_train)
y_pred = tree.predict(X_test)
accuracy_scores.append(accuracy_score(y_test, y_pred))
np.mean(accuracy_scores)
Y_pred = np.empty([n_trees, len(X_test)], dtype=np.uint8)
for tree_index, tree in enumerate(forest):
Y_pred[tree_index] = tree.predict(X_test)
y_pred_majority_votes, n_votes = mode(Y_pred, axis=0)
print(accuracy_score(y_test, y_pred_majority_votes.reshape([-1]))) | none | 1 | 2.666107 | 3 | |
api/models.py | jschmidtnj/healthtech19 | 0 | 6615225 | from main import db
from passlib.hash import pbkdf2_sha256 as sha256
from sqlalchemy.ext.mutable import MutableList
from sqlalchemy.dialects.postgresql import ARRAY
class UserModel(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(120), unique=True, nullable=False)
first_name = db.Column(db.String(120), nullable=False)
last_name = db.Column(db.String(120), nullable=False)
phone = db.Column(db.String(120), nullable=False)
gender = db.Column(db.String(15), nullable=False)
dob = db.Column(db.String(15), nullable=False)
dod = db.Column(db.String(15), nullable=False)
password = db.Column(db.String(120), nullable=False)
medications = db.Column(MutableList.as_mutable(db.ARRAY(db.String(120))), nullable=False)
heatmap = db.Column(MutableList.as_mutable(db.ARRAY(db.String(120))), nullable=False)
def save_to_db(self):
db.session.add(self)
db.session.commit()
@classmethod
def find_by_email(cls, email):
return cls.query.filter_by(email=email).first()
def to_json(x):
return {
'email': x.email,
'first_name': x.first_name,
'last_name': x.last_name,
'phone': x.phone,
'gender': x.gender,
'dob': x.dob,
'dod': x.dod,
'password': <PASSWORD>,
'medications': x.medications,
'heatmap': x.heatmap
}
@classmethod
def return_all(cls):
return {'users': list(map(lambda x: to_json(x), UserModel.query.all()))}
@classmethod
def delete_all(cls):
try:
num_rows_deleted = db.session.query(cls).delete()
db.session.commit()
return {'message': '{} row(s) deleted'.format(num_rows_deleted)}
except:
return {'message': 'Something went wrong'}
@staticmethod
def generate_hash(password):
return sha256.hash(password)
@staticmethod
def verify_hash(password, hash):
return sha256.verify(password, hash)
class RevokedTokenModel(db.Model):
    """JWT blacklist table: stores the ``jti`` of every revoked token."""
    __tablename__ = 'revoked_tokens'
    id = db.Column(db.Integer, primary_key=True)
    jti = db.Column(db.String(120))
    def add(self):
        """Persist this revoked-token row immediately."""
        db.session.add(self)
        db.session.commit()
    @classmethod
    def is_jti_blacklisted(cls, jti):
        """Return True when *jti* is present in the blacklist table."""
        query = cls.query.filter_by(jti=jti).first()
        return bool(query)
| from main import db
from passlib.hash import pbkdf2_sha256 as sha256
from sqlalchemy.ext.mutable import MutableList
from sqlalchemy.dialects.postgresql import ARRAY
class UserModel(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(120), unique=True, nullable=False)
first_name = db.Column(db.String(120), nullable=False)
last_name = db.Column(db.String(120), nullable=False)
phone = db.Column(db.String(120), nullable=False)
gender = db.Column(db.String(15), nullable=False)
dob = db.Column(db.String(15), nullable=False)
dod = db.Column(db.String(15), nullable=False)
password = db.Column(db.String(120), nullable=False)
medications = db.Column(MutableList.as_mutable(db.ARRAY(db.String(120))), nullable=False)
heatmap = db.Column(MutableList.as_mutable(db.ARRAY(db.String(120))), nullable=False)
def save_to_db(self):
db.session.add(self)
db.session.commit()
@classmethod
def find_by_email(cls, email):
return cls.query.filter_by(email=email).first()
def to_json(x):
return {
'email': x.email,
'first_name': x.first_name,
'last_name': x.last_name,
'phone': x.phone,
'gender': x.gender,
'dob': x.dob,
'dod': x.dod,
'password': <PASSWORD>,
'medications': x.medications,
'heatmap': x.heatmap
}
@classmethod
def return_all(cls):
return {'users': list(map(lambda x: to_json(x), UserModel.query.all()))}
@classmethod
def delete_all(cls):
try:
num_rows_deleted = db.session.query(cls).delete()
db.session.commit()
return {'message': '{} row(s) deleted'.format(num_rows_deleted)}
except:
return {'message': 'Something went wrong'}
@staticmethod
def generate_hash(password):
return sha256.hash(password)
@staticmethod
def verify_hash(password, hash):
return sha256.verify(password, hash)
class RevokedTokenModel(db.Model):
__tablename__ = 'revoked_tokens'
id = db.Column(db.Integer, primary_key=True)
jti = db.Column(db.String(120))
def add(self):
db.session.add(self)
db.session.commit()
@classmethod
def is_jti_blacklisted(cls, jti):
query = cls.query.filter_by(jti=jti).first()
return bool(query)
| none | 1 | 2.711679 | 3 | |
etabotsite/etabotapp/TMSlib/data_conversion.py | ShanshanHe/pmp | 2 | 6615226 | <reponame>ShanshanHe/pmp
"""Collection of data conversion tools."""
import logging
def get_velocity_json(velocities, project_name):
    """Build the velocity JSON dict shown in the UI.

    Looks up *project_name* in *velocities*.  Returns an empty dict (and
    logs at debug level) when the project has no velocity entry.
    """
    result = {}
    entry = velocities.get(project_name)
    if entry is None:
        logging.debug('velocity is None for project {}'.format(project_name))
        return result
    result['mean'] = entry.value
    result['upper_estimate'] = entry.higher_estimate()
    result['lower_estimate'] = entry.lower_estimate()
    return result
| """Collection of data conversion tools."""
import logging
def get_velocity_json(velocities, project_name):
"""Creates velocity json for UI from velocities python object
for a project named project_name"""
velocity = velocities.get(project_name)
velocity_json = {}
if velocity is not None:
velocity_json['mean'] = velocity.value
velocity_json['upper_estimate'] = velocity.higher_estimate()
velocity_json['lower_estimate'] = velocity.lower_estimate()
else:
logging.debug('velocity is None for project {}'.format(
project_name))
return velocity_json | en | 0.729728 | Collection of data conversion tools. Creates velocity json for UI from velocities python object for a project named project_name | 3.235724 | 3 |
src/analysis/feature_engineer.py | KewJS/Customer_Segmentation | 0 | 6615227 | <reponame>KewJS/Customer_Segmentation<gh_stars>0
import pandas as pd
import numpy as np
from datetime import timedelta
import squarify
from src.Config import Config
class Logger(object):
    """Minimal stand-in logger: every level simply prints to stdout."""
    info = print
    warning = print
    error = print
    critical = print
class Feature_Engineer(Config):
    """Feature-engineering helpers; inherits configuration from Config."""
    def peak_day_assignment(self, order_day):
        """Return 1 when order_day <= 1 (peak day flag), else 0.

        NOTE(review): the final else branch is unreachable for ordinary
        numeric input (``<= 1`` and ``> 1`` cover all reals except NaN);
        if ever taken it logs the value and implicitly returns None.
        ``self.logger`` is not defined in this class -- presumably
        supplied by Config; verify.
        """
        if order_day <= 1:
            return 1
        elif order_day > 1:
            return 0
        else:
            self.logger.info(order_day)
| import pandas as pd
import numpy as np
from datetime import timedelta
import squarify
from src.Config import Config
class Logger(object):
info = print
warning = print
error = print
critical = print
class Feature_Engineer(Config):
def peak_day_assignment(self, order_day):
if order_day <= 1:
return 1
elif order_day > 1:
return 0
else:
self.logger.info(order_day) | none | 1 | 2.835055 | 3 | |
dewarp/vertical_dewarp.py | kvklink/complete-OMR-pipeline | 1 | 6615228 | import numpy as np
from utils.util import *
from dewarp.deskew import std_houghlines
''' Assumes image is already dewarped horizontally '''
def dewarp_vertical(img):
    """Straighten the vertical structure of an (already horizontally
    dewarped) page image.

    Pipeline: binarize/blockify the image (make_blocks), detect the four
    corner keypoints from the two outermost vertical lines
    (find_keypoints), then warp the original image so those verticals
    become straight (fix_verticals).
    """
    blocks_img = make_blocks(img)
    keypoints = find_keypoints(blocks_img, show=False)
    dewarped_ver = fix_verticals(img, keypoints)
    return dewarped_ver
def make_blocks(img):
    """Turn a BGR page image into an inverted binary "block" image.

    Thresholds the grayscale image at 200, dilates with a 20x1 horizontal
    kernel and erodes with a 1x50 vertical kernel to merge text into solid
    blocks, then inverts so content is white on black.
    """
    # NOTE(review): rows/cols are computed but never used.
    rows, cols = img.shape[:2]
    img_struct = img.copy()
    gray = cv.cvtColor(img_struct, cv.COLOR_BGR2GRAY)
    (thresh, im_bw) = cv.threshold(gray, 200, 255, cv.THRESH_BINARY)# | cv.THRESH_OTSU)
    # OpenCV kernel sizes are (width, height): (1, 50) is tall, (20, 1) is wide.
    erode_struct = cv.getStructuringElement(cv.MORPH_RECT, (1, 50))
    dilate_struct = cv.getStructuringElement(cv.MORPH_RECT, (20, 1))
    step1 = cv.dilate(im_bw, dilate_struct, 1)
    # bgr_imshow("dilate", step1)
    step2 = cv.erode(step1, erode_struct, 1)
    # bgr_imshow("erode", step2)
    im_inv = cv.bitwise_not(step2)
    # bgr_imshow("blocks made", im_inv)
    # cv.imwrite("blocks.png", im_inv)
    return im_inv
def find_verticals(img, show=False):
# Find candidate vertical lines
height, width = img.shape[:2]
rho = 1 # 1 pixel
theta_deg = 0.1 # 0.1 degree
threshold = int(height * 0.1) #100
lines, coords = std_houghlines(img, rho=rho, theta_deg=theta_deg,
threshold=threshold, show=False, save=False)
# Collect 2 clusters of vertical lines: one leftmost, one rightmost
max_slope = 45
max_theta = to_radians(max_slope) #45)
min_theta = to_radians(180 - max_slope)
lefts = None
rights = None
all_verticals = []
marg = 10 #px
page_xc = width / 2
for line, coord in zip(lines, coords):
line = tuple(line[0])
line_theta = get_theta(line)
seems_vertical = line_theta <= max_theta or line_theta >= min_theta
if seems_vertical:
all_verticals.append(coord)
this_xc = get_vertical_xcenter(coord, height)
if lefts is None:
lefts = [coord]
else:
on_left_half = this_xc < page_xc
if on_left_half:
left_xc = np.mean([get_vertical_xcenter(coord,height) for
coord in lefts])
is_lefter = this_xc < left_xc
is_close = (this_xc <= left_xc + marg and
this_xc >= left_xc - marg)
if is_lefter and not is_close:
lefts = [coord]
elif is_close:
lefts.append(coord)
if rights is None:
rights = [coord]
else:
on_right_half = this_xc > page_xc
if on_right_half:
right_xc = np.mean([get_vertical_xcenter(coord,height) for
coord in rights])
is_righter = this_xc > right_xc
is_close = (this_xc <= right_xc + marg and
this_xc >= right_xc - marg)
if is_righter and not is_close:
rights = [coord]
elif is_close:
rights.append(coord)
avg_left = get_avg_coords(lefts)
avg_right = get_avg_coords(rights)
# show_points = lefts + rights
show_points = [avg_left, avg_right]
if show:
bgr_img = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
for (pt1, pt2) in show_points:
cv.line(bgr_img, pt1, pt2, (0, 0, 255), thickness=2,
lineType=cv.LINE_AA)
xc, yc = get_vertical_center((pt1, pt2), height)
cv.circle(bgr_img, (xc,yc), radius=4, color=(0,255,0), thickness=2)
bgr_imshow("main 2 verticals", bgr_img)
return avg_left, avg_right
def find_keypoints(img, show=False):
    """Locate the four corner keypoints (topL, topR, botL, botR).

    Wraps find_verticals() and unpacks its two averaged vertical lines
    (left and right) into their endpoint corners.
    """
    (topl, botl), (topr, botr) = find_verticals(img, show)
    return (topl, topr, botl, botr)
def fix_verticals(img, keypoints):
    """Warp *img* so the detected vertical lines become truly vertical.

    *keypoints* is (topL, topR, botL, botR).  The homography keeps both
    top corners fixed and moves each bottom corner to sit directly below
    its top corner (same x, original y).
    """
    height, width = img.shape[:2]
    topl, topr, botl, botr = [list(kp) for kp in keypoints]
    topl_x = topl[0]
    topr_x = topr[0]
    botl_y = botl[1]
    botr_y = botr[1]
    src_m = np.array([topl, botl, topr, botr])
    # destination: bottoms aligned under their tops
    dst_m = np.array([topl, (topl_x, botl_y), topr, (topr_x, botr_y)])
    h, status = cv.findHomography(src_m, dst_m)
    img_new = cv.warpPerspective(img, h, (width, height))
    # bgr_imshow("Fix verticals", img_new)
    return img_new
####### Helper methods
def get_center(line_coords):
    """Return the midpoint (xc, yc) of a line given as ((x1, y1), (x2, y2))."""
    start, end = line_coords
    xc = np.mean([start[0], end[0]])
    yc = np.mean([start[1], end[1]])
    return xc, yc
def get_xcenter(line_coords):
    """Return only the x coordinate of the line's midpoint."""
    return get_center(line_coords)[0]
def get_vertical_center(line_coords, page_height):
    """Intersect a (near-vertical) line with the page's horizontal midline.

    Returns integer (x, y) where y is page_height // 2 and x is found by
    linear interpolation along the line.
    """
    (x1, y1), (x2, y2) = line_coords
    mid_y = page_height // 2
    mid_x = x1 + (x2 - x1) / (y2 - y1) * (mid_y - y1)
    return int(mid_x), int(mid_y)
def get_vertical_xcenter(line_coords, page_height):
    """Return only the x coordinate where the line crosses the page midline."""
    return get_vertical_center(line_coords, page_height)[0]
def get_avg_coords(lines_coords):
    """Average several lines into one, orienting each top-to-bottom first.

    Each input line is a ((x1, y1), (x2, y2)) pair; a pair is flipped when
    its first point lies below its second, so corresponding endpoints are
    averaged consistently.  Returns [(x_top, y_top), (x_bot, y_bot)] as
    integer tuples (means are truncated).
    """
    oriented = [
        (p1, p2) if p1[1] < p2[1] else (p2, p1)
        for p1, p2 in lines_coords
    ]
    mean_line = np.mean(oriented, axis=0).astype(int)
    return list(map(tuple, mean_line))
def get_theta(line):
    """Return the angle (theta) component of a Hough (rho, theta) line."""
    theta = line[1]
    return theta
if __name__ == "__main__":
img = cv.imread("blocks1.png", cv.IMREAD_GRAYSCALE)
find_verticals(img, show=True) | import numpy as np
from utils.util import *
from dewarp.deskew import std_houghlines
''' Assumes image is already dewarped horizontally '''
def dewarp_vertical(img):
blocks_img = make_blocks(img)
keypoints = find_keypoints(blocks_img, show=False)
dewarped_ver = fix_verticals(img, keypoints)
return dewarped_ver
def make_blocks(img):
rows, cols = img.shape[:2]
img_struct = img.copy()
gray = cv.cvtColor(img_struct, cv.COLOR_BGR2GRAY)
(thresh, im_bw) = cv.threshold(gray, 200, 255, cv.THRESH_BINARY)# | cv.THRESH_OTSU)
erode_struct = cv.getStructuringElement(cv.MORPH_RECT, (1, 50))
dilate_struct = cv.getStructuringElement(cv.MORPH_RECT, (20, 1))
step1 = cv.dilate(im_bw, dilate_struct, 1)
# bgr_imshow("dilate", step1)
step2 = cv.erode(step1, erode_struct, 1)
# bgr_imshow("erode", step2)
im_inv = cv.bitwise_not(step2)
# bgr_imshow("blocks made", im_inv)
# cv.imwrite("blocks.png", im_inv)
return im_inv
def find_verticals(img, show=False):
# Find candidate vertical lines
height, width = img.shape[:2]
rho = 1 # 1 pixel
theta_deg = 0.1 # 0.1 degree
threshold = int(height * 0.1) #100
lines, coords = std_houghlines(img, rho=rho, theta_deg=theta_deg,
threshold=threshold, show=False, save=False)
# Collect 2 clusters of vertical lines: one leftmost, one rightmost
max_slope = 45
max_theta = to_radians(max_slope) #45)
min_theta = to_radians(180 - max_slope)
lefts = None
rights = None
all_verticals = []
marg = 10 #px
page_xc = width / 2
for line, coord in zip(lines, coords):
line = tuple(line[0])
line_theta = get_theta(line)
seems_vertical = line_theta <= max_theta or line_theta >= min_theta
if seems_vertical:
all_verticals.append(coord)
this_xc = get_vertical_xcenter(coord, height)
if lefts is None:
lefts = [coord]
else:
on_left_half = this_xc < page_xc
if on_left_half:
left_xc = np.mean([get_vertical_xcenter(coord,height) for
coord in lefts])
is_lefter = this_xc < left_xc
is_close = (this_xc <= left_xc + marg and
this_xc >= left_xc - marg)
if is_lefter and not is_close:
lefts = [coord]
elif is_close:
lefts.append(coord)
if rights is None:
rights = [coord]
else:
on_right_half = this_xc > page_xc
if on_right_half:
right_xc = np.mean([get_vertical_xcenter(coord,height) for
coord in rights])
is_righter = this_xc > right_xc
is_close = (this_xc <= right_xc + marg and
this_xc >= right_xc - marg)
if is_righter and not is_close:
rights = [coord]
elif is_close:
rights.append(coord)
avg_left = get_avg_coords(lefts)
avg_right = get_avg_coords(rights)
# show_points = lefts + rights
show_points = [avg_left, avg_right]
if show:
bgr_img = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
for (pt1, pt2) in show_points:
cv.line(bgr_img, pt1, pt2, (0, 0, 255), thickness=2,
lineType=cv.LINE_AA)
xc, yc = get_vertical_center((pt1, pt2), height)
cv.circle(bgr_img, (xc,yc), radius=4, color=(0,255,0), thickness=2)
bgr_imshow("main 2 verticals", bgr_img)
return avg_left, avg_right
def find_keypoints(img, show=False):
avg_left, avg_right = find_verticals(img, show)
topl, botl = avg_left
topr, botr = avg_right
return (topl, topr, botl, botr)
def fix_verticals(img, keypoints):
height, width = img.shape[:2]
topl, topr, botl, botr = [list(kp) for kp in keypoints]
topl_x = topl[0]
topr_x = topr[0]
botl_y = botl[1]
botr_y = botr[1]
src_m = np.array([topl, botl, topr, botr])
dst_m = np.array([topl, (topl_x, botl_y), topr, (topr_x, botr_y)])
h, status = cv.findHomography(src_m, dst_m)
img_new = cv.warpPerspective(img, h, (width, height))
# bgr_imshow("Fix verticals", img_new)
return img_new
####### Helper methods
def get_center(line_coords):
(x1, y1), (x2, y2) = line_coords
xc = np.mean([x1,x2])
yc = np.mean([y1,y2])
return xc, yc
def get_xcenter(line_coords):
xc, yc = get_center(line_coords)
return xc
def get_vertical_center(line_coords, page_height):
(x1, y1), (x2, y2) = line_coords
yc = page_height // 2
xc = x1 + (x2-x1)/(y2-y1)*(yc-y1)
return int(xc), int(yc)
def get_vertical_xcenter(line_coords, page_height):
xc, yc = get_vertical_center(line_coords, page_height)
return xc
def get_avg_coords(lines_coords):
lines_coords_downwards = []
for p1,p2 in lines_coords:
x1, y1 = p1
x2, y2 = p2
if y1 < y2:
downwards_coord = (p1,p2)
else:
downwards_coord = (p2,p1)
lines_coords_downwards.append(downwards_coord)
avg_coords = np.mean(lines_coords_downwards, axis=0)
avg_coord = avg_coords.astype(int)
return list(map(tuple, avg_coord))
def get_theta(line):
return line[1]
if __name__ == "__main__":
img = cv.imread("blocks1.png", cv.IMREAD_GRAYSCALE)
find_verticals(img, show=True) | en | 0.584225 | Assumes image is already dewarped horizontally # | cv.THRESH_OTSU) # bgr_imshow("dilate", step1) # bgr_imshow("erode", step2) # bgr_imshow("blocks made", im_inv) # cv.imwrite("blocks.png", im_inv) # Find candidate vertical lines # 1 pixel # 0.1 degree #100 # Collect 2 clusters of vertical lines: one leftmost, one rightmost #45) #px # show_points = lefts + rights # bgr_imshow("Fix verticals", img_new) ####### Helper methods | 2.901076 | 3 |
lesson04/liyongli/update.py | herrywen-nanj/51reboot | 0 | 6615229 | <filename>lesson04/liyongli/update.py
# -*- coding:utf-8 -*-
# author: lyl
import check
import json
import logs
def user(info_list, role):
    """Handle the ``update <username> set <field> = <value>`` console command.

    Only the ``admin`` role may update users.  The command must have
    exactly 6 tokens (e.g. ``update monkey set age = 18``); the target
    user must exist (checked via ``check.user``) and the field must be
    one of username/age/tel/email.  On success the matching entry in the
    ``user.txt`` JSON store is rewritten.  Feedback is printed with ANSI
    colour codes.
    """
    # Permission check: only admin may run updates.
    if role != 'admin':
        print("\033[1;31mpermission denied\033[0m")
        return
    # Validate the command shape, e.g.:
    # update monkey set age = 18
    if len(info_list) != 6:
        print("\033[1;31m输入长度有误,请检查输入内容 eg: update monkey set age = 18\033[0m")
        return
    if info_list[2] != 'set' or info_list[4] != '=':
        print("\033[1;31m输入长度有误,请检查输入内容 eg: update monkey set age = 18\033[0m")
        return
    # Make sure the target user exists.
    result, message, user_id = check.user(info_list[1])
    if result is False:
        print(message)
        return
    elif result is None:
        return
    if info_list[3] not in ['username', 'age', 'tel', 'email']:
        print("\033[1;31m更新字段有误,请检查\033[0m")
        return
    # Validate the new value for the chosen field.
    tag = check.user_input(tag=info_list[3], check_world=info_list[-1])
    if tag:
        with open('user.txt', 'r') as user_fd:
            user_note = user_fd.read()
        # Treat an empty store as an empty JSON object.
        if user_note == '':
            user_note = '{}'
        try:
            user_dict = json.loads(user_note)
        except Exception as e:
            print("\033[1;31m id文件 user.txt存在异常,请手动修复\033[0m")
            logs.error(e)
            return
        # Apply the update and rewrite the whole store.
        user_dict[info_list[1]][info_list[3]] = info_list[-1]
        with open('user.txt', 'w') as user_fd_rw:
            user_fd_rw.write(json.dumps(user_dict, indent=4))
        print("\033[1;32m用户{} {}修改成功\033[0m".format(info_list[1], info_list[3]))
    else:
        print(tag)
| <filename>lesson04/liyongli/update.py
# -*- coding:utf-8 -*-
# author: lyl
import check
import json
import logs
def user(info_list, role):
# 检查用户权限
if role != 'admin':
print("\033[1;31mpermission denied\033[0m")
return
# 检查用户输入内容
# update monkey set age = 18
if len(info_list) != 6:
print("\033[1;31m输入长度有误,请检查输入内容 eg: update monkey set age = 18\033[0m")
return
if info_list[2] != 'set' or info_list[4] != '=':
print("\033[1;31m输入长度有误,请检查输入内容 eg: update monkey set age = 18\033[0m")
return
# 检查用户是否存在
result, message, user_id = check.user(info_list[1])
if result is False:
print(message)
return
elif result is None:
return
if info_list[3] not in ['username', 'age', 'tel', 'email']:
print("\033[1;31m更新字段有误,请检查\033[0m")
return
tag = check.user_input(tag=info_list[3], check_world=info_list[-1])
if tag:
with open('user.txt', 'r') as user_fd:
user_note = user_fd.read()
if user_note == '':
user_note = '{}'
try:
user_dict = json.loads(user_note)
except Exception as e:
print("\033[1;31m id文件 user.txt存在异常,请手动修复\033[0m")
logs.error(e)
return
user_dict[info_list[1]][info_list[3]] = info_list[-1]
with open('user.txt', 'w') as user_fd_rw:
user_fd_rw.write(json.dumps(user_dict, indent=4))
print("\033[1;32m用户{} {}修改成功\033[0m".format(info_list[1], info_list[3]))
else:
print(tag)
| zh | 0.393652 | # -*- coding:utf-8 -*- # author: lyl # 检查用户权限 # 检查用户输入内容 # update monkey set age = 18 # 检查用户是否存在 | 2.8315 | 3 |
preprocess.py | juliakreutzer/neural-slack-bot | 22 | 6615230 | <gh_stars>10-100
import re
import json
import os
import codecs
import sys
import cPickle as pkl
# load json files, filter messages (remove those with urls), tokenize (by regexp)
def tokenize_lowercase(message):
    """Lowercase *message*, inserting a space before each . , ; ? ! token.

    A single pass then collapses double spaces once and strips round
    brackets entirely.
    """
    for punct in ('.', ',', ';', '?', '!'):
        if punct in message:
            message = message.replace(punct, " " + punct)
    return message.lower().replace("  ", " ").replace("(", "").replace(")", "")
def split_sents(message, split_symbol):
    """Insert *split_symbol* after sentence-ending characters in *message*.

    A candidate split on a character is rejected when it would produce
    three or more fragments shorter than 4 characters (guards against
    abbreviations / punctuation noise).  Newlines are always replaced by
    the split symbol.

    NOTE(review): each iteration splits the original ``message`` rather
    than the accumulated ``m``, so when several of the characters occur
    only the LAST matching character's split survives -- this looks like
    a bug (``m.strip().split(c)`` was probably intended).  Confirm before
    changing, since downstream sentence counts depend on the current
    behaviour.
    """
    # insert a split symbol into message for splitting sentences
    chars = ['.', '?', '!', ')', '(']
    m = message
    for c in chars:
        if c in message:
            split_m = message.strip().split(c)
            if len(split_m) > 1:
                # reject splits that yield 3+ very short fragments
                if sum([1 if len(part)<4 else 0 for part in split_m]) < 3:
                    prefix = c+split_symbol
                    m = prefix.join(split_m)
    if "\n" in m:
        m = m.replace("\n", split_symbol)
    return m
def get_messages(channels_dir):
channels = os.listdir(channels_dir)
print "Preprocessing %d channels: %s\n" % (len(channels), str(channels))
all_messages = []
for channel in channels:
channel_sents = 0
with codecs.open(channels_dir+"/"+channel, "r", "utf8") as c:
loaded = json.load(c)
channel_messages = loaded["messages"]
for message in channel_messages:
if message["type"] == "message":
if "subtype" in message.keys() or "bot" in message["user"].lower():
continue
message_text = message["text"]
urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', message_text)
mailtos = re.findall('<mailto:.*', message_text)
if len(urls) >= 1 or len(mailtos):
continue
tokenized = tokenize_lowercase(message_text)
if len(tokenized) < 3:
continue
split_symbol = " ### "
sents = split_sents(tokenized, split_symbol).split(split_symbol)
channel_sents += len(sents)
all_messages.extend(sents)
print "%d sentences from channel %s" % (channel_sents, channel)
print "total: %d sentences" % (len(all_messages))
return all_messages
def is_mostl_numeric(token):
"""
Checks whether the string contains at least 50% numbers
:param token:
:return:
"""
a = len(token)
for i in range(0, 10):
token = token.replace(str(i), "")
if len(token) < 0.5*a and len(token) != a:
return True
else:
return False
def get_vocab(messages, limit):
word2id = {"PAD": 0, "<s>":1, "</s>":2, "UNK": 3}
counts = {}
for m in messages:
tokens = m.split()
for t in tokens:
if is_mostl_numeric(t):
continue
if t[0] in [u'.', u';', u'!', u'-', '.', ';', '!', '-'] and len(t) > 1:
continue
if len(t) < 20: # filter out too long words
c = counts.get(t, 0)
counts[t] = c+1
vocab = sorted(counts, key=counts.get, reverse=True)[:limit]
for v in vocab:
word2id[v] = len(word2id)
return word2id
def write_corpus(corpus_dir, sentences, vocab):
"""
write the sentences to a text file, dump the vocabulary
:param corpus_dir:
:param sentences:
:param vocab:
:return:
"""
vocab_file = corpus_dir+"/vocab.pkl"
sents_file = corpus_dir+"/corpus.txt"
with open(vocab_file, "wb") as vf:
pkl.dump(vocab, vf)
with codecs.open(sents_file, "w") as sf: #, "utf8"
c = 0
for sent in sentences:
if len(sent) > 5:
c += 1
sf.write(sent.strip()+"\n")
print "Dumped vocab in %s" % vocab_file
print "Wrote corpus (%d sents.) to %s" % (c, sents_file)
def main():
channels_dir = "slack-data/channels"
corpus_dir = "slack-data/corpus"
messages = get_messages(channels_dir)
limit = 15000
vocab = get_vocab(messages, limit)
write_corpus(corpus_dir, messages, vocab)
if __name__ == "__main__":
main()
| import re
import json
import os
import codecs
import sys
import cPickle as pkl
# load json files, filter messages (remove those with urls), tokenize (by regexp)
def tokenize_lowercase(message):
# tokenize at ,.?!
chars = ['.', ',', ';', '?', '!']
for c in chars:
if c in message:
prefix = " "+c
m = prefix.join(message.split(c))
message = m
filtered_m = message.lower().replace(" ", " ").replace("(", "").replace(")", "") # delete brackets
return filtered_m
def split_sents(message, split_symbol):
# insert a split symbol into message for splitting sentences
chars = ['.', '?', '!', ')', '(']
m = message
for c in chars:
if c in message:
split_m = message.strip().split(c)
if len(split_m) > 1:
if sum([1 if len(part)<4 else 0 for part in split_m]) < 3:
prefix = c+split_symbol
m = prefix.join(split_m)
if "\n" in m:
m = m.replace("\n", split_symbol)
return m
def get_messages(channels_dir):
channels = os.listdir(channels_dir)
print "Preprocessing %d channels: %s\n" % (len(channels), str(channels))
all_messages = []
for channel in channels:
channel_sents = 0
with codecs.open(channels_dir+"/"+channel, "r", "utf8") as c:
loaded = json.load(c)
channel_messages = loaded["messages"]
for message in channel_messages:
if message["type"] == "message":
if "subtype" in message.keys() or "bot" in message["user"].lower():
continue
message_text = message["text"]
urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', message_text)
mailtos = re.findall('<mailto:.*', message_text)
if len(urls) >= 1 or len(mailtos):
continue
tokenized = tokenize_lowercase(message_text)
if len(tokenized) < 3:
continue
split_symbol = " ### "
sents = split_sents(tokenized, split_symbol).split(split_symbol)
channel_sents += len(sents)
all_messages.extend(sents)
print "%d sentences from channel %s" % (channel_sents, channel)
print "total: %d sentences" % (len(all_messages))
return all_messages
def is_mostl_numeric(token):
"""
Checks whether the string contains at least 50% numbers
:param token:
:return:
"""
a = len(token)
for i in range(0, 10):
token = token.replace(str(i), "")
if len(token) < 0.5*a and len(token) != a:
return True
else:
return False
def get_vocab(messages, limit):
word2id = {"PAD": 0, "<s>":1, "</s>":2, "UNK": 3}
counts = {}
for m in messages:
tokens = m.split()
for t in tokens:
if is_mostl_numeric(t):
continue
if t[0] in [u'.', u';', u'!', u'-', '.', ';', '!', '-'] and len(t) > 1:
continue
if len(t) < 20: # filter out too long words
c = counts.get(t, 0)
counts[t] = c+1
vocab = sorted(counts, key=counts.get, reverse=True)[:limit]
for v in vocab:
word2id[v] = len(word2id)
return word2id
def write_corpus(corpus_dir, sentences, vocab):
"""
write the sentences to a text file, dump the vocabulary
:param corpus_dir:
:param sentences:
:param vocab:
:return:
"""
vocab_file = corpus_dir+"/vocab.pkl"
sents_file = corpus_dir+"/corpus.txt"
with open(vocab_file, "wb") as vf:
pkl.dump(vocab, vf)
with codecs.open(sents_file, "w") as sf: #, "utf8"
c = 0
for sent in sentences:
if len(sent) > 5:
c += 1
sf.write(sent.strip()+"\n")
print "Dumped vocab in %s" % vocab_file
print "Wrote corpus (%d sents.) to %s" % (c, sents_file)
def main():
channels_dir = "slack-data/channels"
corpus_dir = "slack-data/corpus"
messages = get_messages(channels_dir)
limit = 15000
vocab = get_vocab(messages, limit)
write_corpus(corpus_dir, messages, vocab)
if __name__ == "__main__":
main() | en | 0.71195 | # load json files, filter messages (remove those with urls), tokenize (by regexp) # tokenize at ,.?! # delete brackets # insert a split symbol into message for splitting sentences ### " Checks whether the string contains at least 50% numbers :param token: :return: # filter out too long words write the sentences to a text file, dump the vocabulary :param corpus_dir: :param sentences: :param vocab: :return: #, "utf8" | 2.598698 | 3 |
_ebcf_alexa/speechlet.py | dmotles/ebcf-alexa | 0 | 6615231 | <reponame>dmotles/ebcf-alexa<gh_stars>0
from typing import Union
import xml.etree.ElementTree as libxml
class _Dictable(object):
def dict(self) -> dict:
return {}
class SSMLParseError(ValueError):
"""Raised if ssml is broken."""
def xml_error(parse_err: libxml.ParseError, txt: str) -> SSMLParseError:
line_no, offset = parse_err.position
default_msg = parse_err.args[0]
for line in txt.splitlines(False):
line_no -= 1
if line_no == 0:
msg = '\n'.join([
default_msg,
'',
'\t' + line,
'\t' + ' '*offset + '^',
])
return SSMLParseError(msg)
return SSMLParseError(default_msg)
def validate_ssml(ssml_str: str):
try:
doc: libxml.Element = libxml.fromstring(ssml_str)
except libxml.ParseError as perr:
raise xml_error(perr, ssml_str) from None
if doc.tag != 'speak':
raise SSMLParseError('ssml must start and end with <speak> tags.')
class SSML(_Dictable):
def __init__(self, ssml: str):
starttag = '<speak>' if not ssml.startswith('<speak>') else ''
endtag = '</speak>' if not ssml.endswith('</speak>') else ''
ssml = ''.join((starttag, ssml, endtag))
validate_ssml(ssml)
self.ssml = ssml
def dict(self) -> dict:
return {
'type': 'SSML',
'ssml': self.ssml
}
class PlainText(_Dictable):
def __init__(self, text: str):
self.text = text
def dict(self) -> dict:
return {
'type': 'PlainText',
'text': self.text
}
def to_ssml(self) -> SSML:
return SSML('<speak>%s</speak>' % self.text)
SpeechType = Union[SSML, PlainText]
class SimpleCard(_Dictable):
def __init__(self, title: str, content: str):
self.title = title
self.content = content
def dict(self) -> dict:
return {
'type': 'Simple',
'title': self.title,
'content': self.content
}
def _convert_https(url: str) -> str:
if url.startswith('http://'):
return url.replace('http://', 'https://', 1)
return url
class StandardCard(SimpleCard):
def __init__(self, title: str, content: str, small_image_url: str=None, large_image_url: str=None):
super().__init__(title, content)
if small_image_url:
small_image_url = _convert_https(small_image_url)
if large_image_url:
large_image_url = _convert_https(large_image_url)
self.small_image_url = small_image_url
self.large_image_url = large_image_url
def dict(self) -> dict:
x = {
'type': 'Standard',
'title': self.title,
'text': self.content,
}
if self.small_image_url:
x.setdefault('image', {})['smallImageUrl'] = self.small_image_url
if self.large_image_url:
x.setdefault('image', {})['largeImageUrl'] = self.large_image_url
return x
class SpeechletResponse(_Dictable):
def __init__(self,
output_speech: SpeechType=None,
card: SimpleCard=None,
reprompt: SpeechType=None,
attributes: dict=None,
should_end: bool=True):
self.output_speech = output_speech
self.card = card
self.reprompt = reprompt
self.attributes = attributes
self.should_end = should_end
def dict(self) -> dict:
x = {
'version': '1.0',
'response': {
'shouldEndSession': self.should_end
},
'sessionAttributes': self.attributes or {}
}
if self.output_speech:
x['response']['outputSpeech'] = self.output_speech.dict()
if self.reprompt:
x['response']['reprompt'] = {
'outputSpeech': self.reprompt.dict()
}
if self.card:
x['response']['card'] = self.card.dict()
return x
| from typing import Union
import xml.etree.ElementTree as libxml
class _Dictable(object):
def dict(self) -> dict:
return {}
class SSMLParseError(ValueError):
"""Raised if ssml is broken."""
def xml_error(parse_err: libxml.ParseError, txt: str) -> SSMLParseError:
line_no, offset = parse_err.position
default_msg = parse_err.args[0]
for line in txt.splitlines(False):
line_no -= 1
if line_no == 0:
msg = '\n'.join([
default_msg,
'',
'\t' + line,
'\t' + ' '*offset + '^',
])
return SSMLParseError(msg)
return SSMLParseError(default_msg)
def validate_ssml(ssml_str: str):
try:
doc: libxml.Element = libxml.fromstring(ssml_str)
except libxml.ParseError as perr:
raise xml_error(perr, ssml_str) from None
if doc.tag != 'speak':
raise SSMLParseError('ssml must start and end with <speak> tags.')
class SSML(_Dictable):
def __init__(self, ssml: str):
starttag = '<speak>' if not ssml.startswith('<speak>') else ''
endtag = '</speak>' if not ssml.endswith('</speak>') else ''
ssml = ''.join((starttag, ssml, endtag))
validate_ssml(ssml)
self.ssml = ssml
def dict(self) -> dict:
return {
'type': 'SSML',
'ssml': self.ssml
}
class PlainText(_Dictable):
def __init__(self, text: str):
self.text = text
def dict(self) -> dict:
return {
'type': 'PlainText',
'text': self.text
}
def to_ssml(self) -> SSML:
return SSML('<speak>%s</speak>' % self.text)
SpeechType = Union[SSML, PlainText]
class SimpleCard(_Dictable):
def __init__(self, title: str, content: str):
self.title = title
self.content = content
def dict(self) -> dict:
return {
'type': 'Simple',
'title': self.title,
'content': self.content
}
def _convert_https(url: str) -> str:
if url.startswith('http://'):
return url.replace('http://', 'https://', 1)
return url
class StandardCard(SimpleCard):
def __init__(self, title: str, content: str, small_image_url: str=None, large_image_url: str=None):
super().__init__(title, content)
if small_image_url:
small_image_url = _convert_https(small_image_url)
if large_image_url:
large_image_url = _convert_https(large_image_url)
self.small_image_url = small_image_url
self.large_image_url = large_image_url
def dict(self) -> dict:
x = {
'type': 'Standard',
'title': self.title,
'text': self.content,
}
if self.small_image_url:
x.setdefault('image', {})['smallImageUrl'] = self.small_image_url
if self.large_image_url:
x.setdefault('image', {})['largeImageUrl'] = self.large_image_url
return x
class SpeechletResponse(_Dictable):
def __init__(self,
output_speech: SpeechType=None,
card: SimpleCard=None,
reprompt: SpeechType=None,
attributes: dict=None,
should_end: bool=True):
self.output_speech = output_speech
self.card = card
self.reprompt = reprompt
self.attributes = attributes
self.should_end = should_end
def dict(self) -> dict:
x = {
'version': '1.0',
'response': {
'shouldEndSession': self.should_end
},
'sessionAttributes': self.attributes or {}
}
if self.output_speech:
x['response']['outputSpeech'] = self.output_speech.dict()
if self.reprompt:
x['response']['reprompt'] = {
'outputSpeech': self.reprompt.dict()
}
if self.card:
x['response']['card'] = self.card.dict()
return x | en | 0.85016 | Raised if ssml is broken. | 2.91387 | 3 |
hec.py | jhroy/theses2021 | 0 | 6615232 | <reponame>jhroy/theses2021
# coding=utf-8
# ©2021, <NAME> - Licence GNU GPL v3
from bs4 import BeautifulSoup
import csv, requests, io, time
from PyPDF2 import PdfFileReader
from selenium import webdriver
finput = "hecURLs.csv"
fichier = "hec2021.csv"
entetes1 = {"User-Agent":"<NAME>, UQAM, <EMAIL> - moissonnage en vue d'un article pour la revue de l'ACFAS"}
f1 = open(finput)
uuids = csv.reader(f1)
for uuid in sorted(uuids):
annee = 0
departement = "inconnu"
diplome = "inconnu"
urlThese = "http://reflexion.hec.ca/notice?id={}".format(uuid[1])
print(urlThese)
yo = webdriver.Chrome()
yo.get(urlThese)
time.sleep(4)
resultats = yo.page_source
page2 = BeautifulSoup(resultats,"html.parser")
titre = page2.find("title").text
auteur = "?"
divs = page2.find_all("div")
try:
for div in divs:
if div.text == "Date de diplomation":
annee = div.find_next("span").text.strip()
except:
annee = 0
try:
for div in divs:
if div.text == "Programme":
departement = div.find_next("span").text.strip()
except:
departement = "inconnu"
try:
for div in divs:
if div.text == "Cheminement":
diplome = div.find_next("span").text.strip()
except:
diplome = "inconnu"
for div in divs:
if div.text == "Libre accès à la publication":
urlPDF = div.find_next("a")["href"]
try:
response = requests.get(urlPDF)
with io.BytesIO(response.content) as open_pdf_file:
read_pdf = PdfFileReader(open_pdf_file)
num_pages = read_pdf.getNumPages()
# print(num_pages)
except:
num_pages = None
langue = "inconnue"
infos = ["HEC", titre, annee, auteur, departement, langue, diplome, num_pages, urlThese]
print(infos)
asterix = open(fichier,"a")
obelix = csv.writer(asterix)
obelix.writerow(infos)
yo.close()
print("$"*10)
| # coding=utf-8
# ©2021, <NAME> - Licence GNU GPL v3
from bs4 import BeautifulSoup
import csv, requests, io, time
from PyPDF2 import PdfFileReader
from selenium import webdriver
finput = "hecURLs.csv"
fichier = "hec2021.csv"
entetes1 = {"User-Agent":"<NAME>, UQAM, <EMAIL> - moissonnage en vue d'un article pour la revue de l'ACFAS"}
f1 = open(finput)
uuids = csv.reader(f1)
for uuid in sorted(uuids):
annee = 0
departement = "inconnu"
diplome = "inconnu"
urlThese = "http://reflexion.hec.ca/notice?id={}".format(uuid[1])
print(urlThese)
yo = webdriver.Chrome()
yo.get(urlThese)
time.sleep(4)
resultats = yo.page_source
page2 = BeautifulSoup(resultats,"html.parser")
titre = page2.find("title").text
auteur = "?"
divs = page2.find_all("div")
try:
for div in divs:
if div.text == "Date de diplomation":
annee = div.find_next("span").text.strip()
except:
annee = 0
try:
for div in divs:
if div.text == "Programme":
departement = div.find_next("span").text.strip()
except:
departement = "inconnu"
try:
for div in divs:
if div.text == "Cheminement":
diplome = div.find_next("span").text.strip()
except:
diplome = "inconnu"
for div in divs:
if div.text == "Libre accès à la publication":
urlPDF = div.find_next("a")["href"]
try:
response = requests.get(urlPDF)
with io.BytesIO(response.content) as open_pdf_file:
read_pdf = PdfFileReader(open_pdf_file)
num_pages = read_pdf.getNumPages()
# print(num_pages)
except:
num_pages = None
langue = "inconnue"
infos = ["HEC", titre, annee, auteur, departement, langue, diplome, num_pages, urlThese]
print(infos)
asterix = open(fichier,"a")
obelix = csv.writer(asterix)
obelix.writerow(infos)
yo.close()
print("$"*10) | en | 0.579905 | # coding=utf-8 # ©2021, <NAME> - Licence GNU GPL v3 # print(num_pages) | 2.88777 | 3 |
dbhelper.py | fgrullon/waitercaller | 0 | 6615233 | import pymongo
from bson.objectid import ObjectId
DATABASE = "waitercaller"
class DBHelper:
def __init__(self):
client = pymongo.MongoClient()
self.db = client[DATABASE]
def get_user(self, email):
return self.db.users.find_one({"email" : email})
def add_user(self, email, salt, hashed):
self.db.users.insert({"email" : email, "salt" : salt,
"hashed" : hashed})
def add_table(self, number, owner):
new_id = self.db.tables.insert({"number" : number, "owner" : owner})
return new_id
def update_table(self, _id, url):
self.db.tables.update({"_id" : _id}, {"$set" : {"url" : url}})
def get_tables(self, owner_id):
return list(self.db.tables.find({"owner" : owner_id}))
def get_table(self, table_id):
return self.db.tables.find_one({"_id" : ObjectId(table_id)})
def delete_table(self, table_id):
self.db.tables.remove({"_id" : ObjectId(table_id)})
def add_request(self, table_id, time):
table = self.get_table(table_id)
try:
self.db.requests.insert({"owner" : table['owner'],
"table_number" : table['number'],
"table_id" : table_id, "time" : time})
return True
except pymongo.errors.DuplicateKeyError:
return False
def get_requests(self, owner_id):
return list(self.db.requests.find({"owner" : owner_id}))
def delete_request(self, request_id):
self.db.requests.remove({"_id": ObjectId(request_id)})
def add_categories(self, name):
self.db.categories.insert({"categorie_name" : name})
def add_menu_items(self, name, item, description, price):
self.db.menu.insert({"categorie_name" : name, "item_name" : item, "description" : description, "price": price})
def get_categories_name(self):
categories = self.db.categories.find({}, {"_id" : 0, "categorie_name" : 1})
return categories
def get_menu_item_by_name(self, item):
return self.db.menu.find_one({"item_name" : item})
def get_menu(self, categorie_name):
menu_all = self.db.menu.find({"categorie_name" : categorie_name}, {"_id" : 0, "categorie_name" : 1, "item_name" : 1, "description" : 1, "price" : 1})
return menu_all | import pymongo
from bson.objectid import ObjectId
DATABASE = "waitercaller"
class DBHelper:
def __init__(self):
client = pymongo.MongoClient()
self.db = client[DATABASE]
def get_user(self, email):
return self.db.users.find_one({"email" : email})
def add_user(self, email, salt, hashed):
self.db.users.insert({"email" : email, "salt" : salt,
"hashed" : hashed})
def add_table(self, number, owner):
new_id = self.db.tables.insert({"number" : number, "owner" : owner})
return new_id
def update_table(self, _id, url):
self.db.tables.update({"_id" : _id}, {"$set" : {"url" : url}})
def get_tables(self, owner_id):
return list(self.db.tables.find({"owner" : owner_id}))
def get_table(self, table_id):
return self.db.tables.find_one({"_id" : ObjectId(table_id)})
def delete_table(self, table_id):
self.db.tables.remove({"_id" : ObjectId(table_id)})
def add_request(self, table_id, time):
table = self.get_table(table_id)
try:
self.db.requests.insert({"owner" : table['owner'],
"table_number" : table['number'],
"table_id" : table_id, "time" : time})
return True
except pymongo.errors.DuplicateKeyError:
return False
def get_requests(self, owner_id):
return list(self.db.requests.find({"owner" : owner_id}))
def delete_request(self, request_id):
self.db.requests.remove({"_id": ObjectId(request_id)})
def add_categories(self, name):
self.db.categories.insert({"categorie_name" : name})
def add_menu_items(self, name, item, description, price):
self.db.menu.insert({"categorie_name" : name, "item_name" : item, "description" : description, "price": price})
def get_categories_name(self):
categories = self.db.categories.find({}, {"_id" : 0, "categorie_name" : 1})
return categories
def get_menu_item_by_name(self, item):
return self.db.menu.find_one({"item_name" : item})
def get_menu(self, categorie_name):
menu_all = self.db.menu.find({"categorie_name" : categorie_name}, {"_id" : 0, "categorie_name" : 1, "item_name" : 1, "description" : 1, "price" : 1})
return menu_all | none | 1 | 2.638634 | 3 | |
cloudbackup/database/sqlite.py | nloadholtes/python-cloudbackup-sdk | 4 | 6615234 | <filename>cloudbackup/database/sqlite.py
"""
Rackspace Cloud Backup SQLite Database Functionality
"""
from __future__ import print_function
import base64
import datetime
import logging
import os
import os.path
import random
import sqlite3
class CloudBackupCleanUpOffset(object):
    """
    Per-agent cleanup scheduling offset.

    The offset is a number of seconds between zero and one week that is
    added to cleanup-time calculations so each agent cleans up at a
    different point within its cleanup week.
    """

    # One week expressed in seconds; the largest valid offset value.
    MAX_CLEANUP_OFFSET = int(datetime.timedelta(weeks=1).total_seconds())
    # Sentinel value (one past the maximum) marking an unset/invalid offset.
    INVALID_CLEANUP_OFFSET = (MAX_CLEANUP_OFFSET + 1)
    # daysToKeepOldVersions value meaning expired files are never deleted.
    KEEP_EXPIRED_FILES_FOREVER = 0
    # Sentinel cleanup-week index meaning "never delete".
    CLEANUP_INDEX_NEVER_DELETE = 1999999999

    @classmethod
    def random_index(cls):
        """Generate a new random offset value.

        :returns: int - random value where 0 <= v <= MAX_CLEANUP_OFFSET
        """
        return random.randint(0, cls.MAX_CLEANUP_OFFSET)

    @classmethod
    def make_new_valid_offset(cls, offset, seconds):
        """Calculate a new offset from an existing one adjusted by ``seconds``.

        :param offset: int or CloudBackupCleanUpOffset - the base offset
        :param seconds: int - seconds to adjust by (may be negative)
        :returns: int - the new offset, wrapped into [0, MAX_CLEANUP_OFFSET)
        :raises ValueError: if the base offset is the invalid sentinel, or
            the adjustment would take the offset below zero
        """
        o = CloudBackupCleanUpOffset(offset=offset)
        if o.offset == cls.INVALID_CLEANUP_OFFSET:
            raise ValueError('Offset is not modifiable at this time. Please set a modifiable offset first.')
        # NOTE: the original code re-checked for the invalid sentinel here and
        # remapped it to MAX_CLEANUP_OFFSET; that branch was unreachable (the
        # check above already raises) and has been removed.
        if (o.offset + seconds) < 0:
            raise ValueError('Offset will go below the minimum value. Please adjust forward in time.')
        return int((o.offset + seconds) % cls.MAX_CLEANUP_OFFSET)

    @classmethod
    def get_next_cleanup_time(cls, offset, daysToKeepOldVersions, adjustment=None):
        """Calculate the next cleanup week index and its starting weekday.

        Conceptually:
        (Current Time (Unix Epoch) + (daysToKeepOldVersions * Seconds Per Day) + Offset) / Seconds Per Week

        :param offset: CloudBackupCleanUpOffset (ignored when cleanup is disabled)
        :param daysToKeepOldVersions: days to keep old file versions;
            KEEP_EXPIRED_FILES_FOREVER disables deletion entirely
        :param adjustment: optional datetime.timedelta applied to the computed time
        :returns: tuple (cleanup_week, weekday); weekday is None when cleanup
            is disabled
        :raises TypeError: when offset or adjustment has the wrong type
        """
        cleanup_week = None
        weekday = None
        # == instead of the original "is": identical for CPython small ints
        # but also correct for any other value comparing equal to zero.
        if daysToKeepOldVersions == cls.KEEP_EXPIRED_FILES_FOREVER:
            cleanup_week = cls.CLEANUP_INDEX_NEVER_DELETE
        elif not isinstance(offset, CloudBackupCleanUpOffset):
            raise TypeError('offset must be an instance of CloudBackupCleanUpOffset')
        elif not ((adjustment is None) or isinstance(adjustment, datetime.timedelta)):
            raise TypeError('adjustment must be none or must be an instance of datetime.timedelta')
        else:
            current_time = datetime.datetime.utcnow()
            new_time = (current_time
                        + datetime.timedelta(days=daysToKeepOldVersions)
                        + datetime.timedelta(seconds=offset.offset))
            if adjustment is not None:
                new_time = new_time + adjustment
            # isocalendar() yields (ISO year, ISO week of year, ISO weekday)
            iso_cal = new_time.isocalendar()
            # Cleanup week is the number of weeks since Jan 1, 1970:
            # ISO week of the current year plus 52 weeks per elapsed year.
            cleanup_week = iso_cal[1] + (iso_cal[0] - 1970) * 52
            # The offset is an offset from the Unix epoch, so treat it as an
            # epoch timestamp to find the weekday/time the cleanup week starts.
            # NOTE(review): fromtimestamp() converts to local time while the
            # label claims UTC - message format preserved from the original.
            weekday = datetime.datetime.fromtimestamp(offset.offset).strftime('%A - %H:%M:%S UTC')
        return (cleanup_week, weekday)

    @staticmethod
    def getInvalidOffset():
        """Create a new offset carrying the invalid sentinel value.

        :returns: CloudBackupCleanUpOffset with the invalid offset set
        """
        return CloudBackupCleanUpOffset(offset=CloudBackupCleanUpOffset.INVALID_CLEANUP_OFFSET)

    @staticmethod
    def getRandomOffset():
        """Create a new offset carrying a random valid value.

        :returns: CloudBackupCleanUpOffset with a random offset set
        """
        return CloudBackupCleanUpOffset(offset=CloudBackupCleanUpOffset.random_index())

    def __init__(self, offset=None):
        """Initialize the cleanup offset.

        :param offset: int or CloudBackupCleanUpOffset - the offset to store
        :raises TypeError: if offset is neither an int nor a
            CloudBackupCleanUpOffset (including the default None)
        :raises ValueError: if the value is outside the permitted range
        """
        if isinstance(offset, int):
            self._offset = offset
            self.validate_offset()
        elif isinstance(offset, CloudBackupCleanUpOffset):
            # Copy construction: validate the source, then copy its value.
            offset.validate_offset()
            self._offset = offset.offset
        else:
            raise TypeError('Unknown offset type {0:}'.format(offset.__class__))

    def validate_offset(self):
        """Enforce that the stored offset is within the valid range.

        :raises ValueError: when the value is outside
            0 <= v <= INVALID_CLEANUP_OFFSET
        """
        if self._offset < 0 or self._offset > self.__class__.INVALID_CLEANUP_OFFSET:
            raise ValueError('offset must be in a range between 0 and {0:}'.format(self.__class__.INVALID_CLEANUP_OFFSET))

    @property
    def offset(self):
        """The stored offset, in seconds.

        :returns: int - seconds the offset adds to the base time; the invalid
            sentinel when no offset is stored
        """
        if self._offset is None:
            # Defensive: __init__ never stores None, but keep the original
            # fallback to the invalid sentinel.
            return self.__class__.INVALID_CLEANUP_OFFSET
        else:
            return self._offset

    @offset.setter
    def offset(self, offset):
        """Replace the stored offset and re-validate it."""
        self._offset = offset
        self.validate_offset()

    def changeBy(self, seconds=None):
        """Adjust the stored offset by ``seconds`` (may be negative).

        On failure the previous value is restored before raising.

        :param seconds: int - adjustment in seconds; passing the default
            None raises TypeError from the addition below
        :returns: True on success
        :raises ValueError: when the adjusted value leaves the valid range
        """
        if self._offset is None:
            raise ValueError('Offset is not modifiable at this time. Please set a modifiable offset first.')
        old_offset = self._offset
        try:
            self._offset = self._offset + seconds
            self.validate_offset()
            return True
        except ValueError:
            # Roll back to the last known-good value before reporting.
            self._offset = old_offset
            raise ValueError('Seconds ({1:}) cannot make offset go below zero (0) or above {0:}'.format(self.__class__.INVALID_CLEANUP_OFFSET, seconds))
class CloudBackupSqlite(object):
"""
Cloud Backup Sqlite Database Interface
"""
def __init__(self, dbfile):
"""
Open a SQLite3 instance to the specified sqlite3 db file
"""
self.log = logging.getLogger(__name__)
self.dbfile = dbfile
self.dbinstance = None
self.__open_db()
def __del__(self):
"""
Clean up
"""
self.__close_db()
def __open_db(self):
"""
Open the database
"""
self.log.debug('Opening database')
self.dbinstance = sqlite3.connect(self.dbfile)
self.dbinstance.text_factory = str
def __close_db(self):
"""
Close the database instance
"""
self.log.debug('Closing database')
self.dbinstance.close()
del self.dbinstance
self.dbinstance = None
def __is_db_opened(self):
"""
Return whether or not the database is currently opened for use
"""
self.log.debug('Checking open: {0:}'.format((self.dbinstance is not None)))
return (self.dbinstance is not None)
def GetDirectoryPath(self, directoryid):
"""
Given a directory id from the database, retrieve its path and parent directory id
"""
conn = self.dbinstance.cursor()
conn.execute('SELECT parentdirectoryid, path FROM directories WHERE directoryid=:id', {'id': directoryid})
results = conn.fetchone()
self.log.debug(' directoryid(%d) has parentdirectoryid:%d and path:%s' % (directoryid, results[0], results[1]))
return results
def GetFilenameSet(self, snapshotid):
"""
Given a snapshotid return all the files and their relevant data from the database
snapshotid - id value maching the snapshots table for a valid snapshot
"""
conn = self.dbinstance.cursor()
# note: we have to check against two snapshot id's as there is the possibility that no file has the snapshot id
# that was provided if that file is still in the latest snapshot and therefore fresh.
# Not sure how else to associate the files to a specific snapshot ftm.
results = {}
results['filedata'] = {}
files = set()
for row in conn.execute('SELECT fileid FROM files WHERE digest IS NOT NULL AND (lastsnapshotid=:snapshotid or lastsnapshotid=2000000000)', {'snapshotid': snapshotid}):
files.add(row[0])
return files
    def GetFileInformation(self, fileid):
        """
        Retrieve the full backup description of a single file.

        :param fileid: id of the row in the files table; the file must have
            a non-NULL digest to be returned
        :returns: dictionary containing:
            filedata - list (zero or one entry, since fileid selects one row)
                of dictionaries with:
                name - full path (directory path + '/' + filename)
                base64-sha512 - upper-cased base64 digest as stored in the db
                sha512 - same digest re-encoded as upper-case hexadecimal
                size - file size in bytes
                blockdata - inline block data blob (used for small files)
                blocks - per-file block map from GetFileBlocks()
                bundles - set of bundle ids referenced by those blocks
            bundles - de-duplicated bundle descriptions from GetFileBundles()
        """
        conn = self.dbinstance.cursor()
        # NOTE(review): the snapshot-id comment that used to sit here was
        # copied from GetFilenames and did not apply - this method queries
        # by fileid only.
        results = {}
        results['filedata'] = []
        bundledata = set()
        # Should only run once - the query matches a single fileid
        for row in conn.execute('SELECT directories.path, files.filename, files.digest, files.size, files.fileid, files.blockdata FROM files, directories WHERE digest IS NOT NULL AND files.directoryid=directories.directoryid AND files.fileid=:fileid', {'fileid': fileid}):
            # NOTE(review): message says SHA1 but the digest column holds the
            # SHA-512 per the fields below - message text left untouched.
            self.log.debug('%s/%s has SHA1 %s and is %u bytes' % (row[0], row[1], row[2], row[3]))
            # Get the file specific block data
            blockdata = self.GetFileBlocks(row[4])
            # Build up the data we're returning
            filepath = row[0] + '/' + row[1]
            filedata = {}
            filedata['name'] = filepath
            filedata['base64-sha512'] = row[2].upper()
            # Re-encode the stored base64 digest as upper-case hex
            filedata['sha512'] = base64.b16encode(base64.b64decode(row[2])).upper()
            filedata['size'] = row[3]
            filedata['blockdata'] = row[5]
            filedata['blocks'] = blockdata['blocks']
            filedata['bundles'] = blockdata['bundles']
            results['filedata'].append(filedata)
            if not len(blockdata['bundles']):
                # No bundles -> data is stored inline; log the raw block data
                self.log.debug('\tblock data:\n\"\"\"\n%s\n\"\"\"' % row[5])
            else:
                # Add the bundle ids back to the main set - we only want one
                # copy of each bundle
                for bundleid in blockdata['bundles']:
                    bundledata.add(bundleid)
        # now that we're done capturing all the file specific data, we can now capture the bundle data
        results['bundles'] = self.GetFileBundles(bundledata)
        self.log.debug(results['bundles'])
        return results
    def GetFilenames(self, snapshotid):
        """
        Return all files contained in a snapshot plus their block/bundle data.

        :param snapshotid: id value matching the snapshots table
        :returns: dictionary containing:
            filedata - list of per-file dictionaries with:
                name - full path (directory path + '/' + filename)
                base64-sha512 - upper-cased base64 digest as stored in the db
                sha512 - same digest re-encoded as upper-case hexadecimal
                size - file size in bytes
                blockdata - inline block data blob (used for small files)
                blocks - per-file block map from GetFileBlocks()
                bundles - set of bundle ids referenced by those blocks
            bundles - de-duplicated bundle descriptions (GetFileBundles())
                covering every returned file
        """
        conn = self.dbinstance.cursor()
        # note: we have to check against two snapshot id's as there is the
        # possibility that no file has the snapshot id that was provided if
        # that file is still in the latest snapshot (sentinel 2000000000)
        # and therefore fresh. Not sure how else to associate the files to a
        # specific snapshot ftm. The backupconfigurationid sub-select keeps
        # the match scoped to the snapshot's backup configuration.
        results = {}
        results['filedata'] = []
        bundledata = set()
        for row in conn.execute('SELECT directories.path, files.filename, files.digest, files.size, files.fileid, files.blockdata FROM files, directories WHERE digest IS NOT NULL AND files.directoryid=directories.directoryid AND (files.lastsnapshotid=:snapshotid or files.lastsnapshotid=2000000000) and files.backupconfigurationid = (select backupconfigurationid from snapshots where snapshotid=:snapshotid)', {'snapshotid': snapshotid}):
            self.log.debug('%s/%s has SHA512 %s and is %u bytes' % (row[0], row[1], row[2], row[3]))
            # Get the file specific block data
            blockdata = self.GetFileBlocks(row[4])
            # Build up the data we're returning
            filepath = row[0] + '/' + row[1]
            filedata = {}
            filedata['name'] = filepath
            filedata['base64-sha512'] = row[2].upper()
            # Re-encode the stored base64 digest as upper-case hex
            filedata['sha512'] = base64.b16encode(base64.b64decode(row[2])).upper()
            filedata['size'] = row[3]
            filedata['blockdata'] = row[5]
            filedata['blocks'] = blockdata['blocks']
            filedata['bundles'] = blockdata['bundles']
            results['filedata'].append(filedata)
            if not len(blockdata['bundles']):
                # No bundles -> data is stored inline; log the raw block data
                self.log.debug('\tblock data:\n\"\"\"\n%s\n\"\"\"' % row[5])
            else:
                # Add the bundle ids back to the main set - we only want one
                # copy of each bundle
                for bundleid in blockdata['bundles']:
                    bundledata.add(bundleid)
        # now that we're done capturing all the file specific data, we can now capture the bundle data
        results['bundles'] = self.GetFileBundles(bundledata)
        self.log.debug(results['bundles'])
        return results
def GetFileBlocks(self, fileid):
    """
    Retrieve every block associated with *fileid*.

    :param fileid: id of the file in the files table
    :returns: dict with two entries:
        'blocks'  - dict keyed by block index; each entry carries the block
                    id, sha1 (upper-cased), size, and a 'bundle' dict with
                    the bundle id and offset
        'bundles' - set of bundle ids referenced by those blocks
    """
    cursor = self.dbinstance.cursor()
    block_map = {}
    bundle_ids = set()
    rows = cursor.execute('SELECT fileblocks.idx, blocks.blockid, blocks.sha1, blocks.size, blocks.bundleid, blocks.bundleoffset FROM fileblocks,blocks WHERE fileblocks.fileid=:fileid AND blocks.blockid=fileblocks.blockid ORDER BY fileblocks.idx', {'fileid': fileid})
    for idx, block_id, sha1, size, bundle_id, bundle_offset in rows:
        block_map[idx] = {
            'id': block_id,
            'sha1': sha1.upper(),
            'size': size,
            'bundle': {
                'id': bundle_id,
                'offset': bundle_offset,
            },
        }
        bundle_ids.add(bundle_id)
    self.log.debug('\tfileid(' + str(fileid) + ') has blocks ' + str(block_map))
    return {'blocks': block_map, 'bundles': bundle_ids}
def GetFileBundles(self, bundleids):
    """
    Look up the bundle records for every bundle id in *bundleids*.

    :param bundleids: iterable of bundle ids
    :returns: list of dicts, each containing id, name (zero-padded id),
        md5 (upper-cased), totalsize, garbagesize, and usedsize
        (totalsize - garbagesize)
    """
    cursor = self.dbinstance.cursor()
    bundle_list = []
    for bundle_id in bundleids:
        rows = cursor.execute('SELECT md5, totalsize, garbagesize FROM bundles WHERE bundleid=:bundleid', {'bundleid': bundle_id})
        for md5, total_size, garbage_size in rows:
            bundle_list.append({
                'id': bundle_id,
                'name': '{0:010}'.format(bundle_id),
                'md5': md5.upper(),
                'totalsize': total_size,
                'garbagesize': garbage_size,
                'usedsize': total_size - garbage_size,
            })
    return bundle_list
def GetFileAddedInSnapshot(self, snapshotid, limit_lower=None, limit_higher=None):
    """
    Given a snapshot id, retrieve basic file information from the files table.

    :param snapshotid: snapshot the files must have been added in
    :param limit_lower: optional inclusive lower bound on the filename
    :param limit_higher: optional exclusive upper bound on the filename
    :returns: list of dicts, each containing:
        id, directoryid, directory (path), filename,
        type (1 folder, 0 file, 2 symlink), metadata

    Fixes over the previous version: the named parameter was written
    '=: snapshotid' (invalid SQL); the optional filename limits were
    appended AFTER the ORDER BY clause and a second ORDER BY was added;
    the bound-parameter names did not match the placeholders; and the
    parameter dict was never passed to execute().
    """
    conn = self.dbinstance.cursor()
    # Build the WHERE clause first; ORDER BY is appended last so the
    # optional limit clauses produce valid SQL.
    stmt = 'SELECT f.fileid, f.directoryid, d.path, f.filename, f.type, ' \
           'f.metadata FROM files f ' \
           'JOIN directories d ON f.directoryid=d.directoryid ' \
           'WHERE addedinsnapshotid = :snapshotid'
    stmt_dict = {
        'snapshotid': snapshotid
    }
    if limit_lower:
        stmt = '{0:} AND f.filename >= :limit_lower'.format(stmt)
        stmt_dict['limit_lower'] = limit_lower
    if limit_higher:
        stmt = '{0:} AND f.filename < :limit_higher'.format(stmt)
        stmt_dict['limit_higher'] = limit_higher
    stmt = '{0:} ORDER BY d.path'.format(stmt)
    self.log.debug('SQL: {0:}'.format(stmt))
    results = list()
    for row in conn.execute(stmt, stmt_dict):
        fileInfo = dict()
        fileInfo['id'] = row[0]
        fileInfo['directoryid'] = row[1]
        fileInfo['directory'] = row[2]
        fileInfo['filename'] = row[3]
        fileInfo['type'] = row[4]
        fileInfo['metadata'] = row[5]
        results.append(fileInfo)
    return results
def GetBackupConfigurations(self):
    """
    Return the list of backup configurations stored in the database.

    :returns: list of dicts, each with backupconfigurationid, legacyguid,
        externalid, cleanupdays, and removed
    """
    cursor = self.dbinstance.cursor()
    rows = cursor.execute('SELECT backupconfigurationid, legacyguid, externalid, cleanupdays, removed FROM backupconfigurations')
    return [
        {
            'backupconfigurationid': int(config_id),
            'legacyguid': legacy_guid,
            'externalid': int(external_id),
            'cleanupdays': int(cleanup_days),
            'removed': int(removed)
        }
        for config_id, legacy_guid, external_id, cleanup_days, removed in rows
    ]
def GetExternalBackupConfigurationId(self, backupconfigurationid):
    """
    Map an agent-internal backup configuration id to the API's external id.

    :param backupconfigurationid: internal backup configuration id
    :returns: the matching external id
    """
    cursor = self.dbinstance.cursor()
    row = cursor.execute('SELECT externalid FROM backupconfigurations WHERE backupconfigurationid=:backupconfigurationid', {'backupconfigurationid': backupconfigurationid}).fetchone()
    return row[0]
def GetInternalBackupConfigurationId(self, backupconfigurationid):
    """
    Map an API-external backup configuration id to the agent's internal id.

    :param backupconfigurationid: external backup configuration id
    :returns: the matching internal id
    """
    cursor = self.dbinstance.cursor()
    row = cursor.execute('SELECT backupconfigurationid FROM backupconfigurations WHERE externalid=:backupconfigurationid', {'backupconfigurationid': backupconfigurationid}).fetchone()
    return row[0]
def DetectUniqueConstraintViolations(self):
    """
    Detect the database entries that create a unique constraint violation

    A violation exists when more than one "fresh" row (lastsnapshotid of
    2000000000) shares the same (filename, directoryid,
    backupconfigurationid) key; only one fresh row per key is valid.

    Returns: a list whose single element is the number of violating rows
    detected; an empty list when no violations exist.
    """
    conn = self.dbinstance.cursor()
    conn2 = self.dbinstance.cursor()
    results = list()
    # results[0] accumulates the violation count; popped again if it stays 0
    results.append(0)
    self.log.debug('Checking for unique constraint errors...')
    # Find every key that has more than one fresh row
    for entry in conn.execute('SELECT COUNT(fileid), backupconfigurationid, directoryid, filename, addedinsnapshotid, lastsnapshotid FROM files WHERE lastsnapshotid=2000000000 GROUP BY filename, directoryid, backupconfigurationid HAVING COUNT(fileid) > 1'):
        info = {
            'count': entry[0],
            'backupconfigurationid': entry[1],
            'directoryid': entry[2],
            'filename': entry[3],
            'addedinsnapshotid': entry[4],
            'lastsnapshotid': entry[5]
        }
        self.log.debug('Found entry (directoryid={0:}, filename={1:}, backupconfigurationid={2:}: added-in={3:}, count={4:}) with possible errors'
                       .format(info['directoryid'], info['filename'], info['backupconfigurationid'], info['addedinsnapshotid'], info['count']))
        # Reset the addedinsnapshot for each round
        first_round = True
        # Retrieve all invalid rows from the database for the provided entry,
        # newest addedinsnapshotid first
        for row in conn2.execute('SELECT directoryid, fileid, filename, addedinsnapshotid, lastsnapshotid, backupconfigurationid FROM files WHERE directoryid = ? AND filename = ? AND backupconfigurationid = ? AND lastsnapshotid = ? ORDER BY addedinsnapshotid DESC',
                                 (info['directoryid'], info['filename'], info['backupconfigurationid'], info['lastsnapshotid'])):
            confirmed_info = {
                'directoryid': row[0],
                'fileid': row[1],
                'filename': row[2],
                'addedinsnapshotid': row[3],
                'lastsnapshotid': row[4],
                'backupconfigurationid': row[5]
            }
            # First round we just want the snapshotid (the newest fresh row
            # is legitimate); every remaining row is counted as a violation
            if not first_round:
                self.log.debug('Confirmed entry (directoryid={0:}, filename={1:}, backupconfigurationid={2:}) has errors'
                               .format(confirmed_info['directoryid'], confirmed_info['filename'], confirmed_info['backupconfigurationid']))
                results[0] = results[0] + 1
            first_round = False
    if results[0] == 0:
        results.pop()
    return results
def DetectUnicodeDirectoryNameErrors(self):
    """
    Detect directory names containing non-ASCII characters.

    :returns: list of (directoryid, path) tuples, one per offending directory
    """
    conn = self.dbinstance.cursor()
    results = list()
    self.log.debug('Checking for ASCII errors in directory names...')
    for entry in conn.execute('SELECT directoryid, path FROM directories'):
        path_id = int(entry[0])
        path = entry[1]
        for v in path:
            # ASCII code points are 0-127; anything above is a violation.
            # (The previous check used "> 128", which let chr(128) through.)
            if ord(v) > 127:
                self.log.debug('Error with directory name. Directory ID = {0:}'.format(path_id))
                results.append((path_id, path))
                break
    return results
def DetectUnicodeFileNameErrors(self):
    """
    Detect file names containing non-ASCII characters.

    :returns: list of (fileid, filename) tuples, one per offending file
    """
    conn = self.dbinstance.cursor()
    results = list()
    # Log message previously said "directory names" (copy-paste error)
    self.log.debug('Checking for ASCII errors in file names...')
    for entry in conn.execute('SELECT fileid, filename FROM files'):
        file_id = int(entry[0])
        filename = entry[1]
        for v in filename:
            # ASCII code points are 0-127; anything above is a violation.
            # (The previous check used "> 128", which let chr(128) through.)
            if ord(v) > 127:
                self.log.debug('Error with file name. File ID = {0:}'.format(file_id))
                results.append((file_id, filename))
                break
    return results
def GetDirectory(self, directoryid):
    """
    Return the path of the directory with the given directoryid.

    :param directoryid: id in the directories table
    :returns: the directory's path string
    """
    conn = self.dbinstance.cursor()
    # Use a bound parameter instead of str.format to avoid SQL injection
    results = conn.execute('SELECT path FROM directories WHERE directoryid == :directoryid', {'directoryid': directoryid})
    directory = results.fetchone()
    return directory[0]
def FixUniqueConstraintViolations(self, unique_constraint_rows):
    """
    Fix the Unique Constraint Violations

    For every (filename, directoryid, backupconfigurationid) key with more
    than one "fresh" row (lastsnapshotid of 2000000000), the newest row is
    kept fresh and each older row's lastsnapshotid is rewritten to its own
    addedinsnapshotid, eliminating the duplicate fresh rows.

    :param unique_constraint_rows: currently unused; violations are
        re-detected inside this method
    :returns: True
    """
    conn = self.dbinstance.cursor()
    conn2 = self.dbinstance.cursor()
    conn3 = self.dbinstance.cursor()
    # Only commit when at least one row was actually updated
    commit_database = False
    self.log.debug('Checking for unique constraint errors...')
    # Find every key that has more than one fresh row
    for entry in conn.execute('SELECT COUNT(fileid), backupconfigurationid, directoryid, filename, addedinsnapshotid, lastsnapshotid FROM files WHERE lastsnapshotid=2000000000 GROUP BY filename, directoryid, backupconfigurationid HAVING COUNT(fileid) > 1'):
        info = {
            'count': entry[0],
            'backupconfigurationid': entry[1],
            'directoryid': entry[2],
            'filename': entry[3],
            'addedinsnapshotid': entry[4],
            'lastsnapshotid': entry[5]
        }
        self.log.debug('Found entry (directoryid={0:}, filename={1:}, backupconfigurationid={2:}: added-in={3:}, count={4:}) with possible errors'
                       .format(info['directoryid'], info['filename'], info['backupconfigurationid'], info['addedinsnapshotid'], info['count']))
        # Reset the addedinsnapshot for each round
        first_round = True
        # Retrieve all invalid rows from the database for the provided entry,
        # newest addedinsnapshotid first
        for row in conn2.execute('SELECT directoryid, fileid, filename, addedinsnapshotid, lastsnapshotid, backupconfigurationid FROM files WHERE directoryid = ? AND filename = ? AND backupconfigurationid = ? AND lastsnapshotid = ? ORDER BY addedinsnapshotid DESC',
                                 (info['directoryid'], info['filename'], info['backupconfigurationid'], info['lastsnapshotid'])):
            confirmed_info = {
                'directoryid': row[0],
                'fileid': row[1],
                'filename': row[2],
                'addedinsnapshotid': row[3],
                'lastsnapshotid': row[4],
                'backupconfigurationid': row[5]
            }
            # First round we just want the snapshotid (the newest row stays
            # fresh). All remaining rounds update the lastsnapshotid field to
            # be the added-in snapshotid of that row.
            if not first_round:
                self.log.debug('Fixing entry (directoryid={0:}, filename={1:}, backupconfigurationid={2:}) - lastsnapshotid ({3:} -> {4:})'
                               .format(confirmed_info['directoryid'], confirmed_info['filename'], confirmed_info['backupconfigurationid'], confirmed_info['lastsnapshotid'], confirmed_info['addedinsnapshotid']))
                conn3.execute('UPDATE files SET lastsnapshotid = ? WHERE directoryid = ? AND filename = ? AND addedinsnapshotid = ? AND lastsnapshotid = ? AND backupconfigurationid = ?',
                              (confirmed_info['addedinsnapshotid'], confirmed_info['directoryid'], confirmed_info['filename'], confirmed_info['addedinsnapshotid'], confirmed_info['lastsnapshotid'], confirmed_info['backupconfigurationid']))
                commit_database = True
            first_round = False
    if commit_database is True:
        self.dbinstance.commit()
    return True
def ResetAgentCleanUpOffset(self):
    """
    Returns the Cleanup Offset to the default (invalid sentinel) value,
    which will cause the agent to generate a new random time for the
    cleanup offset to occur at.
    """
    new_offset = CloudBackupCleanUpOffset.getInvalidOffset()
    # NOTE(review): ChangeAgentCleanUpOffset is not defined in the visible
    # portion of this file; SetAgentCleanUpOffset looks like the intended
    # call - confirm the method exists elsewhere.
    self.ChangeAgentCleanUpOffset(cleanup_offset=new_offset)
def GetAgentCleanUpOffset(self):
    """
    Read the cleanup offset stored in the keyvalues table and wrap it in a
    CloudBackupCleanUpOffset instance.
    """
    cursor = self.dbinstance.cursor()
    row = cursor.execute('SELECT intvalue FROM keyvalues WHERE key="cleanupoffset"').fetchone()
    return CloudBackupCleanUpOffset(offset=int(row[0]))
def SetAgentCleanUpOffset(self, cleanup_offset=None):
    """
    Sets the Cleanup Offset into the database based on the provided
    CloudBackupCleanUpOffset object.

    :param cleanup_offset: CloudBackupCleanUpOffset to store; when None a
        fresh random offset is generated for this call. (The old default of
        getRandomOffset() was evaluated once at class-definition time, so
        every defaulted call stored the same "random" value.)
    :returns: True on success
    :raises: TypeError when cleanup_offset is not a CloudBackupCleanUpOffset
        or its offset is not an integer
    """
    if cleanup_offset is None:
        cleanup_offset = CloudBackupCleanUpOffset.getRandomOffset()
    if isinstance(cleanup_offset, CloudBackupCleanUpOffset):
        if isinstance(cleanup_offset.offset, int):
            conn = self.dbinstance.cursor()
            conn.execute('INSERT OR REPLACE INTO keyvalues (key, intvalue) VALUES (?, ?)', ('cleanupoffset', cleanup_offset.offset))
            self.dbinstance.commit()
            return True
        else:
            raise TypeError('Cleanup Offset must be an integer type')
    else:
        raise TypeError('cleanup_offset must be an instance of CloudBackupCleanUpOffset')
def GetAgentLastCleanUpWeek(self):
    """
    Return the last cleanup week index stored in the keyvalues table, as an int.
    """
    cursor = self.dbinstance.cursor()
    row = cursor.execute('SELECT intvalue FROM keyvalues WHERE key="lastcleanupindex"').fetchone()
    return int(row[0])
def GetAgentNextCleanupsForConfigurations(self, cleanup_offset=None, adjustment=None):
    """
    Calculate the next cleanup time for every backup configuration in the
    database.

    :param cleanup_offset: CloudBackupCleanUpOffset used in the calculation;
        when None the offset stored in the database is used
    :param adjustment: optional datetime.timedelta applied to the calculation
    :returns: list of tuples (backupconfigurationid, cleanup week index,
        day of week that starts the cleanup week)
    """
    if cleanup_offset is None:
        cleanup_offset = self.GetAgentCleanUpOffset()
    cursor = self.dbinstance.cursor()
    rows = cursor.execute('SELECT backupconfigurationid, cleanupdays FROM backupconfigurations')
    schedule = []
    for config_id, cleanup_days in rows:
        week_index, week_start = CloudBackupCleanUpOffset.get_next_cleanup_time(cleanup_offset, int(cleanup_days), adjustment=adjustment)
        schedule.append((config_id, week_index, week_start))
    return schedule
def AddSnapshot(self, old_snapshots=None):
    """
    Insert new snapshot id(s), cloning existing snapshots as a baseline.

    :param old_snapshots: None or an iterable containing the snapshotids to
        replicate with new snapshotids; when None the maximum existing
        snapshot id is replicated alone
    :returns: the largest newly inserted snapshot id (useful for the caller
        for uploads to CloudFiles)
    """
    conn = self.dbinstance.cursor()
    # If no old snapshotid was specified, then find the maximum one in the database
    if old_snapshots is None:
        max_existing_snapshot = conn.execute('SELECT MAX(snapshotid) FROM snapshots').fetchone()
        old_snapshots = [max_existing_snapshot[0]]
    # Keep a list of the new snapshotids so we can return the maximum one
    new_snapshots = list()
    for snapshot in old_snapshots:
        # Each new snapshot needs to be after the maximum entry in the table
        max_existing_snapshot = conn.execute('SELECT MAX(snapshotid) FROM snapshots').fetchone()
        new_snapshot_id = int(max_existing_snapshot[0]) + 1
        # Insert a new snapshot using an existing snapshot as a baseline for
        # certain values. Bound parameters replace the previous str.format
        # interpolation (SQL-injection hazard).
        conn.execute('INSERT INTO snapshots (snapshotid, startdate, state, cleanupindex, backupconfigurationid) SELECT ?, startdate, 4, cleanupindex, backupconfigurationid FROM snapshots WHERE snapshotid == ?', (new_snapshot_id, snapshot))
        # And save the result
        new_snapshots.append(new_snapshot_id)
    # Reverse the sort so it goes max->min
    sorted_snapshots = sorted(new_snapshots, reverse=True)
    if len(new_snapshots):
        self.dbinstance.commit()
    return sorted_snapshots[0]
def GetSnapshots(self, backupconfigurationids=None, states=None):
    """
    Return the list of existing snapshots.

    :param backupconfigurationids: iterable of backup configuration ids (as
        strings) to keep, or None for all
    :param states: iterable of snapshot states (as strings) to keep, or
        None for all
    :returns: list of dicts with snapshotid, startdate, state, cleanupindex
        ('' when unset), and backupconfigurationid
    """
    cursor = self.dbinstance.cursor()
    snapshots = []
    for snapshot_id, start_date, state, cleanup_index, config_id in cursor.execute('SELECT snapshotid, startdate, state, cleanupindex, backupconfigurationid FROM snapshots'):
        snapshot = {
            'snapshotid': int(snapshot_id),
            'startdate': start_date,
            'state': int(state),
            'cleanupindex': int(cleanup_index) if cleanup_index else '',
            'backupconfigurationid': int(config_id)
        }
        # Apply the caller's optional filters (string comparison, as before)
        if backupconfigurationids is not None and str(snapshot['backupconfigurationid']) not in backupconfigurationids:
            continue
        if states is not None and str(snapshot['state']) not in states:
            continue
        snapshots.append(snapshot)
    return snapshots
def Vacuum(self):
    """
    Shrink the database file to its minimum size via VACUUM.

    :returns: True (sqlite raises on failure)
    """
    cursor = self.dbinstance.cursor()
    cursor.execute('VACUUM')
    self.dbinstance.commit()
    # Reaching this point means the VACUUM succeeded
    return True
def Rename(self, new_filename):
    """
    Rename the working database file to the specified file name

    Closes the database, renames the file on disk, then re-opens the
    connection under the new name.

    :param new_filename: new path for the database file
    :returns: True (os.rename raises on failure)
    """
    self.__close_db()
    os.rename(self.dbfile, new_filename)
    self.dbfile = new_filename
    self.__open_db()
    return True
def BloatDatabase(self, table_suffix=None, granularity=1024 * 1024, minimum_compressed_size=5.1 * 1024 * 1024 * 1024):
    """
    Insert a table with random data in its columns to grow the database sufficiently to create a compressed database >5GB.
    File size typically needs to be in the 12GB+ range

    :param table_suffix: optional suffix appended to the bloat table's name
    :param granularity: number of rows inserted between compressed-size re-checks
    :param minimum_compressed_size: target gzip-compressed size, in bytes
    :returns: (original compressed size, new compressed size) tuple in bytes
    """
    def __bloat_db_find_test_compressed_size():
        # Gzip-compress the database file to a temp file and return the
        # compressed size in bytes; the temp file is removed afterwards.
        import tempfile
        import gzip
        database_is_opened = False
        self.log.debug('Ensuring database closed in order to reliably generate a compressed file for testing')
        # Iff the database was opened then
        # Clean up and close the database for a reliable number
        if self.__is_db_opened():
            self.log.debug('Found the database open.')
            self.Vacuum()
            self.__close_db()
            database_is_opened = True
        # Generate a temp file for copying the compressed database to
        temp_file_info = tempfile.mkstemp()
        bloat_db_temp_file = temp_file_info[1]
        print('Compressing file for size check')
        # Apparently there is a bug in gzip.py that prevents the following from working:
        #
        # Compress the database to the new file
        with gzip.open(bloat_db_temp_file, 'wb') as gzip_file:
            with open(self.dbfile, 'rb') as input_file:
                check_compressed_file_continue_loop = True
                # Copy the database into the gzip stream in 1 KiB chunks
                while check_compressed_file_continue_loop:
                    file_chunk = input_file.read(1024)
                    if len(file_chunk) == 0:
                        check_compressed_file_continue_loop = False
                    else:
                        gzip_file.write(file_chunk)
        # Get the compressed file size
        gzip_file_size = os.path.getsize(bloat_db_temp_file)
        file_sizes = (gzip_file_size, gzip_file_size / 1024, gzip_file_size / (1024 * 1024), gzip_file_size / (1024 * 1024 * 1024))
        print('\tSize: {0:} bytes, {1:} kilobytes, {2:} megabytes, {3:} gigabytes'.format(file_sizes[0], file_sizes[1], file_sizes[2], file_sizes[3]))
        # Remove the file since we don't really need it
        os.remove(bloat_db_temp_file)
        # And re-open the database iff it was previously opened
        if database_is_opened is True:
            self.log.debug('Database was found open. Re-opening.')
            self.__open_db()
            assert self.__is_db_opened()
        # return the size of the file
        return gzip_file_size
    original_compressed_size = __bloat_db_find_test_compressed_size()
    # in case the file doesn't get changed...
    new_compressed_size = original_compressed_size
    # Minimum 5.1 GB = 5.1*1024 MB = 5.1*1024*1024 KB = 5.1*1024*1024*1024 bytes
    if original_compressed_size < minimum_compressed_size:
        # Ensure the database is opened for use
        if self.__is_db_opened() is False:
            self.log.debug('Database was closed. Opening.')
            self.__open_db()
            assert self.__is_db_opened()
        # Determine the table name that will be used
        table_name = None
        if table_suffix is None:
            table_name = 'bloat_table'
        else:
            table_name = 'bloat_table_{0:}'.format(table_suffix)
        conn = self.dbinstance.cursor()
        # Ensure the table already exists
        conn.execute('CREATE TABLE IF NOT EXISTS {0:} ( a INTEGER PRIMARY KEY ASC, b DATETIME NOT NULL, c TEXT NOT NULL)'.format(table_name))
        # Loop on the file size being large enough
        # Note: This will incur a bit of disk thrashing but it's the only reliable way to get the database size large enough
        while new_compressed_size <= minimum_compressed_size:
            # make sure we have a good cursor
            loop_conn = self.dbinstance.cursor()
            # Insert a lot of records to get the size up
            jumbo_count = 0
            while jumbo_count < granularity:
                loop_conn.execute('INSERT INTO {0:} (a, b, c) VALUES(NULL, DATETIME(\'now\'), HEX(RANDOMBLOB(128)))'.format(table_name))
                jumbo_count = jumbo_count + 1
            # Ensure the data is persistent
            self.dbinstance.commit()
            # Get the new size
            new_compressed_size = __bloat_db_find_test_compressed_size()
    # else don't do anything - the file's big enough
    return (original_compressed_size, new_compressed_size)
# cloudbackup/database/sqlite.py
"""
Rackspace Cloud Backup SQLite Database Functionality
"""
from __future__ import print_function
import base64
import datetime
import logging
import os
import os.path
import random
import sqlite3
class CloudBackupCleanUpOffset(object):
    """
    Represents the agent's cleanup offset - the number of seconds added to
    the base time when computing which week a cleanup run falls into.
    """

    # One week, in seconds - the largest meaningful offset value
    MAX_CLEANUP_OFFSET = int(datetime.timedelta(weeks=1).total_seconds())
    # Sentinel meaning "no valid offset set"
    INVALID_CLEANUP_OFFSET = (MAX_CLEANUP_OFFSET + 1)
    # cleanupdays value meaning expired file versions are never removed
    KEEP_EXPIRED_FILES_FOREVER = 0
    # Cleanup index stored when files are kept forever
    CLEANUP_INDEX_NEVER_DELETE = 1999999999

    @classmethod
    def random_index(cls):
        """Generate a new Random Offset Value

        :returns: int - random value where 0 <= v <= MAX_CLEANUP_OFFSET
        """
        return random.randint(0, cls.MAX_CLEANUP_OFFSET)

    @classmethod
    def make_new_valid_offset(cls, offset, seconds):
        """Calculate a new offset value based on an existing offset and the
        number of seconds to change it by

        :param offset: int or CloudBackupCleanUpOffset - the starting offset
        :param seconds: int - seconds to adjust by (may be negative)
        :returns: int - number of seconds the offset adds to the base time
        :raises: ValueError - when the offset is the invalid sentinel, or
            the adjustment would take the offset below zero
        """
        o = CloudBackupCleanUpOffset(offset=offset)
        if o.offset == cls.INVALID_CLEANUP_OFFSET:
            raise ValueError('Offset is not modifiable at this time. Please set a modifiable offset first.')
        # (A second INVALID_CLEANUP_OFFSET check that clamped the offset to
        # MAX_CLEANUP_OFFSET used to live here; it was unreachable because
        # the raise above already covers that case, so it has been removed.)
        if (o.offset + seconds) < 0:
            raise ValueError('Offset will go below the minimum value. Please adjust forward in time.')
        # Wrap around the one-week period
        return int((o.offset + seconds) % cls.MAX_CLEANUP_OFFSET)

    @classmethod
    def get_next_cleanup_time(cls, offset, daysToKeepOldVersions, adjustment=None):
        """Calculate the next cleanup week

        (Current Time (Unix Epoch) + (mDaysToKeepFiles * Seconds Per Day) + Offset) / Seconds Per Week

        :param offset: CloudBackupCleanUpOffset used in the calculation
        :param daysToKeepOldVersions: int - days old versions are kept;
            KEEP_EXPIRED_FILES_FOREVER means never clean up
        :param adjustment: None or datetime.timedelta extra adjustment
        :returns: (cleanup week index, day-of-week string that starts the
            cleanup week); the weekday is None when cleanup never happens
        :raises: TypeError - on invalid offset or adjustment types
        """
        cleanup_week = None
        weekday = None
        if daysToKeepOldVersions is cls.KEEP_EXPIRED_FILES_FOREVER:
            cleanup_week = cls.CLEANUP_INDEX_NEVER_DELETE
        elif not isinstance(offset, CloudBackupCleanUpOffset):
            raise TypeError('offset must be an instance of CloudBackupCleanUpOffset')
        elif not ((adjustment is None) or isinstance(adjustment, datetime.timedelta)):
            raise TypeError('adjustment must be none or must be an instance of datetime.timedelta')
        else:
            current_time = datetime.datetime.utcnow()
            adjust_by_days = datetime.timedelta(days=daysToKeepOldVersions)
            adjust_offset = datetime.timedelta(seconds=offset.offset)
            new_time = current_time + adjust_by_days + adjust_offset
            if adjustment is not None:
                new_time = new_time + adjustment
            # Convert to ISO representation: (year, week, weekday)
            iso_cal = new_time.isocalendar()
            # ISO Week of the current year
            current_week = iso_cal[1]
            # Number of weeks since Jan 1, 1970 (52 weeks/year approximation)
            weeks_offset = (iso_cal[0] - 1970) * 52
            # Cleanup Week is # of weeks since 1970
            cleanup_week = current_week + weeks_offset
            # offset.offset is really just a Unix Epoch offset,
            # so treat it as a Unix Epoch to determine which day of the week
            # starts the cleanup week for this agent.
            # NOTE(review): fromtimestamp() uses the local timezone while the
            # label claims UTC - confirm whether utcfromtimestamp was intended.
            weekday = datetime.datetime.fromtimestamp(offset.offset).strftime('%A - %H:%M:%S UTC')
        return (cleanup_week, weekday)

    @staticmethod
    def getInvalidOffset():
        """Create a new offset with the invalid sentinel set

        :returns: CloudBackupCleanUpOffset with the invalid offset
        """
        return CloudBackupCleanUpOffset(offset=CloudBackupCleanUpOffset.INVALID_CLEANUP_OFFSET)

    @staticmethod
    def getRandomOffset():
        """Create a new offset with a freshly generated random offset set

        :returns: CloudBackupCleanUpOffset with the new offset
        """
        return CloudBackupCleanUpOffset(offset=CloudBackupCleanUpOffset.random_index())

    def __init__(self, offset=None):
        """Initialize the cleanup offset

        :param offset: int or CloudBackupCleanUpOffset - the cleanup offset
        :raises: TypeError - if offset is not an int or CloudBackupCleanUpOffset
        """
        # If an int then just store it (after validation)
        if isinstance(offset, int):
            self._offset = offset
            self.validate_offset()
        # If CloudBackupCleanUpOffset then copy the offset over
        elif isinstance(offset, CloudBackupCleanUpOffset):
            offset.validate_offset()
            self._offset = offset.offset
        # Otherwise we don't support the conversion
        else:
            raise TypeError('Unknown offset type {0:}'.format(offset.__class__))

    def validate_offset(self):
        """Enforce that the stored offset is within the valid values

        :raises: ValueError when the value is outside
            0 <= v <= INVALID_CLEANUP_OFFSET
        """
        if self._offset < 0 or self._offset > self.__class__.INVALID_CLEANUP_OFFSET:
            raise ValueError('offset must be in a range between 0 and {0:}'.format(self.__class__.INVALID_CLEANUP_OFFSET))

    @property
    def offset(self):
        """Offset being stored

        :returns: int - number of seconds the offset adds to the base time;
            INVALID_CLEANUP_OFFSET when no offset is stored
        """
        if self._offset is None:
            return self.__class__.INVALID_CLEANUP_OFFSET
        else:
            return self._offset

    @offset.setter
    def offset(self, offset):
        """Update the stored offset to a new, specific value

        :raises: ValueError when the new value is out of range
        """
        self._offset = offset
        self.validate_offset()

    def changeBy(self, seconds=None):
        """Modify the stored offset by the specified number of seconds

        :returns: True on success
        :raises: ValueError - when the new value is not valid; the original
            offset is restored before raising
        """
        if self._offset is None:
            raise ValueError('Offset is not modifiable at this time. Please set a modifiable offset first.')
        old_offset = self._offset
        try:
            self._offset = self._offset + seconds
            self.validate_offset()
            return True
        except ValueError:
            self._offset = old_offset
            raise ValueError('Seconds ({1:}) cannot make offset go below zero (0) or above {0:}'.format(self.__class__.INVALID_CLEANUP_OFFSET, seconds))
class CloudBackupSqlite(object):
"""
Cloud Backup Sqlite Database Interface
"""
def __init__(self, dbfile):
    """
    Open a SQLite3 instance to the specified sqlite3 db file

    :param dbfile: path to the sqlite3 database file to open
    """
    self.log = logging.getLogger(__name__)
    # Path of the database file currently backing this instance
    self.dbfile = dbfile
    # sqlite3.Connection; None whenever the database is closed
    self.dbinstance = None
    self.__open_db()
def __del__(self):
    """
    Clean up by closing the database when the instance is destroyed
    """
    self.__close_db()
def __open_db(self):
    """
    Open the database connection to self.dbfile
    """
    self.log.debug('Opening database')
    self.dbinstance = sqlite3.connect(self.dbfile)
    # Return TEXT columns as native str (Python 2 compatibility - the file
    # still imports print_function from __future__)
    self.dbinstance.text_factory = str
def __close_db(self):
    """
    Close the database instance
    """
    self.log.debug('Closing database')
    self.dbinstance.close()
    # Drop the attribute then recreate it as None so __is_db_opened()
    # reports the database as closed
    del self.dbinstance
    self.dbinstance = None
def __is_db_opened(self):
    """
    Report whether the database connection is currently open for use.
    """
    is_open = self.dbinstance is not None
    self.log.debug('Checking open: {0:}'.format(is_open))
    return is_open
def GetDirectoryPath(self, directoryid):
    """
    Look up a directory row by id.

    :param directoryid: directory id from the directories table
    :returns: (parentdirectoryid, path) tuple for the directory
    """
    cursor = self.dbinstance.cursor()
    cursor.execute('SELECT parentdirectoryid, path FROM directories WHERE directoryid=:id', {'id': directoryid})
    row = cursor.fetchone()
    self.log.debug(' directoryid(%d) has parentdirectoryid:%d and path:%s' % (directoryid, row[0], row[1]))
    return row
def GetFilenameSet(self, snapshotid):
    """
    Return the set of fileids belonging to the given snapshot.

    A file belongs to the snapshot when its lastsnapshotid matches, or when
    it is still "fresh" (lastsnapshotid of 2000000000) and therefore part
    of the latest snapshot. Files without a digest are excluded.

    :param snapshotid: id value matching the snapshots table for a valid snapshot
    :returns: set of fileid values

    (The previous version also built a results dict that was never used or
    returned; that dead code has been removed and the docstring corrected -
    only fileids are returned, not full file data.)
    """
    conn = self.dbinstance.cursor()
    # note: we have to check against two snapshot id's as there is the
    # possibility that no file has the snapshot id that was provided if
    # that file is still in the latest snapshot and therefore fresh.
    files = set()
    for row in conn.execute('SELECT fileid FROM files WHERE digest IS NOT NULL AND (lastsnapshotid=:snapshotid or lastsnapshotid=2000000000)', {'snapshotid': snapshotid}):
        files.add(row[0])
    return files
def GetFileInformation(self, fileid):
    """
    Given a fileid return the file's data from the database

    Returns a dictionary containing the following:
        filedata - list with (at most) one entry containing:
            name - full path of the file
            base64-sha512 - digest as stored (base64, upper-cased)
            sha512 - digest re-encoded as upper-case hex
            size
            blockdata (for when size is below a given threshold)
            blocks - dictionary of block information (see GetFileBlocks)
            bundles - set() of bundle ids that go with the file
        bundles - details of every referenced bundle (see GetFileBundles)
    """
    conn = self.dbinstance.cursor()
    results = {}
    results['filedata'] = []
    bundledata = set()
    # Should only run once - fileid selects a single row
    for row in conn.execute('SELECT directories.path, files.filename, files.digest, files.size, files.fileid, files.blockdata FROM files, directories WHERE digest IS NOT NULL AND files.directoryid=directories.directoryid AND files.fileid=:fileid', {'fileid': fileid}):
        # The digest here is SHA512 (the old message incorrectly said SHA1;
        # the sibling GetFilenames logs SHA512 for the same column)
        self.log.debug('%s/%s has SHA512 %s and is %u bytes' % (row[0], row[1], row[2], row[3]))
        # Get the file specific block data
        blockdata = self.GetFileBlocks(row[4])
        # Build up the data we're returning
        filepath = row[0] + '/' + row[1]
        filedata = {}
        filedata['name'] = filepath
        filedata['base64-sha512'] = row[2].upper()
        # Convert the base64-encoded digest into upper-case hex
        filedata['sha512'] = base64.b16encode(base64.b64decode(row[2])).upper()
        filedata['size'] = row[3]
        filedata['blockdata'] = row[5]
        filedata['blocks'] = blockdata['blocks']
        filedata['bundles'] = blockdata['bundles']
        results['filedata'].append(filedata)
        if not len(blockdata['bundles']):
            self.log.debug('\tblock data:\n\"\"\"\n%s\n\"\"\"' % row[5])
        else:
            # Added the bundle data back to the main set - we only want one copy of each bundle
            for bundleid in blockdata['bundles']:
                bundledata.add(bundleid)
    # now that we're done capturing all the file specific data, we can now capture the bundle data
    results['bundles'] = self.GetFileBundles(bundledata)
    self.log.debug(results['bundles'])
    return results
def GetFilenames(self, snapshotid):
    """
    Given a snapshotid return all the files contained in it and their relevant data from the database

    Returns a dictionary containing the following:
        filedata - list of dicts, one per file:
            name - full path of the file
            base64-sha512 - digest as stored (base64, upper-cased)
            sha512 - digest re-encoded as upper-case hex
            size
            blockdata (for when size is below a given threshold)
            blocks - dictionary of block information (see GetFileBlocks)
            bundles - set() of bundle ids that go with the file
        bundles - details of every unique bundle referenced by all files
            returned (see GetFileBundles)
    """
    conn = self.dbinstance.cursor()
    # note: we have to check against two snapshot id's as there is the possibility that no file has the snapshot id
    # that was provided if that file is still in the latest snapshot and therefore fresh.
    # Not sure how else to associate the files to a specific snapshot ftm.
    results = {}
    results['filedata'] = []
    bundledata = set()
    for row in conn.execute('SELECT directories.path, files.filename, files.digest, files.size, files.fileid, files.blockdata FROM files, directories WHERE digest IS NOT NULL AND files.directoryid=directories.directoryid AND (files.lastsnapshotid=:snapshotid or files.lastsnapshotid=2000000000) and files.backupconfigurationid = (select backupconfigurationid from snapshots where snapshotid=:snapshotid)', {'snapshotid': snapshotid}):
        self.log.debug('%s/%s has SHA512 %s and is %u bytes' % (row[0], row[1], row[2], row[3]))
        # Get the file specific block data
        blockdata = self.GetFileBlocks(row[4])
        # Build up the data we're returning
        filepath = row[0] + '/' + row[1]
        filedata = {}
        filedata['name'] = filepath
        filedata['base64-sha512'] = row[2].upper()
        # Convert the base64-encoded digest into upper-case hex
        filedata['sha512'] = base64.b16encode(base64.b64decode(row[2])).upper()
        filedata['size'] = row[3]
        filedata['blockdata'] = row[5]
        filedata['blocks'] = blockdata['blocks']
        filedata['bundles'] = blockdata['bundles']
        results['filedata'].append(filedata)
        if not len(blockdata['bundles']):
            self.log.debug('\tblock data:\n\"\"\"\n%s\n\"\"\"' % row[5])
        else:
            # Added the bundle data back to the main set - we only want one copy of each bundle
            for bundleid in blockdata['bundles']:
                bundledata.add(bundleid)
    # now that we're done capturing all the file specific data, we can now capture the bundle data
    results['bundles'] = self.GetFileBundles(bundledata)
    self.log.debug(results['bundles'])
    return results
def GetFileBlocks(self, fileid):
    """
    Retrieve every block associated with *fileid*.

    :param fileid: id of the file in the files table
    :returns: dict with two entries:
        'blocks'  - dict keyed by block index; each entry carries the block
                    id, sha1 (upper-cased), size, and a 'bundle' dict with
                    the bundle id and offset
        'bundles' - set of bundle ids referenced by those blocks
    """
    cursor = self.dbinstance.cursor()
    block_map = {}
    bundle_ids = set()
    rows = cursor.execute('SELECT fileblocks.idx, blocks.blockid, blocks.sha1, blocks.size, blocks.bundleid, blocks.bundleoffset FROM fileblocks,blocks WHERE fileblocks.fileid=:fileid AND blocks.blockid=fileblocks.blockid ORDER BY fileblocks.idx', {'fileid': fileid})
    for idx, block_id, sha1, size, bundle_id, bundle_offset in rows:
        block_map[idx] = {
            'id': block_id,
            'sha1': sha1.upper(),
            'size': size,
            'bundle': {
                'id': bundle_id,
                'offset': bundle_offset,
            },
        }
        bundle_ids.add(bundle_id)
    self.log.debug('\tfileid(' + str(fileid) + ') has blocks ' + str(block_map))
    return {'blocks': block_map, 'bundles': bundle_ids}
def GetFileBundles(self, bundleids):
"""
Given a bundle list retrieve all the information
Returns a dictionary of bundle is containins the following:
id
md5
totalsize
garbagesize
usedsized
"""
conn = self.dbinstance.cursor()
bundles = []
for bundleid in bundleids:
for row in conn.execute('SELECT md5, totalsize, garbagesize FROM bundles WHERE bundleid=:bundleid', {'bundleid': bundleid}):
bundledata = {}
bundledata['id'] = bundleid
bundledata['name'] = '{0:010}'.format(bundleid)
bundledata['md5'] = row[0].upper()
bundledata['totalsize'] = row[1]
bundledata['garbagesize'] = row[2]
bundledata['usedsize'] = (row[1] - row[2])
bundles.append(bundledata)
return bundles
def GetFileAddedInSnapshot(self, snapshotid, limit_lower=None, limit_higher=None):
"""
Given a snapshot id, retrieve basic file information from the files table
Returns a list of dictionaries.
Each dictionary contains:
id
directoryid
directory -- directory path
filename
type -- 1 folder, 0 file, 2 symlink
"""
conn = self.dbinstance.cursor()
stmt = 'SELECT f.fileid, f.directoryid, d.path, f.filename, f.type, ' \
'f.metadata FROM files f ' \
'JOIN directories d ON f.directoryid=d.directoryid ' \
'WHERE addedinsnapshotid =: snapshotid '\
'ORDER BY d.path'
stmt_dict = {
'snapshotid': snapshotid
}
if limit_lower:
stmt = '{0:} AND f.filename >= :lower_limit'.format(stmt)
stmt_dict['limit_lower'] = limit_lower
if limit_higher:
stmt = '{0:} AND f.filename < :higher_limit'.format(stmt)
stmt_dict['limit_higher'] = limit_higher
stmt = '{0:} ORDER BY d.path'.format(stmt)
print('SQL: {0:}'.format(stmt))
results = list()
for row in conn.execute(stmt, {'snapshotid': snapshotid}):
fileInfo = dict()
fileInfo['id'] = row[0]
fileInfo['directoryid'] = row[1]
fileInfo['directory'] = row[2]
fileInfo['filename'] = row[3]
fileInfo['type'] = row[4]
fileInfo['metadata'] = row[5]
results.append(fileInfo)
return results
def GetBackupConfigurations(self):
"""
Returns the list of existing backup configurations
"""
conn = self.dbinstance.cursor()
backupconfigurations = []
for result in conn.execute('SELECT backupconfigurationid, legacyguid, externalid, cleanupdays, removed FROM backupconfigurations'):
backupconfiguration = {
'backupconfigurationid': int(result[0]),
'legacyguid': result[1],
'externalid': int(result[2]),
'cleanupdays': int(result[3]),
'removed': int(result[4])
}
backupconfigurations.append(backupconfiguration)
return backupconfigurations
def GetExternalBackupConfigurationId(self, backupconfigurationid):
"""
Given a backup configuration id used internally by the agent return its equivalent for the API
"""
conn = self.dbinstance.cursor()
result = conn.execute('SELECT externalid FROM backupconfigurations WHERE backupconfigurationid=:backupconfigurationid', {'backupconfigurationid': backupconfigurationid})
data = result.fetchone()
return data[0]
def GetInternalBackupConfigurationId(self, backupconfigurationid):
"""
Given a backup configuration id used externally by the API return its equivalent for the agent
"""
conn = self.dbinstance.cursor()
result = conn.execute('SELECT backupconfigurationid FROM backupconfigurations WHERE externalid=:backupconfigurationid', {'backupconfigurationid': backupconfigurationid})
data = result.fetchone()
return data[0]
    def DetectUniqueConstraintViolations(self):
        """
        Detect the database entries that create a unique constraint violation.

        A violation is a group of rows in the files table that share the same
        (filename, directoryid, backupconfigurationid) while all claiming to be
        the current revision (lastsnapshotid=2000000000).

        Returns: a list - empty when no violations exist, otherwise containing
        a single integer: the number of duplicate rows found beyond the first
        (newest) row of each group. (Not True/False as previously documented.)
        """
        # Two cursors: conn drives the outer GROUP BY scan while conn2 runs
        # the nested per-group query without disturbing the outer iteration.
        conn = self.dbinstance.cursor()
        conn2 = self.dbinstance.cursor()
        results = list()
        results.append(0)
        self.log.debug('Checking for unique constraint errors...')
        # NOTE(review): 2000000000 looks like the sentinel lastsnapshotid that
        # marks "still present in the latest snapshot" - confirm.
        for entry in conn.execute('SELECT COUNT(fileid), backupconfigurationid, directoryid, filename, addedinsnapshotid, lastsnapshotid FROM files WHERE lastsnapshotid=2000000000 GROUP BY filename, directoryid, backupconfigurationid HAVING COUNT(fileid) > 1'):
            info = {
                'count': entry[0],
                'backupconfigurationid': entry[1],
                'directoryid': entry[2],
                'filename': entry[3],
                'addedinsnapshotid': entry[4],
                'lastsnapshotid': entry[5]
            }
            self.log.debug('Found entry (directoryid={0:}, filename={1:}, backupconfigurationid={2:}: added-in={3:}, count={4:}) with possible errors'
                           .format(info['directoryid'], info['filename'], info['backupconfigurationid'], info['addedinsnapshotid'], info['count']))
            # Reset the addedinsnapshot for each round
            first_round = True
            # Retrieve all invalid rows from the database for the provided entry
            for row in conn2.execute('SELECT directoryid, fileid, filename, addedinsnapshotid, lastsnapshotid, backupconfigurationid FROM files WHERE directoryid = ? AND filename = ? AND backupconfigurationid = ? AND lastsnapshotid = ? ORDER BY addedinsnapshotid DESC',
                                     (info['directoryid'], info['filename'], info['backupconfigurationid'], info['lastsnapshotid'])):
                confirmed_info = {
                    'directoryid': row[0],
                    'fileid': row[1],
                    'filename': row[2],
                    'addedinsnapshotid': row[3],
                    'lastsnapshotid': row[4],
                    'backupconfigurationid': row[5]
                }
                # First round we just want the snapshotid
                # All remaining rounds we update the lastsnapshotid field to be the added in snapshotid of the previous round
                if not first_round:
                    # Rows after the first (newest, via ORDER BY ... DESC) are
                    # counted as confirmed violations.
                    self.log.debug('Confirmed entry (directoryid={0:}, filename={1:}, backupconfigurationid={2:}) has errors'
                                   .format(confirmed_info['directoryid'], confirmed_info['filename'], confirmed_info['backupconfigurationid']))
                    results[0] = results[0] + 1
                first_round = False
        if results[0] == 0:
            results.pop()
        return results
def DetectUnicodeDirectoryNameErrors(self):
"""
Detect if any directory names are in violation of the ASCII characters
"""
conn = self.dbinstance.cursor()
results = list()
self.log.debug('Checking for ASCII errors in directory names...')
for entry in conn.execute('SELECT directoryid, path FROM directories'):
path_id = int(entry[0])
path = entry[1]
for v in path:
if ord(v) > 128:
self.log.debug('Error with directory name. Directory ID = {0:}'.format(path_id))
results.append((path_id, path))
break
return results
def DetectUnicodeFileNameErrors(self):
"""
Detect if any file names are in violation of the ASCII characters
"""
conn = self.dbinstance.cursor()
results = list()
self.log.debug('Checking for ASCII errors in directory names...')
for entry in conn.execute('SELECT fileid, filename FROM files'):
file_id = int(entry[0])
filename = entry[1]
for v in filename:
if ord(v) > 128:
self.log.debug('Error with file name. File ID = {0:}'.format(file_id))
results.append((file_id, filename))
break
return results
def GetDirectory(self, directoryid):
"""
Return the directory associated with the specified directoryid
"""
conn = self.dbinstance.cursor()
results = conn.execute('SELECT path FROM directories WHERE directoryid == {0:}'.format(directoryid))
directory = results.fetchone()
return directory[0]
    def FixUniqueConstraintViolations(self, unique_constraint_rows):
        """
        Fix the Unique Constraint Violations.

        Rows sharing (filename, directoryid, backupconfigurationid) with the
        current-revision sentinel lastsnapshotid are walked newest-first; the
        newest row is kept as-is and every older duplicate has its
        lastsnapshotid rewritten to its own addedinsnapshotid, closing it out.

        :param unique_constraint_rows: NOTE(review) - currently unused by the
            implementation; confirm whether callers rely on it.
        :returns: True (the commit only happens when at least one row was
            updated)
        """
        # Three cursors: outer GROUP BY scan, nested per-group scan, and a
        # third dedicated to the UPDATE statements.
        conn = self.dbinstance.cursor()
        conn2 = self.dbinstance.cursor()
        conn3 = self.dbinstance.cursor()
        commit_database = False
        self.log.debug('Checking for unique constraint errors...')
        for entry in conn.execute('SELECT COUNT(fileid), backupconfigurationid, directoryid, filename, addedinsnapshotid, lastsnapshotid FROM files WHERE lastsnapshotid=2000000000 GROUP BY filename, directoryid, backupconfigurationid HAVING COUNT(fileid) > 1'):
            info = {
                'count': entry[0],
                'backupconfigurationid': entry[1],
                'directoryid': entry[2],
                'filename': entry[3],
                'addedinsnapshotid': entry[4],
                'lastsnapshotid': entry[5]
            }
            self.log.debug('Found entry (directoryid={0:}, filename={1:}, backupconfigurationid={2:}: added-in={3:}, count={4:}) with possible errors'
                           .format(info['directoryid'], info['filename'], info['backupconfigurationid'], info['addedinsnapshotid'], info['count']))
            # Reset the addedinsnapshot for each round
            first_round = True
            # Retrieve all invalid rows from the database for the provided entry
            for row in conn2.execute('SELECT directoryid, fileid, filename, addedinsnapshotid, lastsnapshotid, backupconfigurationid FROM files WHERE directoryid = ? AND filename = ? AND backupconfigurationid = ? AND lastsnapshotid = ? ORDER BY addedinsnapshotid DESC',
                                     (info['directoryid'], info['filename'], info['backupconfigurationid'], info['lastsnapshotid'])):
                confirmed_info = {
                    'directoryid': row[0],
                    'fileid': row[1],
                    'filename': row[2],
                    'addedinsnapshotid': row[3],
                    'lastsnapshotid': row[4],
                    'backupconfigurationid': row[5]
                }
                # First round we just want the snapshotid
                # All remaining rounds we update the lastsnapshotid field to be the added in snapshotid of the previous round
                if not first_round:
                    self.log.debug('Fixing entry (directoryid={0:}, filename={1:}, backupconfigurationid={2:}) - lastsnapshotid ({3:} -> {4:})'
                                   .format(confirmed_info['directoryid'], confirmed_info['filename'], confirmed_info['backupconfigurationid'], confirmed_info['lastsnapshotid'], confirmed_info['addedinsnapshotid']))
                    conn3.execute('UPDATE files SET lastsnapshotid = ? WHERE directoryid = ? AND filename = ? AND addedinsnapshotid = ? AND lastsnapshotid = ? AND backupconfigurationid = ?',
                                  (confirmed_info['addedinsnapshotid'], confirmed_info['directoryid'], confirmed_info['filename'], confirmed_info['addedinsnapshotid'], confirmed_info['lastsnapshotid'], confirmed_info['backupconfigurationid']))
                    commit_database = True
                first_round = False
        if commit_database is True:
            self.dbinstance.commit()
        return True
    def ResetAgentCleanUpOffset(self):
        """
        Returns the Cleanup Offset to the default value which will cause the agent to generate a new random
        time for the cleanup offset to occur at.
        """
        # Store the sentinel "invalid" offset; the agent regenerates a real
        # random offset when it next encounters this value.
        new_offset = CloudBackupCleanUpOffset.getInvalidOffset()
        # NOTE(review): no ChangeAgentCleanUpOffset method is visible in this
        # class; the setter defined below is SetAgentCleanUpOffset - confirm
        # this is not a typo that raises AttributeError at runtime.
        self.ChangeAgentCleanUpOffset(cleanup_offset=new_offset)
def GetAgentCleanUpOffset(self):
"""
Retrieves the Cleanup Offset from the database and returns an CloudBackupCleanUpOffset object containing the result.
"""
conn = self.dbinstance.cursor()
results = conn.execute('SELECT intvalue FROM keyvalues WHERE key="cleanupoffset"')
result = results.fetchone()
return CloudBackupCleanUpOffset(offset=int(result[0]))
def SetAgentCleanUpOffset(self, cleanup_offset=CloudBackupCleanUpOffset.getRandomOffset()):
"""
Sets the Cleanup Offset into the database based on the provided CloudBackupCleanUpOffset object
"""
if isinstance(cleanup_offset, CloudBackupCleanUpOffset):
if isinstance(cleanup_offset.offset, int):
conn = self.dbinstance.cursor()
conn.execute('INSERT OR REPLACE INTO keyvalues (key, intvalue) VALUES (?, ?)', ('cleanupoffset', cleanup_offset.offset))
self.dbinstance.commit()
return True
else:
raise TypeError('Cleanup Offset must be an integer type')
else:
raise TypeError('cleanup_offset must be an instance of CloudBackupCleanUpOffset')
def GetAgentLastCleanUpWeek(self):
"""
Retrieves the Last Cleanup Week from the database and returns as an integer
"""
conn = self.dbinstance.cursor()
results = conn.execute('SELECT intvalue FROM keyvalues WHERE key="lastcleanupindex"')
result = results.fetchone()
return int(result[0])
def GetAgentNextCleanupsForConfigurations(self, cleanup_offset=None, adjustment=None):
"""
Calculate the next cleanup time for each configuration in the database
:param cleanup_offset: CloudBackupCleanUpOffset - the cleanup offset to use for the calculation
:returns: a list of the configurations and their next cleanup information
each entry is a tuple of ( backupconfigurationid, cleanup week index, day of week that starts the cleanup week)
"""
if cleanup_offset is None:
cleanup_offset = self.GetAgentCleanUpOffset()
conn = self.dbinstance.cursor()
results = conn.execute('SELECT backupconfigurationid, cleanupdays FROM backupconfigurations')
cleanup_times = []
for backupconfig_result in results:
backupconfigid = backupconfig_result[0]
cleanup_days = int(backupconfig_result[1])
next_cleanup_time = CloudBackupCleanUpOffset.get_next_cleanup_time(cleanup_offset, cleanup_days, adjustment=adjustment)
data = (backupconfigid, next_cleanup_time[0], next_cleanup_time[1])
cleanup_times.append(data)
return cleanup_times
def AddSnapshot(self, old_snapshots=None):
"""
Insert new snapshot id(s).
If oldsnapshots is None, then it detects the maximum snapshot id and replicates it alone.
Parameters:
oldsnapshots - None or an iterable containing the snapshotids to replicate with new snapshotids
"""
conn = self.dbinstance.cursor()
# If no old snapshotid was specified, then find the maximum one in the database
if old_snapshots is None:
max_existing_snapshot_results = conn.execute('SELECT MAX(snapshotid) FROM snapshots')
max_existing_snapshot = max_existing_snapshot_results.fetchone()
old_snapshots = list()
old_snapshots.append(max_existing_snapshot[0])
# Keep a list of the new snapshotids so we can return the maximum one, useful for the Caller for uploads to CloudFiles
new_snapshots = list()
for snapshot in old_snapshots:
# Each new snapshot needs to be after the maximum entry in the table
max_existing_snapshot_results = conn.execute('SELECT MAX(snapshotid) FROM snapshots')
max_existing_snapshot = max_existing_snapshot_results.fetchone()
new_snapshot_id = int(max_existing_snapshot[0]) + 1
# Insert a new snapshot into the database using an existing snapshot as a baseline for certain values
conn.execute('INSERT INTO snapshots (snapshotid, startdate, state, cleanupindex, backupconfigurationid) SELECT {0:}, startdate, 4, cleanupindex, backupconfigurationid FROM snapshots WHERE snapshotid == {1:}'.format(new_snapshot_id, snapshot))
# And save the result
new_snapshots.append(new_snapshot_id)
# Reverse the short so it goes max->min
sorted_snapshots = sorted(new_snapshots, reverse=True)
if len(new_snapshots):
self.dbinstance.commit()
return sorted_snapshots[0]
def GetSnapshots(self, backupconfigurationids=None, states=None):
"""
Returns the list of existing snapshots
:param backupconfigurationids: list of backup configuration ids to return snapshots for
:param states: list of backup states to return snapshots for
"""
conn = self.dbinstance.cursor()
snapshots = []
for result in conn.execute('SELECT snapshotid, startdate, state, cleanupindex, backupconfigurationid FROM snapshots'):
snapshot = {
'snapshotid': int(result[0]),
'startdate': result[1],
'state': int(result[2]),
'cleanupindex': int(result[3]) if result[3] else '',
'backupconfigurationid': int(result[4])
}
# Skip any backup configurations that are not desired by the caller
if backupconfigurationids is not None:
if str(snapshot['backupconfigurationid']) not in backupconfigurationids:
continue
# Skip any states that are not desired by the caller
if states is not None:
if str(snapshot['state']) not in states:
continue
snapshots.append(snapshot)
return snapshots
def Vacuum(self):
"""
Shrink the database file to minimum size
"""
conn = self.dbinstance.cursor()
conn.execute('VACUUM')
self.dbinstance.commit()
# If we make it here
return True
    def Rename(self, new_filename):
        """
        Rename the working database file to the specified file name.

        :param new_filename: path the database file is moved to
        :returns: True once the rename has completed
        """
        # Order matters: the SQLite handle must be closed before the file can
        # safely be moved, and re-opened afterwards so this instance keeps
        # working against the new path.
        self.__close_db()
        os.rename(self.dbfile, new_filename)
        self.dbfile = new_filename
        self.__open_db()
        return True
    def BloatDatabase(self, table_suffix=None, granularity=1024 * 1024, minimum_compressed_size=5.1 * 1024 * 1024 * 1024):
        """
        Insert a table with random data in its columns to grow the database sufficiently to create a compressed database >5GB.
        File size typically needs to be in the 12GB+ range.

        :param table_suffix: optional suffix for the bloat table name
            ('bloat_table' or 'bloat_table_<suffix>')
        :param granularity: number of rows inserted per batch before the
            compressed size is re-measured
        :param minimum_compressed_size: target gzip-compressed size in bytes
        :returns: tuple of (original compressed size, final compressed size)
        """
        def __bloat_db_find_test_compressed_size():
            # Gzip the whole database file to a temp file and return the
            # compressed size in bytes; the database is closed during the
            # measurement so the on-disk file is consistent.
            import tempfile
            import gzip
            database_is_opened = False
            self.log.debug('Ensuring database closed in order to reliably generate a compressed file for testing')
            # Iff the database was opened then
            # Clean up and close the database for a reliable number
            if self.__is_db_opened():
                self.log.debug('Found the database open.')
                self.Vacuum()
                self.__close_db()
                database_is_opened = True
            # Generate a temp file for copying the compressed database to
            temp_file_info = tempfile.mkstemp()
            bloat_db_temp_file = temp_file_info[1]
            print('Compressing file for size check')
            # Apparently there is a bug in gzip.py that prevents the following from working:
            #
            # Compress the database to the new file
            with gzip.open(bloat_db_temp_file, 'wb') as gzip_file:
                with open(self.dbfile, 'rb') as input_file:
                    check_compressed_file_continue_loop = True
                    while check_compressed_file_continue_loop:
                        file_chunk = input_file.read(1024)
                        if len(file_chunk) == 0:
                            check_compressed_file_continue_loop = False
                        else:
                            gzip_file.write(file_chunk)
            # Get the compressed file size
            gzip_file_size = os.path.getsize(bloat_db_temp_file)
            file_sizes = (gzip_file_size, gzip_file_size / 1024, gzip_file_size / (1024 * 1024), gzip_file_size / (1024 * 1024 * 1024))
            print('\tSize: {0:} bytes, {1:} kilobytes, {2:} megabytes, {3:} gigabytes'.format(file_sizes[0], file_sizes[1], file_sizes[2], file_sizes[3]))
            # Remove the file since we don't really need it
            os.remove(bloat_db_temp_file)
            # And re-open the database iff it was previously opened
            if database_is_opened is True:
                self.log.debug('Database was found open. Re-opening.')
                self.__open_db()
                assert self.__is_db_opened()
            # return the size of the file
            return gzip_file_size
        original_compressed_size = __bloat_db_find_test_compressed_size()
        # in case the file doesn't get changed...
        new_compressed_size = original_compressed_size
        # Minimum 5.1 GB = 5.1*1024 MB = 5.1*1024*1024 KB = 5.1*1024*1024*1024 bytes
        if original_compressed_size < minimum_compressed_size:
            # Ensure the database is opened for use
            if self.__is_db_opened() is False:
                self.log.debug('Database was closed. Opening.')
                self.__open_db()
                assert self.__is_db_opened()
            # Determine the table name that will be used
            table_name = None
            if table_suffix is None:
                table_name = 'bloat_table'
            else:
                table_name = 'bloat_table_{0:}'.format(table_suffix)
            conn = self.dbinstance.cursor()
            # Ensure the table already exists
            conn.execute('CREATE TABLE IF NOT EXISTS {0:} ( a INTEGER PRIMARY KEY ASC, b DATETIME NOT NULL, c TEXT NOT NULL)'.format(table_name))
            # Loop on the file size being large enough
            # Note: This will incur a bit of disk thrashing but it's the only reliable way to get the database size large enough
            while new_compressed_size <= minimum_compressed_size:
                # make sure we have a good cursor
                loop_conn = self.dbinstance.cursor()
                # Insert a lot of records to get the size up
                jumbo_count = 0
                while jumbo_count < granularity:
                    loop_conn.execute('INSERT INTO {0:} (a, b, c) VALUES(NULL, DATETIME(\'now\'), HEX(RANDOMBLOB(128)))'.format(table_name))
                    jumbo_count = jumbo_count + 1
                # Ensure the data is persistent
                self.dbinstance.commit()
                # Get the new size
                new_compressed_size = __bloat_db_find_test_compressed_size()
        # else don't do anything - the file's big enough
        return (original_compressed_size, new_compressed_size)
| en | 0.824396 | Rackspace Cloud Backup SQLite Database Functionality Generate a new Random Offset Value :returns: int - random value where 0 <= v <= MAX_CLEANUP_OFFSET Calculate a new offset value based on an existing offset and the number of seconds to change it by :returns: int - number of seconds the offset adds to the base time # Enforce that the new offset must be a valid offset value Calculate the next cleanup week (Current Time (Unix Epoch) + (mDaysToKeepFiles * Seconds Per Day) + Offset) / Seconds Per Week # Convert to ISO representation # ISO Week of the current year # Number of weeks since Jan 1, 1970 # Cleanup Week is # of weeks since 1970 # offset.offset is really just a Unix Epoch offset, # so treat it as a Unix Epoch to determine which day of the week starts # the cleanup week for this agent Create a new offset with the invalid offset set :returns: CloudBackupCleanUpOffset with the new offset Create a new offset with the random offset set :returns: CloudBackupCleanUpOffset with the new offset Initialize the cleanup offset :param offset: int or CloudBackupCleanUpOffset - the cleanup offset being represented :raises: TypeError - if offset is not an instance of int or CloudBackupCleanUpOffset # if in then just store it # If CloudBackupCleanUpOffset then copy the offset over # Otherwise we don't support the conversion Enforce that the stored offset is within the valid values :raises: ValueError when the value exceeds the limit of 0 <= v <= INVALID_CLEANUP_INDEX Offset being stored :returns: int - number of seconds the offset adds to the base time Update the stored offset to a new, specific value Modify the stored offset by the specified number of seconds :raises: ValueError - when the new value is not valid Cloud Backup Sqlite Database Interface Open a SQLite3 instance to the specified sqlite3 db file Clean up Open the database Close the database instance Return whether or not the database is currently opened for use Given a directory id from the 
database, retrieve its path and parent directory id Given a snapshotid return all the files and their relevant data from the database snapshotid - id value maching the snapshots table for a valid snapshot # note: we have to check against two snapshot id's as there is the possibility that no file has the snapshot id # that was provided if that file is still in the latest snapshot and therefore fresh. # Not sure how else to associate the files to a specific snapshot ftm. Given a fileid return all the files and their relevant data from the database Returns a dictionary containing the following: filedata path name sha512 size blockdata (for when size is below a given threshold) blocks dictionary containing: id sha512 size bundle (dictionary containing id and offset) bundles set() - list of bundles that go with the file bundles set() - list of unique bundles that go with the all files returned # note: we have to check against two snapshot id's as there is the possibility that no file has the snapshot id # that was provided if that file is still in the latest snapshot and therefore fresh. # Not sure how else to associate the files to a specific snapshot ftm. # Should only run once... 
# Get the file specific block data # Build up the data we're returning # Added the bundle data back to the main set - we only want one copy of each bundle # now that we're done capturing all the file specific data, we can now capture the bundle data Given a snapshotid return all the files contained in it and their relevant data from the database Returns a dictionary containing the following: filedata path name sha512 size blockdata (for when size is below a given threshold) blocks dictionary containing: id sha512 size bundle (dictionary containing id and offset) bundles set() - list of bundles that go with the file bundles set() - list of unique bundles that go with the all files returned # note: we have to check against two snapshot id's as there is the possibility that no file has the snapshot id # that was provided if that file is still in the latest snapshot and therefore fresh. # Not sure how else to associate the files to a specific snapshot ftm. # Get the file specific block data # Build up the data we're returning # Added the bundle data back to the main set - we only want one copy of each bundle # now that we're done capturing all the file specific data, we can now capture the bundle data Given a fileid retrieve all the associated block information Returns a dictionary containing the following: blocks dictionary containing: id sha1 size bundle (dictionary containing id and offset) bundles set() - list of bundles that go with the file Given a bundle list retrieve all the information Returns a dictionary of bundle is containins the following: id md5 totalsize garbagesize usedsized Given a snapshot id, retrieve basic file information from the files table Returns a list of dictionaries. 
Each dictionary contains: id directoryid directory -- directory path filename type -- 1 folder, 0 file, 2 symlink Returns the list of existing backup configurations Given a backup configuration id used internally by the agent return its equivalent for the API Given a backup configuration id used externally by the API return its equivalent for the agent Detect the database entries that create a unique constraint violation Returns: True/False for whether or not the Unique Constraint Violation was detected # Reset the addedinsnapshot for each round # Retrieve all invalid rows from the database for the provided entry # First round we just want the snapshotid # All remaining rounds we update the lastsnapshotid field to be the added in snapshotid of the previous round Detect if any directory names are in violation of the ASCII characters Detect if any file names are in violation of the ASCII characters Return the directory associated with the specified directoryid Fix the Unique Constraint Violations # Reset the addedinsnapshot for each round # Retrieve all invalid rows from the database for the provided entry # First round we just want the snapshotid # All remaining rounds we update the lastsnapshotid field to be the added in snapshotid of the previous round Returns the Cleanup Offset to the default value which will cause the agent to generate a new random time for the cleanup offset to occur at. Retrieves the Cleanup Offset from the database and returns an CloudBackupCleanUpOffset object containing the result. 
Sets the Cleanup Offset into the database based on the provided CloudBackupCleanUpOffset object Retrieves the Last Cleanup Week from the database and returns as an integer Calculate the next cleanup time for each configuration in the database :param cleanup_offset: CloudBackupCleanUpOffset - the cleanup offset to use for the calculation :returns: a list of the configurations and their next cleanup information each entry is a tuple of ( backupconfigurationid, cleanup week index, day of week that starts the cleanup week) Insert new snapshot id(s). If oldsnapshots is None, then it detects the maximum snapshot id and replicates it alone. Parameters: oldsnapshots - None or an iterable containing the snapshotids to replicate with new snapshotids # If no old snapshotid was specified, then find the maximum one in the database # Keep a list of the new snapshotids so we can return the maximum one, useful for the Caller for uploads to CloudFiles # Each new snapshot needs to be after the maximum entry in the table # Insert a new snapshot into the database using an existing snapshot as a baseline for certain values # And save the result # Reverse the short so it goes max->min Returns the list of existing snapshots :param backupconfigurationids: list of backup configuration ids to return snapshots for :param states: list of backup states to return snapshots for # Skip any backup configurations that are not desired by the caller # Skip any states that are not desired by the caller Shrink the database file to minimum size # If we make it here Rename the working database file to the specified file name Insert a table with random data in its columns to grow the database sufficiently to create a compressed database >5GB. 
File size typically needs to be in the 12GB+ range # Iff the database was opened then # Clean up and close the database for a reliable number # Generate a temp file for copying the compressed database to # Apparently there is a bug in gzip.py that prevents the following from working: # # Compress the database to the new file # Get the compressed file size # Remove the file since we don't really need it # And re-open the database iff it was previously opened # return the size of the file # in case the file doesn't get changed... # Minimum 5.1 GB = 5.1*1024 MB = 5.1*1024*1024 KB = 5.1*1024*1024*1024 bytes # Ensure the database is opened for use # Determine the table name that will be used # Ensure the table already exists # Loop on the file size being large enough # Note: This will incur a bit of disk thrashing but it's the only reliable way to get the database size large enough # make sure we have a good cursor # Insert a lot of records to get the size up # Ensure the data is persistent # Get the new size # else don't do anything - the file's big enough | 2.779891 | 3 |
cogs/game.py | jossepio/jos | 2 | 6615235 | import discord
from discord.ext import commands
class Game(commands.Cog):
    """Cog that lets a member announce to @everyone which game they want to play."""

    def __init__(self, client):
        self.client = client

    @commands.Cog.listener()
    async def on_ready(self):
        print("Game Cog has been loaded\n-----")

    async def _announce(self, ctx, game_name):
        # Shared announcement used by every game subcommand. The previous
        # per-command copies had drifted apart (missing space after the comma
        # in `raft`, a stray '!' in `pubg`); building the message in one place
        # keeps them consistent.
        await ctx.send(f'Tamamdır {ctx.author.name}, {game_name} oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')

    @commands.group(help="Base command")
    async def game(self, ctx):
        # Only react when no subcommand matched: delete the trigger message
        # and show a short-lived usage hint.
        if ctx.invoked_subcommand is None:
            await ctx.message.delete()
            await ctx.send("`Lütfen oynamak istediğiniz oyunu belirtin.`", delete_after=5)

    # NOTE(review): the `member` parameter below is unused in every subcommand
    # and its default is the discord.Member *class*, not an instance. It is
    # kept for signature compatibility but looks like a leftover to confirm.
    @game.group(help="PUBG oyuncularına duyuruluyor...")
    async def pubg(self, ctx, member=discord.Member):
        await self._announce(ctx, 'PUBG')

    @game.group(help="LoL oyuncularına duyuruluyor...")
    async def lol(self, ctx, member=discord.Member):
        await self._announce(ctx, 'LoL')

    @game.group(help="Valorant oyuncularına duyuruluyor...")
    async def valorant(self, ctx, member=discord.Member):
        await self._announce(ctx, 'Valorant')

    @game.group(help="CS:GO oyuncularına duyuruluyor...")
    async def csgo(self, ctx, member=discord.Member):
        await self._announce(ctx, 'CS:GO')

    @game.group(help="Apex oyuncularına duyuruluyor...")
    async def apex(self, ctx, member=discord.Member):
        await self._announce(ctx, 'Apex')

    @game.group(help="RS6 oyuncularına duyuruluyor...", aliases=["rainbow"])
    async def rs6(self, ctx, member=discord.Member):
        await self._announce(ctx, 'RS6')

    @game.group(help="TFT oyuncularına duyuruluyor...")
    async def tft(self, ctx, member=discord.Member):
        await self._announce(ctx, 'TFT')

    @game.group(help="Minecraft oyuncularına duyuruluyor...", aliases=['mc'])
    async def minecraft(self, ctx, member=discord.Member):
        await self._announce(ctx, 'Minecraft')

    @game.group(help="COD: Warzone oyuncularına duyuruluyor...")
    async def warzone(self, ctx, member=discord.Member):
        await self._announce(ctx, 'COD: Warzone')

    @game.group(help="Raft oyuncularına duyuruluyor...")
    async def raft(self, ctx, member=discord.Member):
        await self._announce(ctx, 'Raft')

    @game.group(help="HOI oyuncularına duyuruluyor...", aliases=["hoi"])
    async def hoi4(self, ctx, member=discord.Member):
        await self._announce(ctx, 'HOI4')

    @game.group(help="Roblox oyuncularına duyuruluyor...")
    async def roblox(self, ctx, member=discord.Member):
        await self._announce(ctx, 'Roblox')

    @game.group(help="Rust oyuncularına duyuruluyor...")
    async def rust(self, ctx, member=discord.Member):
        await self._announce(ctx, 'Rust')

    # Bug fix: the help text previously said "COD: Warzone" (copy/paste error).
    @game.group(help="GTA V oyuncularına duyuruluyor...", aliases=["gtav", "gta5"])
    async def gta(self, ctx, member=discord.Member):
        await self._announce(ctx, 'GTA V')

    @game.group(help="Forest oyuncularına duyuruluyor...")
    async def forest(self, ctx, member=discord.Member):
        await self._announce(ctx, 'Forest')

    @game.group(help="Warcraft oyuncularına duyuruluyor...")
    async def warcraft(self, ctx, member=discord.Member):
        await self._announce(ctx, 'Warcraft')
def setup(client):
client.add_cog(Game(client)) | import discord
from discord.ext import commands
class Game(commands.Cog):
def __init__(self,client):
self.client = client
@commands.Cog.listener()
async def on_ready(self):
print("Game Cog has been loaded\n-----")
@commands.group(help="Base command")
async def game(self,ctx):
if ctx.invoked_subcommand is None:
await ctx.message.delete()
await ctx.send("`Lütfen oynamak istediğiniz oyunu belirtin.`",delete_after=5)
@game.group(help="PUBG oyuncularına duyuruluyor...")
async def pubg(self,ctx,member= discord.Member):
await ctx.send(f"Tamamdır {ctx.author.name}, PUBG oynamak istediğini tüm üyelere söylüyorum!\n@everyone bi bakalım buraya...")
@game.group(help="LoL oyuncularına duyuruluyor...")
async def lol(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, LoL oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="Valorant oyuncularına duyuruluyor...")
async def valorant(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, Valorant oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="CS:GO oyuncularına duyuruluyor...")
async def csgo(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, CS:GO oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="Apex oyuncularına duyuruluyor...")
async def apex(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, Apex oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="RS6 oyuncularına duyuruluyor...",aliases=["rainbow"])
async def rs6(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, RS6 oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="TFT oyuncularına duyuruluyor...")
async def tft(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, TFT oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="Minecraft oyuncularına duyuruluyor...",aliases=['mc'])
async def minecraft(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, Minecraft oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="COD: Warzone oyuncularına duyuruluyor...")
async def warzone(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, COD: Warzone oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="Raft oyuncularına duyuruluyor...")
async def raft(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name},Raft oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="HOI oyuncularına duyuruluyor...",aliases=["hoi"])
async def hoi4(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, HOI4 oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="Roblox oyuncularına duyuruluyor...")
async def roblox(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, Roblox oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="Rust oyuncularına duyuruluyor...")
async def rust(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, Rust oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="COD: Warzone oyuncularına duyuruluyor...",aliases=["gtav","gta5"])
async def gta(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, GTA V oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="Forest oyuncularına duyuruluyor...")
async def forest(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, Forest oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
@game.group(help="Warcraft oyuncularına duyuruluyor...")
async def warcraft(self,ctx,member=discord.Member):
await ctx.send(f'Tamamdır {ctx.author.name}, Warcraft oynamak istediğini tüm üyelere söylüyorum\n@everyone bi bakalım buraya...')
def setup(client):
client.add_cog(Game(client)) | none | 1 | 2.637402 | 3 | |
pyDAWG.py | dopefishh/praatalign | 23 | 6615236 | #!/bin/env python
# -*- coding: utf-8 -*-
"""
This is part of pydawg Python module.
Pure python implementation.
Author : <NAME>, <EMAIL>
WWW : http://0x80.pl/proj/pydawg/
License : Public domain
Date : $Date$
$Id$
"""
class DAWGNode:
__slots__ = ["children", "final", "number"]
def __init__(self, char):
self.children = {}
self.final = False
self.number = None
def get_next(self, char):
try:
return self.children[char]
except KeyError:
return None
def set_next(self, char, child):
self.children[char] = child
def has_transition(self, char):
return char in self.children
def __str__(self):
return "<" + "".join(self.children.keys()) + ">"
def equivalence(p, q):
"check if states p and q are equivalent"
if p.final != q.final:
return False
if len(p.children) != len(q.children):
return False
s = set(p.children)
if s != set(q.children):
return False
"""
# exact definition of equivalence
for c in s:
if not equivalence(p.children[c], q.children[c]):
return False
"""
# pratical implementation - constraints make
# this much simpler and faster
for c in s:
if p.children[c] != q.children[c]:
return False
return True
class DAWG:
def __init__(self):
self._numbers_valid = False
self.register = set()
self.q0 = DAWGNode(None)
self.wp = ''
def add_word(self, word):
assert word > self.wp
return self.add_word_unchecked(word)
def add_word_unchecked(self, word):
# 1. skip existing
i = 0
s = self.q0
while i < len(word) and s.has_transition(word[i]):
s = s.get_next(word[i])
i = i + 1
assert s is not None
# 2. minimize
if i < len(self.wp):
self._replace_or_register(s, self.wp[i:])
# 3. add suffix
while i < len(word):
n = DAWGNode(word[i])
s.set_next(word[i], n)
assert n == s.get_next(word[i])
s = n
i = i + 1
s.final = True
self.wp = word
self._numbers_valid = False
def _replace_or_register(self, state, suffix):
stack = []
while suffix:
letter = suffix[0]
next = state.get_next(letter)
stack.append((state, letter, next))
state = next
suffix = suffix[1:]
while stack:
parent, letter, state = stack.pop()
found = False
for r in self.register:
if equivalence(state, r):
assert(parent.children[letter] == state)
parent.children[letter] = r
found = True
break
if not found:
self.register.add(state)
def freeze(self):
self._replace_or_register(self.q0, self.wp)
self._numbers_valid = False
close = freeze
def _num_nodes(self):
def clear_aux(node):
node.number = None
for child in node.children.values():
clear_aux(child)
def num_aux(node):
if node.number is None:
n = int(node.final)
for child in node.children.values():
n += num_aux(child)
node.number = n
return node.number
if not self._numbers_valid:
clear_aux(self.q0)
num_aux(self.q0)
self._numbers_valid = True
def word2index(self, word):
self._num_nodes()
state = self.q0
index = 0
for c in word:
try:
next = state.children[c]
except KeyError:
return None
for C in sorted(state.children):
if C < c:
index += state.children[C].number
else:
break
state = next
if state.final:
index = index + 1
return index
def index2word(self, index):
self._num_nodes()
state = self.q0
count = index
output_word = ""
while True:
for c in sorted(state.children):
tmp = state.get_next(c)
if tmp.number < count:
count -= tmp.number
else:
output_word += c
state = tmp
if state.final:
count -= 1
break
if count <= 0:
break
return output_word
def words(self):
L = []
def aux(node, word):
if node.final:
L.append(word)
for letter, child in node.children.items():
aux(child, word + letter)
aux(self.q0, '')
return L
def __iter__(self):
return iter(self.words())
| #!/bin/env python
# -*- coding: utf-8 -*-
"""
This is part of pydawg Python module.
Pure python implementation.
Author : <NAME>, <EMAIL>
WWW : http://0x80.pl/proj/pydawg/
License : Public domain
Date : $Date$
$Id$
"""
class DAWGNode:
__slots__ = ["children", "final", "number"]
def __init__(self, char):
self.children = {}
self.final = False
self.number = None
def get_next(self, char):
try:
return self.children[char]
except KeyError:
return None
def set_next(self, char, child):
self.children[char] = child
def has_transition(self, char):
return char in self.children
def __str__(self):
return "<" + "".join(self.children.keys()) + ">"
def equivalence(p, q):
"check if states p and q are equivalent"
if p.final != q.final:
return False
if len(p.children) != len(q.children):
return False
s = set(p.children)
if s != set(q.children):
return False
"""
# exact definition of equivalence
for c in s:
if not equivalence(p.children[c], q.children[c]):
return False
"""
# pratical implementation - constraints make
# this much simpler and faster
for c in s:
if p.children[c] != q.children[c]:
return False
return True
class DAWG:
def __init__(self):
self._numbers_valid = False
self.register = set()
self.q0 = DAWGNode(None)
self.wp = ''
def add_word(self, word):
assert word > self.wp
return self.add_word_unchecked(word)
def add_word_unchecked(self, word):
# 1. skip existing
i = 0
s = self.q0
while i < len(word) and s.has_transition(word[i]):
s = s.get_next(word[i])
i = i + 1
assert s is not None
# 2. minimize
if i < len(self.wp):
self._replace_or_register(s, self.wp[i:])
# 3. add suffix
while i < len(word):
n = DAWGNode(word[i])
s.set_next(word[i], n)
assert n == s.get_next(word[i])
s = n
i = i + 1
s.final = True
self.wp = word
self._numbers_valid = False
def _replace_or_register(self, state, suffix):
stack = []
while suffix:
letter = suffix[0]
next = state.get_next(letter)
stack.append((state, letter, next))
state = next
suffix = suffix[1:]
while stack:
parent, letter, state = stack.pop()
found = False
for r in self.register:
if equivalence(state, r):
assert(parent.children[letter] == state)
parent.children[letter] = r
found = True
break
if not found:
self.register.add(state)
def freeze(self):
self._replace_or_register(self.q0, self.wp)
self._numbers_valid = False
close = freeze
def _num_nodes(self):
def clear_aux(node):
node.number = None
for child in node.children.values():
clear_aux(child)
def num_aux(node):
if node.number is None:
n = int(node.final)
for child in node.children.values():
n += num_aux(child)
node.number = n
return node.number
if not self._numbers_valid:
clear_aux(self.q0)
num_aux(self.q0)
self._numbers_valid = True
def word2index(self, word):
self._num_nodes()
state = self.q0
index = 0
for c in word:
try:
next = state.children[c]
except KeyError:
return None
for C in sorted(state.children):
if C < c:
index += state.children[C].number
else:
break
state = next
if state.final:
index = index + 1
return index
def index2word(self, index):
self._num_nodes()
state = self.q0
count = index
output_word = ""
while True:
for c in sorted(state.children):
tmp = state.get_next(c)
if tmp.number < count:
count -= tmp.number
else:
output_word += c
state = tmp
if state.final:
count -= 1
break
if count <= 0:
break
return output_word
def words(self):
L = []
def aux(node, word):
if node.final:
L.append(word)
for letter, child in node.children.items():
aux(child, word + letter)
aux(self.q0, '')
return L
def __iter__(self):
return iter(self.words())
| en | 0.55934 | #!/bin/env python # -*- coding: utf-8 -*- This is part of pydawg Python module. Pure python implementation. Author : <NAME>, <EMAIL> WWW : http://0x80.pl/proj/pydawg/ License : Public domain Date : $Date$ $Id$ # exact definition of equivalence for c in s: if not equivalence(p.children[c], q.children[c]): return False # pratical implementation - constraints make # this much simpler and faster # 1. skip existing # 2. minimize # 3. add suffix | 3.610067 | 4 |
gopatch/reports/utils.py | porala/python | 1 | 6615237 | <filename>gopatch/reports/utils.py
# Copyright 2012 VPAC, http://www.vpac.org
# Copyright 2013-2016 <NAME> <<EMAIL>>
#
# This file is part of Patchman.
#
# Patchman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 only.
#
# Patchman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Patchman. If not, see <http://www.gnu.org/licenses/>
from __future__ import unicode_literals
import re
from django.db import IntegrityError, DatabaseError, transaction
from hosts.models import HostRepo
from arch.models import MachineArchitecture, PackageArchitecture
from repos.models import Repository, Mirror, MirrorPackage
from packages.models import Package, PackageName, PackageUpdate
from packages.utils import find_evr, get_or_create_package
from patchman.signals import progress_info_s, progress_update_s, \
error_message, info_message
def process_repos(report, host):
""" Processes the quoted repos string sent with a report
"""
if report.repos:
repo_ids = []
host_repos = HostRepo.objects.filter(host=host)
repos = parse_repos(report.repos)
progress_info_s.send(sender=None,
ptext='{0!s} repos'.format(str(host)[0:25]),
plen=len(repos))
for i, repo_str in enumerate(repos):
repo, priority = process_repo(repo_str, report.arch)
if repo:
repo_ids.append(repo.id)
try:
with transaction.atomic():
hostrepo, c = host_repos.get_or_create(host=host,
repo=repo)
except IntegrityError as e:
error_message.send(sender=None, text=e)
hostrepo = host_repos.get(host=host, repo=repo)
try:
if hostrepo.priority != priority:
hostrepo.priority = priority
with transaction.atomic():
hostrepo.save()
except IntegrityError as e:
error_message.send(sender=None, text=e)
progress_update_s.send(sender=None, index=i + 1)
for hostrepo in host_repos:
if hostrepo.repo.id not in repo_ids:
hostrepo.delete()
def process_packages(report, host):
""" Processes the quoted packages string sent with a report
"""
if report.packages:
package_ids = []
packages = parse_packages(report.packages)
progress_info_s.send(sender=None,
ptext='{0!s} packages'.format(str(host)[0:25]),
plen=len(packages))
for i, pkg_str in enumerate(packages):
package = process_package(pkg_str, report.protocol)
if package:
package_ids.append(package.id)
try:
with transaction.atomic():
host.packages.add(package)
except IntegrityError as e:
error_message.send(sender=None, text=e)
except DatabaseError as e:
error_message.send(sender=None, text=e)
else:
if pkg_str[0].lower() != 'gpg-pubkey':
text = 'No package returned for {0!s}'.format(pkg_str)
info_message.send(sender=None, text=text)
progress_update_s.send(sender=None, index=i + 1)
for package in host.packages.all():
if package.id not in package_ids:
host.packages.remove(package)
def process_updates(report, host):
""" Processes the update strings sent with a report
"""
bug_updates = ''
sec_updates = ''
if report.bug_updates:
bug_updates = parse_updates(report.bug_updates)
add_updates(bug_updates, host, False)
if report.sec_updates:
sec_updates = parse_updates(report.sec_updates)
add_updates(sec_updates, host, True)
def add_updates(updates, host, security):
""" Add updates to a Host
"""
ulen = len(updates)
if security:
extra = 'sec'
else:
extra = 'bug'
if ulen > 0:
ptext = '{0!s} {1!s} updates'.format(str(host)[0:25], extra)
progress_info_s.send(sender=None, ptext=ptext, plen=ulen)
for i, u in enumerate(updates):
update = process_update(host, u, security)
if update:
host.updates.add(update)
progress_update_s.send(sender=None, index=i + 1)
def parse_updates(updates_string):
""" Parses updates string in a report and returns a sanitized version
"""
updates = []
ulist = updates_string.split()
while ulist:
updates.append('{0!s} {1!s} {2!s}\n'.format(ulist[0],
ulist[1],
ulist[2]))
ulist.pop(0)
ulist.pop(0)
ulist.pop(0)
return updates
def process_update(host, update_string, security):
""" Processes a single sanitized update string and converts to an update
object
"""
update_str = update_string.split()
repo_id = update_str[2]
parts = update_str[0].rpartition('.')
package_str = parts[0]
arch_str = parts[2]
p_epoch, p_version, p_release = find_evr(update_str[1])
package_arches = PackageArchitecture.objects.all()
with transaction.atomic():
p_arch, c = package_arches.get_or_create(name=arch_str)
package_names = PackageName.objects.all()
with transaction.atomic():
p_name, c = package_names.get_or_create(name=package_str)
packages = Package.objects.all()
with transaction.atomic():
package, c = packages.get_or_create(name=p_name,
arch=p_arch,
epoch=p_epoch,
version=p_version,
release=p_release,
packagetype='R')
try:
repo = Repository.objects.get(repo_id=repo_id)
except Repository.DoesNotExist:
repo = None
if repo:
for mirror in repo.mirror_set.all():
with transaction.atomic():
MirrorPackage.objects.create(mirror=mirror, package=package)
installed_package = host.packages.filter(name=p_name,
arch=p_arch,
packagetype='R')[0]
updates = PackageUpdate.objects.all()
with transaction.atomic():
update, c = updates.get_or_create(oldpackage=installed_package,
newpackage=package,
security=security)
return update
def parse_repos(repos_string):
""" Parses repos string in a report and returns a sanitized version
"""
repos = []
for r in [s for s in repos_string.splitlines() if s]:
repodata = re.findall('\'.*?\'', r)
for i, rs in enumerate(repodata):
repodata[i] = rs.replace('\'', '')
repos.append(repodata)
return repos
def process_repo(repo, arch):
""" Processes a single sanitized repo string and converts to a repo object
"""
repository = r_id = None
if repo[2] == '':
r_priority = 0
if repo[0] == 'deb':
r_type = Repository.DEB
r_priority = int(repo[2])
elif repo[0] == 'rpm':
r_type = Repository.RPM
r_id = repo.pop(2)
r_priority = int(repo[2]) * -1
if repo[1]:
r_name = repo[1]
machine_arches = MachineArchitecture.objects.all()
with transaction.atomic():
r_arch, c = machine_arches.get_or_create(name=arch)
unknown = []
for r_url in repo[3:]:
try:
mirror = Mirror.objects.get(url=r_url)
except Mirror.DoesNotExist:
if repository:
Mirror.objects.create(repo=repository, url=r_url)
else:
unknown.append(r_url)
else:
repository = mirror.repo
if not repository:
repositories = Repository.objects.all()
try:
with transaction.atomic():
repository, c = repositories.get_or_create(name=r_name,
arch=r_arch,
repotype=r_type)
except IntegrityError as e:
error_message.send(sender=None, text=e)
repository = repositories.get(name=r_name,
arch=r_arch,
repotype=r_type)
except DatabaseError as e:
error_message.send(sender=None, text=e)
if r_id and repository.repo_id != r_id:
repository.repo_id = r_id
with transaction.atomic():
repository.save()
for url in unknown:
Mirror.objects.create(repo=repository, url=url)
for mirror in Mirror.objects.filter(repo=repository).values('url'):
if mirror['url'].find('cdn.redhat.com') != -1 or \
mirror['url'].find('nu.novell.com') != -1:
repository.auth_required = True
with transaction.atomic():
repository.save()
if mirror['url'].find('security') != -1:
repository.security = True
with transaction.atomic():
repository.save()
return repository, r_priority
def parse_packages(packages_string):
""" Parses packages string in a report and returns a sanitized version
"""
packages = []
for p in packages_string.splitlines():
packages.append(p.replace('\'', '').split(' '))
return packages
def process_package(pkg, protocol):
""" Processes a single sanitized package string and converts to a package
object
"""
if protocol == '1':
epoch = ver = rel = ''
name = pkg[0]
if pkg[4] != '':
arch = pkg[4]
else:
arch = 'unknown'
if pkg[1]:
epoch = pkg[1]
if pkg[2]:
ver = pkg[2]
if pkg[3]:
rel = pkg[3]
if pkg[5] == 'deb':
p_type = Package.DEB
elif pkg[5] == 'rpm':
p_type = Package.RPM
else:
p_type = Package.UNKNOWN
package = get_or_create_package(name, epoch, ver, rel, arch, p_type)
return package
| <filename>gopatch/reports/utils.py
# Copyright 2012 VPAC, http://www.vpac.org
# Copyright 2013-2016 <NAME> <<EMAIL>>
#
# This file is part of Patchman.
#
# Patchman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 only.
#
# Patchman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Patchman. If not, see <http://www.gnu.org/licenses/>
from __future__ import unicode_literals
import re
from django.db import IntegrityError, DatabaseError, transaction
from hosts.models import HostRepo
from arch.models import MachineArchitecture, PackageArchitecture
from repos.models import Repository, Mirror, MirrorPackage
from packages.models import Package, PackageName, PackageUpdate
from packages.utils import find_evr, get_or_create_package
from patchman.signals import progress_info_s, progress_update_s, \
error_message, info_message
def process_repos(report, host):
""" Processes the quoted repos string sent with a report
"""
if report.repos:
repo_ids = []
host_repos = HostRepo.objects.filter(host=host)
repos = parse_repos(report.repos)
progress_info_s.send(sender=None,
ptext='{0!s} repos'.format(str(host)[0:25]),
plen=len(repos))
for i, repo_str in enumerate(repos):
repo, priority = process_repo(repo_str, report.arch)
if repo:
repo_ids.append(repo.id)
try:
with transaction.atomic():
hostrepo, c = host_repos.get_or_create(host=host,
repo=repo)
except IntegrityError as e:
error_message.send(sender=None, text=e)
hostrepo = host_repos.get(host=host, repo=repo)
try:
if hostrepo.priority != priority:
hostrepo.priority = priority
with transaction.atomic():
hostrepo.save()
except IntegrityError as e:
error_message.send(sender=None, text=e)
progress_update_s.send(sender=None, index=i + 1)
for hostrepo in host_repos:
if hostrepo.repo.id not in repo_ids:
hostrepo.delete()
def process_packages(report, host):
""" Processes the quoted packages string sent with a report
"""
if report.packages:
package_ids = []
packages = parse_packages(report.packages)
progress_info_s.send(sender=None,
ptext='{0!s} packages'.format(str(host)[0:25]),
plen=len(packages))
for i, pkg_str in enumerate(packages):
package = process_package(pkg_str, report.protocol)
if package:
package_ids.append(package.id)
try:
with transaction.atomic():
host.packages.add(package)
except IntegrityError as e:
error_message.send(sender=None, text=e)
except DatabaseError as e:
error_message.send(sender=None, text=e)
else:
if pkg_str[0].lower() != 'gpg-pubkey':
text = 'No package returned for {0!s}'.format(pkg_str)
info_message.send(sender=None, text=text)
progress_update_s.send(sender=None, index=i + 1)
for package in host.packages.all():
if package.id not in package_ids:
host.packages.remove(package)
def process_updates(report, host):
""" Processes the update strings sent with a report
"""
bug_updates = ''
sec_updates = ''
if report.bug_updates:
bug_updates = parse_updates(report.bug_updates)
add_updates(bug_updates, host, False)
if report.sec_updates:
sec_updates = parse_updates(report.sec_updates)
add_updates(sec_updates, host, True)
def add_updates(updates, host, security):
""" Add updates to a Host
"""
ulen = len(updates)
if security:
extra = 'sec'
else:
extra = 'bug'
if ulen > 0:
ptext = '{0!s} {1!s} updates'.format(str(host)[0:25], extra)
progress_info_s.send(sender=None, ptext=ptext, plen=ulen)
for i, u in enumerate(updates):
update = process_update(host, u, security)
if update:
host.updates.add(update)
progress_update_s.send(sender=None, index=i + 1)
def parse_updates(updates_string):
""" Parses updates string in a report and returns a sanitized version
"""
updates = []
ulist = updates_string.split()
while ulist:
updates.append('{0!s} {1!s} {2!s}\n'.format(ulist[0],
ulist[1],
ulist[2]))
ulist.pop(0)
ulist.pop(0)
ulist.pop(0)
return updates
def process_update(host, update_string, security):
""" Processes a single sanitized update string and converts to an update
object
"""
update_str = update_string.split()
repo_id = update_str[2]
parts = update_str[0].rpartition('.')
package_str = parts[0]
arch_str = parts[2]
p_epoch, p_version, p_release = find_evr(update_str[1])
package_arches = PackageArchitecture.objects.all()
with transaction.atomic():
p_arch, c = package_arches.get_or_create(name=arch_str)
package_names = PackageName.objects.all()
with transaction.atomic():
p_name, c = package_names.get_or_create(name=package_str)
packages = Package.objects.all()
with transaction.atomic():
package, c = packages.get_or_create(name=p_name,
arch=p_arch,
epoch=p_epoch,
version=p_version,
release=p_release,
packagetype='R')
try:
repo = Repository.objects.get(repo_id=repo_id)
except Repository.DoesNotExist:
repo = None
if repo:
for mirror in repo.mirror_set.all():
with transaction.atomic():
MirrorPackage.objects.create(mirror=mirror, package=package)
installed_package = host.packages.filter(name=p_name,
arch=p_arch,
packagetype='R')[0]
updates = PackageUpdate.objects.all()
with transaction.atomic():
update, c = updates.get_or_create(oldpackage=installed_package,
newpackage=package,
security=security)
return update
def parse_repos(repos_string):
""" Parses repos string in a report and returns a sanitized version
"""
repos = []
for r in [s for s in repos_string.splitlines() if s]:
repodata = re.findall('\'.*?\'', r)
for i, rs in enumerate(repodata):
repodata[i] = rs.replace('\'', '')
repos.append(repodata)
return repos
def process_repo(repo, arch):
""" Processes a single sanitized repo string and converts to a repo object
"""
repository = r_id = None
if repo[2] == '':
r_priority = 0
if repo[0] == 'deb':
r_type = Repository.DEB
r_priority = int(repo[2])
elif repo[0] == 'rpm':
r_type = Repository.RPM
r_id = repo.pop(2)
r_priority = int(repo[2]) * -1
if repo[1]:
r_name = repo[1]
machine_arches = MachineArchitecture.objects.all()
with transaction.atomic():
r_arch, c = machine_arches.get_or_create(name=arch)
unknown = []
for r_url in repo[3:]:
try:
mirror = Mirror.objects.get(url=r_url)
except Mirror.DoesNotExist:
if repository:
Mirror.objects.create(repo=repository, url=r_url)
else:
unknown.append(r_url)
else:
repository = mirror.repo
if not repository:
repositories = Repository.objects.all()
try:
with transaction.atomic():
repository, c = repositories.get_or_create(name=r_name,
arch=r_arch,
repotype=r_type)
except IntegrityError as e:
error_message.send(sender=None, text=e)
repository = repositories.get(name=r_name,
arch=r_arch,
repotype=r_type)
except DatabaseError as e:
error_message.send(sender=None, text=e)
if r_id and repository.repo_id != r_id:
repository.repo_id = r_id
with transaction.atomic():
repository.save()
for url in unknown:
Mirror.objects.create(repo=repository, url=url)
for mirror in Mirror.objects.filter(repo=repository).values('url'):
if mirror['url'].find('cdn.redhat.com') != -1 or \
mirror['url'].find('nu.novell.com') != -1:
repository.auth_required = True
with transaction.atomic():
repository.save()
if mirror['url'].find('security') != -1:
repository.security = True
with transaction.atomic():
repository.save()
return repository, r_priority
def parse_packages(packages_string):
""" Parses packages string in a report and returns a sanitized version
"""
packages = []
for p in packages_string.splitlines():
packages.append(p.replace('\'', '').split(' '))
return packages
def process_package(pkg, protocol):
""" Processes a single sanitized package string and converts to a package
object
"""
if protocol == '1':
epoch = ver = rel = ''
name = pkg[0]
if pkg[4] != '':
arch = pkg[4]
else:
arch = 'unknown'
if pkg[1]:
epoch = pkg[1]
if pkg[2]:
ver = pkg[2]
if pkg[3]:
rel = pkg[3]
if pkg[5] == 'deb':
p_type = Package.DEB
elif pkg[5] == 'rpm':
p_type = Package.RPM
else:
p_type = Package.UNKNOWN
package = get_or_create_package(name, epoch, ver, rel, arch, p_type)
return package
| en | 0.834001 | # Copyright 2012 VPAC, http://www.vpac.org # Copyright 2013-2016 <NAME> <<EMAIL>> # # This file is part of Patchman. # # Patchman is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, version 3 only. # # Patchman is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Patchman. If not, see <http://www.gnu.org/licenses/> Processes the quoted repos string sent with a report Processes the quoted packages string sent with a report Processes the update strings sent with a report Add updates to a Host Parses updates string in a report and returns a sanitized version Processes a single sanitized update string and converts to an update object Parses repos string in a report and returns a sanitized version Processes a single sanitized repo string and converts to a repo object Parses packages string in a report and returns a sanitized version Processes a single sanitized package string and converts to a package object | 1.83551 | 2 |
emission/net/ext_service/push/notify_interface.py | Andrew-Tan/e-mission-server | 0 | 6615238 | <reponame>Andrew-Tan/e-mission-server<filename>emission/net/ext_service/push/notify_interface.py
# Standard imports
import json
import copy
import requests
import logging
import uuid
import random
import time
# Our imports
import emission.core.get_database as edb
# Note that the URL is hardcoded because the API endpoints are not standardized.
# If we change a push provider, we will need to modify to match their endpoints.
# Hardcoding will remind us of this :)
# We can revisit this if push providers eventually decide to standardize...
try:
key_file = open('conf/net/ext_service/push.json')
key_data = json.load(key_file)
server_auth_token = key_data["server_auth_token"]
except:
logging.exception("push service not configured, push notifications not supported")
def get_auth_header():
logging.debug("Found server_auth_token starting with %s" % server_auth_token[0:10])
return {
'Authorization': "Bearer %s" % server_auth_token,
'Content-Type': "application/json"
}
def send_msg_to_service(method, url, json_data):
return requests.request(method, url, headers=get_auth_header(), json=json_data)
def invalidate_entries(ret_tokens_list):
for token_entry in ret_tokens_list:
edb.get_profile_db().update({"device_token": token_entry["token"]}, {"$set": {
"device_token_valid": token_entry["valid"],
"device_token_invalidated": token_entry["invalidated"]
}});
def get_and_invalidate_entries():
ret_tokens_list = send_msg_to_service("GET", "https://api.ionic.io/push/tokens", {})
invalidate_entries(ret_tokens_list)
def send_visible_notification(token_list, title, message, json_data, dev=False):
if len(token_list) == 0:
logging.info("len(token_list) == 0, early return to save api calls")
return
profile_to_use = "devpush" if dev == True else "prodpush";
logging.debug("dev = %s, using profile = %s" % (dev, profile_to_use))
message_dict = {
"tokens": token_list,
"profile": profile_to_use,
"notification": {
"title": title,
"message": message,
"android": {
"data": json_data,
"payload": json_data,
},
"ios": {
"data": json_data,
"payload": json_data
}
}
}
send_push_url = "https://api.ionic.io/push/notifications"
response = send_msg_to_service("POST", send_push_url, message_dict)
logging.debug(response)
return response
def send_silent_notification(token_list, json_data, dev=False):
if len(token_list) == 0:
logging.info("len(token_list) == 0, early return to save api calls")
return
ios_raw_data = copy.copy(json_data)
# multiplying by 10^6 gives us the maximum resolution possible while still
# being not a float. Have to see if that is too big.
# Hopefully we will never send a push notification a millisecond to a single phone
ios_raw_data.update({"notId": int(time.time() * 10**6)})
profile_to_use = "devpush" if dev == True else "prodpush";
logging.debug("dev = %s, using profile = %s" % (dev, profile_to_use))
message_dict = {
"tokens": token_list,
"profile": profile_to_use,
"notification": {
"android": {
"content_available": 1,
"data": json_data,
"payload": json_data
},
"ios": {
"content_available": 1,
"priority": 10,
"data": ios_raw_data,
"payload": ios_raw_data
}
}
}
send_push_url = "https://api.ionic.io/push/notifications"
response = send_msg_to_service("POST", send_push_url, message_dict)
logging.debug(response)
return response
| # Standard imports
import json
import copy
import requests
import logging
import uuid
import random
import time
# Our imports
import emission.core.get_database as edb
# Note that the URL is hardcoded because the API endpoints are not standardized.
# If we change a push provider, we will need to modify to match their endpoints.
# Hardcoding will remind us of this :)
# We can revisit this if push providers eventually decide to standardize...
try:
key_file = open('conf/net/ext_service/push.json')
key_data = json.load(key_file)
server_auth_token = key_data["server_auth_token"]
except:
logging.exception("push service not configured, push notifications not supported")
def get_auth_header():
logging.debug("Found server_auth_token starting with %s" % server_auth_token[0:10])
return {
'Authorization': "Bearer %s" % server_auth_token,
'Content-Type': "application/json"
}
def send_msg_to_service(method, url, json_data):
return requests.request(method, url, headers=get_auth_header(), json=json_data)
def invalidate_entries(ret_tokens_list):
for token_entry in ret_tokens_list:
edb.get_profile_db().update({"device_token": token_entry["token"]}, {"$set": {
"device_token_valid": token_entry["valid"],
"device_token_invalidated": token_entry["invalidated"]
}});
def get_and_invalidate_entries():
ret_tokens_list = send_msg_to_service("GET", "https://api.ionic.io/push/tokens", {})
invalidate_entries(ret_tokens_list)
def send_visible_notification(token_list, title, message, json_data, dev=False):
if len(token_list) == 0:
logging.info("len(token_list) == 0, early return to save api calls")
return
profile_to_use = "devpush" if dev == True else "prodpush";
logging.debug("dev = %s, using profile = %s" % (dev, profile_to_use))
message_dict = {
"tokens": token_list,
"profile": profile_to_use,
"notification": {
"title": title,
"message": message,
"android": {
"data": json_data,
"payload": json_data,
},
"ios": {
"data": json_data,
"payload": json_data
}
}
}
send_push_url = "https://api.ionic.io/push/notifications"
response = send_msg_to_service("POST", send_push_url, message_dict)
logging.debug(response)
return response
def send_silent_notification(token_list, json_data, dev=False):
if len(token_list) == 0:
logging.info("len(token_list) == 0, early return to save api calls")
return
ios_raw_data = copy.copy(json_data)
# multiplying by 10^6 gives us the maximum resolution possible while still
# being not a float. Have to see if that is too big.
# Hopefully we will never send a push notification a millisecond to a single phone
ios_raw_data.update({"notId": int(time.time() * 10**6)})
profile_to_use = "devpush" if dev == True else "prodpush";
logging.debug("dev = %s, using profile = %s" % (dev, profile_to_use))
message_dict = {
"tokens": token_list,
"profile": profile_to_use,
"notification": {
"android": {
"content_available": 1,
"data": json_data,
"payload": json_data
},
"ios": {
"content_available": 1,
"priority": 10,
"data": ios_raw_data,
"payload": ios_raw_data
}
}
}
send_push_url = "https://api.ionic.io/push/notifications"
response = send_msg_to_service("POST", send_push_url, message_dict)
logging.debug(response)
return response | en | 0.870071 | # Standard imports # Our imports # Note that the URL is hardcoded because the API endpoints are not standardized. # If we change a push provider, we will need to modify to match their endpoints. # Hardcoding will remind us of this :) # We can revisit this if push providers eventually decide to standardize... # multiplying by 10^6 gives us the maximum resolution possible while still # being not a float. Have to see if that is too big. # Hopefully we will never send a push notification a millisecond to a single phone | 2.327621 | 2 |
app/app/energApp/weather.py | energeeks/ashrae-energy-prediction | 14 | 6615239 | <filename>app/app/energApp/weather.py<gh_stars>10-100
import requests
import pandas as pd
from flask import current_app
def get_forecast(lat, lon):
"""
Using the API Key in config.py, a weather forecast is fetched from
openweathermap regarding a provided longitude and latitude.
:param lat: Latitude
:param lon: Longitude
:return: Request object with API response
"""
api_key = current_app.config['API_KEY']
url = "http://api.openweathermap.org/data/2.5/forecast?" \
"&units=metric&lat={}&lon={}&appid={}"\
.format(lat, lon, api_key)
return requests.get(url)
def parse_request(request):
"""
The response object from the request is parsed into a pandas data frame.
:param request: Response object from API and preferably obtained through
get_forecast().
:return: Data Frame with the obtained weather forecast.
"""
request = request.json()
request = request['list']
main = []
weather = []
clouds = []
wind = []
date = []
for r in request:
main.append(r['main'])
weather.append(r['weather'][0])
clouds.append(r['clouds'])
wind.append(r['wind'])
date.append(r['dt_txt'])
main = pd.DataFrame(main)
weather = pd.DataFrame(weather)
clouds = pd.DataFrame(clouds)
clouds.columns = ["cloud_coverage"]
wind = pd.DataFrame(wind)
total = pd.concat([main, weather, clouds, wind], axis=1)
total["date"] = pd.to_datetime(date)
return total
| <filename>app/app/energApp/weather.py<gh_stars>10-100
import requests
import pandas as pd
from flask import current_app
def get_forecast(lat, lon):
"""
Using the API Key in config.py, a weather forecast is fetched from
openweathermap regarding a provided longitude and latitude.
:param lat: Latitude
:param lon: Longitude
:return: Request object with API response
"""
api_key = current_app.config['API_KEY']
url = "http://api.openweathermap.org/data/2.5/forecast?" \
"&units=metric&lat={}&lon={}&appid={}"\
.format(lat, lon, api_key)
return requests.get(url)
def parse_request(request):
"""
The response object from the request is parsed into a pandas data frame.
:param request: Response object from API and preferably obtained through
get_forecast().
:return: Data Frame with the obtained weather forecast.
"""
request = request.json()
request = request['list']
main = []
weather = []
clouds = []
wind = []
date = []
for r in request:
main.append(r['main'])
weather.append(r['weather'][0])
clouds.append(r['clouds'])
wind.append(r['wind'])
date.append(r['dt_txt'])
main = pd.DataFrame(main)
weather = pd.DataFrame(weather)
clouds = pd.DataFrame(clouds)
clouds.columns = ["cloud_coverage"]
wind = pd.DataFrame(wind)
total = pd.concat([main, weather, clouds, wind], axis=1)
total["date"] = pd.to_datetime(date)
return total
| en | 0.780243 | Using the API Key in config.py, a weather forecast is fetched from openweathermap regarding a provided longitude and latitude. :param lat: Latitude :param lon: Longitude :return: Request object with API response The response object from the request is parsed into a pandas data frame. :param request: Response object from API and preferably obtained through get_forecast(). :return: Data Frame with the obtained weather forecast. | 3.46687 | 3 |
trees/binheap.py | jsz1/algorithms-and-data-structures | 0 | 6615240 | class BinHeap:
def __init__(self):
self.heap_list = [0]
self.current_size = 0
def perc_up(self, i):
while i // 2 > 0:
if self.heap_list[i] < self.heap_list[i // 2]:
tmp = self.heap_list[i // 2]
self.heap_list[i // 2] = self.heap_list[i]
self.heap_list[i] = tmp
def perc_down(self, i):
while (i * 2) <= self.current_size:
current_min_child = self.min_child(i)
if self.heap_list[i] > self.heap_list[current_min_child]:
tmp = self.heap_list[i]
self.heap_list[i] = self.heap_list[current_min_child]
self.heap_list[current_min_child] = tmp
i = current_min_child
def min_child(self, i):
if i * 2 + 1 > self.current_size:
return i * 2
else:
if self.heap_list[i * 2] < self.heap_list[i * 2 + 1]:
return i * 2
else:
return i * 2 + 1
def delete_min(self):
return_value = self.heap_list[1]
self.heap_list[1] = self.heap_list[self.current_size]
self.current_size = self.current_size - 1
self.heap_list.pop()
self.perc_down(1)
return return_value
def insert(self, k):
self.heap_list.append(k)
self.current_size = self.current_size + 1
self.perc_up(self.current_size)
def build_heap(self, alist):
i = len(alist) // 2
self.current_size = len(alist)
self.heap_list = [0] + alist[:]
while i > 0:
self.perc_down(i)
i = i - 1 | class BinHeap:
def __init__(self):
self.heap_list = [0]
self.current_size = 0
def perc_up(self, i):
while i // 2 > 0:
if self.heap_list[i] < self.heap_list[i // 2]:
tmp = self.heap_list[i // 2]
self.heap_list[i // 2] = self.heap_list[i]
self.heap_list[i] = tmp
def perc_down(self, i):
while (i * 2) <= self.current_size:
current_min_child = self.min_child(i)
if self.heap_list[i] > self.heap_list[current_min_child]:
tmp = self.heap_list[i]
self.heap_list[i] = self.heap_list[current_min_child]
self.heap_list[current_min_child] = tmp
i = current_min_child
def min_child(self, i):
if i * 2 + 1 > self.current_size:
return i * 2
else:
if self.heap_list[i * 2] < self.heap_list[i * 2 + 1]:
return i * 2
else:
return i * 2 + 1
def delete_min(self):
return_value = self.heap_list[1]
self.heap_list[1] = self.heap_list[self.current_size]
self.current_size = self.current_size - 1
self.heap_list.pop()
self.perc_down(1)
return return_value
def insert(self, k):
self.heap_list.append(k)
self.current_size = self.current_size + 1
self.perc_up(self.current_size)
def build_heap(self, alist):
i = len(alist) // 2
self.current_size = len(alist)
self.heap_list = [0] + alist[:]
while i > 0:
self.perc_down(i)
i = i - 1 | none | 1 | 3.462266 | 3 | |
atnr/tagging.py | kevinjdolan/Automated-Tail-Number-Recognition | 4 | 6615241 | # a very simple flask app for manually tagging the testing and training data
import flask
import json
import sys
import os
sys.path.append('.')
APP = flask.Flask(__name__)
from atnr import data
@APP.route("/")
def root():
return flask.render_template('root.jinja2')
@APP.route("/tail-character-locations/")
def tailCharacterLocations():
pending = getPendingTailCharacterLocations()
id = flask.request.values.get('id')
if id is None:
if pending:
return flask.redirect('/tail-character-locations/?id=%s' % pending[0])
else:
return "UR DONE"
else:
info = data.getInfo(id)
charactersJson = json.dumps(info.get('characters', []))
return flask.render_template('tailCharacterLocations.jinja2',
info=info,
charactersJson=charactersJson,
remaining=len(pending),
)
@APP.route("/tail-character-locations/", methods=['POST'])
def tailCharacterLocationsSave():
id = flask.request.values.get('id')
info = data.getInfo(id)
info['version'] = 0
info['tail'] = flask.request.values.get('tail').upper()
markers = flask.request.values.getlist('markers')
markers.remove('X')
characters = []
for marker in markers:
delete = flask.request.values.get('delete-' + marker)
if delete != 'true':
x = flask.request.values.get('x-' + marker)
y = flask.request.values.get('y-' + marker)
if x and y:
characters.append((int(x),int(y)))
characters = sorted(characters, key=lambda x: x[0])
info['characters'] = characters
data.updateInfo(id, info)
return flask.redirect('/tail-character-locations/')
def getPendingTailCharacterLocations():
candidates = []
for testing in [True, False]:
for info in data.listData(testing):
if info.get('version', -1) < 0:
candidates.append(info['id'])
return candidates
@APP.route('/segmentation-marker/')
def segmentationMarker():
segmentation = flask.request.values.get('segmentation')
characters = []
for testing in [True, False]:
for item in data.listData(testing):
segmentations = data.getSegmentations(item, segmentation)
for character in segmentations:
if character['status'] in ['INFERRED', 'OK', 'BAD']:
character['url'] = '/segment-image/%s/%s/%s' % (
item['id'],
segmentation,
character['label'],
)
characters.append(character)
characters = sorted(
characters,
key=lambda x: (0 if x['status'] == 'INFERRED' else 1),
)
return flask.render_template('segmentationMarker.jinja2',
charactersJson=json.dumps(characters),
segmentationJson=json.dumps(segmentation),
)
@APP.route('/segmentation-marker/', methods=['POST'])
def segmentationMarkerSave():
info = json.loads(flask.request.values.get('info'))
data.updateSegmentation(info)
print info
return "OK"
@APP.route("/image/<id>")
def image(id):
path = data.getImagePath(id)
with open(path) as f:
return f.read()
@APP.route("/segment-image/<id>/<segmentation>/<label>/")
def segmentationImage(id, segmentation, label):
info = data.getInfo(id)
path = "%s/%s/%s.png" % (info['path'], segmentation, label)
with open(path) as f:
return f.read()
if __name__ == "__main__":
APP.run(debug=True) | # a very simple flask app for manually tagging the testing and training data
import flask
import json
import sys
import os
sys.path.append('.')
APP = flask.Flask(__name__)
from atnr import data
@APP.route("/")
def root():
return flask.render_template('root.jinja2')
@APP.route("/tail-character-locations/")
def tailCharacterLocations():
pending = getPendingTailCharacterLocations()
id = flask.request.values.get('id')
if id is None:
if pending:
return flask.redirect('/tail-character-locations/?id=%s' % pending[0])
else:
return "UR DONE"
else:
info = data.getInfo(id)
charactersJson = json.dumps(info.get('characters', []))
return flask.render_template('tailCharacterLocations.jinja2',
info=info,
charactersJson=charactersJson,
remaining=len(pending),
)
@APP.route("/tail-character-locations/", methods=['POST'])
def tailCharacterLocationsSave():
id = flask.request.values.get('id')
info = data.getInfo(id)
info['version'] = 0
info['tail'] = flask.request.values.get('tail').upper()
markers = flask.request.values.getlist('markers')
markers.remove('X')
characters = []
for marker in markers:
delete = flask.request.values.get('delete-' + marker)
if delete != 'true':
x = flask.request.values.get('x-' + marker)
y = flask.request.values.get('y-' + marker)
if x and y:
characters.append((int(x),int(y)))
characters = sorted(characters, key=lambda x: x[0])
info['characters'] = characters
data.updateInfo(id, info)
return flask.redirect('/tail-character-locations/')
def getPendingTailCharacterLocations():
candidates = []
for testing in [True, False]:
for info in data.listData(testing):
if info.get('version', -1) < 0:
candidates.append(info['id'])
return candidates
@APP.route('/segmentation-marker/')
def segmentationMarker():
segmentation = flask.request.values.get('segmentation')
characters = []
for testing in [True, False]:
for item in data.listData(testing):
segmentations = data.getSegmentations(item, segmentation)
for character in segmentations:
if character['status'] in ['INFERRED', 'OK', 'BAD']:
character['url'] = '/segment-image/%s/%s/%s' % (
item['id'],
segmentation,
character['label'],
)
characters.append(character)
characters = sorted(
characters,
key=lambda x: (0 if x['status'] == 'INFERRED' else 1),
)
return flask.render_template('segmentationMarker.jinja2',
charactersJson=json.dumps(characters),
segmentationJson=json.dumps(segmentation),
)
@APP.route('/segmentation-marker/', methods=['POST'])
def segmentationMarkerSave():
info = json.loads(flask.request.values.get('info'))
data.updateSegmentation(info)
print info
return "OK"
@APP.route("/image/<id>")
def image(id):
path = data.getImagePath(id)
with open(path) as f:
return f.read()
@APP.route("/segment-image/<id>/<segmentation>/<label>/")
def segmentationImage(id, segmentation, label):
info = data.getInfo(id)
path = "%s/%s/%s.png" % (info['path'], segmentation, label)
with open(path) as f:
return f.read()
if __name__ == "__main__":
APP.run(debug=True) | en | 0.899403 | # a very simple flask app for manually tagging the testing and training data | 2.781586 | 3 |
websubsub/views.py | Fak3/websubsub | 4 | 6615242 | <filename>websubsub/views.py
import json
import logging
from collections import defaultdict
from datetime import timedelta
from django.conf import settings
from django.http import HttpResponse
from django.utils.decorators import classonlymethod
from django.utils.timezone import now
from rest_framework.views import APIView # TODO: can we live without drf dependency?
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST
from .models import Subscription
from . import tasks
logger = logging.getLogger('websubsub.views')
class WssView(APIView):
"""
Generic websub callback processing.
Usage:
Create a celery task that will accept incoming data, then in your urls.py:
>>> from websubsub.views import WssView
>>> from .tasks import news_task, reports_task
>>>
>>> urlpatterns = [
>>> path('/websubcallback/news/<uuid:id>', WssView.as_view(news_task), name='webnews')
>>> path('/websubcallback/reports/<uuid:id>', WssView.as_view(reports_task), name='webreports')
>>> ]
"""
handler_task = None
@classonlymethod
def as_view(cls, handler_task, **kwargs):
kwargs['handler_task'] = handler_task
return super().as_view(**kwargs)
def get(self, request, *args, **kwargs):
"""
Hub sends GET request to callback url to verify subscription/unsubscription or
to inform about subscription denial.
"""
if 'hub.topic' not in request.GET:
logger.error(f'{request.path}: GET request is missing hub.topic')
return Response('Missing hub.topic', status=HTTP_400_BAD_REQUEST)
mode = request.GET.get('hub.mode', None)
if mode not in ['subscribe', 'unsubscribe', 'denied']:
logger.error(f'{request.path}: GET request received unknown hub.mode "{mode}"')
return Response('Missing or unknown hub.mode', status=HTTP_400_BAD_REQUEST)
id = args[0] if args else list(kwargs.values())[0]
try:
ssn = Subscription.objects.get(id=id)
except Subscription.DoesNotExist:
logger.error(
f'Received unwanted subscription {id} "{mode}" request with'
f' topic {request.GET["hub.topic"]} !'
)
return Response('Unwanted subscription', status=HTTP_400_BAD_REQUEST)
if mode == 'subscribe':
return self.on_subscribe(request, ssn)
elif mode == 'unsubscribe':
return self.on_unsubscribe(request, ssn)
elif mode == 'denied':
return self.on_denied(request, ssn)
def on_subscribe(self, request, ssn):
"""
The subscriber MUST confirm that the hub.topic corresponds to a pending
subscription or unsubscription that it wishes to carry out. If so, the
subscriber MUST respond with an HTTP success (2xx) code with a response
body equal to the hub.challenge parameter. If the subscriber does not
agree with the action, the subscriber MUST respond with a 404 "Not Found"
response.
Hubs MAY make the hub.lease_seconds equal to the value the subscriber
passed in their subscription request but MAY change the value depending
on the hub's policies. To sustain a subscription, the subscriber MUST
re-request the subscription on the hub before hub.lease_seconds seconds
has elapsed.
Hubs MUST enforce lease expirations, and MUST NOT issue perpetual lease
durations.
"""
if 'hub.challenge' not in request.GET:
logger.error(f'Missing hub.challenge in subscription verification {ssn.pk}!')
tasks.save.delay(
pk = ssn.pk,
subscribe_status = 'verifyerror',
verifyerror_count = ssn.verifyerror_count + 1
)
return Response('Missing hub.challenge', status=HTTP_400_BAD_REQUEST)
if not request.GET.get('hub.lease_seconds', '').isdigit():
logger.error(f'Missing integer hub.lease_seconds in subscription verification {ssn.pk}!')
tasks.save.delay(
pk = ssn.pk,
subscribe_status = 'verifyerror',
verifyerror_count = ssn.verifyerror_count + 1
)
return Response('hub.lease_seconds required and must be integer', status=HTTP_400_BAD_REQUEST)
if ssn.unsubscribe_status is not None:
logger.error(f'Subscription {ssn.pk} received subscription verification request,'
f' but its was explicitly unsubscribed before.')
return Response('Unsubscribed')
tasks.save.delay(
pk = ssn.pk,
subscribe_status = 'verified',
lease_expiration_time = now() + timedelta(seconds=int(request.GET['hub.lease_seconds'])),
connerror_count = 0,
huberror_count = 0,
verifyerror_count = 0,
verifytimeout_count = 0
)
logger.info(f'Got {ssn.pk} subscribe confirmation from hub.')
return HttpResponse(request.GET['hub.challenge'])
def on_unsubscribe(self, request, ssn):
if 'hub.challenge' not in request.GET:
logger.error(f'Missing hub.challenge in unsubscription verification {ssn.pk}!')
tasks.save.delay(
pk = ssn.pk,
unsubscribe_status = 'verifyerror',
verifyerror_count = ssn.verifyerror_count + 1
)
return Response('Missing hub.challenge', status=HTTP_400_BAD_REQUEST)
tasks.save.delay(
pk = ssn.pk,
unsubscribe_status = 'verified',
#lease_expiration_time = None, # TODO: should we reset it?
connerror_count = 0,
huberror_count = 0,
verifyerror_count = 0,
verifytimeout_count = 0
)
logger.info(f'Got {ssn.pk} unsubscribe confirmation from hub.')
return HttpResponse(request.GET['hub.challenge'])
def on_denied(self, request, ssn):
"""
TODO
If (and when), the subscription is denied, the hub MUST inform the subscriber by
sending an HTTP GET request to the subscriber's callback URL as given in the
subscription request. This request has the following query string arguments appended:
hub.mode - REQUIRED. The literal string "denied".
hub.topic -REQUIRED. The topic URL given in the corresponding subscription request.
hub.reason -OPTIONAL. The hub may include a reason for which the subscription has been denied.
Hubs may provide an additional HTTP Location header to indicate that the subscriber may
retry subscribing to a different hub.topic. This allows for limited distribution to
specific groups or users in the context of social web applications.
The subscription MAY be denied by the hub at any point (even if it was previously accepted).
The Subscriber SHOULD then consider that the subscription is not possible anymore.
"""
if not ssn:
logger.error(f'Received denial on unwanted subscription with '
f'topic {request.GET["hub.topic"]}!')
return Response('Unwanted subscription')
logger.error(f'Hub denied subscription {ssn.pk}!')
tasks.save.delay(pk=ssn.pk, subscribe_status='denied')
return Response('')
def post(self, request, *args, **kwargs):
"""
The subscriber's callback URL MUST return an HTTP 2xx response code to
indicate a success. The subscriber's callback URL MAY return an HTTP 410
code to indicate that the subscription has been deleted, and the hub MAY
terminate the subscription if it receives that code as a response. The hub
MUST consider all other subscriber response codes as failures
Subscribers SHOULD respond to notifications as quickly as possible; their
success response code SHOULD only indicate receipt of the message, not
acknowledgment that it was successfully processed by the subscriber.
"""
id = args[0] if args else list(kwargs.values())[0]
try:
ssn = Subscription.objects.get(id=id)
except Subscription.DoesNotExist:
logger.error(
f'Received unwanted subscription {id} POST request! Sending status '
'410 back to hub.'
)
return Response('Unwanted subscription', status=410)
ssn.update(time_last_event_received=now())
self.handler_task.delay(request.data)
return Response('') # TODO
| <filename>websubsub/views.py
import json
import logging
from collections import defaultdict
from datetime import timedelta
from django.conf import settings
from django.http import HttpResponse
from django.utils.decorators import classonlymethod
from django.utils.timezone import now
from rest_framework.views import APIView # TODO: can we live without drf dependency?
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST
from .models import Subscription
from . import tasks
logger = logging.getLogger('websubsub.views')
class WssView(APIView):
"""
Generic websub callback processing.
Usage:
Create a celery task that will accept incoming data, then in your urls.py:
>>> from websubsub.views import WssView
>>> from .tasks import news_task, reports_task
>>>
>>> urlpatterns = [
>>> path('/websubcallback/news/<uuid:id>', WssView.as_view(news_task), name='webnews')
>>> path('/websubcallback/reports/<uuid:id>', WssView.as_view(reports_task), name='webreports')
>>> ]
"""
handler_task = None
@classonlymethod
def as_view(cls, handler_task, **kwargs):
kwargs['handler_task'] = handler_task
return super().as_view(**kwargs)
def get(self, request, *args, **kwargs):
"""
Hub sends GET request to callback url to verify subscription/unsubscription or
to inform about subscription denial.
"""
if 'hub.topic' not in request.GET:
logger.error(f'{request.path}: GET request is missing hub.topic')
return Response('Missing hub.topic', status=HTTP_400_BAD_REQUEST)
mode = request.GET.get('hub.mode', None)
if mode not in ['subscribe', 'unsubscribe', 'denied']:
logger.error(f'{request.path}: GET request received unknown hub.mode "{mode}"')
return Response('Missing or unknown hub.mode', status=HTTP_400_BAD_REQUEST)
id = args[0] if args else list(kwargs.values())[0]
try:
ssn = Subscription.objects.get(id=id)
except Subscription.DoesNotExist:
logger.error(
f'Received unwanted subscription {id} "{mode}" request with'
f' topic {request.GET["hub.topic"]} !'
)
return Response('Unwanted subscription', status=HTTP_400_BAD_REQUEST)
if mode == 'subscribe':
return self.on_subscribe(request, ssn)
elif mode == 'unsubscribe':
return self.on_unsubscribe(request, ssn)
elif mode == 'denied':
return self.on_denied(request, ssn)
def on_subscribe(self, request, ssn):
"""
The subscriber MUST confirm that the hub.topic corresponds to a pending
subscription or unsubscription that it wishes to carry out. If so, the
subscriber MUST respond with an HTTP success (2xx) code with a response
body equal to the hub.challenge parameter. If the subscriber does not
agree with the action, the subscriber MUST respond with a 404 "Not Found"
response.
Hubs MAY make the hub.lease_seconds equal to the value the subscriber
passed in their subscription request but MAY change the value depending
on the hub's policies. To sustain a subscription, the subscriber MUST
re-request the subscription on the hub before hub.lease_seconds seconds
has elapsed.
Hubs MUST enforce lease expirations, and MUST NOT issue perpetual lease
durations.
"""
if 'hub.challenge' not in request.GET:
logger.error(f'Missing hub.challenge in subscription verification {ssn.pk}!')
tasks.save.delay(
pk = ssn.pk,
subscribe_status = 'verifyerror',
verifyerror_count = ssn.verifyerror_count + 1
)
return Response('Missing hub.challenge', status=HTTP_400_BAD_REQUEST)
if not request.GET.get('hub.lease_seconds', '').isdigit():
logger.error(f'Missing integer hub.lease_seconds in subscription verification {ssn.pk}!')
tasks.save.delay(
pk = ssn.pk,
subscribe_status = 'verifyerror',
verifyerror_count = ssn.verifyerror_count + 1
)
return Response('hub.lease_seconds required and must be integer', status=HTTP_400_BAD_REQUEST)
if ssn.unsubscribe_status is not None:
logger.error(f'Subscription {ssn.pk} received subscription verification request,'
f' but its was explicitly unsubscribed before.')
return Response('Unsubscribed')
tasks.save.delay(
pk = ssn.pk,
subscribe_status = 'verified',
lease_expiration_time = now() + timedelta(seconds=int(request.GET['hub.lease_seconds'])),
connerror_count = 0,
huberror_count = 0,
verifyerror_count = 0,
verifytimeout_count = 0
)
logger.info(f'Got {ssn.pk} subscribe confirmation from hub.')
return HttpResponse(request.GET['hub.challenge'])
def on_unsubscribe(self, request, ssn):
if 'hub.challenge' not in request.GET:
logger.error(f'Missing hub.challenge in unsubscription verification {ssn.pk}!')
tasks.save.delay(
pk = ssn.pk,
unsubscribe_status = 'verifyerror',
verifyerror_count = ssn.verifyerror_count + 1
)
return Response('Missing hub.challenge', status=HTTP_400_BAD_REQUEST)
tasks.save.delay(
pk = ssn.pk,
unsubscribe_status = 'verified',
#lease_expiration_time = None, # TODO: should we reset it?
connerror_count = 0,
huberror_count = 0,
verifyerror_count = 0,
verifytimeout_count = 0
)
logger.info(f'Got {ssn.pk} unsubscribe confirmation from hub.')
return HttpResponse(request.GET['hub.challenge'])
def on_denied(self, request, ssn):
"""
TODO
If (and when), the subscription is denied, the hub MUST inform the subscriber by
sending an HTTP GET request to the subscriber's callback URL as given in the
subscription request. This request has the following query string arguments appended:
hub.mode - REQUIRED. The literal string "denied".
hub.topic -REQUIRED. The topic URL given in the corresponding subscription request.
hub.reason -OPTIONAL. The hub may include a reason for which the subscription has been denied.
Hubs may provide an additional HTTP Location header to indicate that the subscriber may
retry subscribing to a different hub.topic. This allows for limited distribution to
specific groups or users in the context of social web applications.
The subscription MAY be denied by the hub at any point (even if it was previously accepted).
The Subscriber SHOULD then consider that the subscription is not possible anymore.
"""
if not ssn:
logger.error(f'Received denial on unwanted subscription with '
f'topic {request.GET["hub.topic"]}!')
return Response('Unwanted subscription')
logger.error(f'Hub denied subscription {ssn.pk}!')
tasks.save.delay(pk=ssn.pk, subscribe_status='denied')
return Response('')
def post(self, request, *args, **kwargs):
"""
The subscriber's callback URL MUST return an HTTP 2xx response code to
indicate a success. The subscriber's callback URL MAY return an HTTP 410
code to indicate that the subscription has been deleted, and the hub MAY
terminate the subscription if it receives that code as a response. The hub
MUST consider all other subscriber response codes as failures
Subscribers SHOULD respond to notifications as quickly as possible; their
success response code SHOULD only indicate receipt of the message, not
acknowledgment that it was successfully processed by the subscriber.
"""
id = args[0] if args else list(kwargs.values())[0]
try:
ssn = Subscription.objects.get(id=id)
except Subscription.DoesNotExist:
logger.error(
f'Received unwanted subscription {id} POST request! Sending status '
'410 back to hub.'
)
return Response('Unwanted subscription', status=410)
ssn.update(time_last_event_received=now())
self.handler_task.delay(request.data)
return Response('') # TODO
| en | 0.815121 | # TODO: can we live without drf dependency? Generic websub callback processing. Usage: Create a celery task that will accept incoming data, then in your urls.py: >>> from websubsub.views import WssView >>> from .tasks import news_task, reports_task >>> >>> urlpatterns = [ >>> path('/websubcallback/news/<uuid:id>', WssView.as_view(news_task), name='webnews') >>> path('/websubcallback/reports/<uuid:id>', WssView.as_view(reports_task), name='webreports') >>> ] Hub sends GET request to callback url to verify subscription/unsubscription or to inform about subscription denial. The subscriber MUST confirm that the hub.topic corresponds to a pending subscription or unsubscription that it wishes to carry out. If so, the subscriber MUST respond with an HTTP success (2xx) code with a response body equal to the hub.challenge parameter. If the subscriber does not agree with the action, the subscriber MUST respond with a 404 "Not Found" response. Hubs MAY make the hub.lease_seconds equal to the value the subscriber passed in their subscription request but MAY change the value depending on the hub's policies. To sustain a subscription, the subscriber MUST re-request the subscription on the hub before hub.lease_seconds seconds has elapsed. Hubs MUST enforce lease expirations, and MUST NOT issue perpetual lease durations. #lease_expiration_time = None, # TODO: should we reset it? TODO If (and when), the subscription is denied, the hub MUST inform the subscriber by sending an HTTP GET request to the subscriber's callback URL as given in the subscription request. This request has the following query string arguments appended: hub.mode - REQUIRED. The literal string "denied". hub.topic -REQUIRED. The topic URL given in the corresponding subscription request. hub.reason -OPTIONAL. The hub may include a reason for which the subscription has been denied. 
Hubs may provide an additional HTTP Location header to indicate that the subscriber may retry subscribing to a different hub.topic. This allows for limited distribution to specific groups or users in the context of social web applications. The subscription MAY be denied by the hub at any point (even if it was previously accepted). The Subscriber SHOULD then consider that the subscription is not possible anymore. The subscriber's callback URL MUST return an HTTP 2xx response code to indicate a success. The subscriber's callback URL MAY return an HTTP 410 code to indicate that the subscription has been deleted, and the hub MAY terminate the subscription if it receives that code as a response. The hub MUST consider all other subscriber response codes as failures Subscribers SHOULD respond to notifications as quickly as possible; their success response code SHOULD only indicate receipt of the message, not acknowledgment that it was successfully processed by the subscriber. # TODO | 2.193068 | 2 |
python/bbgo/__init__.py | Yukaii/bbgo | 0 | 6615243 | from . import enums
from . import handlers
from . import utils
from .services import MarketService
from .services import TradingService
from .services import UserDataService
from .stream import Stream
| from . import enums
from . import handlers
from . import utils
from .services import MarketService
from .services import TradingService
from .services import UserDataService
from .stream import Stream
| none | 1 | 1.168568 | 1 | |
caldera_agent/agent_encoder.py | mitre/caldera-agent | 18 | 6615244 | <gh_stars>10-100
import base64
import json
import os
""" Simple functions used for preparing and processing data which traverses the
pipe. Currently, dicts are jsonified and base64'd before traversing the pipe, however, this module can be extended
for different data types or formats (BSON may become a desirable alternative to json at some point). The intention
for placing these functions in their own module is to make it easier to troubleshoot communication errors that arise
from encoding problems, and avoid mismatched encoding schemes as further development is done on the Agent and Implants
"""
class RatOpcodes(object):
    """Opcodes that the agent itself acts on; any other opcode is relayed."""
    # these are the only opcodes that the agent should be aware of
    # any others will be passed through
    Initiate = "initiate"
    Exit = "exit"
class RatMessage(object):
    """Message exchanged between the agent and a rat over the pipe.

    The original class placed this documentation string *after* ``_max_id``,
    so it was a no-op statement instead of ``RatMessage.__doc__``; it now sits
    first, where Python treats it as the class docstring.

    :param str opcode: One of the valid opcodes (see RatOpcodes).
    :param bool response: True if the message was sent from the rat.
    :param int id: Message identifier; auto-assigned when omitted.
    :param dict parameters: Optional opcode-specific arguments.
    """

    # Seed the auto-incrementing id with the pid so ids generated by
    # different agent processes are unlikely to collide.
    _max_id = os.getpid() * 100

    def __init__(self, opcode=None, response=False, id=None, parameters=None):
        self.opcode = opcode
        self.response = response
        # Auto-assign the next id only when the caller did not supply one.
        self.id = RatMessage._max_id if id is None else id
        if id is None:
            RatMessage._max_id += 1
        # Fresh dict per message avoids the shared-mutable-default pitfall.
        self.parameters = {} if parameters is None else parameters

    def to_dict(self):
        """Return a plain-dict view suitable for JSON serialisation."""
        return dict(opcode=self.opcode, response=self.response, id=self.id, parameters=self.parameters)

    def __str__(self):
        return '{}({})'.format(type(self).__name__, self.to_dict())

    __repr__ = __str__
def agent_encode(message: "RatMessage") -> bytes:
    """Encode *message* for the pipe: dict -> JSON -> UTF-8 -> base64 + newline.

    Keys are sorted so the wire form is deterministic; the trailing newline
    delimits frames on the pipe. The return annotation was corrected from
    ``str`` to ``bytes`` — ``base64.b64encode`` returns bytes.
    """
    return base64.b64encode(json.dumps(message.to_dict(), sort_keys=True).encode('UTF-8')) + b'\n'
def agent_decode(incoming_bytes: bytes) -> "RatMessage":
    """Decode a pipe frame back into a RatMessage.

    Reverses agent_encode: (base64 + newline) -> UTF-8 JSON -> dict ->
    RatMessage. The parameter annotation was corrected from ``str`` to
    ``bytes``: frames come off the pipe as the bytes agent_encode emits
    (``base64.b64decode`` tolerates ASCII str too, so str callers still work).
    """
    payload = json.loads(base64.b64decode(incoming_bytes.strip()).decode('UTF-8'))
    return RatMessage(**payload)
| import base64
import json
import os
""" Simple functions used for preparing and processing data which traverses the
pipe. Currently, dicts are jsonified and base64'd before traversing the pipe, however, this module can be extended
for different data types or formats (BSON may become a desirable alternative to json at some point). The intention
for placing these functions in their own module is to make it easier to troubleshoot communication errors that arise
from encoding problems, and avoid mismatched encoding schemes as further development is done on the Agent and Implants
"""
class RatOpcodes(object):
# these are the only opcodes that the agent should be aware of
# any others will be passed through
Initiate = "initiate"
Exit = "exit"
class RatMessage(object):
_max_id = os.getpid() * 100
"""
:param RatMessage opcode: One of the valid opcodes
:param bool response: True if sent from the rat
:param int id: Identifier (unused)
:param [String, String] parameters:
"""
def __init__(self, opcode=None, response=False, id=None, parameters=None):
self.opcode = opcode
self.response = response
self.id = RatMessage._max_id if id is None else id
if id is None:
RatMessage._max_id += 1
self.parameters = {} if parameters is None else parameters
def to_dict(self):
return dict(opcode=self.opcode, response=self.response, id=self.id, parameters=self.parameters)
def __str__(self):
return '{}({})'.format(type(self).__name__, self.to_dict())
__repr__ = __str__
def agent_encode(message: RatMessage) -> str:
# dict -> json -> ascii -> (base64 + newline)
result = base64.b64encode(json.dumps(message.to_dict(), sort_keys=True).encode('UTF-8')) + b'\n'
return result
def agent_decode(incoming_bytes: str) -> RatMessage:
# (base64 + newline) -> ascii -> json -> dict
result = json.loads(base64.b64decode(incoming_bytes.strip()).decode('UTF-8'))
return RatMessage(**result) | en | 0.849887 | Simple functions used for preparing and processing data which traverses the pipe. Currently, dicts are jsonified and base64'd before traversing the pipe, however, this module can be extended for different data types or formats (BSON may become a desirable alternative to json at some point). The intention for placing these functions in their own module is to make it easier to troubleshoot communication errors that arise from encoding problems, and avoid mismatched encoding schemes as further development is done on the Agent and Implants # these are the only opcodes that the agent should be aware of # any others will be passed through :param RatMessage opcode: One of the valid opcodes :param bool response: True if sent from the rat :param int id: Identifier (unused) :param [String, String] parameters: # dict -> json -> ascii -> (base64 + newline) # (base64 + newline) -> ascii -> json -> dict | 3.121651 | 3 |
tests/test_home/test_models/TestCCExtractorVersion.py | satyammittal/sample-platform | 0 | 6615245 | import unittest
from mod_home.models import CCExtractorVersion
from datetime import datetime
from tests.base import ccextractor_version
from mock import mock
class TestCCExtractorVersion(unittest.TestCase):
    """Unit tests for the CCExtractorVersion model."""

    def test_that_init_works_correctly(self):
        """The constructor stores version, parsed release date and commit."""
        expected_date = datetime.strptime(
            ccextractor_version.released, '%Y-%m-%dT%H:%M:%SZ').date()
        model = CCExtractorVersion(
            ccextractor_version.version,
            ccextractor_version.released,
            ccextractor_version.commit,
        )
        self.assertEqual(model.version, ccextractor_version.version)
        self.assertEqual(model.released, expected_date)
        self.assertEqual(model.commit, ccextractor_version.commit)

    def test_that_representation_works(self):
        """str() follows the '<Version ...>' format."""
        model = CCExtractorVersion(
            ccextractor_version.version,
            ccextractor_version.released,
            ccextractor_version.commit,
        )
        self.assertEqual(str(model), '<Version {v}>'.format(v=model.version))
| import unittest
from mod_home.models import CCExtractorVersion
from datetime import datetime
from tests.base import ccextractor_version
from mock import mock
class TestCCExtractorVersion(unittest.TestCase):
def test_that_init_works_correctly(self):
version = ccextractor_version.version
released = ccextractor_version.released
released_date = datetime.strptime(released, '%Y-%m-%dT%H:%M:%SZ').date()
commit = ccextractor_version.commit
actual = CCExtractorVersion(version, released, commit)
self.assertEqual(actual.version, version)
self.assertEqual(actual.released, released_date)
self.assertEqual(actual.commit, commit)
def test_that_representation_works(self):
version = ccextractor_version.version
released = ccextractor_version.released
commit = ccextractor_version.commit
actual = CCExtractorVersion(version, released, commit)
expected = '<Version {v}>'.format(v=actual.version)
self.assertEqual(str(actual), expected)
| none | 1 | 2.636989 | 3 | |
auctions/routes.py | samedamci/auctions | 3 | 6615246 | #!/usr/bin/env python3
from flask import render_template, url_for, flash, redirect, jsonify, request
from auctions.forms import Register, Login, AddAuction
from auctions.models import User, Auction
from auctions import app, db, bcrypt
from flask_login import login_user, current_user, logout_user, login_required
from datetime import datetime
# Displayed all time.
@app.route("/")
def home():
return render_template("auctions.html", auction=Auction)
@app.route("/about")
def about():
return render_template("about.html")
@app.route("/auction/<id>")
def auction(id):
auction = Auction.query.get(id)
return render_template(
"auction.html",
title=auction.title,
description=auction.description,
call_price=auction.call_price,
buy_now_price=auction.buy_now_price,
date_posted=auction.date_posted,
date_posted_human=str(auction.date_posted).split(".", 1)[0],
date_end=auction.date_end,
date_now=datetime.utcnow(),
image=auction.image,
)
# Displayed if user not logged in.
@app.route("/login", methods=["GET", "POST"])
def login():
if current_user.is_authenticated:
return redirect(url_for("home"))
form = Login()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
return redirect(url_for("home"))
else:
flash("Something goes wrong! Check your login data!", "failed")
return render_template("login.html", form=form)
@app.route("/register", methods=["GET", "POST"])
def register():
if current_user.is_authenticated:
return redirect(url_for("home"))
form = Register()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode(
"utf-8"
)
user = User(
username=form.username.data, email=form.email.data, password=<PASSWORD>
)
db.session.add(user)
db.session.commit()
flash(
f'Account "{form.username.data}" has been created successfully!', "success"
)
return redirect(url_for("login"))
return render_template("register.html", form=form)
# Displayed if user logged in.
@app.route("/logout")
def logout():
logout_user()
return redirect(url_for("home"))
@app.route("/settings")
@login_required
def settings():
return render_template("settings.html")
@app.route("/profile")
@login_required
def profile():
return render_template("profile.html")
@app.route("/observed")
@login_required
def observed():
return render_template("observed.html")
@app.route("/cart")
@login_required
def cart():
return render_template("cart.html")
@app.route("/add_auction", methods=["GET", "POST"])
@login_required
def add_auction():
if current_user.is_authenticated:
form = AddAuction()
if form.validate_on_submit():
auction = Auction(
title=form.title.data,
description=form.description.data,
date_end=form.date_end.data,
buy_now_price=form.buy_now_price.data,
call_price=form.call_price.data,
image=form.image.data,
user_id=User.query.filter_by(username=current_user.username).first().id,
)
db.session.add(auction)
db.session.commit()
flash(f'Auction "{form.title.data}" has been added!', "success")
return redirect(url_for("auctions"))
return render_template("add_auction.html", form=form)
# API.
# NOTE(review): this snapshot is built once at import time, so auctions
# created after startup never appear in the API until the process restarts;
# it also assumes ids are contiguous starting at 1 (deleted rows would make
# Auction.query.get(id + 1) return None and the .title access fail).
# Consider querying inside the view functions instead. The loop variable also
# shadows the builtin `id`.
auction_list = []
auction_count = Auction.query.count()
for id in range(auction_count):
    auction_ = Auction.query.get(id + 1)
    auction_list.append(
        {
            "id": id + 1,
            "title": auction_.title,
            "date_posted": auction_.date_posted,
            "date_end": auction_.date_end,
            "description": auction_.description,
            "buy_now_price": auction_.buy_now_price,
            "call_price": auction_.call_price,
            "image": auction_.image,
            "user_id": auction_.user_id,
        }
    )
@app.route("/api/auctions/all", methods=["GET"])
def api_all():
return jsonify(auction_list)
@app.route("/api/auctions", methods=["GET"])
def api_id():
if "id" in request.args:
id = int(request.args["id"])
if id > auction_count or id <= 0:
return "Error: Invalid id.\n"
else:
return "Error: No id field provided. Please specify an id.\n"
results = []
for auction in auction_list:
if auction["id"] == id:
results.append(auction)
return jsonify(results)
| #!/usr/bin/env python3
from flask import render_template, url_for, flash, redirect, jsonify, request
from auctions.forms import Register, Login, AddAuction
from auctions.models import User, Auction
from auctions import app, db, bcrypt
from flask_login import login_user, current_user, logout_user, login_required
from datetime import datetime
# Displayed all time.
@app.route("/")
def home():
return render_template("auctions.html", auction=Auction)
@app.route("/about")
def about():
return render_template("about.html")
@app.route("/auction/<id>")
def auction(id):
auction = Auction.query.get(id)
return render_template(
"auction.html",
title=auction.title,
description=auction.description,
call_price=auction.call_price,
buy_now_price=auction.buy_now_price,
date_posted=auction.date_posted,
date_posted_human=str(auction.date_posted).split(".", 1)[0],
date_end=auction.date_end,
date_now=datetime.utcnow(),
image=auction.image,
)
# Displayed if user not logged in.
@app.route("/login", methods=["GET", "POST"])
def login():
if current_user.is_authenticated:
return redirect(url_for("home"))
form = Login()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
return redirect(url_for("home"))
else:
flash("Something goes wrong! Check your login data!", "failed")
return render_template("login.html", form=form)
@app.route("/register", methods=["GET", "POST"])
def register():
if current_user.is_authenticated:
return redirect(url_for("home"))
form = Register()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode(
"utf-8"
)
user = User(
username=form.username.data, email=form.email.data, password=<PASSWORD>
)
db.session.add(user)
db.session.commit()
flash(
f'Account "{form.username.data}" has been created successfully!', "success"
)
return redirect(url_for("login"))
return render_template("register.html", form=form)
# Displayed if user logged in.
@app.route("/logout")
def logout():
logout_user()
return redirect(url_for("home"))
@app.route("/settings")
@login_required
def settings():
return render_template("settings.html")
@app.route("/profile")
@login_required
def profile():
return render_template("profile.html")
@app.route("/observed")
@login_required
def observed():
return render_template("observed.html")
@app.route("/cart")
@login_required
def cart():
return render_template("cart.html")
@app.route("/add_auction", methods=["GET", "POST"])
@login_required
def add_auction():
if current_user.is_authenticated:
form = AddAuction()
if form.validate_on_submit():
auction = Auction(
title=form.title.data,
description=form.description.data,
date_end=form.date_end.data,
buy_now_price=form.buy_now_price.data,
call_price=form.call_price.data,
image=form.image.data,
user_id=User.query.filter_by(username=current_user.username).first().id,
)
db.session.add(auction)
db.session.commit()
flash(f'Auction "{form.title.data}" has been added!', "success")
return redirect(url_for("auctions"))
return render_template("add_auction.html", form=form)
# API.
auction_list = []
auction_count = Auction.query.count()
for id in range(auction_count):
auction_ = Auction.query.get(id + 1)
auction_list.append(
{
"id": id + 1,
"title": auction_.title,
"date_posted": auction_.date_posted,
"date_end": auction_.date_end,
"description": auction_.description,
"buy_now_price": auction_.buy_now_price,
"call_price": auction_.call_price,
"image": auction_.image,
"user_id": auction_.user_id,
}
)
@app.route("/api/auctions/all", methods=["GET"])
def api_all():
return jsonify(auction_list)
@app.route("/api/auctions", methods=["GET"])
def api_id():
if "id" in request.args:
id = int(request.args["id"])
if id > auction_count or id <= 0:
return "Error: Invalid id.\n"
else:
return "Error: No id field provided. Please specify an id.\n"
results = []
for auction in auction_list:
if auction["id"] == id:
results.append(auction)
return jsonify(results)
| en | 0.751313 | #!/usr/bin/env python3 # Displayed all time. # Displayed if user not logged in. # Displayed if user logged in. # API. | 2.605693 | 3 |
algorithms/ciphers/bkdr.py | ayushijain218/python | 204 | 6615247 | <reponame>ayushijain218/python<gh_stars>100-1000
#!/usr/bin/python
def BKDRHash(s):
    '''BKDR string hash (Brian Kernighan & Dennis Ritchie, presented in
    "The C Programming Language"): repeatedly multiply an accumulator by a
    seed and add each character's code point.
    refer to: http://www.partow.net/programming/hashfunctions/

    NOTE(review): the C original yields a 32-bit digest, but Python integers
    are unbounded, so this implementation is NOT truncated to 32 bits — mask
    the result with 0xFFFFFFFF if C-compatible output is required.
    '''
    seed = 131  # can be any combination of 31, like 31 131 1313 13131 131313
    digest = 0
    # Iterate characters directly instead of indexing via range(len(s)).
    for ch in s:
        digest = (digest * seed) + ord(ch)
    return digest


if __name__ == '__main__':
    cleartext = "This is a test string for use with the BKDRHash"
    # BUG FIX: `print BKDRHash(...)` is Python 2-only syntax; the call form
    # below works on both Python 2 and Python 3.
    print(BKDRHash(cleartext))
def BKDRHash(s):
'''BKDR Hash is an algorithm invented by <NAME>, <NAME>.
The algorithm was presented in/on "The C Programming Language".
The digest (hash generated by this algorithm) is 32 bits (4 Bytes) in length.
refer to: http://www.partow.net/programming/hashfunctions/
'''
seed = 131 # can be any combination of 31, like 31 131 1313 13131 131313
hash = 0
for i in range(len(s)):
hash = (hash * seed) + ord(s[i])
return hash
if __name__ == '__main__':
cleartext = "This is a test string for use with the BKDRHash"
print BKDRHash(cleartext) | en | 0.885922 | #!/usr/bin/python BKDR Hash is an algorithm invented by <NAME>, <NAME>. The algorithm was presented in/on "The C Programming Language". The digest (hash generated by this algorithm) is 32 bits (4 Bytes) in length. refer to: http://www.partow.net/programming/hashfunctions/ # can be any combination of 31, like 31 131 1313 13131 131313 | 3.708901 | 4 |
Utils/BatteryDBNPersistentFailure.py | DavidLSmyth/OccupancyGridAgent | 0 | 6615248 | <filename>Utils/BatteryDBNPersistentFailure.py
# -*- coding: utf-8 -*-
"""
Created on Fri May 31 10:52:16 2019
@author: 13383861
"""
from scipy.stats import multivariate_normal
import matplotlib.pyplot as plt
import numpy as np
import math
#%%
##################### Persistence Transition Model #####################
#The persistence transition model is a M*K x M*K matrix representing a factorial HMM,
#where M is the number of possible battery states
#and K is the number of possible BatteryMeterBroken states.
#Initially assume the battery states are {0,1,2,3,4,5} and the BatteryMeterBroken states are {0,1}
#The tranisition matrix is then a 10x10 matrix, anaagous to the non-factorial HMM, where:
#Rows 0-5 correspond to the previous battery state from 0-5 and the BatteryMeterBroken state 0 (false)
#Rows 5-10 correspond to the previous battery state from 0-5 and the BatteryMeterBroken state 1 (true)
#Columns 0-5 correspond to the current battery state from 0-5 and the current BatteryMeterBroken state 0 (false)
#Columns 5-10 correspond to the current battery state from 0-5 and the current BatteryMeterBroken state 1 (true)
# 12x12 joint transition matrix for the factored state (Battery, BatteryMeterBroken):
# p(Battery_t, Broken_t | Battery_{t-1}, Broken_{t-1}).
# Index convention (rows = previous state, columns = next state):
#   0-5  : battery level 0-5 with BatteryMeterBroken = 0 (meter working)
#   6-11 : battery level 0-5 with BatteryMeterBroken = 1 (meter broken)
persistent_battery_state_transition_matrix = np.array([
# Rows 0-5: a working meter keeps working with prob 0.999 (left half) and
# breaks with prob 0.001 (right half); the battery transition itself is the
# same hand-entered 6x6 kernel in both halves.
[_*0.999 for _ in [0.6,0.1,0.1,0.1,0.05,0.05]] + [_*0.001 for _ in [0.6,0.1,0.1,0.1,0.05,0.05]],
[_*0.999 for _ in [0.3,0.5,0.1,0.05,0.03,0.02]] + [_*0.001 for _ in [0.3,0.5,0.1,0.05,0.03,0.02]],
[_*0.999 for _ in [0.1,0.2,0.5,0.1,0.05,0.05]] + [_*0.001 for _ in [0.1,0.2,0.5,0.1,0.05,0.05]],
[_*0.999 for _ in [0.05,0.05,0.2,0.5,0.1,0.1]] + [_*0.001 for _ in [0.05,0.05,0.2,0.5,0.1,0.1]],
[_*0.999 for _ in [0.05,0.05,0.1,0.2,0.5,0.1]] + [_*0.001 for _ in [0.05,0.05,0.1,0.2,0.5,0.1]],
[_*0.999 for _ in [0.05,0.05,0.1,0.1,0.1,0.6]] + [_*0.001 for _ in [0.05,0.05,0.1,0.1,0.1,0.6]],
# Rows 6-11: a broken meter never recovers (persistent failure), hence the
# zero block in columns 0-5.
[0]*6 + [0.6,0.1,0.1,0.1,0.05,0.05],
[0]*6 + [0.3,0.5,0.1,0.05,0.03,0.02],
[0]*6 + [0.1,0.2,0.5,0.1,0.05,0.05],
[0]*6 + [0.05,0.05,0.2,0.5,0.1,0.1],
[0]*6 + [0.05,0.05,0.1,0.2,0.5,0.1],
[0]*6 + [0.05,0.05,0.1,0.1,0.1,0.6]], dtype = np.float64)
# Normalize each row to 1 (the 0.999/0.001 split already sums to 1, but this
# guards against rounding in the hand-entered kernels).
for row_index, row in enumerate(persistent_battery_state_transition_matrix):
    persistent_battery_state_transition_matrix[row_index] = row/row.sum()
assert all([math.isclose(persistent_battery_state_transition_matrix[_].sum(),1, rel_tol = 0.0000001) for _ in range(len(persistent_battery_state_transition_matrix))])
##################### Persistence Transition Model #####################
#%%
##################### Persistence Sensor Model #####################
#When sensor is ok, sensor model for BMeter is identical to the transient failure model; when the sensor is broken,
#it says BMeter is always 0, regardless of the actual battery charge.
no_battery_levels = 6
battery_meter_levels = list(range(no_battery_levels))
# Spread of the discrete Gaussian used as the working-sensor meter model.
standard_deviation = 2
# Normalized discrete approximation of a Gaussian centred on each true battery
# level: row b is p(BMeter = . | Battery = b, sensor working).
battery_meter_gaussians = [
    multivariate_normal([level], [standard_deviation]).pdf(battery_meter_levels)
    / multivariate_normal([level], [standard_deviation]).pdf(battery_meter_levels).sum()
    for level in battery_meter_levels
]
# Alternative with no partial observability (identity sensor):
# battery_meter_gaussians = np.identity(6, dtype = np.float64)
batt_meter_matrix_persistent = np.concatenate(battery_meter_gaussians, axis = 0).reshape(no_battery_levels,no_battery_levels)


def get_sensor_model_probability_matrix_persistent_battery(battery_meter_reading):
    '''
    Return the diagonal sensor-model matrix for
    p(BatteryMeter = battery_meter_reading | Battery, BatteryMeterBroken).

    Implicitly follows the order used by the belief distribution and the
    transition matrix: diagonal entries 0-5 correspond to Battery = 0..5 with
    BatteryMeterBroken = 0 (working); entries 6-11 to Battery = 0..5 with
    BatteryMeterBroken = 1 (broken).
    '''
    if 0 <= battery_meter_reading < no_battery_levels:
        BMSensorPersistentBatteryModel = np.zeros((no_battery_levels*2, no_battery_levels*2))
        # Working-meter half: the row of the discrete-Gaussian meter model.
        upper = batt_meter_matrix_persistent[battery_meter_reading]
        # BUG FIX: a broken meter always reads 0 regardless of the true charge
        # (see get_persistent_battery_failure_sensor_model_probability), so
        # p(reading | broken, battery=b) is 1 for EVERY b when reading == 0
        # and 0 otherwise. The previous code used [1, 0, ..., 0] for every
        # reading, wrongly tying the probability to battery state 0.
        if battery_meter_reading == 0:
            lower = np.ones(no_battery_levels)
        else:
            lower = np.zeros(no_battery_levels)
        np.fill_diagonal(BMSensorPersistentBatteryModel, np.append(upper, lower))
        return BMSensorPersistentBatteryModel
    else:
        raise Exception("Please provide a valid sensor reading,")
##################### Persistence Sensor Model #####################
#%%
##################### BatterySensorBroken Distribution #####################
#marginalize over the battery capacity variable
def get_battery_sensor_broken_state_distribution(distribution_vector):
    '''
    Return the marginal distribution of the BatteryMeterBroken variable.

    By convention the flat belief vector is ordered:
    entries [0:6]  -- p(battery = 0..5, BMeter not broken)
    entries [6:12] -- p(battery = 0..5, BMeter broken)

    Marginalizing out the battery-capacity variable yields the length-2
    vector [p(not broken), p(broken)]. (The original docstring described the
    battery marginal; that description belongs to
    get_battery_state_distribution.)
    '''
    # Derive the number of battery levels from the vector itself instead of
    # relying on the module-level constant; identical for length-12 input.
    half = len(distribution_vector) // 2
    return np.array([distribution_vector[:half].sum(), distribution_vector[half:].sum()])
##################### BatterySensorBroken Distribution #####################
#%%
##################### Battery State Distribution #####################
#marginalize over the BatterySensorBroken related variables
def get_battery_state_distribution(distribution_vector):
    '''
    Return the marginal distribution of the battery-capacity variable.

    By convention the flat belief vector is ordered:
    entries [0:6]  -- p(battery = 0..5, BMeter not broken)
    entries [6:12] -- p(battery = 0..5, BMeter broken)

    Marginalizing out BMeterBroken yields a length-6 vector whose entry i is
    p(battery = i). (The original docstring described the BMeterBroken
    marginal; that description belongs to
    get_battery_sensor_broken_state_distribution.)
    '''
    arr = np.asarray(distribution_vector)
    half = len(arr) // 2  # number of battery levels
    # Element-wise sum of the not-broken and broken halves.
    return arr[:half] + arr[half:]
##################### Battery State Distribution #####################
############################################################################
##################### DBN with CPDs and Marginalization ####################
############################################################################
##################### PersistentBatteryTransitionModel #####################
#As above, could model this as a HMM but it leads to some complexity (cannot simply state CPDs).
#Try the Bayes Net approach of summing out sensorBroken persistent variable.
#Define everything piecewise first
#convention i(rows) corresponds to bm_broken_t_minus_one, j(cols) corresponds to bm_broken_t
# Row index = Broken_{t-1}, column index = Broken_t.
batt_m_transition_matrix = np.array([[0.999, 0.001], [0, 1]], dtype = np.float64)

def bm_broken_transition_probability(bm_broken_t, bm_broken_t_minus_one):
    '''
    Return p(Broken_t = bm_broken_t | Broken_{t-1} = bm_broken_t_minus_one).

    0 means the battery meter works, 1 means it is broken; row [0, 1] encodes
    that a broken meter stays broken forever (persistent failure).
    '''
    previous_row = batt_m_transition_matrix[bm_broken_t_minus_one]
    return previous_row[bm_broken_t]
def batt_cap_transition_probability(battery_cap_t, battery_cap_t_minus_one):
    '''
    Return p(Battery_t = battery_cap_t | Battery_{t-1} = battery_cap_t_minus_one).

    NOTE(review): `battery_state_transition_matrix` is not defined anywhere in
    this module (only the 12x12 `persistent_battery_state_transition_matrix`
    is). This presumably should be the 6x6 battery-only transition kernel from
    the transient-failure model -- confirm, otherwise this raises NameError.
    '''
    return battery_state_transition_matrix[battery_cap_t_minus_one][battery_cap_t]
def get_persistent_battery_failure_updated_state_esimate(battery_cap_t, bm_broken_t, b_meter_measurement, previous_probability_vector):
    '''
    Return the UNNORMALIZED updated state estimate proportional to
    p(battery_cap_t, bm_broken_t | b_meter_measurement, previous evidence).

    Standard HMM forward update over the factored state: the transition term
    sums over every previous (bm_broken, battery_cap) pair, multiplying the
    two (assumed independent) transition CPDs by the previous belief; the
    result is then weighted by the sensor likelihood. The caller is expected
    to normalize the full belief vector afterwards (see update_all_probs).

    (Debug print statements and commented-out tracing from the original
    implementation were removed; the computation is unchanged.)
    '''
    transition_prob = 0
    for prev_bm_broken_state in [0, 1]:
        for prev_batt_cap_state in range(no_battery_levels):
            transition_prob += (
                bm_broken_transition_probability(bm_broken_t, prev_bm_broken_state)
                * batt_cap_transition_probability(battery_cap_t, prev_batt_cap_state)
                * previous_probability_vector[prev_bm_broken_state][prev_batt_cap_state]
            )
    sensor_prob = get_persistent_battery_failure_sensor_model_probability(battery_cap_t, bm_broken_t, b_meter_measurement)
    # Unnormalized posterior mass; update_all_probs normalizes the whole vector.
    return sensor_prob * transition_prob
def get_persistent_battery_failure_sensor_model_probability(battery_cap_t, bm_broken_t, b_meter_measurement):
    """Return p(BMeter = b_meter_measurement | Battery = battery_cap_t, Broken = bm_broken_t)."""
    if bm_broken_t == 0:
        # AI:AMA p594: when the sensor is OK, the sensor model for BMeter is
        # identical to the transient-failure model.
        # BUG FIX: the original indexed `batt_meter_matrix`, which is not
        # defined anywhere in this module (NameError at runtime);
        # `batt_meter_matrix_persistent` is the matrix this file builds for
        # exactly that model.
        return batt_meter_matrix_persistent[b_meter_measurement][battery_cap_t]
    else:
        # AI:AMA p594: when the sensor is broken it always reads 0,
        # regardless of the actual battery charge.
        return 1 if b_meter_measurement == 0 else 0
def update_all_probs(prev_distribution: np.ndarray, b_meter_measurement):
    """Perform one full filtering step and return the normalized 2x6 belief.

    `prev_distribution` is a 2x6 array: row 0 holds the joint belief for the
    six battery levels with the meter working, row 1 the same with the meter
    broken. Every (broken, level) cell is re-estimated from the transition and
    sensor models given the new meter reading.

    NOTE(review): `normalize_belief_vector` is not defined in this module --
    presumably imported/defined elsewhere in the project; confirm, otherwise
    this raises NameError. (Annotation corrected: `np.array` is a function,
    `np.ndarray` is the type.)
    """
    #assuming prev_distribution is a 2 x 6 vector, where positions (0, 0-5) represent the distribution
    #of the battery capacity states given that the sensor isn't broken and positions (1, 0-5) represent
    #the distribution of the battery capacity states given that the sensor is broken
    updated_belief_vector = np.zeros((2, 6), dtype = np.float64)
    #sensor_working_dist = batt_cap_transition_probability[0]
    for sensor_working_value in [0,1]:
        for bat_cap_t_index in range(no_battery_levels):
            updated_belief_vector[sensor_working_value][bat_cap_t_index] = get_persistent_battery_failure_updated_state_esimate(bat_cap_t_index, sensor_working_value , b_meter_measurement, prev_distribution)
    #return normalized beliefs
    return normalize_belief_vector(updated_belief_vector)
def get_expected_battery_cap(belief_vector):
    '''
    Return E[battery capacity] under a 2x6 joint belief, where row 0 is the
    battery distribution with the meter working and row 1 with it broken.
    '''
    battery_marginal = get_battery_cap_conditional_dist(belief_vector)
    return get_expected_value(battery_marginal)
def get_battery_cap_conditional_dist(belief_vector):
    '''
    Marginalize the meter-broken axis out of a 2x6 joint belief (row 0 =
    meter working, row 1 = broken) and return the length-6 conditional
    distribution of battery capacity given the meter readings to date.
    '''
    return np.asarray(belief_vector).sum(axis=0)
def get_battery_meter_broken_conditional_dist(belief_vector):
    '''
    Marginalize the battery-capacity axis out of a 2x6 joint belief (row 0 =
    meter working, row 1 = broken) and return the length-2 conditional
    distribution of the meter-broken variable given the readings to date.
    '''
    return np.asarray(belief_vector).sum(axis=1)
##################### PersistentBatteryTransitionModel #####################
#
############################################################################
##################### DBN with CPDs and Marginalization ####################
############################################################################
def get_battery_expected_value(belief_state):
    """Return E[battery capacity] for a flat, length-12 belief vector.

    Layout follows the module convention: entries [0:6] are
    p(battery = i, meter not broken) and entries [6:12] are
    p(battery = i, meter broken); the broken axis is marginalized out before
    taking the expectation. (Commented-out debug code from the original was
    removed; the number of battery levels is derived from the vector length
    instead of the module-level constant -- identical for length-12 input.)
    """
    half = len(belief_state) // 2  # number of battery levels
    return sum(i * (belief_state[i] + belief_state[i + half]) for i in range(half))
def get_expected_value(belief_state):
    """Return the expectation sum(i * p_i) of a discrete distribution indexed from 0."""
    return sum(index * weight for index, weight in enumerate(belief_state))
#%%
#Test model with persistent failure variable
#by convention, the state distribution vector corresponds to
# p(battery = 0, batterySensorBroken = False),
#.
#.
#p(battery = 5, batterySensorBroken = False),
#p(battery = 0, batterySensorBroken = True),
#.
#.
#p(battery = 5, batterySensorBroken = True)
#easier to state values relative to 1 and then normalize
initial_distribution_persistent_battery_sensor_broken = np.array([0.05, 0.1, 0.05, 0.2, 0.05, 0.55,
0, 0, 0, 0, 0, 0])
initial_distribution_persistent_battery_sensor_broken = normalize_belief_vector(initial_distribution_persistent_battery_sensor_broken)
initial_distribution_persistent_battery_sensor_broken_factored = initial_distribution_persistent_battery_sensor_broken.reshape((2,6))
#%%
expected_values_persistent = [get_expected_battery_cap(initial_distribution_persistent_battery_sensor_broken_factored)]
batt_cap_dist = [get_battery_cap_conditional_dist(initial_distribution_persistent_battery_sensor_broken_factored)]
batt_sensor_broken_dist = [get_battery_meter_broken_conditional_dist(initial_distribution_persistent_battery_sensor_broken_factored)]
next_dist = update_all_probs(initial_distribution_persistent_battery_sensor_broken_factored, 5)
batt_cap_dist.append(get_battery_cap_conditional_dist(next_dist))
batt_sensor_broken_dist.append(get_battery_meter_broken_conditional_dist(next_dist))
expected_values_persistent.append(get_expected_battery_cap(next_dist))
next_dist = update_all_probs(initial_distribution_persistent_battery_sensor_broken_factored, 5)
batt_cap_dist.append(get_battery_cap_conditional_dist(next_dist))
batt_sensor_broken_dist.append(get_battery_meter_broken_conditional_dist(next_dist))
expected_values_persistent.append(get_expected_battery_cap(next_dist))
next_dist = update_all_probs(initial_distribution_persistent_battery_sensor_broken_factored, 5)
batt_cap_dist.append(get_battery_cap_conditional_dist(next_dist))
batt_sensor_broken_dist.append(get_battery_meter_broken_conditional_dist(next_dist))
expected_values_persistent.append(get_expected_battery_cap(next_dist))
next_dist = update_all_probs(initial_distribution_persistent_battery_sensor_broken_factored, 5)
batt_cap_dist.append(get_battery_cap_conditional_dist(next_dist))
batt_sensor_broken_dist.append(get_battery_meter_broken_conditional_dist(next_dist))
expected_values_persistent.append(get_expected_battery_cap(next_dist))
next_dist = update_all_probs(initial_distribution_persistent_battery_sensor_broken_factored, 5)
batt_cap_dist.append(get_battery_cap_conditional_dist(next_dist))
batt_sensor_broken_dist.append(get_battery_meter_broken_conditional_dist(next_dist))
expected_values_persistent.append(get_expected_battery_cap(next_dist))
next_dist = update_all_probs(initial_distribution_persistent_battery_sensor_broken_factored, 5)
batt_cap_dist.append(get_battery_cap_conditional_dist(next_dist))
batt_sensor_broken_dist.append(get_battery_meter_broken_conditional_dist(next_dist))
expected_values_persistent.append(get_expected_battery_cap(next_dist))
next_dist = update_all_probs(initial_distribution_persistent_battery_sensor_broken_factored, 0)
batt_cap_dist.append(get_battery_cap_conditional_dist(next_dist))
batt_sensor_broken_dist.append(get_battery_meter_broken_conditional_dist(next_dist))
expected_values_persistent.append(get_expected_battery_cap(next_dist))
next_dist = update_all_probs(initial_distribution_persistent_battery_sensor_broken_factored, 0)
batt_cap_dist.append(get_battery_cap_conditional_dist(next_dist))
batt_sensor_broken_dist.append(get_battery_meter_broken_conditional_dist(next_dist))
expected_values_persistent.append(get_expected_battery_cap(next_dist))
next_dist = update_all_probs(initial_distribution_persistent_battery_sensor_broken_factored, 5)
batt_cap_dist.append(get_battery_cap_conditional_dist(next_dist))
batt_sensor_broken_dist.append(get_battery_meter_broken_conditional_dist(next_dist))
expected_values_persistent.append(get_expected_battery_cap(next_dist))
next_dist = update_all_probs(initial_distribution_persistent_battery_sensor_broken_factored, 5)
batt_cap_dist.append(get_battery_cap_conditional_dist(next_dist))
batt_sensor_broken_dist.append(get_battery_meter_broken_conditional_dist(next_dist))
expected_values_persistent.append(get_expected_battery_cap(next_dist))
print(expected_values_persistent)
#expected value of
plt.plot([_ for _ in range(len(expected_values_persistent))], expected_values_persistent, label = "Expected battery capacity")
plt.plot([_ for _ in range(len(batt_sensor_broken_dist))], [_[1] for _ in batt_sensor_broken_dist], label = "Prob of sensor failure")
plt.legend()
#%%
print(batt_sensor_broken_dist)
plt.clf()
plt.plot([_ for _ in range(len(batt_sensor_broken_dist))], batt_sensor_broken_dist)
| <filename>Utils/BatteryDBNPersistentFailure.py
# -*- coding: utf-8 -*-
"""
Created on Fri May 31 10:52:16 2019
@author: 13383861
"""
from scipy.stats import multivariate_normal
import matplotlib.pyplot as plt
import numpy as np
import math
#%%
##################### Persistence Transition Model #####################
#The persistence transition model is a M*K x M*K matrix representing a factorial HMM,
#where M is the number of possible battery states
#and K is the number of possible BatteryMeterBroken states.
#Initially assume the battery states are {0,1,2,3,4,5} and the BatteryMeterBroken states are {0,1}
#The tranisition matrix is then a 10x10 matrix, anaagous to the non-factorial HMM, where:
#Rows 0-5 correspond to the previous battery state from 0-5 and the BatteryMeterBroken state 0 (false)
#Rows 5-10 correspond to the previous battery state from 0-5 and the BatteryMeterBroken state 1 (true)
#Columns 0-5 correspond to the current battery state from 0-5 and the current BatteryMeterBroken state 0 (false)
#Columns 5-10 correspond to the current battery state from 0-5 and the current BatteryMeterBroken state 1 (true)
persistent_battery_state_transition_matrix = np.array([
#first list corresponds to 0 to 0 transition Second list corresponds to 0 to 1 transition for
#for BM Broken (Not broken to not broken) BM broken (not broken to broken)
[_*0.999 for _ in [0.6,0.1,0.1,0.1,0.05,0.05]] + [_*0.001 for _ in [0.6,0.1,0.1,0.1,0.05,0.05]],
[_*0.999 for _ in [0.3,0.5,0.1,0.05,0.03,0.02]] + [_*0.001 for _ in [0.3,0.5,0.1,0.05,0.03,0.02]],
[_*0.999 for _ in [0.1,0.2,0.5,0.1,0.05,0.05]] + [_*0.001 for _ in [0.1,0.2,0.5,0.1,0.05,0.05]],
[_*0.999 for _ in [0.05,0.05,0.2,0.5,0.1,0.1]] + [_*0.001 for _ in [0.05,0.05,0.2,0.5,0.1,0.1]],
[_*0.999 for _ in [0.05,0.05,0.1,0.2,0.5,0.1]] + [_*0.001 for _ in [0.05,0.05,0.1,0.2,0.5,0.1]],
[_*0.999 for _ in [0.05,0.05,0.1,0.1,0.1,0.6]] + [_*0.001 for _ in [0.05,0.05,0.1,0.1,0.1,0.6]],
#first list corresponds to 1 to 0 transition Second list corresponds to 0 to 1 transition for
#for BM Broken (Broken to not Broken) (BM Broken to BM Broken)
[0]*6 + [0.6,0.1,0.1,0.1,0.05,0.05],
[0]*6 + [0.3,0.5,0.1,0.05,0.03,0.02],
[0]*6 + [0.1,0.2,0.5,0.1,0.05,0.05],
[0]*6 + [0.05,0.05,0.2,0.5,0.1,0.1],
[0]*6 + [0.05,0.05,0.1,0.2,0.5,0.1],
[0]*6 + [0.05,0.05,0.1,0.1,0.1,0.6]], dtype = np.float64)
#normalize each row to 1
for row_index, row in enumerate(persistent_battery_state_transition_matrix):
persistent_battery_state_transition_matrix[row_index] = row/row.sum()
assert all([math.isclose(persistent_battery_state_transition_matrix[_].sum(),1, rel_tol = 0.0000001) for _ in range(len(persistent_battery_state_transition_matrix))])
##################### Persistence Transition Model #####################
#%%
##################### Persistence Sensor Model #####################
#When sensor is ok, sensor model for BMeter is identical to the transient failure model; when the sensor is broken,
#it says BMeter is always 0, regardless of the actual battery charge.
no_battery_levels = 6
battery_meter_levels = [_ for _ in range(no_battery_levels)]
#assertion standard_deviation = 2.5
standard_deviation = 2
#normalized discrete approximation of gaussian with mean battery level, sd standard_deviation. This is the sensor model
battery_meter_gaussians = [multivariate_normal([battery_meter_level], [standard_deviation]).pdf(battery_meter_levels)/multivariate_normal([battery_meter_level], [standard_deviation]).pdf(battery_meter_levels).sum() for battery_meter_level in battery_meter_levels]
#no partial observability!
#battery_meter_gaussians = np.identity(6, dtype = np.float64)
batt_meter_matrix_persistent = np.concatenate(battery_meter_gaussians, axis = 0).reshape(no_battery_levels,no_battery_levels)
def get_sensor_model_probability_matrix_persistent_battery(battery_meter_reading):
'''
Returns the sensor model corresponding to p(BatterMeter = batter_meter_reading | BatteryMeterBroken, State).
Implicitly follows order specified by the belief distribution and transition matrix, whereby
0-5 corresponds to BatteryMeterBroken = 0 (Battery meter broken is false)
6-12 corresponds to BatteryMeterBroken = 1 (Battery meter broken is true).
Maybe this should be a continuous Gaussian as outlined in #http://www.ee.columbia.edu/~sfchang/course/svia-F03/papers/factorial-HMM-97.pdf Page 4 (Factorial Hidden Markov Models), <NAME>
'''
if 0 <= battery_meter_reading < no_battery_levels:
BMSensorPersistentBatteryModel = np.zeros((no_battery_levels*2,no_battery_levels*2))
BMSensorNotBrokenMatrix = np.concatenate(batt_meter_matrix_persistent, axis = 0).reshape(no_battery_levels,no_battery_levels)
#concatenate all to form 12x12 matrix
#upper diagonal corresponds to sensor model for not broken matrix
upper = BMSensorNotBrokenMatrix[battery_meter_reading]
#lower diagonal corresponds to sensor model for broken matrix
lower = np.append(np.array([1]), np.zeros((no_battery_levels-1)))
np.fill_diagonal(BMSensorPersistentBatteryModel, np.append(upper, lower))
return BMSensorPersistentBatteryModel
else:
raise Exception("Please provide a valid sensor reading,")
##################### Persistence Sensor Model #####################
#%%
##################### Battery State Distribution #####################
#marginalize over the BatterySensorBroken related variables
def get_battery_sensor_broken_state_distribution(distribution_vector):
'''
By convention, the distribution vector is assumed to take the form:
BMeter not broken, battery = 0
BMeter not broken, battery = 1
BMeter not broken, battery = 2
.
.
BMeter not broken, battery = 5
BMeter broken, battery = 0
.
.
BMeter broken, battery = 5
To get the battery state distribution, marginalize over the BMeterBroken variable
'''
return np.array([distribution_vector[:no_battery_levels].sum(), distribution_vector[no_battery_levels:].sum()])
##################### Battery State Distribution #####################
#%%
##################### BatterySensorBroken Distribution #####################
#marginalize over the BatterySensorBroken related variables
def get_battery_state_distribution(distribution_vector):
'''
By convention, the distribution vector is assumed to take the form:
BMeter not broken, battery = 0
BMeter not broken, battery = 1
BMeter not broken, battery = 2
.
.
BMeter not broken, battery = 5
BMeter broken, battery = 0
.
.
BMeter broken, battery = 5
To get the BMeterBroken state distribution, marginalize over the Battery variable
'''
return np.array([sum([distribution_vector[_], distribution_vector[no_battery_levels+_]]) for _ in range(no_battery_levels)])
##################### BatterySensorBroken Distribution #####################
############################################################################
##################### DBN with CPDs and Marginalization ####################
############################################################################
##################### PersistentBatteryTransitionModel #####################
#As above, could model this as a HMM but it leads to some complexity (cannot simply state CPDs).
#Try the Bayes Net approach of summing out sensorBroken persistent variable.
#Define everything piecewise first
#convention i(rows) corresponds to bm_broken_t_minus_one, j(cols) corresponds to bm_broken_t
batt_m_transition_matrix = np.array([[0.999, 0.001], [0, 1]], dtype = np.float64)
def bm_broken_transition_probability(bm_broken_t, bm_broken_t_minus_one):
'''
bm_broken_t takes value 0 if battery meter is not broken, 1 if broken
'''
return batt_m_transition_matrix[bm_broken_t_minus_one][bm_broken_t]
def batt_cap_transition_probability(battery_cap_t, battery_cap_t_minus_one):
'''
'''
return battery_state_transition_matrix[battery_cap_t_minus_one][battery_cap_t]
def get_persistent_battery_failure_updated_state_esimate(battery_cap_t, bm_broken_t, b_meter_measurement, previous_probability_vector):
'''
Returns the updated state estimate for p(bat_cap_t, bm_broken_t | b_meter_measurment)
'''
#print("sum Probability of battery_cap_t = {} over all previous = {}".format(battery_cap_t, sum([batt_cap_transition_probability(battery_cap_t, battery_cap_t_minus_one) for battery_cap_t_minus_one in range(6)])), end = '*')
#print("Probability of bm_broken_t = {} over all previous = {}".format(bm_broken_t, sum([bm_broken_transition_probability(bm_broken_t, bm_broken_t_minus_one) for bm_broken_t_minus_one in [0,1]])), sum([batt_cap_transition_probability(battery_cap_t, battery_cap_t_minus_one) for battery_cap_t_minus_one in range(6)]), end = '\n\n')
#transition_prob = sum([bm_broken_transition_probability(bm_broken_t, bm_broken_t_minus_one) for bm_broken_t_minus_one in [0,1]]) * sum([batt_cap_transition_probability(battery_cap_t, battery_cap_t_minus_one) for battery_cap_t_minus_one in range(6)])
transition_prob = 0
for prev_bm_broken_state in [0,1]:
for prev_batt_cap_state in range(no_battery_levels):
transition_prob += bm_broken_transition_probability(bm_broken_t, prev_bm_broken_state)* batt_cap_transition_probability(battery_cap_t, prev_batt_cap_state) * previous_probability_vector[prev_bm_broken_state][prev_batt_cap_state]
# print("bm_broken transition prob: ", bm_broken_transition_probability(bm_broken_t, prev_bm_broken_state))
# print("b_cap transition prob: ", batt_cap_transition_probability(battery_cap_t, prev_batt_cap_state))
# print("Prev_prob: ", previous_probability_vector[prev_bm_broken_state][prev_batt_cap_state])
print("Transition prob to battery_cap_t = {}, bm_broken_t = {} is {}".format(battery_cap_t, bm_broken_t, transition_prob))
sensor_prob = get_persistent_battery_failure_sensor_model_probability(battery_cap_t, bm_broken_t, b_meter_measurement)
#don't forget to normalize once these values have been calculated for joint conditional dist.
#print("New probability for p(bat_cap_t, bm_broken_t | b_meter_measurment): ".format())
#print("Transition probabilities sensor_prob * transition_prob * previous_probability: {} * {} * {} = {}".format(sensor_prob, transition_prob, previous_probability, sensor_prob * transition_prob * previous_probability))
return sensor_prob * transition_prob
def get_persistent_battery_failure_sensor_model_probability(battery_cap_t, bm_broken_t, b_meter_measurement):
if bm_broken_t == 0:
#AI:AMA p594. "when sensor is OK, the sensor model for BMeter is identical to the transient failure model"
return batt_meter_matrix[b_meter_measurement][battery_cap_t]
else:
#AI:AMA p594. "when the sensor is broken, it says BMeter is always 0, regardless of actual battery charge"
return 1 if b_meter_measurement == 0 else 0 #i.e. treat battery_cap_t as the true probability
def update_all_probs(prev_distribution: np.array, b_meter_measurement):
#assuming prev_distribution is a 2 x 6 vector, where positions (0, 0-5) represent the distribution
#of the battery capacity states given that the sensor isn't broken and positions (1, 0-5) represent
#the distribution of the battery capacity states given that the sensor is broken
updated_belief_vector = np.zeros((2, 6), dtype = np.float64)
#sensor_working_dist = batt_cap_transition_probability[0]
for sensor_working_value in [0,1]:
for bat_cap_t_index in range(no_battery_levels):
updated_belief_vector[sensor_working_value][bat_cap_t_index] = get_persistent_battery_failure_updated_state_esimate(bat_cap_t_index, sensor_working_value , b_meter_measurement, prev_distribution)
#return normalized beliefs
return normalize_belief_vector(updated_belief_vector)
def get_expected_battery_cap(belief_vector):
'''
belief_vector is a 2x6 vector, where first row corresponds to battery_meter not broken,
second row corresponds to battery_meter broken. Returns the expected battery capacity
'''
return get_expected_value(get_battery_cap_conditional_dist(belief_vector))
def get_battery_cap_conditional_dist(belief_vector):
'''
belief_vector is a 2x6 vector, where first row corresponds to battery_meter not broken,
second row corresponds to battery_meter broken. Returns the conditional distribution of the battery capacity
given the battery meter readings to date
'''
return np.sum(belief_vector, axis = 0)
def get_battery_meter_broken_conditional_dist(belief_vector):
'''
belief_vector is a 2x6 vector, where first row corresponds to battery_meter not broken,
second row corresponds to battery_meter broken.. Returns the conditional distribution of the battery meter broken
given the battery meter readings to date.
'''
return np.sum(belief_vector, axis = 1)
##################### PersistentBatteryTransitionModel #####################
#
############################################################################
##################### DBN with CPDs and Marginalization ####################
############################################################################
def get_battery_expected_value(belief_state):
#for i in range(no_battery_levels):
# print(belief_state[i]+belief_state[i+no_battery_levels])
#print([i*(belief_state[i]+belief_state[i+no_battery_levels]) for i in range(len(belief_state)-1)])
return sum([i*(belief_state[i]+belief_state[i+no_battery_levels]) for i in range(no_battery_levels)])
def get_expected_value(belief_state):
#print(belief_state)
return sum([i*belief_state[i] for i in range(len(belief_state))])
#%%
#Test model with persistent failure variable
#by convention, the state distribution vector corresponds to
# p(battery = 0, batterySensorBroken = False),
#.
#.
#p(battery = 5, batterySensorBroken = False),
#p(battery = 0, batterySensorBroken = True),
#.
#.
#p(battery = 5, batterySensorBroken = True)
#easier to state values relative to 1 and then normalize
initial_distribution_persistent_battery_sensor_broken = np.array([0.05, 0.1, 0.05, 0.2, 0.05, 0.55,
0, 0, 0, 0, 0, 0])
initial_distribution_persistent_battery_sensor_broken = normalize_belief_vector(initial_distribution_persistent_battery_sensor_broken)
initial_distribution_persistent_battery_sensor_broken_factored = initial_distribution_persistent_battery_sensor_broken.reshape((2,6))
#%%
expected_values_persistent = [get_expected_battery_cap(initial_distribution_persistent_battery_sensor_broken_factored)]
batt_cap_dist = [get_battery_cap_conditional_dist(initial_distribution_persistent_battery_sensor_broken_factored)]
batt_sensor_broken_dist = [get_battery_meter_broken_conditional_dist(initial_distribution_persistent_battery_sensor_broken_factored)]
next_dist = update_all_probs(initial_distribution_persistent_battery_sensor_broken_factored, 5)
batt_cap_dist.append(get_battery_cap_conditional_dist(next_dist))
batt_sensor_broken_dist.append(get_battery_meter_broken_conditional_dist(next_dist))
expected_values_persistent.append(get_expected_battery_cap(next_dist))
next_dist = update_all_probs(initial_distribution_persistent_battery_sensor_broken_factored, 5)
batt_cap_dist.append(get_battery_cap_conditional_dist(next_dist))
batt_sensor_broken_dist.append(get_battery_meter_broken_conditional_dist(next_dist))
expected_values_persistent.append(get_expected_battery_cap(next_dist))
next_dist = update_all_probs(initial_distribution_persistent_battery_sensor_broken_factored, 5)
batt_cap_dist.append(get_battery_cap_conditional_dist(next_dist))
batt_sensor_broken_dist.append(get_battery_meter_broken_conditional_dist(next_dist))
expected_values_persistent.append(get_expected_battery_cap(next_dist))
next_dist = update_all_probs(initial_distribution_persistent_battery_sensor_broken_factored, 5)
batt_cap_dist.append(get_battery_cap_conditional_dist(next_dist))
batt_sensor_broken_dist.append(get_battery_meter_broken_conditional_dist(next_dist))
expected_values_persistent.append(get_expected_battery_cap(next_dist))
next_dist = update_all_probs(initial_distribution_persistent_battery_sensor_broken_factored, 5)
batt_cap_dist.append(get_battery_cap_conditional_dist(next_dist))
batt_sensor_broken_dist.append(get_battery_meter_broken_conditional_dist(next_dist))
expected_values_persistent.append(get_expected_battery_cap(next_dist))
next_dist = update_all_probs(initial_distribution_persistent_battery_sensor_broken_factored, 5)
batt_cap_dist.append(get_battery_cap_conditional_dist(next_dist))
batt_sensor_broken_dist.append(get_battery_meter_broken_conditional_dist(next_dist))
expected_values_persistent.append(get_expected_battery_cap(next_dist))
next_dist = update_all_probs(initial_distribution_persistent_battery_sensor_broken_factored, 0)
batt_cap_dist.append(get_battery_cap_conditional_dist(next_dist))
batt_sensor_broken_dist.append(get_battery_meter_broken_conditional_dist(next_dist))
expected_values_persistent.append(get_expected_battery_cap(next_dist))
next_dist = update_all_probs(initial_distribution_persistent_battery_sensor_broken_factored, 0)
batt_cap_dist.append(get_battery_cap_conditional_dist(next_dist))
batt_sensor_broken_dist.append(get_battery_meter_broken_conditional_dist(next_dist))
expected_values_persistent.append(get_expected_battery_cap(next_dist))
next_dist = update_all_probs(initial_distribution_persistent_battery_sensor_broken_factored, 5)
batt_cap_dist.append(get_battery_cap_conditional_dist(next_dist))
batt_sensor_broken_dist.append(get_battery_meter_broken_conditional_dist(next_dist))
expected_values_persistent.append(get_expected_battery_cap(next_dist))
next_dist = update_all_probs(initial_distribution_persistent_battery_sensor_broken_factored, 5)
batt_cap_dist.append(get_battery_cap_conditional_dist(next_dist))
batt_sensor_broken_dist.append(get_battery_meter_broken_conditional_dist(next_dist))
expected_values_persistent.append(get_expected_battery_cap(next_dist))
print(expected_values_persistent)
#expected value of
plt.plot([_ for _ in range(len(expected_values_persistent))], expected_values_persistent, label = "Expected battery capacity")
plt.plot([_ for _ in range(len(batt_sensor_broken_dist))], [_[1] for _ in batt_sensor_broken_dist], label = "Prob of sensor failure")
plt.legend()
#%%
print(batt_sensor_broken_dist)
plt.clf()
plt.plot([_ for _ in range(len(batt_sensor_broken_dist))], batt_sensor_broken_dist)
| en | 0.649404 | # -*- coding: utf-8 -*- Created on Fri May 31 10:52:16 2019 @author: 13383861 #%% ##################### Persistence Transition Model ##################### #The persistence transition model is a M*K x M*K matrix representing a factorial HMM, #where M is the number of possible battery states #and K is the number of possible BatteryMeterBroken states. #Initially assume the battery states are {0,1,2,3,4,5} and the BatteryMeterBroken states are {0,1} #The tranisition matrix is then a 10x10 matrix, anaagous to the non-factorial HMM, where: #Rows 0-5 correspond to the previous battery state from 0-5 and the BatteryMeterBroken state 0 (false) #Rows 5-10 correspond to the previous battery state from 0-5 and the BatteryMeterBroken state 1 (true) #Columns 0-5 correspond to the current battery state from 0-5 and the current BatteryMeterBroken state 0 (false) #Columns 5-10 correspond to the current battery state from 0-5 and the current BatteryMeterBroken state 1 (true) #first list corresponds to 0 to 0 transition Second list corresponds to 0 to 1 transition for #for BM Broken (Not broken to not broken) BM broken (not broken to broken) #first list corresponds to 1 to 0 transition Second list corresponds to 0 to 1 transition for #for BM Broken (Broken to not Broken) (BM Broken to BM Broken) #normalize each row to 1 ##################### Persistence Transition Model ##################### #%% ##################### Persistence Sensor Model ##################### #When sensor is ok, sensor model for BMeter is identical to the transient failure model; when the sensor is broken, #it says BMeter is always 0, regardless of the actual battery charge. #assertion standard_deviation = 2.5 #normalized discrete approximation of gaussian with mean battery level, sd standard_deviation. This is the sensor model #no partial observability! 
#battery_meter_gaussians = np.identity(6, dtype = np.float64) Returns the sensor model corresponding to p(BatterMeter = batter_meter_reading | BatteryMeterBroken, State). Implicitly follows order specified by the belief distribution and transition matrix, whereby 0-5 corresponds to BatteryMeterBroken = 0 (Battery meter broken is false) 6-12 corresponds to BatteryMeterBroken = 1 (Battery meter broken is true). Maybe this should be a continuous Gaussian as outlined in #http://www.ee.columbia.edu/~sfchang/course/svia-F03/papers/factorial-HMM-97.pdf Page 4 (Factorial Hidden Markov Models), <NAME> #concatenate all to form 12x12 matrix #upper diagonal corresponds to sensor model for not broken matrix #lower diagonal corresponds to sensor model for broken matrix ##################### Persistence Sensor Model ##################### #%% ##################### Battery State Distribution ##################### #marginalize over the BatterySensorBroken related variables By convention, the distribution vector is assumed to take the form: BMeter not broken, battery = 0 BMeter not broken, battery = 1 BMeter not broken, battery = 2 . . BMeter not broken, battery = 5 BMeter broken, battery = 0 . . BMeter broken, battery = 5 To get the battery state distribution, marginalize over the BMeterBroken variable ##################### Battery State Distribution ##################### #%% ##################### BatterySensorBroken Distribution ##################### #marginalize over the BatterySensorBroken related variables By convention, the distribution vector is assumed to take the form: BMeter not broken, battery = 0 BMeter not broken, battery = 1 BMeter not broken, battery = 2 . . BMeter not broken, battery = 5 BMeter broken, battery = 0 . . 
BMeter broken, battery = 5 To get the BMeterBroken state distribution, marginalize over the Battery variable ##################### BatterySensorBroken Distribution ##################### ############################################################################ ##################### DBN with CPDs and Marginalization #################### ############################################################################ ##################### PersistentBatteryTransitionModel ##################### #As above, could model this as a HMM but it leads to some complexity (cannot simply state CPDs). #Try the Bayes Net approach of summing out sensorBroken persistent variable. #Define everything piecewise first #convention i(rows) corresponds to bm_broken_t_minus_one, j(cols) corresponds to bm_broken_t bm_broken_t takes value 0 if battery meter is not broken, 1 if broken Returns the updated state estimate for p(bat_cap_t, bm_broken_t | b_meter_measurment) #print("sum Probability of battery_cap_t = {} over all previous = {}".format(battery_cap_t, sum([batt_cap_transition_probability(battery_cap_t, battery_cap_t_minus_one) for battery_cap_t_minus_one in range(6)])), end = '*') #print("Probability of bm_broken_t = {} over all previous = {}".format(bm_broken_t, sum([bm_broken_transition_probability(bm_broken_t, bm_broken_t_minus_one) for bm_broken_t_minus_one in [0,1]])), sum([batt_cap_transition_probability(battery_cap_t, battery_cap_t_minus_one) for battery_cap_t_minus_one in range(6)]), end = '\n\n') #transition_prob = sum([bm_broken_transition_probability(bm_broken_t, bm_broken_t_minus_one) for bm_broken_t_minus_one in [0,1]]) * sum([batt_cap_transition_probability(battery_cap_t, battery_cap_t_minus_one) for battery_cap_t_minus_one in range(6)]) # print("bm_broken transition prob: ", bm_broken_transition_probability(bm_broken_t, prev_bm_broken_state)) # print("b_cap transition prob: ", batt_cap_transition_probability(battery_cap_t, prev_batt_cap_state)) # print("Prev_prob: ", 
previous_probability_vector[prev_bm_broken_state][prev_batt_cap_state]) #don't forget to normalize once these values have been calculated for joint conditional dist. #print("New probability for p(bat_cap_t, bm_broken_t | b_meter_measurment): ".format()) #print("Transition probabilities sensor_prob * transition_prob * previous_probability: {} * {} * {} = {}".format(sensor_prob, transition_prob, previous_probability, sensor_prob * transition_prob * previous_probability)) #AI:AMA p594. "when sensor is OK, the sensor model for BMeter is identical to the transient failure model" #AI:AMA p594. "when the sensor is broken, it says BMeter is always 0, regardless of actual battery charge" #i.e. treat battery_cap_t as the true probability #assuming prev_distribution is a 2 x 6 vector, where positions (0, 0-5) represent the distribution #of the battery capacity states given that the sensor isn't broken and positions (1, 0-5) represent #the distribution of the battery capacity states given that the sensor is broken #sensor_working_dist = batt_cap_transition_probability[0] #return normalized beliefs belief_vector is a 2x6 vector, where first row corresponds to battery_meter not broken, second row corresponds to battery_meter broken. Returns the expected battery capacity belief_vector is a 2x6 vector, where first row corresponds to battery_meter not broken, second row corresponds to battery_meter broken. Returns the conditional distribution of the battery capacity given the battery meter readings to date belief_vector is a 2x6 vector, where first row corresponds to battery_meter not broken, second row corresponds to battery_meter broken.. Returns the conditional distribution of the battery meter broken given the battery meter readings to date. 
##################### PersistentBatteryTransitionModel ##################### # ############################################################################ ##################### DBN with CPDs and Marginalization #################### ############################################################################ #for i in range(no_battery_levels): # print(belief_state[i]+belief_state[i+no_battery_levels]) #print([i*(belief_state[i]+belief_state[i+no_battery_levels]) for i in range(len(belief_state)-1)]) #print(belief_state) #%% #Test model with persistent failure variable #by convention, the state distribution vector corresponds to # p(battery = 0, batterySensorBroken = False), #. #. #p(battery = 5, batterySensorBroken = False), #p(battery = 0, batterySensorBroken = True), #. #. #p(battery = 5, batterySensorBroken = True) #easier to state values relative to 1 and then normalize #%% #expected value of #%% | 2.350832 | 2 |
listtuple.py | Jonakralisha/listtuple | 0 | 6615249 | <reponame>Jonakralisha/listtuple
#!/usr/bin/env python
# coding: utf-8
# In[1]:
list=[]
list.append('1')
list.append('2')
list.append('3')
list
# In[2]:
Names=('kenneth','jone','jerin')
Names[1]
# In[4]:
num=(1,2,3)
print "num[0:1]:",num[0:1]
# In[9]:
jonedict={
"hey":15,
"whatsup":25,
"have fun":79,
"bad":90,
}
del jonedict["have fun"]
print(jonedict)
# In[ ]:
| #!/usr/bin/env python
# coding: utf-8
# In[1]:
list=[]
list.append('1')
list.append('2')
list.append('3')
list
# In[2]:
Names=('kenneth','jone','jerin')
Names[1]
# In[4]:
num=(1,2,3)
print "num[0:1]:",num[0:1]
# In[9]:
jonedict={
"hey":15,
"whatsup":25,
"have fun":79,
"bad":90,
}
del jonedict["have fun"]
print(jonedict)
# In[ ]: | en | 0.203685 | #!/usr/bin/env python # coding: utf-8 # In[1]: # In[2]: # In[4]: # In[9]: # In[ ]: | 3.426418 | 3 |
ExampleTetris/Piece.py | Life4gal/Tetris-AI | 1 | 6615250 | <gh_stars>1-10
import enum
import random
import AI.StandardType as StandardType
def bin_to_dec(binary: str) -> int:
    """Convert a string of binary digits (e.g. '1011') to its integer value."""
    return int(binary, base=2)
class Piece(enum.Enum):
    """The seven tetromino shapes.

    Each member's value is a list with one StandardDataFormat entry per
    distinct rotation of the piece.  An entry carries the shape's rows
    encoded as integer bitmasks (built with bin_to_dec, leftmost cell =
    most significant bit), followed by the shape's width and height.
    """

    # Count of real shapes below; kept as a pseudo-member for backward
    # compatibility (callers read Piece.TOTAL_PIECES.value).
    TOTAL_PIECES = 7

    I = [
        # O
        # O
        # O
        # O
        StandardType.StandardDataFormat([1, 1, 1, 1], 1, 4),
        # OOOO
        StandardType.StandardDataFormat([bin_to_dec('1111')], 4, 1)
    ]
    T = [
        #  O
        # OOO
        StandardType.StandardDataFormat([bin_to_dec('010'), bin_to_dec('111')], 3, 2),
        # O
        # OO
        # O
        StandardType.StandardDataFormat([bin_to_dec('10'), bin_to_dec('11'), bin_to_dec('10')], 2, 3),
        #  O
        # OO
        #  O
        StandardType.StandardDataFormat([bin_to_dec('01'), bin_to_dec('11'), bin_to_dec('01')], 2, 3),
        # OOO
        #  O
        StandardType.StandardDataFormat([bin_to_dec('111'), bin_to_dec('010')], 3, 2)
    ]
    O = [
        # OO
        # OO
        StandardType.StandardDataFormat([bin_to_dec('11'), bin_to_dec('11')], 2, 2)
    ]
    J = [
        # O
        # OOO
        StandardType.StandardDataFormat([bin_to_dec('100'), bin_to_dec('111')], 3, 2),
        # OO
        # O
        # O
        StandardType.StandardDataFormat([bin_to_dec('11'), bin_to_dec('10'), bin_to_dec('10')], 2, 3),
        # OOO
        #   O
        StandardType.StandardDataFormat([bin_to_dec('111'), bin_to_dec('001')], 3, 2),
        #  O
        #  O
        # OO
        StandardType.StandardDataFormat([bin_to_dec('01'), bin_to_dec('01'), bin_to_dec('11')], 2, 3)
    ]
    L = [
        #   O
        # OOO
        StandardType.StandardDataFormat([bin_to_dec('001'), bin_to_dec('111')], 3, 2),
        # O
        # O
        # OO
        StandardType.StandardDataFormat([bin_to_dec('10'), bin_to_dec('10'), bin_to_dec('11')], 2, 3),
        # OOO
        # O
        StandardType.StandardDataFormat([bin_to_dec('111'), bin_to_dec('100')], 3, 2),
        # OO
        #  O
        #  O
        StandardType.StandardDataFormat([bin_to_dec('11'), bin_to_dec('01'), bin_to_dec('01')], 2, 3)
    ]
    S = [
        #  OO
        # OO
        StandardType.StandardDataFormat([bin_to_dec('011'), bin_to_dec('110')], 3, 2),
        # O
        # OO
        #  O
        StandardType.StandardDataFormat([bin_to_dec('10'), bin_to_dec('11'), bin_to_dec('01')], 2, 3)
    ]
    Z = [
        # OO
        #  OO
        StandardType.StandardDataFormat([bin_to_dec('110'), bin_to_dec('011')], 3, 2),
        #  O
        # OO
        # O
        StandardType.StandardDataFormat([bin_to_dec('01'), bin_to_dec('11'), bin_to_dec('10')], 2, 3)
    ]

    @staticmethod
    def get_random_piece():
        """Return one of the seven piece shapes, chosen uniformly at random.

        Replaces the original randint + 14-line if/elif ladder with a single
        random.choice over the members; the distribution is unchanged
        (uniform over the 7 shapes).
        """
        return random.choice(
            (Piece.I, Piece.T, Piece.O, Piece.J, Piece.L, Piece.S, Piece.Z)
        )
| import enum
import random
import AI.StandardType as StandardType
def bin_to_dec(binary: str) -> int:
    """Parse *binary* (a string of '0'/'1' characters) as a base-2 integer."""
    value = int(binary, 2)
    return value
class Piece(enum.Enum):
    """The seven tetromino shapes (duplicated copy of the module above).

    Each member's value is a list with one StandardDataFormat entry per
    distinct rotation of the piece.  An entry carries the shape's rows
    encoded as integer bitmasks (built with bin_to_dec, leftmost cell =
    most significant bit), followed by the shape's width and height.
    """

    # Count of real shapes below; kept as a pseudo-member for backward
    # compatibility (callers read Piece.TOTAL_PIECES.value).
    TOTAL_PIECES = 7

    I = [
        # O
        # O
        # O
        # O
        StandardType.StandardDataFormat([1, 1, 1, 1], 1, 4),
        # OOOO
        StandardType.StandardDataFormat([bin_to_dec('1111')], 4, 1)
    ]
    T = [
        #  O
        # OOO
        StandardType.StandardDataFormat([bin_to_dec('010'), bin_to_dec('111')], 3, 2),
        # O
        # OO
        # O
        StandardType.StandardDataFormat([bin_to_dec('10'), bin_to_dec('11'), bin_to_dec('10')], 2, 3),
        #  O
        # OO
        #  O
        StandardType.StandardDataFormat([bin_to_dec('01'), bin_to_dec('11'), bin_to_dec('01')], 2, 3),
        # OOO
        #  O
        StandardType.StandardDataFormat([bin_to_dec('111'), bin_to_dec('010')], 3, 2)
    ]
    O = [
        # OO
        # OO
        StandardType.StandardDataFormat([bin_to_dec('11'), bin_to_dec('11')], 2, 2)
    ]
    J = [
        # O
        # OOO
        StandardType.StandardDataFormat([bin_to_dec('100'), bin_to_dec('111')], 3, 2),
        # OO
        # O
        # O
        StandardType.StandardDataFormat([bin_to_dec('11'), bin_to_dec('10'), bin_to_dec('10')], 2, 3),
        # OOO
        #   O
        StandardType.StandardDataFormat([bin_to_dec('111'), bin_to_dec('001')], 3, 2),
        #  O
        #  O
        # OO
        StandardType.StandardDataFormat([bin_to_dec('01'), bin_to_dec('01'), bin_to_dec('11')], 2, 3)
    ]
    L = [
        #   O
        # OOO
        StandardType.StandardDataFormat([bin_to_dec('001'), bin_to_dec('111')], 3, 2),
        # O
        # O
        # OO
        StandardType.StandardDataFormat([bin_to_dec('10'), bin_to_dec('10'), bin_to_dec('11')], 2, 3),
        # OOO
        # O
        StandardType.StandardDataFormat([bin_to_dec('111'), bin_to_dec('100')], 3, 2),
        # OO
        #  O
        #  O
        StandardType.StandardDataFormat([bin_to_dec('11'), bin_to_dec('01'), bin_to_dec('01')], 2, 3)
    ]
    S = [
        #  OO
        # OO
        StandardType.StandardDataFormat([bin_to_dec('011'), bin_to_dec('110')], 3, 2),
        # O
        # OO
        #  O
        StandardType.StandardDataFormat([bin_to_dec('10'), bin_to_dec('11'), bin_to_dec('01')], 2, 3)
    ]
    Z = [
        # OO
        #  OO
        StandardType.StandardDataFormat([bin_to_dec('110'), bin_to_dec('011')], 3, 2),
        #  O
        # OO
        # O
        StandardType.StandardDataFormat([bin_to_dec('01'), bin_to_dec('11'), bin_to_dec('10')], 2, 3)
    ]

    @staticmethod
    def get_random_piece():
        """Return one of the seven piece shapes, chosen uniformly at random.

        Replaces the original randint + 14-line if/elif ladder with a single
        random.choice over the members; the distribution is unchanged
        (uniform over the 7 shapes).
        """
        return random.choice(
            (Piece.I, Piece.T, Piece.O, Piece.J, Piece.L, Piece.S, Piece.Z)
        )