content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
import os

import layout
import callbacks  # layout needs to be defined before creating callbacks
import routes
import appserver

# WSGI entry point used by production servers (e.g. gunicorn).
server = appserver.app.server

if __name__ == "__main__":
    # Debug mode is opt-in via the DEBUG environment variable; the comparison is
    # case-insensitive so "true", "True" and "TRUE" all enable it.
    debug_mode = os.getenv("DEBUG", "false").lower() == "true"
    if debug_mode:
        print("Initiating server. Debug mode enabled.")
        # appserver.app.enable_dev_tools(debug=True)
    else:
        print("Initiating server.")
    appserver.app.run_server(
        debug=debug_mode,
        host="0.0.0.0",
        port=5000
    )
# Visualizer is for debugging purposes only
import logging
import math
import random
import threading
import http.server
import socketserver
import os
import re
from shapely import wkt
import matplotlib.pyplot as plt
import mpld3
import screeninfo
import tempfile
import webbrowser
import owlready2
from shapely import geometry
import numpy as np
from tqdm import tqdm
import time as pytime
import auto.auto
from criticality_recognition import phenomena_extraction
# TODO
# - visualize scenario level CPs
# - show has distance to in table for each individual - as ternary relations - instead of omitting it
####################
# Config constants #
####################
# Classes to not show in visualization
_NO_PRINTING_CLASSES = {"physics.Has_Distance_To", "perception.Is_Full_Occlusion", "perception.Is_Occlusion"}
# Data/object properties to hide from the individual tables shown when hovering
# NOTE(review): "traffic_entity_property" appears twice in this set literal —
# harmless (it is a set), but one occurrence could be dropped.
_NO_PRINTING_PROPERTIES = {"perceptional_property", "traffic_related_concept_property",
                           "descriptive_traffic_entity_property", "traffic_entity_property", "activity_property",
                           "physical_property", "traffic_modeling_property", "traffic_entity_property",
                           "automotive_urban_traffic_property", "L1_property", "L2_property", "L3_property",
                           "L4_property", "L5_property", "L6_property", "traffic_model_element_property",
                           "criticality_phenomenon_as_object_property", "has_positional_relation",
                           "has_spatial_relation", "has_dynamical_relation", "SF_spatial_relation",
                           "performance_spatial_relation", "EH_spatial_relation", "RCC8_spatial_relation", "rcc8dc",
                           "ehDisjoint"}
# If one hides long property lists, this is the number after which the list is cut off
_MAX_PROPS_DISPLAY = 4
# When True, label placement searches for a free spot to avoid overlapping
# labels (slower; see the while-loop in visualize_scenario).
_AVOID_LABEL_COLLISIONS = False
# Logging
logger = logging.getLogger(__name__)
# Helper function for sorting CPs & individuals
def natural_sort_key(s, _nsre=re.compile("([0-9]+)")):
    """Sort key for "natural" ordering: embedded digit runs compare numerically.

    The input is stringified, split on digit runs, and each chunk becomes either
    an int (for digit runs) or its lowercased text, so e.g. "s2" sorts before
    "s10".
    """
    key = []
    for chunk in _nsre.split(str(s)):
        key.append(int(chunk) if chunk.isdigit() else chunk.lower())
    return key
#######
# CSS #
#######
# Scene CSS (added as iframes to scenario HTML)
scene_css = """
<style>
svg * {
font-size: 4pt;
}
table {
border: solid 1px #DDEEEE;
border-collapse: collapse;
border-spacing: 0;
font: normal 8px, sans-serif;
}
thead th {
background-color: #DDEFEF;
border: solid 1px #DDEEEE;
color: #336B6B;
padding: 3px;
text-align: left;
text-shadow: 1px 1px 1px #fff;
font-size: 10pt;
}
tbody td {
background-color: #FFFFFF;
border: solid 1px #DDEEEE;
color: #333;
padding: 3px;
text-shadow: 1px 1px 1px #fff;
font-size: 8pt;
}
.cp-tooltip {}
</style>
"""
# Scenario CSS (main CSS)
scenario_css = """
<style>
.slider {
-webkit-appearance: none; /* Override default CSS styles */
appearance: none;
width: 100%; /* Full-width */
height: 25px; /* Specified height */
background: #d3d3d3; /* Grey background */
outline: none; /* Remove outline */
opacity: 0.7; /* Set transparency (for mouse-over effects on hover) */
-webkit-transition: .2s; /* 0.2 seconds transition on hover */
transition: opacity .2s;
}
.slider:hover {
opacity: 1; /* Fully shown on mouse-over */
}
.slider::-webkit-slider-thumb {
-webkit-appearance: none; /* Override default look */
appearance: none;
width: 25px; /* Set a specific slider handle width */
height: 25px; /* Slider handle height */
background: #04AA6D; /* Green background */
cursor: pointer; /* Cursor on hover */
}
.slider::-moz-range-thumb {
width: 25px; /* Set a specific slider handle width */
height: 25px; /* Slider handle height */
background: #04AA6D; /* Green background */
cursor: pointer; /* Cursor on hover */
}
</style>"""
def visualize_scenario(scenario, cps=None):
    """
    Creates an HTML visualization of the given scenario. Starts a simple web server at localhost:8000 (blocking).
    :param scenario: Either a list of worlds, each world representing a single scene or a single world representing a
    whole scenario
    :param cps: An optional list of criticality phenomena to visualize as well.
    :return: The path to the directory in which to find the created HTML visualization.
    """
    pl_html = []
    scenario_inst = None
    if cps is None:
        cps = []
    # Fetch scene list
    if type(scenario) == list:
        # One owlready2 world per scene: pull the single Scene individual out of each world.
        scenes = [scene_world.search(type=auto.auto.get_ontology(auto.auto.Ontology.Traffic_Model, scene_world).Scene)
                  [0] for scene_world in scenario]
    elif type(scenario) == owlready2.namespace.World or type(scenario) == owlready2.World:
        # A single world containing a whole Scenario individual with all its scenes.
        tm = auto.auto.get_ontology(auto.auto.Ontology.Traffic_Model, scenario)
        scenario_inst = scenario.search(type=tm.Scenario)[0]
        scenes = list(filter(lambda x: tm.Scene in x.is_a, scenario_inst.has_traffic_model))
    else:
        raise ValueError
    # Sort scenes chronologically by their numeric time position.
    scenes = sorted(scenes, key=lambda x: x.inTimePosition[0].numericPosition[0])
    # Assemble scenario title
    title = "Scenario"
    if scenario_inst and hasattr(scenario_inst, "identifier") and len(scenario_inst.identifier) > 0:
        title += " " + str(scenario_inst.identifier[0])
    scenario_info = "(" + str(len(scenes)) + " Scenes)"
    # Main HTML code for index.html
    html_body = """<!DOCTYPE html>
<html>
<head>
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/js/bootstrap.bundle.min.js" integrity="sha384-ka7Sk0Gln4gmtz2MlQnikT1wXgYsOg+OMhuP+IlRH9sENBO0LRn5q+8nbTov4+1p" crossorigin="anonymous"></script>
<script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
<meta charset="utf-8">""" + scenario_css + """
<title>""" + title + """</title>
</head>
<body>
<div class=\"d-flex flex-row justify-content-center\"><div class=\"mt-3 py-1 px-6 alert alert-info\" style=\"display: inline-block\" role=\"alert\"><center><h5>""" + title + """ """ + scenario_info + """</h5></center></div></div>
<div class="slidecontainer m-2">
<input type="range" min="1" max=\"""" + str(len(scenes)) + """\" value="1" class="slider" id="myRange">
</div>
<script>
var slider = document.getElementById("myRange");
var last_set = 1
var show_all_cps = true
slider.oninput = function() {
var output = document.getElementById("plt" + this.value);
var last_output = document.getElementById("plt" + last_set);
last_output.style.display = 'none';
output.style.display = 'block';
last_set = this.value
}
function toggle_cps_all_iframes() {
show_all_cps = !show_all_cps
$(".cp-all-button").each(function(i) {
if (show_all_cps) {
this.parentElement.classList.add("active")
this.checked = true
} else {
this.parentElement.classList.remove("active")
this.checked = false
}
})
$(".cp-button").each(function(i) {
if (show_all_cps) {
this.parentElement.classList.add("active")
this.checked = true
} else {
this.parentElement.classList.remove("active")
this.checked = false
}
})
$(".scene-plot").each(function(i) {
this.contentWindow.toggle_cps(show_all_cps)
})
}
function toggle_cp_class(ele, cp_cls_id) {
// 0. disable automatically checked checkbox (will be added again at step 3)
ele.checked = !ele.checked
// 1. find active scene plot
active_scene = $(".scene-plot-container").filter(function(i) {
return this.style.display !== "none"
})[0]
// 2. get CP pred. str for given cp_cls_id
cp_pred = active_scene.getElementsByClassName("scene-plot")[0].contentWindow.cp_predicates[cp_cls_id]
// 3. Toggle all buttons for this CP pred
$("label > span:contains(" + cp_pred + ")").each(function(i) {
this.parentElement.classList.toggle("active")
this.parentElement.querySelector(".cp-button").checked = !this.parentElement.querySelector(".cp-button").checked
})
// 4. check if (and where) CP pred. str is present in cp_predicates, pass the resulting index
$(".scene-plot").each(function(k) {
cp_cls_id_scene = -1
for (var i = 0; i < this.contentWindow.cp_predicates.length; i++) {
if (cp_pred === this.contentWindow.cp_predicates[i]) {
cp_cls_id_scene = i
}
}
if (cp_cls_id_scene >= 0) {
this.contentWindow.toggle_cp_class(cp_cls_id_scene, ele.checked)
}
})
}
</script>
"""
    pl_html.append(html_body)
    iframes = []

    def get_color(p):
        # Fetches a different color each time, but ensures that it has a readable contrast.
        # (The parameter p is unused; it only exists so this can be map()ed over a range.)
        _LUMA_LIMIT = 170
        color = 0
        luma = _LUMA_LIMIT
        while luma >= _LUMA_LIMIT:
            # Rec. 709 luma; re-roll until the color is dark enough to read on white.
            color = random.randrange(0, 0xFFFFFF, 0xF)
            luma = 0.2126 * ((color >> 16) & 0xff) + 0.7152 * ((color >> 8) & 0xff) + 0.0722 * ((color >> 0) & 0xff)
        return "#" + "%06x" % color

    # Create HTML for each scene
    for i, scene in enumerate(scenes):
        logger.info("Plotting scene " + str(i + 1) + " / " + str(len(scenes)))
        scene_cps = [cp for cp in cps if cp.is_representable_in_scene(scene)]
        # One random readable color per CP subject occurring in this scene.
        cp_colors = list(map(get_color, range(len([x for c in scene_cps for x in c.subjects]))))
        cp_color = 0
        no_geo_entities = []
        # Default plot size in inches; overridden by the primary screen size if detectable.
        width = 24.5
        height = 10
        try:
            primary_screens = list(filter(lambda x: x.is_primary, screeninfo.get_monitors()))
            if len(primary_screens) > 0:
                # mm -> inch, scaled down to leave room for the surrounding UI.
                width = (primary_screens[0].width_mm / 25.4) * 0.73
                height = (primary_screens[0].height_mm / 25.4) * 0.73
        except screeninfo.common.ScreenInfoError:
            logger.info("No screens found, using default plot size of " + str(width) + " in x " + str(height) + " in")
        fig = plt.figure(figsize=(width, height))
        plt.axis("equal")
        entity_labels = []
        entity_relations = []
        relations_per_cp_class = dict()
        cps_relations = []
        cps_for_tooltips = []
        centroids_x = []
        centroids_y = []
        plotted_labels = []
        entity_points = dict()
        traffic_entities = tqdm(scene.has_traffic_entity)
        for entity in traffic_entities:
            traffic_entities.set_description(str(entity))
            if len(entity.hasGeometry) > 0:
                for geo in entity.hasGeometry:
                    shape = wkt.loads(geo.asWKT[0])
                    entity_cp_relations = []
                    points = None
                    if hasattr(shape, "exterior"):
                        points = shape.exterior.xy
                    # NOTE(review): the hasattr() result below is unused — shapes without
                    # coords raise NotImplementedError on the attribute access instead;
                    # presumably `if hasattr(...)` was intended. Verify before changing.
                    try:
                        hasattr(shape, "coords")
                        points = shape.coords.xy
                    except NotImplementedError:
                        pass
                    if points:
                        # If another entity already sits (numerically) at this centroid,
                        # offset the marker upward and draw a black connector line.
                        if (np.isclose(centroids_x, shape.centroid.x) & np.isclose(centroids_y, shape.centroid.y))\
                                .any():
                            x = shape.centroid.x + 0.0
                            y = shape.centroid.y + 0.8
                            plt.plot((shape.centroid.x, x), (shape.centroid.y, y), "k-")
                        else:
                            x = shape.centroid.x
                            y = shape.centroid.y
                        entity_points[entity] = (x, y)
                        centroids_x.append(x)
                        centroids_y.append(y)
                        plt.plot(*points, alpha=.6)
                        if auto.auto.get_ontology(auto.auto.Ontology.Physics, scenario).Dynamical_Object in \
                                entity.INDIRECT_is_a:
                            # Dynamical objects get a filled shape and a heading arrow.
                            plt.fill(*points, alpha=.3)
                            if entity.has_yaw is not None:
                                x_dir = (0.9 * math.cos(math.radians(entity.has_yaw)))
                                y_dir = (0.9 * math.sin(math.radians(entity.has_yaw)))
                                plt.arrow(shape.centroid.x, shape.centroid.y, dx=x_dir, dy=y_dir, shape="full",
                                          length_includes_head=True, color="gray", alpha=0.6, head_width=1)
                        entity_labels.append(_describe_entity(entity))
                        # Plot CPs
                        entity_scene_cps = list(filter(lambda scp: entity in scp.subjects, scene_cps))
                        if len(entity_scene_cps) > 0:
                            # Red marker: this entity is the subject of at least one CP.
                            plt.plot(x, y, "o", color="r", mec="k", markersize=3, alpha=1)
                            ent_color = "red"
                        else:
                            ent_color = "black"
                        if entity.identifier and len(entity.identifier) > 0 and not entity.is_persistent and not \
                                (isinstance(entity.identifier[0], str) and entity.identifier[0].startswith("repr")):
                            plt.annotate(entity.identifier[0], (x+0.2, y+0.2), color=ent_color)
                        already_drawn_cps = []
                        # init dict
                        for cp in entity_scene_cps:
                            if cp.predicate not in relations_per_cp_class.keys():
                                relations_per_cp_class[cp.predicate] = []
                        for cp in entity_scene_cps:
                            if cp not in already_drawn_cps:
                                # Group CPs that reference exactly the same object list so
                                # their labels can share one arrow.
                                same_line_cps = [x for x in entity_scene_cps if
                                                 [y for z in x.objects.values() for y in z] ==
                                                 [y for z in cp.objects.values() for y in z]]
                                labels = [(x.predicate.split("(")[0],
                                           (x.predicate.split("(")[1].replace(")", ""), str(x)))
                                          for x in same_line_cps]
                                already_drawn_cps += same_line_cps
                                subj_x = x
                                subj_y = y
                                for objs in cp.objects.values():
                                    for obj in objs:
                                        if len(obj.hasGeometry) > 0:
                                            if obj in entity_points.keys():
                                                obj_x = entity_points[obj][0]
                                                obj_y = entity_points[obj][1]
                                            else:
                                                geom_o = wkt.loads(obj.hasGeometry[0].asWKT[0])
                                                obj_x = geom_o.centroid.x
                                                obj_y = geom_o.centroid.y
                                            # Slope/intercept of the subject->object line, used
                                            # to place labels along the arrow.
                                            # NOTE(review): divides by zero when subject and
                                            # object share the same x coordinate — confirm this
                                            # cannot occur or guard against it.
                                            m = (obj_y - subj_y) / (obj_x - subj_x)
                                            b = subj_y - m * subj_x
                                            head_width = 0.2
                                            head_length = 1.5 * head_width
                                            arrow = plt.arrow(subj_x, subj_y, dx=(obj_x - subj_x), dy=(obj_y - subj_y),
                                                              color=cp_colors[cp_color], shape="full",
                                                              length_includes_head=True, head_width=head_width,
                                                              head_length=head_length)
                                            if len(labels[0]) > 1:
                                                label_row = " ".join([label[0] for label in labels])
                                            else:
                                                label_row = labels[0]
                                            x_offset = (len(label_row) * 0.055) / 2 - 0.055
                                            if subj_x > obj_x:
                                                label_x = obj_x + abs(subj_x - obj_x) / 2 - x_offset
                                            else:
                                                label_x = obj_x - abs(subj_x - obj_x) / 2 - x_offset
                                            a = math.degrees(math.atan(m))
                                            for l_i, label in enumerate(labels):
                                                label_string = label[0].replace("CP_", "")
                                                label_len = (len(label_string) * 0.09 + 0.1)
                                                label_x_offset = abs(math.cos(math.atan(m)) * label_len)
                                                while True:
                                                    # Finds a free space to plot label
                                                    label_y = m * label_x + b + 0.05
                                                    label_x_1 = label_x - label_x_offset / 2 + 0.05
                                                    label_y_1 = m * label_x_1 + b
                                                    label_x_2 = label_x + label_x_offset / 2 + 0.05
                                                    label_y_2 = m * label_x_2 + b
                                                    label_line1 = geometry.LineString([(label_x_1, label_y_1),
                                                                                       (label_x_2, label_y_2)])
                                                    new_bb = label_line1.buffer(0.1, cap_style=2)
                                                    new_bb_rect = list(zip(*new_bb.exterior.xy))[:-1]
                                                    if not _AVOID_LABEL_COLLISIONS or not \
                                                            _has_collision_with_bbs(plotted_labels, new_bb_rect):
                                                        break
                                                    # Collision: nudge the label along the arrow and retry.
                                                    label_x += label_x_offset / 10
                                                annot = plt.annotate(label_string,
                                                                     (label_x, label_y), color=cp_colors[cp_color],
                                                                     rotation=a, fontsize=2, rotation_mode="anchor")
                                                entity_cp_relations.append(annot)
                                                cps_relations.append(annot)
                                                relations_per_cp_class[same_line_cps[l_i].predicate] += [annot, arrow]
                                                cps_for_tooltips.append(same_line_cps[l_i])
                                                plotted_labels.append(new_bb_rect)
                                                label_x += label_x_offset
                                            # Chain the arrows: the next hop starts at this object.
                                            subj_x = obj_x
                                            subj_y = obj_y
                                # NOTE(review): `arrow` is only bound if some object had a
                                # geometry — NameError otherwise; verify objects always do.
                                entity_cp_relations += [arrow]
                                cp_color = (cp_color + 1) % len(cp_colors)
                        entity_relations.append(entity_cp_relations)
            elif len(set([str(y) for y in entity.INDIRECT_is_a]).intersection(_NO_PRINTING_CLASSES)) == 0:
                # Entities without geometry are listed in a collapsible section instead.
                no_geo_entities.append(_describe_entity(entity))
        logger.info("Done with layout, creating MPLD3 plot, JS plugins, and HTML string")
        pl2 = plt.plot(centroids_x, centroids_y, "o", color="b", mec="k", markersize=2, mew=1, alpha=.4)
        tooltip_individuals = ToolTipAndClickInfo(pl2[0], labels=entity_labels, targets=entity_relations,
                                                  targets_per_cp=relations_per_cp_class)
        fig.tight_layout()
        mpld3.plugins.connect(fig, tooltip_individuals)
        for h, cp_text in enumerate(cps_relations):
            tooltip_cp = CPTooltip(cp_text, cps_for_tooltips[h])
            mpld3.plugins.connect(fig, tooltip_cp)
        # Only the first scene is visible initially; the slider in index.html toggles the rest.
        html = "\n\t\t<div class=\"container-fluid scene-plot-container\" id=\"plt" + str(i + 1) + "\" style =\""
        if i != 0:
            html += "display: none;"
        html += "\">"
        html += """
<div class="row">
<div class="col-md-1">
"""
        cp_count_total = len([x for x in cps if (isinstance(x.traffic_model, list) and scene in x.traffic_model) or
                              x.traffic_model == scenario_inst])
        html += """<div class="">
<label class="btn btn-primary active" style="margin-bottom: 10px; width: %s">
<input type="checkbox" class="cp-all-button" id="cp-all-button-%s" autocomplete="off" onclick="toggle_cps_all_iframes();" checked>
<span>Show all criticality phenomena (%s)</span>
</label>""" % ("100%", str(i), str(cp_count_total))
        # One toggle button per CP predicate present in this scene.
        for l, pred in enumerate(sorted(relations_per_cp_class.keys(), key=natural_sort_key)):
            cp_count = len([x for x in cps if x.predicate == pred and ((isinstance(x.traffic_model, list) and
                            scene in x.traffic_model) or x.traffic_model == scenario_inst)])
            html += """
<br />
<label class="btn btn-secondary active" style="margin-bottom: 5px; width: %s">
<input type="checkbox" class="cp-button" id="cp-button-%s-%s" autocomplete="off" onclick="toggle_cp_class(this, %s);" checked>
<span>%s (%s)</span>
</label>""" % ("100%", str(i), str(l), str(l), pred, str(cp_count))
        html += """
</div>
</div>
<div class="col-md-11">
"""
        html += "<div class=\"embed-responsive embed-responsive-16by9\">\n"
        html += "\t\t\t\t\t\t<iframe class=\"scene-plot\" src=\"scene" + str(i + 1) + ".html\" class=\"embed-responsive-item\" style=\"width: 100%; height: " + str(height*1.27) + "in\" allowfullscreen></iframe>\n\t\t\t\t\t</div>\n"
        iframe_html = """<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta HTTP-EQUIV="Access-Control-Allow-Origin" CONTENT="localhost">
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/js/bootstrap.bundle.min.js" integrity="sha384-ka7Sk0Gln4gmtz2MlQnikT1wXgYsOg+OMhuP+IlRH9sENBO0LRn5q+8nbTov4+1p" crossorigin="anonymous"></script>
<script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
</head>
<body>"""
        iframe_html += scene_css
        iframe_html += """
<div class="d-flex flex-row justify-content-center">
<div class="btn-group btn-group-toggle" data-bs-toggle="buttons">
<label class="btn btn-secondary active">
<input type="checkbox" id="tooltip_button" checked autocomplete="off" onclick="toggle_tooltips(this);"> Show tooltip with information of individuals
</label>
<label class="btn btn-secondary active">
<input type="checkbox" id="descr_button" checked autocomplete="off" onclick="toggle_all_ind_relations(this);"> Show full individual relations in tooltip
</label>
</div>
</div>
<script>
var show_tooltips = true
var show_long_ind = true
cps = []
cp_targets = []
cp_targets_per_class = []
function toggle_tooltips(ele) {
ele.parentElement.classList.toggle("active")
show_tooltips = !show_tooltips
}
function toggle_all_ind_relations(ele) {
ele.parentElement.classList.toggle("active")
show_long_ind = !show_long_ind
}
function toggle_cp_targets(targets, state) {
for (let j = 0; j < targets.length; j++) {
var x = mpld3.get_element(targets[j])
if (x) {
if ("path" in x) {
tog = x.path
} else if ("obj" in x) {
tog = x.obj
}
for (var k = 0; k < tog._groups.length; k++) {
for (var l = 0; l < tog._groups[k].length; l++){
if (state) {
tog._groups[k][l].style.display = "block"
} else {
tog._groups[k][l].style.display = "none"
}
}
}
}
}
}
function toggle_cps(state) {
for (let i = 0; i < cp_targets.length; i++) {
toggle_cp_targets(cp_targets[i], state)
}
}
function toggle_cp_class(cp_class, state) {
targets = cp_targets_per_class[cp_class]
toggle_cp_targets(targets, state)
}
</script>
<div class="card m-2">
<div class="card-title d-flex flex-row justify-content-center m-1">
<h5>"""
        if len(scene.inTimePosition) > 0 and len(scene.inTimePosition[0].numericPosition) > 0:
            time = "%.2f s" % scene.inTimePosition[0].numericPosition[0]
            if scenario_inst and len(scenario_inst.hasEnd) > 0 and len(scenario_inst.hasEnd[0].inTimePosition) > 0 and \
                    len(scenario_inst.hasEnd[0].inTimePosition[0].numericPosition) > 0:
                time += " / %.2f s" % scenario_inst.hasEnd[0].inTimePosition[0].numericPosition[0]
            else:
                time += " / " + str(len(scenes))
        else:
            # NOTE(review): uses i (0-based) here while scene files are 1-based — the
            # first scene is shown as "Scene 0"; verify whether i + 1 was intended.
            time = str(i) + " / " + str(len(scenes))
        iframe_html += "Scene " + time + "<br />"
        iframe_html += """
</h5>
</div>
<div class="card-body m-0 p-0 d-flex justify-content-center">
"""
        scene_html = mpld3.fig_to_html(fig)
        iframe_html += ''.join("\t\t"+line+"\n" for line in scene_html.splitlines())
        iframe_html += """
</div>
</div>"""
        if len(no_geo_entities) > 0:
            iframe_html += """
<div class="d-flex flex-row justify-content-center">
<a class="btn btn-primary" data-bs-toggle="collapse" href="#noGeoCollapse" role="button" aria-expanded="false" aria-controls="noGeoCollapse">
Show scene individuals with no geometric representation (%s)
</a>
</div>
<div class="container-fluid collapse" id="noGeoCollapse">
<div class="card card-body m-2">""" % str(len(no_geo_entities))
            iframe_html += "".join(no_geo_entities)
            iframe_html += """
</div>
</div>"""
        iframe_html += "\t</body>\n</html>"
        iframes.append(iframe_html)
        html += "\t\t\t\t</div>\n\t\t\t</div>\n\t\t</div>"
        pl_html.append(html)
    # Assemble main HTML
    pl_html.append("\n\t</body>\n</html>")
    # Write main HTML to index.html
    tmp_dir = tempfile.mkdtemp()
    index_path = tmp_dir + "/index.html"
    with open(index_path, "w") as file:
        for html in pl_html:
            file.write(html)
    # Write each scene HTML to a single file
    for i, iframe in enumerate(iframes):
        frame_path = tmp_dir + "/scene" + str(i + 1) + ".html"
        with open(frame_path, "w") as file:
            # NOTE(review): iframe is a string, so this writes it one character at a
            # time; file.write(iframe) would be equivalent.
            for html in iframe:
                file.write(html)
    # Starts webserver
    os.chdir(tmp_dir)
    # Serve tmp_dir on port 8000 from a background thread; the thread (and the
    # process) keeps running until externally terminated.
    threading.Thread(target=socketserver.TCPServer(("", 8000),
                                                   http.server.SimpleHTTPRequestHandler).serve_forever).start()
    logger.info("Visualization is available at: http://localhost:8000")
    webbrowser.open("http://localhost:8000")
    return tmp_dir
def _describe_entity(entity):
    """
    Describes the given traffic entity as an HTML table (used as tooltip content).
    :param entity: An object of an owlready2 class.
    :return: The HTML-representation of entity.
    """
    cls = phenomena_extraction.get_most_specific_classes([entity])
    label = "<table class=\"m-2\"><thead><tr><th>Individual</th><th>" + str(entity)
    label += " (" + ", ".join(cls[0][1]) + ")</th></tr></thead><tbody><tr><td>is_a</td><td>"
    label += ", ".join([str(x) for x in entity.is_a])
    label += "</td></tr>"
    # One table row per property, skipping the hidden ones from _NO_PRINTING_PROPERTIES.
    for prop in entity.get_properties():
        if str(prop.python_name) not in _NO_PRINTING_PROPERTIES:
            label += "<tr>"
            label += "<td>"
            label += str(prop.python_name)
            label += "</td>"
            label += "<td>"
            # Show at most _MAX_PROPS_DISPLAY values directly; the remainder goes
            # into an "extended_ind_props" span toggled by the tooltip JS.
            # NOTE(review): no ", " separator is emitted between the last visible
            # value and the first hidden one — they render glued together when the
            # extended list is shown; verify intended.
            label += ", ".join([str(x) for x in prop[entity][:_MAX_PROPS_DISPLAY]])
            if len(prop[entity]) > _MAX_PROPS_DISPLAY:
                label += "<text class=\"extended_ind_props\">"
                label += ", ".join([str(x) for x in prop[entity][_MAX_PROPS_DISPLAY:]]) + "</text>"
                label += "<text class=\"extended_ind_props_dots\" style=\"display: none;\">...</text>"
            label += "</td>"
            label += "</tr>"
    label += "</tbody></table>"
    return label
def _describe_cp(cp):
    """
    Describes the given criticality phenomenon as an HTML table (tooltip content):
    start/end time, subject(s), predicate, and object(s) with their most specific
    classes.
    """
    label = "<table class=\"m-2\"><thead><tr><th>Criticality Phenomenon</th><th>" + \
            str(cp.predicate).split("(")[1].replace(")", "")
    label += "</th></tr></thead><tbody><tr><td>Start time</td><td>"
    # at_time() yields either a single time point or a (start, end) tuple.
    time = cp.at_time()
    if isinstance(time, tuple):
        label += str(time[0])
    else:
        label += str(time)
    label += "</td></tr><tr><td>End time</td><td>"
    if isinstance(time, tuple):
        label += str(time[1])
    else:
        label += str(time)
    label += "</td></tr><tr><td>Subject(s)</td><td>"
    if len(cp.subjects) > 0:
        subj_and_classes = phenomena_extraction.get_most_specific_classes(cp.subjects)
        label += "<br />".join([str(x[0]) + " (" + ", ".join(x[1]) + ")" for x in subj_and_classes])
    label += "</td></tr><tr><td>Predicate</td><td>"
    label += str(cp.predicate)
    label += "</td></tr><tr><td>Object(s)</td><td>"
    if len(cp.objects) > 0:
        # cp.objects maps an object-predicate string to a list of individuals.
        for obj_predicate in cp.objects.keys():
            obj_and_classes = phenomena_extraction.get_most_specific_classes(cp.objects[obj_predicate])
            label += obj_predicate + ":<br/>" + "<br />".join([str(x[0]) + " (" + ", ".join(x[1]) + ")" for x in
                                                               obj_and_classes])
            if len(cp.objects.keys()) > 1:
                label += "<br/>"
    label += "</td></tr>"
    label += "</tbody></table>"
    return label
#################
# MPLD3 Plugins #
#################
class ToolTipAndClickInfo(mpld3.plugins.PointHTMLTooltip):
    # Handles:
    # 1. the criticality phenomena toggling when clicking on CP subjects (red circles)
    # 2. the mouse-overs when hovering over subjects
    # 3. the Ctrl+Click new window action when clicking on subjects
    #
    # The JAVASCRIPT payload registers an mpld3 plugin; it also assigns the
    # page-level globals cps / cp_targets / cp_targets_per_class / cp_predicates
    # that the iframe script block (written by visualize_scenario) reads.
    JAVASCRIPT = """
var scene_css = `""" + scene_css + """`
mpld3.register_plugin("htmltooltip", HtmlTooltipPlugin);
HtmlTooltipPlugin.prototype = Object.create(mpld3.Plugin.prototype);
HtmlTooltipPlugin.prototype.constructor = HtmlTooltipPlugin;
HtmlTooltipPlugin.prototype.requiredProps = ["id"];
HtmlTooltipPlugin.prototype.defaultProps = {labels:null,
targets_per_cp:null,
cps:null,
hoffset:0,
voffset:10,
targets:null};
function HtmlTooltipPlugin(fig, props){
mpld3.Plugin.call(this, fig, props);
};
HtmlTooltipPlugin.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id)
var labels = this.props.labels
cps = obj.elements()
cp_targets = this.props.targets
cp_targets_per_class = this.props.targets_per_cp
cp_predicates = this.props.cps
var tooltip = d3.select("body").append("div")
.attr("class", "mpld3-tooltip")
.style("position", "absolute")
.style("z-index", "10")
.style("visibility", "hidden");
function show_cp(d, i) {
if (!window.event.ctrlKey) {
for (let j = 0; j < cp_targets[i].length; j++) {
var x = mpld3.get_element(cp_targets[i][j]);
if (x) {
if ("path" in x) {
tog = x.path
} else if ("obj" in x) {
tog = x.obj
}
for (var k = 0; k < tog._groups.length; k++){
for (var l = 0; l < tog._groups[k].length; l++){
if (tog._groups[k][l].style.display === "none"){
tog._groups[k][l].style.display = "block"
} else {
tog._groups[k][l].style.display = "none"
}
}
}
}
}
}
}
obj.elements()
.on("mouseover", function(d, i) {
if (show_tooltips) {
tooltip.html(labels[i]).style("visibility", "visible");
var long_descrs = document.getElementsByClassName("extended_ind_props")
var dots_descrs = document.getElementsByClassName("extended_ind_props_dots")
for (let i = 0; i < long_descrs.length; i++) {
if(!show_long_ind) {
long_descrs[i].style.display = "none";
} else {
long_descrs[i].style.display = "inline";
}
}
for (let i = 0; i < dots_descrs.length; i++) {
if(!show_long_ind) {
dots_descrs[i].style.display = "inline";
} else {
dots_descrs[i].style.display = "none";
}
}
}
})
.on("mousemove", function(d, i) {
tooltip
.style("top", d3.event.pageY + this.props.voffset + "px")
.style("left",d3.event.pageX + this.props.hoffset + "px");
}.bind(this))
.on("mousedown.callout", show_cp)
.on("mouseout", function(d, i){
tooltip.style("visibility", "hidden");
})
.on("click", function(d, i) {
if (window.event.ctrlKey) {
var newWindow = window.open();
newWindow.document.write(
`<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">` + scene_css + tooltip.html(labels[i])._groups[0][0].innerHTML
);
}
});
};
"""

    def __init__(self, points, labels=None, targets=None, targets_per_cp=None, hoffset=0, voffset=10, css=None):
        """
        :param points: The matplotlib artist whose elements get the tooltips.
        :param labels: Per-point HTML tooltip strings (from _describe_entity).
        :param targets: Per-point lists of artists (arrows/annotations) toggled on click.
        :param targets_per_cp: Dict mapping CP predicate -> list of artists for that predicate.
        :param hoffset, voffset: Tooltip offset in pixels relative to the cursor.
        :param css: Optional extra CSS passed through to PointHTMLTooltip.
        """
        # Convert per-point target artists into mpld3 element ids (JSON-serializable).
        targets_ = []
        for x in targets or []:
            x_ = []
            for y in x:
                x_.append(mpld3.utils.get_id(y))
            targets_.append(x_)
        self.targets_per_cp = []
        self.cps = []
        if targets_per_cp:
            # Natural sort keeps the predicate order consistent with the checkbox
            # list generated in visualize_scenario.
            self.cps = sorted(targets_per_cp.keys(), key=natural_sort_key)
            for cp in self.cps:
                x_ = []
                for y in targets_per_cp[cp]:
                    x_.append(mpld3.utils.get_id(y))
                self.targets_per_cp.append(x_)
        super().__init__(points, labels, targets_, hoffset, voffset, css)
        # Extra props consumed by the JAVASCRIPT payload above.
        self.dict_["targets_per_cp"] = self.targets_per_cp
        self.dict_["cps"] = self.cps
class CPTooltip(mpld3.plugins.PluginBase):
    # Handles the Ctrl+Click action on criticality phenomena ID (opens a new tab),
    # plus hover tooltips on the CP label annotations themselves.
    JAVASCRIPT = """
var scene_css = `""" + scene_css + """`
mpld3.register_plugin("cpstooltip", CPTooltip);
CPTooltip.prototype = Object.create(mpld3.Plugin.prototype);
CPTooltip.prototype.constructor = CPTooltip;
CPTooltip.prototype.requiredProps = ["id", "tooltip_html"];
function CPTooltip(fig, props){
mpld3.Plugin.call(this, fig, props);
};
CPTooltip.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id);
var tooltip_html = this.props.tooltip_html;
var tooltip = d3.select("body").append("div")
.attr("class", "cp-tooltip")
.style("position", "absolute")
.style("z-index", "10")
.style("visibility", "hidden");
obj.obj._groups[0][0].onmouseover = function(d, i) {
tooltip.html(tooltip_html).style("visibility", "visible");
};
obj.obj._groups[0][0].onmousemove = function(d, i) {
tooltip
.style("top", d.clientY + 10 + "px")
.style("left", d.clientX + 0 + "px");
}.bind(this);
obj.obj._groups[0][0].onclick = function(d, i) {
if (window.event.ctrlKey) {
var newWindow = window.open();
newWindow.document.write(
`<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">` + scene_css + tooltip_html
);
}
};
obj.obj._groups[0][0].onmouseout = function(d, i) {
tooltip.style("visibility", "hidden");
};
}
"""

    def __init__(self, text, cp):
        """
        :param text: The matplotlib annotation (CP label) to attach the tooltip to.
        :param cp: The criticality phenomenon rendered into the tooltip HTML.
        """
        # NOTE(review): PluginBase.__init__ is not called here; self.dict_ is set
        # directly instead — appears to be the established pattern for this class,
        # but confirm against the mpld3 plugin API.
        tooltip_html = _describe_cp(cp)
        self.dict_ = {"type": "cpstooltip",
                      "id": mpld3.utils.get_id(text),
                      "tooltip_html": tooltip_html}
def _has_collision_with_bbs(existing_bbs, new_bb):
"""
Checks if the new rectangle (new_bb) collides with some existing rectangles.
"""
a_left = min([x[0] for x in new_bb])
a_right = max([x[0] for x in new_bb])
a_bottom = min([x[1] for x in new_bb])
a_top = max([x[1] for x in new_bb])
for bb in existing_bbs:
b_left = min([x[0] for x in bb])
b_right = max([x[0] for x in bb])
b_bottom = min([x[1] for x in bb])
b_top = max([x[1] for x in bb])
if a_left <= b_right and b_left <= a_right and a_top >= b_bottom and b_top >= a_bottom:
return True
return False
| auto/auto_visualizer/auto_visualizer.py | 41,090 | Describes the given traffic entity as an HTML list.
:param entity: An object of an owlready2 class.
:return: The HTML-representation of entity.
Checks if the new rectangle (new_bb) collides with some existing rectangles.
Creates an HTML visualization of the given scenario. Starts a simple web server at localhost:8000 (blocking).
:param scenario: Either a list of worlds, each world representing a single scene or a single world representing a
whole scenario
:param cps: A list of criticality phenomena which optionally to visualize as well.
:return: The path to the directory in which to find the created HTML visualization.
Visualizer is for debugging purposes only TODO - visualize scenario level CPs - show has distance to in table for each individual - as ternary relations - instead of omitting it Config constants Classes to not show in visualization Data/object properties to hide from the individual tables shown when hovering If one hides long property lists, this is the number after which the list is cut off Logging Helper function for sorting CPs & individuals CSS Scene CSS (added is iframes to scenario HTML) Scenario CSS (main CSS) Fetch scene list Assemble scenario title Main HTML code for index.html Fetches a different color each time, but ensures that it has a readable contrast. Create HTML for each scene Plot CPs init dict Finds a free space to plot label Assemble main HTML Write main HTML to index.html Write each scene HTML to a single file Starts webserver MPLD3 Plugins Handles: 1. the criticality phenomena toggling when clicking on CP subjects (red circles) 2. the mouse-overs when hovering over subjects 3. the Ctrl+Click new window action when clicking on subjects Handles the Ctrl+Click action on criticality phenomena ID (opens a new tab). | 1,781 | en | 0.767776 |
# coding: utf-8
"""
SendinBlue API
SendinBlue provide a RESTFul API that can be used with any languages. With this API, you will be able to : - Manage your campaigns and get the statistics - Manage your contacts - Send transactional Emails and SMS - and much more... You can download our wrappers at https://github.com/orgs/sendinblue **Possible responses** | Code | Message | | :-------------: | ------------- | | 200 | OK. Successful Request | | 201 | OK. Successful Creation | | 202 | OK. Request accepted | | 204 | OK. Successful Update/Deletion | | 400 | Error. Bad Request | | 401 | Error. Authentication Needed | | 402 | Error. Not enough credit, plan upgrade needed | | 403 | Error. Permission denied | | 404 | Error. Object does not exist | | 405 | Error. Method not allowed | # noqa: E501
OpenAPI spec version: 3.0.0
Contact: contact@sendinblue.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import sib_api_v3_sdk
from sib_api_v3_sdk.models.get_extended_contact_details_statistics import GetExtendedContactDetailsStatistics # noqa: E501
from sib_api_v3_sdk.rest import ApiException
class TestGetExtendedContactDetailsStatistics(unittest.TestCase):
"""GetExtendedContactDetailsStatistics unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGetExtendedContactDetailsStatistics(self):
"""Test GetExtendedContactDetailsStatistics"""
# FIXME: construct object with mandatory attributes with example values
# model = sib_api_v3_sdk.models.get_extended_contact_details_statistics.GetExtendedContactDetailsStatistics() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| test/test_get_extended_contact_details_statistics.py | 1,842 | GetExtendedContactDetailsStatistics unit test stubs
Test GetExtendedContactDetailsStatistics
SendinBlue API
SendinBlue provide a RESTFul API that can be used with any languages. With this API, you will be able to : - Manage your campaigns and get the statistics - Manage your contacts - Send transactional Emails and SMS - and much more... You can download our wrappers at https://github.com/orgs/sendinblue **Possible responses** | Code | Message | | :-------------: | ------------- | | 200 | OK. Successful Request | | 201 | OK. Successful Creation | | 202 | OK. Request accepted | | 204 | OK. Successful Update/Deletion | | 400 | Error. Bad Request | | 401 | Error. Authentication Needed | | 402 | Error. Not enough credit, plan upgrade needed | | 403 | Error. Permission denied | | 404 | Error. Object does not exist | | 405 | Error. Method not allowed | # noqa: E501
OpenAPI spec version: 3.0.0
Contact: contact@sendinblue.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
coding: utf-8 noqa: E501 FIXME: construct object with mandatory attributes with example values model = sib_api_v3_sdk.models.get_extended_contact_details_statistics.GetExtendedContactDetailsStatistics() noqa: E501 | 1,269 | en | 0.631238 |
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class Proj2062SpiderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, or item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request or item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesnât have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class Proj2062DownloaderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| support_files/scraping/entries/proj_2062/proj_2062/middlewares.py | 3,652 | Define here the models for your spider middleware See documentation in: https://docs.scrapy.org/en/latest/topics/spider-middleware.html useful for handling different item types with a single interface Not all methods need to be defined. If a method is not defined, scrapy acts as if the spider middleware does not modify the passed objects. This method is used by Scrapy to create your spiders. Called for each response that goes through the spider middleware and into the spider. Should return None or raise an exception. Called with the results returned from the Spider, after it has processed the response. Must return an iterable of Request, or item objects. Called when a spider or process_spider_input() method (from other spider middleware) raises an exception. Should return either None or an iterable of Request or item objects. Called with the start requests of the spider, and works similarly to the process_spider_output() method, except that it doesnât have a response associated. Must return only requests (not items). Not all methods need to be defined. If a method is not defined, scrapy acts as if the downloader middleware does not modify the passed objects. This method is used by Scrapy to create your spiders. Called for each request that goes through the downloader middleware. Must either: - return None: continue processing this request - or return a Response object - or return a Request object - or raise IgnoreRequest: process_exception() methods of installed downloader middleware will be called Called with the response returned from the downloader. Must either; - return a Response object - return a Request object - or raise IgnoreRequest Called when a download handler or a process_request() (from other downloader middleware) raises an exception. 
Must either: - return None: continue processing this exception - return a Response object: stops process_exception() chain - return a Request object: stops process_exception() chain | 1,963 | en | 0.869042 |
import functools as ft
import inspect
from typing import Any, Callable, Dict, Iterable, List, Optional, Union
import pydantic
from . import base
class PydanticValidator(base.BaseValidator):
"""
Parameters validator based on `pydantic <https://pydantic-docs.helpmanual.io/>`_ library.
Uses python type annotations for parameters validation.
:param coerce: if ``True`` returns converted (coerced) parameters according to parameter type annotation
otherwise returns parameters as is
"""
def __init__(self, coerce: bool = True, **config_args: Any):
self._coerce = coerce
config_args.setdefault('extra', 'forbid')
# https://pydantic-docs.helpmanual.io/usage/model_config/
self._model_config = type('ModelConfig', (pydantic.BaseConfig,), config_args)
def validate_method(
self, method: Callable, params: Optional[Union[list, dict]], exclude: Iterable[str] = (), **kwargs: Any,
) -> Dict[str, Any]:
"""
Validates params against method using ``pydantic`` validator.
:param method: method to validate parameters against
:param params: parameters to be validated
:param exclude: parameter names to be excluded from validation
:returns: coerced parameters if `coerce` flag is ``True`` otherwise parameters as is
:raises: ValidationError
"""
signature = self.signature(method, exclude)
schema = self.build_validation_schema(signature)
params_model = pydantic.create_model(method.__name__, **schema, __config__=self._model_config)
bound_params = self.bind(signature, params)
try:
obj = params_model(**bound_params.arguments)
except pydantic.ValidationError as e:
raise base.ValidationError(*e.errors()) from e
return {attr: getattr(obj, attr) for attr in obj.__fields_set__} if self._coerce else bound_params.arguments
@ft.lru_cache(maxsize=None)
def build_validation_schema(self, signature: inspect.Signature) -> Dict[str, Any]:
"""
Builds pydantic model based validation schema from method signature.
:param signature: method signature to build schema for
:returns: validation schema
"""
field_definitions = {}
for param in signature.parameters.values():
if param.kind is inspect.Parameter.VAR_KEYWORD:
field_definitions[param.name] = (
Optional[Dict[str, param.annotation]] if param.annotation is not inspect.Parameter.empty else Any,
param.default if param.default is not inspect.Parameter.empty else None,
)
elif param.kind is inspect.Parameter.VAR_POSITIONAL:
field_definitions[param.name] = (
Optional[List[param.annotation]] if param.annotation is not inspect.Parameter.empty else Any,
param.default if param.default is not inspect.Parameter.empty else None,
)
else:
field_definitions[param.name] = (
param.annotation if param.annotation is not inspect.Parameter.empty else Any,
param.default if param.default is not inspect.Parameter.empty else ...,
)
return field_definitions
| xjsonrpc/server/validators/pydantic.py | 3,352 | Parameters validator based on `pydantic <https://pydantic-docs.helpmanual.io/>`_ library.
Uses python type annotations for parameters validation.
:param coerce: if ``True`` returns converted (coerced) parameters according to parameter type annotation
otherwise returns parameters as is
Builds pydantic model based validation schema from method signature.
:param signature: method signature to build schema for
:returns: validation schema
Validates params against method using ``pydantic`` validator.
:param method: method to validate parameters against
:param params: parameters to be validated
:param exclude: parameter names to be excluded from validation
:returns: coerced parameters if `coerce` flag is ``True`` otherwise parameters as is
:raises: ValidationError
https://pydantic-docs.helpmanual.io/usage/model_config/ | 844 | en | 0.466695 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pytest
import re
from urllib.parse import urljoin
from requests import get
from time import sleep
from datetime import datetime
from pages.webview.about_this_book import AboutBook
from pages.webview.content_page import ContentPage
from tests import markers
from pages.webview.home import Home
from pages.webview.content import Content
from tests.utils import similar
@markers.webview
@markers.test_case("C193738")
@markers.nondestructive
@markers.parametrize(
    "is_archive,path,expected_response_status_code",
    [
        # FIXME Requires varnish
        # (False, '/content/col23946', 301),
        (True, "/content/col23946", 301),
        # FIXME Requires varnish
        # (False, '/content/col23946/1.1', 301),
        (True, "/content/col23946/1.1", 301),
        (False, "/contents/4eaa8f03-88a8-485a-a777-dd3602f6c13e", 200),
        (True, "/contents/4eaa8f03-88a8-485a-a777-dd3602f6c13e", 302),
        (
            False,
            (
                "/contents/4eaa8f03-88a8-485a-a777-dd3602f6c13e"
                ":7d039be2-93c6-4f32-a469-41689bab7225"
            ),
            200,
        ),
        (
            True,
            (
                "/contents/4eaa8f03-88a8-485a-a777-dd3602f6c13e"
                ":7d039be2-93c6-4f32-a469-41689bab7225"
            ),
            302,
        ),
        (False, "/contents/4eaa8f03-88a8-485a-a777-dd3602f6c13e@1.1", 200),
        (True, "/contents/4eaa8f03-88a8-485a-a777-dd3602f6c13e@1.1", 200),
        (
            False,
            (
                "/contents/4eaa8f03-88a8-485a-a777-dd3602f6c13e@1.1"
                ":7d039be2-93c6-4f32-a469-41689bab7225"
            ),
            200,
        ),
        (
            False,
            (
                "/contents/4eaa8f03-88a8-485a-a777-dd3602f6c13e@1.1"
                ":7d039be2-93c6-4f32-a469-41689bab7225@5"
            ),
            200,
        ),
        (False, "/contents/TqqPA4io", 200),
        (True, "/contents/TqqPA4io", 302),
        (False, "/contents/TqqPA4io:fQOb4pPG", 200),
        (True, "/contents/TqqPA4io:fQOb4pPG", 302),
        (False, "/contents/TqqPA4io@1.157", 200),
        (True, "/contents/TqqPA4io@1.157", 301),
        (False, "/contents/TqqPA4io@1.157:fQOb4pPG", 200),
        (True, "/contents/TqqPA4io@1.157:fQOb4pPG", 301),
        (False, "/contents/TqqPA4io@1.157:fQOb4pPG@5", 200),
        (True, "/contents/TqqPA4io@1.157:fQOb4pPG@5", 301),
    ],
)
def test_content_status_codes(
    webview_base_url, archive_base_url, is_archive, path, expected_response_status_code
):
    """Each content path answers with the expected HTTP status (redirect or 200)."""
    # GIVEN some URL and the expected status code
    base = archive_base_url if is_archive else webview_base_url
    url = urljoin(base, path)

    # WHEN we visit the URL (redirects NOT followed, so we see the raw status)
    # NOTE: Don't bother trying to get status codes using Selenium
    # https://github.com/seleniumhq/selenium-google-code-issue-archive/issues/141
    response = get(url, allow_redirects=False)

    # THEN we get the expected status code
    assert response.status_code == expected_response_status_code
@markers.webview
@markers.test_case("C194465")
@markers.nondestructive
@markers.parametrize(
    "id",
    [
        "FqtblkWY@2.1:E3XenWEQ",
        "FqtblkWY@2.1:E3XenWEQ@2",
        "FqtblkWY@2.1:rKBtkIWG@2",
        "TqqPA4io@1.1:fQOb4pPG@2",
    ],
)
def test_canonical_link_is_correct(webview_base_url, selenium, id):
    """Visiting a page's canonical URL lands on the same section."""
    # GIVEN a book's content page
    content = Content(selenium, webview_base_url, id=id).open()
    original_section_title = content.section_title

    # WHEN the book's canonical url is visited
    selenium.get(content.canonical_url)
    content.wait_for_page_to_load()

    # THEN we end up in the same page
    # NOTE: we compare section titles instead of urls because the canonical link seems to
    #       take us to the latest version of the content, no matter which version we started on
    # NOTE: newer versions of the book may not have the section number, so we check for
    #       containment in the original title rather than equality
    assert content.section_title in original_section_title
@markers.webview
@markers.test_case("C176232", "C176233")
@markers.nondestructive
def test_navs_and_elements_are_displayed(webview_base_url, selenium):
    """The site navbar, content nav and section title all render on a book page."""
    # GIVEN the home page
    home = Home(selenium, webview_base_url).open()

    # WHEN a book is clicked
    book = home.featured_books.openstax_list[59]
    content = book.click_book_cover()

    # THEN the site navbar and content nav are displayed
    assert content.header.is_nav_displayed

    content_header = content.content_header
    assert content_header.is_displayed
    assert content_header.is_title_displayed
    assert content_header.is_book_by_displayed
    assert content_header.is_share_displayed

    nav = content_header.nav
    assert nav.is_contents_button_displayed
    assert nav.is_searchbar_displayed
    assert nav.is_back_link_displayed
    assert nav.is_progress_bar_displayed
    assert nav.is_next_link_displayed

    assert content.is_section_title_displayed

    # The section title sits on top of the main content section (white area)
    content_area = content.main_content_section
    title_pos = content.section_title_div_location
    title_size = content.section_title_div_size
    area_left = content_area.location["x"]
    area_top = content_area.location["y"]
    area_right = area_left + content_area.size["width"]
    area_bottom = area_top + content_area.size["height"]

    # Section title is fully inside the main content section...
    assert title_pos["x"] >= area_left
    assert title_pos["y"] >= area_top
    assert title_pos["x"] + title_size["width"] <= area_right
    assert title_pos["y"] + title_size["height"] <= area_bottom

    # ...and flush against its top edge
    assert title_pos["y"] - area_top <= title_size["height"]
@markers.webview
@markers.test_case("C132542")
@markers.nondestructive
def test_author_contains_openstax(webview_base_url, selenium):
    """OpenStax is listed among the displayed authors of a featured book."""
    # GIVEN the home page and a book
    home = Home(selenium, webview_base_url).open()
    book = home.featured_books.openstax_list[59]

    # WHEN the book's cover is clicked
    content = book.click_book_cover()

    # THEN the displayed author is OpenStax
    header = content.content_header
    assert header.is_book_by_displayed
    assert header.are_authors_displayed
    assert "OpenStax" in header.authors
@markers.webview
@markers.test_case("C176242")
@markers.nondestructive
def test_toc_is_displayed(webview_base_url, selenium):
    """The table of contents opens and is populated with chapters and pages."""
    # GIVEN a book's content page
    home = Home(selenium, webview_base_url).open()
    content = home.featured_books.openstax_list[59].click_book_cover()

    # WHEN the contents button is clicked
    content.header_nav.click_contents_button()
    toc = content.table_of_contents

    # THEN the table of contents is displayed with at least one chapter and page
    assert toc.is_displayed
    assert toc.number_of_chapters > 0
    assert toc.number_of_pages > 0
@markers.webview
@markers.smoke
@markers.test_case("C176243", "C176244")
@markers.nondestructive
def test_toc_navigation(webview_base_url, selenium):
    """Navigating via the table of contents loads the selected page."""
    # GIVEN a book's table of contents
    home = Home(selenium, webview_base_url).open()
    content = home.featured_books.openstax_list[59].click_book_cover()
    content.header_nav.click_contents_button()
    toc = content.table_of_contents

    # WHEN a chapter is expanded and we navigate to one of its pages
    chapter = toc.chapters[0].click()
    page = chapter.pages[1]
    expected_section = page.chapter_section
    expected_title = page.title
    content = page.click()

    # THEN we end up at the correct page
    assert type(content) is Content
    assert content.chapter_section == expected_section
    assert content.section_title == expected_title
@markers.webview
@markers.smoke
@markers.test_case("C176257")
@markers.nondestructive
def test_share_on_top_right_corner(webview_base_url, selenium):
    """Social share links appear in the top-right quadrant of a book page."""
    # GIVEN the home page
    home = Home(selenium, webview_base_url).open()

    # WHEN a book is clicked
    content = home.featured_books.openstax_list[59].click_book_cover()

    # THEN social share links are displayed in the top right corner
    share = content.share
    assert share.is_displayed
    assert share.is_facebook_share_link_displayed
    assert share.is_twitter_share_link_displayed

    window = selenium.get_window_size()
    root = content.share.root
    # Vertically in the top half of the window
    assert root.location["y"] + root.size["height"] < window["height"] / 2
    # Horizontally in the right half of the window
    assert root.location["x"] > window["width"] / 2
@markers.webview
@markers.smoke
@markers.test_case("C132549", "C175148")
@markers.nondestructive
@markers.parametrize(
    "uuid,query,has_results,result_index,has_os_figures,has_os_tables",
    [
        (
            "36004586-651c-4ded-af87-203aca22d946",
            "mitosis genetics gorilla",
            False,
            None,
            None,
            None,
        ),
        ("bb62933e-f20a-4ffc-90aa-97b36c296c3e", "Fizyka", True, 1, True, False),
        ("bb62933e-f20a-4ffc-90aa-97b36c296c3e", "Tabela", True, 3, True, True),
    ],
)
def test_in_book_search(
    webview_base_url,
    selenium,
    uuid,
    query,
    has_results,
    result_index,
    has_os_figures,
    has_os_tables,
):
    """In-book search bolds matches, links to content, and labels figures/tables."""
    # GIVEN a book's content page and a query
    content = Content(selenium, webview_base_url, id=uuid).open()

    # WHEN we search the book for the given query
    search_results = content.header_nav.search(query)

    # THEN search results are present (or not) and bolded and link to the matching content
    results = search_results.results
    result_count = search_results.result_count
    assert len(results) == result_count

    if not has_results:
        assert result_count == 0
        return

    assert result_count > 0

    # Every occurrence of every search term must be bolded in every result
    search_terms = query.split()
    for hit in results:
        for term in search_terms:
            assert hit.count_occurrences(term) == hit.count_bold_occurrences(term)

    selected = results[result_index]
    expected_title = selected.title
    content = selected.click_link()
    assert content.section_title == expected_title

    # Figures and tables on the target page are labeled and numbered as expected
    region = content.content_region
    assert region.has_os_figures == has_os_figures
    for figure in region.os_figures:
        assert figure.caption.is_labeled
        assert figure.caption.is_numbered

    assert region.has_os_tables == has_os_tables
    for table in region.os_tables:
        assert table.caption.is_labeled
        assert table.caption.is_numbered
@markers.webview
@markers.smoke
@markers.test_case("C176258", "C176259", "C176261")
@markers.nondestructive
def test_share_links_displayed(webview_base_url, selenium):
    """Social share buttons point at the sharer URLs for the current page."""
    # GIVEN the home page
    home = Home(selenium, webview_base_url).open()

    # WHEN a book is clicked
    content = home.featured_books.openstax_list[32].click_book_cover()

    # THEN social share links have the expected urls
    page_url = selenium.current_url
    share = content.share
    assert share.facebook_share_url == f"https://facebook.com/sharer/sharer.php?u={page_url}"
    assert f"https://twitter.com/share?url={page_url}" in share.twitter_share_url
    assert (
        f"https://www.linkedin.com/shareArticle?mini=true&url={page_url}"
        in share.linkedin_share_url
    )
@markers.webview
@markers.test_case("C193880")
@markers.nondestructive
@markers.parametrize("id", ["u2KTPvIK@3.30:qVb4K8xR@3"])
def test_newer_version_leads_to_correct_page(webview_base_url, selenium, id):
    """The newer-version link goes to the same section in a later book version."""
    # GIVEN the content page
    content = Content(selenium, webview_base_url, id=id).open()
    old_version = content.book_version
    old_section_title = content.section_title

    # WHEN the newer version link is clicked
    content = content.click_newer_version_link()

    # THEN we end up in a newer version of the same page
    assert content.section_title == old_section_title
    assert content.book_version > old_version
@markers.webview
@markers.smoke
@markers.test_case("C176234")
@markers.nondestructive
def test_get_this_book(webview_base_url, selenium):
    """The "Get This Book!" download options agree with the footer's Downloads tab."""
    # GIVEN a book's content page
    home = Home(selenium, webview_base_url).open()
    book = home.featured_books.openstax_list[32]
    content = book.click_book_cover()

    # WHEN we click the "Get This Book!" button
    has_button = content.is_get_this_book_button_displayed
    if has_button:
        get_this_book = content.click_get_this_book_button()
        has_pdf = get_this_book.is_pdf_link_displayed
        has_zip = get_this_book.is_offline_zip_link_displayed

    # THEN links to download the pdf, epub and offline zip versions are displayed
    # Look at the footer to see which downloads should have been available
    downloads = content.content_footer.click_downloads_tab()

    if not has_button:
        # Guard clause: without the button there must be nothing to download
        # (pytest.skip raises, so nothing below runs in this case)
        assert not downloads.is_any_available
        pytest.skip('No files available to download: "Get This Book!" button not present.')

    assert has_pdf or has_zip
    # Check that the footer agrees with the button's options
    if has_pdf:
        assert downloads.is_pdf_available
    if has_zip:
        assert downloads.is_offline_zip_available
@markers.webview
@markers.smoke
@markers.test_case("C195074")
@markers.nondestructive
@markers.parametrize("id", ["u2KTPvIK@3.1:Zv6FJYpb@3"])
def test_page_with_unicode_characters_in_title_loads(webview_base_url, selenium, id):
    """A page whose title contains unicode loads once and does not reload itself."""
    # GIVEN the webview base url, the Selenium driver and the id of a page whose title has unicode
    page = Content(selenium, webview_base_url, id=id)

    # WHEN we load the content page
    page = page.open()
    # Sanity check: the page must contain a figure element
    assert page.content_region.has_figures

    # THEN the page does not reload afterwards
    # Give the page 10 seconds in which a spurious reload could occur
    sleep(10)
    # Re-querying the figures would raise StaleElementReferenceException on a reload
    assert page.content_region.os_figures
@markers.xfail
@markers.webview
@markers.smoke
@markers.test_case("C176236")
@markers.nondestructive
def test_content_and_figures_display_after_scrolling(webview_base_url, selenium):
    """A figure further down the page is displayed once scrolled into view."""
    # Expected to fail: we ran out of non-redirecting collections
    # with figures on the main page
    # GIVEN a book's content page with figures
    home = Home(selenium, webview_base_url).open()
    content_page = home.featured_books.openstax_list[60].click_book_cover()
    region = content_page.content_region
    assert not region.is_blank
    assert region.has_figures

    # WHEN we scroll to a figure
    first_figure = region.figures[0]
    region.scroll_to(first_figure)

    # THEN some figure is displayed
    assert first_figure.is_displayed()
@markers.webview
@markers.smoke
@markers.test_case("C176235", "C176237")
@markers.nondestructive
def test_nav_and_menus_display_after_scrolling(webview_base_url, selenium):
    """After scrolling to the bottom, the content nav pins on top and the footer shows."""
    # GIVEN a book's content page
    home = Home(selenium, webview_base_url).open()
    content = home.featured_books.openstax_list[59].click_book_cover()
    header = content.content_header
    initial_header_y = header.root.location["y"]

    # WHEN we scroll to the bottom
    content.footer.scroll_to()
    footer = content.content_footer

    # THEN - the header nav is offscreen but still considered displayed
    #      - the content nav is displayed on top without the site navbar or any social links
    assert content.header.is_nav_displayed

    assert header.is_displayed
    assert header.is_title_displayed
    assert header.is_book_by_displayed
    assert not header.is_share_displayed

    nav = header.nav
    assert nav.is_contents_button_displayed
    assert nav.is_searchbar_displayed
    assert nav.is_back_link_displayed
    assert nav.is_progress_bar_displayed
    assert nav.is_next_link_displayed

    assert content.is_section_title_displayed

    share = content.share
    assert not share.is_displayed
    assert not share.is_facebook_share_link_displayed
    assert not share.is_twitter_share_link_displayed

    # The footer is displayed at the bottom
    assert footer.is_displayed
    assert footer.is_downloads_tab_displayed
    assert footer.is_history_tab_displayed
    assert footer.is_attribution_tab_displayed
    assert footer.is_more_information_tab_displayed

    # Hard to check that the content header is actually on top after scrolling, but we
    # can check that it has the pinned class, moved down, and sits above the footer
    assert header.is_pinned
    assert not header.is_opened
    assert not header.is_closed
    assert header.root.location["y"] > initial_header_y
    assert header.root.location["y"] < footer.root.location["y"]
@markers.webview
@markers.smoke
@markers.test_case("C195232")
@markers.nondestructive
@markers.parametrize("width,height", [(480, 640)])
def test_mobile_nav_and_menus_hide_after_scrolling(webview_base_url, selenium, width, height):
    """On a mobile-sized window the pinned content nav hides when scrolling down
    and reappears when scrolling up.

    Scrolls to the bottom, up, then down again, checking the content header's
    pinned/opened/closed state and vertical position at each step.
    """
    # width/height are not read here; presumably a fixture uses them to size the
    # browser window via the parametrization — TODO confirm
    # GIVEN a book's content page
    home = Home(selenium, webview_base_url).open()
    book = home.featured_books.openstax_list[59]
    content = book.click_book_cover()
    content_header = content.content_header
    original_content_header_y = content_header.root.location["y"]

    def assert_navs_displayed_without_share():
        # Invariant at every scroll position: the header nav is offscreen but still
        # considered displayed, every content nav element is present, and neither
        # the site navbar's share section nor any social links are shown.
        assert content.header.is_nav_displayed

        assert content_header.is_displayed
        assert content_header.is_title_displayed
        assert content_header.is_book_by_displayed
        assert not content_header.is_share_displayed

        header_nav = content_header.nav
        assert header_nav.is_contents_button_displayed
        assert header_nav.is_searchbar_displayed
        assert header_nav.is_back_link_displayed
        assert header_nav.is_progress_bar_displayed
        assert header_nav.is_next_link_displayed

        assert content.is_section_title_displayed

        share = content.share
        assert not share.is_displayed
        assert not share.is_facebook_share_link_displayed
        assert not share.is_twitter_share_link_displayed

    # WHEN we scroll to the bottom
    content.footer.scroll_to()

    # THEN the content nav is offscreen, not pinned, and has not moved
    assert_navs_displayed_without_share()
    assert not content_header.is_pinned
    assert content_header.root.location["y"] == original_content_header_y

    # WHEN we scroll up
    content.scroll_up()

    # THEN the content nav is now pinned, opened, and onscreen (moved down)
    assert_navs_displayed_without_share()
    assert content_header.is_pinned
    assert content_header.is_opened
    assert not content_header.is_closed
    previous_content_header_y = content_header.root.location["y"]
    assert previous_content_header_y > original_content_header_y

    # WHEN we scroll down again
    content.scroll_down()

    # THEN the content nav is pinned but closed and offscreen again (moved further down)
    assert_navs_displayed_without_share()
    assert content_header.is_pinned
    assert not content_header.is_opened
    assert content_header.is_closed
    assert content_header.root.location["y"] > previous_content_header_y
@markers.webview
@markers.smoke
@markers.test_case("C162171")
@markers.nondestructive
def test_attribution(webview_base_url, selenium):
    """The attribution tab opens and names the support contact email."""
    # GIVEN a book's content page
    home = Home(selenium, webview_base_url).open()
    content = home.featured_books.openstax_list[59].click_book_cover()

    # WHEN we click the attribution tab
    attribution = content.content_footer.click_attribution_tab()

    # THEN the attribution is displayed and has the correct support email
    assert attribution.is_displayed
    assert (
        "For questions regarding this license, please contact support@openstax.org."
        in attribution.text
    )
@markers.webview
@markers.smoke
@markers.test_case("C176241")
@markers.nondestructive
def test_back_to_top(webview_base_url, selenium):
    """The footer's back-to-top link restores the unscrolled page state."""
    # GIVEN a book's scrolled content page
    home = Home(selenium, webview_base_url).open()
    content = home.featured_books.openstax_list[59].click_book_cover()
    footer = content.content_footer
    header = content.content_header
    initial_header_y = header.root.location["y"]

    # WHEN we scroll to the bottom then click the back to top link
    content = footer.nav.click_back_to_top_link()

    # THEN the content page is no longer scrolled
    assert content.header.is_nav_displayed

    assert header.is_displayed
    assert header.is_title_displayed
    assert header.is_book_by_displayed
    assert header.is_share_displayed

    nav = header.nav
    assert nav.is_contents_button_displayed
    assert nav.is_searchbar_displayed
    assert nav.is_back_link_displayed
    assert nav.is_progress_bar_displayed
    assert nav.is_next_link_displayed

    assert content.is_section_title_displayed

    share = content.share
    assert share.is_displayed
    assert share.is_facebook_share_link_displayed
    assert share.is_twitter_share_link_displayed

    # The footer is offscreen, but still considered displayed
    assert footer.is_displayed
    assert footer.is_downloads_tab_displayed
    assert footer.is_history_tab_displayed
    assert footer.is_attribution_tab_displayed
    assert footer.is_more_information_tab_displayed

    # The header is no longer pinned and is back at its original position
    assert not header.is_pinned
    assert header.root.location["y"] == initial_header_y
@markers.webview
@markers.smoke
@markers.test_case("C176238", "C176239", "C176240", "C176245")
@markers.nondestructive
def test_navigation(webview_base_url, selenium):
    """Next/back links in the header and footer step through pages in order and
    keep the progress bar in sync with the table of contents."""
    # GIVEN a book's content page and a sim_ratio
    sim_ratio = 0.4  # minimum title similarity accepted by the fuzzy match below
    home = Home(selenium, webview_base_url).open()
    book = home.featured_books.openstax_list[59]
    content = book.click_book_cover()
    header_nav = content.header_nav
    header_nav.click_contents_button()
    toc = content.table_of_contents
    num_pages = toc.number_of_pages
    assert type(content) == Content
    # Introduction should be the first section loaded
    # (the literal below looks mojibake'd — presumably an emoji prefix; the
    # fuzzy `similar` comparison tolerates it)
    assert (
        content.section_title == "Introduction"
        or similar(content.section_title, "ð Inquiry Organizer") > sim_ratio
    )
    # Preface is skipped by default
    assert header_nav.progress_bar_fraction_is(2 / num_pages)
    # WHEN we navigate next twice and then back twice using the header and footer controls
    content = content.header_nav.click_next_link()
    assert type(content) == Content
    assert content.chapter_section == "1.2"
    assert header_nav.progress_bar_fraction_is(3 / num_pages)
    content = content.footer_nav.click_next_link()
    assert type(content) == Content
    assert content.chapter_section == "1.3"
    assert header_nav.progress_bar_fraction_is(4 / num_pages)
    content = content.footer_nav.click_back_link()
    assert type(content) == Content
    assert content.chapter_section == "1.2"
    assert header_nav.progress_bar_fraction_is(3 / num_pages)
    content = content.header_nav.click_back_link()
    # THEN we arrive back at the initial page
    assert header_nav.progress_bar_fraction_is(2 / num_pages)
@markers.webview
@markers.test_case("C195073")
@markers.slow
@markers.nondestructive
def test_ncy_is_not_displayed(webview_base_url, american_gov_uuid, selenium):
    """The :NOT_CONVERTED_YET marker must never appear on a fully loaded page."""
    # GIVEN an American Government content page UUID
    # WHEN that page is fully loaded via its URL
    content_page = Content(selenium, webview_base_url, id=american_gov_uuid).open()
    # THEN :NOT_CONVERTED_YET is not displayed
    assert content_page.is_ncy_displayed is False
@markers.webview
@markers.test_case("C132547", "C132548")
@markers.nondestructive
@markers.parametrize(
    "page_uuid,is_baked_book_index",
    [
        ("bb62933e-f20a-4ffc-90aa-97b36c296c3e:85036aed-fa1a-5d51-a9c2-c07ee673488d", True),
        ("6a0568d8-23d7-439b-9a01-16e4e73886b3", False),
    ],
)
def test_id_links_and_back_button(page_uuid, is_baked_book_index, webview_base_url, selenium):
    """In-page anchor/index links navigate to a URL fragment whose target element
    is visible, and the browser back button returns to the original URL."""
    # GIVEN an index page in a baked book or a page with anchor links in an unbaked book
    content_page = Content(selenium, webview_base_url, id=page_uuid).open()
    content_url = content_page.current_url
    assert "#" not in content_url
    # WHEN we click on a term (baked index) or an anchor link
    content_region = content_page.content_region
    if is_baked_book_index:
        content_page = content_region.click_index_term()
    else:
        content_page = content_region.click_anchor_link(internal_only=True)
    assert content_page.current_url.startswith(content_url)
    # THEN we end up at the linked page and the element with the same id as the link is displayed
    new_url = content_page.current_url
    assert "#" in new_url
    id = re.search("#(.+)$", new_url)[1]  # (shadows the builtin `id`; local to this test)
    assert id
    assert content_page.is_element_id_displayed(id)
    # WHEN we click the browser's back button
    content_page.back()
    # THEN we end up at the previous page
    assert content_page.current_url == content_url
@markers.webview
@markers.test_case("C181754")
@markers.nondestructive
@markers.parametrize(
    "ch_review_id", ["u2KTPvIK@3.32:6IrsWVCW", pytest.param("u2KTPvIK@3.32:aVXUrOzZ")]
)
def test_chapter_review_version_matches_book_version(webview_base_url, selenium, ch_review_id):
    """A chapter review page reports the same version as its parent book."""
    # GIVEN a chapter review id
    # WHEN we visit the chapter review page
    review_page = Content(selenium, webview_base_url, id=ch_review_id).open()
    # THEN its page version matches the book version
    assert review_page.page_version == review_page.book_version
@markers.webview
@markers.smoke
@markers.test_case("C195064")
@markers.nondestructive
@markers.parametrize("ch_review_id", ["e5fbbjPE"])
def test_books_containing_go_to_book_link(webview_base_url, selenium, ch_review_id):
    """Following the first containing-book link lands on its About this Book page."""
    # GIVEN a loaded content page and its list of containing books
    page = ContentPage(selenium, webview_base_url, id=ch_review_id).open()
    first_book = page.books_containing.book_list[0]
    # WHEN we click the link to the first book
    expected_title = first_book.title
    about_page = first_book.click_go_to_book_link
    # THEN we are on the About this Book page, it is displayed, and the title matches
    assert type(about_page) == AboutBook
    assert about_page.about_this_book_section.is_displayed
    assert about_page.title == expected_title
@markers.webview
@markers.test_case("C195063")
@markers.nondestructive
@markers.parametrize("ch_review_id", ["SjdU64Og@3"])
def test_books_containing_have_revised_date(webview_base_url, selenium, ch_review_id):
    """Every book listed as containing the page shows its revision date."""
    # GIVEN a loaded content page and its list of containing books
    page = ContentPage(selenium, webview_base_url, id=ch_review_id).open()
    containing_books = page.books_containing.book_list
    # THEN each listed book displays a revision date
    for entry in containing_books:
        assert entry.revision_date.is_displayed
@markers.webview
@markers.requires_complete_dataset
@markers.test_case("C195061")
@markers.nondestructive
@markers.parametrize("page_id", ["BWYBGK7C@2"])
def test_books_containing_title_not_limited(webview_base_url, selenium, page_id):
    """Containing-book titles are rendered in full, never truncated."""
    # GIVEN a loaded content page and its list of containing books
    page = ContentPage(selenium, webview_base_url, id=page_id).open()
    # THEN no book title is truncated with ellipses
    for entry in page.books_containing.book_list:
        assert "..." not in entry.title
@markers.webview
@markers.requires_complete_dataset
@markers.test_case("C195057", "C195058", "C195059", "C195072")
@markers.nondestructive
@markers.parametrize("page_id", ["mjO9LQWq@1", "bJs8AcSE@1", "4fGVMb7P@1"])
def test_books_containing_message_is_correct(webview_base_url, selenium, page_id):
    """The side-nav overview text matches the number of containing books."""
    # GIVEN a loaded content page, its containing-book count, and the overview text
    page = ContentPage(selenium, webview_base_url, id=page_id).open()
    containing_books = page.books_containing.book_list
    book_count = len(containing_books)
    overview_text = page.books_containing.overview
    # THEN the overview message matches the count (len() is never negative,
    # so these three cases are exhaustive)
    if book_count == 0:
        assert overview_text == "This page is not in any books."
    elif book_count == 1:
        assert overview_text == "This page is in this book:"
    else:
        assert overview_text == f"This page is in {book_count} books:"
@markers.webview
@markers.test_case("C195062")
@markers.nondestructive
@markers.parametrize("page_id", ["SjdU64Og@3"])
def test_books_containing_have_authors(webview_base_url, selenium, page_id):
    """Every book listed as containing the page displays its author."""
    # GIVEN a loaded content page and its list of containing books
    page = ContentPage(selenium, webview_base_url, id=page_id).open()
    # THEN the author of each listed book is displayed
    for entry in page.books_containing.book_list:
        # NOTE(review): `is_displayed()` is called here while sibling tests read
        # `is_displayed` as a property — confirm which form this region exposes.
        assert entry.author.is_displayed()
@markers.webview
@markers.requires_complete_dataset
@markers.test_case("C195065")
@markers.nondestructive
@markers.parametrize("page_id", ["HOATLqlR@5"])
def test_books_containing_list_in_sorted_order(webview_base_url, selenium, page_id):
    """Containing books list the main author's books first, then the rest
    sorted by revision date in decreasing order."""
    # GIVEN the webview base url, page_id, and the Selenium driver
    # WHEN we visit that page of the chapter and we have a list of books containing page
    content = Content(selenium, webview_base_url, id=page_id).open()
    # AND store the main author
    main_author = content.content_header.authors
    # AND Save list of authors and dates
    content = ContentPage(selenium, webview_base_url, id=page_id).open()
    dates = content.books_containing.date_list
    author = content.books_containing.author_list
    # THEN main author should be the author of the first book listed
    assert author[0][0] == main_author
    # AND if there are more books with main author, they should be listed first
    # Skip past the leading run of main-author books.
    # NOTE(review): the `len(author) - 1` bound never inspects the last entry —
    # confirm this is intentional and not an off-by-one.
    i = 1
    while i < len(author) - 1 and author[i][0] == main_author:
        i += 1
    # AND for the rest of the books, the revision dates are sorted in decreasing order
    date_list = []
    for date in dates[i:]:
        date_list.append(datetime.strptime(date[0], "%b %d, %Y"))
    assert date_list == sorted(date_list, reverse=True)
@markers.webview
@markers.smoke
@markers.requires_complete_dataset
@markers.test_case("C195055")
@markers.nondestructive
@markers.parametrize("page_id", ["4fGVMb7P@1"])
def test_books_containing_button_toggles_and_labelled_books(webview_base_url, selenium, page_id):
    """On a standalone page the ToC button reads "Books" and toggles the
    "This page is in # books" side nav open and closed."""
    # GIVEN the webview base url, page_id, and the Selenium driver
    # WHEN we visit a single content page (not a book)
    content = ContentPage(selenium, webview_base_url, id=page_id).open()
    books_containing = content.books_containing
    # THEN the button that opens and closes the "ToC" is labelled "Books" instead of "Contents"
    # AND the button opens and closes the "This page is in # books" side nav
    contents_button = content.header_nav.contents_button
    assert contents_button.text == "Books"
    # The side nav area should be open by default
    assert books_containing.is_displayed
    content.header_nav.click_contents_button()
    assert not books_containing.is_displayed
    content.header_nav.click_contents_button()
    # Wait for the side nav region to display again before asserting.
    content.books_containing.wait_for_region_to_display()
    assert books_containing.is_displayed
@markers.webview
@markers.requires_complete_dataset
@markers.test_case("C195054")
@markers.nondestructive
@markers.parametrize("page_id", ["4fGVMb7P@1"])
def test_books_containing_list_is_on_left_of_page(webview_base_url, selenium, page_id):
    """The containing-books panel sits in the left half of the window."""
    # GIVEN a loaded content page and the browser window width
    page = ContentPage(selenium, webview_base_url, id=page_id).open()
    viewport_width = page.get_window_size("width")
    # THEN the books list exists and the panel starts in the left half
    assert page.books_containing.book_list
    assert page.location["x"] < viewport_width / 2
@markers.webview
@markers.smoke
@markers.requires_complete_dataset
@markers.test_case("C195056")
@markers.nondestructive
@markers.parametrize("page_id", ["QlYg2VHd"])
@markers.parametrize("width,height", [(1024, 768), (630, 480)])
def test_button_open_with_certain_window_size(webview_base_url, selenium, page_id, width, height):
    """The books-containing overview starts open on wide windows (>= 640 px)
    and closed on narrow ones.

    Fix: the parametrized width/height were previously never applied to the
    browser, so both cases ran at the default window size; resize the window
    before loading the page.
    """
    # GIVEN a browser window resized to the parametrized dimensions
    selenium.set_window_size(width, height)
    # WHEN we visit that page of the chapter
    content = ContentPage(selenium, webview_base_url, id=page_id).open()
    # THEN if window width >= 640, the overview should be open
    if width >= 640:
        assert content.books_containing.overview_is_displayed
    # AND if window width < 640, the overview should be closed
    else:
        assert not content.books_containing.overview_is_displayed
@markers.webview
@markers.test_case("C195060")
@markers.nondestructive
@markers.parametrize("id", ["4fGVMb7P@1"])
@markers.parametrize("highlight_color", ["#78b04a"])
def test_book_title_link_and_highlight_on_view(webview_base_url, id, selenium, highlight_color):
    """Following a containing-book title link keeps the same section open and
    highlights the active ToC entry with the expected color."""
    # GIVEN the webview base url, a chapter page id, the color and the Selenium driver
    # WHEN we visit that page of the chapter
    content = ContentPage(selenium, webview_base_url, id=id).open()
    content_page_title = content.title
    # AND click the title
    content.books_containing.book_list[0].click_title_link()
    # AND get and click the Contents button
    content.header_nav.click_contents_button()
    # AND find the on viewing title and get the color
    active_color = content.table_of_contents.active_page_color
    # THEN make sure the section matches the original page title and the highlight color is correct
    assert content_page_title == content.section_title_without_chapter_section
    assert active_color == highlight_color
| tests/webview/ui/test_content.py | 36,680 | This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. FIXME Requires varnish (False, '/content/col23946', 301), FIXME Requires varnish (False, '/content/col23946/1.1', 301), GIVEN some URL and the expected redirect code WHEN we visit the URL NOTE: Don't bother trying to get status codes using Selenium https://github.com/seleniumhq/selenium-google-code-issue-archive/issues/141 THEN we get the expected redirect code GIVEN a book's content page WHEN the book's canonical url is visited THEN we end up in the same page NOTE: we check the section title instead of the url because the canonical link seems to take us to the latest version of the content, no matter which version we started on NOTE: Newer versions of the book may not have the section number. For this we check in the section_title string instead of an equality. 
GIVEN the home page WHEN a book is clicked THEN the site navbar and content nav are displayed Section title is on top of main content section (white area) Section title inside main content section Section title on top of main content section GIVEN the home page and a book WHEN the book's cover is clicked THEN the displayed author is OpenStax GIVEN a book's content page WHEN the contents button is clicked THEN the table of contents is displayed GIVEN a book's table of contents WHEN a chapter is expanded and we navigate to one of its pages THEN we end up at the correct page GIVEN the home page WHEN a book is clicked THEN social share links are displayed in the top right corner Top half Right half GIVEN a book's content page and a query WHEN we search the book for the given query THEN search results are present (or not) and bolded and link to the matching content GIVEN the home page WHEN a book is clicked THEN social share links have the expected urls GIVEN the content page WHEN the newer version link is clicked THEN we end up in a newer version of the same page GIVEN a book's content page WHEN we click the "Get This Book!" 
button THEN links to download the pdf, epub and offline zip versions are displayed Look at the footer to see which downloads should have been available Check the footer GIVEN the webview base url, the Selenium driver and the id of a page whose title has unicode WHEN we load the content page Ensure it has a figure element THEN the page does not reload afterwards Wait 10 seconds to see if the page reloads If we don't get a StaleElementReferenceException then the page didn't reload This is expected to fail as we ran out non-redirecting collections with figures on the main page GIVEN a book's content page with figures WHEN we scroll to a figure THEN some figure is displayed GIVEN a book's content page WHEN we scroll to the bottom THEN - the header nav is offscreen but still considered displayed - the content nav is displayed on top without the site navbar or any social links The footer is displayed at the bottom Hard to check that the content_header is on top after scrolling, but we can check that it at least has the pinned class and is above the footer GIVEN a book's content page WHEN we scroll to the bottom THEN - the header nav is offscreen but still considered displayed - the content nav is offscreen without the site navbar or any social links WHEN we scroll up THEN - the header nav is offscreen but still considered displayed - the content nav is now pinned and onscreen without the site navbar or any social links WHEN we scroll down again THEN - the header nav is offscreen but still considered displayed - the content nav is now closed and offscreen without the site navbar or any social links GIVEN a book's content page WHEN we click the attribution tab THEN the attribution is displayed and has the correct support email GIVEN a book's scrolled content page WHEN we scroll to the bottom then click the back to top link THEN the content page is no longer scrolled The footer is offscreen, but still considered displayed The header is no longer pinned GIVEN a book's 
content page and a sim_ratio Introduction should be the first section loaded Preface is skipped by default WHEN we navigate next twice and then back twice using the header and footer controls THEN we arrive back at the initial page GIVEN the webview base url, an American Government content page UUID, and the Selenium driver WHEN the page is fully loaded using the URL THEN :NOT_CONVERTED_YET is not displayed GIVEN an index page in a baked book or a page with anchor links in an unbaked book WHEN we click on a term (baked index) or an anchor link THEN we end up at the linked page and the element with the same id as the link is displayed WHEN we click the browser's back button THEN we end up at the previous page GIVEN the webview base url, a chapter review id, and the Selenium driver WHEN we visit the chapter review page THEN the chapter review version matches the book version GIVEN the webview base url, a chapter review id, and the Selenium driver WHEN we click the link to the first book THEN we are on the About this Book page and it is displayed GIVEN the webview base url, a chapter review id, and the Selenium driver WHEN the content_page is fully loaded and we have a list of books containing the page THEN all the Books should contain revision date GIVEN the webview base url, page_id, and the Selenium driver WHEN we visit that page of the chapter and we have a list of books containing the page THEN the title of the books are not truncated by ellipses GIVEN the webview base url, page_id, and the Selenium driver WHEN we visit the content page AND we have a books containing count AND we have the overview message THEN ensure the proper books containing overview message is displayed GIVEN the webview base url, page_id, and the Selenium driver WHEN we visit that page of the chapter and we have a list of books containing page THEN the authors of the book should be displayed GIVEN the webview base url, page_id, and the Selenium driver WHEN we visit that page of the chapter 
and we have a list of books containing page AND store the main author AND Save list of authors and dates THEN main author should be the author of the first book listed AND if there are more books with main author, they should be listed first AND for the rest of the books, the revision dates are sorted in decreasing order GIVEN the webview base url, page_id, and the Selenium driver WHEN we visit a single content page (not a book) THEN the button that opens and closes the "ToC" is labelled "Books" instead of "Contents" AND the button opens and closes the "This page is in books" side nav The side nav area should be open by default GIVEN the webview base url, page_id, and the Selenium driver WHEN we load the page of the chapter and we have the width of the window THEN check if the books list exists and on the left GIVEN the webview base url, page_id, and the Selenium driver WHEN we visit that page of the chapter and we have a list of books containing the page THEN if window width >= 640, button should be open AND if window width < 640, button should be closed GIVEN the webview base url, a chapter page id, the color and the Selenium driver WHEN we visit that page of the chapter AND click the title AND get and click the Contents button AND find the on viewing title and get the color THEN make sure the section matches the original page title and the highlight color is correct | 7,522 | en | 0.843556 |
#
# Copyright 2019 Delphix
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=missing-docstring
import os
import drgn
import sdb
def enum_lookup(enum_type_name: str, value: int) -> str:
    """Return the short name of *value* in the named enum type, with the
    members' common prefix (up to its last underscore) stripped off."""
    enumerators = sdb.get_type(enum_type_name).type.enumerators
    full_name: str = enumerators[value].name
    shared_prefix = os.path.commonprefix([member[0] for member in enumerators])
    return full_name[shared_prefix.rfind("_") + 1:]
def nicenum(num: int, suffix: str = "B") -> str:
for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
if num < 1024:
return "{}{}{}".format(int(num), unit, suffix)
num = int(num / 1024)
return "{}{}{}".format(int(num), "Y", suffix)
def P2PHASE(x: drgn.Object, align: int) -> int:
    """Return x modulo the power-of-two *align* (the low bits of x)."""
    return int(x & (align - 1))


def BF64_DECODE(x: drgn.Object, low: int, length: int) -> int:
    """Extract the *length*-bit field of *x* starting at bit *low*."""
    mask = (1 << length) - 1
    return int((x >> low) & mask)


def BF64_GET(x: drgn.Object, low: int, length: int) -> int:
    """Alias of BF64_DECODE (matches the C macro naming)."""
    return BF64_DECODE(x, low, length)


def WEIGHT_IS_SPACEBASED(weight: int) -> bool:
    """True when the metaslab weight is zero or has bit 60 set."""
    if weight == 0:
        return True
    return BF64_GET(weight, 60, 1) != 0


def WEIGHT_GET_INDEX(weight: int) -> int:
    """Return bits 54-59 of *weight*."""
    return BF64_GET(weight, 54, 6)


def WEIGHT_GET_COUNT(weight: int) -> int:
    """Return bits 0-53 of *weight*."""
    return BF64_GET(weight, 0, 54)
# Metaslab weight flag bits (the high bits of the 64-bit weight value);
# these presumably mirror the METASLAB_WEIGHT_* definitions in ZFS's
# metaslab_impl.h — confirm when updating.
METASLAB_WEIGHT_PRIMARY = int(1 << 63)
METASLAB_WEIGHT_SECONDARY = int(1 << 62)
METASLAB_WEIGHT_CLAIM = int(1 << 61)
METASLAB_WEIGHT_TYPE = int(1 << 60)
# Any of the primary/secondary/claim bits marks a metaslab as active.
METASLAB_ACTIVE_MASK = (METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY |
                        METASLAB_WEIGHT_CLAIM)
# Size in bytes of a btree leaf — TODO confirm against the zfs_btree sources.
BTREE_LEAF_SIZE = 4096
| sdb/commands/zfs/internal/__init__.py | 2,156 | return a string which is the short name of the enum value
(truncating off the common prefix)
Copyright 2019 Delphix Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pylint: disable=missing-docstring | 674 | en | 0.84273 |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
class _GetAttrMeta(type):
# https://stackoverflow.com/questions/33727217/subscriptable-objects-in-class
def __getitem__(cls, x):
return getattr(cls, x)
def __iter__(cls):
"""Getting subclasses which usually represent resolutions"""
for attr in vars(cls):
if not attr.startswith("_"):
yield cls[attr]
class DatasetTreeCore(metaclass=_GetAttrMeta):
    """Marker base class for dataset/parameter trees; via _GetAttrMeta,
    subclasses support ``Cls["name"]`` lookup and iteration over their
    public attributes."""
    pass
| wetterdienst/util/parameter.py | 568 | Getting subclasses which usually represent resolutions
-*- coding: utf-8 -*- Copyright (c) 2018-2021, earthobservations developers. Distributed under the MIT License. See LICENSE for more info. https://stackoverflow.com/questions/33727217/subscriptable-objects-in-class | 271 | en | 0.755687 |
import os
import sys
import numpy as np
import random
import math
from PIL import Image, ImageOps, ImageFilter
import torch
import torch.utils.data as data
import torchvision.transforms as transform
from .base import BaseDataset
class NYUv2Segmentation(BaseDataset):
    """NYUv2 semantic segmentation dataset wrapper (40 classes).

    Expects the on-disk layout produced by ``cvss/scripts/prepare_nyuv2.py``:
    ``<root>/nyuv2/image/<name>``, ``<root>/nyuv2/mask/<name>`` plus one
    ``<split>.txt`` file list per split.
    """
    BASE_DIR = 'nyuv2'
    # NYUv2 40-class labeling.
    NUM_CLASS = 40

    def __init__(self, root=os.path.expanduser('~/.cvss/data'), split='train',
                 mode=None, transform=None, target_transform=None, **kwargs):
        """Collect the (image, mask) file pairs for *split* under *root*.

        Raises AssertionError when the prepared dataset directory is missing
        and RuntimeError when no images are found.
        """
        super(NYUv2Segmentation, self).__init__(
            root, split, mode, transform, target_transform, **kwargs)
        # assert exists and prepare dataset automatically
        root = os.path.join(root, self.BASE_DIR)
        assert os.path.exists(root), "Please setup the dataset using" + \
            "cvss/scripts/prepare_nyuv2.py"
        self.images, self.masks = _get_nyuv2_pairs(root, split)
        if split != 'test':
            assert (len(self.images) == len(self.masks))
        if len(self.images) == 0:
            raise(RuntimeError("Found 0 images in subfolders of: \
                " + root + "\n"))

    def __getitem__(self, index):
        """Return the transformed (image, mask) pair for *index* — or
        (image, filename) when mode is 'test'."""
        img = Image.open(self.images[index]).convert('RGB')
        if self.mode == 'test':
            if self.transform is not None:
                img = self.transform(img)
            return img, os.path.basename(self.images[index])
        mask = Image.open(self.masks[index])
        # synchronized transform (same geometric ops on image and mask)
        if self.mode == 'train':
            img, mask = self._sync_transform(img, mask)
        elif self.mode == 'val':
            img, mask = self._val_sync_transform(img, mask)
        else:
            assert self.mode == 'testval'
            mask = self._mask_transform(mask)
        # general resize, normalize and toTensor
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            mask = self.target_transform(mask)
        return img, mask

    def _sync_transform(self, img, mask):
        """Training augmentation: random mirror, random scale, pad, and a
        random crop applied identically to image and mask."""
        # random mirror
        if random.random() < 0.5:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        crop_size = self.crop_size
        # random scale (short edge)
        w, h = img.size
        min_side = min(w, h)
        scale = np.random.uniform(0.5, 2.0)
        # Clamp the scale so min(w, h) * scale stays >= 350 px — presumably so
        # the crop below always fits; confirm against base_size usage.
        if min_side * scale < 350:
            scale = 350 * 1.0 / min_side
        long_size = int(self.base_size*scale)
        if h > w:
            oh = long_size
            ow = int(1.0 * w * long_size / h + 0.5)
            short_size = ow
        else:
            ow = long_size
            oh = int(1.0 * h * long_size / w + 0.5)
            short_size = oh
        img = img.resize((ow, oh), Image.BILINEAR)
        # NEAREST keeps label ids intact (no interpolation between classes).
        mask = mask.resize((ow, oh), Image.NEAREST)
        # pad crop
        if short_size < crop_size:
            padh = crop_size - oh if oh < crop_size else 0
            padw = crop_size - ow if ow < crop_size else 0
            img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
            # Mask padding uses 0, which _mask_transform maps to -1.
            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
        # random crop crop_size
        w, h = img.size
        x1 = random.randint(0, w - crop_size)
        y1 = random.randint(0, h - crop_size)
        img = img.crop((x1, y1, x1+crop_size, y1+crop_size))
        mask = mask.crop((x1, y1, x1+crop_size, y1+crop_size))
        # final transform
        return img, self._mask_transform(mask)

    def _val_sync_transform(self, img, mask):
        """Validation counterpart: no augmentation, only the label shift."""
        # final transform
        return img, self._mask_transform(mask)

    def _mask_transform(self, mask):
        """Shift labels down by one so ids become 0..39 and the original 0
        becomes -1 — presumably the ignore index; confirm with the loss."""
        target = np.array(mask).astype('int64') - 1
        return torch.from_numpy(target)

    def __len__(self):
        """Number of samples in this split."""
        return len(self.images)

    @property
    def pred_offset(self):
        # Inverse of _mask_transform's -1 shift; presumably added back to
        # predictions by evaluation/saving code.
        return 1
def _get_nyuv2_pairs(folder, split='train'):
def get_path_pairs(folder, split_file):
img_paths = []
mask_paths = []
with open(os.path.join(folder, split_file), 'r') as f:
for filename in f.readlines():
filename = filename.strip()
imgpath = os.path.join(folder, 'image', filename)
maskpath = os.path.join(folder, 'mask', filename)
if os.path.isfile(maskpath):
img_paths.append(imgpath)
mask_paths.append(maskpath)
else:
print('cannot find the mask:', maskpath)
return img_paths, mask_paths
img_paths, mask_paths = get_path_pairs(folder, split_file=split+'.txt')
return img_paths, mask_paths
| cvss/datasets/nyuv2.py | 4,776 | assert exists and prepare dataset automatically synchrosized transform general resize, normalize and toTensor random mirror random scale (short edge) pad crop random crop crop_size final transform final transform | 212 | en | 0.660784 |
# This file was scaffold by idol_mar, but it will not be overwritten, so feel free to edit.
# This file will be regenerated if you delete it.
from ...codegen.all.target.optional_method import (
AllTargetOptionalMethodSchema as OptionalMethodSchemaCodegen,
)
class OptionalMethodSchema(OptionalMethodSchemaCodegen):
    """Hand-editable extension point over the generated
    AllTargetOptionalMethodSchema; add overrides here."""
    pass
| test/src/lib/idol/py_mar/all/target/optional_method.py | 330 | This file was scaffold by idol_mar, but it will not be overwritten, so feel free to edit. This file will be regenerated if you delete it. | 137 | en | 0.978566 |
# -*- coding: utf-8 -*-
import os
import sqlite3
import logging
# Module-level logger (under the "xtc" name).
logger = logging.getLogger("xtc")
class sqlite_handle(object):
def __init__(self):
self.dbname = "Xsense.db"
self.conn = None
def db_init(self): # åå§ådb task_infoãappsãscriptsãrun_tasks
self.db_table_all()
conn = sqlite3.connect(self.dbname)
try:
for cre in self.create_dic:
conn.execute(cre)
# logger.info(cre)
except Exception as e:
logger.info("Create table failed: {}".format(e))
return False
finally:
conn.close()
def insert_task(self,taskdict): # æå
¥ä»»å¡ä¿¡æ¯ for
conn = sqlite3.connect(self.dbname)
for task in taskdict:
conn.execute(
'INSERT INTO task_Info VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)',task
)
conn.commit()
conn.close()
def insert_script_one(self,scriptOne): # æå
¥èæ¬ä¿¡æ¯
conn = sqlite3.connect(self.dbname)
conn.execute(
'INSERT INTO scripts VALUES (?,?,?,?,?,?,?,?)',scriptOne
)
conn.commit()
conn.close()
def insert_task_many(self,script_data): # æå
¥ä»»å¡ä¿¡æ¯ å€é¡¹
conn = sqlite3.connect(self.dbname)
conn.executemany(
'INSERT INTO scripts VALUES (?,?,?,?,?,?,?,?)',script_data
)
conn.commit()
conn.close()
def db_table_all(self):
crt_task_info = '''CREATE TABLE IF NOT EXISTS task_info (
taskId INT, testTaskName TEXT, optType int,scriptId INT,scriptUrl TEXT,
startDate int, endDate int, exeBeginTime TEXT, exeEndTime TEXT,
exeType int, interval int, iterationNum int, startIterationNumber int
);'''
crt_scripts = '''CREATE TABLE IF NOT EXISTS scripts (
scriptId INT, scriptName TEXT, scriptType int,scriptUrl TEXT,
uploadDate int, scriptMaxRunTime int, scriptVersion int,
scriptCacheUrl TEXT
);'''
crt_apps = '''CREATE TABLE IF NOT EXISTS apps (
scriptId INT, appCheck int, appPackageName TEXT, appUrl TEXT, appMd5 TEXT,
appVersion TEXT, appVersionCode TEXT, appLastUpdateTime TEXT, appCacheUrl TEXT
);'''
run_tasks = '''CREATE TABLE IF NOT EXISTS run_tasks (
taskId INT, testTaskName TEXT, optType int,scriptId INT,scriptUrl TEXT,
startDate int, endDate int, exeBeginTime TEXT, exeEndTime TEXT,
exeType int, interval int, iterationNum int, startIterationNumber int
);'''
create_dic = []
create_dic.append(crt_task_info)
create_dic.append(crt_scripts)
create_dic.append(crt_apps)
create_dic.append(run_tasks) # ä¿åéèŠè¿è¡çä»»å¡ æå¿
èŠä¹
self.create_dic = create_dic
def query_runtask(self):
conn = sqlite3.connect(self.dbname)
taskrows = [] #å
çŽ äžºtupleïŒ(205937, 'pyclient-test', 1, 107864, 'http://202.105.193....69910.zip', 20191006000000, 20201231235959, '000000', '235959', 2, 1, 1, 1)
# è·åæªå®æçææ¬¡ä»»å¡ äžå«éå€é¡¹ æ°å¢+å¯åš, exeType=2ææ¬¡æ§è¡ exeType=1ææ¶æ§è¡
# optType 1`=æ°å¢ä»»å¡ïŒ`2`=æåä»»å¡ïŒ`3`=å¯åšä»»å¡ïŒ`4`=å é€ä»»å¡
for row in conn.execute('SELECT DISTINCT * FROM task_info WHERE optType=3 OR optType=1 AND exeType=2 AND startIterationNumber<=iterationNum'):
taskrows.append(row)
conn.close()
return taskrows
def dele_table(self):
pass
def query(self, sql, sqlstring=False):
conn = sqlite3.connect(self.dbname)
cursor = conn.cursor()
# cursor = self.conn.cursor()
if sqlstring:
cursor.executemany(sql, sqlstring)
else:
cursor.execute(sql)
data = cursor.fetchall()
cursor.close()
return data
def update(self, sql, sqlstring=False):
conn = sqlite3.connect(self.dbname)
cursor = conn.cursor()
# cursor = self.conn.cursor()
if sqlstring:
cursor.executemany(sql, sqlstring)
else:
cursor.execute(sql)
conn.commit()
cursor.close()
def _update(self, sql, value=None, querymany=True):
ret = True
try:
if querymany:
self.update(sql, value)
else:
self.update(sql)
#except SqliteException:
except Exception as e:
logger.info("error('æ§è¡sqlite: {} æ¶åºéïŒ{}')".format(sql, e))
ret = False
return ret
def del_task_byid(self, taskid):
conn = sqlite3.connect(self.dbname)
cursor = conn.cursor()
sql = 'DELETE FROM task_info WHERE taskid={}'.format(taskid)
cursor.execute(sql)
logger.info("åªé€taskid={} cursor.rowcount={}".format(taskid, str(cursor.rowcount)))
conn.commit()
cursor.close()
conn.close()
def update_task_run_status(self, taskid, status):
conn = sqlite3.connect(self.dbname)
cursor = conn.cursor()
cursor.execute("UPDATE task_info SET optType={} WHERE taskid={}".format(status, taskid))
logger.info("æŽæ°taskid={}ïŒè®Ÿçœ®optType={}ïŒcursor.rowcount={}".format(taskid, status, str(cursor.rowcount)))
conn.commit()
cursor.close()
conn.close()
def update_task_run_count(self, taskid, run_count):
conn = sqlite3.connect(self.dbname)
cursor = conn.cursor()
cursor.execute("UPDATE task_info SET startIterationNumber={} WHERE taskid={}".format(run_count, taskid))
logger.info("æŽæ°taskid={}ïŒstartIterationNumber={}ïŒcursor.rowcount={}".format(taskid, run_count, str(cursor.rowcount)))
conn.commit()
cursor.close()
conn.close()
    def updata_table(self):
        # Placeholder: table update is not implemented yet.
        # NOTE(review): name looks like a typo for "update_table"; kept as-is
        # because callers may reference it.
        pass
if __name__ == "__main__":
    # Manual driver: create the handler, initialize the db file if missing,
    # then exercise one of the update helpers.
    handle = sqlite_handle()
    if not os.path.isfile(handle.dbname):
        handle.db_init()
    #taskrows = handle.query_runtask()
    #print("taskrows=" + str(taskrows))
    #handle.del_task_byid("1235")
    handle.update_task_run_count("206266", 60)
    #handle.update_task_run_status("206266", "5")
# æŽæ°/å é€ åæ¡ä»»å¡ãæŽæ° èæ¬ä¿¡æ¯
# äžèœœåæ¥è¯¢æ°æ®åºïŒåŠæèæ¬idå·²ç»ååšïŒäžæŽæ°æ¶éŽäžèŽ åäžäžèœœïŒåŠåäžèœœ-->å
¥åº
# ä»»å¡è¿è¡ïŒå
æ£æ¥æ¯åŠææ°ä»»å¡ïŒåŠæææ°ä»»å¡ïŒåå
¥åºïŒ
# æ²¡ææ°ä»»å¡ïŒåæ¥è¯¢æ°æ®åºïŒä»»å¡idè¿è¡ä¿¡æ¯æ¯åŠèŸŸå°rmæ¡ä»¶ïŒè¿æã宿çïŒ
# åŠæè¿è¡ èœ®æ¬¡èŸŸå° æ»èœ®æ¬¡ ådel
# åŠæ ç»ææ¶éŽè¶
è¿åœåæ¶éŽ ådel
# æ€å€éèŠå¢å id æåº ååè¿è¡
# è¿è¡å®æåïŒæŽæ° id对åºç蜮次信æ¯
# ä»å€©æå® èæ¬è¿è¡åç»ææä»¶ ïŒç¶åådb update å remove | client-autosense/sense/sqlite_syn.py | 7,075 | -*- coding: utf-8 -*- åå§ådb task_infoãappsãscriptsãrun_tasks logger.info(cre) æå
¥ä»»å¡ä¿¡æ¯ for æå
¥èæ¬ä¿¡æ¯ æå
¥ä»»å¡ä¿¡æ¯ å€é¡¹ ä¿åéèŠè¿è¡çä»»å¡ æå¿
èŠä¹å
çŽ äžºtupleïŒ(205937, 'pyclient-test', 1, 107864, 'http://202.105.193....69910.zip', 20191006000000, 20201231235959, '000000', '235959', 2, 1, 1, 1) è·åæªå®æçææ¬¡ä»»å¡ äžå«éå€é¡¹ æ°å¢+å¯åš, exeType=2ææ¬¡æ§è¡ exeType=1ææ¶æ§è¡ optType 1`=æ°å¢ä»»å¡ïŒ`2`=æåä»»å¡ïŒ`3`=å¯åšä»»å¡ïŒ`4`=å é€ä»»å¡ cursor = self.conn.cursor() cursor = self.conn.cursor()except SqliteException:taskrows = handle.query_runtask()print("taskrows=" + str(taskrows))handle.del_task_byid("1235")handle.update_task_run_status("206266", "5") æŽæ°/å é€ åæ¡ä»»å¡ãæŽæ° èæ¬ä¿¡æ¯ äžèœœåæ¥è¯¢æ°æ®åºïŒåŠæèæ¬idå·²ç»ååšïŒäžæŽæ°æ¶éŽäžèŽ åäžäžèœœïŒåŠåäžèœœ-->å
¥åº ä»»å¡è¿è¡ïŒå
æ£æ¥æ¯åŠææ°ä»»å¡ïŒåŠæææ°ä»»å¡ïŒåå
¥åºïŒ æ²¡ææ°ä»»å¡ïŒåæ¥è¯¢æ°æ®åºïŒä»»å¡idè¿è¡ä¿¡æ¯æ¯åŠèŸŸå°rmæ¡ä»¶ïŒè¿æã宿çïŒ åŠæè¿è¡ èœ®æ¬¡èŸŸå° æ»èœ®æ¬¡ ådel åŠæ ç»ææ¶éŽè¶
è¿åœåæ¶éŽ ådel æ€å€éèŠå¢å id æåº ååè¿è¡ è¿è¡å®æåïŒæŽæ° id对åºçèœ®æ¬¡ä¿¡æ¯ ä»å€©æå® èæ¬è¿è¡åç»ææä»¶ ïŒç¶åådb update å remove | 851 | zh | 0.852034 |
def notas(*n, sit=False):
    """
    Analyze the grades and situation of several students.

    :param n: one or more student grades (accepts several).
    :param sit: optional; when True, also include the class situation.
    :return: dict with statistics about the class (total, maior, menor,
             media and, optionally, situação).
    """
    dic = dict()
    dic["total"] = len(n)
    dic["maior"] = max(n)
    dic["menor"] = min(n)
    dic["media"] = sum(n) / len(n)
    if sit:
        # Bug fix: the original referenced an undefined local `media`,
        # raising NameError whenever sit=True. Read the computed average.
        media = dic["media"]
        if media < 5:
            dic["situação"] = "Critica"
        elif media < 7:
            dic["situação"] = "Rasoavel"
        else:
            dic["situação"] = "Boa"
    return dic
# Demo: analyze three grades and include the class situation.
resp = notas(5, 4, 3, sit=True)
print(resp)
| Modulo-03/ex105/ex105.py | 713 | Função para analisar notas e situação de varios alunos.
:param n: Uma ou mais notas dos alunos (aceita varias)
:param sit: Valor opcional, indicando se deve ou não adicionar a situação.
:return: Dicionario com varias informações sobre a situação da turma. | 255 | pt | 0.996466 |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
| test/aqua/operators/__init__.py | 508 | -*- coding: utf-8 -*- This code is part of Qiskit. (C) Copyright IBM 2018, 2019. This code is licensed under the Apache License, Version 2.0. You may obtain a copy of this license in the LICENSE.txt file in the root directory of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. Any modifications or derivative works of this code must retain this copyright notice, and modified files need to carry a notice indicating that they have been altered from the originals. | 482 | en | 0.898065 |
import os
import csv
import shutil
from datetime import datetime
from numpy import logspace
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from torch.optim import Adam
from dataset.e_piano import create_epiano_datasets, create_pop909_datasets
from model.music_transformer import MusicTransformer
from model.discriminator import MusicDiscriminator
from model.classifier import CNNDiscriminator
from model.loss import SmoothCrossEntropyLoss
from utilities.constants import *
from utilities.WGAN_GP import WassersteinLoss
from utilities.device import get_device, use_cuda
from utilities.lr_scheduling import LrStepTracker, get_lr
from utilities.argument_funcs import parse_train_args, print_train_args, write_model_params
from utilities.run_model import train_epoch, eval_model
# Column names written to results/results.csv once per epoch.
CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss", "Train Accuracy", "Avg Eval loss", "Eval accuracy"]
# CNN discriminator filter configuration (parallel lists: size -> count).
dis_filter_sizes = [2, 3, 4, 5]
dis_num_filters = [300, 300, 300, 300]
# Baseline is an untrained epoch that we evaluate as a baseline loss and accuracy
BASELINE_EPOCH = -1
# main
def main():
    """
    ----------
    Author: Damon Gwinn
    ----------
    Entry point. Trains a model specified by command line arguments.

    Builds the datasets selected by the encoding flags (interval / octave /
    fusion / absolute / logscale), constructs the MusicTransformer generator
    plus a critic and a classifier discriminator, then runs the train/eval
    loop, tracking best loss/accuracy checkpoints, CSV results and
    (optionally) tensorboard summaries.
    ----------
    """
    args = parse_train_args()
    print_train_args(args)
    if(args.force_cpu):
        use_cuda(False)
        print("WARNING: Forced CPU usage, expect model to perform slower")
        print("")
    # Run id encodes date and the main training switches; all outputs nest under it.
    eventid = f"{datetime.now().strftime('MusicTransformer-%Y.%m.%d')}_gan_{args.gan}_creative_{args.creative}_ce_{args.ce_smoothing}"
    args.output_dir = args.output_dir + "/" + eventid
    os.makedirs(args.output_dir, exist_ok=True)
    ##### Output prep #####
    params_file = os.path.join(args.output_dir, "model_params.txt")
    write_model_params(args, params_file)
    weights_folder = os.path.join(args.output_dir, "weights")
    os.makedirs(weights_folder, exist_ok=True)
    results_folder = os.path.join(args.output_dir, "results")
    os.makedirs(results_folder, exist_ok=True)
    results_file = os.path.join(results_folder, "results.csv")
    # Checkpoint paths for the best-loss / best-accuracy generator, critic and classifier.
    best_loss_file = os.path.join(results_folder, "best_loss_weights.pickle")
    best_acc_file = os.path.join(results_folder, "best_acc_weights.pickle")
    best_loss_critic_file = os.path.join(results_folder, "best_loss_critic_weights.pickle")
    best_acc_critic_file = os.path.join(results_folder, "best_acc_critic_weights.pickle")
    best_loss_classifier_file = os.path.join(
        results_folder, "best_loss_classifier_weights.pickle")
    best_acc_classifier_file = os.path.join(
        results_folder, "best_acc_classifier_weights.pickle")
    best_text = os.path.join(results_folder, "best_epochs.txt")
    ##### Tensorboard #####
    if(args.no_tensorboard):
        tensorboard_summary = None
    else:
        from torch.utils.tensorboard import SummaryWriter
        tensorboad_dir = os.path.join(args.output_dir, "tensorboard/" + eventid)
        tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir)
    ##### Datasets #####
    # The dataset directory and token encoding differ per flag combination,
    # so each case is handled separately below.  (Translated from Korean.)
    # pop909 is split 80/10/10 with a fixed seed for reproducibility.
    if args.interval and args.octave:
        print("octave interval dataset!!")
        classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_interval_e_piano', args.max_sequence,
                                                                          condition_token=args.condition_token, interval = args.interval, octave = args.octave)
        pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave)
        pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
                                                                       [int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
                                                                        len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
                                                                       generator=torch.Generator().manual_seed(42))
    elif args.octave and args.fusion_encoding and args.absolute:
        print("absolute dataset!!")
        classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_fusion_absolute_e_piano', args.max_sequence,
                                                                          condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding, absolute = args.absolute)
        pop909_dataset = create_pop909_datasets('./dataset/pop909_absolute', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding, absolute = args.absolute)
        pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
                                                                       [int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
                                                                        len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
                                                                       generator=torch.Generator().manual_seed(42))
    elif args.interval and not args.octave:
        print("interval dataset!!")
        classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/logscale_e_piano', args.max_sequence,
                                                                          condition_token=args.condition_token, interval = args.interval, octave = args.octave)
        pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave)
        pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
                                                                       [int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
                                                                        len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
                                                                       generator=torch.Generator().manual_seed(42))
    elif args.octave and args.fusion_encoding:
        print("Octave_fusion dataset!!")
        classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_fusion_e_piano', args.max_sequence,
                                                                          condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding)
        pop909_dataset = create_pop909_datasets('./dataset/logscale_pop909', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave, fusion = args.fusion_encoding)
        pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
                                                                       [int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
                                                                        len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
                                                                       generator=torch.Generator().manual_seed(42))
    elif not args.interval and args.octave and not args.fusion_encoding:
        print("Octave dataset!!")
        classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/octave_e_piano', args.max_sequence,
                                                                          condition_token=args.condition_token, interval = args.interval, octave = args.octave)
        pop909_dataset = create_pop909_datasets('./dataset/pop909_octave', args.max_sequence, condition_token=args.condition_token, interval = args.interval, octave = args.octave)
        pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
                                                                       [int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
                                                                        len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
                                                                       generator=torch.Generator().manual_seed(42))
    elif args.logscale:
        print("logscvale dataset")
        classic_train, classic_val, classic_test = create_epiano_datasets('./dataset/logscale_epiano0420', args.max_sequence, random_seq=True,
                                                                          condition_token=args.condition_token, interval = args.interval, octave = args.octave, logscale=args.logscale, absolute = args.absolute)
        pop909_dataset = create_pop909_datasets('./dataset/logscale_pop0420', args.max_sequence, random_seq=True, condition_token=args.condition_token, interval = args.interval, octave = args.octave, logscale=args.logscale, absolute = args.absolute)
        pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
                                                                       [int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1),
                                                                        len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
                                                                       generator=torch.Generator().manual_seed(42))
    else:
        classic_train, classic_val, classic_test = create_epiano_datasets(args.classic_input_dir, args.max_sequence,
                                                                          condition_token = args.condition_token, octave = args.octave)
        pop909_dataset = create_pop909_datasets('dataset/pop_pickle/', args.max_sequence, condition_token = args.condition_token, octave = args.octave)
        pop_train, pop_valid, pop_test = torch.utils.data.random_split(pop909_dataset,
                                                                       [int(len(pop909_dataset) * 0.8), int(len(pop909_dataset) * 0.1), len(pop909_dataset) - int(len(pop909_dataset) * 0.8) - int(len(pop909_dataset) * 0.1)],
                                                                       generator=torch.Generator().manual_seed(42))
    # Select which corpus (classic / pop / both) feeds training and validation.
    if args.data == 'both':
        print("Dataset: both")
        train_dataset = torch.utils.data.ConcatDataset([ classic_train, pop_train])
        val_dataset = torch.utils.data.ConcatDataset([ classic_val, pop_valid])
    elif args.data == 'classic':
        print("Dataset: classic")
        train_dataset = torch.utils.data.ConcatDataset([classic_train])
        val_dataset = torch.utils.data.ConcatDataset([classic_val])
    else:
        print("Dataset: pop")
        train_dataset = torch.utils.data.ConcatDataset([pop_train])
        val_dataset = torch.utils.data.ConcatDataset([pop_valid])
    test_dataset = torch.utils.data.ConcatDataset([classic_test, pop_test])
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.n_workers, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.n_workers)
    # Generator: the music transformer itself.
    model = MusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads,
                             d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout,
                             max_sequence=args.max_sequence, rpr=args.rpr,
                             condition_token = args.condition_token, interval = args.interval, octave = args.octave,
                             fusion = args.fusion_encoding, absolute = args.absolute, logscale=args.logscale).to(get_device())
    # EY critic
    # num_prime = args.num_prime
    # Critic and classifier use half-sized transformer discriminators.
    critic = MusicDiscriminator(n_layers=args.n_layers // 2, num_heads=args.num_heads // 2,
                                d_model=args.d_model // 2, dim_feedforward=args.dim_feedforward // 2, dropout=args.dropout,
                                max_sequence=args.max_sequence, rpr=args.rpr).to(get_device())
    classifier = MusicDiscriminator(n_layers=args.n_layers // 2, num_heads=args.num_heads // 2,
                                    d_model=args.d_model // 2, dim_feedforward=args.dim_feedforward // 2, dropout=args.dropout,
                                    max_sequence=args.max_sequence, rpr=args.rpr).to(get_device())
    if args.creative:
        # Creative mode starts from a pretrained genre classifier checkpoint.
        classifier.load_state_dict(torch.load('best_classifier_acc_0.9883.pickle'))
    ##### Continuing from previous training session #####
    start_epoch = BASELINE_EPOCH
    if(args.continue_weights is not None):
        if(args.continue_epoch is None):
            print("ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights")
            return
        else:
            model.load_state_dict(torch.load(args.continue_weights))
            start_epoch = args.continue_epoch
    elif(args.continue_epoch is not None):
        print("ERROR: Need continue weights (-continue_weights) when using continue_epoch")
        return
    ##### Lr Scheduler vs static lr #####
    if(args.lr is None):
        if(args.continue_epoch is None):
            init_step = 0
        else:
            init_step = args.continue_epoch * len(train_loader)
        lr = LR_DEFAULT_START
        lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step)
    else:
        lr = args.lr
    ##### Not smoothing evaluation loss #####
    # The pad token (ignore_index) differs per encoding, mirroring the dataset branches.
    if args.interval and args.octave:
        eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_INTERVAL)
    elif args.interval and not args.octave:
        eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_INTERVAL)
    elif args.octave and args.fusion_encoding and args.absolute:
        eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE)
    elif args.octave and args.fusion_encoding:
        eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE_FUSION)
    elif not args.interval and args.octave and not args.fusion_encoding:
        eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_OCTAVE)
    elif args.logscale:
        eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD_RELATIVE)
    else:
        eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD)
    ##### SmoothCrossEntropyLoss or CrossEntropyLoss for training #####
    if(args.ce_smoothing is None):
        train_loss_func = eval_loss_func
    else:
        # Vocab size also depends on the encoding flags.
        if args.interval and args.octave:
            train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_INTERVAL, ignore_index=TOKEN_PAD_INTERVAL)
        elif args.interval and not args.octave:
            train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_INTERVAL, ignore_index=TOKEN_PAD_INTERVAL)
        elif not args.interval and args.octave and args.fusion_encoding and args.absolute:
            train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_FUSION_ABSOLUTE, ignore_index=TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE)
        elif not args.interval and args.octave and args.fusion_encoding:
            train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE_FUSION, ignore_index=TOKEN_PAD_OCTAVE_FUSION)
        elif not args.interval and args.octave and not args.fusion_encoding:
            train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_OCTAVE, ignore_index=TOKEN_PAD_OCTAVE)
        elif args.logscale:
            train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE_RELATIVE, ignore_index=TOKEN_PAD_RELATIVE)
        else:
            train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE, ignore_index=TOKEN_PAD)
    ##### EY - WGAN Loss #####
    classifier_loss_func = nn.MSELoss()
    ##### Optimizer #####
    opt = Adam(model.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
    critic_opt = Adam(critic.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
    classifier_opt = Adam(classifier.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
    if(args.lr is None):
        lr_scheduler = LambdaLR(opt, lr_stepper.step)
        critic_lr_scheduler = LambdaLR(critic_opt, lr_stepper.step)
        classifier_lr_scheduler = LambdaLR(classifier_opt, lr_stepper.step)
    else:
        lr_scheduler = None
    ##### Tracking best evaluation accuracy #####
    best_eval_acc = 0.0
    best_eval_acc_epoch = -1
    best_eval_loss = float("inf")
    best_eval_loss_epoch = -1
    ##### Results reporting #####
    if(not os.path.isfile(results_file)):
        with open(results_file, "w", newline="") as o_stream:
            writer = csv.writer(o_stream)
            writer.writerow(CSV_HEADER)
    ##### TRAIN LOOP #####
    for epoch in range(start_epoch, args.epochs):
        # Baseline has no training and acts as a base loss and accuracy (epoch 0 in a sense)
        # NOTE(review): with start_epoch == BASELINE_EPOCH this condition is
        # always true, so the baseline-evaluation branch below never runs;
        # `epoch > BASELINE_EPOCH` would enable it — confirm intent.
        if(epoch >= BASELINE_EPOCH):
            print(SEPERATOR)
            print("NEW EPOCH:", epoch+1)
            print(SEPERATOR)
            print("")
            # Train
            # EY: this section still needs fixing (translated from Korean).
            train_loss, train_acc, dis_loss, gen_loss, cre_loss, gan_accuracy, class_accuracy, creativity = train_epoch(epoch+1, model, critic, classifier, train_loader, train_loss_func, classifier_loss_func, opt, critic_opt, classifier_opt, lr_scheduler, critic_lr_scheduler, classifier_lr_scheduler, args)
            print(SEPERATOR)
            print("Evaluating:")
        else:
            print(SEPERATOR)
            print("Baseline model evaluation (Epoch 0):")
        # Eval
        # train_loss, train_acc = eval_model(model, train_loader, train_loss_func)
        eval_loss, eval_acc = eval_model(model, val_loader, eval_loss_func, args)
        # Learn rate
        lr = get_lr(opt)
        print("Epoch:", epoch+1)
        print("Avg train loss:", train_loss)
        print("Avg train acc:", train_acc)
        print("Avg eval loss:", eval_loss)
        print("Avg eval acc:", eval_acc)
        print(SEPERATOR)
        print("")
        new_best = False
        if(eval_acc > best_eval_acc):
            best_eval_acc = eval_acc
            best_eval_acc_epoch  = epoch+1
            torch.save(model.state_dict(), best_acc_file)
            torch.save(critic.state_dict(), best_acc_critic_file)
            torch.save(classifier.state_dict(), best_acc_classifier_file)
            new_best = True
        if(eval_loss < best_eval_loss):
            best_eval_loss       = eval_loss
            best_eval_loss_epoch = epoch+1
            torch.save(model.state_dict(), best_loss_file)
            torch.save(critic.state_dict(), best_loss_critic_file)
            torch.save(classifier.state_dict(), best_loss_classifier_file)
            new_best = True
        # Writing out new bests
        if(new_best):
            with open(best_text, "w") as o_stream:
                print("Best eval acc epoch:", best_eval_acc_epoch, file=o_stream)
                print("Best eval acc:", best_eval_acc, file=o_stream)
                print("")
                print("Best eval loss epoch:", best_eval_loss_epoch, file=o_stream)
                print("Best eval loss:", best_eval_loss, file=o_stream)
        if(not args.no_tensorboard):
            tensorboard_summary.add_scalar("Avg_CE_loss/train", train_loss, global_step=epoch+1)
            tensorboard_summary.add_scalar("Avg_CE_loss/eval", eval_loss, global_step=epoch+1)
            tensorboard_summary.add_scalar("Accuracy/train", train_acc, global_step=epoch+1)
            tensorboard_summary.add_scalar("Accuracy/eval", eval_acc, global_step=epoch+1)
            tensorboard_summary.add_scalar("Learn_rate/train", lr, global_step=epoch+1)
            tensorboard_summary.add_scalar("Critic_loss/train", dis_loss, global_step=epoch+1)
            tensorboard_summary.add_scalar("Gen_loss/train", gen_loss, global_step=epoch+1)
            tensorboard_summary.add_scalar("Creativity_loss/train", cre_loss, global_step=epoch+1)
            tensorboard_summary.add_scalar("GAN_accuracy/train", gan_accuracy, global_step=epoch+1)
            tensorboard_summary.add_scalar("Class_accuracy/train", class_accuracy, global_step=epoch+1)
            tensorboard_summary.add_scalar("Creativity/train", creativity, global_step=epoch+1)
            tensorboard_summary.flush()
        if((epoch+1) % args.weight_modulus == 0):
            epoch_str = str(epoch+1).zfill(PREPEND_ZEROS_WIDTH)
            path = os.path.join(weights_folder, "epoch_" + epoch_str + ".pickle")
            torch.save(model.state_dict(), path)
        with open(results_file, "a", newline="") as o_stream:
            writer = csv.writer(o_stream)
            writer.writerow([epoch+1, lr, train_loss, train_acc, eval_loss, eval_acc])
    # Sanity check just to make sure everything is gone
    if(not args.no_tensorboard):
        tensorboard_summary.flush()
    return
# Script entry point.
if __name__ == "__main__":
    main()
| train.py | 21,588 | ----------
Author: Damon Gwinn
----------
Entry point. Trains a model specified by command line arguments
----------
Baseline is an untrained epoch that we evaluate as a baseline loss and accuracy main Output prep Tensorboard Datasets ë°ìŽí°ì
ìŽ ë°ëêž° ë묞ì ìëìê°ìŽ íŽì£ŒìŽìŒíš EY critic num_prime = args.num_prime Continuing from previous training session Lr Scheduler vs static lr Not smoothing evaluation loss SmoothCrossEntropyLoss or CrossEntropyLoss for training EY - WGAN Loss Optimizer Tracking best evaluation accuracy Results reporting TRAIN LOOP Baseline has no training and acts as a base loss and accuracy (epoch 0 in a sense) Train EY ê³ ì³ìŒ í ë¶ë¶ì ìì Eval train_loss, train_acc = eval_model(model, train_loader, train_loss_func) Learn rate Writing out new bests Sanity check just to make sure everything is gone | 819 | en | 0.807554 |
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code accompanies this codelab: https://codelabs.developers.google.com/codelabs/pyspark-bigquery/.
# This is a script for backfilling a set of data from Reddit into Google Cloud Storage
# Python imports
import re
import time
import sys
# A Spark Session is how we interact with Spark SQL to create Dataframes
from pyspark.sql import SparkSession
# PySpark function for replacing characters using a regex. We'll use this to remove newline characters.
from pyspark.sql.functions import regexp_replace, col
# Library for interacting with Google Cloud Storage
from google.cloud import storage
# This will help catch some PySpark errors
from py4j.protocol import Py4JJavaError
# Create a SparkSession under the name "reddit". Viewable via the Spark UI
spark = SparkSession.builder.appName("reddit").getOrCreate()
# Establish a set of years and months to iterate over
year = sys.argv[1]
month = sys.argv[2]
bucket_name = sys.argv[3]
# Establish a subreddit to process
subreddit = 'food'
# Set Google Cloud Storage temp location
path = "tmp" + str(time.time())
# Keep track of all tables accessed via the job
# NOTE(review): tables_read is declared but never appended to in this script.
tables_read = []
# In the form of <project-id>.<dataset>.<table>
table = f"fh-bigquery.reddit_posts.{year}_{month}"
# If the table doesn't exist we will simply continue and not
# log it into our "tables_read" list
try:
    df = spark.read.format('bigquery').option('table', table).load()
except Py4JJavaError:
    print(f"{table} does not exist. ")
    sys.exit(0)
print(f"Processing {table}.")
# Select the "title", "selftext" and "created_utc" columns of the designated subreddit and
# replace newline characters with a single space
subreddit_timestamps = (
    df
    .select(
        regexp_replace(col("title"), "\n", " "),
        regexp_replace(col("selftext"), "\n", " "),
        "created_utc"
    )
    .where(df.subreddit == subreddit)
)
tmp_output_path = "gs://" + bucket_name + "/" + path + "/" + year + "/" + month
# Write output to our temp GCS bucket. Spark jobs can be written out to multiple files
# and partitions. By using coalesce, we ensure the output is consolidated to a single file.
# We then use .options to tell Spark to write out in a gzip format, and .csv to do the write.
(
    subreddit_timestamps
    # Data can get written out to multiple files / partition.
    # This ensures it will only write to 1.
    .coalesce(1)
    .write
    # Gzip the output file
    .options(codec="org.apache.hadoop.io.compress.GzipCodec")
    # Write out to csv
    .csv(tmp_output_path)
)
regex = "part-[0-9a-zA-Z\-]*.csv.gz"
new_path = "/".join(["reddit_posts", year, month, subreddit + ".csv.gz"])
# Create the storage client
storage_client = storage.Client()
# Create an object representing the original bucket
source_bucket = storage_client.get_bucket(bucket_name)
# Grab all files in the source bucket. Typically there is also a _SUCCESS file, inside of the
# directory, so we'll make sure to find our single csv file.
buckets = list(source_bucket.list_blobs(prefix=path))
for bucket in buckets:
name = bucket.name
# Locate the file that represents our partition. Copy to new location and
# delete temp directory.
if re.search(regex, name):
blob = source_bucket.blob(name)
source_bucket.copy_blob(blob, source_bucket, new_path)
blob.delete()
| codelabs/spark-bigquery/backfill.py | 4,054 | Copyright 2019 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. This code accompanies this codelab: https://codelabs.developers.google.com/codelabs/pyspark-bigquery/. This is a script for backfilling a set of data from Reddit into Google Cloud Storage Python imports A Spark Session is how we interact with Spark SQL to create Dataframes PySpark function for replacing characters using a regex. We'll use this to remove newline characters. Library for interacting with Google Cloud Storage This will help catch some PySpark errors Create a SparkSession under the name "reddit". Viewable via the Spark UI Establish a set of years and months to iterate over Establish a subreddit to process Set Google Cloud Storage temp location Keep track of all tables accessed via the job In the form of <project-id>.<dataset>.<table> If the table doesn't exist we will simply continue and not log it into our "tables_read" list Select the "title", "selftext" and "created_utc" columns of the designated subreddit and replace newline characters with a single space Write output to our temp GCS bucket. Spark jobs can be written out to multiple files and partitions. By using coalesce, we ensure the output is consolidated to a single file. We then use .options to tell Spark to write out in a gzip format, and .csv to do the write. Data can get written out to multiple files / partition. This ensures it will only write to 1. 
Gzip the output file Write out to csv Lastly, we'll move the temp file to a new bucket and delete the temp directory. Create the storage client Create an object representing the original bucket Grab all files in the source bucket. Typically there is also a _SUCCESS file, inside of the directory, so we'll make sure to find our single csv file. Locate the file that represents our partition. Copy to new location and delete temp directory. | 2,349 | en | 0.85888 |
import random
import unittest
from domain_tree.tree import DomainTree, DomainNode, NodeNotFoundException
from domain_tree.domain import RealDomain, RealInterval
class TestDomainTree(unittest.TestCase):
    """Tests for DomainTree: partitioning, membership and regression lookup."""

    @classmethod
    def setUpClass(cls) -> None:
        pass

    @classmethod
    def tearDownClass(cls) -> None:
        pass

    def setUp(self):
        # self.d0 = {"x0": (0, 1)}
        # Base domain: x0 in [0, 1) (left-closed, right-open).
        self.d0 = RealDomain({"x0": RealInterval((0, 1), (True, False))})

    def tearDown(self) -> None:
        pass

    def test_npartition(self):
        # Smaller min_split allows more leaves; assertions accept the
        # range of leaf counts the random splitting can produce.
        tree = DomainTree(domains=self.d0, min_split=0.5)
        self.assertEqual(len(tree.leaves), 2)
        tree = DomainTree(domains=self.d0, min_split=0.3)
        self.assertIn(len(tree.leaves), [2, 3])
        tree = DomainTree(domains=self.d0, min_split=0.2)
        self.assertIn(len(tree.leaves), [3, 4, 5])
        tree = DomainTree(domains=self.d0, min_split=0, depth_max=5)
        # With min_split=0 a full binary tree of depth 5 has 2**5 / 2 leaves.
        n = (2 ** 5) / 2
        self.assertEqual(len(tree.leaves), n)

    def test_stress_functions(self):
        # Repeated construction and evaluation; points inside [0, 1) must
        # resolve, points shifted by +1 must raise NodeNotFoundException.
        for _ in range(10000):
            tree = DomainTree(domains=self.d0, min_split=0, depth_max=5)
        tree = DomainTree(domains=self.d0, min_split=0, depth_max=10)
        for _ in range(10000):
            tree.compute_f({"x0": random.random()})
        with self.assertRaises(NodeNotFoundException):
            for _ in range(10000):
                tree.compute_f({"x0": random.random() + 1})

    def test_contains(self):
        tree = DomainTree(domains=self.d0, min_split=0.5)
        x = {"x0": 0}
        self.assertTrue(tree.contains(x))
        x = {"x0": 1}
        # Right endpoint is open, so 1 is outside.
        self.assertFalse(tree.contains(x))
        x = {"x0": 0.5}
        self.assertTrue(tree.contains(x))
        #d = {"x0": (0, 1), "x1": (2, 3)}
        # Two-dimensional domain, both intervals left-closed/right-open.
        d = RealDomain({"x0": RealInterval((0, 1), (True, False)), "x1": RealInterval((2, 3), (True, False))})
        tree = DomainTree(domains=d, min_split=0.5)
        x = {"x0": 0, "x1": 2}
        self.assertTrue(tree.contains(x))
        x = {"x0": 1, "x1": 2}
        self.assertFalse(tree.contains(x))
        x = {"x0": 0.5, "x1": 2.99}
        self.assertTrue(tree.contains(x))
        # x0 closed on both ends, x1 open on both ends.
        d = RealDomain({"x0": RealInterval((0, 1), (True, True)), "x1": RealInterval((2, 3), (False, False))})
        tree = DomainTree(domains=d, min_split=0.5)
        #tree.print_tree()
        x = {"x0": 0, "x1": 2}
        # x1 == 2 is excluded by the open left endpoint.
        self.assertFalse(tree.contains(x))
        x = {"x0": 0, "x1": 2.5}
        self.assertTrue(tree.contains(x))

    def test_compute_f(self):
        tree = DomainTree(domains=self.d0, min_split=0.5)
        with self.assertRaises(NodeNotFoundException):
            tree.node_which_contains({"x0": -12})
        x = {"x0": 0}
        node = tree.node_which_contains(x)
        self.assertIsNotNone(node.regression)
        # compute_f must match the leaf's own linear regression prediction.
        b = node.regression.coef_[0]
        c = node.regression.intercept_
        self.assertEqual(node.regression.predict([list(x.values())]), b * x[list(x.keys())[0]] + c)
        self.assertEqual(tree.compute_f(x), node.regression.predict([list(x.values())]))
class TestDomainNode(unittest.TestCase):
    """Tests for DomainNode: regression generation, membership and kill()."""

    def setUp(self):
        self.val = 10
        domain = RealDomain({"x0": RealInterval((0, 1), (True, False)),
                             "x1": RealInterval((2, 3), (True, False))})
        self.node = DomainNode(name="nome", domains=domain, val=self.val)

    def tearDown(self) -> None:
        pass

    def test_generate_regression(self):
        self.node.generate_regression()
        regression = self.node.regression
        self.assertIsNotNone(regression)
        self.assertIsNotNone(regression.coef_)
        self.assertIsNotNone(regression.intercept_)

    def test_contains(self):
        # Points inside the half-open box [0,1) x [2,3) ...
        for point in ({"x0": 0, "x1": 2}, {"x0": 0.5, "x1": 2.5}):
            self.assertTrue(self.node.contains(point))
        # ... and points on or beyond the open right edges.
        for point in ({"x0": 1, "x1": 2}, {"x0": 1, "x1": 3}, {"x0": 0.2, "x1": 3}):
            self.assertFalse(self.node.contains(point))

    def test_kill(self):
        self.node.dostuff(random=0.5)
        self.assertIn(self.node.val, [self.val - 2, self.val - 1])
        self.node.kill()
        self.assertEqual(self.node.val, 0)
# Allow running this test module directly (e.g. `python test_tree.py`).
if __name__ == "__main__":
    unittest.main()
| test/test_tree.py | 4,387 | self.d0 = {"x0": (0, 1)}d = {"x0": (0, 1), "x1": (2, 3)}tree.print_tree() | 73 | en | 0.332616 |
# Copyright 2014, Doug Wiegley, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
import octavia.common.config as config
import octavia.tests.unit.base as base
class TestConfig(base.TestCase):
    """Tests for octavia.common.config initialization and validation."""

    def test_sanity(self):
        config.init([])
        config.setup_logging(cfg.CONF)
        # Resetting because this will cause inconsistent errors when run with
        # other tests
        self.addCleanup(cfg.CONF.reset)

    def test_validate_server_certs_key_passphrase(self):
        conf = self.useFixture(oslo_fixture.Config(config.cfg.CONF))
        # A 32-character passphrase from the allowed alphabet is accepted.
        conf.config(
            group="certificates",
            server_certs_key_passphrase="insecure-key-do-not-use-this-key"
        )
        rejected = (
            "short_passphrase",                   # too short
            "long-insecure-key-do-not-use-this",  # too long
            "insecure-key-do-not-u$e-this-key",   # invalid characters
        )
        for passphrase in rejected:
            self.assertRaises(ValueError, conf.config,
                              group="certificates",
                              server_certs_key_passphrase=passphrase)
| octavia/tests/unit/common/test_config.py | 1,946 | Copyright 2014, Doug Wiegley, A10 Networks. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Resetting because this will cause inconsistent errors when run with other tests Test too short Test too long Test invalid characters | 728 | en | 0.843139 |
import getpass, platform, sys, threading
from .. util import log
from . control import ExtractedControl
# See https://stackoverflow.com/questions/42603000
# Warning printed on macOS when keystroke capture will not work without root.
# (Fixed grammar: the original read "must to be running".)
DARWIN_ROOT_WARNING = """
In MacOS, pynput must be running as root in order to get keystrokes.
Try running your program like this:
sudo %s <your commands here>
"""

# Error raised when the optional pynput dependency is missing.
INSTALL_ERROR = """
Please install the pynput library with
$ pip install pynput
"""
# pynput is optional: when it cannot be imported we set both names to None
# and defer the failure to Keyboard._make_thread, so importing this module
# always succeeds.
try:
    import pynput
except ImportError:
    pynput = Listener = None
else:
    class Listener(pynput.keyboard.Listener):
        def join(self, timeout=None):
            # join() on pynput.keyboard.Listener waits on a queue...
            self._queue.put(None)
            return super().join(timeout)
def keyname(key):
    """Return a readable name for a pynput key object.

    Special keys expose a ``name`` attribute; ordinary character keys only
    carry ``char``.  A missing or falsy name falls through to ``char``.
    """
    special = getattr(key, 'name', None)
    return special or getattr(key, 'char')
class Keyboard(ExtractedControl):
    """Routes keyboard press/release events into the control pipeline."""

    EXTRACTOR = {
        'keys_by_type': {
            'press': ['type', 'key'],
            'release': ['type', 'key'],
        },
        'normalizers': {
            'key': keyname,
        },
    }

    def _press(self, key):
        self.receive({'type': 'press', 'key': key})

    def _release(self, key):
        self.receive({'type': 'release', 'key': key})

    def _make_thread(self):
        # pynput is None when the optional dependency failed to import.
        if pynput is None:
            raise ValueError(INSTALL_ERROR)
        on_darwin = platform.platform().startswith('Darwin')
        if on_darwin and getpass.getuser() != 'root':
            log.warning(DARWIN_ROOT_WARNING, sys.argv[0])
        log.info('Starting to listen for keyboard input')
        return Listener(self._press, self._release)
| bibliopixel/control/keyboard.py | 1,583 | See https://stackoverflow.com/questions/42603000 join() on pynput.keyboard.Listener waits on a queue... | 103 | en | 0.704784 |
#
# Autogenerated by Frugal Compiler (3.4.7)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from frugal.util import make_hashable
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
class base_health_condition(int):
    """Int-backed health-state enumeration (generated by Frugal)."""

    PASS = 1
    WARN = 2
    FAIL = 3
    UNKNOWN = 4

    _VALUES_TO_NAMES = {
        1: "PASS",
        2: "WARN",
        3: "FAIL",
        4: "UNKNOWN",
    }

    # Reverse lookup derived from the forward table above.
    _NAMES_TO_VALUES = {name: value for value, name in _VALUES_TO_NAMES.items()}
class thing(object):
    """Generated Thrift struct with two optional fields.

    Attributes:
     - an_id
     - a_string
    """

    def __init__(self, an_id=None, a_string=None):
        self.an_id = an_id
        self.a_string = a_string

    def read(self, iprot):
        """Populate this struct from a Thrift input protocol."""
        iprot.readStructBegin()
        while True:
            (_, field_type, field_id) = iprot.readFieldBegin()
            if field_type == TType.STOP:
                break
            # Unknown ids and type mismatches are skipped, per Thrift rules.
            if field_id == 1 and field_type == TType.I32:
                self.an_id = iprot.readI32()
            elif field_id == 2 and field_type == TType.STRING:
                self.a_string = iprot.readString()
            else:
                iprot.skip(field_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        """Serialize this struct to a Thrift output protocol."""
        self.validate()
        oprot.writeStructBegin('thing')
        if self.an_id is not None:
            oprot.writeFieldBegin('an_id', TType.I32, 1)
            oprot.writeI32(self.an_id)
            oprot.writeFieldEnd()
        if self.a_string is not None:
            oprot.writeFieldBegin('a_string', TType.STRING, 2)
            oprot.writeString(self.a_string)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __hash__(self):
        value = 17
        for part in (self.an_id, self.a_string):
            value = (value * 31) ^ hash(make_hashable(part))
        return value

    def __repr__(self):
        attrs = ', '.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, attrs)

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class nested_thing(object):
    """Generated Thrift struct holding a list of ``thing`` values.

    Attributes:
     - things
    """

    def __init__(self, things=None):
        self.things = things

    def read(self, iprot):
        """Populate this struct from a Thrift input protocol."""
        iprot.readStructBegin()
        while True:
            (_, field_type, field_id) = iprot.readFieldBegin()
            if field_type == TType.STOP:
                break
            if field_id == 1 and field_type == TType.LIST:
                self.things = []
                (_, size) = iprot.readListBegin()
                for _ in range(size):
                    item = thing()
                    item.read(iprot)
                    self.things.append(item)
                iprot.readListEnd()
            else:
                iprot.skip(field_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        """Serialize this struct to a Thrift output protocol."""
        self.validate()
        oprot.writeStructBegin('nested_thing')
        if self.things is not None:
            oprot.writeFieldBegin('things', TType.LIST, 1)
            oprot.writeListBegin(TType.STRUCT, len(self.things))
            for item in self.things:
                item.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __hash__(self):
        # Same formula as the generator emits: start at 17, one mix step.
        return (17 * 31) ^ hash(make_hashable(self.things))

    def __repr__(self):
        attrs = ', '.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, attrs)

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class api_exception(TException):
    """Generated Thrift exception carrying no fields."""

    def read(self, iprot):
        """Consume (and discard) any fields from the input protocol."""
        iprot.readStructBegin()
        while True:
            (_, field_type, _) = iprot.readFieldBegin()
            if field_type == TType.STOP:
                break
            iprot.skip(field_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        """Serialize the (empty) struct to the output protocol."""
        self.validate()
        oprot.writeStructBegin('api_exception')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __str__(self):
        return repr(self)

    def __hash__(self):
        # No fields to mix in, so the seed is the hash.
        return 17

    def __repr__(self):
        attrs = ', '.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, attrs)

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
| test/expected/python.asyncio/actual_base/ttypes.py | 5,575 | Attributes:
- things
Attributes:
- an_id
- a_string
Autogenerated by Frugal Compiler (3.4.7) DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING | 162 | en | 0.857254 |
import os
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder,LabelEncoder,StandardScaler
from sklearn.decomposition import TruncatedSVD,PCA
from sklearn.metrics.pairwise import cosine_similarity,pairwise_distances
from sklearn.feature_extraction.text import TfidfVectorizer
SEED = 2048
np.random.seed(SEED)  # make any stochastic steps reproducible
# Quora duplicate-questions data is expected under ~/data/quora/.
PATH = os.path.expanduser("~") + "/data/quora/"
train = pd.read_csv(PATH + "train_porter.csv")#, nrows=5000)
test = pd.read_csv(PATH + "test_porter.csv")#, nrows=5000)
# Give the test set a placeholder label so train/test can be concatenated.
test['is_duplicated'] = [-1]*test.shape[0]
len_train = train.shape[0]
data_all = pd.concat([train,test])
def calc_set_intersection(obj, target):
    """Fraction of *obj*'s unique whitespace tokens that also occur in *target*.

    Note the measure is asymmetric: it is normalized by *obj*'s token count.
    Returns 0.0 when *obj* tokenizes to nothing (empty question) — the
    original raised ZeroDivisionError in that case.
    """
    a = set(obj.split())
    b = set(target.split())
    if not a:
        return 0.0
    # float() keeps true division regardless of interpreter version.
    return float(len(a & b)) / len(a)
print('Generate intersection')
# Word-overlap ratio computed on the raw question text.
train_interaction = train.astype(str).apply(lambda x: calc_set_intersection(x['question1'],x['question2']),axis=1)
test_interaction = test.astype(str).apply(lambda x: calc_set_intersection(x['question1'],x['question2']),axis=1)
pd.to_pickle(train_interaction,PATH+"train_interaction.pkl")
pd.to_pickle(test_interaction,PATH+"test_interaction.pkl")
print('Generate porter intersection')
# Same ratio on the Porter-stemmed columns produced upstream.
train_porter_interaction = train.astype(str).apply(lambda x:calc_set_intersection(x['question1_porter'],x['question2_porter']),axis=1)
test_porter_interaction = test.astype(str).apply(lambda x:calc_set_intersection(x['question1_porter'],x['question2_porter']),axis=1)
pd.to_pickle(train_porter_interaction, PATH+"train_porter_interaction.pkl")
pd.to_pickle(test_porter_interaction, PATH+"test_porter_interaction.pkl")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import base64
import lzma
from ... import TestUnitBase
from refinery.units.formats.office.xlxtr import _ref2rc, _rc2ref
class TestCellIndexConverter(TestUnitBase):
    """Round-trip checks for the xlxtr cell-reference converters."""

    def test_concistency(self):
        # Every (row, col) pair must survive rc -> ref -> rc unchanged.
        for row in range(1, 12):
            for col in range(1, 12):
                ref = _rc2ref(row, col)
                back = _ref2rc(ref)
                self.assertEqual(
                    back, (row, col),
                    F'({row},{col}) -> {ref} -> ({back[0]}, {back[1]}) != ({row},{col})')
class TestExcelExtractor(TestUnitBase):
    """End-to-end tests for the xlxtr Excel extraction unit."""

    def test_regular_xlsx(self):
        # Extract everything, a squeezed cell range, and one cell addressed
        # both by sheet index ('2#E10') and by sheet name ('Refinery#E10').
        data = self.TEST_XLSX
        unit = self.load()
        self.assertEqual(unit(data), B'Binary\nRefinery.\nBinary Refinery.')
        xl1 = self.load('A1', 'R33', squeeze=True)(data)
        xl2 = self.load('2#E10')(data)
        xl3 = self.load('Refinery#E10')(data)
        self.assertEqual(xl2, xl3)
        self.assertEqual(xl1, b'BinaryRefinery.')
        self.assertEqual(xl2, b'Binary Refinery.')

    # Test workbook fixture: an .xlsx file, LZMA-compressed and
    # base85-encoded to keep the source ASCII-only.
    TEST_XLSX = lzma.decompress(base64.b85decode(
        '{Wp48S^xk9=GL@E0stWa8~^|S5YJf5;3PvDAzc6{61-q2m(dT*lz$@h&uisO-M2S>G=qQEROhS?T`LVCl<0*Kr;j=qGZrTMa1_{74oZ0B;H_q6z{0fO2`#4p'
        'Z(%@Rrb2l^+DIK4qbHHF_tmNDpz&Y$NlI-C6c(59S<hkLEM^A)s!{gk@qKO#f!<CU&7G31h2%4o%gM*%hC-@#t>rmqA<7aPOjP!YEkx*jkYln_Gs2{7ZcSSp'
        'k%^+f{8_0fK#=AnGd4nKnS~b32=88*Gzk18vHibqY6IP;P8rsEd*hi%t(hYl<vzGV#mly+rRuPU?H$RjiOhkC&_Y^=3@n*lF-L-p{&*dA>A$-1cYhlULYXE~'
        '9lRf#_`OFa&uH^H|E#>F1+<slwderZG)kz>f=O+S%CnbmT=-*EXvyp=?C!#p@e|yqJFol$s>T6*DyGIxp^}#q4f#_*{FEDNWty4CtIr9?l}dTd2ZvRe4c(lw'
        'DABO4`<xHUA!rFO$CY0pMP$7Ch|~lYzBzW26csva+1m`if>ts<6(kc$R^2wfYI_u<Q|ve2LG39foqnwf%7wRQd2S-u4FHQJN@YT;52pT!6{VrFCidv$Fyf;}'
        'rH559u)j4P7JILO$#(5+ZYcGMZALFyO?bVadG%NCWt)~F^p=Pm29lCFbYt)Fedzu<1zSy|M+}&@hOGrpf$f_=Y#DSA@|#f687|=g$UxDWWJKOTp)mW6TzZ=^'
        'p2l)f#+eE2G<HArbYwZE!pb>bRES(cfK<g8_b)!Kft2?rXK}=vK3~G(CX^_QX)BQi&gU31F}4c4VcB7TrBk^r&0ca1okiuv1q4^388j~{y%RNKdMWD;q7$3l'
        '#C;mMydS27!Koh*Bsd(dJ8m~*nz#&cRltJuz`RD02l;!L145|lg~%t7)#pZ6bT%^@aB5v|Mx2gU?|0@qMh{gR9r!(5QDnF8uc&l@Th{F@viY>d61j#TIyb8X'
        '61@K*a|ghIpbVLNf7H)(W5>emQ41R#dw<#Af~ZpQO|)JqOd_Vj*kk+pzMMj@w+^G{FQH|dL4#ia(qX?XVK!~^yYHeq(&}Ngxfz31xqCY)rD*@_3Pyn>pc~Wn'
        'MYDkF4kdF2tAi&B|JQ~s4)B9`NTUl4qos<(L1M+~{2d!BjkqBUb0%v1*kgIrF+ptfh}s0W$bSkIfJEba^sYW_lhRuUo-$5(Fftuy6p{|&N2JPAGBvqFg`%Q)'
        '1cB<NMLt8qVvugS&hO*6_B9Kg?C_=TOZyGd>o8}DAXwo}7%+6|%=!Q&@h){<N`TgzUUJ67cJdcdXo;y#hyb@#8t&HY8P=kV)6}2jZhORE^Qab?zfQf7B_xQV'
        'RK!+xABFg{33KMQ{4`>l&=iyiPUfI)c<LSMZ$G<RZa2rC=p3JGN`2;6a?#<4(EV$(=VK)cnGq^2NNZgPm;XW_n&r%)Tv0l1<R+xEEgpr*wA|*#_J_;WjMhx*'
        '2_V1cq6SWKO|ImPFM#_s4uUlRF5$o<bxhE8EI!Cp;wWYl$Rwb5FtH|uR2(*WCRKe{RcePa){nOIYL{IHzSvbnG=TE4j4@A1=U$eDy?6P-nQ|;;P(T(jnSv=m'
        'A&Rh1<Lz=W1J+!8u%iw8-_zZAtJcr2%@WV=+r{F4QyRi-NYdmBUk!FaGe5&&sf5vL_S1fe>CT`VFqQJ@BYH?72AFt;%Y}5m9zy2-<(iY_-&tjDSa4w0OtaO1'
        '8tKtv_^&+^2ur(e<A~BD=}W({XC6cTLgOQNXL9dl25Uj~y?U_xM??>jmwHU+ICMbW#mHy;%;FmR7XxDT&|UA)JmOx6IY-%2Nzf6u%Ak^&L#DrA=cJ-qL+2V4'
        'QaEix%b9zxe1xNE5#G23ON{#;_>8Kk9uORLt@ysrPLTL;n@tE%n;XrSU|Lbfw)ow=_ou8?#%|lEmF1WDbL}FKuGMr+{x400xau(;+mVCbvi;c!7;xGT@yFdV'
        'O%KZ3Zd7>8k{6`<kvAq=;*cc=8so}&t<|n@0JZ0ilyz;t_j^nrUr_nSS-~|bLvwY%)Eezn(t5`=4(yJ3=C)R^NZ7aBvqw##zY<>uu=C59T>6kOvA{kgk@|v`'
        's>pkG(&hxNnj-cSvL;G~#$Ew`FZiF$IM+7ut?;osAW_o%bvrhoYq6nZm9@=HAw>h4Pp#i=u)I}zReJI81}J1NlhYYmCJI!K?zcp6@Y#8Z3MQwQRUxzknnlp5'
        'Rl_cFj`Wt<CU*@+s1`HvyHy~l=e_`sA<(R)nIRh{g7LFc>#eyLlRNK~<0x(GE1^FLwOTD6)j;!)u7?|Ed8uB8efa1bHZN)eQzTas@ce)BAOmvmldGs|(&vx<'
        '5<<8Fy}}2W=u;!65A`@sm;bxZvSJ7?a@dwF?Hm9qA<e_Li%pFt+<IhChQmdjO{g%kg(jDtI-dwJFT9Gy@;{Nj;_p=$7QGZ6J(<db_mP^Z0@hL`fMm~^emi-<'
        '#U}<C;1S7UX&q{)L&*;Bb4F4&hy!RF0|TGtm9!CB-zUI~7+XmC5f#gR?25`_79+(~-tv8S?S4f!r4*c$F!XRrO<4{vh^|w`l%t?0J>547bF1x6nFKL1FZME8'
        'x>xF18ESM1s;wm*-x&m$NDpw?@x=<tlcE)STJnr9{NuK;#i6_2MYCPl%4Zq^9*$^R372ua6jwv>oH^mR0ioqk%%)Awns;#lrjXkIhYB_Vt*Pr*oTgse6Uazr'
        'd)yUnaZ|Z`9?Q6aTHa2@m4`pd_?E;;Re)&<*otbim^DZ!V{~?+t%H;U2&V8O9CkMdW*tOzBErCD-E}{=Nl%~-`;W#E5$bMF8A-TOVDt09^K)tTG2cvWxLh%9'
        'cuC?O7rL(QbGlAASV!M6dTB)pfy|#N5k4(Mdd*7+Mb<Fc^fR3BfFeEzF^|<<jpBXBM&T8{-77eX)1)UjzwbB1E&LZ4khDM^66En##rJ{5FB;62)1u0P(WW!?'
        'lQ>ewk;iuv3T5ya!?u25bnj7}T|JgGJ#9v?s8&4#t^H+#psB8+5X2Nb(T)9WO*Vt|gLB|i#r-n1JMfe$j%Ph5SXMv_Tanlh$I>cVX}KMHqanK)`S{y}?Q*p%'
        'q?-9=^4NCH4UFSGW?!(CtBYJuyypt+p0$nV^cK}KotkY2nSQndYOQFUvFVS3FW3?x>5yfLCco*5cW<@V1M^*WZG|(A0JM*3=9Sna%;2QH>md}mDc9$Mt3&b<'
        '9G4eqoW1wvVYXkau#+Amms%7l0aoEO^`4|P4TnM0ZoXb_xoe`WfYVjGR)VLd+Q_@wE=eFJLr%5%w|=*hWf977@eZKekfJ3;&92d7q=M_xzybcYrXD3rWUx7T'
        'YtP}VErR+Qx_;gt-vsQ=`UR=~2p9|w1mvGLTHTzpFy}ehnsV!-@9w;Br-4Iy$oZ!4*Ll%|=GkY0?kD^ebMpDWalI!>y!qU=-PH<$+%SHQox|bdqM~E30Lu?y'
        'n3PZbZ?~4RkXMF4T;wYcr7pG)Y;}^m^8PA7N*9B(6278}V(4CuTj{g8cnHCBjFEVl$#zR(-FckDWBH2kXxgM8VN!zSNkFRsiLX1J0e7IR-ok22b<Fh{0Zygn'
        'a->J1Tx<^V>tdmaeJ-AACUvHtR6ZqlAQc@|nfUvSjY9l8N}O1iL6tlkQNk$0EBJwV(D`Rl=MKmb{EZ(M+d9%;77%vNLbvj%X;Q>8k8h<6zf-kMENA;DDq9?9'
        '-c<)(XUOK-37=JI@*2_!1<`E;#sXJ^h*;4qBLW;_Mqdg3;l@sO8%u?U%P9drSYd47l>^xT9m~sM>V(|XYphyEM=oa(c$R$_SoS+4>&;O_fr;olaT?C<i;vRU'
        '>Z8O<b2dxzIAJbmw!O!q;jOe}<&^u*MaLUU@LxD!+r5~a9H*A^$_=p#3ZXmDXf(Ty2c+E9sKficRn4c|8+AF4uuF9VhW4%}>6syvgejhm`t$tpvg6Jz^Mj8-'
        'eJGh$HQ4_nYI6{Gq5tdgPaPK)6ehDCQ26}`@0(w}Y^jsD<S<4|2sfQd4)8g&VMHyPnhehJDk?3y@tj=^?fTchQ<Z_k7Q{seld!f7y2Ywsq3-BjBL~RJ=5!>)'
        'HrxQ9A#UUcI9OGd_dxu$A@8Czd8m&#<QJ`NMc2=__EFY=>wz*j8D_g9qx!^5p-44yDeVK-*Gq+h`Egsr8Zykb6#8*Md3@|MQtCqirE)!j#`xE3#3A;CNhhW6'
        '@xeBsNwb7OLeXHM-mx$+KjZN~rhI!XRzXlWcBNb!0{QQkA>m)Ta~ke$Z)|_T1I7V2h|AKhLTNs87A1I@LGcUyR57K}(+;tyyC8y-FEcM0@?iXGNBemODlLlH'
        'Mr&W(;)1Rbej$uqHn(yDH1F0kV@~eFf?-tYTXATJy75xajc$TygYO-K*F4I#iR*jVbT#0Sdc1yVJ~!nF1^f>mIxj#WHstZO4$~XMjt_&5m)E?ylIEe-l>(D!'
        'Mw7{vPF6HG$F-4mG8(?dUrM(jcMhCc>w~{Ex93TcYS@D19c^KVJU^TjPDbY1#=Uo*b{(Gv7n|GEQI?et?&_b)@xjCL01(3wMnc**8<dg)VsKfN?;QKq*-WZ)'
        'q@;?J7@QA^o5@YrEzLRbqL85Xn}ts4#pD44_rq|5fCqw#p~C9+4;y)=Dp3c|*;ZXTMF8FuRosDAR|5(w(ZGuW>E%_fgyG!r7?lqe3%xP?6V05D$y(VTsvUOT'
        'RQ^YFF+kR~czqgECf1UH;jIk8r2hg10EZw_%qg1HJbY!EE)z8(=N8PB9wvri&LcM3CAHa~Zirs4h%N)MGC{rV+dfuhiX)QYc8+1rR=`2V+zGHRbmllDEAxHp'
        '5<BjB;1rT_p7x*Z(v-bV+>i}0tw8REAnOZTGG7W$nnx$)6{BQ+R|g58X;%wAPn`#jR3qZx53X`$$S}|bEg91k*?nTro+A~2&E&c8bAL%TiOH-=B0Dj={_BRs'
        'zN_c*A9%woCER;T-@U)QT6Y*KB@#oPZMMU^)_cLl=aG57!=?!dINhxjR`Ad2cib22ZA>g)GQ}!oy<&=n)X-%0d%FsL#aNFDW*P*JZ{;gPC=bY4!wS)S?l&6g'
        'P6jM($%?=15;a!OkD@n`fxgQD^$w&KfMrNsA$(M<5bG)@`poZAgOs7zR6<b(_4gthE?vWQx9oH$gktbx6#eVoF&Xe5SGj?`c4Ao`3W{RMIdubs0e`X_6hiFK'
        '>wynbkbfB+=3+_Q?eSa6QO0d~q7yubxNApHZEG1Hp||VtF*`Epn)YU>IO$zG_leh1K>qkB&wVr6gi`E{(q4nMnP9&;s(RCZ@vfO7zGg>mK5c_Y1Sg6{rCRjF'
        '>nlWlf=PT6<0yV|00WvnG1-5Un}Qq#53Bat2Q+!&tPTzivUE>N5ydL&9B19kAevrDy(wr<id^TwwLC1O<k;_iWc3Al{%JZBDtYK^2QRE%g{XBQK>RO)dC9ur'
        '@dAER%=sun5g7ZDw^S%4sIPS^s2JBddi`&zG>k9cE<1bsW}oa3e?YeDQ&KX<O;c9qMe=CF{Aa$9kInQ9TT5DSP>=GYt(Gg*5b{QCyON-vRaXXK>xC<i&$tt2'
        '8|53#7Dg@~Q`bM<Zrh)ti1;$!Az6zi<f(9>`#JA?QiV1cR(HH_v>Ov#2ANK_#yB+M?#;Nxp?jzw_nBF?R|2yAURu=_MNoe$F@vzOw_rP{es)Mih4nvYQqY%f'
        '>%2udb2Id;8z%n8M|N}@WUOK6lk%1+62-uL>X?x0^(=9Y%o;c`$8#a?kCmpiihl|Q+S^8)dNsvuEqmd)J<2`*U_(F9{q6Sj<v84blBU_=ikeoN_)5W<J!VAw'
        'Sv$Ibl}+*I)>Qi(5y*2+-JLaxaUo`dNhioHs31)Ge(_tp+tQA>$|Gm~rxc`xRrz3dgbl<pfRlVz)6nvzGF>2$pK9lNm6NZx+A;1hh!Y^wq`e~y=n}-<6<e4<'
        '`*ul_NDY@>-g1WZ7hMwR?&tw9dyu+yY)xfY(Dxz$RK4(dU`!)mqVpN&qWD~f^V+}I=fWT<KC$YC_833-rC&s|-&P@2ne_N3A*te>b=X=Eek@lN46s;fVDhJ3'
        '^0`2@#<^2lA)H$6PSfhS?T3)G^?2IsKn{*Dcx9GZ>V1;)^ERS%jQdawwN@DFWmV_f?Max{4e??;&;7K<!{h(WPyGD{+@L*u(wzmx;X?xF{eUiKds($%ES*Ym'
        '@~7q)`@30*YJ|TX!8tw+6+2AlC{V-75h&3sf|h$oyap$59-bkLE$lBVKy1<dt?%3gfzvf!xPmrvI?%b7BV-?+9fzF(^>Rh&lc!B=7&O#O89HJ}8FtKJ8;*`G'
        '#oG&ackie*nZ<;gf4|RfL;3yJAyqllmLUY|?+yJh`Mg~?S^7{RY=Fzu=lz$Qg`QXCXTenb*>MO)qZKpGp?w@Wfo$u4oGUgZBL8~f!=1#)#f($a&NhjkJ@-g*'
        '+|f`#ugApNgEbuU`g6DMU9FM%e5J^mP;<ieN^1hy#Qk2#I>7|+b#|2|XaIX$?zVFH1@WR&)QzgwuL-#U&fG=uM=T9yeNcpwB+pV^h(zB$ZU5<M5gGqvOeN#N'
        'yVgbJ5<P11H}3-iK3WH)3&P%7HtVj_bQtmFcv{$s2yL*)Ii>v+ikrq*68vX=BgM4X#SvNA<ltrz-GE}KFtMrB(_&Z~V@}q;HCn15$x(Pijd=!-;U6Z~PoF&^'
        '0bkjt88le{rYSw?&3;UjOaX^gf3jGo@-xA5b()&3rH;aQgcyLDn(s~vim6}iRS{UhiHDj6J>u2XPyEZpPa~5t#8t}Zs&SnD&E{>^&$saZ?Mq`u7T-2s^Y-Ng'
        '5)+D+M@{nPIEmmA7yZb?N<>N0X_d)2EVrU~e?CqMCxLH~R^AVFzT{4dEXfA5k3DvQzw3Hs$VEW)xg^+5DPt<^7U9(JiWKa~nq2hxULBb*a&Y))x)#rQM8Z`j'
        '5Mmpf+M1+Y+jwRI6l<q@v9rV32JHH@XZtkinW?VkC)c278{WH8UyCuUxSAM<df#~$a<VV$$*tKVxAvl{Ax%2MO(8?<9gzDAuo(}9Y#e<svKuK1bD~XdngQEg'
        'L7nRHl|{{+DiK><=XU8^(;|agSqoyRyOB8*W9)6x|2vRE#7gKSkO^)4rPK~0v0)}fs&ZswK%<HY$uk`?OTLu>pD>T&rdIcf)>1>~PWqh-w8JOS&+-VlyMsOK'
        '<$oB)VeHgqUh%v_4krv{i6I|6O&`lof_mK2O+00|a#}BwG(2&@xM;48<nSGP3J~DqIBzs?Qy5Q-@Kyh&!Fl6@HL+`8)!~4;G)Oa=ex1SlfM$5+Zs#1`37rEW'
        '!z>3k19J|3fFOu?xIDa~SwA9%l3cKzCXIk>O75p|Bg)~|2;&k|mVGr+)MWRWCz;vY*&2yR97bK*S$>Ualdz*yplSY%%`-Yj!e!v%y*ROG3UlCsxgRcY70fqQ'
        'I+EX+tv<@*&DUsq7bbCHUntXdFs5vDGP@MDqpto`ZT!$seb}vPzciItw_Z$+jnO(0Q5Ge{`CApXVtSioC!~KF;1mjl7zHO2z0YfqFLzph`avY-bbj?E;T^30'
        'M0>~Bqjf8;WegI*+rs3kK<7hqTBy|v&jIUCfY+C*1mZJJbyU4ZEF!~_=0L~)Q#G|Ii{z+<I;P6A(s1E(@9b>FumB7oBm>X(NL!?}$KeF2{j3Ul)B_f84h4M5'
        'r9)#GV2+28fa6fK6R4CHHh)K#0Fad@oZV4_Gua#}uAjxJ*>@g%+T|%ID!}k^BS2Je^`Ky*>aIoivXvF>-dgPgyt#In;bPorwRyWMLjuMWcW+c-9boYE)8iS>'
        'q!Em8IIPsA1Y|^Xc@jro(IPy0;na@27uAd3Z1T<ga0jjkKx&+RWCtm!fw0>lEr)3m(rj-=U)Zw-<dl;K4GSxkTx(VhK(SI=UN7dA?Lv=#D>Qsd{nfTXm'
        'pxA`o(dC=F2E!ILT@*bC=AU*b$fz9Y`RM+&%tUiKh(1zr0b-tBkC^=#vjh`Aw~`^(Z}03wRH!x87TD<`J_|NamNx>q96dEcpLR`+0~*>P<hAWD^Q;hQo+5F<'
        'jkThMTR3~)t79?MN$7I(KMPx$mkUjhroGlDzyqi{sBeG_$w)uw3xyWMeG8?|PVNM@^!iEg8ZFVzg+!q|&_T%AV79u_NzR%3;O-V&1mRqcD2rPxeHk7RDVwj+'
        'TW~`L2g!$~bL55kst*mQ@YGUoVM@Q%(QGB!3x%5Ts?P*J5jLjM`8si3@#uU;K+U@o3R88*v$BeZFy>Z6<)6zkIfDg$P{F3Tl%R;1Iy!4f7pFwT{pda1v(L5Y'
        'UAt4vr3g<_cO7kXPR6q&HzDpZU9JzHml~E4e~KjPSIg1zc8JX3ffWqT3X9rhxdhiZcI14+hrSC3geN)~9kc)SH6NaPEv7|+!C8lhOJHLhpn<#SnL<zbQ`F1d'
        'F7z+X3NUnd;Cc@zZzz1@J)*=%vm5Kr|KqESpnKN`SrPmK$ZOI60Z#t#%ak|7wNPLIs_$bSRqYTpZCMnKd^q}R>)k?yVOgo)24Y*7v8)rsT^@GGq}6!!?oE!^'
        'd+U-g60>iG7RE;8d~$5Nais62-MIq@rRX&o)QtxeW#N_%7vMGGro#IN7SIar0k*UrI@bNMf~JE^W&+Qnet4Kt7e#+qzFUEV{w~l8@%_@&J<W=gc7p!^u7cs7'
        '000006<H{x300yM00F%;#7F=D8E*!*vBYQl0ssI200dcD'
    ))
| test/units/formats/office/test_xlxtr.py | 10,276 | !/usr/bin/env python3 -*- coding: utf-8 -*- | 43 | fr | 0.304089 |
from argparse import ArgumentParser
from typing import Any
from zerver.lib.management import ZulipBaseCommand
from zerver.models import Service, UserProfile
class Command(ZulipBaseCommand):
    help = """Given an existing bot, converts it into an outgoing webhook bot."""

    def add_arguments(self, parser: ArgumentParser) -> None:
        self.add_realm_args(parser)
        parser.add_argument('bot_email', metavar='<bot_email>', type=str,
                            help='email of bot')
        parser.add_argument('service_name', metavar='<service_name>', type=str,
                            help='name of Service object to create')
        parser.add_argument('base_url', metavar='<base_url>', type=str,
                            help='Endpoint URL of outgoing webhook')
        # TODO: Add token and interface as arguments once OutgoingWebhookWorker
        # uses these fields on the Service object.

    def handle(self, *args: Any, **options: str) -> None:
        bot_email = options['bot_email']
        service_name = options['service_name']
        base_url = options['base_url']
        realm = self.get_realm(options)
        # Reject missing inputs up front, checked in argument order.
        required = (
            (bot_email, 'Email of existing bot must be provided'),
            (service_name, 'Name for Service object must be provided'),
            (base_url, 'Endpoint URL of outgoing webhook must be provided'),
        )
        for value, message in required:
            if not value:
                print(message)
                exit(1)
        # TODO: Normalize email?
        bot_profile = self.get_user(email=bot_email, realm=realm)
        if not bot_profile.is_bot:
            print('User %s is not a bot' % (bot_email,))
            exit(1)
        if bot_profile.is_outgoing_webhook_bot:
            print('%s is already marked as an outgoing webhook bot' % (bot_email,))
            exit(1)
        Service.objects.create(name=service_name,
                               user_profile=bot_profile,
                               base_url=base_url,
                               token='',
                               interface=1)
        bot_profile.bot_type = UserProfile.OUTGOING_WEBHOOK_BOT
        bot_profile.save()
        print('Successfully converted %s into an outgoing webhook bot' % (bot_email,))
| zerver/management/commands/convert_bot_to_outgoing_webhook.py | 2,267 | TODO: Add token and interface as arguments once OutgoingWebhookWorker uses these fields on the Service object. TODO: Normalize email? | 133 | en | 0.603571 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# Copyright (C) 2014-2016 Anler Hernández <hello@anler.me>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from .. import factories as f
from tests.utils import disconnect_signals, reconnect_signals
from taiga.projects.services.stats import get_stats_for_project
pytestmark = pytest.mark.django_db
def setup_module(module):
    # Factory creation fires Django signals this test suite must not run.
    disconnect_signals()

def teardown_module(module):
    # Restore signal handlers for whatever test module runs next.
    reconnect_signals()
@pytest.fixture
def data():
    """Public project with six user stories, one scoring role and point
    values 1/2/4/8/16/32 (stories 5 and 6 live in closed milestones)."""
    m = type("Models", (object,), {})
    m.user = f.UserFactory.create()
    m.project = f.ProjectFactory(is_private=False, owner=m.user)
    # Only role1 receives points below; role2 stays at the 0-point default.
    m.role1 = f.RoleFactory(project=m.project)
    m.role2 = f.RoleFactory(project=m.project)
    m.null_points = f.PointsFactory(project=m.project, value=None)
    m.default_points = f.PointsFactory(project=m.project, value=0)
    # Powers of two so every per-story contribution is visible in totals.
    m.points1 = f.PointsFactory(project=m.project, value=1)
    m.points2 = f.PointsFactory(project=m.project, value=2)
    m.points3 = f.PointsFactory(project=m.project, value=4)
    m.points4 = f.PointsFactory(project=m.project, value=8)
    m.points5 = f.PointsFactory(project=m.project, value=16)
    m.points6 = f.PointsFactory(project=m.project, value=32)
    m.open_status = f.UserStoryStatusFactory(is_closed=False)
    m.closed_status = f.UserStoryStatusFactory(is_closed=True)
    m.project.default_points = m.default_points
    m.project.save()
    # Stories 1-4: open, in the backlog (no milestone).
    m.user_story1 = f.UserStoryFactory(project=m.project,
                                       status=m.open_status,
                                       milestone=None)
    m.user_story1.role_points.filter(role=m.role1).update(points=m.points1)
    m.user_story2 = f.UserStoryFactory(project=m.project,
                                       status=m.open_status,
                                       milestone=None)
    m.user_story2.role_points.filter(role=m.role1).update(points=m.points2)
    m.user_story3 = f.UserStoryFactory(project=m.project,
                                       status=m.open_status,
                                       milestone=None)
    m.user_story3.role_points.filter(role=m.role1).update(points=m.points3)
    m.user_story4 = f.UserStoryFactory(project=m.project,
                                       status=m.open_status,
                                       milestone=None)
    m.user_story4.role_points.filter(role=m.role1).update(points=m.points4)
    # 5 and 6 are inclosed milestones
    m.user_story5 = f.UserStoryFactory(project=m.project,
                                       status=m.open_status,
                                       milestone__closed=True,
                                       milestone__project=m.project)
    m.user_story5.role_points.filter(role=m.role1).update(points=m.points5)
    m.user_story6 = f.UserStoryFactory(project=m.project,
                                       status=m.open_status,
                                       milestone__closed=True,
                                       milestone__project=m.project)
    m.user_story6.role_points.filter(role=m.role1).update(points=m.points6)
    return m
def test_project_defined_points(client, data):
    """defined_points_per_role must follow role-point reassignments."""
    stats = get_stats_for_project(data.project)
    assert stats["defined_points_per_role"] == {data.role1.pk: 63, data.role2.pk: 0}
    # Drop story 1's role1 points to the 0-point default and give role2
    # one point instead: role1 loses 1, role2 gains 1.
    data.user_story1.role_points.filter(role=data.role1).update(points=data.default_points)
    data.user_story1.role_points.filter(role=data.role2).update(points=data.points1)
    stats = get_stats_for_project(data.project)
    assert stats["defined_points_per_role"] == {data.role1.pk: 62, data.role2.pk: 1}
def test_project_closed_points(client, data):
    """Closing stories one by one accumulates their points per role."""
    stats = get_stats_for_project(data.project)
    assert stats["closed_points_per_role"] == {}
    stories = (data.user_story1, data.user_story2, data.user_story3,
               data.user_story4, data.user_story5, data.user_story6)
    # Story values are 1, 2, 4, 8, 16, 32, so each prefix sum is 2^k - 1.
    running_totals = (1, 3, 7, 15, 31, 63)
    for story, total in zip(stories, running_totals):
        story.is_closed = True
        story.save()
        stats = get_stats_for_project(data.project)
        assert stats["closed_points_per_role"] == {data.role1.pk: total,
                                                   data.role2.pk: 0}
    stats = get_stats_for_project(data.project)
    assert stats["closed_points"] == 63
    assert stats["speed"] == 24
def test_project_assigned_points(client, data):
    """Moving backlog stories into a milestone raises assigned points."""
    stats = get_stats_for_project(data.project)
    # Stories 5 and 6 (16 + 32 points) start out assigned to milestones.
    assert stats["assigned_points_per_role"] == {data.role1.pk: 48, data.role2.pk: 0}
    sprint = data.user_story6.milestone
    backlog = (data.user_story1, data.user_story2,
               data.user_story3, data.user_story4)
    running_totals = (49, 51, 55, 63)
    for story, total in zip(backlog, running_totals):
        story.milestone = sprint
        story.save()
        stats = get_stats_for_project(data.project)
        assert stats["assigned_points_per_role"] == {data.role1.pk: total,
                                                     data.role2.pk: 0}
| docker-images/taigav2/taiga-back/tests/integration/test_stats.py | 7,248 | -*- coding: utf-8 -*- Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz> Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com> Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com> Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net> Copyright (C) 2014-2016 Anler Hernández <hello@anler.me> This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. 5 and 6 are inclosed milestones | 987 | en | 0.79645 |
import sys
import compileall
import importlib.util
import test.test_importlib.util
import os
import pathlib
import py_compile
import shutil
import struct
import tempfile
import time
import unittest
import io
from unittest import mock, skipUnless
try:
from concurrent.futures import ProcessPoolExecutor
_have_multiprocessing = True
except ImportError:
_have_multiprocessing = False
from test import support
from test.support import script_helper
from .test_py_compile import without_source_date_epoch
from .test_py_compile import SourceDateEpochTestMeta
class CompileallTestsBase:
def setUp(self):
    # Scratch tree: _test.py and _test2.py at the top plus
    # _subdir/_test3.py, all containing the same trivial source.
    self.directory = tempfile.mkdtemp()
    self.source_path = os.path.join(self.directory, '_test.py')
    self.bc_path = importlib.util.cache_from_source(self.source_path)
    with open(self.source_path, 'w') as file:
        file.write('x = 123\n')
    self.source_path2 = os.path.join(self.directory, '_test2.py')
    self.bc_path2 = importlib.util.cache_from_source(self.source_path2)
    shutil.copyfile(self.source_path, self.source_path2)
    self.subdirectory = os.path.join(self.directory, '_subdir')
    os.mkdir(self.subdirectory)
    self.source_path3 = os.path.join(self.subdirectory, '_test3.py')
    shutil.copyfile(self.source_path, self.source_path3)
def tearDown(self):
    # Remove the whole scratch tree created in setUp.
    shutil.rmtree(self.directory)
def add_bad_source_file(self):
    """Drop a syntactically invalid module into the scratch directory."""
    self.bad_source_path = os.path.join(self.directory, '_test_bad.py')
    with open(self.bad_source_path, 'w') as bad_file:
        bad_file.write('x (\n')
def timestamp_metadata(self):
    """Return (actual, expected) 12-byte pyc headers for the test module."""
    with open(self.bc_path, 'rb') as bytecode:
        actual = bytecode.read(12)
    mtime = int(os.stat(self.source_path).st_mtime)
    expected = struct.pack('<4sll', importlib.util.MAGIC_NUMBER, 0, mtime)
    return actual, expected
def recreation_check(self, metadata):
    """Check that compileall recreates bytecode when the new metadata is
    used."""
    if os.environ.get('SOURCE_DATE_EPOCH'):
        raise unittest.SkipTest('SOURCE_DATE_EPOCH is set')
    py_compile.compile(self.source_path)
    self.assertEqual(*self.timestamp_metadata())
    # Overwrite the pyc header with the (stale) metadata under test,
    # keeping the rest of the bytecode intact.
    with open(self.bc_path, 'rb') as file:
        bc = file.read()[len(metadata):]
    with open(self.bc_path, 'wb') as file:
        file.write(metadata)
        file.write(bc)
    self.assertNotEqual(*self.timestamp_metadata())
    compileall.compile_dir(self.directory, force=False, quiet=True)
    # BUG FIX: the original ended with assertTrue(*self.timestamp_metadata()),
    # which passes whenever the first tuple item is non-empty bytes — the
    # second item silently becomes the assertion *message*.  After the
    # recompile the actual header must equal the expected one.
    self.assertEqual(*self.timestamp_metadata())
def test_mtime(self):
    # Test a change in mtime leads to a new .pyc.
    # Header with the current magic number but a bogus mtime of 1.
    self.recreation_check(struct.pack('<4sll', importlib.util.MAGIC_NUMBER,
                                      0, 1))
def test_magic_number(self):
    # Test a change in the magic number leads to a new .pyc.
    # (The original comment said "mtime" — a copy/paste from test_mtime.)
    self.recreation_check(b'\0\0\0\0')
def test_compile_files(self):
# Test compiling a single file, and complete directory
for fn in (self.bc_path, self.bc_path2):
try:
os.unlink(fn)
except:
pass
self.assertTrue(compileall.compile_file(self.source_path,
force=False, quiet=True))
self.assertTrue(os.path.isfile(self.bc_path) and
not os.path.isfile(self.bc_path2))
os.unlink(self.bc_path)
self.assertTrue(compileall.compile_dir(self.directory, force=False,
quiet=True))
self.assertTrue(os.path.isfile(self.bc_path) and
os.path.isfile(self.bc_path2))
os.unlink(self.bc_path)
os.unlink(self.bc_path2)
# Test against bad files
self.add_bad_source_file()
self.assertFalse(compileall.compile_file(self.bad_source_path,
force=False, quiet=2))
self.assertFalse(compileall.compile_dir(self.directory,
force=False, quiet=2))
def test_compile_file_pathlike(self):
self.assertFalse(os.path.isfile(self.bc_path))
# we should also test the output
with support.captured_stdout() as stdout:
self.assertTrue(compileall.compile_file(pathlib.Path(self.source_path)))
self.assertRegex(stdout.getvalue(), r'Compiling ([^WindowsPath|PosixPath].*)')
self.assertTrue(os.path.isfile(self.bc_path))
def test_compile_file_pathlike_ddir(self):
self.assertFalse(os.path.isfile(self.bc_path))
self.assertTrue(compileall.compile_file(pathlib.Path(self.source_path),
ddir=pathlib.Path('ddir_path'),
quiet=2))
self.assertTrue(os.path.isfile(self.bc_path))
def test_compile_path(self):
with test.test_importlib.util.import_state(path=[self.directory]):
self.assertTrue(compileall.compile_path(quiet=2))
with test.test_importlib.util.import_state(path=[self.directory]):
self.add_bad_source_file()
self.assertFalse(compileall.compile_path(skip_curdir=False,
force=True, quiet=2))
def test_no_pycache_in_non_package(self):
# Bug 8563 reported that __pycache__ directories got created by
# compile_file() for non-.py files.
data_dir = os.path.join(self.directory, 'data')
data_file = os.path.join(data_dir, 'file')
os.mkdir(data_dir)
# touch data/file
with open(data_file, 'w'):
pass
compileall.compile_file(data_file)
self.assertFalse(os.path.exists(os.path.join(data_dir, '__pycache__')))
def test_optimize(self):
# make sure compiling with different optimization settings than the
# interpreter's creates the correct file names
optimize, opt = (1, 1) if __debug__ else (0, '')
compileall.compile_dir(self.directory, quiet=True, optimize=optimize)
cached = importlib.util.cache_from_source(self.source_path,
optimization=opt)
self.assertTrue(os.path.isfile(cached))
cached2 = importlib.util.cache_from_source(self.source_path2,
optimization=opt)
self.assertTrue(os.path.isfile(cached2))
cached3 = importlib.util.cache_from_source(self.source_path3,
optimization=opt)
self.assertTrue(os.path.isfile(cached3))
def test_compile_dir_pathlike(self):
self.assertFalse(os.path.isfile(self.bc_path))
with support.captured_stdout() as stdout:
compileall.compile_dir(pathlib.Path(self.directory))
line = stdout.getvalue().splitlines()[0]
self.assertRegex(line, r'Listing ([^WindowsPath|PosixPath].*)')
self.assertTrue(os.path.isfile(self.bc_path))
@mock.patch('concurrent.futures.ProcessPoolExecutor')
def test_compile_pool_called(self, pool_mock):
compileall.compile_dir(self.directory, quiet=True, workers=5)
self.assertTrue(pool_mock.called)
def test_compile_workers_non_positive(self):
with self.assertRaisesRegex(ValueError,
"workers must be greater or equal to 0"):
compileall.compile_dir(self.directory, workers=-1)
@mock.patch('concurrent.futures.ProcessPoolExecutor')
def test_compile_workers_cpu_count(self, pool_mock):
compileall.compile_dir(self.directory, quiet=True, workers=0)
self.assertEqual(pool_mock.call_args[1]['max_workers'], None)
@mock.patch('concurrent.futures.ProcessPoolExecutor')
@mock.patch('compileall.compile_file')
def test_compile_one_worker(self, compile_file_mock, pool_mock):
compileall.compile_dir(self.directory, quiet=True)
self.assertFalse(pool_mock.called)
self.assertTrue(compile_file_mock.called)
@mock.patch('concurrent.futures.ProcessPoolExecutor', new=None)
@mock.patch('compileall.compile_file')
def test_compile_missing_multiprocessing(self, compile_file_mock):
compileall.compile_dir(self.directory, quiet=True, workers=5)
self.assertTrue(compile_file_mock.called)
class CompileallTestsWithSourceEpoch(CompileallTestsBase,
                                     unittest.TestCase,
                                     metaclass=SourceDateEpochTestMeta,
                                     source_date_epoch=True):
    """Run the base tests with SOURCE_DATE_EPOCH set (hash-based pycs)."""
    pass
class CompileallTestsWithoutSourceEpoch(CompileallTestsBase,
                                        unittest.TestCase,
                                        metaclass=SourceDateEpochTestMeta,
                                        source_date_epoch=False):
    """Run the base tests without SOURCE_DATE_EPOCH (timestamp-based pycs)."""
    pass
class EncodingTest(unittest.TestCase):
    """Issue 6716: compileall must escape source code when printing errors
    to stdout, instead of crashing on characters the stream cannot encode."""
    def setUp(self):
        # One source file whose offending line both is a py3 syntax error
        # and contains a non-ascii character ('\u20ac'), so compileall will
        # try to echo it while reporting the failure.
        self.directory = tempfile.mkdtemp()
        self.source_path = os.path.join(self.directory, '_test.py')
        source = '# -*- coding: utf-8 -*-\n' 'print u"\u20ac"\n'
        with open(self.source_path, 'w', encoding='utf-8') as file:
            file.write(source)
    def tearDown(self):
        shutil.rmtree(self.directory)
    def test_error(self):
        # Funnel stdout through an ascii-only stream; compiling must not
        # raise UnicodeEncodeError while printing the error report.
        saved_stdout = sys.stdout
        sys.stdout = io.TextIOWrapper(io.BytesIO(), encoding='ascii')
        try:
            compileall.compile_dir(self.directory)
        finally:
            sys.stdout = saved_stdout
class CommandLineTestsBase:
    """Test compileall's CLI (``python -m compileall ...``).

    Mixed into concrete TestCase classes below, which run the same tests
    with and without SOURCE_DATE_EPOCH set in the environment.
    """
    @classmethod
    def setUpClass(cls):
        # Probe every directory on sys.path to find out whether this process
        # may create __pycache__ entries there; several tests must be
        # skipped when it cannot (see _skip_if_sys_path_not_writable).
        for path in filter(os.path.isdir, sys.path):
            directory_created = False
            directory = pathlib.Path(path) / '__pycache__'
            path = directory / 'test.try'
            try:
                if not directory.is_dir():
                    directory.mkdir()
                    directory_created = True
                with path.open('w') as file:
                    file.write('# for test_compileall')
            except OSError:
                sys_path_writable = False
                break
            finally:
                support.unlink(str(path))
                if directory_created:
                    directory.rmdir()
        else:
            sys_path_writable = True
        cls._sys_path_writable = sys_path_writable
    def _skip_if_sys_path_not_writable(self):
        if not self._sys_path_writable:
            raise unittest.SkipTest('not all entries on sys.path are writable')
    def _get_run_args(self, args):
        # Build the subprocess argv; -S skips site initialization so the
        # child's sys.path stays predictable.
        return [*support.optim_args_from_interpreter_flags(),
                '-S', '-m', 'compileall',
                *args]
    def assertRunOK(self, *args, **env_vars):
        # Run the CLI expecting success and an empty stderr; return stdout.
        rc, out, err = script_helper.assert_python_ok(
                         *self._get_run_args(args), **env_vars)
        self.assertEqual(b'', err)
        return out
    def assertRunNotOK(self, *args, **env_vars):
        # Run the CLI expecting a non-zero exit status.
        rc, out, err = script_helper.assert_python_failure(
                         *self._get_run_args(args), **env_vars)
        return rc, out, err
    def assertCompiled(self, fn):
        path = importlib.util.cache_from_source(fn)
        self.assertTrue(os.path.exists(path))
    def assertNotCompiled(self, fn):
        path = importlib.util.cache_from_source(fn)
        self.assertFalse(os.path.exists(path))
    def setUp(self):
        # Fixture: directory/foo/{__init__.py,bar.py} -- a minimal package.
        self.directory = tempfile.mkdtemp()
        self.addCleanup(support.rmtree, self.directory)
        self.pkgdir = os.path.join(self.directory, 'foo')
        os.mkdir(self.pkgdir)
        self.pkgdir_cachedir = os.path.join(self.pkgdir, '__pycache__')
        # Create the __init__.py and a package module.
        self.initfn = script_helper.make_script(self.pkgdir, '__init__', '')
        self.barfn = script_helper.make_script(self.pkgdir, 'bar', '')
    def test_no_args_compiles_path(self):
        # Note that -l is implied for the no args case.
        self._skip_if_sys_path_not_writable()
        bazfn = script_helper.make_script(self.directory, 'baz', '')
        self.assertRunOK(PYTHONPATH=self.directory)
        self.assertCompiled(bazfn)
        self.assertNotCompiled(self.initfn)
        self.assertNotCompiled(self.barfn)
    @without_source_date_epoch  # timestamp invalidation test
    def test_no_args_respects_force_flag(self):
        self._skip_if_sys_path_not_writable()
        bazfn = script_helper.make_script(self.directory, 'baz', '')
        self.assertRunOK(PYTHONPATH=self.directory)
        pycpath = importlib.util.cache_from_source(bazfn)
        # Set atime/mtime backward to avoid file timestamp resolution issues
        os.utime(pycpath, (time.time()-60,)*2)
        mtime = os.stat(pycpath).st_mtime
        # Without force, no recompilation
        self.assertRunOK(PYTHONPATH=self.directory)
        mtime2 = os.stat(pycpath).st_mtime
        self.assertEqual(mtime, mtime2)
        # Now force it.
        self.assertRunOK('-f', PYTHONPATH=self.directory)
        mtime2 = os.stat(pycpath).st_mtime
        self.assertNotEqual(mtime, mtime2)
    def test_no_args_respects_quiet_flag(self):
        self._skip_if_sys_path_not_writable()
        script_helper.make_script(self.directory, 'baz', '')
        noisy = self.assertRunOK(PYTHONPATH=self.directory)
        self.assertIn(b'Listing ', noisy)
        quiet = self.assertRunOK('-q', PYTHONPATH=self.directory)
        self.assertNotIn(b'Listing ', quiet)
    # Ensure that the default behavior of compileall's CLI is to create
    # PEP 3147/PEP 488 pyc files.
    # This loop dynamically generates one test_pep3147_paths_* method per
    # optimization level via the locals() assignment below.
    for name, ext, switch in [
        ('normal', 'pyc', []),
        ('optimize', 'opt-1.pyc', ['-O']),
        ('doubleoptimize', 'opt-2.pyc', ['-OO']),
    ]:
        def f(self, ext=ext, switch=switch):
            script_helper.assert_python_ok(*(switch +
                ['-m', 'compileall', '-q', self.pkgdir]))
            # Verify the __pycache__ directory contents.
            self.assertTrue(os.path.exists(self.pkgdir_cachedir))
            expected = sorted(base.format(sys.implementation.cache_tag, ext)
                              for base in ('__init__.{}.{}', 'bar.{}.{}'))
            self.assertEqual(sorted(os.listdir(self.pkgdir_cachedir)), expected)
            # Make sure there are no .pyc files in the source directory.
            self.assertFalse([fn for fn in os.listdir(self.pkgdir)
                              if fn.endswith(ext)])
        locals()['test_pep3147_paths_' + name] = f
    def test_legacy_paths(self):
        # Ensure that with the proper switch, compileall leaves legacy
        # pyc files, and no __pycache__ directory.
        self.assertRunOK('-b', '-q', self.pkgdir)
        # Verify the __pycache__ directory contents.
        self.assertFalse(os.path.exists(self.pkgdir_cachedir))
        expected = sorted(['__init__.py', '__init__.pyc', 'bar.py',
                           'bar.pyc'])
        self.assertEqual(sorted(os.listdir(self.pkgdir)), expected)
    def test_multiple_runs(self):
        # Bug 8527 reported that multiple calls produced empty
        # __pycache__/__pycache__ directories.
        self.assertRunOK('-q', self.pkgdir)
        # Verify the __pycache__ directory contents.
        self.assertTrue(os.path.exists(self.pkgdir_cachedir))
        cachecachedir = os.path.join(self.pkgdir_cachedir, '__pycache__')
        self.assertFalse(os.path.exists(cachecachedir))
        # Call compileall again.
        self.assertRunOK('-q', self.pkgdir)
        self.assertTrue(os.path.exists(self.pkgdir_cachedir))
        self.assertFalse(os.path.exists(cachecachedir))
    @without_source_date_epoch  # timestamp invalidation test
    def test_force(self):
        self.assertRunOK('-q', self.pkgdir)
        pycpath = importlib.util.cache_from_source(self.barfn)
        # set atime/mtime backward to avoid file timestamp resolution issues
        os.utime(pycpath, (time.time()-60,)*2)
        mtime = os.stat(pycpath).st_mtime
        # without force, no recompilation
        self.assertRunOK('-q', self.pkgdir)
        mtime2 = os.stat(pycpath).st_mtime
        self.assertEqual(mtime, mtime2)
        # now force it.
        self.assertRunOK('-q', '-f', self.pkgdir)
        mtime2 = os.stat(pycpath).st_mtime
        self.assertNotEqual(mtime, mtime2)
    def test_recursion_control(self):
        # -l limits compilation to the given directory, non-recursively.
        subpackage = os.path.join(self.pkgdir, 'spam')
        os.mkdir(subpackage)
        subinitfn = script_helper.make_script(subpackage, '__init__', '')
        hamfn = script_helper.make_script(subpackage, 'ham', '')
        self.assertRunOK('-q', '-l', self.pkgdir)
        self.assertNotCompiled(subinitfn)
        self.assertFalse(os.path.exists(os.path.join(subpackage, '__pycache__')))
        self.assertRunOK('-q', self.pkgdir)
        self.assertCompiled(subinitfn)
        self.assertCompiled(hamfn)
    def test_recursion_limit(self):
        # -r N recurses at most N directory levels deep.
        subpackage = os.path.join(self.pkgdir, 'spam')
        subpackage2 = os.path.join(subpackage, 'ham')
        subpackage3 = os.path.join(subpackage2, 'eggs')
        for pkg in (subpackage, subpackage2, subpackage3):
            script_helper.make_pkg(pkg)
        subinitfn = os.path.join(subpackage, '__init__.py')
        hamfn = script_helper.make_script(subpackage, 'ham', '')
        spamfn = script_helper.make_script(subpackage2, 'spam', '')
        eggfn = script_helper.make_script(subpackage3, 'egg', '')
        self.assertRunOK('-q', '-r 0', self.pkgdir)
        self.assertNotCompiled(subinitfn)
        self.assertFalse(
            os.path.exists(os.path.join(subpackage, '__pycache__')))
        self.assertRunOK('-q', '-r 1', self.pkgdir)
        self.assertCompiled(subinitfn)
        self.assertCompiled(hamfn)
        self.assertNotCompiled(spamfn)
        self.assertRunOK('-q', '-r 2', self.pkgdir)
        self.assertCompiled(subinitfn)
        self.assertCompiled(hamfn)
        self.assertCompiled(spamfn)
        self.assertNotCompiled(eggfn)
        self.assertRunOK('-q', '-r 5', self.pkgdir)
        self.assertCompiled(subinitfn)
        self.assertCompiled(hamfn)
        self.assertCompiled(spamfn)
        self.assertCompiled(eggfn)
    def test_quiet(self):
        noisy = self.assertRunOK(self.pkgdir)
        quiet = self.assertRunOK('-q', self.pkgdir)
        self.assertNotEqual(b'', noisy)
        self.assertEqual(b'', quiet)
    def test_silent(self):
        # -qq suppresses even error output; a single -q does not.
        script_helper.make_script(self.pkgdir, 'crunchyfrog', 'bad(syntax')
        _, quiet, _ = self.assertRunNotOK('-q', self.pkgdir)
        _, silent, _ = self.assertRunNotOK('-qq', self.pkgdir)
        self.assertNotEqual(b'', quiet)
        self.assertEqual(b'', silent)
    def test_regexp(self):
        # -x excludes paths matching the regexp (here: files starting 'ba').
        self.assertRunOK('-q', '-x', r'ba[^\\/]*$', self.pkgdir)
        self.assertNotCompiled(self.barfn)
        self.assertCompiled(self.initfn)
    def test_multiple_dirs(self):
        pkgdir2 = os.path.join(self.directory, 'foo2')
        os.mkdir(pkgdir2)
        init2fn = script_helper.make_script(pkgdir2, '__init__', '')
        bar2fn = script_helper.make_script(pkgdir2, 'bar2', '')
        self.assertRunOK('-q', self.pkgdir, pkgdir2)
        self.assertCompiled(self.initfn)
        self.assertCompiled(self.barfn)
        self.assertCompiled(init2fn)
        self.assertCompiled(bar2fn)
    def test_d_compile_error(self):
        # -d substitutes the given directory into compile error messages.
        script_helper.make_script(self.pkgdir, 'crunchyfrog', 'bad(syntax')
        rc, out, err = self.assertRunNotOK('-q', '-d', 'dinsdale', self.pkgdir)
        self.assertRegex(out, b'File "dinsdale')
    def test_d_runtime_error(self):
        # The -d directory must also appear in tracebacks raised at runtime
        # from the compiled (legacy-renamed) pyc.
        bazfn = script_helper.make_script(self.pkgdir, 'baz', 'raise Exception')
        self.assertRunOK('-q', '-d', 'dinsdale', self.pkgdir)
        fn = script_helper.make_script(self.pkgdir, 'bing', 'import baz')
        pyc = importlib.util.cache_from_source(bazfn)
        os.rename(pyc, os.path.join(self.pkgdir, 'baz.pyc'))
        os.remove(bazfn)
        rc, out, err = script_helper.assert_python_failure(fn, __isolated=False)
        self.assertRegex(err, b'File "dinsdale')
    def test_include_bad_file(self):
        # -i with a nonexistent list file reports the error without a
        # traceback and compiles nothing.
        rc, out, err = self.assertRunNotOK(
            '-i', os.path.join(self.directory, 'nosuchfile'), self.pkgdir)
        self.assertRegex(out, b'rror.*nosuchfile')
        self.assertNotRegex(err, b'Traceback')
        self.assertFalse(os.path.exists(importlib.util.cache_from_source(
                                            self.pkgdir_cachedir)))
    def test_include_file_with_arg(self):
        # -i list file entries are compiled in addition to positional args.
        f1 = script_helper.make_script(self.pkgdir, 'f1', '')
        f2 = script_helper.make_script(self.pkgdir, 'f2', '')
        f3 = script_helper.make_script(self.pkgdir, 'f3', '')
        f4 = script_helper.make_script(self.pkgdir, 'f4', '')
        with open(os.path.join(self.directory, 'l1'), 'w') as l1:
            l1.write(os.path.join(self.pkgdir, 'f1.py')+os.linesep)
            l1.write(os.path.join(self.pkgdir, 'f2.py')+os.linesep)
        self.assertRunOK('-i', os.path.join(self.directory, 'l1'), f4)
        self.assertCompiled(f1)
        self.assertCompiled(f2)
        self.assertNotCompiled(f3)
        self.assertCompiled(f4)
    def test_include_file_no_arg(self):
        f1 = script_helper.make_script(self.pkgdir, 'f1', '')
        f2 = script_helper.make_script(self.pkgdir, 'f2', '')
        f3 = script_helper.make_script(self.pkgdir, 'f3', '')
        f4 = script_helper.make_script(self.pkgdir, 'f4', '')
        with open(os.path.join(self.directory, 'l1'), 'w') as l1:
            l1.write(os.path.join(self.pkgdir, 'f2.py')+os.linesep)
        self.assertRunOK('-i', os.path.join(self.directory, 'l1'))
        self.assertNotCompiled(f1)
        self.assertCompiled(f2)
        self.assertNotCompiled(f3)
        self.assertNotCompiled(f4)
    def test_include_on_stdin(self):
        # '-i -' reads the file list from standard input.
        f1 = script_helper.make_script(self.pkgdir, 'f1', '')
        f2 = script_helper.make_script(self.pkgdir, 'f2', '')
        f3 = script_helper.make_script(self.pkgdir, 'f3', '')
        f4 = script_helper.make_script(self.pkgdir, 'f4', '')
        p = script_helper.spawn_python(*(self._get_run_args(()) + ['-i', '-']))
        p.stdin.write((f3+os.linesep).encode('ascii'))
        script_helper.kill_python(p)
        self.assertNotCompiled(f1)
        self.assertNotCompiled(f2)
        self.assertCompiled(f3)
        self.assertNotCompiled(f4)
    def test_compiles_as_much_as_possible(self):
        # One bad file must not stop compilation of the remaining files.
        bingfn = script_helper.make_script(self.pkgdir, 'bing', 'syntax(error')
        rc, out, err = self.assertRunNotOK('nosuchfile', self.initfn,
                                           bingfn, self.barfn)
        self.assertRegex(out, b'rror')
        self.assertNotCompiled(bingfn)
        self.assertCompiled(self.initfn)
        self.assertCompiled(self.barfn)
    def test_invalid_arg_produces_message(self):
        out = self.assertRunOK('badfilename')
        self.assertRegex(out, b"Can't list 'badfilename'")
    def test_pyc_invalidation_mode(self):
        script_helper.make_script(self.pkgdir, 'f1', '')
        pyc = importlib.util.cache_from_source(
            os.path.join(self.pkgdir, 'f1.py'))
        # PEP 552: flags word 0b11 = checked-hash, 0b01 = unchecked-hash.
        self.assertRunOK('--invalidation-mode=checked-hash', self.pkgdir)
        with open(pyc, 'rb') as fp:
            data = fp.read()
        self.assertEqual(int.from_bytes(data[4:8], 'little'), 0b11)
        self.assertRunOK('--invalidation-mode=unchecked-hash', self.pkgdir)
        with open(pyc, 'rb') as fp:
            data = fp.read()
        self.assertEqual(int.from_bytes(data[4:8], 'little'), 0b01)
    @skipUnless(_have_multiprocessing, "requires multiprocessing")
    def test_workers(self):
        # -j 0 parallelizes over all cores; everything must still compile.
        bar2fn = script_helper.make_script(self.directory, 'bar2', '')
        files = []
        for suffix in range(5):
            pkgdir = os.path.join(self.directory, 'foo{}'.format(suffix))
            os.mkdir(pkgdir)
            fn = script_helper.make_script(pkgdir, '__init__', '')
            files.append(script_helper.make_script(pkgdir, 'bar2', ''))
        self.assertRunOK(self.directory, '-j', '0')
        self.assertCompiled(bar2fn)
        for file in files:
            self.assertCompiled(file)
    @mock.patch('compileall.compile_dir')
    def test_workers_available_cores(self, compile_dir):
        # -j0 on the command line must be forwarded as workers=0.
        with mock.patch("sys.argv",
                        new=[sys.executable, self.directory, "-j0"]):
            compileall.main()
            self.assertTrue(compile_dir.called)
            self.assertEqual(compile_dir.call_args[-1]['workers'], 0)
class CommmandLineTestsWithSourceEpoch(CommandLineTestsBase,
                                       unittest.TestCase,
                                       metaclass=SourceDateEpochTestMeta,
                                       source_date_epoch=True):
    """Run the CLI tests with SOURCE_DATE_EPOCH set (hash-based pycs)."""
    pass
class CommmandLineTestsNoSourceEpoch(CommandLineTestsBase,
                                     unittest.TestCase,
                                     metaclass=SourceDateEpochTestMeta,
                                     source_date_epoch=False):
    """Run the CLI tests without SOURCE_DATE_EPOCH (timestamp-based pycs)."""
    pass
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| python/Lib/test/test_compileall.py | 25,965 | Test compileall's CLI.
Issue 6716: compileall should escape source code when printing errors
to stdout.
Check that compileall recreates bytecode when the new metadata is
used.
Test a change in mtime leads to a new .pyc. Test a change in mtime leads to a new .pyc. Test compiling a single file, and complete directory Test against bad files we should also test the output Bug 8563 reported that __pycache__ directories got created by compile_file() for non-.py files. touch data/file make sure compiling with different optimization settings than the interpreter's creates the correct file names Create the __init__.py and a package module. Note that -l is implied for the no args case. timestamp invalidation test Set atime/mtime backward to avoid file timestamp resolution issues Without force, no recompilation Now force it. Ensure that the default behavior of compileall's CLI is to create PEP 3147/PEP 488 pyc files. Verify the __pycache__ directory contents. Make sure there are no .pyc files in the source directory. Ensure that with the proper switch, compileall leaves legacy pyc files, and no __pycache__ directory. Verify the __pycache__ directory contents. Bug 8527 reported that multiple calls produced empty __pycache__/__pycache__ directories. Verify the __pycache__ directory contents. Call compileall again. timestamp invalidation test set atime/mtime backward to avoid file timestamp resolution issues without force, no recompilation now force it. | 1,465 | en | 0.782112 |
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 6 22:41:54 2020
@author: mahjaf
Automatic sleep scoring implemented for Zmax headband.
"""
#%% Reading EDF section
#####===================== Importiung libraries =========================#####
import mne
import numpy as np
from numpy import loadtxt
import h5py
import time
import os
from ssccoorriinngg import ssccoorriinngg
import matplotlib.pyplot as plt
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import confusion_matrix, make_scorer, accuracy_score, precision_score, recall_score, f1_score, classification_report
import pandas as pd
import tensorflow as tf
from scipy import signal
#####==================== Defining required paths ========================#####
# NOTE: absolute network-share paths; adjust these when running elsewhere.
Main_path = "P:/3013080.01/"
subject_Id_folder = Main_path + "Autoscoring/ssccoorriinngg/"
Data_folder = Main_path + "Zmax_Data/"
Hypnogram_folder = Main_path + "somno_scorings/Rathiga/"
#####===================== Reading EDF data files=========================#####
# One subject id per line; ids are later used to build dictionary keys.
subject_ids = loadtxt(subject_Id_folder+"Zmax/Subject_ids_excluding 22_2.txt", dtype = 'str',delimiter='\n')
#####============= create an object of ssccoorriinngg class ==============#####
# fs: sampling frequency in Hz, T: epoch length in seconds (30-s sleep epochs).
Object = ssccoorriinngg(filename='', channel='', fs = 256, T = 30)
#%% Load featureset and labels
# Load pre-computed per-subject feature matrices and hypnogram labels.
path = "P:/3013080.01/Autoscoring/features/"
filename = "Zmax_Rathiga_scorings_ch-ch2+AccFeats_190620"
subjects_dic, hyp_dic = Object.load_dictionary(path, filename)
#%% ================================Training part==============================
# Training percentage
train_size = .7
# NOTE(review): n_train is not used below -- the train/test subject lists
# are hard-coded; confirm whether the random split is still intended.
n_train = round(train_size * len(subject_ids))
#######=== Randomly shuffle subjects to choose train and test splits ===#######
subject_ids = np.random.RandomState(seed=0).permutation(subject_ids)
#######=============== Initialize train and test arrays ================#######
# A sample entry is used only to read the feature/label dimensionality.
sample_subject = "subjectP_12_night1_scoring.csv.spisop.new - Copy"
sample_hyp = "hypP_12_night1_scoring.csv.spisop.new - Copy"
X_train = np.empty((0, np.shape(subjects_dic[sample_subject])[1]))
X_test = np.empty((0, np.shape(subjects_dic[sample_subject])[1]))
y_train = np.empty((0, np.shape(hyp_dic[sample_hyp])[1]))
y_test = np.empty((0, np.shape(hyp_dic[sample_hyp])[1]))
########======= Picking the train subjects and concatenate them =======########
tic = time.time()
train_subjects_list = ["P_12_night1_scoring.csv.spisop.new - Copy",
                       "P_13_night2_scoring.csv.spisop.new - Copy",
                       "P_15_night2_scoring.csv.spisop.new - Copy",
                       "P_16_night1_scoring.csv.spisop.new - Copy",
                       "P_18_night1_scoring.csv.spisop.new - Copy",
                       "P_20_night1_scoring.csv.spisop.new - Copy",
                       "P_21_night1_scoring.csv.spisop.new - Copy",
                       "P_23_night1_scoring.csv.spisop.new - Copy"]
for c_subj in train_subjects_list:
    # train hypnogram key
    str_train_hyp = 'hyp' + str(c_subj)
    # train featureset key
    str_train_feat = 'subject' + str(c_subj)
    # create template arrays for features and label
    tmp_x = subjects_dic[str_train_feat]
    tmp_y = hyp_dic[str_train_hyp]
    # Concatenate features and labels (epochs stacked row-wise)
    X_train = np.row_stack((X_train, tmp_x))
    y_train = np.row_stack((y_train, tmp_y))
    del tmp_x, tmp_y
print('Training set was successfully created in : {} secs'.format(time.time()-tic))
#%% ================================Test part==============================%%#
########======== Picking the test subjects and concatenate them =======########
tic = time.time()
test_subjects_list = []
tst_subj_list = ["P_12_night2_scoring.csv.spisop.new - Copy",
                 "P_12_night3_scoring.csv.spisop.new - Copy",
                 "P_13_night3_scoring.csv.spisop.new - Copy",
                 "P_14_night3_scoring.csv.spisop.new - Copy",
                 "P_15_night3_scoring.csv.spisop.new - Copy",
                 "P_16_night3_scoring.csv.spisop.new - Copy",
                 "P_18_night2_scoring.csv.spisop.new - Copy",
                 "P_18_night3_scoring.csv.spisop.new - Copy",
                 "P_20_night2_scoring.csv.spisop.new - Copy",
                 "P_20_night3_scoring.csv.spisop.new - Copy",
                 "P_21_night2_scoring.csv.spisop.new - Copy",
                 "P_21_night3_scoring.csv.spisop.new - Copy"]
for c_subj in tst_subj_list:
    # test hypnogram key
    str_test_hyp = 'hyp' + str(c_subj)
    # test featureset key
    str_test_feat = 'subject' + str(c_subj)
    # create template arrays for features and label
    tmp_x = subjects_dic[str_test_feat]
    tmp_y = hyp_dic[str_test_hyp]
    # Concatenate features and labels (epochs stacked row-wise)
    X_test = np.row_stack((X_test, tmp_x))
    y_test = np.row_stack((y_test, tmp_y))
    # keep the subject id for the per-subject reports below
    test_subjects_list.append(str_test_feat)
    # remove for next iteration
    del tmp_x, tmp_y, str_test_feat, str_test_hyp
print('Test set was successfully created in : {} secs'.format(time.time()-tic))
print(f'Raw train and test data were created.')
########================== Replace any probable NaN ===================########
X_train = Object.replace_NaN_with_mean(X_train)
X_test = Object.replace_NaN_with_mean(X_test)
########================== Replace any probable inf ===================########
X_train = Object.replace_inf_with_mean(X_train)
X_test = Object.replace_inf_with_mean(X_test)
########==================== Z-score of features ======================########
X_train, X_test = Object.Standardadize_features(X_train, X_test)
########============ Add backward time dependence to features =========########
td = 5  # Time dependence: number of epochs of memory
X_train_td = Object.add_time_dependence_backward(X_train, n_time_dependence=td,
                                    padding_type = 'sequential')
X_test_td = Object.add_time_dependence_backward(X_test, n_time_dependence=td,
                                    padding_type = 'sequential')
########====================== Feature Selection ======================########
y_train_td = Object.binary_to_single_column_label(y_train)
########========== select features only on first iteration ============########
# =============================================================================
# ranks, Feat_selected, selected_feats_ind = Object.FeatSelect_Boruta(X_train_td,
#                 y_train_td[:,0], max_iter = 50, max_depth = 7)
#
# #######===================== Save selected feats =======================#######
#
# path = "P:/3013080.01/Autoscoring/features/"
# filename = "Selected_Features_BoturaNoTimeDependency_5_Backward_Zmax_ch1-ch2+Acc_200620"
# with open(path+filename+'.pickle',"wb") as f:
#     pickle.dump(selected_feats_ind, f)
# =============================================================================
########################### Load selected feats ###############################
# Indices of features previously picked by Boruta (see commented block above).
path = "P:/3013080.01/Autoscoring/features/"
filename = "Selected_Features_BoturaAfterTD=5_Backward_Zmax_ch1-ch2+Acc_200620"
#filename = "sleep_scoring_NoArousal_8channels_selected_feats_NEW"
with open(path + filename + '.pickle', "rb") as f:
    selected_feats_ind = pickle.load(f)
########=================== Apply selected features ===================########
X_train = X_train_td[:, selected_feats_ind]
X_test = X_test_td[:, selected_feats_ind]
########============== Define classifier of interest ==================########
# NOTE(review): this XGB prediction is immediately overwritten by the ANN
# call below, so the (expensive) XGB training result is discarded -- comment
# one of the two out depending on the desired classifier.
y_pred = Object.XGB_Modelling(X_train, y_train,X_test, y_test, n_estimators = 500)
#y_pred = Object.KernelSVM_Modelling(X_train, y_train,X_test, y_test, kernel='rbf')
y_pred = Object.ANN_classifier(X_train, y_train, X_test, units_h1=600, units_h2 = 300, units_output = 5,
                activation_out = 'softmax',
                init = 'uniform', activation = 'relu', optimizer = 'adam',
                loss = 'categorical_crossentropy', metrics=[tf.keras.metrics.Recall()],
                h3_status = 'deactive', units_h3 = 50, epochs = 100, batch_size = 100)
########===== Metrics to assess the model performance on test data ====########
Acc, Recall, prec, f1_sc, kappa, mcm= Object.multi_label_confusion_matrix(y_test, y_pred)
########================= Creating subjective outputs =================########
# Per-subject (rather than pooled) performance reports.
Object.create_subjecive_results(y_true=y_test, y_pred=y_pred,
                                test_subjects_list = test_subjects_list,
                                subjects_data_dic = subjects_dic,
                                fname_save = "results")
########============= find number of epochs per stage =================########
Object.find_number_of_samples_per_class(y_test, including_artefact = False)
########================== Comparative hypnogram ======================########
hyp_true = Object.binary_to_single_column_label(y_test)
Object.plot_comparative_hyp(hyp_true = hyp_true, hyp_pred = y_pred, mark_REM = 'active')
########==================== Plot subjectve hypnos ====================########
Object.plot_subjective_hypno(y_true=y_test, y_pred=y_pred,
                             test_subjects_list=test_subjects_list,
                             subjects_data_dic=subjects_dic,
                             save_fig = False,
                             directory="P:/3013080.01/Autoscoring/ssccoorriinngg/")
########=================== Plot overall conf-mat =======================######
Object.plot_confusion_matrix(y_test,y_pred, target_names = ['Wake','N1','N2','SWS','REM'],
                             title='Confusion matrix of ssccoorriinngg algorithm',
                             cmap = None,
                             normalize=True)
########================== Plot subjective conf-mat ==================########
Object.plot_confusion_mat_subjective(y_true=y_test, y_pred=y_pred,
                                     test_subjects_list=test_subjects_list,
                                     subjects_data_dic=subjects_dic)
########========================== Save figure =======================#########
# NOTE(review): c_subj here is the leftover loop variable from the test-set
# loop above (the last test subject); confirm the intended file name.
Object.save_figure(saving_format = '.png',
                   directory="P:/3013080.02/Mahdad/Github/ssccoorriinngg/",
                   saving_name = 'test_subject_all' + str(c_subj), dpi = 900,
                   full_screen = False)
| Zmax_autoscoring_controlled_train_test_split.py | 10,657 | Created on Sat Jun 6 22:41:54 2020
@author: mahjaf
Automatic sleep scoring implemented for Zmax headband.
-*- coding: utf-8 -*-%% Reading EDF section===================== Importiung libraries ============================================= Defining required paths r============================================ Reading EDF data files====================================== create an object of ssccoorriinngg class ==============%% Load featureset and labels%% ================================Training part============================== Training perentage=== Randomly shuffle subjects to choose train and test splits ================== Initialize train and test arrays ======================= Picking the train subjetcs and concatenate them ======= train hypnogram train featureset create template arrays for featurs and label Concatenate features and labels%% ================================Test part==============================%%======== Picking the test subjetcs and concatenate them ======= test hypnogram test featureset create template arrays for featurs and label Concatenate features and labels keep the subject id remove for next iteration================== Replace any probable NaN ===================================== Replace any probable inf ======================================= Z-score of features ================================ select features only on first iteration ============ Time dependence: number of epochs of memory====================== Feature Selection ================================ select features only on first iteration ============ ============================================================================= ranks, Feat_selected, selected_feats_ind = Object.FeatSelect_Boruta(X_train_td, y_train_td[:,0], max_iter = 50, max_depth = 7) ===================== Save selected feats ======================= path = "P:/3013080.01/Autoscoring/features/" filename = "Selected_Features_BoturaNoTimeDependency_5_Backward_Zmax_ch1-ch2+Acc_200620" with open(path+filename+'.pickle',"wb") as f: pickle.dump(selected_feats_ind, f) 
============================================================================= Load selected feats filename = "sleep_scoring_NoArousal_8channels_selected_feats_NEW"=================== Apply selected features ================================= Define classifier of interest ==================y_pred = Object.KernelSVM_Modelling(X_train, y_train,X_test, y_test, kernel='rbf')===== Metrics to assess the model performance on test data ===================== Creating subjective outputs ============================== find number of epochs per stage =================================== Comparative hypnogram ========================================== Plot subjectve hypnos ======================================= Plot overall conf-mat ========================================= Plot subjective conf-mat ============================================ Save figure ======================= | 3,025 | en | 0.592874 |
import warnings
from mlprimitives.utils import import_object
# Aggregations that pandas resamplers understand natively by name; anything
# else is resolved through ``import_object`` before aggregating.
_RESAMPLE_AGGS = ['mean', 'median', 'prod', 'quantile', 'std', 'sum', 'var']
def resample(df, rule, on=None, groupby=(), aggregation='mean',
             reset_index=True, time_index=None):
    """pd.DataFrame.resample adapter.

    Resample ``df`` over the indicated time column (or its index) and apply
    the requested aggregation, optionally grouping by other columns first.
    When ``groupby`` is used, the aggregated result has a multi-index.

    Args:
        df (pandas.DataFrame):
            DataFrame to resample.
        rule (str or int):
            Resampling rule. An integer is interpreted as a number of
            seconds; a string is passed to pandas unchanged.
        on (str or None):
            Column to use as the time index; ``None`` means the DataFrame
            index itself.
        groupby (list):
            Optional columns to group by before resampling.
        aggregation (callable or str):
            Aggregation function, the name of a standard pandas aggregation,
            or the fully qualified name of an importable python function.
        reset_index (bool):
            Whether to reset the index after aggregating.
        time_index (str or None):
            Deprecated alias of ``on``.

    Returns:
        pandas.Dataframe:
            The resampled dataframe.
    """
    # Honour the deprecated `time_index` alias when `on` was not given.
    if on is None and time_index is not None:
        warnings.warn(
            'resample `time_series` argument deprecated and will be removed'
            ' in future versions of MLPrimitives. Please use `on` instead.',
            DeprecationWarning,
            stacklevel=2,
        )
        on = time_index

    # An integer rule means "this many seconds".
    if isinstance(rule, int):
        rule = '{}s'.format(rule)

    target = df.groupby(groupby) if groupby else df
    resampler = target.resample(rule, on=on)

    # Names outside the pandas-native set may be fully qualified python
    # paths; try to import them, falling back to pandas name resolution.
    unknown_name = not callable(aggregation) and aggregation not in _RESAMPLE_AGGS
    if unknown_name:
        try:
            aggregation = import_object(aggregation)
        except (AttributeError, ImportError, ValueError):
            pass

    resampled = resampler.aggregate(aggregation)

    # Drop index levels that also exist as columns to avoid duplicates
    # when (optionally) resetting the index below.
    for level_name in resampled.index.names:
        if level_name in resampled:
            del resampled[level_name]

    if reset_index:
        resampled.reset_index(inplace=True)

    return resampled
def _join_names(names):
"""Join the names of a multi-level index with an underscore."""
levels = (str(name) for name in names if name != '')
return '_'.join(levels)
def unstack(df, level=-1, reset_index=True):
"""pd.DataFrame.unstack adapter.
Call the `df.unstack` method using the indicated level and afterwards
join the column names using an underscore.
Args:
df (pandas.DataFrame): DataFrame to unstack.
level (str, int or list): Level(s) of index to unstack, can pass level name
reset_index (bool): Whether to reset the index after unstacking
Returns:
pandas.Dataframe: unstacked dataframe
"""
df = df.unstack(level=level)
if reset_index:
df = df.reset_index()
df.columns = df.columns.map(_join_names)
return df
| mlprimitives/adapters/pandas.py | 3,458 | Join the names of a multi-level index with an underscore.
pd.DataFrame.resample adapter.
Call the `df.resample` method on the given time_index
and afterwards call the indicated aggregation.
Optionally group the dataframe by the indicated columns before
performing the resampling.
If groupby option is used, the result is a multi-index datagrame.
Args:
df (pandas.DataFrame):
DataFrame to resample.
rule (str or int):
The offset string or object representing target conversion or an
integer value that will be interpreted as the number of seconds.
on (str or None):
Name of the column to use as the time index. If ``None`` is given, the
DataFrame index is used.
groupby (list):
Optional list of columns to group by.
aggregation (callable or str):
Function or name of the function to use for the aggregation. If a name is given, it
can either be one of the standard pandas aggregation functions or the fully qualified
name of a python function that will be imported and used.
reset_index (bool):
Whether to reset the index after aggregating
time_index (str or None):
Deprecated: This has been renamed to `on`.
Name of the column to use as the time index. If ``None`` is given, the
DataFrame is index is used.
Returns:
pandas.Dataframe:
resampled dataframe
pd.DataFrame.unstack adapter.
Call the `df.unstack` method using the indicated level and afterwards
join the column names using an underscore.
Args:
df (pandas.DataFrame): DataFrame to unstack.
level (str, int or list): Level(s) of index to unstack, can pass level name
reset_index (bool): Whether to reset the index after unstacking
Returns:
pandas.Dataframe: unstacked dataframe | 1,802 | en | 0.7267 |
"""Rectify function"""
import torch
from torch.autograd import Function
from encoding import cpu
if torch.cuda.device_count() > 0:
from encoding import gpu
__all__ = ['rectify']


class _rectify(Function):
    """Autograd wrapper around the cpu/gpu ``conv_rectify`` kernels.

    Both passes rectify their first argument in place, hence the
    ``mark_dirty`` calls.
    """

    @staticmethod
    def forward(ctx, y, x, kernel_size, stride, padding, dilation, average):
        ctx.save_for_backward(x)
        # Grow the kernel extent to account for dilation
        # (original note: assuming kernel_size is 3).
        effective_kernel = [k + 2 * (d - 1) for k, d in zip(kernel_size, dilation)]
        ctx.kernel_size = effective_kernel
        ctx.stride = stride
        ctx.padding = padding
        ctx.dilation = dilation
        ctx.average = average
        # Dispatch to the CUDA or CPU extension depending on the input.
        impl = gpu.conv_rectify if x.is_cuda else cpu.conv_rectify
        impl(y, x, effective_kernel, stride, padding, dilation, average)
        ctx.mark_dirty(y)
        return y

    @staticmethod
    def backward(ctx, grad_y):
        x, = ctx.saved_variables
        impl = gpu.conv_rectify if x.is_cuda else cpu.conv_rectify
        impl(grad_y, x, ctx.kernel_size, ctx.stride,
             ctx.padding, ctx.dilation, ctx.average)
        ctx.mark_dirty(grad_y)
        # Gradients only flow to ``y``; the remaining inputs are parameters.
        return grad_y, None, None, None, None, None, None


rectify = _rectify.apply
| encoding/functions/rectify.py | 1,377 | Rectify function
assuming kernel_size is 3 | 44 | en | 0.565457 |
from pyspark.sql import SparkSession
def get_spark():
    """Return the shared local SparkSession for this application."""
    builder = SparkSession.builder
    builder = builder.appName("simpleapp")
    builder = builder.master("local")
    return builder.getOrCreate()
from pyspark import SparkConf, SparkContext
import sys
def main(sc, args):
    """Echo the command-line arguments and display a small demo DataFrame.

    Args:
        sc: Spark session/context (not used directly here; kept for the
            conventional entry-point signature).
        args: Command-line argument list to print.
    """
    print("SimpleApp Arguments")
    for x in args:
        # Fixed: was the Python 2 statement ``print x`` — a SyntaxError on Python 3.
        print(x)

    # NOTE(review): "section 2" below is lower-case unlike "Section 2" above —
    # confirm whether that is intentional test data.
    simple_data = [
        ("Group A", "Section 1", 50),
        ("Group B", "Section 2", 75),
        ("Group A", "Section 1", 25),
        ("Group C", "section 2", 75)
    ]

    simple_df = get_spark().createDataFrame(
        simple_data,
        ["Group", "Section", "Amount"]
    )

    simple_df.show()
if __name__ == "__main__":
    # Obtain (or reuse) the local SparkSession.
    sc = get_spark()

    # Run the demo with the raw command-line arguments.
    main(sc, sys.argv)
| src/test/resources/simpleapp.py | 773 | Configure Spark Execute Main functionality | 42 | en | 0.627353 |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from itertools import product
from sklearn.preprocessing import LabelEncoder
# =============================================================================
# Data loading and preprocessing pipeline
# =============================================================================
def lag_feature(df, lags, col):
    """Add lagged copies of ``col`` as new ``<col>_lag_<n>`` columns.

    For every lag ``n`` in ``lags``, the value of ``col`` observed ``n``
    months (date blocks) earlier for the same shop and item is merged back
    onto ``df``. Rows with no matching history receive NaN.
    """
    keys = ['date_block_num', 'shop_id', 'item_id']
    base = df[keys + [col]]
    for lag in lags:
        shifted = base.copy()
        shifted.columns = keys + [f'{col}_lag_{lag}']
        # Moving the block number forward aligns past values with the
        # current month during the merge below.
        shifted['date_block_num'] += lag
        df = pd.merge(df, shifted, on=keys, how='left')
    return df
# ---- Load raw inputs and clean them ----------------------------------------
items = pd.read_csv(r'dataset\items.csv')
shops = pd.read_csv(r'dataset\shops.csv')
cats = pd.read_csv(r'dataset\item_categories.csv')
train = pd.read_csv(r'dataset\sales_train.csv')
test = pd.read_csv(r'dataset\test.csv').set_index('ID')
# Drop extreme outliers in price and daily sales count.
train = train[train.item_price<100000]
train = train[train.item_cnt_day<1001]
# Replace negative prices with the median positive price of the same
# shop/item/month (shop 32, item 2973, block 4).
median = train[(train.shop_id==32)&(train.item_id==2973)&(train.date_block_num==4)&(train.item_price>0)].item_price.median()
train.loc[train.item_price<0, 'item_price'] = median
# Merge duplicate shop ids in train and test so each shop has a single id
# (presumably the same physical shop listed twice — TODO confirm).
train.loc[train.shop_id == 0, 'shop_id'] = 57
test.loc[test.shop_id == 0, 'shop_id'] = 57
train.loc[train.shop_id == 1, 'shop_id'] = 58
test.loc[test.shop_id == 1, 'shop_id'] = 58
train.loc[train.shop_id == 10, 'shop_id'] = 11
test.loc[test.shop_id == 10, 'shop_id'] = 11
# Normalize shop names (lower-case, strip punctuation/digits), then derive
# a city code (first word of the name) and a shop-type code.
shops['shop_name'] = shops['shop_name'].apply(lambda x: x.lower()).str.replace('[^\w\s]', '').str.replace('\d+','').str.strip()
shops['city'] = shops['shop_name'].str.partition(' ')[0]
shops['city_code'] = LabelEncoder().fit_transform(shops['city'])
# NOTE(review): the tokens below look like mojibake of Cyrillic shop-type
# keywords — verify against the source data in its original encoding.
shops['shop_type'] = shops['shop_name'].apply(lambda x: 'ÐŒÑÑÑ' if 'ÐŒÑÑÑ' in x else 'ÑÑÑ' if 'ÑÑÑ' in x else 'ÑÑк' if 'ÑÑк' in x else 'ÑÑ' if 'ÑÑ' in x else 'Ñк' if 'Ñк' in x else 'NO_DATA')
shops['shop_type'] = LabelEncoder().fit_transform(shops['shop_type'])
shops = shops[['shop_id','city_code','shop_type']]
# Split category names on '-' into type / subtype codes.
cats['split'] = cats['item_category_name'].str.split('-')
cats['type'] = cats['split'].map(lambda x: x[0].strip())
cats['type_code'] = LabelEncoder().fit_transform(cats['type'])
# if subtype is nan then type
cats['subtype'] = cats['split'].map(lambda x: x[1].strip() if len(x) > 1 else x[0].strip())
cats['subtype_code'] = LabelEncoder().fit_transform(cats['subtype'])
cats = cats[['item_category_id','type_code', 'subtype_code']]
# Item names are not used as features.
items.drop(['item_name'], axis=1, inplace=True)
# ---- Build the (month, shop, item) grid ------------------------------------
# For each of the 34 training months take the cartesian product of the shops
# and items that appear in that month, so zero-sales pairs are represented.
matrix = []
cols = ['date_block_num','shop_id','item_id']
for i in range(34):
    sales = train[train.date_block_num==i]
    matrix.append(np.array(list(product([i], sales.shop_id.unique(), sales.item_id.unique())), dtype='int16'))

matrix = pd.DataFrame(np.vstack(matrix), columns=cols)
# Downcast key columns to shrink the (large) grid in memory.
matrix['date_block_num'] = matrix['date_block_num'].astype(np.int8)
matrix['shop_id'] = matrix['shop_id'].astype(np.int8)
matrix['item_id'] = matrix['item_id'].astype(np.int16)
matrix.sort_values(cols,inplace=True)
train['revenue'] = train['item_price'] * train['item_cnt_day']
# Mean item price per month, overall and per shop (turned into lag features later).
item_price_lag = train.groupby(['date_block_num','item_id']).agg({'item_price':['mean']})
item_price_lag.columns = ['average_item_price']
item_price_by_shop_lag = train.groupby(['date_block_num','shop_id', 'item_id']).agg({'item_price':['mean']})
item_price_by_shop_lag.columns = ['average_item_price_by_shop']
# Monthly sales target per (month, shop, item), clipped to [0, 20]
# (presumably to match the competition's clipped target — TODO confirm).
group = train.groupby(['date_block_num','shop_id','item_id']).agg({'item_cnt_day': ['sum']})
group.columns = ['item_cnt_month']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=cols, how='left')
matrix['item_cnt_month'] = (matrix['item_cnt_month'].fillna(0).clip(0,20).astype(np.float16))
# ---- Append the test month (block 34) and attach reference tables ----------
test['date_block_num'] = 34
test['date_block_num'] = test['date_block_num'].astype(np.int8)
test['shop_id'] = test['shop_id'].astype(np.int8)
test['item_id'] = test['item_id'].astype(np.int16)
matrix = pd.concat([matrix, test], ignore_index=True, sort=False, keys=cols)
matrix.fillna(0, inplace=True) # 34 month
# Attach lagged (1-3 month) average prices — overall and per shop — and drop
# the current-month columns so only the lags remain as features.
matrix = pd.merge(matrix, item_price_lag, on=['date_block_num','item_id'], how='left')
matrix['average_item_price'] = matrix['average_item_price'].astype(np.float16)
matrix = lag_feature(matrix, [1,2,3], 'average_item_price')
matrix.drop(['average_item_price'], axis=1, inplace=True)
matrix = pd.merge(matrix, item_price_by_shop_lag, on=['date_block_num','shop_id','item_id'], how='left')
matrix['average_item_price_by_shop'] = matrix['average_item_price_by_shop'].astype(np.float16)
matrix = lag_feature(matrix, [1,2,3], 'average_item_price_by_shop')
matrix.drop(['average_item_price_by_shop'], axis=1, inplace=True)
# Join shop / item / category code tables and downcast the codes.
matrix = pd.merge(matrix, shops, on=['shop_id'], how='left')
matrix = pd.merge(matrix, items, on=['item_id'], how='left')
matrix = pd.merge(matrix, cats, on=['item_category_id'], how='left')
matrix['city_code'] = matrix['city_code'].astype(np.int8)
matrix['shop_type'] = matrix['shop_type'].astype(np.int8)
matrix['item_category_id'] = matrix['item_category_id'].astype(np.int8)
matrix['type_code'] = matrix['type_code'].astype(np.int8)
matrix['subtype_code'] = matrix['subtype_code'].astype(np.int8)
# ---- Mean-encoding features and target lags --------------------------------
# Per-shop and per-(item, shop) mean of the monthly target.
shop_mean = matrix.groupby(['shop_id']).agg({'item_cnt_month': ['mean']})
shop_mean.columns = ['shop_mean']
shop_mean.reset_index(inplace=True)
shop_item_mean = matrix.groupby(['item_id','shop_id']).agg({'item_cnt_month': ['mean']})
shop_item_mean.columns = ['shop_item_mean']
shop_item_mean.reset_index(inplace=True)
# Per-month mean target of each item (lagged below, then dropped).
group = matrix.groupby(['date_block_num', 'item_id']).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_item_avg_item_cnt' ]
group.reset_index(inplace=True)
matrix = pd.merge(matrix, shop_mean, on=['shop_id'], how='left')
matrix = pd.merge(matrix, shop_item_mean, on=['item_id','shop_id'], how='left')
matrix = pd.merge(matrix, group, on=['date_block_num','item_id'], how='left')
matrix['date_item_avg_item_cnt'] = matrix['date_item_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1,2,3], 'date_item_avg_item_cnt')
matrix.drop(['date_item_avg_item_cnt'], axis=1, inplace=True)
# Lag the target itself; drop the first months, which cannot have a lag of 3.
matrix = lag_feature(matrix, [1,2,3], 'item_cnt_month')
matrix_last = matrix[matrix.date_block_num > 2]
def fill_na(df):
    """Fill NaNs in the generated lag-feature columns of ``df`` in place.

    Only ``*_lag_*`` columns whose name marks them as count, shop-mean or
    price features are filled with 0; all other columns keep their NaNs.

    Args:
        df (pandas.DataFrame): Feature matrix to clean. Mutated in place.

    Returns:
        pandas.DataFrame: The same (mutated) DataFrame, for chaining.
    """
    # One combined membership test instead of three duplicated fill branches.
    fill_markers = ('item_cnt', 'shop_mean', 'average_item_price')
    for col in df.columns:
        if ('_lag_' in col) and df[col].isnull().any():
            if any(marker in col for marker in fill_markers):
                # Assign instead of Series.fillna(inplace=True): the chained
                # in-place call is unreliable under pandas copy-on-write.
                df[col] = df[col].fillna(0)
    return df
# Fill the remaining lag-feature NaNs and persist the modeling table.
# NOTE(review): fill_na mutates its argument, so ``matrix`` and
# ``matrix_last`` reference the same filled data here — confirm intended.
matrix = fill_na(matrix_last)
matrix_last.to_pickle('dataset/traintest.pkl')
# =============================================================================
# correlation Matrix
# =============================================================================
cor_data = matrix_last[['shop_item_mean','date_block_num','date_item_avg_item_cnt_lag_1','item_category_id','average_item_price_lag_2','average_item_price_lag_1','item_cnt_month_lag_1','item_cnt_month']]
corr = cor_data.corr()
# All-False mask, so the full (symmetric) heatmap is drawn.
# ``bool`` replaces ``np.bool``, which was removed in NumPy 1.24.
mask = np.zeros_like(corr, dtype=bool)
f,ax = plt.subplots(figsize=(15, 20))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
            square=True, linewidths=.5, cbar_kws={"shrink": .5},annot=True)
plt.savefig('outputdata/correlation.png')
from nepc import nepc
from nepc.util import util
import pandas as pd
import os
import pytest
import platform
# TODO: remove dependence on csv; put function in scraper that uses built-in
# readlines function
import csv
# TODO: test that all values in [nepc]/tests/data are in the nepc database
@pytest.mark.usefixtures("data_config", "nepc_connect")
def test_states_table_has_species_metadata(data_config, nepc_connect):
    """Check the states table matches states.tsv and has a species_id column."""
    NEPC_DATA = data_config[0]
    # The TSV has one header line that does not correspond to a table row.
    expected_rows = util.wc_fxn(NEPC_DATA + 'states.tsv') - 1
    df_states = nepc.table_as_df(nepc_connect[1], 'states')
    assert len(df_states) == expected_rows
    assert 'species_id' in list(df_states.columns)
@pytest.mark.usefixtures("data_config", "nepc_connect")
def test_csdata_lines(data_config, nepc_connect):
    """Check the csdata row count equals the data lines of all .dat files."""
    DIR_NAMES = data_config[1]
    cs_lines = 0
    for directoryname in DIR_NAMES:
        for entry in os.listdir(os.fsencode(directoryname)):
            filename = os.fsdecode(entry)
            # .met / .mod files hold metadata, not cross-section data lines.
            if filename.endswith(('.met', '.mod')):
                continue
            # subtract 1 to account for header
            cs_lines += util.wc_fxn(directoryname + filename) - 1
    assert cs_lines == nepc.count_table_rows(nepc_connect[1], "csdata")
@pytest.mark.usefixtures("data_config", "nepc_connect")
def test_data_entered(data_config, nepc_connect, local):
    """Compare cross-section values in the database against the .dat files."""
    NEPC_DATA = data_config[0]
    on_prod = local is False or platform.node() == 'ppdadamsonlinux'
    listing = 'cs_datfile_prod.tsv' if on_prod else 'cs_datfile_local.tsv'
    cs_dat_files = pd.read_csv(NEPC_DATA + listing, delimiter='\t')
    for _, row in cs_dat_files.iterrows():
        cs_id = row['cs_id']
        dat_file = row['filename']
        df = pd.read_csv(NEPC_DATA + dat_file + '.dat', delimiter='\t',
                         usecols=['e_energy', 'sigma'])
        e_energy, sigma = nepc.cs_e_sigma(nepc_connect[1], cs_id)
        # e_energy comparison intentionally disabled:
        # assert e_energy == pytest.approx(df['e_energy'].tolist())
        assert sigma == pytest.approx(df['sigma'].tolist())
@pytest.mark.usefixtures("data_config", "nepc_connect")
def test_meta_entered(data_config, nepc_connect, local, dbug):
    """Compare each cross section's metadata in the database to its .met file."""
    NEPC_DATA = data_config[0]
    # Production hosts use the prod file listing; everything else the local one.
    if local is False or platform.node() == 'ppdadamsonlinux':
        cs_dat_files = pd.read_csv(NEPC_DATA + 'cs_datfile_prod.tsv',
                                   delimiter='\t')
    else:
        cs_dat_files = pd.read_csv(NEPC_DATA + 'cs_datfile_local.tsv',
                                   delimiter='\t')

    for index, row in cs_dat_files.iterrows():
        cs_id = row['cs_id']
        met_file = row['filename']
        if dbug:
            print(cs_id, met_file)

        e, sigma = nepc.cs_e_sigma(nepc_connect[1], cs_id)
        # Column order of the .met file; index positions below refer to this list.
        meta_cols = ['cs_id', 'process', 'units_e',
                     'units_sigma', 'ref', 'lhsA',
                     'lhsB', 'rhsA', 'rhsB', 'threshold', 'wavelength',
                     'lhs_v', 'rhs_v', 'lhs_j', 'rhs_j',
                     'background', 'lpu', 'upu']

        # First data line of the .met file (the header is skipped).
        with open(NEPC_DATA + met_file + ".met", 'r', newline='') as f:
            reader = csv.reader(f, delimiter='\t')
            next(reader)
            meta_disk = list(reader)[0]

        meta_disk = [meta_disk[i] for i in list(range(len(meta_cols)))]

        # Integer columns: cs_id, lhs_v, rhs_v, lhs_j, rhs_j
        # ('\N' is the SQL NULL marker and stays a string).
        for i in [0, 11, 12, 13, 14]:
            meta_disk[i] = (int(meta_disk[i]) if meta_disk[i] != '\\N'
                            else meta_disk[i])

        # Float columns: units_e, units_sigma, threshold, wavelength, lpu, upu.
        for i in [2, 3, 9, 10, 16, 17]:
            meta_disk[i] = (float(meta_disk[i]) if meta_disk[i] != '\\N'
                            else meta_disk[i])

        meta_db = [nepc.cs_metadata(nepc_connect[1], cs_id)[i]
                   for i in list(range(0, len(meta_cols)))]
        if dbug:
            print('meta_db: {}\t from {}'.format(meta_db, met_file))

        for i in range(len(meta_cols)):
            if dbug:
                print('meta_db[{}]: {}\t from {}'.format(str(i), str(meta_db[i]), met_file))
            # Floats compare approximately; database NULLs must be '\N' on disk.
            if (type(meta_db[i]) is float):
                assert (pytest.approx(meta_disk[i]) ==
                        pytest.approx(meta_db[i]))
            elif meta_db[i] is None:
                assert meta_disk[i] == '\\N'
            else:
                assert meta_disk[i] == meta_db[i]
| tests/test_mysql_build.py | 4,509 | check that the states table has a species_id column
TODO: remove dependence on csv; put function in scraper that uses built-in readlines function TODO: test that all values in [nepc]/tests/data are in the nepc database subtract 1 to account for header assert e_energy == pytest.approx(df['e_energy'].tolist()) | 317 | en | 0.611032 |
#CODE1---For preparing the list of DRUG side-effect relation from SIDER database---
#Python 3.6.5 |Anaconda, Inc.
import sys
import glob
import errno
import csv
# Path to the SIDER adverse-effects export (tab-separated, quoted fields).
path = '/home/16AT72P01/Excelra/SIDER1/output/adverse_effects.tsv'

# Distinct drugs, distinct side effects, and distinct (drug, effect) pairs.
unique_sideeffect = set()
unique_drug = set()
unique_pair = set()

# The ``with`` block closes the file; the old explicit close() inside it was
# redundant, and the unused ``glob.glob`` listing and debug print were removed.
with open(path) as f1:
    reader = csv.DictReader(f1, quotechar='"', delimiter='\t', quoting=csv.QUOTE_ALL, skipinitialspace=True)
    for row in reader:
        unique_drug.add(row['drug_name'])
        unique_sideeffect.add(row['adverse_effect'])
        unique_pair.add(row['drug_name'] + "|" + row['adverse_effect'])

print(len(unique_drug))
print(len(unique_sideeffect))
print(len(unique_pair))
| src/dictionaryCode/other/siderData1.py | 741 | CODE1---For preparing the list of DRUG side-effect relation from SIDER database---Python 3.6.5 |Anaconda, Inc. | 110 | en | 0.604889 |
"""
ASGI config for apiproject project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os

from django.core.asgi import get_asgi_application

# Point Django at the project settings before the application is built
# (setdefault lets a deployment override it via the environment).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'apiproject.settings')

# Module-level ASGI callable that servers (uvicorn, daphne, ...) import.
application = get_asgi_application()
| apiproject/apiproject/asgi.py | 397 | ASGI config for apiproject project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/ | 216 | en | 0.733707 |
"""
This module provides an interface for reading and writing to a HackPSU RaspberryPi Scanner config file
Methods:
getProperty(configFile, prop)
Get a property from a config file by reading the config file until the desired property is found
setProperty(configFile, prop, value)
        Set a property by updating the config file (requires a total rewrite of the config file)
getProperties(configFile)
Read all properties into a dictionary, which is returned to the user
setProperties(configFile, dict)
Overwrite the configFile with a new configFile generated from the dictionary provided
"""
def getProperties(configFile):
    """
    dictionary getProperties(str)
    Read every ``key=value`` line of the config file into a dictionary.
    Comment lines (starting with '#') and lines without '=' are ignored;
    the value is everything after the first '='.

    Args:
        configFile: The configuration file to read from
    Returns:
        dictionary: The key/value pairs found in the config file
    """
    props = {}
    with open(configFile) as file:
        for rawLine in file:
            line = rawLine.strip()
            # Comment lines carry no property.
            if line.startswith('#'):
                continue
            # Split on the first '='; lines without one are skipped.
            key, sep, value = line.partition('=')
            if not sep:
                continue
            props[key] = value
    return props
def setProperties(configFile, dict):
    """
    void setProperties (str, dictionary)
    Overwrite the config file with one ``key=value`` line per dictionary
    entry, in the dictionary's iteration order.

    Args:
        configFile: The file to overwrite with the new configuration
        dict: The dictionary to write
    """
    with open(configFile, 'w') as file:
        file.writelines(key + '=' + dict[key] + '\n' for key in dict)
def getProperty(configFile, prop):
    """
    str getProperty(str, str)
    This function searches a configFile for a specific property and returns
    its value.

    Args:
        configFile: The configuration file to open
        prop: The property to search for
    Returns:
        string: The property value if found or None for no value found
    """
    retVal = None
    #Foreach line in the file
    with open(configFile) as file:
        for line in file:
            #Remove leading and trailing whitespace
            line = line.strip()
            #Ignore comment lines
            if line.startswith('#'):
                continue
            # NOTE: prefix match, so 'host' also matches a 'hostname=...' line.
            if line.startswith(prop):
                # Fixed: remove only the leading key. The previous
                # str.replace removed EVERY occurrence of the key, so
                # 'host=localhost' came back as 'local'.
                retVal = line[len(prop):]
                retVal = retVal.strip()
                retVal = retVal[1:]  # drop the separator ('=' or ':')
                retVal = retVal.lstrip()
                break
    return retVal
def setProperty(configFile, prop, value):
    """
    void setProperty(str, str, str)
    This function searches a config file for the specified property and
    updates its value if found. If the specified property is not found,
    then a new line for the property will be created.

    Args:
        configFile: The configuration file to open and update
        prop: The property key to update
        value: The new value for the property
    """
    # Local imports: these names were used but never imported in this module.
    from os import fdopen, remove
    from shutil import move
    from tempfile import mkstemp

    written = False
    with open(configFile) as inFile:
        # Write the updated contents to a temp file, then swap it in below.
        tmpHandle, outPath = mkstemp()
        with fdopen(tmpHandle, 'w') as outFile:
            for line in inFile:
                # NOTE: prefix match, so 'host' also updates a 'hostname=...' line.
                if line.startswith(prop):
                    outFile.write(prop + '=' + value + '\n')
                    written = True
                else:
                    outFile.write(line)
            if not written:
                # Fixed: append with '=' (was ':') so getProperty and
                # getProperties can read the new property back.
                outFile.write(prop + '=' + value + '\n')
    #Move from tmp to actual file
    remove(configFile)
    move(outPath, configFile)
| HackPSUconfig.py | 3,681 | dictionary getProperties(str)
This funciton reads the entire config file and builds a dictionary from the config file
Args:
configFile: The configuration file to read from
Returns:
dictionary: A list of key value pairs from the config file
str getProperty(str, str)
This function searches a configFile for a specific property and returns its value
Args:
configFile: The configuration file to open
prop: The property to search for
Returns:
string: The property value if found or None for no value found
void setProperties (str, dictionary)
This function iterates over the entire dictionary and saves each dictionary entry to the specified config file
Args:
configFile: The file to overwrite with the new configuration
dict: The dictionary to write
void setProperty(str, str, str)
This function searches a config file for the specified propery and updates its value if found.
If the specified property is not found, then a new line for the property will be created
Args:
configFile: The configuration file to open and update
prop: The property key to update
value: The new value for the property
This module provides an interface for reading and writing to a HackPSU RaspberryPi Scanner config file
Methods:
getProperty(configFile, prop)
Get a property from a config file by reading the config file until the desired property is found
setProperty(configFile, prop, value)
Set a property by updating the config file (requries a total rewrite of the config file)
getProperties(configFile)
Read all properties into a dictionary, which is returned to the user
setProperties(configFile, dict)
Overwrite the configFile with a new configFile generated from the dictionary provided
For each line in the fileRemove leading and trailing whitespaceIf the line is a comment, skipFind the equals sign, if not present, skip the lineparse out the key and valueOverwrite the fileForeach key in dictionary write a new lineForeach line in the fileRemove leading and trailing whitespaceIgnore comment linesIf the line is the desired property, parse and returnCreate a temp file to copy intoForeach line in the original file If it's the prop line, rewrite the prop lineOtherwise keep the line as isIf no update was performed, then add a new line for the propMove from tmp to actual file | 2,486 | en | 0.718828 |
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='rg171195@gmail.com', password='testpass'):
    """Create and return a sample user for the tests."""
    user_model = get_user_model()
    return user_model.objects.create_user(email, password)
class ModelTests(TestCase):
    """Tests for the custom user model and the Tag model."""

    def test_create_user_with_email_successful(self):
        """Test creating a new user with an email is successful"""
        email = 'rg171195@gmail.com'
        password = 'Password123'
        created = get_user_model().objects.create_user(
            email=email,
            password=password,
        )
        self.assertEqual(created.email, email)
        self.assertTrue(created.check_password(password))

    def test_email_normalize(self):
        """Test that the email domain is normalized to lower case"""
        email = "test@XYZ.com"
        created = get_user_model().objects.create_user(email, "test123")
        self.assertEqual(created.email, email.lower())

    def test_email_validation(self):
        """Test that creating a user without an email raises ValueError"""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, 'test123')

    def test_create_superuser(self):
        """Test for creating super user"""
        superuser = get_user_model().objects.create_superuser(
            email='rg171195@gmail.com',
            password='Password123',
        )
        self.assertTrue(superuser.is_staff)
        self.assertTrue(superuser.is_superuser)

    def test_tag_str(self):
        """Test the tag string representation"""
        tag = models.Tag.objects.create(user=sample_user(), name='vegan')
        self.assertEqual(str(tag), tag.name)
Test for creating super user
Test creating a new user with an email is successful
Testing weather email is in normalize form or not | 152 | en | 0.945155 |
import copy
import dask
import dask.array as da
from dask.distributed import Client
import datetime
import logging
import math
from multiprocessing.pool import ThreadPool
import numpy as np
from pathlib import Path
from tqdm.auto import tqdm
from typing import Union, TypeVar, Tuple
import xarray as xr
import shutil
import warnings
import zarr
from .utils import infer_chunks
from .readers import DirectoryImageReader
Reader = TypeVar("Reader")
def write_transposed_dataset(
    reader: Reader,
    outfname: Union[Path, str],
    start: datetime.datetime = None,
    end: datetime.datetime = None,
    chunks: dict = None,
    memory: float = 2,
    n_threads: int = 4,
    zlib: bool = True,
    complevel: int = 4,
    distributed: Union[bool, Client] = False,
    use_dask: bool = True,
):
    """
    Creates a stacked and transposed netCDF file from a given reader.

    WARNING: very experimental!

    Parameters
    ----------
    reader : XarrayImageReaderBase
        Reader for the dataset.
    outfname : str or Path
        Output filename. Must end with ".nc" for netCDF output or with ".zarr"
        for zarr output.
    start : datetime.datetime, optional
        If not given, start at first timestamp in dataset.
    end : datetime.datetime, optional
        If not given, end at last timestamp in dataset.
    chunks : dictionary, optional
        The chunk sizes that are used for the transposed file. If none are
        given, chunks with a size of 1MB are used for netCDF, and chunks with a
        size of 50MB are used for zarr output.
    memory : float, optional
        The amount of memory to be used for buffering in GB. Default is 2.
        Higher is faster.
    n_threads : int, optional
        The amount of threads to use. Default is 4.
    zlib : bool, optional
        Whether to use compression when storing the files. Reduces file size,
        but strongly increases write time, and maybe also access time. Default
        is ``True``.
    complevel : int, optional
        Compression level to use. Default is 4. Range is from 1 (low) to 9
        (high).
    distributed : bool or Client, optional
        Whether to use the local or the distributed dask scheduler. If a client
        for a distributed scheduler is used, this is used instead.
    use_dask : bool, optional
        Whether to use dask for the transposing. Default is True, but sometimes
        (especially with large datasets) this fails. If set to False, the data
        is written to an intermediate zarr store.
    """
    # Keep dask from splitting large chunks while transposing.
    dask_config = {
        "array.slicing.split_large_chunks": False,
    }
    args = (reader, outfname)
    kwargs = {
        "start": start,
        "end": end,
        "memory": memory,
        "zlib": zlib,
        "complevel": complevel,
        "chunks": chunks,
    }
    if not use_dask:
        _transpose_no_dask(*args, **kwargs)
    elif isinstance(distributed, Client) or not distributed:
        if not distributed:
            # No client given: run on the local threaded scheduler.
            dask_config.update(
                {"scheduler": "threads", "pool": ThreadPool(n_threads)}
            )
        with dask.config.set(**dask_config):
            _transpose(*args, **kwargs)
    elif distributed:
        # distributed is True but no Client was given: spin up a local one.
        with dask.config.set(**dask_config), Client(
            n_workers=1,
            threads_per_worker=n_threads,
            memory_limit=f"{memory}GB",
        ) as client:
            print("Dask dashboard accessible at:", client.dashboard_link)
            _transpose(*args, **kwargs)
def _get_intermediate_chunks(array, chunks, new_last_dim, zarr_output, memory):
"""
Calculates chunk sizes for the given array for the intermediate output
files.
Parameters
----------
array : xr.DataArray
Array to rechunk and transpose
chunks : dict or None
Chunks passed to write_transposed_dataset, None if none were given.
new_last_dim : str
Name of the new last dimension, normally "time".
zarr_output : bool
Whether the final file will be a zarr file (True) or a netCDf (False).
memory : float
The amount of memory to be used for buffering in GB.
Returns
-------
tmp_chunks : dict
Chunks to be used for rechunking the array to a temporary file. The
order of keys corresponds to the order of dimensions in the transposed
array.
"""
dtype = array.dtype
dims = dict(zip(array.dims, array.shape))
transposed_shape = [
length for dim, length in dims.items() if dim != new_last_dim
]
transposed_shape.append(dims[new_last_dim])
# If the chunks argument was not given, we have to infer the spatial
# and temporal chunks for the intermediate file.
# The spatial chunks will be set such that for a continuous time
# dimension the chunk size is still reasonable.
if chunks is None:
if zarr_output:
chunksizes = infer_chunks(transposed_shape, 100, dtype)[:-1]
else:
chunksizes = infer_chunks(transposed_shape, 1, dtype)[:-1]
chunks = dict(
zip([dim for dim in dims if dim != new_last_dim], chunksizes)
)
chunks[new_last_dim] = -1
else:
chunks = copy.copy(chunks)
tmp_chunks = {dim: chunks[dim] for dim in dims if dim != new_last_dim}
# figure out temporary chunk sizes based on image size and available memory
size = dtype.itemsize
chunksizes = [size if size != -1 else dims[dim] for dim, size in chunks.items()]
chunksize_MB = np.prod(chunksizes) * size / 1024 ** 2
img_shape = transposed_shape[:-1]
len_time = transposed_shape[-1]
imagesize_GB = np.prod(img_shape) * size / 1024 ** 3
# we need to divide by two, because we need intermediate storage for
# the transposing
stepsize = int(math.floor(memory / imagesize_GB)) // 2
stepsize = min(stepsize, len_time)
tmp_chunks[new_last_dim] = stepsize
tmp_chunks_str = str(tuple(tmp_chunks.values()))
logging.info(
f"write_transposed_dataset: Creating chunks {tmp_chunks_str}"
f" with chunksize {chunksize_MB:.2f} MB"
)
return tmp_chunks
def _transpose(
    reader: Reader,
    outfname: Union[Path, str],
    start: datetime.datetime = None,
    end: datetime.datetime = None,
    chunks: dict = None,
    memory: float = 2,
    zlib: bool = True,
    complevel: int = 4,
):
    """Write a time-last (transposed) copy of ``reader``'s dataset.

    Each variable is rechunked and transposed into its own intermediate
    zarr store (time-chunk size derived from ``memory``), then all
    variables are merged and written to ``outfname``. A ".zarr" suffix on
    ``outfname`` selects zarr output, anything else produces netCDF.

    Parameters
    ----------
    reader : Reader
        Reader for the dataset.
    outfname : str or Path
        Output file name.
    start, end : datetime.datetime, optional
        Time range to read; defaults to the reader's full range.
    chunks : dict, optional
        Chunk sizes for the final file; inferred when not given.
    memory : float, optional
        Buffer size in GB used to choose the intermediate time chunks.
    zlib : bool, optional
        Whether to compress netCDF output.
    complevel : int, optional
        netCDF compression level (1-9).
    """
    zarr_output = str(outfname).endswith(".zarr")
    new_last_dim = reader.timename
    if isinstance(reader, DirectoryImageReader) and reader.chunks is None:
        logging.info(
            "You are using DirectoryImageReader without dask. If you run into"
            " memory issues or have large datasets to transpose, consider"
            " setting use_dask=True in the constructor of DirectoryImageReader."
        )
    ds = reader.read_block(start, end)

    # We process each variable separately and store it as an intermediately
    # chunked temporary zarr store. The chunk size in the time dimension is
    # inferred from the given memory budget.
    variable_chunks = {}
    variable_intermediate_fnames = {}
    for var in reader.varnames:
        tmp_outfname = str(outfname) + f".{var}.zarr"
        variable_intermediate_fnames[var] = tmp_outfname
        tmp_chunks = _get_intermediate_chunks(
            ds[var], chunks, new_last_dim, zarr_output, memory
        )
        # Make sure the time dimension is continuous in the final output.
        # Record the final chunks per variable *before* the skip check, so a
        # resumed run (existing intermediate files) still knows them; the
        # caller's `chunks` argument is deliberately left untouched.
        final_chunks = copy.copy(tmp_chunks)
        final_chunks[new_last_dim] = len(ds[var].time)
        variable_chunks[var] = final_chunks
        if Path(tmp_outfname).exists():
            # BUGFIX: this message was missing the f-prefix and logged the
            # literal text "{tmp_outfname}".
            logging.info(
                f"Skipping generating intermediate file {tmp_outfname}"
                " because it exists"
            )
            continue
        # now we can rechunk and transpose using xarray
        rechunked_transposed = ds[var].chunk(tmp_chunks).transpose(
            ..., new_last_dim
        )
        rechunked_transposed.to_dataset().to_zarr(
            tmp_outfname, consolidated=True
        )

    # Now we have to reassemble all variables to a single dataset and write
    # the final chunks.
    variable_ds = []
    variable_chunksizes = {}
    for var in reader.varnames:
        ds = xr.open_zarr(variable_intermediate_fnames[var], consolidated=True)
        variable_ds.append(ds)
        # For the encoding below we need the chunks as a tuple in dimension
        # order; it's easiest to build here where the transposed DataArray is
        # at hand. (BUGFIX: this previously read the loop-carried `chunks`
        # variable, i.e. the chunks of whichever variable happened to be
        # processed last, instead of this variable's own chunks.)
        transposed_dims = ds[var].dims
        variable_chunksizes[var] = tuple(
            variable_chunks[var][dim] for dim in transposed_dims
        )
    ds = xr.merge(
        variable_ds,
        compat="override",
        join="override",
        combine_attrs="override",
    )
    ds.attrs.update(reader.global_attrs)
    encoding = {
        var: {
            "chunksizes": variable_chunksizes[var],
            "zlib": zlib,
            "complevel": complevel,
        }
        for var in reader.varnames
    }
    if not zarr_output:
        ds.to_netcdf(outfname, encoding=encoding)
    else:
        for var in reader.varnames:
            # Drop the chunking the intermediate store imposed before
            # rechunking to the final layout.
            del ds[var].encoding["chunks"]
            del ds[var].encoding["preferred_chunks"]
            ds[var] = ds[var].chunk(variable_chunksizes[var])
        ds.to_zarr(outfname, mode="w", consolidated=True)
    # Clean up the per-variable intermediate stores.
    for var in reader.varnames:
        shutil.rmtree(variable_intermediate_fnames[var])
    logging.info("write_transposed_dataset: Finished writing transposed file.")
def _transpose_no_dask(
    reader: Reader,
    outfname: Union[Path, str],
    start: datetime.datetime = None,
    end: datetime.datetime = None,
    chunks: Tuple = None,
    memory: float = 2,
    zlib: bool = True,
    complevel: int = 4,
):
    """Transpose a dataset to time-last order without using dask.

    Works variable by variable: the data is streamed in time blocks (sized
    from ``memory``) into an intermediate transposed zarr array; the arrays
    are then combined into one dataset and written to ``outfname``.
    Parameters have the same meaning as for ``_transpose``.
    """
    warnings.warn(
        "This is an experimental function and not yet ready for public use!"
    )
    zarr_output = str(outfname).endswith(".zarr")
    new_last_dim = reader.timename
    timestamps = reader.tstamps_for_daterange(start, end)

    variable_fnames = {}
    variable_dims = {}
    for varname in reader.varnames:
        tmp_outfname = str(outfname) + f".{varname}.zarr"
        variable_fnames[varname] = tmp_outfname
        # first, get some info about structure of the input file
        first_img = reader.read_block(start=timestamps[0], end=timestamps[0])[
            varname
        ]
        tmp_chunks = _get_intermediate_chunks(
            first_img, chunks, new_last_dim, zarr_output, memory
        )
        # Number of time steps read/written per iteration.
        # BUGFIX: previously assigned *after* its first use in `tqdm(...)`,
        # which raised NameError.
        stepsize = tmp_chunks[new_last_dim]
        # get new dim names in the correct order
        new_dim_names = list(tmp_chunks)
        variable_dims[varname] = new_dim_names
        # this happens this late because we need to set
        # `variable_dims[varname]` in any case
        if Path(tmp_outfname).exists():
            logging.info(f"{str(tmp_outfname)} already exists, skipping.")
            continue
        logging.debug(
            f"write_transposed_dataset: starting zarr array creation"
            f" for {len(timestamps)} timestamps"
        )
        # Shape of the transposed target array: spatial sizes come from the
        # sample image, while the (last) time axis must span the full
        # requested date range, since the loop below writes all timestamps.
        # BUGFIX: previously referenced an undefined `new_dim_sizes`.
        dims = dict(zip(first_img.dims, first_img.shape))
        new_dim_sizes = tuple(
            len(timestamps) if dim == new_last_dim else dims[dim]
            for dim in tmp_chunks
        )
        zarr_array = zarr.create(
            new_dim_sizes,
            chunks=tuple(size for size in tmp_chunks.values()),
            store=tmp_outfname,
            overwrite=True,
            fill_value=np.nan,
        )
        logging.debug(f"write_transposed_dataset: Writing {tmp_outfname}")
        print(f"Constructing array stack for {varname}:")
        pbar = tqdm(range(0, len(timestamps), stepsize))
        for start_idx in pbar:
            pbar.set_description("Reading")
            end_idx = min(start_idx + stepsize - 1, len(timestamps) - 1)
            block = reader.read_block(
                timestamps[start_idx], timestamps[end_idx]
            )[varname]
            block = block.transpose(..., new_last_dim)
            pbar.set_description("Writing")
            zarr_array[..., start_idx : end_idx + 1] = block.values

    variable_arrays = {}
    encoding = {}
    for varname, fname in variable_fnames.items():
        logging.debug(f"Reading {str(fname)}")
        arr = da.from_zarr(fname)
        dims = variable_dims[varname]
        metadata = reader.array_attrs[varname]
        # Choose final chunk sizes for this variable.
        # BUGFIX: previously referenced undefined `new_dim_sizes`/`dtype`
        # here and overwrote the `chunks` argument across loop iterations.
        if chunks is None:
            if zarr_output:
                final_chunks = infer_chunks(arr.shape, 100, arr.dtype)
            else:
                # netCDF chunks should be about 1MB
                final_chunks = infer_chunks(arr.shape, 1, arr.dtype)
        else:
            final_chunks = chunks
        encoding[varname] = {
            "chunksizes": final_chunks,
            "zlib": zlib,
            "complevel": complevel,
        }
        chunk_dict = dict(zip(dims, final_chunks))
        arr = xr.DataArray(data=arr, dims=dims, attrs=metadata)
        arr = arr.chunk(chunk_dict)
        arr.encoding = encoding[varname]
        variable_arrays[varname] = arr

    logging.debug("Reading test image")
    test_img = reader.read_block(start=timestamps[0], end=timestamps[0])[
        reader.varnames[0]
    ]
    # Spatial coordinates come from the sample image, the time coordinate
    # from the full list of timestamps.
    coords = {
        c: test_img.coords[c] for c in test_img.coords if c != reader.timename
    }
    coords[reader.timename] = timestamps
    logging.debug("Creating dataset")
    ds = xr.Dataset(
        variable_arrays,
        coords=coords,
    )
    ds.attrs.update(reader.global_attrs)
    logging.info(
        f"write_transposed_dataset: Writing combined file to {str(outfname)}"
    )
    if not zarr_output:
        ds.to_netcdf(outfname, encoding=encoding)
    else:
        ds.to_zarr(outfname, mode="w", consolidated=True)
    # Clean up the per-variable intermediate stores.
    for fname in variable_fnames.values():
        shutil.rmtree(fname)
    logging.info("write_transposed_dataset: Finished writing transposed file.")
| src/qa4sm_preprocessing/nc_image_reader/transpose.py | 14,150 | Calculates chunk sizes for the given array for the intermediate output
files.
Parameters
----------
array : xr.DataArray
Array to rechunk and transpose
chunks : dict or None
Chunks passed to write_transposed_dataset, None if none were given.
new_last_dim : str
Name of the new last dimension, normally "time".
zarr_output : bool
Whether the final file will be a zarr file (True) or a netCDf (False).
memory : float
The amount of memory to be used for buffering in GB.
Returns
-------
tmp_chunks : dict
Chunks to be used for rechunking the array to a temporary file. The
order of keys corresponds to the order of dimensions in the transposed
array.
Creates a stacked and transposed netCDF file from a given reader.
WARNING: very experimental!
Parameters
----------
reader : XarrayImageReaderBase
Reader for the dataset.
outfname : str or Path
Output filename. Must end with ".nc" for netCDF output or with ".zarr"
for zarr output.
start : datetime.datetime, optional
If not given, start at first timestamp in dataset.
end : datetime.datetime, optional
If not given, end at last timestamp in dataset.
chunks : dictionary, optional
The chunk sizes that are used for the transposed file. If none are
given, chunks with a size of 1MB are used for netCDF, and chunks with a
size of 50MB are used for zarr output.
memory : float, optional
The amount of memory to be used for buffering in GB. Default is 2.
Higher is faster.
n_threads : int, optional
The amount of threads to use. Default is 4.
zlib : bool, optional
Whether to use compression when storing the files. Reduces file size,
but strongly increases write time, and maybe also access time. Default
is ``False``.
complevel : int, optional
Compression level to use. Default is 4. Range is from 1 (low) to 9
(high).
distributed : bool or Client, optional
Whether to use the local or the distributed dask scheduler. If a client
for a distributed scheduler is used, this is used instead.
use_dask : bool, optional
Whether to use dask for the transposing. Default is True, but sometimes
(especially with large datasets) this fails. If set to False, the data
is written to an intermediate zarr store.
If the chunks argument was not given, we have to infer the spatial and temporal chunks for the intermediate file. The spatial chunks will be set such that for a continuous time dimension the chunk size is still reasonable. figure out temporary chunk sizes based on image size and available memory we need to divide by two, because we need intermediate storage for the transposing We process each variable separately and store them as intermediately chunked temporary files. The chunk size in time dimension is inferred from the given memory. make sure that the time dimension will be continuous in the final output now we can rechunk and transpose using xarray Now we have to reassemble all variables to a single dataset and write the final chunks for the encoding variable below we need the chunks as tuple in the right order, it's easier to get this here were we have easy access to the transposed DataArray first, get some info about structure of the input file get new dim names in the correct order this happens this late because we need to set `variable_dims[varname]` in any case get shape of transposed target array netCDF chunks should be about 1MB we're writing again to a temporary file, because otherwise the dataset creation fails because dask sucks arr.to_dataset(name=varname).to_zarr(fname + ".tmp", consolidated=True) variable_arrays[varname] = xr.open_zarr(fname + ".tmp", consolidated=True) | 3,677 | en | 0.792345 |
import asyncio
from pathlib import Path
from secrets import token_bytes
from typing import Optional
import aiosqlite
import pytest
from clvm_tools import binutils
from ceres.types.blockchain_format.coin import Coin
from ceres.types.blockchain_format.program import Program, SerializedProgram
from ceres.types.blockchain_format.sized_bytes import bytes32
from ceres.types.coin_spend import CoinSpend
from ceres.util.db_wrapper import DBWrapper
from ceres.util.ints import uint64
from ceres.wallet.wallet_pool_store import WalletPoolStore
@pytest.fixture(scope="module")
def event_loop():
    """Module-scoped event loop shared by the async tests in this module.

    Creates a dedicated loop instead of relying on the deprecated
    ``asyncio.get_event_loop()``, and closes it once the module's tests
    have finished so its resources are released.
    """
    loop = asyncio.new_event_loop()
    yield loop
    loop.close()
def make_child_solution(coin_spend: CoinSpend, new_coin: Optional[Coin] = None) -> CoinSpend:
    """Build a CoinSpend that pays a freshly generated random puzzle hash.

    If *new_coin* is not given, the first addition of *coin_spend* is used
    as the coin being spent.
    """
    if new_coin is None:
        new_coin = coin_spend.additions()[0]
    target_hash: bytes32 = token_bytes(32)
    # Quoted puzzle whose single condition creates a coin of amount 1 at the
    # random target hash; the solution is empty.
    puzzle_prog = Program.to(binutils.assemble(f"(q . ((51 0x{target_hash.hex()} 1)))"))
    solution_prog = Program.to(binutils.assemble("()"))
    return CoinSpend(
        new_coin,
        SerializedProgram.from_program(puzzle_prog),
        SerializedProgram.from_program(solution_prog),
    )
class TestWalletPoolStore:
    """Integration test for WalletPoolStore against a real on-disk sqlite DB."""

    @pytest.mark.asyncio
    async def test_store(self):
        """Exercise add_spend / rollback / rebuild_cache end to end.

        Uses an on-disk database file (removed afterwards) so that
        rebuild_cache genuinely re-reads persisted state.
        """
        db_filename = Path("wallet_store_test.db")
        if db_filename.exists():
            db_filename.unlink()
        db_connection = await aiosqlite.connect(db_filename)
        db_wrapper = DBWrapper(db_connection)
        store = await WalletPoolStore.create(db_wrapper)
        try:
            await db_wrapper.begin_transaction()
            coin_0 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
            coin_0_alt = Coin(token_bytes(32), token_bytes(32), uint64(12312))
            # solution_1 chains from solution_0; the *_alt chain is separate.
            solution_0: CoinSpend = make_child_solution(None, coin_0)
            solution_0_alt: CoinSpend = make_child_solution(None, coin_0_alt)
            solution_1: CoinSpend = make_child_solution(solution_0)
            # Fresh store: no spends recorded for any wallet.
            assert store.get_spends_for_wallet(0) == []
            assert store.get_spends_for_wallet(1) == []
            await store.add_spend(1, solution_1, 100)
            assert store.get_spends_for_wallet(1) == [(100, solution_1)]
            # Idempotent
            await store.add_spend(1, solution_1, 100)
            assert store.get_spends_for_wallet(1) == [(100, solution_1)]
            # The same spend at a different height is rejected.
            with pytest.raises(ValueError):
                await store.add_spend(1, solution_1, 101)
            # Rebuild cache, no longer present
            await db_wrapper.rollback_transaction()
            await store.rebuild_cache()
            assert store.get_spends_for_wallet(1) == []
            await store.rebuild_cache()
            await store.add_spend(1, solution_1, 100)
            assert store.get_spends_for_wallet(1) == [(100, solution_1)]
            # A spend from the unrelated *_alt chain is rejected and the
            # stored history stays unchanged.
            solution_1_alt: CoinSpend = make_child_solution(solution_0_alt)
            with pytest.raises(ValueError):
                await store.add_spend(1, solution_1_alt, 100)
            assert store.get_spends_for_wallet(1) == [(100, solution_1)]
            # Extend the chain; interleave rebuilds to prove cache fidelity.
            solution_2: CoinSpend = make_child_solution(solution_1)
            await store.add_spend(1, solution_2, 100)
            await store.rebuild_cache()
            solution_3: CoinSpend = make_child_solution(solution_2)
            await store.add_spend(1, solution_3, 100)
            solution_4: CoinSpend = make_child_solution(solution_3)
            # Adding solution_4 below its parent's height (99 < 100) fails.
            with pytest.raises(ValueError):
                await store.add_spend(1, solution_4, 99)
            await store.rebuild_cache()
            await store.add_spend(1, solution_4, 101)
            await store.rebuild_cache()
            # Rollback to 101 keeps all spends at height <= 101.
            await store.rollback(101, 1)
            await store.rebuild_cache()
            assert store.get_spends_for_wallet(1) == [
                (100, solution_1),
                (100, solution_2),
                (100, solution_3),
                (101, solution_4),
            ]
            await store.rebuild_cache()
            # Rollback to 100 drops the spend recorded at height 101.
            await store.rollback(100, 1)
            await store.rebuild_cache()
            assert store.get_spends_for_wallet(1) == [
                (100, solution_1),
                (100, solution_2),
                (100, solution_3),
            ]
            # An already-present spend cannot be re-added at a new height ...
            with pytest.raises(ValueError):
                await store.add_spend(1, solution_1, 105)
            # ... but the rolled-back tip can be re-added, and extended.
            await store.add_spend(1, solution_4, 105)
            solution_5: CoinSpend = make_child_solution(solution_4)
            await store.add_spend(1, solution_5, 105)
            # Rolling back below every recorded height empties the history.
            await store.rollback(99, 1)
            assert store.get_spends_for_wallet(1) == []
        finally:
            await db_connection.close()
            db_filename.unlink()
| tests/pools/test_wallet_pool_store.py | 4,789 | Idempotent Rebuild cache, no longer present | 43 | en | 0.67074 |
import time
import torch
import numpy as np
from collections import deque
from graphnas.trainer import Trainer
class Evolution_Trainer(Trainer):
    """
    This class implements the Asynchronous Aging Evolution,
    proposed by Real et. al. on:
    Regularized Evolution for Image Classifier Architecture Search
    available on: https://arxiv.org/abs/1802.01548
    """
    def __init__(self, args):
        """Set up search state and evaluate the initial random population.

        ``population`` and ``accuracies`` are parallel deques:
        ``accuracies[i]`` is the validation score of ``population[i]``.
        """
        super(Evolution_Trainer, self).__init__(args)
        self.args = args
        self.random_seed = args.random_seed
        self.population = deque()
        self.accuracies = deque()
        self.population_size = args.population_size
        self.sample_size = args.sample_size
        self.cycles = args.cycles
        self.init_time = 0  # set by __initialize_population (seconds)
        print('initializing population on evolution_trainer init, maybe not the best strategy')
        self.__initialize_population()

    def derive_from_population(self):
        """Retrain the best individual of the current population from scratch
        and report mean/stddev of its test accuracy over 10 evaluations."""
        population = self._construct_action(self.population)
        best_score_index, _ = \
            self._get_best_individual_accuracy(self.accuracies)
        best_structure = self.form_gnn_info(population[best_score_index])
        print("[DERIVE] Best Structure:", str(best_structure))
        # train from scratch to get the final score
        np.random.seed(self.random_seed)
        torch.manual_seed(self.random_seed)
        torch.cuda.manual_seed_all(self.random_seed)
        test_scores_list = []
        for i in range(10):  # run 10 times to get Mean and Stddev
            val_acc, test_acc = self.submodel_manager.evaluate(best_structure)
            test_scores_list.append(test_acc)
        print("[DERIVE] Best Results: ", best_structure, ": ",
              np.mean(test_scores_list),
              "+/-", np.std(test_scores_list))

    def _mutate_individual(self, indiv):
        """Mutate one randomly chosen gene of ``indiv`` in place and return it."""
        # Choose a random position on the individual to mutate
        position_to_mutate = np.random.randint(len(indiv))
        # This position will receive a randomly chosen index
        # of the search_spaces's list
        # for the action corresponding to that position in the individual
        sp_list = self.search_space[self.action_list[position_to_mutate]]
        indiv[position_to_mutate] = \
            np.random.randint(0, len(sp_list))
        return indiv

    def _get_best_individual_accuracy(self, accs):
        """Return (index, value) of the maximum accuracy in ``accs``."""
        max_acc_index = 0
        max_acc = -1
        for index, acc in enumerate(accs):
            if acc > max_acc:
                max_acc = acc
                max_acc_index = index
        return max_acc_index, max_acc

    def __initialize_population(self):
        """Fill the population with random individuals, training each one to
        obtain its validation accuracy, and record the elapsed time."""
        print("\n\n===== Evaluating initial random population =====")
        start_initial_population_time = time.time()
        while len(self.population) < self.population_size:
            # print('adding individual #:', len(population))
            individual = self._generate_random_individual()
            ind_actions = self._construct_action([individual])
            gnn = self.form_gnn_info(ind_actions[0])
            _, ind_acc = \
                self.submodel_manager.train(gnn, format=self.args.format)
            print("individual:", individual, " val_score:", ind_acc)
            self.accuracies.append(ind_acc)
            self.population.append(individual)
        end_initial_pop_time = time.time()
        self.init_time = end_initial_pop_time - start_initial_population_time
        print("Time elapsed initializing population: " +
              str(self.init_time))
        print("===== Evaluating initial random population DONE ====")

    def train(self):
        """Run the aging-evolution loop for ``self.cycles`` iterations.

        Each cycle: sample ``sample_size`` individuals (with replacement),
        mutate the best of the sample, evaluate the child, append it to the
        population, and retire the oldest member.
        """
        print("\n\n===== Evolution ====")
        start_evolution_time = time.time()
        while self.cycles > 0:
            sample = []  # sampled individuals (drawn with replacement)
            sample_accs = []  # accuracies of the sampled individuals
            while len(sample) < self.sample_size:
                candidate = np.random.randint(0, len(self.population))
                sample.append(self.population[candidate])
                sample_accs.append(self.accuracies[candidate])
            # Get best individual on sample to serve as parent
            max_sample_acc_index, max_sample_acc = \
                self._get_best_individual_accuracy(sample_accs)
            parent = sample[max_sample_acc_index]
            # print('parent: ', parent)
            child = parent.copy()
            child = self._mutate_individual(child)
            # print('child: ', child)
            child_actions = self._construct_action([child])
            gnn = self.form_gnn_info(child_actions[0])
            _, child_acc = \
                self.submodel_manager.train(gnn, format=self.args.format)
            # print('child acc: ', child_acc)
            print("parent: ", str(parent), " val_score: ", str(max_sample_acc),
                  "| child: ", str(child), ", val_score: ", str(child_acc))
            self.accuracies.append(child_acc)
            self.population.append(child)
            # Periodically report the best architecture found so far.
            if self.cycles % self.args.eval_cycle == 0:
                self.derive_from_population()
            # Remove oldest individual (Aging/Regularized evolution)
            self.population.popleft()
            self.accuracies.popleft()
            print("[POPULATION STATS] Mean/Median/Best: ",
                  np.mean(self.accuracies),
                  np.median(self.accuracies),
                  np.max(self.accuracies))
            self.cycles -= 1
        end_evolution_time = time.time()
        total_evolution_time = end_evolution_time - start_evolution_time
        print('Time spent on evolution: ' +
              str(total_evolution_time))
        print('Total elapsed time: ' +
              str(total_evolution_time + self.init_time))
        print("===== Evolution DONE ====")

    def derive(self, sample_num=None):
        """Framework entry point; ``sample_num`` is accepted but unused."""
        self.derive_from_population()
| graphnas/evolution_trainer.py | 5,889 | This class implements the Asyncronous Aging Evolution,
proposed by Real et. al. on:
Regularized Evolution for Image Classifier Architecture Search
available on: https://arxiv.org/abs/1802.01548
train from scratch to get the final score run 10 times to get Mean and Stddev Choose a random position on the individual to mutate This position will receive a randomly chosen index of the search_spaces's list for the action corresponding to that position in the individual print('adding individual :', len(population)) list with indexes to population individuals accuracies of the sampled individuals Get best individual on sample to serve as parent print('parent: ', parent) print('child: ', child) print('child acc: ', child_acc) Remove oldest individual (Aging/Regularized evolution) | 785 | en | 0.817421 |
# Generated by Django 2.1.4 on 2018-12-28 02:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``Post`` model.

    ``Post`` has a title, free-text content, a ``date_posted`` timestamp
    defaulting to now, and an ``author`` foreign key to the project's user
    model; posts are removed with their author (on_delete=CASCADE).
    """

    initial = True

    dependencies = [
        # Depends on whichever user model AUTH_USER_MODEL points at.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('content', models.TextField()),
                ('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| mysite/blog/migrations/0001_initial.py | 907 | Generated by Django 2.1.4 on 2018-12-28 02:51 | 45 | en | 0.649989 |
# Make SweetPea visible regardless of whether it's been installed.
import sys
sys.path.append("..")
from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition
from sweetpea.constraints import no_more_than_k_in_a_row
from sweetpea import fully_cross_block, synthesize_trials_non_uniform, print_experiments
"""
Padmala & Pessoa (2011) design
***********************
factors (levels):
- reward (rewarded, non-rewarded)
- response (left, right)
- response Transition (repetition, switch). Factor dependent on response:
- congruency (congruent, incongruent, neutral)
- congruency Transition (congruent-congruent, congruent-incongruent, congruent-neutral, incongruent-congruent, incongruent-incongruent, incongruent-neutral, neutral-congruent, neutral-incongruent, neutral-neutral)
design:
- counterbalancing reward x response x response_transition x congruency_transition
"""
# DEFINE REWARD, RESPONSE and CONGRUENCY FACTORS
reward = Factor("reward", ["rewarded", "non-rewarded"])
# NOTE(review): the design description above lists response levels as
# (left, right), but the code uses building/house -- confirm which is intended.
response = Factor("response", ["building", "house"])
congruency = Factor("congruency", ["congruent", "incongruent", "neutral"])
# DEFINE CONGRUENCY TRANSITION FACTOR
def con_con(congruency):
    """True for a (congruent, congruent) pair of congruency levels."""
    return (congruency[0], congruency[1]) == ("congruent", "congruent")

def con_inc(congruency):
    """True for a (congruent, incongruent) pair of congruency levels."""
    return (congruency[0], congruency[1]) == ("congruent", "incongruent")

def con_ntr(congruency):
    """True for a (congruent, neutral) pair of congruency levels."""
    return (congruency[0], congruency[1]) == ("congruent", "neutral")

def inc_con(congruency):
    """True for an (incongruent, congruent) pair of congruency levels."""
    return (congruency[0], congruency[1]) == ("incongruent", "congruent")

def inc_inc(congruency):
    """True for an (incongruent, incongruent) pair of congruency levels."""
    return (congruency[0], congruency[1]) == ("incongruent", "incongruent")

def inc_ntr(congruency):
    """True for an (incongruent, neutral) pair of congruency levels."""
    return (congruency[0], congruency[1]) == ("incongruent", "neutral")

def ntr_con(congruency):
    """True for a (neutral, congruent) pair of congruency levels."""
    return (congruency[0], congruency[1]) == ("neutral", "congruent")

def ntr_inc(congruency):
    """True for a (neutral, incongruent) pair of congruency levels."""
    return (congruency[0], congruency[1]) == ("neutral", "incongruent")

def ntr_ntr(congruency):
    """True for a (neutral, neutral) pair of congruency levels."""
    return (congruency[0], congruency[1]) == ("neutral", "neutral")
# Each derived level pairs the congruency levels of two consecutive trials
# (a Transition window over the congruency factor), yielding the 3x3 grid
# of possible congruency transitions.
congruency_transition = Factor("congruency_transition", [
    DerivedLevel("congruent-congruent", Transition(con_con, [congruency])),
    DerivedLevel("congruent-incongruent", Transition(con_inc, [congruency])),
    DerivedLevel("congruent-neutral", Transition(con_ntr, [congruency])),
    DerivedLevel("incongruent-congruent", Transition(inc_con, [congruency])),
    DerivedLevel("incongruent-incongruent", Transition(inc_inc, [congruency])),
    DerivedLevel("incongruent-neutral", Transition(inc_ntr, [congruency])),
    DerivedLevel("neutral-congruent", Transition(ntr_con, [congruency])),
    DerivedLevel("neutral-incongruent", Transition(ntr_inc, [congruency])),
    DerivedLevel("neutral-neutral", Transition(ntr_ntr, [congruency]))
])
# DEFINE RESPONSE TRANSITION FACTOR
def response_repeat(responses):
    """True when the two responses in the transition window are identical."""
    first, second = responses[0], responses[1]
    return first == second

def response_switch(responses):
    """True when the response changed within the transition window."""
    return not response_repeat(responses)
# Whether the response repeats or switches between consecutive trials.
response_transition = Factor("resp_transition", [
    DerivedLevel("repeat", Transition(response_repeat, [response])),
    DerivedLevel("switch", Transition(response_switch, [response]))
])
# DEFINE SEQUENCE CONSTRAINTS
constraints = []  # no sequence constraints for this design
# DEFINE EXPERIMENT
# The design includes congruency, but only the four factors in `crossing`
# are fully crossed (congruency enters via congruency_transition).
design = [congruency, reward, response, congruency_transition, response_transition]
crossing = [reward, response, congruency_transition, response_transition]
block = fully_cross_block(design, crossing, constraints)
# SOLVE: synthesize 5 trial sequences and print them.
experiments = synthesize_trials_non_uniform(block, 5)
print_experiments(block, experiments)
| example_programs/PadmalaPessoa2011.py | 3,593 | Make SweetPea visible regardless of whether it's been installed. DEFINE REWARD, RESPONSE and CONGRUENCY FACTORS DEFINE CONGRUENCY TRANSITION FACTOR DEFINE RESPONSE TRANSITION FACTOR DEFINE SEQUENCE CONSTRAINTS DEFINE EXPERIMENT SOLVE | 233 | en | 0.822226 |
# coding: utf-8
import pprint
import re
import six
class KeyStatusInfo:
    """Status information for a KMS key (auto-generated SDK model).

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    sensitive_list = []

    openapi_types = {
        'key_id': 'str',
        'key_state': 'str'
    }

    attribute_map = {
        'key_id': 'key_id',
        'key_state': 'key_state'
    }

    def __init__(self, key_id=None, key_state=None):
        """KeyStatusInfo - a model defined in huaweicloud sdk"""
        self._key_id = None
        self._key_state = None
        self.discriminator = None

        if key_id is not None:
            self.key_id = key_id
        if key_state is not None:
            self.key_state = key_state

    @property
    def key_id(self):
        """Gets the key_id of this KeyStatusInfo.

        Key ID.

        :return: The key_id of this KeyStatusInfo.
        :rtype: str
        """
        return self._key_id

    @key_id.setter
    def key_id(self, key_id):
        """Sets the key_id of this KeyStatusInfo.

        Key ID.

        :param key_id: The key_id of this KeyStatusInfo.
        :type: str
        """
        self._key_id = key_id

    @property
    def key_state(self):
        """Gets the key_state of this KeyStatusInfo.

        Key state (reconstructed from the original mojibake docstring;
        confirm against the Huawei Cloud KMS API docs):
        - 2: enabled
        - 3: disabled
        - 4: scheduled for deletion
        - 5: pending import
        - 7: frozen

        :return: The key_state of this KeyStatusInfo.
        :rtype: str
        """
        return self._key_state

    @key_state.setter
    def key_state(self, key_state):
        """Sets the key_state of this KeyStatusInfo.

        Key state (reconstructed from the original mojibake docstring;
        confirm against the Huawei Cloud KMS API docs):
        - 2: enabled
        - 3: disabled
        - 4: scheduled for deletion
        - 5: pending import
        - 7: frozen

        :param key_state: The key_state of this KeyStatusInfo.
        :type: str
        """
        self._key_state = key_state

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes listed as sensitive (none for this model).
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, KeyStatusInfo):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/key_status_info.py | 3,578 | Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
Returns true if both objects are equal
KeyStatusInfo - a model defined in huaweicloud sdk
Returns true if both objects are not equal
For `print` and `pprint`
Gets the key_id of this KeyStatusInfo.
å¯é¥ID
:return: The key_id of this KeyStatusInfo.
:rtype: str
Sets the key_id of this KeyStatusInfo.
å¯é¥ID
:param key_id: The key_id of this KeyStatusInfo.
:type: str
Gets the key_state of this KeyStatusInfo.
å¯é¥ç¶æïŒ - 2䞺å¯çšç¶æ - 3䞺çŠçšç¶æ - 4䞺计åå é€ç¶æ - 5䞺çåŸ
富å
¥ç¶æ - 7䞺å»ç»ç¶æ
:return: The key_state of this KeyStatusInfo.
:rtype: str
Sets the key_state of this KeyStatusInfo.
å¯é¥ç¶æïŒ - 2䞺å¯çšç¶æ - 3䞺çŠçšç¶æ - 4䞺计åå é€ç¶æ - 5䞺çåŸ
富å
¥ç¶æ - 7䞺å»ç»ç¶æ
:param key_state: The key_state of this KeyStatusInfo.
:type: str
Returns the model properties as a dict
Returns the string representation of the model
coding: utf-8 | 1,022 | en | 0.500621 |
# -*- encoding: utf-8 -*-
"""
@Author : zYx.Tom
@Contact : 526614962@qq.com
@site : https://github.com/zhuyuanxiang/tensorflow_cookbook
---------------------------
@Software : PyCharm
@Project : TensorFlow_Machine_Learning_Cookbook
@File : C0106_operations.py
@Version : v0.1
@Time : 2019-10-29 14:11
@License : (C)Copyright 2018-2019, zYx.Tom
@Reference : ãTensorFlowæºåšåŠä¹ 宿æåïŒNick McClureã, Sec0106ïŒP110
@Desc : TensorFlow åºç¡ïŒå£°ææäœ
"""
# common imports
import os
import sys
import matplotlib.pyplot as plt
import numpy as np # pip install numpy<1.17ïŒå°äº1.17å°±äžäŒæ¥é
import sklearn
import tensorflow as tf
import winsound
from tensorflow.python.framework import ops
from tools import show_values
# Configure numpy printing: fixed-point output, full arrays, wide lines.
np.set_printoptions(precision = 8, suppress = True, threshold = np.inf, linewidth = 200)
# Fix the random seed so repeated runs produce the same random data.
np.random.seed(42)
# Reset TensorFlow's default computation graph.
ops.reset_default_graph()
# Python >= 3.5 is required
assert sys.version_info >= (3, 5)
# Scikit-Learn >= 0.20 is required
assert sklearn.__version__ >= "0.20"
# Silence the warning "Your CPU supports instructions that this TensorFlow
# binary was not compiled to use: AVX2 FMA".
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Open graph session
sess = tf.Session()
# Division variants: integer, true (float), floor division, and modulo.
show_values(tf.div(3, 4), "tf.div(3,4) = æŽæ°é€")
show_values(tf.truediv(3, 4), "tf.truediv(3,4) = æµ®ç¹é€")
show_values(tf.floordiv(3.0, 4.0), "tf.floordiv(3.0,4.0) = æµ®ç¹åæŽé€")
show_values(tf.mod(22.0, 5.0), "tf.mod(22.0,5.0) = åæš¡")
# Cross product -- compute the pairwise cross product.
# The cross product of two vectors is itself a vector (unlike the dot
# product, which yields a scalar), perpendicular to the plane spanned by
# the two input vectors.
show_values(tf.cross([1., 0., 0.], [0., 1., 0.]),
            "tf.cross([1., 0., 0.], [0., 1., 0.]) = åŒ éç¹ç§¯")
# tf.cross only supports three-dimensional vectors:
# show_values(tf.cross([1., 0., 0., 0.], [0., 1., 0., 0.]),
#             "tf.cross([1., 0., 0.,0.], [0., 1., 0.,0.]) = åŒ éç¹ç§¯")
# See the book's table of math functions (p. 11).
show_values(tf.div(tf.sin(3.1416 / 4.), tf.cos(3.1416 / 4.)),
            "tan(pi/4) = 1 = tf.div(tf.sin(3.1416/4.),tf.cos(3.1416/4.))")
test_nums = range(15)
# What should we get with list comprehension
expected_output = [3 * x * x - x + 10 for x in test_nums]
print('-' * 50)
print("[3 * x ^ 2 - x + 10 for x in test_nums] = ")
print(expected_output)
# Custom polynomial: f(x) = 3x^2 - x + 10 (e.g. f(11) = 362)
def custom_polynomial(value):
    """Evaluate 3*x**2 - x + 10 on a scalar or tensor input."""
    return tf.subtract(3 * tf.square(value), value) + 10
show_values(custom_polynomial(11), "custom_polynomial(11) = 3x^2-x+10,x=11=>")
# Evaluate the polynomial for each test input.
for num in test_nums:
    show_values(custom_polynomial(num), "custom_polynomial({})".format(num))
# -----------------------------------------------------------------
# Beep to signal that the run has finished.
winsound.Beep(600, 500)
# Show any matplotlib figures that were created during the run.
if len(plt.get_fignums()) != 0:
    plt.show()
    pass
| 01_Introduction/C0106_operations.py | 3,173 | @Author : zYx.Tom
@Contact : 526614962@qq.com
@site : https://github.com/zhuyuanxiang/tensorflow_cookbook
---------------------------
@Software : PyCharm
@Project : TensorFlow_Machine_Learning_Cookbook
@File : C0106_operations.py
@Version : v0.1
@Time : 2019-10-29 14:11
@License : (C)Copyright 2018-2019, zYx.Tom
@Reference : ãTensorFlowæºåšåŠä¹ 宿æåïŒNick McClureã, Sec0106ïŒP110
@Desc : TensorFlow åºç¡ïŒå£°ææäœ
-*- encoding: utf-8 -*- common imports pip install numpy<1.17ïŒå°äº1.17å°±äžäŒæ¥é è®Ÿçœ®æ°æ®æŸç€ºçç²Ÿç¡®åºŠäžºå°æ°ç¹å3äœ å©çšéæºç§åïŒä¿è¯éæºæ°æ®ççš³å®æ§ïŒäœ¿åŸæ¯æ¬¡éæºæµè¯çç»æäžæ · åå§åé»è®€ç计ç®åŸ Python â¥3.5 is required Scikit-Learn â¥0.20 is required å±èœèŠåïŒYour CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA Open graph session åŒ éç¹ç§¯--Compute the pairwise cross product åŒ éç¹ç§¯ïŒå³äž€äžªåéçåä¹ïŒåå«åé积ãå€ç§¯ãå积ïŒåä¹çè¿ç®ç»ææ¯äžäžªåéèäžæ¯äžäžªæ éã 䞀䞪åéçç¹ç§¯äžè¿äž€äžªåéç»æçåæ å¹³é¢åçŽã åŒ éç¹ç§¯å¿
é¡»æ¯äžç»Žç show_values(tf.cross([1., 0., 0., 0.], [0., 1., 0., 0.]), "tf.cross([1., 0., 0.,0.], [0., 1., 0.,0.]) = åŒ éç¹ç§¯") ToSeeïŒP11ïŒæ°åŠåœæ°å衚 What should we get with list comprehension èªå®ä¹åœæ° 3x^2-x+10,x=11,=> return tf.subtract(3 * tf.square(value), value) + 10 ----------------------------------------------------------------- è¿è¡ç»æçæé | 1,235 | zh | 0.415768 |
import numpy
# FIXME: copy the functions here
from sklearn.mixture.gmm import log_multivariate_normal_density, logsumexp
def sample_gaussian2(means, cv, size, random_state, mins, maxes):
    """Draw `size` samples from N(means, cv) truncated to the box [mins, maxes].

    Rejection sampling: keeps drawing batches until `size` in-box samples are
    collected; the batch size is scaled by the observed acceptance rate.

    Returns an array of shape (n_features, size).
    """
    def once(size1):
        # One raw batch, transposed to (n_features, n_samples), with
        # out-of-box columns rejected.
        g = random_state.multivariate_normal(means, cv, size1).T
        g = g.reshape(len(means), -1)
        mask = (g >= mins[:, None]).all(axis=0)
        mask &= (g <= maxes[:, None]).all(axis=0)
        return g[:, mask]
    g = once(size)
    generated = size
    while g.shape[1] < size:
        if g.shape[1] == 0:
            # Nothing survived the cut yet; retry with a full batch instead of
            # dividing by zero in the acceptance-rate estimate below.
            togen = size
        else:
            # Estimate how many more raw draws are needed from the empirical
            # acceptance rate g.shape[1] / generated.
            togen = (size - g.shape[1]) * generated // g.shape[1]
        g1 = once(togen)
        generated = generated + togen
        g = numpy.append(g, g1, axis=1)
    return g[:, :size]
class GMM(object):
    """Gaussian mixture model truncated to an axis-aligned box.

    Stored parameters: ``weights`` (nc,), ``means`` (nc, nf), ``covs``
    (nc, nf, nf) full covariances, and ``lims`` (nf, 2) per-feature
    [min, max] bounds.  Points outside the box get log-probability -inf in
    `score`, and `sample` rejects draws that fall outside.
    """
    def __init__(self, weights, means, covs, lims):
        self.weights = numpy.array(weights)
        self.means = numpy.array(means)
        self.covs = numpy.array(covs)
        self.lims = numpy.array(lims)
        # Shape sanity checks: nc components over nf features.
        [nc] = self.weights.shape
        assert self.means.shape[0] == nc
        [nc, nf] = self.means.shape
        assert self.covs.shape[0] == nc
        assert self.covs.shape[1] == nf
        assert self.covs.shape[2] == nf
        [nc, nf, nf] = self.covs.shape
        assert self.lims.shape[0] == nf
        assert self.lims.shape[1] == 2
    def score(self, X, return_responsibilities=False):
        """Log-density of each row of X under the truncated mixture.

        If `return_responsibilities` is True, also return per-component
        responsibilities; out-of-box points get -inf / zero responsibilities.
        """
        nc = len(self.weights)
        X = numpy.array(X)
        if X.ndim == 1:
            # Accept a 1-d array as a column of single-feature samples.
            X = X[:, None]
        if X.shape[1] != self.means.shape[1]:
            raise ValueError('The shape of X is not compatible with self')
        mins = self.lims[:, 0]
        maxes = self.lims[:, 1]
        # log p(x, component) = log weight + component log-density
        lpr = numpy.log(self.weights) + \
            log_multivariate_normal_density(X,
                self.means,
                self.covs, 'full')
        mask = (X >= mins[None, :]).all(axis=-1)
        mask &= (X <= maxes[None, :]).all(axis=-1)
        logprob = logsumexp(lpr, axis=1)
        logprob[~mask] = -numpy.inf
        if return_responsibilities:
            responsibilities = numpy.exp(lpr - logprob[:, None])
            responsibilities[~mask] = 0
            return logprob, responsibilities
        return logprob
    def marginalize(self, axes):
        """Return the marginal GMM over the given feature axes."""
        return GMM(self.weights, self.means[..., axes], self.covs[..., axes][..., axes, :], self.lims[axes])
    def sample(self, size, random_state=None):
        """Generate random samples from the model.

        Returns
        -------
        X : array_like, shape (n_samples, n_features)
            List of samples
        """
        if random_state is None:
            random_state = numpy.random
        mins = self.lims[:, 0]
        maxes = self.lims[:, 1]
        X = numpy.empty(size, ('f8', (self.means.shape[1],)))
        # decide which component to use for each sample
        comps = random_state.choice(len(self.weights), p=self.weights, size=size)
        # for each component, generate all needed samples
        for comp in range(len(self.weights)):
            # occurrences of current component in X
            comp_in_X = (comp == comps)
            # number of those occurrences
            num_comp_in_X = comp_in_X.sum()
            if num_comp_in_X > 0:
                cv = self.covs[comp]
                # Box-truncated rejection sampling for this component.
                g = sample_gaussian2(
                    self.means[comp], cv,
                    num_comp_in_X, random_state, mins, maxes).T
                X[comp_in_X] = g
        return X
    @classmethod
    def fit(kls, nc, X, lims):
        """Fit an `nc`-component mixture to the rows of X; `lims` is passed through."""
        # FIXME: get rid of this and add weights support
        # NOTE(review): sklearn.mixture.GMM was removed in scikit-learn 0.20;
        # this requires an old scikit-learn -- confirm the pinned version.
        from sklearn import mixture
        # XXX: Do not use DPGMM because the normalization is buggy
        # https://github.com/scikit-learn/scikit-learn/issues/7371
        model = mixture.GMM(nc, covariance_type='full', n_iter=1000)
        model.fit(X)
        if not model.converged_:
            raise ValueError("Your data is strange. Gaussian mixture failed to converge")
        return kls(model.weights_, model.means_, model.covars_, lims)
class Confidence(object):
    """Maps a model's log-likelihood scores to confidence levels.

    The mapping is a piecewise-linear interpolation through a precomputed
    (score-threshold, confidence-level) table.
    """
    def __init__(self, model, confidence_table):
        self.model = model
        self.confidence_table = confidence_table
    def score(self, sc):
        """Interpolated confidence level for log-likelihood value(s) `sc`.

        Scores below the table range map to 1.0, above it to 0.0.
        """
        knots, levels = self.confidence_table
        return numpy.interp(sc, knots, levels, left=1., right=0.)
    @classmethod
    def fit(cls, model, nsample=4*1024, vmin=-5, vmax=0, nb=100):
        """Build the table by sampling `model` and taking score percentiles.

        Confidence levels are spaced as 1 - logspace(vmin, vmax, nb).
        """
        # FIXME: add weight support here
        draws = model.sample(nsample)
        loglik = model.score(draws)
        confidence_levels = 1 - numpy.logspace(vmin, vmax, num=nb)
        thresholds = numpy.percentile(loglik, 100. - confidence_levels * 100.)
        return cls(model, numpy.array([thresholds, confidence_levels]))
class CombinedModel(object):
    """Product of several density models: log-scores add across models.

    Sampling draws candidates from the first model and rejection-samples them
    against the combined density of the remaining models.
    """
    def __init__(self, models):
        self.models = models
    def score(self, X):
        """Sum of every member model's score for the rows of X."""
        total = 0
        for m in self.models:
            total = total + m.score(X)
        return total
    def marginalize(self, axes):
        """Marginalize every member model onto `axes` and recombine."""
        reduced = [m.marginalize(axes) for m in self.models]
        return CombinedModel(reduced)
    def sample(self, nsample, random_state=None):
        """Draw `nsample` rows by rejection sampling against models[1:]."""
        rng = numpy.random if random_state is None else random_state
        def draw(count):
            # Candidates come from the first model; acceptance probability is
            # proportional to exp(sum of the other models' log-scores).
            candidates = self.models[0].sample(count, rng)
            nfeat = candidates.shape[-1]
            lnprob = 0
            for m in self.models[1:]:
                lnprob = lnprob + m.score(candidates)
            weight = numpy.exp(lnprob)
            weight /= weight.max()
            accepted = rng.rand(len(candidates)) < weight
            return candidates[accepted].reshape(-1, nfeat)
        out = draw(nsample)
        total_drawn = nsample
        while len(out) < nsample:
            # Scale the next batch by the observed acceptance rate.
            need = (nsample - len(out)) * total_drawn // len(out)
            out = numpy.append(out, draw(need), axis=0)
            total_drawn = total_drawn + need
        return out[:nsample]
| bananas/model.py | 5,790 | Generate random samples from the model.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
FIXME: copy the functions here decide which component to use for each sample for each component, generate all needed samples occurrences of current component in X number of those occurrences FIXME: get rid of this and add weights support XXX: Do not use DPGMM because the normalization is buggy https://github.com/scikit-learn/scikit-learn/issues/7371 FIXME: add weight support here | 506 | en | 0.810332 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import wagtail.wagtailcore.fields
import modelcluster.fields
class Migration(migrations.Migration):
    """Adds BlogCategory, an explicit M2M through table, and ManyToManyBlogPage."""

    dependencies = [
        ('wagtailcore', '0020_add_index_on_page_first_published_at'),
        ('tests', '0013_iconsetting_notyetregisteredsetting_testsetting'),
    ]
    operations = [
        # Simple category lookup table with a unique name.
        migrations.CreateModel(
            name='BlogCategory',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('name', models.CharField(unique=True, max_length=80)),
            ],
        ),
        # Explicit through model for the page<->category M2M; the FK back to
        # the page is added below once ManyToManyBlogPage exists.
        migrations.CreateModel(
            name='BlogCategoryBlogPage',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('category', models.ForeignKey(to='tests.BlogCategory', related_name='+')),
            ],
        ),
        # Page subclass with a plain M2M (adverts) and a through-model M2M
        # (blog_categories).
        migrations.CreateModel(
            name='ManyToManyBlogPage',
            fields=[
                (
                    'page_ptr',
                    models.OneToOneField(
                        primary_key=True,
                        serialize=False,
                        parent_link=True,
                        auto_created=True,
                        to='wagtailcore.Page'
                    )
                ),
                ('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
                ('adverts', models.ManyToManyField(to='tests.Advert', blank=True)),
                (
                    'blog_categories',
                    models.ManyToManyField(
                        to='tests.BlogCategory',
                        through='tests.BlogCategoryBlogPage',
                        blank=True
                    )
                ),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        migrations.AddField(
            model_name='blogcategoryblogpage',
            name='page',
            field=modelcluster.fields.ParentalKey(to='tests.ManyToManyBlogPage', related_name='categories'),
        ),
    ]
| wagtail/tests/testapp/migrations/0014_m2m_blog_page.py | 2,284 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from pytest import raises, approx
def test():
    """Smoke test: the package must be importable without raising."""
    import pytq_crawlib  # noqa: F401
if __name__ == "__main__":
    import os
    # Run only this file's tests, keeping stdout (-s) and native tracebacks.
    basename = os.path.basename(__file__)
    pytest.main([basename, "-s", "--tb=native"])
| tests/test_import.py | 277 | !/usr/bin/env python -*- coding: utf-8 -*- | 42 | en | 0.34282 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (print_function, unicode_literals,
absolute_import, with_statement)
import os
import sys
if __name__ == '__main__':
    if __package__ is None:
        # Executed as a plain script (not via -m): add the parent directory
        # to sys.path so the absolute import of `excel2mysql` below resolves.
        dir_name = os.path.dirname(__file__)
        sys.path.append(
            os.path.abspath(
                os.path.join(dir_name, '..')))
    from excel2mysql.migrate import migrate
    migrate()
| excel2mysql/__main__.py | 448 | !/usr/bin/env python -*- coding: utf-8 -*- | 42 | en | 0.34282 |
import biathlonresults as api
def test_cups():
    """Season 1819 exposes a list of 37 cups."""
    res = api.cups(1819)
    assert isinstance(res, list)
    assert len(res) == 37
def test_cup_results():
    """Cup standings payload has a Rows list led by BOE Johannes Thingnes."""
    res = api.cup_results("BT1819SWRLCP__SMTS")
    assert isinstance(res, dict)
    assert isinstance(res["Rows"], list)
    assert res["Rows"][0]["Name"] == "BOE Johannes Thingnes"
def test_athletes():
    """Name search matches family/given name fragments case-insensitively."""
    res = api.athletes("boe", "johannes")
    assert isinstance(res, dict)
    assert isinstance(res["Athletes"], list)
    assert "boe" in res["Athletes"][0]["FamilyName"].lower()
    assert "johannes" in res["Athletes"][0]["GivenName"].lower()
def test_cisbios():
    """Bio lookup by IBU id returns the athlete's full name."""
    res = api.cisbios("BTNOR11605199301")
    assert isinstance(res, dict)
    assert res["FullName"] == "Johannes Thingnes BOE"
def test_all_results():
    """A retired athlete's full results history has a fixed, known length."""
    # Raphael Poiree
    res = api.all_results("BTFRA10908197401")
    assert isinstance(res, dict)
    assert isinstance(res["Results"], list)
    assert res["Results"][0]["SO"] == 2
    assert len(res["Results"]) == 329
def test_events():
    """Season 1819, level 1 has 10 events, the last in Oslo Holmenkollen."""
    res = api.events(1819, 1)
    assert isinstance(res, list)
    assert len(res) == 10
    assert res[0]["Level"] == 1
    assert res[-1]["ShortDescription"] == "Oslo Holmenkollen"
def test_competitions():
    """An event id lists its 8 competitions with short descriptions."""
    # Pokljuka 1819
    res = api.competitions("BT1819SWRLCP01")
    assert isinstance(res, list)
    assert len(res) == 8
    assert res[-1]["ShortDescription"] == "Women 10km Pursuit"
def test_results():
    """Competition results list 60 finishers ordered from the winner down."""
    # Pokljuka 1819 W PU
    res = api.results("BT1819SWRLCP01SWPU")
    assert isinstance(res, dict)
    assert isinstance(res["Results"], list)
    assert len(res["Results"]) == 60
    assert res["Results"][0]["ResultOrder"] == 1
    assert res["Results"][0]["Name"] == "MAKARAINEN Kaisa"
def test_stats():
    """Men's world-cup podium statistic has its record holder in the first row."""
    # podiums men stat
    res = api.stats("WCPOD_M", "WCPOD", "ATH", "M")
    assert isinstance(res, dict)
    assert isinstance(res["Rows"], list)
    # in case someone breaks Bjoerndalen's record
    assert int(res["Rows"][0]["Value"]) >= 199
| tests/test_api.py | 2,006 | Raphael Poiree Pokljuka 1819 Pokljuka 1819 W PU podiums men stat in case someone breaks Bjoerndalen's record | 108 | en | 0.451887 |
# Copyright (c) Max-Planck-Institut fÃŒr Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
"""
Run a job from hdf5.
"""
from pyiron.base.job.wrapper import job_wrapper_function
def register(parser):
    """Attach the job-wrapper command line options to `parser`.

    Option order is kept stable so --help output does not change.
    """
    parser.add_argument("-d", "--debug", action="store_true",
                        help="enable debug mode")  # TODO: what's that mean?
    parser.add_argument("-j", "--job-id",
                        help="job id to run")
    parser.add_argument("-p", "--project",
                        help="directory where the HDF5 file of the job is located")
    parser.add_argument("-f", "--file-path",
                        help="path to the HDF5 file")
    parser.add_argument("-s", "--submit", action="store_true",
                        help="submit to queuing system on remote host")
def main(args):
    """Entry point: run the wrapped pyiron job described by the parsed CLI args."""
    job_wrapper_function(
        working_directory=args.project,
        job_id=args.job_id,
        file_path=args.file_path,
        debug=args.debug,
        submit_on_remote=args.submit
    )
| pyiron/cli/wrapper.py | 1,139 | Run a job from hdf5.
Copyright (c) Max-Planck-Institut fÃŒr Eisenforschung GmbH - Computational Materials Design (CM) Department Distributed under the terms of "New BSD License", see the LICENSE file. TODO: what's that mean? | 225 | en | 0.729952 |
import numpy as np
import wmf
import batched_inv
import batched_inv_mp
import solve_mp
import solve_gpu
np.random.seed(123)  # fixed seed so repeated runs factorize identically
# NOTE(review): numpy.load on a pickle file needs allow_pickle=True on
# numpy >= 1.16.3 -- confirm the pinned numpy version if this starts failing.
B = np.load("test_matrix.pkl")
S = wmf.log_surplus_confidence_matrix(B, alpha=2.0, epsilon=1e-6)
# presumably 40 latent factors + 1 bias dimension, given the
# recompute_factors_bias_* solver below -- confirm.
num_factors = 40 + 1
num_iterations = 1
batch_size = 1000
# Alternative solvers (multiprocessing / GPU) can be swapped in here.
solve = batched_inv.solve_sequential
# solve = solve_mp.solve_mp
# solve = solve_gpu.solve_gpu
U, V = wmf.factorize(S, num_factors=num_factors, lambda_reg=1e-5, num_iterations=num_iterations, init_std=0.01, verbose=True, dtype='float32',
                     recompute_factors=batched_inv_mp.recompute_factors_bias_batched_mp, batch_size=batch_size, solve=solve)
| test_batched_inv_mp.py | 633 | solve = solve_mp.solve_mp solve = solve_gpu.solve_gpu | 53 | en | 0.851619 |
"""
ASGI config for cryptocurrency project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before building the app (no-op when
# DJANGO_SETTINGS_MODULE is already set in the environment).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cryptocurrency.settings')
# Module-level ASGI callable picked up by ASGI servers.
application = get_asgi_application()
| cryptocurrency/asgi.py | 405 | ASGI config for cryptocurrency project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/ | 220 | en | 0.761994 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-function-args
# pylint: disable=too-many-statements, unused-argument, too-many-arguments
"""Tensorcore template for cuda backend"""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from ..util import get_const_tuple, traverse_inline, simplify
from ..nn.pad import pad
from ..nn.util import get_pad_tuple
from .tensor_intrin import intrin_wmma_load_matrix_A
from .tensor_intrin import intrin_wmma_load_matrix_W
from .tensor_intrin import intrin_wmma_store_matrix
from .tensor_intrin import intrin_wmma_gemm
def nhwc_tensorcore_cuda(cfg, Input, Filter, stride, padding, dilation, out_dtype):
    """Compute declaration for tensorcore.

    NHWC conv2d: Input is (batch, H, W, in_channel) and Filter is
    (kernel_h, kernel_w, in_channel, num_filter).  Both are cast to float16
    for the wmma tensor cores and accumulated in `out_dtype`.
    """
    assert isinstance(stride, int) or len(stride) == 2
    assert isinstance(dilation, int) or len(dilation) == 2
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation
    batch, in_height, in_width, in_channel = get_const_tuple(Input.shape)
    kernel_h, kernel_w, _, num_filter = get_const_tuple(Filter.shape)
    # wmma fragments only come in 16x16x16, 32x8x16 and 8x32x16 (m, n, k).
    assert (batch % 16 == 0 and in_channel % 16 == 0 and num_filter % 16 == 0) or \
        (batch % 8 == 0 and in_channel % 16 == 0 and num_filter % 32 == 0) or \
        (batch % 32 == 0 and in_channel % 16 == 0 and num_filter % 8 == 0), \
        "The shape of (batch, in_channel, num_filter) "\
        "must be multiple of (16, 16, 16) or (32, 16, 8) or (8, 16, 32) for now"
    # compute the output shape
    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w))
    out_channel = num_filter
    out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
    out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
    pad_before = [0, pad_top, pad_left, 0]
    pad_after = [0, pad_down, pad_right, 0]
    PaddedInput = pad(Input, pad_before, pad_after, name="PaddedInput")
    rc = te.reduce_axis((0, in_channel), name='rc')
    ry = te.reduce_axis((0, kernel_h), name='ry')
    rx = te.reduce_axis((0, kernel_w), name='rx')
    # convert data type of input feature maps and weights
    TransPaddedInput = te.compute(
        PaddedInput.shape,
        lambda n, h, w, c: PaddedInput[n, h, w, c].astype('float16'))
    TransFilter = te.compute(
        Filter.shape, lambda h, w, i, o: Filter[h, w, i, o].astype('float16'))
    Output = te.compute(
        (batch, out_height, out_width, out_channel),
        lambda nn, yy, xx, ff: te.sum(
            TransPaddedInput[nn, yy * stride_h + ry * dilation_h,
                             xx * stride_w + rx * dilation_w, rc].astype(out_dtype) *
            TransFilter[ry, rx, rc, ff].astype(out_dtype), axis=[ry, rx, rc]),
        name="Conv2dOutput", tag="conv2d_nhwc_tensorcore")
    return Output
def schedule_nhwc_tensorcore_cuda(cfg, s, Conv):
    """Schedule tensorcore template.

    Tiles the NHWC conv output over (block, warp, wmma-fragment) levels,
    stages data/weights through shared memory and wmma fragments, and
    tensorizes load/gemm/store with the wmma intrinsics.
    """
    kh, kw, ic = s[Conv].op.reduce_axis
    out_dtype = Conv.dtype
    trans_paddata, kernel = s[Conv].op.input_tensors
    in_dtype = trans_paddata.dtype
    batch, _, _, _ = get_const_tuple(Conv.shape)
    _, _, _, out_channels = get_const_tuple(kernel.shape)
    paddata = s[trans_paddata].op.input_tensors
    # inline the pad and dtype transform
    s[trans_paddata].compute_inline()
    s[kernel].compute_inline()
    s[paddata[0]].compute_inline()
    # Designate the memory hierarchy
    AS = s.cache_read(trans_paddata, 'shared', [Conv])
    WS = s.cache_read(kernel, 'shared', [Conv])
    AF = s.cache_read(AS, 'wmma.matrix_a', [Conv])
    WF = s.cache_read(WS, 'wmma.matrix_b', [Conv])
    ConvF = s.cache_write(Conv, 'wmma.accumulator')
    if Conv.op in s.outputs:
        output = Conv
        ConvS = s.cache_read(ConvF, 'shared', [Conv])
        OL = ConvS
    else:
        # Conv is consumed by a later op; schedule that op as the output and
        # keep Conv itself in shared memory.
        output = s.outputs[0].output(0)
        s[Conv].set_scope('shared')
        OL = Conv
    # Schedule for autotvm
    cfg.define_knob("block_row_warps", [1, 2, 4])
    cfg.define_knob("block_col_warps", [1, 2, 4])
    cfg.define_knob("warp_row_tiles", [1, 2, 4])
    cfg.define_knob("warp_col_tiles", [1, 2, 4])
    cfg.define_knob("chunk", [1, 2, 4, 8])
    cfg.define_knob("offset", [0, 8])
    cfg.define_knob("vector_width", [1, 2, 4, 8])
    # Candidate wmma_m values depend on (batch, out_channels) divisibility.
    if (batch % 16 == 0 and out_channels % 16 == 0):
        cfg.define_knob("wmma_m", [16, 8, 32])
    elif (batch % 8 == 0 and out_channels % 32 == 0):
        cfg.define_knob("wmma_m", [8, 16, 32])
    elif (batch % 32 == 0 and out_channels % 8 == 0):
        cfg.define_knob("wmma_m", [32, 16, 8])
    # fallback support
    target = tvm.target.Target.current()
    if cfg.is_fallback:
        ref_log = autotvm.tophub.load_reference_log(
            target.target_name, target.model, 'conv2d_nhwc_tensorcore.cuda')
        cfg.fallback_with_reference_log(ref_log)
    block_row_warps = cfg["block_row_warps"].val
    block_col_warps = cfg["block_col_warps"].val
    warp_row_tiles = cfg["warp_row_tiles"].val
    warp_col_tiles = cfg["warp_col_tiles"].val
    chunk = cfg["chunk"].val
    offset = cfg["offset"].val
    wmma_m = cfg["wmma_m"].val
    vector_width = cfg["vector_width"].val
    wmma_k = 16
    # wmma_n follows from wmma_m: fragments are 16x16x16, 8x32x16 or 32x8x16.
    if wmma_m == 16:
        wmma_n = 16
    elif wmma_m == 8:
        wmma_n = 32
    elif wmma_m == 32:
        wmma_n = 8
    warp_size = 32
    block_x = te.thread_axis('blockIdx.x')
    block_y = te.thread_axis('blockIdx.y')
    block_z = te.thread_axis('blockIdx.z')
    thread_x = te.thread_axis('threadIdx.x')
    thread_y = te.thread_axis('threadIdx.y')
    thread_z = te.thread_axis('threadIdx.z')
    # Define the intrin strides
    def get_strides(extents):
        # Row-major strides: product of the trailing extents at each position.
        return [np.prod(extents[i:]).tolist() for i in range(len(extents))]
    # `offset` pads shared-memory rows via storage_align (presumably to avoid
    # bank conflicts -- see the storage_align calls below).
    AS_align = chunk * wmma_k + offset
    WS_align = warp_col_tiles * block_col_warps * wmma_n + offset
    block_factor_n = wmma_m * warp_row_tiles * block_row_warps
    block_factor_o = wmma_n * warp_col_tiles * block_col_warps
    CS_align = block_factor_o + offset
    AS_strides = get_strides([1, 1, AS_align, 1])
    AL_strides = get_strides([1, 1, wmma_k, 1])
    WS_strides = get_strides([WS_align, 1])
    WL_strides = get_strides([wmma_n * warp_col_tiles, 1])
    CL_strides = get_strides([1, 1, wmma_n * warp_col_tiles, 1])
    CS_strides = get_strides([1, 1, CS_align, 1])
    # Schedule for output
    nc, hc, wc, oc = output.op.axis
    block_k = s[output].fuse(hc, wc)
    s[output].bind(block_k, block_z)
    block_i, nc = s[output].split(nc, factor=block_factor_n)
    block_j, oc = s[output].split(oc, factor=block_factor_o)
    s[output].reorder(block_k, block_i, block_j, nc, oc)
    t = s[output].fuse(nc, oc)
    t, ti = s[output].split(t, factor=vector_width)
    t, tx = s[output].split(t, factor=warp_size)
    t, ty = s[output].split(t, factor=block_row_warps)
    t, tz = s[output].split(t, factor=block_col_warps)
    s[output].bind(block_i, block_x)
    s[output].bind(block_j, block_y)
    s[output].bind(tz, thread_z)
    s[output].bind(ty, thread_y)
    s[output].bind(tx, thread_x)
    s[output].vectorize(ti)
    # Schedule wmma store
    s[OL].compute_at(s[output], block_j)
    nc, hc, wc, oc = OL.op.axis
    s[OL].reorder(hc, wc, nc, oc)
    s[OL].storage_align(wc, CS_align - 1, CS_align)
    oc, ooc = s[OL].split(oc, factor=wmma_n)
    oc, oci = s[OL].split(oc, factor=warp_col_tiles)
    _, oc = s[OL].split(oc, factor=block_col_warps)
    nc, nnc = s[OL].split(nc, factor=wmma_m)
    nc, nci = s[OL].split(nc, factor=warp_row_tiles)
    _, nc = s[OL].split(nc, factor=block_row_warps)
    s[OL].reorder(nc, oc, nci, oci, nnc, ooc)
    s[OL].bind(nc, thread_y)
    s[OL].bind(oc, thread_z)
    # Schedule wmma computation
    s[ConvF].compute_at(s[OL], oc)
    n, h, w, o = ConvF.op.axis
    n, nnf = s[ConvF].split(n, factor=wmma_m)
    o, oof = s[ConvF].split(o, factor=wmma_n)
    ic, ii = s[ConvF].split(ic, factor=wmma_k)
    ko, ki = s[ConvF].split(ic, factor=chunk)
    s[ConvF].reorder(kh, kw, ko, ki, n, o, nnf, oof, ii)
    s[AF].compute_at(s[ConvF], ki)
    s[WF].compute_at(s[ConvF], ki)
    # Schedule wmma load
    n, h, w, i = AF.op.axis
    n, nn = s[AF].split(n, factor=wmma_m)
    i, ii = s[AF].split(i, factor=wmma_k)
    s[AF].reorder(n, i, nn, ii)
    kh, kw, i, o = WF.op.axis
    i, ii = s[WF].split(i, factor=wmma_k)
    o, oo = s[WF].split(o, factor=wmma_n)
    s[WF].reorder(o, i, oo)
    s[WF].reorder(i, o, ii, oo)
    s[WS].compute_at(s[ConvF], ko)
    s[AS].compute_at(s[ConvF], ko)
    # Schedule for data's share memory
    n, h, w, i = AS.op.axis
    s[AS].reorder(h, w, n, i)
    s[AS].storage_align(w, AS_align - 1, AS_align)
    t = s[AS].fuse(n, i)
    t, ti = s[AS].split(t, factor=vector_width)
    t, tx = s[AS].split(t, factor=warp_size)
    t, ty = s[AS].split(t, factor=block_row_warps)
    _, tz = s[AS].split(t, factor=block_col_warps)
    s[AS].bind(ty, thread_y)
    s[AS].bind(tz, thread_z)
    s[AS].bind(tx, thread_x)
    s[AS].vectorize(ti)
    # Schedule for kernel's share memory
    kh, kw, ic, o = WS.op.axis
    t = s[WS].fuse(ic, o)
    s[WS].storage_align(ic, WS_align - 1, WS_align)
    t, ti = s[WS].split(t, factor=vector_width)
    t, tx = s[WS].split(t, factor=warp_size)
    t, ty = s[WS].split(t, factor=block_row_warps)
    _, tz = s[WS].split(t, factor=block_col_warps)
    s[WS].bind(ty, thread_y)
    s[WS].bind(tz, thread_z)
    s[WS].bind(tx, thread_x)
    s[WS].vectorize(ti)
    shape = (wmma_m, wmma_n, wmma_k)
    # tensorize the wmma process
    AS_shape = (wmma_m, 1, 1, wmma_k)
    AL_shape = (wmma_m, 1, 1, wmma_k)
    WS_shape = (wmma_k, wmma_n)
    WL_shape = (wmma_k, wmma_n)
    CL_shape = (wmma_m, 1, 1, wmma_n)
    CS_shape = (wmma_m, 1, 1, wmma_n)
    AL_gemm = te.placeholder(AL_shape, name='A', dtype=in_dtype)
    WL_gemm = te.placeholder(WL_shape, name='B', dtype=in_dtype)
    k_gemm = te.reduce_axis((0, wmma_k), name="k")
    CL_compute = te.compute(CL_shape, lambda ii, t0, t1, jj:
                            te.sum(AL_gemm[ii, t0, t1, k_gemm].astype(out_dtype) * \
                                   WL_gemm[k_gemm, jj].astype(out_dtype), axis=k_gemm),
                            name='C')
    s[AF].tensorize(nn, intrin_wmma_load_matrix_A(AL_strides, AS_strides, shape,
                                                  "row_major", AS_shape, AL_shape, in_dtype))
    s[WF].tensorize(ii, intrin_wmma_load_matrix_W(WL_strides, WS_strides, shape,
                                                  "row_major", WS_shape, WL_shape, in_dtype))
    s[OL].tensorize(nnc, intrin_wmma_store_matrix(CS_strides, CL_strides,
                                                  shape, out_dtype, CL_shape, CS_shape))
    s[ConvF].tensorize(nnf, intrin_wmma_gemm(AL_gemm, WL_gemm, CL_compute, AL_strides,
                                             WL_strides, CL_strides, shape))
    # Report the FLOP count so autotvm can compute GFLOPS per config.
    N, OH, OW, CO = get_const_tuple(output.shape)
    KH, KW, CI, _ = get_const_tuple(kernel.shape)
    cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW)
@autotvm.register_topi_compute("conv2d_nhwc_tensorcore.cuda")
def conv2d_nhwc_tensorcore(cfg, data, kernel, strides, padding, dilation, out_dtype):
    """Compute conv2d with tensorcore for NHWC layout."""
    return nhwc_tensorcore_cuda(cfg, data, kernel, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("conv2d_nhwc_tensorcore.cuda")
def schedule_conv2d_nhwc_tensorcore(cfg, outs):
    """TOPI schedule callback: apply the tensorcore schedule to each
    conv2d_nhwc_tensorcore op found in the output graph."""
    s = te.create_schedule([x.op for x in outs])
    def _callback(op):
        if 'conv2d_nhwc_tensorcore' in op.tag:
            schedule_nhwc_tensorcore_cuda(cfg, s, op.output(0))
    traverse_inline(s, outs[0].op, _callback)
    return s
| topi/python/topi/cuda/conv2d_nhwc_tensorcore.py | 12,782 | Compute conv2d with tensorcore for NCHW layout
Compute declaration for tensorcore
TOPI schedule callback
Schedule tensorcore template
Tensorcore template for cuda backend
Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pylint: disable=invalid-name, too-many-locals, too-many-function-args pylint: disable=too-many-statements, unused-argument, too-many-arguments compute the output shape convert data type of input feature maps and weights inline the pad and dtype transform Designate the memory hierarchy Schedule for autotvm fallback support Define the intrin strides Schedule for output Schedule wmma store Schedule wmma computation Schedule wmma load Schedule for data's share memory Schedule for kernel's share memory tensorize the wmma process | 1,455 | en | 0.804194 |
# Generated by Django 2.1.5 on 2019-05-03 15:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Makes Post.video optional (allows blank and NULL)."""

    dependencies = [
        ('blog', '0035_post_video'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='video',
            field=models.FileField(blank=True, null=True, upload_to='uploads/'),
        ),
    ]
| blog/migrations/0036_auto_20190503_1645.py | 402 | Generated by Django 2.1.5 on 2019-05-03 15:45 | 45 | en | 0.441726 |
import asyncio
import os
import warnings
from datetime import date
from secedgar.cik_lookup import CIKLookup
from secedgar.client import NetworkClient
from secedgar.core._base import AbstractFiling
from secedgar.core.filing_types import FilingType
from secedgar.exceptions import FilingTypeError
from secedgar.utils import sanitize_date
class CompanyFilings(AbstractFiling):
"""Base class for receiving EDGAR filings.
Args:
cik_lookup (str): Central Index Key (CIK) for company of interest.
filing_type (Union[secedgar.core.filing_types.FilingType, None]): Valid filing type
enum. Defaults to None. If None, then all filing types for CIKs will be returned.
start_date (Union[str, datetime.datetime, datetime.date], optional): Date before
which not to fetch reports. Stands for "date after."
Defaults to None (will fetch all filings before ``end_date``).
end_date (Union[str, datetime.datetime, datetime.date], optional):
Date after which not to fetch reports.
Stands for "date before." Defaults to today.
count (int): Number of filings to fetch. Will fetch up to `count` if that many filings
are available. Defaults to all filings available.
ownership (str): Must be in {"include", "exclude"}. Whether or not to include ownership
filings.
match_format (str): Must be in {"EXACT", "AMEND", "ALL"}.
kwargs: See kwargs accepted for :class:`secedgar.client.network_client.NetworkClient`.
.. versionadded:: 0.1.5
"""
def __init__(self,
cik_lookup,
filing_type=None,
start_date=None,
end_date=date.today(),
client=None,
count=None,
ownership="include",
match_format="ALL",
**kwargs):
# Leave params before other setters
self._params = {
"action": "getcompany",
"output": "xml",
"owner": ownership,
"start": 0,
}
self.start_date = start_date
self.end_date = end_date
self.filing_type = filing_type
self.count = count
self.match_format = match_format
# Make default client NetworkClient and pass in kwargs
self._client = client if client is not None else NetworkClient(**kwargs)
# make CIKLookup object for users if not given
self.cik_lookup = cik_lookup
@property
def path(self):
"""str: Path added to client base."""
return "cgi-bin/browse-edgar"
@property
def params(self):
""":obj:`dict`: Parameters to include in requests."""
return self._params
@property
def client(self):
"""``secedgar.client._base``: Client to use to make requests."""
return self._client
@property
def start_date(self):
"""Union([datetime.date, datetime.datetime, str]): Date before which no filings fetched."""
return self._start_date
@property
def match_format(self):
"""The match format to use when searching for filings."""
return self._match_format
@match_format.setter
def match_format(self, val):
if val in ["EXACT", "AMEND", "ALL"]:
self._match_format = val
else:
raise ValueError("Format must be one of EXACT,AMEND,ALL")
@start_date.setter
def start_date(self, val):
if val is not None:
self._params["datea"] = sanitize_date(val)
self._start_date = val
else:
self._start_date = None
@property
def end_date(self):
"""Union([datetime.date, datetime.datetime, str]): Date after which no filings fetched."""
return self._end_date
@end_date.setter
def end_date(self, val):
self._params["dateb"] = sanitize_date(val)
self._end_date = val
@property
def filing_type(self):
"""``secedgar.core.FilingType``: FilingType enum of filing."""
return self._filing_type
@filing_type.setter
def filing_type(self, filing_type):
if isinstance(filing_type, FilingType):
self._params["type"] = filing_type.value
elif filing_type is not None:
raise FilingTypeError
self._filing_type = filing_type
@property
def count(self):
"""Number of filings to fetch."""
return self._count
@count.setter
def count(self, val):
if val is None:
self._count = None
elif not isinstance(val, int):
raise TypeError("Count must be positive integer or None.")
elif val < 1:
raise ValueError("Count must be positive integer or None.")
else:
self._count = val
self._params["count"] = val
@property
def cik_lookup(self):
"""``secedgar.cik_lookup.CIKLookup``: CIKLookup object."""
return self._cik_lookup
@cik_lookup.setter
def cik_lookup(self, val):
if not isinstance(val, CIKLookup):
val = CIKLookup(val, client=self.client)
self._cik_lookup = val
def get_urls(self, **kwargs):
"""Get urls for all CIKs given to Filing object.
Args:
**kwargs: Anything to be passed to requests when making get request.
See keyword arguments accepted for
``secedgar.client._base.AbstractClient.get_soup``.
Returns:
urls (list): List of urls for txt files to download.
"""
return {
key: self._get_urls_for_cik(cik, **kwargs)
for key, cik in self.cik_lookup.lookup_dict.items()
}
# TODO: Change this to return accession numbers that are turned into URLs later
def _get_urls_for_cik(self, cik, **kwargs):
"""Get all urls for specific company according to CIK.
Must match start date, end date, filing_type, and count parameters.
Args:
cik (str): CIK for company.
**kwargs: Anything to be passed to requests when making get request.
See keyword arguments accepted for
``secedgar.client._base.AbstractClient.get_soup``.
Returns:
txt_urls (list of str): Up to the desired number of URLs for that specific company
if available.
"""
self.params["CIK"] = cik
links = []
self.params["start"] = 0 # set start back to 0 before paginating
while self.count is None or len(links) < self.count:
data = self.client.get_soup(self.path, self.params, **kwargs)
links.extend([link.string for link in data.find_all("filinghref")])
self.params["start"] += self.client.batch_size
if len(data.find_all("filinghref")) == 0: # no more filings
break
txt_urls = [link[:link.rfind("-")].strip() + ".txt" for link in links]
if isinstance(self.count, int) and len(txt_urls) < self.count:
warnings.warn(
"Only {num} of {count} filings were found for {cik}.".format(
num=len(txt_urls), count=self.count, cik=cik))
# Takes `count` filings at most
return txt_urls[:self.count]
    def save(self, directory, dir_pattern=None, file_pattern=None):
        """Save files in specified directory.
        Each txt url looks something like:
        https://www.sec.gov/Archives/edgar/data/1018724/000101872419000043/0001018724-19-000043.txt
        Args:
            directory (str): Path to directory where files should be saved.
            dir_pattern (str): Format string for subdirectories. Default is "{cik}/{type}".
                Valid options are {cik} and/or {type}.
            file_pattern (str): Format string for files. Default is "{accession_number}".
                Valid options are {accession_number}.
        Returns:
            None
        Raises:
            ValueError: If no text urls are available for given filing object.
        """
        # get_urls_safely is defined elsewhere in this class; presumably it is
        # get_urls plus the ValueError on empty results -- TODO confirm.
        urls = self.get_urls_safely()
        if dir_pattern is None:
            dir_pattern = os.path.join("{cik}", "{type}")
        if file_pattern is None:
            file_pattern = "{accession_number}"
        # Build one (url, destination-path) pair per filing of every CIK.
        inputs = []
        for cik, links in urls.items():
            formatted_dir = dir_pattern.format(cik=cik,
                                               type=self.filing_type.value)
            for link in links:
                formatted_file = file_pattern.format(
                    accession_number=self.get_accession_number(link))
                path = os.path.join(directory, formatted_dir, formatted_file)
                inputs.append((link, path))
        # NOTE(review): asyncio.get_event_loop() is deprecated for this use
        # since Python 3.10; asyncio.run() may be needed on newer runtimes.
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self.client.wait_for_download_async(inputs))
| secedgar/core/company.py | 8,908 | Base class for receiving EDGAR filings.
Args:
cik_lookup (str): Central Index Key (CIK) for company of interest.
filing_type (Union[secedgar.core.filing_types.FilingType, None]): Valid filing type
enum. Defaults to None. If None, then all filing types for CIKs will be returned.
start_date (Union[str, datetime.datetime, datetime.date], optional): Date before
which not to fetch reports. Stands for "date after."
Defaults to None (will fetch all filings before ``end_date``).
end_date (Union[str, datetime.datetime, datetime.date], optional):
Date after which not to fetch reports.
Stands for "date before." Defaults to today.
count (int): Number of filings to fetch. Will fetch up to `count` if that many filings
are available. Defaults to all filings available.
ownership (str): Must be in {"include", "exclude"}. Whether or not to include ownership
filings.
match_format (str): Must be in {"EXACT", "AMEND", "ALL"}.
kwargs: See kwargs accepted for :class:`secedgar.client.network_client.NetworkClient`.
.. versionadded:: 0.1.5
Get all urls for specific company according to CIK.
Must match start date, end date, filing_type, and count parameters.
Args:
cik (str): CIK for company.
**kwargs: Anything to be passed to requests when making get request.
See keyword arguments accepted for
``secedgar.client._base.AbstractClient.get_soup``.
Returns:
txt_urls (list of str): Up to the desired number of URLs for that specific company
if available.
``secedgar.cik_lookup.CIKLookup``: CIKLookup object.
``secedgar.client._base``: Client to use to make requests.
Number of filings to fetch.
Union([datetime.date, datetime.datetime, str]): Date after which no filings fetched.
``secedgar.core.FilingType``: FilingType enum of filing.
Get urls for all CIKs given to Filing object.
Args:
**kwargs: Anything to be passed to requests when making get request.
See keyword arguments accepted for
``secedgar.client._base.AbstractClient.get_soup``.
Returns:
urls (list): List of urls for txt files to download.
The match format to use when searching for filings.
:obj:`dict`: Parameters to include in requests.
str: Path added to client base.
Save files in specified directory.
Each txt url looks something like:
https://www.sec.gov/Archives/edgar/data/1018724/000101872419000043/0001018724-19-000043.txt
Args:
directory (str): Path to directory where files should be saved.
dir_pattern (str): Format string for subdirectories. Default is "{cik}/{type}".
Valid options are {cik} and/or {type}.
file_pattern (str): Format string for files. Default is "{accession_number}".
Valid options are {accession_number}.
Returns:
None
Raises:
ValueError: If no text urls are available for given filing object.
Union([datetime.date, datetime.datetime, str]): Date before which no filings fetched.
Leave params before other setters Make default client NetworkClient and pass in kwargs make CIKLookup object for users if not given TODO: Change this to return accession numbers that are turned into URLs later set start back to 0 before paginating no more filings Takes `count` filings at most | 3,256 | en | 0.737018 |
from __future__ import absolute_import, division, unicode_literals
import socket
from .base import StatsClientBase, PipelineBase
class Pipeline(PipelineBase):
    """Pipeline that packs queued stats into datagrams bounded by the UDP size limit."""
    def __init__(self, client):
        super(Pipeline, self).__init__(client)
        self._maxudpsize = client._maxudpsize
    def _send(self):
        """Flush all queued stats, batching as many as fit per datagram."""
        # popleft preserves the order in which stats were recorded.
        packet = self._stats.popleft()
        while self._stats:
            metric = self._stats.popleft()
            projected = len(packet) + len(metric) + 1  # +1 for the '\n' separator
            if projected >= self._maxudpsize:
                # Current datagram is full: ship it and start a fresh one.
                self._client._after(packet)
                packet = metric
            else:
                packet = packet + '\n' + metric
        self._client._after(packet)
class StatsClient(StatsClientBase):
    """A client for statsd."""
    def __init__(self, host='localhost', port=8125, prefix=None,
                 maxudpsize=512, ipv6=False):
        """Create a new client."""
        if ipv6:
            fam = socket.AF_INET6
        else:
            fam = socket.AF_INET
        # Resolve the host once up front and keep the first matching address.
        info = socket.getaddrinfo(host, port, fam, socket.SOCK_DGRAM)
        family = info[0][0]
        addr = info[0][4]
        self._addr = addr
        self._sock = socket.socket(family, socket.SOCK_DGRAM)
        self._prefix = prefix
        self._maxudpsize = maxudpsize
    def _send(self, data):
        """Send data to statsd, silently dropping it on socket errors."""
        try:
            payload = data.encode('ascii')
            self._sock.sendto(payload, self._addr)
        except (socket.error, RuntimeError):
            # No time for love, Dr. Jones!
            pass
    def pipeline(self):
        """Return a Pipeline that batches stats for this client."""
        return Pipeline(self)
| statsd/client/udp.py | 1,531 | A client for statsd.
Create a new client.
Send data to statsd.
Use popleft to preserve the order of the stats. No time for love, Dr. Jones! | 141 | en | 0.676127 |
"""grpc service.
Reliably launch and connect to grpc process.
"""
import datetime
import enum
import logging
import os
import subprocess
import sys
import tempfile
import time
from typing import Any, Dict, Optional
from typing import TYPE_CHECKING
import grpc
from wandb.proto import wandb_server_pb2 as spb
from wandb.proto import wandb_server_pb2_grpc as pbgrpc
from wandb.sdk.wandb_settings import Settings
if TYPE_CHECKING:
from google.protobuf.internal.containers import MessageMap
def _pbmap_apply_dict(
    m: "MessageMap[str, spb.SettingsValue]", d: Dict[str, Any]
) -> None:
    """Copy the entries of ``d`` into the protobuf settings map ``m``.

    Each value is wrapped in a ``spb.SettingsValue`` according to its Python
    type. ``datetime`` and ``Enum`` values are skipped entirely (they have no
    corresponding SettingsValue field).
    """
    for k, v in d.items():
        if isinstance(v, datetime.datetime):
            continue
        if isinstance(v, enum.Enum):
            continue
        sv = spb.SettingsValue()
        if v is None:
            sv.null_value = True
        elif isinstance(v, bool):
            # bool must be tested before int: isinstance(True, int) is True in
            # Python, so checking int first stored booleans as int_value and
            # made this branch unreachable.
            sv.bool_value = v
        elif isinstance(v, int):
            sv.int_value = v
        elif isinstance(v, float):
            sv.float_value = v
        elif isinstance(v, str):
            sv.string_value = v
        elif isinstance(v, tuple):
            sv.tuple_value.string_values.extend(v)
        m[k].CopyFrom(sv)
class _Service:
    """Launch the wandb grpc service as a subprocess and talk to it via a stub."""

    _stub: Optional[pbgrpc.InternalServiceStub]

    def __init__(self) -> None:
        self._stub = None

    def _grpc_wait_for_port(
        self, fname: str, proc: subprocess.Popen = None
    ) -> Optional[int]:
        """Poll for up to 30s until the server writes its port number to ``fname``.

        Args:
            fname: Path of the port file the server process will create.
            proc: The server process; if it exits before the port file is
                readable, waiting is aborted.
        Returns:
            The port number, or None on timeout or early process exit.
        """
        time_max = time.time() + 30
        port = None
        while time.time() < time_max:
            if proc and proc.poll() is not None:
                # poll() returns the exit code once the process has finished;
                # compare against None because an exit code of 0 is falsy and
                # a plain truthiness check would miss a clean exit.
                print("proc exited with", proc.returncode)
                return None
            if not os.path.isfile(fname):
                time.sleep(0.2)
                continue
            try:
                with open(fname) as f:
                    port = int(f.read())
            except Exception as e:
                # The file may exist but not be completely written yet; keep
                # retrying until the deadline instead of giving up on the
                # first parse failure.
                print("Error:", e)
                time.sleep(0.2)
                continue
            return port
        return None

    def _grpc_launch_server(self) -> Optional[int]:
        """Launch grpc server and return port."""
        # References for starting processes
        # - https://github.com/wandb/client/blob/archive/old-cli/wandb/__init__.py
        # - https://stackoverflow.com/questions/1196074/how-to-start-a-background-process-in-python
        kwargs: Dict[str, Any] = dict(close_fds=True)
        pid = os.getpid()
        with tempfile.TemporaryDirectory() as tmpdir:
            fname = os.path.join(tmpdir, f"port-{pid}.txt")
            pid_str = str(os.getpid())
            exec_cmd_list = [sys.executable, "-m"]
            # Add coverage collection if needed
            if os.environ.get("COVERAGE_RCFILE"):
                exec_cmd_list += ["coverage", "run", "-m"]
            internal_proc = subprocess.Popen(
                exec_cmd_list
                + [
                    "wandb",
                    "service",
                    "--port-filename",
                    fname,
                    "--pid",
                    pid_str,
                    "--debug",
                    "true",
                ],
                env=os.environ,
                **kwargs,
            )
            # Block until the child reports its port (or dies / times out).
            port = self._grpc_wait_for_port(fname, proc=internal_proc)
        return port

    def start(self) -> Optional[int]:
        """Start the service process; return the grpc port it listens on."""
        port = self._grpc_launch_server()
        return port

    def connect(self, port: int) -> None:
        """Open an insecure grpc channel to localhost:``port`` and keep its stub."""
        channel = grpc.insecure_channel("localhost:{}".format(port))
        stub = pbgrpc.InternalServiceStub(channel)
        self._stub = stub
        # TODO: make sure service is up

    def _get_stub(self) -> Optional[pbgrpc.InternalServiceStub]:
        return self._stub

    def _svc_inform_init(self, settings: Settings, run_id: str) -> None:
        """Ask the service to initialize a stream for ``run_id`` with ``settings``."""
        assert self._stub
        inform_init = spb.ServerInformInitRequest()
        settings_dict = dict(settings)
        settings_dict["_log_level"] = logging.DEBUG
        _pbmap_apply_dict(inform_init._settings_map, settings_dict)
        inform_init._info.stream_id = run_id
        _ = self._stub.ServerInformInit(inform_init)

    def _svc_inform_finish(self, run_id: str = None) -> None:
        """Ask the service to finish the stream for ``run_id``."""
        assert self._stub
        assert run_id
        inform_fin = spb.ServerInformFinishRequest()
        inform_fin._info.stream_id = run_id
        _ = self._stub.ServerInformFinish(inform_fin)

    def _svc_inform_attach(self, attach_id: str) -> None:
        """Ask the service to attach to the existing stream ``attach_id``."""
        assert self._stub
        inform_attach = spb.ServerInformAttachRequest()
        inform_attach._info.stream_id = attach_id
        _ = self._stub.ServerInformAttach(inform_attach)

    def _svc_inform_teardown(self, exit_code: int) -> None:
        """Ask the service to shut down, propagating ``exit_code``."""
        assert self._stub
        inform_fin = spb.ServerInformTeardownRequest(exit_code=exit_code)
        _ = self._stub.ServerInformTeardown(inform_fin)
| wandb/sdk/service/service.py | 4,847 | Launch grpc server and return port.
grpc service.
Reliably launch and connect to grpc process.
process finished References for starting processes - https://github.com/wandb/client/blob/archive/old-cli/wandb/__init__.py - https://stackoverflow.com/questions/1196074/how-to-start-a-background-process-in-python Add coverage collection if needed TODO: make sure service is up | 375 | en | 0.776191 |
import numpy as np
from sklearn.metrics import r2_score
np.random.seed(42)
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
figsize = (8, 4)
def show_r2(results):
    """Plot the R^2 test scores (log-space and raw exec. time) per iteration."""
    data_size = results["data_size"]
    log_scores = [s["r2"] for s in results["test_scores"]]
    raw_scores = [s["r2"] for s in results["test_scores_exp"]]
    fig, ax = plt.subplots(figsize=(8, 4))
    ax.plot(log_scores, marker="o", label="Log(Exec. time)", color="#777777")
    ax.plot(raw_scores, marker="o", label="Exec. time", color="#111111")
    ax.set_xticks(list(range(len(data_size))))
    ax.set_xticklabels(data_size, rotation=60)
    ax.set_ylim((0, 1))
    ax.set_yticks(np.arange(0, 1, 0.1))
    ax.set_xlabel("# Executed Jobs")
    ax.set_ylabel("$R^2$ Score")
    ax.legend()
    return ax
def compare_r2(results, results_real_card, results_random_sampling=None, exp=True):
    """Compare R^2 curves: active labeling vs. real-cardinality top-line vs. random baseline."""
    data_size = results["data_size"]
    score_key = "test_scores_exp" if exp else "test_scores"
    test_scores_real = results_real_card[score_key]
    test_scores = results[score_key]
    fig, ax = plt.subplots(figsize=(8, 2))
    if results_random_sampling:
        test_scores_random = results_random_sampling[score_key]
        ax.plot([s["r2"] for s in test_scores_random], marker="^", linestyle="dotted",
                label="Rand. samples - Estimated out card. (Baseline)",
                color=sns.color_palette()[-4])
    ax.plot([s["r2"] for s in test_scores], marker="o", label="Active labeling - Estimated out card.",
            color="#111111")
    ax.plot([s["r2"] for s in test_scores_real], linestyle="--", marker="s",
            label="Active labeling - Real out card. (Top-line)",
            color=sns.color_palette()[-3], alpha=0.85)
    ax.set_xticks(list(range(len(data_size))))
    ax.set_xticklabels(data_size, rotation=60)
    ax.set_ylim((0, 1))
    ax.set_yticks(np.arange(0, 1, 0.2))
    ax.set_xlabel("# Cumulated Executed Jobs")
    ax.set_ylabel("$R^2$ of pred.\nExec. Time")
    ax.legend()
    return ax
def show_uncertainty(results, show_errors=False):
    """Plot per-iteration model uncertainty and mark early-stop iterations.

    Uncertainty is the mean width of the prediction interval in real time
    units (the log-space bounds are exponentiated before differencing).
    Prints the chosen stop iterations and returns the axes.
    """
    data_size = results["data_size"]
    # Interval widths are recomputed from each iteration's bounds.  (The
    # previous read of results["model_uncertainty"] was immediately
    # overwritten -- a dead assignment, now removed.)
    IQRs_RMSE = np.array([np.mean(np.exp(I["uncertainty_high"]) - np.exp(I["uncertainty_low"]))
                          for I in results["iterations_results"]])
    IQRs_std = np.array([np.std(np.exp(I["uncertainty_high"]) - np.exp(I["uncertainty_low"]))
                         for I in results["iterations_results"]])
    fig, ax = plt.subplots(figsize=(8, 2))
    if show_errors:
        ax.errorbar(np.arange(len(IQRs_RMSE)),
                    IQRs_RMSE,
                    yerr=IQRs_std, fmt='o', label="Uncertainty")
    else:
        ax.plot(IQRs_RMSE, marker="o", label="Uncertainty")
    ax.set_xticks(list(range(len(data_size))))
    ax.set_xticklabels(data_size, rotation=60)
    ax.set_xlabel("# Cumulated Executed Jobs")
    ax.set_ylabel("Model\nUncertainty [ms]")
    # Early-stop rule: an iteration qualifies when its uncertainty is a new
    # minimum AND the relative improvement over the local minimum of the
    # previous window is at least final_th.
    final_th = 0.1
    count = 0
    min_u = IQRs_RMSE[0]
    min_local_u = IQRs_RMSE[0]
    stops = []
    for i in range(1, len(data_size)):
        r = IQRs_RMSE[i] / min_local_u
        if (r > 1) or (IQRs_RMSE[i] > min_u):
            pass  # uncertainty went up: not a stop candidate
        elif (1 - r) < final_th:
            pass  # improvement below threshold: keep labeling
        else:
            print(i, data_size[i], "-> STOP!")
            count += 1
            stops.append({"iteration": i, "data_size": data_size[i],
                          "uncertainty": IQRs_RMSE[i],
                          "uncertainty_std": IQRs_std[i],
                          "cost": np.sum(np.exp(results["iterations_results"][i]["train_labels"]))
                          })
            print("--------------------------------")
        min_u = min(IQRs_RMSE[:i + 1])
        min_local_u = min(IQRs_RMSE[i - 1:i + 1])
    if len(stops) == 0:
        # No early stop found: fall back to the last iteration.  The fallback
        # entry must include the uncertainty fields, otherwise the errorbar
        # call below raises KeyError (the original fallback omitted them).
        last = len(data_size) - 1
        stops.append({"iteration": last, "data_size": data_size[last],
                      "uncertainty": IQRs_RMSE[last],
                      "uncertainty_std": IQRs_std[last],
                      "cost": np.sum(np.exp(results["iterations_results"][last]["train_labels"]))})
    ax.errorbar([s["iteration"] for s in stops], [s["uncertainty"] for s in stops],
                color="red", label="Early stop", linewidth=0, marker="o")
    ax.legend()
    print(pd.DataFrame(stops))
    return ax
def show_iteration(results, iteration_to_show, exp=False, drop_outliers=False):
    """Plot real vs. predicted labels (with interval bars) for one iteration.

    Args:
        results: Experiment dict with an "iterations_results" list.
        iteration_to_show: Index of the iteration to plot.
        exp: If True, exponentiate log-space labels back to exec. time [ms].
        drop_outliers: If True, discard test points above the 97th percentile.
    Returns:
        The matplotlib axes of the plot.
    """
    y_test = results["iterations_results"][iteration_to_show]["test_labels"]
    y_pred = results["iterations_results"][iteration_to_show]["pred_labels"]
    y_pred_lower = results["iterations_results"][iteration_to_show]["uncertainty_low"]
    y_pred_upper = results["iterations_results"][iteration_to_show]["uncertainty_high"]
    p = y_test.argsort()
    if drop_outliers:
        q = np.quantile(y_test, 0.97)
        print(q)
        out_mask = y_test < q
        print(out_mask.shape)
        y_test = y_test[out_mask]
        y_pred = y_pred[out_mask]
        y_pred_lower = y_pred_lower[out_mask]
        y_pred_upper = y_pred_upper[out_mask]
        # Re-sort: the mask changed the array, so the old order is stale.
        p = y_test.argsort()
    fig, ax = plt.subplots(figsize=(6, 3))
    if exp:
        y_test = np.exp(y_test)
        y_pred = np.exp(y_pred)
        y_pred_lower = np.exp(y_pred_lower)
        y_pred_upper = np.exp(y_pred_upper)
        if drop_outliers:
            # R2 is recomputed because dropping outliers changes the score.
            new_r2 = r2_score(y_test, y_pred)
            print("NEW R2 without outliers:", new_r2)
        ax.plot(y_test[p], marker=".", linewidth=1, label="Real", color="#777777", alpha=0.5)
        # yerr expects [lower-distance, upper-distance] relative to y_pred.
        ax.errorbar(np.arange(len(y_pred)),y_pred[p], yerr=np.array([y_pred[p] - y_pred_lower[p], y_pred_upper[p] - y_pred[p]]), linewidth=0.5, fmt='.', color="#ff7f0e", label="Pred. + Interval", alpha=0.5)
        #ax.plot(np.arange(len(y_pred)), (y_pred_lower[p]+y_pred_upper[p])/2, marker=".", linewidth=0, label="smooth", color="green")
        ax.set_ylabel("Exec. Time [ms]")
        # ax.ticklabel_format(axis='y', style='sci', scilimits=(0, 3))
        #ax.set_yscale("log")
        ax.set_xlabel("Non-executed Jobs")
        ax.legend()
        print(results["test_scores_exp"][iteration_to_show])
    else:
        ax.plot(y_test[p], marker=".", linewidth=1, label="Real", color="#777777", alpha=0.5)
        ax.errorbar(np.arange(len(y_pred)), y_pred[p], yerr=np.array([y_pred[p] - y_pred_lower[p], y_pred_upper[p] - y_pred[p]]), linewidth=0.5, fmt='.', color="#ff7f0e", label="Pred. + Interval", alpha=0.5)
        ax.set_ylabel("Log(Exec. Time)")
        ax.set_xlabel("Non-executed Jobs")
        ax.legend()
        print(results["test_scores"][iteration_to_show])
    return ax
def show_iteration_2(results, iteration_to_show, drop_outliers=False):
    """Scatter real vs. predicted exec. time (log-log) for one iteration."""
    it = results["iterations_results"][iteration_to_show]
    y_test = it["test_labels"]
    y_pred = it["pred_labels"]
    y_pred_lower = it["uncertainty_low"]
    y_pred_upper = it["uncertainty_high"]
    p = y_test.argsort()
    new_r2 = r2_score(y_test, y_pred)
    print("NEW R2 log with outliers:", new_r2)
    if drop_outliers:
        q = np.quantile(y_test, 0.97)
        print(q)
        keep = y_test < q
        print(keep.shape)
        y_test = y_test[keep]
        y_pred = y_pred[keep]
        y_pred_lower = y_pred_lower[keep]
        y_pred_upper = y_pred_upper[keep]
        p = y_test.argsort()
    fig, ax = plt.subplots(figsize=(6, 6))
    # Back-transform from log space to real execution times.
    y_test = np.exp(y_test)
    y_pred = np.exp(y_pred)
    y_pred_lower = np.exp(y_pred_lower)
    y_pred_upper = np.exp(y_pred_upper)
    if drop_outliers:
        new_r2 = r2_score(y_test, y_pred)
        print("NEW R2 without outliers:", new_r2)
    ax.plot(y_test[p], y_test[p], marker=".", linewidth=1, label="Real", color="#777777", alpha=0.5)
    yerr = np.array([y_pred[p] - y_pred_lower[p], y_pred_upper[p] - y_pred[p]])
    ax.errorbar(y_test[p], y_pred[p], yerr=yerr, linewidth=0.5, fmt='.',
                color="#ff7f0e", label="Pred. + Interval", alpha=0.5)
    ax.set_ylabel("Forecasted Exec. Time [ms] (Log scale)")
    ax.set_yscale("log")
    ax.set_xlabel("Real Exec. Time [ms] (Log scale)")
    ax.set_xscale("log")
    ax.legend()
    return ax
def show_td_gen(results, iteration_to_show):
    """Scatter real vs. TDGen-predicted exec. time (log-log) for one iteration."""
    key = list(results.keys())[iteration_to_show]
    y_test = results[key]["test_labels"]
    y_pred = results[key]["pred_labels"]
    print("R2 score:", r2_score(y_test, y_pred))
    order = y_test.argsort()
    fig, ax = plt.subplots(figsize=(6, 3))
    ax.plot(y_test[order], y_test[order], marker=".", linewidth=1, label="Real",
            color="#777777", alpha=0.5)
    ax.plot(y_test[order], y_pred[order], marker=".", linewidth=0, label="TDGen Pred.",
            color=sns.color_palette()[4], alpha=0.5)
    ax.set_ylabel("Forecasted Exec. Time [ms] (Log scale)")
    ax.set_yscale("log")
    ax.set_xlabel("Real Exec. Time [ms] (Log scale)")
    ax.set_xscale("log")
    ax.legend()
    return ax
def show_our_and_td_gen(our_results, td_gen_results, iteration_to_show):
    """Overlay our predictions and TDGen's against real exec. times (log-log)."""
    it = our_results["iterations_results"][iteration_to_show]
    our_y_test = np.exp(it["test_labels"])
    our_y_pred = np.exp(it["pred_labels"])
    td_key = list(td_gen_results.keys())[iteration_to_show]
    y_test = td_gen_results[td_key]["test_labels"]
    y_pred = td_gen_results[td_key]["pred_labels"]
    print("R2 score:", r2_score(y_test, y_pred))
    td_order = y_test.argsort()
    our_order = our_y_test.argsort()
    fig, ax = plt.subplots(figsize=(6, 6))
    ax.plot(y_test[td_order], y_test[td_order], marker="", linewidth=1, label="Real",
            color="#777777", alpha=0.5)
    ax.plot(our_y_test[our_order], our_y_pred[our_order], marker=".", linewidth=0,
            label="Our solution", color=sns.color_palette()[1], alpha=0.2)
    ax.plot(y_test[td_order], y_pred[td_order], marker=".", linewidth=0,
            label="TDGen Pred.", color=sns.color_palette()[4], alpha=0.2)
    ax.set_ylabel("Forecasted Exec. Time [ms] (Log scale)")
    ax.set_yscale("log")
    ax.set_xlabel("Real Exec. Time [ms] (Log scale)")
    ax.set_xscale("log")
    ax.legend()
    return ax
def compare_td_gen_r2(results, results_td_gen):
    """Compare per-iteration R^2 of our approach against the TDGen baseline."""
    data_size = results["data_size"]
    test_scores = results["test_scores_exp"]
    td_gen_scores = []
    x = []
    for k, v in results_td_gen.items():
        score = r2_score(v["test_labels"], v["pred_labels"])
        print(k, "R2 score:", score)
        td_gen_scores.append(score)
        x.append(k)
    fig, ax = plt.subplots(figsize=(8, 2))
    ax.plot(td_gen_scores, linestyle="--", marker="o", label="TDGen",
            color=sns.color_palette()[4])
    our_scores = [s["r2"] for s in test_scores]
    ax.plot(our_scores, marker="o", label="Our solution",
            color="#111111")
    ax.set_xticks(list(range(len(data_size))))
    ax.set_xticklabels(data_size, rotation=60)
    # Print the per-iteration ratio of our score over TDGen's.
    print(np.array(our_scores) / np.array(td_gen_scores))
    ax.set_xlabel("# Cumulated Executed Jobs")
    ax.set_ylabel("$R^2$ of pred. Exec. Time")
    ax.legend()
    return ax
def show_centerd_uncertainty(data, iteration, exp=False):
    """Plot prediction intervals centered around zero, sorted by interval width."""
    it = data["iterations_results"][iteration]
    print(it.keys())
    if exp:
        preds = np.exp(np.array(it["pred_labels"]))
        upper = np.exp(np.array(it["uncertainty_high"]))
        lower = np.exp(np.array(it["uncertainty_low"]))
    else:
        preds = np.array(it["pred_labels"])
        upper = np.array(it["uncertainty_high"])
        lower = np.array(it["uncertainty_low"])
    IQR_interval = upper - lower
    # Order every series by increasing interval width.
    sort_ind = np.argsort(IQR_interval)
    preds = preds[sort_ind]
    upper = upper[sort_ind]
    lower = lower[sort_ind]
    mean = (upper + lower) / 2
    std = np.std(upper + lower)
    # Center each interval at zero so widths are directly comparable.
    upper_centered = upper - mean
    lower_centered = lower - mean
    preds_centered = preds - mean
    # Mark the 95th-percentile width as the uncertainty threshold.
    IRQ_th = np.quantile(IQR_interval, 0.95)
    print(IRQ_th)
    x_idx = np.arange(len(upper_centered))
    cut = x_idx[IQR_interval[sort_ind] > IRQ_th]
    print(cut)
    fig, ax = plt.subplots(1, 1, figsize=(8, 4))
    ax.plot(preds_centered, marker=".", color="#ff7f0e", linewidth=0)
    ax.fill_between(
        x_idx, lower_centered, upper_centered, alpha=0.2, color="#ff7f0e",
        label="Pred. interval (centerd)")
    ax.axvline(cut[0], color="red", linestyle="--", label="Threshold $\eta$")
    ax.set_xlabel("Non-executed jobs sorted by uncertainty.")
    ax.set_ylabel("Predicted values (centered)")
    ax.legend()
def compute_stats_on_pred_errors(results, iteration_to_show):
    """Print summary stats of real exec. times and absolute prediction errors.

    All log-space labels are exponentiated back to execution times, and the
    printed figures are scaled from milliseconds to seconds (/ 1000).
    """
    it = results["iterations_results"][iteration_to_show]
    y_train = np.exp(it["train_labels"])
    y_test = np.exp(it["test_labels"])
    y_pred = np.exp(it["pred_labels"])
    y_pred_lower = np.exp(it["uncertainty_low"])
    y_pred_upper = np.exp(it["uncertainty_high"])
    all_real = np.hstack((y_train, y_test))
    print("Real values")
    print(pd.Series(all_real / 1000).describe())
    print("highest 5:", np.sort(all_real)[-5:] / 1000)
    print()
    print("\nAverage Prediction Error")
    print(pd.Series(np.abs(y_test - y_pred) / 1000).describe())
###
# Copyright (c) 2004, Daniel DiPaolo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Quotegrabs are like IRC sound bites. When someone says something funny,
incriminating, stupid, outrageous, ... anything that might be worth
remembering, you can grab that quote for that person. With this plugin, you
can store many quotes per person and display their most recent quote, as well
as see who "grabbed" the quote in the first place.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = "%%VERSION%%"
# XXX Replace this with an appropriate author or supybot.Author instance.
__author__ = supybot.authors.strike
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
from . import config
from . import plugin
# The `imp` module was deprecated in Python 3.4 and removed in 3.12;
# importlib.reload is the supported drop-in replacement.
from importlib import reload
reload(plugin)  # In case we're being reloaded.
if world.testing:
    from . import test
Class = plugin.Class
configure = config.configure
| plugins/QuoteGrabs/__init__.py | 2,652 | Quotegrabs are like IRC sound bites. When someone says something funny,
incriminating, stupid, outrageous, ... anything that might be worth
remembering, you can grab that quote for that person. With this plugin, you
can store many quotes per person and display their most recent quote, as well
as see who "grabbed" the quote in the first place.
Copyright (c) 2004, Daniel DiPaolo All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the author of this software nor the name of contributors to this software may be used to endorse or promote products derived from this software without specific prior written consent. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Use this for the version of this plugin. You may wish to put a CVS keyword in here if you're keeping the plugin in CVS or some similar system. XXX Replace this with an appropriate author or supybot.Author instance. This is a dictionary mapping supybot.Author instances to lists of contributions. In case we're being reloaded. vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: | 2,255 | en | 0.899325 |
#!/usr/bin/python
# configure mini-split heat pumps for E files
# uses NRCan CSV list converted to TSV
# https://oee.nrcan.gc.ca/pml-lmp/index.cfm?language_langue=en&action=app.search-recherche&appliance=ASHP2_GH
import math, os, sys
import xml.etree.ElementTree as ET

# Three positional arguments are required (E-file, AHRI number, head count),
# so argv needs at least 4 entries including the script name.  The previous
# `< 3` bound accepted two-argument invocations and then crashed on
# sys.argv[3].
if len(sys.argv) < 4:
    print(sys.argv[0], "E-file.h2k AHRI heads|0(ducted)")
    sys.exit()
e_file = sys.argv[1]
ahri = sys.argv[2]
heads = sys.argv[3]
t = ET.parse(e_file)
# tsv field list:
# Brand  Outside model  Inside model  Furnace model  HSPF (Region IV)  Rated heating capacity (Btu/hour)  Grant amount  AHRI / Verification reference  AHRI Classification  Series name/product line (if applicable)  SEER  Rated cooling capacity (Btu/hour)  Coefficient of Performance (COP) at -15 C (5 F) (at maximum capacity)  Capacity Maintenance % (Max -15C/5F / Rated 8.3C/47F)
# NOTE(review): building the grep command by string concatenation is shell
# injection prone if `ahri` ever comes from an untrusted source.
cchp_search = "grep '" + ahri + "' ccashp.tsv"
d = os.popen(cchp_search).read().rstrip('\n').split('\t')
if len(d) < 14:
    # No matching row in ccashp.tsv: fail with a clear message instead of an
    # IndexError on the field unpacking below.
    print("AHRI", ahri, "not found in ccashp.tsv")
    sys.exit(1)
# 1 kW = 3412 BTU/hr
(mfr, model, head_mdl, size_kw, hspf, seer, cop, fraction) = \
    d[0], d[1], d[2], str(float(d[5])/3412), d[4], d[10], d[12], d[13]
e = t.find("./ProgramInformation/Information")
info = ET.Element("Info", {"code": "Info. 5"})
# no NEEP until spreadsheet and/or H2K is fixed
if (int(heads) > 0):
    info.text = "MSHP-" + heads
else:
    info.text = "CENTRAL-HP"
e.append(info)
# GHG instructions are to use Info 6 when more than 1 ccASHP system is installed
# but ENS wants all heat pumps in Info 6
info = ET.Element("Info", {"code": "Info. 6"})
info.text = mfr + ";AHRI-" + ahri + ';' + model + ';' + head_mdl
e.append(info)
# Type 2 CCHP heating system
type2 = ET.parse("Type2.xml").getroot()
ahp = type2.find("AirHeatPump")
ei = ahp.find("EquipmentInformation")
ei.attrib["AHRI"] = ahri
ei.find("Manufacturer").text = mfr
ei.find("Model").text = model
ahp.find("Equipment").attrib["numberOfHeads"] = heads
specs = ahp.find("Specifications")
specs.find("OutputCapacity").attrib["value"] = size_kw
# use ASHP HSPF/SEER until NEEP spreadsheet or H2K is fixed for ccHP
specs.find("HeatingEfficiency").attrib["value"] = str(float(hspf)/1.15)
specs.find("CoolingEfficiency").attrib["value"] = seer
cchp = ahp.find("ColdClimateHeatPump")
cchp.attrib["heatingEfficiency"] = hspf
cchp.attrib["coolingEfficiency"] = seer
cchp.attrib["capacity"] = size_kw
cchp.attrib["cop"] = cop
cchp.attrib["capacityMaintenance"] = fraction
hc = t.find("./House/HeatingCooling")
hc.remove(hc.find("Type2"))
hc.append(type2)
# Rewrite the E-file in place with the new Type 2 system.
outfile = e_file
t.write(outfile, "UTF-8", True)
| ghwiz/mshp.py | 2,747 | !/usr/bin/python configure mini-split heat pumps for E files uses NRCan CSV list converted to TSV https://oee.nrcan.gc.ca/pml-lmp/index.cfm?language_langue=en&action=app.search-recherche&appliance=ASHP2_GH tsv field list: Brand Outside model Inside model Furnace model HSPF (Region IV) Rated heating capacity (Btu/hour) Grant amount AHRI / Verification reference AHRI Classification Series name/product line (if applicable) SEER Rated cooling capacity (Btu/hour) Coefficient of Performance (COP) at -15 °C (5 °F) (at maximum capacity) Capacity Maintenance % (Max -15°C/5°F ÷ Rated 8.3°C/47°F)d = os.popen(cchp_search).read().split('\t') 1 kW = 3412 BTU/hr(ahri, size_kw, hspf, cop, seer) = cols[9], str(float(cols[5])/3412), cols[4], cols[13], cols[12] no NEEP until spreadsheet and/or H2K is fixed GHG instructions are to use Info 6 when more than 1 ccASHP system is installed but ENS wants all heat pumps in Info 6print(info, info.attrib, info.text) Type 2 CCHP heating system use ASHP HSPF/SEER until NEEP spreadsheet or H2K is fixed for ccHPoutfile = "MSHP-out.h2k" | 1,072 | en | 0.697795 |
from __future__ import absolute_import, unicode_literals
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from tracpro.test.cases import TracProDataTest
class ManageUserCreateTest(TracProDataTest):
def test_create_as_non_superuser(self):
# Non-superuser cannot use this view
url = reverse('profiles.admin_create')
self.login(self.admin) # Not a superuser
# Post something that would be an error (empty form) and would be a 200
# status if we had access.
response = self.url_post('unicef', url, dict())
# We get redirected to login
self.assertEqual(response.status_code, 302, response)
self.assertIn('login', response['Location'])
def test_create_with_fields_missing(self):
# An error case
url = reverse('profiles.admin_create')
self.login(self.superuser)
# submit with no fields entered
response = self.url_post('unicef', url, dict())
self.assertEqual(response.status_code, 200, response)
error_dict = response.context['form'].errors
self.assertEqual(4, len(error_dict), repr(error_dict))
self.assertFormError(
response, 'form', 'full_name',
'This field is required.')
self.assertFormError(
response, 'form', 'email',
'This field is required.')
self.assertFormError(
response, 'form', 'password',
'This field is required.')
self.assertFormError(
response, 'form', '__all__',
'Email address already taken.' # FIXME: this error makes no sense in this context
)
def test_create_successfully(self):
# create non-superuser
url = reverse('profiles.admin_create')
self.login(self.superuser)
data = {
'full_name': "Mo Polls",
'email': "mo@trac.com",
'password': "abc123xy",
'confirm_password': "abc123xy",
'is_active': True,
'is_superuser': False,
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302, response)
user = User.objects.get(email='mo@trac.com')
self.assertEqual(user.profile.full_name, 'Mo Polls')
self.assertTrue(user.is_active)
self.assertFalse(user.is_superuser)
self.assertEqual(user, authenticate(username=user.username, password="abc123xy"))
def test_create_superuser(self):
# create superuser
url = reverse('profiles.admin_create')
self.login(self.superuser)
data = {
'full_name': "Mo Polls",
'email': "mo@trac.com",
'password': "abc123xy",
'confirm_password': "abc123xy",
'is_active': True,
'is_superuser': True,
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302, response)
user = User.objects.get(email='mo@trac.com')
self.assertEqual(user.profile.full_name, 'Mo Polls')
self.assertTrue(user.is_active)
self.assertTrue(user.is_superuser)
class ManageUserUpdateTest(TracProDataTest):
def test_update_as_non_superuser(self):
# Non-superuser cannot use this view
self.login(self.admin)
url = reverse('profiles.admin_update', args=[self.user1.pk])
response = self.url_post('unicef', url, dict())
self.assertEqual(response.status_code, 302)
self.assertIn('login', response['Location'])
def test_update(self):
# Change non-superuser to superuser, change their password, etc etc.
self.login(self.superuser)
url = reverse('profiles.admin_update', args=[self.user1.pk])
data = {
'full_name': "Mo Polls",
'email': "mo@trac.com",
'new_password': "abc123xy",
'confirm_password': "abc123xy",
'is_active': False,
'is_superuser': True,
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
user = User.objects.get(email='mo@trac.com')
self.assertEqual(user.profile.full_name, "Mo Polls")
self.assertFalse(user.is_active)
self.assertTrue(user.is_superuser)
self.assertEqual(user, authenticate(username=user.username, password="abc123xy"))
# and back. changing password optional.
data = {
'full_name': "Mo Polls",
'email': "mo@trac.com",
# 'password': "abc123xy",
# 'confirm_password': "abc123xy",
'is_active': True,
'is_superuser': False,
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
user = User.objects.get(email='mo@trac.com')
self.assertEqual(user.profile.full_name, "Mo Polls")
self.assertTrue(user.is_active)
self.assertFalse(user.is_superuser)
self.assertEqual(user, authenticate(username=user.username, password="abc123xy"))
class UserCRUDLTest(TracProDataTest):
def test_create(self):
url = reverse('profiles.user_create')
# log in as an org administrator
self.login(self.admin)
# submit with no fields entered
response = self.url_post('unicef', url, dict())
self.assertEqual(response.status_code, 200)
self.assertFormError(
response, 'form', 'full_name',
'This field is required.')
self.assertFormError(
response, 'form', 'email',
'This field is required.')
self.assertFormError(
response, 'form', 'password',
'This field is required.')
# submit again with all required fields but invalid password
data = {
'full_name': "Mo Polls",
'email': "mo@trac.com",
'password': "123",
'confirm_password': "123",
}
response = self.url_post('unicef', url, data)
self.assertFormError(
response, 'form', 'password',
"Ensure this value has at least 8 characters (it has 3).")
# submit again with valid password but mismatched confirmation
data = {
'full_name': "Mo Polls",
'email': "mo@trac.com",
'password': "Qwerty123",
'confirm_password': "123",
}
response = self.url_post('unicef', url, data)
self.assertFormError(
response, 'form', 'confirm_password',
"Passwords don't match.")
# submit again with valid password and confirmation
data = {
'full_name': "Mo Polls",
'email': "mo@trac.com",
'password': "Qwerty123",
'confirm_password': "Qwerty123",
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
# check new user and profile
user = User.objects.get(email="mo@trac.com")
self.assertEqual(user.profile.full_name, "Mo Polls")
self.assertEqual(user.email, "mo@trac.com")
self.assertEqual(user.username, "mo@trac.com")
# try again with same email address
data = {
'full_name': "Mo Polls II",
'email': "mo@trac.com",
'password': "Qwerty123",
'confirm_password': "Qwerty123",
}
response = self.url_post('unicef', url, data)
self.assertFormError(
response, 'form', None,
"Email address already taken.")
def test_update(self):
url = reverse('profiles.user_update', args=[self.user1.pk])
# log in as an org administrator
self.login(self.admin)
response = self.url_get('unicef', url)
self.assertEqual(response.status_code, 200)
# can assign to any org region
self.assertEqual(len(response.context['form'].fields['regions'].choices), 3)
# submit with no fields entered
response = self.url_post('unicef', url, dict())
self.assertEqual(response.status_code, 200)
self.assertFormError(
response, 'form', 'full_name',
'This field is required.')
self.assertFormError(
response, 'form', 'email',
'This field is required.')
# submit with all fields entered
data = {
'full_name': "Morris",
'email': "mo2@chat.com",
'regions': [self.region3.pk],
'is_active': True,
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
# check updated user and profile
user = User.objects.get(pk=self.user1.pk)
self.assertEqual(user.profile.full_name, "Morris")
self.assertEqual(user.email, "mo2@chat.com")
self.assertEqual(user.username, "mo2@chat.com")
self.assertEqual(list(user.regions.all()), [self.region3])
# submit again for good measure
data = {
'full_name': "Morris",
'email': "mo2@chat.com",
'regions': [self.region3.pk],
'is_active': True,
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
# try giving user someone else's email address
data = {
'full_name': "Morris",
'email': "eric@nyaruka.com",
'password': "Qwerty123",
'confirm_password': "Qwerty123",
}
response = self.url_post('unicef', url, data)
self.assertFormError(
response, 'form', None,
"Email address already taken.")
# check de-activating user
data = {
'full_name': "Morris",
'email': "mo2@chat.com",
'regions': [],
'is_active': False,
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
# check user object is inactive
user = User.objects.get(pk=self.user1.pk)
self.assertFalse(user.is_active)
def test_read(self):
# log in as an org administrator
self.login(self.admin)
# view our own profile
response = self.url_get(
'unicef', reverse('profiles.user_read', args=[self.admin.pk]))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.context['edit_button_url'],
reverse('profiles.user_self'))
# view other user's profile
response = self.url_get(
'unicef', reverse('profiles.user_read', args=[self.user1.pk]))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.context['edit_button_url'],
reverse('profiles.user_update', args=[self.user1.pk]))
# try to view user from other org
response = self.url_get(
'unicef', reverse('profiles.user_read', args=[self.user3.pk]))
self.assertEqual(response.status_code, 404)
# log in as a user
self.login(self.user1)
# view other user's profile
response = self.url_get(
'unicef', reverse('profiles.user_read', args=[self.admin.pk]))
self.assertEqual(response.status_code, 200)
self.assertIsNone(response.context['edit_button_url'])
def test_list(self):
url = reverse('profiles.user_list')
response = self.url_get('unicef', url)
self.assertLoginRedirect(response, 'unicef', url)
# log in as a non-administrator
self.login(self.user1)
response = self.url_get('unicef', url)
self.assertLoginRedirect(response, 'unicef', url)
# log in as an org administrator
self.login(self.admin)
response = self.url_get('unicef', url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['object_list']), 2)
def test_self(self):
url = reverse('profiles.user_self')
# try as unauthenticated
response = self.url_get('unicef', url)
self.assertLoginRedirect(response, 'unicef', url)
# try as superuser (doesn't have a chat profile)
self.login(self.superuser)
response = self.url_get('unicef', url)
self.assertEqual(response.status_code, 404)
# log in as an org administrator
self.login(self.admin)
response = self.url_get('unicef', url)
self.assertEqual(response.status_code, 200)
# log in as a user
self.login(self.user1)
response = self.url_get('unicef', url)
self.assertEqual(response.status_code, 200)
# submit with no fields entered
response = self.url_post('unicef', url, dict())
self.assertEqual(response.status_code, 200)
self.assertFormError(
response, 'form', 'full_name',
'This field is required.')
self.assertFormError(
response, 'form', 'email',
'This field is required.')
# submit with all required fields entered
data = dict(full_name="Morris", email="mo2@trac.com")
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
# check updated user and profile
user = User.objects.get(pk=self.user1.pk)
self.assertEqual(user.profile.full_name, "Morris")
self.assertEqual(user.email, "mo2@trac.com")
self.assertEqual(user.username, "mo2@trac.com")
self.assertEqual(list(user.regions.all()), [self.region1])
# submit with all required fields entered and password fields
old_password_hash = user.password
data = {
'full_name': "Morris",
'email': "mo2@trac.com",
'new_password': "Qwerty123",
'confirm_password': "Qwerty123",
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
# check password has been changed
user = User.objects.get(pk=self.user1.pk)
self.assertNotEqual(user.password, old_password_hash)
# check when user is being forced to change their password
old_password_hash = user.password
self.user1.profile.change_password = True
self.user1.profile.save()
# submit without password
data = dict(full_name="Morris", email="mo2@trac.com")
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 200)
self.assertFormError(
response, 'form', 'password',
'This field is required.')
# submit again with password but no confirmation
data = {
'full_name': "Morris",
'email': "mo2@trac.com",
'password': "Qwerty123",
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 200)
self.assertFormError(
response, 'form', 'confirm_password',
"Passwords don't match.")
# submit again with password and confirmation
data = {
'full_name': "Morris",
'email': "mo2@trac.com",
'password': "Qwerty123",
'confirm_password': "Qwerty123",
}
response = self.url_post('unicef', url, data)
self.assertEqual(response.status_code, 302)
# check password has changed and no longer has to be changed
user = User.objects.get(pk=self.user1.pk)
self.assertFalse(user.profile.change_password)
self.assertNotEqual(user.password, old_password_hash)
class DashUserCRUDLTest(TracProDataTest):
def test_login(self):
url = reverse('users.user_login')
# login without org subdomain
response = self.url_post(None, url, {
'username': 'sam@unicef.org',
'password': 'sam@unicef.org',
})
self.assertRedirects(
response, 'http://testserver/',
fetch_redirect_response=False)
# login with org subdomain
response = self.url_post('unicef', url, {
'username': 'sam@unicef.org',
'password': 'sam@unicef.org',
})
self.assertRedirects(
response, 'http://unicef.testserver/',
fetch_redirect_response=False)
| tracpro/profiles/tests/test_views.py | 16,564 | Non-superuser cannot use this view Not a superuser Post something that would be an error (empty form) and would be a 200 status if we had access. We get redirected to login An error case submit with no fields entered FIXME: this error makes no sense in this context create non-superuser create superuser Non-superuser cannot use this view Change non-superuser to superuser, change their password, etc etc. and back. changing password optional. 'password': "abc123xy", 'confirm_password': "abc123xy", log in as an org administrator submit with no fields entered submit again with all required fields but invalid password submit again with valid password but mismatched confirmation submit again with valid password and confirmation check new user and profile try again with same email address log in as an org administrator can assign to any org region submit with no fields entered submit with all fields entered check updated user and profile submit again for good measure try giving user someone else's email address check de-activating user check user object is inactive log in as an org administrator view our own profile view other user's profile try to view user from other org log in as a user view other user's profile log in as a non-administrator log in as an org administrator try as unauthenticated try as superuser (doesn't have a chat profile) log in as an org administrator log in as a user submit with no fields entered submit with all required fields entered check updated user and profile submit with all required fields entered and password fields check password has been changed check when user is being forced to change their password submit without password submit again with password but no confirmation submit again with password and confirmation check password has changed and no longer has to be changed login without org subdomain login with org subdomain | 1,883 | en | 0.923321 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Monitors allow user instrumentation of the training process.
Monitors are useful to track training, report progress, request early
stopping and more. Monitors use the observer pattern and notify at the following
points:
- when training begins
- before a training step
- after a training step
- when training ends
Monitors are not intended to be reusable.
There are a few pre-defined monitors:
- CaptureVariable: saves a variable's values
- GraphDump: intended for debug only - saves all tensor values
- PrintTensor: outputs one or more tensor values to log
- SummarySaver: saves summaries to a summary writer
- ValidationMonitor: runs model validation, by periodically calculating eval
metrics on a separate data set; supports optional early stopping
For more specific needs, you can create custom monitors by extending one of the
following classes:
- BaseMonitor: the base class for all monitors
- EveryN: triggers a callback every N training steps
Example:
class ExampleMonitor(monitors.BaseMonitor):
def __init__(self):
print 'Init'
def begin(self, max_steps):
print 'Starting run. Will train until step %d.' % max_steps
def end(self):
print 'Completed run.'
def step_begin(self, step):
print 'About to run step %d...' % step
return ['loss_1:0']
def step_end(self, step, outputs):
print 'Done running step %d. The value of "loss" tensor: %s' % (
step, outputs['loss_1:0'])
linear_regressor = LinearRegressor()
example_monitor = ExampleMonitor()
linear_regressor.fit(
x, y, steps=2, batch_size=1, monitors=[example_monitor])
@@get_default_monitors
@@BaseMonitor
@@CaptureVariable
@@CheckpointSaver
@@EveryN
@@ExportMonitor
@@GraphDump
@@LoggingTrainable
@@NanLoss
@@PrintTensor
@@StepCounter
@@StopAtStep
@@SummarySaver
@@ValidationMonitor
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import os
import time
import numpy as np
import six
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import session_run_hook
from tensorflow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import summary_io
# TODO(ptucker): Split each monitor class into a separate file.
# TODO(ptucker): Fail if epoch or step does not monotonically increase?
class BaseMonitor(object):
  """Base class for Monitors.

  Defines basic interfaces of Monitors.
  Monitors can either be run on all workers or, more commonly, restricted
  to run exclusively on the elected chief worker.
  """

  def __init__(self):
    self._begun = False
    self._current_epoch = None
    self._current_step = None
    self._max_steps = None
    self._estimator = None
    self._estimator_locked = False

  @property
  def run_on_all_workers(self):
    # Most monitors should only run on the chief worker; subclasses that are
    # safe to run everywhere (e.g. StopAtStep) override this to return True.
    return False

  def set_estimator(self, estimator):
    """A setter called automatically by the target estimator.

    If the estimator is locked, this method does nothing.

    Args:
      estimator: the estimator that this monitor monitors.

    Raises:
      ValueError: if the estimator is None.
    """
    if self._estimator_locked:
      return
    if estimator is None:
      raise ValueError("Missing estimator.")
    # TODO(mdan): This should fail if called twice with the same estimator.
    self._estimator = estimator

  def _lock_estimator(self):
    """Locks the estimator until _unlock_estimator is called."""
    self._estimator_locked = True

  def _unlock_estimator(self):
    """Unlocks the estimator."""
    self._estimator_locked = False

  def begin(self, max_steps=None):
    """Called at the beginning of training.

    When called, the default graph is the one we are executing.

    Args:
      max_steps: `int`, the maximum global step this training will run until.

    Raises:
      ValueError: if we've already begun a run.
    """
    if self._begun:
      raise ValueError("begin called twice without end.")
    self._max_steps = max_steps
    self._begun = True

  def end(self, session=None):
    """Callback at the end of training/evaluation.

    Args:
      session: A `tf.Session` object that can be used to run ops.

    Raises:
      ValueError: if we've not begun a run.
    """
    _ = session
    if not self._begun:
      raise ValueError("end called without begin.")
    self._max_steps = None
    self._begun = False

  def epoch_begin(self, epoch):
    """Begin epoch.

    Args:
      epoch: `int`, the epoch number.

    Raises:
      ValueError: if we've already begun an epoch, or `epoch` < 0.
    """
    if self._current_epoch is not None:
      raise ValueError("epoch_begin called twice without epoch_end.")
    if epoch < 0:
      raise ValueError("Invalid epoch %s." % epoch)
    self._current_epoch = epoch

  def epoch_end(self, epoch):
    """End epoch.

    Args:
      epoch: `int`, the epoch number.

    Raises:
      ValueError: if we've not begun an epoch, or `epoch` number does not match.
    """
    if self._current_epoch != epoch:
      # Note: the message must be %-formatted here; passing the format string
      # and args separately to ValueError (logging-style) leaves the message
      # unformatted.
      raise ValueError(
          "epoch_end expected %s but got %s." % (self._current_epoch, epoch))
    self._current_epoch = None

  def step_begin(self, step):
    """Callback before training step begins.

    You may use this callback to request evaluation of additional tensors
    in the graph.

    Args:
      step: `int`, the current value of the global step.

    Returns:
      List of `Tensor` objects or string tensor names to be run.

    Raises:
      ValueError: if `step` < 0, or `step` > `max_steps`.
    """
    if (step < 0) or (
        (self._max_steps is not None) and (step > self._max_steps)):
      raise ValueError("Invalid step %s." % step)
    self._current_step = step
    return []

  def step_end(self, step, output):  # pylint: disable=unused-argument
    """Callback after training step finished.

    This callback provides access to the tensors/ops evaluated at this step,
    including the additional tensors for which evaluation was requested in
    `step_begin`.

    In addition, the callback has the opportunity to stop training by returning
    `True`. This is useful for early stopping, for example.

    Note that this method is not called if the call to `Session.run()` that
    followed the last call to `step_begin()` failed.

    Args:
      step: `int`, the current value of the global step.
      output: `dict` mapping `string` values representing tensor names to
        the value resulted from running these tensors. Values may be either
        scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.

    Returns:
      `bool`. True if training should stop.

    Raises:
      ValueError: if we've not begun a step, or `step` number does not match.
    """
    if self._current_step != step:
      # Same formatting fix as in epoch_end: produce a readable message.
      raise ValueError(
          "step_end expected %s but got %s." % (self._current_step, step))
    self._current_step = None
    return False

  def post_step(self, step, session):  # pylint: disable=unused-argument
    """Callback after the step is finished.

    Called after step_end and receives session to perform extra session.run
    calls. If failure occurred in the process, will be called as well.

    Args:
      step: `int`, global step of the model.
      session: `Session` object.
    """
    _ = step, session
def _extract_output(outputs, request):
if request in outputs:
return outputs[request]
return outputs[request.name]
class EveryN(BaseMonitor):
  """Base class for monitors that execute callbacks every N steps.

  This class adds three new callbacks:
    - every_n_step_begin
    - every_n_step_end
    - every_n_post_step

  The callbacks are executed every n steps, or optionally every step for the
  first m steps, where m and n can both be user-specified.

  When extending this class, note that if you wish to use any of the
  `BaseMonitor` callbacks, you must call their respective super implementation:

    def step_begin(self, step):
      super(ExampleMonitor, self).step_begin(step)
      return []

  Failing to call the super implementation will cause unpredictable behavior.

  The `every_n_post_step()` callback is also called after the last step if it
  was not already called through the regular conditions.  Note that
  `every_n_step_begin()` and `every_n_step_end()` do not receive that special
  treatment.
  """
  # TODO(ipolosukhin): Add also every n seconds.

  def __init__(self, every_n_steps=100, first_n_steps=1):
    """Initializes an `EveryN` monitor.

    Args:
      every_n_steps: `int`, the number of steps to allow between callbacks.
      first_n_steps: `int`, specifying the number of initial steps during
        which the callbacks will always be executed, regardless of the value
        of `every_n_steps`. Note that this value is relative to the global
        step.
    """
    super(EveryN, self).__init__()
    self._every_n_steps = every_n_steps
    self._first_n_steps = first_n_steps
    # Last step in the model.
    self._last_successful_step = None
    # Last step at which we called one of the every_n methods
    self._last_active_step = 0
    # Whether the every_n callbacks fired for the step currently in flight;
    # set by step_begin and consumed by step_end/post_step.
    self._every_n_step_begin_called = False

  def every_n_step_begin(self, step):  # pylint: disable=unused-argument
    """Callback before every n'th step begins.

    Args:
      step: `int`, the current value of the global step.

    Returns:
      A `list` of tensors that will be evaluated at this step.
    """
    return []

  def every_n_step_end(self, step, outputs):  # pylint: disable=unused-argument
    """Callback after every n'th step finished.

    This callback provides access to the tensors/ops evaluated at this step,
    including the additional tensors for which evaluation was requested in
    `step_begin`.

    In addition, the callback has the opportunity to stop training by returning
    `True`. This is useful for early stopping, for example.

    Args:
      step: `int`, the current value of the global step.
      outputs: `dict` mapping `string` values representing tensor names to
        the value resulted from running these tensors. Values may be either
        scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.

    Returns:
      `bool`. True if training should stop.
    """
    return False

  def every_n_post_step(self, step, session):
    """Callback after a step is finished or `end()` is called.

    Args:
      step: `int`, the current value of the global step.
      session: `Session` object.
    """
    pass

  def step_begin(self, step):
    """Overrides `BaseMonitor.step_begin`.

    When overriding this method, you must call the super implementation.

    Args:
      step: `int`, the current value of the global step.
    Returns:
      A `list`, the result of every_n_step_begin, if that was called this step,
      or an empty list otherwise.

    Raises:
      ValueError: if called more than once during a step.
    """
    super(EveryN, self).step_begin(step)
    # Fire the every-N callbacks when within the first N steps, when at least
    # every_n_steps have passed since the callbacks last fired, or when this
    # is the final step of the run.
    if (step <= self._first_n_steps or
        step >= (self._every_n_steps + self._last_active_step) or
        step == self._max_steps):  # Note: max_steps can be None here.
      self._every_n_step_begin_called = True
      return self.every_n_step_begin(step)
    self._every_n_step_begin_called = False
    return []

  def step_end(self, step, output):
    """Overrides `BaseMonitor.step_end`.

    When overriding this method, you must call the super implementation.

    Args:
      step: `int`, the current value of the global step.
      output: `dict` mapping `string` values representing tensor names to
        the value resulted from running these tensors. Values may be either
        scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.

    Returns:
      `bool`, the result of every_n_step_end, if that was called this step,
      or `False` otherwise.
    """
    super(EveryN, self).step_end(step, output)
    if self._every_n_step_begin_called:
      return self.every_n_step_end(step, output)
    return False

  def post_step(self, step, session):
    super(EveryN, self).post_step(step, session)
    if self._every_n_step_begin_called:
      self.every_n_post_step(step, session)
      # Record the step only when the callbacks actually fired; this is what
      # step_begin's trigger condition measures against.
      self._last_active_step = step
    # Track the last step that completed, whether or not callbacks fired.
    self._last_successful_step = step

  def end(self, session=None):
    super(EveryN, self).end(session=session)
    if self._last_successful_step != self._last_active_step:
      # The final step did not trigger the callbacks through the regular
      # conditions, so give every_n_post_step one last chance to run.
      self.every_n_post_step(self._last_successful_step, session)
class StopAtStep(BaseMonitor):
  """Monitor to request stop at a specified step."""

  def __init__(self, num_steps=None, last_step=None):
    """Create a StopAtStep monitor.

    This monitor requests stop after either a number of steps have been
    executed or a last step has been reached. Only of the two options can be
    specified.

    if `num_steps` is specified, it indicates the number of steps to execute
    after `begin()` is called. If instead `last_step` is specified, it
    indicates the last step we want to execute, as passed to the `step_begin()`
    call.

    Args:
      num_steps: Number of steps to execute.
      last_step: Step after which to stop.

    Raises:
      ValueError: If one of the arguments is invalid.
    """
    super(StopAtStep, self).__init__()
    # Exactly one of the two stopping criteria must be given.
    if num_steps is None:
      if last_step is None:
        raise ValueError("One of num_steps or last_step must be specified.")
    elif last_step is not None:
      raise ValueError("Only one of num_steps or last_step can be specified.")
    self._num_steps = num_steps
    self._last_step = last_step

  @property
  def run_on_all_workers(self):
    # Every worker must observe the stop request, not just the chief.
    return True

  def step_begin(self, step):
    super(StopAtStep, self).step_begin(step)
    if self._last_step is None:
      # First step seen: convert the relative count into an absolute step.
      self._last_step = step + self._num_steps - 1
    return []

  def step_end(self, step, output):
    super(StopAtStep, self).step_end(step, output)
    should_stop = step >= self._last_step
    return should_stop
# TODO(ptucker): Rename to LoggingTensor since it's not writing to stdout.
class PrintTensor(EveryN):
  """Prints given tensors every N steps.

  This is an `EveryN` monitor and has consistent semantic for `every_n`
  and `first_n`.

  The tensors will be printed to the log, with `INFO` severity.
  """

  def __init__(self, tensor_names, every_n=100, first_n=1):
    """Initializes a PrintTensor monitor.

    Args:
      tensor_names: `dict` of tag to tensor names or
          `iterable` of tensor names (strings).
      every_n: `int`, print every N steps. See `PrintN.`
      first_n: `int`, also print the first N steps. See `PrintN.`
    """
    super(PrintTensor, self).__init__(every_n, first_n)
    if isinstance(tensor_names, dict):
      self._tensor_names = tensor_names
    else:
      # Plain iterable of names: use each name as its own tag.
      self._tensor_names = dict((name, name) for name in tensor_names)

  def every_n_step_begin(self, step):
    super(PrintTensor, self).every_n_step_begin(step)
    return list(self._tensor_names.values())

  def every_n_step_end(self, step, outputs):
    super(PrintTensor, self).every_n_step_end(step, outputs)
    stats = ["%s = %s" % (tag, str(_extract_output(outputs, name)))
             for tag, name in six.iteritems(self._tensor_names)
             if name in outputs]
    logging.info("Step %d: %s", step, ", ".join(stats))
class LoggingTrainable(EveryN):
  """Writes trainable variable values into log every N steps.

  Write the tensors in trainable variables `every_n` steps,
  starting with the `first_n`th step.
  """

  def __init__(self, scope=None, every_n=100, first_n=1):
    """Initializes LoggingTrainable monitor.

    Args:
      scope: An optional string to match variable names using re.match.
      every_n: Print every N steps.
      first_n: Print first N steps.
    """
    super(LoggingTrainable, self).__init__(every_n, first_n)
    self._scope = scope

  def every_n_step_begin(self, step):
    super(LoggingTrainable, self).every_n_step_begin(step)
    # The trainable-variable collection is only populated once the graph has
    # been built, so it cannot be captured in __init__.
    trainables = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES,
                                    scope=self._scope)
    self._names = {var.name: var.value().name for var in trainables}
    return list(self._names.values())

  def every_n_step_end(self, step, outputs):
    super(LoggingTrainable, self).every_n_step_end(step, outputs)
    stats = ["%s = %s" % (tag, str(_extract_output(outputs, name)))
             for tag, name in six.iteritems(self._names)
             if name in outputs]
    logging.info("Logging Trainable: Step %d: %s", step, ", ".join(stats))
class SummarySaver(EveryN):
  """Saves summaries every N steps."""

  def __init__(self,
               summary_op,
               save_steps=100,
               output_dir=None,
               summary_writer=None,
               scaffold=None):
    """Initializes a `SummarySaver` monitor.

    Args:
      summary_op: `Tensor` of type `string`. A serialized `Summary` protocol
          buffer, as output by TF summary methods like `scalar_summary` or
          `merge_all_summaries`.
      save_steps: `int`, save summaries every N steps. See `EveryN`.
      output_dir: `string`, the directory to save the summaries to. Only used
          if no `summary_writer` is supplied.
      summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
          one will be created accordingly.
      scaffold: `Scaffold` to get summary_op if it's not provided.
    """
    # TODO(ipolosukhin): Implement every N seconds.
    super(SummarySaver, self).__init__(every_n_steps=save_steps)
    self._summary_op = summary_op
    if summary_writer is None and output_dir:
      summary_writer = summary_io.SummaryWriter(output_dir)
    self._summary_writer = summary_writer
    self._scaffold = scaffold
    # TODO(mdan): Throw an error if output_dir and summary_writer are None.

  def set_estimator(self, estimator):
    super(SummarySaver, self).set_estimator(estimator)
    # TODO(mdan): This line looks redundant.
    if self._summary_writer is None:
      self._summary_writer = summary_io.SummaryWriter(estimator.model_dir)

  def every_n_step_begin(self, step):
    super(SummarySaver, self).every_n_step_begin(step)
    # Lazily resolve the summary op from the scaffold if it wasn't supplied.
    if self._summary_op is None and self._scaffold is not None:
      self._summary_op = self._scaffold.summary_op
    return [] if self._summary_op is None else [self._summary_op]

  def every_n_step_end(self, step, outputs):
    super(SummarySaver, self).every_n_step_end(step, outputs)
    if self._summary_op is not None:
      summary = _extract_output(outputs, self._summary_op)
      if self._summary_writer:
        self._summary_writer.add_summary(summary, step)
    return False

  def end(self, session=None):
    super(SummarySaver, self).end(session=session)
    if self._summary_writer:
      self._summary_writer.flush()
class ValidationMonitor(EveryN):
  """Runs evaluation of a given estimator, at most every N steps.

  Note that the evaluation is done based on the saved checkpoint, which will
  usually be older than the current step.

  Can do early stopping on validation metrics if `early_stopping_rounds` is
  provided.
  """

  def __init__(self, x=None, y=None, input_fn=None, batch_size=None,
               eval_steps=None,
               every_n_steps=100, metrics=None, early_stopping_rounds=None,
               early_stopping_metric="loss",
               early_stopping_metric_minimize=True, name=None):
    """Initializes a ValidationMonitor.

    Args:
      x: See `BaseEstimator.evaluate`.
      y: See `BaseEstimator.evaluate`.
      input_fn: See `BaseEstimator.evaluate`.
      batch_size: See `BaseEstimator.evaluate`.
      eval_steps: See `BaseEstimator.evaluate`.
      every_n_steps: Check for new checkpoints to evaluate every N steps. If a
          new checkpoint is found, it is evaluated. See `EveryN`.
      metrics: See `BaseEstimator.evaluate`.
      early_stopping_rounds: `int`. If the metric indicated by
          `early_stopping_metric` does not change according to
          `early_stopping_metric_minimize` for this many steps, then training
          will be stopped.
      early_stopping_metric: `string`, name of the metric to check for early
          stopping.
      early_stopping_metric_minimize: `bool`, True if `early_stopping_metric`
          is expected to decrease (thus early stopping occurs when this metric
          stops decreasing), False if `early_stopping_metric` is expected to
          increase. Typically, `early_stopping_metric_minimize` is True for
          loss metrics like mean squared error, and False for performance
          metrics like accuracy.
      name: See `BaseEstimator.evaluate`.

    Raises:
      ValueError: If both x and input_fn are provided.
    """
    super(ValidationMonitor, self).__init__(every_n_steps=every_n_steps,
                                            first_n_steps=-1)
    # TODO(mdan): Checks like this are already done by evaluate.
    if x is None and input_fn is None:
      raise ValueError("Either x or input_fn should be provided.")
    self.x = x
    self.y = y
    self.input_fn = input_fn
    self.batch_size = batch_size
    self.eval_steps = eval_steps
    self.metrics = metrics
    self.early_stopping_rounds = early_stopping_rounds
    self.early_stopping_metric = early_stopping_metric
    self.early_stopping_metric_minimize = early_stopping_metric_minimize
    self.name = name
    # Early-stopping bookkeeping.
    self._best_value_step = None
    self._best_value = None
    self._early_stopped = False
    # Last evaluated checkpoint, so the same one is not evaluated twice.
    self._latest_path = None
    self._latest_path_step = None

  @property
  def early_stopped(self):
    """Returns True if this monitor caused an early stop."""
    return self._early_stopped

  @property
  def best_step(self):
    """Returns the step at which the best early stopping metric was found."""
    return self._best_value_step

  @property
  def best_value(self):
    """Returns the best early stopping metric value found so far."""
    return self._best_value

  def every_n_step_end(self, step, outputs):
    super(ValidationMonitor, self).every_n_step_end(step, outputs)
    # TODO(mdan): The use of step below is probably misleading.
    # The code should probably use the step from the checkpoint, because
    # that's what is being evaluated.
    if self._estimator is None:
      raise ValueError("Missing call to set_estimator.")
    # Evaluation is based on the most recent checkpoint; skip when no
    # checkpoint exists yet or when it was already evaluated.
    latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir)
    if latest_path is None:
      logging.debug("Skipping evaluation since model has not been saved yet "
                    "at step %d.", step)
      return False
    if latest_path is not None and latest_path == self._latest_path:
      logging.debug("Skipping evaluation due to same checkpoint %s for step %d "
                    "as for step %d.", latest_path, step,
                    self._latest_path_step)
      return False
    self._latest_path = latest_path
    self._latest_path_step = step

    # Run evaluation and log every metric it produced.
    outputs_by_name = self._estimator.evaluate(
        x=self.x, y=self.y, input_fn=self.input_fn, batch_size=self.batch_size,
        steps=self.eval_steps, metrics=self.metrics, name=self.name)
    logging.info("Validation (step %d): %s", step, ", ".join(
        "%s = %s" % (metric, str(outputs_by_name[metric]))
        for metric in outputs_by_name))

    # Early stopping logic.
    if self.early_stopping_rounds is None:
      return False
    if self.early_stopping_metric not in outputs_by_name:
      raise ValueError("Metric %s missing from outputs %s." % (
          self.early_stopping_metric, set(outputs_by_name.keys())))
    current_value = outputs_by_name[self.early_stopping_metric]
    improved = (self._best_value is None or
                (self.early_stopping_metric_minimize and
                 current_value < self._best_value) or
                (not self.early_stopping_metric_minimize and
                 current_value > self._best_value))
    if improved:
      self._best_value = current_value
      self._best_value_step = step
    if step - self._best_value_step >= self.early_stopping_rounds:
      logging.info("Stopping. Best step: {} with {} = {}."
                   .format(self._best_value_step,
                           self.early_stopping_metric, self._best_value))
      self._early_stopped = True
      return True
    return False
# TODO(ptucker): This really reads any tensor, not just vars, and requires the
# ':0' suffix on var_name.
class CaptureVariable(EveryN):
  """Captures a variable's values into a collection.

  This monitor is useful for unit testing. You should exercise caution when
  using this monitor in production, since it never discards values.

  This is an `EveryN` monitor and has consistent semantic for `every_n`
  and `first_n`.
  """

  def __init__(self, var_name, every_n=100, first_n=1):
    """Initializes a CaptureVariable monitor.

    Args:
      var_name: `string`. The variable name, including suffix (typically
          ":0").
      every_n: `int`, print every N steps. See `PrintN.`
      first_n: `int`, also print the first N steps. See `PrintN.`
    """
    super(CaptureVariable, self).__init__(every_n, first_n)
    self._var_name = var_name
    # Maps global step -> captured value; entries are never evicted.
    self._var_values = {}

  @property
  def values(self):
    """Returns the values captured so far.

    Returns:
      `dict` mapping `int` step numbers to that values of the variable at the
      respective step.
    """
    return self._var_values

  def every_n_step_begin(self, step):
    super(CaptureVariable, self).every_n_step_begin(step)
    # Request evaluation of the watched variable at this step.
    return [self._var_name]

  def every_n_step_end(self, step, outputs):
    super(CaptureVariable, self).every_n_step_end(step, outputs)
    self._var_values[step] = _extract_output(outputs, self._var_name)
def get_default_monitors(loss_op=None, summary_op=None, save_summary_steps=100,
                         output_dir=None, summary_writer=None):
  """Returns a default set of typically-used monitors.

  Args:
    loss_op: `Tensor`, the loss tensor. This will be printed using
        `PrintTensor` at the default interval.
    summary_op: See `SummarySaver`.
    save_summary_steps: See `SummarySaver`.
    output_dir: See `SummarySaver`.
    summary_writer: See `SummarySaver`.

  Returns:
    `list` of monitors.
  """
  result = []
  if loss_op is not None:
    result.append(PrintTensor(tensor_names={"loss": loss_op.name}))
  if summary_op is not None:
    result.append(
        SummarySaver(summary_op,
                     save_steps=save_summary_steps,
                     output_dir=output_dir,
                     summary_writer=summary_writer))
  return result
class GraphDump(BaseMonitor):
  """Dumps almost all tensors in the graph at every step.

  Note, this is very expensive, prefer `PrintTensor` in production.
  """

  # Op types whose outputs are not worth dumping.
  IGNORE_OPS = ["Const", "Assign", "Identity", "Placeholder",
                "RandomUniform", "Cast", "RestoreSlice"]

  def __init__(self, ignore_ops=None):
    """Initializes GraphDump monitor.

    Args:
      ignore_ops: `list` of `string`. Names of ops to ignore.
          If None, `GraphDump.IGNORE_OPS` is used.
    """
    super(GraphDump, self).__init__()
    self._ignore_ops = ignore_ops or GraphDump.IGNORE_OPS
    # Maps global step -> dict of tensor name -> evaluated value.
    self._data = {}

  def begin(self, max_steps=None):
    """Collects all dumpable tensors from the default graph."""
    super(GraphDump, self).begin(max_steps=max_steps)
    self._tensors = []
    graph = ops.get_default_graph()
    graph_def = graph.as_graph_def()
    for node in graph_def.node:
      if node.op in self._ignore_ops:
        continue
      logging.info("op=%s name=%s.", node.op, node.name)
      try:
        self._tensors.append(graph.get_tensor_by_name(node.name + ":0"))
      except KeyError:
        # Ops with no outputs have no ":0" tensor; skip them.
        pass

  def step_begin(self, step):
    super(GraphDump, self).step_begin(step)
    return self._tensors

  def step_end(self, step, output):
    super(GraphDump, self).step_end(step, output)
    self._data[step] = output

  @property
  def data(self):
    return self._data

  # TODO(ptucker): Handle keys that are in one but not the other.
  def compare(self, other_dump, step, atol=1e-06):
    """Compares two `GraphDump` monitors and returns differences.

    Args:
      other_dump: Another `GraphDump` monitor.
      step: `int`, step to compare on.
      atol: `float`, absolute tolerance in comparison of floating arrays.

    Returns:
      Returns tuple:
        matched: `list` of keys that matched.
        non_matched: `dict` of keys to tuple of 2 mismatched values.

    Raises:
      ValueError: if a key in `data` is missing from `other_dump` at `step`.
    """
    non_matched = {}
    matched = []
    this_output = self.data[step] if step in self.data else {}
    other_output = other_dump.data[step] if step in other_dump.data else {}
    for key in this_output:
      # Fixed: `unicode` is an undefined name on Python 3. Use
      # `six.string_types` (covers str and unicode on Python 2), consistent
      # with `_as_graph_element` in this module.
      if not isinstance(key, six.string_types):
        continue
      if key not in other_output:
        # Fixed: the message and args were previously passed as two separate
        # positional args to ValueError, so it was never %-formatted.
        raise ValueError("%s missing at step %s." % (key, step))
      value1 = _extract_output(this_output, key)
      value2 = _extract_output(other_output, key)
      if isinstance(value1, str):
        # String outputs (e.g. serialized protos) are not compared.
        continue
      if isinstance(value1, np.ndarray):
        if not np.allclose(value1, value2, atol=atol):
          non_matched[key] = value1 - value2
        else:
          matched.append(key)
      else:
        if value1 != value2:
          non_matched[key] = (value1, value2)
        else:
          matched.append(key)
    return matched, non_matched
class ExportMonitor(EveryN):
  """Monitor that exports Estimator every N steps."""

  # TODO(philstahlfeld): Investigate switching export.export_estimator
  # configuration values to **kwargs so that updates to the export_estimator
  # function don't have to be reflected here.
  @deprecated_arg_values(
      "2016-09-23",
      "The signature of the input_fn accepted by export is changing to be "
      "consistent with what's used by tf.Learn Estimator's train/evaluate. "
      "input_fn (and in most cases, input_feature_key) will both become "
      "required args.",
      input_fn=None)
  def __init__(self,
               every_n_steps,
               export_dir,
               input_fn=None,
               input_feature_key=None,
               exports_to_keep=5,
               signature_fn=None,
               default_batch_size=1):
    """Initializes ExportMonitor.

    Args:
      every_n_steps: Run monitor every N steps.
      export_dir: str, folder to export.
      input_fn: A function that takes no argument and returns a tuple of
          (features, targets), where features is a dict of string key to
          `Tensor` and targets is a `Tensor` that's currently not used (and
          so can be `None`).
      input_feature_key: String key into the features dict returned by
          `input_fn` that corresponds to the raw `Example` strings `Tensor`
          that the exported model will take as input. Can only be `None` if
          you're using a custom `signature_fn` that does not use the first
          arg (examples).
      exports_to_keep: int, number of exports to keep.
      signature_fn: Function that returns a default signature and a named
          signature map, given `Tensor` of `Example` strings, `dict` of
          `Tensor`s for features and `dict` of `Tensor`s for predictions.
      default_batch_size: Default batch size of the `Example` placeholder.

    Raises:
      ValueError: If `input_fn` and `input_feature_key` are not both defined
          or are not both `None`.
    """
    super(ExportMonitor, self).__init__(every_n_steps=every_n_steps)
    self._export_dir = export_dir
    self._input_fn = input_fn
    self._input_feature_key = input_feature_key
    self._use_deprecated_input_fn = input_fn is None
    self._exports_to_keep = exports_to_keep
    self._signature_fn = signature_fn
    self._default_batch_size = default_batch_size
    self._last_export_dir = None

  @property
  def export_dir(self):
    return self._export_dir

  @property
  def exports_to_keep(self):
    return self._exports_to_keep

  @property
  def signature_fn(self):
    return self._signature_fn

  @property
  def last_export_dir(self):
    """Returns the directory containing the last completed export.

    Returns:
      The string path to the exported directory. NB: this functionality was
      added on 2016/09/25; clients that depend on the return value may need
      to handle the case where this function returns None because the
      estimator being fitted does not yet return a value during export.
    """
    return self._last_export_dir

  def _export_estimator(self):
    """Runs one export of the tracked estimator and records its directory."""
    self._last_export_dir = self._estimator.export(
        self.export_dir,
        exports_to_keep=self.exports_to_keep,
        signature_fn=self.signature_fn,
        input_fn=self._input_fn,
        default_batch_size=self._default_batch_size,
        input_feature_key=self._input_feature_key,
        use_deprecated_input_fn=self._use_deprecated_input_fn)

  def every_n_step_end(self, step, outputs):
    super(ExportMonitor, self).every_n_step_end(step, outputs)
    try:
      self._export_estimator()
    except RuntimeError:
      # Currently we are not syncronized with saving checkpoints, which leads
      # to runtime errors when we are calling export on the same global step.
      # Exports depend on saved checkpoints for constructing the graph and
      # getting the global step from the graph instance saved in the
      # checkpoint. If the checkpoint is stale with respect to current step,
      # the global step is taken to be the last saved checkpoint's global step
      # and exporter doesn't export the same checkpoint again with the
      # following error.
      logging.info("Skipping exporting because the existing checkpoint has "
                   "already been exported. "
                   "Consider exporting less frequently.")

  def end(self, session=None):
    super(ExportMonitor, self).end(session=session)
    latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir)
    if latest_path is None:
      logging.info("Skipping export at the end since model has not been saved "
                   "yet.")
      return
    try:
      self._export_estimator()
    except RuntimeError:
      logging.info("Skipping exporting for the same step.")
class CheckpointSaver(BaseMonitor):
  """Saves checkpoints every N steps."""

  def __init__(self,
               checkpoint_dir,
               save_secs=None,
               save_steps=None,
               saver=None,
               checkpoint_basename="model.ckpt",
               scaffold=None):
    """Initialize CheckpointSaver monitor.

    Args:
      checkpoint_dir: `str`, base directory for the checkpoint files.
      save_secs: `int`, save every N secs.
      save_steps: `int`, save every N steps.
      saver: `Saver` object, used for saving.
      checkpoint_basename: `str`, base name for the checkpoint files.
      scaffold: `Scaffold`, use to get saver object.

    Raises:
      ValueError: If both `save_steps` and `save_secs` are not `None`.
      ValueError: If both `save_steps` and `save_secs` are `None`.
    """
    logging.info("Create CheckpointSaver.")
    super(CheckpointSaver, self).__init__()
    self._saver = saver
    self._scaffold = scaffold
    self._summary_writer = SummaryWriterCache.get(checkpoint_dir)
    self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
    self._save_secs = save_secs
    self._save_steps = save_steps
    self._last_saved_time = None
    self._last_begin_step = None
    self._last_saved_step = None
    # Exactly one of the two cadence options must be set.
    if save_steps is None and save_secs is None:
      raise ValueError("Either save_steps or save_secs should be provided")
    if (save_steps is not None) and (save_secs is not None):
      raise ValueError("Can not provide both save_steps and save_secs.")

  def begin(self, max_steps=None):
    super(CheckpointSaver, self).begin(max_steps)
    # Reset bookkeeping so the monitor can be driven through a fresh run.
    self._last_saved_time = None
    self._last_begin_step = None
    self._last_saved_step = None

  def step_begin(self, step):
    super(CheckpointSaver, self).step_begin(step)
    self._last_begin_step = step

  def post_step(self, step, session):
    super(CheckpointSaver, self).post_step(step, session)
    # First call always saves, which also initializes the cadence baselines.
    if self._last_saved_time is None:
      self._save(step, session)
    if self._save_steps is not None and (
        step >= self._last_saved_step + self._save_steps):
      self._save(step, session)
    if self._save_secs is not None and (
        time.time() >= self._last_saved_time + self._save_secs):
      self._save(step, session)

  def end(self, session=None):
    super(CheckpointSaver, self).end(session)
    # Save the final state; _save is a no-op if this step was already saved.
    self._save(self._last_begin_step, session)

  def _save(self, step, session):
    """Saves the latest checkpoint."""
    if step == self._last_saved_step:
      return
    logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
    self._last_saved_time = time.time()
    self._last_saved_step = step
    saver = self._scaffold.saver if self._saver is None else self._saver
    saver.save(session, self._save_path, global_step=step)
    self._summary_writer.add_session_log(
        SessionLog(
            status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
        step)
class StepCounter(EveryN):
  """Steps per second monitor."""

  def __init__(self, every_n_steps=100, output_dir=None,
               summary_writer=None):
    super(StepCounter, self).__init__(every_n_steps=every_n_steps)
    self._summary_tag = "global_step/sec"
    self._last_reported_step = None
    self._last_reported_time = None
    self._summary_writer = summary_writer
    # Build a writer from output_dir only when none was supplied directly.
    if summary_writer is None and output_dir:
      self._summary_writer = SummaryWriterCache.get(output_dir)

  def set_estimator(self, estimator):
    super(StepCounter, self).set_estimator(estimator)
    if self._summary_writer is None:
      self._summary_writer = SummaryWriterCache.get(estimator.model_dir)

  def every_n_step_end(self, current_step, outputs):
    now = time.time()
    # The first call only records the baseline; subsequent calls report the
    # rate since the previous report.
    if self._last_reported_time is not None and self._summary_writer:
      elapsed = now - self._last_reported_time
      steps_per_sec = (current_step - self._last_reported_step) / elapsed
      summary = Summary(value=[Summary.Value(tag=self._summary_tag,
                                             simple_value=steps_per_sec)])
      self._summary_writer.add_summary(summary, current_step)
    self._last_reported_step = current_step
    self._last_reported_time = now
class NanLossDuringTrainingError(RuntimeError):
  """Raised by `NanLoss` when the training loss becomes NaN."""

  def __str__(self):
    return "NaN loss during training."
class NanLoss(EveryN):
  """NaN Loss monitor.

  Monitors loss and stops training if loss is NaN.
  Can either fail with exception or just stop training.
  """

  def __init__(self, loss_tensor, every_n_steps=100, fail_on_nan_loss=True):
    """Initializes NanLoss monitor.

    Args:
      loss_tensor: `Tensor`, the loss tensor.
      every_n_steps: `int`, run check every this many steps.
      fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
    """
    super(NanLoss, self).__init__(every_n_steps=every_n_steps)
    self._loss_tensor = loss_tensor
    self._fail_on_nan_loss = fail_on_nan_loss

  def every_n_step_begin(self, step):
    super(NanLoss, self).every_n_step_begin(step)
    # Request evaluation of the loss tensor at this step.
    return [self._loss_tensor]

  def every_n_step_end(self, step, outputs):
    super(NanLoss, self).every_n_step_end(step, outputs)
    if not np.isnan(_extract_output(outputs, self._loss_tensor)):
      return
    failure_message = "Model diverged with loss = NaN."
    if self._fail_on_nan_loss:
      logging.error(failure_message)
      raise NanLossDuringTrainingError
    logging.warning(failure_message)
    # We don't raise an error but we return "should stop" so we stop, but
    # without an exception.
    return True
class RunHookAdapterForMonitors(session_run_hook.SessionRunHook):
  """Wraps monitors into a SessionRunHook."""

  def __init__(self, monitors):
    self._monitors = monitors

  def begin(self):
    self._last_step = None
    self._global_step_tensor = contrib_variables.get_global_step()
    for monitor in self._monitors:
      monitor.begin(max_steps=None)

  def before_run(self, run_context):
    # On the first run, recover the step to report to monitors: the step
    # about to execute is one past the graph's current global step.
    if self._last_step is None:
      self._last_step = run_context.session.run(self._global_step_tensor) + 1
    request = {self._global_step_tensor: self._global_step_tensor}
    fetches = []
    for monitor in self._monitors:
      requested = monitor.step_begin(self._last_step)
      if requested:
        if not isinstance(requested, list):
          raise ValueError("Monitor.step_begin should return a list.")
        fetches.extend(requested)
    if fetches:
      request["monitors"] = {name: _as_graph_element(name)
                             for name in fetches}
    return session_run_hook.SessionRunArgs(request)

  def after_run(self, run_context, run_values):
    results = run_values.results.get("monitors", {})
    # Any monitor may request a stop; the remaining monitors still get their
    # step_end and post_step callbacks.
    for monitor in self._monitors:
      if monitor.step_end(self._last_step, results):
        run_context.request_stop()
    for monitor in self._monitors:
      monitor.post_step(self._last_step, run_context.session)
    self._last_step = run_values.results[self._global_step_tensor] + 1

  def end(self, session):
    self._last_step = None
    for monitor in self._monitors:
      # Older monitors may define end() without a `session` kwarg, so the
      # signature is inspected before passing it.
      if "session" in inspect.getargspec(monitor.end).args:
        monitor.end(session=session)
      else:
        monitor.end()
def _as_graph_element(obj):
  """Retrieves Graph element."""
  graph = ops.get_default_graph()
  if not isinstance(obj, six.string_types):
    # Non-string objects must already be elements of the current graph.
    if not hasattr(obj, "graph") or obj.graph != graph:
      raise ValueError("Passed %s should have graph attribute that is equal "
                       "to current graph %s." % (obj, graph))
    return obj
  # Bare op names refer to the op's first (and only) output tensor.
  name = obj if ":" in obj else obj + ":0"
  element = graph.as_graph_element(name)
  # Check that there is no :1 (e.g. it's single output).
  try:
    graph.as_graph_element(obj + ":1")
  except (KeyError, ValueError):
    pass
  else:
    raise ValueError("Name %s is ambiguous, "
                     "as this `Operation` has multiple outputs "
                     "(at least 2)." % obj)
  return element
| tensorflow/contrib/learn/python/learn/monitors.py | 44,353 | Base class for Monitors.
Defines basic interfaces of Monitors.
Monitors can either be run on all workers or, more commonly, restricted
to run exclusively on the elected chief worker.
Captures a variable's values into a collection.
This monitor is useful for unit testing. You should exercise caution when
using this monitor in production, since it never discards values.
This is an `EveryN` monitor and has consistent semantic for `every_n`
and `first_n`.
Saves checkpoints every N steps.
Base class for monitors that execute callbacks every N steps.
This class adds three new callbacks:
- every_n_step_begin
- every_n_step_end
- every_n_post_step
The callbacks are executed every n steps, or optionally every step for the
first m steps, where m and n can both be user-specified.
When extending this class, note that if you wish to use any of the
`BaseMonitor` callbacks, you must call their respective super implementation:
def step_begin(self, step):
super(ExampleMonitor, self).step_begin(step)
return []
Failing to call the super implementation will cause unpredictible behavior.
The `every_n_post_step()` callback is also called after the last step if it
was not already called through the regular conditions. Note that
`every_n_step_begin()` and `every_n_step_end()` do not receive that special
treatment.
Monitor that exports Estimator every N steps.
Dumps almost all tensors in the graph at every step.
Note, this is very expensive, prefer `PrintTensor` in production.
Writes trainable variable values into log every N steps.
Write the tensors in trainable variables `every_n` steps,
starting with the `first_n`th step.
NaN Loss monitor.
Monitors loss and stops training if loss is NaN.
Can either fail with exception or just stop training.
Prints given tensors every N steps.
This is an `EveryN` monitor and has consistent semantic for `every_n`
and `first_n`.
The tensors will be printed to the log, with `INFO` severity.
Wraps monitors into a SessionRunHook.
Steps per second monitor.
Monitor to request stop at a specified step.
Saves summaries every N steps.
Runs evaluation of a given estimator, at most every N steps.
Note that the evaluation is done based on the saved checkpoint, which will
usually be older than the current step.
Can do early stopping on validation metrics if `early_stopping_rounds` is
provided.
Initializes an `EveryN` monitor.
Args:
every_n_steps: `int`, the number of steps to allow between callbacks.
first_n_steps: `int`, specifying the number of initial steps during
which the callbacks will always be executed, regardless of the value
of `every_n_steps`. Note that this value is relative to the global step
Create a StopAtStep monitor.
This monitor requests stop after either a number of steps have been
executed or a last step has been reached. Only of the two options can be
specified.
if `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `step_begin()`
call.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
Initializes a PrintTensor monitor.
Args:
tensor_names: `dict` of tag to tensor names or
`iterable` of tensor names (strings).
every_n: `int`, print every N steps. See `PrintN.`
first_n: `int`, also print the first N steps. See `PrintN.`
Initializes LoggingTrainable monitor.
Args:
scope: An optional string to match variable names using re.match.
every_n: Print every N steps.
first_n: Print first N steps.
Initializes a `SummarySaver` monitor.
Args:
summary_op: `Tensor` of type `string`. A serialized `Summary` protocol
buffer, as output by TF summary methods like `scalar_summary` or
`merge_all_summaries`.
save_steps: `int`, save summaries every N steps. See `EveryN`.
output_dir: `string`, the directory to save the summaries to. Only used
if no `summary_writer` is supplied.
summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
one will be created accordingly.
scaffold: `Scaffold` to get summary_op if it's not provided.
Initializes a ValidationMonitor.
Args:
x: See `BaseEstimator.evaluate`.
y: See `BaseEstimator.evaluate`.
input_fn: See `BaseEstimator.evaluate`.
batch_size: See `BaseEstimator.evaluate`.
eval_steps: See `BaseEstimator.evaluate`.
every_n_steps: Check for new checkpoints to evaluate every N steps. If a
new checkpoint is found, it is evaluated. See `EveryN`.
metrics: See `BaseEstimator.evaluate`.
early_stopping_rounds: `int`. If the metric indicated by
`early_stopping_metric` does not change according to
`early_stopping_metric_minimize` for this many steps, then training
will be stopped.
early_stopping_metric: `string`, name of the metric to check for early
stopping.
early_stopping_metric_minimize: `bool`, True if `early_stopping_metric` is
expected to decrease (thus early stopping occurs when this metric
stops decreasing), False if `early_stopping_metric` is expected to
increase. Typically, `early_stopping_metric_minimize` is True for
loss metrics like mean squared error, and False for performance
metrics like accuracy.
name: See `BaseEstimator.evaluate`.
Raises:
ValueError: If both x and input_fn are provided.
Initializes a CaptureVariable monitor.
Args:
var_name: `string`. The variable name, including suffix (typically ":0").
every_n: `int`, print every N steps. See `PrintN.`
first_n: `int`, also print the first N steps. See `PrintN.`
Initializes GraphDump monitor.
Args:
ignore_ops: `list` of `string`. Names of ops to ignore.
If None, `GraphDump.IGNORE_OPS` is used.
Initializes ExportMonitor.
Args:
every_n_steps: Run monitor every N steps.
export_dir: str, folder to export.
input_fn: A function that takes no argument and returns a tuple of
(features, targets), where features is a dict of string key to `Tensor`
and targets is a `Tensor` that's currently not used (and so can be
`None`).
input_feature_key: String key into the features dict returned by
`input_fn` that corresponds to the raw `Example` strings `Tensor` that
the exported model will take as input. Can only be `None` if you're
using a custom `signature_fn` that does not use the first arg
(examples).
exports_to_keep: int, number of exports to keep.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `dict` of `Tensor`s for predictions.
default_batch_size: Default batch size of the `Example` placeholder.
Raises:
ValueError: If `input_fn` and `input_feature_key` are not both defined or
are not both `None`.
Initialize CheckpointSaver monitor.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
Raises:
ValueError: If both `save_steps` and `save_secs` are not `None`.
ValueError: If both `save_steps` and `save_secs` are `None`.
Initializes NanLoss monitor.
Args:
loss_tensor: `Tensor`, the loss tensor.
every_n_steps: `int`, run check every this many steps.
fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
Retrieves Graph element.
Locks the estimator until _unlock_estimator is called.
Saves the latest checkpoint.
Unlocks the estimator.
Called at the beginning of training.
When called, the default graph is the one we are executing.
Args:
max_steps: `int`, the maximum global step this training will run until.
Raises:
ValueError: if we've already begun a run.
Returns the step at which the best early stopping metric was found.
Returns the best early stopping metric value found so far.
Compares two `GraphDump` monitors and returns differences.
Args:
other_dump: Another `GraphDump` monitor.
step: `int`, step to compare on.
atol: `float`, absolute tolerance in comparison of floating arrays.
Returns:
Returns tuple:
matched: `list` of keys that matched.
non_matched: `dict` of keys to tuple of 2 mismatched values.
Raises:
ValueError: if a key in `data` is missing from `other_dump` at `step`.
Returns True if this monitor caused an early stop.
Callback at the end of training/evaluation.
Args:
session: A `tf.Session` object that can be used to run ops.
Raises:
ValueError: if we've not begun a run.
Begin epoch.
Args:
epoch: `int`, the epoch number.
Raises:
ValueError: if we've already begun an epoch, or `epoch` < 0.
End epoch.
Args:
epoch: `int`, the epoch number.
Raises:
ValueError: if we've not begun an epoch, or `epoch` number does not match.
Callback after a step is finished or `end()` is called.
Args:
step: `int`, the current value of the global step.
session: `Session` object.
Callback before every n'th step begins.
Args:
step: `int`, the current value of the global step.
Returns:
A `list` of tensors that will be evaluated at this step.
Callback after every n'th step finished.
This callback provides access to the tensors/ops evaluated at this step,
including the additional tensors for which evaluation was requested in
`step_begin`.
In addition, the callback has the opportunity to stop training by returning
`True`. This is useful for early stopping, for example.
Args:
step: `int`, the current value of the global step.
outputs: `dict` mapping `string` values representing tensor names to
the value resulted from running these tensors. Values may be either
scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.
Returns:
`bool`. True if training should stop.
Returns a default set of typically-used monitors.
Args:
loss_op: `Tensor`, the loss tensor. This will be printed using `PrintTensor`
at the default interval.
summary_op: See `SummarySaver`.
save_summary_steps: See `SummarySaver`.
output_dir: See `SummarySaver`.
summary_writer: See `SummarySaver`.
Returns:
`list` of monitors.
Returns the directory containing the last completed export.
Returns:
The string path to the exported directory. NB: this functionality was
added on 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because the
estimator being fitted does not yet return a value during export.
Callback after the step is finished.
Called after step_end and receives session to perform extra session.run
calls. If failure occurred in the process, will be called as well.
Args:
step: `int`, global step of the model.
session: `Session` object.
A setter called automatically by the target estimator.
If the estimator is locked, this method does nothing.
Args:
estimator: the estimator that this monitor monitors.
Raises:
ValueError: if the estimator is None.
Callback before training step begins.
You may use this callback to request evaluation of additional tensors
in the graph.
Args:
step: `int`, the current value of the global step.
Returns:
List of `Tensor` objects or string tensor names to be run.
Raises:
ValueError: if we've already begun a step, or `step` < 0, or
`step` > `max_steps`.
Overrides `BaseMonitor.step_begin`.
When overriding this method, you must call the super implementation.
Args:
step: `int`, the current value of the global step.
Returns:
A `list`, the result of every_n_step_begin, if that was called this step,
or an empty list otherwise.
Raises:
ValueError: if called more than once during a step.
Callback after training step finished.
This callback provides access to the tensors/ops evaluated at this step,
including the additional tensors for which evaluation was requested in
`step_begin`.
In addition, the callback has the opportunity to stop training by returning
`True`. This is useful for early stopping, for example.
Note that this method is not called if the call to `Session.run()` that
followed the last call to `step_begin()` failed.
Args:
step: `int`, the current value of the global step.
output: `dict` mapping `string` values representing tensor names to
the value resulted from running these tensors. Values may be either
scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.
Returns:
`bool`. True if training should stop.
Raises:
ValueError: if we've not begun a step, or `step` number does not match.
Overrides `BaseMonitor.step_end`.
When overriding this method, you must call the super implementation.
Args:
step: `int`, the current value of the global step.
output: `dict` mapping `string` values representing tensor names to
the value resulted from running these tensors. Values may be either
scalars, for scalar tensors, or Numpy `array`, for non-scalar tensors.
Returns:
`bool`, the result of every_n_step_end, if that was called this step,
or `False` otherwise.
Returns the values captured so far.
Returns:
`dict` mapping `int` step numbers to that values of the variable at the
respective step.
Monitors allow user instrumentation of the training process.
Monitors are useful to track training, report progress, request early
stopping and more. Monitors use the observer pattern and notify at the following
points:
- when training begins
- before a training step
- after a training step
- when training ends
Monitors are not intended to be reusable.
There are a few pre-defined monitors:
- CaptureVariable: saves a variable's values
- GraphDump: intended for debug only - saves all tensor values
- PrintTensor: outputs one or more tensor values to log
- SummarySaver: saves summaries to a summary writer
- ValidationMonitor: runs model validation, by periodically calculating eval
metrics on a separate data set; supports optional early stopping
For more specific needs, you can create custom monitors by extending one of the
following classes:
- BaseMonitor: the base class for all monitors
- EveryN: triggers a callback every N training steps
Example:
class ExampleMonitor(monitors.BaseMonitor):
def __init__(self):
print 'Init'
def begin(self, max_steps):
print 'Starting run. Will train until step %d.' % max_steps
def end(self):
print 'Completed run.'
def step_begin(self, step):
print 'About to run step %d...' % step
return ['loss_1:0']
def step_end(self, step, outputs):
print 'Done running step %d. The value of "loss" tensor: %s' % (
step, outputs['loss_1:0'])
linear_regressor = LinearRegressor()
example_monitor = ExampleMonitor()
linear_regressor.fit(
x, y, steps=2, batch_size=1, monitors=[example_monitor])
@@get_default_monitors
@@BaseMonitor
@@CaptureVariable
@@CheckpointSaver
@@EveryN
@@ExportMonitor
@@GraphDump
@@LoggingTrainable
@@NanLoss
@@PrintTensor
@@StepCounter
@@StopAtStep
@@SummarySaver
@@ValidationMonitor
Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== TODO(ptucker): Split each monitor class into a separate file. TODO(ptucker): Fail if epoch or step does not monotonically increase? TODO(mdan): This should fail if called twice with the same estimator. pylint: disable=unused-argument pylint: disable=unused-argument TODO(ipolosukhin): Add also every n seconds. Last step in the model. Last step at which we called one of the every_n methods pylint: disable=unused-argument pylint: disable=unused-argument Note: max_steps can be None here. TODO(ptucker): Rename to LoggingTensor since it's not writing to stdout. Get a list of trainable variables at the begining of every N steps. We cannot get this in __init__ because train_op has not been generated. TODO(ipolosukhin): Implement every N seconds. TODO(mdan): Throw an error if output_dir and summary_writer are None. TODO(mdan): This line looks redundant. TODO(mdan): Checks like this are already done by evaluate. TODO(mdan): The use of step below is probably misleading. The code should probably use the step from the checkpoint, because that's what is being evaluated. Check that we are not running evaluation on the same checkpoint. Run evaluation and log it. Early stopping logic. TODO(ptucker): This really reads any tensor, not just vars, and requires the ':0' suffix on var_name. 
TODO(ptucker): Handle keys that are in one but not the other. TODO(philstahlfeld): Investigate switching export.export_estimator configuration values to **kwargs so that updates to the export_estimator function don't have to be reflected here. Currently we are not syncronized with saving checkpoints, which leads to runtime errors when we are calling export on the same global step. Exports depend on saved checkpoints for constructing the graph and getting the global step from the graph instance saved in the checkpoint. If the checkpoint is stale with respect to current step, the global step is taken to be the last saved checkpoint's global step and exporter doesn't export the same checkpoint again with the following error. We don't raise an error but we return "should stop" so we stop, but without an exception. Check that there is no :1 (e.g. it's single output). | 18,022 | en | 0.797643 |
'''
Image
=====
The :class:`Image` widget is used to display an image::
Example in python::
wimg = Image(source='mylogo.png')
Kv Example::
Image:
source: 'mylogo.png'
size: self.texture_size
Asynchronous Loading
--------------------
To load an image asynchronously (for example from an external webserver), use
the :class:`AsyncImage` subclass::
aimg = AsyncImage(source='http://mywebsite.com/logo.png')
This can be useful as it prevents your application from waiting until the image
is loaded. If you want to display large images or retrieve them from URL's,
using :class:`AsyncImage` will allow these resources to be retrieved on a
background thread without blocking your application.
Alignment
---------
By default, the image is centered and fits inside the widget bounding box.
If you don't want that, you can set `allow_stretch` to True and `keep_ratio`
to False.
You can also inherit from Image and create your own style. For example, if you
want your image to be greater than the size of your widget, you could do::
class FullImage(Image):
pass
And in your kivy language file::
<-FullImage>:
canvas:
Color:
rgb: (1, 1, 1)
Rectangle:
texture: self.texture
size: self.width + 20, self.height + 20
pos: self.x - 10, self.y - 10
'''
__all__ = ('Image', 'AsyncImage')
from kivy.uix.widget import Widget
from kivy.core.image import Image as CoreImage
from kivy.resources import resource_find
from kivy.properties import StringProperty, ObjectProperty, ListProperty, \
AliasProperty, BooleanProperty, NumericProperty, ColorProperty
from kivy.logger import Logger
# delayed imports
Loader = None
class Image(Widget):
    '''Image class, see module documentation for more information.
    '''
    source = StringProperty(None)
    '''Filename / source of your image.
    :attr:`source` is a :class:`~kivy.properties.StringProperty` and
    defaults to None.
    '''
    texture = ObjectProperty(None, allownone=True)
    '''Texture object of the image. The texture represents the original, loaded
    image texture. It is stretched and positioned during rendering according to
    the :attr:`allow_stretch` and :attr:`keep_ratio` properties.
    Depending of the texture creation, the value will be a
    :class:`~kivy.graphics.texture.Texture` or a
    :class:`~kivy.graphics.texture.TextureRegion` object.
    :attr:`texture` is an :class:`~kivy.properties.ObjectProperty` and defaults
    to None.
    '''
    texture_size = ListProperty([0, 0])
    '''Texture size of the image. This represents the original, loaded image
    texture size.
    .. warning::
        The texture size is set after the texture property. So if you listen to
        the change on :attr:`texture`, the property texture_size will not be
        up-to-date. Use self.texture.size instead.
    '''
    # Width/height ratio of the loaded texture; 1.0 when no texture is set
    # (avoids a division by zero and gives a sane default for layout).
    def get_image_ratio(self):
        if self.texture:
            return self.texture.width / float(self.texture.height)
        return 1.
    mipmap = BooleanProperty(False)
    '''Indicate if you want OpenGL mipmapping to be applied to the texture.
    Read :ref:`mipmap` for more information.
    .. versionadded:: 1.0.7
    :attr:`mipmap` is a :class:`~kivy.properties.BooleanProperty` and defaults
    to False.
    '''
    image_ratio = AliasProperty(get_image_ratio, bind=('texture',), cache=True)
    '''Ratio of the image (width / float(height).
    :attr:`image_ratio` is an :class:`~kivy.properties.AliasProperty` and is
    read-only.
    '''
    color = ColorProperty([1, 1, 1, 1])
    '''Image color, in the format (r, g, b, a). This attribute can be used to
    'tint' an image. Be careful: if the source image is not gray/white, the
    color will not really work as expected.
    .. versionadded:: 1.0.6
    :attr:`color` is a :class:`~kivy.properties.ColorProperty` and defaults to
    [1, 1, 1, 1].
    .. versionchanged:: 2.0.0
        Changed from :class:`~kivy.properties.ListProperty` to
        :class:`~kivy.properties.ColorProperty`.
    '''
    allow_stretch = BooleanProperty(False)
    '''If True, the normalized image size will be maximized to fit in the image
    box. Otherwise, if the box is too tall, the image will not be
    stretched more than 1:1 pixels.
    .. versionadded:: 1.0.7
    :attr:`allow_stretch` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to False.
    '''
    keep_ratio = BooleanProperty(True)
    '''If False along with allow_stretch being True, the normalized image
    size will be maximized to fit in the image box and ignores the aspect
    ratio of the image.
    Otherwise, if the box is too tall, the image will not be stretched more
    than 1:1 pixels.
    .. versionadded:: 1.0.8
    :attr:`keep_ratio` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to True.
    '''
    keep_data = BooleanProperty(False)
    '''If True, the underlying _coreimage will store the raw image data.
    This is useful when performing pixel based collision detection.
    .. versionadded:: 1.3.0
    :attr:`keep_data` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to False.
    '''
    anim_delay = NumericProperty(.25)
    '''Delay the animation if the image is sequenced (like an animated gif).
    If anim_delay is set to -1, the animation will be stopped.
    .. versionadded:: 1.0.8
    :attr:`anim_delay` is a :class:`~kivy.properties.NumericProperty` and
    defaults to 0.25 (4 FPS).
    '''
    anim_loop = NumericProperty(0)
    '''Number of loops to play then stop animating. 0 means keep animating.
    .. versionadded:: 1.9.0
    :attr:`anim_loop` is a :class:`~kivy.properties.NumericProperty` and
    defaults to 0.
    '''
    nocache = BooleanProperty(False)
    '''If this property is set True, the image will not be added to the
    internal cache. The cache will simply ignore any calls trying to
    append the core image.
    .. versionadded:: 1.6.0
    :attr:`nocache` is a :class:`~kivy.properties.BooleanProperty` and defaults
    to False.
    '''
    # Compute the displayed (normalized) image size from the widget size,
    # the texture size and the allow_stretch/keep_ratio flags.
    def get_norm_image_size(self):
        if not self.texture:
            return list(self.size)
        ratio = self.image_ratio
        w, h = self.size
        tw, th = self.texture.size
        # ensure that the width is always maximized to the container width
        if self.allow_stretch:
            if not self.keep_ratio:
                return [w, h]
            iw = w
        else:
            iw = min(w, tw)
        # calculate the appropriate height
        ih = iw / ratio
        # if the height is too higher, take the height of the container
        # and calculate appropriate width. no need to test further. :)
        if ih > h:
            if self.allow_stretch:
                ih = h
            else:
                ih = min(h, th)
            iw = ih * ratio
        return [iw, ih]
    norm_image_size = AliasProperty(get_norm_image_size, bind=('texture', 'size', 'allow_stretch', 'image_ratio', 'keep_ratio'), cache=True)
    '''Normalized image size within the widget box.
    This size will always fit the widget size and will preserve the image
    ratio.
    :attr:`norm_image_size` is an :class:`~kivy.properties.AliasProperty` and
    is read-only.
    '''
    def __init__(self, **kwargs):
        self._coreimage = None
        self._loops = 0
        # Bind before calling super() so that a 'source'/'mipmap' value
        # passed in kwargs already triggers a texture load.
        update = self.texture_update
        fbind = self.fbind
        fbind('source', update)
        fbind('mipmap', update)
        super().__init__(**kwargs)
    # Reload the texture whenever 'source' or 'mipmap' changes.
    def texture_update(self, *largs):
        self.set_texture_from_resource(self.source)
    # Resolve `resource` via the resource system, load it as a CoreImage and
    # expose its texture; clears everything on a missing/invalid resource.
    def set_texture_from_resource(self, resource):
        if not resource:
            self._clear_core_image()
            return
        source = resource_find(resource)
        if not source:
            Logger.error('Image: Not found <%s>' % resource)
            self._clear_core_image()
            return
        if self._coreimage:
            self._coreimage.unbind(on_texture=self._on_tex_change)
        try:
            self._coreimage = image = CoreImage(
                source,
                mipmap=self.mipmap,
                anim_delay=self.anim_delay,
                keep_data=self.keep_data,
                nocache=self.nocache
            )
        except Exception:
            Logger.error('Image: Error loading <%s>' % resource)
            self._clear_core_image()
        image = self._coreimage
        if image:
            image.bind(on_texture=self._on_tex_change)
            self.texture = image.texture
    # Propagate animation-delay changes to the core image; a negative
    # value stops the animation.
    def on_anim_delay(self, instance, value):
        if self._coreimage is None:
            return
        self._coreimage.anim_delay = value
        if value < 0:
            self._coreimage.anim_reset(False)
    # Keep texture_size in sync with the current texture.
    def on_texture(self, instance, value):
        self.texture_size = value.size if value else [0, 0]
    # Drop the core image, its binding and the texture, and reset the
    # animation loop counter.
    def _clear_core_image(self):
        if self._coreimage:
            self._coreimage.unbind(on_texture=self._on_tex_change)
        self.texture = None
        self._coreimage = None
        self._loops = 0
    def _on_tex_change(self, *largs):
        # update texture from core image
        self.texture = self._coreimage.texture
        ci = self._coreimage
        # Stop the animation once anim_loop full cycles have played.
        if self.anim_loop and ci._anim_index == len(ci._image.textures) - 1:
            self._loops += 1
            if self.anim_loop == self._loops:
                ci.anim_reset(False)
                self._loops = 0
    def reload(self):
        '''Reload image from disk. This facilitates re-loading of
        images from disk in case the image content changes.
        .. versionadded:: 1.3.0
        Usage::
            im = Image(source = '1.jpg')
            # -- do something --
            im.reload()
            # image will be re-loaded from disk
        '''
        self.remove_from_cache()
        # Toggling 'source' forces the bound texture_update to re-run.
        old_source = self.source
        self.source = ''
        self.source = old_source
    def remove_from_cache(self):
        '''Remove image from cache.
        .. versionadded:: 2.0.0
        '''
        if self._coreimage:
            self._coreimage.remove_from_cache()
    # Honour a late switch to nocache: purge the cache entry and mark the
    # core image so it is not re-cached.
    def on_nocache(self, *args):
        if self.nocache:
            self.remove_from_cache()
            if self._coreimage:
                self._coreimage._nocache = True
class AsyncImage(Image):
    '''Asynchronous Image class. See the module documentation for more
    information.
    .. note::
        The AsyncImage is a specialized form of the Image class. You may
        want to refer to the :mod:`~kivy.loader` documentation and in
        particular, the :class:`~kivy.loader.ProxyImage` for more detail
        on how to handle events around asynchronous image loading.
    .. note::
        AsyncImage currently does not support properties
        :attr:`anim_loop` and :attr:`mipmap` and setting those properties will
        have no effect.
    '''
    __events__ = ('on_error', 'on_load')
    def __init__(self, **kwargs):
        self._found_source = None
        self._coreimage = None
        # Import the Loader lazily (module-level delayed import) so simply
        # importing this module does not start the loader machinery.
        global Loader
        if not Loader:
            from kivy.loader import Loader
        self.fbind('source', self._load_source)
        super().__init__(**kwargs)
    # Resolve 'source' (local path or URI) and hand it to the async Loader;
    # the texture is swapped in when loading completes.
    def _load_source(self, *args):
        source = self.source
        if not source:
            self._clear_core_image()
            return
        if not self.is_uri(source):
            # Local files still go through the resource search path.
            source = resource_find(source)
            if not source:
                Logger.error('AsyncImage: Not found <%s>' % self.source)
                self._clear_core_image()
                return
        self._found_source = source
        self._coreimage = image = Loader.image(
            source,
            nocache=self.nocache,
            mipmap=self.mipmap,
            anim_delay=self.anim_delay
        )
        image.bind(
            on_load=self._on_source_load,
            on_error=self._on_source_error,
            on_texture=self._on_tex_change
        )
        self.texture = image.texture
    # Called by the Loader proxy once the real image is available.
    def _on_source_load(self, value):
        image = self._coreimage.image
        if not image:
            return
        self.texture = image.texture
        self.dispatch('on_load')
    def _on_source_error(self, instance, error=None):
        self.dispatch('on_error', error)
    # Default event handlers; users bind to 'on_error'/'on_load'.
    def on_error(self, error):
        pass
    def on_load(self, *args):
        pass
    def is_uri(self, filename):
        # NOTE(review): this comparison is case-sensitive — 'S3' matches
        # 'S3://...' but not 's3://...'; confirm the intended scheme casing.
        proto = filename.split('://', 1)[0]
        return proto in ('http', 'https', 'ftp', 'smb', 'S3')
    def _clear_core_image(self):
        if self._coreimage:
            self._coreimage.unbind(on_load=self._on_source_load)
        super()._clear_core_image()
        self._found_source = None
    def _on_tex_change(self, *largs):
        if self._coreimage:
            self.texture = self._coreimage.texture
    # Overridden: loading is driven by _load_source, not by the synchronous
    # base-class path.
    def texture_update(self, *largs):
        pass
    def remove_from_cache(self):
        if self._found_source:
            Loader.remove_from_cache(self._found_source)
        super().remove_from_cache()
| kivy/uix/image.py | 13,148 | Asynchronous Image class. See the module documentation for more
information.
.. note::
The AsyncImage is a specialized form of the Image class. You may
want to refer to the :mod:`~kivy.loader` documentation and in
particular, the :class:`~kivy.loader.ProxyImage` for more detail
on how to handle events around asynchronous image loading.
.. note::
AsyncImage currently does not support properties
:attr:`anim_loop` and :attr:`mipmap` and setting those properties will
have no effect.
Image class, see module documentation for more information.
Reload image from disk. This facilitates re-loading of
images from disk in case the image content changes.
.. versionadded:: 1.3.0
Usage::
im = Image(source = '1.jpg')
# -- do something --
im.reload()
# image will be re-loaded from disk
Remove image from cache.
.. versionadded:: 2.0.0
Image
=====
The :class:`Image` widget is used to display an image::
Example in python::
wimg = Image(source='mylogo.png')
Kv Example::
Image:
source: 'mylogo.png'
size: self.texture_size
Asynchronous Loading
--------------------
To load an image asynchronously (for example from an external webserver), use
the :class:`AsyncImage` subclass::
aimg = AsyncImage(source='http://mywebsite.com/logo.png')
This can be useful as it prevents your application from waiting until the image
is loaded. If you want to display large images or retrieve them from URL's,
using :class:`AsyncImage` will allow these resources to be retrieved on a
background thread without blocking your application.
Alignment
---------
By default, the image is centered and fits inside the widget bounding box.
If you don't want that, you can set `allow_stretch` to True and `keep_ratio`
to False.
You can also inherit from Image and create your own style. For example, if you
want your image to be greater than the size of your widget, you could do::
class FullImage(Image):
pass
And in your kivy language file::
<-FullImage>:
canvas:
Color:
rgb: (1, 1, 1)
Rectangle:
texture: self.texture
size: self.width + 20, self.height + 20
pos: self.x - 10, self.y - 10
delayed imports ensure that the width is always maximized to the container width calculate the appropriate height if the height is too higher, take the height of the container and calculate appropriate width. no need to test further. :) update texture from core image | 2,540 | en | 0.783445 |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all activities.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
  """Fetches and prints every activity, paging through the full result set.

  Args:
    client: an initialized ad_manager.AdManagerClient.
  """
  # Initialize appropriate service.
  activity_service = client.GetService('ActivityService', version='v201811')
  # Create a statement to select activities.
  statement = ad_manager.StatementBuilder(version='v201811')
  # Retrieve a small amount of activities at a time, paging
  # through until all activities have been retrieved.
  while True:
    response = activity_service.getActivitiesByStatement(statement.ToStatement(
    ))
    if 'results' in response and len(response['results']):
      for activity in response['results']:
        # Print out some information for each activity.
        print('Activity with ID "%d" and name "%s" was found.\n' %
              (activity['id'], activity['name']))
      statement.offset += statement.limit
    else:
      break
  # Fixed: this was a Python 2 `print` statement (a SyntaxError on Python 3),
  # inconsistent with the print() calls used everywhere else in this example.
  print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
  # Initialize client object.
  # NOTE(review): LoadFromStorage() presumably reads credentials from the
  # default googleads.yaml location — verify for your deployment.
  ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
  main(ad_manager_client)
| examples/ad_manager/v201811/activity_service/get_all_activities.py | 1,771 | !/usr/bin/env python Copyright 2016 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Import appropriate modules from the client library. Initialize appropriate service. Create a statement to select activities. Retrieve a small amount of activities at a time, paging through until all activities have been retrieved. Print out some information for each activity. Initialize client object. | 894 | en | 0.869554 |
#!/usr/bin/env python
from __future__ import print_function
import sys
from Configuration.PyReleaseValidation.MatrixReader import MatrixReader
from Configuration.PyReleaseValidation.MatrixRunner import MatrixRunner
from Configuration.PyReleaseValidation.MatrixInjector import MatrixInjector,performInjectionOptionTest
# ================================================================================
def showRaw(opt):
    """Dump the raw .txt matrix description (prodAgent interface).

    Returns 0 unconditionally; errors surface as exceptions from the reader.
    """
    reader = MatrixReader(opt)
    reader.showRaw(opt.useInput, opt.refRel, opt.fromScratch,
                   opt.raw, opt.step1Only, selected=opt.testList)
    return 0
# ================================================================================
def runSelected(opt):
    """Run (or just show) the selected matrix workflows.

    Optionally injects the prepared workflows into WMAgent when
    opt.wmcontrol is set. Returns 0 on success, non-zero otherwise.
    """
    mrd = MatrixReader(opt)
    mrd.prepare(opt.useInput, opt.refRel, opt.fromScratch)
    # test for wrong input workflows: reject any requested workflow number
    # that is not defined in the matrix.
    if opt.testList:
        definedWF = set(dwf.numId for dwf in mrd.workFlows)
        for twf in opt.testList:
            if twf not in definedWF:
                # Fixed: was ValueError('Not defined workflow ', twf, ' requested'),
                # which produces an awkward tuple-style message.
                raise ValueError('Not defined workflow %s requested' % twf)
    ret = 0
    if opt.show:
        mrd.show(opt.testList, opt.extended, opt.cafVeto)
        # Fixed: the message read 'testListected items:' — a botched
        # find/replace of the word 'selected'.
        if opt.testList:
            print('selected items:', opt.testList)
    else:
        mRunnerHi = MatrixRunner(mrd.workFlows, opt.nProcs, opt.nThreads)
        ret = mRunnerHi.runTests(opt)
    if opt.wmcontrol:
        if ret != 0:
            print('Cannot go on with wmagent injection with failing workflows')
        else:
            wfInjector = MatrixInjector(opt, mode=opt.wmcontrol, options=opt.wmoptions)
            ret = wfInjector.prepare(mrd,
                                     mRunnerHi.runDirs)
            if ret == 0:
                wfInjector.upload()
                wfInjector.submit()
    return ret
# ================================================================================
if __name__ == '__main__':
    # Predefined named workflow selections, addressable via -l/--list.
    #this can get out of here
    predefinedSet={
        'limited' : [5.1, #FastSim ttbar
                     7.3, #CosmicsSPLoose_UP17
                     8, #BH/Cosmic MC
                     25, #MC ttbar
                     4.22, #cosmic data
                     4.53, #run1 data + miniAOD
                     9.0, #Higgs200 charged taus
                     1000, #data+prompt
                     1001, #data+express
                     101.0, #SingleElectron120E120EHCAL
                     136.731, #2016B Photon data
                     136.7611, #2016E JetHT reMINIAOD from 80X legacy
                     136.8311, #2017F JetHT reMINIAOD from 94X reprocessing
                     136.788, #2017B Photon data
                     136.85, #2018A Egamma data
                     140.53, #2011 HI data
                     140.56, #2018 HI data
                     158.0, #2018 HI MC with pp-like reco
                     1306.0, #SingleMu Pt1 UP15
                     1325.7, #test NanoAOD from existing MINI
                     1330, #Run2 MC Zmm
                     135.4, #Run 2 Zee ttbar
                     10042.0, #2017 ZMM
                     10024.0, #2017 ttbar
                     10224.0, #2017 ttbar PU
                     10824.0, #2018 ttbar
                     11634.0, #2021 ttbar
                     12434.0, #2023 ttbar
                     20034.0, #2026D35 ttbar (MTD TDR baseline)
                     20434.0, #2026D41 ttbar (L1T TDR baseline)
                     21234.0, #2026D44 (exercise HF nose)
                     22034.0, #2026D46 ttbar (exercise V11 HGCal)
                     25202.0, #2016 ttbar UP15 PU
                     250202.181, #2018 ttbar stage1 + stage2 premix
                     ],
        'jetmc': [5.1, 13, 15, 25, 38, 39], #MC
        'metmc' : [5.1, 15, 25, 37, 38, 39], #MC
        'muonmc' : [5.1, 124.4, 124.5, 20, 21, 22, 23, 25, 30], #MC
        }
    import optparse
    usage = 'usage: runTheMatrix.py --show -s '
    parser = optparse.OptionParser(usage)
    parser.add_option('-b','--batchName',
                      help='relval batch: suffix to be appended to Campaign name',
                      dest='batchName',
                      default=''
                      )
    parser.add_option('-m','--memoryOffset',
                      help='memory of the wf for single core',
                      dest='memoryOffset',
                      default=3000
                      )
    parser.add_option('--addMemPerCore',
                      help='increase of memory per each n > 1 core: memory(n_core) = memoryOffset + (n_core-1) * memPerCore',
                      dest='memPerCore',
                      default=1500
                      )
    parser.add_option('-j','--nproc',
                      help='number of processes. 0 Will use 4 processes, not execute anything but create the wfs',
                      dest='nProcs',
                      default=4
                      )
    parser.add_option('-t','--nThreads',
                      help='number of threads per process to use in cmsRun.',
                      dest='nThreads',
                      default=1
                      )
    parser.add_option('-n','--showMatrix',
                      help='Only show the worflows. Use --ext to show more',
                      dest='show',
                      default=False,
                      action='store_true'
                      )
    parser.add_option('-e','--extended',
                      help='Show details of workflows, used with --show',
                      dest='extended',
                      default=False,
                      action='store_true'
                      )
    parser.add_option('-s','--selected',
                      help='Run a pre-defined selected matrix of wf. Deprecated, please use -l limited',
                      dest='restricted',
                      default=False,
                      action='store_true'
                      )
    parser.add_option('-l','--list',
                      help='Coma separated list of workflow to be shown or ran. Possible keys are also '+str(predefinedSet.keys())+'. and wild card like muon, or mc',
                      dest='testList',
                      default=None
                      )
    parser.add_option('-r','--raw',
                      help='Temporary dump the .txt needed for prodAgent interface. To be discontinued soon. Argument must be the name of the set (standard, pileup,...)',
                      dest='raw'
                      )
    parser.add_option('-i','--useInput',
                      help='Use recyling where available. Either all, or a coma separated list of wf number.',
                      dest='useInput',
                      default=None
                      )
    parser.add_option('-w','--what',
                      help='Specify the set to be used. Argument must be the name of the set (standard, pileup,...)',
                      dest='what',
                      default='all'
                      )
    parser.add_option('--step1',
                      help='Used with --raw. Limit the production to step1',
                      dest='step1Only',
                      default=False
                      )
    parser.add_option('--maxSteps',
                      help='Only run maximum on maxSteps. Used when we are only interested in first n steps.',
                      dest='maxSteps',
                      default=9999,
                      type="int"
                      )
    parser.add_option('--fromScratch',
                      help='Coma separated list of wf to be run without recycling. all is not supported as default.',
                      dest='fromScratch',
                      default=None
                      )
    parser.add_option('--refRelease',
                      help='Allow to modify the recycling dataset version',
                      dest='refRel',
                      default=None
                      )
    parser.add_option('--wmcontrol',
                      help='Create the workflows for injection to WMAgent. In the WORKING. -wmcontrol init will create the the workflows, -wmcontrol test will dryRun a test, -wmcontrol submit will submit to wmagent',
                      choices=['init','test','submit','force'],
                      dest='wmcontrol',
                      default=None,
                      )
    parser.add_option('--revertDqmio',
                      help='When submitting workflows to wmcontrol, force DQM outout to use pool and not DQMIO',
                      choices=['yes','no'],
                      dest='revertDqmio',
                      default='no',
                      )
    parser.add_option('--optionswm',
                      help='Specify a few things for wm injection',
                      default='',
                      dest='wmoptions')
    parser.add_option('--keep',
                      help='allow to specify for which coma separated steps the output is needed',
                      default=None)
    parser.add_option('--label',
                      help='allow to give a special label to the output dataset name',
                      default='')
    parser.add_option('--command',
                      help='provide a way to add additional command to all of the cmsDriver commands in the matrix',
                      dest='command',
                      default=None
                      )
    parser.add_option('--apply',
                      help='allow to use the --command only for 1 coma separeated',
                      dest='apply',
                      default=None)
    parser.add_option('--workflow',
                      help='define a workflow to be created or altered from the matrix',
                      action='append',
                      dest='workflow',
                      default=None
                      )
    parser.add_option('--dryRun',
                      help='do not run the wf at all',
                      action='store_true',
                      dest='dryRun',
                      default=False
                      )
    parser.add_option('--testbed',
                      help='workflow injection to cmswebtest (you need dedicated rqmgr account)',
                      dest='testbed',
                      default=False,
                      action='store_true'
                      )
    parser.add_option('--noCafVeto',
                      help='Run from any source, ignoring the CAF label',
                      dest='cafVeto',
                      default=True,
                      action='store_false'
                      )
    parser.add_option('--overWrite',
                      help='Change the content of a step for another. List of pairs.',
                      dest='overWrite',
                      default=None
                      )
    parser.add_option('--noRun',
                      help='Remove all run list selection from wfs',
                      dest='noRun',
                      default=False,
                      action='store_true')
    parser.add_option('--das-options',
                      help='Options to be passed to dasgoclient.',
                      dest='dasOptions',
                      default="--limit 0",
                      action='store')
    parser.add_option('--job-reports',
                      help='Dump framework job reports',
                      dest='jobReports',
                      default=False,
                      action='store_true')
    parser.add_option('--ibeos',
                      help='Use IB EOS site configuration',
                      dest='IBEos',
                      default=False,
                      action='store_true')
    opt,args = parser.parse_args()
    if opt.IBEos:
        import os
        # Fixed: the 'commands' module is Python-2 only; its getstatusoutput
        # moved to subprocess in Python 3. Keep both paths so the script runs
        # under either interpreter (the file already imports print_function).
        try:
            from commands import getstatusoutput as run_cmd
        except ImportError:
            from subprocess import getstatusoutput as run_cmd
        ibeos_cache = os.path.join(os.getenv("LOCALRT"), "ibeos_cache.txt")
        if not os.path.exists(ibeos_cache):
            err, out = run_cmd("curl -L -s -o %s https://raw.githubusercontent.com/cms-sw/cms-sw.github.io/master/das_queries/ibeos.txt" % ibeos_cache)
            if err:
                run_cmd("rm -f %s" % ibeos_cache)
                print("Error: Unable to download ibeos cache information")
                print(out)
                sys.exit(err)
        # Prepend the ibeos helper directory to PATH and switch SITECONF.
        for cmssw_env in [ "CMSSW_BASE", "CMSSW_RELEASE_BASE" ]:
            cmssw_base = os.getenv(cmssw_env,None)
            if not cmssw_base: continue
            cmssw_base = os.path.join(cmssw_base,"src/Utilities/General/ibeos")
            if os.path.exists(cmssw_base):
                os.environ["PATH"]=cmssw_base+":"+os.getenv("PATH")
                os.environ["CMS_PATH"]="/cvmfs/cms-ib.cern.ch"
                os.environ["CMSSW_USE_IBEOS"]="true"
                print(">> WARNING: You are using SITECONF from /cvmfs/cms-ib.cern.ch")
                break
    if opt.restricted:
        print('Deprecated, please use -l limited')
        if opt.testList: opt.testList+=',limited'
        else: opt.testList='limited'
    # Map a '--keep'/'--apply' token to an int step index when it is numeric,
    # otherwise keep it as a step name.
    def stepOrIndex(s):
        if s.isdigit():
            return int(s)
        else:
            return s
    # Fixed: wrap map() in list() — under Python 3 map() returns a one-shot
    # iterator, which would break any reuse/indexing downstream. Under
    # Python 2 list(map(...)) is identical to map(...).
    if opt.apply:
        opt.apply=list(map(stepOrIndex,opt.apply.split(',')))
    if opt.keep:
        opt.keep=list(map(stepOrIndex,opt.keep.split(',')))
    if opt.testList:
        testList=[]
        for entry in opt.testList.split(','):
            if not entry: continue
            mapped=False
            for k in predefinedSet:
                if k.lower().startswith(entry.lower()) or k.lower().endswith(entry.lower()):
                    testList.extend(predefinedSet[k])
                    mapped=True
                    break
            if not mapped:
                try:
                    testList.append(float(entry))
                except:
                    print(entry,'is not a possible selected entry')
        opt.testList = list(set(testList))
    if opt.useInput: opt.useInput = opt.useInput.split(',')
    if opt.fromScratch: opt.fromScratch = opt.fromScratch.split(',')
    if opt.nProcs: opt.nProcs=int(opt.nProcs)
    if opt.nThreads: opt.nThreads=int(opt.nThreads)
    if (opt.memoryOffset): opt.memoryOffset=int(opt.memoryOffset)
    if (opt.memPerCore): opt.memPerCore=int(opt.memPerCore)
    if opt.wmcontrol:
        performInjectionOptionTest(opt)
    if opt.overWrite:
        opt.overWrite=eval(opt.overWrite)
    if opt.raw and opt.show: ###prodAgent to be discontinued
        ret = showRaw(opt)
    else:
        ret = runSelected(opt)
    sys.exit(ret)
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.networks.sync_batchnorm import SynchronizedBatchNorm2d
import torch.nn.utils.spectral_norm as spectral_norm
# Returns a function that creates a normalization function
# that does not condition on semantic map
# Returns a function that creates a normalization function
# that does not condition on semantic map
def get_nonspade_norm_layer(opt, norm_type='instance'):
    """Return a factory that wraps a layer with parameter-free normalization.

    Args:
        opt: unused here; kept for interface compatibility with callers.
        norm_type: 'instance', 'batch', 'sync_batch' or 'none', optionally
            prefixed with 'spectral' (e.g. 'spectralinstance') to also apply
            spectral normalization to the layer's weight.

    Returns:
        A callable taking a layer and returning either the (possibly
        spectral-normalized) layer itself, or an nn.Sequential of the layer
        followed by the requested normalization.

    Raises:
        ValueError: if the (sub)norm type is not recognized.
    """
    # helper function to get # output channels of the previous layer
    def get_out_channel(layer):
        if hasattr(layer, 'out_channels'):
            return getattr(layer, 'out_channels')
        return layer.weight.size(0)

    # this function will be returned
    def add_norm_layer(layer):
        # Fixed: subnorm_type was only assigned inside the 'spectral' branch,
        # so any non-spectral norm_type (including the default 'instance')
        # raised UnboundLocalError at the comparison below.
        subnorm_type = norm_type
        if norm_type.startswith('spectral'):
            layer = spectral_norm(layer)
            subnorm_type = norm_type[len('spectral'):]
        if subnorm_type == 'none' or len(subnorm_type) == 0:
            return layer
        # remove bias in the previous layer, which is meaningless
        # since it has no effect after normalization
        if getattr(layer, 'bias', None) is not None:
            delattr(layer, 'bias')
            layer.register_parameter('bias', None)
        if subnorm_type == 'batch':
            norm_layer = nn.BatchNorm2d(get_out_channel(layer), affine=True)
        elif subnorm_type == 'sync_batch':
            norm_layer = SynchronizedBatchNorm2d(get_out_channel(layer), affine=True)
        elif subnorm_type == 'instance':
            norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=False)
        else:
            raise ValueError('normalization layer %s is not recognized' % subnorm_type)
        return nn.Sequential(layer, norm_layer)

    return add_norm_layer
class SPADE(nn.Module):
    """Spatially-adaptive denormalization block.

    Normalizes ``x`` with a parameter-free norm, then modulates it with a
    per-pixel scale (gamma) and shift (beta) predicted from the semantic
    map. ``config_text`` encodes the norm type and kernel size, e.g.
    'spadeinstance3x3'.
    """

    def __init__(self, config_text, norm_nc, label_nc):
        super().__init__()
        assert config_text.startswith('spade')
        parsed = re.search('spade(\D+)(\d)x\d', config_text)
        norm_kind = str(parsed.group(1))
        ks = int(parsed.group(2))
        # Pick the parameter-free normalization applied to the input features.
        if norm_kind == 'instance':
            self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
        elif norm_kind == 'syncbatch':
            self.param_free_norm = SynchronizedBatchNorm2d(norm_nc, affine=False)
        elif norm_kind == 'batch':
            self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
        else:
            raise ValueError('%s is not a recognized param-free norm type in SPADE'
                             % norm_kind)
        # The dimension of the intermediate embedding space. Yes, hardcoded.
        nhidden = 128
        padding = ks // 2
        self.mlp_shared = nn.Sequential(
            nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=padding),
            nn.ReLU()
        )
        self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=padding)
        self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=padding)

    def forward(self, x, segmap):
        # Parameter-free normalized activations.
        normalized = self.param_free_norm(x)
        # Resize the semantic map to the feature resolution, then predict
        # the per-pixel modulation parameters from the shared embedding.
        resized = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
        shared = self.mlp_shared(resized)
        gamma = self.mlp_gamma(shared)
        beta = self.mlp_beta(shared)
        # Modulate: scale around 1, then shift.
        return normalized * (1 + gamma) + beta
| baselines/scripts/segvae/models/networks/normalization.py | 3,467 | Returns a function that creates a normalization function that does not condition on semantic map helper function to get output channels of the previous layer this function will be returned remove bias in the previous layer, which is meaningless since it has no effect after normalization The dimension of the intermediate embedding space. Yes, hardcoded. Part 1. generate parameter-free normalized activations Part 2. produce scaling and bias conditioned on semantic map apply scale and bias | 492 | en | 0.825035 |
import io
import copy
import uuid
import numpy as np
try:
# pip install pycollada
import collada
except BaseException:
collada = None
try:
import PIL.Image
except ImportError:
pass
from .. import util
from .. import visual
from ..constants import log
def load_collada(file_obj, resolver=None, **kwargs):
    """
    Load a COLLADA (.dae) file into a list of trimesh kwargs.
    Parameters
    ----------
    file_obj : file object
      Containing a COLLADA file
    resolver : trimesh.visual.Resolver or None
      For loading referenced files, like texture images
    kwargs : **
      Passed to trimesh.Trimesh.__init__
    Returns
    -------
    loaded : list of dict
      kwargs for Trimesh constructor
    """
    # parse the document with pycollada
    scene = collada.Collada(file_obj)
    # map material id -> trimesh material
    materials = {mat.id: _parse_material(mat.effect, resolver)
                 for mat in scene.materials}
    # geometry name -> Trimesh constructor kwargs
    meshes = {}
    # scene graph edges as a list of dicts
    graph = []
    # walk the scene graph from each root node
    for root in scene.scene.nodes:
        _parse_node(node=root,
                    parent_matrix=np.eye(4),
                    material_map=materials,
                    meshes=meshes,
                    graph=graph,
                    resolver=resolver)
    # kwargs consumed by trimesh's load_kwargs
    return {'class': 'Scene',
            'graph': graph,
            'geometry': meshes}
def export_collada(mesh, **kwargs):
    """
    Export a mesh or a list of meshes as a COLLADA .dae file.
    Parameters
    -----------
    mesh: Trimesh object or list of Trimesh objects
        The mesh(es) to export.
    Returns
    -----------
    export: str, string of COLLADA format output
    """
    meshes = mesh
    # accept either a single mesh or a sequence of meshes
    if not isinstance(mesh, (list, tuple, set, np.ndarray)):
        meshes = [mesh]
    c = collada.Collada()
    nodes = []
    for i, m in enumerate(meshes):
        # Load uv, colors, materials
        uv = None
        colors = None
        # default material used when the mesh defines no visual
        mat = _unparse_material(None)
        if m.visual.defined:
            if m.visual.kind == 'texture':
                mat = _unparse_material(m.visual.material)
                uv = m.visual.uv
            elif m.visual.kind == 'vertex':
                # rescale uint8 RGBA to float RGB in [0, 1]
                colors = (m.visual.vertex_colors / 255.0)[:, :3]
        c.effects.append(mat.effect)
        c.materials.append(mat)
        # Create geometry object
        vertices = collada.source.FloatSource(
            'verts-array', m.vertices.flatten(), ('X', 'Y', 'Z'))
        normals = collada.source.FloatSource(
            'normals-array', m.vertex_normals.flatten(), ('X', 'Y', 'Z'))
        input_list = collada.source.InputList()
        input_list.addInput(0, 'VERTEX', '#verts-array')
        input_list.addInput(1, 'NORMAL', '#normals-array')
        arrays = [vertices, normals]
        if uv is not None:
            texcoords = collada.source.FloatSource(
                'texcoords-array', uv.flatten(), ('U', 'V'))
            input_list.addInput(2, 'TEXCOORD', '#texcoords-array')
            arrays.append(texcoords)
        if colors is not None:
            # color input slot depends on whether texcoords were added
            # NOTE(review): `uv` is None whenever `colors` is set (the
            # visual kinds above are exclusive); `if uv:` on a populated
            # numpy array would raise -- verify if both ever coexist
            idx = 2
            if uv:
                idx = 3
            colors = collada.source.FloatSource('colors-array',
                                                colors.flatten(), ('R', 'G', 'B'))
            input_list.addInput(idx, 'COLOR', '#colors-array')
            arrays.append(colors)
        geom = collada.geometry.Geometry(
            c, uuid.uuid4().hex, uuid.uuid4().hex, arrays
        )
        # every source shares the same index, so repeat each face index
        # once per input array
        indices = np.repeat(m.faces.flatten(), len(arrays))
        matref = u'material{}'.format(i)
        triset = geom.createTriangleSet(indices, input_list, matref)
        geom.primitives.append(triset)
        c.geometries.append(geom)
        matnode = collada.scene.MaterialNode(matref, mat, inputs=[])
        geomnode = collada.scene.GeometryNode(geom, [matnode])
        node = collada.scene.Node(u'node{}'.format(i), children=[geomnode])
        nodes.append(node)
    scene = collada.scene.Scene('scene', nodes)
    c.scenes.append(scene)
    c.scene = scene
    # serialize the document into an in-memory buffer
    b = io.BytesIO()
    c.write(b)
    b.seek(0)
    return b.read()
def _parse_node(node,
                parent_matrix,
                material_map,
                meshes,
                graph,
                resolver=None):
    """
    Recursively parse COLLADA scene nodes.

    Parameters
    ------------
    node : collada scene node
      Current node: geometry, grouping, camera or light node
    parent_matrix : (4, 4) float
      Homogeneous transform accumulated from the scene root
    material_map : dict
      Maps material id -> trimesh material, shared across the scene
    meshes : dict
      Output: primitive id -> Trimesh constructor kwargs (mutated in place)
    graph : list
      Output: scene graph edge dicts (mutated in place)
    resolver : trimesh.visual.Resolver or None
      For loading referenced files such as texture images
    """
    # Parse mesh node
    if isinstance(node, collada.scene.GeometryNode):
        geometry = node.geometry
        # Create local material map from material symbol to actual material
        local_material_map = {}
        for mn in node.materials:
            symbol = mn.symbol
            m = mn.target
            if m.id in material_map:
                local_material_map[symbol] = material_map[m.id]
            else:
                local_material_map[symbol] = _parse_material(m, resolver)
        # Iterate over primitives of geometry
        for i, primitive in enumerate(geometry.primitives):
            # polylists are triangulated before processing
            if isinstance(primitive, collada.polylist.Polylist):
                primitive = primitive.triangleset()
            if isinstance(primitive, collada.triangleset.TriangleSet):
                vertex = primitive.vertex
                vertex_index = primitive.vertex_index
                # un-index: one vertex row per triangle corner
                vertices = vertex[vertex_index].reshape(
                    len(vertex_index) * 3, 3)
                # Get normals if present
                normals = None
                if primitive.normal is not None:
                    normal = primitive.normal
                    normal_index = primitive.normal_index
                    normals = normal[normal_index].reshape(
                        len(normal_index) * 3, 3)
                # Get colors if present
                colors = None
                s = primitive.sources
                if ('COLOR' in s and len(s['COLOR'])
                        > 0 and len(primitive.index) > 0):
                    color = s['COLOR'][0][4].data
                    color_index = primitive.index[:, :, s['COLOR'][0][0]]
                    colors = color[color_index].reshape(
                        len(color_index) * 3, 3)
                # vertices were expanded per-corner, so faces are just
                # sequential index triplets
                faces = np.arange(
                    vertices.shape[0]).reshape(
                    vertices.shape[0] // 3, 3)
                # Get UV coordinates if possible
                vis = None
                if primitive.material in local_material_map:
                    material = copy.copy(
                        local_material_map[primitive.material])
                    uv = None
                    if len(primitive.texcoordset) > 0:
                        # only the first texcoord set is used
                        texcoord = primitive.texcoordset[0]
                        texcoord_index = primitive.texcoord_indexset[0]
                        uv = texcoord[texcoord_index].reshape(
                            (len(texcoord_index) * 3, 2))
                    vis = visual.texture.TextureVisuals(
                        uv=uv, material=material)
                # unique id for this primitive within the geometry
                primid = u'{}.{}'.format(geometry.id, i)
                meshes[primid] = {
                    'vertices': vertices,
                    'faces': faces,
                    'vertex_normals': normals,
                    'vertex_colors': colors,
                    'visual': vis}
                graph.append({'frame_to': primid,
                              'matrix': parent_matrix,
                              'geometry': primid})
    # recurse down tree for nodes with children
    elif isinstance(node, collada.scene.Node):
        if node.children is not None:
            for child in node.children:
                # create the new matrix
                matrix = np.dot(parent_matrix, node.matrix)
                # parse the child node
                _parse_node(
                    node=child,
                    parent_matrix=matrix,
                    material_map=material_map,
                    meshes=meshes,
                    graph=graph,
                    resolver=resolver)
    elif isinstance(node, collada.scene.CameraNode):
        # TODO: convert collada cameras to trimesh cameras
        pass
    elif isinstance(node, collada.scene.LightNode):
        # TODO: convert collada lights to trimesh lights
        pass
def _load_texture(file_name, resolver):
    """
    Fetch a texture by name through the resolver and decode
    it into a PIL image.
    """
    raw = resolver.get(file_name)
    return PIL.Image.open(util.wrap_as_stream(raw))
def _parse_material(effect, resolver):
    """
    Turn a COLLADA effect into a trimesh PBR material.

    Parameters
    ------------
    effect : collada.material.Effect
      Source effect holding phong/lambert-style parameters
    resolver : trimesh.visual.Resolver or None
      Used to load referenced texture images

    Returns
    ------------
    material : trimesh.visual.material.PBRMaterial
      Approximate PBR translation of the effect
    """
    # Compute base color
    baseColorFactor = np.ones(4)
    baseColorTexture = None
    if isinstance(effect.diffuse, collada.material.Map):
        try:
            baseColorTexture = _load_texture(
                effect.diffuse.sampler.surface.image.path, resolver)
        except BaseException:
            log.warning('unable to load base texture',
                        exc_info=True)
    elif effect.diffuse is not None:
        baseColorFactor = effect.diffuse
    # Compute emission color
    emissiveFactor = np.zeros(3)
    emissiveTexture = None
    if isinstance(effect.emission, collada.material.Map):
        try:
            # bug fix: previously loaded the *diffuse* texture here
            # even though the emission map was what was detected
            emissiveTexture = _load_texture(
                effect.emission.sampler.surface.image.path, resolver)
        except BaseException:
            log.warning('unable to load emissive texture',
                        exc_info=True)
    elif effect.emission is not None:
        emissiveFactor = effect.emission[:3]
    # Compute roughness from Blinn-Phong shininess
    roughnessFactor = 1.0
    if (not isinstance(effect.shininess, collada.material.Map)
            and effect.shininess is not None):
        roughnessFactor = np.sqrt(2.0 / (2.0 + effect.shininess))
    # COLLADA has no metallic parameter; assume dielectric
    metallicFactor = 0.0
    # Compute normal texture
    normalTexture = None
    if effect.bumpmap is not None:
        try:
            normalTexture = _load_texture(
                effect.bumpmap.sampler.surface.image.path, resolver)
        except BaseException:
            log.warning('unable to load bumpmap',
                        exc_info=True)
    # fold scalar transparency into the alpha channel of the base color
    if (effect.transparent is not None
            and not isinstance(effect.transparent, collada.material.Map)):
        baseColorFactor = tuple(np.append(baseColorFactor[:3], effect.transparent[3]))
    return visual.material.PBRMaterial(
        emissiveFactor=emissiveFactor,
        emissiveTexture=emissiveTexture,
        normalTexture=normalTexture,
        baseColorTexture=baseColorTexture,
        baseColorFactor=baseColorFactor,
        metallicFactor=metallicFactor,
        roughnessFactor=roughnessFactor)
def _unparse_material(material):
    """
    Turn a trimesh material into a COLLADA material.

    Parameters
    ------------
    material : trimesh.visual.material.PBRMaterial or None
      Material to convert; anything other than a PBRMaterial
      produces a default phong material

    Returns
    ------------
    material : collada.material.Material
      Phong approximation of the input
    """
    # TODO EXPORT TEXTURES
    if isinstance(material, visual.material.PBRMaterial):
        diffuse = material.baseColorFactor
        if diffuse is not None:
            diffuse = list(diffuse)
        emission = material.emissiveFactor
        if emission is not None:
            emission = [float(emission[0]), float(emission[1]),
                        float(emission[2]), 1.0]
        # invert the shininess -> roughness mapping used on import
        # NOTE(review): roughnessFactor == 0 would still divide by zero
        # here -- confirm whether a zero roughness can occur upstream
        shininess = material.roughnessFactor
        if shininess is not None:
            shininess = 2.0 / shininess**2 - 2.0
        # bug fix: float(None) raised TypeError when roughnessFactor
        # was unset; pass None through to pycollada instead
        effect = collada.material.Effect(
            uuid.uuid4().hex, params=[], shadingtype='phong',
            diffuse=diffuse, emission=emission,
            specular=[1.0, 1.0, 1.0, 1.0],
            shininess=float(shininess) if shininess is not None else None
        )
        material = collada.material.Material(
            uuid.uuid4().hex, 'pbrmaterial', effect
        )
    else:
        effect = collada.material.Effect(
            uuid.uuid4().hex, params=[], shadingtype='phong'
        )
        material = collada.material.Material(
            uuid.uuid4().hex, 'defaultmaterial', effect
        )
    return material
def load_zae(file_obj, resolver=None, **kwargs):
    """
    Load a ZAE file, which is just a zipped DAE file.
    Parameters
    -------------
    file_obj : file object
      Contains ZAE data
    resolver : trimesh.visual.Resolver
      Resolver to load additional assets
    kwargs : dict
      Passed to load_collada
    Returns
    ------------
    loaded : dict
      Results of loading
    """
    # unpack the archive into {file name : file object}
    archive = util.decompress(file_obj,
                              file_type='zip')
    # the payload is the first member with a .dae extension
    dae_name = next(name for name in archive.keys()
                    if name.lower().endswith('.dae'))
    # serve textures and other referenced assets from inside the archive
    zip_resolver = visual.resolvers.ZipResolver(archive)
    # hand off to the regular collada loader
    return load_collada(archive[dae_name],
                        resolver=zip_resolver,
                        **kwargs)
# only provide loaders if `pycollada` is installed
# extension -> callable registries consumed by trimesh's loader machinery;
# left empty when the optional dependency is missing so the formats are
# simply unavailable rather than failing at import time
_collada_loaders = {}
_collada_exporters = {}
if collada is not None:
    _collada_loaders['dae'] = load_collada
    _collada_loaders['zae'] = load_zae
    _collada_exporters['dae'] = export_collada
| trimesh/exchange/dae.py | 13,247 | Load a texture from a file into a PIL image.
Turn a COLLADA effect into a trimesh material.
Recursively parse COLLADA scene nodes.
Turn a trimesh material into a COLLADA material.
Export a mesh or a list of meshes as a COLLADA .dae file.
Parameters
-----------
mesh: Trimesh object or list of Trimesh objects
The mesh(es) to export.
Returns
-----------
export: str, string of COLLADA format output
Load a COLLADA (.dae) file into a list of trimesh kwargs.
Parameters
----------
file_obj : file object
Containing a COLLADA file
resolver : trimesh.visual.Resolver or None
For loading referenced files, like texture images
kwargs : **
Passed to trimesh.Trimesh.__init__
Returns
-------
loaded : list of dict
kwargs for Trimesh constructor
Load a ZAE file, which is just a zipped DAE file.
Parameters
-------------
file_obj : file object
Contains ZAE data
resolver : trimesh.visual.Resolver
Resolver to load additional assets
kwargs : dict
Passed to load_collada
Returns
------------
loaded : dict
Results of loading
pip install pycollada load scene using pycollada Create material map from Material ID to trimesh material name : kwargs list of dict create kwargs for load_kwargs Load uv, colors, materials Create geometry object Parse mesh node Create local material map from material symbol to actual material Iterate over primitives of geometry Get normals if present Get colors if present Get UV coordinates if possible recurse down tree for nodes with children create the new matrix parse the child node TODO: convert collada cameras to trimesh cameras TODO: convert collada lights to trimesh lights Compute base color Compute emission color Compute roughness Compute metallic factor Compute normal texture Compute opacity TODO EXPORT TEXTURES a dict, {file name : file object} load the first file with a .dae extension a resolver so the loader can load textures / etc run the regular collada loader only provide loaders if `pycollada` is installed | 1,977 | en | 0.541639 |
#!/usr/bin/env python
"""
Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
import string
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.NORMAL
def tamper(payload, **kwargs):
    """
    Unicode-escapes non-encoded characters in a given payload (not processing already encoded) (e.g. SELECT -> \u0053\u0045\u004C\u0045\u0043\u0054)
    Notes:
        * Useful to bypass weak filtering and/or WAFs in JSON contexts
    >>> tamper('SELECT FIELD FROM TABLE')
    '\\\\u0053\\\\u0045\\\\u004C\\\\u0045\\\\u0043\\\\u0054\\\\u0020\\\\u0046\\\\u0049\\\\u0045\\\\u004C\\\\u0044\\\\u0020\\\\u0046\\\\u0052\\\\u004F\\\\u004D\\\\u0020\\\\u0054\\\\u0041\\\\u0042\\\\u004C\\\\u0045'
    """
    if not payload:
        return payload
    pieces = []
    idx = 0
    length = len(payload)
    while idx < length:
        ch = payload[idx]
        # an existing '%XX' URL-encoding is rewritten as '\u00XX'
        if (ch == '%' and idx < length - 2
                and payload[idx + 1:idx + 2] in string.hexdigits
                and payload[idx + 2:idx + 3] in string.hexdigits):
            pieces.append("\\u00%s" % payload[idx + 1:idx + 3])
            idx += 3
        else:
            # everything else becomes a full 4-digit unicode escape
            pieces.append('\\u%.4X' % ord(ch))
            idx += 1
    return "".join(pieces)
| Toolz/sqlmap/tamper/charunicodeescape.py | 1,214 | Unicode-escapes non-encoded characters in a given payload (not processing already encoded) (e.g. SELECT -> SELECT)
Notes:
* Useful to bypass weak filtering and/or WAFs in JSON contexes
>>> tamper('SELECT FIELD FROM TABLE')
'\\u0053\\u0045\\u004C\\u0045\\u0043\\u0054\\u0020\\u0046\\u0049\\u0045\\u004C\\u0044\\u0020\\u0046\\u0052\\u004F\\u004D\\u0020\\u0054\\u0041\\u0042\\u004C\\u0045'
Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
!/usr/bin/env python | 523 | en | 0.490526 |
# Copyright 2018 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rest_framework import serializers
from .checksum import ChecksumSerializer
class VnfPackageSoftwareImageInfoSerializer(serializers.Serializer):
    """
    Serializer for the VnfPackageSoftwareImageInfo data type
    (software image metadata carried in a VNF package).
    """
    id = serializers.CharField(
        help_text="Identifier of the software image.",
        required=True,
        allow_null=False,
        allow_blank=False
    )
    name = serializers.CharField(
        help_text="Name of the software image.",
        required=True,
        allow_null=True,
        allow_blank=False
    )
    provider = serializers.CharField(
        help_text="Provider of the software image.",
        required=True,
        allow_null=True,
        allow_blank=False
    )
    version = serializers.CharField(
        help_text="Version of the software image.",
        required=True,
        allow_null=True,
        allow_blank=False
    )
    checksum = ChecksumSerializer(
        help_text="Checksum of the software image file.",
        required=True,
        allow_null=False
    )
    # bug fix: help_text was copy-pasted from a termination-type field
    containerFormat = serializers.ChoiceField(
        help_text="Container format indicates whether the software image is in a file that also contains a container, e.g. OVA or DOCKER.",
        choices=["AKI", "AMI", "ARI", "BARE", "DOCKER", "OVA", "OVF"],
        required=True,
        allow_null=True
    )
    diskFormat = serializers.ChoiceField(
        help_text="Disk format of a software image is the format of the underlying disk image.",
        choices=["AKI", "AMI", "ARI", "ISO", "QCOW2", "RAW", "VDI", "VHD", "VHDX", "VMDK"],
        required=True,
        allow_null=True
    )
    createdAt = serializers.DateTimeField(
        help_text="Time when this software image was created.",
        required=True,
        format=None,
        input_formats=None
    )
    minDisk = serializers.IntegerField(
        help_text="The minimal disk for this software image in bytes.",
        required=True,
        allow_null=True
    )
    minRam = serializers.IntegerField(
        help_text="The minimal RAM for this software image in bytes.",
        required=True,
        allow_null=True
    )
    size = serializers.IntegerField(
        help_text="Size of this software image in bytes.",
        required=True,
        allow_null=True
    )
    userMetadata = serializers.DictField(
        help_text="User-defined data.",
        child=serializers.CharField(
            help_text="KeyValue Pairs",
            allow_blank=True
        ),
        required=False,
        allow_null=True
    )
    imagePath = serializers.CharField(
        help_text="Path in the VNF package.",
        required=True,
        allow_null=True,
        allow_blank=False
    )
| catalog/packages/serializers/vnf_pkg_software_image_info.py | 3,191 | Copyright 2018 ZTE Corporation. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 557 | en | 0.868543 |
#!/usr/bin/env python
import numpy as np
import socket, curses, json, traceback, math, argparse, math, sys, os, stat
from operator import itemgetter, attrgetter
from configutils.dfmux_config_constructor import get_physical_id, sq_phys_id_to_info
from configutils.dfmux_config_constructor import uniquifyList, generate_dfmux_lyrebird_config
#from spt3g.util import genericutils as GU # not in the public S4 repo
from spt3g import core, dfmux, calibration
from functools import cmp_to_key
import signal
import warnings
warnings.filterwarnings("ignore")
def split_on_numbers(s):
    '''
    Splits the string into a list where maximal runs of digits and runs of
    non-digits alternate as consecutive elements, preserving order.
    Copied from spt3g_software to fix dependencies (sorry)
    '''
    pieces = []
    prev_was_digit = None
    for ch in s:
        cur_is_digit = ch.isdigit()
        # extend the current run while the digit-ness does not change,
        # otherwise start a new piece
        if pieces and cur_is_digit == prev_was_digit:
            pieces[-1] += ch
        else:
            pieces.append(ch)
        prev_was_digit = cur_is_digit
    return pieces
def str_cmp_with_numbers_sorted(str1, str2):
    '''
    Compares two strings where numbers are sorted according to value, so Sq12 ends up after Sq8, use in sorted function
    Copied from spt3g_software to fix dependencies (sorry)

    Returns -1, 0 or 1 (cmp-style); wrap with functools.cmp_to_key
    to use as a sort key.
    '''
    if str1 == str2:
        return 0
    split1 = split_on_numbers(str1)
    split2 = split_on_numbers(str2)
    # find the widest numeric run so every number can be zero-padded
    # to the same width, making lexicographic order match numeric order
    largestStr = 0
    for l in [split1, split2]:
        for s in l:
            if s[0].isdigit():
                largestStr = len(s) if len(s) > largestStr else largestStr
    for l in [split1, split2]:
        for i in range(len(l)):
            if l[i][0].isdigit():
                l[i] = '0' * (largestStr - len(l[i])) + l[i]
    # bug fix: previously used `reduce`, which is not a builtin on
    # Python 3 and was never imported (NameError at call time)
    p1 = ''.join(split1)
    p2 = ''.join(split2)
    return -1 if p1 < p2 else 1
@core.cache_frame_data(type = core.G3FrameType.Housekeeping, wiring_map = 'WiringMap',
                       tf = 'DfMuxTransferFunction', system = 'ReadoutSystem')
def AddVbiasAndCurrentConv(frame, wiring_map):
    # Annotate Housekeeping frames with per-bolometer bias voltage
    # ('VoltageBias') and counts->amps conversion ('CurrentConv') maps.
    # NOTE(review): the body references `tf` and `system`, which are
    # neither parameters nor module globals here -- presumably the
    # cache_frame_data decorator injects the frame keys named in its
    # kwargs, which would require the signature to also accept them
    # (frame, wiring_map, tf, system); verify against the decorator
    # before relying on this module.
    hk_map = frame['DfMuxHousekeeping']
    v_bias = core.G3MapDouble()
    i_conv = core.G3MapDouble()
    for k in wiring_map.keys():
        # bias voltage stripped of G3 units (plain volts)
        vb = dfmux.unittransforms.bolo_bias_voltage_rms(wiring_map, hk_map,
            bolo = k, tf = tf, system = system) / core.G3Units.V
        # ADC counts -> RMS amps conversion, stripped of G3 units
        ic = dfmux.unittransforms.counts_to_rms_amps(wiring_map, hk_map,
            bolo = k, tf = tf, system = system) / core.G3Units.amp
        v_bias[k] = vb
        i_conv[k] = ic
    frame['VoltageBias'] = v_bias
    frame['CurrentConv'] = i_conv
def make_square_block(n_things):
    """
    Choose (rows, cols) for a near-square grid able to hold n_things.

    Returns
    -------
    tuple of int
        Grid dimensions with rows * cols >= n_things.
    """
    root = n_things ** 0.5
    base = int(math.floor(root))
    # a perfect square fits exactly (bug fix: this case previously
    # returned floats instead of ints)
    if base * base == n_things:
        return (base, base)
    if base * (base + 1) >= n_things:
        return (base, base + 1)
    # bug fix: (base, base + 1) could be too small, e.g. n_things = 3
    # produced a 1x2 grid with only 2 slots; grow to the next square
    return (base + 1, base + 1)
def write_get_hk_script(fn, hostname, port):
    """Write a small executable shell script that polls housekeeping
    data from hostname:port via netcat."""
    contents = '''#!/bin/bash
nc -w 1 %s %d
''' % (hostname, port)
    with open(fn, 'w') as script_file:
        script_file.write(contents)
    # add user-execute permission on top of the existing mode bits
    mode = os.stat(fn).st_mode
    os.chmod(fn, mode | stat.S_IXUSR)
class BoloPropertiesFaker(object):
    # Pipeline module that synthesizes a Calibration frame containing
    # fake BolometerProperties (grid-layout positions) when a wiring map
    # arrives without any real calibration data.
    def __init__(self):
        self.wiring_map = None        # cached WiringMap from the Wiring frame
        self.bolo_props = None        # real or faked BolometerPropertiesMap
        self.sent_off = False         # NOTE(review): never read afterwards
        self.default_tf = 'spt3g_filtering_2017_full'  # fallback transfer function id
        return
    def __call__(self, frame):
        # remember the most recent transfer function seen in any frame
        if 'DfMuxTransferFunction' in frame:
            self.default_tf = frame['DfMuxTransferFunction']
        if frame.type == core.G3FrameType.Wiring:
            self.wiring_map = frame['WiringMap']
            return self.send_off(frame)
        elif frame.type == core.G3FrameType.Calibration:
            # real calibration data takes precedence over faking
            if 'BolometerProperties' in frame:
                self.bolo_props = frame['BolometerProperties']
            elif 'NominalBolometerProperties' in frame:
                self.bolo_props = frame['NominalBolometerProperties']
    def send_off(self, frame):
        # only fake when we have wiring but no calibration at all
        if not self.wiring_map is None and self.bolo_props is None:
            #faking the frame data
            self.bolo_props = calibration.BolometerPropertiesMap()
            # count channels per module and collect unique squid ids
            n_chans = 0
            squids = {}
            for k in self.wiring_map.keys():
                wm = self.wiring_map[k]
                c = wm.channel + 1
                if c > n_chans:
                    n_chans = c
                sq = get_physical_id(wm.board_serial,
                                     wm.crate_serial,
                                     wm.board_slot,
                                     wm.module + 1)
                squids[sq] = 1
            n_squids = len(squids.keys())
            # near-square grids: squids overall, channels within a squid
            sq_layout = make_square_block(n_squids)
            ch_layout = make_square_block(n_chans)
            sq_x_sep = ch_layout[0] + 1
            sq_y_sep = ch_layout[1] + 1
            ch_x_sep = 1
            ch_y_sep = 1
            # assign each squid the top-left corner of its channel block
            for i, sq in enumerate( sorted(squids.keys()) ):
                x = i % sq_layout[0]
                y = i // sq_layout[0]
                squids[sq] = (1.2 * x * ch_layout[0], 1.2* y * ch_layout[1])
            #need nsquids
            #need nbolos per squid
            for k in self.wiring_map.keys():
                wm = self.wiring_map[k]
                sq_id = get_physical_id(wm.board_serial,
                                        wm.crate_serial,
                                        wm.board_slot,
                                        wm.module + 1)
                w_id = get_physical_id(wm.board_serial,
                                       wm.crate_serial,
                                       wm.board_slot)
                sql = squids[sq_id]
                # place the channel inside its squid's block
                x = sql[0] + ((wm.channel) % ch_layout[0]) * ch_x_sep
                y = sql[1] + ((wm.channel) // ch_layout[0]) * ch_y_sep
                bp = calibration.BolometerProperties()
                bp.physical_name = k
                bp.band = 0
                bp.pol_angle = 0
                bp.pol_efficiency = 0
                bp.wafer_id = w_id
                bp.squid_id = sq_id
                bp.x_offset = float(x)
                bp.y_offset = float(y)
                self.bolo_props[k] = bp
            # emit the fake calibration frame ahead of the wiring frame
            out_frame = core.G3Frame(core.G3FrameType.Calibration)
            out_frame['BolometerProperties'] = self.bolo_props
            out_frame['DfMuxTransferFunction'] = self.default_tf
            return [out_frame, frame]
        else:
            return frame
class BirdConfigGenerator(object):
    # Pipeline module that writes the lyrebird visualization config file
    # (plus a get-housekeeping helper script) once BOTH a wiring map and
    # bolometer properties have been received.
    def __init__(self,
                 lyrebird_output_file = '',
                 get_hk_script_name= '',
                 hostname = '', hk_hostname = '',
                 port = 3, hk_port = 3, get_hk_port = 3,
                 dv_buffer_size = 0, min_max_update_interval = 0,
                 rendering_sub_sampling = 1, max_framerate = 0,
                 mean_decay_factor = 0.01
             ):
        self.l_fn = lyrebird_output_file
        self.get_hk_script_name = get_hk_script_name
        self.is_written = False
        # the two inputs we wait for before writing the config
        self.bolo_props = None
        self.wiring_map = None
        self.hostname = hostname
        self.hk_hostname = hk_hostname
        self.port = port
        self.hk_port = hk_port
        self.get_hk_port = get_hk_port
        self.dv_buffer_size = dv_buffer_size
        self.min_max_update_interval = min_max_update_interval
        self.rendering_sub_sampling = rendering_sub_sampling
        self.max_framerate = max_framerate
        self.mean_decay_factor = mean_decay_factor
    def __call__(self, frame):
        if frame.type == core.G3FrameType.Calibration:
            # prefer real properties, fall back to nominal ones
            if 'BolometerProperties' in frame:
                bp_id = 'BolometerProperties'
            elif 'NominalBolometerProperties' in frame:
                bp_id = 'NominalBolometerProperties'
            else:
                raise RuntimeError("BolometerProperties fucked")
            self.bolo_props = frame[bp_id]
            self.write_config()
        elif frame.type == core.G3FrameType.Wiring:
            self.wiring_map = frame['WiringMap']
            self.write_config()
    def write_config(self):
        # wait until both inputs have arrived
        if self.wiring_map is None or self.bolo_props is None:
            return
        config_dic = generate_dfmux_lyrebird_config(
            self.l_fn,
            self.wiring_map, self.bolo_props,
            hostname = self.hostname,
            hk_hostname = self.hk_hostname,
            port = self.port,
            hk_port = self.hk_port,
            control_host = self.hostname,
            gcp_get_hk_port = self.get_hk_port,
            dv_buffer_size = self.dv_buffer_size,
            min_max_update_interval = self.min_max_update_interval,
            sub_sampling = self.rendering_sub_sampling,
            max_framerate = self.max_framerate,
            mean_decay_factor = self.mean_decay_factor
        )
        # companion script lyrebird uses to poke the housekeeping port
        write_get_hk_script(self.get_hk_script_name,
                            self.hostname, self.get_hk_port)
        print("Done writing config file")
class IdSerialMapper(object):
    """Bidirectional mapping between dfmux board serial numbers and
    their physical (crate/slot) board ids, built from a wiring map."""
    def __init__(self, wiring_map):
        self.mp = {}
        self.mp_inv = {}
        for key in wiring_map.keys():
            channel_info = wiring_map[key]
            phys_id = get_physical_id(channel_info.board_serial,
                                      channel_info.crate_serial,
                                      channel_info.board_slot)
            self.mp[channel_info.board_serial] = phys_id
            self.mp_inv[phys_id] = channel_info.board_serial
    def get_id(self, serial):
        """Physical board id for a board serial number."""
        return self.mp[serial]
    def get_serial(self, id):
        """Board serial number for a physical board id."""
        return self.mp_inv[id]
###########################
## Squid display portion ##
###########################
def add_timestamp_info(screen, y, x, ts, col_index):
    """Draw the timestamp description (without the fractional-seconds
    suffix) at (y, x) using the given curses color pair."""
    desc = ts.Description()
    trimmed = desc[:desc.rfind('.')]
    screen.addstr(y, x, trimmed, curses.color_pair(col_index))
#need screen geometry and squid list and squid mapping
def add_squid_info(screen, y, x,
                   sq_label, sq_label_size,
                   carrier_good, nuller_good, demod_good,
                   temperature_good,
                   voltage_good,
                   max_size,
                   bolometer_good,
                   fir_stage,
                   #routing_good,
                   feedback_on,
                   bolo_label = '',
                   neutral_c = 3, good_c = 2, bad_c = 1):
    """Draw one squid's status line: its label followed by one colored
    character per health flag, plus an optional bolometer problem label."""
    col_map = {True: curses.color_pair(good_c),
               False: curses.color_pair(bad_c)}
    cursor = x
    screen.addstr(y, cursor, sq_label, curses.color_pair(neutral_c))
    cursor += sq_label_size
    # one character per flag, green when healthy, red otherwise;
    # the fir stage digit is "good" only at the nominal value 6
    status_flags = [('C', carrier_good),
                    ('N', nuller_good),
                    ('D', demod_good),
                    ('T', temperature_good),
                    ('V', voltage_good),
                    ('%d' % fir_stage, fir_stage == 6),
                    ('F', feedback_on)]
    for text, healthy in status_flags:
        screen.addstr(y, cursor, text, col_map[healthy])
        cursor += 1
    if (not bolometer_good):
        # truncate so label + flags + text fit inside the column
        truncated = ' ' + bolo_label[:(max_size - 7 - sq_label_size)]
        screen.addstr(y, cursor, truncated, col_map[False])
def load_squid_info_from_hk( screen, y, x,
                             hk_map,
                             sq_dev_id, sq_label, sq_label_size,
                             max_size, serial_mapper):
    # Extract one squid module's health from the housekeeping map and draw
    # its status line via add_squid_info.  All flags default to "bad" so a
    # squid with no housekeeping data displays as unhealthy with 'NoData'.
    carrier_good = False
    nuller_good = False
    demod_good = False
    temp_good = False
    volt_good = False
    bolometer_good = False
    full_label = 'NoData'
    fir_stage = 0
    routing_good = False
    feedback_on = False
    board_id, mezz_num, module_num = sq_phys_id_to_info(sq_dev_id)
    board_serial = serial_mapper.get_serial(board_id)
    #code for loading hk info for display
    if (not hk_map is None) and board_serial in hk_map:
        board_info = hk_map[board_serial]
        mezz_info = hk_map[board_serial].mezz[mezz_num]
        module_info = hk_map[board_serial].mezz[mezz_num].modules[module_num]
        fir_stage = int(board_info.fir_stage)
        routing_good = module_info.routing_type.lower() == 'routing_nul'
        feedback_on = module_info.squid_feedback.lower() == 'squid_lowpass'
        # "good" here means the DAC/ADC is not railed
        carrier_good = not module_info.carrier_railed
        nuller_good = not module_info.nuller_railed
        demod_good = not module_info.demod_railed
        # True when every monitored value present in dr is within range
        def dic_range_check(dr, dv):
            for k in dv.keys():
                if (not k in dr):
                    continue
                rng = dr[k]
                v = dv[k]
                if v < rng[0] or v > rng[1]:
                    return False
            return True
        # acceptable motherboard rail voltages (volts)
        voltage_range = {'MOTHERBOARD_RAIL_VCC5V5': (5,6),
                         'MOTHERBOARD_RAIL_VADJ': (2,3),
                         'MOTHERBOARD_RAIL_VCC3V3': (3,3.6),
                         'MOTHERBOARD_RAIL_VCC1V0': (0.8, 1.2),
                         'MOTHERBOARD_RAIL_VCC1V2': (1, 1.5),
                         'MOTHERBOARD_RAIL_VCC12V0': (11, 13),
                         'MOTHERBOARD_RAIL_VCC1V8': (1.6, 2),
                         'MOTHERBOARD_RAIL_VCC1V5': (1.3, 1.7),
                         'MOTHERBOARD_RAIL_VCC1V0_GTX': (0.7, 1.3)}
        # acceptable motherboard temperatures (degrees C)
        temp_range = {'MOTHERBOARD_TEMPERATURE_FPGA': (0,80),
                      'MOTHERBOARD_TEMPERATURE_POWER': (0,80),
                      'MOTHERBOARD_TEMPERATURE_ARM': (0,80),
                      'MOTHERBOARD_TEMPERATURE_PHY': (0,80)}
        #mezz voltages
        mezz_voltage_range = {'MEZZANINE_RAIL_VCC12V0': (11,13),
                              'MEZZANINE_RAIL_VADJ': (2,3),
                              'MEZZANINE_RAIL_VCC3V3': (3,4) }
        temp_good = dic_range_check( temp_range, board_info.temperatures)
        # NOTE(review): `or` means a mezzanine rail failure is masked when
        # the motherboard rails pass (and vice versa) -- confirm whether
        # `and` was intended here
        volt_good = ( dic_range_check( voltage_range, board_info.voltages) or
                      dic_range_check( mezz_voltage_range, mezz_info.voltages)
        )
        # tally per-channel problems: DAN railed, carrier/demod frequency
        # mismatch, or DAN loops disabled while the carrier is active
        bolometer_good = True
        bolo_label = ''
        n_railed = 0
        n_diff_freq = 0
        n_dan_off = 0
        for b in module_info.channels.keys():
            chinfo = module_info.channels[b]
            if (chinfo.dan_railed):
                n_railed += 1
            elif (chinfo.carrier_frequency != chinfo.demod_frequency):
                n_diff_freq += 1
            elif ( (not (chinfo.dan_accumulator_enable and
                         chinfo.dan_feedback_enable and
                         chinfo.dan_streaming_enable ) )
                   and (chinfo.carrier_frequency > 0 and chinfo.carrier_amplitude > 0) ):
                n_dan_off += 1
        bolometer_good = not (n_railed or n_diff_freq or n_dan_off)
        # summarize the first (highest-priority) problem type in the label
        if not bolometer_good:
            if n_railed:
                full_label = "DanRail:%s"%(n_railed)
            elif n_diff_freq:
                full_label = "CDDiffFreq:%s"%(n_diff_freq)
            elif n_dan_off:
                full_label = "DanOff:%s"%(n_dan_off)
        else:
            full_label = ''
    add_squid_info(screen, y, x,
                   sq_label, sq_label_size,
                   carrier_good, nuller_good, demod_good,
                   temp_good, volt_good,
                   max_size,
                   bolometer_good,
                   fir_stage,
                   #routing_good,
                   feedback_on,
                   bolo_label = full_label,
                   )
def GetHousekeepingMessenger(frame, hostname, port):
    """On each Wiring frame, poke the housekeeping collector at
    hostname:port with netcat to trigger a housekeeping dump."""
    if frame.type != core.G3FrameType.Wiring:
        return
    os.system( "nc %s %d" % (hostname, port) )
class SquidDisplay(object):
    # Pipeline module that renders a live curses dashboard with one status
    # line per squid, laid out in fixed-width columns.  Initialized lazily
    # from the first Wiring frame; updated on each Housekeeping frame.
    def __init__(self,
                 squids_per_col = 32,
                 squid_col_width = 30):
        self.squids_list = None      # None until the Wiring frame arrives
        self.squids_per_col = squids_per_col
        self.squid_col_width = squid_col_width
        self.serial_mapper = None
        # legend text, one line per status flag column
        self.str_id_lst = [" Carrier",
                           " Nuller",
                           " Demod",
                           " Temp",
                           " Voltage",
                           " fir#",
                           " squid Feedback"
        ]
        # index of the character to highlight in each legend line (the
        # letter that appears in the per-squid flag string)
        self.highlight_index = [7 for s in self.str_id_lst]
    def init_squids(self, squids_list) :
        # legend lines + separator occupy the first rows of column one
        self.n_squids = len(squids_list) + len(self.str_id_lst) + 1
        self.squids_list = squids_list
        self.sq_label_size = max(map(len, squids_list)) + 3
        ncols = int(math.ceil(float(self.n_squids)/self.squids_per_col))
        self.screen_size_x = ncols * self.squid_col_width
        self.screen_size_y = self.squids_per_col + 2
        self.pos_map = {}
        #assign an x, y location to each squid
        for j, sq in enumerate(sorted(squids_list, key=cmp_to_key(str_cmp_with_numbers_sorted))):
            i = j + len(self.str_id_lst) + 1
            y = i % self.squids_per_col + 1
            x = 1 + self.squid_col_width * ( i // self.squids_per_col)
            self.pos_map[sq] = (x,y)
        self.stdscr = curses.initscr()
        curses.start_color()
        # Turn off echoing of keys, and enter cbreak mode,
        # where no buffering is performed on keyboard input
        curses.noecho()
        curses.cbreak()
        curses.curs_set(0)
        curses.init_pair(1, curses.COLOR_RED, curses.COLOR_WHITE)
        curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)
        curses.init_pair(3, curses.COLOR_BLUE, curses.COLOR_BLACK)
        curses.init_pair(4, curses.COLOR_YELLOW, curses.COLOR_BLACK)
        curses.init_pair(5, curses.COLOR_BLUE, curses.COLOR_WHITE)
        self.stdscr.clear()
        # ignore terminal resizes so curses does not interrupt us
        signal.signal(signal.SIGWINCH, signal.SIG_IGN)
    def __call__(self, frame):
        if frame.type == core.G3FrameType.Wiring:
            # derive the unique squid id list and set up the screen
            wiring_map = frame['WiringMap']
            squid_ids = []
            for k in wiring_map.keys():
                wm = wiring_map[k]
                squid_ids.append( get_physical_id(wm.board_serial,
                                                  wm.crate_serial,
                                                  wm.board_slot,
                                                  wm.module + 1) )
            squid_ids = uniquifyList(squid_ids)
            self.init_squids(squid_ids)
            self.serial_mapper = IdSerialMapper(frame['WiringMap'])
        elif frame.type == core.G3FrameType.Housekeeping:
            # cannot draw before the wiring frame has set up the layout
            if self.squids_list is None:
                return
            #do update
            if not frame is None:
                hk_data = frame['DfMuxHousekeeping']
            else:
                hk_data = None
            self.stdscr.clear()
            y, x = self.stdscr.getmaxyx()
            # refuse to draw into a terminal smaller than the layout
            if y < self.screen_size_y or x < self.screen_size_x:
                screen = self.stdscr.subwin(0, x, 0, 0)
                screen.addstr(0,0, 'Terminal is too small %d %d'%(y,x), curses.color_pair(1))
                screen.refresh()
                return
            screen = self.stdscr.subwin(0, self.screen_size_x, 0, 0)
            screen.clear()
            #screen.box()
            #CNDTV6F
            if not hk_data is None:
                # NOTE(review): hk_data.keys()[0] requires keys() to return
                # an indexable sequence (a G3 map, not a Python 3 dict) --
                # verify the spt3g container type
                add_timestamp_info(screen, 0,2, hk_data[hk_data.keys()[0]].timestamp, 5)
            # draw the legend with one highlighted flag letter per line
            for i, s in enumerate(self.str_id_lst):
                offset = 4
                screen.addstr(i+1, offset, s, curses.color_pair(2))
                screen.addstr(i+1, offset + self.highlight_index[i],
                              s[self.highlight_index[i]], curses.color_pair(3))
            # separators between the legend and the squid grid
            screen.hline(len(self.str_id_lst) + 1, 0,
                         '-', self.squid_col_width)
            screen.vline(0, self.squid_col_width-1,
                         '|', len(self.str_id_lst)+1)
            # one status line per squid at its precomputed position
            for i, s in enumerate(self.squids_list):
                p = self.pos_map[s]
                load_squid_info_from_hk( screen, p[1], p[0],
                                         hk_data,
                                         s, s, self.sq_label_size,
                                         self.squid_col_width, self.serial_mapper)
            screen.refresh()
        elif frame.type == core.G3FrameType.EndProcessing:
            # restore normal terminal state on pipeline shutdown
            if not self.squids_list is None:
                self.stdscr.keypad(0)
                curses.echo()
                curses.nocbreak()
                curses.endwin()
if __name__=='__main__':
    # CLI: receive G3 frames from a remote host, emit a lyrebird config,
    # rebroadcast throttled streams locally, and (unless --debug_mode)
    # run the curses SquidDisplay.
    parser = argparse.ArgumentParser()
    parser.add_argument('hostname')
    parser.add_argument('--port',type=int, default=8675)
    parser.add_argument('--local_ts_port',type=int, default=8676)
    parser.add_argument('--local_hk_port',type=int, default=8677)
    parser.add_argument('--gcp_signalled_hk_port', type=int, default=50011)
    parser.add_argument('--lyrebird_output_file', default = 'lyrebird_config_file.json')
    parser.add_argument('--get_hk_script', default = 'get_hk.sh')
    parser.add_argument('--timestream_buffer_size',type=int, default=1024)
    parser.add_argument('--min_max_update_interval', type=int, default = 300)
    parser.add_argument('--rendering_sub_sampling', type=int, default = 2)
    parser.add_argument('--max_framerate', type=int, default = 60)
    parser.add_argument("--mean_decay_factor", type = float, default = 0.01,
                        help = "The mean filtered power has an exponential convolution form to the filter. It has a value in (0,1) exclusive. Increasing the value decreases the size of the exponential to it pushes the frequency of the HPF lower. Numbers close to one filter things very rapidly, close to 0 very slowly.")
    parser.add_argument('--debug_mode', action='store_true', help = "prevents the spawning on the curses display")
    parser.add_argument('--debug_logs', action='store_true', help = "store logs of stderr/out")
    parser.add_argument('--ignore_nominal_bias_props', action='store_true', help = "will align the bolometers into a grid")
    args = parser.parse_args()
    #core.set_log_level(core.G3LogLevel.LOG_DEBUG)
    # Resolve output paths relative to the installed bin/ directory.
    script_path = os.path.dirname(os.path.realpath(__file__))
    script_path = script_path + '/../bin/'
    lyrebird_output_file = script_path + args.lyrebird_output_file
    get_hk_script = script_path + args.get_hk_script
    pipe = core.G3Pipeline()
    pipe.Add(core.G3NetworkReceiver,
             hostname = args.hostname, port = args.port)
    if args.ignore_nominal_bias_props:
        # Drop calibration frames and fake bolometer properties so the
        # bolometers are laid out in a plain grid.
        pipe.Add(lambda fr: fr.type != core.G3FrameType.Calibration)
        pipe.Add(BoloPropertiesFaker)
    pipe.Add(AddVbiasAndCurrentConv)
    # Generate the lyrebird visualization config file.
    pipe.Add(BirdConfigGenerator,
             lyrebird_output_file = lyrebird_output_file,
             hostname = args.hostname,
             get_hk_script_name = get_hk_script,
             hk_hostname = '127.0.0.1',
             port = args.local_ts_port,
             hk_port = args.local_hk_port,
             get_hk_port = args.gcp_signalled_hk_port,
             dv_buffer_size = args.timestream_buffer_size,
             min_max_update_interval = args.min_max_update_interval,
             rendering_sub_sampling = args.rendering_sub_sampling,
             max_framerate = args.max_framerate,
             mean_decay_factor = args.mean_decay_factor
    )
    pipe.Add(GetHousekeepingMessenger, hostname = args.hostname,
             port = args.gcp_signalled_hk_port)
    # Local rebroadcast: timepoints decimated 10x on the HK port, and
    # housekeeping frames suppressed entirely on the timestream port.
    pipe.Add(core.G3ThrottledNetworkSender,
             hostname = '*',
             port = args.local_hk_port,
             frame_decimation = {core.G3FrameType.Timepoint: 10}
    )
    pipe.Add(core.G3ThrottledNetworkSender,
             hostname = '*',
             port = args.local_ts_port,
             frame_decimation = {core.G3FrameType.Housekeeping: 0}
    )
    if args.debug_logs:
        # Curses owns the terminal, so divert stdout/stderr to files.
        import sys
        sys.stderr = open('kookaburra_stderr.txt', 'w')
        sys.stdout = open('kookaburra_stdout.txt', 'w')
    if args.debug_mode:
        pipe.Add(core.Dump)
        pipe.Run()
    else:
        pipe.Add(SquidDisplay)
        try:
            pipe.Run()
        finally:
            # NOTE(review): print_exc in a finally also runs on a clean exit
            # (prints "NoneType: None" on py3) -- confirm intended.
            traceback.print_exc() # Print the exception
            # Always restore the terminal state that curses changed.
            curses.curs_set(1)
            curses.echo()
            curses.nocbreak()
            curses.endwin()
| bin/kookaburra.py | 24,872 | Splits the string into a list where the numbers and the characters between numbers are each element
Copied from spt3g_software to fix dependencies (sorry)
Compares two strings where numbers are sorted according to value, so Sq12 ends up after Sq8, use in sorted function
Copied from spt3g_software to fix dependencies (sorry)
!/usr/bin/env pythonfrom spt3g.util import genericutils as GU not in the public S4 repofaking the frame dataneed nsquidsneed nbolos per squid Squid display portion need screen geometry and squid list and squid mappingrouting_good,screen.addstr(y, current_index, 'R', col_map[routing_good])current_index += 1code for loading hk info for displaymezz voltagesrouting_good,assign an x, y location to each squid Turn off echoing of keys, and enter cbreak mode, where no buffering is performed on keyboard inputdo updatescreen.box()CNDTV6Fcore.set_log_level(core.G3LogLevel.LOG_DEBUG) Print the exception | 927 | en | 0.746892 |
import os
from spirl.models.closed_loop_spirl_mdl import GoalClSPiRLMdl
from spirl.components.logger import Logger
from spirl.utils.general_utils import AttrDict
from spirl.configs.default_data_configs.kitchen import data_spec
from spirl.components.evaluator import TopOfNSequenceEvaluator
from spirl.data.kitchen.src.kitchen_data_loader import KitchenStateSeqDataset
current_dir = os.path.dirname(os.path.realpath(__file__))
# One-shot few-shot dataset: a single demonstration of the
# topknob/bottomknob/hinge/slide kitchen task sequence.
fewshot_dataset = KitchenStateSeqDataset(
    data_path='data/kitchen/kitchen-demo-topknob_bottomknob_hinge_slide.hdf5',
    num_demo=1,
    subseq_len=10,
)
# Subtask sequence the agent is evaluated on.
env = AttrDict(
    task_list = ['top burner', 'bottom burner', 'hinge cabinet', 'slide cabinet']
)
# Architecture of the pretrained contrastive state encoder (see contra_ckpt).
contra_model_cf = AttrDict(
    state_dimension=data_spec.state_dim,
    hidden_size=128,
    feature_size=32,
)
# Top-level training configuration for the few-shot imitation run.
configuration = {
    'model': GoalClSPiRLMdl,
    'logger': Logger,
    'data_dir': '.',
    'epoch_cycles_train': 1,
    'evaluator': TopOfNSequenceEvaluator,
    'top_of_n_eval': 100,
    'top_comp_metric': 'mse',
    'batch_size': 128,
    'num_epochs': 50,
    'fewshot_data': fewshot_dataset,
    'fewshot_batch_size': 128,
    'contra_config': contra_model_cf,
    'contra_ckpt': './experiments/contrastive/kitchen/exact-mixed-all/exact_model.pt',
    'finetune_vae': True,
}
configuration = AttrDict(configuration)
# Goal-conditioned closed-loop skill-prior model hyperparameters; the
# checkpoint path is resolved under the EXP_DIR environment variable.
model_config = AttrDict(
    state_dim=data_spec.state_dim,
    action_dim=data_spec.n_actions,
    n_rollout_steps=10,
    kl_div_weight=5e-4,
    nz_enc=128,
    nz_mid=128,
    n_processing_layers=5,
    cond_decode=True,
    checkpt_path=f'{os.environ["EXP_DIR"]}/skill_prior_learning/kitchen/hierarchical_cl_gc_top_bot_excluded'
)
# Dataset
data_config = AttrDict()
data_config.dataset_spec = data_spec
data_config.dataset_spec['dataset_path'] = './data/kitchen/kitchen-mixed-top-bot-excluded.hdf5'
data_config.dataset_spec.subseq_len = model_config.n_rollout_steps + 1  # flat last action from seq gets cropped
| spirl/configs/few_shot_imitation_learning/kitchen/hierarchical_cl_gc_top_bot_excluded_demo_topknob_bot_hinge_slide_oneshot/conf.py | 1,945 | Dataset flat last action from seq gets cropped | 46 | en | 0.695533 |
"""
Script for copying back xml junit files from tests
"""
import argparse # pylint: disable=minimum-python-version
import os
import subprocess
import paramiko
import salt.utils.yaml
class DownloadArtifacts:
    """Copy test artifacts (junit XML files) off a Test Kitchen instance.

    Connection details are read from ``kitchen diagnose`` output for the
    given instance and files are fetched over SFTP.
    """

    def __init__(self, instance, artifacts):
        # artifacts: iterable of (remote_path, local_path) pairs.
        self.instance = instance
        self.artifacts = artifacts
        self.transport = self.setup_transport()
        self.sftpclient = paramiko.SFTPClient.from_transport(self.transport)

    def setup_transport(self):
        """Build an authenticated paramiko Transport from kitchen state."""
        # pylint: disable=minimum-python-version
        config = salt.utils.yaml.safe_load(
            subprocess.check_output(
                ["bundle", "exec", "kitchen", "diagnose", self.instance]
            )
        )
        # pylint: enable=minimum-python-version
        state = config["instances"][self.instance]["state_file"]
        tport = config["instances"][self.instance]["transport"]
        # Prefer values from the state file, falling back to the transport
        # section and finally to sensible SSH defaults.
        transport = paramiko.Transport(
            (state["hostname"], state.get("port", tport.get("port", 22)))
        )
        pkey = paramiko.rsakey.RSAKey(
            filename=state.get("ssh_key", tport.get("ssh_key", "~/.ssh/id_rsa"))
        )
        transport.connect(
            username=state.get("username", tport.get("username", "root")), pkey=pkey
        )
        return transport

    def _set_permissions(self):
        """
        Make sure all xml files are readable by the world so that anyone can grab them
        """
        for remote, _ in self.artifacts:
            channel = self.transport.open_session()
            channel.exec_command("sudo chmod -R +r {}".format(remote))
            # BUG FIX: wait for the chmod to finish before downloading; the
            # previous fire-and-forget exec_command raced the SFTP fetch.
            channel.recv_exit_status()
            channel.close()

    def download(self):
        """Fetch every configured artifact; a trailing-slash remote path is
        treated as a directory and listed (non-recursively)."""
        self._set_permissions()
        for remote, local in self.artifacts:
            if remote.endswith("/"):
                for fxml in self.sftpclient.listdir(remote):
                    self._do_download(
                        os.path.join(remote, fxml),
                        os.path.join(local, os.path.basename(fxml)),
                    )
            else:
                self._do_download(remote, os.path.join(local, os.path.basename(remote)))

    def _do_download(self, remote, local):
        """Copy a single remote file, logging (not raising) on failure."""
        print("Copying from {} to {}".format(remote, local))
        try:
            self.sftpclient.get(remote, local)
        except OSError:
            print("Failed to copy: {}".format(remote))
if __name__ == "__main__":
    # CLI entry point: pull one or more artifact paths from a kitchen
    # instance to local destinations.
    parser = argparse.ArgumentParser(description="Jenkins Artifact Download Helper")
    parser.add_argument(
        "--instance",
        required=True,
        action="store",
        help="Instance on Test Kitchen to pull from",
    )
    # May be given multiple times; each occurrence appends one
    # (REMOTE_PATH, LOCAL_PATH) pair to args.artifacts.
    parser.add_argument(
        "--download-artifacts",
        dest="artifacts",
        nargs=2,
        action="append",
        metavar=("REMOTE_PATH", "LOCAL_PATH"),
        help="Download remote artifacts",
    )
    args = parser.parse_args()
    downloader = DownloadArtifacts(args.instance, args.artifacts)
    downloader.download()
| tests/support/copyartifacts.py | 2,940 | Make sure all xml files are readable by the world so that anyone can grab them
Script for copying back xml junit files from tests
pylint: disable=minimum-python-version pylint: disable=minimum-python-version pylint: enable=minimum-python-version | 247 | en | 0.663011 |
#!/usr/bin/env python
"""pattern.py: An example like <Rolling an image> in Pillow document.
"""
import os.path
from PIL import Image
def run(filepath):
    """Create a wallpaper image from a PNG file."""
    original = Image.open(filepath)
    # Build the quadrant-swapped base, then overlay a very faint (0x10
    # opacity) copy of the untouched source on top of it.
    wallpaper = swap_quadrants(original)
    paste_with_alpha(wallpaper, original, (0, 0), 0x10)
    return wallpaper
def swap_quadrants(img):
    """Quarter the image and swap two diagonal quadrant pairs."""
    quadrants = [img.crop(box) for box in quarter_bbox(img)]
    target = img.copy()
    anchor_w, anchor_h = quadrants[3].size
    # Diagonal swap at half opacity: bottom-right <-> top-left and
    # bottom-left <-> top-right.
    paste_with_alpha(target, quadrants[3], (0, 0), 0x80)
    paste_with_alpha(target, quadrants[2], (anchor_w, 0), 0x80)
    paste_with_alpha(target, quadrants[1], (0, anchor_h), 0x80)
    paste_with_alpha(target, quadrants[0], (anchor_w, anchor_h), 0x80)
    return target
def paste_with_alpha(target, source, left_upper, opacity):
    """An alpha_composite-like operation."""
    # A constant grayscale mask makes the whole paste uniformly translucent.
    stencil = Image.new('L', source.size, opacity)
    target.paste(source, left_upper, mask=stencil)
def quarter_bbox(img):
    """Quarter the bounding box of an image.

    Returns four crop boxes in Z order: top-left, top-right, bottom-left,
    bottom-right. PIL boxes are half-open (right/lower are exclusive), so
    adjacent quadrants share the midpoint coordinate; the previous
    ``xmid + 1`` / ``ymid + 1`` arithmetic silently dropped one pixel
    column and one pixel row between quadrants.

    NOTE: assumes ``img.getbbox()`` is not None (a fully transparent
    image returns None).
    """
    (left, upper, right, bottom) = img.getbbox()
    xmid = (left + right) // 2
    ymid = (upper + bottom) // 2
    # Z
    return [
        (left, upper, xmid, ymid),
        (xmid, upper, right, ymid),
        (left, ymid, xmid, bottom),
        (xmid, ymid, right, bottom),]
if __name__ == '__main__':
    # Demo entry point: build the wallpaper from a bundled sample PNG
    # (path resolved relative to this file) and open it in a viewer.
    result = run(os.path.join(
        os.path.dirname(__file__), '../../_images/illvelo.png'))
    result.show()
| source/_sample/pillow/pattern.py | 1,534 | An alpha_composite-like operation.
Quarter the bounding box of an image.
Create a wallpaper image from a PNG file.
Quarter the image and swap two diagonal quadrant pairs.
pattern.py: An example like <Rolling an image> in Pillow document.
!/usr/bin/env python Z | 261 | en | 0.686324 |
from rest_framework.permissions import BasePermission, SAFE_METHODS
class IsOwnerOrReadOnly(BasePermission):
    """Object-level permission: read access for anyone, write access only
    for the object's owner."""

    message = 'You must be the owner of this object.'

    def has_object_permission(self, request, view, obj):
        # Safe (read-only) methods always pass; mutations require that the
        # requesting user owns the object.
        return request.method in SAFE_METHODS or obj.user == request.user
"""View representations of Product Active docs pages"""
from widgetastic_patternfly4 import PatternflyTable
from widgetastic.widget import View, Text
from testsuite.ui.views.admin.product import BaseProductView
from testsuite.ui.widgets.buttons import ThreescaleDeleteButton, ThreescaleEditButton
from testsuite.ui.widgets import ActiveDocV2Section, ActiveDocV3Section
from testsuite.ui.navigation import step
class ActiveDocsView(BaseProductView):
    """View representation of Active Docs list page"""
    path_pattern = '/apiconfig/services/{product_id}/api_docs'
    active_docs_table = PatternflyTable(locator="//*[@id='content']/table")

    @step("ActiveDocsDetailView")
    def detail(self, active_doc):
        """Navigate to active doc detail/preview page"""
        # Clicking the name cell of the matching table row opens the preview.
        self.active_docs_table.row(name=active_doc["name"]).name.click()

    def prerequisite(self):
        # Navigation-framework hook: the view to reach before this one.
        return BaseProductView

    @property
    def is_displayed(self):
        # Displayed iff the parent product view is shown, the table is
        # rendered, and the browser URL matches this view's path.
        return BaseProductView.is_displayed.fget(self) and self.active_docs_table.is_displayed and \
            self.path in self.browser.url
class ActiveDocsDetailView(BaseProductView):
    """View representation of Active Docs Detail page"""
    path_pattern = '/apiconfig/services/{product_id}/api_docs/{active_doc_id}/preview'
    delete_btn = ThreescaleDeleteButton()
    edit_btn = ThreescaleEditButton()

    def __init__(self, parent, product, active_doc):
        # active_doc_id is substituted into path_pattern by the base view.
        super().__init__(parent, product, active_doc_id=active_doc.entity_id)

    @View.nested
    # pylint: disable=invalid-name
    class oas2(View):
        """OAS version 2 section"""
        expand_operations_link = Text(locator="//*[contains(@class, 'expandResource')]")
        collapse_operations_link = Text(locator="//*[contains(@class, 'collapseResource')]")
        active_docs_section = ActiveDocV2Section()

        def make_request(self, endpoint):
            """
            Make request on preview page
            :param endpoint: string of endpoint which should be tried
            :return:
            """
            # Operations must be expanded before the try-it-out form exists.
            self.expand_operations_link.click()
            self.active_docs_section.try_it_out(endpoint)

    @View.nested
    # pylint: disable=invalid-name
    class oas3(View):
        """OAS version 3 section"""
        active_docs_section = ActiveDocV3Section()
        server = Text("//label[@for='servers']/select/option")

        def make_request(self, method, path, key):
            """
            Make request on preview page
            :param path string eg. /post, /get
            :param method string eg. GET, POST
            :param key string name of application
            :return:
            """
            self.active_docs_section.try_it_out(method, path, key)

    def prerequisite(self):
        # Navigation-framework hook: the view to reach before this one.
        return ActiveDocsView

    @property
    def is_displayed(self):
        # Displayed iff the base view is shown, both action buttons are
        # rendered, and the browser URL matches this view's path.
        return BaseProductView.is_displayed.fget(self) and self.edit_btn.is_displayed and \
            self.delete_btn.is_displayed and self.path in self.browser.url
| testsuite/ui/views/admin/product/active_docs.py | 3,008 | View representation of Active Docs Detail page
View representation of Active Docs list page
OAS version 2 section
OAS version 3 section
Navigate to active doc detail/preview page
Make request on preview page
:param endpoint: string of endpoint which should be tried
:return:
Make request on preview page
:param path string eg. /post, /get
:param method string eg. GET, POST
:param key string name of application
:return:
View representations of Product Active docs pages
pylint: disable=invalid-name pylint: disable=invalid-name | 530 | en | 0.656706 |
import os
import json
import logging
import serial
from serial.tools import list_ports
import time
CONFIG_FILENAME_DEFAULT = 'olfa_config.json'
def get_olfa_config(config_filename=''):
    """
    Find and parse olfactometer configuration JSON.

    Resolution order when no path is given: the OLFA_CONFIG environment
    variable, then the legacy default filename.

    :param config_filename: string with path to configuration.
    :return: returns a tuple with (config_fn, config_dict)
    :rtype: tuple
    """
    if not config_filename:
        logging.info("No olfa config file specified, looking for default in OLFA_CONFIG os variable")
        config_filename = os.environ.get("OLFA_CONFIG")
        if not config_filename:
            config_filename = CONFIG_FILENAME_DEFAULT
            logging.info("No OLFA_CONFIG os variable, trying with legacy default " + CONFIG_FILENAME_DEFAULT)
    if not os.path.exists(config_filename):
        raise Exception('No olfactometer configuration file found at {0}'.format(config_filename))
    with open(config_filename) as config_file:
        parsed = json.load(config_file)
    return config_filename, parsed
def flatten_dictionary(dictionary, separator=':', flattened_dict=None, parent_string=''):
    """
    Flattens nested dictionary into a single dictionary:

        {'hello': {'world': 1,
                   'moon': 2}}

    becomes:

        {'hello:world': 1,
         'hello:moon': 2}

    Uses recursion to flatten as many layers as exist in your dictionary.

    :param dictionary: nested dictionary you wish to flatten.
    :param separator: string joining nested keys (default ':').
    :param flattened_dict: (used for recursion) current flattened dictionary to add to
    :param parent_string: (used for recursion) current key string to use as prefix for
    :return: flattened dictionary
    :type dictionary: dict
    :type separator: str
    :type flattened_dict: dict
    :type parent_string: str
    :rtype: dict
    """
    if flattened_dict is None:  # dicts are mutable, so we shouldn't use a dict as the default argument!!!
        flattened_dict = {}  # instead, redeclare an empty dictionary here.
    for k, v in dictionary.items():
        if parent_string:
            full_key = "{0}{1}{2}".format(parent_string, separator, k)
        else:
            full_key = k
        if isinstance(v, dict):
            # BUG FIX: forward the separator to the recursive call; nested
            # levels previously fell back to the default ':' regardless of
            # the separator requested by the caller.
            flatten_dictionary(v, separator=separator,
                               flattened_dict=flattened_dict,
                               parent_string=full_key)
        else:
            flattened_dict[full_key] = v
    return flattened_dict
def connect_serial(port, baudrate=115200, timeout=1, writeTimeout=1):
    """
    Return Serial object after making sure that the port is accessible and that the port is expressed as a string.

    :param port: str or int (ie "COM4" or 4 for Windows).
    :param baudrate: baudrate.
    :param timeout: read timeout in seconds, default 1 sec.
    :param writeTimeout: write timeout in seconds, default 1 sec.
    :return: serial port object.
    :rtype: serial.Serial
    """
    if isinstance(port, int):
        port = "COM{0}".format(port)
    available = [info[0] for info in list_ports.comports()]
    if port in available:
        return serial.Serial(port, baudrate=baudrate, timeout=timeout, writeTimeout=writeTimeout)
    # Port not present: list what *is* connected to help the user, then raise.
    print(("Serial not found on {0}.".format(port)))
    print('Listing current serial ports with devices:')
    for ser in list_ports.comports():
        print('\t{0}: {1}'.format(ser[0], ser[1]))
    time.sleep(.01)  # just to let the above lines print before the exemption is raised. cleans console output.
    raise serial.SerialException('Requested COM port: {0} is not listed as connected.'.format(port))
class OlfaException(Exception):
    """Base exception for olfactometry-specific errors."""
    pass
:param port: str or int (ie "COM4" or 4 for Windows).
:param baudrate: baudrate.
:param timeout: read timeout in seconds, default 1 sec.
:param writeTimeout: write timeout in seconds, default 1 sec.
:return: serial port object.
:rtype: serial.Serial
Flattens nested dictionary into a single dictionary:
{'hello': {'world': 1,
'moon': 2}}
becomes:
{'hello:world': 1,
'hello:moon': 2}
Uses recursion to flatten as many layers as exist in your dictionary.
:param dictionary: nested dictionary you wish to flatten.
:param flattened_dict: (used for recursion) current flattened dictionary to add to
:param parent_string: (used for recursion) current key string to use as prefix for
:return: flattened dictionary
:type dictionary: dict
:type flattened_dict: dict
:type parent_string: str
:rtype: dict
Find and parse olfactometer configuration JSON.
:param config_filename: string with path to configuration.
:return: returns a tuple with (config_fn, config_dict)
:rtype: tuple
if it didnt find it there, it tries the legacy default dicts are mutable, so we shouldn't use a dict as the default argument!!! instead, redeclare an empty dictionary here. use recursion to flatten and add nested dictionaries to the product. just to let the above lines print before the exemption is raised. cleans console output. | 1,445 | en | 0.750846 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Parser package.
"""
from .specification import implements_specification, iter_specifications
# Names of the submodules that make up the parser package.
MODULES = (
    'consumption',
    'loading',
    'modeling',
    'presentation',
    'reading',
    'validation')

# Explicit public API of this package.
__all__ = (
    'MODULES',
    'implements_specification',
    'iter_specifications')
| aria/parser/__init__.py | 1,084 | Parser package.
Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 770 | en | 0.876814 |
#!/usr/bin/env python
'''
Created on Apr 12, 2017
@author: Brian Jimenez-Garcia
@contact: brian.jimenez@bsc.es
'''
import sys
import os
if len(sys.argv[1:]) != 2:
    raise SystemExit("usage: %s pdb_file1 pdb_file2" % os.path.basename(sys.argv[0]))

pdb_file1 = sys.argv[1]
pdb_file2 = sys.argv[2]

# Panda3D imports
from pandac.PandaModules import loadPrcFileData
from emol import EMol

width = 1400
height = 900

# Change window properties (loadPrcFileData must run before ShowBase is
# instantiated for these settings to take effect)
loadPrcFileData("", "window-title Energy Visualizer")
loadPrcFileData("", "fullscreen 0")
loadPrcFileData("", "win-size %s %s" % (width, height))

from direct.showbase.ShowBase import ShowBase
base = ShowBase()

# Set up a loading screen
from direct.gui.OnscreenText import OnscreenText,TextNode
loadingText=OnscreenText("Loading molecules...",1,fg=(1,1,1,1),
                         pos=(0,0),align=TextNode.ACenter,
                         scale=.07,mayChange=1)

# Render three frames to avoid black screen
base.graphicsEngine.renderFrame()
base.graphicsEngine.renderFrame()
base.graphicsEngine.renderFrame()

# Load the game (blocks while the two PDB files are parsed)
visualizer = EMol(width, height, pdb_file1, pdb_file2)

# Hide loading
loadingText.cleanup()

base.run()
| launch.py | 1,183 | Created on Apr 12, 2017
@author: Brian Jimenez-Garcia
@contact: brian.jimenez@bsc.es
!/usr/bin/env python Panda3D imports Change window properties Set up a loading screen Render three frames to avoid black screen Load the game Hide loading | 241 | en | 0.599197 |
# (c) Copyright [2018-2021] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# |_ |~) _ _| _ /~\ _ |.
# |_)\/ |_)(_|(_|| \_/|_|(_|||
# /
# ____________ ______
# / __ `\ / /
# | \/ / / /
# |______ / / /
# |____/ / /
# _____________ / /
# \ / / /
# \ / / /
# \_______/ / /
# ______ / /
# \ / / /
# \ / / /
# \/ / /
# / /
# / /
# \ /
# \ /
# \/
# _
# \ / _ __|_. _ _ |_)
# \/ (/_| | |(_(_|| \/
# /
# VerticaPy is a Python library with scikit-like functionality to use to conduct
# data science projects on data stored in Vertica, taking advantage Verticaâs
# speed and built-in analytics and machine learning features. It supports the
# entire data science life cycle, uses a âpipelineâ mechanism to sequentialize
# data transformation operations, and offers beautiful graphical options.
#
# VerticaPy aims to solve all of these problems. The idea is simple: instead
# of moving data around for processing, VerticaPy brings the logic to the data.
#
#
# Modules
#
# Standard Python Modules
import math, warnings
from typing import Union
# VerticaPy Modules
from verticapy.learn.vmodel import *
from verticapy.learn.linear_model import LinearRegression
from verticapy import vDataFrame
from verticapy.plot import gen_colors
from verticapy.learn.tools import *
# Other Python Modules
from dateutil.parser import parse
import matplotlib.pyplot as plt
# ---#
class SARIMAX(Regressor):
"""
---------------------------------------------------------------------------
[Beta Version]
Creates an SARIMAX object using the Vertica Linear Regression algorithm on
the data.
Parameters
----------
name: str
Name of the the model. The model will be stored in the DB.
cursor: DBcursor, optional
Vertica database cursor.
p: int, optional
Order of the AR (Auto-Regressive) part.
d: int, optional
Order of the I (Integrated) part.
q: int, optional
Order of the MA (Moving-Average) part.
P: int, optional
Order of the seasonal AR (Auto-Regressive) part.
D: int, optional
Order of the seasonal I (Integrated) part.
Q: int, optional
Order of the seasonal MA (Moving-Average) part.
s: int, optional
Span of the seasonality.
tol: float, optional
Determines whether the algorithm has reached the specified accuracy result.
max_iter: int, optional
Determines the maximum number of iterations the algorithm performs before
achieving the specified accuracy result.
solver: str, optional
The optimizer method to use to train the model.
Newton : Newton Method
BFGS : Broyden Fletcher Goldfarb Shanno
max_pik: int, optional
Number of inverse MA coefficient used to approximate the MA.
papprox_ma: int, optional
the p of the AR(p) used to approximate the MA coefficients.
"""
    def __init__(
        self,
        name: str,
        cursor=None,
        p: int = 0,
        d: int = 0,
        q: int = 0,
        P: int = 0,
        D: int = 0,
        Q: int = 0,
        s: int = 0,
        tol: float = 1e-4,
        max_iter: int = 1000,
        solver: str = "Newton",
        max_pik: int = 100,
        papprox_ma: int = 200,
    ):
        """Validate and store the hyperparameters; see the class docstring
        for the meaning of each parameter."""
        check_types([("name", name, [str],)])
        self.type, self.name = "SARIMAX", name
        self.set_params(
            {
                "p": p,
                "d": d,
                "q": q,
                "P": P,
                "D": D,
                "Q": Q,
                "s": s,
                "tol": tol,
                "max_iter": max_iter,
                "solver": solver,
                "max_pik": max_pik,
                "papprox_ma": papprox_ma,
            }
        )
        # Seasonal orders (P, D, Q) are only meaningful together with a
        # nonzero season span s; enforce consistency in both directions.
        if self.parameters["s"] == 0:
            assert (
                self.parameters["D"] == 0
                and self.parameters["P"] == 0
                and self.parameters["Q"] == 0
            ), ParameterError(
                "In case of non-seasonality (s = 0), all the parameters P, D or Q must be equal to 0."
            )
        else:
            assert (
                self.parameters["D"] > 0
                or self.parameters["P"] > 0
                or self.parameters["Q"] > 0
            ), ParameterError(
                "In case of seasonality (s > 0), at least one of the parameters P, D or Q must be strictly greater than 0."
            )
        cursor = check_cursor(cursor)[0]
        self.cursor = cursor
        # This model requires Vertica >= 8.0.0.
        version(cursor=cursor, condition=[8, 0, 0])
# ---#
def deploySQL(self):
"""
---------------------------------------------------------------------------
Returns the SQL code needed to deploy the model.
Returns
-------
str
the SQL code needed to deploy the model.
"""
sql = self.deploy_predict_
if (self.parameters["d"] > 0) or (
self.parameters["D"] > 0 and self.parameters["s"] > 0
):
for i in range(0, self.parameters["d"] + 1):
for k in range(
0, max((self.parameters["D"] + 1) * min(1, self.parameters["s"]), 1)
):
if (k, i) != (0, 0):
comb_i_d = (
math.factorial(self.parameters["d"])
/ math.factorial(self.parameters["d"] - i)
/ math.factorial(i)
)
comb_k_D = (
math.factorial(self.parameters["D"])
/ math.factorial(self.parameters["D"] - k)
/ math.factorial(k)
)
sql += " + {} * LAG(VerticaPy_y_copy, {}) OVER (ORDER BY [VerticaPy_ts])".format(
(-1) ** (i + k + 1) * comb_i_d * comb_k_D,
i + self.parameters["s"] * k,
)
return sql
# ---#
    def fpredict(self, L: list):
        """
        ---------------------------------------------------------------------------
        Computes the prediction.

        Parameters
        ----------
        L: list
            List containing the data. It must be a two-dimensional list containing
            multiple rows. Each row must include as first element the ordered predictor
            and as nth elements the nth - 1 exogenous variable (nth > 2).

        Returns
        -------
        float
            the prediction.
        """

        def sub_arp(L: list):
            # Subtract the fitted AR terms (and intercept) from each element,
            # producing the residual series consumed by fepsilon below.
            # NOTE(review): the inner loop reuses 'i', shadowing the outer
            # index, and 'L[-i]' is 'L[0]' when i == 0 -- confirm intended.
            L_final = []
            for i in range(len(L)):
                result = L[-i]
                for i in range(len(self.coef_.values["coefficient"])):
                    elem = self.coef_.values["predictor"][i]
                    if elem.lower() == "intercept":
                        result -= self.coef_.values["coefficient"][i]
                    elif elem.lower()[0:2] == "ar":
                        nb = int(elem[2:])
                        try:
                            result -= self.coef_.values["coefficient"][i] * L[-nb]
                        except:
                            # Required lag unavailable: mark element unknown.
                            result = None
                L_final = [result] + L_final
            return L_final

        def fepsilon(L: list):
            # Estimate the current innovation (epsilon) via the inverse-MA
            # representation truncated at max_pik coefficients (ma_piq_).
            if self.parameters["p"] > 0 or self.parameters["P"] > 0:
                L_tmp = sub_arp(L)
            else:
                L_tmp = L
            try:
                result = L_tmp[-1] - self.ma_avg_
                for i in range(1, self.parameters["max_pik"]):
                    result -= self.ma_piq_.values["coefficient"][i] * (
                        L_tmp[-i] - self.ma_avg_
                    )
                return result
            except:
                # Not enough history (or no MA part fitted): assume 0.
                return 0

        # Degenerate mean-only model: no AR/MA/differencing/seasonality and
        # no exogenous variables -- the prediction is the stored average.
        if (
            self.parameters["p"] == 0
            and self.parameters["q"] == 0
            and self.parameters["d"] == 0
            and self.parameters["s"] == 0
            and not (self.exogenous)
        ):
            return self.ma_avg_
        try:
            # First element of each row is the response; keep an
            # undifferenced copy (yt_copy) to invert differencing at the end.
            yt = [elem[0] for elem in L]
            yt_copy = [elem[0] for elem in L]
            yt.reverse()
            # Apply the regular differencing d times ...
            if self.parameters["d"] > 0:
                for i in range(self.parameters["d"]):
                    yt = [yt[i - 1] - yt[i] for i in range(1, len(yt))]
            # ... and the seasonal differencing D times with period s.
            if self.parameters["D"] > 0 and self.parameters["s"] > 0:
                for i in range(self.parameters["D"]):
                    yt = [
                        yt[i - self.parameters["s"]] - yt[i]
                        for i in range(self.parameters["s"], len(yt))
                    ]
            yt.reverse()
            # Accumulate intercept + AR + MA + exogenous contributions; j
            # walks the exogenous columns of the most recent row L[-1].
            result, j = 0, 1
            for i in range(len(self.coef_.values["coefficient"])):
                elem = self.coef_.values["predictor"][i]
                if elem.lower() == "intercept":
                    result += self.coef_.values["coefficient"][i]
                elif elem.lower()[0:2] == "ar":
                    nb = int(elem[2:])
                    result += self.coef_.values["coefficient"][i] * yt[-nb]
                elif elem.lower()[0:2] == "ma":
                    nb = int(elem[2:])
                    result += self.coef_.values["coefficient"][i] * fepsilon(
                        yt[: -nb - 1]
                    )
                else:
                    result += self.coef_.values["coefficient"][i] * L[-1][j]
                    j += 1
            # Invert the differencing: add back lagged original values with
            # binomial-expansion coefficients (same scheme as deploySQL).
            for i in range(0, self.parameters["d"] + 1):
                for k in range(
                    0, max((self.parameters["D"] + 1) * min(1, self.parameters["s"]), 1)
                ):
                    if (k, i) != (0, 0):
                        comb_i_d = (
                            math.factorial(self.parameters["d"])
                            / math.factorial(self.parameters["d"] - i)
                            / math.factorial(i)
                        )
                        comb_k_D = (
                            math.factorial(self.parameters["D"])
                            / math.factorial(self.parameters["D"] - k)
                            / math.factorial(k)
                        )
                        result += (
                            (-1) ** (i + k + 1)
                            * comb_i_d
                            * comb_k_D
                            * yt_copy[-(i + self.parameters["s"] * k)]
                        )
            return result
        except:
            # Any failure (insufficient history, missing coefficients, ...)
            # yields no prediction.
            return None
# ---#
    def fit(
        self,
        input_relation: Union[vDataFrame, str],
        y: str,
        ts: str,
        X: list = [],
        test_relation: Union[vDataFrame, str] = "",
    ):
        """
        ---------------------------------------------------------------------------
        Trains the model.

        Parameters
        ----------
        input_relation: str/vDataFrame
            Training relation.
        y: str
            Response column.
        ts: str
            vcolumn used to order the data.
        X: list, optional
            exogenous columns used to fit the model.
        test_relation: str/vDataFrame, optional
            Relation used to test the model.

        Returns
        -------
        object
            model
        """
        check_types(
            [
                ("input_relation", input_relation, [str, vDataFrame],),
                ("y", y, [str],),
                ("test_relation", test_relation, [str, vDataFrame],),
                ("ts", ts, [str],),
            ]
        )
        self.cursor = check_cursor(self.cursor, input_relation, True)[0]
        # Initialization
        does_model_exist(name=self.name, cursor=self.cursor, raise_error=True)
        self.input_relation = (
            input_relation
            if isinstance(input_relation, str)
            else input_relation.__genSQL__()
        )
        if isinstance(test_relation, vDataFrame):
            self.test_relation = test_relation.__genSQL__()
        elif test_relation:
            self.test_relation = test_relation
        else:
            # No test relation given: evaluate on the training relation.
            self.test_relation = self.input_relation
        self.y, self.ts, self.deploy_predict_ = str_column(y), str_column(ts), ""
        self.coef_ = tablesample({"predictor": [], "coefficient": []})
        self.ma_avg_, self.ma_piq_ = None, None
        X, schema = [str_column(elem) for elem in X], schema_relation(self.name)[0]
        self.X, self.exogenous = [], X
        # [VerticaPy_y], [VerticaPy_ts], [VerticaPy_key_columns] and [X{i}] are
        # textual placeholders: they are substituted with the real column names
        # right before each CREATE VIEW below and again at predict time.
        relation = (
            "(SELECT *, [VerticaPy_y] AS VerticaPy_y_copy FROM {}) VERTICAPY_SUBTABLE "
        )
        model = LinearRegression(
            name=self.name,
            solver=self.parameters["solver"],
            max_iter=self.parameters["max_iter"],
            tol=self.parameters["tol"],
        )
        # Degenerate case: no AR/MA/differencing terms and no exogenous
        # variables -> the model collapses to the plain average of y.
        if (
            self.parameters["p"] == 0
            and self.parameters["q"] == 0
            and self.parameters["d"] == 0
            and self.parameters["s"] == 0
            and not (self.exogenous)
        ):
            query = "SELECT AVG({}) FROM {}".format(self.y, self.input_relation)
            self.ma_avg_ = self.cursor.execute(query).fetchone()[0]
            self.deploy_predict_ = str(self.ma_avg_)
        # I(d): apply d one-step differences, then D seasonal differences of
        # period s, by nesting LAG subqueries around the relation string.
        if self.parameters["d"] > 0:
            for i in range(self.parameters["d"]):
                relation = "(SELECT [VerticaPy_y] - LAG([VerticaPy_y], 1) OVER (ORDER BY [VerticaPy_ts]) AS [VerticaPy_y], VerticaPy_y_copy[VerticaPy_key_columns] FROM {}) VERTICAPY_SUBTABLE".format(
                    relation
                )
        if self.parameters["D"] > 0 and self.parameters["s"] > 0:
            for i in range(self.parameters["D"]):
                relation = "(SELECT [VerticaPy_y] - LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS [VerticaPy_y], VerticaPy_y_copy[VerticaPy_key_columns] FROM {}) VERTICAPY_SUBTABLE".format(
                    self.parameters["s"], relation
                )
        # Best-effort drop of the session-scoped temporary view used to train
        # the intermediate linear regressions; errors are deliberately ignored.
        def drop_temp_elem(self, schema):
            try:
                with warnings.catch_warnings(record=True) as w:
                    drop(
                        "{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}".format(
                            schema, get_session(self.cursor)
                        ),
                        cursor=self.cursor,
                        method="view",
                    )
            except:
                pass
        # AR(p): build LAG columns for lags 1..p plus the seasonal lags
        # s, 2s, ..., P*s (skipping any seasonal lag already in 1..p),
        # then fit a linear regression of y on those lags and X.
        if self.parameters["p"] > 0 or self.parameters["P"] > 0:
            columns = [
                "LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS AR{}".format(
                    i, i
                )
                for i in range(1, self.parameters["p"] + 1)
            ]
            AR = ["AR{}".format(i) for i in range(1, self.parameters["p"] + 1)]
            if self.parameters["s"] > 0:
                for i in range(1, self.parameters["P"] + 1):
                    if (i * self.parameters["s"]) not in (
                        range(1, self.parameters["p"] + 1)
                    ):
                        columns += [
                            "LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS AR{}".format(
                                i * self.parameters["s"], i * self.parameters["s"]
                            )
                        ]
                        AR += ["AR{}".format(i * self.parameters["s"])]
            relation = "(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE".format(
                ", ".join(columns), relation
            )
            drop_temp_elem(self, schema)
            query = "CREATE VIEW {}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{} AS SELECT * FROM {}".format(
                schema,
                get_session(self.cursor),
                relation.format(self.input_relation)
                .replace("[VerticaPy_ts]", self.ts)
                .replace("[VerticaPy_y]", self.y)
                .replace("[VerticaPy_key_columns]", ", " + ", ".join([self.ts] + X)),
            )
            try:
                self.cursor.execute(query)
                self.X += AR + X
                model.fit(
                    input_relation="{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}".format(
                        schema, get_session(self.cursor)
                    ),
                    X=self.X,
                    y=self.y,
                )
            except:
                # Drop the temp view before re-raising the training error.
                drop_temp_elem(self, schema)
                raise
            drop_temp_elem(self, schema)
            self.coef_.values["predictor"] = model.coef_.values["predictor"]
            self.coef_.values["coefficient"] = model.coef_.values["coefficient"]
            alphaq = model.coef_.values["coefficient"]
            model.drop()
            # epsilon_final = AR residual expression (used to feed the MA step);
            # deploy_predict_ = the deployable prediction expression.
            epsilon_final = (
                "[VerticaPy_y] - "
                + str(alphaq[0])
                + " - "
                + " - ".join(
                    [
                        str(alphaq[i])
                        + " * "
                        + "LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts])".format(
                            i
                        )
                        for i in range(1, self.parameters["p"] + 1)
                    ]
                )
            )
            self.deploy_predict_ = (
                str(alphaq[0])
                + " + "
                + " + ".join(
                    [
                        str(alphaq[i])
                        + " * "
                        + "LAG(VerticaPy_y_copy, {}) OVER (ORDER BY [VerticaPy_ts])".format(
                            i
                        )
                        for i in range(1, self.parameters["p"] + 1)
                    ]
                )
            )
            if self.parameters["s"] > 0 and self.parameters["P"] > 0:
                # Append the seasonal AR terms (lags s, 2s, ..., P*s).
                epsilon_final += " - " + " - ".join(
                    [
                        str(alphaq[i])
                        + " * "
                        + "LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts])".format(
                            i * self.parameters["s"]
                        )
                        for i in range(
                            self.parameters["p"] + 1,
                            self.parameters["p"]
                            + (self.parameters["P"] if self.parameters["s"] > 0 else 0)
                            + 1,
                        )
                    ]
                )
                self.deploy_predict_ += " + " + " + ".join(
                    [
                        str(alphaq[i])
                        + " * "
                        + "LAG(VerticaPy_y_copy, {}) OVER (ORDER BY [VerticaPy_ts])".format(
                            i * self.parameters["s"]
                        )
                        for i in range(
                            self.parameters["p"] + 1,
                            self.parameters["p"]
                            + (self.parameters["P"] if self.parameters["s"] > 0 else 0)
                            + 1,
                        )
                    ]
                )
            for idx, elem in enumerate(X):
                # Exogenous coefficients follow the AR (+ seasonal AR) block
                # and the intercept in alphaq's ordering.
                epsilon_final += " - {} * [X{}]".format(
                    alphaq[
                        idx
                        + self.parameters["p"]
                        + (self.parameters["P"] if self.parameters["s"] > 0 else 0)
                        + 1
                    ],
                    idx,
                )
                self.deploy_predict_ += " + {} * [X{}]".format(
                    alphaq[
                        idx
                        + self.parameters["p"]
                        + (self.parameters["P"] if self.parameters["s"] > 0 else 0)
                        + 1
                    ],
                    idx,
                )
            relation = "(SELECT {} AS [VerticaPy_y], {}, VerticaPy_y_copy[VerticaPy_key_columns] FROM {}) VERTICAPY_SUBTABLE".format(
                epsilon_final, ", ".join(AR), relation
            )
        # MA(q): approximate the MA part by first fitting a long AR model
        # (order n, capped by papprox_ma) on the AR residuals, then deriving
        # the MA coefficients from it.
        if self.parameters["q"] > 0 or (
            self.parameters["Q"] > 0 and self.parameters["s"] > 0
        ):
            transform_relation = relation.replace("[VerticaPy_y]", y).replace(
                "[VerticaPy_ts]", ts
            )
            transform_relation = transform_relation.replace(
                "[VerticaPy_key_columns]", ", " + ", ".join(X + [ts])
            )
            for idx, elem in enumerate(X):
                transform_relation = transform_relation.replace(
                    "[X{}]".format(idx), elem
                )
            query = "SELECT COUNT(*), AVG({}) FROM {}".format(
                self.y, transform_relation.format(self.input_relation)
            )
            result = self.cursor.execute(query).fetchone()
            self.ma_avg_ = result[1]
            n = result[0]
            # Long-AR order: roughly cube root of the row count, clamped to
            # [8, papprox_ma], and at least q and Q*s + 1.
            n = max(
                max(
                    min(max(n ** (1.0 / 3.0), 8), self.parameters["papprox_ma"]),
                    self.parameters["q"],
                ),
                self.parameters["Q"] * self.parameters["s"] + 1,
            )
            n = int(n)
            columns = [
                "LAG([VerticaPy_y], {}) OVER (ORDER BY [VerticaPy_ts]) AS ARq{}".format(
                    i, i
                )
                for i in range(1, n)
            ]
            ARq = ["ARq{}".format(i) for i in range(1, n)]
            tmp_relation = "(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE".format(
                ", ".join(columns), relation
            )
            for idx, elem in enumerate(X):
                tmp_relation = tmp_relation.replace("[X{}]".format(idx), elem)
            drop_temp_elem(self, schema)
            query = "CREATE VIEW {}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{} AS SELECT * FROM {}".format(
                schema,
                get_session(self.cursor),
                tmp_relation.format(self.input_relation)
                .replace("[VerticaPy_ts]", self.ts)
                .replace("[VerticaPy_y]", self.y)
                .replace("[VerticaPy_key_columns]", ", " + ", ".join([self.ts] + X)),
            )
            try:
                self.cursor.execute(query)
                model.fit(
                    input_relation="{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}".format(
                        schema, get_session(self.cursor)
                    ),
                    X=ARq,
                    y=self.y,
                )
            except:
                drop_temp_elem(self, schema)
                raise
            drop_temp_elem(self, schema)
            if not (self.coef_.values["predictor"]):
                # Pure MA model (no AR step ran): seed the coefficient table
                # with the mean as intercept.
                self.coef_.values["predictor"] += ["Intercept"]
                self.coef_.values["coefficient"] += [self.ma_avg_]
                self.deploy_predict_ = str(self.ma_avg_)
            alphaq = model.coef_.values["coefficient"][1:]
            model.drop()
            # Recursively derive the MA coefficients thetaq from the long-AR
            # coefficients alphaq.
            thetaq, piq = [], [-1] + []
            for j in range(0, len(alphaq)):
                thetaq += [
                    sum([alphaq[j - i - 1] * thetaq[i] for i in range(0, j)])
                    + alphaq[j]
                ]
            for j in range(self.parameters["q"]):
                self.coef_.values["predictor"] += ["ma{}".format(j + 1)]
                self.coef_.values["coefficient"] += [thetaq[j]]
                self.deploy_predict_ += " + {} * MA{}".format(thetaq[j], j + 1)
            if self.parameters["s"] > 0:
                # Seasonal MA terms at lags s, 2s, ..., Q*s.
                for j in range(1, self.parameters["Q"] + 1):
                    self.coef_.values["predictor"] += [
                        "ma{}".format(self.parameters["s"] * j)
                    ]
                    self.coef_.values["coefficient"] += [
                        thetaq[self.parameters["s"] * j - 1]
                    ]
                    self.deploy_predict_ += " + {} * MA{}".format(
                        thetaq[self.parameters["s"] * j - 1], self.parameters["s"] * j
                    )
            # piq: weights of the inverted (long-AR) representation of the MA
            # part, truncated at max_pik terms; reused by fpredict.
            for j in range(0, self.parameters["max_pik"]):
                piq_tmp = 0
                for i in range(0, self.parameters["q"]):
                    if j - i > 0:
                        piq_tmp -= thetaq[i] * piq[j - i]
                    elif j - i == 0:
                        piq_tmp -= thetaq[i]
                piq = piq + [piq_tmp]
            self.ma_piq_ = tablesample({"coefficient": piq})
            epsilon = (
                "[VerticaPy_y] - "
                + str(self.ma_avg_)
                + " - "
                + " - ".join(
                    [
                        str((piq[i]))
                        + " * "
                        + "LAG([VerticaPy_y] - {}, {}) OVER (ORDER BY [VerticaPy_ts])".format(
                            self.ma_avg_, i
                        )
                        for i in range(1, self.parameters["max_pik"])
                    ]
                )
            )
            epsilon += " AS MA0"
            relation = "(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE".format(
                epsilon, relation
            )
            columns = [
                "LAG(MA0, {}) OVER (ORDER BY [VerticaPy_ts]) AS MA{}".format(i, i)
                for i in range(1, self.parameters["q"] + 1)
            ]
            MA = ["MA{}".format(i) for i in range(1, self.parameters["q"] + 1)]
            if self.parameters["s"] > 0:
                columns += [
                    "LAG(MA0, {}) OVER (ORDER BY [VerticaPy_ts]) AS MA{}".format(
                        i * self.parameters["s"], i * self.parameters["s"]
                    )
                    for i in range(1, self.parameters["Q"] + 1)
                ]
                MA += [
                    "MA{}".format(i * self.parameters["s"])
                    for i in range(1, self.parameters["Q"] + 1)
                ]
            relation = "(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE".format(
                ", ".join(columns), relation
            )
            self.X += MA
            # NOTE(review): this transform_relation is rebuilt but then only
            # 'relation' (still holding placeholders) is stored below — the
            # placeholder substitution is redone at predict time.
            transform_relation = relation.replace("[VerticaPy_y]", y).replace(
                "[VerticaPy_ts]", ts
            )
            transform_relation = transform_relation.replace(
                "[VerticaPy_key_columns]", ", " + ", ".join(X + [ts])
            )
            for idx, elem in enumerate(X):
                transform_relation = transform_relation.replace(
                    "[X{}]".format(idx), elem
                )
        self.transform_relation = relation
        # Persist the fitted model in the VerticaPy model catalog.
        model_save = {
            "type": "SARIMAX",
            "input_relation": self.input_relation,
            "test_relation": self.test_relation,
            "transform_relation": self.transform_relation,
            "deploy_predict": self.deploy_predict_,
            "ma_avg": self.ma_avg_,
            "ma_piq": self.ma_piq_.values if (self.ma_piq_) else None,
            "X": self.X,
            "y": self.y,
            "ts": self.ts,
            "exogenous": self.exogenous,
            "coef": self.coef_.values,
            "p": self.parameters["p"],
            "d": self.parameters["d"],
            "q": self.parameters["q"],
            "P": self.parameters["P"],
            "D": self.parameters["D"],
            "Q": self.parameters["Q"],
            "s": self.parameters["s"],
            "tol": self.parameters["tol"],
            "max_iter": self.parameters["max_iter"],
            "solver": self.parameters["solver"],
            "max_pik": self.parameters["max_pik"],
            "papprox_ma": self.parameters["papprox_ma"],
        }
        insert_verticapy_schema(
            model_name=self.name,
            model_type="SARIMAX",
            model_save=model_save,
            cursor=self.cursor,
        )
        return self
# ---#
    def plot(
        self,
        vdf: vDataFrame = None,
        y: str = "",
        ts: str = "",
        X: list = [],
        dynamic: bool = False,
        one_step: bool = True,
        observed: bool = True,
        confidence: bool = True,
        nlead: int = 10,
        nlast: int = 0,
        limit: int = 1000,
        ax=None,
        **style_kwds,
    ):
        """
        ---------------------------------------------------------------------------
        Draws the SARIMAX model.

        Parameters
        ----------
        vdf: vDataFrame, optional
            Object to use to run the prediction.
        y: str, optional
            Response column.
        ts: str, optional
            vcolumn used to order the data.
        X: list, optional
            exogenous vcolumns.
        dynamic: bool, optional
            If set to True, the dynamic forecast will be drawn.
        one_step: bool, optional
            If set to True, the one step ahead forecast will be drawn.
        observed: bool, optional
            If set to True, the observation will be drawn.
        confidence: bool, optional
            If set to True, the confidence ranges will be drawn.
        nlead: int, optional
            Number of predictions computed by the dynamic forecast after
            the last ts date.
        nlast: int, optional
            The dynamic forecast will start nlast values before the last
            ts date.
        limit: int, optional
            Maximum number of past elements to use.
        ax: Matplotlib axes object, optional
            The axes to plot on.
        **style_kwds
            Any optional parameter to pass to the Matplotlib functions.

        Returns
        -------
        ax
            Matplotlib axes object
        """
        if not (vdf):
            # Default to the training relation when no vDataFrame is supplied.
            vdf = vdf_from_relation(relation=self.input_relation, cursor=self.cursor)
        check_types(
            [
                ("limit", limit, [int, float],),
                ("nlead", nlead, [int, float],),
                ("dynamic", dynamic, [bool],),
                ("observed", observed, [bool],),
                ("one_step", one_step, [bool],),
                ("confidence", confidence, [bool],),
                ("vdf", vdf, [vDataFrame],),
            ],
        )
        # Widen 'limit' so it covers at least the AR depth + requested history
        # (min 200 rows); delta_limit is the offset used to trim the plot back
        # to the user-requested window.
        delta_limit, limit = (
            limit,
            max(
                max(
                    limit,
                    self.parameters["p"] + 1 + nlast,
                    self.parameters["P"] * self.parameters["s"] + 1 + nlast,
                ),
                200,
            ),
        )
        delta_limit = max(limit - delta_limit - nlast, 0)
        assert dynamic or one_step or observed, ParameterError(
            "No option selected.\n You should set either dynamic, one_step or observed to True."
        )
        assert nlead + nlast > 0 or not (dynamic), ParameterError(
            "Dynamic Plots are only possible if either parameter 'nlead' is greater than 0 or parameter 'nlast' is greater than 0, and parameter 'dynamic' is set to True."
        )
        if dynamic:
            assert not (self.exogenous), Exception(
                "Dynamic Plots are only possible for SARIMA models (no exegenous variables), not SARIMAX."
            )
        if not (y):
            y = self.y
        if not (ts):
            ts = self.ts
        if not (X):
            X = self.exogenous
        result = self.predict(
            vdf=vdf, y=y, ts=ts, X=X, nlead=0, name="_verticapy_prediction_"
        )
        # 1.96 * RMSE: normal-theory ~95% half-width for the confidence bands.
        error_eps = 1.96 * math.sqrt(self.score(method="mse"))
        # Temporarily silence VerticaPy progress output; restored afterwards.
        print_info = verticapy.options["print_info"]
        verticapy.options["print_info"] = False
        try:
            result = (
                result.select([ts, y, "_verticapy_prediction_"])
                .dropna()
                .sort([ts])
                .tail(limit)
                .values
            )
        except:
            verticapy.options["print_info"] = print_info
            raise
        verticapy.options["print_info"] = print_info
        columns = [elem for elem in result]
        if isinstance(result[columns[0]][0], str):
            # Timestamps may come back as strings; parse them to datetimes.
            result[columns[0]] = [parse(elem) for elem in result[columns[0]]]
        true_value = [result[columns[0]], result[columns[1]]]
        one_step_ahead = [result[columns[0]], result[columns[2]]]
        lower_osa, upper_osa = (
            [
                float(elem) - error_eps if elem != None else None
                for elem in one_step_ahead[1]
            ],
            [
                float(elem) + error_eps if elem != None else None
                for elem in one_step_ahead[1]
            ],
        )
        if dynamic:
            # Assumes a regular time step: uses the gap between the last two
            # observations as the forecasting step.
            deltat = result[columns[0]][-1] - result[columns[0]][-2]
            lead_time_list = []
            if nlast > 0:
                lead_list = [[elem] for elem in result[columns[1]][:-nlast]]
            else:
                lead_list = [[elem] for elem in result[columns[1]]]
            # Recursive forecast: each new value is predicted from the list
            # extended with the previously predicted values.
            for i in range(nlast):
                lead_list += [[self.fpredict(lead_list)]]
                lead_time_list += [result[columns[0]][i - nlast]]
            if lead_time_list:
                start_time = lead_time_list[-1]
            else:
                start_time = result[columns[0]][-1]
            for i in range(nlead):
                lead_list += [[self.fpredict(lead_list)]]
                lead_time_list += [start_time + (i + 1) * deltat]
            dynamic_forecast = (
                [result[columns[0]][-nlast - 1]] + lead_time_list,
                [result[columns[1]][-nlast - 1]]
                + [elem[0] for elem in lead_list[-nlast - nlead :]],
            )
            lower_d, upper_d = [], []
            for i in range(len(dynamic_forecast[1])):
                # Band widens with horizon: sqrt growth per step, or per
                # season for a purely seasonal model.
                if (
                    self.parameters["s"] > 0
                    and self.parameters["p"] == 0
                    and self.parameters["d"] == 0
                    and self.parameters["q"] == 0
                ):
                    delta_error = error_eps * math.sqrt(
                        int(i / self.parameters["s"]) + 1
                    )
                else:
                    delta_error = error_eps * math.sqrt(i + 1)
                lower_d += [float(dynamic_forecast[1][i]) - delta_error]
                upper_d += [float(dynamic_forecast[1][i]) + delta_error]
        else:
            lower_d, upper_d, dynamic_forecast = [], [], ([], [])
        alpha = 0.3
        if not (ax):
            fig, ax = plt.subplots()
            if isnotebook():
                fig.set_size_inches(10, 6)
            ax.grid()
        colors = gen_colors()
        # param1: observed series; param2: one-step forecast; param3: dynamic.
        param1 = {
            "color": colors[2],
            "linewidth": 2,
        }
        param2 = {
            "color": colors[3],
            "linewidth": 2,
            "linestyle": ":",
        }
        param3 = {
            "color": colors[0],
            "linewidth": 2,
            "linestyle": "dashed",
        }
        if dynamic:
            ax.fill_between(
                dynamic_forecast[0],
                1.02
                * float(min(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
                1.02
                * float(max(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
                alpha=0.04,
                color=updated_dict(param3, style_kwds, 2)["color"],
            )
            if confidence:
                ax.fill_between(
                    dynamic_forecast[0], lower_d, upper_d, alpha=0.08, color="#555555"
                )
                ax.plot(dynamic_forecast[0], lower_d, alpha=0.08, color="#000000")
                ax.plot(dynamic_forecast[0], upper_d, alpha=0.08, color="#000000")
            ax.plot(
                dynamic_forecast[0],
                dynamic_forecast[1],
                label="Dynamic Forecast",
                **updated_dict(param3, style_kwds, 2),
            )
        if one_step:
            if confidence:
                ax.fill_between(
                    one_step_ahead[0][delta_limit:],
                    lower_osa[delta_limit:],
                    upper_osa[delta_limit:],
                    alpha=0.04,
                    color="#555555",
                )
                ax.plot(
                    one_step_ahead[0][delta_limit:],
                    lower_osa[delta_limit:],
                    alpha=0.04,
                    color="#000000",
                )
                ax.plot(
                    one_step_ahead[0][delta_limit:],
                    upper_osa[delta_limit:],
                    alpha=0.04,
                    color="#000000",
                )
            ax.plot(
                one_step_ahead[0][delta_limit:],
                one_step_ahead[1][delta_limit:],
                label="One-step ahead Forecast",
                **updated_dict(param2, style_kwds, 1),
            )
        if observed:
            ax.plot(
                true_value[0][delta_limit:],
                true_value[1][delta_limit:],
                label="Observed",
                **updated_dict(param1, style_kwds, 0),
            )
        ax.set_title(
            "SARIMAX({},{},{})({},{},{})_{}".format(
                self.parameters["p"],
                self.parameters["d"],
                self.parameters["q"],
                self.parameters["P"],
                self.parameters["D"],
                self.parameters["Q"],
                self.parameters["s"],
            )
        )
        ax.set_xlabel(ts)
        ax.legend(loc="center left", bbox_to_anchor=[1, 0.5])
        ax.set_ylim(
            1.02 * float(min(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
            1.02 * float(max(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
        )
        for tick in ax.get_xticklabels():
            tick.set_rotation(90)
        return ax
# ---#
    def predict(
        self,
        vdf: vDataFrame,
        y: str = "",
        ts: str = "",
        X: list = [],
        nlead: int = 0,
        name: str = "",
    ):
        """
        ---------------------------------------------------------------------------
        Predicts using the input relation.

        Parameters
        ----------
        vdf: vDataFrame
            Object to use to run the prediction.
        y: str, optional
            Response column.
        ts: str, optional
            vcolumn used to order the data.
        X: list, optional
            exogenous vcolumns.
        nlead: int, optional
            Number of records to predict after the last ts date.
        name: str, optional
            Name of the added vcolumn. If empty, a name will be generated.

        Returns
        -------
        vDataFrame
            object including the prediction.
        """
        check_types(
            [
                ("name", name, [str],),
                ("y", y, [str],),
                ("ts", ts, [str],),
                ("X", X, [list],),
                ("nlead", nlead, [int, float],),
                ("vdf", vdf, [vDataFrame],),
            ],
        )
        # Default to the columns recorded at fit time.
        if not (y):
            y = self.y
        if not (ts):
            ts = self.ts
        if not (X):
            X = self.exogenous
        columns_check([y, ts], vdf)
        y, ts = vdf_columns_names([y, ts], vdf)
        # Auto-generate a prediction column name when none is given.
        name = (
            "{}_".format(self.type) + "".join(ch for ch in self.name if ch.isalnum())
            if not (name)
            else name
        )
        key_columns = ", " + ", ".join(vdf.get_columns(exclude_columns=[y]))
        # Substitute the fit-time placeholders ([VerticaPy_y], [VerticaPy_ts],
        # [VerticaPy_key_columns], [X{i}]) with the actual column names.
        transform_relation = self.transform_relation.replace(
            "[VerticaPy_y]", y
        ).replace("[VerticaPy_ts]", ts)
        transform_relation = transform_relation.replace(
            "[VerticaPy_key_columns]", key_columns
        )
        predictSQL = self.deploySQL().replace("[VerticaPy_y]", y).replace(
            "[VerticaPy_ts]", ts
        ) + " AS {}".format(name)
        for idx, elem in enumerate(X):
            transform_relation = transform_relation.replace("[X{}]".format(idx), elem)
            predictSQL = predictSQL.replace("[X{}]".format(idx), elem)
        columns = (
            vdf.get_columns(exclude_columns=[y])
            + [predictSQL]
            + ["VerticaPy_y_copy AS {}".format(y)]
        )
        relation = vdf.__genSQL__()
        # Lead loop: append nlead future rows one at a time. Each iteration
        # infers the time step from the last two rows, predicts the next
        # value, and UNIONs it into the relation so the following iteration
        # can build on it.
        for i in range(nlead):
            query = "SELECT ({} - LAG({}, 1) OVER (ORDER BY {}))::VARCHAR FROM {} ORDER BY {} DESC LIMIT 1".format(
                ts, ts, ts, relation, ts
            )
            deltat = vdf._VERTICAPY_VARIABLES_["cursor"].execute(query).fetchone()[0]
            query = "SELECT (MAX({}) + '{}'::interval)::VARCHAR FROM {}".format(
                ts, deltat, relation
            )
            next_t = vdf._VERTICAPY_VARIABLES_["cursor"].execute(query).fetchone()[0]
            if i == 0:
                # Remember the first forecasted timestamp to NULL-out the
                # appended rows' response column at the end.
                first_t = next_t
            new_line = "SELECT '{}'::TIMESTAMP AS {}, {}".format(
                next_t,
                ts,
                ", ".join(
                    [
                        "NULL AS {}".format(column)
                        for column in vdf.get_columns(exclude_columns=[ts])
                    ]
                ),
            )
            relation_tmp = "(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE".format(
                ", ".join([ts] + vdf.get_columns(exclude_columns=[ts])),
                relation,
                new_line,
            )
            query = "SELECT {} FROM {} ORDER BY {} DESC LIMIT 1".format(
                self.deploySQL()
                .replace("[VerticaPy_y]", y)
                .replace("[VerticaPy_ts]", ts),
                transform_relation.format(relation_tmp),
                ts,
            )
            prediction = (
                vdf._VERTICAPY_VARIABLES_["cursor"].execute(query).fetchone()[0]
            )
            columns_tmp = vdf.get_columns(exclude_columns=[ts, y])
            new_line = "SELECT '{}'::TIMESTAMP AS {}, {} AS {} {}".format(
                next_t,
                ts,
                prediction,
                y,
                (", " if (columns_tmp) else "")
                + ", ".join(["NULL AS {}".format(column) for column in columns_tmp]),
            )
            relation = "(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE".format(
                ", ".join([ts, y] + vdf.get_columns(exclude_columns=[ts, y])),
                relation,
                new_line,
            )
        final_relation = "(SELECT {} FROM {}) VERTICAPY_SUBTABLE".format(
            ", ".join(columns), transform_relation.format(relation)
        )
        result = vdf_from_relation(final_relation, "SARIMAX", self.cursor,)
        if nlead > 0:
            # For the appended forecast rows, the 'observed' y is synthetic:
            # blank it out so only the prediction column carries the values.
            result[y].apply(
                "CASE WHEN {} >= '{}' THEN NULL ELSE {} END".format(ts, first_t, "{}")
            )
        return result
# ---#
class VAR(Regressor):
"""
---------------------------------------------------------------------------
[Beta Version]
Creates an VAR object using the Vertica Linear Regression algorithm on the
data.
Parameters
----------
name: str
Name of the the model. The model will be stored in the DB.
cursor: DBcursor, optional
Vertica database cursor.
p: int, optional
Order of the AR (Auto-Regressive) part.
tol: float, optional
Determines whether the algorithm has reached the specified accuracy result.
max_iter: int, optional
Determines the maximum number of iterations the algorithm performs before
achieving the specified accuracy result.
solver: str, optional
The optimizer method to use to train the model.
Newton : Newton Method
BFGS : Broyden Fletcher Goldfarb Shanno
"""
def __init__(
self,
name: str,
cursor=None,
p: int = 1,
tol: float = 1e-4,
max_iter: int = 1000,
solver: str = "Newton",
):
check_types([("name", name, [str],)])
self.type, self.name = "VAR", name
assert p > 0, ParameterError(
"Parameter 'p' must be greater than 0 to build a VAR model."
)
self.set_params(
{"p": p, "tol": tol, "max_iter": max_iter, "solver": solver,}
)
cursor = check_cursor(cursor)[0]
self.cursor = cursor
version(cursor=cursor, condition=[8, 0, 0])
# ---#
def deploySQL(self):
"""
---------------------------------------------------------------------------
Returns the SQL code needed to deploy the model.
Returns
-------
str
the SQL code needed to deploy the model.
"""
sql = []
for idx, coefs in enumerate(self.coef_):
coefs_tmp = coefs.values["coefficient"]
predictors_tmp = coefs.values["predictor"]
sql += [
str(coefs_tmp[0])
+ " + "
+ " + ".join(
[
str(coefs_tmp[i]) + " * " + str(predictors_tmp[i])
for i in range(1, len(coefs_tmp))
]
)
]
return sql
# ---#
    def features_importance(
        self, X_idx: int = 0, ax=None, show: bool = True, **style_kwds,
    ):
        """
        ---------------------------------------------------------------------------
        Computes the model's features importance.

        Parameters
        ----------
        X_idx: int/str, optional
            Index of the main vector vcolumn used to draw the features importance.
            It can also be the name of a predictor vcolumn.
        ax: Matplotlib axes object, optional
            The axes to plot on.
        show: bool
            If set to True, draw the features importance.
        **style_kwds
            Any optional parameter to pass to the Matplotlib functions.

        Returns
        -------
        ax
            Matplotlib axes object
        """
        check_types([("X_idx", X_idx, [int, float, str],), ("show", show, [bool],),],)
        if isinstance(X_idx, str):
            # Resolve a column name to its positional index in self.X.
            X_idx = str_column(X_idx).lower()
            for idx, elem in enumerate(self.X):
                if str_column(elem).lower() == X_idx:
                    X_idx = idx
                    break
        assert (
            isinstance(X_idx, (float, int)) and len(self.X) > X_idx >= 0
        ), ParameterError(
            "The index of the vcolumn to draw 'X_idx' must be between 0 and {}. It can also be the name of a predictor vcolumn.".format(
                len(self.X)
            )
        )
        # NOTE(review): 'relation' is built from test_relation here but never
        # used below — min/max are computed from self.input_relation instead.
        # Confirm whether the test relation was intended for the ranges.
        relation = self.transform_relation.replace("[VerticaPy_ts]", self.ts).format(
            self.test_relation
        )
        for idx, elem in enumerate(self.X):
            relation = relation.replace("[X{}]".format(idx), elem)
        min_max = (
            vdf_from_relation(relation=self.input_relation, cursor=self.cursor)
            .agg(func=["min", "max"], columns=self.X)
            .transpose()
        )
        coefficient = self.coef_[X_idx].values
        coeff_importances = {}
        coeff_sign = {}
        # Importance of each lag predictor = |coef| scaled by the observed
        # range of the underlying column (skip index 0: the intercept).
        for idx, coef in enumerate(coefficient["predictor"]):
            if idx > 0:
                predictor = int(coef.split("_")[0].replace("ar", ""))
                predictor = str_column(self.X[predictor])
                minimum, maximum = min_max[predictor]
                val = coefficient["coefficient"][idx]
                coeff_importances[coef] = abs(val) * (maximum - minimum)
                coeff_sign[coef] = 1 if val >= 0 else -1
        # Normalize importances to percentages.
        total = sum([coeff_importances[elem] for elem in coeff_importances])
        for elem in coeff_importances:
            coeff_importances[elem] = 100 * coeff_importances[elem] / total
        if show:
            plot_importance(
                coeff_importances, coeff_sign, print_legend=True, ax=ax, **style_kwds,
            )
        importances = {"index": ["importance", "sign"]}
        for elem in coeff_importances:
            importances[elem] = [coeff_importances[elem], coeff_sign[elem]]
        return tablesample(values=importances).transpose()
# ---#
    def fit(
        self,
        input_relation: Union[vDataFrame, str],
        X: list,
        ts: str,
        test_relation: Union[vDataFrame, str] = "",
    ):
        """
        ---------------------------------------------------------------------------
        Trains the model.

        Parameters
        ----------
        input_relation: str/vDataFrame
            Training relation.
        X: list
            List of the response columns.
        ts: str
            vcolumn used to order the data.
        test_relation: str/vDataFrame, optional
            Relation used to test the model.

        Returns
        -------
        object
            self
        """
        check_types(
            [
                ("input_relation", input_relation, [str, vDataFrame],),
                ("X", X, [list],),
                ("ts", ts, [str],),
                ("test_relation", test_relation, [str, vDataFrame],),
            ]
        )
        self.cursor = check_cursor(self.cursor, input_relation, True)[0]
        # Initialization
        does_model_exist(name=self.name, cursor=self.cursor, raise_error=True)
        self.input_relation = (
            input_relation
            if isinstance(input_relation, str)
            else input_relation.__genSQL__()
        )
        if isinstance(test_relation, vDataFrame):
            self.test_relation = test_relation.__genSQL__()
        elif test_relation:
            self.test_relation = test_relation
        else:
            # No test relation given: evaluate on the training relation.
            self.test_relation = self.input_relation
        self.ts, self.deploy_predict_ = str_column(ts), []
        self.X, schema = [str_column(elem) for elem in X], schema_relation(self.name)[0]
        model = LinearRegression(
            name=self.name,
            solver=self.parameters["solver"],
            max_iter=self.parameters["max_iter"],
            tol=self.parameters["tol"],
        )
        # AR(p): one LAG column per (response column idx, lag i) pair, named
        # AR{idx}_{i}. [X{idx}] and [VerticaPy_ts] are placeholders replaced
        # with the real column names below.
        columns, AR = [], []
        for idx, elem in enumerate(self.X):
            for i in range(1, self.parameters["p"] + 1):
                columns += [
                    "LAG([X{}], {}) OVER (ORDER BY [VerticaPy_ts]) AS AR{}_{}".format(
                        idx, i, idx, i
                    )
                ]
                AR += ["AR{}_{}".format(idx, i)]
        self.transform_relation = "(SELECT *, {} FROM {}) VERTICAPY_SUBTABLE".format(
            ", ".join(columns), "{}"
        )
        relation = self.transform_relation.replace("[VerticaPy_ts]", self.ts).format(
            self.input_relation
        )
        for idx, elem in enumerate(self.X):
            relation = relation.replace("[X{}]".format(idx), elem)
        # Best-effort drop of the session-scoped temporary view; errors are
        # deliberately ignored.
        def drop_temp_elem(self, schema):
            try:
                with warnings.catch_warnings(record=True) as w:
                    drop(
                        "{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}".format(
                            schema, get_session(self.cursor)
                        ),
                        cursor=self.cursor,
                        method="view",
                    )
            except:
                pass
        drop_temp_elem(self, schema)
        try:
            query = "CREATE VIEW {}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{} AS SELECT * FROM {}".format(
                schema, get_session(self.cursor), relation
            )
            self.cursor.execute(query)
            # One linear regression per response column, all sharing the same
            # lagged-predictor design matrix AR.
            self.coef_ = []
            for elem in X:
                model.fit(
                    input_relation="{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}".format(
                        schema, get_session(self.cursor)
                    ),
                    X=AR,
                    y=elem,
                )
                self.coef_ += [model.coef_]
                model.drop()
        except:
            # Clean up the temp view before re-raising the training error.
            drop_temp_elem(self, schema)
            raise
        drop_temp_elem(self, schema)
        # Persist the fitted model in the VerticaPy model catalog.
        model_save = {
            "type": "VAR",
            "input_relation": self.input_relation,
            "test_relation": self.test_relation,
            "transform_relation": self.transform_relation,
            "deploy_predict": self.deploy_predict_,
            "X": self.X,
            "ts": self.ts,
            "p": self.parameters["p"],
            "tol": self.parameters["tol"],
            "max_iter": self.parameters["max_iter"],
            "solver": self.parameters["solver"],
        }
        for idx, elem in enumerate(self.coef_):
            model_save["coef_{}".format(idx)] = elem.values
        insert_verticapy_schema(
            model_name=self.name,
            model_type="VAR",
            model_save=model_save,
            cursor=self.cursor,
        )
        return self
# ---#
def fpredict(self, L: list):
"""
---------------------------------------------------------------------------
Computes the prediction.
Parameters
----------
L: list
List containing the data. It must be a two-dimensional list containing
multiple rows. Each row must include as first element the ordered predictor
and as nth elements the nth - 1 exogenous variable (nth > 2).
Returns
-------
float
the prediction.
"""
try:
result = []
result_tmp = 0
for i in range(len(self.X)):
result_tmp = 0
for j in range(len(self.coef_[i].values["coefficient"])):
elem = self.coef_[i].values["predictor"][j]
if elem.lower() == "intercept":
result_tmp += self.coef_[i].values["coefficient"][j]
else:
ni, nj = elem[2:].split("_")
ni, nj = int(ni), int(nj)
result_tmp += (
self.coef_[i].values["coefficient"][j] * L[-nj][ni]
)
result += [result_tmp]
return result
except:
return None
# ---#
    def plot(
        self,
        vdf: vDataFrame = None,
        X: list = [],
        ts: str = "",
        X_idx: int = 0,
        dynamic: bool = False,
        one_step: bool = True,
        observed: bool = True,
        confidence: bool = True,
        nlead: int = 10,
        nlast: int = 0,
        limit: int = 1000,
        ax=None,
        **style_kwds,
    ):
        """
        ---------------------------------------------------------------------------
        Draws the VAR model.
        Parameters
        ----------
        vdf: vDataFrame
            Object to use to run the prediction.
        X: list, optional
            List of the response columns.
        ts: str, optional
            vcolumn used to order the data.
        X_idx: int, optional
            Index of the main vector vcolumn to draw. It can also be the name of a
            predictor vcolumn.
        dynamic: bool, optional
            If set to True, the dynamic forecast will be drawn.
        one_step: bool, optional
            If set to True, the one step ahead forecast will be drawn.
        observed: bool, optional
            If set to True, the observation will be drawn.
        confidence: bool, optional
            If set to True, the confidence ranges will be drawn.
        nlead: int, optional
            Number of predictions computed by the dynamic forecast after
            the last ts date.
        nlast: int, optional
            The dynamic forecast will start nlast values before the last
            ts date.
        limit: int, optional
            Maximum number of past elements to use.
        ax: Matplotlib axes object, optional
            The axes to plot on.
        **style_kwds
            Any optional parameter to pass to the Matplotlib functions.
        Returns
        -------
        ax
            Matplotlib axes object
        """
        # fall back to the training relation when no vDataFrame is given
        if not (vdf):
            vdf = vdf_from_relation(relation=self.input_relation, cursor=self.cursor)
        # validate user-supplied parameter types
        check_types(
            [
                ("limit", limit, [int, float],),
                ("nlead", nlead, [int, float],),
                ("X_idx", X_idx, [int, float, str],),
                ("dynamic", dynamic, [bool],),
                ("observed", observed, [bool],),
                ("one_step", one_step, [bool],),
                ("confidence", confidence, [bool],),
                ("vdf", vdf, [vDataFrame],),
            ],
        )
        # widen 'limit' so at least p + 1 + nlast (and at least 200) rows are
        # fetched; 'delta_limit' is later used to skip the extra rows when drawing
        delta_limit, limit = (
            limit,
            max(max(limit, self.parameters["p"] + 1 + nlast), 200),
        )
        delta_limit = max(limit - delta_limit - nlast, 0)
        # default to the time column / response columns used at fit time
        if not (ts):
            ts = self.ts
        if not (X):
            X = self.X
        assert dynamic or one_step or observed, ParameterError(
            "No option selected.\n You should set either dynamic, one_step or observed to True."
        )
        assert nlead + nlast > 0 or not (dynamic), ParameterError(
            "Dynamic Plots are only possible if either parameter 'nlead' is greater than 0 or parameter 'nlast' is greater than 0, and parameter 'dynamic' is set to True."
        )
        # allow 'X_idx' to be given by column name instead of position
        if isinstance(X_idx, str):
            X_idx = str_column(X_idx).lower()
            for idx, elem in enumerate(X):
                if str_column(elem).lower() == X_idx:
                    X_idx = idx
                    break
        assert (
            isinstance(X_idx, (float, int)) and len(self.X) > X_idx >= 0
        ), ParameterError(
            "The index of the vcolumn to draw 'X_idx' must be between 0 and {}. It can also be the name of a predictor vcolumn.".format(
                len(self.X)
            )
        )
        # one-step-ahead predictions for every response column
        result_all = self.predict(
            vdf=vdf,
            X=X,
            ts=ts,
            nlead=0,
            name=[
                "_verticapy_prediction_{}_".format(idx) for idx in range(len(self.X))
            ],
        )
        y, prediction = X[X_idx], "_verticapy_prediction_{}_".format(X_idx)
        # 1.96 * RMSE ~ 95% confidence band half-width
        error_eps = 1.96 * math.sqrt(self.score(method="mse").values["mse"][X_idx])
        # temporarily silence verbose output while fetching the data
        print_info = verticapy.options["print_info"]
        verticapy.options["print_info"] = False
        try:
            result = (
                result_all.select([ts, y, prediction])
                .dropna()
                .sort([ts])
                .tail(limit)
                .values
            )
        except:
            verticapy.options["print_info"] = print_info
            raise
        verticapy.options["print_info"] = print_info
        columns = [elem for elem in result]
        # parse string timestamps into datetime objects for plotting
        if isinstance(result[columns[0]][0], str):
            result[columns[0]] = [parse(elem) for elem in result[columns[0]]]
        # observed series and its one-step-ahead forecast (time, value) pairs
        true_value = [result[columns[0]], result[columns[1]]]
        one_step_ahead = [result[columns[0]], result[columns[2]]]
        # constant-width confidence band around the one-step forecast
        lower_osa, upper_osa = (
            [
                float(elem) - error_eps if elem != None else None
                for elem in one_step_ahead[1]
            ],
            [
                float(elem) + error_eps if elem != None else None
                for elem in one_step_ahead[1]
            ],
        )
        if dynamic:
            # fetch the full multivariate history needed to seed the recursion
            print_info = verticapy.options["print_info"]
            verticapy.options["print_info"] = False
            try:
                result = (
                    result_all.select([ts] + X).dropna().sort([ts]).tail(limit).values
                )
            except:
                verticapy.options["print_info"] = print_info
                raise
            verticapy.options["print_info"] = print_info
            columns = [elem for elem in result]
            if isinstance(result[columns[0]][0], str):
                result[columns[0]] = [parse(elem) for elem in result[columns[0]]]
            # time step inferred from the last two timestamps
            deltat = result[columns[0]][-1] - result[columns[0]][-2]
            lead_time_list, lead_list = [], []
            # seed rows; when nlast > 0 the last nlast rows are withheld and
            # re-forecast recursively below
            if nlast > 0:
                for i in range(len(result[columns[0]][:-nlast])):
                    lead_list += [[result[elem][i] for elem in columns[1:]]]
            else:
                for i in range(len(result[columns[0]])):
                    lead_list += [[result[elem][i] for elem in columns[1:]]]
            for i in range(nlast):
                lead_list += [self.fpredict(lead_list)]
                lead_time_list += [result[columns[0]][i - nlast]]
            if lead_time_list:
                start_time = lead_time_list[-1]
            else:
                start_time = result[columns[0]][-1]
            # forecast nlead steps past the last timestamp
            for i in range(nlead):
                lead_list += [self.fpredict(lead_list)]
                lead_time_list += [start_time + (i + 1) * deltat]
            # prepend the last known point so the curve connects to the data
            dynamic_forecast = (
                [result[columns[0]][-nlast - 1]] + lead_time_list,
                [result[columns[1 + X_idx]][-nlast - 1]]
                + [elem[X_idx] for elem in lead_list[-nlast - nlead :]],
            )
            # confidence band widens with the forecast horizon (sqrt growth)
            lower_d, upper_d = [], []
            for i in range(len(dynamic_forecast[1])):
                delta_error = error_eps * math.sqrt(i + 1)
                lower_d += [float(dynamic_forecast[1][i]) - delta_error]
                upper_d += [float(dynamic_forecast[1][i]) + delta_error]
        else:
            lower_d, upper_d, dynamic_forecast = [], [], ([], [])
        # NOTE(review): 'alpha' appears unused below - presumably leftover; confirm
        alpha = 0.3
        if not (ax):
            fig, ax = plt.subplots()
            if isnotebook():
                fig.set_size_inches(10, 6)
            ax.grid()
        # default line styles for the three curves (overridable via style_kwds)
        colors = gen_colors()
        param1 = {
            "color": colors[2],
            "linewidth": 2,
        }
        param2 = {
            "color": colors[3],
            "linewidth": 2,
            "linestyle": ":",
        }
        param3 = {
            "color": colors[0],
            "linewidth": 2,
            "linestyle": "dashed",
        }
        if dynamic:
            # light background band marking the dynamic-forecast region
            ax.fill_between(
                dynamic_forecast[0],
                1.02
                * float(min(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
                1.02
                * float(max(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
                alpha=0.04,
                color=updated_dict(param3, style_kwds, 2)["color"],
            )
            if confidence:
                ax.fill_between(
                    dynamic_forecast[0], lower_d, upper_d, alpha=0.08, color="#555555"
                )
                ax.plot(dynamic_forecast[0], lower_d, alpha=0.08, color="#000000")
                ax.plot(dynamic_forecast[0], upper_d, alpha=0.08, color="#000000")
            ax.plot(
                dynamic_forecast[0],
                dynamic_forecast[1],
                label="Dynamic Forecast",
                **updated_dict(param3, style_kwds, 2),
            )
        if one_step:
            if confidence:
                ax.fill_between(
                    one_step_ahead[0][delta_limit:],
                    lower_osa[delta_limit:],
                    upper_osa[delta_limit:],
                    alpha=0.04,
                    color="#555555",
                )
                ax.plot(
                    one_step_ahead[0][delta_limit:],
                    lower_osa[delta_limit:],
                    alpha=0.04,
                    color="#000000",
                )
                ax.plot(
                    one_step_ahead[0][delta_limit:],
                    upper_osa[delta_limit:],
                    alpha=0.04,
                    color="#000000",
                )
            ax.plot(
                one_step_ahead[0][delta_limit:],
                one_step_ahead[1][delta_limit:],
                label="One-step ahead Forecast",
                **updated_dict(param2, style_kwds, 1),
            )
        if observed:
            ax.plot(
                true_value[0][delta_limit:],
                true_value[1][delta_limit:],
                label="Observed",
                **updated_dict(param1, style_kwds, 0),
            )
        # title, axis labels, legend and y-limits across all drawn series
        ax.set_title("VAR({}) [{}]".format(self.parameters["p"], y))
        ax.set_xlabel(ts)
        ax.legend(loc="center left", bbox_to_anchor=[1, 0.5])
        ax.set_ylim(
            1.02 * float(min(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
            1.02 * float(max(true_value[1] + dynamic_forecast[1] + one_step_ahead[1])),
        )
        for tick in ax.get_xticklabels():
            tick.set_rotation(90)
        return ax
# ---#
    def predict(
        self,
        vdf: vDataFrame,
        X: list = [],
        ts: str = "",
        nlead: int = 0,
        name: list = [],
    ):
        """
        ---------------------------------------------------------------------------
        Predicts using the input relation.
        Parameters
        ----------
        vdf: vDataFrame
            Object to use to run the prediction.
        X: list, optional
            List of the response columns.
        ts: str, optional
            vcolumn used to order the data.
        nlead: int, optional
            Number of records to predict after the last ts date.
        name: list, optional
            Names of the added vcolumns. If empty, names will be generated.
        Returns
        -------
        vDataFrame
            object including the prediction.
        """
        # validate user-supplied parameter types
        check_types(
            [
                ("name", name, [list],),
                ("ts", ts, [str],),
                ("nlead", nlead, [int, float],),
                ("X", X, [list],),
                ("vdf", vdf, [vDataFrame],),
            ],
        )
        # default to the column names used at fit time
        if not (ts):
            ts = self.ts
        if not (X):
            X = self.X
        # check the columns exist and normalize their quoting
        columns_check(X + [ts], vdf)
        X = vdf_columns_names(X, vdf)
        ts = vdf_columns_names([ts], vdf)[0]
        # NOTE(review): 'names' is never used below - presumably leftover
        all_pred, names = [], []
        transform_relation = self.transform_relation.replace("[VerticaPy_ts]", self.ts)
        # build "<prediction SQL> AS <alias>" for every response column and
        # substitute the real column names into the transform relation
        for idx, elem in enumerate(X):
            name_tmp = (
                "{}_".format(self.type) + "".join(ch for ch in elem if ch.isalnum())
                if len(name) != len(X)
                else name[idx]
            )
            all_pred += ["{} AS {}".format(self.deploySQL()[idx], name_tmp)]
            transform_relation = transform_relation.replace("[X{}]".format(idx), elem)
        columns = vdf.get_columns() + all_pred
        relation = vdf.__genSQL__()
        # each iteration appends one forecast row to the relation:
        #   1. infer the time step from the last two timestamps
        #   2. append an empty row at the next timestamp
        #   3. score that row and append the predicted values
        for i in range(nlead):
            query = "SELECT ({} - LAG({}, 1) OVER (ORDER BY {}))::VARCHAR FROM {} ORDER BY {} DESC LIMIT 1".format(
                ts, ts, ts, relation, ts
            )
            deltat = vdf._VERTICAPY_VARIABLES_["cursor"].execute(query).fetchone()[0]
            query = "SELECT (MAX({}) + '{}'::interval)::VARCHAR FROM {}".format(
                ts, deltat, relation
            )
            next_t = vdf._VERTICAPY_VARIABLES_["cursor"].execute(query).fetchone()[0]
            if i == 0:
                # first forecast timestamp; used below to blank observed columns
                first_t = next_t
            new_line = "SELECT '{}'::TIMESTAMP AS {}, {}".format(
                next_t,
                ts,
                ", ".join(
                    [
                        "NULL AS {}".format(column)
                        for column in vdf.get_columns(exclude_columns=[ts])
                    ]
                ),
            )
            relation_tmp = "(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE".format(
                ", ".join([ts] + vdf.get_columns(exclude_columns=[ts])),
                relation,
                new_line,
            )
            query = "SELECT {} FROM {} ORDER BY {} DESC LIMIT 1".format(
                ", ".join(self.deploySQL()), transform_relation.format(relation_tmp), ts
            )
            prediction = vdf._VERTICAPY_VARIABLES_["cursor"].execute(query).fetchone()
            for idx, elem in enumerate(X):
                prediction[idx] = "{} AS {}".format(prediction[idx], elem)
            columns_tmp = vdf.get_columns(exclude_columns=[ts] + X)
            new_line = "SELECT '{}'::TIMESTAMP AS {}, {} {}".format(
                next_t,
                ts,
                ", ".join(prediction),
                (", " if (columns_tmp) else "")
                + ", ".join(["NULL AS {}".format(column) for column in columns_tmp]),
            )
            relation = "(SELECT {} FROM {} UNION ALL ({})) VERTICAPY_SUBTABLE".format(
                ", ".join([ts] + X + vdf.get_columns(exclude_columns=[ts] + X)),
                relation,
                new_line,
            )
        # final relation exposing the original columns plus the predictions
        final_relation = "(SELECT {} FROM {}) VERTICAPY_SUBTABLE".format(
            ", ".join(columns), transform_relation.format(relation)
        )
        result = vdf_from_relation(final_relation, "VAR", self.cursor,)
        if nlead > 0:
            # blank the response columns in the appended forecast rows so only
            # the prediction columns carry values there
            for elem in X:
                result[elem].apply(
                    "CASE WHEN {} >= '{}' THEN NULL ELSE {} END".format(
                        ts, first_t, "{}"
                    )
                )
        return result
| verticapy/learn/tsa.py | 69,298 | ---------------------------------------------------------------------------
[Beta Version]
Creates an SARIMAX object using the Vertica Linear Regression algorithm on
the data.
Parameters
----------
name: str
Name of the model. The model will be stored in the DB.
cursor: DBcursor, optional
Vertica database cursor.
p: int, optional
Order of the AR (Auto-Regressive) part.
d: int, optional
Order of the I (Integrated) part.
q: int, optional
Order of the MA (Moving-Average) part.
P: int, optional
Order of the seasonal AR (Auto-Regressive) part.
D: int, optional
Order of the seasonal I (Integrated) part.
Q: int, optional
Order of the seasonal MA (Moving-Average) part.
s: int, optional
Span of the seasonality.
tol: float, optional
Determines whether the algorithm has reached the specified accuracy result.
max_iter: int, optional
Determines the maximum number of iterations the algorithm performs before
achieving the specified accuracy result.
solver: str, optional
The optimizer method to use to train the model.
Newton : Newton Method
BFGS : Broyden Fletcher Goldfarb Shanno
max_pik: int, optional
Number of inverse MA coefficient used to approximate the MA.
papprox_ma: int, optional
the p of the AR(p) used to approximate the MA coefficients.
---------------------------------------------------------------------------
[Beta Version]
Creates an VAR object using the Vertica Linear Regression algorithm on the
data.
Parameters
----------
name: str
Name of the model. The model will be stored in the DB.
cursor: DBcursor, optional
Vertica database cursor.
p: int, optional
Order of the AR (Auto-Regressive) part.
tol: float, optional
Determines whether the algorithm has reached the specified accuracy result.
max_iter: int, optional
Determines the maximum number of iterations the algorithm performs before
achieving the specified accuracy result.
solver: str, optional
The optimizer method to use to train the model.
Newton : Newton Method
BFGS : Broyden Fletcher Goldfarb Shanno
---------------------------------------------------------------------------
Returns the SQL code needed to deploy the model.
Returns
-------
str
the SQL code needed to deploy the model.
---------------------------------------------------------------------------
Returns the SQL code needed to deploy the model.
Returns
-------
str
the SQL code needed to deploy the model.
---------------------------------------------------------------------------
Computes the model's features importance.
Parameters
----------
X_idx: int/str, optional
Index of the main vector vcolumn used to draw the features importance.
It can also be the name of a predictor vcolumn.
ax: Matplotlib axes object, optional
The axes to plot on.
show: bool
If set to True, draw the features importance.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
---------------------------------------------------------------------------
Trains the model.
Parameters
----------
input_relation: str/vDataFrame
Training relation.
y: str
Response column.
ts: str
vcolumn used to order the data.
X: list, optional
exogenous columns used to fit the model.
test_relation: str/vDataFrame, optional
Relation used to test the model.
Returns
-------
object
model
---------------------------------------------------------------------------
Trains the model.
Parameters
----------
input_relation: str/vDataFrame
Training relation.
X: list
List of the response columns.
ts: str
vcolumn used to order the data.
test_relation: str/vDataFrame, optional
Relation used to test the model.
Returns
-------
object
self
---------------------------------------------------------------------------
Computes the prediction.
Parameters
----------
L: list
List containing the data. It must be a two-dimensional list containing
multiple rows. Each row must include as first element the ordered predictor
and as nth elements the nth - 1 exogenous variable (nth > 2).
Returns
-------
float
the prediction.
---------------------------------------------------------------------------
Computes the prediction.
Parameters
----------
L: list
List containing the data. It must be a two-dimensional list containing
multiple rows. Each row must include as first element the ordered predictor
and as nth elements the nth - 1 exogenous variable (nth > 2).
Returns
-------
float
the prediction.
---------------------------------------------------------------------------
Draws the SARIMAX model.
Parameters
----------
vdf: vDataFrame, optional
Object to use to run the prediction.
y: str, optional
Response column.
ts: str, optional
vcolumn used to order the data.
X: list, optional
exogenous vcolumns.
dynamic: bool, optional
If set to True, the dynamic forecast will be drawn.
one_step: bool, optional
If set to True, the one step ahead forecast will be drawn.
observed: bool, optional
If set to True, the observation will be drawn.
confidence: bool, optional
If set to True, the confidence ranges will be drawn.
nlead: int, optional
Number of predictions computed by the dynamic forecast after
the last ts date.
nlast: int, optional
The dynamic forecast will start nlast values before the last
ts date.
limit: int, optional
Maximum number of past elements to use.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
---------------------------------------------------------------------------
Draws the VAR model.
Parameters
----------
vdf: vDataFrame
Object to use to run the prediction.
X: list, optional
List of the response columns.
ts: str, optional
vcolumn used to order the data.
X_idx: int, optional
Index of the main vector vcolumn to draw. It can also be the name of a
predictor vcolumn.
dynamic: bool, optional
If set to True, the dynamic forecast will be drawn.
one_step: bool, optional
If set to True, the one step ahead forecast will be drawn.
observed: bool, optional
If set to True, the observation will be drawn.
confidence: bool, optional
If set to True, the confidence ranges will be drawn.
nlead: int, optional
Number of predictions computed by the dynamic forecast after
the last ts date.
nlast: int, optional
The dynamic forecast will start nlast values before the last
ts date.
limit: int, optional
Maximum number of past elements to use.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
---------------------------------------------------------------------------
Predicts using the input relation.
Parameters
----------
vdf: vDataFrame
Object to use to run the prediction.
y: str, optional
Response column.
ts: str, optional
vcolumn used to order the data.
X: list, optional
exogenous vcolumns.
nlead: int, optional
Number of records to predict after the last ts date.
name: str, optional
Name of the added vcolumn. If empty, a name will be generated.
Returns
-------
vDataFrame
object including the prediction.
---------------------------------------------------------------------------
Predicts using the input relation.
Parameters
----------
vdf: vDataFrame
Object to use to run the prediction.
X: list, optional
List of the response columns.
ts: str, optional
vcolumn used to order the data.
nlead: int, optional
Number of records to predict after the last ts date.
name: list, optional
Names of the added vcolumns. If empty, names will be generated.
Returns
-------
vDataFrame
object including the prediction.
(c) Copyright [2018-2021] Micro Focus or one of its affiliates. Licensed under the Apache License, Version 2.0 (the "License"); You may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. |_ |~) _ _| _ /~\ _ |. |_)\/ |_)(_|(_|| \_/|_|(_||| / ____________ ______ / __ `\ / / | \/ / / / |______ / / / |____/ / / _____________ / / \ / / / \ / / / \_______/ / / ______ / / \ / / / \ / / / \/ / / / / / / \ / \ / \/ _ \ / _ __|_. _ _ |_) \/ (/_| | |(_(_|| \/ / VerticaPy is a Python library with scikit-like functionality to use to conduct data science projects on data stored in Vertica, taking advantage Verticaâs speed and built-in analytics and machine learning features. It supports the entire data science life cycle, uses a âpipelineâ mechanism to sequentialize data transformation operations, and offers beautiful graphical options. VerticaPy aims to solve all of these problems. The idea is simple: instead of moving data around for processing, VerticaPy brings the logic to the data. Modules Standard Python Modules VerticaPy Modules Other Python Modules --- --- --- --- Initialization I(d) AR(p) MA(q) --- --- --- --- --- --- Initialization AR(p) --- --- --- | 10,095 | en | 0.523928 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 14 08:23:08 2017
Author: Zachary W. Mikus
"""
#These are testing variables
d1 = {1:30, 2:20, 3:30, 5:80}  #shares keys 1, 2, 3 with d2; key 5 is unique
d2 = {1:40, 2:50, 3:60, 4:70}  #shares keys 1, 2, 3 with d1; key 4 is unique
def f(x, y):
    #Combines the values stored under a shared key; intersect() applies this
    #function to every key present in both dictionaries
    return x + y
def commonKeys(longerList, shorterList):
    #Returns the keys that appear in both input lists, preserving the order
    #of longerList.
    #A set gives O(1) membership tests instead of the O(n) scan a list needs,
    #so the overall cost drops from O(n*m) to O(n + m).
    shorterSet = set(shorterList)
    commonKeyList = [key for key in longerList if key in shorterSet]
    return commonKeyList
def differentKeys(longerList, shorterList):
    #Returns the keys that appear in exactly one of the two lists:
    #first the keys unique to longerList (in its order), then the keys
    #unique to shorterList (in its order).
    #Sets give O(1) membership tests instead of the O(n) list scans of the
    #index-based version.
    longerSet = set(longerList)
    shorterSet = set(shorterList)
    differentKeyList = [key for key in longerList if key not in shorterSet]
    differentKeyList += [key for key in shorterList if key not in longerSet]
    return differentKeyList
def intersect(commonList, d1, d2):
    #Builds the intersection dictionary: for every key present in both input
    #dictionaries, stores f(d1[key], d2[key]) - the combination of the two
    #values under that key.
    #A dict comprehension replaces the index-based loop of the original.
    intersectDict = {key: f(d1[key], d2[key]) for key in commonList}
    return intersectDict
def difference(differentKeyList, d1, d2):
    #Builds the difference dictionary: every key that exists in only one of
    #the two dictionaries is mapped to its value from that dictionary.
    #Iterates the keys directly instead of indexing with range(len(...)).
    differenceDict = {}
    for key in differentKeyList:
        #exactly one of the two lookups succeeds for a truly unique key;
        #if a key were somehow present in both, d2's value takes precedence
        if key in d1:
            differenceDict[key] = d1[key]
        if key in d2:
            differenceDict[key] = d2[key]
    return differenceDict
def diff_dictionary(d1, d2):
    #Returns a tuple (intersectDict, differenceDict):
    #  intersectDict  - f(v1, v2) for every key present in both dictionaries
    #  differenceDict - the original value for every key present in only one
    keysOfD1 = list(d1.keys())
    keysOfD2 = list(d2.keys())
    #hand the longer key list to the helpers first, as their parameter
    #names expect
    if len(keysOfD1) > len(keysOfD2):
        longerList, shorterList = keysOfD1, keysOfD2
    else:
        longerList, shorterList = keysOfD2, keysOfD1
    #keys shared by both dictionaries, and keys unique to either one
    sharedKeys = commonKeys(longerList, shorterList)
    uniqueKeys = differentKeys(longerList, shorterList)
    #assemble the two result dictionaries from the key partitions
    return (intersect(sharedKeys, d1, d2), difference(uniqueKeys, d1, d2))
'''
#This is for calculating the difference dictionary.
#The difference dictionary consists of every
#KEY VALUE# in the dictionaries that does not exist
#in the other dictionary.
'''
#Variables
#differenceDictionary = The final returned difference dictionary
#Demo run: prints the (intersection, difference) pair for d1 and d2 above
print(diff_dictionary(d1, d2))
Author: Zachary W. Mikus
!/usr/bin/env python3 -*- coding: utf-8 -*-These are testing variablesVariablesintersectDictionary = The final returned intersect dictionarycommonKeyList = The list of keys that appear in both dictionariesThis function uses similar logic to the commonKeys functionExcept it will see if the index is NOT in the other list and remove itThis runs the loop twice once through each loop to find the missing numbersin each listThis function takes the common list of keys, grabs the common values in both dictionaries and performs the f(x, y) function on themcurrentIndex is the index in the dictionary, it will move This function takes the different list of keys, grabs the relevant values andcreates a dictionarysearches dTurns key values in lists and finds the longestkeyListD1 = list of keys in d1keyListD2 = list of keys in d2determines which of the two lists is the longest and assigned it valuesfor the common list functionFinds the common keysMakes the intersect dictionaryFinds the different keysMakes the different key dictionaryThis now creates a list of the dictionaries put togetherVariablesdifferenceDictionary = The final returned difference dictionary | 1,222 | en | 0.773525 |
# Generated by Django 4.0 on 2021-10-12 22:38
import blog.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial blog migration (auto-generated): creates the Post model."""
    initial = True
    dependencies = [
        # required because Post.author is a ForeignKey to auth.User
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200, unique=True)),
                ('slug', models.SlugField(max_length=200, unique=True)),
                ('updated_on', models.DateTimeField(auto_now=True)),
                ('summary', models.TextField(max_length=250)),
                ('content', models.TextField()),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                # 0 = Draft, 1 = Publish; new posts start as drafts
                ('status', models.IntegerField(choices=[(0, 'Draft'), (1, 'Publish')], default=0)),
                # upload path computed by blog.models.get_unique_path
                ('cover_image', models.ImageField(blank=True, null=True, upload_to=blog.models.get_unique_path)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to='auth.user')),
            ],
            options={
                # newest posts first by default
                'ordering': ['-created_on'],
            },
        ),
    ]
| farmblr/blog/migrations/0001_initial.py | 1,368 | Generated by Django 4.0 on 2021-10-12 22:38 | 43 | en | 0.796131 |
import urllib.parse
from datetime import datetime
from unittest.mock import patch
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from dfirtrack_artifacts.models import (
Artifact,
Artifactpriority,
Artifactstatus,
Artifacttype,
)
from dfirtrack_config.models import Statushistory
from dfirtrack_main.models import (
Analysisstatus,
Case,
Casepriority,
Casestatus,
System,
Systemstatus,
Task,
Taskname,
Taskpriority,
Taskstatus,
)
class StatusViewTestCase(TestCase):
""" status view tests """
    @classmethod
    def setUpTestData(cls):
        """Create the shared fixtures used by every test in this class."""
        # create user used for login and as creator/modifier of all objects
        test_user = User.objects.create_user(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
        # create artifactstatus
        artifactstatus_1 = Artifactstatus.objects.create(artifactstatus_name='artifactstatus_1')
        # create artifacttype
        artifacttype_1 = Artifacttype.objects.create(artifacttype_name='artifacttype_1')
        # create casepriority
        casepriority_1 = Casepriority.objects.create(casepriority_name='casepriority_1')
        # create casestatus
        casestatus_1 = Casestatus.objects.create(casestatus_name='casestatus_1')
        # create systemstatus
        systemstatus_1 = Systemstatus.objects.create(systemstatus_name='systemstatus_1')
        # create taskname
        taskname_1 = Taskname.objects.create(taskname_name='taskname_1')
        # create taskpriority
        taskpriority_1 = Taskpriority.objects.create(taskpriority_name='prio_1')
        # create taskstatus
        taskstatus_1 = Taskstatus.objects.create(taskstatus_name='taskstatus_1')
        # create three systems ('systems_number' is expected to be 3)
        system_1 = System.objects.create(
            system_name = 'system_1',
            systemstatus = systemstatus_1,
            system_created_by_user_id = test_user,
            system_modified_by_user_id = test_user,
        )
        System.objects.create(
            system_name = 'system_2',
            systemstatus = systemstatus_1,
            system_created_by_user_id = test_user,
            system_modified_by_user_id = test_user,
        )
        System.objects.create(
            system_name = 'system_3',
            systemstatus = systemstatus_1,
            system_created_by_user_id = test_user,
            system_modified_by_user_id = test_user,
        )
        # create one task ('tasks_number' is expected to be 1)
        Task.objects.create(
            taskname = taskname_1,
            taskpriority = taskpriority_1,
            taskstatus = taskstatus_1,
            task_modify_time = timezone.now(),
            task_created_by_user_id = test_user,
            task_modified_by_user_id = test_user,
        )
        # create two artifacts on system_1 ('artifacts_number' is expected to be 2)
        Artifact.objects.create(
            artifact_name = 'artifact_1',
            artifactstatus = artifactstatus_1,
            artifacttype = artifacttype_1,
            system = system_1,
            artifact_created_by_user_id = test_user,
            artifact_modified_by_user_id = test_user,
        )
        Artifact.objects.create(
            artifact_name = 'artifact_2',
            artifactstatus = artifactstatus_1,
            artifacttype = artifacttype_1,
            system = system_1,
            artifact_created_by_user_id = test_user,
            artifact_modified_by_user_id = test_user,
        )
        # create four cases ('cases_number' is expected to be 4)
        Case.objects.create(
            case_name = 'case_1',
            casepriority = casepriority_1,
            casestatus = casestatus_1,
            case_is_incident = True,
            case_created_by_user_id = test_user,
        )
        Case.objects.create(
            case_name = 'case_2',
            casepriority = casepriority_1,
            casestatus = casestatus_1,
            case_is_incident = True,
            case_created_by_user_id = test_user,
        )
        Case.objects.create(
            case_name = 'case_3',
            casepriority = casepriority_1,
            casestatus = casestatus_1,
            case_is_incident = True,
            case_created_by_user_id = test_user,
        )
        Case.objects.create(
            case_name = 'case_4',
            casepriority = casepriority_1,
            casestatus = casestatus_1,
            case_is_incident = True,
            case_created_by_user_id = test_user,
        )
        # mock timezone.now() so the statushistory entry gets a known timestamp
        t_1 = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
        with patch.object(timezone, 'now', return_value=t_1):
            # create empty object (for simple testing get request for empty detail view this should be sufficient)
            Statushistory.objects.create()
def test_status_view_not_logged_in(self):
""" test status view """
# create url
destination = '/login/?next=' + urllib.parse.quote('/config/status/', safe='')
# get response
response = self.client.get('/config/status/', follow=True)
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_status_view_logged_in(self):
""" test status view """
# login testuser
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
# get response
response = self.client.get('/config/status/')
# compare
self.assertEqual(response.status_code, 200)
def test_status_view_template(self):
""" test status view """
# login testuser
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
# get response
response = self.client.get('/config/status/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_config/status/status.html')
def test_status_view_get_user_context(self):
""" test status view """
# login testuser
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
# get response
response = self.client.get('/config/status/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_status')
def test_status_view_redirect(self):
""" test status view """
# login testuser
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
# create url
destination = urllib.parse.quote('/config/status/', safe='/')
# get response
response = self.client.get('/config/status', follow=True)
# compare
self.assertRedirects(response, destination, status_code=301, target_status_code=200)
def test_status_view_get_object_context(self):
""" test status view """
# login testuser
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
# get response
response = self.client.get('/config/status/')
# get querysets
analysisstatus_all = Analysisstatus.objects.all().order_by('analysisstatus_name')
artifactpriority_all = Artifactpriority.objects.all().order_by('artifactpriority_name')
artifactstatus_all = Artifactstatus.objects.all().order_by('artifactstatus_name')
casepriority_all = Casepriority.objects.all().order_by('casepriority_name')
casestatus_all = Casestatus.objects.all().order_by('casestatus_name')
systemstatus_all = Systemstatus.objects.all().order_by('systemstatus_name')
taskstatus_all = Taskstatus.objects.all().order_by('taskstatus_name')
taskpriority_all = Taskpriority.objects.all().order_by('taskpriority_name')
# compare
self.assertEqual(response.context['artifacts_number'], 2)
self.assertEqual(response.context['cases_number'], 4)
self.assertEqual(response.context['systems_number'], 3)
self.assertEqual(response.context['tasks_number'], 1)
self.assertEqual(type(response.context['analysisstatus_all']), type(analysisstatus_all))
self.assertEqual(type(response.context['artifactpriority_all']), type(artifactpriority_all))
self.assertEqual(type(response.context['artifactstatus_all']), type(artifactstatus_all))
self.assertEqual(type(response.context['casepriority_all']), type(casepriority_all))
self.assertEqual(type(response.context['casestatus_all']), type(casestatus_all))
self.assertEqual(type(response.context['systemstatus_all']), type(systemstatus_all))
self.assertEqual(type(response.context['taskpriority_all']), type(taskpriority_all))
self.assertEqual(type(response.context['taskstatus_all']), type(taskstatus_all))
def test_status_view_get_statushistory_entry_numbers_context(self):
    """ test status view """
    # login testuser
    self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
    # get response
    response = self.client.get('/config/status/')
    # compare - context carries the status history in reverse chronological order
    self.assertEqual(type(response.context['statushistory_all']), type(reversed(Statushistory.objects.all())))
    # TODO: test number of queryset elements in context element 'statushistory_all' according to 'statushistory_last_entrys' in MainConfigModel
    # TODO: number also depends on available statushistory elements
    # TODO: find a way to count reversed queryset
    #self.assertEqual(response.context['statushistory_all'].count(), 2)
def test_status_detail_view_not_logged_in(self):
    """ test status view """
    # timestamp given to the history entry during test-data setup
    timestamp = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
    # look up the matching history entry
    history_pk = Statushistory.objects.get(statushistory_time=timestamp).statushistory_id
    # anonymous users must be redirected to the login page with a next parameter
    destination = '/login/?next=' + urllib.parse.quote('/config/status/' + str(history_pk) + '/', safe='')
    response = self.client.get('/config/status/' + str(history_pk) + '/', follow=True)
    self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_status_detail_view_logged_in(self):
    """ test status view """
    # sign in as the prepared test user
    self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
    # timestamp given to the history entry during test-data setup
    timestamp = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
    # look up the matching history entry
    history_pk = Statushistory.objects.get(statushistory_time=timestamp).statushistory_id
    # an authenticated request must succeed
    response = self.client.get('/config/status/' + str(history_pk) + '/')
    self.assertEqual(response.status_code, 200)
def test_status_detail_view_template(self):
    """ test status view """
    # sign in as the prepared test user
    self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
    # timestamp given to the history entry during test-data setup
    timestamp = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
    # look up the matching history entry
    history_pk = Statushistory.objects.get(statushistory_time=timestamp).statushistory_id
    # the detail view must render the dedicated detail template
    response = self.client.get('/config/status/' + str(history_pk) + '/')
    self.assertTemplateUsed(response, 'dfirtrack_config/status/status_detail.html')
def test_status_detail_view_get_user_context(self):
    """ test status view """
    # sign in as the prepared test user
    self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
    # timestamp given to the history entry during test-data setup
    timestamp = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
    # look up the matching history entry
    history_pk = Statushistory.objects.get(statushistory_time=timestamp).statushistory_id
    # the logged-in user must be exposed in the template context
    response = self.client.get('/config/status/' + str(history_pk) + '/')
    self.assertEqual(str(response.context['user']), 'testuser_status')
def test_status_detail_view_redirect(self):
    """ test status view """
    # sign in as the prepared test user
    self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
    # timestamp given to the history entry during test-data setup
    timestamp = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
    # look up the matching history entry
    history_pk = Statushistory.objects.get(statushistory_time=timestamp).statushistory_id
    # a request without the trailing slash must be permanently redirected
    destination = urllib.parse.quote('/config/status/' + str(history_pk) + '/', safe='/')
    response = self.client.get('/config/status/' + str(history_pk), follow=True)
    self.assertRedirects(response, destination, status_code=301, target_status_code=200)
| dfirtrack_config/tests/status/test_status_views.py | 12,598 | status view tests
test status view
test status view
test status view
test status view
test status view
test status view
test status view
test status view
test status view
test status view
test status view
test status view
create user create object create object create object create object create object create object create object create object create object create object create object create object mock timezone.now() create empty object (for simple testing get request for empty detail view this should be sufficient) create url get response compare login testuser get response compare login testuser get response compare login testuser get response compare login testuser create url get response compare login testuser get response get querysets compare login testuser get response compare TODO: test number of queryset elements in context element 'statushistory_all' according to 'statushistory_last_entrys' in MainConfigModel TODO: number also depends on available statushistory elements TODO: find a way to count reversed querysetself.assertEqual(response.context['statushistory_all'].count(), 2) get time get object create url get response compare login testuser get time get object get response compare login testuser get time get object get response compare login testuser get time get object get response compare login testuser get time get object create url get response compare | 1,407 | en | 0.49602 |
import sys
import getopt
import os
import subprocess
import shutil
import logging as log
def Initialize(platform):
    """Read the Jenkins WORKSPACE directory into the module global
    'workspace', sanitize PATH on Windows, and return the workspace path."""
    print "Initializing Workspace"
    global workspace
    workspace = os.environ['WORKSPACE']
    if platform == "windows":
        # Jenkins puts quotes in the path, which is wrong. Remove quotes.
        os.environ['PATH'] = os.environ['PATH'].replace('"','')
    return workspace
def ParseArgs(argv):
print "Parsing arguments for compile"
try:
opts, args = getopt.getopt(argv, "t:p:a:v", ["target=", "platform=", "arch=", "verbose","noclean"])
except getopt.GetoptError:
print "ERROR: \n\t usage: python compile.py --target <target> --platform <windows|linux> --arch <arch> [--verbose] [--noclean]"
return 2,"","","",True
verbose = False
cleanUp = True
acceptedPlatforms = ['windows','linux']
for opt, arg in opts:
if opt in ("-t", "--target"):
target = arg
elif opt in ("-p", "--platform"):
if arg.lower() not in acceptedPlatforms:
print "ERROR: " + arg + "not an accepted platform. Use windows or linux."
sys.exit(2)
platform = arg.lower()
elif opt in ("-a", "--arch"):
arch = arg
elif opt in ("-v", "--verbose"):
verbose = True
elif opt in ("-c", "--noclean"):
cleanUp = False
if verbose:
log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG)
log.info("In verbose mode.")
else:
log.basicConfig(format="%(levelname)s: %(message)s")
if target == "" or platform == "" or arch == "":
# must specify target, project and arch
log.error("Must specify target, project and arch")
return 2,"","","",True
return 0,target,platform,arch,cleanUp
def SetupDirectories(target, arch, platform):
    """Create build/build-<platform>[-<arch>-<target>] under the workspace,
    chdir into it, and return its absolute path.

    Sets the module globals rootdir, builddir and fullBuildDirPath; an
    existing build directory for the same configuration is removed first.
    """
    log.info("Setting up directories")
    global rootdir
    global builddir
    global fullBuildDirPath
    rootdir = "build"
    if not os.path.isdir(rootdir):
        os.mkdir(rootdir)
    os.chdir(rootdir)
    builddir = "build-" + platform
    if platform == "windows":
        builddir = builddir + "-" + arch + "-" + target
    if os.path.isdir(builddir):
        shutil.rmtree(builddir)
    os.mkdir(builddir)
    os.chdir(builddir)
    # Assign the declared global. The original wrote to a differently-cased
    # local 'fullbuilddirpath', so 'global fullBuildDirPath' never took
    # effect and the global stayed unset.
    fullBuildDirPath = workspace + "/" + rootdir + "/" + builddir
    return fullBuildDirPath
def Cleanup(cleanUp,workspace):
    """Remove the build directories created by SetupDirectories and shut
    down logging.

    Relies on the module globals rootdir/builddir set by SetupDirectories;
    when cleanUp is falsy the directories are left in place.
    """
    print "\n==================================================\n"
    print "Cleaning Up."
    print "\n==================================================\n"
    if cleanUp:
        # leave the build dir before deleting it, then do the same one level up
        os.chdir(workspace + "/" + rootdir)
        shutil.rmtree(builddir)
        os.chdir("..")
        shutil.rmtree(rootdir)
    log.shutdown()
    return 0
| src/pal/automation/util.py | 2,814 | Jenkins puts quotes in the path, which is wrong. Remove quotes. must specify target, project and arch | 101 | en | 0.856487 |
import robocup
import constants
import main
import math
import skills.touch_ball
import skills._kick
import skills.pass_receive
## AngleReceive accepts a receive_point as a parameter and gets setup there to catch the ball
# It transitions to the 'aligned' state once it's there within its error thresholds and is steady
# Set its 'ball_kicked' property to True to tell it to dynamically update its position based on where
# the ball is moving and attempt to catch it.
# It will move to the 'completed' state if it catches the ball, otherwise it will go to 'failed'.
# Kick is a single_robot_behavior, so no need to import both
class AngleReceive(skills.pass_receive.PassReceive):
    """Pass receiver that positions itself to one-touch the ball toward
    target_point, compensating the aim for the incoming ball's velocity.

    Fix over the original: all None comparisons now use 'is'/'is not'
    instead of '=='/'!=' (PEP 8; also robust if robocup types overload
    __eq__). Behavior is otherwise unchanged.
    """

    def __init__(self):
        super().__init__(
            captureFunction=(lambda: skills.touch_ball.TouchBall()))
        self._target_point = None
        self.kick_power = 1
        self.target_point = constants.Field.TheirGoalSegment.center()
        self.ball_kicked = False
        self.target_angle = 0

    ## The point that the receiver should expect the ball to hit it's mouth
    # Default: constants.Field.TheirGoalSegment.center()
    @property
    def target_point(self):
        return self._target_point

    @target_point.setter
    def target_point(self, value):
        self._target_point = value
        self.recalculate()

    ## Returns an adjusted angle with account for ball speed
    #
    # First finds the rejection, which is the X component of the ball's velocity in the reference
    # frame of the robot, with the mouth facing the y axis. Then we calculate the angle required to
    # offset this rejection angle (if possible).
    def adjust_angle(self, target_angle, ball_angle=None, ball_speed=None):
        ball = main.ball()
        if ball_angle is None:
            ball_angle = (ball.vel).angle()
        if ball_speed is None:
            ball_speed = ball.vel.mag()
        angle_diff = target_angle - ball_angle
        rejection = math.sin(angle_diff) * ball_speed
        # The min/max is to bound the value by -1 and 1 (asin domain).
        adjust = math.asin(min(1, max(-1, rejection /
                                      constants.Robot.MaxKickSpeed)))
        return adjust + target_angle

    # calculates:
    # self._pass_line - the line from the ball along where we think we're going
    # self._target_pos - where the bot should be
    # self._angle_error - difference in where we're facing and where we want to face (in radians)
    # self._x_error
    # self._y_error
    def recalculate(self):
        # can't do squat if we don't know what we're supposed to do
        if self.receive_point is None or self.robot is None or self.target_point is None:
            return

        ball = main.ball()

        if self.ball_kicked:
            # when the ball's in motion, the line is based on the ball's velocity
            self._pass_line = robocup.Line(ball.pos, ball.pos + ball.vel * 10)
            # After kicking, apply angle calculations
            target_angle_rad = self.adjust_angle((self.target_point -
                                                  self.robot.pos).angle())
            # Removes angle adjustment
            # target_angle_rad = (self.target_point - self.robot.pos).angle()
            self._kick_line = robocup.Line(self.robot.pos, robocup.Point(
                self.robot.pos.x + math.cos(self.robot.angle) * 10,
                self.robot.pos.y + math.sin(self.robot.angle) * 10))
        else:
            # if the ball hasn't been kicked yet, we assume it's going to go through the receive point
            self._pass_line = robocup.Line(ball.pos, self.receive_point)
            # Assume ball is kicked at max speed and is coming from the ball point to the location of our robot. Then average this with the target angle.
            target_angle_rad = self.adjust_angle(
                (self.target_point - self.robot.pos).angle(),
                (self.robot.pos - main.ball().pos).angle(),
                constants.Robot.MaxKickSpeed)
            # TODO make this faster by caching the .angle() part
            target_angle_rad = (
                target_angle_rad +
                (self.target_point - self.robot.pos).angle()) / 2
            self._kick_line = robocup.Line(self.receive_point,
                                           self.target_point)

        self._angle_facing = target_angle_rad
        self.target_angle = target_angle_rad
        angle_rad = self.robot.angle
        self._angle_error = target_angle_rad - angle_rad

        if self.ball_kicked:
            receive_before_adjust = self._pass_line.nearest_point(
                self.robot.pos)
        else:
            receive_before_adjust = self.receive_point

        # Make the receive point be the mouth, rather than the center of the robot.
        # Assumes mouth of robot is at the edge.
        self._target_pos = receive_before_adjust - robocup.Point(
            constants.Robot.Radius * math.cos(self.robot.angle),
            constants.Robot.Radius * math.sin(self.robot.angle))

        # Code to provide slipback when receiving the ball
        # pass_line_dir = (self._pass_line.get_pt(1) - self._pass_line.get_pt(0)).normalized()
        # self._target_pos = actual_receive_point + pass_line_dir * constants.Robot.Radius

        # vector pointing down the pass line toward the kicker
        self._x_error = self._target_pos.x - self.robot.pos.x
        self._y_error = self._target_pos.y - self.robot.pos.y

    def execute_running(self):
        super().execute_running()
        self.recalculate()
        self.robot.face(self.robot.pos + robocup.Point(
            math.cos(self._angle_facing), math.sin(self._angle_facing)))
        if self._kick_line is not None:
            main.system_state().draw_line(self._kick_line,
                                          constants.Colors.Red, "Shot")

    def execute_receiving(self):
        super().execute_receiving()
        self.ball_kicked = True
        # Kick the ball!
        self.robot.kick(self.kick_power)
        if self.target_point is not None:
            main.system_state().draw_circle(self.target_point, 0.03,
                                            constants.Colors.Blue, "Target")
| soccer/gameplay/skills/angle_receive.py | 6,233 | AngleReceive accepts a receive_point as a parameter and gets setup there to catch the ball It transitions to the 'aligned' state once it's there within its error thresholds and is steady Set its 'ball_kicked' property to True to tell it to dynamically update its position based on where the ball is moving and attempt to catch it. It will move to the 'completed' state if it catches the ball, otherwise it will go to 'failed'. Kick is a single_robot_behavior, so no need to import both The point that the receiver should expect the ball to hit it's mouth Default: constants.Field.TheirGoalSegment.center() Returns an adjusted angle with account for ball speed First finds the rejection, which is the X component of the ball's velocity in the reference frame of the robot, with the mouth facing the y axis. Then we calculate the angle required to offset this rejection angle (if possible). The min/max is to bound the value by -1 and 1. calculates: self._pass_line - the line from the ball along where we think we're going self._target_pos - where the bot should be self._angle_error - difference in where we're facing and where we want to face (in radians) self._x_error self._y_error can't do squat if we don't know what we're supposed to do when the ball's in motion, the line is based on the ball's velocity After kicking, apply angle calculations Removes angle adjustment target_angle_rad = (self.target_point - self.robot.pos).angle() if the ball hasn't been kicked yet, we assume it's going to go through the receive point Assume ball is kicked at max speed and is coming from the ball point to the location of our robot. Then average this with the target angle. TODO make this faster by caching the .angle() part Make the receive point be the mouth, rather than the center of the robot. Assumes mouth of robot is at the edge. 
Code to provide slipback when receiving the ball pass_line_dir = (self._pass_line.get_pt(1) - self._pass_line.get_pt(0)).normalized() self._target_pos = actual_receive_point + pass_line_dir * constants.Robot.Radius vector pointing down the pass line toward the kicker Kick the ball! | 2,115 | en | 0.894094 |
from big_ol_pile_of_manim_imports import *
import os
import pyclbr
class Shapes(Scene):
    """Minimal scene: draws a single vertical line.

    The commented-out statements are kept as tutorial examples of other
    basic mobjects and animations.
    """
    #A few simple shapes
    #Python 2.7 version runs in Python 3.7 without changes
    def construct(self):
        #circle = Circle()
        #square = Square()
        line=Line(UP,DOWN)
        #line2=Line
        #triangle=Polygon(np.array([0,0,0]),np.array([1,1,0]),np.array([1,-1,0]))
        self.add(line)
        #self.play(ShowCreation(circle))
        #self.play(FadeOut(circle))
        #self.play(GrowFromCenter(square))
        #self.play(Transform(square,triangle))
class MoreShapes(Scene):
    """Tutorial scene showing several mobject types and relative placement
    helpers (move_to, surround, next_to, shift)."""
    #A few more simple shapes
    #2.7 version runs in 3.7 without any changes
    #Note: I fixed my 'play command not found' issue by installing sox
    def construct(self):
        circle = Circle(color=PURPLE_A)
        square = Square(fill_color=GOLD_B, fill_opacity=1, color=GOLD_A)
        square.move_to(UP+LEFT)
        # scale the circle so it encloses the square
        circle.surround(square)
        rectangle = Rectangle(height=2, width=3)
        ellipse=Ellipse(width=3, height=1, color=RED)
        ellipse.shift(2*DOWN+2*RIGHT)
        pointer = CurvedArrow(2*RIGHT,5*RIGHT,color=MAROON_C)
        arrow = Arrow(LEFT,UP)
        arrow.next_to(circle,DOWN+LEFT)
        rectangle.next_to(arrow,DOWN+LEFT)
        ring=Annulus(inner_radius=.5, outer_radius=1, color=BLUE)
        ring.next_to(ellipse, RIGHT)
        self.play(FadeIn(square))
        self.play(Rotating(square),FadeIn(circle))
        self.play(GrowArrow(arrow))
        self.play(GrowFromCenter(rectangle), GrowFromCenter(ellipse), GrowFromCenter(ring))
        self.add(pointer)
class MovingShapes(Scene):
    """Show the difference between .shift() and .move_to(): one circle is
    repositioned absolutely, the other relatively, with identical targets."""
    #Show the difference between .shift() and .move_to
    def construct(self):
        circle = Circle(color=TEAL_A)
        circle.move_to(LEFT)
        # NOTE: the original named this local 'square' although it is a
        # Circle(); renamed to avoid the misleading name (behavior unchanged).
        circle2 = Circle()
        circle2.move_to(LEFT+3*DOWN)
        self.play(GrowFromCenter(circle), GrowFromCenter(circle2), rate=5)
        self.play(ApplyMethod(circle.move_to,RIGHT), ApplyMethod(circle2.shift,RIGHT))
        self.play(ApplyMethod(circle.move_to,RIGHT+UP), ApplyMethod(circle2.shift,RIGHT+UP))
        self.play(ApplyMethod(circle.move_to,LEFT+UP), ApplyMethod(circle2.shift,LEFT+UP))
class AddingText(Scene):
    """Write text mobjects, transform one line into another, then shift."""
    #Adding text on the screen
    def construct(self):
        my_first_text=TextMobject("Writing with manim is fun")
        second_line=TextMobject("and easy to do!")
        second_line.next_to(my_first_text,DOWN)
        third_line=TextMobject("for me and you!")
        third_line.next_to(my_first_text,DOWN)
        self.add(my_first_text, second_line)
        self.wait(2)
        self.play(Transform(second_line,third_line))
        self.wait(2)
        second_line.shift(3*DOWN)
        self.play(ApplyMethod(my_first_text.shift,3*UP))
        ###Try uncommenting the following###
        #self.play(ApplyMethod(second_line.move_to, LEFT_SIDE-2*LEFT))
        #self.play(ApplyMethod(my_first_text.next_to,second_line))
class AddingMoreText(Scene):
    """Demonstrate text styling: color, scaling, corner-relative placement,
    transforms, and color matching between mobjects."""
    #Playing around with text properties
    def construct(self):
        quote = TextMobject("Imagination is more important than knowledge")
        quote.set_color(RED)
        quote.to_edge(UP)
        quote2 = TextMobject("A person who never made a mistake never tried anything new")
        quote2.set_color(YELLOW)
        author=TextMobject("-Albert Einstein")
        author.scale(0.75)
        # pin the attribution to the lower-right corner of the quote
        author.next_to(quote.get_corner(DOWN+RIGHT),DOWN)
        self.add(quote)
        self.add(author)
        self.wait(2)
        self.play(Transform(quote,quote2),ApplyMethod(author.move_to,quote2.get_corner(DOWN+RIGHT)+DOWN+2*LEFT))
        self.play(ApplyMethod(author.scale,1.5))
        author.match_color(quote2)
        self.play(FadeOut(quote))
class RotateAndHighlight(Scene):
    """Rotate text with a background rectangle, box text with a surrounding
    rectangle, and color text with a gradient."""
    #Rotation of text and highlighting with surrounding geometries
    def construct(self):
        square=Square(side_length=5,fill_color=YELLOW, fill_opacity=1)
        label=TextMobject("Text at an angle")
        label.bg=BackgroundRectangle(label,fill_opacity=1)
        label_group=VGroup(label.bg,label)  #Order matters
        label_group.rotate(TAU/8)
        label2=TextMobject("Boxed text",color=BLACK)
        label2.bg=SurroundingRectangle(label2,color=BLUE,fill_color=RED, fill_opacity=.5)
        label2_group=VGroup(label2,label2.bg)
        label2_group.next_to(label_group,DOWN)
        label3=TextMobject("Rainbow")
        label3.scale(2)
        label3.set_color_by_gradient(RED, ORANGE, YELLOW, GREEN, BLUE, PURPLE)
        label3.to_edge(DOWN)
        self.add(square)
        self.play(FadeIn(label_group))
        self.play(FadeIn(label2_group))
        self.play(FadeIn(label3))
class BasicEquations(Scene):
    """Render LaTeX: TextMobject with inline math vs. raw-string TexMobject."""
    #A short script showing how to use Latex commands
    def construct(self):
        eq1=TextMobject("$\\vec{X}_0 \\cdot \\vec{Y}_1 = 3$")
        eq1.shift(2*UP)
        eq2=TexMobject(r"\vec{F}_{net} = \sum_i \vec{F}_i")
        eq2.shift(2*DOWN)
        self.play(Write(eq1))
        self.play(Write(eq2))
class ColoringEquations(Scene):
    """Group multi-part TexMobjects into a VGroup and arrange them vertically."""
    #Grouping and coloring parts of equations
    def construct(self):
        line1=TexMobject(r"\text{The vector } \vec{F}_{net} \text{ is the net }",r"\text{force }",r"\text{on object of mass }")
        line2=TexMobject("m", "\\text{ and acceleration }", "\\vec{a}", ". ")
        sentence=VGroup(line1,line2)
        sentence.arrange_submobjects(DOWN, buff=MED_LARGE_BUFF)
        self.play(Write(sentence))
class UsingBraces(Scene):
    """Lay out two equations column by column and brace them as a group."""
    #Using braces to group text together
    def construct(self):
        eq1A = TextMobject("4x + 3y")
        eq1B = TextMobject("=")
        eq1C = TextMobject("0")
        eq2A = TextMobject("5x -2y")
        eq2B = TextMobject("=")
        eq2C = TextMobject("3")
        eq1B.next_to(eq1A,RIGHT)
        eq1C.next_to(eq1B,RIGHT)
        eq2A.shift(DOWN)
        eq2B.shift(DOWN)
        eq2C.shift(DOWN)
        # left-align each column of the second row with the first row
        eq2A.align_to(eq1A,LEFT)
        eq2B.align_to(eq1B,LEFT)
        eq2C.align_to(eq1C,LEFT)
        eq_group=VGroup(eq1A,eq2A)
        braces=Brace(eq_group,LEFT)
        eq_text = braces.get_text("A pair of equations")
        self.add(eq1A, eq1B, eq1C)
        self.add(eq2A, eq2B, eq2C)
        self.play(GrowFromCenter(braces),Write(eq_text))
class UsingBracesConcise(Scene):
    """Same layout as UsingBraces, but built from per-token TexMobjects with
    variable coloring and a loop to align the columns."""
    #A more concise block of code with all columns aligned
    def construct(self):
        eq1_text=["4","x","+","3","y","=","0"]
        eq2_text=["5","x","-","2","y","=","3"]
        eq1_mob=TexMobject(*eq1_text)
        eq2_mob=TexMobject(*eq2_text)
        eq1_mob.set_color_by_tex_to_color_map({
            "x":RED_B,
            "y":GREEN_C
        })
        eq2_mob.set_color_by_tex_to_color_map({
            "x":RED_B,
            "y":GREEN_C
        })
        # align each token of eq2 with the matching token of eq1
        for i,item in enumerate(eq2_mob):
            item.align_to(eq1_mob[i],LEFT)
        eq1=VGroup(*eq1_mob)
        eq2=VGroup(*eq2_mob)
        eq2.shift(DOWN)
        eq_group=VGroup(eq1,eq2)
        braces=Brace(eq_group,LEFT)
        eq_text = braces.get_text("A pair of equations")
        self.play(Write(eq1),Write(eq2))
        self.play(GrowFromCenter(braces),Write(eq_text))
class PlotFunctions(GraphScene):
    """Plot cos(x) and sin(x) on shared axes and mark x = 2*pi."""
    CONFIG = {
        "x_min" : -10,
        "x_max" : 10.3,
        "y_min" : -1.5,
        "y_max" : 1.5,
        "graph_origin" : ORIGIN ,
        "function_color" : RED ,
        "axes_color" : GREEN,
        "x_labeled_nums" :range(-10,12,2),
    }
    def construct(self):
        self.setup_axes(animate=True)
        func_graph=self.get_graph(self.func_to_graph,self.function_color)
        func_graph2=self.get_graph(self.func_to_graph2)
        vert_line = self.get_vertical_line_to_graph(TAU,func_graph,color=YELLOW)
        graph_lab = self.get_graph_label(func_graph, label = "\\cos(x)")
        graph_lab2=self.get_graph_label(func_graph2,label = "\\sin(x)", x_val=-10, direction=UP/2)
        two_pi = TexMobject("x = 2 \\pi")
        # place the annotation next to the point (2*pi, cos(2*pi)) on the curve
        label_coord = self.input_to_graph_point(TAU,func_graph)
        two_pi.next_to(label_coord,RIGHT+UP)
        self.play(ShowCreation(func_graph),ShowCreation(func_graph2))
        self.play(ShowCreation(vert_line), ShowCreation(graph_lab), ShowCreation(graph_lab2),ShowCreation(two_pi))
    def func_to_graph(self,x):
        """y = cos(x)."""
        return np.cos(x)
    def func_to_graph2(self,x):
        """y = sin(x)."""
        return np.sin(x)
class ExampleApproximation(GraphScene):
    """Animate successive Taylor-polynomial approximations of cos(x),
    morphing one approximation curve into the next."""
    CONFIG = {
        "function" : lambda x : np.cos(x),
        "function_color" : BLUE,
        # Taylor polynomials of cos(x) of increasing order
        "taylor" : [lambda x: 1, lambda x: 1-x**2/2, lambda x: 1-x**2/math.factorial(2)+x**4/math.factorial(4), lambda x: 1-x**2/2+x**4/math.factorial(4)-x**6/math.factorial(6),
        lambda x: 1-x**2/math.factorial(2)+x**4/math.factorial(4)-x**6/math.factorial(6)+x**8/math.factorial(8), lambda x: 1-x**2/math.factorial(2)+x**4/math.factorial(4)-x**6/math.factorial(6)+x**8/math.factorial(8) - x**10/math.factorial(10)],
        "center_point" : 0,
        "approximation_color" : GREEN,
        "x_min" : -10,
        "x_max" : 10,
        "y_min" : -1,
        "y_max" : 1,
        "graph_origin" : ORIGIN ,
        "x_labeled_nums" :range(-10,12,2),
    }
    def construct(self):
        self.setup_axes(animate=True)
        func_graph = self.get_graph(
            self.function,
            self.function_color,
        )
        approx_graphs = [
            self.get_graph(
                f,
                self.approximation_color
            )
            for f in self.taylor
        ]
        term_num = [
            TexMobject("n = " + str(n),aligned_edge=TOP)
            for n in range(0,8)]
        #[t.to_edge(BOTTOM,buff=SMALL_BUFF) for t in term_num]
        #term = TexMobject("")
        #term.to_edge(BOTTOM,buff=SMALL_BUFF)
        # invisible anchors so the first Transform has something to morph from
        term = VectorizedPoint(3*DOWN)
        approx_graph = VectorizedPoint(
            self.input_to_graph_point(self.center_point, func_graph)
        )
        self.play(
            ShowCreation(func_graph),
        )
        for n,graph in enumerate(approx_graphs):
            self.play(
                Transform(approx_graph, graph, run_time = 2),
                Transform(term,term_num[n])
            )
            self.wait()
class DrawAnAxis(Scene):
    """Show a labeled number plane with a denser grid-line frequency."""
    CONFIG = { "plane_kwargs" : {
        "x_line_frequency" : 2,
        "y_line_frequency" :2
        }
    }
    def construct(self):
        my_plane = NumberPlane(**self.plane_kwargs)
        my_plane.add(my_plane.get_axis_labels())
        self.add(my_plane)
        #self.wait()
class SimpleField(Scene):
    """Draw a constant (up-and-right) vector field on a labeled number plane."""
    CONFIG = {
        "plane_kwargs" : {
            "color" : RED
        },
    }
    def construct(self):
        # axes and grid, with x/y labels, placed on screen
        grid = NumberPlane(**self.plane_kwargs)
        grid.add(grid.get_axis_labels())
        self.add(grid)
        # one anchor per integer grid position
        anchors = [col * RIGHT + row * UP
                   for col in np.arange(-5, 5, 1)
                   for row in np.arange(-5, 5, 1)]
        # the same constant field vector drawn at every anchor
        arrows = [Vector(0.5 * RIGHT + 0.5 * UP).shift(anchor)
                  for anchor in anchors]
        # draw all arrows as a single group
        self.play(ShowCreation(VGroup(*arrows)))
class FieldWithAxes(Scene):
    """Draw the inverse-square field of a point charge over a number plane."""
    CONFIG = {
        "plane_kwargs" : {
            "color" : RED_B
        },
        "point_charge_loc" : 0.5*RIGHT-1.5*UP,
    }
    def construct(self):
        plane = NumberPlane(**self.plane_kwargs)
        # dim the grid so the field arrows stand out
        plane.main_lines.fade(.9)
        plane.add(plane.get_axis_labels())
        self.add(plane)
        field = VGroup(*[self.calc_field(x*RIGHT+y*UP)
            for x in np.arange(-9,9,1)
            for y in np.arange(-5,5,1)
            ])
        self.play(ShowCreation(field))
    def calc_field(self,point):
        #This calculates the field at a single point.
        # NOTE(review): no guard for r == 0; safe here only because the charge
        # does not sit on an integer grid point — confirm before reusing.
        x,y = point[:2]
        Rx,Ry = self.point_charge_loc[:2]
        r = math.sqrt((x-Rx)**2 + (y-Ry)**2)
        efield = (point - self.point_charge_loc)/r**3
        #efield = np.array((-y,x,0))/math.sqrt(x**2+y**2)  #Try one of these two fields
        #efield = np.array(( -2*(y%2)+1 , -2*(x%2)+1 , 0 ))/3  #Try one of these two fields
        return Vector(efield).shift(point)
class ExampleThreeD(ThreeDScene):
    """Draw a 2D point-charge field, then move and rotate the 3D camera
    around it."""
    CONFIG = {
        "plane_kwargs" : {
            "color" : RED_B
        },
        "point_charge_loc" : 0.5*RIGHT-1.5*UP,
    }
    def construct(self):
        #self.set_camera_position(0, -np.pi/2)  #Old code
        plane = NumberPlane(**self.plane_kwargs)
        plane.main_lines.fade(.9)
        plane.add(plane.get_axis_labels())
        self.add(plane)
        field2D = VGroup(*[self.calc_field2D(x*RIGHT+y*UP)
            for x in np.arange(-9,9,1)
            for y in np.arange(-5,5,1)
            ])
        self.set_camera_orientation(phi=PI/3,gamma=PI/5)
        self.play(ShowCreation(field2D))
        self.wait()
        self.move_camera(gamma=0,run_time=1)
        self.move_camera(phi=3/4*PI, theta=-PI/2)
        self.begin_ambient_camera_rotation(rate=0.1)
        self.wait(6)
    def calc_field2D(self,point):
        """Inverse-square field of the point charge, as a Vector at point."""
        x,y = point[:2]
        Rx,Ry = self.point_charge_loc[:2]
        r = math.sqrt((x-Rx)**2 + (y-Ry)**2)
        efield = (point - self.point_charge_loc)/r**3
        return Vector(efield).shift(point)
class EFieldInThreeD(ThreeDScene):
    """Render the 3D inverse-square field of a point charge and orbit the
    camera around it."""
    CONFIG = {
        "plane_kwargs" : {
            "color" : RED_B
        },
        "point_charge_loc" : 0.5*RIGHT-1.5*UP,
    }
    def construct(self):
        plane = NumberPlane(**self.plane_kwargs)
        plane.main_lines.fade(.9)
        plane.add(plane.get_axis_labels())
        self.add(plane)
        # NOTE: the original also built a 2D field VGroup here that was never
        # played or added; that dead computation has been removed.
        field3D = VGroup(*[self.calc_field3D(x*RIGHT+y*UP+z*OUT)
            for x in np.arange(-9,9,1)
            for y in np.arange(-5,5,1)
            for z in np.arange(-5,5,1)])
        self.play(ShowCreation(field3D))
        self.wait()
        self.move_camera(0.8*np.pi/2, -0.45*np.pi)
        self.begin_ambient_camera_rotation()
        self.wait(6)
    def calc_field2D(self,point):
        """2D field of the point charge (kept for API compatibility)."""
        x,y = point[:2]
        Rx,Ry = self.point_charge_loc[:2]
        r = math.sqrt((x-Rx)**2 + (y-Ry)**2)
        efield = (point - self.point_charge_loc)/r**3
        return Vector(efield).shift(point)
    def calc_field3D(self,point):
        """3D inverse-square field of the point charge, as a Vector at point."""
        x,y,z = point
        Rx,Ry,Rz = self.point_charge_loc
        r = math.sqrt((x-Rx)**2 + (y-Ry)**2+(z-Rz)**2)
        efield = (point - self.point_charge_loc)/r**3
        #efield = np.array((-y,x,z))/math.sqrt(x**2+y**2+z**2)
        return Vector(efield).shift(point)
class MovingCharges(Scene):
    """Draw a point-charge field, then release test charges that accelerate
    along the field via per-frame updates in continual_update."""
    CONFIG = {
        "plane_kwargs" : {
            "color" : RED_B
        },
        "point_charge_loc" : 0.5*RIGHT-1.5*UP,
    }
    def construct(self):
        plane = NumberPlane(**self.plane_kwargs)
        plane.main_lines.fade(.9)
        plane.add(plane.get_axis_labels())
        self.add(plane)
        field = VGroup(*[self.calc_field(x*RIGHT+y*UP)
            for x in np.arange(-9,9,1)
            for y in np.arange(-5,5,1)
            ])
        # keep the field on self so moving_charge() can sample its anchors
        self.field=field
        source_charge = self.Positron().move_to(self.point_charge_loc)
        self.play(FadeIn(source_charge))
        self.play(ShowCreation(field))
        self.moving_charge()
    def calc_field(self,point):
        """Inverse-square field of the source charge, as a Vector at point."""
        x,y = point[:2]
        Rx,Ry = self.point_charge_loc[:2]
        r = math.sqrt((x-Rx)**2 + (y-Ry)**2)
        efield = (point - self.point_charge_loc)/r**3
        return Vector(efield).shift(point)
    def moving_charge(self):
        """Drop test positrons at random field anchors and let
        continual_update move them for 10 seconds."""
        numb_charges=4
        possible_points = [v.get_start() for v in self.field]
        points = random.sample(possible_points, numb_charges)
        particles = VGroup(*[
            self.Positron().move_to(point)
            for point in points
        ])
        for particle in particles:
            particle.velocity = np.array((0,0,0))
        self.play(FadeIn(particles))
        self.moving_particles = particles
        self.add_foreground_mobjects(self.moving_particles )
        # enables per-frame calls to continual_update during wait()
        self.always_continually_update = True
        self.wait(10)
    def field_at_point(self,point):
        """Raw field value (numpy vector) used as the particle acceleration."""
        x,y = point[:2]
        Rx,Ry = self.point_charge_loc[:2]
        r = math.sqrt((x-Rx)**2 + (y-Ry)**2)
        efield = (point - self.point_charge_loc)/r**3
        return efield
    def continual_update(self, *args, **kwargs):
        """Per-frame Euler integration: v += a*dt, x += v*dt."""
        if hasattr(self, "moving_particles"):
            dt = self.frame_duration
            for p in self.moving_particles:
                accel = self.field_at_point(p.get_center())
                p.velocity = p.velocity + accel*dt
                p.shift(p.velocity*dt)
    class Positron(Circle):
        # red circle with a "+" label, used as the charge marker
        CONFIG = {
            "radius" : 0.2,
            "stroke_width" : 3,
            "color" : RED,
            "fill_color" : RED,
            "fill_opacity" : 0.5,
        }
        def __init__(self, **kwargs):
            Circle.__init__(self, **kwargs)
            plus = TexMobject("+")
            plus.scale(0.7)
            plus.move_to(self)
            self.add(plus)
class FieldOfMovingCharge(Scene):
    """Like MovingCharges, but the field arrows themselves are recomputed
    every frame from the summed contributions of all moving charges."""
    CONFIG = {
        "plane_kwargs" : {
            "color" : RED_B
        },
        "point_charge_start_loc" : 5.5*LEFT-1.5*UP,
    }
    def construct(self):
        plane = NumberPlane(**self.plane_kwargs)
        plane.main_lines.fade(.9)
        plane.add(plane.get_axis_labels())
        self.add(plane)
        field = VGroup(*[self.create_vect_field(self.point_charge_start_loc,x*RIGHT+y*UP)
            for x in np.arange(-9,9,1)
            for y in np.arange(-5,5,1)
            ])
        self.field=field
        # the source charge drifts to the right with constant velocity
        self.source_charge = self.Positron().move_to(self.point_charge_start_loc)
        self.source_charge.velocity = np.array((1,0,0))
        self.play(FadeIn(self.source_charge))
        self.play(ShowCreation(field))
        self.moving_charge()
    def create_vect_field(self,source_charge,observation_point):
        """Vector mobject for the field of source_charge at observation_point."""
        return Vector(self.calc_field(source_charge,observation_point)).shift(observation_point)
    def calc_field(self,source_point,observation_point):
        """Inverse-square field value; returns the zero vector at the source
        point itself to avoid dividing by zero."""
        x,y,z = observation_point
        Rx,Ry,Rz = source_point
        r = math.sqrt((x-Rx)**2 + (y-Ry)**2 + (z-Rz)**2)
        if r<0.0000001:   #Prevent divide by zero
            efield = np.array((0,0,0))
        else:
            efield = (observation_point - source_point)/r**3
        return efield
    def moving_charge(self):
        """Add test positrons (plus the drifting source) and let
        continual_update animate them and the field for 10 seconds."""
        numb_charges=3
        possible_points = [v.get_start() for v in self.field]
        points = random.sample(possible_points, numb_charges)
        particles = VGroup(self.source_charge, *[
            self.Positron().move_to(point)
            for point in points
        ])
        # particles[0] is the source charge and keeps its preset velocity
        for particle in particles[1:]:
            particle.velocity = np.array((0,0,0))
        self.play(FadeIn(particles[1:]))
        self.moving_particles = particles
        self.add_foreground_mobjects(self.moving_particles )
        self.always_continually_update = True
        self.wait(10)
    def continual_update(self, *args, **kwargs):
        """Per frame: re-aim every field arrow at the summed field of all
        particles, then advance the particles (accel is zero here, so they
        move with constant velocity)."""
        Scene.continual_update(self, *args, **kwargs)
        if hasattr(self, "moving_particles"):
            dt = self.frame_duration
            for v in self.field:
                field_vect=np.zeros(3)
                for p in self.moving_particles:
                    field_vect = field_vect + self.calc_field(p.get_center(), v.get_start())
                v.put_start_and_end_on(v.get_start(), field_vect+v.get_start())
            for p in self.moving_particles:
                accel = np.zeros(3)
                p.velocity = p.velocity + accel*dt
                p.shift(p.velocity*dt)
    class Positron(Circle):
        # red circle with a "+" label, used as the charge marker
        CONFIG = {
            "radius" : 0.2,
            "stroke_width" : 3,
            "color" : RED,
            "fill_color" : RED,
            "fill_opacity" : 0.5,
        }
        def __init__(self, **kwargs):
            Circle.__init__(self, **kwargs)
            plus = TexMobject("+")
            plus.scale(0.7)
            plus.move_to(self)
            self.add(plus)
# Indices of the named sub-parts inside the stick-man SVG submobject list.
HEAD_INDEX = 0
BODY_INDEX = 1
ARMS_INDEX = 2
LEGS_INDEX = 3
class StickMan(SVGMobject):
    """Stick-figure SVG mobject whose head/body/arms/legs are individually
    colored; falls back to the plain design when the requested mode's SVG
    file cannot be loaded."""
    CONFIG = {
        "color" : BLUE_E,
        "file_name_prefix": "stick_man",
        "stroke_width" : 2,
        "stroke_color" : WHITE,
        "fill_opacity" : 1.0,
        "height" : 3,
    }
    def __init__(self, mode = "plain", **kwargs):
        digest_config(self, kwargs)
        self.mode = mode
        self.parts_named = False
        try:
            svg_file = os.path.join(
                SVG_IMAGE_DIR,
                "%s_%s.svg" % (self.file_name_prefix, mode)
            )
            SVGMobject.__init__(self, file_name=svg_file, **kwargs)
        except Exception:
            # Narrowed from a bare 'except:' which also swallowed
            # KeyboardInterrupt/SystemExit.
            warnings.warn("No %s design with mode %s" %
                          (self.file_name_prefix, mode))
            svg_file = os.path.join(
                SVG_IMAGE_DIR,
                "stick_man_plain.svg",
            )
            # NOTE(review): passing mode="plain" through to SVGMobject looks
            # suspicious — confirm SVGMobject accepts a 'mode' kwarg.
            SVGMobject.__init__(self, mode="plain", file_name=svg_file, **kwargs)
    def name_parts(self):
        """Bind the fixed submobject indices to named attributes."""
        self.head = self.submobjects[HEAD_INDEX]
        self.body = self.submobjects[BODY_INDEX]
        self.arms = self.submobjects[ARMS_INDEX]
        self.legs = self.submobjects[LEGS_INDEX]
        self.parts_named = True
    def init_colors(self):
        """Color each named part; called by the SVGMobject machinery."""
        SVGMobject.init_colors(self)
        if not self.parts_named:
            self.name_parts()
        self.head.set_fill(self.color, opacity = 1)
        self.body.set_fill(RED, opacity = 1)
        self.arms.set_fill(YELLOW, opacity = 1)
        self.legs.set_fill(BLUE, opacity = 1)
        return self
class Waving(Scene):
def construct(self):
start_man = StickMan()
plain_man = StickMan()
waving_man = StickMan("wave")
self.add(start_man)
self.wait()
self.play(Transform(start_man,waving_man))
self.play(Transform(start_man,plain_man))
self.wait()
class CirclesAndSquares(SVGMobject):
CONFIG = {
"color" : BLUE_E,
"file_name_prefix": "circles_and_squares",
"stroke_width" : 2,
"stroke_color" : WHITE,
"fill_opacity" : 1.0,
"height" : 3,
"start_corner" : None,
"circle_index" : 0,
"line1_index" :1,
"line2_index" : 2,
"square1_index" : 3,
"square2_index" : 4,
}
def __init__(self, mode = "plain", **kwargs):
digest_config(self, kwargs)
self.mode = mode
self.parts_named = False
try:
svg_file = os.path.join(
SVG_IMAGE_DIR,
"%s_%s.svg" % (self.file_name_prefix, mode)
)
SVGMobject.__init__(self, file_name=svg_file, **kwargs)
except:
warnings.warn("No %s design with mode %s" %
(self.file_name_prefix, mode))
svg_file = os.path.join(
SVG_IMAGE_DIR,
"circles_and_squares_plain.svg",
)
SVGMobject.__init__(self, mode="plain", file_name=svg_file, **kwargs)
def name_parts(self):
self.circle = self.submobjects[self.circle_index]
self.line1 = self.submobjects[self.line1_index]
self.line2 = self.submobjects[self.line2_index]
self.square1 = self.submobjects[self.square1_index]
self.square2 = self.submobjects[self.square2_index]
self.parts_named = True
def init_colors(self):
SVGMobject.init_colors(self)
self.name_parts()
self.circle.set_fill(RED, opacity = 1)
self.line1.set_fill(self.color, opacity = 0)
self.line2.set_fill(self.color, opacity = 0)
self.square1.set_fill(GREEN, opacity = 1)
self.square2.set_fill(BLUE, opacity = 1)
return self
class SVGCircleAndSquare(Scene):
def construct(self):
thingy = CirclesAndSquares()
self.add(thingy)
self.wait()
if __name__ == "__main__":
# Call this file at command line to make sure all scenes work with version of manim
# type "python manim_tutorial_P37.py" at command line to run all scenes in this file
#Must have "import os" and "import pyclbr" at start of file to use this
###Using Python class browser to determine which classes are defined in this file
module_name = 'manim_tutorial_P37' #Name of current file
module_info = pyclbr.readmodule(module_name)
for item in module_info.values():
if item.module==module_name:
print(item.name)
os.system("python -m manim manim_tutorial_P37.py %s -l" % item.name) #Does not play files | examples/tutorial.py | 24,971 | A few simple shapesPython 2.7 version runs in Python 3.7 without changescircle = Circle()square = Square()line2=Linetriangle=Polygon(np.array([0,0,0]),np.array([1,1,0]),np.array([1,-1,0]))self.play(ShowCreation(circle))self.play(FadeOut(circle))self.play(GrowFromCenter(square))self.play(Transform(square,triangle))A few more simple shapes2.7 version runs in 3.7 without any changesNote: I fixed my 'play command not found' issue by installing soxShow the difference between .shift() and .move_toAdding text on the screenTry uncommenting the followingself.play(ApplyMethod(second_line.move_to, LEFT_SIDE-2*LEFT))self.play(ApplyMethod(my_first_text.next_to,second_line))Playing around with text propertiesRotation of text and highlighting with surrounding geometriesOrder mattersA short script showing how to use Latex commandsGrouping and coloring parts of equationsUsing braces to group text togetherA more concise block of code with all columns aligned[t.to_edge(BOTTOM,buff=SMALL_BUFF) for t in term_num]term = TexMobject("")term.to_edge(BOTTOM,buff=SMALL_BUFF)self.wait()Create axes and gridadd x and y labelPlace grid on screenList of vectors pointing to each grid pointEmpty list to use in for loopConstant field up and to rightCreate vector and shift it to grid pointAppend to listPass list of vectors to create a VGroupDraw VGroup on screenThis calculates the field at a single point.efield = np.array((-y,x,0))/math.sqrt(x**2+y**2) Try one of these two fieldsefield = np.array(( -2*(y%2)+1 , -2*(x%2)+1 , 0 ))/3 Try one of these two fieldsself.set_camera_position(0, -np.pi/2) Old codeefield = np.array((-y,x,z))/math.sqrt(x**2+y**2+z**2)Prevent divide by zero Call this file at command line to make sure all scenes work with version of manim type "python manim_tutorial_P37.py" at command line to run all scenes in this fileMust have "import os" and "import pyclbr" 
at start of file to use thisUsing Python class browser to determine which classes are defined in this fileName of current fileDoes not play files | 2,025 | en | 0.760929 |
#!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'elli_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
except:
print('Failed to get specifier')
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# If both numeric format specifiers and "others" are used, assume we're dealing
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
| contrib/devtools/update-translations.py | 8,149 | Find all format specifiers in a string.
Remove invalid characters from translation string
Sanitize string for printing
Split format specifiers between numeric (Qt) and others (strprintf)
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
!/usr/bin/env python Copyright (c) 2014 Wladimir J. van der Laan Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. Name of transifex tool Name of source language file Directory with locale files Minimum number of messages for translation to be considered at all If both numeric format specifiers and "others" are used, assume we're dealing with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.htmlarg) only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing any kind of escaping that would be necessary for strprintf. Without this, this function would wrongly detect '%)' as a printf format specifier. numeric (Qt) can be present in any order, others (strprintf) must be in specified order assert that no source messages contain both Qt and strprintf format specifiers if this fails, go change the source as this is hacky and confusing! Allow numerus translations to omit %n specifier (usually when it only has one possible value) process only language files, and do not process source language remove provided suffix Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for comparison, disable by default) pre-fixups to cope with transifex output need to override encoding because 'utf8' is not understood only 'utf-8' remove control characters; this must be done over the entire file otherwise the XML parser will fail iterate over all messages in file pick all numerusforms set type to unfinished and clear string if invalid Remove location tags Remove entire message if it is an unfinished translation check if document is (virtually) empty, and remove it if so write fixed-up tree if diff reduction requested, replace some XML to 'sanitize' to qt formatting | 2,468 | en | 0.750959 |
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for wipeout service."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
import logging
from constants import constants
from core.domain import auth_services
from core.domain import collection_services
from core.domain import email_manager
from core.domain import exp_services
from core.domain import question_domain
from core.domain import question_services
from core.domain import rights_domain
from core.domain import rights_manager
from core.domain import skill_domain
from core.domain import skill_services
from core.domain import story_domain
from core.domain import story_services
from core.domain import subtopic_page_domain
from core.domain import subtopic_page_services
from core.domain import topic_domain
from core.domain import topic_services
from core.domain import user_domain
from core.domain import user_services
from core.domain import wipeout_domain
from core.domain import wipeout_service
from core.platform import models
from core.tests import test_utils
import feconf
import python_utils
# Import every storage model module that these tests interact with. The
# unpacking below assumes that Registry.import_models() returns the model
# modules in the same order as the names that are passed to it.
(
    auth_models, base_models, collection_models,
    config_models, email_models, exp_models,
    feedback_models, improvements_models, question_models,
    skill_models, story_models, subtopic_models,
    suggestion_models, topic_models, user_models
) = models.Registry.import_models([
    models.NAMES.auth, models.NAMES.base_model, models.NAMES.collection,
    models.NAMES.config, models.NAMES.email, models.NAMES.exploration,
    models.NAMES.feedback, models.NAMES.improvements, models.NAMES.question,
    models.NAMES.skill, models.NAMES.story, models.NAMES.subtopic,
    models.NAMES.suggestion, models.NAMES.topic, models.NAMES.user
])
# Platform-specific datastore services abstraction from the registry.
datastore_services = models.Registry.import_datastore_services()
class WipeoutServiceHelpersTests(test_utils.GenericTestBase):
    """Tests for the helper functions of the wipeout service that save,
    fetch and count pending deletion requests.
    """

    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    USER_2_EMAIL = 'some-other@email.com'
    USER_2_USERNAME = 'username2'

    def setUp(self):
        """Register two users and record their IDs and roles."""
        super(WipeoutServiceHelpersTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.user_1_role = user_services.get_user_settings(self.user_1_id).role
        self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
        self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
        self.user_2_role = user_services.get_user_settings(self.user_2_id).role

    def test_gets_pending_deletion_request(self):
        """A saved deletion request can be fetched back with its defaults."""
        request = wipeout_domain.PendingDeletionRequest.create_default(
            self.user_1_id, self.USER_1_EMAIL, self.user_1_role)
        wipeout_service.save_pending_deletion_requests([request])

        fetched_request = (
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertEqual(fetched_request.user_id, self.user_1_id)
        self.assertEqual(fetched_request.email, self.USER_1_EMAIL)
        self.assertEqual(fetched_request.deletion_complete, False)
        self.assertEqual(fetched_request.pseudonymizable_entity_mappings, {})

    def test_get_number_of_pending_deletion_requests_returns_correct_number(
            self):
        """The pending request count tracks how many requests were saved."""
        self.assertEqual(
            wipeout_service.get_number_of_pending_deletion_requests(), 0)

        requests = [
            wipeout_domain.PendingDeletionRequest.create_default(
                self.user_1_id, self.USER_1_EMAIL, self.user_1_role),
            wipeout_domain.PendingDeletionRequest.create_default(
                self.user_2_id, self.USER_2_EMAIL, self.user_2_role)
        ]
        wipeout_service.save_pending_deletion_requests(requests)
        self.assertEqual(
            wipeout_service.get_number_of_pending_deletion_requests(), 2)

    def test_saves_pending_deletion_request_when_new(self):
        """Saving a brand new request creates the corresponding model."""
        request = wipeout_domain.PendingDeletionRequest.create_default(
            self.user_1_id, self.USER_1_EMAIL, self.user_1_role)
        wipeout_service.save_pending_deletion_requests([request])

        saved_model = (
            user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
        self.assertEqual(saved_model.id, self.user_1_id)
        self.assertEqual(saved_model.email, self.USER_1_EMAIL)
        self.assertEqual(saved_model.deletion_complete, False)
        self.assertEqual(saved_model.pseudonymizable_entity_mappings, {})

    def test_saves_pending_deletion_request_when_already_existing(self):
        """Saving over an existing model updates its fields while keeping
        the original creation timestamp.
        """
        old_model = user_models.PendingDeletionRequestModel(
            id=self.user_1_id,
            email=self.USER_1_EMAIL,
            role=self.user_1_role,
            deletion_complete=False,
            pseudonymizable_entity_mappings={}
        )
        old_model.put()

        request = wipeout_domain.PendingDeletionRequest.create_default(
            self.user_1_id, self.USER_1_EMAIL, self.user_1_role)
        request.deletion_complete = True
        request.pseudonymizable_entity_mappings = {
            'story': {'story_id': 'user_id'}
        }
        wipeout_service.save_pending_deletion_requests([request])

        new_model = (
            user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
        self.assertEqual(new_model.id, self.user_1_id)
        self.assertEqual(new_model.email, self.USER_1_EMAIL)
        self.assertEqual(new_model.deletion_complete, True)
        self.assertEqual(
            new_model.pseudonymizable_entity_mappings,
            {'story': {'story_id': 'user_id'}})
        # Overwriting the request must not reset the creation timestamp.
        self.assertEqual(old_model.created_on, new_model.created_on)
class WipeoutServicePreDeleteTests(test_utils.GenericTestBase):
    """Provides testing of the pre-deletion part of wipeout service."""

    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    USER_2_EMAIL = 'some-other@email.com'
    USER_2_USERNAME = 'username2'
    USER_3_EMAIL = 'other@email.com'
    USER_3_USERNAME = 'username3'

    def setUp(self):
        """Create a topic-manager full user (with one attached profile user)
        and a second plain full user.
        """
        super(WipeoutServicePreDeleteTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.set_user_role(self.USER_1_USERNAME, feconf.ROLE_ID_TOPIC_MANAGER)
        self.user_1_auth_id = self.get_auth_id_from_email(self.USER_1_EMAIL)
        self.user_1_actions = user_services.UserActionsInfo(self.user_1_id)
        self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
        self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)

        user_data_dict = {
            'schema_version': 1,
            'display_alias': 'display_alias',
            'pin': '12345',
            'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE],
            'preferred_site_language_code': None,
            'preferred_audio_language_code': None,
            'user_id': self.user_1_id,
        }
        new_user_data_dict = {
            'schema_version': 1,
            'display_alias': 'display_alias3',
            'pin': '12345',
            'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE],
            'preferred_site_language_code': None,
            'preferred_audio_language_code': None,
            'user_id': None,
        }
        self.modifiable_user_data = (
            user_domain.ModifiableUserData.from_raw_dict(user_data_dict))
        self.modifiable_new_user_data = (
            user_domain.ModifiableUserData.from_raw_dict(new_user_data_dict))

        user_services.update_multiple_users_data(
            [self.modifiable_user_data])
        self.modifiable_user_data.display_alias = 'name'
        self.modifiable_user_data.pin = '123'
        # Create a profile user attached to user 1's auth account.
        self.profile_user_id = user_services.create_new_profiles(
            self.user_1_auth_id, self.USER_1_EMAIL,
            [self.modifiable_new_user_data]
        )[0].user_id

    def tearDown(self):
        """Verify that every deletion request left behind by a test can be
        fully processed and then verified as deleted.
        """
        pending_deletion_request_models = (
            user_models.PendingDeletionRequestModel.get_all())
        for pending_deletion_request_model in pending_deletion_request_models:
            pending_deletion_request = (
                wipeout_service.get_pending_deletion_request(
                    pending_deletion_request_model.id))
            self.assertEqual(
                wipeout_service.run_user_deletion(pending_deletion_request),
                wipeout_domain.USER_DELETION_SUCCESS)
            self.assertEqual(
                wipeout_service.run_user_deletion_completion(
                    pending_deletion_request),
                wipeout_domain.USER_VERIFICATION_SUCCESS)

    def test_pre_delete_user_email_subscriptions(self):
        """Pre-deletion unsubscribes the user from all email types."""
        email_preferences = user_services.get_email_preferences(self.user_1_id)
        self.assertEqual(
            email_preferences.can_receive_email_updates,
            feconf.DEFAULT_EMAIL_UPDATES_PREFERENCE)
        self.assertEqual(
            email_preferences.can_receive_editor_role_email,
            feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
        self.assertEqual(
            email_preferences.can_receive_feedback_message_email,
            feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
        self.assertEqual(
            email_preferences.can_receive_subscription_email,
            feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)

        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

        email_preferences = user_services.get_email_preferences(self.user_1_id)
        self.assertFalse(email_preferences.can_receive_email_updates)
        self.assertFalse(email_preferences.can_receive_editor_role_email)
        self.assertFalse(email_preferences.can_receive_feedback_message_email)
        self.assertFalse(email_preferences.can_receive_subscription_email)

    def test_pre_delete_profile_users_works_correctly(self):
        """Pre-deleting a profile user marks its settings and auth details
        models as deleted.
        """
        user_settings = user_services.get_user_settings(self.profile_user_id)
        self.assertFalse(user_settings.deleted)

        wipeout_service.pre_delete_user(self.profile_user_id)
        self.process_and_flush_pending_tasks()

        user_settings = user_models.UserSettingsModel.get_by_id(
            self.profile_user_id)
        self.assertTrue(user_settings.deleted)

        user_auth_details = (
            auth_models.UserAuthDetailsModel.get_by_id(self.profile_user_id))
        self.assertTrue(user_auth_details.deleted)

    def test_pre_delete_user_for_full_user_also_deletes_all_profiles(self):
        """Pre-deleting a full user also marks its profile users deleted."""
        user_settings = user_services.get_user_settings(self.user_1_id)
        self.assertFalse(user_settings.deleted)
        profile_user_settings = user_services.get_user_settings(
            self.profile_user_id)
        self.assertFalse(profile_user_settings.deleted)
        # Check the auth details model directly, mirroring the post-deletion
        # assertions below.
        profile_auth_details = (
            auth_models.UserAuthDetailsModel.get_by_id(self.profile_user_id))
        self.assertFalse(profile_auth_details.deleted)

        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

        user_settings = user_models.UserSettingsModel.get_by_id(self.user_1_id)
        self.assertTrue(user_settings.deleted)
        user_auth_details = (
            auth_models.UserAuthDetailsModel.get_by_id(self.profile_user_id))
        self.assertTrue(user_auth_details.deleted)
        profile_user_settings = user_models.UserSettingsModel.get_by_id(
            self.profile_user_id)
        self.assertTrue(profile_user_settings.deleted)
        profile_auth_details = (
            auth_models.UserAuthDetailsModel.get_by_id(self.profile_user_id))
        self.assertTrue(profile_auth_details.deleted)

    def test_pre_delete_user_without_activities_works_correctly(self):
        """Pre-deletion of a user without activities marks their settings as
        deleted, removes the auth association and records a pending request.
        """
        user_models.UserSubscriptionsModel(
            id=self.user_1_id, exploration_ids=[], collection_ids=[]
        ).put()

        user_settings = user_services.get_user_settings(self.user_1_id)
        self.assertFalse(user_settings.deleted)
        user_auth_details = auth_models.UserAuthDetailsModel.get(self.user_1_id)
        self.assertFalse(user_auth_details.deleted)

        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

        user_settings = user_models.UserSettingsModel.get_by_id(self.user_1_id)
        self.assertTrue(user_settings.deleted)
        self.assertIsNone(
            auth_services.get_auth_id_from_user_id(self.user_1_id))
        pending_deletion_model = (
            user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
        self.assertIsNotNone(pending_deletion_model)

    def test_pre_delete_username_is_not_saved_for_user_younger_than_week(self):
        """No long-term username is kept for accounts younger than a week."""
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

        pending_deletion_request = (
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertIsNone(
            pending_deletion_request.normalized_long_term_username)

    def test_pre_delete_username_is_saved_for_user_older_than_week(self):
        """The long-term username is kept for accounts older than a week."""
        # Sign up in the past so that the account is older than a week.
        date_10_days_ago = (
            datetime.datetime.utcnow() - datetime.timedelta(days=10))
        with self.mock_datetime_utcnow(date_10_days_ago):
            self.signup(self.USER_3_EMAIL, self.USER_3_USERNAME)
        user_3_id = self.get_user_id_from_email(self.USER_3_EMAIL)

        wipeout_service.pre_delete_user(user_3_id)
        self.process_and_flush_pending_tasks()

        pending_deletion_request = (
            wipeout_service.get_pending_deletion_request(user_3_id))
        self.assertEqual(
            pending_deletion_request.normalized_long_term_username,
            self.USER_3_USERNAME)

    def test_pre_delete_user_with_activities_multiple_owners(self):
        """Pre-deletion succeeds for activities that have multiple owners."""
        user_services.update_user_role(
            self.user_1_id, feconf.ROLE_ID_COLLECTION_EDITOR)
        self.save_new_valid_exploration('exp_id', self.user_1_id)
        rights_manager.assign_role_for_exploration(
            self.user_1_actions,
            'exp_id',
            self.user_2_id,
            rights_domain.ROLE_OWNER)
        self.save_new_valid_collection(
            'col_id', self.user_1_id, exploration_id='exp_id')
        rights_manager.assign_role_for_collection(
            self.user_1_actions,
            'col_id',
            self.user_2_id,
            rights_domain.ROLE_OWNER)

        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

        pending_deletion_model = (
            user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
        self.assertIsNotNone(pending_deletion_model)

    def test_pre_delete_user_collection_is_marked_deleted(self):
        """A private collection owned solely by the user is deleted."""
        self.save_new_valid_collection('col_id', self.user_1_id)
        collection_model = collection_models.CollectionModel.get_by_id('col_id')
        self.assertFalse(collection_model.deleted)

        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

        self.assertIsNone(collection_models.CollectionModel.get_by_id('col_id'))

    def test_pre_delete_user_exploration_is_marked_deleted(self):
        """A private exploration owned solely by the user is deleted."""
        self.save_new_valid_exploration('exp_id', self.user_1_id)
        exp_model = exp_models.ExplorationModel.get_by_id('exp_id')
        self.assertFalse(exp_model.deleted)

        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

        self.assertIsNone(exp_models.ExplorationModel.get_by_id('exp_id'))

    def test_pre_delete_user_collection_ownership_is_released(self):
        """A published collection becomes community-owned on pre-deletion."""
        self.save_new_valid_collection('col_id', self.user_1_id)
        self.publish_collection(self.user_1_id, 'col_id')
        rights_manager.assign_role_for_collection(
            user_services.get_system_user(),
            'col_id',
            self.user_2_id,
            feconf.ROLE_EDITOR)

        collection_summary_model = (
            collection_models.CollectionSummaryModel.get_by_id('col_id'))
        self.assertFalse(collection_summary_model.community_owned)

        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

        collection_summary_model = (
            collection_models.CollectionSummaryModel.get_by_id('col_id'))
        self.assertTrue(collection_summary_model.community_owned)

    def test_pre_delete_user_exploration_ownership_is_released(self):
        """A published exploration becomes community-owned on pre-deletion."""
        self.save_new_valid_exploration('exp_id', self.user_1_id)
        self.publish_exploration(self.user_1_id, 'exp_id')
        rights_manager.assign_role_for_exploration(
            user_services.get_system_user(),
            'exp_id',
            self.user_2_id,
            feconf.ROLE_EDITOR)

        exp_summary_model = exp_models.ExpSummaryModel.get_by_id('exp_id')
        self.assertFalse(exp_summary_model.community_owned)

        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

        exp_summary_model = exp_models.ExpSummaryModel.get_by_id('exp_id')
        self.assertTrue(exp_summary_model.community_owned)

    def test_pre_delete_user_collection_user_is_deassigned(self):
        """An editor of someone else's collection is removed from it."""
        self.save_new_valid_collection('col_id', self.user_1_id)
        rights_manager.assign_role_for_collection(
            user_services.get_system_user(),
            'col_id',
            self.user_2_id,
            feconf.ROLE_EDITOR)

        collection_summary_model = (
            collection_models.CollectionSummaryModel.get_by_id('col_id'))
        self.assertEqual(collection_summary_model.editor_ids, [self.user_2_id])

        wipeout_service.pre_delete_user(self.user_2_id)
        self.process_and_flush_pending_tasks()

        collection_summary_model = (
            collection_models.CollectionSummaryModel.get_by_id('col_id'))
        self.assertEqual(collection_summary_model.editor_ids, [])

    def test_pre_delete_user_exploration_user_is_deassigned(self):
        """An editor of someone else's exploration is removed from it."""
        self.save_new_valid_exploration('exp_id', self.user_1_id)
        rights_manager.assign_role_for_exploration(
            user_services.get_system_user(),
            'exp_id',
            self.user_2_id,
            feconf.ROLE_EDITOR)

        exp_summary_model = exp_models.ExpSummaryModel.get_by_id('exp_id')
        self.assertEqual(exp_summary_model.editor_ids, [self.user_2_id])

        wipeout_service.pre_delete_user(self.user_2_id)
        self.process_and_flush_pending_tasks()

        exp_summary_model = exp_models.ExpSummaryModel.get_by_id('exp_id')
        self.assertEqual(exp_summary_model.editor_ids, [])

    def test_pre_delete_user_user_is_deassigned_from_topics(self):
        """A topic manager is removed from managed topics on pre-deletion."""
        self.save_new_topic('top_id', self.user_1_id)
        topic_services.assign_role(
            user_services.get_system_user(),
            self.user_1_actions,
            feconf.ROLE_MANAGER,
            'top_id')

        top_rights_model = topic_models.TopicRightsModel.get_by_id('top_id')
        self.assertEqual(top_rights_model.manager_ids, [self.user_1_id])

        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

        top_rights_model = topic_models.TopicRightsModel.get_by_id('top_id')
        self.assertEqual(top_rights_model.manager_ids, [])
class WipeoutServiceRunFunctionsTests(test_utils.GenericTestBase):
    """Provides testing of the deletion run functions of wipeout service
    (run_user_deletion and run_user_deletion_completion).
    """
    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    USER_2_EMAIL = 'some-other@email.com'
    USER_2_USERNAME = 'username2'
    def setUp(self):
        """Sign up a user with a backdated creation time, mark them for
        deletion and keep the resulting pending deletion request for the
        tests to run against.
        """
        super(WipeoutServiceRunFunctionsTests, self).setUp()
        date_10_days_ago = (
            datetime.datetime.utcnow() - datetime.timedelta(days=10))
        # Sign up in the past so the account is older than a week at the
        # time pre_delete_user() runs.
        with self.mock_datetime_utcnow(date_10_days_ago):
            self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.set_user_role(self.USER_1_USERNAME, feconf.ROLE_ID_TOPIC_MANAGER)
        self.user_1_actions = user_services.UserActionsInfo(self.user_1_id)
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()
        self.pending_deletion_request = (
            wipeout_service.get_pending_deletion_request(self.user_1_id))
def test_run_user_deletion_with_user_not_deleted(self):
self.assertEqual(
wipeout_service.run_user_deletion(self.pending_deletion_request),
wipeout_domain.USER_DELETION_SUCCESS
)
def test_run_user_deletion_with_user_already_deleted(self):
wipeout_service.run_user_deletion(self.pending_deletion_request)
self.assertEqual(
wipeout_service.run_user_deletion(self.pending_deletion_request),
wipeout_domain.USER_DELETION_ALREADY_DONE
)
def test_run_user_deletion_completion_with_user_not_yet_deleted(self):
self.assertEqual(
wipeout_service.run_user_deletion_completion(
self.pending_deletion_request),
wipeout_domain.USER_VERIFICATION_NOT_DELETED)
self.assertIsNotNone(
user_models.UserSettingsModel.get_by_id(self.user_1_id))
self.assertIsNotNone(
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
def test_run_user_deletion_completion_with_user_properly_deleted(self):
wipeout_service.run_user_deletion(self.pending_deletion_request)
self.assertEqual(
wipeout_service.run_user_deletion_completion(
self.pending_deletion_request),
wipeout_domain.USER_VERIFICATION_SUCCESS
)
self.assertIsNotNone(
user_models.DeletedUserModel.get_by_id(self.user_1_id))
self.assertTrue(user_services.is_username_taken(self.USER_1_USERNAME))
self.assertIsNone(
user_models.UserSettingsModel.get_by_id(self.user_1_id))
self.assertIsNone(
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
# Pre-deleted auth associations will return None.
self.assertIsNone(
auth_services.get_auth_id_from_user_id(self.user_1_id))
self.assertTrue(
auth_services.verify_external_auth_associations_are_deleted(
self.user_1_id))
def test_run_user_deletion_completion_with_user_wrongly_deleted(self):
wipeout_service.run_user_deletion(self.pending_deletion_request)
user_models.CompletedActivitiesModel(
id=self.user_1_id, exploration_ids=[], collection_ids=[]
).put()
email_content = (
'The Wipeout process failed for the user with ID \'%s\' '
'and email \'%s\'.' % (self.user_1_id, self.USER_1_EMAIL)
)
send_email_swap = self.swap_with_checks(
email_manager,
'send_mail_to_admin',
lambda x, y: None,
expected_args=[('WIPEOUT: Account deletion failed', email_content)]
)
with send_email_swap:
self.assertEqual(
wipeout_service.run_user_deletion_completion(
self.pending_deletion_request),
wipeout_domain.USER_VERIFICATION_FAILURE)
self.assertIsNotNone(
user_models.UserSettingsModel.get_by_id(self.user_1_id))
self.assertIsNotNone(
auth_models.UserAuthDetailsModel.get_by_id(self.user_1_id))
self.assertIsNotNone(
user_models.PendingDeletionRequestModel.get_by_id(self.user_1_id))
class WipeoutServiceDeleteConfigModelsTests(test_utils.GenericTestBase):
    """Provides testing of the deletion part of wipeout service."""

    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    USER_2_EMAIL = 'some-other@email.com'
    USER_2_USERNAME = 'username2'
    CONFIG_1_ID = 'config_1_id'
    CONFIG_2_ID = 'config_2_id'

    def setUp(self):
        """Sign up two users, commit one config property as the first user,
        and mark both users for deletion.
        """
        super(WipeoutServiceDeleteConfigModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
        config_models.ConfigPropertyModel(
            id=self.CONFIG_1_ID, value='a'
        ).commit(self.user_1_id, [{'cmd': 'command'}])
        wipeout_service.pre_delete_user(self.user_1_id)
        wipeout_service.pre_delete_user(self.user_2_id)
        self.process_and_flush_pending_tasks()

    def _get_config_mappings(self, user_id):
        """Return the config-entity pseudonymous ID mappings stored in the
        user's pending deletion request.
        """
        return user_models.PendingDeletionRequestModel.get_by_id(
            user_id
        ).pseudonymizable_entity_mappings[models.NAMES.config]

    def _get_snapshot_metadata(self, config_id, version):
        """Return the snapshot metadata model for the given config property
        and version number.
        """
        return config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
            '%s-%s' % (config_id, version))

    def test_one_config_property_is_pseudonymized(self):
        """The snapshot metadata committer is replaced by a pseudonym."""
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))

        # Verify user is deleted.
        config_mappings = self._get_config_mappings(self.user_1_id)
        metadata_model = self._get_snapshot_metadata(self.CONFIG_1_ID, 1)
        self.assertEqual(
            metadata_model.committer_id, config_mappings[self.CONFIG_1_ID])

    def test_one_config_property_when_the_deletion_is_repeated_is_pseudonymized(
            self):
        """Re-running the deletion reuses the same pseudonymous user ID."""
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))

        # Return metadata model to the original user ID.
        metadata_model = self._get_snapshot_metadata(self.CONFIG_1_ID, 1)
        metadata_model.committer_id = self.user_1_id
        metadata_model.put_for_human()

        # Run the user deletion again.
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))

        # Verify that both the commit and the metadata have the same
        # pseudonymous user ID. Re-fetch the metadata model so the assertion
        # sees the stored value rather than the stale in-memory object (the
        # collection and exploration variants of this test do the same).
        config_mappings = self._get_config_mappings(self.user_1_id)
        metadata_model = self._get_snapshot_metadata(self.CONFIG_1_ID, 1)
        self.assertEqual(
            metadata_model.committer_id, config_mappings[self.CONFIG_1_ID])

    def test_multiple_config_properties_are_pseudonymized(self):
        """All config properties committed by the user are pseudonymized."""
        config_models.ConfigPropertyModel(
            id=self.CONFIG_2_ID, value='b'
        ).commit(self.user_1_id, [{'cmd': 'command'}])

        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))

        config_mappings = self._get_config_mappings(self.user_1_id)
        metadata_model_1 = self._get_snapshot_metadata(self.CONFIG_1_ID, 1)
        self.assertEqual(
            metadata_model_1.committer_id, config_mappings[self.CONFIG_1_ID])
        metadata_model_2 = self._get_snapshot_metadata(self.CONFIG_2_ID, 1)
        self.assertEqual(
            metadata_model_2.committer_id, config_mappings[self.CONFIG_2_ID])

    def test_multiple_config_properties_with_multiple_users_are_pseudonymized(
            self):
        """Deleting one user leaves the other user's commits untouched."""
        config_models.ConfigPropertyModel(
            id=self.CONFIG_2_ID, value='b'
        ).commit(self.user_2_id, [{'cmd': 'command'}])

        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))

        # Verify first user is deleted.
        config_mappings_1 = self._get_config_mappings(self.user_1_id)
        metadata_model_1 = self._get_snapshot_metadata(self.CONFIG_1_ID, 1)
        self.assertEqual(
            metadata_model_1.committer_id, config_mappings_1[self.CONFIG_1_ID])

        # Verify second user is not yet deleted.
        metadata_model_2 = self._get_snapshot_metadata(self.CONFIG_2_ID, 1)
        self.assertEqual(metadata_model_2.committer_id, self.user_2_id)

        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_2_id))

        # Verify second user is deleted.
        config_mappings_2 = self._get_config_mappings(self.user_2_id)
        metadata_model_3 = self._get_snapshot_metadata(self.CONFIG_2_ID, 1)
        self.assertEqual(
            metadata_model_3.committer_id, config_mappings_2[self.CONFIG_2_ID])

    def test_one_config_property_with_multiple_users_is_pseudonymized(self):
        """Each user's commits on a shared property get distinct pseudonyms."""
        config_models.ConfigPropertyModel.get_by_id(
            self.CONFIG_1_ID
        ).commit(self.user_2_id, [{'cmd': 'command'}])

        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))

        # Verify first user is deleted.
        config_mappings_1 = self._get_config_mappings(self.user_1_id)
        metadata_model_1 = self._get_snapshot_metadata(self.CONFIG_1_ID, 1)
        self.assertEqual(
            metadata_model_1.committer_id, config_mappings_1[self.CONFIG_1_ID])

        # Verify second user is not yet deleted.
        metadata_model_2 = self._get_snapshot_metadata(self.CONFIG_1_ID, 2)
        self.assertEqual(metadata_model_2.committer_id, self.user_2_id)

        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_2_id))

        # Verify second user is deleted.
        config_mappings_2 = self._get_config_mappings(self.user_2_id)
        metadata_model_3 = self._get_snapshot_metadata(self.CONFIG_1_ID, 2)
        self.assertEqual(
            metadata_model_3.committer_id, config_mappings_2[self.CONFIG_1_ID])
class WipeoutServiceVerifyDeleteConfigModelsTests(test_utils.GenericTestBase):
    """Provides testing of the verification part of wipeout service."""

    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    CONFIG_1_ID = 'config_1_id'
    CONFIG_2_ID = 'config_2_id'

    def setUp(self):
        """Sign up a user, commit two versions of one config property and a
        second distinct property, then mark the user for deletion.
        """
        super(WipeoutServiceVerifyDeleteConfigModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        # Fix: the first property must use CONFIG_1_ID. Previously both
        # models were created with CONFIG_2_ID, leaving CONFIG_1_ID unused
        # and clobbering the first property's snapshot history with the
        # second creation.
        config_model = config_models.ConfigPropertyModel(
            id=self.CONFIG_1_ID, value='a'
        )
        config_model.commit(self.user_1_id, [{'cmd': 'command'}])
        config_model.commit(self.user_1_id, [{'cmd': 'command_2'}])
        config_models.ConfigPropertyModel(
            id=self.CONFIG_2_ID, value='a'
        ).commit(self.user_1_id, [{'cmd': 'command'}])
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

    def test_verify_user_delete_when_user_is_deleted_returns_true(self):
        """After the deletion has run, verification reports success."""
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))

    def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
        """A new commit by the user after deletion makes verification fail
        until the deletion is re-run.
        """
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))

        # Re-introduce a commit that references the deleted user.
        config_models.ConfigPropertyModel(
            id=self.CONFIG_2_ID, value='a'
        ).commit(self.user_1_id, [{'cmd': 'command'}])
        self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))

        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteCollectionModelsTests(test_utils.GenericTestBase):
    """Provides testing of the deletion part of wipeout service."""

    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    USER_2_EMAIL = 'some-other@email.com'
    USER_2_USERNAME = 'username2'
    COL_1_ID = 'col_1_id'
    COL_2_ID = 'col_2_id'

    def setUp(self):
        """Sign up two users and create a published collection owned by the
        first user, then also assign the second user as an owner.
        """
        super(WipeoutServiceDeleteCollectionModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
        self.save_new_valid_collection(self.COL_1_ID, self.user_1_id)
        self.publish_collection(self.user_1_id, self.COL_1_ID)
        rights_manager.assign_role_for_collection(
            user_services.UserActionsInfo(self.user_1_id),
            self.COL_1_ID,
            self.user_2_id,
            feconf.ROLE_OWNER)

    def test_one_collection_snapshot_metadata_is_pseudonymized(self):
        """Committer IDs and content user IDs in the collection and rights
        snapshot metadata are replaced by the pseudonymous ID recorded in
        the pending deletion request.
        """
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))

        # Verify user is deleted.
        collection_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.collection]
        )
        metadata_model = (
            collection_models.CollectionSnapshotMetadataModel.get_by_id(
                '%s-1' % self.COL_1_ID)
        )
        self.assertEqual(
            metadata_model.committer_id,
            collection_mappings[self.COL_1_ID])
        rights_metadata_model_1 = (
            collection_models.CollectionRightsSnapshotMetadataModel.get_by_id(
                '%s-1' % self.COL_1_ID)
        )
        self.assertEqual(
            rights_metadata_model_1.committer_id,
            collection_mappings[self.COL_1_ID])
        self.assertEqual(
            rights_metadata_model_1.content_user_ids,
            [collection_mappings[self.COL_1_ID]])
        self.assertEqual(rights_metadata_model_1.commit_cmds_user_ids, [])
        rights_metadata_model_2 = (
            collection_models.CollectionRightsSnapshotMetadataModel.get_by_id(
                '%s-2' % self.COL_1_ID)
        )
        self.assertEqual(
            rights_metadata_model_2.committer_id,
            collection_mappings[self.COL_1_ID])
        self.assertEqual(
            rights_metadata_model_2.content_user_ids,
            [collection_mappings[self.COL_1_ID]])
        self.assertEqual(rights_metadata_model_2.commit_cmds_user_ids, [])

    def test_one_collection_snapshot_content_is_pseudonymized(self):
        """Owner IDs inside the rights snapshot content are pseudonymized;
        the still-existing second user's ID is left intact.
        """
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))

        # Verify user is deleted.
        collection_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.collection]
        )
        rights_content_model_1 = (
            collection_models.CollectionRightsSnapshotContentModel.get_by_id(
                '%s-1' % self.COL_1_ID)
        )
        self.assertEqual(
            rights_content_model_1.content['owner_ids'],
            [collection_mappings[self.COL_1_ID]])
        rights_content_model_2 = (
            collection_models.CollectionRightsSnapshotContentModel.get_by_id(
                '%s-3' % self.COL_1_ID)
        )
        self.assertItemsEqual(
            rights_content_model_2.content['owner_ids'],
            [
                collection_mappings[self.COL_1_ID],
                self.user_2_id
            ])

    def test_one_collection_commit_log_is_pseudonymized(self):
        """User IDs in collection commit log entries are pseudonymized."""
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))

        # Verify user is deleted.
        collection_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.collection]
        )
        commit_log_model_1 = (
            collection_models.CollectionCommitLogEntryModel.get_by_id(
                'rights-%s-2' % self.COL_1_ID)
        )
        self.assertEqual(
            commit_log_model_1.user_id,
            collection_mappings[self.COL_1_ID])
        commit_log_model_2 = (
            collection_models.CollectionCommitLogEntryModel.get_by_id(
                'rights-%s-3' % self.COL_1_ID)
        )
        self.assertEqual(
            commit_log_model_2.user_id,
            collection_mappings[self.COL_1_ID])

    def test_one_collection_with_missing_snapshot_is_pseudonymized(self):
        """A commit log entry without a matching snapshot is still
        pseudonymized, and the mismatch is reported via error logging.
        """
        # Create a commit log entry for a collection that has no snapshot
        # models, to trigger the ID-mismatch error log during deletion.
        collection_models.CollectionCommitLogEntryModel(
            id='collection-%s-1' % self.COL_2_ID,
            collection_id=self.COL_2_ID,
            user_id=self.user_1_id,
            commit_type='create_new',
            commit_cmds=[{}],
            post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
            version=1
        ).put_for_human()
        with self.capture_logging(min_level=logging.ERROR) as log_messages:
            wipeout_service.pre_delete_user(self.user_1_id)
            self.process_and_flush_pending_tasks()
            wipeout_service.delete_user(
                wipeout_service.get_pending_deletion_request(self.user_1_id))

        # The exploration message stems from the exploration that
        # save_new_valid_collection created for the collection node.
        self.assertItemsEqual(
            log_messages,
            [
                '[WIPEOUT] The commit log model '
                '\'CollectionCommitLogEntryModel\' and '
                'snapshot models [\'CollectionSnapshotMetadataModel\', '
                '\'CollectionRightsSnapshotMetadataModel\'] IDs differ. '
                'Snapshots without commit logs: [], '
                'commit logs without snapshots: [u\'%s\'].' % self.COL_2_ID,
                '[WIPEOUT] The commit log model '
                '\'ExplorationCommitLogEntryModel\' and '
                'snapshot models [\'ExplorationSnapshotMetadataModel\', '
                '\'ExplorationRightsSnapshotMetadataModel\'] IDs differ. '
                'Snapshots without commit logs: [], '
                'commit logs without snapshots: [u\'an_exploration_id\'].'
            ]
        )

        # Verify user is deleted.
        collection_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.collection]
        )
        metadata_model = (
            collection_models.CollectionSnapshotMetadataModel.get_by_id(
                '%s-1' % self.COL_1_ID
            )
        )
        self.assertEqual(
            metadata_model.committer_id,
            collection_mappings[self.COL_1_ID])
        commit_log_model_1 = (
            collection_models.CollectionCommitLogEntryModel.get_by_id(
                'collection-%s-1' % self.COL_1_ID
            )
        )
        self.assertEqual(
            commit_log_model_1.user_id,
            collection_mappings[self.COL_1_ID])
        commit_log_model_2 = (
            collection_models.CollectionCommitLogEntryModel.get_by_id(
                'collection-%s-1' % self.COL_2_ID
            )
        )
        self.assertEqual(
            commit_log_model_2.user_id,
            collection_mappings[self.COL_2_ID])

    def test_one_collection_when_the_deletion_is_repeated_is_pseudonymized(
            self):
        """Re-running the deletion reuses the same pseudonymous user ID."""
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))

        # Return metadata model to the original user ID.
        metadata_model = (
            collection_models.CollectionSnapshotMetadataModel.get_by_id(
                '%s-1' % self.COL_1_ID
            )
        )
        metadata_model.committer_id = self.user_1_id
        metadata_model.put_for_human()

        # Run the user deletion again.
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))

        # Verify that both the commit and the metadata have the same
        # pseudonymous user ID.
        collection_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.collection]
        )
        metadata_model = (
            collection_models.CollectionSnapshotMetadataModel.get_by_id(
                '%s-1' % self.COL_1_ID
            )
        )
        self.assertEqual(
            metadata_model.committer_id,
            collection_mappings[self.COL_1_ID])
        commit_log_model = (
            collection_models.CollectionCommitLogEntryModel.get_by_id(
                'collection-%s-1' % self.COL_1_ID)
        )
        self.assertEqual(
            commit_log_model.user_id,
            collection_mappings[self.COL_1_ID])

    def test_collection_user_is_removed_from_contributors(self):
        """The deleted user is stripped from the collection summary's
        contributor IDs and contributors summary, even when re-added.
        """
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        old_summary_model = (
            collection_models.CollectionSummaryModel.get_by_id(self.COL_1_ID))
        self.assertNotIn(self.user_1_id, old_summary_model.contributor_ids)
        self.assertNotIn(self.user_1_id, old_summary_model.contributors_summary)

        # Re-introduce the deleted user into the summary, then re-run the
        # deletion to check it is removed again.
        old_summary_model.contributor_ids = [self.user_1_id]
        old_summary_model.contributors_summary = {self.user_1_id: 2}
        old_summary_model.put()
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        new_summary_model = (
            collection_models.CollectionSummaryModel.get_by_id(self.COL_1_ID))
        self.assertNotIn(self.user_1_id, new_summary_model.contributor_ids)
        self.assertNotIn(self.user_1_id, new_summary_model.contributors_summary)

    def test_col_user_is_removed_from_contributor_ids_when_missing_from_summary(
            self):
        """The user is removed from contributor_ids even when absent from
        the contributors_summary dict.
        """
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        old_summary_model = (
            collection_models.CollectionSummaryModel.get_by_id(self.COL_1_ID))
        self.assertNotIn(self.user_1_id, old_summary_model.contributor_ids)
        self.assertNotIn(self.user_1_id, old_summary_model.contributors_summary)

        # Re-introduce the user in contributor_ids only (summary left empty).
        old_summary_model.contributor_ids = [self.user_1_id]
        old_summary_model.contributors_summary = {}
        old_summary_model.put()
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        new_summary_model = (
            collection_models.CollectionSummaryModel.get_by_id(self.COL_1_ID))
        self.assertNotIn(self.user_1_id, new_summary_model.contributor_ids)
        self.assertNotIn(self.user_1_id, new_summary_model.contributors_summary)

    def test_delete_exp_where_user_has_role_when_rights_model_marked_as_deleted(
            self):
        """Collection and rights models already marked as deleted are fully
        removed by the wipeout run.
        """
        self.save_new_valid_collection(self.COL_2_ID, self.user_1_id)
        collection_services.delete_collection(self.user_1_id, self.COL_2_ID)
        collection_rights_model = (
            collection_models.CollectionRightsModel.get_by_id(self.COL_2_ID))
        self.assertTrue(collection_rights_model.deleted)
        collection_model = (
            collection_models.CollectionModel.get_by_id(self.COL_2_ID))
        self.assertTrue(collection_model.deleted)

        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertIsNone(
            collection_models.CollectionRightsModel.get_by_id(self.COL_2_ID))
        self.assertIsNone(
            collection_models.CollectionModel.get_by_id(self.COL_2_ID))

    def test_multiple_collections_are_pseudonymized(self):
        """All collections touched by the user are pseudonymized."""
        self.save_new_valid_collection(self.COL_2_ID, self.user_1_id)
        self.publish_collection(self.user_1_id, self.COL_2_ID)

        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))

        collection_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.collection]
        )
        metadata_model = (
            collection_models.CollectionSnapshotMetadataModel.get_by_id(
                '%s-1' % self.COL_1_ID
            )
        )
        self.assertEqual(
            metadata_model.committer_id,
            collection_mappings[self.COL_1_ID])
        commit_log_model = (
            collection_models.CollectionCommitLogEntryModel.get_by_id(
                'collection-%s-1' % self.COL_1_ID
            )
        )
        self.assertEqual(
            commit_log_model.user_id,
            collection_mappings[self.COL_1_ID])
        metadata_model = (
            collection_models.CollectionSnapshotMetadataModel.get_by_id(
                '%s-1' % self.COL_2_ID
            )
        )
        self.assertEqual(
            metadata_model.committer_id,
            collection_mappings[self.COL_2_ID])
        commit_log_model = (
            collection_models.CollectionCommitLogEntryModel.get_by_id(
                'collection-%s-1' % self.COL_2_ID
            )
        )
        self.assertEqual(
            commit_log_model.user_id,
            collection_mappings[self.COL_2_ID])
class WipeoutServiceVerifyDeleteCollectionModelsTests(
        test_utils.GenericTestBase):
    """Provides testing of the verification part of wipeout service."""

    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    COL_1_ID = 'col_1_id'
    COL_2_ID = 'col_2_id'

    def setUp(self):
        """Sign up a user, create two collections owned by them, and mark
        the user for deletion.
        """
        super(WipeoutServiceVerifyDeleteCollectionModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        for collection_id in (self.COL_1_ID, self.COL_2_ID):
            self.save_new_valid_collection(collection_id, self.user_1_id)
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

    def test_verify_user_delete_when_user_is_deleted_returns_true(self):
        """Verification passes once the deletion has been run."""
        pending_request = wipeout_service.get_pending_deletion_request(
            self.user_1_id)
        wipeout_service.delete_user(pending_request)
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))

    def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
        """A snapshot metadata model added after deletion makes verification
        fail until the deletion is re-run.
        """
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))

        # Re-introduce a model that references the deleted user.
        collection_models.CollectionSnapshotMetadataModel(
            id='%s-1' % self.COL_1_ID,
            committer_id=self.user_1_id,
            commit_message='123',
            commit_type='create',
            commit_cmds={}
        ).put_for_human()
        self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))

        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteExplorationModelsTests(test_utils.GenericTestBase):
"""Provides testing of the deletion part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
EXP_1_ID = 'exp_1_id'
EXP_2_ID = 'exp_2_id'
def setUp(self):
super(WipeoutServiceDeleteExplorationModelsTests, self).setUp()
self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
self.save_new_valid_exploration(self.EXP_1_ID, self.user_1_id)
self.publish_exploration(self.user_1_id, self.EXP_1_ID)
rights_manager.assign_role_for_exploration(
user_services.UserActionsInfo(self.user_1_id),
self.EXP_1_ID,
self.user_2_id,
feconf.ROLE_OWNER)
def test_one_exploration_snapshot_metadata_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify user is deleted.
exploration_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.exploration]
)
metadata_model = (
exp_models.ExplorationSnapshotMetadataModel.get_by_id(
'%s-1' % self.EXP_1_ID)
)
self.assertEqual(
metadata_model.committer_id,
exploration_mappings[self.EXP_1_ID])
rights_metadata_model_1 = (
exp_models.ExplorationRightsSnapshotMetadataModel.get_by_id(
'%s-1' % self.EXP_1_ID)
)
self.assertEqual(
rights_metadata_model_1.committer_id,
exploration_mappings[self.EXP_1_ID])
self.assertEqual(
rights_metadata_model_1.content_user_ids,
[exploration_mappings[self.EXP_1_ID]])
self.assertEqual(rights_metadata_model_1.commit_cmds_user_ids, [])
rights_metadata_model_2 = (
exp_models.ExplorationRightsSnapshotMetadataModel.get_by_id(
'%s-2' % self.EXP_1_ID)
)
self.assertEqual(
rights_metadata_model_2.committer_id,
exploration_mappings[self.EXP_1_ID])
self.assertEqual(
rights_metadata_model_2.content_user_ids,
[exploration_mappings[self.EXP_1_ID]])
self.assertEqual(rights_metadata_model_2.commit_cmds_user_ids, [])
def test_one_exploration_snapshot_content_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify user is deleted.
exploration_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.exploration]
)
rights_content_model_1 = (
exp_models.ExplorationRightsSnapshotContentModel.get_by_id(
'%s-1' % self.EXP_1_ID)
)
self.assertEqual(
rights_content_model_1.content['owner_ids'],
[exploration_mappings[self.EXP_1_ID]])
rights_content_model_2 = (
exp_models.ExplorationRightsSnapshotContentModel.get_by_id(
'%s-3' % self.EXP_1_ID)
)
self.assertItemsEqual(
rights_content_model_2.content['owner_ids'],
[
exploration_mappings[self.EXP_1_ID],
self.user_2_id
])
def test_one_exploration_commit_log_is_pseudonymized(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify user is deleted.
exploration_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.exploration]
)
commit_log_model_1 = (
exp_models.ExplorationCommitLogEntryModel.get_by_id(
'rights-%s-2' % self.EXP_1_ID)
)
self.assertEqual(
commit_log_model_1.user_id, exploration_mappings[self.EXP_1_ID])
commit_log_model_2 = (
exp_models.ExplorationCommitLogEntryModel.get_by_id(
'rights-%s-3' % self.EXP_1_ID)
)
self.assertEqual(
commit_log_model_2.user_id, exploration_mappings[self.EXP_1_ID])
def test_one_exploration_with_missing_snapshot_is_pseudonymized(self):
exp_models.ExplorationCommitLogEntryModel(
id='exploration-%s-1' % self.EXP_2_ID,
exploration_id=self.EXP_2_ID,
user_id=self.user_1_id,
commit_type='create_new',
commit_cmds=[{}],
post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
version=1
).put_for_human()
with self.capture_logging(min_level=logging.ERROR) as log_messages:
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertItemsEqual(
log_messages,
[
'[WIPEOUT] The commit log model '
'\'ExplorationCommitLogEntryModel\' and '
'snapshot models [\'ExplorationSnapshotMetadataModel\', '
'\'ExplorationRightsSnapshotMetadataModel\'] IDs differ. '
'Snapshots without commit logs: [], '
'commit logs without snapshots: [u\'%s\'].' % self.EXP_2_ID
]
)
# Verify user is deleted.
exploration_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.exploration]
)
metadata_model = (
exp_models.ExplorationSnapshotMetadataModel.get_by_id(
'%s-1' % self.EXP_1_ID
)
)
self.assertEqual(
metadata_model.committer_id, exploration_mappings[self.EXP_1_ID])
commit_log_model_1 = (
exp_models.ExplorationCommitLogEntryModel.get_by_id(
'exploration-%s-1' % self.EXP_1_ID
)
)
self.assertEqual(
commit_log_model_1.user_id, exploration_mappings[self.EXP_1_ID])
commit_log_model_2 = (
exp_models.ExplorationCommitLogEntryModel.get_by_id(
'exploration-%s-1' % self.EXP_2_ID
)
)
self.assertEqual(
commit_log_model_2.user_id, exploration_mappings[self.EXP_2_ID])
def test_one_exploration_when_the_deletion_is_repeated_is_pseudonymized(
self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Return metadata model to the original user ID.
metadata_model = (
exp_models.ExplorationSnapshotMetadataModel.get_by_id(
'%s-1' % self.EXP_1_ID
)
)
metadata_model.committer_id = self.user_1_id
metadata_model.put_for_human()
# Run the user deletion again.
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
# Verify that both the commit and the metadata have the same
# pseudonymous user ID.
exploration_mappings = (
user_models.PendingDeletionRequestModel.get_by_id(
self.user_1_id
).pseudonymizable_entity_mappings[models.NAMES.exploration]
)
metadata_model = (
exp_models.ExplorationSnapshotMetadataModel.get_by_id(
'%s-1' % self.EXP_1_ID
)
)
self.assertEqual(
metadata_model.committer_id, exploration_mappings[self.EXP_1_ID])
commit_log_model = (
exp_models.ExplorationCommitLogEntryModel.get_by_id(
'exploration-%s-1' % self.EXP_1_ID)
)
self.assertEqual(
commit_log_model.user_id, exploration_mappings[self.EXP_1_ID])
def test_exploration_user_is_removed_from_contributors(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
old_summary_model = exp_models.ExpSummaryModel.get_by_id(self.EXP_1_ID)
self.assertNotIn(self.user_1_id, old_summary_model.contributor_ids)
self.assertNotIn(self.user_1_id, old_summary_model.contributors_summary)
old_summary_model.contributor_ids = [self.user_1_id]
old_summary_model.contributors_summary = {self.user_1_id: 2}
old_summary_model.put()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
new_summary_model = exp_models.ExpSummaryModel.get_by_id(self.EXP_1_ID)
self.assertNotIn(self.user_1_id, new_summary_model.contributor_ids)
self.assertNotIn(self.user_1_id, new_summary_model.contributors_summary)
def test_exp_user_is_removed_from_contributor_ids_when_missing_from_summary(
self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
old_summary_model = exp_models.ExpSummaryModel.get_by_id(self.EXP_1_ID)
self.assertNotIn(self.user_1_id, old_summary_model.contributor_ids)
self.assertNotIn(self.user_1_id, old_summary_model.contributors_summary)
old_summary_model.contributor_ids = [self.user_1_id]
old_summary_model.contributors_summary = {}
old_summary_model.put()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
new_summary_model = exp_models.ExpSummaryModel.get_by_id(self.EXP_1_ID)
self.assertNotIn(self.user_1_id, new_summary_model.contributor_ids)
self.assertNotIn(self.user_1_id, new_summary_model.contributors_summary)
def test_delete_exp_where_user_has_role_when_rights_model_marked_as_deleted(
self):
self.save_new_valid_exploration(self.EXP_2_ID, self.user_1_id)
exp_services.delete_exploration(self.user_1_id, self.EXP_2_ID)
exp_rights_model = (
exp_models.ExplorationRightsModel.get_by_id(self.EXP_2_ID))
self.assertTrue(exp_rights_model.deleted)
exp_model = (
exp_models.ExplorationRightsModel.get_by_id(self.EXP_2_ID))
self.assertTrue(exp_model.deleted)
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertIsNone(
exp_models.ExplorationRightsModel.get_by_id(self.EXP_2_ID))
self.assertIsNone(
exp_models.ExplorationModel.get_by_id(self.EXP_2_ID))
    def test_multiple_explorations_are_pseudonymized(self):
        """Checks that snapshot metadata and commit log models of every
        exploration the user touched are pseudonymized on deletion.
        """
        self.save_new_valid_exploration(self.EXP_2_ID, self.user_1_id)
        self.publish_exploration(self.user_1_id, self.EXP_2_ID)
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Mapping from exploration ID to the pseudonymous user ID chosen
        # for that exploration.
        exploration_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.exploration]
        )
        metadata_model = (
            exp_models.ExplorationSnapshotMetadataModel.get_by_id(
                '%s-1' % self.EXP_1_ID
            )
        )
        self.assertEqual(
            metadata_model.committer_id, exploration_mappings[self.EXP_1_ID])
        commit_log_model = (
            exp_models.ExplorationCommitLogEntryModel.get_by_id(
                'exploration-%s-1' % self.EXP_1_ID
            )
        )
        self.assertEqual(
            commit_log_model.user_id, exploration_mappings[self.EXP_1_ID])
        metadata_model = (
            exp_models.ExplorationSnapshotMetadataModel.get_by_id(
                '%s-1' % self.EXP_2_ID
            )
        )
        self.assertEqual(
            metadata_model.committer_id, exploration_mappings[self.EXP_2_ID])
        commit_log_model = (
            exp_models.ExplorationCommitLogEntryModel.get_by_id(
                'exploration-%s-1' % self.EXP_2_ID
            )
        )
        self.assertEqual(
            commit_log_model.user_id, exploration_mappings[self.EXP_2_ID])
class WipeoutServiceVerifyDeleteExplorationModelsTests(
        test_utils.GenericTestBase):
    """Provides testing of the verification part of wipeout service."""

    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    EXP_1_ID = 'exp_1_id'
    EXP_2_ID = 'exp_2_id'

    def setUp(self):
        """Signs up a user, creates two explorations owned by them, and
        marks the user for deletion.
        """
        super(WipeoutServiceVerifyDeleteExplorationModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.save_new_valid_exploration(self.EXP_1_ID, self.user_1_id)
        self.save_new_valid_exploration(self.EXP_2_ID, self.user_1_id)
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

    def test_verify_user_delete_when_user_is_deleted_returns_true(self):
        """Checks verification succeeds right after a clean deletion."""
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))

    def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
        """Checks verification fails when new user-authored data appears
        after deletion, and succeeds again after re-running the deletion.
        """
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
        # Create a new snapshot model attributed to the deleted user so
        # verification must now report the user as not fully deleted.
        exp_models.ExplorationSnapshotMetadataModel(
            id='%s-1' % self.EXP_1_ID,
            committer_id=self.user_1_id,
            commit_message='123',
            commit_type='create',
            commit_cmds={}
        ).put_for_human()
        self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteEmailModelsTests(test_utils.GenericTestBase):
    """Provides testing of the deletion part of wipeout service."""

    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    USER_2_EMAIL = 'some-other@email.com'
    USER_2_USERNAME = 'username2'
    THREAD_1_ID = 'thread_1_id'
    THREAD_2_ID = 'thread_2_id'
    REPLY_1_ID = 'reply_1_id'
    REPLY_2_ID = 'reply_2_id'

    def setUp(self):
        """Signs up two users, creates one reply-to-ID model for the first
        user, and marks both users for deletion.
        """
        super(WipeoutServiceDeleteEmailModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
        email_models.GeneralFeedbackEmailReplyToIdModel(
            id='%s.%s' % (self.user_1_id, self.THREAD_1_ID),
            user_id=self.user_1_id,
            thread_id=self.THREAD_1_ID,
            reply_to_id=self.REPLY_1_ID
        ).put()
        wipeout_service.pre_delete_user(self.user_1_id)
        wipeout_service.pre_delete_user(self.user_2_id)
        self.process_and_flush_pending_tasks()

    def test_one_email_is_deleted(self):
        """Checks the single reply-to-ID model is removed on deletion."""
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertIsNone(
            email_models.GeneralFeedbackEmailReplyToIdModel.get_by_id(
                '%s.%s' % (self.user_1_id, self.THREAD_1_ID)))

    def test_multiple_emails_are_deleted(self):
        """Checks all reply-to-ID models of the user are removed."""
        email_models.GeneralFeedbackEmailReplyToIdModel(
            id='%s.%s' % (self.user_1_id, self.THREAD_2_ID),
            user_id=self.user_1_id,
            thread_id=self.THREAD_2_ID,
            reply_to_id=self.REPLY_2_ID
        ).put()
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertIsNone(
            email_models.GeneralFeedbackEmailReplyToIdModel.get_by_id(
                '%s.%s' % (self.user_1_id, self.THREAD_1_ID)))
        self.assertIsNone(
            email_models.GeneralFeedbackEmailReplyToIdModel.get_by_id(
                '%s.%s' % (self.user_1_id, self.THREAD_2_ID)))

    def test_multiple_emails_from_multiple_users_are_deleted(self):
        """Checks deletion of one user leaves the other user's models
        intact until that user is deleted too.
        """
        email_models.GeneralFeedbackEmailReplyToIdModel(
            id='%s.%s' % (self.user_2_id, self.THREAD_2_ID),
            user_id=self.user_2_id,
            thread_id=self.THREAD_2_ID,
            reply_to_id=self.REPLY_2_ID
        ).put()
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertIsNone(
            email_models.GeneralFeedbackEmailReplyToIdModel.get_by_id(
                '%s.%s' % (self.user_1_id, self.THREAD_1_ID)))
        # Second user's model must survive the first user's deletion.
        self.assertIsNotNone(
            email_models.GeneralFeedbackEmailReplyToIdModel.get_by_id(
                '%s.%s' % (self.user_2_id, self.THREAD_2_ID)))
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_2_id))
        self.assertIsNone(
            email_models.GeneralFeedbackEmailReplyToIdModel.get_by_id(
                '%s.%s' % (self.user_2_id, self.THREAD_2_ID)))
class WipeoutServiceVerifyDeleteEmailModelsTests(test_utils.GenericTestBase):
    """Provides testing of the verification part of wipeout service."""

    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    THREAD_1_ID = 'thread_1_id'
    THREAD_2_ID = 'thread_2_id'
    REPLY_1_ID = 'reply_1_id'
    REPLY_2_ID = 'reply_2_id'

    def setUp(self):
        """Signs up a user, creates one reply-to-ID model for them, and
        marks the user for deletion.
        """
        super(WipeoutServiceVerifyDeleteEmailModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        email_models.GeneralFeedbackEmailReplyToIdModel(
            id='%s.%s' % (self.user_1_id, self.THREAD_1_ID),
            user_id=self.user_1_id,
            thread_id=self.THREAD_1_ID,
            reply_to_id=self.REPLY_1_ID
        ).put()
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

    def test_verify_user_delete_when_user_is_deleted_returns_true(self):
        """Checks verification succeeds right after a clean deletion."""
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))

    def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
        """Checks verification fails when a new email model reappears for
        the deleted user, and succeeds again after re-running the deletion.
        """
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
        # Recreate the reply-to-ID model so the user is no longer fully
        # deleted from the datastore's point of view.
        email_models.GeneralFeedbackEmailReplyToIdModel(
            id='%s.%s' % (self.user_1_id, self.THREAD_1_ID),
            user_id=self.user_1_id,
            thread_id=self.THREAD_1_ID,
            reply_to_id=self.REPLY_1_ID
        ).put()
        self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteFeedbackModelsTests(test_utils.GenericTestBase):
    """Provides testing of the deletion part of wipeout service."""

    FEEDBACK_1_ID = 'feedback_1_id'
    FEEDBACK_2_ID = 'feedback_2_id'
    MESSAGE_1_ID = 'message_1_id'
    MESSAGE_2_ID = 'message_2_id'
    EXP_1_ID = 'exp_1_id'
    EXP_2_ID = 'exp_2_id'
    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    USER_2_EMAIL = 'some-other@email.com'
    USER_2_USERNAME = 'username2'
    # Number of models created in the bulk test; large enough to exercise
    # batched pseudonymization.
    NUMBER_OF_MODELS = 150

    def setUp(self):
        """Signs up two users, creates a feedback thread (authored by user
        1, last message by user 2), one message, and one suggestion, then
        marks both users for deletion.
        """
        super(WipeoutServiceDeleteFeedbackModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
        feedback_models.GeneralFeedbackThreadModel(
            id=self.FEEDBACK_1_ID,
            entity_type=feconf.ENTITY_TYPE_EXPLORATION,
            entity_id=self.EXP_1_ID,
            original_author_id=self.user_1_id,
            subject='Wrong state name',
            has_suggestion=True,
            last_nonempty_message_text='Some text',
            last_nonempty_message_author_id=self.user_2_id
        ).put_for_human()
        feedback_models.GeneralFeedbackMessageModel(
            id=self.MESSAGE_1_ID,
            thread_id=self.FEEDBACK_1_ID,
            message_id=0,
            author_id=self.user_2_id,
            text='Some text'
        ).put_for_human()
        suggestion_models.GeneralSuggestionModel(
            id=self.FEEDBACK_1_ID,
            suggestion_type=(
                feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
            target_type=feconf.ENTITY_TYPE_EXPLORATION,
            target_id=self.EXP_1_ID,
            target_version_at_submission=1,
            status=suggestion_models.STATUS_IN_REVIEW,
            author_id=self.user_1_id,
            final_reviewer_id=self.user_2_id,
            change_cmd={},
            score_category=suggestion_models.SCORE_TYPE_CONTENT
        ).put_for_human()
        wipeout_service.pre_delete_user(self.user_1_id)
        wipeout_service.pre_delete_user(self.user_2_id)
        self.process_and_flush_pending_tasks()

    def test_one_feedback_is_pseudonymized(self):
        """Checks the thread author and suggestion author are replaced by
        the same pseudonymous ID.
        """
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Verify user is pseudonymized.
        feedback_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.feedback]
        )
        feedback_thread_model = (
            feedback_models.GeneralFeedbackThreadModel.get_by_id(
                self.FEEDBACK_1_ID)
        )
        self.assertEqual(
            feedback_thread_model.original_author_id,
            feedback_mappings[self.FEEDBACK_1_ID]
        )
        suggestion_model_model = (
            suggestion_models.GeneralSuggestionModel.get_by_id(
                self.FEEDBACK_1_ID)
        )
        self.assertEqual(
            suggestion_model_model.author_id,
            feedback_mappings[self.FEEDBACK_1_ID]
        )

    def test_one_feedback_when_the_deletion_is_repeated_is_pseudonymized(self):
        """Checks rerunning the deletion reuses the same pseudonymous ID."""
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Return feedback thread model to the original user ID.
        feedback_thread_model = (
            feedback_models.GeneralFeedbackThreadModel.get_by_id(
                self.FEEDBACK_1_ID)
        )
        feedback_thread_model.original_author_id = self.user_1_id
        feedback_thread_model.put_for_human()
        # Run the user deletion again.
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Verify that both the feedback thread and the suggestion have the same
        # pseudonymous user ID.
        feedback_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.feedback]
        )
        new_feedback_thread_model = (
            feedback_models.GeneralFeedbackThreadModel.get_by_id(
                self.FEEDBACK_1_ID)
        )
        self.assertEqual(
            new_feedback_thread_model.original_author_id,
            feedback_mappings[self.FEEDBACK_1_ID]
        )

    def test_multiple_feedbacks_are_pseudonymized(self):
        """Checks bulk pseudonymization over many threads and messages;
        a message gets the pseudonymous ID of its parent thread.
        """
        feedback_thread_models = []
        for i in python_utils.RANGE(self.NUMBER_OF_MODELS):
            feedback_thread_models.append(
                feedback_models.GeneralFeedbackThreadModel(
                    id='feedback-%s' % i,
                    entity_type=feconf.ENTITY_TYPE_EXPLORATION,
                    entity_id=self.EXP_1_ID,
                    original_author_id=self.user_1_id,
                    subject='Too short exploration',
                    last_nonempty_message_text='Some text',
                    last_nonempty_message_author_id=self.user_2_id
                )
            )
        feedback_message_models = []
        for i in python_utils.RANGE(self.NUMBER_OF_MODELS):
            feedback_message_models.append(
                feedback_models.GeneralFeedbackMessageModel(
                    id='message-%s' % i,
                    thread_id='feedback-%s' % i,
                    message_id=i,
                    author_id=self.user_1_id,
                    text='Some text'
                )
            )
        base_models.BaseHumanMaintainedModel.put_multi_for_human(
            feedback_thread_models + feedback_message_models)
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        feedback_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.feedback]
        )
        pseudonymized_feedback_thread_models = (
            feedback_models.GeneralFeedbackThreadModel.get_multi(
                [model.id for model in feedback_thread_models]
            )
        )
        for feedback_thread_model in pseudonymized_feedback_thread_models:
            self.assertEqual(
                feedback_thread_model.original_author_id,
                feedback_mappings[feedback_thread_model.id]
            )
        pseudonymized_feedback_message_models = (
            feedback_models.GeneralFeedbackMessageModel.get_multi(
                [model.id for model in feedback_message_models]
            )
        )
        for feedback_message_model in pseudonymized_feedback_message_models:
            self.assertEqual(
                feedback_message_model.author_id,
                feedback_mappings[feedback_message_model.thread_id]
            )

    def test_one_feedback_with_multiple_users_is_pseudonymized(self):
        """Checks per-user pseudonymization on a thread touched by two
        users: each user's fields change only when that user is deleted.
        """
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        feedback_mappings_1 = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.feedback]
        )
        # Verify first user is pseudonymized.
        feedback_thread_model = (
            feedback_models.GeneralFeedbackThreadModel.get_by_id(
                self.FEEDBACK_1_ID)
        )
        self.assertEqual(
            feedback_thread_model.original_author_id,
            feedback_mappings_1[self.FEEDBACK_1_ID]
        )
        # Verify second user is not yet pseudonymized.
        self.assertEqual(
            feedback_thread_model.last_nonempty_message_author_id,
            self.user_2_id
        )
        # Delete second user.
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_2_id))
        feedback_mappings_2 = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_2_id
            ).pseudonymizable_entity_mappings[models.NAMES.feedback]
        )
        # Verify second user is pseudonymized.
        self.assertEqual(
            feedback_thread_model.last_nonempty_message_author_id,
            feedback_mappings_2[self.FEEDBACK_1_ID]
        )
class WipeoutServiceVerifyDeleteFeedbackModelsTests(test_utils.GenericTestBase):
    """Provides testing of the verification part of wipeout service."""

    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    FEEDBACK_1_ID = 'feedback_1_id'
    MESSAGE_1_ID = 'message_1_id'
    EXP_1_ID = 'exp_1_id'

    def setUp(self):
        """Signs up a user, creates a feedback thread, message and
        suggestion all attributed to them, and marks the user for deletion.
        """
        super(WipeoutServiceVerifyDeleteFeedbackModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        feedback_models.GeneralFeedbackThreadModel(
            id=self.FEEDBACK_1_ID,
            entity_type=feconf.ENTITY_TYPE_EXPLORATION,
            entity_id=self.EXP_1_ID,
            original_author_id=self.user_1_id,
            subject='Wrong state name',
            has_suggestion=True,
            last_nonempty_message_text='Some text',
            last_nonempty_message_author_id=self.user_1_id
        ).put_for_human()
        feedback_models.GeneralFeedbackMessageModel(
            id=self.MESSAGE_1_ID,
            thread_id=self.FEEDBACK_1_ID,
            message_id=0,
            author_id=self.user_1_id,
            text='Some text'
        ).put_for_human()
        suggestion_models.GeneralSuggestionModel(
            id=self.FEEDBACK_1_ID,
            suggestion_type=(
                feconf.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
            target_type=feconf.ENTITY_TYPE_EXPLORATION,
            target_id=self.EXP_1_ID,
            target_version_at_submission=1,
            status=suggestion_models.STATUS_IN_REVIEW,
            author_id=self.user_1_id,
            final_reviewer_id=self.user_1_id,
            change_cmd={},
            score_category=suggestion_models.SCORE_TYPE_CONTENT
        ).put_for_human()
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

    def test_verify_user_delete_when_user_is_deleted_returns_true(self):
        """Checks verification succeeds right after a clean deletion."""
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))

    def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
        """Checks verification fails when a new thread reappears for the
        deleted user, and succeeds again after re-running the deletion.
        """
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
        # Recreate the feedback thread attributed to the deleted user.
        feedback_models.GeneralFeedbackThreadModel(
            id=self.FEEDBACK_1_ID,
            entity_type=feconf.ENTITY_TYPE_EXPLORATION,
            entity_id=self.EXP_1_ID,
            original_author_id=self.user_1_id,
            subject='Wrong state name',
            has_suggestion=True,
            last_nonempty_message_text='Some text',
            last_nonempty_message_author_id=self.user_1_id
        ).put_for_human()
        self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteImprovementsModelsTests(test_utils.GenericTestBase):
    """Provides testing of the deletion part of wipeout service."""

    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    EXP_1_ID = 'exp_1_id'
    EXP_2_ID = 'exp_2_id'

    def setUp(self):
        """Signs up a user and creates two resolved task entries resolved
        by that user. Note: the user is NOT pre-deleted here; the test
        method does that itself.
        """
        super(WipeoutServiceDeleteImprovementsModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.improvements_model_1_id = (
            improvements_models.TaskEntryModel.create(
                entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION,
                entity_id=self.EXP_1_ID,
                entity_version=1,
                task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE,
                target_type=constants.TASK_TARGET_TYPE_STATE,
                target_id='State',
                issue_description=None,
                status=constants.TASK_STATUS_RESOLVED,
                resolver_id=self.user_1_id
            )
        )
        self.improvements_model_2_id = (
            improvements_models.TaskEntryModel.create(
                entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION,
                entity_id=self.EXP_2_ID,
                entity_version=1,
                task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE,
                target_type=constants.TASK_TARGET_TYPE_STATE,
                target_id='State',
                issue_description=None,
                status=constants.TASK_STATUS_RESOLVED,
                resolver_id=self.user_1_id
            )
        )

    def test_delete_user_is_successful(self):
        """Checks both task entries survive pre-deletion but are removed
        by the actual deletion.
        """
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()
        self.assertIsNotNone(
            improvements_models.TaskEntryModel.get_by_id(
                self.improvements_model_1_id))
        self.assertIsNotNone(
            improvements_models.TaskEntryModel.get_by_id(
                self.improvements_model_2_id))
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertIsNone(
            improvements_models.TaskEntryModel.get_by_id(
                self.improvements_model_1_id))
        self.assertIsNone(
            improvements_models.TaskEntryModel.get_by_id(
                self.improvements_model_2_id))
class WipeoutServiceVerifyDeleteImprovementsModelsTests(
        test_utils.GenericTestBase):
    """Provides testing of the verification part of wipeout service."""

    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    USER_2_EMAIL = 'some-other@email.com'
    USER_2_USERNAME = 'username2'
    EXP_1_ID = 'exp_1_id'
    EXP_2_ID = 'exp_2_id'
    EXP_3_ID = 'exp_3_id'

    def setUp(self):
        """Signs up two users, creates two task entries resolved by the
        first user, and marks the first user for deletion.
        """
        super(WipeoutServiceVerifyDeleteImprovementsModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        improvements_models.TaskEntryModel.create(
            entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION,
            entity_id=self.EXP_1_ID,
            entity_version=1,
            task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE,
            target_type=constants.TASK_TARGET_TYPE_STATE,
            target_id='State',
            issue_description=None,
            status=constants.TASK_STATUS_RESOLVED,
            resolver_id=self.user_1_id
        )
        improvements_models.TaskEntryModel.create(
            entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION,
            entity_id=self.EXP_2_ID,
            entity_version=1,
            task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE,
            target_type=constants.TASK_TARGET_TYPE_STATE,
            target_id='State',
            issue_description=None,
            status=constants.TASK_STATUS_RESOLVED,
            resolver_id=self.user_1_id
        )
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

    def test_verify_user_delete_when_user_is_deleted_returns_true(self):
        """Checks verification succeeds right after a clean deletion."""
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))

    def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
        """Checks verification fails when a new task entry resolved by the
        deleted user appears, and succeeds again after re-running deletion.
        """
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
        # Create a fresh task entry attributed to the deleted user.
        improvements_models.TaskEntryModel.create(
            entity_type=constants.TASK_ENTITY_TYPE_EXPLORATION,
            entity_id=self.EXP_3_ID,
            entity_version=1,
            task_type=constants.TASK_TYPE_HIGH_BOUNCE_RATE,
            target_type=constants.TASK_TARGET_TYPE_STATE,
            target_id='State',
            issue_description=None,
            status=constants.TASK_STATUS_RESOLVED,
            resolver_id=self.user_1_id
        )
        self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteQuestionModelsTests(test_utils.GenericTestBase):
    """Provides testing of the deletion part of wipeout service."""

    SKILL_1_ID = 'skill_1_id'
    QUESTION_1_ID = 'question_1_id'
    QUESTION_2_ID = 'question_2_id'
    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    USER_2_EMAIL = 'some-other@email.com'
    USER_2_USERNAME = 'username2'

    def setUp(self):
        """Signs up two admin users, creates a skill and one question
        authored by the first user, and marks both users for deletion.
        """
        super(WipeoutServiceDeleteQuestionModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
        self.set_admins((self.USER_1_USERNAME, self.USER_2_USERNAME))
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
        self.save_new_skill(self.SKILL_1_ID, self.user_1_id)
        self.save_new_question(
            self.QUESTION_1_ID,
            self.user_1_id,
            self._create_valid_question_data('ABC'),
            [self.SKILL_1_ID]
        )
        wipeout_service.pre_delete_user(self.user_1_id)
        wipeout_service.pre_delete_user(self.user_2_id)
        self.process_and_flush_pending_tasks()

    def test_one_question_is_pseudonymized(self):
        """Checks snapshot metadata and commit log models of the question
        are attributed to the pseudonymous user ID after deletion.
        """
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Verify user is deleted.
        question_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.question]
        )
        metadata_model = (
            question_models.QuestionSnapshotMetadataModel.get_by_id(
                '%s-1' % self.QUESTION_1_ID)
        )
        self.assertEqual(
            metadata_model.committer_id, question_mappings[self.QUESTION_1_ID])
        commit_log_model = (
            question_models.QuestionCommitLogEntryModel.get_by_id(
                'question-%s-1' % self.QUESTION_1_ID)
        )
        self.assertEqual(
            commit_log_model.user_id, question_mappings[self.QUESTION_1_ID])

    def test_one_question_with_missing_snapshot_is_pseudonymized(self):
        """Checks a commit log without a matching snapshot is still
        pseudonymized, and that the mismatch is logged as an error.
        """
        # Create a commit log entry that has no corresponding snapshot
        # metadata model, to trigger the mismatch warning.
        question_models.QuestionCommitLogEntryModel(
            id='question-%s-1' % self.QUESTION_2_ID,
            question_id=self.QUESTION_2_ID,
            user_id=self.user_1_id,
            commit_type='create_new',
            commit_cmds=[{}],
            post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
            version=1
        ).put_for_human()
        with self.capture_logging(min_level=logging.ERROR) as log_messages:
            wipeout_service.delete_user(
                wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertEqual(
            log_messages,
            ['[WIPEOUT] The commit log model \'QuestionCommitLogEntryModel\' '
             'and snapshot models [\'QuestionSnapshotMetadataModel\'] IDs '
             'differ. Snapshots without commit logs: [], '
             'commit logs without snapshots: [u\'%s\'].' % self.QUESTION_2_ID])
        # Verify user is deleted.
        question_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.question]
        )
        metadata_model = (
            question_models.QuestionSnapshotMetadataModel.get_by_id(
                '%s-1' % self.QUESTION_1_ID
            )
        )
        self.assertEqual(
            metadata_model.committer_id, question_mappings[self.QUESTION_1_ID])
        commit_log_model_1 = (
            question_models.QuestionCommitLogEntryModel.get_by_id(
                'question-%s-1' % self.QUESTION_1_ID
            )
        )
        self.assertEqual(
            commit_log_model_1.user_id, question_mappings[self.QUESTION_1_ID])
        commit_log_model_2 = (
            question_models.QuestionCommitLogEntryModel.get_by_id(
                'question-%s-1' % self.QUESTION_2_ID
            )
        )
        self.assertEqual(
            commit_log_model_2.user_id, question_mappings[self.QUESTION_2_ID])

    def test_one_question_when_the_deletion_is_repeated_is_pseudonymized(self):
        """Checks rerunning the deletion reuses the same pseudonymous ID."""
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Return metadata model to the original user ID.
        metadata_model = (
            question_models.QuestionSnapshotMetadataModel.get_by_id(
                '%s-1' % self.QUESTION_1_ID
            )
        )
        metadata_model.committer_id = self.user_1_id
        metadata_model.put_for_human()
        # Run the user deletion again.
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Verify that both the commit and the metadata have the same
        # pseudonymous user ID.
        question_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.question]
        )
        metadata_model = (
            question_models.QuestionSnapshotMetadataModel.get_by_id(
                '%s-1' % self.QUESTION_1_ID
            )
        )
        self.assertEqual(
            metadata_model.committer_id, question_mappings[self.QUESTION_1_ID])
        commit_log_model = (
            question_models.QuestionCommitLogEntryModel.get_by_id(
                'question-%s-1' % self.QUESTION_1_ID
            )
        )
        self.assertEqual(
            commit_log_model.user_id, question_mappings[self.QUESTION_1_ID])

    def test_multiple_questions_are_pseudonymized(self):
        """Checks all questions authored by the user are pseudonymized."""
        self.save_new_question(
            self.QUESTION_2_ID,
            self.user_1_id,
            self._create_valid_question_data('ABC'),
            [self.SKILL_1_ID]
        )
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        question_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.question]
        )
        metadata_model = (
            question_models.QuestionSnapshotMetadataModel.get_by_id(
                '%s-1' % self.QUESTION_1_ID
            )
        )
        self.assertEqual(
            metadata_model.committer_id, question_mappings[self.QUESTION_1_ID])
        commit_log_model = (
            question_models.QuestionCommitLogEntryModel.get_by_id(
                'question-%s-1' % self.QUESTION_1_ID
            )
        )
        self.assertEqual(
            commit_log_model.user_id, question_mappings[self.QUESTION_1_ID])
        metadata_model = (
            question_models.QuestionSnapshotMetadataModel.get_by_id(
                '%s-1' % self.QUESTION_2_ID
            )
        )
        self.assertEqual(
            metadata_model.committer_id, question_mappings[self.QUESTION_2_ID])
        commit_log_model = (
            question_models.QuestionCommitLogEntryModel.get_by_id(
                'question-%s-1' % self.QUESTION_2_ID
            )
        )
        self.assertEqual(
            commit_log_model.user_id, question_mappings[self.QUESTION_2_ID])

    def test_multiple_questions_with_multiple_users_are_pseudonymized(self):
        """Checks each user's questions are pseudonymized only when that
        user is deleted, independently of the other user.
        """
        self.save_new_question(
            self.QUESTION_2_ID,
            self.user_2_id,
            self._create_valid_question_data('ABC'),
            [self.SKILL_1_ID]
        )
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Verify first user is deleted.
        question_mappings_1 = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.question]
        )
        metadata_model = (
            question_models.QuestionSnapshotMetadataModel.get_by_id(
                '%s-1' % self.QUESTION_1_ID
            )
        )
        self.assertEqual(
            metadata_model.committer_id,
            question_mappings_1[self.QUESTION_1_ID]
        )
        commit_log_model = (
            question_models.QuestionCommitLogEntryModel.get_by_id(
                'question-%s-1' % self.QUESTION_1_ID
            )
        )
        self.assertEqual(
            commit_log_model.user_id, question_mappings_1[self.QUESTION_1_ID])
        # Verify second user is not yet deleted.
        metadata_model = (
            question_models.QuestionSnapshotMetadataModel.get_by_id(
                '%s-1' % self.QUESTION_2_ID
            )
        )
        self.assertEqual(metadata_model.committer_id, self.user_2_id)
        commit_log_model = (
            question_models.QuestionCommitLogEntryModel.get_by_id(
                'question-%s-1' % self.QUESTION_2_ID
            )
        )
        self.assertEqual(commit_log_model.user_id, self.user_2_id)
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_2_id))
        # Verify second user is deleted.
        question_mappings_2 = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_2_id
            ).pseudonymizable_entity_mappings[models.NAMES.question]
        )
        metadata_model = (
            question_models.QuestionSnapshotMetadataModel.get_by_id(
                '%s-1' % self.QUESTION_2_ID
            )
        )
        self.assertEqual(
            metadata_model.committer_id,
            question_mappings_2[self.QUESTION_2_ID]
        )
        commit_log_model = (
            question_models.QuestionCommitLogEntryModel.get_by_id(
                'question-%s-1' % self.QUESTION_2_ID
            )
        )
        self.assertEqual(
            commit_log_model.user_id, question_mappings_2[self.QUESTION_2_ID])

    def test_one_question_with_multiple_users_is_pseudonymized(self):
        """Checks per-version pseudonymization on one question edited by
        two users: version 1 (user 1) and version 2 (user 2) are handled
        independently.
        """
        # Second user edits the question, producing version 2.
        question_services.update_question(
            self.user_2_id,
            self.QUESTION_1_ID,
            [question_domain.QuestionChange({
                'cmd': question_domain.CMD_UPDATE_QUESTION_PROPERTY,
                'property_name': (
                    question_domain.QUESTION_PROPERTY_LANGUAGE_CODE),
                'new_value': 'cs',
                'old_value': 'en'
            })],
            'Change language.'
        )
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Verify first user is deleted.
        question_mappings_1 = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.question]
        )
        metadata_model = (
            question_models.QuestionSnapshotMetadataModel.get_by_id(
                '%s-1' % self.QUESTION_1_ID
            )
        )
        self.assertEqual(
            metadata_model.committer_id,
            question_mappings_1[self.QUESTION_1_ID]
        )
        commit_log_model = (
            question_models.QuestionCommitLogEntryModel.get_by_id(
                'question-%s-1' % self.QUESTION_1_ID
            )
        )
        self.assertEqual(
            commit_log_model.user_id, question_mappings_1[self.QUESTION_1_ID])
        # Verify second user is not yet deleted.
        metadata_model = (
            question_models.QuestionSnapshotMetadataModel.get_by_id(
                '%s-2' % self.QUESTION_1_ID
            )
        )
        self.assertEqual(metadata_model.committer_id, self.user_2_id)
        commit_log_model = (
            question_models.QuestionCommitLogEntryModel.get_by_id(
                'question-%s-2' % self.QUESTION_1_ID
            )
        )
        self.assertEqual(commit_log_model.user_id, self.user_2_id)
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_2_id))
        # Verify second user is deleted.
        question_mappings_2 = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_2_id
            ).pseudonymizable_entity_mappings[models.NAMES.question]
        )
        metadata_model = (
            question_models.QuestionSnapshotMetadataModel.get_by_id(
                '%s-2' % self.QUESTION_1_ID
            )
        )
        self.assertEqual(
            metadata_model.committer_id,
            question_mappings_2[self.QUESTION_1_ID]
        )
        commit_log_model = (
            question_models.QuestionCommitLogEntryModel.get_by_id(
                'question-%s-2' % self.QUESTION_1_ID
            )
        )
        self.assertEqual(
            commit_log_model.user_id, question_mappings_2[self.QUESTION_1_ID])
class WipeoutServiceVerifyDeleteQuestionModelsTests(test_utils.GenericTestBase):
    """Provides testing of the verification part of wipeout service."""

    SKILL_1_ID = 'SKILL_1_ID'
    QUESTION_1_ID = 'QUESTION_1_ID'
    QUESTION_2_ID = 'QUESTION_2_ID'
    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    USER_2_EMAIL = 'some-other@email.com'
    USER_2_USERNAME = 'username2'

    def setUp(self):
        """Signs up two admin users, creates a skill and one question per
        user, and marks both users for deletion.
        """
        super(WipeoutServiceVerifyDeleteQuestionModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
        self.set_admins((self.USER_1_USERNAME, self.USER_2_USERNAME))
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
        self.save_new_skill(self.SKILL_1_ID, self.user_1_id)
        self.save_new_question(
            self.QUESTION_1_ID,
            self.user_1_id,
            self._create_valid_question_data('ABC'),
            [self.SKILL_1_ID]
        )
        self.save_new_question(
            self.QUESTION_2_ID,
            self.user_2_id,
            self._create_valid_question_data('ABC'),
            [self.SKILL_1_ID]
        )
        wipeout_service.pre_delete_user(self.user_1_id)
        wipeout_service.pre_delete_user(self.user_2_id)
        self.process_and_flush_pending_tasks()

    def test_verification_is_successful(self):
        """Checks verification succeeds right after a clean deletion."""
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))

    def test_verification_when_deletion_failed_is_unsuccessful(self):
        """Checks verification fails once the deleted user produces new
        question data (a post-deletion edit).
        """
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_2_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
        # The deleted user edits a question, re-introducing user data.
        question_services.update_question(
            self.user_2_id,
            self.QUESTION_2_ID,
            [question_domain.QuestionChange({
                'cmd': question_domain.CMD_UPDATE_QUESTION_PROPERTY,
                'property_name': (
                    question_domain.QUESTION_PROPERTY_LANGUAGE_CODE),
                'new_value': 'cs',
                'old_value': 'en'
            })],
            'Change language.'
        )
        # Bug fix: the original test never asserted the unsuccessful
        # outcome its name promises; verification must now fail.
        self.assertFalse(wipeout_service.verify_user_deleted(self.user_2_id))
class WipeoutServiceDeleteSkillModelsTests(test_utils.GenericTestBase):
    """Provides testing of the deletion part of wipeout service."""
    # Fixed skill IDs and user credentials shared by the tests below.
    SKILL_1_ID = 'skill_1_id'
    SKILL_2_ID = 'skill_2_id'
    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    USER_2_EMAIL = 'some-other@email.com'
    USER_2_USERNAME = 'username2'
    def setUp(self):
        """Signs up two admin users, creates one skill owned by user 1, and
        marks both users as pending deletion.
        """
        super(WipeoutServiceDeleteSkillModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
        self.set_admins((self.USER_1_USERNAME, self.USER_2_USERNAME))
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
        self.save_new_skill(self.SKILL_1_ID, self.user_1_id)
        wipeout_service.pre_delete_user(self.user_1_id)
        wipeout_service.pre_delete_user(self.user_2_id)
        self.process_and_flush_pending_tasks()
    def test_one_skill_is_pseudonymized(self):
        """Deleting user 1 replaces their ID in the skill snapshot metadata
        and commit log with the pseudonymous ID recorded in the mappings.
        """
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Verify user is deleted.
        skill_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.skill]
        )
        metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
            '%s-1' % self.SKILL_1_ID)
        self.assertEqual(
            metadata_model.committer_id, skill_mappings[self.SKILL_1_ID])
        commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
            'skill-%s-1' % self.SKILL_1_ID)
        self.assertEqual(
            commit_log_model.user_id, skill_mappings[self.SKILL_1_ID])
    def test_one_skill_with_missing_snapshot_is_pseudonymized(self):
        """A commit log entry with no matching snapshot is still
        pseudonymized, and the mismatch is logged as an error.
        """
        skill_models.SkillCommitLogEntryModel(
            id='skill-%s-1' % self.SKILL_2_ID,
            skill_id=self.SKILL_2_ID,
            user_id=self.user_1_id,
            commit_type='create_new',
            commit_cmds=[{}],
            post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
            version=1
        ).put_for_human()
        with self.capture_logging(min_level=logging.ERROR) as log_messages:
            wipeout_service.delete_user(
                wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertEqual(
            log_messages,
            ['[WIPEOUT] The commit log model \'SkillCommitLogEntryModel\' and '
             'snapshot models [\'SkillSnapshotMetadataModel\'] IDs differ. '
             'Snapshots without commit logs: [], '
             'commit logs without snapshots: [u\'%s\'].' % self.SKILL_2_ID])
        # Verify user is deleted.
        skill_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.skill]
        )
        metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
            '%s-1' % self.SKILL_1_ID)
        self.assertEqual(
            metadata_model.committer_id, skill_mappings[self.SKILL_1_ID])
        commit_log_model_1 = skill_models.SkillCommitLogEntryModel.get_by_id(
            'skill-%s-1' % self.SKILL_1_ID)
        self.assertEqual(
            commit_log_model_1.user_id, skill_mappings[self.SKILL_1_ID])
        commit_log_model_2 = skill_models.SkillCommitLogEntryModel.get_by_id(
            'skill-%s-1' % self.SKILL_2_ID)
        self.assertEqual(
            commit_log_model_2.user_id, skill_mappings[self.SKILL_2_ID])
    def test_one_skill_when_the_deletion_is_repeated_is_pseudonymized(self):
        """Re-running deletion re-applies the SAME pseudonymous ID to a
        model that was reverted to the original user ID.
        """
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Return metadata model to the original user ID.
        metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
            '%s-1' % self.SKILL_1_ID)
        metadata_model.committer_id = self.user_1_id
        metadata_model.put_for_human()
        # Run the user deletion again.
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Verify that both the commit and the metadata have the same
        # pseudonymous user ID.
        skill_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.skill]
        )
        metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
            '%s-1' % self.SKILL_1_ID)
        self.assertEqual(
            metadata_model.committer_id, skill_mappings[self.SKILL_1_ID])
        commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
            'skill-%s-1' % self.SKILL_1_ID)
        self.assertEqual(
            commit_log_model.user_id, skill_mappings[self.SKILL_1_ID])
    def test_multiple_skills_are_pseudonymized(self):
        """All skills owned by the deleted user are pseudonymized, each with
        its own mapping entry.
        """
        self.save_new_skill(self.SKILL_2_ID, self.user_1_id)
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        skill_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.skill]
        )
        metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
            '%s-1' % self.SKILL_1_ID)
        self.assertEqual(
            metadata_model.committer_id, skill_mappings[self.SKILL_1_ID])
        commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
            'skill-%s-1' % self.SKILL_1_ID)
        self.assertEqual(
            commit_log_model.user_id, skill_mappings[self.SKILL_1_ID])
        metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
            '%s-1' % self.SKILL_2_ID)
        self.assertEqual(
            metadata_model.committer_id, skill_mappings[self.SKILL_2_ID])
        commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
            'skill-%s-1' % self.SKILL_2_ID)
        self.assertEqual(
            commit_log_model.user_id, skill_mappings[self.SKILL_2_ID])
    def test_multiple_skills_with_multiple_users_are_pseudonymized(self):
        """Deleting one user leaves the other user's skill models untouched
        until that user is deleted too.
        """
        self.save_new_skill(self.SKILL_2_ID, self.user_2_id)
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Verify first user is deleted.
        skill_mappings_1 = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.skill]
        )
        metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
            '%s-1' % self.SKILL_1_ID)
        self.assertEqual(
            metadata_model.committer_id, skill_mappings_1[self.SKILL_1_ID])
        commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
            'skill-%s-1' % self.SKILL_1_ID)
        self.assertEqual(
            commit_log_model.user_id, skill_mappings_1[self.SKILL_1_ID])
        # Verify second user is not yet deleted.
        metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
            '%s-1' % self.SKILL_2_ID)
        self.assertEqual(metadata_model.committer_id, self.user_2_id)
        commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
            'skill-%s-1' % self.SKILL_2_ID)
        self.assertEqual(commit_log_model.user_id, self.user_2_id)
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_2_id))
        # Verify second user is deleted.
        skill_mappings_2 = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_2_id
            ).pseudonymizable_entity_mappings[models.NAMES.skill]
        )
        metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
            '%s-1' % self.SKILL_2_ID)
        self.assertEqual(
            metadata_model.committer_id, skill_mappings_2[self.SKILL_2_ID])
        commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
            'skill-%s-1' % self.SKILL_2_ID)
        self.assertEqual(
            commit_log_model.user_id, skill_mappings_2[self.SKILL_2_ID])
    def test_one_skill_with_multiple_users_is_pseudonymized(self):
        """When two users committed to the same skill, each user's versions
        are pseudonymized independently, only when that user is deleted.
        """
        skill_services.update_skill(
            self.user_2_id,
            self.SKILL_1_ID,
            [skill_domain.SkillChange({
                'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
                'property_name': skill_domain.SKILL_PROPERTY_LANGUAGE_CODE,
                'new_value': 'cs',
                'old_value': 'en'
            })],
            'Change language.'
        )
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Verify first user is deleted.
        skill_mappings_1 = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.skill]
        )
        metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
            '%s-1' % self.SKILL_1_ID)
        self.assertEqual(
            metadata_model.committer_id, skill_mappings_1[self.SKILL_1_ID])
        commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
            'skill-%s-1' % self.SKILL_1_ID)
        self.assertEqual(
            commit_log_model.user_id, skill_mappings_1[self.SKILL_1_ID])
        # Verify second user is not yet deleted.
        metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
            '%s-2' % self.SKILL_1_ID)
        self.assertEqual(metadata_model.committer_id, self.user_2_id)
        commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
            'skill-%s-2' % self.SKILL_1_ID)
        self.assertEqual(commit_log_model.user_id, self.user_2_id)
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_2_id))
        # Verify second user is deleted.
        skill_mappings_2 = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_2_id
            ).pseudonymizable_entity_mappings[models.NAMES.skill]
        )
        metadata_model = skill_models.SkillSnapshotMetadataModel.get_by_id(
            '%s-2' % self.SKILL_1_ID)
        self.assertEqual(
            metadata_model.committer_id, skill_mappings_2[self.SKILL_1_ID])
        commit_log_model = skill_models.SkillCommitLogEntryModel.get_by_id(
            'skill-%s-2' % self.SKILL_1_ID)
        self.assertEqual(
            commit_log_model.user_id, skill_mappings_2[self.SKILL_1_ID])
class WipeoutServiceVerifyDeleteSkillModelsTests(test_utils.GenericTestBase):
    """Provides testing of the verification part of wipeout service."""
    SKILL_1_ID = 'skill_1_id'
    SKILL_2_ID = 'skill_2_id'
    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    USER_2_EMAIL = 'some-other@email.com'
    USER_2_USERNAME = 'username2'
    def setUp(self):
        """Signs up two admin users with one skill each and marks both
        users as pending deletion.
        """
        super(WipeoutServiceVerifyDeleteSkillModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
        self.set_admins((self.USER_1_USERNAME, self.USER_2_USERNAME))
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
        self.save_new_skill(self.SKILL_1_ID, self.user_1_id)
        self.save_new_skill(self.SKILL_2_ID, self.user_2_id)
        wipeout_service.pre_delete_user(self.user_1_id)
        wipeout_service.pre_delete_user(self.user_2_id)
        self.process_and_flush_pending_tasks()
    def test_verification_is_successful(self):
        """After a clean deletion run, verification reports success."""
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
    def test_verification_when_deletion_failed_is_unsuccessful(self):
        """A new commit attributed to the deleted user makes verification
        fail until the deletion is re-run.
        """
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_2_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
        # Create a fresh commit on behalf of the already-deleted user.
        skill_services.update_skill(
            self.user_2_id,
            self.SKILL_2_ID,
            [skill_domain.SkillChange({
                'cmd': skill_domain.CMD_UPDATE_SKILL_PROPERTY,
                'property_name': skill_domain.SKILL_PROPERTY_LANGUAGE_CODE,
                'new_value': 'cs',
                'old_value': 'en'
            })],
            'Change language.'
        )
        self.assertFalse(wipeout_service.verify_user_deleted(self.user_2_id))
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_2_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
class WipeoutServiceDeleteStoryModelsTests(test_utils.GenericTestBase):
    """Provides testing of the deletion part of wipeout service."""
    # Fixed topic/story IDs and user credentials shared by the tests below.
    TOPIC_1_ID = 'topic_1_id'
    STORY_1_ID = 'story_1_id'
    STORY_2_ID = 'story_2_id'
    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    USER_2_EMAIL = 'some-other@email.com'
    USER_2_USERNAME = 'username2'
    def setUp(self):
        """Signs up two users, creates one topic with one canonical story
        owned by user 1, and marks both users as pending deletion.
        """
        super(WipeoutServiceDeleteStoryModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
        self.save_new_topic(
            self.TOPIC_1_ID,
            self.user_1_id,
            abbreviated_name='abbrev-one',
            url_fragment='frag-one',
            canonical_story_ids=[self.STORY_1_ID])
        self.save_new_story(self.STORY_1_ID, self.user_1_id, self.TOPIC_1_ID)
        wipeout_service.pre_delete_user(self.user_1_id)
        wipeout_service.pre_delete_user(self.user_2_id)
        self.process_and_flush_pending_tasks()
    def test_one_story_is_pseudonymized(self):
        """Deleting user 1 replaces their ID in the story snapshot metadata
        and commit log with the pseudonymous ID recorded in the mappings.
        """
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Verify user is deleted.
        story_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.story]
        )
        metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
            '%s-1' % self.STORY_1_ID)
        self.assertEqual(
            metadata_model.committer_id, story_mappings[self.STORY_1_ID])
        commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
            'story-%s-1' % self.STORY_1_ID)
        self.assertEqual(
            commit_log_model.user_id, story_mappings[self.STORY_1_ID])
    def test_one_story_with_missing_snapshot_is_pseudonymized(self):
        """A commit log entry with no matching snapshot is still
        pseudonymized, and the mismatch is logged as an error.
        """
        story_models.StoryCommitLogEntryModel(
            id='story-%s-1' % self.STORY_2_ID,
            story_id=self.STORY_2_ID,
            user_id=self.user_1_id,
            commit_type='create_new',
            commit_cmds=[{}],
            post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
            version=1
        ).put_for_human()
        with self.capture_logging(min_level=logging.ERROR) as log_messages:
            wipeout_service.delete_user(
                wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertEqual(
            log_messages,
            ['[WIPEOUT] The commit log model \'StoryCommitLogEntryModel\' and '
             'snapshot models [\'StorySnapshotMetadataModel\'] IDs differ. '
             'Snapshots without commit logs: [], '
             'commit logs without snapshots: [u\'%s\'].' % self.STORY_2_ID])
        # Verify user is deleted.
        story_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.story]
        )
        metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
            '%s-1' % self.STORY_1_ID)
        self.assertEqual(
            metadata_model.committer_id, story_mappings[self.STORY_1_ID])
        commit_log_model_1 = story_models.StoryCommitLogEntryModel.get_by_id(
            'story-%s-1' % self.STORY_1_ID)
        self.assertEqual(
            commit_log_model_1.user_id, story_mappings[self.STORY_1_ID])
        commit_log_model_2 = story_models.StoryCommitLogEntryModel.get_by_id(
            'story-%s-1' % self.STORY_2_ID)
        self.assertEqual(
            commit_log_model_2.user_id, story_mappings[self.STORY_2_ID])
    def test_one_story_when_the_deletion_is_repeated_is_pseudonymized(self):
        """Re-running deletion re-applies the SAME pseudonymous ID to a
        model that was reverted to the original user ID.
        """
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Return metadata model to the original user ID.
        metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
            '%s-1' % self.STORY_1_ID)
        metadata_model.committer_id = self.user_1_id
        metadata_model.put_for_human()
        # Run the user deletion again.
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Verify that both the commit and the metadata have the same
        # pseudonymous user ID.
        story_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.story]
        )
        metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
            '%s-1' % self.STORY_1_ID)
        self.assertEqual(
            metadata_model.committer_id, story_mappings[self.STORY_1_ID])
        commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
            'story-%s-1' % self.STORY_1_ID)
        self.assertEqual(
            commit_log_model.user_id, story_mappings[self.STORY_1_ID])
    def test_multiple_stories_are_pseudonymized(self):
        """All stories owned by the deleted user are pseudonymized, each
        with its own mapping entry.
        """
        # NOTE(review): this reuses TOPIC_1_ID (already created in setUp)
        # for a second topic named 'Topic 2' — presumably an intentional
        # overwrite; confirm a separate TOPIC_2_ID was not intended.
        self.save_new_topic(
            self.TOPIC_1_ID, self.user_1_id, name='Topic 2',
            abbreviated_name='abbrev-two', url_fragment='frag-two')
        self.save_new_story(self.STORY_2_ID, self.user_1_id, self.TOPIC_1_ID)
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        story_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.story]
        )
        metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
            '%s-1' % self.STORY_1_ID)
        self.assertEqual(
            metadata_model.committer_id, story_mappings[self.STORY_1_ID])
        commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
            'story-%s-1' % self.STORY_1_ID)
        self.assertEqual(
            commit_log_model.user_id, story_mappings[self.STORY_1_ID])
        metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
            '%s-1' % self.STORY_2_ID)
        self.assertEqual(
            metadata_model.committer_id, story_mappings[self.STORY_2_ID])
        commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
            'story-%s-1' % self.STORY_2_ID)
        self.assertEqual(
            commit_log_model.user_id, story_mappings[self.STORY_2_ID])
    def test_multiple_stories_with_multiple_users_are_pseudonymized(self):
        """Deleting one user leaves the other user's story models untouched
        until that user is deleted too.
        """
        self.save_new_topic(
            self.TOPIC_1_ID, self.user_2_id, name='Topic 2',
            abbreviated_name='abbrev-three', url_fragment='frag-three')
        self.save_new_story(self.STORY_2_ID, self.user_2_id, self.TOPIC_1_ID)
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Verify first user is deleted.
        story_mappings_1 = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.story]
        )
        metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
            '%s-1' % self.STORY_1_ID)
        self.assertEqual(
            metadata_model.committer_id, story_mappings_1[self.STORY_1_ID])
        commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
            'story-%s-1' % self.STORY_1_ID)
        self.assertEqual(
            commit_log_model.user_id, story_mappings_1[self.STORY_1_ID])
        # Verify second user is not yet deleted.
        metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
            '%s-1' % self.STORY_2_ID)
        self.assertEqual(metadata_model.committer_id, self.user_2_id)
        commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
            'story-%s-1' % self.STORY_2_ID)
        self.assertEqual(commit_log_model.user_id, self.user_2_id)
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_2_id))
        # Verify second user is deleted.
        story_mappings_2 = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_2_id
            ).pseudonymizable_entity_mappings[models.NAMES.story]
        )
        metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
            '%s-1' % self.STORY_2_ID)
        self.assertEqual(
            metadata_model.committer_id, story_mappings_2[self.STORY_2_ID])
        commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
            'story-%s-1' % self.STORY_2_ID)
        self.assertEqual(
            commit_log_model.user_id, story_mappings_2[self.STORY_2_ID])
    def test_one_story_with_multiple_users_is_pseudonymized(self):
        """When two users committed to the same story, each user's versions
        are pseudonymized independently, only when that user is deleted.
        """
        story_services.update_story(
            self.user_2_id,
            self.STORY_1_ID,
            [story_domain.StoryChange({
                'cmd': story_domain.CMD_ADD_STORY_NODE,
                'node_id': 'node_1',
                'title': 'Title 2'
            })],
            'Add node.'
        )
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Verify first user is deleted.
        story_mappings_1 = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.story]
        )
        metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
            '%s-1' % self.STORY_1_ID)
        self.assertEqual(
            metadata_model.committer_id, story_mappings_1[self.STORY_1_ID])
        commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
            'story-%s-1' % self.STORY_1_ID)
        self.assertEqual(
            commit_log_model.user_id, story_mappings_1[self.STORY_1_ID])
        # Verify second user is not yet deleted.
        metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
            '%s-2' % self.STORY_1_ID)
        self.assertEqual(metadata_model.committer_id, self.user_2_id)
        commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
            'story-%s-2' % self.STORY_1_ID)
        self.assertEqual(commit_log_model.user_id, self.user_2_id)
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_2_id))
        # Verify second user is deleted.
        story_mappings_2 = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_2_id
            ).pseudonymizable_entity_mappings[models.NAMES.story]
        )
        metadata_model = story_models.StorySnapshotMetadataModel.get_by_id(
            '%s-2' % self.STORY_1_ID)
        self.assertEqual(
            metadata_model.committer_id, story_mappings_2[self.STORY_1_ID])
        commit_log_model = story_models.StoryCommitLogEntryModel.get_by_id(
            'story-%s-2' % self.STORY_1_ID)
        self.assertEqual(
            commit_log_model.user_id, story_mappings_2[self.STORY_1_ID])
class WipeoutServiceVerifyDeleteStoryModelsTests(test_utils.GenericTestBase):
    """Provides testing of the verification part of wipeout service."""
    TOPIC_1_ID = 'topic_1_id'
    TOPIC_2_ID = 'topic_2_id'
    STORY_1_ID = 'story_1_id'
    STORY_2_ID = 'story_2_id'
    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    USER_2_EMAIL = 'some-other@email.com'
    USER_2_USERNAME = 'username2'
    def setUp(self):
        """Signs up two users, each owning a topic with one story, and
        marks both users as pending deletion.
        """
        super(WipeoutServiceVerifyDeleteStoryModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
        self.save_new_topic(
            self.TOPIC_1_ID, self.user_1_id, abbreviated_name='abbrev-four',
            url_fragment='frag-four')
        self.save_new_story(self.STORY_1_ID, self.user_1_id, self.TOPIC_1_ID)
        self.save_new_topic(
            self.TOPIC_2_ID,
            self.user_2_id,
            name='Topic 2',
            abbreviated_name='abbrev-five',
            url_fragment='frag-five',
            canonical_story_ids=[self.STORY_2_ID])
        self.save_new_story(self.STORY_2_ID, self.user_2_id, self.TOPIC_2_ID)
        wipeout_service.pre_delete_user(self.user_1_id)
        wipeout_service.pre_delete_user(self.user_2_id)
        self.process_and_flush_pending_tasks()
    def test_verification_is_successful(self):
        """After a clean deletion run, verification reports success."""
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
    def test_verification_when_deletion_failed_is_unsuccessful(self):
        """A new commit attributed to the deleted user makes verification
        fail until the deletion is re-run.
        """
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_2_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
        # Create a fresh commit on behalf of the already-deleted user.
        story_services.update_story(
            self.user_2_id,
            self.STORY_2_ID,
            [story_domain.StoryChange({
                'cmd': story_domain.CMD_ADD_STORY_NODE,
                'node_id': 'node_1',
                'title': 'Title 2'
            })],
            'Add node.'
        )
        self.assertFalse(wipeout_service.verify_user_deleted(self.user_2_id))
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_2_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
class WipeoutServiceDeleteSubtopicModelsTests(test_utils.GenericTestBase):
    """Provides testing of the deletion part of wipeout service."""
    # Fixed user credentials and topic/subtopic IDs shared by the tests
    # below. Subtopic models are keyed by '<topic_id>-<subtopic_id>'.
    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    USER_2_EMAIL = 'some-other@email.com'
    USER_2_USERNAME = 'username2'
    TOP_1_ID = 'top_1_id'
    SUBTOP_1_ID = 'subtop_1_id'
    SUBTOP_2_ID = 'subtop_2_id'
    def setUp(self):
        """Signs up two users, creates one topic with one subtopic page
        owned by user 1, and marks both users as pending deletion.
        """
        super(WipeoutServiceDeleteSubtopicModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
        self.save_new_topic(self.TOP_1_ID, self.user_1_id)
        self.subtopic_page = self.save_new_subtopic(
            self.SUBTOP_1_ID, self.user_1_id, self.TOP_1_ID)
        wipeout_service.pre_delete_user(self.user_1_id)
        wipeout_service.pre_delete_user(self.user_2_id)
        self.process_and_flush_pending_tasks()
    def test_one_subtopic_is_pseudonymized(self):
        """Deleting user 1 replaces their ID in the subtopic page snapshot
        metadata and commit log with the mapped pseudonymous ID.
        """
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Verify user is deleted.
        subtopic_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.subtopic]
        )
        metadata_model = (
            subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
                '%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
        self.assertEqual(
            metadata_model.committer_id,
            subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
        commit_log_model = (
            subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
                'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
        self.assertEqual(
            commit_log_model.user_id,
            subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
    def test_one_subtopic_with_missing_snapshot_is_pseudonymized(self):
        """A commit log entry with no matching snapshot is still
        pseudonymized, and the mismatch is logged as an error.
        """
        # NOTE(review): unlike the skill/story variants, this commit log ID
        # lacks the 'subtopicpage-' prefix used by real commit log entries
        # (cf. the get_by_id calls below) — confirm this is intentional.
        subtopic_models.SubtopicPageCommitLogEntryModel(
            id='%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_2_ID),
            subtopic_page_id=self.SUBTOP_2_ID,
            user_id=self.user_1_id,
            commit_type='create_new',
            commit_cmds=[{}],
            post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
            version=1
        ).put_for_human()
        with self.capture_logging(min_level=logging.ERROR) as log_messages:
            wipeout_service.delete_user(
                wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertEqual(
            log_messages,
            ['[WIPEOUT] The commit log model '
             '\'SubtopicPageCommitLogEntryModel\' and snapshot models '
             '[\'SubtopicPageSnapshotMetadataModel\'] IDs differ. '
             'Snapshots without commit logs: [], '
             'commit logs without snapshots: [u\'%s\'].' % self.SUBTOP_2_ID])
        # Verify user is deleted.
        subtopic_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.subtopic]
        )
        metadata_model = (
            subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
                '%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
        self.assertEqual(
            metadata_model.committer_id,
            subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
        commit_log_model = (
            subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
                'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
        self.assertEqual(
            commit_log_model.user_id,
            subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
    def test_one_subtopic_when_the_deletion_is_repeated_is_pseudonymized(self):
        """Re-running deletion re-applies the SAME pseudonymous ID to a
        model that was reverted to the original user ID.
        """
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Return metadata model to the original user ID.
        metadata_model = (
            subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
                '%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
        metadata_model.committer_id = self.user_1_id
        metadata_model.put_for_human()
        # Run the user deletion again.
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Verify that both the commit and the metadata have the same
        # pseudonymous user ID.
        subtopic_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.subtopic]
        )
        metadata_model = (
            subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
                '%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
        self.assertEqual(
            metadata_model.committer_id,
            subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
        commit_log_model = (
            subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
                'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
        self.assertEqual(
            commit_log_model.user_id,
            subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
    def test_multiple_subtopics_are_pseudonymized(self):
        """All subtopic pages authored by the deleted user are
        pseudonymized, each with its own mapping entry.
        """
        self.save_new_subtopic(self.SUBTOP_2_ID, self.user_1_id, self.TOP_1_ID)
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        subtopic_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.subtopic]
        )
        metadata_model = (
            subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
                '%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
        self.assertEqual(
            metadata_model.committer_id,
            subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
        commit_log_model = (
            subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
                'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
        self.assertEqual(
            commit_log_model.user_id,
            subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
        metadata_model = (
            subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
                '%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_2_ID)))
        self.assertEqual(
            metadata_model.committer_id,
            subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_2_ID)])
        commit_log_model = (
            subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
                'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_2_ID)))
        self.assertEqual(
            commit_log_model.user_id,
            subtopic_mappings['%s-%s' % (self.TOP_1_ID, self.SUBTOP_2_ID)])
    def test_multiple_subtopics_with_multiple_users_are_pseudonymized(self):
        """Deleting one user leaves the other user's subtopic models
        untouched until that user is deleted too.
        """
        self.save_new_subtopic(self.SUBTOP_2_ID, self.user_2_id, self.TOP_1_ID)
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Verify first user is deleted.
        subtopic_mappings_1 = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.subtopic]
        )
        metadata_model = (
            subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
                '%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
        self.assertEqual(
            metadata_model.committer_id,
            subtopic_mappings_1['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
        commit_log_model = (
            subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
                'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
        self.assertEqual(
            commit_log_model.user_id,
            subtopic_mappings_1['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
        # Verify second user is not yet deleted.
        metadata_model = (
            subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
                '%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_2_ID)))
        self.assertEqual(metadata_model.committer_id, self.user_2_id)
        commit_log_model = (
            subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
                'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_2_ID)))
        self.assertEqual(commit_log_model.user_id, self.user_2_id)
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_2_id))
        # Verify second user is deleted.
        subtopic_mappings_2 = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_2_id
            ).pseudonymizable_entity_mappings[models.NAMES.subtopic]
        )
        metadata_model = (
            subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
                '%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_2_ID)))
        self.assertEqual(
            metadata_model.committer_id,
            subtopic_mappings_2['%s-%s' % (self.TOP_1_ID, self.SUBTOP_2_ID)])
        commit_log_model = (
            subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
                'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_2_ID)))
        self.assertEqual(
            commit_log_model.user_id,
            subtopic_mappings_2['%s-%s' % (self.TOP_1_ID, self.SUBTOP_2_ID)])
    def test_one_subtopic_with_multiple_users_is_pseudonymized(self):
        """When two users committed to the same subtopic page, each user's
        versions are pseudonymized independently, only when that user is
        deleted.
        """
        subtopic_page_services.save_subtopic_page(
            self.user_2_id,
            self.subtopic_page,
            'Change subtopic',
            [
                subtopic_page_domain.SubtopicPageChange({
                    'cmd': (
                        subtopic_page_domain.CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY),
                    'property_name': (
                        subtopic_page_domain
                        .SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML),
                    'new_value': 'new value',
                    'old_value': 'old value',
                    'subtopic_id': self.SUBTOP_1_ID
                })
            ]
        )
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        # Verify first user is deleted.
        subtopic_mappings_1 = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.subtopic]
        )
        metadata_model = (
            subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
                '%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
        self.assertEqual(
            metadata_model.committer_id,
            subtopic_mappings_1['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
        commit_log_model = (
            subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
                'subtopicpage-%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
        self.assertEqual(
            commit_log_model.user_id,
            subtopic_mappings_1['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
        # Verify second user is not yet deleted.
        metadata_model = (
            subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
                '%s-%s-2' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
        self.assertEqual(metadata_model.committer_id, self.user_2_id)
        commit_log_model = (
            subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
                'subtopicpage-%s-%s-2' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
        self.assertEqual(commit_log_model.user_id, self.user_2_id)
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_2_id))
        # Verify second user is deleted.
        subtopic_mappings_2 = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_2_id
            ).pseudonymizable_entity_mappings[models.NAMES.subtopic]
        )
        metadata_model = (
            subtopic_models.SubtopicPageSnapshotMetadataModel.get_by_id(
                '%s-%s-2' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
        self.assertEqual(
            metadata_model.committer_id,
            subtopic_mappings_2['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
        commit_log_model = (
            subtopic_models.SubtopicPageCommitLogEntryModel.get_by_id(
                'subtopicpage-%s-%s-2' % (self.TOP_1_ID, self.SUBTOP_1_ID)))
        self.assertEqual(
            commit_log_model.user_id,
            subtopic_mappings_2['%s-%s' % (self.TOP_1_ID, self.SUBTOP_1_ID)])
class WipeoutServiceVerifyDeleteSubtopicModelsTests(test_utils.GenericTestBase):
    """Provides testing of the verification part of wipeout service."""
    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    TOP_1_ID = 'top_1_id'
    SUBTOP_1_ID = 'subtop_1_id'
    def setUp(self):
        """Signs up one user owning a topic with one subtopic page, and
        marks that user as pending deletion.
        """
        super(WipeoutServiceVerifyDeleteSubtopicModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.save_new_topic(self.TOP_1_ID, self.user_1_id)
        self.save_new_subtopic(self.SUBTOP_1_ID, self.user_1_id, self.TOP_1_ID)
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()
    def test_verification_is_successful(self):
        """After a clean deletion run, verification reports success."""
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
    def test_verification_when_deletion_failed_is_unsuccessful(self):
        """A snapshot model re-created with the deleted user's ID makes
        verification fail until the deletion is re-run.
        """
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
        # Re-create a snapshot metadata model attributed to the deleted
        # user, simulating a deletion that missed a model.
        subtopic_models.SubtopicPageSnapshotMetadataModel(
            id='%s-%s-1' % (self.TOP_1_ID, self.SUBTOP_1_ID),
            committer_id=self.user_1_id,
            commit_message='123',
            commit_type='create',
            commit_cmds={}
        ).put_for_human()
        self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteSuggestionModelsTests(test_utils.GenericTestBase):
    """Provides testing of the deletion part of wipeout service."""

    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    USER_2_EMAIL = 'some-other@email.com'
    USER_2_USERNAME = 'username2'
    VOICEOVER_1_ID = 'voiceover_1_id'
    VOICEOVER_2_ID = 'voiceover_2_id'
    EXP_1_ID = 'exp_1_id'
    EXP_2_ID = 'exp_2_id'

    def _put_voiceover_application(
            self, voiceover_id, exp_id, author_id, final_reviewer_id):
        """Stores a voiceover application model with the given fields."""
        suggestion_models.GeneralVoiceoverApplicationModel(
            id=voiceover_id,
            target_type=feconf.ENTITY_TYPE_EXPLORATION,
            target_id=exp_id,
            language_code='en',
            status=suggestion_models.STATUS_IN_REVIEW,
            content='Text',
            filename='filename.txt',
            author_id=author_id,
            final_reviewer_id=final_reviewer_id,
        ).put()

    def setUp(self):
        super(WipeoutServiceDeleteSuggestionModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
        # Two applications: one authored by the first user and reviewed by
        # the second, and one the other way around.
        self._put_voiceover_application(
            self.VOICEOVER_1_ID, self.EXP_1_ID,
            author_id=self.user_1_id, final_reviewer_id=self.user_2_id)
        self._put_voiceover_application(
            self.VOICEOVER_2_ID, self.EXP_2_ID,
            author_id=self.user_2_id, final_reviewer_id=self.user_1_id)
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

    def test_voiceover_application_is_pseudonymized(self):
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        pseudonym_by_id = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.suggestion])
        # Both fields that referred to the deleted user (author of the
        # first application, reviewer of the second) must now hold the
        # pseudonymous ID.
        application_1 = (
            suggestion_models.GeneralVoiceoverApplicationModel.get_by_id(
                self.VOICEOVER_1_ID))
        self.assertEqual(
            application_1.author_id,
            pseudonym_by_id[self.VOICEOVER_1_ID]
        )
        application_2 = (
            suggestion_models.GeneralVoiceoverApplicationModel.get_by_id(
                self.VOICEOVER_2_ID))
        self.assertEqual(
            application_2.final_reviewer_id,
            pseudonym_by_id[self.VOICEOVER_2_ID]
        )
class WipeoutServiceVerifyDeleteSuggestionModelsTests(
        test_utils.GenericTestBase):
    """Provides testing of the verification part of wipeout service."""

    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    USER_2_EMAIL = 'some-other@email.com'
    USER_2_USERNAME = 'username2'
    VOICEOVER_1_ID = 'voiceover_1_id'
    VOICEOVER_2_ID = 'voiceover_2_id'
    EXP_1_ID = 'exp_1_id'
    EXP_2_ID = 'exp_2_id'

    def _put_voiceover_application(
            self, voiceover_id, exp_id, author_id, final_reviewer_id):
        """Stores a voiceover application model with the given fields."""
        suggestion_models.GeneralVoiceoverApplicationModel(
            id=voiceover_id,
            target_type=feconf.ENTITY_TYPE_EXPLORATION,
            target_id=exp_id,
            language_code='en',
            status=suggestion_models.STATUS_IN_REVIEW,
            content='Text',
            filename='filename.txt',
            author_id=author_id,
            final_reviewer_id=final_reviewer_id,
        ).put()

    def setUp(self):
        super(WipeoutServiceVerifyDeleteSuggestionModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
        self._put_voiceover_application(
            self.VOICEOVER_1_ID, self.EXP_1_ID,
            author_id=self.user_1_id, final_reviewer_id=self.user_2_id)
        self._put_voiceover_application(
            self.VOICEOVER_2_ID, self.EXP_2_ID,
            author_id=self.user_2_id, final_reviewer_id=self.user_1_id)
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

    def test_verify_user_delete_when_user_is_deleted_returns_true(self):
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))

    def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
        # Re-introduce a model that references the original user ID; the
        # verification must detect it.
        self._put_voiceover_application(
            self.VOICEOVER_1_ID, self.EXP_1_ID,
            author_id=self.user_1_id, final_reviewer_id=self.user_2_id)
        self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
        # Running the deletion again pseudonymizes the new model as well.
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteTopicModelsTests(test_utils.GenericTestBase):
    """Provides testing of the deletion part of wipeout service.

    Exercises pseudonymization of topic snapshot metadata, topic rights
    snapshot and topic commit log models when a contributing user is
    wiped out.
    """

    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    USER_2_EMAIL = 'some-other@email.com'
    USER_2_USERNAME = 'username2'
    TOP_1_ID = 'top_1_id'
    TOP_2_ID = 'top_2_id'

    def setUp(self):
        # Create an admin (user 1) and a topic manager (user 2), a topic
        # owned by user 1, then assign both users as managers of it.
        super(WipeoutServiceDeleteTopicModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
        user_services.update_user_role(
            self.user_1_id, feconf.ROLE_ID_ADMIN)
        user_services.update_user_role(
            self.user_2_id, feconf.ROLE_ID_TOPIC_MANAGER)
        self.user_1_actions = user_services.UserActionsInfo(self.user_1_id)
        self.user_2_actions = user_services.UserActionsInfo(self.user_2_id)
        self.save_new_topic(self.TOP_1_ID, self.user_1_id)
        # Each role assignment below creates a further topic rights
        # snapshot version on top of the one made by save_new_topic, so
        # the tests refer to rights snapshot IDs '%s-1' .. '%s-3'.
        topic_services.assign_role(
            self.user_1_actions,
            self.user_1_actions,
            topic_domain.ROLE_MANAGER,
            self.TOP_1_ID)
        topic_services.assign_role(
            self.user_1_actions,
            self.user_2_actions,
            topic_domain.ROLE_MANAGER,
            self.TOP_1_ID)

    def test_one_topic_snapshot_metadata_is_pseudonymized(self):
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))

        # Verify the user's topic contributions are pseudonymized.
        topic_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.topic]
        )
        metadata_model = (
            topic_models.TopicSnapshotMetadataModel.get_by_id(
                '%s-1' % self.TOP_1_ID)
        )
        self.assertEqual(
            metadata_model.committer_id, topic_mappings[self.TOP_1_ID])
        rights_metadata_model_1 = (
            topic_models.TopicRightsSnapshotMetadataModel.get_by_id(
                '%s-1' % self.TOP_1_ID)
        )
        self.assertEqual(
            rights_metadata_model_1.committer_id, topic_mappings[self.TOP_1_ID])
        self.assertEqual(
            rights_metadata_model_1.content_user_ids, [])
        self.assertEqual(rights_metadata_model_1.commit_cmds_user_ids, [])
        # The second rights snapshot was created by the self-assignment in
        # setUp, so the deleted user appears in its content and commit-cmd
        # user ID lists too.
        rights_metadata_model_2 = (
            topic_models.TopicRightsSnapshotMetadataModel.get_by_id(
                '%s-2' % self.TOP_1_ID)
        )
        self.assertEqual(
            rights_metadata_model_2.committer_id, topic_mappings[self.TOP_1_ID])
        self.assertEqual(
            rights_metadata_model_2.content_user_ids,
            [topic_mappings[self.TOP_1_ID]])
        self.assertEqual(
            rights_metadata_model_2.commit_cmds_user_ids,
            [topic_mappings[self.TOP_1_ID]])

    def test_one_topic_snapshot_content_is_pseudonymized(self):
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))

        # Verify the user's topic contributions are pseudonymized.
        topic_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.topic]
        )
        rights_content_model_1 = (
            topic_models.TopicRightsSnapshotContentModel.get_by_id(
                '%s-1' % self.TOP_1_ID)
        )
        self.assertEqual(
            rights_content_model_1.content['manager_ids'], [])
        # The third (latest) rights snapshot lists both managers: the
        # pseudonym for the deleted user and the untouched second user.
        rights_content_model_2 = (
            topic_models.TopicRightsSnapshotContentModel.get_by_id(
                '%s-3' % self.TOP_1_ID)
        )
        self.assertItemsEqual(
            rights_content_model_2.content['manager_ids'],
            [
                topic_mappings[self.TOP_1_ID],
                self.user_2_id
            ])

    def test_one_topic_commit_log_is_pseudonymized(self):
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))

        # Verify the user's topic contributions are pseudonymized.
        topic_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.topic]
        )
        commit_log_model_1 = (
            topic_models.TopicCommitLogEntryModel.get_by_id(
                'rights-%s-2' % self.TOP_1_ID)
        )
        self.assertEqual(
            commit_log_model_1.user_id, topic_mappings[self.TOP_1_ID])

    def test_one_topic_with_missing_snapshot_is_pseudonymized(self):
        # Create a commit log entry for a second topic that has no
        # corresponding snapshot models; wipeout should log the mismatch
        # but still pseudonymize the orphaned commit log.
        topic_models.TopicCommitLogEntryModel(
            id='topic-%s-1' % self.TOP_2_ID,
            topic_id=self.TOP_2_ID,
            user_id=self.user_1_id,
            commit_type='create_new',
            commit_cmds=[{}],
            post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
            version=1
        ).put_for_human()
        with self.capture_logging(min_level=logging.ERROR) as log_messages:
            wipeout_service.pre_delete_user(self.user_1_id)
            self.process_and_flush_pending_tasks()
            wipeout_service.delete_user(
                wipeout_service.get_pending_deletion_request(self.user_1_id))

        self.assertItemsEqual(
            log_messages,
            [
                '[WIPEOUT] The commit log model \'TopicCommitLogEntryModel\' '
                'and snapshot models [\'TopicSnapshotMetadataModel\', '
                '\'TopicRightsSnapshotMetadataModel\'] IDs differ. '
                'Snapshots without commit logs: [], '
                'commit logs without snapshots: [u\'%s\'].' % self.TOP_2_ID
            ]
        )

        # Verify the user's topic contributions are pseudonymized.
        topic_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.topic]
        )
        metadata_model = (
            topic_models.TopicSnapshotMetadataModel.get_by_id(
                '%s-1' % self.TOP_1_ID
            )
        )
        self.assertEqual(
            metadata_model.committer_id, topic_mappings[self.TOP_1_ID])
        commit_log_model_1 = (
            topic_models.TopicCommitLogEntryModel.get_by_id(
                'topic-%s-1' % self.TOP_1_ID
            )
        )
        self.assertEqual(
            commit_log_model_1.user_id, topic_mappings[self.TOP_1_ID])
        # The orphaned commit log entry gets its own pseudonym under the
        # second topic's ID.
        commit_log_model_2 = (
            topic_models.TopicCommitLogEntryModel.get_by_id(
                'topic-%s-1' % self.TOP_2_ID
            )
        )
        self.assertEqual(
            commit_log_model_2.user_id, topic_mappings[self.TOP_2_ID])

    def test_one_topic_when_the_deletion_is_repeated_is_pseudonymized(self):
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))

        # Return metadata model to the original user ID.
        metadata_model = (
            topic_models.TopicSnapshotMetadataModel.get_by_id(
                '%s-1' % self.TOP_1_ID
            )
        )
        metadata_model.committer_id = self.user_1_id
        metadata_model.put_for_human()

        # Run the user deletion again.
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))

        # Verify that both the commit and the metadata have the same
        # pseudonymous user ID.
        topic_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.topic]
        )
        metadata_model = (
            topic_models.TopicSnapshotMetadataModel.get_by_id(
                '%s-1' % self.TOP_1_ID
            )
        )
        self.assertEqual(
            metadata_model.committer_id, topic_mappings[self.TOP_1_ID])
        commit_log_model = (
            topic_models.TopicCommitLogEntryModel.get_by_id(
                'topic-%s-1' % self.TOP_1_ID)
        )
        self.assertEqual(
            commit_log_model.user_id, topic_mappings[self.TOP_1_ID])

    def test_multiple_topics_are_pseudonymized(self):
        self.save_new_topic(
            self.TOP_2_ID,
            self.user_1_id,
            name='topic2',
            url_fragment='topic-two')

        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))

        # Each topic gets its own pseudonymous user ID in the mapping.
        topic_mappings = (
            user_models.PendingDeletionRequestModel.get_by_id(
                self.user_1_id
            ).pseudonymizable_entity_mappings[models.NAMES.topic]
        )
        metadata_model = (
            topic_models.TopicSnapshotMetadataModel.get_by_id(
                '%s-1' % self.TOP_1_ID
            )
        )
        self.assertEqual(
            metadata_model.committer_id, topic_mappings[self.TOP_1_ID])
        commit_log_model = (
            topic_models.TopicCommitLogEntryModel.get_by_id(
                'topic-%s-1' % self.TOP_1_ID
            )
        )
        self.assertEqual(
            commit_log_model.user_id, topic_mappings[self.TOP_1_ID])
        metadata_model = (
            topic_models.TopicSnapshotMetadataModel.get_by_id(
                '%s-1' % self.TOP_2_ID
            )
        )
        self.assertEqual(
            metadata_model.committer_id, topic_mappings[self.TOP_2_ID])
        commit_log_model = (
            topic_models.TopicCommitLogEntryModel.get_by_id(
                'topic-%s-1' % self.TOP_2_ID
            )
        )
        self.assertEqual(
            commit_log_model.user_id, topic_mappings[self.TOP_2_ID])
class WipeoutServiceVerifyDeleteTopicModelsTests(test_utils.GenericTestBase):
    """Provides testing of the verification part of wipeout service."""

    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    TOP_1_ID = 'top_1_id'
    TOP_2_ID = 'top_2_id'
    SUBTOP_1_ID = 'subtop_1_id'

    def setUp(self):
        super(WipeoutServiceVerifyDeleteTopicModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.save_new_topic(self.TOP_1_ID, self.user_1_id)
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

    def _delete_user_1(self):
        """Runs the deletion step of wipeout for the first user."""
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))

    def test_verify_user_delete_when_user_is_deleted_returns_true(self):
        self._delete_user_1()
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))

    def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
        self._delete_user_1()
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
        # A snapshot metadata model that still names the original user
        # makes the verification fail ...
        stale_snapshot = topic_models.TopicSnapshotMetadataModel(
            id='%s-1' % self.TOP_1_ID,
            committer_id=self.user_1_id,
            commit_message='123',
            commit_type='create',
            commit_cmds={}
        )
        stale_snapshot.put_for_human()
        self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
        # ... until the deletion is run one more time.
        self._delete_user_1()
        self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
class WipeoutServiceDeleteUserModelsTests(test_utils.GenericTestBase):
    """Provides testing of the deletion part of wipeout service.

    Covers deletion of user-owned models for a full user, for a profile
    user attached to it, and for users that own explorations and
    collections.
    """

    USER_1_EMAIL = 'some@email.com'
    USER_1_USERNAME = 'username1'
    USER_2_EMAIL = 'some-other@email.com'
    USER_2_USERNAME = 'username2'
    COLLECTION_1_ID = 'col_1_id'
    COLLECTION_2_ID = 'col_2_id'
    EXPLORATION_1_ID = 'exp_1_id'
    EXPLORATION_2_ID = 'exp_2_id'

    def setUp(self):
        super(WipeoutServiceDeleteUserModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
        # Activity models for the second (plain) user.
        user_models.CompletedActivitiesModel(
            id=self.user_2_id, exploration_ids=[], collection_ids=[]
        ).put()
        user_models.IncompleteActivitiesModel(
            id=self.user_2_id, exploration_ids=[], collection_ids=[]
        ).put()
        user_models.LearnerPlaylistModel(
            id=self.user_2_id, exploration_ids=[], collection_ids=[]
        ).put()
        # Create a profile user under the first user's auth account.
        self.user_1_auth_id = self.get_auth_id_from_email(self.USER_1_EMAIL)
        user_data_dict = {
            'schema_version': 1,
            'display_alias': 'display_alias',
            'pin': '12345',
            'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE],
            'preferred_site_language_code': None,
            'preferred_audio_language_code': None,
            'user_id': self.user_1_id,
        }
        new_user_data_dict = {
            'schema_version': 1,
            'display_alias': 'display_alias3',
            'pin': '12345',
            'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE],
            'preferred_site_language_code': None,
            'preferred_audio_language_code': None,
            'user_id': None,
        }
        self.modifiable_user_data = (
            user_domain.ModifiableUserData.from_raw_dict(user_data_dict))
        self.modifiable_new_user_data = (
            user_domain.ModifiableUserData.from_raw_dict(new_user_data_dict))
        user_services.update_multiple_users_data(
            [self.modifiable_user_data])
        self.modifiable_new_user_data.display_alias = 'name'
        self.modifiable_new_user_data.pin = '123'
        self.profile_user_id = user_services.create_new_profiles(
            self.user_1_auth_id, self.USER_1_EMAIL,
            [self.modifiable_new_user_data]
        )[0].user_id
        # Activity models for the profile user.
        user_models.CompletedActivitiesModel(
            id=self.profile_user_id, exploration_ids=[], collection_ids=[]
        ).put()
        user_models.IncompleteActivitiesModel(
            id=self.profile_user_id, exploration_ids=[], collection_ids=[]
        ).put()
        user_models.LearnerPlaylistModel(
            id=self.profile_user_id, exploration_ids=[], collection_ids=[]
        ).put()

    def test_delete_user_for_profile_user_is_successful(self):
        wipeout_service.pre_delete_user(self.profile_user_id)
        self.process_and_flush_pending_tasks()

        # Pre-deletion only unlinks auth; the user's models survive until
        # delete_user runs.
        self.assertIsNone(
            auth_services.get_auth_id_from_user_id(self.profile_user_id))
        self.assertTrue(
            auth_services.verify_external_auth_associations_are_deleted(
                self.profile_user_id))
        self.assertIsNotNone(
            user_models.CompletedActivitiesModel.get_by_id(
                self.profile_user_id)
        )
        self.assertIsNotNone(
            user_models.IncompleteActivitiesModel.get_by_id(
                self.profile_user_id)
        )
        self.assertIsNotNone(
            user_models.LearnerPlaylistModel.get_by_id(self.profile_user_id))

        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.profile_user_id))

        self.assertIsNone(
            user_models.CompletedActivitiesModel.get_by_id(
                self.profile_user_id)
        )
        self.assertIsNone(
            user_models.IncompleteActivitiesModel.get_by_id(
                self.profile_user_id)
        )
        self.assertIsNone(
            user_models.LearnerPlaylistModel.get_by_id(self.profile_user_id))

    def test_delete_user_for_full_user_and_its_profiles_is_successful(self):
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

        self.assertIsNone(
            auth_services.get_auth_id_from_user_id(self.user_1_id))
        # External auth associations should not have been deleted yet.
        self.assertFalse(
            auth_services.verify_external_auth_associations_are_deleted(
                self.user_1_id))
        self.assertIsNotNone(
            user_models.CompletedActivitiesModel.get_by_id(
                self.profile_user_id))
        self.assertIsNotNone(
            user_models.IncompleteActivitiesModel.get_by_id(
                self.profile_user_id))
        self.assertIsNotNone(
            user_models.LearnerPlaylistModel.get_by_id(self.profile_user_id))
        self.assertIsNotNone(
            user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))

        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.profile_user_id))

        self.assertIsNone(
            user_models.CompletedActivitiesModel.get_by_id(
                self.profile_user_id))
        self.assertIsNone(
            user_models.IncompleteActivitiesModel.get_by_id(
                self.profile_user_id))
        self.assertIsNone(
            user_models.LearnerPlaylistModel.get_by_id(self.profile_user_id))
        self.assertIsNone(
            user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))

    def test_delete_user_with_collection_and_exploration_is_successful(self):
        self.save_new_valid_exploration(
            self.EXPLORATION_1_ID,
            self.user_1_id)
        self.save_new_valid_collection(
            self.COLLECTION_1_ID,
            self.user_1_id,
            exploration_id=self.EXPLORATION_1_ID)

        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

        # Activities with a sole owner are deleted already at the
        # pre-deletion stage.
        self.assertIsNone(
            collection_models.CollectionModel.get_by_id(self.COLLECTION_1_ID))
        self.assertIsNone(
            exp_models.ExplorationModel.get_by_id(self.EXPLORATION_1_ID))
        self.assertIsNotNone(
            user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))

        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.profile_user_id))

        self.assertIsNone(
            user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))

    def test_delete_user_with_collections_and_explorations_is_successful(self):
        self.save_new_valid_exploration(
            self.EXPLORATION_1_ID,
            self.user_1_id)
        self.save_new_valid_collection(
            self.COLLECTION_1_ID,
            self.user_1_id,
            exploration_id=self.EXPLORATION_1_ID)
        self.save_new_valid_exploration(
            self.EXPLORATION_2_ID,
            self.user_1_id)
        self.save_new_valid_collection(
            self.COLLECTION_2_ID,
            self.user_1_id,
            exploration_id=self.EXPLORATION_2_ID)

        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

        self.assertIsNotNone(
            user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))
        self.assertIsNone(
            collection_models.CollectionModel.get_by_id(self.COLLECTION_1_ID))
        self.assertIsNone(
            exp_models.ExplorationModel.get_by_id(self.EXPLORATION_1_ID))
        self.assertIsNone(
            collection_models.CollectionModel.get_by_id(self.COLLECTION_2_ID))
        self.assertIsNone(
            exp_models.ExplorationModel.get_by_id(self.EXPLORATION_2_ID))

        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.profile_user_id))

        self.assertIsNone(
            user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))
        self.assertIsNone(
            collection_models.CollectionModel.get_by_id(self.COLLECTION_1_ID))
        self.assertIsNone(
            exp_models.ExplorationModel.get_by_id(self.EXPLORATION_1_ID))
        self.assertIsNone(
            collection_models.CollectionModel.get_by_id(self.COLLECTION_2_ID))
        self.assertIsNone(
            exp_models.ExplorationModel.get_by_id(self.EXPLORATION_2_ID))

    def test_delete_user_with_collection_and_exploration_repeated_is_successful(
            self):
        self.save_new_valid_exploration(
            self.EXPLORATION_1_ID,
            self.user_1_id)
        self.save_new_valid_collection(
            self.COLLECTION_1_ID,
            self.user_1_id,
            exploration_id=self.EXPLORATION_1_ID)

        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()

        self.assertIsNotNone(
            user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))
        self.assertIsNone(
            collection_models.CollectionModel.get_by_id(self.COLLECTION_1_ID))
        self.assertIsNone(
            exp_models.ExplorationModel.get_by_id(self.EXPLORATION_1_ID))

        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))

        self.assertIsNone(
            user_models.UserEmailPreferencesModel.get_by_id(self.user_1_id))

        # Re-create the activities after the first deletion pass; a second
        # pass must remove them again.
        self.save_new_valid_exploration(
            self.EXPLORATION_1_ID,
            self.user_1_id)
        self.save_new_valid_collection(
            self.COLLECTION_1_ID,
            self.user_1_id,
            exploration_id=self.EXPLORATION_1_ID)

        self.assertIsNotNone(
            collection_models.CollectionModel.get_by_id(self.COLLECTION_1_ID))
        self.assertIsNotNone(
            exp_models.ExplorationModel.get_by_id(self.EXPLORATION_1_ID))

        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))

        self.assertIsNone(
            collection_models.CollectionModel.get_by_id(self.COLLECTION_1_ID))
        self.assertIsNone(
            exp_models.ExplorationModel.get_by_id(self.EXPLORATION_1_ID))

    def test_delete_user_with_multiple_users_is_successful(self):
        wipeout_service.pre_delete_user(self.user_2_id)
        self.process_and_flush_pending_tasks()

        self.assertIsNotNone(
            user_models.UserEmailPreferencesModel.get_by_id(self.user_2_id))
        self.assertIsNotNone(
            user_models.CompletedActivitiesModel.get_by_id(self.user_2_id))
        self.assertIsNotNone(
            user_models.IncompleteActivitiesModel.get_by_id(self.user_2_id))
        self.assertIsNotNone(
            user_models.LearnerPlaylistModel.get_by_id(self.user_2_id))

        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_2_id))

        self.assertIsNone(
            user_models.UserEmailPreferencesModel.get_by_id(self.user_2_id))
        self.assertIsNone(
            user_models.CompletedActivitiesModel.get_by_id(self.user_2_id))
        self.assertIsNone(
            user_models.IncompleteActivitiesModel.get_by_id(self.user_2_id))
        self.assertIsNone(
            user_models.LearnerPlaylistModel.get_by_id(self.user_2_id))

    def test_after_deletion_user_and_its_profiles_cannot_do_anything(self):
        wipeout_service.pre_delete_user(self.user_1_id)
        self.process_and_flush_pending_tasks()
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.user_1_id))
        wipeout_service.delete_user(
            wipeout_service.get_pending_deletion_request(self.profile_user_id))

        self.assertIsNone(user_services.get_user_settings(self.user_1_id))
        self.assertIsNone(user_services.get_user_settings(self.profile_user_id))
        with self.assertRaisesRegexp(Exception, 'User not found.'):
            # Try to do some action with the deleted user.
            user_services.update_preferred_language_codes(
                self.user_1_id, ['en'])
        with self.assertRaisesRegexp(Exception, 'User not found.'):
            # Try to do some action with the deleted user.
            user_services.update_preferred_language_codes(
                self.profile_user_id, ['en'])
class WipeoutServiceVerifyDeleteUserModelsTests(test_utils.GenericTestBase):
"""Provides testing of the verification part of wipeout service."""
USER_1_EMAIL = 'some@email.com'
USER_1_USERNAME = 'username1'
USER_2_EMAIL = 'some-other@email.com'
USER_2_USERNAME = 'username2'
    def setUp(self):
        super(WipeoutServiceVerifyDeleteUserModelsTests, self).setUp()
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
        self.user_1_id = self.get_user_id_from_email(self.USER_1_EMAIL)
        self.user_2_id = self.get_user_id_from_email(self.USER_2_EMAIL)
        # Create a profile user attached to the first user's auth account.
        self.user_1_auth_id = self.get_auth_id_from_email(self.USER_1_EMAIL)
        user_data_dict = {
            'schema_version': 1,
            'display_alias': 'display_alias',
            'pin': '12345',
            'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE],
            'preferred_site_language_code': None,
            'preferred_audio_language_code': None,
            'user_id': self.user_1_id,
        }
        new_user_data_dict = {
            'schema_version': 1,
            'display_alias': 'display_alias3',
            'pin': '12345',
            'preferred_language_codes': [constants.DEFAULT_LANGUAGE_CODE],
            'preferred_site_language_code': None,
            'preferred_audio_language_code': None,
            'user_id': None,
        }
        self.modifiable_user_data = (
            user_domain.ModifiableUserData.from_raw_dict(user_data_dict))
        self.modifiable_new_user_data = (
            user_domain.ModifiableUserData.from_raw_dict(new_user_data_dict))
        user_services.update_multiple_users_data(
            [self.modifiable_user_data])
        self.modifiable_new_user_data.display_alias = 'name'
        self.modifiable_new_user_data.pin = '123'
        self.profile_user_id = user_services.create_new_profiles(
            self.user_1_auth_id, self.USER_1_EMAIL,
            [self.modifiable_new_user_data]
        )[0].user_id
        # Only the second user is pre-deleted here; individual tests
        # pre-delete the first/profile users as needed.
        wipeout_service.pre_delete_user(self.user_2_id)
        self.process_and_flush_pending_tasks()
def test_verify_user_delete_when_profile_user_deleted_returns_true(self):
wipeout_service.pre_delete_user(self.profile_user_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.profile_user_id))
self.assertTrue(
wipeout_service.verify_user_deleted(self.profile_user_id))
def test_verify_user_delete_when_user_is_deleted_returns_true(self):
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_1_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.profile_user_id)
)
self.assertTrue(
wipeout_service.verify_user_deleted(self.profile_user_id))
def test_verify_user_delete_when_user_is_not_deleted_returns_false(self):
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
user_models.CompletedActivitiesModel(
id=self.user_2_id, exploration_ids=[], collection_ids=[]
).put()
user_models.IncompleteActivitiesModel(
id=self.user_2_id, exploration_ids=[], collection_ids=[]
).put()
user_models.LearnerPlaylistModel(
id=self.user_2_id, exploration_ids=[], collection_ids=[]
).put()
self.assertFalse(wipeout_service.verify_user_deleted(self.user_2_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_2_id))
self.assertTrue(wipeout_service.verify_user_deleted(self.user_2_id))
def test_verify_user_delete_when_profile_user_not_deleted_is_false(self):
wipeout_service.pre_delete_user(self.profile_user_id)
self.process_and_flush_pending_tasks()
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.profile_user_id))
self.assertTrue(
wipeout_service.verify_user_deleted(self.profile_user_id))
user_models.CompletedActivitiesModel(
id=self.profile_user_id, exploration_ids=[], collection_ids=[]
).put()
user_models.IncompleteActivitiesModel(
id=self.profile_user_id, exploration_ids=[], collection_ids=[]
).put()
user_models.LearnerPlaylistModel(
id=self.profile_user_id, exploration_ids=[], collection_ids=[]
).put()
self.assertFalse(
wipeout_service.verify_user_deleted(self.profile_user_id))
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.profile_user_id))
self.assertTrue(
wipeout_service.verify_user_deleted(self.profile_user_id))
def test_verify_user_delete_when_external_auth_associations_are_not_deleted(
self):
self.assertFalse(
auth_services.verify_external_auth_associations_are_deleted(
self.user_1_id))
wipeout_service.pre_delete_user(self.user_1_id)
self.process_and_flush_pending_tasks()
delete_external_auth_associations_swap = self.swap_to_always_return(
auth_services, 'delete_external_auth_associations')
with delete_external_auth_associations_swap:
wipeout_service.delete_user(
wipeout_service.get_pending_deletion_request(self.user_1_id))
self.assertFalse(wipeout_service.verify_user_deleted(self.user_1_id))
| core/domain/wipeout_service_test.py | 182,295 | Provides testing of the deletion part of wipeout service.
Provides testing of the deletion part of wipeout service.
Provides testing of the deletion part of wipeout service.
Provides testing of the deletion part of wipeout service.
Provides testing of the deletion part of wipeout service.
Provides testing of the deletion part of wipeout service.
Provides testing of the deletion part of wipeout service.
Provides testing of the deletion part of wipeout service.
Provides testing of the deletion part of wipeout service.
Provides testing of the deletion part of wipeout service.
Provides testing of the deletion part of wipeout service.
Provides testing of the deletion part of wipeout service.
Provides testing of the deletion part of wipeout service.
Provides testing of the pre-deletion part of wipeout service.
Provides testing of the pre-deletion part of wipeout service.
Provides testing of the pre-deletion part of wipeout service.
Provides testing of the verification part of wipeout service.
Provides testing of the verification part of wipeout service.
Provides testing of the verification part of wipeout service.
Provides testing of the verification part of wipeout service.
Provides testing of the verification part of wipeout service.
Provides testing of the verification part of wipeout service.
Provides testing of the verification part of wipeout service.
Provides testing of the verification part of wipeout service.
Provides testing of the verification part of wipeout service.
Provides testing of the verification part of wipeout service.
Provides testing of the verification part of wipeout service.
Provides testing of the verification part of wipeout service.
Provides testing of the verification part of wipeout service.
Tests for wipeout service.
Copyright 2020 The Oppia Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS-IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pylint: disable=import-only-modules pylint: disable=import-only-modules Pre-deleted auth associations will return None. Verify user is deleted. Return metadata model to the original user ID. Run the user deletion again. Verify that both the commit and the metadata have the same pseudonymous user ID. Verify first user is deleted. Verify second user is not yet deleted. Verify second user is deleted. Verify first user is deleted. Verify second user is not yet deleted. Verify second user is deleted. Verify user is deleted. Verify user is deleted. Verify user is deleted. Verify user is deleted. Return metadata model to the original user ID. Run the user deletion again. Verify that both the commit and the metadata have the same pseudonymous user ID. Verify user is deleted. Verify user is deleted. Verify user is deleted. Verify user is deleted. Return metadata model to the original user ID. Run the user deletion again. Verify that both the commit and the metadata have the same pseudonymous user ID. Verify user is pseudonymized. Return feedback thread model to the original user ID. Run the user deletion again. Verify that both the feedback thread and the suggestion have the same pseudonymous user ID. Verify first user is pseudonymized. Verify second user is not yet pseudonymized. Delete second user. Verify second user is pseudonymized. Verify user is deleted. Verify user is deleted. 
Return metadata model to the original user ID. Run the user deletion again. Verify that both the commit and the metadata have the same pseudonymous user ID. Verify first user is deleted. Verify second user is not yet deleted. Verify second user is deleted. Verify first user is deleted. Verify second user is not yet deleted. Verify second user is deleted. Verify user is deleted. Verify user is deleted. Return metadata model to the original user ID. Run the user deletion again. Verify that both the commit and the metadata have the same pseudonymous user ID. Verify first user is deleted. Verify second user is not yet deleted. Verify second user is deleted. Verify first user is deleted. Verify second user is not yet deleted. Verify second user is deleted. Verify user is deleted. Verify user is deleted. Return metadata model to the original user ID. Run the user deletion again. Verify that both the commit and the metadata have the same pseudonymous user ID. Verify first user is deleted. Verify second user is not yet deleted. Verify second user is deleted. Verify first user is deleted. Verify second user is not yet deleted. Verify second user is deleted. Verify user is deleted. Verify user is deleted. Return metadata model to the original user ID. Run the user deletion again. Verify that both the commit and the metadata have the same pseudonymous user ID. Verify first user is deleted. Verify second user is not yet deleted. Verify second user is deleted. Verify first user is deleted. Verify second user is not yet deleted. Verify second user is deleted. Verify user is pseudonymized. Verify user is deleted. Verify user is deleted. Verify user is deleted. Verify user is deleted. Return metadata model to the original user ID. Run the user deletion again. Verify that both the commit and the metadata have the same pseudonymous user ID. External auth associations should not have been deleted yet. Try to do some action with the deleted user. 
Try to do some action with the deleted user. | 5,756 | en | 0.948945 |
from unittest.mock import call, PropertyMock, MagicMock
import pytest
from analytical_validation.exceptions import DataWasNotFitted
from src.analytical_validation.validators.linearity_validator import LinearityValidator
@pytest.fixture(scope='function')
def fitted_result_obj(mocker):
    """Stand-in for a statsmodels regression result.

    Exposes only the attributes the validator reads: params/pvalues as
    (intercept, slope) pairs, the sums of squares, degrees of freedom
    and residuals.
    """
    result = mocker.Mock(create=True)
    result.params = (mocker.Mock(), mocker.Mock())
    result.pvalues = (mocker.Mock(), mocker.Mock())
    for attribute in ('ess', 'ssr', 'df_model', 'df_resid'):
        setattr(result, attribute, MagicMock())
    result.resid = mocker.Mock()
    return result
@pytest.fixture(scope='function')
def linearity_validator_obj(fitted_result_obj):
    """A LinearityValidator wired up with a pre-fitted (mocked) regression result."""
    validator = LinearityValidator([[0.100, 0.200, 0.150]], [[0.1, 0.2, 0.3]])
    validator.fitted_result = fitted_result_obj
    return validator
@pytest.fixture(scope='function')
def linearity_validator_outlier_obj():
    """Validator built on data that contains one clear outlier per replicate set."""
    absorbance_sets = [[1.0, 1.0, 10.0], [2.0, 6.0, 2.0]]
    concentration_sets = [[1.0, 2.0, 3.0], [8.0, 9.0, 10.0]]
    return LinearityValidator(absorbance_sets, concentration_sets)
@pytest.fixture(scope='function')
def het_breuschpagan_mock(mocker):
    """Patch the Breusch-Pagan test used by the validator, returning a fixed pair."""
    target = ('analytical_validation.validators.linearity_validator.'
              'statsmodelsapi.het_breuschpagan')
    patched = mocker.patch(target)
    patched.return_value = (33, 42)
    return patched
@pytest.fixture(scope='function')
def shapiro_mock(mocker, linearity_validator_obj):
    """Patch ``scipy.stats`` inside the validator module and stub ``shapiro``.

    Bug fix: the original fixture wrote
    ``shapiro_mock.shapiro(data).return_value = (0, 1)``, which configures the
    return value of the object *returned by calling* ``shapiro`` — not of
    ``shapiro`` itself — so the stubbed ``(0, 1)`` pair was never observed by
    the code under test. The return value must be set on the ``shapiro``
    attribute directly.

    :param mocker: pytest-mock fixture used for patching
    :param linearity_validator_obj: kept for signature compatibility with
        existing tests that request both fixtures together
    :returns: the patched ``scipy.stats`` mock
    """
    shapiro_mock = mocker.patch('analytical_validation.validators.linearity_validator.scipy.stats')
    shapiro_mock.shapiro.return_value = (0, 1)
    return shapiro_mock
@pytest.fixture(scope='function')
def durbin_watson_mock(mocker):
    """Patch the Durbin-Watson statistic with a fixed value of 1."""
    patched = mocker.patch(
        'analytical_validation.validators.linearity_validator.stattools.durbin_watson')
    patched.return_value = 1
    return patched
@pytest.fixture(scope='function')
def add_constant_mock(mocker):
    """Patch statsmodels.add_constant so the regression design matrix is a mock."""
    return mocker.patch(
        'analytical_validation.validators.linearity_validator.statsmodels.add_constant')
@pytest.fixture(scope='function')
def ordinary_least_squares_regression_mock(mocker):
    """Patch statsmodels.OLS so no real regression is ever fitted."""
    return mocker.patch(
        'analytical_validation.validators.linearity_validator.statsmodels.OLS')
class TestLinearityValidator(object):
    """Unit tests for LinearityValidator.

    The statsmodels regression result and the statistical tests
    (Breusch-Pagan, Shapiro-Wilk, Durbin-Watson) are mocked out, so every
    test exercises only the validator's own branching logic.
    """

    def test_constructor_must_create_object_when_analytical_data_has_float_values(self, linearity_validator_obj):
        """Given analytical data
        The LinearityValidator
        Should create a list of floats
        """
        # Assert
        # NOTE(review): the fixture passes nested lists ([[...]]) while these
        # assertions expect flat lists -- presumably the constructor flattens
        # the replicate sets; confirm against LinearityValidator.__init__.
        assert linearity_validator_obj.analytical_data == [0.100, 0.200, 0.150]
        assert linearity_validator_obj.concentration_data == [0.1, 0.2, 0.3]

    def test_ordinary_least_squares_linear_regression_must_pass_float_when_given_correct_data(self,
                                                                                              ordinary_least_squares_regression_mock,
                                                                                              add_constant_mock,
                                                                                              linearity_validator_obj):
        """Given concentration values = float
        The ordinary_least_squares_linear_regression
        Then must set properties"""
        # Act
        linearity_validator_obj.ordinary_least_squares_linear_regression()
        # Assert
        # The fitted result must come from statsmodels.OLS(...).fit().
        assert linearity_validator_obj.fitted_result == ordinary_least_squares_regression_mock.return_value.fit.return_value
        # OLS must have been called with the analytical data and the
        # constant-augmented concentration matrix.
        assert ordinary_least_squares_regression_mock.called
        assert ordinary_least_squares_regression_mock.call_args_list == [
            call(linearity_validator_obj.analytical_data, add_constant_mock.return_value)
        ]
        assert add_constant_mock.called
        assert add_constant_mock.call_args_list == [
            call(linearity_validator_obj.concentration_data)
        ]

    def test_slope_property_exists_when_fitted_result_not_none(self, linearity_validator_obj, fitted_result_obj):
        # Act & assert: slope is the second regression parameter.
        assert linearity_validator_obj.slope == fitted_result_obj.params[1]

    def test_intercept_property_exists_when_fitted_result_not_none(self, linearity_validator_obj, fitted_result_obj):
        # Act & assert: intercept is the first regression parameter.
        assert linearity_validator_obj.intercept == fitted_result_obj.params[0]

    def test_r_squared_adjusted_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
                                                                            fitted_result_obj):
        # Act & assert
        assert linearity_validator_obj.r_squared_adj == fitted_result_obj.rsquared_adj

    def test_r_squared_property_exists_when_fitted_result_not_none(self, linearity_validator_obj, fitted_result_obj):
        # Act & assert
        assert linearity_validator_obj.r_squared == fitted_result_obj.rsquared

    def test_regression_residues_exists_when_fitted_result_not_none(self, linearity_validator_obj, fitted_result_obj):
        """Given a regression model
        when regression_residues is called
        the regression residues must be created"""
        assert linearity_validator_obj.regression_residues == fitted_result_obj.resid.tolist()

    def test_sum_of_squares_model_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
                                                                              fitted_result_obj):
        # Act & assert
        assert linearity_validator_obj.sum_of_squares_model == fitted_result_obj.ess

    def test_sum_of_squares_total_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
                                                                              fitted_result_obj):
        # Act & assert: total sum of squares = explained (ess) + residual (ssr).
        assert linearity_validator_obj.sum_of_squares_total == fitted_result_obj.ess + fitted_result_obj.ssr

    def test_sum_of_squares_resid_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
                                                                              fitted_result_obj):
        # Act & assert
        assert linearity_validator_obj.sum_of_squares_resid == fitted_result_obj.ssr

    def test_degrees_of_freedom_model_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
                                                                                  fitted_result_obj):
        # Act & assert
        assert linearity_validator_obj.degrees_of_freedom_model == fitted_result_obj.df_model

    def test_degrees_of_freedom_residues_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
                                                                                     fitted_result_obj):
        # Act & assert
        assert linearity_validator_obj.degrees_of_freedom_residues == fitted_result_obj.df_resid

    def test_degrees_of_freedom_total_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
                                                                                  fitted_result_obj):
        # Act & assert: total df = model df + residual df.
        assert linearity_validator_obj.degrees_of_freedom_total == fitted_result_obj.df_model + fitted_result_obj.df_resid

    def test_mean_squared_error_model_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
                                                                                  fitted_result_obj):
        # Act & assert
        assert linearity_validator_obj.mean_squared_error_model == fitted_result_obj.mse_model

    def test_mean_squared_error_residues_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
                                                                                     fitted_result_obj):
        # Act & assert
        assert linearity_validator_obj.mean_squared_error_residues == fitted_result_obj.mse_resid

    def test_anova_f_value_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
                                                                       fitted_result_obj):
        # Act & assert
        assert linearity_validator_obj.anova_f_value == fitted_result_obj.fvalue

    def test_anova_f_pvalue_property_exists_when_fitted_result_not_none(self, linearity_validator_obj,
                                                                        fitted_result_obj):
        # Act & assert
        assert linearity_validator_obj.anova_f_pvalue == fitted_result_obj.f_pvalue

    @pytest.mark.parametrize('param_anova_f_pvalue, param_alpha, expected_result', [
        (0.051, 0.05, False), (10, 0.1, False), (0.049, 0.05, True), (0.001, 0.10, True)
    ])
    def test_valid_anova_f_pvalue_must_return_true_when_r_squared_is_greater_than_0990(self, param_alpha,
                                                                                       linearity_validator_obj,
                                                                                       param_anova_f_pvalue,
                                                                                       expected_result):
        """Given data with an acceptable regression model
        When valid_anova_f_pvalue is called
        Then anova_f_pvalue < alpha must assert true"""
        # Arrange
        linearity_validator_obj.alpha = param_alpha
        linearity_validator_obj.fitted_result.f_pvalue = param_anova_f_pvalue
        # Act & Assert
        assert linearity_validator_obj.valid_anova_f_pvalue is expected_result

    @pytest.mark.parametrize('param_alpha, param_breusch_pagan_pvalue, expected_result', [
        (1, -10, False), (0.05, 0.049, False), (0.10, 0.11, True), (0.05, 10, True)
    ])
    def test_is_homokedastic_must_return_false_when_breusch_pagan_pvalue_is_smaller_than_alpha_otherwise_true(self,
                                                                                                              param_alpha,
                                                                                                              param_breusch_pagan_pvalue,
                                                                                                              expected_result):
        # Arrange
        analytical_data = [[0.100, 0.200, 0.150]]
        concentration_data = [[0.1, 0.2, 0.3]]
        linearity_validator = LinearityValidator(analytical_data, concentration_data, param_alpha)
        linearity_validator.breusch_pagan_pvalue = param_breusch_pagan_pvalue
        # Act & Assert: homoscedastic iff breusch_pagan_pvalue >= alpha.
        assert linearity_validator.is_homoscedastic is expected_result

    @pytest.mark.parametrize('param_significant_slope, param_alpha, expected_result', [
        (0.051, 0.05, False), (10, 0.1, False), (0.049, 0.05, True), (0.001, 0.10, True)
    ])
    def test_significant_slope_must_return_true_when_slope_pvalue_is_smaller_than_alpha(self, linearity_validator_obj,
                                                                                        param_significant_slope,
                                                                                        param_alpha, expected_result):
        """Given homoscedastic data
        When check_hypothesis is called
        Then slope_is_significant must assert true"""
        # Arrange: pvalues is (intercept_pvalue, slope_pvalue).
        linearity_validator_obj.alpha = param_alpha
        linearity_validator_obj.fitted_result.pvalues = ("mock value", param_significant_slope)
        # Act & Assert
        assert linearity_validator_obj.significant_slope is expected_result

    @pytest.mark.parametrize('param_insignificant_intercept, param_alpha, expected_result', [
        (0.051, 0.05, True), (10, 0.1, True), (0.049, 0.05, False), (0.001, 0.10, False)
    ])
    def test_insignificant_intercept_must_return_true_when_intercept_pvalue_is_greater_than_alpha(self,
                                                                                                  linearity_validator_obj,
                                                                                                  param_alpha,
                                                                                                  param_insignificant_intercept,
                                                                                                  expected_result):
        """Given homoscedastic data
        When check_hypothesis is called
        Then intercept_not_significant must assert true"""
        # Arrange: pvalues is (intercept_pvalue, slope_pvalue).
        linearity_validator_obj.alpha = param_alpha
        linearity_validator_obj.fitted_result.pvalues = (param_insignificant_intercept, "mock value")
        # Act & Assert
        assert linearity_validator_obj.insignificant_intercept is expected_result

    @pytest.mark.parametrize('param_r_squared, expected_result', [
        (1, True), (0.99, True), (0.98, False)
    ])
    def test_valid_r_squared_must_return_true_when_r_squared_is_greater_than_0990(self,
                                                                                  linearity_validator_obj,
                                                                                  param_r_squared, expected_result):
        """Given homoscedastic data
        When check_hypothesis is called
        Then r_squared > 0.990 must assert true"""
        # Arrange
        linearity_validator_obj.fitted_result.rsquared = param_r_squared
        # Act & Assert
        assert linearity_validator_obj.valid_r_squared is expected_result

    @pytest.mark.parametrize(
        'param_significant_slope, param_insignificant_intercept, param_valid_r_squared, expected_result', [
            (True, True, True, True), (True, False, False, False), (True, True, False, False),
            (False, True, True, False), (False, True, False, False), (False, False, False, False)
        ])
    def test_valid_regression_model(self, mocker, param_significant_slope, param_insignificant_intercept,
                                    param_valid_r_squared, expected_result):
        # Arrange: patch the three component properties so the combined
        # valid_regression_model check is exercised in isolation.
        mocker.patch('unit.test_validators.test_linearity_validator.LinearityValidator.significant_slope',
                     new_callable=PropertyMock, return_value=param_significant_slope)
        mocker.patch('unit.test_validators.test_linearity_validator.LinearityValidator.insignificant_intercept',
                     new_callable=PropertyMock, return_value=param_insignificant_intercept)
        mocker.patch('unit.test_validators.test_linearity_validator.LinearityValidator.valid_r_squared',
                     new_callable=PropertyMock, return_value=param_valid_r_squared)
        analytical_data = [[0.100, 0.200, 0.150]]
        concentration_data = [[0.2, 0.2, 0.3]]
        linearity_validator = LinearityValidator(analytical_data, concentration_data)
        # Act & Assert
        assert linearity_validator.valid_regression_model is expected_result

    def test_check_outliers_when_given_list_of_list_data(self, linearity_validator_outlier_obj):
        # One outlier per replicate set must be removed from both data sets.
        linearity_validator_outlier_obj.check_outliers()
        assert linearity_validator_outlier_obj.outliers == [[10.0], [6.0]]
        assert linearity_validator_outlier_obj.cleaned_analytical_data == [[1.0, 1.0], [2.0, 2.0]]
        assert linearity_validator_outlier_obj.cleaned_concentration_data == [[1.0, 2.0], [8.0, 10.0]]

    @pytest.mark.parametrize('param_shapiro_pvalue, param_alpha, expected_result', [
        (10, 0.05, True), (0.01, 0.1, False), (0.0501, 0.05, True), (0.099, 0.1, False)
    ])
    def test_is_normal_distribution(self, param_shapiro_pvalue, param_alpha, expected_result):
        # Normality is accepted when the Shapiro-Wilk p-value exceeds alpha.
        analytical_data = [[0.100, 0.200, 0.150]]
        concentration_data = [[0.2, 0.2, 0.3]]
        validator = LinearityValidator(analytical_data, concentration_data, param_alpha)
        validator.shapiro_pvalue = param_shapiro_pvalue
        # Assert
        assert validator.is_normal_distribution is expected_result

    def test_run_breusch_pagan_test_must_raise_exception_when_model_is_none(self):
        """Not given a model parameter
        The check_homokedasticity
        Should raise exception"""
        # Arrange
        analytical_data = [[0.100, 0.200, 0.150]]
        concentration_data = [[0.2, 0.2, 0.3]]
        # Act & Assert
        with pytest.raises(DataWasNotFitted):
            LinearityValidator(analytical_data, concentration_data).run_breusch_pagan_test()

    def test_run_breusch_pagan_test(self, linearity_validator_obj, het_breuschpagan_mock):
        """Given heterokedastic data
        When check_homokedasticity is called
        Then must return false"""
        # Act
        linearity_validator_obj.run_breusch_pagan_test()
        # Assert: the second element of het_breuschpagan's return is the p-value.
        assert linearity_validator_obj.breusch_pagan_pvalue == 42
        assert het_breuschpagan_mock.called
        assert het_breuschpagan_mock.call_args_list == [
            call(linearity_validator_obj.fitted_result.resid, linearity_validator_obj.fitted_result.model.exog)
        ]

    @pytest.mark.parametrize('durbin_watson_pvalue', [
        0.1, 1, 2, 2.5, 3, 3.9
    ])
    def test_check_residual_autocorrelation(self, linearity_validator_obj, durbin_watson_mock,
                                            durbin_watson_pvalue):
        """Given data
        When residual_autocorrelation is called
        Then must create durbin_watson_value"""
        # Arrange
        durbin_watson_mock.return_value = durbin_watson_pvalue
        # Act
        linearity_validator_obj.check_residual_autocorrelation()
        # Assert
        assert linearity_validator_obj.durbin_watson_value == durbin_watson_mock.return_value
        assert durbin_watson_mock.called
        assert durbin_watson_mock.call_args_list == [
            call(linearity_validator_obj.fitted_result.resid)
        ]

    def test_check_residual_autocorrelation_must_raise_exception_when_data_not_fitted(self, linearity_validator_obj):
        """Given data,
        if no regression was calculated
        Should raise an exception"""
        # Arrange
        linearity_validator_obj.fitted_result = None
        # Act & assert
        with pytest.raises(DataWasNotFitted):
            linearity_validator_obj.check_residual_autocorrelation()

    @pytest.mark.parametrize('durbin_watson_pvalue', [
        -1, 10, 4.1
    ])
    def test_check_residual_autocorrelation_must_pass_when_durbin_watson_value_is_between_0_and_4(self,
                                                                                                  linearity_validator_obj,
                                                                                                  durbin_watson_mock,
                                                                                                  durbin_watson_pvalue):
        """Given data,
        When check_residual is called
        after fitting the model
        Should pass creating
        0 < durbin_watson_value < 4"""
        # Arrange: out-of-range statistics must be rejected, leaving the
        # stored value as None.
        durbin_watson_mock.return_value = durbin_watson_pvalue
        # Act & Assert
        assert linearity_validator_obj.durbin_watson_value is None
| tests/unit/test_validators/test_linearity_validator.py | 19,599 | Given data
When residual_autocorrelation is called
Then must create durbin_watson_value
Given data,
When check_residual is called
after fitting the model
Should pass creating
0 < durbin_watson_value < 4
Given data,
if no regression was calculated
Should raise an exception
Given analytical data
The LinearityValidator
Should create a list of floats
Given homokedastic data
When check_hypothesis is called
Then intercept_not_significant must assert true
Given concentration values = float
The ordinary_least_squares_linear_regression
Then must set properties
Given a regression model
when regression_residues is called
the regression residues must be created
Given heterokedastic data
When check_homokedasticity is called
Then must return false
Not given a model parameter
The check_homokedasticity
Should raise exception
Given homokedastic data
When check_hypothesis is called
Then slope_is_significant must assert true
Given data with an aceptable regression model
When valid_anova_f_pvalue is called
Then anova_f_pvalue < alpha must assert true
Given homokedastic data
When check_hypothesis is called
Then r_squared > 0.990 must assert true
Assert Act Assert Garante que a regressao e resultado do resultado do metodo statsmodels.OLS(), aplicado .fit(). Garante que o metodo ols esta sendo chamado Garante que os arquivos de entrada definidos no call foram utilizados Act & assert Act & assert Act & assert Act & assert Act & assert Act & assert Act & assert Act & assert Act & assert Act & assert Act & assert Act & assert Act & assert Act & assert Arrange Act & Assert Arrange Act & Assert Arrange Act & Assert Arrange Act & Assert Arrange Act & Assert Arrange Act & Assert Assert Arrange Act & Assert Act Assert Arrange Act Assert Arrange Act & assert Arrange Act & Assert | 1,779 | en | 0.625069 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
import time
from copy import copy
from datetime import datetime
from typing import Any, Dict, Optional
from flask_babel import lazy_gettext as _
from sqlalchemy.orm import make_transient, Session
from superset import ConnectorRegistry, db
from superset.commands.base import BaseCommand
from superset.connectors.sqla.models import SqlaTable, SqlMetric, TableColumn
from superset.datasets.commands.importers.v0 import import_dataset
from superset.exceptions import DashboardImportException
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from superset.models.core import Database
from superset.utils.dashboard_filter_scopes_converter import (
convert_filter_scopes,
copy_filter_scopes,
)
logger = logging.getLogger(__name__)
def import_chart(
    slc_to_import: Slice,
    slc_to_override: Optional[Slice],
    import_time: Optional[int] = None,
) -> int:
    """Inserts or overrides slc in the database.

    remote_id and import_time fields in params_dict are set to track the
    slice origin and ensure correct overrides for multiple imports.
    Slice.perm is used to find the datasources and connect them.

    :param Slice slc_to_import: Slice object to import
    :param Slice slc_to_override: Slice to replace, id matches remote_id
    :returns: The resulting id for the imported slice
    :rtype: int
    """
    session = db.session
    # Detach the incoming object from any previous SQLAlchemy identity state
    # so it can be re-inserted in this session.
    make_transient(slc_to_import)
    slc_to_import.dashboards = []
    # Record provenance so later imports can find and override this slice.
    slc_to_import.alter_params(remote_id=slc_to_import.id, import_time=import_time)

    slc_to_import = slc_to_import.copy()
    slc_to_import.reset_ownership()
    params = slc_to_import.params_dict
    datasource = ConnectorRegistry.get_datasource_by_name(
        session,
        slc_to_import.datasource_type,
        params["datasource_name"],
        params["schema"],
        params["database_name"],
    )
    # NOTE(review): get_datasource_by_name may presumably return None when the
    # datasource is missing, which would raise AttributeError here -- confirm
    # callers guarantee the datasource was imported first.
    slc_to_import.datasource_id = datasource.id  # type: ignore
    if slc_to_override:
        # An earlier import of the same remote slice exists: update in place
        # and keep its id.
        slc_to_override.override(slc_to_import)
        session.flush()
        return slc_to_override.id
    session.add(slc_to_import)
    logger.info("Final slice: %s", str(slc_to_import.to_json()))
    session.flush()
    return slc_to_import.id
def import_dashboard(
    # pylint: disable=too-many-locals,too-many-statements
    dashboard_to_import: Dashboard,
    dataset_id_mapping: Optional[Dict[int, int]] = None,
    import_time: Optional[int] = None,
    database_id: Optional[int] = None,
) -> int:
    """Imports the dashboard from the object to the database.

    Once dashboard is imported, json_metadata field is extended and stores
    remote_id and import_time. It helps to decide if the dashboard has to
    be overridden or just copies over. Slices that belong to this
    dashboard will be wired to existing tables. This function can be used
    to import/export dashboards between multiple superset instances.
    Audit metadata isn't copied over.

    :param dashboard_to_import: dashboard object to import
    :param dataset_id_mapping: maps remote dataset ids to the ids of the
        datasets imported into this instance; may be None when no datasets
        were imported alongside the dashboard
    :param import_time: timestamp stored in params to track this import
    :param database_id: optional database the imported slices should point to
    :returns: id of the new or overridden dashboard
    """

    def alter_positions(
        dashboard: Dashboard, old_to_new_slc_id_dict: Dict[int, int]
    ) -> None:
        """Updates slice_ids in the position json.

        Sample position_json data:
        {
            "DASHBOARD_VERSION_KEY": "v2",
            "DASHBOARD_ROOT_ID": {
                "type": "DASHBOARD_ROOT_TYPE",
                "id": "DASHBOARD_ROOT_ID",
                "children": ["DASHBOARD_GRID_ID"]
            },
            "DASHBOARD_GRID_ID": {
                "type": "DASHBOARD_GRID_TYPE",
                "id": "DASHBOARD_GRID_ID",
                "children": ["DASHBOARD_CHART_TYPE-2"]
            },
            "DASHBOARD_CHART_TYPE-2": {
                "type": "CHART",
                "id": "DASHBOARD_CHART_TYPE-2",
                "children": [],
                "meta": {
                    "width": 4,
                    "height": 50,
                    "chartId": 118
                }
            },
        }
        """
        position_data = json.loads(dashboard.position_json)
        position_json = position_data.values()
        for value in position_json:
            if (
                isinstance(value, dict)
                and value.get("meta")
                and value.get("meta", {}).get("chartId")
            ):
                old_slice_id = value["meta"]["chartId"]
                if old_slice_id in old_to_new_slc_id_dict:
                    value["meta"]["chartId"] = old_to_new_slc_id_dict[old_slice_id]
        dashboard.position_json = json.dumps(position_data)

    def alter_native_filters(dashboard: Dashboard) -> None:
        # Rewire native filter targets from remote dataset ids to the ids of
        # the datasets imported into this instance.
        json_metadata = json.loads(dashboard.json_metadata)
        native_filter_configuration = json_metadata.get("native_filter_configuration")
        if not native_filter_configuration:
            return
        for native_filter in native_filter_configuration:
            for target in native_filter.get("targets", []):
                old_dataset_id = target.get("datasetId")
                if dataset_id_mapping and old_dataset_id is not None:
                    target["datasetId"] = dataset_id_mapping.get(
                        old_dataset_id, old_dataset_id,
                    )
        dashboard.json_metadata = json.dumps(json_metadata)

    logger.info("Started import of the dashboard: %s", dashboard_to_import.to_json())
    session = db.session
    logger.info("Dashboard has %d slices", len(dashboard_to_import.slices))
    # copy slices object as Slice.import_slice will mutate the slice
    # and will remove the existing dashboard - slice association
    slices = copy(dashboard_to_import.slices)

    # Clearing the slug to avoid conflicts
    dashboard_to_import.slug = None

    old_json_metadata = json.loads(dashboard_to_import.json_metadata or "{}")
    old_to_new_slc_id_dict: Dict[int, int] = {}
    new_timed_refresh_immune_slices = []
    new_expanded_slices = {}
    new_filter_scopes = {}
    i_params_dict = dashboard_to_import.params_dict

    # Fix: dataset_id_mapping defaults to None, and calling .values() on it
    # unconditionally crashed with a TypeError. Treat None as "no mapping".
    new_dataset_ids = list((dataset_id_mapping or {}).values())
    remote_id_slice_map = {
        slc.params_dict["remote_id"]: slc
        for slc in session.query(Slice)
        .filter(Slice.datasource_id.in_(new_dataset_ids))
        .all()
        if "remote_id" in slc.params_dict
    }
    for slc in slices:
        logger.info(
            "Importing slice %s from the dashboard: %s",
            slc.to_json(),
            dashboard_to_import.dashboard_title,
        )
        # Change database name in params due to using new database for imported dashboard
        if database_id:
            database_name = session.query(Database).filter(Database.id == database_id).first().name
            slc.alter_params(database_name=database_name)
        remote_slc = remote_id_slice_map.get(slc.id)
        new_slc_id = import_chart(slc, remote_slc, import_time=import_time)
        old_to_new_slc_id_dict[slc.id] = new_slc_id
        # update json metadata that deals with slice ids
        new_slc_id_str = str(new_slc_id)
        old_slc_id_str = str(slc.id)
        if (
            "timed_refresh_immune_slices" in i_params_dict
            and old_slc_id_str in i_params_dict["timed_refresh_immune_slices"]
        ):
            new_timed_refresh_immune_slices.append(new_slc_id_str)
        if (
            "expanded_slices" in i_params_dict
            and old_slc_id_str in i_params_dict["expanded_slices"]
        ):
            new_expanded_slices[new_slc_id_str] = i_params_dict["expanded_slices"][
                old_slc_id_str
            ]

    # since PR #9109, filter_immune_slices and filter_immune_slice_fields
    # are converted to filter_scopes
    # but dashboard create from import may still have old dashboard filter metadata
    # here we convert them to new filter_scopes metadata first
    filter_scopes = {}
    if (
        "filter_immune_slices" in i_params_dict
        or "filter_immune_slice_fields" in i_params_dict
    ):
        filter_scopes = convert_filter_scopes(old_json_metadata, slices)

    if "filter_scopes" in i_params_dict:
        filter_scopes = old_json_metadata.get("filter_scopes")

    # then replace old slice id to new slice id:
    if filter_scopes:
        new_filter_scopes = copy_filter_scopes(
            old_to_new_slc_id_dict=old_to_new_slc_id_dict,
            old_filter_scopes=filter_scopes,
        )

    # override the dashboard
    existing_dashboard = None
    for dash in session.query(Dashboard).all():
        if (
            "remote_id" in dash.params_dict
            and dash.params_dict["remote_id"] == dashboard_to_import.id
        ):
            existing_dashboard = dash

    dashboard_to_import = dashboard_to_import.copy()
    dashboard_to_import.id = None
    dashboard_to_import.reset_ownership()
    # position_json can be empty for dashboards
    # with charts added from chart-edit page and without re-arranging
    if dashboard_to_import.position_json:
        alter_positions(dashboard_to_import, old_to_new_slc_id_dict)
    dashboard_to_import.alter_params(import_time=import_time)
    dashboard_to_import.remove_params(param_to_remove="filter_immune_slices")
    dashboard_to_import.remove_params(param_to_remove="filter_immune_slice_fields")
    if new_filter_scopes:
        dashboard_to_import.alter_params(filter_scopes=new_filter_scopes)
    if new_expanded_slices:
        dashboard_to_import.alter_params(expanded_slices=new_expanded_slices)
    if new_timed_refresh_immune_slices:
        dashboard_to_import.alter_params(
            timed_refresh_immune_slices=new_timed_refresh_immune_slices
        )
    alter_native_filters(dashboard_to_import)

    new_slices = (
        session.query(Slice).filter(Slice.id.in_(old_to_new_slc_id_dict.values())).all()
    )

    if existing_dashboard:
        existing_dashboard.override(dashboard_to_import)
        existing_dashboard.slices = new_slices
        session.flush()
        return existing_dashboard.id

    dashboard_to_import.slices = new_slices
    session.add(dashboard_to_import)
    session.flush()
    return dashboard_to_import.id  # type: ignore
def decode_dashboards(  # pylint: disable=too-many-return-statements
    o: Dict[str, Any]
) -> Any:
    """
    Function to be passed into json.loads obj_hook parameter

    Recreates the dashboard object from a json representation.
    """
    # pylint: disable=import-outside-toplevel
    from superset.connectors.druid.models import (
        DruidCluster,
        DruidColumn,
        DruidDatasource,
        DruidMetric,
    )

    # Marker-key -> model-class dispatch table; checked in insertion order,
    # matching the original if/elif chain.
    constructors = {
        "__Dashboard__": Dashboard,
        "__Slice__": Slice,
        "__TableColumn__": TableColumn,
        "__SqlaTable__": SqlaTable,
        "__SqlMetric__": SqlMetric,
        "__DruidCluster__": DruidCluster,
        "__DruidColumn__": DruidColumn,
        "__DruidDatasource__": DruidDatasource,
        "__DruidMetric__": DruidMetric,
    }
    for marker, model_cls in constructors.items():
        if marker in o:
            return model_cls(**o[marker])
    if "__datetime__" in o:
        return datetime.strptime(o["__datetime__"], "%Y-%m-%dT%H:%M:%S")

    return o
def import_dashboards(
    session: Session,
    content: str,
    database_id: Optional[int] = None,
    import_time: Optional[int] = None,
) -> None:
    """Imports dashboards from a stream to databases"""
    if import_time is None:
        import_time = int(time.time())
    data = json.loads(content, object_hook=decode_dashboards)
    if not data:
        raise DashboardImportException(_("No data in file"))
    # Import datasets first, remembering how remote ids map to local ids.
    dataset_id_mapping: Dict[int, int] = {}
    for table in data["datasources"]:
        new_dataset_id = import_dataset(table, database_id, import_time=import_time)
        remote_id = json.loads(table.params)["remote_id"]
        dataset_id_mapping[remote_id] = new_dataset_id
    session.commit()
    # Then import the dashboards, rewiring their slices to the new datasets.
    for dashboard in data["dashboards"]:
        import_dashboard(
            dashboard,
            dataset_id_mapping,
            import_time=import_time,
            database_id=database_id,
        )
    session.commit()
class ImportDashboardsCommand(BaseCommand):
    """
    Import dashboard in JSON format.

    This is the original unversioned format used to export and import dashboards
    in Superset.
    """

    # pylint: disable=unused-argument
    def __init__(
        self, contents: Dict[str, str], database_id: Optional[int] = None, **kwargs: Any
    ):
        # contents: mapping of file name -> raw JSON dashboard export.
        self.contents = contents
        # database_id: optional database to rewire imported slices to.
        self.database_id = database_id

    def run(self) -> None:
        """Validate all files, then import each one in turn."""
        self.validate()
        for file_name, content in self.contents.items():
            logger.info("Importing dashboard from file %s", file_name)
            import_dashboards(db.session, content, self.database_id)

    def validate(self) -> None:
        # ensure all files are JSON; reject the whole batch on the first
        # unparseable file.
        for content in self.contents.values():
            try:
                json.loads(content)
            except ValueError:
                logger.exception("Invalid JSON file")
                raise
| superset/dashboards/commands/importers/v0.py | 13,991 | Import dashboard in JSON format.
This is the original unversioned format used to export and import dashboards
in Superset.
Updates slice_ids in the position json.
Sample position_json data:
{
"DASHBOARD_VERSION_KEY": "v2",
"DASHBOARD_ROOT_ID": {
"type": "DASHBOARD_ROOT_TYPE",
"id": "DASHBOARD_ROOT_ID",
"children": ["DASHBOARD_GRID_ID"]
},
"DASHBOARD_GRID_ID": {
"type": "DASHBOARD_GRID_TYPE",
"id": "DASHBOARD_GRID_ID",
"children": ["DASHBOARD_CHART_TYPE-2"]
},
"DASHBOARD_CHART_TYPE-2": {
"type": "CHART",
"id": "DASHBOARD_CHART_TYPE-2",
"children": [],
"meta": {
"width": 4,
"height": 50,
"chartId": 118
}
},
}
Function to be passed into json.loads obj_hook parameter
Recreates the dashboard object from a json representation.
Inserts or overrides slc in the database.
remote_id and import_time fields in params_dict are set to track the
slice origin and ensure correct overrides for multiple imports.
Slice.perm is used to find the datasources and connect them.
:param Slice slc_to_import: Slice object to import
:param Slice slc_to_override: Slice to replace, id matches remote_id
:returns: The resulting id for the imported slice
:rtype: int
Imports the dashboard from the object to the database.
Once dashboard is imported, json_metadata field is extended and stores
remote_id and import_time. It helps to decide if the dashboard has to
be overridden or just copies over. Slices that belong to this
dashboard will be wired to existing tables. This function can be used
to import/export dashboards between multiple superset instances.
Audit metadata isn't copied over.
Imports dashboards from a stream to databases
Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. type: ignore pylint: disable=too-many-locals,too-many-statements copy slices object as Slice.import_slice will mutate the slice and will remove the existing dashboard - slice association Clearing the slug to avoid conflicts Change database name in params due to using new database for imported dashboard update json metadata that deals with slice ids since PR 9109, filter_immune_slices and filter_immune_slice_fields are converted to filter_scopes but dashboard create from import may still have old dashboard filter metadata here we convert them to new filter_scopes metadata first then replace old slice id to new slice id: override the dashboard position_json can be empty for dashboards with charts added from chart-edit page and without re-arranging type: ignore pylint: disable=too-many-return-statements pylint: disable=import-outside-toplevel pylint: disable=unused-argument ensure all files are JSON | 3,443 | en | 0.766774 |
from datetime import datetime
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.backends import RemoteUserBackend
from django.contrib.auth.middleware import RemoteUserMiddleware
from django.contrib.auth.models import User
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.test import TestCase, override_settings
from django.utils import timezone
@skipIfCustomUser
@override_settings(ROOT_URLCONF='django.contrib.auth.tests.urls')
class RemoteUserTest(TestCase):
    """
    End-to-end tests for RemoteUserMiddleware + RemoteUserBackend, driven
    through the test client by setting the REMOTE_USER request header.
    Subclasses override `middleware`/`backend`/`header` to exercise variants.
    """
    middleware = 'django.contrib.auth.middleware.RemoteUserMiddleware'
    backend = 'django.contrib.auth.backends.RemoteUserBackend'
    header = 'REMOTE_USER'
    # Usernames to be passed in REMOTE_USER for the test_known_user test case.
    known_user = 'knownuser'
    known_user2 = 'knownuser2'
    def setUp(self):
        # Remember current settings so tearDown can restore them, then append
        # the middleware/backend under test.
        self.curr_middleware = settings.MIDDLEWARE_CLASSES
        self.curr_auth = settings.AUTHENTICATION_BACKENDS
        settings.MIDDLEWARE_CLASSES += (self.middleware,)
        settings.AUTHENTICATION_BACKENDS += (self.backend,)
    def test_no_remote_user(self):
        """
        Tests requests where no remote user is specified and ensures that no
        users get created.
        """
        num_users = User.objects.count()
        # An absent header, a None value and an empty string must all leave
        # the request anonymous.
        response = self.client.get('/remote_user/')
        self.assertTrue(response.context['user'].is_anonymous())
        self.assertEqual(User.objects.count(), num_users)
        response = self.client.get('/remote_user/', **{self.header: None})
        self.assertTrue(response.context['user'].is_anonymous())
        self.assertEqual(User.objects.count(), num_users)
        response = self.client.get('/remote_user/', **{self.header: ''})
        self.assertTrue(response.context['user'].is_anonymous())
        self.assertEqual(User.objects.count(), num_users)
    def test_unknown_user(self):
        """
        Tests the case where the username passed in the header does not exist
        as a User.
        """
        num_users = User.objects.count()
        response = self.client.get('/remote_user/', **{self.header: 'newuser'})
        self.assertEqual(response.context['user'].username, 'newuser')
        self.assertEqual(User.objects.count(), num_users + 1)
        User.objects.get(username='newuser')
        # Another request with same user should not create any new users.
        response = self.client.get('/remote_user/', **{self.header: 'newuser'})
        self.assertEqual(User.objects.count(), num_users + 1)
    def test_known_user(self):
        """
        Tests the case where the username passed in the header is a valid User.
        """
        User.objects.create(username='knownuser')
        User.objects.create(username='knownuser2')
        num_users = User.objects.count()
        response = self.client.get('/remote_user/',
                                   **{self.header: self.known_user})
        self.assertEqual(response.context['user'].username, 'knownuser')
        self.assertEqual(User.objects.count(), num_users)
        # Test that a different user passed in the headers causes the new user
        # to be logged in.
        response = self.client.get('/remote_user/',
                                   **{self.header: self.known_user2})
        self.assertEqual(response.context['user'].username, 'knownuser2')
        self.assertEqual(User.objects.count(), num_users)
    def test_last_login(self):
        """
        Tests that a user's last_login is set the first time they make a
        request but not updated in subsequent requests with the same session.
        """
        user = User.objects.create(username='knownuser')
        # Set last_login to something so we can determine if it changes.
        default_login = datetime(2000, 1, 1)
        if settings.USE_TZ:
            default_login = default_login.replace(tzinfo=timezone.utc)
        user.last_login = default_login
        user.save()
        # First authenticated request in a fresh session: last_login updates.
        response = self.client.get('/remote_user/',
                                   **{self.header: self.known_user})
        self.assertNotEqual(default_login, response.context['user'].last_login)
        # Reset it; a second request within the same session must not touch it.
        user = User.objects.get(username='knownuser')
        user.last_login = default_login
        user.save()
        response = self.client.get('/remote_user/',
                                   **{self.header: self.known_user})
        self.assertEqual(default_login, response.context['user'].last_login)
    def test_header_disappears(self):
        """
        Tests that a logged in user is logged out automatically when
        the REMOTE_USER header disappears during the same browser session.
        """
        User.objects.create(username='knownuser')
        # Known user authenticates
        response = self.client.get('/remote_user/',
                                   **{self.header: self.known_user})
        self.assertEqual(response.context['user'].username, 'knownuser')
        # During the session, the REMOTE_USER header disappears. Should trigger logout.
        response = self.client.get('/remote_user/')
        self.assertEqual(response.context['user'].is_anonymous(), True)
        # verify the remoteuser middleware will not remove a user
        # authenticated via another backend
        User.objects.create_user(username='modeluser', password='foo')
        self.client.login(username='modeluser', password='foo')
        authenticate(username='modeluser', password='foo')
        response = self.client.get('/remote_user/')
        self.assertEqual(response.context['user'].username, 'modeluser')
    def test_user_switch_forces_new_login(self):
        """
        Tests that if the username in the header changes between requests
        that the original user is logged out
        """
        User.objects.create(username='knownuser')
        # Known user authenticates
        response = self.client.get('/remote_user/',
                                   **{self.header: self.known_user})
        self.assertEqual(response.context['user'].username, 'knownuser')
        # During the session, the REMOTE_USER changes to a different user.
        response = self.client.get('/remote_user/',
                                   **{self.header: "newnewuser"})
        # Ensure that the current user is not the prior remote_user
        # In backends that create a new user, username is "newnewuser"
        # In backends that do not create new users, it is '' (anonymous user)
        self.assertNotEqual(response.context['user'].username, 'knownuser')
    def tearDown(self):
        """Restores settings to avoid breaking other tests."""
        settings.MIDDLEWARE_CLASSES = self.curr_middleware
        settings.AUTHENTICATION_BACKENDS = self.curr_auth
class RemoteUserNoCreateBackend(RemoteUserBackend):
    """Backend that doesn't create unknown users."""
    # Unknown REMOTE_USER values are rejected instead of auto-created.
    create_unknown_user = False
@skipIfCustomUser
class RemoteUserNoCreateTest(RemoteUserTest):
    """
    Runs the RemoteUserTest suite against a backend that refuses to create
    unknown users.
    """
    backend = 'django.contrib.auth.tests.test_remote_user.RemoteUserNoCreateBackend'

    def test_unknown_user(self):
        """An unknown REMOTE_USER stays anonymous and no account is added."""
        users_before = User.objects.count()
        response = self.client.get('/remote_user/', **{self.header: 'newuser'})
        self.assertTrue(response.context['user'].is_anonymous())
        self.assertEqual(User.objects.count(), users_before)
class CustomRemoteUserBackend(RemoteUserBackend):
    """
    Backend that overrides RemoteUserBackend methods.
    """

    def clean_username(self, username):
        """Strip everything from the first @ character onwards."""
        return username.partition('@')[0]

    def configure_user(self, user):
        """Give a newly created user a fixed email address."""
        user.email = 'user@example.com'
        user.save()
        return user
@skipIfCustomUser
class RemoteUserCustomTest(RemoteUserTest):
    """
    Tests a custom RemoteUserBackend subclass that overrides the clean_username
    and configure_user methods.
    """
    backend = 'django.contrib.auth.tests.test_remote_user.CustomRemoteUserBackend'
    # REMOTE_USER values carry an email address for clean_username to strip.
    known_user = 'knownuser@example.com'
    known_user2 = 'knownuser2@example.com'

    def test_known_user(self):
        """
        Known usernames are cleaned, and the pre-existing users must not have
        been reconfigured with an email address.
        """
        super(RemoteUserCustomTest, self).test_known_user()
        for username in ('knownuser', 'knownuser2'):
            self.assertEqual(User.objects.get(username=username).email, '')

    def test_unknown_user(self):
        """
        A newly auto-created user is passed through configure_user and
        therefore gets an email address.
        """
        super(RemoteUserCustomTest, self).test_unknown_user()
        self.assertEqual(User.objects.get(username='newuser').email,
                         'user@example.com')
class CustomHeaderMiddleware(RemoteUserMiddleware):
    """
    Middleware that reads the authenticated username from a custom HTTP
    header instead of REMOTE_USER.
    """
    # Django exposes the AUTHUSER HTTP header as HTTP_AUTHUSER in request.META.
    header = 'HTTP_AUTHUSER'
@skipIfCustomUser
class CustomHeaderRemoteUserTest(RemoteUserTest):
    """
    Runs the RemoteUserTest suite with a RemoteUserMiddleware subclass that
    reads the username from a custom HTTP header.
    """
    middleware = 'django.contrib.auth.tests.test_remote_user.CustomHeaderMiddleware'
    header = 'HTTP_AUTHUSER'
| django/contrib/auth/tests/test_remote_user.py | 9,572 | Middleware that overrides custom HTTP auth user header.
Tests a custom RemoteUserMiddleware subclass with custom HTTP auth user
header.
Backend that overrides RemoteUserBackend methods.
Tests a custom RemoteUserBackend subclass that overrides the clean_username
and configure_user methods.
Backend that doesn't create unknown users.
Contains the same tests as RemoteUserTest, but using a custom auth backend
class that doesn't create unknown users.
Grabs username before the @ character.
Sets user's email address.
Restores settings to avoid breaking other tests.
Tests that a logged in user is logged out automatically when
the REMOTE_USER header disappears during the same browser session.
Tests the case where the username passed in the header is a valid User.
The strings passed in REMOTE_USER should be cleaned and the known users
should not have been configured with an email address.
Tests that a user's last_login is set the first time they make a
request but not updated in subsequent requests with the same session.
Tests requests where no remote user is specified and insures that no
users get created.
Tests the case where the username passed in the header does not exist
as a User.
The unknown user created should be configured with an email address.
Tests that if the username in the header changes between requests
that the original user is logged out
Usernames to be passed in REMOTE_USER for the test_known_user test case. Another request with same user should not create any new users. Test that a different user passed in the headers causes the new user to be logged in. Set last_login to something so we can determine if it changes. Known user authenticates During the session, the REMOTE_USER header disappears. Should trigger logout. verify the remoteuser middleware will not remove a user authenticated via another backend Known user authenticates During the session, the REMOTE_USER changes to a different user. Ensure that the current user is not the prior remote_user In backends that create a new user, username is "newnewuser" In backends that do not create new users, it is '' (anonymous user) REMOTE_USER strings with email addresses for the custom backend to clean. | 2,198 | en | 0.874221 |
import pandas as pd
import numpy as np
from torch.utils.data import Dataset
import os
from PIL import Image
class CXRDataset(Dataset):
    """NIH Chest X-ray dataset.

    Reads labels from nih/nih_labels.csv, keeps only the rows of the requested
    fold, and serves (image, label vector, image index) tuples.
    """

    def __init__(
            self,
            path_to_images,
            fold,
            transform=None,
            sample=0,
            finding="any",):
        """
        Args:
            path_to_images: directory containing the X-ray image files.
            fold: which split to load (matches the csv's 'fold' column).
            transform: optional transform applied to each PIL image.
            sample: if > 0, randomly subsample this many rows
                (useful for testing).
            finding: restrict to rows positive for this finding
                (useful for evaluation); "any" keeps all rows.
        """
        self.transform = transform
        self.path_to_images = path_to_images
        self.df = pd.read_csv("nih/nih_labels.csv")
        self.df = self.df[self.df['fold'] == fold]
        # Optionally limit to a random subsample, useful for quick testing.
        if 0 < sample < len(self.df):
            self.df = self.df.sample(sample)
        if finding != "any":
            if finding in self.df.columns:
                if len(self.df[self.df[finding] == 1]) > 0:
                    self.df = self.df[self.df[finding] == 1]
                else:
                    # BUG FIX: this branch referenced an undefined name LABEL
                    # and raised NameError; it should report the finding.
                    print("No positive cases exist for " + finding +
                          ", returning all unfiltered cases")
            else:
                print("cannot filter on finding " + finding +
                      " as not in data - please check spelling")
        self.df = self.df.set_index("Image Index")
        self.PRED_LABEL = [
            'No Finding',
            'Atelectasis',
            'Cardiomegaly',
            'Effusion',
            'Infiltration',
            'Lung Lesion',
            'Pneumonia',
            'Pneumothorax',
            'Consolidation',
            'Edema',
            'Emphysema',
            'Fibrosis',
            'Pleural_Thickening',
            'Hernia']

    def __len__(self):
        """Number of images in the selected (and possibly filtered) fold."""
        return len(self.df)

    def __getitem__(self, idx):
        """Return (image, label vector, image file name) for row `idx`."""
        image = Image.open(
            os.path.join(
                self.path_to_images,
                self.df.index[idx]))
        image = image.convert('RGB')
        label = np.zeros(len(self.PRED_LABEL), dtype=int)
        for i, pred_label in enumerate(self.PRED_LABEL):
            # Leave zero if zero, else copy the positive value (single lookup
            # instead of the original's repeated column access).
            value = int(self.df[pred_label.strip()].iloc[idx])
            if value > 0:
                label[i] = value
        if self.transform:
            image = self.transform(image)
        return (image, label, self.df.index[idx])
| nih/loader.py | 2,449 | can limit to sample, useful for testing if fold == "train" or fold =="val": sample=500 can filter for positive findings of the kind described; useful for evaluation can leave zero if zero, else make one | 202 | en | 0.831188 |
from job import *
class NoncookingJob(Job):
    """Job whose candidate pool excludes solo applicants and anyone who has
    already cooked four or more semesters."""

    def __init__(self, name, prefs, maxMatches):
        """Same arguments as Job.__init__; prefs are filtered and sorted."""
        Job.__init__(self, name, prefs, maxMatches)
        # Keep only applicants who are not alone and have cooked < 4 semesters.
        # (The original comment said "remove pairs & underclassmen", which
        # does not match this predicate -- TODO confirm intended filter.)
        # BUG FIX: materialize the filter into a list (it was a one-shot
        # iterator under Python 3), and sort the filtered preferences
        # themselves; the original sorted the unfiltered `prefs` argument.
        self.prefs = [p for p in self.prefs
                      if p.numPeople != 1 and p.semsCooked < 4]
        # Highest number of semesters cooked first.
        self.prefs.sort(key=lambda p: p.semsCooked, reverse=True)
| noncookingjob.py | 376 | remove pairs & underclassmen sort all the people by number of semesters cooked, high to low | 91 | en | 0.81482 |
import math
import pandas as pd
import spacy  # Requires: python -m spacy download en_core_web_md
from spacy import displacy
from nltk import sent_tokenize, word_tokenize, PorterStemmer
from nltk.corpus import stopwords
from data import load_model
import streamlit as st
class tf_idf():
    """Extractive text summarizer based on TF-IDF sentence scoring.

    Sentences are keyed by the first 15 characters of their lowercased text
    throughout the pipeline.
    """

    # Shared spaCy pipeline, loaded once per process.
    nlp = load_model('en_core_web_md')

    def word_freq(self, text) -> dict:
        """
        Create document word frequency table {w1:f1, ..., wN:fN}.
        Stop words, punctuation, quote marks and out-of-vocabulary tokens are
        skipped; counts are keyed on the lowercased token text.
        :rtype: dict
        """
        doc = self.nlp(text)
        word_freq_table = {}
        for token in doc:
            ignore = (token.is_stop or token.is_punct or token.is_quote
                      or token.is_oov
                      or token.text in ['.', ',', ';', ':', '%', '-'])
            if ignore:
                continue
            # BUG FIX: membership used to be tested on token.text while the
            # table is keyed on token.lower_, so a capitalized repeat of an
            # already-counted word reset its count to 1.
            word_freq_table[token.lower_] = word_freq_table.get(token.lower_, 0) + 1
        return word_freq_table

    def sent_word_freq(self, text) -> dict:
        """
        Create sentence word frequency table
        {s1:{w1:f1, ..., wN:fN}, ..., sN:{w1:f1, ..., wN:fN}}.
        :rtype: dict
        """
        doc = self.nlp(text)
        sent_word_freq_table = {}
        for sent in doc.sents:
            sent_word_freq_table[sent.lower_[:15]] = self.word_freq(sent.lower_)
        return sent_word_freq_table

    def tf_matrix(self, sent_word_freq_table) -> dict:
        """Term frequency per sentence: count / number of distinct words."""
        tf_matrix = {}
        for sent, word_freq_table in sent_word_freq_table.items():
            sent_word_count = len(word_freq_table)
            tf_matrix[sent] = {word: freq / sent_word_count
                               for word, freq in word_freq_table.items()}
        return tf_matrix

    def global_word_freq(self, tf_matrix) -> dict:
        """Sum each word's TF value across all sentences."""
        tf_global_matrix = {}
        for f_table in tf_matrix.values():
            for word, count in f_table.items():
                tf_global_matrix[word] = tf_global_matrix.get(word, 0) + count
        return tf_global_matrix

    def idf(self, tf_matrix, tf_global_matrix) -> dict:
        """Inverse-document-frequency-like table per sentence.

        NOTE(review): the denominator is the summed TF value rather than a
        document count, so this is only IDF-like -- confirm intended.
        """
        total_documents = len(tf_matrix)
        idf_matrix = {}
        for sent, f_table in tf_matrix.items():
            idf_matrix[sent] = {
                word: math.log10(total_documents / float(tf_global_matrix[word]))
                for word in f_table
            }
        return idf_matrix

    def tf_idf(self, tf_matrix, idf_matrix) -> dict:
        """Element-wise product of matching TF and IDF entries."""
        tf_idf_matrix = {}
        # idf_matrix was built from tf_matrix, so the keys match pairwise.
        for sent, tf_table in tf_matrix.items():
            idf_table = idf_matrix[sent]
            tf_idf_matrix[sent] = {word: float(tf_table[word] * idf_table[word])
                                   for word in tf_table}
        return tf_idf_matrix

    def score_sentences(self, tf_idf_matrix) -> dict:
        """Score sentences by their word TF-IDF values.

        Each sentence's word scores are normalized by the sentence maximum,
        summed, and divided by the word count; empty sentences score 0.0.
        """
        sentenceScores = {}
        for sent, f_table in tf_idf_matrix.items():
            scores = list(f_table.values())
            maxScore = max(scores) if scores else 0.0
            # BUG FIX: guard against a zero maximum (all-zero IDF values),
            # which previously raised ZeroDivisionError.
            if maxScore > 0:
                normScores = [score / maxScore for score in scores]
                sentenceScores[sent] = sum(normScores) / len(f_table)
            else:
                sentenceScores[sent] = 0.0
        return sentenceScores

    def average_score(self, sentenceScores) -> float:
        """Mean sentence score; 0.0 for an empty table.

        BUG FIX: return annotation corrected from int to float, and an empty
        input no longer raises ZeroDivisionError.
        """
        if not sentenceScores:
            return 0.0
        return sum(sentenceScores.values()) / len(sentenceScores)

    def generate_summary(self, sents, sentenceScores, threshold) -> str:
        """Join sentences whose score is <= threshold.

        NOTE(review): this keeps the LOW-scoring sentences -- confirm the
        comparison direction is intended.
        """
        return ' '.join(
            sent.text.strip() for sent in sents
            if (sent.lower_[:15] in sentenceScores
                and sentenceScores[sent.lower_[:15]] <= threshold)
        )

    def summarize(self, text, threshold: float) -> str:
        """Run the full TF-IDF pipeline over `text` and return a summary."""
        doc = self.nlp(text)
        sents = doc.sents
        # 1-2: term frequencies per sentence, then summed globally.
        tf_matrix = self.sent_word_freq(text)
        tf_global_matrix = self.global_word_freq(tf_matrix)
        # 3-4: inverse document frequency and combined TF-IDF weights.
        idf_matrix = self.idf(tf_matrix, tf_global_matrix)
        tf_idf_matrix = self.tf_idf(tf_matrix, idf_matrix)
        # 5-6: score sentences and assemble the summary.
        sentence_scores = self.score_sentences(tf_idf_matrix)
        return self.generate_summary(sents, sentence_scores, threshold)
| TextSummarization/TF_IDF.py | 5,677 | Create sentence word frequency table {s1:{w1:f1, ..., wN:fN}, ..., sN:{w1:f1, ..., wN:fN} }.
:rtype: dict
Create document word frequency table {w1:f1, ..., wN:fN}.
Remove stop words, punct, etc. and lowercase
:rtype: dict
Requires: python -m spacy download en_core_web_sm here, keys are the same in both the table Score sentences by their word TFs Algorithm: adds word TFs and divides by total no of words in sentence. Normalise scale in range [0..10] Average score of a sentence from original summary_text 1 Calculate the term frequency matrix, by sentencest.write(pd.DataFrame(tf_matrix)) 2 Calculate the term frequency matrix, global (all sentences)st.write(pd.DataFrame({'tf_global_matrix':tf_global_matrix})) 3 Calculate IDFst.write(pd.DataFrame(idf_matrix)) 4 Calculate TF-IDFst.write(pd.DataFrame(tf_idf_matrix)) 5 Score sentencesst.write(pd.DataFrame({'sentence_scores':sentence_scores})) 6 Generate summary | 917 | en | 0.549476 |
"""
========================================================
Statistic the SV Stat after AGE Process
========================================================
Author: Shujia Huang & Siyang Liu
Date   : 2014-03-07
"""
import sys
import re
import os
import string
import numpy as np
import matplotlib.pyplot as plt
def DrawFig(figureFile, distance, properDepth, imProperDepth, nr, aa, bb, mscore, misprob, aveIden, inbCoe):
    """Scatter-plot each feature against variant quality and save a PNG.

    Each argument after figureFile is an N x 3 array of
    [category, variant quality, feature value] rows, where category is
    1=positive, 2=negative, 3=both, 0=new (see the caller's `mark` lists).
    """
    fig = plt.figure(num=None, figsize=(16, 30), facecolor='w', edgecolor='k')
    title = ['Distance distribution', 'NRatio', 'Perfect Depth', 'Imperfect depth', '', '', '']
    ylabel = ['The position of breakpoint', 'N Ratio of varints', \
             'Perfect Depth', 'Both ImPerfect Depth', 'InbreedCoefficient', \
             'Map score', 'Mismapping Probability' , 'Average Identity', \
             'ProperReadDepth', 'ImProperReadDepth']
    al = 0.5
    for i, data in enumerate ([distance, nr, aa, bb, inbCoe, mscore, misprob, aveIden, properDepth, imProperDepth ]):
        # Left column: training sites split by category.
        plt.subplot(10,2,2 * i + 1)
        #plt.title(title[i], fontsize=16)
        P = data[:,0] == 1; N = data[:,0] == 2; X = data[:,0] == 3
        plt.scatter(data[:,1][N], data[:,2][N], marker='o', c = 'r', alpha=al, linewidths = 0.1, label = 'Negative(%d)'%len(data[:,1][N])) # Negative
        plt.scatter(data[:,1][P], data[:,2][P], marker='o', c = 'g', alpha=al, linewidths = 0.1, label = 'Positive(%d)'%len(data[:,1][P])) # Positive
        plt.scatter(data[:,1][X], data[:,2][X], marker='*', c = 'Y', alpha=al, linewidths = 0.1, label = 'Positive->Negative(%d)' % len(data[:,1][X])) # Positive->Negative
        plt.legend(loc='upper right')
        plt.xlim(-10, 50)
        if i == 9: plt.xlabel('Score', fontsize=16)
        plt.ylabel(ylabel[i], fontsize=16)
        # Right column: new sites split into good/bad by VQ_CUTOFF (module global).
        plt.subplot(10, 2, 2*i + 2)
        NEW = data[:,0] == 0
        good = data[:,1][NEW] >= VQ_CUTOFF
        bad = data[:,1][NEW] < VQ_CUTOFF
        plt.scatter(data[:,1][NEW][bad], data[:,2][NEW][bad], marker='o', c = 'm', alpha=al, linewidths = 0.1, label = 'bad(%d)' % len(data[:,1][NEW][bad])) # bad
        plt.scatter(data[:,1][NEW][good], data[:,2][NEW][good], marker='o', c = 'b', alpha=al, linewidths = 0.1, label = 'good(%d)' % len(data[:,1][NEW][good])) # good
        plt.xlim(-3, 30)
        plt.legend(loc='upper right')
        if i == 9: plt.xlabel('Score', fontsize=16)
    fig.savefig(figureFile + '.png')
    #fig.savefig(figureFile + '.pdf')
def DrawPhredScale (figureFile, phredScal):
    """Scatter-plot the per-site phred-scaled value against variant quality.

    phredScal is an N x 3 array of [category, variant quality, phred] rows
    (category: 1=positive, 2=negative, 3=both, 0=new).
    """
    fig = plt.figure()
    ylabel = ['Phred Scale']
    for i, data in enumerate ([phredScal ]):
        # Top panel: training sites split by category.
        plt.subplot(2, 1, 2 * i + 1)
        P = data[:,0] == 1; N = data[:,0] == 2; X = data[:,0] == 3
        plt.scatter(data[:,1][N], data[:,2][N], marker='o', c = 'r', alpha=0.5, linewidths = 0, label = 'Negative(%d)'%len(data[:,1][N])) # Negative
        plt.scatter(data[:,1][P], data[:,2][P], marker='o', c = 'g', alpha=0.5, linewidths = 0, label = 'Positive(%d)'%len(data[:,1][P])) # Positive
        plt.scatter(data[:,1][X], data[:,2][X], marker='o', c = 'Y', alpha=0.5, linewidths = 0, label = 'Positive->Negative(%d)' % len(data[:,1][X])) # Positive->Negative
        plt.legend(loc='upper left')
        plt.ylabel(ylabel[i], fontsize=16)
        # Bottom panel: new sites split into good/bad by VQ_CUTOFF (module global).
        plt.subplot(2, 1, 2*i + 2)
        NEW = data[:,0] == 0
        good = data[:,1][NEW] >= VQ_CUTOFF
        bad = data[:,1][NEW] < VQ_CUTOFF
        plt.scatter(data[:,1][NEW][bad] , data[:,2][NEW][bad] , marker='o', c = 'm', alpha=0.5, linewidths = 0, label = 'bad(%d)' % len(data[:,1][NEW][bad])) # bad
        plt.scatter(data[:,1][NEW][good], data[:,2][NEW][good], marker='o', c = 'b', alpha=0.5, linewidths = 0, label = 'good(%d)' % len(data[:,1][NEW][good])) # good
        plt.legend(loc='upper left')
        plt.xlabel('Score' , fontsize=16)
        plt.ylabel(ylabel[i], fontsize=16)
    fig.savefig(figureFile + '.png')
    #fig.savefig(figureFile + '.pdf')
def Accum(data, isBig=False):
    """Accumulate the count column of `data` into its accumulator column.

    `data` maps a numeric-string key to a [count, accum] pair.  Keys are
    ordered by their float value; each entry's accum field is incremented by
    the prefix sum of counts up to that key, or by the suffix sum when
    `isBig` is True.  NOTE: `data` is mutated in place (as in the original).

    Returns [[key, count, accum], ...] as floats, in ascending key order.
    """
    keys = sorted(data.keys(), key=lambda d: float(d))
    counts = [data[k][0] for k in keys]
    # O(n) running sums replace the original O(n^2) nested loops.
    cums, running = [], 0
    if isBig:
        for c in reversed(counts):
            running += c
            cums.append(running)
        cums.reverse()
    else:
        for c in counts:
            running += c
            cums.append(running)
    dat = []
    for k, cum in zip(keys, cums):
        data[k][1] += cum
        dat.append([float(k), float(data[k][0]), float(data[k][1])])
    return dat
def SampleFaLen(faLenFile):
    """Load one sample's scaffold-length table.

    The input has whitespace-separated "<scaffold_id> <length>" lines and may
    be gzip-compressed (detected by a .gz suffix).

    Returns a dict mapping scaffold id -> int length.
    """
    if faLenFile.endswith('.gz'):
        I = os.popen('gzip -dc %s' % faLenFile)
    else:
        I = open(faLenFile)
    data = {}
    while 1:
        # Read in ~100KB batches to bound memory on huge files.
        lines = I.readlines(100000)
        if not lines:
            break
        for line in lines:
            col = line.strip('\n').split()
            # BUG FIX: string.atoi was removed in Python 3; int() is the
            # exact equivalent on both Python 2 and 3.
            data[col[0]] = int(col[1])
    I.close()
    return data
def LoadFaLen(faLenLstFile):
    """Load scaffold-length tables for every sample listed in faLenLstFile.

    Each line must be "<sampleId> <faLenFile>".  Returns a dict mapping
    sample id -> {scaffold id: length}.
    """
    data = {}
    I = open(faLenLstFile)
    for line in I.readlines():
        fields = line.strip('\n').split()
        if len(fields) != 2:
            raise ValueError('[ERROR] The format of Fa length list maybe not right. It could just be: "sample FalenghtFile", but found', line)
        sampleId, fileName = fields
        data[sampleId] = SampleFaLen(fileName)
    I.close()
    return data
def main (argv):
    """Summarise per-site quality features from an AGE-annotated VCF.

    argv: [vcfFile(.gz ok), faLenListFile, figPrefix(optional)].
    Writes a per-site feature table to stdout, summary statistics to stderr,
    and diagnostic scatter plots via DrawFig/DrawPhredScale.
    (Python 2 source: uses print statements and string.atoi/atof.)
    """
    qFaLen = LoadFaLen(argv[1])
    figPrefix = 'test'
    if len(argv) > 2: figPrefix = argv[2]
    if argv[0][-3:] == '.gz':
        I = os.popen('gzip -dc %s' % argv[0])
    else:
        I = open (argv[0])
    # s: positions already seen; annotations: per-sample feature rows;
    # mark: per-site [category, VQ, InbCoeff] (1=pos, 2=neg, 3=both, 0=new).
    s, annotations, mark = set(), [], []
    print '#Chr\tPosition\tDistance\tLeftIden\tRightIden\tAveIden\tN-Ratio\tAA'
    while 1: # VCF format
        lines = I.readlines(100000)
        if not lines: break
        for line in lines:
            col = line.strip('\n').split()
            if re.search(r'^#CHROM', line): col2sam = { i+9:sam for i,sam in enumerate(col[9:]) }
            if re.search(r'^#', line): continue
            # De-duplicate on chrom:pos.
            key = col[0] + ':' + col[1]
            if key in s: continue
            s.add(key)
            #if re.search(r'^PASS', col[6]): continue
            #if not re.search(r'_TRAIN_SITE', col[7]): continue
            #if not re.search(r'^PASS', col[6]): continue
            # Skip sites where any sample field contains NULL.
            isbad = False
            for i, sample in enumerate (col[9:]):
                if re.search(r'NULL', sample): isbad = True
            if isbad: continue
            # Map FORMAT keys (e.g. QR, AA, AGE) to their field index.
            fmat = { k:i for i,k in enumerate(col[8].split(':')) }
            if 'VS' not in fmat or 'QR' not in fmat: continue
            if 'AGE' not in fmat: continue
            if len(annotations) == 0: annotations = [[] for _ in col[9:] ]
            vcfinfo = { d.split('=')[0]: d.split('=')[1] for d in col[7].split(';') if len(d.split('=')) == 2 }
            vq = string.atof(vcfinfo['VQ'])
            inb = string.atof(vcfinfo['InbCoeff'])
            if ('POSITIVE_TRAIN_SITE' in col[7]) and ('NEGATIVE_TRAIN_SITE' in col[7]):
                mark.append([3, vq, inb])
            elif 'POSITIVE_TRAIN_SITE' in col[7]:
                mark.append([1, vq, inb])
            elif 'NEGATIVE_TRAIN_SITE' in col[7]:
                mark.append([2, vq, inb])
            else:
                mark.append([0, vq, inb])
            # GT:AA:AE:FN:MIP:MS:QR:RR:VS:VT
            for i, sample in enumerate (col[9:]):
                sampleId = col2sam[9+i]
                field = sample.split(':')
                # Missing/uncallable sample: pad with an all-zero feature row.
                if sample == './.' or len(field) < fmat['QR'] + 1 or field[fmat['QR']].split(',')[-1] == '.' or field[fmat['AS']] == '.':
                    annotations[i].append([0, 0, 0, 0, 0, 0, 0, 0, 0])
                    continue
                # Query region "id-start-end" (id itself may contain a dash).
                qr = field[fmat['QR']].split(',')[-1]
                qregion = np.array(qr.split('-'))
                if len(qregion) > 3: qId = qregion[0] + '-' + qregion[1]
                else : qId = qregion[0]
                qSta = string.atoi(qregion[-2])
                qEnd = string.atoi(qregion[-1])
                if sampleId not in qFaLen:
                    raise ValueError ('[ERROR] The sample name $s(in vcf) is not in the name of Fa list.' % sampleId)
                if qId not in qFaLen[sampleId]:
                    # NOTE(review): `opt` is undefined here, so this raise
                    # would itself fail with NameError -- confirm intent.
                    raise ValueError ('[ERROR]', qId, 'is not been found in file', opt.qFalen, '\n')
                # Convert breakpoint positions to percent of scaffold length.
                qSta= int(qSta * 100 / qFaLen[sampleId][qId] + 0.5)
                qEnd= int(qEnd * 100 / qFaLen[sampleId][qId] + 0.5)
                if qSta > 100 or qEnd > 100:
                    raise ValueError ('[ERROR] Query size Overflow! sample: %s; scaffold: %s' % (sampleId, qId))
                # Distance of breakpoint from the nearer scaffold end (in %).
                leg = qSta
                if 100 - qEnd < qSta: leg = qEnd
                nn = string.atof(sample.split(':')[fmat['NR']])
                n = round(1000 * nn) / 10.0 # N ratio
                alt = string.atoi(sample.split(':')[fmat['AA']].split(',')[1]) # Alternate perfect
                bot = string.atoi(sample.split(':')[fmat['AA']].split(',')[3]) # Both imperfect
                # NOTE(review): pro/ipr are never reassigned; depth columns stay 0.
                pro, ipr = [0,0]
                ms = string.atoi(sample.split(':')[fmat['AS']]) # Mapping score
                mip = string.atof(sample.split(':')[fmat['MS']]) # Mismapping probability
                if sample.split(':')[fmat['AGE']] != '.':
                    aveI = string.atoi(sample.split(':')[fmat['AGE']].split(',')[3]) # ave_iden in AGE
                else:
                    aveI = 0
                annotations[i].append([leg, n, alt, bot, pro, ipr, ms, mip, aveI])
    I.close()
    print >> sys.stderr, '# Number of Positions: %d' % len(mark)
    if len(mark) != len(annotations[0]):
        raise ValueError ('[ERROR] The size is not match mark=%d, annotations=%d!' % (len(mark), len(annotations)))
    annotations = np.array(annotations);
    sampleNum = len(annotations)
    data, distance, properDepth, imProperDepth, nr, aa, bb, mscore, misprob, aveIden = [],[],[],[],[],[],[],[],[],[]
    inbreedCoe, phredScal = [], []
    # Collapse the per-sample rows to a per-site median for each feature.
    for i in range(len(annotations[0])):
        anno = np.array([annotations[s][i] for s in range(sampleNum) if len(annotations[s][i][annotations[s][i]!=0]) > 0 ]) # each person in the same position
        score = np.array([annotations[s][i][-3] for s in range(sampleNum) if annotations[s][i][-3] > 0 ])
        msprob = np.array([annotations[s][i][-2] for s in range(sampleNum) if annotations[s][i][-3] > 0 ])
        phred = -10 * np.log10(1.0 - score.sum() / np.sum(score/(1.0 - msprob))) # Phred scale
        if len(anno) == 0: continue
        leg, n, alt, bot, pro,ipr, ms, mip, aveI = np.median(anno, axis=0)
        distance.append ([mark[i][0], mark[i][1], leg ])
        properDepth.append ([mark[i][0], mark[i][1], pro ])
        imProperDepth.append ([mark[i][0], mark[i][1], ipr ])
        nr.append ([mark[i][0], mark[i][1], n ])
        aa.append ([mark[i][0], mark[i][1], alt ])
        bb.append ([mark[i][0], mark[i][1], bot ])
        mscore.append ([mark[i][0], mark[i][1], ms ])
        misprob.append ([mark[i][0], mark[i][1], mip ])
        aveIden.append ([mark[i][0], mark[i][1], aveI])
        phredScal.append ([mark[i][0], mark[i][1], phred])
        inbreedCoe.append ([mark[i][0], mark[i][1], mark[i][2]])
        data.append([leg, alt, pro, ipr, n, bot])
        print mark[i][0], mark[i][1], mark[i][2], '\t', leg, '\t', pro, '\t', ipr,'\t', n, '\t', alt, '\t', bot
    data = np.array(data)
    print >> sys.stderr, '\nPosition\tALTernatePerfect\tLeftIdentity\tRightIdentity\tAveIden\tNRatio\tBothImperfect'
    print >> sys.stderr, 'Means: ', data.mean(axis=0), '\nstd : ', data.std(axis=0), '\nMedian: ', np.median(data, axis=0)
    print >> sys.stderr, '25 Percentile:', np.percentile(data, 25,axis=0), '\n50 Percentile:', np.percentile(data, 50,axis=0), '\n75 Percentile:', np.percentile(data, 75,axis=0)
    DrawFig(figPrefix, \
            np.array (distance ), \
            np.array (properDepth ), \
            np.array (imProperDepth), \
            np.array (nr ), \
            np.array (aa ), \
            np.array (bb ), \
            np.array (mscore ), \
            np.array (misprob ), \
            np.array (aveIden ), \
            np.array (inbreedCoe ) )
    DrawPhredScale (figPrefix + '.phred', np.array(phredScal))
if __name__ == '__main__':
    # Variant-quality cutoff used by the Draw* functions to split new sites
    # into good/bad.
    VQ_CUTOFF = 3.0
    main(sys.argv[1:])
| src/AsmvarVarScore/FeatureToScore2.py | 12,476 | plt.title(title[i], fontsize=16) Negative Positive Positive->Negative bad goodfig.savefig(figureFile + '.pdf') Negative Positive Positive->Negative bad goodfig.savefig(figureFile + '.pdf') VCF formatif re.search(r'^PASS', col[6]): continueif not re.search(r'_TRAIN_SITE', col[7]): continueif not re.search(r'^PASS', col[6]): continue GT:AA:AE:FN:MIP:MS:QR:RR:VS:VT N ratio Alternate perfect Both imperfect Mapping score Mismapping probability ave_iden in AGE each person in the same position Phred scale | 505 | en | 0.421564 |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class WorkSubmissionKey(object):
    """Generated model wrapping the identifier of a submitted unit of work."""

    def __init__(self, **kwargs):
        """
        Initializes a new WorkSubmissionKey object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param work_submission_key:
            The value to assign to the work_submission_key property of this WorkSubmissionKey.
        :type work_submission_key: str
        """
        # Mapping of attribute name -> swagger type, consumed by the
        # init_model_state_from_kwargs decorator and serialization helpers.
        self.swagger_types = {'work_submission_key': 'str'}
        # Mapping of attribute name -> wire (JSON) field name.
        self.attribute_map = {'work_submission_key': 'workSubmissionKey'}
        self._work_submission_key = None

    @property
    def work_submission_key(self):
        """
        **[Required]** Gets the work_submission_key of this WorkSubmissionKey.
        Work Submission Identifier

        :return: The work_submission_key of this WorkSubmissionKey.
        :rtype: str
        """
        return self._work_submission_key

    @work_submission_key.setter
    def work_submission_key(self, work_submission_key):
        """
        Sets the work_submission_key of this WorkSubmissionKey.
        Work Submission Identifier

        :param work_submission_key: The work_submission_key of this WorkSubmissionKey.
        :type: str
        """
        self._work_submission_key = work_submission_key

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # None never compares equal; otherwise fall back to attribute-dict
        # equality, matching the generated-model convention.
        return other is not None and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| src/oci/management_agent/models/work_submission_key.py | 2,219 | Work Submission Identifier
Initializes a new WorkSubmissionKey object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param work_submission_key:
The value to assign to the work_submission_key property of this WorkSubmissionKey.
:type work_submission_key: str
**[Required]** Gets the work_submission_key of this WorkSubmissionKey.
Work Submission Identifier
:return: The work_submission_key of this WorkSubmissionKey.
:rtype: str
Sets the work_submission_key of this WorkSubmissionKey.
Work Submission Identifier
:param work_submission_key: The work_submission_key of this WorkSubmissionKey.
:type: str
coding: utf-8 Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. noqa: F401 | 1,048 | en | 0.785599 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import iso8601
from lxml import etree
import webob
from nova.api.openstack import compute
from nova.api.openstack.compute import extensions as compute_extensions
from nova.api.openstack import extensions as base_extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import flags
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
# Global nova configuration flags, used in setUp to extend the extension list.
FLAGS = flags.FLAGS
# XML namespace prefixes in Clark notation, used when matching etree tags.
NS = "{http://docs.openstack.org/common/api/v1.0}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
# Canned payloads returned by the stub controllers defined below.
response_body = "Try to say this Mr. Knox, sir..."
extension_body = "I am not a fox!"
class StubController(object):
    """Minimal resource controller returning a canned body.

    ``index`` succeeds with the preset payload, ``create`` always fails
    with a 400 and ``show`` always fails with a 404, letting tests probe
    both the success and error paths of the extension machinery.
    """

    def __init__(self, body):
        self.body = body

    def index(self, req):
        # Success path: echo back whatever payload the test configured.
        return self.body

    def create(self, req):
        # Failure path: deliberately reject every creation attempt.
        raise webob.exc.HTTPBadRequest(
            explanation='All aboard the fail train!')

    def show(self, req, id):
        # Lookup path: pretend no resource ever exists.
        raise webob.exc.HTTPNotFound()
class StubActionController(wsgi.Controller):
    """Controller exposing a single 'fooAction' server action for tests."""

    def __init__(self, body):
        self.body = body

    @wsgi.action('fooAction')
    def _action_foo(self, req, id, body):
        # Invoked for POST .../action requests whose body key is "fooAction";
        # returns the canned payload regardless of the request body.
        return self.body
class StubControllerExtension(base_extensions.ExtensionDescriptor):
    """Bare extension descriptor carrying only a name, for registration."""

    name = 'twaadle'

    def __init__(self):
        # Override so ExtensionDescriptor.__init__ (and whatever it
        # requires) is not run for this stub.
        pass
class StubEarlyExtensionController(wsgi.Controller):
    """Extension hooks that run before the core handler (generator form).

    The early-extension tests below expect the yielded body to become the
    final response, replacing whatever the core controller would return.
    """

    def __init__(self, body):
        self.body = body

    @wsgi.extends
    def index(self, req):
        yield self.body

    @wsgi.extends(action='fooAction')
    def _action_foo(self, req, id, body):
        yield self.body
class StubLateExtensionController(wsgi.Controller):
    """Extension hooks that run after the core handler.

    Late hooks receive the already-built response object (``resp_obj``);
    the late-extension tests below expect the returned body to become the
    client-visible response.
    """

    def __init__(self, body):
        self.body = body

    @wsgi.extends
    def index(self, req, resp_obj):
        return self.body

    @wsgi.extends(action='fooAction')
    def _action_foo(self, req, resp_obj, id, body):
        return self.body
class StubExtensionManager(object):
    """Hand-rolled extension manager serving preset extension objects.

    Each ``get_*`` accessor returns a (possibly empty) list built from the
    extensions supplied at construction time; ``extra_resource_ext`` may be
    assigned after construction to expose a second resource extension.
    """

    name = "Tweedle Beetle Extension"
    alias = "TWDLBETL"

    def __init__(self, resource_ext=None, action_ext=None, request_ext=None,
                 controller_ext=None):
        self.resource_ext = resource_ext
        self.action_ext = action_ext
        self.request_ext = request_ext
        self.controller_ext = controller_ext
        self.extra_resource_ext = None

    def get_resources(self):
        # Primary resource extension first, then the optional extra one.
        return [ext for ext in (self.resource_ext, self.extra_resource_ext)
                if ext]

    def get_actions(self):
        return [self.action_ext] if self.action_ext else []

    def get_request_extensions(self):
        return [self.request_ext] if self.request_ext else []

    def get_controller_extensions(self):
        return [self.controller_ext] if self.controller_ext else []
class ExtensionTestCase(test.TestCase):
    """Base test case guaranteeing the Fox In Socks extension is loaded."""

    def setUp(self):
        super(ExtensionTestCase, self).setUp()
        # Copy the configured extension list so the flag's default list is
        # not mutated in place.
        ext_list = FLAGS.osapi_compute_extension[:]
        fox = ('nova.tests.api.openstack.compute.extensions.'
               'foxinsocks.Foxinsocks')
        if fox not in ext_list:
            ext_list.append(fox)
        self.flags(osapi_compute_extension=ext_list)
class ExtensionControllerTest(ExtensionTestCase):
    """End-to-end tests for the /extensions resource, in JSON and XML."""

    def setUp(self):
        super(ExtensionControllerTest, self).setUp()
        # Names of extensions that must appear in listings; responses may
        # contain more, but at least these are expected.
        self.ext_list = [
            "AdminActions",
            "Aggregates",
            "AvailabilityZone",
            "Certificates",
            "Cloudpipe",
            "ConsoleOutput",
            "Consoles",
            "Createserverext",
            "DeferredDelete",
            "DiskConfig",
            "ExtendedStatus",
            "ExtendedServerAttributes",
            "FlavorAccess",
            "FlavorDisabled",
            "FlavorExtraSpecs",
            "FlavorExtraData",
            "FlavorManage",
            "FlavorRxtx",
            "FlavorSwap",
            "FloatingIps",
            "FloatingIpDns",
            "FloatingIpPools",
            "Fox In Socks",
            "Hosts",
            "Keypairs",
            "Multinic",
            "MultipleCreate",
            "Networks",
            "QuotaClasses",
            "Quotas",
            "Rescue",
            "SchedulerHints",
            "SecurityGroups",
            "ServerDiagnostics",
            "ServerStartStop",
            "SimpleTenantUsage",
            "UsedLimits",
            "UserData",
            "VirtualInterfaces",
            "Volumes",
            "VolumeTypes",
            ]
        self.ext_list.sort()

    def test_list_extensions_json(self):
        app = compute.APIRouter()
        request = webob.Request.blank("/fake/extensions")
        response = request.get_response(app)
        self.assertEqual(200, response.status_int)

        # Make sure we have all the extensions, extra extensions being OK.
        data = jsonutils.loads(response.body)
        names = [str(x['name']) for x in data['extensions']
                 if str(x['name']) in self.ext_list]
        names.sort()
        self.assertEqual(names, self.ext_list)

        # Ensure all the timestamps are valid according to iso8601
        for ext in data['extensions']:
            iso8601.parse_date(ext['updated'])

        # Make sure that at least Fox in Sox is correct.
        (fox_ext, ) = [
            x for x in data['extensions'] if x['alias'] == 'FOXNSOX']
        self.assertEqual(fox_ext, {
                'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0',
                'name': 'Fox In Socks',
                'updated': '2011-01-22T13:25:27-06:00',
                'description': 'The Fox In Socks Extension',
                'alias': 'FOXNSOX',
                'links': []
            },
        )

        # Every listed extension must also be retrievable individually.
        for ext in data['extensions']:
            url = '/fake/extensions/%s' % ext['alias']
            request = webob.Request.blank(url)
            response = request.get_response(app)
            output = jsonutils.loads(response.body)
            self.assertEqual(output['extension']['alias'], ext['alias'])

    def test_get_extension_json(self):
        app = compute.APIRouter()
        request = webob.Request.blank("/fake/extensions/FOXNSOX")
        response = request.get_response(app)
        self.assertEqual(200, response.status_int)

        data = jsonutils.loads(response.body)
        self.assertEqual(data['extension'], {
                "namespace": "http://www.fox.in.socks/api/ext/pie/v1.0",
                "name": "Fox In Socks",
                "updated": "2011-01-22T13:25:27-06:00",
                "description": "The Fox In Socks Extension",
                "alias": "FOXNSOX",
                "links": []})

    def test_get_non_existing_extension_json(self):
        # Unknown aliases must yield a 404, not a server error.
        app = compute.APIRouter()
        request = webob.Request.blank("/fake/extensions/4")
        response = request.get_response(app)
        self.assertEqual(404, response.status_int)

    def test_list_extensions_xml(self):
        app = compute.APIRouter()
        request = webob.Request.blank("/fake/extensions")
        request.accept = "application/xml"
        response = request.get_response(app)
        self.assertEqual(200, response.status_int)

        root = etree.XML(response.body)
        self.assertEqual(root.tag.split('extensions')[0], NS)

        # Make sure we have all the extensions, extras extensions being OK.
        exts = root.findall('{0}extension'.format(NS))
        self.assert_(len(exts) >= len(self.ext_list))

        # Make sure that at least Fox in Sox is correct.
        (fox_ext, ) = [x for x in exts if x.get('alias') == 'FOXNSOX']
        self.assertEqual(fox_ext.get('name'), 'Fox In Socks')
        self.assertEqual(fox_ext.get('namespace'),
                         'http://www.fox.in.socks/api/ext/pie/v1.0')
        self.assertEqual(fox_ext.get('updated'), '2011-01-22T13:25:27-06:00')
        self.assertEqual(fox_ext.findtext('{0}description'.format(NS)),
                         'The Fox In Socks Extension')

        # The whole document must validate against the extensions schema.
        xmlutil.validate_schema(root, 'extensions')

    def test_get_extension_xml(self):
        app = compute.APIRouter()
        request = webob.Request.blank("/fake/extensions/FOXNSOX")
        request.accept = "application/xml"
        response = request.get_response(app)
        self.assertEqual(200, response.status_int)
        xml = response.body

        root = etree.XML(xml)
        self.assertEqual(root.tag.split('extension')[0], NS)
        self.assertEqual(root.get('alias'), 'FOXNSOX')
        self.assertEqual(root.get('name'), 'Fox In Socks')
        self.assertEqual(root.get('namespace'),
                         'http://www.fox.in.socks/api/ext/pie/v1.0')
        self.assertEqual(root.get('updated'), '2011-01-22T13:25:27-06:00')
        self.assertEqual(root.findtext('{0}description'.format(NS)),
                         'The Fox In Socks Extension')

        xmlutil.validate_schema(root, 'extension')
class ResourceExtensionTest(ExtensionTestCase):
    """Tests routing of top-level resources registered by extensions."""

    def test_no_extension_present(self):
        # With no resource extensions registered, unknown paths 404.
        manager = StubExtensionManager(None)
        app = compute.APIRouter(manager)
        request = webob.Request.blank("/blah")
        response = request.get_response(app)
        self.assertEqual(404, response.status_int)

    def test_get_resources(self):
        res_ext = base_extensions.ResourceExtension('tweedles',
                                                    StubController(response_body))
        manager = StubExtensionManager(res_ext)
        app = compute.APIRouter(manager)
        request = webob.Request.blank("/fake/tweedles")
        response = request.get_response(app)
        self.assertEqual(200, response.status_int)
        self.assertEqual(response_body, response.body)

    def test_get_resources_with_controller(self):
        res_ext = base_extensions.ResourceExtension('tweedles',
                                                    StubController(response_body))
        manager = StubExtensionManager(res_ext)
        app = compute.APIRouter(manager)
        request = webob.Request.blank("/fake/tweedles")
        response = request.get_response(app)
        self.assertEqual(200, response.status_int)
        self.assertEqual(response_body, response.body)

    def test_bad_request(self):
        # StubController.create raises HTTPBadRequest; the fault body must
        # be serialized as a JSON "badRequest" structure.
        res_ext = base_extensions.ResourceExtension('tweedles',
                                                    StubController(response_body))
        manager = StubExtensionManager(res_ext)
        app = compute.APIRouter(manager)
        request = webob.Request.blank("/fake/tweedles")
        request.method = "POST"
        response = request.get_response(app)
        self.assertEqual(400, response.status_int)
        self.assertEqual('application/json', response.content_type)
        body = jsonutils.loads(response.body)
        expected = {
            "badRequest": {
                "message": "All aboard the fail train!",
                "code": 400
            }
        }
        self.assertDictMatch(expected, body)

    def test_non_exist_resource(self):
        # StubController.show raises HTTPNotFound; expect the standard
        # JSON "itemNotFound" fault.
        res_ext = base_extensions.ResourceExtension('tweedles',
                                                    StubController(response_body))
        manager = StubExtensionManager(res_ext)
        app = compute.APIRouter(manager)
        request = webob.Request.blank("/fake/tweedles/1")
        response = request.get_response(app)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        body = jsonutils.loads(response.body)
        expected = {
            "itemNotFound": {
                "message": "The resource could not be found.",
                "code": 404
            }
        }
        self.assertDictMatch(expected, body)
class InvalidExtension(object):
    """Fake extension exposing only an alias; registration should reject it."""

    alias = "THIRD"
class ExtensionManagerTest(ExtensionTestCase):
    """Tests for ExtensionManager loading and registration."""

    # NOTE(review): this class attribute shadows the module-level
    # response_body but is never read -- the bare name inside the method
    # below resolves to the module-level global, not this attribute.
    response_body = "Try to say this Mr. Knox, sir..."

    def test_get_resources(self):
        app = compute.APIRouter()
        request = webob.Request.blank("/fake/foxnsocks")
        response = request.get_response(app)
        self.assertEqual(200, response.status_int)
        self.assertEqual(response_body, response.body)

    def test_invalid_extensions(self):
        # Don't need the serialization middleware here because we're
        # not testing any serialization
        app = compute.APIRouter()
        ext_mgr = compute_extensions.ExtensionManager()
        ext_mgr.register(InvalidExtension())
        # The valid Fox In Socks extension loads; the invalid one is
        # silently rejected.
        self.assertTrue(ext_mgr.is_loaded('FOXNSOX'))
        self.assertFalse(ext_mgr.is_loaded('THIRD'))
class ActionExtensionTest(ExtensionTestCase):
    """Tests for server-action handlers contributed by extensions."""

    def _send_server_action_request(self, url, body):
        # Helper: POST a JSON action body and return the raw response.
        app = compute.APIRouter()
        request = webob.Request.blank(url)
        request.method = 'POST'
        request.content_type = 'application/json'
        request.body = jsonutils.dumps(body)
        response = request.get_response(app)
        return response

    def test_extended_action(self):
        body = dict(add_tweedle=dict(name="test"))
        url = "/fake/servers/abcd/action"
        response = self._send_server_action_request(url, body)
        self.assertEqual(200, response.status_int)
        self.assertEqual("Tweedle Beetle Added.", response.body)

        body = dict(delete_tweedle=dict(name="test"))
        response = self._send_server_action_request(url, body)
        self.assertEqual(200, response.status_int)
        self.assertEqual("Tweedle Beetle Deleted.", response.body)

    def test_invalid_action(self):
        body = dict(blah=dict(name="test"))  # Doesn't exist
        url = "/fake/servers/abcd/action"
        response = self._send_server_action_request(url, body)
        self.assertEqual(400, response.status_int)
        self.assertEqual('application/json', response.content_type)
        body = jsonutils.loads(response.body)
        expected = {
            "badRequest": {
                "message": "There is no such action: blah",
                "code": 400
            }
        }
        self.assertDictMatch(expected, body)

    def test_non_exist_action(self):
        # Actions on a resource that does not exist must 404.
        body = dict(blah=dict(name="test"))
        url = "/fake/fdsa/1/action"
        response = self._send_server_action_request(url, body)
        self.assertEqual(404, response.status_int)

    def test_failed_action(self):
        # The foxnsocks "fail" action raises; expect a JSON badRequest.
        body = dict(fail=dict(name="test"))
        url = "/fake/servers/abcd/action"
        response = self._send_server_action_request(url, body)
        self.assertEqual(400, response.status_int)
        self.assertEqual('application/json', response.content_type)
        body = jsonutils.loads(response.body)
        expected = {
            "badRequest": {
                "message": "Tweedle fail",
                "code": 400
            }
        }
        self.assertDictMatch(expected, body)
class RequestExtensionTest(ExtensionTestCase):
    """Tests extensions that decorate responses of existing resources."""

    def test_get_resources_with_stub_mgr(self):

        class GooGoose(wsgi.Controller):
            @wsgi.extends
            def show(self, req, resp_obj, id):
                # only handle JSON responses
                resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing')

        req_ext = base_extensions.ControllerExtension(
            StubControllerExtension(), 'flavors', GooGoose())
        manager = StubExtensionManager(None, None, None, req_ext)
        app = fakes.wsgi_app(ext_mgr=manager)
        request = webob.Request.blank("/v2/fake/flavors/1?chewing=bluegoo")
        request.environ['api.version'] = '2'
        response = request.get_response(app)
        self.assertEqual(200, response.status_int)
        response_data = jsonutils.loads(response.body)
        self.assertEqual('bluegoo', response_data['flavor']['googoose'])

    def test_get_resources_with_mgr(self):
        # Same as above but with the real extension manager: the loaded
        # foxnsocks extension decorates the flavor response.
        app = fakes.wsgi_app()
        request = webob.Request.blank("/v2/fake/flavors/1?chewing=newblue")
        request.environ['api.version'] = '2'
        response = request.get_response(app)
        self.assertEqual(200, response.status_int)
        response_data = jsonutils.loads(response.body)
        self.assertEqual('newblue', response_data['flavor']['googoose'])
        self.assertEqual("Pig Bands!", response_data['big_bands'])
class ControllerExtensionTest(ExtensionTestCase):
    """Tests early/late controller-extension hooks on index and actions.

    The tests below all assert that the body delivered to the client is
    extension_body, i.e. that the extension hook's value wins over the
    core controller's response_body.
    """

    def test_controller_extension_early(self):
        controller = StubController(response_body)
        res_ext = base_extensions.ResourceExtension('tweedles', controller)
        ext_controller = StubEarlyExtensionController(extension_body)
        extension = StubControllerExtension()
        cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
                                                       ext_controller)
        manager = StubExtensionManager(resource_ext=res_ext,
                                       controller_ext=cont_ext)
        app = compute.APIRouter(manager)
        request = webob.Request.blank("/fake/tweedles")
        response = request.get_response(app)
        self.assertEqual(200, response.status_int)
        self.assertEqual(extension_body, response.body)

    def test_controller_extension_late(self):
        # Need a dict for the body to convert to a ResponseObject
        controller = StubController(dict(foo=response_body))
        res_ext = base_extensions.ResourceExtension('tweedles', controller)

        ext_controller = StubLateExtensionController(extension_body)
        extension = StubControllerExtension()
        cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
                                                       ext_controller)
        manager = StubExtensionManager(resource_ext=res_ext,
                                       controller_ext=cont_ext)
        app = compute.APIRouter(manager)
        request = webob.Request.blank("/fake/tweedles")
        response = request.get_response(app)
        self.assertEqual(200, response.status_int)
        self.assertEqual(extension_body, response.body)

    def test_controller_extension_late_inherited_resource(self):
        # Need a dict for the body to convert to a ResponseObject
        controller = StubController(dict(foo=response_body))
        parent_ext = base_extensions.ResourceExtension('tweedles', controller)

        ext_controller = StubLateExtensionController(extension_body)
        extension = StubControllerExtension()
        cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
                                                       ext_controller)
        manager = StubExtensionManager(resource_ext=parent_ext,
                                       controller_ext=cont_ext)
        # A child resource inheriting from 'tweedles' must also receive the
        # parent's controller extensions.
        child_ext = base_extensions.ResourceExtension('beetles', controller,
                                                      inherits='tweedles')
        manager.extra_resource_ext = child_ext
        app = compute.APIRouter(manager)
        request = webob.Request.blank("/fake/beetles")
        response = request.get_response(app)
        self.assertEqual(200, response.status_int)
        self.assertEqual(extension_body, response.body)

    def test_controller_action_extension_early(self):
        controller = StubActionController(response_body)
        actions = dict(action='POST')
        res_ext = base_extensions.ResourceExtension('tweedles', controller,
                                                    member_actions=actions)
        ext_controller = StubEarlyExtensionController(extension_body)
        extension = StubControllerExtension()
        cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
                                                       ext_controller)
        manager = StubExtensionManager(resource_ext=res_ext,
                                       controller_ext=cont_ext)
        app = compute.APIRouter(manager)
        request = webob.Request.blank("/fake/tweedles/foo/action")
        request.method = 'POST'
        request.headers['Content-Type'] = 'application/json'
        request.body = jsonutils.dumps(dict(fooAction=True))
        response = request.get_response(app)
        self.assertEqual(200, response.status_int)
        self.assertEqual(extension_body, response.body)

    def test_controller_action_extension_late(self):
        # Need a dict for the body to convert to a ResponseObject
        controller = StubActionController(dict(foo=response_body))
        actions = dict(action='POST')
        res_ext = base_extensions.ResourceExtension('tweedles', controller,
                                                    member_actions=actions)

        ext_controller = StubLateExtensionController(extension_body)
        extension = StubControllerExtension()
        cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
                                                       ext_controller)
        manager = StubExtensionManager(resource_ext=res_ext,
                                       controller_ext=cont_ext)
        app = compute.APIRouter(manager)
        request = webob.Request.blank("/fake/tweedles/foo/action")
        request.method = 'POST'
        request.headers['Content-Type'] = 'application/json'
        request.body = jsonutils.dumps(dict(fooAction=True))
        response = request.get_response(app)
        self.assertEqual(200, response.status_int)
        self.assertEqual(extension_body, response.body)
class ExtensionsXMLSerializerTest(test.TestCase):
    """Tests for the XML templates that serialize extension metadata."""

    def test_serialize_extension(self):
        # Single-extension document: attributes, description and atom links
        # must all round-trip into the XML representation.
        serializer = base_extensions.ExtensionTemplate()
        data = {'extension': {
            'name': 'ext1',
            'namespace': 'http://docs.rack.com/servers/api/ext/pie/v1.0',
            'alias': 'RS-PIE',
            'updated': '2011-01-22T13:25:27-06:00',
            'description': 'Adds the capability to share an image.',
            'links': [{'rel': 'describedby',
                       'type': 'application/pdf',
                       'href': 'http://docs.rack.com/servers/api/ext/cs.pdf'},
                      {'rel': 'describedby',
                       'type': 'application/vnd.sun.wadl+xml',
                       'href': 'http://docs.rack.com/servers/api/ext/cs.wadl'}]}}

        xml = serializer.serialize(data)
        root = etree.XML(xml)
        ext_dict = data['extension']
        self.assertEqual(root.findtext('{0}description'.format(NS)),
                         ext_dict['description'])

        for key in ['name', 'namespace', 'alias', 'updated']:
            self.assertEqual(root.get(key), ext_dict[key])

        link_nodes = root.findall('{0}link'.format(ATOMNS))
        self.assertEqual(len(link_nodes), 2)
        for i, link in enumerate(ext_dict['links']):
            for key, value in link.items():
                self.assertEqual(link_nodes[i].get(key), value)

        xmlutil.validate_schema(root, 'extension')

    def test_serialize_extensions(self):
        serializer = base_extensions.ExtensionsTemplate()
        # BUG FIX: the first pdf link previously spelled the "type" key
        # twice ("application/pdf" immediately followed by the wadl type);
        # a Python dict literal keeps only the last duplicate, so the pdf
        # link was checked against the wrong content type.  Keep a single
        # "application/pdf" entry, mirroring the second extension's links.
        data = {"extensions": [{
                "name": "Public Image Extension",
                "namespace": "http://foo.com/api/ext/pie/v1.0",
                "alias": "RS-PIE",
                "updated": "2011-01-22T13:25:27-06:00",
                "description": "Adds the capability to share an image.",
                "links": [{"rel": "describedby",
                           "type": "application/pdf",
                           "href": "http://foo.com/api/ext/cs-pie.pdf"},
                          {"rel": "describedby",
                           "type": "application/vnd.sun.wadl+xml",
                           "href": "http://foo.com/api/ext/cs-pie.wadl"}]},
                {"name": "Cloud Block Storage",
                 "namespace": "http://foo.com/api/ext/cbs/v1.0",
                 "alias": "RS-CBS",
                 "updated": "2011-01-12T11:22:33-06:00",
                 "description": "Allows mounting cloud block storage.",
                 "links": [{"rel": "describedby",
                            "type": "application/pdf",
                            "href": "http://foo.com/api/ext/cs-cbs.pdf"},
                           {"rel": "describedby",
                            "type": "application/vnd.sun.wadl+xml",
                            "href": "http://foo.com/api/ext/cs-cbs.wadl"}]}]}

        xml = serializer.serialize(data)
        root = etree.XML(xml)
        ext_elems = root.findall('{0}extension'.format(NS))
        self.assertEqual(len(ext_elems), 2)
        # Renamed the outer loop variable so it no longer shadows the inner
        # enumerate index `i` used for the link nodes.
        for idx, ext_elem in enumerate(ext_elems):
            ext_dict = data['extensions'][idx]
            self.assertEqual(ext_elem.findtext('{0}description'.format(NS)),
                             ext_dict['description'])

            for key in ['name', 'namespace', 'alias', 'updated']:
                self.assertEqual(ext_elem.get(key), ext_dict[key])

            link_nodes = ext_elem.findall('{0}link'.format(ATOMNS))
            self.assertEqual(len(link_nodes), 2)
            for i, link in enumerate(ext_dict['links']):
                for key, value in link.items():
                    self.assertEqual(link_nodes[i].get(key), value)

        xmlutil.validate_schema(root, 'extensions')
| debian/python-nova/usr/lib/python2.7/dist-packages/nova/tests/api/openstack/compute/test_extensions.py | 90 | Provides access to Tweedle Beetles
vim: tabstop=4 shiftwidth=4 softtabstop=4 Copyright (c) 2011 X.commerce, a business unit of eBay Inc. Copyright 2011 OpenStack LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Make sure we have all the extensions, extra extensions being OK. Ensure all the timestamps are valid according to iso8601 Make sure that at least Fox in Sox is correct. Make sure we have all the extensions, extras extensions being OK. Make sure that at least Fox in Sox is correct. Don't need the serialization middleware here because we're not testing any serialization Doesn't exist only handle JSON responses Need a dict for the body to convert to a ResponseObject Need a dict for the body to convert to a ResponseObject Need a dict for the body to convert to a ResponseObject | 1,319 | en | 0.887135 |
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
from pyscf.dmrgscf import dmrgci
'''
Block code for active space N-particle density matrices.
'''
# N2 bond length (Angstrom) substituted into the geometry string below.
b = 1.2
mol = gto.M(
    verbose = 4,
    atom = 'N 0 0 0; N 0 0 %f'%b,
    basis = 'cc-pvdz',
    symmetry = True,
)
# Restricted Hartree-Fock reference wavefunction.
m = scf.RHF(mol)
m.kernel()
# CASSCF(8e, 8o) using the Block DMRG program as the active-space solver.
mc = mcscf.CASSCF(m, 8, 8)
mc.fcisolver = dmrgci.DMRGCI(mol)
mc.kernel()
# Active-space 1-, 2- and 3-particle density matrices, one call each.
# NOTE(review): the first argument (0) is presumably the state index --
# confirm against the dmrgci API.
dm1 = mc.fcisolver.make_rdm1(0, mc.ncas, mc.nelecas)
dm2 = mc.fcisolver.make_rdm12(0, mc.ncas, mc.nelecas)[1]
dm3 = mc.fcisolver.make_rdm123(0, mc.ncas, mc.nelecas)[2]
#
# or computing DMs all together in one DMRG call
#
dm1, dm2 = mc.fcisolver.make_rdm12(0, mc.ncas, mc.nelecas)
dm1, dm2, dm3 = mc.fcisolver.make_rdm123(0, mc.ncas, mc.nelecas)
| examples/dmrg/03-density_matrix.py | 808 | !/usr/bin/env python Author: Qiming Sun <osirpt.sun@gmail.com> or computing DMs all together in one DMRG call | 109 | en | 0.471611 |
import os
import re
import shutil
import sys
import urllib.error
import urllib.parse
import urllib.request
from zipfile import ZipFile
import helpers.config as config
from helpers.logger import Logger
class Updater:
    """Checks GitHub for a newer release of the configured project and, on
    request, downloads and installs it in place.

    Access through :meth:`Get` so a single instance (and a single log file
    handle) is shared across the frontend.
    """

    __instance = None

    @staticmethod
    def Get():
        """Return the shared Updater instance, creating it on first use."""
        if Updater.__instance is None:
            return Updater()
        return Updater.__instance

    def __init__(self):
        if Updater.__instance is not None:
            # A shared instance already exists; leave this throwaway object
            # uninitialized (original guard behavior preserved).
            return
        self.log = Logger("pyLaunch.Frontend.Updater", "frontend.log")
        self.DeleteFolders = ["src"]
        self.UpdateFolder = "updatefiles"
        # BUG FIX: previously neither Get() nor __init__ ever assigned
        # Updater.__instance, so the singleton was never cached and every
        # Get() built a brand new Updater (and a new Logger).
        Updater.__instance = self

    def Automatic(self) -> bool:
        """Interactively check for, download and install an update.

        :return: True only when an update was downloaded and installed.
        """
        # NOTE(review): CheckConnection() returns truthy message strings for
        # both the "skip check" and "offline" cases, so this guard only
        # stops on an explicit False -- confirm that is the intended flow.
        if not self.CheckConnection():
            return False
        UpdateAvailable = self.CheckVersions()
        if isinstance(UpdateAvailable, list):
            # BUG FIX: CheckVersions() returns [message, can_continue] on
            # failure; treating that list as "update available" crashed the
            # version join below.  Report the message and bail out instead.
            print(UpdateAvailable[0])
            return False
        if UpdateAvailable:
            # BUG FIX: the version components are ints after CheckVersions()
            # (see _GetVersionAsInt), so they must be stringified before
            # joining for display; '.'.join on ints raised TypeError.
            if UpdateAvailable:
                latest = '.'.join(str(v) for v in self.Versions[1])
                current = '.'.join(str(v) for v in self.Versions[0])
                print(f"An update is available! [v{latest}]")
                if 'n' not in input(f"Would you like to update from [{current}]? (Y/n) > "):
                    if self.DownloadUpdate():
                        return self.InstallUpdate()
        return False

    def CheckConnection(self) -> str:
        """Probe internet connectivity.

        :return: True when a connection is available, or a human-readable
            message string otherwise (note: the strings are truthy).
        """
        if config.CONFIGURATION['Update']['SkipCheck']:
            return "Skipping update check"
        try:
            urllib.request.urlopen('http://google.com')
            return True
        except Exception:
            return "Unable to connect to the internet"

    def DownloadUpdate(self) -> bool:
        """Download the configured branch zipball from GitHub and extract it
        into ``<pyLaunch root>/<UpdateFolder>``, with a console progress bar.

        :return: True on success, False when the download failed.
        """
        response = None
        try:
            response = urllib.request.urlopen(f"https://api.github.com/repos/{config.CONFIGURATION['Update']['Organization']}/{config.CONFIGURATION['Update']['Repository']}/zipball/{config.CONFIGURATION['Update']['Branch']}")
        except urllib.error.HTTPError as e:
            print(f"Unable to download update from GitHub: {e}")
            input("Press enter to continue...")
            return False
        if not os.path.exists(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}"):
            os.mkdir(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}")
        with open(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}{os.sep}gh_download.zip", "wb") as f:
            f.write(response.read())

        # Zip is downloaded, now extract
        os.chdir(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}")
        zipFileContent = dict()
        zipFileContentSize = 0
        with ZipFile("gh_download.zip", 'r') as zipFile:
            for name in zipFile.namelist():
                zipFileContent[name] = zipFile.getinfo(name).file_size
            zipFileContentSize = sum(zipFileContent.values())
            extractedContentSize = 0
            for zippedFileName, zippedFileSize in zipFileContent.items():
                UnzippedFilePath = os.path.abspath(f"{zippedFileName}")
                os.makedirs(os.path.dirname(UnzippedFilePath), exist_ok=True)
                if os.path.isfile(UnzippedFilePath):
                    # Already present from an earlier run; remove it from the
                    # total so the percentage still reaches 100.
                    zipFileContentSize -= zippedFileSize
                else:
                    zipFile.extract(zippedFileName, path="", pwd=None)
                    extractedContentSize += zippedFileSize
                try:
                    done = int(50*extractedContentSize/zipFileContentSize)
                    percentage = (extractedContentSize / zipFileContentSize) * 100
                except ZeroDivisionError:
                    done = 50
                    percentage = 100
                # NOTE(review): 'â' looks like a mojibake'd bar glyph
                # (probably intended as a block character); kept byte
                # identical deliberately.
                sys.stdout.write('\r[{}{}] {:.2f}%'.format('â' * done, '.' * (50-done), percentage))
                sys.stdout.flush()
        sys.stdout.write('\n')
        os.chdir(config.PATH_ROOT)
        return True

    def InstallUpdate(self) -> bool:
        """Delete the old project files and move the downloaded ones in.

        :return: True (unconditionally, matching previous behavior).
        """
        print("Installing new version")
        for file in os.listdir(config.CONFIGURATION['Launch']['ProjectRoot']):
            if os.path.isdir(f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{file}"):
                # Only directories named in DeleteFolders are removed.
                if file in self.DeleteFolders:
                    shutil.rmtree(f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{file}")
            else: # Files
                os.remove(f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{file}")
        # Old version is deleted
        for file in os.listdir(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}"):
            os.rename(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}{os.sep}{file}", f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{file}")
        shutil.rmtree(f"{config.PATH_ROOT}{os.sep}{self.UpdateFolder}")
        return True

    def CheckVersions(self):
        """Compare the installed version against the upstream one.

        Successful return: bool -- True when an update is available.
        Unsuccessful:      list [message: str, can_continue: bool]
        """
        self.Versions = self._GetVersions()
        if type(self.Versions[1]) == bool:
            # _GetVersions failed; pass its [message, continue] through.
            return self.Versions
        self.Versions[0] = self._GetVersionAsInt(self.Versions[0])
        self.Versions[1] = self._GetVersionAsInt(self.Versions[1])
        self.Difference = []
        for installed, checked in zip(self.Versions[0], self.Versions[1]):
            self.Difference.append(checked - installed)
        # First nonzero component (major, then minor, then patch) decides.
        for section in self.Difference:
            if section < 0: # When working on project and updating locally
                return False
            elif section > 0:
                return True
        return False

    def _GetVersions(self) -> list:
        """Read the local version file and fetch the upstream one.

        Successful return: list [InstalledVersion: str, CheckedVersion: str]
        Unsuccessful:      list [message: str, can_continue: bool]
        """
        if not os.path.exists(f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{config.CONFIGURATION['Update']['VersionPath']}"):
            # This means either the configuration is incorrect, or pyLaunch
            # isn't where it should be.  continue is False, because the
            # project cannot be launched.
            return [f"Unable to locate installed version at {config.CONFIGURATION['Update']['VersionPath']}", False]
        InstalledVersion = None # Local Version
        CheckedVersion = None # Version on GitHub
        with open(f"{config.CONFIGURATION['Launch']['ProjectRoot']}{os.sep}{config.CONFIGURATION['Update']['VersionPath']}", "r") as f:
            lines = f.readlines()
            InstalledVersion = self._GetVersionFromStr(lines)
        try:
            response = urllib.request.urlopen(f"https://raw.githubusercontent.com/{config.CONFIGURATION['Update']['Organization']}/{config.CONFIGURATION['Update']['Repository']}/{config.CONFIGURATION['Update']['Branch']}{config.CONFIGURATION['Update']['VersionPath']}")
            content = response.read().decode("UTF-8").split("\n")
            CheckedVersion = self._GetVersionFromStr(content)
        except urllib.error.HTTPError as e:
            # The Project URL is invalid (cannot find Org/Repo/Branch/
            # VersionPath) or raw.githubusercontent is down.  continue is
            # True: the project can still be launched without updating.
            return ["Project URL does not exist or githubusercontent is down", True]
        if CheckedVersion is None:
            # Some other error, just to be safe.
            return ["Unable to get current version from GitHub", True]
        return [InstalledVersion, CheckedVersion]

    def _GetVersionFromStr(self, lines: str) -> str:
        """Scan *lines* for the configured marker and extract 'x.y.z'.

        :return: the version string, or None when no line matches.
        """
        ver = None
        for line in lines:
            line = line.strip()
            if config.CONFIGURATION['Update']['Find'] in line:
                ver = line[len(config.CONFIGURATION['Update']['Find']):].strip('"')
                match = re.match(r"\d+\.\d+\.\d+", ver) # > #.#.#
                if match:
                    return ver[match.start():match.end()]
        return None

    def _GetVersionAsInt(self, version: str) -> list:
        """Convert 'x.y.z' into [x, y, z], keeping only digits per section
        (so '1a.2.3' -> [1, 2, 3])."""
        version = version.split(".")
        intVer = []
        for section in version:
            if section.isalnum():
                newSection = ""
                for char in section:
                    if char.isnumeric():
                        newSection += char
                section = newSection
            intVer.append(int(section))
        return intVer
| frontend/update.py | 8,093 | Unable to connect to the internet Zip is downloaded, now extract Files Old version is deleted Sucessful return: bool Unsuccessful: list[message: str, continue: bool] When working on project and updating locally Sucessful return: list[InstalledVersion: str, CheckedVersion: str] Unsucessful: list[message: str, continue: bool] This means either the configuration is incorrect, or pyLaunch isn't where it should be continue is False, because the project cannot be launched Local Version Version on GitHub The Project URL is invalid (cannot find Org/Repo/Branch/VersionPath) or, raw.githubusercontent is down, continue is True, the project can still be launched URL doesn't exist or something went wrong Some other error, just to be safe. > .. | 740 | en | 0.862953 |
# -*- coding:utf-8 -*-
#
# Author : TangHanYi
# E-mail : thydeyx@163.com
# Create Date : 2016-12-11 09:33:17 AM
# Last modified : 2016-12-11 10:48:50 AM
# File Name : Super_Pow.py
# Desc :
class Solution(object):
    def superPow(self, a, b):
        """Return a**b mod 1337, where b is given as a list of decimal digits.

        Fixes the original implementation, which exited its halving loop once
        only the last digit remained and never applied the final tmp**b[-1]
        factor (e.g. superPow(2, [3]) returned 1 instead of 8), and which
        relied on Python-2 ``/`` integer division. This version also no
        longer mutates the caller's digit list.
        """
        if len(b) == 0:
            return a  # degenerate case, preserved from the original
        ret = 1
        base = a % 1337
        for digit in b:
            # a^(10*x + d) == (a^x)^10 * a^d   (mod 1337)
            ret = pow(ret, 10, 1337) * pow(base, digit, 1337) % 1337
        return ret
if __name__ == "__main__":
    # Demo: a huge exponent given as a digit list. print() works on both
    # Python 2 and 3 (the original Py2 print statement is a SyntaxError on 3).
    s = Solution()
    a = 434576
    b = [6,4,0,4,4,0,3,9,4,9,9,6,2,0,2,5,4,2,1,7,9,2,5,9,5,2,5,6,2,9,2,4,4,4,8,0,7,4,9,1,3,9,9,7,1,1,2,7,5,5,4,5,7,1,4,4,4,9,1,8,0,5,4,6,2,3,7,9,9,6,0,2,7,1,0,0,3,4,7,8,2,4,3,9,5,9,6,1,8,9,0,9,6,4,5,8,9,4,7,8,1,9,1,0,3,3,1,6,9,8,6,1,4,0,3,0,1,9,1,0,0,1,1,6,8,8,5,7,3,4,6,6,6,9,6,9,2,9,7,3,0,3,7,4,5,0,4,7,1,8,7,1,1,0,9,9,0,4,9,5,1,5,3,7,4,0,8,8,1,5,1,1,8,8,6,4,0,2,1,3,0,0,4,4,2,6,5,2,0,4,0,1,9,3,0,5,5,8,5,7,5,7,0,4,7,6,0,8,1,1,1,3,3,8,7,5,4,3,9,6,7,9,0,9,5,0,6,0,1,2,9,6,1,0,2,8,8,2,6,9,5,0,3,8,8,0,3,4,5,5,0,5,6,0,6,1,3,2,4,4,6,3,2,7,5,5,8,4,9,6,3,5,6,8,3,6,9,9,0,6,4,1,1,2,3,7,4,6,2,0,0,0,5,5,0,1,0,8,7,9,4,2,6,3,1,0,9,2,1,2,8,7,5,0,9,8,9,5,5,1,5,7,4,3,2,4,6,4,2,3,6,8,5,4,1,8,4,1,0,7,3,9,4,8,1,4,8,0,1,5,4,9,3,8,2,7,2,8,4,6,1,2,4,8,6,8,9,3,1,9,0,6,8,5,6,1,1,4,2,2,0,8,1,5,6,5,2,0,3,8,8,6,2,4,7,9,2,6,4,3,5,4,1,6,1,7,7,2,2,1,7,4,9,0,9,7,6,3,9,1,2,7,8,4,2,7,5,6,3,9,2,0,6,3,8,7,1,8,2,5,9,9,9,1,9,8,8,7,1,8,9,5,7,9,2,9,6,7,8,1,9,0,3,5,3,4,4,4,2,6,9,3,5,8,4,7,8,5,4,2,5,5,7,2,6,9,4,4,9,2,5,0,2,1,7,5,5,1,2,9,8,3,2,5,4,9,4,2,4,9,4,9,6,4,3,3,5,7,7,6,9,5,8,3,8,5,1,3,9,3,2,7,8,6,4,2,5,9,7,9,0,3,0,6,9,4,1,5,3,1,1,3,6,0,6,4,7,9,9,6,2,3,5,3,9,0,7,7,1,4,6,1,0,9,9,9,5,1,6,8,2,8,1,0,0,0,6,9,9,5,6,4,0,1,9,9,3,6,8,4,3,7,5,3,6,7,4,1,0,1,9,4,1,3,4,1,5,0,2,6,7,8,0,9,2,1,0,7,8,9,2,1,6,9,6,2,6,0,5,8,1,6,2,2,9,6,5,6,8,8,3,7,8,5,6,0,7,7,8,5,6,2,8,2,1,4,6,0,4,1,8,6,7,1,8,9,9,4,5,0,4,8,9,2,6,6,5,3,5,5,8,3,7,6,7,0,0,3,2,4,6,3,2,5,6,1,4,5,7,2,7,1,2,7,3,8,3,8,1,0,5,1,3,2,9,0,5,1,3,7,8,1,0,0,6,6,3,3,4,0,7,1,3,9,0,7,8,5,7,1,5,3,3,8,7,4,0,2,6,5,2,4,6,2,4,5,1,8,8,7,0,5,0,4,6,1,3,4,6,0,8,2,5,3,2,5,7,3,7,5,8,1,9,7,6,6,2,7,6,0,6,6,7,6,2,3,7,5,0,6,8,8,0,5,3,2,0,0,7,0,8,8,1,7,5,7,5,7,6,1,7,4,0,4,1,2,9,0,8,9,6,6,9,6,1,2,1,4,5,8,4,3,6,7,2,3,5,8,0,3,9,7,8,9,3,1,2,5,1,2,4,0,8,6,8,1,8,9,5,5,0,1,0,8,9,3,2,6,1,4,9,2,2,9,4,7,0,8,2,4,0,9,6,0,7,4,3,5,6,1,3,8,2,3,8,1,6,2,7,9,7,9,4,1,0,0,0,1,8,3,7,0,4,3,2,1,9,5,8,7,6,1,5,1,7,6,2,5,8,2,7,5,1,1,8,3,1,9,4,1,4,3,1,0,8,5,1,0,0,1,7,9,5,5,0,2,1,2,9,1,6,6,9,9,9,7,3,0,6,9,3,0,3,6,0,3,1,3
,3,2,7]
    print(s.superPow(a, b))
| Super_Pow.py | 2,705 | -*- coding:utf-8 -*- Author : TangHanYi E-mail : thydeyx@163.com Create Date : 2016-12-11 09:33:17 AM Last modified : 2016-12-11 10:48:50 AM File Name : Super_Pow.py Desc : | 201 | en | 0.302557 |
"""
Création des images pour la tâche de détermination numérique
(pour évaluer l'impact de la configuration sur le subitizing)
Victor ANTOINE - victor.antoine@ens.fr
"""
import pygame
from random import sample
from numpy import random, sort
from os import path
from itertools import product
# Window dimensions in pixels.
W, H = 960, 540
pygame.init()
screen = pygame.display.set_mode((W, H), pygame.DOUBLEBUF)
screen.fill((0, 0, 0))
# --- Images with dots in a random layout ---
# Random grid origin, kept at least 50 px away from the window edges.
origin_x, origin_y = random.randint(50, 910), random.randint(50, 490)
# The original line bound ONE shared list to two names, and the second name
# ("list_coords_random_y") was a typo never used again. Bind two independent
# lists under the names actually used below.
list_coord_random_x, list_coord_random_y = [], []
def create_liste_coord_random(axe, origin):
    """Return sorted candidate coordinates spaced 80 px apart around *origin*.

    Walks upward while the previous value stays <= axe - 160 and downward
    while the previous value stays >= 110, then returns the sorted list.
    """
    coords = [origin]
    step_up = origin
    while step_up <= axe - 160:
        step_up += 80
        coords.append(step_up)
    step_down = origin
    while step_down >= 110:
        step_down -= 80
        coords.append(step_down)
    return list(sort(coords))
# Build the candidate grid from the random origin, spanning the window.
list_coord_random_x = create_liste_coord_random(W, origin_x)
list_coord_random_y = create_liste_coord_random(H, origin_y)
system_coord_random = list(product(list_coord_random_x, list_coord_random_y))

# 10 versions of every dot count 1..10: sample distinct grid positions, draw
# white dots (radius 30) on black, and save to pictures/random/<count>_<version>.png.
# (range() is already iterable; the redundant list() wrappers were removed.)
for version in range(1, 11):
    for points_number in range(1, 11):
        screen.fill((0, 0, 0))
        for (x, y) in sample(system_coord_random, points_number):
            pygame.draw.circle(screen, (255, 255, 255), (x, y), 30, 0)
        pygame.image.save(screen, path.join("pictures", "random",
                                            str(points_number) + "_" + str(version) + ".png"))
# create the images with dots in a configurational (dice-like) layout
def create_slot_coord_config(top, left):
    """Return the five dice-style slot coordinates of one window cell.

    The cell is anchored at (top, left); slot offsets are scaled from the
    global window size W x H. Slots follow the 5-face-of-a-die pattern.
    """
    step_x = (W - 270) / 8
    step_y = (H - 270) / 4
    return [(top + col * step_x, left + row * step_y)
            for (col, row) in [(1, 1), (3, 1), (2, 2), (1, 3), (3, 3)]]
# Three cells (left / middle / right third of the window).
coord_left_side = create_slot_coord_config(130, 130)
coord_mid_side = create_slot_coord_config(303, 130)
coord_right_side = create_slot_coord_config(475, 130)

system_coord_config = []
# Dice-style slot indices used for 1..5 dots within one cell.
position = [[2], [1, 3], [1, 2, 3], [0, 1, 3, 4], [0, 1, 2, 3, 4]]
for number in range(1, 11):
    list_coord = []
    if number <= 5:
        # 1-5 dots: middle cell only.
        positions = position[number - 1]
        for circle in positions:
            list_coord.append(coord_mid_side[circle])
        system_coord_config.append(list_coord)
    else:
        # 6-10 dots: a full 5-dot left cell plus (number - 5) dots on the right.
        for circle in position[4]:
            list_coord.append(coord_left_side[circle])
        positions = position[number - 6]
        for circle in positions:
            list_coord.append(coord_right_side[circle])
        system_coord_config.append(list_coord)

# Draw each configuration and save it as pictures/config/<n>.png.
# enumerate(..., start=1) replaces the original hand-maintained counter.
for number_index, number in enumerate(system_coord_config, start=1):
    screen.fill((0, 0, 0))
    for (x, y) in number:
        pygame.draw.circle(screen, (255, 255, 255), (int(x), int(y)), 30, 0)
    pygame.image.save(screen, path.join("pictures", "config",
                                        str(number_index) + ".png"))
| create_pictures.py | 2,966 | Création des images pour la tâche de détermination numérique
(pour évaluer l'impact de la configuration sur le subitizing)
Victor ANTOINE - victor.antoine@ens.fr
création des images en disposition aléatoirecréation des images en dispostion configurationnelle | 260 | fr | 0.970843 |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
from xmlrpc.client import ServerProxy
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QPushButton
from electrum_zcash import bitcoin, util
from electrum_zcash import transaction
from electrum_zcash.plugins import BasePlugin, hook
from electrum_zcash.i18n import _
from electrum_zcash.wallet import Multisig_Wallet
from electrum_zcash.util import bh2u, bfh
from electrum_zcash_gui.qt.transaction_dialog import show_transaction
import sys
import traceback
# Shared XML-RPC client for the public cosigner pool; allow_none lets the
# server send/accept nil (e.g. "no message waiting").
server = ServerProxy('https://cosigner.electrum.org/', allow_none=True)
class Listener(util.DaemonThread):
    """Background daemon thread that polls the cosigner pool for messages
    addressed to any of our keyhashes and forwards them to the GUI thread
    via the parent's Qt signal."""

    def __init__(self, parent):
        util.DaemonThread.__init__(self)
        self.daemon = True
        self.parent = parent
        self.received = set()  # keyhashes already fetched this session
        self.keyhashes = []    # keyhashes to poll for

    def set_keyhashes(self, keyhashes):
        self.keyhashes = keyhashes

    def clear(self, keyhash):
        """Delete the pool entry for *keyhash* and allow it to be re-fetched."""
        server.delete(keyhash)
        self.received.remove(keyhash)

    def run(self):
        while self.running:
            if not self.keyhashes:
                time.sleep(2)
                continue
            for keyhash in self.keyhashes:
                if keyhash in self.received:
                    continue
                try:
                    message = server.get(keyhash)
                except Exception:
                    # Network/XML-RPC failure: back off and retry.
                    # (The unused "as e" binding was removed.)
                    self.print_error("cannot contact cosigner pool")
                    time.sleep(30)
                    continue
                if message:
                    self.received.add(keyhash)
                    self.print_error("received message for", keyhash)
                    self.parent.obj.cosigner_receive_signal.emit(
                        keyhash, message)
            # poll every 30 seconds
            time.sleep(30)
class QReceiveSignalObject(QObject):
    """Qt signal holder used to marshal pool messages from the Listener
    thread onto the GUI thread (args: keyhash, encrypted message)."""
    cosigner_receive_signal = pyqtSignal(object, object)
class Plugin(BasePlugin):
    """Cosigner-pool plugin: exchanges partially signed multisig transactions
    between cosigners through the central XML-RPC relay.

    Outgoing transactions are encrypted to each cosigner's derived key K and
    pushed to the pool; the Listener thread polls the pool and delivers
    incoming transactions through a Qt signal handled by on_receive().
    """

    def __init__(self, parent, config, name):
        BasePlugin.__init__(self, parent, config, name)
        self.listener = None
        self.obj = QReceiveSignalObject()
        self.obj.cosigner_receive_signal.connect(self.on_receive)
        # (key, keyhash, window) tuples for keystores we can sign with.
        self.keys = []
        # (window, xpub, K, keyhash) tuples for watch-only cosigner keystores.
        self.cosigner_list = []

    @hook
    def init_qt(self, gui):
        # Pick up windows that were already open when the plugin loaded.
        for window in gui.windows:
            self.on_new_window(window)

    @hook
    def on_new_window(self, window):
        self.update(window)

    @hook
    def on_close_window(self, window):
        self.update(window)

    def is_available(self):
        return True

    def update(self, window):
        """Rebuild the signing/cosigner key lists for *window*'s wallet and
        start or stop the polling listener. Non-multisig wallets are ignored.
        """
        wallet = window.wallet
        if type(wallet) != Multisig_Wallet:
            return
        if self.listener is None:
            self.print_error("starting listener")
            self.listener = Listener(self)
            self.listener.start()
        elif self.listener:
            # NOTE(review): this branch stops the listener on every second
            # update() call for a multisig wallet (start/stop toggle) —
            # confirm against upstream Electrum before relying on it.
            self.print_error("shutting down listener")
            self.listener.stop()
            self.listener = None
        self.keys = []
        self.cosigner_list = []
        for key, keystore in wallet.keystores.items():
            xpub = keystore.get_master_public_key()
            # K: raw public key bytes from the xpub; its hash addresses the
            # pool mailbox for this cosigner.
            K = bitcoin.deserialize_xpub(xpub)[-1]
            _hash = bh2u(bitcoin.Hash(K))
            if not keystore.is_watching_only():
                self.keys.append((key, _hash, window))
            else:
                self.cosigner_list.append((window, xpub, K, _hash))
        if self.listener:
            self.listener.set_keyhashes([t[1] for t in self.keys])

    @hook
    def transaction_dialog(self, d):
        # Inject a "Send to cosigner" button into the transaction dialog.
        d.cosigner_send_button = b = QPushButton(_("Send to cosigner"))
        b.clicked.connect(lambda: self.do_send(d.tx))
        d.buttons.insert(0, b)
        self.transaction_dialog_update(d)

    @hook
    def transaction_dialog_update(self, d):
        # Show the button only while the tx still needs another signature and
        # at least one known cosigner of this wallet can provide it.
        if d.tx.is_complete() or d.wallet.can_sign(d.tx):
            d.cosigner_send_button.hide()
            return
        for window, xpub, K, _hash in self.cosigner_list:
            if window.wallet == d.wallet and self.cosigner_can_sign(d.tx, xpub):
                d.cosigner_send_button.show()
                break
        else:
            d.cosigner_send_button.hide()

    def cosigner_can_sign(self, tx, cosigner_xpub):
        """Return True when *cosigner_xpub* appears among the xpubs referenced
        by the transaction's inputs."""
        from electrum_zcash.keystore import is_xpubkey, parse_xpubkey
        xpub_set = set([])
        for txin in tx.inputs():
            for x_pubkey in txin['x_pubkeys']:
                if is_xpubkey(x_pubkey):
                    xpub, s = parse_xpubkey(x_pubkey)
                    xpub_set.add(xpub)
        return cosigner_xpub in xpub_set

    def do_send(self, tx):
        """Encrypt *tx* to every cosigner that can still sign it and push the
        ciphertext to the pool under that cosigner's keyhash."""
        for window, xpub, K, _hash in self.cosigner_list:
            if not self.cosigner_can_sign(tx, xpub):
                continue
            raw_tx_bytes = bfh(str(tx))
            # ECIES-style encryption to the cosigner's public key K.
            message = bitcoin.encrypt_message(raw_tx_bytes, bh2u(K)).decode('ascii')
            try:
                server.put(_hash, message)
            except Exception as e:
                traceback.print_exc(file=sys.stdout)
                window.show_message("Failed to send transaction to cosigning pool.")
                return
            window.show_message("Your transaction was sent to the cosigning pool.\nOpen your cosigner wallet to retrieve it.")

    def on_receive(self, keyhash, message):
        """GUI-thread handler for a pool message addressed to *keyhash*:
        prompt for the wallet password if needed, decrypt with our master
        private key, clear the pool entry and open the transaction dialog."""
        self.print_error("signal arrived for", keyhash)
        # Find the window whose signing key matches the addressed keyhash.
        for key, _hash, window in self.keys:
            if _hash == keyhash:
                break
        else:
            self.print_error("keyhash not found")
            return
        wallet = window.wallet
        if wallet.has_keystore_encryption():
            password = window.password_dialog('An encrypted transaction was retrieved from cosigning pool.\nPlease enter your password to decrypt it.')
            if not password:
                return
        else:
            password = None
            if not window.question(_("An encrypted transaction was retrieved from cosigning pool.\nDo you want to open it now?")):
                return
        xprv = wallet.keystore.get_master_private_key(password)
        if not xprv:
            return
        try:
            # Derive our EC private key from the xprv and decrypt the message.
            k = bh2u(bitcoin.deserialize_xprv(xprv)[-1])
            EC = bitcoin.EC_KEY(bfh(k))
            message = bh2u(EC.decrypt_message(message))
        except Exception as e:
            traceback.print_exc(file=sys.stdout)
            window.show_message(str(e))
            return
        self.listener.clear(keyhash)
        tx = transaction.Transaction(message)
        show_transaction(tx, window, prompt_if_unsaved=True)
| plugins/cosigner_pool/qt.py | 7,739 | !/usr/bin/env python Electrum - lightweight Bitcoin client Copyright (C) 2014 Thomas Voegtlin Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. poll every 30 seconds | 1,136 | en | 0.860996 |
"""mgear.core.vector test"""
def test_get_distance(run_with_maya_pymel, setup_path):
    # Maya imports
    from maya import OpenMaya
    import pymel.core as pm

    # mGear imports
    from mgear.core.vector import get_distance

    # Plain lists one unit apart on X.
    point_a, point_b = [0, 0, 0], [1, 0, 0]
    assert get_distance(point_a, point_b) == 1.0

    # MVectors one unit apart on X.
    point_a = OpenMaya.MVector(0, 0, 0)
    point_b = OpenMaya.MVector(1, 0, 0)
    assert get_distance(point_a, point_b) == 1.0

    # Transforms: compare against Maya's own distanceBetween node.
    pm.newFile(force=True)
    node_a = pm.createNode("transform")
    node_b = pm.createNode("transform")
    node_b.translate.set(10, 5, 7)
    measure = pm.createNode("distanceBetween")
    node_a.worldMatrix >> measure.inMatrix1
    node_b.worldMatrix >> measure.inMatrix2
    expected = measure.distance.get()
    assert get_distance(node_a, node_b) == expected
def test_get_plane_binormal(run_with_maya_pymel, setup_path):
    # Maya imports
    from maya import OpenMaya

    # mGear imports
    from mgear.core.vector import get_plane_binormal

    origin = OpenMaya.MVector(0, 0, 0)
    minus_x = OpenMaya.MVector(-1, 0, 0)
    plus_z = OpenMaya.MVector(0, 0, 1)

    binormal = get_plane_binormal(origin, minus_x, plus_z)
    assert type(binormal) == OpenMaya.MVector
    assert [binormal[0], binormal[1], binormal[2]] == [0, 0, -1]
def test_get_plane_normal(run_with_maya_pymel, setup_path):
    # Maya imports
    from maya import OpenMaya
    import pymel.core as pm

    # mGear imports
    from mgear.core.vector import get_plane_normal

    # MVector inputs spanning the XZ plane -> normal points along +Y.
    origin = OpenMaya.MVector(0, 0, 0)
    plus_x = OpenMaya.MVector(1, 0, 0)
    plus_z = OpenMaya.MVector(0, 0, 1)
    normal = get_plane_normal(origin, plus_x, plus_z)
    assert type(normal) == OpenMaya.MVector
    assert [normal[0], normal[1], normal[2]] == [0, 1, 0]

    # Transform inputs with the X point mirrored -> normal flips to -Y.
    pm.newFile(force=True)
    node_a = pm.createNode("transform")
    node_b = pm.createNode("transform")
    node_c = pm.createNode("transform")
    node_b.translate.set(-1, 0, 0)
    node_c.translate.set(0, 0, 1)
    normal = get_plane_normal(node_a, node_b, node_c)
    assert [normal[0], normal[1], normal[2]] == [0, -1, 0]

    # Same data as plain lists must give the same normal.
    normal = get_plane_normal(list(node_a.getTranslation()),
                              list(node_b.getTranslation()),
                              list(node_c.getTranslation()))
    assert [normal[0], normal[1], normal[2]] == [0, -1, 0]
def test_linear_interpolate(run_with_maya_pymel, setup_path):
    # Maya imports
    from maya import OpenMaya
    import pymel.core as pm

    # mGear imports
    from mgear.core.vector import linear_interpolate

    target = [2, 5, 8]

    # Default blend (midpoint) between plain lists.
    midpoint = linear_interpolate([0, 0, 0], target)
    assert type(midpoint) == OpenMaya.MVector
    assert [midpoint[0], midpoint[1], midpoint[2]] == [1, 2.5, 4]

    # Midpoint between two transform nodes.
    pm.newFile(force=True)
    node_a = pm.createNode("transform")
    node_b = pm.createNode("transform")
    node_b.translate.set(target[0], target[1], target[2])
    midpoint = linear_interpolate(node_a, node_b)
    assert [midpoint[0], midpoint[1], midpoint[2]] == [1, 2.5, 4]
Maya imports mGear imports Maya imports mGear imports Maya imports mGear imports Maya imports mGear imports | 132 | en | 0.365017 |
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from sklearn.metrics import accuracy_score
from sklearn.model_selection import TimeSeriesSplit
from keras.layers import Dropout
from keras.layers import Dense, LSTM
from keras.models import Sequential
import numpy as np
from sklearn.preprocessing import StandardScaler
from matplotlib import pyplot as plt
from datetime import timedelta
from datetime import datetime
import pandas as pd
import datetime
from dateutil.relativedelta import relativedelta
from pandas_datareader import data as pdr
from sklearn.metrics import r2_score, mean_squared_error
# --- Data acquisition ---------------------------------------------------------
# (The original Japanese comments were mojibake-garbled, with several comment
#  continuations landing on bare lines; rewritten in English.)
# Offline alternative: df = pd.read_csv('finance_dataset.csv')
code = '6976'  # Tokyo-exchange ticker (other candidate in the original: '6758')

# Training window: from 700 days ago up to yesterday. Today's bar is still
# changing during the session, so it is excluded.
start_train = datetime.date.today() + relativedelta(days=-700)
end_train = datetime.date.today() + relativedelta(days=-1)

# Daily OHLCV for the stock itself, the Dow Jones index and the Nikkei 225.
data = pdr.get_data_yahoo(f'{code}.T', start_train, end_train)
Dow_df = pdr.get_data_yahoo('^DJI', start_train, end_train)
Nikkei_df = pdr.get_data_yahoo('^N225', start_train, end_train)
print(data.head())
# --- Dtype cleanup and weekday feature ----------------------------------------
# Column overview / missing-value check.
data.info()

# Move the DatetimeIndex back into a regular 'Date' column on every frame.
data.reset_index(drop=False, inplace=True)
Dow_df.reset_index(drop=False, inplace=True)
Nikkei_df.reset_index(drop=False, inplace=True)

# Make sure 'Date' is datetime64 so the .dt calendar accessors work.
data['Date'] = pd.to_datetime(data['Date'])
Dow_df['Date'] = pd.to_datetime(Dow_df['Date'])
Nikkei_df['Date'] = pd.to_datetime(Nikkei_df['Date'])
data.info()

# Weekday feature: Mon=0 .. Sun=6 (the exchange is closed on 5 and 6).
data['weekday'] = data['Date'].dt.weekday
print(data)
png
'weekday'ã®ã«ã©ã ã远å ãã0ãã4ã®æ°åãå
¥åãããŠããäºãããããŸãã
ãŸããæ ªååŒã®è¡ãããªãåææ¥: 5ãšæ¥ææ¥: 6ã®ããŒã¿ã¯ååšããŠããªãäºãããããŸãã
次ã«ã1965幎1æ5æ¥ã®é±ãåºæºã«äœåšç®ãšãªãã®ãã®æ
å ±ã远å ããŸãã
1965幎1æ5æ¥ãç«ææ¥ã§ããäºããããã®ã§ããã®é±ã®é ã®æææ¥ãšãªã1965幎1æ4æ¥ãåºæºãšããŠãäœæ¥ç®ãšãªãã®ãã®æ
å ±ã远å ããŸãã
datetimeã®ã©ã€ãã©ãªããdatetimeãštimedeltaãã€ã³ããŒãããŸãã
åºæºãšãªãæ¥ã®1965幎1æ4æ¥ãdatetime颿°ã䜿ã£ãŠã倿°startã«ä»£å
¥ããŸãã
dfã®'Date'ã«ã©ã ããåºæºã®startãšåŒãç®ãããããšã§ãäœæ¥ç®ãšãªãã®ããèšç®ããŸãããããtimedelta颿°ãçšããŠïŒé±éãšãªã7æ¥åšæã§å²ãããšã§äœé±ç®ããèšç®ããäºãã§ããŸãã
timedelta(weeks=1)ãšèšå®ããããšã§ïŒé±éãšãªããŸãã
ãã®èšç®çµæã'weeks'ãšããã«ã©ã ãdfã«è¿œå ããŸããå®è¡ããããšã§åãã®é±ã¯0ããå§ãŸãæåŸã®2021幎10æ18æ¥ã®é±ã¯2963ãšãªã£ãŠããäºãåãããŸãã
'''
# åãã®æææ¥ãšãªã1965/1/4ãåºæºã«æ¥æ°ã远å
start = start_train+relativedelta(days=-2) # datetime(1965, 1, 4)
start = pd.to_datetime(start)
#data['weeks'] = (data['Date'] - start) // timedelta(weeks=1)
#data['Dowweeks'] = (Dow_df['Date'] - start) // timedelta(weeks=1)
#data['Nikkiweeks'] = (Nikkei_df['Date'] - start) // timedelta(weeks=1)
#print(data)
#data.to_csv('data/stocks_price_data/KinoCode_data.csv') # csvæžãåºã
'''
png
æ¥ä»ã®æ
å ±ã®'Date', 'weekday', 'weeks'ã®ã«ã©ã ãåãããŠè¡šç€ºãããŠããã®ã§ãèŠæ ããæŽçããç®çã§ãäžæŠã«ã©ã ã®äžŠã³æ¿ããè¡ããŸãã
å
é ã«æ¥ä»ã®æ
å ±ããŸãšããŸãã
äžŠã³æ¿ãããé åºã§ã«ã©ã ãèšè¿°ãdfã眮ãæããŸãã
å®è¡ããäºã§ãäžŠã³æ¿ããããŠããäºãããããŸãã
'''
# Closeã®åã®ããŒã¿ã®ã¿ãåãåºã
data['NikkiClose'] = Nikkei_df['Close'].values
# ã«ã©ã ã®äžŠã¹æ¿ã
df = data[['Date', 'weekday','High', 'Low', 'Open', 'Close', 'NikkiClose']]
#df_dow = Dow_df[['Date', 'weeks', 'weekday', 'High', 'Low', 'Open', 'Close']]
#df_nikkei = Nikkei_df[['Date', 'weeks', 'weekday', 'High', 'Low', 'Open', 'Close']]
print(df)
df.to_csv('data/stocks_price_data/KinoCode_data.csv') # csvæžãåºã
'''
png
ä»åã®ãããªæç³»åããŒã¿ãåŠçããéã«ã¯ãset_indexã¡ãœããã䜿ã£ãŠindexãæ¥ä»ã«èšå®ããŸãã念ã®ããã«sort_valuesã¡ãœããã䜿ã£ãŠæ¥ä»é ã«äžŠã³æ¿ããè¡ããŸããå®è¡ããäºã§ãæ¥ä»ã®'Date'ãindexã«èšå®ãããŠããäºãããããŸãã
'''
# ããŒã¿ã®äžŠã³æ¿ã
df.sort_values(by='Date', ascending=True, inplace=True)
# æ¥ä»ãã€ã³ããã¯ã¹ã«ã»ãã
df.set_index(keys='Date', inplace=True)
print(df)
# --- Target variable ----------------------------------------------------------
# Shift everything one row up so each row can see the NEXT day's values.
df_shift = df.shift(-1)

# Next-day close minus today's close (last row becomes NaN).
df['delta_Close'] = df_shift['Close'] - df['Close']

# Binary target 'Up': 1 when the next close is higher, else 0.
# .loc replaces the original chained assignment df['Up'][mask] = 1, which
# raises SettingWithCopyWarning and may silently write to a temporary copy.
df['Up'] = 0
df.loc[df['delta_Close'] > 0, 'Up'] = 1
df = df.drop('delta_Close', axis=1)
# --- Quick visual sanity check ------------------------------------------------
# Line plot of the four price columns over the whole period.
df_new = df[['Open', 'High', 'Low', 'Close']]
df_new.plot(kind='line')
plt.show()
# --- Feature engineering ------------------------------------------------------
# Day-over-day close ratio: (close_t - close_{t-1}) / close_{t-1}.
df_shift = df.shift(1)
df['Close_ratio'] = (df['Close'] - df_shift['Close']) / df_shift['Close']

# Candlestick body: open minus close for the day.
df['Body'] = df['Open'] - df['Close']
# --- Column selection ---------------------------------------------------------
# The original notebook also sketched per-week day counts here, filtering to
# complete 5-trading-day weeks and dropping Fridays, but that code was left
# commented out (the 'weeks' column it needs is never created), so only the
# final column selection survives.
df = df[['weekday', 'High', 'Low', 'Open', 'Close', 'Close_ratio', 'Body', 'Up']]
# --- Train / validation split -------------------------------------------------
# Train on 2018-2020, validate on 2021 onward (date-string slices work because
# the frame is indexed by a sorted DatetimeIndex).
df_train = df['2018-01-01': '2020-12-31']
df_val = df['2021-01-01':]

# Explanatory columns (shared by both splits) vs the binary target 'Up'.
feature_cols = ['weekday', 'High', 'Low', 'Open', 'Close', 'Close_ratio', 'Body']

X_train = df_train[feature_cols]
y_train = df_train['Up']
print(X_train)
print(y_train)

X_val = df_val[feature_cols]
y_val = df_val['Up']
print(X_val)
print(y_val)

# Visual check that the split point (2021) sits where expected.
X_train['Close'].plot(kind='line')
X_val['Close'].plot(kind='line')
plt.legend(['X_train', 'X_val'])
plt.show()
# --- Standardization ----------------------------------------------------------
# Price level and volatility differ widely across the window (e.g. roughly
# 16,000 in April 2020 vs more than 25,000 by December), which hurts learning.
# Each 4-day block is therefore standardized independently (zero mean, unit
# variance, values mostly within +-3), and since the LSTM consumes numpy
# arrays, the helper below converts the frame as it slices.
def std_to_np(df):
    """Slice *df* into consecutive 4-row windows, standardize every window
    independently (zero mean / unit variance per column via StandardScaler)
    and stack the windows into one 3-D numpy array."""
    raw = np.array(df)
    windows = []
    for start_row in range(0, len(raw) - 3, 4):
        window = raw[start_row:start_row + 4]
        windows.append(StandardScaler().fit_transform(window))
    return np.array(windows)
# --- Apply the per-window standardization -------------------------------------
# Convert both feature frames into stacks of standardized 4-day windows.
X_train_np_array = std_to_np(X_train)
X_val_np_array = std_to_np(X_val)

# Expected shapes: (n_windows, 4 days, 7 features).
print(X_train_np_array.shape)
print(X_val_np_array.shape)
# --- Align targets with the 4-day windows -------------------------------------
print(y_train)
print(y_val)

# Keep every 4th target (the Thursday row that closes each Monday-Thursday
# window) so the target count matches the number of feature windows.
y_train_new = y_train[3::4]
y_val_new = y_val[3::4]
print(y_train_new)
print(y_val_new)
# --- Model construction -------------------------------------------------------
# An LSTM network (keras) is used for the prediction; hyper-parameter tuning
# is out of scope here. If tensorflow is not installed yet, run:
#!pip install tensorflow
# lstm_comp below builds and compiles a binary classifier:
# LSTM(256) -> Dropout -> Dense(256, relu) -> Dropout -> Dense(1, sigmoid).
def lstm_comp(df):
    """Build and compile an LSTM binary classifier sized to ``df``.

    Parameters
    ----------
    df : array of shape (samples, timesteps, features)
        Only ``df.shape[1]`` and ``df.shape[2]`` are read, to size the
        network's input layer.

    Returns
    -------
    A compiled keras ``Sequential`` model (binary cross-entropy loss,
    Adam optimizer, accuracy metric) whose sigmoid output is the
    probability in [0, 1] that the price rises.
    """
    # Input LSTM layer -> dropout -> dense hidden layer -> dropout
    # -> single sigmoid unit for the up/down (1/0) prediction.
    # (The original comment here had been split by an encoding error,
    # leaving a continuation line without '#'; repaired.)
    model = Sequential()
    model.add(LSTM(256, activation='relu', batch_input_shape=(
        None, df.shape[1], df.shape[2])))
    model.add(Dropout(0.2))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))
    # Compile the network for binary classification.
    model.compile(loss='binary_crossentropy',
                  optimizer='adam', metrics=['accuracy'])
    return model
'''
次ã«ãäœæããã¢ãã«ãæ¬åœã«äºæž¬ã«äœ¿çšã§ããã®ãã確èªããæ¹æ³ãšããŠã亀差æ€èšŒãããŸããããæ£è§£ã®åãã£ãŠããåŠç¿ããŒã¿ãè€æ°ã«åå²ããŠã亀差æ€èšŒãè¡ãã®ãæå¹ã§ãã
亀差æ€èšŒã®ææ³ã«ã¯è€æ°ååšããŸãããä»åã®æ§ãªæç³»åã®ããŒã¿ã§éå»ã®ããŒã¿ãçšããŠæªæ¥ãäºæž¬ããå Žåã¯ãæç³»ååå²ã®äº€å·®æ€èšŒãçšããã®ãäžè¬çã§ãã
ä»åã¯åŠç¿ããŒã¿ã5åå²ããåŠç¿ããŒã¿ãšæ€èšŒããŒã¿ãå³ã®æ§ãªã€ã¡ãŒãžã®çµã¿åããã§åèš4åã®åŠç¿ãäºæž¬ãšç²ŸåºŠæ€èšŒãç¹°ãè¿ããŸãããããã®ã¹ã³ã¢ã®å¹³åå€ãããã¢ãã«ãäºæž¬ã«äœ¿çšã§ãããã®å€æãè¡ããŸãã
ãã®ææ³ã§ã¯æ€èšŒããŒã¿ãããéå»ã®ããŒã¿ã®ã¿ãçšããŠåŠç¿ãè¡ãªããŸãã
png
ãŸããæç³»ååå²äº€å·®æ€èšŒãè¡ãããã®TimeSeriesSplitãšãäºæž¬çµæã®ç²ŸåºŠ(accuracy)ãç®åºããããã«accuracy_scoreãã€ã³ããŒãããŸãã
# æç³»ååå²ã®ããTimeSeriesSplitã®ã€ã³ããŒã
# accuracyç®åºã®ããaccuracy_scoreã®ã€ã³ããŒã
ã€ãã«ã4ååã®äº€å·®æ€èšŒã®çµæã代å
¥ãã空ã®ãªã¹ããäœæããŸãããããŠãTimeSeriesSplitã®ã€ã³ã¹ã¿ã³ã¹åãè¡ã倿°(tscv)ã«ä»£å
¥ããŸãã
'''
# Accumulator for the four cross-validation fold scores.
valid_scores = []
# Time-series splitter: 4 folds, each validation fold strictly later
# in time than its training data.
tscv = TimeSeriesSplit(n_splits=4)
'''
foræãçšããŠã亀差æ€èšŒã4åç¹°ãè¿ããŸãã
å
·äœçã«ã¯ãã®ãããªæ€èšŒã宿œããŸãã
splitã¡ãœãããçšããŠåŠç¿ããŒã¿ãåå²ãã亀差æ€èšŒçšã®åŠç¿ããŒã¿ãšæ€èšŒããŒã¿ãäœæ
å
ã«å®çŸ©ããlstm_comp颿°ããLSTMã¢ãã«ãäœæ
亀差æ€èšŒçšã®åŠç¿ããŒã¿ããåŠç¿
æ€èšŒããŒã¿ã®èª¬æå€æ°ãçšããŠäºæž¬
äºæž¬çµæã®2å€å
æ€èšŒããŒã¿ã®ç®ç倿°(æ£è§£ããŒã¿)ãçšããŠãäºæž¬çµæã®ç²ŸåºŠç®åºãšè¡šç€º
äºæž¬ç²ŸåºŠã®ã¹ã³ã¢ããªã¹ãã«æ ŒçŽ
'''
# Walk-forward cross-validation: for each fold, train on the earlier
# part of the data and score on the later part.
# NOTE(review): this loop rebinds X_train/y_train from the earlier
# train/validation split; later code uses X_train_np_array and
# y_train_new, so this looks harmless here — confirm.
for fold, (train_indices, valid_indices) in enumerate(tscv.split(X_train_np_array)):
    # Per-fold train/validation split of the windowed training data.
    X_train, X_valid = X_train_np_array[train_indices], X_train_np_array[valid_indices]
    y_train, y_valid = y_train_new[train_indices], y_train_new[valid_indices]
    # Build and compile a fresh LSTM sized to this fold's data.
    model = lstm_comp(X_train)
    # Train the model.
    hist = model.fit(X_train, y_train, epochs=10, batch_size=64)
    # Predict on the validation fold and binarize at the 0.5 threshold.
    y_valid_pred = model.predict(X_valid)
    y_valid_pred = np.where(y_valid_pred < 0.5, 0, 1)
    # Compute and report the fold's accuracy.
    # (Bug fix: the original message said "MAE", but accuracy_score is
    # what is actually computed here.)
    score = accuracy_score(y_valid, y_valid_pred)
    print(f'fold {fold} accuracy: {score}')
    # Keep the score for the cross-validation average.
    valid_scores.append(score)
# After the four folds finish, show each fold's accuracy and the mean
# (CV) score used to judge whether the model is usable.
print(f'valid_scores: {valid_scores}')
cv_score = np.mean(valid_scores)
print(f'CV score: {cv_score}')
'''
png
1åç®ïŒ0.541
2åç®ïŒ0.708
3åç®ïŒ0.541
4åç®ïŒ0.333
å¹³åïŒ0.531
ä»åã®ãããªäžãããäžãããã®2å€äºæž¬ã®å Žåãäžè¬çã«ã¯ã¹ã³ã¢ã0.5以äžã§ããã°ããçšåºŠäœ¿çšã§ãããšããç®å®ãšãªããŸãã
ç®åºããã¹ã³ã¢ãšå¹³åå€ããããã®ã¢ãã«ãããçšåºŠäœ¿çšã§ãããã®ãšå€æããŠæ¬¡ã«é²ã¿ãŸãããã
ã§ã¯ããã®ã¢ãã«ã«å¯ŸããŠã2018幎ãã2020幎ã®åŠç¿ããŒã¿ãçšããŠåŠç¿ãããŸãã
æµãã¯å
ã»ã©ã®äº€å·®æ€èšŒãšäŒŒãŠããŸãã
ãŸãã¯æšæºåããåŠç¿ããŒã¿ã§LSTMã¢ãã«ãäœæããŸãã
'''
# Build and compile the LSTM on the full windowed training array and
# assign it to `model`.
model = lstm_comp(X_train_np_array)
# Train the model on the whole 2018-2020 training set
# (10 epochs, batch size 64); training completes quickly.
result = model.fit(X_train_np_array, y_train_new, epochs=10, batch_size=64)
# Use the trained model to predict on the validation data and look at
# the first 10 raw sigmoid outputs (values in [0, 1]).
pred = model.predict(X_val_np_array)
pred[:10]
'''
ãã®ããã«äºæž¬ããçµæã衚瀺ãããŸãã
png
ãã®æ°å€ããäžãããäžãããã®0ãš1ã«å€æããŸããnumpyã®whereã¡ãœãããçšããŠ0.5ãè¶
ãããã®ã1ããã以å€ã0ãšä¿®æ£ããŸãããããŠå床å
é ã®10åã衚瀺ããŸãã
ããã§ãäžãããäžãããã®01ã©ã¡ããã®äºæž¬ãã§ããŸããã
'''
# Binarize the predictions at the 0.5 threshold: values nearer 1 mean
# the price is predicted to rise, nearer 0 that it is not.
pred = np.where(pred < 0.5, 0, 1)
# Check the first 10 binarized predictions.
pred[:10]
'''
png
次ã«ãäºæž¬ã¢ãã«ã®ç²ŸåºŠç¢ºèªãè¡ããŸãããã®äºæž¬çµæãå®éã®å€ãšãªãæ€èšŒããŒã¿ã®ç®ç倿°ãšæ¯èŒããæ£è§£çãèšç®ããŸããsklearnã®accuracy_scoreãšãã颿°ã䜿ãããšã§èšç®ãè¡ããŸãã
ãã®çµæã衚瀺ãããš57ïŒ
ã®æ£è§£çã§æãããšãããããŸããä»åã®æ§ãªæ ªäŸ¡ãäžãããäžãããã®2å€ã®äºæž¬ã§ã¯ãçŽæçã«äºæž¬ãè¡ãå Žå50ïŒ
ã®æ£è§£çãšãªããŸããæ©æ¢°åŠç¿ãçšããäºã§ãããè¶
ããæ£è§£çãšãªããŸããã
'''
# Accuracy of the binarized predictions against the validation targets.
print('accuracy = ', accuracy_score(y_true=y_val_new, y_pred=pred))
'''
# ã¢ãã«ã®ç²ŸåºŠãè©äŸ¡ãã
# 決å®ä¿æ°ãšRMSEãèšç®ãã
# 決å®ä¿æ°ã¯1.0ã«ãRMSEã¯0.0ã«è¿ãã»ã©ãã¢ãã«ã®ç²ŸåºŠã¯é«ã
r2_score = r2_score(y_test, predictions)
rmse = np.sqrt(mean_squared_error(y_test, predictions))
print(f'r2_score: {r2_score:.4f}')
print(f'rmse: {rmse:.4f}')
'''
'''
png
æåŸã«ãäºæž¬çµæãšæ£è§£çµæãæ··åè¡åãçšããŠç¢ºèªããŸãã
æ··åè¡åãšã¯ããã®ããã«2è¡2åã®è¡šã§ãçéœæ§ãçé°æ§ãåœéœæ§ãåœé°æ§ã®æ°ã衚ãããã®ã§ããä»åã¯ãäºæž¬ã0ã§çµæã0ãäºæž¬ã1ã§çµæã1ã§ããã°æ£è§£ã§ãã0ãšäºæž¬ããŠçµæã1ã1ãšäºæž¬ããŠçµæã0ãªãäžæ£è§£ãšããããšã«ãªããŸããå
šäœã®ç²ŸåºŠã ãã§ã¯ãªãã0ãš1ããããã®æ£è§£ã«å¯Ÿãã粟床ã確èªããããšãã§ããŸãã
jpg
æ··åè¡åãçæããããã«ãsklern.mericsããconfusion_matrixãšConfusionMatrixDisplayãã€ã³ããŒãããŸãã
ãŸããèŠèŠçã«ããããããæ§ã«ãããŒããããã§è¡šç€ºããŸãããã
ãã®ããã«ãæ£ããäºæž¬ãè¡ããŠããã®ã¯ãå³äžã®çéœæ§(TP)ãšå·Šäžã®çé°æ§(TN)ã§ããäºæž¬çµæãã0ã1ã®ã©ã¡ããã«æ¥µç«¯ã«åã£ãŠããåŸåã§ã¯ãªãããã§ãããæ£ããäºæž¬ã§ããŠããªããã®ãååšããŠããããšãããããŸããäºæž¬ç²ŸåºŠãæ¹åããããšã§ãåœéœæ§(FP)ãšåœé°æ§(FN)ã®æ°ãæžããããšãã§ããŸãã
'''
# Confusion matrix of validation targets vs. predictions, rendered as
# a heat map (requires sklearn's confusion_matrix and
# ConfusionMatrixDisplay to be imported elsewhere in this file).
cm = confusion_matrix(y_val_new, pred)
cmp = ConfusionMatrixDisplay(cm)
cmp.plot(cmap=plt.cm.Reds)
# Show the plot.
plt.show()
'''
ä»åã¯åºæ¬çãªç¹åŸŽéããæ©æ¢°åŠç¿ã¢ãã«ã®æ§ç¯æ¹æ³ã§äºæž¬ãè¡ããŸãããç¹åŸŽéã远å ããããšããåŠç¿ã¢ãã«ã®æ¹è¯ãè¡ãããšã§ãäºæž¬ç²ŸåºŠãåäžãããããšãå¯èœã§ãã
ãšã¯ãããããŒã¿ã®æéãå€ããã ãã§ã粟床ãå€ãããŸããå¿
ããã€ãäºæž¬ãããŸãããããã§ã¯ãããŸããã®ã§ã泚æãã ããã
'''
'''
Graphics parameter
'''
# --- Plotting section: overview figure with four panels ---------------
# Extract only the Close column values.
TergetData = data['Close'].values
# Restore Date from the index back to a column, then extract it.
data = data.reset_index(drop=False)
TergetDate = data['Date'].values
# Reshape both series into column vectors.
TergetData = TergetData.reshape(-1, 1)  # float64
TergetDate = TergetDate.reshape(-1, 1)  # datetime64[ns]
# Plot the last k points of the loaded Nikkei-average series.
k = 700  # number of points to display
i = TergetData.shape[0]-k
j = TergetData.shape[0]
xdata = TergetDate[i:j]
ydata = TergetData[i:j]
# Prepare the figure for the data loaded above.
fig = plt.figure(figsize=(15, 10), dpi=100)
ax = fig.add_subplot(2, 1, 1)  # NOTE(review): unused; the plt.subplot calls below replace it
# Title for the figure as a whole.
# NOTE(review): "Larning" is a typo for "Learning" in the displayed
# title string; left unchanged here because it is runtime output.
fig.suptitle(
    "Long Short-Term Memory (Deep Larning) of Artificial Intelligence[AI]", fontsize=20)
plt.title("Test Graph", {"fontsize": 20})
ax1 = plt.subplot(2, 2, 1)  # panel 1 of the 2x2 grid
ax1.plot(xdata, ydata)  # closing-price history
ax1.legend(loc='best')
ax1.grid()
ax1.set_xlabel('Date')  # x label for panel 1
ax1.set_ylabel(f'{code}')  # NOTE(review): assumes `code` (ticker) is defined upstream
ax2 = plt.subplot(2, 2, 2)  # panel 2: training/validation loss curves
# NOTE(review): epochs/loss/val_loss only appear in code that is
# commented out earlier in this file, so the next two plots raise
# NameError as written — confirm before running this section.
ax2.plot(range(epochs), loss, marker='.',
         label='loss(training data)')  # training loss
ax2.plot(range(epochs), val_loss, marker='.',
         label='val_loss(evaluation data)')  # validation loss
ax2.legend(loc='best')
ax2.grid()
ax2.set_xlabel('epoch')  # x label for panel 2
ax2.set_ylabel('loss')  # y label for panel 2
ax3 = plt.subplot(2, 2, 3)  # panel 3: predicted vs. actual prices
# NOTE(review): datelabel, predicted_N and y_test_price_N are not
# defined anywhere in this section — confirm they exist upstream.
ax3.plot(datelabel, predicted_N, marker='.', label='predicted')  # predictions
ax3.plot(datelabel, y_test_price_N, marker='.',
         label='y_test_price')  # actual prices
ax3.legend(loc='best')
ax3.grid()
ax3.set_xlabel('Date')
ax3.set_ylabel(f'{code}')
ax4 = plt.subplot(2, 2, 4)  # panel 4: future prediction
# NOTE(review): predicted_futureN is not defined in this section.
ax4.plot(range(len(predicted_futureN)), predicted_futureN,
         marker='.', label='future predicted')  # predicted future values
ax4.plot(range(len(predicted_futureN[:10])), predicted_futureN[:10],
         marker='.', label='real data', color="0.5")  # first 10 as "real" overlay
ax4.legend(loc='best')
ax4.grid()
ax4.set_xlabel('Date')  # x label for panel 4
ax4.set_ylabel(f'{code}')  # y label for panel 4
# Display the figure.
plt.show()
| app/src/main/python/KinoCode.py | 33,041 | pandasã®ã€ã³ããŒã ããŒã¿ã®èªã¿èŸŒã¿df = pd.read_csv('finance_dataset.csv') ããŒã¿ãã¬ãŒã ã®è¡šç€ºdf '6976'67582021幎ãã仿¥ãŸã§ã®1幎éã®ããŒã¿ãååŸããŸããããææ¥ã決ããŠè¡ããŸãã (2021, 1, 1) æåž«ããŒã¿(ä»ãŸã§ã®ããŒã¿)start_train = datetime.date(2022, 1, 1) æåž«ããŒã¿(ä»ãŸã§ã®ããŒã¿)dowstart_train = datetime.date(2022, 1, 5)start_train + relativedelta(days=+3) æšæ¥å(today-1æ¥)ãŸã§ååŸã§ããïŒåœæ¥åã¯å€åããŠããããïŒ æåž«ããŒã¿ãèªã¿èŸŒãã 詊éšããŒã¿ã®csvãã¡ã€ã«ãèªã¿èŸŒãã 詊éšããŒã¿ã®csvãã¡ã€ã«ãèªã¿èŸŒããããŒã¿ã®ååŠçæ¬ æããŒã¿ãããã®ã§ãæ¬ æå€NaNãé€å€ããdf_NikkeiAll_drop = df_NikkeiAll.dropna()df_NikkeiAll_drop.head() å
é ã®5è¡ã衚圢åŒã§è¡šç€º åã«ã©ã ã®è©³çŽ°ç¢ºèª æ¥ä»ã€ã³ããã¯ã¹ããã»ãã Dateã®ããŒã¿åãã'datetime'åãžå€æŽ ææ¥æ
å ±ã远å (ææ:0, ç«æ:1, æ°Žæ:2, æšæ:3, éæ:4ãåæ:5ãæ¥æ:6)data['Dowweekday'] = Dow_df['Date'].dt.weekdaydata['DowDate'] = Dow_df['Date']data['Nikkeiweekday'] = Nikkei_df['Date'].dt.weekday åãã®æææ¥ãšãªã1965/1/4ãåºæºã«æ¥æ°ã远å datetime(1965, 1, 4)data['weeks'] = (data['Date'] - start) // timedelta(weeks=1)data['Dowweeks'] = (Dow_df['Date'] - start) // timedelta(weeks=1)data['Nikkiweeks'] = (Nikkei_df['Date'] - start) // timedelta(weeks=1)print(data)data.to_csv('data/stocks_price_data/KinoCode_data.csv') csvæžãåºã Closeã®åã®ããŒã¿ã®ã¿ãåãåºã ã«ã©ã ã®äžŠã¹æ¿ãdf_dow = Dow_df[['Date', 'weeks', 'weekday', 'High', 'Low', 'Open', 'Close']]df_nikkei = Nikkei_df[['Date', 'weeks', 'weekday', 'High', 'Low', 'Open', 'Close']] csvæžãåºã ããŒã¿ã®äžŠã³æ¿ã æ¥ä»ãã€ã³ããã¯ã¹ã«ã»ããã«ã©ã æ
å ±ã1è¡äžã«ããããããŒã¿ãã¬ãŒã ãäœæããpngãã®df_shiftãçšããŠãç¿æ¥ã®çµå€ã𿬿¥ã®çµå€ãåŒãç®ãããã®çµæã'delta_Close'ãšããã«ã©ã ã远å ãdfã«å
¥åããŸããç¿æ¥ã®å§å€ã𿬿¥ã®çµå€ã®å·®åã远å ããç®ç倿°Upã远å ãã(ç¿æ¥ã®çµå€ãäžããå Žå1ããã以å€ã¯0ãšãã)ã'delta_Close'ã«ã©ã ã®åé€ 'Open', 'High', 'Low', 'Close'ã°ã©ãåã®ããã«ã«ã©ã æœåº matplotlibã®ã€ã³ããŒã æç³»åæãç·ã°ã©ãã®äœæ çµå€ã®åæ¥æ¯ã®è¿œå png次ã«ãå§å€ãšçµå€ã®å·®å'Body'ãdfã«è¿œå ããŸãã å§å€ãšçµå€ã®å·®åã远å pngäžèŠã«ã©ã ã®åé€ãšäžŠã³æ¿ããè¡ããŸãã äžèŠã«ã©ã ã®åé€ãšäžŠã¹æ¿ã åŠç¿ããŒã¿ã2018-01-01ã2020-12-31ã®æéãšãdf_trainã«å
¥åããpngåæ§ã«ã2021幎1æ1æ¥ä»¥éã®ããŒã¿ãæãåºããdf_valã«å
¥åããŸãã æ€èšŒããŒã¿ã2021-01-01以éãšããŠãšããŠdf_valã«å
¥åãã åŠç¿ããŒã¿ã説æå€æ°(X_train)ãšç®ç倿°(y_train)ã«åãã åŠç¿ããŒã¿ã®èª¬æå€æ°ãšç®ç倿°ã確èªpngpngåæ§ã«æ€èšŒããŒã¿ã®èª¬æå€æ°ãX_valãç®ç倿°ãy_valãšããŠããŒã¿ãå
¥åãã確èªããŸãã æ€èšŒããŒã¿ã説æå€æ°(X_val)ãšç®ç倿°(y_val)ã«åãã æ€èšŒããŒã¿ã®èª¬æå€æ°ãšç®ç倿°ã確èªpngpngåŠç¿ããŒã¿ãšæ€èšŒããŒã¿ã®æç³»åã°ã©ããäœæã2021幎ååŸã§ããŒã¿ãåãããŠããããšãç®ã§ç¢ºèªããŸãã2021幎以åãåŠç¿ããŒã¿ã§éãã°ã©ãã2021幎以éãæ€èšŒããŒã¿ã§ãªã¬ã³ãžã®ã°ã©ãã§ç€ºãããŠããäºãåãããŸãã åŠç¿ããŒã¿ãšæ€èšŒããŒã¿ã®çµå€(Close)ã®æãç·ã°ã©ãäœæ ã°ã©ãã®å¡äŸãèšå® ã°ã©ãã®è¡šç€º æšæºå颿°(StandardScaler)ã®ã€ã³ããŒã numpyã®ã€ã³ããŒã 4æ¥ããšã«ããŒã¿ãæãåºããŠãæšæºåãšãšnumpyé
åã«å€æãã颿°(std_to_np)ã®å®çŸ©æšæºåãè¡ãStandardScalaerãsklearn.preprocessingãããnumpyãnpãšããŠã€ã³ããŒãããŸãã æ¬¡ã«4æ¥æ¯ã«ããŒã¿ãæãåºããæšæºåãè¡ããnumpyé
åã§åºåãã颿°(std_to_np)ãå®çŸ©ããŸããdf_list = [] ã§ãŸã空ã®ãªã¹ããå®çŸ©ããŸããããã«ã¯æšæºåããããªã£ãåŸã®ã4æ¥æ¯ã«ãŸãšãŸã£ãããŒã¿ãæ ŒçŽããŠè¡ããŸããdf = np.array(df) ã§å
¥åãããããŒã¿ãã¬ãŒã ããŸãnumpyé
åã«å€æããŸãããã®é
åã«å¯ŸããŠãforæãçšããŠ4æ¥ãã€ã®ããŒã¿æãåºããŠãdf_sã«å
¥å(df_s=df[i:i+4])ããåŸã«ãStandardScalerãã€ã³ã¹ã¿ã³ã¹åã(scl=ãStandardScaler()) æšæºåããããªã£ãçµæãdf_stdã«å
¥å(df_std=scl.fit_transform(df_s))ãããããã¯ããã«å®çŸ©ããdf_listã«appendã¡ãœãããçšããŠæ ŒçŽ(df_list.append(df_std))ããŠè¡ããŸããæåŸã®4æ¥åã®ããŒã¿ãŸã§ãã®ç¹°ãè¿ãåŠçãè¡ããŸããç¹°ãè¿ãåŠçãçµäºãããšãdf_listãnumpyé
åã§åºå(return np.array(df_list))ããŸãããã®é¢æ°ãX_trainãšX_valã«é©çšããŠããŒã¿ã®åã確èªããŸãã åŠç¿ããŒã¿ãšæ€èšŒããŒã¿ã®èª¬æå€æ°ã«é¢æ°(std_to_np)ãå®è¡ åŠç¿ããŒã¿ãšæ€èšŒããŒã¿ã®åœ¢ã®ç¢ºèª åŠç¿ããŒã¿ãšæ€èšŒããŒã¿ã®ç®ç倿°ã確èªpngåŠç¿ããŒã¿ã¯480åãæ€èšŒããŒã¿ã¯132åæãããšãããããŸãããããã®ããŒã¿ã«å¯ŸããŠãåé±ã®4æ¥ç®(æšææ¥)ã®ããŒã¿ãæãåºããŠç¢ºèªããŸãã åŠç¿ããŒã¿ãæ€èšŒããŒã¿ã®ç®ç倿°ã®éåŒã é±ã®4æ¥ç®(æšææ¥)ã®ããŒã¿ã ãæãåºã éåŒãåŸã®åŠç¿ããŒã¿ãšæ€èšŒããŒã¿ã®ç®ç倿°ã確èªåŠç¿ããŒã¿ãšæ€èšŒããŒã¿ããããåé±ã®4æ¥ç®ã®ããŒã¿ã®ã¿ã«ãªã£ãŠãããåæ°ã¯120åãš33åãšãªã£ãŠããã4æ¥æ¯ã«ãŸãšãã説æå€æ°ã®ããŒã¿æ°ãšåãã«ãªã£ãŠããŸããpngpngããã§ãæ©æ¢°åŠç¿ãè¡ãããã®ããŒã¿ã¯æŽããŸããã!pip install tensorflowç¶ããŠãkerasããå¿
èŠãªé¢æ°ãã€ã³ããŒãããŸãã keras.modelsããSequentialã®ã€ã³ããŒã keras.layersããDenseãLSTMã®ã€ã³ããŒã Dropoutã®ã€ã³ããŒããã¥ãŒã©ã«ãããã®æ§ç¯ãããã©ã¡ãŒã¿ã®ãã¥ãŒãã³ã°æ¹æ³ã®èª¬æã¯çç¥ãããŠé ããŸããã åºæ¬çãªå
¥åå±€ãäžéå±€ãšåºåå±€ãããªãã¢ãã«ããã®ããã«æ§ç¯ããããšãã§ããŸãã ãŸãããã®ã¢ãã«ãlstm_compãšãã颿°ã§å®çŸ©ããŸãããã LSTMæ§ç¯ãšã³ã³ãã€ã«é¢æ° å
¥åå±€/äžéå±€/åºåå±€ã®ãããã¯ãŒã¯ãæ§ç¯ ãããã¯ãŒã¯ã®ã³ã³ãã€ã« LSTMæ§ç¯ãšã³ã³ãã€ã«é¢æ°ã«X_trainãæž¡ãã倿°modelã«ä»£å
¥ loss(èšç·ŽããŒã¿ã«å¯Ÿããå€å®çµæ)ãval_loss(ãã¹ãããŒã¿ã«å¯Ÿããå€å®çµæ)ãããããããloss = hist.history['loss']val_loss = hist.history['val_loss']epochs = len(loss) äºæž¬ äºæž¬çµæã®2å€å äºæž¬ç²ŸåºŠã®ç®åºãšè¡šç€º äºæž¬ç²ŸåºŠã¹ã³ã¢ããªã¹ãã«æ ŒçŽ4åã®äº€å·®æ€èšŒãçµäºããããäºæž¬ç²ŸåºŠã®ã¹ã³ã¢ãæ ŒçŽããããªã¹ãã®è¡šç€ºããã¹ã³ã¢ã®å¹³åå€ã®ç®åºãšè¡šç€ºãããŠã¿ãŸãããã4åã®ããããã®ã¹ã³ã¢ãšãå¹³åå€ã¯ãã®ããã«ãªããŸããã LSTMæ§ç¯ãšã³ã³ãã€ã«é¢æ°ã«X_train_np_arrayãæž¡ãã倿°modelã«ä»£å
¥äœæããã¢ãã«ã§ãåŠç¿ããŸããäžç¬ã§åŠç¿ãçµäºããŸããã ã¢ãã«ã®åŠç¿ã®å®è¡ä»åºŠã¯åŠç¿ããã¢ãã«ãçšããŠãæ€èšŒããŒã¿ã«ã€ããŠäºæž¬ãè¡ããå
é ã®10åã衚瀺ãããŠã¿ãŸãããã äœæããã¢ãã«ããæ€èšŒããŒã¿ãçšããŠäºæž¬ãè¡ã äºæž¬çµæã0ãããã¯1ã«ä¿®æ£ïŒ0.5ãå¢ã«ããŠã1ã«è¿ãã»ã©æ ªäŸ¡ãäžæã0ã«è¿ãã»ã©æ ªäŸ¡ãäžæããªãïŒ ä¿®æ£ããäºæž¬çµæã®å
é 10ä»¶ãç¢ºèª å®éã®çµæããäºæž¬å€ã®æ£è§£çãèšç®ãã æ··åè¡åçæã®ããconfusion_matrixãã€ã³ããŒã æ··åè¡åã衚瀺 ã°ã©ãã®è¡šç€º Closeã®åã®ããŒã¿ã®ã¿ãåãåºã datetimeã®åã®ããŒã¿ã®ã¿ãåãåºããªã·ã§ã€ã float64 datetime64[ns] èªã¿èŸŒãã æ¥çµå¹³åããããã 衚瀺ããæ°æç»ããããŒã¿ã®èªã¿èŸŒã¿ å³å
šäœã®ã¿ã€ãã« 2x2ã®1çªç® 1çªç®ã«æç» 1çªç®ã«xã©ãã«ã远å 1çªç®ã«yã©ãã«ã远å 2x2ã®1çªç® 1çªç®ã«æç» 1çªç®ã«è¿œå æç» 1çªç®ã«xã©ãã«ã远å 1çªç®ã«yã©ãã«ã远å 2x2ã®3çªç® 1çªç®ã«æç» 1çªç®ã«è¿œå æç» 2x2ã®4çªç® 1çªç®ã«æç» 1çªç®ã«è¿œå æç» 1çªç®ã«xã©ãã«ã远å 1çªç®ã«yã©ãã«ã远å ã°ã©ãã衚瀺ãã | 4,181 | ja | 0.99513 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.