code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
"""
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import ticker
from sklearn import datasets, neighbors
def get_data(N, D, dataset="dense"):
    """Return an (N, D) data matrix for benchmarking.

    "dense": reproducible uniform-random values (seed fixed to 0).
    "digits": the sklearn digits data with columns reordered by decreasing
    value of the first sample, truncated to N rows and D columns.
    """
    if dataset == "dense":
        np.random.seed(0)
        return np.random.random((N, D))
    if dataset == "digits":
        X, _ = datasets.load_digits(return_X_y=True)
        order = np.argsort(X[0])[::-1]
        return X[:, order][:N, :D]
    raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(
    Nrange=2 ** np.arange(1, 11),
    Drange=2 ** np.arange(7),
    krange=2 ** np.arange(10),
    N=1000,
    D=64,
    k=5,
    leaf_size=30,
    dataset="digits",
):
    """Benchmark nearest-neighbors construction and query times.

    Draws three stacked bar charts (one subplot each) showing build and
    query time of each algorithm while varying, in turn:

    - N (number of samples, over ``Nrange``) with D and k fixed,
    - D (number of features, over ``Drange``) with N and k fixed,
    - k (number of neighbors, over ``krange``) with N and D fixed.

    Parameters
    ----------
    Nrange, Drange, krange : arrays of values swept in each subplot.
    N, D, k : fiducial values held fixed while the other quantities vary.
    leaf_size : leaf size passed to the tree-based algorithms.
    dataset : "dense" (random) or "digits" (see ``get_data``).
    """
    algorithms = ("kd_tree", "brute", "ball_tree")
    fiducial_values = {"N": N, "D": D, "k": k}

    # ------------------------------------------------------------
    # varying N
    N_results_build = {alg: np.zeros(len(Nrange)) for alg in algorithms}
    N_results_query = {alg: np.zeros(len(Nrange)) for alg in algorithms}

    for i, NN in enumerate(Nrange):
        print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
        X = get_data(NN, D, dataset)
        for algorithm in algorithms:
            nbrs = neighbors.NearestNeighbors(
                # cannot ask for more neighbors than there are samples
                n_neighbors=min(NN, k), algorithm=algorithm, leaf_size=leaf_size
            )
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()

            N_results_build[algorithm][i] = t1 - t0
            N_results_query[algorithm][i] = t2 - t1

    # ------------------------------------------------------------
    # varying D
    D_results_build = {alg: np.zeros(len(Drange)) for alg in algorithms}
    D_results_query = {alg: np.zeros(len(Drange)) for alg in algorithms}

    for i, DD in enumerate(Drange):
        print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
        X = get_data(N, DD, dataset)
        for algorithm in algorithms:
            nbrs = neighbors.NearestNeighbors(
                n_neighbors=k, algorithm=algorithm, leaf_size=leaf_size
            )
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()

            D_results_build[algorithm][i] = t1 - t0
            D_results_query[algorithm][i] = t2 - t1

    # ------------------------------------------------------------
    # varying k
    k_results_build = {alg: np.zeros(len(krange)) for alg in algorithms}
    k_results_query = {alg: np.zeros(len(krange)) for alg in algorithms}

    # BUG FIX: the original called get_data(N, DD, dataset), reusing the
    # last loop value of DD from the D sweep instead of the fiducial D.
    # The two only coincide for the default Drange (max(Drange) == D == 64),
    # so any custom Drange silently benchmarked the wrong dimensionality.
    X = get_data(N, D, dataset)

    for i, kk in enumerate(krange):
        print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
        for algorithm in algorithms:
            nbrs = neighbors.NearestNeighbors(
                n_neighbors=kk, algorithm=algorithm, leaf_size=leaf_size
            )
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()

            k_results_build[algorithm][i] = t1 - t0
            k_results_query[algorithm][i] = t2 - t1

    plt.figure(figsize=(8, 11))

    for sbplt, vals, quantity, build_time, query_time in [
        (311, Nrange, "N", N_results_build, N_results_query),
        (312, Drange, "D", D_results_build, D_results_query),
        (313, krange, "k", k_results_build, k_results_query),
    ]:
        ax = plt.subplot(sbplt, yscale="log")
        plt.grid(True)

        tick_vals = []
        tick_labels = []

        # Baseline for the log-scale bars: one decade below the fastest build.
        bottom = 10 ** np.min(
            [min(np.floor(np.log10(build_time[alg]))) for alg in algorithms]
        )

        for i, alg in enumerate(algorithms):
            xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
            width = 0.8

            # Stack the query-time bar on top of the build-time bar.
            c_bar = plt.bar(xvals, build_time[alg] - bottom, width, bottom, color="r")
            q_bar = plt.bar(xvals, query_time[alg], width, build_time[alg], color="b")

            tick_vals += list(xvals + 0.5 * width)
            tick_labels += ["%i" % val for val in vals]

            plt.text(
                (i + 0.02) / len(algorithms),
                0.98,
                alg,
                transform=ax.transAxes,
                ha="left",
                va="top",
                bbox=dict(facecolor="w", edgecolor="w", alpha=0.5),
            )

            plt.ylabel("Time (s)")

        ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
        ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))

        for label in ax.get_xticklabels():
            label.set_rotation(-90)
            label.set_fontsize(10)

        title_string = "Varying %s" % quantity

        # Annotate the fixed (fiducial) values of the other two quantities.
        descr_string = ""
        for s in "NDk":
            if s == quantity:
                pass
            else:
                descr_string += "%s = %i, " % (s, fiducial_values[s])
        descr_string = descr_string[:-2]

        plt.text(
            1.01,
            0.5,
            title_string,
            transform=ax.transAxes,
            rotation=-90,
            ha="left",
            va="center",
            fontsize=20,
        )

        plt.text(
            0.99,
            0.5,
            descr_string,
            transform=ax.transAxes,
            rotation=-90,
            ha="right",
            va="center",
        )

    plt.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
    plt.figlegend((c_bar, q_bar), ("construction", "N-point query"), "upper right")
if __name__ == "__main__":
    # Run the benchmark on both datasets, then display the figures.
    # (The original final line had dataset-table metadata fused onto it,
    # which made the file unparsable; it is removed here.)
    barplot_neighbors(dataset="digits")
    barplot_neighbors(dataset="dense")
    plt.show()
# -*- coding: utf-8 -*-
from lxml import etree as et
import sys
import os
# for TTS(default system TTS)
import pyttsx
#coding: utf-8
def getData(fname):
    """Parse the XHTML file *fname* and return the root element.

    Newlines are stripped before parsing so layout whitespace does not end
    up inside element text.
    """
    # BUG FIX: the original opened the file and never closed it; use a
    # with-block so the handle is released even if parsing fails.
    with open(fname) as f:
        data = f.read().replace('\n', '')
    return et.fromstring(data)
def generateSable(node=None, flag=None):
    """Create or serialize the SABLE document.

    Called with no arguments: return a fresh, empty <SABLE> root element.
    Called with *node* and a truthy *flag*: prepend the XML declaration and
    DOCTYPE header and write the serialized markup to 'equation.sable'.
    """
    documentInfo = ('<?xml version="1.0"?><!DOCTYPE SABLE PUBLIC '
                    '"-//SABLE//DTD SABLE speech mark up//EN" '
                    '"Sable.v0_2.dtd" []>')
    if flag:
        sable = documentInfo + et.tostring(node)
        # Exception-safe write (the original used bare open/write/close).
        with open('equation.sable', 'w') as f:
            f.write(sable)
        return
    return et.Element('SABLE')
# Spoken-word forms for the operators that can appear in an <mo> element.
# A dict lookup replaces the original if-chain; lookup order is irrelevant
# because every key is distinct.  (The original had a second, unreachable
# test for '∫' returning 'integral'; as before, the first mapping wins.)
_OPERATOR_WORDS = {
    '+': 'plus',
    '-': 'minus',
    '...': 'so on till,',
    '=': 'is equal to,',
    '(': 'the quantity, (',
    ')': '),',
    'sin': 'sine',
    'cos': 'cos',
    'tan': 'tan',
    'log': 'log',
    '*': 'times',
    '/': 'divided by',
    '%': 'modulo divided by',
    '∫∫': 'double integral of',
    '∫': 'integral of',
    '∮': ' contour integral of',
    '∯': ' surface integral of',
    '∰': ' volume integral of',
    '∱': ' clockwise integral of',
    '∂': 'partial derivative of',
    '∠': ' angle of',
    'ⅆ': 'D',
}


def operatorParse(op):
    """Return the spoken English form of MathML operator *op*.

    Unknown operators yield None, matching the original function's implicit
    fall-through return.
    """
    return _OPERATOR_WORDS.get(op)
# Dead code kept for reference: speak *text* through the default system TTS
# engine (pyttsx).  Disabled in favour of shelling out to festival in main().
"""def speek(text):
engine = pyttsx.init()
engine.say(text)
engine.runAndWait()
return
"""
def _flush_exp(node, words):
    # Attach the accumulated word list to the SABLE tree: as the tail of the
    # last child when one exists, otherwise as the node's own text.
    if len(node) > 0:
        node[-1].tail = ' '.join(words)
    else:
        node.text = ' '.join(words)


def mathparse(element, snode, exp=None):
    """Recursively translate a MathML element tree into SABLE speech markup.

    element -- the MathML element being visited
    snode   -- the SABLE element the spoken form is appended to
    exp     -- accumulator of words not yet written into the tree; a fresh
               list is created per top-level call (the original used a
               mutable default argument, which leaked appended words
               between separate top-level calls)

    Returns the list of words still pending after this element.
    """
    if exp is None:
        exp = []
    # Strip the XML namespace: "{uri}tag" -> "tag".
    mtag = element.tag.split('}')[1]

    # Numbers and identifiers: just queue their text.
    if mtag == 'mi' or mtag == 'mn':
        exp.append(element.text)

    # Operators: queue their spoken form.
    if mtag == 'mo':
        exp.append(operatorParse(element.text))

    # Fractions: numerator, "divided by", denominator.
    if mtag == 'mfrac':
        _flush_exp(snode, exp)
        exp = []
        et.SubElement(snode, 'AUDIO', SRC='http://localhost/test/testsound.wav')
        et.SubElement(snode, 'EMPH').text = 'fraction'
        tnode = et.SubElement(snode, 'RATE', SPEED='+0%')
        t2node = et.SubElement(tnode, 'PITCH', BASE='+0%')
        tnode.tail = 'divided by'
        exp = mathparse(element[0], t2node, exp)
        _flush_exp(t2node, exp)
        exp = []
        dnode = et.SubElement(snode, 'RATE', SPEED='+0%')
        d2node = et.SubElement(dnode, 'PITCH', BASE='-0%')
        exp = mathparse(element[1], d2node, exp)
        _flush_exp(d2node, exp)
        return []

    # Superscript: base, then the exponent at raised-pitch markup.
    if mtag == 'msup':
        _flush_exp(snode, exp)
        exp = []
        et.SubElement(snode, 'AUDIO', SRC='http://localhost/test/superscript.wav')
        exp = mathparse(element[0], snode, exp)
        _flush_exp(snode, exp)
        exp = []
        et.SubElement(snode, 'EMPH').text = 'superscript'
        enode = et.SubElement(snode, 'PITCH', BASE='+0%')
        exp = mathparse(element[1], enode, exp)
        _flush_exp(enode, exp)
        return []

    # Subscript.
    if mtag == 'msub':
        # BUG FIX: this branch tested "if snode.text" before writing to
        # snode[-1], which raises IndexError on a childless node with text;
        # use the same len() test as every other branch.
        _flush_exp(snode, exp)
        exp = []
        et.SubElement(snode, 'AUDIO', SRC='http://localhost/test/subscript.wav')
        exp = mathparse(element[0], snode, exp)
        _flush_exp(snode, exp)
        exp = []
        et.SubElement(snode, 'EMPH').text = 'subscript'
        subNode = et.SubElement(snode, 'PITCH', BASE='-0%')
        exp = mathparse(element[1], subNode, exp)
        _flush_exp(subNode, exp)
        return []

    # Subscript-superscript pair.
    if mtag == 'msubsup':
        exp = mathparse(element[0], snode, exp)
        _flush_exp(snode, exp)
        exp = []
        et.SubElement(snode, 'AUDIO', SRC='http://localhost/test/subsuper.wav')
        ssSub = et.SubElement(snode, 'PITCH', BASE='-0%')
        exp = mathparse(element[1], ssSub, exp)
        _flush_exp(ssSub, exp)
        exp = []
        et.SubElement(snode, 'EMPH').text = 'superscript'
        ssSup = et.SubElement(snode, 'PITCH', BASE='+0%')
        exp = mathparse(element[2], ssSup, exp)
        _flush_exp(ssSup, exp)
        return []

    # Fenced expression (parentheses and friends).
    if mtag == 'mfenced':
        # BUG FIX: MathML has no <mfence> element; the tag is <mfenced>, so
        # the original comparison never matched and this branch was dead.
        # Also replaced the unsafe "if snode.text" guard and the ''-join
        # with the standard space-join flush used everywhere else.
        exp.append('the quantity')
        _flush_exp(snode, exp)
        exp = []
        for c in element:
            exp = mathparse(c, snode, exp)
        et.SubElement(snode, 'BREAK', LEVEL='Medium')
        # NOTE(review): words accumulated from the children are discarded
        # here, matching the original code's behavior — confirm intended.
        return []

    # Overscript.
    if mtag == 'mover':
        _flush_exp(snode, exp)
        # BUG FIX: the original forgot to clear exp here, so the words just
        # flushed were passed down and duplicated by the recursive call.
        exp = []
        et.SubElement(snode, 'AUDIO', SRC='http://localhost/test/superscript.wav')
        exp = mathparse(element[0], snode, exp)
        _flush_exp(snode, exp)
        exp = []
        et.SubElement(snode, 'EMPH').text = 'overscript'
        overNode = et.SubElement(snode, 'PITCH', BASE='+0%')
        exp = mathparse(element[1], overNode, exp)
        _flush_exp(overNode, exp)
        return []

    # Underscript.
    if mtag == 'munder':
        _flush_exp(snode, exp)
        exp = []
        et.SubElement(snode, 'AUDIO', SRC='http://localhost/test/subscript.wav')
        exp = mathparse(element[0], snode, exp)
        _flush_exp(snode, exp)
        exp = []
        et.SubElement(snode, 'EMPH').text = 'underscript'
        underNode = et.SubElement(snode, 'PITCH', BASE='-0%')
        exp = mathparse(element[1], underNode, exp)
        _flush_exp(underNode, exp)
        return []

    # Underscript-overscript pair (e.g. summation limits: "... from lo hi").
    if mtag == 'munderover':
        _flush_exp(snode, exp)
        exp = []
        et.SubElement(snode, 'AUDIO', SRC='http://localhost/test/subsuper.wav')
        underOverBase = et.SubElement(snode, 'EMPH')
        underOverBase.text = ' '.join(mathparse(element[0], snode, exp))
        underOverBase.tail = 'from'
        exp = []
        underOverSub = et.SubElement(snode, 'PITCH', BASE='-0%')
        exp = mathparse(element[1], underOverSub, exp)
        _flush_exp(underOverSub, exp)
        exp = []
        underOverSup = et.SubElement(snode, 'PITCH', BASE='+0%')
        exp = mathparse(element[2], underOverSup, exp)
        _flush_exp(underOverSup, exp)
        return []

    # Square root.
    if mtag == 'msqrt':
        _flush_exp(snode, exp)
        exp = []
        et.SubElement(snode, 'AUDIO', SRC='http://localhost/test/squareroot.wav')
        exp.append('square root')
        # Say "of" only for a non-trivial radicand.
        if len(element) == 1 and len(element[0]) > 1:
            exp.append('of')
        _flush_exp(snode, exp)
        exp = []
        # BUG FIX: the original called et.SubElement('snode,RATE', ...) with
        # a string as the parent argument, which raises a TypeError; the
        # parent must be the snode element itself.
        sqrtNode = et.SubElement(snode, 'RATE', SPEED='+0%')
        for c in element:
            exp = mathparse(c, sqrtNode, exp)
        _flush_exp(sqrtNode, exp)
        return []

    # General (n-th) root: speak the index first, then "root of", then base.
    if mtag == 'mroot':
        et.SubElement(snode, 'AUDIO', SRC='http://localhost/test/squareroot.wav')
        exp = mathparse(element[-1], snode, exp)
        exp.append('root of')
        for c in element[:-1]:
            exp = mathparse(c, snode, exp)
        _flush_exp(snode, exp)
        return []

    # Default: recurse through the children and append any remaining words
    # after the markup built so far (append, not replace, in this branch).
    for e in element:
        exp = mathparse(e, snode, exp)
    if len(snode) > 0:
        if snode[-1].tail:
            snode[-1].tail = snode[-1].tail + ' '.join(exp)
        else:
            snode[-1].tail = ' '.join(exp)
    else:
        if snode.text:
            snode.text = snode.text + ' '.join(exp)
        else:
            snode.text = ' '.join(exp)
    return exp
def main():
args = sys.argv
if len(args) < 2:
print 'usage:\ndemo.py inputFile.xhtml'
exit(1)
fileName = str(sys.argv[1])
xmlroot = getData(fileName) #'example1.xhtml' contains the xhtml code given above
sableroot=generateSable()
expList = mathparse(xmlroot,sableroot)
if len(sableroot) > 0:
sableroot[-1].tail = ' '.join(expList)
else:
sableroot.text = ' '.join(expList)
generateSable(sableroot,1)
#print 'list in the main function:\n',expList
#print len(expList)
expression = ' '.join(expList)
#print the resulting string
print 'result:',expression
#speak the expression
#speek(expression)
#speak the expression using festival
cmd = 'echo "'+expression+'" | festival --tts'
festCmd = 'festival --tts equation.sable'
os.system(festCmd)
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
/* Copyright 2017 - 2025 R. Thomas
* Copyright 2017 - 2025 Quarkslab
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIEF_PE_MSVCR110_DLL_LOOKUP_H
#define LIEF_PE_MSVCR110_DLL_LOOKUP_H
#include <cstdint>
namespace LIEF {
namespace PE {
inline const char* msvcr110_dll_lookup(uint32_t i) {
switch(i) {
case 0x014b: return "$I10_OUTPUT";
case 0x0001: return "??0?$_SpinWait@$00@details@Concurrency@@QAE@P6AXXZ@Z";
case 0x0002: return "??0?$_SpinWait@$0A@@details@Concurrency@@QAE@P6AXXZ@Z";
case 0x0003: return "??0SchedulerPolicy@Concurrency@@QAA@IZZ";
case 0x0004: return "??0SchedulerPolicy@Concurrency@@QAE@ABV01@@Z";
case 0x0005: return "??0SchedulerPolicy@Concurrency@@QAE@XZ";
case 0x0006: return "??0_CancellationTokenState@details@Concurrency@@AAE@XZ";
case 0x0007: return "??0_Cancellation_beacon@details@Concurrency@@QAE@XZ";
case 0x0008: return "??0_Condition_variable@details@Concurrency@@QAE@XZ";
case 0x0009: return "??0_Context@details@Concurrency@@QAE@PAVContext@2@@Z";
case 0x000a: return "??0_Interruption_exception@details@Concurrency@@QAE@PBD@Z";
case 0x000b: return "??0_Interruption_exception@details@Concurrency@@QAE@XZ";
case 0x000c: return "??0_NonReentrantBlockingLock@details@Concurrency@@QAE@XZ";
case 0x000d: return "??0_NonReentrantPPLLock@details@Concurrency@@QAE@XZ";
case 0x000e: return "??0_ReaderWriterLock@details@Concurrency@@QAE@XZ";
case 0x000f: return "??0_ReentrantBlockingLock@details@Concurrency@@QAE@XZ";
case 0x0010: return "??0_ReentrantLock@details@Concurrency@@QAE@XZ";
case 0x0011: return "??0_ReentrantPPLLock@details@Concurrency@@QAE@XZ";
case 0x0012: return "??0_Scheduler@details@Concurrency@@QAE@PAVScheduler@2@@Z";
case 0x0013: return "??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z";
case 0x0014: return "??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z";
case 0x0015: return "??0_SpinLock@details@Concurrency@@QAE@ACJ@Z";
case 0x0016: return "??0_StructuredTaskCollection@details@Concurrency@@QAE@PAV_CancellationTokenState@12@@Z";
case 0x0017: return "??0_TaskCollection@details@Concurrency@@QAE@PAV_CancellationTokenState@12@@Z";
case 0x0018: return "??0_TaskCollection@details@Concurrency@@QAE@XZ";
case 0x0019: return "??0_Timer@details@Concurrency@@IAE@I_N@Z";
case 0x001a: return "??0__non_rtti_object@std@@QAE@ABV01@@Z";
case 0x001b: return "??0__non_rtti_object@std@@QAE@PBD@Z";
case 0x001c: return "??0bad_cast@std@@AAE@PBQBD@Z";
case 0x001d: return "??0bad_cast@std@@QAE@ABV01@@Z";
case 0x001e: return "??0bad_cast@std@@QAE@PBD@Z";
case 0x001f: return "??0bad_target@Concurrency@@QAE@PBD@Z";
case 0x0020: return "??0bad_target@Concurrency@@QAE@XZ";
case 0x0021: return "??0bad_typeid@std@@QAE@ABV01@@Z";
case 0x0022: return "??0bad_typeid@std@@QAE@PBD@Z";
case 0x0023: return "??0context_self_unblock@Concurrency@@QAE@PBD@Z";
case 0x0024: return "??0context_self_unblock@Concurrency@@QAE@XZ";
case 0x0025: return "??0context_unblock_unbalanced@Concurrency@@QAE@PBD@Z";
case 0x0026: return "??0context_unblock_unbalanced@Concurrency@@QAE@XZ";
case 0x0027: return "??0critical_section@Concurrency@@QAE@XZ";
case 0x0028: return "??0default_scheduler_exists@Concurrency@@QAE@PBD@Z";
case 0x0029: return "??0default_scheduler_exists@Concurrency@@QAE@XZ";
case 0x002a: return "??0event@Concurrency@@QAE@XZ";
case 0x002b: return "??0exception@std@@QAE@ABQBD@Z";
case 0x002c: return "??0exception@std@@QAE@ABQBDH@Z";
case 0x002d: return "??0exception@std@@QAE@ABV01@@Z";
case 0x002e: return "??0exception@std@@QAE@XZ";
case 0x002f: return "??0improper_lock@Concurrency@@QAE@PBD@Z";
case 0x0030: return "??0improper_lock@Concurrency@@QAE@XZ";
case 0x0031: return "??0improper_scheduler_attach@Concurrency@@QAE@PBD@Z";
case 0x0032: return "??0improper_scheduler_attach@Concurrency@@QAE@XZ";
case 0x0033: return "??0improper_scheduler_detach@Concurrency@@QAE@PBD@Z";
case 0x0034: return "??0improper_scheduler_detach@Concurrency@@QAE@XZ";
case 0x0035: return "??0improper_scheduler_reference@Concurrency@@QAE@PBD@Z";
case 0x0036: return "??0improper_scheduler_reference@Concurrency@@QAE@XZ";
case 0x0037: return "??0invalid_link_target@Concurrency@@QAE@PBD@Z";
case 0x0038: return "??0invalid_link_target@Concurrency@@QAE@XZ";
case 0x0039: return "??0invalid_multiple_scheduling@Concurrency@@QAE@PBD@Z";
case 0x003a: return "??0invalid_multiple_scheduling@Concurrency@@QAE@XZ";
case 0x003b: return "??0invalid_operation@Concurrency@@QAE@PBD@Z";
case 0x003c: return "??0invalid_operation@Concurrency@@QAE@XZ";
case 0x003d: return "??0invalid_oversubscribe_operation@Concurrency@@QAE@PBD@Z";
case 0x003e: return "??0invalid_oversubscribe_operation@Concurrency@@QAE@XZ";
case 0x003f: return "??0invalid_scheduler_policy_key@Concurrency@@QAE@PBD@Z";
case 0x0040: return "??0invalid_scheduler_policy_key@Concurrency@@QAE@XZ";
case 0x0041: return "??0invalid_scheduler_policy_thread_specification@Concurrency@@QAE@PBD@Z";
case 0x0042: return "??0invalid_scheduler_policy_thread_specification@Concurrency@@QAE@XZ";
case 0x0043: return "??0invalid_scheduler_policy_value@Concurrency@@QAE@PBD@Z";
case 0x0044: return "??0invalid_scheduler_policy_value@Concurrency@@QAE@XZ";
case 0x0045: return "??0message_not_found@Concurrency@@QAE@PBD@Z";
case 0x0046: return "??0message_not_found@Concurrency@@QAE@XZ";
case 0x0047: return "??0missing_wait@Concurrency@@QAE@PBD@Z";
case 0x0048: return "??0missing_wait@Concurrency@@QAE@XZ";
case 0x0049: return "??0nested_scheduler_missing_detach@Concurrency@@QAE@PBD@Z";
case 0x004a: return "??0nested_scheduler_missing_detach@Concurrency@@QAE@XZ";
case 0x004b: return "??0operation_timed_out@Concurrency@@QAE@PBD@Z";
case 0x004c: return "??0operation_timed_out@Concurrency@@QAE@XZ";
case 0x004d: return "??0reader_writer_lock@Concurrency@@QAE@XZ";
case 0x004e: return "??0scheduler_not_attached@Concurrency@@QAE@PBD@Z";
case 0x004f: return "??0scheduler_not_attached@Concurrency@@QAE@XZ";
case 0x0050: return "??0scheduler_resource_allocation_error@Concurrency@@QAE@J@Z";
case 0x0051: return "??0scheduler_resource_allocation_error@Concurrency@@QAE@PBDJ@Z";
case 0x0052: return "??0scheduler_worker_creation_error@Concurrency@@QAE@J@Z";
case 0x0053: return "??0scheduler_worker_creation_error@Concurrency@@QAE@PBDJ@Z";
case 0x0054: return "??0scoped_lock@critical_section@Concurrency@@QAE@AAV12@@Z";
case 0x0055: return "??0scoped_lock@reader_writer_lock@Concurrency@@QAE@AAV12@@Z";
case 0x0056: return "??0scoped_lock_read@reader_writer_lock@Concurrency@@QAE@AAV12@@Z";
case 0x0057: return "??0task_canceled@Concurrency@@QAE@PBD@Z";
case 0x0058: return "??0task_canceled@Concurrency@@QAE@XZ";
case 0x0059: return "??0unsupported_os@Concurrency@@QAE@PBD@Z";
case 0x005a: return "??0unsupported_os@Concurrency@@QAE@XZ";
case 0x005b: return "??1SchedulerPolicy@Concurrency@@QAE@XZ";
case 0x005c: return "??1_CancellationTokenState@details@Concurrency@@UAE@XZ";
case 0x005d: return "??1_Cancellation_beacon@details@Concurrency@@QAE@XZ";
case 0x005e: return "??1_Condition_variable@details@Concurrency@@QAE@XZ";
case 0x005f: return "??1_NonReentrantBlockingLock@details@Concurrency@@QAE@XZ";
case 0x0060: return "??1_ReentrantBlockingLock@details@Concurrency@@QAE@XZ";
case 0x0061: return "??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@XZ";
case 0x0062: return "??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@XZ";
case 0x0063: return "??1_SpinLock@details@Concurrency@@QAE@XZ";
case 0x0064: return "??1_TaskCollection@details@Concurrency@@QAE@XZ";
case 0x0065: return "??1_Timer@details@Concurrency@@MAE@XZ";
case 0x0066: return "??1__non_rtti_object@std@@UAE@XZ";
case 0x0067: return "??1bad_cast@std@@UAE@XZ";
case 0x0068: return "??1bad_typeid@std@@UAE@XZ";
case 0x0069: return "??1critical_section@Concurrency@@QAE@XZ";
case 0x006a: return "??1event@Concurrency@@QAE@XZ";
case 0x006b: return "??1exception@std@@UAE@XZ";
case 0x006c: return "??1reader_writer_lock@Concurrency@@QAE@XZ";
case 0x006d: return "??1scoped_lock@critical_section@Concurrency@@QAE@XZ";
case 0x006e: return "??1scoped_lock@reader_writer_lock@Concurrency@@QAE@XZ";
case 0x006f: return "??1scoped_lock_read@reader_writer_lock@Concurrency@@QAE@XZ";
case 0x0070: return "??1type_info@@UAE@XZ";
case 0x0071: return "??2@YAPAXI@Z";
case 0x0072: return "??2@YAPAXIHPBDH@Z";
case 0x0073: return "??3@YAXPAX@Z";
case 0x0074: return "??3@YAXPAXHPBDH@Z";
case 0x0075: return "??4?$_SpinWait@$00@details@Concurrency@@QAEAAV012@ABV012@@Z";
case 0x0076: return "??4?$_SpinWait@$0A@@details@Concurrency@@QAEAAV012@ABV012@@Z";
case 0x0077: return "??4SchedulerPolicy@Concurrency@@QAEAAV01@ABV01@@Z";
case 0x0078: return "??4__non_rtti_object@std@@QAEAAV01@ABV01@@Z";
case 0x0079: return "??4bad_cast@std@@QAEAAV01@ABV01@@Z";
case 0x007a: return "??4bad_typeid@std@@QAEAAV01@ABV01@@Z";
case 0x007b: return "??4exception@std@@QAEAAV01@ABV01@@Z";
case 0x007c: return "??8type_info@@QBE_NABV0@@Z";
case 0x007d: return "??9type_info@@QBE_NABV0@@Z";
case 0x007e: return "??_7__non_rtti_object@std@@6B@";
case 0x007f: return "??_7bad_cast@std@@6B@";
case 0x0080: return "??_7bad_typeid@std@@6B@";
case 0x0081: return "??_7exception@std@@6B@";
case 0x0082: return "??_F?$_SpinWait@$00@details@Concurrency@@QAEXXZ";
case 0x0083: return "??_F?$_SpinWait@$0A@@details@Concurrency@@QAEXXZ";
case 0x0084: return "??_F_Context@details@Concurrency@@QAEXXZ";
case 0x0085: return "??_F_Scheduler@details@Concurrency@@QAEXXZ";
case 0x0086: return "??_Fbad_cast@std@@QAEXXZ";
case 0x0087: return "??_Fbad_typeid@std@@QAEXXZ";
case 0x0088: return "??_U@YAPAXI@Z";
case 0x0089: return "??_U@YAPAXIHPBDH@Z";
case 0x008a: return "??_V@YAXPAX@Z";
case 0x008b: return "??_V@YAXPAXHPBDH@Z";
case 0x008c: return "?Alloc@Concurrency@@YAPAXI@Z";
case 0x008d: return "?Block@Context@Concurrency@@SAXXZ";
case 0x008e: return "?Create@CurrentScheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z";
case 0x008f: return "?Create@Scheduler@Concurrency@@SAPAV12@ABVSchedulerPolicy@2@@Z";
case 0x0090: return "?CreateResourceManager@Concurrency@@YAPAUIResourceManager@1@XZ";
case 0x0091: return "?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@AAVlocation@2@@Z";
case 0x0092: return "?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@XZ";
case 0x0093: return "?CurrentContext@Context@Concurrency@@SAPAV12@XZ";
case 0x0094: return "?Detach@CurrentScheduler@Concurrency@@SAXXZ";
case 0x0095: return "?DisableTracing@Concurrency@@YAJXZ";
case 0x0096: return "?EnableTracing@Concurrency@@YAJXZ";
case 0x0097: return "?Free@Concurrency@@YAXPAX@Z";
case 0x0098: return "?Get@CurrentScheduler@Concurrency@@SAPAVScheduler@2@XZ";
case 0x0099: return "?GetExecutionContextId@Concurrency@@YAIXZ";
case 0x009a: return "?GetNumberOfVirtualProcessors@CurrentScheduler@Concurrency@@SAIXZ";
case 0x009b: return "?GetOSVersion@Concurrency@@YA?AW4OSVersion@IResourceManager@1@XZ";
case 0x009c: return "?GetPolicy@CurrentScheduler@Concurrency@@SA?AVSchedulerPolicy@2@XZ";
case 0x009d: return "?GetPolicyValue@SchedulerPolicy@Concurrency@@QBEIW4PolicyElementKey@2@@Z";
case 0x009e: return "?GetProcessorCount@Concurrency@@YAIXZ";
case 0x009f: return "?GetProcessorNodeCount@Concurrency@@YAIXZ";
case 0x00a0: return "?GetSchedulerId@Concurrency@@YAIXZ";
case 0x00a1: return "?GetSharedTimerQueue@details@Concurrency@@YAPAXXZ";
case 0x00a2: return "?Id@Context@Concurrency@@SAIXZ";
case 0x00a3: return "?Id@CurrentScheduler@Concurrency@@SAIXZ";
case 0x00a4: return "?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NABVlocation@2@@Z";
case 0x00a5: return "?IsCurrentTaskCollectionCanceling@Context@Concurrency@@SA_NXZ";
case 0x00a6: return "?Log2@details@Concurrency@@YAKI@Z";
case 0x00a7: return "?Oversubscribe@Context@Concurrency@@SAX_N@Z";
case 0x00a8: return "?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPAX@Z";
case 0x00a9: return "?ResetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXXZ";
case 0x00aa: return "?ScheduleGroupId@Context@Concurrency@@SAIXZ";
case 0x00ab: return "?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0@Z";
case 0x00ac: return "?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0AAVlocation@2@@Z";
case 0x00ad: return "?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QAEXII@Z";
case 0x00ae: return "?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z";
case 0x00af: return "?SetPolicyValue@SchedulerPolicy@Concurrency@@QAEIW4PolicyElementKey@2@I@Z";
case 0x00b0: return "?VirtualProcessorId@Context@Concurrency@@SAIXZ";
case 0x00b1: return "?Yield@Context@Concurrency@@SAXXZ";
case 0x00b2: return "?_Abort@_StructuredTaskCollection@details@Concurrency@@AAEXXZ";
case 0x00b3: return "?_Acquire@_NonReentrantBlockingLock@details@Concurrency@@QAEXXZ";
case 0x00b4: return "?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QAEXPAX@Z";
case 0x00b5: return "?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ";
case 0x00b6: return "?_Acquire@_ReentrantLock@details@Concurrency@@QAEXXZ";
case 0x00b7: return "?_Acquire@_ReentrantPPLLock@details@Concurrency@@QAEXPAX@Z";
case 0x00b8: return "?_AcquireRead@_ReaderWriterLock@details@Concurrency@@QAEXXZ";
case 0x00b9: return "?_AcquireWrite@_ReaderWriterLock@details@Concurrency@@QAEXXZ";
case 0x00ba: return "?_Cancel@_CancellationTokenState@details@Concurrency@@QAEXXZ";
case 0x00bb: return "?_Cancel@_StructuredTaskCollection@details@Concurrency@@QAEXXZ";
case 0x00bc: return "?_Cancel@_TaskCollection@details@Concurrency@@QAEXXZ";
case 0x00bd: return "?_CheckTaskCollection@_UnrealizedChore@details@Concurrency@@IAEXXZ";
case 0x00be: return "?_CleanupToken@_StructuredTaskCollection@details@Concurrency@@AAEXXZ";
case 0x00bf: return "?_ConcRT_CoreAssert@details@Concurrency@@YAXPBD0H@Z";
case 0x00c0: return "?_ConcRT_Trace@details@Concurrency@@YAXHPB_WZZ";
case 0x00c1: return "?_Confirm_cancel@_Cancellation_beacon@details@Concurrency@@QAE_NXZ";
case 0x00c2: return "?_Copy_str@exception@std@@AAEXPBD@Z";
case 0x00c3: return "?_CurrentContext@_Context@details@Concurrency@@SA?AV123@XZ";
case 0x00c4: return "?_Current_node@location@Concurrency@@SA?AV12@XZ";
case 0x00c5: return "?_DeregisterCallback@_CancellationTokenState@details@Concurrency@@QAEXPAV_CancellationTokenRegistration@23@@Z";
case 0x00c6: return "?_Destroy@_AsyncTaskCollection@details@Concurrency@@EAEXXZ";
case 0x00c7: return "?_Destroy@_CancellationTokenState@details@Concurrency@@EAEXXZ";
case 0x00c8: return "?_DoYield@?$_SpinWait@$00@details@Concurrency@@IAEXXZ";
case 0x00c9: return "?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ";
case 0x00ca: return "?_Get@_CurrentScheduler@details@Concurrency@@SA?AV_Scheduler@23@XZ";
case 0x00cb: return "?_GetConcRTTraceInfo@Concurrency@@YAPBU_CONCRT_TRACE_INFO@details@1@XZ";
case 0x00cc: return "?_GetConcurrency@details@Concurrency@@YAIXZ";
case 0x00cd: return "?_GetCurrentInlineDepth@_StackGuard@details@Concurrency@@CAAAIXZ";
case 0x00ce: return "?_GetNumberOfVirtualProcessors@_CurrentScheduler@details@Concurrency@@SAIXZ";
case 0x00cf: return "?_GetScheduler@_Scheduler@details@Concurrency@@QAEPAVScheduler@3@XZ";
case 0x00d0: return "?_Id@_CurrentScheduler@details@Concurrency@@SAIXZ";
case 0x00d1: return "?_Invoke@_CancellationTokenRegistration@details@Concurrency@@AAEXXZ";
case 0x00d2: return "?_IsCanceling@_StructuredTaskCollection@details@Concurrency@@QAE_NXZ";
case 0x00d3: return "?_IsCanceling@_TaskCollection@details@Concurrency@@QAE_NXZ";
case 0x00d4: return "?_IsSynchronouslyBlocked@_Context@details@Concurrency@@QBE_NXZ";
case 0x00d5: return "?_Name_base@type_info@@CAPBDPBV1@PAU__type_info_node@@@Z";
case 0x00d6: return "?_Name_base_internal@type_info@@CAPBDPBV1@PAU__type_info_node@@@Z";
case 0x00d7: return "?_NewCollection@_AsyncTaskCollection@details@Concurrency@@SAPAV123@PAV_CancellationTokenState@23@@Z";
case 0x00d8: return "?_NewTokenState@_CancellationTokenState@details@Concurrency@@SAPAV123@XZ";
case 0x00d9: return "?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IAEKXZ";
case 0x00da: return "?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IAEKXZ";
case 0x00db: return "?_Oversubscribe@_Context@details@Concurrency@@SAX_N@Z";
case 0x00dc: return "?_Reference@_Scheduler@details@Concurrency@@QAEIXZ";
case 0x00dd: return "?_RegisterCallback@_CancellationTokenState@details@Concurrency@@QAEPAV_CancellationTokenRegistration@23@P6AXPAX@Z0H@Z";
case 0x00de: return "?_RegisterCallback@_CancellationTokenState@details@Concurrency@@QAEXPAV_CancellationTokenRegistration@23@@Z";
case 0x00df: return "?_Release@_NonReentrantBlockingLock@details@Concurrency@@QAEXXZ";
case 0x00e0: return "?_Release@_NonReentrantPPLLock@details@Concurrency@@QAEXXZ";
case 0x00e1: return "?_Release@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ";
case 0x00e2: return "?_Release@_ReentrantLock@details@Concurrency@@QAEXXZ";
case 0x00e3: return "?_Release@_ReentrantPPLLock@details@Concurrency@@QAEXXZ";
case 0x00e4: return "?_Release@_Scheduler@details@Concurrency@@QAEIXZ";
case 0x00e5: return "?_ReleaseRead@_ReaderWriterLock@details@Concurrency@@QAEXXZ";
case 0x00e6: return "?_ReleaseWrite@_ReaderWriterLock@details@Concurrency@@QAEXXZ";
case 0x00e7: return "?_ReportUnobservedException@details@Concurrency@@YAXXZ";
case 0x00e8: return "?_Reset@?$_SpinWait@$00@details@Concurrency@@IAEXXZ";
case 0x00e9: return "?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ";
case 0x00ea: return "?_RunAndWait@_StructuredTaskCollection@details@Concurrency@@QAG?AW4_TaskCollectionStatus@23@PAV_UnrealizedChore@23@@Z";
case 0x00eb: return "?_RunAndWait@_TaskCollection@details@Concurrency@@QAG?AW4_TaskCollectionStatus@23@PAV_UnrealizedChore@23@@Z";
case 0x00ec: return "?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAEXPAV_UnrealizedChore@23@@Z";
case 0x00ed: return "?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAEXPAV_UnrealizedChore@23@PAVlocation@3@@Z";
case 0x00ee: return "?_Schedule@_TaskCollection@details@Concurrency@@QAEXPAV_UnrealizedChore@23@@Z";
case 0x00ef: return "?_Schedule@_TaskCollection@details@Concurrency@@QAEXPAV_UnrealizedChore@23@PAVlocation@3@@Z";
case 0x00f0: return "?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPAX@Z0@Z";
case 0x00f1: return "?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QAEXI@Z";
case 0x00f2: return "?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QAEXI@Z";
case 0x00f3: return "?_SetUnobservedExceptionHandler@details@Concurrency@@YAXP6AXXZ@Z";
case 0x00f4: return "?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IAE_NXZ";
case 0x00f5: return "?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IAE_NXZ";
case 0x00f6: return "?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QAE_NXZ";
case 0x00f7: return "?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QAE_NXZ";
case 0x00f8: return "?_SpinYield@Context@Concurrency@@SAXXZ";
case 0x00f9: return "?_Start@_Timer@details@Concurrency@@IAEXXZ";
case 0x00fa: return "?_Stop@_Timer@details@Concurrency@@IAEXXZ";
case 0x00fb: return "?_Tidy@exception@std@@AAEXXZ";
case 0x00fc: return "?_Trace_agents@Concurrency@@YAXW4Agents_EventType@1@_JZZ";
case 0x00fd: return "?_Trace_ppl_function@Concurrency@@YAXABU_GUID@@EW4ConcRT_EventType@1@@Z";
case 0x00fe: return "?_TryAcquire@_NonReentrantBlockingLock@details@Concurrency@@QAE_NXZ";
case 0x00ff: return "?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QAE_NXZ";
case 0x0100: return "?_TryAcquire@_ReentrantLock@details@Concurrency@@QAE_NXZ";
case 0x0101: return "?_TryAcquireWrite@_ReaderWriterLock@details@Concurrency@@QAE_NXZ";
case 0x0102: return "?_Type_info_dtor@type_info@@CAXPAV1@@Z";
case 0x0103: return "?_Type_info_dtor_internal@type_info@@CAXPAV1@@Z";
case 0x0104: return "?_UnderlyingYield@details@Concurrency@@YAXXZ";
case 0x0105: return "?_ValidateExecute@@YAHP6GHXZ@Z";
case 0x0106: return "?_ValidateRead@@YAHPBXI@Z";
case 0x0107: return "?_ValidateWrite@@YAHPAXI@Z";
case 0x0108: return "?_Value@_SpinCount@details@Concurrency@@SAIXZ";
case 0x0109: return "?_Yield@_Context@details@Concurrency@@SAXXZ";
case 0x010a: return "?__ExceptionPtrAssign@@YAXPAXPBX@Z";
case 0x010b: return "?__ExceptionPtrCompare@@YA_NPBX0@Z";
case 0x010c: return "?__ExceptionPtrCopy@@YAXPAXPBX@Z";
case 0x010d: return "?__ExceptionPtrCopyException@@YAXPAXPBX1@Z";
case 0x010e: return "?__ExceptionPtrCreate@@YAXPAX@Z";
case 0x010f: return "?__ExceptionPtrCurrentException@@YAXPAX@Z";
case 0x0110: return "?__ExceptionPtrDestroy@@YAXPAX@Z";
case 0x0111: return "?__ExceptionPtrRethrow@@YAXPBX@Z";
case 0x0112: return "?__ExceptionPtrSwap@@YAXPAX0@Z";
case 0x0113: return "?__ExceptionPtrToBool@@YA_NPBX@Z";
case 0x0115: return "?_inconsistency@@YAXXZ";
case 0x0116: return "?_invalid_parameter@@YAXPBG00II@Z";
case 0x0117: return "?_is_exception_typeof@@YAHABVtype_info@@PAU_EXCEPTION_POINTERS@@@Z";
case 0x0118: return "?_name_internal_method@type_info@@QBEPBDPAU__type_info_node@@@Z";
case 0x0119: return "?_open@@YAHPBDHH@Z";
case 0x011a: return "?_query_new_handler@@YAP6AHI@ZXZ";
case 0x011b: return "?_query_new_mode@@YAHXZ";
case 0x011c: return "?_set_new_handler@@YAP6AHI@ZH@Z";
case 0x011d: return "?_set_new_handler@@YAP6AHI@ZP6AHI@Z@Z";
case 0x011e: return "?_set_new_mode@@YAHH@Z";
case 0x011f: return "?_set_se_translator@@YAP6AXIPAU_EXCEPTION_POINTERS@@@ZH@Z";
case 0x0120: return "?_set_se_translator@@YAP6AXIPAU_EXCEPTION_POINTERS@@@ZP6AXI0@Z@Z";
case 0x0121: return "?_sopen@@YAHPBDHHH@Z";
case 0x0122: return "?_type_info_dtor_internal_method@type_info@@QAEXXZ";
case 0x0123: return "?_wopen@@YAHPB_WHH@Z";
case 0x0124: return "?_wsopen@@YAHPB_WHHH@Z";
case 0x0125: return "?before@type_info@@QBE_NABV1@@Z";
case 0x0126: return "?current@location@Concurrency@@SA?AV12@XZ";
case 0x0127: return "?from_numa_node@location@Concurrency@@SA?AV12@G@Z";
case 0x0128: return "?get_error_code@scheduler_resource_allocation_error@Concurrency@@QBEJXZ";
case 0x0129: return "?lock@critical_section@Concurrency@@QAEXXZ";
case 0x012a: return "?lock@reader_writer_lock@Concurrency@@QAEXXZ";
case 0x012b: return "?lock_read@reader_writer_lock@Concurrency@@QAEXXZ";
case 0x012c: return "?name@type_info@@QBEPBDPAU__type_info_node@@@Z";
case 0x012d: return "?native_handle@critical_section@Concurrency@@QAEAAV12@XZ";
case 0x012e: return "?notify_all@_Condition_variable@details@Concurrency@@QAEXXZ";
case 0x012f: return "?notify_one@_Condition_variable@details@Concurrency@@QAEXXZ";
case 0x0130: return "?raw_name@type_info@@QBEPBDXZ";
case 0x0131: return "?reset@event@Concurrency@@QAEXXZ";
case 0x0132: return "?set@event@Concurrency@@QAEXXZ";
case 0x0133: return "?set_new_handler@@YAP6AXXZP6AXXZ@Z";
case 0x0134: return "?set_task_execution_resources@Concurrency@@YAXGPAU_GROUP_AFFINITY@@@Z";
case 0x0135: return "?set_task_execution_resources@Concurrency@@YAXK@Z";
case 0x0136: return "?set_terminate@@YAP6AXXZH@Z";
case 0x0137: return "?set_terminate@@YAP6AXXZP6AXXZ@Z";
case 0x0138: return "?set_unexpected@@YAP6AXXZH@Z";
case 0x0139: return "?set_unexpected@@YAP6AXXZP6AXXZ@Z";
case 0x013a: return "?swprintf@@YAHPAGIPBGZZ";
case 0x013b: return "?swprintf@@YAHPA_WIPB_WZZ";
case 0x013c: return "?terminate@@YAXXZ";
case 0x013d: return "?try_lock@critical_section@Concurrency@@QAE_NXZ";
case 0x013e: return "?try_lock@reader_writer_lock@Concurrency@@QAE_NXZ";
case 0x013f: return "?try_lock_for@critical_section@Concurrency@@QAE_NI@Z";
case 0x0140: return "?try_lock_read@reader_writer_lock@Concurrency@@QAE_NXZ";
case 0x0141: return "?unexpected@@YAXXZ";
case 0x0142: return "?unlock@critical_section@Concurrency@@QAEXXZ";
case 0x0143: return "?unlock@reader_writer_lock@Concurrency@@QAEXXZ";
case 0x0144: return "?vswprintf@@YAHPA_WIPB_WPAD@Z";
case 0x0145: return "?wait@Concurrency@@YAXI@Z";
case 0x0146: return "?wait@_Condition_variable@details@Concurrency@@QAEXAAVcritical_section@3@@Z";
case 0x0147: return "?wait@event@Concurrency@@QAEII@Z";
case 0x0148: return "?wait_for@_Condition_variable@details@Concurrency@@QAE_NAAVcritical_section@3@I@Z";
case 0x0149: return "?wait_for_multiple@event@Concurrency@@SAIPAPAV12@I_NI@Z";
case 0x014a: return "?what@exception@std@@UBEPBDXZ";
case 0x014c: return "_CIacos";
case 0x014d: return "_CIasin";
case 0x014e: return "_CIatan";
case 0x014f: return "_CIatan2";
case 0x0150: return "_CIcos";
case 0x0151: return "_CIcosh";
case 0x0152: return "_CIexp";
case 0x0153: return "_CIfmod";
case 0x0154: return "_CIlog";
case 0x0155: return "_CIlog10";
case 0x0156: return "_CIpow";
case 0x0157: return "_CIsin";
case 0x0158: return "_CIsinh";
case 0x0159: return "_CIsqrt";
case 0x015a: return "_CItan";
case 0x015b: return "_CItanh";
case 0x015c: return "_CRT_RTC_INIT";
case 0x015d: return "_CRT_RTC_INITW";
case 0x015e: return "_CreateFrameInfo";
case 0x015f: return "_CxxThrowException";
case 0x0160: return "_EH_prolog";
case 0x0161: return "_FindAndUnlinkFrame";
case 0x0162: return "_Getdays";
case 0x0163: return "_Getmonths";
case 0x0164: return "_Gettnames";
case 0x0165: return "_HUGE";
case 0x0166: return "_IsExceptionObjectToBeDestroyed";
case 0x0167: return "_Lock_shared_ptr_spin_lock";
case 0x0168: return "_NLG_Dispatch2";
case 0x0169: return "_NLG_Return";
case 0x016a: return "_NLG_Return2";
case 0x016b: return "_Strftime";
case 0x016c: return "_Unlock_shared_ptr_spin_lock";
case 0x016d: return "_W_Getdays";
case 0x016e: return "_W_Getmonths";
case 0x016f: return "_W_Gettnames";
case 0x0170: return "_Wcsftime";
case 0x0171: return "_XcptFilter";
case 0x0172: return "__AdjustPointer";
case 0x0173: return "__BuildCatchObject";
case 0x0174: return "__BuildCatchObjectHelper";
case 0x0175: return "__CppXcptFilter";
case 0x0176: return "__CxxDetectRethrow";
case 0x0177: return "__CxxExceptionFilter";
case 0x0178: return "__CxxFrameHandler";
case 0x0179: return "__CxxFrameHandler2";
case 0x017a: return "__CxxFrameHandler3";
case 0x017b: return "__CxxLongjmpUnwind";
case 0x017c: return "__CxxQueryExceptionSize";
case 0x017d: return "__CxxRegisterExceptionObject";
case 0x017e: return "__CxxUnregisterExceptionObject";
case 0x017f: return "__DestructExceptionObject";
case 0x0180: return "__FrameUnwindFilter";
case 0x0181: return "__RTCastToVoid";
case 0x0182: return "__RTDynamicCast";
case 0x0183: return "__RTtypeid";
case 0x0184: return "__STRINGTOLD";
case 0x0185: return "__STRINGTOLD_L";
case 0x0186: return "__TypeMatch";
case 0x0197: return "___crtCreateSemaphoreExW@24";
case 0x0187: return "___lc_codepage_func";
case 0x0188: return "___lc_collate_cp_func";
case 0x0189: return "___lc_locale_name_func";
case 0x018a: return "___mb_cur_max_func";
case 0x018b: return "___mb_cur_max_l_func";
case 0x018c: return "___setlc_active_func";
case 0x018d: return "___unguarded_readlc_active_add_func";
case 0x018e: return "__argc";
case 0x018f: return "__argv";
case 0x0190: return "__badioinfo";
case 0x0191: return "__clean_type_info_names_internal";
case 0x0192: return "__control87_2";
case 0x0193: return "__create_locale";
case 0x0194: return "__crtCompareStringA";
case 0x0195: return "__crtCompareStringEx";
case 0x0196: return "__crtCompareStringW";
case 0x0198: return "__crtCreateSymbolicLinkW";
case 0x0199: return "__crtEnumSystemLocalesEx";
case 0x019a: return "__crtFlsAlloc";
case 0x019b: return "__crtFlsFree";
case 0x019c: return "__crtFlsGetValue";
case 0x019d: return "__crtFlsSetValue";
case 0x019e: return "__crtGetDateFormatEx";
case 0x019f: return "__crtGetLocaleInfoEx";
case 0x01a0: return "__crtGetShowWindowMode";
case 0x01a1: return "__crtGetTimeFormatEx";
case 0x01a2: return "__crtGetUserDefaultLocaleName";
case 0x01a3: return "__crtInitializeCriticalSectionEx";
case 0x01a4: return "__crtIsPackagedApp";
case 0x01a5: return "__crtIsValidLocaleName";
case 0x01a6: return "__crtLCMapStringA";
case 0x01a7: return "__crtLCMapStringEx";
case 0x01a8: return "__crtLCMapStringW";
case 0x01a9: return "__crtSetThreadStackGuarantee";
case 0x01aa: return "__crtSetUnhandledExceptionFilter";
case 0x01ab: return "__crtTerminateProcess";
case 0x01ac: return "__crtUnhandledException";
case 0x01ad: return "__daylight";
case 0x01ae: return "__dllonexit";
case 0x01af: return "__doserrno";
case 0x01b0: return "__dstbias";
case 0x01b1: return "__fpecode";
case 0x01b2: return "__free_locale";
case 0x01b3: return "__get_current_locale";
case 0x01b4: return "__get_flsindex";
case 0x01b5: return "__get_tlsindex";
case 0x01b6: return "__getmainargs";
case 0x01b7: return "__initenv";
case 0x01b8: return "__iob_func";
case 0x01b9: return "__isascii";
case 0x01ba: return "__iscsym";
case 0x01bb: return "__iscsymf";
case 0x01bc: return "__iswcsym";
case 0x01bd: return "__iswcsymf";
case 0x01be: return "__lconv";
case 0x01bf: return "__lconv_init";
case 0x01c0: return "__libm_sse2_acos";
case 0x01c1: return "__libm_sse2_acosf";
case 0x01c2: return "__libm_sse2_asin";
case 0x01c3: return "__libm_sse2_asinf";
case 0x01c4: return "__libm_sse2_atan";
case 0x01c5: return "__libm_sse2_atan2";
case 0x01c6: return "__libm_sse2_atanf";
case 0x01c7: return "__libm_sse2_cos";
case 0x01c8: return "__libm_sse2_cosf";
case 0x01c9: return "__libm_sse2_exp";
case 0x01ca: return "__libm_sse2_expf";
case 0x01cb: return "__libm_sse2_log";
case 0x01cc: return "__libm_sse2_log10";
case 0x01cd: return "__libm_sse2_log10f";
case 0x01ce: return "__libm_sse2_logf";
case 0x01cf: return "__libm_sse2_pow";
case 0x01d0: return "__libm_sse2_powf";
case 0x01d1: return "__libm_sse2_sin";
case 0x01d2: return "__libm_sse2_sinf";
case 0x01d3: return "__libm_sse2_tan";
case 0x01d4: return "__libm_sse2_tanf";
case 0x01d5: return "__mb_cur_max";
case 0x01d6: return "__p___argc";
case 0x01d7: return "__p___argv";
case 0x01d8: return "__p___initenv";
case 0x01d9: return "__p___mb_cur_max";
case 0x01da: return "__p___wargv";
case 0x01db: return "__p___winitenv";
case 0x01dc: return "__p__acmdln";
case 0x01dd: return "__p__commode";
case 0x01de: return "__p__daylight";
case 0x01df: return "__p__dstbias";
case 0x01e0: return "__p__environ";
case 0x01e1: return "__p__fmode";
case 0x01e2: return "__p__iob";
case 0x01e3: return "__p__mbcasemap";
case 0x01e4: return "__p__mbctype";
case 0x01e5: return "__p__pctype";
case 0x01e6: return "__p__pgmptr";
case 0x01e7: return "__p__pwctype";
case 0x01e8: return "__p__timezone";
case 0x01e9: return "__p__tzname";
case 0x01ea: return "__p__wcmdln";
case 0x01eb: return "__p__wenviron";
case 0x01ec: return "__p__wpgmptr";
case 0x01ed: return "__pctype_func";
case 0x01ee: return "__pioinfo";
case 0x01ef: return "__pwctype_func";
case 0x01f0: return "__pxcptinfoptrs";
case 0x01f1: return "__report_gsfailure";
case 0x01f2: return "__set_app_type";
case 0x01f3: return "__setlc_active";
case 0x01f4: return "__setusermatherr";
case 0x01f5: return "__strncnt";
case 0x01f6: return "__swprintf_l";
case 0x01f7: return "__sys_errlist";
case 0x01f8: return "__sys_nerr";
case 0x01f9: return "__threadhandle";
case 0x01fa: return "__threadid";
case 0x01fb: return "__timezone";
case 0x01fc: return "__toascii";
case 0x01fd: return "__tzname";
case 0x01fe: return "__unDName";
case 0x01ff: return "__unDNameEx";
case 0x0200: return "__unDNameHelper";
case 0x0114: return "__uncaught_exception";
case 0x0201: return "__unguarded_readlc_active";
case 0x0202: return "__vswprintf_l";
case 0x0203: return "__wargv";
case 0x0204: return "__wcserror";
case 0x0205: return "__wcserror_s";
case 0x0206: return "__wcsncnt";
case 0x0207: return "__wgetmainargs";
case 0x0208: return "__winitenv";
case 0x0209: return "_abnormal_termination";
case 0x020a: return "_abs64";
case 0x020b: return "_access";
case 0x020c: return "_access_s";
case 0x020d: return "_acmdln";
case 0x020e: return "_aligned_free";
case 0x020f: return "_aligned_malloc";
case 0x0210: return "_aligned_msize";
case 0x0211: return "_aligned_offset_malloc";
case 0x0212: return "_aligned_offset_realloc";
case 0x0213: return "_aligned_offset_recalloc";
case 0x0214: return "_aligned_realloc";
case 0x0215: return "_aligned_recalloc";
case 0x0216: return "_amsg_exit";
case 0x0217: return "_assert";
case 0x0218: return "_atodbl";
case 0x0219: return "_atodbl_l";
case 0x021a: return "_atof_l";
case 0x021b: return "_atoflt";
case 0x021c: return "_atoflt_l";
case 0x021d: return "_atoi64";
case 0x021e: return "_atoi64_l";
case 0x021f: return "_atoi_l";
case 0x0220: return "_atol_l";
case 0x0221: return "_atoldbl";
case 0x0222: return "_atoldbl_l";
case 0x0223: return "_beep";
case 0x0224: return "_beginthread";
case 0x0225: return "_beginthreadex";
case 0x0226: return "_byteswap_uint64";
case 0x0227: return "_byteswap_ulong";
case 0x0228: return "_byteswap_ushort";
case 0x0229: return "_c_exit";
case 0x022a: return "_cabs";
case 0x022b: return "_callnewh";
case 0x022c: return "_calloc_crt";
case 0x022d: return "_cexit";
case 0x022e: return "_cgets";
case 0x022f: return "_cgets_s";
case 0x0230: return "_cgetws";
case 0x0231: return "_cgetws_s";
case 0x0232: return "_chdir";
case 0x0233: return "_chdrive";
case 0x0234: return "_chgsign";
case 0x0235: return "_chkesp";
case 0x0236: return "_chmod";
case 0x0237: return "_chsize";
case 0x0238: return "_chsize_s";
case 0x0239: return "_clearfp";
case 0x023a: return "_close";
case 0x023b: return "_commit";
case 0x023c: return "_commode";
case 0x023d: return "_configthreadlocale";
case 0x023e: return "_control87";
case 0x023f: return "_controlfp";
case 0x0240: return "_controlfp_s";
case 0x0241: return "_copysign";
case 0x0242: return "_cprintf";
case 0x0243: return "_cprintf_l";
case 0x0244: return "_cprintf_p";
case 0x0245: return "_cprintf_p_l";
case 0x0246: return "_cprintf_s";
case 0x0247: return "_cprintf_s_l";
case 0x0248: return "_cputs";
case 0x0249: return "_cputws";
case 0x024a: return "_creat";
case 0x024b: return "_create_locale";
case 0x024c: return "_crt_debugger_hook";
case 0x024d: return "_cscanf";
case 0x024e: return "_cscanf_l";
case 0x024f: return "_cscanf_s";
case 0x0250: return "_cscanf_s_l";
case 0x0251: return "_ctime32";
case 0x0252: return "_ctime32_s";
case 0x0253: return "_ctime64";
case 0x0254: return "_ctime64_s";
case 0x0255: return "_cwait";
case 0x0256: return "_cwprintf";
case 0x0257: return "_cwprintf_l";
case 0x0258: return "_cwprintf_p";
case 0x0259: return "_cwprintf_p_l";
case 0x025a: return "_cwprintf_s";
case 0x025b: return "_cwprintf_s_l";
case 0x025c: return "_cwscanf";
case 0x025d: return "_cwscanf_l";
case 0x025e: return "_cwscanf_s";
case 0x025f: return "_cwscanf_s_l";
case 0x0260: return "_daylight";
case 0x0261: return "_difftime32";
case 0x0262: return "_difftime64";
case 0x0263: return "_dosmaperr";
case 0x0264: return "_dstbias";
case 0x0265: return "_dup";
case 0x0266: return "_dup2";
case 0x0267: return "_dupenv_s";
case 0x0268: return "_ecvt";
case 0x0269: return "_ecvt_s";
case 0x026a: return "_endthread";
case 0x026b: return "_endthreadex";
case 0x026c: return "_environ";
case 0x026d: return "_eof";
case 0x026e: return "_errno";
case 0x026f: return "_except_handler2";
case 0x0270: return "_except_handler3";
case 0x0271: return "_except_handler4_common";
case 0x0272: return "_execl";
case 0x0273: return "_execle";
case 0x0274: return "_execlp";
case 0x0275: return "_execlpe";
case 0x0276: return "_execv";
case 0x0277: return "_execve";
case 0x0278: return "_execvp";
case 0x0279: return "_execvpe";
case 0x027a: return "_exit";
case 0x027b: return "_expand";
case 0x027c: return "_fclose_nolock";
case 0x027d: return "_fcloseall";
case 0x027e: return "_fcvt";
case 0x027f: return "_fcvt_s";
case 0x0280: return "_fdopen";
case 0x0281: return "_fflush_nolock";
case 0x0282: return "_fgetchar";
case 0x0283: return "_fgetwc_nolock";
case 0x0284: return "_fgetwchar";
case 0x0285: return "_filbuf";
case 0x0286: return "_filelength";
case 0x0287: return "_filelengthi64";
case 0x0288: return "_fileno";
case 0x0289: return "_findclose";
case 0x028a: return "_findfirst32";
case 0x028b: return "_findfirst32i64";
case 0x028c: return "_findfirst64";
case 0x028d: return "_findfirst64i32";
case 0x028e: return "_findnext32";
case 0x028f: return "_findnext32i64";
case 0x0290: return "_findnext64";
case 0x0291: return "_findnext64i32";
case 0x0292: return "_finite";
case 0x0293: return "_flsbuf";
case 0x0294: return "_flushall";
case 0x0295: return "_fmode";
case 0x0296: return "_fpclass";
case 0x0297: return "_fpieee_flt";
case 0x0298: return "_fpreset";
case 0x0299: return "_fprintf_l";
case 0x029a: return "_fprintf_p";
case 0x029b: return "_fprintf_p_l";
case 0x029c: return "_fprintf_s_l";
case 0x029d: return "_fputchar";
case 0x029e: return "_fputwc_nolock";
case 0x029f: return "_fputwchar";
case 0x02a0: return "_fread_nolock";
case 0x02a1: return "_fread_nolock_s";
case 0x02a2: return "_free_locale";
case 0x02a3: return "_freea";
case 0x02a4: return "_freea_s";
case 0x02a5: return "_freefls";
case 0x02a6: return "_fscanf_l";
case 0x02a7: return "_fscanf_s_l";
case 0x02a8: return "_fseek_nolock";
case 0x02a9: return "_fseeki64";
case 0x02aa: return "_fseeki64_nolock";
case 0x02ab: return "_fsopen";
case 0x02ac: return "_fstat32";
case 0x02ad: return "_fstat32i64";
case 0x02ae: return "_fstat64";
case 0x02af: return "_fstat64i32";
case 0x02b0: return "_ftell_nolock";
case 0x02b1: return "_ftelli64";
case 0x02b2: return "_ftelli64_nolock";
case 0x02b3: return "_ftime32";
case 0x02b4: return "_ftime32_s";
case 0x02b5: return "_ftime64";
case 0x02b6: return "_ftime64_s";
case 0x02b7: return "_ftol";
case 0x02b8: return "_fullpath";
case 0x02b9: return "_futime32";
case 0x02ba: return "_futime64";
case 0x02bb: return "_fwprintf_l";
case 0x02bc: return "_fwprintf_p";
case 0x02bd: return "_fwprintf_p_l";
case 0x02be: return "_fwprintf_s_l";
case 0x02bf: return "_fwrite_nolock";
case 0x02c0: return "_fwscanf_l";
case 0x02c1: return "_fwscanf_s_l";
case 0x02c2: return "_gcvt";
case 0x02c3: return "_gcvt_s";
case 0x02c4: return "_get_current_locale";
case 0x02c5: return "_get_daylight";
case 0x02c6: return "_get_doserrno";
case 0x02c7: return "_get_dstbias";
case 0x02c8: return "_get_errno";
case 0x02c9: return "_get_fmode";
case 0x02ca: return "_get_heap_handle";
case 0x02cb: return "_get_invalid_parameter_handler";
case 0x02cc: return "_get_osfhandle";
case 0x02cd: return "_get_output_format";
case 0x02ce: return "_get_pgmptr";
case 0x02cf: return "_get_printf_count_output";
case 0x02d0: return "_get_purecall_handler";
case 0x02d1: return "_get_terminate";
case 0x02d2: return "_get_timezone";
case 0x02d3: return "_get_tzname";
case 0x02d4: return "_get_unexpected";
case 0x02d5: return "_get_wpgmptr";
case 0x02d6: return "_getc_nolock";
case 0x02d7: return "_getch";
case 0x02d8: return "_getch_nolock";
case 0x02d9: return "_getche";
case 0x02da: return "_getche_nolock";
case 0x02db: return "_getcwd";
case 0x02dc: return "_getdcwd";
case 0x02dd: return "_getdiskfree";
case 0x02de: return "_getdllprocaddr";
case 0x02df: return "_getdrive";
case 0x02e0: return "_getdrives";
case 0x02e1: return "_getmaxstdio";
case 0x02e2: return "_getmbcp";
case 0x02e3: return "_getpid";
case 0x02e4: return "_getptd";
case 0x02e5: return "_getsystime";
case 0x02e6: return "_getw";
case 0x02e7: return "_getwch";
case 0x02e8: return "_getwch_nolock";
case 0x02e9: return "_getwche";
case 0x02ea: return "_getwche_nolock";
case 0x02eb: return "_getws";
case 0x02ec: return "_getws_s";
case 0x02ed: return "_global_unwind2";
case 0x02ee: return "_gmtime32";
case 0x02ef: return "_gmtime32_s";
case 0x02f0: return "_gmtime64";
case 0x02f1: return "_gmtime64_s";
case 0x02f2: return "_heapadd";
case 0x02f3: return "_heapchk";
case 0x02f4: return "_heapmin";
case 0x02f5: return "_heapset";
case 0x02f6: return "_heapused";
case 0x02f7: return "_heapwalk";
case 0x02f8: return "_hypot";
case 0x02f9: return "_hypotf";
case 0x02fa: return "_i64toa";
case 0x02fb: return "_i64toa_s";
case 0x02fc: return "_i64tow";
case 0x02fd: return "_i64tow_s";
case 0x02fe: return "_initptd";
case 0x02ff: return "_initterm";
case 0x0300: return "_initterm_e";
case 0x0301: return "_inp";
case 0x0302: return "_inpd";
case 0x0303: return "_inpw";
case 0x0304: return "_invalid_parameter";
case 0x0305: return "_invalid_parameter_noinfo";
case 0x0306: return "_invalid_parameter_noinfo_noreturn";
case 0x0307: return "_invoke_watson";
case 0x0308: return "_iob";
case 0x0309: return "_isalnum_l";
case 0x030a: return "_isalpha_l";
case 0x030b: return "_isatty";
case 0x030c: return "_iscntrl_l";
case 0x030d: return "_isctype";
case 0x030e: return "_isctype_l";
case 0x030f: return "_isdigit_l";
case 0x0310: return "_isgraph_l";
case 0x0311: return "_isleadbyte_l";
case 0x0312: return "_islower_l";
case 0x0313: return "_ismbbalnum";
case 0x0314: return "_ismbbalnum_l";
case 0x0315: return "_ismbbalpha";
case 0x0316: return "_ismbbalpha_l";
case 0x0317: return "_ismbbgraph";
case 0x0318: return "_ismbbgraph_l";
case 0x0319: return "_ismbbkalnum";
case 0x031a: return "_ismbbkalnum_l";
case 0x031b: return "_ismbbkana";
case 0x031c: return "_ismbbkana_l";
case 0x031d: return "_ismbbkprint";
case 0x031e: return "_ismbbkprint_l";
case 0x031f: return "_ismbbkpunct";
case 0x0320: return "_ismbbkpunct_l";
case 0x0321: return "_ismbblead";
case 0x0322: return "_ismbblead_l";
case 0x0323: return "_ismbbprint";
case 0x0324: return "_ismbbprint_l";
case 0x0325: return "_ismbbpunct";
case 0x0326: return "_ismbbpunct_l";
case 0x0327: return "_ismbbtrail";
case 0x0328: return "_ismbbtrail_l";
case 0x0329: return "_ismbcalnum";
case 0x032a: return "_ismbcalnum_l";
case 0x032b: return "_ismbcalpha";
case 0x032c: return "_ismbcalpha_l";
case 0x032d: return "_ismbcdigit";
case 0x032e: return "_ismbcdigit_l";
case 0x032f: return "_ismbcgraph";
case 0x0330: return "_ismbcgraph_l";
case 0x0331: return "_ismbchira";
case 0x0332: return "_ismbchira_l";
case 0x0333: return "_ismbckata";
case 0x0334: return "_ismbckata_l";
case 0x0335: return "_ismbcl0";
case 0x0336: return "_ismbcl0_l";
case 0x0337: return "_ismbcl1";
case 0x0338: return "_ismbcl1_l";
case 0x0339: return "_ismbcl2";
case 0x033a: return "_ismbcl2_l";
case 0x033b: return "_ismbclegal";
case 0x033c: return "_ismbclegal_l";
case 0x033d: return "_ismbclower";
case 0x033e: return "_ismbclower_l";
case 0x033f: return "_ismbcprint";
case 0x0340: return "_ismbcprint_l";
case 0x0341: return "_ismbcpunct";
case 0x0342: return "_ismbcpunct_l";
case 0x0343: return "_ismbcspace";
case 0x0344: return "_ismbcspace_l";
case 0x0345: return "_ismbcsymbol";
case 0x0346: return "_ismbcsymbol_l";
case 0x0347: return "_ismbcupper";
case 0x0348: return "_ismbcupper_l";
case 0x0349: return "_ismbslead";
case 0x034a: return "_ismbslead_l";
case 0x034b: return "_ismbstrail";
case 0x034c: return "_ismbstrail_l";
case 0x034d: return "_isnan";
case 0x034e: return "_isprint_l";
case 0x034f: return "_ispunct_l";
case 0x0350: return "_isspace_l";
case 0x0351: return "_isupper_l";
case 0x0352: return "_iswalnum_l";
case 0x0353: return "_iswalpha_l";
case 0x0354: return "_iswcntrl_l";
case 0x0355: return "_iswcsym_l";
case 0x0356: return "_iswcsymf_l";
case 0x0357: return "_iswctype_l";
case 0x0358: return "_iswdigit_l";
case 0x0359: return "_iswgraph_l";
case 0x035a: return "_iswlower_l";
case 0x035b: return "_iswprint_l";
case 0x035c: return "_iswpunct_l";
case 0x035d: return "_iswspace_l";
case 0x035e: return "_iswupper_l";
case 0x035f: return "_iswxdigit_l";
case 0x0360: return "_isxdigit_l";
case 0x0361: return "_itoa";
case 0x0362: return "_itoa_s";
case 0x0363: return "_itow";
case 0x0364: return "_itow_s";
case 0x0365: return "_j0";
case 0x0366: return "_j1";
case 0x0367: return "_jn";
case 0x0368: return "_kbhit";
case 0x0369: return "_lfind";
case 0x036a: return "_lfind_s";
case 0x036b: return "_libm_sse2_acos_precise";
case 0x036c: return "_libm_sse2_asin_precise";
case 0x036d: return "_libm_sse2_atan_precise";
case 0x036e: return "_libm_sse2_cos_precise";
case 0x036f: return "_libm_sse2_exp_precise";
case 0x0370: return "_libm_sse2_log10_precise";
case 0x0371: return "_libm_sse2_log_precise";
case 0x0372: return "_libm_sse2_pow_precise";
case 0x0373: return "_libm_sse2_sin_precise";
case 0x0374: return "_libm_sse2_sqrt_precise";
case 0x0375: return "_libm_sse2_tan_precise";
case 0x0376: return "_loaddll";
case 0x0377: return "_local_unwind2";
case 0x0378: return "_local_unwind4";
case 0x0379: return "_localtime32";
case 0x037a: return "_localtime32_s";
case 0x037b: return "_localtime64";
case 0x037c: return "_localtime64_s";
case 0x037d: return "_lock";
case 0x037e: return "_lock_file";
case 0x037f: return "_locking";
case 0x0380: return "_logb";
case 0x0381: return "_longjmpex";
case 0x0382: return "_lrotl";
case 0x0383: return "_lrotr";
case 0x0384: return "_lsearch";
case 0x0385: return "_lsearch_s";
case 0x0386: return "_lseek";
case 0x0387: return "_lseeki64";
case 0x0388: return "_ltoa";
case 0x0389: return "_ltoa_s";
case 0x038a: return "_ltow";
case 0x038b: return "_ltow_s";
case 0x038c: return "_makepath";
case 0x038d: return "_makepath_s";
case 0x038e: return "_malloc_crt";
case 0x038f: return "_mbbtombc";
case 0x0390: return "_mbbtombc_l";
case 0x0391: return "_mbbtype";
case 0x0392: return "_mbbtype_l";
case 0x0393: return "_mbcasemap";
case 0x0394: return "_mbccpy";
case 0x0395: return "_mbccpy_l";
case 0x0396: return "_mbccpy_s";
case 0x0397: return "_mbccpy_s_l";
case 0x0398: return "_mbcjistojms";
case 0x0399: return "_mbcjistojms_l";
case 0x039a: return "_mbcjmstojis";
case 0x039b: return "_mbcjmstojis_l";
case 0x039c: return "_mbclen";
case 0x039d: return "_mbclen_l";
case 0x039e: return "_mbctohira";
case 0x039f: return "_mbctohira_l";
case 0x03a0: return "_mbctokata";
case 0x03a1: return "_mbctokata_l";
case 0x03a2: return "_mbctolower";
case 0x03a3: return "_mbctolower_l";
case 0x03a4: return "_mbctombb";
case 0x03a5: return "_mbctombb_l";
case 0x03a6: return "_mbctoupper";
case 0x03a7: return "_mbctoupper_l";
case 0x03a8: return "_mbctype";
case 0x03a9: return "_mblen_l";
case 0x03aa: return "_mbsbtype";
case 0x03ab: return "_mbsbtype_l";
case 0x03ac: return "_mbscat_s";
case 0x03ad: return "_mbscat_s_l";
case 0x03ae: return "_mbschr";
case 0x03af: return "_mbschr_l";
case 0x03b0: return "_mbscmp";
case 0x03b1: return "_mbscmp_l";
case 0x03b2: return "_mbscoll";
case 0x03b3: return "_mbscoll_l";
case 0x03b4: return "_mbscpy_s";
case 0x03b5: return "_mbscpy_s_l";
case 0x03b6: return "_mbscspn";
case 0x03b7: return "_mbscspn_l";
case 0x03b8: return "_mbsdec";
case 0x03b9: return "_mbsdec_l";
case 0x03ba: return "_mbsicmp";
case 0x03bb: return "_mbsicmp_l";
case 0x03bc: return "_mbsicoll";
case 0x03bd: return "_mbsicoll_l";
case 0x03be: return "_mbsinc";
case 0x03bf: return "_mbsinc_l";
case 0x03c0: return "_mbslen";
case 0x03c1: return "_mbslen_l";
case 0x03c2: return "_mbslwr";
case 0x03c3: return "_mbslwr_l";
case 0x03c4: return "_mbslwr_s";
case 0x03c5: return "_mbslwr_s_l";
case 0x03c6: return "_mbsnbcat";
case 0x03c7: return "_mbsnbcat_l";
case 0x03c8: return "_mbsnbcat_s";
case 0x03c9: return "_mbsnbcat_s_l";
case 0x03ca: return "_mbsnbcmp";
case 0x03cb: return "_mbsnbcmp_l";
case 0x03cc: return "_mbsnbcnt";
case 0x03cd: return "_mbsnbcnt_l";
case 0x03ce: return "_mbsnbcoll";
case 0x03cf: return "_mbsnbcoll_l";
case 0x03d0: return "_mbsnbcpy";
case 0x03d1: return "_mbsnbcpy_l";
case 0x03d2: return "_mbsnbcpy_s";
case 0x03d3: return "_mbsnbcpy_s_l";
case 0x03d4: return "_mbsnbicmp";
case 0x03d5: return "_mbsnbicmp_l";
case 0x03d6: return "_mbsnbicoll";
case 0x03d7: return "_mbsnbicoll_l";
case 0x03d8: return "_mbsnbset";
case 0x03d9: return "_mbsnbset_l";
case 0x03da: return "_mbsnbset_s";
case 0x03db: return "_mbsnbset_s_l";
case 0x03dc: return "_mbsncat";
case 0x03dd: return "_mbsncat_l";
case 0x03de: return "_mbsncat_s";
case 0x03df: return "_mbsncat_s_l";
case 0x03e0: return "_mbsnccnt";
case 0x03e1: return "_mbsnccnt_l";
case 0x03e2: return "_mbsncmp";
case 0x03e3: return "_mbsncmp_l";
case 0x03e4: return "_mbsncoll";
case 0x03e5: return "_mbsncoll_l";
case 0x03e6: return "_mbsncpy";
case 0x03e7: return "_mbsncpy_l";
case 0x03e8: return "_mbsncpy_s";
case 0x03e9: return "_mbsncpy_s_l";
case 0x03ea: return "_mbsnextc";
case 0x03eb: return "_mbsnextc_l";
case 0x03ec: return "_mbsnicmp";
case 0x03ed: return "_mbsnicmp_l";
case 0x03ee: return "_mbsnicoll";
case 0x03ef: return "_mbsnicoll_l";
case 0x03f0: return "_mbsninc";
case 0x03f1: return "_mbsninc_l";
case 0x03f2: return "_mbsnlen";
case 0x03f3: return "_mbsnlen_l";
case 0x03f4: return "_mbsnset";
case 0x03f5: return "_mbsnset_l";
case 0x03f6: return "_mbsnset_s";
case 0x03f7: return "_mbsnset_s_l";
case 0x03f8: return "_mbspbrk";
case 0x03f9: return "_mbspbrk_l";
case 0x03fa: return "_mbsrchr";
case 0x03fb: return "_mbsrchr_l";
case 0x03fc: return "_mbsrev";
case 0x03fd: return "_mbsrev_l";
case 0x03fe: return "_mbsset";
case 0x03ff: return "_mbsset_l";
case 0x0400: return "_mbsset_s";
case 0x0401: return "_mbsset_s_l";
case 0x0402: return "_mbsspn";
case 0x0403: return "_mbsspn_l";
case 0x0404: return "_mbsspnp";
case 0x0405: return "_mbsspnp_l";
case 0x0406: return "_mbsstr";
case 0x0407: return "_mbsstr_l";
case 0x0408: return "_mbstok";
case 0x0409: return "_mbstok_l";
case 0x040a: return "_mbstok_s";
case 0x040b: return "_mbstok_s_l";
case 0x040c: return "_mbstowcs_l";
case 0x040d: return "_mbstowcs_s_l";
case 0x040e: return "_mbstrlen";
case 0x040f: return "_mbstrlen_l";
case 0x0410: return "_mbstrnlen";
case 0x0411: return "_mbstrnlen_l";
case 0x0412: return "_mbsupr";
case 0x0413: return "_mbsupr_l";
case 0x0414: return "_mbsupr_s";
case 0x0415: return "_mbsupr_s_l";
case 0x0416: return "_mbtowc_l";
case 0x0417: return "_memccpy";
case 0x0418: return "_memicmp";
case 0x0419: return "_memicmp_l";
case 0x041a: return "_mkdir";
case 0x041b: return "_mkgmtime32";
case 0x041c: return "_mkgmtime64";
case 0x041d: return "_mktemp";
case 0x041e: return "_mktemp_s";
case 0x041f: return "_mktime32";
case 0x0420: return "_mktime64";
case 0x0421: return "_msize";
case 0x0422: return "_nextafter";
case 0x0423: return "_onexit";
case 0x0424: return "_open";
case 0x0425: return "_open_osfhandle";
case 0x0426: return "_outp";
case 0x0427: return "_outpd";
case 0x0428: return "_outpw";
case 0x0429: return "_pclose";
case 0x042a: return "_pctype";
case 0x042b: return "_pgmptr";
case 0x042c: return "_pipe";
case 0x042d: return "_popen";
case 0x042e: return "_printf_l";
case 0x042f: return "_printf_p";
case 0x0430: return "_printf_p_l";
case 0x0431: return "_printf_s_l";
case 0x0432: return "_purecall";
case 0x0433: return "_putch";
case 0x0434: return "_putch_nolock";
case 0x0435: return "_putenv";
case 0x0436: return "_putenv_s";
case 0x0437: return "_putw";
case 0x0438: return "_putwch";
case 0x0439: return "_putwch_nolock";
case 0x043a: return "_putws";
case 0x043b: return "_pwctype";
case 0x043c: return "_read";
case 0x043d: return "_realloc_crt";
case 0x043e: return "_recalloc";
case 0x043f: return "_recalloc_crt";
case 0x0440: return "_resetstkoflw";
case 0x0441: return "_rmdir";
case 0x0442: return "_rmtmp";
case 0x0443: return "_rotl";
case 0x0444: return "_rotl64";
case 0x0445: return "_rotr";
case 0x0446: return "_rotr64";
case 0x0447: return "_scalb";
case 0x0448: return "_scanf_l";
case 0x0449: return "_scanf_s_l";
case 0x044a: return "_scprintf";
case 0x044b: return "_scprintf_l";
case 0x044c: return "_scprintf_p";
case 0x044d: return "_scprintf_p_l";
case 0x044e: return "_scwprintf";
case 0x044f: return "_scwprintf_l";
case 0x0450: return "_scwprintf_p";
case 0x0451: return "_scwprintf_p_l";
case 0x0452: return "_searchenv";
case 0x0453: return "_searchenv_s";
case 0x0455: return "_seh_longjmp_unwind";
case 0x0454: return "_seh_longjmp_unwind4";
case 0x0456: return "_set_SSE2_enable";
case 0x0457: return "_set_abort_behavior";
case 0x0458: return "_set_controlfp";
case 0x0459: return "_set_doserrno";
case 0x045a: return "_set_errno";
case 0x045b: return "_set_error_mode";
case 0x045c: return "_set_fmode";
case 0x045d: return "_set_invalid_parameter_handler";
case 0x045e: return "_set_malloc_crt_max_wait";
case 0x045f: return "_set_output_format";
case 0x0460: return "_set_printf_count_output";
case 0x0461: return "_set_purecall_handler";
case 0x0462: return "_seterrormode";
case 0x0463: return "_setjmp";
case 0x0464: return "_setjmp3";
case 0x0465: return "_setmaxstdio";
case 0x0466: return "_setmbcp";
case 0x0467: return "_setmode";
case 0x0468: return "_setsystime";
case 0x0469: return "_sleep";
case 0x046a: return "_snprintf";
case 0x046b: return "_snprintf_c";
case 0x046c: return "_snprintf_c_l";
case 0x046d: return "_snprintf_l";
case 0x046e: return "_snprintf_s";
case 0x046f: return "_snprintf_s_l";
case 0x0470: return "_snscanf";
case 0x0471: return "_snscanf_l";
case 0x0472: return "_snscanf_s";
case 0x0473: return "_snscanf_s_l";
case 0x0474: return "_snwprintf";
case 0x0475: return "_snwprintf_l";
case 0x0476: return "_snwprintf_s";
case 0x0477: return "_snwprintf_s_l";
case 0x0478: return "_snwscanf";
case 0x0479: return "_snwscanf_l";
case 0x047a: return "_snwscanf_s";
case 0x047b: return "_snwscanf_s_l";
case 0x047c: return "_sopen";
case 0x047d: return "_sopen_s";
case 0x047e: return "_spawnl";
case 0x047f: return "_spawnle";
case 0x0480: return "_spawnlp";
case 0x0481: return "_spawnlpe";
case 0x0482: return "_spawnv";
case 0x0483: return "_spawnve";
case 0x0484: return "_spawnvp";
case 0x0485: return "_spawnvpe";
case 0x0486: return "_splitpath";
case 0x0487: return "_splitpath_s";
case 0x0488: return "_sprintf_l";
case 0x0489: return "_sprintf_p";
case 0x048a: return "_sprintf_p_l";
case 0x048b: return "_sprintf_s_l";
case 0x048c: return "_sscanf_l";
case 0x048d: return "_sscanf_s_l";
case 0x048e: return "_stat32";
case 0x048f: return "_stat32i64";
case 0x0490: return "_stat64";
case 0x0491: return "_stat64i32";
case 0x0492: return "_statusfp";
case 0x0493: return "_statusfp2";
case 0x0494: return "_strcoll_l";
case 0x0495: return "_strdate";
case 0x0496: return "_strdate_s";
case 0x0497: return "_strdup";
case 0x0498: return "_strerror";
case 0x0499: return "_strerror_s";
case 0x049a: return "_strftime_l";
case 0x049b: return "_stricmp";
case 0x049c: return "_stricmp_l";
case 0x049d: return "_stricoll";
case 0x049e: return "_stricoll_l";
case 0x049f: return "_strlwr";
case 0x04a0: return "_strlwr_l";
case 0x04a1: return "_strlwr_s";
case 0x04a2: return "_strlwr_s_l";
case 0x04a3: return "_strncoll";
case 0x04a4: return "_strncoll_l";
case 0x04a5: return "_strnicmp";
case 0x04a6: return "_strnicmp_l";
case 0x04a7: return "_strnicoll";
case 0x04a8: return "_strnicoll_l";
case 0x04a9: return "_strnset";
case 0x04aa: return "_strnset_s";
case 0x04ab: return "_strrev";
case 0x04ac: return "_strset";
case 0x04ad: return "_strset_s";
case 0x04ae: return "_strtime";
case 0x04af: return "_strtime_s";
case 0x04b0: return "_strtod_l";
case 0x04b1: return "_strtoi64";
case 0x04b2: return "_strtoi64_l";
case 0x04b3: return "_strtol_l";
case 0x04b4: return "_strtoui64";
case 0x04b5: return "_strtoui64_l";
case 0x04b6: return "_strtoul_l";
case 0x04b7: return "_strupr";
case 0x04b8: return "_strupr_l";
case 0x04b9: return "_strupr_s";
case 0x04ba: return "_strupr_s_l";
case 0x04bb: return "_strxfrm_l";
case 0x04bc: return "_swab";
case 0x04bd: return "_swprintf";
case 0x04be: return "_swprintf_c";
case 0x04bf: return "_swprintf_c_l";
case 0x04c0: return "_swprintf_p";
case 0x04c1: return "_swprintf_p_l";
case 0x04c2: return "_swprintf_s_l";
case 0x04c3: return "_swscanf_l";
case 0x04c4: return "_swscanf_s_l";
case 0x04c5: return "_sys_errlist";
case 0x04c6: return "_sys_nerr";
case 0x04c7: return "_tell";
case 0x04c8: return "_telli64";
case 0x04c9: return "_tempnam";
case 0x04ca: return "_time32";
case 0x04cb: return "_time64";
case 0x04cc: return "_timezone";
case 0x04cd: return "_tolower";
case 0x04ce: return "_tolower_l";
case 0x04cf: return "_toupper";
case 0x04d0: return "_toupper_l";
case 0x04d1: return "_towlower_l";
case 0x04d2: return "_towupper_l";
case 0x04d3: return "_tzname";
case 0x04d4: return "_tzset";
case 0x04d5: return "_ui64toa";
case 0x04d6: return "_ui64toa_s";
case 0x04d7: return "_ui64tow";
case 0x04d8: return "_ui64tow_s";
case 0x04d9: return "_ultoa";
case 0x04da: return "_ultoa_s";
case 0x04db: return "_ultow";
case 0x04dc: return "_ultow_s";
case 0x04dd: return "_umask";
case 0x04de: return "_umask_s";
case 0x04df: return "_ungetc_nolock";
case 0x04e0: return "_ungetch";
case 0x04e1: return "_ungetch_nolock";
case 0x04e2: return "_ungetwc_nolock";
case 0x04e3: return "_ungetwch";
case 0x04e4: return "_ungetwch_nolock";
case 0x04e5: return "_unlink";
case 0x04e6: return "_unloaddll";
case 0x04e7: return "_unlock";
case 0x04e8: return "_unlock_file";
case 0x04e9: return "_utime32";
case 0x04ea: return "_utime64";
case 0x04eb: return "_vcprintf";
case 0x04ec: return "_vcprintf_l";
case 0x04ed: return "_vcprintf_p";
case 0x04ee: return "_vcprintf_p_l";
case 0x04ef: return "_vcprintf_s";
case 0x04f0: return "_vcprintf_s_l";
case 0x04f1: return "_vcwprintf";
case 0x04f2: return "_vcwprintf_l";
case 0x04f3: return "_vcwprintf_p";
case 0x04f4: return "_vcwprintf_p_l";
case 0x04f5: return "_vcwprintf_s";
case 0x04f6: return "_vcwprintf_s_l";
case 0x04f7: return "_vfprintf_l";
case 0x04f8: return "_vfprintf_p";
case 0x04f9: return "_vfprintf_p_l";
case 0x04fa: return "_vfprintf_s_l";
case 0x04fb: return "_vfwprintf_l";
case 0x04fc: return "_vfwprintf_p";
case 0x04fd: return "_vfwprintf_p_l";
case 0x04fe: return "_vfwprintf_s_l";
case 0x04ff: return "_vprintf_l";
case 0x0500: return "_vprintf_p";
case 0x0501: return "_vprintf_p_l";
case 0x0502: return "_vprintf_s_l";
case 0x0503: return "_vscprintf";
case 0x0504: return "_vscprintf_l";
case 0x0505: return "_vscprintf_p";
case 0x0506: return "_vscprintf_p_l";
case 0x0507: return "_vscwprintf";
case 0x0508: return "_vscwprintf_l";
case 0x0509: return "_vscwprintf_p";
case 0x050a: return "_vscwprintf_p_l";
case 0x050b: return "_vsnprintf";
case 0x050c: return "_vsnprintf_c";
case 0x050d: return "_vsnprintf_c_l";
case 0x050e: return "_vsnprintf_l";
case 0x050f: return "_vsnprintf_s";
case 0x0510: return "_vsnprintf_s_l";
case 0x0511: return "_vsnwprintf";
case 0x0512: return "_vsnwprintf_l";
case 0x0513: return "_vsnwprintf_s";
case 0x0514: return "_vsnwprintf_s_l";
case 0x0515: return "_vsprintf_l";
case 0x0516: return "_vsprintf_p";
case 0x0517: return "_vsprintf_p_l";
case 0x0518: return "_vsprintf_s_l";
case 0x0519: return "_vswprintf";
case 0x051a: return "_vswprintf_c";
case 0x051b: return "_vswprintf_c_l";
case 0x051c: return "_vswprintf_l";
case 0x051d: return "_vswprintf_p";
case 0x051e: return "_vswprintf_p_l";
case 0x051f: return "_vswprintf_s_l";
case 0x0520: return "_vwprintf_l";
case 0x0521: return "_vwprintf_p";
case 0x0522: return "_vwprintf_p_l";
case 0x0523: return "_vwprintf_s_l";
case 0x0524: return "_waccess";
case 0x0525: return "_waccess_s";
case 0x0526: return "_wasctime";
case 0x0527: return "_wasctime_s";
case 0x0528: return "_wassert";
case 0x0529: return "_wchdir";
case 0x052a: return "_wchmod";
case 0x052b: return "_wcmdln";
case 0x052c: return "_wcreat";
case 0x052d: return "_wcreate_locale";
case 0x052e: return "_wcscoll_l";
case 0x052f: return "_wcsdup";
case 0x0530: return "_wcserror";
case 0x0531: return "_wcserror_s";
case 0x0532: return "_wcsftime_l";
case 0x0533: return "_wcsicmp";
case 0x0534: return "_wcsicmp_l";
case 0x0535: return "_wcsicoll";
case 0x0536: return "_wcsicoll_l";
case 0x0537: return "_wcslwr";
case 0x0538: return "_wcslwr_l";
case 0x0539: return "_wcslwr_s";
case 0x053a: return "_wcslwr_s_l";
case 0x053b: return "_wcsncoll";
case 0x053c: return "_wcsncoll_l";
case 0x053d: return "_wcsnicmp";
case 0x053e: return "_wcsnicmp_l";
case 0x053f: return "_wcsnicoll";
case 0x0540: return "_wcsnicoll_l";
case 0x0541: return "_wcsnset";
case 0x0542: return "_wcsnset_s";
case 0x0543: return "_wcsrev";
case 0x0544: return "_wcsset";
case 0x0545: return "_wcsset_s";
case 0x0546: return "_wcstod_l";
case 0x0547: return "_wcstoi64";
case 0x0548: return "_wcstoi64_l";
case 0x0549: return "_wcstol_l";
case 0x054a: return "_wcstombs_l";
case 0x054b: return "_wcstombs_s_l";
case 0x054c: return "_wcstoui64";
case 0x054d: return "_wcstoui64_l";
case 0x054e: return "_wcstoul_l";
case 0x054f: return "_wcsupr";
case 0x0550: return "_wcsupr_l";
case 0x0551: return "_wcsupr_s";
case 0x0552: return "_wcsupr_s_l";
case 0x0553: return "_wcsxfrm_l";
case 0x0554: return "_wctime32";
case 0x0555: return "_wctime32_s";
case 0x0556: return "_wctime64";
case 0x0557: return "_wctime64_s";
case 0x0558: return "_wctomb_l";
case 0x0559: return "_wctomb_s_l";
case 0x055a: return "_wctype";
case 0x055b: return "_wdupenv_s";
case 0x055c: return "_wenviron";
case 0x055d: return "_wexecl";
case 0x055e: return "_wexecle";
case 0x055f: return "_wexeclp";
case 0x0560: return "_wexeclpe";
case 0x0561: return "_wexecv";
case 0x0562: return "_wexecve";
case 0x0563: return "_wexecvp";
case 0x0564: return "_wexecvpe";
case 0x0565: return "_wfdopen";
case 0x0566: return "_wfindfirst32";
case 0x0567: return "_wfindfirst32i64";
case 0x0568: return "_wfindfirst64";
case 0x0569: return "_wfindfirst64i32";
case 0x056a: return "_wfindnext32";
case 0x056b: return "_wfindnext32i64";
case 0x056c: return "_wfindnext64";
case 0x056d: return "_wfindnext64i32";
case 0x056e: return "_wfopen";
case 0x056f: return "_wfopen_s";
case 0x0570: return "_wfreopen";
case 0x0571: return "_wfreopen_s";
case 0x0572: return "_wfsopen";
case 0x0573: return "_wfullpath";
case 0x0574: return "_wgetcwd";
case 0x0575: return "_wgetdcwd";
case 0x0576: return "_wgetenv";
case 0x0577: return "_wgetenv_s";
case 0x0578: return "_wmakepath";
case 0x0579: return "_wmakepath_s";
case 0x057a: return "_wmkdir";
case 0x057b: return "_wmktemp";
case 0x057c: return "_wmktemp_s";
case 0x057d: return "_wopen";
case 0x057e: return "_wperror";
case 0x057f: return "_wpgmptr";
case 0x0580: return "_wpopen";
case 0x0581: return "_wprintf_l";
case 0x0582: return "_wprintf_p";
case 0x0583: return "_wprintf_p_l";
case 0x0584: return "_wprintf_s_l";
case 0x0585: return "_wputenv";
case 0x0586: return "_wputenv_s";
case 0x0587: return "_wremove";
case 0x0588: return "_wrename";
case 0x0589: return "_write";
case 0x058a: return "_wrmdir";
case 0x058b: return "_wscanf_l";
case 0x058c: return "_wscanf_s_l";
case 0x058d: return "_wsearchenv";
case 0x058e: return "_wsearchenv_s";
case 0x058f: return "_wsetlocale";
case 0x0590: return "_wsopen";
case 0x0591: return "_wsopen_s";
case 0x0592: return "_wspawnl";
case 0x0593: return "_wspawnle";
case 0x0594: return "_wspawnlp";
case 0x0595: return "_wspawnlpe";
case 0x0596: return "_wspawnv";
case 0x0597: return "_wspawnve";
case 0x0598: return "_wspawnvp";
case 0x0599: return "_wspawnvpe";
case 0x059a: return "_wsplitpath";
case 0x059b: return "_wsplitpath_s";
case 0x059c: return "_wstat32";
case 0x059d: return "_wstat32i64";
case 0x059e: return "_wstat64";
case 0x059f: return "_wstat64i32";
case 0x05a0: return "_wstrdate";
case 0x05a1: return "_wstrdate_s";
case 0x05a2: return "_wstrtime";
case 0x05a3: return "_wstrtime_s";
case 0x05a4: return "_wsystem";
case 0x05a5: return "_wtempnam";
case 0x05a6: return "_wtmpnam";
case 0x05a7: return "_wtmpnam_s";
case 0x05a8: return "_wtof";
case 0x05a9: return "_wtof_l";
case 0x05aa: return "_wtoi";
case 0x05ab: return "_wtoi64";
case 0x05ac: return "_wtoi64_l";
case 0x05ad: return "_wtoi_l";
case 0x05ae: return "_wtol";
case 0x05af: return "_wtol_l";
case 0x05b0: return "_wunlink";
case 0x05b1: return "_wutime32";
case 0x05b2: return "_wutime64";
case 0x05b3: return "_y0";
case 0x05b4: return "_y1";
case 0x05b5: return "_yn";
case 0x05b6: return "abort";
case 0x05b7: return "abs";
case 0x05b8: return "acos";
case 0x05b9: return "asctime";
case 0x05ba: return "asctime_s";
case 0x05bb: return "asin";
case 0x05bc: return "atan";
case 0x05bd: return "atan2";
case 0x05be: return "atexit";
case 0x05bf: return "atof";
case 0x05c0: return "atoi";
case 0x05c1: return "atol";
case 0x05c2: return "bsearch";
case 0x05c3: return "bsearch_s";
case 0x05c4: return "btowc";
case 0x05c5: return "calloc";
case 0x05c6: return "ceil";
case 0x05c7: return "clearerr";
case 0x05c8: return "clearerr_s";
case 0x05c9: return "clock";
case 0x05ca: return "cos";
case 0x05cb: return "cosh";
case 0x05cc: return "div";
case 0x05cd: return "exit";
case 0x05ce: return "exp";
case 0x05cf: return "fabs";
case 0x05d0: return "fclose";
case 0x05d1: return "feof";
case 0x05d2: return "ferror";
case 0x05d3: return "fflush";
case 0x05d4: return "fgetc";
case 0x05d5: return "fgetpos";
case 0x05d6: return "fgets";
case 0x05d7: return "fgetwc";
case 0x05d8: return "fgetws";
case 0x05d9: return "floor";
case 0x05da: return "fmod";
case 0x05db: return "fopen";
case 0x05dc: return "fopen_s";
case 0x05dd: return "fprintf";
case 0x05de: return "fprintf_s";
case 0x05df: return "fputc";
case 0x05e0: return "fputs";
case 0x05e1: return "fputwc";
case 0x05e2: return "fputws";
case 0x05e3: return "fread";
case 0x05e4: return "fread_s";
case 0x05e5: return "free";
case 0x05e6: return "freopen";
case 0x05e7: return "freopen_s";
case 0x05e8: return "frexp";
case 0x05e9: return "fscanf";
case 0x05ea: return "fscanf_s";
case 0x05eb: return "fseek";
case 0x05ec: return "fsetpos";
case 0x05ed: return "ftell";
case 0x05ee: return "fwprintf";
case 0x05ef: return "fwprintf_s";
case 0x05f0: return "fwrite";
case 0x05f1: return "fwscanf";
case 0x05f2: return "fwscanf_s";
case 0x05f3: return "getc";
case 0x05f4: return "getchar";
case 0x05f5: return "getenv";
case 0x05f6: return "getenv_s";
case 0x05f7: return "gets";
case 0x05f8: return "gets_s";
case 0x05f9: return "getwc";
case 0x05fa: return "getwchar";
case 0x05fb: return "is_wctype";
case 0x05fc: return "isalnum";
case 0x05fd: return "isalpha";
case 0x05fe: return "iscntrl";
case 0x05ff: return "isdigit";
case 0x0600: return "isgraph";
case 0x0601: return "isleadbyte";
case 0x0602: return "islower";
case 0x0603: return "isprint";
case 0x0604: return "ispunct";
case 0x0605: return "isspace";
case 0x0606: return "isupper";
case 0x0607: return "iswalnum";
case 0x0608: return "iswalpha";
case 0x0609: return "iswascii";
case 0x060a: return "iswcntrl";
case 0x060b: return "iswctype";
case 0x060c: return "iswdigit";
case 0x060d: return "iswgraph";
case 0x060e: return "iswlower";
case 0x060f: return "iswprint";
case 0x0610: return "iswpunct";
case 0x0611: return "iswspace";
case 0x0612: return "iswupper";
case 0x0613: return "iswxdigit";
case 0x0614: return "isxdigit";
case 0x0615: return "labs";
case 0x0616: return "ldexp";
case 0x0617: return "ldiv";
case 0x0618: return "llabs";
case 0x0619: return "lldiv";
case 0x061a: return "localeconv";
case 0x061b: return "log";
case 0x061c: return "log10";
case 0x061d: return "longjmp";
case 0x061e: return "malloc";
case 0x061f: return "mblen";
case 0x0620: return "mbrlen";
case 0x0621: return "mbrtowc";
case 0x0622: return "mbsrtowcs";
case 0x0623: return "mbsrtowcs_s";
case 0x0624: return "mbstowcs";
case 0x0625: return "mbstowcs_s";
case 0x0626: return "mbtowc";
case 0x0627: return "memchr";
case 0x0628: return "memcmp";
case 0x0629: return "memcpy";
case 0x062a: return "memcpy_s";
case 0x062b: return "memmove";
case 0x062c: return "memmove_s";
case 0x062d: return "memset";
case 0x062e: return "modf";
case 0x062f: return "perror";
case 0x0630: return "pow";
case 0x0631: return "printf";
case 0x0632: return "printf_s";
case 0x0633: return "putc";
case 0x0634: return "putchar";
case 0x0635: return "puts";
case 0x0636: return "putwc";
case 0x0637: return "putwchar";
case 0x0638: return "qsort";
case 0x0639: return "qsort_s";
case 0x063a: return "raise";
case 0x063b: return "rand";
case 0x063c: return "rand_s";
case 0x063d: return "realloc";
case 0x063e: return "remove";
case 0x063f: return "rename";
case 0x0640: return "rewind";
case 0x0641: return "scanf";
case 0x0642: return "scanf_s";
case 0x0643: return "setbuf";
case 0x0644: return "setlocale";
case 0x0645: return "setvbuf";
case 0x0646: return "signal";
case 0x0647: return "sin";
case 0x0648: return "sinh";
case 0x0649: return "sprintf";
case 0x064a: return "sprintf_s";
case 0x064b: return "sqrt";
case 0x064c: return "srand";
case 0x064d: return "sscanf";
case 0x064e: return "sscanf_s";
case 0x064f: return "strcat";
case 0x0650: return "strcat_s";
case 0x0651: return "strchr";
case 0x0652: return "strcmp";
case 0x0653: return "strcoll";
case 0x0654: return "strcpy";
case 0x0655: return "strcpy_s";
case 0x0656: return "strcspn";
case 0x0657: return "strerror";
case 0x0658: return "strerror_s";
case 0x0659: return "strftime";
case 0x065a: return "strlen";
case 0x065b: return "strncat";
case 0x065c: return "strncat_s";
case 0x065d: return "strncmp";
case 0x065e: return "strncpy";
case 0x065f: return "strncpy_s";
case 0x0660: return "strnlen";
case 0x0661: return "strpbrk";
case 0x0662: return "strrchr";
case 0x0663: return "strspn";
case 0x0664: return "strstr";
case 0x0665: return "strtod";
case 0x0666: return "strtok";
case 0x0667: return "strtok_s";
case 0x0668: return "strtol";
case 0x0669: return "strtoul";
case 0x066a: return "strxfrm";
case 0x066b: return "swprintf_s";
case 0x066c: return "swscanf";
case 0x066d: return "swscanf_s";
case 0x066e: return "system";
case 0x066f: return "tan";
case 0x0670: return "tanh";
case 0x0671: return "tmpfile";
case 0x0672: return "tmpfile_s";
case 0x0673: return "tmpnam";
case 0x0674: return "tmpnam_s";
case 0x0675: return "tolower";
case 0x0676: return "toupper";
case 0x0677: return "towlower";
case 0x0678: return "towupper";
case 0x0679: return "ungetc";
case 0x067a: return "ungetwc";
case 0x067b: return "vfprintf";
case 0x067c: return "vfprintf_s";
case 0x067d: return "vfwprintf";
case 0x067e: return "vfwprintf_s";
case 0x067f: return "vprintf";
case 0x0680: return "vprintf_s";
case 0x0681: return "vsprintf";
case 0x0682: return "vsprintf_s";
case 0x0683: return "vswprintf_s";
case 0x0684: return "vwprintf";
case 0x0685: return "vwprintf_s";
case 0x0686: return "wcrtomb";
case 0x0687: return "wcrtomb_s";
case 0x0688: return "wcscat";
case 0x0689: return "wcscat_s";
case 0x068a: return "wcschr";
case 0x068b: return "wcscmp";
case 0x068c: return "wcscoll";
case 0x068d: return "wcscpy";
case 0x068e: return "wcscpy_s";
case 0x068f: return "wcscspn";
case 0x0690: return "wcsftime";
case 0x0691: return "wcslen";
case 0x0692: return "wcsncat";
case 0x0693: return "wcsncat_s";
case 0x0694: return "wcsncmp";
case 0x0695: return "wcsncpy";
case 0x0696: return "wcsncpy_s";
case 0x0697: return "wcsnlen";
case 0x0698: return "wcspbrk";
case 0x0699: return "wcsrchr";
case 0x069a: return "wcsrtombs";
case 0x069b: return "wcsrtombs_s";
case 0x069c: return "wcsspn";
case 0x069d: return "wcsstr";
case 0x069e: return "wcstod";
case 0x069f: return "wcstok";
case 0x06a0: return "wcstok_s";
case 0x06a1: return "wcstol";
case 0x06a2: return "wcstombs";
case 0x06a3: return "wcstombs_s";
case 0x06a4: return "wcstoul";
case 0x06a5: return "wcsxfrm";
case 0x06a6: return "wctob";
case 0x06a7: return "wctomb";
case 0x06a8: return "wctomb_s";
case 0x06a9: return "wmemcpy_s";
case 0x06aa: return "wmemmove_s";
case 0x06ab: return "wprintf";
case 0x06ac: return "wprintf_s";
case 0x06ad: return "wscanf";
case 0x06ae: return "wscanf_s";
}
return nullptr;
}
}
}
#endif | unknown | github | https://github.com/nodejs/node | deps/LIEF/src/PE/utils/ordinals_lookup_tables/msvcr110_dll_lookup.hpp |
/*
* Copyright 2014-2025 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.client.tests
import io.ktor.client.engine.mock.*
import io.ktor.client.plugins.*
import io.ktor.client.plugins.observer.*
import io.ktor.client.request.*
import io.ktor.client.statement.*
import io.ktor.client.test.base.*
import kotlin.test.*
/**
 * Tests for the [ResponseObserver] client plugin: installation variants,
 * observer-exception isolation, response filters, and body re-readability.
 */
class ResponseObserverTest : ClientLoader() {
    // Flipped by the observer callbacks below and asserted after each request.
    // NOTE(review): assumes the test framework creates a fresh instance per
    // test so this flag starts false each time — confirm against the runner.
    private var observerCalled = false

    @Test
    fun testEmptyResponseObserverIsNotFreezing() = clientTests {
        config {
            // An observer with an empty callback must not stall body delivery.
            ResponseObserver {
            }
        }

        test { client ->
            client.get("$TEST_SERVER/download") {
                parameter("size", (1024 * 10).toString())
            }
        }
    }

    @Test
    fun testThrowInResponseObserverIsNotFailingRequest() = clientTests {
        config {
            // An exception thrown inside the observer must not fail the request.
            ResponseObserver {
                error("fail")
            }
        }

        test { client ->
            client.get("$TEST_SERVER/download") {
                parameter("size", (1024 * 10).toString())
            }
        }
    }

    @Test
    fun testResponseObserverCalledWhenNoFilterPresent() = clientTests {
        config {
            install(ResponseObserver) {
                onResponse { observerCalled = true }
            }
        }

        test { client ->
            client.get("$TEST_SERVER/download") {
                parameter("size", (1024 * 10).toString())
            }
            assertTrue { observerCalled }
        }
    }

    @Test
    fun testResponseObserverCalledWhenFilterMatched() = clientTests {
        config {
            install(ResponseObserver) {
                onResponse { observerCalled = true }
                // Filter accepts every response, so the observer must run.
                filter { true }
            }
        }

        test { client ->
            client.get("$TEST_SERVER/download") {
                parameter("size", (1024 * 10).toString())
            }
            assertTrue { observerCalled }
        }
    }

    @Test
    fun testResponseObserverNotCalledWhenFilterNotMatched() = clientTests {
        config {
            install(ResponseObserver) {
                onResponse { observerCalled = true }
                // Filter rejects every response, so the observer must not run.
                filter { false }
            }
        }

        test { client ->
            client.get("$TEST_SERVER/download") {
                parameter("size", (1024 * 10).toString())
            }
            assertFalse { observerCalled }
        }
    }

    @Test
    fun testSavedResponseCanBeReadMultipleTimes() = testWithEngine(MockEngine) {
        val bodyContent = "Hello"
        // Records HttpResponse.isSaved as seen inside the observer.
        var wasSaved: Boolean? = null

        // A saved response body must be readable repeatedly, not just once.
        suspend fun assertBodyCanBeReadMultipleTimes(response: HttpResponse) {
            assertEquals(bodyContent, response.bodyAsText(), "First read failed")
            assertEquals(bodyContent, response.bodyAsText(), "It should be possible to read body multiple times")
        }

        config {
            install(ResponseObserver) {
                onResponse { response ->
                    wasSaved = response.isSaved
                    // In the response observer itself
                    assertBodyCanBeReadMultipleTimes(response)
                }
            }
            engine {
                addHandler { respondOk(bodyContent) }
            }
        }

        test { client ->
            // In the pipeline after ResponseObserver
            client.receivePipeline.intercept(HttpReceivePipeline.After) { response ->
                assertBodyCanBeReadMultipleTimes(response)
                proceedWith(response)
            }

            val response = client.get("/")

            assertNotNull(wasSaved, "Response observer should be called")
            assertTrue(wasSaved!!, "Response should be saved before reaching the response observer")

            // After all pipelines
            assertBodyCanBeReadMultipleTimes(response)
        }
    }
}
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Patches the spawn() command for windows compilers.
Windows has an 8191 character command line limit, but some compilers
support an @command_file directive where command_file is a file
containing the full command line.
"""
from distutils import ccompiler
import os
import os.path
import shutil
import sys
import tempfile
# Windows command-line length limit, in characters (see module docstring).
MAX_COMMAND_LENGTH = 8191

# Original distutils spawn, saved so the patched version can delegate to it.
_classic_spawn = ccompiler.CCompiler.spawn
def _commandfile_spawn(self, command):
    """Spawn ``command``, routing over-long command lines through a file.

    Windows limits the process command line to MAX_COMMAND_LENGTH
    characters.  When the joined command would exceed that limit, write
    the arguments (quoted, backslashes escaped) to a temporary file and
    invoke the tool as ``tool @command_file`` instead.  On other
    platforms, or for short command lines, defer to the original
    distutils spawn.

    Args:
        command: Full argument vector; ``command[0]`` is the executable.
    """
    # Count one extra character per argument for the separating space the
    # shell inserts when the command line is joined.  (The final argument's
    # "+ 1" over-counts by one, which only makes the check conservative.)
    command_length = sum(len(arg) + 1 for arg in command)
    if os.name == 'nt' and command_length > MAX_COMMAND_LENGTH:
        # Even if this command doesn't support the @command_file, it will
        # fail as is so we try blindly
        print('Command line length exceeded, using command file')
        print(' '.join(command))
        temporary_directory = tempfile.mkdtemp()
        command_filename = os.path.abspath(
            os.path.join(temporary_directory, 'command'))
        with open(command_filename, 'w') as command_file:
            # Quote each argument and escape backslashes so Windows paths
            # survive the compiler's command-file parsing.
            escaped_args = [
                '"' + arg.replace('\\', '\\\\') + '"' for arg in command[1:]
            ]
            command_file.write(' '.join(escaped_args))
        modified_command = command[:1] + ['@{}'.format(command_filename)]
        try:
            _classic_spawn(self, modified_command)
        finally:
            # Always clean up, even when the spawn itself fails.
            shutil.rmtree(temporary_directory)
    else:
        _classic_spawn(self, command)
def monkeypatch_spawn():
    """Globally replace CCompiler.spawn with the @command-file-aware version.

    Monkeypatching is dumb, but it's either that or we become maintainers of
    something much, much bigger.
    """
    ccompiler.CCompiler.spawn = _commandfile_spawn
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import uuid
from copy import copy
from logbook import Logger
from collections import defaultdict
from six import text_type, iteritems
from six.moves import filter
import zipline.errors
import zipline.protocol as zp
from zipline.finance.slippage import (
VolumeShareSlippage,
transact_partial,
check_order_triggers
)
from zipline.finance.commission import PerShare
from zipline.utils.protocol_utils import Enum
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
# Module-level logger for blotter events.
log = Logger('Blotter')

# Order lifecycle states; transitions are driven by Blotter.cancel(),
# Blotter.reject(), Blotter.hold(), and fills from trade processing.
ORDER_STATUS = Enum(
    'OPEN',
    'FILLED',
    'CANCELLED',
    'REJECTED',
    'HELD',
)
class Blotter(object):
def __init__(self):
    """Create an empty blotter with default slippage/commission models."""
    # Simulation timestamp of the event currently being processed.
    self.current_dt = None
    # Fill pipeline: volume-share slippage plus per-share commission.
    self.transact = transact_partial(VolumeShareSlippage(), PerShare())
    # Every order ever placed, keyed by its own id.
    self.orders = {}
    # Orders that have not yet closed, aggregated by sid.
    self.open_orders = defaultdict(list)
    # Orders created or updated since the last event was relayed out.
    self.new_orders = []
    # Arbitrary cap of 100 billion (US) shares per order.
    self.max_shares = int(1e+11)
def __repr__(self):
    """Return a multi-line debug representation of the blotter state."""
    # The triple-quoted template's layout is part of the output; .strip()
    # removes only the blank lines surrounding the template.
    return """
{class_name}(
    transact_partial={transact_partial},
    open_orders={open_orders},
    orders={orders},
    new_orders={new_orders},
    current_dt={current_dt})
""".strip().format(class_name=self.__class__.__name__,
                   transact_partial=self.transact.args,
                   open_orders=self.open_orders,
                   orders=self.orders,
                   new_orders=self.new_orders,
                   current_dt=self.current_dt)
def set_date(self, dt):
    """Record ``dt`` as the current simulation time for new order events."""
    self.current_dt = dt
def order(self, sid, amount, style, order_id=None):
    # something could be done with amount to further divide
    # between buy by share count OR buy shares up to a dollar amount
    # numeric == share count  AND  "$dollar.cents" == cost amount
    """Place an order and return its id (None for zero-share orders).

    amount > 0 :: Buy/Cover
    amount < 0 :: Sell/Short
    Market order:    order(sid, amount)
    Limit order:     order(sid, amount, style=LimitOrder(limit_price))
    Stop order:      order(sid, amount, style=StopOrder(stop_price))
    StopLimit order: order(sid, amount, style=StopLimitOrder(limit_price,
                           stop_price))

    Raises:
        OverflowError: if abs(amount) exceeds self.max_shares.
    """
    if amount == 0:
        # Don't bother placing orders for 0 shares.
        return
    elif abs(amount) > self.max_shares:
        # Arbitrary limit of 100 billion (US) shares will never be
        # exceeded except by a buggy algorithm.  Guard both buys and
        # sells: the previous `amount > self.max_shares` check let
        # arbitrarily large negative (sell/short) amounts through.
        raise OverflowError("Can't order more than %d shares" %
                            self.max_shares)

    is_buy = (amount > 0)
    order = Order(
        dt=self.current_dt,
        sid=sid,
        amount=amount,
        stop=style.get_stop_price(is_buy),
        limit=style.get_limit_price(is_buy),
        id=order_id
    )

    self.open_orders[order.sid].append(order)
    self.orders[order.id] = order
    self.new_orders.append(order)

    return order.id
def cancel(self, order_id):
    """Cancel the order with ``order_id`` if it exists and is still open.

    The cancelled order is pulled from the open-order and new-order
    queues, marked cancelled, stamped with the current time, and re-queued
    so its status change is relayed with newly placed orders.
    """
    cur_order = self.orders.get(order_id)
    if cur_order is None:
        return

    if cur_order.open:
        sid_orders = self.open_orders[cur_order.sid]
        if cur_order in sid_orders:
            sid_orders.remove(cur_order)

        if cur_order in self.new_orders:
            self.new_orders.remove(cur_order)

        cur_order.cancel()
        cur_order.dt = self.current_dt
        # Relay this order's new status along with newly placed orders.
        self.new_orders.append(cur_order)
def reject(self, order_id, reason=''):
    """Mark the order with ``order_id`` as 'rejected'.

    Functionally similar to cancellation, but rejections are involuntary:
    they usually carry a broker-supplied ``reason`` explaining why the
    order was refused, whereas cancels are user-driven.
    """
    if order_id not in self.orders:
        return
    rejected = self.orders[order_id]

    sid_orders = self.open_orders[rejected.sid]
    if rejected in sid_orders:
        sid_orders.remove(rejected)

    if rejected in self.new_orders:
        self.new_orders.remove(rejected)

    rejected.reject(reason=reason)
    rejected.dt = self.current_dt
    # Relay this order's new status along with newly placed orders.
    self.new_orders.append(rejected)
def hold(self, order_id, reason=''):
"""
Mark the order with order_id as 'held'. Held is functionally similar
to 'open'. When a fill (full or partial) arrives, the status
will automatically change back to open/filled as necessary.
"""
if order_id not in self.orders:
return
cur_order = self.orders[order_id]
if cur_order.open:
if cur_order in self.new_orders:
self.new_orders.remove(cur_order)
cur_order.hold(reason=reason)
cur_order.dt = self.current_dt
# we want this order's new status to be relayed out
# along with newly placed orders.
self.new_orders.append(cur_order)
def process_split(self, split_event):
if split_event.sid not in self.open_orders:
return
orders_to_modify = self.open_orders[split_event.sid]
for order in orders_to_modify:
order.handle_split(split_event)
    def process_benchmark(self, benchmark_event):
        # Benchmark events never generate transactions.  The unreachable
        # ``yield`` after ``return`` deliberately turns this method into a
        # generator, so callers can iterate it exactly like the other
        # process_* methods and simply get nothing back.
        return
        yield
    def process_trade(self, trade_event):
        """Generator: match ``trade_event`` against open orders on its sid.

        Yields ``(transaction, order)`` pairs produced by
        ``process_transactions`` and prunes orders that were closed by the
        fills; the sid's entry in ``open_orders`` is dropped once empty.
        """
        if trade_event.sid not in self.open_orders:
            return
        if trade_event.volume < 1:
            # there are zero volume trade_events bc some stocks trade
            # less frequently than once per minute.
            return
        orders = self.open_orders[trade_event.sid]
        # Oldest orders are matched first.
        orders.sort(key=lambda o: o.dt)
        # Only use orders for the current day or before
        current_orders = filter(
            lambda o: o.dt <= trade_event.dt,
            orders)
        processed_orders = []
        for txn, order in self.process_transactions(trade_event,
                                                    current_orders):
            processed_orders.append(order)
            yield txn, order
        # remove closed orders. we should only have to check
        # processed orders
        def not_open(order):
            return not order.open
        closed_orders = filter(not_open, processed_orders)
        for order in closed_orders:
            # NOTE: removal happens on ``orders`` (the live per-sid list),
            # while iteration is over the separate ``processed_orders``
            # snapshot, so mutating here is safe.
            orders.remove(order)
        if len(orders) == 0:
            del self.open_orders[trade_event.sid]
    def process_transactions(self, trade_event, current_orders):
        """Generator: execute ``current_orders`` against ``trade_event``.

        Delegates to ``self.transact`` and applies each resulting
        transaction (or commission event) to its order, yielding a
        ``(transaction, order)`` pair for every transaction produced.
        """
        for order, txn in self.transact(trade_event, current_orders):
            if txn.type == zp.DATASOURCE_TYPE.COMMISSION:
                # Commission pseudo-transactions only adjust the order's
                # accumulated commission; they carry no fill amount.
                order.commission = (order.commission or 0.0) + txn.cost
            else:
                # Sanity checks: a real fill must be non-empty, move in the
                # order's direction, and never exceed what was requested.
                if txn.amount == 0:
                    raise zipline.errors.TransactionWithNoAmount(txn=txn)
                if math.copysign(1, txn.amount) != order.direction:
                    raise zipline.errors.TransactionWithWrongDirection(
                        txn=txn, order=order)
                if abs(txn.amount) > abs(self.orders[txn.order_id].amount):
                    raise zipline.errors.TransactionVolumeExceedsOrder(
                        txn=txn, order=order)
                order.filled += txn.amount
                if txn.commission is not None:
                    order.commission = ((order.commission or 0.0) +
                                        txn.commission)
                # mark the date of the order to match the transaction
                # that is filling it.
                order.dt = txn.dt
            yield txn, order
def __getstate__(self):
state_to_save = ['new_orders', 'orders', '_status']
state_dict = {k: self.__dict__[k] for k in state_to_save
if k in self.__dict__}
# Have to handle defaultdicts specially
state_dict['open_orders'] = dict(self.open_orders)
STATE_VERSION = 1
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
self.__init__()
OLDEST_SUPPORTED_STATE = 1
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("Blotter saved is state too old.")
open_orders = defaultdict(list)
open_orders.update(state.pop('open_orders'))
self.open_orders = open_orders
self.__dict__.update(state)
class Order(object):
    """A single order for one sid: tracks the requested amount, fills,
    commission, stop/limit price triggers and lifecycle status.
    """

    def __init__(self, dt, sid, amount, stop=None, limit=None, filled=0,
                 commission=None, id=None):
        """
        @dt - datetime.datetime that the order was placed
        @sid - stock sid of the order
        @amount - the number of shares to buy/sell
            a positive sign indicates a buy
            a negative sign indicates a sell
        @filled - how many shares of the order have been filled so far
        """
        # get a string representation of the uuid.
        self.id = id or self.make_id()
        self.dt = dt          # last-modified time (updated by fills/status changes)
        self.reason = None    # populated by reject()/hold()
        self.created = dt     # placement time (not touched by later updates)
        self.sid = sid
        self.amount = amount
        self.filled = filled
        self.commission = commission
        self._status = ORDER_STATUS.OPEN
        self.stop = stop
        self.limit = limit
        self.stop_reached = False
        self.limit_reached = False
        # +1.0 for a buy, -1.0 for a sell (sign of the requested amount).
        self.direction = math.copysign(1, self.amount)
        self.type = zp.DATASOURCE_TYPE.ORDER

    def make_id(self):
        # Hex string form of a random UUID; used when no id was supplied.
        return uuid.uuid4().hex

    def to_dict(self):
        """Return a serializable snapshot of this order.

        Internal bookkeeping fields are dropped and the computed
        ``status`` property is exposed instead of the raw ``_status``.
        """
        py = copy(self.__dict__)
        for field in ['type', 'direction', '_status']:
            del py[field]
        py['status'] = self.status
        return py

    def to_api_obj(self):
        # Wrap the dict snapshot in the protocol-level Order object.
        pydict = self.to_dict()
        obj = zp.Order(initial_values=pydict)
        return obj

    def check_triggers(self, event):
        """
        Update internal state based on price triggers and the
        trade event's price.
        """
        stop_reached, limit_reached, sl_stop_reached = \
            check_order_triggers(self, event)
        if (stop_reached, limit_reached) \
                != (self.stop_reached, self.limit_reached):
            # A trigger flipped on this event; record when it happened.
            self.dt = event.dt
        self.stop_reached = stop_reached
        self.limit_reached = limit_reached
        if sl_stop_reached:
            # Change the STOP LIMIT order into a LIMIT order
            self.stop = None

    def handle_split(self, split_event):
        ratio = split_event.ratio
        # update the amount, limit_price, and stop_price
        # by the split's ratio
        # info here: http://finra.complinet.com/en/display/display_plain.html?
        # rbid=2403&element_id=8950&record_id=12208&print=1
        # new_share_amount = old_share_amount / ratio
        # new_price = old_price * ratio
        self.amount = int(self.amount / ratio)
        if self.limit is not None:
            self.limit = round(self.limit * ratio, 2)
        if self.stop is not None:
            self.stop = round(self.stop * ratio, 2)

    @property
    def status(self):
        # Fully filled orders report FILLED regardless of _status, and a
        # partially-filled HELD order reports OPEN (a fill re-opens it).
        if not self.open_amount:
            return ORDER_STATUS.FILLED
        elif self._status == ORDER_STATUS.HELD and self.filled:
            return ORDER_STATUS.OPEN
        else:
            return self._status

    @status.setter
    def status(self, status):
        self._status = status

    def cancel(self):
        self.status = ORDER_STATUS.CANCELLED

    def reject(self, reason=''):
        self.status = ORDER_STATUS.REJECTED
        self.reason = reason

    def hold(self, reason=''):
        self.status = ORDER_STATUS.HELD
        self.reason = reason

    @property
    def open(self):
        # HELD orders still count as open for matching purposes.
        return self.status in [ORDER_STATUS.OPEN, ORDER_STATUS.HELD]

    @property
    def triggered(self):
        """
        For a market order, True.
        For a stop order, True IFF stop_reached.
        For a limit order, True IFF limit_reached.
        """
        if self.stop is not None and not self.stop_reached:
            return False
        if self.limit is not None and not self.limit_reached:
            return False
        return True

    @property
    def open_amount(self):
        # Shares still to be filled; sign matches the requested amount.
        return self.amount - self.filled

    def __repr__(self):
        """
        String representation for this object.
        """
        return "Order(%s)" % self.to_dict().__repr__()

    def __unicode__(self):
        """
        Unicode representation for this object.
        """
        return text_type(repr(self))

    def __getstate__(self):
        """Pickle support: persist public attributes plus _status."""
        state_dict = \
            {k: v for k, v in iteritems(self.__dict__)
             if not k.startswith('_')}
        state_dict['_status'] = self._status
        STATE_VERSION = 1
        state_dict[VERSION_LABEL] = STATE_VERSION
        return state_dict

    def __setstate__(self, state):
        """Pickle support: reject states older than version 1."""
        OLDEST_SUPPORTED_STATE = 1
        version = state.pop(VERSION_LABEL)
        if version < OLDEST_SUPPORTED_STATE:
            raise BaseException("Order saved state is too old.")
        self.__dict__.update(state)
#!/usr/bin/env python
# encoding: utf-8
from tests.base import OsfTestCase
from osf_tests.factories import ProjectFactory
from addons.osfstorage import settings as storage_settings
import collections
from framework.auth import Auth
def identity(value):
    """Return *value* unchanged (default no-op checker for Delta).

    Defined with ``def`` rather than an assigned lambda (PEP 8 / E731),
    which also gives the callable a useful ``__name__`` in tracebacks.
    """
    return value
class Delta(object):
    """Pair a value *getter* with a *checker* that maps the value sampled
    before an operation to the value expected afterwards; consumed by
    AssertDeltas.
    """

    def __init__(self, getter, checker=None):
        self.getter = getter
        # Default checker is the identity: the value is expected unchanged.
        self.checker = checker or identity
class AssertDeltas(object):
    """Context manager asserting that tracked values change as expected.

    On entry each delta's getter is sampled; on exit the pre-sample is
    passed through the delta's checker and compared against a fresh
    sample, raising AssertionError on mismatch.
    """

    def __init__(self, *deltas):
        self.deltas = deltas
        self.original = []

    def __enter__(self):
        self.original = [d.getter() for d in self.deltas]

    def __exit__(self, exc_type, exc_value, exc_tb):
        for before, d in zip(self.original, self.deltas):
            after = d.getter()
            assert d.checker(before) == after
class StorageTestCase(OsfTestCase):
    """Base test case wiring up a fresh project with the osfstorage addon.

    Exposes commonly-used handles: the project (aliased as ``node``), its
    creator, the osfstorage node settings and an Auth for the creator.
    """

    def setUp(self):
        super(StorageTestCase, self).setUp()

        self.project = ProjectFactory()
        self.node = self.project  # alias; tests refer to either name
        self.user = self.project.creator
        self.node_settings = self.project.get_addon('osfstorage')
        self.auth_obj = Auth(user=self.project.creator)

        # Refresh records from database; necessary for comparing dates
        self.project.reload()
        self.user.reload()
def recursively_create_file(settings, path):
    """Create (and return) a file node at *path*, building parent folders.

    *path* is a '/'-separated string; every component except the last is
    appended as a folder under the addon root, and the final component
    is appended as a file.
    """
    parts = path.split('/')
    filename = parts.pop()
    node = settings.get_root()
    for folder_name in parts:
        node = node.append_folder(folder_name)
    return node.append_file(filename)
def recursively_create_folder(settings, path):
    """Create (and return) a folder node at *path*, building parents.

    Mirrors ``recursively_create_file`` but every component -- including
    the final one -- is created as a folder under ``settings.root_node``.

    Bug fix: the final component was previously appended with
    ``append_file``, so the helper created a *file* despite its name and
    in contradiction with its sibling ``recursively_create_file``.
    """
    parts = path.split('/')
    final = parts.pop()
    current = settings.root_node
    for subpath in parts:
        current = current.append_folder(subpath)
    return current.append_folder(final)
def make_payload(user, name, **kwargs):
    """Build a waterbutler-style upload payload for *user* and *name*.

    Any keyword arguments override (or extend) the defaults below.
    """
    payload = dict(
        user=user._id,
        name=name,
        hashes={'base64': '=='},
        worker={
            'uname': 'testmachine'
        },
        settings={
            'provider': 'filesystem',
            storage_settings.WATERBUTLER_RESOURCE: 'blah',
        },
        metadata={
            'size': 123,
            'name': 'file',
            'provider': 'filesystem',
            'modified': 'Mon, 16 Feb 2015 18:45:34 GMT'
        },
    )
    payload.update(kwargs)
    return payload
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import osv, orm
from openerp.tools.translate import _
class mail_message(osv.Model):
    """ Update of mail_message class, to restrict mail access.

    Internal notes are messages of type 'comment' with no subtype; they
    are hidden from (and blocked for) users outside the employee group.
    """
    _inherit = 'mail.message'

    def _search(self, cr, uid, args, offset=0, limit=None, order=None,
                context=None, count=False, access_rights_uid=None):
        """ Override that adds specific access rights of mail.message, to remove
            all internal notes if uid is a non-employee
        """
        # The superuser bypasses the extra filtering entirely.
        if uid == SUPERUSER_ID:
            return super(mail_message, self)._search(cr, uid, args, offset=offset, limit=limit, order=order,
                                                     context=context, count=count, access_rights_uid=access_rights_uid)
        group_ids = self.pool.get('res.users').browse(cr, uid, uid, context=context).groups_id
        group_user_id = self.pool.get("ir.model.data").get_object_reference(cr, uid, 'base', 'group_user')[1]
        # Non-employees only see messages carrying a subtype, i.e. the
        # internal notes (subtype_id NULL) are filtered out of the domain.
        if group_user_id not in [group.id for group in group_ids]:
            args = [('subtype_id', '!=', False)] + list(args)
        return super(mail_message, self)._search(cr, uid, args, offset=offset, limit=limit, order=order,
                                                 context=context, count=count, access_rights_uid=access_rights_uid)

    def check_access_rule(self, cr, uid, ids, operation, context=None):
        """ Add Access rules of mail.message for non-employee user:
            - read:
                - raise if the type is comment and subtype NULL (internal note)
        """
        if uid == SUPERUSER_ID:
            return super(mail_message, self).check_access_rule(cr, uid, ids=ids, operation=operation, context=context)
        group_ids = self.pool.get('res.users').browse(cr, uid, uid, context=context).groups_id
        group_user_id = self.pool.get("ir.model.data").get_object_reference(cr, uid, 'base', 'group_user')[1]
        if group_user_id not in [group.id for group in group_ids]:
            # Only the table name is %-interpolated (not user input); the
            # values go through the parameterized query arguments.
            cr.execute('SELECT DISTINCT id FROM "%s" WHERE type = %%s AND subtype_id IS NULL AND id = ANY (%%s)' % (self._table), ('comment', ids,))
            if cr.fetchall():
                raise orm.except_orm(_('Access Denied'),
                                     _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
                                     (self._description, operation))
        return super(mail_message, self).check_access_rule(cr, uid, ids=ids, operation=operation, context=context)
//===--- FileTypes.h - Input & output formats used by the tools -*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_BASIC_FILETYPES_H
#define SWIFT_BASIC_FILETYPES_H
#include "swift/Basic/LLVM.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
namespace swift {
namespace file_types {

/// Identifiers for every file type the compiler knows about, generated
/// from FileTypes.def; TY_INVALID is a sentinel one past the last type.
enum ID : uint8_t {
#define TYPE(NAME, ID, EXTENSION, FLAGS) TY_##ID,
#include "swift/Basic/FileTypes.def"
#undef TYPE
  TY_INVALID
};

/// Return the name of the type for \p Id.
StringRef getTypeName(ID Id);

/// Return the extension to use when creating a file of this type,
/// or an empty string if unspecified.
StringRef getExtension(ID Id);

/// Lookup the type to use for the file extension \p Ext.
/// If the extension is empty or is otherwise not recognized, return
/// the invalid type \c TY_INVALID.
ID lookupTypeForExtension(StringRef Ext);

/// Lookup the type to use for the file name \p Filename.
/// If the filename is empty or the type cannot be recognized, return
/// the invalid type \c TY_INVALID.
ID lookupTypeFromFilename(StringRef Filename);

/// Lookup the type to use for the name \p Name.
ID lookupTypeForName(StringRef Name);

/// Returns true if the type represents textual data.
bool isTextual(ID Id);

/// Returns true if the type is produced in the compiler after the LLVM
/// passes.
///
/// For those types the compiler produces multiple output files in multi-
/// threaded compilation.
bool isAfterLLVM(ID Id);

/// Returns true if the type is a file that contributes to the Swift module
/// being compiled.
///
/// These need to be passed to the Swift frontend
bool isPartOfSwiftCompilation(ID Id);

/// Returns true if the type of the output is produced from a diagnostic engine.
bool isProducedFromDiagnostics(ID Id);

/// Invoke \p fn once for every valid file type (TY_INVALID excluded).
static inline void forAllTypes(llvm::function_ref<void(file_types::ID)> fn) {
  for (uint8_t i = 0; i < static_cast<uint8_t>(TY_INVALID); ++i)
    fn(static_cast<ID>(i));
}

/// Some files are produced by the frontend and read by the driver in order to
/// support incremental compilation. Invoke the passed-in function for every
/// such file type.
static inline void
forEachIncrementalOutputType(llvm::function_ref<void(file_types::ID)> fn) {
  fn(file_types::TY_SwiftDeps);
}

} // end namespace file_types
} // end namespace swift
namespace llvm {
/// DenseMap key traits for file type IDs: TY_INVALID serves as the empty
/// key and TY_INVALID + 1 as the tombstone, so neither value may be used
/// as a real map key.
template <> struct DenseMapInfo<swift::file_types::ID> {
  using ID = swift::file_types::ID;
  static inline ID getEmptyKey() { return ID::TY_INVALID; }
  static inline ID getTombstoneKey() {
    return static_cast<ID>(ID::TY_INVALID + 1);
  }
  static unsigned getHashValue(ID Val) { return (unsigned)Val * 37U; }
  static bool isEqual(ID LHS, ID RHS) { return LHS == RHS; }
};
} // end namespace llvm
#endif | c | github | https://github.com/apple/swift | include/swift/Basic/FileTypes.h |
"""
Test contentstore.mongo functionality
"""
import os
import logging
from uuid import uuid4
import unittest
import mimetypes
from tempfile import mkdtemp
import path
import shutil
from opaque_keys.edx.locator import CourseLocator, AssetLocator
from opaque_keys.edx.keys import AssetKey
from xmodule.tests import DATA_DIR
from xmodule.contentstore.mongo import MongoContentStore
from xmodule.contentstore.content import StaticContent
from xmodule.exceptions import NotFoundError
import ddt
from __builtin__ import delattr
from xmodule.modulestore.tests.mongo_connection import MONGO_PORT_NUM, MONGO_HOST
log = logging.getLogger(__name__)

# Test database connection settings; DB gets a random suffix so that
# concurrent test runs do not collide on the same Mongo database.
HOST = MONGO_HOST
PORT = MONGO_PORT_NUM
DB = 'test_mongo_%s' % uuid4().hex[:5]
@ddt.ddt
class TestContentstore(unittest.TestCase):
    """
    Test the methods in contentstore.mongo using deprecated and non-deprecated keys
    """

    # Class-level stash of the original ``deprecated`` flags; tearDownClass
    # uses these to restore (or remove) the attribute after the tests run.
    asset_deprecated = None
    ssck_deprecated = None

    @classmethod
    def tearDownClass(cls):
        """
        Restores deprecated values
        """
        if cls.asset_deprecated is not None:
            setattr(AssetLocator, 'deprecated', cls.asset_deprecated)
        else:
            delattr(AssetLocator, 'deprecated')
        if cls.ssck_deprecated is not None:
            setattr(CourseLocator, 'deprecated', cls.ssck_deprecated)
        else:
            delattr(CourseLocator, 'deprecated')
        return super(TestContentstore, cls).tearDownClass()

    def set_up_assets(self, deprecated):
        """
        Setup contentstore w/ proper overriding of deprecated.
        """
        # since MongoModuleStore and MongoContentStore are basically assumed to be together, create this class
        # as well
        self.contentstore = MongoContentStore(HOST, DB, port=PORT)
        self.addCleanup(self.contentstore._drop_database)  # pylint: disable=protected-access

        setattr(AssetLocator, 'deprecated', deprecated)
        setattr(CourseLocator, 'deprecated', deprecated)

        self.course1_key = CourseLocator('test', 'asset_test', '2014_07')
        self.course2_key = CourseLocator('test', 'asset_test2', '2014_07')

        self.course1_files = ['contains.sh', 'picture1.jpg', 'picture2.jpg']
        self.course2_files = ['picture1.jpg', 'picture3.jpg', 'door_2.ogg']

        def load_assets(course_key, files):
            # Alternate the locked flag so both locked and unlocked
            # assets are present in every course.
            locked = False
            for filename in files:
                asset_key = course_key.make_asset_key('asset', filename)
                self.save_asset(filename, asset_key, filename, locked)
                locked = not locked

        load_assets(self.course1_key, self.course1_files)
        load_assets(self.course2_key, self.course2_files)

    def save_asset(self, filename, asset_key, displayname, locked):
        """
        Load and save the given file.
        """
        with open("{}/static/{}".format(DATA_DIR, filename), "rb") as f:
            content = StaticContent(
                asset_key, displayname, mimetypes.guess_type(filename)[0], f.read(),
                locked=locked
            )
            self.contentstore.save(content)

    @ddt.data(True, False)
    def test_delete(self, deprecated):
        """
        Test that deleting assets works
        """
        self.set_up_assets(deprecated)
        asset_key = self.course1_key.make_asset_key('asset', self.course1_files[0])
        self.contentstore.delete(asset_key)

        with self.assertRaises(NotFoundError):
            self.contentstore.find(asset_key)

        # ensure deleting a non-existent file is a noop
        self.contentstore.delete(asset_key)

    @ddt.data(True, False)
    def test_find(self, deprecated):
        """
        Test using find
        """
        self.set_up_assets(deprecated)
        asset_key = self.course1_key.make_asset_key('asset', self.course1_files[0])
        self.assertIsNotNone(self.contentstore.find(asset_key), "Could not find {}".format(asset_key))

        self.assertIsNotNone(self.contentstore.find(asset_key, as_stream=True), "Could not find {}".format(asset_key))

        unknown_asset = self.course1_key.make_asset_key('asset', 'no_such_file.gif')
        with self.assertRaises(NotFoundError):
            self.contentstore.find(unknown_asset)
        self.assertIsNone(
            self.contentstore.find(unknown_asset, throw_on_not_found=False),
            "Found unknown asset {}".format(unknown_asset)
        )

    @ddt.data(True, False)
    def test_export_for_course(self, deprecated):
        """
        Test export
        """
        self.set_up_assets(deprecated)
        root_dir = path.path(mkdtemp())
        try:
            self.contentstore.export_all_for_course(
                self.course1_key, root_dir,
                path.path(root_dir / "policy.json"),
            )
            for filename in self.course1_files:
                filepath = path.path(root_dir / filename)
                self.assertTrue(filepath.isfile(), "{} is not a file".format(filepath))
            # Files unique to course2 must not leak into course1's export.
            for filename in self.course2_files:
                if filename not in self.course1_files:
                    filepath = path.path(root_dir / filename)
                    self.assertFalse(filepath.isfile(), "{} is unexpected exported a file".format(filepath))
        finally:
            shutil.rmtree(root_dir)

    @ddt.data(True, False)
    def test_get_all_content(self, deprecated):
        """
        Test get_all_content_for_course
        """
        self.set_up_assets(deprecated)
        course1_assets, count = self.contentstore.get_all_content_for_course(self.course1_key)
        self.assertEqual(count, len(self.course1_files), course1_assets)
        for asset in course1_assets:
            parsed = AssetKey.from_string(asset['filename'])
            self.assertIn(parsed.name, self.course1_files)

        # Paging: request a single asset starting at offset 1.
        course1_assets, __ = self.contentstore.get_all_content_for_course(self.course1_key, 1, 1)
        self.assertEqual(len(course1_assets), 1, course1_assets)

        fake_course = CourseLocator('test', 'fake', 'non')
        course_assets, count = self.contentstore.get_all_content_for_course(fake_course)
        self.assertEqual(count, 0)
        self.assertEqual(course_assets, [])

    @ddt.data(True, False)
    def test_attrs(self, deprecated):
        """
        Test setting and getting attrs
        """
        self.set_up_assets(deprecated)
        for filename in self.course1_files:
            asset_key = self.course1_key.make_asset_key('asset', filename)
            prelocked = self.contentstore.get_attr(asset_key, 'locked', False)
            self.contentstore.set_attr(asset_key, 'locked', not prelocked)
            self.assertEqual(self.contentstore.get_attr(asset_key, 'locked', False), not prelocked)

    @ddt.data(True, False)
    def test_copy_assets(self, deprecated):
        """
        copy_all_course_assets
        """
        self.set_up_assets(deprecated)
        dest_course = CourseLocator('test', 'destination', 'copy')
        self.contentstore.copy_all_course_assets(self.course1_key, dest_course)
        for filename in self.course1_files:
            asset_key = self.course1_key.make_asset_key('asset', filename)
            dest_key = dest_course.make_asset_key('asset', filename)
            source = self.contentstore.find(asset_key)
            copied = self.contentstore.find(dest_key)
            # The copy must preserve content metadata and the lock flag.
            for propname in ['name', 'content_type', 'length', 'locked']:
                self.assertEqual(getattr(source, propname), getattr(copied, propname))

        __, count = self.contentstore.get_all_content_for_course(dest_course)
        self.assertEqual(count, len(self.course1_files))

    @ddt.data(True, False)
    def test_delete_assets(self, deprecated):
        """
        delete_all_course_assets
        """
        self.set_up_assets(deprecated)
        self.contentstore.delete_all_course_assets(self.course1_key)
        __, count = self.contentstore.get_all_content_for_course(self.course1_key)
        self.assertEqual(count, 0)
        # ensure it didn't remove any from other course
        __, count = self.contentstore.get_all_content_for_course(self.course2_key)
        self.assertEqual(count, len(self.course2_files))
"""
Take EHR data located in a study folder and convert it to I2B2.
Poll a base directory for incoming CSV files ready for processing. We assume that
CSV files are already anonymised and organised with the following directory structure:
2016
_ 20160407
_ patients.csv
_ diseases.csv
_ ...
"""
from datetime import datetime, timedelta
from airflow import DAG
from common_steps import initial_step
from common_steps.check_local_free_space import check_local_free_space_cfg
from common_steps.prepare_pipeline import prepare_pipeline
from ehr_steps.map_ehr_to_i2b2 import map_ehr_to_i2b2_pipeline_cfg
from ehr_steps.version_incoming_ehr import version_incoming_ehr_pipeline_cfg
# Pipeline steps that write files to disk; their prefixed names are handed
# to the free-disk-space check below.
steps_with_file_outputs = ['version_incoming_ehr']
def ehr_to_i2b2_dag(dataset, section, email_errors_to, max_active_runs):
    """Build and return the '<dataset>_ehr_to_i2b2' Airflow DAG.

    The DAG chains: local free-space check -> pipeline preparation ->
    versioning of the incoming EHR files -> EHR-to-I2B2 mapping.

    :param dataset: human-readable dataset name; lowercased/underscored
        to derive the DAG id
    :param section: configuration-section prefix for the pipeline steps
    :param email_errors_to: address(es) notified on task failure/retry
    :param max_active_runs: maximum number of concurrent DAG runs
    :return: the configured airflow DAG object
    """
    dag_name = '%s_ehr_to_i2b2' % dataset.lower().replace(" ", "_")

    default_args = {
        'owner': 'airflow',
        'depends_on_past': False,
        # NOTE(review): a dynamic start_date is an Airflow anti-pattern
        # (it moves every time the file is parsed); kept as-is because the
        # DAG is trigger-only (schedule_interval=None).
        'start_date': datetime.now(),
        'retries': 1,
        'retry_delay': timedelta(seconds=120),
        'email': email_errors_to,
        'email_on_failure': True,
        'email_on_retry': True
    }

    dag = DAG(
        dag_id=dag_name,
        default_args=default_args,
        schedule_interval=None,
        max_active_runs=max_active_runs)

    # List comprehension instead of map(): under Python 3 map() returns a
    # lazy, single-use iterator, which the downstream config helper could
    # otherwise exhaust or fail to index.
    upstream_step = check_local_free_space_cfg(
        dag, initial_step, section,
        [section + ':' + step for step in steps_with_file_outputs])
    upstream_step = prepare_pipeline(dag, upstream_step, False)
    upstream_step = version_incoming_ehr_pipeline_cfg(dag, upstream_step, section, section + ':version_incoming_ehr')
    # TODO Next: Python to build provenance_details
    # Call MipMap on versioned folder
    map_ehr_to_i2b2_pipeline_cfg(dag, upstream_step, section, section + ':map_ehr_to_i2b2')
    # TODO Call MipMap to convert original data in I2B2 format to the MIP CDE (Common Data Elements)
    # also in I2B2 format but stored in another database
    # map_i2b2_to_mip_i2b2_pipeline_cfg(dag, upstream_step, section, section + ':map_i2b2_to_mip_i2b2')
    return dag
# Taken from Python 2.6.4 and regexp module constants modified
"""A parser for SGML, using the derived class as a static DTD."""
# XXX This only supports those SGML features used by HTML.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special). RCDATA is
# not supported at all.
# from warnings import warnpy3k
# warnpy3k("the sgmllib module has been removed in Python 3.0",
# stacklevel=2)
# del warnpy3k
import markupbase
import re
__all__ = ["SGMLParser", "SGMLParseError"]

# Regular expressions used for parsing

# Characters that interrupt a run of plain character data.
interesting = re.compile('[&<]')
# A possibly-unfinished reference or tag at the end of the buffer.
incomplete = re.compile('&([a-zA-Z][a-zA-Z0-9]*|#[0-9]*)?|'
                        '<([a-zA-Z][^<>]*|'
                        '/([a-zA-Z][^<>]*)?|'
                        '![^<>]*)?')

entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
# hack to fix http://bugs.python.org/issue803422
# charref = re.compile('&#([0-9]+)[^0-9]')
charref = re.compile("&#(x?[0-9a-fA-F]+)[^0-9a-fA-F]")

starttagopen = re.compile('<[>a-zA-Z]')
shorttagopen = re.compile('<[a-zA-Z][-.a-zA-Z0-9]*/')
shorttag = re.compile('<([a-zA-Z][-.a-zA-Z0-9]*)/([^/]*)/')
piclose = re.compile('>')
endbracket = re.compile('[<>]')
# hack moved from _beautifulsoup.py (bundled BeautifulSoup version 2)
# This code makes Beautiful Soup able to parse XML with namespaces
# (':' was added to the allowed tag-name characters)
# tagfind = re.compile('[a-zA-Z][-_.a-zA-Z0-9]*')
tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
attrfind = re.compile(
    r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)(\s*=\s*'
    r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?')
class SGMLParseError(RuntimeError):
    """Exception raised for all parse errors."""
# SGML parser base class -- find tags and call handler functions.
# Usage: p = SGMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods
# with special names to handle tags: start_foo and end_foo to handle
# <foo> and </foo>, respectively, or do_foo to handle <foo> by itself.
# (Tags are converted to lower case for this purpose.) The data
# between tags is passed to the parser by calling self.handle_data()
# with some data as argument (the data may be split up in arbitrary
# chunks). Entity references are passed by calling
# self.handle_entityref() with the entity reference as argument.
class SGMLParser(markupbase.ParserBase):
# Definition of entities -- derived classes may override
entity_or_charref = re.compile('&(?:'
'([a-zA-Z][-.a-zA-Z0-9]*)|#([0-9]+)'
')(;?)')
    def __init__(self, verbose=0):
        """Initialize and reset this instance.

        verbose -- stored on the instance for subclasses/debugging use.
        """
        self.verbose = verbose
        self.reset()
    def reset(self):
        """Reset this instance. Loses all unprocessed data."""
        self.__starttag_text = None  # raw text of the last start tag parsed
        self.rawdata = ''            # buffered, not-yet-processed input
        self.stack = []              # currently open balanced tags
        self.lasttag = '???'         # most recent start tag, used for '<>'
        self.nomoretags = 0          # when true, treat all input as data
        self.literal = 0             # CDATA mode: only end tags are special
        markupbase.ParserBase.reset(self)
def setnomoretags(self):
"""Enter literal mode (CDATA) till EOF.
Intended for derived classes only.
"""
self.nomoretags = self.literal = 1
    def setliteral(self, *args):
        """Enter literal mode (CDATA).

        Intended for derived classes only.
        """
        # Extra positional arguments are accepted for compatibility and
        # ignored.
        self.literal = 1
def feed(self, data):
"""Feed some data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n'). (This just saves the text,
all the processing is done by goahead().)
"""
self.rawdata = self.rawdata + data
self.goahead(0)
    def close(self):
        """Handle the remaining data."""
        # Flush: process everything buffered as if followed by EOF.
        self.goahead(1)
    def error(self, message):
        """Report a fatal parse error by raising SGMLParseError."""
        raise SGMLParseError(message)
    # Internal -- handle data as far as reasonable. May leave state
    # and data to be processed by a subsequent call. If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        """Main parse loop: consume as much of self.rawdata as possible.

        Dispatches to the parse_*/finish_* methods for tags, comments,
        processing instructions, declarations and entity/character
        references; everything else goes to handle_data().  Input that
        may still be incomplete is left in self.rawdata for the next
        feed()/close() call, unless ``end`` is true.
        """
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            if self.nomoretags:
                # Literal-to-EOF mode: everything left is plain data.
                self.handle_data(rawdata[i:n])
                i = n
                break
            # Emit plain character data up to the next '&' or '<'.
            match = interesting.search(rawdata, i)
            if match: j = match.start()
            else: j = n
            if i < j:
                self.handle_data(rawdata[i:j])
            i = j
            if i == n: break
            if rawdata[i] == '<':
                if starttagopen.match(rawdata, i):
                    if self.literal:
                        # In CDATA mode start tags are just data.
                        self.handle_data(rawdata[i])
                        i = i+1
                        continue
                    k = self.parse_starttag(i)
                    if k < 0: break
                    i = k
                    continue
                if rawdata.startswith("</", i):
                    k = self.parse_endtag(i)
                    if k < 0: break
                    i = k
                    # An end tag always leaves CDATA mode.
                    self.literal = 0
                    continue
                if self.literal:
                    if n > (i + 1):
                        self.handle_data("<")
                        i = i+1
                    else:
                        # incomplete
                        break
                    continue
                if rawdata.startswith("<!--", i):
                    # Strictly speaking, a comment is --.*--
                    # within a declaration tag <!...>.
                    # This should be removed,
                    # and comments handled only in parse_declaration.
                    k = self.parse_comment(i)
                    if k < 0: break
                    i = k
                    continue
                if rawdata.startswith("<?", i):
                    k = self.parse_pi(i)
                    if k < 0: break
                    # NOTE: parse_pi returns a *length*, unlike the other
                    # parse_* methods which return an absolute index.
                    i = i+k
                    continue
                if rawdata.startswith("<!", i):
                    # This is some sort of declaration; in "HTML as
                    # deployed," this should only be the document type
                    # declaration ("<!DOCTYPE html...>").
                    k = self.parse_declaration(i)
                    if k < 0: break
                    i = k
                    continue
            elif rawdata[i] == '&':
                if self.literal:
                    self.handle_data(rawdata[i])
                    i = i+1
                    continue
                match = charref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_charref(name)
                    i = match.end(0)
                    # The terminating character is only consumed if it
                    # was the optional ';'.
                    if rawdata[i-1] != ';': i = i-1
                    continue
                match = entityref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_entityref(name)
                    i = match.end(0)
                    if rawdata[i-1] != ';': i = i-1
                    continue
            else:
                self.error('neither < nor & ??')
            # We get here only if incomplete matches but
            # nothing else
            match = incomplete.match(rawdata, i)
            if not match:
                self.handle_data(rawdata[i])
                i = i+1
                continue
            j = match.end(0)
            if j == n:
                break # Really incomplete
            self.handle_data(rawdata[i:j])
            i = j
        # end while
        if end and i < n:
            self.handle_data(rawdata[i:n])
            i = n
        self.rawdata = rawdata[i:]
        # XXX if end: check for empty stack
# Extensions for the DOCTYPE scanner:
_decl_otherchars = '='
# Internal -- parse processing instr, return length or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
if rawdata[i:i+2] != '<?':
self.error('unexpected call to parse_pi()')
match = piclose.search(rawdata, i+2)
if not match:
return -1
j = match.start(0)
self.handle_pi(rawdata[i+2: j])
j = match.end(0)
return j-i
    def get_starttag_text(self):
        """Return the raw text of the most recently parsed start tag."""
        return self.__starttag_text
    # Internal -- handle starttag, return length or -1 if not terminated
    def parse_starttag(self, i):
        """Parse a start tag (or SGML short tag) at index *i*.

        Returns the absolute index just past the tag, or -1 when the tag
        is not yet complete in the buffered data.
        """
        self.__starttag_text = None
        start_pos = i
        rawdata = self.rawdata
        if shorttagopen.match(rawdata, i):
            # SGML shorthand: <tag/data/ == <tag>data</tag>
            # XXX Can data contain &... (entity or char refs)?
            # XXX Can data contain < or > (tag characters)?
            # XXX Can there be whitespace before the first /?
            match = shorttag.match(rawdata, i)
            if not match:
                return -1
            tag, data = match.group(1, 2)
            self.__starttag_text = '<%s/' % tag
            tag = tag.lower()
            k = match.end(0)
            self.finish_shorttag(tag, data)
            self.__starttag_text = rawdata[start_pos:match.end(1) + 1]
            return k
        # XXX The following should skip matching quotes (' or ")
        # As a shortcut way to exit, this isn't so bad, but shouldn't
        # be used to locate the actual end of the start tag since the
        # < or > characters may be embedded in an attribute value.
        match = endbracket.search(rawdata, i+1)
        if not match:
            return -1
        j = match.start(0)
        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        if rawdata[i:i+2] == '<>':
            # SGML shorthand: <> == <last open tag seen>
            k = j
            tag = self.lasttag
        else:
            match = tagfind.match(rawdata, i+1)
            if not match:
                self.error('unexpected call to parse_starttag')
            k = match.end(0)
            tag = rawdata[i+1:k].lower()
            self.lasttag = tag
        while k < j:
            match = attrfind.match(rawdata, k)
            if not match: break
            attrname, rest, attrvalue = match.group(1, 2, 3)
            if not rest:
                # Valueless (boolean) attribute: value defaults to its name.
                attrvalue = attrname
            else:
                if (attrvalue[:1] == "'" == attrvalue[-1:] or
                    attrvalue[:1] == '"' == attrvalue[-1:]):
                    # strip quotes
                    attrvalue = attrvalue[1:-1]
                attrvalue = self.entity_or_charref.sub(
                    self._convert_ref, attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = match.end(0)
        if rawdata[j] == '>':
            j = j+1
        self.__starttag_text = rawdata[start_pos:j]
        self.finish_starttag(tag, attrs)
        return j
    # Internal -- convert entity or character reference
    def _convert_ref(self, match):
        """re.sub callback: convert one reference inside an attribute value.

        Falls back to the original reference text when the convert_*
        hook returns a false value.
        """
        if match.group(2):
            # Character reference ("&#123", optional ';').
            return self.convert_charref(match.group(2)) or \
                '&#%s%s' % match.groups()[1:]
        elif match.group(3):
            # Named entity reference terminated by ';'.
            return self.convert_entityref(match.group(1)) or \
                '&%s;' % match.group(1)
        else:
            # Bare "&name" without a semicolon: left untouched.
            return '&%s' % match.group(1)
# Internal -- parse endtag
def parse_endtag(self, i):
    """Parse an end tag beginning at index i of self.rawdata.

    Returns the index just past the tag, or -1 if the tag is not yet
    terminated in the buffered data.
    """
    buf = self.rawdata
    bracket = endbracket.search(buf, i + 1)
    if not bracket:
        return -1
    end = bracket.start(0)
    # The tag name sits between '</' and the closing bracket.
    name = buf[i + 2:end].strip().lower()
    if buf[end] == '>':
        end = end + 1
    self.finish_endtag(name)
    return end
# Internal -- finish parsing of <tag/data/ (same as <tag>data</tag>)
def finish_shorttag(self, tag, data):
    """Expand the SGML short form into the equivalent start-tag /
    data / end-tag event sequence."""
    self.finish_starttag(tag, [])
    self.handle_data(data)
    self.finish_endtag(tag)
# Internal -- finish processing of start tag
# Return -1 for unknown tag, 0 for open-only tag, 1 for balanced tag
def finish_starttag(self, tag, attrs):
    """Dispatch a parsed start tag.

    Looks for a start_<tag> method (balanced tag: pushed on the stack),
    then a do_<tag> method (open-only tag), else unknown_starttag().
    """
    try:
        method = getattr(self, 'start_' + tag)
    except AttributeError:
        try:
            method = getattr(self, 'do_' + tag)
        except AttributeError:
            self.unknown_starttag(tag, attrs)
            return -1
        else:
            # do_<tag>: element takes no end tag; don't push it.
            self.handle_starttag(tag, method, attrs)
            return 0
    else:
        # start_<tag>: balanced element; remember it for finish_endtag().
        self.stack.append(tag)
        self.handle_starttag(tag, method, attrs)
        return 1
# Internal -- finish processing of end tag
def finish_endtag(self, tag):
    """Dispatch a parsed end tag, closing any elements left open above
    the matching start tag on the stack.

    An empty tag (</>) closes the innermost open element; a tag not on
    the stack is reported as unbalanced (or unknown if no end_<tag>
    handler exists).
    """
    if not tag:
        # </> : close the most recently opened element.
        found = len(self.stack) - 1
        if found < 0:
            self.unknown_endtag(tag)
            return
    else:
        if tag not in self.stack:
            try:
                method = getattr(self, 'end_' + tag)
            except AttributeError:
                self.unknown_endtag(tag)
            else:
                # A handler exists but the tag was never opened.
                self.report_unbalanced(tag)
            return
        # Find the outermost matching start tag; everything above it
        # will be implicitly closed by the loop below.
        found = len(self.stack)
        for i in range(found):
            if self.stack[i] == tag: found = i
    while len(self.stack) > found:
        tag = self.stack[-1]
        try:
            method = getattr(self, 'end_' + tag)
        except AttributeError:
            method = None
        if method:
            self.handle_endtag(tag, method)
        else:
            self.unknown_endtag(tag)
        del self.stack[-1]
# Overridable -- handle start tag
def handle_starttag(self, tag, method, attrs):
    """Invoke the resolved start_*/do_* handler with the attribute list."""
    method(attrs)

# Overridable -- handle end tag
def handle_endtag(self, tag, method):
    """Invoke the resolved end_* handler."""
    method()

# Example -- report an unbalanced </...> tag.
def report_unbalanced(self, tag):
    """Print a diagnostic for an end tag with no open start tag
    (only in verbose mode)."""
    if self.verbose:
        print '*** Unbalanced </' + tag + '>'
        print '*** Stack:', self.stack
def convert_charref(self, name):
    """Convert character reference, may be overridden.

    Returns the replacement text, or None for non-numeric or
    non-ASCII references (callers then fall back to unknown_charref).
    """
    try:
        n = int(name)
    except ValueError:
        return
    if not 0 <= n <= 127:
        # Only ASCII code points are converted here.
        return
    return self.convert_codepoint(n)

def convert_codepoint(self, codepoint):
    # Hook for subclasses: map an integer code point to text.
    return chr(codepoint)

def handle_charref(self, name):
    """Handle character reference, no need to override."""
    replacement = self.convert_charref(name)
    if replacement is None:
        self.unknown_charref(name)
    else:
        self.handle_data(replacement)
# Definition of entities -- derived classes may override
entitydefs = \
        {'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''}

def convert_entityref(self, name):
    """Convert entity references.

    As an alternative to overriding this method; one can tailor the
    results by setting up the self.entitydefs mapping appropriately.
    Returns None for unknown entities.
    """
    table = self.entitydefs
    if name in table:
        return table[name]
    else:
        # None signals "unknown" to handle_entityref().
        return

def handle_entityref(self, name):
    """Handle entity references, no need to override."""
    replacement = self.convert_entityref(name)
    if replacement is None:
        self.unknown_entityref(name)
    else:
        self.handle_data(replacement)
# Example -- handle data, should be overridden
def handle_data(self, data):
    pass

# Example -- handle comment, could be overridden
def handle_comment(self, data):
    pass

# Example -- handle declaration, could be overridden
def handle_decl(self, decl):
    pass

# Example -- handle processing instruction, could be overridden
def handle_pi(self, data):
    pass

# To be overridden -- handlers for unknown objects
def unknown_starttag(self, tag, attrs): pass
def unknown_endtag(self, tag): pass
def unknown_charref(self, ref): pass
def unknown_entityref(self, ref): pass
class TestSGMLParser(SGMLParser):
    """Debugging parser: buffers character data and prints every parse
    event (tags, comments, refs) to stdout.  Python 2 only (print
    statements)."""

    def __init__(self, verbose=0):
        self.testdata = ""
        SGMLParser.__init__(self, verbose)

    def handle_data(self, data):
        self.testdata = self.testdata + data
        # Flush in chunks so output lines stay readable.
        if len(repr(self.testdata)) >= 70:
            self.flush()

    def flush(self):
        """Print and reset any buffered character data."""
        data = self.testdata
        if data:
            self.testdata = ""
            print 'data:', repr(data)

    def handle_comment(self, data):
        self.flush()
        r = repr(data)
        if len(r) > 68:
            # Elide the middle of very long comments.
            r = r[:32] + '...' + r[-32:]
        print 'comment:', r

    def unknown_starttag(self, tag, attrs):
        self.flush()
        if not attrs:
            print 'start tag: <' + tag + '>'
        else:
            print 'start tag: <' + tag,
            for name, value in attrs:
                print name + '=' + '"' + value + '"',
            print '>'

    def unknown_endtag(self, tag):
        self.flush()
        print 'end tag: </' + tag + '>'

    def unknown_entityref(self, ref):
        self.flush()
        print '*** unknown entity ref: &' + ref + ';'

    def unknown_charref(self, ref):
        self.flush()
        print '*** unknown char ref: &#' + ref + ';'

    def unknown_decl(self, data):
        self.flush()
        print '*** unknown decl: [' + data + ']'

    def close(self):
        SGMLParser.close(self)
        self.flush()
def test(args = None):
    """Command-line driver: parse a file (default test.html, '-' for
    stdin) one character at a time; '-s' uses the silent base parser
    instead of the printing TestSGMLParser.  Python 2 only."""
    import sys
    if args is None:
        args = sys.argv[1:]

    if args and args[0] == '-s':
        args = args[1:]
        klass = SGMLParser
    else:
        klass = TestSGMLParser

    if args:
        file = args[0]
    else:
        file = 'test.html'

    if file == '-':
        f = sys.stdin
    else:
        try:
            f = open(file, 'r')
        except IOError, msg:
            print file, ":", msg
            sys.exit(1)

    data = f.read()
    if f is not sys.stdin:
        f.close()

    x = klass()
    # Feed one character at a time to exercise incremental parsing.
    for c in data:
        x.feed(c)
    x.close()
if __name__ == '__main__':
test() | unknown | codeparrot/codeparrot-clean | ||
from __future__ import print_function
import gc
'''
This snippet shows how to create a uncollectible object:
It is an object in a cycle reference chain, in which there is an object
with __del__ defined.
The simpliest is an object that refers to itself and with a __del__ defined.
> python uncollectible.py
======= collectible object =======
*** init, nr of referrers: 4
garbage: []
created: collectible: <__main__.One object at 0x102c01090>
nr of referrers: 5
delete:
*** __del__ called
*** after gc, nr of referrers: 4
garbage: []
======= uncollectible object =======
*** init, nr of referrers: 4
garbage: []
created: uncollectible: <__main__.One object at 0x102c01110>
nr of referrers: 5
delete:
*** after gc, nr of referrers: 5
garbage: [<__main__.One object at 0x102c01110>]
'''
def dd(*msg):
    """Print every fragment of *msg* on one line with no separators,
    followed by a single newline."""
    print(*msg, sep='', end='\n')
class One(object):
    """Demo object for the gc experiment.

    When constructed with collectible=False it builds a self-reference
    cycle while also defining __del__, which (on interpreters predating
    PEP 442) makes the instance uncollectible.
    """

    def __init__(self, collectible):
        self.typ = 'collectible' if collectible else 'uncollectible'
        if not collectible:
            # Make a reference to it self, to form a reference cycle.
            # A reference cycle with __del__, makes it uncollectible.
            self.me = self

    def __del__(self):
        dd('*** __del__ called')
def test_it(collectible):
    """Create and delete one One() instance, printing gc referrer counts
    and gc.garbage before and after, to show the collectible vs
    uncollectible behavior described in the module docstring."""
    dd()
    dd('======= ', ('collectible' if collectible else 'uncollectible'), ' object =======')
    dd()
    # Start from a clean collector state.
    gc.collect()
    dd('*** init, nr of referrers: ', len(gc.get_referrers(One)))
    dd(' garbage: ', gc.garbage)
    one = One(collectible)
    dd(' created: ', one.typ, ': ', one)
    dd(' nr of referrers: ', len(gc.get_referrers(One)))
    dd(' delete:')
    del one
    gc.collect()
    # An uncollectible instance survives and shows up in gc.garbage.
    dd('*** after gc, nr of referrers: ', len(gc.get_referrers(One)))
    dd(' garbage: ', gc.garbage)
if __name__ == "__main__":
test_it(collectible=True)
test_it(collectible=False) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python -i
# preceding line should have path for Python on your machine

# matplotlib_plot.py
# Purpose: plot Temp of running LAMMPS simulation via matplotlib
# Syntax:  plot.py in.lammps Nfreq Nsteps compute-ID
#          in.lammps = LAMMPS input script
#          Nfreq = plot data point every this many steps
#          Nsteps = run for this many steps
#          compute-ID = ID of compute that calculates temperature
#                       (or any other scalar quantity)

from __future__ import print_function
import sys
import matplotlib.pyplot as plt

# parse command line

argv = sys.argv
if len(argv) != 5:
    print("Syntax: plot.py in.lammps Nfreq Nsteps compute-ID")
    sys.exit()

infile = sys.argv[1]
nfreq = int(sys.argv[2])
nsteps = int(sys.argv[3])
compute = sys.argv[4]

me = 0
# uncomment this if running in parallel via mpi4py
#from mpi4py import MPI
#me = MPI.COMM_WORLD.Get_rank()
#nprocs = MPI.COMM_WORLD.Get_size()

from lammps import lammps
lmp = lammps()

# run infile all at once
# assumed to have no run command in it

lmp.file(infile)
lmp.command("thermo %d" % nfreq)

# initial 0-step run to generate initial 1-point plot

lmp.command("run 0 pre yes post no")
value = lmp.extract_compute(compute,0,0)
ntimestep = 0
xaxis = [ntimestep]
yaxis = [value]

# create matplotlib plot
# just proc 0 handles plotting

if me == 0:
    fig = plt.figure()
    line, = plt.plot(xaxis, yaxis)
    plt.xlim([0, nsteps])
    plt.title(compute)
    plt.xlabel("Timestep")
    plt.ylabel("Temperature")
    plt.show(block=False)

# run nfreq steps at a time w/out pre/post, query compute, refresh plot

import time

while ntimestep < nsteps:
    lmp.command("run %d pre no post no" % nfreq)
    ntimestep += nfreq
    value = lmp.extract_compute(compute,0,0)
    xaxis.append(ntimestep)
    yaxis.append(value)
    if me == 0:
        # Update the existing line in place and let matplotlib rescale.
        line.set_xdata(xaxis)
        line.set_ydata(yaxis)
        ax = plt.gca()
        ax.relim()
        ax.autoscale_view(True, True, True)
        plt.pause(0.001)

# final 0-step run to restore LAMMPS post-run state
lmp.command("run 0 pre no post yes")

# uncomment if running in parallel via mpi4py
#print("Proc %d out of %d procs has" % (me,nprocs), lmp)

if me == 0:
    if sys.version_info[0] == 3:
        input("Press Enter to exit...")
    else:
        raw_input("Press Enter to exit...")
from ctypes import *
import unittest, sys
from ctypes.test import is_resource_enabled

################################################################
# This section should be moved into ctypes\__init__.py, when it's ready.

from _ctypes import PyObj_FromPtr

################################################################

from sys import getrefcount as grc

# Py_ssize_t was introduced in Python 2.5; older interpreters used int
# for C-API size arguments.
if sys.version_info > (2, 4):
    c_py_ssize_t = c_size_t
else:
    c_py_ssize_t = c_int
class PythonAPITestCase(unittest.TestCase):
    """Exercise CPython C-API functions through ctypes.pythonapi.

    NOTE(review): Python 2 era code (PyString_* / PyInt_* APIs no longer
    exist on Python 3).
    """

    def test_PyString_FromStringAndSize(self):
        PyString_FromStringAndSize = pythonapi.PyString_FromStringAndSize
        PyString_FromStringAndSize.restype = py_object
        PyString_FromStringAndSize.argtypes = c_char_p, c_py_ssize_t
        self.assertEqual(PyString_FromStringAndSize("abcdefghi", 3), "abc")

    def test_PyString_FromString(self):
        pythonapi.PyString_FromString.restype = py_object
        pythonapi.PyString_FromString.argtypes = (c_char_p,)
        s = "abc"
        refcnt = grc(s)
        pyob = pythonapi.PyString_FromString(s)
        # A new string object is created; s's refcount is unchanged.
        self.assertEqual(grc(s), refcnt)
        self.assertEqual(s, pyob)
        del pyob
        self.assertEqual(grc(s), refcnt)

    if is_resource_enabled("refcount"):
        # This test is unreliable, because it is possible that code in
        # unittest changes the refcount of the '42' integer. So, it
        # is disabled by default.
        def test_PyInt_Long(self):
            ref42 = grc(42)
            pythonapi.PyInt_FromLong.restype = py_object
            self.assertEqual(pythonapi.PyInt_FromLong(42), 42)
            self.assertEqual(grc(42), ref42)
            pythonapi.PyInt_AsLong.argtypes = (py_object,)
            pythonapi.PyInt_AsLong.restype = c_long
            res = pythonapi.PyInt_AsLong(42)
            # py_object argtype holds an extra reference while res lives.
            self.assertEqual(grc(res), ref42 + 1)
            del res
            self.assertEqual(grc(42), ref42)

    def test_PyObj_FromPtr(self):
        s = "abc def ghi jkl"
        ref = grc(s)
        # id(python-object) is the address
        pyobj = PyObj_FromPtr(id(s))
        self.assertTrue(s is pyobj)
        self.assertEqual(grc(s), ref + 1)
        del pyobj
        self.assertEqual(grc(s), ref)

    def test_PyOS_snprintf(self):
        PyOS_snprintf = pythonapi.PyOS_snprintf
        PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p
        buf = c_buffer(256)
        PyOS_snprintf(buf, sizeof(buf), "Hello from %s", "ctypes")
        self.assertEqual(buf.value, "Hello from ctypes")
        # Extra varargs beyond the format are ignored by snprintf.
        PyOS_snprintf(buf, sizeof(buf), "Hello from %s", "ctypes", 1, 2, 3)
        self.assertEqual(buf.value, "Hello from ctypes")
        # not enough arguments
        self.assertRaises(TypeError, PyOS_snprintf, buf)

    def test_pyobject_repr(self):
        self.assertEqual(repr(py_object()), "py_object(<NULL>)")
        self.assertEqual(repr(py_object(42)), "py_object(42)")
        self.assertEqual(repr(py_object(object)), "py_object(%r)" % object)
if __name__ == "__main__":
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
"""
pygments.lexers.ecl
~~~~~~~~~~~~~~~~~~~
Lexers for the ECL language.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['ECLLexer']
class ECLLexer(RegexLexer):
    """
    Lexer for the declarative big-data `ECL
    <http://hpccsystems.com/community/docs/ecl-language-reference/html>`_
    language.

    .. versionadded:: 1.5
    """

    name = 'ECL'
    aliases = ['ecl']
    filenames = ['*.ecl']
    mimetypes = ['application/x-ecl']

    # ECL is case-insensitive; patterns below are written in one case.
    flags = re.IGNORECASE | re.MULTILINE

    tokens = {
        'root': [
            include('whitespace'),
            include('statements'),
        ],
        'whitespace': [
            (r'\s+', Text),
            (r'\/\/.*', Comment.Single),
            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
        ],
        'statements': [
            include('types'),
            include('keywords'),
            include('functions'),
            include('hash'),
            (r'"', String, 'string'),
            (r'\'', String, 'string'),
            (r'(\d+\.\d*|\.\d+|\d+)e[+-]?\d+[lu]*', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+f)f?', Number.Float),
            (r'0x[0-9a-f]+[lu]*', Number.Hex),
            (r'0[0-7]+[lu]*', Number.Oct),
            (r'\d+[lu]*', Number.Integer),
            # A stray comment terminator outside a comment is an error.
            (r'\*/', Error),
            (r'[~!%^&*+=|?:<>/-]+', Operator),
            (r'[{}()\[\],.;]', Punctuation),
            (r'[a-z_]\w*', Name),
        ],
        'hash': [
            (r'^#.*$', Comment.Preproc),
        ],
        'types': [
            (r'(RECORD|END)\D', Keyword.Declaration),
            (r'((?:ASCII|BIG_ENDIAN|BOOLEAN|DATA|DECIMAL|EBCDIC|INTEGER|PATTERN|'
             r'QSTRING|REAL|RECORD|RULE|SET OF|STRING|TOKEN|UDECIMAL|UNICODE|'
             r'UNSIGNED|VARSTRING|VARUNICODE)\d*)(\s+)',
             bygroups(Keyword.Type, Text)),
        ],
        'keywords': [
            (words((
                'APPLY', 'ASSERT', 'BUILD', 'BUILDINDEX', 'EVALUATE', 'FAIL',
                'KEYDIFF', 'KEYPATCH', 'LOADXML', 'NOTHOR', 'NOTIFY', 'OUTPUT',
                'PARALLEL', 'SEQUENTIAL', 'SOAPCALL', 'CHECKPOINT', 'DEPRECATED',
                'FAILCODE', 'FAILMESSAGE', 'FAILURE', 'GLOBAL', 'INDEPENDENT',
                'ONWARNING', 'PERSIST', 'PRIORITY', 'RECOVERY', 'STORED', 'SUCCESS',
                'WAIT', 'WHEN'), suffix=r'\b'),
             Keyword.Reserved),
            # These are classed differently, check later
            (words((
                'ALL', 'AND', 'ANY', 'AS', 'ATMOST', 'BEFORE', 'BEGINC++', 'BEST', 'BETWEEN', 'CASE',
                'CONST', 'COUNTER', 'CSV', 'DESCEND', 'ENCRYPT', 'ENDC++', 'ENDMACRO', 'EXCEPT',
                'EXCLUSIVE', 'EXPIRE', 'EXPORT', 'EXTEND', 'FALSE', 'FEW', 'FIRST', 'FLAT', 'FULL',
                'FUNCTION', 'GROUP', 'HEADER', 'HEADING', 'HOLE', 'IFBLOCK', 'IMPORT', 'IN', 'JOINED',
                'KEEP', 'KEYED', 'LAST', 'LEFT', 'LIMIT', 'LOAD', 'LOCAL', 'LOCALE', 'LOOKUP', 'MACRO',
                'MANY', 'MAXCOUNT', 'MAXLENGTH', 'MIN SKEW', 'MODULE', 'INTERFACE', 'NAMED', 'NOCASE',
                'NOROOT', 'NOSCAN', 'NOSORT', 'NOT', 'OF', 'ONLY', 'OPT', 'OR', 'OUTER', 'OVERWRITE',
                'PACKED', 'PARTITION', 'PENALTY', 'PHYSICALLENGTH', 'PIPE', 'QUOTE', 'RELATIONSHIP',
                'REPEAT', 'RETURN', 'RIGHT', 'SCAN', 'SELF', 'SEPARATOR', 'SERVICE', 'SHARED', 'SKEW',
                'SKIP', 'SQL', 'STORE', 'TERMINATOR', 'THOR', 'THRESHOLD', 'TOKEN', 'TRANSFORM', 'TRIM',
                'TRUE', 'TYPE', 'UNICODEORDER', 'UNSORTED', 'VALIDATE', 'VIRTUAL', 'WHOLE', 'WILD',
                'WITHIN', 'XML', 'XPATH', '__COMPRESSED__'), suffix=r'\b'),
             Keyword.Reserved),
        ],
        'functions': [
            (words((
                'ABS', 'ACOS', 'ALLNODES', 'ASCII', 'ASIN', 'ASSTRING', 'ATAN', 'ATAN2', 'AVE', 'CASE',
                'CHOOSE', 'CHOOSEN', 'CHOOSESETS', 'CLUSTERSIZE', 'COMBINE', 'CORRELATION', 'COS',
                'COSH', 'COUNT', 'COVARIANCE', 'CRON', 'DATASET', 'DEDUP', 'DEFINE', 'DENORMALIZE',
                'DISTRIBUTE', 'DISTRIBUTED', 'DISTRIBUTION', 'EBCDIC', 'ENTH', 'ERROR', 'EVALUATE',
                'EVENT', 'EVENTEXTRA', 'EVENTNAME', 'EXISTS', 'EXP', 'FAILCODE', 'FAILMESSAGE',
                'FETCH', 'FROMUNICODE', 'GETISVALID', 'GLOBAL', 'GRAPH', 'GROUP', 'HASH', 'HASH32',
                'HASH64', 'HASHCRC', 'HASHMD5', 'HAVING', 'IF', 'INDEX', 'INTFORMAT', 'ISVALID',
                'ITERATE', 'JOIN', 'KEYUNICODE', 'LENGTH', 'LIBRARY', 'LIMIT', 'LN', 'LOCAL', 'LOG', 'LOOP',
                'MAP', 'MATCHED', 'MATCHLENGTH', 'MATCHPOSITION', 'MATCHTEXT', 'MATCHUNICODE',
                'MAX', 'MERGE', 'MERGEJOIN', 'MIN', 'NOLOCAL', 'NONEMPTY', 'NORMALIZE', 'PARSE', 'PIPE',
                'POWER', 'PRELOAD', 'PROCESS', 'PROJECT', 'PULL', 'RANDOM', 'RANGE', 'RANK', 'RANKED',
                'REALFORMAT', 'RECORDOF', 'REGEXFIND', 'REGEXREPLACE', 'REGROUP', 'REJECTED',
                'ROLLUP', 'ROUND', 'ROUNDUP', 'ROW', 'ROWDIFF', 'SAMPLE', 'SET', 'SIN', 'SINH', 'SIZEOF',
                'SOAPCALL', 'SORT', 'SORTED', 'SQRT', 'STEPPED', 'STORED', 'SUM', 'TABLE', 'TAN', 'TANH',
                'THISNODE', 'TOPN', 'TOUNICODE', 'TRANSFER', 'TRIM', 'TRUNCATE', 'TYPEOF', 'UNGROUP',
                'UNICODEORDER', 'VARIANCE', 'WHICH', 'WORKUNIT', 'XMLDECODE', 'XMLENCODE',
                'XMLTEXT', 'XMLUNICODE'), suffix=r'\b'),
             Name.Function),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\'', String, '#pop'),
            (r'[^"\']+', String),
        ],
    }
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package command
import (
"fmt"
"strings"
"github.com/hashicorp/cli"
"github.com/posener/complete"
)
// Compile-time interface checks.
var (
	_ cli.Command             = (*OperatorStepDownCommand)(nil)
	_ cli.CommandAutocomplete = (*OperatorStepDownCommand)(nil)
)

// OperatorStepDownCommand implements "vault operator step-down".
type OperatorStepDownCommand struct {
	*BaseCommand
}
// Synopsis returns the one-line description shown in command listings.
func (c *OperatorStepDownCommand) Synopsis() string {
	return "Forces Vault to resign active duty"
}
// Help returns the long-form usage text, including generated flag help.
// NOTE(review): interior indentation of the raw string reconstructed to
// the usual Vault help-text layout — confirm against upstream.
func (c *OperatorStepDownCommand) Help() string {
	helpText := `
Usage: vault operator step-down [options]

  Forces the Vault server at the given address to step down from active duty.
  While the affected node will have a delay before attempting to acquire the
  leader lock again, if no other Vault nodes acquire the lock beforehand, it
  is possible for the same node to re-acquire the lock and become active
  again.

  Force Vault to step down as the leader:

      $ vault operator step-down

` + c.Flags().Help()

	return strings.TrimSpace(helpText)
}
// Flags returns the flag sets for this command (HTTP/client flags only).
func (c *OperatorStepDownCommand) Flags() *FlagSets {
	return c.flagSet(FlagSetHTTP)
}

// AutocompleteArgs returns nil: the command takes no positional arguments.
func (c *OperatorStepDownCommand) AutocompleteArgs() complete.Predictor {
	return nil
}

// AutocompleteFlags returns shell-completion predictors for the flags.
func (c *OperatorStepDownCommand) AutocompleteFlags() complete.Flags {
	return c.Flags().Completions()
}
// Run executes the step-down request against the configured Vault server.
// Exit codes: 0 success, 1 usage/parse error, 2 client or API error.
func (c *OperatorStepDownCommand) Run(args []string) int {
	f := c.Flags()

	if err := f.Parse(args); err != nil {
		c.UI.Error(err.Error())
		return 1
	}

	// No positional arguments are accepted.
	args = f.Args()
	if len(args) > 0 {
		c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(args)))
		return 1
	}

	client, err := c.Client()
	if err != nil {
		c.UI.Error(err.Error())
		return 2
	}

	if err := client.Sys().StepDown(); err != nil {
		c.UI.Error(fmt.Sprintf("Error stepping down: %s", err))
		return 2
	}

	c.UI.Output(fmt.Sprintf("Success! Stepped down: %s", client.Address()))
	return 0
}
# -*- encoding: utf-8 -*-
##############################################################################
#
# Scheduler Error Mailer module for OpenERP
# Copyright (C) 2012-2013 Akretion (http://www.akretion.com/)
# @author: Sébastien Beau <sebastien.beau@akretion.com>
# @author David Beal <bealdavid@gmail.com>
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest (declarative metadata dict; no code).
{
    'name': 'Scheduler Error Mailer',
    'summary': 'Send an e-mail when a scheduler fails',
    'version': '1.0',
    'category': 'Extra Tools',
    'license': 'AGPL-3',
    'description': """
Scheduler Error Mailer
======================

This module adds the possibility to send an e-mail when a scheduler raises
an error.""",
    'author': "Akretion,Odoo Community Association (OCA)",
    'website': 'http://www.akretion.com/',
    # email_template provides the mail-template engine used by the cron hook.
    'depends': ['email_template'],
    'data': [
        'ir_cron.xml',
        'ir_cron_email_tpl.xml',
    ],
    'demo': ['ir_cron_demo.xml'],
    'images': ['images/scheduler_error_mailer.jpg'],
    'installable': True,
}
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Set of utilities for execution of a pipeline by the FnApiRunner."""
# mypy: disallow-untyped-defs
import collections
import copy
import itertools
import uuid
import weakref
from typing import TYPE_CHECKING
from typing import Any
from typing import DefaultDict
from typing import Dict
from typing import Iterable
from typing import Iterator
from typing import List
from typing import MutableMapping
from typing import Optional
from typing import Set
from typing import Tuple
from typing_extensions import Protocol
from apache_beam import coders
from apache_beam.coders import BytesCoder
from apache_beam.coders.coder_impl import create_InputStream
from apache_beam.coders.coder_impl import create_OutputStream
from apache_beam.coders.coders import GlobalWindowCoder
from apache_beam.coders.coders import WindowedValueCoder
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.runners import pipeline_context
from apache_beam.runners.portability.fn_api_runner import translations
from apache_beam.runners.portability.fn_api_runner.translations import create_buffer_id
from apache_beam.runners.portability.fn_api_runner.translations import only_element
from apache_beam.runners.portability.fn_api_runner.translations import split_buffer_id
from apache_beam.runners.portability.fn_api_runner.translations import unique_name
from apache_beam.runners.portability.fn_api_runner.watermark_manager import WatermarkManager
from apache_beam.runners.worker import bundle_processor
from apache_beam.transforms import core
from apache_beam.transforms import trigger
from apache_beam.transforms import window
from apache_beam.transforms.window import GlobalWindow
from apache_beam.transforms.window import GlobalWindows
from apache_beam.utils import proto_utils
from apache_beam.utils import windowed_value
if TYPE_CHECKING:
from apache_beam.coders.coder_impl import CoderImpl, WindowedValueCoderImpl
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.portability.fn_api_runner import worker_handlers
from apache_beam.runners.portability.fn_api_runner.fn_runner import DataOutput
from apache_beam.runners.portability.fn_api_runner.fn_runner import OutputTimers
from apache_beam.runners.portability.fn_api_runner.translations import DataSideInput
from apache_beam.runners.portability.fn_api_runner.translations import TimerFamilyId
from apache_beam.transforms.window import BoundedWindow
# Pre-encoded single empty-bytes element in the global window; used as the
# payload emitted for Impulse transforms.
ENCODED_IMPULSE_VALUE = WindowedValueCoder(
    BytesCoder(), GlobalWindowCoder()).get_impl().encode_nested(
        GlobalWindows.windowed_value(b''))

# WindowFn URNs the runner can evaluate directly (everything known except
# opaque pickled Python WindowFns).
SAFE_WINDOW_FNS = set(window.WindowFn._known_urns.keys()) - set(
    [python_urns.PICKLED_WINDOWFN])
class Buffer(Protocol):
  """Structural type for an appendable, iterable store of encoded bytes."""
  def __iter__(self):
    # type: () -> Iterator[bytes]
    pass

  def append(self, item):
    # type: (bytes) -> None
    pass


class PartitionableBuffer(Buffer, Protocol):
  """A Buffer that can additionally be split into N parts and
  cleared/reset for reuse between bundle executions."""
  def partition(self, n):
    # type: (int) -> List[List[bytes]]
    pass

  @property
  def cleared(self):
    # type: () -> bool
    pass

  def clear(self):
    # type: () -> None
    pass

  def reset(self):
    # type: () -> None
    pass
class ListBuffer(object):
  """Used to support partitioning of a list of encoded element batches."""
  def __init__(self, coder_impl):
    # type: (CoderImpl) -> None
    self._coder_impl = coder_impl
    self._inputs = []  # type: List[bytes]
    # Cached result of partition(); append() is illegal once this exists.
    self._grouped_output = None  # type: Optional[List[List[bytes]]]
    self.cleared = False

  def append(self, element):
    # type: (bytes) -> None
    if self.cleared:
      raise RuntimeError('Trying to append to a cleared ListBuffer.')
    if self._grouped_output:
      raise RuntimeError('ListBuffer append after read.')
    self._inputs.append(element)

  def partition(self, n):
    # type: (int) -> List[List[bytes]]
    """Split the buffered batches into n roughly-equal parts.

    With at least n batches (or none) this is a cheap round-robin over
    whole batches; with fewer, batches are decoded and individual
    elements re-encoded round-robin so all n parts get data.
    """
    if self.cleared:
      raise RuntimeError('Trying to partition a cleared ListBuffer.')
    if len(self._inputs) >= n or len(self._inputs) == 0:
      return [self._inputs[k::n] for k in range(n)]
    else:
      if not self._grouped_output:
        output_stream_list = [create_OutputStream() for _ in range(n)]
        idx = 0
        for input in self._inputs:
          input_stream = create_InputStream(input)
          while input_stream.size() > 0:
            decoded_value = self._coder_impl.decode_from_stream(
                input_stream, True)
            self._coder_impl.encode_to_stream(
                decoded_value, output_stream_list[idx], True)
            idx = (idx + 1) % n
        self._grouped_output = [[output_stream.get()]
                                for output_stream in output_stream_list]
      return self._grouped_output

  def __iter__(self):
    # type: () -> Iterator[bytes]
    if self.cleared:
      raise RuntimeError('Trying to iterate through a cleared ListBuffer.')
    return iter(self._inputs)

  def clear(self):
    # type: () -> None
    self.cleared = True
    self._inputs = []
    self._grouped_output = None

  def reset(self):
    # type: () -> None
    """Resets a cleared buffer for reuse."""
    if not self.cleared:
      raise RuntimeError('Trying to reset a non-cleared ListBuffer.')
    self.cleared = False
class GroupingBuffer(object):
  """Used to accumulate grouped (shuffled) results.

  Decodes keyed input elements into a per-key table on append(), and on
  partition() runs the windowing/trigger logic to produce the grouped,
  re-encoded output partitions.
  """
  def __init__(self,
               pre_grouped_coder,  # type: coders.Coder
               post_grouped_coder,  # type: coders.Coder
               windowing  # type: core.Windowing
              ):
    # type: (...) -> None
    self._key_coder = pre_grouped_coder.key_coder()
    self._pre_grouped_coder = pre_grouped_coder
    self._post_grouped_coder = post_grouped_coder
    # Maps encoded key -> accumulated values (possibly windowed).
    self._table = collections.defaultdict(
        list)  # type: DefaultDict[bytes, List[Any]]
    self._windowing = windowing
    self._grouped_output = None  # type: Optional[List[List[bytes]]]

  def append(self, elements_data):
    # type: (bytes) -> None
    if self._grouped_output:
      raise RuntimeError('Grouping table append after read.')
    input_stream = create_InputStream(elements_data)
    coder_impl = self._pre_grouped_coder.get_impl()
    key_coder_impl = self._key_coder.get_impl()
    # TODO(robertwb): We could optimize this even more by using a
    # window-dropping coder for the data plane.
    is_trivial_windowing = self._windowing.is_default()
    while input_stream.size() > 0:
      windowed_key_value = coder_impl.decode_from_stream(input_stream, True)
      key, value = windowed_key_value.value
      # For default (global) windowing the window metadata can be dropped.
      self._table[key_coder_impl.encode(key)].append(
          value if is_trivial_windowing else windowed_key_value.
          with_value(value))

  def partition(self, n):
    # type: (int) -> List[List[bytes]]
    """ It is used to partition _GroupingBuffer to N parts. Once it is
    partitioned, it would not be re-partitioned with diff N. Re-partition
    is not supported now.
    """
    if not self._grouped_output:
      if self._windowing.is_default():
        # Fast path: wrap each (key, values) in a single on-time pane in
        # the global window.
        globally_window = GlobalWindows.windowed_value(
            None,
            timestamp=GlobalWindow().max_timestamp(),
            pane_info=windowed_value.PaneInfo(
                is_first=True,
                is_last=True,
                timing=windowed_value.PaneInfoTiming.ON_TIME,
                index=0,
                nonspeculative_index=0)).with_value
        windowed_key_values = lambda key, values: [
            globally_window((key, values))]
      else:
        # TODO(pabloem, BEAM-7514): Trigger driver needs access to the clock
        # note that this only comes through if windowing is default - but what
        # about having multiple firings on the global window.
        # May need to revise.
        trigger_driver = trigger.create_trigger_driver(self._windowing, True)
        windowed_key_values = trigger_driver.process_entire_key
      coder_impl = self._post_grouped_coder.get_impl()
      key_coder_impl = self._key_coder.get_impl()
      self._grouped_output = [[] for _ in range(n)]
      output_stream_list = [create_OutputStream() for _ in range(n)]
      # Round-robin keys across the n output partitions.
      for idx, (encoded_key, windowed_values) in enumerate(self._table.items()):
        key = key_coder_impl.decode(encoded_key)
        for wkvs in windowed_key_values(key, windowed_values):
          coder_impl.encode_to_stream(wkvs, output_stream_list[idx % n], True)
      for ix, output_stream in enumerate(output_stream_list):
        self._grouped_output[ix] = [output_stream.get()]
      self._table.clear()
    return self._grouped_output

  def __iter__(self):
    # type: () -> Iterator[bytes]
    """ Since partition() returns a list of lists, add this __iter__ to return
    a list to simplify code when we need to iterate through ALL elements of
    _GroupingBuffer.
    """
    return itertools.chain(*self.partition(1))

  # these should never be accessed, but they allow this class to meet the
  # PartionableBuffer protocol
  cleared = False

  def clear(self):
    # type: () -> None
    pass

  def reset(self):
    # type: () -> None
    pass
class WindowGroupingBuffer(object):
  """Used to partition windowed side inputs.

  Buckets decoded values by (key, window) according to the side-input
  access pattern, and re-encodes them per bucket via encoded_items().
  """
  def __init__(
      self,
      access_pattern,  # type: beam_runner_api_pb2.FunctionSpec
      coder  # type: WindowedValueCoder
  ):
    # type: (...) -> None
    # Here's where we would use a different type of partitioning
    # (e.g. also by key) for a different access pattern.
    if access_pattern.urn == common_urns.side_inputs.ITERABLE.urn:
      # Iterable side inputs are keyed under a single constant key.
      self._kv_extractor = lambda value: ('', value)
      self._key_coder = coders.SingletonCoder('')  # type: coders.Coder
      self._value_coder = coder.wrapped_value_coder
    elif access_pattern.urn == common_urns.side_inputs.MULTIMAP.urn:
      self._kv_extractor = lambda value: value
      self._key_coder = coder.wrapped_value_coder.key_coder()
      self._value_coder = (coder.wrapped_value_coder.value_coder())
    else:
      raise ValueError("Unknown access pattern: '%s'" % access_pattern.urn)
    self._windowed_value_coder = coder
    self._window_coder = coder.window_coder
    self._values_by_window = collections.defaultdict(
        list)  # type: DefaultDict[Tuple[str, BoundedWindow], List[Any]]

  def append(self, elements_data):
    # type: (bytes) -> None
    """Decode a stream of windowed values, adding each value to every
    window it belongs to."""
    input_stream = create_InputStream(elements_data)
    while input_stream.size() > 0:
      windowed_val_coder_impl = self._windowed_value_coder.get_impl(
      )  # type: WindowedValueCoderImpl
      windowed_value = windowed_val_coder_impl.decode_from_stream(
          input_stream, True)
      key, value = self._kv_extractor(windowed_value.value)
      for window in windowed_value.windows:
        self._values_by_window[key, window].append(value)

  def encoded_items(self):
    # type: () -> Iterator[Tuple[bytes, bytes, bytes]]
    """Yield (encoded_key, encoded_window, encoded_values) per bucket."""
    value_coder_impl = self._value_coder.get_impl()
    key_coder_impl = self._key_coder.get_impl()
    for (key, window), values in self._values_by_window.items():
      encoded_window = self._window_coder.encode(window)
      encoded_key = key_coder_impl.encode_nested(key)
      output_stream = create_OutputStream()
      for value in values:
        value_coder_impl.encode_to_stream(value, output_stream, True)
      yield encoded_key, encoded_window, output_stream.get()
class GenericNonMergingWindowFn(window.NonMergingWindowFn):
  """Runner-side placeholder for an SDK-defined non-merging WindowFn.

  Only the window coder is known to the runner; assign() is never called
  runner-side and is left unimplemented.
  """

  URN = 'internal-generic-non-merging'

  def __init__(self, coder):
    # type: (coders.Coder) -> None
    self._coder = coder

  def assign(self, assign_context):
    # type: (window.WindowFn.AssignContext) -> Iterable[BoundedWindow]
    raise NotImplementedError()

  def get_window_coder(self):
    # type: () -> coders.Coder
    return self._coder

  @staticmethod
  @window.urns.RunnerApiFn.register_urn(URN, bytes)
  def from_runner_api_parameter(window_coder_id, context):
    # type: (bytes, Any) -> GenericNonMergingWindowFn
    # Payload is the coder id, resolved against the pipeline context.
    return GenericNonMergingWindowFn(
        context.coders[window_coder_id.decode('utf-8')])
class GenericMergingWindowFn(window.WindowFn):
  """Runner-side proxy for a merging WindowFn evaluated in an SDK worker.

  The runner cannot execute arbitrary user merge logic itself, so
  ``merge()`` round-trips the candidate windows through a dedicated
  "merge windows" bundle on an SDK worker and replays the reported merges
  on the local merge context.
  """

  URN = 'internal-generic-merging'

  # Transform ids used inside the merge-windows bundle descriptor.
  TO_SDK_TRANSFORM = 'read'
  FROM_SDK_TRANSFORM = 'write'

  # Registry of live instances keyed by handle id (the URN payload), so
  # from_runner_api_parameter can recover the original object.
  _HANDLES = {}  # type: Dict[str, GenericMergingWindowFn]

  def __init__(self, execution_context, windowing_strategy_proto):
    # type: (FnApiRunnerExecutionContext, beam_runner_api_pb2.WindowingStrategy) -> None
    self._worker_handler = None  # type: Optional[worker_handlers.WorkerHandler]
    self._handle_id = handle_id = uuid.uuid4().hex
    self._HANDLES[handle_id] = self
    # ExecutionContexts are expensive, we don't want to keep them in the
    # static dictionary forever. Instead we hold a weakref and pop self
    # out of the dict once this context goes away.
    self._execution_context_ref_obj = weakref.ref(
        execution_context, lambda _: self._HANDLES.pop(handle_id, None))
    self._windowing_strategy_proto = windowing_strategy_proto
    # Monotonic counter backing uid().
    self._counter = 0
    # Lazily created in make_process_bundle_descriptor()
    self._process_bundle_descriptor = None
    self._bundle_processor_id = None  # type: Optional[str]
    self.windowed_input_coder_impl = None  # type: Optional[CoderImpl]
    self.windowed_output_coder_impl = None  # type: Optional[CoderImpl]

  def _execution_context_ref(self):
    # type: () -> FnApiRunnerExecutionContext
    """Dereferences the weakly-held execution context; it must be alive."""
    result = self._execution_context_ref_obj()
    assert result is not None
    return result

  def payload(self):
    # type: () -> bytes
    """Returns the URN payload: the utf-8 encoded handle id."""
    return self._handle_id.encode('utf-8')

  @staticmethod
  @window.urns.RunnerApiFn.register_urn(URN, bytes)
  def from_runner_api_parameter(handle_id, unused_context):
    # type: (bytes, Any) -> GenericMergingWindowFn
    # Looks the live instance back up by the handle id in the payload.
    return GenericMergingWindowFn._HANDLES[handle_id.decode('utf-8')]

  def assign(self, assign_context):
    # type: (window.WindowFn.AssignContext) -> Iterable[window.BoundedWindow]
    # Window assignment happens in the SDK worker, never in the runner.
    raise NotImplementedError()

  def merge(self, merge_context):
    # type: (window.WindowFn.MergeContext) -> None
    """Runs the user WindowFn's merge logic on an SDK worker.

    Sends merge_context.windows through the merge bundle and applies each
    reported (merge_result, originals) pair back onto merge_context.
    """
    worker_handler = self.worker_handle()

    assert self.windowed_input_coder_impl is not None
    assert self.windowed_output_coder_impl is not None
    process_bundle_id = self.uid('process')
    to_worker = worker_handler.data_conn.output_stream(
        process_bundle_id, self.TO_SDK_TRANSFORM)
    to_worker.write(
        self.windowed_input_coder_impl.encode_nested(
            window.GlobalWindows.windowed_value((b'', merge_context.windows))))
    to_worker.close()

    process_bundle_req = beam_fn_api_pb2.InstructionRequest(
        instruction_id=process_bundle_id,
        process_bundle=beam_fn_api_pb2.ProcessBundleRequest(
            process_bundle_descriptor_id=self._bundle_processor_id))
    result_future = worker_handler.control_conn.push(process_bundle_req)
    for output in worker_handler.data_conn.input_elements(
        process_bundle_id, [self.FROM_SDK_TRANSFORM],
        # Stop reading early if the bundle already failed.
        abort_callback=lambda: bool(result_future.is_done() and result_future.
                                    get().error)):
      if isinstance(output, beam_fn_api_pb2.Elements.Data):
        windowed_result = self.windowed_output_coder_impl.decode_nested(
            output.data)
        for merge_result, originals in windowed_result.value[1][1]:
          merge_context.merge(originals, merge_result)
      else:
        raise RuntimeError("Unexpected data: %s" % output)

    result = result_future.get()
    if result.error:
      raise RuntimeError(result.error)
    # The result was "returned" via the merge callbacks on merge_context above.

  def get_window_coder(self):
    # type: () -> coders.Coder
    return self._execution_context_ref().pipeline_context.coders[
        self._windowing_strategy_proto.window_coder_id]

  def worker_handle(self):
    # type: () -> worker_handlers.WorkerHandler
    """Returns (lazily creating and registering) the merge worker handler."""
    if self._worker_handler is None:
      worker_handler_manager = self._execution_context_ref(
      ).worker_handler_manager
      self._worker_handler = worker_handler_manager.get_worker_handlers(
          self._windowing_strategy_proto.environment_id, 1)[0]
      process_bundle_decriptor = self.make_process_bundle_descriptor(
          self._worker_handler.data_api_service_descriptor(),
          self._worker_handler.state_api_service_descriptor())
      worker_handler_manager.register_process_bundle_descriptor(
          process_bundle_decriptor)
    return self._worker_handler

  def make_process_bundle_descriptor(
      self, data_api_service_descriptor, state_api_service_descriptor):
    # type: (Optional[endpoints_pb2.ApiServiceDescriptor], Optional[endpoints_pb2.ApiServiceDescriptor]) -> beam_fn_api_pb2.ProcessBundleDescriptor
    """Creates a ProcessBundleDescriptor for invoking the WindowFn's
    merge operation.
    """
    def make_channel_payload(coder_id):
      # type: (str) -> bytes
      # Builds the RemoteGrpcPort payload for a data read/write transform.
      data_spec = beam_fn_api_pb2.RemoteGrpcPort(coder_id=coder_id)
      if data_api_service_descriptor:
        data_spec.api_service_descriptor.url = (data_api_service_descriptor.url)
      return data_spec.SerializeToString()

    pipeline_context = self._execution_context_ref().pipeline_context
    global_windowing_strategy_id = self.uid('global_windowing_strategy')
    global_windowing_strategy_proto = core.Windowing(
        window.GlobalWindows()).to_runner_api(pipeline_context)
    coders = dict(pipeline_context.coders.get_id_to_proto_map())

    def make_coder(urn, *components):
      # type: (str, str) -> str
      # Registers a new coder proto and returns its freshly-minted id.
      coder_proto = beam_runner_api_pb2.Coder(
          spec=beam_runner_api_pb2.FunctionSpec(urn=urn),
          component_coder_ids=components)
      coder_id = self.uid('coder')
      coders[coder_id] = coder_proto
      pipeline_context.coders.put_proto(coder_id, coder_proto)
      return coder_id

    bytes_coder_id = make_coder(common_urns.coders.BYTES.urn)
    window_coder_id = self._windowing_strategy_proto.window_coder_id
    global_window_coder_id = make_coder(common_urns.coders.GLOBAL_WINDOW.urn)
    iter_window_coder_id = make_coder(
        common_urns.coders.ITERABLE.urn, window_coder_id)
    # Input: KV[bytes, iterable[window]] -- the candidate windows to merge.
    input_coder_id = make_coder(
        common_urns.coders.KV.urn, bytes_coder_id, iter_window_coder_id)
    # Output mirrors the MERGE_WINDOWS primitive's result structure:
    # KV[bytes, KV[iterable[window],
    #              iterable[KV[merged_window, iterable[original_window]]]]].
    output_coder_id = make_coder(
        common_urns.coders.KV.urn,
        bytes_coder_id,
        make_coder(
            common_urns.coders.KV.urn,
            iter_window_coder_id,
            make_coder(
                common_urns.coders.ITERABLE.urn,
                make_coder(
                    common_urns.coders.KV.urn,
                    window_coder_id,
                    iter_window_coder_id))))
    windowed_input_coder_id = make_coder(
        common_urns.coders.WINDOWED_VALUE.urn,
        input_coder_id,
        global_window_coder_id)
    windowed_output_coder_id = make_coder(
        common_urns.coders.WINDOWED_VALUE.urn,
        output_coder_id,
        global_window_coder_id)

    self.windowed_input_coder_impl = pipeline_context.coders[
        windowed_input_coder_id].get_impl()
    self.windowed_output_coder_impl = pipeline_context.coders[
        windowed_output_coder_id].get_impl()

    self._bundle_processor_id = self.uid('merge_windows')
    return beam_fn_api_pb2.ProcessBundleDescriptor(
        id=self._bundle_processor_id,
        transforms={
            self.TO_SDK_TRANSFORM: beam_runner_api_pb2.PTransform(
                unique_name='MergeWindows/Read',
                spec=beam_runner_api_pb2.FunctionSpec(
                    urn=bundle_processor.DATA_INPUT_URN,
                    payload=make_channel_payload(windowed_input_coder_id)),
                outputs={'input': 'input'}),
            'Merge': beam_runner_api_pb2.PTransform(
                unique_name='MergeWindows/Merge',
                spec=beam_runner_api_pb2.FunctionSpec(
                    urn=common_urns.primitives.MERGE_WINDOWS.urn,
                    payload=self._windowing_strategy_proto.window_fn.
                    SerializeToString()),
                inputs={'input': 'input'},
                outputs={'output': 'output'}),
            self.FROM_SDK_TRANSFORM: beam_runner_api_pb2.PTransform(
                unique_name='MergeWindows/Write',
                spec=beam_runner_api_pb2.FunctionSpec(
                    urn=bundle_processor.DATA_OUTPUT_URN,
                    payload=make_channel_payload(windowed_output_coder_id)),
                inputs={'output': 'output'}),
        },
        pcollections={
            'input': beam_runner_api_pb2.PCollection(
                unique_name='input',
                windowing_strategy_id=global_windowing_strategy_id,
                coder_id=input_coder_id),
            'output': beam_runner_api_pb2.PCollection(
                unique_name='output',
                windowing_strategy_id=global_windowing_strategy_id,
                coder_id=output_coder_id),
        },
        coders=coders,
        windowing_strategies={
            global_windowing_strategy_id: global_windowing_strategy_proto,
        },
        environments=dict(
            self._execution_context_ref().pipeline_components.environments.
            items()),
        state_api_service_descriptor=state_api_service_descriptor,
        # NOTE(review): the data service descriptor is deliberately used for
        # timers here -- presumably timers flow over the data channel; confirm
        # against the worker handler implementation.
        timer_api_service_descriptor=data_api_service_descriptor)

  def uid(self, name=''):
    # type: (str) -> str
    """Returns an id unique within this handle (used for bundles/coders)."""
    self._counter += 1
    return '%s_%s_%s' % (self._handle_id, name, self._counter)
class FnApiRunnerExecutionContext(object):
  """Shared state for executing one pipeline on the FnApiRunner.

  :var pcoll_buffers: (dict): Mapping of
      PCollection IDs to list that functions as buffer for the
      ``beam.PCollection``.
  """
  def __init__(self,
               stages,  # type: List[translations.Stage]
               worker_handler_manager,  # type: worker_handlers.WorkerHandlerManager
               pipeline_components,  # type: beam_runner_api_pb2.Components
               safe_coders: translations.SafeCoderMapping,
               data_channel_coders: Dict[str, str],
               ) -> None:
    """
    :param worker_handler_manager: This class manages the set of worker
        handlers, and the communication with state / control APIs.
    :param pipeline_components: (beam_runner_api_pb2.Components)
    :param safe_coders: A map from Coder ID to Safe Coder ID.
    :param data_channel_coders: A map from PCollection ID to the ID of the Coder
        for that PCollection.
    """
    self.stages = stages
    self.side_input_descriptors_by_stage = (
        self._build_data_side_inputs_map(stages))
    self.pcoll_buffers = {}  # type: MutableMapping[bytes, PartitionableBuffer]
    self.timer_buffers = {}  # type: MutableMapping[bytes, ListBuffer]
    self.worker_handler_manager = worker_handler_manager
    self.pipeline_components = pipeline_components
    self.safe_coders = safe_coders
    self.data_channel_coders = data_channel_coders
    # Maps DATA_INPUT transform names to the buffer id in their payload.
    self.input_transform_to_buffer_id = {
        t.unique_name: t.spec.payload
        for s in stages for t in s.transforms
        if t.spec.urn == bundle_processor.DATA_INPUT_URN
    }
    self.watermark_manager = WatermarkManager(stages)
    self.pipeline_context = pipeline_context.PipelineContext(
        self.pipeline_components,
        iterable_state_write=self._iterable_state_write)
    self._last_uid = -1
    # Replaces every windowing strategy the runner cannot evaluate directly
    # with a "safe" equivalent (see _make_safe_windowing_strategy).
    self.safe_windowing_strategies = {
        id: self._make_safe_windowing_strategy(id)
        for id in self.pipeline_components.windowing_strategies.keys()
    }

  @staticmethod
  def _build_data_side_inputs_map(stages):
    # type: (Iterable[translations.Stage]) -> MutableMapping[str, DataSideInput]
    """Builds an index mapping stages to side input descriptors.

    A side input descriptor is a map of side input IDs to side input access
    patterns for all of the outputs of a stage that will be consumed as a
    side input.
    """
    transform_consumers = collections.defaultdict(
        list)  # type: DefaultDict[str, List[beam_runner_api_pb2.PTransform]]
    stage_consumers = collections.defaultdict(
        list)  # type: DefaultDict[str, List[translations.Stage]]

    def get_all_side_inputs():
      # type: () -> Set[str]
      # Also populates transform_consumers / stage_consumers as a side effect.
      all_side_inputs = set()  # type: Set[str]
      for stage in stages:
        for transform in stage.transforms:
          for input in transform.inputs.values():
            transform_consumers[input].append(transform)
            stage_consumers[input].append(stage)
        for si in stage.side_inputs():
          all_side_inputs.add(si)
      return all_side_inputs

    all_side_inputs = frozenset(get_all_side_inputs())
    data_side_inputs_by_producing_stage = {}  # type: Dict[str, DataSideInput]
    # First occurrence wins: record the stage producing each PCollection.
    producing_stages_by_pcoll = {}
    for s in stages:
      data_side_inputs_by_producing_stage[s.name] = {}
      for transform in s.transforms:
        for o in transform.outputs.values():
          if o in s.side_inputs():
            continue
          if o in producing_stages_by_pcoll:
            continue
          producing_stages_by_pcoll[o] = s

    for side_pc in all_side_inputs:
      for consuming_transform in transform_consumers[side_pc]:
        if consuming_transform.spec.urn not in translations.PAR_DO_URNS:
          continue
        producing_stage = producing_stages_by_pcoll[side_pc]
        payload = proto_utils.parse_Bytes(
            consuming_transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
        for si_tag in payload.side_inputs:
          if consuming_transform.inputs[si_tag] == side_pc:
            side_input_id = (consuming_transform.unique_name, si_tag)
            data_side_inputs_by_producing_stage[
                producing_stage.name][side_input_id] = (
                    translations.create_buffer_id(side_pc),
                    payload.side_inputs[si_tag].access_pattern)
    return data_side_inputs_by_producing_stage

  def _make_safe_windowing_strategy(self, id):
    # type: (str) -> str
    """Returns the id of a windowing strategy the runner can evaluate.

    Strategies with a runner-safe WindowFn are returned unchanged;
    otherwise a clone using a Generic(Non)MergingWindowFn is registered
    under a fresh id and that id is returned.
    """
    windowing_strategy_proto = self.pipeline_components.windowing_strategies[id]
    if windowing_strategy_proto.window_fn.urn in SAFE_WINDOW_FNS:
      return id
    else:
      # Pick an id that cannot collide with an existing strategy.
      safe_id = id + '_safe'
      while safe_id in self.pipeline_components.windowing_strategies:
        safe_id += '_'
      safe_proto = copy.copy(windowing_strategy_proto)
      if (windowing_strategy_proto.merge_status ==
          beam_runner_api_pb2.MergeStatus.NON_MERGING):
        safe_proto.window_fn.urn = GenericNonMergingWindowFn.URN
        safe_proto.window_fn.payload = (
            windowing_strategy_proto.window_coder_id.encode('utf-8'))
      elif (windowing_strategy_proto.merge_status ==
            beam_runner_api_pb2.MergeStatus.NEEDS_MERGE):
        window_fn = GenericMergingWindowFn(self, windowing_strategy_proto)
        safe_proto.window_fn.urn = GenericMergingWindowFn.URN
        safe_proto.window_fn.payload = window_fn.payload()
      else:
        raise NotImplementedError(
            'Unsupported merging strategy: %s' %
            windowing_strategy_proto.merge_status)
      self.pipeline_context.windowing_strategies.put_proto(safe_id, safe_proto)
      return safe_id

  @property
  def state_servicer(self):
    # type: () -> worker_handlers.StateServicer
    # TODO(BEAM-9625): Ensure FnApiRunnerExecutionContext owns StateServicer
    return self.worker_handler_manager.state_servicer

  def next_uid(self):
    # type: () -> str
    """Returns a new sequential id, unique within this execution."""
    self._last_uid += 1
    return str(self._last_uid)

  def _iterable_state_write(self, values, element_coder_impl):
    # type: (Iterable, CoderImpl) -> bytes
    """Writes an iterable to runner state; returns its continuation token."""
    token = unique_name(None, 'iter').encode('ascii')
    out = create_OutputStream()
    for element in values:
      element_coder_impl.encode_to_stream(element, out, True)
    self.worker_handler_manager.state_servicer.append_raw(
        beam_fn_api_pb2.StateKey(
            runner=beam_fn_api_pb2.StateKey.Runner(key=token)),
        out.get())
    return token

  def commit_side_inputs_to_state(
      self,
      data_side_input,  # type: DataSideInput
  ):
    # type: (...) -> None
    """Groups buffered side-input data by window and writes it to state."""
    for (consuming_transform_id, tag), (buffer_id,
                                        func_spec) in data_side_input.items():
      _, pcoll_id = split_buffer_id(buffer_id)
      value_coder = self.pipeline_context.coders[self.safe_coders[
          self.data_channel_coders[pcoll_id]]]
      elements_by_window = WindowGroupingBuffer(func_spec, value_coder)
      if buffer_id not in self.pcoll_buffers:
        self.pcoll_buffers[buffer_id] = ListBuffer(
            coder_impl=value_coder.get_impl())
      for element_data in self.pcoll_buffers[buffer_id]:
        elements_by_window.append(element_data)

      # The state key layout depends on the side input access pattern.
      if func_spec.urn == common_urns.side_inputs.ITERABLE.urn:
        for _, window, elements_data in elements_by_window.encoded_items():
          state_key = beam_fn_api_pb2.StateKey(
              iterable_side_input=beam_fn_api_pb2.StateKey.IterableSideInput(
                  transform_id=consuming_transform_id,
                  side_input_id=tag,
                  window=window))
          self.state_servicer.append_raw(state_key, elements_data)
      elif func_spec.urn == common_urns.side_inputs.MULTIMAP.urn:
        for key, window, elements_data in elements_by_window.encoded_items():
          state_key = beam_fn_api_pb2.StateKey(
              multimap_side_input=beam_fn_api_pb2.StateKey.MultimapSideInput(
                  transform_id=consuming_transform_id,
                  side_input_id=tag,
                  window=window,
                  key=key))
          self.state_servicer.append_raw(state_key, elements_data)
      else:
        raise ValueError("Unknown access pattern: '%s'" % func_spec.urn)
class BundleContextManager(object):
  """Per-bundle execution state for one stage: the process bundle
  descriptor, the worker handlers, and the data/timer buffers."""

  def __init__(self,
               execution_context,  # type: FnApiRunnerExecutionContext
               stage,  # type: translations.Stage
               num_workers,  # type: int
              ):
    # type: (...) -> None
    self.execution_context = execution_context
    self.stage = stage
    self.bundle_uid = self.execution_context.next_uid()
    self.num_workers = num_workers

    # Properties that are lazily initialized
    self._process_bundle_descriptor = None  # type: Optional[beam_fn_api_pb2.ProcessBundleDescriptor]
    self._worker_handlers = None  # type: Optional[List[worker_handlers.WorkerHandler]]
    # a mapping of {(transform_id, timer_family_id): timer_coder_id}. The map
    # is built after self._process_bundle_descriptor is initialized.
    # This field can be used to tell whether current bundle has timers.
    self._timer_coder_ids = None  # type: Optional[Dict[Tuple[str, str], str]]

  @property
  def worker_handlers(self):
    # type: () -> List[worker_handlers.WorkerHandler]
    # Lazily acquires num_workers handlers for this stage's environment.
    if self._worker_handlers is None:
      self._worker_handlers = (
          self.execution_context.worker_handler_manager.get_worker_handlers(
              self.stage.environment, self.num_workers))
    return self._worker_handlers

  def data_api_service_descriptor(self):
    # type: () -> Optional[endpoints_pb2.ApiServiceDescriptor]
    # All worker_handlers share the same grpc server, so we can read grpc server
    # info from any worker_handler and read from the first worker_handler.
    return self.worker_handlers[0].data_api_service_descriptor()

  def state_api_service_descriptor(self):
    # type: () -> Optional[endpoints_pb2.ApiServiceDescriptor]
    # All worker_handlers share the same grpc server, so we can read grpc server
    # info from any worker_handler and read from the first worker_handler.
    return self.worker_handlers[0].state_api_service_descriptor()

  @property
  def process_bundle_descriptor(self):
    # type: () -> beam_fn_api_pb2.ProcessBundleDescriptor
    """Lazily builds (and caches) this bundle's descriptor."""
    if self._process_bundle_descriptor is None:
      self._process_bundle_descriptor = self._build_process_bundle_descriptor()
      self._timer_coder_ids = self._build_timer_coders_id_map()
    return self._process_bundle_descriptor

  def _build_process_bundle_descriptor(self):
    # type: () -> beam_fn_api_pb2.ProcessBundleDescriptor
    # Cannot be invoked until *after* _extract_endpoints is called.
    # Always populate the timer_api_service_descriptor.
    return beam_fn_api_pb2.ProcessBundleDescriptor(
        id=self.bundle_uid,
        transforms={
            transform.unique_name: transform
            for transform in self.stage.transforms
        },
        pcollections=dict(
            self.execution_context.pipeline_components.pcollections.items()),
        coders=dict(self.execution_context.pipeline_components.coders.items()),
        windowing_strategies=dict(
            self.execution_context.pipeline_components.windowing_strategies.
            items()),
        environments=dict(
            self.execution_context.pipeline_components.environments.items()),
        state_api_service_descriptor=self.state_api_service_descriptor(),
        timer_api_service_descriptor=self.data_api_service_descriptor())

  def extract_bundle_inputs_and_outputs(self):
    # type: () -> Tuple[Dict[str, PartitionableBuffer], DataOutput, Dict[TimerFamilyId, bytes]]
    """Returns maps of transform names to PCollection identifiers.

    Also mutates IO stages to point to the data ApiServiceDescriptor.

    Returns:
      A tuple of (data_input, data_output, expected_timer_output) dictionaries.
        `data_input` is a dictionary mapping (transform_name, output_name) to a
        PCollection buffer; `data_output` is a dictionary mapping
        (transform_name, output_name) to a PCollection ID.
        `expected_timer_output` is a dictionary mapping transform_id and
        timer family ID to a buffer id for timers.
    """
    data_input = {}  # type: Dict[str, PartitionableBuffer]
    data_output = {}  # type: DataOutput
    # A mapping of {(transform_id, timer_family_id) : buffer_id}
    expected_timer_output = {}  # type: OutputTimers
    for transform in self.stage.transforms:
      if transform.spec.urn in (bundle_processor.DATA_INPUT_URN,
                                bundle_processor.DATA_OUTPUT_URN):
        pcoll_id = transform.spec.payload
        if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
          coder_id = self.execution_context.data_channel_coders[only_element(
              transform.outputs.values())]
          coder = self.execution_context.pipeline_context.coders[
              self.execution_context.safe_coders.get(coder_id, coder_id)]
          if pcoll_id == translations.IMPULSE_BUFFER:
            # Impulse inputs are fed a single synthetic encoded element.
            data_input[transform.unique_name] = ListBuffer(
                coder_impl=coder.get_impl())
            data_input[transform.unique_name].append(ENCODED_IMPULSE_VALUE)
          else:
            if pcoll_id not in self.execution_context.pcoll_buffers:
              self.execution_context.pcoll_buffers[pcoll_id] = ListBuffer(
                  coder_impl=coder.get_impl())
            data_input[transform.unique_name] = (
                self.execution_context.pcoll_buffers[pcoll_id])
        elif transform.spec.urn == bundle_processor.DATA_OUTPUT_URN:
          data_output[transform.unique_name] = pcoll_id
          coder_id = self.execution_context.data_channel_coders[only_element(
              transform.inputs.values())]
        else:
          raise NotImplementedError
        # Repoint the IO transform's payload at the data channel endpoint.
        data_spec = beam_fn_api_pb2.RemoteGrpcPort(coder_id=coder_id)
        data_api_service_descriptor = self.data_api_service_descriptor()
        if data_api_service_descriptor:
          data_spec.api_service_descriptor.url = (
              data_api_service_descriptor.url)
        transform.spec.payload = data_spec.SerializeToString()
      elif transform.spec.urn in translations.PAR_DO_URNS:
        payload = proto_utils.parse_Bytes(
            transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
        for timer_family_id in payload.timer_family_specs.keys():
          expected_timer_output[(transform.unique_name, timer_family_id)] = (
              create_buffer_id(timer_family_id, 'timers'))
    return data_input, data_output, expected_timer_output

  def get_input_coder_impl(self, transform_id):
    # type: (str) -> CoderImpl
    """Returns the coder impl of the given DATA_INPUT transform's channel."""
    coder_id = beam_fn_api_pb2.RemoteGrpcPort.FromString(
        self.process_bundle_descriptor.transforms[transform_id].spec.payload
    ).coder_id
    assert coder_id
    return self.get_coder_impl(coder_id)

  def _build_timer_coders_id_map(self):
    # type: () -> Dict[Tuple[str, str], str]
    # Requires process_bundle_descriptor to have been built first.
    assert self._process_bundle_descriptor is not None
    timer_coder_ids = {}
    for transform_id, transform_proto in (self._process_bundle_descriptor
        .transforms.items()):
      if transform_proto.spec.urn == common_urns.primitives.PAR_DO.urn:
        pardo_payload = proto_utils.parse_Bytes(
            transform_proto.spec.payload, beam_runner_api_pb2.ParDoPayload)
        for id, timer_family_spec in pardo_payload.timer_family_specs.items():
          timer_coder_ids[(transform_id, id)] = (
              timer_family_spec.timer_family_coder_id)
    return timer_coder_ids

  def get_coder_impl(self, coder_id):
    # type: (str) -> CoderImpl
    """Returns the impl for coder_id, preferring its safe substitute."""
    if coder_id in self.execution_context.safe_coders:
      return self.execution_context.pipeline_context.coders[
          self.execution_context.safe_coders[coder_id]].get_impl()
    else:
      return self.execution_context.pipeline_context.coders[coder_id].get_impl()

  def get_timer_coder_impl(self, transform_id, timer_family_id):
    # type: (str, str) -> CoderImpl
    assert self._timer_coder_ids is not None
    return self.get_coder_impl(
        self._timer_coder_ids[(transform_id, timer_family_id)])

  def get_buffer(self, buffer_id, transform_id):
    # type: (bytes, str) -> PartitionableBuffer
    """Returns the buffer for a given (operation_type, PCollection ID).

    For grouping-typed operations, we produce a ``GroupingBuffer``. For
    others, we produce a ``ListBuffer``.
    """
    kind, name = split_buffer_id(buffer_id)
    if kind == 'materialize':
      if buffer_id not in self.execution_context.pcoll_buffers:
        self.execution_context.pcoll_buffers[buffer_id] = ListBuffer(
            coder_impl=self.get_input_coder_impl(transform_id))
      return self.execution_context.pcoll_buffers[buffer_id]
    # For timer buffer, name = timer_family_id
    elif kind == 'timers':
      if buffer_id not in self.execution_context.timer_buffers:
        timer_coder_impl = self.get_timer_coder_impl(transform_id, name)
        self.execution_context.timer_buffers[buffer_id] = ListBuffer(
            timer_coder_impl)
      return self.execution_context.timer_buffers[buffer_id]
    elif kind == 'group':
      # This is a grouping write, create a grouping buffer if needed.
      if buffer_id not in self.execution_context.pcoll_buffers:
        original_gbk_transform = name
        transform_proto = self.execution_context.pipeline_components.transforms[
            original_gbk_transform]
        input_pcoll = only_element(list(transform_proto.inputs.values()))
        output_pcoll = only_element(list(transform_proto.outputs.values()))
        pre_gbk_coder = self.execution_context.pipeline_context.coders[
            self.execution_context.safe_coders[
                self.execution_context.data_channel_coders[input_pcoll]]]
        post_gbk_coder = self.execution_context.pipeline_context.coders[
            self.execution_context.safe_coders[
                self.execution_context.data_channel_coders[output_pcoll]]]
        windowing_strategy = (
            self.execution_context.pipeline_context.windowing_strategies[
                self.execution_context.safe_windowing_strategies[
                    self.execution_context.pipeline_components.
                    pcollections[input_pcoll].windowing_strategy_id]])
        self.execution_context.pcoll_buffers[buffer_id] = GroupingBuffer(
            pre_gbk_coder, post_gbk_coder, windowing_strategy)
    else:
      # These should be the only two identifiers we produce for now,
      # but special side input writes may go here.
      raise NotImplementedError(buffer_id)
    return self.execution_context.pcoll_buffers[buffer_id]

  def input_for(self, transform_id: str, input_id: str) -> str:
    """Returns the name of the transform producing the given PCollection."""
    input_pcoll = self.process_bundle_descriptor.transforms[
        transform_id].inputs[input_id]
    for read_id, proto in self.process_bundle_descriptor.transforms.items():
      # The GrpcRead is followed by the SDF/Process.
      if (proto.spec.urn == bundle_processor.DATA_INPUT_URN and
          input_pcoll in proto.outputs.values()):
        return read_id

      # The GrpcRead is followed by the SDF/Truncate -> SDF/Process.
      if (proto.spec.urn ==
          common_urns.sdf_components.TRUNCATE_SIZED_RESTRICTION.urn and
          input_pcoll in proto.outputs.values()):
        read_input = list(
            self.process_bundle_descriptor.transforms[read_id].inputs.values()
        )[0]
        for (grpc_read,
             transform_proto) in self.process_bundle_descriptor.transforms.items():  # pylint: disable=line-too-long
          if (transform_proto.spec.urn == bundle_processor.DATA_INPUT_URN and
              read_input in transform_proto.outputs.values()):
            return grpc_read

    raise RuntimeError('No IO transform feeds %s' % transform_id)
#!/usr/bin/env python
# Copyright 2014, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
import logging
import os
import json
import server
class ZkTopoServer(server.TopoServer):
  """Implementation of TopoServer for ZooKeeper."""

  def __init__(self):
    # Ports are reserved lazily so importing this module has no side effects.
    self.ports_assigned = False

  def assign_ports(self):
    """Assign ports if not already assigned."""
    if self.ports_assigned:
      return
    # Imported here (not at module level) because the environment/utils
    # modules depend on test-global state set up before first use.
    from environment import reserve_ports
    import utils
    self.zk_port_base = reserve_ports(3)
    self.hostname = utils.hostname
    # Three consecutive ports for the single-node ZK config string.
    self.zk_ports = ':'.join(str(self.zk_port_base + i) for i in range(3))
    self.zk_client_port = self.zk_port_base + 2
    self.ports_assigned = True

  def setup(self, add_bad_host=False):
    """Starts a local ZooKeeper and writes the client cell-map config.

    Args:
      add_bad_host: if True, the 'test_ca' cell additionally lists an
        unresolvable host, to exercise client resilience.
    """
    from environment import run, binary_args, vtlogroot, tmproot
    self.assign_ports()
    run(binary_args('zkctl') + [
        '-log_dir', vtlogroot,
        '-zk.cfg', '1@%s:%s' % (self.hostname, self.zk_ports),
        'init'])
    config = tmproot + '/test-zk-client-conf.json'
    with open(config, 'w') as f:
      ca_server = 'localhost:%u' % (self.zk_client_port)
      if add_bad_host:
        ca_server += ',does.not.exists:1234'
      zk_cell_mapping = {
          'test_nj': 'localhost:%u' % (self.zk_client_port),
          'test_ny': 'localhost:%u' % (self.zk_client_port),
          'test_ca': ca_server,
          'global': 'localhost:%u' % (self.zk_client_port),
      }
      json.dump(zk_cell_mapping, f)
    os.environ['ZK_CLIENT_CONFIG'] = config
    logging.debug('Using ZK_CLIENT_CONFIG=%s', str(config))
    # Pre-create the per-cell vt roots used by the tests.
    run(binary_args('zk') + ['touch', '-p', '/zk/test_nj/vt'])
    run(binary_args('zk') + ['touch', '-p', '/zk/test_ny/vt'])
    run(binary_args('zk') + ['touch', '-p', '/zk/test_ca/vt'])

  def teardown(self):
    """Stops ZooKeeper; keeps data around when --keep-logs is requested."""
    from environment import run, binary_args, vtlogroot
    import utils
    self.assign_ports()
    run(binary_args('zkctl') + [
        '-log_dir', vtlogroot,
        '-zk.cfg', '1@%s:%s' % (self.hostname, self.zk_ports),
        'shutdown' if utils.options.keep_logs else 'teardown'],
        raise_on_error=False)

  def flags(self):
    """Returns the topology flags to pass to Vitess binaries."""
    return ['-topo_implementation', 'zookeeper']

  def wipe(self):
    """Removes all vt data from every cell created in setup()."""
    from environment import run, binary_args
    # Work around safety check on recursive delete.
    run(binary_args('zk') + ['rm', '-rf', '/zk/test_nj/vt/*'])
    run(binary_args('zk') + ['rm', '-rf', '/zk/test_ny/vt/*'])
    # BUG FIX: test_ca was created in setup() but never wiped before,
    # leaking state between test runs.
    run(binary_args('zk') + ['rm', '-rf', '/zk/test_ca/vt/*'])
    run(binary_args('zk') + ['rm', '-rf', '/zk/global/vt/*'])
    run(binary_args('zk') + ['rm', '-f', '/zk/test_nj/vt'])
    run(binary_args('zk') + ['rm', '-f', '/zk/test_ny/vt'])
    run(binary_args('zk') + ['rm', '-f', '/zk/test_ca/vt'])
    run(binary_args('zk') + ['rm', '-f', '/zk/global/vt'])
# Register this implementation under the 'zookeeper' flavor.
server.flavor_map['zookeeper'] = ZkTopoServer()
"""The Distribution template."""
from equadratures.distributions.recurrence_utils import custom_recurrence_coefficients
import numpy as np
PDF_SAMPLES = 500000
class Distribution(object):
    """
    The class defines a Distribution object. It serves as a template for all distributions.

    :param double mean:
        Mean of the distribution.
    :param double variance:
        Variance of the distribution.
    :param double lower:
        Lower bound of the support of the distribution.
    :param double upper:
        Upper bound of the support of the distribution.
    :param double shape:
        Shape parameter of the distribution.
    :param double scale:
        Scale parameter of the distribution.
    :param double rate:
        Rate parameter of the distribution.
    """
    def __init__(self, mean=None, variance=None, lower=None, upper=None, shape=None, scale=None, rate=None):
        self.mean = mean
        self.variance = variance
        self.lower = lower
        self.upper = upper
        # BUG FIX: `shape` was accepted by the constructor but never stored,
        # so subclasses relying on self.shape would fail.
        self.shape = shape
        self.rate = rate
        self.scale = scale
        # Grid over the support used when building recurrence coefficients;
        # subclasses are expected to populate this.
        self.x_range_for_pdf = []
    def get_description(self):
        """
        Returns the description of the distribution.

        :param Distribution self:
            An instance of the distribution class.
        """
        pass
    def get_pdf(self, points=None):
        """
        Returns the PDF of the distribution.

        :param Distribution self:
            An instance of the distribution class.
        """
        pass
    def get_cdf(self, points=None):
        """
        Returns the CDF of the distribution.

        :param Distribution self:
            An instance of the distribution class.
        """
        pass
    def get_icdf(self, xx):
        """
        An inverse cumulative density function.

        :param Distribution self:
            An instance of the distribution class.
        :param xx:
            A numpy array of uniformly distributed samples between [0,1].
        :return:
            Inverse CDF samples associated with the distribution.
        """
        pass
    def get_recurrence_coefficients(self, order):
        """
        Recurrence coefficients for the distribution.

        :param Distribution self:
            An instance of the distribution class.
        :param array order:
            The order of the recurrence coefficients desired.
        :return:
            Recurrence coefficients associated with the distribution.
        """
        # Uses the subclass-provided PDF evaluated over x_range_for_pdf.
        w_pdf = self.get_pdf(self.x_range_for_pdf)
        ab = custom_recurrence_coefficients(self.x_range_for_pdf, w_pdf, order)
        return ab
    def get_samples(self, m=None):
        """
        Generates samples from the distribution by inverse-transform sampling.

        :param Distribution self:
            An instance of the distribution class.
        :param integer m:
            Number of random samples. If no value is provided, a default of 5e5 is assumed.
        :return:
            A N-by-1 vector that contains the samples.
        """
        if m is None:
            number_of_random_samples = PDF_SAMPLES
        else:
            number_of_random_samples = m
        uniform_samples = np.random.random((number_of_random_samples, 1))
        yy = self.get_icdf(uniform_samples)
        return yy
require 'optparse'

# Demonstrates options that take a required argument, declared once via the
# short name ('-x XXX') and once via the long name ('--yyy YYY').
parser = OptionParser.new
parser.on('-x XXX', '--xxx', 'Required argument via short name') do |value|
  p ['--xxx', value]
end
# BUG FIX: the long option was declared as '--y YYY' although the handler
# reports '--yyy'; declare the full long name so '--yyy VALUE' is accepted.
parser.on('-y', '--yyy YYY', 'Required argument via long name') do |value|
  p ['--yyy', value]
end
parser.parse!
/*
* Copyright (c) 2007 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockito.internal.verification;
import static org.junit.Assert.*;
import java.util.Arrays;
import java.util.List;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.exceptions.verification.NoInteractionsWanted;
import org.mockito.exceptions.verification.WantedButNotInvoked;
import org.mockito.internal.invocation.InvocationBuilder;
import org.mockito.internal.invocation.InvocationMatcher;
import org.mockito.internal.verification.api.VerificationData;
import org.mockito.invocation.Invocation;
import org.mockito.invocation.MatchableInvocation;
import org.mockitousage.IMethods;
import org.mockitoutil.TestBase;
/**
 * Unit tests for {@link Only}: verifies that matching invocations are marked
 * verified, and that surplus or missing invocations raise the expected errors.
 */
public class OnlyTest extends TestBase {

    @Mock IMethods mock;

    // Unit under test: the verification mode behind Mockito.only().
    Only only = new Only();

    /** Minimal VerificationData stub backed by a fixed array of invocations. */
    public static class VerificationDataStub implements VerificationData {

        private final Invocation[] invocations;
        private final InvocationMatcher wanted;

        public VerificationDataStub(InvocationMatcher wanted, Invocation... invocations) {
            this.invocations = invocations;
            this.wanted = wanted;
        }

        public List<Invocation> getAllInvocations() {
            return Arrays.asList(invocations);
        }

        @Override
        public MatchableInvocation getTarget() {
            return wanted;
        }

        public InvocationMatcher getWanted() {
            return wanted;
        }
    }

    @Test
    public void shouldMarkAsVerifiedWhenAssertionSucceded() {
        // given
        Invocation invocation = new InvocationBuilder().toInvocation();
        assertFalse(invocation.isVerified());

        // when
        only.verify(new VerificationDataStub(new InvocationMatcher(invocation), invocation));

        // then
        assertTrue(invocation.isVerified());
    }

    @Test
    public void shouldNotMarkAsVerifiedWhenWantedButNotInvoked() {
        // given
        Invocation invocation = new InvocationBuilder().toInvocation();
        assertFalse(invocation.isVerified());

        // when: the wanted matcher does not match the recorded invocation
        try {
            only.verify(
                    new VerificationDataStub(
                            new InvocationBuilder().toInvocationMatcher(), invocation));
            fail();
        } catch (WantedButNotInvoked e) {
        }

        // then
        assertFalse(invocation.isVerified());
    }

    @Test
    public void shouldMarkWantedOnlyAsVerified() {
        // given
        InvocationBuilder invocationBuilder = new InvocationBuilder();
        Invocation wanted = invocationBuilder.mock(mock).simpleMethod().toInvocation();
        Invocation different = new InvocationBuilder().mock(mock).differentMethod().toInvocation();

        // when: an extra, unrelated invocation is present
        try {
            only.verify(
                    new VerificationDataStub(
                            invocationBuilder.toInvocationMatcher(), wanted, different));
            fail();
        } catch (NoInteractionsWanted e) {
        }

        // then: only the wanted invocation is marked verified
        assertTrue(wanted.isVerified());
        assertFalse(different.isVerified());
    }

    @Test
    public void shouldMarkMultipleInvocationAsVerified() {
        // given
        InvocationBuilder invocationBuilder = new InvocationBuilder();
        Invocation wanted = invocationBuilder.mock(mock).simpleMethod().toInvocation();
        Invocation different = invocationBuilder.mock(mock).differentMethod().toInvocation();

        // when: both orderings of the invocation list are verified
        try {
            only.verify(
                    new VerificationDataStub(
                            invocationBuilder.toInvocationMatcher(), wanted, different));
            fail();
        } catch (NoInteractionsWanted e) {
        }
        try {
            only.verify(
                    new VerificationDataStub(
                            invocationBuilder.toInvocationMatcher(), different, wanted));
            fail();
        } catch (WantedButNotInvoked e) {
        }

        // then: both invocations end up marked verified
        assertTrue(wanted.isVerified());
        assertTrue(different.isVerified());
    }
}
---
layout: step
title: Collections
position: 9
---
Let's look at fleshing out authors so each author has their own page with a
blurb and the posts they've published.
To do this you'll use collections. Collections are similar to posts except the
content doesn't have to be grouped by date.
## Configuration
To set up a collection you need to tell Jekyll about it. Jekyll configuration
happens in a file called `_config.yml` (by default).
Create `_config.yml` in the root with the following:
```yaml
collections:
authors:
```
To (re)load the configuration, restart the Jekyll server. Press `Ctrl`+`C` in your terminal to stop the server, and then run `jekyll serve` to restart it.
## Add authors
Documents (the items in a collection) live in a folder in the root of the site
named `_*collection_name*`. In this case, `_authors`.
Create a document for each author:
`_authors/jill.md`:
```markdown
---
short_name: jill
name: Jill Smith
position: Chief Editor
---
Jill is an avid fruit grower based in the south of France.
```
`_authors/ted.md`:
```markdown
---
short_name: ted
name: Ted Doe
position: Writer
---
Ted has been eating fruit since he was a baby.
```
## Staff page
Let's add a page which lists all the authors on the site. Jekyll makes the
collection available at `site.authors`.
Create `staff.html` in the root directory and iterate over `site.authors` to output all the staff:
{% raw %}
```liquid
---
layout: default
title: Staff
---
<h1>Staff</h1>
<ul>
{% for author in site.authors %}
<li>
<h2>{{ author.name }}</h2>
<h3>{{ author.position }}</h3>
<p>{{ author.content | markdownify }}</p>
</li>
{% endfor %}
</ul>
```
{% endraw %}
Since the content is markdown, you need to run it through the
`markdownify` filter. This happens automatically when outputting using
{% raw %}`{{ content }}`{% endraw %} in a layout.
You also need a way to navigate to this page through the main navigation. Open
`_data/navigation.yml` and add an entry for the staff page:
```yaml
- name: Home
link: /
- name: About
link: /about.html
- name: Blog
link: /blog.html
- name: Staff
link: /staff.html
```
## Output a page
By default, collections do not output a page for documents. In this case we
want each author to have their own page so let's tweak the collection
configuration.
Open `_config.yml` and add `output: true` to the author collection
configuration:
```yaml
collections:
authors:
output: true
```
Restart the Jekyll server once more for the configuration changes to take effect.
You can link to the output page using `author.url`.
Add the link to the `staff.html` page:
{% raw %}
```liquid
---
layout: default
title: Staff
---
<h1>Staff</h1>
<ul>
{% for author in site.authors %}
<li>
<h2><a href="{{ author.url }}">{{ author.name }}</a></h2>
<h3>{{ author.position }}</h3>
<p>{{ author.content | markdownify }}</p>
</li>
{% endfor %}
</ul>
```
{% endraw %}
Just like posts you'll need to create a layout for authors.
Create `_layouts/author.html` with the following content:
{% raw %}
```liquid
---
layout: default
---
<h1>{{ page.name }}</h1>
<h2>{{ page.position }}</h2>
{{ content }}
```
{% endraw %}
## Front matter defaults
Now you need to configure the author documents to use the `author` layout. You
could do this in the front matter like we have previously but that's getting
repetitive.
What you really want is all posts to automatically have the post
layout, authors to have author and everything else to use the default.
You can achieve this by using [front matter defaults](/docs/configuration/front-matter-defaults/)
in `_config.yml`. You set a scope of what the default applies to, then the
default front matter you'd like.
Add defaults for layouts to your `_config.yml`,
```yaml
collections:
authors:
output: true
defaults:
- scope:
path: ""
type: "authors"
values:
layout: "author"
- scope:
path: ""
type: "posts"
values:
layout: "post"
- scope:
path: ""
values:
layout: "default"
```
Now you can remove layout from the front matter of all pages and posts. Note
that any time you update `_config.yml` you'll need to restart Jekyll for the
changes to take effect.
## List author's posts
Let's list the posts an author has published on their page. To do
this you need to match the author `short_name` to the post `author`. You
use this to filter the posts by author.
Iterate over this filtered list in `_layouts/author.html` to output the
author's posts:
{% raw %}
```liquid
---
layout: default
---
<h1>{{ page.name }}</h1>
<h2>{{ page.position }}</h2>
{{ content }}
<h2>Posts</h2>
<ul>
{% assign filtered_posts = site.posts | where: 'author', page.short_name %}
{% for post in filtered_posts %}
<li><a href="{{ post.url }}">{{ post.title }}</a></li>
{% endfor %}
</ul>
```
{% endraw %}
## Link to authors page
The posts have a reference to the author so let's link it to the author's page.
You can do this using a similar filtering technique in `_layouts/post.html`:
{% raw %}
```liquid
---
layout: default
---
<h1>{{ page.title }}</h1>
<p>
{{ page.date | date_to_string }}
{% assign author = site.authors | where: 'short_name', page.author | first %}
{% if author %}
- <a href="{{ author.url }}">{{ author.name }}</a>
{% endif %}
</p>
{{ content }}
```
{% endraw %}
Open up <a href="http://localhost:4000" target="_blank" data-proofer-ignore>http://localhost:4000</a> and
have a look at the staff page and the author links on posts to check everything
is linked together correctly.
In the next and final step of this tutorial, we'll add polish to the site and
get it ready for a production deployment.
from connection import H2OConnection
from frame import H2OFrame
from job import H2OJob
from model.model_future import H2OModelFuture
from model.dim_reduction import H2ODimReductionModel
from model.autoencoder import H2OAutoEncoderModel
from model.multinomial import H2OMultinomialModel
from model.regression import H2ORegressionModel
from model.binomial import H2OBinomialModel
from model.clustering import H2OClusteringModel
def supervised_model_build(x=None, y=None, vx=None, vy=None, algo="", offsets=None, weights=None, fold_column=None, kwargs=None):
    """Validate supervised-learning inputs, then launch the model build.

    Autoencoders must NOT receive a response column; every other supervised
    algorithm requires one. A validation frame, if given, needs its own response.

    :raises ValueError: on any of the response-column consistency violations.
    """
    autoencoder = kwargs is not None and kwargs.get("autoencoder") is not None
    if autoencoder:
        if y is not None:
            raise ValueError("y should not be specified for autoencoder.")
    elif y is None:
        raise ValueError("Missing response")
    if vx is not None and vy is None:
        raise ValueError("Missing response validating a supervised model")
    return _model_build(x, y, vx, vy, algo, offsets, weights, fold_column, kwargs)
def supervised(kwargs):
    """Resolve frames/columns from a raw kwargs dict and build a supervised model.

    Note: the _ow calls mutate kwargs in place (rewriting the offset/weights/fold
    entries to plain column names) before the parameter dict is assembled.
    """
    training = kwargs["training_frame"]
    validation = kwargs["validation_frame"]
    x = _frame_helper(kwargs["x"], training)
    y = _frame_helper(kwargs["y"], training)
    vx = _frame_helper(kwargs["validation_x"], validation)
    vy = _frame_helper(kwargs["validation_y"], validation)
    offsets = _ow("offset_column", kwargs)
    weights = _ow("weights_column", kwargs)
    fold_column = _ow("fold_column", kwargs)
    algo = kwargs["algo"]
    consumed = ("x", "y", "validation_x", "validation_y", "algo")
    parms = {}
    for key, value in kwargs.items():
        # validation_frame is always forwarded, even when None.
        if (key not in consumed and value is not None) or key == "validation_frame":
            parms[key] = value
    return supervised_model_build(x, y, vx, vy, algo, offsets, weights, fold_column, parms)
def unsupervised_model_build(x, validation_x, algo_url, kwargs):
    """Build an unsupervised model: no response column on either frame."""
    return _model_build(x, None, validation_x, None, algo_url, None, None, None, kwargs)
def unsupervised(kwargs):
    """Resolve frames from a raw kwargs dict and build an unsupervised model."""
    x = _frame_helper(kwargs["x"], kwargs["training_frame"])  # no response column
    vx = _frame_helper(kwargs["validation_x"], kwargs["validation_frame"])
    algo = kwargs["algo"]
    consumed = ("x", "validation_x", "algo")
    parms = {key: value for key, value in kwargs.items()
             if key not in consumed and value is not None}
    return unsupervised_model_build(x, vx, algo, parms)
def _frame_helper(col, fr):
    """Resolve *col* to an H2OFrame: frames pass through, column refs are looked up in *fr*."""
    if col is None:
        return None
    if isinstance(col, H2OFrame):
        return col
    if fr is None:
        raise ValueError("Missing training_frame")
    return fr[col]
def _ow(name, kwargs):  # for checking offsets and weights, c is column, fr is frame
    """Resolve the offset/weights/fold kwarg *name* to a column.

    The entry may already be an H2OFrame column, or a column reference that is
    looked up in the training frame. Side effect: kwargs[name] is rewritten to
    the resolved column *name* (a string) for the backend payload.
    """
    c = kwargs[name]
    fr = kwargs["training_frame"]
    if c is None or isinstance(c, H2OFrame): res = c
    else:
        if fr is None: raise ValueError("offsets/weights/fold given, but missing training_frame")
        res = fr[c]
    kwargs[name] = None if res is None else res.col_names[0]
    if res is not None and kwargs["validation_x"] is not None and kwargs["validation_frame"] is None:  # validation frame must have any offsets, weights, folds, etc.
        raise ValueError("offsets/weights/fold given, but missing validation_frame")
    return res
def _check_frame(x, y, response):  # y and response are only ever different for validation
    """Eagerly evaluate *x* (and *y*), attaching *y* as a response column of *x*.

    The column is named after *response* (the training response) so the
    training and validation schemas line up. Returns None if x is None.
    """
    if x is None: return None
    x._eager()
    if y is not None:
        y._eager()
        response._eager()
        x[response._col_names[0]] = y
    return x
def _check_col(x, vx, vfr, col):
    """Attach an offsets/weights/fold column *col* to the training frame and,
    when a validation frame *vfr* exists, its same-named column to *vx*."""
    x = _check_frame(x, col, col)
    vx = None if vfr is None else _check_frame(vx, vfr[col.names[0]], vfr[col.names[0]])
    return x, vx
def _model_build(x, y, vx, vy, algo, offsets, weights, fold_column, kwargs):
    """Assemble the REST payload for a model build and launch the job.

    Mutates kwargs with frame ids / column names, POSTs to ModelBuilders/<algo>,
    and returns either an H2OModelFuture (when do_future is set) or the
    fully-resolved model.
    """
    if x is None: raise ValueError("Missing features")
    x = _check_frame(x, y, y)
    vx = _check_frame(vx, vy, y)
    if offsets is not None: x, vx = _check_col(x, vx, kwargs["validation_frame"], offsets)
    if weights is not None: x, vx = _check_col(x, vx, kwargs["validation_frame"], weights)
    if fold_column is not None: x, vx = _check_col(x, vx, kwargs["validation_frame"], fold_column)
    kwargs['training_frame'] = x._id
    if vx is not None: kwargs['validation_frame'] = vx._id
    if y is not None: kwargs['response_column'] = y._col_names[0]
    # Drop None-valued entries; any remaining frame-valued kwargs are sent by id.
    kwargs = dict([(k, kwargs[k]._frame()._id if isinstance(kwargs[k], H2OFrame) else kwargs[k]) for k in kwargs if kwargs[k] is not None])
    do_future = kwargs.pop("do_future") if "do_future" in kwargs else False
    future_model = H2OModelFuture(H2OJob(H2OConnection.post_json("ModelBuilders/"+algo, **kwargs), job_type=(algo+" Model Build")), x)
    return future_model if do_future else _resolve_model(future_model, **kwargs)
def _resolve_model(future_model, **kwargs):
    """Block until the model-build job finishes, then fetch and wrap the model.

    Polls the job to completion, retrieves the model JSON from the backend,
    and wraps it in the client class matching its model_category.

    :raises NotImplementedError: for an unrecognized model_category.
    """
    future_model.poll()
    if '_rest_version' in kwargs:
        model_json = H2OConnection.get_json("Models/"+future_model.job.dest_key, _rest_version=kwargs['_rest_version'])["models"][0]
    else:
        model_json = H2OConnection.get_json("Models/"+future_model.job.dest_key)["models"][0]
    model_type = model_json["output"]["model_category"]
    # Dispatch table: backend model_category -> client-side wrapper class.
    model_classes = {
        "Binomial": H2OBinomialModel,
        "Clustering": H2OClusteringModel,
        "Regression": H2ORegressionModel,
        "Multinomial": H2OMultinomialModel,
        "AutoEncoder": H2OAutoEncoderModel,
        "DimReduction": H2ODimReductionModel,
    }
    if model_type not in model_classes:
        raise NotImplementedError(model_type)
    return model_classes[model_type](future_model.job.dest_key, model_json)
"""
This plugin bypasses the actual execution of tests, and instead just collects
test names. Fixtures are also bypassed, so running nosetests with the
collection plugin enabled should be very quick.
This plugin is useful in combination with the testid plugin (``--with-id``).
Run both together to get an indexed list of all tests, which will enable you to
run individual tests by index number.
This plugin is also useful for counting tests in a test suite, and making
people watching your demo think all of your tests pass.
"""
from nose.plugins.base import Plugin
from nose.case import Test
import logging
import unittest
log = logging.getLogger(__name__)
class CollectOnly(Plugin):
    """
    Collect and output test names only, don't run any tests.
    """
    # nose derives the --with-<name> switch from `name` and stores the enabled
    # flag on the config attribute named by `enableOpt`.
    name = "collect-only"
    enableOpt = 'collect_only'

    def options(self, parser, env):
        """Register commandline options.
        """
        parser.add_option('--collect-only',
                          action='store_true',
                          dest=self.enableOpt,
                          default=env.get('NOSE_COLLECT_ONLY'),
                          help="Enable collect-only: %s [COLLECT_ONLY]" %
                          (self.help()))

    def prepareTestLoader(self, loader):
        """Install collect-only suite class in TestLoader.
        """
        # Disable context awareness
        log.debug("Preparing test loader")
        loader.suiteClass = TestSuiteFactory(self.conf)

    def prepareTestCase(self, test):
        """Replace actual test with dummy that always passes.
        """
        # Return something that always passes
        log.debug("Preparing test case %s", test)
        if not isinstance(test, Test):
            return
        def run(result):
            # We need to make these plugin calls because there won't be
            # a result proxy, due to using a stripped-down test suite
            self.conf.plugins.startTest(test)
            result.startTest(test)
            self.conf.plugins.addSuccess(test)
            result.addSuccess(test)
            self.conf.plugins.stopTest(test)
            result.stopTest(test)
        return run
class TestSuiteFactory:
    """Callable factory producing TestSuite instances bound to one config."""

    def __init__(self, conf):
        self.conf = conf

    def __call__(self, tests=(), **kw):
        # Extra keyword arguments are accepted for loader compatibility but unused.
        return TestSuite(tests, conf=self.conf)
class TestSuite(unittest.TestSuite):
    """
    Basic test suite that bypasses most proxy and plugin calls, but does
    wrap tests in a nose.case.Test so prepareTestCase will be called.
    """
    def __init__(self, tests=(), conf=None):
        self.conf = conf
        # Exec lazy suites: makes discovery depth-first
        if callable(tests):
            tests = tests()
        log.debug("TestSuite(%r)", tests)
        unittest.TestSuite.__init__(self, tests)

    def addTest(self, test):
        """Add *test*, wrapping bare test cases in nose.case.Test."""
        log.debug("Add test %s", test)
        if isinstance(test, unittest.TestSuite):
            # Nested suites are appended as-is; only leaf cases get wrapped.
            self._tests.append(test)
        else:
            self._tests.append(Test(test, config=self.conf))
from __future__ import absolute_import, division, print_function
from datetime import datetime
from rfc822 import parsedate_tz, mktime_tz
from urlparse import urlparse
from .base import Vcs, RevisionResult, BufferParser
LOG_FORMAT = '{node}\x01{author}\x01{date|rfc822date}\x01{p1node} {p2node}\x01{branches}\x01{desc}\x02'
class MercurialVcs(Vcs):
    """Mercurial implementation of the Vcs interface, shelling out to ``hg``."""

    binary_path = 'hg'

    def get_default_env(self):
        # HGPLAIN disables user aliases/localization so output stays parseable.
        return {
            'HGPLAIN': '1',
        }

    def get_default_revision(self):
        return 'default'

    @property
    def remote_url(self):
        """Repository URL with the effective username spliced in for remote schemes."""
        if self.url.startswith(('ssh:', 'http:', 'https:')):
            parsed = urlparse(self.url)
            url = '%s://%s@%s/%s' % (
                parsed.scheme,
                parsed.username or self.username or 'hg',
                parsed.hostname + (':%s' % (parsed.port,) if parsed.port else ''),
                parsed.path.lstrip('/'),
            )
        else:
            url = self.url
        return url

    def run(self, cmd, **kwargs):
        # Force our ssh wrapper so remote operations use managed credentials.
        cmd = [
            self.binary_path,
            '--config',
            'ui.ssh={0}'.format(self.ssh_connect_path)
        ] + cmd
        return super(MercurialVcs, self).run(cmd, **kwargs)

    def clone(self):
        self.run(['clone', '--uncompressed', self.remote_url, self.path])

    def update(self):
        self.run(['pull'])

    def log(self, parent=None, offset=0, limit=100):
        """Yield RevisionResult objects parsed from ``hg log`` output.

        When *parent* is given, history is walked from that revision's
        ancestors; *offset*/*limit* paginate the results.
        """
        # TODO(dcramer): we should make this streaming
        cmd = ['log', '--template=%s' % (LOG_FORMAT,)]
        if parent:
            cmd.append('-r reverse(ancestors(%s))' % (parent,))
        if limit:
            cmd.append('--limit=%d' % (offset + limit,))
        result = self.run(cmd)
        for idx, chunk in enumerate(BufferParser(result, '\x02')):
            if idx < offset:
                continue
            (sha, author, author_date, parents, branches, message) = chunk.split('\x01')
            branches = filter(bool, branches.split(' ')) or ['default']
            # A null (all-zero) parent hash means "no parent"; drop it.
            parents = filter(lambda x: x and x != '0' * 40, parents.split(' '))
            author_date = datetime.utcfromtimestamp(
                mktime_tz(parsedate_tz(author_date)))
            yield RevisionResult(
                id=sha,
                author=author,
                author_date=author_date,
                message=message,
                parents=parents,
                branches=branches,
            )

    def export(self, id):
        """Return the diff for a single revision in git-style diff format."""
        cmd = ['diff', '-g', '-c %s' % (id,)]
        result = self.run(cmd)
        return result

    def is_child_parent(self, child_in_question, parent_in_question):
        """True when *parent_in_question* is an ancestor of *child_in_question*."""
        cmd = ['debugancestor', parent_in_question, child_in_question]
        result = self.run(cmd)
        return parent_in_question in result
// compile
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Test function signatures.
// Compiled but not run.
package main
// Named integer types used to exercise name/type ambiguity in the
// parameter lists below.
type t1 int
type t2 int
type t3 int

// Body-less declarations: this fixture only needs the signatures to
// parse and type-check (the file is compiled but never run).
func f1(t1, t2, t3)      // all three are type names (anonymous parameters)
func f2(t1, t2, t3 bool) // here t1..t3 are parameter *names* of type bool
func f3(t1, t2, x t3)    // mixed: named parameters t1, t2, x of type t3
func f4(t1, *t3)
func (x *t1) f5(y []t2) (t1, *t3) // method with multiple results
func f6() (int, *string)
func f7(*t2, t3)
func f8(os int) int // "os" is a legal parameter name despite matching a well-known package

func f9(os int) int {
	return os
}

func f10(err error) error {
	return err
}
// f11 checks that a parameter named after a declared type (t1) may carry a
// different type (string); it returns its argument unchanged.
func f11(t1 string) string {
	return t1
}
# Necessary imports. Provides library functions to ease writing tests.
from lib import prebuild, testcase, SUBMITTY_TUTORIAL_DIR
import subprocess
import os
import glob
import shutil
import traceback
############################################################################
# COPY THE ASSIGNMENT FROM THE SAMPLE ASSIGNMENTS DIRECTORIES
SAMPLE_ASSIGNMENT_CONFIG = SUBMITTY_TUTORIAL_DIR + "/examples/11_resources/config"
SAMPLE_SUBMISSIONS = SUBMITTY_TUTORIAL_DIR + "/examples/11_resources/submissions/"
@prebuild
def initialize(test):
    """Stage the assignment config and a fresh data/ directory for this test case."""
    try:
        os.mkdir(os.path.join(test.testcase_path, "assignment_config"))
    except OSError:
        # Directory already exists from a previous run; that's fine.
        pass
    try:
        data_path = os.path.join(test.testcase_path, "data")
        # Wipe any stale data from earlier runs before recreating the directory.
        if os.path.isdir(data_path):
            shutil.rmtree(data_path)
        os.mkdir(data_path)
    except OSError:
        pass
    subprocess.call(["cp",
                     os.path.join(SAMPLE_ASSIGNMENT_CONFIG, "config.json"),
                     os.path.join(test.testcase_path, "assignment_config")])
############################################################################
def cleanup(test):
    """Remove artifacts from previous runs and restore the expected test_output."""
    subprocess.call(["rm"] + ["-f"] +
                    glob.glob(os.path.join(test.testcase_path, "data", "*cpp")))
    subprocess.call(["rm"] + ["-rf"] +
                    glob.glob(os.path.join(test.testcase_path, "data", "test*")))
    subprocess.call(["rm"] + ["-f"] +
                    glob.glob(os.path.join(test.testcase_path, "data", "results*")))
    # Recreate test_output and seed it with the expected-output fixture.
    os.mkdir(os.path.join(test.testcase_path, "data", "test_output"))
    subprocess.call(["cp",
                     os.path.join(SAMPLE_ASSIGNMENT_CONFIG, "test_output", "simple_out.txt"),
                     os.path.join(test.testcase_path, "data", "test_output")])
@testcase
def schema_validation(test):
    """Validate the built complete_config.json against the grading config schema."""
    cleanup(test)
    config_path = os.path.join(test.testcase_path, 'assignment_config', 'complete_config.json')
    try:
        test.validate_complete_config(config_path)
    except Exception:
        # Print the traceback so the harness log shows the cause, then re-raise.
        traceback.print_exc()
        raise
@testcase
def solution(test):
    """Grade the reference solution and check it produces the full-credit output."""
    cleanup(test)
    subprocess.call(["cp", os.path.join(SAMPLE_SUBMISSIONS, "solution.cpp"),
                     os.path.join(test.testcase_path, "data")])
    test.run_compile()
    test.run_run()
    test.run_validator()
    # -b ignores whitespace differences in the grade report.
    test.diff("grade.txt", "solution_grade.txt", "-b")
    test.json_diff("results.json", "solution_results.json")
    test.diff("test02/STDOUT.txt", "data/test_output/simple_out.txt")
    test.empty_file("test02/STDERR.txt")
    test.empty_file("test02/execute_logfile.txt")
@testcase
def buggy(test):
    """Grade the buggy submission and check it produces the expected failing output."""
    cleanup(test)
    subprocess.call(["cp", os.path.join(SAMPLE_SUBMISSIONS, "buggy.cpp"),
                     os.path.join(test.testcase_path, "data")])
    test.run_compile()
    test.run_run()
    test.run_validator()
    # -b ignores whitespace differences in the grade report.
    test.diff("grade.txt", "buggy_grade.txt", "-b")
    test.json_diff("results.json", "buggy_results.json")
    test.empty_file("test02/STDOUT.txt")
    test.empty_file("test02/STDERR.txt")
    test.diff("test02/execute_logfile.txt", "buggy_test02_execute_logfile.txt")
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).
use crate::LanguageIdentifier;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
impl Serialize for LanguageIdentifier {
    /// Serializes as the canonical string form (e.g. `"en-US"`).
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // collect_str formats via Display straight into the serializer,
        // avoiding the intermediate String that `self.to_string()` allocated.
        serializer.collect_str(self)
    }
}
impl<'de> Deserialize<'de> for LanguageIdentifier {
    /// Deserializes from the string form, rejecting anything `FromStr` rejects.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Visitor that parses the string representation back into a
        // LanguageIdentifier.
        struct LanguageIdentifierVisitor;

        impl serde::de::Visitor<'_> for LanguageIdentifierVisitor {
            type Value = LanguageIdentifier;

            fn expecting(&self, formatter: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
                write!(formatter, "a valid Unicode Language Identifier")
            }

            fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                // Reuse FromStr; parse failures surface as serde custom errors.
                s.parse::<LanguageIdentifier>()
                    .map_err(serde::de::Error::custom)
            }
        }

        deserializer.deserialize_string(LanguageIdentifierVisitor)
    }
}
#[test]
fn json() {
    use crate::langid;
    use crate::subtags::{Language, Region, Script};

    // Round-trip LanguageIdentifier and each subtag type through serde_json,
    // and check malformed input ("2Xs") is rejected at deserialization.
    assert_eq!(
        serde_json::to_string(&langid!("en-US")).unwrap(),
        r#""en-US""#
    );
    assert_eq!(
        serde_json::from_str::<LanguageIdentifier>(r#""en-US""#).unwrap(),
        langid!("en-US")
    );
    assert!(serde_json::from_str::<LanguageIdentifier>(r#""2Xs""#).is_err());

    assert_eq!(
        serde_json::to_string(&"fr".parse::<Language>().unwrap()).unwrap(),
        r#""fr""#
    );
    assert_eq!(
        serde_json::from_str::<Language>(r#""fr""#).unwrap(),
        "fr".parse::<Language>().unwrap()
    );
    assert!(serde_json::from_str::<Language>(r#""2Xs""#).is_err());

    assert_eq!(
        serde_json::to_string(&"Latn".parse::<Script>().unwrap()).unwrap(),
        r#""Latn""#
    );
    assert_eq!(
        serde_json::from_str::<Script>(r#""Latn""#).unwrap(),
        "Latn".parse::<Script>().unwrap()
    );
    assert!(serde_json::from_str::<Script>(r#""2Xs""#).is_err());

    assert_eq!(
        serde_json::to_string(&"US".parse::<Region>().unwrap()).unwrap(),
        r#""US""#
    );
    assert_eq!(
        serde_json::from_str::<Region>(r#""US""#).unwrap(),
        "US".parse::<Region>().unwrap()
    );
    assert!(serde_json::from_str::<Region>(r#""2Xs""#).is_err());
}
#[test]
fn postcard() {
    use crate::langid;
    use crate::subtags::{Language, Region, Script};

    // Round-trip through the postcard binary format. LanguageIdentifier is
    // length-prefixed; fixed-width subtags use NUL padding where applicable.
    assert_eq!(
        postcard::to_stdvec(&langid!("en-US")).unwrap(),
        &[5, b'e', b'n', b'-', b'U', b'S']
    );
    assert_eq!(
        postcard::from_bytes::<LanguageIdentifier>(&[5, b'e', b'n', b'-', b'U', b'S']).unwrap(),
        langid!("en-US")
    );
    assert!(postcard::from_bytes::<LanguageIdentifier>(&[3, b'2', b'X', b's']).is_err());

    assert_eq!(
        postcard::to_stdvec(&"fr".parse::<Language>().unwrap()).unwrap(),
        b"fr\0"
    );
    assert_eq!(
        postcard::from_bytes::<Language>(b"fr\0").unwrap(),
        "fr".parse::<Language>().unwrap()
    );
    assert!(postcard::from_bytes::<Language>(b"2Xs").is_err());

    assert_eq!(
        postcard::to_stdvec(&"Latn".parse::<Script>().unwrap()).unwrap(),
        b"Latn"
    );
    assert_eq!(
        postcard::from_bytes::<Script>(b"Latn").unwrap(),
        "Latn".parse::<Script>().unwrap()
    );
    assert!(postcard::from_bytes::<Script>(b"2Xss").is_err());

    assert_eq!(
        postcard::to_stdvec(&"US".parse::<Region>().unwrap()).unwrap(),
        b"US\0"
    );
    assert_eq!(
        postcard::from_bytes::<Region>(b"US\0").unwrap(),
        "US".parse::<Region>().unwrap()
    );
    assert!(postcard::from_bytes::<Region>(b"2Xs").is_err());
}
"""
spatial lag operations
"""
__authors__ = "Serge Rey <srey@asu.edu>, David C. Folch <david.folch@asu.edu>"
__all__ = ['lag_spatial']
def lag_spatial(w, y):
    """
    Spatial lag operator. If w is row standardized, returns the average of
    each observation's neighbors; if not, returns the weighted sum of each
    observation's neighbors.

    Parameters
    ----------
    w : W
        weights object
    y : array
        numpy array with dimensionality conforming to w (see examples)

    Returns
    -------
    wy : array
        array of numeric values for the spatial lag

    Examples
    --------
    >>> import pysal
    >>> import numpy as np

    Setup a 9x9 binary spatial weights matrix and vector of data; compute the
    spatial lag of the vector.

    >>> w = pysal.lat2W(3, 3)
    >>> y = np.arange(9)
    >>> yl = pysal.lag_spatial(w, y)
    >>> yl
    array([  4.,   6.,   6.,  10.,  16.,  14.,  10.,  18.,  12.])

    Row standardize the weights matrix and recompute the spatial lag

    >>> w.transform = 'r'
    >>> yl = pysal.lag_spatial(w, y)
    >>> yl
    array([ 2.        ,  2.        ,  3.        ,  3.33333333,  4.        ,
            4.66666667,  5.        ,  6.        ,  6.        ])

    Explicitly define data vector as 9x1 and recompute the spatial lag

    >>> y.shape = (9, 1)
    >>> yl = pysal.lag_spatial(w, y)
    >>> yl
    array([[ 2.        ],
           [ 2.        ],
           [ 3.        ],
           [ 3.33333333],
           [ 4.        ],
           [ 4.66666667],
           [ 5.        ],
           [ 6.        ],
           [ 6.        ]])

    Take the spatial lag of a 9x2 data matrix

    >>> yr = np.arange(8, -1, -1)
    >>> yr.shape = (9, 1)
    >>> x = np.hstack((y, yr))
    >>> yl = pysal.lag_spatial(w, x)
    >>> yl
    array([[ 2.        ,  6.        ],
           [ 2.        ,  6.        ],
           [ 3.        ,  5.        ],
           [ 3.33333333,  4.66666667],
           [ 4.        ,  4.        ],
           [ 4.66666667,  3.33333333],
           [ 5.        ,  3.        ],
           [ 6.        ,  2.        ],
           [ 6.        ,  2.        ]])

    """
    # Sparse matrix-vector (or matrix-matrix) product does all the work.
    return w.sparse * y
import re
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from django.utils.functional import cached_property
test_srs = ({
'srid': 4326,
'auth_name': ('EPSG', True),
'auth_srid': 4326,
# Only the beginning, because there are differences depending on installed libs
'srtext': 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84"',
# +ellps=WGS84 has been removed in the 4326 proj string in proj-4.8
'proj_re': r'\+proj=longlat (\+ellps=WGS84 )?(\+datum=WGS84 |\+towgs84=0,0,0,0,0,0,0 )\+no_defs ?',
'spheroid': 'WGS 84', 'name': 'WGS 84',
'geographic': True, 'projected': False, 'spatialite': True,
# From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'ellipsoid': (6378137.0, 6356752.3, 298.257223563),
'eprec': (1, 1, 9),
'wkt': re.sub(r'[\s+]', '', """
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
""")
}, {
'srid': 32140,
'auth_name': ('EPSG', False),
'auth_srid': 32140,
'srtext': (
'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",'
'DATUM["North_American_Datum_1983",SPHEROID["GRS 1980"'
),
'proj_re': r'\+proj=lcc (\+lat_1=30.28333333333333? |\+lat_2=28.38333333333333? |\+lat_0=27.83333333333333? |'
r'\+lon_0=-99 ){4}\+x_0=600000 \+y_0=4000000 (\+ellps=GRS80 )?'
r'(\+datum=NAD83 |\+towgs84=0,0,0,0,0,0,0 )?\+units=m \+no_defs ?',
'spheroid': 'GRS 1980', 'name': 'NAD83 / Texas South Central',
'geographic': False, 'projected': True, 'spatialite': False,
# From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'ellipsoid': (6378137.0, 6356752.31414, 298.257222101),
'eprec': (1, 5, 10),
})
@skipUnlessDBFeature("has_spatialrefsys_table")
class SpatialRefSysTest(TestCase):
    """Checks the backend's SpatialRefSys model against the test_srs fixtures."""

    @cached_property
    def SpatialRefSys(self):
        # Resolved lazily: the spatial_ref_sys model depends on the active backend.
        return connection.ops.connection.ops.spatial_ref_sys()

    def test_get_units(self):
        epsg_4326 = next(f for f in test_srs if f['srid'] == 4326)
        unit, unit_name = self.SpatialRefSys().get_units(epsg_4326['wkt'])
        self.assertEqual(unit_name, 'degree')
        self.assertAlmostEqual(unit, 0.01745329251994328)

    def test_retrieve(self):
        """
        Test retrieval of SpatialRefSys model objects.
        """
        for sd in test_srs:
            srs = self.SpatialRefSys.objects.get(srid=sd['srid'])
            self.assertEqual(sd['srid'], srs.srid)
            # Some of the authority names are borked on Oracle, e.g., SRID=32140.
            # also, Oracle Spatial seems to add extraneous info to fields, hence the
            # the testing with the 'startswith' flag.
            auth_name, oracle_flag = sd['auth_name']
            # Compare case-insensitively because srs.auth_name is lowercase
            # ("epsg") on Spatialite.
            if not connection.ops.oracle or oracle_flag:
                self.assertIs(srs.auth_name.upper().startswith(auth_name), True)
            self.assertEqual(sd['auth_srid'], srs.auth_srid)
            # No PROJ and different srtext on Oracle.
            if not connection.ops.oracle:
                self.assertTrue(srs.wkt.startswith(sd['srtext']))
                self.assertRegex(srs.proj4text, sd['proj_re'])

    def test_osr(self):
        """
        Test getting OSR objects from SpatialRefSys model objects.
        """
        for sd in test_srs:
            sr = self.SpatialRefSys.objects.get(srid=sd['srid'])
            self.assertTrue(sr.spheroid.startswith(sd['spheroid']))
            self.assertEqual(sd['geographic'], sr.geographic)
            self.assertEqual(sd['projected'], sr.projected)
            self.assertIs(sr.name.startswith(sd['name']), True)
            # Testing the SpatialReference object directly.
            if not connection.ops.oracle:
                srs = sr.srs
                self.assertRegex(srs.proj, sd['proj_re'])
                self.assertTrue(srs.wkt.startswith(sd['srtext']))

    def test_ellipsoid(self):
        """
        Test the ellipsoid property.
        """
        for sd in test_srs:
            # Getting the ellipsoid and precision parameters.
            ellps1 = sd['ellipsoid']
            prec = sd['eprec']
            # Getting our spatial reference and its ellipsoid
            srs = self.SpatialRefSys.objects.get(srid=sd['srid'])
            ellps2 = srs.ellipsoid
            for i in range(3):
                self.assertAlmostEqual(ellps1[i], ellps2[i], prec[i])

    @skipUnlessDBFeature('supports_add_srs_entry')
    def test_add_entry(self):
        """
        Test adding a new entry in the SpatialRefSys model using the
        add_srs_entry utility.
        """
        from django.contrib.gis.utils import add_srs_entry

        add_srs_entry(3857)
        self.assertTrue(
            self.SpatialRefSys.objects.filter(srid=3857).exists()
        )
        srs = self.SpatialRefSys.objects.get(srid=3857)
        self.assertTrue(
            self.SpatialRefSys.get_spheroid(srs.wkt).startswith('SPHEROID[')
        )
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.13 (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
# these strings are filled in when 'setup.py versioneer' creates _version.py
tag_prefix = "v"
parentdir_prefix = "kinderstadt-"
versionfile_source = "kinderstadt/_version.py"
import errno
import os
import re
import subprocess
import sys
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Try each executable name in *commands* until one launches; run it with *args*.

    Returns the command's stripped stdout as text, or None when no candidate
    could be started or the process exited non-zero.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            # ENOENT: this candidate executable doesn't exist; try the next one.
            if e.errno == errno.ENOENT:
                continue
            if verbose:
                print("unable to run %s" % args[0])
                print(e)
            return None
    else:
        # for/else: the loop exhausted all candidates without a `break`.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    stdout = p.communicate()[0].strip()
    if sys.version >= '3':
        # Popen yields bytes on Python 3; normalize to str.
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % args[0])
        return None
    return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
    """Derive a version dict from the name of the directory containing the source.

    Source tarballs conventionally unpack into '<name>-<version>/', so the
    version is whatever follows *parentdir_prefix* in the directory name.
    Returns None when the directory lacks the expected prefix.
    """
    dirname = os.path.basename(root)
    if dirname.startswith(parentdir_prefix):
        return {"version": dirname[len(parentdir_prefix):], "full": ""}
    if verbose:
        print("guessing rootdir is '%s', but '%s' doesn't start with "
              "prefix '%s'" % (root, dirname, parentdir_prefix))
    return None
def git_get_keywords(versionfile_abs):
    """Extract the git-archive keyword strings from *versionfile_abs*.

    Used from setup.py, where importing _version.py is undesirable, so
    the values are pulled out with a regexp instead.  Returns a dict that
    may contain "refnames" and/or "full"; unreadable files yield {}.
    """
    keywords = {}
    pattern = re.compile(r'=\s*"(.*)"')
    try:
        fobj = open(versionfile_abs, "r")
        try:
            for line in fobj:
                stripped = line.strip()
                if stripped.startswith("git_refnames ="):
                    match = pattern.search(line)
                    if match:
                        keywords["refnames"] = match.group(1)
                if stripped.startswith("git_full ="):
                    match = pattern.search(line)
                    if match:
                        keywords["full"] = match.group(1)
        finally:
            fobj.close()
    except EnvironmentError:
        # missing/unreadable file: behave as if no keywords were found
        pass
    return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
    """Translate expanded git-archive keywords into a version dict.

    Returns {} when *keywords* is empty or the "$Format" placeholders
    were never expanded (i.e. we are not in an unpacked git-archive).
    """
    if not keywords:
        return {}  # keyword-finding function failed to find keywords
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        return {}  # unexpanded, so not in an unpacked git-archive tarball
    refs = set(r.strip() for r in refnames.strip("()").split(","))
    # git >= 1.8.3 labels tags "tag: foo-1.0"; prefer those when present
    TAG = "tag: "
    tags = set(r[len(TAG):] for r in refs if r.startswith(TAG))
    if not tags:
        # Old git (or genuinely no tags): heuristically assume every real
        # version tag contains a digit, which filters out common branch
        # names like "release", "stabilization", "HEAD" and "master".
        tags = set(r for r in refs if re.search(r'\d', r))
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    full = keywords["full"].strip()
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            version = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % version)
            return {"version": version, "full": full}
    # no suitable tags, so the full revision id stands in for the version
    if verbose:
        print("no suitable tags, using full revision id")
    return {"version": full, "full": full}
def git_versions_from_vcs(tag_prefix, root, verbose=False):
    """Ask git itself for version information.

    Only reached when the archive keywords were not expanded and
    _version.py has not been rewritten, i.e. we are inside a checked-out
    source tree.  Returns {} whenever git is unavailable, *root* has no
    .git, or the described tag lacks *tag_prefix*.
    """
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        return {}
    git_candidates = ["git"]
    if sys.platform == "win32":
        # run_command uses shell=False, so the .cmd/.exe names are needed
        git_candidates = ["git.cmd", "git.exe"]
    described = run_command(git_candidates,
                            ["describe", "--tags", "--dirty", "--always"],
                            cwd=root)
    if described is None:
        return {}
    if not described.startswith(tag_prefix):
        if verbose:
            print("tag '%s' doesn't start with prefix '%s'"
                  % (described, tag_prefix))
        return {}
    tag = described[len(tag_prefix):]
    head = run_command(git_candidates, ["rev-parse", "HEAD"], cwd=root)
    if head is None:
        return {}
    full = head.strip()
    if tag.endswith("-dirty"):
        # propagate the dirty marker from `describe` onto the revision id
        full += "-dirty"
    return {"version": tag, "full": full}
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
keywords = {"refnames": git_refnames, "full": git_full}
ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
if ver:
return ver
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in range(len(versionfile_source.split('/'))):
root = os.path.dirname(root)
except NameError:
return default
return (git_versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default) | unknown | codeparrot/codeparrot-clean | ||
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')  # generic return type threaded through the optional cls hook
# Signature of the optional `cls` response hook:
# (pipeline_response, deserialized, response_headers) -> Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class InstructionsOperations:
    """InstructionsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.billing.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list_by_billing_profile(
        self,
        billing_account_name: str,
        billing_profile_name: str,
        **kwargs
    ) -> AsyncIterable["_models.InstructionListResult"]:
        """Lists the instructions by billing profile id.

        :param billing_account_name: The ID that uniquely identifies a billing account.
        :type billing_account_name: str
        :param billing_profile_name: The ID that uniquely identifies a billing profile.
        :type billing_profile_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either InstructionListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.billing.models.InstructionListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.InstructionListResult"]
        # callers may extend/override the default status-code -> exception map
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL (first page uses the operation's template)
                url = self.list_by_billing_profile.metadata['url']  # type: ignore
                path_format_arguments = {
                    'billingAccountName': self._serialize.url("billing_account_name", billing_account_name, 'str'),
                    'billingProfileName': self._serialize.url("billing_profile_name", billing_profile_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # subsequent pages: next_link is a complete URL from the service
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # turn one page's response into (continuation token, items)
            deserialized = self._deserialize('InstructionListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    # URL template consumed by prepare_request above
    list_by_billing_profile.metadata = {'url': '/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/instructions'}  # type: ignore

    async def get(
        self,
        billing_account_name: str,
        billing_profile_name: str,
        instruction_name: str,
        **kwargs
    ) -> "_models.Instruction":
        """Get the instruction by name. These are custom billing instructions and are only applicable for
        certain customers.

        :param billing_account_name: The ID that uniquely identifies a billing account.
        :type billing_account_name: str
        :param billing_profile_name: The ID that uniquely identifies a billing profile.
        :type billing_profile_name: str
        :param instruction_name: Instruction Name.
        :type instruction_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Instruction, or the result of cls(response)
        :rtype: ~azure.mgmt.billing.models.Instruction
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Instruction"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'billingAccountName': self._serialize.url("billing_account_name", billing_account_name, 'str'),
            'billingProfileName': self._serialize.url("billing_profile_name", billing_profile_name, 'str'),
            'instructionName': self._serialize.url("instruction_name", instruction_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('Instruction', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/instructions/{instructionName}'}  # type: ignore

    async def put(
        self,
        billing_account_name: str,
        billing_profile_name: str,
        instruction_name: str,
        parameters: "_models.Instruction",
        **kwargs
    ) -> "_models.Instruction":
        """Creates or updates an instruction. These are custom billing instructions and are only
        applicable for certain customers.

        :param billing_account_name: The ID that uniquely identifies a billing account.
        :type billing_account_name: str
        :param billing_profile_name: The ID that uniquely identifies a billing profile.
        :type billing_profile_name: str
        :param instruction_name: Instruction Name.
        :type instruction_name: str
        :param parameters: The new instruction.
        :type parameters: ~azure.mgmt.billing.models.Instruction
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Instruction, or the result of cls(response)
        :rtype: ~azure.mgmt.billing.models.Instruction
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Instruction"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-05-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.put.metadata['url']  # type: ignore
        path_format_arguments = {
            'billingAccountName': self._serialize.url("billing_account_name", billing_account_name, 'str'),
            'billingProfileName': self._serialize.url("billing_profile_name", billing_profile_name, 'str'),
            'instructionName': self._serialize.url("instruction_name", instruction_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Construct and send the serialized request body
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'Instruction')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('Instruction', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    put.metadata = {'url': '/providers/Microsoft.Billing/billingAccounts/{billingAccountName}/billingProfiles/{billingProfileName}/instructions/{instructionName}'}  # type: ignore | unknown | codeparrot/codeparrot-clean |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .analysis import (
create_report_for_web_service_scan,
update_web_service_report_for_organization,
update_latest_web_service_reports_for_organization,
)
from .base import (
apply_flag_to_web_service_scan,
apply_flags_to_web_service_scan,
inspect_http_service,
inspect_https_service,
publish_report_for_web_service_scan,
retrieve_landing_resource_for_web_service,
scan_web_service,
update_web_service_scan_completed,
update_web_service_scan_elasticsearch,
update_web_service_scanning_status,
)
from .crawling import (
crawl_web_service,
)
from .fingerprinting import (
enumerate_user_agent_fingerprints_for_web_service,
get_user_agent_fingerprint_for_web_service,
)
from .imaging import (
screenshot_web_service,
screenshot_web_service_url,
)
from .virtualhost import (
assess_virtual_host_fingerprints,
discover_virtual_hosts_for_web_service,
fingerprint_virtual_host,
fingerprint_virtual_hosts,
) | unknown | codeparrot/codeparrot-clean | ||
'''
Save spectrum info to a data structure, so it's easy to retrieve spect for a special time
'''
import pymzml
class SpecBasic:
    """Lightweight record for one spectrum: retention time, scan index,
    MS level, and (optionally attached later) the spectrum data dict."""

    def __init__(self, rt, index, mslevel=1):
        # the retention time is assumed to be convertible to float
        self._rt = float(rt)
        self._idx = index
        # BUG FIX: the original rebound the local name (`mslevel = 1`) and
        # discarded the caller-supplied value; store it on the instance.
        self._mslevel = mslevel
        self._spec = dict()

    @property
    def rtime(self):
        """Retention time (float)."""
        return self._rt

    @property
    def index(self):
        """Scan index/id as supplied by the caller."""
        return self._idx

    @property
    def mslevel(self):
        """MS level supplied at construction (new, backward-compatible)."""
        return self._mslevel

    @property
    def spec(self):
        """Spectrum payload dict (peaks, scan time, id, ...)."""
        return self._spec

    @spec.setter
    def spec(self, spec):
        self._spec = spec

    def __str__(self):
        return "retention time: %s, index: %s" % (self._rt, self._idx)
class SpecDict(dict):
    """Buckets SpecBasic objects by the integer part of their retention
    time so that time (and time-range) lookups only scan one bucket.
    """

    def __init__(self):
        # int(retention time) -> [SpecBasic, ...]
        self._dict = dict()

    def __getitem__(self, time):
        """Whole-number times return the entire bucket; fractional times
        return only spectra matching to the given number of decimals.

        Raises KeyError when no spectrum falls in the integer bucket.
        """
        if float(time).is_integer():
            return self._dict[int(time)]
        matches = []
        decimals = len(str(time).split(".")[-1])
        scale = 10 ** decimals
        for specbasic in self._dict[int(time)]:
            # keep spectra whose rtime truncates to `time` at this precision
            if (specbasic.rtime - time) * scale // 1 == 0:
                matches.append(specbasic)
        return matches

    def __setitem__(self, time, specbasic):
        if not isinstance(specbasic, SpecBasic):
            # FIX: typo'd message ("SpectDict only accept"); TypeError is
            # more precise and still an Exception subclass for old callers.
            raise TypeError("SpecDict only accepts SpecBasic instances")
        # FIX: the original used a bare `except:` to create missing
        # buckets, which could mask unrelated errors; setdefault is the
        # equivalent, safe idiom.
        self._dict.setdefault(int(time), []).append(specbasic)

    def __str__(self):
        return "number of spec: %s" % (len(self._dict.keys()))

    def getRange(self, s_time, f_time):
        """Return all spectra with s_time < rtime < f_time (exclusive)."""
        matches = []
        for bucket in range(int(s_time), int(f_time) + 1):
            if bucket not in self._dict:
                continue
            for specbasic in self._dict[bucket]:
                if s_time < specbasic.rtime < f_time:
                    matches.append(specbasic)
        return matches
class ExtractSpec:
    """Reads an mzML file with pymzml and indexes its MS2 spectra by
    retention time for later lookup by time or time range."""

    def __init__(self, filename):
        # filename: path to an mzML file readable by pymzml.run.Reader
        self.start_time = 0   # retention time of the first MS2 spectrum seen
        self.end_time = 0     # retention time of the latest MS2 spectrum seen
        self.interval = 0     # gap between the last two MS2 retention times
        self.run, self.specdict = self.setup(filename)
        #print self.specdict

    def setup(self, filename):
        # set up basic data structure: one pass over the file, collecting
        # every MS2 spectrum into a SpecDict keyed by retention time
        run = pymzml.run.Reader(filename)
        specdict = SpecDict()
        #self.start_time = run[1]["scan time"]
        #self.interval = run[2]["scan time"] - run[1]["scan time"]
        # first spec flag
        flag_f = 1
        for spectrum in run:
            if spectrum['ms level'] == 2:
                if flag_f:
                    self.start_time = spectrum["scan time"]
                    flag_f = 0
                #print max(spectrum.i), min(spectrum.i)
                #print spectrum['scan time']
                specbasic = SpecBasic(spectrum['scan time'], spectrum['id'])
                specbasic.spec["peaks"] = spectrum.peaks
                specbasic.spec["scan time"] = spectrum['scan time']
                specbasic.spec["id"] = spectrum['id']
                specdict[spectrum['scan time']] = specbasic
                # NOTE(review): interval/end_time tracking assumes scan
                # times are non-decreasing across the file — confirm for
                # the instruments producing these mzML files.
                if spectrum["scan time"] > self.end_time:
                    self.interval = spectrum["scan time"] - self.end_time
                    self.end_time = spectrum["scan time"]
        return run, specdict

    def extractWithTime(self, time):
        # delegate to SpecDict.__getitem__ (int -> whole bucket,
        # fractional -> precision-matched spectra)
        return self.specdict[time]

    def extractWithTimeRange(self, s_time, f_time):
        # spectra with s_time < rtime < f_time (exclusive bounds)
        return self.specdict.getRange(s_time, f_time)
def ExtractIonChrom(run):
    # Build an extracted-ion chromatogram: (retention time, intensity, m/z)
    # triples for one mass of interest.
    # NOTE(review): the matching loop is commented out below, so as written
    # this only prints each spectrum id and then iterates an empty list.
    timeDependentIntensities = []
    #MASS_2_FOLLOW = 1402
    MASS_2_FOLLOW = 1403
    print "mass 2 follow:", MASS_2_FOLLOW
    for spectrum in run:
        print spectrum['id']
    #for spectrum in run:
    # if spectrum['ms level'] == 1:
    # matchList = spectrum.hasPeak(MASS_2_FOLLOW)
    # if matchList != []:
    # print "matched"
    # for mz,I in matchList:
    # if I > 100:
    # timeDependentIntensities.append( [ spectrum['scan time'], I , mz ])
    for rt, i, mz in timeDependentIntensities:
        print('{0:5.3f} {1:13.4f} {2:10}'.format( rt, i, mz ))
def ExtractTest():
    # Manual smoke test: requires the sample mzML file below to exist in
    # the current working directory.
    exspec = ExtractSpec("./4tRNA1_102009.mzML")
    # extract spectrums for specific time
    print "for time 1"
    specs = exspec.extractWithTime(1)
    for spec in specs:
        print spec
    print "for time 1.1"
    specs = exspec.extractWithTime(1.1)
    for spec in specs:
        print spec
    print "for time 1.1 to 1.5"
    specs = exspec.extractWithTimeRange(1.1, 1.5)
    for spec in specs:
        print spec


if __name__ == "__main__":
    ExtractTest() | unknown | codeparrot/codeparrot-clean |
from flask_testing import TestCase
import testing.postgresql
from orcid_service.models import Base
class TestCaseDatabase(TestCase):
    """
    Base test class for when databases are being used.
    """
    # Connection parameters for the throwaway test database; the same dict
    # is handed to testing.postgresql.Postgresql in setUpClass.
    postgresql_url_dict = {
        'port': 1234,
        'host': '127.0.0.1',
        'user': 'postgres',
        'database': 'test'
    }
    # SQLAlchemy-style URL assembled from the dict above
    postgresql_url = 'postgresql://{user}@{host}:{port}/{database}' \
        .format(
            user=postgresql_url_dict['user'],
            host=postgresql_url_dict['host'],
            port=postgresql_url_dict['port'],
            database=postgresql_url_dict['database']
        )

    def create_app(self):
        '''Start the wsgi application'''
        from orcid_service import app
        a = app.create_app(**{
            'SQLALCHEMY_DATABASE_URI': self.postgresql_url,
            'SQLALCHEMY_ECHO': False,
            'TESTING': True,
            'PROPAGATE_EXCEPTIONS': True,
            'TRAP_BAD_REQUEST_ERRORS': True
        })
        return a

    @classmethod
    def setUpClass(cls):
        # One postgres instance per test class; stopped in tearDownClass.
        cls.postgresql = \
            testing.postgresql.Postgresql(**cls.postgresql_url_dict)

    @classmethod
    def tearDownClass(cls):
        cls.postgresql.stop()

    def setUp(self):
        # Fresh schema before every test.
        Base.metadata.create_all(bind=self.app.db.engine)

    def tearDown(self):
        # NOTE(review): setUp creates tables from `Base.metadata` but this
        # drops via the app's db object — verify both refer to the same
        # metadata/engine.
        self.app.db.session.remove()
        self.app.db.drop_all() | unknown | codeparrot/codeparrot-clean |
"""
========================================================
Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
========================================================
.. currentmodule:: scipy.cluster.hierarchy
These functions cut hierarchical clusterings into flat clusterings
or find the roots of the forest formed by a cut by providing the flat
cluster ids of each observation.
.. autosummary::
:toctree: generated/
fcluster
fclusterdata
leaders
These are routines for agglomerative clustering.
.. autosummary::
:toctree: generated/
linkage
single
complete
average
weighted
centroid
median
ward
These routines compute statistics on hierarchies.
.. autosummary::
:toctree: generated/
cophenet
from_mlab_linkage
inconsistent
maxinconsts
maxdists
maxRstat
to_mlab_linkage
Routines for visualizing flat clusters.
.. autosummary::
:toctree: generated/
dendrogram
These are data structures and routines for representing hierarchies as
tree objects.
.. autosummary::
:toctree: generated/
ClusterNode
leaves_list
to_tree
cut_tree
These are predicates for checking the validity of linkage and
inconsistency matrices as well as for checking isomorphism of two
flat cluster assignments.
.. autosummary::
:toctree: generated/
is_valid_im
is_valid_linkage
is_isomorphic
is_monotonic
correspond
num_obs_linkage
Utility routines for plotting:
.. autosummary::
:toctree: generated/
set_link_color_palette
References
----------
.. [1] "Statistics toolbox." API Reference Documentation. The MathWorks.
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/.
Accessed October 1, 2007.
.. [2] "Hierarchical clustering." API Reference Documentation.
The Wolfram Research, Inc.
http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/
HierarchicalClustering.html.
Accessed October 1, 2007.
.. [3] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage
Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969.
.. [4] Ward Jr, JH. "Hierarchical grouping to optimize an objective
function." Journal of the American Statistical Association. 58(301):
pp. 236--44. 1963.
.. [5] Johnson, SC. "Hierarchical clustering schemes." Psychometrika.
32(2): pp. 241--54. 1966.
.. [6] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp.
855--60. 1962.
.. [7] Batagelj, V. "Comparing resemblance measures." Journal of
Classification. 12: pp. 73--90. 1995.
.. [8] Sokal, RR and Michener, CD. "A statistical method for evaluating
systematic relationships." Scientific Bulletins. 38(22):
pp. 1409--38. 1958.
.. [9] Edelbrock, C. "Mixture model tests of hierarchical clustering
algorithms: the problem of classifying everybody." Multivariate
Behavioral Research. 14: pp. 367--84. 1979.
.. [10] Jain, A., and Dubes, R., "Algorithms for Clustering Data."
Prentice-Hall. Englewood Cliffs, NJ. 1988.
.. [11] Fisher, RA "The use of multiple measurements in taxonomic
problems." Annals of Eugenics, 7(2): 179-188. 1936
* MATLAB and MathWorks are registered trademarks of The MathWorks, Inc.
* Mathematica is a registered trademark of The Wolfram Research, Inc.
"""
from __future__ import division, print_function, absolute_import
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
#
# Author: Damian Eads
# Date: September 22, 2007
#
# Copyright (c) 2007, 2008, Damian Eads
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# - Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import bisect
from collections import deque
import numpy as np
from . import _hierarchy
import scipy.spatial.distance as distance
from scipy._lib.six import string_types
from scipy._lib.six import xrange
# integer codes handed to the C implementation for each linkage method
_LINKAGE_METHODS = {'single': 0, 'complete': 1, 'average': 2, 'centroid': 3,
                    'median': 4, 'ward': 5, 'weighted': 6}
# methods whose cluster-update formulas assume euclidean pairwise distances
_EUCLIDEAN_METHODS = ('centroid', 'median', 'ward')

# public API of this module
__all__ = ['ClusterNode', 'average', 'centroid', 'complete', 'cophenet',
           'correspond', 'cut_tree', 'dendrogram', 'fcluster', 'fclusterdata',
           'from_mlab_linkage', 'inconsistent', 'is_isomorphic',
           'is_monotonic', 'is_valid_im', 'is_valid_linkage', 'leaders',
           'leaves_list', 'linkage', 'maxRstat', 'maxdists', 'maxinconsts',
           'median', 'num_obs_linkage', 'set_link_color_palette', 'single',
           'to_mlab_linkage', 'to_tree', 'ward', 'weighted', 'distance']
class ClusterWarning(UserWarning):
    """Warning category used by scipy.cluster for non-fatal problems."""
    pass
def _warning(s):
    """Emit a ClusterWarning with a 'scipy.cluster:' prefix, attributed to
    the caller's caller (stacklevel=3)."""
    message = 'scipy.cluster: %s' % s
    warnings.warn(message, ClusterWarning, stacklevel=3)
def _copy_array_if_base_present(a):
    """Return ``a``, copying it when it is a view on a parent array, and
    promoting float32 input to a fresh double-precision array.

    Used before handing arrays to C code that assumes contiguous doubles.
    """
    if a.base is not None:
        return a.copy()
    if np.issubsctype(a, np.float32):
        return np.array(a, dtype=np.double)
    return a
def _copy_arrays_if_base_present(T):
    """Apply ``_copy_array_if_base_present`` to every array in the tuple
    ``T`` and return the results as a list.

    Useful when the arrays are about to be passed to a C function that
    does not do proper striding.
    """
    return [_copy_array_if_base_present(arr) for arr in T]
def _randdm(pnts):
    """Generate a random distance matrix stored in condensed form.

    Returns a vector of ``pnts * (pnts - 1) // 2`` uniform random values.
    Raises ValueError when fewer than 2 points are requested.
    """
    if pnts < 2:
        raise ValueError("The number of points in the distance matrix "
                         "must be at least 2.")
    # FIX: use floor division — this module does
    # `from __future__ import division`, so `/ 2` yields a float and
    # np.random.rand requires integer dimension arguments.
    return np.random.rand(pnts * (pnts - 1) // 2)
def single(y):
    """Perform single/min/nearest linkage on the condensed distance
    matrix ``y``.

    Parameters
    ----------
    y : ndarray
        The upper triangular of the distance matrix, in the condensed
        form returned by ``pdist``.

    Returns
    -------
    Z : ndarray
        The linkage matrix.

    See Also
    --------
    linkage : for advanced creation of hierarchical clusterings.
    scipy.spatial.distance.pdist : pairwise distance metrics
    """
    Z = linkage(y, method='single', metric='euclidean')
    return Z
def complete(y):
    """Perform complete/max/farthest-point linkage on a condensed
    distance matrix.

    Parameters
    ----------
    y : ndarray
        The upper triangular of the distance matrix, in the condensed
        form returned by ``pdist``.

    Returns
    -------
    Z : ndarray
        A linkage matrix containing the hierarchical clustering; see the
        `linkage` function documentation for its structure.

    See Also
    --------
    linkage : for advanced creation of hierarchical clusterings.
    scipy.spatial.distance.pdist : pairwise distance metrics
    """
    Z = linkage(y, method='complete', metric='euclidean')
    return Z
def average(y):
    """Perform average/UPGMA linkage on a condensed distance matrix.

    Parameters
    ----------
    y : ndarray
        The upper triangular of the distance matrix, in the condensed
        form returned by ``pdist``.

    Returns
    -------
    Z : ndarray
        A linkage matrix containing the hierarchical clustering; see the
        `linkage` function documentation for its structure.

    See Also
    --------
    linkage : for advanced creation of hierarchical clusterings.
    scipy.spatial.distance.pdist : pairwise distance metrics
    """
    Z = linkage(y, method='average', metric='euclidean')
    return Z
def weighted(y):
    """Perform weighted/WPGMA linkage on the condensed distance matrix.

    See ``linkage`` for more information on the return structure and
    algorithm.

    Parameters
    ----------
    y : ndarray
        The upper triangular of the distance matrix, in the condensed
        form returned by ``pdist``.

    Returns
    -------
    Z : ndarray
        A linkage matrix containing the hierarchical clustering; see the
        ``linkage`` function documentation for its structure.

    See Also
    --------
    linkage : for advanced creation of hierarchical clusterings.
    scipy.spatial.distance.pdist : pairwise distance metrics
    """
    Z = linkage(y, method='weighted', metric='euclidean')
    return Z
def centroid(y):
    """Perform centroid/UPGMC linkage.

    See ``linkage`` for more information on the input matrix, return
    structure, and algorithm.

    Common calling conventions:

    1. ``Z = centroid(y)`` — centroid linkage on the condensed distance
       matrix ``y``.
    2. ``Z = centroid(X)`` — centroid linkage on the observation matrix
       ``X``, using euclidean distance as the metric.

    Parameters
    ----------
    y : ndarray
        A condensed distance matrix (a flat array containing the upper
        triangular of the distance matrix, as returned by ``pdist``), or
        alternatively a collection of m observation vectors in n
        dimensions passed as an m by n array.

    Returns
    -------
    Z : ndarray
        A linkage matrix containing the hierarchical clustering; see the
        `linkage` function documentation for its structure.

    See Also
    --------
    linkage : for advanced creation of hierarchical clusterings.
    """
    Z = linkage(y, method='centroid', metric='euclidean')
    return Z
def median(y):
    """Perform median/WPGMC linkage.

    See ``linkage`` for more information on the return structure and
    algorithm.

    Common calling conventions:

    1. ``Z = median(y)`` — median linkage on the condensed distance
       matrix ``y``.
    2. ``Z = median(X)`` — median linkage on the observation matrix
       ``X``, using euclidean distance as the metric.

    Parameters
    ----------
    y : ndarray
        A condensed distance matrix (a flat array containing the upper
        triangular of the distance matrix, as returned by ``pdist``), or
        alternatively a collection of m observation vectors in n
        dimensions passed as an m by n array.

    Returns
    -------
    Z : ndarray
        The hierarchical clustering encoded as a linkage matrix.

    See Also
    --------
    linkage : for advanced creation of hierarchical clusterings.
    scipy.spatial.distance.pdist : pairwise distance metrics
    """
    Z = linkage(y, method='median', metric='euclidean')
    return Z
def ward(y):
    """
    Performs Ward's linkage on a condensed distance matrix.

    See `linkage` for more information on the return structure
    and algorithm.

    The following are common calling conventions:

    1. ``Z = ward(y)``

       Performs Ward's linkage on the condensed distance matrix ``y``.
       See `linkage` for more information on the return structure and
       algorithm.

    2. ``Z = ward(X)``

       Performs Ward's linkage on the observation matrix ``X`` using
       Euclidean distance as the distance metric. See `linkage` for
       more information on the return structure and algorithm.

    Parameters
    ----------
    y : ndarray
        A condensed distance matrix. A condensed distance matrix is a
        flat array containing the upper triangular of the distance
        matrix. This is the form that ``pdist`` returns. Alternatively,
        a collection of m observation vectors in n dimensions may be
        passed as an m by n array.

    Returns
    -------
    Z : ndarray
        The hierarchical clustering encoded as a linkage matrix.

    See Also
    --------
    linkage: for advanced creation of hierarchical clusterings.
    scipy.spatial.distance.pdist : pairwise distance metrics
    """
    # Ward's variance-minimization criterion is only well-defined for
    # the Euclidean metric, so the metric is pinned here.
    return linkage(y, 'ward', 'euclidean')
def linkage(y, method='single', metric='euclidean'):
    """
    Performs hierarchical/agglomerative clustering.

    The input y may be either a 1d compressed distance matrix
    or a 2d array of observation vectors.

    If y is a 1d compressed distance matrix,
    then y must be a :math:`{n \\choose 2}` sized
    vector where n is the number of original observations paired
    in the distance matrix. The behavior of this function is very
    similar to the MATLAB linkage function.

    A :math:`(n-1)` by 4 matrix ``Z`` is returned. At the
    :math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
    ``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
    cluster with an index less than :math:`n` corresponds to one of
    the :math:`n` original observations. The distance between
    clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
    fourth value ``Z[i, 3]`` represents the number of original
    observations in the newly formed cluster.

    The following linkage methods are used to compute the distance
    :math:`d(s, t)` between two clusters :math:`s` and
    :math:`t`. The algorithm begins with a forest of clusters that
    have yet to be used in the hierarchy being formed. When two
    clusters :math:`s` and :math:`t` from this forest are combined
    into a single cluster :math:`u`, :math:`s` and :math:`t` are
    removed from the forest, and :math:`u` is added to the
    forest. When only one cluster remains in the forest, the algorithm
    stops, and this cluster becomes the root.

    A distance matrix is maintained at each iteration. The ``d[i,j]``
    entry corresponds to the distance between cluster :math:`i` and
    :math:`j` in the original forest.

    At each iteration, the algorithm must update the distance matrix
    to reflect the distance of the newly formed cluster u with the
    remaining clusters in the forest.

    Suppose there are :math:`|u|` original observations
    :math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and
    :math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in
    cluster :math:`v`. Recall :math:`s` and :math:`t` are
    combined to form cluster :math:`u`. Let :math:`v` be any
    remaining cluster in the forest that is not :math:`u`.

    The following are methods for calculating the distance between the
    newly formed cluster :math:`u` and each :math:`v`.

    * method='single' assigns

      .. math::
         d(u,v) = \\min(dist(u[i],v[j]))

      for all points :math:`i` in cluster :math:`u` and
      :math:`j` in cluster :math:`v`. This is also known as the
      Nearest Point Algorithm.

    * method='complete' assigns

      .. math::
         d(u, v) = \\max(dist(u[i],v[j]))

      for all points :math:`i` in cluster u and :math:`j` in
      cluster :math:`v`. This is also known by the Farthest Point
      Algorithm or Voor Hees Algorithm.

    * method='average' assigns

      .. math::
         d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])}
                                   {(|u|*|v|)}

      for all points :math:`i` and :math:`j` where :math:`|u|`
      and :math:`|v|` are the cardinalities of clusters :math:`u`
      and :math:`v`, respectively. This is also called the UPGMA
      algorithm.

    * method='weighted' assigns

      .. math::
         d(u,v) = (dist(s,v) + dist(t,v))/2

      where cluster u was formed with cluster s and t and v
      is a remaining cluster in the forest. (also called WPGMA)

    * method='centroid' assigns

      .. math::
         dist(s,t) = ||c_s-c_t||_2

      where :math:`c_s` and :math:`c_t` are the centroids of
      clusters :math:`s` and :math:`t`, respectively. When two
      clusters :math:`s` and :math:`t` are combined into a new
      cluster :math:`u`, the new centroid is computed over all the
      original objects in clusters :math:`s` and :math:`t`. The
      distance then becomes the Euclidean distance between the
      centroid of :math:`u` and the centroid of a remaining cluster
      :math:`v` in the forest. This is also known as the UPGMC
      algorithm.

    * method='median' assigns :math:`d(s,t)` like the ``centroid``
      method. When two clusters :math:`s` and :math:`t` are combined
      into a new cluster :math:`u`, the average of centroids s and t
      give the new centroid :math:`u`. This is also known as the
      WPGMC algorithm.

    * method='ward' uses the Ward variance minimization algorithm.
      The new entry :math:`d(u,v)` is computed as follows,

      .. math::

         d(u,v) = \\sqrt{\\frac{|v|+|s|}
                              {T}d(v,s)^2
                       + \\frac{|v|+|t|}
                              {T}d(v,t)^2
                       - \\frac{|v|}
                              {T}d(s,t)^2}

      where :math:`u` is the newly joined cluster consisting of
      clusters :math:`s` and :math:`t`, :math:`v` is an unused
      cluster in the forest, :math:`T=|v|+|s|+|t|`, and
      :math:`|*|` is the cardinality of its argument. This is also
      known as the incremental algorithm.

    Warning: When the minimum distance pair in the forest is chosen, there
    may be two or more pairs with the same minimum distance. This
    implementation may chose a different minimum than the MATLAB
    version.

    Parameters
    ----------
    y : ndarray
        A condensed distance matrix. A condensed distance matrix
        is a flat array containing the upper triangular of the distance matrix.
        This is the form that ``pdist`` returns. Alternatively, a collection of
        :math:`m` observation vectors in n dimensions may be passed as an
        :math:`m` by :math:`n` array. All elements of `y` must be finite,
        i.e. no NaNs or infs.
    method : str, optional
        The linkage algorithm to use. See the ``Linkage Methods`` section below
        for full descriptions.
    metric : str or function, optional
        The distance metric to use in the case that y is a collection of
        observation vectors; ignored otherwise. See the ``pdist``
        function for a list of valid distance metrics. A custom distance
        function can also be used.

    Returns
    -------
    Z : ndarray
        The hierarchical clustering encoded as a linkage matrix.

    Notes
    -----
    1. For method 'single' an optimized algorithm based on minimum spanning
       tree is implemented. It has time complexity :math:`O(n^2)`.
       For methods 'complete', 'average', 'weighted' and 'ward' an algorithm
       called nearest-neighbors chain is implemented. It also has time
       complexity :math:`O(n^2)`.
       For other methods a naive algorithm is implemented with :math:`O(n^3)`
       time complexity.
       All algorithms use :math:`O(n^2)` memory.
       Refer to [1]_ for details about the algorithms.
    2. Methods 'centroid', 'median' and 'ward' are correctly defined only if
       Euclidean pairwise metric is used. If `y` is passed as precomputed
       pairwise distances, then it is a user responsibility to assure that
       these distances are in fact Euclidean, otherwise the produced result
       will be incorrect.

    See Also
    --------
    scipy.spatial.distance.pdist : pairwise distance metrics

    References
    ----------
    .. [1] Daniel Mullner, "Modern hierarchical, agglomerative clustering
           algorithms", :arXiv:`1109.2378v1`.
    """
    if method not in _LINKAGE_METHODS:
        raise ValueError("Invalid method: {0}".format(method))
    # The C routines require a contiguous float64 array.
    y = _convert_to_double(np.asarray(y, order='c'))
    if y.ndim == 1:
        # `y` is a condensed distance matrix; verify it has a valid
        # n-choose-2 length.
        distance.is_valid_y(y, throw=True, name='y')
        [y] = _copy_arrays_if_base_present([y])
    elif y.ndim == 2:
        # `y` is an observation matrix.  centroid/median/ward are only
        # correct under the Euclidean metric (see Notes above).
        if method in _EUCLIDEAN_METHODS and metric != 'euclidean':
            raise ValueError("Method '{0}' requires the distance metric "
                             "to be Euclidean".format(method))
        # A square, symmetric, hollow, non-negative matrix looks like an
        # uncondensed distance matrix passed by mistake -- warn, but still
        # treat the rows as observation vectors below.
        if y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0):
            if np.all(y >= 0) and np.allclose(y, y.T):
                _warning('The symmetric non-negative hollow observation '
                         'matrix looks suspiciously like an uncondensed '
                         'distance matrix')
        y = distance.pdist(y, metric)
    else:
        raise ValueError("`y` must be 1 or 2 dimensional.")
    if not np.all(np.isfinite(y)):
        raise ValueError("`y` must contain only finite values.")
    n = int(distance.num_obs_y(y))
    method_code = _LINKAGE_METHODS[method]
    # Dispatch to the fastest available C implementation for the method
    # (see Notes for the complexity of each branch).
    if method == 'single':
        return _hierarchy.mst_single_linkage(y, n)
    elif method in ['complete', 'average', 'weighted', 'ward']:
        return _hierarchy.nn_chain(y, n, method_code)
    else:
        return _hierarchy.linkage(y, n, method_code)
class ClusterNode:
    """
    A tree node class for representing a cluster.

    Leaf nodes correspond to original observations, while non-leaf nodes
    correspond to non-singleton clusters.

    The `to_tree` function converts a matrix returned by the linkage
    function into an easy-to-use tree representation.

    All parameter names are also attributes.

    Parameters
    ----------
    id : int
        The node id.
    left : ClusterNode instance, optional
        The left child tree node.
    right : ClusterNode instance, optional
        The right child tree node.
    dist : float, optional
        Distance for this cluster in the linkage matrix.
    count : int, optional
        The number of samples in this cluster.

    See Also
    --------
    to_tree : for converting a linkage matrix ``Z`` into a tree object.
    """
    def __init__(self, id, left=None, right=None, dist=0, count=1):
        if id < 0:
            raise ValueError('The id must be non-negative.')
        if dist < 0:
            raise ValueError('The distance must be non-negative.')
        # A node must have either zero children (leaf) or exactly two.
        if (left is None and right is not None) or \
           (left is not None and right is None):
            raise ValueError('Only full or proper binary trees are permitted.'
                             ' This node has one child.')
        if count < 1:
            raise ValueError('A cluster must contain at least one original '
                             'observation.')
        self.id = id
        self.left = left
        self.right = right
        self.dist = dist
        # For leaves the caller supplies the count; for internal nodes it
        # is derived from the children, so it equals the number of leaves
        # beneath this node.
        if self.left is None:
            self.count = count
        else:
            self.count = left.count + right.count
    def __lt__(self, node):
        # Ordering is by merge distance, so nodes sort bottom-up.
        if not isinstance(node, ClusterNode):
            raise ValueError("Can't compare ClusterNode "
                             "to type {}".format(type(node)))
        return self.dist < node.dist
    def __gt__(self, node):
        if not isinstance(node, ClusterNode):
            raise ValueError("Can't compare ClusterNode "
                             "to type {}".format(type(node)))
        return self.dist > node.dist
    def __eq__(self, node):
        # NOTE(review): equality is by distance only, not identity or id;
        # defining __eq__ without __hash__ makes instances unhashable on
        # Python 3.
        if not isinstance(node, ClusterNode):
            raise ValueError("Can't compare ClusterNode "
                             "to type {}".format(type(node)))
        return self.dist == node.dist
    def get_id(self):
        """
        The identifier of the target node.

        For ``0 <= i < n``, `i` corresponds to original observation i.
        For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed
        at iteration ``i-n``.

        Returns
        -------
        id : int
            The identifier of the target node.
        """
        return self.id
    def get_count(self):
        """
        The number of leaf nodes (original observations) belonging to
        the cluster node nd. If the target node is a leaf, 1 is
        returned.

        Returns
        -------
        get_count : int
            The number of leaf nodes below the target node.
        """
        return self.count
    def get_left(self):
        """
        Return a reference to the left child tree object.

        Returns
        -------
        left : ClusterNode
            The left child of the target node. If the node is a leaf,
            None is returned.
        """
        return self.left
    def get_right(self):
        """
        Returns a reference to the right child tree object.

        Returns
        -------
        right : ClusterNode
            The right child of the target node. If the node is a leaf,
            None is returned.
        """
        return self.right
    def is_leaf(self):
        """
        Returns True if the target node is a leaf.

        Returns
        -------
        leafness : bool
            True if the target node is a leaf node.
        """
        return self.left is None
    def pre_order(self, func=(lambda x: x.id)):
        """
        Performs pre-order traversal without recursive function calls.

        When a leaf node is first encountered, ``func`` is called with
        the leaf node as its argument, and its result is appended to
        the list.

        For example, the statement::

            ids = root.pre_order(lambda x: x.id)

        returns a list of the node ids corresponding to the leaf nodes
        of the tree as they appear from left to right.

        Parameters
        ----------
        func : function
            Applied to each leaf ClusterNode object in the pre-order traversal.
            Given the ``i``-th leaf node in the pre-order traversal ``n[i]``, the
            result of ``func(n[i])`` is stored in ``L[i]``. If not provided,
            the index of the original observation to which the node
            corresponds is used.

        Returns
        -------
        L : list
            The pre-order traversal.
        """
        # Do a preorder traversal, caching the result. To avoid having to do
        # recursion, we'll store the previous index we've visited in a vector.
        # `curNode` acts as an explicit DFS stack of size 2*count (enough
        # for the deepest possible path); `k` is the stack pointer.
        n = self.count
        curNode = [None] * (2 * n)
        lvisited = set()
        rvisited = set()
        curNode[0] = self
        k = 0
        preorder = []
        while k >= 0:
            nd = curNode[k]
            ndid = nd.id
            if nd.is_leaf():
                preorder.append(func(nd))
                k = k - 1
            else:
                # lvisited/rvisited record (by node id) which children of a
                # node have already been pushed, emulating the return points
                # of the recursive traversal.
                if ndid not in lvisited:
                    curNode[k + 1] = nd.left
                    lvisited.add(ndid)
                    k = k + 1
                elif ndid not in rvisited:
                    curNode[k + 1] = nd.right
                    rvisited.add(ndid)
                    k = k + 1
                # If we've visited the left and right of this non-leaf
                # node already, go up in the tree.
                else:
                    k = k - 1
        return preorder
# Module-level sentinel instances -- presumably kept for backwards
# compatibility or isinstance/type checks elsewhere; nothing in the
# visible code uses them (TODO confirm against the rest of the module).
_cnode_bare = ClusterNode(0)
_cnode_type = type(ClusterNode)
def _order_cluster_tree(Z):
    """
    Return the non-leaf nodes of a linkage tree, sorted by merge distance.

    Parameters
    ----------
    Z : scipy.cluster.linkage array
        The linkage matrix.

    Returns
    -------
    nodes : list
        A list of ClusterNode objects in increasing order of distance
        (i.e. bottom-up).
    """
    ordered = []
    # Breadth-first walk over the tree; each internal node is inserted
    # into `ordered` at its sorted position (ClusterNode sorts by dist).
    pending = deque([to_tree(Z)])
    while pending:
        current = pending.popleft()
        if current.is_leaf():
            continue
        bisect.insort_left(ordered, current)
        pending.append(current.get_right())
        pending.append(current.get_left())
    return ordered
def cut_tree(Z, n_clusters=None, height=None):
    """
    Given a linkage matrix Z, return the cut tree.

    Parameters
    ----------
    Z : scipy.cluster.linkage array
        The linkage matrix.
    n_clusters : array_like, optional
        Number of clusters in the tree at the cut point.
    height : array_like, optional
        The height at which to cut the tree. Only possible for ultrametric
        trees.

    Returns
    -------
    cutree : array
        An array indicating group membership at each agglomeration step. I.e.,
        for a full cut tree, in the first column each data point is in its own
        cluster. At the next step, two nodes are merged. Finally all singleton
        and non-singleton clusters are in one group. If `n_clusters` or
        `height` is given, the columns correspond to the columns of `n_clusters` or
        `height`.

    Examples
    --------
    >>> from scipy import cluster
    >>> np.random.seed(23)
    >>> X = np.random.randn(50, 4)
    >>> Z = cluster.hierarchy.ward(X)
    >>> cutree = cluster.hierarchy.cut_tree(Z, n_clusters=[5, 10])
    >>> cutree[:10]
    array([[0, 0],
           [1, 1],
           [2, 2],
           [3, 3],
           [3, 4],
           [2, 2],
           [0, 0],
           [1, 5],
           [3, 6],
           [4, 7]])
    """
    nobs = num_obs_linkage(Z)
    # Internal nodes in bottom-up (increasing distance) order; node i
    # corresponds to merge step i+1 below.
    nodes = _order_cluster_tree(Z)
    if height is not None and n_clusters is not None:
        raise ValueError("At least one of either height or n_clusters "
                         "must be None")
    elif height is None and n_clusters is None:  # return the full cut tree
        cols_idx = np.arange(nobs)
    elif height is not None:
        # Map each requested height to the number of merges completed at
        # or below that height.
        heights = np.array([x.dist for x in nodes])
        cols_idx = np.searchsorted(heights, height)
    else:
        # After k merges there are nobs - k clusters, so invert that
        # relation to find the merge step for each requested cluster count.
        cols_idx = nobs - np.searchsorted(np.arange(nobs), n_clusters)
    try:
        n_cols = len(cols_idx)
    except TypeError:  # scalar
        n_cols = 1
        cols_idx = np.array([cols_idx])
    groups = np.zeros((n_cols, nobs), dtype=int)
    last_group = np.arange(nobs)
    # Merge step 0 is the trivial labelling (every point its own cluster).
    if 0 in cols_idx:
        groups[0] = last_group
    for i, node in enumerate(nodes):
        # Merge step i+1: all leaves under `node` collapse into one label
        # (the smallest member label); labels above the largest merged
        # label shift down by one so labels stay contiguous from 0.
        idx = node.pre_order()
        this_group = last_group.copy()
        this_group[idx] = last_group[idx].min()
        this_group[this_group > last_group[idx].max()] -= 1
        if i + 1 in cols_idx:
            groups[np.where(i + 1 == cols_idx)[0]] = this_group
        last_group = this_group
    return groups.T
def to_tree(Z, rd=False):
    """
    Converts a linkage matrix into an easy-to-use tree object.

    The reference to the root `ClusterNode` object is returned (by default).

    Each `ClusterNode` object has a ``left``, ``right``, ``dist``, ``id``,
    and ``count`` attribute. The left and right attributes point to
    ClusterNode objects that were combined to generate the cluster.
    If both are None then the `ClusterNode` object is a leaf node, its count
    must be 1, and its distance is meaningless but set to 0.

    *Note: This function is provided for the convenience of the library
    user. ClusterNodes are not used as input to any of the functions in this
    library.*

    Parameters
    ----------
    Z : ndarray
        The linkage matrix in proper form (see the `linkage`
        function documentation).
    rd : bool, optional
        When False (default), a reference to the root `ClusterNode` object is
        returned.  Otherwise, a tuple ``(r, d)`` is returned.  ``r`` is a
        reference to the root node while ``d`` is a list of `ClusterNode`
        objects - one per original entry in the linkage matrix plus entries
        for all clustering steps.  If a cluster id is
        less than the number of samples ``n`` in the data that the linkage
        matrix describes, then it corresponds to a singleton cluster (leaf
        node).
        See `linkage` for more information on the assignment of cluster ids
        to clusters.

    Returns
    -------
    tree : ClusterNode or tuple (ClusterNode, list of ClusterNode)
        If ``rd`` is False, a `ClusterNode`.
        If ``rd`` is True, a list of length ``2*n - 1``, with ``n`` the number
        of samples.  See the description of `rd` above for more details.

    See Also
    --------
    linkage, is_valid_linkage, ClusterNode

    Examples
    --------
    >>> from scipy.cluster import hierarchy
    >>> x = np.random.rand(10).reshape(5, 2)
    >>> Z = hierarchy.linkage(x)
    >>> hierarchy.to_tree(Z)
    <scipy.cluster.hierarchy.ClusterNode object at ...
    >>> rootnode, nodelist = hierarchy.to_tree(Z, rd=True)
    >>> rootnode
    <scipy.cluster.hierarchy.ClusterNode object at ...
    >>> len(nodelist)
    9
    """
    Z = np.asarray(Z, order='c')
    is_valid_linkage(Z, throw=True, name='Z')

    # Number of original objects is equal to the number of rows plus 1.
    n = Z.shape[0] + 1

    # Create a list full of None's to store the node objects.
    d = [None] * (n * 2 - 1)

    # Create the nodes corresponding to the n original objects (leaves).
    for i in range(n):
        d[i] = ClusterNode(i)

    nd = None

    # Row i of Z merges clusters Z[i, 0] and Z[i, 1] into cluster n + i,
    # so both referenced ids must already exist in `d`.
    for i in range(n - 1):
        fi = int(Z[i, 0])
        fj = int(Z[i, 1])
        if fi > i + n:
            raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
                              'is used before it is formed. See row %d, '
                              'column 0') % fi)
        if fj > i + n:
            raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
                              'is used before it is formed. See row %d, '
                              'column 1') % fj)

        nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2])
        #                 ^ id   ^ left ^ right ^ dist
        # Cross-check the stored leaf count against the one derived from
        # the children by the ClusterNode constructor.
        if Z[i, 3] != nd.count:
            raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
                              'incorrect.') % i)
        d[n + i] = nd

    if rd:
        return (nd, d)
    else:
        return nd
def _convert_to_bool(X):
if X.dtype != bool:
X = X.astype(bool)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = X.astype(np.double)
if not X.flags.contiguous:
X = X.copy()
return X
def cophenet(Z, Y=None):
    """
    Calculates the cophenetic distances between each observation in
    the hierarchical clustering defined by the linkage ``Z``.

    Suppose ``p`` and ``q`` are original observations in
    disjoint clusters ``s`` and ``t``, respectively and
    ``s`` and ``t`` are joined by a direct parent cluster
    ``u``. The cophenetic distance between observations
    ``i`` and ``j`` is simply the distance between
    clusters ``s`` and ``t``.

    Parameters
    ----------
    Z : ndarray
        The hierarchical clustering encoded as an array
        (see `linkage` function).
    Y : ndarray (optional)
        Calculates the cophenetic correlation coefficient ``c`` of a
        hierarchical clustering defined by the linkage matrix `Z`
        of a set of :math:`n` observations in :math:`m`
        dimensions. `Y` is the condensed distance matrix from which
        `Z` was generated.

    Returns
    -------
    c : ndarray
        The cophentic correlation distance (if ``Y`` is passed).
    d : ndarray
        The cophenetic distance matrix in condensed form. The
        :math:`ij` th entry is the cophenetic distance between
        original observations :math:`i` and :math:`j`.
    """
    Z = np.asarray(Z, order='c')
    is_valid_linkage(Z, throw=True, name='Z')
    n = Z.shape[0] + 1

    # Condensed output buffer of size n choose 2, filled in by the C
    # routine.  The C code cannot handle arbitrary strides, so pass a
    # clean contiguous double array.
    zz = np.zeros((n * (n - 1)) // 2, dtype=np.double)
    Z = _convert_to_double(Z)
    _hierarchy.cophenetic_distances(Z, zz, int(n))
    if Y is None:
        return zz

    Y = np.asarray(Y, order='c')
    distance.is_valid_y(Y, throw=True, name='Y')

    # Pearson correlation between the original condensed distances and
    # the cophenetic distances.
    Yc = Y - Y.mean()
    Zc = zz - zz.mean()
    c = (Yc * Zc).sum() / np.sqrt((Yc ** 2).sum() * (Zc ** 2).sum())
    return (c, zz)
def inconsistent(Z, d=2):
    r"""
    Calculates inconsistency statistics on a linkage matrix.

    Parameters
    ----------
    Z : ndarray
        The :math:`(n-1)` by 4 matrix encoding the linkage (hierarchical
        clustering).  See `linkage` documentation for more information on its
        form.
    d : int, optional
        The number of links up to `d` levels below each non-singleton cluster.

    Returns
    -------
    R : ndarray
        A :math:`(n-1)` by 4 matrix where the ``i``'th row contains the link
        statistics for the non-singleton cluster ``i``. The link statistics are
        computed over the link heights for links :math:`d` levels below the
        cluster ``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
        deviation of the link heights, respectively; ``R[i,2]`` is the number
        of links included in the calculation; and ``R[i,3]`` is the
        inconsistency coefficient,

        .. math:: \frac{\mathtt{Z[i,2]} - \mathtt{R[i,0]}} {R[i,1]}

    Notes
    -----
    This function behaves similarly to the MATLAB(TM) ``inconsistent``
    function.
    """
    Z = np.asarray(Z, order='c')
    n_rows = Z.shape[0]
    is_valid_linkage(Z, throw=True, name='Z')
    # `d` must be a whole, non-negative number of levels.
    if d != np.floor(d) or d < 0:
        raise ValueError('The second argument d must be a nonnegative '
                         'integer value.')
    # The C code does not support striding, so hand it base arrays.
    [Z] = _copy_arrays_if_base_present([Z])
    n = n_rows + 1
    R = np.zeros((n - 1, 4), dtype=np.double)
    _hierarchy.inconsistent(Z, R, int(n), int(d))
    return R
def from_mlab_linkage(Z):
    """
    Converts a linkage matrix generated by MATLAB(TM) to a new
    linkage matrix compatible with this module.

    The conversion does two things:

    * the indices are converted from ``1..N`` to ``0..(N-1)`` form,
      and

    * a fourth column ``Z[:,3]`` is added where ``Z[i,3]`` represents the
      number of original observations (leaves) in the non-singleton
      cluster ``i``.

    This function is useful when loading in linkages from legacy data
    files generated by MATLAB.

    Parameters
    ----------
    Z : ndarray
        A linkage matrix generated by MATLAB(TM).

    Returns
    -------
    ZS : ndarray
        A linkage matrix compatible with ``scipy.cluster.hierarchy``.
    """
    Z = np.asarray(Z, dtype=np.double, order='c')
    shape = Z.shape
    # Empty input: nothing to convert.
    if len(shape) == 0 or (len(shape) == 1 and shape[0] == 0):
        return Z.copy()
    if len(shape) != 2:
        raise ValueError("The linkage array must be rectangular.")
    # Rectangular but with no rows: also nothing to convert.
    if shape[0] == 0:
        return Z.copy()
    converted = Z.copy()
    # Sanity-check that the cluster indices look 1-based.
    if converted[:, 0:2].min() != 1.0 and converted[:, 0:2].max() != 2 * shape[0]:
        raise ValueError('The format of the indices is not 1..N')
    # Shift MATLAB's 1-based cluster indices to 0-based.
    converted[:, 0:2] -= 1.0
    # Append the observation-count column computed by the C helper.
    counts = np.zeros((shape[0],), dtype=np.double)
    _hierarchy.calculate_cluster_sizes(converted, counts, int(shape[0]) + 1)
    return np.hstack([converted, counts.reshape(shape[0], 1)])
def to_mlab_linkage(Z):
    """
    Converts a linkage matrix to a MATLAB(TM) compatible one.

    Converts a linkage matrix ``Z`` generated by the linkage function
    of this module to a MATLAB(TM) compatible one. The return linkage
    matrix has the last column removed and the cluster indices are
    converted to ``1..N`` indexing.

    Parameters
    ----------
    Z : ndarray
        A linkage matrix generated by ``scipy.cluster.hierarchy``.

    Returns
    -------
    to_mlab_linkage : ndarray
        A linkage matrix compatible with MATLAB(TM)'s hierarchical
        clustering functions.

        The return linkage matrix has the last column removed
        and the cluster indices are converted to ``1..N`` indexing.
    """
    Z = np.asarray(Z, order='c', dtype=np.double)
    shape = Z.shape
    # Empty input passes through unchanged (as a copy).
    if len(shape) == 0 or (len(shape) == 1 and shape[0] == 0):
        return Z.copy()
    is_valid_linkage(Z, throw=True, name='Z')
    # Drop the observation-count column and switch to 1-based indices.
    result = Z[:, 0:3].copy()
    result[:, 0:2] += 1.0
    return result
def is_monotonic(Z):
    """
    Returns True if the linkage passed is monotonic.

    The linkage is monotonic if for every cluster :math:`s` and :math:`t`
    joined, the distance between them is no less than the distance
    between any previously joined clusters.

    Parameters
    ----------
    Z : ndarray
        The linkage matrix to check for monotonicity.

    Returns
    -------
    b : bool
        A boolean indicating whether the linkage is monotonic.
    """
    Z = np.asarray(Z, order='c')
    is_valid_linkage(Z, throw=True, name='Z')
    # Monotonic iff the merge heights never decrease from row to row.
    heights = Z[:, 2]
    return (heights[1:] >= heights[:-1]).all()
def is_valid_im(R, warning=False, throw=False, name=None):
    """Returns True if the inconsistency matrix passed is valid.

    It must be a :math:`n` by 4 array of doubles. The standard
    deviations ``R[:,1]`` must be nonnegative. The link counts
    ``R[:,2]`` must be positive and no greater than :math:`n-1`.

    Parameters
    ----------
    R : ndarray
        The inconsistency matrix to check for validity.
    warning : bool, optional
        When True, issues a Python warning if the linkage
        matrix passed is invalid.
    throw : bool, optional
        When True, throws a Python exception if the linkage
        matrix passed is invalid.
    name : str, optional
        This string refers to the variable name of the invalid
        linkage matrix.

    Returns
    -------
    b : bool
        True if the inconsistency matrix is valid.
    """
    R = np.asarray(R, order='c')
    # Variable-name prefix used in diagnostics, e.g. "'R' ".
    label = "%r " % name if name else ''
    result = True
    # Each check raises; the handler below converts the exception into a
    # False return (or re-raises / warns, per the flags).
    try:
        if type(R) != np.ndarray:
            raise TypeError('Variable %spassed as inconsistency matrix is not '
                            'a numpy array.' % label)
        if R.dtype != np.double:
            raise TypeError('Inconsistency matrix %smust contain doubles '
                            '(double).' % label)
        if len(R.shape) != 2:
            raise ValueError('Inconsistency matrix %smust have shape=2 (i.e. '
                             'be two-dimensional).' % label)
        if R.shape[1] != 4:
            raise ValueError('Inconsistency matrix %smust have 4 columns.' %
                             label)
        if R.shape[0] < 1:
            raise ValueError('Inconsistency matrix %smust have at least one '
                             'row.' % label)
        if (R[:, 0] < 0).any():
            raise ValueError('Inconsistency matrix %scontains negative link '
                             'height means.' % label)
        if (R[:, 1] < 0).any():
            raise ValueError('Inconsistency matrix %scontains negative link '
                             'height standard deviations.' % label)
        if (R[:, 2] < 0).any():
            raise ValueError('Inconsistency matrix %scontains negative link '
                             'counts.' % label)
    except Exception as e:
        if throw:
            raise
        if warning:
            _warning(str(e))
        result = False
    return result
def is_valid_linkage(Z, warning=False, throw=False, name=None):
    """
    Checks the validity of a linkage matrix.

    A linkage matrix is valid if it is a two dimensional array (type double)
    with :math:`n` rows and 4 columns. The first two columns must contain
    indices between 0 and :math:`2n-1`. For a given row ``i``, the following
    two expressions have to hold:

    .. math::

        0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1
        0 \\leq Z[i,1] \\leq i+n-1

    I.e. a cluster cannot join another cluster unless the cluster being joined
    has been generated.

    Parameters
    ----------
    Z : array_like
        Linkage matrix.
    warning : bool, optional
        When True, issues a Python warning if the linkage
        matrix passed is invalid.
    throw : bool, optional
        When True, throws a Python exception if the linkage
        matrix passed is invalid.
    name : str, optional
        This string refers to the variable name of the invalid
        linkage matrix.

    Returns
    -------
    b : bool
        True if the inconsistency matrix is valid.
    """
    Z = np.asarray(Z, order='c')
    valid = True
    # Variable-name prefix used in diagnostics, e.g. "'Z' ".
    name_str = "%r " % name if name else ''
    # Each check raises; the handler below converts the exception into a
    # False return value (or re-raises / warns, depending on the flags).
    try:
        if type(Z) != np.ndarray:
            raise TypeError('Passed linkage argument %sis not a valid array.' %
                            name_str)
        if Z.dtype != np.double:
            raise TypeError('Linkage matrix %smust contain doubles.' % name_str)
        if len(Z.shape) != 2:
            raise ValueError('Linkage matrix %smust have shape=2 (i.e. be '
                             'two-dimensional).' % name_str)
        if Z.shape[1] != 4:
            raise ValueError('Linkage matrix %smust have 4 columns.' % name_str)
        if Z.shape[0] == 0:
            raise ValueError('Linkage must be computed on at least two '
                             'observations.')
        n = Z.shape[0]
        # Element-wise sign checks are only meaningful with 2+ rows.
        if n > 1:
            if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()):
                raise ValueError('Linkage %scontains negative indices.' %
                                 name_str)
            if (Z[:, 2] < 0).any():
                raise ValueError('Linkage %scontains negative distances.' %
                                 name_str)
            if (Z[:, 3] < 0).any():
                raise ValueError('Linkage %scontains negative counts.' %
                                 name_str)
        # Structural checks: merge order and cluster reuse.
        if _check_hierarchy_uses_cluster_before_formed(Z):
            raise ValueError('Linkage %suses non-singleton cluster before '
                             'it is formed.' % name_str)
        if _check_hierarchy_uses_cluster_more_than_once(Z):
            raise ValueError('Linkage %suses the same cluster more than once.'
                             % name_str)
    except Exception as e:
        if throw:
            raise
        if warning:
            _warning(str(e))
        valid = False
    return valid
def _check_hierarchy_uses_cluster_before_formed(Z):
n = Z.shape[0] + 1
for i in xrange(0, n - 1):
if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
return True
return False
def _check_hierarchy_uses_cluster_more_than_once(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:
return True
chosen.add(Z[i, 0])
chosen.add(Z[i, 1])
return False
def _check_hierarchy_not_all_clusters_used(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
chosen.add(int(Z[i, 0]))
chosen.add(int(Z[i, 1]))
must_chosen = set(range(0, 2 * n - 2))
return len(must_chosen.difference(chosen)) > 0
def num_obs_linkage(Z):
    """
    Returns the number of original observations of the linkage matrix
    passed.

    Parameters
    ----------
    Z : ndarray
        The linkage matrix on which to perform the operation.

    Returns
    -------
    n : int
        The number of original observations in the linkage.
    """
    Z = np.asarray(Z, order='c')
    is_valid_linkage(Z, throw=True, name='Z')
    # Each of the n - 1 merge steps contributes one row, so n = rows + 1.
    return Z.shape[0] + 1
def correspond(Z, Y):
    """
    Checks for correspondence between linkage and condensed distance matrices.

    They must have the same number of original observations for
    the check to succeed.

    This function is useful as a sanity check in algorithms that make
    extensive use of linkage and distance matrices that must
    correspond to the same set of original observations.

    Parameters
    ----------
    Z : array_like
        The linkage matrix to check for correspondence.
    Y : array_like
        The condensed distance matrix to check for correspondence.

    Returns
    -------
    b : bool
        A boolean indicating whether the linkage matrix and distance
        matrix could possibly correspond to one another.
    """
    # Validate both inputs first; either raises on malformed data.
    is_valid_linkage(Z, throw=True)
    distance.is_valid_y(Y, throw=True)
    Z = np.asarray(Z, order='c')
    Y = np.asarray(Y, order='c')
    # They correspond iff both imply the same number of observations.
    return num_obs_linkage(Z) == distance.num_obs_y(Y)
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
    """
    Forms flat clusters from the hierarchical clustering defined by
    the given linkage matrix.

    Parameters
    ----------
    Z : ndarray
        The hierarchical clustering encoded with the matrix returned
        by the `linkage` function.
    t : float
        The threshold to apply when forming flat clusters.
    criterion : str, optional
        The criterion to use in forming flat clusters. This can
        be any of the following values:

          ``inconsistent`` : If a cluster node and all its
              descendants have an inconsistent value less than or equal
              to `t` then all its leaf descendants belong to the
              same flat cluster. When no non-singleton cluster meets
              this criterion, every node is assigned to its own
              cluster. (Default)

          ``distance`` : Forms flat clusters so that the original
              observations in each flat cluster have no greater a
              cophenetic distance than `t`.

          ``maxclust`` : Finds a minimum threshold ``r`` so that
              the cophenetic distance between any two original
              observations in the same flat cluster is no more than
              ``r`` and no more than `t` flat clusters are formed.

          ``monocrit`` : Forms a flat cluster from a cluster node c
              with index i when ``monocrit[j] <= t``.

              For example, to threshold on the maximum mean distance
              as computed in the inconsistency matrix R with a
              threshold of 0.8 do::

                  MR = maxRstat(Z, R, 3)
                  cluster(Z, t=0.8, criterion='monocrit', monocrit=MR)

          ``maxclust_monocrit`` : Forms a flat cluster from a
              non-singleton cluster node ``c`` when ``monocrit[i] <=
              r`` for all cluster indices ``i`` below and including
              ``c``. ``r`` is minimized such that no more than ``t``
              flat clusters are formed. monocrit must be
              monotonic. For example, to minimize the threshold t on
              maximum inconsistency values so that no more than 3 flat
              clusters are formed, do::

                  MI = maxinconsts(Z, R)
                  cluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)

    depth : int, optional
        The maximum depth to perform the inconsistency calculation.
        It has no meaning for the other criteria. Default is 2.
    R : ndarray, optional
        The inconsistency matrix to use for the 'inconsistent'
        criterion. This matrix is computed if not provided.
    monocrit : ndarray, optional
        An array of length n-1. `monocrit[i]` is the
        statistics upon which non-singleton i is thresholded. The
        monocrit vector must be monotonic, i.e. given a node c with
        index i, for all node indices j corresponding to nodes
        below c, ``monocrit[i] >= monocrit[j]``.

    Returns
    -------
    fcluster : ndarray
        An array of length ``n``. ``T[i]`` is the flat cluster number to
        which original observation ``i`` belongs.
    """
    Z = np.asarray(Z, order='c')
    is_valid_linkage(Z, throw=True, name='Z')

    n = Z.shape[0] + 1
    # Output array: T[i] receives the flat cluster id of observation i,
    # filled in by one of the C routines below.
    T = np.zeros((n,), dtype='i')

    # Since the C code does not support striding using strides.
    # The dimensions are used instead.
    [Z] = _copy_arrays_if_base_present([Z])

    # Dispatch to the C implementation matching the requested criterion.
    if criterion == 'inconsistent':
        if R is None:
            R = inconsistent(Z, depth)
        else:
            R = np.asarray(R, order='c')
            is_valid_im(R, throw=True, name='R')
            # Since the C code does not support striding using strides.
            # The dimensions are used instead.
            [R] = _copy_arrays_if_base_present([R])
        _hierarchy.cluster_in(Z, R, T, float(t), int(n))
    elif criterion == 'distance':
        _hierarchy.cluster_dist(Z, T, float(t), int(n))
    elif criterion == 'maxclust':
        _hierarchy.cluster_maxclust_dist(Z, T, int(n), int(t))
    elif criterion == 'monocrit':
        [monocrit] = _copy_arrays_if_base_present([monocrit])
        _hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))
    elif criterion == 'maxclust_monocrit':
        [monocrit] = _copy_arrays_if_base_present([monocrit])
        _hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))
    else:
        raise ValueError('Invalid cluster formation criterion: %s'
                         % str(criterion))
    return T
def fclusterdata(X, t, criterion='inconsistent',
                 metric='euclidean', depth=2, method='single', R=None):
    """
    Cluster observation data using a given metric.

    Clusters the original observations in the n-by-m data
    matrix X (n observations in m dimensions), using the given
    distance metric (Euclidean by default) to calculate distances
    between original observations, performs hierarchical clustering
    using the given linkage method ('single' by default), and forms
    flat clusters with `t` as the cut-off threshold.

    A one-dimensional array ``T`` of length ``n`` is returned. ``T[i]`` is
    the index of the flat cluster to which the original observation ``i``
    belongs.

    Parameters
    ----------
    X : (N, M) ndarray
        N by M data matrix with N observations in M dimensions.
    t : float
        The threshold to apply when forming flat clusters.
    criterion : str, optional
        Specifies the criterion for forming flat clusters. Valid
        values are 'inconsistent' (default), 'distance', or 'maxclust'
        cluster formation algorithms. See `fcluster` for descriptions.
    metric : str, optional
        The distance metric for calculating pairwise distances. See
        ``distance.pdist`` for descriptions and linkage to verify
        compatibility with the linkage method.
    depth : int, optional
        The maximum depth for the inconsistency calculation. See
        `inconsistent` for more information.
    method : str, optional
        The linkage method to use (single, complete, average,
        weighted, median, centroid, ward). See `linkage` for more
        information. Default is "single".
    R : ndarray, optional
        The inconsistency matrix. It will be computed if necessary
        if it is not passed.

    Returns
    -------
    fclusterdata : ndarray
        A vector of length n. T[i] is the flat cluster number to
        which original observation i belongs.

    See Also
    --------
    scipy.spatial.distance.pdist : pairwise distance metrics

    Notes
    -----
    This function is similar to the MATLAB function ``clusterdata``.
    """
    X = np.asarray(X, order='c', dtype=np.double)

    # np.asarray always yields a base ndarray, so in practice this check
    # only rejects input that is not two-dimensional.
    if type(X) != np.ndarray or len(X.shape) != 2:
        raise TypeError('The observation matrix X must be an n by m numpy '
                        'array.')

    # Pipeline: pairwise distances -> linkage -> (inconsistency) -> flat
    # cluster assignment.
    Y = distance.pdist(X, metric=metric)
    Z = linkage(Y, method=method)
    if R is None:
        R = inconsistent(Z, d=depth)
    else:
        R = np.asarray(R, order='c')
    T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
    return T
def leaves_list(Z):
    """
    Returns a list of leaf node ids.

    The return corresponds to the observation vector index as it appears
    in the tree from left to right. Z is a linkage matrix.

    Parameters
    ----------
    Z : ndarray
        The hierarchical clustering encoded as a matrix. `Z` is
        a linkage matrix. See `linkage` for more information.

    Returns
    -------
    leaves_list : ndarray
        The list of leaf node ids.
    """
    Z = np.asarray(Z, order='c')
    is_valid_linkage(Z, throw=True, name='Z')
    n = Z.shape[0] + 1
    # Output buffer for the left-to-right (pre-order) leaf traversal,
    # filled in by the C routine below.
    ML = np.zeros((n,), dtype='i')
    [Z] = _copy_arrays_if_base_present([Z])
    _hierarchy.prelist(Z, ML, int(n))
    return ML
# Maps number of leaves to leaf-label text size (in points).
#
# p <= 20, size="12"
# 20 < p <= 30, size="10"
# 30 < p <= 50, size="8"
# 50 < p <= 85, size="6"
# 85 < p, size="5"
_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
# Maps number of leaves to leaf-label rotation (in degrees):
# no rotation up to 20 leaves, 45 degrees up to 40, vertical beyond that.
_drotation = {20: 0, 40: 45, np.inf: 90}
# Thresholds sorted ascending so lookups can scan buckets in order.
_dtextsortedkeys = list(_dtextsizes.keys())
_dtextsortedkeys.sort()
_drotationsortedkeys = list(_drotation.keys())
_drotationsortedkeys.sort()
def _remove_dups(L):
"""
Removes duplicates AND preserves the original order of the elements.
The set class is not guaranteed to do this.
"""
seen_before = set([])
L2 = []
for i in L:
if i not in seen_before:
seen_before.add(i)
L2.append(i)
return L2
def _get_tick_text_size(p):
    """Return the leaf-label font size for a dendrogram with ``p`` leaves."""
    # Thresholds are pre-sorted ascending; the first bucket that fits wins.
    for cutoff in _dtextsortedkeys:
        if p <= cutoff:
            return _dtextsizes[cutoff]
def _get_tick_rotation(p):
    """Return the leaf-label rotation in degrees for ``p`` leaves."""
    # Thresholds are pre-sorted ascending; the first bucket that fits wins.
    for cutoff in _drotationsortedkeys:
        if p <= cutoff:
            return _drotation[cutoff]
def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
                     no_labels, color_list, leaf_font_size=None,
                     leaf_rotation=None, contraction_marks=None,
                     ax=None, above_threshold_color='b'):
    """Render an already-computed dendrogram onto a matplotlib Axes.

    ``icoords``/``dcoords`` are the per-link coordinate lists produced by
    `_dendrogram_calculate_info`, ``ivl`` the leaf labels, ``color_list``
    one color per link, and ``mh`` the maximum linkage height (used to
    size the dependent axis). When ``ax`` is None the current pylab axes
    is used and a redraw is triggered at the end.
    """
    # Import matplotlib here so that it's not imported unless dendrograms
    # are plotted. Raise an informative error if importing fails.
    try:
        # if an axis is provided, don't use pylab at all
        if ax is None:
            import matplotlib.pylab
        import matplotlib.patches
        import matplotlib.collections
    except ImportError:
        raise ImportError("You must install the matplotlib library to plot "
                          "the dendrogram. Use no_plot=True to calculate the "
                          "dendrogram without plotting.")

    if ax is None:
        ax = matplotlib.pylab.gca()
        # if we're using pylab, we want to trigger a draw at the end
        trigger_redraw = True
    else:
        trigger_redraw = False

    # Independent variable plot width: each leaf occupies a 10-unit slot.
    ivw = len(ivl) * 10
    # Dependent variable plot height: tallest link plus 5% headroom.
    dvw = mh + mh * 0.05

    # Leaf ticks sit at the center of each 10-unit leaf slot.
    iv_ticks = np.arange(5, len(ivl) * 10 + 5, 10)
    if orientation in ('top', 'bottom'):
        if orientation == 'top':
            ax.set_ylim([0, dvw])
            ax.set_xlim([0, ivw])
        else:
            ax.set_ylim([dvw, 0])
            ax.set_xlim([0, ivw])

        xlines = icoords
        ylines = dcoords
        if no_labels:
            ax.set_xticks([])
            ax.set_xticklabels([])
        else:
            ax.set_xticks(iv_ticks)

            if orientation == 'top':
                ax.xaxis.set_ticks_position('bottom')
            else:
                ax.xaxis.set_ticks_position('top')

            # Make the tick marks invisible because they cover up the links
            for line in ax.get_xticklines():
                line.set_visible(False)

            leaf_rot = float(_get_tick_rotation(len(ivl))) if (
                leaf_rotation is None) else leaf_rotation
            leaf_font = float(_get_tick_text_size(len(ivl))) if (
                leaf_font_size is None) else leaf_font_size
            ax.set_xticklabels(ivl, rotation=leaf_rot, size=leaf_font)

    elif orientation in ('left', 'right'):
        if orientation == 'left':
            ax.set_xlim([dvw, 0])
            ax.set_ylim([0, ivw])
        else:
            ax.set_xlim([0, dvw])
            ax.set_ylim([0, ivw])

        # Horizontal orientations swap the coordinate roles.
        xlines = dcoords
        ylines = icoords
        if no_labels:
            ax.set_yticks([])
            ax.set_yticklabels([])
        else:
            ax.set_yticks(iv_ticks)

            if orientation == 'left':
                ax.yaxis.set_ticks_position('right')
            else:
                ax.yaxis.set_ticks_position('left')

            # Make the tick marks invisible because they cover up the links
            for line in ax.get_yticklines():
                line.set_visible(False)

            leaf_font = float(_get_tick_text_size(len(ivl))) if (
                leaf_font_size is None) else leaf_font_size

            if leaf_rotation is not None:
                ax.set_yticklabels(ivl, rotation=leaf_rotation, size=leaf_font)
            else:
                ax.set_yticklabels(ivl, size=leaf_font)

    # Let's use collections instead. This way there is a separate legend item
    # for each tree grouping, rather than stupidly one for each line segment.
    colors_used = _remove_dups(color_list)
    color_to_lines = {}
    for color in colors_used:
        color_to_lines[color] = []
    for (xline, yline, color) in zip(xlines, ylines, color_list):
        color_to_lines[color].append(list(zip(xline, yline)))

    colors_to_collections = {}
    # Construct the collections.
    for color in colors_used:
        coll = matplotlib.collections.LineCollection(color_to_lines[color],
                                                     colors=(color,))
        colors_to_collections[color] = coll

    # Add all the groupings below the color threshold.
    for color in colors_used:
        if color != above_threshold_color:
            ax.add_collection(colors_to_collections[color])
    # If there's a grouping of links above the color threshold, it goes last.
    if above_threshold_color in colors_to_collections:
        ax.add_collection(colors_to_collections[above_threshold_color])

    # Draw small translucent ellipses marking the heights of clusters that
    # were contracted away by truncation.
    if contraction_marks is not None:
        Ellipse = matplotlib.patches.Ellipse
        for (x, y) in contraction_marks:
            if orientation in ('left', 'right'):
                e = Ellipse((y, x), width=dvw / 100, height=1.0)
            else:
                e = Ellipse((x, y), width=1.0, height=dvw / 100)
            ax.add_artist(e)
            e.set_clip_box(ax.bbox)
            e.set_alpha(0.5)
            e.set_facecolor('k')

    if trigger_redraw:
        matplotlib.pylab.draw_if_interactive()
# Default cycle of colors applied to links below ``color_threshold``;
# mutated in place by `set_link_color_palette`.
_link_line_colors = ['g', 'r', 'c', 'm', 'y', 'k']
def set_link_color_palette(palette):
    """
    Set list of matplotlib color codes for use by dendrogram.

    Note that this palette is global (i.e. setting it once changes the colors
    for all subsequent calls to `dendrogram`) and that it affects only the
    colors below ``color_threshold``.

    Note that `dendrogram` also accepts a custom coloring function through its
    ``link_color_func`` keyword, which is more flexible and non-global.

    Parameters
    ----------
    palette : list of str or None
        A list of matplotlib color codes. The order of the color codes is the
        order in which the colors are cycled through when color thresholding in
        the dendrogram.

        If ``None``, resets the palette to its default (which is
        ``['g', 'r', 'c', 'm', 'y', 'k']``).

    Returns
    -------
    None

    See Also
    --------
    dendrogram

    Notes
    -----
    Ability to reset the palette with ``None`` added in Scipy 0.17.0.

    Examples
    --------
    >>> from scipy.cluster import hierarchy
    >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268., 400.,
    ...                    754., 564., 138., 219., 869., 669.])
    >>> Z = hierarchy.linkage(ytdist, 'single')
    >>> dn = hierarchy.dendrogram(Z, no_plot=True)
    >>> dn['color_list']
    ['g', 'b', 'b', 'b', 'b']
    >>> hierarchy.set_link_color_palette(['c', 'm', 'y', 'k'])
    >>> dn = hierarchy.dendrogram(Z, no_plot=True)
    >>> dn['color_list']
    ['c', 'b', 'b', 'b', 'b']
    >>> dn = hierarchy.dendrogram(Z, no_plot=True, color_threshold=267,
    ...                           above_threshold_color='k')
    >>> dn['color_list']
    ['c', 'm', 'm', 'k', 'k']

    Now reset the color palette to its default:

    >>> hierarchy.set_link_color_palette(None)
    """
    if palette is None:
        # reset to its default
        palette = ['g', 'r', 'c', 'm', 'y', 'k']
    elif not isinstance(palette, (list, tuple)):
        # isinstance (rather than an exact type comparison) also admits
        # list/tuple subclasses, which behave identically here.
        raise TypeError("palette must be a list or tuple")
    if not all(isinstance(p, string_types) for p in palette):
        raise TypeError("all palette list elements must be color strings")

    # Replace the contents of the module-level list in place so any code
    # already holding a reference to _link_line_colors sees the new palette.
    _link_line_colors[:] = list(palette)
def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
               get_leaves=True, orientation='top', labels=None,
               count_sort=False, distance_sort=False, show_leaf_counts=True,
               no_plot=False, no_labels=False, leaf_font_size=None,
               leaf_rotation=None, leaf_label_func=None,
               show_contracted=False, link_color_func=None, ax=None,
               above_threshold_color='b'):
    """
    Plots the hierarchical clustering as a dendrogram.

    The dendrogram illustrates how each cluster is
    composed by drawing a U-shaped link between a non-singleton
    cluster and its children. The height of the top of the U-link is
    the distance between its children clusters. It is also the
    cophenetic distance between original observations in the two
    children clusters. It is expected that the distances in Z[:,2] be
    monotonic, otherwise crossings appear in the dendrogram.

    Parameters
    ----------
    Z : ndarray
        The linkage matrix encoding the hierarchical clustering to
        render as a dendrogram. See the ``linkage`` function for more
        information on the format of ``Z``.
    p : int, optional
        The ``p`` parameter for ``truncate_mode``.
    truncate_mode : str, optional
        The dendrogram can be hard to read when the original
        observation matrix from which the linkage is derived is
        large. Truncation is used to condense the dendrogram. There
        are several modes:

        ``None/'none'``
          No truncation is performed (Default).

        ``'lastp'``
          The last ``p`` non-singleton formed in the linkage are the only
          non-leaf nodes in the linkage; they correspond to rows
          ``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
          contracted into leaf nodes.

        ``'mlab'``
          This corresponds to MATLAB(TM) behavior. (not implemented yet)

        ``'level'/'mtica'``
          No more than ``p`` levels of the dendrogram tree are displayed.
          This corresponds to Mathematica(TM) behavior.

    color_threshold : double, optional
        For brevity, let :math:`t` be the ``color_threshold``.
        Colors all the descendent links below a cluster node
        :math:`k` the same color if :math:`k` is the first node below
        the cut threshold :math:`t`. All links connecting nodes with
        distances greater than or equal to the threshold are colored
        blue. If :math:`t` is less than or equal to zero, all nodes
        are colored blue. If ``color_threshold`` is None or
        'default', corresponding with MATLAB(TM) behavior, the
        threshold is set to ``0.7*max(Z[:,2])``.
    get_leaves : bool, optional
        Includes a list ``R['leaves']=H`` in the result
        dictionary. For each :math:`i`, ``H[i] == j``, cluster node
        ``j`` appears in position ``i`` in the left-to-right traversal
        of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
    orientation : str, optional
        The direction to plot the dendrogram, which can be any
        of the following strings:

        ``'top'``
          Plots the root at the top, and plot descendent links going downwards.
          (default).

        ``'bottom'``
          Plots the root at the bottom, and plot descendent links going
          upwards.

        ``'left'``
          Plots the root at the left, and plot descendent links going right.

        ``'right'``
          Plots the root at the right, and plot descendent links going left.

    labels : ndarray, optional
        By default ``labels`` is None so the index of the original observation
        is used to label the leaf nodes. Otherwise, this is an :math:`n`
        -sized list (or tuple). The ``labels[i]`` value is the text to put
        under the :math:`i` th leaf node only if it corresponds to an original
        observation and not a non-singleton cluster.
    count_sort : str or bool, optional
        For each node n, the order (visually, from left-to-right) n's
        two descendent links are plotted is determined by this
        parameter, which can be any of the following values:

        ``False``
          Nothing is done.

        ``'ascending'`` or ``True``
          The child with the minimum number of original objects in its cluster
          is plotted first.

        ``'descendent'``
          The child with the maximum number of original objects in its cluster
          is plotted first.

        Note ``distance_sort`` and ``count_sort`` cannot both be True.
    distance_sort : str or bool, optional
        For each node n, the order (visually, from left-to-right) n's
        two descendent links are plotted is determined by this
        parameter, which can be any of the following values:

        ``False``
          Nothing is done.

        ``'ascending'`` or ``True``
          The child with the minimum distance between its direct descendents is
          plotted first.

        ``'descending'``
          The child with the maximum distance between its direct descendents is
          plotted first.

        Note ``distance_sort`` and ``count_sort`` cannot both be True.
    show_leaf_counts : bool, optional
        When True, leaf nodes representing :math:`k>1` original
        observation are labeled with the number of observations they
        contain in parentheses.
    no_plot : bool, optional
        When True, the final rendering is not performed. This is
        useful if only the data structures computed for the rendering
        are needed or if matplotlib is not available.
    no_labels : bool, optional
        When True, no labels appear next to the leaf nodes in the
        rendering of the dendrogram.
    leaf_rotation : double, optional
        Specifies the angle (in degrees) to rotate the leaf
        labels. When unspecified, the rotation is based on the number of
        nodes in the dendrogram (default is 0).
    leaf_font_size : int, optional
        Specifies the font size (in points) of the leaf labels. When
        unspecified, the size based on the number of nodes in the
        dendrogram.
    leaf_label_func : lambda or function, optional
        When leaf_label_func is a callable function, for each
        leaf with cluster index :math:`k < 2n-1`. The function
        is expected to return a string with the label for the
        leaf.

        Indices :math:`k < n` correspond to original observations
        while indices :math:`k \\geq n` correspond to non-singleton
        clusters.

        For example, to label singletons with their node id and
        non-singletons with their id, count, and inconsistency
        coefficient, simply do::

            # First define the leaf label function.
            def llf(id):
                if id < n:
                    return str(id)
                else:
                    return '[%d %d %1.2f]' % (id, count, R[n-id,3])

            # The text for the leaf nodes is going to be big so force
            # a rotation of 90 degrees.
            dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)

    show_contracted : bool, optional
        When True the heights of non-singleton nodes contracted
        into a leaf node are plotted as crosses along the link
        connecting that leaf node. This really is only useful when
        truncation is used (see ``truncate_mode`` parameter).
    link_color_func : callable, optional
        If given, `link_color_function` is called with each non-singleton id
        corresponding to each U-shaped link it will paint. The function is
        expected to return the color to paint the link, encoded as a matplotlib
        color string code. For example::

            dendrogram(Z, link_color_func=lambda k: colors[k])

        colors the direct links below each untruncated non-singleton node
        ``k`` using ``colors[k]``.
    ax : matplotlib Axes instance, optional
        If None and `no_plot` is not True, the dendrogram will be plotted
        on the current axes. Otherwise if `no_plot` is not True the
        dendrogram will be plotted on the given ``Axes`` instance. This can be
        useful if the dendrogram is part of a more complex figure.
    above_threshold_color : str, optional
        This matplotlib color string sets the color of the links above the
        color_threshold. The default is 'b'.

    Returns
    -------
    R : dict
        A dictionary of data structures computed to render the
        dendrogram. Its has the following keys:

        ``'color_list'``
          A list of color names. The k'th element represents the color of the
          k'th link.

        ``'icoord'`` and ``'dcoord'``
          Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``
          where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``
          where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is
          ``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.

        ``'ivl'``
          A list of labels corresponding to the leaf nodes.

        ``'leaves'``
          For each i, ``H[i] == j``, cluster node ``j`` appears in position
          ``i`` in the left-to-right traversal of the leaves, where
          :math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
          ``i``-th leaf node corresponds to an original observation.
          Otherwise, it corresponds to a non-singleton cluster.

    See Also
    --------
    linkage, set_link_color_palette

    Examples
    --------
    >>> from scipy.cluster import hierarchy
    >>> import matplotlib.pyplot as plt

    A very basic example:

    >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268.,
    ...                    400., 754., 564., 138., 219., 869., 669.])
    >>> Z = hierarchy.linkage(ytdist, 'single')
    >>> plt.figure()
    >>> dn = hierarchy.dendrogram(Z)

    Now plot in given axes, improve the color scheme and use both vertical and
    horizontal orientations:

    >>> hierarchy.set_link_color_palette(['m', 'c', 'y', 'k'])
    >>> fig, axes = plt.subplots(1, 2, figsize=(8, 3))
    >>> dn1 = hierarchy.dendrogram(Z, ax=axes[0], above_threshold_color='y',
    ...                            orientation='top')
    >>> dn2 = hierarchy.dendrogram(Z, ax=axes[1], above_threshold_color='#bcbddc',
    ...                            orientation='right')
    >>> hierarchy.set_link_color_palette(None)  # reset to default after use
    >>> plt.show()
    """
    # This feature was thought about but never implemented (still useful?):
    #
    # ... = dendrogram(..., leaves_order=None)
    #
    # Plots the leaves in the order specified by a vector of
    # original observation indices. If the vector contains duplicates
    # or results in a crossing, an exception will be thrown. Passing
    # None orders leaf nodes based on the order they appear in the
    # pre-order traversal.
    Z = np.asarray(Z, order='c')

    if orientation not in ["top", "left", "bottom", "right"]:
        raise ValueError("orientation must be one of 'top', 'left', "
                         "'bottom', or 'right'")

    is_valid_linkage(Z, throw=True, name='Z')
    Zs = Z.shape
    n = Zs[0] + 1
    if type(p) in (int, float):
        p = int(p)
    else:
        raise TypeError('The second argument must be a number')

    if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):
        raise ValueError('Invalid truncation mode.')

    # For 'lastp'/'mlab', a p outside (0, n] means "show everything".
    if truncate_mode == 'lastp' or truncate_mode == 'mlab':
        if p > n or p == 0:
            p = n

    # For level truncation, a non-positive p disables the depth limit.
    if truncate_mode == 'mtica' or truncate_mode == 'level':
        if p <= 0:
            p = np.inf

    if get_leaves:
        lvs = []
    else:
        lvs = None

    # These lists are shared with (and filled in by)
    # _dendrogram_calculate_info below; current_color and
    # currently_below_threshold are single-element lists so the recursion
    # can mutate them in place.
    icoord_list = []
    dcoord_list = []
    color_list = []
    current_color = [0]
    currently_below_threshold = [False]
    ivl = []  # list of leaves

    if color_threshold is None or (isinstance(color_threshold, string_types) and
                                   color_threshold == 'default'):
        color_threshold = max(Z[:, 2]) * 0.7

    R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,
         'leaves': lvs, 'color_list': color_list}

    # Empty list will be filled in _dendrogram_calculate_info
    contraction_marks = [] if show_contracted else None

    # Start the recursion at the root cluster, which has index 2n - 2.
    _dendrogram_calculate_info(
        Z=Z, p=p,
        truncate_mode=truncate_mode,
        color_threshold=color_threshold,
        get_leaves=get_leaves,
        orientation=orientation,
        labels=labels,
        count_sort=count_sort,
        distance_sort=distance_sort,
        show_leaf_counts=show_leaf_counts,
        i=2*n - 2,
        iv=0.0,
        ivl=ivl,
        n=n,
        icoord_list=icoord_list,
        dcoord_list=dcoord_list,
        lvs=lvs,
        current_color=current_color,
        color_list=color_list,
        currently_below_threshold=currently_below_threshold,
        leaf_label_func=leaf_label_func,
        contraction_marks=contraction_marks,
        link_color_func=link_color_func,
        above_threshold_color=above_threshold_color)

    if not no_plot:
        mh = max(Z[:, 2])
        _plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,
                         no_labels, color_list,
                         leaf_font_size=leaf_font_size,
                         leaf_rotation=leaf_rotation,
                         contraction_marks=contraction_marks,
                         ax=ax,
                         above_threshold_color=above_threshold_color)

    return R
def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
# If leaf node labels are to be displayed...
if ivl is not None:
# If a leaf_label_func has been provided, the label comes from the
# string returned from the leaf_label_func, which is a function
# passed to dendrogram.
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
# Otherwise, if the dendrogram caller has passed a labels list
# for the leaf nodes, use it.
if labels is not None:
ivl.append(labels[int(i - n)])
else:
# Otherwise, use the id as the label for the leaf.x
ivl.append(str(int(i)))
def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels, show_leaf_counts):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
if ivl is not None:
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
if show_leaf_counts:
ivl.append("(" + str(int(Z[i - n, 3])) + ")")
else:
ivl.append("")
def _append_contraction_marks(Z, iv, i, n, contraction_marks):
    # Record (independent-variable position, merge height) pairs for every
    # non-singleton cluster hidden beneath truncated node ``i`` by walking
    # both of its children; used to draw contraction markers on the link.
    _append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
    _append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
if i >= n:
contraction_marks.append((iv, Z[i - n, 2]))
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _dendrogram_calculate_info(Z, p, truncate_mode,
                               color_threshold=np.inf, get_leaves=True,
                               orientation='top', labels=None,
                               count_sort=False, distance_sort=False,
                               show_leaf_counts=False, i=-1, iv=0.0,
                               ivl=[], n=0, icoord_list=[], dcoord_list=[],
                               lvs=None, mhr=False,
                               current_color=[], color_list=[],
                               currently_below_threshold=[],
                               leaf_label_func=None, level=0,
                               contraction_marks=None,
                               link_color_func=None,
                               above_threshold_color='b'):
    """
    Calculates the endpoints of the links as well as the labels for the
    the dendrogram rooted at the node with index i. iv is the independent
    variable value to plot the left-most leaf node below the root node i
    (if orientation='top', this would be the left-most x value where the
    plotting of this root node i and its descendents should begin).

    ivl is a list to store the labels of the leaf nodes. The leaf_label_func
    is called whenever ivl != None, labels == None, and
    leaf_label_func != None. When ivl != None and labels != None, the
    labels list is used only for labeling the leaf nodes. When
    ivl == None, no labels are generated for leaf nodes.

    When get_leaves==True, a list of leaves is built as they are visited
    in the dendrogram.

    Returns a tuple with l being the independent variable coordinate that
    corresponds to the midpoint of cluster to the left of cluster i if
    i is non-singleton, otherwise the independent coordinate of the leaf
    node if i is a leaf node.

    Returns
    -------
    A tuple (left, w, h, md), where:

      * left is the independent variable coordinate of the center of the
        the U of the subtree

      * w is the amount of space used for the subtree (in independent
        variable units)

      * h is the height of the subtree in dependent variable units

      * md is the ``max(Z[*,2]``) for all nodes ``*`` below and including
        the target node.

    Notes
    -----
    NOTE(review): the mutable default arguments (``ivl=[]``,
    ``icoord_list=[]``, ...) would be shared across calls if a caller ever
    omitted them; `dendrogram` and the recursive calls below always pass
    them explicitly.
    """
    if n == 0:
        raise ValueError("Invalid singleton cluster count n.")

    if i == -1:
        raise ValueError("Invalid root cluster index i.")

    if truncate_mode == 'lastp':
        # If the node is a leaf node but corresponds to a non-single cluster,
        # its label is either the empty string or the number of original
        # observations belonging to cluster i.
        if 2 * n - p > i >= n:
            d = Z[i - n, 2]
            _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
                                           leaf_label_func, i, labels,
                                           show_leaf_counts)
            if contraction_marks is not None:
                _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
            return (iv + 5.0, 10.0, 0.0, d)
        elif i < n:
            _append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
                                        leaf_label_func, i, labels)
            return (iv + 5.0, 10.0, 0.0, 0.0)
    elif truncate_mode in ('mtica', 'level'):
        if i > n and level > p:
            d = Z[i - n, 2]
            _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
                                           leaf_label_func, i, labels,
                                           show_leaf_counts)
            if contraction_marks is not None:
                _append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
            return (iv + 5.0, 10.0, 0.0, d)
        elif i < n:
            _append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
                                        leaf_label_func, i, labels)
            return (iv + 5.0, 10.0, 0.0, 0.0)
    elif truncate_mode in ('mlab',):
        pass

    # Otherwise, only truncate if we have a leaf node.
    #
    # If the truncate_mode is mlab, the linkage has been modified
    # with the truncated tree.
    #
    # Only place leaves if they correspond to original observations.
    if i < n:
        _append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
                                    leaf_label_func, i, labels)
        return (iv + 5.0, 10.0, 0.0, 0.0)

    # !!! Otherwise, we don't have a leaf node, so work on plotting a
    # non-leaf node.
    # Actual indices of a and b
    aa = int(Z[i - n, 0])
    ab = int(Z[i - n, 1])
    if aa > n:
        # The number of singletons below cluster a
        na = Z[aa - n, 3]
        # The distance between a's two direct children.
        da = Z[aa - n, 2]
    else:
        na = 1
        da = 0.0
    if ab > n:
        nb = Z[ab - n, 3]
        db = Z[ab - n, 2]
    else:
        nb = 1
        db = 0.0

    # Decide which child subtree (ua) is drawn first (left / bottom)
    # according to the requested sort mode.
    if count_sort == 'ascending' or count_sort == True:
        # If a has a count greater than b, it and its descendents should
        # be drawn to the right. Otherwise, to the left.
        if na > nb:
            # The cluster index to draw to the left (ua) will be ab
            # and the one to draw to the right (ub) will be aa
            ua = ab
            ub = aa
        else:
            ua = aa
            ub = ab
    elif count_sort == 'descending':
        # If a has a count less than or equal to b, it and its
        # descendents should be drawn to the left. Otherwise, to
        # the right.
        if na > nb:
            ua = aa
            ub = ab
        else:
            ua = ab
            ub = aa
    elif distance_sort == 'ascending' or distance_sort == True:
        # If a has a distance greater than b, it and its descendents should
        # be drawn to the right. Otherwise, to the left.
        if da > db:
            ua = ab
            ub = aa
        else:
            ua = aa
            ub = ab
    elif distance_sort == 'descending':
        # If a has a distance less than or equal to b, it and its
        # descendents should be drawn to the left. Otherwise, to
        # the right.
        if da > db:
            ua = aa
            ub = ab
        else:
            ua = ab
            ub = aa
    else:
        ua = aa
        ub = ab

    # Updated iv variable and the amount of space used.
    (uiva, uwa, uah, uamd) = \
        _dendrogram_calculate_info(
            Z=Z, p=p,
            truncate_mode=truncate_mode,
            color_threshold=color_threshold,
            get_leaves=get_leaves,
            orientation=orientation,
            labels=labels,
            count_sort=count_sort,
            distance_sort=distance_sort,
            show_leaf_counts=show_leaf_counts,
            i=ua, iv=iv, ivl=ivl, n=n,
            icoord_list=icoord_list,
            dcoord_list=dcoord_list, lvs=lvs,
            current_color=current_color,
            color_list=color_list,
            currently_below_threshold=currently_below_threshold,
            leaf_label_func=leaf_label_func,
            level=level + 1, contraction_marks=contraction_marks,
            link_color_func=link_color_func,
            above_threshold_color=above_threshold_color)

    # Pick this link's color. current_color/currently_below_threshold are
    # one-element lists mutated in place so the color state is shared
    # across the whole recursion.
    h = Z[i - n, 2]
    if h >= color_threshold or color_threshold <= 0:
        c = above_threshold_color

        if currently_below_threshold[0]:
            current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
        currently_below_threshold[0] = False
    else:
        currently_below_threshold[0] = True
        c = _link_line_colors[current_color[0]]

    (uivb, uwb, ubh, ubmd) = \
        _dendrogram_calculate_info(
            Z=Z, p=p,
            truncate_mode=truncate_mode,
            color_threshold=color_threshold,
            get_leaves=get_leaves,
            orientation=orientation,
            labels=labels,
            count_sort=count_sort,
            distance_sort=distance_sort,
            show_leaf_counts=show_leaf_counts,
            i=ub, iv=iv + uwa, ivl=ivl, n=n,
            icoord_list=icoord_list,
            dcoord_list=dcoord_list, lvs=lvs,
            current_color=current_color,
            color_list=color_list,
            currently_below_threshold=currently_below_threshold,
            leaf_label_func=leaf_label_func,
            level=level + 1, contraction_marks=contraction_marks,
            link_color_func=link_color_func,
            above_threshold_color=above_threshold_color)

    max_dist = max(uamd, ubmd, h)

    # Endpoints of this U-shaped link: two verticals at the child
    # midpoints joined by a horizontal at height h.
    icoord_list.append([uiva, uiva, uivb, uivb])
    dcoord_list.append([uah, h, h, ubh])
    if link_color_func is not None:
        v = link_color_func(int(i))
        if not isinstance(v, string_types):
            raise TypeError("link_color_func must return a matplotlib "
                            "color string!")
        color_list.append(v)
    else:
        color_list.append(c)

    return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
def is_isomorphic(T1, T2):
    """
    Determines if two different cluster assignments are equivalent.

    Two assignments are equivalent when there is a consistent mapping
    from each cluster id used in `T1` to the cluster id used in `T2`
    at the same position.

    Parameters
    ----------
    T1 : array_like
        An assignment of singleton cluster ids to flat cluster ids.
    T2 : array_like
        An assignment of singleton cluster ids to flat cluster ids.

    Returns
    -------
    b : bool
        Whether the flat cluster assignments `T1` and `T2` are
        equivalent.
    """
    T1 = np.asarray(T1, order='c')
    T2 = np.asarray(T2, order='c')

    if type(T1) != np.ndarray:
        raise TypeError('T1 must be a numpy array.')
    if type(T2) != np.ndarray:
        raise TypeError('T2 must be a numpy array.')

    T1S = T1.shape
    T2S = T2.shape

    if len(T1S) != 1:
        raise ValueError('T1 must be one-dimensional.')
    if len(T2S) != 1:
        raise ValueError('T2 must be one-dimensional.')
    if T1S[0] != T2S[0]:
        raise ValueError('T1 and T2 must have the same number of elements.')

    # Build the T1 -> T2 id mapping incrementally; a conflict (one T1 id
    # mapped to two different T2 ids) means the assignments differ.
    # Iterating with zip replaces the previous index loop over the
    # Python-2-only ``xrange`` name.
    d = {}
    for a, b in zip(T1, T2):
        if a in d:
            if d[a] != b:
                return False
        else:
            d[a] = b
    return True
def maxdists(Z):
    """
    Returns the maximum distance between any non-singleton cluster.

    Parameters
    ----------
    Z : ndarray
        The hierarchical clustering encoded as a matrix. See
        ``linkage`` for more information.

    Returns
    -------
    maxdists : ndarray
        A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
        the maximum distance between any cluster (including
        singletons) below and including the node with index i. More
        specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
        set of all node indices below and including node i.
    """
    Z = np.asarray(Z, order='c', dtype=np.double)
    is_valid_linkage(Z, throw=True, name='Z')

    num_obs = Z.shape[0] + 1
    max_dists = np.zeros((num_obs - 1,))
    # The helper hands back copies when the inputs are views of another
    # array, before the buffer is passed to the C extension.
    [Z] = _copy_arrays_if_base_present([Z])
    _hierarchy.get_max_dist_for_each_cluster(Z, max_dists, int(num_obs))
    return max_dists
def maxinconsts(Z, R):
    """
    Returns the maximum inconsistency coefficient for each
    non-singleton cluster and its descendents.

    Parameters
    ----------
    Z : ndarray
        The hierarchical clustering encoded as a matrix. See
        `linkage` for more information.
    R : ndarray
        The inconsistency matrix.

    Returns
    -------
    MI : ndarray
        A monotonic ``(n-1)``-sized numpy array of doubles.
    """
    Z = np.asarray(Z, order='c')
    R = np.asarray(R, order='c')
    is_valid_linkage(Z, throw=True, name='Z')
    is_valid_im(R, throw=True, name='R')

    if Z.shape[0] != R.shape[0]:
        raise ValueError("The inconsistency matrix and linkage matrix each "
                         "have a different number of rows.")

    num_obs = Z.shape[0] + 1
    max_incons = np.zeros((num_obs - 1,))
    [Z, R] = _copy_arrays_if_base_present([Z, R])
    # Column 3 of R holds the inconsistency coefficient.
    _hierarchy.get_max_Rfield_for_each_cluster(Z, R, max_incons, int(num_obs), 3)
    return max_incons
def maxRstat(Z, R, i):
    """
    Returns the maximum statistic for each non-singleton cluster and
    its descendents.

    Parameters
    ----------
    Z : array_like
        The hierarchical clustering encoded as a matrix. See `linkage` for more
        information.
    R : array_like
        The inconsistency matrix.
    i : int
        The column of `R` to use as the statistic.

    Returns
    -------
    MR : ndarray
        Calculates the maximum statistic for the i'th column of the
        inconsistency matrix `R` for each non-singleton cluster
        node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where
        ``Q(j)`` the set of all node ids corresponding to nodes below
        and including ``j``.
    """
    Z = np.asarray(Z, order='c')
    R = np.asarray(R, order='c')
    is_valid_linkage(Z, throw=True, name='Z')
    is_valid_im(R, throw=True, name='R')

    # Exact ``type`` comparison (not isinstance) deliberately rejects
    # bools and numpy integer scalars, as before.
    if type(i) is not int:
        raise TypeError('The third argument must be an integer.')
    if not (0 <= i <= 3):
        raise ValueError('i must be an integer between 0 and 3 inclusive.')
    if Z.shape[0] != R.shape[0]:
        raise ValueError("The inconsistency matrix and linkage matrix each "
                         "have a different number of rows.")

    num_obs = Z.shape[0] + 1
    max_stats = np.zeros((num_obs - 1,))
    [Z, R] = _copy_arrays_if_base_present([Z, R])
    _hierarchy.get_max_Rfield_for_each_cluster(Z, R, max_stats, int(num_obs), i)
    return max_stats
def leaders(Z, T):
    """
    Returns the root nodes in a hierarchical clustering.

    Returns the root nodes in a hierarchical clustering corresponding
    to a cut defined by a flat cluster assignment vector ``T``. See
    the ``fcluster`` function for more information on the format of ``T``.

    For each flat cluster :math:`j` of the :math:`k` flat clusters
    represented in the n-sized flat cluster assignment vector ``T``,
    this function finds the lowest cluster node :math:`i` in the linkage
    tree Z such that:

    * leaf descendents belong only to flat cluster j
      (i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where
      :math:`S(i)` is the set of leaf ids of leaf nodes descendent
      with cluster node :math:`i`)

    * there does not exist a leaf that is not descendent with
      :math:`i` that also belongs to cluster :math:`j`
      (i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
      this condition is violated, ``T`` is not a valid cluster
      assignment vector, and an exception will be thrown.

    Parameters
    ----------
    Z : ndarray
        The hierarchical clustering encoded as a matrix. See
        `linkage` for more information.
    T : ndarray
        The flat cluster assignment vector.

    Returns
    -------
    L : ndarray
        The leader linkage node id's stored as a k-element 1-D array
        where ``k`` is the number of flat clusters found in ``T``.
        ``L[j]=i`` is the linkage cluster node id that is the
        leader of flat cluster with id M[j]. If ``i < n``, ``i``
        corresponds to an original observation, otherwise it
        corresponds to a non-singleton cluster.
    M : ndarray
        The leader linkage node id's stored as a k-element 1-D array where
        ``k`` is the number of flat clusters found in ``T``. This allows the
        set of flat cluster ids to be any arbitrary set of ``k`` integers.
    """
    Z = np.asarray(Z, order='c')
    T = np.asarray(T, order='c')
    if type(T) != np.ndarray or T.dtype != 'i':
        raise TypeError('T must be a one-dimensional numpy array of integers.')
    is_valid_linkage(Z, throw=True, name='Z')
    if len(T) != Z.shape[0] + 1:
        raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')

    cluster_ids = np.unique(T)
    num_clusters = len(cluster_ids)
    L = np.zeros((num_clusters,), dtype='i')
    M = np.zeros((num_clusters,), dtype='i')
    num_obs = Z.shape[0] + 1

    [Z, T] = _copy_arrays_if_base_present([Z, T])
    # The C routine fills L and M in place; a non-negative return value
    # identifies the offending linkage node when T is invalid.
    s = _hierarchy.leaders(Z, T, L, M, int(num_clusters), int(num_obs))
    if s >= 0:
        raise ValueError(('T is not a valid assignment vector. Error found '
                          'when examining linkage node %d (< 2n-1).') % s)
    return (L, M)
//go:build windows
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package preflight
import (
"golang.org/x/sys/windows"
utilsexec "k8s.io/utils/exec"
"k8s.io/kubernetes/cmd/kubeadm/app/util/errors"
)
// Check validates if a user has elevated (administrator) privileges.
func (ipuc IsPrivilegedUserCheck) Check() (warnings, errorList []error) {
	token := windows.GetCurrentProcessToken()
	// Guard clause: fail fast when the process token is not elevated.
	if !token.IsElevated() {
		return nil, []error{errors.New("the kubeadm process must be run by a user with elevated privileges")}
	}
	return nil, nil
}
// Check verifies the amount of memory required by kubeadm.
// No-op for Windows: memory validation is intentionally skipped, so the
// check always passes with no warnings and no errors.
func (mc MemCheck) Check() (warnings, errorList []error) {
	return nil, nil
}
// addExecChecks adds checks that verify if certain binaries are in PATH.
func addExecChecks(checks []Checker, execer utilsexec.Interface, _ string) []Checker {
// kubeadm requires xcopy to be present in PATH for copying etcd directories.
checks = append(checks, InPathCheck{executable: "xcopy", mandatory: true, exec: execer})
return checks
} | go | github | https://github.com/kubernetes/kubernetes | cmd/kubeadm/app/preflight/checks_windows.go |
"""Implements the Astropy TestRunner which is a thin wrapper around py.test."""
import inspect
import os
import glob
import copy
import shlex
import sys
import tempfile
import warnings
import importlib
from collections import OrderedDict
from importlib.util import find_spec
from functools import wraps
from astropy.config.paths import set_temp_config, set_temp_cache
from astropy.utils import find_current_module
from astropy.utils.exceptions import AstropyWarning, AstropyDeprecationWarning
__all__ = ['TestRunner', 'TestRunnerBase', 'keyword']
class keyword:
    """
    A decorator to mark a method as keyword argument for the ``TestRunner``.

    Parameters
    ----------
    default_value : `object`
        The default value for the keyword argument. (Default: `None`)
    priority : `int`
        keyword argument methods are executed in order of descending priority.
    """
    def __init__(self, default_value=None, priority=0):
        self.default_value = default_value
        self.priority = priority
    def __call__(self, f):
        # The wrapper is deliberately named ``keyword``: TestRunnerBase
        # locates decorated methods by ``func.__name__ == 'keyword'``, so
        # this name must not be changed (and functools.wraps must not be
        # used, as it would restore the wrapped function's name).
        def keyword(*args, **kwargs):
            return f(*args, **kwargs)
        # Stash the decorator configuration on the wrapper so the class
        # machinery can read it back later.
        keyword._default_value = self.default_value
        keyword._priority = self.priority
        # Set __doc__ explicitly here rather than using wraps because we want
        # to keep the function name as keyword so we can inspect it later.
        keyword.__doc__ = f.__doc__
        return keyword
class TestRunnerBase:
    """
    The base class for the TestRunner.

    A test runner can be constructed by creating a subclass of this class and
    defining 'keyword' methods. These are methods that have the
    `~astropy.tests.runner.keyword` decorator, these methods are used to
    construct allowed keyword arguments to the
    `~astropy.tests.runner.TestRunnerBase.run_tests` method as a way to allow
    customization of individual keyword arguments (and associated logic)
    without having to re-implement the whole
    `~astropy.tests.runner.TestRunnerBase.run_tests` method.

    Examples
    --------
    A simple keyword method::

        class MyRunner(TestRunnerBase):
            @keyword('default_value'):
            def spam(self, spam, kwargs):
                \"\"\"
                spam : `str`
                    The parameter description for the run_tests docstring.
                \"\"\"
                # Return value must be a list with a CLI parameter for pytest.
                return ['--spam={}'.format(spam)]
    """
    def __init__(self, base_path):
        # Resolve the package root once; keyword methods join paths to it.
        self.base_path = os.path.abspath(base_path)
    def __new__(cls, *args, **kwargs):
        # Before constructing the class parse all the methods that have been
        # decorated with ``keyword``.
        # The objective of this method is to construct a default set of keyword
        # arguments to the ``run_tests`` method. It does this by inspecting the
        # methods of the class for functions with the name ``keyword`` which is
        # the name of the decorator wrapping function. Once it has created this
        # dictionary, it also formats the docstring of ``run_tests`` to be
        # comprised of the docstrings for the ``keyword`` methods.
        # To add a keyword argument to the ``run_tests`` method, define a new
        # method decorated with ``@keyword`` and with the ``self, name, kwargs``
        # signature.
        # Get all 'function' members as the wrapped methods are functions
        functions = inspect.getmembers(cls, predicate=inspect.isfunction)
        # Filter out anything that's not got the name 'keyword'
        keywords = filter(lambda func: func[1].__name__ == 'keyword', functions)
        # Sort all keywords based on the priority flag.
        sorted_keywords = sorted(keywords, key=lambda x: x[1]._priority, reverse=True)
        cls.keywords = OrderedDict()
        doc_keywords = ""
        for name, func in sorted_keywords:
            # Here we test if the function has been overloaded to return
            # NotImplemented which is the way to disable arguments on
            # subclasses. If it has been disabled we need to remove it from the
            # default keywords dict. We do it in the try except block because
            # we do not have access to an instance of the class, so this is
            # going to error unless the method is just doing `return
            # NotImplemented`.
            try:
                # Second argument is False, as it is normally a bool.
                # The other two are placeholders for objects.
                if func(None, False, None) is NotImplemented:
                    continue
            except Exception:
                pass
            # Construct the default kwargs dict and docstring
            cls.keywords[name] = func._default_value
            if func.__doc__:
                # Eight-space prefix aligns each entry under the Parameters
                # section of RUN_TESTS_DOCSTRING below.
                doc_keywords += ' '*8
                doc_keywords += func.__doc__.strip()
                doc_keywords += '\n\n'
        cls.run_tests.__doc__ = cls.RUN_TESTS_DOCSTRING.format(keywords=doc_keywords)
        return super().__new__(cls)
    def _generate_args(self, **kwargs):
        # Build the pytest CLI argument list by calling every keyword
        # method (in priority order) with its resolved value.
        # Update default values with passed kwargs
        # but don't modify the defaults
        keywords = copy.deepcopy(self.keywords)
        keywords.update(kwargs)
        # Iterate through the keywords (in order of priority)
        args = []
        for keyword in keywords.keys():
            func = getattr(self, keyword)
            result = func(keywords[keyword], keywords)
            # Allow disabling of options in a subclass
            if result is NotImplemented:
                raise TypeError(f"run_tests() got an unexpected keyword argument {keyword}")
            # keyword methods must return a list
            if not isinstance(result, list):
                raise TypeError(f"{keyword} keyword method must return a list")
            args += result
        return args
    RUN_TESTS_DOCSTRING = \
        """
    Run the tests for the package.
    This method builds arguments for and then calls ``pytest.main``.
    Parameters
    ----------
    {keywords}
    """
    # NOTE(review): the 'dependancies' misspelling is kept as-is -- these
    # are class attributes that subclasses may reference by name.
    _required_dependancies = ['pytest', 'pytest_remotedata', 'pytest_doctestplus']
    _missing_dependancy_error = "Test dependencies are missing. You should install the 'pytest-astropy' package."
    @classmethod
    def _has_test_dependencies(cls): # pragma: no cover
        # Using the test runner will not work without these dependencies, but
        # pytest-openfiles is optional, so it's not listed here.
        for module in cls._required_dependancies:
            spec = find_spec(module)
            # Checking loader accounts for packages that were uninstalled
            if spec is None or spec.loader is None:
                raise RuntimeError(cls._missing_dependancy_error)
    def run_tests(self, **kwargs):
        # The following option will include eggs inside a .eggs folder in
        # sys.path when running the tests. This is possible so that when
        # runnning python setup.py test, test dependencies installed via e.g.
        # tests_requires are available here. This is not an advertised option
        # since it is only for internal use
        if kwargs.pop('add_local_eggs_to_path', False):
            # Add each egg to sys.path individually
            for egg in glob.glob(os.path.join('.eggs', '*.egg')):
                sys.path.insert(0, egg)
            # We now need to force reload pkg_resources in case any pytest
            # plugins were added above, so that their entry points are picked up
            import pkg_resources
            importlib.reload(pkg_resources)
        self._has_test_dependencies() # pragma: no cover
        # The docstring for this method is defined as a class variable.
        # This allows it to be built for each subclass in __new__.
        # Don't import pytest until it's actually needed to run the tests
        import pytest
        # Raise error for undefined kwargs
        allowed_kwargs = set(self.keywords.keys())
        passed_kwargs = set(kwargs.keys())
        if not passed_kwargs.issubset(allowed_kwargs):
            wrong_kwargs = list(passed_kwargs.difference(allowed_kwargs))
            raise TypeError("run_tests() got an unexpected keyword argument {}".format(wrong_kwargs[0]))
        args = self._generate_args(**kwargs)
        # An explicitly passed plugins list wins over the class default.
        if kwargs.get('plugins', None) is not None:
            plugins = kwargs.pop('plugins')
        elif self.keywords.get('plugins', None) is not None:
            plugins = self.keywords['plugins']
        else:
            plugins = []
        # Override the config locations to not make a new directory nor use
        # existing cache or config. Note that we need to do this here in
        # addition to in conftest.py - for users running tests interactively
        # in e.g. IPython, conftest.py would get read in too late, so we need
        # to do it here - but at the same time the code here doesn't work when
        # running tests in parallel mode because this uses subprocesses which
        # don't know about the temporary config/cache.
        astropy_config = tempfile.mkdtemp('astropy_config')
        astropy_cache = tempfile.mkdtemp('astropy_cache')
        # Have to use nested with statements for cross-Python support
        # Note, using these context managers here is superfluous if the
        # config_dir or cache_dir options to py.test are in use, but it's
        # also harmless to nest the contexts
        with set_temp_config(astropy_config, delete=True):
            with set_temp_cache(astropy_cache, delete=True):
                return pytest.main(args=args, plugins=plugins)
    @classmethod
    def make_test_runner_in(cls, path):
        """
        Constructs a `TestRunner` to run in the given path, and returns a
        ``test()`` function which takes the same arguments as
        `TestRunner.run_tests`.

        The returned ``test()`` function will be defined in the module this
        was called from. This is used to implement the ``astropy.test()``
        function (or the equivalent for affiliated packages).
        """
        runner = cls(path)
        @wraps(runner.run_tests, ('__doc__',))
        def test(**kwargs):
            return runner.run_tests(**kwargs)
        module = find_current_module(2)
        if module is not None:
            test.__module__ = module.__name__
        # A somewhat unusual hack, but delete the attached __wrapped__
        # attribute--although this is normally used to tell if the function
        # was wrapped with wraps, on some version of Python this is also
        # used to determine the signature to display in help() which is
        # not useful in this case. We don't really care in this case if the
        # function was wrapped either
        if hasattr(test, '__wrapped__'):
            del test.__wrapped__
        # Prevent pytest from collecting the returned helper as a test.
        test.__test__ = False
        return test
class TestRunner(TestRunnerBase):
    """
    A test runner for astropy tests
    """
    def packages_path(self, packages, base_path, error=None, warning=None):
        """
        Generates the path for multiple packages.

        Parameters
        ----------
        packages : str
            Comma separated string of packages.
        base_path : str
            Base path to the source code or documentation.
        error : str
            Error message to be raised as ``ValueError``. Individual package
            name and path can be accessed by ``{name}`` and ``{path}``
            respectively. No error is raised if `None`. (Default: `None`)
        warning : str
            Warning message to be issued. Individual package
            name and path can be accessed by ``{name}`` and ``{path}``
            respectively. No warning is issues if `None`. (Default: `None`)

        Returns
        -------
        paths : list of str
            List of stings of existing package paths.
        """
        packages = packages.split(",")
        paths = []
        for package in packages:
            # Dotted package name maps onto an on-disk directory.
            path = os.path.join(
                base_path, package.replace('.', os.path.sep))
            if not os.path.isdir(path):
                info = {'name': package, 'path': path}
                if error is not None:
                    raise ValueError(error.format(**info))
                if warning is not None:
                    warnings.warn(warning.format(**info))
            else:
                paths.append(path)
        return paths
    # Increase priority so this warning is displayed first.
    @keyword(priority=1000)
    def coverage(self, coverage, kwargs):
        # Coverage cannot be collected from an in-process pytest run, so
        # the option is accepted but only triggers a warning.
        if coverage:
            warnings.warn(
                "The coverage option is ignored on run_tests, since it "
                "can not be made to work in that context. Use "
                "'python setup.py test --coverage' instead.",
                AstropyWarning)
        return []
    # test_path depends on self.package_path so make sure this runs before
    # test_path.
    @keyword(priority=1)
    def package(self, package, kwargs):
        """
        package : str, optional
            The name of a specific package to test, e.g. 'io.fits' or
            'utils'. Accepts comma separated string to specify multiple
            packages. If nothing is specified all default tests are run.
        """
        if package is None:
            self.package_path = [self.base_path]
        else:
            error_message = ('package to test is not found: {name} '
                             '(at path {path}).')
            self.package_path = self.packages_path(package, self.base_path,
                                                   error=error_message)
        # When an explicit test_path is given it takes precedence; the
        # package paths are then only stored for test_path to consume.
        if not kwargs['test_path']:
            return self.package_path
        return []
    @keyword()
    def test_path(self, test_path, kwargs):
        """
        test_path : str, optional
            Specify location to test by path. May be a single file or
            directory. Must be specified absolutely or relative to the
            calling directory.
        """
        all_args = []
        # Ensure that the package kwarg has been run.
        self.package(kwargs['package'], kwargs)
        if test_path:
            base, ext = os.path.splitext(test_path)
            if ext in ('.rst', ''):
                if kwargs['docs_path'] is None:
                    # This shouldn't happen from "python setup.py test"
                    raise ValueError(
                        "Can not test .rst files without a docs_path "
                        "specified.")
                abs_docs_path = os.path.abspath(kwargs['docs_path'])
                abs_test_path = os.path.abspath(
                    os.path.join(abs_docs_path, os.pardir, test_path))
                common = os.path.commonprefix((abs_docs_path, abs_test_path))
                if os.path.exists(abs_test_path) and common == abs_docs_path:
                    # Turn on the doctest_rst plugin
                    all_args.append('--doctest-rst')
                    test_path = abs_test_path
            # Check that the extensions are in the path and not at the end to
            # support specifying the name of the test, i.e.
            # test_quantity.py::test_unit
            if not (os.path.isdir(test_path) or ('.py' in test_path or '.rst' in test_path)):
                raise ValueError("Test path must be a directory or a path to "
                                 "a .py or .rst file")
            return all_args + [test_path]
        return []
    @keyword()
    def args(self, args, kwargs):
        """
        args : str, optional
            Additional arguments to be passed to ``pytest.main`` in the ``args``
            keyword argument.
        """
        if args:
            return shlex.split(args, posix=not sys.platform.startswith('win'))
        return []
    @keyword(default_value=['astropy.tests.plugins.display'])
    def plugins(self, plugins, kwargs):
        """
        plugins : list, optional
            Plugins to be passed to ``pytest.main`` in the ``plugins`` keyword
            argument.
        """
        # Plugins are handled independently by `run_tests` so we define this
        # keyword just for the docstring
        return []
    @keyword()
    def verbose(self, verbose, kwargs):
        """
        verbose : bool, optional
            Convenience option to turn on verbose output from py.test. Passing
            True is the same as specifying ``-v`` in ``args``.
        """
        if verbose:
            return ['-v']
        return []
    @keyword()
    def pastebin(self, pastebin, kwargs):
        """
        pastebin : ('failed', 'all', None), optional
            Convenience option for turning on py.test pastebin output. Set to
            'failed' to upload info for failed tests, or 'all' to upload info
            for all tests.
        """
        if pastebin is not None:
            if pastebin in ['failed', 'all']:
                return [f'--pastebin={pastebin}']
            else:
                raise ValueError("pastebin should be 'failed' or 'all'")
        return []
    @keyword(default_value='none')
    def remote_data(self, remote_data, kwargs):
        """
        remote_data : {'none', 'astropy', 'any'}, optional
            Controls whether to run tests marked with @pytest.mark.remote_data. This can be
            set to run no tests with remote data (``none``), only ones that use
            data from http://data.astropy.org (``astropy``), or all tests that
            use remote data (``any``). The default is ``none``.
        """
        # Legacy bool values are still accepted for backward compatibility.
        if remote_data is True:
            remote_data = 'any'
        elif remote_data is False:
            remote_data = 'none'
        elif remote_data not in ('none', 'astropy', 'any'):
            warnings.warn("The remote_data option should be one of "
                          "none/astropy/any (found {}). For backward-compatibility, "
                          "assuming 'any', but you should change the option to be "
                          "one of the supported ones to avoid issues in "
                          "future.".format(remote_data),
                          AstropyDeprecationWarning)
            remote_data = 'any'
        return [f'--remote-data={remote_data}']
    @keyword()
    def pep8(self, pep8, kwargs):
        """
        pep8 : bool, optional
            Turn on PEP8 checking via the pytest-pep8 plugin and disable normal
            tests. Same as specifying ``--pep8 -k pep8`` in ``args``.
        """
        if pep8:
            try:
                import pytest_pep8  # pylint: disable=W0611
            except ImportError:
                raise ImportError('PEP8 checking requires pytest-pep8 plugin: '
                                  'http://pypi.python.org/pypi/pytest-pep8')
            else:
                return ['--pep8', '-k', 'pep8']
        return []
    @keyword()
    def pdb(self, pdb, kwargs):
        """
        pdb : bool, optional
            Turn on PDB post-mortem analysis for failing tests. Same as
            specifying ``--pdb`` in ``args``.
        """
        if pdb:
            return ['--pdb']
        return []
    @keyword()
    def open_files(self, open_files, kwargs):
        """
        open_files : bool, optional
            Fail when any tests leave files open. Off by default, because
            this adds extra run time to the test suite. Requires the
            ``psutil`` package.
        """
        if open_files:
            if kwargs['parallel'] != 0:
                raise SystemError(
                    "open file detection may not be used in conjunction with "
                    "parallel testing.")
            try:
                import psutil  # pylint: disable=W0611
            except ImportError:
                raise SystemError(
                    "open file detection requested, but psutil package "
                    "is not installed.")
            # BUG FIX: this message previously printed after the return,
            # outside the ``if open_files:`` branch, so it appeared exactly
            # when the check was *not* enabled. Announce before returning.
            print("Checking for unclosed files")
            return ['--open-files']
        return []
    @keyword(0)
    def parallel(self, parallel, kwargs):
        """
        parallel : int or 'auto', optional
            When provided, run the tests in parallel on the specified
            number of CPUs. If parallel is ``'auto'``, it will use the all
            the cores on the machine. Requires the ``pytest-xdist`` plugin.
        """
        if parallel != 0:
            try:
                from xdist import plugin  # noqa
            except ImportError:
                raise SystemError(
                    "running tests in parallel requires the pytest-xdist package")
            return ['-n', str(parallel)]
        return []
    @keyword()
    def docs_path(self, docs_path, kwargs):
        """
        docs_path : str, optional
            The path to the documentation .rst files.
        """
        paths = []
        if docs_path is not None and not kwargs['skip_docs']:
            if kwargs['package'] is not None:
                warning_message = ("Can not test .rst docs for {name}, since "
                                   "docs path ({path}) does not exist.")
                paths = self.packages_path(kwargs['package'], docs_path,
                                           warning=warning_message)
            elif not kwargs['test_path']:
                paths = [docs_path, ]
            if len(paths) and not kwargs['test_path']:
                paths.append('--doctest-rst')
        return paths
    @keyword()
    def skip_docs(self, skip_docs, kwargs):
        """
        skip_docs : `bool`, optional
            When `True`, skips running the doctests in the .rst files.
        """
        # Skip docs is a bool used by docs_path only.
        return []
    @keyword()
    def repeat(self, repeat, kwargs):
        """
        repeat : `int`, optional
            If set, specifies how many times each test should be run. This is
            useful for diagnosing sporadic failures.
        """
        if repeat:
            return [f'--repeat={repeat}']
        return []
    # Override run_tests for astropy-specific fixes
    def run_tests(self, **kwargs):
        # This prevents cyclical import problems that make it
        # impossible to test packages that define Table types on their
        # own.
        from astropy.table import Table  # pylint: disable=W0611
        return super().run_tests(**kwargs)
import asyncio
import collections
import itertools
import json
from .data_watcher import DataWatcher
from .party import Party
from .lock import Lock
from .recipe import Recipe
class Allocator(Recipe):
    """Distributes a shared set of items among the members of a party.

    Each process joins a party under ``base_path`` and watches the item
    set stored at that node.  Whenever membership or the item set
    changes, ``allocator_fn(members, items)`` is re-run to decide which
    member owns which items; this member's share is exposed via
    :attr:`allocation`.
    """

    sub_recipes = {
        "party": (Party, ["member_path", "name"]),
        "lock": (Lock, ["lock_path"]),
        "data_watcher": DataWatcher,
    }

    def __init__(self, base_path, name, allocator_fn=None):
        # *name* identifies this member within the party; *allocator_fn*
        # maps (members, items) -> {member: set(items)} and defaults to
        # the module-level round_robin strategy.
        self.name = name
        super().__init__(base_path)
        if allocator_fn is None:
            allocator_fn = round_robin
        self.allocator_fn = allocator_fn
        self.active = False
        self.full_allocation = collections.defaultdict(set)
        self.full_set = set()

    @property
    def lock_path(self):
        # Lock node serializing writes to the item-set data node.
        return self.base_path + "/lock"

    @property
    def member_path(self):
        return self.base_path + "/members"

    @property
    def allocation(self):
        """The subset of items currently assigned to this member."""
        return self.full_allocation[self.name]

    def validate(self, new_allocation):
        """Assert *new_allocation* partitions ``full_set`` exactly."""
        as_list = []
        for subset in new_allocation.values():
            as_list.extend(list(subset))
        # make sure there are no duplicates among the subsets
        assert len(as_list) == len(set(as_list)), (
            "duplicate items found in allocation: %s" % self.full_allocation
        )
        # make sure there's no mismatch beween the full set and allocations
        assert len(self.full_set.symmetric_difference(set(as_list))) == 0, (
            "mismatch between full set and allocation: %s vs %s" % (
                self.full_set, self.full_allocation
            )
        )

    async def start(self):
        self.active = True
        await self.ensure_path()
        await self.party.join()
        self.data_watcher.add_callback(self.base_path, self.handle_data_change)
        asyncio.ensure_future(self.monitor_member_changes(), loop=self.client.loop)

    async def add(self, new_item):
        # BUG FIX: ``full_set.copy().add(x)`` evaluated to None (set.add
        # returns None) and the call targeted a nonexistent ``update_set``
        # method; build the new set with a union and use ``update``.
        await self.update(self.full_set | {new_item})

    async def remove(self, new_item):
        # BUG FIX: same as ``add`` — set.remove returns None and
        # ``update_set`` never existed.
        await self.update(self.full_set - {new_item})

    async def update(self, new_items):
        """Atomically publish *new_items* as the full item set."""
        new_items = set(new_items)
        data = json.dumps(list(new_items))
        with (await self.lock.acquire()):
            await self.client.set_data(self.base_path, data=data)

    async def monitor_member_changes(self):
        # BUG FIX: this was a plain generator (``yield``), but start()
        # schedules it with asyncio.ensure_future, which requires a
        # coroutine; await the membership change instead of yielding it.
        while self.active:
            await self.party.wait_for_change()
            if not self.active:
                break
            self.allocate()

    def handle_data_change(self, new_set_data):
        if new_set_data is None:
            return
        new_set_data = set(json.loads(new_set_data))
        if new_set_data == self.full_set:
            return
        self.full_set = new_set_data
        self.allocate()

    def allocate(self):
        """Recompute the member -> items mapping and store it."""
        new_allocation = self.allocator_fn(self.party.members, self.full_set)
        self.validate(new_allocation)
        self.full_allocation = new_allocation

    async def stop(self):
        # BUG FIX: ``active`` was never cleared, so the monitor loop in
        # monitor_member_changes could never terminate.
        self.active = False
        await self.party.leave()
        self.data_watcher.remove_callback(
            self.base_path, self.handle_data_change
        )
def round_robin(members, items):
    """
    Default allocator using a round-robin strategy.

    Members are cycled over, each receiving one item at a time until the
    items run out.  This assumes roughly equal capacity per member and
    aims for an even distribution of item counts.
    """
    allocation = collections.defaultdict(set)
    assignments = zip(itertools.cycle(members), items)
    for member, item in assignments:
        allocation[member].add(item)
    return allocation
import os
import sys
import linecache
import re
import tkinter as tk
from idlelib.TreeWidget import TreeNode, TreeItem, ScrolledCanvas
from idlelib.ObjectBrowser import ObjectTreeItem, make_objecttreeitem
from idlelib.PyShell import PyShellFileList
def StackBrowser(root, flist=None, tb=None, top=None):
    """Open a stack-browser window for traceback *tb* (default: sys.last_traceback)."""
    if top is None:
        top = tk.Toplevel(root)
    canvas_holder = ScrolledCanvas(top, bg="white", highlightthickness=0)
    canvas_holder.frame.pack(expand=1, fill="both")
    root_item = StackTreeItem(flist, tb)
    root_node = TreeNode(canvas_holder.canvas, None, root_item)
    root_node.expand()
class StackTreeItem(TreeItem):
    """Root tree item: the most recent exception and its stack frames."""

    def __init__(self, flist=None, tb=None):
        self.flist = flist
        self.stack = self.get_stack(tb)
        self.text = self.get_exception()

    def get_stack(self, tb):
        """Return (frame, lineno) pairs for traceback *tb* (or sys.last_traceback)."""
        if tb is None:
            tb = sys.last_traceback
        # Skip a leading entry that carries no frame.
        if tb and tb.tb_frame is None:
            tb = tb.tb_next
        frames = []
        while tb is not None:
            frames.append((tb.tb_frame, tb.tb_lineno))
            tb = tb.tb_next
        return frames

    def get_exception(self):
        """Format sys.last_type / sys.last_value as a one-line header."""
        exc_type = sys.last_type
        exc_value = sys.last_value
        if hasattr(exc_type, "__name__"):
            exc_type = exc_type.__name__
        header = str(exc_type)
        if exc_value is not None:
            header = header + ": " + str(exc_value)
        return header

    def GetText(self):
        return self.text

    def GetSubList(self):
        return [FrameTreeItem(info, self.flist) for info in self.stack]
class FrameTreeItem(TreeItem):
    """Tree item for a single (frame, lineno) pair of the stack."""

    def __init__(self, info, flist):
        self.info = info
        self.flist = flist

    def GetText(self):
        frame, lineno = self.info
        try:
            modname = frame.f_globals["__name__"]
        except:
            modname = "?"
        code = frame.f_code
        filename = code.co_filename
        funcname = code.co_name
        sourceline = linecache.getline(filename, lineno).strip()
        if funcname in ("?", "", None):
            return "%s, line %d: %s" % (modname, lineno, sourceline)
        return "%s.%s(...), line %d: %s" % (modname, funcname,
                                            lineno, sourceline)

    def GetSubList(self):
        frame, lineno = self.info
        children = []
        # Show <locals> only when distinct from <globals> (not module level).
        if frame.f_globals is not frame.f_locals:
            children.append(
                VariablesTreeItem("<locals>", frame.f_locals, self.flist))
        children.append(
            VariablesTreeItem("<globals>", frame.f_globals, self.flist))
        return children

    def OnDoubleClick(self):
        if self.flist:
            frame, lineno = self.info
            filename = frame.f_code.co_filename
            if os.path.isfile(filename):
                self.flist.gotofileline(filename, lineno)
class VariablesTreeItem(ObjectTreeItem):
    # Tree item showing a variables mapping (a frame's f_locals or
    # f_globals); the label passed in ("<locals>"/"<globals>") is the text.
    def GetText(self):
        return self.labeltext
    def GetLabelText(self):
        return None
    def IsExpandable(self):
        return len(self.object) > 0
    def GetSubList(self):
        sublist = []
        for key in self.object.keys():
            try:
                value = self.object[key]
            except KeyError:
                # Entry vanished between keys() and lookup; skip it.
                continue
            # key/object are bound as defaults so each child's setter
            # targets its own entry, not the loop's final key (late-binding
            # closure pitfall).
            def setfunction(value, key=key, object=self.object):
                object[key] = value
            item = make_objecttreeitem(key + " =", value, setfunction)
            sublist.append(item)
        return sublist
    def keys(self): # unused, left for possible 3rd party use
        return list(self.object.keys())
def _stack_viewer(parent):
    """Manual htest entry point: browse the traceback of a synthetic NameError."""
    root = tk.Tk()
    root.title("Test StackViewer")
    geometry_fields = [int(part) for part in re.split('[x+]', parent.geometry())]
    width, height, x, y = geometry_fields
    root.geometry("+%d+%d" % (x, y + 150))
    flist = PyShellFileList(root)
    try:  # to obtain a traceback object
        intentional_name_error
    except NameError:
        exc_type, exc_value, exc_tb = sys.exc_info()
    # Publish the traceback where StackBrowser expects to find it.
    sys.last_type = exc_type
    sys.last_value = exc_value
    sys.last_traceback = exc_tb
    StackBrowser(root, flist=flist, top=root, tb=exc_tb)
    # Restore sys to its original state.
    del sys.last_type
    del sys.last_value
    del sys.last_traceback
if __name__ == '__main__':
    # Launch the manual GUI test through idlelib's human-test harness.
    from idlelib.idle_test.htest import run
    run(_stack_viewer)
# :coding: utf-8
def fetch_environment(file_id, files=None, module_names=None):
    """Return module environment dictionary from *file_id*.

    *file_id* represents the identifier of the file
    (e.g. ``"module/test/index.js"``).

    *files* is an optional list of the other file names stored in the same
    directory as the one analyzed.

    *module_names* is an optional list of all the other module names
    previously fetched, used to help determine the module name of the
    current file.

    The environment is in the form of::

        {
            "id": "module.test",
            "name": "test",
            "path": "module/test",
            "file_id": "module/test/index.js"
        }

    """
    # BUG FIX: 'files' previously kept its None default, so the
    # '"index.js" in files' membership test below raised TypeError
    # whenever the argument was omitted.
    if files is None:
        files = []
    if module_names is None:
        module_names = []

    hierarchy = file_id.split("/")
    file_name = hierarchy.pop()

    if file_name == "index.js":
        # The folder itself is the module.
        module_id = ".".join(hierarchy)
        module_name = _guess_module_name(
            hierarchy[-1],
            hierarchy_folders=hierarchy[:-1],
            module_names=module_names
        )
    elif "index.js" in files:
        # A sibling 'index.js' exists, so this file is a sub-module of
        # the folder's module.
        name = file_name.split(".js")[0]
        module_id = ".".join(hierarchy + [name])
        module_name = _guess_module_name(
            ".".join([hierarchy[-1], name]),
            hierarchy_folders=hierarchy[:-1],
            module_names=module_names
        )
    else:
        # Standalone file: the module is simply named after the file.
        name = file_name.split(".js")[0]
        module_id = ".".join(hierarchy + [name])
        module_name = name

    return {
        "id": module_id,
        "name": module_name,
        "path": module_id.replace(".", "/"),
        "file_id": file_id
    }
def _guess_module_name(name, hierarchy_folders, module_names):
"""Return the full module *name* from *hierarchy_folders*.
*module_names* is the list of modules already fetched.
"""
for i in range(len(hierarchy_folders)):
root_module = ".".join(hierarchy_folders[i:])
if root_module in module_names:
return ".".join([root_module, name])
return name | unknown | codeparrot/codeparrot-clean | ||
import time
class SimpleTimer(object):
    """Wall-clock stopwatch measuring seconds since the last reset."""

    def __init__(self):
        self.reset()

    def now(self):
        """Return the current wall-clock time in seconds."""
        return time.time()

    def elapsed(self):
        """Return seconds elapsed since construction or the last reset()."""
        return self.now() - self.t0

    def reset(self):
        """Restart the stopwatch from the current instant."""
        self.t0 = self.now()
class FPSCounter(object):
    # Tracks frames-per-second via a MovingAverageComputer; call tick()
    # once per rendered frame.  (Python 2 module: note the print statement.)
    def __init__(self):
        self.averageComputer = MovingAverageComputer()
        # When True, the average FPS is printed each time an averaging
        # window completes.
        self.printToConsole = False

    def tick(self):
        # A new window average is about to be folded in if the current
        # window has expired; check before update() resets the window.
        newAverage = self.averageComputer.timer.elapsed() > self.averageComputer.timeWindow
        self.averageComputer.update(1)
        if newAverage and self.printToConsole:
            print 'fps:', self.getAverageFPS()

    def getAverageFPS(self):
        # Moving average of ticks per second.
        return self.averageComputer.getAverage()
class AverageComputer(object):
    """Accumulates a quantity and reports its per-second rate since reset."""

    def __init__(self):
        self.timer = SimpleTimer()
        self.quantity = 0.0

    def update(self, quantitySinceLastUpdate):
        """Add *quantitySinceLastUpdate* to the running total."""
        self.quantity = self.quantity + quantitySinceLastUpdate

    def getAverage(self):
        """Return total quantity divided by seconds since the last reset.

        NOTE(review): divides by elapsed time, which can be ~0 immediately
        after reset() — callers are expected to wait before sampling.
        """
        return self.quantity / self.timer.elapsed()

    def reset(self):
        """Clear the accumulated total and restart the timer."""
        self.quantity = 0.0
        self.timer.reset()
class MovingAverageComputer(object):
    """Exponentially-weighted moving average of a per-second rate.

    Quantities accumulate into windows of at least ``timeWindow`` seconds;
    when a window expires its observed rate is folded into the running
    average with weight ``alpha``.
    """

    def __init__(self):
        self.timer = SimpleTimer()
        # Weight given to the most recently completed window.
        self.alpha = 0.9
        # Minimum duration (seconds) of one averaging window.
        self.timeWindow = 1.0
        self.average = 0.0
        self.quantityThisWindow = 0.0

    def update(self, quantitySinceLastUpdate):
        """Record newly observed quantity and refresh the average."""
        self.quantityThisWindow = self.quantityThisWindow + quantitySinceLastUpdate
        self._updateAverage()

    def getAverage(self):
        """Return the current moving average (quantity per second)."""
        self._updateAverage()
        return self.average

    def _updateAverage(self):
        """Fold the current window into the average once it has expired."""
        elapsedTime = self.timer.elapsed()
        if elapsedTime <= self.timeWindow:
            return
        # Rate observed over the window that just finished.
        windowRate = self.quantityThisWindow / elapsedTime
        self.average = self.alpha * windowRate + (1.0 - self.alpha) * self.average
        # Start a fresh window.
        self.timer.reset()
        self.quantityThisWindow = 0.0
// Copyright 2022 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package cli
import (
"context"
"fmt"
"io"
"math"
"time"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/server/serverpb"
"github.com/cockroachdb/cockroach/pkg/util/grpcutil"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/errors"
)
// drainAndShutdown attempts to drain the server and then shut it
// down.
//
// The returned error is non-nil only when the hard shutdown itself
// failed; drain problems are logged as warnings because we proceed to
// shut the node down regardless.
func drainAndShutdown(
	ctx context.Context, c serverpb.RPCAdminClient, targetNode string,
) (err error) {
	hardError, remainingWork, err := doDrain(ctx, c, targetNode)
	if hardError {
		// The server is likely gone; a shutdown RPC would be pointless.
		return err
	}
	if remainingWork {
		log.Dev.Warningf(ctx, "graceful shutdown may not have completed successfully; check the node's logs for details.")
	}
	if err != nil {
		log.Dev.Warningf(ctx, "drain did not complete successfully; hard shutdown may cause disruption")
	}
	// We have already performed the drain above, so now go straight to
	// shutdown. We try twice just in case there is a transient error.
	hardErr, err := doShutdown(ctx, c, targetNode)
	if err != nil && !hardErr {
		log.Dev.Warningf(ctx, "hard shutdown attempt failed, retrying: %v", err)
		_, err = doShutdown(ctx, c, targetNode)
	}
	return errors.Wrap(err, "hard shutdown failed")
}
// doDrain calls a graceful drain.
//
// If the function returns hardError true, then the caller should not
// proceed with an alternate strategy (it's likely the server has gone
// away).
//
// remainingWork is true when the server still reported unfinished
// drain work when we stopped; err carries any drain failure, with
// timeouts rewritten into a friendlier message.
func doDrain(
	ctx context.Context, c serverpb.RPCAdminClient, targetNode string,
) (hardError, remainingWork bool, err error) {
	// The next step is to drain. The timeout is configurable
	// via --drain-wait.
	if drainCtx.drainWait == 0 {
		// Zero means "no timeout": drain for as long as it takes.
		return doDrainNoTimeout(ctx, c, targetNode)
	}

	// Best-effort: read the server-side shutdown settings and raise
	// --drain-wait to at least their sum, so the client does not give
	// up before the server can possibly finish draining.
	if err := timeutil.RunWithTimeout(ctx, "get-drain-settings", 5*time.Second, func(ctx context.Context) error {
		shutdownSettings, err := c.Settings(ctx, &serverpb.SettingsRequest{
			Keys: []string{
				string(server.DrainWait.InternalKey()),
				string(server.ConnectionShutdownTimeout.InternalKey()),
				string(server.JobShutdownTimeout.InternalKey()),
				string(server.QueryShutdownTimeout.InternalKey()),
				string(kvserver.LeaseTransferPerIterationTimeout.InternalKey()),
			},
		})
		if err != nil {
			return err
		}
		minWait := 0 * time.Second
		for k, v := range shutdownSettings.KeyValues {
			wait, err := time.ParseDuration(v.Value)
			if err != nil {
				return err
			}
			minWait += wait
			// query_wait is used twice during draining, so count it twice here.
			if k == string(server.QueryShutdownTimeout.InternalKey()) {
				minWait += wait
			}
		}
		if minWait > drainCtx.drainWait {
			fmt.Fprintf(stderr, "warning: --drain-wait is %s, but the server.shutdown.{drain,query,jobs,connection,lease_transfer}_wait "+
				"cluster settings require a value of at least %s; using the larger value\n",
				drainCtx.drainWait, minWait)
			drainCtx.drainWait = minWait
		}
		return nil
	}); err != nil {
		// Not fatal: proceed with the user-provided --drain-wait.
		fmt.Fprintf(stderr, "warning: could not check drain related cluster settings: %v\n", err)
	}

	err = timeutil.RunWithTimeout(ctx, "drain", drainCtx.drainWait, func(ctx context.Context) (err error) {
		hardError, remainingWork, err = doDrainNoTimeout(ctx, c, targetNode)
		return err
	})
	if errors.HasType(err, (*timeutil.TimeoutError)(nil)) || grpcutil.IsTimeout(err) {
		log.Dev.Infof(ctx, "drain timed out: %v", err)
		err = errors.New("drain timeout, consider adjusting --drain-wait, especially under " +
			"custom server.shutdown cluster settings")
	}
	return
}
// doDrainNoTimeout implements the drain loop: it repeatedly issues
// drain requests (shutdown bit unset) until the server reports zero
// remaining work. Return values have the same meaning as in doDrain.
func doDrainNoTimeout(
	ctx context.Context, c serverpb.RPCAdminClient, targetNode string,
) (hardError, remainingWork bool, err error) {
	defer func() {
		if grpcutil.IsWaitingForInit(err) {
			log.Dev.Infof(ctx, "%v", err)
			err = errors.New("node cannot be drained before it has been initialized")
		}
	}()
	var (
		// remaining is the amount of work the server still reports;
		// MaxUint64 means "unknown / not drained yet".
		remaining     = uint64(math.MaxUint64)
		prevRemaining = uint64(math.MaxUint64)
		verbose       = false
	)
	for ; ; prevRemaining = remaining {
		// Tell the user we're starting to drain. This enables the user to
		// mentally prepare for something to take some time, as opposed to
		// wondering why nothing is happening.
		fmt.Fprintf(stderr, "node is draining... ") // notice no final newline.
		// Send a drain request with the drain bit set and the shutdown bit
		// unset.
		stream, err := c.Drain(ctx, &serverpb.DrainRequest{
			DoDrain:  true,
			Shutdown: false,
			NodeId:   targetNode,
			Verbose:  verbose,
		})
		if err != nil {
			fmt.Fprintf(stderr, "\n") // finish the line started above.
			return !grpcutil.IsTimeout(err), remaining > 0, errors.Wrap(err, "error sending drain request")
		}
		for {
			resp, err := stream.Recv()
			if err == io.EOF {
				// Done.
				break
			}
			if err != nil {
				// Unexpected error.
				fmt.Fprintf(stderr, "\n") // finish the line started above.
				log.Dev.Infof(ctx, "graceful drain failed: %v", err)
				return false, remaining > 0, err
			}
			if resp.IsDraining {
				// We want to assert that the node is quitting, and tell the
				// story about how much work was performed in logs for
				// debugging.
				remaining = resp.DrainRemainingIndicator
				finalString := ""
				if remaining == 0 {
					finalString = " (complete)"
				}
				// We use stderr so that the stdout output remains a
				// simple 'ok' in case of success (for compatibility with
				// scripts).
				fmt.Fprintf(stderr, "remaining: %d%s\n", remaining, finalString)
			} else {
				// Either the server has decided it wanted to stop quitting; or
				// we're running a pre-20.1 node which doesn't populate IsDraining.
				// In either case, we need to stop sending drain requests.
				remaining = 0
				fmt.Fprintf(stderr, "done\n")
			}
			if resp.DrainRemainingDescription != "" {
				// Only show this information in the log; we'd use this for debugging.
				// (This can be revealed e.g. via --logtostderr.)
				log.Dev.Infof(ctx, "drain details: %s\n", resp.DrainRemainingDescription)
			}
			// Iterate until end of stream, which indicates the drain is
			// complete.
		}
		if remaining == 0 {
			// No more work to do.
			break
		}
		// If range lease transfer stalls or the number of remaining leases
		// somehow increases, verbosity is set to help with troubleshooting.
		if remaining >= prevRemaining {
			verbose = true
		}
		// Avoid a busy wait with high CPU/network usage if the server
		// replies with an incomplete drain too quickly.
		time.Sleep(200 * time.Millisecond)
	}
	return false, remaining > 0, nil
}
// doShutdown attempts to trigger a server shutdown *without*
// draining. Use doDrain() prior to perform a drain, or
// drainAndShutdown() to combine both.
//
// hardError is true when retrying is pointless (the node is not yet
// initialized, or the failure was anything other than a timeout). A
// closed connection is treated as success, since the server may die
// before it can respond.
func doShutdown(
	ctx context.Context, c serverpb.RPCAdminClient, targetNode string,
) (hardError bool, err error) {
	defer func() {
		if err != nil {
			if grpcutil.IsWaitingForInit(err) {
				log.Dev.Infof(ctx, "encountered error: %v", err)
				err = errors.New("node cannot be shut down before it has been initialized")
				err = errors.WithHint(err, "You can still stop the process using a service manager or a signal.")
				hardError = true
			}
			if grpcutil.IsClosedConnection(err) {
				// This most likely means that we shut down successfully. Note
				// that sometimes the connection can be shut down even before a
				// DrainResponse gets sent back to us, so we don't require a
				// response on the stream (see #14184).
				err = nil
			}
		}
	}()
	// We use a shorter timeout because a shutdown request has nothing
	// else to do than shut down the node immediately.
	err = timeutil.RunWithTimeout(ctx, "hard shutdown", 10*time.Second, func(ctx context.Context) error {
		// Send a drain request with the drain bit unset (no drain).
		// and the shutdown bit set.
		stream, err := c.Drain(ctx, &serverpb.DrainRequest{NodeId: targetNode, Shutdown: true})
		if err != nil {
			return errors.Wrap(err, "error sending shutdown request")
		}
		// Consume the stream until EOF; any mid-stream error is returned.
		for {
			_, err := stream.Recv()
			if err == io.EOF {
				return nil
			}
			if err != nil {
				return err
			}
		}
	})
	if !errors.HasType(err, (*timeutil.TimeoutError)(nil)) {
		// Anything but a timeout is not worth retrying.
		hardError = true
	}
	return hardError, err
}
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Allows applications to identify API outages and scheduled downtime.
Some examples:
def StoreUploadedProfileImage(self):
uploaded_image = self.request.get('img')
# If the images API is unavailable, we'll just skip the resize.
if CapabilitySet('images').is_enabled():
uploaded_image = images.resize(uploaded_image, 64, 64)
store(uploaded_image)
def RenderHTMLForm(self):
datastore_readonly = CapabilitySet('datastore_v3', capabilities=['write'])
if datastore_readonly.may_be_disabled_in(60):
# self.response.out('<p>Not accepting submissions right now: %s</p>' %
datastore_readonly.admin_message())
# ...render form with form elements disabled...
else:
# ...render form normally...
Individual API wrapper modules should expose CapabilitySet objects
for users rather than relying on users to create them. They may
also create convenience methods (e.g. db.IsReadOnly()) that delegate
to the relevant CapabilitySet.
Classes defined here:
CapabilitySet: encapsulates one or more capabilities, allows introspection.
UnknownCapabilityError: thrown when an unknown capability is requested.
"""
from google.appengine.api.capabilities import capability_service_pb
from google.appengine.base import capabilities_pb
from google.appengine.api import apiproxy_stub_map
# Shorter aliases for the protocol buffer message classes used below.
IsEnabledRequest = capability_service_pb.IsEnabledRequest
IsEnabledResponse = capability_service_pb.IsEnabledResponse
CapabilityConfig = capabilities_pb.CapabilityConfig


class UnknownCapabilityError(Exception):
  """An unknown capability was requested."""
class CapabilitySet(object):
  """Encapsulates one or more capabilities.

  Capabilities can either be named explicitly, or inferred from the
  list of methods provided. If no capabilities or methods are
  provided, this will check whether the entire package is enabled.
  """

  def __init__(self, package, capabilities=None, methods=None,
               stub_map=apiproxy_stub_map):
    """Constructor.

    Args:
      package: string; name of the API package being queried.
      capabilities: list of strings; capability names within the package.
      methods: list of strings; API method names within the package.
      stub_map: stub map through which the IsEnabled call is issued;
        overridable for testing.
    """
    if capabilities is None:
      capabilities = []
    if methods is None:
      methods = []
    self._package = package
    # '*' always queries the package-wide capability in addition to any
    # explicitly named ones.
    self._capabilities = ['*'] + capabilities
    self._methods = methods
    self._stub_map = stub_map

  def is_enabled(self):
    """Tests whether the capabilities are currently enabled.

    Returns:
      True if API calls that require these capabilities will succeed.

    Raises:
      UnknownCapabilityError, if a specified capability was not recognized.
    """
    config = self._get_status()
    # SCHEDULED_* statuses mean downtime is merely scheduled, so the
    # capability is still usable right now.
    return config.summary_status() in (IsEnabledResponse.ENABLED,
                                       IsEnabledResponse.SCHEDULED_FUTURE,
                                       IsEnabledResponse.SCHEDULED_NOW)

  def will_remain_enabled_for(self, time=60):
    """Returns true if it will remain enabled for the specified amount of time.

    Args:
      time: Number of seconds in the future to look when checking for scheduled
        downtime.

    Returns:
      True if there is no scheduled downtime for the specified capability
      within the amount of time specified.

    Raises:
      UnknownCapabilityError, if a specified capability was not recognized.
    """
    config = self._get_status()
    status = config.summary_status()
    if status == IsEnabledResponse.ENABLED:
      return True
    elif status == IsEnabledResponse.SCHEDULED_NOW:
      # Downtime has already begun.
      return False
    elif status == IsEnabledResponse.SCHEDULED_FUTURE:
      if config.has_time_until_scheduled():
        # Enabled long enough only if downtime starts after the window.
        return config.time_until_scheduled() >= time
      else:
        # No start time was given; assume it is far enough in the future.
        return True
    elif status == IsEnabledResponse.DISABLED:
      return False
    else:
      # Unrecognized status: be conservative.
      return False

  def admin_message(self):
    """Get any administrator notice messages for these capabilities.

    Returns:
      A string containing one or more admin messages, or an empty string.

    Raises:
      UnknownCapabilityError, if a specified capability was not recognized.
    """
    message_list = []
    for config in self._get_status().config_list():
      message = config.admin_message()
      # De-duplicate identical messages while preserving their order.
      if message and message not in message_list:
        message_list.append(message)
    return ' '.join(message_list)

  def _get_status(self):
    """Get an IsEnabledResponse for the capabilities listed.

    Returns:
      IsEnabledResponse for the specified capabilities.

    Raises:
      UnknownCapabilityError: If an unknown capability was requested.
    """
    req = IsEnabledRequest()
    req.set_package(self._package)
    for capability in self._capabilities:
      req.add_capability(capability)
    for method in self._methods:
      req.add_call(method)
    resp = capability_service_pb.IsEnabledResponse()
    self._stub_map.MakeSyncCall('capability_service', 'IsEnabled', req, resp)
    if resp.summary_status() == IsEnabledResponse.UNKNOWN:
      raise UnknownCapabilityError()
    return resp
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
try:
import resource # pylint: disable=F0401
except ImportError:
resource = None # Not available on all platforms
from telemetry import decorators
from telemetry.core import exceptions
from telemetry.core.platform import platform_backend
class LinuxBasedPlatformBackend(platform_backend.PlatformBackend):
  """Abstract platform containing functionality shared by all Linux based OSes.

  This includes Android and ChromeOS.
  Subclasses must implement RunCommand, GetFileContents, GetPsOutput, and
  ParseCStateSample."""

  def GetSystemCommitCharge(self):
    # Committed memory in bytes: total minus free, buffers and page cache.
    meminfo_contents = self.GetFileContents('/proc/meminfo')
    meminfo = self._GetProcFileDict(meminfo_contents)
    if not meminfo:
      return None
    return (self._ConvertKbToByte(meminfo['MemTotal'])
            - self._ConvertKbToByte(meminfo['MemFree'])
            - self._ConvertKbToByte(meminfo['Buffers'])
            - self._ConvertKbToByte(meminfo['Cached']))

  @decorators.Cache
  def GetSystemTotalPhysicalMemory(self):
    # Total physical memory in bytes; cached since it cannot change.
    meminfo_contents = self.GetFileContents('/proc/meminfo')
    meminfo = self._GetProcFileDict(meminfo_contents)
    if not meminfo:
      return None
    return self._ConvertKbToByte(meminfo['MemTotal'])

  def GetCpuStats(self, pid):
    """Return {'CpuProcessTime': seconds of CPU used by process *pid*}."""
    results = {}
    stats = self._GetProcFileForPid(pid, 'stat')
    if not stats:
      return results
    stats = stats.split()
    # /proc/<pid>/stat: fields 13 and 14 (0-indexed) are utime and stime,
    # both measured in clock ticks.
    utime = float(stats[13])
    stime = float(stats[14])
    cpu_process_jiffies = utime + stime
    clock_ticks = self.GetClockTicks()
    results.update({'CpuProcessTime': cpu_process_jiffies / clock_ticks})
    return results

  def GetCpuTimestamp(self):
    """Return {'TotalTime': seconds} derived from the kernel jiffies counter."""
    timer_list = self.GetFileContents('/proc/timer_list')
    total_jiffies = float(self._GetProcJiffies(timer_list))
    clock_ticks = self.GetClockTicks()
    return {'TotalTime': total_jiffies / clock_ticks}

  def GetMemoryStats(self, pid):
    """Return a dict of memory statistics (bytes) for process *pid*.

    Returns an empty dict when the process is gone or is a zombie.
    """
    status_contents = self._GetProcFileForPid(pid, 'status')
    stats = self._GetProcFileForPid(pid, 'stat').split()
    status = self._GetProcFileDict(status_contents)
    if not status or not stats or 'Z' in status['State']:
      return {}
    # stat field 22 (0-indexed) is vsize in bytes; field 23 is rss in pages.
    vm = int(stats[22])
    vm_peak = (self._ConvertKbToByte(status['VmPeak'])
               if 'VmPeak' in status else vm)
    # NOTE(review): 'resource' is None when its import failed at module
    # scope; assumes this method is only reached where it is available.
    wss = int(stats[23]) * resource.getpagesize()
    wss_peak = (self._ConvertKbToByte(status['VmHWM'])
                if 'VmHWM' in status else wss)
    private_dirty_bytes = 0
    # Sum Private_Dirty across all mappings in /proc/<pid>/smaps.
    for line in self._GetProcFileForPid(pid, 'smaps').splitlines():
      if line.startswith('Private_Dirty:'):
        private_dirty_bytes += self._ConvertKbToByte(line.split(':')[1].strip())
    return {'VM': vm,
            'VMPeak': vm_peak,
            'PrivateDirty': private_dirty_bytes,
            'WorkingSetSize': wss,
            'WorkingSetSizePeak': wss_peak}

  @decorators.Cache
  def GetClockTicks(self):
    """Returns the number of clock ticks per second.

    The proper way is to call os.sysconf('SC_CLK_TCK') but that is not easy to
    do on Android/CrOS. In practice, nearly all Linux machines have a USER_HZ
    of 100, so just return that.
    """
    return 100

  def GetFileContents(self, filename):
    # Subclasses must return the contents of *filename* as a string.
    raise NotImplementedError()

  def GetPsOutput(self, columns, pid=None):
    # Subclasses must return 'ps' output for *columns*, optionally
    # restricted to a single *pid*.
    raise NotImplementedError()

  def RunCommand(self, cmd):
    """Runs the specified command.

    Args:
      cmd: A list of program arguments or the path string of the program.

    Returns:
      A string whose content is the output of the command.
    """
    raise NotImplementedError()

  @staticmethod
  def ParseCStateSample(sample):
    """Parse a single c-state residency sample.

    Args:
      sample: A sample of c-state residency times to be parsed. Organized as
          a dictionary mapping CPU name to a string containing all c-state
          names, the times in each state, the latency of each state, and the
          time at which the sample was taken all separated by newlines.
          Ex: {'cpu0': 'C0\nC1\n5000\n2000\n20\n30\n1406673171'}

    Returns:
      Dictionary associating a c-state with a time.
    """
    raise NotImplementedError()

  def _IsPidAlive(self, pid):
    # A live process echoes its own pid back from ps.
    assert pid, 'pid is required'
    return bool(self.GetPsOutput(['pid'], pid) == str(pid))

  def _GetProcFileForPid(self, pid, filename):
    """Read /proc/<pid>/<filename>, raising ProcessGoneException if dead."""
    try:
      return self.GetFileContents('/proc/%s/%s' % (pid, filename))
    except IOError:
      if not self._IsPidAlive(pid):
        raise exceptions.ProcessGoneException()
      raise

  def _ConvertKbToByte(self, value):
    # /proc values are reported like '12345 kB'.
    return int(value.replace('kB', '')) * 1024

  def _GetProcFileDict(self, contents):
    """Parse 'Key: value' lines (e.g. /proc/meminfo) into a dict."""
    retval = {}
    for line in contents.splitlines():
      # NOTE(review): assumes exactly one ':' per line — verify for all
      # /proc files fed through here.
      key, value = line.split(':')
      retval[key.strip()] = value.strip()
    return retval

  def _GetProcJiffies(self, timer_list):
    """Parse '/proc/timer_list' output and returns the first jiffies attribute.

    Multi-CPU machines will have multiple 'jiffies:' lines, all of which will be
    essentially the same. Return the first one."""
    if isinstance(timer_list, str):
      timer_list = timer_list.splitlines()
    for line in timer_list:
      if line.startswith('jiffies:'):
        _, value = line.split(':')
        return value
    raise Exception('Unable to find jiffies from /proc/timer_list')
<?php declare(strict_types=1);
/*
* This file is part of Composer.
*
* (c) Nils Adermann <naderman@naderman.de>
* Jordi Boggiano <j.boggiano@seld.be>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Composer\Downloader;
/**
 * Thrown when an HTTP transport operation (download/request) fails.
 *
 * In addition to the message and code, this carries the response
 * headers, body, status code and transport-level metadata of the
 * failed request when they are available.
 *
 * @author Jordi Boggiano <j.boggiano@seld.be>
 */
class TransportException extends \RuntimeException
{
    /** @var ?array<string> Raw response header lines, when captured. */
    protected $headers;
    /** @var ?string Raw response body, when captured. */
    protected $response;
    /** @var ?int HTTP status code of the failed request, when known. */
    protected $statusCode;
    /** @var array<mixed> Transport-level metadata (e.g. curl info). */
    protected $responseInfo = [];

    public function __construct(string $message = "", int $code = 400, ?\Throwable $previous = null)
    {
        // Default code 400 marks a generic client/transport failure when
        // no specific HTTP status is supplied.
        parent::__construct($message, $code, $previous);
    }

    /**
     * @param array<string> $headers
     */
    public function setHeaders(array $headers): void
    {
        $this->headers = $headers;
    }

    /**
     * @return ?array<string>
     */
    public function getHeaders(): ?array
    {
        return $this->headers;
    }

    /** Store the raw response body of the failed request. */
    public function setResponse(?string $response): void
    {
        $this->response = $response;
    }

    /** Return the raw response body, or null if none was captured. */
    public function getResponse(): ?string
    {
        return $this->response;
    }

    /**
     * @param ?int $statusCode
     */
    public function setStatusCode($statusCode): void
    {
        $this->statusCode = $statusCode;
    }

    /** Return the HTTP status code, or null if unknown. */
    public function getStatusCode(): ?int
    {
        return $this->statusCode;
    }

    /**
     * @return array<mixed>
     */
    public function getResponseInfo(): array
    {
        return $this->responseInfo;
    }

    /**
     * @param array<mixed> $responseInfo
     */
    public function setResponseInfo(array $responseInfo): void
    {
        $this->responseInfo = $responseInfo;
    }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides the server-side framework for authentication.
*/
@InterfaceAudience.LimitedPrivate({ "HBase", "HDFS", "MapReduce" })
@InterfaceStability.Evolving
package org.apache.hadoop.security.authentication.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; | java | github | https://github.com/apache/hadoop | hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/package-info.java |
"""The tests for the DTE Energy Bridge."""
import unittest
import requests_mock
from homeassistant.setup import setup_component
from tests.common import get_test_home_assistant
# Minimal platform configuration shared by every test below.
DTE_ENERGY_BRIDGE_CONFIG = {
    'platform': 'dte_energy_bridge',
    'ip': '192.168.1.1',
}
class TestDteEnergyBridgeSetup(unittest.TestCase):
    """Test the DTE Energy Bridge platform."""

    def setUp(self):
        """Initialize values for this testcase class."""
        self.hass = get_test_home_assistant()

    def tearDown(self):
        """Stop everything that was started."""
        self.hass.stop()

    def _assert_usage_state(self, mock_req, response_text, expected_state):
        """Mock the bridge endpoint, set up the sensor and check its state.

        Factored out of the three reading tests below, which previously
        triplicated the same mock/setup/assert sequence.
        """
        mock_req.get("http://{}/instantaneousdemand"
                     .format(DTE_ENERGY_BRIDGE_CONFIG['ip']),
                     text=response_text)
        assert setup_component(self.hass, 'sensor', {
            'sensor': DTE_ENERGY_BRIDGE_CONFIG})
        assert expected_state == \
            self.hass.states.get('sensor.current_energy_usage').state

    def test_setup_with_config(self):
        """Test the platform setup with configuration."""
        assert setup_component(self.hass, 'sensor',
                               {'dte_energy_bridge': DTE_ENERGY_BRIDGE_CONFIG})

    @requests_mock.Mocker()
    def test_setup_correct_reading(self, mock_req):
        """Test DTE Energy bridge returns a correct value."""
        self._assert_usage_state(mock_req, '.411 kW', '0.411')

    @requests_mock.Mocker()
    def test_setup_incorrect_units_reading(self, mock_req):
        """Test DTE Energy bridge handles a value with incorrect units."""
        self._assert_usage_state(mock_req, '411 kW', '0.411')

    @requests_mock.Mocker()
    def test_setup_bad_format_reading(self, mock_req):
        """Test DTE Energy bridge handles an invalid value."""
        self._assert_usage_state(mock_req, '411', 'unknown')
/**
* Generated by '@kontent-ai/model-generator@5.9.0'
*
* Project name: Next.js Sample App
* Environment: Production
* Project Id: a7844231-064c-016c-1dad-38228cbc505d
*/
export const contentTypeSnippets = {} as const; | typescript | github | https://github.com/vercel/next.js | examples/cms-kontent-ai/models/project/contentTypeSnippets.ts |
import pytest
from webdriver import error
from tests.support.asserts import assert_success
def delete_session(session):
    """Send a raw WebDriver Delete Session command for *session*'s id."""
    return session.transport.send("DELETE", "session/{session_id}".format(**vars(session)))
def test_null_response_value(session):
    """Deleting a session must succeed with a null (None) response value."""
    response = delete_session(session)
    value = assert_success(response)
    assert value is None
    # Need an explicit call to session.end() to notify the test harness
    # that a new session needs to be created for subsequent tests.
    session.end()
def test_dismissed_beforeunload_prompt(session, inline):
    """Delete Session must auto-dismiss a pending beforeunload prompt."""
    session.url = inline("""
      <input type="text">
      <script>
        window.addEventListener("beforeunload", function (event) {
          event.preventDefault();
        });
      </script>
    """)
    # Interacting with the page arms the beforeunload prompt.
    session.find.css("input", all=False).send_keys("foo")
    response = delete_session(session)
    assert_success(response)
    # A beforeunload prompt has to be automatically dismissed, and the session deleted
    with pytest.raises(error.InvalidSessionIdException):
        session.alert.text
    # Need an explicit call to session.end() to notify the test harness
    # that a new session needs to be created for subsequent tests.
    session.end()
# Copyright 2012 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for soc.logic.mapreduce_convert_user."""
import unittest
from google.appengine.api import users
from soc.models.user import User
from soc.mapreduce import convert_user
from soc.modules.seeder.logic.seeder import logic as seeder_logic
from soc.modules.seeder.logic.providers import string as seeder_string
class TestAccounts(unittest.TestCase):
  """Tests for convert_user logic.
  """

  def setUp(self):
    # Fresh link_id per test so seeded users do not collide.
    self.link_id = seeder_string.LinkIDProvider(User).getValue()

  def convert(self, email, same_user_id=False):
    """Seed a User with *email* and run the conversion transaction.

    When *same_user_id* is True the user is stored with its account's
    user_id already set, simulating an already-converted entity.

    Returns the convert_user_txn result code.
    """
    account = users.User(email=email)
    properties = {
        'account': account,
        'key_name': self.link_id,
        'link_id': self.link_id,
        'name': 'Test user',
        'status': 'valid',
    }
    user = seeder_logic.seed(User, properties)
    if same_user_id:
      user = User.get_by_key_name(self.link_id)
      user.user_id = user.account.user_id()
      user.put()
    return convert_user.convert_user_txn(user.key())

  def assertUserEqual(self, email):
    """Assert the stored user has *email* and a consistent user_id."""
    user = User.get_by_key_name(self.link_id)
    self.assertEqual(email, user.account.email())
    self.assertTrue(user.account.user_id())
    self.assertEqual(user.account.user_id(), user.user_id)

  def testNoop(self):
    # Already-converted non-Gmail account: nothing to do.
    result = self.convert('test@example.com', True)
    self.assertEqual(convert_user.IGNORED_USER, result)
    self.assertUserEqual('test@example.com')

  def testConverted(self):
    # Already-converted Gmail account: nothing to do.
    result = self.convert('test@gmail.com', True)
    self.assertEqual(convert_user.IGNORED_USER, result)
    self.assertUserEqual('test@gmail.com')

  def testPartiallyConverted(self):
    # Gmail account missing its user_id gets converted.
    result = self.convert('test@gmail.com')
    self.assertEqual(convert_user.CONVERTED_USER, result)
    self.assertUserEqual('test@gmail.com')

  def testNonAuthConverted(self):
    # Non-Gmail account missing its user_id also gets converted.
    result = self.convert('test@example.com')
    self.assertEqual(convert_user.CONVERTED_USER, result)
    self.assertUserEqual('test@example.com')

  def testFullConversion(self):
    # A bare username is normalized to a gmail.com address.
    result = self.convert('test')
    self.assertEqual(convert_user.CONVERTED_USER, result)
    self.assertUserEqual('test@gmail.com')
from JumpScale import j
# import JumpScale.baselib.remote
class CiscoSwitchManager(object):
    """Factory returning CiscoSwitch telnet sessions."""

    def get(self, host, login, password):
        # Opens a telnet session to the switch; see CiscoSwitch below.
        return CiscoSwitch(host, login, password)
#!/usr/bin/python
from Router import Router
class CiscoSwitch(object):
    """Telnet session to a Cisco switch, built on the expect-style Router helper."""

    def __init__(self, host, login, password):
        # All switch interaction is logged to 'cisco.log' for troubleshooting.
        R1 = Router(hostname=host, logfile='cisco.log')
        login_cmd = 'telnet ' + host
        login_expect = '#'  # enable-mode prompt; adjust if the switch differs
        R1.login(login_cmd, login, password, login_expect)
        self.cisco = R1
        self.host = host
        self.login = login
        self.password = password
        # Disable paging so command output is not interrupted by '--More--'.
        self.do("terminal length 0")
        # Set the hostname so the default prompt used by do() is predictable.
        self.do("configure terminal", "#")
        self.do("hostname %s" % host, "#")
        self.do("exit")

    def logout(self):
        """Close the telnet session.

        BUG FIX: previously called self._client.logout(), but _client was
        never assigned anywhere; the session object is stored in self.cisco.
        """
        self.cisco.logout()

    def do(self, cmd, prompt=None):
        """Execute *cmd* on the switch and return its output.

        *prompt* is the expect-string marking the end of the output; when
        omitted (None or ""), the switch's enable prompt '<hostname>#' is
        used.  BUG FIX: the original compared prompt against "" while the
        default was None, so the hostname-prompt fallback was dead code
        and None was passed straight to exec_cmd.
        """
        if not prompt:
            prompt = "%s#" % self.cisco.hostname
        return self.cisco.exec_cmd(cmd, prompt=prompt)

    def interface_getvlanconfig(self, interfaceName):
        """Return vlan config of *interfaceName* (not implemented yet)."""

    def interface_setvlan(self, interfaceName, fromVlanId, toVlanId, reset=False):
        """Configure a set of vlans on *interfaceName* (not implemented yet).

        @param reset when True older info is deleted and only this vlan
            range is added
        """

    def _normalizespaces(self, line):
        """Collapse runs of consecutive spaces in *line* to single spaces.

        BUG FIX: the original replaced a single space with a single space,
        which makes no progress on its intended normalization.
        """
        while "  " in line:
            line = line.replace("  ", " ")
        return line

    def interface_getArpMAC(self):
        """Return MAC addresses known per interface.

        Can be used to detect which ports servers are connected to.

        Returns a dict: {$interfacename: [$macaddr1, $macaddr2, ...]}
        """
        result = {}
        out = self.do("sh mac-address-table")
        for line in out.split("\n"):
            line = line.strip()
            # Relevant table rows are flagged with a leading '*'.
            if line == "" or line[0] != "*":
                continue
            line = self._normalizespaces(line)
            splitted = line.split(" ")
            if len(splitted) > 5:
                # Columns: '*', vlan, mac, type, <unused>, interface.
                mac = splitted[2].replace(".", "").lower()
                interface = splitted[5]
                if interface not in result:
                    result[interface] = []
                result[interface].append(mac)
        return result

    def interface_getall(self):
        """Return info about interfaces on switch (name, macaddresses, types, ...)."""
        raise RuntimeError("implement")

    def interface_getnames(self):
        """Return the names of the switch's interfaces."""
        raise RuntimeError("implement")

    def backup(self, name, destinationdir):
        """Save the switch configuration to *destinationdir* (not implemented).

        The unreachable download/save statements that followed the raise in
        the original have been removed as dead code.
        """
        config = self.do("show running-config")
        raise RuntimeError("implement")

    def download(self, path, dest):
        """Download *path* from the switch via FTP into local file *dest*.

        NOTE(review): assumes the switch exposes an FTP server -- verify.
        """
        from ftplib import FTP
        ftp = FTP(host=self.host, user=self.login, passwd=self.password)
        # BUG FIX: the local file handle was previously never closed.
        local_file = open(dest, 'wb')
        try:
            ftp.retrbinary('RETR %s' % path, local_file.write)
        finally:
            local_file.close()
            ftp.close()
"""
Classes to represent the default SQL aggregate functions
"""
import copy
import warnings
from django.db.models.fields import IntegerField, FloatField
from django.db.models.lookups import RegisterLookupMixin
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.functional import cached_property
__all__ = ['Aggregate', 'Avg', 'Count', 'Max', 'Min', 'StdDev', 'Sum', 'Variance']
# Emit a deprecation warning as soon as this module is imported; the whole
# module is slated for removal in Django 2.0.
warnings.warn(
    "django.db.models.sql.aggregates is deprecated. Use "
    "django.db.models.aggregates instead.",
    RemovedInDjango20Warning, stacklevel=2)
class Aggregate(RegisterLookupMixin):
    """
    Default SQL Aggregate.
    """
    is_ordinal = False
    is_computed = False
    sql_template = '%(function)s(%(field)s)'

    def __init__(self, col, source=None, is_summary=False, **extra):
        """Instantiate an SQL aggregate

         * col is a column reference describing the subject field
           of the aggregate. It can be an alias, or a tuple describing
           a table and column name.
         * source is the underlying field or aggregate definition for
           the column reference. If the aggregate is not an ordinal or
           computed type, this reference is used to determine the coerced
           output type of the aggregate.
         * extra is a dictionary of additional data to provide for the
           aggregate definition

        Also utilizes the class variables:
         * sql_function, the name of the SQL function that implements the
           aggregate.
         * sql_template, a template string that is used to render the
           aggregate into SQL.
         * is_ordinal, a boolean indicating if the output of this aggregate
           is an integer (e.g., a count)
         * is_computed, a boolean indicating if this output of this aggregate
           is a computed float (e.g., an average), regardless of the input
           type.
        """
        self.col = col
        self.source = source
        self.is_summary = is_summary
        self.extra = extra

        # Walk back along the chain of aggregate sources until an actual
        # field (or an aggregate that forces a particular output type) is
        # found; its type coerces values fetched from the database.
        node = self
        while node and isinstance(node, Aggregate):
            if getattr(node, 'is_ordinal', False):
                node = self._ordinal_aggregate_field
            elif getattr(node, 'is_computed', False):
                node = self._computed_aggregate_field
            else:
                node = node.source
        self.field = node

    # Two fake fields used to identify aggregate types in data-conversion
    # operations.
    @cached_property
    def _ordinal_aggregate_field(self):
        return IntegerField()

    @cached_property
    def _computed_aggregate_field(self):
        return FloatField()

    def relabeled_clone(self, change_map):
        """Return a shallow copy with the table alias in col remapped."""
        clone = copy.copy(self)
        if isinstance(self.col, (list, tuple)):
            old_alias = self.col[0]
            clone.col = (change_map.get(old_alias, old_alias), self.col[1])
        return clone

    def as_sql(self, compiler, connection):
        "Return the aggregate, rendered as SQL with parameters."
        params = []
        if hasattr(self.col, 'as_sql'):
            field_name, params = self.col.as_sql(compiler, connection)
        elif isinstance(self.col, (list, tuple)):
            field_name = '.'.join(compiler(part) for part in self.col)
        else:
            field_name = compiler(self.col)

        # extra entries deliberately override 'function'/'field'.
        context = {
            'function': self.sql_function,
            'field': field_name,
        }
        context.update(self.extra)
        return self.sql_template % context, params

    def get_group_by_cols(self):
        # Aggregates never contribute GROUP BY columns.
        return []

    @property
    def output_field(self):
        return self.field
class Avg(Aggregate):
    """SQL AVG() aggregate; output is always coerced to float."""
    is_computed = True
    sql_function = 'AVG'
class Count(Aggregate):
    """SQL COUNT() aggregate; output is always an integer."""
    is_ordinal = True
    sql_function = 'COUNT'
    # Extra %(distinct)s slot so COUNT(DISTINCT col) can be rendered.
    sql_template = '%(function)s(%(distinct)s%(field)s)'

    def __init__(self, col, distinct=False, **extra):
        prefix = 'DISTINCT ' if distinct else ''
        super(Count, self).__init__(col, distinct=prefix, **extra)
class Max(Aggregate):
    """SQL MAX() aggregate; output type follows the source column."""
    sql_function = 'MAX'
class Min(Aggregate):
    """SQL MIN() aggregate; output type follows the source column."""
    sql_function = 'MIN'
class StdDev(Aggregate):
    """Standard-deviation aggregate; sample or population variant."""
    is_computed = True

    def __init__(self, col, sample=False, **extra):
        super(StdDev, self).__init__(col, **extra)
        # The SQL function differs between the sample and population forms.
        if sample:
            self.sql_function = 'STDDEV_SAMP'
        else:
            self.sql_function = 'STDDEV_POP'
class Sum(Aggregate):
    """SQL SUM() aggregate; output type follows the source column."""
    sql_function = 'SUM'
class Variance(Aggregate):
    """Variance aggregate; sample or population variant."""
    is_computed = True

    def __init__(self, col, sample=False, **extra):
        super(Variance, self).__init__(col, **extra)
        # The SQL function differs between the sample and population forms.
        if sample:
            self.sql_function = 'VAR_SAMP'
        else:
            self.sql_function = 'VAR_POP'
//===--- URI.h - File URIs with schemes --------------------------*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANGD_URI_H
#define LLVM_CLANG_TOOLS_EXTRA_CLANGD_URI_H
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/Registry.h"
namespace clang {
namespace clangd {
/// A URI describes the location of a source file.
/// In the simplest case, this is a "file" URI that directly encodes the
/// absolute path to a file. More abstract cases are possible: a shared index
/// service might expose repo:// URIs that are relative to the source control
/// root.
///
/// Clangd handles URIs of the form <scheme>:[//<authority>]<body>. It doesn't
/// further split the authority or body into constituent parts (e.g. the query
/// string is included in the body).
class URI {
public:
  /// Construct a URI from already-decoded components.
  URI(llvm::StringRef Scheme, llvm::StringRef Authority, llvm::StringRef Body);
  /// Returns decoded scheme e.g. "https"
  llvm::StringRef scheme() const { return Scheme; }
  /// Returns decoded authority e.g. "reviews.llvm.org"
  llvm::StringRef authority() const { return Authority; }
  /// Returns decoded body e.g. "/D41946"
  llvm::StringRef body() const { return Body; }
  /// Returns a string URI with all components percent-encoded.
  std::string toString() const;
  /// Creates a URI for a file in the given scheme. \p Scheme must be
  /// registered. The URI is percent-encoded.
  static llvm::Expected<URI> create(llvm::StringRef AbsolutePath,
                                    llvm::StringRef Scheme);
  // Similar to above except this picks a registered scheme that works. If none
  // works, this falls back to "file" scheme.
  static URI create(llvm::StringRef AbsolutePath);
  /// This creates a file:// URI for \p AbsolutePath. The path must be absolute.
  static URI createFile(llvm::StringRef AbsolutePath);
  /// Parse a URI string "<scheme>:[//<authority>/]<path>". Percent-encoded
  /// characters in the URI will be decoded.
  static llvm::Expected<URI> parse(llvm::StringRef Uri);
  /// Resolves the absolute path of \p U. If there is no matching scheme, or the
  /// URI is invalid in the scheme, this returns an error.
  ///
  /// \p HintPath A related path, such as the current file or working directory,
  /// which can help disambiguate when the same file exists in many workspaces.
  static llvm::Expected<std::string> resolve(const URI &U,
                                             llvm::StringRef HintPath = "");
  /// Same as above, in addition it parses the \p FileURI using URI::parse.
  static llvm::Expected<std::string> resolve(llvm::StringRef FileURI,
                                             llvm::StringRef HintPath = "");
  /// Resolves \p AbsPath into a canonical path of its URI, by converting
  /// \p AbsPath to URI and resolving the URI to get the canonical path.
  /// This ensures that paths with the same URI are resolved into consistent
  /// file path.
  static llvm::Expected<std::string> resolvePath(llvm::StringRef AbsPath,
                                                 llvm::StringRef HintPath = "");
  /// Gets the preferred spelling of this file for #include, if there is one,
  /// e.g. <system_header.h>, "path/to/x.h".
  ///
  /// This allows URI schemas to provide their customized include paths.
  ///
  /// Returns an empty string if normal include-shortening based on the absolute
  /// path should be used.
  /// Fails if the URI is not valid in the schema.
  static llvm::Expected<std::string> includeSpelling(const URI &U);
  /// URIs compare componentwise (scheme, then authority, then body).
  friend bool operator==(const URI &LHS, const URI &RHS) {
    return std::tie(LHS.Scheme, LHS.Authority, LHS.Body) ==
           std::tie(RHS.Scheme, RHS.Authority, RHS.Body);
  }
  friend bool operator<(const URI &LHS, const URI &RHS) {
    return std::tie(LHS.Scheme, LHS.Authority, LHS.Body) <
           std::tie(RHS.Scheme, RHS.Authority, RHS.Body);
  }
private:
  URI() = default;
  // Components are stored decoded; toString() re-applies percent-encoding.
  std::string Scheme;
  std::string Authority;
  std::string Body;
};
/// URIScheme is an extension point for teaching clangd to recognize a custom
/// URI scheme. This is expected to be implemented and exposed via the
/// URISchemeRegistry.
class URIScheme {
public:
  virtual ~URIScheme() = default;
  /// Returns the absolute path of the file corresponding to the URI
  /// authority+body in the file system. See URI::resolve for semantics of
  /// \p HintPath.
  virtual llvm::Expected<std::string>
  getAbsolutePath(llvm::StringRef Authority, llvm::StringRef Body,
                  llvm::StringRef HintPath) const = 0;
  /// Inverse of getAbsolutePath: encodes \p AbsolutePath as a URI in this
  /// scheme.
  virtual llvm::Expected<URI>
  uriFromAbsolutePath(llvm::StringRef AbsolutePath) const = 0;
  /// Returns the include path of the file (e.g. <path>, "path"), which can be
  /// #included directly. See URI::includeSpelling for details.
  virtual llvm::Expected<std::string> getIncludeSpelling(const URI &U) const {
    return ""; // no customized include path for this scheme.
  }
};
/// By default, a "file" scheme is supported where URI paths are always absolute
/// in the file system.
typedef llvm::Registry<URIScheme> URISchemeRegistry;
} // namespace clangd
} // namespace clang
namespace llvm {
// The registry template is explicitly instantiated in exactly one translation
// unit; declare it extern here so all users share that single instantiation.
extern template class Registry<clang::clangd::URIScheme>;
} // namespace llvm
#endif // LLVM_CLANG_TOOLS_EXTRA_CLANGD_URI_H | c | github | https://github.com/llvm/llvm-project | clang-tools-extra/clangd/URI.h |
#
# Copyright 2006,2007,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from fm_emph import fm_deemph
from math import pi
try:
from gnuradio import analog
except ImportError:
import analog_swig as analog
class fm_demod_cf(gr.hier_block2):
    """
    Generalized FM demodulation block with deemphasis and audio
    filtering.

    Demodulates a band-limited, complex down-converted FM channel back
    into the original baseband signal, optionally applying deemphasis,
    then low-pass filters the result. The output is a float stream in
    the range [-1.0, +1.0].

    Args:
        channel_rate: incoming sample rate of the FM baseband (integer)
        audio_decim: input to output decimation rate (integer)
        deviation: maximum FM deviation (float)
        audio_pass: audio low pass filter passband frequency (float)
        audio_stop: audio low pass filter stop frequency (float)
        gain: gain applied to audio output (default = 1.0) (float)
        tau: deemphasis time constant (default = 75e-6); specify tau=0.0
            to prevent deemphasis (float)
    """
    def __init__(self, channel_rate, audio_decim, deviation,
                 audio_pass, audio_stop, gain=1.0, tau=75e-6):
        gr.hier_block2.__init__(
            self, "fm_demod_cf",
            gr.io_signature(1, 1, gr.sizeof_gr_complex),  # Input signature
            gr.io_signature(1, 1, gr.sizeof_float))       # Output signature

        # Quadrature demodulator gain: maps +/-deviation Hz to +/-1.0.
        quad_gain = channel_rate / (2 * pi * deviation)
        demod = analog.quadrature_demod_cf(quad_gain)

        audio_taps = filter.optfir.low_pass(
            gain,          # Filter gain
            channel_rate,  # Sample rate
            audio_pass,    # Audio passband
            audio_stop,    # Audio stopband
            0.1,           # Passband ripple
            60)            # Stopband attenuation
        lpf = filter.fir_filter_fff(audio_decim, audio_taps)

        if tau is not None and tau > 0.0:  # None should be deprecated someday
            deemph = fm_deemph(channel_rate, tau)
            self.connect(self, demod, deemph, lpf, self)
        else:
            self.connect(self, demod, lpf, self)
class demod_20k0f3e_cf(fm_demod_cf):
    """
    NBFM demodulation block for 20 kHz channels.

    Demodulates a complex, downconverted, narrowband FM channel
    conforming to 20K0F3E emission standards, outputting floats in
    the range [-1.0, +1.0].

    Args:
        channel_rate: incoming sample rate of the FM baseband (integer)
        audio_decim: input to output decimation rate (integer)
    """
    def __init__(self, channel_rate, audio_decim):
        fm_demod_cf.__init__(self, channel_rate, audio_decim,
                             deviation=5000,
                             audio_pass=3000,
                             audio_stop=4500)
class demod_200kf3e_cf(fm_demod_cf):
    """
    WFM demodulation block, mono.

    Demodulates a complex, downconverted, wideband FM channel
    conforming to 200KF3E emission standards, outputting floats in
    the range [-1.0, +1.0].

    Args:
        channel_rate: incoming sample rate of the FM baseband (integer)
        audio_decim: input to output decimation rate (integer)
    """
    def __init__(self, channel_rate, audio_decim):
        fm_demod_cf.__init__(self, channel_rate, audio_decim,
                             deviation=75000,
                             audio_pass=15000,
                             audio_stop=16000,
                             gain=20.0)
# Forwards docs-source changes on main (or a manual dispatch) to the
# deploy-docs workflow that lives on the docs-build branch.
name: Trigger Docs Build
on:
  push:
    branches: 'main'
    paths: [ 'antora/*' ]
  # All dispatch inputs are optional; empty values are forwarded as-is.
  workflow_dispatch:
    inputs:
      build-version:
        description: 'Version being build (e.g. 1.0.3-SNAPSHOT)'
        required: false
      build-sha:
        description: Enter the SHA to build (e.g. 82c97891569821a7f91a77ca074232e0b54ca7a5)
        required: false
      build-refname:
        description: 'Git refname to build (e.g., 1.0.x)'
        required: false
permissions:
  contents: read
jobs:
  trigger-docs-build:
    name: Trigger Docs Build
    # Guard against running in forks.
    if: github.repository_owner == 'spring-projects'
    runs-on: ${{ vars.UBUNTU_SMALL || 'ubuntu-latest' }}
    permissions:
      # 'actions: write' is needed for `gh workflow run`.
      actions: write
    steps:
      - name: Check Out
        uses: actions/checkout@v6
        with:
          # deploy-docs.yml is defined on the docs-build branch.
          ref: docs-build
      - name: Trigger Workflow
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: gh workflow run deploy-docs.yml -r docs-build -f build-refname=${{ github.event.inputs.build-refname }} -f build-sha=${{ github.event.inputs.build-sha }} -f build-version=${{ github.event.inputs.build-version }}
# -*- coding: utf-8 -*-
"""
pygments.lexers._sourcemodbuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names of SourceMod functions.
It is able to re-generate itself.
Do not edit the FUNCTIONS list by hand.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
FUNCTIONS = ['TopMenuHandler',
'CreateTopMenu',
'LoadTopMenuConfig',
'AddToTopMenu',
'GetTopMenuInfoString',
'GetTopMenuObjName',
'RemoveFromTopMenu',
'DisplayTopMenu',
'FindTopMenuCategory',
'OnAdminMenuCreated',
'OnAdminMenuReady',
'GetAdminTopMenu',
'AddTargetsToMenu',
'AddTargetsToMenu2',
'RedisplayAdminMenu',
'TEHook',
'AddTempEntHook',
'RemoveTempEntHook',
'TE_Start',
'TE_IsValidProp',
'TE_WriteNum',
'TE_ReadNum',
'TE_WriteFloat',
'TE_ReadFloat',
'TE_WriteVector',
'TE_ReadVector',
'TE_WriteAngles',
'TE_WriteFloatArray',
'TE_Send',
'TE_WriteEncodedEnt',
'TE_SendToAll',
'TE_SendToClient',
'CreateKeyValues',
'KvSetString',
'KvSetNum',
'KvSetUInt64',
'KvSetFloat',
'KvSetColor',
'KvSetVector',
'KvGetString',
'KvGetNum',
'KvGetFloat',
'KvGetColor',
'KvGetUInt64',
'KvGetVector',
'KvJumpToKey',
'KvJumpToKeySymbol',
'KvGotoFirstSubKey',
'KvGotoNextKey',
'KvSavePosition',
'KvDeleteKey',
'KvDeleteThis',
'KvGoBack',
'KvRewind',
'KvGetSectionName',
'KvSetSectionName',
'KvGetDataType',
'KeyValuesToFile',
'FileToKeyValues',
'KvSetEscapeSequences',
'KvNodesInStack',
'KvCopySubkeys',
'KvFindKeyById',
'KvGetNameSymbol',
'KvGetSectionSymbol',
'TE_SetupSparks',
'TE_SetupSmoke',
'TE_SetupDust',
'TE_SetupMuzzleFlash',
'TE_SetupMetalSparks',
'TE_SetupEnergySplash',
'TE_SetupArmorRicochet',
'TE_SetupGlowSprite',
'TE_SetupExplosion',
'TE_SetupBloodSprite',
'TE_SetupBeamRingPoint',
'TE_SetupBeamPoints',
'TE_SetupBeamLaser',
'TE_SetupBeamRing',
'TE_SetupBeamFollow',
'HookEvent',
'HookEventEx',
'UnhookEvent',
'CreateEvent',
'FireEvent',
'CancelCreatedEvent',
'GetEventBool',
'SetEventBool',
'GetEventInt',
'SetEventInt',
'GetEventFloat',
'SetEventFloat',
'GetEventString',
'SetEventString',
'GetEventName',
'SetEventBroadcast',
'GetUserMessageId',
'GetUserMessageName',
'StartMessage',
'StartMessageEx',
'EndMessage',
'MsgHook',
'MsgPostHook',
'HookUserMessage',
'UnhookUserMessage',
'StartMessageAll',
'StartMessageOne',
'InactivateClient',
'ReconnectClient',
'GetMaxEntities',
'GetEntityCount',
'IsValidEntity',
'IsValidEdict',
'IsEntNetworkable',
'CreateEdict',
'RemoveEdict',
'GetEdictFlags',
'SetEdictFlags',
'GetEdictClassname',
'GetEntityNetClass',
'ChangeEdictState',
'GetEntData',
'SetEntData',
'GetEntDataFloat',
'SetEntDataFloat',
'GetEntDataEnt2',
'SetEntDataEnt2',
'GetEntDataVector',
'SetEntDataVector',
'GetEntDataString',
'SetEntDataString',
'FindSendPropOffs',
'FindSendPropInfo',
'FindDataMapOffs',
'GetEntSendPropOffs',
'GetEntProp',
'SetEntProp',
'GetEntPropFloat',
'SetEntPropFloat',
'GetEntPropEnt',
'SetEntPropEnt',
'GetEntPropVector',
'SetEntPropVector',
'GetEntPropString',
'SetEntPropString',
'GetEntPropArraySize',
'GetEntDataArray',
'SetEntDataArray',
'GetEntityClassname',
'float',
'FloatMul',
'FloatDiv',
'FloatAdd',
'FloatSub',
'FloatFraction',
'RoundToZero',
'RoundToCeil',
'RoundToFloor',
'RoundToNearest',
'FloatCompare',
'SquareRoot',
'Pow',
'Exponential',
'Logarithm',
'Sine',
'Cosine',
'Tangent',
'FloatAbs',
'ArcTangent',
'ArcCosine',
'ArcSine',
'ArcTangent2',
'RoundFloat',
'operator%',
'DegToRad',
'RadToDeg',
'GetURandomInt',
'GetURandomFloat',
'SetURandomSeed',
'SetURandomSeedSimple',
'RemovePlayerItem',
'GivePlayerItem',
'GetPlayerWeaponSlot',
'IgniteEntity',
'ExtinguishEntity',
'TeleportEntity',
'ForcePlayerSuicide',
'SlapPlayer',
'FindEntityByClassname',
'GetClientEyeAngles',
'CreateEntityByName',
'DispatchSpawn',
'DispatchKeyValue',
'DispatchKeyValueFloat',
'DispatchKeyValueVector',
'GetClientAimTarget',
'GetTeamCount',
'GetTeamName',
'GetTeamScore',
'SetTeamScore',
'GetTeamClientCount',
'SetEntityModel',
'GetPlayerDecalFile',
'GetServerNetStats',
'EquipPlayerWeapon',
'ActivateEntity',
'SetClientInfo',
'SetClientListeningFlags',
'GetClientListeningFlags',
'SetListenOverride',
'GetListenOverride',
'IsClientMuted',
'TR_GetPointContents',
'TR_GetPointContentsEnt',
'TR_TraceRay',
'TR_TraceHull',
'TR_TraceRayFilter',
'TR_TraceHullFilter',
'TR_TraceRayEx',
'TR_TraceHullEx',
'TR_TraceRayFilterEx',
'TR_TraceHullFilterEx',
'TR_GetFraction',
'TR_GetEndPosition',
'TR_GetEntityIndex',
'TR_DidHit',
'TR_GetHitGroup',
'TR_GetPlaneNormal',
'TR_PointOutsideWorld',
'SortIntegers',
'SortFloats',
'SortStrings',
'SortFunc1D',
'SortCustom1D',
'SortCustom2D',
'SortADTArray',
'SortFuncADTArray',
'SortADTArrayCustom',
'CompileRegex',
'MatchRegex',
'GetRegexSubString',
'SimpleRegexMatch',
'TF2_GetPlayerClass',
'TF2_SetPlayerClass',
'TF2_GetPlayerResourceData',
'TF2_SetPlayerResourceData',
'TF2_RemoveWeaponSlot',
'TF2_RemoveAllWeapons',
'TF2_IsPlayerInCondition',
'TF2_GetObjectType',
'TF2_GetObjectMode',
'NominateMap',
'RemoveNominationByMap',
'RemoveNominationByOwner',
'GetExcludeMapList',
'GetNominatedMapList',
'CanMapChooserStartVote',
'InitiateMapChooserVote',
'HasEndOfMapVoteFinished',
'EndOfMapVoteEnabled',
'OnNominationRemoved',
'OnMapVoteStarted',
'CreateTimer',
'KillTimer',
'TriggerTimer',
'GetTickedTime',
'GetMapTimeLeft',
'GetMapTimeLimit',
'ExtendMapTimeLimit',
'GetTickInterval',
'OnMapTimeLeftChanged',
'IsServerProcessing',
'CreateDataTimer',
'ByteCountToCells',
'CreateArray',
'ClearArray',
'CloneArray',
'ResizeArray',
'GetArraySize',
'PushArrayCell',
'PushArrayString',
'PushArrayArray',
'GetArrayCell',
'GetArrayString',
'GetArrayArray',
'SetArrayCell',
'SetArrayString',
'SetArrayArray',
'ShiftArrayUp',
'RemoveFromArray',
'SwapArrayItems',
'FindStringInArray',
'FindValueInArray',
'ProcessTargetString',
'ReplyToTargetError',
'MultiTargetFilter',
'AddMultiTargetFilter',
'RemoveMultiTargetFilter',
'OnBanClient',
'OnBanIdentity',
'OnRemoveBan',
'BanClient',
'BanIdentity',
'RemoveBan',
'CreateTrie',
'SetTrieValue',
'SetTrieArray',
'SetTrieString',
'GetTrieValue',
'GetTrieArray',
'GetTrieString',
'RemoveFromTrie',
'ClearTrie',
'GetTrieSize',
'GetFunctionByName',
'CreateGlobalForward',
'CreateForward',
'GetForwardFunctionCount',
'AddToForward',
'RemoveFromForward',
'RemoveAllFromForward',
'Call_StartForward',
'Call_StartFunction',
'Call_PushCell',
'Call_PushCellRef',
'Call_PushFloat',
'Call_PushFloatRef',
'Call_PushArray',
'Call_PushArrayEx',
'Call_PushString',
'Call_PushStringEx',
'Call_Finish',
'Call_Cancel',
'NativeCall',
'CreateNative',
'ThrowNativeError',
'GetNativeStringLength',
'GetNativeString',
'SetNativeString',
'GetNativeCell',
'GetNativeCellRef',
'SetNativeCellRef',
'GetNativeArray',
'SetNativeArray',
'FormatNativeString',
'OnRebuildAdminCache',
'DumpAdminCache',
'AddCommandOverride',
'GetCommandOverride',
'UnsetCommandOverride',
'CreateAdmGroup',
'FindAdmGroup',
'SetAdmGroupAddFlag',
'GetAdmGroupAddFlag',
'GetAdmGroupAddFlags',
'SetAdmGroupImmuneFrom',
'GetAdmGroupImmuneCount',
'GetAdmGroupImmuneFrom',
'AddAdmGroupCmdOverride',
'GetAdmGroupCmdOverride',
'RegisterAuthIdentType',
'CreateAdmin',
'GetAdminUsername',
'BindAdminIdentity',
'SetAdminFlag',
'GetAdminFlag',
'GetAdminFlags',
'AdminInheritGroup',
'GetAdminGroupCount',
'GetAdminGroup',
'SetAdminPassword',
'GetAdminPassword',
'FindAdminByIdentity',
'RemoveAdmin',
'FlagBitsToBitArray',
'FlagBitArrayToBits',
'FlagArrayToBits',
'FlagBitsToArray',
'FindFlagByName',
'FindFlagByChar',
'FindFlagChar',
'ReadFlagString',
'CanAdminTarget',
'CreateAuthMethod',
'SetAdmGroupImmunityLevel',
'GetAdmGroupImmunityLevel',
'SetAdminImmunityLevel',
'GetAdminImmunityLevel',
'FlagToBit',
'BitToFlag',
'ServerCommand',
'ServerCommandEx',
'InsertServerCommand',
'ServerExecute',
'ClientCommand',
'FakeClientCommand',
'FakeClientCommandEx',
'PrintToServer',
'PrintToConsole',
'ReplyToCommand',
'GetCmdReplySource',
'SetCmdReplySource',
'IsChatTrigger',
'ShowActivity2',
'ShowActivity',
'ShowActivityEx',
'FormatActivitySource',
'SrvCmd',
'RegServerCmd',
'ConCmd',
'RegConsoleCmd',
'RegAdminCmd',
'GetCmdArgs',
'GetCmdArg',
'GetCmdArgString',
'CreateConVar',
'FindConVar',
'ConVarChanged',
'HookConVarChange',
'UnhookConVarChange',
'GetConVarBool',
'SetConVarBool',
'GetConVarInt',
'SetConVarInt',
'GetConVarFloat',
'SetConVarFloat',
'GetConVarString',
'SetConVarString',
'ResetConVar',
'GetConVarDefault',
'GetConVarFlags',
'SetConVarFlags',
'GetConVarBounds',
'SetConVarBounds',
'GetConVarName',
'QueryClientConVar',
'GetCommandIterator',
'ReadCommandIterator',
'CheckCommandAccess',
'CheckAccess',
'IsValidConVarChar',
'GetCommandFlags',
'SetCommandFlags',
'FindFirstConCommand',
'FindNextConCommand',
'SendConVarValue',
'AddServerTag',
'RemoveServerTag',
'CommandListener',
'AddCommandListener',
'RemoveCommandListener',
'TF2_IgnitePlayer',
'TF2_RespawnPlayer',
'TF2_RegeneratePlayer',
'TF2_AddCondition',
'TF2_RemoveCondition',
'TF2_SetPlayerPowerPlay',
'TF2_DisguisePlayer',
'TF2_RemovePlayerDisguise',
'TF2_StunPlayer',
'TF2_MakeBleed',
'TF2_GetResourceEntity',
'TF2_GetClass',
'TF2_CalcIsAttackCritical',
'TF2_OnIsHolidayActive',
'TF2_IsPlayerInDuel',
'TF2_OnConditionAdded',
'TF2_OnConditionRemoved',
'TF2_OnWaitingForPlayersStart',
'TF2_OnWaitingForPlayersEnd',
'SQL_Connect',
'SQL_DefConnect',
'SQL_ConnectCustom',
'SQLite_UseDatabase',
'SQL_CheckConfig',
'SQL_GetDriver',
'SQL_ReadDriver',
'SQL_GetDriverIdent',
'SQL_GetDriverProduct',
'SQL_GetAffectedRows',
'SQL_GetInsertId',
'SQL_GetError',
'SQL_EscapeString',
'SQL_QuoteString',
'SQL_FastQuery',
'SQL_Query',
'SQL_PrepareQuery',
'SQL_FetchMoreResults',
'SQL_HasResultSet',
'SQL_GetRowCount',
'SQL_GetFieldCount',
'SQL_FieldNumToName',
'SQL_FieldNameToNum',
'SQL_FetchRow',
'SQL_MoreRows',
'SQL_Rewind',
'SQL_FetchString',
'SQL_FetchFloat',
'SQL_FetchInt',
'SQL_IsFieldNull',
'SQL_FetchSize',
'SQL_BindParamInt',
'SQL_BindParamFloat',
'SQL_BindParamString',
'SQL_Execute',
'SQL_LockDatabase',
'SQL_UnlockDatabase',
'SQLTCallback',
'SQL_IsSameConnection',
'SQL_TConnect',
'SQL_TQuery',
'CloseHandle',
'CloneHandle',
'MenuHandler',
'CreateMenu',
'DisplayMenu',
'DisplayMenuAtItem',
'AddMenuItem',
'InsertMenuItem',
'RemoveMenuItem',
'RemoveAllMenuItems',
'GetMenuItem',
'GetMenuSelectionPosition',
'GetMenuItemCount',
'SetMenuPagination',
'GetMenuPagination',
'GetMenuStyle',
'SetMenuTitle',
'GetMenuTitle',
'CreatePanelFromMenu',
'GetMenuExitButton',
'SetMenuExitButton',
'GetMenuExitBackButton',
'SetMenuExitBackButton',
'SetMenuNoVoteButton',
'CancelMenu',
'GetMenuOptionFlags',
'SetMenuOptionFlags',
'IsVoteInProgress',
'CancelVote',
'VoteMenu',
'VoteMenuToAll',
'VoteHandler',
'SetVoteResultCallback',
'CheckVoteDelay',
'IsClientInVotePool',
'RedrawClientVoteMenu',
'GetMenuStyleHandle',
'CreatePanel',
'CreateMenuEx',
'GetClientMenu',
'CancelClientMenu',
'GetMaxPageItems',
'GetPanelStyle',
'SetPanelTitle',
'DrawPanelItem',
'DrawPanelText',
'CanPanelDrawFlags',
'SetPanelKeys',
'SendPanelToClient',
'GetPanelTextRemaining',
'GetPanelCurrentKey',
'SetPanelCurrentKey',
'RedrawMenuItem',
'InternalShowMenu',
'GetMenuVoteInfo',
'IsNewVoteAllowed',
'PrefetchSound',
'EmitAmbientSound',
'FadeClientVolume',
'StopSound',
'EmitSound',
'EmitSentence',
'GetDistGainFromSoundLevel',
'AmbientSHook',
'NormalSHook',
'AddAmbientSoundHook',
'AddNormalSoundHook',
'RemoveAmbientSoundHook',
'RemoveNormalSoundHook',
'EmitSoundToClient',
'EmitSoundToAll',
'ATTN_TO_SNDLEVEL',
'strlen',
'StrContains',
'strcmp',
'strncmp',
'StrEqual',
'strcopy',
'Format',
'FormatEx',
'VFormat',
'StringToInt',
'StringToIntEx',
'IntToString',
'StringToFloat',
'StringToFloatEx',
'FloatToString',
'BreakString',
'TrimString',
'SplitString',
'ReplaceString',
'ReplaceStringEx',
'GetCharBytes',
'IsCharAlpha',
'IsCharNumeric',
'IsCharSpace',
'IsCharMB',
'IsCharUpper',
'IsCharLower',
'StripQuotes',
'CharToUpper',
'CharToLower',
'FindCharInString',
'StrCat',
'ExplodeString',
'ImplodeStrings',
'GetVectorLength',
'GetVectorDistance',
'GetVectorDotProduct',
'GetVectorCrossProduct',
'NormalizeVector',
'GetAngleVectors',
'GetVectorAngles',
'GetVectorVectors',
'AddVectors',
'SubtractVectors',
'ScaleVector',
'NegateVector',
'MakeVectorFromPoints',
'BaseComm_IsClientGagged',
'BaseComm_IsClientMuted',
'BaseComm_SetClientGag',
'BaseComm_SetClientMute',
'FormatUserLogText',
'FindPluginByFile',
'FindTarget',
'AcceptEntityInput',
'SetVariantBool',
'SetVariantString',
'SetVariantInt',
'SetVariantFloat',
'SetVariantVector3D',
'SetVariantPosVector3D',
'SetVariantColor',
'SetVariantEntity',
'GameRules_GetProp',
'GameRules_SetProp',
'GameRules_GetPropFloat',
'GameRules_SetPropFloat',
'GameRules_GetPropEnt',
'GameRules_SetPropEnt',
'GameRules_GetPropVector',
'GameRules_SetPropVector',
'GameRules_GetPropString',
'GameRules_SetPropString',
'GameRules_GetRoundState',
'OnClientConnect',
'OnClientConnected',
'OnClientPutInServer',
'OnClientDisconnect',
'OnClientDisconnect_Post',
'OnClientCommand',
'OnClientSettingsChanged',
'OnClientAuthorized',
'OnClientPreAdminCheck',
'OnClientPostAdminFilter',
'OnClientPostAdminCheck',
'GetMaxClients',
'GetClientCount',
'GetClientName',
'GetClientIP',
'GetClientAuthString',
'GetClientUserId',
'IsClientConnected',
'IsClientInGame',
'IsClientInKickQueue',
'IsClientAuthorized',
'IsFakeClient',
'IsClientSourceTV',
'IsClientReplay',
'IsClientObserver',
'IsPlayerAlive',
'GetClientInfo',
'GetClientTeam',
'SetUserAdmin',
'GetUserAdmin',
'AddUserFlags',
'RemoveUserFlags',
'SetUserFlagBits',
'GetUserFlagBits',
'CanUserTarget',
'RunAdminCacheChecks',
'NotifyPostAdminCheck',
'CreateFakeClient',
'SetFakeClientConVar',
'GetClientHealth',
'GetClientModel',
'GetClientWeapon',
'GetClientMaxs',
'GetClientMins',
'GetClientAbsAngles',
'GetClientAbsOrigin',
'GetClientArmor',
'GetClientDeaths',
'GetClientFrags',
'GetClientDataRate',
'IsClientTimingOut',
'GetClientTime',
'GetClientLatency',
'GetClientAvgLatency',
'GetClientAvgLoss',
'GetClientAvgChoke',
'GetClientAvgData',
'GetClientAvgPackets',
'GetClientOfUserId',
'KickClient',
'KickClientEx',
'ChangeClientTeam',
'GetClientSerial',
'GetClientFromSerial',
'FindStringTable',
'GetNumStringTables',
'GetStringTableNumStrings',
'GetStringTableMaxStrings',
'GetStringTableName',
'FindStringIndex',
'ReadStringTable',
'GetStringTableDataLength',
'GetStringTableData',
'SetStringTableData',
'AddToStringTable',
'LockStringTables',
'AddFileToDownloadsTable',
'GetEntityFlags',
'SetEntityFlags',
'GetEntityMoveType',
'SetEntityMoveType',
'GetEntityRenderMode',
'SetEntityRenderMode',
'GetEntityRenderFx',
'SetEntityRenderFx',
'SetEntityRenderColor',
'GetEntityGravity',
'SetEntityGravity',
'SetEntityHealth',
'GetClientButtons',
'EntityOutput',
'HookEntityOutput',
'UnhookEntityOutput',
'HookSingleEntityOutput',
'UnhookSingleEntityOutput',
'SMC_CreateParser',
'SMC_ParseFile',
'SMC_GetErrorString',
'SMC_ParseStart',
'SMC_SetParseStart',
'SMC_ParseEnd',
'SMC_SetParseEnd',
'SMC_NewSection',
'SMC_KeyValue',
'SMC_EndSection',
'SMC_SetReaders',
'SMC_RawLine',
'SMC_SetRawLine',
'BfWriteBool',
'BfWriteByte',
'BfWriteChar',
'BfWriteShort',
'BfWriteWord',
'BfWriteNum',
'BfWriteFloat',
'BfWriteString',
'BfWriteEntity',
'BfWriteAngle',
'BfWriteCoord',
'BfWriteVecCoord',
'BfWriteVecNormal',
'BfWriteAngles',
'BfReadBool',
'BfReadByte',
'BfReadChar',
'BfReadShort',
'BfReadWord',
'BfReadNum',
'BfReadFloat',
'BfReadString',
'BfReadEntity',
'BfReadAngle',
'BfReadCoord',
'BfReadVecCoord',
'BfReadVecNormal',
'BfReadAngles',
'BfGetNumBytesLeft',
'CreateProfiler',
'StartProfiling',
'StopProfiling',
'GetProfilerTime',
'OnPluginStart',
'AskPluginLoad2',
'OnPluginEnd',
'OnPluginPauseChange',
'OnGameFrame',
'OnMapStart',
'OnMapEnd',
'OnConfigsExecuted',
'OnAutoConfigsBuffered',
'OnAllPluginsLoaded',
'GetMyHandle',
'GetPluginIterator',
'MorePlugins',
'ReadPlugin',
'GetPluginStatus',
'GetPluginFilename',
'IsPluginDebugging',
'GetPluginInfo',
'FindPluginByNumber',
'SetFailState',
'ThrowError',
'GetTime',
'FormatTime',
'LoadGameConfigFile',
'GameConfGetOffset',
'GameConfGetKeyValue',
'GetSysTickCount',
'AutoExecConfig',
'RegPluginLibrary',
'LibraryExists',
'GetExtensionFileStatus',
'OnLibraryAdded',
'OnLibraryRemoved',
'ReadMapList',
'SetMapListCompatBind',
'OnClientFloodCheck',
'OnClientFloodResult',
'CanTestFeatures',
'GetFeatureStatus',
'RequireFeature',
'LoadFromAddress',
'StoreToAddress',
'CreateStack',
'PushStackCell',
'PushStackString',
'PushStackArray',
'PopStackCell',
'PopStackString',
'PopStackArray',
'IsStackEmpty',
'PopStack',
'OnPlayerRunCmd',
'BuildPath',
'OpenDirectory',
'ReadDirEntry',
'OpenFile',
'DeleteFile',
'ReadFileLine',
'ReadFile',
'ReadFileString',
'WriteFile',
'WriteFileString',
'WriteFileLine',
'ReadFileCell',
'WriteFileCell',
'IsEndOfFile',
'FileSeek',
'FilePosition',
'FileExists',
'RenameFile',
'DirExists',
'FileSize',
'FlushFile',
'RemoveDir',
'CreateDirectory',
'GetFileTime',
'LogToOpenFile',
'LogToOpenFileEx',
'SetNextMap',
'GetNextMap',
'ForceChangeLevel',
'GetMapHistorySize',
'GetMapHistory',
'GeoipCode2',
'GeoipCode3',
'GeoipCountry',
'MarkNativeAsOptional',
'RegClientCookie',
'FindClientCookie',
'SetClientCookie',
'GetClientCookie',
'SetAuthIdCookie',
'AreClientCookiesCached',
'OnClientCookiesCached',
'CookieMenuHandler',
'SetCookiePrefabMenu',
'SetCookieMenuItem',
'ShowCookieMenu',
'GetCookieIterator',
'ReadCookieIterator',
'GetCookieAccess',
'GetClientCookieTime',
'LoadTranslations',
'SetGlobalTransTarget',
'GetClientLanguage',
'GetServerLanguage',
'GetLanguageCount',
'GetLanguageInfo',
'SetClientLanguage',
'GetLanguageByCode',
'GetLanguageByName',
'CS_OnBuyCommand',
'CS_OnCSWeaponDrop',
'CS_OnGetWeaponPrice',
'CS_OnTerminateRound',
'CS_RespawnPlayer',
'CS_SwitchTeam',
'CS_DropWeapon',
'CS_TerminateRound',
'CS_GetTranslatedWeaponAlias',
'CS_GetWeaponPrice',
'CS_GetClientClanTag',
'CS_SetClientClanTag',
'LogToGame',
'SetRandomSeed',
'GetRandomFloat',
'GetRandomInt',
'IsMapValid',
'IsDedicatedServer',
'GetEngineTime',
'GetGameTime',
'GetGameTickCount',
'GetGameDescription',
'GetGameFolderName',
'GetCurrentMap',
'PrecacheModel',
'PrecacheSentenceFile',
'PrecacheDecal',
'PrecacheGeneric',
'IsModelPrecached',
'IsDecalPrecached',
'IsGenericPrecached',
'PrecacheSound',
'IsSoundPrecached',
'CreateDialog',
'GuessSDKVersion',
'PrintToChat',
'PrintToChatAll',
'PrintCenterText',
'PrintCenterTextAll',
'PrintHintText',
'PrintHintTextToAll',
'ShowVGUIPanel',
'CreateHudSynchronizer',
'SetHudTextParams',
'SetHudTextParamsEx',
'ShowSyncHudText',
'ClearSyncHud',
'ShowHudText',
'ShowMOTDPanel',
'DisplayAskConnectBox',
'EntIndexToEntRef',
'EntRefToEntIndex',
'MakeCompatEntRef',
'SetClientViewEntity',
'SetLightStyle',
'GetClientEyePosition',
'CreateDataPack',
'WritePackCell',
'WritePackFloat',
'WritePackString',
'ReadPackCell',
'ReadPackFloat',
'ReadPackString',
'ResetPack',
'GetPackPosition',
'SetPackPosition',
'IsPackReadable',
'LogMessage',
'LogMessageEx',
'LogToFile',
'LogToFileEx',
'LogAction',
'LogError',
'OnLogAction',
'GameLogHook',
'AddGameLogHook',
'RemoveGameLogHook',
'FindTeamByName',
'StartPrepSDKCall',
'PrepSDKCall_SetVirtual',
'PrepSDKCall_SetSignature',
'PrepSDKCall_SetFromConf',
'PrepSDKCall_SetReturnInfo',
'PrepSDKCall_AddParameter',
'EndPrepSDKCall',
'SDKCall']
if __name__ == '__main__':
    # Self-regeneration entry point (Python 2 script): scrape the online
    # SourceMod API docs and rewrite the FUNCTIONS list in this very file.
    import pprint
    import re
    import sys
    import urllib
    # urllib ends up wanting to import a module called 'math' -- if
    # pygments/lexers is in the path, this ends badly.
    for i in range(len(sys.path)-1, -1, -1):
        if sys.path[i].endswith('/lexers'):
            del sys.path[i]
    def get_version():
        # Scrape the current SourceMod version string from the docs index.
        # NOTE(review): falls through and returns None if the pattern is
        # never matched.
        f = urllib.urlopen('http://docs.sourcemod.net/api/index.php')
        r = re.compile(r'SourceMod v\.<b>([\d\.]+)</td>')
        for line in f:
            m = r.search(line)
            if m is not None:
                return m.groups()[0]
    def get_sm_functions():
        # Download the JavaScript function index and extract the names.
        f = urllib.urlopen('http://docs.sourcemod.net/api/SMfuncs.js')
        r = re.compile(r'SMfunctions\[\d+\] = Array \("(?:public )?([^,]+)",".+"\);')
        functions = []
        for line in f:
            m = r.match(line)
            if m is not None:
                functions.append(m.groups()[0])
        return functions
    def regenerate(filename, natives):
        # Rewrite this file in place: keep everything before the FUNCTIONS
        # assignment and from the __main__ guard onwards, replacing only the
        # list itself with the freshly fetched names.
        f = open(filename)
        try:
            content = f.read()
        finally:
            f.close()
        header = content[:content.find('FUNCTIONS = [')]
        footer = content[content.find("if __name__ == '__main__':"):]
        f = open(filename, 'w')
        f.write(header)
        f.write('FUNCTIONS = %s\n\n' % pprint.pformat(natives))
        f.write(footer)
        f.close()
    def run():
        # Fetch, report, and regenerate (Python 2 print statements).
        version = get_version()
        print '> Downloading function index for SourceMod %s' % version
        functions = get_sm_functions()
        print '> %d functions found:' % len(functions)
        functionlist = []
        for full_function_name in functions:
            print '>> %s' % full_function_name
            functionlist.append(full_function_name)
        regenerate(__file__, functionlist)
    run()
"""
Apple Quicktime Movie (file extension ".mov") parser.
Documents:
- Parsing and Writing QuickTime Files in Java (by Chris Adamson, 02/19/2003)
http://www.onjava.com/pub/a/onjava/2003/02/19/qt_file_format.html
- QuickTime File Format (official technical reference)
http://developer.apple.com/documentation/QuickTime/QTFF/qtff.pdf
- Apple QuickTime:
http://wiki.multimedia.cx/index.php?title=Apple_QuickTime
- File type (ftyp):
http://www.ftyps.com/
- MPEG4 standard
http://neuron2.net/library/avc/c041828_ISO_IEC_14496-12_2005%28E%29.pdf
Author: Victor Stinner, Robert Xiao
Creation: 2 august 2006
"""
from hachoir_parser import Parser
from hachoir_parser.common.win32 import GUID
from hachoir_core.field import (ParserError, FieldSet, MissingField,
Enum,
Bit, NullBits, Bits, UInt8, Int16, UInt16, Int32, UInt32, Int64, UInt64, TimestampMac32,
String, PascalString8, PascalString16, CString,
RawBytes, NullBytes, PaddingBytes)
from hachoir_core.endian import BIG_ENDIAN
from hachoir_core.text_handler import textHandler, hexadecimal
from hachoir_core.tools import MAC_TIMESTAMP_T0, timedelta
def timestampMac64(value):
    """Convert a 64-bit Mac timestamp (seconds since the Mac epoch) to a date.

    Accepts int, long or float seconds; raises TypeError for anything else,
    mirroring the behaviour of the built-in 32-bit conversion.
    """
    if isinstance(value, (float, int, long)):
        return MAC_TIMESTAMP_T0 + timedelta(seconds=value)
    raise TypeError("an integer or float is required")
# Build a 64-bit Mac timestamp field type from the conversion helper above.
from hachoir_core.field.timestamp import timestampFactory
TimestampMac64 = timestampFactory("TimestampMac64", timestampMac64, 64)
def fixedFloatFactory(name, int_bits, float_bits, doc):
    """Create a FieldSet subclass parsing an int_bits.float_bits fixed-point number.

    The generated class is renamed to *name* and carries *doc* as docstring.
    """
    total_bits = int_bits + float_bits

    class FixedPoint(FieldSet):
        static_size = total_bits
        __doc__ = doc

        def createFields(self):
            # Integer part first, then the fractional part.
            yield Bits(self, "int_part", int_bits)
            yield Bits(self, "float_part", float_bits)

        def createValue(self):
            # value = int_part + float_part / 2**float_bits
            return self["int_part"].value + float(self["float_part"].value) / (1 << float_bits)

    FixedPoint.__name__ = name
    return FixedPoint
# Shared fixed-point field types used throughout the MOV atom structures.
# BUG FIX: the 8.8 type was created with the wrong class name ("QTFloat32");
# it is now correctly named "QTFloat16" (the name only affects display/debug).
QTFloat16 = fixedFloatFactory("QTFloat16", 8, 8, "8.8 fixed point number")
QTFloat32 = fixedFloatFactory("QTFloat32", 16, 16, "16.16 fixed point number")
QTFloat2_30 = fixedFloatFactory("QTFloat2_30", 2, 30, "2.30 fixed point number")
class AtomList(FieldSet):
    """Generic container: parses child atoms until the end of this field set."""
    def createFields(self):
        while not self.eof:
            yield Atom(self, "atom[]")
class TrackHeader(FieldSet):
    """'tkhd' atom: per-track header (flags, timing, identity, spatial layout).

    Version 0 uses 32-bit timestamps/duration, version 1 uses 64-bit ones.
    """
    def createFields(self):
        yield UInt8(self, "version", "Version (0 or 1)")
        # 24 flag bits total: 20 reserved + 4 meaningful bits below.
        yield NullBits(self, "flags", 20)
        yield Bit(self, "is_in_poster")
        yield Bit(self, "is_in_preview", "Is this track used when previewing the presentation?")
        yield Bit(self, "is_in_movie", "Is this track used in the presentation?")
        yield Bit(self, "is_enabled", "Is this track enabled?")
        if self['version'].value == 0:
            # 32-bit version
            yield TimestampMac32(self, "creation_date", "Creation time of this track")
            yield TimestampMac32(self, "lastmod_date", "Last modification time of this track")
            yield UInt32(self, "track_id", "Unique nonzero identifier of this track within the presentation")
            yield NullBytes(self, "reserved[]", 4)
            yield UInt32(self, "duration", "Length of track, in movie time-units")
        elif self['version'].value == 1:
            # 64-bit version
            yield TimestampMac64(self, "creation_date", "Creation time of this track")
            yield TimestampMac64(self, "lastmod_date", "Last modification time of this track")
            yield UInt32(self, "track_id", "Unique nonzero identifier of this track within the presentation")
            yield NullBytes(self, "reserved[]", 4)
            yield UInt64(self, "duration", "Length of track, in movie time-units")
        # NOTE(review): versions other than 0/1 fall through to the fields
        # below without any timing fields.
        yield NullBytes(self, "reserved[]", 8)
        yield Int16(self, "video_layer", "Middle layer is 0; lower numbers are closer to the viewer")
        yield Int16(self, "alternate_group", "Group ID that this track belongs to (0=no group)")
        yield QTFloat16(self, "volume", "Track relative audio volume (1.0 = full)")
        yield NullBytes(self, "reserved[]", 2)
        # 3x3 transformation matrix (a,b,u / c,d,v / x,y,w).
        yield QTFloat32(self, "geom_a", "Width scale")
        yield QTFloat32(self, "geom_b", "Width rotate")
        yield QTFloat2_30(self, "geom_u", "Width angle")
        yield QTFloat32(self, "geom_c", "Height rotate")
        yield QTFloat32(self, "geom_d", "Height scale")
        yield QTFloat2_30(self, "geom_v", "Height angle")
        yield QTFloat32(self, "geom_x", "Position X")
        yield QTFloat32(self, "geom_y", "Position Y")
        yield QTFloat2_30(self, "geom_w", "Divider scale")
        yield QTFloat32(self, "frame_size_width")
        yield QTFloat32(self, "frame_size_height")
class TrackReferenceType(FieldSet):
    """'hint'/'cdsc' track reference: flat list of referenced track IDs."""
    def createFields(self):
        while not self.eof:
            yield UInt32(self, "track_id[]", "Referenced track ID")
class Handler(FieldSet):
    """'hdlr' atom: declares the media or metadata handler for its container."""
    def createFields(self):
        yield UInt8(self, "version", "Version")
        yield NullBits(self, "flags", 24)
        yield String(self, "creator", 4)
        yield String(self, "subtype", 4)
        yield String(self, "manufacturer", 4)
        yield UInt32(self, "res_flags")
        yield UInt32(self, "res_flags_mask")
        # MPEG-4 stores the name as a NUL-terminated UTF-8 string, classic
        # QuickTime as a Pascal (length-prefixed) string.
        if self.root.is_mpeg4:
            yield CString(self, "name", charset="UTF-8")
        else:
            yield PascalString8(self, "name")
class LanguageCode(FieldSet):
    """16-bit language field: a classic Mac language code when the raw value
    is < 1024, otherwise an ISO-639 code packed as three 5-bit letters."""
    static_size = 16
    # Classic Macintosh language codes.
    # BUG FIX: the original literal repeated keys 28 and 29 ('Latvian' then
    # 'Lettish', 'Lappish' then 'Saamisk'); in a dict literal the later entry
    # silently wins, so the shadowed duplicates were dead code.  They are
    # removed here and kept as comments; runtime behavior is unchanged.
    MAC_LANG = {
        0: 'English',
        1: 'French',
        2: 'German',
        3: 'Italian',
        4: 'Dutch',
        5: 'Swedish',
        6: 'Spanish',
        7: 'Danish',
        8: 'Portuguese',
        9: 'Norwegian',
        10: 'Hebrew',
        11: 'Japanese',
        12: 'Arabic',
        13: 'Finnish',
        14: 'Greek',
        15: 'Icelandic',
        16: 'Maltese',
        17: 'Turkish',
        18: 'Croatian',
        19: 'Traditional Chinese',
        20: 'Urdu',
        21: 'Hindi',
        22: 'Thai',
        23: 'Korean',
        24: 'Lithuanian',
        25: 'Polish',
        26: 'Hungarian',
        27: 'Estonian',
        28: 'Lettish',   # also known as 'Latvian'
        29: 'Saamisk',   # also known as 'Lappish'
        30: 'Faeroese',
        31: 'Farsi',
        32: 'Russian',
        33: 'Simplified Chinese',
        34: 'Flemish',
        35: 'Irish',
        36: 'Albanian',
        37: 'Romanian',
        38: 'Czech',
        39: 'Slovak',
        40: 'Slovenian',
        41: 'Yiddish',
        42: 'Serbian',
        43: 'Macedonian',
        44: 'Bulgarian',
        45: 'Ukrainian',
        46: 'Byelorussian',
        47: 'Uzbek',
        48: 'Kazakh',
        49: 'Azerbaijani',
        50: 'AzerbaijanAr',
        51: 'Armenian',
        52: 'Georgian',
        53: 'Moldavian',
        54: 'Kirghiz',
        55: 'Tajiki',
        56: 'Turkmen',
        57: 'Mongolian',
        58: 'MongolianCyr',
        59: 'Pashto',
        60: 'Kurdish',
        61: 'Kashmiri',
        62: 'Sindhi',
        63: 'Tibetan',
        64: 'Nepali',
        65: 'Sanskrit',
        66: 'Marathi',
        67: 'Bengali',
        68: 'Assamese',
        69: 'Gujarati',
        70: 'Punjabi',
        71: 'Oriya',
        72: 'Malayalam',
        73: 'Kannada',
        74: 'Tamil',
        75: 'Telugu',
        76: 'Sinhalese',
        77: 'Burmese',
        78: 'Khmer',
        79: 'Lao',
        80: 'Vietnamese',
        81: 'Indonesian',
        82: 'Tagalog',
        83: 'MalayRoman',
        84: 'MalayArabic',
        85: 'Amharic',
        86: 'Tigrinya',
        88: 'Somali',
        89: 'Swahili',
        90: 'Ruanda',
        91: 'Rundi',
        92: 'Chewa',
        93: 'Malagasy',
        94: 'Esperanto',
        128: 'Welsh',
        129: 'Basque',
        130: 'Catalan',
        131: 'Latin',
        132: 'Quechua',
        133: 'Guarani',
        134: 'Aymara',
        135: 'Tatar',
        136: 'Uighur',
        137: 'Dzongkha',
        138: 'JavaneseRom',
    }
    def fieldHandler(self, field):
        # Each 5-bit value encodes a lowercase letter (0x60 + value); 0 -> space.
        if field.value == 0:
            return ' '
        return chr(field.value + 0x60)
    def createFields(self):
        # Peek at the raw 16-bit value to decide which encoding applies.
        value = self.stream.readBits(self.absolute_address, 16, self.endian)
        if value < 1024:
            yield Enum(UInt16(self, "lang"), self.MAC_LANG)
        else:
            yield NullBits(self, "padding[]", 1)
            yield textHandler(Bits(self, "lang[0]", 5), self.fieldHandler)
            yield textHandler(Bits(self, "lang[1]", 5), self.fieldHandler)
            yield textHandler(Bits(self, "lang[2]", 5), self.fieldHandler)
    def createValue(self):
        if 'lang' in self:
            return self['lang'].display
        return self['lang[0]'].display + self['lang[1]'].display + self['lang[2]'].display
class MediaHeader(FieldSet):
    """'mdhd' atom: media timing information (time scale, duration, language)."""
    def createFields(self):
        yield UInt8(self, "version", "Version (0 or 1)")
        yield NullBits(self, "flags", 24)
        if self['version'].value == 0:
            # 32-bit version
            yield TimestampMac32(self, "creation_date", "Creation time of this media")
            yield TimestampMac32(self, "lastmod_date", "Last modification time of this media")
            yield UInt32(self, "time_scale", "Number of time-units per second")
            yield UInt32(self, "duration", "Length of media, in time-units")
        elif self['version'].value == 1:
            # 64-bit version
            yield TimestampMac64(self, "creation_date", "Creation time of this media")
            yield TimestampMac64(self, "lastmod_date", "Last modification time of this media")
            yield UInt32(self, "time_scale", "Number of time-units per second")
            yield UInt64(self, "duration", "Length of media, in time-units")
        # NOTE(review): unknown versions skip the timing fields entirely.
        yield LanguageCode(self, "language")
        yield Int16(self, "quality")
class VideoMediaHeader(FieldSet):
    """'vmhd' atom: video media info (graphics transfer mode and op color)."""
    GRAPHICSMODE = {
        0: ('Copy', "Copy the source image over the destination"),
        0x20: ('Blend', "Blend of source and destination; blending factor is controlled by op color"),
        0x24: ('Transparent', "Replace destination pixel with source pixel if the source pixel is not the op color"),
        0x40: ('Dither copy', "Dither image if necessary, else copy"),
        0x100: ('Straight alpha', "Blend of source and destination; blending factor is controlled by alpha channel"),
        0x101: ('Premul white alpha', "Remove white from each pixel and blend"),
        0x102: ('Premul black alpha', "Remove black from each pixel and blend"),
        0x103: ('Composition', "Track drawn offscreen and dither copied onto screen"),
        0x104: ('Straight alpha blend', "Blend of source and destination; blending factor is controlled by combining alpha channel and op color")
    }
    def graphicsDisplay(self, field):
        # Short name for a known graphics mode, hex value otherwise.
        if field.value in self.GRAPHICSMODE:
            return self.GRAPHICSMODE[field.value][0]
        return hex(field.value)
    def graphicsDescription(self, field):
        # Long description for a known graphics mode, empty string otherwise.
        if field.value in self.GRAPHICSMODE:
            return self.GRAPHICSMODE[field.value][1]
        return ""
    def createFields(self):
        yield UInt8(self, "version", "Version")
        yield Bits(self, "flags", 24, "Flags (=1)")
        # Attach custom display/description callbacks before yielding the field.
        graphics = UInt16(self, "graphicsmode")
        graphics.createDisplay = lambda:self.graphicsDisplay(graphics)
        graphics.createDescription = lambda:self.graphicsDescription(graphics)
        yield graphics
        yield UInt16(self, "op_red", "Red value for graphics mode")
        yield UInt16(self, "op_green", "Green value for graphics mode")
        yield UInt16(self, "op_blue", "Blue value for graphics mode")
class SoundMediaHeader(FieldSet):
    """'smhd' atom: sound media information (stereo balance)."""
    def createFields(self):
        yield UInt8(self, "version", "Version")
        yield NullBits(self, "flags", 24)
        yield QTFloat16(self, "balance")
        yield UInt16(self, "reserved[]")
class HintMediaHeader(FieldSet):
    """'hmhd' atom: hint media information (PDU sizes and bit rates)."""
    def createFields(self):
        yield UInt8(self, "version", "Version")
        yield NullBits(self, "flags", 24)
        yield UInt16(self, "max_pdu_size")
        yield UInt16(self, "avg_pdu_size")
        yield UInt32(self, "max_bit_rate")
        yield UInt32(self, "avg_bit_rate")
        yield UInt32(self, "reserved[]")
class DataEntryUrl(FieldSet):
    """'url ' atom: data location by URL; omitted when data is in this file."""
    def createFields(self):
        yield UInt8(self, "version", "Version")
        yield NullBits(self, "flags", 23)
        yield Bit(self, "is_same_file", "Is the reference to this file?")
        if not self['is_same_file'].value:
            yield CString(self, "location")
class DataEntryUrn(FieldSet):
    """'urn ' atom: data location by URN (name + optional location)."""
    def createFields(self):
        yield UInt8(self, "version", "Version")
        yield NullBits(self, "flags", 23)
        yield Bit(self, "is_same_file", "Is the reference to this file?")
        if not self['is_same_file'].value:
            yield CString(self, "name")
            yield CString(self, "location")
class DataReference(FieldSet):
    """'dref' atom: count-prefixed table of data reference child atoms."""
    def createFields(self):
        yield UInt8(self, "version", "Version")
        yield NullBits(self, "flags", 24)
        yield UInt32(self, "count")
        for i in xrange(self['count'].value):
            yield Atom(self, "atom[]")
class EditList(FieldSet):
    """'elst' atom: edit segments mapping movie time to media time."""
    def createFields(self):
        yield UInt8(self, "version", "Version (0 or 1)")
        yield NullBits(self, "flags", 24)
        yield UInt32(self, "count")
        version = self['version'].value
        # Version selects 32-bit (v0) or 64-bit (v1) duration/time fields.
        if version == 0:
            UInt, Int = UInt32, Int32
        elif version == 1:
            UInt, Int = UInt64, Int64
        else:
            raise ParserError("elst version %d not supported"%version)
        for i in xrange(self['count'].value):
            yield UInt(self, "duration[]", "Duration of this edit segment")
            yield Int(self, "time[]", "Starting time of this edit segment within the media (-1 = empty edit)")
            yield QTFloat32(self, "play_speed[]", "Playback rate (0 = dwell edit, 1 = normal playback)")
class Load(FieldSet):
    """'load' atom: track pre-load settings (old QuickTime spec)."""
    def createFields(self):
        yield UInt32(self, "start")
        yield UInt32(self, "length")
        yield UInt32(self, "flags") # PreloadAlways = 1 or TrackEnabledPreload = 2
        yield UInt32(self, "hints") # KeepInBuffer = 0x00000004; HighQuality = 0x00000100; SingleFieldVideo = 0x00100000
class MovieHeader(FieldSet):
    """'mvhd' atom: whole-presentation header (timing, volume, matrix, etc.)."""
    def createFields(self):
        yield UInt8(self, "version", "Version (0 or 1)")
        yield NullBits(self, "flags", 24)
        if self['version'].value == 0:
            # 32-bit version
            yield TimestampMac32(self, "creation_date", "Creation time of this presentation")
            yield TimestampMac32(self, "lastmod_date", "Last modification time of this presentation")
            yield UInt32(self, "time_scale", "Number of time-units per second")
            yield UInt32(self, "duration", "Length of presentation, in time-units")
        elif self['version'].value == 1:
            # 64-bit version
            yield TimestampMac64(self, "creation_date", "Creation time of this presentation")
            yield TimestampMac64(self, "lastmod_date", "Last modification time of this presentation")
            yield UInt32(self, "time_scale", "Number of time-units per second")
            yield UInt64(self, "duration", "Length of presentation, in time-units")
        yield QTFloat32(self, "play_speed", "Preferred playback speed (1.0 = normal)")
        yield QTFloat16(self, "volume", "Preferred playback volume (1.0 = full)")
        yield NullBytes(self, "reserved[]", 10)
        # 3x3 transformation matrix (a,b,u / c,d,v / x,y,w).
        yield QTFloat32(self, "geom_a", "Width scale")
        yield QTFloat32(self, "geom_b", "Width rotate")
        yield QTFloat2_30(self, "geom_u", "Width angle")
        yield QTFloat32(self, "geom_c", "Height rotate")
        yield QTFloat32(self, "geom_d", "Height scale")
        yield QTFloat2_30(self, "geom_v", "Height angle")
        yield QTFloat32(self, "geom_x", "Position X")
        yield QTFloat32(self, "geom_y", "Position Y")
        yield QTFloat2_30(self, "geom_w", "Divider scale")
        yield UInt32(self, "preview_start")
        yield UInt32(self, "preview_length")
        yield UInt32(self, "still_poster")
        yield UInt32(self, "sel_start")
        yield UInt32(self, "sel_length")
        yield UInt32(self, "current_time")
        yield UInt32(self, "next_track_ID", "Value to use as the track ID for the next track added")
class FileType(FieldSet):
    """'ftyp' atom: major brand, version and list of compatible brands."""
    def createFields(self):
        yield String(self, "brand", 4, "Major brand")
        yield UInt32(self, "version", "Version")
        # The remaining bytes are a sequence of 4-character compatible brands.
        while not self.eof:
            yield String(self, "compat_brand[]", 4, "Compatible brand")
def findHandler(self):
    """Find the 'hdlr' handler atom governing this field set.

    Climbs the parent chain until a 'media' or 'tags' container is found,
    then scans that container's children for an 'hdlr' atom.  Returns the
    handler field set, or None if no suitable container/atom exists.
    """
    node = self
    # Walk up until we hit a 'media'/'tags' container (or run out of parents).
    while node and node.name not in ('media', 'tags'):
        node = node.parent
    if not node:
        return None
    for atom in node:
        if atom['tag'].value == 'hdlr':
            return atom['hdlr']
    return None
class METATAG(FieldSet):
    """One key/value pair inside a '/tags/' meta atom (UTF-8 Pascal strings)."""
    def createFields(self):
        yield UInt8(self, "unk[]", "0x80 or 0x00")
        yield PascalString16(self, "tag_name", charset='UTF-8')
        yield UInt16(self, "unk[]", "0x0001")
        yield UInt16(self, "unk[]", "0x0000")
        yield PascalString16(self, "tag_value", charset='UTF-8')
class META(FieldSet):
    """'meta' atom: file metadata; the layout depends on the container context."""
    def createFields(self):
        # This tag has too many variant forms.
        if '/tags/' in self.path:
            # Inside a 'tags' container: count-prefixed list of key/value pairs.
            yield UInt32(self, "count")
            for i in xrange(self['count'].value):
                yield METATAG(self, "tag[]")
        elif self.stream.readBits(self.absolute_address, 32, self.endian) == 0:
            # Full-atom form: version/flags header followed by child atoms.
            yield UInt8(self, "version")
            yield Bits(self, "flags", 24)
            yield AtomList(self, "tags")
        else:
            # Bare form: child atoms with no version/flags header.
            yield AtomList(self, "tags")
class Item(FieldSet):
    """One entry of an 'mdta'-keyed item list: size, key index and value atom."""
    def createFields(self):
        yield UInt32(self, "size")
        yield UInt32(self, "index")
        yield Atom(self, "value")
class KeyList(FieldSet):
    """'keys' atom: count-prefixed list of metadata key atoms."""
    def createFields(self):
        yield UInt8(self, "version")
        yield NullBits(self, "flags", 24)
        yield UInt32(self, "count")
        for i in xrange(self['count'].value):
            yield Atom(self, "key[]")
class ItemList(FieldSet):
    """'ilst' atom: metadata items; layout depends on the metadata handler."""
    def createFields(self):
        handler = findHandler(self)
        if handler is None:
            raise ParserError("ilst couldn't find metadata handler")
        if handler['subtype'].value == 'mdir':
            # iTunes-style metadata: plain child atoms.
            while not self.eof:
                yield Atom(self, "atom[]")
        elif handler['subtype'].value == 'mdta':
            # Keyed metadata: size/index/value items.
            while not self.eof:
                yield Item(self, "item[]")
        # NOTE(review): any other handler subtype yields no fields at all.
class NeroChapters(FieldSet):
    """'chpl' atom: Nero chapter list (start time + UTF-8 name per chapter)."""
    def createFields(self):
        yield UInt8(self, "version")
        yield NullBits(self, "flags", 24)
        yield UInt32(self, "unknown")
        yield UInt8(self, "count", description="Number of chapters")
        for i in xrange(self['count'].value):
            yield UInt64(self, "chapter_start[]")
            yield PascalString8(self, "chapter_name[]", charset='UTF-8')
class SampleDecodeTimeTable(FieldSet):
    """'stts' atom: run-length encoded decode-time deltas per sample."""
    def createFields(self):
        yield UInt8(self, "version")
        yield NullBits(self, "flags", 24)
        yield UInt32(self, "count", description="Total entries in sample time table")
        for i in xrange(self['count'].value):
            yield UInt32(self, "sample_count[]", "Number of consecutive samples with this delta")
            yield UInt32(self, "sample_delta[]", "Decode time delta since last sample, in time-units")
class SampleCompositionTimeTable(FieldSet):
    """'ctts' atom: run-length encoded composition-time offsets per sample."""
    def createFields(self):
        yield UInt8(self, "version")
        yield NullBits(self, "flags", 24)
        yield UInt32(self, "count", description="Total entries in sample time table")
        for i in xrange(self['count'].value):
            yield UInt32(self, "sample_count[]", "Number of consecutive samples with this offset")
            yield UInt32(self, "sample_offset[]", "Difference between decode time and composition time of this sample, in time-units")
class ChunkOffsetTable(FieldSet):
    """'stco' atom: 32-bit file offsets of each chunk."""
    def createFields(self):
        yield UInt8(self, "version")
        yield NullBits(self, "flags", 24)
        yield UInt32(self, "count", description="Total entries in offset table")
        for i in xrange(self['count'].value):
            yield UInt32(self, "chunk_offset[]")
class ChunkOffsetTable64(FieldSet):
    """'co64' atom: 64-bit file offsets of each chunk."""
    def createFields(self):
        yield UInt8(self, "version")
        yield NullBits(self, "flags", 24)
        yield UInt32(self, "count", description="Total entries in offset table")
        for i in xrange(self['count'].value):
            yield UInt64(self, "chunk_offset[]")
class SampleEntry(FieldSet):
    """One 'stsd' sample description; layout depends on the track handler type."""
    def createFields(self):
        yield UInt32(self, "size")
        yield RawBytes(self, "format", 4, "Data Format (codec)")
        yield NullBytes(self, "reserved[]", 6, "Reserved")
        yield UInt16(self, "data_reference_index")
        handler = findHandler(self)
        if not handler:
            raise ParserError("stsd couldn't find track handler")
        if handler['subtype'].value == 'soun':
            # Audio sample entry
            yield NullBytes(self, "reserved[]", 8)
            yield UInt16(self, "channels", "Number of audio channels")
            yield UInt16(self, "samplesize", "Sample size in bits")
            yield UInt16(self, "unknown")
            yield NullBytes(self, "reserved[]", 2)
            yield QTFloat32(self, "samplerate", "Sample rate in Hz")
        elif handler['subtype'].value == 'vide':
            # Video sample entry
            yield UInt16(self, "version")
            yield UInt16(self, "revision_level")
            yield RawBytes(self, "vendor_id", 4)
            yield UInt32(self, "temporal_quality")
            yield UInt32(self, "spatial_quality")
            yield UInt16(self, "width", "Width (pixels)")
            yield UInt16(self, "height", "Height (pixels)")
            yield QTFloat32(self, "horizontal_resolution", "Horizontal resolution in DPI")
            # NOTE(review): the field name below contains a space, inconsistent
            # with "horizontal_resolution"; renaming would change the field
            # path seen by consumers, so it is deliberately left as-is.
            yield QTFloat32(self, "vertical resolution", "Vertical resolution in DPI")
            yield UInt32(self, "data_size")
            yield UInt16(self, "frame_count")
            yield UInt8(self, "compressor_name_length")
            yield String(self, "compressor_name", 31, strip='\0')
            yield UInt16(self, "depth", "Bit depth of image")
            yield Int16(self, "unknown")
        elif handler['subtype'].value == 'hint':
            # Hint sample entry
            pass
        # Whatever remains of the declared size is codec-specific extra data.
        size = self['size'].value - self.current_size//8
        if size > 0:
            yield RawBytes(self, "extra_data", size)
class SampleDescription(FieldSet):
    """'stsd' atom: count-prefixed table of sample (codec) descriptions."""
    def createFields(self):
        yield UInt8(self, "version")
        yield NullBits(self, "flags", 24)
        yield UInt32(self, "count", description="Total entries in table")
        for i in xrange(self['count'].value):
            yield SampleEntry(self, "sample_entry[]")
class SyncSampleTable(FieldSet):
    """'stss' atom: sample numbers of random-access (sync/key) frames."""
    def createFields(self):
        yield UInt8(self, "version")
        yield NullBits(self, "flags", 24)
        yield UInt32(self, "count", description="Number of sync samples")
        for i in xrange(self['count'].value):
            yield UInt32(self, "sample_number[]")
class SampleSizeTable(FieldSet):
    """'stsz' atom: per-sample sizes, or one uniform size for all samples."""
    def createFields(self):
        yield UInt8(self, "version")
        yield NullBits(self, "flags", 24)
        yield UInt32(self, "uniform_size", description="Uniform size of each sample (0 if non-uniform)")
        yield UInt32(self, "count", description="Number of samples")
        # The per-sample list is only present when sizes are non-uniform.
        if self['uniform_size'].value == 0:
            for i in xrange(self['count'].value):
                yield UInt32(self, "sample_size[]")
class CompactSampleSizeTable(FieldSet):
    """'stz2' atom: sample sizes stored as 'field_size'-bit packed entries."""
    def createFields(self):
        yield UInt8(self, "version")
        yield NullBits(self, "flags", 24)
        yield NullBits(self, "reserved[]", 24)
        yield UInt8(self, "field_size", "Size of each entry in this table, in bits")
        yield UInt32(self, "count", description="Number of samples")
        bitsize = self['field_size'].value
        for i in xrange(self['count'].value):
            yield Bits(self, "sample_size[]", bitsize)
        # Pad to the next byte boundary.
        if self.current_size % 8 != 0:
            yield NullBits(self, "padding[]", 8 - (self.current_size % 8))
class SampleToChunkTable(FieldSet):
    """'stsc' atom: maps runs of chunks to samples-per-chunk/description index."""
    def createFields(self):
        yield UInt8(self, "version")
        yield NullBits(self, "flags", 24)
        yield UInt32(self, "count", description="Number of samples")
        for i in xrange(self['count'].value):
            yield UInt32(self, "first_chunk[]")
            yield UInt32(self, "samples_per_chunk[]")
            yield UInt32(self, "sample_description_index[]")
class Atom(FieldSet):
    """Generic QuickTime/MP4 atom: 32-bit size + 4-char tag + payload.

    Known tags are dispatched to a dedicated FieldSet via ``tag_info``;
    unknown payloads are kept as raw bytes.
    """
    # tag -> (handler class, field name, description)
    tag_info = {
        "ftyp": (FileType, "file_type", "File type and compatibility"),
        # pdin: progressive download information
        # pnot: movie preview (old QT spec)
        "moov": (AtomList, "movie", "Container for all metadata"),
        "mvhd": (MovieHeader, "movie_hdr", "Movie header, overall declarations"),
        # clip: movie clipping (old QT spec)
        # crgn: movie clipping region (old QT spec)
        "trak": (AtomList, "track", "Container for an individual track or stream"),
        "tkhd": (TrackHeader, "track_hdr", "Track header, overall information about the track"),
        # matt: track matte (old QT spec)
        # kmat: compressed matte (old QT spec)
        "tref": (AtomList, "tref", "Track reference container"),
        "hint": (TrackReferenceType, "hint", "Original media track(s) for this hint track"),
        "cdsc": (TrackReferenceType, "cdsc", "Reference to track described by this track"),
        "edts": (AtomList, "edts", "Edit list container"),
        "elst": (EditList, "elst", "Edit list"),
        "load": (Load, "load", "Track loading settings (old QT spec)"),
        # imap: Track input map (old QT spec)
        "mdia": (AtomList, "media", "Container for the media information in a track"),
        "mdhd": (MediaHeader, "media_hdr", "Media header, overall information about the media"),
        "hdlr": (Handler, "hdlr", "Handler, declares the media or metadata (handler) type"),
        "minf": (AtomList, "minf", "Media information container"),
        "vmhd": (VideoMediaHeader, "vmhd", "Video media header, overall information (video track only)"),
        "smhd": (SoundMediaHeader, "smhd", "Sound media header, overall information (sound track only)"),
        "hmhd": (HintMediaHeader, "hmhd", "Hint media header, overall information (hint track only)"),
        # nmhd: Null media header, overall information (some tracks only) (unparsed)
        "dinf": (AtomList, "dinf", "Data information, container"),
        "dref": (DataReference, "dref", "Data reference, declares source(s) of media data in track"),
        "url ": (DataEntryUrl, "url", "URL data reference"),
        "urn ": (DataEntryUrn, "urn", "URN data reference"),
        "stbl": (AtomList, "stbl", "Sample table, container for the time/space map"),
        "stsd": (SampleDescription, "stsd", "Sample descriptions (codec types, initialization etc.)"),
        "stts": (SampleDecodeTimeTable, "stts", "decoding time-to-sample delta table"),
        "ctts": (SampleCompositionTimeTable, "ctts", "composition time-to-sample offset table"),
        "stsc": (SampleToChunkTable, "stsc", "sample-to-chunk, partial data-offset information"),
        "stsz": (SampleSizeTable, "stsz", "Sample size table (framing)"),
        "stz2": (CompactSampleSizeTable, "stz2", "Compact sample size table (framing)"),
        "stco": (ChunkOffsetTable, "stco", "Chunk offset, partial data-offset information"),
        "co64": (ChunkOffsetTable64, "co64", "64-bit chunk offset"),
        "stss": (SyncSampleTable, "stss", "Sync sample table (random access points)"),
        # stsh: shadow sync sample table
        # padb: sample padding bits
        # stdp: sample degradation priority
        # sdtp: independent and disposable samples
        # sbgp: sample-to-group
        # sgpd: sample group description
        # subs: sub-sample information
        # ctab color table (old QT spec)
        # mvex: movie extends
        # mehd: movie extends header
        # trex: track extends defaults
        # ipmc: IPMP control
        # moof: movie fragment
        # mfhd: movie fragment header
        # traf: track fragment
        # tfhd: track fragment header
        # trun: track fragment run
        # sdtp: independent and disposable samples
        # sbgp: sample-to-group
        # subs: sub-sample information
        # mfra: movie fragment random access
        # tfra: track fragment random access
        # mfro: movie fragment random access offset
        # mdat: media data container
        # free: free space (unparsed)
        # skip: free space (unparsed)
        "udta": (AtomList, "udta", "User data"),
        "meta": (META, "meta", "File metadata"),
        "keys": (KeyList, "keys", "Metadata keys"),
        ## hdlr
        ## dinf
        ## dref: data reference, declares source(s) of metadata items
        ## ipmc: IPMP control
        # iloc: item location
        # ipro: item protection
        # sinf: protection scheme information
        # frma: original format
        # imif: IPMP information
        # schm: scheme type
        # schi: scheme information
        # iinf: item information
        # xml : XML container
        # bxml: binary XML container
        # pitm: primary item reference
        ## other tags
        "ilst": (ItemList, "ilst", "Item list"),
        "trkn": (AtomList, "trkn", "Metadata: Track number"),
        "disk": (AtomList, "disk", "Metadata: Disk number"),
        "tmpo": (AtomList, "tempo", "Metadata: Tempo"),
        "cpil": (AtomList, "cpil", "Metadata: Compilation"),
        "gnre": (AtomList, "gnre", "Metadata: Genre"),
        "\xa9cpy": (AtomList, "copyright", "Metadata: Copyright statement"),
        "\xa9day": (AtomList, "date", "Metadata: Date of content creation"),
        "\xa9dir": (AtomList, "director", "Metadata: Movie director"),
        "\xa9ed1": (AtomList, "edit1", "Metadata: Edit date and description (1)"),
        "\xa9ed2": (AtomList, "edit2", "Metadata: Edit date and description (2)"),
        "\xa9ed3": (AtomList, "edit3", "Metadata: Edit date and description (3)"),
        "\xa9ed4": (AtomList, "edit4", "Metadata: Edit date and description (4)"),
        "\xa9ed5": (AtomList, "edit5", "Metadata: Edit date and description (5)"),
        "\xa9ed6": (AtomList, "edit6", "Metadata: Edit date and description (6)"),
        "\xa9ed7": (AtomList, "edit7", "Metadata: Edit date and description (7)"),
        "\xa9ed8": (AtomList, "edit8", "Metadata: Edit date and description (8)"),
        "\xa9ed9": (AtomList, "edit9", "Metadata: Edit date and description (9)"),
        "\xa9fmt": (AtomList, "format", "Metadata: Movie format (CGI, digitized, etc.)"),
        "\xa9inf": (AtomList, "info", "Metadata: Information about the movie"),
        "\xa9prd": (AtomList, "producer", "Metadata: Movie producer"),
        "\xa9prf": (AtomList, "performers", "Metadata: Performer names"),
        "\xa9req": (AtomList, "requirements", "Metadata: Special hardware and software requirements"),
        "\xa9src": (AtomList, "source", "Metadata: Credits for those who provided movie source content"),
        "\xa9nam": (AtomList, "name", "Metadata: Name of song or video"),
        "\xa9des": (AtomList, "description", "Metadata: File description"),
        "\xa9cmt": (AtomList, "comment", "Metadata: General comment"),
        "\xa9alb": (AtomList, "album", "Metadata: Album name"),
        "\xa9gen": (AtomList, "genre", "Metadata: Custom genre"),
        "\xa9ART": (AtomList, "artist", "Metadata: Artist name"),
        "\xa9too": (AtomList, "encoder", "Metadata: Encoder"),
        "\xa9wrt": (AtomList, "writer", "Metadata: Writer"),
        "covr": (AtomList, "cover", "Metadata: Cover art"),
        "----": (AtomList, "misc", "Metadata: Miscellaneous"),
        "tags": (AtomList, "tags", "File tags"),
        "tseg": (AtomList, "tseg", "tseg"),
        "chpl": (NeroChapters, "chpl", "Nero chapter data"),
    }
    # BUG FIX: the original comprehensions iterated the dict itself (which
    # yields the 4-character tag strings) and indexed into those strings,
    # producing lists of single characters.  Iterate the values instead so
    # these really are the handler classes and the descriptions.
    tag_handler = [item[0] for item in tag_info.values()]
    tag_desc = [item[2] for item in tag_info.values()]
    def createFields(self):
        yield UInt32(self, "size")
        yield RawBytes(self, "tag", 4)
        size = self["size"].value
        if size == 1:
            # 64-bit size
            yield UInt64(self, "size64")
            size = self["size64"].value - 16
        elif size == 0:
            # Unbounded atom: runs to the end of the enclosing container.
            # (// keeps the bit-to-byte conversion integral; identical to the
            # original int '/' under Python 2.)
            if self._size is None:
                size = (self.parent.size - self.parent.current_size) // 8 - 8
            else:
                size = (self.size - self.current_size) // 8
        else:
            size = size - 8
        if self['tag'].value == 'uuid':
            # User-extended atom: the real tag is a 16-byte GUID.
            yield GUID(self, "usertag")
            tag = self["usertag"].value
            size -= 16
        else:
            tag = self["tag"].value
        if size > 0:
            if tag in self.tag_info:
                handler, name, desc = self.tag_info[tag]
                yield handler(self, name, desc, size=size*8)
            else:
                yield RawBytes(self, "data", size)
    def createDescription(self):
        if self["tag"].value == "uuid":
            return "Atom: uuid: "+self["usertag"].value
        return "Atom: %s" % self["tag"].value
class MovFile(Parser):
    """Top-level parser for QuickTime/MP4 containers: a flat list of atoms."""
    PARSER_TAGS = {
        "id": "mov",
        "category": "video",
        "file_ext": ("mov", "qt", "mp4", "m4v", "m4a", "m4p", "m4b"),
        "mime": (u"video/quicktime", u'video/mp4'),
        "min_size": 8*8,
        "magic": (("moov", 4*8),),
        "description": "Apple QuickTime movie"
    }
    BRANDS = {
        # File type brand => MIME type
        'mp41': u'video/mp4',
        'mp42': u'video/mp4',
        'avc1': u'video/mp4',
        'isom': u'video/mp4',
        'iso2': u'video/mp4',
    }
    endian = BIG_ENDIAN
    def __init__(self, *args, **kw):
        Parser.__init__(self, *args, **kw)
    # True when the detected MIME type is MP4; affects 'hdlr' name encoding.
    is_mpeg4 = property(lambda self:self.mime_type==u'video/mp4')
    def validate(self):
        """Cheap sanity check: first atom size >= 8 and a known first tag.

        Returns an error string when invalid, or a boolean for the tag test.
        """
        # TODO: Write better code, erk!
        size = self.stream.readBits(0, 32, self.endian)
        if size < 8:
            return "Invalid first atom size"
        tag = self.stream.readBytes(4*8, 4)
        return tag in ("ftyp", "moov", "free")
    def createFields(self):
        while not self.eof:
            yield Atom(self, "atom[]")
    def createMimeType(self):
        """Map the 'ftyp' brand (major, then compatible list) to a MIME type."""
        first = self[0]
        try:
            # Read brands in the file type
            if first['tag'].value != "ftyp":
                return None
            file_type = first["file_type"]
            brand = file_type["brand"].value
            if brand in self.BRANDS:
                return self.BRANDS[brand]
            for field in file_type.array("compat_brand"):
                brand = field.value
                if brand in self.BRANDS:
                    return self.BRANDS[brand]
        except MissingField:
            pass
        # Default when no MP4 brand matched (or 'ftyp' fields were missing).
        return u'video/quicktime'
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (c) 2015 David I. Urbina, david.urbina@utdallas.edu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Scapy Dissector for Ethernet/IP Implicit I/O messages at the Ring Level of the
Secure Water Testbed (SWaT) at the Singapore University of Technology and Design
Documentation: SWaT's Control Panels and Electrical Drawings manual.
"""
from scapy.main import interact
from swat import *

# Banner shown when the interactive scapy shell starts.
banner = ('Add-on: Scapy Dissector for Ethernet/IP Implicit I/O messages'
          '\n\tat the Ring Level of the Secure Water Testbed (SWaT)')

# Drop into a scapy shell with the SWaT dissectors loaded into the namespace.
interact(mydict=globals(), mybanner=banner)
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import inspect
import re
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import is_in_graph_mode
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.tools.docs import doc_controls
# Allow deprecation warnings to be silenced temporarily with a context manager.
# Toggled off/on by the `silence()` context manager below; every deprecation
# wrapper checks this flag before logging.
_PRINT_DEPRECATION_WARNINGS = True

# Remember which deprecation warnings have been printed already, to implement
# `warn_once`. Keys are the deprecated callables themselves, or
# (callable, arg_name) tuples for per-argument warnings.
_PRINTED_WARNING = {}
class DeprecatedNamesAlreadySet(Exception):
    """Raised when setting deprecated names multiple times for the same symbol."""
def _add_deprecated_function_notice_to_docstring(doc, date, instructions):
  """Adds a deprecation notice to a docstring for deprecated functions."""
  if date is None:
    removal = 'in a future version'
  else:
    removal = 'after %s' % date
  main_text = ['THIS FUNCTION IS DEPRECATED. It will be removed %s.' % removal]
  if instructions:
    main_text.append('Instructions for updating:')
  return decorator_utils.add_notice_to_docstring(
      doc, instructions, 'DEPRECATED FUNCTION', '(deprecated)', main_text)
def _add_deprecated_arg_notice_to_docstring(doc, date, instructions,
                                            deprecated_names):
  """Adds a deprecation notice to a docstring for deprecated arguments."""
  when = 'in a future version' if date is None else ('after %s' % date)
  names = ', '.join(sorted(deprecated_names))
  main_text = [
      'SOME ARGUMENTS ARE DEPRECATED: `(%s)`. '
      'They will be removed %s.' % (names, when),
      'Instructions for updating:',
  ]
  return decorator_utils.add_notice_to_docstring(
      doc, instructions, 'DEPRECATED FUNCTION ARGUMENTS',
      '(deprecated arguments)', main_text)
def _add_deprecated_arg_value_notice_to_docstring(doc, date, instructions,
                                                  deprecated_name_value_dict):
  """Adds a deprecation notice to a docstring for deprecated argument values."""
  when = 'in a future version' if date is None else ('after %s' % date)
  pairs = ['%s=%r' % (key, value)
           for key, value in sorted(deprecated_name_value_dict.items())]
  main_text = [
      'SOME ARGUMENT VALUES ARE DEPRECATED: `(%s)`. '
      'They will be removed %s.' % (', '.join(pairs), when),
      'Instructions for updating:',
  ]
  return decorator_utils.add_notice_to_docstring(
      doc, instructions, 'DEPRECATED FUNCTION ARGUMENT VALUES',
      '(deprecated argument values)', main_text)
def _validate_deprecation_args(date, instructions):
if date is not None and not re.match(r'20\d\d-[01]\d-[0123]\d', date):
raise ValueError('Date must be YYYY-MM-DD.')
if not instructions:
raise ValueError('Don\'t deprecate things without conversion instructions!')
def _call_location(outer=False):
"""Returns call location given level up from current call."""
# Two up: <_call_location>, <_call_location's caller>
f = inspect.currentframe().f_back.f_back
parent = f.f_back
if outer and parent is not None:
f = parent
return '{}:{}'.format(f.f_code.co_filename, f.f_lineno)
def _wrap_decorator(wrapped_function):
  """Indicate that one function wraps another.

  The returned decorator attaches `wrapped_function` as the target of
  `tf_decorator.make_decorator`, so documentation tooling can recover the
  original signature (unlike @functools.wraps on Python 2).

  Args:
    wrapped_function: The function that the decorated function wraps.

  Returns:
    A decorator that turns its argument into a `TFDecorator` around
    `wrapped_function`.
  """
  return lambda wrapper_func: tf_decorator.make_decorator(
      wrapped_function, wrapper_func)
def deprecated_alias(deprecated_name, name, func_or_class, warn_once=True):
  """Deprecate a symbol in favor of a new name with identical semantics.

  This function is meant to be used when defining a backwards-compatibility
  alias for a symbol which has been moved. For example:

  module1.py:
  ```python
    class NewNameForClass: pass
  ```

  module2.py:
  ```python
    import module1

    DeprecatedNameForClass = deprecated_alias(
      deprecated_name='module2.DeprecatedNameForClass',
      name='module1.NewNameForClass',
      func_or_class=module1.NewNameForClass)
  ```

  This function works for classes and functions.

  For classes, it creates a new class which is functionally identical (it
  inherits from the original, and overrides its constructor), but which prints
  a deprecation warning when an instance is created. It also adds a deprecation
  notice to the class' docstring.

  For functions, it returns a function wrapped by `tf_decorator.make_decorator`.
  That function prints a warning when used, and has a deprecation notice in its
  docstring. This is more or less equivalent (the deprecation warning has
  slightly different text) to writing:

  ```python
  @deprecated
  def deprecated_alias(original_args):
    real_function(original_args)
  ```

  Args:
    deprecated_name: The name of the symbol that is being deprecated, to be used
      in the warning message. This should be its fully qualified name to avoid
      confusion.
    name: The name of the symbol that is to be used instead of the deprecated
      name. This should be a fully qualified name to avoid confusion.
    func_or_class: The (non-deprecated) class or function for which a deprecated
      alias should be created.
    warn_once: If True (the default), only print a deprecation warning the first
      time this function is used, or the class is instantiated.

  Returns:
    A wrapped version of `func_or_class` which prints a deprecation warning on
    use and has a modified docstring.
  """
  if tf_inspect.isclass(func_or_class):

    # Make a new class with __init__ wrapped in a warning.
    class _NewClass(func_or_class):  # pylint: disable=missing-docstring
      # Rewrite the docstring so the alias documents its own deprecation.
      __doc__ = decorator_utils.add_notice_to_docstring(
          func_or_class.__doc__, 'Please use %s instead.' % name,
          'DEPRECATED CLASS',
          '(deprecated)', ['THIS CLASS IS DEPRECATED. '
                           'It will be removed in a future version. '])
      __name__ = func_or_class.__name__
      # Report the module of the call site creating the alias, not this module.
      __module__ = _call_location(outer=True)

      @_wrap_decorator(func_or_class.__init__)
      def __init__(self, *args, **kwargs):
        # Copy the original constructor docstring onto the wrapper; the
        # attribute to assign differs between bound (py2) and plain (py3)
        # function objects.
        if hasattr(_NewClass.__init__, '__func__'):
          # Python 2
          _NewClass.__init__.__func__.__doc__ = func_or_class.__init__.__doc__
        else:
          # Python 3
          _NewClass.__init__.__doc__ = func_or_class.__init__.__doc__

        if _PRINT_DEPRECATION_WARNINGS:
          # We're making the alias as we speak. The original may have other
          # aliases, so we cannot use it to check for whether it's already been
          # warned about.
          if _NewClass.__init__ not in _PRINTED_WARNING:
            if warn_once:
              _PRINTED_WARNING[_NewClass.__init__] = True
            logging.warning(
                'From %s: The name %s is deprecated. Please use %s instead.\n',
                _call_location(), deprecated_name, name)
        super(_NewClass, self).__init__(*args, **kwargs)

    return _NewClass
  else:
    decorator_utils.validate_callable(func_or_class, 'deprecated')

    # Make a wrapper for the original function that logs on each call.
    @functools.wraps(func_or_class)
    def new_func(*args, **kwargs):  # pylint: disable=missing-docstring
      if _PRINT_DEPRECATION_WARNINGS:
        # We're making the alias as we speak. The original may have other
        # aliases, so we cannot use it to check for whether it's already been
        # warned about.
        if new_func not in _PRINTED_WARNING:
          if warn_once:
            _PRINTED_WARNING[new_func] = True
          logging.warning(
              'From %s: The name %s is deprecated. Please use %s instead.\n',
              _call_location(), deprecated_name, name)
      return func_or_class(*args, **kwargs)
    return tf_decorator.make_decorator(
        func_or_class, new_func, 'deprecated',
        _add_deprecated_function_notice_to_docstring(
            func_or_class.__doc__, None, 'Please use %s instead.' % name))
def deprecated_endpoints(*args):
  """Decorator for marking endpoints deprecated.

  This decorator does not print deprecation messages.
  TODO(annarev): eventually start printing deprecation warnings when
  @deprecation_endpoints decorator is added.

  Args:
    *args: Deprecated endpoint names.

  Returns:
    A decorator that records `args` on its target as
    `_tf_deprecated_api_names`, raising `DeprecatedNamesAlreadySet` if the
    attribute was already populated.
  """
  def attach_deprecated_names(func):
    # pylint: disable=protected-access
    if '_tf_deprecated_api_names' in vars(func):
      raise DeprecatedNamesAlreadySet(
          'Cannot set deprecated names for %s to %s. '
          'Deprecated names are already set to %s.' % (
              func.__name__, str(args), str(func._tf_deprecated_api_names)))
    func._tf_deprecated_api_names = args
    # pylint: enable=protected-access
    return func
  return attach_deprecated_names
def deprecated(date, instructions, warn_once=True):
  """Decorator for marking functions or methods deprecated.

  This decorator logs a deprecation warning whenever the decorated function is
  called. It has the following format:

    <function> (from <module>) is deprecated and will be removed after <date>.
    Instructions for updating:
    <instructions>

  If `date` is None, 'after <date>' is replaced with 'in a future version'.
  <function> will include the class name if it is a method.

  It also edits the docstring of the function: ' (deprecated)' is appended
  to the first line of the docstring and a deprecation notice is prepended
  to the rest of the docstring.

  Args:
    date: String or None. The date the function is scheduled to be removed.
      Must be ISO 8601 (YYYY-MM-DD), or None.
    instructions: String. Instructions on how to update code using the
      deprecated function.
    warn_once: Boolean. Set to `True` to warn only the first time the decorated
      function is called. Otherwise, every call will log a warning.

  Returns:
    Decorated function or method.

  Raises:
    ValueError: If date is not None or in ISO 8601 format, or instructions are
      empty.
  """
  _validate_deprecation_args(date, instructions)

  def deprecated_wrapper(func_or_class):
    """Deprecation wrapper."""
    if isinstance(func_or_class, type):
      # If a class is deprecated, you actually want to wrap the constructor.
      cls = func_or_class
      if cls.__new__ is object.__new__:
        # The class does not customize __new__, so warning from __init__
        # covers every construction.
        func = cls.__init__
        constructor_name = '__init__'
      else:
        # The class overrides __new__; wrap that instead so construction is
        # still intercepted.
        func = cls.__new__
        constructor_name = '__new__'
    else:
      cls = None
      constructor_name = None
      func = func_or_class

    decorator_utils.validate_callable(func, 'deprecated')

    @functools.wraps(func)
    def new_func(*args, **kwargs):  # pylint: disable=missing-docstring
      if _PRINT_DEPRECATION_WARNINGS:
        if func not in _PRINTED_WARNING:
          if warn_once:
            _PRINTED_WARNING[func] = True
          logging.warning(
              'From %s: %s (from %s) is deprecated and will be removed %s.\n'
              'Instructions for updating:\n%s',
              _call_location(), decorator_utils.get_qualified_name(func),
              func.__module__,
              'in a future version' if date is None else ('after %s' % date),
              instructions)
      return func(*args, **kwargs)

    # Hide the wrapper from API-doc generation and rewrite its docstring.
    doc_controls.set_deprecated(new_func)
    new_func = tf_decorator.make_decorator(
        func, new_func, 'deprecated',
        _add_deprecated_function_notice_to_docstring(func.__doc__, date,
                                                     instructions))

    if cls is None:
      return new_func
    else:
      # Insert the wrapped function as the constructor
      setattr(cls, constructor_name, new_func)
      # And update the docstring of the class.
      cls.__doc__ = _add_deprecated_function_notice_to_docstring(
          cls.__doc__, date, instructions)
      return cls

  return deprecated_wrapper
# Describes one deprecated argument: its positional index in the signature
# (-1 when unknown), whether a specific "ok" value suppresses the warning,
# and that value.
DeprecatedArgSpec = collections.namedtuple(
    'DeprecatedArgSpec', 'position has_ok_value ok_value')
def deprecated_args(date, instructions, *deprecated_arg_names_or_tuples,
                    **kwargs):
  """Decorator for marking specific function arguments as deprecated.

  This decorator logs a deprecation warning whenever the decorated function is
  called with the deprecated argument. It has the following format:

    Calling <function> (from <module>) with <arg> is deprecated and will be
    removed after <date>. Instructions for updating:
      <instructions>

  If `date` is None, 'after <date>' is replaced with 'in a future version'.
  <function> includes the class name if it is a method.

  It also edits the docstring of the function: ' (deprecated arguments)' is
  appended to the first line of the docstring and a deprecation notice is
  prepended to the rest of the docstring.

  Args:
    date: String or None. The date the function is scheduled to be removed.
      Must be ISO 8601 (YYYY-MM-DD), or None.
    instructions: String. Instructions on how to update code using the
      deprecated function.
    *deprecated_arg_names_or_tuples: String or 2-Tuple(String,
      [ok_vals]).  The string is the deprecated argument name.
      Optionally, an ok-value may be provided.  If the user provided
      argument equals this value, the warning is suppressed.
    **kwargs: If `warn_once=False` is passed, every call with a deprecated
      argument will log a warning. The default behavior is to only warn the
      first time the function is called with any given deprecated argument.
      All other kwargs raise `ValueError`.

  Returns:
    Decorated function or method.

  Raises:
    ValueError: If date is not None or in ISO 8601 format, instructions are
      empty, the deprecated arguments are not present in the function
      signature, the second element of a deprecated_tuple is not a
      list, or if a kwarg other than `warn_once` is passed.
  """
  _validate_deprecation_args(date, instructions)
  if not deprecated_arg_names_or_tuples:
    raise ValueError('Specify which argument is deprecated.')
  if kwargs and list(kwargs.keys()) != ['warn_once']:
    # Drop warn_once so the error message lists only the offending kwargs.
    kwargs.pop('warn_once', None)
    raise ValueError('Illegal argument to deprecated_args: %s' % kwargs)
  warn_once = kwargs.get('warn_once', True)

  def _get_arg_names_to_ok_vals():
    """Returns a dict mapping arg_name to DeprecatedArgSpec w/o position."""
    d = {}
    for name_or_tuple in deprecated_arg_names_or_tuples:
      if isinstance(name_or_tuple, tuple):
        d[name_or_tuple[0]] = DeprecatedArgSpec(-1, True, name_or_tuple[1])
      else:
        d[name_or_tuple] = DeprecatedArgSpec(-1, False, None)
    return d

  def _get_deprecated_positional_arguments(names_to_ok_vals, arg_spec):
    """Builds a dictionary from deprecated arguments to their spec.

    Returned dict is keyed by argument name.
    Each value is a DeprecatedArgSpec with the following fields:
       position: The zero-based argument position of the argument
         within the signature.  None if the argument isn't found in
         the signature.
       ok_values:  Values of this argument for which warning will be
         suppressed.

    Args:
      names_to_ok_vals: dict from string arg_name to a list of values,
        possibly empty, which should not elicit a warning.
      arg_spec: Output from tf_inspect.getfullargspec on the called function.

    Returns:
      Dictionary from arg_name to DeprecatedArgSpec.
    """
    arg_name_to_pos = {
        name: pos for pos, name in enumerate(arg_spec.args)}
    deprecated_positional_args = {}
    for arg_name, spec in iter(names_to_ok_vals.items()):
      if arg_name in arg_name_to_pos:
        pos = arg_name_to_pos[arg_name]
        deprecated_positional_args[arg_name] = DeprecatedArgSpec(
            pos, spec.has_ok_value, spec.ok_value)
    return deprecated_positional_args

  deprecated_arg_names = _get_arg_names_to_ok_vals()

  def deprecated_wrapper(func):
    """Deprecation decorator."""
    decorator_utils.validate_callable(func, 'deprecated_args')

    arg_spec = tf_inspect.getfullargspec(func)
    deprecated_positions = _get_deprecated_positional_arguments(
        deprecated_arg_names, arg_spec)

    # A deprecated name may also denote the *args or **kwargs parameter.
    is_varargs_deprecated = arg_spec.varargs in deprecated_arg_names
    is_kwargs_deprecated = arg_spec.varkw in deprecated_arg_names

    # Every requested name must be accounted for somewhere in the signature.
    if (len(deprecated_positions) + is_varargs_deprecated + is_kwargs_deprecated
        != len(deprecated_arg_names_or_tuples)):
      known_args = arg_spec.args + [arg_spec.varargs, arg_spec.varkw]
      missing_args = [arg_name for arg_name in deprecated_arg_names
                      if arg_name not in known_args]
      raise ValueError('The following deprecated arguments are not present '
                       'in the function signature: %s. '
                       'Found next arguments: %s.' % (missing_args, known_args))

    def _same_value(a, b):
      """A comparison operation that works for multiple object types.

      Returns True for two empty lists, two numeric values with the
      same value, etc.

      Returns False for (pd.DataFrame, None), and other pairs which
      should not be considered equivalent.

      Args:
        a: value one of the comparison.
        b: value two of the comparison.
      Returns:
        A boolean indicating whether the two inputs are the same value
        for the purposes of deprecation.
      """
      if a is b:
        return True
      try:
        equality = a == b
        if isinstance(equality, bool):
          return equality
      except TypeError:
        return False
      return False

    @functools.wraps(func)
    def new_func(*args, **kwargs):
      """Deprecation wrapper."""
      # TODO(apassos) figure out a way to have reasonable performance with
      # deprecation warnings and eager mode.
      if is_in_graph_mode.IS_IN_GRAPH_MODE() and _PRINT_DEPRECATION_WARNINGS:
        invalid_args = []
        named_args = tf_inspect.getcallargs(func, *args, **kwargs)
        # Positionally-passed deprecated arguments.
        for arg_name, spec in iter(deprecated_positions.items()):
          if (spec.position < len(args) and
              not (spec.has_ok_value and
                   _same_value(named_args[arg_name], spec.ok_value))):
            invalid_args.append(arg_name)
        if is_varargs_deprecated and len(args) > len(arg_spec.args):
          invalid_args.append(arg_spec.varargs)
        if is_kwargs_deprecated and kwargs:
          invalid_args.append(arg_spec.varkw)
        # Keyword-passed deprecated arguments.
        # NOTE(review): if a deprecated name is the *args/**kwargs parameter
        # name and a caller passes a keyword with that exact name, the
        # `deprecated_positions[arg_name]` lookup below would raise KeyError
        # — confirm this path is unreachable for the intended call sites.
        for arg_name in deprecated_arg_names:
          if (arg_name in kwargs and
              not (deprecated_positions[arg_name].has_ok_value and
                   _same_value(named_args[arg_name],
                               deprecated_positions[arg_name].ok_value))):
            invalid_args.append(arg_name)
        for arg_name in invalid_args:
          if (func, arg_name) not in _PRINTED_WARNING:
            if warn_once:
              _PRINTED_WARNING[(func, arg_name)] = True
            logging.warning(
                'From %s: calling %s (from %s) with %s is deprecated and will '
                'be removed %s.\nInstructions for updating:\n%s',
                _call_location(), decorator_utils.get_qualified_name(func),
                func.__module__, arg_name,
                'in a future version' if date is None else ('after %s' % date),
                instructions)
      return func(*args, **kwargs)

    doc = _add_deprecated_arg_notice_to_docstring(
        func.__doc__, date, instructions, sorted(deprecated_arg_names.keys()))
    return tf_decorator.make_decorator(func, new_func, 'deprecated', doc)

  return deprecated_wrapper
def deprecated_arg_values(date, instructions, warn_once=True,
                          **deprecated_kwargs):
  """Decorator for marking specific function argument values as deprecated.

  This decorator logs a deprecation warning whenever the decorated function is
  called with the deprecated argument values. It has the following format:

    Calling <function> (from <module>) with <arg>=<value> is deprecated and
    will be removed after <date>. Instructions for updating:
      <instructions>

  If `date` is None, 'after <date>' is replaced with 'in a future version'.
  <function> will include the class name if it is a method.

  It also edits the docstring of the function: ' (deprecated arguments)' is
  appended to the first line of the docstring and a deprecation notice is
  prepended to the rest of the docstring.

  Args:
    date: String or None. The date the function is scheduled to be removed.
      Must be ISO 8601 (YYYY-MM-DD), or None
    instructions: String. Instructions on how to update code using the
      deprecated function.
    warn_once: If `True`, warn only the first time this function is called with
      deprecated argument values. Otherwise, every call (with a deprecated
      argument value) will log a warning.
    **deprecated_kwargs: The deprecated argument values.

  Returns:
    Decorated function or method.

  Raises:
    ValueError: If date is not None or in ISO 8601 format, or instructions are
      empty.
  """
  _validate_deprecation_args(date, instructions)
  if not deprecated_kwargs:
    raise ValueError('Specify which argument values are deprecated.')

  def deprecated_wrapper(func):
    """Deprecation decorator."""
    decorator_utils.validate_callable(func, 'deprecated_arg_values')

    @functools.wraps(func)
    def new_func(*args, **kwargs):
      """Deprecation wrapper."""
      if _PRINT_DEPRECATION_WARNINGS:
        # Resolve both positional and keyword arguments to names, so a
        # deprecated value is caught however it was passed.
        named_args = tf_inspect.getcallargs(func, *args, **kwargs)
        for arg_name, arg_value in deprecated_kwargs.items():
          if arg_name in named_args and named_args[arg_name] == arg_value:
            # Warnings are tracked per (function, argument) pair.
            if (func, arg_name) not in _PRINTED_WARNING:
              if warn_once:
                _PRINTED_WARNING[(func, arg_name)] = True
              logging.warning(
                  'From %s: calling %s (from %s) with %s=%s is deprecated and '
                  'will be removed %s.\nInstructions for updating:\n%s',
                  _call_location(), decorator_utils.get_qualified_name(func),
                  func.__module__, arg_name, arg_value, 'in a future version'
                  if date is None else ('after %s' % date), instructions)
      return func(*args, **kwargs)

    doc = _add_deprecated_arg_value_notice_to_docstring(
        func.__doc__, date, instructions, deprecated_kwargs)
    return tf_decorator.make_decorator(func, new_func, 'deprecated', doc)

  return deprecated_wrapper
def deprecated_argument_lookup(new_name, new_value, old_name, old_value):
  """Looks up deprecated argument name and ensures both are not used.

  Args:
    new_name: new name of argument
    new_value: value of new argument (or None if not used)
    old_name: old name of argument
    old_value: value of old argument (or None if not used)

  Returns:
    The effective argument that should be used.

  Raises:
    ValueError: if new_value and old_value are both non-null
  """
  if old_value is None:
    return new_value
  if new_value is not None:
    raise ValueError("Cannot specify both '%s' and '%s'" %
                     (old_name, new_name))
  return old_value
def rewrite_argument_docstring(old_doc, old_argument, new_argument):
  """Replaces references to `old_argument` with `new_argument` in a docstring.

  Rewrites both backquoted mentions and "name:" parameter headers.
  """
  updated = old_doc.replace('`%s`' % old_argument, '`%s`' % new_argument)
  return updated.replace('%s:' % old_argument, '%s:' % new_argument)
@tf_contextlib.contextmanager
def silence():
  """Temporarily silence deprecation warnings.

  Restores the previous setting on exit, even if the governed block raises.
  """
  global _PRINT_DEPRECATION_WARNINGS
  print_deprecation_warnings = _PRINT_DEPRECATION_WARNINGS
  _PRINT_DEPRECATION_WARNINGS = False
  try:
    yield
  finally:
    # Bug fix: without the try/finally, an exception raised inside the `with`
    # block would skip this line and leave deprecation warnings silenced for
    # the remainder of the process.
    _PRINT_DEPRECATION_WARNINGS = print_deprecation_warnings
class HiddenTfApiAttribute(property):
  """Hides a class attribute from the public API.

  Attributes in public classes can be hidden from the API by having an '_' in
  front of the name (e.g. ClassName._variables). This doesn't work when
  attributes or methods are inherited from a parent class. To hide inherited
  attributes, set their values to be `deprecation.hide_attribute_from_api`.
  For example, this is used in V2 Estimator to hide the deprecated
  export_savedmodel method:
    class EstimatorV2(Estimator):
       export_savedmodel = deprecation.hide_attribute_from_api('...')
  """

  def __init__(self, deprecation_message):
    # A property whose getter always raises, making the attribute unusable.
    def _fail(unused_self):
      raise AttributeError(deprecation_message)

    super(HiddenTfApiAttribute, self).__init__(_fail)
# Lowercase alias so call sites read as
# `deprecation.hide_attribute_from_api('...')` (see the class docstring above
# for the intended usage).
hide_attribute_from_api = HiddenTfApiAttribute  # pylint: disable=invalid-name

# TODO(kathywu): Remove once cl/246395236 is submitted.
HIDDEN_ATTRIBUTE = HiddenTfApiAttribute('This attribute has been deprecated.')
<?php
namespace Illuminate\Tests\Database;
use Exception;
use Illuminate\Database\Capsule\Manager as DB;
use Illuminate\Database\DatabaseTransactionsManager;
use Mockery as m;
use PHPUnit\Framework\TestCase;
use Throwable;
class DatabaseTransactionsTest extends TestCase
{
    /**
     * Setup the database schema.
     *
     * @return void
     */
    protected function setUp(): void
    {
        $db = new DB;

        $db->addConnection([
            'driver' => 'sqlite',
            'database' => ':memory:',
        ]);

        $db->addConnection([
            'driver' => 'sqlite',
            'database' => ':memory:',
        ], 'second_connection');

        $db->setAsGlobal();

        $this->createSchema();
    }

    /**
     * Create the "users" table on every test connection.
     *
     * @return void
     */
    protected function createSchema()
    {
        foreach (['default', 'second_connection'] as $connection) {
            $this->schema($connection)->create('users', function ($table) {
                $table->increments('id');
                $table->string('name')->nullable();
                $table->string('value')->nullable();
            });
        }
    }

    /**
     * Tear down the database schema.
     *
     * @return void
     */
    protected function tearDown(): void
    {
        foreach (['default', 'second_connection'] as $connection) {
            $this->schema($connection)->drop('users');
        }

        parent::tearDown();
    }

    /** A closure-based transaction reports begin and commit to the manager. */
    public function testTransactionIsRecordedAndCommitted()
    {
        $transactionManager = m::mock(new DatabaseTransactionsManager);
        $transactionManager->shouldReceive('begin')->once()->with('default', 1);
        $transactionManager->shouldReceive('commit')->once()->with('default', 1, 0);

        $this->connection()->setTransactionManager($transactionManager);

        $this->connection()->table('users')->insert([
            'name' => 'zain', 'value' => 1,
        ]);

        $this->connection()->transaction(function () {
            $this->connection()->table('users')->where(['name' => 'zain'])->update([
                'value' => 2,
            ]);
        });
    }

    /** beginTransaction()/commit() report the same events as the closure API. */
    public function testTransactionIsRecordedAndCommittedUsingTheSeparateMethods()
    {
        $transactionManager = m::mock(new DatabaseTransactionsManager);
        $transactionManager->shouldReceive('begin')->once()->with('default', 1);
        $transactionManager->shouldReceive('commit')->once()->with('default', 1, 0);

        $this->connection()->setTransactionManager($transactionManager);

        $this->connection()->table('users')->insert([
            'name' => 'zain', 'value' => 1,
        ]);

        $this->connection()->beginTransaction();
        $this->connection()->table('users')->where(['name' => 'zain'])->update([
            'value' => 2,
        ]);
        $this->connection()->commit();
    }

    /** Nested transactions on one connection record both levels. */
    public function testNestedTransactionIsRecordedAndCommitted()
    {
        $transactionManager = m::mock(new DatabaseTransactionsManager);
        $transactionManager->shouldReceive('begin')->once()->with('default', 1);
        $transactionManager->shouldReceive('begin')->once()->with('default', 2);
        $transactionManager->shouldReceive('commit')->once()->with('default', 2, 1);
        $transactionManager->shouldReceive('commit')->once()->with('default', 1, 0);

        $this->connection()->setTransactionManager($transactionManager);

        $this->connection()->table('users')->insert([
            'name' => 'zain', 'value' => 1,
        ]);

        $this->connection()->transaction(function () {
            $this->connection()->table('users')->where(['name' => 'zain'])->update([
                'value' => 2,
            ]);

            $this->connection()->transaction(function () {
                $this->connection()->table('users')->where(['name' => 'zain'])->update([
                    'value' => 2,
                ]);
            });
        });
    }

    /**
     * Transaction levels are tracked independently per connection.
     *
     * NOTE(review): the method name contains a typo ("Recorde...d"); it is
     * kept unchanged to preserve the public test name.
     */
    public function testNestedTransactionIsRecordeForDifferentConnectionsdAndCommitted()
    {
        $transactionManager = m::mock(new DatabaseTransactionsManager);
        $transactionManager->shouldReceive('begin')->once()->with('default', 1);
        $transactionManager->shouldReceive('begin')->once()->with('second_connection', 1);
        $transactionManager->shouldReceive('begin')->once()->with('second_connection', 2);
        $transactionManager->shouldReceive('commit')->once()->with('default', 1, 0);
        $transactionManager->shouldReceive('commit')->once()->with('second_connection', 2, 1);
        $transactionManager->shouldReceive('commit')->once()->with('second_connection', 1, 0);

        $this->connection()->setTransactionManager($transactionManager);
        $this->connection('second_connection')->setTransactionManager($transactionManager);

        $this->connection()->table('users')->insert([
            'name' => 'zain', 'value' => 1,
        ]);

        $this->connection()->transaction(function () {
            $this->connection()->table('users')->where(['name' => 'zain'])->update([
                'value' => 2,
            ]);

            $this->connection('second_connection')->transaction(function () {
                $this->connection('second_connection')->table('users')->where(['name' => 'zain'])->update([
                    'value' => 2,
                ]);

                $this->connection('second_connection')->transaction(function () {
                    $this->connection('second_connection')->table('users')->where(['name' => 'zain'])->update([
                        'value' => 2,
                    ]);
                });
            });
        });
    }

    /** An exception inside the closure rolls back and never commits. */
    public function testTransactionIsRolledBack()
    {
        $transactionManager = m::mock(new DatabaseTransactionsManager);
        $transactionManager->shouldReceive('begin')->once()->with('default', 1);
        $transactionManager->shouldReceive('rollback')->once()->with('default', 0);
        $transactionManager->shouldNotReceive('commit');

        $this->connection()->setTransactionManager($transactionManager);

        $this->connection()->table('users')->insert([
            'name' => 'zain', 'value' => 1,
        ]);

        try {
            $this->connection()->transaction(function () {
                $this->connection()->table('users')->where(['name' => 'zain'])->update([
                    'value' => 2,
                ]);

                throw new Exception;
            });
        } catch (Throwable) {
        }
    }

    /** An explicit rollBack() reports rollback and no commit. */
    public function testTransactionIsRolledBackUsingSeparateMethods()
    {
        $transactionManager = m::mock(new DatabaseTransactionsManager);
        $transactionManager->shouldReceive('begin')->once()->with('default', 1);
        $transactionManager->shouldReceive('rollback')->once()->with('default', 0);
        // Bug fix: shouldNotReceive() takes method names only. The previous
        // call shouldNotReceive('commit', 1, 0) registered the integers 1 and
        // 0 as additional "method names" instead of forbidding commit().
        $transactionManager->shouldNotReceive('commit');

        $this->connection()->setTransactionManager($transactionManager);

        $this->connection()->table('users')->insert([
            'name' => 'zain', 'value' => 1,
        ]);

        $this->connection()->beginTransaction();
        $this->connection()->table('users')->where(['name' => 'zain'])->update([
            'value' => 2,
        ]);
        $this->connection()->rollBack();
    }

    /** A failure in an inner transaction rolls back both levels. */
    public function testNestedTransactionsAreRolledBack()
    {
        $transactionManager = m::mock(new DatabaseTransactionsManager);
        $transactionManager->shouldReceive('begin')->once()->with('default', 1);
        $transactionManager->shouldReceive('begin')->once()->with('default', 2);
        $transactionManager->shouldReceive('rollback')->once()->with('default', 1);
        $transactionManager->shouldReceive('rollback')->once()->with('default', 0);
        $transactionManager->shouldNotReceive('commit');

        $this->connection()->setTransactionManager($transactionManager);

        $this->connection()->table('users')->insert([
            'name' => 'zain', 'value' => 1,
        ]);

        try {
            $this->connection()->transaction(function () {
                $this->connection()->table('users')->where(['name' => 'zain'])->update([
                    'value' => 2,
                ]);

                $this->connection()->transaction(function () {
                    $this->connection()->table('users')->where(['name' => 'zain'])->update([
                        'value' => 2,
                    ]);

                    throw new Exception;
                });
            });
        } catch (Throwable) {
        }
    }

    /**
     * Get a schema builder instance.
     *
     * @return \Illuminate\Database\Schema\Builder
     */
    protected function schema($connection = 'default')
    {
        return $this->connection($connection)->getSchemaBuilder();
    }

    /**
     * Resolve a connection instance by name.
     *
     * @return \Illuminate\Database\Connection
     */
    public function connection($name = 'default')
    {
        return DB::connection($name);
    }
}
/*
* Copyright 2010-2025 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.contracts.description.booleans
import org.jetbrains.kotlin.analysis.api.KaExperimentalApi
import org.jetbrains.kotlin.analysis.api.KaImplementationDetail
/**
 * A binary combination (AND / OR) of two boolean contract expressions.
 *
 * See: [KaContractBooleanExpression].
 */
@KaExperimentalApi
@SubclassOptInRequired(KaImplementationDetail::class)
public interface KaContractBinaryLogicExpression : KaContractBooleanExpression {
    /** The kind of logical combination applied to [left] and [right]. */
    @KaExperimentalApi
    public enum class KaLogicOperation {
        AND,
        OR,
    }

    /** The left-hand operand. */
    public val left: KaContractBooleanExpression

    /** The right-hand operand. */
    public val right: KaContractBooleanExpression

    /** The operation ([KaLogicOperation.AND] or [KaLogicOperation.OR]) combining the operands. */
    public val operation: KaLogicOperation
}
/**
 * A logical negation of a boolean contract expression.
 *
 * See: [KaContractBooleanExpression].
 */
@KaExperimentalApi
@SubclassOptInRequired(KaImplementationDetail::class)
public interface KaContractLogicalNotExpression : KaContractBooleanExpression {
    /** The expression being negated. */
    public val argument: KaContractBooleanExpression
}
# -*- coding: utf-8 -*-
# Fichier tp3.py
from numpy import * # importation du module numpy
from numpy.linalg import * # importation du module numpy.linalg
from matplotlib.pyplot import * # importation du module matplotlib.pyplot
from mpl_toolkits.mplot3d import Axes3D # importation du module mpl_toolkits.mplot3d
import time
from pylab import *
def U0(X):
    """Initial displacement profile: sin(pi*x) + 0.25*sin(10*pi*x).

    Works elementwise on scalars or arrays.
    """
    # Fix: the previous version allocated Y = zeros(shape(X)) and then
    # immediately rebound Y to the computed expression, so the allocation
    # was dead code.
    return sin(pi * X) + .25 * sin(10. * pi * X)
def U1(X):
    """Initial velocity profile: identically zero on the grid X."""
    null_profile = zeros(shape(X))
    return null_profile
def solex(X, ct):
    """Exact standing-wave solution at scaled time ct for the U0/U1 data."""
    fundamental = sin(pi * X) * cos(ct * pi)
    tenth_harmonic = .25 * sin(10. * pi * X) * cos(10. * ct * pi)
    return fundamental + tenth_harmonic
print('Choix de la vitesse de transport c')
#c = float(input('c = '))
c = -2
# Spatial grid: Ns interior points on [0, 1], mesh size h.
Ns = int(1/.002)
h = 1./(Ns + 1.)
X = linspace(0.,1.,Ns+1)
Xh = X[0:Ns]
# Time stepping: step dt, final time T, M steps.
dt = .00101
T = 1.
M = int((T/dt) - 1)
meth = 2  # 1: explicit scheme, 2: implicit scheme
# Uj: current time level, Ujm: previous level, Ujn: next level.
Uj = U0(Xh)
# BUG FIX: was zeros(shape(U0)) -- the shape of the *function* object,
# which produced 0-d arrays; the work arrays must match the grid Xh.
Ujm = zeros(shape(Xh))
Ujn = zeros(shape(Xh))
# First step from the initial velocity (first-order start).
Ujn = Uj + dt*U1(Xh)
Uj, Ujm = Ujm, Uj
Ujn, Uj = Uj, Ujn
# Discrete 1-D Laplacian (Dirichlet), scaled by 1/h^2.
A = diag(-ones(Ns-1),1) - diag(ones(Ns-1),-1) + 2.*eye(Ns)
A = A/h/h
# Running maximum of the error against the exact solution.
Err = 0
Errn = 0
for j in arange(1, M):
    if meth == 1:
        # Explicit leap-frog step.  The update is fully vectorised; the
        # former inner "for i" loop recomputed the same whole vector
        # Ns-1 times with no dependence on i.
        Ujn = 2.*Uj - Ujm - c*c*dt*dt*(A.dot(Uj))
    if meth == 2:
        # Implicit step: solve (I + c^2 dt^2 A) Ujn = 2 Uj - Ujm.
        Ujn = solve((eye(Ns) + c*c*dt*dt*A), 2.*Uj - Ujm)
    # Track the maximum error over all time steps.
    U = solex(Xh, j*dt*c)
    Errn = amax(absolute(U - Ujn))
    if Err < Errn:
        Err = Errn
    Uj, Ujm = Ujm, Uj
    Ujn, Uj = Uj, Ujn
plot(Xh, Uj, label="Approché")
plot(linspace(0,1,500), solex(linspace(0,1,500), T*c), label='exacte')
xlabel('X')
ylabel('Y')
legend()
show()
disp(Err) | unknown | codeparrot/codeparrot-clean | ||
from typing import Any
from pandas import Index
def allow_na_ops(obj: Any) -> bool:
"""Whether to skip test cases including NaN"""
is_bool_index = isinstance(obj, Index) and obj.inferred_type == "boolean"
return not is_bool_index and obj._can_hold_na | python | github | https://github.com/pandas-dev/pandas | pandas/tests/base/common.py |
# -*- coding: utf-8 -*-
# $Id: da.py 7678 2013-07-03 09:57:36Z milde $
# Author: E D
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Danish-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
      # fixed: language-dependent
      # -- bibliographic / docinfo field labels --
      'author': u'Forfatter',
      'authors': u'Forfattere',
      'organization': u'Organisation',
      'address': u'Adresse',
      'contact': u'Kontakt',
      'version': u'Version',
      'revision': u'Revision',
      'status': u'Status',
      'date': u'Dato',
      'copyright': u'Copyright',
      'dedication': u'Dedikation',
      'abstract': u'Resumé',
      # -- admonition titles --
      'attention': u'Giv agt!',
      'caution': u'Pas på!',
      'danger': u'!FARE!',
      'error': u'Fejl',
      'hint': u'Vink',
      'important': u'Vigtigt',
      'note': u'Bemærk',
      'tip': u'Tips',
      'warning': u'Advarsel',
      # title of the generated table of contents
      'contents': u'Indhold'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
      # language-dependent: fixed
      u'forfatter': 'author',
      u'forfattere': 'authors',
      u'organisation': 'organization',
      u'adresse': 'address',
      u'kontakt': 'contact',
      u'version': 'version',
      u'revision': 'revision',
      u'status': 'status',
      u'dato': 'date',
      u'copyright': 'copyright',
      u'dedikation': 'dedication',
      # both the unaccented and the accented spelling are accepted
      u'resume': 'abstract',
      u'resumé': 'abstract'}
"""Danish (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order.""" | unknown | codeparrot/codeparrot-clean | ||
import { test } from '../../test';

// `export let` is the legacy props syntax; in runes mode the compiler must
// reject it and direct the author to `$props()` instead.
const expectedError = {
	code: 'legacy_export_invalid',
	message: 'Cannot use `export let` in runes mode — use `$props()` instead'
};

export default test({ error: expectedError });
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.gradle.internal.release;
import groovy.text.SimpleTemplateEngine;
import java.io.IOException;
import java.io.StringWriter;
import java.util.Map;
/**
* Methods for working with Groovy templates.
*/
public class TemplateUtils {
    /**
     * Applies {@code bindings} to {@code template}, then removes all carriage returns from
     * the result.
     *
     * @param template a Groovy template
     * @param bindings parameters for the template
     * @return the rendered template
     */
    public static String render(String template, Map<String, Object> bindings) throws IOException {
        final StringWriter writer = new StringWriter();
        try {
            final SimpleTemplateEngine engine = new SimpleTemplateEngine();
            engine.createTemplate(template).make(bindings).writeTo(writer);
        } catch (ClassNotFoundException e) {
            // createTemplate declares ClassNotFoundException; treat it as fatal.
            throw new RuntimeException(e);
        }
        /*
         * NOTE(review): "\\r" is the two-character sequence backslash + 'r',
         * not the carriage-return character "\r" that the javadoc describes.
         * Confirm which is intended before changing this.
         */
        return writer.toString().replace("\\r", "");
    }
} | java | github | https://github.com/elastic/elasticsearch | build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/TemplateUtils.java |
/*-------------------------------------------------------------------------
*
* slot.c
* Replication slot management.
*
*
* Copyright (c) 2012-2026, PostgreSQL Global Development Group
*
*
* IDENTIFICATION
* src/backend/replication/slot.c
*
* NOTES
*
* Replication slots are used to keep state about replication streams
* originating from this cluster. Their primary purpose is to prevent the
* premature removal of WAL or of old tuple versions in a manner that would
* interfere with replication; they are also useful for monitoring purposes.
* Slots need to be permanent (to allow restarts), crash-safe, and allocatable
* on standbys (to support cascading setups). The requirement that slots be
* usable on standbys precludes storing them in the system catalogs.
*
* Each replication slot gets its own directory inside the directory
* $PGDATA / PG_REPLSLOT_DIR. Inside that directory the state file will
* contain the slot's own data. Additional data can be stored alongside that
* file if required. While the server is running, the state data is also
* cached in memory for efficiency.
*
* ReplicationSlotAllocationLock must be taken in exclusive mode to allocate
* or free a slot. ReplicationSlotControlLock must be taken in shared mode
* to iterate over the slots, and in exclusive mode to change the in_use flag
* of a slot. The remaining data in each slot is protected by its mutex.
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include <unistd.h>
#include <sys/stat.h>
#include "access/transam.h"
#include "access/xlog_internal.h"
#include "access/xlogrecovery.h"
#include "common/file_utils.h"
#include "common/string.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "postmaster/interrupt.h"
#include "replication/logicallauncher.h"
#include "replication/slotsync.h"
#include "replication/slot.h"
#include "replication/walsender_private.h"
#include "storage/fd.h"
#include "storage/ipc.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "utils/builtins.h"
#include "utils/guc_hooks.h"
#include "utils/injection_point.h"
#include "utils/varlena.h"
/*
 * Replication slot on-disk data structure.
 */
typedef struct ReplicationSlotOnDisk
{
	/* first part of this struct needs to be version independent */

	/* data not covered by checksum */
	uint32		magic;			/* format identifier, see SLOT_MAGIC */
	pg_crc32c	checksum;		/* CRC over the checksummed part below */

	/* data covered by checksum */
	uint32		version;
	uint32		length;

	/*
	 * The actual data in the slot that follows can differ based on the above
	 * 'version'.
	 */
	ReplicationSlotPersistentData slotdata;
} ReplicationSlotOnDisk;
/*
 * Struct for the configuration of synchronized_standby_slots.
 *
 * Note: this must be a flat representation that can be held in a single chunk
 * of guc_malloc'd memory, so that it can be stored as the "extra" data for the
 * synchronized_standby_slots GUC.
 */
typedef struct
{
	/* Number of slot names in the slot_names[] */
	int			nslotnames;

	/*
	 * slot_names contains 'nslotnames' consecutive null-terminated C strings.
	 */
	char		slot_names[FLEXIBLE_ARRAY_MEMBER];	/* variable length */
} SyncStandbySlotsConfigData;
/*
 * Lookup table for slot invalidation causes.
 */
typedef struct SlotInvalidationCauseMap
{
	ReplicationSlotInvalidationCause cause;
	const char *cause_name;		/* user-facing name of the cause */
} SlotInvalidationCauseMap;

/*
 * One entry per ReplicationSlotInvalidationCause; completeness is enforced
 * by the StaticAssertDecl that follows this table.
 */
static const SlotInvalidationCauseMap SlotInvalidationCauses[] = {
	{RS_INVAL_NONE, "none"},
	{RS_INVAL_WAL_REMOVED, "wal_removed"},
	{RS_INVAL_HORIZON, "rows_removed"},
	{RS_INVAL_WAL_LEVEL, "wal_level_insufficient"},
	{RS_INVAL_IDLE_TIMEOUT, "idle_timeout"},
};
/*
 * Ensure that the lookup table is up-to-date with the enums defined in
 * ReplicationSlotInvalidationCause.
 */
StaticAssertDecl(lengthof(SlotInvalidationCauses) == (RS_INVAL_MAX_CAUSES + 1),
				 "array length mismatch");

/* size of version independent data */
#define ReplicationSlotOnDiskConstantSize \
	offsetof(ReplicationSlotOnDisk, slotdata)
/* size of the part of the slot not covered by the checksum */
#define ReplicationSlotOnDiskNotChecksummedSize  \
	offsetof(ReplicationSlotOnDisk, version)
/* size of the part covered by the checksum */
#define ReplicationSlotOnDiskChecksummedSize \
	sizeof(ReplicationSlotOnDisk) - ReplicationSlotOnDiskNotChecksummedSize
/* size of the slot data that is version dependent */
#define ReplicationSlotOnDiskV2Size \
	sizeof(ReplicationSlotOnDisk) - ReplicationSlotOnDiskConstantSize

#define SLOT_MAGIC		0x1051CA1	/* format identifier */
#define SLOT_VERSION	5		/* version for new files */

/* Control array for replication slot management */
ReplicationSlotCtlData *ReplicationSlotCtl = NULL;

/* My backend's replication slot in the shared memory array */
ReplicationSlot *MyReplicationSlot = NULL;

/* GUC variables */
int			max_replication_slots = 10; /* the maximum number of replication
										 * slots */

/*
 * Invalidate replication slots that have remained idle longer than this
 * duration; '0' disables it.
 */
int			idle_replication_slot_timeout_secs = 0;

/*
 * This GUC lists streaming replication standby server slot names that
 * logical WAL sender processes will wait for.
 */
char	   *synchronized_standby_slots;

/* This is the parsed and cached configuration for synchronized_standby_slots */
static SyncStandbySlotsConfigData *synchronized_standby_slots_config;

/*
 * Oldest LSN that has been confirmed to be flushed to the standbys
 * corresponding to the physical slots specified in the synchronized_standby_slots GUC.
 */
static XLogRecPtr ss_oldest_flush_lsn = InvalidXLogRecPtr;

/* forward declarations for functions used before their definitions */
static void ReplicationSlotShmemExit(int code, Datum arg);
static bool IsSlotForConflictCheck(const char *name);
static void ReplicationSlotDropPtr(ReplicationSlot *slot);

/* internal persistency functions */
static void RestoreSlotFromDisk(const char *name);
static void CreateSlotOnDisk(ReplicationSlot *slot);
static void SaveSlotToPath(ReplicationSlot *slot, const char *dir, int elevel);
/*
 * Report shared-memory space needed by ReplicationSlotsShmemInit.
 */
Size
ReplicationSlotsShmemSize(void)
{
	Size		needed;

	/* Without slots there is nothing to allocate. */
	if (max_replication_slots == 0)
		return 0;

	/* Array header plus one ReplicationSlot entry per configured slot. */
	needed = offsetof(ReplicationSlotCtlData, replication_slots);
	needed = add_size(needed,
					  mul_size(max_replication_slots, sizeof(ReplicationSlot)));

	return needed;
}
/*
 * Allocate and initialize shared memory for replication slots.
 */
void
ReplicationSlotsShmemInit(void)
{
	bool		found;

	if (max_replication_slots == 0)
		return;

	/* 'found' is set if the structure already existed in shared memory. */
	ReplicationSlotCtl = (ReplicationSlotCtlData *)
		ShmemInitStruct("ReplicationSlot Ctl", ReplicationSlotsShmemSize(),
						&found);

	if (!found)
	{
		int			i;

		/* First time through, so initialize */
		MemSet(ReplicationSlotCtl, 0, ReplicationSlotsShmemSize());

		for (i = 0; i < max_replication_slots; i++)
		{
			ReplicationSlot *slot = &ReplicationSlotCtl->replication_slots[i];

			/* everything else is zeroed by the memset above */
			slot->active_proc = INVALID_PROC_NUMBER;
			SpinLockInit(&slot->mutex);
			LWLockInitialize(&slot->io_in_progress_lock,
							 LWTRANCHE_REPLICATION_SLOT_IO);
			ConditionVariableInit(&slot->active_cv);
		}
	}
}
/*
 * Register the callback for replication slot cleanup and releasing.
 */
void
ReplicationSlotInitialize(void)
{
	/* ReplicationSlotShmemExit releases/cleans up this backend's slots. */
	before_shmem_exit(ReplicationSlotShmemExit, 0);
}
/*
 * Release and cleanup replication slots.
 *
 * before_shmem_exit callback; 'code' and 'arg' are unused.
 */
static void
ReplicationSlotShmemExit(int code, Datum arg)
{
	/* Make sure active replication slots are released */
	if (MyReplicationSlot != NULL)
		ReplicationSlotRelease();

	/* Also cleanup all the temporary slots. */
	ReplicationSlotCleanup(false);
}
/*
 * Check whether the passed slot name is valid and report errors at elevel.
 *
 * Returns true if the name is valid; with a non-fatal elevel, returns false
 * after reporting.  See comments for ReplicationSlotValidateNameInternal().
 */
bool
ReplicationSlotValidateName(const char *name, bool allow_reserved_name,
							int elevel)
{
	int			err_code;
	char	   *err_msg = NULL;
	char	   *err_hint = NULL;

	if (!ReplicationSlotValidateNameInternal(name, allow_reserved_name,
											 &err_code, &err_msg, &err_hint))
	{
		/*
		 * Use errmsg_internal() and errhint_internal() instead of errmsg()
		 * and errhint(), since the messages from
		 * ReplicationSlotValidateNameInternal() are already translated. This
		 * avoids double translation.
		 */
		ereport(elevel,
				errcode(err_code),
				errmsg_internal("%s", err_msg),
				(err_hint != NULL) ? errhint_internal("%s", err_hint) : 0);

		/* The internal routine palloc'd these; free them here. */
		pfree(err_msg);
		if (err_hint != NULL)
			pfree(err_hint);

		return false;
	}

	return true;
}
/*
 * Check whether the passed slot name is valid.
 *
 * An error will be reported for a reserved replication slot name if
 * allow_reserved_name is set to false.
 *
 * Slot names may consist out of [a-z0-9_]{1,NAMEDATALEN-1} which should allow
 * the name to be used as a directory name on every supported OS.
 *
 * Returns true if the slot name is valid. Otherwise, returns false and stores
 * the error code, error message, and optional hint in err_code, err_msg, and
 * err_hint, respectively. The caller is responsible for freeing err_msg and
 * err_hint, which are palloc'd.
 */
bool
ReplicationSlotValidateNameInternal(const char *name, bool allow_reserved_name,
									int *err_code, char **err_msg, char **err_hint)
{
	const char *cp;

	if (strlen(name) == 0)
	{
		*err_code = ERRCODE_INVALID_NAME;
		*err_msg = psprintf(_("replication slot name \"%s\" is too short"), name);
		*err_hint = NULL;
		return false;
	}

	if (strlen(name) >= NAMEDATALEN)
	{
		*err_code = ERRCODE_NAME_TOO_LONG;
		*err_msg = psprintf(_("replication slot name \"%s\" is too long"), name);
		*err_hint = NULL;
		return false;
	}

	/*
	 * Restrict the character set to [a-z0-9_] so the name is safe to use as
	 * a directory name on every supported OS (see header comment).
	 */
	for (cp = name; *cp; cp++)
	{
		if (!((*cp >= 'a' && *cp <= 'z')
			  || (*cp >= '0' && *cp <= '9')
			  || (*cp == '_')))
		{
			*err_code = ERRCODE_INVALID_NAME;
			*err_msg = psprintf(_("replication slot name \"%s\" contains invalid character"), name);
			*err_hint = psprintf(_("Replication slot names may only contain lower case letters, numbers, and the underscore character."));
			return false;
		}
	}

	if (!allow_reserved_name && IsSlotForConflictCheck(name))
	{
		*err_code = ERRCODE_RESERVED_NAME;
		*err_msg = psprintf(_("replication slot name \"%s\" is reserved"), name);
		*err_hint = psprintf(_("The name \"%s\" is reserved for the conflict detection slot."),
							 CONFLICT_DETECTION_SLOT);
		return false;
	}

	return true;
}
/*
* Return true if the replication slot name is "pg_conflict_detection".
*/
static bool
IsSlotForConflictCheck(const char *name)
{
return (strcmp(name, CONFLICT_DETECTION_SLOT) == 0);
}
/*
 * Create a new replication slot and mark it as used by this backend.
 *
 * name: Name of the slot
 * db_specific: logical decoding is db specific; if the slot is going to
 *	   be used for that pass true, otherwise false.
 * persistency: whether the slot is persistent, ephemeral, or temporary.
 * two_phase: If enabled, allows decoding of prepared transactions.
 * failover: If enabled, allows the slot to be synced to standbys so
 *	   that logical replication can be resumed after failover.
 * synced: True if the slot is synchronized from the primary server.
 */
void
ReplicationSlotCreate(const char *name, bool db_specific,
					  ReplicationSlotPersistency persistency,
					  bool two_phase, bool failover, bool synced)
{
	ReplicationSlot *slot = NULL;
	int			i;

	Assert(MyReplicationSlot == NULL);

	/*
	 * The logical launcher or pg_upgrade may create or migrate an internal
	 * slot, so using a reserved name is allowed in these cases.
	 */
	ReplicationSlotValidateName(name, IsBinaryUpgrade || IsLogicalLauncher(),
								ERROR);

	if (failover)
	{
		/*
		 * Do not allow users to create the failover enabled slots on the
		 * standby as we do not support sync to the cascading standby.
		 *
		 * However, failover enabled slots can be created during slot
		 * synchronization because we need to retain the same values as the
		 * remote slot.
		 */
		if (RecoveryInProgress() && !IsSyncingReplicationSlots())
			ereport(ERROR,
					errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					errmsg("cannot enable failover for a replication slot created on the standby"));

		/*
		 * Do not allow users to create failover enabled temporary slots,
		 * because temporary slots will not be synced to the standby.
		 *
		 * However, failover enabled temporary slots can be created during
		 * slot synchronization. See the comments atop slotsync.c for details.
		 */
		if (persistency == RS_TEMPORARY && !IsSyncingReplicationSlots())
			ereport(ERROR,
					errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					errmsg("cannot enable failover for a temporary replication slot"));
	}

	/*
	 * If some other backend ran this code concurrently with us, we'd likely
	 * both allocate the same slot, and that would be bad.  We'd also be at
	 * risk of missing a name collision.  Also, we don't want to try to create
	 * a new slot while somebody's busy cleaning up an old one, because we
	 * might both be monkeying with the same directory.
	 */
	LWLockAcquire(ReplicationSlotAllocationLock, LW_EXCLUSIVE);

	/*
	 * Check for name collision, and identify an allocatable slot.  We need to
	 * hold ReplicationSlotControlLock in shared mode for this, so that nobody
	 * else can change the in_use flags while we're looking at them.
	 */
	LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
	for (i = 0; i < max_replication_slots; i++)
	{
		ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];

		if (s->in_use && strcmp(name, NameStr(s->data.name)) == 0)
			ereport(ERROR,
					(errcode(ERRCODE_DUPLICATE_OBJECT),
					 errmsg("replication slot \"%s\" already exists", name)));
		if (!s->in_use && slot == NULL)
			slot = s;
	}
	LWLockRelease(ReplicationSlotControlLock);

	/* If all slots are in use, we're out of luck. */
	if (slot == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
				 errmsg("all replication slots are in use"),
				 errhint("Free one or increase \"max_replication_slots\".")));

	/*
	 * Since this slot is not in use, nobody should be looking at any part of
	 * it other than the in_use field unless they're trying to allocate it.
	 * And since we hold ReplicationSlotAllocationLock, nobody except us can
	 * be doing that.  So it's safe to initialize the slot.
	 */
	Assert(!slot->in_use);
	Assert(slot->active_proc == INVALID_PROC_NUMBER);

	/* first initialize persistent data */
	memset(&slot->data, 0, sizeof(ReplicationSlotPersistentData));
	namestrcpy(&slot->data.name, name);
	slot->data.database = db_specific ? MyDatabaseId : InvalidOid;
	slot->data.persistency = persistency;
	slot->data.two_phase = two_phase;
	slot->data.two_phase_at = InvalidXLogRecPtr;
	slot->data.failover = failover;
	slot->data.synced = synced;

	/* and then data only present in shared memory */
	slot->just_dirtied = false;
	slot->dirty = false;
	slot->effective_xmin = InvalidTransactionId;
	slot->effective_catalog_xmin = InvalidTransactionId;
	slot->candidate_catalog_xmin = InvalidTransactionId;
	slot->candidate_xmin_lsn = InvalidXLogRecPtr;
	slot->candidate_restart_valid = InvalidXLogRecPtr;
	slot->candidate_restart_lsn = InvalidXLogRecPtr;
	slot->last_saved_confirmed_flush = InvalidXLogRecPtr;
	slot->last_saved_restart_lsn = InvalidXLogRecPtr;
	slot->inactive_since = 0;
	slot->slotsync_skip_reason = SS_SKIP_NONE;

	/*
	 * Create the slot on disk.  We haven't actually marked the slot allocated
	 * yet, so no special cleanup is required if this errors out.
	 */
	CreateSlotOnDisk(slot);

	/*
	 * We need to briefly prevent any other backend from iterating over the
	 * slots while we flip the in_use flag. We also need to set the active
	 * flag while holding the ControlLock as otherwise a concurrent
	 * ReplicationSlotAcquire() could acquire the slot as well.
	 */
	LWLockAcquire(ReplicationSlotControlLock, LW_EXCLUSIVE);

	slot->in_use = true;

	/* We can now mark the slot active, and that makes it our slot. */
	SpinLockAcquire(&slot->mutex);
	Assert(slot->active_proc == INVALID_PROC_NUMBER);
	slot->active_proc = MyProcNumber;
	SpinLockRelease(&slot->mutex);
	MyReplicationSlot = slot;

	LWLockRelease(ReplicationSlotControlLock);

	/*
	 * Create statistics entry for the new logical slot. We don't collect any
	 * stats for physical slots, so no need to create an entry for the same.
	 * See ReplicationSlotDropPtr for why we need to do this before releasing
	 * ReplicationSlotAllocationLock.
	 */
	if (SlotIsLogical(slot))
		pgstat_create_replslot(slot);

	/*
	 * Now that the slot has been marked as in_use and active, it's safe to
	 * let somebody else try to allocate a slot.
	 */
	LWLockRelease(ReplicationSlotAllocationLock);

	/* Let everybody know we've modified this slot */
	ConditionVariableBroadcast(&slot->active_cv);
}
/*
 * Search for the named replication slot.
 *
 * Return the replication slot if found, otherwise NULL.
 */
ReplicationSlot *
SearchNamedReplicationSlot(const char *name, bool need_lock)
{
	ReplicationSlot *found = NULL;
	int			idx;

	if (need_lock)
		LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);

	/* Scan the shared array for an in-use slot with a matching name. */
	for (idx = 0; idx < max_replication_slots && found == NULL; idx++)
	{
		ReplicationSlot *candidate = &ReplicationSlotCtl->replication_slots[idx];

		if (candidate->in_use &&
			strcmp(name, NameStr(candidate->data.name)) == 0)
			found = candidate;
	}

	if (need_lock)
		LWLockRelease(ReplicationSlotControlLock);

	return found;
}
/*
 * Return the index of the replication slot in
 * ReplicationSlotCtl->replication_slots.
 *
 * This is mainly useful to have an efficient key for storing replication slot
 * stats.
 */
int
ReplicationSlotIndex(ReplicationSlot *slot)
{
	/* 'slot' must point into the shared replication slot array. */
	Assert(slot >= ReplicationSlotCtl->replication_slots &&
		   slot < ReplicationSlotCtl->replication_slots + max_replication_slots);

	return slot - ReplicationSlotCtl->replication_slots;
}
/*
 * If the slot at 'index' is unused, return false.  Otherwise 'name' is set to
 * the slot's name and true is returned.  'name' is left untouched when the
 * slot is unused.
 *
 * This likely is only useful for pgstat_replslot.c during shutdown, in other
 * cases there are obvious TOCTOU issues.
 */
bool
ReplicationSlotName(int index, Name name)
{
	ReplicationSlot *slot;
	bool		found;

	slot = &ReplicationSlotCtl->replication_slots[index];

	/*
	 * Ensure that the slot cannot be dropped while we copy the name.  Don't
	 * need the spinlock as the name of an existing slot cannot change.
	 */
	LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
	found = slot->in_use;
	if (slot->in_use)
		namestrcpy(name, NameStr(slot->data.name));
	LWLockRelease(ReplicationSlotControlLock);

	return found;
}
/*
 * Find a previously created slot and mark it as used by this process.
 *
 * An error is raised if nowait is true and the slot is currently in use. If
 * nowait is false, we sleep until the slot is released by the owning process.
 *
 * An error is raised if error_if_invalid is true and the slot is found to
 * be invalid. It should always be set to true, except when we are temporarily
 * acquiring the slot and don't intend to change it.
 */
void
ReplicationSlotAcquire(const char *name, bool nowait, bool error_if_invalid)
{
	ReplicationSlot *s;
	ProcNumber	active_proc;
	int			active_pid;

	Assert(name != NULL);

retry:
	Assert(MyReplicationSlot == NULL);

	LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);

	/* Check if the slot exists with the given name. */
	s = SearchNamedReplicationSlot(name, false);
	if (s == NULL || !s->in_use)
	{
		LWLockRelease(ReplicationSlotControlLock);
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
				 errmsg("replication slot \"%s\" does not exist",
						name)));
	}

	/*
	 * Do not allow users to acquire the reserved slot. This scenario may
	 * occur if the launcher that owns the slot has terminated unexpectedly
	 * due to an error, and a backend process attempts to reuse the slot.
	 */
	if (!IsLogicalLauncher() && IsSlotForConflictCheck(name))
		ereport(ERROR,
				errcode(ERRCODE_UNDEFINED_OBJECT),
				errmsg("cannot acquire replication slot \"%s\"", name),
				errdetail("The slot is reserved for conflict detection and can only be acquired by logical replication launcher."));

	/*
	 * This is the slot we want; check if it's active under some other
	 * process.  In single user mode, we don't need this check.
	 */
	if (IsUnderPostmaster)
	{
		/*
		 * Get ready to sleep on the slot in case it is active.  (We may end
		 * up not sleeping, but we don't want to do this while holding the
		 * spinlock.)
		 */
		if (!nowait)
			ConditionVariablePrepareToSleep(&s->active_cv);

		/*
		 * It is important to reset the inactive_since under spinlock here to
		 * avoid race conditions with slot invalidation. See comments related
		 * to inactive_since in InvalidatePossiblyObsoleteSlot.
		 */
		SpinLockAcquire(&s->mutex);
		if (s->active_proc == INVALID_PROC_NUMBER)
			s->active_proc = MyProcNumber;
		active_proc = s->active_proc;
		ReplicationSlotSetInactiveSince(s, 0, false);
		SpinLockRelease(&s->mutex);
	}
	else
	{
		s->active_proc = active_proc = MyProcNumber;
		ReplicationSlotSetInactiveSince(s, 0, true);
	}
	/* Resolve the owner's PID for the error message below. */
	active_pid = GetPGProcByNumber(active_proc)->pid;
	LWLockRelease(ReplicationSlotControlLock);

	/*
	 * If we found the slot but it's already active in another process, we
	 * wait until the owning process signals us that it's been released, or
	 * error out.
	 */
	if (active_proc != MyProcNumber)
	{
		if (!nowait)
		{
			/* Wait here until we get signaled, and then restart */
			ConditionVariableSleep(&s->active_cv,
								   WAIT_EVENT_REPLICATION_SLOT_DROP);
			ConditionVariableCancelSleep();
			goto retry;
		}
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_IN_USE),
				 errmsg("replication slot \"%s\" is active for PID %d",
						NameStr(s->data.name), active_pid)));
	}
	else if (!nowait)
		ConditionVariableCancelSleep(); /* no sleep needed after all */

	/* We made this slot active, so it's ours now. */
	MyReplicationSlot = s;

	/*
	 * We need to check for invalidation after making the slot ours to avoid
	 * the possible race condition with the checkpointer that can otherwise
	 * invalidate the slot immediately after the check.
	 */
	if (error_if_invalid && s->data.invalidated != RS_INVAL_NONE)
		ereport(ERROR,
				errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				errmsg("can no longer access replication slot \"%s\"",
					   NameStr(s->data.name)),
				errdetail("This replication slot has been invalidated due to \"%s\".",
						  GetSlotInvalidationCauseName(s->data.invalidated)));

	/* Let everybody know we've modified this slot */
	ConditionVariableBroadcast(&s->active_cv);

	/*
	 * The call to pgstat_acquire_replslot() protects against stats for a
	 * different slot, from before a restart or such, being present during
	 * pgstat_report_replslot().
	 */
	if (SlotIsLogical(s))
		pgstat_acquire_replslot(s);

	if (am_walsender)
	{
		ereport(log_replication_commands ? LOG : DEBUG1,
				SlotIsLogical(s)
				? errmsg("acquired logical replication slot \"%s\"",
						 NameStr(s->data.name))
				: errmsg("acquired physical replication slot \"%s\"",
						 NameStr(s->data.name)));
	}
}
/*
 * Release the replication slot that this backend considers to own.
 *
 * This or another backend can re-acquire the slot later.
 * Resources this slot requires will be preserved.
 */
void
ReplicationSlotRelease(void)
{
	ReplicationSlot *slot = MyReplicationSlot;
	char	   *slotname = NULL;	/* keep compiler quiet */
	bool		is_logical;
	TimestampTz now = 0;

	Assert(slot != NULL && slot->active_proc != INVALID_PROC_NUMBER);

	is_logical = SlotIsLogical(slot);

	/* Copy the name now, before an ephemeral slot can be dropped below. */
	if (am_walsender)
		slotname = pstrdup(NameStr(slot->data.name));

	if (slot->data.persistency == RS_EPHEMERAL)
	{
		/*
		 * Delete the slot. There is no !PANIC case where this is allowed to
		 * fail, all that may happen is an incomplete cleanup of the on-disk
		 * data.
		 */
		ReplicationSlotDropAcquired();

		/*
		 * Request to disable logical decoding, even though this slot may not
		 * have been the last logical slot. The checkpointer will verify if
		 * logical decoding should actually be disabled.
		 */
		if (is_logical)
			RequestDisableLogicalDecoding();
	}

	/*
	 * If slot needed to temporarily restrain both data and catalog xmin to
	 * create the catalog snapshot, remove that temporary constraint.
	 * Snapshots can only be exported while the initial snapshot is still
	 * acquired.
	 */
	if (!TransactionIdIsValid(slot->data.xmin) &&
		TransactionIdIsValid(slot->effective_xmin))
	{
		SpinLockAcquire(&slot->mutex);
		slot->effective_xmin = InvalidTransactionId;
		SpinLockRelease(&slot->mutex);
		ReplicationSlotsComputeRequiredXmin(false);
	}

	/*
	 * Set the time since the slot has become inactive. We get the current
	 * time beforehand to avoid system call while holding the spinlock.
	 */
	now = GetCurrentTimestamp();

	if (slot->data.persistency == RS_PERSISTENT)
	{
		/*
		 * Mark persistent slot inactive.  We're not freeing it, just
		 * disconnecting, but wake up others that may be waiting for it.
		 */
		SpinLockAcquire(&slot->mutex);
		slot->active_proc = INVALID_PROC_NUMBER;
		ReplicationSlotSetInactiveSince(slot, now, false);
		SpinLockRelease(&slot->mutex);
		ConditionVariableBroadcast(&slot->active_cv);
	}
	else
		ReplicationSlotSetInactiveSince(slot, now, true);

	MyReplicationSlot = NULL;

	/* might not have been set when we've been a plain slot */
	LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
	MyProc->statusFlags &= ~PROC_IN_LOGICAL_DECODING;
	ProcGlobal->statusFlags[MyProc->pgxactoff] = MyProc->statusFlags;
	LWLockRelease(ProcArrayLock);

	if (am_walsender)
	{
		ereport(log_replication_commands ? LOG : DEBUG1,
				is_logical
				? errmsg("released logical replication slot \"%s\"",
						 slotname)
				: errmsg("released physical replication slot \"%s\"",
						 slotname));

		pfree(slotname);
	}
}
/*
 * Cleanup temporary slots created in current session.
 *
 * Cleanup only synced temporary slots if 'synced_only' is true, else
 * cleanup all temporary slots.
 *
 * If it drops the last logical slot in the cluster, requests to disable
 * logical decoding.
 */
void
ReplicationSlotCleanup(bool synced_only)
{
	int			i;
	bool		found_valid_logicalslot;
	bool		dropped_logical = false;

	Assert(MyReplicationSlot == NULL);

restart:
	found_valid_logicalslot = false;
	LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
	for (i = 0; i < max_replication_slots; i++)
	{
		ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];

		if (!s->in_use)
			continue;

		SpinLockAcquire(&s->mutex);
		found_valid_logicalslot |=
			(SlotIsLogical(s) && s->data.invalidated == RS_INVAL_NONE);
		if ((s->active_proc == MyProcNumber &&
			 (!synced_only || s->data.synced)))
		{
			Assert(s->data.persistency == RS_TEMPORARY);
			SpinLockRelease(&s->mutex);
			LWLockRelease(ReplicationSlotControlLock); /* avoid deadlock */

			if (SlotIsLogical(s))
				dropped_logical = true;
			ReplicationSlotDropPtr(s);

			ConditionVariableBroadcast(&s->active_cv);
			/* we dropped the control lock above, so rescan from scratch */
			goto restart;
		}
		else
			SpinLockRelease(&s->mutex);
	}
	LWLockRelease(ReplicationSlotControlLock);

	if (dropped_logical && !found_valid_logicalslot)
		RequestDisableLogicalDecoding();
}
/*
 * Permanently drop replication slot identified by the passed in name.
 *
 * nowait: if true, error out instead of waiting when the slot is in use by
 * another process.
 */
void
ReplicationSlotDrop(const char *name, bool nowait)
{
	bool		is_logical;

	Assert(MyReplicationSlot == NULL);

	ReplicationSlotAcquire(name, nowait, false);

	/*
	 * Do not allow users to drop the slots which are currently being synced
	 * from the primary to the standby.
	 */
	if (RecoveryInProgress() && MyReplicationSlot->data.synced)
		ereport(ERROR,
				errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				errmsg("cannot drop replication slot \"%s\"", name),
				errdetail("This replication slot is being synchronized from the primary server."));

	is_logical = SlotIsLogical(MyReplicationSlot);

	ReplicationSlotDropAcquired();

	/* The checkpointer will verify whether logical decoding can be disabled. */
	if (is_logical)
		RequestDisableLogicalDecoding();
}
/*
 * Change the definition of the slot identified by the specified name.
 *
 * failover/two_phase: NULL means "leave the property unchanged"; at least
 * one of the two must be non-NULL.
 *
 * Altering the two_phase property of a slot requires caution on the
 * client-side. Enabling it at any random point during decoding has the
 * risk that transactions prepared before this change may be skipped by
 * the decoder, leading to missing prepare records on the client. So, we
 * enable it for subscription related slots only once the initial tablesync
 * is finished. See comments atop worker.c. Disabling it is safe only when
 * there are no pending prepared transaction, otherwise, the changes of
 * already prepared transactions can be replicated again along with their
 * corresponding commit leading to duplicate data or errors.
 */
void
ReplicationSlotAlter(const char *name, const bool *failover,
					 const bool *two_phase)
{
	bool		update_slot = false;

	Assert(MyReplicationSlot == NULL);
	Assert(failover || two_phase);

	ReplicationSlotAcquire(name, false, true);

	if (SlotIsPhysical(MyReplicationSlot))
		ereport(ERROR,
				errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				errmsg("cannot use %s with a physical replication slot",
					   "ALTER_REPLICATION_SLOT"));

	if (RecoveryInProgress())
	{
		/*
		 * Do not allow users to alter the slots which are currently being
		 * synced from the primary to the standby.
		 */
		if (MyReplicationSlot->data.synced)
			ereport(ERROR,
					errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
					errmsg("cannot alter replication slot \"%s\"", name),
					errdetail("This replication slot is being synchronized from the primary server."));

		/*
		 * Do not allow users to enable failover on the standby as we do not
		 * support sync to the cascading standby.
		 */
		if (failover && *failover)
			ereport(ERROR,
					errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					errmsg("cannot enable failover for a replication slot"
						   " on the standby"));
	}

	if (failover)
	{
		/*
		 * Do not allow users to enable failover for temporary slots as we do
		 * not support syncing temporary slots to the standby.
		 */
		if (*failover && MyReplicationSlot->data.persistency == RS_TEMPORARY)
			ereport(ERROR,
					errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					errmsg("cannot enable failover for a temporary replication slot"));

		if (MyReplicationSlot->data.failover != *failover)
		{
			SpinLockAcquire(&MyReplicationSlot->mutex);
			MyReplicationSlot->data.failover = *failover;
			SpinLockRelease(&MyReplicationSlot->mutex);

			update_slot = true;
		}
	}

	if (two_phase && MyReplicationSlot->data.two_phase != *two_phase)
	{
		SpinLockAcquire(&MyReplicationSlot->mutex);
		MyReplicationSlot->data.two_phase = *two_phase;
		SpinLockRelease(&MyReplicationSlot->mutex);

		update_slot = true;
	}

	/* Persist the change only if something actually changed. */
	if (update_slot)
	{
		ReplicationSlotMarkDirty();
		ReplicationSlotSave();
	}

	ReplicationSlotRelease();
}
/*
 * Permanently drop the replication slot that this backend has currently
 * acquired.
 */
void
ReplicationSlotDropAcquired(void)
{
	ReplicationSlot *acquired;

	Assert(MyReplicationSlot != NULL);

	/* Detach ourselves from the slot before physically removing it. */
	acquired = MyReplicationSlot;
	MyReplicationSlot = NULL;

	ReplicationSlotDropPtr(acquired);
}
/*
 * Permanently drop the replication slot which will be released by the point
 * this function returns.
 *
 * Removes both the slot's on-disk directory and its shared-memory entry;
 * waiters on the slot's condition variable are woken up.
 */
static void
ReplicationSlotDropPtr(ReplicationSlot *slot)
{
	char		path[MAXPGPATH];
	char		tmppath[MAXPGPATH];

	/*
	 * If some other backend ran this code concurrently with us, we might try
	 * to delete a slot with a certain name while someone else was trying to
	 * create a slot with the same name.
	 */
	LWLockAcquire(ReplicationSlotAllocationLock, LW_EXCLUSIVE);

	/* Generate pathnames. */
	sprintf(path, "%s/%s", PG_REPLSLOT_DIR, NameStr(slot->data.name));
	sprintf(tmppath, "%s/%s.tmp", PG_REPLSLOT_DIR, NameStr(slot->data.name));

	/*
	 * Rename the slot directory on disk, so that we'll no longer recognize
	 * this as a valid slot. Note that if this fails, we've got to mark the
	 * slot inactive before bailing out. If we're dropping an ephemeral or a
	 * temporary slot, we better never fail hard as the caller won't expect
	 * the slot to survive and this might get called during error handling.
	 */
	if (rename(path, tmppath) == 0)
	{
		/*
		 * We need to fsync() the directory we just renamed and its parent to
		 * make sure that our changes are on disk in a crash-safe fashion. If
		 * fsync() fails, we can't be sure whether the changes are on disk or
		 * not. For now, we handle that by panicking;
		 * StartupReplicationSlots() will try to straighten it out after
		 * restart.
		 */
		START_CRIT_SECTION();
		fsync_fname(tmppath, true);
		fsync_fname(PG_REPLSLOT_DIR, true);
		END_CRIT_SECTION();
	}
	else
	{
		/* Only loss of a persistent slot justifies a hard error here. */
		bool		fail_softly = slot->data.persistency != RS_PERSISTENT;

		SpinLockAcquire(&slot->mutex);
		slot->active_proc = INVALID_PROC_NUMBER;
		SpinLockRelease(&slot->mutex);

		/* wake up anyone waiting on this slot */
		ConditionVariableBroadcast(&slot->active_cv);

		ereport(fail_softly ? WARNING : ERROR,
				(errcode_for_file_access(),
				 errmsg("could not rename file \"%s\" to \"%s\": %m",
						path, tmppath)));
	}

	/*
	 * The slot is definitely gone. Lock out concurrent scans of the array
	 * long enough to kill it. It's OK to clear the active PID here without
	 * grabbing the mutex because nobody else can be scanning the array here,
	 * and nobody can be attached to this slot and thus access it without
	 * scanning the array.
	 *
	 * Also wake up processes waiting for it.
	 */
	LWLockAcquire(ReplicationSlotControlLock, LW_EXCLUSIVE);
	slot->active_proc = INVALID_PROC_NUMBER;
	slot->in_use = false;
	LWLockRelease(ReplicationSlotControlLock);
	ConditionVariableBroadcast(&slot->active_cv);

	/*
	 * Slot is dead and doesn't prevent resource removal anymore, recompute
	 * limits.
	 */
	ReplicationSlotsComputeRequiredXmin(false);
	ReplicationSlotsComputeRequiredLSN();

	/*
	 * If removing the directory fails, the worst thing that will happen is
	 * that the user won't be able to create a new slot with the same name
	 * until the next server restart. We warn about it, but that's all.
	 */
	if (!rmtree(tmppath, true))
		ereport(WARNING,
				(errmsg("could not remove directory \"%s\"", tmppath)));

	/*
	 * Drop the statistics entry for the replication slot. Do this while
	 * holding ReplicationSlotAllocationLock so that we don't drop a
	 * statistics entry for another slot with the same name just created in
	 * another session.
	 */
	if (SlotIsLogical(slot))
		pgstat_drop_replslot(slot);

	/*
	 * We release this at the very end, so that nobody starts trying to create
	 * a slot while we're still cleaning up the detritus of the old one.
	 */
	LWLockRelease(ReplicationSlotAllocationLock);
}
/*
* Serialize the currently acquired slot's state from memory to disk, thereby
* guaranteeing the current state will survive a crash.
*/
void
ReplicationSlotSave(void)
{
char path[MAXPGPATH];
Assert(MyReplicationSlot != NULL);
sprintf(path, "%s/%s", PG_REPLSLOT_DIR, NameStr(MyReplicationSlot->data.name));
SaveSlotToPath(MyReplicationSlot, path, ERROR);
}
/*
* Signal that it would be useful if the currently acquired slot would be
* flushed out to disk.
*
* Note that the actual flush to disk can be delayed for a long time, if
* required for correctness explicitly do a ReplicationSlotSave().
*/
void
ReplicationSlotMarkDirty(void)
{
ReplicationSlot *slot = MyReplicationSlot;
Assert(MyReplicationSlot != NULL);
SpinLockAcquire(&slot->mutex);
MyReplicationSlot->just_dirtied = true;
MyReplicationSlot->dirty = true;
SpinLockRelease(&slot->mutex);
}
/*
 * Convert a slot that's marked as RS_EPHEMERAL or RS_TEMPORARY to a
 * RS_PERSISTENT slot, guaranteeing it will be there after an eventual crash.
 */
void
ReplicationSlotPersist(void)
{
	ReplicationSlot *acquired = MyReplicationSlot;

	Assert(acquired != NULL);
	Assert(acquired->data.persistency != RS_PERSISTENT);

	/* Flip the persistency flag under the slot's spinlock ... */
	SpinLockAcquire(&acquired->mutex);
	acquired->data.persistency = RS_PERSISTENT;
	SpinLockRelease(&acquired->mutex);

	/* ... then immediately force the new state out to disk. */
	ReplicationSlotMarkDirty();
	ReplicationSlotSave();
}
/*
 * Compute the oldest xmin across all slots and store it in the ProcArray.
 *
 * If already_locked is true, both the ReplicationSlotControlLock and the
 * ProcArrayLock have already been acquired exclusively. It is crucial that the
 * caller first acquires the ReplicationSlotControlLock, followed by the
 * ProcArrayLock, to prevent any undetectable deadlocks since this function
 * acquires them in that order.
 */
void
ReplicationSlotsComputeRequiredXmin(bool already_locked)
{
	int			i;
	/* running minima over all in-use, non-invalidated slots */
	TransactionId agg_xmin = InvalidTransactionId;
	TransactionId agg_catalog_xmin = InvalidTransactionId;

	Assert(ReplicationSlotCtl != NULL);
	Assert(!already_locked ||
		   (LWLockHeldByMeInMode(ReplicationSlotControlLock, LW_EXCLUSIVE) &&
			LWLockHeldByMeInMode(ProcArrayLock, LW_EXCLUSIVE)));

	/*
	 * Hold the ReplicationSlotControlLock until after updating the slot xmin
	 * values, so no backend updates the initial xmin for newly created slot
	 * concurrently. A shared lock is used here to minimize lock contention,
	 * especially when many slots exist and advancements occur frequently.
	 * This is safe since an exclusive lock is taken during initial slot xmin
	 * update in slot creation.
	 *
	 * One might think that we can hold the ProcArrayLock exclusively and
	 * update the slot xmin values, but it could increase lock contention on
	 * the ProcArrayLock, which is not great since this function can be called
	 * at non-negligible frequency.
	 *
	 * Concurrent invocation of this function may cause the computed slot xmin
	 * to regress. However, this is harmless because tuples prior to the most
	 * recent xmin are no longer useful once advancement occurs (see
	 * LogicalConfirmReceivedLocation where the slot's xmin value is flushed
	 * before updating the effective_xmin). Thus, such regression merely
	 * prevents VACUUM from prematurely removing tuples without causing the
	 * early deletion of required data.
	 */
	if (!already_locked)
		LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);

	for (i = 0; i < max_replication_slots; i++)
	{
		ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
		TransactionId effective_xmin;
		TransactionId effective_catalog_xmin;
		bool		invalidated;

		if (!s->in_use)
			continue;

		/* take a consistent snapshot of the fields we need */
		SpinLockAcquire(&s->mutex);
		effective_xmin = s->effective_xmin;
		effective_catalog_xmin = s->effective_catalog_xmin;
		invalidated = s->data.invalidated != RS_INVAL_NONE;
		SpinLockRelease(&s->mutex);

		/* invalidated slots need not apply */
		if (invalidated)
			continue;

		/* check the data xmin */
		if (TransactionIdIsValid(effective_xmin) &&
			(!TransactionIdIsValid(agg_xmin) ||
			 TransactionIdPrecedes(effective_xmin, agg_xmin)))
			agg_xmin = effective_xmin;

		/* check the catalog xmin */
		if (TransactionIdIsValid(effective_catalog_xmin) &&
			(!TransactionIdIsValid(agg_catalog_xmin) ||
			 TransactionIdPrecedes(effective_catalog_xmin, agg_catalog_xmin)))
			agg_catalog_xmin = effective_catalog_xmin;
	}

	/* publish the result; takes ProcArrayLock unless already_locked */
	ProcArraySetReplicationSlotXmin(agg_xmin, agg_catalog_xmin, already_locked);

	if (!already_locked)
		LWLockRelease(ReplicationSlotControlLock);
}
/*
 * Compute the oldest restart LSN across all slots and inform xlog module.
 *
 * The result is pushed into the xlog module via
 * XLogSetReplicationSlotMinimumLSN(), which limits WAL segment recycling.
 *
 * Note: while max_slot_wal_keep_size is theoretically relevant for this
 * purpose, we don't try to account for that, because this module doesn't
 * know what to compare against.
 */
void
ReplicationSlotsComputeRequiredLSN(void)
{
	int			i;
	/* running minimum over all in-use, non-invalidated slots */
	XLogRecPtr	min_required = InvalidXLogRecPtr;

	Assert(ReplicationSlotCtl != NULL);

	LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
	for (i = 0; i < max_replication_slots; i++)
	{
		ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
		XLogRecPtr	restart_lsn;
		XLogRecPtr	last_saved_restart_lsn;
		bool		invalidated;
		ReplicationSlotPersistency persistency;

		if (!s->in_use)
			continue;

		/* take a consistent snapshot of the fields we need */
		SpinLockAcquire(&s->mutex);
		persistency = s->data.persistency;
		restart_lsn = s->data.restart_lsn;
		invalidated = s->data.invalidated != RS_INVAL_NONE;
		last_saved_restart_lsn = s->last_saved_restart_lsn;
		SpinLockRelease(&s->mutex);

		/* invalidated slots need not apply */
		if (invalidated)
			continue;

		/*
		 * For persistent slot use last_saved_restart_lsn to compute the
		 * oldest LSN for removal of WAL segments. The segments between
		 * last_saved_restart_lsn and restart_lsn might be needed by a
		 * persistent slot in the case of database crash. Non-persistent
		 * slots can't survive the database crash, so we don't care about
		 * last_saved_restart_lsn for them.
		 */
		if (persistency == RS_PERSISTENT)
		{
			if (XLogRecPtrIsValid(last_saved_restart_lsn) &&
				restart_lsn > last_saved_restart_lsn)
			{
				restart_lsn = last_saved_restart_lsn;
			}
		}

		if (XLogRecPtrIsValid(restart_lsn) &&
			(!XLogRecPtrIsValid(min_required) ||
			 restart_lsn < min_required))
			min_required = restart_lsn;
	}
	LWLockRelease(ReplicationSlotControlLock);

	XLogSetReplicationSlotMinimumLSN(min_required);
}
/*
 * Compute the oldest WAL LSN required by *logical* decoding slots.
 *
 * Returns InvalidXLogRecPtr if logical decoding is disabled or no logical
 * slots exist.
 *
 * NB: this returns a value >= ReplicationSlotsComputeRequiredLSN(), since it
 * ignores physical replication slots.
 *
 * The results aren't required frequently, so we don't maintain a precomputed
 * value like we do for ComputeRequiredLSN() and ComputeRequiredXmin().
 */
XLogRecPtr
ReplicationSlotsComputeLogicalRestartLSN(void)
{
	/* running minimum over all in-use, valid logical slots */
	XLogRecPtr	result = InvalidXLogRecPtr;
	int			i;

	if (max_replication_slots <= 0)
		return InvalidXLogRecPtr;

	LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);

	for (i = 0; i < max_replication_slots; i++)
	{
		ReplicationSlot *s;
		XLogRecPtr	restart_lsn;
		XLogRecPtr	last_saved_restart_lsn;
		bool		invalidated;
		ReplicationSlotPersistency persistency;

		s = &ReplicationSlotCtl->replication_slots[i];

		/* cannot change while ReplicationSlotCtlLock is held */
		if (!s->in_use)
			continue;

		/* we're only interested in logical slots */
		if (!SlotIsLogical(s))
			continue;

		/* read once, it's ok if it increases while we're checking */
		SpinLockAcquire(&s->mutex);
		persistency = s->data.persistency;
		restart_lsn = s->data.restart_lsn;
		invalidated = s->data.invalidated != RS_INVAL_NONE;
		last_saved_restart_lsn = s->last_saved_restart_lsn;
		SpinLockRelease(&s->mutex);

		/* invalidated slots need not apply */
		if (invalidated)
			continue;

		/*
		 * For persistent slot use last_saved_restart_lsn to compute the
		 * oldest LSN for removal of WAL segments. The segments between
		 * last_saved_restart_lsn and restart_lsn might be needed by a
		 * persistent slot in the case of database crash. Non-persistent
		 * slots can't survive the database crash, so we don't care about
		 * last_saved_restart_lsn for them.
		 */
		if (persistency == RS_PERSISTENT)
		{
			if (XLogRecPtrIsValid(last_saved_restart_lsn) &&
				restart_lsn > last_saved_restart_lsn)
			{
				restart_lsn = last_saved_restart_lsn;
			}
		}

		if (!XLogRecPtrIsValid(restart_lsn))
			continue;

		if (!XLogRecPtrIsValid(result) ||
			restart_lsn < result)
			result = restart_lsn;
	}

	LWLockRelease(ReplicationSlotControlLock);

	return result;
}
/*
 * ReplicationSlotsCountDBSlots -- count the number of slots that refer to the
 * passed database oid.
 *
 * Returns true if there are any slots referencing the database. *nslots will
 * be set to the absolute number of slots in the database, *nactive to ones
 * currently active.
 */
bool
ReplicationSlotsCountDBSlots(Oid dboid, int *nslots, int *nactive)
{
	int			slotno;

	*nslots = *nactive = 0;

	if (max_replication_slots <= 0)
		return false;

	LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
	for (slotno = 0; slotno < max_replication_slots; slotno++)
	{
		ReplicationSlot *slot = &ReplicationSlotCtl->replication_slots[slotno];

		/* in_use cannot change while ReplicationSlotControlLock is held */
		if (!slot->in_use)
			continue;

		/* physical slots are not tied to any particular database, skip */
		if (!SlotIsLogical(slot))
			continue;

		/* skip slots belonging to other databases */
		if (slot->data.database != dboid)
			continue;

		/* NB: invalidated slots are counted on purpose */

		/* count slots with spinlock held */
		SpinLockAcquire(&slot->mutex);
		(*nslots)++;
		if (slot->active_proc != INVALID_PROC_NUMBER)
			(*nactive)++;
		SpinLockRelease(&slot->mutex);
	}
	LWLockRelease(ReplicationSlotControlLock);

	return *nslots > 0;
}
/*
 * ReplicationSlotsDropDBSlots -- Drop all db-specific slots relating to the
 * passed database oid. The caller should hold an exclusive lock on the
 * pg_database oid for the database to prevent creation of new slots on the db
 * or replay from existing slots.
 *
 * Another session that concurrently acquires an existing slot on the target DB
 * (most likely to drop it) may cause this function to ERROR. If that happens
 * it may have dropped some but not all slots.
 *
 * This routine isn't as efficient as it could be - but we don't drop
 * databases often, especially databases with lots of slots.
 *
 * If it drops the last logical slot in the cluster, it requests to disable
 * logical decoding.
 */
void
ReplicationSlotsDropDBSlots(Oid dboid)
{
	int			i;
	bool		found_valid_logicalslot;
	bool		dropped = false;

	if (max_replication_slots <= 0)
		return;

	/* Restarted after each drop, since dropping releases the control lock. */
restart:
	found_valid_logicalslot = false;
	LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
	for (i = 0; i < max_replication_slots; i++)
	{
		ReplicationSlot *s;
		char	   *slotname;
		ProcNumber	active_proc;

		s = &ReplicationSlotCtl->replication_slots[i];

		/* cannot change while ReplicationSlotCtlLock is held */
		if (!s->in_use)
			continue;

		/* only logical slots are database specific, skip */
		if (!SlotIsLogical(s))
			continue;

		/*
		 * Check logical slots on other databases too so we can disable
		 * logical decoding only if no slots in the cluster.
		 */
		SpinLockAcquire(&s->mutex);
		found_valid_logicalslot |= (s->data.invalidated == RS_INVAL_NONE);
		SpinLockRelease(&s->mutex);

		/* not our database, skip */
		if (s->data.database != dboid)
			continue;

		/* NB: intentionally including invalidated slots to drop */

		/* acquire slot, so ReplicationSlotDropAcquired can be reused */
		SpinLockAcquire(&s->mutex);
		/* can't change while ReplicationSlotControlLock is held */
		slotname = NameStr(s->data.name);
		active_proc = s->active_proc;
		if (active_proc == INVALID_PROC_NUMBER)
		{
			MyReplicationSlot = s;
			s->active_proc = MyProcNumber;
		}
		SpinLockRelease(&s->mutex);

		/*
		 * Even though we hold an exclusive lock on the database object a
		 * logical slot for that DB can still be active, e.g. if it's
		 * concurrently being dropped by a backend connected to another DB.
		 *
		 * That's fairly unlikely in practice, so we'll just bail out.
		 *
		 * The slot sync worker holds a shared lock on the database before
		 * operating on synced logical slots to avoid conflict with the drop
		 * happening here. The persistent synced slots are thus safe but there
		 * is a possibility that the slot sync worker has created a temporary
		 * slot (which stays active even on release) and we are trying to drop
		 * that here. In practice, the chances of hitting this scenario are
		 * less as during slot synchronization, the temporary slot is
		 * immediately converted to persistent and thus is safe due to the
		 * shared lock taken on the database. So, we'll just bail out in such
		 * a case.
		 *
		 * XXX: We can consider shutting down the slot sync worker before
		 * trying to drop synced temporary slots here.
		 */
		if (active_proc != INVALID_PROC_NUMBER)
			ereport(ERROR,
					(errcode(ERRCODE_OBJECT_IN_USE),
					 errmsg("replication slot \"%s\" is active for PID %d",
							slotname, GetPGProcByNumber(active_proc)->pid)));

		/*
		 * To avoid duplicating ReplicationSlotDropAcquired() and to avoid
		 * holding ReplicationSlotControlLock over filesystem operations,
		 * release ReplicationSlotControlLock and use
		 * ReplicationSlotDropAcquired.
		 *
		 * As that means the set of slots could change, restart scan from the
		 * beginning each time we release the lock.
		 */
		LWLockRelease(ReplicationSlotControlLock);
		ReplicationSlotDropAcquired();
		dropped = true;
		goto restart;
	}
	LWLockRelease(ReplicationSlotControlLock);

	/*
	 * We only reach here on a full scan that dropped nothing, so
	 * found_valid_logicalslot reflects the whole (final) slot array.
	 */
	if (dropped && !found_valid_logicalslot)
		RequestDisableLogicalDecoding();
}
/*
 * Returns true if there is at least one in-use valid logical replication slot.
 */
bool
CheckLogicalSlotExists(void)
{
	bool		result = false;

	if (max_replication_slots <= 0)
		return false;

	LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
	for (int slotno = 0; slotno < max_replication_slots && !result; slotno++)
	{
		ReplicationSlot *slot = &ReplicationSlotCtl->replication_slots[slotno];
		bool		is_invalid;

		/* in_use cannot change while ReplicationSlotControlLock is held */
		if (!slot->in_use)
			continue;

		/* only logical slots are of interest */
		if (SlotIsPhysical(slot))
			continue;

		SpinLockAcquire(&slot->mutex);
		is_invalid = (slot->data.invalidated != RS_INVAL_NONE);
		SpinLockRelease(&slot->mutex);

		/* a valid logical slot ends the scan */
		if (!is_invalid)
			result = true;
	}
	LWLockRelease(ReplicationSlotControlLock);

	return result;
}
/*
 * Check whether the server's configuration supports using replication
 * slots.
 *
 * Raises an ERROR if max_replication_slots or wal_level is insufficient;
 * otherwise simply returns.
 */
void
CheckSlotRequirements(void)
{
	/*
	 * NB: Adding a new requirement likely means that RestoreSlotFromDisk()
	 * needs the same check.
	 */

	if (max_replication_slots == 0)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("replication slots can only be used if \"max_replication_slots\" > 0")));

	if (wal_level < WAL_LEVEL_REPLICA)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("replication slots can only be used if \"wal_level\" >= \"replica\"")));
}
/*
 * Check whether the user has privilege to use replication slots.
 *
 * Raises an ERROR unless the current user has the REPLICATION attribute.
 */
void
CheckSlotPermissions(void)
{
	if (!has_rolreplication(GetUserId()))
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 errmsg("permission denied to use replication slots"),
				 errdetail("Only roles with the %s attribute may use replication slots.",
						   "REPLICATION")));
}
/*
 * Reserve WAL for the currently active slot.
 *
 * Compute and set restart_lsn in a manner that's appropriate for the type of
 * the slot and concurrency safe.  On entry the slot must not yet have a
 * restart_lsn (asserted below).
 */
void
ReplicationSlotReserveWal(void)
{
	ReplicationSlot *slot = MyReplicationSlot;
	XLogSegNo	segno;
	XLogRecPtr	restart_lsn;

	Assert(slot != NULL);
	Assert(!XLogRecPtrIsValid(slot->data.restart_lsn));
	Assert(!XLogRecPtrIsValid(slot->last_saved_restart_lsn));

	/*
	 * The replication slot mechanism is used to prevent the removal of
	 * required WAL.
	 *
	 * Acquire an exclusive lock to prevent the checkpoint process from
	 * concurrently computing the minimum slot LSN (see
	 * CheckPointReplicationSlots). This ensures that the WAL reserved for
	 * replication cannot be removed during a checkpoint.
	 *
	 * The mechanism is reliable because if WAL reservation occurs first, the
	 * checkpoint must wait for the restart_lsn update before determining the
	 * minimum non-removable LSN. On the other hand, if the checkpoint happens
	 * first, subsequent WAL reservations will select positions at or beyond
	 * the redo pointer of that checkpoint.
	 */
	LWLockAcquire(ReplicationSlotAllocationLock, LW_EXCLUSIVE);

	/*
	 * For logical slots log a standby snapshot and start logical decoding at
	 * exactly that position. That allows the slot to start up more quickly.
	 * But on a standby we cannot do WAL writes, so just use the replay
	 * pointer; effectively, an attempt to create a logical slot on standby
	 * will cause it to wait for an xl_running_xact record to be logged
	 * independently on the primary, so that a snapshot can be built using the
	 * record.
	 *
	 * None of this is needed (or indeed helpful) for physical slots as
	 * they'll start replay at the last logged checkpoint anyway. Instead,
	 * return the location of the last redo LSN, where a base backup has to
	 * start replay at.
	 */
	if (SlotIsPhysical(slot))
		restart_lsn = GetRedoRecPtr();
	else if (RecoveryInProgress())
		restart_lsn = GetXLogReplayRecPtr(NULL);
	else
		restart_lsn = GetXLogInsertRecPtr();

	SpinLockAcquire(&slot->mutex);
	slot->data.restart_lsn = restart_lsn;
	SpinLockRelease(&slot->mutex);

	/* prevent WAL removal as fast as possible */
	ReplicationSlotsComputeRequiredLSN();

	/* Checkpoint shouldn't remove the required WAL. */
	XLByteToSeg(slot->data.restart_lsn, segno, wal_segment_size);
	if (XLogGetLastRemovedSegno() >= segno)
		elog(ERROR, "WAL required by replication slot %s has been removed concurrently",
			 NameStr(slot->data.name));

	LWLockRelease(ReplicationSlotAllocationLock);

	/* Logging the snapshot is a WAL write, so it must happen on a primary. */
	if (!RecoveryInProgress() && SlotIsLogical(slot))
	{
		XLogRecPtr	flushptr;

		/* make sure we have enough information to start */
		flushptr = LogStandbySnapshot();

		/* and make sure it's fsynced to disk */
		XLogFlush(flushptr);
	}
}
/*
 * Report that replication slot needs to be invalidated
 *
 * cause		- reason the slot is invalidated; selects the DETAIL/HINT text
 * terminating	- true while terminating the owning process, false once the
 *				  slot itself is being invalidated (different primary message)
 * pid			- PID of the owning process, used in the "terminating" message
 * slotname		- name of the affected slot
 * restart_lsn, oldestLSN - used only for the RS_INVAL_WAL_REMOVED detail
 * snapshotConflictHorizon - used only for the RS_INVAL_HORIZON detail
 * slot_idle_seconds - used only for the RS_INVAL_IDLE_TIMEOUT detail
 */
static void
ReportSlotInvalidation(ReplicationSlotInvalidationCause cause,
					   bool terminating,
					   int pid,
					   NameData slotname,
					   XLogRecPtr restart_lsn,
					   XLogRecPtr oldestLSN,
					   TransactionId snapshotConflictHorizon,
					   long slot_idle_seconds)
{
	StringInfoData err_detail;
	StringInfoData err_hint;

	initStringInfo(&err_detail);
	initStringInfo(&err_hint);

	switch (cause)
	{
		case RS_INVAL_WAL_REMOVED:
			{
				/* how far behind the removal horizon the slot has fallen */
				uint64		ex = oldestLSN - restart_lsn;

				appendStringInfo(&err_detail,
								 ngettext("The slot's restart_lsn %X/%08X exceeds the limit by %" PRIu64 " byte.",
										  "The slot's restart_lsn %X/%08X exceeds the limit by %" PRIu64 " bytes.",
										  ex),
								 LSN_FORMAT_ARGS(restart_lsn),
								 ex);
				/* translator: %s is a GUC variable name */
				appendStringInfo(&err_hint, _("You might need to increase \"%s\"."),
								 "max_slot_wal_keep_size");
				break;
			}
		case RS_INVAL_HORIZON:
			appendStringInfo(&err_detail, _("The slot conflicted with xid horizon %u."),
							 snapshotConflictHorizon);
			break;
		case RS_INVAL_WAL_LEVEL:
			appendStringInfoString(&err_detail, _("Logical decoding on standby requires the primary server to either set \"wal_level\" >= \"logical\" or have at least one logical slot when \"wal_level\" = \"replica\"."));
			break;
		case RS_INVAL_IDLE_TIMEOUT:
			{
				/* translator: %s is a GUC variable name */
				appendStringInfo(&err_detail, _("The slot's idle time of %lds exceeds the configured \"%s\" duration of %ds."),
								 slot_idle_seconds, "idle_replication_slot_timeout",
								 idle_replication_slot_timeout_secs);
				/* translator: %s is a GUC variable name */
				appendStringInfo(&err_hint, _("You might need to increase \"%s\"."),
								 "idle_replication_slot_timeout");
				break;
			}
		case RS_INVAL_NONE:
			pg_unreachable();
	}

	ereport(LOG,
			terminating ?
			errmsg("terminating process %d to release replication slot \"%s\"",
				   pid, NameStr(slotname)) :
			errmsg("invalidating obsolete replication slot \"%s\"",
				   NameStr(slotname)),
			errdetail_internal("%s", err_detail.data),
			err_hint.len ? errhint("%s", err_hint.data) : 0);

	pfree(err_detail.data);
	pfree(err_hint.data);
}
/*
 * Can we invalidate an idle replication slot?
 *
 * Idle timeout invalidation is allowed only when:
 *
 * 1. Idle timeout is set
 * 2. Slot has reserved WAL
 * 3. Slot is inactive
 * 4. The slot is not being synced from the primary while the server is in
 *    recovery. This is because synced slots are always considered to be
 *    inactive because they don't perform logical decoding to produce changes.
 */
static inline bool
CanInvalidateIdleSlot(ReplicationSlot *s)
{
	/* (1) idle timeout must be configured */
	if (idle_replication_slot_timeout_secs == 0)
		return false;

	/* (2) the slot must have WAL reserved */
	if (!XLogRecPtrIsValid(s->data.restart_lsn))
		return false;

	/* (3) the slot must actually be idle */
	if (s->inactive_since <= 0)
		return false;

	/* (4) synced slots on a standby are exempt */
	if (RecoveryInProgress() && s->data.synced)
		return false;

	return true;
}
/*
 * DetermineSlotInvalidationCause - Determine the cause for which a slot
 * becomes invalid among the given possible causes.
 *
 * This function sequentially checks all possible invalidation causes and
 * returns the first one for which the slot is eligible for invalidation.
 *
 * *inactive_since is an output parameter: it is written only when
 * RS_INVAL_IDLE_TIMEOUT is returned, and left untouched otherwise.
 * now must be a valid timestamp whenever RS_INVAL_IDLE_TIMEOUT is among
 * the possible causes (asserted below).
 */
static ReplicationSlotInvalidationCause
DetermineSlotInvalidationCause(uint32 possible_causes, ReplicationSlot *s,
							   XLogRecPtr oldestLSN, Oid dboid,
							   TransactionId snapshotConflictHorizon,
							   TimestampTz *inactive_since, TimestampTz now)
{
	Assert(possible_causes != RS_INVAL_NONE);

	if (possible_causes & RS_INVAL_WAL_REMOVED)
	{
		XLogRecPtr	restart_lsn = s->data.restart_lsn;

		/* slot needs WAL older than what can be kept around */
		if (XLogRecPtrIsValid(restart_lsn) &&
			restart_lsn < oldestLSN)
			return RS_INVAL_WAL_REMOVED;
	}

	if (possible_causes & RS_INVAL_HORIZON)
	{
		/* invalid DB oid signals a shared relation */
		if (SlotIsLogical(s) &&
			(dboid == InvalidOid || dboid == s->data.database))
		{
			TransactionId effective_xmin = s->effective_xmin;
			TransactionId catalog_effective_xmin = s->effective_catalog_xmin;

			/* either the data or the catalog xmin may conflict */
			if (TransactionIdIsValid(effective_xmin) &&
				TransactionIdPrecedesOrEquals(effective_xmin,
											  snapshotConflictHorizon))
				return RS_INVAL_HORIZON;
			else if (TransactionIdIsValid(catalog_effective_xmin) &&
					 TransactionIdPrecedesOrEquals(catalog_effective_xmin,
												   snapshotConflictHorizon))
				return RS_INVAL_HORIZON;
		}
	}

	if (possible_causes & RS_INVAL_WAL_LEVEL)
	{
		if (SlotIsLogical(s))
			return RS_INVAL_WAL_LEVEL;
	}

	if (possible_causes & RS_INVAL_IDLE_TIMEOUT)
	{
		Assert(now > 0);

		if (CanInvalidateIdleSlot(s))
		{
			/*
			 * Simulate the invalidation due to idle_timeout to test the
			 * timeout behavior promptly, without waiting for it to trigger
			 * naturally.
			 */
#ifdef USE_INJECTION_POINTS
			if (IS_INJECTION_POINT_ATTACHED("slot-timeout-inval"))
			{
				*inactive_since = 0;	/* since the beginning of time */
				return RS_INVAL_IDLE_TIMEOUT;
			}
#endif

			/*
			 * Check if the slot needs to be invalidated due to
			 * idle_replication_slot_timeout GUC.
			 */
			if (TimestampDifferenceExceedsSeconds(s->inactive_since, now,
												  idle_replication_slot_timeout_secs))
			{
				*inactive_since = s->inactive_since;
				return RS_INVAL_IDLE_TIMEOUT;
			}
		}
	}

	return RS_INVAL_NONE;
}
/*
* Helper for InvalidateObsoleteReplicationSlots
*
* Acquires the given slot and mark it invalid, if necessary and possible.
*
* Returns true if the slot was invalidated.
*
* Set *released_lock_out if ReplicationSlotControlLock was released in the
* interim (and in that case we're not holding the lock at return, otherwise
* we are).
*
* This is inherently racy, because we release the LWLock
* for syscalls, so caller must restart if we return true.
*/
static bool
InvalidatePossiblyObsoleteSlot(uint32 possible_causes,
ReplicationSlot *s,
XLogRecPtr oldestLSN,
Oid dboid, TransactionId snapshotConflictHorizon,
bool *released_lock_out)
{
int last_signaled_pid = 0;
bool released_lock = false;
bool invalidated = false;
TimestampTz inactive_since = 0;
for (;;)
{
XLogRecPtr restart_lsn;
NameData slotname;
ProcNumber active_proc;
int active_pid = 0;
ReplicationSlotInvalidationCause invalidation_cause = RS_INVAL_NONE;
TimestampTz now = 0;
long slot_idle_secs = 0;
Assert(LWLockHeldByMeInMode(ReplicationSlotControlLock, LW_SHARED));
if (!s->in_use)
{
if (released_lock)
LWLockRelease(ReplicationSlotControlLock);
break;
}
if (possible_causes & RS_INVAL_IDLE_TIMEOUT)
{
/*
* Assign the current time here to avoid system call overhead
* while holding the spinlock in subsequent code.
*/
now = GetCurrentTimestamp();
}
/*
* Check if the slot needs to be invalidated. If it needs to be
* invalidated, and is not currently acquired, acquire it and mark it
* as having been invalidated. We do this with the spinlock held to
* avoid race conditions -- for example the restart_lsn could move
* forward, or the slot could be dropped.
*/
SpinLockAcquire(&s->mutex);
restart_lsn = s->data.restart_lsn;
/* we do nothing if the slot is already invalid */
if (s->data.invalidated == RS_INVAL_NONE)
invalidation_cause = DetermineSlotInvalidationCause(possible_causes,
s, oldestLSN,
dboid,
snapshotConflictHorizon,
&inactive_since,
now);
/* if there's no invalidation, we're done */
if (invalidation_cause == RS_INVAL_NONE)
{
SpinLockRelease(&s->mutex);
if (released_lock)
LWLockRelease(ReplicationSlotControlLock);
break;
}
slotname = s->data.name;
active_proc = s->active_proc;
/*
* If the slot can be acquired, do so and mark it invalidated
* immediately. Otherwise we'll signal the owning process, below, and
* retry.
*
* Note: Unlike other slot attributes, slot's inactive_since can't be
* changed until the acquired slot is released or the owning process
* is terminated. So, the inactive slot can only be invalidated
* immediately without being terminated.
*/
if (active_proc == INVALID_PROC_NUMBER)
{
MyReplicationSlot = s;
s->active_proc = MyProcNumber;
s->data.invalidated = invalidation_cause;
/*
* XXX: We should consider not overwriting restart_lsn and instead
* just rely on .invalidated.
*/
if (invalidation_cause == RS_INVAL_WAL_REMOVED)
{
s->data.restart_lsn = InvalidXLogRecPtr;
s->last_saved_restart_lsn = InvalidXLogRecPtr;
}
/* Let caller know */
invalidated = true;
}
else
{
active_pid = GetPGProcByNumber(active_proc)->pid;
Assert(active_pid != 0);
}
SpinLockRelease(&s->mutex);
/*
* Calculate the idle time duration of the slot if slot is marked
* invalidated with RS_INVAL_IDLE_TIMEOUT.
*/
if (invalidation_cause == RS_INVAL_IDLE_TIMEOUT)
{
int slot_idle_usecs;
TimestampDifference(inactive_since, now, &slot_idle_secs,
&slot_idle_usecs);
}
if (active_proc != INVALID_PROC_NUMBER)
{
/*
* Prepare the sleep on the slot's condition variable before
* releasing the lock, to close a possible race condition if the
* slot is released before the sleep below.
*/
ConditionVariablePrepareToSleep(&s->active_cv);
LWLockRelease(ReplicationSlotControlLock);
released_lock = true;
/*
* Signal to terminate the process that owns the slot, if we
* haven't already signalled it. (Avoidance of repeated
* signalling is the only reason for there to be a loop in this
* routine; otherwise we could rely on caller's restart loop.)
*
* There is the race condition that other process may own the slot
* after its current owner process is terminated and before this
* process owns it. To handle that, we signal only if the PID of
* the owning process has changed from the previous time. (This
* logic assumes that the same PID is not reused very quickly.)
*/
if (last_signaled_pid != active_pid)
{
ReportSlotInvalidation(invalidation_cause, true, active_pid,
slotname, restart_lsn,
oldestLSN, snapshotConflictHorizon,
slot_idle_secs);
if (MyBackendType == B_STARTUP)
(void) SignalRecoveryConflict(GetPGProcByNumber(active_proc),
active_pid,
RECOVERY_CONFLICT_LOGICALSLOT);
else
(void) kill(active_pid, SIGTERM);
last_signaled_pid = active_pid;
}
/* Wait until the slot is released. */
ConditionVariableSleep(&s->active_cv,
WAIT_EVENT_REPLICATION_SLOT_DROP);
/*
* Re-acquire lock and start over; we expect to invalidate the
* slot next time (unless another process acquires the slot in the
* meantime).
*
* Note: It is possible for a slot to advance its restart_lsn or
* xmin values sufficiently between when we release the mutex and
* when we recheck, moving from a conflicting state to a non
* conflicting state. This is intentional and safe: if the slot
* has caught up while we're busy here, the resources we were
* concerned about (WAL segments or tuples) have not yet been
* removed, and there's no reason to invalidate the slot.
*/
LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
continue;
}
else
{
/*
* We hold the slot now and have already invalidated it; flush it
* to ensure that state persists.
*
* Don't want to hold ReplicationSlotControlLock across file
* system operations, so release it now but be sure to tell caller
* to restart from scratch.
*/
LWLockRelease(ReplicationSlotControlLock);
released_lock = true;
/* Make sure the invalidated state persists across server restart */
ReplicationSlotMarkDirty();
ReplicationSlotSave();
ReplicationSlotRelease();
ReportSlotInvalidation(invalidation_cause, false, active_pid,
slotname, restart_lsn,
oldestLSN, snapshotConflictHorizon,
slot_idle_secs);
/* done with this slot for now */
break;
}
}
Assert(released_lock == !LWLockHeldByMe(ReplicationSlotControlLock));
*released_lock_out = released_lock;
return invalidated;
}
/*
 * Invalidate slots that require resources about to be removed.
 *
 * Returns true when any slot has been invalidated.
 *
 * Whether a slot needs to be invalidated depends on the invalidation cause.
 * A slot is invalidated if it:
 * - RS_INVAL_WAL_REMOVED: requires a LSN older than the given segment
 * - RS_INVAL_HORIZON: requires a snapshot <= the given horizon in the given
 *   db; dboid may be InvalidOid for shared relations
 * - RS_INVAL_WAL_LEVEL: is a logical slot and effective_wal_level is not
 *   logical.
 * - RS_INVAL_IDLE_TIMEOUT: has been idle longer than the configured
 *   "idle_replication_slot_timeout" duration.
 *
 * Note: This function attempts to invalidate the slot for multiple possible
 * causes in a single pass, minimizing redundant iterations. The
 * "possible_causes" parameter can be a MASK representing one or more of the
 * defined causes.
 *
 * If it invalidates the last logical slot in the cluster, it requests to
 * disable logical decoding.
 *
 * NB - this runs as part of checkpoint, so avoid raising errors if possible.
 */
bool
InvalidateObsoleteReplicationSlots(uint32 possible_causes,
								   XLogSegNo oldestSegno, Oid dboid,
								   TransactionId snapshotConflictHorizon)
{
	XLogRecPtr	oldestLSN;
	bool		invalidated = false;
	bool		invalidated_logical = false;
	bool		found_valid_logicalslot;

	/* Callers must supply the arguments relevant to the requested causes. */
	Assert(!(possible_causes & RS_INVAL_HORIZON) || TransactionIdIsValid(snapshotConflictHorizon));
	Assert(!(possible_causes & RS_INVAL_WAL_REMOVED) || oldestSegno > 0);
	Assert(possible_causes != RS_INVAL_NONE);

	/* Nothing to do if replication slots are disabled entirely. */
	if (max_replication_slots == 0)
		return invalidated;

	XLogSegNoOffsetToRecPtr(oldestSegno, 0, wal_segment_size, oldestLSN);

	/*
	 * We restart the scan whenever InvalidatePossiblyObsoleteSlot() had to
	 * release ReplicationSlotControlLock, since the slot array may have
	 * changed underneath us in the meantime.
	 */
restart:
	found_valid_logicalslot = false;
	LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
	for (int i = 0; i < max_replication_slots; i++)
	{
		ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
		bool		released_lock = false;

		if (!s->in_use)
			continue;

		/* Prevent invalidation of logical slots during binary upgrade */
		if (SlotIsLogical(s) && IsBinaryUpgrade)
		{
			SpinLockAcquire(&s->mutex);
			found_valid_logicalslot |= (s->data.invalidated == RS_INVAL_NONE);
			SpinLockRelease(&s->mutex);
			continue;
		}

		if (InvalidatePossiblyObsoleteSlot(possible_causes, s, oldestLSN,
										   dboid, snapshotConflictHorizon,
										   &released_lock))
		{
			/* The callee reports whether it dropped the control lock. */
			Assert(released_lock);

			/* Remember we have invalidated a physical or logical slot */
			invalidated = true;

			/*
			 * Additionally, remember we have invalidated a logical slot as we
			 * can request disabling logical decoding later.
			 */
			if (SlotIsLogical(s))
				invalidated_logical = true;
		}
		else
		{
			/*
			 * We need to check if the slot is invalidated here since
			 * InvalidatePossiblyObsoleteSlot() returns false also if the slot
			 * is already invalidated.
			 */
			SpinLockAcquire(&s->mutex);
			found_valid_logicalslot |=
				(SlotIsLogical(s) && (s->data.invalidated == RS_INVAL_NONE));
			SpinLockRelease(&s->mutex);
		}

		/* if the lock was released, start from scratch */
		if (released_lock)
			goto restart;
	}
	LWLockRelease(ReplicationSlotControlLock);

	/*
	 * If any slots have been invalidated, recalculate the resource limits.
	 */
	if (invalidated)
	{
		ReplicationSlotsComputeRequiredXmin(false);
		ReplicationSlotsComputeRequiredLSN();
	}

	/*
	 * Request the checkpointer to disable logical decoding if no valid
	 * logical slots remain. If called by the checkpointer during a
	 * checkpoint, only the request is initiated; actual deactivation is
	 * deferred until after the checkpoint completes.
	 */
	if (invalidated_logical && !found_valid_logicalslot)
		RequestDisableLogicalDecoding();

	return invalidated;
}
/*
 * Flush all replication slots to disk.
 *
 * It is convenient to flush dirty replication slots at the time of checkpoint.
 * Additionally, in case of a shutdown checkpoint, we also identify the slots
 * for which the confirmed_flush LSN has been updated since the last time it
 * was saved and flush them.
 */
void
CheckPointReplicationSlots(bool is_shutdown)
{
	int			i;
	bool		last_saved_restart_lsn_updated = false;

	elog(DEBUG1, "performing replication slot checkpoint");

	/*
	 * Prevent any slot from being created/dropped while we're active. As we
	 * explicitly do *not* want to block iterating over replication_slots or
	 * acquiring a slot we cannot take the control lock - but that's OK,
	 * because holding ReplicationSlotAllocationLock is strictly stronger, and
	 * enough to guarantee that nobody can change the in_use bits on us.
	 *
	 * Additionally, acquiring the Allocation lock is necessary to serialize
	 * the slot flush process with concurrent slot WAL reservation. This
	 * ensures that the WAL position being reserved is either flushed to disk
	 * or is beyond or equal to the redo pointer of the current checkpoint
	 * (See ReplicationSlotReserveWal for details).
	 */
	LWLockAcquire(ReplicationSlotAllocationLock, LW_SHARED);
	for (i = 0; i < max_replication_slots; i++)
	{
		ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
		char		path[MAXPGPATH];

		/* Unused array entries have no on-disk state to flush. */
		if (!s->in_use)
			continue;

		/* save the slot to disk, locking is handled in SaveSlotToPath() */
		sprintf(path, "%s/%s", PG_REPLSLOT_DIR, NameStr(s->data.name));

		/*
		 * Slot's data is not flushed each time the confirmed_flush LSN is
		 * updated as that could lead to frequent writes. However, we decide
		 * to force a flush of all logical slot's data at the time of shutdown
		 * if the confirmed_flush LSN is changed since we last flushed it to
		 * disk. This helps in avoiding an unnecessary retreat of the
		 * confirmed_flush LSN after restart.
		 */
		if (is_shutdown && SlotIsLogical(s))
		{
			SpinLockAcquire(&s->mutex);

			if (s->data.invalidated == RS_INVAL_NONE &&
				s->data.confirmed_flush > s->last_saved_confirmed_flush)
			{
				s->just_dirtied = true;
				s->dirty = true;
			}
			SpinLockRelease(&s->mutex);
		}

		/*
		 * Track if we're going to update slot's last_saved_restart_lsn. We
		 * need this to know if we need to recompute the required LSN.
		 */
		if (s->last_saved_restart_lsn != s->data.restart_lsn)
			last_saved_restart_lsn_updated = true;

		SaveSlotToPath(s, path, LOG);
	}
	LWLockRelease(ReplicationSlotAllocationLock);

	/*
	 * Recompute the required LSN if SaveSlotToPath() updated
	 * last_saved_restart_lsn for any slot.
	 */
	if (last_saved_restart_lsn_updated)
		ReplicationSlotsComputeRequiredLSN();
}
/*
 * Load all replication slots from disk into memory at server startup. This
 * needs to be run before we start crash recovery.
 */
void
StartupReplicationSlots(void)
{
	DIR		   *replication_dir;
	struct dirent *replication_de;

	elog(DEBUG1, "starting up replication slots");

	/* restore all slots by iterating over all on-disk entries */
	replication_dir = AllocateDir(PG_REPLSLOT_DIR);
	while ((replication_de = ReadDir(replication_dir, PG_REPLSLOT_DIR)) != NULL)
	{
		char		path[MAXPGPATH + sizeof(PG_REPLSLOT_DIR)];
		PGFileType	de_type;

		if (strcmp(replication_de->d_name, ".") == 0 ||
			strcmp(replication_de->d_name, "..") == 0)
			continue;

		snprintf(path, sizeof(path), "%s/%s", PG_REPLSLOT_DIR, replication_de->d_name);
		de_type = get_dirent_type(path, replication_de, false, DEBUG1);

		/* slots are stored as directories, skip if the entry isn't ours */
		if (de_type != PGFILETYPE_ERROR && de_type != PGFILETYPE_DIR)
			continue;

		/* we crashed while a slot was being setup or deleted, clean up */
		if (pg_str_endswith(replication_de->d_name, ".tmp"))
		{
			if (!rmtree(path, true))
			{
				ereport(WARNING,
						(errmsg("could not remove directory \"%s\"",
								path)));
				continue;
			}
			/* make the removal itself durable */
			fsync_fname(PG_REPLSLOT_DIR, true);
			continue;
		}

		/* looks like a slot in a normal state, restore */
		RestoreSlotFromDisk(replication_de->d_name);
	}
	FreeDir(replication_dir);

	/* currently no slots exist, we're done. */
	if (max_replication_slots <= 0)
		return;

	/* Now that we have recovered all the data, compute replication xmin */
	ReplicationSlotsComputeRequiredXmin(false);
	ReplicationSlotsComputeRequiredLSN();
}
/* ----
* Manipulation of on-disk state of replication slots
*
* NB: none of the routines below should take any notice whether a slot is the
* current one or not, that's all handled a layer above.
* ----
*/
/*
 * Create the on-disk state for a new replication slot.
 *
 * The slot directory is built under a ".tmp" name first and only renamed
 * into place after its state file has been durably written, so a crash
 * cannot leave behind a directory that looks like a valid slot.
 */
static void
CreateSlotOnDisk(ReplicationSlot *slot)
{
	char		tmppath[MAXPGPATH];
	char		path[MAXPGPATH];
	struct stat st;

	/*
	 * No need to take out the io_in_progress_lock, nobody else can see this
	 * slot yet, so nobody else will write. We're reusing SaveSlotToPath which
	 * takes out the lock, if we'd take the lock here, we'd deadlock.
	 */
	sprintf(path, "%s/%s", PG_REPLSLOT_DIR, NameStr(slot->data.name));
	sprintf(tmppath, "%s/%s.tmp", PG_REPLSLOT_DIR, NameStr(slot->data.name));

	/*
	 * It's just barely possible that some previous effort to create or drop a
	 * slot with this name left a temp directory lying around. If that seems
	 * to be the case, try to remove it. If the rmtree() fails, we'll error
	 * out at the MakePGDirectory() below, so we don't bother checking
	 * success.
	 */
	if (stat(tmppath, &st) == 0 && S_ISDIR(st.st_mode))
		rmtree(tmppath, true);

	/* Create and fsync the temporary slot directory. */
	if (MakePGDirectory(tmppath) < 0)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not create directory \"%s\": %m",
						tmppath)));
	fsync_fname(tmppath, true);

	/* Write the actual state file. */
	slot->dirty = true;			/* signal that we really need to write */
	SaveSlotToPath(slot, tmppath, ERROR);

	/* Rename the directory into place. */
	if (rename(tmppath, path) != 0)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not rename file \"%s\" to \"%s\": %m",
						tmppath, path)));

	/*
	 * If we'd now fail - really unlikely - we wouldn't know whether this slot
	 * would persist after an OS crash or not - so, force a restart. The
	 * restart would try to fsync this again till it works.
	 */
	START_CRIT_SECTION();

	fsync_fname(path, true);
	fsync_fname(PG_REPLSLOT_DIR, true);

	END_CRIT_SECTION();
}
/*
 * Shared functionality between saving and creating a replication slot.
 *
 * Serializes the slot's persistent data into "<dir>/state", by writing a
 * temporary file and renaming it into place.  On I/O failure the error is
 * reported at "elevel"; when elevel < ERROR the function returns with the
 * state unsaved (the slot stays dirty), after releasing the slot's
 * io_in_progress_lock.
 */
static void
SaveSlotToPath(ReplicationSlot *slot, const char *dir, int elevel)
{
	char		tmppath[MAXPGPATH];
	char		path[MAXPGPATH];
	int			fd;
	ReplicationSlotOnDisk cp;
	bool		was_dirty;

	/* first check whether there's something to write out */
	SpinLockAcquire(&slot->mutex);
	was_dirty = slot->dirty;
	slot->just_dirtied = false;
	SpinLockRelease(&slot->mutex);

	/* and don't do anything if there's nothing to write */
	if (!was_dirty)
		return;

	/* serialize concurrent writers of this slot's state file */
	LWLockAcquire(&slot->io_in_progress_lock, LW_EXCLUSIVE);

	/* silence valgrind :( */
	memset(&cp, 0, sizeof(ReplicationSlotOnDisk));

	sprintf(tmppath, "%s/state.tmp", dir);
	sprintf(path, "%s/state", dir);

	fd = OpenTransientFile(tmppath, O_CREAT | O_EXCL | O_WRONLY | PG_BINARY);
	if (fd < 0)
	{
		/*
		 * If not an ERROR, then release the lock before returning. In case
		 * of an ERROR, the error recovery path automatically releases the
		 * lock, but no harm in explicitly releasing even in that case. Note
		 * that LWLockRelease() could affect errno.
		 */
		int			save_errno = errno;

		LWLockRelease(&slot->io_in_progress_lock);
		errno = save_errno;
		ereport(elevel,
				(errcode_for_file_access(),
				 errmsg("could not create file \"%s\": %m",
						tmppath)));
		return;
	}

	cp.magic = SLOT_MAGIC;
	INIT_CRC32C(cp.checksum);
	cp.version = SLOT_VERSION;
	cp.length = ReplicationSlotOnDiskV2Size;

	/* snapshot the persistent data under the spinlock */
	SpinLockAcquire(&slot->mutex);

	memcpy(&cp.slotdata, &slot->data, sizeof(ReplicationSlotPersistentData));

	SpinLockRelease(&slot->mutex);

	COMP_CRC32C(cp.checksum,
				(char *) (&cp) + ReplicationSlotOnDiskNotChecksummedSize,
				ReplicationSlotOnDiskChecksummedSize);
	FIN_CRC32C(cp.checksum);

	errno = 0;
	pgstat_report_wait_start(WAIT_EVENT_REPLICATION_SLOT_WRITE);
	if ((write(fd, &cp, sizeof(cp))) != sizeof(cp))
	{
		int			save_errno = errno;

		pgstat_report_wait_end();
		CloseTransientFile(fd);
		unlink(tmppath);
		LWLockRelease(&slot->io_in_progress_lock);

		/* if write didn't set errno, assume problem is no disk space */
		errno = save_errno ? save_errno : ENOSPC;
		ereport(elevel,
				(errcode_for_file_access(),
				 errmsg("could not write to file \"%s\": %m",
						tmppath)));
		return;
	}
	pgstat_report_wait_end();

	/* fsync the temporary file */
	pgstat_report_wait_start(WAIT_EVENT_REPLICATION_SLOT_SYNC);
	if (pg_fsync(fd) != 0)
	{
		int			save_errno = errno;

		pgstat_report_wait_end();
		CloseTransientFile(fd);
		unlink(tmppath);
		LWLockRelease(&slot->io_in_progress_lock);

		errno = save_errno;
		ereport(elevel,
				(errcode_for_file_access(),
				 errmsg("could not fsync file \"%s\": %m",
						tmppath)));
		return;
	}
	pgstat_report_wait_end();

	if (CloseTransientFile(fd) != 0)
	{
		int			save_errno = errno;

		unlink(tmppath);
		LWLockRelease(&slot->io_in_progress_lock);

		errno = save_errno;
		ereport(elevel,
				(errcode_for_file_access(),
				 errmsg("could not close file \"%s\": %m",
						tmppath)));
		return;
	}

	/* rename to permanent file, fsync file and directory */
	if (rename(tmppath, path) != 0)
	{
		int			save_errno = errno;

		unlink(tmppath);
		LWLockRelease(&slot->io_in_progress_lock);

		errno = save_errno;
		ereport(elevel,
				(errcode_for_file_access(),
				 errmsg("could not rename file \"%s\" to \"%s\": %m",
						tmppath, path)));
		return;
	}

	/*
	 * Check CreateSlotOnDisk() for the reasoning of using a critical section.
	 */
	START_CRIT_SECTION();

	fsync_fname(path, false);
	fsync_fname(dir, true);
	fsync_fname(PG_REPLSLOT_DIR, true);

	END_CRIT_SECTION();

	/*
	 * Successfully wrote, unset dirty bit, unless somebody dirtied again
	 * already and remember the confirmed_flush LSN value.
	 */
	SpinLockAcquire(&slot->mutex);
	if (!slot->just_dirtied)
		slot->dirty = false;
	slot->last_saved_confirmed_flush = cp.slotdata.confirmed_flush;
	slot->last_saved_restart_lsn = cp.slotdata.restart_lsn;
	SpinLockRelease(&slot->mutex);

	LWLockRelease(&slot->io_in_progress_lock);
}
/*
 * Load a single slot from disk into memory.
 *
 * Any corruption of the on-disk state is reported at PANIC, since we cannot
 * safely start crash recovery without knowing which resources the slots
 * retain.  Non-persistent (ephemeral) slots left over from a crash are
 * removed rather than restored.
 */
static void
RestoreSlotFromDisk(const char *name)
{
	ReplicationSlotOnDisk cp;
	int			i;
	char		slotdir[MAXPGPATH + sizeof(PG_REPLSLOT_DIR)];
	char		path[MAXPGPATH + sizeof(PG_REPLSLOT_DIR) + 10];
	int			fd;
	bool		restored = false;
	int			readBytes;
	pg_crc32c	checksum;
	TimestampTz now = 0;

	/* no need to lock here, no concurrent access allowed yet */

	/* delete temp file if it exists */
	sprintf(slotdir, "%s/%s", PG_REPLSLOT_DIR, name);
	sprintf(path, "%s/state.tmp", slotdir);
	if (unlink(path) < 0 && errno != ENOENT)
		ereport(PANIC,
				(errcode_for_file_access(),
				 errmsg("could not remove file \"%s\": %m", path)));

	sprintf(path, "%s/state", slotdir);

	elog(DEBUG1, "restoring replication slot from \"%s\"", path);

	/* on some operating systems fsyncing a file requires O_RDWR */
	fd = OpenTransientFile(path, O_RDWR | PG_BINARY);

	/*
	 * We do not need to handle this as we are rename()ing the directory into
	 * place only after we fsync()ed the state file.
	 */
	if (fd < 0)
		ereport(PANIC,
				(errcode_for_file_access(),
				 errmsg("could not open file \"%s\": %m", path)));

	/*
	 * Sync state file before we're reading from it. We might have crashed
	 * while it wasn't synced yet and we shouldn't continue on that basis.
	 */
	pgstat_report_wait_start(WAIT_EVENT_REPLICATION_SLOT_RESTORE_SYNC);
	if (pg_fsync(fd) != 0)
		ereport(PANIC,
				(errcode_for_file_access(),
				 errmsg("could not fsync file \"%s\": %m",
						path)));
	pgstat_report_wait_end();

	/* Also sync the parent directory */
	START_CRIT_SECTION();
	fsync_fname(slotdir, true);
	END_CRIT_SECTION();

	/* read part of statefile that's guaranteed to be version independent */
	pgstat_report_wait_start(WAIT_EVENT_REPLICATION_SLOT_READ);
	readBytes = read(fd, &cp, ReplicationSlotOnDiskConstantSize);
	pgstat_report_wait_end();
	if (readBytes != ReplicationSlotOnDiskConstantSize)
	{
		if (readBytes < 0)
			ereport(PANIC,
					(errcode_for_file_access(),
					 errmsg("could not read file \"%s\": %m", path)));
		else
			ereport(PANIC,
					(errcode(ERRCODE_DATA_CORRUPTED),
					 errmsg("could not read file \"%s\": read %d of %zu",
							path, readBytes,
							(Size) ReplicationSlotOnDiskConstantSize)));
	}

	/* verify magic */
	if (cp.magic != SLOT_MAGIC)
		ereport(PANIC,
				(errcode(ERRCODE_DATA_CORRUPTED),
				 errmsg("replication slot file \"%s\" has wrong magic number: %u instead of %u",
						path, cp.magic, SLOT_MAGIC)));

	/* verify version */
	if (cp.version != SLOT_VERSION)
		ereport(PANIC,
				(errcode(ERRCODE_DATA_CORRUPTED),
				 errmsg("replication slot file \"%s\" has unsupported version %u",
						path, cp.version)));

	/* boundary check on length */
	if (cp.length != ReplicationSlotOnDiskV2Size)
		ereport(PANIC,
				(errcode(ERRCODE_DATA_CORRUPTED),
				 errmsg("replication slot file \"%s\" has corrupted length %u",
						path, cp.length)));

	/* Now that we know the size, read the entire file */
	pgstat_report_wait_start(WAIT_EVENT_REPLICATION_SLOT_READ);
	readBytes = read(fd,
					 (char *) &cp + ReplicationSlotOnDiskConstantSize,
					 cp.length);
	pgstat_report_wait_end();
	if (readBytes != cp.length)
	{
		if (readBytes < 0)
			ereport(PANIC,
					(errcode_for_file_access(),
					 errmsg("could not read file \"%s\": %m", path)));
		else
			ereport(PANIC,
					(errcode(ERRCODE_DATA_CORRUPTED),
					 errmsg("could not read file \"%s\": read %d of %zu",
							path, readBytes, (Size) cp.length)));
	}

	if (CloseTransientFile(fd) != 0)
		ereport(PANIC,
				(errcode_for_file_access(),
				 errmsg("could not close file \"%s\": %m", path)));

	/* now verify the CRC */
	INIT_CRC32C(checksum);
	COMP_CRC32C(checksum,
				(char *) &cp + ReplicationSlotOnDiskNotChecksummedSize,
				ReplicationSlotOnDiskChecksummedSize);
	FIN_CRC32C(checksum);

	if (!EQ_CRC32C(checksum, cp.checksum))
		ereport(PANIC,
				(errmsg("checksum mismatch for replication slot file \"%s\": is %u, should be %u",
						path, checksum, cp.checksum)));

	/*
	 * If we crashed with an ephemeral slot active, don't restore but delete
	 * it.
	 */
	if (cp.slotdata.persistency != RS_PERSISTENT)
	{
		if (!rmtree(slotdir, true))
		{
			ereport(WARNING,
					(errmsg("could not remove directory \"%s\"",
							slotdir)));
		}
		fsync_fname(PG_REPLSLOT_DIR, true);
		return;
	}

	/*
	 * Verify that requirements for the specific slot type are met. That's
	 * important because if these aren't met we're not guaranteed to retain
	 * all the necessary resources for the slot.
	 *
	 * NB: We have to do so *after* the above checks for ephemeral slots,
	 * because otherwise a slot that shouldn't exist anymore could prevent
	 * restarts.
	 *
	 * NB: Changing the requirements here also requires adapting
	 * CheckSlotRequirements() and CheckLogicalDecodingRequirements().
	 */
	if (cp.slotdata.database != InvalidOid)
	{
		if (wal_level < WAL_LEVEL_REPLICA)
			ereport(FATAL,
					(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
					 errmsg("logical replication slot \"%s\" exists, but \"wal_level\" < \"replica\"",
							NameStr(cp.slotdata.name)),
					 errhint("Change \"wal_level\" to be \"replica\" or higher.")));

		/*
		 * In standby mode, the hot standby must be enabled. This check is
		 * necessary to ensure logical slots are invalidated when they become
		 * incompatible due to insufficient wal_level. Otherwise, if the
		 * primary reduces effective_wal_level below logical while hot standby
		 * is disabled, logical slots would remain valid even after promotion.
		 */
		if (StandbyMode && !EnableHotStandby)
			ereport(FATAL,
					(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
					 errmsg("logical replication slot \"%s\" exists on the standby, but \"hot_standby\" = \"off\"",
							NameStr(cp.slotdata.name)),
					 errhint("Change \"hot_standby\" to be \"on\".")));
	}
	else if (wal_level < WAL_LEVEL_REPLICA)
		ereport(FATAL,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("physical replication slot \"%s\" exists, but \"wal_level\" < \"replica\"",
						NameStr(cp.slotdata.name)),
				 errhint("Change \"wal_level\" to be \"replica\" or higher.")));

	/* nothing can be active yet, don't lock anything */
	for (i = 0; i < max_replication_slots; i++)
	{
		ReplicationSlot *slot;

		slot = &ReplicationSlotCtl->replication_slots[i];

		if (slot->in_use)
			continue;

		/* restore the entire set of persistent data */
		memcpy(&slot->data, &cp.slotdata,
			   sizeof(ReplicationSlotPersistentData));

		/* initialize in memory state */
		slot->effective_xmin = cp.slotdata.xmin;
		slot->effective_catalog_xmin = cp.slotdata.catalog_xmin;
		slot->last_saved_confirmed_flush = cp.slotdata.confirmed_flush;
		slot->last_saved_restart_lsn = cp.slotdata.restart_lsn;

		slot->candidate_catalog_xmin = InvalidTransactionId;
		slot->candidate_xmin_lsn = InvalidXLogRecPtr;
		slot->candidate_restart_lsn = InvalidXLogRecPtr;
		slot->candidate_restart_valid = InvalidXLogRecPtr;

		slot->in_use = true;
		slot->active_proc = INVALID_PROC_NUMBER;

		/*
		 * Set the time since the slot has become inactive after loading the
		 * slot from the disk into memory. Whoever acquires the slot i.e.
		 * makes the slot active will reset it. Use the same inactive_since
		 * time for all the slots.
		 */
		if (now == 0)
			now = GetCurrentTimestamp();

		ReplicationSlotSetInactiveSince(slot, now, false);

		restored = true;
		break;
	}

	if (!restored)
		ereport(FATAL,
				(errmsg("too many replication slots active before shutdown"),
				 errhint("Increase \"max_replication_slots\" and try again.")));
}
/*
 * Maps an invalidation reason for a replication slot to
 * ReplicationSlotInvalidationCause.
 */
ReplicationSlotInvalidationCause
GetSlotInvalidationCause(const char *cause_name)
{
	int			idx = 0;

	Assert(cause_name);

	/* Walk the lookup table until a cause with a matching name turns up. */
	while (idx <= RS_INVAL_MAX_CAUSES)
	{
		if (strcmp(SlotInvalidationCauses[idx].cause_name, cause_name) == 0)
			return SlotInvalidationCauses[idx].cause;
		idx++;
	}

	/* Every recognized name must be present in the table. */
	Assert(false);
	return RS_INVAL_NONE;		/* to keep compiler quiet */
}
/*
 * Maps a ReplicationSlotInvalidationCause to the invalidation
 * reason for a replication slot.
 */
const char *
GetSlotInvalidationCauseName(ReplicationSlotInvalidationCause cause)
{
	int			idx = 0;

	/* Walk the lookup table until an entry with a matching cause turns up. */
	while (idx <= RS_INVAL_MAX_CAUSES)
	{
		if (SlotInvalidationCauses[idx].cause == cause)
			return SlotInvalidationCauses[idx].cause_name;
		idx++;
	}

	/* Every defined cause must be present in the table. */
	Assert(false);
	return "none";				/* to keep compiler quiet */
}
/*
 * A helper function to validate slots specified in GUC synchronized_standby_slots.
 *
 * The rawname will be parsed, and the result will be saved into *elemlist.
 */
static bool
validate_sync_standby_slots(char *rawname, List **elemlist)
{
	ListCell   *lc;

	/* Verify syntax and parse string into a list of identifiers */
	if (!SplitIdentifierString(rawname, ',', elemlist))
	{
		GUC_check_errdetail("List syntax is invalid.");
		return false;
	}

	/* Check each parsed name; bail out at the first invalid one. */
	foreach(lc, *elemlist)
	{
		char	   *name = (char *) lfirst(lc);
		char	   *err_msg = NULL;
		char	   *err_hint = NULL;
		int			err_code;

		if (!ReplicationSlotValidateNameInternal(name, false, &err_code,
												 &err_msg, &err_hint))
		{
			GUC_check_errcode(err_code);
			GUC_check_errdetail("%s", err_msg);
			if (err_hint != NULL)
				GUC_check_errhint("%s", err_hint);
			return false;
		}
	}

	return true;
}
/*
 * GUC check_hook for synchronized_standby_slots
 *
 * Validates the comma-separated list of slot names and, when it is valid
 * and non-empty, builds a SyncStandbySlotsConfigData in *extra holding the
 * slot count and the names packed back-to-back as NUL-terminated strings.
 */
bool
check_synchronized_standby_slots(char **newval, void **extra, GucSource source)
{
	char	   *rawname;
	char	   *ptr;
	List	   *elemlist;
	int			size;
	bool		ok;
	SyncStandbySlotsConfigData *config;

	/* An empty value needs no parsed representation. */
	if ((*newval)[0] == '\0')
		return true;

	/* Need a modifiable copy of the GUC string */
	rawname = pstrdup(*newval);

	/* Now verify if the specified slots exist and have correct type */
	ok = validate_sync_standby_slots(rawname, &elemlist);

	if (!ok || elemlist == NIL)
	{
		pfree(rawname);
		list_free(elemlist);
		return ok;
	}

	/* Compute the size required for the SyncStandbySlotsConfigData struct */
	size = offsetof(SyncStandbySlotsConfigData, slot_names);
	foreach_ptr(char, slot_name, elemlist)
		size += strlen(slot_name) + 1;

	/* GUC extra value must be guc_malloc'd, not palloc'd */
	config = (SyncStandbySlotsConfigData *) guc_malloc(LOG, size);
	if (!config)
	{
		/* Release working data, same as the other failure exits above. */
		pfree(rawname);
		list_free(elemlist);
		return false;
	}

	/* Transform the data into SyncStandbySlotsConfigData */
	config->nslotnames = list_length(elemlist);

	ptr = config->slot_names;
	foreach_ptr(char, slot_name, elemlist)
	{
		strcpy(ptr, slot_name);
		ptr += strlen(slot_name) + 1;
	}

	*extra = config;

	pfree(rawname);
	list_free(elemlist);
	return true;
}
/*
 * GUC assign_hook for synchronized_standby_slots
 */
void
assign_synchronized_standby_slots(const char *newval, void *extra)
{
	/*
	 * The standby slots may have changed, so we must recompute the oldest
	 * LSN.
	 */
	ss_oldest_flush_lsn = InvalidXLogRecPtr;

	/* Install the parsed representation built by the check_hook. */
	synchronized_standby_slots_config = (SyncStandbySlotsConfigData *) extra;
}
/*
 * Check if the passed slot_name is specified in the synchronized_standby_slots GUC.
 */
bool
SlotExistsInSyncStandbySlots(const char *slot_name)
{
	const char *candidate;
	int			remaining;

	/* Return false if there is no value in synchronized_standby_slots */
	if (synchronized_standby_slots_config == NULL)
		return false;

	/*
	 * XXX: We are not expecting this list to be long so a linear search
	 * shouldn't hurt but if that turns out not to be true then we can cache
	 * this information for each WalSender as well.
	 */
	candidate = synchronized_standby_slots_config->slot_names;
	for (remaining = synchronized_standby_slots_config->nslotnames;
		 remaining > 0;
		 remaining--)
	{
		if (strcmp(candidate, slot_name) == 0)
			return true;

		/* advance past this name's terminating NUL to the next entry */
		candidate += strlen(candidate) + 1;
	}

	return false;
}
/*
 * Return true if the slots specified in synchronized_standby_slots have caught up to
 * the given WAL location, false otherwise.
 *
 * The elevel parameter specifies the error level used for logging messages
 * related to slots that do not exist, are invalidated, or are inactive.
 */
bool
StandbySlotsHaveCaughtup(XLogRecPtr wait_for_lsn, int elevel)
{
	const char *name;
	int			caught_up_slot_num = 0;	/* how many listed slots are at/past wait_for_lsn */
	XLogRecPtr	min_restart_lsn = InvalidXLogRecPtr;
	/*
	 * Don't need to wait for the standbys to catch up if there is no value in
	 * synchronized_standby_slots.
	 */
	if (synchronized_standby_slots_config == NULL)
		return true;
	/*
	 * Don't need to wait for the standbys to catch up if we are on a standby
	 * server, since we do not support syncing slots to cascading standbys.
	 */
	if (RecoveryInProgress())
		return true;
	/*
	 * Don't need to wait for the standbys to catch up if they are already
	 * beyond the specified WAL location.  ss_oldest_flush_lsn caches the
	 * minimum restart_lsn computed by a previous successful call.
	 */
	if (XLogRecPtrIsValid(ss_oldest_flush_lsn) &&
		ss_oldest_flush_lsn >= wait_for_lsn)
		return true;
	/*
	 * To prevent concurrent slot dropping and creation while filtering the
	 * slots, take the ReplicationSlotControlLock outside of the loop.
	 */
	LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
	/* slot_names is a packed list of NUL-terminated strings. */
	name = synchronized_standby_slots_config->slot_names;
	for (int i = 0; i < synchronized_standby_slots_config->nslotnames; i++)
	{
		XLogRecPtr	restart_lsn;
		bool		invalidated;
		bool		inactive;
		ReplicationSlot *slot;
		slot = SearchNamedReplicationSlot(name, false);
		/*
		 * If a slot name provided in synchronized_standby_slots does not
		 * exist, report a message and exit the loop.  Any early exit leaves
		 * caught_up_slot_num short of nslotnames, so the function returns
		 * false below.
		 */
		if (!slot)
		{
			ereport(elevel,
					errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					errmsg("replication slot \"%s\" specified in parameter \"%s\" does not exist",
						   name, "synchronized_standby_slots"),
					errdetail("Logical replication is waiting on the standby associated with replication slot \"%s\".",
							  name),
					errhint("Create the replication slot \"%s\" or amend parameter \"%s\".",
							name, "synchronized_standby_slots"));
			break;
		}
		/* Same as above: if a slot is not physical, exit the loop. */
		if (SlotIsLogical(slot))
		{
			ereport(elevel,
					errcode(ERRCODE_INVALID_PARAMETER_VALUE),
					errmsg("cannot specify logical replication slot \"%s\" in parameter \"%s\"",
						   name, "synchronized_standby_slots"),
					errdetail("Logical replication is waiting for correction on replication slot \"%s\".",
							  name),
					errhint("Remove the logical replication slot \"%s\" from parameter \"%s\".",
							name, "synchronized_standby_slots"));
			break;
		}
		/* Take a consistent snapshot of the slot fields we need. */
		SpinLockAcquire(&slot->mutex);
		restart_lsn = slot->data.restart_lsn;
		invalidated = slot->data.invalidated != RS_INVAL_NONE;
		inactive = slot->active_proc == INVALID_PROC_NUMBER;
		SpinLockRelease(&slot->mutex);
		if (invalidated)
		{
			/* Specified physical slot has been invalidated */
			ereport(elevel,
					errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
					errmsg("physical replication slot \"%s\" specified in parameter \"%s\" has been invalidated",
						   name, "synchronized_standby_slots"),
					errdetail("Logical replication is waiting on the standby associated with replication slot \"%s\".",
							  name),
					errhint("Drop and recreate the replication slot \"%s\", or amend parameter \"%s\".",
							name, "synchronized_standby_slots"));
			break;
		}
		if (!XLogRecPtrIsValid(restart_lsn) || restart_lsn < wait_for_lsn)
		{
			/* Log a message if no active_pid for this physical slot */
			if (inactive)
				ereport(elevel,
						errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
						errmsg("replication slot \"%s\" specified in parameter \"%s\" does not have active_pid",
							   name, "synchronized_standby_slots"),
						errdetail("Logical replication is waiting on the standby associated with replication slot \"%s\".",
								  name),
						errhint("Start the standby associated with the replication slot \"%s\", or amend parameter \"%s\".",
								name, "synchronized_standby_slots"));
			/*
			 * This slot hasn't caught up, so the overall result is already
			 * false; no need to examine the remaining slots.
			 */
			break;
		}
		Assert(restart_lsn >= wait_for_lsn);
		/* Track the oldest restart_lsn among the caught-up slots. */
		if (!XLogRecPtrIsValid(min_restart_lsn) ||
			min_restart_lsn > restart_lsn)
			min_restart_lsn = restart_lsn;
		caught_up_slot_num++;
		/* Advance to the next NUL-terminated name in the packed list. */
		name += strlen(name) + 1;
	}
	LWLockRelease(ReplicationSlotControlLock);
	/*
	 * Return false if not all the standbys have caught up to the specified
	 * WAL location.
	 */
	if (caught_up_slot_num != synchronized_standby_slots_config->nslotnames)
		return false;
	/* The ss_oldest_flush_lsn must not retreat. */
	Assert(!XLogRecPtrIsValid(ss_oldest_flush_lsn) ||
		   min_restart_lsn >= ss_oldest_flush_lsn);
	ss_oldest_flush_lsn = min_restart_lsn;
	return true;
}
/*
 * Wait for physical standbys to confirm receiving the given lsn.
 *
 * Used by logical decoding SQL functions. It waits for physical standbys
 * corresponding to the physical slots specified in the synchronized_standby_slots GUC.
 */
void
WaitForStandbyConfirmation(XLogRecPtr wait_for_lsn)
{
	/*
	 * Don't need to wait for the standby to catch up if the current acquired
	 * slot is not a logical failover slot, or there is no value in
	 * synchronized_standby_slots.
	 */
	if (!MyReplicationSlot->data.failover || !synchronized_standby_slots_config)
		return;
	/*
	 * Register on the condition variable before the first check, following
	 * the standard prepare/sleep/cancel protocol so a wakeup between the
	 * check and the sleep is not lost.
	 */
	ConditionVariablePrepareToSleep(&WalSndCtl->wal_confirm_rcv_cv);
	for (;;)
	{
		CHECK_FOR_INTERRUPTS();
		/*
		 * Process a pending reload so changes to the
		 * synchronized_standby_slots GUC take effect while we wait.
		 */
		if (ConfigReloadPending)
		{
			ConfigReloadPending = false;
			ProcessConfigFile(PGC_SIGHUP);
		}
		/* Exit if done waiting for every slot. */
		if (StandbySlotsHaveCaughtup(wait_for_lsn, WARNING))
			break;
		/*
		 * Wait for the slots in the synchronized_standby_slots to catch up,
		 * but use a timeout (1s) so we can also check if the
		 * synchronized_standby_slots has been changed.
		 */
		ConditionVariableTimedSleep(&WalSndCtl->wal_confirm_rcv_cv, 1000,
									WAIT_EVENT_WAIT_FOR_STANDBY_CONFIRMATION);
	}
	ConditionVariableCancelSleep();
}
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module maturity/ownership metadata consumed by Ansible's plugin tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['stableinterface'],
                    'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: ec2_vol
short_description: create and attach a volume, return volume id and device map
description:
- creates an EBS volume and optionally attaches it to an instance.
If both an instance ID and a device name is given and the instance has a device at the device name, then no volume is created and no attachment is made.
This module has a dependency on python-boto.
version_added: "1.1"
options:
instance:
description:
- instance ID if you wish to attach the volume. Since 1.9 you can set to None to detach.
required: false
default: null
name:
description:
- volume Name tag if you wish to attach an existing volume (requires instance)
required: false
default: null
version_added: "1.6"
id:
description:
- volume id if you wish to attach an existing volume (requires instance) or remove an existing volume
required: false
default: null
version_added: "1.6"
volume_size:
description:
- size of volume (in GB) to create.
required: false
default: null
volume_type:
description:
- Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS), st1 (Throughput Optimized HDD), sc1 (Cold HDD).
"Standard" is the old EBS default and continues to remain the Ansible default for backwards compatibility.
required: false
default: standard
version_added: "1.9"
iops:
description:
- the provisioned IOPs you want to associate with this volume (integer).
required: false
default: 100
version_added: "1.3"
encrypted:
description:
- Enable encryption at rest for this volume.
default: false
version_added: "1.8"
kms_key_id:
description:
- Specify the id of the KMS key to use.
default: null
version_added: "2.3"
device_name:
description:
- device id to override device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows.
required: false
default: null
delete_on_termination:
description:
- When set to "yes", the volume will be deleted upon instance termination.
required: false
default: "no"
choices: ["yes", "no"]
version_added: "2.1"
zone:
description:
- zone in which to create the volume, if unset uses the zone the instance is in (if set)
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
snapshot:
description:
- snapshot ID on which to base the volume
required: false
default: null
version_added: "1.5"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
version_added: "1.5"
state:
description:
- whether to ensure the volume is present or absent, or to list existing volumes (The C(list) option was added in version 1.8).
required: false
default: present
choices: ['absent', 'present', 'list']
version_added: "1.6"
tags:
description:
- tag:value pairs to add to the volume after creation
required: false
default: {}
version_added: "2.3"
author: "Lester Wade (@lwade)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple attachment action
- ec2_vol:
instance: XXXXXX
volume_size: 5
device_name: sdd
# Example using custom iops params
- ec2_vol:
instance: XXXXXX
volume_size: 5
iops: 100
device_name: sdd
# Example using snapshot id
- ec2_vol:
instance: XXXXXX
snapshot: "{{ snapshot }}"
# Playbook example combined with instance launch
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
wait: yes
count: 3
register: ec2
- ec2_vol:
instance: "{{ item.id }} "
volume_size: 5
with_items: "{{ ec2.instances }}"
register: ec2_vol
# Example: Launch an instance and then add a volume if not already attached
# * Volume will be created with the given name if not already created.
# * Nothing will happen if the volume is already attached.
# * Requires Ansible 2.0
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
zone: YYYYYY
id: my_instance
wait: yes
count: 1
register: ec2
- ec2_vol:
instance: "{{ item.id }}"
name: my_existing_volume_Name_tag
device_name: /dev/xvdf
with_items: "{{ ec2.instances }}"
register: ec2_vol
# Remove a volume
- ec2_vol:
id: vol-XXXXXXXX
state: absent
# Detach a volume (since 1.9)
- ec2_vol:
id: vol-XXXXXXXX
instance: None
# List volumes for an instance
- ec2_vol:
instance: i-XXXXXX
state: list
# Create new volume using SSD storage
- ec2_vol:
instance: XXXXXX
volume_size: 50
volume_type: gp2
device_name: /dev/xvdf
# Attach an existing volume to instance. The volume will be deleted upon instance termination.
- ec2_vol:
instance: XXXXXX
id: XXXXXX
device_name: /dev/sdf
delete_on_termination: yes
'''
RETURN = '''
device:
description: device name of attached volume
returned: when success
type: string
sample: "/def/sdf"
volume_id:
description: the id of volume
returned: when success
type: string
sample: "vol-35b333d9"
volume_type:
description: the volume type
returned: when success
type: string
sample: "standard"
volume:
description: a dictionary containing detailed attributes of the volume
returned: when success
type: string
sample: {
"attachment_set": {
"attach_time": "2015-10-23T00:22:29.000Z",
"deleteOnTermination": "false",
"device": "/dev/sdf",
"instance_id": "i-8356263c",
"status": "attached"
},
"create_time": "2015-10-21T14:36:08.870Z",
"encrypted": false,
"id": "vol-35b333d9",
"iops": null,
"size": 1,
"snapshot_id": "",
"status": "in-use",
"tags": {
"env": "dev"
},
"type": "standard",
"zone": "us-east-1b"
}
'''
import time
from distutils.version import LooseVersion
try:
import boto.ec2
from boto.exception import BotoServerError
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_volume(module, ec2):
    """Look up a single existing EBS volume by id and/or Name tag.

    Returns the matching boto Volume object, or None when neither 'id'
    nor 'name' was supplied (the caller then creates a fresh volume).
    Fails the module when the volume cannot be found or when the name
    filter is ambiguous (matches more than one volume).
    """
    name = module.params.get('name')
    id = module.params.get('id')
    zone = module.params.get('zone')
    filters = {}
    volume_ids = None

    # If no name or id supplied, just try volume creation based on module parameters
    if id is None and name is None:
        return None

    if zone:
        filters['availability_zone'] = zone
    if name:
        # Add to the existing filters rather than replacing the dict, so a
        # zone filter set above is not silently discarded (previous code
        # overwrote 'filters' here, returning volumes from any zone).
        filters['tag:Name'] = name
    if id:
        volume_ids = [id]
    try:
        vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    if not vols:
        if id:
            msg = "Could not find the volume with id: %s" % id
            if name:
                msg += (" and name: %s" % name)
            module.fail_json(msg=msg)
        else:
            return None

    if len(vols) > 1:
        module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name)
    return vols[0]
def get_volumes(module, ec2):
    """Return every EBS volume, or only those attached to the given instance."""
    instance_id = module.params.get('instance')
    # Restrict the listing to one instance when an instance id was supplied.
    query = {}
    if instance_id:
        query['filters'] = {'attachment.instance-id': instance_id}
    try:
        found = ec2.get_all_volumes(**query)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
    return found
def delete_volume(module, ec2):
    """Remove the EBS volume named by the module's 'id' parameter.

    Reports changed=True on success and changed=False when the volume is
    already gone; any other EC2 error fails the module.
    """
    volume_id = module.params['id']
    try:
        ec2.delete_volume(volume_id)
    except boto.exception.EC2ResponseError as ec2_error:
        # Deleting an already-deleted volume is treated as "no change".
        if ec2_error.code == 'InvalidVolume.NotFound':
            module.exit_json(changed=False)
        module.fail_json(msg=ec2_error.message)
    else:
        module.exit_json(changed=True)
def boto_supports_volume_encryption():
    """
    Check if Boto library supports encryption of EBS volumes (added in 2.29.0)

    Returns:
        True if the installed boto is version 2.29.0 or newer, else False
    """
    # Previous docstring was copy-pasted from a spot-instance helper and
    # described the wrong check; this is purely a version comparison.
    return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
def boto_supports_kms_key_id():
    """
    Check if Boto library supports kms_key_ids (added in 2.39.0)

    Returns:
        True if the installed boto is at least version 2.39.0, else False
    """
    # Guard clause instead of a boolean 'and' chain: no Version attribute
    # means an old/unknown boto, which cannot support KMS key ids.
    if not hasattr(boto, 'Version'):
        return False
    return LooseVersion(boto.Version) >= LooseVersion('2.39.0')
def create_volume(module, ec2, zone):
    """Create (or look up) the requested EBS volume in *zone*.

    Returns (volume, changed).  When 'name' or 'id' matches an existing
    volume, that volume is returned with changed=False; otherwise a new
    volume is created, waited on until available, and tagged.
    """
    changed = False
    name = module.params.get('name')
    iops = module.params.get('iops')
    encrypted = module.params.get('encrypted')
    kms_key_id = module.params.get('kms_key_id')
    volume_size = module.params.get('volume_size')
    volume_type = module.params.get('volume_type')
    snapshot = module.params.get('snapshot')
    tags = module.params.get('tags')
    # If custom iops is defined we use volume_type "io1" rather than the default of "standard"
    if iops:
        volume_type = 'io1'
    volume = get_volume(module, ec2)
    if volume is None:
        try:
            # Pass encryption arguments positionally only when the installed
            # boto understands them (2.29.0+); kms_key_id needs 2.39.0+.
            if boto_supports_volume_encryption():
                if kms_key_id is not None:
                    volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted, kms_key_id)
                else:
                    volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted)
                changed = True
            else:
                volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops)
                changed = True
            # Poll every 3s until AWS reports the volume usable.
            # NOTE(review): there is no upper bound on this wait.
            while volume.status != 'available':
                time.sleep(3)
                volume.update()
            # The Name tag mirrors the 'name' parameter; note this mutates
            # the caller-supplied tags dict from module.params.
            if name:
                tags["Name"] = name
            if tags:
                ec2.create_tags([volume.id], tags)
        except boto.exception.BotoServerError as e:
            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
    return volume, changed
def attach_volume(module, ec2, volume, instance):
    """Attach *volume* to *instance*, waiting until the attachment completes.

    Returns (volume, changed).  Fails the module when the volume is
    already attached to a different instance; when it is attached to the
    right instance, only the delete-on-termination flag is reconciled.
    """
    device_name = module.params.get('device_name')
    delete_on_termination = module.params.get('delete_on_termination')
    changed = False
    # If device_name isn't set, make a choice based on best practices here:
    # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
    # In future this needs to be more dynamic but combining block device mapping best practices
    # (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;)
    # Use password data attribute to tell whether the instance is Windows or Linux
    if device_name is None:
        try:
            # Only Windows instances expose password data, so this doubles
            # as an OS probe for choosing the conventional device name.
            if not ec2.get_password_data(instance.id):
                device_name = '/dev/sdf'
            else:
                device_name = '/dev/xvdf'
        except boto.exception.BotoServerError as e:
            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
    if volume.attachment_state() is not None:
        adata = volume.attach_data
        if adata.instance_id != instance.id:
            module.fail_json(msg = "Volume %s is already attached to another instance: %s"
                             % (volume.id, adata.instance_id))
        else:
            # Volume is already attached to right instance
            changed = modify_dot_attribute(module, ec2, instance, device_name)
    else:
        try:
            volume.attach(instance.id, device_name)
            # Poll every 3s until AWS reports the attachment complete.
            # NOTE(review): no upper bound on this wait.
            while volume.attachment_state() != 'attached':
                time.sleep(3)
                volume.update()
            changed = True
        except boto.exception.BotoServerError as e:
            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
        # Apply the requested delete-on-termination setting to the fresh
        # attachment; 'changed' is already True from the attach itself, so
        # the helper's return value is deliberately ignored here.
        modify_dot_attribute(module, ec2, instance, device_name)
    return volume, changed
def modify_dot_attribute(module, ec2, instance, device_name):
    """Reconcile the attachment's delete-on-termination flag with the task.

    Returns True when the instance's blockDeviceMapping attribute had to
    be changed, False when it already matched.
    """
    delete_on_termination = module.params.get('delete_on_termination')
    changed = False
    try:
        instance.update()
        dot = instance.block_device_mapping[device_name].delete_on_termination
    except boto.exception.BotoServerError as e:
        module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
    # Only touch the instance attribute when the current setting differs
    # from what the task requested.
    if delete_on_termination != dot:
        try:
            bdt = BlockDeviceType(delete_on_termination=delete_on_termination)
            bdm = BlockDeviceMapping()
            bdm[device_name] = bdt
            ec2.modify_instance_attribute(instance_id=instance.id, attribute='blockDeviceMapping', value=bdm)
            # Poll every 3s until the change is visible on the instance.
            # NOTE(review): no upper bound on this wait.
            while instance.block_device_mapping[device_name].delete_on_termination != delete_on_termination:
                time.sleep(3)
                instance.update()
            changed = True
        except boto.exception.BotoServerError as e:
            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
    return changed
def detach_volume(module, ec2, volume):
    """Detach *volume* from whatever instance it is attached to.

    Blocks (polling every 3 seconds) until AWS reports the volume as
    detached.  Returns (volume, changed); changed is False when the
    volume was not attached to begin with.
    """
    changed = False
    if volume.attachment_state() is not None:
        # (Removed an unused 'adata = volume.attach_data' local here.)
        volume.detach()
        # Wait for the detachment to complete before reporting success.
        while volume.attachment_state() is not None:
            time.sleep(3)
            volume.update()
        changed = True
    return volume, changed
def get_volume_info(volume, state):
    """Build the JSON-serializable dict describing *volume* for module output.

    Refreshes the volume from AWS first unless we are only listing
    (list results were fetched moments ago and need no extra API call).
    """
    # If we're just listing volumes then do nothing, else get the latest update for the volume
    if state != 'list':
        volume.update()

    attachment = volume.attach_data
    # (Removed a dead 'volume_info = {}' assignment that was immediately
    # overwritten by the literal below.)
    volume_info = {
        'create_time': volume.create_time,
        'encrypted': volume.encrypted,
        'id': volume.id,
        'iops': volume.iops,
        'size': volume.size,
        'snapshot_id': volume.snapshot_id,
        'status': volume.status,
        'type': volume.type,
        'zone': volume.zone,
        'attachment_set': {
            'attach_time': attachment.attach_time,
            'device': attachment.device,
            'instance_id': attachment.instance_id,
            'status': attachment.status
        },
        'tags': volume.tags
    }
    # Older boto attachment objects may lack this attribute; copy it only
    # when present so the key is omitted rather than faked.
    if hasattr(attachment, 'deleteOnTermination'):
        volume_info['attachment_set']['deleteOnTermination'] = attachment.deleteOnTermination

    return volume_info
def main():
    """Module entry point: create/attach, detach, delete or list EBS volumes.

    Dispatches on the 'state' parameter ('present', 'absent', 'list') and
    always exits through module.exit_json()/fail_json() rather than
    returning a value.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        instance = dict(),
        id = dict(),
        name = dict(),
        volume_size = dict(),
        volume_type = dict(choices=['standard', 'gp2', 'io1', 'st1', 'sc1'], default='standard'),
        iops = dict(),
        encrypted = dict(type='bool', default=False),
        kms_key_id = dict(),
        device_name = dict(),
        delete_on_termination = dict(type='bool', default=False),
        zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
        snapshot = dict(),
        state = dict(choices=['absent', 'present', 'list'], default='present'),
        tags = dict(type='dict', default={})
    )
    )
    module = AnsibleModule(argument_spec=argument_spec)
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    id = module.params.get('id')
    name = module.params.get('name')
    instance = module.params.get('instance')
    volume_size = module.params.get('volume_size')
    encrypted = module.params.get('encrypted')
    kms_key_id = module.params.get('kms_key_id')
    device_name = module.params.get('device_name')
    zone = module.params.get('zone')
    snapshot = module.params.get('snapshot')
    state = module.params.get('state')
    tags = module.params.get('tags')
    # Ensure we have the zone or can get the zone
    if instance is None and zone is None and state == 'present':
        module.fail_json(msg="You must specify either instance or zone")
    # Set volume detach flag.  The literal string 'None' (or an empty
    # string) is the documented way to request a detach, since YAML cannot
    # pass a real None through the 'instance' option.
    if instance == 'None' or instance == '':
        instance = None
        detach_vol_flag = True
    else:
        detach_vol_flag = False
    # Set changed flag
    changed = False
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if region:
        try:
            ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")
    if state == 'list':
        returned_volumes = []
        vols = get_volumes(module, ec2)
        for v in vols:
            # NOTE(review): 'attachment' is assigned but never used here.
            attachment = v.attach_data
            returned_volumes.append(get_volume_info(v, state))
        module.exit_json(changed=False, volumes=returned_volumes)
    if encrypted and not boto_supports_volume_encryption():
        module.fail_json(msg="You must use boto >= v2.29.0 to use encrypted volumes")
    if kms_key_id is not None and not boto_supports_kms_key_id():
        module.fail_json(msg="You must use boto >= v2.39.0 to use kms_key_id")
    # Here we need to get the zone info for the instance. This covers situation where
    # instance is specified but zone isn't.
    # Useful for playbooks chaining instance launch with volume create + attach and where the
    # zone doesn't matter to the user.
    inst = None
    if instance:
        try:
            reservation = ec2.get_all_instances(instance_ids=instance)
        except BotoServerError as e:
            module.fail_json(msg=e.message)
        inst = reservation[0].instances[0]
        zone = inst.placement
        # Check if there is a volume already mounted there.
        if device_name:
            if device_name in inst.block_device_mapping:
                module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance),
                                 volume_id=inst.block_device_mapping[device_name].volume_id,
                                 device=device_name,
                                 changed=False)
    # Delaying the checks until after the instance check allows us to get volume ids for existing volumes
    # without needing to pass an unused volume_size
    if not volume_size and not (id or name or snapshot):
        module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot")
    if volume_size and id:
        module.fail_json(msg="Cannot specify volume_size together with id")
    if state == 'present':
        # create_volume() returns the existing volume unchanged when
        # 'id'/'name' matched one; then detach or attach as requested.
        volume, changed = create_volume(module, ec2, zone)
        if detach_vol_flag:
            volume, changed = detach_volume(module, ec2, volume)
        elif inst is not None:
            volume, changed = attach_volume(module, ec2, volume, inst)
        # Add device, volume_id and volume_type parameters separately to maintain backward compatibility
        volume_info = get_volume_info(volume, state)
        module.exit_json(changed=changed, volume=volume_info, device=volume_info['attachment_set']['device'],
                         volume_id=volume_info['id'], volume_type=volume_info['type'])
    elif state == 'absent':
        delete_volume(module, ec2)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation; version 2 only
###
### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with this program; if not, write to the Free Software
### Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
### Copyright 2007-2008 Dag Wieers <dag@wieers.com>
import getopt
import sys
import os
import time
global unopath
### The first thing we ought to do is find a suitable OpenOffice installation
### with a compatible pyuno library that we can import.
### BEG Invenio customizations
#extrapaths = glob.glob('/usr/lib*/openoffice*/program') + \
#glob.glob('/usr/lib*/ooo*/program') + \
#glob.glob('/opt/openoffice*/program') + \
#glob.glob('C:\\Program Files\\OpenOffice.org *\\program\\') + \
#[ '/Applications/NeoOffice.app/Contents/program', '/usr/bin' ]
#for unopath in extrapaths:
#if os.path.exists(os.path.join(unopath, "pyuno.so")):
#filename = "pyuno.so"
#elif os.path.exists(os.path.join(unopath, "pyuno.dll")):
#filename = "pyuno.dll"
#else:
#continue
#sys.path.append(unopath)
#try:
#import uno, unohelper
#break
#except ImportError, e:
#sys.path.remove(unopath)
#print >>sys.stderr, e
#print >>sys.stderr, "WARNING: We found %s in %s, but could not import it." % (filename, unopath)
#continue
#else:
#print >>sys.stderr, "unoconv: Cannot find the pyuno.so library in sys.path and known paths."
#print >>sys.stderr, "ERROR: Please locate this library and send your feedback to: <tools@lists.rpmforge.net>."
#sys.exit(1)
#### Export an environment that OpenOffice is pleased to work with
#os.environ['LD_LIBRARY_PATH'] = '%s' % unopath
#os.environ['PATH'] = '%s:' % unopath + os.environ['PATH']
import uno, unohelper
### END Invenio customizations
### Now that we have found a working pyuno library, let's import some classes
from com.sun.star.beans import PropertyValue
from com.sun.star.connection import NoConnectException
from com.sun.star.lang import DisposedException
from com.sun.star.io import IOException, XOutputStream
from com.sun.star.script import CannotConvertException
from com.sun.star.uno import Exception as UnoException
__version__ = "$Revision$"
# $Source$
VERSION = '0.3svn'
# The OpenOffice document families a format can belong to.
doctypes = ('document', 'graphics', 'presentation', 'spreadsheet')
# presumably the PID of a spawned OpenOffice listener process, set
# elsewhere in this file — TODO(review): confirm at the launch site.
oopid = None
# Exit status accumulated across conversions; assumed 0 = success —
# TODO(review): confirm where it is updated.
exitcode = 0
class Fmt:
    """A single convertible output format.

    Bundles the document family (doctype), the short format name, the
    default file extension, a human-readable summary and the OpenOffice
    export filter that produces it.
    """
    def __init__(self, doctype, name, extension, summary, filter):
        self.doctype = doctype
        self.name = name
        self.extension = extension
        self.summary = summary
        self.filter = filter
    def __str__(self):
        # e.g. "Portable Document Format [.pdf]"
        return "{0} [.{1}]".format(self.summary, self.extension)
    def __repr__(self):
        # e.g. "pdf/document"
        return "{0}/{1}".format(self.name, self.doctype)
class FmtList:
    """Registry of all known output formats, searchable by several keys."""
    def __init__(self):
        self.list = []
    def add(self, doctype, name, extension, summary, filter):
        """Register a new format under the given document family."""
        self.list.append(Fmt(doctype, name, extension, summary, filter))
    def byname(self, name):
        """All registered formats whose short name matches exactly."""
        return [fmt for fmt in self.list if fmt.name == name]
    def byextension(self, extension):
        """All registered formats matching a dotted extension such as '.pdf'."""
        return [fmt for fmt in self.list if '.'+fmt.extension == extension]
    def bydoctype(self, doctype, name):
        """All registered formats matching both document family and name."""
        return [fmt for fmt in self.list if fmt.name == name and fmt.doctype == doctype]
    def display(self, doctype):
        """Print the available formats for one document family to stderr."""
        print >>sys.stderr, "The following list of %s formats are currently available:\n" % doctype
        for fmt in self.list:
            if fmt.doctype == doctype:
                print >>sys.stderr, " %-8s - %s" % (fmt.name, fmt)
        print >>sys.stderr
class OutputStream( unohelper.Base, XOutputStream ):
    """UNO XOutputStream adaptor that forwards all written bytes to stdout.

    Handed to the export filter when the converted document should be
    written to standard output instead of a file.
    """
    def __init__( self ):
        # 0 until the UNO side closes the stream.
        self.closed = 0
    def closeOutput(self):
        # Called by UNO when the export filter has finished writing.
        self.closed = 1
    def writeBytes( self, seq ):
        # 'seq' is a UNO byte sequence; its .value holds the raw bytes.
        sys.stdout.write( seq.value )
    def flush( self ):
        # stdout is flushed by the interpreter; nothing to do here.
        pass
fmts = FmtList()
### Document / Writer
fmts.add('document', 'bib', 'bib', 'BibTeX', 'BibTeX_Writer')
fmts.add('document', 'doc', 'doc', 'Microsoft Word 97/2000/XP', 'MS Word 97')
fmts.add('document', 'doc6', 'doc', 'Microsoft Word 6.0', 'MS WinWord 6.0')
fmts.add('document', 'doc95', 'doc', 'Microsoft Word 95', 'MS Word 95')
fmts.add('document', 'docbook', 'xml', 'DocBook', 'DocBook File')
fmts.add('document', 'html', 'html', 'HTML Document (OpenOffice.org Writer)', 'HTML (StarWriter)')
fmts.add('document', 'odt', 'odt', 'Open Document Text', 'writer8')
fmts.add('document', 'ott', 'ott', 'Open Document Text', 'writer8_template')
fmts.add('document', 'ooxml', 'xml', 'Microsoft Office Open XML', 'MS Word 2003 XML')
fmts.add('document', 'pdb', 'pdb', 'AportisDoc (Palm)', 'AportisDoc Palm DB')
fmts.add('document', 'pdf', 'pdf', 'Portable Document Format', 'writer_pdf_Export')
fmts.add('document', 'psw', 'psw', 'Pocket Word', 'PocketWord File')
fmts.add('document', 'rtf', 'rtf', 'Rich Text Format', 'Rich Text Format')
fmts.add('document', 'latex', 'ltx', 'LaTeX 2e', 'LaTeX_Writer')
fmts.add('document', 'sdw', 'sdw', 'StarWriter 5.0', 'StarWriter 5.0')
fmts.add('document', 'sdw4', 'sdw', 'StarWriter 4.0', 'StarWriter 4.0')
fmts.add('document', 'sdw3', 'sdw', 'StarWriter 3.0', 'StarWriter 3.0')
fmts.add('document', 'stw', 'stw', 'Open Office.org 1.0 Text Document Template', 'writer_StarOffice_XML_Writer_Template')
fmts.add('document', 'sxw', 'sxw', 'Open Office.org 1.0 Text Document', 'StarOffice XML (Writer)')
fmts.add('document', 'text', 'txt', 'Text Encoded', 'Text (encoded)')
fmts.add('document', 'mediawiki', 'txt', 'Mediawiki', 'Mediawiki')
fmts.add('document', 'txt', 'txt', 'Plain Text', 'Text')
fmts.add('document', 'vor', 'vor', 'StarWriter 5.0 Template', 'StarWriter 5.0 Vorlage/Template')
fmts.add('document', 'vor4', 'vor', 'StarWriter 4.0 Template', 'StarWriter 4.0 Vorlage/Template')
fmts.add('document', 'vor3', 'vor', 'StarWriter 3.0 Template', 'StarWriter 3.0 Vorlage/Template')
fmts.add('document', 'xhtml', 'html', 'XHTML Document', 'XHTML Writer File')
### Spreadsheet
fmts.add('spreadsheet', 'csv', 'csv', 'Text CSV', 'Text - txt - csv (StarCalc)')
fmts.add('spreadsheet', 'dbf', 'dbf', 'dBase', 'dBase')
fmts.add('spreadsheet', 'dif', 'dif', 'Data Interchange Format', 'DIF')
fmts.add('spreadsheet', 'html', 'html', 'HTML Document (OpenOffice.org Calc)', 'HTML (StarCalc)')
fmts.add('spreadsheet', 'ods', 'ods', 'Open Document Spreadsheet', 'calc8')
fmts.add('spreadsheet', 'ooxml', 'xml', 'Microsoft Excel 2003 XML', 'MS Excel 2003 XML')
fmts.add('spreadsheet', 'pdf', 'pdf', 'Portable Document Format', 'calc_pdf_Export')
fmts.add('spreadsheet', 'pts', 'pts', 'OpenDocument Spreadsheet Template', 'calc8_template')
fmts.add('spreadsheet', 'pxl', 'pxl', 'Pocket Excel', 'Pocket Excel')
fmts.add('spreadsheet', 'sdc', 'sdc', 'StarCalc 5.0', 'StarCalc 5.0')
fmts.add('spreadsheet', 'sdc4', 'sdc', 'StarCalc 4.0', 'StarCalc 4.0')
fmts.add('spreadsheet', 'sdc3', 'sdc', 'StarCalc 3.0', 'StarCalc 3.0')
fmts.add('spreadsheet', 'slk', 'slk', 'SYLK', 'SYLK')
fmts.add('spreadsheet', 'stc', 'stc', 'OpenOffice.org 1.0 Spreadsheet Template', 'calc_StarOffice_XML_Calc_Template')
fmts.add('spreadsheet', 'sxc', 'sxc', 'OpenOffice.org 1.0 Spreadsheet', 'StarOffice XML (Calc)')
fmts.add('spreadsheet', 'vor3', 'vor', 'StarCalc 3.0 Template', 'StarCalc 3.0 Vorlage/Template')
fmts.add('spreadsheet', 'vor4', 'vor', 'StarCalc 4.0 Template', 'StarCalc 4.0 Vorlage/Template')
fmts.add('spreadsheet', 'vor', 'vor', 'StarCalc 5.0 Template', 'StarCalc 5.0 Vorlage/Template')
fmts.add('spreadsheet', 'xhtml', 'xhtml', 'XHTML', 'XHTML Calc File')
fmts.add('spreadsheet', 'xls', 'xls', 'Microsoft Excel 97/2000/XP', 'MS Excel 97')
fmts.add('spreadsheet', 'xls5', 'xls', 'Microsoft Excel 5.0', 'MS Excel 5.0/95')
fmts.add('spreadsheet', 'xls95', 'xls', 'Microsoft Excel 95', 'MS Excel 95')
fmts.add('spreadsheet', 'xlt', 'xlt', 'Microsoft Excel 97/2000/XP Template', 'MS Excel 97 Vorlage/Template')
fmts.add('spreadsheet', 'xlt5', 'xlt', 'Microsoft Excel 5.0 Template', 'MS Excel 5.0/95 Vorlage/Template')
fmts.add('spreadsheet', 'xlt95', 'xlt', 'Microsoft Excel 95 Template', 'MS Excel 95 Vorlage/Template')
### Graphics
fmts.add('graphics', 'bmp', 'bmp', 'Windows Bitmap', 'draw_bmp_Export')
fmts.add('graphics', 'emf', 'emf', 'Enhanced Metafile', 'draw_emf_Export')
fmts.add('graphics', 'eps', 'eps', 'Encapsulated PostScript', 'draw_eps_Export')
fmts.add('graphics', 'gif', 'gif', 'Graphics Interchange Format', 'draw_gif_Export')
fmts.add('graphics', 'html', 'html', 'HTML Document (OpenOffice.org Draw)', 'draw_html_Export')
fmts.add('graphics', 'jpg', 'jpg', 'Joint Photographic Experts Group', 'draw_jpg_Export')
fmts.add('graphics', 'met', 'met', 'OS/2 Metafile', 'draw_met_Export')
fmts.add('graphics', 'odd', 'odd', 'OpenDocument Drawing', 'draw8')
fmts.add('graphics', 'otg', 'otg', 'OpenDocument Drawing Template', 'draw8_template')
fmts.add('graphics', 'pbm', 'pbm', 'Portable Bitmap', 'draw_pbm_Export')
fmts.add('graphics', 'pct', 'pct', 'Mac Pict', 'draw_pct_Export')
fmts.add('graphics', 'pdf', 'pdf', 'Portable Document Format', 'draw_pdf_Export')
fmts.add('graphics', 'pgm', 'pgm', 'Portable Graymap', 'draw_pgm_Export')
fmts.add('graphics', 'png', 'png', 'Portable Network Graphic', 'draw_png_Export')
fmts.add('graphics', 'ppm', 'ppm', 'Portable Pixelmap', 'draw_ppm_Export')
fmts.add('graphics', 'ras', 'ras', 'Sun Raster Image', 'draw_ras_Export')
fmts.add('graphics', 'std', 'std', 'OpenOffice.org 1.0 Drawing Template', 'draw_StarOffice_XML_Draw_Template')
fmts.add('graphics', 'svg', 'svg', 'Scalable Vector Graphics', 'draw_svg_Export')
fmts.add('graphics', 'svm', 'svm', 'StarView Metafile', 'draw_svm_Export')
fmts.add('graphics', 'swf', 'swf', 'Macromedia Flash (SWF)', 'draw_flash_Export')
fmts.add('graphics', 'sxd', 'sxd', 'OpenOffice.org 1.0 Drawing', 'StarOffice XML (Draw)')
fmts.add('graphics', 'sxd3', 'sxd', 'StarDraw 3.0', 'StarDraw 3.0')
fmts.add('graphics', 'sxd5', 'sxd', 'StarDraw 5.0', 'StarDraw 5.0')
fmts.add('graphics', 'tiff', 'tiff', 'Tagged Image File Format', 'draw_tif_Export')
fmts.add('graphics', 'vor', 'vor', 'StarDraw 5.0 Template', 'StarDraw 5.0 Vorlage')
fmts.add('graphics', 'vor3', 'vor', 'StarDraw 3.0 Template', 'StarDraw 3.0 Vorlage')
fmts.add('graphics', 'wmf', 'wmf', 'Windows Metafile', 'draw_wmf_Export')
fmts.add('graphics', 'xhtml', 'xhtml', 'XHTML', 'XHTML Draw File')
fmts.add('graphics', 'xpm', 'xpm', 'X PixMap', 'draw_xpm_Export')
### Presentation
fmts.add('presentation', 'bmp', 'bmp', 'Windows Bitmap', 'impress_bmp_Export')
fmts.add('presentation', 'emf', 'emf', 'Enhanced Metafile', 'impress_emf_Export')
fmts.add('presentation', 'eps', 'eps', 'Encapsulated PostScript', 'impress_eps_Export')
fmts.add('presentation', 'gif', 'gif', 'Graphics Interchange Format', 'impress_gif_Export')
fmts.add('presentation', 'html', 'html', 'HTML Document (OpenOffice.org Impress)', 'impress_html_Export')
fmts.add('presentation', 'jpg', 'jpg', 'Joint Photographic Experts Group', 'impress_jpg_Export')
fmts.add('presentation', 'met', 'met', 'OS/2 Metafile', 'impress_met_Export')
fmts.add('presentation', 'odd', 'odd', 'OpenDocument Drawing (Impress)', 'impress8_draw')
fmts.add('presentation', 'odg', 'odg', 'OpenOffice.org 1.0 Drawing (OpenOffice.org Impress)', 'impress_StarOffice_XML_Draw')
fmts.add('presentation', 'odp', 'odp', 'OpenDocument Presentation', 'impress8')
fmts.add('presentation', 'pbm', 'pbm', 'Portable Bitmap', 'impress_pbm_Export')
fmts.add('presentation', 'pct', 'pct', 'Mac Pict', 'impress_pct_Export')
fmts.add('presentation', 'pdf', 'pdf', 'Portable Document Format', 'impress_pdf_Export')
fmts.add('presentation', 'pgm', 'pgm', 'Portable Graymap', 'impress_pgm_Export')
fmts.add('presentation', 'png', 'png', 'Portable Network Graphic', 'impress_png_Export')
fmts.add('presentation', 'pot', 'pot', 'Microsoft PowerPoint 97/2000/XP Template', 'MS PowerPoint 97 Vorlage')
fmts.add('presentation', 'ppm', 'ppm', 'Portable Pixelmap', 'impress_ppm_Export')
fmts.add('presentation', 'ppt', 'ppt', 'Microsoft PowerPoint 97/2000/XP', 'MS PowerPoint 97')
fmts.add('presentation', 'pwp', 'pwp', 'PlaceWare', 'placeware_Export')
fmts.add('presentation', 'ras', 'ras', 'Sun Raster Image', 'impress_ras_Export')
fmts.add('presentation', 'sda', 'sda', 'StarDraw 5.0 (OpenOffice.org Impress)', 'StarDraw 5.0 (StarImpress)')
fmts.add('presentation', 'sdd', 'sdd', 'StarImpress 5.0', 'StarImpress 5.0')
fmts.add('presentation', 'sdd3', 'sdd', 'StarDraw 3.0 (OpenOffice.org Impress)', 'StarDraw 3.0 (StarImpress)')
fmts.add('presentation', 'sdd4', 'sdd', 'StarImpress 4.0', 'StarImpress 4.0')
fmts.add('presentation', 'sti', 'sti', 'OpenOffice.org 1.0 Presentation Template', 'impress_StarOffice_XML_Impress_Template')
fmts.add('presentation', 'stp', 'stp', 'OpenDocument Presentation Template', 'impress8_template')
fmts.add('presentation', 'svg', 'svg', 'Scalable Vector Graphics', 'impress_svg_Export')
fmts.add('presentation', 'svm', 'svm', 'StarView Metafile', 'impress_svm_Export')
fmts.add('presentation', 'swf', 'swf', 'Macromedia Flash (SWF)', 'impress_flash_Export')
fmts.add('presentation', 'sxi', 'sxi', 'OpenOffice.org 1.0 Presentation', 'StarOffice XML (Impress)')
fmts.add('presentation', 'tiff', 'tiff', 'Tagged Image File Format', 'impress_tif_Export')
fmts.add('presentation', 'vor', 'vor', 'StarImpress 5.0 Template', 'StarImpress 5.0 Vorlage')
fmts.add('presentation', 'vor3', 'vor', 'StarDraw 3.0 Template (OpenOffice.org Impress)', 'StarDraw 3.0 Vorlage (StarImpress)')
fmts.add('presentation', 'vor4', 'vor', 'StarImpress 4.0 Template', 'StarImpress 4.0 Vorlage')
fmts.add('presentation', 'vor5', 'vor', 'StarDraw 5.0 Template (OpenOffice.org Impress)', 'StarDraw 5.0 Vorlage (StarImpress)')
fmts.add('presentation', 'wmf', 'wmf', 'Windows Metafile', 'impress_wmf_Export')
fmts.add('presentation', 'xhtml', 'xml', 'XHTML', 'XHTML Impress File')
fmts.add('presentation', 'xpm', 'xpm', 'X PixMap', 'impress_xpm_Export')
class Options:
    """Parsed command-line options for unoconv (Invenio-customized build).

    All parsing happens in the constructor; it may terminate the process
    via sys.exit() for --help/--version/--show or on usage errors.  Reads
    the module-level `doctypes` and `fmts` registries.
    """

    def __init__(self, args):
        # Defaults; each may be overridden by the command line below.
        self.stdout = False
        self.showlist = False
        self.listener = False
        self.format = None
        self.verbose = 0
        self.timeout = 3
        self.doctype = None
        self.server = 'localhost'
        self.port = '2002'
        self.connection = None
        self.filenames = []
        self.pipe = None
        self.outputpath = None
        self.outputfile = None ### Invenio customizations

        ### Get options from the commandline
        try:
            opts, args = getopt.getopt (args, 'c:d:f:hi:Llo:p:s:T:t:v',
                ['connection=', 'doctype=', 'format=', 'help', 'listener', 'outputpath=', 'pipe=', 'port=', 'server=', 'timeout=', 'show', 'stdout', 'verbose', 'version', 'outputfile='] )
        except getopt.error, exc:
            print 'unoconv: %s, try unoconv -h for a list of all the options' % str(exc)
            sys.exit(255)

        for opt, arg in opts:
            if opt in ['-h', '--help']:
                self.usage()
                print
                self.help()
                sys.exit(1)
            elif opt in ['-c', '--connection']:
                self.connection = arg
            elif opt in ['-d', '--doctype']:
                self.doctype = arg
            elif opt in ['-f', '--format']:
                self.format = arg
            elif opt in ['-i', '--pipe']:
                self.pipe = arg
            elif opt in ['-l', '--listener']:
                self.listener = True
            elif opt in ['-o', '--outputpath']:
                self.outputpath = arg
            elif opt in ['--outputfile']: ### Invenio customizations
                self.outputfile = arg ### Invenio customizations
            elif opt in ['-p', '--port']:
                self.port = arg
            elif opt in ['-s', '--server']:
                self.server = arg
            elif opt in ['--show']:
                self.showlist = True
            elif opt in ['-T', '--timeout']:
                self.timeout = int(arg)
            elif opt in ['--stdout']:
                self.stdout = True
            elif opt in ['-v', '--verbose']:
                self.verbose = self.verbose + 1
            elif opt in ['--version']:
                self.version()
                sys.exit(255)

        ### Enable verbosity
        if self.verbose >= 3:
            print >>sys.stderr, 'Verbosity set to level %d' % (self.verbose - 1)

        self.filenames = args

        # Filenames are mandatory unless we only listen or list formats.
        if not self.listener and not self.showlist and self.doctype != 'list' and not self.filenames:
            print >>sys.stderr, 'unoconv: you have to provide a filename as argument'
            print >>sys.stderr, 'Try `unoconv -h\' for more information.'
            sys.exit(255)

        ### Set connection string
        if not self.connection:
            if not self.pipe:
                self.connection = "socket,host=%s,port=%s;urp;StarOffice.ComponentContext" % (self.server, self.port)
#                self.connection = "socket,host=%s,port=%s;urp;" % (self.server, self.port)
            else:
                self.connection = "pipe,name=%s;urp;StarOffice.ComponentContext" % (self.pipe)

        if self.verbose >=3:
            print >>sys.stderr, 'Connection type: %s' % self.connection

        ### Make it easier for people to use a doctype (first letter is enough)
        if self.doctype:
            for doctype in doctypes:
                if doctype.startswith(self.doctype):
                    self.doctype = doctype

        ### Check if the user request to see the list of formats
        if self.showlist or self.format == 'list':
            if self.doctype:
                fmts.display(self.doctype)
            else:
                for t in doctypes:
                    fmts.display(t)
            sys.exit(0)

        ### If no format was specified, probe it or provide it
        if not self.format:
            # An `xxx2yyy'-style program name (e.g. a `doc2pdf' symlink)
            # implies the output format; otherwise default to pdf.
            l = sys.argv[0].split('2')
            if len(l) == 2:
                self.format = l[1]
            else:
                self.format = 'pdf'

    def version(self):
        "Print version and platform information to stdout."
        print 'unoconv %s' % VERSION
        print 'Written by Dag Wieers <dag@wieers.com>'
        print 'Homepage at http://dag.wieers.com/home-made/unoconv/'
        print
        print 'platform %s/%s' % (os.name, sys.platform)
        print 'python %s' % sys.version
        print
        print 'build revision $Rev$'

    def usage(self):
        "Print a one-line usage summary to stderr."
        print >>sys.stderr, 'usage: unoconv [options] file [file2 ..]'

    def help(self):
        "Print the full option help text to stderr."
        print >>sys.stderr, '''Convert from and to any format supported by OpenOffice
unoconv options:
-c, --connection=string use a custom connection string
-d, --doctype=type specify document type
(document, graphics, presentation, spreadsheet)
-f, --format=format specify the output format
-i, --pipe=name alternative method of connection using a pipe
-l, --listener start a listener to use by unoconv clients
-o, --outputpath=name output directory
-p, --port=port specify the port (default: 2002)
to be used by client or listener
-s, --server=server specify the server address (default: localhost)
to be used by client or listener
-T, --timeout=secs timeout after secs if connections to OpenOffice fail
--show list the available output formats
--stdout write output to stdout
-v, --verbose be more and more verbose
'''
class Convertor:
    """Connect to (or launch) an OpenOffice instance over UNO and convert
    documents through it.

    Reads the global option object `op` and updates the globals `exitcode`
    and `oopid` (pid of a self-launched OpenOffice, used by die() for
    cleanup).
    """

    def __init__(self):
        global exitcode, oopid
        unocontext = None

        ### Do the OpenOffice component dance
        self.context = uno.getComponentContext()
        resolver = self.context.ServiceManager.createInstanceWithContext("com.sun.star.bridge.UnoUrlResolver", self.context)

        ### Test for an existing connection (twice)
        try:
            unocontext = resolver.resolve("uno:%s" % op.connection)
        except NoConnectException, e:
            error(2, "Existing listener not found.\n%s" % e)

            ### Test if we can use an Openoffice *binary* in our (modified) path
            for bin in ('soffice.bin', 'soffice', ):
                error(2, "Trying to launch our own listener using %s." % bin)
                try:
                    oopid = os.spawnvp(os.P_NOWAIT, bin, [bin, "-headless", "-nologo", "-nodefault", "-norestore", "-nofirststartwizard", "-accept=%s" % op.connection]);
                except:
                    # `e' here is the NoConnectException bound by the outer
                    # handler; the spawn failure itself is not captured.
                    error(3, "Launch of %s failed.\n%s" % (bin, e))
                    continue

                ### Try connection to it for op.timeout seconds
                timeout = 0
                while timeout <= op.timeout:
                    try:
                        unocontext = resolver.resolve("uno:%s" % op.connection)
                        break
                    except NoConnectException:
                        time.sleep(0.5)
                        timeout = timeout + 0.5
                else:
                    # while/else: connection never succeeded within op.timeout
                    error(3, "Failed to connect to %s in %d seconds.\n%s" % (bin, op.timeout, e))
                    continue
                break
            else:
                # for/else: every candidate binary failed to launch/connect
                die(250, "No proper binaries found to launch OpenOffice. Bailing out.")

        if not unocontext:
            die(251, "Unable to connect or start own listener. Aborting.")

        ### And some more OpenOffice magic
        unosvcmgr = unocontext.ServiceManager
        self.desktop = unosvcmgr.createInstanceWithContext("com.sun.star.frame.Desktop", unocontext)
        self.cwd = unohelper.systemPathToFileUrl( os.getcwd() )

    def getformat(self, inputfn):
        """Resolve op.format (plus op.doctype and the input extension) to a
        single output Fmt object, or die(1) when nothing matches."""
        doctype = None

        ### Get the output format from mapping
        if op.doctype:
            outputfmt = fmts.bydoctype(op.doctype, op.format)
        else:
            outputfmt = fmts.byname(op.format)
            if not outputfmt:
                outputfmt = fmts.byextension('.'+op.format)

        ### If no doctype given, check list of acceptable formats for input file ext doctype
        ### FIXME: This should go into the for-loop to match each individual input filename
        if outputfmt:
            inputext = os.path.splitext(inputfn)[1]
            inputfmt = fmts.byextension(inputext)
            if inputfmt:
                # Prefer the output format whose doctype matches the input's.
                for fmt in outputfmt:
                    if inputfmt[0].doctype == fmt.doctype:
                        doctype = inputfmt[0].doctype
                        outputfmt = fmt
                        break
                else:
                    outputfmt = outputfmt[0]
#                print >>sys.stderr, 'unoconv: format `%s\' is part of multiple doctypes %s, selecting `%s\'.' % (format, [fmt.doctype for fmt in outputfmt], outputfmt[0].doctype)
            else:
                outputfmt = outputfmt[0]

        ### No format found, throw error
        if not outputfmt:
            if doctype:
                print >>sys.stderr, 'unoconv: format [%s/%s] is not known to unoconv.' % (op.doctype, op.format)
            else:
                print >>sys.stderr, 'unoconv: format [%s] is not known to unoconv.' % op.format
            die(1)

        return outputfmt

    def convert(self, inputfn):
        """Load `inputfn' into OpenOffice and store it in the requested
        output format (to a file or, with --stdout, to standard output).
        Sets the global `exitcode' on failure instead of raising."""
        global exitcode

        doc = None
        outputfmt = self.getformat(inputfn)

        if op.verbose > 0:
            print >>sys.stderr, 'Input file:', inputfn

        if not os.path.exists(inputfn):
            print >>sys.stderr, 'unoconv: file `%s\' does not exist.' % inputfn
            exitcode = 1

        try:
            ### Load inputfile
            inputprops = ( PropertyValue( "Hidden", 0, True, 0 ), )

            inputurl = unohelper.absolutize(self.cwd, unohelper.systemPathToFileUrl(inputfn))
            doc = self.desktop.loadComponentFromURL( inputurl , "_blank", 0, inputprops )

            if not doc:
                raise UnoException("File could not be loaded by OpenOffice", None)

#            standard = doc.getStyleFamilies().getByName('PageStyles').getByName('Standard')
#            pageSize = Size()
#            pageSize.Width=1480
#            pageSize.Height=3354
#            standard.setPropertyValue('Size', pageSize)

            error(1, "Selected output format: %s" % outputfmt)
            # pylint: disable=E1103
            # outputfmt is an instance of the class Fmt.
            error(1, "Selected ooffice filter: %s" % outputfmt.filter)
            error(1, "Used doctype: %s" % outputfmt.doctype)

####            ### Write outputfile
####            outputprops = (
####                PropertyValue( "FilterName", 0, outputfmt.filter, 0),
####                PropertyValue( "Overwrite", 0, True, 0 ),
#####                PropertyValue( "Size", 0, "A3", 0 ),
####                PropertyValue( "OutputStream", 0, OutputStream(), 0 ),
####            )
            ### BEG Invenio customizations
            outputprops = [
                PropertyValue( "FilterName" , 0, outputfmt.filter , 0 ),
                PropertyValue( "Overwrite" , 0, True , 0 ),
                PropertyValue( "OutputStream", 0, OutputStream(), 0),
            ]
            if outputfmt.filter == 'Text (encoded)':
                ## To enable UTF-8
                outputprops.append(PropertyValue( "FilterFlags", 0, "UTF8, LF", 0))
            elif outputfmt.filter == 'writer_pdf_Export':
                ## To enable PDF/A
                outputprops.append(PropertyValue( "SelectPdfVersion", 0, 1, 0))
            outputprops = tuple(outputprops)
            ### END Invenio customizations

            if not op.stdout:
                (outputfn, ext) = os.path.splitext(inputfn)
                ### BEG Invenio customizations
                if op.outputfile:
                    outputfn = op.outputfile
                elif not op.outputpath: ### END Invenio customizations
                    outputfn = outputfn + '.' + outputfmt.extension
                else:
                    outputfn = os.path.join(op.outputpath, os.path.basename(outputfn) + '.' + outputfmt.extension)

                outputurl = unohelper.absolutize( self.cwd, unohelper.systemPathToFileUrl(outputfn) )

                doc.storeToURL(outputurl, outputprops)
                error(1, "Output file: %s" % outputfn)
            else:
                doc.storeToURL("private:stream", outputprops)
            # pylint: enable=E1103

            doc.dispose()
            doc.close(True)

        except SystemError, e:
            error(0, "unoconv: SystemError during conversion: %s" % e)
            error(0, "ERROR: The provided document cannot be converted to the desired format.")
            exitcode = 1

        except UnoException, e:
            error(0, "unoconv: UnoException during conversion in %s: %s" % (repr(e.__class__), e.Message))
            error(0, "ERROR: The provided document cannot be converted to the desired format. (code: %s)" % e.ErrCode)
            exitcode = e.ErrCode

        except IOException, e:
            error(0, "unoconv: IOException during conversion: %s" % e.Message)
            error(0, "ERROR: The provided document cannot be exported to %s." % outputfmt)
            exitcode = 3

        except CannotConvertException, e:
            error(0, "unoconv: CannotConvertException during conversion: %s" % e.Message)
            exitcode = 4
class Listener:
def __init__(self):
error(1, "Start listener on %s:%s" % (op.server, op.port))
for bin in ('soffice.bin', 'soffice', ):
error(2, "Warning: trying to launch %s." % bin)
try:
os.execvp(bin, [bin, "-headless", "-nologo", "-nodefault", "-norestore", "-nofirststartwizard", "-accept=%s" % op.connection]);
except:
error(3, "Launch of %s failed.\n%s" % (bin, e))
continue
else:
die(254, "Failed to start listener with connection %s" % (op.connection))
die(253, "Existing listener found, aborting.")
def error(level, str):
    "Emit a diagnostic on stderr when the verbosity level is high enough"
    if level > op.verbose:
        return
    sys.stderr.write('%s\n' % (str,))
def info(level, str):
    "Emit an informational message on stdout, unless --stdout is in use"
    if op.stdout or level > op.verbose:
        return
    sys.stdout.write('%s\n' % (str,))
def die(ret, str=None):
    """Report an optional error message, clean up any OpenOffice instance
    we launched ourselves (global `oopid'), and sys.exit(ret)."""
    global convertor, oopid

    if str:
        error(0, 'Error: %s' % str)

    ### Did we start an instance ?
    if oopid:
        ### If there is a GUI now attached to the instance, disable listener
        if convertor.desktop.getCurrentFrame():
            # A user opened the GUI meanwhile: leave the instance running
            # but switch off its listener socket via -unaccept.
            for bin in ('soffice.bin', 'soffice', ):
                try:
                    os.spawnvp(os.P_NOWAIT, bin, [bin, "-headless", "-nologo", "-nodefault", "-norestore", "-nofirststartwizard", "-unaccept=%s" % op.connection]);
                    error(2, 'OpenOffice listener successfully disabled.')
                    break
                except Exception, e:
                    error(3, "Launch of %s failed.\n%s" % (bin, e))
                    continue

        ### If there is no GUI attached to the instance, terminate instance
        else:
            try:
                convertor.desktop.terminate()
            except DisposedException:
                # terminate() disposes the desktop; the exception confirms
                # the instance actually went away.
                error(2, 'OpenOffice instance successfully terminated.')

#        error(2, 'Taking down OpenOffice with pid %s.' % oopid)
#        os.setpgid(oopid, 0)
#        os.killpg(os.getpgid(oopid), 15)
#        try:
#            os.kill(oopid, 15)
#            error(2, 'Waiting for OpenOffice with pid %s to disappear.' % oopid)
#            os.waitpid(oopid, os.WUNTRACED)
#        except:
#            error(2, 'No OpenOffice with pid %s to take down' % oopid)

    sys.exit(ret)
def main():
    """Either start a listener (--listener) or convert every file given on
    the command line through a Convertor.  Connection failures are turned
    into user guidance on stderr plus exitcode = 1."""
    global convertor, exitcode

    try:
        if op.listener:
            listener = Listener()
        else:
            convertor = Convertor()
            for inputfn in op.filenames:
                convertor.convert(inputfn)

    except NoConnectException, e:
        error(0, "unoconv: could not find an existing connection to Open Office at %s:%s." % (op.server, op.port))
        if op.connection:
            error(0, "Please start an OpenOffice instance on server '%s' by doing:\n\n unoconv --listener --server %s --port %s\n\nor alternatively:\n\n ooffice -nologo -nodefault -accept=\"%s\"" % (op.server, op.server, op.port, op.connection))
        else:
            error(0, "Please start an OpenOffice instance on server '%s' by doing:\n\n unoconv --listener --server %s --port %s\n\nor alternatively:\n\n ooffice -nologo -nodefault -accept=\"socket,host=%s,port=%s;urp;\"" % (op.server, op.server, op.port, op.server, op.port))
        error(0, "Please start an ooffice instance on server '%s' by doing:\n\n ooffice -nologo -nodefault -accept=\"socket,host=localhost,port=%s;urp;\"" % (op.server, op.port))
        exitcode = 1

#    except UnboundLocalError:
#        die(252, "Failed to connect to remote listener.")

    except OSError:
        error(0, "Warning: failed to launch OpenOffice. Aborting.")

# Module-level default so die() can safely reference the `convertor' global
# even when no Convertor was ever created.
# NOTE(review): indentation was lost in this copy of the file; upstream
# unoconv keeps this assignment at module level -- confirm against upstream.
convertor = None
### Main entrance
if __name__ == '__main__':
    exitcode = 0

    # Parse the command line (may sys.exit() on --help/--show/usage errors).
    op = Options(sys.argv[1:])
    try:
        main()
    except KeyboardInterrupt, e:
        die(6, 'Exiting on user request')
    # Normal termination still goes through die() so a self-launched
    # OpenOffice instance gets cleaned up.
    die(exitcode)
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
class S3PrefixSensor(BaseSensorOperator):
    """
    Waits until at least one key with the given prefix exists in an S3
    bucket.  Because a prefix is the leading part of a key, this allows
    glob-like checks such as ``airfl*`` (SQL ``LIKE 'airfl%'``).  A
    delimiter may be given to mark the key hierarchy, in which case the
    match stops at that delimiter.  Sane delimiters only, i.e. characters
    that are NOT special characters in the Python regex engine.

    :param bucket_name: Name of the S3 bucket
    :type bucket_name: str
    :param prefix: The prefix being waited on. Relative path from bucket root level.
    :type prefix: str
    :param delimiter: The delimiter intended to show hierarchy.
        Defaults to '/'.
    :type delimiter: str
    :param aws_conn_id: a reference to the s3 connection
    :type aws_conn_id: str
    :param verify: Whether or not to verify SSL certificates for S3 connection.
        By default SSL certificates are verified.
        You can provide the following values:

        - ``False``: do not validate SSL certificates. SSL will still be used
                 (unless use_ssl is False), but SSL certificates will not be
                 verified.
        - ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to uses.
                 You can specify this argument if you want to use a different
                 CA cert bundle than the one used by botocore.
    :type verify: bool or str
    """
    template_fields = ('prefix', 'bucket_name')

    @apply_defaults
    def __init__(self,
                 bucket_name,
                 prefix,
                 delimiter='/',
                 aws_conn_id='aws_default',
                 verify=None,
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)
        self.bucket_name = bucket_name
        self.prefix = prefix
        self.delimiter = delimiter
        # Fully-qualified URL kept around for callers/templating.
        self.full_url = 's3://{}/{}'.format(bucket_name, prefix)
        self.aws_conn_id = aws_conn_id
        self.verify = verify

    def poke(self, context):
        self.log.info('Poking for prefix : %s in bucket s3://%s', self.prefix, self.bucket_name)
        # Imported lazily so the provider package is only required at poke time.
        from airflow.providers.amazon.aws.hooks.s3 import S3Hook
        s3 = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
        return s3.check_for_prefix(prefix=self.prefix,
                                   delimiter=self.delimiter,
                                   bucket_name=self.bucket_name)
/*
* Copyright (c) 1988-1997 Sam Leffler
* Copyright (c) 1991-1997 Silicon Graphics, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software and
* its documentation for any purpose is hereby granted without fee, provided
* that (i) the above copyright notices and this permission notice appear in
* all copies of the software and related documentation, and (ii) the names of
* Sam Leffler and Silicon Graphics may not be used in any advertising or
* publicity relating to the software without the specific, prior written
* permission of Sam Leffler and Silicon Graphics.
*
* THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
* EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
* WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
*
* IN NO EVENT SHALL SAM LEFFLER OR SILICON GRAPHICS BE LIABLE FOR
* ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND,
* OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
* WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF
* LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
/*
* TIFF Library.
*
* Scanline-oriented Write Support
*/
#include "tiffiop.h"
#include <stdio.h>
#define STRIPINCR 20 /* expansion factor on strip array */

/* True when the file is already in writing state, or can be put into it
   (TIFFWriteCheck with tiles=0 for strips, tiles=1 for tiles). */
#define WRITECHECKSTRIPS(tif, module)                                          \
    (((tif)->tif_flags & TIFF_BEENWRITING) || TIFFWriteCheck((tif), 0, module))
#define WRITECHECKTILES(tif, module)                                           \
    (((tif)->tif_flags & TIFF_BEENWRITING) || TIFFWriteCheck((tif), 1, module))
/* True when a raw output buffer exists, allocating a default-sized one
   ((tmsize_t)-1) on first use otherwise. */
#define BUFFERCHECK(tif)                                                       \
    ((((tif)->tif_flags & TIFF_BUFFERSETUP) && tif->tif_rawdata) ||            \
     TIFFWriteBufferSetup((tif), NULL, (tmsize_t)-1))

/* Forward declarations for the local helpers defined later in this file. */
static int TIFFGrowStrips(TIFF *tif, uint32_t delta, const char *module);
static int TIFFAppendToStrip(TIFF *tif, uint32_t strip, uint8_t *data,
                             tmsize_t cc);
/*
 * Encode one scanline of data and append it to the strip containing
 * `row' (and `sample', for PlanarConfig=2 data).  For contiguous images
 * the image length is grown on demand when rows beyond the current
 * ImageLength are written.  Returns the encoder's status, or -1 on error.
 * NB: `buf' may be altered in place by the byte-swab step.
 */
int TIFFWriteScanline(TIFF *tif, void *buf, uint32_t row, uint16_t sample)
{
    static const char module[] = "TIFFWriteScanline";
    register TIFFDirectory *td;
    int status, imagegrew = 0;
    uint32_t strip;

    if (!WRITECHECKSTRIPS(tif, module))
        return (-1);
    /*
     * Handle delayed allocation of data buffer. This
     * permits it to be sized more intelligently (using
     * directory information).
     */
    if (!BUFFERCHECK(tif))
        return (-1);
    tif->tif_flags |= TIFF_BUF4WRITE; /* not strictly sure this is right*/
    td = &tif->tif_dir;
    /*
     * Extend image length if needed
     * (but only for PlanarConfig=1).
     */
    if (row >= td->td_imagelength)
    { /* extend image */
        if (td->td_planarconfig == PLANARCONFIG_SEPARATE)
        {
            TIFFErrorExtR(
                tif, module,
                "Can not change \"ImageLength\" when using separate planes");
            return (-1);
        }
        td->td_imagelength = row + 1;
        imagegrew = 1;
    }
    /*
     * Calculate strip and check for crossings.
     */
    if (td->td_planarconfig == PLANARCONFIG_SEPARATE)
    {
        if (sample >= td->td_samplesperpixel)
        {
            TIFFErrorExtR(tif, module, "%lu: Sample out of range, max %lu",
                          (unsigned long)sample,
                          (unsigned long)td->td_samplesperpixel);
            return (-1);
        }
        strip = sample * td->td_stripsperimage + row / td->td_rowsperstrip;
    }
    else
        strip = row / td->td_rowsperstrip;
    /*
     * Check strip array to make sure there's space. We don't support
     * dynamically growing files that have data organized in separate
     * bitplanes because it's too painful.  In that case we require that
     * the imagelength be set properly before the first write (so that the
     * strips array will be fully allocated above).
     */
    if (strip >= td->td_nstrips && !TIFFGrowStrips(tif, 1, module))
        return (-1);
    if (strip != tif->tif_curstrip)
    {
        /*
         * Changing strips -- flush any data present.
         */
        if (!TIFFFlushData(tif))
            return (-1);
        tif->tif_curstrip = strip;
        /*
         * Watch out for a growing image.  The value of strips/image
         * will initially be 1 (since it can't be deduced until the
         * imagelength is known).
         */
        if (strip >= td->td_stripsperimage && imagegrew)
            td->td_stripsperimage =
                TIFFhowmany_32(td->td_imagelength, td->td_rowsperstrip);
        if (td->td_stripsperimage == 0)
        {
            TIFFErrorExtR(tif, module, "Zero strips per image");
            return (-1);
        }
        tif->tif_row = (strip % td->td_stripsperimage) * td->td_rowsperstrip;
        if ((tif->tif_flags & TIFF_CODERSETUP) == 0)
        {
            if (!(*tif->tif_setupencode)(tif))
                return (-1);
            tif->tif_flags |= TIFF_CODERSETUP;
        }

        tif->tif_rawcc = 0;
        tif->tif_rawcp = tif->tif_rawdata;

        /* this informs TIFFAppendToStrip() we have changed strip */
        tif->tif_curoff = 0;

        if (!(*tif->tif_preencode)(tif, sample))
            return (-1);
        tif->tif_flags |= TIFF_POSTENCODE;
    }
    /*
     * Ensure the write is either sequential or at the
     * beginning of a strip (or that we can randomly
     * access the data -- i.e. no encoding).
     */
    if (row != tif->tif_row)
    {
        if (row < tif->tif_row)
        {
            /*
             * Moving backwards within the same strip:
             * backup to the start and then decode
             * forward (below).
             */
            tif->tif_row =
                (strip % td->td_stripsperimage) * td->td_rowsperstrip;
            tif->tif_rawcp = tif->tif_rawdata;
        }
        /*
         * Seek forward to the desired row.
         */
        if (!(*tif->tif_seek)(tif, row - tif->tif_row))
            return (-1);
        tif->tif_row = row;
    }

    /* swab if needed - note that source buffer will be altered */
    tif->tif_postdecode(tif, (uint8_t *)buf, tif->tif_scanlinesize);

    status = (*tif->tif_encoderow)(tif, (uint8_t *)buf, tif->tif_scanlinesize,
                                   sample);

    /* we are now poised at the beginning of the next row */
    tif->tif_row = row + 1;
    return (status);
}
/* Make sure that at the first attempt of rewriting a tile/strip, we will have
 */
/* more bytes available in the output buffer than the previous byte count, */
/* so that TIFFAppendToStrip() will detect the overflow when it is called the
 * first */
/* time if the new compressed tile is bigger than the older one. (GDAL #4771) */
static int _TIFFReserveLargeEnoughWriteBuffer(TIFF *tif, uint32_t strip_or_tile)
{
    TIFFDirectory *td = &tif->tif_dir;
    uint64_t prev_bytes = td->td_stripbytecount_p[strip_or_tile];
    uint64_t safe_size;

    /* Never written before: whatever buffer we have is acceptable. */
    if (prev_bytes == 0)
        return 1;

    /* The +1 is to ensure at least one extra byte; the +4 is because the
     * LZW encoder flushes 4 bytes before the limit. */
    safe_size = prev_bytes + 1 + 4;
    if ((tmsize_t)safe_size < tif->tif_rawdatasize)
        return 1;

    /* Grow the raw buffer, rounded up to a 1 KiB multiple. */
    if (!TIFFWriteBufferSetup(tif, NULL,
                              (tmsize_t)TIFFroundup_64(safe_size, 1024)))
        return 0;
    return 1;
}
/*
 * Encode the supplied data and write it to the
 * specified strip.
 *
 * NB: Image length must be setup before writing.
 *
 * Returns the number of input bytes consumed (cc), or (tmsize_t)-1 on
 * failure.  The source buffer may be altered in place by the swab step.
 */
tmsize_t TIFFWriteEncodedStrip(TIFF *tif, uint32_t strip, void *data,
                               tmsize_t cc)
{
    static const char module[] = "TIFFWriteEncodedStrip";
    TIFFDirectory *td = &tif->tif_dir;
    uint16_t sample;

    if (!WRITECHECKSTRIPS(tif, module))
        return ((tmsize_t)-1);
    /*
     * Check strip array to make sure there's space.
     * We don't support dynamically growing files that
     * have data organized in separate bitplanes because
     * it's too painful. In that case we require that
     * the imagelength be set properly before the first
     * write (so that the strips array will be fully
     * allocated above).
     */
    if (strip >= td->td_nstrips)
    {
        if (td->td_planarconfig == PLANARCONFIG_SEPARATE)
        {
            TIFFErrorExtR(
                tif, module,
                "Can not grow image by strips when using separate planes");
            return ((tmsize_t)-1);
        }
        if (!TIFFGrowStrips(tif, 1, module))
            return ((tmsize_t)-1);
        td->td_stripsperimage =
            TIFFhowmany_32(td->td_imagelength, td->td_rowsperstrip);
    }
    /*
     * Handle delayed allocation of data buffer. This
     * permits it to be sized according to the directory
     * info.
     */
    if (!BUFFERCHECK(tif))
        return ((tmsize_t)-1);

    tif->tif_flags |= TIFF_BUF4WRITE;

    tif->tif_curstrip = strip;

    /* this informs TIFFAppendToStrip() we have changed or reset strip */
    tif->tif_curoff = 0;

    if (!_TIFFReserveLargeEnoughWriteBuffer(tif, strip))
    {
        return ((tmsize_t)(-1));
    }

    tif->tif_rawcc = 0;
    tif->tif_rawcp = tif->tif_rawdata;

    if (td->td_stripsperimage == 0)
    {
        TIFFErrorExtR(tif, module, "Zero strips per image");
        return ((tmsize_t)-1);
    }

    tif->tif_row = (strip % td->td_stripsperimage) * td->td_rowsperstrip;
    if ((tif->tif_flags & TIFF_CODERSETUP) == 0)
    {
        if (!(*tif->tif_setupencode)(tif))
            return ((tmsize_t)-1);
        tif->tif_flags |= TIFF_CODERSETUP;
    }

    tif->tif_flags &= ~TIFF_POSTENCODE;

    /* shortcut to avoid an extra memcpy() */
    if (td->td_compression == COMPRESSION_NONE)
    {
        /* swab if needed - note that source buffer will be altered */
        tif->tif_postdecode(tif, (uint8_t *)data, cc);

        if (!isFillOrder(tif, td->td_fillorder) &&
            (tif->tif_flags & TIFF_NOBITREV) == 0)
            TIFFReverseBits((uint8_t *)data, cc);

        if (cc > 0 && !TIFFAppendToStrip(tif, strip, (uint8_t *)data, cc))
            return ((tmsize_t)-1);
        return (cc);
    }

    /* For separate planes, the plane index is the strip index divided by
       the number of strips per plane. */
    sample = (uint16_t)(strip / td->td_stripsperimage);
    if (!(*tif->tif_preencode)(tif, sample))
        return ((tmsize_t)-1);

    /* swab if needed - note that source buffer will be altered */
    tif->tif_postdecode(tif, (uint8_t *)data, cc);

    if (!(*tif->tif_encodestrip)(tif, (uint8_t *)data, cc, sample))
        return ((tmsize_t)-1);
    if (!(*tif->tif_postencode)(tif))
        return ((tmsize_t)-1);
    if (!isFillOrder(tif, td->td_fillorder) &&
        (tif->tif_flags & TIFF_NOBITREV) == 0)
        TIFFReverseBits(tif->tif_rawdata, tif->tif_rawcc);
    if (tif->tif_rawcc > 0 &&
        !TIFFAppendToStrip(tif, strip, tif->tif_rawdata, tif->tif_rawcc))
        return ((tmsize_t)-1);
    tif->tif_rawcc = 0;
    tif->tif_rawcp = tif->tif_rawdata;
    return (cc);
}
/*
 * Write the supplied data to the specified strip.
 * The data are appended as-is, with no encoding pass.
 *
 * NB: Image length must be setup before writing.
 *
 * Returns cc on success, (tmsize_t)-1 on failure.
 */
tmsize_t TIFFWriteRawStrip(TIFF *tif, uint32_t strip, void *data, tmsize_t cc)
{
    static const char module[] = "TIFFWriteRawStrip";
    TIFFDirectory *td = &tif->tif_dir;

    if (!WRITECHECKSTRIPS(tif, module))
        return ((tmsize_t)-1);
    /*
     * Check strip array to make sure there's space.
     * We don't support dynamically growing files that
     * have data organized in separate bitplanes because
     * it's too painful. In that case we require that
     * the imagelength be set properly before the first
     * write (so that the strips array will be fully
     * allocated above).
     */
    if (strip >= td->td_nstrips)
    {
        if (td->td_planarconfig == PLANARCONFIG_SEPARATE)
        {
            TIFFErrorExtR(
                tif, module,
                "Can not grow image by strips when using separate planes");
            return ((tmsize_t)-1);
        }
        /*
         * Watch out for a growing image. The value of
         * strips/image will initially be 1 (since it
         * can't be deduced until the imagelength is known).
         */
        if (strip >= td->td_stripsperimage)
            td->td_stripsperimage =
                TIFFhowmany_32(td->td_imagelength, td->td_rowsperstrip);
        if (!TIFFGrowStrips(tif, 1, module))
            return ((tmsize_t)-1);
    }

    if (tif->tif_curstrip != strip)
    {
        tif->tif_curstrip = strip;

        /* this informs TIFFAppendToStrip() we have changed or reset strip */
        tif->tif_curoff = 0;
    }

    if (td->td_stripsperimage == 0)
    {
        TIFFErrorExtR(tif, module, "Zero strips per image");
        return ((tmsize_t)-1);
    }
    tif->tif_row = (strip % td->td_stripsperimage) * td->td_rowsperstrip;
    return (TIFFAppendToStrip(tif, strip, (uint8_t *)data, cc) ? cc
                                                               : (tmsize_t)-1);
}
/*
 * Write and compress a tile of data. The
 * tile is selected by the (x,y,z,s) coordinates.
 * Returns the number of bytes written, or (tmsize_t)-1 on error.
 */
tmsize_t TIFFWriteTile(TIFF *tif, void *buf, uint32_t x, uint32_t y, uint32_t z,
                       uint16_t s)
{
    uint32_t tile;

    if (!TIFFCheckTile(tif, x, y, z, s))
        return ((tmsize_t)(-1));
    tile = TIFFComputeTile(tif, x, y, z, s);
    /*
     * NB: A tile size of -1 is used instead of tif_tilesize knowing
     * that TIFFWriteEncodedTile will clamp this to the tile size.
     * This is done because the tile size may not be defined until
     * after the output buffer is setup in TIFFWriteBufferSetup.
     */
    return TIFFWriteEncodedTile(tif, tile, buf, (tmsize_t)(-1));
}
/*
 * Encode the supplied data and write it to the
 * specified tile.  There must be space for the
 * data.  The function clamps individual writes
 * to a tile to the tile size, but does not (and
 * can not) check that multiple writes to the same
 * tile do not write more than tile size data.
 *
 * NB: Image length must be setup before writing; this
 *     interface does not support automatically growing
 *     the image on each write (as TIFFWriteScanline does).
 *
 * Returns the (clamped) byte count written, or (tmsize_t)-1 on failure.
 * The source buffer may be altered in place by the swab step.
 */
tmsize_t TIFFWriteEncodedTile(TIFF *tif, uint32_t tile, void *data, tmsize_t cc)
{
    static const char module[] = "TIFFWriteEncodedTile";
    TIFFDirectory *td;
    uint16_t sample;
    uint32_t howmany32;

    if (!WRITECHECKTILES(tif, module))
        return ((tmsize_t)(-1));
    td = &tif->tif_dir;
    if (tile >= td->td_nstrips)
    {
        TIFFErrorExtR(tif, module, "Tile %lu out of range, max %lu",
                      (unsigned long)tile, (unsigned long)td->td_nstrips);
        return ((tmsize_t)(-1));
    }
    /*
     * Handle delayed allocation of data buffer. This
     * permits it to be sized more intelligently (using
     * directory information).
     */
    if (!BUFFERCHECK(tif))
        return ((tmsize_t)(-1));

    tif->tif_flags |= TIFF_BUF4WRITE;

    tif->tif_curtile = tile;

    /* this informs TIFFAppendToStrip() we have changed or reset tile */
    tif->tif_curoff = 0;

    if (!_TIFFReserveLargeEnoughWriteBuffer(tif, tile))
    {
        return ((tmsize_t)(-1));
    }

    tif->tif_rawcc = 0;
    tif->tif_rawcp = tif->tif_rawdata;

    /*
     * Compute tiles per row & per column to compute
     * current row and column
     */
    howmany32 = TIFFhowmany_32(td->td_imagelength, td->td_tilelength);
    if (howmany32 == 0)
    {
        TIFFErrorExtR(tif, module, "Zero tiles");
        return ((tmsize_t)(-1));
    }
    tif->tif_row = (tile % howmany32) * td->td_tilelength;
    howmany32 = TIFFhowmany_32(td->td_imagewidth, td->td_tilewidth);
    if (howmany32 == 0)
    {
        TIFFErrorExtR(tif, module, "Zero tiles");
        return ((tmsize_t)(-1));
    }
    tif->tif_col = (tile % howmany32) * td->td_tilewidth;

    if ((tif->tif_flags & TIFF_CODERSETUP) == 0)
    {
        if (!(*tif->tif_setupencode)(tif))
            return ((tmsize_t)(-1));
        tif->tif_flags |= TIFF_CODERSETUP;
    }
    tif->tif_flags &= ~TIFF_POSTENCODE;

    /*
     * Clamp write amount to the tile size. This is mostly
     * done so that callers can pass in some large number
     * (e.g. -1) and have the tile size used instead.
     */
    if (cc < 1 || cc > tif->tif_tilesize)
        cc = tif->tif_tilesize;

    /* shortcut to avoid an extra memcpy() */
    if (td->td_compression == COMPRESSION_NONE)
    {
        /* swab if needed - note that source buffer will be altered */
        tif->tif_postdecode(tif, (uint8_t *)data, cc);

        if (!isFillOrder(tif, td->td_fillorder) &&
            (tif->tif_flags & TIFF_NOBITREV) == 0)
            TIFFReverseBits((uint8_t *)data, cc);

        if (cc > 0 && !TIFFAppendToStrip(tif, tile, (uint8_t *)data, cc))
            return ((tmsize_t)-1);
        return (cc);
    }

    /* Plane index for separate-plane data: tiles are grouped per plane. */
    sample = (uint16_t)(tile / td->td_stripsperimage);
    if (!(*tif->tif_preencode)(tif, sample))
        return ((tmsize_t)(-1));
    /* swab if needed - note that source buffer will be altered */
    tif->tif_postdecode(tif, (uint8_t *)data, cc);

    if (!(*tif->tif_encodetile)(tif, (uint8_t *)data, cc, sample))
        return ((tmsize_t)-1);
    if (!(*tif->tif_postencode)(tif))
        return ((tmsize_t)(-1));
    if (!isFillOrder(tif, td->td_fillorder) &&
        (tif->tif_flags & TIFF_NOBITREV) == 0)
        TIFFReverseBits((uint8_t *)tif->tif_rawdata, tif->tif_rawcc);
    if (tif->tif_rawcc > 0 &&
        !TIFFAppendToStrip(tif, tile, tif->tif_rawdata, tif->tif_rawcc))
        return ((tmsize_t)(-1));
    tif->tif_rawcc = 0;
    tif->tif_rawcp = tif->tif_rawdata;
    return (cc);
}
/*
 * Write the supplied data to the specified strip.
 * There must be space for the data; we don't check
 * if strips overlap!
 *
 * NB: Image length must be setup before writing; this
 *     interface does not support automatically growing
 *     the image on each write (as TIFFWriteScanline does).
 */
tmsize_t TIFFWriteRawTile(TIFF *tif, uint32_t tile, void *data, tmsize_t cc)
{
    static const char module[] = "TIFFWriteRawTile";
    TIFFDirectory *td = &tif->tif_dir;

    if (!WRITECHECKTILES(tif, module))
        return ((tmsize_t)(-1));
    /* Reject tile indices beyond the allocated strip/tile arrays. */
    if (tile >= td->td_nstrips)
    {
        TIFFErrorExtR(tif, module, "Tile %lu out of range, max %lu",
                      (unsigned long)tile, (unsigned long)td->td_nstrips);
        return ((tmsize_t)(-1));
    }
    if (!TIFFAppendToStrip(tif, tile, (uint8_t *)data, cc))
        return ((tmsize_t)(-1));
    return cc;
}
/* A field counts as "unspecified" when it has been set but the image length
   is still zero, i.e. the real strip/tile count cannot be computed yet. */
#define isUnspecified(tif, f)                                                  \
    (TIFFFieldSet(tif, f) && (tif)->tif_dir.td_imagelength == 0)

/*
 * Allocate and zero the StripOffsets/StripByteCounts arrays sized for the
 * current directory (strips or tiles).  Zero offsets mean "place data at
 * end-of-file".  Returns 1 on success, 0 on allocation failure or when the
 * arrays would be unreasonably large.
 */
int TIFFSetupStrips(TIFF *tif)
{
    TIFFDirectory *td = &tif->tif_dir;

    if (isTiled(tif))
        td->td_stripsperimage = isUnspecified(tif, FIELD_TILEDIMENSIONS)
                                    ? td->td_samplesperpixel
                                    : TIFFNumberOfTiles(tif);
    else
        td->td_stripsperimage = isUnspecified(tif, FIELD_ROWSPERSTRIP)
                                    ? td->td_samplesperpixel
                                    : TIFFNumberOfStrips(tif);
    td->td_nstrips = td->td_stripsperimage;
    /* TIFFWriteDirectoryTagData has a limitation to 0x80000000U bytes */
    if (td->td_nstrips >=
        0x80000000U / ((tif->tif_flags & TIFF_BIGTIFF) ? 0x8U : 0x4U))
    {
        TIFFErrorExtR(tif, "TIFFSetupStrips",
                      "Too large Strip/Tile Offsets/ByteCounts arrays");
        return 0;
    }
    /* td_stripsperimage counts strips per plane for separate planes. */
    if (td->td_planarconfig == PLANARCONFIG_SEPARATE)
        td->td_stripsperimage /= td->td_samplesperpixel;
    if (td->td_stripoffset_p != NULL)
        _TIFFfreeExt(tif, td->td_stripoffset_p);
    td->td_stripoffset_p = (uint64_t *)_TIFFCheckMalloc(
        tif, td->td_nstrips, sizeof(uint64_t), "for \"StripOffsets\" array");
    if (td->td_stripbytecount_p != NULL)
        _TIFFfreeExt(tif, td->td_stripbytecount_p);
    td->td_stripbytecount_p = (uint64_t *)_TIFFCheckMalloc(
        tif, td->td_nstrips, sizeof(uint64_t), "for \"StripByteCounts\" array");
    if (td->td_stripoffset_p == NULL || td->td_stripbytecount_p == NULL)
        return (0);
    /*
     * Place data at the end-of-file
     * (by setting offsets to zero).
     */
    _TIFFmemset(td->td_stripoffset_p, 0, td->td_nstrips * sizeof(uint64_t));
    _TIFFmemset(td->td_stripbytecount_p, 0, td->td_nstrips * sizeof(uint64_t));
    TIFFSetFieldBit(tif, FIELD_STRIPOFFSETS);
    TIFFSetFieldBit(tif, FIELD_STRIPBYTECOUNTS);
    return (1);
}
#undef isUnspecified
/*
 * Verify file is writable and that the directory
 * information is setup properly. In doing the latter
 * we also "freeze" the state of the directory so
 * that important information is not changed.
 *
 * `tiles` is non-zero when the caller intends tile-oriented writes.
 * Returns 1 when writing may proceed, 0 otherwise.
 */
int TIFFWriteCheck(TIFF *tif, int tiles, const char *module)
{
    if (tif->tif_mode == O_RDONLY)
    {
        TIFFErrorExtR(tif, module, "File not open for writing");
        return (0);
    }
    /* XOR: the caller's access style (tiles vs. strips) must match the
     * image organization. */
    if (tiles ^ isTiled(tif))
    {
        TIFFErrorExtR(tif, module,
                      tiles ? "Can not write tiles to a striped image"
                            : "Can not write scanlines to a tiled image");
        return (0);
    }
    _TIFFFillStriles(tif);
    /*
     * On the first write verify all the required information
     * has been setup and initialize any data structures that
     * had to wait until directory information was set.
     * Note that a lot of our work is assumed to remain valid
     * because we disallow any of the important parameters
     * from changing after we start writing (i.e. once
     * TIFF_BEENWRITING is set, TIFFSetField will only allow
     * the image's length to be changed).
     */
    if (!TIFFFieldSet(tif, FIELD_IMAGEDIMENSIONS))
    {
        TIFFErrorExtR(tif, module,
                      "Must set \"ImageWidth\" before writing data");
        return (0);
    }
    if (tif->tif_dir.td_stripoffset_p == NULL && !TIFFSetupStrips(tif))
    {
        tif->tif_dir.td_nstrips = 0;
        TIFFErrorExtR(tif, module, "No space for %s arrays",
                      isTiled(tif) ? "tile" : "strip");
        return (0);
    }
    if (isTiled(tif))
    {
        tif->tif_tilesize = TIFFTileSize(tif);
        if (tif->tif_tilesize == 0)
            return (0);
    }
    else
        tif->tif_tilesize = (tmsize_t)(-1);
    tif->tif_scanlinesize = TIFFScanlineSize(tif);
    if (tif->tif_scanlinesize == 0)
        return (0);
    tif->tif_flags |= TIFF_BEENWRITING;
    /* If both strile dir entries are placeholders (tag set but count, type
     * and offset all zero) and no directory rewrite is already pending,
     * force the strile arrays out now — presumably to reserve their file
     * positions; confirm against TIFFForceStrileArrayWriting. */
    if (tif->tif_dir.td_stripoffset_entry.tdir_tag != 0 &&
        tif->tif_dir.td_stripoffset_entry.tdir_count == 0 &&
        tif->tif_dir.td_stripoffset_entry.tdir_type == 0 &&
        tif->tif_dir.td_stripoffset_entry.tdir_offset.toff_long8 == 0 &&
        tif->tif_dir.td_stripbytecount_entry.tdir_tag != 0 &&
        tif->tif_dir.td_stripbytecount_entry.tdir_count == 0 &&
        tif->tif_dir.td_stripbytecount_entry.tdir_type == 0 &&
        tif->tif_dir.td_stripbytecount_entry.tdir_offset.toff_long8 == 0 &&
        !(tif->tif_flags & TIFF_DIRTYDIRECT))
    {
        TIFFForceStrileArrayWriting(tif);
    }
    return (1);
}
/*
 * Setup the raw data buffer used for encoding.
 *
 * `bp` may be a caller-supplied buffer or NULL; `size` may be
 * (tmsize_t)(-1) to request an automatically sized buffer.
 * Returns 1 on success, 0 on allocation failure.
 */
int TIFFWriteBufferSetup(TIFF *tif, void *bp, tmsize_t size)
{
    static const char module[] = "TIFFWriteBufferSetup";
    /* Drop any previous buffer; free it only if the library owns it. */
    if (tif->tif_rawdata)
    {
        if (tif->tif_flags & TIFF_MYBUFFER)
        {
            _TIFFfreeExt(tif, tif->tif_rawdata);
            tif->tif_flags &= ~TIFF_MYBUFFER;
        }
        tif->tif_rawdata = NULL;
    }
    /* size == -1 means "pick a sensible default for this image". */
    if (size == (tmsize_t)(-1))
    {
        size = (isTiled(tif) ? tif->tif_tilesize : TIFFStripSize(tif));
        /* Adds 10% margin for cases where compression would expand a bit */
        if (size < TIFF_TMSIZE_T_MAX - size / 10)
            size += size / 10;
        /*
         * Make raw data buffer at least 8K
         */
        if (size < 8 * 1024)
            size = 8 * 1024;
        bp = NULL; /* NB: force malloc */
    }
    if (bp == NULL)
    {
        bp = _TIFFmallocExt(tif, size);
        if (bp == NULL)
        {
            TIFFErrorExtR(tif, module, "No space for output buffer");
            return (0);
        }
        tif->tif_flags |= TIFF_MYBUFFER; /* we allocated it, we free it */
    }
    else
        tif->tif_flags &= ~TIFF_MYBUFFER; /* caller owns the buffer */
    tif->tif_rawdata = (uint8_t *)bp;
    tif->tif_rawdatasize = size;
    tif->tif_rawcc = 0;
    tif->tif_rawcp = tif->tif_rawdata;
    tif->tif_flags |= TIFF_BUFFERSETUP;
    return (1);
}
/*
 * Grow the strip data structures by delta strips.
 * Returns 1 on success, 0 on allocation failure (in which case
 * td_nstrips is reset to 0).
 */
static int TIFFGrowStrips(TIFF *tif, uint32_t delta, const char *module)
{
    TIFFDirectory *td = &tif->tif_dir;
    uint64_t *new_stripoffset;
    uint64_t *new_stripbytecount;
    assert(td->td_planarconfig == PLANARCONFIG_CONTIG);
    new_stripoffset = (uint64_t *)_TIFFreallocExt(
        tif, td->td_stripoffset_p, (td->td_nstrips + delta) * sizeof(uint64_t));
    new_stripbytecount = (uint64_t *)_TIFFreallocExt(
        tif, td->td_stripbytecount_p,
        (td->td_nstrips + delta) * sizeof(uint64_t));
    if (new_stripoffset == NULL || new_stripbytecount == NULL)
    {
        /*
         * A successful realloc has already released (or reused) the old
         * array, so after freeing the new block the directory pointer
         * would dangle; clear it to avoid a later use-after-free /
         * double-free. A failed realloc leaves the old array intact and
         * still owned by td, so that pointer is kept.
         */
        if (new_stripoffset)
        {
            _TIFFfreeExt(tif, new_stripoffset);
            td->td_stripoffset_p = NULL;
        }
        if (new_stripbytecount)
        {
            _TIFFfreeExt(tif, new_stripbytecount);
            td->td_stripbytecount_p = NULL;
        }
        td->td_nstrips = 0;
        TIFFErrorExtR(tif, module, "No space to expand strip arrays");
        return (0);
    }
    td->td_stripoffset_p = new_stripoffset;
    td->td_stripbytecount_p = new_stripbytecount;
    /* Zero the newly appended entries; zero offset means "place at EOF". */
    _TIFFmemset(td->td_stripoffset_p + td->td_nstrips, 0,
                delta * sizeof(uint64_t));
    _TIFFmemset(td->td_stripbytecount_p + td->td_nstrips, 0,
                delta * sizeof(uint64_t));
    td->td_nstrips += delta;
    tif->tif_flags |= TIFF_DIRTYDIRECT;
    return (1);
}
/*
 * Append the data to the specified strip.
 * Handles three cases: starting a fresh strip, rewriting an existing
 * strip in place when the new data fits, and relocating an in-place
 * rewrite to end-of-file when it outgrows the reserved area.
 * Returns 1 on success, 0 on error.
 */
static int TIFFAppendToStrip(TIFF *tif, uint32_t strip, uint8_t *data,
                             tmsize_t cc)
{
    static const char module[] = "TIFFAppendToStrip";
    TIFFDirectory *td = &tif->tif_dir;
    uint64_t m;
    int64_t old_byte_count = -1;
    if (tif->tif_curoff == 0)
        tif->tif_lastvalidoff = 0;
    if (td->td_stripoffset_p[strip] == 0 || tif->tif_curoff == 0)
    {
        assert(td->td_nstrips > 0);
        if (td->td_stripbytecount_p[strip] != 0 &&
            td->td_stripoffset_p[strip] != 0 &&
            td->td_stripbytecount_p[strip] >= (uint64_t)cc)
        {
            /*
             * There is already tile data on disk, and the new tile
             * data we have will fit in the same space. The only
             * aspect of this that is risky is that there could be
             * more data to append to this strip before we are done
             * depending on how we are getting called.
             */
            if (!SeekOK(tif, td->td_stripoffset_p[strip]))
            {
                TIFFErrorExtR(tif, module, "Seek error at scanline %lu",
                              (unsigned long)tif->tif_row);
                return (0);
            }
            /* Remember how far we may rewrite before overrunning the
             * space the old strip occupied. */
            tif->tif_lastvalidoff =
                td->td_stripoffset_p[strip] + td->td_stripbytecount_p[strip];
        }
        else
        {
            /*
             * Seek to end of file, and set that as our location to
             * write this strip.
             */
            td->td_stripoffset_p[strip] = TIFFSeekFile(tif, 0, SEEK_END);
            tif->tif_flags |= TIFF_DIRTYSTRIP;
        }
        tif->tif_curoff = td->td_stripoffset_p[strip];
        /*
         * We are starting a fresh strip/tile, so set the size to zero.
         */
        old_byte_count = td->td_stripbytecount_p[strip];
        td->td_stripbytecount_p[strip] = 0;
    }
    m = tif->tif_curoff + cc;
    if (!(tif->tif_flags & TIFF_BIGTIFF))
        m = (uint32_t)m;
    /* Detect 32-bit (classic TIFF) / 64-bit offset wrap-around. */
    if ((m < tif->tif_curoff) || (m < (uint64_t)cc))
    {
        TIFFErrorExtR(tif, module, "Maximum TIFF file size exceeded");
        return (0);
    }
    if (tif->tif_lastvalidoff != 0 && m > tif->tif_lastvalidoff &&
        td->td_stripbytecount_p[strip] > 0)
    {
        /* Ouch: we have detected that we are rewriting in place a strip/tile */
        /* with several calls to TIFFAppendToStrip(). The first call was with */
        /* a size smaller than the previous size of the strip/tile, so we */
        /* opted to rewrite in place, but a following call causes us to go */
        /* outsize of the strip/tile area, so we have to finally go for a */
        /* append-at-end-of-file strategy, and start by moving what we already
         */
        /* wrote. */
        tmsize_t tempSize;
        void *temp;
        uint64_t offsetRead;
        uint64_t offsetWrite;
        uint64_t toCopy = td->td_stripbytecount_p[strip];
        if (toCopy < 1024 * 1024)
            tempSize = (tmsize_t)toCopy;
        else
            tempSize = 1024 * 1024;
        offsetRead = td->td_stripoffset_p[strip];
        offsetWrite = TIFFSeekFile(tif, 0, SEEK_END);
        m = offsetWrite + toCopy + cc;
        if (!(tif->tif_flags & TIFF_BIGTIFF) && m != (uint32_t)m)
        {
            TIFFErrorExtR(tif, module, "Maximum TIFF file size exceeded");
            return (0);
        }
        temp = _TIFFmallocExt(tif, tempSize);
        if (temp == NULL)
        {
            TIFFErrorExtR(tif, module, "No space for output buffer");
            return (0);
        }
        tif->tif_flags |= TIFF_DIRTYSTRIP;
        td->td_stripoffset_p[strip] = offsetWrite;
        td->td_stripbytecount_p[strip] = 0;
        /* Move data written by previous calls to us at end of file */
        while (toCopy > 0)
        {
            /* Clamp each pass to the bytes actually remaining: with a
             * fixed tempSize, a toCopy that is not a multiple of the
             * buffer size would make the final `toCopy -= tempSize`
             * underflow (toCopy is unsigned) and copy past the strip. */
            tmsize_t chunk = (toCopy < (uint64_t)tempSize)
                                 ? (tmsize_t)toCopy
                                 : tempSize;
            if (!SeekOK(tif, offsetRead))
            {
                TIFFErrorExtR(tif, module, "Seek error");
                _TIFFfreeExt(tif, temp);
                return (0);
            }
            if (!ReadOK(tif, temp, chunk))
            {
                TIFFErrorExtR(tif, module, "Cannot read");
                _TIFFfreeExt(tif, temp);
                return (0);
            }
            if (!SeekOK(tif, offsetWrite))
            {
                TIFFErrorExtR(tif, module, "Seek error");
                _TIFFfreeExt(tif, temp);
                return (0);
            }
            if (!WriteOK(tif, temp, chunk))
            {
                TIFFErrorExtR(tif, module, "Cannot write");
                _TIFFfreeExt(tif, temp);
                return (0);
            }
            offsetRead += chunk;
            offsetWrite += chunk;
            td->td_stripbytecount_p[strip] += chunk;
            toCopy -= chunk;
        }
        _TIFFfreeExt(tif, temp);
        /* Append the data of this call */
        offsetWrite += cc;
        m = offsetWrite;
    }
    if (!WriteOK(tif, data, cc))
    {
        TIFFErrorExtR(tif, module, "Write error at scanline %lu",
                      (unsigned long)tif->tif_row);
        return (0);
    }
    tif->tif_curoff = m;
    td->td_stripbytecount_p[strip] += cc;
    /* Only mark the directory dirty when the byte count actually changed. */
    if ((int64_t)td->td_stripbytecount_p[strip] != old_byte_count)
        tif->tif_flags |= TIFF_DIRTYSTRIP;
    return (1);
}
/*
 * Internal version of TIFFFlushData that can be
 * called by ``encodestrip routines'' w/o concern
 * for infinite recursion.
 *
 * Returns 1 on success (or nothing to flush), 0 on append failure.
 */
int TIFFFlushData1(TIFF *tif)
{
    int ret = 1;

    if (tif->tif_rawcc > 0 && (tif->tif_flags & TIFF_BUF4WRITE) != 0)
    {
        uint32_t strile;

        if (!isFillOrder(tif, tif->tif_dir.td_fillorder) &&
            (tif->tif_flags & TIFF_NOBITREV) == 0)
            TIFFReverseBits((uint8_t *)tif->tif_rawdata, tif->tif_rawcc);

        strile = isTiled(tif) ? tif->tif_curtile : tif->tif_curstrip;
        ret = TIFFAppendToStrip(tif, strile, tif->tif_rawdata, tif->tif_rawcc);
        /* Reset the buffer even when the append failed: some callers do
         * not check our return value, and stale buffer state would
         * corrupt subsequent writes. */
        tif->tif_rawcc = 0;
        tif->tif_rawcp = tif->tif_rawdata;
    }
    return (ret);
}
/*
 * Set the current write offset. This should only be
 * used to set the offset to a known previous location
 * (very carefully), or to 0 so that the next write gets
 * appended to the end of the file.
 */
void TIFFSetWriteOffset(TIFF *tif, toff_t off)
{
    tif->tif_curoff = off;
    /* Any remembered rewrite-in-place bound referred to the old position,
     * so it must be invalidated. */
    tif->tif_lastvalidoff = 0;
}
package conversion
import (
"context"
"sync/atomic"
"testing"
"time"
dashv0 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v0alpha1"
dashv1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v1beta1"
dashv2alpha1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v2alpha1"
dashv2beta1 "github.com/grafana/grafana/apps/dashboard/pkg/apis/dashboard/v2beta1"
"github.com/grafana/grafana/apps/dashboard/pkg/migration"
"github.com/grafana/grafana/apps/dashboard/pkg/migration/schemaversion"
common "github.com/grafana/grafana/pkg/apimachinery/apis/common/v0alpha1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// countingDataSourceProvider is a test double that records how many times
// Index() is invoked on it.
type countingDataSourceProvider struct {
	datasources []schemaversion.DataSourceInfo
	callCount   atomic.Int64
}

func newCountingDataSourceProvider(datasources []schemaversion.DataSourceInfo) *countingDataSourceProvider {
	provider := &countingDataSourceProvider{datasources: datasources}
	return provider
}

// Index bumps the invocation counter and builds a fresh index over the fixtures.
func (c *countingDataSourceProvider) Index(_ context.Context) *schemaversion.DatasourceIndex {
	c.callCount.Add(1)
	return schemaversion.NewDatasourceIndex(c.datasources)
}

// getCallCount reports how often Index has been called so far.
func (c *countingDataSourceProvider) getCallCount() int64 {
	return c.callCount.Load()
}
// countingLibraryElementProvider is a test double that records how many times
// GetLibraryElementInfo() is invoked on it.
type countingLibraryElementProvider struct {
	elements  []schemaversion.LibraryElementInfo
	callCount atomic.Int64
}

func newCountingLibraryElementProvider(elements []schemaversion.LibraryElementInfo) *countingLibraryElementProvider {
	provider := &countingLibraryElementProvider{elements: elements}
	return provider
}

// GetLibraryElementInfo bumps the invocation counter and hands back the fixtures.
func (c *countingLibraryElementProvider) GetLibraryElementInfo(_ context.Context) []schemaversion.LibraryElementInfo {
	c.callCount.Add(1)
	return c.elements
}

// getCallCount reports how often GetLibraryElementInfo has been called so far.
func (c *countingLibraryElementProvider) getCallCount() int64 {
	return c.callCount.Load()
}
// createTestV0Dashboard creates a minimal v0 dashboard for testing
// The dashboard has a datasource with UID only (no type) to force provider lookup
// and includes library panels to test library element provider caching
func createTestV0Dashboard(namespace, title string) *dashv0.Dashboard {
	return &dashv0.Dashboard{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-dashboard",
			Namespace: namespace,
		},
		Spec: common.Unstructured{
			Object: map[string]interface{}{
				"title":         title,
				"schemaVersion": schemaversion.LATEST_VERSION,
				// Variables with datasource reference that requires lookup
				"templating": map[string]interface{}{
					"list": []interface{}{
						map[string]interface{}{
							"name":  "query_var",
							"type":  "query",
							"query": "label_values(up, job)",
							// Datasource with UID only - type needs to be looked up
							"datasource": map[string]interface{}{
								"uid": "ds1",
								// type is intentionally omitted to trigger provider lookup
							},
						},
					},
				},
				// Three panels: a regular one plus two library-panel refs,
				// so both the datasource and library-element providers are hit.
				"panels": []interface{}{
					map[string]interface{}{
						"id":    1,
						"title": "Test Panel",
						"type":  "timeseries",
						"targets": []interface{}{
							map[string]interface{}{
								// Datasource with UID only - type needs to be looked up
								"datasource": map[string]interface{}{
									"uid": "ds1",
								},
							},
						},
					},
					// Library panel reference - triggers library element provider lookup
					map[string]interface{}{
						"id":    2,
						"title": "Library Panel with Horizontal Repeat",
						"type":  "library-panel-ref",
						"gridPos": map[string]interface{}{
							"h": 8,
							"w": 12,
							"x": 0,
							"y": 8,
						},
						"libraryPanel": map[string]interface{}{
							"uid":  "lib-panel-repeat-h",
							"name": "Library Panel with Horizontal Repeat",
						},
					},
					// Another library panel reference
					map[string]interface{}{
						"id":    3,
						"title": "Library Panel without Repeat",
						"type":  "library-panel-ref",
						"gridPos": map[string]interface{}{
							"h": 3,
							"w": 6,
							"x": 0,
							"y": 16,
						},
						"libraryPanel": map[string]interface{}{
							"uid":  "lib-panel-no-repeat",
							"name": "Library Panel without Repeat",
						},
					},
				},
			},
		},
	}
}
// createTestV1Dashboard creates a minimal v1beta1 dashboard for testing
// The dashboard has a datasource with UID only (no type) to force provider lookup
// and includes library panels to test library element provider caching
func createTestV1Dashboard(namespace, title string) *dashv1.Dashboard {
	return &dashv1.Dashboard{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-dashboard",
			Namespace: namespace,
		},
		Spec: common.Unstructured{
			Object: map[string]interface{}{
				"title":         title,
				"schemaVersion": schemaversion.LATEST_VERSION,
				// Variables with datasource reference that requires lookup
				"templating": map[string]interface{}{
					"list": []interface{}{
						map[string]interface{}{
							"name":  "query_var",
							"type":  "query",
							"query": "label_values(up, job)",
							// Datasource with UID only - type needs to be looked up
							"datasource": map[string]interface{}{
								"uid": "ds1",
								// type is intentionally omitted to trigger provider lookup
							},
						},
					},
				},
				// Three panels: a regular one plus two library-panel refs,
				// so both the datasource and library-element providers are hit.
				"panels": []interface{}{
					map[string]interface{}{
						"id":    1,
						"title": "Test Panel",
						"type":  "timeseries",
						"targets": []interface{}{
							map[string]interface{}{
								// Datasource with UID only - type needs to be looked up
								"datasource": map[string]interface{}{
									"uid": "ds1",
								},
							},
						},
					},
					// Library panel reference - triggers library element provider lookup
					map[string]interface{}{
						"id":    2,
						"title": "Library Panel with Vertical Repeat",
						"type":  "library-panel-ref",
						"gridPos": map[string]interface{}{
							"h": 4,
							"w": 6,
							"x": 0,
							"y": 8,
						},
						"libraryPanel": map[string]interface{}{
							"uid":  "lib-panel-repeat-v",
							"name": "Library Panel with Vertical Repeat",
						},
					},
					// Another library panel reference
					map[string]interface{}{
						"id":    3,
						"title": "Library Panel without Repeat",
						"type":  "library-panel-ref",
						"gridPos": map[string]interface{}{
							"h": 3,
							"w": 6,
							"x": 6,
							"y": 8,
						},
						"libraryPanel": map[string]interface{}{
							"uid":  "lib-panel-no-repeat",
							"name": "Library Panel without Repeat",
						},
					},
				},
			},
		},
	}
}
// TestConversionCaching_V0_to_V2alpha1 verifies caching works when converting V0 to V2alpha1
func TestConversionCaching_V0_to_V2alpha1(t *testing.T) {
	datasources := []schemaversion.DataSourceInfo{
		{UID: "ds1", Type: "prometheus", Name: "Prometheus", Default: true},
	}
	elements := []schemaversion.LibraryElementInfo{
		{UID: "lib-panel-repeat-h", Name: "Library Panel with Horizontal Repeat", Type: "timeseries"},
		{UID: "lib-panel-no-repeat", Name: "Library Panel without Repeat", Type: "graph"},
	}
	underlyingDS := newCountingDataSourceProvider(datasources)
	underlyingLE := newCountingLibraryElementProvider(elements)
	// Wrap the counting doubles in the caching providers under test.
	cachedDS := schemaversion.WrapIndexProviderWithCache(underlyingDS, time.Minute)
	cachedLE := schemaversion.WrapLibraryElementProviderWithCache(underlyingLE, time.Minute)
	migration.ResetForTesting()
	migration.Initialize(cachedDS, cachedLE, migration.DefaultCacheTTL)
	// Convert multiple dashboards in the same namespace
	numDashboards := 5
	namespace := "default"
	for i := 0; i < numDashboards; i++ {
		source := createTestV0Dashboard(namespace, "Dashboard "+string(rune('A'+i)))
		target := &dashv2alpha1.Dashboard{}
		err := Convert_V0_to_V2alpha1(source, target, nil, cachedDS, cachedLE)
		require.NoError(t, err, "conversion %d should succeed", i)
		require.NotNil(t, target.Spec)
	}
	// With caching, the underlying datasource provider should only be called once per namespace
	// The test dashboard has datasources without type that require lookup
	assert.Equal(t, int64(1), underlyingDS.getCallCount(),
		"datasource provider should be called only once for %d conversions in same namespace", numDashboards)
	// Library element provider should also be called only once per namespace due to caching
	assert.Equal(t, int64(1), underlyingLE.getCallCount(),
		"library element provider should be called only once for %d conversions in same namespace", numDashboards)
}
// TestConversionCaching_V0_to_V2beta1 verifies caching works when converting V0 to V2beta1
func TestConversionCaching_V0_to_V2beta1(t *testing.T) {
	// Fixtures match the datasource and library panels referenced by createTestV0Dashboard.
	datasources := []schemaversion.DataSourceInfo{
		{UID: "ds1", Type: "prometheus", Name: "Prometheus", Default: true},
	}
	elements := []schemaversion.LibraryElementInfo{
		{UID: "lib-panel-repeat-h", Name: "Library Panel with Horizontal Repeat", Type: "timeseries"},
		{UID: "lib-panel-no-repeat", Name: "Library Panel without Repeat", Type: "graph"},
	}
	underlyingDS := newCountingDataSourceProvider(datasources)
	underlyingLE := newCountingLibraryElementProvider(elements)
	cachedDS := schemaversion.WrapIndexProviderWithCache(underlyingDS, time.Minute)
	cachedLE := schemaversion.WrapLibraryElementProviderWithCache(underlyingLE, time.Minute)
	migration.ResetForTesting()
	migration.Initialize(cachedDS, cachedLE, migration.DefaultCacheTTL)
	numDashboards := 5
	namespace := "default"
	for i := 0; i < numDashboards; i++ {
		source := createTestV0Dashboard(namespace, "Dashboard "+string(rune('A'+i)))
		target := &dashv2beta1.Dashboard{}
		err := Convert_V0_to_V2beta1(source, target, nil, cachedDS, cachedLE)
		require.NoError(t, err, "conversion %d should succeed", i)
		require.NotNil(t, target.Spec)
	}
	// A single underlying call per provider proves all later conversions hit the cache.
	assert.Equal(t, int64(1), underlyingDS.getCallCount(),
		"datasource provider should be called only once for %d conversions in same namespace", numDashboards)
	assert.Equal(t, int64(1), underlyingLE.getCallCount(),
		"library element provider should be called only once for %d conversions in same namespace", numDashboards)
}
// TestConversionCaching_V1beta1_to_V2alpha1 verifies caching works when converting V1beta1 to V2alpha1
func TestConversionCaching_V1beta1_to_V2alpha1(t *testing.T) {
	// Fixtures match the datasource and library panels referenced by createTestV1Dashboard.
	datasources := []schemaversion.DataSourceInfo{
		{UID: "ds1", Type: "prometheus", Name: "Prometheus", Default: true},
	}
	elements := []schemaversion.LibraryElementInfo{
		{UID: "lib-panel-repeat-v", Name: "Library Panel with Vertical Repeat", Type: "timeseries"},
		{UID: "lib-panel-no-repeat", Name: "Library Panel without Repeat", Type: "graph"},
	}
	underlyingDS := newCountingDataSourceProvider(datasources)
	underlyingLE := newCountingLibraryElementProvider(elements)
	cachedDS := schemaversion.WrapIndexProviderWithCache(underlyingDS, time.Minute)
	cachedLE := schemaversion.WrapLibraryElementProviderWithCache(underlyingLE, time.Minute)
	migration.ResetForTesting()
	migration.Initialize(cachedDS, cachedLE, migration.DefaultCacheTTL)
	numDashboards := 5
	namespace := "default"
	for i := 0; i < numDashboards; i++ {
		source := createTestV1Dashboard(namespace, "Dashboard "+string(rune('A'+i)))
		target := &dashv2alpha1.Dashboard{}
		err := Convert_V1beta1_to_V2alpha1(source, target, nil, cachedDS, cachedLE)
		require.NoError(t, err, "conversion %d should succeed", i)
		require.NotNil(t, target.Spec)
	}
	// A single underlying call per provider proves all later conversions hit the cache.
	assert.Equal(t, int64(1), underlyingDS.getCallCount(),
		"datasource provider should be called only once for %d conversions in same namespace", numDashboards)
	assert.Equal(t, int64(1), underlyingLE.getCallCount(),
		"library element provider should be called only once for %d conversions in same namespace", numDashboards)
}
// TestConversionCaching_V1beta1_to_V2beta1 verifies caching works when converting V1beta1 to V2beta1
func TestConversionCaching_V1beta1_to_V2beta1(t *testing.T) {
	// Fixtures match the datasource and library panels referenced by createTestV1Dashboard.
	datasources := []schemaversion.DataSourceInfo{
		{UID: "ds1", Type: "prometheus", Name: "Prometheus", Default: true},
	}
	elements := []schemaversion.LibraryElementInfo{
		{UID: "lib-panel-repeat-v", Name: "Library Panel with Vertical Repeat", Type: "timeseries"},
		{UID: "lib-panel-no-repeat", Name: "Library Panel without Repeat", Type: "graph"},
	}
	underlyingDS := newCountingDataSourceProvider(datasources)
	underlyingLE := newCountingLibraryElementProvider(elements)
	cachedDS := schemaversion.WrapIndexProviderWithCache(underlyingDS, time.Minute)
	cachedLE := schemaversion.WrapLibraryElementProviderWithCache(underlyingLE, time.Minute)
	migration.ResetForTesting()
	migration.Initialize(cachedDS, cachedLE, migration.DefaultCacheTTL)
	numDashboards := 5
	namespace := "default"
	for i := 0; i < numDashboards; i++ {
		source := createTestV1Dashboard(namespace, "Dashboard "+string(rune('A'+i)))
		target := &dashv2beta1.Dashboard{}
		err := Convert_V1beta1_to_V2beta1(source, target, nil, cachedDS, cachedLE)
		require.NoError(t, err, "conversion %d should succeed", i)
		require.NotNil(t, target.Spec)
	}
	// A single underlying call per provider proves all later conversions hit the cache.
	assert.Equal(t, int64(1), underlyingDS.getCallCount(),
		"datasource provider should be called only once for %d conversions in same namespace", numDashboards)
	assert.Equal(t, int64(1), underlyingLE.getCallCount(),
		"library element provider should be called only once for %d conversions in same namespace", numDashboards)
}
// TestConversionCaching_MultipleNamespaces verifies that different namespaces get separate cache entries
func TestConversionCaching_MultipleNamespaces(t *testing.T) {
	datasources := []schemaversion.DataSourceInfo{
		{UID: "ds1", Type: "prometheus", Name: "Prometheus", Default: true},
	}
	elements := []schemaversion.LibraryElementInfo{
		{UID: "lib-panel-repeat-h", Name: "Library Panel with Horizontal Repeat", Type: "timeseries"},
		{UID: "lib-panel-no-repeat", Name: "Library Panel without Repeat", Type: "graph"},
	}
	underlyingDS := newCountingDataSourceProvider(datasources)
	underlyingLE := newCountingLibraryElementProvider(elements)
	cachedDS := schemaversion.WrapIndexProviderWithCache(underlyingDS, time.Minute)
	cachedLE := schemaversion.WrapLibraryElementProviderWithCache(underlyingLE, time.Minute)
	migration.ResetForTesting()
	migration.Initialize(cachedDS, cachedLE, migration.DefaultCacheTTL)
	// Convert several dashboards in each of several namespaces.
	namespaces := []string{"default", "org-2", "org-3"}
	numDashboardsPerNs := 3
	for _, ns := range namespaces {
		for i := 0; i < numDashboardsPerNs; i++ {
			source := createTestV0Dashboard(ns, "Dashboard "+string(rune('A'+i)))
			target := &dashv2alpha1.Dashboard{}
			err := Convert_V0_to_V2alpha1(source, target, nil, cachedDS, cachedLE)
			require.NoError(t, err, "conversion for namespace %s should succeed", ns)
		}
	}
	// With caching, each namespace should result in one call to the underlying provider
	expectedCalls := int64(len(namespaces))
	assert.Equal(t, expectedCalls, underlyingDS.getCallCount(),
		"datasource provider should be called once per namespace (%d namespaces)", len(namespaces))
	assert.Equal(t, expectedCalls, underlyingLE.getCallCount(),
		"library element provider should be called once per namespace (%d namespaces)", len(namespaces))
}
// TestConversionCaching_CacheDisabled verifies that TTL=0 disables caching
func TestConversionCaching_CacheDisabled(t *testing.T) {
	datasources := []schemaversion.DataSourceInfo{
		{UID: "ds1", Type: "prometheus", Name: "Prometheus", Default: true},
	}
	elements := []schemaversion.LibraryElementInfo{
		{UID: "lib-panel-repeat-h", Name: "Library Panel with Horizontal Repeat", Type: "timeseries"},
		{UID: "lib-panel-no-repeat", Name: "Library Panel without Repeat", Type: "graph"},
	}
	underlyingDS := newCountingDataSourceProvider(datasources)
	underlyingLE := newCountingLibraryElementProvider(elements)
	// TTL of 0 should disable caching - the wrapper returns the underlying provider directly
	cachedDS := schemaversion.WrapIndexProviderWithCache(underlyingDS, 0)
	cachedLE := schemaversion.WrapLibraryElementProviderWithCache(underlyingLE, 0)
	migration.ResetForTesting()
	migration.Initialize(cachedDS, cachedLE, migration.DefaultCacheTTL)
	numDashboards := 3
	namespace := "default"
	for i := 0; i < numDashboards; i++ {
		source := createTestV0Dashboard(namespace, "Dashboard "+string(rune('A'+i)))
		target := &dashv2alpha1.Dashboard{}
		err := Convert_V0_to_V2alpha1(source, target, nil, cachedDS, cachedLE)
		require.NoError(t, err, "conversion %d should succeed", i)
	}
	// Without caching, each conversion calls the underlying provider multiple times
	// (once for each datasource lookup needed - variables and panels)
	// The key check is that the count is GREATER than 1 per conversion (no caching benefit)
	assert.Greater(t, underlyingDS.getCallCount(), int64(numDashboards),
		"with cache disabled, conversions should call datasource provider multiple times")
	// Library element provider is also called for each conversion without caching
	assert.GreaterOrEqual(t, underlyingLE.getCallCount(), int64(numDashboards),
		"with cache disabled, conversions should call library element provider multiple times")
}
## Input
```javascript
function Component(props) {
const items = (() => {
if (props.cond) {
return [];
} else {
return null;
}
})();
items?.push(props.a);
return items;
}
export const FIXTURE_ENTRYPOINT = {
fn: Component,
params: [{a: {}}],
};
```
## Code
```javascript
import { c as _c } from "react/compiler-runtime";
function Component(props) {
const $ = _c(3);
let items;
if ($[0] !== props.a || $[1] !== props.cond) {
let t0;
if (props.cond) {
t0 = [];
} else {
t0 = null;
}
items = t0;
items?.push(props.a);
$[0] = props.a;
$[1] = props.cond;
$[2] = items;
} else {
items = $[2];
}
return items;
}
export const FIXTURE_ENTRYPOINT = {
fn: Component,
params: [{ a: {} }],
};
```
### Eval output
(kind: ok) null | unknown | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/iife-return-modified-later-phi.expect.md |
/*
* Copyright (c) 2017 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockitoinline;
import java.util.Collections;
import java.util.Set;
import org.junit.Test;
import org.mockito.internal.invocation.finder.AllInvocationsFinder;
import org.mockito.stubbing.Stubbing;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class StubbingLocationTest {
@Test
public void stubbing_location_should_be_the_correct_point() {
ConcreteClass mock = mock(ConcreteClass.class);
String frame;
// Initializing 'frame' at the method parameter point is to gain the exact line number of
// the stubbing point.
when(mock.concreteMethod(frame = Thread.currentThread().getStackTrace()[1].toString()))
.thenReturn("");
mock.concreteMethod(frame);
Set<Stubbing> stubbings = AllInvocationsFinder.findStubbings(Collections.singleton(mock));
assertEquals(1, stubbings.size());
String location = stubbings.iterator().next().getInvocation().getLocation().toString();
assertEquals("-> at " + frame, location);
}
static final class ConcreteClass {
String concreteMethod(String s) {
throw new RuntimeException(s);
}
}
} | java | github | https://github.com/mockito/mockito | mockito-integration-tests/inline-mocks-tests/src/test/java/org/mockitoinline/StubbingLocationTest.java |
#! /usr/bin/env python
# coding=utf-8
"""Plot recorded left/right motor speeds and print their steady-state means.

Reads a whitespace-separated log (columns: sample, time, left speed,
right speed), averages the steady-state window, and plots both motors.
"""
import matplotlib.pyplot as plt

path = "/home/nanook/nanook_ros/src/nanook_path_tracking/planta_motores/ensaios/ensaio_ajuste.txt"

samples = []
t = []
left_speed = []
right_speed = []

# ---- Read the log file (the handle is closed even if parsing fails) ----
with open(path, 'r') as f:
    for line in f:
        fields = line.split()
        if not fields:
            # Skip blank lines instead of crashing on an IndexError.
            continue
        samples.append(int(fields[0]))
        t.append(float(fields[1]))
        left_speed.append(float(fields[2]))
        right_speed.append(float(fields[3]))

# ---- Steady-state mean: drop the transient head and the noisy tail ----
n = len(samples)
s0 = 50
sf = n - 10
left_avg = left_speed[s0:sf + 1]
right_avg = right_speed[s0:sf + 1]
left_speed_avg = sum(left_avg) / len(left_avg)
right_speed_avg = sum(right_avg) / len(right_avg)
print('Média Esquerda = %f' % left_speed_avg)
print('Média Direita = %f' % right_speed_avg)

# ---- Plot both motors side by side ----
try:
    plt.close('all')
    plt.figure()
    plt.subplot(1, 2, 1)
    plt.plot(samples, left_speed, 'b-')
    plt.title('Motor Esquerdo')
    plt.xlabel('Amostra')
    plt.ylabel('Velocidade')
    plt.grid(True)  # boolean form; the 'on' string is deprecated in matplotlib
    plt.subplot(1, 2, 2)
    plt.plot(samples, right_speed, 'b-')
    plt.title('Motor Direito')
    plt.xlabel('Amostra')
    plt.ylabel('Velocidade')
    plt.grid(True)
    plt.show()
except KeyboardInterrupt:
    plt.close('all')
    raise SystemExit
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata consumed by Ansible's documentation and support tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_profile_dns
short_description: Manage DNS profiles on a BIG-IP
description:
  - Manage DNS profiles on a BIG-IP. There are many DNS profiles, each with its
    own adjustments to the standard C(dns) profile. Users of this module should be aware
    that many of the adjustable knobs have no module default. Instead, the default is
    assigned by the BIG-IP system itself which, in most cases, is acceptable.
version_added: 2.6
options:
name:
description:
- Specifies the name of the DNS profile.
type: str
required: True
parent:
description:
- Specifies the profile from which this profile inherits settings.
- When creating a new profile, if this parameter is not specified, the default
is the system-supplied C(dns) profile.
type: str
enable_dns_express:
description:
- Specifies whether the DNS Express engine is enabled.
- When creating a new profile, if this parameter is not specified, the default
is provided by the parent profile.
- The DNS Express engine receives zone transfers from the authoritative DNS server
for the zone. If the C(enable_zone_transfer) setting is also C(yes) on this profile,
the DNS Express engine also responds to zone transfer requests made by the nameservers
configured as zone transfer clients for the DNS Express zone.
type: bool
enable_zone_transfer:
description:
- Specifies whether the system answers zone transfer requests for a DNS zone created
on the system.
- When creating a new profile, if this parameter is not specified, the default
is provided by the parent profile.
- The C(enable_dns_express) and C(enable_zone_transfer) settings on a DNS profile
affect how the system responds to zone transfer requests.
- When the C(enable_dns_express) and C(enable_zone_transfer) settings are both C(yes),
if a zone transfer request matches a DNS Express zone, then DNS Express answers the
request.
- When the C(enable_dns_express) setting is C(no) and the C(enable_zone_transfer)
setting is C(yes), the BIG-IP system processes zone transfer requests based on the
last action and answers the request from local BIND or a pool member.
type: bool
enable_dnssec:
description:
- Specifies whether the system signs responses with DNSSEC keys and replies to DNSSEC
specific queries (e.g., DNSKEY query type).
- When creating a new profile, if this parameter is not specified, the default
is provided by the parent profile.
type: bool
enable_gtm:
description:
- Specifies whether the system uses Global Traffic Manager to manage the response.
- When creating a new profile, if this parameter is not specified, the default
is provided by the parent profile.
type: bool
process_recursion_desired:
description:
- Specifies whether to process client-side DNS packets with Recursion Desired set in
the header.
- When creating a new profile, if this parameter is not specified, the default
is provided by the parent profile.
- If set to C(no), processing of the packet is subject to the unhandled-query-action
option.
type: bool
use_local_bind:
description:
- Specifies whether the system forwards non-wide IP queries to the local BIND server
on the BIG-IP system.
- For best performance, disable this setting when using a DNS cache.
- When creating a new profile, if this parameter is not specified, the default
is provided by the parent profile.
type: bool
enable_dns_firewall:
description:
- Specifies whether DNS firewall capability is enabled.
- When creating a new profile, if this parameter is not specified, the default
is provided by the parent profile.
type: bool
enable_cache:
description:
- Specifies whether the system caches DNS responses.
- When creating a new profile, if this parameter is not specified, the default
is provided by the parent profile.
- When C(yes), the BIG-IP system caches DNS responses handled by the virtual
servers associated with this profile. When you enable this setting, you must
also specify a value for C(cache_name).
- When C(no), the BIG-IP system does not cache DNS responses handled by the
virtual servers associated with this profile. However, the profile retains
the association with the DNS cache in the C(cache_name) parameter. Disable
this setting when you want to debug the system.
type: bool
version_added: 2.7
cache_name:
description:
- Specifies the user-created cache that the system uses to cache DNS responses.
      - When you select a cache for the system to use, you must also set C(enable_cache)
        to C(yes).
type: str
version_added: 2.7
unhandled_query_action:
description:
- Specifies the action to take when a query does not match a Wide IP or a DNS Express Zone.
- When C(allow), the BIG-IP system forwards queries to a DNS server or pool member.
If a pool is not associated with a listener and the Use BIND Server on BIG-IP setting
is set to Enabled, requests are forwarded to the local BIND server.
- When C(drop), the BIG-IP system does not respond to the query.
- When C(reject), the BIG-IP system returns the query with the REFUSED return code.
- When C(hint), the BIG-IP system returns the query with a list of root name servers.
- When C(no-error), the BIG-IP system returns the query with the NOERROR return code.
- When creating a new profile, if this parameter is not specified, the default
is provided by the parent profile.
type: str
choices:
- allow
- drop
- reject
- hint
- no-error
version_added: 2.7
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(present), ensures that the profile exists.
- When C(absent), ensures the profile is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a DNS profile
bigip_profile_dns:
name: foo
enable_dns_express: no
enable_dnssec: no
enable_gtm: no
process_recursion_desired: no
use_local_bind: no
enable_dns_firewall: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
enable_dns_express:
description: Whether DNS Express is enabled on the resource or not.
returned: changed
type: bool
sample: yes
enable_zone_transfer:
  description: Whether zone transfers are enabled on the resource or not.
returned: changed
type: bool
sample: no
enable_dnssec:
description: Whether DNSSEC is enabled on the resource or not.
returned: changed
type: bool
sample: no
enable_gtm:
description: Whether GTM is used to manage the resource or not.
returned: changed
type: bool
sample: yes
process_recursion_desired:
description: Whether client-side DNS packets are processed with Recursion Desired set.
returned: changed
type: bool
sample: yes
use_local_bind:
description: Whether non-wide IP queries are forwarded to the local BIND server or not.
returned: changed
type: bool
sample: no
enable_dns_firewall:
description: Whether DNS firewall capability is enabled or not.
returned: changed
type: bool
sample: no
enable_cache:
description: Whether DNS caching is enabled or not.
returned: changed
type: bool
sample: no
cache_name:
description: Name of the cache used by DNS.
returned: changed
type: str
sample: /Common/cache1
unhandled_query_action:
description: What to do with unhandled queries
returned: changed
type: str
sample: allow
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
class Parameters(AnsibleF5Parameters):
    """Base parameter container shared by the adapter classes below.

    ``api_map`` translates the REST API's camelCase attribute names into the
    module's snake_case option names. The three lists declare which attributes
    are sent to the API (``api_attributes``), reported back to the user
    (``returnables``), and diffed on update (``updatables``).
    """
    # REST (camelCase) -> module (snake_case) name translation.
    api_map = {
        'enableDnsFirewall': 'enable_dns_firewall',
        'useLocalBind': 'use_local_bind',
        'processRd': 'process_recursion_desired',
        'enableGtm': 'enable_gtm',
        'enableDnssec': 'enable_dnssec',
        'processXfr': 'enable_zone_transfer',
        'enableDnsExpress': 'enable_dns_express',
        'defaultsFrom': 'parent',
        'enableCache': 'enable_cache',
        'cache': 'cache_name',
        'unhandledQueryAction': 'unhandled_query_action',
    }
    # Attributes included in payloads sent to the device (API spelling).
    api_attributes = [
        'enableDnsFirewall',
        'useLocalBind',
        'processRd',
        'enableGtm',
        'enableDnssec',
        'processXfr',
        'enableDnsExpress',
        'defaultsFrom',
        'cache',
        'enableCache',
        'unhandledQueryAction',
    ]
    # Attributes reported back to the user in the module result.
    returnables = [
        'enable_dns_firewall',
        'use_local_bind',
        'process_recursion_desired',
        'enable_gtm',
        'enable_dnssec',
        'enable_zone_transfer',
        'enable_dns_express',
        'cache_name',
        'enable_cache',
        'unhandled_query_action',
    ]
    # Attributes compared against device state to detect required updates.
    updatables = [
        'enable_dns_firewall',
        'use_local_bind',
        'process_recursion_desired',
        'enable_gtm',
        'enable_dnssec',
        'enable_zone_transfer',
        'enable_dns_express',
        'cache_name',
        'enable_cache',
        'unhandled_query_action',
    ]
class ApiParameters(Parameters):
    """Parameters as read back from the BIG-IP REST API.

    The API reports boolean-like settings as the strings ``'yes'``/``'no'``;
    each property below normalizes its setting to a real boolean, preserving
    ``None`` for attributes absent from the API response. The original
    implementation repeated the same three-branch conversion eight times;
    it is factored into a single helper here.
    """

    def _flag(self, key):
        # 'yes' -> True, anything else -> False; None stays None so that
        # "not present in the response" remains distinguishable.
        if self._values[key] is None:
            return None
        return self._values[key] == 'yes'

    @property
    def enable_dns_firewall(self):
        return self._flag('enable_dns_firewall')

    @property
    def use_local_bind(self):
        return self._flag('use_local_bind')

    @property
    def process_recursion_desired(self):
        return self._flag('process_recursion_desired')

    @property
    def enable_gtm(self):
        return self._flag('enable_gtm')

    @property
    def enable_cache(self):
        return self._flag('enable_cache')

    @property
    def enable_dnssec(self):
        return self._flag('enable_dnssec')

    @property
    def enable_zone_transfer(self):
        return self._flag('enable_zone_transfer')

    @property
    def enable_dns_express(self):
        return self._flag('enable_dns_express')

    @property
    def unhandled_query_action(self):
        # The API spells this value without a dash; the module's public
        # vocabulary uses 'no-error'.
        if self._values['unhandled_query_action'] is None:
            return None
        if self._values['unhandled_query_action'] == 'noerror':
            return 'no-error'
        return self._values['unhandled_query_action']
class ModuleParameters(Parameters):
    """Parameters as supplied by the user in the task arguments."""

    @property
    def parent(self):
        # Fully qualify the parent profile name with the partition.
        parent = self._values['parent']
        if parent is None:
            return None
        return fq_name(self.partition, parent)

    @property
    def cache_name(self):
        # An empty string is a deliberate "unset" marker and must pass
        # through untouched; any other value is partition-qualified.
        cache = self._values['cache_name']
        if cache is None:
            return None
        if cache == '':
            return ''
        return fq_name(self.partition, cache)
class Changes(Parameters):
    def to_return(self):
        """Collect all returnable attributes into a dict for module output.

        NOTE(review): the broad ``except Exception: pass`` silently yields a
        partial (or empty) result if any property raises. This mirrors the
        common F5 module boilerplate but can hide programming errors.
        """
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            pass
        return result
class UsableChanges(Changes):
    """Changes formatted for transmission to the BIG-IP REST API.

    The API expects boolean-like settings as the strings ``'yes'``/``'no'``,
    so each property below translates the module's booleans back to strings.
    The original implementation repeated the same three-branch conversion
    eight times; it is factored into a single helper here.
    """

    def _yes_no(self, key):
        # True -> 'yes', False -> 'no'; None means "no change" and is kept.
        if self._values[key] is None:
            return None
        return 'yes' if self._values[key] else 'no'

    @property
    def enable_dns_firewall(self):
        return self._yes_no('enable_dns_firewall')

    @property
    def use_local_bind(self):
        return self._yes_no('use_local_bind')

    @property
    def process_recursion_desired(self):
        return self._yes_no('process_recursion_desired')

    @property
    def enable_gtm(self):
        return self._yes_no('enable_gtm')

    @property
    def enable_cache(self):
        return self._yes_no('enable_cache')

    @property
    def enable_dnssec(self):
        return self._yes_no('enable_dnssec')

    @property
    def enable_zone_transfer(self):
        return self._yes_no('enable_zone_transfer')

    @property
    def enable_dns_express(self):
        return self._yes_no('enable_dns_express')

    @property
    def unhandled_query_action(self):
        # Module vocabulary 'no-error' maps to the API's 'noerror'.
        if self._values['unhandled_query_action'] is None:
            return None
        if self._values['unhandled_query_action'] == 'no-error':
            return 'noerror'
        return self._values['unhandled_query_action']
class ReportableChanges(Changes):
    # No reformatting is needed when reporting changes back to the user;
    # the inherited to_return() behavior is sufficient.
    pass
class Difference(object):
    """Computes which desired values differ from the current device state.

    A per-parameter comparator can be supplied as a property named after
    the parameter; ``compare`` falls back to a generic inequality check
    when no such property exists.
    """

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        # Prefer a custom comparator defined on this class; otherwise use
        # the generic comparison below.
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        wanted = getattr(self.want, param)
        try:
            if wanted != getattr(self.have, param):
                return wanted
        except AttributeError:
            # No current value on the device: the wanted value is a change.
            return wanted
        # Values already match: fall through and return None implicitly.
class ModuleManager(object):
    """Orchestrates the create/update/delete lifecycle of a DNS profile
    against the BIG-IP iControl REST API.
    """
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        # want: user-supplied state; have: current device state (filled on
        # update); changes: delta to push / report.
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()
    def _set_changed_options(self):
        # On create: every user-supplied (non-None) option counts as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)
    def _update_changed_options(self):
        # On update: diff wanted vs current values; True if anything must be
        # pushed to the device.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False
    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False
    def exec_module(self):
        """Entry point: dispatch on state and build the Ansible result dict."""
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result
    def _announce_deprecations(self, result):
        # Surface any deprecation warnings queued under '__warnings'.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )
    def present(self):
        if self.exists():
            return self.update()
        else:
            return self.create()
    def exists(self):
        """Return True if the profile already exists on the device."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/dns/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            # Non-JSON body: treat as "does not exist".
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True
    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        # A cache name must be available whenever caching is (or becomes)
        # enabled; an empty changed cache_name would clear it.
        if self.changes.enable_cache is True or self.have.enable_cache is True:
            if not self.have.cache_name or self.changes.cache_name == '':
                raise F5ModuleError(
                    "To enable DNS cache, a DNS cache must be specified."
                )
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True
    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        # Verify the delete actually took effect.
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True
    def create(self):
        self._set_changed_options()
        if self.want.enable_cache is True and not self.want.cache_name:
            raise F5ModuleError(
                "You must specify a 'cache_name' when creating a DNS profile that sets 'enable_cache' to 'yes'."
            )
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True
    def create_on_device(self):
        """POST the new profile to the device; raise F5ModuleError on API errors."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/dns/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403, 404]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def update_on_device(self):
        """PATCH only the changed attributes onto the existing profile."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/dns/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 404]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def absent(self):
        if self.exists():
            return self.remove()
        return False
    def remove_from_device(self):
        """DELETE the profile; raise F5ModuleError unless the API returns 200."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/dns/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)
    def read_current_from_device(self):
        """Fetch the profile's current configuration as ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/dns/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class ArgumentSpec(object):
    """Assembles the module's argument specification.

    Module-specific options are layered on top of the common F5 argument
    spec, so module options win on any name collision.
    """

    def __init__(self):
        self.supports_check_mode = True
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(
            name=dict(required=True),
            parent=dict(),
            enable_dns_express=dict(type='bool'),
            enable_zone_transfer=dict(type='bool'),
            enable_dnssec=dict(type='bool'),
            enable_gtm=dict(type='bool'),
            process_recursion_desired=dict(type='bool'),
            use_local_bind=dict(type='bool'),
            enable_dns_firewall=dict(type='bool'),
            enable_cache=dict(type='bool'),
            unhandled_query_action=dict(
                choices=['allow', 'drop', 'reject', 'hint', 'no-error']
            ),
            cache_name=dict(),
            state=dict(
                default='present',
                choices=['present', 'absent']
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            ),
        )
def main():
    """Module entry point: build the argument spec, run the manager, and
    exit with the results (or a failure message on any F5 error).
    """
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )
    try:
        mm = ModuleManager(module=module)
        results = mm.exec_module()
        module.exit_json(**results)
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))
if __name__ == '__main__':
    main()
import copy
import filecmp
import os
import tarfile
import zipfile
from collections import deque
from io import BytesIO
from unittest import mock
import pytest
from requests import compat
from requests._internal_utils import unicode_is_ascii
from requests.cookies import RequestsCookieJar
from requests.structures import CaseInsensitiveDict
from requests.utils import (
_parse_content_type_header,
add_dict_to_cookiejar,
address_in_network,
dotted_netmask,
extract_zipped_paths,
get_auth_from_url,
get_encoding_from_headers,
get_encodings_from_content,
get_environ_proxies,
get_netrc_auth,
guess_filename,
guess_json_utf,
is_ipv4_address,
is_valid_cidr,
iter_slices,
parse_dict_header,
parse_header_links,
prepend_scheme_if_needed,
requote_uri,
select_proxy,
set_environ,
should_bypass_proxies,
super_len,
to_key_val_list,
to_native_string,
unquote_header_value,
unquote_unreserved,
urldefragauth,
)
from .compat import StringIO, cStringIO
class TestSuperLen:
    """Tests for super_len(), which infers the remaining length of many
    object kinds: streams, real files, strings, and objects exposing
    ``__len__``, ``len``, ``tell`` or ``fileno``.
    """
    @pytest.mark.parametrize(
        "stream, value",
        (
            (StringIO.StringIO, "Test"),
            (BytesIO, b"Test"),
            pytest.param(
                cStringIO, "Test", marks=pytest.mark.skipif("cStringIO is None")
            ),
        ),
    )
    def test_io_streams(self, stream, value):
        """Ensures that we properly deal with different kinds of IO streams."""
        assert super_len(stream()) == 0
        assert super_len(stream(value)) == 4
    def test_super_len_correctly_calculates_len_of_partially_read_file(self):
        """Ensure that we handle partially consumed file like objects."""
        s = StringIO.StringIO()
        s.write("foobarbogus")
        # The cursor sits at the end after write(), so nothing remains to read.
        assert super_len(s) == 0
    @pytest.mark.parametrize("error", [IOError, OSError])
    def test_super_len_handles_files_raising_weird_errors_in_tell(self, error):
        """If tell() raises errors, assume the cursor is at position zero."""
        class BoomFile:
            def __len__(self):
                return 5
            def tell(self):
                raise error()
        assert super_len(BoomFile()) == 0
    @pytest.mark.parametrize("error", [IOError, OSError])
    def test_super_len_tell_ioerror(self, error):
        """Ensure that if tell gives an IOError super_len doesn't fail"""
        class NoLenBoomFile:
            def tell(self):
                raise error()
            def seek(self, offset, whence):
                pass
        assert super_len(NoLenBoomFile()) == 0
    def test_string(self):
        assert super_len("Test") == 4
    @pytest.mark.parametrize(
        "mode, warnings_num",
        (
            ("r", 1),
            ("rb", 0),
        ),
    )
    def test_file(self, tmpdir, mode, warnings_num, recwarn):
        # Text mode is expected to emit one warning (character count may not
        # equal byte count); binary mode must emit none.
        file_obj = tmpdir.join("test.txt")
        file_obj.write("Test")
        with file_obj.open(mode) as fd:
            assert super_len(fd) == 4
        assert len(recwarn) == warnings_num
    def test_tarfile_member(self, tmpdir):
        # Tar members are file-like but have no fileno(); length still works.
        file_obj = tmpdir.join("test.txt")
        file_obj.write("Test")
        tar_obj = str(tmpdir.join("test.tar"))
        with tarfile.open(tar_obj, "w") as tar:
            tar.add(str(file_obj), arcname="test.txt")
        with tarfile.open(tar_obj) as tar:
            member = tar.extractfile("test.txt")
            assert super_len(member) == 4
    def test_super_len_with__len__(self):
        foo = [1, 2, 3, 4]
        len_foo = super_len(foo)
        assert len_foo == 4
    def test_super_len_with_no__len__(self):
        # Objects exposing a plain ``len`` attribute are also supported.
        class LenFile:
            def __init__(self):
                self.len = 5
        assert super_len(LenFile()) == 5
    def test_super_len_with_tell(self):
        foo = StringIO.StringIO("12345")
        assert super_len(foo) == 5
        foo.read(2)
        # Only the unread remainder counts.
        assert super_len(foo) == 3
    def test_super_len_with_fileno(self):
        with open(__file__, "rb") as f:
            length = super_len(f)
            file_data = f.read()
        assert length == len(file_data)
    def test_super_len_with_no_matches(self):
        """Ensure that objects without any length methods default to 0"""
        assert super_len(object()) == 0
class TestGetNetrcAuth:
    """Tests for looking up credentials in a .netrc file."""

    def test_works(self, tmp_path, monkeypatch):
        netrc_path = tmp_path / ".netrc"
        monkeypatch.setenv("NETRC", str(netrc_path))
        netrc_path.write_text("machine example.com login aaaa password bbbb\n")
        assert get_netrc_auth("http://example.com/thing") == ("aaaa", "bbbb")

    def test_not_vulnerable_to_bad_url_parsing(self, tmp_path, monkeypatch):
        # A crafted userinfo section must not trick the host lookup.
        netrc_path = tmp_path / ".netrc"
        monkeypatch.setenv("NETRC", str(netrc_path))
        netrc_path.write_text("machine example.com login aaaa password bbbb\n")
        assert get_netrc_auth("http://example.com:@evil.com/'") is None

    def test_empty_default_credentials_ignored(self, tmp_path, monkeypatch):
        """Empty default credentials should not be returned."""
        netrc_path = tmp_path / ".netrc"
        monkeypatch.setenv("NETRC", str(netrc_path))
        netrc_path.write_text(
            "machine example.com login user password pass\ndefault\n"
        )
        assert get_netrc_auth("http://httpbin.org/") is None
class TestToKeyValList:
    """to_key_val_list() normalizes mappings and pair-iterables to a list
    of 2-tuples, passing None through unchanged."""
    @pytest.mark.parametrize(
        "value, expected",
        (
            ([("key", "val")], [("key", "val")]),
            ((("key", "val"),), [("key", "val")]),
            ({"key": "val"}, [("key", "val")]),
            (None, None),
        ),
    )
    def test_valid(self, value, expected):
        assert to_key_val_list(value) == expected
    def test_invalid(self):
        # Scalars such as plain strings cannot be converted.
        with pytest.raises(ValueError):
            to_key_val_list("string")
class TestUnquoteHeaderValue:
    """unquote_header_value() strips surrounding quotes and backslash
    escapes from HTTP header values."""
    @pytest.mark.parametrize(
        "value, expected",
        (
            (None, None),
            ("Test", "Test"),
            ('"Test"', "Test"),
            ('"Test\\\\"', "Test\\"),
            ('"\\\\Comp\\Res"', "\\Comp\\Res"),
        ),
    )
    def test_valid(self, value, expected):
        assert unquote_header_value(value) == expected
    def test_is_filename(self):
        # is_filename=True preserves leading UNC-style backslashes.
        assert unquote_header_value('"\\\\Comp\\Res"', True) == "\\\\Comp\\Res"
class TestGetEnvironProxies:
    """Ensures that IP addresses are correctly matched against the ranges
    in the no_proxy variable (and the no_proxy keyword override).
    """
    @pytest.fixture(autouse=True, params=["no_proxy", "NO_PROXY"])
    def no_proxy(self, request, monkeypatch):
        # Runs every test twice: once with the lowercase and once with the
        # uppercase environment variable name.
        monkeypatch.setenv(
            request.param, "192.168.0.0/24,127.0.0.1,localhost.localdomain,172.16.1.1"
        )
    @pytest.mark.parametrize(
        "url",
        (
            "http://192.168.0.1:5000/",
            "http://192.168.0.1/",
            "http://172.16.1.1/",
            "http://172.16.1.1:5000/",
            "http://localhost.localdomain:5000/v1.0/",
        ),
    )
    def test_bypass(self, url):
        assert get_environ_proxies(url, no_proxy=None) == {}
    @pytest.mark.parametrize(
        "url",
        (
            "http://192.168.1.1:5000/",
            "http://192.168.1.1/",
            "http://www.requests.com/",
        ),
    )
    def test_not_bypass(self, url):
        assert get_environ_proxies(url, no_proxy=None) != {}
    @pytest.mark.parametrize(
        "url",
        (
            "http://192.168.1.1:5000/",
            "http://192.168.1.1/",
            "http://www.requests.com/",
        ),
    )
    def test_bypass_no_proxy_keyword(self, url):
        no_proxy = "192.168.1.1,requests.com"
        assert get_environ_proxies(url, no_proxy=no_proxy) == {}
    @pytest.mark.parametrize(
        "url",
        (
            "http://192.168.0.1:5000/",
            "http://192.168.0.1/",
            "http://172.16.1.1/",
            "http://172.16.1.1:5000/",
            "http://localhost.localdomain:5000/v1.0/",
        ),
    )
    def test_not_bypass_no_proxy_keyword(self, url, monkeypatch):
        # This is testing that the 'no_proxy' argument overrides the
        # environment variable 'no_proxy'
        monkeypatch.setenv("http_proxy", "http://proxy.example.com:3128/")
        no_proxy = "192.168.1.1,requests.com"
        assert get_environ_proxies(url, no_proxy=no_proxy) != {}
class TestIsIPv4Address:
    """is_ipv4_address() detects dotted-quad IPv4 literals."""
    def test_valid(self):
        assert is_ipv4_address("8.8.8.8")
    @pytest.mark.parametrize("value", ("8.8.8.8.8", "localhost.localdomain"))
    def test_invalid(self, value):
        assert not is_ipv4_address(value)
class TestIsValidCIDR:
    """is_valid_cidr() validates 'address/prefix' notation, rejecting
    out-of-range prefixes and malformed addresses."""
    def test_valid(self):
        assert is_valid_cidr("192.168.1.0/24")
    @pytest.mark.parametrize(
        "value",
        (
            "8.8.8.8",
            "192.168.1.0/a",
            "192.168.1.0/128",
            "192.168.1.0/-1",
            "192.168.1.999/24",
        ),
    )
    def test_invalid(self, value):
        assert not is_valid_cidr(value)
class TestAddressInNetwork:
    """address_in_network() answers CIDR membership questions."""

    def test_valid(self):
        # 192.168.1.1 falls inside 192.168.1.0/24.
        inside = address_in_network("192.168.1.1", "192.168.1.0/24")
        assert inside

    def test_invalid(self):
        # 172.16.0.1 lies outside 192.168.1.0/24.
        outside = address_in_network("172.16.0.1", "192.168.1.0/24")
        assert not outside
class TestGuessFilename:
    """guess_filename() extracts a usable filename from objects exposing
    a string ``name`` attribute."""
    @pytest.mark.parametrize(
        "value",
        (1, type("Fake", (object,), {"name": 1})(),),
    )
    def test_guess_filename_invalid(self, value):
        # Non-string names (or objects without a name) yield None.
        assert guess_filename(value) is None
    @pytest.mark.parametrize(
        "value, expected_type",
        (
            (b"value", compat.bytes),
            (b"value".decode("utf-8"), compat.str),
        ),
    )
    def test_guess_filename_valid(self, value, expected_type):
        # The original str/bytes type of the name must be preserved.
        obj = type("Fake", (object,), {"name": value})()
        result = guess_filename(obj)
        assert result == value
        assert isinstance(result, expected_type)
class TestExtractZippedPaths:
    """extract_zipped_paths() copies members of a zip archive out to a real
    temporary path so they can be opened normally; non-zip paths pass
    through unchanged."""
    @pytest.mark.parametrize(
        "path",
        (
            "/",
            __file__,
            pytest.__file__,
            "/etc/invalid/location",
        ),
    )
    def test_unzipped_paths_unchanged(self, path):
        assert path == extract_zipped_paths(path)
    def test_zipped_paths_extracted(self, tmpdir):
        zipped_py = tmpdir.join("test.zip")
        with zipfile.ZipFile(zipped_py.strpath, "w") as f:
            f.write(__file__)
        _, name = os.path.splitdrive(__file__)
        zipped_path = os.path.join(zipped_py.strpath, name.lstrip(r"\/"))
        extracted_path = extract_zipped_paths(zipped_path)
        # Extraction yields a different, existing file with identical content.
        assert extracted_path != zipped_path
        assert os.path.exists(extracted_path)
        assert filecmp.cmp(extracted_path, __file__)
    def test_invalid_unc_path(self):
        path = r"\\localhost\invalid\location"
        assert extract_zipped_paths(path) == path
class TestContentEncodingDetection:
    """get_encodings_from_content() finds charset declarations embedded in
    HTML/XML markup."""
    def test_none(self):
        encodings = get_encodings_from_content("")
        assert not len(encodings)
    @pytest.mark.parametrize(
        "content",
        (
            # HTML5 meta charset attribute
            '<meta charset="UTF-8">',
            # HTML4 pragma directive
            '<meta http-equiv="Content-type" content="text/html;charset=UTF-8">',
            # XHTML 1.x served with text/html MIME type
            '<meta http-equiv="Content-type" content="text/html;charset=UTF-8" />',
            # XHTML 1.x served as XML
            '<?xml version="1.0" encoding="UTF-8"?>',
        ),
    )
    def test_pragmas(self, content):
        encodings = get_encodings_from_content(content)
        assert len(encodings) == 1
        assert encodings[0] == "UTF-8"
    def test_precedence(self):
        # HTML5 charset wins over the HTML4 pragma, which wins over XML.
        content = """
        <?xml version="1.0" encoding="XML"?>
        <meta charset="HTML5">
        <meta http-equiv="Content-type" content="text/html;charset=HTML4" />
        """.strip()
        assert get_encodings_from_content(content) == ["HTML5", "HTML4", "XML"]
class TestGuessJSONUTF:
    """guess_json_utf() infers the UTF variant of a JSON byte string from
    its BOM / leading null-byte pattern."""
    @pytest.mark.parametrize(
        "encoding",
        (
            "utf-32",
            "utf-8-sig",
            "utf-16",
            "utf-8",
            "utf-16-be",
            "utf-16-le",
            "utf-32-be",
            "utf-32-le",
        ),
    )
    def test_encoded(self, encoding):
        data = "{}".encode(encoding)
        assert guess_json_utf(data) == encoding
    def test_bad_utf_like_encoding(self):
        # All-null bytes match no valid JSON-in-UTF pattern.
        assert guess_json_utf(b"\x00\x00\x00\x00") is None
    @pytest.mark.parametrize(
        ("encoding", "expected"),
        (
            ("utf-16-be", "utf-16"),
            ("utf-16-le", "utf-16"),
            ("utf-32-be", "utf-32"),
            ("utf-32-le", "utf-32"),
        ),
    )
    def test_guess_by_bom(self, encoding, expected):
        # A BOM collapses the endianness-specific name to the generic one.
        data = "\ufeff{}".encode(encoding)
        assert guess_json_utf(data) == expected
# Credentials containing every URL-reserved character, plus their
# percent-encoded forms, to exercise userinfo extraction.
USER = PASSWORD = "%!*'();:@&=+$,/?#[] "
ENCODED_USER = compat.quote(USER, "")
ENCODED_PASSWORD = compat.quote(PASSWORD, "")
@pytest.mark.parametrize(
    "url, auth",
    (
        (
            f"http://{ENCODED_USER}:{ENCODED_PASSWORD}@request.com/url.html#test",
            (USER, PASSWORD),
        ),
        ("http://user:pass@complex.url.com/path?query=yes", ("user", "pass")),
        (
            "http://user:pass%20pass@complex.url.com/path?query=yes",
            ("user", "pass pass"),
        ),
        ("http://user:pass pass@complex.url.com/path?query=yes", ("user", "pass pass")),
        (
            "http://user%25user:pass@complex.url.com/path?query=yes",
            ("user%user", "pass"),
        ),
        (
            "http://user:pass%23pass@complex.url.com/path?query=yes",
            ("user", "pass#pass"),
        ),
        ("http://complex.url.com/path?query=yes", ("", "")),
    ),
)
def test_get_auth_from_url(url, auth):
    """Userinfo is percent-decoded; absent credentials yield ("", "")."""
    assert get_auth_from_url(url) == auth
@pytest.mark.parametrize(
    "uri, expected",
    (
        (
            # Ensure requoting doesn't break expectations
            "http://example.com/fiz?buz=%25ppicture",
            "http://example.com/fiz?buz=%25ppicture",
        ),
        (
            # Ensure we handle unquoted percent signs in redirects
            "http://example.com/fiz?buz=%ppicture",
            "http://example.com/fiz?buz=%25ppicture",
        ),
    ),
)
def test_requote_uri_with_unquoted_percents(uri, expected):
    """See: https://github.com/psf/requests/issues/2356"""
    # requote_uri must be idempotent on already-quoted input.
    assert requote_uri(uri) == expected
@pytest.mark.parametrize(
    "uri, expected",
    (
        (
            # Illegal bytes
            "http://example.com/?a=%--",
            "http://example.com/?a=%--",
        ),
        (
            # Reserved characters
            "http://example.com/?a=%300",
            "http://example.com/?a=00",
        ),
    ),
)
def test_unquote_unreserved(uri, expected):
    """Only unreserved characters are unquoted; invalid escapes pass through."""
    assert unquote_unreserved(uri) == expected
@pytest.mark.parametrize(
    "mask, expected",
    (
        (8, "255.0.0.0"),
        (24, "255.255.255.0"),
        (25, "255.255.255.128"),
    ),
)
def test_dotted_netmask(mask, expected):
    """CIDR prefix lengths convert to their dotted-quad netmask form."""
    assert dotted_netmask(mask) == expected
# Proxy maps used by test_select_proxies below: scheme-only keys,
# "all"-scheme keys, and a mixture of both, each with a host-specific
# override entry.
http_proxies = {
    "http": "http://http.proxy",
    "http://some.host": "http://some.host.proxy",
}
all_proxies = {
    "all": "socks5://http.proxy",
    "all://some.host": "socks5://some.host.proxy",
}
mixed_proxies = {
    "http": "http://http.proxy",
    "http://some.host": "http://some.host.proxy",
    "all": "socks5://http.proxy",
}
@pytest.mark.parametrize(
    "url, expected, proxies",
    (
        ("hTTp://u:p@Some.Host/path", "http://some.host.proxy", http_proxies),
        ("hTTp://u:p@Other.Host/path", "http://http.proxy", http_proxies),
        ("hTTp:///path", "http://http.proxy", http_proxies),
        ("hTTps://Other.Host", None, http_proxies),
        ("file:///etc/motd", None, http_proxies),
        ("hTTp://u:p@Some.Host/path", "socks5://some.host.proxy", all_proxies),
        ("hTTp://u:p@Other.Host/path", "socks5://http.proxy", all_proxies),
        ("hTTp:///path", "socks5://http.proxy", all_proxies),
        ("hTTps://Other.Host", "socks5://http.proxy", all_proxies),
        ("http://u:p@other.host/path", "http://http.proxy", mixed_proxies),
        ("http://u:p@some.host/path", "http://some.host.proxy", mixed_proxies),
        ("https://u:p@other.host/path", "socks5://http.proxy", mixed_proxies),
        ("https://u:p@some.host/path", "socks5://http.proxy", mixed_proxies),
        ("https://", "socks5://http.proxy", mixed_proxies),
        # XXX: unsure whether this is reasonable behavior
        ("file:///etc/motd", "socks5://http.proxy", all_proxies),
    ),
)
def test_select_proxies(url, expected, proxies):
    """Make sure we can select per-host proxies correctly.

    Per the cases above: scheme matching is case-insensitive, a
    scheme://host key beats a bare scheme key, and "all" serves as a
    catch-all fallback.
    """
    assert select_proxy(url, proxies) == expected
@pytest.mark.parametrize(
    "value, expected",
    (
        ('foo="is a fish", bar="as well"', {"foo": "is a fish", "bar": "as well"}),
        ("key_without_value", {"key_without_value": None}),
    ),
)
def test_parse_dict_header(value, expected):
    """Quoted values are unquoted; a bare key maps to None."""
    parsed = parse_dict_header(value)
    assert parsed == expected
@pytest.mark.parametrize(
    "value, expected",
    (
        ("application/xml", ("application/xml", {})),
        (
            "application/json ; charset=utf-8",
            ("application/json", {"charset": "utf-8"}),
        ),
        (
            "application/json ; Charset=utf-8",
            ("application/json", {"charset": "utf-8"}),
        ),
        ("text/plain", ("text/plain", {})),
        (
            "multipart/form-data; boundary = something ; boundary2='something_else' ; no_equals ",
            (
                "multipart/form-data",
                {
                    "boundary": "something",
                    "boundary2": "something_else",
                    "no_equals": True,
                },
            ),
        ),
        (
            'multipart/form-data; boundary = something ; boundary2="something_else" ; no_equals ',
            (
                "multipart/form-data",
                {
                    "boundary": "something",
                    "boundary2": "something_else",
                    "no_equals": True,
                },
            ),
        ),
        (
            "multipart/form-data; boundary = something ; 'boundary2=something_else' ; no_equals ",
            (
                "multipart/form-data",
                {
                    "boundary": "something",
                    "boundary2": "something_else",
                    "no_equals": True,
                },
            ),
        ),
        (
            'multipart/form-data; boundary = something ; "boundary2=something_else" ; no_equals ',
            (
                "multipart/form-data",
                {
                    "boundary": "something",
                    "boundary2": "something_else",
                    "no_equals": True,
                },
            ),
        ),
        ("application/json ; ; ", ("application/json", {})),
    ),
)
def test__parse_content_type_header(value, expected):
    """A Content-Type header splits into (media type, params dict).

    Per the cases above: parameter names are lower-cased, surrounding
    single/double quotes are stripped, whitespace around '=' is ignored,
    a parameter without '=' maps to True, and empty segments are dropped.
    """
    assert _parse_content_type_header(value) == expected
@pytest.mark.parametrize(
    "value, expected",
    (
        (CaseInsensitiveDict(), None),
        (
            CaseInsensitiveDict({"content-type": "application/json; charset=utf-8"}),
            "utf-8",
        ),
        (CaseInsensitiveDict({"content-type": "text/plain"}), "ISO-8859-1"),
    ),
)
def test_get_encoding_from_headers(value, expected):
    """Encoding comes from the Content-Type charset; text/plain without a
    charset falls back to ISO-8859-1, and missing headers yield None."""
    encoding = get_encoding_from_headers(value)
    assert encoding == expected
@pytest.mark.parametrize(
    "value, length",
    (
        ("", 0),
        ("T", 1),
        ("Test", 4),
        ("Cont", 0),
        ("Other", -5),
        ("Content", None),
    ),
)
def test_iter_slices(value, length):
    """iter_slices yields fixed-size chunks of a string.

    A slice length of None, or a non-positive length with non-empty
    input, must yield the entire string as a single slice.
    """
    if length is None or (length <= 0 and len(value) > 0):
        # Reads all content at once
        assert len(list(iter_slices(value, length))) == 1
    else:
        # With slice size 1, the number of chunks equals the string length.
        assert len(list(iter_slices(value, 1))) == length
@pytest.mark.parametrize(
    "value, expected",
    (
        (
            '<http:/.../front.jpeg>; rel=front; type="image/jpeg"',
            [{"url": "http:/.../front.jpeg", "rel": "front", "type": "image/jpeg"}],
        ),
        ("<http:/.../front.jpeg>", [{"url": "http:/.../front.jpeg"}]),
        ("<http:/.../front.jpeg>;", [{"url": "http:/.../front.jpeg"}]),
        (
            '<http:/.../front.jpeg>; type="image/jpeg",<http://.../back.jpeg>;',
            [
                {"url": "http:/.../front.jpeg", "type": "image/jpeg"},
                {"url": "http://.../back.jpeg"},
            ],
        ),
        ("", []),
    ),
)
def test_parse_header_links(value, expected):
    """Link-style headers parse into a list of dicts, each carrying the
    'url' plus any link parameters; an empty header yields an empty list.
    """
    assert parse_header_links(value) == expected
@pytest.mark.parametrize(
    "value, expected",
    (
        ("example.com/path", "http://example.com/path"),
        ("//example.com/path", "http://example.com/path"),
        ("example.com:80", "http://example.com:80"),
        (
            "http://user:pass@example.com/path?query",
            "http://user:pass@example.com/path?query",
        ),
        ("http://user@example.com/path?query", "http://user@example.com/path?query"),
    ),
)
def test_prepend_scheme_if_needed(value, expected):
    """The default scheme is added only when the URL does not already have one."""
    result = prepend_scheme_if_needed(value, "http")
    assert result == expected
@pytest.mark.parametrize(
    "value, expected",
    (
        ("T", "T"),
        (b"T", "T"),
        ("T", "T"),
    ),
)
def test_to_native_string(value, expected):
    """Both str and bytes inputs normalize to the native str type."""
    converted = to_native_string(value)
    assert converted == expected
@pytest.mark.parametrize(
    "url, expected",
    (
        ("http://u:p@example.com/path?a=1#test", "http://example.com/path?a=1"),
        ("http://example.com/path", "http://example.com/path"),
        ("//u:p@example.com/path", "//example.com/path"),
        ("//example.com/path", "//example.com/path"),
        ("example.com/path", "//example.com/path"),
        ("scheme:u:p@example.com/path", "scheme://example.com/path"),
    ),
)
def test_urldefragauth(url, expected):
    """Fragment and auth credentials are stripped from the URL."""
    stripped = urldefragauth(url)
    assert stripped == expected
@pytest.mark.parametrize(
    "url, expected",
    (
        ("http://192.168.0.1:5000/", True),
        ("http://192.168.0.1/", True),
        ("http://172.16.1.1/", True),
        ("http://172.16.1.1:5000/", True),
        ("http://localhost.localdomain:5000/v1.0/", True),
        ("http://google.com:6000/", True),
        ("http://172.16.1.12/", False),
        ("http://172.16.1.12:5000/", False),
        ("http://google.com:5000/v1.0/", False),
        ("file:///some/path/on/disk", True),
    ),
)
def test_should_bypass_proxies(url, expected, monkeypatch):
    """Tests for function should_bypass_proxies to check if proxy
    can be bypassed or not
    """
    # Set both the lower- and upper-case variants: the implementation may
    # consult either, and the value mixes CIDR ranges, plain hosts and a
    # host:port entry.
    monkeypatch.setenv(
        "no_proxy",
        "192.168.0.0/24,127.0.0.1,localhost.localdomain,172.16.1.1, google.com:6000",
    )
    monkeypatch.setenv(
        "NO_PROXY",
        "192.168.0.0/24,127.0.0.1,localhost.localdomain,172.16.1.1, google.com:6000",
    )
    # no_proxy=None means: fall back to the environment variables set above.
    assert should_bypass_proxies(url, no_proxy=None) == expected
@pytest.mark.parametrize(
    "url, expected",
    (
        ("http://172.16.1.1/", "172.16.1.1"),
        ("http://172.16.1.1:5000/", "172.16.1.1"),
        ("http://user:pass@172.16.1.1", "172.16.1.1"),
        ("http://user:pass@172.16.1.1:5000", "172.16.1.1"),
        ("http://hostname/", "hostname"),
        ("http://hostname:5000/", "hostname"),
        ("http://user:pass@hostname", "hostname"),
        ("http://user:pass@hostname:5000", "hostname"),
    ),
)
def test_should_bypass_proxies_pass_only_hostname(url, expected):
    """The proxy_bypass function should be called with a hostname or IP without
    a port number or auth credentials.
    """
    # Mock the platform proxy_bypass hook just to capture its argument.
    with mock.patch("requests.utils.proxy_bypass") as proxy_bypass:
        should_bypass_proxies(url, no_proxy=None)
        proxy_bypass.assert_called_once_with(expected)
@pytest.mark.parametrize(
    "cookiejar",
    (
        compat.cookielib.CookieJar(),
        RequestsCookieJar(),
    ),
)
def test_add_dict_to_cookiejar(cookiejar):
    """Ensure add_dict_to_cookiejar works for
    non-RequestsCookieJar CookieJars
    """
    expected = {"test": "cookies", "good": "cookies"}
    jar = add_dict_to_cookiejar(cookiejar, expected)
    stored = {cookie.name: cookie.value for cookie in jar}
    assert stored == expected
@pytest.mark.parametrize(
    "value, expected",
    (
        ("test", True),
        ("æíöû", False),
        ("ジェーピーニック", False),
    ),
)
def test_unicode_is_ascii(value, expected):
    """unicode_is_ascii returns True exactly for pure-ASCII strings."""
    result = unicode_is_ascii(value)
    assert result is expected
@pytest.mark.parametrize(
    "url, expected",
    (
        ("http://192.168.0.1:5000/", True),
        ("http://192.168.0.1/", True),
        ("http://172.16.1.1/", True),
        ("http://172.16.1.1:5000/", True),
        ("http://localhost.localdomain:5000/v1.0/", True),
        ("http://172.16.1.12/", False),
        ("http://172.16.1.12:5000/", False),
        ("http://google.com:5000/v1.0/", False),
    ),
)
def test_should_bypass_proxies_no_proxy(url, expected, monkeypatch):
    """Tests for function should_bypass_proxies to check if proxy
    can be bypassed or not using the 'no_proxy' argument

    Unlike test_should_bypass_proxies above, the exclusion list is
    passed explicitly instead of via environment variables.
    """
    no_proxy = "192.168.0.0/24,127.0.0.1,localhost.localdomain,172.16.1.1"
    # Test 'no_proxy' argument
    assert should_bypass_proxies(url, no_proxy=no_proxy) == expected
@pytest.mark.skipif(os.name != "nt", reason="Test only on Windows")
@pytest.mark.parametrize(
    "url, expected, override",
    (
        ("http://192.168.0.1:5000/", True, None),
        ("http://192.168.0.1/", True, None),
        ("http://172.16.1.1/", True, None),
        ("http://172.16.1.1:5000/", True, None),
        ("http://localhost.localdomain:5000/v1.0/", True, None),
        ("http://172.16.1.22/", False, None),
        ("http://172.16.1.22:5000/", False, None),
        ("http://google.com:5000/v1.0/", False, None),
        ("http://mylocalhostname:5000/v1.0/", True, "<local>"),
        ("http://192.168.0.1/", False, ""),
    ),
)
def test_should_bypass_proxies_win_registry(url, expected, override, monkeypatch):
    """Tests for function should_bypass_proxies to check if proxy
    can be bypassed or not with Windows registry settings
    """
    if override is None:
        override = "192.168.*;127.0.0.1;localhost.localdomain;172.16.1.1"
    import winreg
    # Minimal stand-in for a winreg handle object.
    class RegHandle:
        def Close(self):
            pass
    ie_settings = RegHandle()
    # Alternates between 1 (int) and "1" (str) across calls, so both
    # registry value representations get exercised.
    proxyEnableValues = deque([1, "1"])
    def OpenKey(key, subkey):
        # Fake winreg.OpenKey: always hand back the single fake handle.
        return ie_settings
    def QueryValueEx(key, value_name):
        # Fake winreg.QueryValueEx serving the patched registry values.
        if key is ie_settings:
            if value_name == "ProxyEnable":
                # this could be a string (REG_SZ) or a 32-bit number (REG_DWORD)
                proxyEnableValues.rotate()
                return [proxyEnableValues[0]]
            elif value_name == "ProxyOverride":
                return [override]
    # Clear all proxy-related env vars so only the (faked) registry applies.
    monkeypatch.setenv("http_proxy", "")
    monkeypatch.setenv("https_proxy", "")
    monkeypatch.setenv("ftp_proxy", "")
    monkeypatch.setenv("no_proxy", "")
    monkeypatch.setenv("NO_PROXY", "")
    monkeypatch.setattr(winreg, "OpenKey", OpenKey)
    monkeypatch.setattr(winreg, "QueryValueEx", QueryValueEx)
    assert should_bypass_proxies(url, None) == expected
@pytest.mark.skipif(os.name != "nt", reason="Test only on Windows")
def test_should_bypass_proxies_win_registry_bad_values(monkeypatch):
    """Tests for function should_bypass_proxies to check if proxy
    can be bypassed or not with Windows invalid registry settings.

    A non-numeric ProxyEnable value must be treated as "proxy disabled",
    i.e. no bypass — the call must not raise.
    """
    import winreg
    # Minimal stand-in for a winreg handle object.
    class RegHandle:
        def Close(self):
            pass
    ie_settings = RegHandle()
    def OpenKey(key, subkey):
        return ie_settings
    def QueryValueEx(key, value_name):
        if key is ie_settings:
            if value_name == "ProxyEnable":
                # Invalid response; Should be an int or int-y value
                return [""]
            elif value_name == "ProxyOverride":
                return ["192.168.*;127.0.0.1;localhost.localdomain;172.16.1.1"]
    # Clear proxy env vars so only the (faked) registry is consulted.
    monkeypatch.setenv("http_proxy", "")
    monkeypatch.setenv("https_proxy", "")
    monkeypatch.setenv("no_proxy", "")
    monkeypatch.setenv("NO_PROXY", "")
    monkeypatch.setattr(winreg, "OpenKey", OpenKey)
    monkeypatch.setattr(winreg, "QueryValueEx", QueryValueEx)
    assert should_bypass_proxies("http://172.16.1.1/", None) is False
@pytest.mark.parametrize(
    "env_name, value",
    (
        ("no_proxy", "192.168.0.0/24,127.0.0.1,localhost.localdomain"),
        ("no_proxy", None),
        ("a_new_key", "192.168.0.0/24,127.0.0.1,localhost.localdomain"),
        ("a_new_key", None),
    ),
)
def test_set_environ(env_name, value):
    """Tests set_environ will set environ values and will restore the environ."""
    environ_copy = copy.deepcopy(os.environ)
    with set_environ(env_name, value):
        # Inside the context the variable holds the requested value
        # (os.environ.get returns None when value is None, i.e. unset).
        assert os.environ.get(env_name) == value
    # On exit the environment must be restored exactly.
    assert os.environ == environ_copy
def test_set_environ_raises_exception():
    """set_environ must propagate exceptions raised inside its context
    when the value parameter is None."""
    with pytest.raises(Exception) as exc_info:
        with set_environ("test1", None):
            raise Exception("Expected exception")
    message = str(exc_info.value)
    assert "Expected exception" in message
@pytest.mark.skipif(os.name != "nt", reason="Test only on Windows")
def test_should_bypass_proxies_win_registry_ProxyOverride_value(monkeypatch):
    """Tests for function should_bypass_proxies to check if proxy
    can be bypassed or not with Windows ProxyOverride registry value ending with a semicolon.

    The trailing ";" and the special "<-loopback>" token must not break
    parsing, and a non-matching host must not be bypassed.
    """
    import winreg
    # Minimal stand-in for a winreg handle object.
    class RegHandle:
        def Close(self):
            pass
    ie_settings = RegHandle()
    def OpenKey(key, subkey):
        return ie_settings
    def QueryValueEx(key, value_name):
        if key is ie_settings:
            if value_name == "ProxyEnable":
                return [1]
            elif value_name == "ProxyOverride":
                return [
                    "192.168.*;127.0.0.1;localhost.localdomain;172.16.1.1;<-loopback>;"
                ]
    # Clear env exclusions so only the (faked) registry override applies.
    monkeypatch.setenv("NO_PROXY", "")
    monkeypatch.setenv("no_proxy", "")
    monkeypatch.setattr(winreg, "OpenKey", OpenKey)
    monkeypatch.setattr(winreg, "QueryValueEx", QueryValueEx)
    assert should_bypass_proxies("http://example.com/", None) is False | python | github | https://github.com/psf/requests | tests/test_utils.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""label_image for tflite."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
from PIL import Image
import tensorflow as tf # TF2
def load_labels(filename):
  """Read a labels file and return its lines, stripped, as a list."""
  with open(filename, 'r') as label_file:
    return [entry.strip() for entry in label_file]
if __name__ == '__main__':
  # Command-line driver: classify one image with a TFLite model and print
  # the top-5 labels with their scores.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '-i',
      '--image',
      default='/tmp/grace_hopper.bmp',
      help='image to be classified')
  parser.add_argument(
      '-m',
      '--model_file',
      default='/tmp/mobilenet_v1_1.0_224_quant.tflite',
      help='.tflite model to be executed')
  parser.add_argument(
      '-l',
      '--label_file',
      default='/tmp/labels.txt',
      help='name of file containing labels')
  parser.add_argument(
      '--input_mean',
      default=127.5, type=float,
      help='input_mean')
  parser.add_argument(
      '--input_std',
      default=127.5, type=float,
      help='input standard deviation')
  args = parser.parse_args()
  interpreter = tf.lite.Interpreter(model_path=args.model_file)
  interpreter.allocate_tensors()
  input_details = interpreter.get_input_details()
  output_details = interpreter.get_output_details()
  # check the type of the input tensor
  floating_model = input_details[0]['dtype'] == np.float32
  # NxHxWxC, H:1, W:2
  height = input_details[0]['shape'][1]
  width = input_details[0]['shape'][2]
  img = Image.open(args.image).resize((width, height))
  # add N dim
  input_data = np.expand_dims(img, axis=0)
  if floating_model:
    # Normalize float inputs with the supplied mean/std.
    input_data = (np.float32(input_data) - args.input_mean) / args.input_std
  interpreter.set_tensor(input_details[0]['index'], input_data)
  interpreter.invoke()
  output_data = interpreter.get_tensor(output_details[0]['index'])
  results = np.squeeze(output_data)
  # Indices of the five highest scores, best first.
  top_k = results.argsort()[-5:][::-1]
  labels = load_labels(args.label_file)
  for i in top_k:
    if floating_model:
      print('{:08.6f}: {}'.format(float(results[i]), labels[i]))
    else:
      # Quantized output: divide by 255 to map scores into [0, 1]
      # (assumes uint8 output — TODO confirm against the model).
      print('{:08.6f}: {}'.format(float(results[i] / 255.0), labels[i])) | unknown | codeparrot/codeparrot-clean | |
#!/usr/bin/env python
#----------------------------------------------------------------------------
# 04-Mar-2014 ShaneG
#
# Utility to dump the contents of a device.
#----------------------------------------------------------------------------
import sys
from os.path import splitext, basename
from intelhex import IntelHex
from microboot import Microboot, MicrobootException
from mbutil import beginProgress, updateProgress, endProgress
#--- Banner and usage information
BANNER = """
mbdump.py - Microboot/Microboard Firmware Dump Utility
Copyright (c) 2014, The Garage Lab. All Rights Reserved.
"""
USAGE = """
Usage:
%s options [filename]
Options:
-d,--device name Specify the expected device, eg: attiny85,atmega8. This
is required.
-p,--port name Specify the name of the serial port to use for communication.
If not specified the port /dev/ttyUSB0 will be used.
--log Log all communications to the file 'transfer.log'
If a filename is not specified the output will be saved in the file 'device'.hex,
eg atmega8.hex if the device is an atmega8.
"""
#--- Communications log
g_logFile = None
#----------------------------------------------------------------------------
# Helper functions
#----------------------------------------------------------------------------
def showUsage():
print USAGE.strip() % basename(sys.argv[0])
exit(1)
def logFunction(send, recv):
  # Record one request/response exchange in the transfer log.
  # '>' marks data sent to the device, '<' marks data received.
  for prefix, data in ((">", send), ("<", recv)):
    g_logFile.write(prefix + data)
  g_logFile.flush()
#----------------------------------------------------------------------------
# Main program
#----------------------------------------------------------------------------
if __name__ == "__main__":
print BANNER.strip() + "\n"
# Set up defaults
device = None
port = "/dev/ttyUSB0"
filename = None
# Process command line arguments
index = 1
while index < len(sys.argv):
arg = sys.argv[index]
if (arg == "--device") or (arg == "-d"):
index = index + 1
device = sys.argv[index]
elif (arg == "--port") or (arg == "-p"):
index = index + 1
port = sys.argv[index]
elif (arg == "--log"):
g_logFile = open("transfer.log", "w")
else:
filename = arg
index = index + 1
if index <> len(sys.argv):
print "Error: Filename must be the last parameter.\n"
showUsage()
index = index + 1
# Rationalise parameters
if device is None:
print "Error: You must specify a device.\n"
showUsage()
if filename is None:
filename = device + ".hex"
# Add default extension to filename
name, ext = splitext(filename)
if ext == "":
filename = name + ".hex"
# Set up the device interface
mb = Microboot()
info = mb.getDeviceInfo(device)
if info is None:
print "Unsupported device type '%s'." % device
# Show what we are doing
size = info[4] - info[3] + 1
print "Reading %d bytes (0x%04X:0x%04X) from '%s' on '%s'." % (size, info[3], info[4], device, port)
# Set up logging if requested
if g_logFile is not None:
mb.logger = logFunction
# Connect to the device
mb.connect(device, port)
# Read everything
data = None
beginProgress("Reading")
try:
data = mb.read(info[3], size, updateProgress)
except Exception, ex:
endProgress()
print "Error: Reading failed, error message is:"
print " " + str(ex)
exit(1)
endProgress()
mb.disconnect()
# Create the HEX file
hexfile = IntelHex()
address = info[3]
for val in data:
hexfile[address] = val
address = address + 1
hexfile.tofile(filename, "hex")
print "Output written to '%s'." % filename | unknown | codeparrot/codeparrot-clean | ||
---
applies_to:
stack:
serverless:
navigation_title: "Percolator"
mapped_pages:
- https://www.elastic.co/guide/en/elasticsearch/reference/current/percolator.html
---
# Percolator field type [percolator]
The `percolator` field type parses a json structure into a native query and stores that query, so that the [percolate query](/reference/query-languages/query-dsl/query-dsl-percolate-query.md) can use it to match provided documents.
Any field that contains a json object can be configured to be a percolator field. The percolator field type has no settings. Just configuring the `percolator` field type is sufficient to instruct Elasticsearch to treat a field as a query.
If the following mapping configures the `percolator` field type for the `query` field:
```console
PUT my-index-000001
{
"mappings": {
"properties": {
"query": {
"type": "percolator"
},
"field": {
"type": "text"
}
}
}
}
```
% TESTSETUP
Then you can index a query:
```console
PUT my-index-000001/_doc/match_value
{
"query": {
"match": {
"field": "value"
}
}
}
```
::::{important}
Fields referred to in a percolator query must **already** exist in the mapping associated with the index used for percolation. In order to make sure these fields exist, add or update a mapping via the [create index](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-create) or [update mapping](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-mapping) APIs.
::::
## Reindexing your percolator queries [_reindexing_your_percolator_queries]
Reindexing percolator queries is sometimes required to benefit from improvements made to the `percolator` field type in new releases.
Percolator queries can be reindexed by using the [reindex api](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-reindex). Let's take a look at the following index with a percolator field type:
```console
PUT index
{
"mappings": {
"properties": {
"query" : {
"type" : "percolator"
},
"body" : {
"type": "text"
}
}
}
}
POST _aliases
{
"actions": [
{
"add": {
"index": "index",
"alias": "queries" <1>
}
}
]
}
PUT queries/_doc/1?refresh
{
"query" : {
"match" : {
"body" : "quick brown fox"
}
}
}
```
% TEST[continued]
1. It is always recommended to define an alias for your index, so that in case of a reindex systems / applications don’t need to be changed to know that the percolator queries are now in a different index.
Lets say you’re going to upgrade to a new major version and in order for the new Elasticsearch version to still be able to read your queries you need to reindex your queries into a new index on the current Elasticsearch version:
```console
PUT new_index
{
"mappings": {
"properties": {
"query" : {
"type" : "percolator"
},
"body" : {
"type": "text"
}
}
}
}
POST /_reindex?refresh
{
"source": {
"index": "index"
},
"dest": {
"index": "new_index"
}
}
POST _aliases
{
"actions": [ <1>
{
"remove": {
"index" : "index",
"alias": "queries"
}
},
{
"add": {
"index": "new_index",
"alias": "queries"
}
}
]
}
```
% TEST[continued]
1. If you have an alias don’t forget to point it to the new index.
Executing the `percolate` query via the `queries` alias:
```console
GET /queries/_search
{
"query": {
"percolate" : {
"field" : "query",
"document" : {
"body" : "fox jumps over the lazy dog"
}
}
}
}
```
% TEST[continued]
now returns matches from the new index:
```console-result
{
"took": 3,
"timed_out": false,
"_shards": {
"total": 1,
"successful": 1,
"skipped" : 0,
"failed": 0
},
"hits": {
"total" : {
"value": 1,
"relation": "eq"
},
"max_score": 0.13076457,
"hits": [
{
"_index": "new_index", <1>
"_id": "1",
"_score": 0.13076457,
"_source": {
"query": {
"match": {
"body": "quick brown fox"
}
}
},
"fields" : {
"_percolator_document_slot" : [0]
}
}
]
}
}
```
% TESTRESPONSE[s/"took": 3,/"took": "$body.took",/]
1. Percolator query hit is now being presented from the new index.
## Optimizing query time text analysis [_optimizing_query_time_text_analysis]
When the percolator verifies a percolator candidate match it is going to parse, perform query time text analysis and actually run the percolator query on the document being percolated. This is done for each candidate match and every time the `percolate` query executes. If your query time text analysis is relatively expensive part of query parsing then text analysis can become the dominating factor time is being spent on when percolating. This query parsing overhead can become noticeable when the percolator ends up verifying many candidate percolator query matches.
To avoid the most expensive part of text analysis at percolate time, one can choose to do the expensive part of text analysis when indexing the percolator query. This requires using two different analyzers. The first analyzer actually performs the text analysis that needs to be performed (the expensive part). The second analyzer (usually whitespace) just splits the generated tokens that the first analyzer has produced. Then before indexing a percolator query, the analyze api should be used to analyze the query text with the more expensive analyzer. The result of the analyze api, the tokens, should be used to substitute the original query text in the percolator query. It is important that the query should now be configured to override the analyzer from the mapping and just use the second analyzer. Most text based queries support an `analyzer` option (`match`, `query_string`, `simple_query_string`). Using this approach the expensive text analysis is performed once instead of many times.
Lets demonstrate this workflow via a simplified example.
Lets say we want to index the following percolator query:
```js
{
"query" : {
"match" : {
"body" : {
"query" : "missing bicycles"
}
}
}
}
```
% NOTCONSOLE
with these settings and mapping:
```console
PUT /test_index
{
"settings": {
"analysis": {
"analyzer": {
"my_analyzer" : {
"tokenizer": "standard",
"filter" : ["lowercase", "porter_stem"]
}
}
}
},
"mappings": {
"properties": {
"query" : {
"type": "percolator"
},
"body" : {
"type": "text",
"analyzer": "my_analyzer" <1>
}
}
}
}
```
% TEST[continued]
1. For the purpose of this example, this analyzer is considered expensive.
First we need to use the analyze api to perform the text analysis prior to indexing:
```console
POST /test_index/_analyze
{
"analyzer" : "my_analyzer",
"text" : "missing bicycles"
}
```
% TEST[continued]
This results the following response:
```console-result
{
"tokens": [
{
"token": "miss",
"start_offset": 0,
"end_offset": 7,
"type": "<ALPHANUM>",
"position": 0
},
{
"token": "bicycl",
"start_offset": 8,
"end_offset": 16,
"type": "<ALPHANUM>",
"position": 1
}
]
}
```
All the tokens in the returned order need to replace the query text in the percolator query:
```console
PUT /test_index/_doc/1?refresh
{
"query" : {
"match" : {
"body" : {
"query" : "miss bicycl",
"analyzer" : "whitespace" <1>
}
}
}
}
```
% TEST[continued]
1. It is important to select a whitespace analyzer here, otherwise the analyzer defined in the mapping will be used, which defeats the point of using this workflow. Note that `whitespace` is a built-in analyzer, if a different analyzer needs to be used, it needs to be configured first in the index’s settings.
The analyze api prior to the indexing the percolator flow should be done for each percolator query.
At percolate time nothing changes and the `percolate` query can be defined normally:
```console
GET /test_index/_search
{
"query": {
"percolate" : {
"field" : "query",
"document" : {
"body" : "Bycicles are missing"
}
}
}
}
```
% TEST[continued]
This results in a response like this:
```console-result
{
"took": 6,
"timed_out": false,
"_shards": {
"total": 1,
"successful": 1,
"skipped" : 0,
"failed": 0
},
"hits": {
"total" : {
"value": 1,
"relation": "eq"
},
"max_score": 0.13076457,
"hits": [
{
"_index": "test_index",
"_id": "1",
"_score": 0.13076457,
"_source": {
"query": {
"match": {
"body": {
"query": "miss bicycl",
"analyzer": "whitespace"
}
}
}
},
"fields" : {
"_percolator_document_slot" : [0]
}
}
]
}
}
```
% TESTRESPONSE[s/"took": 6,/"took": "$body.took",/]
## Optimizing wildcard queries. [_optimizing_wildcard_queries]
Wildcard queries are more expensive than other queries for the percolator, especially if the wildcard expressions are large.
In the case of `wildcard` queries with prefix wildcard expressions or just the `prefix` query, the `edge_ngram` token filter can be used to replace these queries with regular `term` query on a field where the `edge_ngram` token filter is configured.
Creating an index with custom analysis settings:
```console
PUT my_queries1
{
"settings": {
"analysis": {
"analyzer": {
"wildcard_prefix": { <1>
"type": "custom",
"tokenizer": "standard",
"filter": [
"lowercase",
"wildcard_edge_ngram"
]
}
},
"filter": {
"wildcard_edge_ngram": { <2>
"type": "edge_ngram",
"min_gram": 1,
"max_gram": 32
}
}
}
},
"mappings": {
"properties": {
"query": {
"type": "percolator"
},
"my_field": {
"type": "text",
"fields": {
"prefix": { <3>
"type": "text",
"analyzer": "wildcard_prefix",
"search_analyzer": "standard"
}
}
}
}
}
}
```
% TEST[continued]
1. The analyzer that generates the prefix tokens to be used at index time only.
2. Increase the `min_gram` and decrease `max_gram` settings based on your prefix search needs.
3. This multifield should be used to do the prefix search with a `term` or `match` query instead of a `prefix` or `wildcard` query.
Then instead of indexing the following query:
```js
{
"query": {
"wildcard": {
"my_field": "abc*"
}
}
}
```
% NOTCONSOLE
this query below should be indexed:
```console
PUT /my_queries1/_doc/1?refresh
{
"query": {
"term": {
"my_field.prefix": "abc"
}
}
}
```
% TEST[continued]
This way the second query can be handled more efficiently than the first query.
The following search request will match with the previously indexed percolator query:
```console
GET /my_queries1/_search
{
"query": {
"percolate": {
"field": "query",
"document": {
"my_field": "abcd"
}
}
}
}
```
% TEST[continued]
```console-result
{
"took": 6,
"timed_out": false,
"_shards": {
"total": 1,
"successful": 1,
"skipped": 0,
"failed": 0
},
"hits": {
"total" : {
"value": 1,
"relation": "eq"
},
"max_score": 0.18864399,
"hits": [
{
"_index": "my_queries1",
"_id": "1",
"_score": 0.18864399,
"_source": {
"query": {
"term": {
"my_field.prefix": "abc"
}
}
},
"fields": {
"_percolator_document_slot": [
0
]
}
}
]
}
}
```
% TESTRESPONSE[s/"took": 6,/"took": "$body.took",/]
The same technique can also be used to speed up suffix wildcard searches. By using the `reverse` token filter before the `edge_ngram` token filter.
```console
PUT my_queries2
{
"settings": {
"analysis": {
"analyzer": {
"wildcard_suffix": {
"type": "custom",
"tokenizer": "standard",
"filter": [
"lowercase",
"reverse",
"wildcard_edge_ngram"
]
},
"wildcard_suffix_search_time": {
"type": "custom",
"tokenizer": "standard",
"filter": [
"lowercase",
"reverse"
]
}
},
"filter": {
"wildcard_edge_ngram": {
"type": "edge_ngram",
"min_gram": 1,
"max_gram": 32
}
}
}
},
"mappings": {
"properties": {
"query": {
"type": "percolator"
},
"my_field": {
"type": "text",
"fields": {
"suffix": {
"type": "text",
"analyzer": "wildcard_suffix",
"search_analyzer": "wildcard_suffix_search_time" <1>
}
}
}
}
}
}
```
% TEST[continued]
1. A custom analyzer is needed at search time too, because otherwise the query terms are not reversed and would not match the reversed suffix tokens.
Then instead of indexing the following query:
```js
{
"query": {
"wildcard": {
"my_field": "*xyz"
}
}
}
```
% NOTCONSOLE
the following query below should be indexed:
```console
PUT /my_queries2/_doc/2?refresh
{
"query": {
"match": { <1>
"my_field.suffix": "xyz"
}
}
}
```
% TEST[continued]
1. The `match` query should be used instead of the `term` query, because text analysis needs to reverse the query terms.
The following search request will match with the previously indexed percolator query:
```console
GET /my_queries2/_search
{
"query": {
"percolate": {
"field": "query",
"document": {
"my_field": "wxyz"
}
}
}
}
```
% TEST[continued]
## Dedicated Percolator Index [_dedicated_percolator_index]
Percolate queries can be added to any index. Instead of adding percolate queries to the index the data resides in, these queries can also be added to a dedicated index. The advantage of this is that this dedicated percolator index can have its own index settings (For example the number of primary and replica shards). If you choose to have a dedicated percolate index, you need to make sure that the mappings from the normal index are also available on the percolate index. Otherwise percolate queries can be parsed incorrectly.
## Forcing Unmapped Fields to be Handled as Strings [_forcing_unmapped_fields_to_be_handled_as_strings]
In certain cases it is unknown what kind of percolator queries do get registered, and if no field mapping exists for fields that are referred by percolator queries then adding a percolator query fails. This means the mapping needs to be updated to have the field with the appropriate settings, and then the percolator query can be added. But sometimes it is sufficient if all unmapped fields are handled as if these were default text fields. In those cases one can configure the `index.percolator.map_unmapped_fields_as_text` setting to `true` (default to `false`) and then if a field referred in a percolator query does not exist, it will be handled as a default text field so that adding the percolator query doesn’t fail.
## Limitations [_limitations_2]
### Parent/child [parent-child]
Because the `percolate` query is processing one document at a time, it doesn’t support queries and filters that run against child documents such as `has_child` and `has_parent`.
### Fetching queries [_fetching_queries]
There are a number of queries that fetch data via a get call during query parsing. For example the `terms` query when using terms lookup, `template` query when using indexed scripts and `geo_shape` when using pre-indexed shapes. When these queries are indexed by the `percolator` field type then the get call is executed once. So each time the `percolate` query evaluates these queries, the fetched terms, shapes, etc. as they were at index time will be used. Important to note is that the fetching of terms that these queries perform happens each time the percolator query gets indexed, on both primary and replica shards, so the terms that are actually indexed can be different between shard copies if the source index changed while indexing.
### Script query [_script_query]
The script inside a `script` query can only access doc values fields. The `percolate` query indexes the provided document into an in-memory index. This in-memory index doesn’t support stored fields and because of that the `_source` field and other stored fields are not stored. This is the reason why in the `script` query the `_source` and other stored fields aren’t available.
### Field aliases [_field_aliases]
Percolator queries that contain [field aliases](/reference/elasticsearch/mapping-reference/field-alias.md) may not always behave as expected. In particular, if a percolator query is registered that contains a field alias, and then that alias is updated in the mappings to refer to a different field, the stored query will still refer to the original target field. To pick up the change to the field alias, the percolator query must be explicitly reindexed. | unknown | github | https://github.com/elastic/elasticsearch | docs/reference/elasticsearch/mapping-reference/percolator.md |
/* Copyright (c) 2024, 2025, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.
This program is designed to work with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation. The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have either included with
the program or referenced in the documentation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License, version 2.0, for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#pragma once
#include <stdint.h>
// Constants shared by VECTOR column handling.
namespace vector_constants {
// Upper bound on the number of dimensions a VECTOR column may declare.
constexpr unsigned int max_dimensions = 16383;
}  // namespace vector_constants
/// Compute the number of components stored in a vector column value.
///
/// @param length    total byte length of the stored vector value
/// @param precision byte width of a single vector component
/// @return length / precision when length is a non-zero exact multiple of
///         precision; UINT32_MAX otherwise (the "invalid" sentinel), which
///         now also covers precision == 0.
static inline uint32_t get_dimensions(const uint32_t length,
                                      const uint32_t precision) {
  // Check precision == 0 first: "length % precision" (and the division
  // below) would otherwise be undefined behavior (division by zero).
  if (precision == 0 || length == 0 || (length % precision > 0)) {
    return UINT32_MAX;
  }
  return length / precision;
} | c | github | https://github.com/mysql/mysql-server | vector-common/vector_constants.h
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Date
"""
from dateutil import parser
from rebulk.remodule import re
# Characters accepted as separators between date components.
_dsep = r'[-/ \.]'
# Same separators plus 'x' (occasionally used around 4-digit years).
_dsep_bis = r'[-/ \.x]'
# Ordered date patterns tried by search_date(); group 1 always captures the
# full date text, later groups (if any) capture the individual components.
date_regexps = [
    # 8 consecutive digits between separators (e.g. ".20110101.").
    re.compile(r'%s((\d{8}))%s' % (_dsep, _dsep), re.IGNORECASE),
    # 6 consecutive digits between separators (e.g. ".110101.").
    re.compile(r'%s((\d{6}))%s' % (_dsep, _dsep), re.IGNORECASE),
    # 2-digit year first: YY-M-D.
    re.compile(r'(?:^|[^\d])((\d{2})%s(\d{1,2})%s(\d{1,2}))(?:$|[^\d])' % (_dsep, _dsep), re.IGNORECASE),
    # 2-digit year last: D-M-YY (or M-D-YY).
    re.compile(r'(?:^|[^\d])((\d{1,2})%s(\d{1,2})%s(\d{2}))(?:$|[^\d])' % (_dsep, _dsep), re.IGNORECASE),
    # 4-digit year first, allowing 'x' right after the year.
    re.compile(r'(?:^|[^\d])((\d{4})%s(\d{1,2})%s(\d{1,2}))(?:$|[^\d])' % (_dsep_bis, _dsep), re.IGNORECASE),
    # 4-digit year last, allowing 'x' right before the year.
    re.compile(r'(?:^|[^\d])((\d{1,2})%s(\d{1,2})%s(\d{4}))(?:$|[^\d])' % (_dsep, _dsep_bis), re.IGNORECASE),
    # Textual month, e.g. "1st January 2000" or "15.jan.2000".
    re.compile(r'(?:^|[^\d])((\d{1,2}(?:st|nd|rd|th)?%s(?:[a-z]{3,10})%s\d{4}))(?:$|[^\d])' % (_dsep, _dsep),
               re.IGNORECASE)]
def valid_year(year, min_year=1920, max_year=2030):
    """Check if ``year`` is a plausible release year.

    The bounds were previously hard-coded; they are now parameters with the
    original values as defaults, so existing callers are unaffected.

    :param year: year to validate
    :type year: int
    :param min_year: lowest acceptable year (inclusive)
    :type min_year: int
    :param max_year: highest acceptable year (exclusive)
    :type max_year: int
    :return: True if min_year <= year < max_year
    :rtype: bool
    """
    return min_year <= year < max_year
def _is_int(string):
"""
Check if the input string is an integer
:param string:
:type string:
:return:
:rtype:
"""
try:
int(string)
return True
except ValueError:
return False
def _guess_day_first_parameter(groups):
    """
    If day_first is not defined, use some heuristic to fix it.
    It helps to solve issues with python dateutils 2.5.3 parser changes.

    Falls through (returning ``None`` implicitly) when no heuristic applies,
    which makes search_date() try both dayfirst values.

    :param groups: match groups found for the date (component strings)
    :type groups: tuple of str
    :return: day_first option guessed value, or None when undecidable
    :rtype: bool or None
    """
    # If match starts with a long year, then day_first is forced to false.
    if _is_int(groups[0]) and valid_year(int(groups[0][:4])):
        return False
    # If match ends with a long year, then day_first is forced to true.
    elif _is_int(groups[-1]) and valid_year(int(groups[-1][-4:])):
        return True
    # If match starts with a short year (first two digits > 31, so it cannot
    # be a day), then day_first is forced to false.
    elif _is_int(groups[0]) and int(groups[0][:2]) > 31:
        return False
    # If match ends with a short year, then day_first is forced to true.
    elif _is_int(groups[-1]) and int(groups[-1][-2:]) > 31:
        return True
def search_date(string, year_first=None, day_first=None):
    """Looks for date patterns, and if found return the date and group span.
    Assumes there are sentinels at the beginning and end of the string that
    always allow matching a non-digit delimiting the date.
    Year can be defined on two digit only. It will return the nearest possible
    date from today.
    >>> search_date(' This happened on 2002-04-22. ')
    (18, 28, datetime.date(2002, 4, 22))
    >>> search_date(' And this on 17-06-1998. ')
    (13, 23, datetime.date(1998, 6, 17))
    >>> search_date(' no date in here ')
    """
    # Patterns are tried in declaration order; first regexp that yields a
    # plausible date wins.
    for date_re in date_regexps:
        search_match = date_re.search(string)
        if not search_match:
            continue
        start, end = search_match.start(1), search_match.end(1)
        groups = search_match.groups()[1:]
        # Normalize all component groups into a single dash-separated string
        # for dateutil.
        match = '-'.join(groups)
        # NOTE(review): str.join() always returns a str, so this guard is dead
        # code (it can never trigger); kept as-is for safety.
        if match is None:
            continue
        if year_first and day_first is None:
            day_first = False
        if day_first is None:
            # NOTE(review): this assignment persists across iterations of the
            # outer loop, so the heuristic result from the first matching
            # regexp sticks for subsequent patterns.
            day_first = _guess_day_first_parameter(groups)
        # If day_first/year_first is undefined, parse is made using both possible values.
        yearfirst_opts = [False, True]
        if year_first is not None:
            yearfirst_opts = [year_first]
        dayfirst_opts = [True, False]
        if day_first is not None:
            dayfirst_opts = [day_first]
        kwargs_list = ({'dayfirst': d, 'yearfirst': y}
                       for d in dayfirst_opts for y in yearfirst_opts)
        for kwargs in kwargs_list:
            try:
                date = parser.parse(match, **kwargs)
            except (ValueError, TypeError):  # pragma: no cover
                # see https://bugs.launchpad.net/dateutil/+bug/1247643
                date = None
            # check date plausibility
            if date and valid_year(date.year):  # pylint:disable=no-member
                return start, end, date.date()  # pylint:disable=no-member | unknown | codeparrot/codeparrot-clean | |
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing import Optional
from ..._models import BaseModel
from .realtime_transcription_session_audio_input import RealtimeTranscriptionSessionAudioInput
__all__ = ["RealtimeTranscriptionSessionAudio"]
class RealtimeTranscriptionSessionAudio(BaseModel):
    """Configuration for input and output audio."""

    # Input-audio configuration; ``None`` when the payload omits it.
    input: Optional[RealtimeTranscriptionSessionAudioInput] = None | python | github | https://github.com/openai/openai-python | src/openai/types/realtime/realtime_transcription_session_audio.py
# Copyright (c) 2013 - Oscar Campos <oscar.campos@member.fsf.org>
# See LICENSE for more details
"""
Tests for mamba.core.packages
"""
import sys
import tempfile
import functools
from twisted.trial import unittest
from twisted.python import filepath
from mamba.core import packages, GNU_LINUX
from mamba.application.model import ModelManager
from mamba.application.controller import ControllerManager
class PackagesManagerTest(unittest.TestCase):
    """Tests for :class:`mamba.core.packages.PackagesManager`."""

    def setUp(self):
        # Make the dummy application importable as the 'fakeshared' package.
        sys.path.append('../mamba/test/dummy_app')

    # Decorator defined in the class body (used at class-creation time, so it
    # needs no @staticmethod). It writes a temporary JSON config declaring the
    # 'fakeshared' package, builds a PackagesManager from it, injects it as the
    # 'manager' kwarg, and removes the config file afterwards.
    # NOTE(review): config_file.write() is given a str while
    # NamedTemporaryFile defaults to binary mode — this only works on
    # Python 2; confirm if this suite must run on Python 3.
    def configure_test(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            config_file = tempfile.NamedTemporaryFile(delete=False)
            config_file.write(
                '{'
                '    "packages": {'
                '        "fakeshared": {"autoimport": true,"use_scripts": true}'
                '    }'
                '}'
            )
            config_file.close()
            manager = packages.PackagesManager(config_file=config_file.name)
            if GNU_LINUX:
                # The managers start inotify notifiers on Linux; close them so
                # the twisted reactor is left clean after each test.
                mgr = manager.packages['fakeshared']['controller']
                self.addCleanup(mgr.notifier.loseConnection)
                mgr = manager.packages['fakeshared']['model']
                self.addCleanup(mgr.notifier.loseConnection)
            kwargs['manager'] = manager
            result = func(self, *args, **kwargs)
            filepath.FilePath(config_file.name).remove()
            return result
        return wrapper

    @configure_test
    def test_register_package(self, manager):
        # The config must be parsed and one package with both a controller
        # and a model manager must be registered.
        self.assertTrue(manager.config.loaded)
        self.assertEqual(
            manager.config.packages,
            {u'fakeshared': {u'autoimport': True, u'use_scripts': True}}
        )
        pkgs = manager.packages
        self.assertEqual(len(pkgs), 1)
        self.assertIsInstance(
            pkgs['fakeshared']['controller'], ControllerManager)
        self.assertIsInstance(pkgs['fakeshared']['model'], ModelManager)

    @configure_test
    def test_controller_is_packed(self, manager):
        mgr = manager.packages['fakeshared']['controller']
        self.assertEqual(mgr._package, 'fakeshared')

    @configure_test
    def test_controller_modulize_store(self, manager):
        mgr = manager.packages['fakeshared']['controller']
        self.assertEqual(mgr._modulize_store(), 'fakeshared.controller')

    @configure_test
    def test_model_is_packed(self, manager):
        mgr = manager.packages['fakeshared']['model']
        self.assertEqual(mgr._package, 'fakeshared')

    @configure_test
    def test_model_modulize_store(self, manager):
        mgr = manager.packages['fakeshared']['model']
        self.assertEqual(mgr._modulize_store(), 'fakeshared.model') | unknown | codeparrot/codeparrot-clean | |
package kotlinx.coroutines.rx3
import kotlinx.coroutines.testing.*
import io.reactivex.rxjava3.core.*
import io.reactivex.rxjava3.exceptions.*
import kotlinx.coroutines.*
import kotlinx.coroutines.flow.*
import kotlinx.coroutines.reactive.*
import org.junit.Test
import java.util.concurrent.Executors
import java.util.concurrent.TimeUnit
import kotlin.test.*
// Check that exception is not leaked to the global exception handler
class LeakedExceptionTest : TestBase() {
    // Installed as RxJava's global error handler for the duration of each
    // test: the only thing allowed to reach it is an UndeliverableException
    // wrapping our TestException.
    private val handler: (Throwable) -> Unit =
        { assertTrue { it is UndeliverableException && it.cause is TestException } }

    @Test
    fun testSingle() = withExceptionHandler(handler) {
        withFixedThreadPool(4) { dispatcher ->
            // The coroutine throws; repeated collection must not leak the
            // exception past the catch {} into the global handler.
            val flow = rxSingle<Unit>(dispatcher) { throw TestException() }.toFlowable().asFlow()
            runBlocking {
                repeat(10000) {
                    combine(flow, flow) { _, _ -> Unit }
                        .catch {}
                        .collect {}
                }
            }
        }
    }

    @Test
    fun testObservable() = withExceptionHandler(handler) {
        withFixedThreadPool(4) { dispatcher ->
            val flow = rxObservable<Unit>(dispatcher) { throw TestException() }
                .toFlowable(BackpressureStrategy.BUFFER)
                .asFlow()
            runBlocking {
                repeat(10000) {
                    combine(flow, flow) { _, _ -> Unit }
                        .catch {}
                        .collect {}
                }
            }
        }
    }

    @Test
    fun testFlowable() = withExceptionHandler(handler) {
        withFixedThreadPool(4) { dispatcher ->
            val flow = rxFlowable<Unit>(dispatcher) { throw TestException() }.asFlow()
            runBlocking {
                repeat(10000) {
                    combine(flow, flow) { _, _ -> Unit }
                        .catch {}
                        .collect {}
                }
            }
        }
    }

    /**
     * This test doesn't test much and was added to display a problem with straighforward use of
     * [withExceptionHandler].
     *
     * If one was to remove `dispatcher` and launch `rxFlowable` with an empty coroutine context,
     * this test would fail fairly often, while other tests were also vulnerable, but the problem is
     * much more difficult to reproduce. Thus, this test is a justification for adding `dispatcher`
     * to other tests.
     *
     * See the commit that introduced this test for a better explanation.
     */
    @Test
    fun testResettingExceptionHandler() = withExceptionHandler(handler) {
        withFixedThreadPool(4) { dispatcher ->
            val flow = rxFlowable<Unit>(dispatcher) {
                // Random delay makes the failure race the handler reset.
                if ((0..1).random() == 0) {
                    Thread.sleep(100)
                }
                throw TestException()
            }.asFlow()
            runBlocking {
                combine(flow, flow) { _, _ -> Unit }
                    .catch {}
                    .collect {}
            }
        }
    }

    /**
     * Run in a thread pool, then wait for all the tasks to finish.
     */
    private fun withFixedThreadPool(numberOfThreads: Int, block: (CoroutineDispatcher) -> Unit) {
        val pool = Executors.newFixedThreadPool(numberOfThreads)
        val dispatcher = pool.asCoroutineDispatcher()
        block(dispatcher)
        pool.shutdown()
        // Wait until the pool has fully terminated so no task outlives the
        // test (and the exception handler installed for it).
        while (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
            /* deliberately empty */
        }
    }
} | kotlin | github | https://github.com/Kotlin/kotlinx.coroutines | reactive/kotlinx-coroutines-rx3/test/LeakedExceptionTest.kt
#! -*- coding: utf8 -*-
from trytond.model import ModelView, ModelSQL, fields
__all__ = ['Company']
class Company(ModelSQL, ModelView):
'Company'
__name__ = 'company.company'
pyafipws_certificate = fields.Text('Certificado AFIP WS',
help="Certificado (.crt) de la empresa para webservices AFIP")
pyafipws_private_key = fields.Text('Clave Privada AFIP WS',
help="Clave Privada (.key) de la empresa para webservices AFIP")
pyafipws_mode_cert = fields.Selection([
('', 'n/a'),
('homologacion', u'Homologación'),
('produccion', u'Producción'),
], 'Modo de certificacion',
help=u"El objetivo de Homologación (testing), es facilitar las pruebas. Los certificados de Homologación y Producción son distintos.")
@staticmethod
def default_pyafipws_mode_cert():
return ''
@classmethod
def __setup__(cls):
super(Company, cls).__setup__()
cls._error_messages.update({
'wrong_pyafipws_mode': ('Problemas de Certificado: '
'"%(message)s".'),
})
@classmethod
def validate(cls, companies):
super(Company, cls).validate(companies)
for company in companies:
company.check_pyafipws_mode_cert()
def check_pyafipws_mode_cert(self):
if self.pyafipws_mode_cert == '':
return
auth_data = self.pyafipws_authenticate(service="wsfe", force=True)
if auth_data['err_msg'] != None:
self.raise_user_error('wrong_pyafipws_mode', {
'message': auth_data['err_msg'],
})
def pyafipws_authenticate(self, service="wsfe", force=False):
"Authenticate against AFIP, returns token, sign, err_msg (dict)"
import afip_auth
auth_data = {}
# get the authentication credentials:
certificate = str(self.pyafipws_certificate)
private_key = str(self.pyafipws_private_key)
if self.pyafipws_mode_cert == 'homologacion':
WSAA_URL = "https://wsaahomo.afip.gov.ar/ws/services/LoginCms?wsdl"
elif self.pyafipws_mode_cert == 'produccion':
WSAA_URL = "https://wsaa.afip.gov.ar/ws/services/LoginCms?wsdl"
else:
self.raise_user_error('wrong_pyafipws_mode', {
'message': u'El modo de certificación no es ni producción, ni homologación. Configure su Empresa',
})
# call the helper function to obtain the access ticket:
auth = afip_auth.authenticate(service, certificate, private_key, wsdl=WSAA_URL, force=force)
auth_data.update(auth)
return auth_data | unknown | codeparrot/codeparrot-clean | ||
# coding: utf-8
"""
Tests for FileBrowser sites and their views.
Note that we *dynamically generate* test cases for each deployed FileBrowser
site. This includes creation of TestCase subclasses at runtime and also
creation of instance methods from functions.
"""
# PYTHON IMPORTS
from __future__ import with_statement
import os
import sys
import json
# DJANGO IMPORTS
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import get_resolver, get_urlconf, reverse
try:
from django.utils.six.moves.urllib.parse import urlencode
except:
from django.utils.http import urlencode
# FILEBROWSER IMPORTS
import filebrowser
import filebrowser.settings
from filebrowser.settings import VERSIONS, DEFAULT_PERMISSIONS
from filebrowser.base import FileObject
from filebrowser.sites import get_site_dict
# This module will test all FileBrowser sites with the following app_name
APP_NAME = 'filebrowser'
TESTS_PATH = os.path.dirname(os.path.abspath(__file__))
FILEBROWSER_PATH = os.path.split(TESTS_PATH)[0]
# TEST FUNCTIONS
def test_browse(test):
    """
    Check the browse view functions as expected.

    ``test`` is the dynamically generated TestCase instance (see the bottom of
    this module); it carries the site name, the test client and the site.
    """
    url = reverse('%s:fb_browse' % test.site_name)
    response = test.c.get(url)
    # Check we get OK response for browsing
    test.assertTrue(response.status_code == 200)
    # Check that a correct template was used:
    test.assertTrue('filebrowser/index.html' in [t.name for t in response.templates])
    # Check directory was set correctly in the context. If this fails, it may indicate
    # that two sites were instantiated with the same name.
    test.assertTrue(test.site.directory == response.context['filebrowser_site'].directory)
def test_ckeditor_params_in_search_form(test):
    """
    The CKEditor GET params must be included in the search form as hidden
    inputs so they persist after searching.
    """
    url = reverse('%s:fb_browse' % test.site_name)
    response = test.c.get(url, {
        'pop': '3',
        'type': 'image',
        'CKEditor': 'id_body',
        'CKEditorFuncNum': '1',
    })
    test.assertTrue(response.status_code == 200)
    # Each GET parameter must round-trip through the search form unchanged.
    test.assertContains(response, '<input type="hidden" name="pop" value="3" />')
    test.assertContains(response, '<input type="hidden" name="type" value="image" />')
    test.assertContains(response, '<input type="hidden" name="CKEditor" value="id_body" />')
    test.assertContains(response, '<input type="hidden" name="CKEditorFuncNum" value="1" />')
def test_createdir(test):
    """
    Check the createdir view functions as expected. Creates a new tmp directory
    under 'site.directory'.
    """
    # Generate a name of a new temp directory (first tmp_test_N not taken)
    prefix = 'tmp_test'
    sufix = 0
    tmpdir_name = '%s_%d' % (prefix, sufix)
    while test.site.storage.exists(os.path.join(test.site.directory, tmpdir_name)):
        sufix += 1
        tmpdir_name = '%s_%d' % (prefix, sufix)
    # Store this temp directory on the test instance: the following tests
    # (upload/detail/delete) and tearDown rely on it.
    test.tmpdir = FileObject(os.path.join(test.site.directory, tmpdir_name), site=test.site)
    # Create the directory using the createdir view
    url = reverse('%s:fb_createdir' % test.site_name)
    response = test.c.post(url, {'name': tmpdir_name})
    # Check we got Redirection response for createdir
    test.assertTrue(response.status_code == 302)
    # Check the directory now exists
    test.assertTrue(test.site.storage.exists(test.tmpdir.path))
def test_upload(test):
    """
    Check the upload view functions as expected. Does not check the uploading itself.

    Only verifies that GET on the upload view renders the upload template;
    the actual file transfer is covered by test_do_upload().
    """
    url = reverse('%s:fb_upload' % test.site_name)
    response = test.c.get(url, {'name': test.tmpdir.path_relative_directory})
    # Check we get OK response for upload view
    test.assertTrue(response.status_code == 200)
    # Check the correct template was used
    test.assertTrue('filebrowser/upload.html' in [t.name for t in response.templates])
def test_do_upload(test):
    """
    Test the actual uploading

    Stores the uploaded file as ``test.testfile`` for the later
    detail/delete tests.
    """
    url = reverse('%s:fb_do_upload' % test.site_name)
    url = '?'.join([url, urlencode({'folder': test.tmpdir.path_relative_directory, 'qqfile': 'testimage.jpg'})])
    with open(os.path.join(FILEBROWSER_PATH, 'static/filebrowser/img/testimage.jpg'), "rb") as f:
        file_size = os.path.getsize(f.name)
        response = test.c.post(url, data={'qqfile': 'testimage.jpg', 'file': f}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    # Check we get OK response
    test.assertTrue(response.status_code == 200)
    data = json.loads(response.content)
    test.assertEqual(data["filename"], "testimage.jpg")
    test.assertEqual(data["temp_filename"], None)
    # Check the file now exists
    path = os.path.join(test.tmpdir.path, 'testimage.jpg')
    test.testfile = FileObject(path, site=test.site)
    test.assertTrue(test.site.storage.exists(path))
    # Check the file has the correct size
    test.assertTrue(file_size == test.site.storage.size(path))
    # Check permissions (only when the setting defines any)
    if DEFAULT_PERMISSIONS is not None:
        permissions_default = oct(DEFAULT_PERMISSIONS)
        permissions_file = oct(os.stat(test.testfile.path_full).st_mode & 0o777)
        test.assertTrue(permissions_default == permissions_file)
def test_do_temp_upload(test):
    """
    Test the temporary upload (used with the FileBrowseUploadField)
    We use the standard test directory here (no special upload dir needed).

    Mutates ``filebrowser.sites.UPLOAD_TEMPDIR``; tearDown restores the
    original value saved in setUp.
    """
    filebrowser.sites.UPLOAD_TEMPDIR = test.tmpdir.path_relative_directory
    url = reverse('%s:fb_do_upload' % test.site_name)
    url = '?'.join([url, urlencode({'folder': test.tmpdir.path_relative_directory, 'qqfile': 'testimage.jpg', 'temporary': 'true'})])
    with open(os.path.join(FILEBROWSER_PATH, 'static/filebrowser/img/testimage.jpg'), "rb") as f:
        file_size = os.path.getsize(f.name)
        response = test.c.post(url, data={'qqfile': 'testimage.jpg', 'file': f}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    # Check we get OK response
    test.assertTrue(response.status_code == 200)
    data = json.loads(response.content)
    test.assertEqual(data["filename"], "testimage.jpg")
    # For a temporary upload the response must echo the temp location.
    test.assertEqual(data["temp_filename"], os.path.join(test.tmpdir.path_relative_directory, "testimage.jpg"))
    # Check the file now exists
    path = os.path.join(test.tmpdir.path, 'testimage.jpg')
    test.testfile = FileObject(path, site=test.site)
    test.assertTrue(test.site.storage.exists(path))
    # Check the file has the correct size
    test.assertTrue(file_size == test.site.storage.size(path))
    # Check permissions
    if DEFAULT_PERMISSIONS is not None:
        permissions_default = oct(DEFAULT_PERMISSIONS)
        permissions_file = oct(os.stat(test.testfile.path_full).st_mode & 0o777)
        test.assertTrue(permissions_default == permissions_file)
def test_overwrite(test):
    """
    Test the uploading with OVERWRITE_EXISTING

    With OVERWRITE_EXISTING the re-upload replaces the file (still one file in
    the dir); without it a suffixed copy is created (two files).
    """
    # Save settings
    oe = filebrowser.sites.OVERWRITE_EXISTING
    # OVERWRITE true
    filebrowser.sites.OVERWRITE_EXISTING = True
    url = reverse('%s:fb_do_upload' % test.site_name)
    url = '?'.join([url, urlencode({'folder': test.tmpdir.path_relative_directory, 'qqfile': 'testimage.jpg'})])
    with open(os.path.join(FILEBROWSER_PATH, 'static/filebrowser/img/testimage.jpg'), "rb") as f:
        # file_size = os.path.getsize(f.name)
        test.c.post(url, data={'qqfile': 'testimage.jpg', 'file': f}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    # Check files
    test.assertEqual(test.site.storage.listdir(test.tmpdir), ([], [u'testimage.jpg']))
    # OVERWRITE false
    filebrowser.sites.OVERWRITE_EXISTING = False
    url = reverse('%s:fb_do_upload' % test.site_name)
    url = '?'.join([url, urlencode({'folder': test.tmpdir.path_relative_directory, 'qqfile': 'testimage.jpg'})])
    with open(os.path.join(FILEBROWSER_PATH, 'static/filebrowser/img/testimage.jpg'), "rb") as f:
        # file_size = os.path.getsize(f.name)
        test.c.post(url, data={'qqfile': 'testimage.jpg', 'file': f}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
    # Check files
    test.assertEqual(len(test.site.storage.listdir(test.tmpdir)[1]), 2)
    # Reset settings
    filebrowser.sites.OVERWRITE_EXISTING = oe
def test_convert_normalize(test):
    """
    Test the uploading with CONVERT_FILENAME, NORMALIZE_FILENAME

    Uploads the same fixture repeatedly while toggling CONVERT_FILENAME,
    NORMALIZE_FILENAME and OVERWRITE_EXISTING, checking the resulting file
    names and the file count in the tmp directory after each combination.

    Fixes over the previous version: the fixture file handle was opened and
    never closed (now a ``with`` block), and the module settings were only
    restored when every assertion passed (now a ``try``/``finally``), so a
    failure no longer leaks modified settings into other tests.
    """
    url = reverse('%s:fb_do_upload' % test.site_name)
    url = '?'.join([url, urlencode({'folder': test.tmpdir.path_relative_directory, 'qqfile': 'TEST IMAGE 000.jpg'})])
    # Save settings so they can be restored even when an assertion fails.
    oe = filebrowser.sites.OVERWRITE_EXISTING
    cf = filebrowser.sites.CONVERT_FILENAME
    nf = filebrowser.sites.NORMALIZE_FILENAME
    try:
        with open(os.path.join(FILEBROWSER_PATH, u'static/filebrowser/img/TEST IMAGE 000.jpg'), "rb") as f:
            # CONVERT off / NORMALIZE off: the filename is stored verbatim.
            filebrowser.sites.CONVERT_FILENAME = False
            filebrowser.sites.NORMALIZE_FILENAME = False
            filebrowser.utils.CONVERT_FILENAME = False
            filebrowser.utils.NORMALIZE_FILENAME = False
            test.c.post(url, data={'qqfile': 'TEST IMAGE 000.jpg', 'file': f}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
            path = os.path.join(test.tmpdir.path, 'TEST IMAGE 000.jpg')
            test.assertTrue(test.site.storage.exists(path))
            test.assertEqual(len(test.site.storage.listdir(test.tmpdir)[1]), 3)
            # OVERWRITE true: re-upload replaces, no suffixed copy appears.
            filebrowser.sites.OVERWRITE_EXISTING = True
            test.c.post(url, data={'qqfile': 'TEST IMAGE 000.jpg', 'file': f}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
            path = os.path.join(test.tmpdir.path, 'TEST IMAGE 000.jpg')
            test.assertTrue(test.site.storage.exists(path))
            path = os.path.join(test.tmpdir.path, 'TEST IMAGE 000_1.jpg')
            test.assertFalse(test.site.storage.exists(path))
            test.assertEqual(len(test.site.storage.listdir(test.tmpdir)[1]), 3)
            # OVERWRITE false: re-upload adds a suffixed copy.
            filebrowser.sites.OVERWRITE_EXISTING = False
            test.c.post(url, data={'qqfile': 'TEST IMAGE 000.jpg', 'file': f}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
            path = os.path.join(test.tmpdir.path, 'TEST IMAGE 000.jpg')
            test.assertTrue(test.site.storage.exists(path))
            test.assertEqual(len(test.site.storage.listdir(test.tmpdir)[1]), 4)
            # CONVERT on / NORMALIZE off: name is lower-cased/underscored.
            filebrowser.sites.CONVERT_FILENAME = True
            filebrowser.sites.NORMALIZE_FILENAME = False
            filebrowser.utils.CONVERT_FILENAME = True
            filebrowser.utils.NORMALIZE_FILENAME = False
            test.c.post(url, data={'qqfile': 'TEST IMAGE 000.jpg', 'file': f}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
            path = os.path.join(test.tmpdir.path, 'test_image_000.jpg')
            test.assertTrue(test.site.storage.exists(path))
            test.assertEqual(len(test.site.storage.listdir(test.tmpdir)[1]), 5)
            # OVERWRITE true: count stays the same.
            filebrowser.sites.OVERWRITE_EXISTING = True
            test.c.post(url, data={'qqfile': 'TEST IMAGE 000.jpg', 'file': f}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
            path = os.path.join(test.tmpdir.path, 'test_image_000.jpg')
            test.assertTrue(test.site.storage.exists(path))
            test.assertEqual(len(test.site.storage.listdir(test.tmpdir)[1]), 5)
            # OVERWRITE false: one more file appears.
            # NOTE(review): 'TTEST IMAGE 000.jpg' (double T) looks like a
            # typo, but the assertions pass either way — confirm the intent
            # before changing the upload name.
            filebrowser.sites.OVERWRITE_EXISTING = False
            test.c.post(url, data={'qqfile': 'TTEST IMAGE 000.jpg', 'file': f}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
            path = os.path.join(test.tmpdir.path, 'test_image_000.jpg')
            test.assertTrue(test.site.storage.exists(path))
            test.assertEqual(len(test.site.storage.listdir(test.tmpdir)[1]), 6)
            # CONVERT on / NORMALIZE on.
            filebrowser.sites.CONVERT_FILENAME = True
            filebrowser.sites.NORMALIZE_FILENAME = True
            filebrowser.utils.CONVERT_FILENAME = True
            filebrowser.utils.NORMALIZE_FILENAME = True
            test.c.post(url, data={'qqfile': 'TEST IMAGE 000.jpg', 'file': f}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
            path = os.path.join(test.tmpdir.path, 'test_image_000.jpg')
            test.assertTrue(test.site.storage.exists(path))
            test.assertEqual(len(test.site.storage.listdir(test.tmpdir)[1]), 7)
            # OVERWRITE true: count unchanged.
            filebrowser.sites.OVERWRITE_EXISTING = True
            test.c.post(url, data={'qqfile': 'TEST IMAGE 000.jpg', 'file': f}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
            path = os.path.join(test.tmpdir.path, 'test_image_000.jpg')
            test.assertTrue(test.site.storage.exists(path))
            test.assertEqual(len(test.site.storage.listdir(test.tmpdir)[1]), 7)
            # OVERWRITE false: one more suffixed copy.
            filebrowser.sites.OVERWRITE_EXISTING = False
            test.c.post(url, data={'qqfile': 'TEST IMAGE 000.jpg', 'file': f}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
            path = os.path.join(test.tmpdir.path, 'test_image_000.jpg')
            test.assertTrue(test.site.storage.exists(path))
            test.assertEqual(len(test.site.storage.listdir(test.tmpdir)[1]), 8)
    finally:
        # Reset settings unconditionally.
        filebrowser.sites.CONVERT_FILENAME = cf
        filebrowser.sites.NORMALIZE_FILENAME = nf
        filebrowser.utils.CONVERT_FILENAME = cf
        filebrowser.utils.NORMALIZE_FILENAME = nf
        filebrowser.sites.OVERWRITE_EXISTING = oe
def test_detail(test):
    """
    Check the detail view and version generation. Check also renaming of files.
    """
    url = reverse('%s:fb_detail' % test.site_name)
    response = test.c.get(url, {'dir': test.testfile.dirname, 'filename': test.testfile.filename})
    # Check we get an OK response for the detail view
    test.assertTrue(response.status_code == 200)
    # At this moment all versions should be generated. Check that.
    pre_rename_versions = []
    for version_suffix in VERSIONS:
        path = test.testfile.version_path(version_suffix)
        pre_rename_versions.append(path)
        test.assertTrue(test.site.storage.exists(path))
    # Attemp renaming the file
    url = '?'.join([url, urlencode({'dir': test.testfile.dirname, 'filename': test.testfile.filename})])
    response = test.c.post(url, {'name': 'testpic.jpg'})
    # Check we get 302 response for renaming
    test.assertTrue(response.status_code == 302)
    # Check the file was renamed correctly:
    test.assertTrue(test.site.storage.exists(os.path.join(test.testfile.head, 'testpic.jpg')))
    # Store the renamed file (subsequent tests operate on the new name)
    test.testfile = FileObject(os.path.join(test.testfile.head, 'testpic.jpg'), site=test.site)
    # Check if all pre-rename versions were deleted:
    for path in pre_rename_versions:
        test.assertFalse(test.site.storage.exists(path))
    # Check if all post–rename versions were deleted (resp. not being generated):
    for version_suffix in VERSIONS:
        path = test.testfile.version_path(version_suffix)
        test.assertFalse(test.site.storage.exists(path))
def test_delete_confirm(test):
    """
    Check that the delete view functions as expected. Does not check the deletion itself,
    that happens in test_delete().
    """
    url = reverse('%s:fb_delete_confirm' % test.site_name)
    response = test.c.get(url, {'dir': test.testfile.dirname, 'filename': test.testfile.filename})
    # Check we get OK response for delete_confirm
    test.assertTrue(response.status_code == 200)
    # Check the correct template was used
    test.assertTrue('filebrowser/delete_confirm.html' in [t.name for t in response.templates])
def test_delete(test):
    """
    Generate all versions for the uploaded file and attempt a deletion of that file.
    Finally, attempt a deletion of the tmp dir.

    Resets ``test.testfile`` and ``test.tmpdir`` to None so tearDown knows
    nothing is left to clean up.
    """
    # Generate all versions of the file
    versions = []
    for version_suffix in VERSIONS:
        versions.append(test.testfile.version_generate(version_suffix))
    # Request the delete view
    url = reverse('%s:fb_delete' % test.site_name)
    response = test.c.get(url, {'dir': test.testfile.dirname, 'filename': test.testfile.filename})
    # Check we get 302 response for delete
    test.assertTrue(response.status_code == 302)
    # Check the file and its versions do not exist anymore
    test.assertFalse(test.site.storage.exists(test.testfile.path))
    for version in versions:
        test.assertFalse(test.site.storage.exists(version.path))
    test.testfile = None
    # Delete the tmp dir and check it does not exist anymore
    response = test.c.get(url, {'dir': test.tmpdir.dirname, 'filename': test.tmpdir.filename})
    test.assertTrue(response.status_code == 302)
    test.assertFalse(test.site.storage.exists(test.tmpdir.path))
    test.tmpdir = None
# INSTANCE METHODS
# setUp, tearDown, and runTest methods for the dynamically created
# test cases (they will become instance methods)
def setUp(self):
    """Create the test user, resolve the site and snapshot mutable settings."""
    # Create a site_tester user
    from django.contrib.auth.models import User
    user = User.objects.create_user('site_tester', 'st@willworkforfood.com', 'secret')
    user.is_staff = True
    user.save()
    # Obtain the site object
    self.site = get_site_dict(APP_NAME)[self.site_name]
    # Saved so tearDown can undo the mutation done in test_do_temp_upload().
    self.original_upload_tempdir = filebrowser.sites.UPLOAD_TEMPDIR
def tearDown(self):
    """Restore mutated module settings and remove any leftover tmp dir."""
    filebrowser.sites.UPLOAD_TEMPDIR = self.original_upload_tempdir
    # Delete a left-over tmp directories, if there's any
    if hasattr(self, 'tmpdir') and self.tmpdir:
        print("Removing left-over tmp dir:", self.tmpdir.path)
        self.site.storage.rmtree(self.tmpdir.path)
def runTest(self):
    """Run all test functions in a fixed order.

    The order matters: the functions form a state machine communicating
    through ``self.tmpdir`` and ``self.testfile``.
    """
    # Login
    response = self.c.login(username='site_tester', password='secret')
    self.assertTrue(response)
    # Execute tests
    test_browse(self)
    test_ckeditor_params_in_search_form(self)
    test_createdir(self)
    test_upload(self)
    test_do_upload(self)
    test_do_temp_upload(self)
    test_overwrite(self)
    test_convert_normalize(self)
    test_detail(self)
    test_delete_confirm(self)
    test_delete(self)
# CREATION OF TEST CASES
# Get the names of all deployed filebrowser sites with the given app name.
all_sites = get_resolver(get_urlconf()).app_dict[APP_NAME]
this_module = sys.modules[__name__]
# Create a test class for each deployed filebrowser site
for site in all_sites:
    print('Creating Test for the FileBrowser site:', site)
    # Create a subclass of TestCase
    testcase_class = type('TestSite_' + site, (TestCase,), {'site_name': site, 'c': Client(), 'tmpdirs': None})
    # Add setUp, tearDown, and runTest methods (module-level functions above)
    setattr(testcase_class, 'setUp', setUp)
    setattr(testcase_class, 'tearDown', tearDown)
    setattr(testcase_class, 'runTest', runTest)
    # Add the test case class to this module
    setattr(this_module, 'TestSite_' + site, testcase_class)
# Delete the attribute test_class, otherwise it will be
# considered as a test case by django
# NOTE(review): raises AttributeError when all_sites is empty (the loop never
# assigned 'testcase_class') — confirm at least one site is always deployed.
delattr(this_module, 'testcase_class') | unknown | codeparrot/codeparrot-clean | |
# Copyright (c) 2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import netaddr
from oslo_utils import uuidutils
from six import moves
from neutron.common import constants as l3_constants
_uuid = uuidutils.generate_uuid
class FakeDev(object):
    """Lightweight device double that carries nothing but a name."""

    def __init__(self, name):
        """Keep the given name as the fake device's only attribute."""
        self.name = name
def get_ha_interface(ip='169.254.192.1', mac='12:34:56:78:2b:5d'):
    """Build a fake L3-HA admin port dict as the l3 agent would receive it.

    :param ip: fixed IP of the HA interface (inside the 169.254.192.0/18
               HA network used below)
    :param mac: MAC address of the HA port
    :return: port dict with fresh UUIDs for device/port/network/subnet/agent
    """
    subnet_id = _uuid()
    return {'admin_state_up': True,
            'device_id': _uuid(),
            'device_owner': 'network:router_ha_interface',
            'fixed_ips': [{'ip_address': ip,
                           'prefixlen': 18,
                           'subnet_id': subnet_id}],
            'id': _uuid(),
            'mac_address': mac,
            'name': u'L3 HA Admin port 0',
            'network_id': _uuid(),
            'status': u'ACTIVE',
            'subnets': [{'cidr': '169.254.192.0/18',
                         'gateway_ip': '169.254.255.254',
                         'id': subnet_id}],
            'tenant_id': '',
            'agent_id': _uuid(),
            'agent_host': 'aaa',
            'priority': 1}
def prepare_router_data(ip_version=4, enable_snat=None, num_internal_ports=1,
                        enable_floating_ip=False, enable_ha=False,
                        extra_routes=False, dual_stack=False,
                        v6_ext_gw_with_sub=True, **kwargs):
    """Build a fake router dict resembling what the l3 agent receives.

    The external gateway port gets a v4 and/or v6 fixed IP depending on
    ``ip_version``/``dual_stack``; internal ports are added via
    router_append_interface(). Optional floating IP, HA and extra-route
    sections are attached on demand. kwargs may override ip_address,
    subnet_cidr, gateway_ip and gateway_mac.
    """
    fixed_ips = []
    subnets = []
    gateway_mac = kwargs.get('gateway_mac', 'ca:fe:de:ad:be:ee')
    extra_subnets = []
    # Build one external fixed IP + subnet per requested address family.
    for loop_version in (4, 6):
        if loop_version == 4 and (ip_version == 4 or dual_stack):
            ip_address = kwargs.get('ip_address', '19.4.4.4')
            prefixlen = 24
            subnet_cidr = kwargs.get('subnet_cidr', '19.4.4.0/24')
            gateway_ip = kwargs.get('gateway_ip', '19.4.4.1')
            _extra_subnet = {'cidr': '9.4.5.0/24'}
        elif (loop_version == 6 and (ip_version == 6 or dual_stack) and
              v6_ext_gw_with_sub):
            ip_address = kwargs.get('ip_address', 'fd00::4')
            prefixlen = 64
            subnet_cidr = kwargs.get('subnet_cidr', 'fd00::/64')
            gateway_ip = kwargs.get('gateway_ip', 'fd00::1')
            _extra_subnet = {'cidr': 'fd01::/64'}
        else:
            continue
        subnet_id = _uuid()
        fixed_ips.append({'ip_address': ip_address,
                          'subnet_id': subnet_id,
                          'prefixlen': prefixlen})
        subnets.append({'id': subnet_id,
                        'cidr': subnet_cidr,
                        'gateway_ip': gateway_ip})
        extra_subnets.append(_extra_subnet)
    # An empty fixed-IP list is only valid when a subnet-less v6 gateway
    # was explicitly requested.
    if not fixed_ips and v6_ext_gw_with_sub:
        raise ValueError("Invalid ip_version: %s" % ip_version)
    router_id = _uuid()
    ex_gw_port = {'id': _uuid(),
                  'mac_address': gateway_mac,
                  'network_id': _uuid(),
                  'fixed_ips': fixed_ips,
                  'subnets': subnets,
                  'extra_subnets': extra_subnets}
    routes = []
    if extra_routes:
        routes = [{'destination': '8.8.8.0/24', 'nexthop': '19.4.4.4'}]
    router = {
        'id': router_id,
        'distributed': False,
        l3_constants.INTERFACE_KEY: [],
        'routes': routes,
        'gw_port': ex_gw_port}
    if enable_floating_ip:
        router[l3_constants.FLOATINGIP_KEY] = [{
            'id': _uuid(),
            'port_id': _uuid(),
            'status': 'DOWN',
            'floating_ip_address': '19.4.4.2',
            'fixed_ip_address': '10.0.0.1'}]
    router_append_interface(router, count=num_internal_ports,
                            ip_version=ip_version, dual_stack=dual_stack)
    if enable_ha:
        router['ha'] = True
        router['ha_vr_id'] = 1
        router[l3_constants.HA_INTERFACE_KEY] = (get_ha_interface())
    if enable_snat is not None:
        router['enable_snat'] = enable_snat
    return router
def get_subnet_id(port):
    """Return the subnet id of the first fixed IP configured on *port*."""
    first_fixed_ip = port['fixed_ips'][0]
    return first_fixed_ip['subnet_id']
def router_append_interface(router, count=1, ip_version=4, ra_mode=None,
                            addr_mode=None, dual_stack=False):
    """Attach *count* new internal interfaces to a fake *router* dict.

    Depending on *ip_version* and *dual_stack*, each interface gets an
    IPv4 subnet, an IPv6 subnet, or both.  Subnet addresses embed a
    per-interface index, offset by the number of same-version subnets the
    router already carries, so repeated calls yield non-overlapping CIDRs.
    MAC addresses are handed out sequentially from ca:fe:de:ad:be:ef.

    :raises ValueError: when neither the IPv4 nor the IPv6 branch applies
        (ip_version is not 4 or 6 and dual_stack is False).
    """
    interfaces = router[l3_constants.INTERFACE_KEY]
    # Number of subnets of the requested IP version already present; used
    # as the starting index into the address templates below.
    existing = sum(
        [netaddr.IPNetwork(subnet['cidr']).version == ip_version
         for p in interfaces for subnet in p['subnets']])
    mac_address = netaddr.EUI('ca:fe:de:ad:be:ef')
    mac_address.dialect = netaddr.mac_unix
    # (ip, cidr, prefixlen, gateway) templates, keyed by IP version.
    templates = {
        4: ('35.4.%i.4', '35.4.%i.0/24', 24, '35.4.%i.1'),
        6: ('fd01:%x:1::6', 'fd01:%x:1::/64', 64, 'fd01:%x:1::1'),
    }
    for index in range(existing, existing + count):
        fixed_ips = []
        subnets = []
        # Iterate versions in a fixed order so subnet list order is stable.
        for version in (4, 6):
            if not (ip_version == version or dual_stack):
                continue
            ip_tmpl, cidr_tmpl, prefixlen, gw_tmpl = templates[version]
            subnet_id = _uuid()
            fixed_ips.append({'ip_address': ip_tmpl % index,
                              'subnet_id': subnet_id,
                              'prefixlen': prefixlen})
            subnets.append({'id': subnet_id,
                            'cidr': cidr_tmpl % index,
                            'gateway_ip': gw_tmpl % index,
                            'ipv6_ra_mode': ra_mode,
                            'ipv6_address_mode': addr_mode})
        if not fixed_ips:
            raise ValueError("Invalid ip_version: %s" % ip_version)
        interfaces.append(
            {'id': _uuid(),
             'network_id': _uuid(),
             'admin_state_up': True,
             'fixed_ips': fixed_ips,
             'mac_address': str(mac_address),
             'subnets': subnets})
        mac_address.value += 1
def router_append_subnet(router, count=1, ip_version=4,
                         ipv6_subnet_modes=None, interface_id=None):
    """Add *count* subnets of *ip_version* to a fake *router* dict.

    The subnets either extend the existing interface identified by
    *interface_id*, or go onto one brand new interface when no id is
    given.  *ipv6_subnet_modes* optionally supplies one
    {'ra_mode': ..., 'address_mode': ...} dict per subnet; a short or
    missing list is padded with None/None entries.

    :raises ValueError: for an unsupported *ip_version*, or when
        *interface_id* does not match any interface on the router.
    """
    # Pad the modes list to 'count' entries for *both* IP versions: the
    # subnet-building loop below indexes ipv6_subnet_modes[i]
    # unconditionally, so leaving it as None for IPv4 (as the original
    # ip_version == 6 guard did) crashed with a TypeError.
    subnet_mode_none = {'ra_mode': None, 'address_mode': None}
    if not ipv6_subnet_modes:
        ipv6_subnet_modes = [subnet_mode_none] * count
    elif len(ipv6_subnet_modes) != count:
        ipv6_subnet_modes.extend([subnet_mode_none for i in
                                  moves.range(len(ipv6_subnet_modes),
                                              count)])
    if ip_version == 4:
        ip_pool = '35.4.%i.4'
        cidr_pool = '35.4.%i.0/24'
        prefixlen = 24
        gw_pool = '35.4.%i.1'
    elif ip_version == 6:
        ip_pool = 'fd01:%x::6'
        cidr_pool = 'fd01:%x::/64'
        prefixlen = 64
        gw_pool = 'fd01:%x::1'
    else:
        raise ValueError("Invalid ip_version: %s" % ip_version)
    # Work on a deep copy so a failure part-way through never leaves the
    # router holding half-built interfaces.
    interfaces = copy.deepcopy(router.get(l3_constants.INTERFACE_KEY, []))
    if interface_id:
        try:
            interface = next(i for i in interfaces
                             if i['id'] == interface_id)
        except StopIteration:
            raise ValueError("interface_id not found")
        # Aliases: appending below mutates the copied interface in place.
        fixed_ips, subnets = interface['fixed_ips'], interface['subnets']
    else:
        interface = None
        fixed_ips, subnets = [], []
    # Offset new addresses past the subnets the interface already has.
    num_existing_subnets = len(subnets)
    for i in moves.range(count):
        subnet_id = _uuid()
        fixed_ips.append(
            {'ip_address': ip_pool % (i + num_existing_subnets),
             'subnet_id': subnet_id,
             'prefixlen': prefixlen})
        subnets.append(
            {'id': subnet_id,
             'cidr': cidr_pool % (i + num_existing_subnets),
             'gateway_ip': gw_pool % (i + num_existing_subnets),
             'ipv6_ra_mode': ipv6_subnet_modes[i]['ra_mode'],
             'ipv6_address_mode': ipv6_subnet_modes[i]['address_mode']})
    if interface:
        # Update old interface
        index = interfaces.index(interface)
        interfaces[index].update({'fixed_ips': fixed_ips, 'subnets': subnets})
    else:
        # New interface appended to interfaces list
        mac_address = netaddr.EUI('ca:fe:de:ad:be:ef')
        mac_address.dialect = netaddr.mac_unix
        interfaces.append(
            {'id': _uuid(),
             'network_id': _uuid(),
             'admin_state_up': True,
             'mac_address': str(mac_address),
             'fixed_ips': fixed_ips,
             'subnets': subnets})
    router[l3_constants.INTERFACE_KEY] = interfaces
def router_append_pd_enabled_subnet(router, count=1):
    """Append *count* prefix-delegation enabled IPv6 interfaces to *router*.

    Each interface gets the provisional PD prefix as its subnet CIDR, a
    SLAAC ra_mode and the PD subnet pool id.  MAC addresses are allocated
    sequentially from ca:fe:de:ad:be:ef.  Returns the list of interface
    dicts that were added.
    """
    interfaces = router[l3_constants.INTERFACE_KEY]
    # Count pre-existing IPv6 subnets only to keep the MAC/loop bookkeeping
    # aligned with router_append_interface().
    existing_v6 = sum(netaddr.IPNetwork(subnet['cidr']).version == 6
                      for p in interfaces for subnet in p['subnets'])
    mac = netaddr.EUI('ca:fe:de:ad:be:ef')
    mac.dialect = netaddr.mac_unix
    pd_intfs = []
    for _ in range(existing_v6, existing_v6 + count):
        subnet_id = _uuid()
        pd_subnet = {'id': subnet_id,
                     'cidr': l3_constants.PROVISIONAL_IPV6_PD_PREFIX,
                     'gateway_ip': '::1',
                     'ipv6_ra_mode': l3_constants.IPV6_SLAAC,
                     'subnetpool_id': l3_constants.IPV6_PD_POOL_ID}
        intf = {'id': _uuid(),
                'network_id': _uuid(),
                'admin_state_up': True,
                'fixed_ips': [{'ip_address': '::1',
                               'prefixlen': 64,
                               'subnet_id': subnet_id}],
                'mac_address': str(mac),
                'subnets': [pd_subnet]}
        interfaces.append(intf)
        pd_intfs.append(intf)
        mac.value += 1
    return pd_intfs
def prepare_ext_gw_test(context, ri, dual_stack=False):
    """Build a fake external gateway port for router info *ri*.

    Returns an (interface_name, ex_gw_port) tuple, where interface_name
    is the external device name the router would use for the port.  As a
    side effect, context.device_exists is primed to report True.
    """
    v4_subnet_id = _uuid()
    fixed_ips = [{'subnet_id': v4_subnet_id,
                  'ip_address': '20.0.0.30',
                  'prefixlen': 24}]
    subnets = [{'id': v4_subnet_id,
                'cidr': '20.0.0.0/24',
                'gateway_ip': '20.0.0.1'}]
    if dual_stack:
        # Add a second, IPv6 address/subnet pair to the same port.
        v6_subnet_id = _uuid()
        fixed_ips.append({'subnet_id': v6_subnet_id,
                          'ip_address': '2001:192:168:100::2',
                          'prefixlen': 64})
        subnets.append({'id': v6_subnet_id,
                        'cidr': '2001:192:168:100::/64',
                        'gateway_ip': '2001:192:168:100::1'})
    ex_gw_port = {'fixed_ips': fixed_ips,
                  'subnets': subnets,
                  'extra_subnets': [{'cidr': '172.16.0.0/24'}],
                  'id': _uuid(),
                  'network_id': _uuid(),
                  'mac_address': 'ca:fe:de:ad:be:ef'}
    interface_name = ri.get_external_device_name(ex_gw_port['id'])
    context.device_exists.return_value = True
    return interface_name, ex_gw_port
// SPDX-License-Identifier: GPL-2.0
/*
* High-level sync()-related operations
*/
#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/namei.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/syscalls.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/backing-dev.h>
#include "internal.h"
/* The only flag bits sync_file_range() accepts; anything else is -EINVAL. */
#define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
			SYNC_FILE_RANGE_WAIT_AFTER)
/*
 * Write out and wait upon all dirty data associated with this
 * superblock. Filesystem data as well as the underlying block
 * device. Takes the superblock lock.
 *
 * Returns 0 on success (including the read-only no-op case), or the
 * first error returned by ->sync_fs() or block-device writeback.
 */
int sync_filesystem(struct super_block *sb)
{
	int ret = 0;
	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));
	/*
	 * No point in syncing out anything if the filesystem is read-only.
	 */
	if (sb_rdonly(sb))
		return 0;
	/*
	 * Do the filesystem syncing work. For simple filesystems
	 * writeback_inodes_sb(sb) just dirties buffers with inodes so we have
	 * to submit I/O for these buffers via sync_blockdev(). This also
	 * speeds up the wait == 1 case since in that case write_inode()
	 * methods call sync_dirty_buffer() and thus effectively write one block
	 * at a time.
	 */
	writeback_inodes_sb(sb, WB_REASON_SYNC);
	/* First ->sync_fs() pass: start I/O without waiting (wait == 0). */
	if (sb->s_op->sync_fs) {
		ret = sb->s_op->sync_fs(sb, 0);
		if (ret)
			return ret;
	}
	ret = sync_blockdev_nowait(sb->s_bdev);
	if (ret)
		return ret;
	/* Second pass: sync inodes and call ->sync_fs() again, now waiting. */
	sync_inodes_sb(sb);
	if (sb->s_op->sync_fs) {
		ret = sb->s_op->sync_fs(sb, 1);
		if (ret)
			return ret;
	}
	return sync_blockdev(sb->s_bdev);
}
EXPORT_SYMBOL(sync_filesystem);
/*
 * iterate_supers() callback: write out and wait on all inodes of a
 * writable superblock.  @arg is unused; it only satisfies the callback
 * signature.
 */
static void sync_inodes_one_sb(struct super_block *sb, void *arg)
{
	if (!sb_rdonly(sb))
		sync_inodes_sb(sb);
}
/*
 * iterate_supers() callback: invoke the filesystem's ->sync_fs() with the
 * wait flag passed through @arg.  Read-only superblocks, superblocks
 * flagged SB_I_SKIP_SYNC, and filesystems without ->sync_fs() are skipped.
 */
static void sync_fs_one_sb(struct super_block *sb, void *arg)
{
	if (!sb_rdonly(sb) && !(sb->s_iflags & SB_I_SKIP_SYNC) &&
	    sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, *(int *)arg);
}
/*
 * Sync everything. We start by waking flusher threads so that most of
 * writeback runs on all devices in parallel. Then we sync all inodes reliably
 * which effectively also waits for all flusher threads to finish doing
 * writeback. At this point all data is on disk so metadata should be stable
 * and we tell filesystems to sync their metadata via ->sync_fs() calls.
 * Finally, we writeout all block devices because some filesystems (e.g. ext2)
 * just write metadata (such as inodes or bitmaps) to block device page cache
 * and do not sync it on their own in ->sync_fs().
 */
void ksys_sync(void)
{
	int nowait = 0, wait = 1;
	/* Kick background writeback on all devices in parallel first. */
	wakeup_flusher_threads(WB_REASON_SYNC);
	iterate_supers(sync_inodes_one_sb, NULL);
	/* Two ->sync_fs() passes: start I/O first, then wait on it. */
	iterate_supers(sync_fs_one_sb, &nowait);
	iterate_supers(sync_fs_one_sb, &wait);
	/* Same two-pass scheme for block-device page cache. */
	sync_bdevs(false);
	sync_bdevs(true);
}
/* sync(2): flush and wait on all filesystems and block devices. */
SYSCALL_DEFINE0(sync)
{
	ksys_sync();
	return 0;
}
/*
 * Workqueue body for emergency_sync(): a best-effort global sync that
 * frees its own work item when done.
 */
static void do_sync_work(struct work_struct *work)
{
	int nowait = 0;
	int wait = 1;
	/*
	 * Sync twice to reduce the possibility we skipped some inodes / pages
	 * because they were temporarily locked
	 */
	iterate_supers(sync_inodes_one_sb, NULL);
	iterate_supers(sync_fs_one_sb, &nowait);
	sync_bdevs(false);
	/* Second pass repeats the sweep, this time waiting in ->sync_fs(). */
	iterate_supers(sync_inodes_one_sb, NULL);
	iterate_supers(sync_fs_one_sb, &wait);
	sync_bdevs(false);
	printk("Emergency Sync complete\n");
	/* The work item was kmalloc'ed by emergency_sync(); release it. */
	kfree(work);
}
/*
 * Schedule a best-effort global sync to run from workqueue context.
 * The work item is allocated GFP_ATOMIC, so this function never sleeps;
 * if the allocation fails the sync is silently skipped.
 */
void emergency_sync(void)
{
	struct work_struct *work;
	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_sync_work);
		schedule_work(work);
	}
}
/*
 * sync a single super
 */
SYSCALL_DEFINE1(syncfs, int, fd)
{
	/* CLASS(fd, ...) takes a reference dropped automatically on return. */
	CLASS(fd, f)(fd);
	struct super_block *sb;
	int ret, ret2;
	if (fd_empty(f))
		return -EBADF;
	sb = fd_file(f)->f_path.dentry->d_sb;
	/* s_umount held shared across the sync, per sync_filesystem()'s rule. */
	down_read(&sb->s_umount);
	ret = sync_filesystem(sb);
	up_read(&sb->s_umount);
	/*
	 * Also surface any writeback error recorded on the superblock since
	 * this fd last checked, even when the sync itself succeeded.
	 */
	ret2 = errseq_check_and_advance(&sb->s_wb_err, &fd_file(f)->f_sb_err);
	return ret ? ret : ret2;
}
/**
 * vfs_fsync_range - helper to sync a range of data & metadata to disk
 * @file: file to sync
 * @start: offset in bytes of the beginning of data range to sync
 * @end: offset in bytes of the end of data range (inclusive)
 * @datasync: perform only datasync
 *
 * Write back data in range @start..@end and metadata for @file to disk. If
 * @datasync is set only metadata needed to access modified file data is
 * written.
 *
 * Return: 0 on success or a negative errno; -EINVAL when @file's
 * file_operations provide no ->fsync method.
 */
int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	if (!file->f_op->fsync)
		return -EINVAL;
	/* A full fsync also pushes out pending lazytime timestamp updates. */
	if (!datasync)
		sync_lazytime(inode);
	return file->f_op->fsync(file, start, end, datasync);
}
EXPORT_SYMBOL(vfs_fsync_range);
/**
 * vfs_fsync - perform a fsync or fdatasync on a file
 * @file: file to sync
 * @datasync: only perform a fdatasync operation
 *
 * Write back data and metadata for @file to disk. If @datasync is
 * set only metadata needed to access modified file data is written.
 *
 * Return: see vfs_fsync_range(); this is the whole-file (0..LLONG_MAX)
 * convenience wrapper around it.
 */
int vfs_fsync(struct file *file, int datasync)
{
	return vfs_fsync_range(file, 0, LLONG_MAX, datasync);
}
EXPORT_SYMBOL(vfs_fsync);
/* Shared implementation of fsync(2) and fdatasync(2). */
static int do_fsync(unsigned int fd, int datasync)
{
	CLASS(fd, f)(fd);
	if (fd_empty(f))
		return -EBADF;
	return vfs_fsync(fd_file(f), datasync);
}
/* fsync(2): full data + metadata sync of one file. */
SYSCALL_DEFINE1(fsync, unsigned int, fd)
{
	return do_fsync(fd, 0);
}
/* fdatasync(2): like fsync(2) but may skip non-essential metadata. */
SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
{
	return do_fsync(fd, 1);
}
/*
 * Implementation of sync_file_range(2); see the long comment above
 * ksys_sync_file_range() below for the full flag semantics.
 */
int sync_file_range(struct file *file, loff_t offset, loff_t nbytes,
		    unsigned int flags)
{
	int ret;
	struct address_space *mapping;
	loff_t endbyte;			/* inclusive */
	umode_t i_mode;
	ret = -EINVAL;
	if (flags & ~VALID_FLAGS)
		goto out;
	/* Reject negative offsets/lengths and arithmetic overflow. */
	endbyte = offset + nbytes;
	if ((s64)offset < 0)
		goto out;
	if ((s64)endbyte < 0)
		goto out;
	if (endbyte < offset)
		goto out;
	/* 32-bit pgoff_t: clamp ranges beyond pagecache addressability. */
	if (sizeof(pgoff_t) == 4) {
		if (offset >= (0x100000000ULL << PAGE_SHIFT)) {
			/*
			 * The range starts outside a 32 bit machine's
			 * pagecache addressing capabilities. Let it "succeed"
			 */
			ret = 0;
			goto out;
		}
		if (endbyte >= (0x100000000ULL << PAGE_SHIFT)) {
			/*
			 * Out to EOF
			 */
			nbytes = 0;
		}
	}
	/* nbytes == 0 means "from offset to EOF"; else make end inclusive. */
	if (nbytes == 0)
		endbyte = LLONG_MAX;
	else
		endbyte--;		/* inclusive */
	i_mode = file_inode(file)->i_mode;
	ret = -ESPIPE;
	if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) &&
			!S_ISLNK(i_mode))
		goto out;
	mapping = file->f_mapping;
	ret = 0;
	if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) {
		ret = file_fdatawait_range(file, offset, endbyte);
		if (ret < 0)
			goto out;
	}
	if (flags & SYNC_FILE_RANGE_WRITE) {
		/*
		 * WRITE_AND_WAIT requires data-integrity writeback; a bare
		 * WRITE is only an asynchronous flush.
		 */
		if ((flags & SYNC_FILE_RANGE_WRITE_AND_WAIT) ==
			     SYNC_FILE_RANGE_WRITE_AND_WAIT)
			ret = filemap_fdatawrite_range(mapping, offset,
						       endbyte);
		else
			ret = filemap_flush_range(mapping, offset, endbyte);
		if (ret < 0)
			goto out;
	}
	if (flags & SYNC_FILE_RANGE_WAIT_AFTER)
		ret = file_fdatawait_range(file, offset, endbyte);
out:
	return ret;
}
/*
 * ksys_sync_file_range() permits finely controlled syncing over a segment of
 * a file in the range offset .. (offset+nbytes-1) inclusive. If nbytes is
 * zero then ksys_sync_file_range() will operate from offset out to EOF.
 *
 * The flag bits are:
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE: wait upon writeout of all pages in the range
 * before performing the write.
 *
 * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the
 * range which are not presently under writeback. Note that this may block for
 * significant periods due to exhaustion of disk request structures.
 *
 * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range
 * after performing the write.
 *
 * Useful combinations of the flag bits are:
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE: ensures that all pages
 * in the range which were dirty on entry to ksys_sync_file_range() are placed
 * under writeout. This is a start-write-for-data-integrity operation.
 *
 * SYNC_FILE_RANGE_WRITE: start writeout of all dirty pages in the range which
 * are not presently under writeout. This is an asynchronous flush-to-disk
 * operation. Not suitable for data integrity operations.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE (or SYNC_FILE_RANGE_WAIT_AFTER): wait for
 * completion of writeout of all pages in the range. This will be used after an
 * earlier SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE operation to wait
 * for that operation to complete and to return the result.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER
 * (a.k.a. SYNC_FILE_RANGE_WRITE_AND_WAIT):
 * a traditional sync() operation. This is a write-for-data-integrity operation
 * which will ensure that all pages in the range which were dirty on entry to
 * ksys_sync_file_range() are written to disk. It should be noted that disk
 * caches are not flushed by this call, so there are no guarantees here that the
 * data will be available on disk after a crash.
 *
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE and SYNC_FILE_RANGE_WAIT_AFTER will detect any
 * I/O errors or ENOSPC conditions and will return those to the caller, after
 * clearing the EIO and ENOSPC flags in the address_space.
 *
 * It should be noted that none of these operations write out the file's
 * metadata. So unless the application is strictly performing overwrites of
 * already-instantiated disk blocks, there are no guarantees here that the data
 * will be available after a crash.
 */
int ksys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
			 unsigned int flags)
{
	/* fd-to-file resolution only; the real work is in sync_file_range(). */
	CLASS(fd, f)(fd);
	if (fd_empty(f))
		return -EBADF;
	return sync_file_range(fd_file(f), offset, nbytes, flags);
}
/* sync_file_range(2): see the comment above ksys_sync_file_range(). */
SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes,
		unsigned int, flags)
{
	return ksys_sync_file_range(fd, offset, nbytes, flags);
}
#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_SYNC_FILE_RANGE)
/*
 * 32-bit ABIs pass each 64-bit argument in two registers; the
 * compat_arg_u64 glue reassembles them before calling the common helper.
 */
COMPAT_SYSCALL_DEFINE6(sync_file_range, int, fd, compat_arg_u64_dual(offset),
		       compat_arg_u64_dual(nbytes), unsigned int, flags)
{
	return ksys_sync_file_range(fd, compat_arg_u64_glue(offset),
				    compat_arg_u64_glue(nbytes), flags);
}
#endif
/* It would be nice if people remember that not all the world's an i386
   when they introduce new system calls */
/*
 * Same semantics as sync_file_range(2), but with @flags ahead of the two
 * 64-bit arguments — a friendlier order for 32-bit register ABIs.
 */
SYSCALL_DEFINE4(sync_file_range2, int, fd, unsigned int, flags,
		loff_t, offset, loff_t, nbytes)
{
	return ksys_sync_file_range(fd, offset, nbytes, flags);
}
"""Self documenting XML-RPC Server.
This module can be used to create XML-RPC servers that
serve pydoc-style documentation in response to HTTP
GET requests. This documentation is dynamically generated
based on the functions and methods registered with the
server.
This module is built upon the pydoc and SimpleXMLRPCServer
modules.
"""
import pydoc
import inspect
import re
import sys
from SimpleXMLRPCServer import (SimpleXMLRPCServer,
SimpleXMLRPCRequestHandler,
CGIXMLRPCRequestHandler,
resolve_dotted_attribute)
class ServerHTMLDoc(pydoc.HTMLDoc):
    """Class used to generate pydoc HTML document for a server"""
    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
        """Mark up some plain text, given a context of symbols to look for.
        Each context dictionary maps object names to anchor names."""
        # The mutable default dicts are shared between calls but are only
        # read, never mutated, so they are safe here.
        escape = escape or self.escape
        results = []
        here = 0
        # XXX Note that this regular expression does not allow for the
        # hyperlinking of arbitrary strings being used as method
        # names. Only methods with names consisting of word characters
        # and '.'s are hyperlinked.
        pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
                                r'RFC[- ]?(\d+)|'
                                r'PEP[- ]?(\d+)|'
                                r'(self\.)?((?:\w|\.)+))\b')
        # Walk the text match by match, HTML-escaping the plain spans and
        # replacing each match with a link or emphasis markup.
        while 1:
            match = pattern.search(text, here)
            if not match: break
            start, end = match.span()
            results.append(escape(text[here:start]))
            all, scheme, rfc, pep, selfdot, name = match.groups()
            if scheme:
                # Bug fix: double quotes inside a URL must become &quot;
                # or they terminate the href attribute.  (The original
                # replace('"', '"') was a no-op.)
                url = escape(all).replace('"', '&quot;')
                results.append('<a href="%s">%s</a>' % (url, url))
            elif rfc:
                url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif pep:
                url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif text[end:end+1] == '(':
                # A name followed by '(' is treated as a callable and
                # linked via the supplied symbol tables.
                results.append(self.namelink(name, methods, funcs, classes))
            elif selfdot:
                results.append('self.<strong>%s</strong>' % name)
            else:
                results.append(self.namelink(name, classes))
            here = end
        results.append(escape(text[here:]))
        return ''.join(results)
    def docroutine(self, object, name, mod=None,
                   funcs={}, classes={}, methods={}, cl=None):
        """Produce HTML documentation for a function or method object.

        *object* may also be an (argspec, docstring) tuple supplied by an
        instance's _get_method_argstring/_methodHelp hooks.
        """
        anchor = (cl and cl.__name__ or '') + '-' + name
        note = ''
        title = '<a name="%s"><strong>%s</strong></a>' % (
            self.escape(anchor), self.escape(name))
        if inspect.ismethod(object):
            # Python 2 introspection: im_func holds the underlying function.
            args, varargs, varkw, defaults = inspect.getargspec(object.im_func)
            # exclude the argument bound to the instance, it will be
            # confusing to the non-Python user
            argspec = inspect.formatargspec (
                    args[1:],
                    varargs,
                    varkw,
                    defaults,
                    formatvalue=self.formatvalue
                )
        elif inspect.isfunction(object):
            args, varargs, varkw, defaults = inspect.getargspec(object)
            argspec = inspect.formatargspec(
                args, varargs, varkw, defaults, formatvalue=self.formatvalue)
        else:
            argspec = '(...)'
        if isinstance(object, tuple):
            # (argspec, docstring) tuple from the documentation hooks;
            # either element may be None.
            argspec = object[0] or argspec
            docstring = object[1] or ""
        else:
            docstring = pydoc.getdoc(object)
        decl = title + argspec + (note and self.grey(
               '<font face="helvetica, arial">%s</font>' % note))
        doc = self.markup(
            docstring, self.preformat, funcs, classes, methods)
        doc = doc and '<dd><tt>%s</tt></dd>' % doc
        return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
    def docserver(self, server_name, package_documentation, methods):
        """Produce HTML documentation for an XML-RPC server."""
        # Map every method name (and value) to its page anchor so the
        # package documentation can hyperlink to individual methods.
        fdict = {}
        for key, value in methods.items():
            fdict[key] = '#-' + key
            fdict[value] = fdict[key]
        server_name = self.escape(server_name)
        head = '<big><big><strong>%s</strong></big></big>' % server_name
        result = self.heading(head, '#ffffff', '#7799ee')
        doc = self.markup(package_documentation, self.preformat, fdict)
        doc = doc and '<tt>%s</tt>' % doc
        result = result + '<p>%s</p>\n' % doc
        contents = []
        # Sort for a stable, alphabetical method listing.
        method_items = sorted(methods.items())
        for key, value in method_items:
            contents.append(self.docroutine(value, key, funcs=fdict))
        result = result + self.bigsection(
            'Methods', '#ffffff', '#eeaa77', pydoc.join(contents))
        return result
class XMLRPCDocGenerator:
    """Generates documentation for an XML-RPC server.
    This class is designed as mix-in and should not
    be constructed directly.

    Mix-in contract: the concrete class must also provide the dispatcher
    attributes used by generate_html_documentation() — self.funcs,
    self.instance and system_listMethods() (supplied by
    SimpleXMLRPCDispatcher in the classes below).
    """
    def __init__(self):
        # setup variables used for HTML documentation
        self.server_name = 'XML-RPC Server Documentation'
        self.server_documentation = \
            "This server exports the following methods through the XML-RPC "\
            "protocol."
        self.server_title = 'XML-RPC Server Documentation'
    def set_server_title(self, server_title):
        """Set the HTML title of the generated server documentation"""
        self.server_title = server_title
    def set_server_name(self, server_name):
        """Set the name of the generated HTML server documentation"""
        self.server_name = server_name
    def set_server_documentation(self, server_documentation):
        """Set the documentation string for the entire server."""
        self.server_documentation = server_documentation
    def generate_html_documentation(self):
        """generate_html_documentation() => html documentation for the server
        Generates HTML documentation for the server using introspection for
        installed functions and instances that do not implement the
        _dispatch method. Alternatively, instances can choose to implement
        the _get_method_argstring(method_name) method to provide the
        argument string used in the documentation and the
        _methodHelp(method_name) method to provide the help text used
        in the documentation."""
        methods = {}
        for method_name in self.system_listMethods():
            if method_name in self.funcs:
                # Directly registered function: document it by introspection.
                method = self.funcs[method_name]
            elif self.instance is not None:
                method_info = [None, None] # argspec, documentation
                # Prefer the instance's own documentation hooks if present.
                if hasattr(self.instance, '_get_method_argstring'):
                    method_info[0] = self.instance._get_method_argstring(method_name)
                if hasattr(self.instance, '_methodHelp'):
                    method_info[1] = self.instance._methodHelp(method_name)
                method_info = tuple(method_info)
                if method_info != (None, None):
                    # At least one hook answered: document via the
                    # (argspec, docstring) tuple.
                    method = method_info
                elif not hasattr(self.instance, '_dispatch'):
                    # No hooks and no custom dispatcher: resolve the method
                    # on the instance and introspect it directly.
                    try:
                        method = resolve_dotted_attribute(
                                    self.instance,
                                    method_name
                                    )
                    except AttributeError:
                        method = method_info
                else:
                    # Custom _dispatch but no documentation hooks: there is
                    # nothing to introspect, fall back to the empty tuple.
                    method = method_info
            else:
                assert 0, "Could not find method in self.functions and no "\
                          "instance installed"
            methods[method_name] = method
        documenter = ServerHTMLDoc()
        documentation = documenter.docserver(
                                self.server_name,
                                self.server_documentation,
                                methods
                            )
        return documenter.page(self.server_title, documentation)
class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
    """XML-RPC and documentation request handler class.
    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.
    Handles all HTTP GET requests and interprets them as requests
    for documentation.
    """
    def do_GET(self):
        """Handles the HTTP GET request.
        Interpret all HTTP GET requests as requests for server
        documentation.
        """
        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return
        # self.server mixes in XMLRPCDocGenerator (see DocXMLRPCServer),
        # which supplies generate_html_documentation().
        response = self.server.generate_html_documentation()
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        # Python 2: the HTML is a byte string, so it can be written as-is.
        self.wfile.write(response)
class DocXMLRPCServer( SimpleXMLRPCServer,
                       XMLRPCDocGenerator):
    """XML-RPC and HTML documentation server.
    Adds the ability to serve server documentation to the capabilities
    of SimpleXMLRPCServer.
    """
    def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler,
                 logRequests=1, allow_none=False, encoding=None,
                 bind_and_activate=True):
        # Initialize the XML-RPC server machinery first, then the
        # documentation defaults (server name/title/description).
        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests,
                                    allow_none, encoding, bind_and_activate)
        XMLRPCDocGenerator.__init__(self)
class DocCGIXMLRPCRequestHandler(   CGIXMLRPCRequestHandler,
                                    XMLRPCDocGenerator):
    """Handler for XML-RPC data and documentation requests passed through
    CGI"""
    def handle_get(self):
        """Handles the HTTP GET request.
        Interpret all HTTP GET requests as requests for server
        documentation.
        """
        response = self.generate_html_documentation()
        # CGI protocol: emit headers, a blank line, then the body on stdout
        # (Python 2 print statements).
        print 'Content-Type: text/html'
        print 'Content-Length: %d' % len(response)
        print
        sys.stdout.write(response)
    def __init__(self):
        # Note: defined after handle_get, but that has no runtime effect.
        CGIXMLRPCRequestHandler.__init__(self)
        XMLRPCDocGenerator.__init__(self)
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
SVMClassifierNode implements Support Vector Machines (SVMs)
"""
import math
import random
import logging
import numpy
from nupic.pynodes.PyNode import (PyNode,
NodeSpec,
NodeSpecItem,
RealTypeName,
RealNumpyDType)
from nupic.pynodes import PyNode as PyNodeModule
from nupic.algorithms import svm_01, svm_dense
from nupic.analysis.memoryAwareness import MemoryAwareness
# Supported SVM kernel names; integer parameter values index into this list
# (see _setKernelType below).
_kKernelTypes = ["rbf", "linear"]
class SVMClassifierNode(PyNode, MemoryAwareness):
"""
SVMClassifierNode implements Support Vector Machines (SVMs), which can be used to
perform supervised learning by mapping a set of top-level groups beliefs onto
a set of category labels. The node is a wrapper around a modified version
of the libsvm library.
"""
  def __init__(self,
               categoriesOut=2,
               # SVM parameter ranges
               minC=0.0,
               maxC=0.0,
               minGamma=0.0,
               maxGamma=0.0,
               kernelType='rbf',
               # Latin Hypercube sampling (LHS)
               numSamplesPerRecursion=1,
               numRecursions=0,
               contractionFactor=0.3,
               numCrossValidations=5,
               # Implementation
               convEpsilon=0.01,
               useSparseSvm=False,
               inputThresh=0.500,
               useProbabilisticSvm=True,
               doSphering=False,
               deterministic=False,
               nta_cpp_svm_seed=-1,
               # PCA
               numSVDSamples=None,
               numSVDDims=None,
               fractionOfMax=None,
               useAuxiliary=False,
               justUseAuxiliary=False,
               discardProblem=True, # Set to False to keep data for PCA visualizer
               # KNN-type stuff
               keepSamples=False, # Keep the original inputs around
               calculateDistances=False, # Calculate and store distances to orig. inputs
               nta_monitorMemory=False,
               ):
    """
    @param categoriesOut -- The maximum number of distinct category
                         labels that can be learned.

    The remaining keyword arguments fall into five groups, mirroring the
    comment markers in the signature: the SVM hyper-parameter search ranges
    (minC/maxC, minGamma/maxGamma, kernelType), Latin Hypercube sampling of
    that range, libsvm binding options, optional PCA/SVD dimensionality
    reduction, and KNN-style bookkeeping of the original training samples.
    """
    # NOTE(review): categoriesOut is documented but not stored on self in
    # this method; presumably it is consumed by the node-spec machinery
    # elsewhere -- confirm.
    MemoryAwareness.__init__(self)
    # NOTE(review): clear() is called again at the very end of __init__;
    # this first call looks redundant -- confirm before removing.
    self.clear()
    self._firstComputeCall = True
    self._inputVector = None
    self._scanInfo = None
    self._scanResults = None
    # SVM parameters
    self.C = minC
    self.minC = minC
    self.maxC = maxC
    self.gamma = minGamma
    self.minGamma = minGamma
    self.maxGamma = maxGamma
    self._kernelType = kernelType
    # Latin Hypercube Sampling
    self.numSamplesPerRecursion = numSamplesPerRecursion
    self.numRecursions = numRecursions
    self.contractionFactor = contractionFactor
    self.numCrossValidations = numCrossValidations
    # Bindings parameters
    self.convEpsilon = convEpsilon
    self.useSparseSvm = useSparseSvm
    self.inputThresh = inputThresh
    self.useProbabilisticSvm = useProbabilisticSvm
    self.doSphering = doSphering
    self.deterministic = deterministic
    self.cpp_svm_seed = nta_cpp_svm_seed
    # PCA
    self._numSVDSamples = numSVDSamples
    self._numSVDDims = numSVDDims
    self._fractionOfMax = fractionOfMax
    self._useAuxiliary = useAuxiliary
    self._justUseAuxiliary = justUseAuxiliary
    self._auxInputLen = None
    # The string sentinel 'adaptive' turns on adaptive SVD dimensionality.
    if numSVDDims=='adaptive':
      self._adaptiveSVDDims = True
    else:
      self._adaptiveSVDDims = False
    self.discardProblem = discardProblem
    # KNN-type stuff
    self.keepSamples = keepSamples
    self.calculateDistances = calculateDistances
    # Memory monitoring
    self._enableMonitoring(nta_monitorMemory)
    self._initRandom()
    self.clear()
  def clear(self):
    """Clear all persistent internal state.

    Resets the node to its untrained condition: learning on, inference
    off, no SVM model, no cached samples or labels, and no sphering/PCA
    statistics.  Called twice from __init__ and usable on its own.
    """
    self._learningMode = True
    self._inferenceMode = False
    self._autoTuningData = False
    self._inputWidth = None
    self._catIdMap = None
    self._svm = None
    self._svmParams = None
    self._samples = None
    self._labels = None
    self._partitionIds = None
    self._upcomingPartitionIds = None
    # Support for using non-training samples for
    # (C, Gamma) optimization
    self._autoTuneSamples = None
    self._autoTuneLabels = None
    self._autoTunePartitionIds = None
    # We operate in two modes:
    #   'classification' - the SVM is used for top-level classification
    #   'feedback' - the SVM is a "temporary" classified whose real purpose
    #             is simply to identify the most useful features
    #             so that this information can be fed back to the
    #             feature selection stage for pruning purposes.
    # The only difference is the choice of kernel; for 'classification'
    # mode, then kernel is selected based on the value of the parameter
    # 'kernelType'.  But in 'feedback' mode, the kernel is always
    # of type 'linear' regardless of the value of 'kernelType'.
    self._mode = 'classification'
    # Sphering normalization
    self._normOffset = None
    self._normScale  = None
    # PCA
    self._numPatterns = 0
    self._s = None
    self._vt = None
    self._mean = None
    # KNN-type stuff
    self.distances = None
    self._distanceCount = 0
def _initRandom(self):
"""
Create and seed random number generator.
"""
# Create PRNG
self._rng = random.Random()
# Seed
if self.deterministic:
self._rng.seed(42)
else:
self._rng.seed()
  def _setInferenceMode(self, parameterValue):
    # If inference mode is being turned on, build the SVM model and turn off learning mode
    # This node only supports a one-time switch from learning mode to inference mode
    # Values arrive from the runtime as strings/ints; coerce to bool.
    value = bool(int(parameterValue))
    if self._learningMode and value:
      self._finishLearning()
    # _finishLearning() (defined elsewhere) is expected to perform the
    # actual switch; fail loudly if the transition did not happen.
    assert self._inferenceMode == value
  inferenceMode = property(
      fget=lambda self: self._inferenceMode,
      fset=_setInferenceMode,
      doc="""Boolean indicating whether or not a node
          is in inference mode"""
    )
  def _setLearningMode(self, parameterValue):
    # If learning mode is being turned off, build the SVM model and turn on inference mode
    # This node only supports a one-time switch from learning mode to inference mode
    # Values arrive from the runtime as strings/ints; coerce to bool.
    value = bool(int(parameterValue))
    if self._learningMode and not value:
      self._finishLearning()
    # _finishLearning() (defined elsewhere) is expected to perform the
    # actual switch; fail loudly if the transition did not happen.
    assert self._learningMode == value
  learningMode = property(
      fget=lambda self: self._learningMode,
      fset=_setLearningMode,
      doc="""Boolean indicating whether or not a node
          is in learning mode"""
    )
def _setMode(self, parameterValue):
# We can only change this before we have received our first compute()
# call. Otherwise it is a runtime error.
if self._svm is not None:
raise RuntimeError, "SVMClassifierNode 'mode' parameter cannot be changed" \
"after first compute() call"
self._mode = parameterValue
mode = property(
fget=lambda self: self._mode,
fset=_setMode,
doc="""We operate in two modes: 'classification' - the SVM is
used for top-level classification; 'feedback' - the SVM is a "temporary"
classified whose real purpose is simply to identify the most useful
features so that this information can be fed back to the feature
selection stage for pruning purposes. The only difference is the
choice of kernel; for 'classification' mode, then kernel is selected
based on the value of the parameter 'kernelType'. But in 'feedback'
mode, the kernel is always of type 'linear' regardless of the value
of 'kernelType'."""
)
def _setKernelType(self, parameterValue):
# Convert in case they passed an integer, since technically we are an
# enum...
try: parameterValue = _kKernelTypes[parameterValue]
except: pass
self._kernelType = parameterValue
kernelType = property(
fget=lambda self: self._kernelType,
fset=_setKernelType,
# Doc provided with constraints in NodeSpec.
)
  def _set_nta_cpp_svm_seed(self, parameterValue):
    # Seed forwarded to the C++ libsvm bindings (default -1 in __init__;
    # presumably -1 lets the implementation pick its own seed -- confirm).
    self.cpp_svm_seed = parameterValue
  nta_cpp_svm_seed = property(fget=lambda self: self.cpp_svm_seed,
                              fset=_set_nta_cpp_svm_seed)
def compute(self, nodeInfo, inputs, outputs):
    """
    Process one input sample.
    This method is called by the runtime engine.

    In learning mode the (optionally PCA-projected) input vector and its
    category label are stored via _learn(); in inference mode the vector is
    classified via _infer() and the belief values are written to the
    'categoriesOut' output, remapped from internal category indices back to
    the sensor's category IDs.

    @param nodeInfo -- runtime node handle (unused here).
    @param inputs -- dict of input ports ('bottomUpIn', 'categoryIn', and
        optionally 'auxDataIn').
    @param outputs -- dict of output ports ('categoriesOut').
    """
    # If the first time being called, then print potential warning messages
    if self._firstComputeCall:
        self._firstComputeCall = False
        if self._useAuxiliary:
            # print "\n Auxiliary input stream from Image Sensor enabled."
            if self._justUseAuxiliary == True:
                print " Warning: You have chosen to ignore the image data and instead just use the auxiliary data stream."
    if self._scanInfo is not None:
        # The input is much larger than what we've been trained on.
        # Sweep across it, assembling the outputs in a data structure.
        self._scan(inputs)
        return
    # Assemble inputs: concatenate all children's output vectors into one.
    childInputs = [x.wvector(0) for x in inputs['bottomUpIn']]
    inputVector = numpy.concatenate([x.array() for x in childInputs])
    # # Look for auxiliary input
    # if self._useAuxiliary==True:
    #   auxVector = inputs['auxDataIn'][0].wvector(0).array()
    #   if auxVector.dtype != numpy.float32:
    #     raise RuntimeError, "SVMClassifierNode expects numpy.float32 for the auxiliary data vector"
    #   if self._justUseAuxiliary == True:
    #     inputVector = inputs['auxDataIn'][0].wvector(0).array()
    #   else:
    #     inputVector = numpy.concatenate([inputVector, inputs['auxDataIn'][0].wvector(0).array()])
    # Look for auxiliary input; when used, the aux vector is appended to
    # (or replaces) the image-derived input vector.
    if self._useAuxiliary:
        auxVector = inputs['auxDataIn'][0].wvector(0).array()
        if auxVector.dtype != numpy.float32:
            raise RuntimeError, "SVMClassifierNode expects numpy.float32 for the auxiliary data vector"
        if self._justUseAuxiliary == True:
            inputVector = auxVector
        else:
            inputVector = numpy.concatenate([inputVector, auxVector])
    else:
        auxVector = numpy.array([])
    # Remember the auxiliary width once; used during inference to split the
    # aux dims back off before projecting onto the PCA basis.
    if self._auxInputLen == None:
        self._auxInputLen = len(auxVector)
    # Initialize data structures the first time
    if self._inputWidth is None:
        self._initDataStructures(len(inputVector))
    # Learn
    if self._learningMode:
        # Extract category label
        catInput = int(inputs["categoryIn"][0].wvector(0)[0])
        # Project the vector onto the PCA basis if present
        if self._vt is not None:
            # NOTE(review): unlike the inference branch below, the aux dims
            # are not stripped off before this projection -- confirm shapes
            # line up when auxiliary input and in-training SVD are combined.
            inputVector = numpy.dot(self._vt, inputVector - self._mean)
            if self._useAuxiliary and not self._justUseAuxiliary:
                inputVector = numpy.concatenate([inputVector, auxVector])
        # Pop the partition ID queued for this sample, if any.
        if self._upcomingPartitionIds:
            partitionId = self._upcomingPartitionIds.pop(0)
        else:
            partitionId = None
        # Perform learning
        self._learn(inputVector, catInput, partitionId)
        # Perform PCA if it is time to do so
        if self._vt is None and self._numSVDDims is not None \
                and self._numSVDSamples is not None \
                and self._samples.shape[0] == self._numSVDSamples:
            self.computeSVD()
    # Inference
    elif self._inferenceMode:
        # Project the vector onto the PCA basis if present
        if self._vt is not None:
            if self._useAuxiliary and not self._justUseAuxiliary:
                # Strip the trailing aux dims, project only the image part,
                # then re-append the aux dims unprojected.
                auxVector = inputVector[len(inputVector)-self._auxInputLen:]
                inputVector = inputVector[:len(inputVector)-self._auxInputLen]
                inputVector = numpy.dot(self._vt, inputVector - self._mean)
                inputVector = numpy.concatenate([inputVector, auxVector])
            else:
                inputVector = numpy.dot(self._vt, inputVector - self._mean)
        # Save the input vector, in case the distances are requested later
        self._inputVector = inputVector
        allOutputs = outputs['categoriesOut'].wvector()
        inferenceResult = self._infer(inputVector)
        allOutputs.fill(0)
        # Convert from internal indices back to ImageSensor's IDs.
        # This is necessary to calculate accuracy by comparing our outputs
        # to the true category from the sensor.
        remap = [self.catIndexToId(i) for i in xrange(len(self._catIdMap))]
        out = numpy.zeros(max(remap)+1)
        out[remap] = inferenceResult[0:len(self._catIdMap)]
        nout = min(len(allOutputs), len(out))
        allOutputs[0:nout] = out[0:nout]
def _scan(self, inputs):
    """
    Run scanning inference and store the results.
    The input is from many nodes, but we were trained with just a single child.
    Perform inference on each node separately and store the per-node belief
    vectors (remapped to sensor category IDs) as tuples in self._scanResults.
    """
    childInputs = [x.wvector(0).array() for x in inputs['bottomUpIn']]
    self._scanResults = []
    for inputVector in childInputs:
        # Project the vector onto the PCA basis if present
        if self._vt is not None:
            inputVector = numpy.dot(self._vt, inputVector - self._mean)
        # Run inference
        inferenceResult = self._infer(inputVector)
        # Convert from internal indices back to ImageSensor's IDs.
        # This is necessary to calculate accuracy by comparing our outputs
        # to the true category from the sensor.
        # NOTE(review): remap is loop-invariant and could be hoisted above
        # the loop.
        remap = [self.catIndexToId(i) for i in xrange(len(self._catIdMap))]
        out = numpy.zeros(max(remap)+1)
        out[remap] = inferenceResult[0:len(self._catIdMap)]
        # Store
        self._scanResults.append(tuple(out))
# @todo -- Modernize nodeSpec
def getNodeSpec(self):
    """
    Return the NodeSpec for this PyNode.

    The spec declares the node's inputs, outputs, and parameters (merged
    with the base PyNode spec via the '+' operator at the end).
    """
    parent = PyNode.getNodeSpec(self)
    out = NodeSpec(
        description=SVMClassifierNode.__doc__,
        inputs=[
            NodeSpecItem(name="categoryIn", type=RealTypeName,
                description="""Category of the input sample"""),
            NodeSpecItem(name="bottomUpIn", type=RealTypeName,
                description="""Belief values over children's groups"""),
            NodeSpecItem(name="auxDataIn", type=RealTypeName,
                description="""Auxiliary data from the sensor""")
        ],
        outputs=[
            NodeSpecItem(name="categoriesOut", type=RealTypeName,
                description="A vector representing, for each category" \
                " index, the likelihood that the input to" \
                " the node belongs to that category.")
        ],
        parameters=[
            # Mode flags
            NodeSpecItem(name="learningMode", type="bool", constraints="bool", access="gs", value = True,
                description="Boolean indicating whether or not a node " \
                "is in learning mode"),
            NodeSpecItem(name="inferenceMode", type="bool", constraints="bool", access="gs", value = False,
                description="Boolean indicating whether or not a node " \
                "is in inference mode"),
            NodeSpecItem(name="activeOutputCount", type="uint", access="g",
                description="The number of active elements in the " \
                "'categoriesOut' output."),
            NodeSpecItem(name="categoryCount", type="uint", access="g",
                description="An integer indicating the number of " \
                "categories that have been learned"),
            # SVM hyperparameters (C, gamma) and their search ranges
            NodeSpecItem(name="C", type="float", access="g", value = 0.0,
                description="""The current value of C, an SVM parameter that
                influences the error rate. If numRecursions==0, this value is
                used to build the SVM; otherwise, minC and maxC are used,
                and this parameter is changed to the best value afterwards."""),
            NodeSpecItem(name="minC", type="float", access="cg", value = 0.0,
                description="""The minimum value of C, an SVM parameter that
                influences the error rate. SVMClassifierNode will
                perform an optimization process in order to find the
                value of C that minimizes error
                rate on the set of training samples."""),
            NodeSpecItem(name="maxC", type="float", access="cg", value = 0.0,
                description="""The maximum value of C, an SVM parameter that
                influences the error rate."""),
            NodeSpecItem(name="gamma", type="float", access="g", value = 0.0,
                description="""The current value of gamma, an SVM parameter that
                influences the error rate. If numRecursions==0, this value is
                used to build the SVM; otherwise, minGamma and maxGamma are used,
                and this parameter is changed to the best value afterwards."""),
            NodeSpecItem(name="minGamma", type="float", access="cg", value = 0.0,
                description="""The minimum value of Gamma, an SVM parameter that
                influences the error rate. SVMClassifierNode will perform an
                optimization process in order to find the value of Gamma that
                minimizes error rate on the set of training samples."""),
            NodeSpecItem(name="maxGamma", type="float", access="cg", value = 0.0,
                description="""The maximum value of Gamma, an SVM parameter that
                influences the error rate."""),
            NodeSpecItem(name="kernelType", type="string", access="cgs", value= "rbf",
                description="""Specifies the type of kernel to use. Valid choices
                are: 'rbf' (for radial basis function kernel), or 'linear' (for
                linear kernel.) Default is 'rbf'.""",
                constraints="enum: %s,%s" % (
                    ",".join(_kKernelTypes),
                    ",".join(map(str, xrange(len(_kKernelTypes))))
                )),
            # Latin Hypercube Sampling parameter-search controls
            NodeSpecItem(name="numRecursions", type="uint", access="cgs", value = 0,
                description="""The number of rounds of recursive Latin Hypercube Sampling
                to perform. If set to 0 (the default), then no parameter search is
                performed, and instead the values 'minC' and 'minGamma' are used."""),
            NodeSpecItem(name="numSamplesPerRecursion", type="uint", access="gs", value = 1,
                description="""The number of samples we will test in each round of
                recursive Latin Hypercube Sampling."""),
            NodeSpecItem(name="contractionFactor", type="float", access="cgs", value = 0.3,
                description="""The fraction of the C and Gamma sampling space that we will
                sample with each successive round of Latin Hypercube sampling."""),
            NodeSpecItem(name="numCrossValidations", type="uint", access="cg", value = 5,
                description="""The number of cross validation steps
                steps that are used to estimate the error rate for a given
                set of C and Gamma values. Increasing this number will yield
                better error rate estimates, and hence more optimal values
                for C and Gamma, but will take longer to complete the
                learning process."""),
            NodeSpecItem(name="autoTuningData", type="bool", constraints="bool", access="gs",
                description="""Data presented to the classifier while autoTuningData is set to True
                will be used as a test set for the auto-tuning phase and is NOT
                learned by the node. The auto-tuning phase is performed at the
                end of learning (when inferenceMode is first set to True)."""),
            NodeSpecItem(name="convEpsilon", type="float", access="cg", value = 0.01,
                description="""A parameter that controls the convergence
                criterion used to halt the search for optimal SVM hyperplanes."""),
            # SVM model construction options
            NodeSpecItem(name="useSparseSvm", type="bool", constraints="bool", access="gc", value = False,
                description="""A boolean that controls whether input vectors
                should be binarized during learning and inference."""),
            NodeSpecItem(name="inputThresh", type="float", access="cg", value = 0.5,
                description="""A floating point value that establishes the threshold
                used for binarizing input vectors during learning and inference.
                If 'useSparseSvm' is False, then this parameter has no effect."""),
            NodeSpecItem(name="useProbabilisticSvm", type="bool", constraints="bool", access="cg", value = True,
                description="""A boolean that controls whether or not to
                build an underlying SVM model that is capable of estimating
                probabilistic beliefs during inference, as opposed to simply
                determining a single winner-take-all category."""),
            NodeSpecItem(name="doSphering", type="bool", constraints="bool", access="cg", value = False,
                description="""A boolean indicating whether or not data should
                be "sphered" (i.e., each dimension should be normalized such that
                its mean and variance are zero and one, respectively.) This
                sphering normalization would be performed after all training
                samples had been received but before the actual SVM model was
                built. The dimension-specific normalization constants would then
                be applied to all future incoming vectors prior to submitting them
                to the SVM library for inference."""),
            NodeSpecItem(name="mode", type="string", access="gs",
                description="""We operate in two modes: 'classification' - the SVM is
                used for top-level classification; 'feedback' - the SVM is a "temporary"
                classified whose real purpose is simply to identify the most useful
                features so that this information can be fed back to the feature
                selection stage for pruning purposes. The only difference is the
                choice of kernel; for 'classification' mode, then kernel is selected
                based on the value of the parameter 'kernelType'. But in 'feedback'
                mode, the kernel is always of type 'linear' regardless of the value
                of 'kernelType'."""),
            NodeSpecItem(name="deterministic", type="bool", constraints="bool", access="cgs", value = False,
                description="""Set true to seed random number generator so that the SVM
                picks the same hyperplanes each time during learning. Only has an
                effect when finishLearning is called."""),
            # SVD / PCA projection options
            NodeSpecItem(name='numSVDSamples', type='PyObject', access='gcs', value=None,
                description="""If not None, carries out SVD transformation after
                that many samples have been seen. Otherwise, performs SVD when
                switching to inference."""),
            NodeSpecItem(name='numSVDDims', type='PyObject', access='gc', value=None,
                description="""Number of dimensions to keep after SVD.
                If set to 'adaptive' the number is chosen automatically."""),
            NodeSpecItem(name='fractionOfMax', type='PyObject', access='gc', value=None,
                description="""The smallest singular value which is retained
                as a fraction of the largest singular value. This is used
                only if numSVDDims=='adaptive'."""),
            NodeSpecItem(name='useAuxiliary', type='PyObject', access='gc', value=None,
                description="""Whether or not the classifier should use auxiliary
                input data."""),
            NodeSpecItem(name='justUseAuxiliary', type='PyObject', access='gc', value=None,
                description="""Whether or not the classifier should ONLY use the auxiliary
                input data."""),
            # Miscellaneous / debugging options
            NodeSpecItem(name='discardProblem', type='bool', access='cgs',
                description="""Whether to discard extra SVM data to save space. Set to False
                to preserve the data for the PCA visualizer."""),
            NodeSpecItem(name='nta_cpp_svm_seed', type='int', access='cgs', value=-1,
                description="""Seed for the C++ RNG"""),
            NodeSpecItem(name='keepSamples', type='bool', access='cgs', value=False),
            NodeSpecItem(name='calculateDistances', type='bool', access='cgs', value=False),
            NodeSpecItem(name='nta_monitorMemory', type='bool', access='cgs',
                description="""Whether to spawn an internal thread that monitors the
                amount of memory being consumed during processing and reports
                statistics""", value=False),
            NodeSpecItem(name="numTrainingSamples", type="int", access="g", value = 0,
                description="""Returns the current number of training samples."""),
        ]
    )
    return out + parent
def catIndexToId(self, catIndex):
    """
    Translate an internal category index into its external (ImageSensor)
    category ID, using the mapping built during finishLearning.
    """
    idMap = self._catIdMap
    return idMap[catIndex]
def _getHyperplanes(self):
    """
    Return a numpy array containing the complete set of hyperplanes used
    by the (trained) SVM classifier. In general there will be N(N-1)/2
    hyperplanes, where N is the number of categories; each hyperplane is
    one row whose width equals the number of "features" presented to the
    SVM during training and inference.
    """
    model = self._svm.get_model()
    return model.get_hyperplanes()
def simulateTrainingSample(self, inputWidth=None, category=None, partitionId=None):
    """
    Debugging/profiling utility: fabricate and store one random training
    sample, as if it had been presented through compute().

    @param inputWidth -- width of the fabricated vector; defaults to the
        node's input width.
    @param category -- label to store with the sample; defaults to 0.
    @param partitionId -- optional partition ID (appended only if not None).
    """
    if inputWidth is None:
        inputWidth = self._inputWidth
    if category is None:
        category = 0
    if self._samples is None:
        # First sample ever: allocate empty storage for samples, labels
        # and partition IDs.
        assert self._labels is None
        assert self._partitionIds is None
        self._samples = numpy.zeros((0, inputWidth), dtype=RealNumpyDType)
        self._labels = []
        self._partitionIds = []
    # Append a random sample vector and its category label.
    randomSample = numpy.random.random((1, inputWidth))
    self._samples = numpy.concatenate((self._samples, randomSample), axis=0)
    self._labels.append(category)
    if partitionId is not None:
        self._partitionIds.append(partitionId)
def getParameter(self, parameterName, nodeSet=""):
    """
    Get the value of a parameter.

    Note: this method may be overridden by derived classes, but if so, then
    the derived class should invoke this base method if 'parameterName'
    is unknown to the derived class. Names unknown here are delegated to
    PyNode.getParameter.

    @param parameterName -- the name of the parameter to retrieve, as defined
        by the Node Spec.
    @param nodeSet -- passed through to the base class for unknown names.
    """
    if parameterName == "hyperplanes":
        return self._getHyperplanes()
    if parameterName in ("numCategoriesSeen", "categoryCount"):
        if self._inferenceMode:
            # Use the catIdMap, which is created during finishLearning
            return len(self._catIdMap)
        if self._labels:
            # Use the labels, which are not thrown away until
            # finishLearning; don't count -1, if it appears.
            return len(set(self._labels)) - int(-1 in self._labels)
        return 0
    # Simple attribute-backed parameters.
    attrByName = {
        "numSVDSamples": "_numSVDSamples",
        "numSVDDims": "_numSVDDims",
        "fractionOfMax": "_fractionOfMax",
        "useAuxiliary": "_useAuxiliary",
    }
    if parameterName in attrByName:
        return getattr(self, attrByName[parameterName])
    if parameterName == "numTrainingSamples":
        return self.getNumTrainingSamples()
    return PyNode.getParameter(self, parameterName, nodeSet)
def setParameter(self, parameterName, parameterValue, nodeSet=""):
    """Set the value of a parameter; unknown names go to the base class."""
    if parameterName != "numSVDSamples":
        PyNode.setParameter(self, parameterName, parameterValue, nodeSet)
    else:
        self._numSVDSamples = parameterValue
def getNumTrainingSamples(self):
    """Return the number of training samples collected so far (0 if none)."""
    stored = self._samples
    return 0 if stored is None else stored.shape[0]
def _getActiveOutputCount(self):
    """
    Number of active elements in the 'categoriesOut' output: the largest
    known category ID plus one.

    Uses the catIdMap (created during finishLearning) when available,
    otherwise the raw training labels; returns 0 when neither holds any
    entries. (Bug fix: the original tested 'is not None' and crashed with
    ValueError on max() of an empty list.)
    """
    if self._catIdMap:
        return max(self._catIdMap) + 1
    elif self._labels:
        return max(self._labels) + 1
    else:
        return 0
activeOutputCount = property(fget=_getActiveOutputCount)
# Support for using non-training data for the (C, Gamma)
# parameter optimization search.
def _setAutoTuningData(self, value):
    """Setter for 'autoTuningData'; may trigger an early SVD."""
    self._autoTuningData = value
    if value and self._numSVDDims is not None and self._vt is None:
        # Run SVD now, so the autotuning samples can be stored already
        # projected (saves space).
        self.computeSVD()

def _getAutoTuningData(self):
    """Getter for 'autoTuningData'."""
    return self._autoTuningData

autoTuningData = property(_getAutoTuningData, _setAutoTuningData)
def __setstate__(self, state):
    """
    Restore instance state from a serialized (pickled) dict.

    Also registers this node in a module-global list (used by scanning or
    other tomfoolery) and back-fills attributes that were not defined in
    older saved networks, for backward compatibility.
    """
    PyNodeModule.nodes = getattr(PyNodeModule, 'nodes', []) + [self]
    self.__dict__.update(state)
    self._firstComputeCall = True
    # Defaults for attributes absent from older serialized networks.
    compatDefaults = dict(_vt=None,
                          deterministic=False,
                          discardProblem=True,
                          cpp_svm_seed=-1,
                          keepSamples=False,
                          calculateDistances=False,
                          _partitionIds=[],
                          C=self.minC,
                          gamma=self.minGamma,
                          _autoTuningData=False,
                          _autoTuneSamples=None,
                          _autoTuneLabels=None,
                          _autoTunePartitionIds=None,
                          distances=None,
                          _upcomingPartitionIds=None,
                          _monitorMemory=False,
                          _distanceCount=0,
                          _adaptiveSVDDims=False,
                          _fractionOfMax=None,
                          _useAuxiliary=False,
                          _inputVector=None,
                          _auxInputLen=None,
                          _scanInfo=None,
                          _scanResults=None)
    for name, default in compatDefaults.items():
        self.__dict__.setdefault(name, default)
    self._initRandom()
def __getstate__(self):
    """
    Return serializable state: a copy of __dict__ with all "ephemeral"
    members removed. "Ephemeral" members are those that do not need to be
    (nor should be) stored in any kind of persistent file (e.g., NuPIC
    network XML file.)
    """
    state = dict(self.__dict__)
    for name in self._getEphemeralMembers():
        state.pop(name, None)
    return state
def _getEphemeralMembers(self):
    """
    Return the names of all attributes that must not be serialized.
    """
    return ['_rng',
            '_autoTuneSamples',
            '_autoTuneLabels',
            '_autoTunePartitionIds']
def _initDataStructures(self, inputWidth):
    """
    Initialize internal data structures (called lazily on the first
    input vector, once its width is known).

    @param inputWidth -- dimensionality of the incoming input vectors.
    """
    self._inputWidth = inputWidth
    self._catIdMap = None
    self._initSvm()
def _initSvm(self, n_dims=None):
    """
    Initialize SVM Engine: use the SWIG bindings to create an instance
    of an SVM classifier engine (sparse 'svm_01' or 'svm_dense').

    Kernel selection: in 'feedback' mode a linear kernel is always used;
    otherwise the user-specified 'kernelType' parameter chooses.
    #  0 = linear
    #  1 = rbf

    @param n_dims -- dimensionality of input vectors; defaults to the
        node's input width.
    """
    if self._mode == 'feedback' or self._kernelType == 'linear':
        kernelType = 0
    elif self._kernelType == 'rbf':
        kernelType = 1
    else:
        # Bug fix: the original left 'kernelType' unbound here, producing
        # a confusing NameError below; fail explicitly instead.
        raise ValueError("Unknown kernel type: %r" % (self._kernelType,))
    if n_dims is None:
        n_dims = self._inputWidth
    if self.useSparseSvm:
        self._svm = svm_01(kernelType,
                           n_dims=n_dims,
                           threshold=self.inputThresh,
                           probability=self.useProbabilisticSvm,
                           seed=self.cpp_svm_seed)
    else:
        self._svm = svm_dense(kernelType,
                              n_dims=n_dims,
                              probability=self.useProbabilisticSvm,
                              seed=self.cpp_svm_seed)
def _learn(self, inputVector, trueCatIndex, partitionId=None):
    """
    Store current input vector and associated category index.

    @param inputVector -- the (possibly PCA-projected) input sample.
    @param trueCatIndex -- category label supplied by the sensor.
    @param partitionId -- optional partition ID for cross-validation.
    """
    assert self._svm is not None
    # If we are sphering, then we can't provide the data to the SVM
    # library until we have computed per-dimension normalization constants.
    # So instead, we'll just store each training sample.
    # Note: now we always do this, because the labels are used to build the
    # category mapping later.
    self._storeSample(trueCatIndex, inputVector, partitionId)
def _storeSample(self, trueCatIndex, inputVector, partitionId):
    """
    Store a training sample and associated category label.

    Samples presented while 'autoTuningData' is True are held out in the
    _autoTune* buffers (used only to score (C, Gamma) candidates); all
    other samples go into the main _samples/_labels training buffers.

    @param trueCatIndex -- category label for this sample.
    @param inputVector -- 1-D sample vector.
    @param partitionId -- optional partition ID (appended only if not None).
    """
    # If the incoming samples are not training data, but instead
    # are testing data to be used only for parameter optimization,
    # then store them separately.
    if self._autoTuningData:
        # If this is the first sample, then allocate a numpy array
        # of the appropriate size in which to store all samples.
        if self._autoTuneSamples is None:
            self._autoTuneSamples = numpy.zeros((0, inputVector.shape[0]),
                                                dtype=RealNumpyDType)
            assert self._autoTuneLabels is None
            self._autoTuneLabels = []
            assert self._autoTunePartitionIds is None
            self._autoTunePartitionIds = []
        # Add the sample vector and category label
        self._autoTuneSamples = numpy.concatenate((self._autoTuneSamples,
                                                   numpy.atleast_2d(inputVector)), axis=0)
        self._autoTuneLabels += [trueCatIndex]
        if partitionId is not None:
            self._autoTunePartitionIds.append(partitionId)
    # Normal mode (incoming samples are training data)
    else:
        # If this is the first sample, then allocate a numpy array
        # of the appropriate size in which to store all samples.
        if self._samples is None:
            self._samples = numpy.zeros((0, self._inputWidth), dtype=RealNumpyDType)
            assert self._labels is None
            self._labels = []
            assert self._partitionIds is None
            self._partitionIds = []
        # Add the sample vector and category label
        self._samples = numpy.concatenate((self._samples, numpy.atleast_2d(inputVector)), axis=0)
        self._labels += [trueCatIndex]
        if partitionId is not None:
            self._partitionIds.append(partitionId)
def _infer(self, sample):
    """
    Consult the trained SVM to classify one input vector.

    Returns a belief vector over the SVM's internal category indices:
    probability estimates when useProbabilisticSvm is set, otherwise a
    one-hot vector for the winning category.
    """
    if self.calculateDistances:
        # Calculate distances in the original input space (pre-sphering,
        # post-PCA) if specified, effectively being a KNN.
        self._calculateAndStoreDistance(sample)
    if self.doSphering:
        # Apply the normalization constants from _finishSphering().
        sample = (sample + self._normOffset) * self._normScale
    numCats = self._svm.get_model().n_class()
    belief = numpy.zeros(numCats, dtype=RealNumpyDType)
    if self.useProbabilisticSvm:
        # predict_probability fills 'belief' in place.
        self._svm.predict_probability(sample, belief)
    else:
        winner = int(self._svm.predict(sample))
        belief[winner] = 1.0
    return belief
def _finishSphering(self):
    """
    Compute per-dimension normalization constants from the stored training
    samples and return a sphered copy of them (zero mean, unit variance per
    dimension). The constants (_normOffset, _normScale) are kept on self so
    future incoming vectors can be normalized the same way before being
    submitted to the SVM library for inference.
    """
    # Offset that shifts each dimension's mean to zero.
    self._normOffset = -self._samples.mean(axis=0)
    # Don't modify _samples in-place; it may be saved and used later.
    sphered = self._samples + self._normOffset
    # Scale each dimension's variance to one, guarding against dimensions
    # whose variance is zero.
    variance = sphered.var(axis=0)
    variance[variance == 0.0] = 1.0
    self._normScale = 1.0 / numpy.sqrt(variance)
    sphered *= self._normScale
    return sphered
def _finishLearning(self):
    """
    Use the C++ implementation to build an SVM Model.

    Runs once at the learning->inference transition: builds the category
    ID map, optionally spheres the data and/or computes the SVD basis,
    feeds the stored samples to the SVM engine, optionally searches for
    the best (C, gamma) via recursive Latin Hypercube sampling, trains
    the final model, and flips the node into inference mode.
    """
    self._beginMemoryMonitoring("_finishLearning")
    print 'Finish learning'
    # Set progress to 0
    PyNodeModule.finishLearningProgress = 0.0
    if self._upcomingPartitionIds:
        raise Exception("Switching to inference, but upcomingPartitionIds is "
                        "not empty: %s" % self._upcomingPartitionIds)
    # Compute the mapping of ImageSensor IDs to internal indices.
    # Only insert ImageSensor IDs if they were seen during training.
    # self._catIdMap = sorted(list(set(self._labels)))
    # the above causes bugs!!!
    # (Insertion order is preserved here; -1 marks discarded samples.)
    self._catIdMap = []
    for label in self._labels:
        if label not in self._catIdMap and label != -1:
            self._catIdMap.append(label)
    if self._autoTuneLabels:
        # Use the autotune labels too; there could be categories never seen
        # during training that showed up during autotuning.
        for label in self._autoTuneLabels:
            if label not in self._catIdMap and label != -1:
                self._catIdMap.append(label)
    # Do sphering if specified
    if self.doSphering:
        samples = self._finishSphering()
    # Run SVD if necessary
    if self._numSVDDims is not None and self._vt is None:
        self._beginMemoryMonitoring("computeSVD")
        self.computeSVD()
        self._endMemoryMonitoring("computeSVD")
    if not self.doSphering:
        samples = self._samples
    # Feed each sample into the SVM library
    for sampleIndex, label in enumerate(self._labels):
        # Ignore samples whose label is -1, which can occur if
        # changeCategoriesOfPartitionIds was called.
        # Use the catIdMap to give the SVM a continuous range of indices.
        if label != -1:
            self._svm.add_sample(self._catIdMap.index(label), samples[sampleIndex])
    # Initialize the random number generator for LHS
    #self._initRandom()
    if self.numRecursions > 0 and (self.minC < self.maxC or self.minGamma < self.maxGamma):
        # Do recursive Latin Hypercube sampling to obtain best C, gamma
        paramRange = (self.minC, self.maxC, self.minGamma, self.maxGamma)
        self._beginMemoryMonitoring("_doRecursion")
        (svmParams, svmAccuracy, svmModel) = self._doRecursion(None, None, paramRange)
        self._endMemoryMonitoring("_doRecursion")
        if svmParams is None:
            # User canceled
            self._learningMode = False  # To avoid exception
            self._inferenceMode = True
            return
        print 'optimal params', svmParams
        # Set self.C and self.gamma to the optimal values
        self.C, self.gamma = svmParams
    else:
        # Use the specified values of C and gamma
        svmParams = (self.C, self.gamma)
    # Build final model
    self._beginMemoryMonitoring("_buildSVM")
    self._buildSVM(svmParams[0], svmParams[1])
    self._endMemoryMonitoring("_buildSVM")
    # Save parameters used to build the model
    self._svmParams = svmParams
    if self.discardProblem:
        # Discard the samples and labels held inside the SVM engine
        self._svm.discard_problem()
    # Uncomment this to print out the chosen C and Gamma values
    #print "SVM node (C,Gamma) = ", self._svmParams
    # We can now throw away all our stored training samples; this way they
    # won't get pickled into the final 'inference stage' node.
    if not self.keepSamples:
        self._samples = None
        self._labels = None
    # Set learning mode off and turn on inference mode
    self._learningMode = False
    self._inferenceMode = True
    # Clear data structures that should begin fresh for testing
    self.distances = None
    self._distanceCount = None
    # Update progress
    PyNodeModule.finishLearningProgress = 1.0
    self._endMemoryMonitoring("_finishLearning")
def _doRecursion(self, samples, validationSets, paramRange, recursionIndex=0, bestResults=None):
    """
    Perform one round of recursive Latin Hypercube sampling over the
    (C, gamma) parameter space.

    @param samples -- unused (kept for interface compatibility).
    @param validationSets -- unused (kept for interface compatibility).
    @param paramRange -- (minC, maxC, minGamma, maxGamma) sampling bounds.
    @param recursionIndex -- zero-based index of the current round.
    @param bestResults -- (samplePoint, accuracy, model) carried over from
        the previous round, or None on the first round.

    Returns (bestSamplePoint, bestAccuracy, bestModel), or
    (None, None, None) if the user canceled.
    """
    # Construct the set of sample points.
    minC, maxC, minGamma, maxGamma = paramRange
    rangeC = maxC - minC
    rangeGamma = maxGamma - minGamma
    if self.numSamplesPerRecursion == 1:
        intervalC = 0.0
        intervalGamma = 0.0
    else:
        intervalC = rangeC / float(self.numSamplesPerRecursion - 1)
        intervalGamma = rangeGamma / float(self.numSamplesPerRecursion - 1)
    # Latin Hypercube: pair each C grid position with a shuffled gamma grid
    # position so each row/column of the grid is sampled exactly once.
    gammaIndices = range(self.numSamplesPerRecursion)
    self._rng.shuffle(gammaIndices)
    sampleIndices = [(float(x), float(gammaIndices[x])) for x in range(self.numSamplesPerRecursion)]
    samplePoints = [(minC + intervalC * x[0], minGamma + intervalGamma * x[1]) for x in sampleIndices]
    bestAccuracy = -1.0
    bestSamplePoint = None
    bestModel = None
    for i, samplePoint in enumerate(samplePoints):
        # For progress updates
        progressStart = PyNodeModule.finishLearningProgress
        progressEnd = progressStart \
            + 1 / float(self.numSamplesPerRecursion * self.numRecursions)
        # Make sure progress does not reach 1.0 before the end of finishLearning
        progressEnd = min(0.99, progressEnd)
        logging.debug("SVM recursion %d, sample %d: C is %.2f, gamma is %.2f"
                      % (recursionIndex, i, samplePoint[0], samplePoint[1]))
        accuracy, svmModel = self._validateSvm(samplePoint[0], samplePoint[1],
                                               progressStart, progressEnd)
        if accuracy is None:
            # User canceled
            return (None, None, None)
        logging.debug("Accuracy: %.2f" % (accuracy * 100))
        if accuracy > bestAccuracy:
            bestAccuracy = accuracy
            bestSamplePoint = samplePoint
            bestModel = svmModel
    # Check if the previous recursion actually produced better results (by chance)
    if bestResults is not None and bestResults[1] > bestAccuracy:
        bestSamplePoint, bestAccuracy, bestModel = bestResults
    # Have we completed our LHS recursions?
    recursionIndex += 1
    if recursionIndex == self.numRecursions:
        return (bestSamplePoint, bestAccuracy, bestModel)
    # Perform another round of LHS: contract the sampling range around the
    # best point seen so far.
    else:
        halfRangeC = 0.5 * self.contractionFactor * rangeC
        halfRangeGamma = 0.5 * self.contractionFactor * rangeGamma
        newParamRange = (bestSamplePoint[0] - halfRangeC,
                         bestSamplePoint[0] + halfRangeC,
                         bestSamplePoint[1] - halfRangeGamma,
                         bestSamplePoint[1] + halfRangeGamma)
        # Bug fix: carry forward the BEST model seen so far. The original
        # passed 'svmModel' -- the model from the last sample point
        # evaluated -- instead of 'bestModel'.
        return self._doRecursion(samples, validationSets, newParamRange, recursionIndex,
                                 (bestSamplePoint, bestAccuracy, bestModel))
def _autoTuneTest(self, svm):
    """
    Measure the classification accuracy of 'svm' on the held-out
    auto-tuning samples.

    @param svm -- a trained SVM engine instance.
    Returns the fraction of auto-tuning samples classified correctly.
    """
    if self.useProbabilisticSvm:
        numCats = svm.get_model().n_class()
        belief = numpy.zeros(numCats, dtype=RealNumpyDType)
    numErrors = 0
    numTestingSamples = self._autoTuneSamples.shape[0]
    for k in xrange(numTestingSamples):
        sample = self._autoTuneSamples[k]
        # Note: we currently do not support sphering and PCA at
        # the same time.
        # Bug fix: the original asserted 'not self._vt', which raises
        # ValueError ("truth value of an array ...") when _vt is a
        # multi-element numpy array; test against None instead.
        assert not self.doSphering or self._vt is None
        # If we are sphering, then apply normalization
        if self.doSphering:
            sample = (sample + self._normOffset) * self._normScale
        # Vector is already projected onto PCA basis (if present).
        # Present the sample
        if self.useProbabilisticSvm:
            prediction = int(svm.predict_probability(sample, belief))
        else:
            prediction = int(svm.predict(sample))
        if prediction != self._catIdMap.index(self._autoTuneLabels[k]):
            numErrors += 1
    return float(numTestingSamples - numErrors) / float(numTestingSamples)
def _validateSvm(self, C, gamma, progressStart, progressEnd):
    """
    Perform cross-validation to measure the recognition accuracy of an SVM.

    @param C -- log10 of the C value to evaluate (10**C is passed to the SVM).
    @param gamma -- log10 of the Gamma value to evaluate.
    @param progressStart -- progress value at the start of this evaluation.
    @param progressEnd -- progress value to reach when done.

    Returns (accuracy, None), or (None, None) if the user canceled
    (signaled by finishLearningProgress == -1).
    """
    # @todo Problem - the cross_validate() API doesn't accept C, gamma parameters!
    #accuracy = self._svm.cross_validate(C, gamma, nFold=self.numCrossValidations)
    #accuracy = self._svm.cross_validate(nFold=self.numCrossValidations)
    valueC = math.pow(10.0, C)
    valueGamma = math.pow(10.0, gamma)
    # Validate against held-out test samples, when available
    if self._autoTuneSamples is not None:
        assert self._autoTuneLabels is not None
        assert self._autoTunePartitionIds is not None
        # NOTE(review): the 'if False:' branch below is dead code (an
        # alternative path that rebuilt a fresh SVM per candidate);
        # only the 'else' branch ever runs.
        if False:
            # Choose number of dimensions
            if self._numSVDDims:
                numDims = self._numSVDDims
            else:
                numDims = self._inputWidth
            # Choosing kernel type
            if self._mode == 'feedback' or self._kernelType == 'linear':
                kernelType = 0
            elif self._kernelType == 'rbf':
                kernelType = 1
            # Create a new svm
            if self.useSparseSvm:
                svm = svm_01(kernelType,
                             n_dims=numDims,
                             threshold=self.inputThresh,
                             probability=self.useProbabilisticSvm,
                             seed=self.cpp_svm_seed)
            else:
                svm = svm_dense(kernelType,
                                n_dims=numDims,
                                probability=self.useProbabilisticSvm,
                                seed=self.cpp_svm_seed)
            # Feed each sample into the SVM library
            for k, label in enumerate(self._labels):
                # Ignore samples whose label is -1, which can occur if
                # changeCategoriesOfPartitionIds was called.
                # Use the catIdMap to give the SVM a continuous range of indices
                sample = self._samples[k]
                if label != -1:
                    svm.add_sample(self._catIdMap.index(label), sample)
        else:
            svm = self._svm
        # Train using all the training samples with this
        # (C, Gamma) sample point
        svm.trainReleaseGIL(gamma=valueGamma, C=valueC, eps=self.convEpsilon)
        # Check and update progress (-1 signals user cancellation)
        if PyNodeModule.finishLearningProgress == -1:
            return (None, None)
        # NOTE(review): progressStart < progressEnd makes this increment
        # negative -- looks like it should be (progressEnd - progressStart)/2;
        # confirm against the progress-bar consumer.
        PyNodeModule.finishLearningProgress += (progressStart - progressEnd)/2
        # Test against all testing samples
        accuracy = self._autoTuneTest(svm)
        # Check and update progress
        if PyNodeModule.finishLearningProgress == -1:
            return (None, None)
        PyNodeModule.finishLearningProgress = progressEnd
        #print "(%.3f, %.3f) ==> %.2f%%" % (C, gamma, 100.0 * accuracy)
    # Use cross-validation to validate against training samples
    else:
        accuracy = self._svm.cross_validate(n_fold=self.numCrossValidations,
                                            gamma=valueGamma,
                                            C=valueC,
                                            eps=self.convEpsilon)
    return (accuracy, None)
  def _buildSVM(self, C, gamma):
    """
    Train an SVM model on the samples already added to self._svm.

    @param C -- log10 of the C parameter to use; the actual value passed
        to the SVM is 10**C.
    @param gamma -- log10 of the Gamma parameter to use; the actual value
        passed to the SVM is 10**gamma.
    """
    valueC = math.pow(10.0, C)
    valueGamma = math.pow(10.0, gamma)
    self._svm.trainReleaseGIL(gamma=valueGamma, C=valueC, eps=self.convEpsilon)
  def computeSVD(self, numSVDSamples=None, finalize=True):
    """Compute the SVD of the stored samples (mean-centered in place).

    numSVDSamples -- number of leading samples to feed the SVD; defaults
        to all stored samples.
    finalize -- when True, immediately calls finalizeSVD() to truncate
        and project.

    Side effects: subtracts the per-column mean from self._samples and
    stores it in self._mean; stores singular values in self._s and the
    right singular vectors in self._vt. Returns self._s.
    """
    print 'Computing SVD'
    # Samples are in self._samples, not in the SVM yet
    if numSVDSamples is None:
      numSVDSamples = self._samples.shape[0]
    if self._useAuxiliary and not self._justUseAuxiliary:
      # Only the non-auxiliary (leading) columns participate in the SVD;
      # the trailing self._auxInputLen columns are left untouched.
      self._mean = numpy.mean(self._samples[:,:self._samples.shape[1]-self._auxInputLen], axis=0)
      self._samples[:,:self._samples.shape[1]-self._auxInputLen] -= self._mean
      # Remove the auxiliary data prior to computing the SVD
      u, self._s, self._vt = numpy.linalg.svd(self._samples[:numSVDSamples,:self._samples.shape[1]-self._auxInputLen])
    else:
      self._mean = numpy.mean(self._samples, axis=0)
      self._samples -= self._mean
      u, self._s, self._vt = numpy.linalg.svd(self._samples[:numSVDSamples,:])
    if finalize:
      self.finalizeSVD()
    return self._s
  def getAdaptiveSVDDims(self, singularValues, fractionOfMax=0.001):
    """Choose how many PCA dimensions to keep.

    Returns the index of the first singular value that falls below
    fractionOfMax of the largest one, or len(v)-1 (all but the last
    dimension) when none does.
    """
    # Normalize so the largest singular value is 1.0.
    v = singularValues/singularValues[0]
    idx = numpy.where(v<fractionOfMax)[0]
    if len(idx):
      print "Number of PCA dimensions chosen: ", idx[0], "out of ", len(v)
      return idx[0]
    else:
      print "Number of PCA dimensions chosen: ", len(v)-1, "out of ", len(v)
      return len(v)-1
def finalizeSVD(self, numSVDDims=None):
print 'Finalizing SVD'
if numSVDDims is not None:
self._numSVDDims = numSVDDims
if self._numSVDDims=='adaptive':
if self._fractionOfMax is not None:
self._numSVDDims = self.getAdaptiveSVDDims(self._s, self._fractionOfMax)
else:
self._numSVDDims = self.getAdaptiveSVDDims(self._s)
if self._vt.shape[0] < self._numSVDDims:
print "******************************************************************************"
print "Warning: The requested number of PCA dimensions is more than the number of pattern dimensions."
print "Setting numSVDDims = ", self._vt.shape[0]
print "******************************************************************************"
self._numSVDDims = self._vt.shape[0]
self._vt = self._vt[:self._numSVDDims]
if self._useAuxiliary and not self._justUseAuxiliary:
self._initSvm(n_dims=self._numSVDDims + self._auxInputLen)
# Project all the vectors (mean has already been subtracted from each one)
auxSamples = self._samples[:, self._samples.shape[1]+1:]
self._samples = self._samples[:, :self._samples.shape[1]-self._auxInputLen]
self._samples = numpy.dot(self._samples, self._vt.T)
self._samples = numpy.concatenate([numpy.atleast_2d(self._samples), numpy.atleast_2d(auxSamples)], axis=1)
else:
self._initSvm(n_dims=self._numSVDDims)
self._samples = numpy.dot(self._samples, self._vt.T)
def getAllDistances(self):
"""Return all the prototype distances from all computes available."""
if self.distances is None:
return None
return self.distances[:self._distanceCount, :]
def getLatestDistances(self):
"""Get the distances to all training samples (pre-SVM, post-PCA)."""
if self._inputVector is None:
return
if self._samples is None:
raise Exception("No samples stored")
return self._calculateDistances(self._inputVector)
def getCategoryList(self):
"""
Public API for returning the category list
"""
return self._labels
def _calculateDistances(self, inputVector):
"""Calculate distances in the original input space (pre-SVM, post-PCA)."""
# Calculate the distances from this input to all prototypes
# TODO do this after sphering instead?
# TODO custom distance norm?
dist = numpy.power(numpy.abs(self._samples - inputVector), 2)
dist = dist.sum(1)
dist = numpy.power(dist, 0.5)
# Ignore samples with a category of -1
dist[numpy.array(self._labels) == -1] = numpy.inf
return dist
  def _calculateAndStoreDistances(self, inputVector):
    """Calculate distances in the original input space (pre-SVM, post-PCA)
    and append them as a new row of self.distances.

    The buffer grows by doubling when full; only the first
    self._distanceCount rows are valid (see getAllDistances).
    """
    dist = self._calculateDistances(inputVector)
    # Keep all distances in an array
    if self.distances is None:
      # First compute: allocate a single-row buffer matching the dtype.
      self.distances = numpy.zeros((1, dist.shape[0]), dist.dtype)
      self.distances[0,:] = dist
      self._distanceCount = 1
    else:
      if self._distanceCount == self.distances.shape[0]:
        # Double the size of the array
        newDistances = numpy.zeros((self.distances.shape[0] * 2,
                                    self.distances.shape[1]),
                                   self.distances.dtype)
        newDistances[:self.distances.shape[0],:] = self.distances
        self.distances = newDistances
      # Store the new distances
      self.distances[self._distanceCount,:] = dist
      self._distanceCount += 1
def setUpcomingPartitionIds(self, partitionIds):
"""
Set the queue of upcoming partition ids. This can be used instead of the
partitionId input. Checks that no partition ids currently exist (which
could indicate a bug).
"""
if self._upcomingPartitionIds:
raise Exception("PartitionIds already exist: %s"
% str(self._upcomingPartitionIds))
if not partitionIds:
return
if not hasattr(partitionIds, '__iter__'):
partitionIds = [partitionIds]
else:
partitionIds = list(partitionIds)
self._upcomingPartitionIds = partitionIds
  def remapCategories(self, mapping):
    """
    Change the existing category labels.
    mapping -- List of new category indices. For example, mapping=[2,0,1]
    would change all vectors of category 0 to be category 2, category 1 to 0,
    and category 2 to 1. Labels with indices beyond len(mapping) become -1
    (i.e. ignored by distance calculations).
    """
    if not self._labels:
      return
    if not hasattr(mapping, '__len__'):
      mapping = [mapping] # Cannot send singleton lists through session
    # NOTE(review): numpy.int is deprecated/removed in NumPy >= 1.20; this
    # (Python 2 era) code would need numpy.int_ or plain int on upgrade.
    labels = numpy.array(self._labels, dtype=numpy.int)
    # Start every label at -1 so unmapped categories end up disabled.
    newLabels = numpy.zeros(labels.shape[0], dtype=numpy.int)
    newLabels.fill(-1)
    for i in xrange(len(mapping)):
      newLabels[labels==i] = mapping[i]
    self._labels = list(newLabels)
def changePartitionId(self, oldPartitionId, newPartitionId):
"""
Change all instances of oldPartitionId to newPartitionId.
"""
if not self._partitionIds:
# No learning has occurred yet
return
self._partitionIds = numpy.array(self._partitionIds)
self._partitionIds[self._partitionIds == oldPartitionId] = newPartitionId
self._partitionIds = list(self._partitionIds)
def changeCategoriesOfPartitionIds(self, partitionIds, categoryIndices):
"""
Change the category associated with all vectors with this partitionId(s).
partitionIds -- Single id or list of ids.
categoryIndices -- Single index or list of indices. Can also be a single
index when partitionIds is a list, in which case the same category will
be used for all vectors with the specified id.
"""
if not hasattr(partitionIds, '__iter__'):
partitionIds = [partitionIds]
categoryIndices = [categoryIndices]
elif not hasattr(categoryIndices, '__iter__'):
categoryIndices = [categoryIndices] * len(partitionIds)
if not self._partitionIds:
# No learning has occurred yet
return
# Convert partitionIds and labels to arrays
self._partitionIds = numpy.array(self._partitionIds)
self._labels = numpy.array(self._labels)
for i in xrange(len(partitionIds)):
partitionId = partitionIds[i]
categoryIndex = categoryIndices[i]
self._labels[self._partitionIds == partitionId] = categoryIndex
# Convert partitionIds and labels back to lists
self._partitionIds = list(self._partitionIds)
self._labels = list(self._labels)
  def switchToLearning(self):
    """Force a switch back to learning mode (not normally supported)."""
    self._learningMode = True
    self._inferenceMode = False
    # Clear data structures that were created in finishLearning or first
    # used there
    self._catIdMap = None
    self._svm = None
    self._svmParams = None
    # Re-create an empty SVM so subsequent learning starts fresh.
    self._initSvm()
    # Drop the recorded distance history as well.
    self.distances = None
    self._distanceCount = None
if __name__=='__main__':
  # Script entry point: create an instance of this node through the nupic
  # network API (module name derived from this file's basename) and print
  # its help text.
  import os
  from nupic.network import CreateNode
  name = os.path.splitext(os.path.basename(__file__))[0]
  n = CreateNode('nupic.pynodes.extra.%(m)s.%(m)s' % {'m':name})
  n.nodeHelp()
use rustc_index::{Idx, IndexVec};
use rustc_middle::mir::{BasicBlock, Body, Location};
/// Maps between a `Location` and a `PointIndex` (and vice versa).
pub struct DenseLocationMap {
    /// For each basic block, how many points are contained within?
    statements_before_block: IndexVec<BasicBlock, usize>,
    /// Map backward from each point to the basic block that it
    /// belongs to.
    basic_blocks: IndexVec<PointIndex, BasicBlock>,
    /// Total number of points: one per statement plus one per
    /// terminator, summed over all basic blocks.
    num_points: usize,
}
impl DenseLocationMap {
    /// Builds the map in one pass over the body's basic blocks,
    /// recording each block's starting point index and the reverse
    /// point-to-block table.
    #[inline]
    pub fn new(body: &Body<'_>) -> Self {
        let mut total = 0;
        let statements_before_block: IndexVec<BasicBlock, usize> = body
            .basic_blocks
            .iter()
            .map(|block_data| {
                let block_start = total;
                // One point per statement, plus one for the terminator.
                total += block_data.statements.len() + 1;
                block_start
            })
            .collect();
        let mut basic_blocks = IndexVec::with_capacity(total);
        for (bb, bb_data) in body.basic_blocks.iter_enumerated() {
            let points_in_block = bb_data.statements.len() + 1;
            basic_blocks.extend(std::iter::repeat(bb).take(points_in_block));
        }
        Self { statements_before_block, basic_blocks, num_points: total }
    }

    /// Total number of point indices.
    #[inline]
    pub fn num_points(&self) -> usize {
        self.num_points
    }

    /// Converts a `Location` into a `PointIndex`. O(1).
    #[inline]
    pub fn point_from_location(&self, location: Location) -> PointIndex {
        let block_start = self.statements_before_block[location.block];
        PointIndex::new(block_start + location.statement_index)
    }

    /// Returns the `PointIndex` for the first statement in the given
    /// `BasicBlock`. O(1).
    #[inline]
    pub fn entry_point(&self, block: BasicBlock) -> PointIndex {
        PointIndex::new(self.statements_before_block[block])
    }

    /// Returns the `PointIndex` for the start of the block containing
    /// `index`.
    #[inline]
    pub fn to_block_start(&self, index: PointIndex) -> PointIndex {
        let block = self.basic_blocks[index];
        PointIndex::new(self.statements_before_block[block])
    }

    /// Converts a `PointIndex` back to a location. O(1).
    #[inline]
    pub fn to_location(&self, index: PointIndex) -> Location {
        assert!(index.index() < self.num_points);
        let block = self.basic_blocks[index];
        let statement_index = index.index() - self.statements_before_block[block];
        Location { block, statement_index }
    }

    /// Sometimes we get point-indices back from bitsets that may be
    /// out of range (because they round up to the nearest 2^N number
    /// of bits). Use this function to filter such points out if you
    /// like.
    #[inline]
    pub fn point_in_range(&self, index: PointIndex) -> bool {
        index.index() < self.num_points
    }
}
rustc_index::newtype_index! {
    /// A single integer representing a `Location` in the MIR control-flow
    /// graph. Constructed efficiently from `DenseLocationMap`.
    ///
    /// Dense: block entry points and statements share one contiguous
    /// index space, so these pack tightly into bitsets.
    #[orderable]
    #[debug_format = "PointIndex({})"]
    pub struct PointIndex {}
}
/*
* Copyright (C) Igor Sysoev
* Copyright (C) Nginx, Inc.
*/
#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_http.h>
/* Directive handler that installs the FLV content handler (declared below). */
static char *ngx_http_flv(ngx_conf_t *cf, ngx_command_t *cmd, void *conf);
/* The module exposes a single no-argument "flv" directive, valid only in
 * location{} context. */
static ngx_command_t ngx_http_flv_commands[] = {
    { ngx_string("flv"),
      NGX_HTTP_LOC_CONF|NGX_CONF_NOARGS,
      ngx_http_flv,
      0,
      0,
      NULL },
      ngx_null_command
};
/* Canned 13-byte FLV file header sent before a mid-file chunk: "FLV"
 * signature, version 1, flags 0x5, header size 9, then a zero
 * PreviousTagSize0 -- presumably per the FLV container spec (confirm
 * against the Adobe FLV format documentation). */
static u_char ngx_flv_header[] = "FLV\x1\x5\0\0\0\x9\0\0\0\0";
/* No configuration of its own: all hooks are NULL. */
static ngx_http_module_t ngx_http_flv_module_ctx = {
    NULL, /* preconfiguration */
    NULL, /* postconfiguration */
    NULL, /* create main configuration */
    NULL, /* init main configuration */
    NULL, /* create server configuration */
    NULL, /* merge server configuration */
    NULL, /* create location configuration */
    NULL /* merge location configuration */
};
ngx_module_t ngx_http_flv_module = {
    NGX_MODULE_V1,
    &ngx_http_flv_module_ctx, /* module context */
    ngx_http_flv_commands, /* module directives */
    NGX_HTTP_MODULE, /* module type */
    NULL, /* init master */
    NULL, /* init module */
    NULL, /* init process */
    NULL, /* init thread */
    NULL, /* exit thread */
    NULL, /* exit process */
    NULL, /* exit master */
    NGX_MODULE_V1_PADDING
};
/*
 * Content handler: serves a static FLV file, honoring the Flash-player
 * convention of a "start=<byte offset>" query argument.  For a non-zero
 * start, the response is the canned ngx_flv_header followed by the file
 * from that offset; otherwise the whole file is sent.  The out[] chain
 * index `i` selects the starting link: 0 = header buf + file buf,
 * 1 = file buf only.
 */
static ngx_int_t
ngx_http_flv_handler(ngx_http_request_t *r)
{
    u_char *last;
    off_t start, len;
    size_t root;
    ngx_int_t rc;
    ngx_uint_t level, i;
    ngx_str_t path, value;
    ngx_log_t *log;
    ngx_buf_t *b;
    ngx_chain_t out[2];
    ngx_open_file_info_t of;
    ngx_http_core_loc_conf_t *clcf;
    /* Static content: only GET and HEAD make sense. */
    if (!(r->method & (NGX_HTTP_GET|NGX_HTTP_HEAD))) {
        return NGX_HTTP_NOT_ALLOWED;
    }
    /* A URI ending in '/' cannot name a file; let other handlers try. */
    if (r->uri.data[r->uri.len - 1] == '/') {
        return NGX_DECLINED;
    }
    rc = ngx_http_discard_request_body(r);
    if (rc != NGX_OK) {
        return rc;
    }
    last = ngx_http_map_uri_to_path(r, &path, &root, 0);
    if (last == NULL) {
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    log = r->connection->log;
    path.len = last - path.data;
    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, log, 0,
                   "http flv filename: \"%V\"", &path);
    clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);
    /* Open (possibly from the open-file cache) with the location's
     * caching/IO settings. */
    ngx_memzero(&of, sizeof(ngx_open_file_info_t));
    of.read_ahead = clcf->read_ahead;
    of.directio = clcf->directio;
    of.valid = clcf->open_file_cache_valid;
    of.min_uses = clcf->open_file_cache_min_uses;
    of.errors = clcf->open_file_cache_errors;
    of.events = clcf->open_file_cache_events;
    if (ngx_http_set_disable_symlinks(r, clcf, &path, &of) != NGX_OK) {
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    if (ngx_open_cached_file(clcf->open_file_cache, &path, &of, r->pool)
        != NGX_OK)
    {
        /* Map the open error to an HTTP status and a log level. */
        switch (of.err) {
        case 0:
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        case NGX_ENOENT:
        case NGX_ENOTDIR:
        case NGX_ENAMETOOLONG:
            level = NGX_LOG_ERR;
            rc = NGX_HTTP_NOT_FOUND;
            break;
        case NGX_EACCES:
#if (NGX_HAVE_OPENAT)
        case NGX_EMLINK:
        case NGX_ELOOP:
#endif
            level = NGX_LOG_ERR;
            rc = NGX_HTTP_FORBIDDEN;
            break;
        default:
            level = NGX_LOG_CRIT;
            rc = NGX_HTTP_INTERNAL_SERVER_ERROR;
            break;
        }
        if (rc != NGX_HTTP_NOT_FOUND || clcf->log_not_found) {
            ngx_log_error(level, log, of.err,
                          "%s \"%s\" failed", of.failed, path.data);
        }
        return rc;
    }
    if (!of.is_file) {
        return NGX_DECLINED;
    }
    r->root_tested = !r->error_page;
    start = 0;
    len = of.size;
    i = 1;
    if (r->args.len) {
        if (ngx_http_arg(r, (u_char *) "start", 5, &value) == NGX_OK) {
            start = ngx_atoof(value.data, value.len);
            /* Invalid or out-of-range offsets fall back to the whole file. */
            if (start == NGX_ERROR || start >= len) {
                start = 0;
            }
            if (start) {
                /* Content length = synthetic header + remaining file bytes. */
                len = sizeof(ngx_flv_header) - 1 + len - start;
                i = 0;
            }
        }
    }
    log->action = "sending flv to client";
    r->headers_out.status = NGX_HTTP_OK;
    r->headers_out.content_length_n = len;
    r->headers_out.last_modified_time = of.mtime;
    if (ngx_http_set_etag(r) != NGX_OK) {
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    if (ngx_http_set_content_type(r) != NGX_OK) {
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    if (i == 0) {
        /* Prepend the canned FLV header from static memory. */
        b = ngx_calloc_buf(r->pool);
        if (b == NULL) {
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }
        b->pos = ngx_flv_header;
        b->last = ngx_flv_header + sizeof(ngx_flv_header) - 1;
        b->memory = 1;
        out[0].buf = b;
        out[0].next = &out[1];
    }
    b = ngx_calloc_buf(r->pool);
    if (b == NULL) {
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    b->file = ngx_pcalloc(r->pool, sizeof(ngx_file_t));
    if (b->file == NULL) {
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }
    r->allow_ranges = 1;
    rc = ngx_http_send_header(r);
    if (rc == NGX_ERROR || rc > NGX_OK || r->header_only) {
        return rc;
    }
    /* File buffer covering [start, of.size). */
    b->file_pos = start;
    b->file_last = of.size;
    b->in_file = b->file_last ? 1 : 0;
    b->last_buf = (r == r->main) ? 1 : 0;
    b->last_in_chain = 1;
    b->sync = (b->last_buf || b->in_file) ? 0 : 1;
    b->file->fd = of.fd;
    b->file->name = path;
    b->file->log = log;
    b->file->directio = of.is_directio;
    out[1].buf = b;
    out[1].next = NULL;
    return ngx_http_output_filter(r, &out[i]);
}
/* "flv" directive: make ngx_http_flv_handler the content handler for the
 * enclosing location block. */
static char *
ngx_http_flv(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
    ngx_http_core_loc_conf_t  *core_conf;

    core_conf = ngx_http_conf_get_module_loc_conf(cf, ngx_http_core_module);
    core_conf->handler = ngx_http_flv_handler;

    return NGX_CONF_OK;
}
// This file was automatically generated from channels.md by Knit tool. Do not edit.
package kotlinx.coroutines.guide.exampleChannel09
import kotlinx.coroutines.*
import kotlinx.coroutines.channels.*
data class Ball(var hits: Int)
// Ping-pong over a rendezvous channel: two identical players alternate
// because each send suspends until the other player receives.
fun main() = runBlocking {
    val table = Channel<Ball>() // a shared table
    launch { player("ping", table) }
    launch { player("pong", table) }
    table.send(Ball(0)) // serve the ball
    delay(1000) // delay 1 second
    coroutineContext.cancelChildren() // game over, cancel them
}
// A player loops forever: receive the ball, bump its hit counter, report,
// pause, and send it back. The for-loop ends when the channel is closed
// or the coroutine is cancelled.
suspend fun player(name: String, table: Channel<Ball>) {
    for (ball in table) { // receive the ball in a loop
        ball.hits++
        println("$name $ball")
        delay(300) // wait a bit
        table.send(ball) // send the ball back
    }
}
# Factories are self documenting
# pylint: disable=missing-docstring
import json
from functools import partial
import factory
from factory.django import DjangoModelFactory
# Imported to re-export
from student.tests.factories import UserFactory # Imported to re-export
from student.tests.factories import UserProfileFactory as StudentUserProfileFactory
from courseware.models import StudentModule, XModuleUserStateSummaryField
from courseware.models import XModuleStudentInfoField, XModuleStudentPrefsField
from student.roles import (
CourseInstructorRole,
CourseStaffRole,
CourseBetaTesterRole,
GlobalStaff,
OrgStaffRole,
OrgInstructorRole,
)
from opaque_keys.edx.locations import SlashSeparatedCourseKey
# TODO fix this (course_id and location are invalid names as constants, and course_id should really be COURSE_KEY)
# pylint: disable=invalid-name
# Default course key shared by the factories in this module.
course_id = SlashSeparatedCourseKey(u'edX', u'test_course', u'test')
# location('some_id') builds a u'problem' usage key inside `course_id`.
location = partial(course_id.make_usage_key, u'problem')
class UserProfileFactory(StudentUserProfileFactory):
    """Profile factory that additionally sets a default `courseware` value."""
    courseware = 'course.xml'
# For the following factories, these are disabled because we're ok ignoring the
# unused arguments create and **kwargs in the line:
# course_key(self, create, extracted, **kwargs)
# pylint: disable=unused-argument
class InstructorFactory(UserFactory):
    """
    Given a CourseKey (via the `course_key` argument), returns a User
    object with instructor permissions for that course.
    """
    last_name = "Instructor"
    @factory.post_generation
    def course_key(self, create, extracted, **kwargs):
        # `extracted` is the CourseKey passed as course_key=... at build time.
        if extracted is None:
            raise ValueError("Must specify a CourseKey for a course instructor user")
        CourseInstructorRole(extracted).add_users(self)
class StaffFactory(UserFactory):
    """
    Given a CourseKey (via the `course_key` argument), returns a User
    object with staff permissions for that course.
    """
    last_name = "Staff"
    @factory.post_generation
    def course_key(self, create, extracted, **kwargs):
        # `extracted` is the CourseKey passed as course_key=... at build time.
        if extracted is None:
            raise ValueError("Must specify a CourseKey for a course staff user")
        CourseStaffRole(extracted).add_users(self)
class BetaTesterFactory(UserFactory):
    """
    Given a CourseKey (via the `course_key` argument), returns a User
    object with beta-tester permissions for that course.
    """
    last_name = "Beta-Tester"
    @factory.post_generation
    def course_key(self, create, extracted, **kwargs):
        # `extracted` is the CourseKey passed as course_key=... at build time.
        if extracted is None:
            raise ValueError("Must specify a CourseKey for a beta-tester user")
        CourseBetaTesterRole(extracted).add_users(self)
class OrgStaffFactory(UserFactory):
    """
    Given a CourseKey (via the `course_key` argument), returns a User
    object with org-staff permissions for the course's organization.
    """
    last_name = "Org-Staff"
    @factory.post_generation
    def course_key(self, create, extracted, **kwargs):
        # Role is granted at the org level, hence extracted.org.
        if extracted is None:
            raise ValueError("Must specify a CourseKey for an org-staff user")
        OrgStaffRole(extracted.org).add_users(self)
class OrgInstructorFactory(UserFactory):
    """
    Given a CourseKey (via the `course_key` argument), returns a User
    object with org-instructor permissions for the course's organization.
    """
    last_name = "Org-Instructor"
    @factory.post_generation
    def course_key(self, create, extracted, **kwargs):
        # Role is granted at the org level, hence extracted.org.
        if extracted is None:
            raise ValueError("Must specify a CourseKey for an org-instructor user")
        OrgInstructorRole(extracted.org).add_users(self)
class GlobalStaffFactory(UserFactory):
    """
    Returns a User object with global staff access
    """
    last_name = "GlobalStaff"
    @factory.post_generation
    def set_staff(self, create, extracted, **kwargs):
        # Always grant global staff; no argument needed.
        GlobalStaff().add_users(self)
# pylint: enable=unused-argument
class StudentModuleFactory(DjangoModelFactory):
    """Factory for courseware.StudentModule rows (per-student XBlock state)."""
    class Meta(object):
        model = StudentModule
    module_type = "problem"
    student = factory.SubFactory(UserFactory)
    course_id = SlashSeparatedCourseKey("MITx", "999", "Robot_Super_Course")
    state = None
    grade = None
    max_grade = None
    done = 'na'
class UserStateSummaryFactory(DjangoModelFactory):
    """Factory for XModuleUserStateSummaryField rows with a pre-existing value."""
    class Meta(object):
        model = XModuleUserStateSummaryField
    field_name = 'existing_field'
    value = json.dumps('old_value')
    usage_id = location('usage_id')
class StudentPrefsFactory(DjangoModelFactory):
    """Factory for XModuleStudentPrefsField rows with a pre-existing value."""
    class Meta(object):
        model = XModuleStudentPrefsField
    field_name = 'existing_field'
    value = json.dumps('old_value')
    student = factory.SubFactory(UserFactory)
    module_type = 'mock_problem'
class StudentInfoFactory(DjangoModelFactory):
    """Factory for XModuleStudentInfoField rows with a pre-existing value."""
    class Meta(object):
        model = XModuleStudentInfoField
    field_name = 'existing_field'
    value = json.dumps('old_value')
    student = factory.SubFactory(UserFactory)
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#!/usr/bin/env python
"""hodring launches hadoop commands on work node and
cleans up all the work dirs afterward
"""
# -*- python -*-
import os, sys, time, shutil, getpass, xml.dom.minidom, xml.dom.pulldom
import socket, sets, urllib, csv, signal, pprint, random, re, httplib
from xml.dom import getDOMImplementation
from pprint import pformat
from optparse import OptionParser
from urlparse import urlparse
from hodlib.Common.util import local_fqdn, parseEquals, getMapredSystemDirectory, isProcessRunning
from hodlib.Common.tcp import tcpSocket, tcpError
# Make the hodlib package (which lives alongside this script) importable.
binfile = sys.path[0]
libdir = os.path.dirname(binfile)
sys.path.append(libdir)
import hodlib.Common.logger
from hodlib.GridServices.service import *
from hodlib.Common.util import *
from hodlib.Common.socketServers import threadedHTTPServer
from hodlib.Common.hodsvc import hodBaseService
from hodlib.Common.threads import simpleCommand
from hodlib.Common.xmlrpc import hodXRClient
# True when running on Windows; used for platform-specific behavior.
mswindows = (sys.platform == "win32")
# Remember the starting working directory before any later chdir.
originalcwd = os.getcwd()
# Matches hdfs://host:port/path, capturing the authority and the path.
reHdfsURI = re.compile("hdfs://(.*?:\d+)(.*)")
class CommandDesc:
"""A class that represents the commands that
are run by hodring"""
def __init__(self, dict, log):
self.log = log
self.log.debug("In command desc")
self.log.debug("Done in command desc")
dict.setdefault('argv', [])
dict.setdefault('version', None)
dict.setdefault('envs', {})
dict.setdefault('workdirs', [])
dict.setdefault('attrs', {})
dict.setdefault('final-attrs', {})
dict.setdefault('fg', False)
dict.setdefault('ignorefailures', False)
dict.setdefault('stdin', None)
self.log.debug("Printing dict")
self._checkRequired(dict)
self.dict = dict
def _checkRequired(self, dict):
if 'name' not in dict:
raise ValueError, "Command description lacks 'name'"
if 'program' not in dict:
raise ValueError, "Command description lacks 'program'"
if 'pkgdirs' not in dict:
raise ValueError, "Command description lacks 'pkgdirs'"
def getName(self):
return self.dict['name']
def getProgram(self):
return self.dict['program']
def getArgv(self):
return self.dict['argv']
def getVersion(self):
return self.dict['version']
def getEnvs(self):
return self.dict['envs']
def getPkgDirs(self):
return self.dict['pkgdirs']
def getWorkDirs(self):
return self.dict['workdirs']
def getAttrs(self):
return self.dict['attrs']
def getfinalAttrs(self):
return self.dict['final-attrs']
def isForeground(self):
return self.dict['fg']
def isIgnoreFailures(self):
return self.dict['ignorefailures']
def getStdin(self):
return self.dict['stdin']
def parseDesc(str):
dict = CommandDesc._parseMap(str)
dict['argv'] = CommandDesc._parseList(dict['argv'])
dict['envs'] = CommandDesc._parseMap(dict['envs'])
dict['pkgdirs'] = CommandDesc._parseList(dict['pkgdirs'], ':')
dict['workdirs'] = CommandDesc._parseList(dict['workdirs'], ':')
dict['attrs'] = CommandDesc._parseMap(dict['attrs'])
dict['final-attrs'] = CommandDesc._parseMap(dict['final-attrs'])
return CommandDesc(dict)
parseDesc = staticmethod(parseDesc)
def _parseList(str, delim = ','):
list = []
for row in csv.reader([str], delimiter=delim, escapechar='\\',
quoting=csv.QUOTE_NONE, doublequote=False):
list.extend(row)
return list
_parseList = staticmethod(_parseList)
def _parseMap(str):
"""Parses key value pairs"""
dict = {}
for row in csv.reader([str], escapechar='\\', quoting=csv.QUOTE_NONE, doublequote=False):
for f in row:
[k, v] = f.split('=', 1)
dict[k] = v
return dict
_parseMap = staticmethod(_parseMap)
class MRSystemDirectoryManager:
  """Class that is responsible for managing the MapReduce system directory"""

  def __init__(self, jtPid, mrSysDir, fsName, hadoopPath, log, retries=120):
    """jtPid -- pid of the JobTracker process to wait on.
    mrSysDir -- MapReduce system directory path to remove.
    fsName -- HDFS authority (host:port) holding the directory.
    hadoopPath -- path to the hadoop launcher used for 'dfs -rmr'.
    retries -- number of 0.5s polls to wait for the JT to exit
               (default 120, i.e. one minute).
    """
    self.__jtPid = jtPid
    self.__mrSysDir = mrSysDir
    self.__fsName = fsName
    self.__hadoopPath = hadoopPath
    self.__log = log
    self.__retries = retries

  def toCleanupArgs(self):
    """Serialize the constructor arguments as command-line options so a
    separate cleanup process can rebuild this manager."""
    return " --jt-pid %s --mr-sys-dir %s --fs-name %s --hadoop-path %s " \
              % (self.__jtPid, self.__mrSysDir, self.__fsName, self.__hadoopPath)

  def removeMRSystemDirectory(self):
    """Wait (up to retries * 0.5s) for the JobTracker to exit, then remove
    the MapReduce system directory via 'hadoop dfs -rmr'. Best-effort:
    failures are logged but never raised."""
    jtActive = isProcessRunning(self.__jtPid)
    count = 0 # try for a max of a minute for the process to end
    while jtActive and (count<self.__retries):
      time.sleep(0.5)
      jtActive = isProcessRunning(self.__jtPid)
      count += 1
    if count == self.__retries:
      self.__log.warn('Job Tracker did not exit even after a minute. Not going to try and cleanup the system directory')
      return
    self.__log.debug('jt is now inactive')
    cmd = "%s dfs -fs hdfs://%s -rmr %s" % (self.__hadoopPath, self.__fsName, \
                                            self.__mrSysDir)
    self.__log.debug('Command to run to remove system directory: %s' % (cmd))
    try:
      hadoopCommand = simpleCommand('mr-sys-dir-cleaner', cmd)
      hadoopCommand.start()
      hadoopCommand.wait()
      hadoopCommand.join()
      ret = hadoopCommand.exit_code()
      if ret != 0:
        self.__log.warn("Error in removing MapReduce system directory '%s' from '%s' using path '%s'" \
                          % (self.__mrSysDir, self.__fsName, self.__hadoopPath))
        self.__log.warn(pprint.pformat(hadoopCommand.output()))
      else:
        self.__log.info("Removed MapReduce system directory successfully.")
    except Exception:
      # FIX: was a bare 'except:', which also swallowed SystemExit and
      # KeyboardInterrupt; the best-effort logging behavior is preserved.
      self.__log.error('Exception while cleaning up MapReduce system directory. May not be cleaned up. %s', \
                       get_exception_error_string())
      self.__log.debug(get_exception_string())
def createMRSystemDirectoryManager(dict, log):
  """Build an MRSystemDirectoryManager from a cleanup-args dict.

  Returns None unless all of 'jt-pid', 'mr-sys-dir', 'fs-name' and
  'hadoop-path' are present and non-None in `dict`.
  """
  required = [ 'jt-pid', 'mr-sys-dir', 'fs-name', 'hadoop-path' ]
  for key in required:
    if dict.get(key) is None:
      return None
  return MRSystemDirectoryManager(int(dict['jt-pid']), dict['mr-sys-dir'],
                                  dict['fs-name'], dict['hadoop-path'], log)
class HadoopCommand:
"""Runs a single hadoop command"""
  def __init__(self, id, desc, tempdir, tardir, hadoopportrange, log, javahome,
                mrSysDir, restart=False):
    """Set up per-command directories and generate hadoop-site.xml.

    id -- numeric index used to name the per-command directory.
    desc -- CommandDesc describing the hadoop program to run.
    tempdir -- base directory for confdir/logdir of this command.
    tardir -- NOTE(review): accepted but apparently unused here; confirm.
    hadoopportrange -- (low, high) port range for 'fillinport' values.
    restart -- when True, existing conf/log dirs are tolerated.
    """
    self.desc = desc
    self.log = log
    self.javahome = javahome
    self.__mrSysDir = mrSysDir
    self.program = desc.getProgram()
    self.name = desc.getName()
    self.workdirs = desc.getWorkDirs()
    self.hadoopdir = tempdir
    # Layout: <tempdir>/<id>-<name>/{confdir,logdir}, with stdout/stderr
    # capture files inside logdir.
    self.confdir = os.path.join(self.hadoopdir, '%d-%s' % (id, self.name),
                                "confdir")
    self.logdir = os.path.join(self.hadoopdir, '%d-%s' % (id, self.name),
                               "logdir")
    self.out = os.path.join(self.logdir, '%s.out' % self.name)
    self.err = os.path.join(self.logdir, '%s.err' % self.name)
    self.child = None
    self.restart = restart
    # key=value strings whose 'fillin*' placeholders were resolved.
    self.filledInKeyVals = []
    self.__hadoopPortRange = hadoopportrange
    self._createWorkDirs()
    self._createHadoopSiteXml()
    self._createHadoopLogDir()
    self.__hadoopThread = None
    self.stdErrContents = "" # store list of contents for returning to user
def _createWorkDirs(self):
for dir in self.workdirs:
if os.path.exists(dir):
if not os.access(dir, os.F_OK | os.R_OK | os.W_OK | os.X_OK):
raise ValueError, "Workdir %s does not allow rwx permission." % (dir)
continue
try:
os.makedirs(dir)
except:
pass
  def getFilledInKeyValues(self):
    """Return the key=value strings whose 'fillin*' placeholders were
    resolved while generating hadoop-site.xml (see createXML)."""
    return self.filledInKeyVals
  def createXML(self, doc, attr, topElement, final):
    """Append one <property> element per attribute to hadoop-site.xml.

    Placeholder values are resolved in place and recorded in
    self.filledInKeyVals so they can be reported back:
      "fillinport"     -> a free port from the configured range
      "fillinhostport" -> local FQDN + ':' + a free port
      "fillindir"      -> the MapReduce system directory
    final -- when True, each property is emitted with <final>true</final>.
    """
    for k,v in attr.iteritems():
      self.log.debug('_createHadoopSiteXml: ' + str(k) + " " + str(v))
      lowport, highport = self.__hadoopPortRange
      if ( v == "fillinport" ):
        v = "%d" % (ServiceUtil.getUniqRandomPort(low=lowport, high=highport, log=self.log))
      keyvalpair = ''
      if isinstance(v, (tuple, list)):
        for item in v:
          keyvalpair = "%s%s=%s," % (keyvalpair, k, item)
        keyvalpair = keyvalpair[:-1]
      else:
        keyvalpair = k + '=' + v
      self.filledInKeyVals.append(keyvalpair)
      if(k == "mapred.job.tracker"): # total hack for time's sake
        # NOTE(review): this records the pair a second time on purpose
        # (acknowledged hack) -- downstream consumers apparently expect
        # the duplicate; confirm before removing.
        keyvalpair = k + "=" + v
        self.filledInKeyVals.append(keyvalpair)
      if ( v == "fillinhostport"):
        port = "%d" % (ServiceUtil.getUniqRandomPort(low=lowport, high=highport, log=self.log))
        self.log.debug('Setting hostname to: %s' % local_fqdn())
        v = local_fqdn() + ':' + port
        keyvalpair = ''
        if isinstance(v, (tuple, list)):
          for item in v:
            keyvalpair = "%s%s=%s," % (keyvalpair, k, item)
          keyvalpair = keyvalpair[:-1]
        else:
          keyvalpair = k + '=' + v
        self.filledInKeyVals.append(keyvalpair)
      if ( v == "fillindir"):
        v = self.__mrSysDir
        pass
      prop = None
      if isinstance(v, (tuple, list)):
        # Multi-valued attribute: one <property> element per item.
        for item in v:
          prop = self._createXmlElement(doc, k, item, "No description", final)
          topElement.appendChild(prop)
      else:
        if k == 'fs.default.name':
          # The filesystem name must carry the hdfs:// scheme.
          prop = self._createXmlElement(doc, k, "hdfs://" + v, "No description", final)
        else:
          prop = self._createXmlElement(doc, k, v, "No description", final)
        topElement.appendChild(prop)
  def _createHadoopSiteXml(self):
    """Generate <confdir>/hadoop-site.xml from the command description.

    Final attributes are written first (with <final>true</final>); plain
    attributes are written afterwards, skipping any key already emitted
    as final. On restart, an existing confdir is reused; otherwise it
    must not exist yet.
    """
    if self.restart:
      if not os.path.exists(self.confdir):
        os.makedirs(self.confdir)
    else:
      assert os.path.exists(self.confdir) == False
      os.makedirs(self.confdir)
    implementation = getDOMImplementation()
    doc = implementation.createDocument('', 'configuration', None)
    comment = doc.createComment("This is an auto generated hadoop-site.xml, do not modify")
    topElement = doc.documentElement
    topElement.appendChild(comment)
    finalAttr = self.desc.getfinalAttrs()
    self.createXML(doc, finalAttr, topElement, True)
    attr = {}
    attr1 = self.desc.getAttrs()
    # Non-final attributes must not shadow keys already emitted as final.
    for k,v in attr1.iteritems():
      if not finalAttr.has_key(k):
        attr[k] = v
    self.createXML(doc, attr, topElement, False)
    siteName = os.path.join(self.confdir, "hadoop-site.xml")
    sitefile = file(siteName, 'w')
    print >> sitefile, topElement.toxml()
    sitefile.close()
    self.log.debug('created %s' % (siteName))
def _createHadoopLogDir(self):
if self.restart:
if not os.path.exists(self.logdir):
os.makedirs(self.logdir)
else:
assert os.path.exists(self.logdir) == False
os.makedirs(self.logdir)
def _createXmlElement(self, doc, name, value, description, final):
prop = doc.createElement("property")
nameP = doc.createElement("name")
string = doc.createTextNode(name)
nameP.appendChild(string)
valueP = doc.createElement("value")
string = doc.createTextNode(value)
valueP.appendChild(string)
desc = doc.createElement("description")
string = doc.createTextNode(description)
desc.appendChild(string)
prop.appendChild(nameP)
prop.appendChild(valueP)
prop.appendChild(desc)
if (final):
felement = doc.createElement("final")
string = doc.createTextNode("true")
felement.appendChild(string)
prop.appendChild(felement)
pass
return prop
  def getMRSystemDirectoryManager(self):
    """Build a cleanup manager for this command's MapReduce system dir.

    NOTE(review): relies on self.path and self.__hadoopThread, which are
    only assigned inside run(); calling this before run() will raise --
    confirm callers always run the command first.
    """
    return MRSystemDirectoryManager(self.__hadoopThread.getPid(), self.__mrSysDir, \
                                    self.desc.getfinalAttrs()['fs.default.name'], \
                                    self.path, self.log)
  def run(self, dir):
    """Launch the hadoop command found under package dir `dir`.

    Builds the command line and environment from the descriptor, starts the
    process via simpleCommand with stdout/stderr redirected to self.out and
    self.err, optionally feeds it stdin, and (for foreground commands) waits
    for completion.  Returns True on success, False on failure, and True for
    ignored failures when the descriptor says to ignore them.
    """
    status = True
    args = []
    desc = self.desc
    self.log.debug(pprint.pformat(desc.dict))
    self.log.debug("Got package dir of %s" % dir)
    self.path = os.path.join(dir, self.program)
    self.log.debug("path: %s" % self.path)
    args.append(self.path)
    args.extend(desc.getArgv())
    envs = desc.getEnvs()
    # NOTE(review): fenvs aliases os.environ (no copy), so every write below
    # mutates this hodring process's own environment — confirm intentional.
    fenvs = os.environ
    for k, v in envs.iteritems():
      fenvs[k] = v
    if envs.has_key('HADOOP_OPTS'):
      fenvs['HADOOP_OPTS'] = envs['HADOOP_OPTS']
      self.log.debug("HADOOP_OPTS : %s" % fenvs['HADOOP_OPTS'])
    fenvs['JAVA_HOME'] = self.javahome
    fenvs['HADOOP_CONF_DIR'] = self.confdir
    fenvs['HADOOP_LOG_DIR'] = self.logdir
    self.log.info(pprint.pformat(fenvs))
    # Flatten argv into a single shell string; redirection below implies the
    # command is run through a shell.
    hadoopCommand = ''
    for item in args:
      hadoopCommand = "%s%s " % (hadoopCommand, item)
    # Redirecting output and error to self.out and self.err
    hadoopCommand = hadoopCommand + ' 1>%s 2>%s ' % (self.out, self.err)
    self.log.debug('running command: %s' % (hadoopCommand))
    self.log.debug('hadoop env: %s' % fenvs)
    self.log.debug('Command stdout will be redirected to %s ' % self.out + \
                   'and command stderr to %s' % self.err)
    self.__hadoopThread = simpleCommand('hadoop', hadoopCommand, env=fenvs)
    self.__hadoopThread.start()
    # Busy-wait (0.2s period) until the child's stdin pipe is available.
    while self.__hadoopThread.stdin == None:
      time.sleep(.2)
      self.log.debug("hadoopThread still == None ...")
    input = desc.getStdin()
    self.log.debug("hadoop input: %s" % input)
    if input:
      if self.__hadoopThread.is_running():
        print >>self.__hadoopThread.stdin, input
      else:
        self.log.error("hadoop command failed to start")
    # Close stdin unconditionally so the child sees EOF.
    self.__hadoopThread.stdin.close()
    self.log.debug("isForground: %s" % desc.isForeground())
    if desc.isForeground():
      self.log.debug("Waiting on hadoop to finish...")
      self.__hadoopThread.wait()
      self.log.debug("Joining hadoop thread...")
      self.__hadoopThread.join()
      if self.__hadoopThread.exit_code() != 0:
        status = False
    else:
      # Background command: a None exit code (still running) counts as OK.
      status = self.getCommandStatus()
    self.log.debug("hadoop run status: %s" % status)
    if status == False:
      self.handleFailedCommand()
    if (status == True) or (not desc.isIgnoreFailures()):
      return status
    else:
      self.log.error("Ignoring Failure")
      return True
def kill(self):
self.__hadoopThread.kill()
if self.__hadoopThread:
self.__hadoopThread.join()
def addCleanup(self, list):
list.extend(self.workdirs)
list.append(self.confdir)
def getCommandStatus(self):
status = True
ec = self.__hadoopThread.exit_code()
if (ec != 0) and (ec != None):
status = False
return status
def handleFailedCommand(self):
self.log.error('hadoop error: %s' % (
self.__hadoopThread.exit_status_string()))
# read the contents of redirected stderr to print information back to user
if os.path.exists(self.err):
f = None
try:
f = open(self.err)
lines = f.readlines()
# format
for line in lines:
self.stdErrContents = "%s%s" % (self.stdErrContents, line)
finally:
if f is not None:
f.close()
self.log.error('See %s.out and/or %s.err for details. They are ' % \
(self.name, self.name) + \
'located at subdirectories under either ' + \
'hodring.work-dirs or hodring.log-destination-uri.')
class HodRing(hodBaseService):
  """The main class for hodring that
  polls the commands it runs"""
  def __init__(self, config):
    """Initialise the hodring service from the 'hodring' config section."""
    hodBaseService.__init__(self, 'hodring', config['hodring'])
    self.log = self.logs['main']
    self._http = None            # threadedHTTPServer re-serving the tarball, if any
    self.__pkg = None            # root directory name inside the hadoop tarball
    self.__pkgDir = None         # absolute path of the unpacked tarball
    self.__tempDir = None        # per-user, per-allocation scratch dir
    self.__running = {}          # command id -> HadoopCommand for background cmds
    self.__hadoopLogDirs = []    # log dirs of every command launched
    self.__init_temp_dir()
  def __init_temp_dir(self):
    """Create the scratch directory and chdir into it."""
    self.__tempDir = os.path.join(self._cfg['temp-dir'],
                                  "%s.%s.hodring" % (self._cfg['userid'],
                                                     self._cfg['service-id']))
    if not os.path.exists(self.__tempDir):
      os.makedirs(self.__tempDir)
    # NOTE(review): chdir is a process-wide side effect; later code relies on
    # os.getcwd() pointing here (e.g. tarball fetch/untar paths).
    os.chdir(self.__tempDir)
  def __fetch(self, url, spath):
    """Download `url` to local file `spath`, retrying up to 3 times.

    Raises IOError when all attempts fail.
    """
    retry = 3
    success = False
    while (retry != 0 and success != True):
      try:
        input = urllib.urlopen(url)
        bufsz = 81920
        buf = input.read(bufsz)
        out = open(spath, 'w')
        while len(buf) > 0:
          out.write(buf)
          buf = input.read(bufsz)
        input.close()
        out.close()
        success = True
      except:
        # NOTE(review): bare except also swallows KeyboardInterrupt, and a
        # partially written spath/open handle can be left behind on failure.
        self.log.debug("Failed to copy file")
        retry = retry - 1
    if (retry == 0 and success != True):
      raise IOError, "Failed to copy the files"
  def __get_name(self, addr):
    """Return the URL path of `addr` with its first component stripped."""
    parsedUrl = urlparse(addr)
    path = parsedUrl[2]
    split = path.split('/', 1)
    return split[1]
  def __get_dir(self, name):
    """Return the root directory inside the tarball
    specified by name. Assumes that the tarball begins
    with a root directory."""
    import tarfile
    myTarFile = tarfile.open(name)
    hadoopPackage = myTarFile.getnames()[0]
    self.log.debug("tarball name : %s hadoop package name : %s" %(name,hadoopPackage))
    return hadoopPackage
  def getRunningValues(self):
    """Return the HadoopCommand objects currently tracked as running."""
    return self.__running.values()
  def getTempDir(self):
    """Return this hodring's scratch directory."""
    return self.__tempDir
  def getHadoopLogDirs(self):
    """Return the log directories of all launched commands."""
    return self.__hadoopLogDirs
  def __download_package(self, ringClient):
    """Fetch the hadoop tarball advertised by the ringmaster, untar it in the
    current directory, and register this hodring's copy as a new tar source."""
    self.log.debug("Found download address: %s" %
                   self._cfg['download-addr'])
    try:
      addr = 'none'
      downloadTime = self._cfg['tarball-retry-initial-time'] # download time depends on tarball size and network bandwidth
      # NOTE(review): downloadTime is assigned but never used below.
      increment = 0
      addr = ringClient.getTarList(self.hostname)
      # Poll the ringmaster with jittered, linearly growing backoff until a
      # tarball URL is published.
      while(addr == 'none'):
        rand = self._cfg['tarball-retry-initial-time'] + increment + \
               random.uniform(0,self._cfg['tarball-retry-interval'])
        increment = increment + 1
        self.log.debug("got no tarball. Retrying again in %s seconds." % rand)
        time.sleep(rand)
        addr = ringClient.getTarList(self.hostname)
      self.log.debug("got this address %s" % addr)
      tarName = self.__get_name(addr)
      self.log.debug("tar package name: %s" % tarName)
      fetchPath = os.path.join(os.getcwd(), tarName)
      self.log.debug("fetch path: %s" % fetchPath)
      self.__fetch(addr, fetchPath)
      self.log.debug("done fetching")
      tarUrl = "http://%s:%d/%s" % (self._http.server_address[0],
                                    self._http.server_address[1],
                                    tarName)
      try:
        ringClient.registerTarSource(self.hostname, tarUrl,addr)
        #ringClient.tarDone(addr)
      except KeyError, e:
        self.log.error("registerTarSource and tarDone failed: ", e)
        raise KeyError(e)
      check = untar(fetchPath, os.getcwd())
      if (check == False):
        raise IOError, "Untarring failed."
      self.__pkg = self.__get_dir(tarName)
      self.__pkgDir = os.path.join(os.getcwd(), self.__pkg)
    except Exception, e:
      self.log.error("Failed download tar package: %s" %
                     get_exception_error_string())
      raise Exception(e)
  def __run_hadoop_commands(self, restart=True):
    """Launch every command in self._cfg['commanddesc'] via HadoopCommand.

    Background commands are tracked in self.__running; for a jobtracker the
    method additionally waits until its web UI responds before returning.
    """
    id = 0
    for desc in self._cfg['commanddesc']:
      self.log.debug(pprint.pformat(desc.dict))
      mrSysDir = getMapredSystemDirectory(self._cfg['mapred-system-dir-root'],
                                          self._cfg['userid'], self._cfg['service-id'])
      self.log.debug('mrsysdir is %s' % mrSysDir)
      cmd = HadoopCommand(id, desc, self.__tempDir, self.__pkgDir, self._cfg['hadoop-port-range'], self.log,
                          self._cfg['java-home'], mrSysDir, restart)
      self.__hadoopLogDirs.append(cmd.logdir)
      self.log.debug("hadoop log directory: %s" % self.__hadoopLogDirs)
      try:
        # if the tarball isn't there, we use the pkgs dir given.
        if self.__pkgDir == None:
          pkgdir = desc.getPkgDirs()
        else:
          pkgdir = self.__pkgDir
        self.log.debug('This is the packcage dir %s ' % (pkgdir))
        if not cmd.run(pkgdir):
          addnInfo = ""
          # NOTE(review): `is not ""` compares identity, not equality; it
          # happens to work only because CPython interns the empty string.
          if cmd.stdErrContents is not "":
            addnInfo = " Information from stderr of the command:\n%s" % (cmd.stdErrContents)
          raise Exception("Could not launch the %s using %s/bin/hadoop.%s" % (desc.getName(), pkgdir, addnInfo))
      except Exception, e:
        self.log.debug("Exception running hadoop command: %s\n%s" % (get_exception_error_string(), get_exception_string()))
        # Track the failed command too, so clusterStop can clean it up.
        self.__running[id] = cmd
        raise Exception(e)
      id += 1
      if desc.isForeground():
        continue
      self.__running[id-1] = cmd
      # ok.. now command is running. If this HodRing got jobtracker,
      # Check if it is ready for accepting jobs, and then only return
      self.__check_jobtracker(desc, id-1, pkgdir)
  def __check_jobtracker(self, desc, id, pkgdir):
    # Check jobtracker status. Return properly if it is ready to accept jobs.
    # Currently Checks for Jetty to come up, the last thing that can be checked
    # before JT completes initialisation. To be perfectly reliable, we need
    # hadoop support
    name = desc.getName()
    if name == 'jobtracker':
      # Yes I am the Jobtracker
      self.log.debug("Waiting for jobtracker to initialise")
      version = desc.getVersion()
      self.log.debug("jobtracker version : %s" % version)
      hadoopCmd = self.getRunningValues()[id]
      attrs = hadoopCmd.getFilledInKeyValues()
      attrs = parseEquals(attrs)
      jobTrackerAddr = attrs['mapred.job.tracker']
      self.log.debug("jobtracker rpc server : %s" % jobTrackerAddr)
      # The web UI config key changed between hadoop versions.
      if version < 16:
        jettyAddr = jobTrackerAddr.split(':')[0] + ':' + \
                    attrs['mapred.job.tracker.info.port']
      else:
        jettyAddr = attrs['mapred.job.tracker.http.address']
      self.log.debug("Jobtracker jetty : %s" % jettyAddr)
      # Check for Jetty to come up
      # For this do a http head, and then look at the status
      defaultTimeout = socket.getdefaulttimeout()
      # socket timeout isn`t exposed at httplib level. Setting explicitly.
      socket.setdefaulttimeout(1)
      sleepTime = 0.5
      jettyStatus = False
      jettyStatusmsg = ""
      # Exponential backoff: 0.5s doubling up to 32s between probes.
      while sleepTime <= 32:
        # There is a possibility that the command might fail after a while.
        # This code will check if the command failed so that a better
        # error message can be returned to the user.
        if not hadoopCmd.getCommandStatus():
          self.log.critical('Hadoop command found to have failed when ' \
                            'checking for jobtracker status')
          hadoopCmd.handleFailedCommand()
          addnInfo = ""
          if hadoopCmd.stdErrContents is not "":
            addnInfo = " Information from stderr of the command:\n%s" \
                       % (hadoopCmd.stdErrContents)
          raise Exception("Could not launch the %s using %s/bin/hadoop.%s" \
                          % (desc.getName(), pkgdir, addnInfo))
        try:
          jettyConn = httplib.HTTPConnection(jettyAddr)
          jettyConn.request("HEAD", "/jobtracker.jsp")
          # httplib inherently retries the following till socket timeout
          resp = jettyConn.getresponse()
          if resp.status != 200:
            # Some problem?
            jettyStatus = False
            jettyStatusmsg = "Jetty gave a non-200 response to a HTTP-HEAD" +\
                             " request. HTTP Status (Code, Msg): (%s, %s)" % \
                             ( resp.status, resp.reason )
            break
          else:
            self.log.info("Jetty returned a 200 status (%s)" % resp.reason)
            self.log.info("JobTracker successfully initialised")
            return
        except socket.error:
          self.log.debug("Jetty gave a socket error. Sleeping for %s" \
                         % sleepTime)
          time.sleep(sleepTime)
          sleepTime = sleepTime * 2
        except Exception, e:
          jettyStatus = False
          jettyStatusmsg = ("Process(possibly other than jetty) running on" + \
                            " port assigned to jetty is returning invalid http response")
          break
      # Always restore the process-wide socket timeout.
      socket.setdefaulttimeout(defaultTimeout)
      if not jettyStatus:
        self.log.critical("Jobtracker failed to initialise.")
        if jettyStatusmsg:
          self.log.critical( "Reason: %s" % jettyStatusmsg )
        else: self.log.critical( "Reason: Jetty failed to give response")
        raise Exception("JobTracker failed to initialise")
  def stop(self):
    """Stop the tarball HTTP server (if running) and the base service."""
    self.log.debug("Entered hodring stop.")
    if self._http:
      self.log.debug("stopping http server...")
      self._http.stop()
    self.log.debug("call hodsvcrgy stop...")
    hodBaseService.stop(self)
  def _xr_method_clusterStart(self, initialize=True):
    """XML-RPC dispatch wrapper for clusterStart."""
    return self.clusterStart(initialize)
  def _xr_method_clusterStop(self):
    """XML-RPC dispatch wrapper for clusterStop."""
    return self.clusterStop()
  def start(self):
    """Run and maintain hodring commands"""
    try:
      if self._cfg.has_key('download-addr'):
        self._http = threadedHTTPServer('', self._cfg['http-port-range'])
        self.log.info("Starting http server...")
        self._http.serve_forever()
        self.log.debug("http://%s:%d" % (self._http.server_address[0],
                       self._http.server_address[1]))
      hodBaseService.start(self)
      ringXRAddress = None
      if self._cfg.has_key('ringmaster-xrs-addr'):
        ringXRAddress = "http://%s:%s/" % (self._cfg['ringmaster-xrs-addr'][0],
                                           self._cfg['ringmaster-xrs-addr'][1])
        self.log.debug("Ringmaster at %s" % ringXRAddress)
      self.log.debug("Creating service registry XML-RPC client.")
      serviceClient = hodXRClient(to_http_url(
                                  self._cfg['svcrgy-addr']))
      if ringXRAddress == None:
        self.log.info("Did not get ringmaster XML-RPC address. Fetching information from service registry.")
        ringList = serviceClient.getServiceInfo(self._cfg['userid'],
          self._cfg['service-id'], 'ringmaster', 'hod')
        self.log.debug(pprint.pformat(ringList))
        if len(ringList):
          if isinstance(ringList, list):
            ringXRAddress = ringList[0]['xrs']
        count = 0
        # Poll the registry (up to 3000 tries at 0.2s) until the ringmaster
        # publishes its XML-RPC address.
        while (ringXRAddress == None and count < 3000):
          ringList = serviceClient.getServiceInfo(self._cfg['userid'],
            self._cfg['service-id'], 'ringmaster', 'hod')
          if len(ringList):
            if isinstance(ringList, list):
              ringXRAddress = ringList[0]['xrs']
          count = count + 1
          time.sleep(.2)
      if ringXRAddress == None:
        raise Exception("Could not get ringmaster XML-RPC server address.")
      self.log.debug("Creating ringmaster XML-RPC client.")
      ringClient = hodXRClient(ringXRAddress)
      id = self.hostname + "_" + str(os.getpid())
      if 'download-addr' in self._cfg:
        self.__download_package(ringClient)
      else:
        self.log.debug("Did not find a download address.")
      cmdlist = []
      firstTime = True
      increment = 0
      hadoopStartupTime = 2
      cmdlist = ringClient.getCommand(id)
      # Poll the ringmaster with jittered backoff until it hands out commands.
      while (cmdlist == []):
        if firstTime:
          sleepTime = increment + self._cfg['cmd-retry-initial-time'] + hadoopStartupTime\
                      + random.uniform(0,self._cfg['cmd-retry-interval'])
          firstTime = False
        else:
          # NOTE(review): the doubled '+' below is a unary plus — harmless,
          # but likely a typo.
          sleepTime = increment + self._cfg['cmd-retry-initial-time'] + \
                      + random.uniform(0,self._cfg['cmd-retry-interval'])
        self.log.debug("Did not get command list. Waiting for %s seconds." % (sleepTime))
        time.sleep(sleepTime)
        increment = increment + 1
        cmdlist = ringClient.getCommand(id)
      self.log.debug(pformat(cmdlist))
      cmdDescs = []
      for cmds in cmdlist:
        cmdDescs.append(CommandDesc(cmds['dict'], self.log))
      self._cfg['commanddesc'] = cmdDescs
      self.log.info("Running hadoop commands...")
      self.__run_hadoop_commands(False)
      masterParams = []
      for k, cmd in self.__running.iteritems():
        masterParams.extend(cmd.filledInKeyVals)
      self.log.debug("printing getparams")
      self.log.debug(pformat(id))
      self.log.debug(pformat(masterParams))
      # when this is on a required host, the ringMaster already has our masterParams
      if(len(masterParams) > 0):
        ringClient.addMasterParams(id, masterParams)
    except Exception, e:
      raise Exception(e)
  def clusterStart(self, initialize=True):
    """Start a stopped mapreduce/dfs cluster"""
    if initialize:
      self.log.debug('clusterStart Method Invoked - Initialize')
    else:
      self.log.debug('clusterStart Method Invoked - No Initialize')
    try:
      self.log.debug("Creating service registry XML-RPC client.")
      serviceClient = hodXRClient(to_http_url(self._cfg['svcrgy-addr']),
                                  None, None, 0, 0, 0)
      self.log.info("Fetching ringmaster information from service registry.")
      count = 0
      ringXRAddress = None
      # NOTE(review): unlike start(), this poll loop has no time.sleep and
      # will spin through its 3000 tries as fast as the RPC calls allow.
      while (ringXRAddress == None and count < 3000):
        ringList = serviceClient.getServiceInfo(self._cfg['userid'],
          self._cfg['service-id'], 'ringmaster', 'hod')
        if len(ringList):
          if isinstance(ringList, list):
            ringXRAddress = ringList[0]['xrs']
        count = count + 1
      if ringXRAddress == None:
        raise Exception("Could not get ringmaster XML-RPC server address.")
      self.log.debug("Creating ringmaster XML-RPC client.")
      ringClient = hodXRClient(ringXRAddress, None, None, 0, 0, 0)
      id = self.hostname + "_" + str(os.getpid())
      cmdlist = []
      if initialize:
        if 'download-addr' in self._cfg:
          self.__download_package(ringClient)
        else:
          self.log.debug("Did not find a download address.")
        # NOTE(review): sleep-less busy-wait until commands are available.
        while (cmdlist == []):
          cmdlist = ringClient.getCommand(id)
      else:
        while (cmdlist == []):
          cmdlist = ringClient.getAdminCommand(id)
      self.log.debug(pformat(cmdlist))
      cmdDescs = []
      for cmds in cmdlist:
        cmdDescs.append(CommandDesc(cmds['dict'], self.log))
      self._cfg['commanddesc'] = cmdDescs
      if initialize:
        self.log.info("Running hadoop commands again... - Initialize")
        self.__run_hadoop_commands()
        masterParams = []
        for k, cmd in self.__running.iteritems():
          self.log.debug(cmd)
          masterParams.extend(cmd.filledInKeyVals)
        self.log.debug("printing getparams")
        self.log.debug(pformat(id))
        self.log.debug(pformat(masterParams))
        # when this is on a required host, the ringMaster already has our masterParams
        if(len(masterParams) > 0):
          ringClient.addMasterParams(id, masterParams)
      else:
        self.log.info("Running hadoop commands again... - No Initialize")
        self.__run_hadoop_commands()
    except:
      self.log.error(get_exception_string())
    return True
  def clusterStop(self):
    """Stop a running mapreduce/dfs cluster without stopping the hodring"""
    self.log.debug('clusterStop Method Invoked')
    try:
      for cmd in self.__running.values():
        cmd.kill()
      self.__running = {}
    except:
      self.log.error(get_exception_string())
    return True
extension Request {
    /// Returns the current `Session` or creates one.
    ///
    ///     router.get("session") { req -> String in
    ///         req.session.data["name"] = "Vapor"
    ///         return "Session set"
    ///     }
    ///
    /// - note: `SessionsMiddleware` must be added and enabled.
    /// - returns: `Session` for this `Request`.
    public var session: Session {
        let cache = self._sessionCache
        let middlewareDetected = cache.middlewareFlag.withLockedValue { $0 }
        if !middlewareDetected {
            // No `SessionsMiddleware` was detected on your app.
            // Suggested solutions:
            // - Add the `SessionsMiddleware` globally to your app using `app.middleware.use`
            // - Add the `SessionsMiddleware` to a route group.
            assertionFailure("No `SessionsMiddleware` detected.")
        }
        // Create the session lazily, inside the lock so concurrent accesses
        // observe a single instance.
        return cache.session.withLockedValue { stored in
            guard let existing = stored else {
                let created = Session()
                stored = created
                return created
            }
            return existing
        }
    }

    /// Whether a session has already been created for this request.
    public var hasSession: Bool {
        return self._sessionCache.session.withLockedValue { $0 != nil }
    }

    /// Storage key under which this request's `SessionCache` lives.
    private struct SessionCacheKey: StorageKey {
        typealias Value = SessionCache
    }

    /// Lazily creates and caches the `SessionCache` in request storage.
    internal var _sessionCache: SessionCache {
        if let cached = self.storage[SessionCacheKey.self] {
            return cached
        }
        let fresh = SessionCache()
        self.storage[SessionCacheKey.self] = fresh
        return fresh
    }
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.