code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.validation;
import java.beans.PropertyDescriptor;
import java.io.Serializable;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.List;
import org.jspecify.annotations.Nullable;
import org.springframework.beans.BeanUtils;
import org.springframework.util.Assert;
import org.springframework.util.ObjectUtils;
import org.springframework.util.ReflectionUtils;
import org.springframework.util.StringUtils;
/**
* A simple implementation of the {@link Errors} interface, managing global
* errors and field errors for a top-level target object. Flexibly retrieves
* field values through bean property getter methods, and automatically
* falls back to raw field access if necessary.
*
* <p>Note that this {@link Errors} implementation comes without support for
* nested paths. It is exclusively designed for the validation of individual
* top-level objects, not aggregating errors from multiple sources.
* If this is insufficient for your purposes, use a binding-capable
* {@link Errors} implementation such as {@link BeanPropertyBindingResult}.
*
* @author Juergen Hoeller
* @since 6.1
* @see Validator#validateObject(Object)
* @see BeanPropertyBindingResult
* @see DirectFieldBindingResult
*/
@SuppressWarnings("serial")
public class SimpleErrors implements Errors, Serializable {

	/** The object being validated. */
	private final Object target;

	/** Name under which errors are reported for the target. */
	private final String objectName;

	/** Accumulated object-level errors. */
	private final List<ObjectError> globalErrors = new ArrayList<>();

	/** Accumulated field-level errors. */
	private final List<FieldError> fieldErrors = new ArrayList<>();


	/**
	 * Create a new {@link SimpleErrors} holder for the given target,
	 * using the simple name of the target class as the object name.
	 * @param target the target to wrap
	 */
	public SimpleErrors(Object target) {
		Assert.notNull(target, "Target must not be null");
		this.target = target;
		this.objectName = target.getClass().getSimpleName();
	}

	/**
	 * Create a new {@link SimpleErrors} holder for the given target.
	 * @param target the target to wrap
	 * @param objectName the name of the target object for error reporting
	 */
	public SimpleErrors(Object target, String objectName) {
		Assert.notNull(target, "Target must not be null");
		this.target = target;
		this.objectName = objectName;
	}

	@Override
	public String getObjectName() {
		return this.objectName;
	}

	@Override
	public void reject(String errorCode, Object @Nullable [] errorArgs, @Nullable String defaultMessage) {
		ObjectError error = new ObjectError(getObjectName(), new String[] {errorCode}, errorArgs, defaultMessage);
		this.globalErrors.add(error);
	}

	@Override
	public void rejectValue(@Nullable String field, String errorCode,
			Object @Nullable [] errorArgs, @Nullable String defaultMessage) {

		// An empty field reference means an object-level rejection.
		if (!StringUtils.hasLength(field)) {
			reject(errorCode, errorArgs, defaultMessage);
			return;
		}
		Object rejectedValue = getFieldValue(field);
		FieldError error = new FieldError(getObjectName(), field, rejectedValue, false,
				new String[] {errorCode}, errorArgs, defaultMessage);
		this.fieldErrors.add(error);
	}

	@Override
	public void addAllErrors(Errors errors) {
		this.globalErrors.addAll(errors.getGlobalErrors());
		this.fieldErrors.addAll(errors.getFieldErrors());
	}

	@Override
	public List<ObjectError> getGlobalErrors() {
		return this.globalErrors;
	}

	@Override
	public List<FieldError> getFieldErrors() {
		return this.fieldErrors;
	}

	@Override
	public @Nullable Object getFieldValue(String field) {
		// Prefer the rejected value recorded with an existing field error, if any.
		FieldError existing = getFieldError(field);
		if (existing != null) {
			return existing.getRejectedValue();
		}
		// Next, try a bean property getter...
		PropertyDescriptor descriptor = BeanUtils.getPropertyDescriptor(this.target.getClass(), field);
		if (descriptor != null && descriptor.getReadMethod() != null) {
			ReflectionUtils.makeAccessible(descriptor.getReadMethod());
			return ReflectionUtils.invokeMethod(descriptor.getReadMethod(), this.target);
		}
		// ...and finally fall back to raw field access.
		Field rawField = ReflectionUtils.findField(this.target.getClass(), field);
		if (rawField == null) {
			throw new IllegalArgumentException("Cannot retrieve value for field '" + field +
					"' - neither a getter method nor a raw field found");
		}
		ReflectionUtils.makeAccessible(rawField);
		return ReflectionUtils.getField(rawField, this.target);
	}

	@Override
	public @Nullable Class<?> getFieldType(String field) {
		// Same lookup order as getFieldValue: property descriptor first, raw field second.
		PropertyDescriptor descriptor = BeanUtils.getPropertyDescriptor(this.target.getClass(), field);
		if (descriptor != null) {
			return descriptor.getPropertyType();
		}
		Field rawField = ReflectionUtils.findField(this.target.getClass(), field);
		return (rawField != null ? rawField.getType() : null);
	}

	@Override
	public boolean equals(@Nullable Object other) {
		if (this == other) {
			return true;
		}
		return (other instanceof SimpleErrors that &&
				ObjectUtils.nullSafeEquals(this.target, that.target) &&
				this.globalErrors.equals(that.globalErrors) &&
				this.fieldErrors.equals(that.fieldErrors));
	}

	@Override
	public int hashCode() {
		// Based on the target only; equal instances share the same target.
		return this.target.hashCode();
	}

	@Override
	public String toString() {
		// One line per error, global errors first.
		StringBuilder output = new StringBuilder();
		this.globalErrors.forEach(error -> output.append('\n').append(error));
		this.fieldErrors.forEach(error -> output.append('\n').append(error));
		return output.toString();
	}
} | java | github | https://github.com/spring-projects/spring-framework | spring-context/src/main/java/org/springframework/validation/SimpleErrors.java |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Sequential executor."""
import subprocess
from typing import Any, Optional
from airflow.executors.base_executor import BaseExecutor, CommandType
from airflow.models.taskinstance import TaskInstanceKeyType
from airflow.utils.state import State
class SequentialExecutor(BaseExecutor):
"""
This executor will only run one task instance at a time, can be used
for debugging. It is also the only executor that can be used with sqlite
since sqlite doesn't support multiple connections.
Since we want airflow to work out of the box, it defaults to this
SequentialExecutor alongside sqlite as you first install it.
"""
def __init__(self):
super().__init__()
self.commands_to_run = []
def execute_async(self,
key: TaskInstanceKeyType,
command: CommandType,
queue: Optional[str] = None,
executor_config: Optional[Any] = None) -> None:
self.commands_to_run.append((key, command))
def sync(self) -> None:
for key, command in self.commands_to_run:
self.log.info("Executing command: %s", command)
try:
subprocess.check_call(command, close_fds=True)
self.change_state(key, State.SUCCESS)
except subprocess.CalledProcessError as e:
self.change_state(key, State.FAILED)
self.log.error("Failed to execute task %s.", str(e))
self.commands_to_run = []
def end(self):
"""End the executor."""
self.heartbeat()
def terminate(self):
"""Terminate the executor is not doing anything.""" | unknown | codeparrot/codeparrot-clean | ||
import numpy as np
from sklearn.utils.testing import (assert_array_almost_equal,
assert_array_equal, assert_true, assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
"""End-to-end checks of the PLS implementations on the linnerud data:
algorithm equivalence (nipals vs. svd), PLS mathematical properties
(orthogonality, decomposition identities), and non-regression fixtures
checked against the R package plspm (and misOmics/pls for PLS2)."""
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
# (loadings are only defined up to a sign flip per component)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
# T/U: scores, P/Q: loadings, Wx/Wy: weights
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
# helper: M'M must be diagonal for the columns of M to be orthogonal
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
# The results were checked against the R-packages plspm, misOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
assert_array_almost_equal(pls_2.x_weights_, x_weights)
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_weights_, y_weights)
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_loadings_, y_loadings)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
# fixed seed so the fixture matrices below stay reproducible
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
# re-randomize the global RNG so later tests are not affected by seed 11
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
    """PLSSVD (and friends) must return only the requested number of
    components, not every possible one."""
    dataset = load_linnerud()
    X, Y = dataset.data, dataset.target
    n_components = 2
    for estimator_cls in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
        estimator = estimator_cls(n_components=n_components)
        estimator.fit(X, Y)
        assert_equal(n_components, estimator.y_scores_.shape[1])
def test_univariate_pls_regression():
    """A 1d Y must behave exactly like the equivalent single-column 2d Y."""
    dataset = load_linnerud()
    X, Y = dataset.data, dataset.target
    clf = pls_.PLSRegression()
    # Compare 1d to column vector
    coef_1d = clf.fit(X, Y[:, 0]).coef_
    coef_2d = clf.fit(X, Y[:, :1]).coef_
    assert_array_almost_equal(coef_1d, coef_2d)
def test_predict_transform_copy():
    """The copy=False code paths must yield the same results as the
    defaults and must leave the caller's arrays untouched."""
    dataset = load_linnerud()
    X, Y = dataset.data, dataset.target
    clf = pls_.PLSCanonical()
    X_orig = X.copy()
    Y_orig = Y.copy()
    clf.fit(X, Y)
    # identical results with and without copying
    assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
    assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
    # same check when Y is passed along
    assert_array_almost_equal(clf.transform(X, Y),
                              clf.transform(X.copy(), Y.copy(), copy=False))
    # the inputs themselves must be preserved exactly (not just almost)
    assert_array_equal(X_orig, X)
    assert_array_equal(Y_orig, Y)
    # sanity check: the column means were non-zero to begin with, so an
    # in-place centering leak would actually have been observable
    assert_true(np.all(X.mean(axis=0) != 0))
def test_scale():
    """Fitting with scale=True must not blow up on a constant column
    (zero standard deviation)."""
    dataset = load_linnerud()
    X, Y = dataset.data, dataset.target
    # force X[:, -1].std() to be zero
    X[:, -1] = 1.0
    estimators = [pls_.PLSCanonical(), pls_.PLSRegression(),
                  pls_.PLSSVD()]
    for clf in estimators:
        clf.set_params(scale=True)
        clf.fit(X, Y)
def test_pls_errors():
"""Requesting more components than features must raise ValueError."""
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
# 4 components > the 3 features of linnerud, so fit() must refuse
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y) | unknown | codeparrot/codeparrot-clean | |
/*-------------------------------------------------------------------------
*
* assert.c
* Assert support code.
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/backend/utils/error/assert.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include <unistd.h>
#ifdef HAVE_EXECINFO_H
#include <execinfo.h>
#endif
/*
* ExceptionalCondition - Handles the failure of an Assert()
*
* We intentionally do not go through elog() here, on the grounds of
* wanting to minimize the amount of infrastructure that has to be
* working to report an assertion failure.
*/
void
ExceptionalCondition(const char *conditionName,
const char *fileName,
int lineNumber)
{
/* Report the failure on stderr (or local equivalent) */
/* Degenerate case: the assertion macro passed us bad pointers */
if (!conditionName || !fileName)
write_stderr("TRAP: ExceptionalCondition: bad arguments in PID %d\n",
(int) getpid());
else
write_stderr("TRAP: failed Assert(\"%s\"), File: \"%s\", Line: %d, PID: %d\n",
conditionName, fileName, lineNumber, (int) getpid());
/* Usually this shouldn't be needed, but make sure the msg went out */
fflush(stderr);
/* If we have support for it, dump a simple backtrace */
#ifdef HAVE_BACKTRACE_SYMBOLS
{
/* fixed-size frame buffer; deeper stacks are simply truncated */
void *buf[100];
int nframes;
nframes = backtrace(buf, lengthof(buf));
backtrace_symbols_fd(buf, nframes, fileno(stderr));
}
#endif
/*
* If configured to do so, sleep indefinitely to allow user to attach a
* debugger. It would be nice to use pg_usleep() here, but that can sleep
* at most 2G usec or ~33 minutes, which seems too short.
*/
#ifdef SLEEP_ON_ASSERT
sleep(1000000);
#endif
/* abort() terminates the process; does not return */
abort();
} | c | github | https://github.com/postgres/postgres | src/backend/utils/error/assert.c |
#!/usr/bin/env python
from ConfigParser import ConfigParser
from twisted.web import http
from twisted.internet import protocol
from twisted.internet import reactor, threads
from twisted.web.server import Site
from twisted.web.static import File
from twisted.web.resource import Resource
from twisted.web.error import NoResource
from zope.interface import implements
from twisted.cred.portal import IRealm, Portal
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse, ICredentialsChecker
from twisted.web.guard import HTTPAuthSessionWrapper, DigestCredentialFactory
from twisted.web.resource import IResource
from twisted.cred import credentials
from ordereddict import OrderedDict # don't lose compatibility with python < 2.7
import SQLWrapper
import pprint
import re
import getopt
import sys
import datetime
import time
import cgi
import os
# --- module-level configuration / state ---
# NOTE(review): not referenced in this chunk; presumably toggled elsewhere
glob_allow=True
# naxsi core rules file, parsed to map rule ids to human-readable messages
glob_rules_file="/etc/nginx/naxsi_core.rules"
# path to the naxsi-ui configuration file, passed to SQLWrapper
glob_conf_file = ''
# presumably the HTTP digest-auth credentials — set outside this chunk; verify
glob_username = ''
glob_pass = ''
# NOTE(review): not referenced in this chunk
glob_fileList = []
class rules_extractor(object):
    """Builds naxsi whitelist suggestions from the exception database.

    Reads recorded naxsi exceptions through SQLWrapper and aggregates
    them into candidate BasicRule whitelists, from most restrictive
    (url + var_name + zone + rule_id) to least restrictive.
    """

    def __init__(self, page_hit, rules_hit, rules_file, conf_file='naxsi-ui.conf'):
        """page_hit/rules_hit: reporting thresholds used by try_append."""
        self.wrapper = SQLWrapper.SQLWrapper(glob_conf_file)
        self.wrapper.connect()
        self.wrapper.setRowToDict()
        self.rules_list = []
        self.final_rules = []
        self.base_rules = []
        self.page_hit = page_hit
        self.rules_hit = rules_hit
        # rule id (as a string) -> human-readable message from the core rules
        self.core_msg = {}
        self.extract_core(glob_rules_file)

    def extract_core(self, rules_file):
        """Parse the naxsi core rules file and fill self.core_msg.

        Each 'MainRule' line carries an 'id:NNNN;' token and a
        '"msg:..."' token; this maps the id to the message text so that
        generated whitelists can be annotated.
        Best-effort: an unreadable file only prints a warning.
        """
        try:
            fd = open(glob_rules_file, 'r')
            for i in fd:
                if i.startswith('MainRule'):
                    pos = i.find('id:')
                    pos_msg = i.find('msg:')
                    # skip malformed lines missing either token
                    if pos == -1 or pos_msg == -1:
                        continue
                    # bug fix: the id runs from after 'id:' up to the next ';'
                    # (previously the ';' was searched inside a single char,
                    # producing garbage keys that could never match a lookup)
                    rule_id = i[pos + 3:i.find(';', pos + 3)]
                    # the message runs from after 'msg:' up to the closing '"'
                    msg = i[pos_msg + 4:][:i[pos_msg + 4:].find('"')]
                    self.core_msg[rule_id] = msg
            fd.close()
        except Exception:
            # deliberate best-effort: missing rules file only disables messages
            print ("Unable to open rules file.")
            pass

    def gen_basic_rules(self,url=None, srcip=None, dsthost=None,
                        rule_id=None, exception_md5=None,
                        exception_id=None):
        """Seed base_rules from rules_list (whitelist fetch currently disabled)."""
        #self.rules_list = self.wrapper.getWhitelist()
        self.base_rules = self.rules_list[:]
        # pprint.pprint(self.base_rules)

    def transform_to_dict(self, l):
        """Group a list of (key, value) pairs into {key: [unique values]}.

        Note: de-duplication goes through set(), so the order of values
        within each key is not guaranteed.
        """
        d = {}
        for key, value in l:
            d.setdefault(key, []).append(value)
        # eliminate duplicate values for each key
        for key in d:
            d[key] = list(set(d[key]))
        return d

    def get_partial_match_dict(self, d, to_find):
        """Return the index of the first dict in d that contains every
        key/value pair of to_find, or None when nothing matches."""
        for i, current_dict in enumerate(d):
            if all(key in current_dict and current_dict[key] == val
                   for key, val in to_find.items()):
                return i

    def opti_rules_back(self):
        """Aggregate recorded exceptions into whitelist candidates.

        Runs a series of GROUP BY queries from most to least restrictive
        (dropping url, var_name or rule_id when they are unpredictable),
        keeping only groups above 0.1% of all recorded connections, and
        feeds each group to try_append for de-duplication.
        Returns (base_rules, final_rules).
        """
        # rules of requests extracting optimized whitelists, from
        # more restrictive to less restrictive.
        opti_select_DESC = [
            # select on url+var_name+zone+rule_id
            ("select count(*) as ct, e.rule_id, e.zone, e.var_name, u.url, count(distinct c.peer_ip) as peer_count, "
             "(select count(distinct peer_ip) from connections) as ptot, "
             "(select count(*) from connections) as tot "
             "from exceptions as e, urls as u, connections as c where c.url_id "
             "= u.url_id and c.id_exception = e.exception_id GROUP BY u.url, e.var_name,"
             "e.zone, e.rule_id HAVING (ct) > ((select count(*) from connections)/1000)"),
            # select on var_name+zone+rule_id (unpredictable URL)
            ("select count(*) as ct, e.rule_id, e.zone, e.var_name, '' as url, count(distinct c.peer_ip) as peer_count, "
             "(select count(distinct peer_ip) from connections) as ptot, "
             "(select count(*) from connections) as tot "
             "from exceptions as e, urls as u, connections as c where c.url_id = u.url_id and c.id_exception = "
             "e.exception_id GROUP BY e.var_name, e.zone, e.rule_id HAVING (ct) > "
             "((select count(*) from connections)/1000)"),
            # select on zone+url+rule_id (unpredictable arg_name)
            ("select count(*) as ct, e.rule_id, e.zone, '' as var_name, u.url, count(distinct c.peer_ip) as peer_count, "
             "(select count(distinct peer_ip) from connections) as ptot, "
             "(select count(*) from connections) as tot "
             "from exceptions as e, urls as u, connections as c where c.url_id "
             "= u.url_id and c.id_exception = e.exception_id GROUP BY u.url, "
             "e.zone, e.rule_id HAVING (ct) > ((select count(*) from connections)/1000)"),
            # select on zone+url+var_name (unpredictable id)
            ("select count(*) as ct, 0 as rule_id, e.zone, e.var_name, u.url, count(distinct c.peer_ip) as peer_count, "
             "(select count(distinct peer_ip) from connections) as ptot, "
             "(select count(*) from connections) as tot "
             "from exceptions as e, urls as u, connections as c where c.url_id "
             "= u.url_id and c.id_exception = e.exception_id GROUP BY u.url, "
             "e.zone, e.var_name HAVING (ct) > ((select count(*) from connections)/1000)")
        ]
        for req in opti_select_DESC:
            self.wrapper.execute(req)
            res = self.wrapper.getResults()
            for r in res:
                # hcount/pcount: hits and distinct peers for this group;
                # htotal/ptotal: grand totals used for the ratio columns
                self.try_append({
                    'url': r['url'],
                    'rule_id': r['rule_id'],
                    'zone': r['zone'],
                    'var_name': r['var_name'] if len(r['var_name']) > 0 else '',
                    'hcount': r['ct'],
                    'htotal': r['tot'],
                    'pcount': r['peer_count'],
                    'ptotal': r['ptot'],
                    'pratio': round((r['peer_count'] / float(r['ptot'])) * 100, 2),
                    'hratio': round((r['ct'] / float(r['tot'])) * 100, 2),
                })
        return self.base_rules, self.final_rules

    #returns true if whitelist 'target' is already handled by final_rules
    #does a dummy comparison and compares the counters
    def try_append(self, target, delmatch=False):
        """Add target to final_rules unless already covered by broader rules.

        A candidate wins when it covers strictly more hits than all the
        rules it overlaps with, or at least as many while replacing more
        than self.rules_hit rules. With delmatch=True, overlapping rules
        are removed instead (used before inserting the winner).
        """
        count = 0
        nb_rule = 0
        # iterate over a copy so removal during delmatch is safe
        for z in self.final_rules[:]:
            # empty url / zone / var_name and rule_id 0 act as wildcards
            if len(target['url']) > 0 and len(z['url']) > 0 and target['url'] != z['url']:
                continue
            if target['rule_id'] != 0 and z['rule_id'] != 0 and target['rule_id'] != z['rule_id']:
                continue
            if len(target['zone']) > 0 and len(z['zone']) > 0 and target['zone'] != z['zone']:
                continue
            if len(target['var_name']) > 0 and len(z['var_name']) > 0 and target['var_name'] != z['var_name']:
                continue
            if delmatch is True:
                self.final_rules.remove(z)
            else:
                nb_rule += 1
                count += int(z['hcount'])
        if delmatch is True:
            return
        if (target['hcount'] > count) or (target['hcount'] >= count and nb_rule > self.rules_hit):
            pprint.pprint(target)
            # evict the overlapping rules, then insert the broader one
            self.try_append(target, True)
            self.final_rules.append(target)
        return

    def generate_stats(self):
        """Return an HTML snippet with exception and peer counts."""
        self.wrapper.execute("select count(distinct exception_id) as uniq_exception from exceptions")
        uniq_ex = self.wrapper.getResults()[0]['uniq_exception']
        self.wrapper.execute("select count(distinct peer_ip) as uniq_peer from connections")
        uniq_peer = self.wrapper.getResults()[0]['uniq_peer']
        return "<ul><li>There is currently %s unique exceptions.</li></ul><ul><li>There is currently %s different peers that triggered rules.</li></ul>" % (uniq_ex, uniq_peer)
class NaxsiUI(Resource):
    """Root web resource: serves static assets and dispatches dynamic pages."""

    def __init__(self):
        Resource.__init__(self)
        # twisted will handle static content for me
        self.putChild('bootstrap', File('./bootstrap'))
        self.putChild('js', File('./js'))
        # path -> resource-class dispatch table for dynamic pages
        self.page_handler = {'/' : Index, '/graphs': GraphView, '/get_rules': GenWhitelist, '/map': WootMap}

    def getChild(self, name, request):
        """Instantiate the handler mapped to the request path, or 404."""
        try:
            return self.page_handler[request.path]()
        except KeyError:
            return NoResource()
class Index(Resource):
    """Landing page: renders index.tpl with live statistics."""

    def __init__(self):
        Resource.__init__(self)
        self.ex = rules_extractor(0,0, None)

    def render_GET(self, request):
        """Fill the template's __STATS__ / __HOSTNAME__ placeholders."""
        template = open('index.tpl', 'r')
        page = template.read()
        template.close()
        page = page.replace('__STATS__', self.ex.generate_stats())
        return page.replace('__HOSTNAME__', request.getHeader('Host'))
class WootMap(Resource):
# Renders a world map of attacking peers, grouped by GeoIP country.
isLeaf = True
def __init__(self):
self.has_geoip = False
try:
import GeoIP
self.has_geoip = True
except:
print "No GeoIP module, no map"
# NOTE(review): returning here skips Resource.__init__ entirely —
# the resource is left half-constructed when GeoIP is missing; verify
return
Resource.__init__(self)
self.ex = rules_extractor(0,0, None)
self.gi = GeoIP.new(GeoIP.GEOIP_MEMORY_CACHE)
def render_GET(self, request):
if self.has_geoip is False:
return "No GeoIP module/database installed."
render = open('map.tpl').read()
# per-peer hit counts from the connections table
self.ex.wrapper.execute('select peer_ip as p, count(*) as c from connections group by peer_ip')
ips = self.ex.wrapper.getResults()
# country2coords.txt maps a country code to "lat,lng" on each line
fd = open("country2coords.txt", "r")
bycn = {}
for ip in ips:
country = self.gi.country_code_by_addr(ip['p'])
# unresolvable addresses are lumped under "CN"
if country is None or len(country) < 2:
country = "CN"
if country not in bycn:
bycn[country] = {'count': int(ip['c']), 'coords': ''}
# rescan the coords file from the start for every new country
fd.seek(0)
for cn in fd:
if country in cn:
bycn[country]['coords'] = cn[len(country)+1:-1]
break
# fall back to a default location when the country is unknown
if len(bycn[country]['coords']) < 1:
bycn[country]['coords'] = "37.090240,-95.7128910"
else:
# NOTE(review): no int() cast here, unlike the initial assignment —
# if the DB driver returns 'c' as a string this concatenates; verify
bycn[country]['count'] += ip['c']
pprint.pprint(bycn[country])
# emit one google-maps citymap entry per country
base_array = 'citymap["__CN__"] = {center: new google.maps.LatLng(__COORDS__), population: __COUNT__};\n'
citymap = ''
for cn in bycn.keys():
citymap += base_array.replace('__CN__', cn).replace('__COORDS__', bycn[cn]['coords']).replace('__COUNT__',
str(bycn[cn]['count']))
render = render.replace('__CITYMAP__', citymap)
return render
class GraphView(Resource):
# Renders graphs.tpl with per-day exception counts split by attack family.
isLeaf = True
def __init__(self):
Resource.__init__(self)
self.ex = rules_extractor(0,0, None)
def render_GET(self, request):
fd = open('graphs.tpl')
html = ''
for i in fd:
html += i
fd.close()
# per-family time series; the id ranges follow the naxsi core rule
# numbering (1000-1099 presumably SQLi, 1300-1399 XSS, ... — verify
# against the deployed naxsi_core.rules)
array_excep, _ = self.build_js_array()
sqli_array, sql_count = self.build_js_array(1000, 1099)
xss_array, xss_count = self.build_js_array(1300, 1399)
rfi_array, rfi_count = self.build_js_array(1100, 1199)
upload_array, upload_count = self.build_js_array(1500, 1599)
dt_array, dt_count = self.build_js_array(1200, 1299)
evade_array, evade_count = self.build_js_array(1400, 1499)
intern_array, intern_count = self.build_js_array(0, 10)
# top 10 attacking IPs by rule hits
self.ex.wrapper.execute('select peer_ip as ip, count(id_exception) as c from connections group by peer_ip order by count(id_exception) DESC limit 10')
top_ten = self.ex.wrapper.getResults()
top_ten_html = '<table class="table table-bordered" border="1" ><thead><tr><th>IP</th><th>Rule Hits</th></tr></thead><tbody>'
for i in top_ten:
top_ten_html += '<tr><td>' + cgi.escape(i['ip']) + ' </td><td> ' + str(i['c']) + '</td></tr>'
top_ten_html += '</tbody></table>'
top_ten_page_html = ''
# top 10 URIs by exception count
self.ex.wrapper.execute('select distinct u.url as url, count(id_exception) as c from connections join urls as u on (u.url_id = connections.url_id) group by u.url order by count(id_exception) DESC limit 10;')
top_ten_page = self.ex.wrapper.getResults()
top_ten_page_html = '<table class="table table-bordered" border="1" ><thead><tr><th>URI</th><th>Exceptions Count</th></tr></thead><tbody>'
for i in top_ten_page:
top_ten_page_html += '<tr><td>' + cgi.escape(i['url']).replace('\'', '\\\'') + ' </td><td> ' + str(i['c']) + '</td></tr>'
top_ten_page_html += '</tbody></table>'
# substitute every placeholder in the template in one pass
dict_replace = {'__TOPTEN__': top_ten_html, '__TOPTENPAGE__': top_ten_page_html, '__TOTALEXCEP__': array_excep, '__SQLCOUNT__': str(sql_count), '__XSSCOUNT__': str(xss_count), '__DTCOUNT__': str(dt_count), '__RFICOUNT__': str(rfi_count), '__EVCOUNT__': str(evade_count), '__UPCOUNT__': str(upload_count), '__INTCOUNT__': str(intern_count), '__SQLIEXCEP__': sqli_array, '__XSSEXCEP__': xss_array, '__RFIEXCEP__': rfi_array, '__DTEXCEP__': dt_array, '__UPLOADEXCEP__': upload_array, '__EVADEEXCEP__': evade_array, '__INTERNEXCEP__': intern_array}
# NOTE: tuple-unpacking lambda parameters — Python 2 only syntax
html = reduce(lambda html,(b, c): html.replace(b, c), dict_replace.items(), html)
return html
def create_js_array(self, res):
# Turn {date_string: count} into a highcharts-style JS array literal
# of [Date.UTC(y,m,d), count] pairs (month shifted to JS 0-based).
array = '['
for i in res:
d = i.replace('/', '-')
date_begin = str(d).split('-')
# JS Date months are 0-based
date_begin[1] = str(int(date_begin[1]) - 1)
date_begin = ','.join(date_begin)
array += '[Date.UTC(' + date_begin + '),' + str(res[i]).replace('/', '-') + '],'
# strip the trailing comma, if any entries were emitted
if array != '[':
array = array[:-1] + ']'
else:
array += ']'
return array
def build_dict(self, res):
# Collapse the result rows into an ordered {date: count} mapping,
# keeping only the first row seen for each date.
d = OrderedDict()
for i in res:
if i['d'] not in d.keys():
d[i['d']] = i['ex']
return d
def build_js_array(self, id_beg = None, id_end = None):
# Per-day exception counts, optionally restricted to a rule-id range.
# Returns (js_array_literal, total_hits).
if id_beg is None or id_end is None:
self.ex.wrapper.execute('select substr(date,1,10) as d, count(id_exception) as ex from connections group by substr(date,1,10)')
else:
self.ex.wrapper.execute('select substr(date,1, 10) as d, count(id_exception) as ex from connections join exceptions as e on (e.exception_id = id_exception) where e.rule_id >= %s and e.rule_id <= %s group by substr(date, 1, 10)', (str(id_beg), str(id_end)))
count = self.ex.wrapper.getResults()
mydict = self.build_dict(count)
total_hit = 0
for i in count:
total_hit += i['ex']
myarray = self.create_js_array(mydict)
return myarray, total_hit
class GenWhitelist(Resource):
def render_GET(self, request):
request.setHeader('content-type', 'text/plain')
ex = rules_extractor(int(request.args.get('page_hit', ['10'])[0]),
int(request.args.get('rules_hit', ['10'])[0]),
glob_rules_file)
ex.gen_basic_rules()
base_rules, opti_rules = ex.opti_rules_back()
opti_rules.sort(lambda a,b: (b['hratio']+(b['pratio']*3)) < (a['hratio']+(a['pratio']*3)))
pprint.pprint(opti_rules)
r = '########### Optimized Rules Suggestion ##################\n'
if not len(opti_rules):
r+= "#No rules to be generated\n"
return
opti_rules.sort(key=lambda k: (k['hratio'], k['pratio']))
_i = len(opti_rules)-1
while _i >= 0:
i = opti_rules[_i]
_i = _i - 1
r += ("# total_count:"+str(i['hcount'])+" ("+str(i['hratio'])+
"%), peer_count:"+str(i['pcount'])+" ("+str(i['pratio'])+"%)")
r += " | "+ex.core_msg.get(str(i['rule_id']), "?")+"\n"
if (i['hratio'] < 5 and i['pratio'] < 5) or (i['pratio'] < 5):
r += '#'
r += 'BasicRule wl:' + str(i['rule_id']) + ' "mz:'
if i['url'] is not None and len(i['url']) > 0:
r += '$URL:' + i['url']
if i['zone'] is not None and len(i['zone']) > 0:
if i['url']:
r += '|'
r += i['zone']
if i['var_name'] is not None and len(i['var_name']) > 0:
# oooh, that must be bad.
r = r[:-len(i['zone'])]+"$"+r[-len(i['zone']):]
r += "_VAR:"+i['var_name']
r += '";\n'
return r
class HTTPRealm(object):
implements(IRealm)
def requestAvatar(self, avatarID, mind, *interfaces):
return (IResource, NaxsiUI(), lambda: None)
def daemonize (stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except OSError, e:
sys.stderr.write ("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror) )
sys.exit(1)
# os.chdir("/")
os.umask(0)
os.setsid()
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except OSError, e:
sys.stderr.write ("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror) )
sys.exit(1)
si = open(stdin, 'r')
so = open(stdout, 'a+')
se = open(stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
def usage():
print 'Usage : python nx_extract /path/to/conf/file'
if __name__ == '__main__':
if len(sys.argv) != 2:
usage()
exit(42)
glob_conf_file = sys.argv[1]
fd = open(sys.argv[1], 'r')
conf = ConfigParser()
conf.readfp(fd)
try:
port = int(conf.get('nx_extract', 'port'))
except:
print "No port in conf file ! Using default port (8081)"
port = 8081
try:
glob_rules_file = conf.get('nx_extract', 'rules_path')
except:
print "No rules path in conf file ! Using default (/etc/nginx/sec-rules/core.rules)"
try:
glob_user = conf.get('nx_extract', 'username')
except:
print 'No username for web access ! Nx_extract will exit.'
exit(-1)
try:
glob_pass = conf.get('nx_extract', 'password')
except:
print 'No password for web access ! Nx_extract will exit.'
exit(-1)
fd.close()
credshandler = InMemoryUsernamePasswordDatabaseDontUse() # i know there is DontUse in the name
credshandler.addUser(glob_user, glob_pass)
portal = Portal(HTTPRealm(), [credshandler])
credentialFactory = DigestCredentialFactory("md5", "Naxsi-UI")
webroot = HTTPAuthSessionWrapper(portal, [credentialFactory])
factory = Site(webroot)
reactor.listenTCP(port, factory)
# daemonize(stdout = '/tmp/nx_extract_output', stderr = '/tmp/nx_extract_error')
reactor.run() | unknown | codeparrot/codeparrot-clean | ||
module.exports = {
data: "ok",
default: "default"
}; | javascript | github | https://github.com/webpack/webpack | test/cases/mjs/cjs-import-default/cjs.js |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.tools.sql import drop_view_if_exists
class report_timesheet_line(osv.osv):
_name = "report.timesheet.line"
_description = "Timesheet Line"
_auto = False
_columns = {
'name': fields.char('Year', required=False, readonly=True),
'user_id': fields.many2one('res.users', 'User', readonly=True),
'date': fields.date('Date', readonly=True),
'day': fields.char('Day', size=128, readonly=True),
'quantity': fields.float('Time', readonly=True),
'cost': fields.float('Cost', readonly=True),
'product_id': fields.many2one('product.product', 'Product',readonly=True),
'account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'general_account_id': fields.many2one('account.account', 'General Account', readonly=True),
'invoice_id': fields.many2one('account.invoice', 'Invoiced', readonly=True),
'month': fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month', readonly=True),
}
_order = 'name desc,user_id desc'
def init(self, cr):
drop_view_if_exists(cr, 'report_timesheet_line')
cr.execute("""
create or replace view report_timesheet_line as (
select
min(l.id) as id,
l.date as date,
to_char(l.date,'YYYY') as name,
to_char(l.date,'MM') as month,
l.user_id,
to_char(l.date, 'YYYY-MM-DD') as day,
l.invoice_id,
l.product_id,
l.account_id,
l.general_account_id,
sum(l.unit_amount) as quantity,
sum(l.amount) as cost
from
account_analytic_line l
where
l.user_id is not null
group by
l.date,
l.user_id,
l.product_id,
l.account_id,
l.general_account_id,
l.invoice_id
)
""")
class report_timesheet_user(osv.osv):
_name = "report_timesheet.user"
_description = "Timesheet per day"
_auto = False
_columns = {
'name': fields.char('Year', required=False, readonly=True),
'user_id':fields.many2one('res.users', 'User', readonly=True),
'quantity': fields.float('Time', readonly=True),
'cost': fields.float('Cost', readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month', readonly=True),
}
_order = 'name desc,user_id desc'
def init(self, cr):
drop_view_if_exists(cr, 'report_timesheet_user')
cr.execute("""
create or replace view report_timesheet_user as (
select
min(l.id) as id,
to_char(l.date,'YYYY') as name,
to_char(l.date,'MM') as month,
l.user_id,
sum(l.unit_amount) as quantity,
sum(l.amount) as cost
from
account_analytic_line l
where
user_id is not null
group by l.date, to_char(l.date,'YYYY'),to_char(l.date,'MM'), l.user_id
)
""")
class report_timesheet_account(osv.osv):
_name = "report_timesheet.account"
_description = "Timesheet per account"
_auto = False
_columns = {
'name': fields.char('Year', required=False, readonly=True),
'user_id':fields.many2one('res.users', 'User', readonly=True),
'account_id':fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'quantity': fields.float('Time', readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month', readonly=True),
}
_order = 'name desc,account_id desc,user_id desc'
def init(self, cr):
drop_view_if_exists(cr, 'report_timesheet_account')
cr.execute("""
create or replace view report_timesheet_account as (
select
min(id) as id,
to_char(create_date, 'YYYY') as name,
to_char(create_date,'MM') as month,
user_id,
account_id,
sum(unit_amount) as quantity
from
account_analytic_line
group by
to_char(create_date, 'YYYY'),to_char(create_date, 'MM'), user_id, account_id
)
""")
class report_timesheet_account_date(osv.osv):
_name = "report_timesheet.account.date"
_description = "Daily timesheet per account"
_auto = False
_columns = {
'name': fields.char('Year', required=False, readonly=True),
'user_id':fields.many2one('res.users', 'User', readonly=True),
'account_id':fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'quantity': fields.float('Time', readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month', readonly=True),
}
_order = 'name desc,account_id desc,user_id desc'
def init(self, cr):
drop_view_if_exists(cr, 'report_timesheet_account_date')
cr.execute("""
create or replace view report_timesheet_account_date as (
select
min(id) as id,
to_char(date,'YYYY') as name,
to_char(date,'MM') as month,
user_id,
account_id,
sum(unit_amount) as quantity
from
account_analytic_line
group by
to_char(date,'YYYY'),to_char(date,'MM'), user_id, account_id
)
""")
class report_timesheet_invoice(osv.osv):
_name = "report_timesheet.invoice"
_description = "Costs to invoice"
_auto = False
_columns = {
'user_id':fields.many2one('res.users', 'User', readonly=True),
'account_id':fields.many2one('account.analytic.account', 'Project', readonly=True),
'manager_id':fields.many2one('res.users', 'Manager', readonly=True),
'quantity': fields.float('Time', readonly=True),
'amount_invoice': fields.float('To invoice', readonly=True)
}
_rec_name = 'user_id'
_order = 'user_id desc'
def init(self, cr):
drop_view_if_exists(cr, 'report_timesheet_invoice')
cr.execute("""
create or replace view report_timesheet_invoice as (
select
min(l.id) as id,
l.user_id as user_id,
l.account_id as account_id,
a.user_id as manager_id,
sum(l.unit_amount) as quantity,
sum(l.unit_amount * t.list_price) as amount_invoice
from account_analytic_line l
left join hr_timesheet_invoice_factor f on (l.to_invoice=f.id)
left join account_analytic_account a on (l.account_id=a.id)
left join product_product p on (l.to_invoice=f.id)
left join product_template t on (l.to_invoice=f.id)
where
l.to_invoice is not null and
l.invoice_id is null
group by
l.user_id,
l.account_id,
a.user_id
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "ComparePointerToMemberVirtualFunctionCheck.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Type.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/ASTMatchers/ASTMatchersMacros.h"
#include "clang/Basic/DiagnosticIDs.h"
#include "llvm/ADT/SmallVector.h"
using namespace clang::ast_matchers;
namespace clang::tidy::bugprone {
namespace {
AST_MATCHER(CXXMethodDecl, isVirtual) { return Node.isVirtual(); }
static constexpr StringRef ErrorMsg =
"comparing a pointer to member virtual function with other pointer is "
"unspecified behavior, only compare it with a null-pointer constant for "
"equality.";
} // namespace
void ComparePointerToMemberVirtualFunctionCheck::registerMatchers(
MatchFinder *Finder) {
auto DirectMemberVirtualFunctionPointer = unaryOperator(
allOf(hasOperatorName("&"),
hasUnaryOperand(declRefExpr(to(cxxMethodDecl(isVirtual()))))));
auto IndirectMemberPointer =
ignoringImpCasts(declRefExpr().bind("indirect_member_pointer"));
Finder->addMatcher(
binaryOperator(
allOf(hasAnyOperatorName("==", "!="),
hasEitherOperand(
hasType(memberPointerType(pointee(functionType())))),
anyOf(hasEitherOperand(DirectMemberVirtualFunctionPointer),
hasEitherOperand(IndirectMemberPointer)),
unless(hasEitherOperand(
castExpr(hasCastKind(CK_NullToMemberPointer))))))
.bind("binary_operator"),
this);
}
void ComparePointerToMemberVirtualFunctionCheck::check(
const MatchFinder::MatchResult &Result) {
const auto *BO = Result.Nodes.getNodeAs<BinaryOperator>("binary_operator");
const auto *DRE =
Result.Nodes.getNodeAs<DeclRefExpr>("indirect_member_pointer");
if (DRE == nullptr) {
// compare with pointer to member virtual function.
diag(BO->getOperatorLoc(), ErrorMsg);
return;
}
// compare with variable which type is pointer to member function.
llvm::SmallVector<SourceLocation, 12U> SameSignatureVirtualMethods{};
const auto *MPT = cast<MemberPointerType>(DRE->getType().getCanonicalType());
const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
if (RD == nullptr)
return;
constexpr bool StopVisit = false;
auto VisitSameSignatureVirtualMethods =
[&](const CXXRecordDecl *CurrentRecordDecl) -> bool {
bool Ret = !StopVisit;
for (const auto *MD : CurrentRecordDecl->methods()) {
if (MD->isVirtual() && MD->getType() == MPT->getPointeeType()) {
SameSignatureVirtualMethods.push_back(MD->getBeginLoc());
Ret = StopVisit;
}
}
return Ret;
};
if (StopVisit != VisitSameSignatureVirtualMethods(RD))
RD->forallBases(VisitSameSignatureVirtualMethods);
if (!SameSignatureVirtualMethods.empty()) {
diag(BO->getOperatorLoc(), ErrorMsg);
for (const auto Loc : SameSignatureVirtualMethods)
diag(Loc, "potential member virtual function is declared here.",
DiagnosticIDs::Note);
}
}
} // namespace clang::tidy::bugprone | cpp | github | https://github.com/llvm/llvm-project | clang-tools-extra/clang-tidy/bugprone/ComparePointerToMemberVirtualFunctionCheck.cpp |
"""
=================================================================
Permutation Importance with Multicollinear or Correlated Features
=================================================================
In this example, we compute the permutation importance on the Wisconsin
breast cancer dataset using :func:`~sklearn.inspection.permutation_importance`.
The :class:`~sklearn.ensemble.RandomForestClassifier` can easily get about 97%
accuracy on a test dataset. Because this dataset contains multicollinear
features, the permutation importance will show that none of the features are
important. One approach to handling multicollinearity is by performing
hierarchical clustering on the features' Spearman rank-order correlations,
picking a threshold, and keeping a single feature from each cluster.
.. note::
See also
:ref:`sphx_glr_auto_examples_inspection_plot_permutation_importance.py`
"""
print(__doc__)
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import spearmanr
from scipy.cluster import hierarchy
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance
from sklearn.model_selection import train_test_split
# %%
# Random Forest Feature Importance on Breast Cancer Data
# ------------------------------------------------------
# First, we train a random forest on the breast cancer dataset and evaluate
# its accuracy on a test set:
data = load_breast_cancer()
X, y = data.data, data.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
clf = RandomForestClassifier(n_estimators=100, random_state=42)
clf.fit(X_train, y_train)
print("Accuracy on test data: {:.2f}".format(clf.score(X_test, y_test)))
# %%
# Next, we plot the tree based feature importance and the permutation
# importance. The permutation importance plot shows that permuting a feature
# drops the accuracy by at most `0.012`, which would suggest that none of the
# features are important. This is in contradiction with the high test accuracy
# computed above: some feature must be important. The permutation importance
# is calculated on the training set to show how much the model relies on each
# feature during training.
result = permutation_importance(clf, X_train, y_train, n_repeats=10,
random_state=42)
perm_sorted_idx = result.importances_mean.argsort()
tree_importance_sorted_idx = np.argsort(clf.feature_importances_)
tree_indices = np.arange(0, len(clf.feature_importances_)) + 0.5
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8))
ax1.barh(tree_indices,
clf.feature_importances_[tree_importance_sorted_idx], height=0.7)
ax1.set_yticks(tree_indices)
ax1.set_yticklabels(data.feature_names[tree_importance_sorted_idx])
ax1.set_ylim((0, len(clf.feature_importances_)))
ax2.boxplot(result.importances[perm_sorted_idx].T, vert=False,
labels=data.feature_names[perm_sorted_idx])
fig.tight_layout()
plt.show()
# %%
# Handling Multicollinear Features
# --------------------------------
# When features are collinear, permutating one feature will have little
# effect on the models performance because it can get the same information
# from a correlated feature. One way to handle multicollinear features is by
# performing hierarchical clustering on the Spearman rank-order correlations,
# picking a threshold, and keeping a single feature from each cluster. First,
# we plot a heatmap of the correlated features:
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8))
corr = spearmanr(X).correlation
corr_linkage = hierarchy.ward(corr)
dendro = hierarchy.dendrogram(
corr_linkage, labels=data.feature_names.tolist(), ax=ax1, leaf_rotation=90
)
dendro_idx = np.arange(0, len(dendro['ivl']))
ax2.imshow(corr[dendro['leaves'], :][:, dendro['leaves']])
ax2.set_xticks(dendro_idx)
ax2.set_yticks(dendro_idx)
ax2.set_xticklabels(dendro['ivl'], rotation='vertical')
ax2.set_yticklabels(dendro['ivl'])
fig.tight_layout()
plt.show()
# %%
# Next, we manually pick a threshold by visual inspection of the dendrogram
# to group our features into clusters and choose a feature from each cluster to
# keep, select those features from our dataset, and train a new random forest.
# The test accuracy of the new random forest did not change much compared to
# the random forest trained on the complete dataset.
cluster_ids = hierarchy.fcluster(corr_linkage, 1, criterion='distance')
cluster_id_to_feature_ids = defaultdict(list)
for idx, cluster_id in enumerate(cluster_ids):
cluster_id_to_feature_ids[cluster_id].append(idx)
selected_features = [v[0] for v in cluster_id_to_feature_ids.values()]
X_train_sel = X_train[:, selected_features]
X_test_sel = X_test[:, selected_features]
clf_sel = RandomForestClassifier(n_estimators=100, random_state=42)
clf_sel.fit(X_train_sel, y_train)
print("Accuracy on test data with features removed: {:.2f}".format(
clf_sel.score(X_test_sel, y_test))) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from shuup.apps import AppConfig
class ShuupNotifyAppConfig(AppConfig):
name = "shuup.notify"
verbose_name = "Shuup Notification Framework"
label = "shuup_notify"
provides = {
"notify_condition": [
"shuup.notify.conditions:LanguageEqual",
"shuup.notify.conditions:BooleanEqual",
"shuup.notify.conditions:IntegerEqual",
"shuup.notify.conditions:TextEqual",
"shuup.notify.conditions:Empty",
"shuup.notify.conditions:NonEmpty",
],
"notify_action": [
"shuup.notify.actions:SetDebugFlag",
"shuup.notify.actions:AddOrderLogEntry",
"shuup.notify.actions:SendEmail",
"shuup.notify.actions:AddNotification",
],
"notify_event": [],
"admin_module": [
"shuup.notify.admin_module:NotifyAdminModule",
]
}
default_app_config = "shuup.notify.ShuupNotifyAppConfig" | unknown | codeparrot/codeparrot-clean | ||
"""
Timezone-related classes and functions.
"""
from datetime import datetime, timedelta, tzinfo
from threading import local
import pytz
from django.conf import settings
from django.utils import lru_cache, six
from django.utils.decorators import ContextDecorator
__all__ = [
'utc', 'get_fixed_timezone',
'get_default_timezone', 'get_default_timezone_name',
'get_current_timezone', 'get_current_timezone_name',
'activate', 'deactivate', 'override',
'localtime', 'now',
'is_aware', 'is_naive', 'make_aware', 'make_naive',
]
# UTC and local time zones
ZERO = timedelta(0)
class FixedOffset(tzinfo):
"""
Fixed offset in minutes east from UTC. Taken from Python's docs.
Kept as close as possible to the reference version. __init__ was changed
to make its arguments optional, according to Python's requirement that
tzinfo subclasses can be instantiated without arguments.
"""
def __init__(self, offset=None, name=None):
if offset is not None:
self.__offset = timedelta(minutes=offset)
if name is not None:
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return ZERO
utc = pytz.utc
"""UTC time zone as a tzinfo instance."""
def get_fixed_timezone(offset):
"""
Returns a tzinfo instance with a fixed offset from UTC.
"""
if isinstance(offset, timedelta):
offset = offset.seconds // 60
sign = '-' if offset < 0 else '+'
hhmm = '%02d%02d' % divmod(abs(offset), 60)
name = sign + hhmm
return FixedOffset(offset, name)
# In order to avoid accessing settings at compile time,
# wrap the logic in a function and cache the result.
@lru_cache.lru_cache()
def get_default_timezone():
"""
Returns the default time zone as a tzinfo instance.
This is the time zone defined by settings.TIME_ZONE.
"""
return pytz.timezone(settings.TIME_ZONE)
# This function exists for consistency with get_current_timezone_name
def get_default_timezone_name():
"""
Returns the name of the default time zone.
"""
return _get_timezone_name(get_default_timezone())
_active = local()
def get_current_timezone():
"""
Returns the currently active time zone as a tzinfo instance.
"""
return getattr(_active, "value", get_default_timezone())
def get_current_timezone_name():
"""
Returns the name of the currently active time zone.
"""
return _get_timezone_name(get_current_timezone())
def _get_timezone_name(timezone):
"""
Returns the name of ``timezone``.
"""
try:
# for pytz timezones
return timezone.zone
except AttributeError:
# for regular tzinfo objects
return timezone.tzname(None)
# Timezone selection functions.
# These functions don't change os.environ['TZ'] and call time.tzset()
# because it isn't thread safe.
def activate(timezone):
"""
Sets the time zone for the current thread.
The ``timezone`` argument must be an instance of a tzinfo subclass or a
time zone name.
"""
if isinstance(timezone, tzinfo):
_active.value = timezone
elif isinstance(timezone, six.string_types):
_active.value = pytz.timezone(timezone)
else:
raise ValueError("Invalid timezone: %r" % timezone)
def deactivate():
"""
Unsets the time zone for the current thread.
Django will then use the time zone defined by settings.TIME_ZONE.
"""
if hasattr(_active, "value"):
del _active.value
class override(ContextDecorator):
"""
Temporarily set the time zone for the current thread.
This is a context manager that uses ``~django.utils.timezone.activate()``
to set the timezone on entry, and restores the previously active timezone
on exit.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
time zone name, or ``None``. If it is ``None``, Django enables the default
time zone.
"""
def __init__(self, timezone):
self.timezone = timezone
def __enter__(self):
self.old_timezone = getattr(_active, 'value', None)
if self.timezone is None:
deactivate()
else:
activate(self.timezone)
def __exit__(self, exc_type, exc_value, traceback):
if self.old_timezone is None:
deactivate()
else:
_active.value = self.old_timezone
# Templates
def template_localtime(value, use_tz=None):
"""
Checks if value is a datetime and converts it to local time if necessary.
If use_tz is provided and is not None, that will force the value to
be converted (or not), overriding the value of settings.USE_TZ.
This function is designed for use by the template engine.
"""
should_convert = (
isinstance(value, datetime) and
(settings.USE_TZ if use_tz is None else use_tz) and
not is_naive(value) and
getattr(value, 'convert_to_local_time', True)
)
return localtime(value) if should_convert else value
# Utilities
def localtime(value=None, timezone=None):
"""
Converts an aware datetime.datetime to local time.
Only aware datetimes are allowed. When value is omitted, it defaults to
now().
Local time is defined by the current time zone, unless another time zone
is specified.
"""
if value is None:
value = now()
if timezone is None:
timezone = get_current_timezone()
# Emulate the behavior of astimezone() on Python < 3.6.
if is_naive(value):
raise ValueError("localtime() cannot be applied to a naive datetime")
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# This method is available for pytz time zones.
value = timezone.normalize(value)
return value
def localdate(value=None, timezone=None):
"""
Convert an aware datetime to local time and return the value's date.
Only aware datetimes are allowed. When value is omitted, it defaults to
now().
Local time is defined by the current time zone, unless another time zone is
specified.
"""
return localtime(value, timezone).date()
def now():
"""
Returns an aware or naive datetime.datetime, depending on settings.USE_TZ.
"""
if settings.USE_TZ:
# timeit shows that datetime.now(tz=utc) is 24% slower
return datetime.utcnow().replace(tzinfo=utc)
else:
return datetime.now()
# By design, these four functions don't perform any checks on their arguments.
# The caller should ensure that they don't receive an invalid value like None.
def is_aware(value):
"""
Determines if a given datetime.datetime is aware.
The concept is defined in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
Assuming value.tzinfo is either None or a proper datetime.tzinfo,
value.utcoffset() implements the appropriate logic.
"""
return value.utcoffset() is not None
def is_naive(value):
"""
Determines if a given datetime.datetime is naive.
The concept is defined in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
Assuming value.tzinfo is either None or a proper datetime.tzinfo,
value.utcoffset() implements the appropriate logic.
"""
return value.utcoffset() is None
def make_aware(value, timezone=None, is_dst=None):
"""
Makes a naive datetime.datetime in a given time zone aware.
"""
if timezone is None:
timezone = get_current_timezone()
if hasattr(timezone, 'localize'):
# This method is available for pytz time zones.
return timezone.localize(value, is_dst=is_dst)
else:
# Check that we won't overwrite the timezone of an aware datetime.
if is_aware(value):
raise ValueError(
"make_aware expects a naive datetime, got %s" % value)
# This may be wrong around DST changes!
return value.replace(tzinfo=timezone)
def make_naive(value, timezone=None):
"""
Makes an aware datetime.datetime naive in a given time zone.
"""
if timezone is None:
timezone = get_current_timezone()
# Emulate the behavior of astimezone() on Python < 3.6.
if is_naive(value):
raise ValueError("make_naive() cannot be applied to a naive datetime")
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# This method is available for pytz time zones.
value = timezone.normalize(value)
return value.replace(tzinfo=None) | unknown | codeparrot/codeparrot-clean | ||
##########################################################
# THIS IS A GENERATED FILE -- DO NOT MODIFY.
# IF YOU WISH TO MODIFY THIS SUITE, MODIFY THE CORRESPONDING MATRIX SUITE MAPPING FILE
# AND REGENERATE THE MATRIX SUITES.
#
# matrix suite mapping file: buildscripts/resmokeconfig/matrix_suites/mappings/replica_sets_jscore_pqs_hints.yml
# regenerate matrix suites: buildscripts/resmoke.py generate-matrix-suites
##########################################################
description:
Test the correctness of query settings application by emulating cursor
hints with 'setQuerySettings' commands. This suite runs jscore tests using a replica
set fixture.
executor:
archive:
hooks:
- RunDBCheckInBackground
- CheckReplDBHashInBackground
- ValidateCollectionsInBackground
- CheckReplDBHash
- CheckReplOplogs
- ValidateCollections
config:
shell_options:
eval: globalThis.testingReplication = true;; await import("jstests/libs/override_methods/make_cursor_hints_into_query_settings.js");
global_vars:
TestData:
isHintsToQuerySettingsSuite: true
fixture:
class: ReplicaSetFixture
mongod_options:
set_parameters:
enableTestCommands: 1
num_nodes: 2
hooks:
- class: RunDBCheckInBackground
- class: CheckReplDBHashInBackground
- class: ValidateCollectionsInBackground
- class: CheckReplOplogs
- class: CheckReplDBHash
- class: ValidateCollections
- class: CleanEveryN
n: 20
matrix_suite: true
selector:
exclude_files:
- jstests/core/txns/abort_expired_transaction.js
- jstests/core/txns/abort_transaction_thread_does_not_block_on_locks.js
- jstests/core/txns/kill_op_on_txn_expiry.js
- jstests/core/**/set_param1.js
- jstests/core/query/awaitdata_getmore_cmd.js
- jstests/core/administrative/current_op/currentop.js
- jstests/core/administrative/fsync/fsync.js
- jstests/core/txns/prepare_conflict.js
- jstests/core/txns/prepare_conflict_aggregation_behavior.js
- jstests/core/timeseries/write/timeseries_update_multi.js
- jstests/core/query/distinct/distinct_semantics.js
- jstests/core/index/express.js
- jstests/core/index/index_filter_commands.js
- jstests/core/query/query_settings/**/*.js
- jstests/core/administrative/current_op/**/*.js
- jstests/core/query/collation/collation.js
- jstests/core/diagnostics/operation_latency_histogram.js
- jstests/core/query/top/top.js
- jstests/core/catalog/views/views_stats.js
- jstests/core/txns/timestamped_reads_wait_for_prepare_oplog_visibility.js
- jstests/core/timeseries/ddl/timeseries_sparse_index.js
- jstests/core/administrative/getlog2.js
- jstests/core/txns/speculative_snapshot_includes_all_writes.js
- jstests/core/query/commands_with_uuid.js
- jstests/core/clustered/clustered_collection_collation.js
- jstests/core/timeseries/write/timeseries_delete_multi.js
- jstests/core/timeseries/write/timeseries_simple.js
- jstests/core/timeseries/write/timeseries_update.js
- jstests/core/timeseries/write/timeseries_update_arbitrary_updates_not_enabled.js
- jstests/core/write/empty_ts/bypass_empty_ts_replacement_timeseries.js
- jstests/core/index/index_filter_catalog_independent.js
- jstests/core/index/index_filter_collation.js
- jstests/core/index/index_filter_commands.js
- jstests/core/index/index_filter_on_hidden_index.js
- jstests/core/index/wildcard/compound_wildcard_index_filter.js
- jstests/core/index/wildcard/wildcard_index_filter.js
- jstests/core/query/queryable_encryption/**/*.js
- src/mongo/db/modules/enterprise/jstests/fle2/**/*.js
- jstests/core/index/geo/geo6.js
- jstests/core/index/index_check6.js
- jstests/core/index/index_diag.js
- jstests/core/index/index_multikey.js
- jstests/core/query/covered_multikey.js
- jstests/core/query/cursor/getmore_invalidated_cursors.js
- jstests/core/query/explain/explain_shell_helpers.js
- jstests/core/query/index_deduplication.js
- jstests/core/query/internal_hash_eq/lookup_using_hash_key.js
- jstests/core/query/return_key.js
- jstests/core/timeseries/ddl/timeseries_index_collation.js
- jstests/core/timeseries/query/timeseries_internal_bounded_sort_compound.js
- jstests/core/timeseries/ddl/timeseries_metric_index_compound.js
- jstests/core/timeseries/ddl/timeseries_special_indexes_metadata.js
- jstests/core/index/index_multiple_compatibility.js
- jstests/core/index/indext.js
- jstests/core/index/sparse_index_internal_expr.js
- jstests/core/query/exists/existsa.js
- jstests/core/timeseries/ddl/timeseries_index_partial.js
exclude_with_any_tags:
- assumes_standalone_mongod
- requires_profiling
- assumes_standalone_mongod
- does_not_support_repeated_reads
- requires_profiling
- requires_fsync
- known_query_shape_computation_problem
- query_intensive_pbt
roots:
- jstests/core/**/*.js
- jstests/fle2/**/*.js
- src/mongo/db/modules/*/jstests/fle2/**/*.js
test_kind: js_test | unknown | github | https://github.com/mongodb/mongo | buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_pqs_hints.yml |
// Runs the shared role-management command test suite against a standalone
// mongod started with auth enabled and host names disabled.
import {runAllRoleManagementCommandsTests} from "jstests/auth/role_management_commands_lib.js";

let conn = MongoRunner.runMongod({auth: "", useHostname: false});
runAllRoleManagementCommandsTests(conn);
MongoRunner.stopMongod(conn); | javascript | github | https://github.com/mongodb/mongo | jstests/auth/role_management_commands_standalone.js |
# Solution for the CheckiO "Open Labyrinth" mission: depth-first search
# through a 12x12 maze from cell (1, 1) to cell (10, 10).
def checkio(maze_map):
    """Return a route string of "N"/"S"/"E"/"W" moves from (1, 1) to (10, 10).

    maze_map is a 12x12 grid of 0 (open) / 1 (wall).  Returns "" when no
    route exists.  The caller's maze is not modified.
    """
    MOVE = {"S": (1, 0), "N": (-1, 0), "W": (0, -1), "E": (0, 1)}
    # Work on a copy; visited cells are marked 1 so they are never revisited.
    copy_maze_map = [row[:] for row in maze_map]
    current_pos = (1, 1)
    copy_maze_map[current_pos[0]][current_pos[1]] = 1
    route = [current_pos]
    goal = (10, 10)
    stack = []  # DFS frontier: (cell, parent-cell-on-route) pairs
    while current_pos[0] != goal[0] or current_pos[1] != goal[1]:
        # Unvisited open neighbours of the current cell.
        go_pos = []
        for mx, my in [(current_pos[0] + m[0], current_pos[1] + m[1]) for m in MOVE.values()]:
            if copy_maze_map[mx][my] == 0:
                go_pos.append((mx, my))
        if len(go_pos) == 0:
            # Dead end: backtrack to the most recent frontier cell.
            if len(stack) == 0:
                print("can't find route")
                return ""
            current_pos, back_pos = stack.pop()
            # Rewind the route to the frontier cell's parent...
            while route[-1] != back_pos:
                route.pop()
            # ...then step onto the frontier cell itself.  (BUG FIX: the
            # original skipped this, leaving non-adjacent consecutive cells
            # in `route`, which produced broken move strings.)
            copy_maze_map[current_pos[0]][current_pos[1]] = 1
            route.append(current_pos)
        else:
            for pos in go_pos:
                stack.append((pos, current_pos[:]))
            current_pos = stack.pop()[0]
            copy_maze_map[current_pos[0]][current_pos[1]] = 1
            route.append(current_pos)
    # Translate the cell path into a string of compass moves.
    result = ""
    for i in range(0, len(route) - 1):  # range, not Python-2 xrange
        value = (route[i + 1][0] - route[i][0], route[i + 1][1] - route[i][1])
        for k, v in MOVE.items():
            if value == v:
                result += k
    return result
if __name__ == '__main__':
    # web page: http://www.checkio.org/mission/open-labyrinth/
    # This code is only for local self-checking and is not used by auto-testing.
    def check_route(func, labyrinth):
        # Replays the route returned by `func` on the maze and verifies that
        # every step stays on open cells and that the goal (10, 10) is reached.
        MOVE = {"S": (1, 0), "N": (-1, 0), "W": (0, -1), "E": (0, 1)}
        # copy maze so the solver cannot corrupt the grid used for checking
        route = func([row[:] for row in labyrinth])
        pos = (1, 1)
        goal = (10, 10)
        for i, d in enumerate(route):
            move = MOVE.get(d, None)
            if not move:
                print("Wrong symbol in route")
                return False
            pos = pos[0] + move[0], pos[1] + move[1]
            if pos == goal:
                return True
            if labyrinth[pos[0]][pos[1]] == 1:
                print("Player in the pit")
                return False
        print("Player did not reach exit")
        return False
    # These asserts are used only for self-testing as examples.
    assert check_route(checkio, [
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
        [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
        [1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1],
        [1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
        [1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1],
        [1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1],
        [1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1],
        [1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1],
        [1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1],
        [1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1],
        [1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1],
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]), "First maze"
    assert check_route(checkio, [
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
        [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
        [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
        [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
        [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
        [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
        [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
        [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
        [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
        [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
        [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]), "Empty maze"
    assert check_route(checkio, [
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
        [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1],
        [1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1],
        [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1],
        [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1],
        [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1],
        [1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1],
        [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1],
        [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1],
        [1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1],
        [1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1],
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]), "Up and down maze"
    assert check_route(checkio, [
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
        [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1],
        [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1],
        [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1],
        [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1],
        [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1],
        [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1],
        [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1],
        [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1],
        [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1],
        [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1],
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]), "Dotted maze"
    assert check_route(checkio, [
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
        [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1],
        [1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1],
        [1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1],
        [1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
        [1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1],
        [1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1],
        [1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1],
        [1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1],
        [1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
        [1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1],
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]), "Need left maze"
    assert check_route(checkio, [
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
        [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
        [1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1],
        [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
        [1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1],
        [1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1],
        [1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1],
        [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1],
        [1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1],
        [1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
        [1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1],
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]), "The big dead end."
print("The local tests are done.") | unknown | codeparrot/codeparrot-clean | ||
'''
Functions for handling star formation rates
'''
import time
import numpy as np
# --- local ---
import util as UT
def LogSFR_sfms(logMstar, z_in, sfms_dict=None):
    ''' Wrapper for SFMS star formation rates: log SFR at (logMstar, z_in)
    for the prescription named in sfms_dict['name'].

    'constant_offset' : average SFMS plus a fixed offset sfms_dict['dsfr']
    'no_scatter'      : exactly the average SFMS
    'random_step'     : piecewise-constant offsets in cosmic time; uses the
                        amplitude of the step active at t(z_in)
    '''
    if sfms_dict['name'] == 'constant_offset':
        # the offset from the average SFMS is preserved throughout the redshift
        logsfr = AverageLogSFR_sfms(logMstar, z_in, sfms_dict=sfms_dict['sfms']) + \
                sfms_dict['dsfr']
    elif sfms_dict['name'] == 'no_scatter':
        # SFR is just the average SFMS
        logsfr = AverageLogSFR_sfms(logMstar, z_in, sfms_dict=sfms_dict['sfms'])
    elif sfms_dict['name'] == 'random_step':
        t = UT.t_from_z(z_in)
        # per galaxy: index of the step boundary nearest to t; step back one
        # when the nearest boundary lies in the future
        # (assumes 'tshift' is (n_gal, n_steps) and t broadcasts per row --
        # TODO confirm against the sampler that builds sfms_dict)
        ishift = np.abs(sfms_dict['tshift'] -
                np.tile(t, (sfms_dict['tshift'].shape[1],1)).T).argmin(axis=1)
        ishift[np.where((sfms_dict['tshift'])[range(len(ishift)), ishift] > t)] -= 1
        dsfr = sfms_dict['amp'][range(len(ishift)), ishift]
        logsfr = AverageLogSFR_sfms(logMstar, z_in, sfms_dict=sfms_dict['sfms']) + dsfr
    return logsfr
def LogSFR_Q(t, logSFR_Q=None, tau_Q=None, t_Q=None):
    ''' log SFR of quenching galaxies: the SFR at quenching time t_Q decays
    exponentially with e-folding time tau_Q.
    '''
    sfr_at_quench = np.power(10, logSFR_Q)
    decayed = sfr_at_quench * np.exp((t_Q - t) / tau_Q)
    return np.log10(decayed)
def AverageLogSFR_sfms(mstar, z_in, sfms_dict=None):
    ''' Average log SFR of the star-forming main sequence (SFMS) as a
    function of log M* at redshift z_in.  Functional form:

        log SFR = A * (log M* - 10.5) + B * (z_in - 0.0502) + C

    Parameters
    ----------
    mstar : float or ndarray
        log10 stellar mass(es); a scalar is promoted to a length-1 array.
    z_in : float
        redshift.
    sfms_dict : dict
        'name' is 'linear' or 'kinked' (the latter also needs
        'mslope_lowmass'); 'zslope' gives the redshift slope B.

    Returns
    -------
    ndarray of average log SFR values (always an array, even for scalar input).
    '''
    if sfms_dict is None:
        raise ValueError("sfms_dict is required")
    if sfms_dict['name'] == 'linear':
        # single mass slope everywhere
        A_highmass = 0.53
        try:
            mslope = np.repeat(A_highmass, len(mstar))
        except TypeError:  # scalar input
            mstar = np.array([mstar])
            mslope = np.repeat(A_highmass, len(mstar))
        zslope = sfms_dict['zslope']  # e.g. 0.76, 1.1
        offset = np.repeat(-0.11, len(mstar))
    elif sfms_dict['name'] == 'kinked':
        # shallower (or different) slope below log M* = 9.5
        A_highmass = 0.53
        A_lowmass = sfms_dict['mslope_lowmass']
        try:
            mslope = np.repeat(A_highmass, len(mstar))
        except TypeError:  # scalar input
            mstar = np.array([mstar])
            mslope = np.repeat(A_highmass, len(mstar))
        lowmass = np.where(mstar < 9.5)
        mslope[lowmass] = A_lowmass
        zslope = sfms_dict['zslope']  # e.g. 0.76, 1.1
        offset = np.repeat(-0.11, len(mstar))
        offset[lowmass] += A_lowmass - A_highmass
    else:
        # BUG FIX: previously fell through and raised a confusing NameError
        raise NotImplementedError("unknown SFMS prescription: %s" % sfms_dict['name'])
    mu_SFR = mslope * (mstar - 10.5) + zslope * (z_in - 0.0502) + offset
    return mu_SFR
def ScatterLogSFR_sfms(mstar, z_in, sfms_dict=None):
    ''' Scatter (dex) of log SFR about the SFMS; hardcoded to 0.3 dex for
    every mass and redshift.  mstar and z_in are accepted for interface
    symmetry with AverageLogSFR_sfms but are not used.
    '''
    if sfms_dict is None:
        raise ValueError
    return 0.3
def integSFR(logsfr, mass0, t0, tf, mass_dict=None):
    ''' Integrated star formation rate stellar mass using Euler or RK4
    integration:

        M*(tf) = M*(t0) + f_retain * Int(SFR(t) dt, t0, tf)

    Parameters
    ----------
    logsfr : function
        log SFR function accepting (log M*, t_cosmic) array arguments
    mass0 : ndarray
        initial log stellar masses (no longer modified in place)
    t0 : ndarray
        initial cosmic times in Gyr (no longer modified in place)
    tf : ndarray
        final cosmic times in Gyr
    mass_dict : dict
        'type' ('euler' or 'rk4'), 'f_retain' (fraction of formed stellar
        mass retained against SNe/wind losses), 't_step' (max step, Gyr)

    Returns
    -------
    (logM, logSFR) : final log stellar masses and log SFRs
    '''
    integ_type = mass_dict['type']    # 'euler' or 'rk4' (renamed: shadowed builtin)
    f_retain = mass_dict['f_retain']
    delt = mass_dict['t_step']

    # global step count; galaxies with earlier tf stop updating via `niters`
    niter = int(np.ceil((tf.max() - t0.min()) / delt))
    niters = np.ceil((tf - t0) / delt).astype('int')

    # BUG FIX: integrate on copies -- the original aliased t0/mass0 and
    # mutated the caller's arrays in place (which also made the sanity
    # check below compare an array against itself).
    t_n_1 = np.copy(t0)
    t_n = t_n_1
    logSFR_n_1 = logsfr(mass0, t0)
    logM_n_1 = np.copy(mass0)

    if niter > 0:
        for i in range(niter):  # range, not Python-2 xrange
            keep = np.where(niters > i)
            t_n[keep] = t_n_1[keep] + delt
            if integ_type == 'euler':  # Forward Euler Method
                # M_n = M_{n-1} + dt * f_retain * SFR_{n-1}  (dt in yr)
                logM_n_1[keep] = np.log10(
                        (10. ** logM_n_1[keep]) +
                        delt * 10.**9. * f_retain * (10. ** logSFR_n_1[keep])
                        )
            elif integ_type == 'rk4':  # Runge Kutta
                k1 = (10.0 ** logSFR_n_1)
                k2_sfr = logsfr(
                        np.log10(10.0 ** logM_n_1 + (10 ** 9 * delt) / 2.0 * k1),
                        t_n_1 + delt / 2.0)
                k2 = (10.0 ** k2_sfr)
                k3_sfr = logsfr(
                        np.log10(10.0 ** logM_n_1 + (10 ** 9 * delt) / 2.0 * k2),
                        t_n_1 + delt / 2.0)
                k3 = (10.0 ** k3_sfr)
                k4_sfr = logsfr(
                        np.log10(10.0 ** logM_n_1 + (10 ** 9 * delt) * k3),
                        t_n_1 + delt)
                k4 = (10.0 ** k4_sfr)
                logM_n_1[keep] = np.log10(
                        10.0 ** logM_n_1[keep] +
                        f_retain / 6.0 * (delt * 10 ** 9) *
                        (k1[keep] + 2.0 * k2[keep] + 2.0 * k3[keep] + k4[keep]))
            else:
                raise NotImplementedError
            if np.sum(np.isnan(logM_n_1)) > 0:
                raise ValueError('There are NaNs')
            # update log(SFR) and t from step n-1
            logSFR_n_1[keep] = logsfr(logM_n_1, t_n)[keep]
            t_n_1 = t_n
    # sanity check: integrated mass may only decrease by float round-off
    if np.min(logM_n_1 - mass0) < 0.0:
        if np.min(logM_n_1 - mass0) > -0.001:
            pass
        else:
            raise ValueError("integrated mass cannot decrease over cosmic time")
    return logM_n_1, logSFR_n_1
def ODE_Euler(dydt, init_cond, t_arr, delt, **func_args):
    ''' Forward-Euler ODE integrator.  Steps the state from t_arr[0] to
    t_arr[-1] in increments of at most `delt` (the grid is nudged so that
    every time in t_arr is hit exactly) and returns the solution sampled
    at each time in t_arr, starting with the initial condition.
    '''
    # evaluation grid of ~delt-sized steps, forced to land on every t_arr time
    t_eval = np.arange(t_arr.min(), t_arr.max() + delt, delt)
    t_eval[-1] = t_arr[-1]

    out_idx = []
    for target in t_arr[1:-1]:
        nearest = np.argmin(np.abs(t_eval - target))
        t_eval[nearest] = target
        out_idx.append(nearest)
    out_idx.append(len(t_eval) - 1)

    state = init_cond.copy()
    snapshots = [init_cond.copy()]
    for step, dt in enumerate(np.diff(t_eval)):
        state += dt * dydt(state, t_eval[step], **func_args)
        if step + 1 in out_idx:
            snapshots.append(state.copy())
    return np.array(snapshots)
def ODE_RK4(dydt, init_cond, t_arr, delt, **func_args):
    ''' Classic 4th-order Runge-Kutta ODE integrator.  Same stepping/output
    convention as ODE_Euler: integrates from t_arr[0] to t_arr[-1] in steps
    of at most `delt` and returns the solution sampled at each t_arr time,
    starting with the initial condition.
    '''
    # evaluation grid of ~delt-sized steps, forced to land on every t_arr time
    t_eval = np.arange(t_arr.min(), t_arr.max() + delt, delt)
    t_eval[-1] = t_arr[-1]

    out_idx = []
    for target in t_arr[1:-1]:
        nearest = np.argmin(np.abs(t_eval - target))
        t_eval[nearest] = target
        out_idx.append(nearest)
    out_idx.append(len(t_eval) - 1)

    state = init_cond.copy()
    snapshots = [init_cond.copy()]
    for step in range(len(t_eval) - 1):
        dt = t_eval[step + 1] - t_eval[step]
        t_here = t_eval[step]
        k1 = dt * dydt(state, t_here, **func_args)
        k2 = dt * dydt(state + 0.5 * k1, t_here + 0.5 * dt, **func_args)
        k3 = dt * dydt(state + 0.5 * k2, t_here + 0.5 * dt, **func_args)
        k4 = dt * dydt(state + k3, t_here + dt, **func_args)
        state += (k1 + 2. * k2 + 2. * k3 + k4) / 6.
        if step + 1 in out_idx:
            snapshots.append(state.copy())
    return np.array(snapshots)
def dlogMdt_MS(logMstar, t, t_initial=None, t_final=None, f_retain=None, zfromt=None, sfh_kwargs=None):
    ''' Integrand d(logM)/dt for solving the ODE
        d(logM)/dt = SFR'(logM, t) * 10^9/(M ln(10))
        SFR'(t) = SFR(M*, t+t_offset)
        or
        = 0 if t > tf - t_offset

    Only galaxies with t_initial <= t <= t_final grow; outside that window
    the integrand is zero.
    '''
    dlogMdt = np.zeros(len(logMstar))
    within = np.where((t <= t_final) & (t >= t_initial) )

    if len(within[0]) > 0:
        # dSFR_MS returns either a per-galaxy array (sliceable) or a
        # scalar, depending on the SFH prescription
        try:
            dsfr = dSFR_MS(t, sfh_kwargs)[within]
        except TypeError:
            dsfr = dSFR_MS(t, sfh_kwargs)

        # log10 of SFR * 1e9 * f_retain / (M * ln 10):
        # +9 converts yr^-1 to Gyr^-1 and 0.3622157 = log10(ln 10)
        tmp = AverageLogSFR_sfms(
                logMstar[within],
                zfromt(t),
                sfms_dict=sfh_kwargs['sfms']) + dsfr + \
                        9. - \
                        logMstar[within] + \
                        np.log10(f_retain) - \
                        0.3622157
        dlogMdt[within] = np.power(10, tmp)
    return dlogMdt
def dSFR_MS(t, sfh_kwargs):
    ''' Offset from the average SFMS at cosmic time t for the given star
    formation history prescription.

    'constant_offset' : fixed offset for all time
    'no_scatter'      : exactly on the average SFMS (offset 0)
    'random_step'     : piecewise-constant offsets; uses the amplitude of
                        the step whose start time is the latest one <= t
    '''
    if sfh_kwargs['name'] == 'constant_offset':
        dsfr = sfh_kwargs['dsfr']
    elif sfh_kwargs['name'] == 'no_scatter':
        dsfr = 0.
    elif sfh_kwargs['name'] in ['random_step']:
        # index of the step boundary closest to t, stepped back by one when
        # the closest boundary lies in the future
        ishift = np.abs(sfh_kwargs['tshift'] - t).argmin(axis=1)
        ishift[np.where((sfh_kwargs['tshift'])[range(len(ishift)), ishift] > t)] -= 1
        dsfr = sfh_kwargs['amp'][range(len(ishift)), ishift]
    else:
        # BUG FIX: previously fell through to a NameError on `dsfr`
        raise NotImplementedError("unknown SFH prescription: %s" % sfh_kwargs['name'])
    return dsfr
def dlogMdt_Q(logMstar, t, logSFR_Q=None, tau_Q=None, t_Q=None, f_retain=None, t_final=None):
    ''' dlogM/dt for quenching galaxies (derived from dM/dt):

        dlogM/dt = f_retain * SFR(M_Q, t_Q)/(M ln10) * exp((t_Q - t)/tau_Q)

    Zero outside the quenching window t_Q <= t <= t_final.
    '''
    dlogMdt = np.zeros(len(logMstar))
    quenching = np.where((t <= t_final) & (t >= t_Q))
    if len(quenching[0]) > 0:
        # SFR/M in Gyr^-1: +9 converts the yr^-1 SFR to Gyr^-1
        sfr_over_m = np.power(10, logSFR_Q[quenching] + 9. - logMstar[quenching])
        decay = np.exp((t_Q[quenching] - t) / tau_Q[quenching])
        dlogMdt[quenching] = f_retain * sfr_over_m * decay / np.log(10)
    return dlogMdt
def logSFRt_MS(mstar, t, method_kwargs=None):
    ''' log SFR(t) on the star-forming main sequence for the given
    prescription: 'constant_offset' adds a fixed offset to the average
    SFMS; 'no_scatter' returns the average SFMS itself.
    '''
    mu_logsfr = AverageLogSFR_sfms(mstar, UT.z_from_t(t), sfms_dict=method_kwargs['sfms'])
    if method_kwargs['name'] == 'constant_offset':
        return mu_logsfr + method_kwargs['dsfr']
    elif method_kwargs['name'] == 'no_scatter':
        return mu_logsfr
def logSFRt_Q(MQ, t, tQ=None, tau_dict=None, method_kwargs=None):
    ''' log SFR(t) from tQ onward for quenching galaxies (NOTE THAT THIS IS
    VERY SPECIFIC): the SFMS value at quenching, decayed exponentially:

        log SFR(t) = log SFR_SFMS(M_Q, z(t_Q)) [+ dsfr] + log10(exp((tQ - t)/tau))

    BUG FIX: the original compared the `method_kwargs` dict itself against
    the prescription name strings, which is never true, so the function
    always returned None.  Compare method_kwargs['name'] as logSFRt_MS does.
    '''
    mu_logsfr = AverageLogSFR_sfms(MQ, UT.z_from_t(tQ), sfms_dict=method_kwargs['sfms'])
    tauQ = getTauQ(MQ, tau_dict=tau_dict)
    dlogsfrq = np.log10(np.exp((tQ - t) / tauQ))
    if method_kwargs['name'] == 'constant_offset':
        return mu_logsfr + method_kwargs['dsfr'] + dlogsfrq
    elif method_kwargs['name'] == 'no_scatter':
        return mu_logsfr + dlogsfrq
def getTauQ(mstar, tau_dict=None):
    ''' Quenching e-folding time tau(M*) in Gyr for each galaxy, for the
    prescription named in tau_dict['name'].

    Parameters
    ----------
    mstar : ndarray
        log10 stellar masses.
    tau_dict : dict
        'name' selects the prescription; 'line' also needs 'slope',
        'fid_mass', 'yint'; 'discrete' also needs 'param' (4 tau values).
    '''
    tau_type = tau_dict['name']
    if tau_type == 'constant':
        # constant 0.5 Gyr e-folding time
        tau = np.repeat(0.5, len(mstar))
    elif tau_type == 'linear':
        # linear tau(mass)
        tau = -(0.8 / 1.67) * (mstar - 9.5) + 1.0
    elif tau_type == 'instant':
        # effectively instantaneous quenching
        tau = np.repeat(0.001, len(mstar))
    elif tau_type == 'discrete':
        # 4 discrete tau values at the centers of fixed mass bins
        masses = np.array([9.75, 10.25, 10.75, 11.25])
        # BUG FIX: `param` was an undefined name (NameError); read it from
        # tau_dict like the other prescription parameters.
        param = tau_dict.get('param')
        if param is None:
            raise ValueError("tau_dict must supply 'param' for discrete tau")
        tau = np.interp(mstar, masses, param)
        tau[tau < 0.05] = 0.05
    elif tau_type == 'line':
        # slope and y-intercept of a tau line pivoted at fid_mass
        tau = tau_dict['slope'] * (mstar - tau_dict['fid_mass']) + tau_dict['yint']
        try:
            if np.min(tau) < 0.001:
                tau[np.where(tau < 0.001)] = 0.001
        except ValueError:
            # empty input array: nothing to clip
            pass
    elif tau_type == 'satellite':
        # quenching e-fold of satellites
        tau = -0.57 * (mstar - 9.78) + 0.8
        if np.min(tau) < 0.001:
            tau[np.where(tau < 0.001)] = 0.001
    elif tau_type == 'long':
        # long quenching (for QA purposes)
        tau = np.repeat(2.0, len(mstar))
    else:
        raise NotImplementedError("unknown tau prescription: %s" % tau_type)
    return tau
/*
* GIT - The information manager from hell
*
* Copyright (C) Linus Torvalds, 2005
* Copyright (C) Johannes Schindelin, 2005
*
*/
#include "git-compat-util.h"
#include "abspath.h"
#include "advice.h"
#include "date.h"
#include "branch.h"
#include "config.h"
#include "dir.h"
#include "parse.h"
#include "convert.h"
#include "environment.h"
#include "gettext.h"
#include "git-zlib.h"
#include "repository.h"
#include "lockfile.h"
#include "exec-cmd.h"
#include "strbuf.h"
#include "quote.h"
#include "hashmap.h"
#include "string-list.h"
#include "object-name.h"
#include "odb.h"
#include "path.h"
#include "utf8.h"
#include "color.h"
#include "refs.h"
#include "setup.h"
#include "strvec.h"
#include "trace2.h"
#include "wildmatch.h"
#include "write-or-die.h"
/*
 * One configuration input currently being parsed; "prev" links to the
 * source that was being parsed when this one was pushed (presumably an
 * include chain -- confirm against the push/pop sites).
 */
struct config_source {
	struct config_source *prev;
	union {
		FILE *file;		/* used by the config_file_* callbacks */
		struct config_buf {
			const char *buf;
			size_t len;
			size_t pos;	/* read cursor into buf */
		} buf;			/* used by the config_buf_* callbacks */
	} u;
	enum config_origin_type origin_type;
	const char *name;
	enum config_error_action default_error_action;
	int linenr;		/* current line number, for error reporting */
	int eof;		/* set once the input is exhausted */
	size_t total_len;	/* bytes consumed so far (INT_MAX overflow guard) */
	struct strbuf value;
	struct strbuf var;
	unsigned subsection_case_sensitive : 1;

	/* polymorphic I/O callbacks: file- or buffer-backed */
	int (*do_fgetc)(struct config_source *c);
	int (*do_ungetc)(int c, struct config_source *conf);
	long (*do_ftell)(struct config_source *c);
};
#define CONFIG_SOURCE_INIT { 0 }
/*
* Config that comes from trusted scopes, namely:
* - CONFIG_SCOPE_SYSTEM (e.g. /etc/gitconfig)
* - CONFIG_SCOPE_GLOBAL (e.g. $HOME/.gitconfig, $XDG_CONFIG_HOME/git)
* - CONFIG_SCOPE_COMMAND (e.g. "-c" option, environment variables)
*
* This is declared here for code cleanliness, but unlike the other
* static variables, this does not hold config parser state.
*/
static struct config_set protected_config;
/* fgetc callback for a file-backed source; unlocked variant is fine since
 * config parsing does not share the stream across threads. */
static int config_file_fgetc(struct config_source *conf)
{
	return getc_unlocked(conf->u.file);
}
/* ungetc callback for a file-backed source. */
static int config_file_ungetc(int c, struct config_source *conf)
{
	return ungetc(c, conf->u.file);
}
/* ftell callback for a file-backed source. */
static long config_file_ftell(struct config_source *conf)
{
	return ftell(conf->u.file);
}
static int config_buf_fgetc(struct config_source *conf)
{
if (conf->u.buf.pos < conf->u.buf.len)
return conf->u.buf.buf[conf->u.buf.pos++];
return EOF;
}
static int config_buf_ungetc(int c, struct config_source *conf)
{
if (conf->u.buf.pos > 0) {
conf->u.buf.pos--;
if (conf->u.buf.buf[conf->u.buf.pos] != c)
BUG("config_buf can only ungetc the same character");
return c;
}
return EOF;
}
/* ftell callback for an in-memory source: the current read offset. */
static long config_buf_ftell(struct config_source *conf)
{
	return conf->u.buf.pos;
}
/*
 * State threaded through the config callbacks while processing
 * include/includeIf directives.
 */
struct config_include_data {
	int depth;			/* current include nesting depth */
	config_fn_t fn;			/* the real callback being wrapped */
	void *data;			/* callback data for fn */
	const struct config_options *opts;
	const struct git_config_source *config_source;
	struct repository *repo;

	/*
	 * All remote URLs discovered when reading all config files.
	 * Lazily populated for hasconfig:remote.*.url conditions.
	 */
	struct string_list *remote_urls;
};
#define CONFIG_INCLUDE_INIT { 0 }
static int git_config_include(const char *var, const char *value,
const struct config_context *ctx, void *data);
#define MAX_INCLUDE_DEPTH 10
static const char include_depth_advice[] = N_(
"exceeded maximum include depth (%d) while including\n"
" %s\n"
"from\n"
" %s\n"
"This might be due to circular includes.");
/*
 * Handle an "include.path" (or conditional include) value: expand '~',
 * anchor relative paths next to the including file, then recursively
 * parse the included file with a depth guard against include cycles.
 */
static int handle_path_include(const struct key_value_info *kvi,
			       const char *path,
			       struct config_include_data *inc)
{
	int ret = 0;
	struct strbuf buf = STRBUF_INIT;
	char *expanded;

	if (!path)
		return config_error_nonbool("include.path");

	expanded = interpolate_path(path, 0);
	if (!expanded)
		return error(_("could not expand include path '%s'"), path);
	path = expanded;

	/*
	 * Use an absolute path as-is, but interpret relative paths
	 * based on the including config file.
	 */
	if (!is_absolute_path(path)) {
		char *slash;

		if (!kvi || kvi->origin_type != CONFIG_ORIGIN_FILE) {
			ret = error(_("relative config includes must come from files"));
			goto cleanup;
		}

		slash = find_last_dir_sep(kvi->filename);
		if (slash)
			strbuf_add(&buf, kvi->filename, slash - kvi->filename + 1);
		strbuf_addstr(&buf, path);
		path = buf.buf;
	}

	if (!access_or_die(path, R_OK, 0)) {
		if (++inc->depth > MAX_INCLUDE_DEPTH)
			die(_(include_depth_advice), MAX_INCLUDE_DEPTH, path,
			    !kvi ? "<unknown>" :
			    kvi->filename ? kvi->filename :
			    "the command line");
		/*
		 * NOTE(review): kvi is dereferenced here although the die()
		 * message above still guards against !kvi -- confirm kvi can
		 * never be NULL on this path.
		 */
		ret = git_config_from_file_with_options(git_config_include, path, inc,
							kvi->scope, NULL);
		inc->depth--;
	}
cleanup:
	strbuf_release(&buf);
	free(expanded);
	return ret;
}
static void add_trailing_starstar_for_dir(struct strbuf *pat)
{
if (pat->len && is_dir_sep(pat->buf[pat->len - 1]))
strbuf_addstr(pat, "**");
}
/*
 * Normalize a gitdir: condition pattern in place: expand '~', anchor a
 * leading "./" next to the including config file (the length of that
 * literal prefix is returned via *out), prepend "**" + "/" to other
 * relative patterns, and make directory patterns match everything below.
 */
static int prepare_include_condition_pattern(const struct key_value_info *kvi,
					     struct strbuf *pat,
					     size_t *out)
{
	struct strbuf path = STRBUF_INIT;
	char *expanded;
	size_t prefix = 0;

	expanded = interpolate_path(pat->buf, 1);
	if (expanded) {
		strbuf_reset(pat);
		strbuf_addstr(pat, expanded);
		free(expanded);
	}

	if (pat->buf[0] == '.' && is_dir_sep(pat->buf[1])) {
		const char *slash;

		if (!kvi || kvi->origin_type != CONFIG_ORIGIN_FILE)
			return error(_("relative config include "
				       "conditionals must come from files"));

		strbuf_realpath(&path, kvi->filename, 1);
		slash = find_last_dir_sep(path.buf);
		if (!slash)
			BUG("how is this possible?");
		/* replace the leading "." with the including file's directory */
		strbuf_splice(pat, 0, 1, path.buf, slash - path.buf);
		prefix = slash - path.buf + 1 /* slash */;
	} else if (!is_absolute_path(pat->buf))
		strbuf_insertstr(pat, 0, "**/");

	add_trailing_starstar_for_dir(pat);

	*out = prefix;
	strbuf_release(&path);
	return 0;
}
/*
 * Return 1 when the repository's git directory matches the pattern of a
 * "gitdir:" (icase=0) or "gitdir/i:" (icase=1) include condition.
 */
static int include_by_gitdir(const struct key_value_info *kvi,
			     const struct config_options *opts,
			     const char *cond, size_t cond_len, int icase)
{
	struct strbuf text = STRBUF_INIT;
	struct strbuf pattern = STRBUF_INIT;
	size_t prefix;
	int ret = 0;
	const char *git_dir;
	int already_tried_absolute = 0;

	if (opts->git_dir)
		git_dir = opts->git_dir;
	else
		goto done;

	strbuf_realpath(&text, git_dir, 1);
	strbuf_add(&pattern, cond, cond_len);
	ret = prepare_include_condition_pattern(kvi, &pattern, &prefix);
	/*
	 * NOTE(review): on error a negative ret is returned as the condition
	 * result, which callers treat as truthy -- confirm this is intended.
	 */
	if (ret < 0)
		goto done;

again:
	if (prefix > 0) {
		/*
		 * perform literal matching on the prefix part so that
		 * any wildcard character in it can't create side effects.
		 */
		if (text.len < prefix)
			goto done;
		if (!icase && strncmp(pattern.buf, text.buf, prefix))
			goto done;
		if (icase && strncasecmp(pattern.buf, text.buf, prefix))
			goto done;
	}

	ret = !wildmatch(pattern.buf + prefix, text.buf + prefix,
			 WM_PATHNAME | (icase ? WM_CASEFOLD : 0));

	if (!ret && !already_tried_absolute) {
		/*
		 * We've tried e.g. matching gitdir:~/work, but if
		 * ~/work is a symlink to /mnt/storage/work
		 * strbuf_realpath() will expand it, so the rule won't
		 * match. Let's match against a
		 * strbuf_add_absolute_path() version of the path,
		 * which'll do the right thing
		 */
		strbuf_reset(&text);
		strbuf_add_absolute_path(&text, git_dir);
		already_tried_absolute = 1;
		goto again;
	}
done:
	strbuf_release(&pattern);
	strbuf_release(&text);
	return ret;
}
/*
 * Return 1 when HEAD is a symbolic ref to refs/heads/<branch> and the
 * short branch name matches the "onbranch:" condition pattern.
 */
static int include_by_branch(struct config_include_data *data,
			     const char *cond, size_t cond_len)
{
	int flags;
	int ret;
	struct strbuf pattern = STRBUF_INIT;
	const char *refname, *shortname;

	if (!data->repo || data->repo->ref_storage_format == REF_STORAGE_FORMAT_UNKNOWN)
		return 0;

	refname = refs_resolve_ref_unsafe(get_main_ref_store(data->repo),
					  "HEAD", 0, NULL, &flags);
	if (!refname ||
	    !(flags & REF_ISSYMREF) ||
	    !skip_prefix(refname, "refs/heads/", &shortname))
		return 0;

	strbuf_add(&pattern, cond, cond_len);
	/* "onbranch:foo/" matches any branch below foo/ */
	add_trailing_starstar_for_dir(&pattern);
	ret = !wildmatch(pattern.buf, shortname, WM_PATHNAME);
	strbuf_release(&pattern);
	return ret;
}
/* Config callback: collect every remote.<name>.url value into "data"
 * (a string_list). */
static int add_remote_url(const char *var, const char *value,
			  const struct config_context *ctx UNUSED, void *data)
{
	struct string_list *remote_urls = data;
	const char *remote_name;
	size_t remote_name_len;
	const char *key;

	if (!parse_config_key(var, "remote", &remote_name, &remote_name_len,
			      &key) &&
	    remote_name &&
	    !strcmp(key, "url"))
		string_list_append(remote_urls, value);
	return 0;
}
/*
 * Read all config once more -- with hasconfig:remote.*.url includes
 * applied unconditionally -- to collect the complete set of remote URLs.
 */
static void populate_remote_urls(struct config_include_data *inc)
{
	struct config_options opts;

	opts = *inc->opts;
	opts.unconditional_remote_url = 1;

	inc->remote_urls = xmalloc(sizeof(*inc->remote_urls));
	string_list_init_dup(inc->remote_urls);
	config_with_options(add_remote_url, inc->remote_urls,
			    inc->config_source, inc->repo, &opts);
}
/*
 * Config callback: die when a file that was included because of a
 * hasconfig:remote.*.url condition tries to define a remote URL itself,
 * which would make the condition self-referential.
 */
static int forbid_remote_url(const char *var, const char *value UNUSED,
			     const struct config_context *ctx UNUSED,
			     void *data UNUSED)
{
	const char *remote_name;
	size_t remote_name_len;
	const char *key;

	if (!parse_config_key(var, "remote", &remote_name, &remote_name_len,
			      &key) &&
	    remote_name &&
	    !strcmp(key, "url"))
		die(_("remote URLs cannot be configured in file directly or indirectly included by includeIf.hasconfig:remote.*.url"));
	return 0;
}
/* Return 1 when any URL in remote_urls matches the NUL-unterminated glob. */
static int at_least_one_url_matches_glob(const char *glob, int glob_len,
					 struct string_list *remote_urls)
{
	struct strbuf pattern = STRBUF_INIT;
	struct string_list_item *url_item;
	int matched = 0;

	strbuf_add(&pattern, glob, glob_len);
	for_each_string_list_item(url_item, remote_urls) {
		matched = !wildmatch(pattern.buf, url_item->string, WM_PATHNAME);
		if (matched)
			break;
	}
	strbuf_release(&pattern);
	return matched;
}
/*
 * Return 1 when at least one configured remote URL matches the
 * "hasconfig:remote.*.url:" condition glob.  While the URL list itself is
 * being collected, every such include is taken unconditionally.
 */
static int include_by_remote_url(struct config_include_data *inc,
		const char *cond, size_t cond_len)
{
	if (inc->opts->unconditional_remote_url)
		return 1;
	if (!inc->remote_urls)
		populate_remote_urls(inc);
	return at_least_one_url_matches_glob(cond, cond_len,
					     inc->remote_urls);
}
/* Evaluate the <condition> part of an includeIf.<condition>.path key. */
static int include_condition_is_true(const struct key_value_info *kvi,
				     struct config_include_data *inc,
				     const char *cond, size_t cond_len)
{
	const struct config_options *opts = inc->opts;

	if (skip_prefix_mem(cond, cond_len, "gitdir:", &cond, &cond_len))
		return include_by_gitdir(kvi, opts, cond, cond_len, 0);
	else if (skip_prefix_mem(cond, cond_len, "gitdir/i:", &cond, &cond_len))
		return include_by_gitdir(kvi, opts, cond, cond_len, 1);
	else if (skip_prefix_mem(cond, cond_len, "onbranch:", &cond, &cond_len))
		return include_by_branch(inc, cond, cond_len);
	else if (skip_prefix_mem(cond, cond_len, "hasconfig:remote.*.url:", &cond,
				   &cond_len))
		return include_by_remote_url(inc, cond, cond_len);

	/* unknown conditionals are always false */
	return 0;
}
/*
 * Wrapping callback: forward every entry to the real callback, and
 * additionally process include.path / includeIf.<cond>.path entries by
 * recursing into the named file.
 */
static int git_config_include(const char *var, const char *value,
			      const struct config_context *ctx,
			      void *data)
{
	struct config_include_data *inc = data;
	const char *cond, *key;
	size_t cond_len;
	int ret;

	/*
	 * Pass along all values, including "include" directives; this makes it
	 * possible to query information on the includes themselves.
	 */
	ret = inc->fn(var, value, ctx, inc->data);
	if (ret < 0)
		return ret;

	if (!strcmp(var, "include.path"))
		ret = handle_path_include(ctx->kvi, value, inc);

	if (!parse_config_key(var, "includeif", &cond, &cond_len, &key) &&
	    cond && include_condition_is_true(ctx->kvi, inc, cond, cond_len) &&
	    !strcmp(key, "path")) {
		config_fn_t old_fn = inc->fn;

		/* hasconfig:remote.*.url includes may not add remote URLs */
		if (inc->opts->unconditional_remote_url)
			inc->fn = forbid_remote_url;
		ret = handle_path_include(ctx->kvi, value, inc);
		inc->fn = old_fn;
	}

	return ret;
}
/*
 * Append one key(=value) pair, shell-quoted, to the GIT_CONFIG_PARAMETERS
 * environment variable so child processes inherit command-line config.
 * A NULL value is recorded as "key=" with no quoted value, which parses
 * back to a valueless (boolean) entry.
 */
static void git_config_push_split_parameter(const char *key, const char *value)
{
	struct strbuf env = STRBUF_INIT;
	const char *old = getenv(CONFIG_DATA_ENVIRONMENT);
	if (old && *old) {
		strbuf_addstr(&env, old);
		strbuf_addch(&env, ' ');
	}
	sq_quote_buf(&env, key);
	strbuf_addch(&env, '=');
	if (value)
		sq_quote_buf(&env, value);
	setenv(CONFIG_DATA_ENVIRONMENT, env.buf, 1);
	strbuf_release(&env);
}
/* Record one "-c key[=value]" argument in GIT_CONFIG_PARAMETERS. */
void git_config_push_parameter(const char *text)
{
	const char *value;

	/*
	 * When we see:
	 *
	 *   section.subsection=with=equals.key=value
	 *
	 * we cannot tell if it means:
	 *
	 *   [section "subsection=with=equals"]
	 *   key = value
	 *
	 * or:
	 *
	 *   [section]
	 *   subsection = with=equals.key=value
	 *
	 * We parse left-to-right for the first "=", meaning we'll prefer to
	 * keep the value intact over the subsection. This is historical, but
	 * also sensible since values are more likely to contain odd or
	 * untrusted input than a section name.
	 *
	 * A missing equals is explicitly allowed (as a bool-only entry).
	 */
	value = strchr(text, '=');
	if (value) {
		char *key = xmemdupz(text, value - text);
		git_config_push_split_parameter(key, value + 1);
		free(key);
	} else {
		git_config_push_split_parameter(text, NULL);
	}
}
/*
 * Handle "--config-env key=ENVVAR": read the value for "key" from the
 * named environment variable and push the pair as command-line config.
 */
void git_config_push_env(const char *spec)
{
	char *key;
	const char *env_name;
	const char *env_value;

	/* split on the *last* '=', so the key itself may contain '=' */
	env_name = strrchr(spec, '=');
	if (!env_name)
		die(_("invalid config format: %s"), spec);
	key = xmemdupz(spec, env_name - spec);
	env_name++;
	if (!*env_name)
		die(_("missing environment variable name for configuration '%.*s'"),
		    (int)(env_name - spec - 1), spec);

	env_value = getenv(env_name);
	if (!env_value)
		die(_("missing environment variable '%s' for configuration '%.*s'"),
		    env_name, (int)(env_name - spec - 1), spec);

	git_config_push_split_parameter(key, env_value);
	free(key);
}
/* A config variable name may contain only alphanumerics and '-'. */
static inline int iskeychar(int c)
{
	if (c == '-')
		return 1;
	return isalnum(c);
}
/*
 * Auxiliary function to sanity-check and split the key into the section
 * identifier and variable name.
 *
 * Returns 0 on success, -CONFIG_INVALID_KEY when there is an invalid
 * character in the key, and -CONFIG_NO_SECTION_OR_NAME when either the
 * section or the variable name is missing.
 *
 * store_key - pointer to char* which will hold a copy of the key with
 *             lowercase section and variable name
 * baselen - pointer to size_t which will hold the length of the
 *           section + subsection part, can be NULL
 */
int git_config_parse_key(const char *key, char **store_key, size_t *baselen_)
{
	size_t i, baselen;
	int dot;
	const char *last_dot = strrchr(key, '.');

	/*
	 * Since "key" actually contains the section name and the real
	 * key name separated by a dot, we have to know where the dot is.
	 */

	if (last_dot == NULL || last_dot == key) {
		error(_("key does not contain a section: %s"), key);
		return -CONFIG_NO_SECTION_OR_NAME;
	}

	if (!last_dot[1]) {
		error(_("key does not contain variable name: %s"), key);
		return -CONFIG_NO_SECTION_OR_NAME;
	}

	baselen = last_dot - key;
	if (baselen_)
		*baselen_ = baselen;

	/*
	 * Validate the key and while at it, lower case it for matching.
	 */
	*store_key = xmallocz(strlen(key));

	dot = 0;
	for (i = 0; key[i]; i++) {
		unsigned char c = key[i];
		if (c == '.')
			dot = 1;
		/* Leave the extended basename untouched.. */
		if (!dot || i > baselen) {
			/* variable name must start with a letter */
			if (!iskeychar(c) ||
			    (i == baselen + 1 && !isalpha(c))) {
				error(_("invalid key: %s"), key);
				goto out_free_ret_1;
			}
			c = tolower(c);
		} else if (c == '\n') {
			error(_("invalid key (newline): %s"), key);
			goto out_free_ret_1;
		}
		(*store_key)[i] = c;
	}

	return 0;

out_free_ret_1:
	FREE_AND_NULL(*store_key);
	return -CONFIG_INVALID_KEY;
}
/*
 * Canonicalize "key" (lowercasing section and variable name) and feed the
 * pair to "fn".  Returns -1 for an empty or invalid key, or when the
 * callback itself reports failure.
 */
static int config_parse_pair(const char *key, const char *value,
			     struct key_value_info *kvi,
			     config_fn_t fn, void *data)
{
	char *canonical_name;
	int ret;
	struct config_context ctx = {
		.kvi = kvi,
	};

	if (!strlen(key))
		return error(_("empty config key"));
	if (git_config_parse_key(key, &canonical_name, NULL))
		return -1;

	ret = (fn(canonical_name, value, &ctx, data) < 0) ? -1 : 0;
	free(canonical_name);
	return ret;
}
/* for values read from `git_config_from_parameters()` */
void kvi_from_param(struct key_value_info *out)
{
	/* no file location: these come from the command line / environment */
	out->filename = NULL;
	out->linenr = -1;
	out->origin_type = CONFIG_ORIGIN_CMDLINE;
	out->scope = CONFIG_SCOPE_COMMAND;
}
/*
 * Parse an old-style "key=value" (or bare "key") command-line config
 * entry and feed it to "fn".  The split happens on the first '=' only;
 * a missing '=' yields a NULL (boolean) value.
 */
int git_config_parse_parameter(const char *text,
			       config_fn_t fn, void *data)
{
	const char *value;
	struct string_list pair = STRING_LIST_INIT_DUP;
	int ret;
	struct key_value_info kvi = KVI_INIT;

	kvi_from_param(&kvi);

	string_list_split(&pair, text, "=", 1);
	if (!pair.nr)
		return error(_("bogus config parameter: %s"), text);

	if (pair.nr == 1)
		value = NULL;
	else
		value = pair.items[1].string;

	if (!*pair.items[0].string) {
		string_list_clear(&pair, 0);
		return error(_("bogus config parameter: %s"), text);
	}

	ret = config_parse_pair(pair.items[0].string, value, &kvi, fn, data);
	string_list_clear(&pair, 0);
	return ret;
}
/*
 * Parse the space-separated list of sq-quoted entries held in
 * GIT_CONFIG_PARAMETERS ("env" is clobbered by the in-place dequoting).
 * Entries are either old-style "key=value" words or new-style
 * 'key'='value' pairs.
 */
static int parse_config_env_list(char *env, struct key_value_info *kvi,
				 config_fn_t fn, void *data)
{
	char *cur = env;
	while (cur && *cur) {
		const char *key = sq_dequote_step(cur, &cur);
		if (!key)
			return error(_("bogus format in %s"),
				     CONFIG_DATA_ENVIRONMENT);

		if (!cur || isspace(*cur)) {
			/* old-style 'key=value' */
			if (git_config_parse_parameter(key, fn, data) < 0)
				return -1;
		}
		else if (*cur == '=') {
			/* new-style 'key'='value' */
			const char *value;

			cur++;
			if (*cur == '\'') {
				/* quoted value */
				value = sq_dequote_step(cur, &cur);
				if (!value || (cur && !isspace(*cur))) {
					return error(_("bogus format in %s"),
						     CONFIG_DATA_ENVIRONMENT);
				}
			} else if (!*cur || isspace(*cur)) {
				/* implicit bool: 'key'= */
				value = NULL;
			} else {
				return error(_("bogus format in %s"),
					     CONFIG_DATA_ENVIRONMENT);
			}

			if (config_parse_pair(key, value, kvi, fn, data) < 0)
				return -1;
		}
		else {
			/* unknown format */
			return error(_("bogus format in %s"),
				     CONFIG_DATA_ENVIRONMENT);
		}

		/* skip the whitespace separating entries */
		if (cur) {
			while (isspace(*cur))
				cur++;
		}
	}
	return 0;
}
/*
 * Feed all command-line/environment config to "fn": first the indexed
 * GIT_CONFIG_COUNT / GIT_CONFIG_KEY_<n> / GIT_CONFIG_VALUE_<n> entries,
 * then the legacy GIT_CONFIG_PARAMETERS list.  Returns 0 on success,
 * negative on any malformed entry or callback failure.
 */
int git_config_from_parameters(config_fn_t fn, void *data)
{
	const char *env;
	struct strbuf envvar = STRBUF_INIT;
	struct strvec to_free = STRVEC_INIT;
	int ret = 0;
	char *envw = NULL;
	struct key_value_info kvi = KVI_INIT;

	kvi_from_param(&kvi);
	env = getenv(CONFIG_COUNT_ENVIRONMENT);
	if (env) {
		unsigned long count;
		char *endp;

		count = strtoul(env, &endp, 10);
		if (*endp) {
			ret = error(_("bogus count in %s"), CONFIG_COUNT_ENVIRONMENT);
			goto out;
		}
		if (count > INT_MAX) {
			ret = error(_("too many entries in %s"), CONFIG_COUNT_ENVIRONMENT);
			goto out;
		}

		for (unsigned long i = 0; i < count; i++) {
			const char *key, *value;

			strbuf_addf(&envvar, "GIT_CONFIG_KEY_%lu", i);
			key = getenv_safe(&to_free, envvar.buf);
			if (!key) {
				ret = error(_("missing config key %s"), envvar.buf);
				goto out;
			}
			strbuf_reset(&envvar);

			strbuf_addf(&envvar, "GIT_CONFIG_VALUE_%lu", i);
			value = getenv_safe(&to_free, envvar.buf);
			if (!value) {
				ret = error(_("missing config value %s"), envvar.buf);
				goto out;
			}
			strbuf_reset(&envvar);

			if (config_parse_pair(key, value, &kvi, fn, data) < 0) {
				ret = -1;
				goto out;
			}
		}
	}

	env = getenv(CONFIG_DATA_ENVIRONMENT);
	if (env) {
		/* sq_dequote will write over it */
		envw = xstrdup(env);
		if (parse_config_env_list(envw, &kvi, fn, data) < 0) {
			ret = -1;
			goto out;
		}
	}

out:
	strbuf_release(&envvar);
	strvec_clear(&to_free);
	free(envw);
	return ret;
}
/*
 * Read the next character from the config source, folding CRLF into a
 * single '\n' (a lone CR is returned as-is), maintaining cs->linenr,
 * and capping the total input at INT_MAX bytes.  EOF is reported to
 * the caller as '\n' with cs->eof set.
 */
static int get_next_char(struct config_source *cs)
{
	int c = cs->do_fgetc(cs);
	if (c == '\r') {
		/* DOS like systems */
		c = cs->do_fgetc(cs);
		if (c != '\n') {
			if (c != EOF)
				cs->do_ungetc(c, cs);
			c = '\r';
		}
	}
	if (c != EOF && ++cs->total_len > INT_MAX) {
		/*
		 * This is an absurdly long config file; refuse to parse
		 * further in order to protect downstream code from integer
		 * overflows. Note that we can't return an error specifically,
		 * but we can mark EOF and put trash in the return value,
		 * which will trigger a parse error.
		 */
		cs->eof = 1;
		return 0;
	}
	if (c == '\n')
		cs->linenr++;
	if (c == EOF) {
		/* synthesize a final newline so callers see a complete line */
		cs->eof = 1;
		cs->linenr++;
		c = '\n';
	}
	return c;
}
/*
 * Parse the value part of a "key = value" line into cs->value,
 * handling double quotes, trailing comments (';' and '#'), backslash
 * escapes, and trimming of unquoted trailing whitespace.  Returns a
 * pointer into cs->value, or NULL on a parse error (unterminated quote
 * or unknown escape).
 */
static char *parse_value(struct config_source *cs)
{
	int quote = 0, comment = 0;
	/* length of the value with trailing whitespace stripped */
	size_t trim_len = 0;
	strbuf_reset(&cs->value);
	for (;;) {
		int c = get_next_char(cs);
		if (c == '\n') {
			if (quote) {
				/* newline inside quotes: error on this line */
				cs->linenr--;
				return NULL;
			}
			if (trim_len)
				strbuf_setlen(&cs->value, trim_len);
			return cs->value.buf;
		}
		if (comment)
			continue;
		if (isspace(c) && !quote) {
			/* remember where trailing whitespace starts */
			if (!trim_len)
				trim_len = cs->value.len;
			if (cs->value.len)
				strbuf_addch(&cs->value, c);
			continue;
		}
		if (!quote) {
			if (c == ';' || c == '#') {
				comment = 1;
				continue;
			}
		}
		/* a non-space character: the whitespace was not trailing */
		if (trim_len)
			trim_len = 0;
		if (c == '\\') {
			c = get_next_char(cs);
			switch (c) {
			case '\n':
				continue;
			case 't':
				c = '\t';
				break;
			case 'b':
				c = '\b';
				break;
			case 'n':
				c = '\n';
				break;
			/* Some characters escape as themselves */
			case '\\': case '"':
				break;
			/* Reject unknown escape sequences */
			default:
				return NULL;
			}
			strbuf_addch(&cs->value, c);
			continue;
		}
		if (c == '"') {
			quote = 1 - quote;
			continue;
		}
		strbuf_addch(&cs->value, c);
	}
}
/*
 * Parse the rest of a config entry whose first key character has
 * already been appended to "name" (which also holds the "section."
 * stem), then invoke "fn" with the full key and parsed value.  A key
 * with no '=' gets a NULL value.  Returns the callback's result, or
 * -1 on a parse error.
 */
static int get_value(struct config_source *cs, struct key_value_info *kvi,
		     config_fn_t fn, void *data, struct strbuf *name)
{
	int c;
	char *value;
	int ret;
	struct config_context ctx = {
		.kvi = kvi,
	};
	/* Get the full name */
	for (;;) {
		c = get_next_char(cs);
		if (cs->eof)
			break;
		if (!iskeychar(c))
			break;
		/* key names are case-insensitive; store lowercased */
		strbuf_addch(name, tolower(c));
	}
	while (c == ' ' || c == '\t')
		c = get_next_char(cs);
	value = NULL;
	if (c != '\n') {
		if (c != '=')
			return -1;
		value = parse_value(cs);
		if (!value)
			return -1;
	}
	/*
	 * We already consumed the \n, but we need linenr to point to
	 * the line we just parsed during the call to fn to get
	 * accurate line number in error messages.
	 */
	cs->linenr--;
	kvi->linenr = cs->linenr;
	ret = fn(name->buf, value, &ctx, data);
	if (ret >= 0)
		cs->linenr++;
	return ret;
}
/*
 * Parse the '[base "extension"]' form of a section header; "name"
 * already holds the lowercased base.  The quoted subsection is
 * appended verbatim (case-sensitive) after a '.'.  "c" is the
 * character that ended the base scan.  Returns 0 on success, -1 on
 * malformed input.
 */
static int get_extended_base_var(struct config_source *cs, struct strbuf *name,
				 int c)
{
	cs->subsection_case_sensitive = 0;
	do {
		if (c == '\n')
			goto error_incomplete_line;
		c = get_next_char(cs);
	} while (isspace(c));
	/* We require the format to be '[base "extension"]' */
	if (c != '"')
		return -1;
	strbuf_addch(name, '.');
	for (;;) {
		int c = get_next_char(cs);
		if (c == '\n')
			goto error_incomplete_line;
		if (c == '"')
			break;
		if (c == '\\') {
			/* backslash escapes the next character literally */
			c = get_next_char(cs);
			if (c == '\n')
				goto error_incomplete_line;
		}
		strbuf_addch(name, c);
	}
	/* Final ']' */
	if (get_next_char(cs) != ']')
		return -1;
	return 0;
error_incomplete_line:
	/* report the error on the line the header started on */
	cs->linenr--;
	return -1;
}
/*
 * Parse a section header after the opening '[' into "name",
 * lowercasing the base part.  Whitespace switches to the extended
 * '[base "extension"]' syntax.  Returns 0 on success, -1 on error.
 */
static int get_base_var(struct config_source *cs, struct strbuf *name)
{
	cs->subsection_case_sensitive = 1;
	for (;;) {
		int c = get_next_char(cs);
		if (cs->eof)
			return -1;
		if (c == ']')
			return 0;
		if (isspace(c))
			return get_extended_base_var(cs, name, c);
		if (!iskeychar(c) && c != '.')
			return -1;
		strbuf_addch(name, tolower(c));
	}
}
/*
 * Bookkeeping for the optional parser event callback: the type and
 * start offset of the most recently seen event, which is only reported
 * once the next event tells us where it ended.
 */
struct parse_event_data {
	enum config_event_t previous_type;
	size_t previous_offset;
	const struct config_options *opts;
};
/*
 * Record that a new event of "type" begins here, and report the
 * previous event (now that its end offset is known) to the event
 * callback in "data->opts".  Consecutive whitespace events are
 * coalesced into one.  Returns the callback's result, or 0 if there
 * is no callback.
 */
static int do_event(struct config_source *cs, enum config_event_t type,
		    struct parse_event_data *data)
{
	size_t offset;
	if (!data->opts || !data->opts->event_fn)
		return 0;
	if (type == CONFIG_EVENT_WHITESPACE &&
	    data->previous_type == type)
		return 0;
	offset = cs->do_ftell(cs);
	/*
	 * At EOF, the parser always "inserts" an extra '\n', therefore
	 * the end offset of the event is the current file position, otherwise
	 * we will already have advanced to the next event.
	 */
	if (type != CONFIG_EVENT_EOF)
		offset--;
	/* the initial previous_type is EOF, meaning "nothing to report yet" */
	if (data->previous_type != CONFIG_EVENT_EOF &&
	    data->opts->event_fn(data->previous_type, data->previous_offset,
				 offset, cs, data->opts->event_fn_data) < 0)
		return -1;
	data->previous_type = type;
	data->previous_offset = offset;
	return 0;
}
/*
 * Populate "out" with the provenance of the entry currently being
 * parsed from "cs" (file name, origin type, current line, and the
 * caller-supplied scope).
 */
static void kvi_from_source(struct config_source *cs,
			    enum config_scope scope,
			    struct key_value_info *out)
{
	out->scope = scope;
	out->linenr = cs->linenr;
	out->origin_type = cs->origin_type;
	out->filename = strintern(cs->name);
}
/*
 * The main config parser loop: reads sections, comments and
 * "key = value" entries from "cs", reporting each entry to "fn" and
 * each syntactic element to the optional event callback in "opts".
 * Returns 0 at EOF; on a parse error the behavior is governed by the
 * configured error action (die, return an error, or stay silent).
 */
static int git_parse_source(struct config_source *cs, config_fn_t fn,
			    struct key_value_info *kvi, void *data,
			    const struct config_options *opts)
{
	int comment = 0;
	size_t baselen = 0;
	struct strbuf *var = &cs->var;
	int error_return = 0;
	char *error_msg = NULL;
	/* U+FEFF Byte Order Mark in UTF8 */
	const char *bomptr = utf8_bom;
	/* For the parser event callback */
	struct parse_event_data event_data = {
		CONFIG_EVENT_EOF, 0, opts
	};
	for (;;) {
		int c;
		c = get_next_char(cs);
		if (bomptr && *bomptr) {
			/* We are at the file beginning; skip UTF8-encoded BOM
			 * if present. Sane editors won't put this in on their
			 * own, but e.g. Windows Notepad will do it happily. */
			if (c == (*bomptr & 0377)) {
				bomptr++;
				continue;
			} else {
				/* Do not tolerate partial BOM. */
				if (bomptr != utf8_bom)
					break;
				/* No BOM at file beginning. Cool. */
				bomptr = NULL;
			}
		}
		if (c == '\n') {
			if (cs->eof) {
				if (do_event(cs, CONFIG_EVENT_EOF, &event_data) < 0)
					return -1;
				return 0;
			}
			if (do_event(cs, CONFIG_EVENT_WHITESPACE, &event_data) < 0)
				return -1;
			comment = 0;
			continue;
		}
		if (comment)
			continue;
		if (isspace(c)) {
			if (do_event(cs, CONFIG_EVENT_WHITESPACE, &event_data) < 0)
				return -1;
			continue;
		}
		if (c == '#' || c == ';') {
			if (do_event(cs, CONFIG_EVENT_COMMENT, &event_data) < 0)
				return -1;
			comment = 1;
			continue;
		}
		if (c == '[') {
			if (do_event(cs, CONFIG_EVENT_SECTION, &event_data) < 0)
				return -1;
			/* Reset prior to determining a new stem */
			strbuf_reset(var);
			if (get_base_var(cs, var) < 0 || var->len < 1)
				break;
			strbuf_addch(var, '.');
			baselen = var->len;
			continue;
		}
		/* entry key names must start with a letter */
		if (!isalpha(c))
			break;
		if (do_event(cs, CONFIG_EVENT_ENTRY, &event_data) < 0)
			return -1;
		/*
		 * Truncate the var name back to the section header
		 * stem prior to grabbing the suffix part of the name
		 * and the value.
		 */
		strbuf_setlen(var, baselen);
		strbuf_addch(var, tolower(c));
		if (get_value(cs, kvi, fn, data, var) < 0)
			break;
	}
	/* we broke out of the loop: format and handle a parse error */
	if (do_event(cs, CONFIG_EVENT_ERROR, &event_data) < 0)
		return -1;
	switch (cs->origin_type) {
	case CONFIG_ORIGIN_BLOB:
		error_msg = xstrfmt(_("bad config line %d in blob %s"),
				    cs->linenr, cs->name);
		break;
	case CONFIG_ORIGIN_FILE:
		error_msg = xstrfmt(_("bad config line %d in file %s"),
				    cs->linenr, cs->name);
		break;
	case CONFIG_ORIGIN_STDIN:
		error_msg = xstrfmt(_("bad config line %d in standard input"),
				    cs->linenr);
		break;
	case CONFIG_ORIGIN_SUBMODULE_BLOB:
		error_msg = xstrfmt(_("bad config line %d in submodule-blob %s"),
				    cs->linenr, cs->name);
		break;
	case CONFIG_ORIGIN_CMDLINE:
		error_msg = xstrfmt(_("bad config line %d in command line %s"),
				    cs->linenr, cs->name);
		break;
	default:
		error_msg = xstrfmt(_("bad config line %d in %s"),
				    cs->linenr, cs->name);
	}
	/* the caller's error_action (if set) overrides the source default */
	switch (opts && opts->error_action ?
		opts->error_action :
		cs->default_error_action) {
	case CONFIG_ERROR_DIE:
		die("%s", error_msg);
		break;
	case CONFIG_ERROR_ERROR:
		error_return = error("%s", error_msg);
		break;
	case CONFIG_ERROR_SILENT:
		error_return = -1;
		break;
	case CONFIG_ERROR_UNSET:
		BUG("config error action unset");
	}
	free(error_msg);
	return error_return;
}
/*
 * Die with a message describing an unparsable numeric config value,
 * including the value's provenance from "kvi".  errno distinguishes
 * an out-of-range number (ERANGE) from an invalid unit suffix.
 * Note: each case calls die(), which does not return, so no breaks
 * are needed in the switch.
 */
NORETURN
static void die_bad_number(const char *name, const char *value,
			   const struct key_value_info *kvi)
{
	const char *error_type = (errno == ERANGE) ?
		N_("out of range") : N_("invalid unit");
	const char *bad_numeric = N_("bad numeric config value '%s' for '%s': %s");
	if (!kvi)
		BUG("kvi should not be NULL");
	if (!value)
		value = "";
	if (!kvi->filename)
		die(_(bad_numeric), value, name, _(error_type));
	switch (kvi->origin_type) {
	case CONFIG_ORIGIN_BLOB:
		die(_("bad numeric config value '%s' for '%s' in blob %s: %s"),
		    value, name, kvi->filename, _(error_type));
	case CONFIG_ORIGIN_FILE:
		die(_("bad numeric config value '%s' for '%s' in file %s: %s"),
		    value, name, kvi->filename, _(error_type));
	case CONFIG_ORIGIN_STDIN:
		die(_("bad numeric config value '%s' for '%s' in standard input: %s"),
		    value, name, _(error_type));
	case CONFIG_ORIGIN_SUBMODULE_BLOB:
		die(_("bad numeric config value '%s' for '%s' in submodule-blob %s: %s"),
		    value, name, kvi->filename, _(error_type));
	case CONFIG_ORIGIN_CMDLINE:
		die(_("bad numeric config value '%s' for '%s' in command line %s: %s"),
		    value, name, kvi->filename, _(error_type));
	default:
		die(_("bad numeric config value '%s' for '%s' in %s: %s"),
		    value, name, kvi->filename, _(error_type));
	}
}
/*
 * Parse "value" as an int, dying with a location-aware message from
 * "kvi" when it cannot be parsed.
 */
int git_config_int(const char *name, const char *value,
		   const struct key_value_info *kvi)
{
	int parsed;
	if (git_parse_int(value, &parsed))
		return parsed;
	die_bad_number(name, value, kvi);
}
/*
 * Parse "value" as an int64_t, dying with a location-aware message
 * from "kvi" when it cannot be parsed.
 */
int64_t git_config_int64(const char *name, const char *value,
			 const struct key_value_info *kvi)
{
	int64_t parsed;
	if (git_parse_int64(value, &parsed))
		return parsed;
	die_bad_number(name, value, kvi);
}
/*
 * Parse "value" as an unsigned long, dying with a location-aware
 * message from "kvi" when it cannot be parsed.
 */
unsigned long git_config_ulong(const char *name, const char *value,
			       const struct key_value_info *kvi)
{
	unsigned long parsed;
	if (git_parse_ulong(value, &parsed))
		return parsed;
	die_bad_number(name, value, kvi);
}
/*
 * Parse "value" as a ssize_t, dying with a location-aware message
 * from "kvi" when it cannot be parsed.
 */
ssize_t git_config_ssize_t(const char *name, const char *value,
			   const struct key_value_info *kvi)
{
	ssize_t parsed;
	if (git_parse_ssize_t(value, &parsed))
		return parsed;
	die_bad_number(name, value, kvi);
}
/*
 * Parse "value" as a double, dying with a location-aware message
 * from "kvi" when it cannot be parsed.
 */
double git_config_double(const char *name, const char *value,
			 const struct key_value_info *kvi)
{
	double parsed;
	if (git_parse_double(value, &parsed))
		return parsed;
	die_bad_number(name, value, kvi);
}
/*
 * Interpret "value" either as a boolean ("true"/"false"/etc., with
 * *is_bool set to 1) or, failing that, as an int (*is_bool set to 0;
 * dies if not numeric either).
 */
int git_config_bool_or_int(const char *name, const char *value,
			   const struct key_value_info *kvi, int *is_bool)
{
	int as_bool = git_parse_maybe_bool_text(value);
	if (as_bool < 0) {
		*is_bool = 0;
		return git_config_int(name, value, kvi);
	}
	*is_bool = 1;
	return as_bool;
}
/*
 * Interpret "value" as a boolean, dying on anything that is not a
 * recognized boolean spelling.
 */
int git_config_bool(const char *name, const char *value)
{
	int parsed = git_parse_maybe_bool(value);
	if (parsed >= 0)
		return parsed;
	die(_("bad boolean config value '%s' for '%s'"), value, name);
}
/*
 * Copy a (required) string config value into *dest.  A value-less
 * entry is reported via config_error_nonbool().  The caller owns the
 * returned allocation.
 */
int git_config_string(char **dest, const char *var, const char *value)
{
	if (value) {
		*dest = xstrdup(value);
		return 0;
	}
	return config_error_nonbool(var);
}
/*
 * Interpret a config value as a pathname: expand "~"-style user
 * directories, and honor a ":(optional)" prefix by storing NULL when
 * the file does not exist.  Dies when expansion fails; the caller owns
 * the allocation stored in *dest.
 */
int git_config_pathname(char **dest, const char *var, const char *value)
{
	bool optional;
	char *expanded;
	if (!value)
		return config_error_nonbool(var);
	optional = skip_prefix(value, ":(optional)", &value);
	expanded = interpolate_path(value, 0);
	if (!expanded)
		die(_("failed to expand user dir in: '%s'"), value);
	if (optional && is_missing_file(expanded)) {
		/* optional path that is absent: report "no path" */
		free(expanded);
		expanded = NULL;
	}
	*dest = expanded;
	return 0;
}
/*
 * Parse a config value as an expiry date into *timestamp, reporting
 * a value-less entry or an unparsable date as an error.
 */
int git_config_expiry_date(timestamp_t *timestamp, const char *var, const char *value)
{
	if (!value)
		return config_error_nonbool(var);
	if (!parse_expiry_date(value, timestamp))
		return 0;
	return error(_("'%s' for '%s' is not a valid timestamp"),
		     value, var);
}
/*
 * Parse a config value as a color spec into "dest".  Returns 0 on
 * success, an error for a value-less entry, and -1 when the spec
 * cannot be parsed.
 */
int git_config_color(char *dest, const char *var, const char *value)
{
	if (!value)
		return config_error_nonbool(var);
	return color_parse(value, dest) < 0 ? -1 : 0;
}
/*
 * All source specific fields in the union, die_on_error, name and the callbacks
 * fgetc, ungetc, ftell of top need to be initialized before calling
 * this function.  This resets the per-parse state of "top", runs the
 * parser, and tears the state down again.
 */
static int do_config_from(struct config_source *top, config_fn_t fn,
			  void *data, enum config_scope scope,
			  const struct config_options *opts)
{
	struct key_value_info kvi = KVI_INIT;
	int ret;
	/* push config-file parsing state stack */
	top->linenr = 1;
	top->eof = 0;
	top->total_len = 0;
	strbuf_init(&top->value, 1024);
	strbuf_init(&top->var, 1024);
	kvi_from_source(top, scope, &kvi);
	ret = git_parse_source(top, fn, &kvi, data, opts);
	/* pop config-file parsing state stack */
	strbuf_release(&top->value);
	strbuf_release(&top->var);
	return ret;
}
/*
 * Parse config from an already-open stdio stream "f", attributing
 * entries to "name" with the given origin type.  The stream is locked
 * around the parse (NOTE(review): presumably so the per-character
 * callbacks can use unlocked reads — confirm in config_file_fgetc).
 */
static int do_config_from_file(config_fn_t fn,
			       const enum config_origin_type origin_type,
			       const char *name, FILE *f, void *data,
			       enum config_scope scope,
			       const struct config_options *opts)
{
	struct config_source top = CONFIG_SOURCE_INIT;
	int ret;
	if (origin_type == CONFIG_ORIGIN_FILE && (!name || !*name))
		BUG("missing filename for CONFIG_ORIGIN_FILE");
	top.u.file = f;
	top.origin_type = origin_type;
	top.name = name;
	top.default_error_action = CONFIG_ERROR_DIE;
	top.do_fgetc = config_file_fgetc;
	top.do_ungetc = config_file_ungetc;
	top.do_ftell = config_file_ftell;
	flockfile(f);
	ret = do_config_from(&top, fn, data, scope, opts);
	funlockfile(f);
	return ret;
}
/* Parse config from standard input with an empty source name. */
static int git_config_from_stdin(config_fn_t fn, void *data,
				 enum config_scope scope)
{
	return do_config_from_file(fn, CONFIG_ORIGIN_STDIN, "", stdin, data,
				   scope, NULL);
}
/*
 * Open "filename" and parse it as config with the given scope and
 * options.  A file that cannot be opened produces a warning and a
 * return value of -1.
 */
int git_config_from_file_with_options(config_fn_t fn, const char *filename,
				      void *data, enum config_scope scope,
				      const struct config_options *opts)
{
	FILE *f;
	int ret;
	if (!filename)
		BUG("filename cannot be NULL");
	f = fopen_or_warn(filename, "r");
	if (!f)
		return -1;
	ret = do_config_from_file(fn, CONFIG_ORIGIN_FILE, filename,
				  f, data, scope, opts);
	fclose(f);
	return ret;
}
/* Convenience wrapper: parse a file with unknown scope and no options. */
int git_config_from_file(config_fn_t fn, const char *filename, void *data)
{
	return git_config_from_file_with_options(fn, filename, data,
						 CONFIG_SCOPE_UNKNOWN, NULL);
}
int git_config_from_mem(config_fn_t fn,
const enum config_origin_type origin_type,
const char *name, const char *buf, size_t len,
void *data, enum config_scope scope,
const struct config_options *opts)
{
struct config_source top = CONFIG_SOURCE_INIT;
top.u.buf.buf = buf;
top.u.buf.len = len;
top.u.buf.pos = 0;
top.origin_type = origin_type;
top.name = name;
top.default_error_action = CONFIG_ERROR_ERROR;
top.do_fgetc = config_buf_fgetc;
top.do_ungetc = config_buf_ungetc;
top.do_ftell = config_buf_ftell;
return do_config_from(&top, fn, data, scope, opts);
}
/*
 * Read the blob at "oid" from "repo" and parse its contents as
 * config, attributing entries to "name".  Returns an error if the
 * object cannot be read or is not a blob.
 */
int git_config_from_blob_oid(config_fn_t fn,
			     const char *name,
			     struct repository *repo,
			     const struct object_id *oid,
			     void *data,
			     enum config_scope scope)
{
	enum object_type type;
	char *buf;
	unsigned long size;
	int ret;
	buf = odb_read_object(repo->objects, oid, &type, &size);
	if (!buf)
		return error(_("unable to load config blob object '%s'"), name);
	if (type != OBJ_BLOB) {
		free(buf);
		return error(_("reference '%s' does not point to a blob"), name);
	}
	ret = git_config_from_mem(fn, CONFIG_ORIGIN_BLOB, name, buf, size,
				  data, scope, NULL);
	free(buf);
	return ret;
}
/*
 * Resolve "name" (e.g. a ref or revision expression) to an object id
 * in "repo" and parse the blob it points at as config.
 */
static int git_config_from_blob_ref(config_fn_t fn,
				    struct repository *repo,
				    const char *name,
				    void *data,
				    enum config_scope scope)
{
	struct object_id oid;
	if (repo_get_oid(repo, name, &oid) < 0)
		return error(_("unable to resolve config blob '%s'"), name);
	return git_config_from_blob_oid(fn, name, repo, &oid, data, scope);
}
/*
 * Return the path of the system-level config file: $GIT_CONFIG_SYSTEM
 * if set, otherwise the compiled-in ETC_GITCONFIG.  The result is
 * normalized in place and owned by the caller.
 */
char *git_system_config(void)
{
	char *path = xstrdup_or_null(getenv("GIT_CONFIG_SYSTEM"));
	if (!path)
		path = system_path(ETC_GITCONFIG);
	normalize_path_copy(path, path);
	return path;
}
/*
 * Return the single global config path to use for writing: the
 * per-user file normally, but the XDG file when only the latter is
 * readable.  Exactly one of the two candidate paths is returned (the
 * other is freed); may return NULL.
 */
char *git_global_config(void)
{
	char *user_config, *xdg_config;
	git_global_config_paths(&user_config, &xdg_config);
	if (!user_config) {
		free(xdg_config);
		return NULL;
	}
	/* prefer the XDG file only when ~/.gitconfig is not readable */
	if (access_or_warn(user_config, R_OK, 0) && xdg_config &&
	    !access_or_warn(xdg_config, R_OK, 0)) {
		free(user_config);
		return xdg_config;
	} else {
		free(xdg_config);
		return user_config;
	}
}
/*
 * Report both candidate global config paths.  When $GIT_CONFIG_GLOBAL
 * is set it is the sole candidate (*xdg_out is NULL); otherwise
 * ~/.gitconfig and the XDG config path are returned.  The caller owns
 * both (possibly NULL) allocations.
 */
void git_global_config_paths(char **user_out, char **xdg_out)
{
	char *user = xstrdup_or_null(getenv("GIT_CONFIG_GLOBAL"));
	char *xdg = NULL;
	if (!user) {
		user = interpolate_path("~/.gitconfig", 0);
		xdg = xdg_config_home("config");
	}
	*user_out = user;
	*xdg_out = xdg;
}
/* Whether the system-level config should be read ($GIT_CONFIG_NOSYSTEM unset/false). */
int git_config_system(void)
{
	return !git_env_bool("GIT_CONFIG_NOSYSTEM", 0);
}
/*
 * Read the standard config sequence in increasing priority order:
 * system, XDG, per-user (global), repository-local, worktree, and
 * finally command-line parameters.  Individual file results are
 * accumulated into "ret"; command-line parse failures die.
 */
static int do_git_config_sequence(const struct config_options *opts,
				  const struct repository *repo,
				  config_fn_t fn, void *data)
{
	int ret = 0;
	char *system_config = git_system_config();
	char *xdg_config = NULL;
	char *user_config = NULL;
	char *repo_config;
	char *worktree_config;
	/*
	 * Ensure that either:
	 * - the git_dir and commondir are both set, or
	 * - the git_dir and commondir are both NULL
	 */
	if (!opts->git_dir != !opts->commondir)
		BUG("only one of commondir and git_dir is non-NULL");
	if (opts->commondir) {
		repo_config = mkpathdup("%s/config", opts->commondir);
		worktree_config = mkpathdup("%s/config.worktree", opts->git_dir);
	} else {
		repo_config = NULL;
		worktree_config = NULL;
	}
	if (git_config_system() && system_config &&
	    !access_or_die(system_config, R_OK,
			   opts->system_gently ? ACCESS_EACCES_OK : 0))
		ret += git_config_from_file_with_options(fn, system_config,
							 data, CONFIG_SCOPE_SYSTEM,
							 NULL);
	/* XDG config is read before ~/.gitconfig, so the latter wins */
	git_global_config_paths(&user_config, &xdg_config);
	if (xdg_config && !access_or_die(xdg_config, R_OK, ACCESS_EACCES_OK))
		ret += git_config_from_file_with_options(fn, xdg_config, data,
							 CONFIG_SCOPE_GLOBAL, NULL);
	if (user_config && !access_or_die(user_config, R_OK, ACCESS_EACCES_OK))
		ret += git_config_from_file_with_options(fn, user_config, data,
							 CONFIG_SCOPE_GLOBAL, NULL);
	if (!opts->ignore_repo && repo_config &&
	    !access_or_die(repo_config, R_OK, 0))
		ret += git_config_from_file_with_options(fn, repo_config, data,
							 CONFIG_SCOPE_LOCAL, NULL);
	/* config.worktree only applies when the worktree extension is enabled */
	if (!opts->ignore_worktree && worktree_config &&
	    repo && repo->repository_format_worktree_config &&
	    !access_or_die(worktree_config, R_OK, 0)) {
		ret += git_config_from_file_with_options(fn, worktree_config, data,
							 CONFIG_SCOPE_WORKTREE,
							 NULL);
	}
	if (!opts->ignore_cmdline && git_config_from_parameters(fn, data) < 0)
		die(_("unable to parse command-line config"));
	free(system_config);
	free(xdg_config);
	free(user_config);
	free(repo_config);
	free(worktree_config);
	return ret;
}
/*
 * Top-level config reader: dispatch to stdin, a specific file, a blob
 * ref, or the regular lookup sequence depending on "config_source".
 * When opts->respect_includes is set, the callback is wrapped so that
 * include directives are expanded.
 */
int config_with_options(config_fn_t fn, void *data,
			const struct git_config_source *config_source,
			struct repository *repo,
			const struct config_options *opts)
{
	struct config_include_data inc = CONFIG_INCLUDE_INIT;
	int ret;
	if (opts->respect_includes) {
		/* interpose the include-expanding callback around "fn" */
		inc.fn = fn;
		inc.data = data;
		inc.opts = opts;
		inc.repo = repo;
		inc.config_source = config_source;
		fn = git_config_include;
		data = &inc;
	}
	/*
	 * If we have a specific filename, use it. Otherwise, follow the
	 * regular lookup sequence.
	 */
	if (config_source && config_source->use_stdin) {
		ret = git_config_from_stdin(fn, data, config_source->scope);
	} else if (config_source && config_source->file) {
		ret = git_config_from_file_with_options(fn, config_source->file,
							data, config_source->scope,
							NULL);
	} else if (config_source && config_source->blob) {
		ret = git_config_from_blob_ref(fn, repo, config_source->blob,
					       data, config_source->scope);
	} else {
		ret = do_git_config_sequence(opts, repo, fn, data);
	}
	/* release state the include machinery may have accumulated */
	if (inc.remote_urls) {
		string_list_clear(inc.remote_urls, 0);
		FREE_AND_NULL(inc.remote_urls);
	}
	return ret;
}
/*
 * Replay every cached key/value pair of "set" to "fn" in the order
 * the entries were originally read.  A failing callback dies with the
 * location (file and line) recorded for the offending entry.
 */
static void configset_iter(struct config_set *set, config_fn_t fn, void *data)
{
	int value_index;
	struct string_list *values;
	struct config_set_element *entry;
	struct configset_list *list = &set->list;
	struct config_context ctx = CONFIG_CONTEXT_INIT;
	for (size_t i = 0; i < list->nr; i++) {
		entry = list->items[i].e;
		value_index = list->items[i].value_index;
		values = &entry->value_list;
		/* each value carries its own provenance in the util slot */
		ctx.kvi = values->items[value_index].util;
		if (fn(entry->key, values->items[value_index].string, &ctx, data) < 0)
			git_die_config_linenr(entry->key,
					      ctx.kvi->filename,
					      ctx.kvi->linenr);
	}
}
/*
 * Read config before full repository setup: use the repository's
 * directories when available, otherwise try to discover them without
 * changing any global state.
 */
void read_early_config(struct repository *repo, config_fn_t cb, void *data)
{
	struct config_options opts = {0};
	struct strbuf commondir = STRBUF_INIT;
	struct strbuf gitdir = STRBUF_INIT;
	opts.respect_includes = 1;
	if (repo && repo->gitdir) {
		opts.commondir = repo_get_common_dir(repo);
		opts.git_dir = repo_get_git_dir(repo);
	/*
	 * When setup_git_directory() was not yet asked to discover the
	 * GIT_DIR, we ask discover_git_directory() to figure out whether there
	 * is any repository config we should use (but unlike
	 * setup_git_directory_gently(), no global state is changed, most
	 * notably, the current working directory is still the same after the
	 * call).
	 */
	} else if (!discover_git_directory(&commondir, &gitdir)) {
		opts.commondir = commondir.buf;
		opts.git_dir = gitdir.buf;
	}
	config_with_options(cb, data, NULL, NULL, &opts);
	strbuf_release(&commondir);
	strbuf_release(&gitdir);
}
/*
 * Read only the system and global config, ignoring the repository,
 * worktree and command-line sources; missing system config is
 * tolerated.  Used before any repository discovery has happened.
 */
void read_very_early_config(config_fn_t cb, void *data)
{
	struct config_options opts = {
		.respect_includes = 1,
		.ignore_repo = 1,
		.ignore_worktree = 1,
		.ignore_cmdline = 1,
		.system_gently = 1,
	};
	config_with_options(cb, data, NULL, NULL, &opts);
}
/*
 * Look up the hashmap element for "key" in "set", storing it (or NULL
 * when absent) in *dest.  Returns non-zero only when the key itself
 * cannot be normalized.
 */
RESULT_MUST_BE_USED
static int configset_find_element(struct config_set *set, const char *key,
				  struct config_set_element **dest)
{
	struct config_set_element k;
	struct config_set_element *found_entry;
	char *normalized_key;
	int ret;
	/*
	 * `key` may come from the user, so normalize it before using it
	 * for querying entries from the hashmap.
	 */
	ret = git_config_parse_key(key, &normalized_key, NULL);
	if (ret)
		return ret;
	hashmap_entry_init(&k.ent, strhash(normalized_key));
	k.key = normalized_key;
	found_entry = hashmap_get_entry(&set->config_hash, &k, ent, NULL);
	free(normalized_key);
	*dest = found_entry;
	return 0;
}
/*
 * Append one key/value pair (with its provenance "kvi_p") to "set":
 * the value goes into the per-key value list, and an entry is added to
 * the ordered list so iteration can replay values in original order.
 */
static int configset_add_value(const struct key_value_info *kvi_p,
			       struct config_set *set, const char *key,
			       const char *value)
{
	struct config_set_element *e;
	struct string_list_item *si;
	struct configset_list_item *l_item;
	struct key_value_info *kv_info = xmalloc(sizeof(*kv_info));
	int ret;
	ret = configset_find_element(set, key, &e);
	if (ret)
		return ret;
	/*
	 * Since the keys are being fed by git_config*() callback mechanism, they
	 * are already normalized. So simply add them without any further munging.
	 */
	if (!e) {
		e = xmalloc(sizeof(*e));
		hashmap_entry_init(&e->ent, strhash(key));
		e->key = xstrdup(key);
		string_list_init_dup(&e->value_list);
		hashmap_add(&set->config_hash, &e->ent);
	}
	si = string_list_append_nodup(&e->value_list, xstrdup_or_null(value));
	ALLOC_GROW(set->list.items, set->list.nr + 1, set->list.alloc);
	l_item = &set->list.items[set->list.nr++];
	l_item->e = e;
	l_item->value_index = e->value_list.nr - 1;
	/* stash a copy of the provenance alongside the value */
	*kv_info = *kvi_p;
	si->util = kv_info;
	return 0;
}
/*
 * hashmap comparison callback for config_set_element: two entries are
 * equal when their normalized keys compare equal.
 */
static int config_set_element_cmp(const void *cmp_data UNUSED,
				  const struct hashmap_entry *eptr,
				  const struct hashmap_entry *entry_or_key,
				  const void *keydata UNUSED)
{
	const struct config_set_element *a =
		container_of(eptr, const struct config_set_element, ent);
	const struct config_set_element *b =
		container_of(entry_or_key, const struct config_set_element, ent);
	return strcmp(a->key, b->key);
}
/*
 * Prepare an empty config_set: an initialized hashmap and an empty
 * ordered entry list.
 */
void git_configset_init(struct config_set *set)
{
	hashmap_init(&set->config_hash, config_set_element_cmp, NULL, 0);
	set->hash_initialized = 1;
	set->list.items = NULL;
	set->list.nr = 0;
	set->list.alloc = 0;
}
/*
 * Release everything held by "set" (keys, value lists with their
 * key_value_info utils, the hashmap, and the ordered list) and reset
 * it to the uninitialized state.  Safe to call on an uninitialized set.
 */
void git_configset_clear(struct config_set *set)
{
	struct config_set_element *entry;
	struct hashmap_iter iter;
	if (!set->hash_initialized)
		return;
	hashmap_for_each_entry(&set->config_hash, &iter, entry,
			       ent /* member name */) {
		free(entry->key);
		/* free_util=1 also frees the per-value key_value_info */
		string_list_clear(&entry->value_list, 1);
	}
	hashmap_clear_and_free(&set->config_hash, struct config_set_element, ent);
	set->hash_initialized = 0;
	free(set->list.items);
	set->list.nr = 0;
	set->list.alloc = 0;
	set->list.items = NULL;
}
/*
 * config_fn_t adapter that records each parsed key/value into the
 * config_set.  NOTE(review): the return value of configset_add_value()
 * is ignored; it can only fail on a malformed key, which the parser
 * presumably never produces — confirm before relying on this.
 */
static int config_set_callback(const char *key, const char *value,
			       const struct config_context *ctx,
			       void *cb)
{
	struct config_set *set = cb;
	configset_add_value(ctx->kvi, set, key, value);
	return 0;
}
/* Parse "filename" and cache all of its entries into "set". */
int git_configset_add_file(struct config_set *set, const char *filename)
{
	return git_config_from_file(config_set_callback, filename, set);
}
/*
 * Fetch the effective (last) value for "key" from "set".  On success
 * *value points at the cached string (possibly NULL for a value-less
 * entry) and, when "kvi" is non-NULL, its provenance is copied out.
 * Returns non-zero when the key is absent or malformed.
 */
int git_configset_get_value(struct config_set *set, const char *key,
			    const char **value, struct key_value_info *kvi)
{
	const struct string_list *values = NULL;
	int ret;
	struct string_list_item item;
	/*
	 * Follows "last one wins" semantic, i.e., if there are multiple matches for the
	 * queried key in the files of the configset, the value returned will be the last
	 * value in the value list for that key.
	 */
	if ((ret = git_configset_get_value_multi(set, key, &values)))
		return ret;
	assert(values->nr > 0);
	item = values->items[values->nr - 1];
	*value = item.string;
	if (kvi)
		*kvi = *((struct key_value_info *)item.util);
	return 0;
}
/*
 * Point *dest at the list of all cached values for "key".  Returns 0
 * on success, 1 when the key is absent (*dest untouched), or the
 * key-normalization error.
 */
int git_configset_get_value_multi(struct config_set *set, const char *key,
				  const struct string_list **dest)
{
	struct config_set_element *e;
	int ret = configset_find_element(set, key, &e);
	if (ret)
		return ret;
	if (!e)
		return 1;
	*dest = &e->value_list;
	return 0;
}
/*
 * for_each_string_list() callback: reject a NULL (value-less) entry;
 * "util" carries the key name for the error message.
 */
static int check_multi_string(struct string_list_item *item, void *util)
{
	if (item->string)
		return 0;
	return config_error_nonbool(util);
}
/*
 * Like git_configset_get_value_multi(), but additionally errors out
 * when any of the entries is value-less (i.e. not a string).
 */
int git_configset_get_string_multi(struct config_set *cs, const char *key,
				   const struct string_list **dest)
{
	int ret = git_configset_get_value_multi(cs, key, dest);
	if (ret)
		return ret;
	return for_each_string_list((struct string_list *)*dest,
				    check_multi_string, (void *)key);
}
/*
 * Check whether "key" exists in "set": 0 when present, 1 when absent,
 * or the key-normalization error.
 */
int git_configset_get(struct config_set *set, const char *key)
{
	struct config_set_element *e;
	int ret = configset_find_element(set, key, &e);
	if (ret)
		return ret;
	return e ? 0 : 1;
}
/*
 * Fetch "key" as an owned string copy into *dest.  Returns 1 when the
 * key is absent, or the error from git_config_string().
 */
int git_configset_get_string(struct config_set *set, const char *key, char **dest)
{
	const char *value;
	if (git_configset_get_value(set, key, &value, NULL))
		return 1;
	return git_config_string(dest, key, value);
}
/*
 * Fetch "key" as a borrowed string pointing into the configset's own
 * storage (valid until the set is cleared).  Returns 1 when absent,
 * and an error for a value-less entry.
 */
static int git_configset_get_string_tmp(struct config_set *set, const char *key,
					const char **dest)
{
	const char *value;
	if (git_configset_get_value(set, key, &value, NULL))
		return 1;
	if (!value)
		return config_error_nonbool(key);
	*dest = value;
	return 0;
}
/*
 * Fetch "key" as an int into *dest (dies on an unparsable value).
 * Returns 1 when the key is absent.
 */
int git_configset_get_int(struct config_set *set, const char *key, int *dest)
{
	struct key_value_info kvi;
	const char *value;
	if (git_configset_get_value(set, key, &value, &kvi))
		return 1;
	*dest = git_config_int(key, value, &kvi);
	return 0;
}
/*
 * Fetch "key" as an unsigned long into *dest (dies on an unparsable
 * value).  Returns 1 when the key is absent.
 */
int git_configset_get_ulong(struct config_set *set, const char *key, unsigned long *dest)
{
	struct key_value_info kvi;
	const char *value;
	if (git_configset_get_value(set, key, &value, &kvi))
		return 1;
	*dest = git_config_ulong(key, value, &kvi);
	return 0;
}
/*
 * Fetch "key" as a boolean into *dest (dies on an unparsable value).
 * Returns 1 when the key is absent.
 */
int git_configset_get_bool(struct config_set *set, const char *key, int *dest)
{
	const char *value;
	if (git_configset_get_value(set, key, &value, NULL))
		return 1;
	*dest = git_config_bool(key, value);
	return 0;
}
/*
 * Fetch "key" as either a boolean or an int into *dest, with *is_bool
 * reporting which interpretation applied.  Returns 1 when absent.
 */
int git_configset_get_bool_or_int(struct config_set *set, const char *key,
				  int *is_bool, int *dest)
{
	struct key_value_info kvi;
	const char *value;
	if (git_configset_get_value(set, key, &value, &kvi))
		return 1;
	*dest = git_config_bool_or_int(key, value, &kvi, is_bool);
	return 0;
}
/*
 * Fetch "key" as a tri-state boolean into *dest.  Returns 1 when the
 * key is absent and -1 when the value is not a recognized boolean.
 */
int git_configset_get_maybe_bool(struct config_set *set, const char *key, int *dest)
{
	const char *value;
	if (git_configset_get_value(set, key, &value, NULL))
		return 1;
	*dest = git_parse_maybe_bool(value);
	return *dest == -1 ? -1 : 0;
}
/*
 * Fetch "key" as an expanded pathname into *dest.  Returns 1 when the
 * key is absent, or the error from git_config_pathname().
 */
static int git_configset_get_pathname(struct config_set *set, const char *key, char **dest)
{
	const char *value;
	if (git_configset_get_value(set, key, &value, NULL))
		return 1;
	return git_config_pathname(dest, key, value);
}
/*
 * State gathered while reading config about where and how
 * core.commentChar/core.commentString were set, used afterwards to
 * build advice messages.
 */
struct comment_char_config {
	unsigned last_key_id;	/* id (0/1) of the key seen last */
	bool auto_set;		/* last seen value was "auto" (any source) */
	bool auto_set_in_file;	/* last file-based value was "auto" */
	struct strintmap key_flags;	/* per-path "seen" bits, see COMMENT_KEY_* */
	size_t alloc, nr;
	/* one record per (file, key) the key was first seen in */
	struct comment_char_config_item {
		unsigned key_id;
		char *path;
		enum config_scope scope;
	} *item;
};
#define COMMENT_CHAR_CFG_INIT { \
	.key_flags = STRINTMAP_INIT, \
}
/* Free all resources held by a comment_char_config. */
static void comment_char_config_release(struct comment_char_config *config)
{
	for (size_t i = 0; i < config->nr; i++)
		free(config->item[i].path);
	free(config->item);
	strintmap_clear(&config->key_flags);
}
/* Used to track whether the key occurs more than once in a given file */
#define KEY_SEEN_ONCE 1u
#define KEY_SEEN_TWICE 2u
/* Each key id occupies its own two-bit field inside the per-path word. */
#define COMMENT_KEY_SHIFT(id) (2 * (id))
#define COMMENT_KEY_MASK(id) (3u << COMMENT_KEY_SHIFT(id))
/* Store the two-bit "seen" state for key "id" in file "path". */
static void set_comment_key_flags(struct comment_char_config *config,
				  const char *path, unsigned id, unsigned value)
{
	unsigned old = strintmap_get(&config->key_flags, path);
	unsigned new = (old & ~COMMENT_KEY_MASK(id)) |
		value << COMMENT_KEY_SHIFT(id);
	strintmap_set(&config->key_flags, path, new);
}
/* Retrieve the two-bit "seen" state for key "id" in file "path". */
static unsigned get_comment_key_flags(struct comment_char_config *config,
				      const char *path, unsigned id)
{
	unsigned flags = strintmap_get(&config->key_flags, path);
	flags &= COMMENT_KEY_MASK(id);
	return flags >> COMMENT_KEY_SHIFT(id);
}
/* Map a key id to its canonical config key name for display. */
static const char *comment_key_name(unsigned id)
{
	switch (id) {
	case 0:
		return "core.commentChar";
	case 1:
		return "core.commentString";
	default:
		BUG("invalid comment key id");
	}
}
/*
 * Config callback tracking core.commentChar/core.commentString:
 * remembers which key was seen last, whether its value is "auto", and
 * (for file-based entries) records the file once and marks repeats.
 */
static void comment_char_callback(const char *key, const char *value,
				  const struct config_context *ctx, void *data)
{
	struct comment_char_config *config = data;
	const struct key_value_info *kvi = ctx->kvi;
	unsigned key_id;
	if (!strcmp(key, "core.commentchar"))
		key_id = 0;
	else if (!strcmp(key, "core.commentstring"))
		key_id = 1;
	else
		return;
	config->last_key_id = key_id;
	config->auto_set = value && !strcmp(value, "auto");
	if (kvi->origin_type != CONFIG_ORIGIN_FILE) {
		/* non-file sources cannot be rewritten; nothing to record */
		return;
	} else if (get_comment_key_flags(config, kvi->filename, key_id)) {
		/* already recorded for this file: only upgrade the flag */
		set_comment_key_flags(config, kvi->filename, key_id,
				      KEY_SEEN_TWICE);
	} else {
		struct comment_char_config_item *item;
		ALLOC_GROW_BY(config->item, config->nr, 1, config->alloc);
		item = &config->item[config->nr - 1];
		item->key_id = key_id;
		item->scope = kvi->scope;
		item->path = xstrdup(kvi->filename);
		set_comment_key_flags(config, kvi->filename, key_id,
				      KEY_SEEN_ONCE);
	}
	/* only reached for file-based entries; see early return above */
	config->auto_set_in_file = config->auto_set;
}
/*
 * Append the `git config` scope option (--system/--global/--worktree/
 * --file <path>/nothing for --local) that selects the file "item" was
 * read from, followed by a space, to "buf".
 */
static void add_config_scope_arg(struct repository *repo, struct strbuf *buf,
				 struct comment_char_config_item *item)
{
	char *global_config = git_global_config();
	char *system_config = git_system_config();
	if (item->scope == CONFIG_SCOPE_SYSTEM && access(item->path, W_OK)) {
		/*
		 * If the user cannot write to the system config recommend
		 * setting the global config instead.
		 */
		strbuf_addstr(buf, "--global ");
	} else if (fspatheq(item->path, system_config)) {
		strbuf_addstr(buf, "--system ");
	} else if (fspatheq(item->path, global_config)) {
		strbuf_addstr(buf, "--global ");
	} else if (fspatheq(item->path,
			    mkpath("%s/config",
				   repo_get_git_dir(repo)))) {
		; /* --local is the default */
	} else if (fspatheq(item->path,
			    mkpath("%s/config.worktree",
				   repo_get_common_dir(repo)))) {
		strbuf_addstr(buf, "--worktree ");
	} else {
		const char *path = item->path;
		const char *home = getenv("HOME");
		strbuf_addstr(buf, "--file ");
		/* abbreviate paths under $HOME as "~/..." */
		if (home && !fspathncmp(path, home, strlen(home))) {
			path += strlen(home);
			if (!fspathncmp(path, "/", 1))
				path++;
			strbuf_addstr(buf, "~/");
		}
		sq_quote_buf_pretty(buf, path);
		strbuf_addch(buf, ' ');
	}
	free(global_config);
	free(system_config);
}
static bool can_unset_comment_char_config(struct comment_char_config *config)
{
for (size_t i = 0; i < config->nr; i++) {
struct comment_char_config_item *item = &config->item[i];
if (item->scope == CONFIG_SCOPE_SYSTEM &&
access(item->path, W_OK))
return false;
}
return true;
}
/*
 * Emit advice listing the `git config unset` commands that would
 * remove every recorded core.commentChar/core.commentString setting.
 * Skipped entirely when some setting cannot be unset by this user.
 */
static void add_unset_auto_comment_char_advice(struct repository *repo,
					       struct comment_char_config *config)
{
	struct strbuf buf = STRBUF_INIT;
	if (!can_unset_comment_char_config(config))
		return;
	for (size_t i = 0; i < config->nr; i++) {
		struct comment_char_config_item *item = &config->item[i];
		strbuf_addstr(&buf, "    git config unset ");
		add_config_scope_arg(repo, &buf, item);
		/* a key set twice in one file needs --all to remove both */
		if (get_comment_key_flags(config, item->path, item->key_id) == KEY_SEEN_TWICE)
			strbuf_addstr(&buf, "--all ");
		strbuf_addf(&buf, "%s\n", comment_key_name(item->key_id));
	}
	advise(_("\nTo use the default comment string (#) please run\n\n%s"),
	       buf.buf);
	strbuf_release(&buf);
}
/*
 * Advise the user how to migrate away from "auto": first how to unset
 * the existing entries, then how to set an explicit comment string in
 * the last file in which "auto" was seen.
 */
static void add_comment_char_advice(struct repository *repo,
				    struct comment_char_config *config)
{
	struct strbuf buf = STRBUF_INIT;
	struct comment_char_config_item *item;
	/* TRANSLATORS: this is a placeholder for the value of core.commentString */
	const char *placeholder = _("<comment string>");
	/*
	 * If auto is set in the last file that we saw advise the user how to
	 * update their config.
	 */
	if (!config->auto_set_in_file)
		return;
	add_unset_auto_comment_char_advice(repo, config);
	/* The last recorded item is the file that set "auto" last. */
	item = &config->item[config->nr - 1];
	strbuf_reset(&buf);
	strbuf_addstr(&buf, "    git config set ");
	add_config_scope_arg(repo, &buf, item);
	strbuf_addf(&buf, "%s %s\n", comment_key_name(item->key_id),
		    placeholder);
	advise(_("\nTo set a custom comment string please run\n\n"
		 "%s\nwhere '%s' is the string you wish to use.\n"),
	       buf.buf, placeholder);
	strbuf_release(&buf);
}
#undef KEY_SEEN_ONCE
#undef KEY_SEEN_TWICE
#undef COMMENT_KEY_SHIFT
#undef COMMENT_KEY_MASK
/*
 * Bundles the repository whose configuration is being read with the
 * state used to detect deprecated comment-char settings along the way.
 */
struct repo_config {
	struct repository *repo;
	struct comment_char_config comment_char_config;
};
/*
 * Initializer for "struct repo_config". Note: no trailing semicolon,
 * so use sites write "struct repo_config c = REPO_CONFIG_INIT(r);"
 * without producing a stray empty statement.
 */
#define REPO_CONFIG_INIT(repo_) { \
	.comment_char_config = COMMENT_CHAR_CFG_INIT, \
	.repo = repo_, \
}
/* Release the resources held by a "struct repo_config". */
static void repo_config_release(struct repo_config *config)
{
	comment_char_config_release(&config->comment_char_config);
}
#ifdef WITH_BREAKING_CHANGES
/*
 * With breaking changes compiled in, "core.commentChar=auto" is a hard
 * error: print the error, show migration advice, then die.
 */
static void check_auto_comment_char_config(struct repository *repo,
					   struct comment_char_config *config)
{
	if (!config->auto_set)
		return;
	die_message(_("Support for '%s=auto' has been removed in Git 3.0"),
		    comment_key_name(config->last_key_id));
	add_comment_char_advice(repo, config);
	die(NULL);
}
#else
/*
 * Deprecation path: warn once that "core.commentChar=auto" will be
 * removed in Git 3.0, and show the same migration advice.
 */
static void check_auto_comment_char_config(struct repository *repo,
					   struct comment_char_config *config)
{
	extern bool warn_on_auto_comment_char;
	const char *DEPRECATED_CONFIG_ENV =
		"GIT_AUTO_COMMENT_CHAR_CONFIG_WARNING_GIVEN";
	if (!config->auto_set || !warn_on_auto_comment_char)
		return;
	/*
	 * Use an environment variable to ensure that subprocesses do not repeat
	 * the warning.
	 */
	if (git_env_bool(DEPRECATED_CONFIG_ENV, false))
		return;
	setenv(DEPRECATED_CONFIG_ENV, "true", true);
	warning(_("Support for '%s=auto' is deprecated and will be removed in "
		  "Git 3.0"), comment_key_name(config->last_key_id));
	add_comment_char_advice(repo, config);
}
#endif /* WITH_BREAKING_CHANGES */
/*
 * Run deprecation checks on settings collected while reading the
 * configuration, but only when the repository requested them.
 */
static void check_deprecated_config(struct repo_config *config)
{
	if (config->repo->check_deprecated_config)
		check_auto_comment_char_config(config->repo,
					       &config->comment_char_config);
}
/*
 * Per-entry callback while reading a repository's config: feed each
 * key/value both to the deprecated comment-char detector and to the
 * repository's configset cache.
 */
static int repo_config_callback(const char *key, const char *value,
				const struct config_context *ctx, void *data)
{
	struct repo_config *config = data;
	comment_char_callback(key, value, ctx, &config->comment_char_config);
	return config_set_callback(key, value, ctx, config->repo->config);
}
/* Functions used to read configuration from a repository. */
/*
 * (Re)populate repo->config by reading all config files that apply to
 * the repository, then run deprecation checks on what was read.
 */
static void repo_read_config(struct repository *repo)
{
	struct config_options opts = { 0 };
	struct repo_config config = REPO_CONFIG_INIT(repo);
	opts.respect_includes = 1;
	opts.commondir = repo->commondir;
	opts.git_dir = repo->gitdir;
	/* Reuse an existing configset after clearing it, else allocate one. */
	if (!repo->config)
		CALLOC_ARRAY(repo->config, 1);
	else
		git_configset_clear(repo->config);
	git_configset_init(repo->config);
	if (config_with_options(repo_config_callback, &config, NULL, repo,
				&opts) < 0)
		/*
		 * config_with_options() normally returns only
		 * zero, as most errors are fatal, and
		 * non-fatal potential errors are guarded by "if"
		 * statements that are entered only when no error is
		 * possible.
		 *
		 * If we ever encounter a non-fatal error, it means
		 * something went really wrong and we should stop
		 * immediately.
		 */
		die(_("unknown error occurred while reading the configuration files"));
	check_deprecated_config(&config);
	repo_config_release(&config);
}
/*
 * Lazily populate the repository's config cache; a no-op when the
 * configset already exists and is initialized.
 */
static void git_config_check_init(struct repository *repo)
{
	int cached = repo->config && repo->config->hash_initialized;

	if (!cached)
		repo_read_config(repo);
}
/*
 * Invalidate the cached configuration of "repo" so the next query
 * re-reads the config files. A no-op when nothing has been cached.
 */
void repo_config_clear(struct repository *repo)
{
	if (repo->config && repo->config->hash_initialized)
		git_configset_clear(repo->config);
}
/*
 * Iterate over all configuration entries of "repo", calling "fn" with
 * each key/value pair and "data". With a NULL repository only the
 * very-early (pre-repository) configuration is read.
 */
void repo_config(struct repository *repo, config_fn_t fn, void *data)
{
	if (repo) {
		git_config_check_init(repo);
		configset_iter(repo->config, fn, data);
	} else {
		read_very_early_config(fn, data);
	}
}
/* Return 0 when "key" exists in the repo's config, non-zero otherwise. */
int repo_config_get(struct repository *repo, const char *key)
{
	int found;

	git_config_check_init(repo);
	found = git_configset_get(repo->config, key);
	return found;
}
/*
 * Look up the value of "key"; "*value" points at memory owned by the
 * cached configset, so callers must not free it.
 */
int repo_config_get_value(struct repository *repo,
			  const char *key, const char **value)
{
	int ret;

	git_config_check_init(repo);
	ret = git_configset_get_value(repo->config, key, value, NULL);
	return ret;
}
/*
 * Retrieve all values of a multi-valued "key" as a string_list owned
 * by the configset; non-zero when the key is absent.
 */
int repo_config_get_value_multi(struct repository *repo, const char *key,
				const struct string_list **dest)
{
	int ret;

	git_config_check_init(repo);
	ret = git_configset_get_value_multi(repo->config, key, dest);
	return ret;
}
/*
 * Like repo_config_get_value_multi(), but every value must be a
 * string (see git_configset_get_string_multi()).
 */
int repo_config_get_string_multi(struct repository *repo, const char *key,
				 const struct string_list **dest)
{
	int ret;

	git_config_check_init(repo);
	ret = git_configset_get_string_multi(repo->config, key, dest);
	return ret;
}
/*
 * Fetch "key" as a newly allocated string in "*dest". On a negative
 * return from the configset the entry is unusable and we die pointing
 * at the offending file and line.
 */
int repo_config_get_string(struct repository *repo,
			   const char *key, char **dest)
{
	int err;

	git_config_check_init(repo);
	err = git_configset_get_string(repo->config, key, dest);
	if (err < 0)
		git_die_config(repo, key, NULL);
	return err;
}
/*
 * Like repo_config_get_string(), but "*dest" borrows configset-owned
 * memory instead of allocating; callers must not free it.
 */
int repo_config_get_string_tmp(struct repository *repo,
			       const char *key, const char **dest)
{
	int err;

	git_config_check_init(repo);
	err = git_configset_get_string_tmp(repo->config, key, dest);
	if (err < 0)
		git_die_config(repo, key, NULL);
	return err;
}
/* Fetch "key" parsed as an int; non-zero when absent. */
int repo_config_get_int(struct repository *repo,
			const char *key, int *dest)
{
	int ret;

	git_config_check_init(repo);
	ret = git_configset_get_int(repo->config, key, dest);
	return ret;
}
/* Fetch "key" parsed as an unsigned long; non-zero when absent. */
int repo_config_get_ulong(struct repository *repo,
			  const char *key, unsigned long *dest)
{
	int ret;

	git_config_check_init(repo);
	ret = git_configset_get_ulong(repo->config, key, dest);
	return ret;
}
/* Fetch "key" parsed as a boolean; non-zero when absent. */
int repo_config_get_bool(struct repository *repo,
			 const char *key, int *dest)
{
	int ret;

	git_config_check_init(repo);
	ret = git_configset_get_bool(repo->config, key, dest);
	return ret;
}
/*
 * Fetch "key" as either a boolean or an int; "*is_bool" reports which
 * interpretation applied. Non-zero when the key is absent.
 */
int repo_config_get_bool_or_int(struct repository *repo,
				const char *key, int *is_bool, int *dest)
{
	int ret;

	git_config_check_init(repo);
	ret = git_configset_get_bool_or_int(repo->config, key, is_bool, dest);
	return ret;
}
/*
 * Fetch "key" as a boolean when it looks like one; non-zero when the
 * key is absent or not boolean-like.
 */
int repo_config_get_maybe_bool(struct repository *repo,
			       const char *key, int *dest)
{
	int ret;

	git_config_check_init(repo);
	ret = git_configset_get_maybe_bool(repo->config, key, dest);
	return ret;
}
/*
 * Fetch "key" interpreted as a pathname (with "~" expansion performed
 * by the configset); dies pointing at the offending entry when the
 * value is unusable.
 */
int repo_config_get_pathname(struct repository *repo,
			     const char *key, char **dest)
{
	int err;

	git_config_check_init(repo);
	err = git_configset_get_pathname(repo->config, key, dest);
	if (err < 0)
		git_die_config(repo, key, NULL);
	return err;
}
/*
 * Read values into protected_config. Repository and worktree config
 * files are deliberately ignored; only the remaining scopes (system,
 * global, command line) are consulted, and a missing system file is
 * tolerated.
 */
static void read_protected_config(void)
{
	struct config_options opts = {
		.respect_includes = 1,
		.ignore_repo = 1,
		.ignore_worktree = 1,
		.system_gently = 1,
	};
	git_configset_init(&protected_config);
	config_with_options(config_set_callback, &protected_config, NULL,
			    NULL, &opts);
}
/*
 * Iterate over the "protected" configuration (everything except
 * repository/worktree scope), loading it lazily on first use.
 */
void git_protected_config(config_fn_t fn, void *data)
{
	int loaded = protected_config.hash_initialized;

	if (!loaded)
		read_protected_config();
	configset_iter(&protected_config, fn, data);
}
/*
 * Fetch the expiry setting "key" as a newly allocated string in
 * "*output". Unless the value is the literal "now", it must
 * approxidate() to a time strictly before "now"; otherwise we die
 * pointing at the offending config entry.
 */
int repo_config_get_expiry(struct repository *r, const char *key, char **output)
{
	int ret = repo_config_get_string(r, key, output);
	if (ret)
		return ret;
	if (strcmp(*output, "now")) {
		timestamp_t now = approxidate("now");
		if (approxidate(*output) >= now)
			git_die_config(r, key, _("Invalid %s: '%s'"), key, *output);
	}
	return ret;
}
/*
 * Resolve the expiry setting "key" to an absolute timestamp in
 * "*expiry". A plain integer is taken as a number of days before
 * "now"; anything else is handed to parse_expiry_date().
 *
 * Returns 0 on success, 1 when the key is not set at all, and -1 when
 * it is set but cannot be parsed.
 */
int repo_config_get_expiry_in_days(struct repository *r, const char *key,
				   timestamp_t *expiry, timestamp_t now)
{
	const char *expiry_string;
	int days;
	timestamp_t when;
	if (repo_config_get_string_tmp(r, key, &expiry_string))
		return 1; /* no such thing */
	if (git_parse_int(expiry_string, &days)) {
		const intmax_t scale = 86400; /* seconds per day */
		*expiry = now - days * scale;
		return 0;
	}
	if (!parse_expiry_date(expiry_string, &when)) {
		*expiry = when;
		return 0;
	}
	return -1; /* thing exists but cannot be parsed */
}
/*
 * Return core.splitIndex as 0/1, or -1 (the default) when it is unset
 * or not a recognizable boolean.
 */
int repo_config_get_split_index(struct repository *r)
{
	int val;

	if (repo_config_get_maybe_bool(r, "core.splitindex", &val))
		return -1; /* default value */
	return val;
}
/*
 * Return splitIndex.maxPercentChange (0..100), -1 when unset, or the
 * result of error() when the configured value is out of range.
 */
int repo_config_get_max_percent_split_change(struct repository *r)
{
	int val;

	if (repo_config_get_int(r, "splitindex.maxpercentchange", &val))
		return -1; /* default value */
	if (val < 0 || val > 100)
		return error(_("splitIndex.maxPercentChange value '%d' "
			       "should be between 0 and 100"), val);
	return val;
}
/*
 * Determine the configured number of index threads in "*dest".
 * GIT_TEST_INDEX_THREADS overrides the config when non-zero. A
 * boolean index.threads maps true -> 0 (auto-detect) and false -> 1
 * (single-threaded); an integer is used as-is. Returns 0 when a value
 * was found, 1 otherwise.
 */
int repo_config_get_index_threads(struct repository *r, int *dest)
{
	int is_bool, val;
	val = git_env_ulong("GIT_TEST_INDEX_THREADS", 0);
	if (val) {
		*dest = val;
		return 0;
	}
	if (!repo_config_get_bool_or_int(r, "index.threads", &is_bool, &val)) {
		if (is_bool)
			*dest = val ? 0 : 1; /* true: auto, false: 1 thread */
		else
			*dest = val;
		return 0;
	}
	return 1;
}
/*
 * Die with a message about config variable "key"; a NULL filename
 * means the value came from the command line rather than a file.
 */
NORETURN
void git_die_config_linenr(const char *key, const char *filename, int linenr)
{
	if (filename)
		die(_("bad config variable '%s' in file '%s' at line %d"),
		    key, filename, linenr);
	die(_("unable to parse '%s' from command-line config"), key);
}
/*
 * Die with an error about config variable "key", pointing the user at
 * the file and line of its last value. An optional printf-style
 * message "err" is printed through the error routine first. The key
 * must actually have a value; otherwise this is a BUG.
 */
void git_die_config(struct repository *r, const char *key, const char *err, ...)
{
	const struct string_list *values;
	struct key_value_info *kv_info;
	report_fn error_fn = get_error_routine();
	if (err) {
		va_list params;
		va_start(params, err);
		error_fn(err, params);
		va_end(params);
	}
	if (repo_config_get_value_multi(r, key, &values))
		BUG("for key '%s' we must have a value to report on", key);
	/* Report the location of the last (i.e. effective) value. */
	kv_info = values->items[values->nr - 1].util;
	git_die_config_linenr(key, kv_info->filename, kv_info->linenr);
}
/*
 * Find all the stuff for repo_config_set() below.
 */
struct config_store_data {
	size_t baselen;		/* length of the section part of the key */
	char *key;		/* canonicalized key being set/unset */
	int do_not_match;	/* negate the value_pattern match */
	const char *fixed_value; /* compare values literally, not by regex */
	regex_t *value_pattern;	/* may be the CONFIG_REGEX_NONE sentinel */
	int multi_replace;	/* replace all matches, not just the first */
	struct {
		size_t begin, end;	/* byte range of the parsed element */
		enum config_event_t type;
		int is_keys_section;	/* header of the target section? */
	} *parsed;		/* every element the parser saw, in order */
	unsigned int parsed_nr, parsed_alloc, *seen, seen_nr, seen_alloc;
	unsigned int key_seen:1, section_seen:1, is_keys_section:1;
};
#define CONFIG_STORE_INIT { 0 }
/*
 * Release everything held by "store" and reset it to a pristine
 * state. CONFIG_REGEX_NONE is a sentinel, not a compiled regex, so it
 * must not be regfree()d or free()d.
 */
static void config_store_data_clear(struct config_store_data *store)
{
	regex_t *re = store->value_pattern;

	if (re && re != CONFIG_REGEX_NONE) {
		regfree(re);
		free(re);
	}
	free(store->key);
	free(store->parsed);
	free(store->seen);
	memset(store, 0, sizeof(*store));
}
/*
 * Does this key/value pair match the key (and optional value filter)
 * we are operating on? Precedence: a fixed_value comparison when a
 * value is present (a NULL value falls through to the checks below),
 * then "no pattern" (always matches), then the CONFIG_REGEX_NONE
 * sentinel (never matches), then the compiled regex, optionally
 * negated by do_not_match.
 */
static int matches(const char *key, const char *value,
		   const struct config_store_data *store)
{
	if (strcmp(key, store->key))
		return 0; /* not ours */
	if (store->fixed_value && value)
		return !strcmp(store->fixed_value, value);
	if (!store->value_pattern)
		return 1; /* always matches */
	if (store->value_pattern == CONFIG_REGEX_NONE)
		return 0; /* never matches */
	return store->do_not_match ^
		(value && !regexec(store->value_pattern, value, 0, NULL, 0));
}
/*
 * Config-parser event callback: records the byte range and type of
 * every parsed element in store->parsed. For section headers it also
 * determines whether we just entered the section the key belongs to
 * (store->is_keys_section) and, if so, remembers the header's index
 * so a new entry can be placed right after it.
 */
static int store_aux_event(enum config_event_t type, size_t begin, size_t end,
			   struct config_source *cs, void *data)
{
	struct config_store_data *store = data;
	ALLOC_GROW(store->parsed, store->parsed_nr + 1, store->parsed_alloc);
	store->parsed[store->parsed_nr].begin = begin;
	store->parsed[store->parsed_nr].end = end;
	store->parsed[store->parsed_nr].type = type;
	if (type == CONFIG_EVENT_SECTION) {
		int (*cmpfn)(const char *, const char *, size_t);
		/* cs->var holds "section." at this point (trailing dot). */
		if (cs->var.len < 2 || cs->var.buf[cs->var.len - 1] != '.')
			return error(_("invalid section name '%s'"), cs->var.buf);
		/* NOTE(review): mapping of case_sensitive to strncasecmp looks inverted — confirm against caller */
		if (cs->subsection_case_sensitive)
			cmpfn = strncasecmp;
		else
			cmpfn = strncmp;
		/* Is this the section we were looking for? */
		store->is_keys_section =
			store->parsed[store->parsed_nr].is_keys_section =
			cs->var.len - 1 == store->baselen &&
			!cmpfn(cs->var.buf, store->key, store->baselen);
		if (store->is_keys_section) {
			store->section_seen = 1;
			ALLOC_GROW(store->seen, store->seen_nr + 1,
				   store->seen_alloc);
			store->seen[store->seen_nr] = store->parsed_nr;
		}
	}
	store->parsed_nr++;
	return 0;
}
/*
 * Config-parser value callback: records (in store->seen, as indices
 * into store->parsed) every entry that matches the key we are
 * replacing. Before the first match, entries inside the target
 * section are tracked tentatively so a missing key can still be
 * appended at the right place.
 */
static int store_aux(const char *key, const char *value,
		     const struct config_context *ctx UNUSED, void *cb)
{
	struct config_store_data *store = cb;
	if (store->key_seen) {
		if (matches(key, value, store)) {
			if (store->seen_nr == 1 && store->multi_replace == 0) {
				warning(_("%s has multiple values"), key);
			}
			ALLOC_GROW(store->seen, store->seen_nr + 1,
				   store->seen_alloc);
			store->seen[store->seen_nr] = store->parsed_nr;
			store->seen_nr++;
		}
	} else if (store->is_keys_section) {
		/*
		 * Do not increment matches yet: this may not be a match, but we
		 * are in the desired section.
		 */
		ALLOC_GROW(store->seen, store->seen_nr + 1, store->seen_alloc);
		store->seen[store->seen_nr] = store->parsed_nr;
		store->section_seen = 1;
		if (matches(key, value, store)) {
			store->seen_nr++;
			store->key_seen = 1;
		}
	}
	return 0;
}
/*
 * Report a failure to write the updated config file and return the
 * exit code historically shared with "failed to rename".
 */
static int write_error(const char *filename)
{
	const int rename_failed_code = 4;

	error(_("failed to write new configuration file %s"), filename);
	return rename_failed_code;
}
/*
 * Build the section-header line for "key": a dotted key becomes
 * '[section "subsection"]' with '"' and '\' in the subsection
 * backslash-escaped; otherwise a plain '[section]'. The returned
 * strbuf is owned by the caller.
 */
static struct strbuf store_create_section(const char *key,
					  const struct config_store_data *store)
{
	const char *dot;
	size_t i;
	struct strbuf sb = STRBUF_INIT;
	dot = memchr(key, '.', store->baselen);
	if (dot) {
		strbuf_addf(&sb, "[%.*s \"", (int)(dot - key), key);
		for (i = dot - key + 1; i < store->baselen; i++) {
			if (key[i] == '"' || key[i] == '\\')
				strbuf_addch(&sb, '\\');
			strbuf_addch(&sb, key[i]);
		}
		strbuf_addstr(&sb, "\"]\n");
	} else {
		strbuf_addch(&sb, '[');
		strbuf_add(&sb, key, store->baselen);
		strbuf_addstr(&sb, "]\n");
	}
	return sb;
}
static ssize_t write_section(int fd, const char *key,
const struct config_store_data *store)
{
struct strbuf sb = store_create_section(key, store);
ssize_t ret;
ret = write_in_full(fd, sb.buf, sb.len);
strbuf_release(&sb);
return ret;
}
/*
 * Write a "\tkey = value" line (plus optional trailing comment) to
 * "fd". The value is surrounded by double quotes when needed to
 * preserve leading/trailing spaces or comment-introducing characters,
 * and problematic characters inside it are backslash-escaped.
 */
static ssize_t write_pair(int fd, const char *key, const char *value,
			  const char *comment,
			  const struct config_store_data *store)
{
	int i;
	ssize_t ret;
	const char *quote = "";
	struct strbuf sb = STRBUF_INIT;
	/*
	 * Check to see if the value needs to be surrounded with a dq pair.
	 * Note that problematic characters are always backslash-quoted; this
	 * check is about not losing leading or trailing SP and strings that
	 * follow beginning-of-comment characters (i.e. ';' and '#') by the
	 * configuration parser.
	 */
	if (value[0] == ' ')
		quote = "\"";
	for (i = 0; value[i]; i++)
		if (value[i] == ';' || value[i] == '#' || value[i] == '\r')
			quote = "\"";
	/* After the loop, i == strlen(value); check for a trailing SP. */
	if (i && value[i - 1] == ' ')
		quote = "\"";
	/* The key name follows the section prefix (baselen) and the dot. */
	strbuf_addf(&sb, "\t%s = %s", key + store->baselen + 1, quote);
	for (i = 0; value[i]; i++)
		switch (value[i]) {
		case '\n':
			strbuf_addstr(&sb, "\\n");
			break;
		case '\t':
			strbuf_addstr(&sb, "\\t");
			break;
		case '"':
		case '\\':
			strbuf_addch(&sb, '\\');
			/* fallthrough */
		default:
			strbuf_addch(&sb, value[i]);
			break;
		}
	if (comment)
		strbuf_addf(&sb, "%s%s\n", quote, comment);
	else
		strbuf_addf(&sb, "%s\n", quote);
	ret = write_in_full(fd, sb.buf, sb.len);
	strbuf_release(&sb);
	return ret;
}
/*
 * If we are about to unset the last key(s) in a section, and if there are
 * no comments surrounding (or included in) the section, we will want to
 * extend begin/end to remove the entire section.
 *
 * Note: the parameter `seen_ptr` points to the index into the store.seen
 * array. This index may be incremented if a section has more than one
 * entry (which all are to be removed).
 */
static void maybe_remove_section(struct config_store_data *store,
				 size_t *begin_offset, size_t *end_offset,
				 unsigned *seen_ptr)
{
	size_t begin;
	int section_seen = 0;
	unsigned int i, seen;
	/*
	 * First, ensure that this is the first key, and that there are no
	 * comments before the entry nor before the section header.
	 */
	seen = *seen_ptr;
	/* Walk backwards from the entry to be removed. */
	for (i = store->seen[seen]; i > 0; i--) {
		enum config_event_t type = store->parsed[i - 1].type;
		if (type == CONFIG_EVENT_COMMENT)
			/* There is a comment before this entry or section */
			return;
		if (type == CONFIG_EVENT_ENTRY) {
			if (!section_seen)
				/* This is not the section's first entry. */
				return;
			/* We encountered no comment before the section. */
			break;
		}
		if (type == CONFIG_EVENT_SECTION) {
			if (!store->parsed[i - 1].is_keys_section)
				break;
			section_seen = 1;
		}
	}
	/* "i" now indexes the section header (or the first element). */
	begin = store->parsed[i].begin;
	/*
	 * Next, make sure that we are removing the last key(s) in the section,
	 * and that there are no comments that are possibly about the current
	 * section.
	 */
	for (i = store->seen[seen] + 1; i < store->parsed_nr; i++) {
		enum config_event_t type = store->parsed[i].type;
		if (type == CONFIG_EVENT_COMMENT)
			return;
		if (type == CONFIG_EVENT_SECTION) {
			if (store->parsed[i].is_keys_section)
				continue;
			break;
		}
		if (type == CONFIG_EVENT_ENTRY) {
			if (++seen < store->seen_nr &&
			    i == store->seen[seen])
				/* We want to remove this entry, too */
				continue;
			/* There is another entry in this section. */
			return;
		}
	}
	/*
	 * We are really removing the last entry/entries from this section, and
	 * there are no enclosed or surrounding comments. Remove the entire,
	 * now-empty section.
	 */
	*seen_ptr = seen;
	*begin_offset = begin;
	if (i < store->parsed_nr)
		*end_offset = store->parsed[i].begin;
	else
		*end_offset = store->parsed[store->parsed_nr - 1].end;
}
/*
 * Set a single key in the given config file without dying on failure;
 * the optional "comment" is written after the value.
 */
int repo_config_set_in_file_gently(struct repository *r, const char *config_filename,
				   const char *key, const char *comment, const char *value)
{
	return repo_config_set_multivar_in_file_gently(r, config_filename, key, value, NULL, comment, 0);
}
/* Set a single key in the given config file, dying on failure. */
void repo_config_set_in_file(struct repository *r, const char *config_filename,
			     const char *key, const char *value)
{
	repo_config_set_multivar_in_file(r, config_filename, key, value, NULL, 0);
}
/* Set a key in the repository-local config without dying on failure. */
int repo_config_set_gently(struct repository *r, const char *key, const char *value)
{
	return repo_config_set_multivar_gently(r, key, value, NULL, 0);
}
/*
 * Set "key" in the per-worktree config file when the repository has
 * the worktree-config extension enabled; otherwise fall back to the
 * ordinary repository-local config. Does not die on failure.
 */
int repo_config_set_worktree_gently(struct repository *r,
				    const char *key, const char *value)
{
	/* Only use worktree-specific config if it is already enabled. */
	if (r->repository_format_worktree_config) {
		char *file = repo_git_path(r, "config.worktree");
		int ret = repo_config_set_multivar_in_file_gently(
					r, file, key, value, NULL, NULL, 0);
		free(file);
		return ret;
	}
	return repo_config_set_multivar_gently(r, key, value, NULL, 0);
}
/*
 * Set "key" to "value" in the repository-local config (unset it when
 * value is NULL), dying on failure, and record the change in trace2.
 */
void repo_config_set(struct repository *r, const char *key, const char *value)
{
	repo_config_set_multivar(r, key, value, NULL, 0);
	trace2_cmd_set_config(key, value);
}
/*
 * Massage a user-supplied comment into the canonical on-disk form
 * expected by validate_comment_string(): single-line, with leading
 * blanks and a '#' prepended as needed. Returns a newly allocated
 * string, or NULL when no comment was given; dies on multi-line
 * input.
 */
char *git_config_prepare_comment_string(const char *comment)
{
	size_t leading_blanks;
	char *prepared;
	if (!comment)
		return NULL;
	if (strchr(comment, '\n'))
		die(_("no multi-line comment allowed: '%s'"), comment);
	/*
	 * If it begins with one or more leading whitespace characters
	 * followed by '#', the comment string is used as-is.
	 *
	 * If it begins with '#', a SP is inserted between the comment
	 * and the value the comment is about.
	 *
	 * Otherwise, the value is followed by a SP followed by '#'
	 * followed by SP and then the comment string comes.
	 */
	leading_blanks = strspn(comment, " \t");
	if (leading_blanks && comment[leading_blanks] == '#')
		prepared = xstrdup(comment); /* use it as-is */
	else if (comment[0] == '#')
		prepared = xstrfmt(" %s", comment);
	else
		prepared = xstrfmt(" # %s", comment);
	return prepared;
}
/*
 * Sanity-check a comment string that is about to be written to a
 * config file: it must be single-line and start with at least one
 * blank followed by '#'. The front-end is responsible for massaging
 * user input into this shape, so any violation here is a BUG in the
 * caller, not a user error.
 */
static void validate_comment_string(const char *comment)
{
	size_t blanks;

	if (!comment)
		return;
	if (strchr(comment, '\n'))
		BUG("multi-line comments are not permitted: '%s'", comment);
	blanks = strspn(comment, " \t");
	if (!blanks || comment[blanks] != '#')
		BUG("comment must begin with one or more SP followed by '#': '%s'",
		    comment);
}
/*
 * If value==NULL, unset in (remove from) config,
 * if value_pattern!=NULL, disregard key/value pairs where value does not match.
 * if value_pattern==CONFIG_REGEX_NONE, do not match any existing values
 * (only add a new one)
 * if flags contains the CONFIG_FLAGS_MULTI_REPLACE flag, all matching
 * key/values are removed before a single new pair is written. If the
 * flag is not present, then replace only the first match.
 *
 * Returns 0 on success.
 *
 * This function does this:
 *
 * - it locks the config file by creating ".git/config.lock"
 *
 * - it then parses the config using store_aux() as validator to find
 *   the position of the key/value pair to replace. If it is to be unset,
 *   it must be found exactly once.
 *
 * - the config file is mmap()ed and the part before the match (if any) is
 *   written to the lock file, then the changed part and the rest.
 *
 * - the config file is removed and the lock file rename()d to it.
 *
 */
int repo_config_set_multivar_in_file_gently(struct repository *r,
					    const char *config_filename,
					    const char *key, const char *value,
					    const char *value_pattern,
					    const char *comment,
					    unsigned flags)
{
	int fd = -1, in_fd = -1;
	int ret;
	struct lock_file lock = LOCK_INIT;
	char *filename_buf = NULL;
	char *contents = NULL;
	size_t contents_sz;
	struct config_store_data store = CONFIG_STORE_INIT;
	bool saved_check_deprecated_config = r->check_deprecated_config;

	/*
	 * Do not warn or die if there are deprecated config settings as
	 * we want the user to be able to change those settings by running
	 * "git config".
	 */
	r->check_deprecated_config = false;

	validate_comment_string(comment);

	/* parse-key returns negative; flip the sign to feed exit(3) */
	ret = 0 - git_config_parse_key(key, &store.key, &store.baselen);
	if (ret)
		goto out_free;

	store.multi_replace = (flags & CONFIG_FLAGS_MULTI_REPLACE) != 0;

	if (!config_filename)
		config_filename = filename_buf = repo_git_path(r, "config");

	/*
	 * The lock serves a purpose in addition to locking: the new
	 * contents of .git/config will be written into it.
	 */
	fd = hold_lock_file_for_update(&lock, config_filename, 0);
	if (fd < 0) {
		error_errno(_("could not lock config file %s"), config_filename);
		ret = CONFIG_NO_LOCK;
		goto out_free;
	}

	/*
	 * If .git/config does not exist yet, write a minimal version.
	 */
	in_fd = open(config_filename, O_RDONLY);
	if (in_fd < 0) {
		if (errno != ENOENT) {
			error_errno(_("opening %s"), config_filename);
			ret = CONFIG_INVALID_FILE; /* same as "invalid config file" */
			goto out_free;
		}
		/* if nothing to unset, error out */
		if (!value) {
			ret = CONFIG_NOTHING_SET;
			goto out_free;
		}
		free(store.key);
		store.key = xstrdup(key);
		if (write_section(fd, key, &store) < 0 ||
		    write_pair(fd, key, value, comment, &store) < 0)
			goto write_err_out;
	} else {
		struct stat st;
		size_t copy_begin, copy_end;
		unsigned i;
		int new_line = 0;
		struct config_options opts;

		if (!value_pattern)
			store.value_pattern = NULL;
		else if (value_pattern == CONFIG_REGEX_NONE)
			store.value_pattern = CONFIG_REGEX_NONE;
		else if (flags & CONFIG_FLAGS_FIXED_VALUE)
			store.fixed_value = value_pattern;
		else {
			/* A leading '!' negates the pattern. */
			if (value_pattern[0] == '!') {
				store.do_not_match = 1;
				value_pattern++;
			} else
				store.do_not_match = 0;

			store.value_pattern = (regex_t*)xmalloc(sizeof(regex_t));
			if (regcomp(store.value_pattern, value_pattern,
				    REG_EXTENDED)) {
				error(_("invalid pattern: %s"), value_pattern);
				FREE_AND_NULL(store.value_pattern);
				ret = CONFIG_INVALID_PATTERN;
				goto out_free;
			}
		}

		ALLOC_GROW(store.parsed, 1, store.parsed_alloc);
		store.parsed[0].end = 0;

		memset(&opts, 0, sizeof(opts));
		opts.event_fn = store_aux_event;
		opts.event_fn_data = &store;

		/*
		 * After this, store.parsed will contain offsets of all the
		 * parsed elements, and store.seen will contain a list of
		 * matches, as indices into store.parsed.
		 *
		 * As a side effect, we make sure to transform only a valid
		 * existing config file.
		 */
		if (git_config_from_file_with_options(store_aux,
						      config_filename,
						      &store, CONFIG_SCOPE_UNKNOWN,
						      &opts)) {
			error(_("invalid config file %s"), config_filename);
			ret = CONFIG_INVALID_FILE;
			goto out_free;
		}

		/* if nothing to unset, or too many matches, error out */
		if ((store.seen_nr == 0 && value == NULL) ||
		    (store.seen_nr > 1 && !store.multi_replace)) {
			ret = CONFIG_NOTHING_SET;
			goto out_free;
		}

		if (fstat(in_fd, &st) == -1) {
			error_errno(_("fstat on %s failed"), config_filename);
			ret = CONFIG_INVALID_FILE;
			goto out_free;
		}

		contents_sz = xsize_t(st.st_size);
		contents = xmmap_gently(NULL, contents_sz, PROT_READ,
					MAP_PRIVATE, in_fd, 0);
		if (contents == MAP_FAILED) {
			if (errno == ENODEV && S_ISDIR(st.st_mode))
				errno = EISDIR;
			error_errno(_("unable to mmap '%s'%s"),
				    config_filename, mmap_os_err());
			ret = CONFIG_INVALID_FILE;
			contents = NULL;
			goto out_free;
		}
		close(in_fd);
		in_fd = -1;

		/* Give the new file the permissions of the old one. */
		if (chmod(get_lock_file_path(&lock), st.st_mode & 07777) < 0) {
			error_errno(_("chmod on %s failed"), get_lock_file_path(&lock));
			ret = CONFIG_NO_WRITE;
			goto out_free;
		}

		if (store.seen_nr == 0) {
			if (!store.seen_alloc) {
				/* Did not see key nor section */
				ALLOC_GROW(store.seen, 1, store.seen_alloc);
				store.seen[0] = store.parsed_nr
					- !!store.parsed_nr;
			}
			store.seen_nr = 1;
		}

		for (i = 0, copy_begin = 0; i < store.seen_nr; i++) {
			size_t replace_end;
			int j = store.seen[i];

			new_line = 0;
			if (!store.key_seen) {
				copy_end = store.parsed[j].end;
				/* include '\n' when copying section header */
				if (copy_end > 0 && copy_end < contents_sz &&
				    contents[copy_end - 1] != '\n' &&
				    contents[copy_end] == '\n')
					copy_end++;
				replace_end = copy_end;
			} else {
				replace_end = store.parsed[j].end;
				copy_end = store.parsed[j].begin;
				if (!value)
					maybe_remove_section(&store,
							     &copy_end,
							     &replace_end, &i);
				/*
				 * Swallow preceding white-space on the same
				 * line.
				 */
				while (copy_end > 0) {
					char c = contents[copy_end - 1];

					if (isspace(c) && c != '\n')
						copy_end--;
					else
						break;
				}
			}

			if (copy_end > 0 && contents[copy_end-1] != '\n')
				new_line = 1;

			/* write the first part of the config */
			if (copy_end > copy_begin) {
				if (write_in_full(fd, contents + copy_begin,
						  copy_end - copy_begin) < 0)
					goto write_err_out;
				if (new_line &&
				    write_str_in_full(fd, "\n") < 0)
					goto write_err_out;
			}
			copy_begin = replace_end;
		}

		/* write the pair (value == NULL means unset) */
		if (value) {
			if (!store.section_seen) {
				if (write_section(fd, key, &store) < 0)
					goto write_err_out;
			}
			if (write_pair(fd, key, value, comment, &store) < 0)
				goto write_err_out;
		}

		/* write the rest of the config */
		if (copy_begin < contents_sz)
			if (write_in_full(fd, contents + copy_begin,
					  contents_sz - copy_begin) < 0)
				goto write_err_out;

		munmap(contents, contents_sz);
		contents = NULL;
	}

	if (commit_lock_file(&lock) < 0) {
		error_errno(_("could not write config file %s"), config_filename);
		ret = CONFIG_NO_WRITE;
		goto out_free;
	}

	ret = 0;

	/* Invalidate the config cache */
	repo_config_clear(r);

out_free:
	rollback_lock_file(&lock);
	free(filename_buf);
	if (contents)
		munmap(contents, contents_sz);
	if (in_fd >= 0)
		close(in_fd);
	config_store_data_clear(&store);
	r->check_deprecated_config = saved_check_deprecated_config;
	return ret;

write_err_out:
	ret = write_error(get_lock_file_path(&lock));
	goto out_free;
}
/*
 * Like repo_config_set_multivar_in_file_gently(), but die with a
 * message naming the key on any failure.
 */
void repo_config_set_multivar_in_file(struct repository *r,
				      const char *config_filename,
				      const char *key, const char *value,
				      const char *value_pattern, unsigned flags)
{
	int err = repo_config_set_multivar_in_file_gently(r, config_filename,
							  key, value,
							  value_pattern, NULL,
							  flags);
	if (!err)
		return;
	if (value)
		die(_("could not set '%s' to '%s'"), key, value);
	die(_("could not unset '%s'"), key);
}
/*
 * Apply a multivar set/unset to the repository-local config file
 * without dying on failure; returns the underlying error code.
 */
int repo_config_set_multivar_gently(struct repository *r, const char *key,
				    const char *value,
				    const char *value_pattern, unsigned flags)
{
	char *path = repo_git_path(r, "config");
	int ret;

	ret = repo_config_set_multivar_in_file_gently(r, path, key, value,
						      value_pattern, NULL,
						      flags);
	free(path);
	return ret;
}
/*
 * Apply a multivar set/unset to the repository-local config file,
 * dying on failure.
 */
void repo_config_set_multivar(struct repository *r,
			      const char *key, const char *value,
			      const char *value_pattern, unsigned flags)
{
	char *path = repo_git_path(r, "config");

	repo_config_set_multivar_in_file(r, path, key, value,
					 value_pattern, flags);
	free(path);
}
/*
 * Match a raw section-header line "buf" (starting at '[') against the
 * dotted section name "name" (e.g. "branch.topic"). Handles both the
 * '[section "subsection"]' form, where a space in the header
 * corresponds to the dot in "name" and backslash escapes are honored,
 * and the plain '[section]' form. Returns the length of the matched
 * header including trailing whitespace, or 0 when it does not match.
 */
static size_t section_name_match (const char *buf, const char *name)
{
	size_t i = 0, j = 0;
	int dot = 0;
	if (buf[i] != '[')
		return 0;
	for (i = 1; buf[i] && buf[i] != ']'; i++) {
		if (!dot && isspace(buf[i])) {
			/* Space in header <=> dot in the dotted name. */
			dot = 1;
			if (name[j++] != '.')
				break;
			for (i++; isspace(buf[i]); i++)
				; /* do nothing */
			if (buf[i] != '"')
				break;
			continue;
		}
		/* Inside the quoted subsection, '\' escapes the next char. */
		if (buf[i] == '\\' && dot)
			i++;
		else if (buf[i] == '"' && dot) {
			/* Closing quote: skip trailing blanks, then expect ']'. */
			for (i++; isspace(buf[i]); i++)
				; /* do_nothing */
			break;
		}
		if (buf[i] != name[j++])
			break;
	}
	if (buf[i] == ']' && name[j] == 0) {
		/*
		 * We match, now just find the right length offset by
		 * gobbling up any whitespace after it, as well
		 */
		i++;
		for (; buf[i] && isspace(buf[i]); i++)
			; /* do nothing */
		return i;
	}
	return 0;
}
/*
 * A section name must be non-empty; up to the first dot only
 * alphanumerics and '-' are allowed, while anything goes after the
 * first dot (it becomes a quoted subsection).
 */
static int section_name_is_ok(const char *name)
{
	const char *p = name;

	/* Empty section names are bogus. */
	if (!*p)
		return 0;
	while (*p && *p != '.') {
		if (*p != '-' && !isalnum((unsigned char)*p))
			return 0;
		p++;
	}
	return 1;
}
#define GIT_CONFIG_MAX_LINE_LEN (512 * 1024)
/*
 * Rename, copy or remove a config section in a file, line by line.
 * If new_name == NULL, the section is removed instead; with copy != 0
 * the matched section(s) are duplicated under the new name rather
 * than renamed. Returns the number of matched section headers, or a
 * negative/error code on failure.
 */
static int repo_config_copy_or_rename_section_in_file(
	struct repository *r,
	const char *config_filename,
	const char *old_name,
	const char *new_name, int copy)
{
	int ret = 0, remove = 0;
	char *filename_buf = NULL;
	struct lock_file lock = LOCK_INIT;
	int out_fd;
	struct strbuf buf = STRBUF_INIT;
	FILE *config_file = NULL;
	struct stat st;
	struct strbuf copystr = STRBUF_INIT;
	struct config_store_data store;
	uint32_t line_nr = 0;

	memset(&store, 0, sizeof(store));

	if (new_name && !section_name_is_ok(new_name)) {
		ret = error(_("invalid section name: %s"), new_name);
		goto out_no_rollback;
	}

	if (!config_filename)
		config_filename = filename_buf = repo_git_path(r, "config");

	out_fd = hold_lock_file_for_update(&lock, config_filename, 0);
	if (out_fd < 0) {
		ret = error(_("could not lock config file %s"), config_filename);
		goto out;
	}

	if (!(config_file = fopen(config_filename, "rb"))) {
		ret = warn_on_fopen_errors(config_filename);
		if (ret)
			goto out;
		/* no config file means nothing to rename, no error */
		goto commit_and_out;
	}

	if (fstat(fileno(config_file), &st) == -1) {
		ret = error_errno(_("fstat on %s failed"), config_filename);
		goto out;
	}

	/* Carry the old file's permissions over to the new one. */
	if (chmod(get_lock_file_path(&lock), st.st_mode & 07777) < 0) {
		ret = error_errno(_("chmod on %s failed"),
				  get_lock_file_path(&lock));
		goto out;
	}

	while (!strbuf_getwholeline(&buf, config_file, '\n')) {
		size_t i, length;
		int is_section = 0;
		char *output = buf.buf;

		line_nr++;

		if (buf.len >= GIT_CONFIG_MAX_LINE_LEN) {
			ret = error(_("refusing to work with overly long line "
				      "in '%s' on line %"PRIuMAX),
				    config_filename, (uintmax_t)line_nr);
			goto out;
		}

		for (i = 0; buf.buf[i] && isspace(buf.buf[i]); i++)
			; /* do nothing */
		if (buf.buf[i] == '[') {
			/* it's a section */
			size_t offset;
			is_section = 1;

			/*
			 * When encountering a new section under -c we
			 * need to flush out any section we're already
			 * copying and begin anew. There might be
			 * multiple [branch "$name"] sections.
			 */
			if (copystr.len > 0) {
				if (write_in_full(out_fd, copystr.buf, copystr.len) < 0) {
					ret = write_error(get_lock_file_path(&lock));
					goto out;
				}
				strbuf_reset(&copystr);
			}

			offset = section_name_match(&buf.buf[i], old_name);
			if (offset > 0) {
				ret++;
				if (!new_name) {
					remove = 1;
					continue;
				}
				store.baselen = strlen(new_name);
				if (!copy) {
					if (write_section(out_fd, new_name, &store) < 0) {
						ret = write_error(get_lock_file_path(&lock));
						goto out;
					}
					/*
					 * We wrote out the new section, with
					 * a newline, now skip the old
					 * section's length
					 */
					output += offset + i;
					if (strlen(output) > 0) {
						/*
						 * More content means there's
						 * a declaration to put on the
						 * next line; indent with a
						 * tab
						 */
						output -= 1;
						output[0] = '\t';
					}
				} else {
					strbuf_release(&copystr);
					copystr = store_create_section(new_name, &store);
				}
			}
			remove = 0;
		}
		if (remove)
			continue;
		length = strlen(output);

		/* Buffer lines of a section being copied for later flush. */
		if (!is_section && copystr.len > 0) {
			strbuf_add(&copystr, output, length);
		}

		if (write_in_full(out_fd, output, length) < 0) {
			ret = write_error(get_lock_file_path(&lock));
			goto out;
		}
	}

	/*
	 * Copy a trailing section at the end of the config, won't be
	 * flushed by the usual "flush because we have a new section"
	 * logic in the loop above.
	 */
	if (copystr.len > 0) {
		if (write_in_full(out_fd, copystr.buf, copystr.len) < 0) {
			ret = write_error(get_lock_file_path(&lock));
			goto out;
		}
		strbuf_reset(&copystr);
	}

	fclose(config_file);
	config_file = NULL;
commit_and_out:
	if (commit_lock_file(&lock) < 0)
		ret = error_errno(_("could not write config file %s"),
				  config_filename);
out:
	if (config_file)
		fclose(config_file);
	rollback_lock_file(&lock);
out_no_rollback:
	free(filename_buf);
	config_store_data_clear(&store);
	strbuf_release(&buf);
	strbuf_release(&copystr);
	return ret;
}
int repo_config_rename_section_in_file(struct repository *r, const char *config_filename,
const char *old_name, const char *new_name)
{
return repo_config_copy_or_rename_section_in_file(r, config_filename,
old_name, new_name, 0);
}
int repo_config_rename_section(struct repository *r, const char *old_name, const char *new_name)
{
return repo_config_rename_section_in_file(r, NULL, old_name, new_name);
}
int repo_config_copy_section_in_file(struct repository *r, const char *config_filename,
const char *old_name, const char *new_name)
{
return repo_config_copy_or_rename_section_in_file(r, config_filename,
old_name, new_name, 1);
}
int repo_config_copy_section(struct repository *r, const char *old_name, const char *new_name)
{
return repo_config_copy_section_in_file(r, NULL, old_name, new_name);
}
/*
* Call this to report error for your variable that should not
* get a boolean value (i.e. "[my] var" means "true").
*/
#undef config_error_nonbool
int config_error_nonbool(const char *var)
{
return error(_("missing value for '%s'"), var);
}
int parse_config_key(const char *var,
const char *section,
const char **subsection, size_t *subsection_len,
const char **key)
{
const char *dot;
/* Does it start with "section." ? */
if (!skip_prefix(var, section, &var) || *var != '.')
return -1;
/*
* Find the key; we don't know yet if we have a subsection, but we must
* parse backwards from the end, since the subsection may have dots in
* it, too.
*/
dot = strrchr(var, '.');
*key = dot + 1;
/* Did we have a subsection at all? */
if (dot == var) {
if (subsection) {
*subsection = NULL;
*subsection_len = 0;
}
}
else {
if (!subsection)
return -1;
*subsection = var + 1;
*subsection_len = dot - *subsection;
}
return 0;
}
const char *config_origin_type_name(enum config_origin_type type)
{
switch (type) {
case CONFIG_ORIGIN_BLOB:
return "blob";
case CONFIG_ORIGIN_FILE:
return "file";
case CONFIG_ORIGIN_STDIN:
return "standard input";
case CONFIG_ORIGIN_SUBMODULE_BLOB:
return "submodule-blob";
case CONFIG_ORIGIN_CMDLINE:
return "command line";
default:
BUG("unknown config origin type");
}
}
const char *config_scope_name(enum config_scope scope)
{
switch (scope) {
case CONFIG_SCOPE_SYSTEM:
return "system";
case CONFIG_SCOPE_GLOBAL:
return "global";
case CONFIG_SCOPE_LOCAL:
return "local";
case CONFIG_SCOPE_WORKTREE:
return "worktree";
case CONFIG_SCOPE_COMMAND:
return "command";
case CONFIG_SCOPE_SUBMODULE:
return "submodule";
default:
return "unknown";
}
}
int lookup_config(const char **mapping, int nr_mapping, const char *var)
{
int i;
for (i = 0; i < nr_mapping; i++) {
const char *name = mapping[i];
if (name && !strcasecmp(var, name))
return i;
}
return -1;
} | c | github | https://github.com/git/git | config.c |
from __future__ import division
import statsmodels.tsa.stattools as sta
import linecache
from random import shuffle
from math import isnan
import numpy as np
from numpy import mean
import time
import sys
import os
mydir = os.path.expanduser("~/GitHub/Micro-Encounter")
sys.path.append(mydir + "/model/bide")
import bide
sys.path.append(mydir + "/model/randparams")
import randparams as rp
sys.path.append(mydir + "/model/spatial")
import spatial
sys.path.append(mydir + "/model/metrics")
import metrics
'''
sys.path.append(mydir + "/model/col_labels")
labels = linecache.getline(mydir + '/model/col_labels/labels.txt', 1)
with open(mydir + '/results/simulated_data/SimData.csv', 'w+') as text_file:
text_file.write(labels)
'''
######################### Randomly chosen variables ##########################
ComplexityLevels = metrics.get_complexity_levels()
SC, TC, RC = ComplexityLevels
extremes = True
params = rp.get_rand_params(extremes)
width, height, length, seed, m, r, gmax, mmax, dmax, pmax, mfact, std = params
####################### Variables, Lists & Dictionaries ######################
Ns = []
IndDict, ResDict = {}, {}
ct, N = 0, 0
x = range(0, 8)
SpDicts = [{}, {}, {}, {}, {}, {}]
ResLists = [[], [], [], [], [], []]
if 'lockandkey' in RC:
ResDict['dead'] = np.random.uniform(0.1, 0.5)
elif 'simple' in RC:
ResDict['dead'] = 1.0
numSims, sim, p, ct, RowID, PRODI = 10**6, 0, 0, 0, 0, 0
BurnIn = 'not done'
####################### Main Simulation Loop #################################
t0 = time.clock()
while sim < numSims:
numDead, encounters, res_in = 0, 0, 0
ct += 1
RowID += 1
shuffle(x)
t2 = float()
t1 = time.clock()
for xi in x:
# Inflow of resources
if xi == 0: ResLists, ResDict, res_in = bide.ResIn(ResLists, ResDict, params, ct, ComplexityLevels)
# Immigration
elif xi == 1: IndDict, SpDicts = bide.immigration(IndDict, SpDicts, params, ct, ComplexityLevels)
# Individual Dispersal
elif xi == 2 and '-none-' not in SC: IndDict, ResList, ResDict, numDead = bide.dispersal(IndDict, SpDicts, ResLists, ResDict, params, ComplexityLevels, numDead)
# Resource Dispersal
elif xi == 3: ResLists = bide.res_dispersal(ResLists, params, ct, ComplexityLevels)
# Consumption
elif xi == 4: ResLists, ResDict, IndDict, encounters, numDead = bide.consume(IndDict, SpDicts, ResLists, ResDict, params, ComplexityLevels, numDead)
# Reproduction
elif xi == 5: PRODI, IndDicts, ResLists, ResDict, numDead = bide.reproduce(IndDict, SpDicts, ResLists, ResDict, params, ComplexityLevels, numDead)
# Maintenance
elif xi == 6: IndDict, ResLists, ResDict, numDead = bide.maintenance(IndDict, SpDicts, ResLists, ResDict, ComplexityLevels, numDead)
# Transition to or from dormancy
elif xi == 7: IndDict, ResLists, ResDict, numDead = bide.transition(IndDict, SpDicts, ResLists, ResDict, ComplexityLevels, numDead)
t2 = time.clock() - t1
if t2 >= 0.12: break
#if t2 < 0.12: time.sleep(0.12 - t2)
N = len(IndDict.keys())
Ns.append(N)
Rvs = ResLists[0]
Rvs.sort()
R = len(Rvs)
if ct%100 == 0:
print 'sim:', sim, 'ct:', ct, ' N:', N, ' R:', R, 'prodi:', PRODI, ' dead:', numDead, 'encounters:', encounters,' ', 'pmax:', round(pmax,2), 'mfact:', mfact
BurnIn = 'done'
if len(Ns) >= 500 and BurnIn == 'not done':
AugmentedDickeyFuller = sta.adfuller(Ns)
val, p = AugmentedDickeyFuller[0:2]
if p >= 0.05: Ns.pop(0)
elif p < 0.05 or isnan(p) == True:
BurnIn = 'done'
Ns = [Ns[-1]] # only keep the most recent N value
ct = 0
if BurnIn == 'done' and ct%10 == 0:
Rvals, Rtypes, RX, RY, RZ, RIDs = ResLists
IndIDs = IndDict.keys()
SpeciesIDs, IndX, IndY, IndZ, ADList, CellQuotas = [], [], [], [], [], []
[SpeciesIDs.append(IndDict[i]['species']) for i in IndIDs]
[IndX.append(IndDict[i]['x']) for i in IndIDs]
[IndY.append(IndDict[i]['y']) for i in IndIDs]
[IndZ.append(IndDict[i]['z']) for i in IndIDs]
[CellQuotas.append(IndDict[i]['quota']) for i in IndIDs]
[ADList.append(IndDict[i]['state']) for i in IndIDs]
GrowthDict, MaintDict, MainFactorDict, RPFDict, DispDict, TrophicDict = SpDicts
Iagg = 1 #spatial.morisitas(IndX, IndY, width, height, length)
Ragg = 1 #spatial.morisitas(RX, RY, width, height, length)
outlist = [RowID, sim, ct, width, height, length, seed, m, r, gmax, mmax, dmax, pmax, mfact, std, \
res_in, ComplexityLevels[0], ComplexityLevels[1], ComplexityLevels[2],\
mean(DispDict.values()), mean(MaintDict.values()), mean(GrowthDict.values()), \
metrics.per_capita(GrowthDict, SpeciesIDs), metrics.per_capita(MaintDict, SpeciesIDs),\
metrics.per_capita(DispDict, SpeciesIDs), mean(CellQuotas), ADList.count('dormant'), numDead,\
PRODI, N, len(RX), encounters, Iagg, Ragg]
outlist = str(outlist).strip('[]')
outlist = str(outlist).strip('')
OUT = open(mydir + '/results/simulated_data/SimData.csv', 'a')
print>>OUT, outlist
OUT.close()
limlist = [N, R]
if len(Ns) > 2000:
if len(Ns) > 2000:
N = int(round(np.mean(Ns)))
if N == 0:
N = 1
print 'sim:',sim, ' N:',N, ' R:',len(ResLists[0]), ' %Dormant:',100*round(ADList.count('dormant')/N, 3), ' Encounters:',encounters, ' Prod:',PRODI, ' dead:',numDead, ' SC:', SC
ComplexityLevels = metrics.get_complexity_levels()
SC, TC, RC = ComplexityLevels
ct, N = 0, 0
extremes = True
params = rp.get_rand_params(extremes)
width, height, length, seed, m, r, gmax, mmax, dmax, pmax, mfact, std = params
#print params
SpDicts = [{}, {}, {}, {}, {}, {}]
IndDict, ResDict = {}, {}
ResLists = [[], [], [], [], [], []]
Ns = []
if '-lockandkey-' in RC:
ResDict['dead'] = np.random.uniform(0.1, 0.5)
elif '-simple-' in RC:
ResDict['dead'] = 1.0
BurnIn = 'not done'
sim += 1 | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
"""
The rrule module offers a small, complete, and very fast, implementation of
the recurrence rules documented in the
`iCalendar RFC <http://www.ietf.org/rfc/rfc2445.txt>`_,
including support for caching of results.
"""
import itertools
import datetime
import calendar
import sys
from fractions import gcd
from six import advance_iterator, integer_types
from six.moves import _thread
__all__ = ["rrule", "rruleset", "rrulestr",
"YEARLY", "MONTHLY", "WEEKLY", "DAILY",
"HOURLY", "MINUTELY", "SECONDLY",
"MO", "TU", "WE", "TH", "FR", "SA", "SU"]
# Every mask is 7 days longer to handle cross-year weekly periods.
M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 +
[7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
M365MASK = list(M366MASK)
M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32))
MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
MDAY365MASK = list(MDAY366MASK)
M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0))
NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
NMDAY365MASK = list(NMDAY366MASK)
M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366)
M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365)
WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55
del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
MDAY365MASK = tuple(MDAY365MASK)
M365MASK = tuple(M365MASK)
(YEARLY,
MONTHLY,
WEEKLY,
DAILY,
HOURLY,
MINUTELY,
SECONDLY) = list(range(7))
# Imported on demand.
easter = None
parser = None
class weekday(object):
__slots__ = ["weekday", "n"]
def __init__(self, weekday, n=None):
if n == 0:
raise ValueError("Can't create weekday with n == 0")
self.weekday = weekday
self.n = n
def __call__(self, n):
if n == self.n:
return self
else:
return self.__class__(self.weekday, n)
def __eq__(self, other):
try:
if self.weekday != other.weekday or self.n != other.n:
return False
except AttributeError:
return False
return True
def __repr__(self):
s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
if not self.n:
return s
else:
return "%s(%+d)" % (s, self.n)
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
class rrulebase(object):
def __init__(self, cache=False):
if cache:
self._cache = []
self._cache_lock = _thread.allocate_lock()
self._cache_gen = self._iter()
self._cache_complete = False
else:
self._cache = None
self._cache_complete = False
self._len = None
def __iter__(self):
if self._cache_complete:
return iter(self._cache)
elif self._cache is None:
return self._iter()
else:
return self._iter_cached()
def _iter_cached(self):
i = 0
gen = self._cache_gen
cache = self._cache
acquire = self._cache_lock.acquire
release = self._cache_lock.release
while gen:
if i == len(cache):
acquire()
if self._cache_complete:
break
try:
for j in range(10):
cache.append(advance_iterator(gen))
except StopIteration:
self._cache_gen = gen = None
self._cache_complete = True
break
release()
yield cache[i]
i += 1
while i < self._len:
yield cache[i]
i += 1
def __getitem__(self, item):
if self._cache_complete:
return self._cache[item]
elif isinstance(item, slice):
if item.step and item.step < 0:
return list(iter(self))[item]
else:
return list(itertools.islice(self,
item.start or 0,
item.stop or sys.maxsize,
item.step or 1))
elif item >= 0:
gen = iter(self)
try:
for i in range(item+1):
res = advance_iterator(gen)
except StopIteration:
raise IndexError
return res
else:
return list(iter(self))[item]
def __contains__(self, item):
if self._cache_complete:
return item in self._cache
else:
for i in self:
if i == item:
return True
elif i > item:
return False
return False
# __len__() introduces a large performance penality.
def count(self):
""" Returns the number of recurrences in this set. It will have go
trough the whole recurrence, if this hasn't been done before. """
if self._len is None:
for x in self:
pass
return self._len
def before(self, dt, inc=False):
""" Returns the last recurrence before the given datetime instance. The
inc keyword defines what happens if dt is an occurrence. With
inc=True, if dt itself is an occurrence, it will be returned. """
if self._cache_complete:
gen = self._cache
else:
gen = self
last = None
if inc:
for i in gen:
if i > dt:
break
last = i
else:
for i in gen:
if i >= dt:
break
last = i
return last
def after(self, dt, inc=False):
""" Returns the first recurrence after the given datetime instance. The
inc keyword defines what happens if dt is an occurrence. With
inc=True, if dt itself is an occurrence, it will be returned. """
if self._cache_complete:
gen = self._cache
else:
gen = self
if inc:
for i in gen:
if i >= dt:
return i
else:
for i in gen:
if i > dt:
return i
return None
def between(self, after, before, inc=False):
""" Returns all the occurrences of the rrule between after and before.
The inc keyword defines what happens if after and/or before are
themselves occurrences. With inc=True, they will be included in the
list, if they are found in the recurrence set. """
if self._cache_complete:
gen = self._cache
else:
gen = self
started = False
l = []
if inc:
for i in gen:
if i > before:
break
elif not started:
if i >= after:
started = True
l.append(i)
else:
l.append(i)
else:
for i in gen:
if i >= before:
break
elif not started:
if i > after:
started = True
l.append(i)
else:
l.append(i)
return l
class rrule(rrulebase):
"""
That's the base of the rrule operation. It accepts all the keywords
defined in the RFC as its constructor parameters (except byday,
which was renamed to byweekday) and more. The constructor prototype is::
rrule(freq)
Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
or SECONDLY.
Additionally, it supports the following keyword arguments:
:param cache:
If given, it must be a boolean value specifying to enable or disable
caching of results. If you will use the same rrule instance multiple
times, enabling caching will improve the performance considerably.
:param dtstart:
The recurrence start. Besides being the base for the recurrence,
missing parameters in the final recurrence instances will also be
extracted from this date. If not given, datetime.now() will be used
instead.
:param interval:
The interval between each freq iteration. For example, when using
YEARLY, an interval of 2 means once every two years, but with HOURLY,
it means once every two hours. The default interval is 1.
:param wkst:
The week start day. Must be one of the MO, TU, WE constants, or an
integer, specifying the first day of the week. This will affect
recurrences based on weekly periods. The default week start is got
from calendar.firstweekday(), and may be modified by
calendar.setfirstweekday().
:param count:
How many occurrences will be generated.
:param until:
If given, this must be a datetime instance, that will specify the
limit of the recurrence. If a recurrence instance happens to be the
same as the datetime instance given in the until keyword, this will
be the last occurrence.
:param bysetpos:
If given, it must be either an integer, or a sequence of integers,
positive or negative. Each given integer will specify an occurrence
number, corresponding to the nth occurrence of the rule inside the
frequency period. For example, a bysetpos of -1 if combined with a
MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will
result in the last work day of every month.
:param bymonth:
If given, it must be either an integer, or a sequence of integers,
meaning the months to apply the recurrence to.
:param bymonthday:
If given, it must be either an integer, or a sequence of integers,
meaning the month days to apply the recurrence to.
:param byyearday:
If given, it must be either an integer, or a sequence of integers,
meaning the year days to apply the recurrence to.
:param byweekno:
If given, it must be either an integer, or a sequence of integers,
meaning the week numbers to apply the recurrence to. Week numbers
have the meaning described in ISO8601, that is, the first week of
the year is that containing at least four days of the new year.
:param byweekday:
If given, it must be either an integer (0 == MO), a sequence of
integers, one of the weekday constants (MO, TU, etc), or a sequence
of these constants. When given, these variables will define the
weekdays where the recurrence will be applied. It's also possible to
use an argument n for the weekday instances, which will mean the nth
occurrence of this weekday in the period. For example, with MONTHLY,
or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the
first friday of the month where the recurrence happens. Notice that in
the RFC documentation, this is specified as BYDAY, but was renamed to
avoid the ambiguity of that keyword.
:param byhour:
If given, it must be either an integer, or a sequence of integers,
meaning the hours to apply the recurrence to.
:param byminute:
If given, it must be either an integer, or a sequence of integers,
meaning the minutes to apply the recurrence to.
:param bysecond:
If given, it must be either an integer, or a sequence of integers,
meaning the seconds to apply the recurrence to.
:param byeaster:
If given, it must be either an integer, or a sequence of integers,
positive or negative. Each integer will define an offset from the
Easter Sunday. Passing the offset 0 to byeaster will yield the Easter
Sunday itself. This is an extension to the RFC specification.
"""
def __init__(self, freq, dtstart=None,
interval=1, wkst=None, count=None, until=None, bysetpos=None,
bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
byweekno=None, byweekday=None,
byhour=None, byminute=None, bysecond=None,
cache=False):
super(rrule, self).__init__(cache)
global easter
if not dtstart:
dtstart = datetime.datetime.now().replace(microsecond=0)
elif not isinstance(dtstart, datetime.datetime):
dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
else:
dtstart = dtstart.replace(microsecond=0)
self._dtstart = dtstart
self._tzinfo = dtstart.tzinfo
self._freq = freq
self._interval = interval
self._count = count
if until and not isinstance(until, datetime.datetime):
until = datetime.datetime.fromordinal(until.toordinal())
self._until = until
if wkst is None:
self._wkst = calendar.firstweekday()
elif isinstance(wkst, integer_types):
self._wkst = wkst
else:
self._wkst = wkst.weekday
if bysetpos is None:
self._bysetpos = None
elif isinstance(bysetpos, integer_types):
if bysetpos == 0 or not (-366 <= bysetpos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
self._bysetpos = (bysetpos,)
else:
self._bysetpos = tuple(bysetpos)
for pos in self._bysetpos:
if pos == 0 or not (-366 <= pos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
if (byweekno is None and byyearday is None and bymonthday is None and
byweekday is None and byeaster is None):
if freq == YEARLY:
if bymonth is None:
bymonth = dtstart.month
bymonthday = dtstart.day
elif freq == MONTHLY:
bymonthday = dtstart.day
elif freq == WEEKLY:
byweekday = dtstart.weekday()
# bymonth
if bymonth is None:
self._bymonth = None
else:
if isinstance(bymonth, integer_types):
bymonth = (bymonth,)
self._bymonth = tuple(sorted(set(bymonth)))
# byyearday
if byyearday is None:
self._byyearday = None
else:
if isinstance(byyearday, integer_types):
byyearday = (byyearday,)
self._byyearday = tuple(sorted(set(byyearday)))
# byeaster
if byeaster is not None:
if not easter:
from dateutil import easter
if isinstance(byeaster, integer_types):
self._byeaster = (byeaster,)
else:
self._byeaster = tuple(sorted(byeaster))
else:
self._byeaster = None
# bymonthay
if bymonthday is None:
self._bymonthday = ()
self._bynmonthday = ()
else:
if isinstance(bymonthday, integer_types):
bymonthday = (bymonthday,)
self._bymonthday = tuple(sorted(set([x for x in bymonthday if x > 0])))
self._bynmonthday = tuple(sorted(set([x for x in bymonthday if x < 0])))
# byweekno
if byweekno is None:
self._byweekno = None
else:
if isinstance(byweekno, integer_types):
byweekno = (byweekno,)
self._byweekno = tuple(sorted(set(byweekno)))
# byweekday / bynweekday
if byweekday is None:
self._byweekday = None
self._bynweekday = None
else:
# If it's one of the valid non-sequence types, convert to a
# single-element sequence before the iterator that builds the
# byweekday set.
if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"):
byweekday = (byweekday,)
self._byweekday = set()
self._bynweekday = set()
for wday in byweekday:
if isinstance(wday, integer_types):
self._byweekday.add(wday)
elif not wday.n or freq > MONTHLY:
self._byweekday.add(wday.weekday)
else:
self._bynweekday.add((wday.weekday, wday.n))
if not self._byweekday:
self._byweekday = None
elif not self._bynweekday:
self._bynweekday = None
if self._byweekday is not None:
self._byweekday = tuple(sorted(self._byweekday))
if self._bynweekday is not None:
self._bynweekday = tuple(sorted(self._bynweekday))
# byhour
if byhour is None:
if freq < HOURLY:
self._byhour = set((dtstart.hour,))
else:
self._byhour = None
else:
if isinstance(byhour, integer_types):
byhour = (byhour,)
if freq == HOURLY:
self._byhour = self.__construct_byset(start=dtstart.hour,
byxxx=byhour,
base=24)
else:
self._byhour = set(byhour)
self._byhour = tuple(sorted(self._byhour))
# byminute
if byminute is None:
if freq < MINUTELY:
self._byminute = set((dtstart.minute,))
else:
self._byminute = None
else:
if isinstance(byminute, integer_types):
byminute = (byminute,)
if freq == MINUTELY:
self._byminute = self.__construct_byset(start=dtstart.minute,
byxxx=byminute,
base=60)
else:
self._byminute = set(byminute)
self._byminute = tuple(sorted(self._byminute))
# bysecond
if bysecond is None:
if freq < SECONDLY:
self._bysecond = ((dtstart.second,))
else:
self._bysecond = None
else:
if isinstance(bysecond, integer_types):
bysecond = (bysecond,)
self._bysecond = set(bysecond)
if freq == SECONDLY:
self._bysecond = self.__construct_byset(start=dtstart.second,
byxxx=bysecond,
base=60)
else:
self._bysecond = set(bysecond)
self._bysecond = tuple(sorted(self._bysecond))
if self._freq >= HOURLY:
self._timeset = None
else:
self._timeset = []
for hour in self._byhour:
for minute in self._byminute:
for second in self._bysecond:
self._timeset.append(
datetime.time(hour, minute, second,
tzinfo=self._tzinfo))
self._timeset.sort()
self._timeset = tuple(self._timeset)
def _iter(self):
year, month, day, hour, minute, second, weekday, yearday, _ = \
self._dtstart.timetuple()
# Some local variables to speed things up a bit
freq = self._freq
interval = self._interval
wkst = self._wkst
until = self._until
bymonth = self._bymonth
byweekno = self._byweekno
byyearday = self._byyearday
byweekday = self._byweekday
byeaster = self._byeaster
bymonthday = self._bymonthday
bynmonthday = self._bynmonthday
bysetpos = self._bysetpos
byhour = self._byhour
byminute = self._byminute
bysecond = self._bysecond
ii = _iterinfo(self)
ii.rebuild(year, month)
getdayset = {YEARLY: ii.ydayset,
MONTHLY: ii.mdayset,
WEEKLY: ii.wdayset,
DAILY: ii.ddayset,
HOURLY: ii.ddayset,
MINUTELY: ii.ddayset,
SECONDLY: ii.ddayset}[freq]
if freq < HOURLY:
timeset = self._timeset
else:
gettimeset = {HOURLY: ii.htimeset,
MINUTELY: ii.mtimeset,
SECONDLY: ii.stimeset}[freq]
if ((freq >= HOURLY and
self._byhour and hour not in self._byhour) or
(freq >= MINUTELY and
self._byminute and minute not in self._byminute) or
(freq >= SECONDLY and
self._bysecond and second not in self._bysecond)):
timeset = ()
else:
timeset = gettimeset(hour, minute, second)
total = 0
count = self._count
while True:
# Get dayset with the right frequency
dayset, start, end = getdayset(year, month, day)
# Do the "hard" work ;-)
filtered = False
for i in dayset[start:end]:
if ((bymonth and ii.mmask[i] not in bymonth) or
(byweekno and not ii.wnomask[i]) or
(byweekday and ii.wdaymask[i] not in byweekday) or
(ii.nwdaymask and not ii.nwdaymask[i]) or
(byeaster and not ii.eastermask[i]) or
((bymonthday or bynmonthday) and
ii.mdaymask[i] not in bymonthday and
ii.nmdaymask[i] not in bynmonthday) or
(byyearday and
((i < ii.yearlen and i+1 not in byyearday and
-ii.yearlen+i not in byyearday) or
(i >= ii.yearlen and i+1-ii.yearlen not in byyearday and
-ii.nextyearlen+i-ii.yearlen not in byyearday)))):
dayset[i] = None
filtered = True
# Output results
if bysetpos and timeset:
poslist = []
for pos in bysetpos:
if pos < 0:
daypos, timepos = divmod(pos, len(timeset))
else:
daypos, timepos = divmod(pos-1, len(timeset))
try:
i = [x for x in dayset[start:end]
if x is not None][daypos]
time = timeset[timepos]
except IndexError:
pass
else:
date = datetime.date.fromordinal(ii.yearordinal+i)
res = datetime.datetime.combine(date, time)
if res not in poslist:
poslist.append(res)
poslist.sort()
for res in poslist:
if until and res > until:
self._len = total
return
elif res >= self._dtstart:
total += 1
yield res
if count:
count -= 1
if not count:
self._len = total
return
else:
for i in dayset[start:end]:
if i is not None:
date = datetime.date.fromordinal(ii.yearordinal+i)
for time in timeset:
res = datetime.datetime.combine(date, time)
if until and res > until:
self._len = total
return
elif res >= self._dtstart:
total += 1
yield res
if count:
count -= 1
if not count:
self._len = total
return
# Handle frequency and interval
fixday = False
if freq == YEARLY:
year += interval
if year > datetime.MAXYEAR:
self._len = total
return
ii.rebuild(year, month)
elif freq == MONTHLY:
month += interval
if month > 12:
div, mod = divmod(month, 12)
month = mod
year += div
if month == 0:
month = 12
year -= 1
if year > datetime.MAXYEAR:
self._len = total
return
ii.rebuild(year, month)
elif freq == WEEKLY:
if wkst > weekday:
day += -(weekday+1+(6-wkst))+self._interval*7
else:
day += -(weekday-wkst)+self._interval*7
weekday = wkst
fixday = True
elif freq == DAILY:
day += interval
fixday = True
elif freq == HOURLY:
if filtered:
# Jump to one iteration before next day
hour += ((23-hour)//interval)*interval
if byhour:
ndays, hour = self.__mod_distance(value=hour,
byxxx=self._byhour,
base=24)
else:
ndays, hour = divmod(hour+interval, 24)
if ndays:
day += ndays
fixday = True
timeset = gettimeset(hour, minute, second)
elif freq == MINUTELY:
if filtered:
# Jump to one iteration before next day
minute += ((1439-(hour*60+minute))//interval)*interval
valid = False
rep_rate = (24*60)
for j in range(rep_rate // gcd(interval, rep_rate)):
if byminute:
nhours, minute = \
self.__mod_distance(value=minute,
byxxx=self._byminute,
base=60)
else:
nhours, minute = divmod(minute+interval, 60)
div, hour = divmod(hour+nhours, 24)
if div:
day += div
fixday = True
filtered = False
if not byhour or hour in byhour:
valid = True
break
if not valid:
raise ValueError('Invalid combination of interval and ' +
'byhour resulting in empty rule.')
timeset = gettimeset(hour, minute, second)
elif freq == SECONDLY:
if filtered:
# Jump to one iteration before next day
second += (((86399-(hour*3600+minute*60+second))
// interval)*interval)
rep_rate = (24*3600)
valid = False
for j in range(0, rep_rate // gcd(interval, rep_rate)):
if bysecond:
nminutes, second = \
self.__mod_distance(value=second,
byxxx=self._bysecond,
base=60)
else:
nminutes, second = divmod(second+interval, 60)
div, minute = divmod(minute+nminutes, 60)
if div:
hour += div
div, hour = divmod(hour, 24)
if div:
day += div
fixday = True
if ((not byhour or hour in byhour) and
(not byminute or minute in byminute) and
(not bysecond or second in bysecond)):
valid = True
break
if not valid:
raise ValueError('Invalid combination of interval, ' +
'byhour and byminute resulting in empty' +
' rule.')
timeset = gettimeset(hour, minute, second)
if fixday and day > 28:
daysinmonth = calendar.monthrange(year, month)[1]
if day > daysinmonth:
while day > daysinmonth:
day -= daysinmonth
month += 1
if month == 13:
month = 1
year += 1
if year > datetime.MAXYEAR:
self._len = total
return
daysinmonth = calendar.monthrange(year, month)[1]
ii.rebuild(year, month)
def __construct_byset(self, start, byxxx, base):
"""
If a `BYXXX` sequence is passed to the constructor at the same level as
`FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some
specifications which cannot be reached given some starting conditions.
This occurs whenever the interval is not coprime with the base of a
given unit and the difference between the starting position and the
ending position is not coprime with the greatest common denominator
between the interval and the base. For example, with a FREQ of hourly
starting at 17:00 and an interval of 4, the only valid values for
BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not
coprime.
:param start:
Specifies the starting position.
:param byxxx:
An iterable containing the list of allowed values.
:param base:
The largest allowable value for the specified frequency (e.g.
24 hours, 60 minutes).
This does not preserve the type of the iterable, returning a set, since
the values should be unique and the order is irrelevant, this will
speed up later lookups.
In the event of an empty set, raises a :exception:`ValueError`, as this
results in an empty rrule.
"""
cset = set()
# Support a single byxxx value.
if isinstance(byxxx, integer_types):
byxxx = (byxxx, )
for num in byxxx:
i_gcd = gcd(self._interval, base)
# Use divmod rather than % because we need to wrap negative nums.
if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0:
cset.add(num)
if len(cset) == 0:
raise ValueError("Invalid rrule byxxx generates an empty set.")
return cset
def __mod_distance(self, value, byxxx, base):
"""
Calculates the next value in a sequence where the `FREQ` parameter is
specified along with a `BYXXX` parameter at the same "level"
(e.g. `HOURLY` specified with `BYHOUR`).
:param value:
The old value of the component.
:param byxxx:
The `BYXXX` set, which should have been generated by
`rrule._construct_byset`, or something else which checks that a
valid rule is present.
:param base:
The largest allowable value for the specified frequency (e.g.
24 hours, 60 minutes).
If a valid value is not found after `base` iterations (the maximum
number before the sequence would start to repeat), this raises a
:exception:`ValueError`, as no valid values were found.
This returns a tuple of `divmod(n*interval, base)`, where `n` is the
smallest number of `interval` repetitions until the next specified
value in `byxxx` is found.
"""
accumulator = 0
for ii in range(1, base + 1):
# Using divmod() over % to account for negative intervals
div, value = divmod(value + self._interval, base)
accumulator += div
if value in byxxx:
return (accumulator, value)
class _iterinfo(object):
    """Cached per-(year, month) calendar masks used while iterating an rrule.

    ``rebuild(year, month)`` lazily refreshes the masks whenever iteration
    crosses into a new year or month; the ``*dayset`` / ``*timeset`` helpers
    slice those masks into candidate day/time sets for a single period.
    """
    __slots__ = ["rrule", "lastyear", "lastmonth",
                 "yearlen", "nextyearlen", "yearordinal", "yearweekday",
                 "mmask", "mrange", "mdaymask", "nmdaymask",
                 "wdaymask", "wnomask", "nwdaymask", "eastermask"]
    def __init__(self, rrule):
        # Clear every cached attribute so the first rebuild() recomputes all.
        for attr in self.__slots__:
            setattr(self, attr, None)
        self.rrule = rrule
    def rebuild(self, year, month):
        # Every mask is 7 days longer to handle cross-year weekly periods.
        rr = self.rrule
        if year != self.lastyear:
            self.yearlen = 365+calendar.isleap(year)
            self.nextyearlen = 365+calendar.isleap(year+1)
            firstyday = datetime.date(year, 1, 1)
            self.yearordinal = firstyday.toordinal()
            self.yearweekday = firstyday.weekday()
            wday = datetime.date(year, 1, 1).weekday()
            if self.yearlen == 365:
                # Non-leap year: pick the 365-day precomputed masks.
                self.mmask = M365MASK
                self.mdaymask = MDAY365MASK
                self.nmdaymask = NMDAY365MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M365RANGE
            else:
                self.mmask = M366MASK
                self.mdaymask = MDAY366MASK
                self.nmdaymask = NMDAY366MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M366RANGE
            if not rr._byweekno:
                self.wnomask = None
            else:
                # Build a per-day mask of which days fall in a requested
                # ISO-style week number (relative to rr._wkst).
                self.wnomask = [0]*(self.yearlen+7)
                # no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
                no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7
                if no1wkst >= 4:
                    no1wkst = 0
                    # Number of days in the year, plus the days we got
                    # from last year.
                    wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7
                else:
                    # Number of days in the year, minus the days we
                    # left in last year.
                    wyearlen = self.yearlen-no1wkst
                div, mod = divmod(wyearlen, 7)
                numweeks = div+mod//4
                for n in rr._byweekno:
                    if n < 0:
                        # Negative week numbers count back from the end.
                        n += numweeks+1
                    if not (0 < n <= numweeks):
                        continue
                    if n > 1:
                        i = no1wkst+(n-1)*7
                        if no1wkst != firstwkst:
                            i -= 7-firstwkst
                    else:
                        i = no1wkst
                    for j in range(7):
                        self.wnomask[i] = 1
                        i += 1
                        if self.wdaymask[i] == rr._wkst:
                            break
                if 1 in rr._byweekno:
                    # Check week number 1 of next year as well
                    # TODO: Check -numweeks for next year.
                    i = no1wkst+numweeks*7
                    if no1wkst != firstwkst:
                        i -= 7-firstwkst
                    if i < self.yearlen:
                        # If week starts in next year, we
                        # don't care about it.
                        for j in range(7):
                            self.wnomask[i] = 1
                            i += 1
                            if self.wdaymask[i] == rr._wkst:
                                break
                if no1wkst:
                    # Check last week number of last year as
                    # well. If no1wkst is 0, either the year
                    # started on week start, or week number 1
                    # got days from last year, so there are no
                    # days from last year's last week number in
                    # this year.
                    if -1 not in rr._byweekno:
                        lyearweekday = datetime.date(year-1, 1, 1).weekday()
                        lno1wkst = (7-lyearweekday+rr._wkst) % 7
                        lyearlen = 365+calendar.isleap(year-1)
                        if lno1wkst >= 4:
                            lno1wkst = 0
                            lnumweeks = 52+(lyearlen +
                                            (lyearweekday-rr._wkst) % 7) % 7//4
                        else:
                            lnumweeks = 52+(self.yearlen-no1wkst) % 7//4
                    else:
                        lnumweeks = -1
                    if lnumweeks in rr._byweekno:
                        for i in range(no1wkst):
                            self.wnomask[i] = 1
        if (rr._bynweekday and (month != self.lastmonth or
                                year != self.lastyear)):
            # Recompute the "nth weekday of period" mask (e.g. 2nd Tuesday).
            ranges = []
            if rr._freq == YEARLY:
                if rr._bymonth:
                    for month in rr._bymonth:
                        ranges.append(self.mrange[month-1:month+1])
                else:
                    ranges = [(0, self.yearlen)]
            elif rr._freq == MONTHLY:
                ranges = [self.mrange[month-1:month+1]]
            if ranges:
                # Weekly frequency won't get here, so we may not
                # care about cross-year weekly periods.
                self.nwdaymask = [0]*self.yearlen
                for first, last in ranges:
                    last -= 1
                    for wday, n in rr._bynweekday:
                        if n < 0:
                            # Count back from the end of the range.
                            i = last+(n+1)*7
                            i -= (self.wdaymask[i]-wday) % 7
                        else:
                            i = first+(n-1)*7
                            i += (7-self.wdaymask[i]+wday) % 7
                        if first <= i <= last:
                            self.nwdaymask[i] = 1
        if rr._byeaster:
            # Mark days at the requested offsets from Easter of this year.
            self.eastermask = [0]*(self.yearlen+7)
            eyday = easter.easter(year).toordinal()-self.yearordinal
            for offset in rr._byeaster:
                self.eastermask[eyday+offset] = 1
        self.lastyear = year
        self.lastmonth = month
    def ydayset(self, year, month, day):
        # Candidate day set for a YEARLY period: every day of the year.
        return list(range(self.yearlen)), 0, self.yearlen
    def mdayset(self, year, month, day):
        # Candidate day set for a MONTHLY period: days of `month` only.
        dset = [None]*self.yearlen
        start, end = self.mrange[month-1:month+1]
        for i in range(start, end):
            dset[i] = i
        return dset, start, end
    def wdayset(self, year, month, day):
        # We need to handle cross-year weeks here.
        dset = [None]*(self.yearlen+7)
        i = datetime.date(year, month, day).toordinal()-self.yearordinal
        start = i
        for j in range(7):
            dset[i] = i
            i += 1
            # if (not (0 <= i < self.yearlen) or
            #     self.wdaymask[i] == self.rrule._wkst):
            # This will cross the year boundary, if necessary.
            if self.wdaymask[i] == self.rrule._wkst:
                break
        return dset, start, i
    def ddayset(self, year, month, day):
        # Candidate day set for a DAILY period: the single given day.
        dset = [None]*self.yearlen
        i = datetime.date(year, month, day).toordinal()-self.yearordinal
        dset[i] = i
        return dset, i, i+1
    def htimeset(self, hour, minute, second):
        # Sorted times for an HOURLY period: all byminute/bysecond combos.
        tset = []
        rr = self.rrule
        for minute in rr._byminute:
            for second in rr._bysecond:
                tset.append(datetime.time(hour, minute, second,
                                          tzinfo=rr._tzinfo))
        tset.sort()
        return tset
    def mtimeset(self, hour, minute, second):
        # Sorted times for a MINUTELY period: all bysecond values.
        tset = []
        rr = self.rrule
        for second in rr._bysecond:
            tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
        tset.sort()
        return tset
    def stimeset(self, hour, minute, second):
        # SECONDLY period: exactly one time value.
        return (datetime.time(hour, minute, second,
                tzinfo=self.rrule._tzinfo),)
class rruleset(rrulebase):
    """ The rruleset type allows more complex recurrence setups, mixing
    multiple rules, dates, exclusion rules, and exclusion dates. The type
    constructor takes the following keyword arguments:
    :param cache: If True, caching of results will be enabled, improving
    performance of multiple queries considerably. """
    class _genitem(object):
        # Wraps a generator/iterator and exposes its current value as
        # ``self.dt`` so heterogeneous sources can be merge-sorted together.
        def __init__(self, genlist, gen):
            try:
                self.dt = advance_iterator(gen)
                genlist.append(self)
            except StopIteration:
                # Exhausted immediately: never joins the merge list.
                pass
            self.genlist = genlist
            self.gen = gen
        def __next__(self):
            try:
                self.dt = advance_iterator(self.gen)
            except StopIteration:
                # NOTE(review): the item is removed from the merge list but
                # keeps its stale ``dt``; callers must not compare it after
                # removal.
                self.genlist.remove(self)
        next = __next__
        def __lt__(self, other):
            return self.dt < other.dt
        def __gt__(self, other):
            return self.dt > other.dt
        def __eq__(self, other):
            return self.dt == other.dt
        def __ne__(self, other):
            return self.dt != other.dt
    def __init__(self, cache=False):
        super(rruleset, self).__init__(cache)
        # Inclusion rules/dates and exclusion rules/dates, merged in _iter().
        self._rrule = []
        self._rdate = []
        self._exrule = []
        self._exdate = []
    def rrule(self, rrule):
        """ Include the given :py:class:`rrule` instance in the recurrence set
        generation. """
        self._rrule.append(rrule)
    def rdate(self, rdate):
        """ Include the given :py:class:`datetime` instance in the recurrence
        set generation. """
        self._rdate.append(rdate)
    def exrule(self, exrule):
        """ Include the given rrule instance in the recurrence set exclusion
        list. Dates which are part of the given recurrence rules will not
        be generated, even if some inclusive rrule or rdate matches them.
        """
        self._exrule.append(exrule)
    def exdate(self, exdate):
        """ Include the given datetime instance in the recurrence set
        exclusion list. Dates included that way will not be generated,
        even if some inclusive rrule or rdate matches them. """
        self._exdate.append(exdate)
    def _iter(self):
        # Merge all inclusion sources in ascending order, skipping any
        # datetime also produced by an exclusion source, and de-duplicating
        # consecutive equal values via ``lastdt``.
        rlist = []
        self._rdate.sort()
        self._genitem(rlist, iter(self._rdate))
        for gen in [iter(x) for x in self._rrule]:
            self._genitem(rlist, gen)
        rlist.sort()
        exlist = []
        self._exdate.sort()
        self._genitem(exlist, iter(self._exdate))
        for gen in [iter(x) for x in self._exrule]:
            self._genitem(exlist, gen)
        exlist.sort()
        lastdt = None
        total = 0
        while rlist:
            ritem = rlist[0]
            if not lastdt or lastdt != ritem.dt:
                # Advance exclusions past values earlier than the candidate.
                while exlist and exlist[0] < ritem:
                    advance_iterator(exlist[0])
                    exlist.sort()
                if not exlist or ritem != exlist[0]:
                    total += 1
                    yield ritem.dt
                lastdt = ritem.dt
            advance_iterator(ritem)
            rlist.sort()
        self._len = total
class _rrulestr(object):
    """Parser turning RFC 2445-style ``RRULE``/``RDATE``/``EXRULE``/
    ``EXDATE``/``DTSTART`` text into :class:`rrule` or :class:`rruleset`
    instances.  Exposed through the module-level ``rrulestr`` callable.
    """
    _freq_map = {"YEARLY": YEARLY,
                 "MONTHLY": MONTHLY,
                 "WEEKLY": WEEKLY,
                 "DAILY": DAILY,
                 "HOURLY": HOURLY,
                 "MINUTELY": MINUTELY,
                 "SECONDLY": SECONDLY}
    _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3,
                    "FR": 4, "SA": 5, "SU": 6}

    def _handle_int(self, rrkwargs, name, value, **kwargs):
        # Generic handler for single-integer properties (INTERVAL, COUNT).
        rrkwargs[name.lower()] = int(value)

    def _handle_int_list(self, rrkwargs, name, value, **kwargs):
        # Generic handler for comma-separated integer lists (BYMONTH, ...).
        rrkwargs[name.lower()] = [int(x) for x in value.split(',')]

    _handle_INTERVAL = _handle_int
    _handle_COUNT = _handle_int
    _handle_BYSETPOS = _handle_int_list
    _handle_BYMONTH = _handle_int_list
    _handle_BYMONTHDAY = _handle_int_list
    _handle_BYYEARDAY = _handle_int_list
    _handle_BYEASTER = _handle_int_list
    _handle_BYWEEKNO = _handle_int_list
    _handle_BYHOUR = _handle_int_list
    _handle_BYMINUTE = _handle_int_list
    _handle_BYSECOND = _handle_int_list

    def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
        rrkwargs["freq"] = self._freq_map[value]

    def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
        # The date parser is imported lazily to avoid a circular import.
        global parser
        if not parser:
            from dateutil import parser
        try:
            rrkwargs["until"] = parser.parse(value,
                                             ignoretz=kwargs.get("ignoretz"),
                                             tzinfos=kwargs.get("tzinfos"))
        except ValueError:
            raise ValueError("invalid until date")

    def _handle_WKST(self, rrkwargs, name, value, **kwargs):
        rrkwargs["wkst"] = self._weekday_map[value]

    def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
        # Parse entries like "MO", "+2TU" or "-1FR" into weekday instances.
        # (Fixed the **kwarsg typo in the catch-all parameter name.)
        wdays = []
        for wday in value.split(','):
            # Split the optional signed ordinal prefix from the day code.
            for i in range(len(wday)):
                if wday[i] not in '+-0123456789':
                    break
            n = wday[:i] or None
            w = wday[i:]
            if n:
                n = int(n)
            wdays.append(weekdays[self._weekday_map[w]](n))
        rrkwargs["byweekday"] = wdays

    _handle_BYDAY = _handle_BYWEEKDAY

    def _parse_rfc_rrule(self, line,
                         dtstart=None,
                         cache=False,
                         ignoretz=False,
                         tzinfos=None):
        """Parse a single RRULE content line (with or without the
        ``RRULE:`` prefix) into an :class:`rrule` instance.

        :raises ValueError: on an unknown property name, an unknown
            parameter, or an invalid parameter value.
        """
        if line.find(':') != -1:
            # maxsplit=1 so a stray ':' inside the value raises the intended
            # "unknown parameter name" error instead of an unpacking error.
            name, value = line.split(':', 1)
            if name != "RRULE":
                raise ValueError("unknown parameter name")
        else:
            value = line
        rrkwargs = {}
        for pair in value.split(';'):
            name, value = pair.split('=')
            name = name.upper()
            value = value.upper()
            try:
                # Dispatch to the matching _handle_<NAME> method.
                getattr(self, "_handle_"+name)(rrkwargs, name, value,
                                               ignoretz=ignoretz,
                                               tzinfos=tzinfos)
            except AttributeError:
                raise ValueError("unknown parameter '%s'" % name)
            except (KeyError, ValueError):
                raise ValueError("invalid '%s': %s" % (name, value))
        return rrule(dtstart=dtstart, cache=cache, **rrkwargs)

    def _parse_rfc(self, s,
                   dtstart=None,
                   cache=False,
                   unfold=False,
                   forceset=False,
                   compatible=False,
                   ignoretz=False,
                   tzinfos=None):
        """Parse a full snippet, possibly containing several properties.

        Returns a plain :class:`rrule` for a single RRULE when ``forceset``
        is false; otherwise builds an :class:`rruleset`.
        ``compatible=True`` implies ``forceset`` and ``unfold``, and also
        adds ``dtstart`` itself as an RDATE.
        """
        global parser
        if compatible:
            forceset = True
            unfold = True
        s = s.upper()
        if not s.strip():
            raise ValueError("empty string")
        if unfold:
            # RFC 2445 folds long lines; a leading space marks a
            # continuation of the previous line.
            lines = s.splitlines()
            i = 0
            while i < len(lines):
                line = lines[i].rstrip()
                if not line:
                    del lines[i]
                elif i > 0 and line[0] == " ":
                    lines[i-1] += line[1:]
                    del lines[i]
                else:
                    i += 1
        else:
            lines = s.split()
        if (not forceset and len(lines) == 1 and (s.find(':') == -1 or
                                                  s.startswith('RRULE:'))):
            # Fast path: a single bare RRULE.
            return self._parse_rfc_rrule(lines[0], cache=cache,
                                         dtstart=dtstart, ignoretz=ignoretz,
                                         tzinfos=tzinfos)
        else:
            rrulevals = []
            rdatevals = []
            exrulevals = []
            exdatevals = []
            for line in lines:
                if not line:
                    continue
                if line.find(':') == -1:
                    name = "RRULE"
                    value = line
                else:
                    name, value = line.split(':', 1)
                parms = name.split(';')
                if not parms:
                    raise ValueError("empty property name")
                name = parms[0]
                parms = parms[1:]
                if name == "RRULE":
                    for parm in parms:
                        raise ValueError("unsupported RRULE parm: "+parm)
                    rrulevals.append(value)
                elif name == "RDATE":
                    for parm in parms:
                        if parm != "VALUE=DATE-TIME":
                            raise ValueError("unsupported RDATE parm: "+parm)
                    rdatevals.append(value)
                elif name == "EXRULE":
                    for parm in parms:
                        raise ValueError("unsupported EXRULE parm: "+parm)
                    exrulevals.append(value)
                elif name == "EXDATE":
                    for parm in parms:
                        if parm != "VALUE=DATE-TIME":
                            # Fixed copy/paste error: the message used to
                            # say "RDATE" for an EXDATE parameter.
                            raise ValueError("unsupported EXDATE parm: "+parm)
                    exdatevals.append(value)
                elif name == "DTSTART":
                    for parm in parms:
                        raise ValueError("unsupported DTSTART parm: "+parm)
                    if not parser:
                        from dateutil import parser
                    dtstart = parser.parse(value, ignoretz=ignoretz,
                                           tzinfos=tzinfos)
                else:
                    raise ValueError("unsupported property: "+name)
            if (forceset or len(rrulevals) > 1 or rdatevals
                    or exrulevals or exdatevals):
                if not parser and (rdatevals or exdatevals):
                    from dateutil import parser
                rset = rruleset(cache=cache)
                for value in rrulevals:
                    rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
                                                     ignoretz=ignoretz,
                                                     tzinfos=tzinfos))
                for value in rdatevals:
                    for datestr in value.split(','):
                        rset.rdate(parser.parse(datestr,
                                                ignoretz=ignoretz,
                                                tzinfos=tzinfos))
                for value in exrulevals:
                    rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
                                                      ignoretz=ignoretz,
                                                      tzinfos=tzinfos))
                for value in exdatevals:
                    for datestr in value.split(','):
                        rset.exdate(parser.parse(datestr,
                                                 ignoretz=ignoretz,
                                                 tzinfos=tzinfos))
                if compatible and dtstart:
                    rset.rdate(dtstart)
                return rset
            else:
                return self._parse_rfc_rrule(rrulevals[0],
                                             dtstart=dtstart,
                                             cache=cache,
                                             ignoretz=ignoretz,
                                             tzinfos=tzinfos)

    def __call__(self, s, **kwargs):
        return self._parse_rfc(s, **kwargs)
# Module-level convenience instance: call as rrulestr("RRULE:FREQ=DAILY;...");
# delegates to _rrulestr.__call__ / _parse_rfc.
rrulestr = _rrulestr()
# vim:ts=4:sw=4:et
/* Copyright (c) 2013, 2025, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.
This program is designed to work with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation. The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have either included with
the program or referenced in the documentation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License, version 2.0, for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#ifndef _my_icp_h
#define _my_icp_h
/**
@file include/my_icp.h
*/
/**
  Values returned by index_cond_func_xxx functions.
  NOTE: enumerators take the default C values (ICP_NO_MATCH=0, ICP_MATCH=1,
  ICP_OUT_OF_RANGE=2) in declaration order.
*/
typedef enum icp_result {
  /** Index tuple doesn't satisfy the pushed index condition (the engine
  should discard the tuple and go to the next one) */
  ICP_NO_MATCH,
  /** Index tuple satisfies the pushed index condition (the engine should
  fetch and return the record) */
  ICP_MATCH,
  /** Index tuple is out of the range that we're scanning, e.g. if we're
  scanning "t.key BETWEEN 10 AND 20" and got a "t.key=21" tuple (the engine
  should stop scanning and return HA_ERR_END_OF_FILE right away). */
  ICP_OUT_OF_RANGE
} ICP_RESULT;
#endif /* _my_icp_h */ | c | github | https://github.com/mysql/mysql-server | include/my_icp.h |
import {Component, output} from '@angular/core';
// Child component for the "output" tutorial step: renders a single button
// wired to addItem().
@Component({
  selector: 'app-child',
  styles: `
    .btn {
      padding: 5px;
    }
  `,
  template: ` <button class="btn" (click)="addItem()">Add Item</button> `,
})
export class Child {
  // NOTE(review): intentionally empty tutorial scaffolding — `output` is
  // imported at the top of this file but unused, so addItem() is presumably
  // meant to emit through an output(); confirm against the completed step.
  addItem() {}
}
#include <cmath>
#include <limits>
#include <vector>
#include <c10/util/Half.h>
#include <c10/util/floating_point_utils.h>
#include <c10/util/irange.h>
#include <gtest/gtest.h>
namespace {
// Reference software conversion of an IEEE fp16 bit pattern into a float,
// used to cross-check c10::Half's native conversions in the tests below.
float halfbits2float(unsigned short h) {
  // Decompose the half: 1 sign bit, 5 exponent bits, 10 mantissa bits
  // (mantissa pre-shifted into fp32 position).
  unsigned sign = ((h >> 15) & 1);
  unsigned exponent = ((h >> 10) & 0x1f);
  unsigned mantissa = ((h & 0x3ff) << 13);
  if (exponent == 0x1f) { /* NaN or Inf */
    // All-ones exponent: produce a positive canonical NaN, or +/-Inf.
    mantissa = (mantissa ? (sign = 0, 0x7fffff) : 0);
    exponent = 0xff;
  } else if (!exponent) { /* Denorm or Zero */
    if (mantissa) {
      // Renormalize the subnormal: shift until the leading bit pops out,
      // decrementing the exponent for each shift.
      unsigned int msb = 0;
      exponent = 0x71;
      do {
        msb = (mantissa & 0x400000);
        mantissa <<= 1; /* normalize */
        --exponent;
      } while (!msb);
      mantissa &= 0x7fffff; /* 1.mantissa is implicit */
    }
  } else {
    // Normal number: rebias the exponent from fp16 (15) to fp32 (127).
    exponent += 0x70;
  }
  unsigned result_bit = (sign << 31) | (exponent << 23) | mantissa;
  return c10::detail::fp32_from_bits(result_bit);
}
// Reference software conversion of a float into IEEE fp16 bits with
// round-to-nearest-even; counterpart of halfbits2float() above.
unsigned short float2halfbits(float src) {
  unsigned x = c10::detail::fp32_to_bits(src);
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
  unsigned u = (x & 0x7fffffff), shift = 0;
  // Get rid of +NaN/-NaN case first.
  if (u > 0x7f800000) {
    return 0x7fffU;
  }
  unsigned sign = ((x >> 16) & 0x8000);
  // Get rid of +Inf/-Inf, +0/-0.
  if (u > 0x477fefff) {
    // Magnitude too large for fp16: return signed infinity.
    return sign | 0x7c00U;
  }
  if (u < 0x33000001) {
    // Magnitude too small to round up to the least fp16 subnormal:
    // return signed zero.
    return (sign | 0x0000);
  }
  unsigned exponent = ((u >> 23) & 0xff);
  unsigned mantissa = (u & 0x7fffff);
  if (exponent > 0x70) {
    // Normal fp16 result: rebias exponent, drop 13 mantissa bits.
    shift = 13;
    exponent -= 0x70;
  } else {
    // Subnormal fp16 result: widen the shift and make the hidden bit
    // explicit before truncating.
    shift = 0x7e - exponent;
    exponent = 0;
    mantissa |= 0x800000;
  }
  unsigned lsb = (1 << shift);
  unsigned lsb_s1 = (lsb >> 1);
  unsigned lsb_m1 = (lsb - 1);
  // Round to nearest even.
  unsigned remainder = (mantissa & lsb_m1);
  mantissa >>= shift;
  if (remainder > lsb_s1 || (remainder == lsb_s1 && (mantissa & 0x1))) {
    ++mantissa;
    if (!(mantissa & 0x3ff)) {
      // Mantissa overflowed on rounding: carry into the exponent.
      ++exponent;
      mantissa = 0;
    }
  }
  return (sign | (exponent << 10) | mantissa);
}
// Spot-check the portable reference converters against c10's fp16 helpers
// on a handful of interesting bit patterns (zero, NaN-ish, boundary values).
TEST(HalfConversionTest, TestPorableConversion) {
  const std::vector<uint16_t> inputs = {
      0,
      0xfbff, // 1111 1011 1111 1111
      (1 << 15 | 1),
      0x7bff // 0111 1011 1111 1111
  };
  for (const uint16_t bits : inputs) {
    const float expected = c10::detail::fp16_ieee_to_fp32_value(bits);
    EXPECT_EQ(halfbits2float(bits), expected)
        << "Test failed for uint16 to float " << bits << '\n';
    EXPECT_EQ(
        float2halfbits(expected),
        c10::detail::fp16_ieee_from_fp32_value(expected))
        << "Test failed for float to uint16" << expected << '\n';
  }
}
// Exhaustively compare c10::Half -> float against the reference converter.
TEST(HalfConversion, TestNativeConversionToFloat) {
  // There are only 2**16 possible values, so test them all
  const int num_half_patterns = std::numeric_limits<uint16_t>::max() + 1;
  for (int bits = 0; bits < num_half_patterns; ++bits) {
    const auto native = c10::Half(bits, c10::Half::from_bits());
    const float reference = halfbits2float(bits);
    // NaNs are not equal to each other
    if (std::isnan(reference) && std::isnan(static_cast<float>(native))) {
      continue;
    }
    EXPECT_EQ(reference, static_cast<float>(native))
        << "Conversion error using " << bits;
  }
}
// Compare float -> c10::Half against the reference converter, covering every
// half-representable value plus a few floats outside the fp16 range.
TEST(HalfConversion, TestNativeConversionToHalf) {
  const auto check_conversion = [](float value) {
    const auto native_half = c10::Half(value);
    const auto reference_bits = float2halfbits(value);
    // NaNs are not equal to each other, just check that half is NaN
    if (std::isnan(value)) {
      EXPECT_TRUE(std::isnan(static_cast<float>(native_half)));
    } else {
      EXPECT_EQ(native_half.x, reference_bits)
          << "Conversion error using " << value;
    }
  };
  for (int bits = 0; bits <= std::numeric_limits<uint16_t>::max(); ++bits) {
    check_conversion(halfbits2float(bits));
  }
  // Check a few values outside of Half range
  check_conversion(std::numeric_limits<float>::max());
  check_conversion(std::numeric_limits<float>::min());
  check_conversion(std::numeric_limits<float>::epsilon());
  check_conversion(std::numeric_limits<float>::lowest());
}
# Copyright 2012 NEC Corporation
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
DETAIL_URL = 'horizon:admin:networks:ports:detail'
NETWORKS_INDEX_URL = reverse('horizon:admin:networks:index')
NETWORKS_DETAIL_URL = 'horizon:admin:networks:detail'
class NetworkPortTests(test.BaseAdminViewTests):
    @test.create_stubs({api.neutron: ('network_get',
                                      'port_get',
                                      'is_extension_supported',)})
    def test_port_detail(self):
        # Detail view with the mac-learning extension disabled.
        self._test_port_detail()
    @test.create_stubs({api.neutron: ('network_get',
                                      'port_get',
                                      'is_extension_supported',)})
    def test_port_detail_with_mac_learning(self):
        # Detail view with the mac-learning extension enabled.
        self._test_port_detail(mac_learning=True)
    def _test_port_detail(self, mac_learning=False):
        """Shared scenario: stub the neutron API, GET the port detail page,
        and verify the rendered template and context port."""
        port = self.ports.first()
        network_id = self.networks.first().id
        # Expectations are consumed in this exact order once ReplayAll runs.
        api.neutron.port_get(IsA(http.HttpRequest), port.id)\
            .AndReturn(self.ports.first())
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .MultipleTimes().AndReturn(mac_learning)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'allowed-address-pairs') \
            .MultipleTimes().AndReturn(False)
        api.neutron.network_get(IsA(http.HttpRequest), network_id)\
            .AndReturn(self.networks.first())
        self.mox.ReplayAll()
        res = self.client.get(reverse(DETAIL_URL, args=[port.id]))
        self.assertTemplateUsed(res, 'horizon/common/_detail.html')
        self.assertEqual(res.context['port'].id, port.id)
    @test.create_stubs({api.neutron: ('port_get',)})
    def test_port_detail_exception(self):
        # A neutron error on the detail page should redirect to the index.
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest), port.id)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        res = self.client.get(reverse(DETAIL_URL, args=[port.id]))
        redir_url = NETWORKS_INDEX_URL
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_get',
                                      'is_extension_supported',)})
    def test_port_create_get(self):
        # Create form with all optional extensions disabled.
        self._test_port_create_get()
    @test.create_stubs({api.neutron: ('network_get',
                                      'is_extension_supported',)})
    def test_port_create_get_with_mac_learning(self):
        # Create form with the mac-learning extension enabled.
        self._test_port_create_get(mac_learning=True)
    @test.create_stubs({api.neutron: ('network_get',
                                      'is_extension_supported',)})
    def test_port_create_get_with_port_security(self):
        # Create form with the port-security extension enabled.
        self._test_port_create_get(port_security=True)
    def _test_port_create_get(self, mac_learning=False, binding=False,
                              port_security=False):
        """Shared scenario: stub extension lookups, GET the "add port" form,
        and check that the create template renders."""
        network = self.networks.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'binding')\
            .AndReturn(binding)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'port-security')\
            .AndReturn(port_security)
        self.mox.ReplayAll()
        url = reverse('horizon:admin:networks:addport',
                      args=[network.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'admin/networks/ports/create.html')
    @test.create_stubs({api.neutron: ('network_get',
                                      'is_extension_supported',
                                      'port_create',)})
    def test_port_create_post(self):
        # Successful port creation with all extensions disabled.
        self._test_port_create_post()
    @test.create_stubs({api.neutron: ('network_get',
                                      'is_extension_supported',
                                      'port_create',)})
    def test_port_create_post_with_mac_learning(self):
        # Successful port creation with mac-learning on, binding off.
        self._test_port_create_post(mac_learning=True, binding=False)
    @test.create_stubs({api.neutron: ('network_get',
                                      'is_extension_supported',
                                      'port_create',)})
    def test_port_create_post_with_port_security(self):
        # Successful port creation with the port-security extension on.
        self._test_port_create_post(port_security=True)
    def _test_port_create_post(self, mac_learning=False, binding=False,
                               port_security=False):
        """Shared scenario: POST the "add port" form and verify port_create
        is called with the expected kwargs, then redirect to the network."""
        network = self.networks.first()
        port = self.ports.first()
        # network_get is expected three times — presumably the form/workflow
        # fetches the network repeatedly while handling the POST; confirm
        # against the view implementation.
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'binding') \
            .AndReturn(binding)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'port-security')\
            .AndReturn(port_security)
        # Only enabled extensions contribute extra kwargs to port_create.
        extension_kwargs = {}
        if binding:
            extension_kwargs['binding__vnic_type'] = \
                port.binding__vnic_type
        if mac_learning:
            extension_kwargs['mac_learning_enabled'] = True
        if port_security:
            extension_kwargs['port_security_enabled'] = True
        api.neutron.port_create(IsA(http.HttpRequest),
                                tenant_id=network.tenant_id,
                                network_id=network.id,
                                name=port.name,
                                admin_state_up=port.admin_state_up,
                                device_id=port.device_id,
                                device_owner=port.device_owner,
                                binding__host_id=port.binding__host_id,
                                mac_address=port.mac_address,
                                **extension_kwargs)\
            .AndReturn(port)
        self.mox.ReplayAll()
        form_data = {'network_id': port.network_id,
                     'network_name': network.name,
                     'name': port.name,
                     'admin_state': port.admin_state_up,
                     'device_id': port.device_id,
                     'device_owner': port.device_owner,
                     'binding__host_id': port.binding__host_id,
                     'mac_address': port.mac_address}
        if binding:
            form_data['binding__vnic_type'] = port.binding__vnic_type
        if mac_learning:
            form_data['mac_state'] = True
        if port_security:
            form_data['port_security_enabled'] = True
        url = reverse('horizon:admin:networks:addport',
                      args=[port.network_id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        redir_url = reverse(NETWORKS_DETAIL_URL, args=[port.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_get',
                                      'is_extension_supported',
                                      'port_create',)})
    def test_port_create_post_with_fixed_ip(self):
        """Port creation where the form specifies an explicit fixed IP;
        port_create must receive the port's fixed_ips."""
        network = self.networks.first()
        port = self.ports.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(True)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'port-security')\
            .AndReturn(True)
        # NOTE(review): binding__vnic_type is passed unconditionally here
        # although the 'binding' extension lookup is not stubbed in this
        # test, unlike the other create scenarios — verify intent.
        extension_kwargs = {}
        extension_kwargs['binding__vnic_type'] = \
            port.binding__vnic_type
        api.neutron.port_create(IsA(http.HttpRequest),
                                tenant_id=network.tenant_id,
                                network_id=network.id,
                                name=port.name,
                                admin_state_up=port.admin_state_up,
                                device_id=port.device_id,
                                device_owner=port.device_owner,
                                binding__host_id=port.binding__host_id,
                                mac_address=port.mac_address,
                                fixed_ips=port.fixed_ips,
                                **extension_kwargs)\
            .AndReturn(port)
        self.mox.ReplayAll()
        form_data = {'network_id': port.network_id,
                     'network_name': network.name,
                     'name': port.name,
                     'admin_state': port.admin_state_up,
                     'device_id': port.device_id,
                     'device_owner': port.device_owner,
                     'binding__host_id': port.binding__host_id,
                     'mac_address': port.mac_address,
                     'specify_ip': 'fixed_ip',
                     'fixed_ip': port.fixed_ips[0]['ip_address'],
                     'subnet_id': port.fixed_ips[0]['subnet_id']}
        form_data['binding__vnic_type'] = port.binding__vnic_type
        form_data['mac_state'] = True
        url = reverse('horizon:admin:networks:addport',
                      args=[port.network_id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        redir_url = reverse(NETWORKS_DETAIL_URL, args=[port.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('network_get',
                                      'port_create',
                                      'is_extension_supported',)})
    def test_port_create_post_exception(self):
        # port_create failure path with all extensions disabled.
        self._test_port_create_post_exception()
    @test.create_stubs({api.neutron: ('network_get',
                                      'port_create',
                                      'is_extension_supported',)})
    def test_port_create_post_exception_with_mac_learning(self):
        # port_create failure path with mac-learning enabled.
        self._test_port_create_post_exception(mac_learning=True)
    @test.create_stubs({api.neutron: ('network_get',
                                      'port_create',
                                      'is_extension_supported',)})
    def test_port_create_post_exception_with_port_security(self):
        # port_create failure path with port-security enabled.
        self._test_port_create_post_exception(port_security=True)
    def _test_port_create_post_exception(self, mac_learning=False,
                                         binding=False,
                                         port_security=False):
        """Shared scenario: POST the "add port" form while port_create raises;
        the view should still redirect (error handled, no form errors)."""
        network = self.networks.first()
        port = self.ports.first()
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.network_get(IsA(http.HttpRequest),
                                network.id)\
            .AndReturn(self.networks.first())
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'binding') \
            .AndReturn(binding)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'port-security')\
            .AndReturn(port_security)
        extension_kwargs = {}
        if binding:
            extension_kwargs['binding__vnic_type'] = port.binding__vnic_type
        if mac_learning:
            extension_kwargs['mac_learning_enabled'] = True
        if port_security:
            extension_kwargs['port_security_enabled'] = True
        api.neutron.port_create(IsA(http.HttpRequest),
                                tenant_id=network.tenant_id,
                                network_id=network.id,
                                name=port.name,
                                admin_state_up=port.admin_state_up,
                                device_id=port.device_id,
                                device_owner=port.device_owner,
                                binding__host_id=port.binding__host_id,
                                mac_address=port.mac_address,
                                **extension_kwargs)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        form_data = {'network_id': port.network_id,
                     'network_name': network.name,
                     'name': port.name,
                     'admin_state': port.admin_state_up,
                     'mac_state': True,
                     'device_id': port.device_id,
                     'device_owner': port.device_owner,
                     'binding__host_id': port.binding__host_id,
                     'mac_address': port.mac_address}
        if binding:
            form_data['binding__vnic_type'] = port.binding__vnic_type
        if mac_learning:
            # NOTE(review): sibling tests use the form key 'mac_state'
            # (which is already True in the base dict above); this key looks
            # like the API kwarg rather than the form field — verify.
            form_data['mac_learning_enabled'] = True
        if port_security:
            form_data['port_security_enabled'] = True
        url = reverse('horizon:admin:networks:addport',
                      args=[port.network_id])
        res = self.client.post(url, form_data)
        self.assertNoFormErrors(res)
        redir_url = reverse(NETWORKS_DETAIL_URL, args=[port.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',)})
    def test_port_update_get(self):
        # Edit form with all extensions disabled.
        self._test_port_update_get()
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',)})
    def test_port_update_get_with_mac_learning(self):
        # Edit form with the mac-learning extension enabled.
        self._test_port_update_get(mac_learning=True)
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',)})
    def test_port_update_get_with_port_security(self):
        # Edit form with the port-security extension enabled.
        self._test_port_update_get(port_security=True)
    def _test_port_update_get(self, mac_learning=False, binding=False,
                              port_security=False):
        """Shared scenario: GET the "edit port" form and check the update
        template renders."""
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest),
                             port.id)\
            .AndReturn(port)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'binding') \
            .AndReturn(binding)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'port-security')\
            .AndReturn(port_security)
        self.mox.ReplayAll()
        url = reverse('horizon:admin:networks:editport',
                      args=[port.network_id, port.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res, 'admin/networks/ports/update.html')
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',
                                      'port_update')})
    def test_port_update_post(self):
        # Successful port update with all extensions disabled.
        self._test_port_update_post()
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',
                                      'port_update')})
    def test_port_update_post_with_mac_learning(self):
        # Successful port update with the mac-learning extension enabled.
        self._test_port_update_post(mac_learning=True)
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',
                                      'port_update')})
    def test_port_update_post_with_port_security(self):
        # Successful port update with the port-security extension enabled.
        self._test_port_update_post(port_security=True)
    def _test_port_update_post(self, mac_learning=False, binding=False,
                               port_security=False):
        """Shared scenario: POST the "edit port" form and verify port_update
        kwargs, then redirect to the network detail page.

        NOTE(review): unlike the create scenarios this does not call
        assertNoFormErrors before checking the redirect — verify intent.
        """
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest), port.id)\
            .AndReturn(port)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'binding')\
            .AndReturn(binding)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'port-security')\
            .AndReturn(port_security)
        # Only enabled extensions contribute extra kwargs to port_update.
        extension_kwargs = {}
        if binding:
            extension_kwargs['binding__vnic_type'] = port.binding__vnic_type
        if mac_learning:
            extension_kwargs['mac_learning_enabled'] = True
        if port_security:
            extension_kwargs['port_security_enabled'] = True
        api.neutron.port_update(IsA(http.HttpRequest), port.id,
                                name=port.name,
                                admin_state_up=port.admin_state_up,
                                device_id=port.device_id,
                                device_owner=port.device_owner,
                                binding__host_id=port.binding__host_id,
                                mac_address=port.mac_address,
                                **extension_kwargs)\
            .AndReturn(port)
        self.mox.ReplayAll()
        form_data = {'network_id': port.network_id,
                     'port_id': port.id,
                     'name': port.name,
                     'admin_state': port.admin_state_up,
                     'device_id': port.device_id,
                     'device_owner': port.device_owner,
                     'binding__host_id': port.binding__host_id,
                     'mac_address': port.mac_address}
        if binding:
            form_data['binding__vnic_type'] = port.binding__vnic_type
        if mac_learning:
            form_data['mac_state'] = True
        if port_security:
            form_data['port_security_enabled'] = True
        url = reverse('horizon:admin:networks:editport',
                      args=[port.network_id, port.id])
        res = self.client.post(url, form_data)
        redir_url = reverse(NETWORKS_DETAIL_URL, args=[port.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',
                                      'port_update')})
    def test_port_update_post_exception(self):
        # port_update failure path with all extensions disabled.
        self._test_port_update_post_exception()
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',
                                      'port_update')})
    def test_port_update_post_exception_with_mac_learning(self):
        # Same failure path with the mac-learning extension enabled.
        self._test_port_update_post_exception(mac_learning=True, binding=False)
    @test.create_stubs({api.neutron: ('port_get',
                                      'is_extension_supported',
                                      'port_update')})
    def test_port_update_post_exception_with_port_security(self):
        # Same failure path with the port-security extension enabled.
        self._test_port_update_post_exception(port_security=True)
    def _test_port_update_post_exception(self, mac_learning=False,
                                         binding=False,
                                         port_security=False):
        # Mirror of _test_port_update_post, but port_update raises a
        # neutron exception; the view should still redirect to the
        # network detail page (the error is surfaced via messages).
        # NOTE(review): mox verifies calls in the recorded order.
        port = self.ports.first()
        api.neutron.port_get(IsA(http.HttpRequest), port.id)\
            .AndReturn(port)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'binding')\
            .AndReturn(binding)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'port-security')\
            .AndReturn(port_security)
        extension_kwargs = {}
        if binding:
            extension_kwargs['binding__vnic_type'] = port.binding__vnic_type
        if mac_learning:
            extension_kwargs['mac_learning_enabled'] = True
        if port_security:
            extension_kwargs['port_security_enabled'] = True
        api.neutron.port_update(IsA(http.HttpRequest), port.id,
                                name=port.name,
                                admin_state_up=port.admin_state_up,
                                device_id=port.device_id,
                                device_owner=port.device_owner,
                                binding__host_id=port.binding__host_id,
                                mac_address=port.mac_address,
                                **extension_kwargs)\
            .AndRaise(self.exceptions.neutron)
        self.mox.ReplayAll()
        form_data = {'network_id': port.network_id,
                     'port_id': port.id,
                     'name': port.name,
                     'admin_state': port.admin_state_up,
                     'device_id': port.device_id,
                     'device_owner': port.device_owner,
                     'binding__host_id': port.binding__host_id,
                     'mac_address': port.mac_address}
        if binding:
            form_data['binding__vnic_type'] = port.binding__vnic_type
        if mac_learning:
            form_data['mac_state'] = True
        if port_security:
            form_data['port_security_enabled'] = True
        url = reverse('horizon:admin:networks:editport',
                      args=[port.network_id, port.id])
        res = self.client.post(url, form_data)
        redir_url = reverse(NETWORKS_DETAIL_URL, args=[port.network_id])
        self.assertRedirectsNoFollow(res, redir_url)
    @test.create_stubs({api.neutron: ('port_delete',
                                      'subnet_list',
                                      'port_list',
                                      'show_network_ip_availability',
                                      'is_extension_supported',
                                      'list_dhcp_agent_hosting_networks',)})
    def test_port_delete(self):
        # Delete a port via the detail-page table action (base case).
        self._test_port_delete()
    @test.create_stubs({api.neutron: ('port_delete',
                                      'subnet_list',
                                      'port_list',
                                      'show_network_ip_availability',
                                      'is_extension_supported',
                                      'list_dhcp_agent_hosting_networks',)})
    def test_port_delete_with_mac_learning(self):
        # Same delete flow with the mac-learning extension enabled.
        self._test_port_delete(mac_learning=True)
def _test_port_delete(self, mac_learning=False):
port = self.ports.first()
network_id = port.network_id
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
api.neutron.is_extension_supported(
IsA(http.HttpRequest),
'network-ip-availability').AndReturn(True)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(mac_learning)
self.mox.ReplayAll()
form_data = {'action': 'ports__delete__%s' % port.id}
url = reverse(NETWORKS_DETAIL_URL, args=[network_id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, url)
    @test.create_stubs({api.neutron: ('port_delete',
                                      'subnet_list',
                                      'port_list',
                                      'show_network_ip_availability',
                                      'is_extension_supported',
                                      'list_dhcp_agent_hosting_networks',)})
    def test_port_delete_exception(self):
        # Delete a port where port_delete raises (base case).
        self._test_port_delete_exception()
    @test.create_stubs({api.neutron: ('port_delete',
                                      'subnet_list',
                                      'port_list',
                                      'show_network_ip_availability',
                                      'is_extension_supported',
                                      'list_dhcp_agent_hosting_networks')})
    def test_port_delete_exception_with_mac_learning(self):
        # Same failure flow with the mac-learning extension enabled.
        self._test_port_delete_exception(mac_learning=True)
    def _test_port_delete_exception(self, mac_learning=False):
        # Same flow as _test_port_delete, but port_delete raises; the
        # table action surfaces the error and still redirects back to
        # the network detail page.
        port = self.ports.first()
        network_id = port.network_id
        api.neutron.port_delete(IsA(http.HttpRequest), port.id)\
            .AndRaise(self.exceptions.neutron)
        api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
            .AndReturn([self.ports.first()])
        api.neutron.is_extension_supported(
            IsA(http.HttpRequest),
            'network-ip-availability').AndReturn(True)
        api.neutron.is_extension_supported(IsA(http.HttpRequest),
                                           'mac-learning')\
            .AndReturn(mac_learning)
        self.mox.ReplayAll()
        form_data = {'action': 'ports__delete__%s' % port.id}
        url = reverse(NETWORKS_DETAIL_URL, args=[network_id])
        res = self.client.post(url, form_data)
        self.assertRedirectsNoFollow(res, url)
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
import os
import re
import random
import shutil
import socket
import string
import json
import ipaddress
from charms.leadership import leader_get, leader_set
from shutil import move
from tempfile import TemporaryDirectory
from pathlib import Path
from shlex import split
from subprocess import check_call
from subprocess import check_output
from subprocess import CalledProcessError
from urllib.request import Request, urlopen
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import remove_state
from charms.reactive import set_state
from charms.reactive import is_state
from charms.reactive import endpoint_from_flag
from charms.reactive import when, when_any, when_not, when_none
from charms.reactive.helpers import data_changed, any_file_changed
from charms.kubernetes.common import get_version
from charms.kubernetes.common import retry
from charms.layer import tls_client
from charmhelpers.core import hookenv
from charmhelpers.core import host
from charmhelpers.core import unitdata
from charmhelpers.core.host import service_stop
from charmhelpers.core.templating import render
from charmhelpers.fetch import apt_install
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
# BUG FIX: use a raw string — '\.' is an invalid escape sequence in a
# normal string literal (DeprecationWarning on Python 3.6+, SyntaxError in
# later versions); the runtime value is unchanged.
nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$'

# Environment variable consumed by Google client libraries for credentials.
gcp_creds_env_key = 'GOOGLE_APPLICATION_CREDENTIALS'

# Snap resources attached to the charm; used for install and checksumming.
snap_resources = ['kubectl', 'kube-apiserver', 'kube-controller-manager',
                  'kube-scheduler', 'cdk-addons']

# Make snap-installed binaries (/snap/bin) resolvable from hooks.
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')

# Unit-local key/value store shared by the functions below.
db = unitdata.kv()
def set_upgrade_needed(forced=False):
    """Flag that the snaps need upgrading.

    Always sets 'kubernetes-master.upgrade-needed'. Additionally sets
    'kubernetes-master.upgrade-specified' (auto-upgrade) unless the
    operator requires a manual upgrade and this is not the first install
    and the upgrade was not forced.
    """
    set_state('kubernetes-master.upgrade-needed')
    config = hookenv.config()
    previous_channel = config.previous('channel')
    manual_required = config.get('require-manual-upgrade')
    hookenv.log('set upgrade needed')
    if forced or previous_channel is None or not manual_required:
        hookenv.log('forcing upgrade')
        set_state('kubernetes-master.upgrade-specified')
@when('config.changed.channel')
def channel_changed():
    # A new snap channel means new snap revisions: flag an upgrade.
    set_upgrade_needed()
def service_cidr():
    ''' Return the charm's service-cidr config '''
    # Prefer the frozen value once the apiserver has started; fall back
    # to the live config option otherwise.
    return (db.get('kubernetes-master.service-cidr')
            or hookenv.config('service-cidr'))
def freeze_service_cidr():
    ''' Freeze the service CIDR. Once the apiserver has started, we can no
    longer safely change this value. '''
    # Persist the current effective CIDR; service_cidr() reads it back.
    db.set('kubernetes-master.service-cidr', service_cidr())
@hook('upgrade-charm')
def check_for_upgrade_needed():
    '''An upgrade charm event was triggered by Juju, react to that here.'''
    hookenv.status_set('maintenance', 'Checking resources')
    # migrate to new flags
    if is_state('kubernetes-master.restarted-for-cloud'):
        remove_state('kubernetes-master.restarted-for-cloud')
        set_state('kubernetes-master.cloud.ready')
    if is_state('kubernetes-master.cloud-request-sent'):
        # minor change, just for consistency
        remove_state('kubernetes-master.cloud-request-sent')
        set_state('kubernetes-master.cloud.request-sent')
    migrate_from_pre_snaps()
    add_rbac_roles()
    # Force the authentication setup handlers to re-run on this upgrade.
    set_state('reconfigure.authentication.setup')
    remove_state('authentication.setup')
    if not db.get('snap.resources.fingerprint.initialised'):
        # We are here on an upgrade from non-rolling master
        # Since this upgrade might also include resource updates eg
        # juju upgrade-charm kubernetes-master --resource kube-any=my.snap
        # we take no risk and forcibly upgrade the snaps.
        # Forcibly means we do not prompt the user to call the upgrade action.
        set_upgrade_needed(forced=True)
    # Checksum migration must happen before the comparison below.
    migrate_resource_checksums()
    check_resources_for_upgrade_needed()
    # Set the auto storage backend to etcd2.
    auto_storage_backend = leader_get('auto_storage_backend')
    is_leader = is_state('leadership.is_leader')
    if not auto_storage_backend and is_leader:
        leader_set(auto_storage_backend='etcd2')
def get_resource_checksum_db_key(resource):
    ''' Convert a resource name to a resource checksum database key. '''
    return 'kubernetes-master.resource-checksums.{}'.format(resource)
def calculate_resource_checksum(resource):
    ''' Calculate a checksum for a resource '''
    # A missing/unattached resource hashes as a zero-byte payload, which
    # matches how migrate_resource_checksums treats absent resources.
    digest = hashlib.md5()
    resource_path = hookenv.resource_get(resource)
    if resource_path:
        with open(resource_path, 'rb') as resource_file:
            digest.update(resource_file.read())
    return digest.hexdigest()
def migrate_resource_checksums():
    ''' Migrate resource checksums from the old schema to the new one '''
    for resource in snap_resources:
        new_key = get_resource_checksum_db_key(resource)
        # Only migrate if the new-schema key is not already populated.
        if not db.get(new_key):
            path = hookenv.resource_get(resource)
            if path:
                # old key from charms.reactive.helpers.any_file_changed
                old_key = 'reactive.files_changed.' + path
                old_checksum = db.get(old_key)
                db.set(new_key, old_checksum)
            else:
                # No resource is attached. Previously, this meant no checksum
                # would be calculated and stored. But now we calculate it as if
                # it is a 0-byte resource, so let's go ahead and do that.
                zero_checksum = hashlib.md5().hexdigest()
                db.set(new_key, zero_checksum)
def check_resources_for_upgrade_needed():
    """Flag an upgrade when any snap resource's checksum has changed."""
    hookenv.status_set('maintenance', 'Checking resources')
    for resource in snap_resources:
        stored = db.get(get_resource_checksum_db_key(resource))
        if calculate_resource_checksum(resource) != stored:
            set_upgrade_needed()
def calculate_and_store_resource_checksums():
    """Record the current checksum of every snap resource in unitdata."""
    for resource in snap_resources:
        db.set(get_resource_checksum_db_key(resource),
               calculate_resource_checksum(resource))
def add_rbac_roles():
    '''Update the known_tokens file with proper groups.'''
    # Rewrite the tokens CSV in place via a backup copy; each record is
    # token,username,user[,groups].
    tokens_fname = '/root/cdk/known_tokens.csv'
    tokens_backup_fname = '/root/cdk/known_tokens.csv.backup'
    move(tokens_fname, tokens_backup_fname)
    with open(tokens_fname, 'w') as ftokens:
        with open(tokens_backup_fname, 'r') as stream:
            for line in stream:
                record = line.strip().split(',')
                # token, username, user, groups
                if record[2] == 'admin' and len(record) == 3:
                    # Admin with no group yet: append system:masters.
                    towrite = '{0},{1},{2},"{3}"\n'.format(record[0],
                                                           record[1],
                                                           record[2],
                                                           'system:masters')
                    ftokens.write(towrite)
                    continue
                if record[2] == 'kube_proxy':
                    # Normalize the legacy kube_proxy user naming.
                    towrite = '{0},{1},{2}\n'.format(record[0],
                                                     'system:kube-proxy',
                                                     'kube-proxy')
                    ftokens.write(towrite)
                    continue
                if record[2] == 'kubelet' and record[1] == 'kubelet':
                    # Drop the legacy generic kubelet record entirely.
                    continue
                # Any other record is carried over unchanged.
                ftokens.write('{}'.format(line))
def rename_file_idempotent(source, destination):
    """Rename source to destination; silently skip a missing source."""
    if not os.path.isfile(source):
        return
    os.rename(source, destination)
def migrate_from_pre_snaps():
    # One-time migration from the pre-snap (debian package / manual
    # binary) deployment layout to the snap-based layout.
    # remove old states
    remove_state('kubernetes.components.installed')
    remove_state('kubernetes.dashboard.available')
    remove_state('kube-dns.available')
    remove_state('kubernetes-master.app_version.set')
    # disable old services
    services = ['kube-apiserver',
                'kube-controller-manager',
                'kube-scheduler']
    for service in services:
        hookenv.log('Stopping {0} service.'.format(service))
        host.service_stop(service)
    # rename auth files
    os.makedirs('/root/cdk', exist_ok=True)
    rename_file_idempotent('/etc/kubernetes/serviceaccount.key',
                           '/root/cdk/serviceaccount.key')
    rename_file_idempotent('/srv/kubernetes/basic_auth.csv',
                           '/root/cdk/basic_auth.csv')
    rename_file_idempotent('/srv/kubernetes/known_tokens.csv',
                           '/root/cdk/known_tokens.csv')
    # cleanup old files
    files = [
        "/lib/systemd/system/kube-apiserver.service",
        "/lib/systemd/system/kube-controller-manager.service",
        "/lib/systemd/system/kube-scheduler.service",
        "/etc/default/kube-defaults",
        "/etc/default/kube-apiserver.defaults",
        "/etc/default/kube-controller-manager.defaults",
        "/etc/default/kube-scheduler.defaults",
        "/srv/kubernetes",
        "/home/ubuntu/kubectl",
        "/usr/local/bin/kubectl",
        "/usr/local/bin/kube-apiserver",
        "/usr/local/bin/kube-controller-manager",
        "/usr/local/bin/kube-scheduler",
        "/etc/kubernetes"
    ]
    for file in files:
        if os.path.isdir(file):
            hookenv.log("Removing directory: " + file)
            shutil.rmtree(file)
        elif os.path.isfile(file):
            hookenv.log("Removing file: " + file)
            os.remove(file)
@when('kubernetes-master.upgrade-specified')
def do_upgrade():
    # Perform the requested snap upgrade, then clear both upgrade flags.
    install_snaps()
    remove_state('kubernetes-master.upgrade-needed')
    remove_state('kubernetes-master.upgrade-specified')
def install_snaps():
    """Install (or refresh) all master snaps from the configured channel.

    After a successful install, store fresh resource checksums, mark the
    fingerprint DB as initialised, and clear the components-started flag
    so the services get reconfigured.
    """
    channel = hookenv.config('channel')
    # (snap name, extra install kwargs); only kubectl is classic.
    plan = [('kubectl', {'classic': True}),
            ('kube-apiserver', {}),
            ('kube-controller-manager', {}),
            ('kube-scheduler', {}),
            ('cdk-addons', {})]
    for snap_name, extra in plan:
        hookenv.status_set('maintenance',
                           'Installing {} snap'.format(snap_name))
        snap.install(snap_name, channel=channel, **extra)
    calculate_and_store_resource_checksums()
    db.set('snap.resources.fingerprint.initialised', True)
    set_state('kubernetes-master.snaps.installed')
    remove_state('kubernetes-master.components.started')
@when('config.changed.client_password', 'leadership.is_leader')
def password_changed():
    """Handle password change via the charms config."""
    password = hookenv.config('client_password')
    if password == "" and is_state('client.password.initialised'):
        # password_changed is called during an upgrade. Nothing to do.
        return
    elif password == "":
        # Password not initialised
        password = token_generator()
    setup_basic_auth(password, "admin", "admin", "system:masters")
    # Force the auth handlers to re-run so the new password propagates.
    set_state('reconfigure.authentication.setup')
    remove_state('authentication.setup')
    set_state('client.password.initialised')
@when('config.changed.storage-backend')
def storage_backend_changed():
    # A storage-backend change requires the master services to be
    # reconfigured; clearing this flag triggers start_master again.
    remove_state('kubernetes-master.components.started')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
    ''' Set master configuration on the CNI relation. This lets the CNI
    subordinate know that we're the master so it can respond accordingly. '''
    # kubeconfig_path is intentionally empty on the master side.
    cni.set_config(is_master=True, kubeconfig_path='')
@when('leadership.is_leader')
@when_not('authentication.setup')
def setup_leader_authentication():
    '''Setup basic authentication and token access for the cluster.

    On the leader: ensure the basic-auth, known-tokens and service
    account key files exist (reusing a prior leadership broadcast when
    possible), then broadcast their contents via leader data so
    non-leaders can mirror them.
    '''
    service_key = '/root/cdk/serviceaccount.key'
    basic_auth = '/root/cdk/basic_auth.csv'
    known_tokens = '/root/cdk/known_tokens.csv'
    hookenv.status_set('maintenance', 'Rendering authentication templates.')
    keys = [service_key, basic_auth, known_tokens]
    # Try first to fetch data from an old leadership broadcast.
    if not get_keys_from_leader(keys) \
            or is_state('reconfigure.authentication.setup'):
        last_pass = get_password('basic_auth.csv', 'admin')
        setup_basic_auth(last_pass, 'admin', 'admin', 'system:masters')
        if not os.path.isfile(known_tokens):
            touch(known_tokens)
        # Generate the default service account token key
        os.makedirs('/root/cdk', exist_ok=True)
        if not os.path.isfile(service_key):
            cmd = ['openssl', 'genrsa', '-out', service_key,
                   '2048']
            check_call(cmd)
        remove_state('reconfigure.authentication.setup')
    # read service account key for syndication
    leader_data = {}
    for f in [known_tokens, basic_auth, service_key]:
        with open(f, 'r') as fp:
            leader_data[f] = fp.read()
    # this is slightly opaque, but we are sending file contents under its file
    # path as a key.
    # eg:
    # {'/root/cdk/serviceaccount.key': 'RSA:2471731...'}
    leader_set(leader_data)
    remove_state('kubernetes-master.components.started')
    set_state('authentication.setup')
@when_not('leadership.is_leader')
def setup_non_leader_authentication():
    # Mirror the leader's auth material onto this unit; retried until the
    # leader has broadcast all three files.
    service_key = '/root/cdk/serviceaccount.key'
    basic_auth = '/root/cdk/basic_auth.csv'
    known_tokens = '/root/cdk/known_tokens.csv'
    keys = [service_key, basic_auth, known_tokens]
    # The source of truth for non-leaders is the leader.
    # Therefore we overwrite_local with whatever the leader has.
    if not get_keys_from_leader(keys, overwrite_local=True):
        # the keys were not retrieved. Non-leaders have to retry.
        return
    if not any_file_changed(keys) and is_state('authentication.setup'):
        # No change detected and we have already setup the authentication
        return
    hookenv.status_set('maintenance', 'Rendering authentication templates.')
    # A change in auth material requires a service reconfiguration.
    remove_state('kubernetes-master.components.started')
    set_state('authentication.setup')
def get_keys_from_leader(keys, overwrite_local=False):
    """
    Gets the broadcasted keys from the leader and stores them in
    the corresponding files.

    Args:
        keys: list of keys. Keys are actually files on the FS.
        overwrite_local: when True, replace existing local files with the
            leader's copy; when False, only fetch files that are missing.

    Returns: True if all key were fetched, False if not.
    """
    # This races with other codepaths, and seems to require being created first
    # This block may be extracted later, but for now seems to work as intended
    os.makedirs('/root/cdk', exist_ok=True)
    for k in keys:
        # If the path does not exist, assume we need it
        if not os.path.exists(k) or overwrite_local:
            # Fetch data from leadership broadcast
            contents = leader_get(k)
            # Default to logging the warning and wait for leader data to be set
            if contents is None:
                hookenv.log('Missing content for file {}'.format(k))
                return False
            # Write out the file and move on to the next item
            with open(k, 'w+') as fp:
                fp.write(contents)
                fp.write('\n')
    return True
@when('kubernetes-master.snaps.installed')
def set_app_version():
    ''' Declare the application version to juju '''
    raw = check_output(['kube-apiserver', '--version'])
    # Keep whatever follows the last b' v' marker, stripped of trailing
    # whitespace — presumably the bare semantic version; TODO confirm
    # against actual `kube-apiserver --version` output.
    version = raw.split(b' v')[-1].rstrip()
    hookenv.application_version_set(version)
@when('kubernetes-master.snaps.installed')
@when('snap.refresh.set')
@when('leadership.is_leader')
def process_snapd_timer():
    ''' Set the snapd refresh timer on the leader so all cluster members
    (present and future) will refresh near the same time. '''
    # Get the current snapd refresh timer; we know layer-snap has set this
    # when the 'snap.refresh.set' flag is present.
    timer = snap.get(snapname='core', key='refresh.timer').decode('utf-8')
    # The first time through, data_changed will be true. Subsequent calls
    # should only update leader data if something changed.
    if data_changed('master_snapd_refresh', timer):
        hookenv.log('setting snapd_refresh timer to: {}'.format(timer))
        leader_set({'snapd_refresh': timer})
@when('kubernetes-master.snaps.installed')
@when('snap.refresh.set')
@when('leadership.changed.snapd_refresh')
@when_not('leadership.is_leader')
def set_snapd_timer():
    ''' Set the snapd refresh.timer on non-leader cluster members. '''
    # NB: This method should only be run when 'snap.refresh.set' is present.
    # Layer-snap will always set a core refresh.timer, which may not be the
    # same as our leader. Gating with 'snap.refresh.set' ensures layer-snap
    # has finished and we are free to set our config to the leader's timer.
    timer = leader_get('snapd_refresh')
    hookenv.log('setting snapd_refresh timer to: {}'.format(timer))
    snap.set_refresh_timer(timer)
@hookenv.atexit
def set_final_status():
    ''' Set the final status of the charm as we leave hook execution '''
    # NOTE(review): the order of the checks below defines the status
    # precedence — each early return suppresses all later statuses.
    try:
        goal_state = hookenv.goal_state()
    except NotImplementedError:
        # Older juju has no goal-state; degrade gracefully.
        goal_state = {}
    vsphere_joined = is_state('endpoint.vsphere.joined')
    azure_joined = is_state('endpoint.azure.joined')
    cloud_blocked = is_state('kubernetes-master.cloud.blocked')
    if vsphere_joined and cloud_blocked:
        hookenv.status_set('blocked',
                           'vSphere integration requires K8s 1.12 or greater')
        return
    if azure_joined and cloud_blocked:
        hookenv.status_set('blocked',
                           'Azure integration requires K8s 1.11 or greater')
        return
    if is_state('kubernetes-master.cloud.pending'):
        hookenv.status_set('waiting', 'Waiting for cloud integration')
        return
    if not is_state('kube-api-endpoint.available'):
        # 'waiting' if the relation is coming per goal-state, otherwise
        # the operator needs to add it: 'blocked'.
        if 'kube-api-endpoint' in goal_state.get('relations', {}):
            status = 'waiting'
        else:
            status = 'blocked'
        hookenv.status_set(status, 'Waiting for kube-api-endpoint relation')
        return
    if not is_state('kube-control.connected'):
        if 'kube-control' in goal_state.get('relations', {}):
            status = 'waiting'
        else:
            status = 'blocked'
        hookenv.status_set(status, 'Waiting for workers.')
        return
    upgrade_needed = is_state('kubernetes-master.upgrade-needed')
    upgrade_specified = is_state('kubernetes-master.upgrade-specified')
    if upgrade_needed and not upgrade_specified:
        msg = 'Needs manual upgrade, run the upgrade action'
        hookenv.status_set('blocked', msg)
        return
    if is_state('kubernetes-master.components.started'):
        # All services should be up and running at this point. Double-check...
        failing_services = master_services_down()
        if len(failing_services) != 0:
            msg = 'Stopped services: {}'.format(','.join(failing_services))
            hookenv.status_set('blocked', msg)
            return
    is_leader = is_state('leadership.is_leader')
    authentication_setup = is_state('authentication.setup')
    if not is_leader and not authentication_setup:
        hookenv.status_set('waiting', 'Waiting on leaders crypto keys.')
        return
    components_started = is_state('kubernetes-master.components.started')
    addons_configured = is_state('cdk-addons.configured')
    if components_started and not addons_configured:
        hookenv.status_set('waiting', 'Waiting to retry addon deployment')
        return
    if addons_configured and not all_kube_system_pods_running():
        hookenv.status_set('waiting', 'Waiting for kube-system pods to start')
        return
    if hookenv.config('service-cidr') != service_cidr():
        # The CIDR was frozen; warn but stay active.
        msg = 'WARN: cannot change service-cidr, still using ' + service_cidr()
        hookenv.status_set('active', msg)
        return
    gpu_available = is_state('kube-control.gpu.available')
    gpu_enabled = is_state('kubernetes-master.gpu.enabled')
    if gpu_available and not gpu_enabled:
        msg = 'GPUs available. Set allow-privileged="auto" to enable.'
        hookenv.status_set('active', msg)
        return
    hookenv.status_set('active', 'Kubernetes master running.')
def master_services_down():
    """Ensure master services are up and running.
    Return: list of failing services"""
    # Snap-installed services are named snap.<name>.daemon.
    managed = ('kube-apiserver',
               'kube-controller-manager',
               'kube-scheduler')
    return [svc for svc in managed
            if not host.service_running('snap.{}.daemon'.format(svc))]
@when('etcd.available', 'tls_client.server.certificate.saved',
      'authentication.setup')
@when('leadership.set.auto_storage_backend')
@when_not('kubernetes-master.components.started',
          'kubernetes-master.cloud.pending',
          'kubernetes-master.cloud.blocked')
def start_master(etcd):
    '''Run the Kubernetes master components.'''
    hookenv.status_set('maintenance',
                       'Configuring the Kubernetes master services.')
    # Must happen before the apiserver is configured below.
    freeze_service_cidr()
    if not etcd.get_connection_string():
        # etcd is not returning a connection string. This happens when
        # the master unit disconnects from etcd and is ready to terminate.
        # No point in trying to start master services and fail. Just return.
        return
    # TODO: Make sure below relation is handled on change
    # https://github.com/kubernetes/kubernetes/issues/43461
    handle_etcd_relation(etcd)
    # Add CLI options to all components
    configure_apiserver(etcd.get_connection_string())
    configure_controller_manager()
    configure_scheduler()
    set_state('kubernetes-master.components.started')
    hookenv.open_port(6443)
@when('etcd.available')
def etcd_data_change(etcd):
    ''' Etcd scale events block master reconfiguration due to the
    kubernetes-master.components.started state. We need a way to
    handle these events consistently only when the number of etcd
    units has actually changed '''
    # key off of the connection string
    connection_string = etcd.get_connection_string()
    # If the connection string changes, remove the started state to trigger
    # handling of the master components
    if data_changed('etcd-connect', connection_string):
        remove_state('kubernetes-master.components.started')
    # We are the leader and the auto_storage_backend is not set meaning
    # this is the first time we connect to etcd.
    auto_storage_backend = leader_get('auto_storage_backend')
    is_leader = is_state('leadership.is_leader')
    if is_leader and not auto_storage_backend:
        # Pick the storage backend matching the etcd major version.
        if etcd.get_version().startswith('3.'):
            leader_set(auto_storage_backend='etcd3')
        else:
            leader_set(auto_storage_backend='etcd2')
@when('kube-control.connected')
@when('cdk-addons.configured')
def send_cluster_dns_detail(kube_control):
    ''' Send cluster DNS info '''
    dns_enabled = hookenv.config('enable-kube-dns')
    dns_domain = hookenv.config('dns_domain')
    dns_ip = None
    if dns_enabled:
        try:
            dns_ip = get_dns_ip()
        except CalledProcessError:
            # kube-dns service IP not resolvable yet; retry next hook.
            hookenv.log("kubedns not ready yet")
            return
    kube_control.set_dns(53, dns_domain, dns_ip, dns_enabled)
@when('kube-control.connected')
@when('snap.installed.kubectl')
@when('leadership.is_leader')
def create_service_configs(kube_control):
    """Create the users for kubelet"""
    # Tracks whether any token was (re)created; the apiserver only needs
    # a restart when the known-tokens file actually changed.
    should_restart = False
    # generate the username/pass for the requesting unit
    proxy_token = get_token('system:kube-proxy')
    if not proxy_token:
        setup_tokens(None, 'system:kube-proxy', 'kube-proxy')
        proxy_token = get_token('system:kube-proxy')
        should_restart = True
    client_token = get_token('admin')
    if not client_token:
        setup_tokens(None, 'admin', 'admin', "system:masters")
        client_token = get_token('admin')
        should_restart = True
    requests = kube_control.auth_user()
    for request in requests:
        username = request[1]['user']
        group = request[1]['group']
        kubelet_token = get_token(username)
        if not kubelet_token and username and group:
            # Usernames have to be in the form of system:node:<nodeName>
            userid = "kubelet-{}".format(request[0].split('/')[1])
            setup_tokens(None, username, userid, group)
            kubelet_token = get_token(username)
        # Answer the worker's auth request with all three tokens.
        kube_control.sign_auth_request(request[0], username,
                                       kubelet_token, proxy_token,
                                       client_token)
        should_restart = True
    if should_restart:
        host.service_restart('snap.kube-apiserver.daemon')
        remove_state('authentication.setup')
@when('kube-api-endpoint.available')
def push_service_data(kube_api):
    ''' Send configuration to the load balancer, and close access to the
    public interface '''
    # 6443 is the secure apiserver port opened in start_master.
    kube_api.configure(port=6443)
def get_ingress_address(relation_name):
    """Best-effort ingress address for the given relation name."""
    try:
        network_info = hookenv.network_get(relation_name)
    except NotImplementedError:
        network_info = []
    # if they don't have ingress-addresses they are running a juju that
    # doesn't support spaces, so just return the private address
    if not network_info or 'ingress-addresses' not in network_info:
        return hookenv.unit_get('private-address')
    # just grab the first one for now, maybe be more robust here?
    return network_info['ingress-addresses'][0]
@when('certificates.available', 'kube-api-endpoint.available')
def send_data(tls, kube_api_endpoint):
    '''Send the data that is required to create a server certificate for
    this server.'''
    # Use the public ip of this unit as the Common Name for the certificate.
    common_name = hookenv.unit_public_ip()
    # Get the SDN gateway based on the cidr address.
    kubernetes_service_ip = get_kubernetes_service_ip()
    # Get ingress address
    ingress_ip = get_ingress_address(kube_api_endpoint.relation_name)
    domain = hookenv.config('dns_domain')
    # Create SANs that the tls layer will add to the server cert.
    sans = [
        hookenv.unit_public_ip(),
        ingress_ip,
        socket.gethostname(),
        kubernetes_service_ip,
        'kubernetes',
        'kubernetes.{0}'.format(domain),
        'kubernetes.default',
        'kubernetes.default.svc',
        'kubernetes.default.svc.{0}'.format(domain)
    ]
    # maybe they have extra names they want as SANs
    extra_sans = hookenv.config('extra_sans')
    if extra_sans and not extra_sans == "":
        # extra_sans is a space-separated list of names.
        sans.extend(extra_sans.split())
    # Create a path safe name by removing path characters from the unit name.
    certificate_name = hookenv.local_unit().replace('/', '_')
    # Request a server cert with this information.
    tls.request_server_cert(common_name, sans, certificate_name)
@when('config.changed.extra_sans', 'certificates.available',
      'kube-api-endpoint.available')
def update_certificate(tls, kube_api_endpoint):
    # Using the config.changed.extra_sans flag to catch changes.
    # IP changes will take ~5 minutes or so to propagate, but
    # it will update.
    # Re-issue the same request; send_data recomputes the SAN list.
    send_data(tls, kube_api_endpoint)
@when('certificates.server.cert.available',
      'kubernetes-master.components.started',
      'tls_client.server.certificate.written')
def kick_api_server(tls):
    # need to be idempotent and don't want to kick the api server
    # without need
    if data_changed('cert', tls.get_server_cert()):
        # certificate changed, so restart the api server
        hookenv.log("Certificate information changed, restarting api server")
        restart_apiserver()
    # Clear the written flag regardless, so this handler is re-armed.
    tls_client.reset_certificate_write_flag('server')
@when_any('kubernetes-master.components.started', 'ceph-storage.configured')
@when('leadership.is_leader')
def configure_cdk_addons():
    ''' Configure CDK addons

    Collects all addon-related charm config plus ceph relation data,
    pushes it into the cdk-addons snap config, and applies the addons.
    The 'cdk-addons.configured' flag is only set when the apply succeeds.
    '''
    remove_state('cdk-addons.configured')
    load_gpu_plugin = hookenv.config('enable-nvidia-plugin').lower()
    gpuEnable = (get_version('kube-apiserver') >= (1, 9) and
                 load_gpu_plugin == "auto" and
                 is_state('kubernetes-master.gpu.enabled'))
    registry = hookenv.config('addons-registry')
    dbEnabled = str(hookenv.config('enable-dashboard-addons')).lower()
    dnsEnabled = str(hookenv.config('enable-kube-dns')).lower()
    metricsEnabled = str(hookenv.config('enable-metrics')).lower()
    # Ceph CSI support needs both the relation and K8s >= 1.10.
    if (is_state('ceph-storage.configured') and
            get_version('kube-apiserver') >= (1, 10)):
        cephEnabled = "true"
    else:
        cephEnabled = "false"
    ceph_ep = endpoint_from_flag('ceph-storage.available')
    ceph = {}
    default_storage = ''
    if ceph_ep:
        b64_ceph_key = base64.b64encode(ceph_ep.key().encode('utf-8'))
        ceph['admin_key'] = b64_ceph_key.decode('ascii')
        ceph['kubernetes_key'] = b64_ceph_key.decode('ascii')
        ceph['mon_hosts'] = ceph_ep.mon_hosts()
        default_storage = hookenv.config('default-storage')
    args = [
        'arch=' + arch(),
        'dns-ip=' + get_deprecated_dns_ip(),
        'dns-domain=' + hookenv.config('dns_domain'),
        'registry=' + registry,
        'enable-dashboard=' + dbEnabled,
        'enable-kube-dns=' + dnsEnabled,
        'enable-metrics=' + metricsEnabled,
        'enable-gpu=' + str(gpuEnable).lower(),
        'enable-ceph=' + cephEnabled,
        'ceph-admin-key=' + (ceph.get('admin_key', '')),
        # BUG FIX: previously passed admin_key here, although the
        # dedicated kubernetes_key is collected above and was never used.
        'ceph-kubernetes-key=' + (ceph.get('kubernetes_key', '')),
        'ceph-mon-hosts="' + (ceph.get('mon_hosts', '')) + '"',
        'default-storage=' + default_storage,
    ]
    check_call(['snap', 'set', 'cdk-addons'] + args)
    if not addons_ready():
        # Leave the flag unset; a later hook invocation will retry.
        remove_state('cdk-addons.configured')
        return
    set_state('cdk-addons.configured')
@retry(times=3, delay_secs=20)
def addons_ready():
    """
    Test if the add ons got installed

    Returns: True is the addons got applied
    """
    # Retried up to 3 times (20s apart) by the decorator before the
    # caller sees the final False.
    try:
        check_call(['cdk-addons.apply'])
        return True
    except CalledProcessError:
        hookenv.log("Addons are not ready yet.")
        return False
@when('loadbalancer.available', 'certificates.ca.available',
      'certificates.client.cert.available', 'authentication.setup')
def loadbalancer_kubeconfig(loadbalancer, ca, client):
    """Point the admin kubeconfig at the first registered loadbalancer."""
    # Get the potential list of loadbalancers from the relation object;
    # use the first entry's public address and port for cluster access.
    first_lb = loadbalancer.get_addresses_ports()[0]
    server = 'https://{0}:{1}'.format(first_lb.get('public-address'),
                                      first_lb.get('port'))
    build_kubeconfig(server)
@when('certificates.ca.available', 'certificates.client.cert.available',
      'authentication.setup')
@when_not('loadbalancer.available')
def create_self_config(ca, client):
    '''Create a kubernetes configuration for the master unit.'''
    # No loadbalancer: point the kubeconfig straight at this unit.
    public_address = hookenv.unit_get('public-address')
    build_kubeconfig('https://{0}:{1}'.format(public_address, 6443))
@when('ceph-storage.available')
def ceph_state_control(ceph_admin):
    ''' Determine if we should remove the state that controls the re-render
    and execution of the ceph-relation-changed event because there
    are changes in the relationship data, and we should re-render any
    configs, keys, and/or service pre-reqs '''
    snapshot = {
        'mon_hosts': ceph_admin.mon_hosts(),
        'fsid': ceph_admin.fsid(),
        'auth_supported': ceph_admin.auth(),
        'hostname': socket.gethostname(),
        'key': ceph_admin.key()
    }
    # data_changed() remembers the previous snapshot; when anything in the
    # relation data differs, force ceph_storage() to run again.
    if data_changed('ceph-config', snapshot):
        remove_state('ceph-storage.configured')
@when('ceph-storage.available')
@when_not('ceph-storage.configured')
def ceph_storage(ceph_admin):
    '''Ceph on kubernetes will require a few things - namely a ceph
    configuration, and the ceph secret key file used for authentication.
    This method will install the client package, and render the requisit files
    in order to consume the ceph-storage relation.

    param: ceph_admin - the ceph-storage relation endpoint, providing
           mon_hosts(), fsid(), auth() and key().
    '''
    # deprecated in 1.10 in favor of using CSI
    if get_version('kube-apiserver') >= (1, 10):
        # this is actually false, but by setting this flag we won't keep
        # running this function for no reason. Also note that we watch this
        # flag to run cdk-addons.apply.
        set_state('ceph-storage.configured')
        return
    ceph_context = {
        'mon_hosts': ceph_admin.mon_hosts(),
        'fsid': ceph_admin.fsid(),
        'auth_supported': ceph_admin.auth(),
        'use_syslog': "true",
        'ceph_public_network': '',
        'ceph_cluster_network': '',
        'loglevel': 1,
        'hostname': socket.gethostname(),
    }
    # Install the ceph common utilities.
    apt_install(['ceph-common'], fatal=True)
    etc_ceph_directory = '/etc/ceph'
    if not os.path.isdir(etc_ceph_directory):
        os.makedirs(etc_ceph_directory)
    charm_ceph_conf = os.path.join(etc_ceph_directory, 'ceph.conf')
    # Render the ceph configuration from the ceph conf template
    render('ceph.conf', charm_ceph_conf, ceph_context)
    # The key can rotate independently of other ceph config, so validate it
    admin_key = os.path.join(etc_ceph_directory,
                             'ceph.client.admin.keyring')
    try:
        with open(admin_key, 'w') as key_file:
            key_file.write("[client.admin]\n\tkey = {}\n".format(
                ceph_admin.key()))
    except IOError as err:
        hookenv.log("IOError writing admin.keyring: {}".format(err))
    # Enlist the ceph-admin key as a kubernetes secret
    if ceph_admin.key():
        encoded_key = base64.b64encode(ceph_admin.key().encode('utf-8'))
    else:
        # We didn't have a key, and cannot proceed. Do not set state and
        # allow this method to re-execute
        return
    context = {'secret': encoded_key.decode('ascii')}
    render('ceph-secret.yaml', '/tmp/ceph-secret.yaml', context)
    try:
        # At first glance this is deceptive. The apply stanza will create if
        # it doesn't exist, otherwise it will update the entry, ensuring our
        # ceph-secret is always reflective of what we have in /etc/ceph
        # assuming we have invoked this anytime that file would change.
        cmd = ['kubectl', 'apply', '-f', '/tmp/ceph-secret.yaml']
        check_call(cmd)
        os.remove('/tmp/ceph-secret.yaml')
    except (CalledProcessError, OSError):
        # kubectl failed, or the temp file could not be removed: return
        # without setting the configured state so the handler re-executes.
        # The previous bare ``except`` also swallowed SystemExit and
        # KeyboardInterrupt, which must be allowed to propagate.
        return
    # when complete, set a state relating to configuration of the storage
    # backend that will allow other modules to hook into this and verify we
    # have performed the necessary pre-req steps to interface with a ceph
    # deployment.
    set_state('ceph-storage.configured')
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
    # One-shot handler: mark the initial config done first so this handler
    # does not re-fire, then delegate to the shared updater.
    set_state('nrpe-external-master.initial-config')
    update_nrpe_config(nagios)
@when('config.changed.authorization-mode',
      'kubernetes-master.components.started')
def switch_auth_mode():
    """Restart the master components when authorization-mode changes."""
    mode = hookenv.config().get('authorization-mode')
    # Only force a restart when the mode actually differs from last time.
    if data_changed('auth-mode', mode):
        remove_state('kubernetes-master.components.started')
@when('kubernetes-master.components.started')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
          'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
    """(Re)write the NRPE checks for the master's systemd services."""
    monitored = (
        'snap.kube-apiserver.daemon',
        'snap.kube-controller-manager.daemon',
        'snap.kube-scheduler.daemon'
    )
    nagios_host = nrpe.get_nagios_hostname()
    unit_name = nrpe.get_nagios_unit_name()
    checks = nrpe.NRPE(hostname=nagios_host)
    nrpe.add_init_service_checks(checks, monitored, unit_name)
    checks.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
    """Drop the NRPE checks when the nagios relation goes away."""
    remove_state('nrpe-external-master.initial-config')
    # The current nrpe-external-master interface doesn't handle a lot of
    # logic, so lean on the charm-helpers NRPE wrapper to delete the check
    # for each master systemd service.
    checks = nrpe.NRPE(hostname=nrpe.get_nagios_hostname())
    for daemon in ('snap.kube-apiserver.daemon',
                   'snap.kube-controller-manager.daemon',
                   'snap.kube-scheduler.daemon'):
        checks.remove_check(shortname=daemon)
def is_privileged():
    """Return boolean indicating whether or not to set allow-privileged=true.
    """
    setting = hookenv.config('allow-privileged').lower()
    if setting == 'auto':
        # In auto mode, privileged containers follow GPU support.
        return is_state('kubernetes-master.gpu.enabled')
    return setting == 'true'
@when('config.changed.allow-privileged')
@when('kubernetes-master.components.started')
def on_config_allow_privileged_change():
    """React to changed 'allow-privileged' config value.
    """
    # Dropping the started state triggers a full reconfigure/restart of the
    # master services so the apiserver picks up the new privilege setting.
    remove_state('kubernetes-master.components.started')
    # Clear the config-changed flag by hand so this handler is not re-run.
    remove_state('config.changed.allow-privileged')
@when_any('config.changed.api-extra-args',
          'config.changed.audit-policy',
          'config.changed.audit-webhook-config')
@when('kubernetes-master.components.started')
@when('leadership.set.auto_storage_backend')
@when('etcd.available')
def reconfigure_apiserver(etcd):
    # Re-render the kube-apiserver snap configuration (and restart the
    # daemon) whenever any apiserver-related charm option changes.
    configure_apiserver(etcd.get_connection_string())
@when('config.changed.controller-manager-extra-args')
@when('kubernetes-master.components.started')
def on_config_controller_manager_extra_args_change():
    # Re-render controller-manager snap args (and restart the daemon) when
    # the operator changes the extra-args config option.
    configure_controller_manager()
@when('config.changed.scheduler-extra-args')
@when('kubernetes-master.components.started')
def on_config_scheduler_extra_args_change():
    # Re-render scheduler snap args (and restart the daemon) when the
    # operator changes the extra-args config option.
    configure_scheduler()
@when('kube-control.gpu.available')
@when('kubernetes-master.components.started')
@when_not('kubernetes-master.gpu.enabled')
def on_gpu_available(kube_control):
    """The remote side (kubernetes-worker) is gpu-enabled.

    We need to run in privileged mode.
    """
    # Before 1.9 GPU workloads required privileged containers; if the
    # operator explicitly disabled privileged mode there, bail out.
    privileged_disabled = \
        hookenv.config()['allow-privileged'].lower() == "false"
    if privileged_disabled and get_version('kube-apiserver') < (1, 9):
        return
    remove_state('kubernetes-master.components.started')
    set_state('kubernetes-master.gpu.enabled')
@when('kubernetes-master.gpu.enabled')
@when('kubernetes-master.components.started')
@when_not('kubernetes-master.privileged')
def gpu_with_no_privileged():
    """We were in gpu mode, but the operator has set allow-privileged="false",
    so we can't run in gpu mode anymore.
    """
    # Only relevant before 1.9; newer releases don't need privileged mode
    # for GPU support, so the gpu state can stay set there.
    if get_version('kube-apiserver') < (1, 9):
        remove_state('kubernetes-master.gpu.enabled')
@when('kube-control.connected')
@when_not('kube-control.gpu.available')
@when('kubernetes-master.gpu.enabled')
@when('kubernetes-master.components.started')
def gpu_departed(kube_control):
    """We were in gpu mode, but the workers informed us there is
    no gpu support anymore.
    """
    # Dropping the flag lets the next reconfiguration disable gpu handling.
    remove_state('kubernetes-master.gpu.enabled')
@hook('stop')
def shutdown():
    """ Stop the kubernetes master services
    """
    # Stop each master daemon in turn: apiserver first, then the
    # controller-manager and scheduler.
    for daemon in ('snap.kube-apiserver.daemon',
                   'snap.kube-controller-manager.daemon',
                   'snap.kube-scheduler.daemon'):
        service_stop(daemon)
def restart_apiserver():
    # Surface the restart in juju status while the daemon bounces.
    hookenv.status_set('maintenance', 'Restarting kube-apiserver')
    host.service_restart('snap.kube-apiserver.daemon')
def restart_controller_manager():
    # Surface the restart in juju status while the daemon bounces.
    hookenv.status_set('maintenance', 'Restarting kube-controller-manager')
    host.service_restart('snap.kube-controller-manager.daemon')
def restart_scheduler():
    # Surface the restart in juju status while the daemon bounces.
    hookenv.status_set('maintenance', 'Restarting kube-scheduler')
    host.service_restart('snap.kube-scheduler.daemon')
def arch():
    '''Return the package architecture as a string. Raise an exception if the
    architecture is not supported by kubernetes.'''
    # dpkg reports the primary architecture of this system (e.g. amd64);
    # strip the trailing newline and decode the bytes into a string.
    raw_output = check_output(['dpkg', '--print-architecture'])
    return raw_output.rstrip().decode('utf-8')
def build_kubeconfig(server):
    '''Gather the relevant data for Kubernetes configuration objects and create
    a config object with that information.'''
    # Paths to the TLS material come from the tls-client layer options.
    layer_options = layer.options('tls-client')
    ca = layer_options.get('ca_certificate_path')
    client_pass = get_password('basic_auth.csv', 'admin')
    # Without both the CA cert on disk and an admin password there is
    # nothing useful to write yet.
    if not (ca and os.path.isfile(ca)) or not client_pass:
        return
    # Create an absolute path for the kubeconfig file.
    kubeconfig_path = os.path.join(os.sep, 'home', 'ubuntu', 'config')
    # Create the kubeconfig on this system so users can access the cluster.
    create_kubeconfig(kubeconfig_path, server, ca,
                      user='admin', password=client_pass)
    # juju scp runs as the ubuntu user; make sure it can read the file.
    check_call(['chown', 'ubuntu:ubuntu', kubeconfig_path])
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
                      user='ubuntu', context='juju-context',
                      cluster='juju-cluster', password=None, token=None):
    '''Create a configuration for Kubernetes based on path using the supplied
    arguments for values of the Kubernetes server, CA, key, certificate, user
    context and cluster.

    param: kubeconfig - path of the kubeconfig file to write
    param: server - URL of the apiserver (https://host:port)
    param: ca - path to the CA certificate (embedded into the file)
    param: key / certificate - client TLS credential paths (used together)
    param: password - basic-auth password (mutually exclusive with token)
    param: token - bearer token (mutually exclusive with password)

    Raises ValueError if no credential is given or both token and password
    are supplied.
    '''
    if not key and not certificate and not password and not token:
        raise ValueError('Missing authentication mechanism.')

    # token and password are mutually exclusive. Error early if both are
    # present. The developer has requested an impossible situation.
    # see: kubectl config set-credentials --help
    if token and password:
        raise ValueError('Token and Password are mutually exclusive.')
    # Create the config file with the address of the master server.
    # NOTE(review): the commands below are tokenized with split(), which
    # mis-parses values containing whitespace (e.g. passwords or paths with
    # spaces); assumed not to occur here -- confirm.
    cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
          '--server={2} --certificate-authority={3} --embed-certs=true'
    check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
    # Delete old users
    cmd = 'kubectl config --kubeconfig={0} unset users'
    check_call(split(cmd.format(kubeconfig)))
    # Create the credentials using the client flags.
    cmd = 'kubectl config --kubeconfig={0} ' \
          'set-credentials {1} '.format(kubeconfig, user)

    if key and certificate:
        cmd = '{0} --client-key={1} --client-certificate={2} '\
              '--embed-certs=true'.format(cmd, key, certificate)
    if password:
        cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
    # This is mutually exclusive from password. They will not work together.
    if token:
        cmd = "{0} --token={1}".format(cmd, token)
    check_call(split(cmd))
    # Create a default context with the cluster.
    cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
          '--cluster={2} --user={3}'
    check_call(split(cmd.format(kubeconfig, context, cluster, user)))
    # Make the config use this new context.
    cmd = 'kubectl config --kubeconfig={0} use-context {1}'
    check_call(split(cmd.format(kubeconfig, context)))
def get_dns_ip():
    """Return the cluster IP of the kube-dns service in kube-system.

    Queries the apiserver through kubectl and extracts
    ``spec.clusterIP`` from the returned service object.
    """
    # Use an argument list instead of shell=True: the command is fixed, but
    # avoiding the shell removes a needless layer and keeps linters quiet.
    cmd = ['kubectl', 'get', 'service', '--namespace', 'kube-system',
           'kube-dns', '--output', 'json']
    svc = json.loads(check_output(cmd).decode())
    return svc['spec']['clusterIP']
def get_deprecated_dns_ip():
    '''We previously hardcoded the dns ip. This function returns the old
    hardcoded value for use with older versions of cdk_addons.'''
    # Historically the DNS service IP was always ".10" inside the service
    # network; reproduce that address here.
    network = ipaddress.IPv4Interface(service_cidr()).network
    return (network.network_address + 10).exploded
def get_kubernetes_service_ip():
    '''Get the IP address for the kubernetes service based on the cidr.'''
    # The kubernetes service always takes the first usable address (.1)
    # of the configured service network.
    network = ipaddress.IPv4Interface(service_cidr()).network
    return (network.network_address + 1).exploded
def handle_etcd_relation(reldata):
    ''' Save the client credentials and set appropriate daemon flags when
    etcd declares itself as available'''
    # The etcd client TLS material lives under a fixed charm directory.
    etcd_dir = '/root/cdk/etcd'
    ca_path = os.path.join(etcd_dir, 'client-ca.pem')
    key_path = os.path.join(etcd_dir, 'client-key.pem')
    cert_path = os.path.join(etcd_dir, 'client-cert.pem')
    # Persist the credentials carried on the relation to those paths.
    reldata.save_client_credentials(key_path, cert_path, ca_path)
def parse_extra_args(config_key):
    """Parse a space-separated charm option into a flag dictionary.

    Tokens of the form ``key=value`` map to {key: value}; bare tokens map
    to {token: 'true'}.
    """
    args = {}
    for token in hookenv.config().get(config_key, '').split():
        key, sep, value = token.partition('=')
        args[key] = value if sep else 'true'
    return args
def configure_kubernetes_service(service, base_args, extra_args_key):
    """Render a snap service's arguments and remember them for next time.

    Previously-set arguments that are no longer wanted are set to 'null'
    so the snap drops them from its configuration.
    """
    prev_args_key = 'kubernetes-master.prev_args.' + service
    prev_args = db.get(prev_args_key) or {}
    # Null out every previously-set argument first, then layer the base
    # arguments and finally any operator-supplied extras on top.
    args = {arg: 'null' for arg in prev_args}
    args.update(base_args)
    args.update(parse_extra_args(extra_args_key))
    settings = ['%s=%s' % item for item in args.items()]
    check_call(['snap', 'set', service] + settings)
    db.set(prev_args_key, args)
def remove_if_exists(path):
    """Delete the file at ``path``, treating a missing file as success."""
    try:
        os.unlink(path)
    except FileNotFoundError:
        # Already gone -- nothing to do.
        pass
def write_audit_config_file(path, contents):
    """Write an audit config file prefixed with a charm-ownership header."""
    # The header lets operators see the file is managed by the charm.
    header = '# Autogenerated by kubernetes-master charm'
    with open(path, 'w') as f:
        f.write(header + '\n' + contents)
def configure_apiserver(etcd_connection_string):
    """Build the full kube-apiserver flag set and restart the daemon.

    Combines TLS paths from the tls-client layer, etcd credentials,
    admission-control plugins, cloud-provider settings and audit options,
    then delegates to configure_kubernetes_service() and restarts the
    apiserver.

    param: etcd_connection_string - value for the --etcd-servers flag.
    """
    api_opts = {}

    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    client_cert_path = layer_options.get('client_certificate_path')
    client_key_path = layer_options.get('client_key_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    # at one point in time, this code would set ca-client-cert,
    # but this was removed. This was before configure_kubernetes_service
    # kept track of old arguments and removed them, so client-ca-cert
    # was able to hang around forever stored in the snap configuration.
    # This removes that stale configuration from the snap if it still
    # exists.
    api_opts['client-ca-file'] = 'null'

    # Track the privileged state in a charm flag alongside the snap option.
    if is_privileged():
        api_opts['allow-privileged'] = 'true'
        set_state('kubernetes-master.privileged')
    else:
        api_opts['allow-privileged'] = 'false'
        remove_state('kubernetes-master.privileged')

    # Handle static options for now
    api_opts['service-cluster-ip-range'] = service_cidr()
    api_opts['min-request-timeout'] = '300'
    api_opts['v'] = '4'
    api_opts['tls-cert-file'] = server_cert_path
    api_opts['tls-private-key-file'] = server_key_path
    api_opts['kubelet-certificate-authority'] = ca_cert_path
    api_opts['kubelet-client-certificate'] = client_cert_path
    api_opts['kubelet-client-key'] = client_key_path
    api_opts['logtostderr'] = 'true'
    api_opts['insecure-bind-address'] = '127.0.0.1'
    api_opts['insecure-port'] = '8080'
    api_opts['storage-backend'] = getStorageBackend()
    api_opts['basic-auth-file'] = '/root/cdk/basic_auth.csv'
    api_opts['token-auth-file'] = '/root/cdk/known_tokens.csv'
    api_opts['service-account-key-file'] = '/root/cdk/serviceaccount.key'
    api_opts['kubelet-preferred-address-types'] = \
        '[InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP]'
    api_opts['advertise-address'] = get_ingress_address('kube-control')

    # etcd client credentials written by handle_etcd_relation().
    etcd_dir = '/root/cdk/etcd'
    etcd_ca = os.path.join(etcd_dir, 'client-ca.pem')
    etcd_key = os.path.join(etcd_dir, 'client-key.pem')
    etcd_cert = os.path.join(etcd_dir, 'client-cert.pem')

    api_opts['etcd-cafile'] = etcd_ca
    api_opts['etcd-keyfile'] = etcd_key
    api_opts['etcd-certfile'] = etcd_cert
    api_opts['etcd-servers'] = etcd_connection_string

    # Admission plugin sets differ between pre-1.9 and newer releases.
    admission_control_pre_1_9 = [
        'NamespaceLifecycle',
        'LimitRanger',
        'ServiceAccount',
        'ResourceQuota',
        'DefaultTolerationSeconds'
    ]

    admission_control = [
        'NamespaceLifecycle',
        'LimitRanger',
        'ServiceAccount',
        'PersistentVolumeLabel',
        'DefaultStorageClass',
        'DefaultTolerationSeconds',
        'MutatingAdmissionWebhook',
        'ValidatingAdmissionWebhook',
        'ResourceQuota'
    ]

    auth_mode = hookenv.config('authorization-mode')
    if 'Node' in auth_mode:
        admission_control.append('NodeRestriction')

    api_opts['authorization-mode'] = auth_mode

    kube_version = get_version('kube-apiserver')
    if kube_version < (1, 6):
        hookenv.log('Removing DefaultTolerationSeconds from admission-control')
        admission_control_pre_1_9.remove('DefaultTolerationSeconds')
    if kube_version < (1, 9):
        api_opts['admission-control'] = ','.join(admission_control_pre_1_9)
    else:
        api_opts['admission-control'] = ','.join(admission_control)

    # Aggregation layer / metrics flags (1.7+ only, and only when enabled).
    if kube_version > (1, 6) and \
            hookenv.config('enable-metrics'):
        api_opts['requestheader-client-ca-file'] = ca_cert_path
        api_opts['requestheader-allowed-names'] = 'client'
        api_opts['requestheader-extra-headers-prefix'] = 'X-Remote-Extra-'
        api_opts['requestheader-group-headers'] = 'X-Remote-Group'
        api_opts['requestheader-username-headers'] = 'X-Remote-User'
        api_opts['proxy-client-cert-file'] = client_cert_path
        api_opts['proxy-client-key-file'] = client_key_path
        api_opts['enable-aggregator-routing'] = 'true'
        api_opts['client-ca-file'] = ca_cert_path

    # Cloud-provider flags: first matching ready endpoint wins.
    if is_state('endpoint.aws.ready'):
        api_opts['cloud-provider'] = 'aws'
    elif is_state('endpoint.gcp.ready'):
        cloud_config_path = _cloud_config_path('kube-apiserver')
        api_opts['cloud-provider'] = 'gce'
        api_opts['cloud-config'] = str(cloud_config_path)
    elif is_state('endpoint.openstack.ready'):
        cloud_config_path = _cloud_config_path('kube-apiserver')
        api_opts['cloud-provider'] = 'openstack'
        api_opts['cloud-config'] = str(cloud_config_path)
    elif (is_state('endpoint.vsphere.ready') and
          get_version('kube-apiserver') >= (1, 12)):
        cloud_config_path = _cloud_config_path('kube-apiserver')
        api_opts['cloud-provider'] = 'vsphere'
        api_opts['cloud-config'] = str(cloud_config_path)
    elif is_state('endpoint.azure.ready'):
        cloud_config_path = _cloud_config_path('kube-apiserver')
        api_opts['cloud-provider'] = 'azure'
        api_opts['cloud-config'] = str(cloud_config_path)

    # Audit logging / webhook configuration.
    audit_root = '/root/cdk/audit'
    os.makedirs(audit_root, exist_ok=True)

    audit_log_path = audit_root + '/audit.log'
    api_opts['audit-log-path'] = audit_log_path
    api_opts['audit-log-maxsize'] = '100'
    api_opts['audit-log-maxbackup'] = '9'

    audit_policy_path = audit_root + '/audit-policy.yaml'
    audit_policy = hookenv.config('audit-policy')
    if audit_policy:
        write_audit_config_file(audit_policy_path, audit_policy)
        api_opts['audit-policy-file'] = audit_policy_path
    else:
        remove_if_exists(audit_policy_path)

    audit_webhook_config_path = audit_root + '/audit-webhook-config.yaml'
    audit_webhook_config = hookenv.config('audit-webhook-config')
    if audit_webhook_config:
        write_audit_config_file(audit_webhook_config_path,
                                audit_webhook_config)
        api_opts['audit-webhook-config-file'] = audit_webhook_config_path
    else:
        remove_if_exists(audit_webhook_config_path)

    configure_kubernetes_service('kube-apiserver', api_opts, 'api-extra-args')
    restart_apiserver()
def configure_controller_manager():
    """Build the kube-controller-manager flag set and restart the daemon.

    Combines the CA path from the tls-client layer with static options and
    cloud-provider settings, then delegates to
    configure_kubernetes_service() and restarts the controller-manager.
    """
    controller_opts = {}

    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')

    # Default to 3 minute resync. TODO: Make this configurable?
    controller_opts['min-resync-period'] = '3m'
    controller_opts['v'] = '2'
    controller_opts['root-ca-file'] = ca_cert_path
    controller_opts['logtostderr'] = 'true'
    # Talks to the apiserver over its local insecure port.
    controller_opts['master'] = 'http://127.0.0.1:8080'

    controller_opts['service-account-private-key-file'] = \
        '/root/cdk/serviceaccount.key'

    # Cloud-provider flags: first matching ready endpoint wins.
    if is_state('endpoint.aws.ready'):
        controller_opts['cloud-provider'] = 'aws'
    elif is_state('endpoint.gcp.ready'):
        cloud_config_path = _cloud_config_path('kube-controller-manager')
        controller_opts['cloud-provider'] = 'gce'
        controller_opts['cloud-config'] = str(cloud_config_path)
    elif is_state('endpoint.openstack.ready'):
        cloud_config_path = _cloud_config_path('kube-controller-manager')
        controller_opts['cloud-provider'] = 'openstack'
        controller_opts['cloud-config'] = str(cloud_config_path)
    elif (is_state('endpoint.vsphere.ready') and
          get_version('kube-apiserver') >= (1, 12)):
        cloud_config_path = _cloud_config_path('kube-controller-manager')
        controller_opts['cloud-provider'] = 'vsphere'
        controller_opts['cloud-config'] = str(cloud_config_path)
    elif is_state('endpoint.azure.ready'):
        cloud_config_path = _cloud_config_path('kube-controller-manager')
        controller_opts['cloud-provider'] = 'azure'
        controller_opts['cloud-config'] = str(cloud_config_path)

    configure_kubernetes_service('kube-controller-manager', controller_opts,
                                 'controller-manager-extra-args')
    restart_controller_manager()
def configure_scheduler():
    """Build the kube-scheduler flag set and restart the daemon."""
    # The scheduler needs only verbosity/logging flags and the address of
    # the local apiserver's insecure port.
    scheduler_opts = {
        'v': '2',
        'logtostderr': 'true',
        'master': 'http://127.0.0.1:8080',
    }
    configure_kubernetes_service('kube-scheduler', scheduler_opts,
                                 'scheduler-extra-args')
    restart_scheduler()
def setup_basic_auth(password=None, username='admin', uid='admin',
                     groups=None):
    '''Create the basic-auth (htaccess-style) credentials file.

    Writes password,username,uid[,"groups"] to /root/cdk/basic_auth.csv,
    generating a random password when none is supplied.'''
    root_cdk = '/root/cdk'
    if not os.path.isdir(root_cdk):
        os.makedirs(root_cdk)
    secret = password or token_generator()
    if groups:
        entry = '{0},{1},{2},"{3}"'.format(secret, username, uid, groups)
    else:
        entry = '{0},{1},{2}'.format(secret, username, uid)
    with open(os.path.join(root_cdk, 'basic_auth.csv'), 'w') as stream:
        stream.write(entry)
def setup_tokens(token, username, user, groups=None):
    '''Create a token file for kubernetes authentication.

    Appends token,username,user[,"groups"] to /root/cdk/known_tokens.csv,
    generating a random token when none is supplied.'''
    root_cdk = '/root/cdk'
    if not os.path.isdir(root_cdk):
        os.makedirs(root_cdk)
    secret = token or token_generator()
    if groups:
        entry = '{0},{1},{2},"{3}"\n'.format(secret, username, user, groups)
    else:
        entry = '{0},{1},{2}\n'.format(secret, username, user)
    # Append so existing accounts in known_tokens.csv are preserved.
    with open(os.path.join(root_cdk, 'known_tokens.csv'), 'a') as stream:
        stream.write(entry)
def get_password(csv_fname, user):
    '''Get the password of user within the csv file provided.

    Rows are "password,user,..."; returns the password column of the first
    matching row, or None when the file or user is missing.'''
    tokens_fname = os.path.join('/root/cdk', csv_fname)
    if not os.path.isfile(tokens_fname):
        return None
    with open(tokens_fname, 'r') as stream:
        for row in (line.split(',') for line in stream):
            if row[1] == user:
                return row[0]
    return None
def get_token(username):
    """Grab a token from the static file if present. """
    # Tokens share the CSV layout used for passwords: token,user,uid[,groups]
    return get_password('known_tokens.csv', username)
def set_token(password, save_salt):
    ''' Store a token so it can be recalled later by token_generator.

    param: password - the password to be stored
    param: save_salt - the key to store the value of the token.'''
    db.set(save_salt, password)
    # Read back from the store so the caller gets the persisted value.
    return db.get(save_salt)
def token_generator(length=32):
    ''' Generate a random token for use in passwords and account tokens.

    param: length - the length of the token to generate'''
    # SystemRandom draws from the OS entropy source, suitable for secrets.
    rng = random.SystemRandom()
    pool = string.ascii_letters + string.digits
    return ''.join(rng.choice(pool) for _ in range(length))
@retry(times=3, delay_secs=10)
def all_kube_system_pods_running():
    ''' Check pod status in the kube-system namespace. Returns True if all
    pods are running, False otherwise. '''
    cmd = ['kubectl', 'get', 'po', '-n', 'kube-system', '-o', 'json']

    try:
        output = check_output(cmd).decode('utf-8')
        result = json.loads(output)
    except CalledProcessError:
        hookenv.log('failed to get kube-system pod status')
        return False
    hookenv.log('Checking system pods status: {}'.format(', '.join(
        '='.join([pod['metadata']['name'], pod['status']['phase']])
        for pod in result['items'])))

    # On GCP, nodes can be stuck NetworkUnavailable leaving every pod
    # Pending; poke the node status to clear the flag and retry.
    all_pending = all(pod['status']['phase'] == 'Pending'
                      for pod in result['items'])
    if is_state('endpoint.gcp.ready') and all_pending:
        poke_network_unavailable()
        return False

    # All pods must be Running or Evicted (which should re-spawn)
    all_running = all(pod['status']['phase'] == 'Running' or
                      pod['status'].get('reason', '') == 'Evicted'
                      for pod in result['items'])
    return all_running
def poke_network_unavailable():
    """
    Work around https://github.com/kubernetes/kubernetes/issues/44254 by
    manually poking the status into the API server to tell the nodes they have
    a network route.

    This is needed because kubelet sets the NetworkUnavailable flag and expects
    the network plugin to clear it, which only kubenet does. There is some
    discussion about refactoring the affected code but nothing has happened
    in a while.
    """
    cmd = ['kubectl', 'get', 'nodes', '-o', 'json']
    try:
        output = check_output(cmd).decode('utf-8')
        nodes = json.loads(output)['items']
    except CalledProcessError:
        hookenv.log('failed to get kube-system nodes')
        return
    except (KeyError, json.JSONDecodeError) as e:
        hookenv.log('failed to parse kube-system node status '
                    '({}): {}'.format(e, output), hookenv.ERROR)
        return

    for node in nodes:
        node_name = node['metadata']['name']
        # Talk to the apiserver's local insecure port directly; the status
        # subresource cannot be patched through kubectl here.
        url = 'http://localhost:8080/api/v1/nodes/{}/status'.format(node_name)
        with urlopen(url) as response:
            code = response.getcode()
            body = response.read().decode('utf8')
        if code != 200:
            hookenv.log('failed to get node status from {} [{}]: {}'.format(
                url, code, body), hookenv.ERROR)
            return
        try:
            node_info = json.loads(body)
            conditions = node_info['status']['conditions']
            i = [c['type'] for c in conditions].index('NetworkUnavailable')
            if conditions[i]['status'] == 'True':
                hookenv.log('Clearing NetworkUnavailable from {}'.format(
                    node_name))
                # Overwrite the condition and PUT the whole status back.
                conditions[i] = {
                    "type": "NetworkUnavailable",
                    "status": "False",
                    "reason": "RouteCreated",
                    "message": "Manually set through k8s api",
                }
                req = Request(url, method='PUT',
                              data=json.dumps(node_info).encode('utf8'),
                              headers={'Content-Type': 'application/json'})
                with urlopen(req) as response:
                    code = response.getcode()
                    body = response.read().decode('utf8')
                if code not in (200, 201, 202):
                    hookenv.log('failed to update node status [{}]: {}'.format(
                        code, body), hookenv.ERROR)
                    return
        except (json.JSONDecodeError, KeyError):
            hookenv.log('failed to parse node status: {}'.format(body),
                        hookenv.ERROR)
            return
def apiserverVersion():
    """Return the local kube-apiserver version as an (x, y, z) int tuple."""
    # Pull the first three numeric groups out of the --version banner.
    banner = check_output(['kube-apiserver', '--version']).decode('utf-8')
    return tuple(int(part) for part in re.findall("[0-9]+", banner)[:3])
def touch(fname):
    """Update the mtime of ``fname``, creating an empty file if absent."""
    try:
        os.utime(fname, None)
    except OSError:
        # The file does not exist (or cannot be stat'ed): create it empty.
        with open(fname, 'a'):
            pass
def getStorageBackend():
    """Return the configured etcd storage backend.

    'auto' defers to the leader-negotiated auto_storage_backend value.
    """
    backend = hookenv.config('storage-backend')
    return leader_get('auto_storage_backend') if backend == 'auto' else backend
@when('leadership.is_leader')
@when_not('leadership.set.cluster_tag')
def create_cluster_tag():
    """Generate a unique cluster tag and publish it via leadership data."""
    tag = 'kubernetes-{}'.format(token_generator().lower())
    leader_set(cluster_tag=tag)
@when('leadership.set.cluster_tag',
      'kube-control.connected')
@when_not('kubernetes-master.cluster-tag-sent')
def send_cluster_tag():
    """Push the leader-chosen cluster tag down to connected workers."""
    kube_control = endpoint_from_flag('kube-control.connected')
    kube_control.set_cluster_tag(leader_get('cluster_tag'))
    set_state('kubernetes-master.cluster-tag-sent')
@when_not('kube-control.connected')
def clear_cluster_tag_sent():
    # With no workers connected, allow the tag to be re-sent when a new
    # kube-control relation appears.
    remove_state('kubernetes-master.cluster-tag-sent')
@when_any('endpoint.aws.joined',
          'endpoint.gcp.joined',
          'endpoint.openstack.joined',
          'endpoint.vsphere.joined',
          'endpoint.azure.joined')
@when_not('kubernetes-master.cloud.ready')
def set_cloud_pending():
    """Mark cloud integration pending, or blocked on unsupported versions.

    vsphere needs kubernetes >= 1.12 and azure >= 1.11; joining either on
    an older release blocks the charm instead.
    """
    k8s_version = get_version('kube-apiserver')
    blocked = (
        (is_state('endpoint.vsphere.joined') and k8s_version < (1, 12)) or
        (is_state('endpoint.azure.joined') and k8s_version < (1, 11))
    )
    if blocked:
        set_state('kubernetes-master.cloud.blocked')
    else:
        remove_state('kubernetes-master.cloud.blocked')
    set_state('kubernetes-master.cloud.pending')
@when_any('endpoint.aws.joined',
          'endpoint.gcp.joined',
          'endpoint.azure.joined')
@when('leadership.set.cluster_tag')
@when_not('kubernetes-master.cloud.request-sent')
def request_integration():
    """Ask the joined cloud integrator for master-level cloud access.

    Tags/labels this instance with the leader's cluster tag and requests
    the cloud features the master needs (object storage, security, etc.).
    """
    hookenv.status_set('maintenance', 'requesting cloud integration')
    cluster_tag = leader_get('cluster_tag')
    if is_state('endpoint.aws.joined'):
        cloud = endpoint_from_flag('endpoint.aws.joined')
        # AWS: tag the instance, its security group and subnet so the
        # in-tree cloud provider can discover cluster resources.
        cloud.tag_instance({
            'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
            'k8s.io/role/master': 'true',
        })
        cloud.tag_instance_security_group({
            'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
        })
        cloud.tag_instance_subnet({
            'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
        })
        cloud.enable_object_storage_management(['kubernetes-*'])
        cloud.enable_load_balancer_management()
    elif is_state('endpoint.gcp.joined'):
        cloud = endpoint_from_flag('endpoint.gcp.joined')
        cloud.label_instance({
            'k8s-io-cluster-name': cluster_tag,
            'k8s-io-role-master': 'master',
        })
        cloud.enable_object_storage_management()
        cloud.enable_security_management()
    elif is_state('endpoint.azure.joined'):
        cloud = endpoint_from_flag('endpoint.azure.joined')
        cloud.tag_instance({
            'k8s-io-cluster-name': cluster_tag,
            'k8s-io-role-master': 'master',
        })
        cloud.enable_object_storage_management()
        cloud.enable_security_management()
    # Common feature requests for every integrated cloud. The decorator
    # guarantees one of the branches above ran, so `cloud` is bound here.
    cloud.enable_instance_inspection()
    cloud.enable_network_management()
    cloud.enable_dns_management()
    cloud.enable_block_storage_management()
    set_state('kubernetes-master.cloud.request-sent')
@when_none('endpoint.aws.joined',
           'endpoint.gcp.joined',
           'endpoint.openstack.joined',
           'endpoint.vsphere.joined',
           'endpoint.azure.joined')
def clear_cloud_flags():
    """Reset all cloud-integration states when no cloud endpoint remains."""
    for flag in ('kubernetes-master.cloud.pending',
                 'kubernetes-master.cloud.request-sent',
                 'kubernetes-master.cloud.blocked',
                 'kubernetes-master.cloud.ready'):
        remove_state(flag)
@when_any('endpoint.aws.ready',
          'endpoint.gcp.ready',
          'endpoint.openstack.ready',
          'endpoint.vsphere.ready',
          'endpoint.azure.ready')
@when_not('kubernetes-master.cloud.blocked',
          'kubernetes-master.cloud.ready')
def cloud_ready():
    """Write provider-specific snap config once a cloud endpoint is ready."""
    # aws needs no extra config files, so it has no entry in this table.
    writers = (
        ('endpoint.gcp.ready', _write_gcp_snap_config),
        ('endpoint.openstack.ready', _write_openstack_snap_config),
        ('endpoint.vsphere.ready', _write_vsphere_snap_config),
        ('endpoint.azure.ready', _write_azure_snap_config),
    )
    for flag, write_config in writers:
        if is_state(flag):
            write_config('kube-apiserver')
            write_config('kube-controller-manager')
            break
    remove_state('kubernetes-master.cloud.pending')
    set_state('kubernetes-master.cloud.ready')
    remove_state('kubernetes-master.components.started')  # force restart
def _snap_common_path(component):
return Path('/var/snap/{}/common'.format(component))
def _cloud_config_path(component):
    # Per-snap location of the file passed via --cloud-config.
    return _snap_common_path(component) / 'cloud-config.conf'
def _gcp_creds_path(component):
    # Per-snap location of the GCP service-account credentials file.
    return _snap_common_path(component) / 'gcp-creds.json'
def _daemon_env_path(component):
    # Per-snap environment file sourced by the daemon on startup.
    return _snap_common_path(component) / 'environment'
def _cdk_addons_template_path():
    # Templates shipped inside the installed cdk-addons snap.
    return Path('/snap/cdk-addons/current/templates')
def _write_gcp_snap_config(component):
    """Write GCP credentials, cloud-config and env file for a snap daemon."""
    # gcp requires additional credentials setup
    gcp = endpoint_from_flag('endpoint.gcp.ready')
    creds_path = _gcp_creds_path(component)
    with creds_path.open('w') as fp:
        # Tighten permissions before writing: the file holds credentials.
        os.fchmod(fp.fileno(), 0o600)
        fp.write(gcp.credentials)

    # create a cloud-config file that sets token-url to nil to make the
    # services use the creds env var instead of the metadata server, as
    # well as making the cluster multizone
    cloud_config_path = _cloud_config_path(component)
    cloud_config_path.write_text('[Global]\n'
                                 'token-url = nil\n'
                                 'multizone = true\n')

    # Append the creds env var to the daemon environment file, preserving
    # existing content and avoiding a duplicate entry on re-runs.
    daemon_env_path = _daemon_env_path(component)
    if daemon_env_path.exists():
        daemon_env = daemon_env_path.read_text()
        if not daemon_env.endswith('\n'):
            daemon_env += '\n'
    else:
        daemon_env = ''
    # gcp_creds_env_key is presumably a module-level constant defined
    # elsewhere in this file -- confirm.
    if gcp_creds_env_key not in daemon_env:
        daemon_env += '{}={}\n'.format(gcp_creds_env_key, creds_path)
        daemon_env_path.parent.mkdir(parents=True, exist_ok=True)
        daemon_env_path.write_text(daemon_env)
def _write_openstack_snap_config(component):
    """Render the OpenStack cloud-config file for the given snap component."""
    # openstack requires additional credentials setup
    openstack = endpoint_from_flag('endpoint.openstack.ready')
    config_lines = [
        '[Global]',
        'auth-url = {}'.format(openstack.auth_url),
        'username = {}'.format(openstack.username),
        'password = {}'.format(openstack.password),
        'tenant-name = {}'.format(openstack.project_name),
        'domain-name = {}'.format(openstack.user_domain_name),
    ]
    # Note: no trailing newline, matching '\n'.join semantics.
    _cloud_config_path(component).write_text('\n'.join(config_lines))
def _write_vsphere_snap_config(component):
    """Render the vSphere cloud-config file for the given snap component."""
    # vsphere requires additional cloud config
    vsphere = endpoint_from_flag('endpoint.vsphere.ready')

    # NB: vsphere provider will ask kube-apiserver and -controller-manager to
    # find a uuid from sysfs unless a global config value is set. Our strict
    # snaps cannot read sysfs, so let's do it in the charm. An invalid uuid is
    # not fatal for storage, but it will muddy the logs; try to get it right.
    uuid_file = '/sys/class/dmi/id/product_uuid'
    try:
        with open(uuid_file, 'r') as f:
            uuid = f.read().strip()
    except IOError as err:
        # Fall back to a placeholder; per the NB above, a bad uuid is non-fatal.
        hookenv.log("Unable to read UUID from sysfs: {}".format(err))
        uuid = 'UNKNOWN'

    cloud_config_path = _cloud_config_path(component)
    cloud_config_path.write_text('\n'.join([
        '[Global]',
        'insecure-flag = true',
        'datacenters = "{}"'.format(vsphere.datacenter),
        'vm-uuid = "VMware-{}"'.format(uuid),
        '[VirtualCenter "{}"]'.format(vsphere.vsphere_ip),
        'user = {}'.format(vsphere.user),
        'password = {}'.format(vsphere.password),
        '[Workspace]',
        'server = {}'.format(vsphere.vsphere_ip),
        'datacenter = "{}"'.format(vsphere.datacenter),
        'default-datastore = "{}"'.format(vsphere.datastore),
        'folder = "kubernetes"',
        'resourcepool-path = ""',
        '[Disk]',
        'scsicontrollertype = "pvscsi"',
    ]))
def _write_azure_snap_config(component):
    """Render the Azure cloud provider config (JSON) for the snap component."""
    azure = endpoint_from_flag('endpoint.azure.ready')
    cloud_config_path = _cloud_config_path(component)
    cloud_config_path.write_text(json.dumps({
        'useInstanceMetadata': True,
        'useManagedIdentityExtension': True,
        'subscriptionId': azure.subscription_id,
        'resourceGroup': azure.resource_group,
        'location': azure.resource_group_location,
        'vnetName': azure.vnet_name,
        'vnetResourceGroup': azure.vnet_resource_group,
        'subnetName': azure.subnet_name,
        'securityGroupName': azure.security_group_name,
})) | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright (C) 2010 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.common.base;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.annotations.GwtCompatible;
import java.nio.charset.StandardCharsets;
/**
* Static methods pertaining to ASCII characters (those in the range of values {@code 0x00} through
* {@code 0x7F}), and to strings containing such characters.
*
* <p>ASCII utilities also exist in other classes of this package:
*
* <ul>
* <!-- TODO(kevinb): how can we make this not produce a warning when building gwt javadoc? -->
* <li>{@link StandardCharsets#US_ASCII} specifies the {@code Charset} of ASCII characters.
* <li>{@link CharMatcher#ascii} matches ASCII characters and provides text processing methods
* which operate only on the ASCII characters of a string.
* </ul>
*
* @author Catherine Berry
* @author Gregory Kick
* @since 7.0
*/
@GwtCompatible
public final class Ascii {
private Ascii() {}
/* The ASCII control characters, per RFC 20. */
/**
* Null ('\0'): The all-zeros character which may serve to accomplish time fill and media fill.
* Normally used as a C string terminator.
*
* <p>Although RFC 20 names this as "Null", note that it is distinct from the C/C++ "NULL"
* pointer.
*
* @since 8.0
*/
public static final byte NUL = 0;
/**
* Start of Heading: A communication control character used at the beginning of a sequence of
* characters which constitute a machine-sensible address or routing information. Such a sequence
* is referred to as the "heading." An STX character has the effect of terminating a heading.
*
* @since 8.0
*/
public static final byte SOH = 1;
/**
* Start of Text: A communication control character which precedes a sequence of characters that
* is to be treated as an entity and entirely transmitted through to the ultimate destination.
* Such a sequence is referred to as "text." STX may be used to terminate a sequence of characters
* started by SOH.
*
* @since 8.0
*/
public static final byte STX = 2;
/**
* End of Text: A communication control character used to terminate a sequence of characters
* started with STX and transmitted as an entity.
*
* @since 8.0
*/
public static final byte ETX = 3;
/**
* End of Transmission: A communication control character used to indicate the conclusion of a
* transmission, which may have contained one or more texts and any associated headings.
*
* @since 8.0
*/
public static final byte EOT = 4;
/**
* Enquiry: A communication control character used in data communication systems as a request for
* a response from a remote station. It may be used as a "Who Are You" (WRU) to obtain
* identification, or may be used to obtain station status, or both.
*
* @since 8.0
*/
public static final byte ENQ = 5;
/**
* Acknowledge: A communication control character transmitted by a receiver as an affirmative
* response to a sender.
*
* @since 8.0
*/
public static final byte ACK = 6;
/**
* Bell ('\a'): A character for use when there is a need to call for human attention. It may
* control alarm or attention devices.
*
* @since 8.0
*/
public static final byte BEL = 7;
/**
* Backspace ('\b'): A format effector which controls the movement of the printing position one
* printing space backward on the same printing line. (Applicable also to display devices.)
*
* @since 8.0
*/
public static final byte BS = 8;
/**
* Horizontal Tabulation ('\t'): A format effector which controls the movement of the printing
* position to the next in a series of predetermined positions along the printing line.
* (Applicable also to display devices and the skip function on punched cards.)
*
* @since 8.0
*/
public static final byte HT = 9;
/**
* Line Feed ('\n'): A format effector which controls the movement of the printing position to the
* next printing line. (Applicable also to display devices.) Where appropriate, this character may
* have the meaning "New Line" (NL), a format effector which controls the movement of the printing
* point to the first printing position on the next printing line. Use of this convention requires
* agreement between sender and recipient of data.
*
* @since 8.0
*/
public static final byte LF = 10;
/**
* Alternate name for {@link #LF}. ({@code LF} is preferred.)
*
* @since 8.0
*/
public static final byte NL = 10;
/**
* Vertical Tabulation ('\v'): A format effector which controls the movement of the printing
* position to the next in a series of predetermined printing lines. (Applicable also to display
* devices.)
*
* @since 8.0
*/
public static final byte VT = 11;
/**
* Form Feed ('\f'): A format effector which controls the movement of the printing position to the
* first pre-determined printing line on the next form or page. (Applicable also to display
* devices.)
*
* @since 8.0
*/
public static final byte FF = 12;
/**
* Carriage Return ('\r'): A format effector which controls the movement of the printing position
* to the first printing position on the same printing line. (Applicable also to display devices.)
*
* @since 8.0
*/
public static final byte CR = 13;
/**
* Shift Out: A control character indicating that the code combinations which follow shall be
* interpreted as outside of the character set of the standard code table until a Shift In
* character is reached.
*
* @since 8.0
*/
public static final byte SO = 14;
/**
* Shift In: A control character indicating that the code combinations which follow shall be
* interpreted according to the standard code table.
*
* @since 8.0
*/
public static final byte SI = 15;
/**
* Data Link Escape: A communication control character which will change the meaning of a limited
* number of contiguously following characters. It is used exclusively to provide supplementary
* controls in data communication networks.
*
* @since 8.0
*/
public static final byte DLE = 16;
/**
* Device Control 1. Characters for the control of ancillary devices associated with data
* processing or telecommunication systems, more especially switching devices "on" or "off." (If a
* single "stop" control is required to interrupt or turn off ancillary devices, DC4 is the
* preferred assignment.)
*
* @since 8.0
*/
public static final byte DC1 = 17; // aka XON
/**
* Transmission On: Although originally defined as DC1, this ASCII control character is now better
* known as the XON code used for software flow control in serial communications. The main use is
* restarting the transmission after the communication has been stopped by the XOFF control code.
*
* @since 8.0
*/
public static final byte XON = 17; // aka DC1
/**
* Device Control 2. Characters for the control of ancillary devices associated with data
* processing or telecommunication systems, more especially switching devices "on" or "off." (If a
* single "stop" control is required to interrupt or turn off ancillary devices, DC4 is the
* preferred assignment.)
*
* @since 8.0
*/
public static final byte DC2 = 18;
/**
* Device Control 3. Characters for the control of ancillary devices associated with data
* processing or telecommunication systems, more especially switching devices "on" or "off." (If a
* single "stop" control is required to interrupt or turn off ancillary devices, DC4 is the
* preferred assignment.)
*
* @since 8.0
*/
public static final byte DC3 = 19; // aka XOFF
/**
* Transmission off. See {@link #XON} for explanation.
*
* @since 8.0
*/
public static final byte XOFF = 19; // aka DC3
/**
* Device Control 4. Characters for the control of ancillary devices associated with data
* processing or telecommunication systems, more especially switching devices "on" or "off." (If a
* single "stop" control is required to interrupt or turn off ancillary devices, DC4 is the
* preferred assignment.)
*
* @since 8.0
*/
public static final byte DC4 = 20;
/**
* Negative Acknowledge: A communication control character transmitted by a receiver as a negative
* response to the sender.
*
* @since 8.0
*/
public static final byte NAK = 21;
/**
* Synchronous Idle: A communication control character used by a synchronous transmission system
* in the absence of any other character to provide a signal from which synchronism may be
* achieved or retained.
*
* @since 8.0
*/
public static final byte SYN = 22;
/**
* End of Transmission Block: A communication control character used to indicate the end of a
* block of data for communication purposes. ETB is used for blocking data where the block
* structure is not necessarily related to the processing format.
*
* @since 8.0
*/
public static final byte ETB = 23;
/**
* Cancel: A control character used to indicate that the data with which it is sent is in error or
* is to be disregarded.
*
* @since 8.0
*/
public static final byte CAN = 24;
/**
* End of Medium: A control character associated with the sent data which may be used to identify
* the physical end of the medium, or the end of the used, or wanted, portion of information
* recorded on a medium. (The position of this character does not necessarily correspond to the
* physical end of the medium.)
*
* @since 8.0
*/
public static final byte EM = 25;
/**
* Substitute: A character that may be substituted for a character which is determined to be
* invalid or in error.
*
* @since 8.0
*/
public static final byte SUB = 26;
/**
* Escape: A control character intended to provide code extension (supplementary characters) in
* general information interchange. The Escape character itself is a prefix affecting the
* interpretation of a limited number of contiguously following characters.
*
* @since 8.0
*/
public static final byte ESC = 27;
/**
* File Separator: These four information separators may be used within data in optional fashion,
* except that their hierarchical relationship shall be: FS is the most inclusive, then GS, then
* RS, and US is least inclusive. (The content and length of a File, Group, Record, or Unit are
* not specified.)
*
* @since 8.0
*/
public static final byte FS = 28;
/**
* Group Separator: These four information separators may be used within data in optional fashion,
* except that their hierarchical relationship shall be: FS is the most inclusive, then GS, then
* RS, and US is least inclusive. (The content and length of a File, Group, Record, or Unit are
* not specified.)
*
* @since 8.0
*/
public static final byte GS = 29;
/**
* Record Separator: These four information separators may be used within data in optional
* fashion, except that their hierarchical relationship shall be: FS is the most inclusive, then
* GS, then RS, and US is least inclusive. (The content and length of a File, Group, Record, or
* Unit are not specified.)
*
* @since 8.0
*/
public static final byte RS = 30;
/**
* Unit Separator: These four information separators may be used within data in optional fashion,
* except that their hierarchical relationship shall be: FS is the most inclusive, then GS, then
* RS, and US is least inclusive. (The content and length of a File, Group, Record, or Unit are
* not specified.)
*
* @since 8.0
*/
public static final byte US = 31;
/**
* Space: A normally non-printing graphic character used to separate words. It is also a format
* effector which controls the movement of the printing position, one printing position forward.
* (Applicable also to display devices.)
*
* @since 8.0
*/
public static final byte SP = 32;
/**
* Alternate name for {@link #SP}.
*
* @since 8.0
*/
public static final byte SPACE = 32;
/**
* Delete: This character is used primarily to "erase" or "obliterate" erroneous or unwanted
* characters in perforated tape.
*
* @since 8.0
*/
public static final byte DEL = 127;
/**
* The minimum value of an ASCII character.
*
* @since 9.0 (was type {@code int} before 12.0)
*/
public static final char MIN = 0;
/**
* The maximum value of an ASCII character.
*
* @since 9.0 (was type {@code int} before 12.0)
*/
public static final char MAX = 127;
/** A bit mask which selects the bit encoding ASCII character case. */
private static final char CASE_MASK = 0x20;
/**
* Returns a copy of the input string in which all {@linkplain #isUpperCase(char) uppercase ASCII
* characters} have been converted to lowercase. All other characters are copied without
* modification.
*/
public static String toLowerCase(String string) {
  int length = string.length();
  // Find the first uppercase ASCII char; until then, no copy is made.
  int i = 0;
  while (i < length && !isUpperCase(string.charAt(i))) {
    i++;
  }
  if (i == length) {
    // No uppercase ASCII present; hand back the original instance.
    return string;
  }
  char[] buf = string.toCharArray();
  // Lowercase the remainder in place, starting at the first hit.
  for (; i < length; i++) {
    char c = buf[i];
    if (isUpperCase(c)) {
      buf[i] = (char) (c ^ CASE_MASK);
    }
  }
  return String.valueOf(buf);
}
/**
* Returns a copy of the input character sequence in which all {@linkplain #isUpperCase(char)
* uppercase ASCII characters} have been converted to lowercase. All other characters are copied
* without modification.
*
* @since 14.0
*/
public static String toLowerCase(CharSequence chars) {
  // Delegate to the String overload, which avoids copying when possible.
  if (chars instanceof String) {
    return toLowerCase((String) chars);
  }
  int n = chars.length();
  char[] result = new char[n];
  for (int i = 0; i < n; i++) {
    result[i] = toLowerCase(chars.charAt(i));
  }
  return String.valueOf(result);
}
/**
* If the argument is an {@linkplain #isUpperCase(char) uppercase ASCII character}, returns the
* lowercase equivalent. Otherwise returns the argument.
*/
public static char toLowerCase(char c) {
  // ASCII letters differ between cases only in bit 0x20; setting it on an
  // uppercase letter yields the lowercase equivalent.
  return (c >= 'A' && c <= 'Z') ? (char) (c | 0x20) : c;
}
/**
* Returns a copy of the input string in which all {@linkplain #isLowerCase(char) lowercase ASCII
* characters} have been converted to uppercase. All other characters are copied without
* modification.
*/
public static String toUpperCase(String string) {
  int length = string.length();
  // Find the first lowercase ASCII char; until then, no copy is made.
  int i = 0;
  while (i < length && !isLowerCase(string.charAt(i))) {
    i++;
  }
  if (i == length) {
    // No lowercase ASCII present; hand back the original instance.
    return string;
  }
  char[] buf = string.toCharArray();
  // Uppercase the remainder in place, starting at the first hit.
  for (; i < length; i++) {
    char c = buf[i];
    if (isLowerCase(c)) {
      buf[i] = (char) (c ^ CASE_MASK);
    }
  }
  return String.valueOf(buf);
}
/**
* Returns a copy of the input character sequence in which all {@linkplain #isLowerCase(char)
* lowercase ASCII characters} have been converted to uppercase. All other characters are copied
* without modification.
*
* @since 14.0
*/
public static String toUpperCase(CharSequence chars) {
  // Delegate to the String overload, which avoids copying when possible.
  if (chars instanceof String) {
    return toUpperCase((String) chars);
  }
  int n = chars.length();
  char[] result = new char[n];
  for (int i = 0; i < n; i++) {
    result[i] = toUpperCase(chars.charAt(i));
  }
  return String.valueOf(result);
}
/**
* If the argument is a {@linkplain #isLowerCase(char) lowercase ASCII character}, returns the
* uppercase equivalent. Otherwise returns the argument.
*/
public static char toUpperCase(char c) {
  // ASCII letters differ between cases only in bit 0x20; clearing it on a
  // lowercase letter yields the uppercase equivalent.
  return (c >= 'a' && c <= 'z') ? (char) (c & ~0x20) : c;
}
/**
* Indicates whether {@code c} is one of the twenty-six lowercase ASCII alphabetic characters
* between {@code 'a'} and {@code 'z'} inclusive. All others (including non-ASCII characters)
* return {@code false}.
*/
public static boolean isLowerCase(char c) {
  // Simple range test; non-ASCII characters always fall outside 'a'..'z'.
  return 'a' <= c && c <= 'z';
}
/**
* Indicates whether {@code c} is one of the twenty-six uppercase ASCII alphabetic characters
* between {@code 'A'} and {@code 'Z'} inclusive. All others (including non-ASCII characters)
* return {@code false}.
*/
public static boolean isUpperCase(char c) {
  // Simple range test; non-ASCII characters always fall outside 'A'..'Z'.
  return 'A' <= c && c <= 'Z';
}
/**
* Truncates the given character sequence to the given maximum length. If the length of the
* sequence is greater than {@code maxLength}, the returned string will be exactly {@code
* maxLength} chars in length and will end with the given {@code truncationIndicator}. Otherwise,
* the sequence will be returned as a string with no changes to the content.
*
* <p>Examples:
*
* {@snippet :
* Ascii.truncate("foobar", 7, "..."); // returns "foobar"
* Ascii.truncate("foobar", 5, "..."); // returns "fo..."
* }
*
* <p><b>Note:</b> This method <i>may</i> work with certain non-ASCII text but is not safe for use
* with arbitrary Unicode text. It is mostly intended for use with text that is known to be safe
* for use with it (such as all-ASCII text) and for simple debugging text. When using this method,
* consider the following:
*
* <ul>
* <li>it may split surrogate pairs
* <li>it may split characters and combining characters
* <li>it does not consider word boundaries
* <li>if truncating for display to users, there are other considerations that must be taken
* into account
* <li>the appropriate truncation indicator may be locale-dependent
* <li>it is safe to use non-ASCII characters in the truncation indicator
* </ul>
*
* @throws IllegalArgumentException if {@code maxLength} is less than the length of {@code
* truncationIndicator}
* @since 16.0
*/
public static String truncate(CharSequence seq, int maxLength, String truncationIndicator) {
  checkNotNull(seq);
  // length to truncate the sequence to, not including the truncation indicator
  int truncationLength = maxLength - truncationIndicator.length();
  // in this worst case, this allows a maxLength equal to the length of the truncationIndicator,
  // meaning that a string will be truncated to just the truncation indicator itself
  checkArgument(
      truncationLength >= 0,
      "maxLength (%s) must be >= length of the truncation indicator (%s)",
      maxLength,
      truncationIndicator.length());
  if (seq.length() <= maxLength) {
    String string = seq.toString();
    // CharSequence.toString() is not contractually guaranteed to preserve
    // length, so re-check before returning the String unchanged.
    if (string.length() <= maxLength) {
      return string;
    }
    // if the length of the toString() result was > maxLength for some reason, truncate that
    seq = string;
  }
  // Truncate and append the indicator; result is exactly maxLength chars.
  return new StringBuilder(maxLength)
      .append(seq, 0, truncationLength)
      .append(truncationIndicator)
      .toString();
}
/**
* Indicates whether the contents of the given character sequences {@code s1} and {@code s2} are
* equal, ignoring the case of any ASCII alphabetic characters between {@code 'a'} and {@code 'z'}
* or {@code 'A'} and {@code 'Z'} inclusive.
*
* <p>This method is significantly faster than {@link String#equalsIgnoreCase} and should be used
* in preference if at least one of the parameters is known to contain only ASCII characters.
*
* <p>Note however that this method does not always behave identically to expressions such as:
*
* <ul>
* <li>{@code string.toUpperCase().equals("UPPER CASE ASCII")}
* <li>{@code string.toLowerCase().equals("lower case ascii")}
* </ul>
*
* <p>due to case-folding of some non-ASCII characters (which does not occur in {@link
* String#equalsIgnoreCase}). However in almost all cases that ASCII strings are used, the author
* probably wanted the behavior provided by this method rather than the subtle and sometimes
* surprising behavior of {@code toUpperCase()} and {@code toLowerCase()}.
*
* @since 16.0
*/
public static boolean equalsIgnoreCase(CharSequence s1, CharSequence s2) {
  // Calling length() doubles as the null pointer check for s1 (so it must
  // happen before the identity early-exit below).
  int length = s1.length();
  if (s1 == s2) {
    return true;
  }
  if (length != s2.length()) {
    return false;
  }
  for (int i = 0; i < length; i++) {
    char c1 = s1.charAt(i);
    char c2 = s2.charAt(i);
    if (c1 != c2) {
      // Differing chars are equal ignoring case only when both are ASCII
      // letters mapping to the same alphabet position.
      int index1 = getAlphaIndex(c1);
      if (index1 >= 26 || index1 != getAlphaIndex(c2)) {
        return false;
      }
    }
  }
  return true;
}
/**
* Returns the non-negative index value of the alpha character {@code c}, regardless of case. Ie,
* 'a'/'A' returns 0 and 'z'/'Z' returns 25. Non-alpha characters return a value of 26 or greater.
*/
/**
 * Returns the zero-based alphabet position of {@code c} ignoring case
 * ('a'/'A' -> 0, 'z'/'Z' -> 25); non-letters yield 26 or greater.
 */
private static int getAlphaIndex(char c) {
  // Setting bit 0x20 folds 'A'..'Z' onto 'a'..'z'; the char cast keeps the
  // subtraction unsigned so anything below 'a' wraps to a large value.
  return (char) ((c | 0x20) - 'a');
}
} | java | github | https://github.com/google/guava | android/guava/src/com/google/common/base/Ascii.java |
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.docs.integration.mailusage
import org.springframework.docs.integration.mailusagesimple.Order
// tag::snippet[]
/** Service abstraction used by the mail-usage documentation example. */
interface OrderManager {
    /** Submits the given [order] for processing. */
    fun placeOrder(order: Order)
}
// end::snippet[] | kotlin | github | https://github.com/spring-projects/spring-framework | framework-docs/src/main/kotlin/org/springframework/docs/integration/mailusage/OrderManager.kt |
#
# gdb helper commands and functions for Linux kernel debugging
#
# module tools
#
# Copyright (c) Siemens AG, 2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import cpus, utils
module_type = utils.CachedType("struct module")
def module_list():
    """Yield 'struct module *' values for every loaded kernel module.

    Walks the kernel's global 'modules' linked list until the iteration
    wraps back around to the list head.
    """
    global module_type
    ptr_type = module_type.get_type().pointer()
    head = gdb.parse_and_eval("modules")
    node = head['next']
    while node != head.address:
        yield utils.container_of(node, ptr_type, "list")
        node = node['next']
def find_module_by_name(name):
    """Return the loaded module whose name matches, or None if not found."""
    return next(
        (module for module in module_list()
         if module['name'].string() == name),
        None)
class LxModule(gdb.Function):
    """Find module by name and return the module variable.

$lx_module("MODULE"): Given the name MODULE, iterate over all loaded modules
of the target and return that module variable which MODULE matches."""

    def __init__(self):
        # Registers the convenience function under the name $lx_module.
        super(LxModule, self).__init__("lx_module")

    def invoke(self, mod_name):
        # gdb passes the argument as a gdb.Value; convert to a Python str.
        mod_name = mod_name.string()
        module = find_module_by_name(mod_name)
        if module:
            # Return the struct itself rather than the pointer.
            return module.dereference()
        else:
            raise gdb.GdbError("Unable to find MODULE " + mod_name)
# Instantiating the class registers the $lx_module function with gdb.
LxModule()
class LxLsmod(gdb.Command):
    """List currently loaded modules."""

    # Cached lookup of 'struct module_use', resolved lazily against the target.
    _module_use_type = utils.CachedType("struct module_use")

    def __init__(self):
        super(LxLsmod, self).__init__("lx-lsmod", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        # Header line; the address column is padded wider on 64-bit targets.
        gdb.write(
            "Address{0} Module Size Used by\n".format(
                " " if utils.get_long_type().sizeof == 8 else ""))
        for module in module_list():
            # str(pointer) renders "0x... <symbol>"; keep only the address.
            gdb.write("{address} {name:<19} {size:>8} {ref}".format(
                address=str(module['module_core']).split()[0],
                name=module['name'].string(),
                size=str(module['core_size']),
                ref=str(module['refcnt']['counter'])))
            # Walk module->source_list to print each module that uses this one.
            source_list = module['source_list']
            t = self._module_use_type.get_type().pointer()
            entry = source_list['next']
            first = True
            while entry != source_list.address:
                use = utils.container_of(entry, t, "source_list")
                gdb.write("{separator}{name}".format(
                    separator=" " if first else ",",
                    name=use['source']['name'].string()))
                first = False
                entry = entry['next']
            gdb.write("\n")
LxLsmod() | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.jmx.support;
import java.util.LinkedHashSet;
import java.util.Set;
import javax.management.InstanceAlreadyExistsException;
import javax.management.InstanceNotFoundException;
import javax.management.JMException;
import javax.management.MBeanServer;
import javax.management.ObjectInstance;
import javax.management.ObjectName;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.jspecify.annotations.Nullable;
import org.springframework.util.Assert;
/**
* Provides supporting infrastructure for registering MBeans with an
* {@link javax.management.MBeanServer}. The behavior when encountering
* an existing MBean at a given {@link ObjectName} is fully configurable
* allowing for flexible registration settings.
*
 * <p>All registered MBeans are tracked and can be unregistered by calling
 * the {@link #unregisterBeans()} method.
*
* <p>Sub-classes can receive notifications when an MBean is registered or
* unregistered by overriding the {@link #onRegister(ObjectName)} and
* {@link #onUnregister(ObjectName)} methods respectively.
*
* <p>By default, the registration process will fail if attempting to
* register an MBean using a {@link javax.management.ObjectName} that is
* already used.
*
* <p>By setting the {@link #setRegistrationPolicy(RegistrationPolicy) registrationPolicy}
* property to {@link RegistrationPolicy#IGNORE_EXISTING} the registration process
* will simply ignore existing MBeans leaving them registered. This is useful in settings
* where multiple applications want to share a common MBean in a shared {@link MBeanServer}.
*
* <p>Setting {@link #setRegistrationPolicy(RegistrationPolicy) registrationPolicy} property
* to {@link RegistrationPolicy#REPLACE_EXISTING} will cause existing MBeans to be replaced
* during registration if necessary. This is useful in situations where you can't guarantee
* the state of your {@link MBeanServer}.
*
* @author Rob Harrop
* @author Juergen Hoeller
* @author Phillip Webb
* @since 2.0
* @see #setServer
* @see #setRegistrationPolicy
* @see org.springframework.jmx.export.MBeanExporter
*/
public class MBeanRegistrationSupport {
/**
* {@code Log} instance for this class.
*/
protected final Log logger = LogFactory.getLog(getClass());
/**
* The {@code MBeanServer} instance being used to register beans.
*/
protected @Nullable MBeanServer server;
/**
* The beans that have been registered by this exporter.
*/
private final Set<ObjectName> registeredBeans = new LinkedHashSet<>();
/**
* The policy used when registering an MBean and finding that it already exists.
* By default an exception is raised.
*/
private RegistrationPolicy registrationPolicy = RegistrationPolicy.FAIL_ON_EXISTING;
/**
* Specify the {@code MBeanServer} instance with which all beans should
* be registered. The {@code MBeanExporter} will attempt to locate an
* existing {@code MBeanServer} if none is supplied.
*/
public void setServer(@Nullable MBeanServer server) {
    // May be null: per the class contract, an existing MBeanServer is
    // located automatically if none is supplied.
    this.server = server;
}
/**
* Return the {@code MBeanServer} that the beans will be registered with.
*/
public final @Nullable MBeanServer getServer() {
    // Nullable: no server may have been set (or located) yet.
    return this.server;
}
/**
* The policy to use when attempting to register an MBean
* under an {@link javax.management.ObjectName} that already exists.
* @param registrationPolicy the policy to use
* @since 3.2
*/
public void setRegistrationPolicy(RegistrationPolicy registrationPolicy) {
    // Fail fast on null so the policy field always holds a valid value.
    Assert.notNull(registrationPolicy, "RegistrationPolicy must not be null");
    this.registrationPolicy = registrationPolicy;
}
/**
* Actually register the MBean with the server. The behavior when encountering
* an existing MBean can be configured using {@link #setRegistrationPolicy}.
* @param mbean the MBean instance
* @param objectName the suggested ObjectName for the MBean
* @throws JMException if the registration failed
*/
protected void doRegister(Object mbean, ObjectName objectName) throws JMException {
    Assert.state(this.server != null, "No MBeanServer set");
    ObjectName actualObjectName;
    synchronized (this.registeredBeans) {
        ObjectInstance registeredBean = null;
        try {
            registeredBean = this.server.registerMBean(mbean, objectName);
        }
        catch (InstanceAlreadyExistsException ex) {
            if (this.registrationPolicy == RegistrationPolicy.IGNORE_EXISTING) {
                // Leave the existing MBean in place; treat registration as done.
                if (logger.isDebugEnabled()) {
                    logger.debug("Ignoring existing MBean at [" + objectName + "]");
                }
            }
            else if (this.registrationPolicy == RegistrationPolicy.REPLACE_EXISTING) {
                try {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Replacing existing MBean at [" + objectName + "]");
                    }
                    this.server.unregisterMBean(objectName);
                    registeredBean = this.server.registerMBean(mbean, objectName);
                }
                catch (InstanceNotFoundException ex2) {
                    // The existing MBean disappeared between detection and
                    // removal; report the original conflict to the caller.
                    if (logger.isInfoEnabled()) {
                        logger.info("Unable to replace existing MBean at [" + objectName + "]", ex2);
                    }
                    throw ex;
                }
            }
            else {
                // Default policy (fail on existing): propagate the conflict.
                throw ex;
            }
        }
        // Track registration and notify listeners.
        actualObjectName = (registeredBean != null ? registeredBean.getObjectName() : null);
        if (actualObjectName == null) {
            actualObjectName = objectName;
        }
        this.registeredBeans.add(actualObjectName);
    }
    // Callback happens outside the lock, with the name actually registered.
    onRegister(actualObjectName, mbean);
}
/**
 * Unregisters all beans that have been registered by an instance of this class.
 * <p>Iterates over a snapshot of the tracked names so that the per-bean
 * unregistration (which synchronizes itself) does not run while this
 * method holds the tracking lock.
 */
protected void unregisterBeans() {
	// Copy the tracked names under the lock, then release it before iterating.
	final Set<ObjectName> toUnregister;
	synchronized (this.registeredBeans) {
		toUnregister = new LinkedHashSet<>(this.registeredBeans);
	}
	if (toUnregister.isEmpty()) {
		return;
	}
	logger.debug("Unregistering JMX-exposed beans");
	toUnregister.forEach(this::doUnregister);
}
/**
 * Actually unregister the specified MBean from the server.
 * <p>Only acts on names that this instance registered itself (the name is
 * removed from the internal tracking set in the same step). Failures to
 * unregister are logged, not propagated. {@link #onUnregister(ObjectName)}
 * is only invoked if the MBean was actually removed from the server.
 * @param objectName the suggested ObjectName for the MBean
 */
protected void doUnregister(ObjectName objectName) {
	Assert.state(this.server != null, "No MBeanServer set");
	boolean actuallyUnregistered = false;
	synchronized (this.registeredBeans) {
		// remove() doubles as the membership test: foreign names are skipped.
		if (this.registeredBeans.remove(objectName)) {
			try {
				// MBean might already have been unregistered by an external process
				if (this.server.isRegistered(objectName)) {
					this.server.unregisterMBean(objectName);
					actuallyUnregistered = true;
				}
				else {
					if (logger.isInfoEnabled()) {
						logger.info("Could not unregister MBean [" + objectName + "] as said MBean " +
								"is not registered (perhaps already unregistered by an external process)");
					}
				}
			}
			catch (JMException ex) {
				// Best-effort cleanup: log and continue rather than failing shutdown.
				if (logger.isInfoEnabled()) {
					logger.info("Could not unregister MBean [" + objectName + "]", ex);
				}
			}
		}
	}
	// Notify outside the lock to avoid holding it during subclass processing.
	if (actuallyUnregistered) {
		onUnregister(objectName);
	}
}
/**
 * Return the {@link ObjectName ObjectNames} of all registered beans.
 * <p>Returns a defensive array snapshot taken under the tracking lock,
 * so the result is unaffected by concurrent (un)registrations.
 */
protected final ObjectName[] getRegisteredObjectNames() {
	synchronized (this.registeredBeans) {
		return this.registeredBeans.toArray(new ObjectName[0]);
	}
}
/**
 * Called when an MBean is registered under the given {@link ObjectName}. Allows
 * subclasses to perform additional processing when an MBean is registered.
 * <p>The default implementation delegates to {@link #onRegister(ObjectName)},
 * preserving compatibility with subclasses that only override the
 * single-argument variant.
 * @param objectName the actual {@link ObjectName} that the MBean was registered with
 * @param mbean the registered MBean instance
 */
protected void onRegister(ObjectName objectName, Object mbean) {
	onRegister(objectName);
}
/**
 * Called when an MBean is registered under the given {@link ObjectName}. Allows
 * subclasses to perform additional processing when an MBean is registered.
 * <p>The default implementation is empty. Can be overridden in subclasses.
 * @param objectName the actual {@link ObjectName} that the MBean was registered with
 */
protected void onRegister(ObjectName objectName) {
	// Intentionally empty: subclass hook.
}
/**
 * Called when an MBean is unregistered under the given {@link ObjectName}. Allows
 * subclasses to perform additional processing when an MBean is unregistered.
 * <p>The default implementation is empty. Can be overridden in subclasses.
 * @param objectName the {@link ObjectName} that the MBean was registered with
 */
protected void onUnregister(ObjectName objectName) {
	// Intentionally empty: subclass hook.
}
} | java | github | https://github.com/spring-projects/spring-framework | spring-context/src/main/java/org/springframework/jmx/support/MBeanRegistrationSupport.java |
"""
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better
results than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via
cross-validation. Sometimes dropping rows or using marker values is
more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.model_selection import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score) | unknown | codeparrot/codeparrot-clean | ||
trigger:
batch: true
branches:
include:
- devel
- stable-*
pr:
autoCancel: true
branches:
include:
- devel
- stable-*
schedules:
- cron: 0 7 * * *
displayName: Nightly
always: true
branches:
include:
- devel
- stable-*
variables:
- name: checkoutPath
value: ansible
- name: coverageBranches
value: devel
- name: entryPoint
value: .azure-pipelines/commands/entry-point.sh
- name: fetchDepth
value: 500
- name: defaultContainer
value: quay.io/ansible/azure-pipelines-test-container:7.0.0
pool: Standard
stages:
- stage: Sanity
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
nameFormat: Test {0}
testFormat: sanity/{0}
targets:
- test: 1
- test: 2
- stage: Units
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
nameFormat: Python {0}
testFormat: units/{0}
targets:
- test: 3.9
- test: '3.10'
- test: 3.11
- test: 3.12
- test: 3.13
- test: 3.14
- stage: Windows
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
nameFormat: Server {0}
testFormat: windows/{0}/1
targets:
- name: 2016 WinRM HTTP
test: 2016/winrm/http
- name: 2019 WinRM HTTPS
test: 2019/winrm/https
- name: 2022 WinRM HTTPS
test: 2022/winrm/https
- name: 2022 PSRP HTTP
test: 2022/psrp/http
- name: 2022 SSH Key
test: 2022/ssh/key
- name: 2025 PSRP HTTP
test: 2025/psrp/http
- name: 2025 SSH Key
test: 2025/ssh/key
- stage: Remote
dependsOn: []
jobs:
- template: templates/matrix.yml # context/target
parameters:
targets:
- name: macOS 15.3
test: macos/15.3
- name: RHEL 9.7 py39
test: rhel/9.7@3.9
- name: RHEL 9.7 py312
test: rhel/9.7@3.12
- name: RHEL 10.1
test: rhel/10.1
- name: FreeBSD 14.3
test: freebsd/14.3
- name: FreeBSD 15.0
test: freebsd/15.0
groups:
- 1
- 2
- template: templates/matrix.yml # context/controller
parameters:
targets:
- name: macOS 15.3
test: macos/15.3
- name: RHEL 9.7
test: rhel/9.7
- name: RHEL 10.1
test: rhel/10.1
- name: FreeBSD 15.0
test: freebsd/15.0
groups:
- 3
- 4
- 5
- template: templates/matrix.yml # context/controller (ansible-test container management)
parameters:
targets:
- name: Alpine 3.23
test: alpine/3.23
- name: Fedora 43
test: fedora/43
- name: RHEL 9.7
test: rhel/9.7
- name: RHEL 10.1
test: rhel/10.1
- name: Ubuntu 24.04
test: ubuntu/24.04
groups:
- 6
- stage: Docker
dependsOn: []
jobs:
- template: templates/matrix.yml # context/target
parameters:
testFormat: linux/{0}
targets:
- name: Alpine 3.23
test: alpine323
- name: Fedora 43
test: fedora43
- name: Ubuntu 22.04
test: ubuntu2204
- name: Ubuntu 24.04
test: ubuntu2404
groups:
- 1
- 2
- template: templates/matrix.yml # context/controller
parameters:
testFormat: linux/{0}
targets:
- name: Alpine 3.23
test: alpine323
- name: Fedora 43
test: fedora43
- name: Ubuntu 24.04
test: ubuntu2404
groups:
- 3
- 4
- 5
- template: templates/matrix.yml # context/target (dnf-oldest, dnf-latest)
parameters:
testFormat: linux/{0}
targets:
- name: Fedora 43
test: fedora43
groups:
- 7
- stage: Galaxy
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
nameFormat: Python {0}
testFormat: galaxy/{0}/1
targets:
- test: 3.12
- test: 3.13
- test: 3.14
- stage: Generic
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
nameFormat: Python {0}
testFormat: generic/{0}/1
targets:
- test: 3.12
- test: 3.13
- test: 3.14
- stage: Incidental_Windows
displayName: Incidental Windows
dependsOn: []
jobs:
- template: templates/matrix.yml
parameters:
nameFormat: Server {0}
testFormat: i/windows/{0}
targets:
- name: 2016 WinRM HTTP
test: 2016/winrm/http
- name: 2019 WinRM HTTPS
test: 2019/winrm/https
- name: 2022 WinRM HTTPS
test: 2022/winrm/https
- name: 2022 PSRP HTTP
test: 2022/psrp/http
- name: 2022 SSH Key
test: 2022/ssh/key
- name: 2025 PSRP HTTP
test: 2025/psrp/http
- name: 2025 SSH Key
test: 2025/ssh/key
- stage: Summary
condition: succeededOrFailed()
dependsOn:
- Sanity
- Units
- Windows
- Remote
- Docker
- Galaxy
- Generic
- Incidental_Windows
jobs:
- template: templates/coverage.yml | unknown | github | https://github.com/ansible/ansible | .azure-pipelines/azure-pipelines.yml |
#!/usr/bin/python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Look through skia-autogen, searching for all checksums which should have
corresponding files in Google Storage, and verify that those files exist. """
import json
import posixpath
import re
import subprocess
import sys
# TODO(borenet): Replace some/all of these with constants from gm/gm_json.py
AUTOGEN_URL = 'http://skia-autogen.googlecode.com/svn/gm-actual'
GS_URL = 'gs://chromium-skia-gm/gm'
TEST_NAME_PATTERN = re.compile(r'(\S+)_(\S+).png')


def FileNameToGSURL(filename, hash_type, hash_value):
  """ Convert a file name given in a checksum file to the URL of the
  corresponding image file in Google Storage.

  filename: string; the file name to convert. Takes the form specified by
      TEST_NAME_PATTERN.
  hash_type: string; the type of the checksum.
  hash_value: string; the checksum itself.

  Raises Exception if the file name does not match TEST_NAME_PATTERN.
  """
  # Check the match object before dereferencing it: re.match returns None on
  # failure, so the original .match(...).group(1) raised AttributeError for
  # malformed names instead of the intended descriptive Exception.
  match = TEST_NAME_PATTERN.match(filename)
  if not match:
    raise Exception('Invalid test name for file: %s' % filename)
  test_name = match.group(1)
  return '%s/%s/%s/%s.png' % (GS_URL, hash_type, test_name, hash_value)
def FindURLSInJSON(json_file, gs_urls):
  """ Extract Google Storage URLs from a JSON file in svn, adding them to the
  gs_urls dictionary.

  json_file: string; URL of the JSON file.
  gs_urls: dict; stores Google Storage URLs as keys and lists of the JSON files
      which reference them.

  Example gs_urls:
  { 'gs://chromium-skia-gm/gm/sometest/12345.png': [
      'http://skia-autogen.googlecode.com/svn/gm-actual/Test-Mac10.6-MacMini4.1-GeForce320M-x86-Debug/actual-results.json',
      'http://skia-autogen.googlecode.com/svn/gm-actual/Test-Mac10.8-MacMini4.1-GeForce320M-x86-Debug/actual-results.json',
    ]
  }
  """
  # Fetch the JSON file contents straight out of svn and parse them.
  raw_contents = subprocess.check_output(['svn', 'cat', json_file])
  parsed = json.loads(raw_contents)
  for dict_type in ['actual-results']:
    section = parsed[dict_type]
    for result_type in section:
      results = section[result_type]
      if not results:
        continue
      # Each entry maps a file name to a (hash_type, hash_value) pair.
      for result, (hash_type, hash_value) in results.items():
        gs_url = FileNameToGSURL(result, hash_type, str(hash_value))
        # Record this JSON file as a referrer of the URL.
        gs_urls.setdefault(gs_url, []).append(json_file)
def _FindJSONFiles(url, json_files):
  """ Helper function for FindJsonFiles. Recursively explore the repository,
  adding JSON files to a list.

  url: string; URL of the repository (or subdirectory thereof) to explore.
  json_files: list to which JSON file urls will be added.

  Raises Exception if 'svn ls' fails for the given URL.
  """
  proc = subprocess.Popen(['svn', 'ls', url], stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT)
  if proc.wait() != 0:
    raise Exception('Failed to list svn directory.')
  # wait() has already completed, so communicate() just drains the buffered
  # output of 'svn ls', one entry per line.
  output = proc.communicate()[0].splitlines()
  subdirs = []
  for item in output:
    # 'svn ls' marks directories with a trailing path separator.
    if item.endswith(posixpath.sep):
      subdirs.append(item)
    elif item.endswith('.json'):
      json_files.append(posixpath.join(url, item))
    else:
      print 'Warning: ignoring %s' % posixpath.join(url, item)
  # Recurse into subdirectories after processing this level.
  for subdir in subdirs:
    _FindJSONFiles(posixpath.join(url, subdir), json_files)
def FindJSONFiles(url):
  """ Recursively explore the given repository and return a list of the JSON
  files it contains.

  url: string; URL of the repository to explore.

  Returns a list of JSON file URLs (strings).
  """
  print 'Searching for JSON files in %s' % url
  # Delegate the recursion to the helper, which appends into this list.
  json_files = []
  _FindJSONFiles(url, json_files)
  return json_files
def FindURLs(url):
  """ Find Google Storage URLs inside of JSON files in the given repository.
  Returns a dictionary whose keys are Google Storage URLs and values are lists
  of the JSON files which reference them.

  url: string; URL of the repository to explore.

  Example output:
  { 'gs://chromium-skia-gm/gm/sometest/12345.png': [
      'http://skia-autogen.googlecode.com/svn/gm-actual/Test-Mac10.6-MacMini4.1-GeForce320M-x86-Debug/actual-results.json',
      'http://skia-autogen.googlecode.com/svn/gm-actual/Test-Mac10.8-MacMini4.1-GeForce320M-x86-Debug/actual-results.json',
    ]
  }
  """
  # Accumulate URL -> [referencing JSON files] across every JSON file found.
  gs_urls = {}
  for json_file in FindJSONFiles(url):
    print 'Looking for checksums in %s' % json_file
    FindURLSInJSON(json_file, gs_urls)
  return gs_urls
def VerifyURL(url):
  """ Verify that the given URL exists.

  url: string; the Google Storage URL of the image file in question.

  Returns True if 'gsutil ls' succeeds for the URL, False otherwise.
  """
  # 'gsutil ls' exits non-zero when the object is absent; its output is
  # irrelevant, so stderr is folded into the captured stdout pipe.
  listing = subprocess.Popen(['gsutil', 'ls', url], stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
  return listing.wait() == 0
def VerifyURLs(urls):
  """ Verify that each of the given URLs exists. Return a list of which URLs do
  not exist.

  urls: dictionary; URLs of the image files in question, mapped to the lists
      of JSON files which reference them (used only for the error report).
  """
  print 'Verifying that images exist for URLs...'
  missing = []
  # iterkeys() is Python 2 only; this script targets Python 2 throughout.
  for url in urls.iterkeys():
    if not VerifyURL(url):
      # Report the missing image together with every JSON file that cites it.
      print 'Missing: %s, referenced by: \n %s' % (url, '\n '.join(urls[url]))
      missing.append(url)
  return missing
def Main():
  """ Crawl skia-autogen for checksum files and verify that every referenced
  image exists in Google Storage. Returns 1 if any image is missing; returns
  None (process exit code 0 via sys.exit) when everything is present. """
  urls = FindURLs(AUTOGEN_URL)
  missing = VerifyURLs(urls)
  if missing:
    print 'Found %d Missing files.' % len(missing)
    return 1


if __name__ == '__main__':
  sys.exit(Main())
//// [tests/cases/conformance/classes/classStaticBlock/classStaticBlock6.ts] ////
// NOTE(review): compiler-generated test baseline (TypeScript test-suite emit
// for an error-case input). The "broken" constructs below (e.g. top-level
// `yield ;`, `_this = _super.call(this)` outside a function) are the expected
// output for invalid static-block code. Do not hand-edit; regenerate via the
// compiler test harness instead.
//// [classStaticBlock6.ts]
class B {
    static a = 1;
}
class C extends B {
    static {
        let await = 1;
        let arguments = 1;
        let eval = 1;
    }
    static {
        await: if (true) {
        }
        arguments;
        await;
        super();
    }
}
class CC {
    constructor () {
        class C extends B {
            static {
                class CC extends B {
                    constructor () {
                        super();
                    }
                }
                super();
            }
        }
    }
}
async function foo () {
    class C extends B {
        static {
            arguments;
            await;
            async function ff () {
                arguments;
                await;
            }
        }
    }
}
function foo1 () {
    class C extends B {
        static {
            arguments;
            function ff () {
                arguments;
            }
        }
    }
}
class foo2 {
    static {
        this.b // should error
        let b: typeof this.b; // ok
        if (1) {
            this.b; // should error
        }
    }
    static b = 1;
}
//// [classStaticBlock6.js]
"use strict";
var __extends = (this && this.__extends) || (function () {
    var extendStatics = function (d, b) {
        extendStatics = Object.setPrototypeOf ||
            ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
            function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
        return extendStatics(d, b);
    };
    return function (d, b) {
        if (typeof b !== "function" && b !== null)
            throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
        extendStatics(d, b);
        function __() { this.constructor = d; }
        d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
    };
})();
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
var __generator = (this && this.__generator) || function (thisArg, body) {
    var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g = Object.create((typeof Iterator === "function" ? Iterator : Object).prototype);
    return g.next = verb(0), g["throw"] = verb(1), g["return"] = verb(2), typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
    function verb(n) { return function (v) { return step([n, v]); }; }
    function step(op) {
        if (f) throw new TypeError("Generator is already executing.");
        while (g && (g = 0, op[0] && (_ = 0)), _) try {
            if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
            if (y = 0, t) op = [op[0] & 2, t.value];
            switch (op[0]) {
                case 0: case 1: t = op; break;
                case 4: _.label++; return { value: op[1], done: false };
                case 5: _.label++; y = op[1]; op = [0]; continue;
                case 7: op = _.ops.pop(); _.trys.pop(); continue;
                default:
                    if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
                    if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
                    if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
                    if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
                    if (t[2]) _.ops.pop();
                    _.trys.pop(); continue;
            }
            op = body.call(thisArg, _);
        } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
        if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
    }
};
var B = /** @class */ (function () {
    function B() {
    }
    B.a = 1;
    return B;
}());
var C = /** @class */ (function (_super) {
    __extends(C, _super);
    function C() {
        return _super !== null && _super.apply(this, arguments) || this;
    }
    return C;
}(B));
(function () {
    var await = 1;
    var arguments = 1;
    var eval = 1;
})();
(function () {
    yield ;
    if (true) {
    }
    arguments;
    yield ;
    _this = _super.call(this) || this;
})();
var CC = /** @class */ (function () {
    function CC() {
        var C = /** @class */ (function (_super) {
            __extends(C, _super);
            function C() {
                return _super !== null && _super.apply(this, arguments) || this;
            }
            return C;
        }(B));
        (function () {
            var CC = /** @class */ (function (_super) {
                __extends(CC, _super);
                function CC() {
                    return _super.call(this) || this;
                }
                return CC;
            }(B));
            _this = _super.call(this) || this;
        })();
    }
    return CC;
}());
function foo() {
    return __awaiter(this, void 0, void 0, function () {
        var C;
        return __generator(this, function (_a) {
            C = /** @class */ (function (_super) {
                __extends(C, _super);
                function C() {
                    return _super !== null && _super.apply(this, arguments) || this;
                }
                return C;
            }(B));
            (function () {
                arguments;
                yield ;
                function ff() {
                    var arguments_1 = arguments;
                    return __awaiter(this, void 0, void 0, function () {
                        return __generator(this, function (_a) {
                            switch (_a.label) {
                                case 0:
                                    arguments_1;
                                    return [4 /*yield*/, ];
                                case 1:
                                    _a.sent();
                                    return [2 /*return*/];
                            }
                        });
                    });
                }
            })();
            return [2 /*return*/];
        });
    });
}
function foo1() {
    var C = /** @class */ (function (_super) {
        __extends(C, _super);
        function C() {
            return _super !== null && _super.apply(this, arguments) || this;
        }
        return C;
    }(B));
    (function () {
        arguments;
        function ff() {
            arguments;
        }
    })();
}
var foo2 = /** @class */ (function () {
    function foo2() {
    }
    var _a;
    _a = foo2;
    (function () {
        _a.b; // should error
        var b; // ok
        if (1) {
            _a.b; // should error
        }
    })();
    foo2.b = 1;
    return foo2;
}());
#!/usr/bin/env python
from __future__ import print_function
import sys
import gi
from gi.repository import GObject as gobject, Gst as gst
from livestreamer import Livestreamer, StreamError, PluginError, NoPluginError
def exit(msg):
print(msg, file=sys.stderr)
sys.exit()
class LivestreamerPlayer(object):
def __init__(self):
self.fd = None
self.mainloop = gobject.MainLoop()
# This creates a playbin pipeline and using the appsrc source
# we can feed it our stream data
self.pipeline = gst.ElementFactory.make("playbin", None)
self.pipeline.set_property("uri", "appsrc://")
# When the playbin creates the appsrc source it will call
# this callback and allow us to configure it
self.pipeline.connect("source-setup", self.on_source_setup)
# Creates a bus and set callbacks to receive errors
self.bus = self.pipeline.get_bus()
self.bus.add_signal_watch()
self.bus.connect("message::eos", self.on_eos)
self.bus.connect("message::error", self.on_error)
def exit(self, msg):
self.stop()
exit(msg)
def stop(self):
# Stop playback and exit mainloop
self.pipeline.set_state(gst.State.NULL)
self.mainloop.quit()
# Close the stream
if self.fd:
self.fd.close()
def play(self, stream):
# Attempt to open the stream
try:
self.fd = stream.open()
except StreamError as err:
self.exit("Failed to open stream: {0}".format(err))
# Start playback
self.pipeline.set_state(gst.State.PLAYING)
self.mainloop.run()
def on_source_setup(self, element, source):
# When this callback is called the appsrc expects
# us to feed it more data
source.connect("need-data", self.on_source_need_data)
def on_source_need_data(self, source, length):
# Attempt to read data from the stream
try:
data = self.fd.read(length)
except IOError as err:
self.exit("Failed to read data from stream: {0}".format(err))
# If data is empty it's the end of stream
if not data:
source.emit("end-of-stream")
return
# Convert the Python bytes into a GStreamer Buffer
# and then push it to the appsrc
buf = gst.Buffer.new_wrapped(data)
source.emit("push-buffer", buf)
def on_eos(self, bus, msg):
# Stop playback on end of stream
self.stop()
def on_error(self, bus, msg):
# Print error message and exit on error
error = msg.parse_error()[1]
self.exit(error)
def main():
if len(sys.argv) < 3:
exit("Usage: {0} <url> <quality>".format(sys.argv[0]))
# Initialize and check GStreamer version
gi.require_version("Gst", "1.0")
gobject.threads_init()
gst.init(None)
# Collect arguments
url = sys.argv[1]
quality = sys.argv[2]
# Create the Livestreamer session
livestreamer = Livestreamer()
# Enable logging
livestreamer.set_loglevel("info")
livestreamer.set_logoutput(sys.stdout)
# Attempt to fetch streams
try:
streams = livestreamer.streams(url)
except NoPluginError:
exit("Livestreamer is unable to handle the URL '{0}'".format(url))
except PluginError as err:
exit("Plugin error: {0}".format(err))
if not streams:
exit("No streams found on URL '{0}'".format(url))
# Look for specified stream
if quality not in streams:
exit("Unable to find '{0}' stream on URL '{1}'".format(quality, url))
# We found the stream
stream = streams[quality]
# Create the player and start playback
player = LivestreamerPlayer()
# Blocks until playback is done
player.play(stream)
if __name__ == "__main__":
main() | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# (c) 2016, Tomas Karasek <tom.to.the.k@gmail.com>
# (c) 2016, Matt Baldwin <baldwin@stackpointcloud.com>
# (c) 2016, Thibaud Morel l'Horset <teebes@gmail.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: packet_device
short_description: Manage a bare metal server in the Packet Host.
description:
- Manage a bare metal server in the Packet Host (a "device" in the API terms).
- When the machine is created it can optionally wait for public IP address, or for active state.
- This module has a dependency on packet >= 1.0.
- API is documented at U(https://www.packet.net/developers/api/devices).
version_added: "2.3"
author:
- Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
- Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
- Thibaud Morel l'Horset (@teebes) <teebes@gmail.com>
options:
auth_token:
description:
- Packet api token. You can also supply it in env var C(PACKET_API_TOKEN).
count:
description:
- The number of devices to create. Count number can be included in hostname via the %d string formatter.
default: 1
count_offset:
description:
- From which number to start the count.
default: 1
device_ids:
description:
- List of device IDs on which to operate.
facility:
description:
- Facility slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/facilities/).
features:
description:
- Dict with "features" for device creation. See Packet API docs for details.
hostnames:
description:
- A hostname of a device, or a list of hostnames.
- If given string or one-item list, you can use the C("%d") Python string format to expand numbers from I(count).
- If only one hostname, it might be expanded to list if I(count)>1.
aliases: [name]
locked:
description:
- Whether to lock a created device.
default: false
version_added: "2.4"
aliases: [lock]
type: bool
operating_system:
description:
- OS slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/operatingsystems/).
plan:
description:
- Plan slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/plans/).
project_id:
description:
- ID of project of the device.
required: true
state:
description:
- Desired state of the device.
- If set to C(present) (the default), the module call will return immediately after the device-creating HTTP request successfully returns.
- If set to C(active), the module call will block until all the specified devices are in state active due to the Packet API, or until I(wait_timeout).
choices: [present, absent, active, inactive, rebooted]
default: present
user_data:
description:
- Userdata blob made available to the machine
wait_for_public_IPv:
description:
- Whether to wait for the instance to be assigned a public IPv4/IPv6 address.
- If set to 4, it will wait until IPv4 is assigned to the instance.
- If set to 6, wait until public IPv6 is assigned to the instance.
choices: [4,6]
version_added: "2.4"
wait_timeout:
description:
- How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the C(active) I(state).
- If I(wait_for_public_IPv) is set and I(state) is C(active), the module will wait for both events consequently, applying the timeout twice.
default: 900
ipxe_script_url:
description:
- URL of custom iPXE script for provisioning.
- More about custom iPXE for Packet devices at U(https://help.packet.net/technical/infrastructure/custom-ipxe).
version_added: "2.4"
always_pxe:
description:
- Persist PXE as the first boot option.
- Normally, the PXE process happens only on the first boot. Set this arg to have your device continuously boot to iPXE.
default: false
version_added: "2.4"
type: bool
requirements:
- "packet-python >= 1.35"
notes:
- Doesn't support check mode.
'''
EXAMPLES = '''
# All the examples assume that you have your Packet api token in env var PACKET_API_TOKEN.
# You can also pass it to the auth_token parameter of the module instead.
# Creating devices
- name: create 1 device
hosts: localhost
tasks:
- packet_device:
project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
hostnames: myserver
operating_system: ubuntu_16_04
plan: baremetal_0
facility: sjc1
# Create the same device and wait until it is in state "active", (when it's
# ready for other API operations). Fail if the devices in not "active" in
# 10 minutes.
- name: create device and wait up to 10 minutes for active state
hosts: localhost
tasks:
- packet_device:
project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
hostnames: myserver
operating_system: ubuntu_16_04
plan: baremetal_0
facility: sjc1
state: active
wait_timeout: 600
- name: create 3 ubuntu devices called server-01, server-02 and server-03
hosts: localhost
tasks:
- packet_device:
project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
hostnames: server-%02d
count: 3
operating_system: ubuntu_16_04
plan: baremetal_0
facility: sjc1
- name: Create 3 coreos devices with userdata, wait until they get IPs and then wait for SSH
hosts: localhost
tasks:
- name: create 3 devices and register their facts
packet_device:
hostnames: [coreos-one, coreos-two, coreos-three]
operating_system: coreos_stable
plan: baremetal_0
facility: ewr1
locked: true
project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
wait_for_public_IPv: 4
user_data: |
#cloud-config
ssh_authorized_keys:
- {{ lookup('file', 'my_packet_sshkey') }}
coreos:
etcd:
discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3
addr: $private_ipv4:4001
peer-addr: $private_ipv4:7001
fleet:
public-ip: $private_ipv4
units:
- name: etcd.service
command: start
- name: fleet.service
command: start
register: newhosts
- name: wait for ssh
wait_for:
delay: 1
host: "{{ item.public_ipv4 }}"
port: 22
state: started
timeout: 500
with_items: "{{ newhosts.devices }}"
# Other states of devices
- name: remove 3 devices by uuid
hosts: localhost
tasks:
- packet_device:
project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
state: absent
device_ids:
- 1fb4faf8-a638-4ac7-8f47-86fe514c30d8
- 2eb4faf8-a638-4ac7-8f47-86fe514c3043
- 6bb4faf8-a638-4ac7-8f47-86fe514c301f
'''
RETURN = '''
changed:
description: True if a device was altered in any way (created, modified or removed)
type: bool
sample: True
returned: success
devices:
description: Information about each device that was processed
type: list
sample: '[{"hostname": "my-server.com", "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7",
"public_ipv4": "147.229.15.12", "private-ipv4": "10.0.15.12",
"tags": [], "locked": false, "state": "provisioning",
"public_ipv6": ""2604:1380:2:5200::3"}]'
returned: success
''' # NOQA
import os
import re
import time
import uuid
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
HAS_PACKET_SDK = True
try:
import packet
except ImportError:
HAS_PACKET_SDK = False
from ansible.module_utils.basic import AnsibleModule
NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]')
HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE)
MAX_DEVICES = 100
PACKET_DEVICE_STATES = (
'queued',
'provisioning',
'failed',
'powering_on',
'active',
'powering_off',
'inactive',
'rebooting',
)
PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
ALLOWED_STATES = ['absent', 'active', 'inactive', 'rebooted', 'present']
def serialize_device(device):
"""
Standard represenation for a device as returned by various tasks::
{
'id': 'device_id'
'hostname': 'device_hostname',
'tags': [],
'locked': false,
'state': 'provisioning',
'ip_addresses': [
{
"address": "147.75.194.227",
"address_family": 4,
"public": true
},
{
"address": "2604:1380:2:5200::3",
"address_family": 6,
"public": true
},
{
"address": "10.100.11.129",
"address_family": 4,
"public": false
}
],
"private_ipv4": "10.100.11.129",
"public_ipv4": "147.75.194.227",
"public_ipv6": "2604:1380:2:5200::3",
}
"""
device_data = {}
device_data['id'] = device.id
device_data['hostname'] = device.hostname
device_data['tags'] = device.tags
device_data['locked'] = device.locked
device_data['state'] = device.state
device_data['ip_addresses'] = [
{
'address': addr_data['address'],
'address_family': addr_data['address_family'],
'public': addr_data['public'],
}
for addr_data in device.ip_addresses
]
# Also include each IPs as a key for easier lookup in roles.
# Key names:
# - public_ipv4
# - public_ipv6
# - private_ipv4
# - private_ipv6 (if there is one)
for ipdata in device_data['ip_addresses']:
if ipdata['public']:
if ipdata['address_family'] == 6:
device_data['public_ipv6'] = ipdata['address']
elif ipdata['address_family'] == 4:
device_data['public_ipv4'] = ipdata['address']
elif not ipdata['public']:
if ipdata['address_family'] == 6:
# Packet doesn't give public ipv6 yet, but maybe one
# day they will
device_data['private_ipv6'] = ipdata['address']
elif ipdata['address_family'] == 4:
device_data['private_ipv4'] = ipdata['address']
return device_data
def is_valid_hostname(hostname):
    # True if the string matches HOSTNAME_RE from its start: dot-separated
    # alphanumeric labels, hyphens allowed inside a label.
    return re.match(HOSTNAME_RE, hostname) is not None
def is_valid_uuid(myuuid):
    """Return True if ``myuuid`` is a canonically formatted version-4 UUID."""
    try:
        parsed = uuid.UUID(myuuid, version=4)
    except ValueError:
        # Not parseable as a UUID at all.
        return False
    # Round-tripping through str() rejects non-canonical spellings
    # (uppercase hex digits, braces, missing dashes, ...).
    return myuuid == str(parsed)
def listify_string_name_or_id(s):
    """Split a comma-separated string into a list; wrap a plain string in one."""
    return s.split(',') if ',' in s else [s]
def get_hostname_list(module):
    """Normalize the 'hostnames' parameter into a validated list of names.

    Supports a "%d"-style formatter in a single hostname spec, which is
    expanded into ``count`` names starting at ``count_offset``.
    Raises Exception on invalid input.
    """
    # hostname is a list-typed param, so I guess it should return list
    # (and it does, in Ansible 2.2.1) but in order to be defensive,
    # I keep here the code to convert an eventual string to list
    hostnames = module.params.get('hostnames')
    count = module.params.get('count')
    count_offset = module.params.get('count_offset')
    if isinstance(hostnames, str):
        hostnames = listify_string_name_or_id(hostnames)
    if not isinstance(hostnames, list):
        raise Exception("name %s is not convertible to list" % hostnames)

    # at this point, hostnames is a list
    hostnames = [h.strip() for h in hostnames]

    # count>1 means "expand one hostname spec", which conflicts with an
    # explicit list of several hostnames.
    if (len(hostnames) > 1) and (count > 1):
        _msg = ("If you set count>1, you should only specify one hostname "
                "with the %d formatter, not a list of hostnames.")
        raise Exception(_msg)

    if (len(hostnames) == 1) and (count > 0):
        hostname_spec = hostnames[0]
        count_range = range(count_offset, count_offset + count)
        if re.search(r"%\d{0,2}d", hostname_spec):
            # Spec already contains a %d-style placeholder: expand it.
            hostnames = [hostname_spec % i for i in count_range]
        elif count > 1:
            # No placeholder given: append a zero-padded counter suffix.
            hostname_spec = '%s%%02d' % hostname_spec
            hostnames = [hostname_spec % i for i in count_range]

    for hn in hostnames:
        if not is_valid_hostname(hn):
            raise Exception("Hostname '%s' does not seem to be valid" % hn)

    if len(hostnames) > MAX_DEVICES:
        raise Exception("You specified too many hostnames, max is %d" %
                        MAX_DEVICES)
    return hostnames
def get_device_id_list(module):
    """Normalize the 'device_ids' parameter into a validated list of UUIDs.

    Accepts a list or a comma-separated string; strips whitespace and raises
    if any entry is not a valid UUID or the list exceeds MAX_DEVICES.
    """
    raw_ids = module.params.get('device_ids')
    if isinstance(raw_ids, str):
        raw_ids = listify_string_name_or_id(raw_ids)

    device_ids = [candidate.strip() for candidate in raw_ids]

    # Report the first malformed entry, mirroring a sequential scan.
    invalid = [candidate for candidate in device_ids
               if not is_valid_uuid(candidate)]
    if invalid:
        raise Exception("Device ID '%s' does not seem to be valid" % invalid[0])

    if len(device_ids) > MAX_DEVICES:
        raise Exception("You specified too many devices, max is %d" %
                        MAX_DEVICES)
    return device_ids
def create_single_device(module, packet_conn, hostname):
    """Create one new device through the Packet API and return it."""

    # These parameters have no sensible default for a brand-new device.
    for param in ('hostnames', 'operating_system', 'plan'):
        if not module.params.get(param):
            raise Exception("%s parameter is required for new device."
                            % param)
    project_id = module.params.get('project_id')
    plan = module.params.get('plan')
    user_data = module.params.get('user_data')
    facility = module.params.get('facility')
    operating_system = module.params.get('operating_system')
    locked = module.params.get('locked')
    # NOTE(review): ipxe_script_url and always_pxe are read here but never
    # forwarded to create_device() below -- looks like an omission; confirm
    # against the packet SDK's create_device signature before wiring through.
    ipxe_script_url = module.params.get('ipxe_script_url')
    always_pxe = module.params.get('always_pxe')

    device = packet_conn.create_device(
        project_id=project_id,
        hostname=hostname,
        plan=plan,
        facility=facility,
        operating_system=operating_system,
        userdata=user_data,
        locked=locked)
    return device
def refresh_device_list(module, packet_conn, devices):
    """Re-fetch the given devices so their state fields are current."""
    watched_ids = {d.id for d in devices}
    return [fresh for fresh in get_existing_devices(module, packet_conn)
            if fresh.id in watched_ids]
def wait_for_devices_active(module, packet_conn, watched_devices):
    """Poll every 5 seconds until all watched devices are 'active'.

    Returns the refreshed device list on success; raises after
    ``wait_timeout`` seconds without all devices reaching 'active'.
    """
    wait_timeout = module.params.get('wait_timeout')
    wait_timeout = time.time() + wait_timeout  # absolute deadline
    refreshed = watched_devices
    while wait_timeout > time.time():
        refreshed = refresh_device_list(module, packet_conn, watched_devices)
        if all(d.state == 'active' for d in refreshed):
            return refreshed
        time.sleep(5)
    raise Exception("Waiting for state \"active\" timed out for devices: %s"
                    % [d.hostname for d in refreshed if d.state != "active"])
def wait_for_public_IPv(module, packet_conn, created_devices):
    """Block until every created device reports a public IP of the requested
    address family (4 or 6), or raise after ``wait_timeout`` seconds."""

    def has_public_ip(addr_list, ip_v):
        # An address counts only if it is public, of the right family,
        # and non-empty.
        return any(a['public'] and a['address_family'] == ip_v and a['address']
                   for a in addr_list)

    def all_have_public_ip(ds, ip_v):
        return all(has_public_ip(d.ip_addresses, ip_v) for d in ds)

    address_family = module.params.get('wait_for_public_IPv')
    deadline = time.time() + module.params.get('wait_timeout')
    while time.time() < deadline:
        refreshed = refresh_device_list(module, packet_conn, created_devices)
        if all_have_public_ip(refreshed, address_family):
            return refreshed
        time.sleep(5)
    raise Exception("Waiting for IPv%d address timed out. Hostnames: %s"
                    % (address_family, [d.hostname for d in created_devices]))
def get_existing_devices(module, packet_conn):
    """List every device in the configured project (one page of MAX_DEVICES)."""
    return packet_conn.list_devices(
        module.params.get('project_id'),
        params={'per_page': MAX_DEVICES},
    )
def get_specified_device_identifiers(module):
    """Return the device identifiers the user asked the module to act on.

    Returns a dict with 'ids' and 'hostnames' keys; at most one of them is
    populated, depending on which parameter was supplied.  Previously this
    function implicitly returned None when neither parameter was set
    (normally prevented by required_one_of in main()), which made
    act_on_devices() crash with a TypeError; it now falls back to an empty
    selection instead.
    """
    if module.params.get('device_ids'):
        device_id_list = get_device_id_list(module)
        return {'ids': device_id_list, 'hostnames': []}
    elif module.params.get('hostnames'):
        hostname_list = get_hostname_list(module)
        return {'hostnames': hostname_list, 'ids': []}
    # Defensive fallback: select nothing rather than return None.
    return {'ids': [], 'hostnames': []}
def act_on_devices(module, packet_conn, target_state):
    """Drive the specified devices toward ``target_state``.

    Returns an Ansible-style result dict with 'changed' and serialized
    'devices'.  Raises Exception when a device cannot be transitioned
    from its current state.
    """
    specified_identifiers = get_specified_device_identifiers(module)
    existing_devices = get_existing_devices(module, packet_conn)
    changed = False
    create_hostnames = []
    if target_state in ['present', 'active', 'rebooted']:
        # states where we might create non-existing specified devices
        existing_devices_names = [ed.hostname for ed in existing_devices]
        create_hostnames = [hn for hn in specified_identifiers['hostnames']
                            if hn not in existing_devices_names]

    # Devices that already exist and were selected by id or hostname.
    process_devices = [d for d in existing_devices
                       if (d.id in specified_identifiers['ids']) or
                       (d.hostname in specified_identifiers['hostnames'])]

    if target_state != 'present':
        # For 'absent', every current state maps to a delete call.
        _absent_state_map = {}
        for s in PACKET_DEVICE_STATES:
            _absent_state_map[s] = packet.Device.delete

        # Maps target_state -> {current_state: API call or None}.
        # None means "already on its way there; nothing to do".
        state_map = {
            'absent': _absent_state_map,
            'active': {'inactive': packet.Device.power_on,
                       'provisioning': None, 'rebooting': None
                       },
            'inactive': {'active': packet.Device.power_off},
            'rebooted': {'active': packet.Device.reboot,
                         'inactive': packet.Device.power_on,
                         'provisioning': None, 'rebooting': None
                         },
        }

        # First do non-creation actions, it might be faster
        for d in process_devices:
            if d.state == target_state:
                continue
            if d.state in state_map[target_state]:
                api_operation = state_map[target_state].get(d.state)
                if api_operation is not None:
                    api_operation(d)
                    changed = True
            else:
                _msg = (
                    "I don't know how to process existing device %s from state %s "
                    "to state %s" %
                    (d.hostname, d.state, target_state))
                raise Exception(_msg)

    # At last create missing devices
    created_devices = []
    if create_hostnames:
        created_devices = [create_single_device(module, packet_conn, n)
                           for n in create_hostnames]
        if module.params.get('wait_for_public_IPv'):
            created_devices = wait_for_public_IPv(
                module, packet_conn, created_devices)
        changed = True

    processed_devices = created_devices + process_devices
    if target_state == 'active':
        processed_devices = wait_for_devices_active(
            module, packet_conn, processed_devices)

    return {
        'changed': changed,
        'devices': [serialize_device(d) for d in processed_devices]
    }
def main():
    """Module entry point: parse parameters, connect, and apply the state."""
    module = AnsibleModule(
        argument_spec=dict(
            # Falls back to the PACKET_API_TOKEN environment variable.
            auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR),
                            no_log=True),
            count=dict(type='int', default=1),
            count_offset=dict(type='int', default=1),
            device_ids=dict(type='list'),
            facility=dict(),
            features=dict(type='dict'),
            hostnames=dict(type='list', aliases=['name']),
            locked=dict(type='bool', default=False, aliases=['lock']),
            operating_system=dict(),
            plan=dict(),
            project_id=dict(required=True),
            state=dict(choices=ALLOWED_STATES, default='present'),
            user_data=dict(default=None),
            wait_for_public_IPv=dict(type='int', choices=[4, 6]),
            wait_timeout=dict(type='int', default=900),
            ipxe_script_url=dict(default=''),
            always_pxe=dict(type='bool', default=False),
        ),
        required_one_of=[('device_ids', 'hostnames',)],
        mutually_exclusive=[
            ('always_pxe', 'operating_system'),
            ('ipxe_script_url', 'operating_system'),
            ('hostnames', 'device_ids'),
            ('count', 'device_ids'),
            ('count_offset', 'device_ids'),
        ]
    )

    if not HAS_PACKET_SDK:
        module.fail_json(msg='packet required for this module')

    if not module.params.get('auth_token'):
        _fail_msg = ("if Packet API token is not in environment variable %s, "
                     "the auth_token parameter is required" %
                     PACKET_API_TOKEN_ENV_VAR)
        module.fail_json(msg=_fail_msg)

    auth_token = module.params.get('auth_token')

    packet_conn = packet.Manager(auth_token=auth_token)

    state = module.params.get('state')

    try:
        module.exit_json(**act_on_devices(module, packet_conn, state))
    except Exception as e:
        module.fail_json(msg='failed to set device state %s, error: %s' %
                         (state, to_native(e)), exception=traceback.format_exc())
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
import pickle
import unittest
from collections.abc import Iterator, Iterable
from string.templatelib import Template, Interpolation, convert
from test.test_string._support import TStringBaseCase, fstring
class TestTemplate(unittest.TestCase, TStringBaseCase):
    """Tests for string.templatelib.Template / Interpolation construction."""

    def test_common(self):
        # Template and Interpolation identify themselves as living in
        # string.templatelib.
        self.assertEqual(type(t'').__name__, 'Template')
        self.assertEqual(type(t'').__qualname__, 'Template')
        self.assertEqual(type(t'').__module__, 'string.templatelib')

        a = 'a'
        i = t'{a}'.interpolations[0]
        self.assertEqual(type(i).__name__, 'Interpolation')
        self.assertEqual(type(i).__qualname__, 'Interpolation')
        self.assertEqual(type(i).__module__, 'string.templatelib')

    def test_final_types(self):
        # Both types are final: subclassing raises TypeError.
        with self.assertRaisesRegex(TypeError, 'is not an acceptable base type'):
            class Sub(Template): ...

        with self.assertRaisesRegex(TypeError, 'is not an acceptable base type'):
            class Sub(Interpolation): ...

    def test_basic_creation(self):
        # Simple t-string creation
        t = t'Hello, world'
        self.assertIsInstance(t, Template)
        self.assertTStringEqual(t, ('Hello, world',), ())
        self.assertEqual(fstring(t), 'Hello, world')

        # Empty t-string
        t = t''
        self.assertTStringEqual(t, ('',), ())
        self.assertEqual(fstring(t), '')

        # Multi-line t-string
        t = t"""Hello,
world"""
        self.assertEqual(t.strings, ('Hello,\nworld',))
        self.assertEqual(len(t.interpolations), 0)
        self.assertEqual(fstring(t), 'Hello,\nworld')

    def test_interpolation_creation(self):
        # Trailing constructor arguments (conversion, format_spec) are optional.
        i = Interpolation('Maria', 'name', 'a', 'fmt')
        self.assertInterpolationEqual(i, ('Maria', 'name', 'a', 'fmt'))

        i = Interpolation('Maria', 'name', 'a')
        self.assertInterpolationEqual(i, ('Maria', 'name', 'a'))

        i = Interpolation('Maria', 'name')
        self.assertInterpolationEqual(i, ('Maria', 'name'))

        i = Interpolation('Maria')
        self.assertInterpolationEqual(i, ('Maria',))

    def test_creation_interleaving(self):
        # Template() normalizes its arguments so that strings and
        # interpolations strictly alternate, padding with '' as needed.

        # Should add strings on either side
        t = Template(Interpolation('Maria', 'name', None, ''))
        self.assertTStringEqual(t, ('', ''), [('Maria', 'name')])
        self.assertEqual(fstring(t), 'Maria')

        # Should prepend empty string
        t = Template(Interpolation('Maria', 'name', None, ''), ' is my name')
        self.assertTStringEqual(t, ('', ' is my name'), [('Maria', 'name')])
        self.assertEqual(fstring(t), 'Maria is my name')

        # Should append empty string
        t = Template('Hello, ', Interpolation('Maria', 'name', None, ''))
        self.assertTStringEqual(t, ('Hello, ', ''), [('Maria', 'name')])
        self.assertEqual(fstring(t), 'Hello, Maria')

        # Should concatenate strings
        t = Template('Hello', ', ', Interpolation('Maria', 'name', None, ''),
                     '!')
        self.assertTStringEqual(t, ('Hello, ', '!'), [('Maria', 'name')])
        self.assertEqual(fstring(t), 'Hello, Maria!')

        # Should add strings on either side and in between
        t = Template(Interpolation('Maria', 'name', None, ''),
                     Interpolation('Python', 'language', None, ''))
        self.assertTStringEqual(
            t, ('', '', ''), [('Maria', 'name'), ('Python', 'language')]
        )
        self.assertEqual(fstring(t), 'MariaPython')

    def test_template_values(self):
        # .values exposes the interpolated values in source order.
        t = t'Hello, world'
        self.assertEqual(t.values, ())

        name = "Lys"
        t = t'Hello, {name}'
        self.assertEqual(t.values, ("Lys",))

        country = "GR"
        age = 0
        t = t'Hello, {name}, {age} from {country}'
        self.assertEqual(t.values, ("Lys", 0, "GR"))

    def test_pickle_template(self):
        # Templates survive a pickle round-trip at every protocol.
        user = 'test'
        for template in (
            t'',
            t"No values",
            t'With inter {user}',
            t'With ! {user!r}',
            t'With format {1 / 0.3:.2f}',
            Template(),
            Template('a'),
            Template(Interpolation('Nikita', 'name', None, '')),
            Template('a', Interpolation('Nikita', 'name', 'r', '')),
        ):
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                with self.subTest(proto=proto, template=template):
                    pickled = pickle.dumps(template, protocol=proto)
                    unpickled = pickle.loads(pickled)

                    self.assertEqual(unpickled.values, template.values)
                    self.assertEqual(fstring(unpickled), fstring(template))

    def test_pickle_interpolation(self):
        # Interpolations round-trip all four fields through pickle.
        for interpolation in (
            Interpolation('Nikita', 'name', None, ''),
            Interpolation('Nikita', 'name', 'r', ''),
            Interpolation(1/3, 'x', None, '.2f'),
        ):
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                with self.subTest(proto=proto, interpolation=interpolation):
                    pickled = pickle.dumps(interpolation, protocol=proto)
                    unpickled = pickle.loads(pickled)

                    self.assertEqual(unpickled.value, interpolation.value)
                    self.assertEqual(unpickled.expression, interpolation.expression)
                    self.assertEqual(unpickled.conversion, interpolation.conversion)
                    self.assertEqual(unpickled.format_spec, interpolation.format_spec)
class TemplateIterTests(unittest.TestCase):
    """Tests for the (unexposed) iterator type produced by iter(Template)."""

    def test_abc(self):
        self.assertIsInstance(iter(t''), Iterable)
        self.assertIsInstance(iter(t''), Iterator)

    def test_final(self):
        # The iterator type can only be obtained via type(iter(...)); it is
        # final and rejects subclassing.
        TemplateIter = type(iter(t''))
        with self.assertRaisesRegex(TypeError, 'is not an acceptable base type'):
            class Sub(TemplateIter): ...

    def test_iter(self):
        # Iteration yields the non-empty static strings and the
        # Interpolation objects, in source order.
        x = 1
        res = list(iter(t'abc {x} yz'))

        self.assertEqual(res[0], 'abc ')
        self.assertIsInstance(res[1], Interpolation)
        self.assertEqual(res[1].value, 1)
        self.assertEqual(res[1].expression, 'x')
        self.assertEqual(res[1].conversion, None)
        self.assertEqual(res[1].format_spec, '')
        self.assertEqual(res[2], ' yz')

    def test_exhausted(self):
        # An exhausted iterator keeps raising StopIteration.
        # See https://github.com/python/cpython/issues/134119.
        template_iter = iter(t"{1}")
        self.assertIsInstance(next(template_iter), Interpolation)
        self.assertRaises(StopIteration, next, template_iter)
        self.assertRaises(StopIteration, next, template_iter)
class TestFunctions(unittest.TestCase):
    """Tests for the module-level convert() helper."""

    def test_convert(self):
        from fractions import Fraction

        # convert() applies the t-string conversion specifier: None is a
        # pass-through, 's'/'r'/'a' map to str/repr/ascii.
        for obj in ('Café', None, 3.14, Fraction(1, 2)):
            with self.subTest(f'{obj=}'):
                self.assertEqual(convert(obj, None), obj)
                self.assertEqual(convert(obj, 's'), str(obj))
                self.assertEqual(convert(obj, 'r'), repr(obj))
                self.assertEqual(convert(obj, 'a'), ascii(obj))

                # Invalid conversion specifier
                with self.assertRaises(ValueError):
                    convert(obj, 'z')
                with self.assertRaises(ValueError):
                    convert(obj, 1)
                with self.assertRaises(ValueError):
                    convert(obj, object())
if __name__ == '__main__':
unittest.main() | python | github | https://github.com/python/cpython | Lib/test/test_string/test_templatelib.py |
#
# The Python Imaging Library.
# $Id$
#
# EPS file handling
#
# History:
# 1995-09-01 fl Created (0.1)
# 1996-05-18 fl Don't choke on "atend" fields, Ghostscript interface (0.2)
# 1996-08-22 fl Don't choke on floating point BoundingBox values
# 1996-08-23 fl Handle files from Macintosh (0.3)
# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)
# 2003-09-07 fl Check gs.close status (from Federico Di Gregorio) (0.5)
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.5"
import re, string
import Image, ImageFile
#
# --------------------------------------------------------------------
def i32(c):
    # Decode the first 4 characters of c as an unsigned little-endian
    # 32-bit integer.
    value = 0
    for shift, ch in ((0, c[0]), (8, c[1]), (16, c[2]), (24, c[3])):
        value += ord(ch) << shift
    return value
def o32(i):
    # Encode an unsigned 32-bit integer as 4 little-endian characters.
    return "".join(chr((i >> shift) & 255) for shift in (0, 8, 16, 24))
# DSC header patterns: "%%Key: value" lines and bare "%%Key" / "%!Key" lines.
split = re.compile(r"^%%([^:]*):[ \t]*(.*)[ \t]*$")
field = re.compile(r"^%[%!\w]([^:]*)[ \t]*$")
def Ghostscript(tile, size, fp):
    """Render an image using Ghostscript (Unix only)"""

    # Unpack decoder tile
    decoder, tile, offset, data = tile[0]
    length, bbox = data

    import tempfile, os

    # NOTE(review): tempfile.mktemp() is race-prone; modern code would use
    # mkstemp/NamedTemporaryFile.  Kept as-is in this Python 2-era module.
    file = tempfile.mktemp()

    # Build ghostscript command
    command = ["gs",
               "-q",                    # quiet mode
               "-g%dx%d" % size,        # set output geometry (pixels)
               "-dNOPAUSE -dSAFER",     # don't pause between pages, safe mode
               "-sDEVICE=ppmraw",       # ppm driver
               "-sOutputFile=%s" % file,# output file
               "- >/dev/null 2>/dev/null"]

    command = string.join(command)

    # push data through ghostscript
    try:
        gs = os.popen(command, "w")
        # adjust for image origin
        if bbox[0] != 0 or bbox[1] != 0:
            gs.write("%d %d translate\n" % (-bbox[0], -bbox[1]))
        fp.seek(offset)
        # Stream the PostScript body in 8K chunks, stopping at EOF or
        # after 'length' bytes.
        while length > 0:
            s = fp.read(8192)
            if not s:
                break
            length = length - len(s)
            gs.write(s)
        status = gs.close()
        if status:
            raise IOError("gs failed (status %d)" % status)
        im = Image.core.open_ppm(file)
    finally:
        # Best-effort cleanup of the temporary PPM file.
        try: os.unlink(file)
        except: pass

    return im
class PSFile:
    """Wrapper that treats either CR or LF as end of line."""

    def __init__(self, fp):
        self.fp = fp
        self.char = None  # one-character lookahead buffer for CRLF handling

    def __getattr__(self, id):
        # Delegate unknown attributes to the wrapped file object, caching
        # them on this instance so delegation happens only once per name.
        v = getattr(self.fp, id)
        setattr(self, id, v)
        return v

    def seek(self, offset, whence=0):
        # Seeking invalidates any buffered lookahead character.
        self.char = None
        self.fp.seek(offset, whence)

    def tell(self):
        # Compensate for a buffered lookahead character, if any.
        pos = self.fp.tell()
        if self.char:
            pos = pos - 1
        return pos

    def readline(self):
        # Read up to the next CR or LF; return the line with a single
        # trailing "\n", collapsing CRLF pairs.
        s = ""
        if self.char:
            c = self.char
            self.char = None
        else:
            c = self.fp.read(1)
        # Note: read() returns "" at EOF, and "" is a substring of "\r\n",
        # so the loop also terminates at end of file.
        while c not in "\r\n":
            s = s + c
            c = self.fp.read(1)
        if c == "\r":
            # Peek one character to swallow the LF of a CRLF pair.
            self.char = self.fp.read(1)
            if self.char == "\n":
                self.char = None
        return s + "\n"
def _accept(prefix):
    # Plain EPS starts with "%!PS"; DOS EPS binary files start with the
    # little-endian magic number 0xC6D3D0C5.
    return prefix[:4] == "%!PS" or i32(prefix) == 0xC6D3D0C5L
##
# Image plugin for Encapsulated Postscript. This plugin supports only
# a few variants of this format.
class EpsImageFile(ImageFile.ImageFile):
    """EPS File Parser for the Python Imaging Library"""

    format = "EPS"
    format_description = "Encapsulated Postscript"

    def _open(self):
        # Parse the DSC header to find the bounding box (and thus the
        # image size), and optionally an embedded "%ImageData" preview.

        # FIXME: should check the first 512 bytes to see if this
        # really is necessary (platform-dependent, though...)
        fp = PSFile(self.fp)

        # HEAD
        s = fp.read(512)
        if s[:4] == "%!PS":
            # Plain PostScript: the whole file is the PS body.
            offset = 0
            fp.seek(0, 2)
            length = fp.tell()
        elif i32(s) == 0xC6D3D0C5L:
            # DOS EPS binary header: PS section offset/length follow the magic.
            offset = i32(s[4:])
            length = i32(s[8:])
            fp.seek(offset)
        else:
            raise SyntaxError, "not an EPS file"

        fp.seek(offset)

        box = None

        self.mode = "RGB"
        self.size = 1, 1  # FIXME: huh?

        #
        # Load EPS header

        s = fp.readline()

        while s:

            if len(s) > 255:
                raise SyntaxError, "not an EPS file"

            # Strip the line terminator (CRLF or LF).
            if s[-2:] == '\r\n':
                s = s[:-2]
            elif s[-1:] == '\n':
                s = s[:-1]

            try:
                m = split.match(s)
            except re.error, v:
                raise SyntaxError, "not an EPS file"

            if m:
                # "%%Key: value" comment.
                k, v = m.group(1, 2)
                self.info[k] = v
                if k == "BoundingBox":
                    try:
                        # Note: The DSC spec says that BoundingBox
                        # fields should be integers, but some drivers
                        # put floating point values there anyway.
                        box = map(int, map(float, string.split(v)))
                        self.size = box[2] - box[0], box[3] - box[1]
                        self.tile = [("eps", (0,0) + self.size, offset,
                                      (length, box))]
                    except:
                        pass
            else:
                # Bare "%%Key" comment (no value).
                m = field.match(s)
                if m:
                    k = m.group(1)
                    if k == "EndComments":
                        break
                    if k[:8] == "PS-Adobe":
                        self.info[k[:8]] = k[9:]
                    else:
                        self.info[k] = ""
                else:
                    raise IOError, "bad EPS header"

            s = fp.readline()

            if s[:1] != "%":
                break

        #
        # Scan for an "ImageData" descriptor (embedded raster preview)

        while s[0] == "%":

            if len(s) > 255:
                raise SyntaxError, "not an EPS file"

            if s[-2:] == '\r\n':
                s = s[:-2]
            elif s[-1:] == '\n':
                s = s[:-1]

            if s[:11] == "%ImageData:":

                # Fields: width height bits mode pad pad encoding delimiter
                [x, y, bi, mo, z3, z4, en, id] =\
                    string.split(s[11:], maxsplit=7)

                x = int(x); y = int(y)

                bi = int(bi)
                mo = int(mo)

                en = int(en)

                if en == 1:
                    decoder = "eps_binary"
                elif en == 2:
                    decoder = "eps_hex"
                else:
                    break
                if bi != 8:
                    break
                if mo == 1:
                    self.mode = "L"
                elif mo == 2:
                    self.mode = "LAB"
                elif mo == 3:
                    self.mode = "RGB"
                else:
                    break

                # Strip surrounding quotes from the data delimiter.
                if id[:1] == id[-1:] == '"':
                    id = id[1:-1]

                # Scan forward to the actual image data
                while 1:
                    s = fp.readline()
                    if not s:
                        break
                    if s[:len(id)] == id:
                        self.size = x, y
                        # NOTE(review): tile2 is assigned here but nothing in
                        # this file reads it -- confirm whether it is dead.
                        self.tile2 = [(decoder,
                                       (0, 0, x, y),
                                       fp.tell(),
                                       0)]
                        return

            s = fp.readline()
            if not s:
                break

        if not box:
            raise IOError, "cannot determine EPS bounding box"

    def load(self):
        # Load EPS via Ghostscript
        if not self.tile:
            return
        self.im = Ghostscript(self.tile, self.size, self.fp)
        self.mode = self.im.mode
        self.size = self.im.size
        self.tile = []
#
# --------------------------------------------------------------------
def _save(im, fp, filename, eps=1):
    """EPS Writer for the Python Imaging Library."""

    #
    # make sure image data is available
    im.load()

    #
    # determine postscript image mode: (bits, bands, PS operator)
    if im.mode == "L":
        operator = (8, 1, "image")
    elif im.mode == "RGB":
        operator = (8, 3, "false 3 colorimage")
    elif im.mode == "CMYK":
        operator = (8, 4, "false 4 colorimage")
    else:
        raise ValueError, "image mode is not supported"

    if eps:
        #
        # write EPS header (DSC comments)
        fp.write("%!PS-Adobe-3.0 EPSF-3.0\n")
        fp.write("%%Creator: PIL 0.1 EpsEncode\n")
        #fp.write("%%CreationDate: %s"...)
        fp.write("%%%%BoundingBox: 0 0 %d %d\n" % im.size)
        fp.write("%%Pages: 1\n")
        fp.write("%%EndComments\n")
        fp.write("%%Page: 1 1\n")
        fp.write("%%ImageData: %d %d " % im.size)
        fp.write("%d %d 0 1 1 \"%s\"\n" % operator)

    #
    # image header: scale/transform setup plus the image operator, which
    # reads hex-encoded scanlines from currentfile.
    fp.write("gsave\n")
    fp.write("10 dict begin\n")
    fp.write("/buf %d string def\n" % (im.size[0] * operator[1]))
    fp.write("%d %d scale\n" % im.size)
    fp.write("%d %d 8\n" % im.size) # <= bits
    fp.write("[%d 0 0 -%d 0 %d]\n" % (im.size[0], im.size[1], im.size[1]))
    fp.write("{ currentfile buf readhexstring pop } bind\n")
    fp.write("%s\n" % operator[2])

    ImageFile._save(im, fp, [("eps", (0,0)+im.size, 0, None)])

    fp.write("\n%%%%EndBinary\n")
    fp.write("grestore end\n")
    fp.flush()
#
# --------------------------------------------------------------------
# Register the plugin, its save handler, and recognized file extensions
# with PIL's Image module.
Image.register_open(EpsImageFile.format, EpsImageFile, _accept)
Image.register_save(EpsImageFile.format, _save)

Image.register_extension(EpsImageFile.format, ".ps")
Image.register_extension(EpsImageFile.format, ".eps")
Image.register_mime(EpsImageFile.format, "application/postscript") | unknown | codeparrot/codeparrot-clean | ||
/* Copyright 2022 - 2025 R. Thomas
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIEF_DWARF_TYPE_BASE_H
#define LIEF_DWARF_TYPE_BASE_H
#include "LIEF/visibility.h"
#include "LIEF/DWARF/Type.hpp"
namespace LIEF {
namespace dwarf {
namespace types {
/// This class wraps the `DW_TAG_base_type` type which can be used -- for
/// instance -- to represent integers or primitive types.
class LIEF_API Base : public Type {
  public:
  using Type::Type;

  /// How the raw bytes of the base type are interpreted.
  /// Values mirror the DWARF `DW_ATE_*` encoding constants.
  enum class ENCODING {
    NONE = 0,

    /// Mirror `DW_ATE_signed`
    SIGNED,

    /// Mirror `DW_ATE_signed_char`
    SIGNED_CHAR,

    /// Mirror `DW_ATE_unsigned`
    UNSIGNED,

    /// Mirror `DW_ATE_unsigned_char`
    UNSIGNED_CHAR,

    /// Mirror `DW_ATE_float`
    FLOAT,

    /// Mirror `DW_ATE_boolean`
    BOOLEAN,

    /// Mirror `DW_ATE_address`
    ADDRESS,
  };

  /// LLVM-style RTTI support: true if `type` is a `Base` instance.
  static bool classof(const Type* type) {
    return type->kind() == Type::KIND::BASE;
  }

  /// Describe how the base type is encoded and should be interpreted
  ENCODING encoding() const;

  ~Base() override;
};
}
}
}
#endif | unknown | github | https://github.com/nodejs/node | deps/LIEF/include/LIEF/DWARF/types/Base.hpp |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======

# Build the JSON-RPC proxy; credentials are embedded in the URL only when
# a password was configured above.
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:9332")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")

# The RPC command to run is the first command-line argument.
cmd = sys.argv[1].lower()

# Interactive dispatch: each branch prompts for the command's arguments and
# prints the daemon's reply.  Commands with optional arguments retry the
# call without arguments when the call with arguments fails.
if cmd == "backupwallet":
    try:
        path = raw_input("Enter destination path/filename: ")
        print access.backupwallet(path)
    except:
        print "\n---An error occurred---\n"

elif cmd == "getaccount":
    try:
        addr = raw_input("Enter a warpcoin address: ")
        print access.getaccount(addr)
    except:
        print "\n---An error occurred---\n"

elif cmd == "getaccountaddress":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaccountaddress(acct)
    except:
        print "\n---An error occurred---\n"

elif cmd == "getaddressesbyaccount":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaddressesbyaccount(acct)
    except:
        print "\n---An error occurred---\n"

elif cmd == "getbalance":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getbalance(acct, mc)
        except:
            print access.getbalance()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getblockbycount":
    try:
        height = raw_input("Height: ")
        print access.getblockbycount(height)
    except:
        print "\n---An error occurred---\n"

elif cmd == "getblockcount":
    try:
        print access.getblockcount()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getblocknumber":
    try:
        print access.getblocknumber()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getconnectioncount":
    try:
        print access.getconnectioncount()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getdifficulty":
    try:
        print access.getdifficulty()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getgenerate":
    try:
        print access.getgenerate()
    except:
        print "\n---An error occurred---\n"

elif cmd == "gethashespersec":
    try:
        print access.gethashespersec()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getinfo":
    try:
        print access.getinfo()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getnewaddress":
    try:
        acct = raw_input("Enter an account name: ")
        try:
            print access.getnewaddress(acct)
        except:
            print access.getnewaddress()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getreceivedbyaccount":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaccount(acct, mc)
        except:
            print access.getreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"

elif cmd == "getreceivedbyaddress":
    try:
        addr = raw_input("Enter a warpcoin address (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaddress(addr, mc)
        except:
            print access.getreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"

elif cmd == "gettransaction":
    try:
        txid = raw_input("Enter a transaction ID: ")
        print access.gettransaction(txid)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
    try:
        cmd = raw_input("Command (optional): ")
        try:
            print access.help(cmd)
        except:
            print access.help()
    except:
        print "\n---An error occurred---\n"

elif cmd == "listaccounts":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.listaccounts(mc)
        except:
            print access.listaccounts()
    except:
        print "\n---An error occurred---\n"

elif cmd == "listreceivedbyaccount":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaccount(mc, incemp)
        except:
            print access.listreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"

elif cmd == "listreceivedbyaddress":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaddress(mc, incemp)
        except:
            print access.listreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"

elif cmd == "listtransactions":
    try:
        acct = raw_input("Account (optional): ")
        count = raw_input("Number of transactions (optional): ")
        frm = raw_input("Skip (optional):")
        try:
            print access.listtransactions(acct, count, frm)
        except:
            print access.listtransactions()
    except:
        print "\n---An error occurred---\n"

elif cmd == "move":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.move(frm, to, amt, mc, comment)
        except:
            print access.move(frm, to, amt)
    except:
        print "\n---An error occurred---\n"

elif cmd == "sendfrom":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendfrom(frm, to, amt, mc, comment, commentto)
        except:
            print access.sendfrom(frm, to, amt)
    except:
        print "\n---An error occurred---\n"

elif cmd == "sendmany":
    try:
        frm = raw_input("From: ")
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.sendmany(frm,to,mc,comment)
        except:
            print access.sendmany(frm,to)
    except:
        print "\n---An error occurred---\n"

elif cmd == "sendtoaddress":
    try:
        # NOTE(review): this prompt text looks copy-pasted from sendmany;
        # sendtoaddress takes a single address, not an address:amount list.
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        amt = raw_input("Amount:")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendtoaddress(to,amt,comment,commentto)
        except:
            print access.sendtoaddress(to,amt)
    except:
        print "\n---An error occurred---\n"

elif cmd == "setaccount":
    try:
        addr = raw_input("Address: ")
        acct = raw_input("Account:")
        print access.setaccount(addr,acct)
    except:
        print "\n---An error occurred---\n"

elif cmd == "setgenerate":
    try:
        gen = raw_input("Generate? (true/false): ")
        cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
        try:
            print access.setgenerate(gen, cpus)
        except:
            print access.setgenerate(gen)
    except:
        print "\n---An error occurred---\n"

elif cmd == "settxfee":
    try:
        amt = raw_input("Amount:")
        print access.settxfee(amt)
    except:
        print "\n---An error occurred---\n"

elif cmd == "stop":
    try:
        print access.stop()
    except:
        print "\n---An error occurred---\n"

elif cmd == "validateaddress":
    try:
        addr = raw_input("Address: ")
        print access.validateaddress(addr)
    except:
        print "\n---An error occurred---\n"

elif cmd == "walletpassphrase":
    try:
        pwd = raw_input("Enter wallet passphrase: ")
        # Unlock the wallet for 60 seconds.
        access.walletpassphrase(pwd, 60)
        print "\n---Wallet unlocked---\n"
    except:
        print "\n---An error occurred---\n"

elif cmd == "walletpassphrasechange":
    try:
        pwd = raw_input("Enter old wallet passphrase: ")
        pwd2 = raw_input("Enter new wallet passphrase: ")
        access.walletpassphrasechange(pwd, pwd2)
        print
        print "\n---Passphrase changed---\n"
    except:
        print
        print "\n---An error occurred---\n"
        print

else:
print "Command not found or not supported" | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use sendgrid to send emails
- Use MEMCACHIER on Heroku
'''
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
# properly on Heroku. Heroku's router terminates TLS and reports the original
# request scheme in the X-Forwarded-Proto header.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure", )
MIDDLEWARE_CLASSES = (
    # Make sure djangosecure.middleware.SecurityMiddleware is listed first
    'djangosecure.middleware.SecurityMiddleware',
) + MIDDLEWARE_CLASSES
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool("DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool("DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
# NOTE(review): with SECURE_SSL_REDIRECT enabled below the site is HTTPS-only,
# so SESSION_COOKIE_SECURE should very likely be True — confirm before changing.
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.org/en/latest/index.html
#INSTALLED_APPS += (
#    'storages',
#)
#DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7  # one week, in seconds
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
    'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
        AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing stored files.
MEDIA_URL = '/media/'
# Static Assets
# ------------------------
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
                         default='trivago2015 <noreply@waaaasssuuuuppp.com>')
EMAIL_HOST = env("DJANGO_EMAIL_HOST", default='smtp.sendgrid.com')
EMAIL_HOST_PASSWORD = env("SENDGRID_PASSWORD")
EMAIL_HOST_USER = env('SENDGRID_USERNAME')
EMAIL_PORT = env.int("EMAIL_PORT", default=587)
EMAIL_SUBJECT_PREFIX = env("EMAIL_SUBJECT_PREFIX", default='[trivago2015] ')
EMAIL_USE_TLS = True
SERVER_EMAIL = EMAIL_HOST_USER
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
    ('django.template.loaders.cached.Loader', [
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
    ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db("DATABASE_URL")
# CACHING
# ------------------------------------------------------------------------------
try:
    # Only do this here because thanks to django-pylibmc-sasl and pylibmc
    # memcacheify is painful to install on windows.
    # See: https://github.com/rdegges/django-heroku-memcacheify
    from memcacheify import memcacheify
    CACHES = memcacheify()
except ImportError:
    CACHES = {
        'default': env.cache_url("DJANGO_CACHE_URL", default="memcache://127.0.0.1:11211"),
    }
# Your production stuff: Below this line define 3rd party library settings
# FIX: removed a later re-assignment of SECURE_PROXY_SSL_HEADER to
# ('HTTP_X_FORWARDED_PROTOCOL', 'https'). Heroku forwards the scheme in
# X-Forwarded-Proto, so that override made Django treat every request as
# insecure and, combined with SECURE_SSL_REDIRECT above, produced an
# infinite redirect loop. The correct header is configured once near the
# top of this file.
#
# ff7.scene - Final Fantasy VII battle scene handling
#
# Copyright (C) 2014 Christian Bauer <www.cebix.net>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
import struct
import gzip
import StringIO
import ff7
def _enum(**enums):
return type('Enum', (), enums)
# Some selected opcodes (control flow and text handling)
# Only the opcodes this module must recognize by name are listed; all other
# opcodes are decoded by size alone in Instruction.__init__ below.
Op = _enum(
    JMPZ = 0x70,  # jump opcode; 16-bit target operand (see Scene.setStrings fixup)
    JMPNE = 0x71,  # jump opcode; 16-bit target operand
    JMP = 0x72,  # jump opcode; 16-bit target operand
    MES = 0x93,  # message; instruction runs to a 0xff terminator (see Instruction)
    DEBUG = 0xa0,  # debug text; operand is 0x00-terminated (see Instruction)
)
class Instruction:
    # One decoded battle-script instruction: opcode, byte offset within the
    # script, total encoded size, and the raw code bytes.
    # Decode a single instruction from 'data' starting at 'offset'.
    def __init__(self, data, offset):
        self.offset = offset
        opcode = data[offset]
        self.op = opcode
        # Opcodes 0x60..0x62 push immediates of fixed width.
        fixed_sizes = {0x60: 2, 0x61: 3, 0x62: 4}
        three_byte_ops = (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13)
        if opcode in fixed_sizes:
            length = fixed_sizes[opcode]
        elif opcode in three_byte_ops or opcode in (Op.JMPZ, Op.JMPNE, Op.JMP):
            # Loads/stores and jumps carry a 16-bit operand.
            length = 3
        elif opcode == Op.MES:
            # Message text: scan forward to the 0xff terminator (inclusive).
            end = offset
            while data[end] != 0xff:
                end += 1
            length = end + 1 - offset
        elif opcode == Op.DEBUG:
            # Debug text: operand is 0x00-terminated (terminator included).
            end = offset + 1
            while data[end] != 0x00:
                end += 1
            length = end + 1 - offset
        else:
            # Everything else is a bare one-byte instruction.
            length = 1
        self.size = length
        self.code = data[offset:offset + length]
    def __str__(self):
        # e.g. "0004: 0x93, 0x41, 0xff"
        hex_bytes = ", ".join(hex(b) for b in self.code)
        return "%04x: %s" % (self.offset, hex_bytes)
    # Rewrite the script offset of this instruction.
    def setOffset(self, offset):
        self.offset = offset
    # Replace the operand bytes, keeping the opcode; size is updated to match.
    def setArg(self, arg):
        self.code = bytearray([self.op]) + bytearray(arg)
        self.size = len(self.code)
# Decode binary script data, returning a list of Instruction objects.
def decodeScript(data):
    # Walk the byte stream one instruction at a time; each decoded
    # instruction reports its own encoded size, which advances the cursor.
    decoded = []
    pos = 0
    end = len(data)
    while pos < end:
        instruction = Instruction(data, pos)
        decoded.append(instruction)
        pos += instruction.size
    return decoded
# Battle scene
#
# Wraps the raw binary data of a single battle scene. Two on-disk layouts
# exist; the constructor selects the correct fixed offsets (enemy records,
# ability-name table, AI script block) from the data length. The class is
# Python 2 code (xrange, str-as-bytes, old-style raise).
class Scene:
    # Parse binary scene data; 'index' identifies the scene in error messages.
    def __init__(self, data, index):
        self.data = data
        self.index = index
        if len(data) == 0x1c50:
            # Old scene format (original Japanese version)
            self.maxStringSize = 0x10
            self.enemyDataOffset = 0x298
            self.enemyDataSize = 0xa8
            self.abilitiesOffset = 0x850
            self.aiDataOffset = 0xc50
        elif len(data) == 0x1e80:
            # New scene format
            self.maxStringSize = 0x20
            self.enemyDataOffset = 0x298
            self.enemyDataSize = 0xb8
            self.abilitiesOffset = 0x880
            self.aiDataOffset = 0xe80
        else:
            raise EnvironmentError, "Battle scene %d has unexpected length" % index
        # Extract enemy scripts
        # (three AI entities per scene; the AI block extends to the end of the data)
        self.enemyScripts = self.extractScripts(self.aiDataOffset, 3, len(data))
    # Return the binary scene data
    def getData(self):
        return self.data
    # Extract entity scripts from binary scene data.
    # Returns a list of numEntities lists of 16 scripts, each script being a
    # list of Instruction objects.
    # Layout: at entitiesOffset there is a table of numEntities 16-bit
    # offsets (0xffff = no entity); each points (relative to entitiesOffset)
    # to a per-entity table of sixteen 16-bit script offsets (relative to
    # that table; 0xffff = no script).
    def extractScripts(self, entitiesOffset, numEntities, maxOffset):
        scripts = []
        # Process all entities
        entitiesTable = struct.unpack_from("<%dH" % numEntities, self.data, entitiesOffset)
        for i in xrange(numEntities):
            offset = entitiesTable[i]
            if offset == 0xffff:
                scripts.append(None) # no entity
            else:
                scriptsOfEntity = []
                # Fetch the entity's script table
                tableOffset = entitiesOffset + offset
                scriptsTable = struct.unpack_from("<16H", self.data, tableOffset)
                # The start of the next entity's table (or the end of the
                # data) is the upper offset limit for scripts of this entity
                nextTableOffset = maxOffset
                for j in xrange(i + 1, numEntities):
                    if entitiesTable[j] != 0xffff:
                        nextTableOffset = entitiesOffset + entitiesTable[j]
                        break
                # Process all scripts in the script table
                for j in xrange(16):
                    offset = scriptsTable[j]
                    if offset == 0xffff:
                        scriptsOfEntity.append(None) # no script
                    else:
                        scriptOffset = tableOffset + offset
                        # The start of the next script (or the start of the
                        # next entity's script table) is the upper offset
                        # limit for the script
                        nextScriptOffset = nextTableOffset
                        for k in xrange(j + 1, 16):
                            if scriptsTable[k] != 0xffff:
                                nextScriptOffset = tableOffset + scriptsTable[k]
                                break
                        # Fetch the script data
                        scriptData = self.data[scriptOffset:nextScriptOffset]
                        # Remove trailing 0xff bytes
                        while scriptData[-1] == '\xff':
                            scriptData = scriptData[:-1]
                        scriptsOfEntity.append(decodeScript(bytearray(scriptData)))
                scripts.append(scriptsOfEntity)
        return scripts
    # Insert list of entity scripts into binary scene data.
    # Inverse of extractScripts: re-serializes the scripts, rebuilds both
    # offset tables, and splices the result back into self.data, padding
    # with 0xff so the overall scene size is unchanged (asserted below).
    def insertScripts(self, scripts, entitiesOffset, numEntities, maxOffset):
        entityTable = []
        entityData = ""
        tableOffset = numEntities * 2
        for i in xrange(numEntities):
            scriptsOfEntity = scripts[i]
            if scriptsOfEntity is None:
                entityTable.append(0xffff) # no entity
            else:
                scriptsTable = []
                scriptData = ""
                # 32 = size of the 16-entry script table (16 * 2 bytes)
                scriptOffset = 32
                for j in xrange(16):
                    script = scriptsOfEntity[j]
                    if script is None:
                        scriptsTable.append(0xffff) # no script
                    else:
                        # NOTE(review): the comprehension variable 'i' leaks in
                        # Python 2 and clobbers the outer entity index after this
                        # line; harmless here because 'i' is not read again before
                        # the loop reassigns it, but fragile if code is added.
                        code = "".join([str(i.code) for i in script])
                        scriptData += code
                        scriptsTable.append(scriptOffset)
                        scriptOffset += len(code)
                if len(scriptData) % 2:
                    scriptData += '\xff' # align to 16-bit boundary
                    scriptOffset += 1
                # Append scripts table and all script data to entity data
                for offset in scriptsTable:
                    entityData += struct.pack("<H", offset)
                entityData += scriptData
                entityTable.append(tableOffset)
                tableOffset += scriptOffset
        # Construct entity table and insert into scene data together with
        # entity data
        insertData = ""
        for offset in entityTable:
            insertData += struct.pack("<H", offset)
        insertData += entityData
        targetSize = maxOffset - entitiesOffset
        assert len(insertData) <= targetSize
        if len(insertData) < targetSize:
            insertData += '\xff' * (targetSize - len(insertData)) # pad with 0xff bytes
        prevDataSize = len(self.data)
        self.data = self.data[:entitiesOffset] + insertData + self.data[maxOffset:]
        assert len(self.data) == prevDataSize
    # Return the enemy names defined in the scene.
    # One fixed-size string at the start of each of the three enemy records.
    def getEnemyNames(self, japanese = False):
        enemies = []
        for i in xrange(3):
            offset = self.enemyDataOffset + i * self.enemyDataSize
            enemies.append(ff7.decodeKernelText(self.data[offset:offset + self.maxStringSize], japanese))
        return enemies
    # Return the ability names defined in the scene.
    # 32 consecutive fixed-size strings starting at abilitiesOffset.
    def getAbilityNames(self, japanese = False):
        abilities = []
        for i in xrange(32):
            offset = self.abilitiesOffset + i * self.maxStringSize
            abilities.append(ff7.decodeKernelText(self.data[offset:offset + self.maxStringSize], japanese))
        return abilities
    # Set the enemy names.
    # Each encoded name must fit in maxStringSize bytes; shorter names are
    # padded with 0xff.
    def setEnemyNames(self, enemies, japanese = False):
        for i in xrange(3):
            rawString = ff7.encodeKernelText(enemies[i], japanese)
            rawStringSize = len(rawString)
            if rawStringSize > self.maxStringSize:
                raise EnvironmentError, "Enemy name '%s' in scene %d is too long when encoded (%d > %d bytes)" % (enemies[i], self.index, rawStringSize, self.maxStringSize)
            if rawStringSize < self.maxStringSize:
                rawString += '\xff' * (self.maxStringSize - rawStringSize) # pad with 0xff bytes
            offset = self.enemyDataOffset + i * self.enemyDataSize
            self.data = self.data[:offset] + rawString + self.data[offset + self.maxStringSize:]
    # Set the ability names.
    # Same size/padding rules as setEnemyNames, over the 32 ability slots.
    def setAbilityNames(self, abilities, japanese = False):
        for i in xrange(32):
            rawString = ff7.encodeKernelText(abilities[i], japanese)
            rawStringSize = len(rawString)
            if rawStringSize > self.maxStringSize:
                raise EnvironmentError, "Ability name '%s' in scene %d is too long when encoded (%d > %d bytes)" % (abilities[i], self.index, rawStringSize, self.maxStringSize)
            if rawStringSize < self.maxStringSize:
                rawString += '\xff' * (self.maxStringSize - rawStringSize) # pad with 0xff bytes
            offset = self.abilitiesOffset + i * self.maxStringSize
            self.data = self.data[:offset] + rawString + self.data[offset + self.maxStringSize:]
    # Return the list of message strings in the scene scripts.
    # Strings are the operands of MES instructions, in script order; this
    # order must be preserved by callers of setStrings below.
    def getStrings(self, japanese = False):
        strings = []
        for scriptsOfEnemy in self.enemyScripts:
            if scriptsOfEnemy is None:
                continue
            for script in scriptsOfEnemy:
                if script is None:
                    continue
                for instr in script:
                    if instr.op == Op.MES:
                        rawString = str(instr.code[1:])
                        strings.append(ff7.decodeKernelText(rawString, japanese))
        return strings
    # Replace the message strings in the scene scripts.
    # 'strings' must contain one entry per MES instruction, in the same
    # order getStrings returns them (currentString advances per MES).
    def setStrings(self, strings, japanese = False):
        currentString = 0
        for scriptsOfEntity in self.enemyScripts:
            if scriptsOfEntity is None:
                continue
            for script in scriptsOfEntity:
                if script is None:
                    continue
                # Changing strings of MES instructions changes their size,
                # so we need to fixup jump targets. We do this by first
                # converting target offsets to target instruction indexes,
                # changing instructions, and then converting the indexes
                # back to offsets again.
                # Construct list of all instruction offsets
                instrOffsets = [instr.offset for instr in script]
                # Find the target instruction indexes of all jumps
                jumpMap = {} # maps index of jump instruction to index of target
                for index in xrange(len(script)):
                    instr = script[index]
                    if instr.op in [Op.JMPZ, Op.JMPNE, Op.JMP]:
                        targetOffset = struct.unpack("<H", instr.code[1:])[0]
                        targetIndex = instrOffsets.index(targetOffset)
                        jumpMap[index] = targetIndex
                # Replace the strings in all MES instructions
                for index in xrange(len(script)):
                    if script[index].op == Op.MES:
                        rawString = ff7.encodeKernelText(strings[currentString], japanese)
                        script[index].setArg(rawString)
                        currentString += 1
                # Recalculate all instruction offsets
                offset = 0
                for index in xrange(len(script)):
                    script[index].setOffset(offset)
                    offset += script[index].size
                # Fixup the target offsets of jumps
                for index in xrange(len(script)):
                    if script[index].op in [Op.JMPZ, Op.JMPNE, Op.JMP]:
                        targetIndex = jumpMap[index]
                        targetOffset = script[targetIndex].offset
                        script[index].setArg(struct.pack("<H", targetOffset))
        # Convert scripts back to binary data
        self.insertScripts(self.enemyScripts, self.aiDataOffset, 3, len(self.data))
# Battle scene archive file (SCENE.BIN)
#
# The archive is a sequence of 0x2000-byte blocks. Each block begins with a
# table of sixteen 32-bit pointers (block-relative offsets stored
# right-shifted by 2; 0xffffffff marks an unused slot) followed by the
# gzip-compressed scenes the pointers refer to. Python 2 code (StringIO,
# xrange, str-as-bytes).
class Archive:
    blockSize = 0x2000
    pointerTableSize = 0x40  # sixteen 4-byte pointers
    maxSceneSize = 0x1e80 # maximum size of uncompressed scene
    # Parse the scene archive from an open file object.
    def __init__(self, fileobj):
        self.sceneData = []
        self.sceneIndexTable = [] # index of first scene in each block
        sceneIndex = 0
        # Read all blocks
        while True:
            # Read the next block
            block = fileobj.read(self.blockSize)
            if len(block) < self.blockSize:
                break
            # Parse the pointer table
            pointers = struct.unpack_from("<16L", block)
            offsets = []
            for p in pointers:
                if p == 0xffffffff:
                    break
                # pointers are stored divided by 4 (scenes are 4-byte aligned)
                offsets.append(p << 2)
            numScenes = len(offsets)
            offsets.append(self.blockSize) # dummy offset to determine end of last scene
            self.sceneIndexTable.append(sceneIndex)
            # Extract all scenes in the block
            for i in xrange(numScenes):
                start = offsets[i]
                end = offsets[i + 1]
                assert end >= start
                # Strip the 0xff padding before handing the gzip stream to
                # the decompressor.
                buffer = StringIO.StringIO(block[start:end].rstrip('\xff'))
                zipper = gzip.GzipFile(fileobj = buffer, mode = "rb")
                scene = zipper.read(self.maxSceneSize)
                self.sceneData.append(scene)
                sceneIndex += 1
    # Return the number of scenes (should be 256).
    def numScenes(self):
        return len(self.sceneData)
    # Return the scene with the given index.
    def getScene(self, index):
        return Scene(self.sceneData[index], index)
    # Replace the scene with the given index.
    def setScene(self, index, scene):
        self.sceneData[index] = scene.getData()
    # Write the archive to a file object, truncating the file.
    # Scenes are re-compressed and packed greedily: a block is flushed as
    # soon as the next compressed scene would overflow it.
    def writeToFile(self, fileobj):
        # Truncate file
        fileobj.seek(0)
        fileobj.truncate()
        # Scene index table will be rebuilt
        self.sceneIndexTable = []
        sceneIndex = 0
        numScenes = len(self.sceneData)
        # Process all scenes
        block = ""
        pointers = []
        firstIndexInBlock = 0
        while True:
            writeBlock = False
            if sceneIndex >= numScenes:
                # All scenes done, write the last block
                cmpData = None
                writeBlock = True
            else:
                # Compress next scene
                cmpData = ff7.compressGzip(self.sceneData[sceneIndex])
                if len(cmpData) % 4 != 0:
                    cmpData += '\xff' * (4 - len(cmpData) % 4) # pad scene to 4-byte boundary
                if self.pointerTableSize + len(block) + len(cmpData) > self.blockSize:
                    # Scene doesn't fit in current block, write it first
                    writeBlock = True
            if writeBlock:
                # Write current block to file
                # (pointers are stored right-shifted by 2; unused slots 0xffffffff)
                for p in pointers:
                    fileobj.write(struct.pack("<L", p >> 2))
                for i in xrange(16 - len(pointers)):
                    fileobj.write(struct.pack("<L", 0xffffffff))
                if len(block) < self.blockSize - self.pointerTableSize:
                    block += '\xff' * (self.blockSize - self.pointerTableSize - len(block)) # pad with 0xff bytes
                fileobj.write(block)
                self.sceneIndexTable.append(firstIndexInBlock)
                block = ""
                pointers = []
                firstIndexInBlock = sceneIndex
                if sceneIndex >= numScenes:
                    # All done
                    break
            else:
                # Add compressed scene to block
                pointers.append(len(block) + self.pointerTableSize)
                block += cmpData
                sceneIndex += 1
import collections
# Keys whose child lists are never flattened element-by-element: their list
# values are kept whole, so they get replaced and versioned as a single unit
# rather than merged item-by-item (see flatten()).
NOT_FLATTEN_KEYS = ['additionalIdentifiers',
                    'additionalClassifications',
                    'suppliers',
                    'changes',
                    'tenderers'
                    ]
class IdValue(str):
    '''A string subclass used so an isinstance check can distinguish
    id-based path components from plain strings.'''
    def __init__(self, value):
        ## Save original value. This is needed if the id was originally an
        ## integer and you want to keep that information.
        self.original_value = value
        # NOTE(review): str.__init__(value) passes 'value' as self, which
        # makes this a no-op; left unchanged since str is immutable anyway.
        str.__init__(value)
def flatten(path, flattened, obj):
    '''Recursively flatten a nested JSON object into key/value pairs.

    Keys are JSON paths represented as tuples, accumulated in 'flattened'
    (which is also returned).
    eg. {"a": "I am a", "b": ["A", "list"], "c": [{"ca": "I am ca"}, {"cb": "I am cb"}]}
    will flatten to
    {('a',): 'I am a',
     ('b', 1): 'list',
     ('c', 0, 'ca'): 'I am ca',
     ('b', 0): 'A',
     ('c', 1, 'cb'): 'I am cb'}
    '''
    if isinstance(obj, dict):
        children = list(obj.items())
        # An empty container is recorded as a leaf so it round-trips.
        if not children:
            flattened[path] = {}
    else:
        children = list(enumerate(obj))
        if not children:
            flattened[path] = []
    for key, value in children:
        child_path = path + (key,)
        # Containers recurse, except for keys whose child lists are merged
        # wholesale (NOT_FLATTEN_KEYS) — those stay as leaf values.
        if isinstance(value, (dict, list)) and key not in NOT_FLATTEN_KEYS:
            flatten(child_path, flattened, value)
        else:
            flattened[child_path] = value
    return flattened
def unflatten(flattened):
    '''Unflatten flattened object back into nested form.

    Keys are tuples of path components as produced by flatten() /
    process_flattened(). A component that is an IdValue denotes a position
    inside a list: the target element is located (or created) by matching
    its 'id' field against the IdValue's original value.
    '''
    unflattened = {}
    for flat_key in flattened:
        # Walk/create containers along the path; current_pos is the
        # container being descended into (dict or list).
        current_pos = unflattened
        for num, item in enumerate(flat_key):
            if isinstance(item, IdValue):
                if len(flat_key) - 1 == num: #when this is an array of string or ints
                    current_pos.append(flattened[flat_key])
                else:
                    # Find the list element whose 'id' matches; the for/else
                    # creates a fresh {'id': ...} element when none does.
                    for obj in current_pos:
                        obj_id = obj.get('id')
                        if obj_id == item.original_value:
                            current_pos = obj
                            break
                    else:
                        new_pos = {"id": item.original_value}
                        current_pos.append(new_pos)
                        current_pos = new_pos
                continue
            new_pos = current_pos.get(item)
            if new_pos is not None:
                current_pos = new_pos
                continue
            # Component not present yet: a leaf stores the value directly; an
            # intermediate component becomes a list when the next component
            # is an IdValue (i.e. we are entering an array), else a dict.
            if len(flat_key) - 1 == num:
                current_pos[item] = flattened[flat_key]
            elif isinstance(flat_key[num + 1], IdValue):
                new_pos = []
                current_pos[item] = new_pos
                current_pos = new_pos
            else:
                new_pos = {}
                current_pos[item] = new_pos
                current_pos = new_pos
    return unflattened
def process_flattened(flattened):
    ''' Replace numbers in json path (representing position in arrays)
    with special id object. This is to make detecting what is an
    array possible without needed to check schema.'''
    # Keep ordered so that arrays will stay in the same order.
    # Sorting by (path length, path) processes parents before children and
    # keeps sibling array items in index order.
    processed = collections.OrderedDict()
    for key in sorted(flattened.keys(), key=lambda a: (len(a),) + a):
        new_key = []
        for num, item in enumerate(key):
            if isinstance(item, int):
                # Prefer the array item's own 'id' value as the path
                # component; fall back to the list index when the item has
                # no 'id' entry.
                id_value = flattened.get(tuple(key[:num+1]) + ('id',))
                if id_value is None:
                    id_value = item
                new_key.append(IdValue(id_value))
                continue
            new_key.append(item)
        processed[tuple(new_key)] = flattened[key]
    return processed
def merge(releases):
    '''Merge a list of OCDS releases into a compiledRelease for a Record.

    Releases are applied oldest-first; later values overwrite earlier ones
    at the same (id-anchored) JSON path.
    '''
    compiled = collections.OrderedDict()
    compiled[("tag",)] = ['compiled']
    for original in sorted(releases, key=lambda rel: rel["date"]):
        # Work on a shallow copy so callers' releases are untouched.
        release = original.copy()
        release.pop('tag', None)
        # Id-anchored flattened paths are identical across releases whenever
        # the ids match, so merging reduces to a dict update (array order is
        # still preserved by the ordered dict).
        processed = process_flattened(flatten((), {}, release))
        compiled.update(processed)
    return unflatten(compiled)
def merge_versioned(releases):
    ''' Takes a list of releases and merge them making a
    versionedRelease suitible for an OCDS Record '''
    merged = collections.OrderedDict()
    for release in sorted(releases, key=lambda rel: rel["date"]):
        release = release.copy()
        # ocid/id/date/tag are release metadata: the ocid is carried over
        # as-is, the rest annotate each versioned value created below.
        ocid = release.pop("ocid")
        merged[("ocid",)] = ocid
        releaseID = release.pop("id")
        date = release.pop("date")
        tag = release.pop('tag', None)
        flat = flatten((), {}, release)
        processed = process_flattened(flat)
        for key, value in processed.items():
            # 'id' fields that anchor array items are structural and are
            # stored unversioned.
            # NOTE(review): isinstance(key[-2], tuple) looks suspicious —
            # path components are strings or IdValue (a str subclass), never
            # tuples, so this branch appears unreachable as written.
            # Presumably isinstance(key[-2], IdValue) was intended; confirm
            # against the OCDS merge specification before changing.
            if key[-1] == 'id' and isinstance(key[-2], tuple):
                merged[key] = value
                continue
            new_value = {"releaseID": releaseID,
                         "releaseDate": date,
                         "releaseTag": tag,
                         "value": value}
            # Only append a new version when the value actually changed
            # relative to the most recent recorded version.
            if key in merged:
                if value == merged[key][-1]['value']:
                    continue
            if key not in merged:
                merged[key] = []
            merged[key].append(new_value)
    return unflatten(merged)
# frozen_string_literal: true
require "cases/helper"
require "models/topic"
require "models/person"
# Tests for `validates_confirmation_of` (ActiveModel confirmation
# validation): nil confirmations are skipped while blank ones must match,
# boolean attributes are supported, plain Ruby classes work, i18n attribute
# names feed the error message, user-defined confirmation readers/writers
# take precedence, and :case_sensitive is honored.
class ConfirmationValidationTest < ActiveModel::TestCase
  def teardown
    Topic.clear_validators!
  end
  # A nil confirmation is ignored entirely; only a present, mismatching
  # confirmation makes the record invalid.
  def test_no_title_confirmation
    Topic.validates_confirmation_of(:title)
    t = Topic.new(author_name: "Plutarch")
    assert_predicate t, :valid?
    t.title_confirmation = "Parallel Lives"
    assert_predicate t, :invalid?
    t.title_confirmation = nil
    t.title = "Parallel Lives"
    assert_predicate t, :valid?
    t.title_confirmation = "Parallel Lives"
    assert_predicate t, :valid?
  end
  # An empty-string confirmation is NOT skipped — it must match the title.
  def test_title_confirmation
    Topic.validates_confirmation_of(:title)
    t = Topic.new("title" => "We should be confirmed", "title_confirmation" => "")
    assert_predicate t, :invalid?
    t.title_confirmation = "We should be confirmed"
    assert_predicate t, :valid?
  end
  def test_validates_confirmation_of_with_boolean_attribute
    Topic.validates_confirmation_of(:approved)
    t = Topic.new(approved: true, approved_confirmation: nil)
    assert_predicate t, :valid?
    t.approved_confirmation = false
    assert_predicate t, :invalid?
    t.approved_confirmation = true
    assert_predicate t, :valid?
  end
  # The validation works on a plain Ruby class (Person), not just AR-style
  # models; the error message includes the humanized attribute name.
  def test_validates_confirmation_of_for_ruby_class
    Person.validates_confirmation_of :karma
    p = Person.new
    p.karma_confirmation = "None"
    assert_predicate p, :invalid?
    assert_equal ["doesn't match Karma"], p.errors[:karma_confirmation]
    p.karma = "None"
    assert_predicate p, :valid?
  ensure
    Person.clear_validators!
  end
  # Swap in an isolated i18n backend so the translated attribute name
  # ("Test Title") appears in the confirmation error message.
  def test_title_confirmation_with_i18n_attribute
    @old_load_path, @old_backend = I18n.load_path.dup, I18n.backend
    I18n.load_path.clear
    I18n.backend = I18n::Backend::Simple.new
    I18n.backend.store_translations("en",
      errors: { messages: { confirmation: "doesn't match %{attribute}" } },
      activemodel: { attributes: { topic: { title: "Test Title" } } })
    Topic.validates_confirmation_of(:title)
    t = Topic.new("title" => "We should be confirmed", "title_confirmation" => "")
    assert_predicate t, :invalid?
    assert_equal ["doesn't match Test Title"], t.errors[:title_confirmation]
  ensure
    I18n.load_path.replace @old_load_path
    I18n.backend = @old_backend
    I18n.backend.reload!
  end
  test "does not override confirmation reader if present" do
    klass = Class.new do
      include ActiveModel::Validations
      def title_confirmation
        "expected title"
      end
      validates_confirmation_of :title
    end
    assert_equal "expected title", klass.new.title_confirmation,
      "confirmation validation should not override the reader"
  end
  test "does not override confirmation writer if present" do
    klass = Class.new do
      include ActiveModel::Validations
      def title_confirmation=(value)
        @title_confirmation = "expected title"
      end
      validates_confirmation_of :title
    end
    model = klass.new
    model.title_confirmation = "new title"
    assert_equal "expected title", model.title_confirmation,
      "confirmation validation should not override the writer"
  end
  def test_title_confirmation_with_case_sensitive_option_true
    Topic.validates_confirmation_of(:title, case_sensitive: true)
    t = Topic.new(title: "title", title_confirmation: "Title")
    assert_predicate t, :invalid?
  end
  def test_title_confirmation_with_case_sensitive_option_false
    Topic.validates_confirmation_of(:title, case_sensitive: false)
    t = Topic.new(title: "title", title_confirmation: "Title")
    assert_predicate t, :valid?
  end
end
# -*- coding: utf-8 -*-
###############################################################################
#
# ListZoneOperations
# Retrieves the list of Zone Operation resources contained within the specified Zone.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListZoneOperations(Choreography):
    # Generated Temboo Choreo wrapper: binds this client to the
    # /Library/Google/ComputeEngine/ZoneOperations/ListZoneOperations Choreo
    # and supplies the matching typed input/result/execution classes below.
    def __init__(self, temboo_session):
        """
        Create a new instance of the ListZoneOperations Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(ListZoneOperations, self).__init__(temboo_session, '/Library/Google/ComputeEngine/ZoneOperations/ListZoneOperations')
    def new_input_set(self):
        # Factory for the typed input set used to parameterize an execution.
        return ListZoneOperationsInputSet()
    def _make_result_set(self, result, path):
        # Wrap raw results in the Choreo-specific result set type.
        return ListZoneOperationsResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Wrap a running execution in the Choreo-specific execution type.
        return ListZoneOperationsChoreographyExecution(session, exec_id, path)
class ListZoneOperationsInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the ListZoneOperations
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Generated boilerplate: each setter forwards one named input to the
    # Temboo engine via InputSet._set_input.
    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
        """
        super(ListZoneOperationsInputSet, self)._set_input('AccessToken', value)
    def set_ClientID(self, value):
        """
        Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
        """
        super(ListZoneOperationsInputSet, self)._set_input('ClientID', value)
    def set_ClientSecret(self, value):
        """
        Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
        """
        super(ListZoneOperationsInputSet, self)._set_input('ClientSecret', value)
    def set_Fields(self, value):
        """
        Set the value of the Fields input for this Choreo. ((optional, string) Comma-seperated list of fields you want to include in the response.)
        """
        super(ListZoneOperationsInputSet, self)._set_input('Fields', value)
    def set_Filter(self, value):
        """
        Set the value of the Filter input for this Choreo. ((optional, string) A filter expression for narrowing results in the form: {field_name} {comparison_string} {literal_string} (e.g. name eq europe-west1-a). Comparison strings can be eq (equals) or ne (not equals).)
        """
        super(ListZoneOperationsInputSet, self)._set_input('Filter', value)
    def set_MaxResults(self, value):
        """
        Set the value of the MaxResults input for this Choreo. ((optional, integer) The maximum number of results to return.)
        """
        super(ListZoneOperationsInputSet, self)._set_input('MaxResults', value)
    def set_PageToken(self, value):
        """
        Set the value of the PageToken input for this Choreo. ((optional, string) The "nextPageToken" found in the response which is used to page through results.)
        """
        super(ListZoneOperationsInputSet, self)._set_input('PageToken', value)
    def set_Project(self, value):
        """
        Set the value of the Project input for this Choreo. ((required, string) The ID of a Google Compute project.)
        """
        super(ListZoneOperationsInputSet, self)._set_input('Project', value)
    def set_RefreshToken(self, value):
        """
        Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth refresh token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
        """
        super(ListZoneOperationsInputSet, self)._set_input('RefreshToken', value)
    def set_Zone(self, value):
        """
        Set the value of the Zone input for this Choreo. ((required, string) The name of the zone that contains the operation resources to retrieve.)
        """
        super(ListZoneOperationsInputSet, self)._set_input('Zone', value)
class ListZoneOperationsResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the ListZoneOperations Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the built-in 'str' inside
        # this method. Harmless here, but renaming would change the generated
        # interface, so it is documented rather than changed.
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Google.)
        """
        return self._output.get('Response', None)
    def get_NewAccessToken(self):
        """
        Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
        """
        return self._output.get('NewAccessToken', None)
class ListZoneOperationsChoreographyExecution(ChoreographyExecution):
    # Binds execution results back to the Choreo-specific result set type.
    def _make_result_set(self, response, path):
        return ListZoneOperationsResultSet(response, path)
"""Ported cmemcached tests"""
import pylibmc
from nose.tools import eq_
from tests import PylibmcTestCase
class TestCmemcached(PylibmcTestCase):
    # Ported cmemcached test cases covering basic client operations
    # (set/get, delete, multi-get, append/prepend) through self.mc — the
    # client is presumably set up by PylibmcTestCase against a live
    # memcached; confirm in the test harness. Python 2 code (uses xrange).
    def testSetAndGet(self):
        self.mc.set("num12345", 12345)
        eq_(self.mc.get("num12345"), 12345)
        self.mc.set("str12345", "12345")
        eq_(self.mc.get("str12345"), "12345")
    def testDelete(self):
        self.mc.set("str12345", "12345")
        #delete return True on success, otherwise False
        assert self.mc.delete("str12345")
        assert self.mc.get("str12345") is None
        # This test only works with old memcacheds. This has become a "client
        # error" in memcached.
        try:
            assert not self.mc.delete("hello world")
        except pylibmc.ClientError:
            pass
    def testGetMulti(self):
        self.mc.set("a", "valueA")
        self.mc.set("b", "valueB")
        self.mc.set("c", "valueC")
        # Keys that were never stored ("" and "hello world") are simply
        # absent from the result dict.
        result = self.mc.get_multi(["a", "b", "c", "", "hello world"])
        eq_(result, {'a':'valueA', 'b':'valueB', 'c':'valueC'})
    def testBigGetMulti(self):
        # Round-trip 10**4 keys to exercise the multi-get path at volume.
        count = 10 ** 4
        keys = ['key%d' % i for i in xrange(count)]
        pairs = zip(keys, ['value%d' % i for i in xrange(count)])
        for key, value in pairs:
            self.mc.set(key, value)
        result = self.mc.get_multi(keys)
        eq_(result, dict(pairs))
    def testFunnyDelete(self):
        # Deleting the empty key must report failure (not raise).
        assert not self.mc.delete("")
    def testAppend(self):
        self.mc.delete("a")
        self.mc.set("a", "I ")
        assert self.mc.append("a", "Do")
        eq_(self.mc.get("a"), "I Do")
    def testPrepend(self):
        self.mc.delete("a")
        self.mc.set("a", "Do")
        assert self.mc.prepend("a", "I ")
        eq_(self.mc.get("a"), "I Do")
"""
Atomic coordinate featurizer.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Joseph Gomes and Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "LGPL v2.1+"
import numpy as np
from deepchem.utils.dependencies import mdtraj
from deepchem.feat import Featurizer
from deepchem.feat import ComplexFeaturizer
from deepchem.utils import rdkit_util, pad_array
class AtomicCoordinates(Featurizer):
  """
  Nx3 matrix of Cartesian coordinates [Angstrom]
  """
  name = ['atomic_coordinates']
  def _featurize(self, mol):
    """
    Calculate atomic coordinates.

    Parameters
    ----------
    mol : RDKit Mol
      Molecule.

    Returns
    -------
    Single-element list holding an (N, 3) array of coordinates converted
    from Angstrom to Bohr (divided by 0.52917721092).
    """
    N = mol.GetNumAtoms()
    coords = np.zeros((N, 3))
    # RDKit stores atomic coordinates in Angstrom. Atomic unit of length is the
    # bohr (1 bohr = 0.529177 Angstrom). Converting units makes gradient calculation
    # consistent with most QM software packages.
    # NOTE(review): __idiv__ is invoked explicitly (a Python-2-era special
    # method) and divides the Point3D in place — presumably this also mutates
    # the conformer's stored position; confirm against the RDKit API.
    coords_in_bohr = [
        mol.GetConformer(0).GetAtomPosition(i).__idiv__(0.52917721092)
        for i in range(N)
    ]
    for atom in range(N):
      coords[atom, 0] = coords_in_bohr[atom].x
      coords[atom, 1] = coords_in_bohr[atom].y
      coords[atom, 2] = coords_in_bohr[atom].z
    coords = [coords]
    return coords
def compute_neighbor_list(coords, neighbor_cutoff, max_num_neighbors,
                          periodic_box_size):
  """Computes a neighbor list from atom coordinates.

  Parameters
  ----------
  coords: np.ndarray
    (N, 3) array of atom positions (reshaped into a 1-frame mdtraj Trajectory).
  neighbor_cutoff: float
    Distance cutoff handed to mdtraj's neighborlist computation.
  max_num_neighbors: int or None
    If set, only the closest max_num_neighbors atoms are kept per atom.
  periodic_box_size: 3 element array or None
    Box edge lengths for periodic boundary conditions; None disables them.

  Returns
  -------
  dict mapping atom index -> list of neighbor atom indices.
  """
  N = coords.shape[0]
  traj = mdtraj.Trajectory(coords.reshape((1, N, 3)), None)
  box_size = None
  if periodic_box_size is not None:
    box_size = np.array(periodic_box_size)
    # Orthorhombic box: diagonal unit-cell vectors.
    traj.unitcell_vectors = np.array(
        [[[box_size[0], 0, 0], [0, box_size[1], 0], [0, 0, box_size[2]]]],
        dtype=np.float32)
  neighbors = mdtraj.geometry.compute_neighborlist(traj, neighbor_cutoff)
  neighbor_list = {}
  for i in range(N):
    if max_num_neighbors is not None and len(neighbors[i]) > max_num_neighbors:
      # Too many neighbors: rank them by distance and keep the closest ones.
      delta = coords[i] - coords.take(neighbors[i], axis=0)
      if box_size is not None:
        # Wrap displacement vectors back into the box (minimum-image style).
        delta -= np.round(delta / box_size) * box_size
      dist = np.linalg.norm(delta, axis=1)
      sorted_neighbors = list(zip(dist, neighbors[i]))
      sorted_neighbors.sort()
      neighbor_list[
          i] = [sorted_neighbors[j][1] for j in range(max_num_neighbors)]
    else:
      neighbor_list[i] = list(neighbors[i])
  return neighbor_list
def get_coords(mol):
    """
    Gets coordinates in Angstrom for RDKit mol.

    Returns an (N, 3) numpy array read from the molecule's first conformer.
    """
    num_atoms = mol.GetNumAtoms()
    conformer = mol.GetConformer(0)
    coords = np.zeros((num_atoms, 3))
    for i in range(num_atoms):
        position = conformer.GetAtomPosition(i)
        coords[i, 0] = position.x
        coords[i, 1] = position.y
        coords[i, 2] = position.z
    return coords
class NeighborListAtomicCoordinates(Featurizer):
    """
    Adjacency List of neighbors in 3-space
    Neighbors determined by user-defined distance cutoff [in Angstrom].
    https://en.wikipedia.org/wiki/Cell_list
    Ref: http://www.cs.cornell.edu/ron/references/1989/Calculations%20of%20a%20List%20of%20Neighbors%20in%20Molecular%20Dynamics%20Si.pdf

    Parameters
    ----------
    neighbor_cutoff: float
      Threshold distance [Angstroms] for counting neighbors.
    periodic_box_size: 3 element array
      Dimensions of the periodic box in Angstroms, or None to not use periodic boundary conditions
    """

    def __init__(self, max_num_neighbors=None, neighbor_cutoff=4,
                 periodic_box_size=None):
        if neighbor_cutoff <= 0:
            raise ValueError("neighbor_cutoff must be positive value.")
        bad_max = (max_num_neighbors is not None and
                   (not isinstance(max_num_neighbors, int) or
                    max_num_neighbors <= 0))
        if bad_max:
            raise ValueError("max_num_neighbors must be positive integer.")
        self.max_num_neighbors = max_num_neighbors
        self.neighbor_cutoff = neighbor_cutoff
        self.periodic_box_size = periodic_box_size
        # Results are ragged, so this featurizer produces object arrays.
        self.dtype = object
        self.coordinates_featurizer = AtomicCoordinates()

    def _featurize(self, mol):
        """
        Compute neighbor list.

        Parameters
        ----------
        mol: rdkit Mol
          To be featurized.

        Returns
        -------
        (coords_in_bohr, neighbor_list) tuple; the neighbor list itself is
        computed from Angstrom coordinates.
        """
        # TODO(rbharath): Should this return a list?
        bohr_coords = self.coordinates_featurizer._featurize(mol)[0]
        angstrom_coords = get_coords(mol)
        neighbors = compute_neighbor_list(angstrom_coords,
                                          self.neighbor_cutoff,
                                          self.max_num_neighbors,
                                          self.periodic_box_size)
        return (bohr_coords, neighbors)
class NeighborListComplexAtomicCoordinates(ComplexFeaturizer):
    """
    Adjacency list of neighbors for protein-ligand complexes in 3-space.
    Neighbors determined by user-defined distance cutoff.
    """

    def __init__(self, max_num_neighbors=None, neighbor_cutoff=4):
        if neighbor_cutoff <= 0:
            raise ValueError("neighbor_cutoff must be positive value.")
        bad_max = (max_num_neighbors is not None and
                   (not isinstance(max_num_neighbors, int) or
                    max_num_neighbors <= 0))
        if bad_max:
            raise ValueError("max_num_neighbors must be positive integer.")
        self.max_num_neighbors = max_num_neighbors
        self.neighbor_cutoff = neighbor_cutoff
        # Results are ragged, so this featurizer produces object arrays.
        self.dtype = object
        self.coordinates_featurizer = AtomicCoordinates()

    def _featurize_complex(self, mol_pdb_file, protein_pdb_file):
        """
        Compute neighbor list for complex.

        Parameters
        ----------
        mol_pdb_file: str
          Path to the ligand PDB file.
        protein_pdb_file: str
          Path to the protein PDB file.

        Returns
        -------
        (system_coords, system_neighbor_list) for the merged system.
        """
        mol_coords, _mol = rdkit_util.load_molecule(mol_pdb_file)
        protein_coords, _protein = rdkit_util.load_molecule(protein_pdb_file)
        system_coords = rdkit_util.merge_molecules_xyz(mol_coords,
                                                       protein_coords)
        system_neighbor_list = compute_neighbor_list(
            system_coords, self.neighbor_cutoff, self.max_num_neighbors, None)
        return (system_coords, system_neighbor_list)
class ComplexNeighborListFragmentAtomicCoordinates(ComplexFeaturizer):
    """Featurize a ligand/protein pair into padded coordinates, neighbor
    lists and atomic-number (Z) vectors for each fragment and for the
    merged complex.
    """

    def __init__(self,
                 frag1_num_atoms,
                 frag2_num_atoms,
                 complex_num_atoms,
                 max_num_neighbors,
                 neighbor_cutoff,
                 strip_hydrogens=True):
        # Padding sizes for fragment 1 (ligand), fragment 2 (protein)
        # and the merged complex, respectively.
        self.frag1_num_atoms = frag1_num_atoms
        self.frag2_num_atoms = frag2_num_atoms
        self.complex_num_atoms = complex_num_atoms
        self.max_num_neighbors = max_num_neighbors
        self.neighbor_cutoff = neighbor_cutoff
        self.strip_hydrogens = strip_hydrogens
        self.neighborlist_featurizer = NeighborListComplexAtomicCoordinates(
            self.max_num_neighbors, self.neighbor_cutoff)

    def _featurize_complex(self, mol_pdb_file, protein_pdb_file):
        # Load both fragments and build the merged system BEFORE hydrogen
        # stripping, so the merged molecule is constructed from full inputs.
        frag1_coords, frag1_mol = rdkit_util.load_molecule(mol_pdb_file)
        frag2_coords, frag2_mol = rdkit_util.load_molecule(protein_pdb_file)
        system_mol = rdkit_util.merge_molecules(frag1_mol, frag2_mol)
        system_coords = rdkit_util.get_xyz_from_mol(system_mol)
        # Strip hydrogens (no-op when strip_hydrogens is False).
        frag1_coords, frag1_mol = self._strip_hydrogens(frag1_coords, frag1_mol)
        frag2_coords, frag2_mol = self._strip_hydrogens(frag2_coords, frag2_mol)
        system_coords, system_mol = self._strip_hydrogens(system_coords, system_mol)
        # Featurize each piece: padded coords, neighbor list, padded Z vector.
        frag1_coords, frag1_neighbor_list, frag1_z = self.featurize_mol(
            frag1_coords, frag1_mol, self.frag1_num_atoms)
        frag2_coords, frag2_neighbor_list, frag2_z = self.featurize_mol(
            frag2_coords, frag2_mol, self.frag2_num_atoms)
        system_coords, system_neighbor_list, system_z = self.featurize_mol(
            system_coords, system_mol, self.complex_num_atoms)
        return frag1_coords, frag1_neighbor_list, frag1_z, frag2_coords, frag2_neighbor_list, frag2_z, \
            system_coords, system_neighbor_list, system_z

    def get_Z_matrix(self, mol, max_atoms):
        # Vector of atomic numbers, zero-padded to max_atoms.
        return pad_array(
            np.array([atom.GetAtomicNum() for atom in mol.GetAtoms()]), max_atoms)

    def featurize_mol(self, coords, mol, max_num_atoms):
        # Neighbor list is computed without periodic boundary conditions.
        neighbor_list = compute_neighbor_list(coords, self.neighbor_cutoff,
                                              self.max_num_neighbors, None)
        z = self.get_Z_matrix(mol, max_num_atoms)
        # NOTE(review): get_Z_matrix already pads to max_num_atoms, so this
        # second pad_array call looks redundant (presumably a no-op) —
        # confirm before removing.
        z = pad_array(z, max_num_atoms)
        coords = pad_array(coords, (max_num_atoms, 3))
        return coords, neighbor_list, z

    def _strip_hydrogens(self, coords, mol):
        # Minimal stand-ins so downstream code can still call GetAtoms() /
        # GetAtomicNum() on the stripped molecule without an RDKit object.

        class MoleculeShim(object):
            """
            Shim of a Molecule which supports #GetAtoms()
            """

            def __init__(self, atoms):
                self.atoms = [AtomShim(x) for x in atoms]

            def GetAtoms(self):
                return self.atoms

        class AtomShim(object):
            # Holds only the atomic number of one atom.

            def __init__(self, atomic_num):
                self.atomic_num = atomic_num

            def GetAtomicNum(self):
                return self.atomic_num

        if not self.strip_hydrogens:
            return coords, mol
        # Keep every heavy atom (atomic number != 1) and its coordinates.
        indexes_to_keep = []
        atomic_numbers = []
        for index, atom in enumerate(mol.GetAtoms()):
            if atom.GetAtomicNum() != 1:
                indexes_to_keep.append(index)
                atomic_numbers.append(atom.GetAtomicNum())
        mol = MoleculeShim(atomic_numbers)
        coords = coords[indexes_to_keep]
        return coords, mol
"""HTML 2.0 parser.
See the HTML 2.0 specification:
http://www.w3.org/hypertext/WWW/MarkUp/html-spec/html-spec_toc.html
"""
from warnings import warnpy3k
warnpy3k("the htmllib module has been removed in Python 3.0",
stacklevel=2)
del warnpy3k
import sgmllib
from formatter import AS_IS
__all__ = ["HTMLParser", "HTMLParseError"]
class HTMLParseError(sgmllib.SGMLParseError):
    """Error raised when an HTML document can't be parsed."""
    # Subclasses SGMLParseError so code catching sgmllib errors still works.
class HTMLParser(sgmllib.SGMLParser):
    """This is the basic HTML parser class.

    It supports all entity names required by the XHTML 1.0 Recommendation.
    It also defines handlers for all HTML 2.0 and many HTML 3.0 and 3.2
    elements.
    """

    # Entity name -> replacement text mapping, shared by all instances.
    from htmlentitydefs import entitydefs

    def __init__(self, formatter, verbose=0):
        """Creates an instance of the HTMLParser class.

        The formatter parameter is the formatter instance associated with
        the parser.
        """
        sgmllib.SGMLParser.__init__(self, verbose)
        self.formatter = formatter

    def error(self, message):
        # Raise our own exception type instead of sgmllib's.
        raise HTMLParseError(message)

    def reset(self):
        sgmllib.SGMLParser.reset(self)
        self.savedata = None    # buffer used by save_bgn()/save_end(), or None
        self.isindex = 0        # true once an <ISINDEX> tag has been seen
        self.title = None       # contents of the <TITLE> element, if any
        self.base = None        # href of the <BASE> tag, if any
        self.anchor = None      # href of the currently open <A>, if any
        self.anchorlist = []    # hrefs of all <A HREF=...> seen so far
        self.nofill = 0         # >0 while inside literal (<PRE>-like) markup
        self.list_stack = []    # open lists: [tag, item label, item counter]

    # ------ Methods used internally; some may be overridden

    # --- Formatter interface, taking care of 'savedata' mode;
    # shouldn't need to be overridden

    def handle_data(self, data):
        # Buffer data when saving; otherwise forward to the formatter,
        # literally when inside <PRE>-style markup.
        if self.savedata is not None:
            self.savedata = self.savedata + data
        else:
            if self.nofill:
                self.formatter.add_literal_data(data)
            else:
                self.formatter.add_flowing_data(data)

    # --- Hooks to save data; shouldn't need to be overridden

    def save_bgn(self):
        """Begins saving character data in a buffer instead of sending it
        to the formatter object.

        Retrieve the stored data via the save_end() method. Use of the
        save_bgn() / save_end() pair may not be nested.
        """
        self.savedata = ''

    def save_end(self):
        """Ends buffering character data and returns all data saved since
        the preceding call to the save_bgn() method.

        If the nofill flag is false, whitespace is collapsed to single
        spaces. A call to this method without a preceding call to the
        save_bgn() method will raise a TypeError exception.
        """
        data = self.savedata
        self.savedata = None
        if not self.nofill:
            data = ' '.join(data.split())
        return data

    # --- Hooks for anchors; should probably be overridden

    def anchor_bgn(self, href, name, type):
        """This method is called at the start of an anchor region.

        The arguments correspond to the attributes of the <A> tag with
        the same names. The default implementation maintains a list of
        hyperlinks (defined by the HREF attribute for <A> tags) within
        the document. The list of hyperlinks is available as the data
        attribute anchorlist.
        """
        self.anchor = href
        if self.anchor:
            self.anchorlist.append(href)

    def anchor_end(self):
        """This method is called at the end of an anchor region.

        The default implementation adds a textual footnote marker using an
        index into the list of hyperlinks created by the anchor_bgn() method.
        """
        if self.anchor:
            self.handle_data("[%d]" % len(self.anchorlist))
            self.anchor = None

    # --- Hook for images; should probably be overridden

    def handle_image(self, src, alt, *args):
        """This method is called to handle images.

        The default implementation simply passes the alt value to the
        handle_data() method.
        """
        self.handle_data(alt)

    # --------- Top level elements
    # These structural tags carry no formatting of their own here.

    def start_html(self, attrs): pass
    def end_html(self): pass

    def start_head(self, attrs): pass
    def end_head(self): pass

    def start_body(self, attrs): pass
    def end_body(self): pass

    # ------ Head elements

    def start_title(self, attrs):
        # Title text is buffered and stored in self.title by end_title().
        self.save_bgn()

    def end_title(self):
        self.title = self.save_end()

    def do_base(self, attrs):
        # Remember the document base URL for relative link resolution.
        for a, v in attrs:
            if a == 'href':
                self.base = v

    def do_isindex(self, attrs):
        self.isindex = 1

    def do_link(self, attrs):
        pass

    def do_meta(self, attrs):
        pass

    def do_nextid(self, attrs): # Deprecated
        pass

    # ------ Body elements

    # --- Headings
    # Each heading ends the current paragraph and pushes a font spec of
    # the form (size tag, italic, bold, teletype); end handlers pop it.

    def start_h1(self, attrs):
        self.formatter.end_paragraph(1)
        self.formatter.push_font(('h1', 0, 1, 0))

    def end_h1(self):
        self.formatter.end_paragraph(1)
        self.formatter.pop_font()

    def start_h2(self, attrs):
        self.formatter.end_paragraph(1)
        self.formatter.push_font(('h2', 0, 1, 0))

    def end_h2(self):
        self.formatter.end_paragraph(1)
        self.formatter.pop_font()

    def start_h3(self, attrs):
        self.formatter.end_paragraph(1)
        self.formatter.push_font(('h3', 0, 1, 0))

    def end_h3(self):
        self.formatter.end_paragraph(1)
        self.formatter.pop_font()

    def start_h4(self, attrs):
        self.formatter.end_paragraph(1)
        self.formatter.push_font(('h4', 0, 1, 0))

    def end_h4(self):
        self.formatter.end_paragraph(1)
        self.formatter.pop_font()

    def start_h5(self, attrs):
        self.formatter.end_paragraph(1)
        self.formatter.push_font(('h5', 0, 1, 0))

    def end_h5(self):
        self.formatter.end_paragraph(1)
        self.formatter.pop_font()

    def start_h6(self, attrs):
        self.formatter.end_paragraph(1)
        self.formatter.push_font(('h6', 0, 1, 0))

    def end_h6(self):
        self.formatter.end_paragraph(1)
        self.formatter.pop_font()

    # --- Block Structuring Elements

    def do_p(self, attrs):
        self.formatter.end_paragraph(1)

    def start_pre(self, attrs):
        # nofill is a counter, not a flag, so nested literal elements work.
        self.formatter.end_paragraph(1)
        self.formatter.push_font((AS_IS, AS_IS, AS_IS, 1))
        self.nofill = self.nofill + 1

    def end_pre(self):
        self.formatter.end_paragraph(1)
        self.formatter.pop_font()
        self.nofill = max(0, self.nofill - 1)

    def start_xmp(self, attrs):
        self.start_pre(attrs)
        self.setliteral('xmp') # Tell SGML parser

    def end_xmp(self):
        self.end_pre()

    def start_listing(self, attrs):
        self.start_pre(attrs)
        self.setliteral('listing') # Tell SGML parser

    def end_listing(self):
        self.end_pre()

    def start_address(self, attrs):
        self.formatter.end_paragraph(0)
        self.formatter.push_font((AS_IS, 1, AS_IS, AS_IS))

    def end_address(self):
        self.formatter.end_paragraph(0)
        self.formatter.pop_font()

    def start_blockquote(self, attrs):
        self.formatter.end_paragraph(1)
        self.formatter.push_margin('blockquote')

    def end_blockquote(self):
        self.formatter.end_paragraph(1)
        self.formatter.pop_margin()

    # --- List Elements

    def start_ul(self, attrs):
        # Only break a paragraph for the outermost list.
        self.formatter.end_paragraph(not self.list_stack)
        self.formatter.push_margin('ul')
        self.list_stack.append(['ul', '*', 0])

    def end_ul(self):
        if self.list_stack: del self.list_stack[-1]
        self.formatter.end_paragraph(not self.list_stack)
        self.formatter.pop_margin()

    def do_li(self, attrs):
        self.formatter.end_paragraph(0)
        if self.list_stack:
            # Bump the item counter of the innermost open list in place.
            [dummy, label, counter] = top = self.list_stack[-1]
            top[2] = counter = counter+1
        else:
            label, counter = '*', 0
        self.formatter.add_label_data(label, counter)

    def start_ol(self, attrs):
        self.formatter.end_paragraph(not self.list_stack)
        self.formatter.push_margin('ol')
        label = '1.'
        for a, v in attrs:
            if a == 'type':
                # Single-character TYPE values get a trailing dot appended.
                if len(v) == 1: v = v + '.'
                label = v
        self.list_stack.append(['ol', label, 0])

    def end_ol(self):
        if self.list_stack: del self.list_stack[-1]
        self.formatter.end_paragraph(not self.list_stack)
        self.formatter.pop_margin()

    # <MENU> and <DIR> are treated exactly like unordered lists.

    def start_menu(self, attrs):
        self.start_ul(attrs)

    def end_menu(self):
        self.end_ul()

    def start_dir(self, attrs):
        self.start_ul(attrs)

    def end_dir(self):
        self.end_ul()

    def start_dl(self, attrs):
        self.formatter.end_paragraph(1)
        self.list_stack.append(['dl', '', 0])

    def end_dl(self):
        self.ddpop(1)
        if self.list_stack: del self.list_stack[-1]

    def do_dt(self, attrs):
        self.ddpop()

    def do_dd(self, attrs):
        self.ddpop()
        self.formatter.push_margin('dd')
        self.list_stack.append(['dd', '', 0])

    def ddpop(self, bl=0):
        # Close an open <DD> margin, if any, before the next <DT>/<DD>.
        self.formatter.end_paragraph(bl)
        if self.list_stack:
            if self.list_stack[-1][0] == 'dd':
                del self.list_stack[-1]
                self.formatter.pop_margin()

    # --- Phrase Markup

    # Idiomatic Elements -- each maps onto a typographic element below.

    def start_cite(self, attrs): self.start_i(attrs)
    def end_cite(self): self.end_i()

    def start_code(self, attrs): self.start_tt(attrs)
    def end_code(self): self.end_tt()

    def start_em(self, attrs): self.start_i(attrs)
    def end_em(self): self.end_i()

    def start_kbd(self, attrs): self.start_tt(attrs)
    def end_kbd(self): self.end_tt()

    def start_samp(self, attrs): self.start_tt(attrs)
    def end_samp(self): self.end_tt()

    def start_strong(self, attrs): self.start_b(attrs)
    def end_strong(self): self.end_b()

    def start_var(self, attrs): self.start_i(attrs)
    def end_var(self): self.end_i()

    # Typographic Elements

    def start_i(self, attrs):
        self.formatter.push_font((AS_IS, 1, AS_IS, AS_IS))
    def end_i(self):
        self.formatter.pop_font()

    def start_b(self, attrs):
        self.formatter.push_font((AS_IS, AS_IS, 1, AS_IS))
    def end_b(self):
        self.formatter.pop_font()

    def start_tt(self, attrs):
        self.formatter.push_font((AS_IS, AS_IS, AS_IS, 1))
    def end_tt(self):
        self.formatter.pop_font()

    def start_a(self, attrs):
        # Collect the href/name/type attributes and delegate to anchor_bgn().
        href = ''
        name = ''
        type = ''
        for attrname, value in attrs:
            value = value.strip()
            if attrname == 'href':
                href = value
            if attrname == 'name':
                name = value
            if attrname == 'type':
                type = value.lower()
        self.anchor_bgn(href, name, type)

    def end_a(self):
        self.anchor_end()

    # --- Line Break

    def do_br(self, attrs):
        self.formatter.add_line_break()

    # --- Horizontal Rule

    def do_hr(self, attrs):
        self.formatter.add_hor_rule()

    # --- Image

    def do_img(self, attrs):
        # Collect the recognized attributes, coercing width/height to int,
        # then delegate to handle_image().
        align = ''
        alt = '(image)'
        ismap = ''
        src = ''
        width = 0
        height = 0
        for attrname, value in attrs:
            if attrname == 'align':
                align = value
            if attrname == 'alt':
                alt = value
            if attrname == 'ismap':
                ismap = value
            if attrname == 'src':
                src = value
            if attrname == 'width':
                try: width = int(value)
                except ValueError: pass
            if attrname == 'height':
                try: height = int(value)
                except ValueError: pass
        self.handle_image(src, alt, ismap, align, width, height)

    # --- Really Old Unofficial Deprecated Stuff

    def do_plaintext(self, attrs):
        self.start_pre(attrs)
        self.setnomoretags() # Tell SGML parser

    # --- Unhandled tags

    def unknown_starttag(self, tag, attrs):
        pass

    def unknown_endtag(self, tag):
        pass
def test(args = None):
import sys, formatter
if not args:
args = sys.argv[1:]
silent = args and args[0] == '-s'
if silent:
del args[0]
if args:
file = args[0]
else:
file = 'test.html'
if file == '-':
f = sys.stdin
else:
try:
f = open(file, 'r')
except IOError, msg:
print file, ":", msg
sys.exit(1)
data = f.read()
if f is not sys.stdin:
f.close()
if silent:
f = formatter.NullFormatter()
else:
f = formatter.AbstractFormatter(formatter.DumbWriter())
p = HTMLParser(f)
p.feed(data)
p.close()
if __name__ == '__main__':
test() | unknown | codeparrot/codeparrot-clean | ||
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2010 Andras Becsi (abecsi@inf.u-szeged.hu), University of Szeged
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# FIXME: rename this file, and add more text about how this is
# different from the base file_lock class.
"""This class helps to block NRWT threads when more NRWTs run
perf, http and websocket tests in a same time."""
import logging
import os
import sys
import tempfile
import time
from webkitpy.common.system.executive import Executive
from webkitpy.common.system.file_lock import FileLock
from webkitpy.common.system.filesystem import FileSystem
_log = logging.getLogger(__name__)
class HttpLock(object):
    # Serializes HTTP/websocket/perf test runs across concurrent NRWT
    # processes using numbered lock files in a shared directory; the
    # process owning the lowest-numbered live lock file holds the lock.

    def __init__(self, lock_path, lock_file_prefix="WebKitHttpd.lock.", guard_lock="WebKit.lock", filesystem=None, executive=None, name='HTTP'):
        self._executive = executive or Executive()
        self._filesystem = filesystem or FileSystem()
        self._lock_path = lock_path
        if not self._lock_path:
            # FIXME: FileSystem should have an accessor for tempdir()
            self._lock_path = tempfile.gettempdir()
        self._lock_file_prefix = lock_file_prefix
        self._lock_file_path_prefix = self._filesystem.join(self._lock_path, self._lock_file_prefix)
        # The guard lock serializes lock-number allocation between processes.
        self._guard_lock_file = self._filesystem.join(self._lock_path, guard_lock)
        self._guard_lock = FileLock(self._guard_lock_file)
        self._process_lock_file_name = ""
        self._name = name

    def cleanup_http_lock(self):
        """Delete the lock file if exists."""
        if self._filesystem.exists(self._process_lock_file_name):
            _log.debug("Removing lock file: %s" % self._process_lock_file_name)
            self._filesystem.remove(self._process_lock_file_name)

    def _extract_lock_number(self, lock_file_name):
        """Return the lock number from lock file."""
        prefix_length = len(self._lock_file_path_prefix)
        return int(lock_file_name[prefix_length:])

    def _lock_file_list(self):
        """Return the list of lock files sequentially."""
        lock_list = self._filesystem.glob(self._lock_file_path_prefix + '*')
        # Numeric sort, so lock 10 sorts after lock 9.
        lock_list.sort(key=self._extract_lock_number)
        return lock_list

    def _next_lock_number(self):
        """Return the next available lock number."""
        lock_list = self._lock_file_list()
        if not lock_list:
            return 0
        return self._extract_lock_number(lock_list[-1]) + 1

    def _current_lock_pid(self):
        """Return with the current lock pid. If the lock is not valid
        it deletes the lock file."""
        lock_list = self._lock_file_list()
        if not lock_list:
            _log.debug("No lock file list")
            return
        try:
            # The oldest (lowest-numbered) lock file holds the lock;
            # drop it if its owner process is no longer running.
            current_pid = self._filesystem.read_text_file(lock_list[0])
            if not (current_pid and self._executive.check_running_pid(int(current_pid))):
                _log.debug("Removing stuck lock file: %s" % lock_list[0])
                self._filesystem.remove(lock_list[0])
                return
        except IOError, e:
            _log.debug("IOError: %s" % e)
            return
        except OSError, e:
            _log.debug("OSError: %s" % e)
            return
        return int(current_pid)

    def _create_lock_file(self):
        """The lock files are used to schedule the running test sessions in first
        come first served order. The guard lock ensures that the lock numbers are
        sequential."""
        if not self._filesystem.exists(self._lock_path):
            _log.debug("Lock directory does not exist: %s" % self._lock_path)
            return False
        if not self._guard_lock.acquire_lock():
            _log.debug("Guard lock timed out!")
            return False
        self._process_lock_file_name = (self._lock_file_path_prefix + str(self._next_lock_number()))
        _log.debug("Creating lock file: %s" % self._process_lock_file_name)
        # FIXME: Executive.py should have an accessor for getpid()
        self._filesystem.write_text_file(self._process_lock_file_name, str(os.getpid()))
        self._guard_lock.release_lock()
        return True

    def wait_for_httpd_lock(self):
        """Create a lock file and wait until it's turn comes. If something goes wrong
        it wont do any locking."""
        if not self._create_lock_file():
            _log.debug("Warning, %s locking failed!" % self._name)
            return
        # Busy-wait until our lock file becomes the oldest live one.
        # FIXME: This can hang forever!
        while self._current_lock_pid() != os.getpid():
            time.sleep(1)
        _log.debug("%s lock acquired" % self._name)
"""
A Django command that dumps the structure of a course as a JSON object.
The resulting JSON object has one entry for each module in the course:
{
"$module_url": {
"category": "$module_category",
"children": [$module_children_urls... ],
"metadata": {$module_metadata}
},
"$module_url": ....
...
}
"""
import json
from optparse import make_option
from textwrap import dedent
from django.core.management.base import BaseCommand, CommandError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.inheritance import own_metadata, compute_inherited_metadata
from xblock.fields import Scope
FILTER_LIST = ['xml_attributes', 'checklists']
INHERITED_FILTER_LIST = ['children', 'xml_attributes', 'checklists']
class Command(BaseCommand):
    """
    Write out to stdout a structural and metadata information for a
    course as a JSON object
    """
    args = "<course_id>"
    help = dedent(__doc__).strip()
    option_list = BaseCommand.option_list + (
        make_option('--modulestore',
                    action='store',
                    default='default',
                    help='Name of the modulestore'),
        make_option('--inherited',
                    action='store_true',
                    default=False,
                    help='Whether to include inherited metadata'),
        make_option('--inherited_defaults',
                    action='store_true',
                    default=False,
                    help='Whether to include default values of inherited metadata'),
    )

    def handle(self, *args, **options):
        # Exactly one positional argument: the course id to dump.
        if len(args) != 1:
            raise CommandError("course_id not specified")

        # Get the modulestore
        try:
            name = options['modulestore']
            # modulestore() raises KeyError for an unrecognized store name.
            store = modulestore(name)
        except KeyError:
            raise CommandError("Unknown modulestore {}".format(name))

        # Get the course data
        course_id = args[0]
        course = store.get_course(course_id)
        if course is None:
            raise CommandError("Invalid course_id")

        # precompute inherited metadata at the course level, if needed:
        if options['inherited']:
            compute_inherited_metadata(course)

        # Convert course data to dictionary and dump it as JSON to stdout
        # (BaseCommand prints the returned string).
        info = dump_module(course, inherited=options['inherited'], defaults=options['inherited_defaults'])
        return json.dumps(info, indent=2, sort_keys=True)
def dump_module(module, destination=None, inherited=False, defaults=False):
    """
    Add the module and all its children to the destination dictionary in
    as a flat structure.

    Each entry maps the module URL to its category, child URLs and filtered
    metadata; when `inherited` is set an 'inherited_metadata' dict is added.
    """
    destination = destination or {}

    metadata = {k: v for k, v in own_metadata(module).iteritems()
                if k not in FILTER_LIST}
    url = module.location.url()
    destination[url] = {
        'category': module.location.category,
        'children': [str(child) for child in getattr(module, 'children', [])],
        'metadata': metadata,
    }

    if inherited:
        # When calculating inherited metadata, don't include
        # locally-defined metadata or structural fields.
        skip_fields = list(metadata.keys()) + INHERITED_FILTER_LIST

        def is_inherited(field):
            if field.name in skip_fields:
                return False
            if field.scope != Scope.settings:
                return False
            if defaults:
                return True
            return field.values != field.default

        destination[url]['inherited_metadata'] = {
            field.name: field.read_json(module)
            for field in module.fields.values() if is_inherited(field)
        }

    for child in module.get_children():
        dump_module(child, destination, inherited, defaults)

    return destination
/*
* subtrans.h
*
* PostgreSQL subtransaction-log manager
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/access/subtrans.h
*/
#ifndef SUBTRANS_H
#define SUBTRANS_H

/* Record / look up a subtransaction's parent XID in pg_subtrans. */
extern void SubTransSetParent(TransactionId xid, TransactionId parent);
extern TransactionId SubTransGetParent(TransactionId xid);
/* Follow the parent chain up to the top-level transaction's XID. */
extern TransactionId SubTransGetTopmostTransaction(TransactionId xid);

/* Shared-memory sizing and lifecycle hooks for the SLRU area. */
extern Size SUBTRANSShmemSize(void);
extern void SUBTRANSShmemInit(void);
extern void BootStrapSUBTRANS(void);
extern void StartupSUBTRANS(TransactionId oldestActiveXID);
extern void CheckPointSUBTRANS(void);
/* Grow / trim the on-disk log as the XID space advances. */
extern void ExtendSUBTRANS(TransactionId newestXact);
extern void TruncateSUBTRANS(TransactionId oldestXact);

#endif							/* SUBTRANS_H */
"""Ttk wrapper.
This module provides classes to allow using Tk themed widget set.
Ttk is based on a revised and enhanced version of
TIP #48 (http://tip.tcl.tk/48) specified style engine.
Its basic idea is to separate, to the extent possible, the code
implementing a widget's behavior from the code implementing its
appearance. Widget class bindings are primarily responsible for
maintaining the widget state and invoking callbacks, all aspects
of the widgets appearance lies at Themes.
"""
__version__ = "0.3.1"
__author__ = "Guilherme Polo <ggpolo@gmail.com>"
__all__ = ["Button", "Checkbutton", "Combobox", "Entry", "Frame", "Label",
"Labelframe", "LabelFrame", "Menubutton", "Notebook", "Panedwindow",
"PanedWindow", "Progressbar", "Radiobutton", "Scale", "Scrollbar",
"Separator", "Sizegrip", "Style", "Treeview",
# Extensions
"LabeledScale", "OptionMenu",
# functions
"tclobjs_to_py", "setup_master"]
import tkinter
_flatten = tkinter._flatten
# Verify if Tk is new enough to not need the Tile package
_REQUIRE_TILE = True if tkinter.TkVersion < 8.5 else False
def _load_tile(master):
    """Load the Tile extension when running under a pre-8.5 Tk."""
    if not _REQUIRE_TILE:
        return
    import os
    tile_library = os.environ.get('TILE_LIBRARY')
    if tile_library:
        # Append the custom Tile path to the list of directories that
        # Tcl uses when attempting to resolve packages with the package
        # command.
        master.tk.eval(
            'global auto_path; '
            'lappend auto_path {%s}' % tile_library)
    master.tk.eval('package require tile') # TclError may be raised here
    master._tile_loaded = True
def _format_optdict(optdict, script=False, ignore=None):
    """Formats optdict to a tuple to pass it to tk.call.

    E.g. (script=False):
      {'foreground': 'blue', 'padding': [1, 2, 3, 4]} returns:
      ('-foreground', 'blue', '-padding', '1 2 3 4')"""
    # In script mode the joined value must form a single Tcl word.
    wrap = "{%s}" if script else "%s"

    result = []
    for opt, value in optdict.items():
        if ignore and opt in ignore:
            continue
        if isinstance(value, (list, tuple)):
            pieces = []
            for item in value:
                if isinstance(item, str):
                    pieces.append(str(item) if item else '{}')
                else:
                    pieces.append(str(item))

            # Group any piece containing a space in braces so Tcl keeps it
            # as one word.
            value = wrap % ' '.join(
                ('{%s}' % piece) if ' ' in piece else piece
                for piece in pieces)

        if script and value == '':
            value = '{}' # empty string in Python is equivalent to {} in Tcl

        result.append(("-%s" % opt, value))

    # Remember: _flatten skips over None
    return _flatten(result)
def _format_mapdict(mapdict, script=False):
    """Formats mapdict to pass it to tk.call.

    E.g. (script=False):
      {'expand': [('active', 'selected', 'grey'), ('focus', [1, 2, 3, 4])]}

      returns:

      ('-expand', '{active selected} grey focus {1, 2, 3, 4}')"""
    # If caller passes a Tcl script to tk.call, all the values need to
    # be grouped into words (arguments to a command in Tcl dialect).
    wrap = "{%s}" if script else "%s"

    result = []
    for opt, statespecs in mapdict.items():
        words = []
        # Each value in mapdict is expected to be a sequence, where each
        # item is another sequence containing a state (or several) and a
        # value.
        for statespec in statespecs:
            states, val = statespec[:-1], statespec[-1]
            if len(states) > 1: # group multiple states
                state = "{%s}" % ' '.join(states)
            else:
                # An empty (falsy) state denotes the "normal" state in Tcl.
                state = states[0] or '{}'
            if isinstance(val, (list, tuple)): # val needs to be grouped
                val = "{%s}" % ' '.join(map(str, val))
            words.append("%s %s" % (state, val))
        result.append(("-%s" % opt, wrap % ' '.join(words)))

    return _flatten(result)
def _format_elemcreate(etype, script=False, *args, **kw):
"""Formats args and kw according to the given element factory etype."""
spec = None
opts = ()
if etype in ("image", "vsapi"):
if etype == "image": # define an element based on an image
# first arg should be the default image name
iname = args[0]
# next args, if any, are statespec/value pairs which is almost
# a mapdict, but we just need the value
imagespec = _format_mapdict({None: args[1:]})[1]
spec = "%s %s" % (iname, imagespec)
else:
# define an element whose visual appearance is drawn using the
# Microsoft Visual Styles API which is responsible for the
# themed styles on Windows XP and Vista.
# Availability: Tk 8.6, Windows XP and Vista.
class_name, part_id = args[:2]
statemap = _format_mapdict({None: args[2:]})[1]
spec = "%s %s %s" % (class_name, part_id, statemap)
opts = _format_optdict(kw, script)
elif etype == "from": # clone an element
# it expects a themename and optionally an element to clone from,
# otherwise it will clone {} (empty element)
spec = args[0] # theme name
if len(args) > 1: # elementfrom specified
opts = (args[1], )
if script:
spec = '{%s}' % spec
opts = ' '.join(map(str, opts))
return spec, opts
def _format_layoutlist(layout, indent=0, indent_size=2):
    """Formats a layout list so we can pass the result to ttk::style
    layout and ttk::style settings. Note that the layout does not have to
    be a list necessarily.

    Returns a (script, indent) tuple; indent is threaded through the
    recursion so nested children line up.

    E.g.:
      [("Menubutton.background", None),
       ("Menubutton.button", {"children":
           [("Menubutton.focus", {"children":
               [("Menubutton.padding", {"children":
                   [("Menubutton.label", {"side": "left", "expand": 1})]
               })]
           })]
       }),
       ("Menubutton.indicator", {"side": "right"})
      ]

    returns:

      Menubutton.background
      Menubutton.button -children {
        Menubutton.focus -children {
          Menubutton.padding -children {
            Menubutton.label -side left -expand 1
          }
        }
      }
      Menubutton.indicator -side right"""
    script = []

    for layout_elem in layout:
        elem, opts = layout_elem
        opts = opts or {}
        # Format the element's options; 'children' is handled recursively
        # below rather than as a plain option.
        fopts = ' '.join(map(str, _format_optdict(opts, True, "children")))
        head = "%s%s%s" % (' ' * indent, elem, (" %s" % fopts) if fopts else '')

        if "children" in opts:
            script.append(head + " -children {")
            indent += indent_size
            newscript, indent = _format_layoutlist(opts['children'], indent,
                indent_size)
            script.append(newscript)
            indent -= indent_size
            script.append('%s}' % (' ' * indent))
        else:
            script.append(head)

    return '\n'.join(script), indent
def _script_from_settings(settings):
    """Returns an appropriate script, based on settings, according to
    theme_settings definition to be used by theme_settings and
    theme_create.
    settings maps style names to dicts which may contain the keys
    'configure', 'map', 'layout' and 'element create'."""
    script = []
    # a script will be generated according to settings passed, which
    # will then be evaluated by Tcl
    for name, opts in settings.items():
        # will format specific keys according to Tcl code
        if opts.get('configure'): # format 'configure'
            s = ' '.join(map(str, _format_optdict(opts['configure'], True)))
            script.append("ttk::style configure %s %s;" % (name, s))
        if opts.get('map'): # format 'map'
            s = ' '.join(map(str, _format_mapdict(opts['map'], True)))
            script.append("ttk::style map %s %s;" % (name, s))
        if 'layout' in opts: # format 'layout' which may be empty
            if not opts['layout']:
                s = 'null' # could be any other word, but this one makes sense
            else:
                s, _ = _format_layoutlist(opts['layout'])
            script.append("ttk::style layout %s {\n%s\n}" % (name, s))
        if opts.get('element create'): # format 'element create'
            eopts = opts['element create']
            etype = eopts[0]
            # find where args end, and where kwargs start
            argc = 1 # etype was the first one
            while argc < len(eopts) and not hasattr(eopts[argc], 'items'):
                argc += 1
            elemargs = eopts[1:argc]
            elemkw = eopts[argc] if argc < len(eopts) and eopts[argc] else {}
            # note: this rebinds the loop variable 'opts'; safe because it is
            # the last use of 'opts' in this iteration
            spec, opts = _format_elemcreate(etype, True, *elemargs, **elemkw)
            script.append("ttk::style element create %s %s %s %s" % (
                name, etype, spec, opts))
    return '\n'.join(script)
def _dict_from_tcltuple(ttuple, cut_minus=True):
    """Break tuple in pairs, format it properly, then build the return
    dict. If cut_minus is True, the supposed '-' prefixing options will
    be removed.
    ttuple is expected to contain an even number of elements."""
    start = 1 if cut_minus else 0
    # pair every option name with the value that follows it
    pairs = zip(ttuple[::2], ttuple[1::2])
    formatted = {str(opt)[start:]: val for opt, val in pairs}
    return tclobjs_to_py(formatted)
def _list_from_statespec(stuple):
    """Construct a list from the given statespec tuple according to the
    statespec format accepted by _format_mapdict."""
    normalized = []
    for item in stuple:
        tname = getattr(item, 'typename', None)
        if tname is None:
            # plain Python value, keep as-is
            normalized.append(item)
            continue
        # Tcl object: stringify; StateSpec objects expand to their states
        text = str(item)
        normalized.append(text.split() if tname == 'StateSpec' else text)
    # group into (statespec, value) pairs and flatten each pair
    pair_iter = iter(normalized)
    return [_flatten(pair) for pair in zip(pair_iter, pair_iter)]
def _list_from_layouttuple(ltuple):
"""Construct a list from the tuple returned by ttk::layout, this is
somewhat the reverse of _format_layoutlist."""
res = []
indx = 0
while indx < len(ltuple):
name = ltuple[indx]
opts = {}
res.append((name, opts))
indx += 1
while indx < len(ltuple): # grab name's options
opt, val = ltuple[indx:indx + 2]
if not opt.startswith('-'): # found next name
break
opt = opt[1:] # remove the '-' from the option
indx += 2
if opt == 'children':
val = _list_from_layouttuple(val)
opts[opt] = val
return res
def _val_or_dict(options, func, *args):
    """Format options then call func with args and options and return
    the appropriate result.
    If no option is specified, a dict is returned. If an option is
    specified with the None value, the value for that option is returned.
    Otherwise, the function just sets the passed options and the caller
    shouldn't be expecting a return value anyway."""
    # presumably a None-valued option formats to a lone '-opt' token,
    # making len(options) odd -- TODO confirm against _format_optdict
    options = _format_optdict(options)
    res = func(*(args + options))
    if len(options) % 2: # option specified without a value, return its value
        return res
    return _dict_from_tcltuple(res)
def _convert_stringval(value):
"""Converts a value to, hopefully, a more appropriate Python object."""
value = str(value)
try:
value = int(value)
except (ValueError, TypeError):
pass
return value
def tclobjs_to_py(adict):
    """Returns adict with its values converted (in place) from Tcl
    objects to Python objects."""
    for key in adict:
        value = adict[key]
        if value and hasattr(value, '__len__') and not isinstance(value, str):
            # non-string sequence: statespec sequences get special handling,
            # everything else is converted element-wise
            if getattr(value[0], 'typename', None) == 'StateSpec':
                adict[key] = _list_from_statespec(value)
            else:
                adict[key] = list(map(_convert_stringval, value))
        elif hasattr(value, 'typename'):
            # some other (single) Tcl object
            adict[key] = _convert_stringval(value)
    return adict
def setup_master(master=None):
    """If master is not None, itself is returned. If master is None,
    the default master is returned if there is one, otherwise a new
    master is created and returned.
    If it is not allowed to use the default root and master is None,
    RuntimeError is raised."""
    if master is not None:
        return master
    if not tkinter._support_default_root:
        raise RuntimeError(
            "No master specified and tkinter is "
            "configured to not support default root")
    # reuse the default root when it exists, otherwise create one
    return tkinter._default_root or tkinter.Tk()
class Style(object):
    """Manipulate style database.
    Thin wrapper around the Tcl ``ttk::style`` command; every method
    delegates to it via self.tk.call."""
    _name = "ttk::style"
    def __init__(self, master=None):
        # fall back to (or create) the default root if no master given
        master = setup_master(master)
        if not getattr(master, '_tile_loaded', False):
            # Load tile now, if needed
            _load_tile(master)
        self.master = master
        self.tk = self.master.tk
    def configure(self, style, query_opt=None, **kw):
        """Query or sets the default value of the specified option(s) in
        style.
        Each key in kw is an option and each value is either a string or
        a sequence identifying the value for that option."""
        if query_opt is not None:
            # a None-valued option makes _val_or_dict return just that value
            kw[query_opt] = None
        return _val_or_dict(kw, self.tk.call, self._name, "configure", style)
    def map(self, style, query_opt=None, **kw):
        """Query or sets dynamic values of the specified option(s) in
        style.
        Each key in kw is an option and each value should be a list or a
        tuple (usually) containing statespecs grouped in tuples, or list,
        or something else of your preference. A statespec is compound of
        one or more states and then a value."""
        if query_opt is not None:
            # query mode: return the statespec list for a single option
            return _list_from_statespec(
                self.tk.call(self._name, "map", style, '-%s' % query_opt))
        return _dict_from_tcltuple(
            self.tk.call(self._name, "map", style, *(_format_mapdict(kw))))
    def lookup(self, style, option, state=None, default=None):
        """Returns the value specified for option in style.
        If state is specified it is expected to be a sequence of one
        or more states. If the default argument is set, it is used as
        a fallback value in case no specification for option is found."""
        state = ' '.join(state) if state else ''
        return self.tk.call(self._name, "lookup", style, '-%s' % option,
            state, default)
    def layout(self, style, layoutspec=None):
        """Define the widget layout for given style. If layoutspec is
        omitted, return the layout specification for given style.
        layoutspec is expected to be a list or an object different than
        None that evaluates to False if you want to "turn off" that style.
        If it is a list (or tuple, or something else), each item should be
        a tuple where the first item is the layout name and the second item
        should have the format described below:
        LAYOUTS
            A layout can contain the value None, if takes no options, or
            a dict of options specifying how to arrange the element.
            The layout mechanism uses a simplified version of the pack
            geometry manager: given an initial cavity, each element is
            allocated a parcel. Valid options/values are:
                side: whichside
                    Specifies which side of the cavity to place the
                    element; one of top, right, bottom or left. If
                    omitted, the element occupies the entire cavity.
                sticky: nswe
                    Specifies where the element is placed inside its
                    allocated parcel.
                children: [sublayout... ]
                    Specifies a list of elements to place inside the
                    element. Each element is a tuple (or other sequence)
                    where the first item is the layout name, and the other
                    is a LAYOUT."""
        lspec = None
        if layoutspec:
            lspec = _format_layoutlist(layoutspec)[0]
        elif layoutspec is not None: # will disable the layout ({}, '', etc)
            lspec = "null" # could be any other word, but this may make sense
                           # when calling layout(style) later
        return _list_from_layouttuple(
            self.tk.call(self._name, "layout", style, lspec))
    def element_create(self, elementname, etype, *args, **kw):
        """Create a new element in the current theme of given etype."""
        spec, opts = _format_elemcreate(etype, False, *args, **kw)
        self.tk.call(self._name, "element", "create", elementname, etype,
            spec, *opts)
    def element_names(self):
        """Returns the list of elements defined in the current theme."""
        return self.tk.call(self._name, "element", "names")
    def element_options(self, elementname):
        """Return the list of elementname's options."""
        return self.tk.call(self._name, "element", "options", elementname)
    def theme_create(self, themename, parent=None, settings=None):
        """Creates a new theme.
        It is an error if themename already exists. If parent is
        specified, the new theme will inherit styles, elements and
        layouts from the specified parent theme. If settings are present,
        they are expected to have the same syntax used for theme_settings."""
        script = _script_from_settings(settings) if settings else ''
        if parent:
            self.tk.call(self._name, "theme", "create", themename,
                "-parent", parent, "-settings", script)
        else:
            self.tk.call(self._name, "theme", "create", themename,
                "-settings", script)
    def theme_settings(self, themename, settings):
        """Temporarily sets the current theme to themename, apply specified
        settings and then restore the previous theme.
        Each key in settings is a style and each value may contain the
        keys 'configure', 'map', 'layout' and 'element create' and they
        are expected to have the same format as specified by the methods
        configure, map, layout and element_create respectively."""
        script = _script_from_settings(settings)
        self.tk.call(self._name, "theme", "settings", themename, script)
    def theme_names(self):
        """Returns a list of all known themes."""
        return self.tk.call(self._name, "theme", "names")
    def theme_use(self, themename=None):
        """If themename is None, returns the theme in use, otherwise, set
        the current theme to themename, refreshes all widgets and emits
        a <<ThemeChanged>> event."""
        if themename is None:
            # Starting on Tk 8.6, checking this global is no longer needed
            # since it allows doing self.tk.call(self._name, "theme", "use")
            return self.tk.eval("return $ttk::currentTheme")
        # using "ttk::setTheme" instead of "ttk::style theme use" causes
        # the variable currentTheme to be updated, also, ttk::setTheme calls
        # "ttk::style theme use" in order to change theme.
        self.tk.call("ttk::setTheme", themename)
class Widget(tkinter.Widget):
    """Base class for Tk themed widgets."""
    def __init__(self, master, widgetname, kw=None):
        """Constructs a Ttk Widget with the parent master.
        STANDARD OPTIONS
            class, cursor, takefocus, style
        SCROLLABLE WIDGET OPTIONS
            xscrollcommand, yscrollcommand
        LABEL WIDGET OPTIONS
            text, textvariable, underline, image, compound, width
        WIDGET STATES
            active, disabled, focus, pressed, selected, background,
            readonly, alternate, invalid
        """
        # fall back to (or create) the default root if no master given
        master = setup_master(master)
        if not getattr(master, '_tile_loaded', False):
            # Load tile now, if needed
            _load_tile(master)
        tkinter.Widget.__init__(self, master, widgetname, kw=kw)
    def identify(self, x, y):
        """Returns the name of the element at position x, y, or the empty
        string if the point does not lie within any element.
        x and y are pixel coordinates relative to the widget."""
        return self.tk.call(self._w, "identify", x, y)
    def instate(self, statespec, callback=None, *args, **kw):
        """Test the widget's state.
        If callback is not specified, returns True if the widget state
        matches statespec and False otherwise. If callback is specified,
        then it will be invoked with *args, **kw if the widget state
        matches statespec. statespec is expected to be a sequence."""
        ret = self.tk.call(self._w, "instate", ' '.join(statespec))
        if ret and callback:
            # note: returns the callback's result, not a bool, in this case
            return callback(*args, **kw)
        return bool(ret)
    def state(self, statespec=None):
        """Modify or inquire widget state.
        Widget state is returned if statespec is None, otherwise it is
        set according to the statespec flags and then a new state spec
        is returned indicating which flags were changed. statespec is
        expected to be a sequence."""
        if statespec is not None:
            statespec = ' '.join(statespec)
        return self.tk.splitlist(str(self.tk.call(self._w, "state", statespec)))
class Button(Widget):
    """Ttk Button widget, displays a textual label and/or image, and
    evaluates a command when pressed."""
    def __init__(self, master=None, **kw):
        """Construct a Ttk Button widget with the parent master.
        STANDARD OPTIONS
            class, compound, cursor, image, state, style, takefocus,
            text, textvariable, underline, width
        WIDGET-SPECIFIC OPTIONS
            command, default, width
        """
        Widget.__init__(self, master, "ttk::button", kw)
    def invoke(self):
        """Invokes the command associated with the button.
        Returns the result of that command."""
        return self.tk.call(self._w, "invoke")
class Checkbutton(Widget):
    """Ttk Checkbutton widget which is either in on- or off-state."""
    def __init__(self, master=None, **kw):
        """Construct a Ttk Checkbutton widget with the parent master.
        STANDARD OPTIONS
            class, compound, cursor, image, state, style, takefocus,
            text, textvariable, underline, width
        WIDGET-SPECIFIC OPTIONS
            command, offvalue, onvalue, variable
        """
        Widget.__init__(self, master, "ttk::checkbutton", kw)
    def invoke(self):
        """Toggles between the selected and deselected states and
        invokes the associated command. If the widget is currently
        selected, sets the option variable to the offvalue option
        and deselects the widget; otherwise, sets the option variable
        to the option onvalue.
        Returns the result of the associated command."""
        return self.tk.call(self._w, "invoke")
class Entry(Widget, tkinter.Entry):
    """Ttk Entry widget displays a one-line text string and allows that
    string to be edited by the user."""
    def __init__(self, master=None, widget=None, **kw):
        """Constructs a Ttk Entry widget with the parent master.
        STANDARD OPTIONS
            class, cursor, style, takefocus, xscrollcommand
        WIDGET-SPECIFIC OPTIONS
            exportselection, invalidcommand, justify, show, state,
            textvariable, validate, validatecommand, width
        VALIDATION MODES
            none, key, focus, focusin, focusout, all
        """
        # 'widget' lets subclasses (e.g. Combobox) reuse Entry's methods
        # while creating a different Tcl widget
        Widget.__init__(self, master, widget or "ttk::entry", kw)
    def bbox(self, index):
        """Return a tuple of (x, y, width, height) which describes the
        bounding box of the character given by index."""
        return self.tk.call(self._w, "bbox", index)
    def identify(self, x, y):
        """Returns the name of the element at position x, y, or the
        empty string if the coordinates are outside the window."""
        return self.tk.call(self._w, "identify", x, y)
    def validate(self):
        """Force revalidation, independent of the conditions specified
        by the validate option. Returns False if validation fails, True
        if it succeeds. Sets or clears the invalid state accordingly."""
        return bool(self.tk.call(self._w, "validate"))
class Combobox(Entry):
    """Ttk Combobox widget combines a text field with a pop-down list of
    values."""
    def __init__(self, master=None, **kw):
        """Construct a Ttk Combobox widget with the parent master.
        STANDARD OPTIONS
            class, cursor, style, takefocus
        WIDGET-SPECIFIC OPTIONS
            exportselection, justify, height, postcommand, state,
            textvariable, values, width
        """
        # The "values" option may need special formatting, so leave to
        # _format_optdict the responsibility to format it
        if "values" in kw:
            kw["values"] = _format_optdict({'v': kw["values"]})[1]
        Entry.__init__(self, master, "ttk::combobox", **kw)
    def __setitem__(self, item, value):
        # same special-case formatting for "values" as in __init__
        if item == "values":
            value = _format_optdict({item: value})[1]
        Entry.__setitem__(self, item, value)
    def configure(self, cnf=None, **kw):
        """Custom Combobox configure, created to properly format the values
        option."""
        if "values" in kw:
            kw["values"] = _format_optdict({'v': kw["values"]})[1]
        return Entry.configure(self, cnf, **kw)
    def current(self, newindex=None):
        """If newindex is supplied, sets the combobox value to the
        element at position newindex in the list of values. Otherwise,
        returns the index of the current value in the list of values
        or -1 if the current value does not appear in the list."""
        return self.tk.call(self._w, "current", newindex)
    def set(self, value):
        """Sets the value of the combobox to value."""
        self.tk.call(self._w, "set", value)
class Frame(Widget):
    """Ttk Frame widget is a container, used to group other widgets
    together."""
    def __init__(self, master=None, **kw):
        """Construct a Ttk Frame with parent master.
        STANDARD OPTIONS
            class, cursor, style, takefocus
        WIDGET-SPECIFIC OPTIONS
            borderwidth, relief, padding, width, height
        """
        Widget.__init__(self, master, "ttk::frame", kw)
class Label(Widget):
    """Ttk Label widget displays a textual label and/or image."""
    def __init__(self, master=None, **kw):
        """Construct a Ttk Label with parent master.
        STANDARD OPTIONS
            class, compound, cursor, image, style, takefocus, text,
            textvariable, underline, width
        WIDGET-SPECIFIC OPTIONS
            anchor, background, font, foreground, justify, padding,
            relief, text, wraplength
        """
        Widget.__init__(self, master, "ttk::label", kw)
class Labelframe(Widget):
    """Ttk Labelframe widget is a container used to group other widgets
    together. It has an optional label, which may be a plain text string
    or another widget."""
    def __init__(self, master=None, **kw):
        """Construct a Ttk Labelframe with parent master.
        STANDARD OPTIONS
            class, cursor, style, takefocus
        WIDGET-SPECIFIC OPTIONS
            labelanchor, text, underline, padding, labelwidget, width,
            height
        """
        Widget.__init__(self, master, "ttk::labelframe", kw)
LabelFrame = Labelframe # tkinter name compatibility
class Menubutton(Widget):
    """Ttk Menubutton widget displays a textual label and/or image, and
    displays a menu when pressed."""
    def __init__(self, master=None, **kw):
        """Construct a Ttk Menubutton with parent master.
        STANDARD OPTIONS
            class, compound, cursor, image, state, style, takefocus,
            text, textvariable, underline, width
        WIDGET-SPECIFIC OPTIONS
            direction, menu
        """
        Widget.__init__(self, master, "ttk::menubutton", kw)
class Notebook(Widget):
    """Ttk Notebook widget manages a collection of windows and displays
    a single one at a time. Each child window is associated with a tab,
    which the user may select to change the currently-displayed window."""
    def __init__(self, master=None, **kw):
        """Construct a Ttk Notebook with parent master.
        STANDARD OPTIONS
            class, cursor, style, takefocus
        WIDGET-SPECIFIC OPTIONS
            height, padding, width
        TAB OPTIONS
            state, sticky, padding, text, image, compound, underline
        TAB IDENTIFIERS (tab_id)
            The tab_id argument found in several methods may take any of
            the following forms:
                * An integer between zero and the number of tabs
                * The name of a child window
                * A positional specification of the form "@x,y", which
                  defines the tab
                * The string "current", which identifies the
                  currently-selected tab
                * The string "end", which returns the number of tabs (only
                  valid for method index)
        """
        Widget.__init__(self, master, "ttk::notebook", kw)
    def add(self, child, **kw):
        """Adds a new tab to the notebook.
        If window is currently managed by the notebook but hidden, it is
        restored to its previous position."""
        self.tk.call(self._w, "add", child, *(_format_optdict(kw)))
    def forget(self, tab_id):
        """Removes the tab specified by tab_id, unmaps and unmanages the
        associated window."""
        self.tk.call(self._w, "forget", tab_id)
    def hide(self, tab_id):
        """Hides the tab specified by tab_id.
        The tab will not be displayed, but the associated window remains
        managed by the notebook and its configuration remembered. Hidden
        tabs may be restored with the add command."""
        self.tk.call(self._w, "hide", tab_id)
    def identify(self, x, y):
        """Returns the name of the tab element at position x, y, or the
        empty string if none."""
        return self.tk.call(self._w, "identify", x, y)
    def index(self, tab_id):
        """Returns the numeric index of the tab specified by tab_id, or
        the total number of tabs if tab_id is the string "end"."""
        return self.tk.call(self._w, "index", tab_id)
    def insert(self, pos, child, **kw):
        """Inserts a pane at the specified position.
        pos is either the string end, an integer index, or the name of
        a managed child. If child is already managed by the notebook,
        moves it to the specified position."""
        self.tk.call(self._w, "insert", pos, child, *(_format_optdict(kw)))
    def select(self, tab_id=None):
        """Selects the specified tab.
        The associated child window will be displayed, and the
        previously-selected window (if different) is unmapped. If tab_id
        is omitted, returns the widget name of the currently selected
        pane."""
        return self.tk.call(self._w, "select", tab_id)
    def tab(self, tab_id, option=None, **kw):
        """Query or modify the options of the specific tab_id.
        If kw is not given, returns a dict of the tab option values. If option
        is specified, returns the value of that option. Otherwise, sets the
        options to the corresponding values."""
        if option is not None:
            # a None-valued option makes _val_or_dict return just that value
            kw[option] = None
        return _val_or_dict(kw, self.tk.call, self._w, "tab", tab_id)
    def tabs(self):
        """Returns a list of windows managed by the notebook."""
        return self.tk.call(self._w, "tabs") or ()
    def enable_traversal(self):
        """Enable keyboard traversal for a toplevel window containing
        this notebook.
        This will extend the bindings for the toplevel window containing
        this notebook as follows:
            Control-Tab: selects the tab following the currently selected
                         one
            Shift-Control-Tab: selects the tab preceding the currently
                               selected one
            Alt-K: where K is the mnemonic (underlined) character of any
                   tab, will select that tab.
        Multiple notebooks in a single toplevel may be enabled for
        traversal, including nested notebooks. However, notebook traversal
        only works properly if all panes are direct children of the
        notebook."""
        # The only, and good, difference I see is about mnemonics, which works
        # after calling this method. Control-Tab and Shift-Control-Tab always
        # works (here at least).
        self.tk.call("ttk::notebook::enableTraversal", self._w)
class Panedwindow(Widget, tkinter.PanedWindow):
    """Ttk Panedwindow widget displays a number of subwindows, stacked
    either vertically or horizontally."""
    def __init__(self, master=None, **kw):
        """Construct a Ttk Panedwindow with parent master.
        STANDARD OPTIONS
            class, cursor, style, takefocus
        WIDGET-SPECIFIC OPTIONS
            orient, width, height
        PANE OPTIONS
            weight
        """
        Widget.__init__(self, master, "ttk::panedwindow", kw)
    forget = tkinter.PanedWindow.forget # overrides Pack.forget
    def insert(self, pos, child, **kw):
        """Inserts a pane at the specified positions.
        pos is either the string end, an integer index, or the name
        of a child. If child is already managed by the paned window,
        moves it to the specified position."""
        self.tk.call(self._w, "insert", pos, child, *(_format_optdict(kw)))
    def pane(self, pane, option=None, **kw):
        """Query or modify the options of the specified pane.
        pane is either an integer index or the name of a managed subwindow.
        If kw is not given, returns a dict of the pane option values. If
        option is specified then the value for that option is returned.
        Otherwise, sets the options to the corresponding values."""
        if option is not None:
            # a None-valued option makes _val_or_dict return just that value
            kw[option] = None
        return _val_or_dict(kw, self.tk.call, self._w, "pane", pane)
    def sashpos(self, index, newpos=None):
        """If newpos is specified, sets the position of sash number index.
        May adjust the positions of adjacent sashes to ensure that
        positions are monotonically increasing. Sash positions are further
        constrained to be between 0 and the total size of the widget.
        Returns the new position of sash number index."""
        return self.tk.call(self._w, "sashpos", index, newpos)
PanedWindow = Panedwindow # tkinter name compatibility
class Progressbar(Widget):
    """Ttk Progressbar widget shows the status of a long-running
    operation. They can operate in two modes: determinate mode shows the
    amount completed relative to the total amount of work to be done, and
    indeterminate mode provides an animated display to let the user know
    that something is happening."""
    def __init__(self, master=None, **kw):
        """Construct a Ttk Progressbar with parent master.
        STANDARD OPTIONS
            class, cursor, style, takefocus
        WIDGET-SPECIFIC OPTIONS
            orient, length, mode, maximum, value, variable, phase
        """
        Widget.__init__(self, master, "ttk::progressbar", kw)
    def start(self, interval=None):
        """Begin autoincrement mode: schedules a recurring timer event
        that calls method step every interval milliseconds.
        interval defaults to 50 milliseconds (20 steps/second) if omitted."""
        self.tk.call(self._w, "start", interval)
    def step(self, amount=None):
        """Increments the value option by amount.
        amount defaults to 1.0 if omitted."""
        self.tk.call(self._w, "step", amount)
    def stop(self):
        """Stop autoincrement mode: cancels any recurring timer event
        initiated by start."""
        self.tk.call(self._w, "stop")
class Radiobutton(Widget):
    """Ttk Radiobutton widgets are used in groups to show or change a
    set of mutually-exclusive options."""
    def __init__(self, master=None, **kw):
        """Construct a Ttk Radiobutton with parent master.
        STANDARD OPTIONS
            class, compound, cursor, image, state, style, takefocus,
            text, textvariable, underline, width
        WIDGET-SPECIFIC OPTIONS
            command, value, variable
        """
        Widget.__init__(self, master, "ttk::radiobutton", kw)
    def invoke(self):
        """Sets the option variable to the option value, selects the
        widget, and invokes the associated command.
        Returns the result of the command, or an empty string if
        no command is specified."""
        return self.tk.call(self._w, "invoke")
class Scale(Widget, tkinter.Scale):
    """Ttk Scale widget is typically used to control the numeric value of
    a linked variable that varies uniformly over some range."""
    def __init__(self, master=None, **kw):
        """Construct a Ttk Scale with parent master.
        STANDARD OPTIONS
            class, cursor, style, takefocus
        WIDGET-SPECIFIC OPTIONS
            command, from, length, orient, to, value, variable
        """
        Widget.__init__(self, master, "ttk::scale", kw)
    def configure(self, cnf=None, **kw):
        """Modify or query scale options.
        Setting a value for any of the "from", "from_" or "to" options
        generates a <<RangeChanged>> event."""
        # NOTE(review): cnf is merged via kw.update(cnf), so it must be a
        # mapping here; also the result of Widget.configure is not returned,
        # unlike the usual tkinter configure contract -- verify with callers
        if cnf:
            kw.update(cnf)
        Widget.configure(self, **kw)
        if any(['from' in kw, 'from_' in kw, 'to' in kw]):
            self.event_generate('<<RangeChanged>>')
    def get(self, x=None, y=None):
        """Get the current value of the value option, or the value
        corresponding to the coordinates x, y if they are specified.
        x and y are pixel coordinates relative to the scale widget
        origin."""
        return self.tk.call(self._w, 'get', x, y)
class Scrollbar(Widget, tkinter.Scrollbar):
    """Ttk Scrollbar controls the viewport of a scrollable widget."""
    def __init__(self, master=None, **kw):
        """Construct a Ttk Scrollbar with parent master.
        STANDARD OPTIONS
            class, cursor, style, takefocus
        WIDGET-SPECIFIC OPTIONS
            command, orient
        """
        Widget.__init__(self, master, "ttk::scrollbar", kw)
class Separator(Widget):
    """Ttk Separator widget displays a horizontal or vertical separator
    bar."""
    def __init__(self, master=None, **kw):
        """Construct a Ttk Separator with parent master.
        STANDARD OPTIONS
            class, cursor, style, takefocus
        WIDGET-SPECIFIC OPTIONS
            orient
        """
        Widget.__init__(self, master, "ttk::separator", kw)
class Sizegrip(Widget):
    """Ttk Sizegrip allows the user to resize the containing toplevel
    window by pressing and dragging the grip."""
    def __init__(self, master=None, **kw):
        """Construct a Ttk Sizegrip with parent master.
        STANDARD OPTIONS
            class, cursor, state, style, takefocus
        """
        Widget.__init__(self, master, "ttk::sizegrip", kw)
class Treeview(Widget, tkinter.XView, tkinter.YView):
"""Ttk Treeview widget displays a hierarchical collection of items.
Each item has a textual label, an optional image, and an optional list
of data values. The data values are displayed in successive columns
after the tree label."""
    def __init__(self, master=None, **kw):
        """Construct a Ttk Treeview with parent master.
        STANDARD OPTIONS
            class, cursor, style, takefocus, xscrollcommand,
            yscrollcommand
        WIDGET-SPECIFIC OPTIONS
            columns, displaycolumns, height, padding, selectmode, show
        ITEM OPTIONS
            text, image, values, open, tags
        TAG OPTIONS
            foreground, background, font, image
        """
        Widget.__init__(self, master, "ttk::treeview", kw)
    def bbox(self, item, column=None):
        """Returns the bounding box (relative to the treeview widget's
        window) of the specified item in the form x y width height.
        If column is specified, returns the bounding box of that cell.
        If the item is not visible (i.e., if it is a descendant of a
        closed item or is scrolled offscreen), returns an empty string."""
        return self.tk.call(self._w, "bbox", item, column)
    def get_children(self, item=None):
        """Returns a tuple of children belonging to item.
        If item is not specified, returns root children."""
        # empty string denotes the root item in ttk::treeview
        return self.tk.call(self._w, "children", item or '') or ()
    def set_children(self, item, *newchildren):
        """Replaces item's child with newchildren.
        Children present in item that are not present in newchildren
        are detached from tree. No items in newchildren may be an
        ancestor of item."""
        self.tk.call(self._w, "children", item, newchildren)
    def column(self, column, option=None, **kw):
        """Query or modify the options for the specified column.
        If kw is not given, returns a dict of the column option values. If
        option is specified then the value for that option is returned.
        Otherwise, sets the options to the corresponding values."""
        if option is not None:
            # a None-valued option makes _val_or_dict return just that value
            kw[option] = None
        return _val_or_dict(kw, self.tk.call, self._w, "column", column)
    def delete(self, *items):
        """Delete all specified items and all their descendants. The root
        item may not be deleted."""
        self.tk.call(self._w, "delete", items)
    def detach(self, *items):
        """Unlinks all of the specified items from the tree.
        The items and all of their descendants are still present, and may
        be reinserted at another point in the tree, but will not be
        displayed. The root item may not be detached."""
        self.tk.call(self._w, "detach", items)
    def exists(self, item):
        """Returns True if the specified item is present in the tree,
        False otherwise."""
        return bool(self.tk.call(self._w, "exists", item))
    def focus(self, item=None):
        """If item is specified, sets the focus item to item. Otherwise,
        returns the current focus item, or '' if there is none."""
        return self.tk.call(self._w, "focus", item)
    def heading(self, column, option=None, **kw):
        """Query or modify the heading options for the specified column.
        If kw is not given, returns a dict of the heading option values. If
        option is specified then the value for that option is returned.
        Otherwise, sets the options to the corresponding values.
        Valid options/values are:
            text: text
                The text to display in the column heading
            image: image_name
                Specifies an image to display to the right of the column
                heading
            anchor: anchor
                Specifies how the heading text should be aligned. One of
                the standard Tk anchor values
            command: callback
                A callback to be invoked when the heading label is
                pressed.
        To configure the tree column heading, call this with column = "#0" """
        cmd = kw.get('command')
        if cmd and not isinstance(cmd, str):
            # callback not registered yet, do it now
            kw['command'] = self.master.register(cmd, self._substitute)
        if option is not None:
            # a None-valued option makes _val_or_dict return just that value
            kw[option] = None
        return _val_or_dict(kw, self.tk.call, self._w, 'heading', column)
    def identify(self, component, x, y):
        """Returns a description of the specified component under the
        point given by x and y, or the empty string if no such component
        is present at that position."""
        return self.tk.call(self._w, "identify", component, x, y)
    def identify_row(self, y):
        """Returns the item ID of the item at position y."""
        return self.identify("row", 0, y)
    def identify_column(self, x):
        """Returns the data column identifier of the cell at position x.
        The tree column has ID #0."""
        return self.identify("column", x, 0)
    def identify_region(self, x, y):
        """Returns one of:
        heading: Tree heading area.
        separator: Space between two columns headings;
        tree: The tree area.
        cell: A data cell.
        * Availability: Tk 8.6"""
        return self.identify("region", x, y)
    def identify_element(self, x, y):
        """Returns the element at position x, y.
        * Availability: Tk 8.6"""
        return self.identify("element", x, y)
    def index(self, item):
        """Returns the integer index of item within its parent's list
        of children."""
        return self.tk.call(self._w, "index", item)
def insert(self, parent, index, iid=None, **kw):
    """Create a new item and return the item identifier of the newly
    created item.

    parent is the item ID of the parent item, or the empty string
    to create a new top-level item. index is an integer, or the value
    end, specifying where in the list of parent's children to insert
    the new item. If index is less than or equal to zero, the new node
    is inserted at the beginning, if index is greater than or equal to
    the current number of children, it is inserted at the end. If iid
    is specified, it is used as the item identifier, iid must not
    already exist in the tree. Otherwise, a new unique identifier
    is generated."""
    opts = _format_optdict(kw)
    # Use an explicit None check: the previous truthiness test
    # silently discarded falsy-but-valid identifiers such as iid=0,
    # letting Tk generate a different id than the caller asked for.
    if iid is not None:
        res = self.tk.call(self._w, "insert", parent, index,
                           "-id", iid, *opts)
    else:
        res = self.tk.call(self._w, "insert", parent, index, *opts)
    return res
def item(self, item, option=None, **kw):
    """Query or modify the options for the specified item.

    If no options are given, a dict with options/values for the item
    is returned.  If option is specified then the value for that
    option is returned.  Otherwise, sets the options to the
    corresponding values as given by kw."""
    opts = dict(kw)
    if option is not None:
        # Query mode: ask for this single option only.
        opts[option] = None
    return _val_or_dict(opts, self.tk.call, self._w, "item", item)
def move(self, item, parent, index):
    """Move *item* to position *index* in *parent*'s list of children.

    It is illegal to move an item under one of its descendants.  If
    index is less than or equal to zero, item is moved to the
    beginning, if greater than or equal to the number of children,
    it is moved to the end.  If item was detached it is reattached."""
    cmd = (self._w, "move", item, parent, index)
    self.tk.call(*cmd)

# A sensible method name for reattaching detached items.
reattach = move
def next(self, item):
    """Return the identifier of *item*'s next sibling, or '' if
    *item* is the last child of its parent."""
    cmd = (self._w, "next", item)
    return self.tk.call(*cmd)
def parent(self, item):
    """Return the ID of the parent of *item*, or '' if *item* is at
    the top level of the hierarchy."""
    cmd = (self._w, "parent", item)
    return self.tk.call(*cmd)
def prev(self, item):
    """Return the identifier of *item*'s previous sibling, or '' if
    *item* is the first child of its parent."""
    cmd = (self._w, "prev", item)
    return self.tk.call(*cmd)
def see(self, item):
    """Ensure that *item* is visible.

    Sets all of item's ancestors open option to True, and scrolls
    the widget if necessary so that item is within the visible
    portion of the tree."""
    cmd = (self._w, "see", item)
    self.tk.call(*cmd)
def selection(self, selop=None, items=None):
    """Query or change the tree selection.

    If selop is not specified, returns the selected items; otherwise
    applies the given selection operation to *items*."""
    cmd = (self._w, "selection", selop, items)
    return self.tk.call(*cmd)
def selection_set(self, items):
    """Replace the current selection with *items*."""
    self.selection("set", items)

def selection_add(self, items):
    """Add *items* to the current selection."""
    self.selection("add", items)

def selection_remove(self, items):
    """Remove *items* from the current selection."""
    self.selection("remove", items)

def selection_toggle(self, items):
    """Toggle the selection state of each item in *items*."""
    self.selection("toggle", items)
def set(self, item, column=None, value=None):
    """Query or set column values of *item*.

    With one argument, returns a dictionary of column/value pairs
    for the specified item.  With two arguments, returns the current
    value of the specified column.  With three arguments, sets the
    value of the given column in the given item to the specified
    value."""
    res = self.tk.call(self._w, "set", item, column, value)
    querying_all = column is None and value is None
    # Only the single-argument form yields a Tcl dictionary; the
    # other forms return a plain value.
    return _dict_from_tcltuple(res, False) if querying_all else res
def tag_bind(self, tagname, sequence=None, callback=None):
    """Bind *callback* for the given event *sequence* to the tag
    *tagname*.

    When an event is delivered to an item, the callbacks for each
    of the item's tags option are called."""
    where = (self._w, "tag", "bind", tagname)
    self._bind(where, sequence, callback, add=0)
def tag_configure(self, tagname, option=None, **kw):
    """Query or modify the options for the specified tagname.

    If kw is not given, returns a dict of the option settings for
    tagname.  If option is specified, returns the value for that
    option for the specified tagname.  Otherwise, sets the options
    to the corresponding values for the given tagname."""
    opts = dict(kw)
    if option is not None:
        # Query mode: request this single option.
        opts[option] = None
    return _val_or_dict(opts, self.tk.call,
                        self._w, "tag", "configure", tagname)
def tag_has(self, tagname, item=None):
    """If *item* is specified, returns 1 or 0 depending on whether
    the specified item has the given *tagname*.  Otherwise, returns
    a list of all items which have the specified tag.

    * Availability: Tk 8.6"""
    cmd = (self._w, "tag", "has", tagname, item)
    return self.tk.call(*cmd)
# Extensions
class LabeledScale(Frame):
    """A Ttk Scale widget with a Ttk Label widget indicating its
    current value.

    The Ttk Scale can be accessed through instance.scale, and Ttk Label
    can be accessed through instance.label"""

    def __init__(self, master=None, variable=None, from_=0, to=10, **kw):
        """Construct an horizontal LabeledScale with parent master, a
        variable to be associated with the Ttk Scale widget and its range.
        If variable is not specified, a tkinter.IntVar is created.

        WIDGET-SPECIFIC OPTIONS
            compound: 'top' or 'bottom'
                Specifies how to display the label relative to the scale.
                Defaults to 'top'.
        """
        # 'compound' is consumed here; remaining keywords go to Frame.
        self._label_top = kw.pop('compound', 'top') == 'top'
        Frame.__init__(self, master, **kw)
        self._variable = variable or tkinter.IntVar(master)
        self._variable.set(from_)
        # Remember the last in-range value so an out-of-range write to
        # the variable can be rolled back in _adjust().
        self._last_valid = from_

        self.label = Label(self)
        self.scale = Scale(self, variable=self._variable, from_=from_, to=to)
        self.scale.bind('<<RangeChanged>>', self._adjust)

        # position scale and label according to the compound option
        scale_side = 'bottom' if self._label_top else 'top'
        label_side = 'top' if scale_side == 'bottom' else 'bottom'
        self.scale.pack(side=scale_side, fill='x')
        # NOTE(review): pack() returns None, so tmp is always None; the
        # extra Label only reserves vertical space for the placed label.
        tmp = Label(self).pack(side=label_side)  # place holder
        self.label.place(anchor='n' if label_side == 'top' else 's')

        # update the label as scale or variable changes
        self.__tracecb = self._variable.trace_variable('w', self._adjust)
        self.bind('<Configure>', self._adjust)
        self.bind('<Map>', self._adjust)

    def destroy(self):
        """Destroy this widget and possibly its associated variable."""
        try:
            # Stop reacting to variable writes before tearing down.
            self._variable.trace_vdelete('w', self.__tracecb)
        except AttributeError:
            # widget has been destroyed already
            pass
        else:
            del self._variable
        Frame.destroy(self)

    def _adjust(self, *args):
        """Adjust the label position according to the scale."""
        def adjust_label():
            # "force" scale redraw so coords/winfo values are current
            self.update_idletasks()

            x, y = self.scale.coords()
            if self._label_top:
                y = self.scale.winfo_y() - self.label.winfo_reqheight()
            else:
                y = self.scale.winfo_reqheight() + self.label.winfo_reqheight()

            self.label.place_configure(x=x, y=y)

        # Normalize the range; 'from' may be greater than 'to'.
        from_, to = self.scale['from'], self.scale['to']
        if to < from_:
            from_, to = to, from_
        newval = self._variable.get()
        if not from_ <= newval <= to:
            # value outside range, set value back to the last valid one
            self.value = self._last_valid
            return

        self._last_valid = newval
        self.label['text'] = newval
        # Defer geometry work until the event loop is idle so widget
        # sizes are up to date.
        self.after_idle(adjust_label)

    def _get_value(self):
        """Return current scale value."""
        return self._variable.get()

    def _set_value(self, val):
        """Set new scale value."""
        self._variable.set(val)

    # 'value' reads/writes the underlying Tk variable.
    value = property(_get_value, _set_value)
class OptionMenu(Menubutton):
    """Themed OptionMenu, based after tkinter's OptionMenu, which allows
    the user to select a value from a menu."""

    def __init__(self, master, variable, default=None, *values, **kwargs):
        """Construct a themed OptionMenu widget with master as the parent,
        the resource textvariable set to variable, the initially selected
        value specified by the default parameter, the menu values given by
        *values and additional keywords.

        WIDGET-SPECIFIC OPTIONS
            style: stylename
                Menubutton style.
            direction: 'above', 'below', 'left', 'right', or 'flush'
                Menubutton direction.
            command: callback
                A callback that will be invoked after selecting an item.
        """
        kw = {'textvariable': variable, 'style': kwargs.pop('style', None),
              'direction': kwargs.pop('direction', None)}
        Menubutton.__init__(self, master, **kw)
        self['menu'] = tkinter.Menu(self, tearoff=False)

        self._variable = variable
        self._callback = kwargs.pop('command', None)
        # Anything left in kwargs is an option we do not understand.
        if kwargs:
            raise tkinter.TclError('unknown option -%s' % (
                next(iter(kwargs.keys()))))

        self.set_menu(default, *values)

    def __getitem__(self, item):
        # Return the actual Menu widget for 'menu', not its Tcl path.
        if item == 'menu':
            return self.nametowidget(Menubutton.__getitem__(self, item))
        return Menubutton.__getitem__(self, item)

    def set_menu(self, default=None, *values):
        """Build a new menu of radiobuttons with *values and optionally
        a default value."""
        menu = self['menu']
        menu.delete(0, 'end')
        for val in values:
            menu.add_radiobutton(label=val,
                command=tkinter._setit(self._variable, val, self._callback))

        if default:
            self._variable.set(default)

    def destroy(self):
        """Destroy this widget and its associated variable."""
        # destroy() can run more than once (e.g. an explicit destroy
        # followed by the parent's teardown); guard the second deletion
        # instead of raising AttributeError.
        try:
            del self._variable
        except AttributeError:
            pass
        Menubutton.destroy(self)
"""Platform for the Aladdin Connect cover component."""
import logging
from aladdin_connect import AladdinConnectClient
import voluptuous as vol
from homeassistant.components.cover import (
PLATFORM_SCHEMA,
SUPPORT_CLOSE,
SUPPORT_OPEN,
CoverDevice,
)
from homeassistant.const import (
CONF_PASSWORD,
CONF_USERNAME,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Identifier and title used for the persistent setup-failure notification.
NOTIFICATION_ID = "aladdin_notification"
NOTIFICATION_TITLE = "Aladdin Connect Cover Setup"

# Map Aladdin Connect API door status strings to Home Assistant states.
STATES_MAP = {
    "open": STATE_OPEN,
    "opening": STATE_OPENING,
    "closed": STATE_CLOSED,
    "closing": STATE_CLOSING,
}

# This cover only supports open and close (no stop / position).
SUPPORTED_FEATURES = SUPPORT_OPEN | SUPPORT_CLOSE

# Username and password are both mandatory in the platform config.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Aladdin Connect platform."""
    acc = AladdinConnectClient(
        config.get(CONF_USERNAME), config.get(CONF_PASSWORD)
    )

    try:
        if not acc.login():
            raise ValueError("Username or Password is incorrect")
        add_entities(AladdinDevice(acc, door) for door in acc.get_doors())
    except (TypeError, KeyError, NameError, ValueError) as ex:
        # Surface the failure in the UI as a persistent notification.
        _LOGGER.error("%s", ex)
        hass.components.persistent_notification.create(
            f"Error: {ex}<br />You will need to restart hass after fixing.",
            title=NOTIFICATION_TITLE,
            notification_id=NOTIFICATION_ID,
        )
class AladdinDevice(CoverDevice):
    """Representation of an Aladdin Connect garage door cover."""

    def __init__(self, acc, device):
        """Initialize the cover from an Aladdin Connect door dict."""
        self._acc = acc
        self._device_id = device["device_id"]
        self._number = device["door_number"]
        self._name = device["name"]
        self._status = STATES_MAP.get(device["status"])

    @property
    def device_class(self):
        """Define this cover as a garage door."""
        return "garage"

    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORTED_FEATURES

    @property
    def unique_id(self):
        """Return a unique ID."""
        return f"{self._device_id}-{self._number}"

    @property
    def name(self):
        """Return the name of the garage door."""
        return self._name

    @property
    def is_opening(self):
        """Return if the cover is opening or not."""
        return self._status == STATE_OPENING

    @property
    def is_closing(self):
        """Return if the cover is closing or not."""
        return self._status == STATE_CLOSING

    @property
    def is_closed(self):
        """Return None if status is unknown, True if closed, else False."""
        return None if self._status is None else self._status == STATE_CLOSED

    def open_cover(self, **kwargs):
        """Issue open command to cover."""
        self._acc.open_door(self._device_id, self._number)

    def close_cover(self, **kwargs):
        """Issue close command to cover."""
        self._acc.close_door(self._device_id, self._number)

    def update(self):
        """Refresh the cached door status from the service."""
        self._status = STATES_MAP.get(
            self._acc.get_door_status(self._device_id, self._number)
        )
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Favien documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 6 16:22:43 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'Favien'
copyright = '2014, Jihyeok Seo'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Faviendoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'Favien.tex', 'Favien Documentation',
     'Jihyeok Seo', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'favien', 'Favien Documentation',
     ['Jihyeok Seo'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Favien', 'Favien Documentation',
     'Jihyeok Seo', 'Favien', 'Network canvas community.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False

# Example configuration for intersphinx: refer to the Python standard library.
# NOTE(review): these inventory URLs are plain http and may redirect; consider
# updating them to https when the docs build is next touched.
intersphinx_mapping = {
    'flask': ('http://flask.pocoo.org/docs/', None),
    'python': ('http://docs.python.org/', None),
    'sqlalchemy': ('http://docs.sqlalchemy.org/en/rel_0_9/', None),
    'werkzeug': ('http://werkzeug.pocoo.org/docs/', None),
}
class HParams(object):
    """A lightweight container for named hyperparameters.

    Values are kept both in an internal dict and as attributes, and
    can be overridden from a comma-separated "key=value" string via
    parse(), with each value coerced to the type of its default.
    """

    def __init__(self, **kwargs):
        self._items = {}
        for name, value in kwargs.items():
            self._set(name, value)

    def _set(self, k, v):
        # Keep the dict view and the attribute view in sync.
        self._items[k] = v
        setattr(self, k, v)

    def __str__(self):
        return str(self._items)

    def parse(self, str_value):
        """Return a copy with overrides parsed from "k=v,k2=v2" text.

        Entries are comma-separated; blank entries are skipped.  Each
        value is converted to the type of the existing default for
        its key.  Raises ValueError for an entry without '='.
        """
        parsed = HParams(**self._items)
        for raw in str_value.strip().split(","):
            raw = raw.strip()
            if not raw:
                continue
            key, sep, text = raw.partition("=")
            if not sep:
                raise ValueError("Unable to parse: %s" % raw)
            current = parsed._items[key]
            # bool must be tested before int: bool is a subclass of int.
            if isinstance(current, bool):
                new_value = text.lower() == "true"
            elif isinstance(current, int):
                new_value = int(text)
            elif isinstance(current, float):
                new_value = float(text)
            else:
                new_value = text
            parsed._set(key, new_value)
        return parsed
input = """
strategic(451)|strategic(412)|strategic(1560).
strategic(1399)|strategic(1168)|strategic(1834)|strategic(726).
strategic(1291)|strategic(1361)|strategic(68).
strategic(610)|strategic(572)|strategic(1343)|strategic(783).
strategic(729)|strategic(1448)|strategic(113)|strategic(1649).
strategic(301)|strategic(1606)|strategic(2165)|strategic(504).
strategic(118)|strategic(754)|strategic(826)|strategic(266).
strategic(253)|strategic(1518)|strategic(572).
strategic(999)|strategic(783)|strategic(726)|strategic(62).
strategic(2148)|strategic(1868)|strategic(2274)|strategic(729).
strategic(62)|strategic(2148)|strategic(253)|strategic(1893).
strategic(118)|strategic(129)|strategic(45)|strategic(504).
strategic(1834)|strategic(166)|strategic(1811)|strategic(1516).
strategic(118)|strategic(253)|strategic(1834)|strategic(301).
strategic(1751)|strategic(45)|strategic(1518).
strategic(1240)|strategic(1724)|strategic(1577)|strategic(1648).
strategic(999)|strategic(301)|strategic(1868)|strategic(1751).
strategic(1856)|strategic(1788)|strategic(1811).
strategic(68)|strategic(754)|strategic(1577)|strategic(1868).
strategic(1736)|strategic(129)|strategic(451)|strategic(559).
strategic(1648)|strategic(1291)|strategic(266)|strategic(1856).
strategic(826)|strategic(1264)|strategic(55)|strategic(968).
strategic(1606)|strategic(1834)|strategic(1577)|strategic(253).
strategic(504)|strategic(1811)|strategic(968)|strategic(559).
strategic(1291)|strategic(729)|strategic(45)|strategic(1856).
strategic(999):-strategic(1856),strategic(1399),strategic(412),strategic(826).
strategic(968):-strategic(68),strategic(1361),strategic(1399),strategic(1868).
strategic(968):-strategic(1856),strategic(1361),strategic(1893),strategic(2274).
strategic(953):-strategic(1893),strategic(113),strategic(2274),strategic(504).
strategic(947):-strategic(559),strategic(62),strategic(1606),strategic(1577).
strategic(947):-strategic(1893),strategic(504),strategic(1399),strategic(1448).
strategic(947):-strategic(118),strategic(1577),strategic(412),strategic(1736).
strategic(826):-strategic(2148),strategic(1736),strategic(253),strategic(113).
strategic(729):-strategic(999),strategic(2148),strategic(1291),strategic(1343).
strategic(729):-strategic(1893),strategic(1168),strategic(999),strategic(412).
strategic(723):-strategic(572),strategic(1516),strategic(412),strategic(726).
strategic(68):-strategic(166),strategic(1168),strategic(504),strategic(559).
strategic(663):-strategic(1264),strategic(1518),strategic(1560),strategic(266).
strategic(657):-strategic(1399),strategic(113),strategic(1893),strategic(1649).
strategic(610):-strategic(826),strategic(1893),strategic(783),strategic(451).
strategic(610):-strategic(559),strategic(968),strategic(1560),strategic(1264).
strategic(610):-strategic(113),strategic(826),strategic(2274),strategic(1648).
strategic(605):-strategic(62),strategic(826),strategic(1856),strategic(1240).
strategic(592):-strategic(55),strategic(1724),strategic(2148),strategic(412).
strategic(592):-strategic(1343),strategic(129),strategic(1264),strategic(2274).
strategic(572):-strategic(1648),strategic(451),strategic(68),strategic(754).
strategic(55):-strategic(253),strategic(572),strategic(1168),strategic(1291).
strategic(45):-strategic(301),strategic(1606),strategic(1751),strategic(1264).
strategic(412):-strategic(68),strategic(1264),strategic(572),strategic(610).
strategic(412):-strategic(412),strategic(1893),strategic(2165),strategic(2274).
strategic(412):-strategic(2148),strategic(729),strategic(68),strategic(412).
strategic(369):-strategic(968),strategic(166),strategic(1751),strategic(118).
strategic(369):-strategic(1240),strategic(1291),strategic(2274),strategic(1868).
strategic(266):-strategic(968),strategic(1518),strategic(559),strategic(504).
strategic(266):-strategic(412),strategic(1560),strategic(999),strategic(45).
strategic(266):-strategic(1577),strategic(826),strategic(999),strategic(729).
strategic(266):-strategic(1168),strategic(754),strategic(968),strategic(1649).
strategic(2165):-strategic(2148),strategic(1516),strategic(1343),strategic(726).
strategic(2113):-strategic(1788),strategic(118),strategic(68),strategic(1736).
strategic(1893):-strategic(783),strategic(1448),strategic(1343),strategic(253).
strategic(1893):-strategic(1811),strategic(412),strategic(2165),strategic(1560).
strategic(1893):-strategic(118),strategic(999),strategic(968),strategic(729).
strategic(1868):-strategic(559),strategic(726),strategic(1893),strategic(783).
strategic(1856):-strategic(1856),strategic(451),strategic(113),strategic(968).
strategic(1856):-strategic(129),strategic(968),strategic(113),strategic(2274).
strategic(1852):-strategic(166),strategic(253),strategic(968),strategic(1868).
strategic(1834):-strategic(113),strategic(1399),strategic(1893),strategic(1399).
strategic(1811):-strategic(1893),strategic(826),strategic(1724),strategic(726).
strategic(1811):-strategic(1868),strategic(1751),strategic(1399),strategic(45).
strategic(1788):-strategic(1724),strategic(504),strategic(113),strategic(1811).
strategic(1788):-strategic(1291),strategic(2274),strategic(253),strategic(129).
strategic(1788):-strategic(113),strategic(1168),strategic(572),strategic(1606).
strategic(1757):-strategic(2148),strategic(68),strategic(1516),strategic(1856).
strategic(1757):-strategic(1868),strategic(968),strategic(504),strategic(1343).
strategic(1757):-strategic(1856),strategic(1291),strategic(1751),strategic(118).
strategic(1751):-strategic(68),strategic(166),strategic(55),strategic(1560).
strategic(1736):-strategic(301),strategic(559),strategic(1811),strategic(1577).
strategic(1736):-strategic(1751),strategic(2165),strategic(968),strategic(118).
strategic(166):-strategic(1361),strategic(1343),strategic(1724),strategic(783).
strategic(166):-strategic(1240),strategic(2165),strategic(266),strategic(1168).
strategic(1649):-strategic(783),strategic(1788),strategic(1751),strategic(451).
strategic(1649):-strategic(68),strategic(1560),strategic(118),strategic(451).
strategic(1649):-strategic(45),strategic(1399),strategic(504),strategic(253).
strategic(1606):-strategic(999),strategic(1893),strategic(559),strategic(2148).
strategic(1606):-strategic(729),strategic(1168),strategic(1893),strategic(412).
strategic(1577):-strategic(1736),strategic(1240),strategic(301),strategic(45).
strategic(1560):-strategic(412),strategic(301),strategic(1724),strategic(1606).
strategic(1560):-strategic(301),strategic(1560),strategic(729),strategic(1343).
strategic(1560):-strategic(2148),strategic(166),strategic(2148),strategic(1577).
strategic(1560):-strategic(1606),strategic(253),strategic(1518),strategic(45).
strategic(1448):-strategic(1788),strategic(1868),strategic(1606),strategic(2165).
strategic(1399):-strategic(968),strategic(266),strategic(1736),strategic(1518).
strategic(1399):-strategic(451),strategic(266),strategic(1868),strategic(1811).
strategic(1367):-strategic(1168),strategic(1264),strategic(1518),strategic(2148).
strategic(1361):-strategic(729),strategic(999),strategic(1516),strategic(1811).
strategic(1361):-strategic(118),strategic(1606),strategic(1856),strategic(1868).
strategic(1343):-strategic(999),strategic(1168),strategic(1649),strategic(2165).
strategic(1343):-strategic(62),strategic(1893),strategic(1516),strategic(2165).
strategic(1343):-strategic(1649),strategic(2165),strategic(1291),strategic(729).
strategic(129):-strategic(2148),strategic(2274),strategic(1893),strategic(1834).
strategic(1240):-strategic(113),strategic(1361),strategic(1264),strategic(55).
strategic(113):-strategic(1361),strategic(1649),strategic(1168),strategic(62).
strategic(1044):-strategic(1240),strategic(504),strategic(1736),strategic(1516).
strategic(999):-strategic(726),strategic(1399),strategic(953),strategic(1788).
strategic(999):-strategic(559),strategic(1649),strategic(1044),strategic(1788).
strategic(999):-strategic(55),strategic(1560),strategic(369),strategic(605).
strategic(999):-strategic(266),strategic(783),strategic(1588),strategic(657).
strategic(999):-strategic(1893),strategic(1588),strategic(754),strategic(369).
strategic(992):-strategic(826),strategic(1518),strategic(605),strategic(1516).
strategic(992):-strategic(2274),strategic(1986),strategic(729),strategic(1361).
strategic(992):-strategic(1648),strategic(559),strategic(2113),strategic(55).
strategic(992):-strategic(1168),strategic(1518),strategic(592),strategic(1367).
strategic(968):-strategic(1856),strategic(1448),strategic(663),strategic(1367).
strategic(968):-strategic(129),strategic(113),strategic(1588),strategic(451).
strategic(953):-strategic(45),strategic(1893),strategic(45),strategic(992).
strategic(947):-strategic(1788),strategic(1649),strategic(1044),strategic(968).
strategic(947):-strategic(1560),strategic(1240),strategic(1868),strategic(2113).
strategic(947):-strategic(1367),strategic(754),strategic(1856),strategic(592).
strategic(826):-strategic(62),strategic(605),strategic(1044),strategic(2274).
strategic(783):-strategic(1868),strategic(1264),strategic(663),strategic(504).
strategic(754):-strategic(947),strategic(1852),strategic(947),strategic(1606).
strategic(754):-strategic(253),strategic(369),strategic(412),strategic(754).
strategic(729):-strategic(605),strategic(783),strategic(1367),strategic(610).
strategic(729):-strategic(166),strategic(605),strategic(1291),strategic(1893).
strategic(729):-strategic(1577),strategic(605),strategic(1240),strategic(1560).
strategic(726):-strategic(451),strategic(968),strategic(1361),strategic(1986).
strategic(726):-strategic(1588),strategic(253),strategic(301),strategic(1264).
strategic(723):-strategic(1986),strategic(1751),strategic(783),strategic(1516).
strategic(723):-strategic(1588),strategic(2148),strategic(1834),strategic(1893).
strategic(68):-strategic(1518),strategic(412),strategic(129),strategic(953).
strategic(68):-strategic(1448),strategic(1044),strategic(1361),strategic(45).
strategic(663):-strategic(266),strategic(412),strategic(953),strategic(1834).
strategic(663):-strategic(1751),strategic(1986),strategic(592),strategic(1516).
strategic(657):-strategic(947),strategic(1518),strategic(1834),strategic(1852).
strategic(657):-strategic(726),strategic(2113),strategic(266),strategic(118).
strategic(657):-strategic(723),strategic(1893),strategic(1757),strategic(1856).
strategic(610):-strategic(1757),strategic(1834),strategic(1724),strategic(369).
strategic(610):-strategic(1240),strategic(663),strategic(1044),strategic(610).
strategic(605):-strategic(45),strategic(1757),strategic(1893),strategic(610).
strategic(592):-strategic(953),strategic(1361),strategic(2274),strategic(999).
strategic(592):-strategic(451),strategic(129),strategic(1986),strategic(1448).
strategic(1649):-strategic(1264),strategic(657),strategic(1649),strategic(2165).
strategic(1648):-strategic(45),strategic(1516),strategic(2148),strategic(1757).
strategic(1648):-strategic(369),strategic(1868),strategic(1811),strategic(968).
strategic(1648):-strategic(1856),strategic(663),strategic(1649),strategic(1044).
strategic(1648):-strategic(1648),strategic(55),strategic(754),strategic(947).
strategic(1606):-strategic(968),strategic(1856),strategic(657),strategic(118).
strategic(1606):-strategic(2165),strategic(947),strategic(166),strategic(947).
strategic(1588):-strategic(992),strategic(1343),strategic(118),strategic(118).
strategic(1588):-strategic(657),strategic(2113),strategic(62),strategic(266).
strategic(1588):-strategic(1788),strategic(1788),strategic(610),strategic(1852).
strategic(1588):-strategic(1736),strategic(999),strategic(451),strategic(1852).
strategic(1577):-strategic(947),strategic(1606),strategic(1291),strategic(113).
strategic(1577):-strategic(2148),strategic(369),strategic(1986),strategic(572).
strategic(1577):-strategic(1588),strategic(1448),strategic(1577),strategic(657).
strategic(1577):-strategic(1560),strategic(2165),strategic(1852),strategic(572).
strategic(1577):-strategic(1367),strategic(1361),strategic(1448),strategic(1751).
strategic(1560):-strategic(1757),strategic(1868),strategic(62),strategic(1516).
strategic(1560):-strategic(1751),strategic(1811),strategic(992),strategic(266).
strategic(1560):-strategic(1448),strategic(2274),strategic(1399),strategic(1367).
strategic(1516):-strategic(968),strategic(999),strategic(1868),strategic(369).
strategic(1516):-strategic(55),strategic(1343),strategic(369),strategic(826).
strategic(1516):-strategic(266),strategic(953),strategic(1291),strategic(504).
strategic(1516):-strategic(1560),strategic(1560),strategic(266),strategic(1986).
strategic(1448):-strategic(1399),strategic(592),strategic(1168),strategic(1367).
strategic(1399):-strategic(1291),strategic(266),strategic(1757),strategic(301).
strategic(1367):-strategic(68),strategic(605),strategic(2274),strategic(253).
strategic(1367):-strategic(663),strategic(1560),strategic(953),strategic(726).
strategic(1367):-strategic(62),strategic(953),strategic(68),strategic(451).
strategic(1367):-strategic(1399),strategic(166),strategic(1856),strategic(657).
strategic(1367):-strategic(1367),strategic(1852),strategic(1518),strategic(1291).
strategic(1367):-strategic(1264),strategic(1724),strategic(723),strategic(657).
strategic(1361):-strategic(663),strategic(1367),strategic(62),strategic(129).
strategic(1361):-strategic(663),strategic(1044),strategic(1856),strategic(412).
strategic(1361):-strategic(451),strategic(1399),strategic(1757),strategic(559).
strategic(1343):-strategic(369),strategic(2274),strategic(118),strategic(166).
strategic(1343):-strategic(2274),strategic(657),strategic(1606),strategic(55).
strategic(129):-strategic(68),strategic(1560),strategic(723),strategic(1044).
strategic(129):-strategic(266),strategic(754),strategic(723),strategic(1044).
strategic(129):-strategic(1868),strategic(1724),strategic(729),strategic(1757).
strategic(1291):-strategic(592),strategic(1986),strategic(1588),strategic(1868).
strategic(129):-strategic(1448),strategic(369),strategic(451),strategic(729).
strategic(129):-strategic(1367),strategic(1788),strategic(301),strategic(2148).
strategic(129):-strategic(1240),strategic(1044),strategic(729),strategic(1367).
strategic(1264):-strategic(2274),strategic(1757),strategic(129),strategic(62).
strategic(1264):-strategic(1893),strategic(729),strategic(129),strategic(663).
strategic(1264):-strategic(1448),strategic(1648),strategic(369),strategic(1736).
strategic(1240):-strategic(947),strategic(947),strategic(663),strategic(62).
strategic(1240):-strategic(1588),strategic(451),strategic(1343),strategic(1560).
strategic(118):-strategic(2274),strategic(412),strategic(2274),strategic(723).
strategic(118):-strategic(2113),strategic(1751),strategic(55),strategic(1516).
strategic(118):-strategic(1893),strategic(301),strategic(1291),strategic(1986).
strategic(118):-strategic(1291),strategic(266),strategic(301),strategic(605).
strategic(1168):-strategic(1648),strategic(953),strategic(1648),strategic(559).
strategic(1168):-strategic(1448),strategic(657),strategic(1811),strategic(1649).
strategic(1168):-strategic(1240),strategic(2165),strategic(723),strategic(992).
strategic(1168):-strategic(118),strategic(1367),strategic(451),strategic(1361).
:- strategic(1233).
:-not strategic(1044).
:-not strategic(1577).
"""
output = """
""" | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# coding: utf-8 -*-
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_server_volume
short_description: Attach/Detach Volumes from OpenStack VM's
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Attach or Detach volumes from OpenStack VM's
options:
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
required: false
server:
description:
- Name or ID of server you want to attach a volume to
required: true
volume:
description:
- Name or id of volume you want to attach to a server
required: true
device:
description:
- Device you want to attach. Defaults to auto finding a device name.
availability_zone:
description:
- Ignored. Present for backwards compatibility
requirements:
- "python >= 2.7"
- "openstacksdk"
'''
EXAMPLES = '''
# Attaches a volume to a compute host
- name: attach a volume
hosts: localhost
tasks:
- name: attach volume to host
os_server_volume:
state: present
cloud: mordred
server: Mysql-server
volume: mysql-data
device: /dev/vdb
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _system_state_change(state, device):
"""Check if system state would change."""
if state == 'present':
if device:
return False
return True
if state == 'absent':
if device:
return True
return False
return False
def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
volume=dict(required=True),
device=dict(default=None), # None == auto choose device name
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
state = module.params['state']
wait = module.params['wait']
timeout = module.params['timeout']
sdk, cloud = openstack_cloud_from_module(module)
try:
server = cloud.get_server(module.params['server'])
volume = cloud.get_volume(module.params['volume'])
if not volume:
module.fail_json(msg='volume %s is not found' % module.params['volume'])
dev = cloud.get_volume_attach_device(volume, server.id)
if module.check_mode:
module.exit_json(changed=_system_state_change(state, dev))
if state == 'present':
if dev:
# Volume is already attached to this server
module.exit_json(changed=False)
cloud.attach_volume(server, volume, module.params['device'],
wait=wait, timeout=timeout)
server = cloud.get_server(module.params['server']) # refresh
volume = cloud.get_volume(module.params['volume']) # refresh
hostvars = cloud.get_openstack_vars(server)
module.exit_json(
changed=True,
id=volume['id'],
attachments=volume['attachments'],
openstack=hostvars
)
elif state == 'absent':
if not dev:
# Volume is not attached to this server
module.exit_json(changed=False)
cloud.detach_volume(server, volume, wait=wait, timeout=timeout)
module.exit_json(
changed=True,
result='Detached volume from server'
)
except (sdk.exceptions.OpenStackCloudException, sdk.exceptions.ResourceTimeout) as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
import sys
import os
from multiprocessing import Process, Queue, Manager
from threading import Timer
from wadi_harness import Harness
from wadi_debug_win import Debugger
import time
import hashlib
def test(msg):
while True:
print 'Process 2:' + msg
#print msg
def test2():
print 'Process 1'
time.sleep(2)
while True:
print 'Process 1'
def run_harness(t):
harness = Harness(sys.argv[1],sys.argv[2],t)
harness.run()
def run_debugger(q):
debugger = Debugger(q)
debugger.run_Browser('IE')
def timeout_debug(dp):
print '[*] Terminating Debugger Process PID: %d' % dp.pid
dp.terminate()
class wadi():
def __init__(self, args=None):
if args:
self.args = args
else:
pass
def writeTestCases(self,tcases,msg):
self.msg = msg[0]
self.code = msg[1]
self.add = msg[2]
self.testcases = tcases
self.hash = hashlib.md5()
self.b = self.code+self.add
self.hash.update(self.b)
self.dgst = self.hash.hexdigest()
self.path = "./"+self.dgst
if os.path.exists(self.path):
print "[*] Duplicate Crash: %s" % self.dgst
else:
os.makedirs(self.path)
f = open(self.path + "/" +self.dgst+".crash","w+b")
f.write(self.msg)
f.close()
print "[*] Written Crash file to: %s" % self.dgst+".crash"
for i in range(10):
self.tcase = self.testcases.pop()
f2 = open(self.path+"/"+self.dgst+"_"+str(i)+".html","w+b")
f2.write(self.tcase)
f2.close()
print "[*] Written testcases to %s" % self.path+"/"+self.dgst+str(i)+".html"
print "[*] Last TestCase Folder '%s'" % self.dgst
def close(self):
sys.exit()
def run(self):
self.queue = Manager().list()
self.tcases = Manager().list()
self.server_pid = None
self.debugger_pid = None
self.init = 0
while True:
if not self.server_pid:
self.server_process = Process(target=run_harness, args=(self.tcases,))
self.server_process.start()
self.server_pid = self.server_process.pid
print '[*] Running Server Process %s ' % (self.server_pid,)
#self.server_pid =
if not self.debugger_pid:
self.debugger_process = Process(target=run_debugger,args=(self.queue,))
self.debugger_process.start()
self.debugger_pid = self.debugger_process.pid
timer = Timer(120.0,timeout_debug,(self.debugger_process,))
timer.daemon = True
timer.start()
if not self.debugger_process.is_alive():
print "[*] Debugger Process %s exited" % self.debugger_pid
timer.cancel()
self.lenq = len(self.queue)
self.lentc = len(self.tcases)
if self.lenq:
self.msg = self.queue.pop()
#self.msg = self.queue.get()
print "[*] Wooops Crash !!!!"
print "[*] %s" % self.msg[0]
else:
print "[*] No Crashes"
#if not self.tcases.empty():
if self.lentc and self.lenq:
#self.tc = self.tcases.get()
self.writeTestCases(self.tcases, self.msg)
else:
print "[*] No TestCases"
self.debugger_pid = None
else:
pass
if __name__ == '__main__':
#try:
w = wadi()
w.run()
#except:
# w.close() | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright 2014-2022 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.server.config.yaml
import com.charleskorn.kaml.Yaml
import com.charleskorn.kaml.YamlMap
import io.ktor.server.config.*
import kotlinx.serialization.Serializable
import kotlinx.serialization.decodeFromString
import kotlin.test.*
class YamlConfigTest {
@Test
fun testYamlApplicationConfig() {
val content = """
auth:
hashAlgorithm: SHA-256
salt: ktor
nullable: null
users:
- name: test
values:
- a
- b
listValues: ['a', 'b', 'c']
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val config = YamlConfig.from(yaml)
val auth = config.config("auth")
assertEquals("ktor", auth.property("salt").getString())
val users = auth.configList("users")
assertEquals(1, users.size)
assertEquals("test", users[0].property("name").getString())
assertEquals(listOf("a", "b", "c"), auth.property("listValues").getList())
val values = auth.property("values").getList()
assertEquals("[a, b]", values.toString())
assertEquals(null, auth.propertyOrNull("missingProperty"))
assertEquals("SHA-256", auth.propertyOrNull("hashAlgorithm")?.getString())
assertEquals(listOf("a", "b", "c"), auth.propertyOrNull("listValues")?.getList())
assertEquals(null, config.propertyOrNull("missingProperty"))
assertEquals(null, config.propertyOrNull("auth.missingProperty"))
assertEquals("SHA-256", config.propertyOrNull("auth.hashAlgorithm")?.getString())
assertEquals(listOf("a", "b", "c"), config.propertyOrNull("auth.listValues")?.getList())
assertEquals(null, config.propertyOrNull("auth.nullable"))
}
@Test
fun testKeysTopLevelYamlConfig() {
val content = """
auth:
hashAlgorithm: SHA-256
salt: ktor
users:
- name: test
values:
- a
- b
listValues: ['a', 'b', 'c']
data:
value1: 1
value2: 2
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val config = YamlConfig.from(yaml)
val keys = config.keys()
assertEquals(
keys,
setOf(
"auth.hashAlgorithm",
"auth.salt",
"auth.users",
"auth.values",
"auth.listValues",
"auth.data.value1",
"auth.data.value2"
)
)
}
@Test
fun testKeysNestedYamlConfig() {
val content = """
auth:
nested:
data:
value1: 1
value2: 2
list:
- a
- b
data1:
value1: 1
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val config = YamlConfig.from(yaml)
val nestedConfig = config.config("auth.nested")
val keys = nestedConfig.keys()
assertEquals(setOf("data.value1", "data.value2", "list"), keys)
assertEquals("1", nestedConfig.property("data.value1").getString())
assertEquals("2", nestedConfig.property("data.value2").getString())
assertEquals(listOf("a", "b"), nestedConfig.property("list").getList())
}
@Test
fun testEnvironmentVariable() {
val content = $$"""
ktor:
variable: $PATH
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val config = YamlConfig.from(yaml)
val value = config.property("ktor.variable").getString()
assertTrue(value.isNotEmpty())
assertFalse(value.contains("PATH"))
}
@Test
fun testEnvVarCurlyBraces() {
val content = $$"""
ktor:
variable: ${PATH}
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val config = YamlConfig.from(yaml)
val value = config.property("ktor.variable").getString()
assertTrue(value.isNotEmpty())
assertFalse(value.contains("PATH"))
}
@Test
fun testMissingEnvironmentVariable() {
val content = $$"""
ktor:
variable: $NON_EXISTING_VARIABLE
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
assertFailsWith<ApplicationConfigurationException> {
YamlConfig.from(yaml)
}
}
@Test
fun testMissingEnvVarCurlyBraces() {
val content = $$"""
ktor:
variable: ${NON_EXISTING}
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val cause = assertFailsWith<ApplicationConfigurationException> {
YamlConfig.from(yaml)
}
assertEquals(
"Required environment variable \"NON_EXISTING\" not found and no default value is present",
cause.message
)
}
@Test
fun testSelfReference() {
val content = $$"""
value:
my: "My value"
config:
database:
value: $value.my
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val config = YamlConfig.from(yaml)
assertEquals("My value", config.property("config.database.value").getString())
assertEquals("My value", config.config("config").property("database.value").getString())
assertEquals("My value", config.config("config.database").property("value").getString())
}
@Test
fun testSelfRefCurlyBraces() {
val content = $$"""
value:
my: "My value"
config:
database:
value: ${value.my}
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val config = YamlConfig.from(yaml)
assertEquals("My value", config.property("config.database.value").getString())
assertEquals("My value", config.config("config").property("database.value").getString())
assertEquals("My value", config.config("config.database").property("value").getString())
}
@Test
fun testSelfReferenceMissing() {
val content = $$"""
value:
my: "My value"
config:
database:
value: $value.missing
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
assertFailsWith<ApplicationConfigurationException> {
YamlConfig.from(yaml)
}
}
@Test
fun testSelfRefMissingCurlyBraces() {
val content = $$"""
value:
my: "My value"
config:
database:
value: ${value.missing}
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val cause = assertFailsWith<ApplicationConfigurationException> {
YamlConfig.from(yaml)
}
assertEquals(
"Required environment variable \"value.missing\" not found and no default value is present",
cause.message
)
}
@Test
fun testMissingEnvironmentVariableWithDefault() {
val content = $$"""
ktor:
variable: "$NON_EXISTING_VARIABLE:DEFAULT_VALUE"
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val config = YamlConfig.from(yaml)
assertEquals("DEFAULT_VALUE", config.property("ktor.variable").getString())
}
@Test
fun testMissingEnvVarWithDefaultCurlyBraces() {
val content = $$"""
ktor:
variable: "${NON_EXISTING:DEFAULT}"
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val config = YamlConfig.from(yaml)
assertEquals("DEFAULT", config.property("ktor.variable").getString())
}
@Test
fun testOptionalMissingEnvironmentVariable() {
val content = """
ktor:
variable: "$?NON_EXISTING_VARIABLE"
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val config = YamlConfig.from(yaml)
assertNull(config.propertyOrNull("ktor.variable"))
}
@Test
fun testOptionalMissingEnvVarCurlyBraces() {
val content = $$"""
ktor:
variable: "${?NON_EXISTING_VARIABLE}"
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val config = YamlConfig.from(yaml)
assertNull(config.propertyOrNull("ktor.variable"))
}
@Test
fun testOptionalExistingEnvironmentVariable() {
val content = """
ktor:
variable: "$?PATH"
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val config = YamlConfig.from(yaml)
val value = config.property("ktor.variable").getString()
assertTrue(value.isNotEmpty())
assertFalse(value.contains("PATH"))
}
@Test
fun testOptionalExistingEnvVarCurlyBraces() {
val content = $$"""
ktor:
variable: "${?PATH}"
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val config = YamlConfig.from(yaml)
val value = config.property("ktor.variable").getString()
assertTrue(value.isNotEmpty())
assertFalse(value.contains("PATH"))
}
@Test
fun testExistingEnvironmentVariableWithDefault() {
val content = $$"""
ktor:
variable: "$PATH:DEFAULT_VALUE"
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val config = YamlConfig.from(yaml)
val value = config.property("ktor.variable").getString()
assertTrue(value.isNotEmpty())
assertFalse(value.contains("PATH"))
assertFalse(value.contains("DEFAULT_VALUE"))
}
@Test
fun testExistingEnvVarWithDefaultCurlyBraces() {
val content = $$"""
ktor:
variable: "${PATH:DEFAULT_VALUE}"
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val config = YamlConfig.from(yaml)
val value = config.property("ktor.variable").getString()
assertTrue(value.isNotEmpty())
assertFalse(value.contains("PATH"))
assertFalse(value.contains("DEFAULT_VALUE"))
}
@Test
fun testMalformedVarCurlyBraces() {
val content = $$"""
ktor:
variable: "${some_name"
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val cause = assertFailsWith<ApplicationConfigurationException> {
YamlConfig.from(yaml)
}
assertEquals(
"Required environment variable \"{some_name\" not found and no default value is present",
cause.message
)
}
@Test
fun testToMap() {
val content = """
hashAlgorithm: SHA-256
salt: ktor
users:
- name: test
password: asd
- name: other
password: qwe
values:
- a
- b
listValues: ['a', 'b', 'c']
data:
value1: 1
value2: 2
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val config = YamlConfig.from(yaml)
val map = config.toMap()
assertEquals(6, map.size)
assertEquals("SHA-256", map["hashAlgorithm"])
assertEquals("ktor", map["salt"])
assertEquals(
listOf(mapOf("name" to "test", "password" to "asd"), mapOf("name" to "other", "password" to "qwe")),
map["users"]
)
assertEquals(listOf("a", "b"), map["values"])
assertEquals(listOf("a", "b", "c"), map["listValues"])
assertEquals(mapOf("value1" to "1", "value2" to "2"), map["data"])
}
@Test
fun readNumber() {
val content = """
number: 1234
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val config = YamlConfig.from(yaml)
assertEquals(1234, config.property("number").getAs())
}
@Test
fun readSerializableClass() {
val content = """
auth:
hashAlgorithm: SHA-256
salt: ktor
users:
- name: test
password: asd
- name: other
password: qwe
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val config = YamlConfig.from(yaml)
val securityConfig = config.propertyOrNull("auth")?.getAs<SecurityConfig>()
assertNotNull(securityConfig)
assertEquals("SHA-256", securityConfig.hashAlgorithm)
assertEquals("ktor", securityConfig.salt)
assertEquals(
listOf(SecurityUser("test", "asd"), SecurityUser("other", "qwe")),
securityConfig.users
)
}
@Test
fun mergedConversion() {
val authYamlText = """
app:
auth:
hashAlgorithm: SHA-256
salt: ktor
users:
- name: test
password: asd
""".trimIndent()
val dbYamlText = """
app:
database:
driver: org.postgresql.Driver
url: jdbc:postgresql://localhost:5432/ktor
user: ktor
password: ktor
schema: public
maxPoolSize: 3
transactionIsolation: TRANSACTION_REPEATABLE_READ
useServerPrepStmts: true
cachePrepStmts: true
""".trimIndent()
val mapEntries = listOf(
"app.auth.salt" to "SALT&PEPPA",
"app.deployment.port" to "8080",
"app.deployment.host" to "localhost",
"app.database.maxPoolSize" to "2",
"app.database.cachePrepStmts" to "true",
"app.database.prepStmtCacheSize" to "250",
)
val authYamlConfig = YamlConfig.from(Yaml.default.decodeFromString<YamlMap>(authYamlText))
val dbYamlConfig = YamlConfig.from(Yaml.default.decodeFromString<YamlMap>(dbYamlText))
val mapConfig = MapApplicationConfig(mapEntries)
val yamlMerged = authYamlConfig.mergeWith(dbYamlConfig)
val mergedConfig = yamlMerged.mergeWith(mapConfig)
val expectedSecurity = SecurityConfig("SHA-256", "SALT&PEPPA", listOf(SecurityUser("test", "asd")))
val expectedDeployment = DeploymentConfig("localhost", 8080)
val expectedDatabase =
DatabaseConfig(
"org.postgresql.Driver",
"jdbc:postgresql://localhost:5432/ktor",
"ktor",
"ktor",
"public",
2
)
assertEquals(
expectedSecurity,
mergedConfig.property("app.auth").getAs()
)
assertEquals(
expectedDeployment,
mergedConfig.property("app.deployment").getAs()
)
assertEquals(
expectedDatabase,
mergedConfig.property("app.database").getAs()
)
assertEquals(
AppConfig(expectedDeployment, expectedSecurity, expectedDatabase),
mergedConfig.property("app").getAs()
)
}
@Test
fun testNestedSelfReference() {
val content = $$"""
a: $b
b: $c
c: foo
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val config = YamlConfig.from(yaml)
assertEquals("foo", config.property("a").getString())
}
@Test
fun testDeepNestedSelfReference() {
val content = $$"""
a: $b
b: $c
c: $d
d: foo
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val config = YamlConfig.from(yaml)
assertEquals("foo", config.property("a").getString())
}
@Test
fun testSelfReferenceCycle() {
val content = $$"""
a: $b
b: $a
""".trimIndent()
val yaml = Yaml.default.decodeFromString<YamlMap>(content)
val cause = assertFailsWith<ApplicationConfigurationException> {
YamlConfig.from(yaml)
}
assertTrue(cause.message!!.contains("Cycle detected"))
}
@Serializable
data class SecurityUser(
val name: String,
val password: String
)
@Serializable
data class AppConfig(
val deployment: DeploymentConfig,
val auth: SecurityConfig,
val database: DatabaseConfig,
)
@Serializable
data class SecurityConfig(
val hashAlgorithm: String,
val salt: String,
val users: List<SecurityUser>,
)
@Serializable
data class DeploymentConfig(
val host: String,
val port: Int,
)
@Serializable
data class DatabaseConfig(
val driver: String,
val url: String,
val user: String,
val password: String,
val schema: String,
val maxPoolSize: Int,
)
} | kotlin | github | https://github.com/ktorio/ktor | ktor-server/ktor-server-config-yaml/jvmAndPosix/test/YamlConfigTest.kt |
# -*- coding: utf-8 -*-
import logging
import os
import time
import openerp
import openerp.addons.hw_proxy.controllers.main as hw_proxy
import threading
import subprocess
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
upgrade_template = """
<!DOCTYPE HTML>
<html>
<head>
<title>Odoo's PosBox - Software Upgrade</title>
<script src="http://code.jquery.com/jquery-1.11.0.min.js"></script>
<script>
$(function(){
var upgrading = false;
$('#upgrade').click(function(){
console.log('click');
if(!upgrading){
upgrading = true;
$('#upgrade').text('Upgrading, Please Wait');
$.ajax({
url:'/hw_proxy/perform_upgrade/'
}).then(function(status){
$('#upgrade').html('Upgrade successful, restarting the posbox...');
$('#upgrade').off('click');
},function(){
$('#upgrade').text('Upgrade Failed');
});
}
});
});
</script>
<style>
body {
width: 480px;
margin: 60px auto;
font-family: sans-serif;
text-align: justify;
color: #6B6B6B;
}
.centering{
text-align: center;
}
#upgrade {
padding: 20px;
background: rgb(121, 197, 107);
color: white;
border-radius: 3px;
text-align: center;
margin: 30px;
text-decoration: none;
display: inline-block;
}
</style>
</head>
<body>
<h1>PosBox Software Upgrade</h1>
<p>
This tool will help you perform an upgrade of the PosBox's software over the
internet.
<p></p>
However the preferred method to upgrade the posbox is to flash the sd-card with
the <a href='http://nightly.odoo.com/trunk/posbox/'>latest image</a>. The upgrade
procedure is explained into to the
<a href='https://www.odoo.com/documentation/user/point_of_sale/posbox/index.html'>PosBox manual</a>
</p>
<p>
To upgrade the posbox, click on the upgrade button. The upgrade will take a few minutes. <b>Do not reboot</b> the PosBox during the upgrade.
</p>
<p>
Latest patch:
</p>
<pre>
"""
upgrade_template += subprocess.check_output("git --work-tree=/home/pi/odoo/ --git-dir=/home/pi/odoo/.git log -1", shell=True).replace("\n", "<br/>")
upgrade_template += """
</pre>
<div class='centering'>
<a href='#' id='upgrade'>Upgrade</a>
</div>
</body>
</html>
"""
class PosboxUpgrader(hw_proxy.Proxy):
def __init__(self):
super(PosboxUpgrader,self).__init__()
self.upgrading = threading.Lock()
@http.route('/hw_proxy/upgrade', type='http', auth='none', )
def upgrade(self):
return upgrade_template
@http.route('/hw_proxy/perform_upgrade', type='http', auth='none')
def perform_upgrade(self):
self.upgrading.acquire()
os.system('/home/pi/odoo/addons/point_of_sale/tools/posbox/configuration/posbox_update.sh')
self.upgrading.release()
return 'SUCCESS' | unknown | codeparrot/codeparrot-clean | ||
import argparse
import os
import sys
import textwrap
import pandas as pd
# Hack to have something similar to DISABLED_TEST. These models are flaky.
flaky_models = {
"yolov3",
"detectron2_maskrcnn_r_101_c4",
"XGLMForCausalLM", # discovered in https://github.com/pytorch/pytorch/pull/128148
"moondream", # discovered in https://github.com/pytorch/pytorch/pull/159291
# discovered in https://github.com/pytorch/pytorch/issues/161419. Its not flaky but really hard to repro, so skipping it
"mobilenetv3_large_100",
# https://github.com/pytorch/pytorch/issues/163670
"vision_maskrcnn",
}
def get_field(csv, model_name: str, field: str):
try:
return csv.loc[csv["name"] == model_name][field].item()
except Exception:
return None
def check_accuracy(actual_csv, expected_csv, expected_filename):
failed = []
improved = []
if "rocm" in expected_filename:
flaky_models.update(
{
"Background_Matting",
"mnasnet1_0",
"llava",
"repvgg_a2",
"resnet152",
"resnet18",
"resnet50",
"stable_diffusion_unet",
"torchrec_dlrm",
"shufflenet_v2_x1_0",
"vgg16",
"BERT_pytorch",
# LLM
"google/gemma-2-2b",
"tts_angular", # RuntimeError: Cannot access data pointer of Tensor
# Discovered on gfx950 CI after ROCm 7.2 upgrade, eager mode non determinism
"alexnet",
"demucs",
}
)
for model in actual_csv["name"]:
accuracy = get_field(actual_csv, model, "accuracy")
expected_accuracy = get_field(expected_csv, model, "accuracy")
if accuracy is None:
status = "MISSING_ACCURACY:"
failed.append(model)
elif expected_accuracy is None:
status = "MISSING_EXPECTED:"
failed.append(model)
elif accuracy == expected_accuracy:
status = "PASS" if expected_accuracy == "pass" else "XFAIL"
print(f"{model:34} {status}")
continue
elif model in flaky_models:
if accuracy == "pass":
# model passed but marked xfailed
status = "PASS_BUT_FLAKY:"
else:
# model failed but marked passe
status = "FAIL_BUT_FLAKY:"
elif accuracy != "pass":
status = "FAIL:"
failed.append(model)
else:
status = "IMPROVED:"
improved.append(model)
print(
f"{model:34} {status:9} accuracy={accuracy}, expected={expected_accuracy}"
)
msg = ""
if failed or improved:
if failed:
msg += textwrap.dedent(
f"""
Error: {len(failed)} models have accuracy status regressed:
{" ".join(failed)}
"""
)
if improved:
msg += textwrap.dedent(
f"""
Improvement: {len(improved)} models have accuracy status improved:
{" ".join(improved)}
"""
)
sha = os.getenv("SHA1", "{your CI commit sha}")
msg += textwrap.dedent(
f"""
If this change is expected, you can update `{expected_filename}` to reflect the new baseline.
from pytorch/pytorch root, run
`python benchmarks/dynamo/ci_expected_accuracy/update_expected.py {sha}`
and then `git add` the resulting local changes to expected CSVs to your commit.
"""
)
return failed or improved, msg
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--actual", type=str, required=True)
parser.add_argument("--expected", type=str, required=True)
args = parser.parse_args()
actual = pd.read_csv(args.actual)
expected = pd.read_csv(args.expected)
failed, msg = check_accuracy(actual, expected, args.expected)
if failed:
print(msg)
sys.exit(1)
if __name__ == "__main__":
main() | python | github | https://github.com/pytorch/pytorch | benchmarks/dynamo/check_accuracy.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.api_fastapi.core_api.base import BaseModel
class VersionInfo(BaseModel):
    """Version information serializer for responses."""

    # Semantic version string of the running deployment (e.g. "3.0.0").
    version: str
    # Source-control build identifier; None when it was not recorded at
    # build time.
    git_version: str | None
---
applies_to:
stack: ga
serverless: ga
mapped_pages:
- https://www.elastic.co/guide/en/elasticsearch/reference/current/es-build-connector.html
---
# Self-managed connectors [es-build-connector]
::::{admonition} Naming history
Self-managed connectors were initially known as "connector clients". You might find this term in older documentation.
::::
Self-managed [Elastic connectors](/reference/search-connectors/index.md) are run on your own infrastructure. This means they run outside of your Elastic deployment.
## Availability and Elastic prerequisites [es-build-connector-prerequisites]
::::{note}
Self-managed connectors currently don’t support Windows. Use this [compatibility matrix](https://www.elastic.co/support/matrix#matrix_os) to check which operating systems are supported by self-managed connectors.
% Find this information under **self-managed connectors** on that page.
::::
:::::{dropdown} Expand for Elastic prerequisites information
Your Elastic deployment must include the following Elastic services:
* **Elasticsearch**
* **Kibana**
A new {{ech}} deployment or {{es-serverless}} project includes these services by default.
To run self-managed connectors, your self-deployed connector service version must match your Elasticsearch version. For example, if you’re running Elasticsearch 8.10.1, your connector service should be version 8.10.1.x. Elastic does not support deployments running mismatched versions (except during upgrades).
::::{note}
As of 8.10.0 *new* self-managed connectors no longer require the Enterprise Search service. However, if you are upgrading connectors from versions earlier than 8.9, you’ll need to run Enterprise Search once to migrate your connectors to the new format. In future releases, you may still need to run Enterprise Search for the purpose of migrations or upgrades.
Please note that Enterprise Search is not available in versions 9.0+.
::::
You must have access to Kibana and have `write` [indices privileges^](/reference/elasticsearch/security-privileges.md) for the `.elastic-connectors` index.
To use connector clients in a self-managed environment, you must deploy the [connectors service](#es-connectors-deploy-connector-service).
**Support and licensing requirements**
Depending on how you use self-managed connectors, support and licensing requirements will vary.
Refer to the following subscriptions pages for details. Find your connector of interest in the **Elastic Search** section under **Client Integrations**:
* [Elastic self-managed subscriptions page](https://www.elastic.co/subscriptions/)
* [Elastic Cloud subscriptions page](https://www.elastic.co/subscriptions/cloud)
Note the following information regarding support for self-managed connectors:
* A converted but *unmodified* managed connector is supported by Elastic.
* A converted but *customized* managed connector is *not* supported by Elastic.
:::::
## Workflow
In order to set up, configure, and run a connector you’ll be moving between your third-party service, the Elastic UI, and your terminal. At a high-level, the workflow looks like this:
1. Satisfy any data source prerequisites (e.g., create an OAuth application).
2. Create a connector.
- Use the UI. Search for "connectors" in the [global search field](docs-content://explore-analyze/find-and-organize/find-apps-and-objects.md).
- Use the API. Refer to [create connector API]({{es-apis}}/operation/operation-connector-post).
3. Deploy the connector service:
- [Option 1: Run with Docker](es-connectors-run-from-docker.md) (recommended)
- [Option 2: Run from source](es-connectors-run-from-source.md)
4. Enter data source configuration details in the UI.
## Deploy the connector service [es-connectors-deploy-connector-service]
The connector service is a Python application that you must run on your own infrastructure when using self-managed connectors. The source code is hosted in the [elastic/connectors](https://github.com/elastic/connectors) repository.
You can run the connector service from source or use Docker:
* [Run the connectors from source](/reference/search-connectors/es-connectors-run-from-source.md). Use this option if you’re comfortable working with Python and want to iterate quickly locally.
* [Run the connectors from Docker](/reference/search-connectors/es-connectors-run-from-docker.md). Use this option if you want to deploy the connectors to a server, or use a container orchestration platform.
* Refer to our [Docker Compose quickstart](/reference/search-connectors/es-connectors-docker-compose-quickstart.md) for a quick way to spin up all the required services at once.
## Tutorials [es-build-connector-example]
* Follow our [UI-based tutorial](/reference/search-connectors/es-postgresql-connector-client-tutorial.md) to learn how to run the self-managed connector service and set up a self-managed connector, **using the UI**.
* Follow our [API-based tutorial](/reference/search-connectors/api-tutorial.md) to learn how to set up a self-managed connector **using the** [**connector APIs**](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-connector).
These examples use the PostgreSQL connector but the basic process is the same for all self-managed connectors.
## E2E testing [es-build-connector-testing]
The connector framework enables you to run end-to-end (E2E) tests on your self-managed connectors, against a real data source.
To avoid tampering with a real Elasticsearch instance, E2E tests run an isolated Elasticsearch instance in Docker. Configuration values are set in your `docker-compose.yml` file. Docker Compose manages the setup of the development environment, including both the mock Elastic instance and mock data source.
E2E tests use **default** configuration values for the connector. Find instructions about testing in each connector’s documentation.
## Build or customize connectors [es-build-connector-framework]
The Elastic connector framework enables you to:
* Customize existing self-managed connectors.
* Build your own self-managed connectors.
Refer to [Build and customize connectors](/reference/search-connectors/build-customize-connectors.md) for more information. | unknown | github | https://github.com/elastic/elasticsearch | docs/reference/search-connectors/self-managed-connectors.md |
''' Convenience class for writing cron scripts'''
# pylint: disable=R0903
# Copyright 2014 42Lines, Inc.
# Original Author: Jim Browne
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime as DT
from lockfile import FileLock, LockFailed, LockTimeout
import logging
import logging.handlers
import __main__ as main
from optparse import OptionParser, make_option
import os
from random import randint
import sys
import time
# Support for RotateFileHandler in multiple processes
from multiprocessinglog import MultiProcessingLog, MultiProcessingLogStream
__version__ = '0.2.1'
class StdErrFilter(logging.Filter):
    '''Logging filter that discards records below a configured level.

    When ``discard_all`` is set, every record is suppressed regardless
    of severity; otherwise only records at or above ``level`` pass.
    '''

    def __init__(self, level=logging.WARNING, discard_all=False):
        super(StdErrFilter, self).__init__()
        self.level = level
        self.discard_all = discard_all

    def filter(self, record):
        '''Return True when the record should be emitted.'''
        return (not self.discard_all) and record.levelno >= self.level
class CronScript(object):
    ''' Convenience class for writing cron scripts.

    Provides the boilerplate common to periodic jobs: command-line
    option parsing, logging to a rotating file and/or syslog, an
    optional random start delay ("splay"), a lock file preventing
    concurrent runs, and a success-stamp file touched on clean exit.
    Intended to be used as a context manager::

        with CronScript():
            ...  # job body
    '''

    def __init__(self, args=None, options=None, usage=None,
                 disable_interspersed_args=False):
        '''Parse command-line options and configure logging.

        :param args: argument list to parse; defaults to sys.argv[1:]
        :param options: extra optparse Option objects appended to the
            built-in ones
        :param usage: usage string forwarded to OptionParser
        :param disable_interspersed_args: if True, stop option parsing
            at the first non-option argument
        '''
        self.lock = None
        self.start_time = None
        self.end_time = None

        if options is None:
            options = []
        if args is None:
            args = sys.argv[1:]

        # Default log/lock/stamp file paths derive from the script name.
        prog = os.path.basename(main.__file__)
        logfile = os.path.join('/var/log/', "%s.log" % prog)
        lockfile = os.path.join('/var/lock/', "%s" % prog)
        stampfile = os.path.join('/var/tmp/', "%s.success" % prog)

        options.append(make_option("--debug", "-d", action="store_true",
                                   help="Minimum log level of DEBUG"))
        options.append(make_option("--quiet", "-q", action="store_true",
                                   help="Only WARN and above to stdout"))
        options.append(make_option("--nolog", action="store_true",
                                   help="Do not log to LOGFILE"))
        options.append(make_option("--logfile", type="string",
                                   default=logfile,
                                   help="File to log to, default %default"))
        options.append(make_option("--syslog", action="store_true",
                                   help="Log to syslog instead of a file"))
        options.append(make_option("--nolock", action="store_true",
                                   help="Do not use a lockfile"))
        options.append(make_option("--lockfile", type="string",
                                   default=lockfile,
                                   help="Lock file, default %default"))
        options.append(make_option("--nostamp", action="store_true",
                                   help="Do not use a success stamp file"))
        options.append(make_option("--stampfile", type="string",
                                   default=stampfile,
                                   help="Success stamp file, default %default"))
        helpmsg = "Lock timeout in seconds, default %default"
        options.append(make_option("--locktimeout", default=90, type="int",
                                   help=helpmsg))
        helpmsg = "Sleep a random time between 0 and N seconds before starting, default %default"
        options.append(make_option("--splay", default=0, type="int",
                                   help=helpmsg))

        parser = OptionParser(option_list=options, usage=usage)
        if disable_interspersed_args:
            # Stop option parsing at first non-option
            parser.disable_interspersed_args()
        (self.options, self.args) = parser.parse_args(args)

        self.logger = logging.getLogger(main.__name__)
        if self.options.debug:
            self.logger.setLevel(logging.DEBUG)
        else:
            self.logger.setLevel(logging.INFO)

        # Log to syslog when requested.
        if self.options.syslog:
            syslog_formatter = logging.Formatter("%s: %%(levelname)s %%(message)s" % prog)
            handler = logging.handlers.SysLogHandler(
                address="/dev/log",
                facility=logging.handlers.SysLogHandler.LOG_LOCAL3
            )
            handler.setFormatter(syslog_formatter)
            self.logger.addHandler(handler)

        default_formatter = logging.Formatter("%(asctime)s;%(levelname)s;%(message)s",
                                              "%Y-%m-%d-%H:%M:%S")

        if not self.options.nolog:
            # Log to a multiprocess-safe rotating file (50 MiB x 10).
            try:
                handler = MultiProcessingLog(
                    "%s" % (self.options.logfile),
                    maxBytes=(50 * 1024 * 1024),
                    backupCount=10)
            except IOError:
                sys.stderr.write("Fatal: Could not open log file: %s\n"
                                 % self.options.logfile)
                sys.exit(1)
            handler.setFormatter(default_formatter)
            self.logger.addHandler(handler)

        # If quiet, only WARNING and above go to STDERR; otherwise all
        # logging goes to stderr
        handler2 = MultiProcessingLogStream(sys.stderr)
        if self.options.quiet:
            err_filter = StdErrFilter()
            handler2.addFilter(err_filter)
        handler2.setFormatter(default_formatter)
        self.logger.addHandler(handler2)

        self.logger.debug(self.options)

    def __enter__(self):
        '''Sleep for the random splay interval (if any), record the
        start time, and acquire the lock file unless --nolock.

        Exits the process with status 1 when the lock cannot be
        acquired or times out.
        '''
        if self.options.splay > 0:
            splay = randint(0, self.options.splay)
            self.logger.debug('Sleeping for %d seconds (splay=%d)' %
                              (splay, self.options.splay))
            time.sleep(splay)

        self.start_time = DT.datetime.today()
        if not self.options.nolock:
            self.logger.debug('Attempting to acquire lock %s (timeout %s)',
                              self.options.lockfile,
                              self.options.locktimeout)
            self.lock = FileLock(self.options.lockfile)
            try:
                self.lock.acquire(timeout=self.options.locktimeout)
            except LockFailed as e:
                self.logger.error("Lock could not be acquired.")
                self.logger.error(str(e))
                sys.exit(1)
            except LockTimeout:
                # Fix: the exception instance was previously bound to an
                # unused variable; nothing from it is reported.
                msg = "Lock could not be acquired. Timeout exceeded."
                self.logger.error(msg)
                sys.exit(1)

    def __exit__(self, etype, value, traceback):
        '''Record the run time, release the lock, and touch the
        success-stamp file when the managed block exited cleanly
        (etype is None).
        '''
        self.end_time = DT.datetime.today()
        self.logger.debug('Run time: %s', self.end_time - self.start_time)
        if not self.options.nolock:
            self.logger.debug('Attempting to release lock %s',
                              self.options.lockfile)
            self.lock.release()
        if etype is None:
            if not self.options.nostamp:
                # Fix: close the handle instead of leaking it; we only
                # need to create/truncate the stamp file.
                open(self.options.stampfile, "w").close()
#!/usr/bin/python
# Copyright 2003, 2004 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
t = BoostBuild.Tester()

# Regression test: when staging V2 used to change suffixes on targets
# corresponding to real files.
t.write("jamfile.jam", """
import type : register ;
register A : a1 a2 a3 ;
stage a : a.a3 ;
""")

t.write("jamroot.jam", "")
t.write("a.a3", "")

t.run_build_system()
# The staged copy must retain its original ".a3" suffix.
t.expect_addition("a/a.a3");

# Regression test: we should be able to specify empty suffix for derived target
# type, even if base type has non-empty suffix.
t.write("a.cpp", "")

t.write("suffixes.jam", """
import type ;
import generators ;
import common ;

type.register First : first : ;
type.register Second : "" : First ;

generators.register-standard $(__name__).second : CPP : Second ;

rule second
{
    TOUCH on $(<) = [ common.file-creation-command ] ;
}

actions second
{
    $(TOUCH) $(<)
}
""")

# Python port of the same tool module, used by the Python build engine.
t.write("suffixes.py", """
import b2.build.type as type
import b2.build.generators as generators
import b2.tools.common as common

from b2.manager import get_manager

type.register("First", ["first"])
type.register("Second", [""], "First")

generators.register_standard("suffixes.second", ["CPP"], ["Second"])

get_manager().engine().register_action("suffixes.second",
    "%s $(<)" % common.file_creation_command())
""")

t.write("jamroot.jam", """
import suffixes ;
""")

t.write("jamfile.jam", """
second a : a.cpp ;
""")

t.run_build_system()
# The derived type "Second" has an empty suffix, so the target is plain "a".
t.expect_addition("bin/$toolset/debug/a")

t.cleanup()
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _ # noqa
import horizon
class SystemPanels(horizon.PanelGroup):
    # Grouping of cloud-wide administrative panels shown in the Admin
    # dashboard's "System Panel" section.
    slug = "admin"
    name = _("System Panel")
    panels = ('overview', 'metering', 'hypervisors', 'instances', 'volumes',
              'flavors', 'images', 'networks', 'routers', 'defaults', 'info')
class IdentityPanels(horizon.PanelGroup):
    # Grouping of identity-management panels (domains/projects/users/...).
    slug = "identity"
    name = _("Identity Panel")
    panels = ('domains', 'projects', 'users', 'groups', 'roles')
class Admin(horizon.Dashboard):
    # Admin dashboard definition; restricted to users holding the
    # "admin" role (see `permissions` below).
    name = _("Admin")
    slug = "admin"
    panels = (SystemPanels, IdentityPanels)
    default_panel = 'overview'
    permissions = ('openstack.roles.admin',)


# Make the dashboard known to Horizon's site registry.
horizon.register(Admin)
/*
Copyright 2017-2021 Glen Joseph Fernandes
(glenjofe@gmail.com)
Distributed under the Boost Software License, Version 1.0.
(http://www.boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_CORE_POINTER_TRAITS_HPP
#define BOOST_CORE_POINTER_TRAITS_HPP
#include <boost/config.hpp>
#include <boost/core/addressof.hpp>
#include <cstddef>
namespace boost {
namespace detail {

/* Sentinel type meaning "no element type could be deduced". */
struct ptr_none { };

/* void_t-style detector: member `type` exists only when the template
   argument is a valid type. */
template<class>
struct ptr_valid {
    typedef void type;
};

/* Extracts the first template argument of T<U, Args...>; ptr_none for
   non-template types. */
template<class>
struct ptr_first {
    typedef ptr_none type;
};

#if !defined(BOOST_NO_CXX11_VARIADIC_TEMPLATES)
template<template<class, class...> class T, class U, class... Args>
struct ptr_first<T<U, Args...> > {
    typedef U type;
};
#else
/* Pre-variadic fallback: cover class templates of up to three arguments. */
template<template<class> class T, class U>
struct ptr_first<T<U> > {
    typedef U type;
};

template<template<class, class> class T, class U1, class U2>
struct ptr_first<T<U1, U2> > {
    typedef U1 type;
};

template<template<class, class, class> class T, class U1, class U2, class U3>
struct ptr_first<T<U1, U2, U3> > {
    typedef U1 type;
};
#endif

/* element_type: prefer a nested T::element_type, else the first
   template argument. */
template<class T, class = void>
struct ptr_element {
    typedef typename ptr_first<T>::type type;
};

template<class T>
struct ptr_element<T, typename ptr_valid<typename T::element_type>::type> {
    typedef typename T::element_type type;
};

/* difference_type: prefer a nested T::difference_type, else
   std::ptrdiff_t. */
template<class, class = void>
struct ptr_difference {
    typedef std::ptrdiff_t type;
};

template<class T>
struct ptr_difference<T,
    typename ptr_valid<typename T::difference_type>::type> {
    typedef typename T::difference_type type;
};

/* Rebinds T<U, ...> to T<V, ...> by replacing the first argument. */
template<class, class>
struct ptr_transform { };

#if !defined(BOOST_NO_CXX11_VARIADIC_TEMPLATES)
template<template<class, class...> class T, class U, class... Args, class V>
struct ptr_transform<T<U, Args...>, V> {
    typedef T<V, Args...> type;
};
#else
template<template<class> class T, class U, class V>
struct ptr_transform<T<U>, V> {
    typedef T<V> type;
};

template<template<class, class> class T, class U1, class U2, class V>
struct ptr_transform<T<U1, U2>, V> {
    typedef T<V, U2> type;
};

template<template<class, class, class> class T,
    class U1, class U2, class U3, class V>
struct ptr_transform<T<U1, U2, U3>, V> {
    typedef T<V, U2, U3> type;
};
#endif

/* rebind: prefer a nested T::rebind<U> (alias or ::other) when present,
   otherwise rewrite the first template argument. */
template<class T, class U, class = void>
struct ptr_rebind
    : ptr_transform<T, U> { };

#if !defined(BOOST_NO_CXX11_TEMPLATE_ALIASES)
template<class T, class U>
struct ptr_rebind<T, U,
    typename ptr_valid<typename T::template rebind<U> >::type> {
    typedef typename T::template rebind<U> type;
};
#else
template<class T, class U>
struct ptr_rebind<T, U,
    typename ptr_valid<typename T::template rebind<U>::other>::type> {
    typedef typename T::template rebind<U>::other type;
};
#endif

#if !defined(BOOST_NO_CXX11_DECLTYPE_N3276)
/* SFINAE probe: value is true when T provides a static pointer_to(E&).
   The two-char `result` wrapper makes the sizeof comparison distinguish
   the overloads. */
template<class T, class E>
class ptr_to_expr {
    template<class>
    struct result {
        char x, y;
    };

    static E& source();

    template<class O>
    static auto check(int) -> result<decltype(O::pointer_to(source()))>;

    template<class>
    static char check(long);

public:
    BOOST_STATIC_CONSTEXPR bool value = sizeof(check<T>(0)) > 1;
};

/* Raw pointers always support pointer_to (via addressof below). */
template<class T, class E>
struct ptr_to_expr<T*, E> {
    BOOST_STATIC_CONSTEXPR bool value = true;
};

template<class T, class E>
struct ptr_has_to {
    BOOST_STATIC_CONSTEXPR bool value = ptr_to_expr<T, E>::value;
};
#else
/* Without N3276 decltype support, optimistically assume pointer_to
   exists for non-void element types. */
template<class, class>
struct ptr_has_to {
    BOOST_STATIC_CONSTEXPR bool value = true;
};
#endif

/* pointer_to cannot be formed for (cv-qualified) void: E& would be
   ill-formed. */
template<class T>
struct ptr_has_to<T, void> {
    BOOST_STATIC_CONSTEXPR bool value = false;
};

template<class T>
struct ptr_has_to<T, const void> {
    BOOST_STATIC_CONSTEXPR bool value = false;
};

template<class T>
struct ptr_has_to<T, volatile void> {
    BOOST_STATIC_CONSTEXPR bool value = false;
};

template<class T>
struct ptr_has_to<T, const volatile void> {
    BOOST_STATIC_CONSTEXPR bool value = false;
};

/* pointer_to is exposed only when the pointer type supports it. */
template<class T, class E, bool = ptr_has_to<T, E>::value>
struct ptr_to { };

template<class T, class E>
struct ptr_to<T, E, true> {
    static T pointer_to(E& v) {
        return T::pointer_to(v);
    }
};

template<class T>
struct ptr_to<T*, T, true> {
    static T* pointer_to(T& v) BOOST_NOEXCEPT {
        return boost::addressof(v);
    }
};

/* Aggregates the deduced member types for the primary pointer_traits. */
template<class T, class E>
struct ptr_traits
    : ptr_to<T, E> {
    typedef T pointer;
    typedef E element_type;
    typedef typename ptr_difference<T>::type difference_type;

    template<class U>
    struct rebind_to
        : ptr_rebind<T, U> { };

#if !defined(BOOST_NO_CXX11_TEMPLATE_ALIASES)
    template<class U>
    using rebind = typename rebind_to<U>::type;
#endif
};

/* No deducible element type: traits class is left empty. */
template<class T>
struct ptr_traits<T, ptr_none> { };

} /* detail */

/* Mirrors std::pointer_traits (C++11) for fancy pointer types. */
template<class T>
struct pointer_traits
    : detail::ptr_traits<T, typename detail::ptr_element<T>::type> { };

/* Specialization for raw pointers. */
template<class T>
struct pointer_traits<T*>
    : detail::ptr_to<T*, T> {
    typedef T* pointer;
    typedef T element_type;
    typedef std::ptrdiff_t difference_type;

    template<class U>
    struct rebind_to {
        typedef U* type;
    };

#if !defined(BOOST_NO_CXX11_TEMPLATE_ALIASES)
    template<class U>
    using rebind = typename rebind_to<U>::type;
#endif
};

/* to_address: obtain a raw pointer from a (possibly fancy) pointer.
   Raw pointers are returned unchanged. */
template<class T>
BOOST_CONSTEXPR inline T*
to_address(T* v) BOOST_NOEXCEPT
{
    return v;
}

#if !defined(BOOST_NO_CXX14_RETURN_TYPE_DEDUCTION)
namespace detail {

/* Overload resolution order (int beats long): prefer
   pointer_traits<T>::to_address when well-formed, otherwise recurse
   through operator->. */
template<class T>
inline T*
ptr_address(T* v, int) BOOST_NOEXCEPT
{
    return v;
}

template<class T>
inline auto
ptr_address(const T& v, int) BOOST_NOEXCEPT
-> decltype(boost::pointer_traits<T>::to_address(v))
{
    return boost::pointer_traits<T>::to_address(v);
}

template<class T>
inline auto
ptr_address(const T& v, long) BOOST_NOEXCEPT
{
    return boost::detail::ptr_address(v.operator->(), 0);
}

} /* detail */

template<class T>
inline auto
to_address(const T& v) BOOST_NOEXCEPT
{
    return boost::detail::ptr_address(v, 0);
}
#else
/* C++11 fallback: no return-type deduction, so always recurse via
   operator-> down to the raw pointer. */
template<class T>
inline typename pointer_traits<T>::element_type*
to_address(const T& v) BOOST_NOEXCEPT
{
    return boost::to_address(v.operator->());
}
#endif

} /* boost */
#endif | unknown | github | https://github.com/mysql/mysql-server | extra/boost/boost_1_87_0/boost/core/pointer_traits.hpp |
"""
LUFA Library
Copyright (C) Dean Camera, 2017.
dean [at] fourwalledcubicle [dot] com
www.lufa-lib.org
"""
"""
Front-end programmer for the LUFA HID class bootloader.
Usage:
python hid_bootloader_loader.py <Device> <Input>.hex
Example:
python hid_bootloader_loader.py at90usb1287 Mouse.hex
Requires the pywinusb (https://pypi.python.org/pypi/pywinusb/) and
IntelHex (https://pypi.python.org/pypi/IntelHex/) libraries.
"""
import sys
from pywinusb import hid
from intelhex import IntelHex
# Device information table
device_info_map = dict()
device_info_map['at90usb1287'] = {'page_size': 256, 'flash_kb': 128}
device_info_map['at90usb1286'] = {'page_size': 256, 'flash_kb': 128}
device_info_map['at90usb647'] = {'page_size': 256, 'flash_kb': 64}
device_info_map['at90usb646'] = {'page_size': 256, 'flash_kb': 64}
device_info_map['atmega32u4'] = {'page_size': 128, 'flash_kb': 32}
device_info_map['atmega32u2'] = {'page_size': 128, 'flash_kb': 32}
device_info_map['atmega16u4'] = {'page_size': 128, 'flash_kb': 16}
device_info_map['atmega16u2'] = {'page_size': 128, 'flash_kb': 16}
device_info_map['at90usb162'] = {'page_size': 128, 'flash_kb': 16}
device_info_map['atmega8u2'] = {'page_size': 128, 'flash_kb': 8}
device_info_map['at90usb82'] = {'page_size': 128, 'flash_kb': 8}
def get_hid_device_handle():
    """Return the first attached HID device matching the LUFA HID
    bootloader's VID/PID pair (0x03EB/0x2067), or None when no such
    device is present."""
    hid_device_filter = hid.HidDeviceFilter(vendor_id=0x03EB,
                                            product_id=0x2067)

    valid_hid_devices = hid_device_filter.get_devices()

    # Fix: the original compared "len(...) is 0", which tests object
    # identity rather than equality (works only via CPython small-int
    # interning and raises SyntaxWarning on Python >= 3.8).
    if not valid_hid_devices:
        return None
    return valid_hid_devices[0]
def send_page_data(hid_device, address, data):
    """Send one flash page to the bootloader as a HID output report.

    The report consists of the HID Report ID (always zero), the 16-bit
    start address in little-endian byte order, then the page data bytes.
    """
    report = [0, address & 0xFF, address >> 8]
    report += list(data)
    hid_device.send_output_report(report)
def program_device(hex_data, device_info):
    """Write the HEX image to the attached bootloader device page by
    page, then command the bootloader to start the application.

    :param hex_data: IntelHex object holding the firmware image
    :param device_info: entry from device_info_map supplying
        'page_size' (bytes) and 'flash_kb' (flash size in KB)

    Exits the process with status 1 when no bootloader device is found.
    """
    hid_device = get_hid_device_handle()

    if hid_device is None:
        print("No valid HID device found.")
        sys.exit(1)

    try:
        hid_device.open()
        print("Connected to bootloader.")

        # Program in all data from the loaded HEX file, in a number of device
        # page sized chunks
        for addr in range(0, hex_data.maxaddr(), device_info['page_size']):
            # Compute the address range of the current page in the device
            current_page_range = range(addr, addr + device_info['page_size'])

            # Extract the data from the hex file at the specified start page
            # address and convert it to a regular list of bytes
            page_data = [hex_data[i] for i in current_page_range]

            print("Writing address 0x%04X-0x%04X" % (current_page_range[0], current_page_range[-1]))

            # Devices with more than 64KB of flash should shift down the page
            # address so that it is 16-bit (page size is guaranteed to be
            # >= 256 bytes so no non-zero address bits are discarded)
            # NOTE(review): the test below also shifts for exactly-64KB
            # parts (flash_kb == 64) -- confirm this matches the
            # bootloader's addressing convention.
            if device_info['flash_kb'] < 64:
                send_page_data(hid_device, addr, page_data)
            else:
                send_page_data(hid_device, addr >> 8, page_data)

        # Once programming is complete, start the application via a dummy page
        # program to the page address 0xFFFF
        print("Programming complete, starting application.")
        send_page_data(hid_device, 0xFFFF, [0] * device_info['page_size'])

    finally:
        hid_device.close()
if __name__ == '__main__':
    # Load the specified HEX file.
    try:
        hex_data = IntelHex(sys.argv[2])
    except Exception:
        # Fix: was a bare "except:", which would also swallow
        # SystemExit and KeyboardInterrupt.
        print("Could not open the specified HEX file.")
        sys.exit(1)

    # Retrieve the device information entry for the specified device.
    try:
        device_info = device_info_map[sys.argv[1]]
    except (KeyError, IndexError):
        # Fix: narrowed from a bare "except:" to the lookup failures
        # this block actually guards against.
        print("Unknown device name specified.")
        sys.exit(1)

    program_device(hex_data, device_info)
from shared import RequestHandler, cross_origin
import json #hm
class GraphQLEndpoint(RequestHandler):
    # NOTE(review): this class references module-level names `ninja`
    # (presumably a jinja2-style template environment) and `schema`
    # (presumably a graphene Schema) that are not imported in this
    # file -- confirm they are defined elsewhere before deploying.

    def get(self):
        """
        Renders the GraphiQL IDE, populated with a query if it exists
        """
        query = self.request.GET.get('query')
        # this does shit all, but might be useful later
        # res = urllib.unquote(query).decode()
        template = ninja.get_template('graphiql.html')
        template_values = {
            'query': query
        }
        # hack for un-quoting double quotes like these "
        # NOTE(review): the replace() below appears to substitute a
        # character for itself (a no-op); it likely was meant to
        # unescape an HTML entity -- verify against the template.
        output = template.render(template_values).replace('"', '"')
        self.response.write(output)

    @cross_origin
    def post(self):
        """
        Accepts a query, executes it, and returns the result
        """
        data = json.loads(self.request.body)
        query = data.get('query', '')
        variables = data.get('variables')
        result = schema.execute(query, variable_values=variables)
        response = {'data' : result.data}
        self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
        self.response.out.write(json.dumps(response))
from __future__ import unicode_literals
from .mtv import MTVServicesInfoExtractor
from .common import InfoExtractor
class ComedyCentralIE(MTVServicesInfoExtractor):
    # Extractor for individual cc.com clip/episode pages; full-episode
    # listing pages are excluded by the negative lookahead in the URL
    # pattern.
    _VALID_URL = r'''(?x)https?://(?:www\.)?cc\.com/
        (video-clips|episodes|cc-studios|video-collections|shows(?=/[^/]+/(?!full-episodes)))
        /(?P<title>.*)'''
    _FEED_URL = 'http://comedycentral.com/feeds/mrss/'

    _TESTS = [{
        'url': 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
        'md5': 'c4f48e9eda1b16dd10add0744344b6d8',
        'info_dict': {
            'id': 'cef0cbb3-e776-4bc9-b62e-8016deccb354',
            'ext': 'mp4',
            'title': 'CC:Stand-Up|August 18, 2013|1|0101|Uncensored - Too Good of a Mother',
            'description': 'After a certain point, breastfeeding becomes c**kblocking.',
            'timestamp': 1376798400,
            'upload_date': '20130818',
        },
    }, {
        'url': 'http://www.cc.com/shows/the-daily-show-with-trevor-noah/interviews/6yx39d/exclusive-rand-paul-extended-interview',
        'only_matching': True,
    }]
class ComedyCentralFullEpisodesIE(MTVServicesInfoExtractor):
    # Extractor for cc.com full-episode listing pages, returned as
    # playlists of individual videos.
    _VALID_URL = r'''(?x)https?://(?:www\.)?cc\.com/
        (?:full-episodes|shows(?=/[^/]+/full-episodes))
        /(?P<id>[^?]+)'''
    _FEED_URL = 'http://comedycentral.com/feeds/mrss/'

    _TESTS = [{
        'url': 'http://www.cc.com/full-episodes/pv391a/the-daily-show-with-trevor-noah-november-28--2016---ryan-speedo-green-season-22-ep-22028',
        'info_dict': {
            'description': 'Donald Trump is accused of exploiting his president-elect status for personal gain, Cuban leader Fidel Castro dies, and Ryan Speedo Green discusses "Sing for Your Life."',
            'title': 'November 28, 2016 - Ryan Speedo Green',
        },
        'playlist_count': 4,
    }, {
        'url': 'http://www.cc.com/shows/the-daily-show-with-trevor-noah/full-episodes',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # Resolve the page to an mgid via the "triforce" data blob, then
        # fetch the feed of videos belonging to that playlist.
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        mgid = self._extract_triforce_mgid(webpage, data_zone='t2_lc_promo1')
        videos_info = self._get_videos_info(mgid)
        return videos_info
class ToshIE(MTVServicesInfoExtractor):
    # Extractor for tosh.cc.com video clips and collections; relies on
    # the shared MTV MRSS feed machinery for actual extraction.
    IE_DESC = 'Tosh.0'
    _VALID_URL = r'^https?://tosh\.cc\.com/video-(?:clips|collections)/[^/]+/(?P<videotitle>[^/?#]+)'
    _FEED_URL = 'http://tosh.cc.com/feeds/mrss'

    _TESTS = [{
        'url': 'http://tosh.cc.com/video-clips/68g93d/twitter-users-share-summer-plans',
        'info_dict': {
            'description': 'Tosh asked fans to share their summer plans.',
            'title': 'Twitter Users Share Summer Plans',
        },
        'playlist': [{
            'md5': 'f269e88114c1805bb6d7653fecea9e06',
            'info_dict': {
                'id': '90498ec2-ed00-11e0-aca6-0026b9414f30',
                'ext': 'mp4',
                'title': 'Tosh.0|June 9, 2077|2|211|Twitter Users Share Summer Plans',
                'description': 'Tosh asked fans to share their summer plans.',
                'thumbnail': r're:^https?://.*\.jpg',
                # It's really reported to be published on year 2077
                'upload_date': '20770610',
                'timestamp': 3390510600,
                'subtitles': {
                    'en': 'mincount:3',
                },
            },
        }]
    }, {
        'url': 'http://tosh.cc.com/video-collections/x2iz7k/just-plain-foul/m5q4fp',
        'only_matching': True,
    }]
class ComedyCentralTVIE(MTVServicesInfoExtractor):
    # Extractor for the German comedycentral.tv site; the MRSS feed URL
    # is embedded in the page as a data-mrss attribute.
    _VALID_URL = r'https?://(?:www\.)?comedycentral\.tv/(?:staffeln|shows)/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'http://www.comedycentral.tv/staffeln/7436-the-mindy-project-staffel-4',
        'info_dict': {
            'id': 'local_playlist-f99b626bdfe13568579a',
            'ext': 'flv',
            'title': 'Episode_the-mindy-project_shows_season-4_episode-3_full-episode_part1',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.comedycentral.tv/shows/1074-workaholics',
        'only_matching': True,
    }, {
        'url': 'http://www.comedycentral.tv/shows/1727-the-mindy-project/bonus',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        # Pull the MRSS feed URL out of the page markup, then delegate
        # to the shared feed handler.
        mrss_url = self._search_regex(
            r'data-mrss=(["\'])(?P<url>(?:(?!\1).)+)\1',
            webpage, 'mrss url', group='url')

        return self._get_videos_info_from_url(mrss_url, video_id)
class ComedyCentralShortnameIE(InfoExtractor):
    # Handles the ":tds" / ":thedailyshow" shortcut pseudo-URLs by
    # redirecting them to the full-episodes listing page.
    _VALID_URL = r'^:(?P<id>tds|thedailyshow)$'
    _TESTS = [{
        'url': ':tds',
        'only_matching': True,
    }, {
        'url': ':thedailyshow',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Both shortcuts resolve to the same listing URL.
        shortcut_map = {
            'tds': 'http://www.cc.com/shows/the-daily-show-with-trevor-noah/full-episodes',
            'thedailyshow': 'http://www.cc.com/shows/the-daily-show-with-trevor-noah/full-episodes',
        }
        return self.url_result(shortcut_map[video_id])
"""SCons.Tool.sunf95
Tool-specific initialization for sunf95, the Sun Studio F95 compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunf95.py 2013/03/03 09:48:35 garyo"
import SCons.Util
from FortranCommon import add_all_to_env
compilers = ['sunf95', 'f95']
def generate(env):
    """Add Builders and construction variables for sunf95 to an
    Environment."""
    add_all_to_env(env)

    # Prefer a compiler detected on the PATH; fall back to plain 'f95'.
    fortran_compiler = env.Detect(compilers) or 'f95'
    env['FORTRAN'] = fortran_compiler
    env['F95'] = fortran_compiler

    env['SHFORTRAN'] = '$FORTRAN'
    env['SHF95'] = '$F95'

    # Sun Studio uses -KPIC for position-independent (shared-object) code.
    env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC')
    env['SHF95FLAGS'] = SCons.Util.CLVar('$F95FLAGS -KPIC')
def exists(env):
    # The tool is usable only if one of the Sun Fortran compilers
    # ('sunf95' or 'f95') can be found on the current PATH.
    return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright (C) 2008 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect.testing.testers;
import static com.google.common.collect.testing.features.CollectionFeature.SUPPORTS_REMOVE;
import static com.google.common.collect.testing.features.CollectionSize.ONE;
import static com.google.common.collect.testing.features.CollectionSize.ZERO;
import com.google.common.annotations.GwtCompatible;
import com.google.common.collect.testing.features.CollectionFeature;
import com.google.common.collect.testing.features.CollectionSize;
import org.junit.Ignore;
/**
* A generic JUnit test which tests {@code remove(Object)} operations on a list. Can't be invoked
* directly; please see {@link com.google.common.collect.testing.ListTestSuiteBuilder}.
*
* @author George van den Driessche
*/
@GwtCompatible
@Ignore("test runners must not instantiate and run this directly, only via suites we build")
// @Ignore affects the Android test runner, which respects JUnit 4 annotations on JUnit 3 tests.
@SuppressWarnings("JUnit4ClassUsedInJUnit3")
public class ListRemoveTester<E> extends AbstractListTester<E> {
@CollectionFeature.Require(SUPPORTS_REMOVE)
@CollectionSize.Require(absent = {ZERO, ONE})
public void testRemove_duplicate() {
ArrayWithDuplicate<E> arrayAndDuplicate = createArrayWithDuplicateElement();
collection = getSubjectGenerator().create(arrayAndDuplicate.elements);
E duplicate = arrayAndDuplicate.duplicate;
int firstIndex = getList().indexOf(duplicate);
int initialSize = getList().size();
assertTrue("remove(present) should return true", getList().remove(duplicate));
assertTrue(
"After remove(duplicate), a list should still contain the duplicate element",
getList().contains(duplicate));
assertFalse(
"remove(duplicate) should remove the first instance of the "
+ "duplicate element in the list",
firstIndex == getList().indexOf(duplicate));
assertEquals(
"remove(present) should decrease the size of a list by one.",
initialSize - 1,
getList().size());
}
} | java | github | https://github.com/google/guava | android/guava-testlib/src/com/google/common/collect/testing/testers/ListRemoveTester.java |
# -*- coding: utf-8 -*-
# Import Python Libs
from __future__ import absolute_import
import multiprocessing
import ctypes
import logging
import os
import hashlib
import shutil
import binascii
# Import Salt Libs
import salt.crypt
import salt.payload
import salt.master
import salt.utils.event
from salt.utils.cache import CacheCli
# Import Third Party Libs
import tornado.gen
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
log = logging.getLogger(__name__)
# TODO: rename
class AESPubClientMixin(object):
    """Mixin for pub-channel clients: verifies the master's signature on
    published messages (when enabled) and decrypts AES payloads."""

    def _verify_master_signature(self, payload):
        # Only enforced when the payload actually carries a signature AND
        # the minion is configured to require signed pub messages.
        if payload.get('sig') and self.opts.get('sign_pub_messages'):
            # Verify that the signature is valid
            master_pubkey_path = os.path.join(self.opts['pki_dir'], 'minion_master.pub')
            if not salt.crypt.verify_signature(master_pubkey_path, payload['load'], payload.get('sig')):
                raise salt.crypt.AuthenticationError('Message signature failed to validate.')

    @tornado.gen.coroutine
    def _decode_payload(self, payload):
        # we need to decrypt it
        log.trace('Decoding payload: {0}'.format(payload))
        if payload['enc'] == 'aes':
            self._verify_master_signature(payload)
            try:
                payload['load'] = self.auth.crypticle.loads(payload['load'])
            except salt.crypt.AuthenticationError:
                # Decryption failed -- the master has likely rotated the AES
                # session key. Re-authenticate to fetch the new key, then
                # retry the decrypt once (a second failure propagates).
                yield self.auth.authenticate()
                payload['load'] = self.auth.crypticle.loads(payload['load'])
        raise tornado.gen.Return(payload)
# TODO: rename?
class AESReqServerMixin(object):
    '''
    Mixin to house all of the master-side auth crypto
    '''

    def pre_fork(self, _):
        '''
        Pre-fork we need to create the zmq router device

        Generates the shared AES session key in a multiprocessing Array so
        that all forked worker processes see key rotations.
        '''
        salt.master.SMaster.secrets['aes'] = {'secret': multiprocessing.Array(ctypes.c_char,
                                                                              salt.crypt.Crypticle.generate_key_string()),
                                              'reload': salt.crypt.Crypticle.generate_key_string,
                                              }

    def post_fork(self, _, __):
        # Per-worker setup: serializer, crypto, event bus, key policy and
        # minion bookkeeping. Runs in each forked worker process.
        # NOTE(review): relies on salt.daemons.masterapi / salt.utils.minions /
        # salt.utils.verify being importable via the imports done elsewhere in
        # the package -- they are not imported at the top of this module; verify.
        self.serial = salt.payload.Serial(self.opts)
        self.crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)

        # other things needed for _auth
        # Create the event manager
        self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
        self.auto_key = salt.daemons.masterapi.AutoKey(self.opts)

        # only create a con_cache-client if the con_cache is active
        if self.opts['con_cache']:
            self.cache_cli = CacheCli(self.opts)
        else:
            self.cache_cli = False
        # Make a minion checker object
        self.ckminions = salt.utils.minions.CkMinions(self.opts)

        self.master_key = salt.crypt.MasterKeys(self.opts)

    def _encrypt_private(self, ret, dictkey, target):
        '''
        The server equivalent of ReqChannel.crypted_transfer_decode_dictentry

        Encrypts ``ret`` under a fresh one-shot AES key and encrypts that key
        with the target minion's accepted RSA public key, so only that minion
        can read the payload.
        '''
        # encrypt with a specific AES key
        pubfn = os.path.join(self.opts['pki_dir'],
                             'minions',
                             target)
        key = salt.crypt.Crypticle.generate_key_string()
        pcrypt = salt.crypt.Crypticle(
            self.opts,
            key)
        try:
            with salt.utils.fopen(pubfn) as f:
                pub = RSA.importKey(f.read())
        except (ValueError, IndexError, TypeError):
            # No (or unreadable/corrupt) accepted key for this minion:
            # return an empty payload encrypted with the shared session key.
            return self.crypticle.dumps({})

        pret = {}
        cipher = PKCS1_OAEP.new(pub)
        pret['key'] = cipher.encrypt(key)
        pret[dictkey] = pcrypt.dumps(
            ret if ret is not False else {}
        )
        return pret

    def _update_aes(self):
        '''
        Check to see if a fresh AES key is available and update the components
        of the worker

        :return: True if the key was rotated and the local crypticle was
                 refreshed, False if the key is unchanged.
        '''
        if salt.master.SMaster.secrets['aes']['secret'].value != self.crypticle.key_string:
            self.crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
            return True
        return False

    def _decode_payload(self, payload):
        # we need to decrypt it
        if payload['enc'] == 'aes':
            try:
                payload['load'] = self.crypticle.loads(payload['load'])
            except salt.crypt.AuthenticationError:
                # The shared key may have rotated under us; refresh and retry
                # once. If no rotation happened the failure is genuine.
                if not self._update_aes():
                    raise
                payload['load'] = self.crypticle.loads(payload['load'])
        return payload

    def _auth(self, load):
        '''
        Authenticate the client, use the sent public key to encrypt the AES key
        which was generated at start up.

        This method fires an event over the master event manager. The event is
        tagged "auth" and returns a dict with information about the auth
        event.

        Outline of the steps performed:

        # Verify that the key we are receiving matches the stored key
        # Store the key if it is not there
        # Make an RSA key with the pub key
        # Encrypt the AES key as an encrypted salt.payload
        # Package the return and return it
        '''

        # Reject ids containing path separators etc. before touching the
        # filesystem with them.
        if not salt.utils.verify.valid_id(self.opts, load['id']):
            log.info(
                'Authentication request from invalid id {id}'.format(**load)
                )
            return {'enc': 'clear',
                    'load': {'ret': False}}
        log.info('Authentication request from {id}'.format(**load))

        # 0 is default which should be 'unlimited'
        if self.opts['max_minions'] > 0:
            # use the ConCache if enabled, else use the minion utils
            if self.cache_cli:
                minions = self.cache_cli.get_cached()
            else:
                minions = self.ckminions.connected_ids()
                if len(minions) > 1000:
                    log.info('With large numbers of minions it is advised '
                             'to enable the ConCache with \'con_cache: True\' '
                             'in the masters configuration file.')

            if not len(minions) <= self.opts['max_minions']:
                # we reject new minions, minions that are already
                # connected must be allowed for the mine, highstate, etc.
                if load['id'] not in minions:
                    msg = ('Too many minions connected (max_minions={0}). '
                           'Rejecting connection from id '
                           '{1}'.format(self.opts['max_minions'],
                                        load['id']))
                    log.info(msg)
                    eload = {'result': False,
                             'act': 'full',
                             'id': load['id'],
                             'pub': load['pub']}

                    self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
                    return {'enc': 'clear',
                            'load': {'ret': 'full'}}

        # Check if key is configured to be auto-rejected/signed
        auto_reject = self.auto_key.check_autoreject(load['id'])
        auto_sign = self.auto_key.check_autosign(load['id'])

        # Candidate key paths for this minion id in each key state dir.
        pubfn = os.path.join(self.opts['pki_dir'],
                             'minions',
                             load['id'])
        pubfn_pend = os.path.join(self.opts['pki_dir'],
                                  'minions_pre',
                                  load['id'])
        pubfn_rejected = os.path.join(self.opts['pki_dir'],
                                      'minions_rejected',
                                      load['id'])
        pubfn_denied = os.path.join(self.opts['pki_dir'],
                                    'minions_denied',
                                    load['id'])
        if self.opts['open_mode']:
            # open mode is turned on, nuts to checks and overwrite whatever
            # is there
            pass
        elif os.path.isfile(pubfn_rejected):
            # The key has been rejected, don't place it in pending
            log.info('Public key rejected for {0}. Key is present in '
                     'rejection key dir.'.format(load['id']))
            eload = {'result': False,
                     'id': load['id'],
                     'pub': load['pub']}
            self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
            return {'enc': 'clear',
                    'load': {'ret': False}}

        elif os.path.isfile(pubfn):
            # The key has been accepted, check it
            # NOTE(review): file handle is never closed explicitly here
            # (relies on GC); consider a ``with`` block.
            if salt.utils.fopen(pubfn, 'r').read().strip() != load['pub'].strip():
                log.error(
                    'Authentication attempt from {id} failed, the public '
                    'keys did not match. This may be an attempt to compromise '
                    'the Salt cluster.'.format(**load)
                )
                # put denied minion key into minions_denied
                with salt.utils.fopen(pubfn_denied, 'w+') as fp_:
                    fp_.write(load['pub'])
                eload = {'result': False,
                         'id': load['id'],
                         'pub': load['pub']}
                self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
                return {'enc': 'clear',
                        'load': {'ret': False}}

        elif not os.path.isfile(pubfn_pend):
            # The key has not been accepted, this is a new minion
            if os.path.isdir(pubfn_pend):
                # The key path is a directory, error out
                log.info(
                    'New public key {id} is a directory'.format(**load)
                )
                eload = {'result': False,
                         'id': load['id'],
                         'pub': load['pub']}
                self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
                return {'enc': 'clear',
                        'load': {'ret': False}}

            if auto_reject:
                key_path = pubfn_rejected
                log.info('New public key for {id} rejected via autoreject_file'
                         .format(**load))
                key_act = 'reject'
                key_result = False
            elif not auto_sign:
                key_path = pubfn_pend
                log.info('New public key for {id} placed in pending'
                         .format(**load))
                key_act = 'pend'
                key_result = True
            else:
                # The key is being automatically accepted, don't do anything
                # here and let the auto accept logic below handle it.
                key_path = None

            if key_path is not None:
                # Write the key to the appropriate location
                with salt.utils.fopen(key_path, 'w+') as fp_:
                    fp_.write(load['pub'])
                ret = {'enc': 'clear',
                       'load': {'ret': key_result}}
                eload = {'result': key_result,
                         'act': key_act,
                         'id': load['id'],
                         'pub': load['pub']}
                self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
                return ret

        elif os.path.isfile(pubfn_pend):
            # This key is in the pending dir and is awaiting acceptance
            if auto_reject:
                # We don't care if the keys match, this minion is being
                # auto-rejected. Move the key file from the pending dir to the
                # rejected dir.
                try:
                    shutil.move(pubfn_pend, pubfn_rejected)
                except (IOError, OSError):
                    pass
                log.info('Pending public key for {id} rejected via '
                         'autoreject_file'.format(**load))
                ret = {'enc': 'clear',
                       'load': {'ret': False}}
                eload = {'result': False,
                         'act': 'reject',
                         'id': load['id'],
                         'pub': load['pub']}
                self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
                return ret

            elif not auto_sign:
                # This key is in the pending dir and is not being auto-signed.
                # Check if the keys are the same and error out if this is the
                # case. Otherwise log the fact that the minion is still
                # pending.
                # NOTE(review): file handle not closed explicitly (see above).
                if salt.utils.fopen(pubfn_pend, 'r').read() != load['pub']:
                    log.error(
                        'Authentication attempt from {id} failed, the public '
                        'key in pending did not match. This may be an '
                        'attempt to compromise the Salt cluster.'
                        .format(**load)
                    )
                    # put denied minion key into minions_denied
                    with salt.utils.fopen(pubfn_denied, 'w+') as fp_:
                        fp_.write(load['pub'])
                    eload = {'result': False,
                             'id': load['id'],
                             'pub': load['pub']}
                    self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
                    return {'enc': 'clear',
                            'load': {'ret': False}}
                else:
                    log.info(
                        'Authentication failed from host {id}, the key is in '
                        'pending and needs to be accepted with salt-key '
                        '-a {id}'.format(**load)
                    )
                    eload = {'result': True,
                             'act': 'pend',
                             'id': load['id'],
                             'pub': load['pub']}
                    self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
                    return {'enc': 'clear',
                            'load': {'ret': True}}

            else:
                # This key is in pending and has been configured to be
                # auto-signed. Check to see if it is the same key, and if
                # so, pass on doing anything here, and let it get automatically
                # accepted below.
                # NOTE(review): file handle not closed explicitly (see above).
                if salt.utils.fopen(pubfn_pend, 'r').read() != load['pub']:
                    log.error(
                        'Authentication attempt from {id} failed, the public '
                        'keys in pending did not match. This may be an '
                        'attempt to compromise the Salt cluster.'
                        .format(**load)
                    )
                    # put denied minion key into minions_denied
                    with salt.utils.fopen(pubfn_denied, 'w+') as fp_:
                        fp_.write(load['pub'])
                    eload = {'result': False,
                             'id': load['id'],
                             'pub': load['pub']}
                    self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
                    return {'enc': 'clear',
                            'load': {'ret': False}}
                else:
                    pass

        else:
            # Something happened that I have not accounted for, FAIL!
            # NOTE(review): log.warn is a deprecated alias of log.warning.
            log.warn('Unaccounted for authentication failure')
            eload = {'result': False,
                     'id': load['id'],
                     'pub': load['pub']}
            self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
            return {'enc': 'clear',
                    'load': {'ret': False}}

        log.info('Authentication accepted from {id}'.format(**load))
        # only write to disk if you are adding the file, and in open mode,
        # which implies we accept any key from a minion.
        if not os.path.isfile(pubfn) and not self.opts['open_mode']:
            with salt.utils.fopen(pubfn, 'w+') as fp_:
                fp_.write(load['pub'])
        elif self.opts['open_mode']:
            disk_key = ''
            if os.path.isfile(pubfn):
                with salt.utils.fopen(pubfn, 'r') as fp_:
                    disk_key = fp_.read()
            if load['pub'] and load['pub'] != disk_key:
                log.debug('Host key change detected in open mode.')
                with salt.utils.fopen(pubfn, 'w+') as fp_:
                    fp_.write(load['pub'])

        pub = None

        # the con_cache is enabled, send the minion id to the cache
        if self.cache_cli:
            self.cache_cli.put_cache([load['id']])

        # The key payload may sometimes be corrupt when using auto-accept
        # and an empty request comes in
        try:
            with salt.utils.fopen(pubfn) as f:
                pub = RSA.importKey(f.read())
        except (ValueError, IndexError, TypeError) as err:
            log.error('Corrupt public key "{0}": {1}'.format(pubfn, err))
            return {'enc': 'clear',
                    'load': {'ret': False}}

        cipher = PKCS1_OAEP.new(pub)
        ret = {'enc': 'pub',
               'pub_key': self.master_key.get_pub_str(),
               'publish_port': self.opts['publish_port']}

        # sign the masters pubkey (if enabled) before it is
        # sent to the minion that was just authenticated
        if self.opts['master_sign_pubkey']:
            # append the pre-computed signature to the auth-reply
            if self.master_key.pubkey_signature():
                log.debug('Adding pubkey signature to auth-reply')
                log.debug(self.master_key.pubkey_signature())
                ret.update({'pub_sig': self.master_key.pubkey_signature()})
            else:
                # the master has its own signing-keypair, compute the master.pub's
                # signature and append that to the auth-reply
                log.debug("Signing master public key before sending")
                pub_sign = salt.crypt.sign_message(self.master_key.get_sign_paths()[1],
                                                   ret['pub_key'])
                ret.update({'pub_sig': binascii.b2a_base64(pub_sign)})

        mcipher = PKCS1_OAEP.new(self.master_key.key)
        if self.opts['auth_mode'] >= 2:
            if 'token' in load:
                try:
                    mtoken = mcipher.decrypt(load['token'])
                    aes = '{0}_|-{1}'.format(salt.master.SMaster.secrets['aes']['secret'].value, mtoken)
                except Exception:
                    # Token failed to decrypt, send back the salty bacon to
                    # support older minions
                    # NOTE(review): if decryption fails here, ``aes`` is never
                    # bound and cipher.encrypt(aes) below raises NameError --
                    # confirm whether this path is reachable in practice.
                    pass
            else:
                aes = salt.master.SMaster.secrets['aes']['secret'].value

            ret['aes'] = cipher.encrypt(aes)
        else:
            if 'token' in load:
                try:
                    mtoken = mcipher.decrypt(load['token'])
                    ret['token'] = cipher.encrypt(mtoken)
                except Exception:
                    # Token failed to decrypt, send back the salty bacon to
                    # support older minions
                    pass

            aes = salt.master.SMaster.secrets['aes']['secret'].value
            ret['aes'] = cipher.encrypt(salt.master.SMaster.secrets['aes']['secret'].value)
        # Be aggressive about the signature
        digest = hashlib.sha256(aes).hexdigest()
        ret['sig'] = salt.crypt.private_encrypt(self.master_key.key, digest)
        eload = {'result': True,
                 'act': 'accept',
                 'id': load['id'],
                 'pub': load['pub']}
        self.event.fire_event(eload, salt.utils.event.tagify(prefix='auth'))
        return ret
#!/usr/bin/env bash

# Copyright 2023 The Cockroach Authors.
#
# Use of this software is governed by the CockroachDB Software License
# included in the /LICENSE file.

# Print (space-separated, on one line) the Go package directories under
# pkg/ that contain .go files changed between two commits.

BASE_SHA="$1"
HEAD_SHA="$2"

if [ -z "$HEAD_SHA" ];then
  echo "Usage: $0 <base-sha> <head-sha>"
  exit 1
fi

# 1. Changed .go files in the range, excluding testdata and the gss psql dir.
# 2. Map each file to its directory, de-duplicate.
# 3. Keep only dirs that still contain .go files (drops deleted packages).
git diff --name-only "${BASE_SHA}..${HEAD_SHA}" -- "pkg/**/*.go" ":!*/testdata/*" ":!pkg/acceptance/compose/gss/psql/**" \
  | xargs -rn1 dirname \
  | sort -u \
  | { while read path; do if ls "$path"/*.go &>/dev/null; then echo -n "$path "; fi; done; }
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.glue import AwsGlueJobHook
from airflow.sensors.base import BaseSensorOperator
class AwsGlueJobSensor(BaseSensorOperator):
    """
    Waits for an AWS Glue Job run to reach a terminal state.

    Succeeds on 'SUCCEEDED'; raises AirflowException on 'FAILED', 'STOPPED'
    or 'TIMEOUT'; keeps poking otherwise.

    :param job_name: The AWS Glue Job unique name
    :type job_name: str
    :param run_id: The AWS Glue current running job identifier
    :type run_id: str
    :param aws_conn_id: Airflow connection used for AWS credentials
    :type aws_conn_id: str
    """

    template_fields = ('job_name', 'run_id')

    def __init__(self, *, job_name: str, run_id: str, aws_conn_id: str = 'aws_default', **kwargs):
        super().__init__(**kwargs)
        self.job_name = job_name
        self.run_id = run_id
        self.aws_conn_id = aws_conn_id
        # Terminal states: success vs. states that fail the task.
        self.success_states = ['SUCCEEDED']
        self.errored_states = ['FAILED', 'STOPPED', 'TIMEOUT']

    def poke(self, context):
        """Return True when the run succeeded; raise on error states."""
        hook = AwsGlueJobHook(aws_conn_id=self.aws_conn_id)
        self.log.info("Poking for job run status :for Glue Job %s and ID %s", self.job_name, self.run_id)
        job_state = hook.get_job_state(job_name=self.job_name, run_id=self.run_id)
        if job_state in self.success_states:
            self.log.info("Exiting Job %s Run State: %s", self.run_id, job_state)
            return True
        elif job_state in self.errored_states:
            job_error_message = "Exiting Job " + self.run_id + " Run State: " + job_state
            raise AirflowException(job_error_message)
        else:
            # Still running / starting -- keep poking.
            return False
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import pytz
import pytest
def test_utcts_fail():
    """utcts must reject timezone-aware datetimes with ValueError."""
    from pynunzen.helpers import utcts
    naive = datetime.datetime(2017, 1, 1, 12, 0, 0)
    amsterdam = pytz.timezone('Europe/Amsterdam')
    aware = amsterdam.normalize(amsterdam.localize(naive, is_dst=True))
    with pytest.raises(ValueError):
        utcts(aware)
def test_utcts():
    """utcts converts a naive UTC datetime to the expected Unix timestamp."""
    from pynunzen.helpers import utcts
    dt = datetime.datetime(2017, 1, 1, 12, 0, 0)
    # Bind the result to a distinct name; the previous version rebound
    # ``utcts`` itself, shadowing the function under test.
    ts = utcts(dt)
    assert ts == 1483272000
def test_double_hash256():
    """double_sha256 of a plain string yields a stable hex digest."""
    from pynunzen.helpers import double_sha256
    digest = double_sha256("Foobar")
    assert digest == "e501c5d9636166687cb24409f3a45684cf3722bf1f18bf485acd4f64635a09f0"
def test_double_hash256_None():
    """double_sha256 accepts None and hashes its string representation."""
    from pynunzen.helpers import double_sha256
    digest = double_sha256(None)
    assert digest == "cd372fb85148700fa88095e3492d3f9f5beb43e555e5ff26d95f5a6adc36f8e6"
def test_double_hash256_nonstring():
    """double_sha256 accepts non-string input such as an int."""
    from pynunzen.helpers import double_sha256
    digest = double_sha256(21)
    assert digest == "053b22ca1fcea7a8de0da76b0f4deaef4aa9fb1100bff13965c3c0da76272862"
#############################################################################
##
## Copyright (c) 2015 Riverbank Computing Limited <info@riverbankcomputing.com>
##
## This file is part of PyQt4.
##
## This file may be used under the terms of the GNU General Public License
## version 3.0 as published by the Free Software Foundation and appearing in
## the file LICENSE included in the packaging of this file. Please review the
## following information to ensure the GNU General Public License version 3.0
## requirements will be met: http://www.gnu.org/copyleft/gpl.html.
##
## If you do not wish to use this file under the terms of the GPL version 3.0
## then you may purchase a commercial license. For more information contact
## info@riverbankcomputing.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
# If pluginType is MODULE, the plugin loader will call moduleInformation. The
# variable MODULE is inserted into the local namespace by the plugin loader.
pluginType = MODULE  # noqa: F821 -- MODULE is injected by the Designer plugin loader
# moduleInformation() must return a tuple (module, widget_list). If "module"
# is "A" and any widget from this module is used, the code generator will write
# "import A". If "module" is "A[.B].C", the code generator will write
# "from A[.B] import C". Each entry in "widget_list" must be unique.
def moduleInformation():
    # The Phonon widgets this plugin exposes to Qt Designer.
    phonon_widgets = (
        "Phonon.SeekSlider",
        "Phonon.VideoPlayer",
        "Phonon.VolumeSlider",
    )
    return "PyQt4.phonon", phonon_widgets
# Resmoke suite: runs the enterprise streams Kafka JS tests against a
# single-node replica set with the streams feature flag enabled.
test_kind: js_test

selector:
  roots:
    - src/mongo/db/modules/*/jstests/streams_kafka/*.js
  exclude_files:
    # Shared helper library, not a standalone test.
    - src/mongo/db/modules/enterprise/jstests/streams_kafka/kafka_utils.js

executor:
  fixture:
    class: ReplicaSetFixture
    mongod_options:
      # Listen on all interfaces so the Kafka containers can reach mongod.
      bind_ip_all: ""
      set_parameters:
        enableTestCommands: 1
        featureFlagStreams: true
        # FTDC is noise for these tests; keep it off.
        diagnosticDataCollectionEnabled: false
    num_nodes: 1
"""Support for information from HP iLO sensors."""
from datetime import timedelta
import logging
import hpilo
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST,
CONF_MONITORED_VARIABLES,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SENSOR_TYPE,
CONF_UNIT_OF_MEASUREMENT,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)

DEFAULT_NAME = "HP ILO"
DEFAULT_PORT = 443

# Minimum delay between successive iLO API polls (shared by all sensors).
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=300)

# Supported sensor types: config key -> [friendly name, hpilo.Ilo method name].
SENSOR_TYPES = {
    "server_name": ["Server Name", "get_server_name"],
    "server_fqdn": ["Server FQDN", "get_server_fqdn"],
    "server_host_data": ["Server Host Data", "get_host_data"],
    "server_oa_info": ["Server Onboard Administrator Info", "get_oa_info"],
    "server_power_status": ["Server Power state", "get_host_power_status"],
    "server_power_readings": ["Server Power readings", "get_power_readings"],
    "server_power_on_time": ["Server Power On time", "get_server_power_on_time"],
    "server_asset_tag": ["Server Asset Tag", "get_asset_tag"],
    "server_uid_status": ["Server UID light", "get_uid_status"],
    "server_health": ["Server Health", "get_embedded_health"],
    "network_settings": ["Network Settings", "get_network_settings"],
}

# Platform configuration: iLO credentials plus a list of monitored variables,
# each mapping to one of the SENSOR_TYPES above.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Required(CONF_USERNAME): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_MONITORED_VARIABLES, default=[]): vol.All(
            cv.ensure_list,
            [
                vol.Schema(
                    {
                        vol.Required(CONF_NAME): cv.string,
                        vol.Required(CONF_SENSOR_TYPE): vol.All(
                            cv.string, vol.In(SENSOR_TYPES)
                        ),
                        vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
                        vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
                    }
                )
            ],
        ),
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the HP iLO sensors."""
    hostname = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    login = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    monitored_variables = config.get(CONF_MONITORED_VARIABLES)

    # Create a data fetcher to support all of the configured sensors. Then make
    # the first call to init the data and confirm we can connect.
    try:
        hp_ilo_data = HpIloData(hostname, port, login, password)
    except ValueError as error:
        # Connection/auth failure: log and abort platform setup.
        _LOGGER.error(error)
        return

    # Initialize and add all of the sensors.
    devices = []
    for monitored_variable in monitored_variables:
        new_device = HpIloSensor(
            hass=hass,
            hp_ilo_data=hp_ilo_data,
            sensor_name=f"{config.get(CONF_NAME)} {monitored_variable[CONF_NAME]}",
            sensor_type=monitored_variable[CONF_SENSOR_TYPE],
            sensor_value_template=monitored_variable.get(CONF_VALUE_TEMPLATE),
            unit_of_measurement=monitored_variable.get(CONF_UNIT_OF_MEASUREMENT),
        )
        devices.append(new_device)

    # update_before_add=True: fetch initial state before registering entities.
    add_entities(devices, True)
class HpIloSensor(Entity):
    """Representation of a HP iLO sensor."""

    def __init__(
        self,
        hass,
        hp_ilo_data,
        sensor_type,
        sensor_name,
        sensor_value_template,
        unit_of_measurement,
    ):
        """Initialize the HP iLO sensor.

        sensor_type must be a key of SENSOR_TYPES; its mapped method name on
        hpilo.Ilo is called on every update. sensor_value_template, if given,
        post-processes the raw API result.
        """
        self._hass = hass
        self._name = sensor_name
        self._unit_of_measurement = unit_of_measurement
        # Name of the hpilo.Ilo method to invoke for this sensor.
        self._ilo_function = SENSOR_TYPES[sensor_type][1]
        self.hp_ilo_data = hp_ilo_data

        if sensor_value_template is not None:
            sensor_value_template.hass = hass
        self._sensor_value_template = sensor_value_template

        self._state = None
        self._state_attributes = None

        _LOGGER.debug("Created HP iLO sensor %r", self)

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of the sensor."""
        return self._unit_of_measurement

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        return self._state_attributes

    def update(self):
        """Get the latest data from HP iLO and updates the states."""
        # Call the API for new data. Each sensor will re-trigger this
        # same exact call, but that's fine. Results should be cached for
        # a short period of time to prevent hitting API limits.
        self.hp_ilo_data.update()
        ilo_data = getattr(self.hp_ilo_data.data, self._ilo_function)()

        if self._sensor_value_template is not None:
            ilo_data = self._sensor_value_template.render(
                ilo_data=ilo_data, parse_result=False
            )

        self._state = ilo_data
class HpIloData:
    """Gets the latest data from HP iLO."""

    def __init__(self, host, port, login, password):
        """Initialize the data object and verify connectivity.

        Raises ValueError (via update()) if the iLO cannot be reached or
        the credentials are rejected.
        """
        self._host = host
        self._port = port
        self._login = login
        self._password = password

        self.data = None

        # First fetch doubles as a connectivity/credentials check.
        self.update()

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest data from HP iLO.

        Throttled so that multiple sensors sharing this object do not
        hammer the iLO API.
        """
        try:
            self.data = hpilo.Ilo(
                hostname=self._host,
                login=self._login,
                password=self._password,
                port=self._port,
            )
        except (
            hpilo.IloError,
            hpilo.IloCommunicationError,
            hpilo.IloLoginFailed,
        ) as error:
            raise ValueError(f"Unable to init HP ILO, {error}") from error
from ctypes import c_void_p
from django.contrib.gis.geos.error import GEOSException
# Trying to import GDAL libraries, if available. Have to place in
# try/except since this package may be used outside GeoDjango.
try:
    from django.contrib.gis import gdal
except ImportError:
    # A 'dummy' gdal module exposing only HAS_GDAL, so callers can always
    # check ``gdal.HAS_GDAL`` without guarding the import themselves.
    class GDALInfo(object):
        HAS_GDAL = False

    gdal = GDALInfo()
class GEOSBase(object):
    """
    Base object for GEOS objects that has a pointer access property
    that controls access to the underlying C pointer.
    """
    # Initially the pointer is NULL.
    _ptr = None

    # Default allowed pointer type.
    ptr_type = c_void_p

    def _get_ptr(self):
        # Guard clause: never hand out a NULL pointer, since passing one
        # to the GEOS C routines would be very bad.
        if not self._ptr:
            raise GEOSException('NULL GEOS %s pointer encountered.' % self.__class__.__name__)
        return self._ptr

    def _set_ptr(self, ptr):
        # Accept only None (NULL) or a pointer of the compatible ctypes type.
        if not (ptr is None or isinstance(ptr, self.ptr_type)):
            raise TypeError('Incompatible pointer type')
        self._ptr = ptr

    # Property for controlling access to the GEOS object pointers. Using
    # this raises an exception when the pointer is NULL, thus preventing
    # the C library from attempting to access an invalid memory location.
    ptr = property(_get_ptr, _set_ptr)
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2018 Matthias Klumpp <matthias@tenstral.net>
#
# Licensed under the GNU Lesser General Public License Version 3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the license, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import os
import subprocess
import select
import time
from contextlib import contextmanager
from schroot.chroot import SchrootChroot
@contextmanager
def spark_schroot(name, job_id):
    """Context manager yielding an active schroot session for a job.

    Yields a tuple (chroot, workspace_dir, results_dir), where the two dirs
    are paths *inside* the chroot. The session is always ended and the
    original working directory restored on exit.
    """
    ncwd = os.getcwd()
    ch = SchrootChroot()

    # the workspace dir name inside the chroot
    wsdir = '/workspaces/{}'.format(job_id)
    results_dir = '/workspaces/{}/result'.format(job_id)
    try:
        # change to neutral directory
        os.chdir('/tmp')

        ch.start(name)
        yield (ch, wsdir, results_dir)
    finally:
        try:
            # hack to allow the worker to delete the newly created files
            ch.run([
                'chmod', '-R', '777', wsdir
            ], user='root')
        except: # noqa: E722
            # Best-effort cleanup: never let the chmod hack mask the
            # original exception or prevent ending the session.
            pass
        ch.end()
        os.chdir(ncwd)
def chroot_run_logged(schroot, jlog, cmd, **kwargs):
    """Run ``cmd`` inside the chroot, streaming its combined stdout/stderr
    into ``jlog`` as it is produced. Returns the process exit code.

    NOTE(review): p.stdout.read() yields bytes while the failure message
    below is written as str -- confirm jlog.write() accepts both.
    """
    p = schroot.Popen(cmd, **kwargs, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    sel = select.poll()
    sel.register(p.stdout, select.POLLIN)

    while True:
        if sel.poll(2):
            jlog.write(p.stdout.read())
        else:
            time.sleep(4)  # wait a little for the process to write more output

        if p.poll() is not None:
            # Process exited: drain any remaining buffered output.
            if sel.poll(1):
                jlog.write(p.stdout.read())
            break

    ret = p.returncode
    if ret:
        jlog.write('Command {0} failed with error code {1}'.format(cmd, ret))
    return ret
def chroot_copy(chroot, what, whence, user=None):
    """Copy the local file ``what`` to the path ``whence`` inside the chroot,
    optionally creating the target file as ``user``."""
    import shutil

    with chroot.create_file(whence, user) as target:
        with open(what, 'rb') as source:
            shutil.copyfileobj(source, target)
def chroot_upgrade(chroot, jlog):
    """Refresh the package index and fully upgrade the chroot.

    Returns True on success, False as soon as either apt-get step fails.
    """
    steps = (
        ['apt-get', 'update'],
        ['apt-get', 'full-upgrade', '-y'],
    )
    for cmd in steps:
        if chroot_run_logged(chroot, jlog, cmd, user='root'):
            return False
    return True
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"context"
coordinationv1 "k8s.io/api/coordination/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
)
// SetNodeOwnerFunc helps construct a newLeasePostProcessFunc which sets
// a node OwnerReference to the given lease object
func SetNodeOwnerFunc(ctx context.Context, c clientset.Interface, nodeName string) func(lease *coordinationv1.Lease) error {
return func(lease *coordinationv1.Lease) error {
// Setting owner reference needs node's UID. Note that it is different from
// kubelet.nodeRef.UID. When lease is initially created, it is possible that
// the connection between master and node is not ready yet. So try to set
// owner reference every time when renewing the lease, until successful.
if len(lease.OwnerReferences) == 0 {
if node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}); err == nil {
lease.OwnerReferences = []metav1.OwnerReference{
{
APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version,
Kind: corev1.SchemeGroupVersion.WithKind("Node").Kind,
Name: nodeName,
UID: node.UID,
},
}
} else {
logger := klog.FromContext(ctx)
logger.Error(err, "Failed to get node when trying to set owner ref to the node lease", "node", klog.KRef("", nodeName))
return err
}
}
return nil
}
} | go | github | https://github.com/kubernetes/kubernetes | pkg/kubelet/util/nodelease.go |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pyatv/mrp/protobuf/RemoveClientMessage.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


from pyatv.mrp.protobuf import ProtocolMessage_pb2 as pyatv_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2
from pyatv.mrp.protobuf import NowPlayingClient_pb2 as pyatv_dot_mrp_dot_protobuf_dot_NowPlayingClient__pb2


DESCRIPTOR = _descriptor.FileDescriptor(
    name='pyatv/mrp/protobuf/RemoveClientMessage.proto',
    package='',
    syntax='proto2',
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
    serialized_pb=b'\n,pyatv/mrp/protobuf/RemoveClientMessage.proto\x1a(pyatv/mrp/protobuf/ProtocolMessage.proto\x1a)pyatv/mrp/protobuf/NowPlayingClient.proto\"8\n\x13RemoveClientMessage\x12!\n\x06\x63lient\x18\x01 \x01(\x0b\x32\x11.NowPlayingClient:C\n\x13removeClientMessage\x12\x10.ProtocolMessage\x18\x39 \x01(\x0b\x32\x14.RemoveClientMessage'
    ,
    dependencies=[pyatv_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.DESCRIPTOR,pyatv_dot_mrp_dot_protobuf_dot_NowPlayingClient__pb2.DESCRIPTOR,])


# Extension field (number 57) that attaches a RemoveClientMessage to the
# top-level ProtocolMessage envelope.
REMOVECLIENTMESSAGE_FIELD_NUMBER = 57
removeClientMessage = _descriptor.FieldDescriptor(
    name='removeClientMessage', full_name='removeClientMessage', index=0,
    number=57, type=11, cpp_type=10, label=1,
    has_default_value=False, default_value=None,
    message_type=None, enum_type=None, containing_type=None,
    is_extension=True, extension_scope=None,
    serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)

# Descriptor for RemoveClientMessage: a single optional NowPlayingClient field.
_REMOVECLIENTMESSAGE = _descriptor.Descriptor(
    name='RemoveClientMessage',
    full_name='RemoveClientMessage',
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name='client', full_name='RemoveClientMessage.client', index=0,
            number=1, type=11, cpp_type=10, label=1,
            has_default_value=False, default_value=None,
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None,
            serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    ],
    extensions=[
    ],
    nested_types=[],
    enum_types=[
    ],
    serialized_options=None,
    is_extendable=False,
    syntax='proto2',
    extension_ranges=[],
    oneofs=[
    ],
    serialized_start=133,
    serialized_end=189,
)

_REMOVECLIENTMESSAGE.fields_by_name['client'].message_type = pyatv_dot_mrp_dot_protobuf_dot_NowPlayingClient__pb2._NOWPLAYINGCLIENT
DESCRIPTOR.message_types_by_name['RemoveClientMessage'] = _REMOVECLIENTMESSAGE
DESCRIPTOR.extensions_by_name['removeClientMessage'] = removeClientMessage
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message class generated from the descriptor above.
RemoveClientMessage = _reflection.GeneratedProtocolMessageType('RemoveClientMessage', (_message.Message,), {
    'DESCRIPTOR' : _REMOVECLIENTMESSAGE,
    '__module__' : 'pyatv.mrp.protobuf.RemoveClientMessage_pb2'
    # @@protoc_insertion_point(class_scope:RemoveClientMessage)
})
_sym_db.RegisterMessage(RemoveClientMessage)

removeClientMessage.message_type = _REMOVECLIENTMESSAGE
pyatv_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.ProtocolMessage.RegisterExtension(removeClientMessage)

# @@protoc_insertion_point(module_scope)
import os
import subprocess
import sys
from PyQt5 import QtCore, QtWidgets
from ouf.filemodel.proxymodel import FileProxyModel
from ouf.view.filenamedelegate import FileNameDelegate
from ouf import shortcuts
# TODO: modifiers to open in new window
# TODO: switch icons / tree
# TODO: modify icon size
class FileView(QtWidgets.QTreeView):
    """Tree view over a FileProxyModel: navigation, open, delete and drag-drop."""

    # Emitted when the displayed directory changes; payload is the new
    # absolute path ("" for the filesystem root).
    current_path_changed = QtCore.pyqtSignal(str)

    def __init__(self, model, parent=None):
        """Wrap *model* in a FileProxyModel and configure view behaviour."""
        super().__init__(parent)
        self.proxy = FileProxyModel()
        self.proxy.setSourceModel(model)
        self._create_actions()
        self.setModel(self.proxy)
        self.setSortingEnabled(True)
        self.sortByColumn(0, QtCore.Qt.AscendingOrder)
        self.setIconSize(QtCore.QSize(32, 32))
        self.setSelectionMode(self.ExtendedSelection)
        self.setSelectionBehavior(self.SelectRows)
        self.setUniformRowHeights(True)
        self.setAllColumnsShowFocus(True)
        # self.setAnimated(True)
        self.setEditTriggers(self.SelectedClicked | self.EditKeyPressed)
        # drag & drop: move by default, copy/link via modifiers (dragMoveEvent)
        self.setDefaultDropAction(QtCore.Qt.MoveAction)
        self.setDragDropMode(self.DragDrop)
        self.setDragDropOverwriteMode(False)
        self.setDragEnabled(True)
        self.setAutoExpandDelay(200)
        self._file_name_delegate = FileNameDelegate(self)
        self.setItemDelegateForColumn(0, self._file_name_delegate)
        self.activated.connect(self.open_action)

    def _create_actions(self):
        """Create the delete and show-hidden actions with their shortcuts."""
        # NOTE(review): ``_`` is assumed to be a globally installed gettext
        # alias — it is not imported in this file; confirm it is set up
        # elsewhere in the application.
        self.action_delete = QtWidgets.QAction(_("Suppress Selected Files"), self)
        self.action_delete.setShortcuts(shortcuts.delete)
        self.action_delete.triggered.connect(self.delete_selected_files)
        self.action_delete.setEnabled(False)  # enabled once something is selected

        self.action_hidden = QtWidgets.QAction(_("Show Hidden Files"), self)
        self.action_hidden.setShortcuts(shortcuts.hidden_files)
        self.action_hidden.setCheckable(True)
        self.action_hidden.setChecked(self.proxy.show_hidden)
        self.action_hidden.toggled.connect(self.show_hide_hidden_files)

    def selectionChanged(self, selected, deselected):
        # Keep the delete action in sync with whether anything is selected.
        super().selectionChanged(selected, deselected)
        self.action_delete.setEnabled(bool(self.selectedIndexes()))

    def dragEnterEvent(self, event):
        # Accept the drag only if it carries at least one local file URL.
        if event.mimeData().hasUrls():
            if any(u.toLocalFile() for u in event.mimeData().urls()):
                event.accept()
                return
        event.ignore()

    def dragLeaveEvent(self, event):
        # Intentionally a no-op (suppresses the default leave handling).
        pass

    def dragMoveEvent(self, event):
        # Ctrl = copy, Ctrl+Shift = link, otherwise move.
        super().dragMoveEvent(event)
        if event.keyboardModifiers() & QtCore.Qt.CTRL:
            if event.keyboardModifiers() & QtCore.Qt.SHIFT:
                event.setDropAction(QtCore.Qt.LinkAction)
            else:
                event.setDropAction(QtCore.Qt.CopyAction)
        else:
            event.setDropAction(QtCore.Qt.MoveAction)
        event.accept()

    def dropEvent(self, event):
        # Forward the drop to the model at the hovered index.
        # NOTE(review): the event is never accepted here — verify whether
        # event.acceptProposedAction() is intentionally omitted.
        index = self.indexAt(event.pos())
        # index = self.proxy.mapToSource(pindex)
        self.model().dropMimeData(event.mimeData(), event.dropAction(), index.row(), index.column(), index.parent())

    def open_action(self, index):
        """Open the item at *index* (a proxy index).

        Directories become the new view root; regular files are launched with
        the platform opener.  An invalid index navigates back to the root.

        Args:
            index: proxy index
        Returns:
        """
        if index.isValid():
            item = self.proxy.mapToSource(index).internalPointer()
            if item.is_lock:
                # TODO: prevent user
                return
            if item.is_dir:
                self.setRootIndex(self.proxy.index(index.row(), 0, index.parent()))
                #TODO: unselect
                self.current_path_changed.emit(item.path)
                QtCore.QCoreApplication.processEvents()  # Ensure the new path is set before resizing
                self.resizeColumnToContents(0)
            else:
                # TODO: open file / exec process / etc.
                if sys.platform.startswith('linux'):
                    subprocess.run(['xdg-open', item.path])
                else:
                    os.startfile(item.path)  # windows
        else:
            # go to root
            self.setRootIndex(index)
            self.current_path_changed.emit("")

    def delete_selected_files(self):
        """Delete every selected file via the source model, then refresh."""
        selection = self.proxy.mapSelectionToSource(self.selectionModel().selection())
        self.proxy.sourceModel().delete_files(selection.indexes())
        self.proxy.invalidate()

    def show_hide_hidden_files(self, show):
        """Toggle visibility of hidden files through the proxy model."""
        self.proxy.show_hidden = show
import numpy as np
from numpy.testing import assert_equal, assert_, assert_raises
import pandas
import pandas.util.testing as ptesting
from statsmodels.base import data as sm_data
from statsmodels.formula import handle_formula_data
#class TestDates(object):
# @classmethod
# def setupClass(cls):
# nrows = 10
# cls.dates_result = cls.dates_results = np.random.random(nrows)
#
# def test_dates(self):
# np.testing.assert_equal(data.wrap_output(self.dates_input, 'dates'),
# self.dates_result)
class TestArrays(object):
    """Checks sm_data.handle_data with plain ndarray endog/exog.

    With ndarray inputs, wrap_output is an identity, so the expected results
    equal the inputs.  Subclasses override setupClass to vary input types.
    """
    @classmethod
    def setupClass(cls):
        cls.endog = np.random.random(10)
        cls.exog = np.c_[np.ones(10), np.random.random((10,2))]
        cls.data = sm_data.handle_data(cls.endog, cls.exog)
        nrows = 10
        nvars = 3
        # for plain arrays, wrapped output is the raw input
        cls.col_result = cls.col_input = np.random.random(nvars)
        cls.row_result = cls.row_input = np.random.random(nrows)
        cls.cov_result = cls.cov_input = np.random.random((nvars, nvars))
        cls.xnames = ['const', 'x1', 'x2']
        cls.ynames = 'y'
        cls.row_labels = None

    def test_orig(self):
        np.testing.assert_equal(self.data.orig_endog, self.endog)
        np.testing.assert_equal(self.data.orig_exog, self.exog)

    def test_endogexog(self):
        np.testing.assert_equal(self.data.endog, self.endog)
        np.testing.assert_equal(self.data.exog, self.exog)

    def test_attach(self):
        data = self.data
        # this makes sure what the wrappers need work but not the wrapped
        # results themselves
        np.testing.assert_equal(data.wrap_output(self.col_input, 'columns'),
                                self.col_result)
        np.testing.assert_equal(data.wrap_output(self.row_input, 'rows'),
                                self.row_result)
        np.testing.assert_equal(data.wrap_output(self.cov_input, 'cov'),
                                self.cov_result)

    def test_names(self):
        data = self.data
        np.testing.assert_equal(data.xnames, self.xnames)
        np.testing.assert_equal(data.ynames, self.ynames)

    def test_labels(self):
        #HACK: because numpy master after NA stuff assert_equal fails on
        # pandas indices
        np.testing.assert_(np.all(self.data.row_labels == self.row_labels))
class TestArrays2dEndog(TestArrays):
    """endog given as a (10, 1) column array is squeezed to 1-D."""
    @classmethod
    def setupClass(cls):
        super(TestArrays2dEndog, cls).setupClass()
        cls.endog = np.random.random((10,1))
        cls.exog = np.c_[np.ones(10), np.random.random((10,2))]
        cls.data = sm_data.handle_data(cls.endog, cls.exog)
        #cls.endog = endog.squeeze()

    def test_endogexog(self):
        np.testing.assert_equal(self.data.endog, self.endog.squeeze())
        np.testing.assert_equal(self.data.exog, self.exog)
class TestArrays1dExog(TestArrays):
    """1-D exog is promoted to a single-column 2-D array by handle_data."""
    @classmethod
    def setupClass(cls):
        super(TestArrays1dExog, cls).setupClass()
        cls.endog = np.random.random(10)
        exog = np.random.random(10)
        cls.data = sm_data.handle_data(cls.endog, exog)
        cls.exog = exog[:,None]  # keep the 2-D version for the assertions
        cls.xnames = ['x1']
        cls.ynames = 'y'

    def test_orig(self):
        # orig_exog keeps the user's original 1-D shape
        np.testing.assert_equal(self.data.orig_endog, self.endog)
        np.testing.assert_equal(self.data.orig_exog, self.exog.squeeze())
class TestDataFrames(TestArrays):
    """DataFrame endog/exog: outputs are re-wrapped in pandas objects."""
    @classmethod
    def setupClass(cls):
        cls.endog = pandas.DataFrame(np.random.random(10), columns=['y_1'])
        exog = pandas.DataFrame(np.random.random((10,2)),
                                columns=['x_1','x_2'])
        exog.insert(0, 'const', 1)
        cls.exog = exog
        cls.data = sm_data.handle_data(cls.endog, cls.exog)
        nrows = 10
        nvars = 3
        # wrapped outputs should carry the exog index/columns
        cls.col_input = np.random.random(nvars)
        cls.col_result = pandas.Series(cls.col_input,
                                       index=exog.columns)
        cls.row_input = np.random.random(nrows)
        cls.row_result = pandas.Series(cls.row_input,
                                       index=exog.index)
        cls.cov_input = np.random.random((nvars, nvars))
        cls.cov_result = pandas.DataFrame(cls.cov_input,
                                          index=exog.columns,
                                          columns=exog.columns)
        cls.xnames = ['const', 'x_1', 'x_2']
        cls.ynames = 'y_1'
        cls.row_labels = cls.exog.index

    def test_orig(self):
        ptesting.assert_frame_equal(self.data.orig_endog, self.endog)
        ptesting.assert_frame_equal(self.data.orig_exog, self.exog)

    def test_endogexog(self):
        # internal arrays are plain ndarrays, endog squeezed to 1-D
        np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
        np.testing.assert_equal(self.data.exog, self.exog.values)

    def test_attach(self):
        data = self.data
        # this makes sure what the wrappers need work but not the wrapped
        # results themselves
        ptesting.assert_series_equal(data.wrap_output(self.col_input,
                                                      'columns'),
                                     self.col_result)
        ptesting.assert_series_equal(data.wrap_output(self.row_input, 'rows'),
                                     self.row_result)
        ptesting.assert_frame_equal(data.wrap_output(self.cov_input, 'cov'),
                                    self.cov_result)
class TestLists(TestArrays):
    """Plain Python lists behave like ndarrays after conversion."""
    @classmethod
    def setupClass(cls):
        super(TestLists, cls).setupClass()
        cls.endog = np.random.random(10).tolist()
        cls.exog = np.c_[np.ones(10), np.random.random((10,2))].tolist()
        cls.data = sm_data.handle_data(cls.endog, cls.exog)
class TestRecarrays(TestArrays):
    """Record arrays: field names become xnames/ynames, values unpacked."""
    @classmethod
    def setupClass(cls):
        super(TestRecarrays, cls).setupClass()
        cls.endog = np.random.random(9).view([('y_1',
                                               'f8')]).view(np.recarray)
        exog = np.random.random(9*3).view([('const', 'f8'),('x_1', 'f8'),
                                           ('x_2', 'f8')]).view(np.recarray)
        exog['const'] = 1
        cls.exog = exog
        cls.data = sm_data.handle_data(cls.endog, cls.exog)
        cls.xnames = ['const', 'x_1', 'x_2']
        cls.ynames = 'y_1'

    def test_endogexog(self):
        # compare against the unstructured float views of the rec arrays
        np.testing.assert_equal(self.data.endog, self.endog.view(float))
        np.testing.assert_equal(self.data.exog, self.exog.view((float,3)))
class TestStructarrays(TestArrays):
    """Structured-array inputs.

    NOTE(review): this is byte-for-byte identical to TestRecarrays and still
    views through ``np.recarray`` — presumably it was meant to use plain
    structured arrays (no ``.view(np.recarray)``); TODO confirm.
    """
    @classmethod
    def setupClass(cls):
        super(TestStructarrays, cls).setupClass()
        cls.endog = np.random.random(9).view([('y_1',
                                               'f8')]).view(np.recarray)
        exog = np.random.random(9*3).view([('const', 'f8'),('x_1', 'f8'),
                                           ('x_2', 'f8')]).view(np.recarray)
        exog['const'] = 1
        cls.exog = exog
        cls.data = sm_data.handle_data(cls.endog, cls.exog)
        cls.xnames = ['const', 'x_1', 'x_2']
        cls.ynames = 'y_1'

    def test_endogexog(self):
        np.testing.assert_equal(self.data.endog, self.endog.view(float))
        np.testing.assert_equal(self.data.exog, self.exog.view((float,3)))
class TestListDataFrame(TestDataFrames):
    """Mixed input: list endog with DataFrame exog (exog metadata wins)."""
    @classmethod
    def setupClass(cls):
        cls.endog = np.random.random(10).tolist()
        exog = pandas.DataFrame(np.random.random((10,2)),
                                columns=['x_1','x_2'])
        exog.insert(0, 'const', 1)
        cls.exog = exog
        cls.data = sm_data.handle_data(cls.endog, cls.exog)
        nrows = 10
        nvars = 3
        cls.col_input = np.random.random(nvars)
        cls.col_result = pandas.Series(cls.col_input,
                                       index=exog.columns)
        cls.row_input = np.random.random(nrows)
        cls.row_result = pandas.Series(cls.row_input,
                                       index=exog.index)
        cls.cov_input = np.random.random((nvars, nvars))
        cls.cov_result = pandas.DataFrame(cls.cov_input,
                                          index=exog.columns,
                                          columns=exog.columns)
        cls.xnames = ['const', 'x_1', 'x_2']
        cls.ynames = 'y'  # endog is an unnamed list, so the default name
        cls.row_labels = cls.exog.index

    def test_endogexog(self):
        np.testing.assert_equal(self.data.endog, self.endog)
        np.testing.assert_equal(self.data.exog, self.exog.values)

    def test_orig(self):
        np.testing.assert_equal(self.data.orig_endog, self.endog)
        ptesting.assert_frame_equal(self.data.orig_exog, self.exog)
class TestDataFrameList(TestDataFrames):
    """Mixed input: DataFrame endog with list exog (endog supplies the index)."""
    @classmethod
    def setupClass(cls):
        cls.endog = pandas.DataFrame(np.random.random(10), columns=['y_1'])
        exog = pandas.DataFrame(np.random.random((10,2)),
                                columns=['x1','x2'])
        exog.insert(0, 'const', 1)
        cls.exog = exog.values.tolist()
        cls.data = sm_data.handle_data(cls.endog, cls.exog)
        nrows = 10
        nvars = 3
        cls.col_input = np.random.random(nvars)
        cls.col_result = pandas.Series(cls.col_input,
                                       index=exog.columns)
        cls.row_input = np.random.random(nrows)
        cls.row_result = pandas.Series(cls.row_input,
                                       index=exog.index)
        cls.cov_input = np.random.random((nvars, nvars))
        cls.cov_result = pandas.DataFrame(cls.cov_input,
                                          index=exog.columns,
                                          columns=exog.columns)
        cls.xnames = ['const', 'x1', 'x2']
        cls.ynames = 'y_1'
        cls.row_labels = cls.endog.index

    def test_endogexog(self):
        np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
        np.testing.assert_equal(self.data.exog, self.exog)

    def test_orig(self):
        ptesting.assert_frame_equal(self.data.orig_endog, self.endog)
        np.testing.assert_equal(self.data.orig_exog, self.exog)
class TestArrayDataFrame(TestDataFrames):
    """Mixed input: ndarray endog with DataFrame exog."""
    @classmethod
    def setupClass(cls):
        cls.endog = np.random.random(10)
        exog = pandas.DataFrame(np.random.random((10,2)),
                                columns=['x_1','x_2'])
        exog.insert(0, 'const', 1)
        cls.exog = exog
        cls.data = sm_data.handle_data(cls.endog, exog)
        nrows = 10
        nvars = 3
        cls.col_input = np.random.random(nvars)
        cls.col_result = pandas.Series(cls.col_input,
                                       index=exog.columns)
        cls.row_input = np.random.random(nrows)
        cls.row_result = pandas.Series(cls.row_input,
                                       index=exog.index)
        cls.cov_input = np.random.random((nvars, nvars))
        cls.cov_result = pandas.DataFrame(cls.cov_input,
                                          index=exog.columns,
                                          columns=exog.columns)
        cls.xnames = ['const', 'x_1', 'x_2']
        cls.ynames = 'y'  # default name for the unnamed ndarray endog
        cls.row_labels = cls.exog.index

    def test_endogexog(self):
        np.testing.assert_equal(self.data.endog, self.endog)
        np.testing.assert_equal(self.data.exog, self.exog.values)

    def test_orig(self):
        np.testing.assert_equal(self.data.orig_endog, self.endog)
        ptesting.assert_frame_equal(self.data.orig_exog, self.exog)
class TestDataFrameArray(TestDataFrames):
    """Mixed input: DataFrame endog with ndarray exog (default x-names)."""
    @classmethod
    def setupClass(cls):
        cls.endog = pandas.DataFrame(np.random.random(10), columns=['y_1'])
        exog = pandas.DataFrame(np.random.random((10,2)),
                                columns=['x1','x2'])  # names mimic defaults
        exog.insert(0, 'const', 1)
        cls.exog = exog.values
        cls.data = sm_data.handle_data(cls.endog, cls.exog)
        nrows = 10
        nvars = 3
        cls.col_input = np.random.random(nvars)
        cls.col_result = pandas.Series(cls.col_input,
                                       index=exog.columns)
        cls.row_input = np.random.random(nrows)
        cls.row_result = pandas.Series(cls.row_input,
                                       index=exog.index)
        cls.cov_input = np.random.random((nvars, nvars))
        cls.cov_result = pandas.DataFrame(cls.cov_input,
                                          index=exog.columns,
                                          columns=exog.columns)
        cls.xnames = ['const', 'x1', 'x2']
        cls.ynames = 'y_1'
        cls.row_labels = cls.endog.index

    def test_endogexog(self):
        np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
        np.testing.assert_equal(self.data.exog, self.exog)

    def test_orig(self):
        ptesting.assert_frame_equal(self.data.orig_endog, self.endog)
        np.testing.assert_equal(self.data.orig_exog, self.exog)
class TestSeriesDataFrame(TestDataFrames):
    """Series endog with DataFrame exog: orig_endog stays a Series."""
    @classmethod
    def setupClass(cls):
        cls.endog = pandas.Series(np.random.random(10), name='y_1')
        exog = pandas.DataFrame(np.random.random((10,2)),
                                columns=['x_1','x_2'])
        exog.insert(0, 'const', 1)
        cls.exog = exog
        cls.data = sm_data.handle_data(cls.endog, cls.exog)
        nrows = 10
        nvars = 3
        cls.col_input = np.random.random(nvars)
        cls.col_result = pandas.Series(cls.col_input,
                                       index=exog.columns)
        cls.row_input = np.random.random(nrows)
        cls.row_result = pandas.Series(cls.row_input,
                                       index=exog.index)
        cls.cov_input = np.random.random((nvars, nvars))
        cls.cov_result = pandas.DataFrame(cls.cov_input,
                                          index=exog.columns,
                                          columns=exog.columns)
        cls.xnames = ['const', 'x_1', 'x_2']
        cls.ynames = 'y_1'
        cls.row_labels = cls.exog.index

    def test_orig(self):
        ptesting.assert_series_equal(self.data.orig_endog, self.endog)
        ptesting.assert_frame_equal(self.data.orig_exog, self.exog)
class TestSeriesSeries(TestDataFrames):
    """Both endog and exog as Series: single regressor, name-based wrapping."""
    @classmethod
    def setupClass(cls):
        cls.endog = pandas.Series(np.random.random(10), name='y_1')
        exog = pandas.Series(np.random.random(10), name='x_1')
        cls.exog = exog
        cls.data = sm_data.handle_data(cls.endog, cls.exog)
        nrows = 10
        nvars = 1
        cls.col_input = np.random.random(nvars)
        cls.col_result = pandas.Series(cls.col_input,
                                       index=[exog.name])
        cls.row_input = np.random.random(nrows)
        cls.row_result = pandas.Series(cls.row_input,
                                       index=exog.index)
        cls.cov_input = np.random.random((nvars, nvars))
        cls.cov_result = pandas.DataFrame(cls.cov_input,
                                          index=[exog.name],
                                          columns=[exog.name])
        cls.xnames = ['x_1']
        cls.ynames = 'y_1'
        cls.row_labels = cls.exog.index

    def test_orig(self):
        ptesting.assert_series_equal(self.data.orig_endog, self.endog)
        ptesting.assert_series_equal(self.data.orig_exog, self.exog)

    def test_endogexog(self):
        # 1-D exog Series is promoted to a single-column 2-D array
        np.testing.assert_equal(self.data.endog, self.endog.values.squeeze())
        np.testing.assert_equal(self.data.exog, self.exog.values[:,None])
def test_alignment():
    """Misaligned pandas indices between endog and exog must raise (gh-206)."""
    #Fix Issue #206
    from statsmodels.regression.linear_model import OLS
    from statsmodels.datasets.macrodata import load_pandas

    d = load_pandas().data
    #growth rates
    gs_l_realinv = 400 * np.log(d['realinv']).diff().dropna()
    gs_l_realgdp = 400 * np.log(d['realgdp']).diff().dropna()
    lint = d['realint'][:-1]  # incorrect indexing for test purposes

    endog = gs_l_realinv

    # re-index because they won't conform to lint
    realgdp = gs_l_realgdp.reindex(lint.index, method='bfill')
    data = dict(const=np.ones_like(lint), lrealgdp=realgdp, lint=lint)
    exog = pandas.DataFrame(data)

    # which index do we get??
    np.testing.assert_raises(ValueError, OLS, *(endog, exog))
class TestMultipleEqsArrays(TestArrays):
    """Multi-equation (2-D endog) case with ndarray inputs."""
    @classmethod
    def setupClass(cls):
        cls.endog = np.random.random((10,4))
        cls.exog = np.c_[np.ones(10), np.random.random((10,2))]
        cls.data = sm_data.handle_data(cls.endog, cls.exog)
        nrows = 10
        nvars = 3
        neqs = 4
        cls.col_result = cls.col_input = np.random.random(nvars)
        cls.row_result = cls.row_input = np.random.random(nrows)
        cls.cov_result = cls.cov_input = np.random.random((nvars, nvars))
        cls.cov_eq_result = cls.cov_eq_input = np.random.random((neqs,neqs))
        # NOTE(review): np.array((neqs, nvars)) builds the 2-element array
        # [4, 3], not a (neqs, nvars) matrix — probably np.random.random was
        # intended; harmless here since input == expected result.
        cls.col_eq_result = cls.col_eq_input = np.array((neqs, nvars))
        cls.xnames = ['const', 'x1', 'x2']
        cls.ynames = ['y1', 'y2', 'y3', 'y4']
        cls.row_labels = None

    def test_attach(self):
        data = self.data
        # this makes sure what the wrappers need work but not the wrapped
        # results themselves
        np.testing.assert_equal(data.wrap_output(self.col_input, 'columns'),
                                self.col_result)
        np.testing.assert_equal(data.wrap_output(self.row_input, 'rows'),
                                self.row_result)
        np.testing.assert_equal(data.wrap_output(self.cov_input, 'cov'),
                                self.cov_result)
        np.testing.assert_equal(data.wrap_output(self.cov_eq_input, 'cov_eq'),
                                self.cov_eq_result)
        np.testing.assert_equal(data.wrap_output(self.col_eq_input,
                                                 'columns_eq'),
                                self.col_eq_result)
class TestMultipleEqsDataFrames(TestDataFrames):
    """Multi-equation case with DataFrame inputs: eq-wise wraps use endog columns."""
    @classmethod
    def setupClass(cls):
        cls.endog = endog = pandas.DataFrame(np.random.random((10,4)),
                                             columns=['y_1', 'y_2', 'y_3', 'y_4'])
        exog = pandas.DataFrame(np.random.random((10,2)),
                                columns=['x_1','x_2'])
        exog.insert(0, 'const', 1)
        cls.exog = exog
        cls.data = sm_data.handle_data(cls.endog, cls.exog)
        nrows = 10
        nvars = 3
        neqs = 4
        cls.col_input = np.random.random(nvars)
        cls.col_result = pandas.Series(cls.col_input,
                                       index=exog.columns)
        cls.row_input = np.random.random(nrows)
        cls.row_result = pandas.Series(cls.row_input,
                                       index=exog.index)
        cls.cov_input = np.random.random((nvars, nvars))
        cls.cov_result = pandas.DataFrame(cls.cov_input,
                                          index=exog.columns,
                                          columns=exog.columns)
        cls.cov_eq_input = np.random.random((neqs, neqs))
        cls.cov_eq_result = pandas.DataFrame(cls.cov_eq_input,
                                             index=endog.columns,
                                             columns=endog.columns)
        cls.col_eq_input = np.random.random((nvars, neqs))
        cls.col_eq_result = pandas.DataFrame(cls.col_eq_input,
                                             index=exog.columns,
                                             columns=endog.columns)
        cls.xnames = ['const', 'x_1', 'x_2']
        cls.ynames = ['y_1', 'y_2', 'y_3', 'y_4']
        cls.row_labels = cls.exog.index

    def test_attach(self):
        data = self.data
        ptesting.assert_series_equal(data.wrap_output(self.col_input,
                                                      'columns'),
                                     self.col_result)
        ptesting.assert_series_equal(data.wrap_output(self.row_input, 'rows'),
                                     self.row_result)
        ptesting.assert_frame_equal(data.wrap_output(self.cov_input, 'cov'),
                                    self.cov_result)
        ptesting.assert_frame_equal(data.wrap_output(self.cov_eq_input,
                                                     'cov_eq'),
                                    self.cov_eq_result)
        ptesting.assert_frame_equal(data.wrap_output(self.col_eq_input,
                                                     'columns_eq'),
                                    self.col_eq_result)
class TestMissingArray(object):
    """Missing-value handling ('raise'/'drop'/'none') with ndarray inputs."""
    @classmethod
    def setupClass(cls):
        # NaNs in rows 2, 10 and 14
        X = np.random.random((25,4))
        y = np.random.random(25)
        y[10] = np.nan
        X[2,3] = np.nan
        X[14,2] = np.nan
        cls.y, cls.X = y, X

    def test_raise_no_missing(self):
        # smoke test for #1700
        sm_data.handle_data(np.random.random(20), np.random.random((20, 2)),
                            'raise')

    def test_raise(self):
        # BUG FIX: the arguments were previously passed as one tuple, so
        # handle_data failed with an arity TypeError and the test passed
        # vacuously.  Pass them positionally so the missing-value error
        # itself is exercised.
        np.testing.assert_raises(Exception, sm_data.handle_data,
                                 self.y, self.X, 'raise')

    def test_drop(self):
        y = self.y
        X = self.X
        combined = np.c_[y, X]
        idx = ~np.isnan(combined).any(axis=1)
        y = y[idx]
        X = X[idx]
        data = sm_data.handle_data(self.y, self.X, 'drop')
        np.testing.assert_array_equal(data.endog, y)
        np.testing.assert_array_equal(data.exog, X)

    def test_none(self):
        # 'none' leaves the NaNs in place
        data = sm_data.handle_data(self.y, self.X, 'none', hasconst=False)
        np.testing.assert_array_equal(data.endog, self.y)
        np.testing.assert_array_equal(data.exog, self.X)

    def test_endog_only_raise(self):
        # BUG FIX: same tuple-argument problem as test_raise (see above)
        np.testing.assert_raises(Exception, sm_data.handle_data,
                                 self.y, None, 'raise')

    def test_endog_only_drop(self):
        y = self.y
        y = y[~np.isnan(y)]
        data = sm_data.handle_data(self.y, None, 'drop')
        np.testing.assert_array_equal(data.endog, y)

    def test_mv_endog(self):
        # multivariate endog: drop rows with any NaN
        y = self.X
        y = y[~np.isnan(y).any(axis=1)]
        data = sm_data.handle_data(self.X, None, 'drop')
        np.testing.assert_array_equal(data.endog, y)

    def test_extra_kwargs_2d(self):
        # 2-D extra arrays (e.g. sigma) are masked along both axes
        sigma = np.random.random((25, 25))
        sigma = sigma + sigma.T - np.diag(np.diag(sigma))
        data = sm_data.handle_data(self.y, self.X, 'drop', sigma=sigma)
        idx = ~np.isnan(np.c_[self.y, self.X]).any(axis=1)
        sigma = sigma[idx][:,idx]
        np.testing.assert_array_equal(data.sigma, sigma)

    def test_extra_kwargs_1d(self):
        # 1-D extra arrays (e.g. weights) are masked along the row axis
        weights = np.random.random(25)
        data = sm_data.handle_data(self.y, self.X, 'drop', weights=weights)
        idx = ~np.isnan(np.c_[self.y, self.X]).any(axis=1)
        weights = weights[idx]
        np.testing.assert_array_equal(data.weights, weights)
class TestMissingPandas(object):
    """Missing-value handling ('raise'/'drop'/'none') with pandas inputs."""
    @classmethod
    def setupClass(cls):
        # NaNs in rows 2, 10 and 14
        X = np.random.random((25,4))
        y = np.random.random(25)
        y[10] = np.nan
        X[2,3] = np.nan
        X[14,2] = np.nan
        cls.y, cls.X = pandas.Series(y), pandas.DataFrame(X)

    def test_raise_no_missing(self):
        # smoke test for #1700
        sm_data.handle_data(pandas.Series(np.random.random(20)),
                            pandas.DataFrame(np.random.random((20, 2))),
                            'raise')

    def test_raise(self):
        # BUG FIX: the arguments were previously passed as one tuple, so
        # handle_data failed with an arity TypeError and the test passed
        # vacuously.  Pass them positionally so the missing-value error
        # itself is exercised.
        np.testing.assert_raises(Exception, sm_data.handle_data,
                                 self.y, self.X, 'raise')

    def test_drop(self):
        y = self.y
        X = self.X
        combined = np.c_[y, X]
        idx = ~np.isnan(combined).any(axis=1)
        y = y.ix[idx]
        X = X.ix[idx]
        data = sm_data.handle_data(self.y, self.X, 'drop')
        np.testing.assert_array_equal(data.endog, y.values)
        ptesting.assert_series_equal(data.orig_endog, self.y.ix[idx])
        np.testing.assert_array_equal(data.exog, X.values)
        ptesting.assert_frame_equal(data.orig_exog, self.X.ix[idx])

    def test_none(self):
        data = sm_data.handle_data(self.y, self.X, 'none', hasconst=False)
        np.testing.assert_array_equal(data.endog, self.y.values)
        np.testing.assert_array_equal(data.exog, self.X.values)

    def test_endog_only_raise(self):
        # BUG FIX: same tuple-argument problem as test_raise (see above)
        np.testing.assert_raises(Exception, sm_data.handle_data,
                                 self.y, None, 'raise')

    def test_endog_only_drop(self):
        y = self.y
        y = y.dropna()
        data = sm_data.handle_data(self.y, None, 'drop')
        np.testing.assert_array_equal(data.endog, y.values)

    def test_mv_endog(self):
        y = self.X
        y = y.ix[~np.isnan(y.values).any(axis=1)]
        data = sm_data.handle_data(self.X, None, 'drop')
        np.testing.assert_array_equal(data.endog, y.values)

    def test_labels(self):
        # BUG FIX: a stray no-op expression ``2, 10, 14`` was left here;
        # it documented the dropped rows, so turn it into a comment:
        # rows 2, 10 and 14 contain NaNs and are dropped.
        labels = pandas.Index([0, 1, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 15,
                               16, 17, 18, 19, 20, 21, 22, 23, 24])
        data = sm_data.handle_data(self.y, self.X, 'drop')
        np.testing.assert_(data.row_labels.equals(labels))
class TestConstant(object):
    """Constant-column detection (k_constant / const_idx) on the Longley data."""
    @classmethod
    def setupClass(cls):
        from statsmodels.datasets.longley import load_pandas
        cls.data = load_pandas()

    def test_array_constant(self):
        exog = self.data.exog.copy()
        exog['const'] = 1  # appended last -> const_idx == 6
        data = sm_data.handle_data(self.data.endog.values, exog.values)
        np.testing.assert_equal(data.k_constant, 1)
        np.testing.assert_equal(data.const_idx, 6)

    def test_pandas_constant(self):
        exog = self.data.exog.copy()
        exog['const'] = 1
        data = sm_data.handle_data(self.data.endog, exog)
        np.testing.assert_equal(data.k_constant, 1)
        np.testing.assert_equal(data.const_idx, 6)

    def test_pandas_noconstant(self):
        exog = self.data.exog.copy()
        data = sm_data.handle_data(self.data.endog, exog)
        np.testing.assert_equal(data.k_constant, 0)
        np.testing.assert_equal(data.const_idx, None)

    def test_array_noconstant(self):
        exog = self.data.exog.copy()
        data = sm_data.handle_data(self.data.endog.values, exog.values)
        np.testing.assert_equal(data.k_constant, 0)
        np.testing.assert_equal(data.const_idx, None)
class TestHandleMissing(object):
    """Direct tests of sm_data.handle_missing for each input-type combination."""
    def test_pandas(self):
        df = ptesting.makeDataFrame()
        df.values[[2, 5, 10], [2, 3, 1]] = np.nan
        y, X = df[df.columns[0]], df[df.columns[1:]]
        data, _ = sm_data.handle_missing(y, X, missing='drop')

        df = df.dropna()
        y_exp, X_exp = df[df.columns[0]], df[df.columns[1:]]
        ptesting.assert_frame_equal(data['exog'], X_exp)
        ptesting.assert_series_equal(data['endog'], y_exp)

    def test_arrays(self):
        arr = np.random.randn(20, 4)
        arr[[2, 5, 10], [2, 3, 1]] = np.nan
        y, X = arr[:,0], arr[:,1:]
        data, _ = sm_data.handle_missing(y, X, missing='drop')

        bools_mask = np.ones(20, dtype=bool)
        bools_mask[[2, 5, 10]] = False
        y_exp = arr[bools_mask, 0]
        X_exp = arr[bools_mask, 1:]
        np.testing.assert_array_equal(data['endog'], y_exp)
        np.testing.assert_array_equal(data['exog'], X_exp)

    def test_pandas_array(self):
        df = ptesting.makeDataFrame()
        df.values[[2, 5, 10], [2, 3, 1]] = np.nan
        y, X = df[df.columns[0]], df[df.columns[1:]].values
        data, _ = sm_data.handle_missing(y, X, missing='drop')

        df = df.dropna()
        y_exp, X_exp = df[df.columns[0]], df[df.columns[1:]].values
        np.testing.assert_array_equal(data['exog'], X_exp)
        ptesting.assert_series_equal(data['endog'], y_exp)

    def test_array_pandas(self):
        df = ptesting.makeDataFrame()
        df.values[[2, 5, 10], [2, 3, 1]] = np.nan
        y, X = df[df.columns[0]].values, df[df.columns[1:]]
        data, _ = sm_data.handle_missing(y, X, missing='drop')

        df = df.dropna()
        y_exp, X_exp = df[df.columns[0]].values, df[df.columns[1:]]
        ptesting.assert_frame_equal(data['exog'], X_exp)
        np.testing.assert_array_equal(data['endog'], y_exp)

    def test_noop(self):
        # missing='none' must return the inputs untouched
        df = ptesting.makeDataFrame()
        df.values[[2, 5, 10], [2, 3, 1]] = np.nan
        y, X = df[df.columns[0]], df[df.columns[1:]]
        data, _ = sm_data.handle_missing(y, X, missing='none')

        y_exp, X_exp = df[df.columns[0]], df[df.columns[1:]]
        ptesting.assert_frame_equal(data['exog'], X_exp)
        ptesting.assert_series_equal(data['endog'], y_exp)
class CheckHasConstant(object):
    """Shared fixtures/assertions for constant detection across model classes.

    Subclasses set ``mod`` (a model factory), ``y`` (the endog to use) and
    optionally ``fit_kwds``.  ``exogs``/``results`` pair each design matrix
    with the expected ``(k_constant, const_idx)``.
    """
    def test_hasconst(self):
        for x, result in zip(self.exogs, self.results):
            mod = self.mod(self.y, x)
            assert_equal(mod.k_constant, result[0]) #['k_constant'])
            assert_equal(mod.data.k_constant, result[0])
            if result[1] is None:
                assert_(mod.data.const_idx is None)
            else:
                assert_equal(mod.data.const_idx, result[1])

            # extra check after fit, some models raise on singular
            fit_kwds = getattr(self, 'fit_kwds', {})
            try:
                res = mod.fit(**fit_kwds)
                assert_equal(res.model.k_constant, result[0])
                assert_equal(res.model.data.k_constant, result[0])
            except Exception:
                # BUG FIX: was a bare ``except:``, which also swallowed
                # KeyboardInterrupt/SystemExit.  Singular-design failures
                # are still deliberately ignored here.
                pass

    @classmethod
    def setup_class(cls):
        # create data
        np.random.seed(0)
        cls.y_c = np.random.randn(20)
        cls.y_bin = (cls.y_c > 0).astype(int)
        x1 = np.column_stack((np.ones(20), np.zeros(20)))
        result1 = (1, 0)
        x2 = np.column_stack((np.arange(20) < 10.5,
                              np.arange(20) > 10.5)).astype(float)
        result2 = (1, None)  # implicit constant (dummies sum to 1)
        x3 = np.column_stack((np.arange(20), np.zeros(20)))
        result3 = (0, None)
        x4 = np.column_stack((np.arange(20), np.zeros((20, 2))))
        result4 = (0, None)
        x5 = np.column_stack((np.zeros(20), 0.5 * np.ones(20)))
        result5 = (1, 1)
        x5b = np.column_stack((np.arange(20), np.ones((20, 3))))
        result5b = (1, 1)
        x5c = np.column_stack((np.arange(20), np.ones((20, 3)) * [0.5, 1, 1]))
        result5c = (1, 2)
        # implicit and zero column
        x6 = np.column_stack((np.arange(20) < 10.5,
                              np.arange(20) > 10.5,
                              np.zeros(20))).astype(float)
        result6 = (1, None)
        x7 = np.column_stack((np.arange(20) < 10.5,
                              np.arange(20) > 10.5,
                              np.zeros((20, 2)))).astype(float)
        result7 = (1, None)

        cls.exogs = (x1, x2, x3, x4, x5, x5b, x5c, x6, x7)
        cls.results = (result1, result2, result3, result4, result5, result5b,
                       result5c, result6, result7)
class TestHasConstantOLS(CheckHasConstant):
    """Constant detection for OLS with a continuous endog."""

    def __init__(self):
        from statsmodels.regression.linear_model import OLS
        # nose does not invoke setup_class for this style of test class,
        # so trigger the shared fixture creation manually.
        self.setup_class()
        self.mod = OLS
        self.y = self.y_c
class TestHasConstantGLM(CheckHasConstant):
    """Constant detection for a binomial GLM with a binary endog."""

    def __init__(self):
        from statsmodels.genmod import families
        from statsmodels.genmod.generalized_linear_model import GLM
        # nose does not invoke setup_class for this style of test class.
        self.setup_class()

        def _binomial_glm(endog, exog):
            return GLM(endog, exog, family=families.Binomial())

        self.mod = _binomial_glm
        self.y = self.y_bin
class TestHasConstantLogit(CheckHasConstant):
    """Constant detection for Logit with a binary endog."""

    def __init__(self):
        from statsmodels.discrete.discrete_model import Logit
        # nose does not invoke setup_class for this style of test class.
        self.setup_class()
        self.mod = Logit
        self.y = self.y_bin
        # silence the optimizer output during fit
        self.fit_kwds = {'disp': False}
def test_dtype_object():
    # regression test for gh-880: object-dtype exog columns must be rejected
    n_rows = 40
    frame = pandas.DataFrame(np.random.random((n_rows, 2)))
    frame[2] = np.random.randint(2, size=n_rows).astype('object')
    frame['constant'] = 1
    endog = pandas.Series(np.random.randint(2, size=n_rows))
    np.testing.assert_raises(ValueError, sm_data.handle_data, endog, frame)
def test_formula_missing_extra_arrays():
    """Check that extra arrays (weights) are aligned with the rows that
    survive formula-based missing-data handling (see gh-2083).

    Cases: 1-D weights with missing values, 2-D weights with missing values,
    and an extra array of the wrong size (must raise ValueError).
    """
    np.random.seed(1)
    # because patsy can't turn off missing data-handling as of 0.3.0, we need
    # separate tests to make sure that missing values are handled correctly
    # when going through formulas: there is a handle_formula_data step and
    # then there is the regular handle_data step.
    y = np.random.randn(10)
    y_missing = y.copy()
    y_missing[[2, 5]] = np.nan
    X = np.random.randn(10)
    X_missing = X.copy()
    X_missing[[1, 3]] = np.nan

    weights = np.random.uniform(size=10)
    weights_missing = weights.copy()
    weights_missing[[6]] = np.nan
    weights_wrong_size = np.random.randn(12)

    data = {'y': y,
            'X': X,
            'y_missing': y_missing,
            'X_missing': X_missing,
            'weights': weights,
            'weights_missing': weights_missing}
    data = pandas.DataFrame.from_dict(data)
    data['constant'] = 1

    formula = 'y_missing ~ X_missing'

    # case 1: 1-D weights with missing values -- dropped rows must line up
    ((endog, exog),
     missing_idx, design_info) = handle_formula_data(data, None, formula,
                                                     depth=2,
                                                     missing='drop')
    kwargs = {'missing_idx': missing_idx, 'missing': 'drop',
              'weights': data['weights_missing']}
    model_data = sm_data.handle_data(endog, exog, **kwargs)
    data_nona = data.dropna()
    assert_equal(data_nona['y'].values, model_data.endog)
    assert_equal(data_nona[['constant', 'X']].values, model_data.exog)
    assert_equal(data_nona['weights'].values, model_data.weights)

    # case 2: 2-D weights -- both axes must be subset to the surviving rows
    tmp = handle_formula_data(data, None, formula, depth=2, missing='drop')
    (endog, exog), missing_idx, design_info = tmp
    weights_2d = np.random.randn(10, 10)
    weights_2d[[8, 7], [7, 8]] = np.nan  # symmetric missing values
    kwargs.update({'weights': weights_2d,
                   'missing_idx': missing_idx})
    model_data2 = sm_data.handle_data(endog, exog, **kwargs)
    good_idx = [0, 4, 6, 9]
    # .loc replaces the deprecated (and later removed) DataFrame.ix indexer;
    # the index here is the default RangeIndex, so label- and position-based
    # lookup coincide.
    assert_equal(data.loc[good_idx, 'y'], model_data2.endog)
    assert_equal(data.loc[good_idx, ['constant', 'X']], model_data2.exog)
    assert_equal(weights_2d[good_idx][:, good_idx], model_data2.weights)

    # case 3: extra array of the wrong length must raise
    tmp = handle_formula_data(data, None, formula, depth=2, missing='drop')
    (endog, exog), missing_idx, design_info = tmp
    kwargs.update({'weights': weights_wrong_size,
                   'missing_idx': missing_idx})
    assert_raises(ValueError, sm_data.handle_data, endog, exog, **kwargs)
if __name__ == "__main__":
    import nose
    # Run this module's tests verbosely ('-vvs') and stop at the first
    # failure ('-x'). Uncomment the variant below to drop into pdb on
    # failures instead.
    #nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
    #               exit=False)
    nose.runmodule(argv=[__file__, '-vvs', '-x'], exit=False)
"""
==============================================================
Post-tuning the decision threshold for cost-sensitive learning
==============================================================
Once a classifier is trained, the output of the :term:`predict` method outputs class
label predictions corresponding to a thresholding of either the
:term:`decision_function` or the :term:`predict_proba` output. For a binary classifier,
the default threshold is defined as a posterior probability estimate of 0.5 or a
decision score of 0.0.
However, this default strategy is most likely not optimal for the task at hand.
Here, we use the "Statlog" German credit dataset [1]_ to illustrate a use case.
In this dataset, the task is to predict whether a person has a "good" or "bad" credit.
In addition, a cost-matrix is provided that specifies the cost of
misclassification. Specifically, misclassifying a "bad" credit as "good" is five
times more costly on average than misclassifying a "good" credit as "bad".
We use the :class:`~sklearn.model_selection.TunedThresholdClassifierCV` to select the
cut-off point of the decision function that minimizes the provided business
cost.
In the second part of the example, we further extend this approach by
considering the problem of fraud detection in credit card transactions: in this
case, the business metric depends on the amount of each individual transaction.
.. rubric :: References
.. [1] "Statlog (German Credit Data) Data Set", UCI Machine Learning Repository,
`Link <https://archive.ics.uci.edu/ml/datasets/Statlog+%28German+Credit+Data%29>`_.
.. [2] `Charles Elkan, "The Foundations of Cost-Sensitive Learning",
International joint conference on artificial intelligence.
Vol. 17. No. 1. Lawrence Erlbaum Associates Ltd, 2001.
<https://cseweb.ucsd.edu/~elkan/rescale.pdf>`_
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Cost-sensitive learning with constant gains and costs
# -----------------------------------------------------
#
# In this first section, we illustrate the use of the
# :class:`~sklearn.model_selection.TunedThresholdClassifierCV` in a setting of
# cost-sensitive learning when the gains and costs associated to each entry of the
# confusion matrix are constant. We use the problematic presented in [2]_ using the
# "Statlog" German credit dataset [1]_.
#
# "Statlog" German credit dataset
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# We fetch the German credit dataset from OpenML.
import sklearn
from sklearn.datasets import fetch_openml
sklearn.set_config(transform_output="pandas")
german_credit = fetch_openml(data_id=31, as_frame=True, parser="pandas")
X, y = german_credit.data, german_credit.target
# %%
# We check the feature types available in `X`.
X.info()
# %%
# Many features are categorical and usually string-encoded. We need to encode
# these categories when we develop our predictive model. Let's check the targets.
y.value_counts()
# %%
# Another observation is that the dataset is imbalanced. We would need to be careful
# when evaluating our predictive model and use a family of metrics that are adapted
# to this setting.
#
# In addition, we observe that the target is string-encoded. Some metrics
# (e.g. precision and recall) require to provide the label of interest also called
# the "positive label". Here, we define that our goal is to predict whether or not
# a sample is a "bad" credit.
pos_label, neg_label = "bad", "good"
# %%
# To carry our analysis, we split our dataset using a single stratified split.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0)
# %%
# We are ready to design our predictive model and the associated evaluation strategy.
#
# Evaluation metrics
# ^^^^^^^^^^^^^^^^^^
#
# In this section, we define a set of metrics that we use later. To see
# the effect of tuning the cut-off point, we evaluate the predictive model using
# the Receiver Operating Characteristic (ROC) curve and the Precision-Recall curve.
# The values reported on these plots are therefore the true positive rate (TPR),
# also known as the recall or the sensitivity, and the false positive rate (FPR),
# which is one minus the specificity, for the ROC curve and the precision and recall for
# the Precision-Recall curve.
#
# From these four metrics, scikit-learn does not provide a scorer for the FPR. We
# therefore need to define a small custom function to compute it.
from sklearn.metrics import confusion_matrix
def fpr_score(y, y_pred, neg_label, pos_label):
    """False positive rate: fraction of true negatives predicted positive."""
    cm = confusion_matrix(y, y_pred, labels=[neg_label, pos_label])
    true_neg, false_pos = cm[0, 0], cm[0, 1]
    # FPR is the complement of the true negative rate (the specificity).
    return 1 - true_neg / (true_neg + false_pos)
# %%
# As previously stated, the "positive label" is not defined as the value "1" and calling
# some of the metrics with this non-standard value raise an error. We need to
# provide the indication of the "positive label" to the metrics.
#
# We therefore need to define a scikit-learn scorer using
# :func:`~sklearn.metrics.make_scorer` where the information is passed. We store all
# the custom scorers in a dictionary. To use them, we need to pass the fitted model,
# the data and the target on which we want to evaluate the predictive model.
from sklearn.metrics import make_scorer, precision_score, recall_score
tpr_score = recall_score # TPR and recall are the same metric
scoring = {
"precision": make_scorer(precision_score, pos_label=pos_label),
"recall": make_scorer(recall_score, pos_label=pos_label),
"fpr": make_scorer(fpr_score, neg_label=neg_label, pos_label=pos_label),
"tpr": make_scorer(tpr_score, pos_label=pos_label),
}
# %%
# In addition, the original research [1]_ defines a custom business metric. We
# call a "business metric" any metric function that aims at quantifying how the
# predictions (correct or wrong) might impact the business value of deploying a
# given machine learning model in a specific application context. For our
# credit prediction task, the authors provide a custom cost-matrix which
# encodes that classifying a "bad" credit as "good" is 5 times more costly on
# average than the opposite: it is less costly for the financing institution to
# not grant a credit to a potential customer that will not default (and
# therefore miss a good customer that would have otherwise both reimbursed the
# credit and paid interests) than to grant a credit to a customer that will
# default.
#
# We define a python function that weighs the confusion matrix and returns the
# overall cost.
# The rows of the confusion matrix hold the counts of observed classes
# while the columns hold counts of predicted classes. Recall that here we
# consider "bad" as the positive class (second row and column).
# Scikit-learn model selection tools expect that we follow a convention
# that "higher" means "better", hence the following gain matrix assigns
# negative gains (costs) to the two kinds of prediction errors:
#
# - a gain of `-1` for each false positive ("good" credit labeled as "bad"),
# - a gain of `-5` for each false negative ("bad" credit labeled as "good"),
# - a `0` gain for true positives and true negatives.
#
# Note that theoretically, given that our model is calibrated and our data
# set representative and large enough, we do not need to tune the
# threshold, but can safely set it to 1/5 of the cost ratio, as stated by
# Eq. (2) in Elkan's paper [2]_.
import numpy as np
def credit_gain_score(y, y_pred, neg_label, pos_label):
    """Total gain: confusion matrix weighted elementwise by the gain matrix.

    Rows of the confusion matrix are observed classes, columns are predicted
    classes; errors carry negative gains (-1 per false positive, -5 per
    false negative), correct predictions gain 0.
    """
    cm = confusion_matrix(y, y_pred, labels=[neg_label, pos_label])
    gain_matrix = np.array([[0, -1],
                            [-5, 0]])
    return np.sum(cm * gain_matrix)
scoring["credit_gain"] = make_scorer(
credit_gain_score, neg_label=neg_label, pos_label=pos_label
)
# %%
# Vanilla predictive model
# ^^^^^^^^^^^^^^^^^^^^^^^^
#
# We use :class:`~sklearn.ensemble.HistGradientBoostingClassifier` as a predictive model
# that natively handles categorical features and missing values.
from sklearn.ensemble import HistGradientBoostingClassifier
model = HistGradientBoostingClassifier(
categorical_features="from_dtype", random_state=0
).fit(X_train, y_train)
model
# %%
# We evaluate the performance of our predictive model using the ROC and Precision-Recall
# curves.
import matplotlib.pyplot as plt
from sklearn.metrics import PrecisionRecallDisplay, RocCurveDisplay
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(14, 6))
PrecisionRecallDisplay.from_estimator(
model, X_test, y_test, pos_label=pos_label, ax=axs[0], name="GBDT"
)
axs[0].plot(
scoring["recall"](model, X_test, y_test),
scoring["precision"](model, X_test, y_test),
marker="o",
markersize=10,
color="tab:blue",
label="Default cut-off point at a probability of 0.5",
)
axs[0].set_title("Precision-Recall curve")
axs[0].legend()
RocCurveDisplay.from_estimator(
model,
X_test,
y_test,
pos_label=pos_label,
ax=axs[1],
name="GBDT",
plot_chance_level=True,
)
axs[1].plot(
scoring["fpr"](model, X_test, y_test),
scoring["tpr"](model, X_test, y_test),
marker="o",
markersize=10,
color="tab:blue",
label="Default cut-off point at a probability of 0.5",
)
axs[1].set_title("ROC curve")
axs[1].legend()
_ = fig.suptitle("Evaluation of the vanilla GBDT model")
# %%
# We recall that these curves give insights on the statistical performance of the
# predictive model for different cut-off points. For the Precision-Recall curve, the
# reported metrics are the precision and recall and for the ROC curve, the reported
# metrics are the TPR (same as recall) and FPR.
#
# Here, the different cut-off points correspond to different levels of posterior
# probability estimates ranging between 0 and 1. By default, `model.predict` uses a
# cut-off point at a probability estimate of 0.5. The metrics for such a cut-off point
# are reported with the blue dot on the curves: it corresponds to the statistical
# performance of the model when using `model.predict`.
#
# However, we recall that the original aim was to minimize the cost (or maximize the
# gain) as defined by the business metric. We can compute the value of the business
# metric:
print(f"Business defined metric: {scoring['credit_gain'](model, X_test, y_test)}")
# %%
# At this stage we don't know if any other cut-off can lead to a greater gain. To find
# the optimal one, we need to compute the cost-gain using the business metric for all
# possible cut-off points and choose the best. This strategy can be quite tedious to
# implement by hand, but the
# :class:`~sklearn.model_selection.TunedThresholdClassifierCV` class is here to help us.
# It automatically computes the cost-gain for all possible cut-off points and optimizes
# for the `scoring`.
#
# .. _cost_sensitive_learning_example:
#
# Tuning the cut-off point
# ^^^^^^^^^^^^^^^^^^^^^^^^
#
# We use :class:`~sklearn.model_selection.TunedThresholdClassifierCV` to tune the
# cut-off point. We need to provide the business metric to optimize as well as the
# positive label. Internally, the optimum cut-off point is chosen such that it maximizes
# the business metric via cross-validation. By default a 5-fold stratified
# cross-validation is used.
from sklearn.model_selection import TunedThresholdClassifierCV
tuned_model = TunedThresholdClassifierCV(
estimator=model,
scoring=scoring["credit_gain"],
store_cv_results=True, # necessary to inspect all results
)
tuned_model.fit(X_train, y_train)
print(f"{tuned_model.best_threshold_=:0.2f}")
# %%
# We plot the ROC and Precision-Recall curves for the vanilla model and the tuned model.
# Also we plot the cut-off points that would be used by each model. Because, we are
# reusing the same code later, we define a function that generates the plots.
def plot_roc_pr_curves(vanilla_model, tuned_model, *, title):
    """Plot PR curve, ROC curve and the objective-score profile side by side.

    Relies on the module-level globals ``X_test``, ``y_test``, ``pos_label``
    and ``scoring``. ``tuned_model`` must expose ``best_threshold_``,
    ``best_score_`` and ``cv_results_`` (i.e. it was fitted with
    ``store_cv_results=True``); ``vanilla_model`` falls back to the default
    0.5 threshold via ``getattr`` below.
    """
    fig, axs = plt.subplots(nrows=1, ncols=3, figsize=(21, 6))

    linestyles = ("dashed", "dotted")
    markerstyles = ("o", ">")
    colors = ("tab:blue", "tab:orange")
    names = ("Vanilla GBDT", "Tuned GBDT")
    # One pass per model: draw its PR and ROC curves plus the operating
    # point implied by its decision threshold.
    for idx, (est, linestyle, marker, color, name) in enumerate(
        zip((vanilla_model, tuned_model), linestyles, markerstyles, colors, names)
    ):
        # vanilla model has no best_threshold_; default cut-off is 0.5
        decision_threshold = getattr(est, "best_threshold_", 0.5)
        PrecisionRecallDisplay.from_estimator(
            est,
            X_test,
            y_test,
            pos_label=pos_label,
            linestyle=linestyle,
            color=color,
            ax=axs[0],
            name=name,
        )
        # mark the (recall, precision) point obtained at the cut-off
        axs[0].plot(
            scoring["recall"](est, X_test, y_test),
            scoring["precision"](est, X_test, y_test),
            marker,
            markersize=10,
            color=color,
            label=f"Cut-off point at probability of {decision_threshold:.2f}",
        )
        RocCurveDisplay.from_estimator(
            est,
            X_test,
            y_test,
            pos_label=pos_label,
            curve_kwargs=dict(linestyle=linestyle, color=color),
            ax=axs[1],
            # draw the chance-level diagonal only once (second pass)
            plot_chance_level=idx == 1,
            name=name,
        )
        # mark the (FPR, TPR) point obtained at the cut-off
        axs[1].plot(
            scoring["fpr"](est, X_test, y_test),
            scoring["tpr"](est, X_test, y_test),
            marker,
            markersize=10,
            color=color,
            label=f"Cut-off point at probability of {decision_threshold:.2f}",
        )

    axs[0].set_title("Precision-Recall curve")
    axs[0].legend()
    axs[1].set_title("ROC curve")
    axs[1].legend()

    # Third panel: objective score across all candidate thresholds, with
    # the selected optimum highlighted.
    axs[2].plot(
        tuned_model.cv_results_["thresholds"],
        tuned_model.cv_results_["scores"],
        color="tab:orange",
    )
    axs[2].plot(
        tuned_model.best_threshold_,
        tuned_model.best_score_,
        "o",
        markersize=10,
        color="tab:orange",
        label="Optimal cut-off point for the business metric",
    )
    axs[2].legend()
    axs[2].set_xlabel("Decision threshold (probability)")
    axs[2].set_ylabel("Objective score (using cost-matrix)")
    axs[2].set_title("Objective score as a function of the decision threshold")

    fig.suptitle(title)
# %%
title = "Comparison of the cut-off point for the vanilla and tuned GBDT model"
plot_roc_pr_curves(model, tuned_model, title=title)
# %%
# The first remark is that both classifiers have exactly the same ROC and
# Precision-Recall curves. It is expected because by default, the classifier is fitted
# on the same training data. In a later section, we discuss more in detail the
# available options regarding model refitting and cross-validation.
#
# The second remark is that the cut-off points of the vanilla and tuned model are
# different. To understand why the tuned model has chosen this cut-off point, we can
# look at the right-hand side plot that plots the objective score that is our exactly
# the same as our business metric. We see that the optimum threshold corresponds to the
# maximum of the objective score. This maximum is reached for a decision threshold
# much lower than 0.5: the tuned model enjoys a much higher recall at the cost
# of significantly lower precision: the tuned model is much more eager to
# predict the "bad" class label for a larger fraction of individuals.
#
# We can now check if choosing this cut-off point leads to a better score on the testing
# set:
print(f"Business defined metric: {scoring['credit_gain'](tuned_model, X_test, y_test)}")
# %%
# We observe that tuning the decision threshold improves our business gains
# by almost a factor of 2.
#
# .. _TunedThresholdClassifierCV_no_cv:
#
# Consideration regarding model refitting and cross-validation
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# In the above experiment, we used the default setting of the
# :class:`~sklearn.model_selection.TunedThresholdClassifierCV`. In particular, the
# cut-off point is tuned using a 5-fold stratified cross-validation. Also, the
# underlying predictive model is refitted on the entire training data once the cut-off
# point is chosen.
#
# These two strategies can be changed by providing the `refit` and `cv` parameters.
# For instance, one could provide a fitted `estimator` and set `cv="prefit"`, in which
# case the cut-off point is found on the entire dataset provided at fitting time.
# Also, the underlying classifier will not be refitted when setting `refit=False`. Here,
# we can try to do such experiment.
# can try to do such experiment.
model.fit(X_train, y_train)
tuned_model.set_params(cv="prefit", refit=False).fit(X_train, y_train)
print(f"{tuned_model.best_threshold_=:0.2f}")
# %%
# Then, we evaluate our model with the same approach as before:
title = "Tuned GBDT model without refitting and using the entire dataset"
plot_roc_pr_curves(model, tuned_model, title=title)
# %%
# We observe that the optimum cut-off point is different from the one found
# in the previous experiment. If we look at the right-hand side plot, we
# observe that the business gain has large plateau of near-optimal 0 gain for a
# large span of decision thresholds. This behavior is symptomatic of an
# overfitting. Because we disable cross-validation, we tuned the cut-off point
# on the same set as the model was trained on, and this is the reason for the
# observed overfitting.
#
# This option should therefore be used with caution. One needs to make sure that the
# data provided at fitting time to the
# :class:`~sklearn.model_selection.TunedThresholdClassifierCV` is not the same as the
# data used to train the underlying classifier. This could happen sometimes when the
# idea is just to tune the predictive model on a completely new validation set without a
# costly complete refit.
#
# When cross-validation is too costly, a potential alternative is to use a
# single train-test split by providing a floating number in range `[0, 1]` to the `cv`
# parameter. It splits the data into a training and testing set. Let's explore this
# option:
# Tune the cut-off on a single 75%/25% train-validation split (refit is
# still False from the previous experiment).
tuned_model.set_params(cv=0.75).fit(X_train, y_train)

# %%
# The previous title was copy-pasted from the "prefit" section; this
# experiment does NOT use the entire dataset but a held-out validation split.
title = "Tuned GBDT model without refitting, tuned on a held-out validation split"
plot_roc_pr_curves(model, tuned_model, title=title)
# %%
# Regarding the cut-off point, we observe that the optimum is similar to the multiple
# repeated cross-validation case. However, be aware that a single split does not account
# for the variability of the fit/predict process and thus we are unable to know if there
# is any variance in the cut-off point. The repeated cross-validation averages out
# this effect.
#
# Another observation concerns the ROC and Precision-Recall curves of the tuned model.
# As expected, these curves differ from those of the vanilla model, given that we
# trained the underlying classifier on a subset of the data provided during fitting and
# reserved a validation set for tuning the cut-off point.
#
# Cost-sensitive learning when gains and costs are not constant
# -------------------------------------------------------------
#
# As stated in [2]_, gains and costs are generally not constant in real-world problems.
# In this section, we use a similar example as in [2]_ for the problem of
# detecting fraud in credit card transaction records.
#
# The credit card dataset
# ^^^^^^^^^^^^^^^^^^^^^^^
credit_card = fetch_openml(data_id=1597, as_frame=True, parser="pandas")
credit_card.frame.info()
# %%
# The dataset contains information about credit card records from which some are
# fraudulent and others are legitimate. The goal is therefore to predict whether or
# not a credit card record is fraudulent.
columns_to_drop = ["Class"]
data = credit_card.frame.drop(columns=columns_to_drop)
target = credit_card.frame["Class"].astype(int)
# %%
# First, we check the class distribution of the datasets.
target.value_counts(normalize=True)
# %%
# The dataset is highly imbalanced with fraudulent transaction representing only 0.17%
# of the data. Since we are interested in training a machine learning model, we should
# also make sure that we have enough samples in the minority class to train the model.
target.value_counts()
# %%
# We observe that we have around 500 samples that is on the low end of the number of
# samples required to train a machine learning model. In addition of the target
# distribution, we check the distribution of the amount of the
# fraudulent transactions.
fraud = target == 1
amount_fraud = data["Amount"][fraud]
_, ax = plt.subplots()
ax.hist(amount_fraud, bins=30)
ax.set_title("Amount of fraud transaction")
_ = ax.set_xlabel("Amount (€)")
# %%
# Addressing the problem with a business metric
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Now, we create the business metric that depends on the amount of each transaction. We
# define the cost matrix similarly to [2]_. Accepting a legitimate transaction provides
# a gain of 2% of the amount of the transaction. However, accepting a fraudulent
# transaction result in a loss of the amount of the transaction. As stated in [2]_, the
# gain and loss related to refusals (of fraudulent and legitimate transactions) are not
# trivial to define. Here, we define that a refusal of a legitimate transaction
# is estimated to a loss of 5€ while the refusal of a fraudulent transaction is
# estimated to a gain of 50€. Therefore, we define the following function to
# compute the total benefit of a given decision:
def business_metric(y_true, y_pred, amount):
    """Net benefit (in €) of accept/reject decisions on card transactions.

    Gains per decision: refusing a fraud earns a flat 50€, accepting a
    fraud loses the transaction amount, refusing a legitimate transaction
    costs 5€, accepting a legitimate one earns 2% of its amount.
    """
    fraud, legit = (y_true == 1), (y_true == 0)
    refused, accepted = (y_pred == 1), (y_pred == 0)

    gain_refused_fraud = (fraud & refused).sum() * 50
    loss_accepted_fraud = -amount[fraud & accepted].sum()
    loss_refused_legit = (legit & refused).sum() * -5
    gain_accepted_legit = (amount[legit & accepted] * 0.02).sum()
    return (gain_refused_fraud + loss_accepted_fraud
            + loss_refused_legit + gain_accepted_legit)
# %%
# From this business metric, we create a scikit-learn scorer that given a fitted
# classifier and a test set compute the business metric. In this regard, we use
# the :func:`~sklearn.metrics.make_scorer` factory. The variable `amount` is an
# additional metadata to be passed to the scorer and we need to use
# :ref:`metadata routing <metadata_routing>` to take into account this information.
sklearn.set_config(enable_metadata_routing=True)
business_scorer = make_scorer(business_metric).set_score_request(amount=True)
# %%
# So at this stage, we observe that the amount of the transaction is used twice: once
# as a feature to train our predictive model and once as a metadata to compute the
# business metric and thus the statistical performance of our model. When used as a
# feature, we are only required to have a column in `data` that contains the amount of
# each transaction. To use this information as metadata, we need to have an external
# variable that we can pass to the scorer or the model that internally routes this
# metadata to the scorer. So let's create this variable.
amount = credit_card.frame["Amount"].to_numpy()
# %%
from sklearn.model_selection import train_test_split
data_train, data_test, target_train, target_test, amount_train, amount_test = (
train_test_split(
data, target, amount, stratify=target, test_size=0.5, random_state=42
)
)
# %%
# We first evaluate some baseline policies to serve as reference. Recall that
# class "0" is the legitimate class and class "1" is the fraudulent class.
from sklearn.dummy import DummyClassifier
always_accept_policy = DummyClassifier(strategy="constant", constant=0)
always_accept_policy.fit(data_train, target_train)
benefit = business_scorer(
always_accept_policy, data_test, target_test, amount=amount_test
)
print(f"Benefit of the 'always accept' policy: {benefit:,.2f}€")
# %%
# A policy that considers all transactions as legitimate would create a profit of
# around 220,000€. We make the same evaluation for a classifier that predicts all
# transactions as fraudulent.
always_reject_policy = DummyClassifier(strategy="constant", constant=1)
always_reject_policy.fit(data_train, target_train)
benefit = business_scorer(
always_reject_policy, data_test, target_test, amount=amount_test
)
print(f"Benefit of the 'always reject' policy: {benefit:,.2f}€")
# %%
# Such a policy would entail a catastrophic loss: around 670,000€. This is
# expected since the vast majority of the transactions are legitimate and the
# policy would refuse them at a non-trivial cost.
#
# A predictive model that adapts the accept/reject decisions on a per
# transaction basis should ideally allow us to make a profit larger than the
# 220,000€ of the best of our constant baseline policies.
#
# We start with a logistic regression model with the default decision threshold
# at 0.5. Here we tune the hyperparameter `C` of the logistic regression with a
# proper scoring rule (the log loss) to ensure that the model's probabilistic
# predictions returned by its `predict_proba` method are as accurate as
# possible, irrespectively of the choice of the value of the decision
# threshold.
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
logistic_regression = make_pipeline(StandardScaler(), LogisticRegression())
param_grid = {"logisticregression__C": np.logspace(-6, 6, 13)}
model = GridSearchCV(logistic_regression, param_grid, scoring="neg_log_loss").fit(
data_train, target_train
)
model
# %%
print(
"Benefit of logistic regression with default threshold: "
f"{business_scorer(model, data_test, target_test, amount=amount_test):,.2f}€"
)
# %%
# The business metric shows that our predictive model with a default decision
# threshold is already winning over the baseline in terms of profit and it would be
# already beneficial to use it to accept or reject transactions instead of
# accepting all transactions.
#
# Tuning the decision threshold
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Now the question is: is our model optimum for the type of decision that we want to do?
# Up to now, we did not optimize the decision threshold. We use the
# :class:`~sklearn.model_selection.TunedThresholdClassifierCV` to optimize the decision
# given our business scorer. To avoid a nested cross-validation, we will use the
# best estimator found during the previous grid-search.
# Wrap the already-tuned pipeline in a post-fit threshold tuner that searches
# for the probability cutoff maximizing our custom business scorer.
tuned_model = TunedThresholdClassifierCV(
    estimator=model.best_estimator_,
    scoring=business_scorer,
    thresholds=100,  # number of candidate cutoffs evaluated on a grid
    n_jobs=2,
)
# %%
# Since our business scorer requires the amount of each transaction, we need to pass
# this information in the `fit` method. The
# :class:`~sklearn.model_selection.TunedThresholdClassifierCV` is in charge of
# automatically dispatching this metadata to the underlying scorer.
tuned_model.fit(data_train, target_train, amount=amount_train)
# %%
# We observe that the tuned decision threshold is far away from the default 0.5:
print(f"Tuned decision threshold: {tuned_model.best_threshold_:.2f}")
# %%
print(
    "Benefit of logistic regression with a tuned threshold: "
    f"{business_scorer(tuned_model, data_test, target_test, amount=amount_test):,.2f}€"
)
# %%
# We observe that tuning the decision threshold increases the expected profit
# when deploying our model - as indicated by the business metric. It is therefore
# valuable, whenever possible, to optimize the decision threshold with respect
# to the business metric.
#
# Manually setting the decision threshold instead of tuning it
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# In the previous example, we used the
# :class:`~sklearn.model_selection.TunedThresholdClassifierCV` to find the optimal
# decision threshold. However, in some cases, we might have some prior knowledge about
# the problem at hand and we might be happy to set the decision threshold manually.
#
# The class :class:`~sklearn.model_selection.FixedThresholdClassifier` allows us to
# manually set the decision threshold. At prediction time, it behaves as the previous
# tuned model but no search is performed during the fitting process. Note that here
# we use :class:`~sklearn.frozen.FrozenEstimator` to wrap the predictive model to
# avoid any refitting.
#
# Here, we will reuse the decision threshold found in the previous section to create a
# new model and check that it gives the same results.
from sklearn.frozen import FrozenEstimator
from sklearn.model_selection import FixedThresholdClassifier
model_fixed_threshold = FixedThresholdClassifier(
    estimator=FrozenEstimator(model), threshold=tuned_model.best_threshold_
)
# %%
business_score = business_scorer(
    model_fixed_threshold, data_test, target_test, amount=amount_test
)
print(f"Benefit of logistic regression with a tuned threshold: {business_score:,.2f}€")
# %%
# We observe that we obtained the exact same results but the fitting process
# was much faster since we did not perform any hyper-parameter search.
#
# Finally, the estimate of the (average) business metric itself can be unreliable, in
# particular when the number of data points in the minority class is very small.
# Any business impact estimated by cross-validation of a business metric on
# historical data (offline evaluation) should ideally be confirmed by A/B testing
# on live data (online evaluation). Note however that A/B testing models is
# beyond the scope of the scikit-learn library itself.
#
# At the end, we disable the configuration flag for metadata routing::
sklearn.set_config(enable_metadata_routing=False)
---
name: Bug report
about: Report a problem to help us improve
---
<!--
Thanks for taking an interest in Scrapy!
If you have a question that starts with "How to...", please see the Scrapy Community page: https://scrapy.org/community/.
The GitHub issue tracker's purpose is to deal with bug reports and feature requests for the project itself.
Keep in mind that by filing an issue, you are expected to comply with Scrapy's Code of Conduct, including treating everyone with respect: https://github.com/scrapy/scrapy/blob/master/CODE_OF_CONDUCT.md
The following is a suggested template to structure your issue, you can find more guidelines at https://doc.scrapy.org/en/latest/contributing.html#reporting-bugs
-->
### Description
[Description of the issue]
### Steps to Reproduce
1. [First Step]
2. [Second Step]
3. [and so on...]
**Expected behavior:** [What you expect to happen]
**Actual behavior:** [What actually happens]
**Reproduces how often:** [What percentage of the time does it reproduce?]
### Versions
Please paste here the output of executing `scrapy version --verbose` in the command line.
### Additional context
Any additional information, configuration, data or output from commands that might be necessary to reproduce or understand the issue. Please try not to include screenshots of code or the command line, paste the contents as text instead. You can use [GitHub Flavored Markdown](https://help.github.com/en/articles/creating-and-highlighting-code-blocks) to make the text look better. | unknown | github | https://github.com/scrapy/scrapy | .github/ISSUE_TEMPLATE/bug_report.md |
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package stackplan
import (
"context"
"fmt"
"github.com/zclconf/go-cty/cty"
"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/collections"
"github.com/hashicorp/terraform/internal/configs"
"github.com/hashicorp/terraform/internal/lang/marks"
"github.com/hashicorp/terraform/internal/plans"
"github.com/hashicorp/terraform/internal/providers"
"github.com/hashicorp/terraform/internal/stacks/stackaddrs"
"github.com/hashicorp/terraform/internal/states"
"github.com/hashicorp/terraform/internal/tfdiags"
)
// PlanProducer is an interface of an object that can produce a plan and
// require it to be converted into PlannedChange objects.
type PlanProducer interface {
	// Addr returns the address of the component instance whose plan is
	// being produced.
	Addr() stackaddrs.AbsComponentInstance

	// RequiredComponents returns the static set of components that this
	// component depends on. Static in this context means based on the
	// configuration, so this result shouldn't change based on the type of
	// plan.
	//
	// Normal and destroy plans should return the same set of components,
	// with dependents and dependencies computed from this set during the
	// apply phase.
	RequiredComponents(ctx context.Context) collections.Set[stackaddrs.AbsComponent]

	// ResourceSchema returns the schema for a resource type from a provider.
	ResourceSchema(ctx context.Context, providerTypeAddr addrs.Provider, mode addrs.ResourceMode, resourceType string) (providers.Schema, error)
}
// FromPlan translates a modules-runtime plan for a single component instance
// into the sequence of stack-level PlannedChange objects that describe it,
// using the given producer for the instance address and provider schema
// lookups.
//
// refreshPlan, when non-nil, is the refresh-only plan that preceded a
// destroy plan; its outputs are published instead of the main plan's.
// Schema lookup failures are reported as diagnostics and the affected
// object is skipped rather than aborting the whole conversion.
func FromPlan(ctx context.Context, config *configs.Config, plan *plans.Plan, refreshPlan *plans.Plan, action plans.Action, producer PlanProducer) ([]PlannedChange, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	var changes []PlannedChange

	var outputs map[string]cty.Value
	if refreshPlan != nil {
		// we're going to be a little cheeky and publish the outputs as being
		// the results from the refresh part of the plan. This will then be
		// consumed by the apply part of the plan to ensure that the outputs
		// are correctly updated. The refresh plan should only be present if the
		// main plan was a destroy plan in which case the outputs that the
		// apply needs do actually come from the refresh.
		outputs = OutputsFromPlan(config, refreshPlan)
	} else {
		outputs = OutputsFromPlan(config, plan)
	}

	// We must always at least announce that the component instance exists,
	// and that must come before any resource instance changes referring to it.
	changes = append(changes, &PlannedChangeComponentInstance{
		Addr: producer.Addr(),

		Action:                         action,
		Mode:                           plan.UIMode,
		PlanApplyable:                  plan.Applyable,
		PlanComplete:                   plan.Complete,
		RequiredComponents:             producer.RequiredComponents(ctx),
		PlannedInputValues:             plan.VariableValues,
		PlannedInputValueMarks:         plan.VariableMarks,
		PlannedOutputValues:            outputs,
		PlannedCheckResults:            plan.Checks,
		PlannedProviderFunctionResults: plan.FunctionResults,

		// We must remember the plan timestamp so that the plantimestamp
		// function can return a consistent result during a later apply phase.
		PlanTimestamp: plan.Timestamp,
	})

	// seenObjects records every resource instance object emitted so far, so
	// the prior-state and previous-run-state sweeps further down only emit
	// objects the plan itself didn't mention.
	seenObjects := addrs.MakeSet[addrs.AbsResourceInstanceObject]()
	for _, rsrcChange := range plan.Changes.Resources {
		schema, err := producer.ResourceSchema(
			ctx,
			rsrcChange.ProviderAddr.Provider,
			rsrcChange.Addr.Resource.Resource.Mode,
			rsrcChange.Addr.Resource.Resource.Type,
		)
		if err != nil {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Can't fetch provider schema to save plan",
				fmt.Sprintf(
					"Failed to retrieve the schema for %s from provider %s: %s. This is a bug in Terraform.",
					rsrcChange.Addr, rsrcChange.ProviderAddr.Provider, err,
				),
			))
			continue
		}

		objAddr := addrs.AbsResourceInstanceObject{
			ResourceInstance: rsrcChange.Addr,
			DeposedKey:       rsrcChange.DeposedKey,
		}
		var priorStateSrc *states.ResourceInstanceObjectSrc
		if plan.PriorState != nil {
			priorStateSrc = plan.PriorState.ResourceInstanceObjectSrc(objAddr)
		}

		changes = append(changes, &PlannedChangeResourceInstancePlanned{
			ResourceInstanceObjectAddr: stackaddrs.AbsResourceInstanceObject{
				Component: producer.Addr(),
				Item:      objAddr,
			},
			ChangeSrc:          rsrcChange,
			Schema:             schema,
			PriorStateSrc:      priorStateSrc,
			ProviderConfigAddr: rsrcChange.ProviderAddr,
			// TODO: Also provide the previous run state, if it's
			// different from the prior state, and signal whether the
			// difference from previous run seems "notable" per
			// Terraform Core's heuristics. Only the external plan
			// description needs that info, to populate the
			// "changes outside of Terraform" part of the plan UI;
			// the raw plan only needs the prior state.
		})
		seenObjects.Add(objAddr)
	}

	// We need to keep track of the deferred changes as well
	for _, dr := range plan.DeferredResources {
		rsrcChange := dr.ChangeSrc

		objAddr := addrs.AbsResourceInstanceObject{
			ResourceInstance: rsrcChange.Addr,
			DeposedKey:       rsrcChange.DeposedKey,
		}
		var priorStateSrc *states.ResourceInstanceObjectSrc
		if plan.PriorState != nil {
			priorStateSrc = plan.PriorState.ResourceInstanceObjectSrc(objAddr)
		}

		schema, err := producer.ResourceSchema(
			ctx,
			rsrcChange.ProviderAddr.Provider,
			rsrcChange.Addr.Resource.Resource.Mode,
			rsrcChange.Addr.Resource.Resource.Type,
		)
		if err != nil {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Can't fetch provider schema to save plan",
				fmt.Sprintf(
					"Failed to retrieve the schema for %s from provider %s: %s. This is a bug in Terraform.",
					rsrcChange.Addr, rsrcChange.ProviderAddr.Provider, err,
				),
			))
			continue
		}

		plannedChangeResourceInstance := PlannedChangeResourceInstancePlanned{
			ResourceInstanceObjectAddr: stackaddrs.AbsResourceInstanceObject{
				Component: producer.Addr(),
				Item:      objAddr,
			},
			ChangeSrc:          rsrcChange,
			Schema:             schema,
			PriorStateSrc:      priorStateSrc,
			ProviderConfigAddr: rsrcChange.ProviderAddr,
		}

		// Deferred changes are wrapped with the reason they were deferred.
		changes = append(changes, &PlannedChangeDeferredResourceInstancePlanned{
			DeferredReason:          dr.DeferredReason,
			ResourceInstancePlanned: plannedChangeResourceInstance,
		})
		seenObjects.Add(objAddr)
	}

	// We also need to catch any objects that exist in the "prior state"
	// but don't have any actions planned, since we still need to capture
	// the prior state part in case it was updated by refreshing during
	// the plan walk.
	if priorState := plan.PriorState; priorState != nil {
		for _, addr := range priorState.AllResourceInstanceObjectAddrs() {
			if seenObjects.Has(addr) {
				// We're only interested in objects that didn't appear
				// in the plan, such as data resources whose read has
				// completed during the plan phase.
				continue
			}

			rs := priorState.Resource(addr.ResourceInstance.ContainingResource())
			os := priorState.ResourceInstanceObjectSrc(addr)
			schema, err := producer.ResourceSchema(
				ctx,
				rs.ProviderConfig.Provider,
				addr.ResourceInstance.Resource.Resource.Mode,
				addr.ResourceInstance.Resource.Resource.Type,
			)
			if err != nil {
				diags = diags.Append(tfdiags.Sourceless(
					tfdiags.Error,
					"Can't fetch provider schema to save plan",
					fmt.Sprintf(
						"Failed to retrieve the schema for %s from provider %s: %s. This is a bug in Terraform.",
						addr, rs.ProviderConfig.Provider, err,
					),
				))
				continue
			}

			changes = append(changes, &PlannedChangeResourceInstancePlanned{
				ResourceInstanceObjectAddr: stackaddrs.AbsResourceInstanceObject{
					Component: producer.Addr(),
					Item:      addr,
				},
				Schema:             schema,
				PriorStateSrc:      os,
				ProviderConfigAddr: rs.ProviderConfig,
				// We intentionally omit ChangeSrc, because we're not actually
				// planning to change this object during the apply phase, only
				// to update its state data.
			})
			seenObjects.Add(addr)
		}
	}

	prevRunState := plan.PrevRunState
	if refreshPlan != nil {
		// If we executed a refresh plan as part of this, then the true
		// previous run state is the one from the refresh plan, because
		// the later plan used the output of the refresh plan as the
		// previous state.
		prevRunState = refreshPlan.PrevRunState
	}

	// We also have one more unusual case to deal with: if an object
	// existed at the end of the previous run but was found to have
	// been deleted when we refreshed during planning then it will
	// not be present in either the prior state _or_ the plan, but
	// we still need to include a stubby object for it in the plan
	// so we can remember to discard it from the state during the
	// apply phase.
	if prevRunState != nil {
		for _, addr := range prevRunState.AllResourceInstanceObjectAddrs() {
			if seenObjects.Has(addr) {
				// We're only interested in objects that didn't appear
				// in the plan, such as data resources whose read has
				// completed during the plan phase.
				continue
			}

			rs := prevRunState.Resource(addr.ResourceInstance.ContainingResource())
			changes = append(changes, &PlannedChangeResourceInstancePlanned{
				ResourceInstanceObjectAddr: stackaddrs.AbsResourceInstanceObject{
					Component: producer.Addr(),
					Item:      addr,
				},
				ProviderConfigAddr: rs.ProviderConfig,
				// Everything except the addresses are omitted in this case,
				// which represents that we should just delete the object
				// from the state when applied, and not take any other
				// action.
			})
			seenObjects.Add(addr)
		}
	}

	return changes, diags
}
func OutputsFromPlan(config *configs.Config, plan *plans.Plan) map[string]cty.Value {
if plan == nil {
return nil
}
// We need to vary our behavior here slightly depending on what action
// we're planning to take with this overall component: normally we want
// to use the "planned new state"'s output values, but if we're actually
// planning to destroy all of the infrastructure managed by this
// component then the planned new state has no output values at all,
// so we'll use the prior state's output values instead just in case
// we also need to plan destroying another component instance
// downstream of this one which will make use of this instance's
// output values _before_ we destroy it.
//
// FIXME: We're using UIMode for this decision, despite its doc comment
// saying we shouldn't, because this behavior is an offshoot of the
// already-documented annoying exception to that rule where various
// parts of Terraform use UIMode == DestroyMode in particular to deal
// with necessary variations during a "full destroy". Hopefully we'll
// eventually find a more satisfying solution for that, in which case
// we should update the following to use that solution too.
attrs := make(map[string]cty.Value)
switch plan.UIMode {
case plans.DestroyMode:
// The "prior state" of the plan includes any new information we
// learned by "refreshing" before we planned to destroy anything,
// and so should be as close as possible to the current
// (pre-destroy) state of whatever infrastructure this component
// instance is managing.
for _, os := range plan.PriorState.RootOutputValues {
v := os.Value
if os.Sensitive {
// For our purposes here, a static sensitive flag on the
// output value is indistinguishable from the value having
// been dynamically marked as sensitive.
v = v.Mark(marks.Sensitive)
}
attrs[os.Addr.OutputValue.Name] = v
}
default:
for _, changeSrc := range plan.Changes.Outputs {
if len(changeSrc.Addr.Module) > 0 {
// Only include output values of the root module as part
// of the component.
continue
}
name := changeSrc.Addr.OutputValue.Name
change, err := changeSrc.Decode()
if err != nil {
attrs[name] = cty.DynamicVal
continue
}
if changeSrc.Sensitive {
// For our purposes here, a static sensitive flag on the
// output value is indistinguishable from the value having
// been dynamically marked as sensitive.
attrs[name] = change.After.Mark(marks.Sensitive)
continue
}
// Otherwise, just use the value as-is.
attrs[name] = change.After
}
}
if config != nil {
// If the plan only ran partially then we might be missing
// some planned changes for output values, which could
// cause "attrs" to have an incomplete set of attributes.
// To avoid confusing downstream errors we'll insert unknown
// values for any declared output values that don't yet
// have a final value.
for name := range config.Module.Outputs {
if _, ok := attrs[name]; !ok {
// We can't do any better than DynamicVal because
// output values in the modules language don't
// have static type constraints.
attrs[name] = cty.DynamicVal
}
}
// In the DestroyMode case above we might also find ourselves
// with some remnant additional output values that have since
// been removed from the configuration, but yet remain in the
// state. Destroying with a different configuration than was
// most recently applied is not guaranteed to work, but we
// can make it more likely to work by dropping anything that
// isn't currently declared, since referring directly to these
// would be a static validation error anyway, and including
// them might cause aggregate operations like keys(component.foo)
// to produce broken results.
for name := range attrs {
_, declared := config.Module.Outputs[name]
if !declared {
// (deleting map elements during iteration is valid in Go,
// unlike some other languages.)
delete(attrs, name)
}
}
}
return attrs
} | go | github | https://github.com/hashicorp/terraform | internal/stacks/stackplan/from_plan.go |
//go:build linux
package bridge
import (
"context"
"errors"
"fmt"
"os"
"github.com/containerd/log"
"github.com/moby/moby/v2/daemon/libnetwork/drivers/bridge/internal/firewaller"
)
const (
	// Kernel sysctl files (procfs paths) controlling IP forwarding.
	ipv4ForwardConf        = "/proc/sys/net/ipv4/ip_forward"
	ipv6ForwardConfDefault = "/proc/sys/net/ipv6/conf/default/forwarding"
	ipv6ForwardConfAll     = "/proc/sys/net/ipv6/conf/all/forwarding"
)

// filterForwardDropper is the subset of the firewaller needed here: it sets
// the default policy of the filter table's FORWARD chain to drop for the
// given IP version.
type filterForwardDropper interface {
	FilterForwardDrop(context.Context, firewaller.IPVersion) error
}
// checkIPv4Forwarding returns an error if the host's IPv4 forwarding sysctl
// is disabled or cannot be read. It never modifies the setting.
func checkIPv4Forwarding() error {
	switch on, err := getKernelBoolParam(ipv4ForwardConf); {
	case err != nil:
		return fmt.Errorf("checking IPv4 forwarding: %w", err)
	case !on:
		// It's the user's responsibility to enable forwarding and secure their host. Or,
		// start docker with --ip-forward=false to disable this check.
		return errors.New("IPv4 forwarding is disabled: check your host's firewalling and set sysctl net.ipv4.ip_forward=1, or disable this check using daemon option --ip-forward=false")
	}
	return nil
}
// setupIPv4Forwarding enables the IPv4 forwarding sysctl if it isn't already
// enabled. If this call flipped the setting and wantFilterForwardDrop is
// true, the filter-FORWARD default policy is set to drop; on failure the
// sysctl change is rolled back.
func setupIPv4Forwarding(ffd filterForwardDropper, wantFilterForwardDrop bool) (retErr error) {
	flipped, err := configureIPForwarding(ipv4ForwardConf, '1')
	if err != nil {
		return err
	}
	if !flipped {
		// Forwarding was already enabled; nothing to undo and no policy
		// change required.
		return nil
	}

	// Roll the sysctl back if anything below fails.
	defer func() {
		if retErr != nil {
			if _, err := configureIPForwarding(ipv4ForwardConf, '0'); err != nil {
				log.G(context.TODO()).WithError(err).Error("Cannot disable IPv4 forwarding")
			}
		}
	}()

	// When enabling ip_forward set the default policy on forward chain to drop.
	if wantFilterForwardDrop {
		return ffd.FilterForwardDrop(context.TODO(), firewaller.IPv4)
	}
	return nil
}
// checkIPv6Forwarding returns an error unless both the "default" and "all"
// IPv6 forwarding sysctls are enabled. It never modifies the settings.
func checkIPv6Forwarding() error {
	defaultOn, err := getKernelBoolParam(ipv6ForwardConfDefault)
	if err != nil {
		return fmt.Errorf("checking IPv6 default forwarding: %w", err)
	}
	allOn, err := getKernelBoolParam(ipv6ForwardConfAll)
	if err != nil {
		return fmt.Errorf("checking IPv6 global forwarding: %w", err)
	}
	if !(defaultOn && allOn) {
		// It's the user's responsibility to enable forwarding and secure their host. Or,
		// start docker with --ip-forward=false to disable this check.
		return errors.New("IPv6 global forwarding is disabled: check your host's firewalling and set sysctls net.ipv6.conf.all.forwarding=1 and net.ipv6.conf.default.forwarding=1, or disable this check using daemon option --ip-forward=false")
	}
	return nil
}
// setupIPv6Forwarding enables IPv6 forwarding via both the "default" and
// "all" conf entries, rolling back whichever it changed if a later step
// fails. If either entry was changed and wantFilterForwardDrop is true, the
// IPv6 filter-FORWARD default policy is set to drop.
func setupIPv6Forwarding(ffd filterForwardDropper, wantFilterForwardDrop bool) (retErr error) {
	// Setting "all" (below) sets "default" as well, but "default" still
	// needs its own check/set in case "all" was already enabled while
	// "default" wasn't.
	defChanged, err := configureIPForwarding(ipv6ForwardConfDefault, '1')
	if err != nil {
		return err
	}
	if defChanged {
		defer func() {
			if retErr != nil {
				if _, err := configureIPForwarding(ipv6ForwardConfDefault, '0'); err != nil {
					log.G(context.TODO()).WithError(err).Error("Cannot disable IPv6 default.forwarding")
				}
			}
		}()
	}

	// Now set "all", if needed, with its own rollback.
	allChanged, err := configureIPForwarding(ipv6ForwardConfAll, '1')
	if err != nil {
		return err
	}
	if allChanged {
		defer func() {
			if retErr != nil {
				if _, err := configureIPForwarding(ipv6ForwardConfAll, '0'); err != nil {
					log.G(context.TODO()).WithError(err).Error("Cannot disable IPv6 all.forwarding")
				}
			}
		}()
	}

	if !wantFilterForwardDrop || (!defChanged && !allChanged) {
		return nil
	}
	return ffd.FilterForwardDrop(context.TODO(), firewaller.IPv6)
}
func configureIPForwarding(file string, val byte) (changed bool, _ error) {
data, err := os.ReadFile(file)
if err != nil || len(data) == 0 {
return false, fmt.Errorf("cannot read IP forwarding setup from '%s': %w", file, err)
}
if len(data) == 0 {
return false, fmt.Errorf("cannot read IP forwarding setup from '%s': 0 bytes", file)
}
if data[0] == val {
return false, nil
}
if err := os.WriteFile(file, []byte{val, '\n'}, 0o644); err != nil {
return false, fmt.Errorf("failed to set IP forwarding '%s' = '%c': %w", file, val, err)
}
return true, nil
} | go | github | https://github.com/moby/moby | daemon/libnetwork/drivers/bridge/setup_ip_forwarding.go |
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from mbcharsetprober import MultiByteCharSetProber
from codingstatemachine import CodingStateMachine
from chardistribution import EUCKRDistributionAnalysis
from mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
    """Charset prober for the EUC-KR (Korean) multi-byte encoding.

    Reuses the generic ``MultiByteCharSetProber`` machinery, plugging in an
    EUC-KR-specific coding state machine and character distribution
    analyser.
    """
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        # Validates byte sequences against the EUC-KR state machine model.
        self._mCodingSM = CodingStateMachine(EUCKRSMModel)
        # Scores character-frequency distribution for Korean text.
        self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
        self.reset()
    def get_charset_name(self):
        # Canonical charset name reported when this prober wins.
        return "EUC-KR"
"""
Testing for export functions of decision trees (sklearn.tree.export).
"""
from io import StringIO
from re import finditer, search
from textwrap import dedent
import numpy as np
import pytest
from numpy.random import RandomState
from sklearn.base import is_classifier
from sklearn.exceptions import NotFittedError
from sklearn.tree import (
DecisionTreeClassifier,
DecisionTreeRegressor,
export_graphviz,
export_text,
plot_tree,
)
# Split criteria exercised for classifiers and regressors respectively.
CLF_CRITERIONS = ("gini", "log_loss")
REG_CRITERIONS = ("squared_error", "absolute_error", "poisson")
# toy sample: two linearly separable classes of three points each
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
# multi-output targets and per-sample weights for the weighted-sample tests
y2 = [[-1, 1], [-1, 1], [-1, 1], [1, 2], [1, 2], [1, 3]]
w = [1, 1, 1, 0.5, 0.5, 0.5]
# degenerate target: every sample belongs to the same class
y_degraded = [1, 1, 1, 1, 1, 1]
def test_graphviz_toy():
    """Check export_graphviz output for toy trees across option combinations.

    Each section exports a small fitted tree and compares the generated DOT
    source against an exact expected string. Fix: the final "degraded
    learning set" section previously built ``contents2`` but never asserted
    equality, so that case was silently untested.
    """
    # Check correctness of export_graphviz
    clf = DecisionTreeClassifier(
        max_depth=3, min_samples_split=2, criterion="gini", random_state=2
    )
    clf.fit(X, y)

    # Test export code
    contents1 = export_graphviz(clf, out_file=None)
    contents2 = (
        "digraph Tree {\n"
        'node [shape=box, fontname="helvetica"] ;\n'
        'edge [fontname="helvetica"] ;\n'
        '0 [label="x[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n'
        'value = [3, 3]"] ;\n'
        '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n'
        "0 -> 1 [labeldistance=2.5, labelangle=45, "
        'headlabel="True"] ;\n'
        '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n'
        "0 -> 2 [labeldistance=2.5, labelangle=-45, "
        'headlabel="False"] ;\n'
        "}"
    )
    assert contents1 == contents2

    # Test with feature_names
    contents1 = export_graphviz(
        clf, feature_names=["feature0", "feature1"], out_file=None
    )
    contents2 = (
        "digraph Tree {\n"
        'node [shape=box, fontname="helvetica"] ;\n'
        'edge [fontname="helvetica"] ;\n'
        '0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n'
        'value = [3, 3]"] ;\n'
        '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n'
        "0 -> 1 [labeldistance=2.5, labelangle=45, "
        'headlabel="True"] ;\n'
        '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n'
        "0 -> 2 [labeldistance=2.5, labelangle=-45, "
        'headlabel="False"] ;\n'
        "}"
    )
    assert contents1 == contents2

    # Test with feature_names (escaped)
    contents1 = export_graphviz(
        clf, feature_names=['feature"0"', 'feature"1"'], out_file=None
    )
    contents2 = (
        "digraph Tree {\n"
        'node [shape=box, fontname="helvetica"] ;\n'
        'edge [fontname="helvetica"] ;\n'
        '0 [label="feature\\"0\\" <= 0.0\\n'
        "gini = 0.5\\nsamples = 6\\n"
        'value = [3, 3]"] ;\n'
        '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n'
        "0 -> 1 [labeldistance=2.5, labelangle=45, "
        'headlabel="True"] ;\n'
        '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n'
        "0 -> 2 [labeldistance=2.5, labelangle=-45, "
        'headlabel="False"] ;\n'
        "}"
    )
    assert contents1 == contents2

    # Test with class_names
    contents1 = export_graphviz(clf, class_names=["yes", "no"], out_file=None)
    contents2 = (
        "digraph Tree {\n"
        'node [shape=box, fontname="helvetica"] ;\n'
        'edge [fontname="helvetica"] ;\n'
        '0 [label="x[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n'
        'value = [3, 3]\\nclass = yes"] ;\n'
        '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n'
        'class = yes"] ;\n'
        "0 -> 1 [labeldistance=2.5, labelangle=45, "
        'headlabel="True"] ;\n'
        '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n'
        'class = no"] ;\n'
        "0 -> 2 [labeldistance=2.5, labelangle=-45, "
        'headlabel="False"] ;\n'
        "}"
    )
    assert contents1 == contents2

    # Test with class_names (escaped)
    contents1 = export_graphviz(clf, class_names=['"yes"', '"no"'], out_file=None)
    contents2 = (
        "digraph Tree {\n"
        'node [shape=box, fontname="helvetica"] ;\n'
        'edge [fontname="helvetica"] ;\n'
        '0 [label="x[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n'
        'value = [3, 3]\\nclass = \\"yes\\""] ;\n'
        '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n'
        'class = \\"yes\\""] ;\n'
        "0 -> 1 [labeldistance=2.5, labelangle=45, "
        'headlabel="True"] ;\n'
        '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n'
        'class = \\"no\\""] ;\n'
        "0 -> 2 [labeldistance=2.5, labelangle=-45, "
        'headlabel="False"] ;\n'
        "}"
    )
    assert contents1 == contents2

    # Test plot_options
    contents1 = export_graphviz(
        clf,
        filled=True,
        impurity=False,
        proportion=True,
        special_characters=True,
        rounded=True,
        out_file=None,
        fontname="sans",
    )
    contents2 = (
        "digraph Tree {\n"
        'node [shape=box, style="filled, rounded", color="black", '
        'fontname="sans"] ;\n'
        'edge [fontname="sans"] ;\n'
        "0 [label=<x<SUB>0</SUB> &le; 0.0<br/>samples = 100.0%<br/>"
        'value = [0.5, 0.5]>, fillcolor="#ffffff"] ;\n'
        "1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, "
        'fillcolor="#e58139"] ;\n'
        "0 -> 1 [labeldistance=2.5, labelangle=45, "
        'headlabel="True"] ;\n'
        "2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, "
        'fillcolor="#399de5"] ;\n'
        "0 -> 2 [labeldistance=2.5, labelangle=-45, "
        'headlabel="False"] ;\n'
        "}"
    )
    assert contents1 == contents2

    # Test max_depth
    contents1 = export_graphviz(clf, max_depth=0, class_names=True, out_file=None)
    contents2 = (
        "digraph Tree {\n"
        'node [shape=box, fontname="helvetica"] ;\n'
        'edge [fontname="helvetica"] ;\n'
        '0 [label="x[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n'
        'value = [3, 3]\\nclass = y[0]"] ;\n'
        '1 [label="(...)"] ;\n'
        "0 -> 1 ;\n"
        '2 [label="(...)"] ;\n'
        "0 -> 2 ;\n"
        "}"
    )
    assert contents1 == contents2

    # Test max_depth with plot_options
    contents1 = export_graphviz(
        clf, max_depth=0, filled=True, out_file=None, node_ids=True
    )
    contents2 = (
        "digraph Tree {\n"
        'node [shape=box, style="filled", color="black", '
        'fontname="helvetica"] ;\n'
        'edge [fontname="helvetica"] ;\n'
        '0 [label="node #0\\nx[0] <= 0.0\\ngini = 0.5\\n'
        'samples = 6\\nvalue = [3, 3]", fillcolor="#ffffff"] ;\n'
        '1 [label="(...)", fillcolor="#C0C0C0"] ;\n'
        "0 -> 1 ;\n"
        '2 [label="(...)", fillcolor="#C0C0C0"] ;\n'
        "0 -> 2 ;\n"
        "}"
    )
    assert contents1 == contents2

    # Test multi-output with weighted samples
    clf = DecisionTreeClassifier(
        max_depth=2, min_samples_split=2, criterion="gini", random_state=2
    )
    clf = clf.fit(X, y2, sample_weight=w)

    contents1 = export_graphviz(clf, filled=True, impurity=False, out_file=None)
    contents2 = (
        "digraph Tree {\n"
        'node [shape=box, style="filled", color="black", '
        'fontname="helvetica"] ;\n'
        'edge [fontname="helvetica"] ;\n'
        '0 [label="x[0] <= 0.0\\nsamples = 6\\n'
        "value = [[3.0, 1.5, 0.0]\\n"
        '[3.0, 1.0, 0.5]]", fillcolor="#ffffff"] ;\n'
        '1 [label="samples = 3\\nvalue = [[3, 0, 0]\\n'
        '[3, 0, 0]]", fillcolor="#e58139"] ;\n'
        "0 -> 1 [labeldistance=2.5, labelangle=45, "
        'headlabel="True"] ;\n'
        '2 [label="x[0] <= 1.5\\nsamples = 3\\n'
        "value = [[0.0, 1.5, 0.0]\\n"
        '[0.0, 1.0, 0.5]]", fillcolor="#f1bd97"] ;\n'
        "0 -> 2 [labeldistance=2.5, labelangle=-45, "
        'headlabel="False"] ;\n'
        '3 [label="samples = 2\\nvalue = [[0, 1, 0]\\n'
        '[0, 1, 0]]", fillcolor="#e58139"] ;\n'
        "2 -> 3 ;\n"
        '4 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n'
        '[0.0, 0.0, 0.5]]", fillcolor="#e58139"] ;\n'
        "2 -> 4 ;\n"
        "}"
    )
    assert contents1 == contents2

    # Test regression output with plot_options
    clf = DecisionTreeRegressor(
        max_depth=3, min_samples_split=2, criterion="squared_error", random_state=2
    )
    clf.fit(X, y)

    contents1 = export_graphviz(
        clf,
        filled=True,
        leaves_parallel=True,
        out_file=None,
        rotate=True,
        rounded=True,
        fontname="sans",
    )
    contents2 = (
        "digraph Tree {\n"
        'node [shape=box, style="filled, rounded", color="black", '
        'fontname="sans"] ;\n'
        "graph [ranksep=equally, splines=polyline] ;\n"
        'edge [fontname="sans"] ;\n'
        "rankdir=LR ;\n"
        '0 [label="x[0] <= 0.0\\nsquared_error = 1.0\\nsamples = 6\\n'
        'value = 0.0", fillcolor="#f2c09c"] ;\n'
        '1 [label="squared_error = 0.0\\nsamples = 3\\'
        'nvalue = -1.0", '
        'fillcolor="#ffffff"] ;\n'
        "0 -> 1 [labeldistance=2.5, labelangle=-45, "
        'headlabel="True"] ;\n'
        '2 [label="squared_error = 0.0\\nsamples = 3\\nvalue = 1.0", '
        'fillcolor="#e58139"] ;\n'
        "0 -> 2 [labeldistance=2.5, labelangle=45, "
        'headlabel="False"] ;\n'
        "{rank=same ; 0} ;\n"
        "{rank=same ; 1; 2} ;\n"
        "}"
    )
    assert contents1 == contents2

    # Test classifier with degraded learning set
    clf = DecisionTreeClassifier(max_depth=3)
    clf.fit(X, y_degraded)

    contents1 = export_graphviz(clf, filled=True, out_file=None)
    contents2 = (
        "digraph Tree {\n"
        'node [shape=box, style="filled", color="black", '
        'fontname="helvetica"] ;\n'
        'edge [fontname="helvetica"] ;\n'
        '0 [label="gini = 0.0\\nsamples = 6\\nvalue = 6.0", '
        'fillcolor="#ffffff"] ;\n'
        "}"
    )
    # Bug fix: the exported DOT source was computed but never compared to the
    # expectation, leaving this case unasserted.
    assert contents1 == contents2
@pytest.mark.parametrize("constructor", [list, np.array])
def test_graphviz_feature_class_names_array_support(constructor):
    # Check that export_graphviz treats feature names
    # and class names correctly and supports arrays
    #
    # Runs once with plain lists and once with numpy arrays to check both
    # container types produce identical DOT output.
    clf = DecisionTreeClassifier(
        max_depth=3, min_samples_split=2, criterion="gini", random_state=2
    )
    clf.fit(X, y)

    # Test with feature_names
    contents1 = export_graphviz(
        clf, feature_names=constructor(["feature0", "feature1"]), out_file=None
    )
    contents2 = (
        "digraph Tree {\n"
        'node [shape=box, fontname="helvetica"] ;\n'
        'edge [fontname="helvetica"] ;\n'
        '0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n'
        'value = [3, 3]"] ;\n'
        '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n'
        "0 -> 1 [labeldistance=2.5, labelangle=45, "
        'headlabel="True"] ;\n'
        '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n'
        "0 -> 2 [labeldistance=2.5, labelangle=-45, "
        'headlabel="False"] ;\n'
        "}"
    )
    assert contents1 == contents2

    # Test with class_names
    contents1 = export_graphviz(
        clf, class_names=constructor(["yes", "no"]), out_file=None
    )
    contents2 = (
        "digraph Tree {\n"
        'node [shape=box, fontname="helvetica"] ;\n'
        'edge [fontname="helvetica"] ;\n'
        '0 [label="x[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n'
        'value = [3, 3]\\nclass = yes"] ;\n'
        '1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n'
        'class = yes"] ;\n'
        "0 -> 1 [labeldistance=2.5, labelangle=45, "
        'headlabel="True"] ;\n'
        '2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n'
        'class = no"] ;\n'
        "0 -> 2 [labeldistance=2.5, labelangle=-45, "
        'headlabel="False"] ;\n'
        "}"
    )
    assert contents1 == contents2
def test_graphviz_errors():
    # Check for errors of export_graphviz
    #
    # Verifies the validation paths: unfitted estimator, feature_names
    # length/type mismatches, non-estimator input, and empty class_names.
    clf = DecisionTreeClassifier(max_depth=3, min_samples_split=2)

    # Check not-fitted decision tree error
    out = StringIO()
    with pytest.raises(NotFittedError):
        export_graphviz(clf, out)

    clf.fit(X, y)

    # Check if it errors when length of feature_names
    # mismatches with number of features
    message = "Length of feature_names, 1 does not match number of features, 2"
    with pytest.raises(ValueError, match=message):
        export_graphviz(clf, None, feature_names=["a"])

    message = "Length of feature_names, 3 does not match number of features, 2"
    with pytest.raises(ValueError, match=message):
        export_graphviz(clf, None, feature_names=["a", "b", "c"])

    # Check error when feature_names contains non-string elements
    message = "All feature names must be strings."
    with pytest.raises(ValueError, match=message):
        export_graphviz(clf, None, feature_names=["a", 1])

    # Check error when argument is not an estimator
    message = "is not an estimator instance"
    with pytest.raises(TypeError, match=message):
        export_graphviz(clf.fit(X, y).tree_)

    # Check class_names error
    out = StringIO()
    with pytest.raises(IndexError):
        export_graphviz(clf, out, class_names=[])
@pytest.mark.parametrize("criterion", CLF_CRITERIONS + REG_CRITERIONS)
def test_criterion_in_gradient_boosting_graphviz(criterion):
    # The criterion name must appear in every node label of the dot output.
    dot_data = StringIO()
    regression = criterion in REG_CRITERIONS
    estimator_cls = DecisionTreeRegressor if regression else DecisionTreeClassifier
    estimator = estimator_cls(random_state=0, criterion=criterion)
    # Poisson regression requires strictly positive targets, hence the shift.
    targets = [label + 2 for label in y] if regression else y
    estimator.fit(X, targets)
    export_graphviz(estimator, out_file=dot_data)
    for node_label in finditer(r"\[.*?samples.*?\]", dot_data.getvalue()):
        assert criterion in node_label.group()
def test_precision():
    # The `precision` argument of export_graphviz must bound the number of
    # decimals printed for node values, impurities and split thresholds.
    rng_reg = RandomState(2)
    rng_clf = RandomState(8)

    def frac_len(text):
        # Length of the ".ddd..." fractional part, i.e. decimals + 1 (the dot).
        return len(search(r"\.\d+", text).group())

    cases = zip(
        (rng_reg.random_sample((5, 2)), rng_clf.random_sample((1000, 4))),
        (rng_reg.random_sample((5,)), rng_clf.randint(2, size=(1000,))),
        (
            DecisionTreeRegressor(random_state=0, max_depth=1),
            DecisionTreeClassifier(max_depth=1, random_state=0),
        ),
    )
    for X, y, clf in cases:
        clf.fit(X, y)
        for precision in (4, 3):
            dot_data = export_graphviz(
                clf, out_file=None, precision=precision, proportion=True
            )
            # With these random states the impurity and threshold carry the
            # full requested precision (strict equality below); the reported
            # value has at most 2 decimals, so only <= can be asserted.
            for hit in finditer(r"value = \d+\.\d+", dot_data):
                assert frac_len(hit.group()) <= precision + 1
            impurity_pattern = (
                r"gini = \d+\.\d+" if is_classifier(clf) else r"squared_error = \d+\.\d+"
            )
            for hit in finditer(impurity_pattern, dot_data):
                assert frac_len(hit.group()) == precision + 1
            for hit in finditer(r"<= \d+\.\d+", dot_data):
                assert frac_len(hit.group()) == precision + 1
def test_export_text_errors():
    # Validation errors raised by export_text for mismatched name lists.
    clf = DecisionTreeClassifier(max_depth=2, random_state=0)
    clf.fit(X, y)

    # Wrong number of feature names (tree has 2 features).
    with pytest.raises(ValueError, match="feature_names must contain 2 elements, got 1"):
        export_text(clf, feature_names=["a"])

    # Wrong number of class names (tree was fitted with 2 classes).
    err_msg = (
        "When `class_names` is an array, it should contain as"
        " many items as `decision_tree.classes_`. Got 1 while"
        " the tree was fitted with 2 classes."
    )
    with pytest.raises(ValueError, match=err_msg):
        export_text(clf, class_names=["a"])
def test_export_text():
    # End-to-end checks of export_text's plain-text tree reports: default
    # output, max_depth handling, show_weights, custom spacing, truncation
    # markers, multi-output regression, and custom feature_names.
    clf = DecisionTreeClassifier(max_depth=2, random_state=0)
    clf.fit(X, y)
    expected_report = dedent(
        """
        |--- feature_1 <= 0.00
        |   |--- class: -1
        |--- feature_1 > 0.00
        |   |--- class: 1
        """
    ).lstrip()
    assert export_text(clf) == expected_report
    # max_depth=0: leaves at level 1 are still printed, not truncated
    assert export_text(clf, max_depth=0) == expected_report
    # max_depth larger than the tree depth: full report, nothing truncated
    assert export_text(clf, max_depth=10) == expected_report
    expected_report = dedent(
        """
        |--- feature_1 <= 0.00
        |   |--- weights: [3.00, 0.00] class: -1
        |--- feature_1 > 0.00
        |   |--- weights: [0.00, 3.00] class: 1
        """
    ).lstrip()
    assert export_text(clf, show_weights=True) == expected_report
    expected_report = dedent(
        """
        |- feature_1 <= 0.00
        | |- class: -1
        |- feature_1 > 0.00
        | |- class: 1
        """
    ).lstrip()
    # spacing=1 shrinks the per-level indentation from 3 columns to 1
    assert export_text(clf, spacing=1) == expected_report
    # A deeper tree: max_depth=0 must emit a "truncated branch" marker.
    X_l = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-1, 1]]
    y_l = [-1, -1, -1, 1, 1, 1, 2]
    clf = DecisionTreeClassifier(max_depth=4, random_state=0)
    clf.fit(X_l, y_l)
    expected_report = dedent(
        """
        |--- feature_1 <= 0.00
        |   |--- class: -1
        |--- feature_1 > 0.00
        |   |--- truncated branch of depth 2
        """
    ).lstrip()
    assert export_text(clf, max_depth=0) == expected_report
    # Multi-output regression: leaves print the full value vector.
    X_mo = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
    y_mo = [[-1, -1], [-1, -1], [-1, -1], [1, 1], [1, 1], [1, 1]]
    reg = DecisionTreeRegressor(max_depth=2, random_state=0)
    reg.fit(X_mo, y_mo)
    expected_report = dedent(
        """
        |--- feature_1 <= 0.0
        |   |--- value: [-1.0, -1.0]
        |--- feature_1 > 0.0
        |   |--- value: [1.0, 1.0]
        """
    ).lstrip()
    assert export_text(reg, decimals=1) == expected_report
    # show_weights has no effect on the regression report
    assert export_text(reg, decimals=1, show_weights=True) == expected_report
    # Single-feature input with a custom feature name.
    X_single = [[-2], [-1], [-1], [1], [1], [2]]
    reg = DecisionTreeRegressor(max_depth=2, random_state=0)
    reg.fit(X_single, y_mo)
    expected_report = dedent(
        """
        |--- first <= 0.0
        |   |--- value: [-1.0, -1.0]
        |--- first > 0.0
        |   |--- value: [1.0, 1.0]
        """
    ).lstrip()
    assert export_text(reg, decimals=1, feature_names=["first"]) == expected_report
    assert (
        export_text(reg, decimals=1, show_weights=True, feature_names=["first"])
        == expected_report
    )
@pytest.mark.parametrize("constructor", [list, np.array])
def test_export_text_feature_class_names_array_support(constructor):
    # export_text must accept feature/class names given either as a plain
    # list or as a numpy array and render them identically.
    clf = DecisionTreeClassifier(max_depth=2, random_state=0)
    clf.fit(X, y)

    report_with_features = dedent(
        """
        |--- b <= 0.00
        |   |--- class: -1
        |--- b > 0.00
        |   |--- class: 1
        """
    ).lstrip()
    assert export_text(clf, feature_names=constructor(["a", "b"])) == report_with_features

    report_with_classes = dedent(
        """
        |--- feature_1 <= 0.00
        |   |--- class: cat
        |--- feature_1 > 0.00
        |   |--- class: dog
        """
    ).lstrip()
    assert export_text(clf, class_names=constructor(["cat", "dog"])) == report_with_classes
def test_plot_tree_entropy(pyplot):
    # Mostly a smoke test: plot the tree and verify the node/edge label
    # text produced for criterion="entropy".
    clf = DecisionTreeClassifier(
        max_depth=3, min_samples_split=2, criterion="entropy", random_state=2
    )
    clf.fit(X, y)

    nodes = plot_tree(clf, feature_names=["first feat", "sepal_width"])
    assert len(nodes) == 5
    expected_texts = [
        "first feat <= 0.0\nentropy = 1.0\nsamples = 6\nvalue = [3, 3]",
        "entropy = 0.0\nsamples = 3\nvalue = [3, 0]",
        "True ",
        "entropy = 0.0\nsamples = 3\nvalue = [0, 3]",
        " False",
    ]
    assert [node.get_text() for node in nodes] == expected_texts
@pytest.mark.parametrize("fontsize", [None, 10, 20])
def test_plot_tree_gini(pyplot, fontsize):
    # Smoke test for plot_tree with criterion="gini"; also checks that an
    # explicitly requested fontsize is applied to every node annotation.
    clf = DecisionTreeClassifier(
        max_depth=3,
        min_samples_split=2,
        criterion="gini",
        random_state=2,
    )
    clf.fit(X, y)

    nodes = plot_tree(
        clf, feature_names=["first feat", "sepal_width"], fontsize=fontsize
    )
    assert len(nodes) == 5
    if fontsize is not None:
        for node in nodes:
            assert node.get_fontsize() == fontsize
    expected_texts = [
        "first feat <= 0.0\ngini = 0.5\nsamples = 6\nvalue = [3, 3]",
        "gini = 0.0\nsamples = 3\nvalue = [3, 0]",
        "True ",
        "gini = 0.0\nsamples = 3\nvalue = [0, 3]",
        " False",
    ]
    assert [node.get_text() for node in nodes] == expected_texts
def test_not_fitted_tree(pyplot):
    # Plotting an unfitted estimator must raise NotFittedError.
    unfitted = DecisionTreeRegressor()
    with pytest.raises(NotFittedError):
        plot_tree(unfitted)
import sys
import re
import socket
import helpers
import options
import client
import server
import firewall
import hostwatch
from helpers import log, Fatal
# 1.2.3.4/5 or just 1.2.3.4
def parse_subnet4(s):
    """Parse an IPv4 subnet string into (AF_INET, 'a.b.c.d', width).

    Missing octets default to 0 and a missing /width defaults to 32.
    Raises Fatal on malformed input, octets > 255, or width > 32.
    """
    m = re.match(r'(\d+)(?:\.(\d+)\.(\d+)\.(\d+))?(?:/(\d+))?$', s)
    if not m:
        raise Fatal('%r is not a valid IP subnet format' % s)
    # Unmatched octet groups come back as None; treat them as 0.
    octets = tuple(int(part or 0) for part in m.groups()[:4])
    width = 32 if m.group(5) is None else int(m.group(5))
    if any(octet > 255 for octet in octets):
        raise Fatal('%d.%d.%d.%d has numbers > 255' % octets)
    if width > 32:
        raise Fatal('*/%d is greater than the maximum of 32' % width)
    return (socket.AF_INET, '%d.%d.%d.%d' % octets, width)
# 1:2::3/64 or just 1:2::3
def parse_subnet6(s):
    """Parse an IPv6 subnet string into (AF_INET6, address, width).

    A missing /width defaults to 128.  Raises Fatal on malformed input
    or width > 128.
    """
    m = re.match(r'(?:([a-fA-F\d:]+))?(?:/(\d+))?$', s)
    if not m:
        raise Fatal('%r is not a valid IP subnet format' % s)
    net, width_str = m.groups()
    width = 128 if width_str is None else int(width_str)
    if width > 128:
        raise Fatal('*/%d is greater than the maximum of 128' % width)
    return (socket.AF_INET6, net, width)
# Subnet file, supporting empty lines and hash-started comment lines
def parse_subnet_file(s):
    """Read subnet specifications from file `s`, one per line.

    Blank lines and lines whose first character is '#' are skipped.
    Returns the remaining lines, stripped of surrounding whitespace, in
    file order.  Raises Fatal if the file cannot be opened.
    """
    try:
        handle = open(s, 'r')
    except OSError:
        raise Fatal('Unable to open subnet file: %s' % s)
    # BUGFIX: the original never closed the file handle; iterate lazily
    # and always close it, even if stripping raises.
    try:
        config_lines = []
        for line in handle:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            config_lines.append(line)
        return config_lines
    finally:
        handle.close()
# list of:
# 1.2.3.4/5 or just 1.2.3.4
# 1:2::3/64 or just 1:2::3
def parse_subnets(subnets_str):
    """Parse a mixed sequence of IPv4/IPv6 subnet strings.

    Any string containing ':' is parsed as IPv6; everything else as IPv4.
    Returns a list of (family, address, width) tuples in input order.
    """
    return [
        parse_subnet6(s) if ':' in s else parse_subnet4(s)
        for s in subnets_str
    ]
# 1.2.3.4:567 or just 1.2.3.4 or just 567
def parse_ipport4(s):
    """Parse 'ip:port', a bare 'ip', or a bare 'port' into ('a.b.c.d', port).

    A missing address yields '0.0.0.0' and a missing port yields 0.
    Raises Fatal on malformed input, octets > 255, or port > 65535.
    """
    s = str(s)
    m = re.match(r'(?:(\d+)\.(\d+)\.(\d+)\.(\d+))?(?::)?(?:(\d+))?$', s)
    if not m:
        raise Fatal('%r is not a valid IP:port format' % s)
    (a, b, c, d, port) = m.groups()
    # Unmatched groups are None; coerce everything to int with 0 defaults.
    (a, b, c, d, port) = (int(a or 0), int(b or 0), int(c or 0), int(d or 0),
                          int(port or 0))
    if a > 255 or b > 255 or c > 255 or d > 255:
        raise Fatal('%d.%d.%d.%d has numbers > 255' % (a, b, c, d))
    if port > 65535:
        raise Fatal('*:%d is greater than the maximum of 65535' % port)
    # NOTE: the original also checked `if a is None` here, but `a` was
    # already coerced to an int above, so that branch was dead code and
    # has been removed (behavior is unchanged).
    return ('%d.%d.%d.%d' % (a, b, c, d), port)
# [1:2::3]:456 or [1:2::3] or 456
def parse_ipport6(s):
    """Parse '[ip]:port', '[ip]', or a bare 'port' into (ip, port).

    A missing address defaults to '::' and a missing port to 0.
    Raises Fatal on malformed input.
    """
    s = str(s)
    m = re.match(r'(?:\[([^]]*)])?(?::)?(?:(\d+))?$', s)
    if not m:
        raise Fatal('%s is not a valid IP:port format' % s)
    ip, port = m.groups()
    return (ip or '::', int(port or 0))
# Option specification consumed by options.Options: usage lines first,
# then '--', then one option per line ("short,long= description [default]").
# This string is parsed at runtime, so its exact text matters.
optspec = """
sshuttle [-l [ip:]port] [-r [username@]sshserver[:port]] <subnets...>
sshuttle --server
sshuttle --firewall <port> <subnets...>
sshuttle --hostwatch
--
l,listen= transproxy to this ip address and port number
H,auto-hosts scan for remote hostnames and update local /etc/hosts
N,auto-nets automatically determine subnets to route
dns capture local DNS requests and forward to the remote DNS server
method= auto, nat, tproxy, pf or ipfw
python= path to python interpreter on the remote server
r,remote= ssh hostname (and optional username) of remote sshuttle server
x,exclude= exclude this subnet (can be used more than once)
X,exclude-from= exclude the subnets in a file (whitespace separated)
v,verbose increase debug message verbosity
e,ssh-cmd= the command to use to connect to the remote [ssh]
seed-hosts= with -H, use these hostnames for initial scan (comma-separated)
no-latency-control sacrifice latency to improve bandwidth benchmarks
wrap= restart counting channel numbers after this number (for testing)
D,daemon run in the background as a daemon
s,subnets= file where the subnets are stored, instead of on the command line
syslog send log messages to syslog (default if you use --daemon)
pidfile= pidfile name (only if using --daemon) [./sshuttle.pid]
server (internal use only)
firewall (internal use only)
hostwatch (internal use only)
"""
o = options.Options(optspec)
# argv[0]/argv[1] are the wrapper and subcommand name; parse the rest.
(opt, flags, extra) = o.parse(sys.argv[2:])

if opt.daemon:
    # Daemon mode detaches stderr, so force syslog logging.
    opt.syslog = 1
if opt.wrap:
    # Testing hook: wrap channel numbers early to exercise reuse logic.
    import ssnet
    ssnet.MAX_CHANNEL = int(opt.wrap)
helpers.verbose = opt.verbose

try:
    if opt.server:
        # Internal mode: run the remote (server) half of sshuttle.
        if len(extra) != 0:
            o.fatal('no arguments expected')
        server.latency_control = opt.latency_control
        sys.exit(server.main())
    elif opt.firewall:
        # Internal mode: privileged firewall helper.
        if len(extra) != 6:
            o.fatal('exactly six arguments expected')
        sys.exit(firewall.main(int(extra[0]), int(extra[1]),
                               int(extra[2]), int(extra[3]),
                               extra[4], int(extra[5]), opt.syslog))
    elif opt.hostwatch:
        # Internal mode: hostname scanner for --auto-hosts.
        sys.exit(hostwatch.hw_main(extra))
    else:
        # Normal client mode.
        if len(extra) < 1 and not opt.auto_nets and not opt.subnets:
            o.fatal('at least one subnet, subnet file, or -N expected')
        includes = extra
        excludes = ['127.0.0.0/8']
        for k, v in flags:
            if k in ('-x', '--exclude'):
                excludes.append(v)
            if k in ('-X', '--exclude-from'):
                excludes += open(v).read().split()
        remotename = opt.remote
        if remotename == '' or remotename == '-':
            remotename = None
        if opt.seed_hosts and not opt.auto_hosts:
            o.fatal('--seed-hosts only works if you also use -H')
        if opt.seed_hosts:
            sh = re.split(r'[\s,]+', (opt.seed_hosts or "").strip())
        elif opt.auto_hosts:
            sh = []
        else:
            sh = None
        if opt.subnets:
            includes = parse_subnet_file(opt.subnets)
        if not opt.method:
            method = "auto"
        elif opt.method in ["auto", "nat", "tproxy", "ipfw", "pf"]:
            method = opt.method
        else:
            o.fatal("method %s not supported" % opt.method)
        if not opt.listen:
            # Default listeners: IPv4 loopback, plus IPv6 loopback when
            # using tproxy (the only IPv6-capable method).
            if opt.method == "tproxy":
                ipport_v6 = parse_ipport6('[::1]:0')
            else:
                ipport_v6 = None
            ipport_v4 = parse_ipport4('127.0.0.1:0')
        else:
            ipport_v6 = None
            ipport_v4 = None
            # Renamed from `list`, which shadowed the builtin.
            listen_addrs = opt.listen.split(",")
            for ip in listen_addrs:
                if '[' in ip and ']' in ip and opt.method == "tproxy":
                    ipport_v6 = parse_ipport6(ip)
                else:
                    ipport_v4 = parse_ipport4(ip)
        return_code = client.main(ipport_v6, ipport_v4,
                                  opt.ssh_cmd,
                                  remotename,
                                  opt.python,
                                  opt.latency_control,
                                  opt.dns,
                                  method,
                                  sh,
                                  opt.auto_nets,
                                  parse_subnets(includes),
                                  parse_subnets(excludes),
                                  opt.syslog, opt.daemon, opt.pidfile)
        if return_code == 0:
            log('Normal exit code, exiting...')
        else:
            # BUGFIX: the original message had no % placeholder, so the
            # string formatting raised TypeError instead of logging.
            log('Abnormal exit code %s detected, failing...' % return_code)
        sys.exit(return_code)
except Fatal as e:
    log('fatal: %s\n' % e)
    sys.exit(99)
except KeyboardInterrupt:
    log('\n')
    log('Keyboard interrupt: exiting.\n')
    sys.exit(1)
{
"schemaVersion": 1,
"title": "V2 Comprehensive Migration Test Dashboard",
"services": {
"filter": {
"time": {
"from": "now-6h",
"to": "now"
},
"list": [
{
"name": "server",
"type": "query",
"datasource": "prometheus",
"query": "label_values(server)"
},
{
"name": "env",
"type": "custom",
"options": [
{"text": "Production", "value": "prod"},
{"text": "Staging", "value": "stage"}
]
}
]
}
},
"panels": [
{
"id": 1,
"type": "graphite",
"title": "CPU Usage",
"legend": true,
"grid": {
"min": 0,
"max": 100
},
"y_format": "percent",
"y2_format": "short",
"targets": [
{
"refId": "A",
"target": "cpu.usage"
}
]
},
{
"id": 2,
"type": "graph",
"title": "Memory Usage",
"legend": false,
"grid": {
"min": 0
},
"y_format": "bytes",
"targets": [
{
"refId": "A",
"target": "memory.usage"
}
]
},
{
"id": 3,
"type": "table",
"title": "Server Stats",
"legend": true,
"grid": {
"min": 0,
"max": 100
},
"y_format": "short",
"y2_format": "bytes"
},
{
"id": 4,
"type": "graphite",
"title": "Disk I/O",
"legend": true,
"y2_format": "Bps",
"targets": [
{
"refId": "A",
"target": "disk.io"
}
]
}
]
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/testdata/input/v2.panels-and-services.json |
# -*- coding: utf-8 -*-
"""
Dependencies: flask, tornado
"""
from __future__ import absolute_import, division, print_function
import random
import math
from flask import request, current_app, url_for
from ibeis.control import controller_inject
from ibeis import constants as const
from ibeis.constants import KEY_DEFAULTS, SPECIES_KEY
from ibeis.web import appfuncs as appf
from ibeis.web import routes_ajax
import utool as ut
import vtool as vt
import numpy as np
register_route = controller_inject.get_ibeis_flask_route(__name__)
@register_route('/', methods=['GET'])
def root():
    # Landing page: render the default template with no specific view.
    return appf.template(None)
@register_route('/view/', methods=['GET'])
def view():
    # Dashboard view: builds cumulative sighting curves, per-day bar
    # charts, GPS markers/tracks, demographic (age/sex) histograms, and a
    # Petersen-Lincoln population estimate, then renders the 'view'
    # template.  All data may be restricted to one imageset via the
    # 'imgsetid' request arg.
    def _date_list(gid_list):
        # Map image rowids to 'YYYY/MM/DD' strings; images without a
        # usable timestamp become 'UNKNOWN'.
        unixtime_list = ibs.get_image_unixtime(gid_list)
        datetime_list = [
            ut.unixtime_to_datetimestr(unixtime)
            if unixtime is not None else
            'UNKNOWN'
            for unixtime in unixtime_list
        ]
        datetime_split_list = [ datetime.split(' ') for datetime in datetime_list ]
        date_list = [ datetime_split[0] if len(datetime_split) == 2 else 'UNKNOWN' for datetime_split in datetime_split_list ]
        return date_list

    def filter_annots_imageset(aid_list):
        # Keep only annotations in the imageset named by the 'imgsetid'
        # request arg; any parse/validation error returns the list as-is.
        try:
            imgsetid = request.args.get('imgsetid', '')
            imgsetid = int(imgsetid)
            imgsetid_list = ibs.get_valid_imgsetids()
            assert imgsetid in imgsetid_list
        except:
            print('ERROR PARSING IMAGESET ID FOR ANNOTATION FILTERING')
            return aid_list
        imgsetids_list = ibs.get_annot_imgsetids(aid_list)
        aid_list = [
            aid
            for aid, imgsetid_list_ in zip(aid_list, imgsetids_list)
            if imgsetid in imgsetid_list_
        ]
        return aid_list

    def filter_images_imageset(gid_list):
        # Same imageset filter as above, applied to image rowids.
        try:
            imgsetid = request.args.get('imgsetid', '')
            imgsetid = int(imgsetid)
            imgsetid_list = ibs.get_valid_imgsetids()
            assert imgsetid in imgsetid_list
        except:
            print('ERROR PARSING IMAGESET ID FOR IMAGE FILTERING')
            return gid_list
        imgsetids_list = ibs.get_image_imgsetids(gid_list)
        gid_list = [
            gid
            for gid, imgsetid_list_ in zip(gid_list, imgsetids_list)
            if imgsetid in imgsetid_list_
        ]
        return gid_list

    def filter_names_imageset(nid_list):
        # Same imageset filter applied to name rowids: a name passes if
        # any of its annotations belongs to the requested imageset.
        try:
            imgsetid = request.args.get('imgsetid', '')
            imgsetid = int(imgsetid)
            imgsetid_list = ibs.get_valid_imgsetids()
            assert imgsetid in imgsetid_list
        except:
            print('ERROR PARSING IMAGESET ID FOR ANNOTATION FILTERING')
            return nid_list
        aids_list = ibs.get_name_aids(nid_list)
        imgsetids_list = [
            set(ut.flatten(ibs.get_annot_imgsetids(aid_list)))
            for aid_list in aids_list
        ]
        nid_list = [
            nid
            for nid, imgsetid_list_ in zip(nid_list, imgsetids_list)
            if imgsetid in imgsetid_list_
        ]
        return nid_list

    ibs = current_app.ibs
    # Baseline annotation filter: known names, good quality, right-side views.
    filter_kw = {
        'multiple': None,
        'minqual': 'good',
        'is_known': True,
        'min_pername': 1,
        'view': ['right'],
    }
    aid_list = ibs.get_valid_aids()
    aid_list = ibs.filter_annots_general(aid_list, filter_kw=filter_kw)
    aid_list = filter_annots_imageset(aid_list)
    gid_list = ibs.get_annot_gids(aid_list)
    unixtime_list = ibs.get_image_unixtime(gid_list)
    nid_list = ibs.get_annot_name_rowids(aid_list)
    date_list = _date_list(gid_list)
    # Only these event dates are charted (hard-coded census weekend).
    flagged_date_list = ['2016/01/29', '2016/01/30', '2016/01/31', '2016/02/01']
    # date_taken_dict[date] = [total images taken, images with counted annots]
    gid_list_unique = list(set(gid_list))
    date_list_unique = _date_list(gid_list_unique)
    date_taken_dict = {}
    for gid, date in zip(gid_list_unique, date_list_unique):
        if date not in flagged_date_list:
            continue
        if date not in date_taken_dict:
            date_taken_dict[date] = [0, 0]
        date_taken_dict[date][1] += 1
    gid_list_all = ibs.get_valid_gids()
    gid_list_all = filter_images_imageset(gid_list_all)
    date_list_all = _date_list(gid_list_all)
    for gid, date in zip(gid_list_all, date_list_all):
        if date not in flagged_date_list:
            continue
        if date in date_taken_dict:
            date_taken_dict[date][0] += 1
    # Cumulative discovery curve: iterate sightings in time order and
    # count first-ever and per-day sighting events.
    # date_seen_dict[date] = [annots, names seen today,
    #                         names first seen today, resights from previous day]
    value = 0
    label_list = []
    value_list = []
    index_list = []
    seen_set = set()
    current_seen_set = set()
    previous_seen_set = set()
    last_date = None
    date_seen_dict = {}
    for index, (unixtime, aid, nid, date) in enumerate(sorted(zip(unixtime_list, aid_list, nid_list, date_list))):
        if date not in flagged_date_list:
            continue
        index_list.append(index + 1)
        # Add to counters
        if date not in date_seen_dict:
            date_seen_dict[date] = [0, 0, 0, 0]
        date_seen_dict[date][0] += 1
        if nid not in current_seen_set:
            current_seen_set.add(nid)
            date_seen_dict[date][1] += 1
            if nid in previous_seen_set:
                date_seen_dict[date][3] += 1
        if nid not in seen_set:
            seen_set.add(nid)
            value += 1
            date_seen_dict[date][2] += 1
        # Add to register
        value_list.append(value)
        # Reset step (per day)
        if date != last_date and date != 'UNKNOWN':
            last_date = date
            previous_seen_set = set(current_seen_set)
            current_seen_set = set()
            label_list.append(date)
        else:
            label_list.append('')
    # Disabled curve-fitting experiment for extrapolating the discovery
    # curve; kept for reference.
    # def optimization1(x, a, b, c):
    # return a * np.log(b * x) + c
    # def optimization2(x, a, b, c):
    # return a * np.sqrt(x) ** b + c
    # def optimization3(x, a, b, c):
    # return 1.0 / (a * np.exp(-b * x) + c)
    # def process(func, opts, domain, zero_index, zero_value):
    # values = func(domain, *opts)
    # diff = values[zero_index] - zero_value
    # values -= diff
    # values[ values < 0.0 ] = 0.0
    # values[:zero_index] = 0.0
    # values = values.astype(int)
    # return list(values)
    # optimization_funcs = [
    # optimization1,
    # optimization2,
    # optimization3,
    # ]
    # # Get data
    # x = np.array(index_list)
    # y = np.array(value_list)
    # # Fit curves
    # end = int(len(index_list) * 1.25)
    # domain = np.array(range(1, end))
    # zero_index = len(value_list) - 1
    # zero_value = value_list[zero_index]
    # regressed_opts = [ curve_fit(func, x, y)[0] for func in optimization_funcs ]
    # prediction_list = [
    # process(func, opts, domain, zero_index, zero_value)
    # for func, opts in zip(optimization_funcs, regressed_opts)
    # ]
    # index_list = list(domain)
    prediction_list = []
    date_seen_dict.pop('UNKNOWN', None)
    bar_label_list = sorted(date_seen_dict.keys())
    bar_value_list1 = [ date_taken_dict[date][0] for date in bar_label_list ]
    bar_value_list2 = [ date_taken_dict[date][1] for date in bar_label_list ]
    bar_value_list3 = [ date_seen_dict[date][0] for date in bar_label_list ]
    bar_value_list4 = [ date_seen_dict[date][1] for date in bar_label_list ]
    bar_value_list5 = [ date_seen_dict[date][2] for date in bar_label_list ]
    bar_value_list6 = [ date_seen_dict[date][3] for date in bar_label_list ]
    # label_list += ['Models'] + [''] * (len(index_list) - len(label_list) - 1)
    # value_list += [0] * (len(index_list) - len(value_list))
    # Counts
    imgsetid_list = ibs.get_valid_imgsetids()
    gid_list = ibs.get_valid_gids()
    gid_list = filter_images_imageset(gid_list)
    aid_list = ibs.get_valid_aids()
    aid_list = filter_annots_imageset(aid_list)
    nid_list = ibs.get_valid_nids()
    nid_list = filter_names_imageset(nid_list)
    # contrib_list = ibs.get_valid_contrib_rowids()
    # Contributor identity is encoded in the image note (everything before
    # the final comma-separated token).
    note_list = ibs.get_image_notes(gid_list)
    note_list = [
        ','.join(note.split(',')[:-1])
        for note in note_list
    ]
    contrib_list = set(note_list)
    # nid_list = ibs.get_valid_nids()
    aid_list_count = ibs.filter_annots_general(aid_list, filter_kw=filter_kw)
    aid_list_count = filter_annots_imageset(aid_list_count)
    gid_list_count = list(set(ibs.get_annot_gids(aid_list_count)))
    nid_list_count_dup = ibs.get_annot_name_rowids(aid_list_count)
    nid_list_count = list(set(nid_list_count_dup))
    # Calculate the Petersen-Lincoln index form the last two days
    from ibeis.other import dbinfo as dbinfo_
    try:
        try:
            # NOTE(review): this unconditional raise makes the
            # estimate_ggr_count() path below unreachable — presumably a
            # deliberate switch to force the sight/resight fallback; confirm.
            raise KeyError()
            vals = dbinfo_.estimate_ggr_count(ibs)
            nsight1, nsight2, resight, pl_index, pl_error = vals
            # pl_index = 'Undefined - Zero recaptured (k = 0)'
        except KeyError:
            # Sight/resight estimate from the two main census days.
            index1 = bar_label_list.index('2016/01/30')
            index2 = bar_label_list.index('2016/01/31')
            c1 = bar_value_list4[index1]
            c2 = bar_value_list4[index2]
            c3 = bar_value_list6[index2]
            pl_index, pl_error = dbinfo_.sight_resight_count(c1, c2, c3)
    except (IndexError, ValueError):
        # Census dates missing from the data: no estimate available.
        pl_index = 0
        pl_error = 0
    # Get the markers
    gid_list_markers = ibs.get_annot_gids(aid_list_count)
    gps_list_markers = map(list, ibs.get_image_gps(gid_list_markers))
    gps_list_markers_all = map(list, ibs.get_image_gps(gid_list))
    REMOVE_DUP_CODE = True
    if not REMOVE_DUP_CODE:
        # Get the tracks
        nid_track_dict = ut.ddict(list)
        for nid, gps in zip(nid_list_count_dup, gps_list_markers):
            if gps[0] == -1.0 and gps[1] == -1.0:
                continue
            nid_track_dict[nid].append(gps)
        gps_list_tracks = [ nid_track_dict[nid] for nid in sorted(nid_track_dict.keys()) ]
    else:
        __nid_list, gps_track_list, aid_track_list = ibs.get_name_gps_tracks(aid_list=aid_list_count)
        gps_list_tracks = list(map(lambda x: list(map(list, x)), gps_track_list))
    # (-1, -1) is the sentinel for "no GPS"; drop those points everywhere.
    gps_list_markers = [ gps for gps in gps_list_markers if tuple(gps) != (-1, -1, ) ]
    gps_list_markers_all = [ gps for gps in gps_list_markers_all if tuple(gps) != (-1, -1, ) ]
    gps_list_tracks = [
        [ gps for gps in gps_list_track if tuple(gps) != (-1, -1, ) ]
        for gps_list_track in gps_list_tracks
    ]
    valid_aids = ibs.get_valid_aids()
    valid_aids = filter_annots_imageset(valid_aids)
    used_gids = list(set( ibs.get_annot_gids(valid_aids) ))
    # used_contrib_tags = list(set( ibs.get_image_contributor_tag(used_gids) ))
    note_list = ibs.get_image_notes(used_gids)
    note_list = [
        ','.join(note.split(',')[:-1])
        for note in note_list
    ]
    used_contrib_tags = set(note_list)
    # Get Age and sex (By Annot)
    # annot_sex_list = ibs.get_annot_sex(valid_aids_)
    # annot_age_months_est_min = ibs.get_annot_age_months_est_min(valid_aids_)
    # annot_age_months_est_max = ibs.get_annot_age_months_est_max(valid_aids_)
    # age_list = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
    # for sex, min_age, max_age in zip(annot_sex_list, annot_age_months_est_min, annot_age_months_est_max):
    # if sex not in [0, 1]:
    # sex = 2
    # # continue
    # if (min_age is None or min_age < 12) and max_age < 12:
    # age_list[sex][0] += 1
    # elif 12 <= min_age and min_age < 36 and 12 <= max_age and max_age < 36:
    # age_list[sex][1] += 1
    # elif 36 <= min_age and (36 <= max_age or max_age is None):
    # age_list[sex][2] += 1
    # Get Age and sex (By Name)
    # age_list[sex] buckets: [<1yr, 1-2yr, 2-3yr, 3+yr]; sex 2 = unknown.
    name_sex_list = ibs.get_name_sex(nid_list_count)
    name_age_months_est_mins_list = ibs.get_name_age_months_est_min(nid_list_count)
    name_age_months_est_maxs_list = ibs.get_name_age_months_est_max(nid_list_count)
    age_list = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
    age_unreviewed = 0
    age_ambiguous = 0
    for nid, sex, min_ages, max_ages in zip(nid_list_count, name_sex_list, name_age_months_est_mins_list, name_age_months_est_maxs_list):
        if len(set(min_ages)) > 1 or len(set(max_ages)) > 1:
            # print('[web] Invalid name %r: Cannot have more than one age' % (nid, ))
            age_ambiguous += 1
            continue
        min_age = None
        max_age = None
        if len(min_ages) > 0:
            min_age = min_ages[0]
        if len(max_ages) > 0:
            max_age = max_ages[0]
        # Histogram
        # NOTE(review): `is -1` relies on CPython small-int caching and
        # should probably be `== -1` — confirm before changing.
        if (min_age is None and max_age is None) or (min_age is -1 and max_age is -1):
            # print('[web] Unreviewded name %r: Specify the age for the name' % (nid, ))
            age_unreviewed += 1
            continue
        if sex not in [0, 1]:
            sex = 2
            # continue
        if (min_age is None or min_age < 12) and max_age < 12:
            age_list[sex][0] += 1
        elif 12 <= min_age and min_age < 24 and 12 <= max_age and max_age < 24:
            age_list[sex][1] += 1
        elif 24 <= min_age and min_age < 36 and 24 <= max_age and max_age < 36:
            age_list[sex][2] += 1
        elif 36 <= min_age and (36 <= max_age or max_age is None):
            age_list[sex][3] += 1
    age_total = sum(map(sum, age_list)) + age_unreviewed + age_ambiguous
    # Avoid ZeroDivisionError in the percentage formatter below.
    age_total = np.nan if age_total == 0 else age_total
    age_fmt_str = (lambda x: '% 4d (% 2.02f%%)' % (x, 100 * x / age_total, ))
    age_str_list = [
        [
            age_fmt_str(age)
            for age in age_list_
        ]
        for age_list_ in age_list
    ]
    age_str_list.append(age_fmt_str(age_unreviewed))
    age_str_list.append(age_fmt_str(age_ambiguous))
    # dbinfo_str = dbinfo()
    dbinfo_str = 'SKIPPED DBINFO'
    path_dict = ibs.compute_ggr_path_dict()
    # 'North' and 'Core' regions are excluded from the rendered map.
    if 'North' in path_dict:
        path_dict.pop('North')
    if 'Core' in path_dict:
        path_dict.pop('Core')
    return appf.template('view',
                         line_index_list=index_list,
                         line_label_list=label_list,
                         line_value_list=value_list,
                         prediction_list=prediction_list,
                         pl_index=pl_index,
                         pl_error=pl_error,
                         gps_list_markers=gps_list_markers,
                         gps_list_markers_all=gps_list_markers_all,
                         gps_list_tracks=gps_list_tracks,
                         path_dict=path_dict,
                         bar_label_list=bar_label_list,
                         bar_value_list1=bar_value_list1,
                         bar_value_list2=bar_value_list2,
                         bar_value_list3=bar_value_list3,
                         bar_value_list4=bar_value_list4,
                         bar_value_list5=bar_value_list5,
                         bar_value_list6=bar_value_list6,
                         age_list=age_list,
                         age_str_list=age_str_list,
                         age_ambiguous=age_ambiguous,
                         age_unreviewed=age_unreviewed,
                         age_total=age_total,
                         dbinfo_str=dbinfo_str,
                         imgsetid_list=imgsetid_list,
                         imgsetid_list_str=','.join(map(str, imgsetid_list)),
                         num_imgsetids=len(imgsetid_list),
                         gid_list=gid_list,
                         gid_list_str=','.join(map(str, gid_list)),
                         num_gids=len(gid_list),
                         contrib_list=contrib_list,
                         contrib_list_str=','.join(map(str, contrib_list)),
                         num_contribs=len(contrib_list),
                         gid_list_count=gid_list_count,
                         gid_list_count_str=','.join(map(str, gid_list_count)),
                         num_gids_count=len(gid_list_count),
                         aid_list=aid_list,
                         aid_list_str=','.join(map(str, aid_list)),
                         num_aids=len(aid_list),
                         aid_list_count=aid_list_count,
                         aid_list_count_str=','.join(map(str, aid_list_count)),
                         num_aids_count=len(aid_list_count),
                         nid_list=nid_list,
                         nid_list_str=','.join(map(str, nid_list)),
                         num_nids=len(nid_list),
                         nid_list_count=nid_list_count,
                         nid_list_count_str=','.join(map(str, nid_list_count)),
                         num_nids_count=len(nid_list_count),
                         used_gids=used_gids,
                         num_used_gids=len(used_gids),
                         used_contribs=used_contrib_tags,
                         num_used_contribs=len(used_contrib_tags),
                         __wrapper_header__=False)
@register_route('/view/imagesets/', methods=['GET'])
def view_imagesets():
    # Summary table of imagesets with per-set review progress (images,
    # annotation viewpoints, annotation qualities), optionally filtered to
    # the comma-separated 'imgsetid' request arg.
    ibs = current_app.ibs
    filtered = True
    imgsetid = request.args.get('imgsetid', '')
    if len(imgsetid) > 0:
        imgsetid_list = imgsetid.strip().split(',')
        imgsetid_list = [ None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_) for imgsetid_ in imgsetid_list ]
    else:
        imgsetid_list = ibs.get_valid_imgsetids()
        filtered = False
    start_time_posix_list = ibs.get_imageset_start_time_posix(imgsetid_list)
    datetime_list = [
        ut.unixtime_to_datetimestr(start_time_posix)
        if start_time_posix is not None else
        'Unknown'
        for start_time_posix in start_time_posix_list
    ]
    gids_list = [ ibs.get_valid_gids(imgsetid=imgsetid_) for imgsetid_ in imgsetid_list ]
    aids_list = [ ut.flatten(ibs.get_image_aids(gid_list)) for gid_list in gids_list ]
    images_reviewed_list = [ appf.imageset_image_processed(ibs, gid_list) for gid_list in gids_list ]
    annots_reviewed_viewpoint_list = [ appf.imageset_annot_viewpoint_processed(ibs, aid_list) for aid_list in aids_list ]
    annots_reviewed_quality_list = [ appf.imageset_annot_quality_processed(ibs, aid_list) for aid_list in aids_list ]
    image_processed_list = [ images_reviewed.count(True) for images_reviewed in images_reviewed_list ]
    annot_processed_viewpoint_list = [ annots_reviewed.count(True) for annots_reviewed in annots_reviewed_viewpoint_list ]
    annot_processed_quality_list = [ annots_reviewed.count(True) for annots_reviewed in annots_reviewed_quality_list ]
    # An imageset is fully reviewed only when all three checklists pass.
    reviewed_list = [ all(images_reviewed) and all(annots_reviewed_viewpoint) and all(annot_processed_quality) for images_reviewed, annots_reviewed_viewpoint, annot_processed_quality in zip(images_reviewed_list, annots_reviewed_viewpoint_list, annots_reviewed_quality_list) ]
    # NOTE(review): zip() must yield a list for .sort() below, i.e. this
    # code is Python 2 only; under Python 3 a zip object has no .sort().
    imageset_list = zip(
        imgsetid_list,
        ibs.get_imageset_text(imgsetid_list),
        ibs.get_imageset_num_gids(imgsetid_list),
        image_processed_list,
        ibs.get_imageset_num_aids(imgsetid_list),
        annot_processed_viewpoint_list,
        annot_processed_quality_list,
        start_time_posix_list,
        datetime_list,
        reviewed_list,
    )
    # Sort by imageset start time (tuple index 7).
    imageset_list.sort(key=lambda t: t[7])
    return appf.template('view', 'imagesets',
                         filtered=filtered,
                         imgsetid_list=imgsetid_list,
                         imgsetid_list_str=','.join(map(str, imgsetid_list)),
                         num_imgsetids=len(imgsetid_list),
                         imageset_list=imageset_list,
                         num_imagesets=len(imageset_list))
@register_route('/view/image/<gid>/', methods=['GET'])
def image_view_api(gid=None, thumbnail=False, fresh=False, **kwargs):
    r"""
    Returns the base64 encoded image of image <gid>

    RESTful:
        Method: GET
        URL: /image/view/<gid>/
    """
    # Delegate the encoding to the ajax helper, then render it alone in
    # the 'single' template.
    encoded = routes_ajax.image_src(gid, thumbnail=thumbnail, fresh=fresh, **kwargs)
    return appf.template(None, 'single', encoded=encoded)
@register_route('/view/images/', methods=['GET'])
def view_images():
    """Paginated image table, optionally filtered by image ids or imageset.

    Query args:
        gid: comma-separated image rowids to show.
        imgsetid: comma-separated imageset rowids whose images to show.
        page: 1-based page number (appf.PAGE_SIZE rows per page).
    """
    ibs = current_app.ibs
    filtered = True
    imgsetid_list = []
    gid = request.args.get('gid', '')
    imgsetid = request.args.get('imgsetid', '')
    page = max(0, int(request.args.get('page', 1)))
    if len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
    elif len(imgsetid) > 0:
        imgsetid_list = imgsetid.strip().split(',')
        imgsetid_list = [ None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_) for imgsetid_ in imgsetid_list ]
        # BUGFIX: the comprehension previously passed `imgsetid` (the raw
        # request string) instead of each parsed `imgsetid_`, so the parsed
        # ids were ignored; this now matches view_annotations.
        gid_list = ut.flatten([ ibs.get_valid_gids(imgsetid=imgsetid_) for imgsetid_ in imgsetid_list ])
    else:
        gid_list = ibs.get_valid_gids()
        filtered = False
    # Page
    page_start = min(len(gid_list), (page - 1) * appf.PAGE_SIZE)
    page_end = min(len(gid_list), page * appf.PAGE_SIZE)
    page_total = int(math.ceil(len(gid_list) / appf.PAGE_SIZE))
    page_previous = None if page_start == 0 else page - 1
    page_next = None if page_end == len(gid_list) else page + 1
    gid_list = gid_list[page_start:page_end]
    print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(gid_list), page_previous, page_next, ))
    image_unixtime_list = ibs.get_image_unixtime(gid_list)
    datetime_list = [
        ut.unixtime_to_datetimestr(image_unixtime)
        if image_unixtime is not None
        else
        'Unknown'
        for image_unixtime in image_unixtime_list
    ]
    # NOTE(review): zip() must yield a list for .sort() below (Python 2).
    image_list = zip(
        gid_list,
        [ ','.join(map(str, imgsetid_list_)) for imgsetid_list_ in ibs.get_image_imgsetids(gid_list) ],
        ibs.get_image_gnames(gid_list),
        image_unixtime_list,
        datetime_list,
        ibs.get_image_gps(gid_list),
        ibs.get_image_party_tag(gid_list),
        ibs.get_image_contributor_tag(gid_list),
        ibs.get_image_notes(gid_list),
        appf.imageset_image_processed(ibs, gid_list),
    )
    # Sort rows by image unixtime (tuple index 3).
    image_list.sort(key=lambda t: t[3])
    return appf.template('view', 'images',
                         filtered=filtered,
                         imgsetid_list=imgsetid_list,
                         imgsetid_list_str=','.join(map(str, imgsetid_list)),
                         num_imgsetids=len(imgsetid_list),
                         gid_list=gid_list,
                         gid_list_str=','.join(map(str, gid_list)),
                         num_gids=len(gid_list),
                         image_list=image_list,
                         num_images=len(image_list),
                         page=page,
                         page_start=page_start,
                         page_end=page_end,
                         page_total=page_total,
                         page_previous=page_previous,
                         page_next=page_next)
@register_route('/view/annotations/', methods=['GET'])
def view_annotations():
    """Render a paginated table of annotations.

    Optional request args (comma-separated id lists): ``aid``, ``gid``,
    ``imgsetid``; ``page`` selects the 1-based page.  With no filter args,
    all valid annotations are shown.
    """
    ibs = current_app.ibs
    filtered = True
    imgsetid_list = []
    gid_list = []
    aid = request.args.get('aid', '')
    gid = request.args.get('gid', '')
    imgsetid = request.args.get('imgsetid', '')
    page = max(0, int(request.args.get('page', 1)))
    if len(aid) > 0:
        aid_list = aid.strip().split(',')
        aid_list = [ None if aid_ == 'None' or aid_ == '' else int(aid_) for aid_ in aid_list ]
    elif len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
    elif len(imgsetid) > 0:
        imgsetid_list = imgsetid.strip().split(',')
        imgsetid_list = [ None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_) for imgsetid_ in imgsetid_list ]
        gid_list = ut.flatten([ ibs.get_valid_gids(imgsetid=imgsetid_) for imgsetid_ in imgsetid_list ])
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
    else:
        aid_list = ibs.get_valid_aids()
        filtered = False
    # Page bounds.  BUGFIX: clamp page_start at 0 (``page=0`` passes the
    # max(0, ...) guard above and previously produced a negative slice
    # index); use float division so the ceil is also correct under
    # Python 2 integer division.
    page_start = max(0, min(len(aid_list), (page - 1) * appf.PAGE_SIZE))
    page_end = min(len(aid_list), page * appf.PAGE_SIZE)
    page_total = int(math.ceil(len(aid_list) / float(appf.PAGE_SIZE)))
    page_previous = None if page_start == 0 else page - 1
    page_next = None if page_end == len(aid_list) else page + 1
    aid_list = aid_list[page_start:page_end]
    print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(aid_list), page_previous, page_next, ))
    # BUGFIX: zip() returns an iterator on Python 3 and has no .sort();
    # materialize it as a list before sorting.
    annotation_list = list(zip(
        aid_list,
        ibs.get_annot_gids(aid_list),
        [ ','.join(map(str, imgsetid_list_)) for imgsetid_list_ in ibs.get_annot_imgsetids(aid_list) ],
        ibs.get_annot_image_names(aid_list),
        ibs.get_annot_names(aid_list),
        ibs.get_annot_exemplar_flags(aid_list),
        ibs.get_annot_species_texts(aid_list),
        ibs.get_annot_yaw_texts(aid_list),
        ibs.get_annot_quality_texts(aid_list),
        ibs.get_annot_sex_texts(aid_list),
        ibs.get_annot_age_months_est(aid_list),
        ibs.get_annot_reviewed(aid_list),
    ))
    annotation_list.sort(key=lambda t: t[0])
    return appf.template('view', 'annotations',
                         filtered=filtered,
                         imgsetid_list=imgsetid_list,
                         imgsetid_list_str=','.join(map(str, imgsetid_list)),
                         num_imgsetids=len(imgsetid_list),
                         gid_list=gid_list,
                         gid_list_str=','.join(map(str, gid_list)),
                         num_gids=len(gid_list),
                         aid_list=aid_list,
                         aid_list_str=','.join(map(str, aid_list)),
                         num_aids=len(aid_list),
                         annotation_list=annotation_list,
                         num_annotations=len(annotation_list),
                         page=page,
                         page_start=page_start,
                         page_end=page_end,
                         page_total=page_total,
                         page_previous=page_previous,
                         page_next=page_next)
@register_route('/view/names/', methods=['GET'])
def view_names():
    """Render a paginated table of names, each with its annotation rows.

    Optional request args (comma-separated id lists): ``nid``, ``aid``,
    ``gid``, ``imgsetid``; ``page`` selects the 1-based page.  Names are
    paged at one fifth of the normal page size.
    """
    ibs = current_app.ibs
    filtered = True
    aid_list = []
    imgsetid_list = []
    gid_list = []
    nid = request.args.get('nid', '')
    aid = request.args.get('aid', '')
    gid = request.args.get('gid', '')
    imgsetid = request.args.get('imgsetid', '')
    page = max(0, int(request.args.get('page', 1)))
    if len(nid) > 0:
        nid_list = nid.strip().split(',')
        nid_list = [ None if nid_ == 'None' or nid_ == '' else int(nid_) for nid_ in nid_list ]
    # NOTE(review): this is ``if``, not ``elif`` — an explicit ``aid`` arg
    # silently overrides any ``nid`` arg above; confirm that is intended.
    if len(aid) > 0:
        aid_list = aid.strip().split(',')
        aid_list = [ None if aid_ == 'None' or aid_ == '' else int(aid_) for aid_ in aid_list ]
        nid_list = ibs.get_annot_name_rowids(aid_list)
    elif len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
        nid_list = ibs.get_annot_name_rowids(aid_list)
    elif len(imgsetid) > 0:
        imgsetid_list = imgsetid.strip().split(',')
        imgsetid_list = [ None if imgsetid_ == 'None' or imgsetid_ == '' else int(imgsetid_) for imgsetid_ in imgsetid_list ]
        gid_list = ut.flatten([ ibs.get_valid_gids(imgsetid=imgsetid_) for imgsetid_ in imgsetid_list ])
        aid_list = ut.flatten(ibs.get_image_aids(gid_list))
        nid_list = ibs.get_annot_name_rowids(aid_list)
    else:
        nid_list = ibs.get_valid_nids()
        filtered = False
    # Page (names use a 5x smaller page size; the module attribute
    # assignment is kept for backward compatibility with any readers of
    # appf.PAGE_SIZE_).  BUGFIX: clamp page_start at 0 and use float
    # division so the ceil is correct under Python 2 as well.
    appf.PAGE_SIZE_ = int(appf.PAGE_SIZE / 5)
    page_start = max(0, min(len(nid_list), (page - 1) * appf.PAGE_SIZE_))
    page_end = min(len(nid_list), page * appf.PAGE_SIZE_)
    page_total = int(math.ceil(len(nid_list) / float(appf.PAGE_SIZE_)))
    page_previous = None if page_start == 0 else page - 1
    page_next = None if page_end == len(nid_list) else page + 1
    nid_list = nid_list[page_start:page_end]
    print('[web] Loading Page [ %d -> %d ] (%d), Prev: %s, Next: %s' % (page_start, page_end, len(nid_list), page_previous, page_next, ))
    aids_list = ibs.get_name_aids(nid_list)
    # BUGFIX: zip() is an iterator on Python 3; materialize each per-name
    # annotation table (templates may iterate it more than once) and the
    # outer name list (which is sorted below).
    annotations_list = [ list(zip(
        aid_list_,
        ibs.get_annot_gids(aid_list_),
        [ ','.join(map(str, imgsetid_list_)) for imgsetid_list_ in ibs.get_annot_imgsetids(aid_list_) ],
        ibs.get_annot_image_names(aid_list_),
        ibs.get_annot_names(aid_list_),
        ibs.get_annot_exemplar_flags(aid_list_),
        ibs.get_annot_species_texts(aid_list_),
        ibs.get_annot_yaw_texts(aid_list_),
        ibs.get_annot_quality_texts(aid_list_),
        ibs.get_annot_sex_texts(aid_list_),
        ibs.get_annot_age_months_est(aid_list_),
        [ reviewed_viewpoint and reviewed_quality for reviewed_viewpoint, reviewed_quality in zip(appf.imageset_annot_viewpoint_processed(ibs, aid_list_), appf.imageset_annot_quality_processed(ibs, aid_list_)) ],
    )) for aid_list_ in aids_list ]
    name_list = list(zip(
        nid_list,
        annotations_list
    ))
    name_list.sort(key=lambda t: t[0])
    return appf.template('view', 'names',
                         filtered=filtered,
                         imgsetid_list=imgsetid_list,
                         imgsetid_list_str=','.join(map(str, imgsetid_list)),
                         num_imgsetids=len(imgsetid_list),
                         gid_list=gid_list,
                         gid_list_str=','.join(map(str, gid_list)),
                         num_gids=len(gid_list),
                         aid_list=aid_list,
                         aid_list_str=','.join(map(str, aid_list)),
                         num_aids=len(aid_list),
                         nid_list=nid_list,
                         nid_list_str=','.join(map(str, nid_list)),
                         num_nids=len(nid_list),
                         name_list=name_list,
                         num_names=len(name_list),
                         page=page,
                         page_start=page_start,
                         page_end=page_end,
                         page_total=page_total,
                         page_previous=page_previous,
                         page_next=page_next)
@register_route('/turk/', methods=['GET'])
def turk():
    """Turk landing page; forwards an optional integer ``imgsetid`` arg."""
    imgsetid_arg = request.args.get('imgsetid', '')
    imgsetid = int(imgsetid_arg) if imgsetid_arg not in ('None', '') else None
    return appf.template('turk', None, imgsetid=imgsetid)
def _make_review_image_info(ibs, gid):
    """Build the per-annotation review payload for one image.

    Returns a list of dicts, one per annotation on image ``gid``, holding
    percent-based bbox coordinates plus label / id / theta / tags.

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.web.apis_detect import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> gid = ibs.get_valid_gids()[0]
    """
    # Shows how to use new object-like interface to populate data
    import numpy as np
    image = ibs.images([gid])[0]
    annots = image.annots
    width, height = image.sizes
    bbox_denom = np.array([width, height, width, height])
    annotation_list = []
    for aid in annots.aids:
        annot_ = ibs.annots(aid)[0]
        bbox = np.array(annot_.bboxes)
        # Normalize pixel coordinates to percentages of the image size.
        bbox_percent = bbox / bbox_denom * 100
        temp = {
            'left'   : bbox_percent[0],
            'top'    : bbox_percent[1],
            'width'  : bbox_percent[2],
            'height' : bbox_percent[3],
            'label'  : annot_.species,
            'id'     : annot_.aids,
            'theta'  : annot_.thetas,
            'tags'   : annot_.case_tags,
        }
        annotation_list.append(temp)
    # BUGFIX: the list was previously built and then discarded — the
    # function implicitly returned None.
    return annotation_list
@register_route('/turk/detection/', methods=['GET'])
def turk_detection():
    """Serve the detection-review turk page for one image.

    Picks the requested ``gid`` or a random unreviewed image from the
    (optional) imageset, embeds its thumbnail, and lists its annotation
    boxes as percentages of the image size.
    """
    ibs = current_app.ibs
    refer_aid = request.args.get('refer_aid', None)
    imgsetid = request.args.get('imgsetid', '')
    imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)
    gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
    reviewed_list = appf.imageset_image_processed(ibs, gid_list)
    # ROBUSTNESS: an empty imageset previously raised ZeroDivisionError;
    # guard it the same way turk_additional() does.
    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(gid_list), )
    except ZeroDivisionError:
        progress = '0.00'
    imagesettext = None if imgsetid is None else ibs.get_imageset_text(imgsetid)
    gid = request.args.get('gid', '')
    if len(gid) > 0:
        gid = int(gid)
    else:
        gid_list_ = ut.filterfalse_items(gid_list, reviewed_list)
        if len(gid_list_) == 0:
            gid = None
        else:
            # gid = gid_list_[0]
            gid = random.choice(gid_list_)
    previous = request.args.get('previous', None)
    finished = gid is None
    review = 'review' in request.args.keys()
    display_instructions = request.cookies.get('ia-detection_instructions_seen', 1) == 0
    display_species_examples = False  # request.cookies.get('ia-detection_example_species_seen', 0) == 0
    if not finished:
        gpath = ibs.get_image_thumbpath(gid, ensure_paths=True, draw_annots=False)
        imgdata = ibs.get_image_imgdata(gid)
        image_src = appf.embed_image_html(imgdata)
        # Get annotations
        width, height = ibs.get_image_sizes(gid)
        aid_list = ibs.get_image_aids(gid)
        annot_bbox_list = ibs.get_annot_bboxes(aid_list)
        annot_thetas_list = ibs.get_annot_thetas(aid_list)
        species_list = ibs.get_annot_species_texts(aid_list)
        # Get annotation bounding boxes (percent of image size).
        # BUGFIX: multiply before dividing so integer bbox / integer size
        # does not truncate to 0 under Python 2 integer division.
        annotation_list = []
        for aid, annot_bbox, annot_theta, species in zip(aid_list, annot_bbox_list, annot_thetas_list, species_list):
            temp = {}
            temp['left'] = 100.0 * annot_bbox[0] / width
            temp['top'] = 100.0 * annot_bbox[1] / height
            temp['width'] = 100.0 * annot_bbox[2] / width
            temp['height'] = 100.0 * annot_bbox[3] / height
            temp['label'] = species
            temp['id'] = aid
            temp['theta'] = float(annot_theta)
            annotation_list.append(temp)
        if len(species_list) > 0:
            species = max(set(species_list), key=species_list.count)  # Get most common species
        elif appf.default_species(ibs) is not None:
            species = appf.default_species(ibs)
        else:
            species = KEY_DEFAULTS[SPECIES_KEY]
    else:
        gpath = None
        species = None
        image_src = None
        annotation_list = []
    callback_url = '%s?imgsetid=%s' % (url_for('submit_detection'), imgsetid, )
    return appf.template('turk', 'detection',
                         imgsetid=imgsetid,
                         gid=gid,
                         refer_aid=refer_aid,
                         species=species,
                         image_path=gpath,
                         image_src=image_src,
                         previous=previous,
                         imagesettext=imagesettext,
                         progress=progress,
                         finished=finished,
                         annotation_list=annotation_list,
                         display_instructions=display_instructions,
                         display_species_examples=display_species_examples,
                         callback_url=callback_url,
                         callback_method='POST',
                         EMBEDDED_CSS=None,
                         EMBEDDED_JAVASCRIPT=None,
                         review=review)
@register_route('/turk/detection/dynamic/', methods=['GET'])
def turk_detection_dynamic():
    """Serve the dynamic (unwrapped) detection-review page for one gid."""
    ibs = current_app.ibs
    gid = request.args.get('gid', None)
    gpath = ibs.get_image_thumbpath(gid, ensure_paths=True, draw_annots=False)
    image = ibs.get_image_imgdata(gid)
    image_src = appf.embed_image_html(image)
    # Get annotations
    width, height = ibs.get_image_sizes(gid)
    aid_list = ibs.get_image_aids(gid)
    annot_bbox_list = ibs.get_annot_bboxes(aid_list)
    annot_thetas_list = ibs.get_annot_thetas(aid_list)
    species_list = ibs.get_annot_species_texts(aid_list)
    # Get annotation bounding boxes (percent of image size).
    # BUGFIX: multiply before dividing so integer bbox / integer size does
    # not truncate to 0 under Python 2 integer division.
    annotation_list = []
    for aid, annot_bbox, annot_theta, species in zip(aid_list, annot_bbox_list, annot_thetas_list, species_list):
        temp = {}
        temp['left'] = 100.0 * annot_bbox[0] / width
        temp['top'] = 100.0 * annot_bbox[1] / height
        temp['width'] = 100.0 * annot_bbox[2] / width
        temp['height'] = 100.0 * annot_bbox[3] / height
        temp['label'] = species
        temp['id'] = aid
        temp['theta'] = float(annot_theta)
        annotation_list.append(temp)
    if len(species_list) > 0:
        species = max(set(species_list), key=species_list.count)  # Get most common species
    elif appf.default_species(ibs) is not None:
        species = appf.default_species(ibs)
    else:
        species = KEY_DEFAULTS[SPECIES_KEY]
    # NOTE(review): this passes ``gid`` as the ``imgsetid`` query value —
    # looks like a copy-paste from turk_detection(); confirm against
    # submit_detection() before changing.
    callback_url = '%s?imgsetid=%s' % (url_for('submit_detection'), gid, )
    return appf.template('turk', 'detection_dynamic',
                         gid=gid,
                         refer_aid=None,
                         species=species,
                         image_path=gpath,
                         image_src=image_src,
                         annotation_list=annotation_list,
                         callback_url=callback_url,
                         callback_method='POST',
                         EMBEDDED_CSS=None,
                         EMBEDDED_JAVASCRIPT=None,
                         __wrapper__=False)
@register_route('/turk/annotation/', methods=['GET'])
def turk_annotation():
    """Serve the combined annotation-review turk page (species, viewpoint,
    quality, multiple flags) for the next unprocessed annotation.

    CommandLine:
        python -m ibeis.web.app --exec-turk_viewpoint --db PZ_Master1

    Example:
        >>> # SCRIPT
        >>> from ibeis.other.ibsfuncs import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='PZ_Master1')
        >>> aid_list_ = ibs.find_unlabeled_name_members(suspect_yaws=True)
        >>> aid_list = ibs.filter_aids_to_quality(aid_list_, 'good', unknown_ok=False)
        >>> ibs.start_web_annot_groupreview(aid_list)
    """
    ibs = current_app.ibs
    tup = appf.get_turk_annot_args(appf.imageset_annot_processed)
    (aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous) = tup
    review = 'review' in request.args.keys()
    finished = aid is None
    display_instructions = request.cookies.get('ia-annotation_instructions_seen', 1) == 0
    if not finished:
        gid = ibs.get_annot_gids(aid)
        gpath = ibs.get_annot_chip_fpath(aid)
        image = vt.imread(gpath)
        image_src = appf.embed_image_html(image)
        # image_src = routes_ajax.annotation_src(aid)
        species = ibs.get_annot_species_texts(aid)
        viewpoint_value = appf.convert_yaw_to_old_viewpoint(ibs.get_annot_yaws(aid))
        quality_value = ibs.get_annot_qualities(aid)
        # NOTE(review): this maps every known quality <= 2 (including 2)
        # to 1 and everything > 2 to 2 — confirm that collapsing is
        # intended before changing it.
        if quality_value in [-1, None]:
            quality_value = None
        elif quality_value > 2:
            quality_value = 2
        elif quality_value <= 2:
            quality_value = 1
        multiple_value = ibs.get_annot_multiple(aid) == 1
    else:
        gid = None
        gpath = None
        image_src = None
        species = None
        viewpoint_value = None
        quality_value = None
        multiple_value = None
    imagesettext = ibs.get_imageset_text(imgsetid)
    species_rowids = ibs._get_all_species_rowids()
    species_nice_list = ibs.get_species_nice(species_rowids)
    combined_list = sorted(zip(species_nice_list, species_rowids))
    species_nice_list = [ combined[0] for combined in combined_list ]
    species_rowids = [ combined[1] for combined in combined_list ]
    species_text_list = ibs.get_species_texts(species_rowids)
    species_selected_list = [ species == species_ for species_ in species_text_list ]
    # BUGFIX: zip() is an iterator on Python 3 and cannot be concatenated
    # with a list; materialize it first.
    species_list = list(zip(species_nice_list, species_text_list, species_selected_list))
    species_list = [ ('Unspecified', const.UNKNOWN, True) ] + species_list
    callback_url = url_for('submit_annotation')
    return appf.template('turk', 'annotation',
                         imgsetid=imgsetid,
                         src_ag=src_ag,
                         dst_ag=dst_ag,
                         gid=gid,
                         aid=aid,
                         viewpoint_value=viewpoint_value,
                         quality_value=quality_value,
                         multiple_value=multiple_value,
                         image_path=gpath,
                         image_src=image_src,
                         previous=previous,
                         species_list=species_list,
                         imagesettext=imagesettext,
                         progress=progress,
                         finished=finished,
                         display_instructions=display_instructions,
                         callback_url=callback_url,
                         callback_method='POST',
                         EMBEDDED_CSS=None,
                         EMBEDDED_JAVASCRIPT=None,
                         review=review)
@register_route('/turk/annotation/dynamic/', methods=['GET'])
def turk_annotation_dynamic():
    """Serve the dynamic (unwrapped) annotation-review page for one aid."""
    ibs = current_app.ibs
    aid = request.args.get('aid', None)
    imgsetid = request.args.get('imgsetid', None)
    review = 'review' in request.args.keys()
    gid = ibs.get_annot_gids(aid)
    gpath = ibs.get_annot_chip_fpath(aid)
    image = vt.imread(gpath)
    image_src = appf.embed_image_html(image)
    species = ibs.get_annot_species_texts(aid)
    viewpoint_value = appf.convert_yaw_to_old_viewpoint(ibs.get_annot_yaws(aid))
    quality_value = ibs.get_annot_qualities(aid)
    # Normalize sentinel qualities: -1 (unknown) -> None, 0 -> 1.
    if quality_value == -1:
        quality_value = None
    if quality_value == 0:
        quality_value = 1
    species_rowids = ibs._get_all_species_rowids()
    species_nice_list = ibs.get_species_nice(species_rowids)
    combined_list = sorted(zip(species_nice_list, species_rowids))
    species_nice_list = [ combined[0] for combined in combined_list ]
    species_rowids = [ combined[1] for combined in combined_list ]
    species_text_list = ibs.get_species_texts(species_rowids)
    species_selected_list = [ species == species_ for species_ in species_text_list ]
    # BUGFIX: zip() is an iterator on Python 3 and cannot be concatenated
    # with a list; materialize it first.
    species_list = list(zip(species_nice_list, species_text_list, species_selected_list))
    species_list = [ ('Unspecified', const.UNKNOWN, True) ] + species_list
    callback_url = url_for('submit_annotation')
    return appf.template('turk', 'annotation_dynamic',
                         imgsetid=imgsetid,
                         gid=gid,
                         aid=aid,
                         viewpoint_value=viewpoint_value,
                         quality_value=quality_value,
                         image_path=gpath,
                         image_src=image_src,
                         species_list=species_list,
                         callback_url=callback_url,
                         callback_method='POST',
                         EMBEDDED_CSS=None,
                         EMBEDDED_JAVASCRIPT=None,
                         review=review,
                         __wrapper__=False)
@register_route('/turk/viewpoint/', methods=['GET'])
def turk_viewpoint():
    """Serve the viewpoint-review turk page for the next unprocessed
    annotation.

    CommandLine:
        python -m ibeis.web.app --exec-turk_viewpoint --db PZ_Master1

    Example:
        >>> # SCRIPT
        >>> from ibeis.other.ibsfuncs import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='PZ_Master1')
        >>> aid_list_ = ibs.find_unlabeled_name_members(suspect_yaws=True)
        >>> aid_list = ibs.filter_aids_to_quality(aid_list_, 'good', unknown_ok=False)
        >>> ibs.start_web_annot_groupreview(aid_list)
    """
    ibs = current_app.ibs
    tup = appf.get_turk_annot_args(appf.imageset_annot_viewpoint_processed)
    (aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous) = tup
    value = appf.convert_yaw_to_old_viewpoint(ibs.get_annot_yaws(aid))
    review = 'review' in request.args.keys()
    finished = aid is None
    display_instructions = request.cookies.get('ia-viewpoint_instructions_seen', 1) == 0
    if not finished:
        gid = ibs.get_annot_gids(aid)
        gpath = ibs.get_annot_chip_fpath(aid)
        image = vt.imread(gpath)
        image_src = appf.embed_image_html(image)
        species = ibs.get_annot_species_texts(aid)
    else:
        gid = None
        gpath = None
        image_src = None
        species = None
    imagesettext = ibs.get_imageset_text(imgsetid)
    species_rowids = ibs._get_all_species_rowids()
    species_nice_list = ibs.get_species_nice(species_rowids)
    combined_list = sorted(zip(species_nice_list, species_rowids))
    species_nice_list = [ combined[0] for combined in combined_list ]
    species_rowids = [ combined[1] for combined in combined_list ]
    species_text_list = ibs.get_species_texts(species_rowids)
    species_selected_list = [ species == species_ for species_ in species_text_list ]
    # BUGFIX: zip() is an iterator on Python 3 and cannot be concatenated
    # with a list; materialize it first.
    species_list = list(zip(species_nice_list, species_text_list, species_selected_list))
    species_list = [ ('Unspecified', const.UNKNOWN, True) ] + species_list
    return appf.template('turk', 'viewpoint',
                         imgsetid=imgsetid,
                         src_ag=src_ag,
                         dst_ag=dst_ag,
                         gid=gid,
                         aid=aid,
                         value=value,
                         image_path=gpath,
                         image_src=image_src,
                         previous=previous,
                         species_list=species_list,
                         imagesettext=imagesettext,
                         progress=progress,
                         finished=finished,
                         display_instructions=display_instructions,
                         review=review)
@register_route('/turk/quality/', methods=['GET'])
def turk_quality():
    """Serve the quality-review turk page for the next unprocessed
    annotation.

    PZ Needs Tags:
        17242
        14468
        14427
        15946
        14771
        14084
        4102
        6074
        3409

    GZ Needs Tags;
        1302

    CommandLine:
        python -m ibeis.web.app --exec-turk_quality --db PZ_Master1
        python -m ibeis.web.app --exec-turk_quality --db GZ_Master1
        python -m ibeis.web.app --exec-turk_quality --db GIRM_Master1

    Example:
        >>> # SCRIPT
        >>> from ibeis.other.ibsfuncs import *  # NOQA
        >>> import ibeis
        >>> ibs = ibeis.opendb(defaultdb='testdb1')
        >>> aid_list_ = ibs.find_unlabeled_name_members(qual=True)
        >>> valid_views = ['primary', 'primary1', 'primary-1']
        >>> aid_list = ibs.filter_aids_to_viewpoint(aid_list_, valid_views, unknown_ok=False)
        >>> ibs.start_web_annot_groupreview(aid_list)
    """
    ibs = current_app.ibs
    annot_args = appf.get_turk_annot_args(appf.imageset_annot_quality_processed)
    (aid_list, reviewed_list, imgsetid, src_ag, dst_ag, progress, aid, previous) = annot_args
    # Normalize sentinel qualities: -1 (unknown) -> None, 0 -> 1.
    value = ibs.get_annot_qualities(aid)
    value = {-1: None, 0: 1}.get(value, value)
    review = 'review' in request.args.keys()
    finished = aid is None
    display_instructions = request.cookies.get('ia-quality_instructions_seen', 1) == 0
    gid = None
    gpath = None
    image_src = None
    if not finished:
        gid = ibs.get_annot_gids(aid)
        gpath = ibs.get_annot_chip_fpath(aid)
        image_src = appf.embed_image_html(vt.imread(gpath))
    imagesettext = ibs.get_imageset_text(imgsetid)
    return appf.template('turk', 'quality',
                         imgsetid=imgsetid,
                         src_ag=src_ag,
                         dst_ag=dst_ag,
                         gid=gid,
                         aid=aid,
                         value=value,
                         image_path=gpath,
                         image_src=image_src,
                         previous=previous,
                         imagesettext=imagesettext,
                         progress=progress,
                         finished=finished,
                         display_instructions=display_instructions,
                         review=review)
@register_route('/turk/additional/', methods=['GET'])
def turk_additional():
    """Serve the 'additional' turk page (sex, age, region) for the next
    unprocessed annotation in the optional imageset."""
    ibs = current_app.ibs
    imgsetid = request.args.get('imgsetid', '')
    imgsetid = None if imgsetid == 'None' or imgsetid == '' else int(imgsetid)
    gid_list = ibs.get_valid_gids(imgsetid=imgsetid)
    aid_list = ut.flatten(ibs.get_image_aids(gid_list))
    nid_list = ibs.get_annot_nids(aid_list)
    reviewed_list = appf.imageset_annot_additional_processed(ibs, aid_list, nid_list)
    try:
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
    except ZeroDivisionError:
        progress = '0.00'
    imagesettext = None if imgsetid is None else ibs.get_imageset_text(imgsetid)
    aid = request.args.get('aid', '')
    if len(aid) > 0:
        aid = int(aid)
    else:
        aid_list_ = ut.filterfalse_items(aid_list, reviewed_list)
        if len(aid_list_) == 0:
            aid = None
        else:
            # aid = aid_list_[0]
            aid = random.choice(aid_list_)
    previous = request.args.get('previous', None)
    # Sex codes are shifted by 2 for the template; negative means unknown.
    value_sex = ibs.get_annot_sex([aid])[0]
    if value_sex >= 0:
        value_sex += 2
    else:
        value_sex = None
    # Map the (min, max) month estimate onto the template's 1-7 buckets.
    # BUGFIX: integer comparisons used ``is``, which only worked by
    # accident of CPython's small-int caching; use ``==`` (keeping
    # ``is None`` where None is meant).
    value_age_min, value_age_max = ibs.get_annot_age_months_est([aid])[0]
    value_age = None
    if (value_age_min == -1 or value_age_min is None) and (value_age_max == -1 or value_age_max is None):
        value_age = 1
    # NOTE(review): this second chain is ``if``, not ``elif`` — it can
    # override the bucket set above; confirm intended.
    if (value_age_min == 0 or value_age_min is None) and value_age_max == 2:
        value_age = 2
    elif value_age_min == 3 and value_age_max == 5:
        value_age = 3
    elif value_age_min == 6 and value_age_max == 11:
        value_age = 4
    elif value_age_min == 12 and value_age_max == 23:
        value_age = 5
    elif value_age_min == 24 and value_age_max == 35:
        value_age = 6
    elif value_age_min == 36 and (value_age_max > 36 or value_age_max is None):
        value_age = 7
    review = 'review' in request.args.keys()
    finished = aid is None
    display_instructions = request.cookies.get('ia-additional_instructions_seen', 1) == 0
    if not finished:
        gid = ibs.get_annot_gids(aid)
        gpath = ibs.get_annot_chip_fpath(aid)
        image = vt.imread(gpath)
        image_src = appf.embed_image_html(image)
    else:
        gid = None
        gpath = None
        image_src = None
    name_aid_list = None
    nid = ibs.get_annot_name_rowids(aid)
    if nid is not None:
        name_aid_list = ibs.get_name_aids(nid)
        quality_list = ibs.get_annot_qualities(name_aid_list)
        quality_text_list = ibs.get_annot_quality_texts(name_aid_list)
        yaw_text_list = ibs.get_annot_yaw_texts(name_aid_list)
        name_aid_combined_list = list(zip(
            name_aid_list,
            quality_list,
            quality_text_list,
            yaw_text_list,
        ))
        name_aid_combined_list.sort(key=lambda t: t[1], reverse=True)
    else:
        name_aid_combined_list = []
    # Derive the GGR region label from the image's special-zone imageset.
    region_str = 'UNKNOWN'
    if aid is not None and gid is not None:
        imgsetid_list = ibs.get_image_imgsetids(gid)
        imgset_text_list = ibs.get_imageset_text(imgsetid_list)
        imgset_text_list = [
            imgset_text
            for imgset_text in imgset_text_list
            if 'GGR Special Zone' in imgset_text
        ]
        assert len(imgset_text_list) < 2
        if len(imgset_text_list) == 1:
            region_str = imgset_text_list[0]
    return appf.template('turk', 'additional',
                         imgsetid=imgsetid,
                         gid=gid,
                         aid=aid,
                         region_str=region_str,
                         value_sex=value_sex,
                         value_age=value_age,
                         image_path=gpath,
                         name_aid_combined_list=name_aid_combined_list,
                         image_src=image_src,
                         previous=previous,
                         imagesettext=imagesettext,
                         progress=progress,
                         finished=finished,
                         display_instructions=display_instructions,
                         review=review)
@register_route('/group_review/', methods=['GET'])
def group_review():
    """Render the group-review page for a candidate list of annotations.

    ``prefill`` asks the CNN validator for suspect annotations; otherwise
    an explicit ``aid_list`` request arg (e.g. ``[1, 2, 3]``) may be given.
    """
    prefill = request.args.get('prefill', '')
    if len(prefill) > 0:
        ibs = current_app.ibs
        aid_list = ibs.get_valid_aids()
        bad_species_list, bad_viewpoint_list = ibs.validate_annot_species_viewpoint_cnn(aid_list)
        GROUP_BY_PREDICTION = True
        if GROUP_BY_PREDICTION:
            grouped_dict = ut.group_items(bad_viewpoint_list, ut.get_list_column(bad_viewpoint_list, 3))
            # BUGFIX: dict.values() and map() are lazy views on Python 3;
            # materialize them before they are traversed more than once.
            grouped_list = list(grouped_dict.values())
            regrouped_items = ut.flatten(ut.sortedby(grouped_list, [len(group) for group in grouped_list]))
            candidate_aid_list = ut.get_list_column(regrouped_items, 0)
        else:
            candidate_aid_list = [ bad_viewpoint[0] for bad_viewpoint in bad_viewpoint_list]
    elif request.args.get('aid_list', None) is not None:
        aid_list = request.args.get('aid_list', '')
        if len(aid_list) > 0:
            aid_list = aid_list.replace('[', '')
            aid_list = aid_list.replace(']', '')
            aid_list = aid_list.strip().split(',')
            candidate_aid_list = [ int(aid_.strip()) for aid_ in aid_list ]
        else:
            candidate_aid_list = ''
    else:
        candidate_aid_list = ''
    return appf.template(None, 'group_review', candidate_aid_list=candidate_aid_list, mode_list=appf.VALID_TURK_MODES)
@register_route('/sightings/', methods=['GET'])
def sightings(html_encode=True):
    """Return the sightings report; newlines become <br/> when encoding."""
    ibs = current_app.ibs
    complete = request.args.get('complete', None) is not None
    report = ibs.report_sightings_str(complete=complete, include_images=True)
    return report.replace('\n', '<br/>') if html_encode else report
@register_route('/api/', methods=['GET'], __api_prefix_check__=False)
def api_root():
    """Render an index of all registered /api/ rules grouped by HTTP method."""
    rules = current_app.url_map.iter_rules()
    rule_dict = {}
    for rule in rules:
        # BUGFIX: copy the set before subtracting — ``methods -= {...}``
        # on the original reference mutated ``rule.methods`` inside
        # Flask's url map in place.
        methods = set(rule.methods)
        url = str(rule)
        if '/api/' in url:
            methods -= set(['HEAD', 'OPTIONS'])
            if len(methods) == 0:
                continue
            if len(methods) > 1:
                print('methods = %r' % (methods,))
            method = list(methods)[0]
            if method not in rule_dict.keys():
                rule_dict[method] = []
            rule_dict[method].append((method, url, ))
    for method in rule_dict.keys():
        rule_dict[method].sort()
    url = '%s/api/core/dbname/' % (current_app.server_url, )
    app_auth = controller_inject.get_url_authorization(url)
    return appf.template(None, 'api',
                         app_url=url,
                         app_name=controller_inject.GLOBAL_APP_NAME,
                         app_secret=controller_inject.GLOBAL_APP_SECRET,
                         app_auth=app_auth,
                         rule_list=rule_dict)
@register_route('/upload/', methods=['GET'])
def upload():
    # Static upload page; all upload state is handled client-side.
    return appf.template(None, 'upload')
@register_route('/dbinfo/', methods=['GET'])
def dbinfo():
    """Return the database info summary wrapped in a <pre> block.

    Best-effort: any failure yields an empty summary rather than a 500.
    """
    try:
        ibs = current_app.ibs
        dbinfo_str = ibs.get_dbinfo_str()
    except Exception:
        # BUGFIX: the bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; best-effort behavior otherwise preserved.
        dbinfo_str = ''
    dbinfo_str_formatted = '<pre>%s</pre>' % (dbinfo_str, )
    return dbinfo_str_formatted
@register_route('/counts/', methods=['GET'])
def wb_counts():
    """Return Wildbook-style entity counts as an HTML fragment.

    Counts annotations, images, named individuals, encounters (distinct
    names per non-empty imageset) and occurrences (non-empty imagesets).
    Best-effort: any failure yields an empty string.
    """
    fmt_str = '''<p># Annotations: <b>%d</b></p>
    <p># MediaAssets (images): <b>%d</b></p>
    <p># MarkedIndividuals: <b>%d</b></p>
    <p># Encounters: <b>%d</b></p>
    <p># Occurrences: <b>%d</b></p>'''
    try:
        ibs = current_app.ibs
        aid_list = ibs.get_valid_aids()
        nid_list = ibs.get_annot_nids(aid_list)
        # BUGFIX: guard against None before the > comparison (TypeError on
        # Python 3 for unnamed annotations).
        nid_list = [ nid for nid in nid_list if nid is not None and nid > 0 ]
        gid_list = ibs.get_annot_gids(aid_list)
        imgset_id_list = ibs.get_valid_imgsetids()
        aids_list = ibs.get_imageset_aids(imgset_id_list)
        imgset_id_list = [
            imgset_id
            for imgset_id, aid_list_ in zip(imgset_id_list, aids_list)
            if len(aid_list_) > 0
        ]
        valid_nid_list = list(set(nid_list))
        valid_aid_list = list(set(aid_list))
        valid_gid_list = list(set(gid_list))
        valid_imgset_id_list = list(set(imgset_id_list))
        aids_list = ibs.get_imageset_aids(valid_imgset_id_list)
        # BUGFIX: map() is lazy on Python 3; use a comprehension so the
        # result can be flattened and counted.  (Also drops a duplicated
        # valid_imgset_id_list assignment.)
        nids_list = [ set(ibs.get_annot_nids(aid_list_)) for aid_list_ in aids_list ]
        nids_list = ut.flatten(nids_list)
        num_nid = len(valid_nid_list)
        num_aid = len(valid_aid_list)
        num_gid = len(valid_gid_list)
        num_imgset = len(valid_imgset_id_list)
        num_encounters = len(nids_list)
        args = (num_aid, num_gid, num_nid, num_encounters, num_imgset, )
        counts_str = fmt_str % args
    except Exception:
        # BUGFIX: bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt.
        counts_str = ''
    return counts_str
@register_route('/test/counts.jsp', methods=['GET'], __api_postfix_check__=False)
def wb_counts_alias1():
    # Legacy Wildbook-compatible URL; delegates to wb_counts().
    return wb_counts()
@register_route('/gzgc/counts.jsp', methods=['GET'], __api_postfix_check__=False)
def wb_counts_alias2():
    # Legacy GZGC Wildbook-compatible URL; delegates to wb_counts().
    return wb_counts()
@register_route('/404/', methods=['GET'])
def error404(exception=None):
    """Render the 404 page, logging the exception and current traceback."""
    import traceback
    exception_str = str(exception)
    traceback_str = str(traceback.format_exc())
    for message in (exception_str, traceback_str):
        print('[web] %r' % (message, ))
    return appf.template(None, '404', exception_str=exception_str,
                         traceback_str=traceback_str)
if __name__ == '__main__':
    """
    CommandLine:
        python -m ibeis.web.app
        python -m ibeis.web.app --allexamples
        python -m ibeis.web.app --allexamples --noface --nosrc
    """
    import multiprocessing
    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA
    # BUGFIX: removed dataset-extraction residue that was fused onto this
    # line and broke the file's syntax.
    ut.doctest_funcs()
import pytest
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_classic.agents.output_parsers.react_single_input import (
ReActSingleInputOutputParser,
)
def test_action() -> None:
    """ReAct output with Action / Action Input lines parses to AgentAction."""
    parser = ReActSingleInputOutputParser()
    text = """Thought: agent thought here
Action: search
Action Input: what is the temperature in SF?"""
    result = parser.invoke(text)
    assert result == AgentAction(
        tool="search",
        tool_input="what is the temperature in SF?",
        log=text,
    )
def test_finish() -> None:
    """A Final Answer line parses to an AgentFinish carrying the answer."""
    parser = ReActSingleInputOutputParser()
    text = """Thought: agent thought here
Final Answer: The temperature is 100"""
    result = parser.invoke(text)
    assert result == AgentFinish(
        return_values={"output": "The temperature is 100"},
        log=text,
    )
def test_action_with_finish() -> None:
    """'Final Answer:' mixed into an action block must raise OutputParserException."""
    parser = ReActSingleInputOutputParser()
    text = """Thought: agent thought here
Action: search Final Answer:
Action Input: what is the temperature in SF?"""
    with pytest.raises(OutputParserException):
        parser.invoke(text)
<!DOCTYPE html>
<html lang="en">
<head>
<title>DownloadResponse Structure Reference</title>
<link rel="stylesheet" type="text/css" href="../css/jazzy.css" />
<link rel="stylesheet" type="text/css" href="../css/highlight.css" />
<meta charset="utf-8">
<script src="../js/jquery.min.js" defer></script>
<script src="../js/jazzy.js" defer></script>
<script src="../js/lunr.min.js" defer></script>
<script src="../js/typeahead.jquery.js" defer></script>
<script src="../js/jazzy.search.js" defer></script>
</head>
<body>
<a name="//apple_ref/swift/Struct/DownloadResponse" class="dashAnchor"></a>
<a title="DownloadResponse Structure Reference"></a>
<header class="header">
<p class="header-col header-col--primary">
<a class="header-link" href="../index.html">
Alamofire 5.11.0 Docs
</a>
(96% documented)
</p>
<div class="header-col--secondary">
<form role="search" action="../search.json">
<input type="text" placeholder="Search documentation" data-typeahead>
</form>
</div>
<p class="header-col header-col--secondary">
<a class="header-link" href="https://github.com/Alamofire/Alamofire">
<img class="header-icon" src="../img/gh.png" alt="GitHub"/>
View on GitHub
</a>
</p>
<p class="header-col header-col--secondary">
<a class="header-link" href="dash-feed://https%3A%2F%2Falamofire.github.io%2FAlamofire%2Fdocsets%2FAlamofire.xml">
<img class="header-icon" src="../img/dash.png" alt="Dash"/>
Install in Dash
</a>
</p>
</header>
<p class="breadcrumbs">
<a class="breadcrumb" href="../index.html">Alamofire</a>
<img class="carat" src="../img/carat.png" alt=""/>
<a class="breadcrumb" href="../Structs.html">Structures</a>
<img class="carat" src="../img/carat.png" alt=""/>
DownloadResponse Structure Reference
</p>
<div class="content-wrapper">
<nav class="navigation">
<ul class="nav-groups">
<li class="nav-group-name">
<a class="nav-group-name-link" href="../Classes.html">Classes</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/Adapter.html">Adapter</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/AlamofireNotifications.html">AlamofireNotifications</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/AuthenticationInterceptor.html">AuthenticationInterceptor</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/AuthenticationInterceptor/RefreshWindow.html">– RefreshWindow</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/ClosureEventMonitor.html">ClosureEventMonitor</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/CompositeEventMonitor.html">CompositeEventMonitor</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/CompositeTrustEvaluator.html">CompositeTrustEvaluator</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/ConnectionLostRetryPolicy.html">ConnectionLostRetryPolicy</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DataRequest.html">DataRequest</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DataResponseSerializer.html">DataResponseSerializer</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DataStreamRequest.html">DataStreamRequest</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DataStreamRequest/Stream.html">– Stream</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DataStreamRequest/Event.html">– Event</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DataStreamRequest/Completion.html">– Completion</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DataStreamRequest/CancellationToken.html">– CancellationToken</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DecodableResponseSerializer.html">DecodableResponseSerializer</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DefaultTrustEvaluator.html">DefaultTrustEvaluator</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DisabledTrustEvaluator.html">DisabledTrustEvaluator</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DownloadRequest.html">DownloadRequest</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DownloadRequest/Options.html">– Options</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DownloadRequest/Downloadable.html">– Downloadable</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/Interceptor.html">Interceptor</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/JSONParameterEncoder.html">JSONParameterEncoder</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/JSONResponseSerializer.html">JSONResponseSerializer</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/MultipartFormData.html">MultipartFormData</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/NetworkReachabilityManager.html">NetworkReachabilityManager</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/NetworkReachabilityManager/NetworkReachabilityStatus.html">– NetworkReachabilityStatus</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/OfflineRetrier.html">OfflineRetrier</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/PinnedCertificatesTrustEvaluator.html">PinnedCertificatesTrustEvaluator</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/PublicKeysTrustEvaluator.html">PublicKeysTrustEvaluator</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/Request.html">Request</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/Request/State.html">– State</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/Request/ResponseDisposition.html">– ResponseDisposition</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/Retrier.html">Retrier</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/RetryPolicy.html">RetryPolicy</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/RevocationTrustEvaluator.html">RevocationTrustEvaluator</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/RevocationTrustEvaluator/Options.html">– Options</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/ServerTrustManager.html">ServerTrustManager</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/Session.html">Session</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/Session/RequestSetup.html">– RequestSetup</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/SessionDelegate.html">SessionDelegate</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/StringResponseSerializer.html">StringResponseSerializer</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormEncoder.html">URLEncodedFormEncoder</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormEncoder/ArrayEncoding.html">– ArrayEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormEncoder/BoolEncoding.html">– BoolEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormEncoder/DataEncoding.html">– DataEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormEncoder/DateEncoding.html">– DateEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormEncoder/KeyEncoding.html">– KeyEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormEncoder/KeyPathEncoding.html">– KeyPathEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormEncoder/NilEncoding.html">– NilEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormEncoder/SpaceEncoding.html">– SpaceEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormEncoder/Error.html">– Error</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormParameterEncoder.html">URLEncodedFormParameterEncoder</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormParameterEncoder/Destination.html">– Destination</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/UploadRequest.html">UploadRequest</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/UploadRequest/Uploadable.html">– Uploadable</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a class="nav-group-name-link" href="../Global%20Variables.html">Global Variables</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Global%20Variables.html#/s:9Alamofire2AFAA7SessionCvp">AF</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a class="nav-group-name-link" href="../Enums.html">Enumerations</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/AFError.html">AFError</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/AFError/MultipartEncodingFailureReason.html">– MultipartEncodingFailureReason</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/AFError/UnexpectedInputStreamLength.html">– UnexpectedInputStreamLength</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/AFError/ParameterEncodingFailureReason.html">– ParameterEncodingFailureReason</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/AFError/ParameterEncoderFailureReason.html">– ParameterEncoderFailureReason</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/AFError/ResponseValidationFailureReason.html">– ResponseValidationFailureReason</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/AFError/ResponseSerializationFailureReason.html">– ResponseSerializationFailureReason</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/AFError/ServerTrustFailureReason.html">– ServerTrustFailureReason</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/AFError/URLRequestValidationFailureReason.html">– URLRequestValidationFailureReason</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/AFInfo.html">AFInfo</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/AuthenticationError.html">AuthenticationError</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/RetryResult.html">RetryResult</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a class="nav-group-name-link" href="../Extensions.html">Extensions</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions.html#/s:Sa">Array</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions.html#/c:objc(cs)NSBundle">Bundle</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions/CharacterSet.html">CharacterSet</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions/Error.html">Error</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions/HTTPURLResponse.html">HTTPURLResponse</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions.html#/s:10Foundation11JSONDecoderC">JSONDecoder</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions/Notification.html">Notification</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions.html#/c:@T@OSStatus">OSStatus</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions.html#/s:10Foundation19PropertyListDecoderC">PropertyListDecoder</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions.html#/c:@T@SecCertificateRef">SecCertificate</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions.html#/c:@T@SecPolicyRef">SecPolicy</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions.html#/c:@T@SecTrustRef">SecTrust</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions.html#/c:@E@SecTrustResultType">SecTrustResultType</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions/String.html">String</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions/URL.html">URL</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions/URLComponents.html">URLComponents</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions/URLRequest.html">URLRequest</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions/URLSessionConfiguration.html">URLSessionConfiguration</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions/%5BServerTrustEvaluating%5D.html">[ServerTrustEvaluating]</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a class="nav-group-name-link" href="../Protocols.html">Protocols</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/AlamofireExtended.html">AlamofireExtended</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/AuthenticationCredential.html">AuthenticationCredential</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/Authenticator.html">Authenticator</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/CachedResponseHandler.html">CachedResponseHandler</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/DataDecoder.html">DataDecoder</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/DataPreprocessor.html">DataPreprocessor</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/DataResponseSerializerProtocol.html">DataResponseSerializerProtocol</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/DataStreamSerializer.html">DataStreamSerializer</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/DownloadResponseSerializerProtocol.html">DownloadResponseSerializerProtocol</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/EmptyResponse.html">EmptyResponse</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/EventMonitor.html">EventMonitor</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/ParameterEncoder.html">ParameterEncoder</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/ParameterEncoding.html">ParameterEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/RedirectHandler.html">RedirectHandler</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/RequestAdapter.html">RequestAdapter</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/RequestDelegate.html">RequestDelegate</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/RequestInterceptor.html">RequestInterceptor</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/RequestRetrier.html">RequestRetrier</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/ResponseSerializer.html">ResponseSerializer</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/ServerTrustEvaluating.html">ServerTrustEvaluating</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/URLConvertible.html">URLConvertible</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/URLRequestConvertible.html">URLRequestConvertible</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols.html#/s:9Alamofire17UploadConvertibleP">UploadConvertible</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/UploadableConvertible.html">UploadableConvertible</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/WebSocketMessageSerializer.html">WebSocketMessageSerializer</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a class="nav-group-name-link" href="../Structs.html">Structures</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/AlamofireExtension.html">AlamofireExtension</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DataResponse.html">DataResponse</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DataResponsePublisher.html">DataResponsePublisher</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DataStreamPublisher.html">DataStreamPublisher</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DataStreamTask.html">DataStreamTask</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DataTask.html">DataTask</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DecodableStreamSerializer.html">DecodableStreamSerializer</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DecodableWebSocketMessageDecoder.html">DecodableWebSocketMessageDecoder</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DecodableWebSocketMessageDecoder/Error.html">– Error</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DeflateRequestCompressor.html">DeflateRequestCompressor</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DeflateRequestCompressor/DuplicateHeaderBehavior.html">– DuplicateHeaderBehavior</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DeflateRequestCompressor.html#/s:9Alamofire24DeflateRequestCompressorV20DuplicateHeaderErrorV">– DuplicateHeaderError</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DownloadResponse.html">DownloadResponse</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DownloadResponsePublisher.html">DownloadResponsePublisher</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DownloadTask.html">DownloadTask</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/Empty.html">Empty</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/GoogleXSSIPreprocessor.html">GoogleXSSIPreprocessor</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/HTTPHeader.html">HTTPHeader</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/HTTPHeaders.html">HTTPHeaders</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/HTTPMethod.html">HTTPMethod</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/JSONEncoding.html">JSONEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/JSONEncoding/Error.html">– Error</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/PassthroughPreprocessor.html">PassthroughPreprocessor</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/PassthroughStreamSerializer.html">PassthroughStreamSerializer</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/Redirector.html">Redirector</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/Redirector/Behavior.html">– Behavior</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/RequestAdapterState.html">RequestAdapterState</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/ResponseCacher.html">ResponseCacher</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/ResponseCacher/Behavior.html">– Behavior</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/StreamOf.html">StreamOf</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/StreamOf/Iterator.html">– Iterator</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/StringStreamSerializer.html">StringStreamSerializer</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/URLEncoding.html">URLEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/URLEncoding/Destination.html">– Destination</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/URLEncoding/ArrayEncoding.html">– ArrayEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/URLEncoding/BoolEncoding.html">– BoolEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/URLResponseSerializer.html">URLResponseSerializer</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a class="nav-group-name-link" href="../Typealiases.html">Type Aliases</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Typealiases.html#/s:9Alamofire14AFDataResponsea">AFDataResponse</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Typealiases.html#/s:9Alamofire18AFDownloadResponsea">AFDownloadResponse</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Typealiases.html#/s:9Alamofire8AFResulta">AFResult</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Typealiases.html#/s:9Alamofire12AdaptHandlera">AdaptHandler</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Typealiases.html#/s:9Alamofire17DisabledEvaluatora">DisabledEvaluator</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Typealiases.html#/s:9Alamofire10Parametersa">Parameters</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Typealiases.html#/s:9Alamofire12RetryHandlera">RetryHandler</a>
</li>
</ul>
</li>
</ul>
</nav>
<article class="main-content">
<section class="section">
<div class="section-content top-matter">
<h1>DownloadResponse</h1>
<div class="declaration">
<div class="language">
<pre class="highlight swift"><code><span class="kd">public</span> <span class="kd">struct</span> <span class="kt">DownloadResponse</span><span class="o"><</span><span class="kt">Success</span><span class="p">,</span> <span class="kt">Failure</span><span class="o">></span> <span class="p">:</span> <span class="kt">Sendable</span> <span class="k">where</span> <span class="kt">Success</span> <span class="p">:</span> <span class="kt">Sendable</span><span class="p">,</span> <span class="kt">Failure</span> <span class="p">:</span> <span class="kt">Error</span></code></pre>
<pre class="highlight swift"><code><span class="kd">extension</span> <span class="kt">DownloadResponse</span><span class="p">:</span> <span class="kt">CustomStringConvertible</span><span class="p">,</span> <span class="kt">CustomDebugStringConvertible</span></code></pre>
</div>
</div>
<p>Used to store all data associated with a serialized response of a download request.</p>
</div>
</section>
<section class="section">
<div class="section-content">
<div class="task-group">
<ul class="item-container">
<li class="item">
<div>
<code>
<a name="/s:9Alamofire16DownloadResponseV7request10Foundation10URLRequestVSgvp"></a>
<a name="//apple_ref/swift/Property/request" class="dashAnchor"></a>
<a class="token" href="#/s:9Alamofire16DownloadResponseV7request10Foundation10URLRequestVSgvp">request</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>The URL request sent to the server.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight swift"><code><span class="kd">public</span> <span class="k">let</span> <span class="nv">request</span><span class="p">:</span> <span class="kt">URLRequest</span><span class="p">?</span></code></pre>
</div>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:9Alamofire16DownloadResponseV8responseSo17NSHTTPURLResponseCSgvp"></a>
<a name="//apple_ref/swift/Property/response" class="dashAnchor"></a>
<a class="token" href="#/s:9Alamofire16DownloadResponseV8responseSo17NSHTTPURLResponseCSgvp">response</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>The server’s response to the URL request.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight swift"><code><span class="kd">public</span> <span class="k">let</span> <span class="nv">response</span><span class="p">:</span> <span class="kt">HTTPURLResponse</span><span class="p">?</span></code></pre>
</div>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:9Alamofire16DownloadResponseV7fileURL10Foundation0E0VSgvp"></a>
<a name="//apple_ref/swift/Property/fileURL" class="dashAnchor"></a>
<a class="token" href="#/s:9Alamofire16DownloadResponseV7fileURL10Foundation0E0VSgvp">fileURL</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>The final destination URL of the data returned from the server after it is moved.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight swift"><code><span class="kd">public</span> <span class="k">let</span> <span class="nv">fileURL</span><span class="p">:</span> <span class="kt">URL</span><span class="p">?</span></code></pre>
</div>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:9Alamofire16DownloadResponseV10resumeData10Foundation0E0VSgvp"></a>
<a name="//apple_ref/swift/Property/resumeData" class="dashAnchor"></a>
<a class="token" href="#/s:9Alamofire16DownloadResponseV10resumeData10Foundation0E0VSgvp">resumeData</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>The resume data generated if the request was cancelled.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight swift"><code><span class="kd">public</span> <span class="k">let</span> <span class="nv">resumeData</span><span class="p">:</span> <span class="kt">Data</span><span class="p">?</span></code></pre>
</div>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:9Alamofire16DownloadResponseV7metricsSo23NSURLSessionTaskMetricsCSgvp"></a>
<a name="//apple_ref/swift/Property/metrics" class="dashAnchor"></a>
<a class="token" href="#/s:9Alamofire16DownloadResponseV7metricsSo23NSURLSessionTaskMetricsCSgvp">metrics</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>The final metrics of the response.</p>
<div class="aside aside-note">
<p class="aside-title">Note</p>
                          Due to <code>FB7624529</code>, collection of <code>URLSessionTaskMetrics</code> on watchOS is currently disabled.
</div>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight swift"><code><span class="kd">public</span> <span class="k">let</span> <span class="nv">metrics</span><span class="p">:</span> <span class="kt">URLSessionTaskMetrics</span><span class="p">?</span></code></pre>
</div>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:9Alamofire16DownloadResponseV21serializationDurationSdvp"></a>
<a name="//apple_ref/swift/Property/serializationDuration" class="dashAnchor"></a>
<a class="token" href="#/s:9Alamofire16DownloadResponseV21serializationDurationSdvp">serializationDuration</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>The time taken to serialize the response.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight swift"><code><span class="kd">public</span> <span class="k">let</span> <span class="nv">serializationDuration</span><span class="p">:</span> <span class="kt">TimeInterval</span></code></pre>
</div>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:9Alamofire16DownloadResponseV6results6ResultOyxq_Gvp"></a>
<a name="//apple_ref/swift/Property/result" class="dashAnchor"></a>
<a class="token" href="#/s:9Alamofire16DownloadResponseV6results6ResultOyxq_Gvp">result</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>The result of response serialization.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight swift"><code><span class="kd">public</span> <span class="k">let</span> <span class="nv">result</span><span class="p">:</span> <span class="kt">Result</span><span class="o"><</span><span class="kt">Success</span><span class="p">,</span> <span class="kt">Failure</span><span class="o">></span></code></pre>
</div>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:9Alamofire16DownloadResponseV5valuexSgvp"></a>
<a name="//apple_ref/swift/Property/value" class="dashAnchor"></a>
<a class="token" href="#/s:9Alamofire16DownloadResponseV5valuexSgvp">value</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>Returns the associated value of the result if it is a success, <code>nil</code> otherwise.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight swift"><code><span class="kd">public</span> <span class="k">var</span> <span class="nv">value</span><span class="p">:</span> <span class="kt">Success</span><span class="p">?</span> <span class="p">{</span> <span class="k">get</span> <span class="p">}</span></code></pre>
</div>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:9Alamofire16DownloadResponseV5errorq_Sgvp"></a>
<a name="//apple_ref/swift/Property/error" class="dashAnchor"></a>
<a class="token" href="#/s:9Alamofire16DownloadResponseV5errorq_Sgvp">error</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
                        <p>Returns the associated error value if the result is a failure, <code>nil</code> otherwise.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight swift"><code><span class="kd">public</span> <span class="k">var</span> <span class="nv">error</span><span class="p">:</span> <span class="kt">Failure</span><span class="p">?</span> <span class="p">{</span> <span class="k">get</span> <span class="p">}</span></code></pre>
</div>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:9Alamofire16DownloadResponseV7request8response7fileURL10resumeData7metrics21serializationDuration6resultACyxq_G10Foundation10URLRequestVSg_So17NSHTTPURLResponseCSgAL0G0VSgAL0I0VSgSo23NSURLSessionTaskMetricsCSgSds6ResultOyxq_Gtcfc"></a>
<a name="//apple_ref/swift/Method/init(request:response:fileURL:resumeData:metrics:serializationDuration:result:)" class="dashAnchor"></a>
<a class="token" href="#/s:9Alamofire16DownloadResponseV7request8response7fileURL10resumeData7metrics21serializationDuration6resultACyxq_G10Foundation10URLRequestVSg_So17NSHTTPURLResponseCSgAL0G0VSgAL0I0VSgSo23NSURLSessionTaskMetricsCSgSds6ResultOyxq_Gtcfc">init(request:<wbr>response:<wbr>fileURL:<wbr>resumeData:<wbr>metrics:<wbr>serializationDuration:<wbr>result:<wbr>)</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>Creates a <code>DownloadResponse</code> instance with the specified parameters derived from response serialization.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight swift"><code><span class="kd">public</span> <span class="nf">init</span><span class="p">(</span><span class="nv">request</span><span class="p">:</span> <span class="kt">URLRequest</span><span class="p">?,</span>
<span class="nv">response</span><span class="p">:</span> <span class="kt">HTTPURLResponse</span><span class="p">?,</span>
<span class="nv">fileURL</span><span class="p">:</span> <span class="kt">URL</span><span class="p">?,</span>
<span class="nv">resumeData</span><span class="p">:</span> <span class="kt">Data</span><span class="p">?,</span>
<span class="nv">metrics</span><span class="p">:</span> <span class="kt">URLSessionTaskMetrics</span><span class="p">?,</span>
<span class="nv">serializationDuration</span><span class="p">:</span> <span class="kt">TimeInterval</span><span class="p">,</span>
<span class="nv">result</span><span class="p">:</span> <span class="kt">Result</span><span class="o"><</span><span class="kt">Success</span><span class="p">,</span> <span class="kt">Failure</span><span class="o">></span><span class="p">)</span></code></pre>
</div>
</div>
<div>
<h4>Parameters</h4>
<table class="graybox">
<tbody>
<tr>
<td>
<code>
<em>request</em>
</code>
</td>
<td>
<div>
<p>The <code>URLRequest</code> sent to the server.</p>
</div>
</td>
</tr>
<tr>
<td>
<code>
<em>response</em>
</code>
</td>
<td>
<div>
<p>The <code>HTTPURLResponse</code> from the server.</p>
</div>
</td>
</tr>
<tr>
<td>
<code>
<em>fileURL</em>
</code>
</td>
<td>
<div>
<p>The final destination URL of the data returned from the server after it is moved.</p>
</div>
</td>
</tr>
<tr>
<td>
<code>
<em>resumeData</em>
</code>
</td>
<td>
<div>
<p>The resume <code>Data</code> generated if the request was cancelled.</p>
</div>
</td>
</tr>
<tr>
<td>
<code>
<em>metrics</em>
</code>
</td>
<td>
<div>
<p>The <code>URLSessionTaskMetrics</code> of the <code><a href="../Classes/DownloadRequest.html">DownloadRequest</a></code>.</p>
</div>
</td>
</tr>
<tr>
<td>
<code>
<em>serializationDuration</em>
</code>
</td>
<td>
<div>
<p>The duration taken by serialization.</p>
</div>
</td>
</tr>
<tr>
<td>
<code>
<em>result</em>
</code>
</td>
<td>
<div>
<p>The <code>Result</code> of response serialization.</p>
</div>
</td>
</tr>
</tbody>
</table>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:9Alamofire16DownloadResponseV11descriptionSSvp"></a>
<a name="//apple_ref/swift/Property/description" class="dashAnchor"></a>
<a class="token" href="#/s:9Alamofire16DownloadResponseV11descriptionSSvp">description</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>The textual representation used when written to an output stream, which includes whether the result was a
success or failure.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight swift"><code><span class="kd">public</span> <span class="k">var</span> <span class="nv">description</span><span class="p">:</span> <span class="kt">String</span> <span class="p">{</span> <span class="k">get</span> <span class="p">}</span></code></pre>
</div>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:9Alamofire16DownloadResponseV16debugDescriptionSSvp"></a>
<a name="//apple_ref/swift/Property/debugDescription" class="dashAnchor"></a>
<a class="token" href="#/s:9Alamofire16DownloadResponseV16debugDescriptionSSvp">debugDescription</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>The debug textual representation used when written to an output stream, which includes the URL request, the URL
response, the temporary and destination URLs, the resume data, the durations of the network and serialization
actions, and the response serialization result.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight swift"><code><span class="kd">public</span> <span class="k">var</span> <span class="nv">debugDescription</span><span class="p">:</span> <span class="kt">String</span> <span class="p">{</span> <span class="k">get</span> <span class="p">}</span></code></pre>
</div>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:9Alamofire16DownloadResponseV3mapyACyqd__q_Gqd__xXEs8SendableRd__lF"></a>
<a name="//apple_ref/swift/Method/map(_:)" class="dashAnchor"></a>
<a class="token" href="#/s:9Alamofire16DownloadResponseV3mapyACyqd__q_Gqd__xXEs8SendableRd__lF">map(_:<wbr>)</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>Evaluates the given closure when the result of this <code>DownloadResponse</code> is a success, passing the unwrapped
result value as a parameter.</p>
<p>Use the <code>map</code> method with a closure that does not throw. For example:</p>
<pre class="highlight swift"><code><span class="k">let</span> <span class="nv">possibleData</span><span class="p">:</span> <span class="kt">DownloadResponse</span><span class="o"><</span><span class="kt">Data</span><span class="o">></span> <span class="o">=</span> <span class="o">...</span>
<span class="k">let</span> <span class="nv">possibleInt</span> <span class="o">=</span> <span class="n">possibleData</span><span class="o">.</span><span class="n">map</span> <span class="p">{</span> <span class="nv">$0</span><span class="o">.</span><span class="n">count</span> <span class="p">}</span>
</code></pre>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight swift"><code><span class="kd">public</span> <span class="kd">func</span> <span class="n">map</span><span class="o"><</span><span class="kt">NewSuccess</span><span class="o">></span><span class="p">(</span><span class="n">_</span> <span class="nv">transform</span><span class="p">:</span> <span class="p">(</span><span class="kt">Success</span><span class="p">)</span> <span class="o">-></span> <span class="kt">NewSuccess</span><span class="p">)</span> <span class="o">-></span> <span class="kt">DownloadResponse</span><span class="o"><</span><span class="kt">NewSuccess</span><span class="p">,</span> <span class="kt">Failure</span><span class="o">></span> <span class="k">where</span> <span class="kt">NewSuccess</span> <span class="p">:</span> <span class="kt">Sendable</span></code></pre>
</div>
</div>
<div>
<h4>Parameters</h4>
<table class="graybox">
<tbody>
<tr>
<td>
<code>
<em>transform</em>
</code>
</td>
<td>
<div>
<p>A closure that takes the success value of the instance’s result.</p>
</div>
</td>
</tr>
</tbody>
</table>
</div>
<div>
<h4>Return Value</h4>
<p>A <code>DownloadResponse</code> whose result wraps the value returned by the given closure. If this instance’s
result is a failure, returns a response wrapping the same failure.</p>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:9Alamofire16DownloadResponseV6tryMapyACyqd__s5Error_pGqd__xKXEs8SendableRd__lF"></a>
<a name="//apple_ref/swift/Method/tryMap(_:)" class="dashAnchor"></a>
<a class="token" href="#/s:9Alamofire16DownloadResponseV6tryMapyACyqd__s5Error_pGqd__xKXEs8SendableRd__lF">tryMap(_:<wbr>)</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>Evaluates the given closure when the result of this <code>DownloadResponse</code> is a success, passing the unwrapped
result value as a parameter.</p>
<p>Use the <code>tryMap</code> method with a closure that may throw an error. For example:</p>
<pre class="highlight swift"><code><span class="k">let</span> <span class="nv">possibleData</span><span class="p">:</span> <span class="kt">DownloadResponse</span><span class="o"><</span><span class="kt">Data</span><span class="o">></span> <span class="o">=</span> <span class="o">...</span>
<span class="k">let</span> <span class="nv">possibleObject</span> <span class="o">=</span> <span class="n">possibleData</span><span class="o">.</span><span class="n">tryMap</span> <span class="p">{</span>
<span class="k">try</span> <span class="kt">JSONSerialization</span><span class="o">.</span><span class="nf">jsonObject</span><span class="p">(</span><span class="nv">with</span><span class="p">:</span> <span class="nv">$0</span><span class="p">)</span>
<span class="p">}</span>
</code></pre>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight swift"><code><span class="kd">public</span> <span class="kd">func</span> <span class="n">tryMap</span><span class="o"><</span><span class="kt">NewSuccess</span><span class="o">></span><span class="p">(</span><span class="n">_</span> <span class="nv">transform</span><span class="p">:</span> <span class="p">(</span><span class="kt">Success</span><span class="p">)</span> <span class="k">throws</span> <span class="o">-></span> <span class="kt">NewSuccess</span><span class="p">)</span> <span class="o">-></span> <span class="kt">DownloadResponse</span><span class="o"><</span><span class="kt">NewSuccess</span><span class="p">,</span> <span class="kd">any</span> <span class="kt">Error</span><span class="o">></span> <span class="k">where</span> <span class="kt">NewSuccess</span> <span class="p">:</span> <span class="kt">Sendable</span></code></pre>
</div>
</div>
<div>
<h4>Parameters</h4>
<table class="graybox">
<tbody>
<tr>
<td>
<code>
<em>transform</em>
</code>
</td>
<td>
<div>
<p>A closure that takes the success value of the instance’s result.</p>
</div>
</td>
</tr>
</tbody>
</table>
</div>
<div>
<h4>Return Value</h4>
<p>A success or failure <code>DownloadResponse</code> depending on the result of the given closure. If this
instance’s result is a failure, returns the same failure.</p>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:9Alamofire16DownloadResponseV8mapErroryACyxqd__Gqd__q_XEs0E0Rd__lF"></a>
<a name="//apple_ref/swift/Method/mapError(_:)" class="dashAnchor"></a>
<a class="token" href="#/s:9Alamofire16DownloadResponseV8mapErroryACyxqd__Gqd__q_XEs0E0Rd__lF">mapError(_:<wbr>)</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>Evaluates the specified closure when the <code>DownloadResponse</code> is a failure, passing the unwrapped error as a parameter.</p>
<p>Use the <code>mapError</code> function with a closure that does not throw. For example:</p>
<pre class="highlight swift"><code><span class="k">let</span> <span class="nv">possibleData</span><span class="p">:</span> <span class="kt">DownloadResponse</span><span class="o"><</span><span class="kt">Data</span><span class="o">></span> <span class="o">=</span> <span class="o">...</span>
<span class="k">let</span> <span class="nv">withMyError</span> <span class="o">=</span> <span class="n">possibleData</span><span class="o">.</span><span class="n">mapError</span> <span class="p">{</span> <span class="kt">MyError</span><span class="o">.</span><span class="nf">error</span><span class="p">(</span><span class="nv">$0</span><span class="p">)</span> <span class="p">}</span>
</code></pre>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight swift"><code><span class="kd">public</span> <span class="kd">func</span> <span class="n">mapError</span><span class="o"><</span><span class="kt">NewFailure</span><span class="o">></span><span class="p">(</span><span class="n">_</span> <span class="nv">transform</span><span class="p">:</span> <span class="p">(</span><span class="kt">Failure</span><span class="p">)</span> <span class="o">-></span> <span class="kt">NewFailure</span><span class="p">)</span> <span class="o">-></span> <span class="kt">DownloadResponse</span><span class="o"><</span><span class="kt">Success</span><span class="p">,</span> <span class="kt">NewFailure</span><span class="o">></span> <span class="k">where</span> <span class="kt">NewFailure</span> <span class="p">:</span> <span class="kt">Error</span></code></pre>
</div>
</div>
<div>
<h4>Parameters</h4>
<table class="graybox">
<tbody>
<tr>
<td>
<code>
<em>transform</em>
</code>
</td>
<td>
<div>
<p>A closure that takes the error of the instance.</p>
</div>
</td>
</tr>
</tbody>
</table>
</div>
<div>
<h4>Return Value</h4>
<p>A <code>DownloadResponse</code> instance containing the result of the transform.</p>
</div>
</section>
</div>
</li>
<li class="item">
<div>
<code>
<a name="/s:9Alamofire16DownloadResponseV11tryMapErroryACyxs0F0_pGqd__q_KXEsAERd__lF"></a>
<a name="//apple_ref/swift/Method/tryMapError(_:)" class="dashAnchor"></a>
<a class="token" href="#/s:9Alamofire16DownloadResponseV11tryMapErroryACyxs0F0_pGqd__q_KXEsAERd__lF">tryMapError(_:<wbr>)</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>Evaluates the specified closure when the <code>DownloadResponse</code> is a failure, passing the unwrapped error as a parameter.</p>
<p>Use the <code>tryMapError</code> function with a closure that may throw an error. For example:</p>
<pre class="highlight swift"><code><span class="k">let</span> <span class="nv">possibleData</span><span class="p">:</span> <span class="kt">DownloadResponse</span><span class="o"><</span><span class="kt">Data</span><span class="o">></span> <span class="o">=</span> <span class="o">...</span>
<span class="k">let</span> <span class="nv">possibleObject</span> <span class="o">=</span> <span class="n">possibleData</span><span class="o">.</span><span class="n">tryMapError</span> <span class="p">{</span>
<span class="k">try</span> <span class="nf">someFailableFunction</span><span class="p">(</span><span class="nv">taking</span><span class="p">:</span> <span class="nv">$0</span><span class="p">)</span>
<span class="p">}</span>
</code></pre>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight swift"><code><span class="kd">public</span> <span class="kd">func</span> <span class="n">tryMapError</span><span class="o"><</span><span class="kt">NewFailure</span><span class="o">></span><span class="p">(</span><span class="n">_</span> <span class="nv">transform</span><span class="p">:</span> <span class="p">(</span><span class="kt">Failure</span><span class="p">)</span> <span class="k">throws</span> <span class="o">-></span> <span class="kt">NewFailure</span><span class="p">)</span> <span class="o">-></span> <span class="kt">DownloadResponse</span><span class="o"><</span><span class="kt">Success</span><span class="p">,</span> <span class="kd">any</span> <span class="kt">Error</span><span class="o">></span> <span class="k">where</span> <span class="kt">NewFailure</span> <span class="p">:</span> <span class="kt">Error</span></code></pre>
</div>
</div>
<div>
<h4>Parameters</h4>
<table class="graybox">
<tbody>
<tr>
<td>
<code>
<em>transform</em>
</code>
</td>
<td>
<div>
<p>A throwing closure that takes the error of the instance.</p>
</div>
</td>
</tr>
</tbody>
</table>
</div>
<div>
<h4>Return Value</h4>
<p>A <code>DownloadResponse</code> instance containing the result of the transform.</p>
</div>
</section>
</div>
</li>
</ul>
</div>
</div>
</section>
</article>
</div>
<section class="footer">
<p>© 2026 <a class="link" href="http://alamofire.org/" target="_blank" rel="external noopener">Alamofire Software Foundation</a>. All rights reserved. (Last updated: 2026-01-31)</p>
<p>Generated by <a class="link" href="https://github.com/realm/jazzy" target="_blank" rel="external noopener">jazzy ♪♫ v0.15.4</a>, a <a class="link" href="https://realm.io" target="_blank" rel="external noopener">Realm</a> project.</p>
</section>
</body>
</html> | html | github | https://github.com/Alamofire/Alamofire | docs/Structs/DownloadResponse.html |
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package database
import (
"context"
"strings"
"testing"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/helper/versions"
"github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/logical"
)
// TestWriteConfig_PluginVersionInStorage exercises how the database backend
// treats a builtin plugin's version: API reads must never report it, and an
// API write must scrub it from the stored config entry. A raw storage entry
// that still carries the builtin version (simulating a pre-1.12.2 write)
// remains in storage until the next API write cleans it up.
func TestWriteConfig_PluginVersionInStorage(t *testing.T) {
	cluster, sys := getCluster(t)
	t.Cleanup(cluster.Cleanup)

	// Back the mount with in-memory storage so raw entries can be inspected.
	config := logical.TestBackendConfig()
	config.StorageView = &logical.InmemStorage{}
	config.System = sys
	b, err := Factory(context.Background(), config)
	if err != nil {
		t.Fatal(err)
	}
	defer b.Cleanup(context.Background())

	const hdb = "hana-database-plugin"
	hdbBuiltin := versions.GetBuiltinVersion(consts.PluginTypeDatabase, hdb)

	// Configure a connection
	// writePluginVersion issues an API write of the connection config,
	// explicitly pinning the builtin plugin version.
	writePluginVersion := func() {
		t.Helper()
		req := &logical.Request{
			Operation: logical.UpdateOperation,
			Path:      "config/plugin-test",
			Storage:   config.StorageView,
			Data: map[string]interface{}{
				"connection_url":    "test",
				"plugin_name":       hdb,
				"plugin_version":    hdbBuiltin,
				"verify_connection": false,
			},
		}
		resp, err := b.HandleRequest(namespace.RootContext(nil), req)
		if err != nil || (resp != nil && resp.IsError()) {
			t.Fatalf("err:%s resp:%#v\n", err, resp)
		}
	}

	writePluginVersion()

	// getPluginVersionFromAPI reads the config back through the API and
	// returns the plugin_version field it reports.
	getPluginVersionFromAPI := func() string {
		t.Helper()
		req := &logical.Request{
			Operation: logical.ReadOperation,
			Path:      "config/plugin-test",
			Storage:   config.StorageView,
		}
		resp, err := b.HandleRequest(namespace.RootContext(nil), req)
		if err != nil || (resp != nil && resp.IsError()) {
			t.Fatalf("err:%s resp:%#v\n", err, resp)
		}
		return resp.Data["plugin_version"].(string)
	}

	// Builtin versions must never be surfaced through the API.
	pluginVersion := getPluginVersionFromAPI()
	if pluginVersion != "" {
		t.Fatalf("expected plugin_version empty but got %s", pluginVersion)
	}

	// Directly store config to get the builtin plugin version into storage,
	// simulating a write that happened before upgrading to 1.12.2+
	err = storeConfig(context.Background(), config.StorageView, "plugin-test", &DatabaseConfig{
		PluginName:    hdb,
		PluginVersion: hdbBuiltin,
	})
	if err != nil {
		t.Fatal(err)
	}

	// Now replay the read request, and we still shouldn't get the builtin version back.
	pluginVersion = getPluginVersionFromAPI()
	if pluginVersion != "" {
		t.Fatalf("expected plugin_version empty but got %s", pluginVersion)
	}

	// Check the underlying data, which should still have the version in storage.
	getPluginVersionFromStorage := func() string {
		t.Helper()
		entry, err := config.StorageView.Get(context.Background(), "config/plugin-test")
		if err != nil {
			t.Fatal(err)
		}
		if entry == nil {
			t.Fatal()
		}
		// NOTE: this local deliberately shadows the outer backend config.
		var config DatabaseConfig
		if err := entry.DecodeJSON(&config); err != nil {
			t.Fatal(err)
		}
		return config.PluginVersion
	}

	storagePluginVersion := getPluginVersionFromStorage()
	if storagePluginVersion != hdbBuiltin {
		t.Fatalf("Expected %s, got: %s", hdbBuiltin, storagePluginVersion)
	}

	// Trigger a write to storage, which should clean up plugin version in the storage entry.
	writePluginVersion()
	storagePluginVersion = getPluginVersionFromStorage()
	if storagePluginVersion != "" {
		t.Fatalf("Expected empty, got: %s", storagePluginVersion)
	}

	// Finally, confirm API requests still return empty plugin version too
	pluginVersion = getPluginVersionFromAPI()
	if pluginVersion != "" {
		t.Fatalf("expected plugin_version empty but got %s", pluginVersion)
	}
}
func TestWriteConfig_HelpfulErrorMessageWhenBuiltinOverridden(t *testing.T) {
cluster, sys := getClusterPostgresDB(t)
t.Cleanup(cluster.Cleanup)
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
config.System = sys
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
defer b.Cleanup(context.Background())
const pg = "postgresql-database-plugin"
pgBuiltin := versions.GetBuiltinVersion(consts.PluginTypeDatabase, pg)
// Configure a connection
data := map[string]interface{}{
"connection_url": "test",
"plugin_name": pg,
"plugin_version": pgBuiltin,
"verify_connection": false,
}
req := &logical.Request{
Operation: logical.UpdateOperation,
Path: "config/plugin-test",
Storage: config.StorageView,
Data: data,
}
resp, err := b.HandleRequest(namespace.RootContext(nil), req)
if err != nil {
t.Fatal(err)
}
if resp == nil || !resp.IsError() {
t.Fatalf("resp:%#v", resp)
}
if !strings.Contains(resp.Error().Error(), "overridden by an unversioned plugin") {
t.Fatalf("expected overridden error but got: %s", resp.Error())
}
} | go | github | https://github.com/hashicorp/vault | builtin/logical/database/path_config_connection_test.go |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.IsolationLevel;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.ListOffsetsRequestData;
import org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition;
import org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsTopic;
import org.apache.kafka.common.message.ListOffsetsResponseData;
import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse;
import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.protocol.MessageUtil;
import org.junit.jupiter.api.Test;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class ListOffsetsRequestTest {
@Test
public void testDuplicatePartitions() {
List<ListOffsetsTopic> topics = Collections.singletonList(
new ListOffsetsTopic()
.setName("topic")
.setPartitions(Arrays.asList(
new ListOffsetsPartition()
.setPartitionIndex(0),
new ListOffsetsPartition()
.setPartitionIndex(0))));
ListOffsetsRequestData data = new ListOffsetsRequestData()
.setTopics(topics)
.setReplicaId(-1);
ListOffsetsRequest request = ListOffsetsRequest.parse(MessageUtil.toByteBufferAccessor(data, (short) 1), (short) 1);
assertEquals(Collections.singleton(new TopicPartition("topic", 0)), request.duplicatePartitions());
assertEquals(0, data.timeoutMs()); // default value
}
@Test
public void testGetErrorResponse() {
for (short version = 1; version <= ApiKeys.LIST_OFFSETS.latestVersion(); version++) {
List<ListOffsetsTopic> topics = Collections.singletonList(
new ListOffsetsTopic()
.setName("topic")
.setPartitions(Collections.singletonList(
new ListOffsetsPartition()
.setPartitionIndex(0))));
ListOffsetsRequest request = ListOffsetsRequest.Builder
.forConsumer(true, IsolationLevel.READ_COMMITTED)
.setTargetTimes(topics)
.build(version);
ListOffsetsResponse response = (ListOffsetsResponse) request.getErrorResponse(0, Errors.NOT_LEADER_OR_FOLLOWER.exception());
List<ListOffsetsTopicResponse> v = Collections.singletonList(
new ListOffsetsTopicResponse()
.setName("topic")
.setPartitions(Collections.singletonList(
new ListOffsetsPartitionResponse()
.setErrorCode(Errors.NOT_LEADER_OR_FOLLOWER.code())
.setLeaderEpoch(ListOffsetsResponse.UNKNOWN_EPOCH)
.setOffset(ListOffsetsResponse.UNKNOWN_OFFSET)
.setPartitionIndex(0)
.setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP))));
ListOffsetsResponseData data = new ListOffsetsResponseData()
.setThrottleTimeMs(0)
.setTopics(v);
ListOffsetsResponse expectedResponse = new ListOffsetsResponse(data);
assertEquals(expectedResponse.data().topics(), response.data().topics());
assertEquals(expectedResponse.throttleTimeMs(), response.throttleTimeMs());
}
}
@Test
public void testToListOffsetsTopics() {
ListOffsetsPartition lop0 = new ListOffsetsPartition()
.setPartitionIndex(0)
.setCurrentLeaderEpoch(1)
.setTimestamp(123L);
ListOffsetsPartition lop1 = new ListOffsetsPartition()
.setPartitionIndex(1)
.setCurrentLeaderEpoch(3)
.setTimestamp(567L);
Map<TopicPartition, ListOffsetsPartition> timestampsToSearch = new HashMap<>();
timestampsToSearch.put(new TopicPartition("topic", 0), lop0);
timestampsToSearch.put(new TopicPartition("topic", 1), lop1);
List<ListOffsetsTopic> listOffsetTopics = ListOffsetsRequest.toListOffsetsTopics(timestampsToSearch);
assertEquals(1, listOffsetTopics.size());
ListOffsetsTopic topic = listOffsetTopics.get(0);
assertEquals("topic", topic.name());
assertEquals(2, topic.partitions().size());
assertTrue(topic.partitions().contains(lop0));
assertTrue(topic.partitions().contains(lop1));
}
@Test
public void testListOffsetsRequestOldestVersion() {
ListOffsetsRequest.Builder consumerRequestBuilder = ListOffsetsRequest.Builder
.forConsumer(false, IsolationLevel.READ_UNCOMMITTED);
ListOffsetsRequest.Builder requireTimestampRequestBuilder = ListOffsetsRequest.Builder
.forConsumer(true, IsolationLevel.READ_UNCOMMITTED);
ListOffsetsRequest.Builder requestCommittedRequestBuilder = ListOffsetsRequest.Builder
.forConsumer(false, IsolationLevel.READ_COMMITTED);
ListOffsetsRequest.Builder maxTimestampRequestBuilder = ListOffsetsRequest.Builder
.forConsumer(false, IsolationLevel.READ_UNCOMMITTED, true, false, false, false);
ListOffsetsRequest.Builder requireEarliestLocalTimestampRequestBuilder = ListOffsetsRequest.Builder
.forConsumer(false, IsolationLevel.READ_UNCOMMITTED, false, true, false, false);
ListOffsetsRequest.Builder requireTieredStorageTimestampRequestBuilder = ListOffsetsRequest.Builder
.forConsumer(false, IsolationLevel.READ_UNCOMMITTED, false, false, true, false);
ListOffsetsRequest.Builder requireEarliestPendingUploadTimestampRequestBuilder = ListOffsetsRequest.Builder
.forConsumer(false, IsolationLevel.READ_UNCOMMITTED, false, false, false, true);
assertEquals((short) 1, consumerRequestBuilder.oldestAllowedVersion());
assertEquals((short) 1, requireTimestampRequestBuilder.oldestAllowedVersion());
assertEquals((short) 2, requestCommittedRequestBuilder.oldestAllowedVersion());
assertEquals((short) 7, maxTimestampRequestBuilder.oldestAllowedVersion());
assertEquals((short) 8, requireEarliestLocalTimestampRequestBuilder.oldestAllowedVersion());
assertEquals((short) 9, requireTieredStorageTimestampRequestBuilder.oldestAllowedVersion());
assertEquals((short) 11, requireEarliestPendingUploadTimestampRequestBuilder.oldestAllowedVersion());
}
} | java | github | https://github.com/apache/kafka | clients/src/test/java/org/apache/kafka/common/requests/ListOffsetsRequestTest.java |
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package pki
import (
"context"
"encoding/pem"
"fmt"
"net/http"
"strings"
"time"
"github.com/hashicorp/vault/builtin/logical/pki/issuing"
"github.com/hashicorp/vault/builtin/logical/pki/observe"
"github.com/hashicorp/vault/builtin/logical/pki/revocation"
"github.com/hashicorp/vault/helper/constants"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/helper/certutil"
"github.com/hashicorp/vault/sdk/helper/errutil"
"github.com/hashicorp/vault/sdk/logical"
)
// pathFetchReadSchema is the shared OpenAPI response schema for the
// certificate/CRL fetch endpoints below (CA, CA chain, CRL, and individual
// certificate reads). Every field is marked Required: false because which
// fields are populated depends on the particular path and the state of the
// requested object (e.g. revocation fields only appear for revoked certs).
var pathFetchReadSchema = map[int][]framework.Response{
	http.StatusOK: {{
		Description: "OK",
		Fields: map[string]*framework.FieldSchema{
			"certificate": {
				Type:        framework.TypeString,
				Description: `Certificate`,
				Required:    false,
			},
			"revocation_time": {
				Type:        framework.TypeInt64,
				Description: `Revocation time`,
				Required:    false,
			},
			"revocation_time_rfc3339": {
				Type:        framework.TypeString,
				Description: `Revocation time RFC 3339 formatted`,
				Required:    false,
			},
			"issuer_id": {
				Type:        framework.TypeString,
				Description: `ID of the issuer`,
				Required:    false,
			},
			"ca_chain": {
				Type:        framework.TypeString,
				Description: `Issuing CA Chain`,
				Required:    false,
			},
			"authority_key_id": {
				Type:        framework.TypeString,
				Description: `AuthorityKeyID of certificate`,
				Required:    false,
			},
		},
	}},
}
// Returns the CA in raw format
// pathFetchCA builds the path serving the mount's CA certificate, in raw DER
// form at `ca` and PEM form at `ca/pem`. Read-only; handled by pathFetchRead.
func pathFetchCA(b *backend) *framework.Path {
	readOnly := map[logical.Operation]framework.OperationHandler{
		logical.ReadOperation: &framework.PathOperation{
			Callback:  b.pathFetchRead,
			Responses: pathFetchReadSchema,
		},
	}
	return &framework.Path{
		Pattern: `ca(/pem)?`,
		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixPKI,
			OperationSuffix: "ca-der|ca-pem",
		},
		Operations:      readOnly,
		HelpSynopsis:    pathFetchHelpSyn,
		HelpDescription: pathFetchHelpDesc,
	}
}
// pathFetchCAChain builds the path serving the CA trust chain. The pattern
// matches both `ca_chain` (raw PEM body) and `cert/ca_chain` (JSON response);
// both are handled by the shared pathFetchRead handler.
func pathFetchCAChain(b *backend) *framework.Path {
	return &framework.Path{
		Pattern: `(cert/)?ca_chain`,
		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixPKI,
			OperationSuffix: "ca-chain-pem|cert-ca-chain",
		},
		Operations: map[logical.Operation]framework.OperationHandler{
			logical.ReadOperation: &framework.PathOperation{
				Callback:  b.pathFetchRead,
				Responses: pathFetchReadSchema,
			},
		},
		HelpSynopsis:    pathFetchHelpSyn,
		HelpDescription: pathFetchHelpDesc,
	}
}
// pathFetchCRL builds the path serving the CRL in raw form. The pattern
// matches `crl`, `crl/pem`, `crl/delta`, and `crl/delta/pem`; the concrete
// path (delta vs. complete, DER vs. PEM) is disambiguated inside
// pathFetchRead.
func pathFetchCRL(b *backend) *framework.Path {
	return &framework.Path{
		Pattern: `crl(/pem|/delta(/pem)?)?`,
		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixPKI,
			OperationSuffix: "crl-der|crl-pem|crl-delta|crl-delta-pem",
		},
		Operations: map[logical.Operation]framework.OperationHandler{
			logical.ReadOperation: &framework.PathOperation{
				Callback:  b.pathFetchRead,
				Responses: pathFetchReadSchema,
			},
		},
		HelpSynopsis:    pathFetchHelpSyn,
		HelpDescription: pathFetchHelpDesc,
	}
}
// pathFetchUnifiedCRL builds the path serving the unified CRL in raw form.
// The pattern matches `unified-crl`, `unified-crl/pem`, `unified-crl/delta`,
// and `unified-crl/delta/pem`; reads go through the shared pathFetchRead
// handler.
func pathFetchUnifiedCRL(b *backend) *framework.Path {
	return &framework.Path{
		Pattern: `unified-crl(/pem|/delta(/pem)?)?`,
		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixPKI,
			OperationSuffix: "unified-crl-der|unified-crl-pem|unified-crl-delta|unified-crl-delta-pem",
		},
		Operations: map[logical.Operation]framework.OperationHandler{
			logical.ReadOperation: &framework.PathOperation{
				Callback: b.pathFetchRead,
				// Consistency fix: every other fetch path declares the shared
				// response schema; this one previously omitted it, leaving the
				// endpoint undocumented in the generated OpenAPI spec.
				Responses: pathFetchReadSchema,
			},
		},
		HelpSynopsis:    pathFetchHelpSyn,
		HelpDescription: pathFetchHelpDesc,
	}
}
// pathFetchValidRaw builds the path that returns any stored, valid
// (non-revoked) certificate by serial number in raw form: DER at
// cert/:serial/raw and PEM at cert/:serial/raw/pem.
func pathFetchValidRaw(b *backend) *framework.Path {
	return &framework.Path{
		Pattern: `cert/(?P<serial>[0-9A-Fa-f-:]+)/raw(/pem)?`,
		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixPKI,
			OperationSuffix: "cert-raw-der|cert-raw-pem",
		},
		Fields: map[string]*framework.FieldSchema{
			"serial": {
				Type: framework.TypeString,
				Description: `Certificate serial number, in colon- or
hyphen-separated octal`,
			},
		},
		Operations: map[logical.Operation]framework.OperationHandler{
			logical.ReadOperation: &framework.PathOperation{
				Callback:  b.pathFetchRead,
				Responses: pathFetchReadSchema,
			},
		},
		HelpSynopsis:    pathFetchHelpSyn,
		HelpDescription: pathFetchHelpDesc,
	}
}
// pathFetchValid builds the JSON path that returns any stored, valid
// (non-revoked) certificate by serial number. Since the literal "ca" also
// matches the serial pattern, this path additionally handles returning the
// CA certificate in a non-raw (JSON) format.
func pathFetchValid(b *backend) *framework.Path {
	return &framework.Path{
		Pattern: `cert/(?P<serial>[0-9A-Fa-f-:]+)`,
		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixPKI,
			OperationSuffix: "cert",
		},
		Fields: map[string]*framework.FieldSchema{
			"serial": {
				Type: framework.TypeString,
				Description: `Certificate serial number, in colon- or
hyphen-separated octal`,
			},
		},
		Operations: map[logical.Operation]framework.OperationHandler{
			logical.ReadOperation: &framework.PathOperation{
				Callback:  b.pathFetchRead,
				Responses: pathFetchReadSchema,
			},
		},
		HelpSynopsis:    pathFetchHelpSyn,
		HelpDescription: pathFetchHelpDesc,
	}
}
// pathFetchCRLViaCertPath builds the cert/-prefixed path that returns CRLs in
// a non-raw (JSON) format. On Enterprise builds the pattern additionally
// accepts the unified CRL variants.
func pathFetchCRLViaCertPath(b *backend) *framework.Path {
	pattern := `cert/(crl|delta-crl)`
	if constants.IsEnterprise {
		pattern = `cert/(crl|delta-crl|unified-crl|unified-delta-crl)`
	}
	return &framework.Path{
		Pattern: pattern,
		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixPKI,
			OperationSuffix: "cert-crl|cert-delta-crl|cert-unified-crl|cert-unified-delta-crl",
		},
		Operations: map[logical.Operation]framework.OperationHandler{
			logical.ReadOperation: &framework.PathOperation{
				Callback:  b.pathFetchRead,
				Responses: pathFetchReadSchema,
			},
		},
		HelpSynopsis:    pathFetchHelpSyn,
		HelpDescription: pathFetchHelpDesc,
	}
}
// pathFetchListCerts builds the `certs/` path that lists the serial numbers
// of all certificates stored by this mount; handled by pathFetchCertList.
func pathFetchListCerts(b *backend) *framework.Path {
	return &framework.Path{
		Pattern: "certs/?$",
		DisplayAttrs: &framework.DisplayAttributes{
			OperationPrefix: operationPrefixPKI,
			OperationSuffix: "certs",
		},
		Operations: map[logical.Operation]framework.OperationHandler{
			logical.ListOperation: &framework.PathOperation{
				Callback: b.pathFetchCertList,
			},
		},
		HelpSynopsis:    pathFetchHelpSyn,
		HelpDescription: pathFetchHelpDesc,
	}
}
// pathFetchCertList handles LIST on certs/: it enumerates the serials stored
// under the cert storage prefix and converts each stored (normalized) serial
// back to its display form via denormalizeSerial before responding.
func (b *backend) pathFetchCertList(ctx context.Context, req *logical.Request, _ *framework.FieldData) (response *logical.Response, retErr error) {
	serials, err := req.Storage.List(ctx, issuing.PathCerts)
	if err != nil {
		return nil, err
	}
	for idx, stored := range serials {
		serials[idx] = denormalizeSerial(stored)
	}
	return logical.ListResponse(serials), nil
}
// pathFetchRead is the shared read handler behind every fetch-style path in
// this file: ca (and variants), ca_chain, crl (legacy/delta/unified), and
// cert/:serial (plus /raw and /raw/pem). The request path determines both
// what is fetched and whether the response is raw (contentType non-empty,
// body returned via logical.HTTPRawBody) or a JSON logical.Response.
//
// NOTE on control flow: errors deliberately do NOT exit early. Raw paths must
// still emit a (possibly empty) raw body, so failures set retErr/response and
// jump to the reply label, where the raw-vs-JSON decision is made once.
func (b *backend) pathFetchRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (response *logical.Response, retErr error) {
	var serial, pemType, contentType string
	var certEntry, revokedEntry *logical.StorageEntry
	var funcErr error
	var certificate []byte
	var fullChain []byte
	var revocationTime int64
	var revocationIssuerId string
	var revocationTimeRfc3339 string
	var authorityKeyId []byte
	response = &logical.Response{
		Data: map[string]interface{}{},
	}
	sc := b.makeStorageContext(ctx, req.Storage)

	// Some of these need to return raw and some non-raw;
	// this is basically handled by setting contentType or not.
	// Errors don't cause an immediate exit, because the raw
	// paths still need to return raw output.

	// Supports HTTP conditional requests (If-Modified-Since) for CA and CRL
	// fetches; reqType is refined per-path below.
	modifiedCtx := &IfModifiedSinceHelper{
		req:       req,
		issuerRef: defaultRef,
	}
	switch {
	// CA certificate fetches, raw and JSON forms.
	case req.Path == "ca" || req.Path == "ca/pem" || req.Path == "cert/ca" || req.Path == "cert/ca/raw" || req.Path == "cert/ca/raw/pem":
		modifiedCtx.reqType = ifModifiedCA
		ret, err := sendNotModifiedResponseIfNecessary(modifiedCtx, sc, response)
		if err != nil || ret {
			retErr = err
			goto reply
		}
		serial = "ca"
		contentType = "application/pkix-cert"
		if req.Path == "ca/pem" || req.Path == "cert/ca/raw/pem" {
			pemType = "CERTIFICATE"
			contentType = "application/pem-certificate-chain"
		} else if req.Path == "cert/ca" {
			// JSON response: PEM-encode the cert but clear contentType.
			pemType = "CERTIFICATE"
			contentType = ""
		}
	// CA chain fetches; `ca_chain` is raw, `cert/ca_chain` is JSON.
	case req.Path == "ca_chain" || req.Path == "cert/ca_chain":
		serial = "ca_chain"
		if req.Path == "ca_chain" {
			contentType = "application/pkix-cert"
		}
	// All CRL variants: legacy/delta/unified, DER/PEM, raw and cert/-prefixed.
	case req.Path == "crl" || req.Path == "crl/pem" || req.Path == "crl/delta" || req.Path == "crl/delta/pem" || req.Path == "cert/crl" || req.Path == "cert/crl/raw" || req.Path == "cert/crl/raw/pem" || req.Path == "cert/delta-crl" || req.Path == "cert/delta-crl/raw" || req.Path == "cert/delta-crl/raw/pem" || req.Path == "unified-crl" || req.Path == "unified-crl/pem" || req.Path == "unified-crl/delta" || req.Path == "unified-crl/delta/pem" || req.Path == "cert/unified-crl" || req.Path == "cert/unified-crl/raw" || req.Path == "cert/unified-crl/raw/pem" || req.Path == "cert/unified-delta-crl" || req.Path == "cert/unified-delta-crl/raw" || req.Path == "cert/unified-delta-crl/raw/pem":
		config, err := b.CrlBuilder().GetConfigWithUpdate(sc)
		if err != nil {
			retErr = err
			goto reply
		}
		// Classify the request: delta vs. complete, unified vs. local. Note
		// a local path may still be served unified when config says so.
		var isDelta bool
		var isUnified bool
		if strings.Contains(req.Path, "delta") {
			isDelta = true
		}
		if strings.Contains(req.Path, "unified") || shouldLocalPathsUseUnified(config) {
			isUnified = true
		}
		modifiedCtx.reqType = ifModifiedCRL
		if !isUnified && isDelta {
			modifiedCtx.reqType = ifModifiedDeltaCRL
		} else if isUnified && !isDelta {
			modifiedCtx.reqType = ifModifiedUnifiedCRL
		} else if isUnified && isDelta {
			modifiedCtx.reqType = ifModifiedUnifiedDeltaCRL
		}
		ret, err := sendNotModifiedResponseIfNecessary(modifiedCtx, sc, response)
		if err != nil || ret {
			retErr = err
			goto reply
		}
		// Map the classification to the storage path of the matching CRL.
		serial = legacyCRLPath
		if !isUnified && isDelta {
			serial = deltaCRLPath
		} else if isUnified && !isDelta {
			serial = unifiedCRLPath
		} else if isUnified && isDelta {
			serial = unifiedDeltaCRLPath
		}
		contentType = "application/pkix-crl"
		if strings.Contains(req.Path, "pem") {
			pemType = "X509 CRL"
			contentType = "application/x-pem-file"
		} else if req.Path == "cert/crl" || req.Path == "cert/delta-crl" || req.Path == "cert/unified-crl" || req.Path == "cert/unified-delta-crl" {
			// JSON response: PEM-encode the CRL but clear contentType.
			pemType = "X509 CRL"
			contentType = ""
		}
	// Raw fetch of an arbitrary certificate by serial.
	case strings.HasSuffix(req.Path, "/pem") || strings.HasSuffix(req.Path, "/raw"):
		serial = data.Get("serial").(string)
		contentType = "application/pkix-cert"
		if strings.HasSuffix(req.Path, "/pem") {
			pemType = "CERTIFICATE"
			contentType = "application/pem-certificate-chain"
		}
	// JSON fetch of an arbitrary certificate by serial.
	default:
		if ser, ok := data.GetOk("serial"); ok {
			serial = ser.(string)
		}
		pemType = "CERTIFICATE"
	}
	if len(serial) == 0 {
		response = logical.ErrorResponse("The serial number must be provided")
		goto reply
	}
	// Prefer fetchCAInfo to fetchCertBySerial for CA certificates.
	if serial == "ca_chain" || serial == "ca" {
		caInfo, err := sc.fetchCAInfo(defaultRef, issuing.ReadOnlyUsage)
		if err != nil {
			switch err.(type) {
			case errutil.UserError:
				response = logical.ErrorResponse(err.Error())
				goto reply
			default:
				retErr = err
				goto reply
			}
		}
		if serial == "ca_chain" {
			// Join each chain element as trimmed PEM blocks separated by a
			// single newline.
			rawChain := caInfo.GetFullChain()
			var chainStr string
			for _, ca := range rawChain {
				block := pem.Block{
					Type:  "CERTIFICATE",
					Bytes: ca.Bytes,
				}
				chainStr = strings.Join([]string{chainStr, strings.TrimSpace(string(pem.EncodeToMemory(&block)))}, "\n")
			}
			fullChain = []byte(strings.TrimSpace(chainStr))
			certificate = fullChain
		} else if serial == "ca" {
			certificate = caInfo.Certificate.Raw
			authorityKeyId = caInfo.Certificate.AuthorityKeyId
			if len(pemType) != 0 {
				block := pem.Block{
					Type:  pemType,
					Bytes: certificate,
				}
				// This is convoluted on purpose to ensure that we don't have trailing
				// newlines via various paths
				certificate = []byte(strings.TrimSpace(string(pem.EncodeToMemory(&block))))
			}
		}
		goto reply
	}
	// Non-CA path: look up the stored cert (or CRL blob) by serial.
	certEntry, funcErr = fetchCertBySerial(sc, req.Path, serial)
	if funcErr != nil {
		switch funcErr.(type) {
		case errutil.UserError:
			response = logical.ErrorResponse(funcErr.Error())
			goto reply
		default:
			retErr = funcErr
			goto reply
		}
	}
	if certEntry == nil {
		// Not found: nil response yields a 404-style empty reply (or an empty
		// raw body on raw paths).
		response = nil
		goto reply
	}
	certificate = certEntry.Value
	if len(pemType) != 0 {
		block := pem.Block{
			Type:  pemType,
			Bytes: certEntry.Value,
		}
		// This is convoluted on purpose to ensure that we don't have trailing
		// newlines via various paths
		certificate = []byte(strings.TrimSpace(string(pem.EncodeToMemory(&block))))
	}
	// Check for a revocation entry so revocation metadata can be reported.
	revokedEntry, funcErr = fetchCertBySerial(sc, "revoked/", serial)
	if funcErr != nil {
		switch funcErr.(type) {
		case errutil.UserError:
			response = logical.ErrorResponse(funcErr.Error())
			goto reply
		default:
			retErr = funcErr
			goto reply
		}
	}
	if revokedEntry != nil {
		var revInfo revocation.RevocationInfo
		err := revokedEntry.DecodeJSON(&revInfo)
		if err != nil {
			return logical.ErrorResponse(fmt.Sprintf("Error decoding revocation entry for serial %s: %s", serial, err)), nil
		}
		revocationTime = revInfo.RevocationTime
		revocationIssuerId = revInfo.CertificateIssuer.String()
		if !revInfo.RevocationTimeUTC.IsZero() {
			revocationTimeRfc3339 = revInfo.RevocationTimeUTC.Format(time.RFC3339Nano)
		}
	}
reply:
	// Best-effort: if the authority key id wasn't set above, try parsing it
	// out of the PEM body we are about to return.
	if len(authorityKeyId) == 0 && len(certificate) > 0 {
		if certs, err := certutil.ParseCertsPEM(certificate); err == nil && len(certs) > 0 {
			authorityKeyId = certs[0].AuthorityKeyId
		}
	}
	switch {
	case len(contentType) != 0:
		// Raw reply: errors cannot be surfaced in the body, so log and clear.
		response = &logical.Response{
			Data: map[string]interface{}{
				logical.HTTPContentType: contentType,
				logical.HTTPRawBody:     certificate,
			},
		}
		if retErr != nil {
			if b.Logger().IsWarn() {
				b.Logger().Warn("possible error, but cannot return in raw response. Note that an empty CA probably means none was configured, and an empty CRL is possibly correct", "error", retErr)
			}
		}
		retErr = nil
		if len(certificate) > 0 {
			response.Data[logical.HTTPStatusCode] = 200
		} else {
			response.Data[logical.HTTPStatusCode] = 204
		}
	case retErr != nil:
		response = nil
		return
	case response == nil:
		return
	case response.IsError():
		return response, nil
	default:
		// JSON reply: populate the fields documented in pathFetchReadSchema.
		response.Data["certificate"] = string(certificate)
		response.Data["revocation_time"] = revocationTime
		response.Data["revocation_time_rfc3339"] = revocationTimeRfc3339
		if len(authorityKeyId) > 0 {
			response.Data["authority_key_id"] = certutil.GetHexFormatted(authorityKeyId, ":")
		}
		// Only output this field if we have a value for it as it doesn't make sense for a
		// bunch of code paths that go through here
		if revocationIssuerId != "" {
			response.Data["issuer_id"] = revocationIssuerId
		}
		if len(fullChain) > 0 {
			response.Data["ca_chain"] = string(fullChain)
		}
	}
	b.pkiObserver.RecordPKIObservation(ctx, req, observe.ObservationTypePKIReadIssuerCertificate,
		observe.NewAdditionalPKIMetadata("pem_type", pemType),
		observe.NewAdditionalPKIMetadata("content_type", contentType),
		observe.NewAdditionalPKIMetadata("revocation_time", revocationTimeRfc3339),
		observe.NewAdditionalPKIMetadata("serial_number", serial),
	)
	return
}
// pathFetchHelpSyn is the one-line help synopsis shared by all fetch paths.
const pathFetchHelpSyn = `
Fetch a CA, CRL, CA Chain, or non-revoked certificate.
`

// pathFetchHelpDesc is the long-form help text shared by all fetch paths.
const pathFetchHelpDesc = `
This allows certificates to be fetched. Use /cert/:serial for JSON responses.
Using "ca" or "crl" as the value fetches the appropriate information in DER encoding. Add "/pem" to either to get PEM encoding.
Using "ca_chain" as the value fetches the certificate authority trust chain in PEM encoding.
Otherwise, specify a serial number to fetch the specified certificate. Add "/raw" to get just the certificate in DER form, "/raw/pem" to get the PEM encoded certificate.
`
import numpy as np
import pytest
from pandas.errors import DataError
from pandas.core.dtypes.common import pandas_dtype
from pandas import (
NA,
DataFrame,
Series,
)
import pandas._testing as tm
# gh-12373 : rolling functions error on float32 data
# make sure rolling functions work for different dtypes
#
# further note that we are only checking rolling for full dtype
# compliance (though both expanding and ewm inherit)
def get_dtype(dtype, coerce_int=None):
    """Resolve a dtype string to a pandas dtype object.

    Returns ``None`` (i.e. "let the constructor pick the default") when
    integer coercion is explicitly disabled (``coerce_int=False``) and the
    dtype string names an integer type; otherwise returns the parsed dtype.
    """
    skip = coerce_int is False and "int" in dtype
    return None if skip else pandas_dtype(dtype)
@pytest.fixture(
    params=[
        "object",
        "category",
        "int8",
        "int16",
        "int32",
        "int64",
        "uint8",
        "uint16",
        "uint32",
        "uint64",
        "float16",
        "float32",
        "float64",
        "m8[ns]",
        "M8[ns]",
        "datetime64[ns, UTC]",
    ]
)
def dtypes(request):
    """Parametrized fixture yielding each dtype string exercised by the
    window dtype tests (numeric, object/category, and datetime-like)."""
    return request.param
@pytest.mark.parametrize(
    "method, data, expected_data, coerce_int, min_periods",
    [
        ("count", np.arange(5), [1, 2, 2, 2, 2], True, 0),
        ("count", np.arange(10, 0, -2), [1, 2, 2, 2, 2], True, 0),
        ("count", [0, 1, 2, np.nan, 4], [1, 2, 2, 1, 1], False, 0),
        ("max", np.arange(5), [np.nan, 1, 2, 3, 4], True, None),
        ("max", np.arange(10, 0, -2), [np.nan, 10, 8, 6, 4], True, None),
        ("max", [0, 1, 2, np.nan, 4], [np.nan, 1, 2, np.nan, np.nan], False, None),
        ("min", np.arange(5), [np.nan, 0, 1, 2, 3], True, None),
        ("min", np.arange(10, 0, -2), [np.nan, 8, 6, 4, 2], True, None),
        ("min", [0, 1, 2, np.nan, 4], [np.nan, 0, 1, np.nan, np.nan], False, None),
        ("sum", np.arange(5), [np.nan, 1, 3, 5, 7], True, None),
        ("sum", np.arange(10, 0, -2), [np.nan, 18, 14, 10, 6], True, None),
        ("sum", [0, 1, 2, np.nan, 4], [np.nan, 1, 3, np.nan, np.nan], False, None),
        ("mean", np.arange(5), [np.nan, 0.5, 1.5, 2.5, 3.5], True, None),
        ("mean", np.arange(10, 0, -2), [np.nan, 9, 7, 5, 3], True, None),
        ("mean", [0, 1, 2, np.nan, 4], [np.nan, 0.5, 1.5, np.nan, np.nan], False, None),
        ("std", np.arange(5), [np.nan] + [np.sqrt(0.5)] * 4, True, None),
        ("std", np.arange(10, 0, -2), [np.nan] + [np.sqrt(2)] * 4, True, None),
        (
            "std",
            [0, 1, 2, np.nan, 4],
            [np.nan] + [np.sqrt(0.5)] * 2 + [np.nan] * 2,
            False,
            None,
        ),
        ("var", np.arange(5), [np.nan, 0.5, 0.5, 0.5, 0.5], True, None),
        ("var", np.arange(10, 0, -2), [np.nan, 2, 2, 2, 2], True, None),
        ("var", [0, 1, 2, np.nan, 4], [np.nan, 0.5, 0.5, np.nan, np.nan], False, None),
        ("median", np.arange(5), [np.nan, 0.5, 1.5, 2.5, 3.5], True, None),
        ("median", np.arange(10, 0, -2), [np.nan, 9, 7, 5, 3], True, None),
        (
            "median",
            [0, 1, 2, np.nan, 4],
            [np.nan, 0.5, 1.5, np.nan, np.nan],
            False,
            None,
        ),
    ],
)
def test_series_dtypes(
    method, data, expected_data, coerce_int, dtypes, min_periods, step
):
    # Rolling aggregations over a Series should work for every dtype the
    # `dtypes` fixture supplies and always produce a float64 result;
    # datetime-like dtypes are only valid for "count" and must raise
    # DataError for any other aggregation.
    ser = Series(data, dtype=get_dtype(dtypes, coerce_int=coerce_int))
    rolled = ser.rolling(2, min_periods=min_periods, step=step)
    if dtypes in ("m8[ns]", "M8[ns]", "datetime64[ns, UTC]") and method != "count":
        msg = "No numeric types to aggregate"
        with pytest.raises(DataError, match=msg):
            getattr(rolled, method)()
    else:
        result = getattr(rolled, method)()
        # Expected values are sliced by `step` to mirror rolling(..., step=).
        expected = Series(expected_data, dtype="float64")[::step]
        tm.assert_almost_equal(result, expected)
def test_series_nullable_int(any_signed_int_ea_dtype, step):
# GH 43016
ser = Series([0, 1, NA], dtype=any_signed_int_ea_dtype)
result = ser.rolling(2, step=step).mean()
expected = Series([np.nan, 0.5, np.nan])[::step]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
    "method, expected_data, min_periods",
    [
        ("count", {0: Series([1, 2, 2, 2, 2]), 1: Series([1, 2, 2, 2, 2])}, 0),
        (
            "max",
            {0: Series([np.nan, 2, 4, 6, 8]), 1: Series([np.nan, 3, 5, 7, 9])},
            None,
        ),
        (
            "min",
            {0: Series([np.nan, 0, 2, 4, 6]), 1: Series([np.nan, 1, 3, 5, 7])},
            None,
        ),
        (
            "sum",
            {0: Series([np.nan, 2, 6, 10, 14]), 1: Series([np.nan, 4, 8, 12, 16])},
            None,
        ),
        (
            "mean",
            {0: Series([np.nan, 1, 3, 5, 7]), 1: Series([np.nan, 2, 4, 6, 8])},
            None,
        ),
        (
            "std",
            {
                0: Series([np.nan] + [np.sqrt(2)] * 4),
                1: Series([np.nan] + [np.sqrt(2)] * 4),
            },
            None,
        ),
        (
            "var",
            {0: Series([np.nan, 2, 2, 2, 2]), 1: Series([np.nan, 2, 2, 2, 2])},
            None,
        ),
        (
            "median",
            {0: Series([np.nan, 1, 3, 5, 7]), 1: Series([np.nan, 2, 4, 6, 8])},
            None,
        ),
    ],
)
def test_dataframe_dtypes(method, expected_data, dtypes, min_periods, step):
    # DataFrame analogue of test_series_dtypes: rolling aggregations over a
    # two-column frame for every dtype the `dtypes` fixture supplies;
    # datetime-like dtypes raise DataError for everything except "count".
    df = DataFrame(np.arange(10).reshape((5, 2)), dtype=get_dtype(dtypes))
    rolled = df.rolling(2, min_periods=min_periods, step=step)
    if dtypes in ("m8[ns]", "M8[ns]", "datetime64[ns, UTC]") and method != "count":
        msg = "Cannot aggregate non-numeric type"
        with pytest.raises(DataError, match=msg):
            getattr(rolled, method)()
    else:
        result = getattr(rolled, method)()
        # Expected values are sliced by `step` to mirror rolling(..., step=).
        expected = DataFrame(expected_data, dtype="float64")[::step]
        tm.assert_frame_equal(result, expected)
import EventHandler from '../../src/dom/event-handler.js'
import Modal from '../../src/modal.js'
import ScrollBarHelper from '../../src/util/scrollbar.js'
import {
clearBodyAndDocument, clearFixture, createEvent, getFixture, jQueryMock
} from '../helpers/fixture.js'
describe('Modal', () => {
let fixtureEl
beforeAll(() => {
fixtureEl = getFixture()
})
afterEach(() => {
clearFixture()
clearBodyAndDocument()
document.body.classList.remove('modal-open')
for (const backdrop of document.querySelectorAll('.modal-backdrop')) {
backdrop.remove()
}
})
beforeEach(() => {
clearBodyAndDocument()
})
describe('VERSION', () => {
it('should return plugin version', () => {
expect(Modal.VERSION).toEqual(jasmine.any(String))
})
})
describe('Default', () => {
it('should return plugin default config', () => {
expect(Modal.Default).toEqual(jasmine.any(Object))
})
})
describe('DATA_KEY', () => {
it('should return plugin data key', () => {
expect(Modal.DATA_KEY).toEqual('bs.modal')
})
})
describe('constructor', () => {
it('should take care of element either passed as a CSS selector or DOM element', () => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modalBySelector = new Modal('.modal')
const modalByElement = new Modal(modalEl)
expect(modalBySelector._element).toEqual(modalEl)
expect(modalByElement._element).toEqual(modalEl)
})
})
describe('toggle', () => {
it('should call ScrollBarHelper to handle scrollBar on body', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const spyHide = spyOn(ScrollBarHelper.prototype, 'hide').and.callThrough()
const spyReset = spyOn(ScrollBarHelper.prototype, 'reset').and.callThrough()
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl)
modalEl.addEventListener('shown.bs.modal', () => {
expect(spyHide).toHaveBeenCalled()
modal.toggle()
})
modalEl.addEventListener('hidden.bs.modal', () => {
expect(spyReset).toHaveBeenCalled()
resolve()
})
modal.toggle()
})
})
})
describe('show', () => {
it('should show a modal', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl)
modalEl.addEventListener('show.bs.modal', event => {
expect(event).toBeDefined()
})
modalEl.addEventListener('shown.bs.modal', () => {
expect(modalEl.getAttribute('aria-modal')).toEqual('true')
expect(modalEl.getAttribute('role')).toEqual('dialog')
expect(modalEl.getAttribute('aria-hidden')).toBeNull()
expect(modalEl.style.display).toEqual('block')
expect(document.querySelector('.modal-backdrop')).not.toBeNull()
resolve()
})
modal.show()
})
})
it('should show a modal without backdrop', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl, {
backdrop: false
})
modalEl.addEventListener('show.bs.modal', event => {
expect(event).toBeDefined()
})
modalEl.addEventListener('shown.bs.modal', () => {
expect(modalEl.getAttribute('aria-modal')).toEqual('true')
expect(modalEl.getAttribute('role')).toEqual('dialog')
expect(modalEl.getAttribute('aria-hidden')).toBeNull()
expect(modalEl.style.display).toEqual('block')
expect(document.querySelector('.modal-backdrop')).toBeNull()
resolve()
})
modal.show()
})
})
it('should show a modal and append the element', () => {
return new Promise(resolve => {
const modalEl = document.createElement('div')
const id = 'dynamicModal'
modalEl.setAttribute('id', id)
modalEl.classList.add('modal')
modalEl.innerHTML = '<div class="modal-dialog"></div>'
const modal = new Modal(modalEl)
modalEl.addEventListener('shown.bs.modal', () => {
const dynamicModal = document.getElementById(id)
expect(dynamicModal).not.toBeNull()
dynamicModal.remove()
resolve()
})
modal.show()
})
})
it('should do nothing if a modal is shown', () => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl)
const spy = spyOn(EventHandler, 'trigger')
modal._isShown = true
modal.show()
expect(spy).not.toHaveBeenCalled()
})
it('should do nothing if a modal is transitioning', () => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl)
const spy = spyOn(EventHandler, 'trigger')
modal._isTransitioning = true
modal.show()
expect(spy).not.toHaveBeenCalled()
})
it('should not fire shown event when show is prevented', () => {
return new Promise((resolve, reject) => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl)
modalEl.addEventListener('show.bs.modal', event => {
event.preventDefault()
const expectedDone = () => {
expect().nothing()
resolve()
}
setTimeout(expectedDone, 10)
})
modalEl.addEventListener('shown.bs.modal', () => {
reject(new Error('shown event triggered'))
})
modal.show()
})
})
it('should be shown after the first call to show() has been prevented while fading is enabled ', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = '<div class="modal fade"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl)
let prevented = false
modalEl.addEventListener('show.bs.modal', event => {
if (!prevented) {
event.preventDefault()
prevented = true
setTimeout(() => {
modal.show()
})
}
})
modalEl.addEventListener('shown.bs.modal', () => {
expect(prevented).toBeTrue()
expect(modal._isAnimated()).toBeTrue()
resolve()
})
modal.show()
})
})
it('should set is transitioning if fade class is present', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = '<div class="modal fade"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl)
modalEl.addEventListener('show.bs.modal', () => {
setTimeout(() => {
expect(modal._isTransitioning).toBeTrue()
})
})
modalEl.addEventListener('shown.bs.modal', () => {
expect(modal._isTransitioning).toBeFalse()
resolve()
})
modal.show()
})
})
it('should close modal when a click occurred on data-bs-dismiss="modal" inside modal', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = [
'<div class="modal fade">',
' <div class="modal-dialog">',
' <div class="modal-header">',
' <button type="button" data-bs-dismiss="modal"></button>',
' </div>',
' </div>',
'</div>'
].join('')
const modalEl = fixtureEl.querySelector('.modal')
const btnClose = fixtureEl.querySelector('[data-bs-dismiss="modal"]')
const modal = new Modal(modalEl)
const spy = spyOn(modal, 'hide').and.callThrough()
modalEl.addEventListener('shown.bs.modal', () => {
btnClose.click()
})
modalEl.addEventListener('hidden.bs.modal', () => {
expect(spy).toHaveBeenCalled()
resolve()
})
modal.show()
})
})
it('should close modal when a click occurred on a data-bs-dismiss="modal" with "bs-target" outside of modal element', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = [
'<button type="button" data-bs-dismiss="modal" data-bs-target="#modal1"></button>',
'<div id="modal1" class="modal fade">',
' <div class="modal-dialog"></div>',
'</div>'
].join('')
const modalEl = fixtureEl.querySelector('.modal')
const btnClose = fixtureEl.querySelector('[data-bs-dismiss="modal"]')
const modal = new Modal(modalEl)
const spy = spyOn(modal, 'hide').and.callThrough()
modalEl.addEventListener('shown.bs.modal', () => {
btnClose.click()
})
modalEl.addEventListener('hidden.bs.modal', () => {
expect(spy).toHaveBeenCalled()
resolve()
})
modal.show()
})
})
it('should set .modal\'s scroll top to 0', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = [
'<div class="modal fade">',
' <div class="modal-dialog"></div>',
'</div>'
].join('')
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl)
modalEl.addEventListener('shown.bs.modal', () => {
expect(modalEl.scrollTop).toEqual(0)
resolve()
})
modal.show()
})
})
it('should set modal body scroll top to 0 if modal body do not exists', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = [
'<div class="modal fade">',
' <div class="modal-dialog">',
' <div class="modal-body"></div>',
' </div>',
'</div>'
].join('')
const modalEl = fixtureEl.querySelector('.modal')
const modalBody = modalEl.querySelector('.modal-body')
const modal = new Modal(modalEl)
modalEl.addEventListener('shown.bs.modal', () => {
expect(modalBody.scrollTop).toEqual(0)
resolve()
})
modal.show()
})
})
it('should not trap focus if focus equal to false', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = '<div class="modal fade"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl, {
focus: false
})
const spy = spyOn(modal._focustrap, 'activate').and.callThrough()
modalEl.addEventListener('shown.bs.modal', () => {
expect(spy).not.toHaveBeenCalled()
resolve()
})
modal.show()
})
})
it('should add listener when escape touch is pressed', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl)
const spy = spyOn(modal, 'hide').and.callThrough()
modalEl.addEventListener('shown.bs.modal', () => {
const keydownEscape = createEvent('keydown')
keydownEscape.key = 'Escape'
modalEl.dispatchEvent(keydownEscape)
})
modalEl.addEventListener('hidden.bs.modal', () => {
expect(spy).toHaveBeenCalled()
resolve()
})
modal.show()
})
})
it('should do nothing when the pressed key is not escape', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl)
const spy = spyOn(modal, 'hide')
const expectDone = () => {
expect(spy).not.toHaveBeenCalled()
resolve()
}
modalEl.addEventListener('shown.bs.modal', () => {
const keydownTab = createEvent('keydown')
keydownTab.key = 'Tab'
modalEl.dispatchEvent(keydownTab)
setTimeout(expectDone, 30)
})
modal.show()
})
})
it('should adjust dialog on resize', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl)
const spy = spyOn(modal, '_adjustDialog').and.callThrough()
const expectDone = () => {
expect(spy).toHaveBeenCalled()
resolve()
}
modalEl.addEventListener('shown.bs.modal', () => {
const resizeEvent = createEvent('resize')
window.dispatchEvent(resizeEvent)
setTimeout(expectDone, 10)
})
modal.show()
})
})
it('should not close modal when clicking on modal-content', () => {
return new Promise((resolve, reject) => {
fixtureEl.innerHTML = [
'<div class="modal">',
' <div class="modal-dialog">',
' <div class="modal-content"></div>',
' </div>',
'</div>'
].join('')
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl)
const shownCallback = () => {
setTimeout(() => {
expect(modal._isShown).toEqual(true)
resolve()
}, 10)
}
modalEl.addEventListener('shown.bs.modal', () => {
fixtureEl.querySelector('.modal-dialog').click()
fixtureEl.querySelector('.modal-content').click()
shownCallback()
})
modalEl.addEventListener('hidden.bs.modal', () => {
reject(new Error('Should not hide a modal'))
})
modal.show()
})
})
it('should not close modal when clicking outside of modal-content if backdrop = false', () => {
return new Promise((resolve, reject) => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl, {
backdrop: false
})
const shownCallback = () => {
setTimeout(() => {
expect(modal._isShown).toBeTrue()
resolve()
}, 10)
}
modalEl.addEventListener('shown.bs.modal', () => {
modalEl.click()
shownCallback()
})
modalEl.addEventListener('hidden.bs.modal', () => {
reject(new Error('Should not hide a modal'))
})
modal.show()
})
})
it('should not close modal when clicking outside of modal-content if backdrop = static', () => {
return new Promise((resolve, reject) => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl, {
backdrop: 'static'
})
const shownCallback = () => {
setTimeout(() => {
expect(modal._isShown).toBeTrue()
resolve()
}, 10)
}
modalEl.addEventListener('shown.bs.modal', () => {
modalEl.click()
shownCallback()
})
modalEl.addEventListener('hidden.bs.modal', () => {
reject(new Error('Should not hide a modal'))
})
modal.show()
})
})
it('should close modal when escape key is pressed with keyboard = true and backdrop is static', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl, {
backdrop: 'static',
keyboard: true
})
const shownCallback = () => {
setTimeout(() => {
expect(modal._isShown).toBeFalse()
resolve()
}, 10)
}
modalEl.addEventListener('shown.bs.modal', () => {
const keydownEscape = createEvent('keydown')
keydownEscape.key = 'Escape'
modalEl.dispatchEvent(keydownEscape)
shownCallback()
})
modal.show()
})
})
it('should not close modal when escape key is pressed with keyboard = false', () => {
return new Promise((resolve, reject) => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl, {
keyboard: false
})
const shownCallback = () => {
setTimeout(() => {
expect(modal._isShown).toBeTrue()
resolve()
}, 10)
}
modalEl.addEventListener('shown.bs.modal', () => {
const keydownEscape = createEvent('keydown')
keydownEscape.key = 'Escape'
modalEl.dispatchEvent(keydownEscape)
shownCallback()
})
modalEl.addEventListener('hidden.bs.modal', () => {
reject(new Error('Should not hide a modal'))
})
modal.show()
})
})
it('should not overflow when clicking outside of modal-content if backdrop = static', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog" style="transition-duration: 20ms;"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl, {
backdrop: 'static'
})
modalEl.addEventListener('shown.bs.modal', () => {
modalEl.click()
setTimeout(() => {
expect(modalEl.clientHeight).toEqual(modalEl.scrollHeight)
resolve()
}, 20)
})
modal.show()
})
})
it('should not queue multiple callbacks when clicking outside of modal-content and backdrop = static', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog" style="transition-duration: 50ms;"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl, {
backdrop: 'static'
})
modalEl.addEventListener('shown.bs.modal', () => {
const spy = spyOn(modal, '_queueCallback').and.callThrough()
const mouseDown = createEvent('mousedown')
modalEl.dispatchEvent(mouseDown)
modalEl.click()
modalEl.dispatchEvent(mouseDown)
modalEl.click()
setTimeout(() => {
expect(spy).toHaveBeenCalledTimes(1)
resolve()
}, 20)
})
modal.show()
})
})
it('should trap focus', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl)
const spy = spyOn(modal._focustrap, 'activate').and.callThrough()
modalEl.addEventListener('shown.bs.modal', () => {
expect(spy).toHaveBeenCalled()
resolve()
})
modal.show()
})
})
})
describe('hide', () => {
it('should hide a modal', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl)
const backdropSpy = spyOn(modal._backdrop, 'hide').and.callThrough()
modalEl.addEventListener('shown.bs.modal', () => {
modal.hide()
})
modalEl.addEventListener('hide.bs.modal', event => {
expect(event).toBeDefined()
})
modalEl.addEventListener('hidden.bs.modal', () => {
expect(modalEl.getAttribute('aria-modal')).toBeNull()
expect(modalEl.getAttribute('role')).toBeNull()
expect(modalEl.getAttribute('aria-hidden')).toEqual('true')
expect(modalEl.style.display).toEqual('none')
expect(backdropSpy).toHaveBeenCalled()
resolve()
})
modal.show()
})
})
it('should close modal when clicking outside of modal-content', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const dialogEl = modalEl.querySelector('.modal-dialog')
const modal = new Modal(modalEl)
const spy = spyOn(modal, 'hide')
modalEl.addEventListener('shown.bs.modal', () => {
const mouseDown = createEvent('mousedown')
dialogEl.dispatchEvent(mouseDown)
modalEl.click()
expect(spy).not.toHaveBeenCalled()
modalEl.dispatchEvent(mouseDown)
modalEl.click()
expect(spy).toHaveBeenCalled()
resolve()
})
modal.show()
})
})
it('should not close modal when clicking on an element removed from modal content', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = [
'<div class="modal">',
' <div class="modal-dialog">',
' <button class="btn">BTN</button>',
' </div>',
'</div>'
].join('')
const modalEl = fixtureEl.querySelector('.modal')
const buttonEl = modalEl.querySelector('.btn')
const modal = new Modal(modalEl)
const spy = spyOn(modal, 'hide')
buttonEl.addEventListener('click', () => {
buttonEl.remove()
})
modalEl.addEventListener('shown.bs.modal', () => {
modalEl.dispatchEvent(createEvent('mousedown'))
buttonEl.click()
expect(spy).not.toHaveBeenCalled()
resolve()
})
modal.show()
})
})
it('should do nothing is the modal is not shown', () => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl)
modal.hide()
expect().nothing()
})
it('should do nothing is the modal is transitioning', () => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl)
modal._isTransitioning = true
modal.hide()
expect().nothing()
})
it('should not hide a modal if hide is prevented', () => {
return new Promise((resolve, reject) => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl)
modalEl.addEventListener('shown.bs.modal', () => {
modal.hide()
})
const hideCallback = () => {
setTimeout(() => {
expect(modal._isShown).toBeTrue()
resolve()
}, 10)
}
modalEl.addEventListener('hide.bs.modal', event => {
event.preventDefault()
hideCallback()
})
modalEl.addEventListener('hidden.bs.modal', () => {
reject(new Error('should not trigger hidden'))
})
modal.show()
})
})
it('should release focus trap', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl)
const spy = spyOn(modal._focustrap, 'deactivate').and.callThrough()
modalEl.addEventListener('shown.bs.modal', () => {
modal.hide()
})
modalEl.addEventListener('hidden.bs.modal', () => {
expect(spy).toHaveBeenCalled()
resolve()
})
modal.show()
})
})
})
describe('dispose', () => {
it('should dispose a modal', () => {
fixtureEl.innerHTML = '<div id="exampleModal" class="modal"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl)
const focustrap = modal._focustrap
const spyDeactivate = spyOn(focustrap, 'deactivate').and.callThrough()
expect(Modal.getInstance(modalEl)).toEqual(modal)
const spyOff = spyOn(EventHandler, 'off')
modal.dispose()
expect(Modal.getInstance(modalEl)).toBeNull()
expect(spyOff).toHaveBeenCalledTimes(3)
expect(spyDeactivate).toHaveBeenCalled()
})
})
describe('handleUpdate', () => {
it('should call adjust dialog', () => {
fixtureEl.innerHTML = '<div id="exampleModal" class="modal"><div class="modal-dialog"></div></div>'
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl)
const spy = spyOn(modal, '_adjustDialog')
modal.handleUpdate()
expect(spy).toHaveBeenCalled()
})
})
describe('data-api', () => {
it('should toggle modal', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = [
'<button type="button" data-bs-toggle="modal" data-bs-target="#exampleModal"></button>',
'<div id="exampleModal" class="modal"><div class="modal-dialog"></div></div>'
].join('')
const modalEl = fixtureEl.querySelector('.modal')
const trigger = fixtureEl.querySelector('[data-bs-toggle="modal"]')
modalEl.addEventListener('shown.bs.modal', () => {
expect(modalEl.getAttribute('aria-modal')).toEqual('true')
expect(modalEl.getAttribute('role')).toEqual('dialog')
expect(modalEl.getAttribute('aria-hidden')).toBeNull()
expect(modalEl.style.display).toEqual('block')
expect(document.querySelector('.modal-backdrop')).not.toBeNull()
setTimeout(() => trigger.click(), 10)
})
modalEl.addEventListener('hidden.bs.modal', () => {
expect(modalEl.getAttribute('aria-modal')).toBeNull()
expect(modalEl.getAttribute('role')).toBeNull()
expect(modalEl.getAttribute('aria-hidden')).toEqual('true')
expect(modalEl.style.display).toEqual('none')
expect(document.querySelector('.modal-backdrop')).toBeNull()
resolve()
})
trigger.click()
})
})
it('should not recreate a new modal', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = [
'<button type="button" data-bs-toggle="modal" data-bs-target="#exampleModal"></button>',
'<div id="exampleModal" class="modal"><div class="modal-dialog"></div></div>'
].join('')
const modalEl = fixtureEl.querySelector('.modal')
const modal = new Modal(modalEl)
const trigger = fixtureEl.querySelector('[data-bs-toggle="modal"]')
const spy = spyOn(modal, 'show').and.callThrough()
modalEl.addEventListener('shown.bs.modal', () => {
expect(spy).toHaveBeenCalled()
resolve()
})
trigger.click()
})
})
it('should prevent default when the trigger is <a> or <area>', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = [
'<a data-bs-toggle="modal" href="#" data-bs-target="#exampleModal"></a>',
'<div id="exampleModal" class="modal"><div class="modal-dialog"></div></div>'
].join('')
const modalEl = fixtureEl.querySelector('.modal')
const trigger = fixtureEl.querySelector('[data-bs-toggle="modal"]')
const spy = spyOn(Event.prototype, 'preventDefault').and.callThrough()
modalEl.addEventListener('shown.bs.modal', () => {
expect(modalEl.getAttribute('aria-modal')).toEqual('true')
expect(modalEl.getAttribute('role')).toEqual('dialog')
expect(modalEl.getAttribute('aria-hidden')).toBeNull()
expect(modalEl.style.display).toEqual('block')
expect(document.querySelector('.modal-backdrop')).not.toBeNull()
expect(spy).toHaveBeenCalled()
resolve()
})
trigger.click()
})
})
it('should focus the trigger on hide', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = [
'<a data-bs-toggle="modal" href="#" data-bs-target="#exampleModal"></a>',
'<div id="exampleModal" class="modal"><div class="modal-dialog"></div></div>'
].join('')
const modalEl = fixtureEl.querySelector('.modal')
const trigger = fixtureEl.querySelector('[data-bs-toggle="modal"]')
const spy = spyOn(trigger, 'focus')
modalEl.addEventListener('shown.bs.modal', () => {
const modal = Modal.getInstance(modalEl)
modal.hide()
})
const hideListener = () => {
setTimeout(() => {
expect(spy).toHaveBeenCalled()
resolve()
}, 20)
}
modalEl.addEventListener('hidden.bs.modal', () => {
hideListener()
})
trigger.click()
})
})
it('should open modal, having special characters in its id', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = [
'<button class="btn btn-primary" data-bs-toggle="modal" data-bs-target="#j_id22:exampleModal">',
' Launch demo modal',
'</button>',
'<div class="modal fade" id="j_id22:exampleModal" aria-labelledby="exampleModalLabel" aria-hidden="true">',
' <div class="modal-dialog">',
' <div class="modal-content">',
' <div class="modal-body">',
' <p>modal body</p>',
' </div>',
' </div>',
' </div>',
'</div>'
].join('')
const modalEl = fixtureEl.querySelector('.modal')
const trigger = fixtureEl.querySelector('[data-bs-toggle="modal"]')
modalEl.addEventListener('shown.bs.modal', () => {
resolve()
})
trigger.click()
})
})
it('should not prevent default when a click occurred on data-bs-dismiss="modal" where tagName is DIFFERENT than <a> or <area>', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = [
'<div class="modal">',
' <div class="modal-dialog">',
' <button type="button" data-bs-dismiss="modal"></button>',
' </div>',
'</div>'
].join('')
const modalEl = fixtureEl.querySelector('.modal')
const btnClose = fixtureEl.querySelector('button[data-bs-dismiss="modal"]')
const modal = new Modal(modalEl)
const spy = spyOn(Event.prototype, 'preventDefault').and.callThrough()
modalEl.addEventListener('shown.bs.modal', () => {
btnClose.click()
})
modalEl.addEventListener('hidden.bs.modal', () => {
expect(spy).not.toHaveBeenCalled()
resolve()
})
modal.show()
})
})
it('should prevent default when a click occurred on data-bs-dismiss="modal" where tagName is <a> or <area>', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = [
'<div class="modal">',
' <div class="modal-dialog">',
' <a type="button" data-bs-dismiss="modal"></a>',
' </div>',
'</div>'
].join('')
const modalEl = fixtureEl.querySelector('.modal')
const btnClose = fixtureEl.querySelector('a[data-bs-dismiss="modal"]')
const modal = new Modal(modalEl)
const spy = spyOn(Event.prototype, 'preventDefault').and.callThrough()
modalEl.addEventListener('shown.bs.modal', () => {
btnClose.click()
})
modalEl.addEventListener('hidden.bs.modal', () => {
expect(spy).toHaveBeenCalled()
resolve()
})
modal.show()
})
})
it('should not focus the trigger if the modal is not visible', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = [
'<a data-bs-toggle="modal" href="#" data-bs-target="#exampleModal" style="display: none;"></a>',
'<div id="exampleModal" class="modal" style="display: none;"><div class="modal-dialog"></div></div>'
].join('')
const modalEl = fixtureEl.querySelector('.modal')
const trigger = fixtureEl.querySelector('[data-bs-toggle="modal"]')
const spy = spyOn(trigger, 'focus')
modalEl.addEventListener('shown.bs.modal', () => {
const modal = Modal.getInstance(modalEl)
modal.hide()
})
const hideListener = () => {
setTimeout(() => {
expect(spy).not.toHaveBeenCalled()
resolve()
}, 20)
}
modalEl.addEventListener('hidden.bs.modal', () => {
hideListener()
})
trigger.click()
})
})
it('should not focus the trigger if the modal is not shown', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = [
'<a data-bs-toggle="modal" href="#" data-bs-target="#exampleModal"></a>',
'<div id="exampleModal" class="modal"><div class="modal-dialog"></div></div>'
].join('')
const modalEl = fixtureEl.querySelector('.modal')
const trigger = fixtureEl.querySelector('[data-bs-toggle="modal"]')
const spy = spyOn(trigger, 'focus')
const showListener = () => {
setTimeout(() => {
expect(spy).not.toHaveBeenCalled()
resolve()
}, 10)
}
modalEl.addEventListener('show.bs.modal', event => {
event.preventDefault()
showListener()
})
trigger.click()
})
})
it('should call hide first, if another modal is open', () => {
return new Promise(resolve => {
fixtureEl.innerHTML = [
'<button data-bs-toggle="modal" data-bs-target="#modal2"></button>',
'<div id="modal1" class="modal fade"><div class="modal-dialog"></div></div>',
'<div id="modal2" class="modal"><div class="modal-dialog"></div></div>'
].join('')
const trigger2 = fixtureEl.querySelector('button')
const modalEl1 = document.querySelector('#modal1')
const modalEl2 = document.querySelector('#modal2')
const modal1 = new Modal(modalEl1)
modalEl1.addEventListener('shown.bs.modal', () => {
trigger2.click()
})
modalEl1.addEventListener('hidden.bs.modal', () => {
expect(Modal.getInstance(modalEl2)).not.toBeNull()
expect(modalEl2).toHaveClass('show')
resolve()
})
modal1.show()
})
})
})
describe('jQueryInterface', () => {
it('should create a modal', () => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const div = fixtureEl.querySelector('div')
jQueryMock.fn.modal = Modal.jQueryInterface
jQueryMock.elements = [div]
jQueryMock.fn.modal.call(jQueryMock)
expect(Modal.getInstance(div)).not.toBeNull()
})
it('should create a modal with given config', () => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const div = fixtureEl.querySelector('div')
jQueryMock.fn.modal = Modal.jQueryInterface
jQueryMock.elements = [div]
jQueryMock.fn.modal.call(jQueryMock, { keyboard: false })
const spy = spyOn(Modal.prototype, 'constructor')
expect(spy).not.toHaveBeenCalledWith(div, { keyboard: false })
const modal = Modal.getInstance(div)
expect(modal).not.toBeNull()
expect(modal._config.keyboard).toBeFalse()
})
it('should not re create a modal', () => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const div = fixtureEl.querySelector('div')
const modal = new Modal(div)
jQueryMock.fn.modal = Modal.jQueryInterface
jQueryMock.elements = [div]
jQueryMock.fn.modal.call(jQueryMock)
expect(Modal.getInstance(div)).toEqual(modal)
})
it('should throw error on undefined method', () => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const div = fixtureEl.querySelector('div')
const action = 'undefinedMethod'
jQueryMock.fn.modal = Modal.jQueryInterface
jQueryMock.elements = [div]
expect(() => {
jQueryMock.fn.modal.call(jQueryMock, action)
}).toThrowError(TypeError, `No method named "${action}"`)
})
it('should call show method', () => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const div = fixtureEl.querySelector('div')
const modal = new Modal(div)
jQueryMock.fn.modal = Modal.jQueryInterface
jQueryMock.elements = [div]
const spy = spyOn(modal, 'show')
jQueryMock.fn.modal.call(jQueryMock, 'show')
expect(spy).toHaveBeenCalled()
})
it('should not call show method', () => {
fixtureEl.innerHTML = '<div class="modal" data-bs-show="false"><div class="modal-dialog"></div></div>'
const div = fixtureEl.querySelector('div')
jQueryMock.fn.modal = Modal.jQueryInterface
jQueryMock.elements = [div]
const spy = spyOn(Modal.prototype, 'show')
jQueryMock.fn.modal.call(jQueryMock)
expect(spy).not.toHaveBeenCalled()
})
})
describe('getInstance', () => {
it('should return modal instance', () => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const div = fixtureEl.querySelector('div')
const modal = new Modal(div)
expect(Modal.getInstance(div)).toEqual(modal)
expect(Modal.getInstance(div)).toBeInstanceOf(Modal)
})
it('should return null when there is no modal instance', () => {
fixtureEl.innerHTML = '<div class="modal"><div class="modal-dialog"></div></div>'
const div = fixtureEl.querySelector('div')
expect(Modal.getInstance(div)).toBeNull()
})
})
describe('getOrCreateInstance', () => {
it('should return modal instance', () => {
fixtureEl.innerHTML = '<div></div>'
const div = fixtureEl.querySelector('div')
const modal = new Modal(div)
expect(Modal.getOrCreateInstance(div)).toEqual(modal)
expect(Modal.getInstance(div)).toEqual(Modal.getOrCreateInstance(div, {}))
expect(Modal.getOrCreateInstance(div)).toBeInstanceOf(Modal)
})
it('should return new instance when there is no modal instance', () => {
fixtureEl.innerHTML = '<div></div>'
const div = fixtureEl.querySelector('div')
expect(Modal.getInstance(div)).toBeNull()
expect(Modal.getOrCreateInstance(div)).toBeInstanceOf(Modal)
})
it('should return new instance when there is no modal instance with given configuration', () => {
fixtureEl.innerHTML = '<div></div>'
const div = fixtureEl.querySelector('div')
expect(Modal.getInstance(div)).toBeNull()
const modal = Modal.getOrCreateInstance(div, {
backdrop: true
})
expect(modal).toBeInstanceOf(Modal)
expect(modal._config.backdrop).toBeTrue()
})
it('should return the instance when exists without given configuration', () => {
fixtureEl.innerHTML = '<div></div>'
const div = fixtureEl.querySelector('div')
const modal = new Modal(div, {
backdrop: true
})
expect(Modal.getInstance(div)).toEqual(modal)
const modal2 = Modal.getOrCreateInstance(div, {
backdrop: false
})
expect(modal).toBeInstanceOf(Modal)
expect(modal2).toEqual(modal)
expect(modal2._config.backdrop).toBeTrue()
})
})
}) | javascript | github | https://github.com/twbs/bootstrap | js/tests/unit/modal.spec.js |
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package authentication contains the internal representation of the
// authentication.k8s.io API group (per the +groupName tag below).
// The marker comments are consumed by the Kubernetes code generators.
//
// +k8s:deepcopy-gen=package
// +groupName=authentication.k8s.io
package authentication
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.projectStructure
/**
 * A lazy sequence of the modules of type [M] that this module directly depends on
 * via a regular dependency.
 *
 * @see KaModule.directRegularDependencies
 */
public inline fun <reified M : KaModule> KaModule.directRegularDependenciesOfType(): Sequence<M> {
    return directRegularDependencies.asSequence().filterIsInstance<M>()
}
/**
 * A lazy sequence of the modules of type [M] that this module directly depends on
 * via a friend dependency.
 *
 * @see KaModule.directFriendDependencies
 */
public inline fun <reified M : KaModule> KaModule.directFriendDependenciesOfType(): Sequence<M> {
    return directFriendDependencies.asSequence().filterIsInstance<M>()
}
/**
 * A lazy sequence of the modules of type [M] that this module directly depends on
 * via a depends-on dependency.
 *
 * @see KaModule.directDependsOnDependencies
 */
public inline fun <reified M : KaModule> KaModule.directDependsOnDependenciesOfType(): Sequence<M> {
    return directDependsOnDependencies.asSequence().filterIsInstance<M>()
}
/**
 * A lazy sequence of every module this module directly depends on, in the order:
 * regular dependencies, then depends-on dependencies, then friend dependencies.
 *
 * @see KaModule.directRegularDependencies
 * @see KaModule.directDependsOnDependencies
 * @see KaModule.directFriendDependencies
 */
public fun KaModule.allDirectDependencies(): Sequence<KaModule> =
    directRegularDependencies.asSequence() +
        directDependsOnDependencies.asSequence() +
        directFriendDependencies.asSequence()
/**
 * A lazy sequence of the modules of type [M] that this module directly depends on,
 * across regular, depends-on, and friend dependencies.
 *
 * @see KaModule.directRegularDependencies
 * @see KaModule.directDependsOnDependencies
 * @see KaModule.directFriendDependencies
 */
public inline fun <reified M : KaModule> KaModule.allDirectDependenciesOfType(): Sequence<M> {
    return allDirectDependencies().filterIsInstance<M>()
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.